From 3ac5f2f962fe63c024856b28917589a3a8b3882a Mon Sep 17 00:00:00 2001 From: Cherry Mui Date: Thu, 22 May 2025 12:14:46 -0400 Subject: [PATCH 001/139] [dev.simd] codereview.cfg: set up dev.simd branch Change-Id: I3d9c9f706bdb11cc6786f150be17e68ed06892d3 Reviewed-on: https://go-review.googlesource.com/c/go/+/675595 Reviewed-by: David Chase Auto-Submit: Cherry Mui LUCI-TryBot-Result: Go LUCI --- codereview.cfg | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/codereview.cfg b/codereview.cfg index 77a74f108eae36..e59a1c89121b4c 100644 --- a/codereview.cfg +++ b/codereview.cfg @@ -1 +1,2 @@ -branch: master +branch: dev.simd +parent-branch: master From 4d2c71ebf9b05c50d4078fe1735fb4d0d1d26572 Mon Sep 17 00:00:00 2001 From: Cherry Mui Date: Thu, 22 May 2025 15:08:05 -0400 Subject: [PATCH 002/139] [dev.simd] internal/goexperiment: add SIMD goexperiment We'll use it to guard the simd package, and the compiler's handling of SIMD types and intrinsics. Change-Id: I0356368eea0a98a5016baaaf7acb7da8b6305429 Reviewed-on: https://go-review.googlesource.com/c/go/+/675536 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/internal/goexperiment/exp_simd_off.go | 8 ++++++++ src/internal/goexperiment/exp_simd_on.go | 8 ++++++++ src/internal/goexperiment/flags.go | 4 ++++ 3 files changed, 20 insertions(+) create mode 100644 src/internal/goexperiment/exp_simd_off.go create mode 100644 src/internal/goexperiment/exp_simd_on.go diff --git a/src/internal/goexperiment/exp_simd_off.go b/src/internal/goexperiment/exp_simd_off.go new file mode 100644 index 00000000000000..ebc40b308e384d --- /dev/null +++ b/src/internal/goexperiment/exp_simd_off.go @@ -0,0 +1,8 @@ +// Code generated by mkconsts.go. DO NOT EDIT. + +//go:build !goexperiment.simd + +package goexperiment + +const SIMD = false +const SIMDInt = 0 diff --git a/src/internal/goexperiment/exp_simd_on.go b/src/internal/goexperiment/exp_simd_on.go new file mode 100644 index 00000000000000..137d1dd1ba3fba --- /dev/null +++ b/src/internal/goexperiment/exp_simd_on.go @@ -0,0 +1,8 @@ +// Code generated by mkconsts.go. DO NOT EDIT. + +//go:build goexperiment.simd + +package goexperiment + +const SIMD = true +const SIMDInt = 1 diff --git a/src/internal/goexperiment/flags.go b/src/internal/goexperiment/flags.go index ceff24193d89a5..b693ed883a51d3 100644 --- a/src/internal/goexperiment/flags.go +++ b/src/internal/goexperiment/flags.go @@ -129,4 +129,8 @@ type Flags struct { // GreenTeaGC enables the Green Tea GC implementation. GreenTeaGC bool + + // SIMD enables the simd package and the compiler's handling + // of SIMD intrinsics. + SIMD bool } From 2ef7106881db51b485f092af93c1a1f01b60ab16 Mon Sep 17 00:00:00 2001 From: David Chase Date: Thu, 22 May 2025 18:14:51 -0400 Subject: [PATCH 003/139] [dev.simd] internal/buildcfg: enable SIMD GOEXPERIMENT for amd64 Since we are developing and testing this, the default is on. This may still cause us a little headache when developing on other-architecture laptops. 
Change-Id: I9e9e5ea4ff2312c0c8385386b5012370f00dbfbd Reviewed-on: https://go-review.googlesource.com/c/go/+/675735 LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui --- src/internal/buildcfg/exp.go | 1 + 1 file changed, 1 insertion(+) diff --git a/src/internal/buildcfg/exp.go b/src/internal/buildcfg/exp.go index e36ec08a5b0232..17a02415c42b07 100644 --- a/src/internal/buildcfg/exp.go +++ b/src/internal/buildcfg/exp.go @@ -84,6 +84,7 @@ func ParseGOEXPERIMENT(goos, goarch, goexp string) (*ExperimentFlags, error) { AliasTypeParams: true, SwissMap: true, SyncHashTrieMap: true, + SIMD: goarch == "amd64", // TODO remove this (default to false) when dev.simd is merged Dwarf5: dwarf5Supported, } From 04b1030ae488851278257bac66ccf9925f1b87fb Mon Sep 17 00:00:00 2001 From: David Chase Date: Mon, 31 Mar 2025 10:45:23 +1100 Subject: [PATCH 004/139] [dev.simd] cmd/compile: adapters for simd This combines several CLs into a single patch of "glue" for the generated SIMD extensions. This glue includes GOEXPERIMENT checks that disable the creation of user-visible "simd" types and that disable the registration of "simd" intrinsics. The simd type checks were changed to work for either package "simd" or "internal/simd" so that moving that package won't be quite so fragile. cmd/compile, internal/simd: glue for adding SIMD extensions to Go cmd/compile: theft of Cherry's sample SIMD compilation Change-Id: Id44e2f4bafe74032c26de576a8691b6f7d977e01 Reviewed-on: https://go-review.googlesource.com/c/go/+/675598 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao --- src/cmd/compile/internal/abi/abiutils.go | 11 +- src/cmd/compile/internal/amd64/simdssa.go | 19 + src/cmd/compile/internal/amd64/ssa.go | 163 +- src/cmd/compile/internal/ssa/_gen/AMD64.rules | 33 + src/cmd/compile/internal/ssa/_gen/AMD64Ops.go | 75 +- .../compile/internal/ssa/_gen/generic.rules | 2 +- .../compile/internal/ssa/_gen/genericOps.go | 5 + src/cmd/compile/internal/ssa/_gen/main.go | 10 + src/cmd/compile/internal/ssa/_gen/rulegen.go | 9 + .../compile/internal/ssa/_gen/simdAMD64.rules | 4 + .../compile/internal/ssa/_gen/simdAMD64ops.go | 10 + .../internal/ssa/_gen/simdgenericOps.go | 10 + src/cmd/compile/internal/ssa/config.go | 8 + src/cmd/compile/internal/ssa/decompose.go | 18 +- src/cmd/compile/internal/ssa/expand_calls.go | 13 +- src/cmd/compile/internal/ssa/opGen.go | 1775 +++++++++++------ src/cmd/compile/internal/ssa/rewriteAMD64.go | 320 +++ .../compile/internal/ssa/rewritegeneric.go | 4 +- src/cmd/compile/internal/ssa/value.go | 3 + src/cmd/compile/internal/ssagen/intrinsics.go | 101 +- .../compile/internal/ssagen/simdintrinsics.go | 15 + src/cmd/compile/internal/ssagen/ssa.go | 25 +- src/cmd/compile/internal/types/size.go | 52 + src/cmd/compile/internal/types/type.go | 34 +- src/internal/simd/dummy.s | 7 + src/internal/simd/testdata/sample.go | 145 ++ 26 files changed, 2196 insertions(+), 675 deletions(-) create mode 100644 src/cmd/compile/internal/amd64/simdssa.go create mode 100644 src/cmd/compile/internal/ssa/_gen/simdAMD64.rules create mode 100644 src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go create mode 100644 src/cmd/compile/internal/ssa/_gen/simdgenericOps.go create mode 100644 src/cmd/compile/internal/ssagen/simdintrinsics.go create mode 100644 src/internal/simd/dummy.s create mode 100644 src/internal/simd/testdata/sample.go diff --git a/src/cmd/compile/internal/abi/abiutils.go b/src/cmd/compile/internal/abi/abiutils.go index c013aba19c41a6..cef78858151501 100644 --- a/src/cmd/compile/internal/abi/abiutils.go +++ 
b/src/cmd/compile/internal/abi/abiutils.go @@ -150,12 +150,12 @@ func appendParamTypes(rts []*types.Type, t *types.Type) []*types.Type { if w == 0 { return rts } - if t.IsScalar() || t.IsPtrShaped() { + if t.IsScalar() || t.IsPtrShaped() || t.IsSIMD() { if t.IsComplex() { c := types.FloatForComplex(t) return append(rts, c, c) } else { - if int(t.Size()) <= types.RegSize { + if int(t.Size()) <= types.RegSize || t.IsSIMD() { return append(rts, t) } // assume 64bit int on 32-bit machine @@ -199,6 +199,9 @@ func appendParamOffsets(offsets []int64, at int64, t *types.Type) ([]int64, int6 if w == 0 { return offsets, at } + if t.IsSIMD() { + return append(offsets, at), at + w + } if t.IsScalar() || t.IsPtrShaped() { if t.IsComplex() || int(t.Size()) > types.RegSize { // complex and *int64 on 32-bit s := w / 2 @@ -521,11 +524,11 @@ func (state *assignState) allocateRegs(regs []RegIndex, t *types.Type) []RegInde } ri := state.rUsed.intRegs rf := state.rUsed.floatRegs - if t.IsScalar() || t.IsPtrShaped() { + if t.IsScalar() || t.IsPtrShaped() || t.IsSIMD() { if t.IsComplex() { regs = append(regs, RegIndex(rf+state.rTotal.intRegs), RegIndex(rf+1+state.rTotal.intRegs)) rf += 2 - } else if t.IsFloat() { + } else if t.IsFloat() || t.IsSIMD() { regs = append(regs, RegIndex(rf+state.rTotal.intRegs)) rf += 1 } else { diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go new file mode 100644 index 00000000000000..0cd9b8548df67c --- /dev/null +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -0,0 +1,19 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Placeholder for generated glue to come later +package amd64 + +import ( + "cmd/compile/internal/ssa" + "cmd/compile/internal/ssagen" +) + +func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { + switch v.Op { + default: + return false + } + return true +} diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 3af513773d3b2e..cf5f8134560283 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -67,6 +67,8 @@ func storeByType(t *types.Type) obj.As { case 8: return x86.AMOVSD } + } else if t.IsSIMD() { + return simdMov(width) } else { switch width { case 1: @@ -92,6 +94,8 @@ func moveByType(t *types.Type) obj.As { // There is no xmm->xmm move with 1 byte opcode, // so use movups, which has 2 byte opcode. 
return x86.AMOVUPS + } else if t.IsSIMD() { + return simdMov(t.Size()) } else { switch t.Size() { case 1: @@ -1038,6 +1042,10 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { } x := v.Args[0].Reg() y := v.Reg() + if v.Type.IsSIMD() { + x = simdReg(v.Args[0]) + y = simdReg(v) + } if x != y { opregreg(s, moveByType(v.Type), y, x) } @@ -1049,16 +1057,24 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p := s.Prog(loadByType(v.Type)) ssagen.AddrAuto(&p.From, v.Args[0]) p.To.Type = obj.TYPE_REG - p.To.Reg = v.Reg() + r := v.Reg() + if v.Type.IsSIMD() { + r = simdReg(v) + } + p.To.Reg = r case ssa.OpStoreReg: if v.Type.IsFlags() { v.Fatalf("store flags not implemented: %v", v.LongString()) return } + r := v.Args[0].Reg() + if v.Type.IsSIMD() { + r = simdReg(v.Args[0]) + } p := s.Prog(storeByType(v.Type)) p.From.Type = obj.TYPE_REG - p.From.Reg = v.Args[0].Reg() + p.From.Reg = r ssagen.AddrAuto(&p.To, v) case ssa.OpAMD64LoweredHasCPUFeature: p := s.Prog(x86.AMOVBLZX) @@ -1426,11 +1442,125 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.From.Offset = int64(x) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() + + // XXX SIMD + // XXX may change depending on how we handle aliased registers + case ssa.OpAMD64Zero128, ssa.OpAMD64Zero256, ssa.OpAMD64Zero512: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = simdReg(v) + p.AddRestSourceReg(simdReg(v)) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + case ssa.OpAMD64VPADDD4: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = simdReg(v.Args[0]) + p.AddRestSourceReg(simdReg(v.Args[1])) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + case ssa.OpAMD64VMOVDQUload128, ssa.OpAMD64VMOVDQUload256, ssa.OpAMD64VMOVDQUload512: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + case ssa.OpAMD64VMOVDQUstore128, ssa.OpAMD64VMOVDQUstore256, ssa.OpAMD64VMOVDQUstore512: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = simdReg(v.Args[1]) + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.To, v) + + case ssa.OpAMD64VPMOVMToVec8x16, + ssa.OpAMD64VPMOVMToVec8x32, + ssa.OpAMD64VPMOVMToVec8x64, + ssa.OpAMD64VPMOVMToVec16x8, + ssa.OpAMD64VPMOVMToVec16x16, + ssa.OpAMD64VPMOVMToVec16x32, + ssa.OpAMD64VPMOVMToVec32x4, + ssa.OpAMD64VPMOVMToVec32x8, + ssa.OpAMD64VPMOVMToVec32x16, + ssa.OpAMD64VPMOVMToVec64x2, + ssa.OpAMD64VPMOVMToVec64x4, + ssa.OpAMD64VPMOVMToVec64x8: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + + case ssa.OpAMD64VPMOVVec8x16ToM, + ssa.OpAMD64VPMOVVec8x32ToM, + ssa.OpAMD64VPMOVVec8x64ToM, + ssa.OpAMD64VPMOVVec16x8ToM, + ssa.OpAMD64VPMOVVec16x16ToM, + ssa.OpAMD64VPMOVVec16x32ToM, + ssa.OpAMD64VPMOVVec32x4ToM, + ssa.OpAMD64VPMOVVec32x8ToM, + ssa.OpAMD64VPMOVVec32x16ToM, + ssa.OpAMD64VPMOVVec64x2ToM, + ssa.OpAMD64VPMOVVec64x4ToM, + ssa.OpAMD64VPMOVVec64x8ToM: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = simdReg(v.Args[0]) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + default: - v.Fatalf("genValue not implemented: %s", v.LongString()) + if !ssaGenSIMDValue(s, v) { + v.Fatalf("genValue not implemented: %s", v.LongString()) + } } } +func simdGenUnary(s *ssagen.State, v *ssa.Value) { + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = simdReg(v.Args[0]) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) +} + +func 
simdGenBinary(s *ssagen.State, v *ssa.Value) { + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = simdReg(v.Args[0]) + p.AddRestSourceReg(simdReg(v.Args[1])) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) +} + +func simdGenUnaryImmUint8(s *ssagen.State, v *ssa.Value) { + p := s.Prog(v.Op.Asm()) + imm := v.AuxInt + if imm < 0 || imm > 255 { + v.Fatalf("Invalid source selection immediate") + } + p.From.Offset = imm + p.From.Type = obj.TYPE_CONST + p.AddRestSourceReg(simdReg(v.Args[0])) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) +} + +func simdGenBinaryImmUint8(s *ssagen.State, v *ssa.Value) { + p := s.Prog(v.Op.Asm()) + imm := v.AuxInt + if imm < 0 || imm > 255 { + v.Fatalf("Invalid source selection immediate") + } + p.From.Offset = imm + p.From.Type = obj.TYPE_CONST + p.AddRestSourceReg(simdReg(v.Args[0])) + p.AddRestSourceReg(simdReg(v.Args[1])) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) +} + var blockJump = [...]struct { asm, invasm obj.As }{ @@ -1532,3 +1662,30 @@ func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg in p.Pos = p.Pos.WithNotStmt() return p } + +// XXX maybe make this part of v.Reg? +// On the other hand, it is architecture-specific. +func simdReg(v *ssa.Value) int16 { + t := v.Type + if !t.IsSIMD() { + panic("simdReg: not a simd type") + } + switch t.Size() { + case 16: + return v.Reg() + case 32: + return v.Reg() + (x86.REG_Y0 - x86.REG_X0) + case 64: + return v.Reg() + (x86.REG_Z0 - x86.REG_X0) + } + panic("unreachable") +} + +func simdMov(width int64) obj.As { + if width >= 64 { + return x86.AVMOVDQU64 + } else if width >= 16 { + return x86.AVMOVDQU + } + return x86.AKMOVQ +} diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64.rules b/src/cmd/compile/internal/ssa/_gen/AMD64.rules index d55dfe70acc155..2972eae87d5479 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/AMD64.rules @@ -1680,3 +1680,36 @@ // If we don't use the flags any more, just use the standard op. 
(Select0 a:(ADD(Q|L)constflags [c] x)) && a.Uses == 1 => (ADD(Q|L)const [c] x) + +// XXX SIMD +(Load ptr mem) && t.Size() == 16 => (VMOVDQUload128 ptr mem) + +(Store {t} ptr val mem) && t.Size() == 16 => (VMOVDQUstore128 ptr val mem) + +(Load ptr mem) && t.Size() == 32 => (VMOVDQUload256 ptr mem) + +(Store {t} ptr val mem) && t.Size() == 32 => (VMOVDQUstore256 ptr val mem) + +(Load ptr mem) && t.Size() == 64 => (VMOVDQUload512 ptr mem) + +(Store {t} ptr val mem) && t.Size() == 64 => (VMOVDQUstore512 ptr val mem) + +(ZeroSIMD ) && t.Size() == 16 => (Zero128 ) +(ZeroSIMD ) && t.Size() == 32 => (Zero256 ) +(ZeroSIMD ) && t.Size() == 64 => (Zero512 ) + +(VPMOVVec8x16ToM (VPMOVMToVec8x16 x)) => x +(VPMOVVec8x32ToM (VPMOVMToVec8x32 x)) => x +(VPMOVVec8x64ToM (VPMOVMToVec8x64 x)) => x + +(VPMOVVec16x8ToM (VPMOVMToVec16x8 x)) => x +(VPMOVVec16x16ToM (VPMOVMToVec16x16 x)) => x +(VPMOVVec16x32ToM (VPMOVMToVec16x32 x)) => x + +(VPMOVVec32x4ToM (VPMOVMToVec32x4 x)) => x +(VPMOVVec32x8ToM (VPMOVMToVec32x8 x)) => x +(VPMOVVec32x16ToM (VPMOVMToVec32x16 x)) => x + +(VPMOVVec64x2ToM (VPMOVMToVec64x2 x)) => x +(VPMOVVec64x4ToM (VPMOVMToVec64x4 x)) => x +(VPMOVVec64x8ToM (VPMOVMToVec64x8 x)) => x diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go index a8ec2a278c964c..aafe4d179bb230 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go @@ -63,6 +63,16 @@ var regNamesAMD64 = []string{ "X14", "X15", // constant 0 in ABIInternal + // TODO: update asyncPreempt for K registers. + // asyncPreempt also needs to store Z0-Z15 properly. + "K0", + "K1", + "K2", + "K3", + "K4", + "K5", + "K6", + "K7", // If you add registers, update asyncPreempt in runtime // pseudo-registers @@ -100,6 +110,7 @@ func init() { g = buildReg("g") fp = buildReg("X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14") x15 = buildReg("X15") + mask = buildReg("K1 K2 K3 K4 K5 K6 K7") gpsp = gp | buildReg("SP") gpspsb = gpsp | buildReg("SB") gpspsbg = gpspsb | g @@ -107,8 +118,9 @@ func init() { ) // Common slices of register masks var ( - gponly = []regMask{gp} - fponly = []regMask{fp} + gponly = []regMask{gp} + fponly = []regMask{fp} + maskonly = []regMask{mask} ) // Common regInfo @@ -170,6 +182,12 @@ func init() { fpstore = regInfo{inputs: []regMask{gpspsb, fp, 0}} fpstoreidx = regInfo{inputs: []regMask{gpspsb, gpsp, fp, 0}} + fp1m1 = regInfo{inputs: fponly, outputs: maskonly} + m1fp1 = regInfo{inputs: maskonly, outputs: fponly} + fp2m1 = regInfo{inputs: []regMask{fp, fp}, outputs: maskonly} + fp2m1fp1 = regInfo{inputs: []regMask{fp, fp, mask}, outputs: fponly} + fp2m1m1 = regInfo{inputs: []regMask{fp, fp, mask}, outputs: maskonly} + prefreg = regInfo{inputs: []regMask{gpspsbg}} ) @@ -1199,6 +1217,54 @@ func init() { // // output[i] = (input[i] >> 7) & 1 {name: "PMOVMSKB", argLength: 1, reg: fpgp, asm: "PMOVMSKB"}, + + // XXX SIMD + {name: "VPADDD4", argLength: 2, reg: fp21, asm: "VPADDD", commutative: true}, // arg0 + arg1 + + {name: "VMOVDQUload128", argLength: 2, reg: fpload, asm: "VMOVDQU", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0+auxint+aux, arg1 = mem + {name: "VMOVDQUstore128", argLength: 3, reg: fpstore, asm: "VMOVDQU", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // store, *(arg0+auxint+aux) = arg1, arg2 = mem + + {name: "VMOVDQUload256", argLength: 2, reg: fpload, asm: "VMOVDQU", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0+auxint+aux, arg1 = mem + 
{name: "VMOVDQUstore256", argLength: 3, reg: fpstore, asm: "VMOVDQU", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // store, *(arg0+auxint+aux) = arg1, arg2 = mem + + {name: "VMOVDQUload512", argLength: 2, reg: fpload, asm: "VMOVDQU64", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0+auxint+aux, arg1 = mem + {name: "VMOVDQUstore512", argLength: 3, reg: fpstore, asm: "VMOVDQU64", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // store, *(arg0+auxint+aux) = arg1, arg2 = mem + + {name: "VPMOVMToVec8x16", argLength: 1, reg: m1fp1, asm: "VPMOVM2B"}, + {name: "VPMOVMToVec8x32", argLength: 1, reg: m1fp1, asm: "VPMOVM2B"}, + {name: "VPMOVMToVec8x64", argLength: 1, reg: m1fp1, asm: "VPMOVM2B"}, + + {name: "VPMOVMToVec16x8", argLength: 1, reg: m1fp1, asm: "VPMOVM2W"}, + {name: "VPMOVMToVec16x16", argLength: 1, reg: m1fp1, asm: "VPMOVM2W"}, + {name: "VPMOVMToVec16x32", argLength: 1, reg: m1fp1, asm: "VPMOVM2W"}, + + {name: "VPMOVMToVec32x4", argLength: 1, reg: m1fp1, asm: "VPMOVM2D"}, + {name: "VPMOVMToVec32x8", argLength: 1, reg: m1fp1, asm: "VPMOVM2D"}, + {name: "VPMOVMToVec32x16", argLength: 1, reg: m1fp1, asm: "VPMOVM2D"}, + + {name: "VPMOVMToVec64x2", argLength: 1, reg: m1fp1, asm: "VPMOVM2Q"}, + {name: "VPMOVMToVec64x4", argLength: 1, reg: m1fp1, asm: "VPMOVM2Q"}, + {name: "VPMOVMToVec64x8", argLength: 1, reg: m1fp1, asm: "VPMOVM2Q"}, + + {name: "VPMOVVec8x16ToM", argLength: 1, reg: fp1m1, asm: "VPMOVB2M"}, + {name: "VPMOVVec8x32ToM", argLength: 1, reg: fp1m1, asm: "VPMOVB2M"}, + {name: "VPMOVVec8x64ToM", argLength: 1, reg: fp1m1, asm: "VPMOVB2M"}, + + {name: "VPMOVVec16x8ToM", argLength: 1, reg: fp1m1, asm: "VPMOVW2M"}, + {name: "VPMOVVec16x16ToM", argLength: 1, reg: fp1m1, asm: "VPMOVW2M"}, + {name: "VPMOVVec16x32ToM", argLength: 1, reg: fp1m1, asm: "VPMOVW2M"}, + + {name: "VPMOVVec32x4ToM", argLength: 1, reg: fp1m1, asm: "VPMOVD2M"}, + {name: "VPMOVVec32x8ToM", argLength: 1, reg: fp1m1, asm: "VPMOVD2M"}, + {name: "VPMOVVec32x16ToM", argLength: 1, reg: fp1m1, asm: "VPMOVD2M"}, + + {name: "VPMOVVec64x2ToM", argLength: 1, reg: fp1m1, asm: "VPMOVQ2M"}, + {name: "VPMOVVec64x4ToM", argLength: 1, reg: fp1m1, asm: "VPMOVQ2M"}, + {name: "VPMOVVec64x8ToM", argLength: 1, reg: fp1m1, asm: "VPMOVQ2M"}, + + {name: "Zero128", argLength: 0, reg: fp01, asm: "VPXOR"}, + {name: "Zero256", argLength: 0, reg: fp01, asm: "VPXOR"}, + {name: "Zero512", argLength: 0, reg: fp01, asm: "VPXORQ"}, } var AMD64blocks = []blockData{ @@ -1230,14 +1296,15 @@ func init() { name: "AMD64", pkg: "cmd/internal/obj/x86", genfile: "../../amd64/ssa.go", - ops: AMD64ops, + genSIMDfile: "../../amd64/simdssa.go", + ops: append(AMD64ops, simdAMD64Ops(fp11, fp21, fp2m1, fp2m1fp1, fp2m1m1)...), // AMD64ops, blocks: AMD64blocks, regnames: regNamesAMD64, ParamIntRegNames: "AX BX CX DI SI R8 R9 R10 R11", ParamFloatRegNames: "X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14", gpregmask: gp, fpregmask: fp, - specialregmask: x15, + specialregmask: x15 | mask, framepointerreg: int8(num["BP"]), linkreg: -1, // not used }) diff --git a/src/cmd/compile/internal/ssa/_gen/generic.rules b/src/cmd/compile/internal/ssa/_gen/generic.rules index b178a1add6de29..1077921f93400c 100644 --- a/src/cmd/compile/internal/ssa/_gen/generic.rules +++ b/src/cmd/compile/internal/ssa/_gen/generic.rules @@ -910,7 +910,7 @@ // struct operations (StructSelect [i] x:(StructMake ___)) => x.Args[i] -(Load _ _) && t.IsStruct() && CanSSA(t) => rewriteStructLoad(v) +(Load _ _) && t.IsStruct() && CanSSA(t) && !t.IsSIMD() => 
rewriteStructLoad(v) (Store _ (StructMake ___) _) => rewriteStructStore(v) (StructSelect [i] x:(Load ptr mem)) && !CanSSA(t) => diff --git a/src/cmd/compile/internal/ssa/_gen/genericOps.go b/src/cmd/compile/internal/ssa/_gen/genericOps.go index 1f6ad4e16d98f8..2d44cc85f8242e 100644 --- a/src/cmd/compile/internal/ssa/_gen/genericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/genericOps.go @@ -662,6 +662,10 @@ var genericOps = []opData{ // Prefetch instruction {name: "PrefetchCache", argLength: 2, hasSideEffects: true}, // Do prefetch arg0 to cache. arg0=addr, arg1=memory. {name: "PrefetchCacheStreamed", argLength: 2, hasSideEffects: true}, // Do non-temporal or streamed prefetch arg0 to cache. arg0=addr, arg1=memory. + + // XXX SIMD + {name: "Add32x4", argLength: 2}, // arg0 + arg1 + {name: "ZeroSIMD", argLength: 0}, } // kind controls successors implicit exit @@ -689,6 +693,7 @@ var genericBlocks = []blockData{ } func init() { + genericOps = append(genericOps, simdGenericOps()...) archs = append(archs, arch{ name: "generic", ops: genericOps, diff --git a/src/cmd/compile/internal/ssa/_gen/main.go b/src/cmd/compile/internal/ssa/_gen/main.go index 3f65831b6e02b5..13d3ce6f8f6305 100644 --- a/src/cmd/compile/internal/ssa/_gen/main.go +++ b/src/cmd/compile/internal/ssa/_gen/main.go @@ -32,6 +32,7 @@ type arch struct { name string pkg string // obj package to import for this arch. genfile string // source file containing opcode code generation. + genSIMDfile string // source file containing opcode code generation for SIMD. ops []opData blocks []blockData regnames []string @@ -525,6 +526,15 @@ func genOp() { if err != nil { log.Fatalf("can't read %s: %v", a.genfile, err) } + // Append the file of simd operations, too + if a.genSIMDfile != "" { + simdSrc, err := os.ReadFile(a.genSIMDfile) + if err != nil { + log.Fatalf("can't read %s: %v", a.genSIMDfile, err) + } + src = append(src, simdSrc...) + } + seen := make(map[string]bool, len(a.ops)) for _, m := range rxOp.FindAllSubmatch(src, -1) { seen[string(m[1])] = true diff --git a/src/cmd/compile/internal/ssa/_gen/rulegen.go b/src/cmd/compile/internal/ssa/_gen/rulegen.go index c2891da6c8d368..558bbab6a75a9d 100644 --- a/src/cmd/compile/internal/ssa/_gen/rulegen.go +++ b/src/cmd/compile/internal/ssa/_gen/rulegen.go @@ -95,6 +95,7 @@ func genLateLowerRules(arch arch) { genRulesSuffix(arch, "latelower") } func genRulesSuffix(arch arch, suff string) { // Open input file. + var text io.Reader text, err := os.Open(arch.name + suff + ".rules") if err != nil { if suff == "" { @@ -105,6 +106,14 @@ func genRulesSuffix(arch arch, suff string) { return } + // Check for file of SIMD rules to add + if suff == "" { + simdtext, err := os.Open("simd" + arch.name + ".rules") + if err == nil { + text = io.MultiReader(text, simdtext) + } + } + // oprules contains a list of rules for each block and opcode blockrules := map[string][]Rule{} oprules := map[string][]Rule{} diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules new file mode 100644 index 00000000000000..3c6be4ccef85d9 --- /dev/null +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -0,0 +1,4 @@ +// Code generated by internal/simd/_gen using 'go run .'; DO NOT EDIT. + +// (AddInt8x16 ...) => (VPADDB ...) 
+// etc diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go new file mode 100644 index 00000000000000..b0852dba3dc29f --- /dev/null +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -0,0 +1,10 @@ +// Code generated by internal/simd/_gen using 'go run .'; DO NOT EDIT. + +package main + +func simdAMD64Ops(fp11, fp21, fp2m1, fp2m1fp1, fp2m1m1 regInfo) []opData { + return []opData{ + // {name: "VPADDB", argLength: 2, reg: fp21, asm: "VPADDB", commutative: true}, + // etc, generated + } +} diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go new file mode 100644 index 00000000000000..666d6879d69198 --- /dev/null +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -0,0 +1,10 @@ +// Code generated by internal/simd/_gen using 'go run .'; DO NOT EDIT. + +package main + +func simdGenericOps() []opData { + return []opData{ + // {name: "AddInt8x16", argLength: 2, commutative: true}, + // etc + } +} diff --git a/src/cmd/compile/internal/ssa/config.go b/src/cmd/compile/internal/ssa/config.go index d4cd32a0d7b5b6..0299e808c6d365 100644 --- a/src/cmd/compile/internal/ssa/config.go +++ b/src/cmd/compile/internal/ssa/config.go @@ -89,6 +89,10 @@ type Types struct { Float32Ptr *types.Type Float64Ptr *types.Type BytePtrPtr *types.Type + Vec128 *types.Type + Vec256 *types.Type + Vec512 *types.Type + Mask *types.Type } // NewTypes creates and populates a Types. @@ -123,6 +127,10 @@ func (t *Types) SetTypPtrs() { t.Float32Ptr = types.NewPtr(types.Types[types.TFLOAT32]) t.Float64Ptr = types.NewPtr(types.Types[types.TFLOAT64]) t.BytePtrPtr = types.NewPtr(types.NewPtr(types.Types[types.TUINT8])) + t.Vec128 = types.TypeVec128 + t.Vec256 = types.TypeVec256 + t.Vec512 = types.TypeVec512 + t.Mask = types.TypeMask } type Logger interface { diff --git a/src/cmd/compile/internal/ssa/decompose.go b/src/cmd/compile/internal/ssa/decompose.go index cf9285741ed085..c3d9997793ef05 100644 --- a/src/cmd/compile/internal/ssa/decompose.go +++ b/src/cmd/compile/internal/ssa/decompose.go @@ -100,7 +100,7 @@ func decomposeBuiltIn(f *Func) { } case t.IsFloat(): // floats are never decomposed, even ones bigger than RegSize - case t.Size() > f.Config.RegSize: + case t.Size() > f.Config.RegSize && !t.IsSIMD(): f.Fatalf("undecomposed named type %s %v", name, t) } } @@ -135,7 +135,7 @@ func decomposeBuiltInPhi(v *Value) { decomposeInterfacePhi(v) case v.Type.IsFloat(): // floats are never decomposed, even ones bigger than RegSize - case v.Type.Size() > v.Block.Func.Config.RegSize: + case v.Type.Size() > v.Block.Func.Config.RegSize && !v.Type.IsSIMD(): v.Fatalf("%v undecomposed type %v", v, v.Type) } } @@ -248,7 +248,7 @@ func decomposeUser(f *Func) { for _, name := range f.Names { t := name.Type switch { - case t.IsStruct(): + case isStructNotSIMD(t): newNames = decomposeUserStructInto(f, name, newNames) case t.IsArray(): newNames = decomposeUserArrayInto(f, name, newNames) @@ -293,7 +293,7 @@ func decomposeUserArrayInto(f *Func, name *LocalSlot, slots []*LocalSlot) []*Loc if t.Elem().IsArray() { return decomposeUserArrayInto(f, elemName, slots) - } else if t.Elem().IsStruct() { + } else if isStructNotSIMD(t.Elem()) { return decomposeUserStructInto(f, elemName, slots) } @@ -313,7 +313,7 @@ func decomposeUserStructInto(f *Func, name *LocalSlot, slots []*LocalSlot) []*Lo fnames = append(fnames, fs) // arrays and structs will be decomposed further, so // there's no need to record a name - if 
!fs.Type.IsArray() && !fs.Type.IsStruct() { + if !fs.Type.IsArray() && !isStructNotSIMD(fs.Type) { slots = maybeAppend(f, slots, fs) } } @@ -339,7 +339,7 @@ func decomposeUserStructInto(f *Func, name *LocalSlot, slots []*LocalSlot) []*Lo // now that this f.NamedValues contains values for the struct // fields, recurse into nested structs for i := 0; i < n; i++ { - if name.Type.FieldType(i).IsStruct() { + if isStructNotSIMD(name.Type.FieldType(i)) { slots = decomposeUserStructInto(f, fnames[i], slots) delete(f.NamedValues, *fnames[i]) } else if name.Type.FieldType(i).IsArray() { @@ -351,7 +351,7 @@ func decomposeUserStructInto(f *Func, name *LocalSlot, slots []*LocalSlot) []*Lo } func decomposeUserPhi(v *Value) { switch { - case v.Type.IsStruct(): + case isStructNotSIMD(v.Type): decomposeStructPhi(v) case v.Type.IsArray(): decomposeArrayPhi(v) @@ -458,3 +458,7 @@ func deleteNamedVals(f *Func, toDelete []namedVal) { } f.Names = f.Names[:end] } + +func isStructNotSIMD(t *types.Type) bool { + return t.IsStruct() && !t.IsSIMD() +} diff --git a/src/cmd/compile/internal/ssa/expand_calls.go b/src/cmd/compile/internal/ssa/expand_calls.go index fb281f2f8452cb..9e46182a4cd9ae 100644 --- a/src/cmd/compile/internal/ssa/expand_calls.go +++ b/src/cmd/compile/internal/ssa/expand_calls.go @@ -399,6 +399,9 @@ func (x *expandState) decomposeAsNecessary(pos src.XPos, b *Block, a, m0 *Value, return mem case types.TSTRUCT: + if at.IsSIMD() { + break // XXX + } for i := 0; i < at.NumFields(); i++ { et := at.Field(i).Type // might need to read offsets from the fields e := b.NewValue1I(pos, OpStructSelect, et, int64(i), a) @@ -547,6 +550,9 @@ func (x *expandState) rewriteSelectOrArg(pos src.XPos, b *Block, container, a, m case types.TSTRUCT: // Assume ssagen/ssa.go (in buildssa) spills large aggregates so they won't appear here. + if at.IsSIMD() { + break // XXX + } for i := 0; i < at.NumFields(); i++ { et := at.Field(i).Type e := x.rewriteSelectOrArg(pos, b, container, nil, m0, et, rc.next(et)) @@ -713,6 +719,9 @@ func (x *expandState) rewriteWideSelectToStores(pos src.XPos, b *Block, containe case types.TSTRUCT: // Assume ssagen/ssa.go (in buildssa) spills large aggregates so they won't appear here. 
+ if at.IsSIMD() { + break // XXX + } for i := 0; i < at.NumFields(); i++ { et := at.Field(i).Type m0 = x.rewriteWideSelectToStores(pos, b, container, m0, et, rc.next(et)) @@ -859,7 +868,7 @@ func (c *registerCursor) at(t *types.Type, i int) registerCursor { rc.nextSlice += Abi1RO(i * w) return rc } - if t.IsStruct() { + if isStructNotSIMD(t) { for j := 0; j < i; j++ { rc.next(t.FieldType(j)) } @@ -973,7 +982,7 @@ func (x *expandState) regOffset(t *types.Type, i int) Abi1RO { if t.IsArray() { return Abi1RO(i) * x.regWidth(t.Elem()) } - if t.IsStruct() { + if isStructNotSIMD(t) { k := Abi1RO(0) for j := 0; j < i; j++ { k += x.regWidth(t.FieldType(j)) diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 90a38c783a861c..512dc065279af7 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1162,6 +1162,40 @@ const ( OpAMD64PSIGNB OpAMD64PCMPEQB OpAMD64PMOVMSKB + OpAMD64VPADDD4 + OpAMD64VMOVDQUload128 + OpAMD64VMOVDQUstore128 + OpAMD64VMOVDQUload256 + OpAMD64VMOVDQUstore256 + OpAMD64VMOVDQUload512 + OpAMD64VMOVDQUstore512 + OpAMD64VPMOVMToVec8x16 + OpAMD64VPMOVMToVec8x32 + OpAMD64VPMOVMToVec8x64 + OpAMD64VPMOVMToVec16x8 + OpAMD64VPMOVMToVec16x16 + OpAMD64VPMOVMToVec16x32 + OpAMD64VPMOVMToVec32x4 + OpAMD64VPMOVMToVec32x8 + OpAMD64VPMOVMToVec32x16 + OpAMD64VPMOVMToVec64x2 + OpAMD64VPMOVMToVec64x4 + OpAMD64VPMOVMToVec64x8 + OpAMD64VPMOVVec8x16ToM + OpAMD64VPMOVVec8x32ToM + OpAMD64VPMOVVec8x64ToM + OpAMD64VPMOVVec16x8ToM + OpAMD64VPMOVVec16x16ToM + OpAMD64VPMOVVec16x32ToM + OpAMD64VPMOVVec32x4ToM + OpAMD64VPMOVVec32x8ToM + OpAMD64VPMOVVec32x16ToM + OpAMD64VPMOVVec64x2ToM + OpAMD64VPMOVVec64x4ToM + OpAMD64VPMOVVec64x8ToM + OpAMD64Zero128 + OpAMD64Zero256 + OpAMD64Zero512 OpARMADD OpARMADDconst @@ -3386,6 +3420,8 @@ const ( OpClobberReg OpPrefetchCache OpPrefetchCacheStreamed + OpAdd32x4 + OpZeroSIMD ) var opcodeTable = [...]opInfo{ @@ -6856,7 +6892,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVSS, reg: regInfo{ inputs: []inputInfo{ - {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -6872,7 +6908,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVSD, reg: regInfo{ inputs: []inputInfo{ - {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -6912,8 +6948,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -6929,8 +6965,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -6946,8 +6982,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -6963,8 +6999,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -6980,8 +7016,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVSS, reg: regInfo{ inputs: []inputInfo{ - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, }, }, @@ -6994,8 +7030,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVSD, reg: regInfo{ inputs: []inputInfo{ - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, }, }, @@ -7008,9 +7044,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, }, }, @@ -7023,9 +7059,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, }, }, @@ -7038,9 +7074,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, }, }, @@ -7053,9 +7089,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: 
[]inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, }, }, @@ -7069,8 +7105,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AADDSS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -7087,8 +7123,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AADDSD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -7105,8 +7141,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASUBSS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -7123,8 +7159,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASUBSD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -7141,8 +7177,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AMULSS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -7159,8 +7195,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AMULSD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -7177,8 +7213,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ADIVSS, reg: regInfo{ 
inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -7195,8 +7231,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ADIVSD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -7213,9 +7249,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -7232,9 +7268,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -7251,9 +7287,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -7270,9 +7306,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -7289,9 +7325,9 @@ var opcodeTable = 
[...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -7308,9 +7344,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -7327,9 +7363,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -7346,9 +7382,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -7365,9 +7401,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -7384,9 +7420,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -7403,9 +7439,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -7422,9 +7458,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -7441,9 +7477,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -7460,9 +7496,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -7479,9 +7515,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -7498,9 +7534,9 
@@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -7579,7 +7615,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AADDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -7593,7 +7629,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AADDL, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8227,7 +8263,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8241,7 +8277,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDL, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8321,7 +8357,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8335,7 +8371,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AORL, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8415,7 +8451,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AXORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8429,7 +8465,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AXORL, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8530,8 +8566,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMPQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8544,8 +8580,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMPL, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI 
R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8558,8 +8594,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMPW, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8572,8 +8608,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMPB, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8586,7 +8622,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMPQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8599,7 +8635,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMPL, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8612,7 +8648,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMPW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8625,7 +8661,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMPB, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8638,9 +8674,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8654,9 +8690,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8669,9 +8705,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8685,9 +8721,9 @@ var 
opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8700,9 +8736,9 @@ var opcodeTable = [...]opInfo{ scale: 2, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8716,9 +8752,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8732,9 +8768,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8747,8 +8783,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8762,8 +8798,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8776,8 +8812,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8791,8 +8827,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // 
AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8805,8 +8841,8 @@ var opcodeTable = [...]opInfo{ scale: 2, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8820,8 +8856,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8835,8 +8871,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -9060,7 +9096,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ABTSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -9074,7 +9110,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ABTRQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -9088,7 +9124,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ABTCQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -9741,8 +9777,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AADDL, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -9760,8 +9796,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AADDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -9779,8 +9815,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASUBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 
R10 R11 R12 R13 R15 @@ -9798,8 +9834,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASUBL, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -9817,8 +9853,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDL, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -9836,8 +9872,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -9855,8 +9891,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -9874,8 +9910,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AORL, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -9893,8 +9929,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AXORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -9912,8 +9948,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AXORL, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -9931,9 +9967,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP 
SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -9951,9 +9987,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -9971,9 +10007,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -9991,9 +10027,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -10011,9 +10047,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -10031,9 +10067,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -10051,9 +10087,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, 
// AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -10071,9 +10107,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -10091,9 +10127,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -10111,9 +10147,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -10131,9 +10167,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -10151,9 +10187,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -10171,9 +10207,9 @@ 
var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -10191,9 +10227,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -10211,9 +10247,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -10231,9 +10267,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -10251,9 +10287,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -10271,9 +10307,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: 
[]outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -10291,9 +10327,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -10311,9 +10347,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -10331,9 +10367,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -10351,9 +10387,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -10371,9 +10407,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -10391,9 +10427,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + 
{1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -10411,9 +10447,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -10430,8 +10466,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AADDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10445,8 +10481,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASUBQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10460,8 +10496,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10475,8 +10511,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AORQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10490,8 +10526,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AXORQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10505,8 +10541,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AADDL, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10520,8 +10556,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASUBL, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX 
CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10535,8 +10571,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDL, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10550,8 +10586,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AORL, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10565,8 +10601,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AXORL, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10580,9 +10616,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10596,9 +10632,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10612,9 +10648,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10628,9 +10664,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10644,9 +10680,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI 
R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10660,9 +10696,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10676,9 +10712,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10692,9 +10728,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10708,9 +10744,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10724,9 +10760,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10740,9 +10776,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI 
DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10756,9 +10792,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10772,9 +10808,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10788,9 +10824,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10804,9 +10840,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10820,9 +10856,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10836,9 +10872,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10852,9 +10888,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 
SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10868,9 +10904,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10884,9 +10920,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10900,9 +10936,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10916,9 +10952,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10932,9 +10968,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10948,9 +10984,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10964,9 +11000,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX 
DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10980,8 +11016,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10995,8 +11031,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11010,8 +11046,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11025,8 +11061,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11040,8 +11076,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11055,8 +11091,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11070,8 +11106,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11085,8 +11121,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX 
DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11100,8 +11136,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11115,8 +11151,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11130,8 +11166,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11145,8 +11181,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11160,8 +11196,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11175,8 +11211,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11190,8 +11226,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11205,8 +11241,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11220,8 +11256,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 
R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11235,8 +11271,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11250,8 +11286,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11265,8 +11301,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12342,7 +12378,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETEQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12355,7 +12391,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETNE, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12368,7 +12404,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETLT, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12381,7 +12417,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETLE, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12394,7 +12430,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETGT, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12407,7 +12443,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETGE, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12420,7 +12456,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETCS, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12433,7 +12469,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETLS, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12446,7 +12482,7 @@ var opcodeTable = [...]opInfo{ asm: 
x86.ASETHI, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12459,7 +12495,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETCC, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12473,8 +12509,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12488,8 +12524,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12503,8 +12539,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12518,8 +12554,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12533,8 +12569,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12548,8 +12584,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12563,8 +12599,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12578,8 +12614,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX 
DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12593,8 +12629,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12608,8 +12644,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12998,7 +13034,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ALEAQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13014,7 +13050,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ALEAL, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13030,7 +13066,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ALEAW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13047,8 +13083,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13065,8 +13101,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13083,8 +13119,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13100,8 +13136,8 @@ var opcodeTable = [...]opInfo{ scale: 2, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP 
BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13117,8 +13153,8 @@ var opcodeTable = [...]opInfo{ scale: 2, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13134,8 +13170,8 @@ var opcodeTable = [...]opInfo{ scale: 2, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13151,8 +13187,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13168,8 +13204,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13185,8 +13221,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13202,8 +13238,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13219,8 +13255,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, 
// AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13236,8 +13272,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13253,7 +13289,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVBLZX, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13269,7 +13305,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVBQSX, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13285,7 +13321,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVWLZX, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13301,7 +13337,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVWQSX, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13317,7 +13353,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVL, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13333,7 +13369,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVLQSX, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13349,7 +13385,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13365,8 +13401,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13379,8 +13415,8 @@ var opcodeTable = 
[...]opInfo{ asm: x86.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13393,8 +13429,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVL, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13407,8 +13443,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13421,7 +13457,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVUPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -13437,8 +13473,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVUPS, reg: regInfo{ inputs: []inputInfo{ - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, }, }, @@ -13452,8 +13488,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13470,8 +13506,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13487,8 +13523,8 @@ var opcodeTable = [...]opInfo{ scale: 2, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13505,8 +13541,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - 
{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13522,8 +13558,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13539,8 +13575,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13557,8 +13593,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13574,8 +13610,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13592,9 +13628,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13608,9 +13644,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13623,9 +13659,9 @@ var opcodeTable = [...]opInfo{ scale: 2, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX 
CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13639,9 +13675,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13654,9 +13690,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13669,9 +13705,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13685,9 +13721,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13700,9 +13736,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13715,7 +13751,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13728,7 +13764,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13741,7 +13777,7 @@ var opcodeTable = [...]opInfo{ asm: 
x86.AMOVL, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13754,7 +13790,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13767,7 +13803,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVUPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13781,8 +13817,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13796,8 +13832,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13810,8 +13846,8 @@ var opcodeTable = [...]opInfo{ scale: 2, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13825,8 +13861,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13839,8 +13875,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13854,8 +13890,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13868,8 +13904,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, 
}, @@ -14127,7 +14163,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -14143,7 +14179,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVL, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -14159,7 +14195,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -14177,8 +14213,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AXCHGB, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -14196,8 +14232,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AXCHGL, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -14215,8 +14251,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AXCHGQ, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -14235,8 +14271,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AXADDL, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -14255,8 +14291,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AXADDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -14328,8 +14364,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDB, reg: regInfo{ inputs: 
[]inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -14344,8 +14380,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDL, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -14360,8 +14396,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -14376,8 +14412,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AORB, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -14392,8 +14428,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AORL, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -14408,8 +14444,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AORQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -14512,7 +14548,7 @@ var opcodeTable = [...]opInfo{ asm: x86.APREFETCHT0, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -14523,7 +14559,7 @@ var opcodeTable = [...]opInfo{ asm: x86.APREFETCHNTA, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -14706,8 +14742,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVBEW, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -14720,7 +14756,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVBEL, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 
R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -14736,8 +14772,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVBEL, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -14750,7 +14786,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVBEQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -14766,8 +14802,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVBEQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -14781,8 +14817,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -14798,8 +14834,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -14815,8 +14851,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -14833,8 +14869,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -14850,8 +14886,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 
R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -14868,9 +14904,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -14883,9 +14919,9 @@ var opcodeTable = [...]opInfo{ scale: 2, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -14899,9 +14935,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -14914,9 +14950,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -14929,9 +14965,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -14945,9 +14981,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -14960,9 +14996,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 
- {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -15059,8 +15095,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASARXL, reg: regInfo{ inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15076,8 +15112,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASARXQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15093,8 +15129,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASHLXL, reg: regInfo{ inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15110,8 +15146,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASHLXQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15127,8 +15163,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASHRXL, reg: regInfo{ inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15144,8 +15180,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASHRXQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15162,9 +15198,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + 
{1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15181,9 +15217,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15200,9 +15236,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15219,9 +15255,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15238,9 +15274,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15257,9 +15293,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15276,9 +15312,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 
R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15295,9 +15331,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15314,9 +15350,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15333,9 +15369,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15352,9 +15388,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15371,9 +15407,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15390,9 +15426,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP 
SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15409,9 +15445,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15428,9 +15464,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15537,6 +15573,453 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPADDD4", + argLen: 2, + commutative: true, + asm: x86.AVPADDD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMOVDQUload128", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AVMOVDQU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMOVDQUstore128", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AVMOVDQU, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + }, + }, + { + name: "VMOVDQUload256", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AVMOVDQU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMOVDQUstore256", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AVMOVDQU, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + }, + }, + { + name: "VMOVDQUload512", + auxType: auxSymOff, + argLen: 2, + 
faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AVMOVDQU64, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMOVDQUstore512", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AVMOVDQU64, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + }, + }, + { + name: "VPMOVMToVec8x16", + argLen: 1, + asm: x86.AVPMOVM2B, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVMToVec8x32", + argLen: 1, + asm: x86.AVPMOVM2B, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVMToVec8x64", + argLen: 1, + asm: x86.AVPMOVM2B, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVMToVec16x8", + argLen: 1, + asm: x86.AVPMOVM2W, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVMToVec16x16", + argLen: 1, + asm: x86.AVPMOVM2W, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVMToVec16x32", + argLen: 1, + asm: x86.AVPMOVM2W, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVMToVec32x4", + argLen: 1, + asm: x86.AVPMOVM2D, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVMToVec32x8", + argLen: 1, + asm: x86.AVPMOVM2D, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVMToVec32x16", + argLen: 1, + asm: x86.AVPMOVM2D, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVMToVec64x2", + argLen: 1, + asm: x86.AVPMOVM2Q, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVMToVec64x4", + argLen: 1, + asm: x86.AVPMOVM2Q, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 
X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVMToVec64x8", + argLen: 1, + asm: x86.AVPMOVM2Q, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVVec8x16ToM", + argLen: 1, + asm: x86.AVPMOVB2M, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPMOVVec8x32ToM", + argLen: 1, + asm: x86.AVPMOVB2M, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPMOVVec8x64ToM", + argLen: 1, + asm: x86.AVPMOVB2M, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPMOVVec16x8ToM", + argLen: 1, + asm: x86.AVPMOVW2M, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPMOVVec16x16ToM", + argLen: 1, + asm: x86.AVPMOVW2M, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPMOVVec16x32ToM", + argLen: 1, + asm: x86.AVPMOVW2M, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPMOVVec32x4ToM", + argLen: 1, + asm: x86.AVPMOVD2M, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPMOVVec32x8ToM", + argLen: 1, + asm: x86.AVPMOVD2M, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPMOVVec32x16ToM", + argLen: 1, + asm: x86.AVPMOVD2M, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPMOVVec64x2ToM", + argLen: 1, + asm: x86.AVPMOVQ2M, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPMOVVec64x4ToM", + argLen: 1, + asm: x86.AVPMOVQ2M, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPMOVVec64x8ToM", + argLen: 1, + asm: x86.AVPMOVQ2M, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "Zero128", + 
argLen: 0, + asm: x86.AVPXOR, + reg: regInfo{ + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "Zero256", + argLen: 0, + asm: x86.AVPXOR, + reg: regInfo{ + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "Zero512", + argLen: 0, + asm: x86.AVPXORQ, + reg: regInfo{ + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "ADD", @@ -42682,6 +43165,16 @@ var opcodeTable = [...]opInfo{ hasSideEffects: true, generic: true, }, + { + name: "Add32x4", + argLen: 2, + generic: true, + }, + { + name: "ZeroSIMD", + argLen: 0, + generic: true, + }, } func (o Op) Asm() obj.As { return opcodeTable[o].asm } @@ -42753,13 +43246,21 @@ var registersAMD64 = [...]Register{ {29, x86.REG_X13, "X13"}, {30, x86.REG_X14, "X14"}, {31, x86.REG_X15, "X15"}, - {32, 0, "SB"}, + {32, x86.REG_K0, "K0"}, + {33, x86.REG_K1, "K1"}, + {34, x86.REG_K2, "K2"}, + {35, x86.REG_K3, "K3"}, + {36, x86.REG_K4, "K4"}, + {37, x86.REG_K5, "K5"}, + {38, x86.REG_K6, "K6"}, + {39, x86.REG_K7, "K7"}, + {40, 0, "SB"}, } var paramIntRegAMD64 = []int8{0, 3, 1, 7, 6, 8, 9, 10, 11} var paramFloatRegAMD64 = []int8{16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30} var gpRegMaskAMD64 = regMask(49135) var fpRegMaskAMD64 = regMask(2147418112) -var specialRegMaskAMD64 = regMask(2147483648) +var specialRegMaskAMD64 = regMask(1093069176832) var framepointerRegAMD64 = int8(5) var linkRegAMD64 = int8(-1) var registersARM = [...]Register{ diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 3d7af5f365a116..3afcfe153a1654 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -501,6 +501,30 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64TESTW(v) case OpAMD64TESTWconst: return rewriteValueAMD64_OpAMD64TESTWconst(v) + case OpAMD64VPMOVVec16x16ToM: + return rewriteValueAMD64_OpAMD64VPMOVVec16x16ToM(v) + case OpAMD64VPMOVVec16x32ToM: + return rewriteValueAMD64_OpAMD64VPMOVVec16x32ToM(v) + case OpAMD64VPMOVVec16x8ToM: + return rewriteValueAMD64_OpAMD64VPMOVVec16x8ToM(v) + case OpAMD64VPMOVVec32x16ToM: + return rewriteValueAMD64_OpAMD64VPMOVVec32x16ToM(v) + case OpAMD64VPMOVVec32x4ToM: + return rewriteValueAMD64_OpAMD64VPMOVVec32x4ToM(v) + case OpAMD64VPMOVVec32x8ToM: + return rewriteValueAMD64_OpAMD64VPMOVVec32x8ToM(v) + case OpAMD64VPMOVVec64x2ToM: + return rewriteValueAMD64_OpAMD64VPMOVVec64x2ToM(v) + case OpAMD64VPMOVVec64x4ToM: + return rewriteValueAMD64_OpAMD64VPMOVVec64x4ToM(v) + case OpAMD64VPMOVVec64x8ToM: + return rewriteValueAMD64_OpAMD64VPMOVVec64x8ToM(v) + case OpAMD64VPMOVVec8x16ToM: + return rewriteValueAMD64_OpAMD64VPMOVVec8x16ToM(v) + case OpAMD64VPMOVVec8x32ToM: + return rewriteValueAMD64_OpAMD64VPMOVVec8x32ToM(v) + case OpAMD64VPMOVVec8x64ToM: + return rewriteValueAMD64_OpAMD64VPMOVVec8x64ToM(v) case OpAMD64XADDLlock: return rewriteValueAMD64_OpAMD64XADDLlock(v) case OpAMD64XADDQlock: @@ -1198,6 +1222,8 @@ func rewriteValueAMD64(v *Value) bool { case OpZeroExt8to64: v.Op = OpAMD64MOVBQZX return true + case OpZeroSIMD: + return rewriteValueAMD64_OpZeroSIMD(v) } return false } @@ -22812,6 +22838,174 @@ func rewriteValueAMD64_OpAMD64TESTWconst(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64VPMOVVec16x16ToM(v *Value) bool { + v_0 := v.Args[0] + // match: (VPMOVVec16x16ToM 
(VPMOVMToVec16x16 x)) + // result: x + for { + if v_0.Op != OpAMD64VPMOVMToVec16x16 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPMOVVec16x32ToM(v *Value) bool { + v_0 := v.Args[0] + // match: (VPMOVVec16x32ToM (VPMOVMToVec16x32 x)) + // result: x + for { + if v_0.Op != OpAMD64VPMOVMToVec16x32 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPMOVVec16x8ToM(v *Value) bool { + v_0 := v.Args[0] + // match: (VPMOVVec16x8ToM (VPMOVMToVec16x8 x)) + // result: x + for { + if v_0.Op != OpAMD64VPMOVMToVec16x8 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPMOVVec32x16ToM(v *Value) bool { + v_0 := v.Args[0] + // match: (VPMOVVec32x16ToM (VPMOVMToVec32x16 x)) + // result: x + for { + if v_0.Op != OpAMD64VPMOVMToVec32x16 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPMOVVec32x4ToM(v *Value) bool { + v_0 := v.Args[0] + // match: (VPMOVVec32x4ToM (VPMOVMToVec32x4 x)) + // result: x + for { + if v_0.Op != OpAMD64VPMOVMToVec32x4 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPMOVVec32x8ToM(v *Value) bool { + v_0 := v.Args[0] + // match: (VPMOVVec32x8ToM (VPMOVMToVec32x8 x)) + // result: x + for { + if v_0.Op != OpAMD64VPMOVMToVec32x8 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPMOVVec64x2ToM(v *Value) bool { + v_0 := v.Args[0] + // match: (VPMOVVec64x2ToM (VPMOVMToVec64x2 x)) + // result: x + for { + if v_0.Op != OpAMD64VPMOVMToVec64x2 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPMOVVec64x4ToM(v *Value) bool { + v_0 := v.Args[0] + // match: (VPMOVVec64x4ToM (VPMOVMToVec64x4 x)) + // result: x + for { + if v_0.Op != OpAMD64VPMOVMToVec64x4 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPMOVVec64x8ToM(v *Value) bool { + v_0 := v.Args[0] + // match: (VPMOVVec64x8ToM (VPMOVMToVec64x8 x)) + // result: x + for { + if v_0.Op != OpAMD64VPMOVMToVec64x8 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPMOVVec8x16ToM(v *Value) bool { + v_0 := v.Args[0] + // match: (VPMOVVec8x16ToM (VPMOVMToVec8x16 x)) + // result: x + for { + if v_0.Op != OpAMD64VPMOVMToVec8x16 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPMOVVec8x32ToM(v *Value) bool { + v_0 := v.Args[0] + // match: (VPMOVVec8x32ToM (VPMOVMToVec8x32 x)) + // result: x + for { + if v_0.Op != OpAMD64VPMOVMToVec8x32 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPMOVVec8x64ToM(v *Value) bool { + v_0 := v.Args[0] + // match: (VPMOVVec8x64ToM (VPMOVMToVec8x64 x)) + // result: x + for { + if v_0.Op != OpAMD64VPMOVMToVec8x64 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + return false +} func rewriteValueAMD64_OpAMD64XADDLlock(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -26215,6 +26409,48 @@ func rewriteValueAMD64_OpLoad(v *Value) bool { v.AddArg2(ptr, mem) return true } + // match: (Load ptr mem) + // cond: t.Size() == 16 + // result: (VMOVDQUload128 ptr mem) + for { + t := v.Type + ptr := v_0 + mem := 
v_1 + if !(t.Size() == 16) { + break + } + v.reset(OpAMD64VMOVDQUload128) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: t.Size() == 32 + // result: (VMOVDQUload256 ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.Size() == 32) { + break + } + v.reset(OpAMD64VMOVDQUload256) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: t.Size() == 64 + // result: (VMOVDQUload512 ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.Size() == 64) { + break + } + v.reset(OpAMD64VMOVDQUload512) + v.AddArg2(ptr, mem) + return true + } return false } func rewriteValueAMD64_OpLocalAddr(v *Value) bool { @@ -29764,6 +30000,51 @@ func rewriteValueAMD64_OpStore(v *Value) bool { v.AddArg3(ptr, val, mem) return true } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 16 + // result: (VMOVDQUstore128 ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 16) { + break + } + v.reset(OpAMD64VMOVDQUstore128) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 32 + // result: (VMOVDQUstore256 ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 32) { + break + } + v.reset(OpAMD64VMOVDQUstore256) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 64 + // result: (VMOVDQUstore512 ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 64) { + break + } + v.reset(OpAMD64VMOVDQUstore512) + v.AddArg3(ptr, val, mem) + return true + } return false } func rewriteValueAMD64_OpTrunc(v *Value) bool { @@ -30117,6 +30398,45 @@ func rewriteValueAMD64_OpZero(v *Value) bool { } return false } +func rewriteValueAMD64_OpZeroSIMD(v *Value) bool { + // match: (ZeroSIMD ) + // cond: t.Size() == 16 + // result: (Zero128 ) + for { + t := v.Type + if !(t.Size() == 16) { + break + } + v.reset(OpAMD64Zero128) + v.Type = t + return true + } + // match: (ZeroSIMD ) + // cond: t.Size() == 32 + // result: (Zero256 ) + for { + t := v.Type + if !(t.Size() == 32) { + break + } + v.reset(OpAMD64Zero256) + v.Type = t + return true + } + // match: (ZeroSIMD ) + // cond: t.Size() == 64 + // result: (Zero512 ) + for { + t := v.Type + if !(t.Size() == 64) { + break + } + v.reset(OpAMD64Zero512) + v.Type = t + return true + } + return false +} func rewriteBlockAMD64(b *Block) bool { typ := &b.Func.Config.Types switch b.Kind { diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go index bfbd3c8522ed24..b7a4ff95d1af63 100644 --- a/src/cmd/compile/internal/ssa/rewritegeneric.go +++ b/src/cmd/compile/internal/ssa/rewritegeneric.go @@ -14149,11 +14149,11 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { return true } // match: (Load _ _) - // cond: t.IsStruct() && CanSSA(t) + // cond: t.IsStruct() && CanSSA(t) && !t.IsSIMD() // result: rewriteStructLoad(v) for { t := v.Type - if !(t.IsStruct() && CanSSA(t)) { + if !(t.IsStruct() && CanSSA(t) && !t.IsSIMD()) { break } v.copyOf(rewriteStructLoad(v)) diff --git a/src/cmd/compile/internal/ssa/value.go b/src/cmd/compile/internal/ssa/value.go index e80b712ddba764..8f921a80037a77 100644 --- a/src/cmd/compile/internal/ssa/value.go +++ b/src/cmd/compile/internal/ssa/value.go @@ -596,6 +596,9 @@ func AutoVar(v *Value) (*ir.Name, int64) { // CanSSA reports whether values of type t can be represented as a Value. 
func CanSSA(t *types.Type) bool { types.CalcSize(t) + if t.IsSIMD() { + return true + } if t.Size() > int64(4*types.PtrSize) { // 4*Widthptr is an arbitrary constant. We want it // to be at least 3*Widthptr so slices can be registerized. diff --git a/src/cmd/compile/internal/ssagen/intrinsics.go b/src/cmd/compile/internal/ssagen/intrinsics.go index 6b58e7e5914538..40b3c41a79e189 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics.go +++ b/src/cmd/compile/internal/ssagen/intrinsics.go @@ -1602,6 +1602,104 @@ func initIntrinsics(cfg *intrinsicBuildConfig) { return s.newValue1(ssa.OpZeroExt8to64, types.Types[types.TUINT64], out) }, sys.AMD64) + + if buildcfg.Experiment.SIMD { + // Only enable intrinsics, if SIMD experiment. + simdIntrinsics(addF) + } +} + +// simdLoadSliceMethod does intrinsic for method form of Load-from-slice +func simdLoadSliceMethod(nElts int64) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + // args[0] is unused except for its type. + t := args[0].Type + slice := args[1] + arrlen := s.constInt(types.Types[types.TINT], nElts) + cap := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice) + s.boundsCheck(arrlen, cap, ssa.BoundsConvert, false) + ptr := s.newValue1(ssa.OpSlicePtr, t.PtrTo(), slice) // is this the right type? Does it need a convert? + return s.newValue2(ssa.OpLoad, t, ptr, s.mem()) + } +} + +// simdLoadSlice does intrinsic for function form of Load-from-slice +func simdLoadSlice(nElts int64) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + // args[0] is unused except for its type. + t := n.Type() + slice := args[0] + arrlen := s.constInt(types.Types[types.TINT], nElts) + cap := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice) + s.boundsCheck(arrlen, cap, ssa.BoundsConvert, false) + ptr := s.newValue1(ssa.OpSlicePtr, t.PtrTo(), slice) // is this the right type? Does it need a convert? + return s.newValue2(ssa.OpLoad, t, ptr, s.mem()) + } +} + +func simdStoreSlice(nElts int64) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + x := args[0] + t := x.Type + slice := args[1] + arrlen := s.constInt(types.Types[types.TINT], nElts) + cap := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice) + s.boundsCheck(arrlen, cap, ssa.BoundsConvert, false) + ptr := s.newValue1(ssa.OpSlicePtr, t.PtrTo(), slice) // is this the right type? Does it need a convert? + s.store(t, ptr, x) + return nil + } +} + +func simdLoadSliceMethodPart(nElts int64) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + // args[0] is unused except for its type. 
+ t := args[0].Type + slice := args[1] + arrLen := s.constInt(types.Types[types.TINT], nElts) + cap := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice) + + /* + if off := vec.Len() - len(slice) ; off <= 0 { + plain load + } else { + load mask[off] into a scratch vector + masked load/store + } + */ + + // TODO SIMD support on a 32-bit processor + + off := s.newValue2(ssa.OpSub64, types.Types[types.TINT], arrLen, cap) + cond := s.newValue2(ssa.OpLeq64, types.Types[types.TBOOL], off, s.zeroVal(types.Types[types.TINT])) + b := s.endBlock() + b.Kind = ssa.BlockIf + b.SetControl(cond) + bTrue := s.f.NewBlock(ssa.BlockPlain) + bFalse := s.f.NewBlock(ssa.BlockPlain) + bEnd := s.f.NewBlock(ssa.BlockPlain) + b.AddEdgeTo(bTrue) + b.AddEdgeTo(bFalse) + + simdRes := ssaMarker("simdload") + + // We have atomic instructions - use it directly. + s.startBlock(bTrue) + ptr := s.newValue1(ssa.OpSlicePtr, t.PtrTo(), slice) + s.vars[simdRes] = s.newValue2(ssa.OpLoad, t, ptr, s.mem()) + s.endBlock().AddEdgeTo(bEnd) + + // Use original instruction sequence. + s.startBlock(bFalse) + // NOT IMPLEMENTED, NEED TO ADD GENERIC PARTIAL LOAD/STORE + // MASK REGISTER DEPENDS ON ARCH AND ITS SIMD VERSION. + s.endBlock().AddEdgeTo(bEnd) + + // Merge results. + s.startBlock(bEnd) + return s.variable(simdRes, t) + + } } // findIntrinsic returns a function which builds the SSA equivalent of the @@ -1627,7 +1725,8 @@ func findIntrinsic(sym *types.Sym) intrinsicBuilder { fn := sym.Name if ssa.IntrinsicsDisable { - if pkg == "internal/runtime/sys" && (fn == "GetCallerPC" || fn == "GrtCallerSP" || fn == "GetClosurePtr") { + if pkg == "internal/runtime/sys" && (fn == "GetCallerPC" || fn == "GrtCallerSP" || fn == "GetClosurePtr") || + pkg == "internal/simd" || pkg == "simd" { // TODO after simd has been moved to package simd, remove internal/simd // These runtime functions don't have definitions, must be intrinsics. } else { return nil diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go new file mode 100644 index 00000000000000..c185a956674b83 --- /dev/null +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -0,0 +1,15 @@ +// Code generated by internal/simd/_gen using 'go run .'; DO NOT EDIT. + +package ssagen + +import ( + // "cmd/compile/internal/ir" + // "cmd/compile/internal/ssa" + // "cmd/compile/internal/types" + "cmd/internal/sys" +) + +func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies ...sys.ArchFamily)) { + // addF("internal/simd", "Int32x4.Uint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + // etc +} diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index 542ad823ab8087..a10459eed7b1fd 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -623,6 +623,9 @@ func buildssa(fn *ir.Func, worker int, isPgoHot bool) *ssa.Func { // TODO figure out exactly what's unused, don't spill it. Make liveness fine-grained, also. 
for _, p := range params.InParams() { typs, offs := p.RegisterTypesAndOffsets() + if len(offs) < len(typs) { + s.Fatalf("len(offs)=%d < len(typs)=%d, params=\n%s", len(offs), len(typs), params) + } for i, t := range typs { o := offs[i] // offset within parameter fo := p.FrameOffset(params) // offset of parameter in frame @@ -1399,7 +1402,7 @@ func (s *state) instrument(t *types.Type, addr *ssa.Value, kind instrumentKind) // If it is instrumenting for MSAN or ASAN and t is a struct type, it instruments // operation for each field, instead of for the whole struct. func (s *state) instrumentFields(t *types.Type, addr *ssa.Value, kind instrumentKind) { - if !(base.Flag.MSan || base.Flag.ASan) || !t.IsStruct() { + if !(base.Flag.MSan || base.Flag.ASan) || !isStructNotSIMD(t) { s.instrument(t, addr, kind) return } @@ -4335,7 +4338,7 @@ func (s *state) zeroVal(t *types.Type) *ssa.Value { return s.constInterface(t) case t.IsSlice(): return s.constSlice(t) - case t.IsStruct(): + case isStructNotSIMD(t): n := t.NumFields() v := s.entryNewValue0(ssa.OpStructMake, t) for i := 0; i < n; i++ { @@ -4349,6 +4352,8 @@ func (s *state) zeroVal(t *types.Type) *ssa.Value { case 1: return s.entryNewValue1(ssa.OpArrayMake1, t, s.zeroVal(t.Elem())) } + case t.IsSIMD(): + return s.newValue0(ssa.OpZeroSIMD, t) } s.Fatalf("zero for type %v not implemented", t) return nil @@ -5328,7 +5333,7 @@ func (s *state) storeType(t *types.Type, left, right *ssa.Value, skip skipMask, // do *left = right for all scalar (non-pointer) parts of t. func (s *state) storeTypeScalars(t *types.Type, left, right *ssa.Value, skip skipMask) { switch { - case t.IsBoolean() || t.IsInteger() || t.IsFloat() || t.IsComplex(): + case t.IsBoolean() || t.IsInteger() || t.IsFloat() || t.IsComplex() || t.IsSIMD(): s.store(t, left, right) case t.IsPtrShaped(): if t.IsPtr() && t.Elem().NotInHeap() { @@ -5357,7 +5362,7 @@ func (s *state) storeTypeScalars(t *types.Type, left, right *ssa.Value, skip ski // itab field doesn't need a write barrier (even though it is a pointer). itab := s.newValue1(ssa.OpITab, s.f.Config.Types.BytePtr, right) s.store(types.Types[types.TUINTPTR], left, itab) - case t.IsStruct(): + case isStructNotSIMD(t): n := t.NumFields() for i := 0; i < n; i++ { ft := t.FieldType(i) @@ -5394,7 +5399,7 @@ func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) { idata := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, right) idataAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.BytePtrPtr, s.config.PtrSize, left) s.store(s.f.Config.Types.BytePtr, idataAddr, idata) - case t.IsStruct(): + case isStructNotSIMD(t): n := t.NumFields() for i := 0; i < n; i++ { ft := t.FieldType(i) @@ -6477,7 +6482,7 @@ func EmitArgInfo(f *ir.Func, abiInfo *abi.ABIParamResultInfo) *obj.LSym { uintptrTyp := types.Types[types.TUINTPTR] isAggregate := func(t *types.Type) bool { - return t.IsStruct() || t.IsArray() || t.IsComplex() || t.IsInterface() || t.IsString() || t.IsSlice() + return isStructNotSIMD(t) || t.IsArray() || t.IsComplex() || t.IsInterface() || t.IsString() || t.IsSlice() } wOff := 0 @@ -6537,7 +6542,7 @@ func EmitArgInfo(f *ir.Func, abiInfo *abi.ABIParamResultInfo) *obj.LSym { } baseOffset += t.Elem().Size() } - case t.IsStruct(): + case isStructNotSIMD(t): if t.NumFields() == 0 { n++ // {} counts as a component break @@ -7554,7 +7559,7 @@ func (s *State) UseArgs(n int64) { // fieldIdx finds the index of the field referred to by the ODOT node n. 
func fieldIdx(n *ir.SelectorExpr) int { t := n.X.Type() - if !t.IsStruct() { + if !isStructNotSIMD(t) { panic("ODOT's LHS is not a struct") } @@ -7762,6 +7767,10 @@ func SpillSlotAddr(spill ssa.Spill, baseReg int16, extraOffset int64) obj.Addr { } } +func isStructNotSIMD(t *types.Type) bool { + return t.IsStruct() && !t.IsSIMD() +} + var ( BoundsCheckFunc [ssa.BoundsKindCount]*obj.LSym ExtendCheckFunc [ssa.BoundsKindCount]*obj.LSym diff --git a/src/cmd/compile/internal/types/size.go b/src/cmd/compile/internal/types/size.go index 72ec4052a808e4..2aa437b56ffe7e 100644 --- a/src/cmd/compile/internal/types/size.go +++ b/src/cmd/compile/internal/types/size.go @@ -10,6 +10,7 @@ import ( "cmd/compile/internal/base" "cmd/internal/src" + "internal/buildcfg" "internal/types/errors" ) @@ -410,6 +411,10 @@ func CalcSize(t *Type) { } CalcStructSize(t) w = t.width + if t.IsSIMD() { // XXX + t.intRegs = 0 + t.floatRegs = 1 + } // make fake type to check later to // trigger function argument computation. @@ -452,6 +457,31 @@ func CalcSize(t *Type) { ResumeCheckSize() } +// simdify marks as type as "SIMD", either as a tag field, +// or having the SIMD attribute. The tag field is a marker +// type used to identify a struct that is not really a struct. +// A SIMD type is allocated to a vector register (on amd64, +// xmm, ymm, or zmm). The fields of a SIMD type are ignored +// by the compiler except for the space that they reserve. +func simdify(st *Type, isTag bool) { + st.align = 8 + st.alg = AMEM + st.intRegs = 0 + st.isSIMD = true + if isTag { + st.width = 0 + st.isSIMDTag = true + st.floatRegs = 0 + } else { + st.floatRegs = 1 + } + // if st.Sym() != nil { + // base.Warn("Simdify %s, %v, %d", st.Sym().Name, isTag, st.width) + // } else { + // base.Warn("Simdify %v, %v, %d", st, isTag, st.width) + // } +} + // CalcStructSize calculates the size of t, // filling in t.width, t.align, t.intRegs, and t.floatRegs, // even if size calculation is otherwise disabled. @@ -464,10 +494,27 @@ func CalcStructSize(t *Type) { switch { case sym.Name == "align64" && isAtomicStdPkg(sym.Pkg): maxAlign = 8 + + case buildcfg.Experiment.SIMD && (sym.Pkg.Path == "internal/simd" || sym.Pkg.Path == "simd") && len(t.Fields()) >= 1: + // This gates the experiment -- without it, no user-visible types can be "simd". + // The SSA-visible SIMD types remain. + // TODO after simd has been moved to package simd, remove internal/simd. + switch sym.Name { + case "v128": + simdify(t, true) + return + case "v256": + simdify(t, true) + return + case "v512": + simdify(t, true) + return + } } } fields := t.Fields() + size := calcStructOffset(t, fields, 0) // For non-zero-sized structs which end in a zero-sized field, we @@ -540,6 +587,11 @@ func CalcStructSize(t *Type) { break } } + + if len(t.Fields()) >= 1 && t.Fields()[0].Type.isSIMDTag { + // this catches `type Foo simd.Whatever` -- Foo is also SIMD. + simdify(t, false) + } } // CalcArraySize calculates the size of t, diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go index c4080ed0b526ae..41217cb2a9b212 100644 --- a/src/cmd/compile/internal/types/type.go +++ b/src/cmd/compile/internal/types/type.go @@ -201,8 +201,9 @@ type Type struct { intRegs, floatRegs uint8 // registers needed for ABIInternal - flags bitset8 - alg AlgKind // valid if Align > 0 + flags bitset8 + alg AlgKind // valid if Align > 0 + isSIMDTag, isSIMD bool // tag is the marker type, isSIMD means has marker type // size of prefix of object that contains all pointers. 
valid if Align > 0. // Note that for pointers, this is always PtrSize even if the element type @@ -605,6 +606,12 @@ func newSSA(name string) *Type { return t } +func newSIMD(name string) *Type { + t := newSSA(name) + t.isSIMD = true + return t +} + // NewMap returns a new map Type with key type k and element (aka value) type v. func NewMap(k, v *Type) *Type { t := newType(TMAP) @@ -995,10 +1002,7 @@ func (t *Type) ArgWidth() int64 { func (t *Type) Size() int64 { if t.kind == TSSA { - if t == TypeInt128 { - return 16 - } - return 0 + return t.width } CalcSize(t) return t.width @@ -1626,12 +1630,26 @@ var ( TypeFlags = newSSA("flags") TypeVoid = newSSA("void") TypeInt128 = newSSA("int128") + TypeVec128 = newSIMD("vec128") + TypeVec256 = newSIMD("vec256") + TypeVec512 = newSIMD("vec512") + TypeMask = newSSA("mask") // not a vector, not 100% sure what this should be. TypeResultMem = newResults([]*Type{TypeMem}) ) func init() { TypeInt128.width = 16 TypeInt128.align = 8 + + TypeVec128.width = 16 + TypeVec128.align = 8 + TypeVec256.width = 32 + TypeVec256.align = 8 + TypeVec512.width = 64 + TypeVec512.align = 8 + + TypeMask.width = 8 // This will depend on the architecture; spilling will be "interesting". + TypeMask.align = 8 } // NewNamed returns a new named type for the given type name. obj should be an @@ -2017,3 +2035,7 @@ var SimType [NTYPE]Kind // Fake package for shape types (see typecheck.Shapify()). var ShapePkg = NewPkg("go.shape", "go.shape") + +func (t *Type) IsSIMD() bool { + return t.isSIMD +} diff --git a/src/internal/simd/dummy.s b/src/internal/simd/dummy.s new file mode 100644 index 00000000000000..f78313afee7f7f --- /dev/null +++ b/src/internal/simd/dummy.s @@ -0,0 +1,7 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build amd64 + +// Empty file to allow bodyless functions. 
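As a hedged illustration of the tag-field convention that the CalcStructSize/simdify changes above recognize: while the experiment is on, the marker types v128, v256, and v512 are only special inside package simd (or internal/simd), and any struct whose first field has one of those marker types is itself treated as a SIMD value and kept in a single vector register. Only the marker names come from this patch; the field names and the exact layout of Float64x4 below are assumptions for illustration, not the generated code.

	package simd

	// v256 is the 256-bit marker type. CalcStructSize matches its name and
	// calls simdify(t, true), so it behaves as a zero-width tag.
	type v256 struct {
		_256 [32]byte
	}

	// Float64x4 is a hypothetical user-visible vector type. Because its first
	// field's type is a SIMD tag, CalcStructSize calls simdify on Float64x4
	// itself, so the value is assigned to a ymm register rather than laid out
	// and accessed as an ordinary struct.
	type Float64x4 struct {
		float64x4 v256
		vals      [4]float64
	}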
diff --git a/src/internal/simd/testdata/sample.go b/src/internal/simd/testdata/sample.go new file mode 100644 index 00000000000000..096691201ac40d --- /dev/null +++ b/src/internal/simd/testdata/sample.go @@ -0,0 +1,145 @@ +package sample + +import ( + "internal/simd" + "os" + "unsafe" +) + +type S1 = simd.Float64x4 + +type S2 simd.Float64x4 + +func (s S2) Len() int { + return simd.Float64x4(s).Len() +} + +func (s S2) Load(a []float64) S2 { + return S2(simd.LoadFloat64x4FromSlice(a)) +} + +func (s S2) Store(a []float64) { + simd.Float64x4(s).Store(a) +} + +func (s S2) Add(a S2) S2 { + return S2(simd.Float64x4(s).Add(simd.Float64x4(a))) +} + +func (s S2) Mul(a S2) S2 { + return S2(simd.Float64x4(s).Mul(simd.Float64x4(a))) +} + +type S3 struct { + simd.Float64x4 +} + +func ip64_0(a, b []float64) float64 { + s := 0.0 + for i := range a { + s += a[i] * b[i] + } + return s +} + +func ip64_1(a, b []float64) float64 { + var z S1 + sum := z + var i int + stride := z.Len() + for ; i <= len(a)-stride; i += stride { + va := simd.LoadFloat64x4FromSlice(a[i:]) + vb := simd.LoadFloat64x4FromSlice(b[i:]) + sum = sum.Add(va.Mul(vb)) + } + var tmp [4]float64 + sum.Store(tmp[:]) + return tmp[0] + tmp[1] + tmp[2] + tmp[3] +} + +func ip64_1a(a, b []float64) float64 { + var z S1 + sum := z + var i int + stride := z.Len() + for ; i <= len(a)-stride; i += stride { + va := simd.LoadFloat64x4FromSlice(a[i:]) + vb := simd.LoadFloat64x4FromSlice(b[i:]) + sum = FMA(sum, va, vb) + } + var tmp [4]float64 + sum.Store(tmp[:]) + return tmp[0] + tmp[1] + tmp[2] + tmp[3] +} + +//go:noinline +func FMA(a, b, c simd.Float64x4) simd.Float64x4 { + return a.Add(b.Mul(c)) +} + +func ip64_2(a, b []float64) float64 { + var z S2 + sum := z + var i int + stride := z.Len() + for ; i <= len(a)-stride; i += stride { + va := z.Load(a[i:]) + vb := z.Load(b[i:]) + sum = sum.Add(va.Mul(vb)) + } + var tmp [4]float64 + sum.Store(tmp[:]) + return tmp[0] + tmp[1] + tmp[2] + tmp[3] +} + +func ip64_3(a, b []float64) float64 { + var z S3 + sum := z + var i int + stride := z.Len() + for ; i <= len(a)-stride; i += stride { + va := simd.LoadFloat64x4FromSlice(a[i:]) + vb := simd.LoadFloat64x4FromSlice(b[i:]) + sum = S3{sum.Add(va.Mul(vb))} + } + var tmp [4]float64 + sum.Store(tmp[:]) + return tmp[0] + tmp[1] + tmp[2] + tmp[3] +} + +func main() { + a := []float64{1, 2, 3, 4, 5, 6, 7, 8} + ip0 := ip64_0(a, a) + ip1 := ip64_1(a, a) + ip1a := ip64_1a(a, a) + ip2 := ip64_2(a, a) + ip3 := ip64_3(a, a) + fmt.Printf("Test IP = %f\n", ip0) + fmt.Printf("SIMD IP 1 = %f\n", ip1) + fmt.Printf("SIMD IP 1a = %f\n", ip1a) + fmt.Printf("SIMD IP 2 = %f\n", ip2) + fmt.Printf("SIMD IP 3 = %f\n", ip3) + var z1 S1 + var z2 S2 + var z3 S2 + + s1, s2, s3 := unsafe.Sizeof(z1), unsafe.Sizeof(z2), unsafe.Sizeof(z3) + + fmt.Printf("unsafe.Sizeof(z1, z2, z3)=%d, %d, %d\n", s1, s2, s3) + + fail := false + + if s1 != 32 || s2 != 32 || s3 != 32 { + fmt.Println("Failed a sizeof check, should all be 32") + fail = true + } + + if ip1 != ip0 || ip1a != ip0 || ip2 != ip0 || ip3 != ip0 { + fmt.Println("Failed an inner product check, should all be", ip0) + fail = true + } + + if fail { + os.Exit(1) + } +} From 11d2b28bffb82e0ad0bc102812bed86ce81a1959 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Wed, 28 May 2025 17:00:59 +0000 Subject: [PATCH 005/139] [dev.simd] cmd/compile: add and fix k register supports This CL marks the "mask" ssa type as a simd type. This will make the last return of `simdMov` reachable and the spilling of K register correct. 
This CL also makes `simdReg` able to return K registers.

Change-Id: Ia66230d3e5425d9e8bdd0081b008e098382d3827
Reviewed-on: https://go-review.googlesource.com/c/go/+/676876
Reviewed-by: David Chase
Auto-Submit: Junyang Shao
LUCI-TryBot-Result: Go LUCI
---
 src/cmd/compile/internal/amd64/ssa.go  | 2 ++
 src/cmd/compile/internal/types/type.go | 2 +-
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go
index cf5f8134560283..dcc4e30e1e65c2 100644
--- a/src/cmd/compile/internal/amd64/ssa.go
+++ b/src/cmd/compile/internal/amd64/ssa.go
@@ -1671,6 +1671,8 @@ func simdReg(v *ssa.Value) int16 {
 		panic("simdReg: not a simd type")
 	}
 	switch t.Size() {
+	case 8:
+		return v.Reg() // K registers
 	case 16:
 		return v.Reg()
 	case 32:
diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go
index 41217cb2a9b212..f7b9b0f3f75940 100644
--- a/src/cmd/compile/internal/types/type.go
+++ b/src/cmd/compile/internal/types/type.go
@@ -1633,7 +1633,7 @@ var (
 	TypeVec128    = newSIMD("vec128")
 	TypeVec256    = newSIMD("vec256")
 	TypeVec512    = newSIMD("vec512")
-	TypeMask      = newSSA("mask") // not a vector, not 100% sure what this should be.
+	TypeMask      = newSIMD("mask") // not a vector, not 100% sure what this should be.
 	TypeResultMem = newResults([]*Type{TypeMem})
 )
 
From fdb067d946d45869ad3eae6cb2d447c1ad4f6cc4 Mon Sep 17 00:00:00 2001
From: David Chase
Date: Wed, 28 May 2025 13:19:16 -0400
Subject: [PATCH 006/139] [dev.simd] simd: initialize directory to make it suitable for testing SIMD

This is a multistep operation between two repos to coordinate this
move. First, copy internal/simd to simd (and adjust so that it works
with future generated SIMD); after this lands, update
golang/arch/internal/simdgen to target this directory and add it to
the end-to-end test (which will also be added once it works and is
truly end-to-end); finally, remove internal/simd once the updated
generator has been submitted.

Change-Id: If372baadc0c02e47cc32bc55b39ac19d551b2b21
Reviewed-on: https://go-review.googlesource.com/c/go/+/676955
LUCI-TryBot-Result: Go LUCI
Reviewed-by: Cherry Mui
Reviewed-by: Junyang Shao
---
 src/simd/dummy.s            |   7 ++
 src/simd/testdata/sample.go | 154 ++++++++++++++++++++++++++++++++++++
 2 files changed, 161 insertions(+)
 create mode 100644 src/simd/dummy.s
 create mode 100644 src/simd/testdata/sample.go

diff --git a/src/simd/dummy.s b/src/simd/dummy.s
new file mode 100644
index 00000000000000..f78313afee7f7f
--- /dev/null
+++ b/src/simd/dummy.s
@@ -0,0 +1,7 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build amd64
+
+// Empty file to allow bodyless functions.
diff --git a/src/simd/testdata/sample.go b/src/simd/testdata/sample.go
new file mode 100644
index 00000000000000..b8e3697b6bf644
--- /dev/null
+++ b/src/simd/testdata/sample.go
@@ -0,0 +1,154 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package main + +import ( + "fmt" + "os" + "simd" + "unsafe" +) + +func load(s []float64) simd.Float64x4 { + return simd.LoadFloat64x4((*[4]float64)(s[:4])) +} + +type S1 = simd.Float64x4 + +type S2 simd.Float64x4 + +func (s S2) Len() int { + return simd.Float64x4(s).Len() +} + +func (s S2) Load(a []float64) S2 { + return S2(load(a)) +} + +func (s S2) Store(a *[4]float64) { + simd.Float64x4(s).Store(a) +} + +func (s S2) Add(a S2) S2 { + return S2(simd.Float64x4(s).Add(simd.Float64x4(a))) +} + +func (s S2) Mul(a S2) S2 { + return S2(simd.Float64x4(s).Mul(simd.Float64x4(a))) +} + +type S3 struct { + simd.Float64x4 +} + +func ip64_0(a, b []float64) float64 { + s := 0.0 + for i := range a { + s += a[i] * b[i] + } + return s +} + +func ip64_1(a, b []float64) float64 { + var z S1 + sum := z + var i int + stride := z.Len() + for ; i <= len(a)-stride; i += stride { + va := load(a[i:]) + vb := load(b[i:]) + sum = sum.Add(va.Mul(vb)) + } + var tmp [4]float64 + sum.Store(&tmp) + return tmp[0] + tmp[1] + tmp[2] + tmp[3] +} + +func ip64_1a(a, b []float64) float64 { + var z S1 + sum := z + var i int + stride := z.Len() + for ; i <= len(a)-stride; i += stride { + va := load(a[i:]) + vb := load(b[i:]) + sum = FMA(sum, va, vb) + } + var tmp [4]float64 + sum.Store(&tmp) + return tmp[0] + tmp[1] + tmp[2] + tmp[3] +} + +//go:noinline +func FMA(a, b, c simd.Float64x4) simd.Float64x4 { + return a.Add(b.Mul(c)) +} + +func ip64_2(a, b []float64) float64 { + var z S2 + sum := z + var i int + stride := z.Len() + for ; i <= len(a)-stride; i += stride { + va := z.Load(a[i:]) + vb := z.Load(b[i:]) + sum = sum.Add(va.Mul(vb)) + } + var tmp [4]float64 + sum.Store(&tmp) + return tmp[0] + tmp[1] + tmp[2] + tmp[3] +} + +func ip64_3(a, b []float64) float64 { + var z S3 + sum := z + var i int + stride := z.Len() + for ; i <= len(a)-stride; i += stride { + va := load(a[i:]) + vb := load(b[i:]) + sum = S3{sum.Add(va.Mul(vb))} + } + var tmp [4]float64 + sum.Store(&tmp) + return tmp[0] + tmp[1] + tmp[2] + tmp[3] +} + +func main() { + a := []float64{1, 2, 3, 4, 5, 6, 7, 8} + ip0 := ip64_0(a, a) + ip1 := ip64_1(a, a) + ip1a := ip64_1a(a, a) + ip2 := ip64_2(a, a) + ip3 := ip64_3(a, a) + fmt.Printf("Test IP = %f\n", ip0) + fmt.Printf("SIMD IP 1 = %f\n", ip1) + fmt.Printf("SIMD IP 1a = %f\n", ip1a) + fmt.Printf("SIMD IP 2 = %f\n", ip2) + fmt.Printf("SIMD IP 3 = %f\n", ip3) + var z1 S1 + var z2 S2 + var z3 S2 + + s1, s2, s3 := unsafe.Sizeof(z1), unsafe.Sizeof(z2), unsafe.Sizeof(z3) + + fmt.Printf("unsafe.Sizeof(z1, z2, z3)=%d, %d, %d\n", s1, s2, s3) + + fail := false + + if s1 != 32 || s2 != 32 || s3 != 32 { + fmt.Println("Failed a sizeof check, should all be 32") + fail = true + } + + if ip1 != ip0 || ip1a != ip0 || ip2 != ip0 || ip3 != ip0 { + fmt.Println("Failed an inner product check, should all be", ip0) + fail = true + } + + if fail { + os.Exit(1) + } +} From 1161228bf189713e8cb40911bf790d6a972a704b Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Wed, 28 May 2025 17:51:44 +0000 Subject: [PATCH 007/139] [dev.simd] cmd/compile: add a fp1m1fp1 register shape to amd64 Change-Id: I9dd00cc8bef4712eff16968e4962d850859fc3f0 Reviewed-on: https://go-review.googlesource.com/c/go/+/676997 Commit-Queue: Junyang Shao LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/ssa/_gen/AMD64Ops.go | 3 ++- src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go 
index aafe4d179bb230..c773afa9d37b4a 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go @@ -185,6 +185,7 @@ func init() { fp1m1 = regInfo{inputs: fponly, outputs: maskonly} m1fp1 = regInfo{inputs: maskonly, outputs: fponly} fp2m1 = regInfo{inputs: []regMask{fp, fp}, outputs: maskonly} + fp1m1fp1 = regInfo{inputs: []regMask{fp, mask}, outputs: fponly} fp2m1fp1 = regInfo{inputs: []regMask{fp, fp, mask}, outputs: fponly} fp2m1m1 = regInfo{inputs: []regMask{fp, fp, mask}, outputs: maskonly} @@ -1297,7 +1298,7 @@ func init() { pkg: "cmd/internal/obj/x86", genfile: "../../amd64/ssa.go", genSIMDfile: "../../amd64/simdssa.go", - ops: append(AMD64ops, simdAMD64Ops(fp11, fp21, fp2m1, fp2m1fp1, fp2m1m1)...), // AMD64ops, + ops: append(AMD64ops, simdAMD64Ops(fp11, fp21, fp2m1, fp1m1fp1, fp2m1fp1, fp2m1m1)...), // AMD64ops, blocks: AMD64blocks, regnames: regNamesAMD64, ParamIntRegNames: "AX BX CX DI SI R8 R9 R10 R11", diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index b0852dba3dc29f..ff53e46e6ce6f2 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -2,7 +2,7 @@ package main -func simdAMD64Ops(fp11, fp21, fp2m1, fp2m1fp1, fp2m1m1 regInfo) []opData { +func simdAMD64Ops(fp11, fp21, fp2m1, fp1m1fp1, fp2m1fp1, fp2m1m1 regInfo) []opData { return []opData{ // {name: "VPADDB", argLength: 2, reg: fp21, asm: "VPADDB", commutative: true}, // etc, generated From 62e1fccfb9aa58534a90b475b1c02a68cc174624 Mon Sep 17 00:00:00 2001 From: David Chase Date: Thu, 29 May 2025 08:40:03 -0400 Subject: [PATCH 008/139] [dev.simd] internal: delete unused internal/simd directory this completes the move to "simd" Change-Id: Id2c2707b7b308fb12eb33af705750ce0db2b0fd8 Reviewed-on: https://go-review.googlesource.com/c/go/+/677258 LUCI-TryBot-Result: Go LUCI Auto-Submit: Junyang Shao Reviewed-by: Junyang Shao --- src/internal/simd/dummy.s | 7 -- src/internal/simd/testdata/sample.go | 145 --------------------------- 2 files changed, 152 deletions(-) delete mode 100644 src/internal/simd/dummy.s delete mode 100644 src/internal/simd/testdata/sample.go diff --git a/src/internal/simd/dummy.s b/src/internal/simd/dummy.s deleted file mode 100644 index f78313afee7f7f..00000000000000 --- a/src/internal/simd/dummy.s +++ /dev/null @@ -1,7 +0,0 @@ -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build amd64 - -// Empty file to allow bodyless functions. 
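To make the fp1m1fp1 register shape from the earlier CL concrete: it is intended for masked one-input vector operations, which read one vector register and one mask (K) register and write a vector register. The entry below is a hedged sketch in simdAMD64ops.go's opData form, not the actual generated table, which may differ in details.

	// one vector input, one mask input, one vector output: reg shape fp1m1fp1
	{name: "VPABSWMasked256", argLength: 2, reg: fp1m1fp1, asm: "VPABSW"},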
diff --git a/src/internal/simd/testdata/sample.go b/src/internal/simd/testdata/sample.go deleted file mode 100644 index 096691201ac40d..00000000000000 --- a/src/internal/simd/testdata/sample.go +++ /dev/null @@ -1,145 +0,0 @@ -package sample - -import ( - "internal/simd" - "os" - "unsafe" -) - -type S1 = simd.Float64x4 - -type S2 simd.Float64x4 - -func (s S2) Len() int { - return simd.Float64x4(s).Len() -} - -func (s S2) Load(a []float64) S2 { - return S2(simd.LoadFloat64x4FromSlice(a)) -} - -func (s S2) Store(a []float64) { - simd.Float64x4(s).Store(a) -} - -func (s S2) Add(a S2) S2 { - return S2(simd.Float64x4(s).Add(simd.Float64x4(a))) -} - -func (s S2) Mul(a S2) S2 { - return S2(simd.Float64x4(s).Mul(simd.Float64x4(a))) -} - -type S3 struct { - simd.Float64x4 -} - -func ip64_0(a, b []float64) float64 { - s := 0.0 - for i := range a { - s += a[i] * b[i] - } - return s -} - -func ip64_1(a, b []float64) float64 { - var z S1 - sum := z - var i int - stride := z.Len() - for ; i <= len(a)-stride; i += stride { - va := simd.LoadFloat64x4FromSlice(a[i:]) - vb := simd.LoadFloat64x4FromSlice(b[i:]) - sum = sum.Add(va.Mul(vb)) - } - var tmp [4]float64 - sum.Store(tmp[:]) - return tmp[0] + tmp[1] + tmp[2] + tmp[3] -} - -func ip64_1a(a, b []float64) float64 { - var z S1 - sum := z - var i int - stride := z.Len() - for ; i <= len(a)-stride; i += stride { - va := simd.LoadFloat64x4FromSlice(a[i:]) - vb := simd.LoadFloat64x4FromSlice(b[i:]) - sum = FMA(sum, va, vb) - } - var tmp [4]float64 - sum.Store(tmp[:]) - return tmp[0] + tmp[1] + tmp[2] + tmp[3] -} - -//go:noinline -func FMA(a, b, c simd.Float64x4) simd.Float64x4 { - return a.Add(b.Mul(c)) -} - -func ip64_2(a, b []float64) float64 { - var z S2 - sum := z - var i int - stride := z.Len() - for ; i <= len(a)-stride; i += stride { - va := z.Load(a[i:]) - vb := z.Load(b[i:]) - sum = sum.Add(va.Mul(vb)) - } - var tmp [4]float64 - sum.Store(tmp[:]) - return tmp[0] + tmp[1] + tmp[2] + tmp[3] -} - -func ip64_3(a, b []float64) float64 { - var z S3 - sum := z - var i int - stride := z.Len() - for ; i <= len(a)-stride; i += stride { - va := simd.LoadFloat64x4FromSlice(a[i:]) - vb := simd.LoadFloat64x4FromSlice(b[i:]) - sum = S3{sum.Add(va.Mul(vb))} - } - var tmp [4]float64 - sum.Store(tmp[:]) - return tmp[0] + tmp[1] + tmp[2] + tmp[3] -} - -func main() { - a := []float64{1, 2, 3, 4, 5, 6, 7, 8} - ip0 := ip64_0(a, a) - ip1 := ip64_1(a, a) - ip1a := ip64_1a(a, a) - ip2 := ip64_2(a, a) - ip3 := ip64_3(a, a) - fmt.Printf("Test IP = %f\n", ip0) - fmt.Printf("SIMD IP 1 = %f\n", ip1) - fmt.Printf("SIMD IP 1a = %f\n", ip1a) - fmt.Printf("SIMD IP 2 = %f\n", ip2) - fmt.Printf("SIMD IP 3 = %f\n", ip3) - var z1 S1 - var z2 S2 - var z3 S2 - - s1, s2, s3 := unsafe.Sizeof(z1), unsafe.Sizeof(z2), unsafe.Sizeof(z3) - - fmt.Printf("unsafe.Sizeof(z1, z2, z3)=%d, %d, %d\n", s1, s2, s3) - - fail := false - - if s1 != 32 || s2 != 32 || s3 != 32 { - fmt.Println("Failed a sizeof check, should all be 32") - fail = true - } - - if ip1 != ip0 || ip1a != ip0 || ip2 != ip0 || ip3 != ip0 { - fmt.Println("Failed an inner product check, should all be", ip0) - fail = true - } - - if fail { - os.Exit(1) - } -} From 71c0e550cd357f05230db70f17c3ba78d8600068 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Thu, 29 May 2025 19:05:40 +0000 Subject: [PATCH 009/139] [dev.simd] cmd/dist: disable API check on dev branch Change-Id: I5a167e95a3275bfc39fddc793b0775976747dc9a Reviewed-on: https://go-review.googlesource.com/c/go/+/677277 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- 
src/cmd/dist/test.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/cmd/dist/test.go b/src/cmd/dist/test.go index 82c6ee4631ee87..a940fd12ed9168 100644 --- a/src/cmd/dist/test.go +++ b/src/cmd/dist/test.go @@ -939,7 +939,9 @@ func (t *tester) registerTests() { // which is darwin,linux,windows/amd64 and darwin/arm64. // // The same logic applies to the release notes that correspond to each api/next file. - if goos == "darwin" || ((goos == "linux" || goos == "windows") && goarch == "amd64") { + // + // TODO: remove the exclusion of goexperiment simd right before dev.simd branch is merged to master. + if goos == "darwin" || ((goos == "linux" || goos == "windows") && (goarch == "amd64" && !strings.Contains(goexperiment, "simd"))) { t.registerTest("API release note check", &goTest{variant: "check", pkg: "cmd/relnote", testFlags: []string{"-check"}}) t.registerTest("API check", &goTest{variant: "check", pkg: "cmd/api", timeout: 5 * time.Minute, testFlags: []string{"-check"}}) } From eba2430c1654c16a12cc2caaa723ca8ab7bde4b5 Mon Sep 17 00:00:00 2001 From: David Chase Date: Thu, 29 May 2025 14:55:01 -0400 Subject: [PATCH 010/139] [dev.simd] simd, cmd/compile, go build, go/doc: test tweaks these are for CL 675618 simd package exists and imports internal/cpu tweak tests to deal with goexperiment/not Change-Id: I2de99d048f0a228d5f3cd750c39ee5925107556e Reviewed-on: https://go-review.googlesource.com/c/go/+/677260 Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI Auto-Submit: Junyang Shao --- .../internal/ssagen/intrinsics_test.go | 6 ++++-- src/go/build/deps_test.go | 2 ++ src/go/doc/comment/std.go | 1 + src/go/doc/comment/std_test.go | 5 +++++ src/simd/cpu.go | 20 +++++++++++++++++++ 5 files changed, 32 insertions(+), 2 deletions(-) create mode 100644 src/simd/cpu.go diff --git a/src/cmd/compile/internal/ssagen/intrinsics_test.go b/src/cmd/compile/internal/ssagen/intrinsics_test.go index 0623c5f2098c4e..bd9dd616fd8c68 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics_test.go +++ b/src/cmd/compile/internal/ssagen/intrinsics_test.go @@ -7,6 +7,7 @@ package ssagen import ( "flag" "fmt" + "internal/buildcfg" "slices" "strings" "testing" @@ -15,6 +16,7 @@ import ( ) var updateIntrinsics = flag.Bool("update", false, "Print an updated intrinsics table") +var simd = flag.Bool("simd", buildcfg.Experiment.SIMD, "Also check SIMD intrinsics; defaults to GOEXPERIMENT==simd") type testIntrinsicKey struct { archName string @@ -1375,13 +1377,13 @@ func TestIntrinsics(t *testing.T) { gotIntrinsics[testIntrinsicKey{ik.arch.Name, ik.pkg, ik.fn}] = struct{}{} } for ik, _ := range gotIntrinsics { - if _, found := wantIntrinsics[ik]; !found { + if _, found := wantIntrinsics[ik]; !found && (ik.pkg != "simd" || *simd) { t.Errorf("Got unwanted intrinsic %v %v.%v", ik.archName, ik.pkg, ik.fn) } } for ik, _ := range wantIntrinsics { - if _, found := gotIntrinsics[ik]; !found { + if _, found := gotIntrinsics[ik]; !found && (ik.pkg != "simd" || *simd) { t.Errorf("Want missing intrinsic %v %v.%v", ik.archName, ik.pkg, ik.fn) } } diff --git a/src/go/build/deps_test.go b/src/go/build/deps_test.go index b2668a3d7d4fbe..cc00000734a73d 100644 --- a/src/go/build/deps_test.go +++ b/src/go/build/deps_test.go @@ -70,6 +70,8 @@ var depsRules = ` internal/goarch < internal/abi; internal/byteorder, internal/cpu, internal/goarch < internal/chacha8rand; + internal/cpu < simd; + # RUNTIME is the core runtime group of packages, all of them very light-weight. 
internal/abi, internal/chacha8rand, diff --git a/src/go/doc/comment/std.go b/src/go/doc/comment/std.go index 191e1f129107de..73cf9627a02b58 100644 --- a/src/go/doc/comment/std.go +++ b/src/go/doc/comment/std.go @@ -35,6 +35,7 @@ var stdPkgs = []string{ "reflect", "regexp", "runtime", + "simd", "slices", "sort", "strconv", diff --git a/src/go/doc/comment/std_test.go b/src/go/doc/comment/std_test.go index bd0379856a4d8c..9a40d1d09a73b4 100644 --- a/src/go/doc/comment/std_test.go +++ b/src/go/doc/comment/std_test.go @@ -5,6 +5,7 @@ package comment import ( + "internal/buildcfg" "internal/diff" "internal/testenv" "slices" @@ -24,6 +25,10 @@ func TestStd(t *testing.T) { list = append(list, pkg) } } + // TODO remove this when simd is the default, for now fake its existence + if !buildcfg.Experiment.SIMD { + list = append(list, "simd") + } slices.Sort(list) have := strings.Join(stdPkgs, "\n") + "\n" diff --git a/src/simd/cpu.go b/src/simd/cpu.go new file mode 100644 index 00000000000000..84bf03cfb03ade --- /dev/null +++ b/src/simd/cpu.go @@ -0,0 +1,20 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build goexperiment.simd + +// the build condition == if the experiment is not on, cmd/api TestCheck will see this and complain +// see also go/doc/comment, where "simd" is inserted to the package list of the experiment is not on. + +package simd + +import "internal/cpu" + +func HasAVX512BW() bool { + return cpu.X86.HasAVX512BW +} + +func HasAVX512VL() bool { + return cpu.X86.HasAVX512VL +} From 7800f3813c26fea1895ab0bda3f89cdc5c169beb Mon Sep 17 00:00:00 2001 From: David Chase Date: Fri, 30 May 2025 11:39:02 -0400 Subject: [PATCH 011/139] [dev.simd] cmd/compile: flip sense of intrinsics test for SIMD ENABLE when simd experiment is off, to be sure intrinsics do not leak past the experiment. DISABLE when simd is on, because all this does is cause tests to fail, then whoever failed the test regenerates the simd, doesn't look at the mountain of new intrinsics, and just rubber-stamps the change. All friction, no benefit. Change-Id: I2ef7e0c246aaddd4a52c1d6108cb587adc1b8366 Reviewed-on: https://go-review.googlesource.com/c/go/+/677555 Auto-Submit: Junyang Shao Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/ssagen/intrinsics_test.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/cmd/compile/internal/ssagen/intrinsics_test.go b/src/cmd/compile/internal/ssagen/intrinsics_test.go index bd9dd616fd8c68..6c7e65abfd4610 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics_test.go +++ b/src/cmd/compile/internal/ssagen/intrinsics_test.go @@ -16,7 +16,10 @@ import ( ) var updateIntrinsics = flag.Bool("update", false, "Print an updated intrinsics table") -var simd = flag.Bool("simd", buildcfg.Experiment.SIMD, "Also check SIMD intrinsics; defaults to GOEXPERIMENT==simd") + +// TODO turn on always. Current setting insures that simd intrinsics do not leak past experiment, +// but also avoids fail+rubber-stamp-update friction while SIMD is under active development. 
+var simd = flag.Bool("simd", !buildcfg.Experiment.SIMD, "Also check SIMD intrinsics; default to GOEXPERIMENT = NO simd") type testIntrinsicKey struct { archName string From 0ff18a9cca710d5045ec00cc910507bf2e051eaf Mon Sep 17 00:00:00 2001 From: David Chase Date: Fri, 30 May 2025 12:45:11 -0400 Subject: [PATCH 012/139] [dev.simd] cmd/compile: disable intrinsics test for new simd stuff this test has been unpossible to get working correctly/ as-expected across architectures, experiments, trybots. There benefit is a fairy-tale (we're going to check at the merge), and it costs us time to keep it happy, so for now it is disabled. Change-Id: Iad913d2590deec606d29bedfa100310e6e9a75bc Reviewed-on: https://go-review.googlesource.com/c/go/+/677556 Reviewed-by: Junyang Shao Auto-Submit: David Chase Auto-Submit: Junyang Shao LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/ssagen/intrinsics_test.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/cmd/compile/internal/ssagen/intrinsics_test.go b/src/cmd/compile/internal/ssagen/intrinsics_test.go index 6c7e65abfd4610..7a212f1c3ae06c 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics_test.go +++ b/src/cmd/compile/internal/ssagen/intrinsics_test.go @@ -7,7 +7,6 @@ package ssagen import ( "flag" "fmt" - "internal/buildcfg" "slices" "strings" "testing" @@ -17,9 +16,8 @@ import ( var updateIntrinsics = flag.Bool("update", false, "Print an updated intrinsics table") -// TODO turn on always. Current setting insures that simd intrinsics do not leak past experiment, -// but also avoids fail+rubber-stamp-update friction while SIMD is under active development. -var simd = flag.Bool("simd", !buildcfg.Experiment.SIMD, "Also check SIMD intrinsics; default to GOEXPERIMENT = NO simd") +// TODO turn on after SIMD is stable. The time burned keeping this test happy during SIMD development has already well exceeded any plausible benefit. +var simd = flag.Bool("simd", false, "Also check SIMD intrinsics; for now, it is noisy and not helpful") type testIntrinsicKey struct { archName string From 8ecbd59ebb77207202e17489db1a4c02175bb1ae Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Thu, 22 May 2025 19:59:12 +0000 Subject: [PATCH 013/139] [dev.simd] cmd/compile: generated codes for amd64 SIMD This CL is generated by tool in CL 667155. Change-Id: I3829d0d2c96fe7000e2dd025a3006f96957d777a Reviewed-on: https://go-review.googlesource.com/c/go/+/675618 Reviewed-by: Junyang Shao Auto-Submit: Junyang Shao Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/simdssa.go | 2311 +- .../compile/internal/ssa/_gen/simdAMD64.rules | 1083 +- .../compile/internal/ssa/_gen/simdAMD64ops.go | 591 +- .../internal/ssa/_gen/simdgenericOps.go | 1077 +- src/cmd/compile/internal/ssa/opGen.go | 42376 +++++++++++----- src/cmd/compile/internal/ssa/rewriteAMD64.go | 25341 ++++++++- .../compile/internal/ssagen/simdintrinsics.go | 1518 +- src/simd/simd_test.go | 165 + src/simd/stubs_amd64.go | 4151 ++ src/simd/types_amd64.go | 662 + 10 files changed, 63502 insertions(+), 15773 deletions(-) create mode 100644 src/simd/simd_test.go create mode 100644 src/simd/stubs_amd64.go create mode 100644 src/simd/types_amd64.go diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 0cd9b8548df67c..d8d1a4c1a46e76 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -1,19 +1,2322 @@ -// Copyright 2025 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. +// Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. -// Placeholder for generated glue to come later package amd64 import ( "cmd/compile/internal/ssa" "cmd/compile/internal/ssagen" + "cmd/internal/obj" + "cmd/internal/obj/x86" ) func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { + p := s.Prog(v.Op.Asm()) + // First arg switch v.Op { + // Immediates + case ssa.OpAMD64VPCMPUBMasked512, + ssa.OpAMD64VPCMPUQ256, + ssa.OpAMD64VPCMPBMasked128, + ssa.OpAMD64VPCMPUWMasked128, + ssa.OpAMD64VPCMPBMasked512, + ssa.OpAMD64VPCMPUWMasked512, + ssa.OpAMD64VPCMPD128, + ssa.OpAMD64VPCMPDMasked256, + ssa.OpAMD64VCMPPD128, + ssa.OpAMD64VCMPPS256, + ssa.OpAMD64VPCMPDMasked128, + ssa.OpAMD64VPCMPUW256, + ssa.OpAMD64VCMPPSMasked256, + ssa.OpAMD64VPCMPDMasked512, + ssa.OpAMD64VPCMPQ256, + ssa.OpAMD64VPCMPQMasked512, + ssa.OpAMD64VPCMPUD128, + ssa.OpAMD64VPCMPUW128, + ssa.OpAMD64VPCMPBMasked256, + ssa.OpAMD64VPCMPB256, + ssa.OpAMD64VPCMPUWMasked256, + ssa.OpAMD64VPCMPUDMasked512, + ssa.OpAMD64VPCMPWMasked128, + ssa.OpAMD64VCMPPD256, + ssa.OpAMD64VCMPPDMasked256, + ssa.OpAMD64VPCMPQMasked256, + ssa.OpAMD64VPCMPUQ512, + ssa.OpAMD64VPCMPUW512, + ssa.OpAMD64VCMPPSMasked512, + ssa.OpAMD64VPCMPQMasked128, + ssa.OpAMD64VPCMPUBMasked256, + ssa.OpAMD64VPCMPUD512, + ssa.OpAMD64VPCMPUQ128, + ssa.OpAMD64VCMPPS128, + ssa.OpAMD64VPCMPUB128, + ssa.OpAMD64VPCMPUQMasked128, + ssa.OpAMD64VPCMPUB512, + ssa.OpAMD64VPCMPUB256, + ssa.OpAMD64VPCMPUQMasked512, + ssa.OpAMD64VPCMPUDMasked256, + ssa.OpAMD64VCMPPSMasked128, + ssa.OpAMD64VPCMPB128, + ssa.OpAMD64VPCMPUD256, + ssa.OpAMD64VPCMPQ512, + ssa.OpAMD64VPCMPWMasked512, + ssa.OpAMD64VPCMPUDMasked128, + ssa.OpAMD64VCMPPDMasked128, + ssa.OpAMD64VPCMPQ128, + ssa.OpAMD64VPCMPUQMasked256, + ssa.OpAMD64VPCMPD512, + ssa.OpAMD64VPCMPD256, + ssa.OpAMD64VPCMPB512, + ssa.OpAMD64VPCMPWMasked256, + ssa.OpAMD64VPCMPW256, + ssa.OpAMD64VCMPPDMasked512, + ssa.OpAMD64VCMPPD512, + ssa.OpAMD64VPCMPUBMasked128, + ssa.OpAMD64VPCMPW128, + ssa.OpAMD64VCMPPS512, + ssa.OpAMD64VPCMPW512: + imm := v.AuxInt + if imm < 0 || imm > 255 { + v.Fatalf("Invalid source selection immediate") + } + p.From.Offset = imm + p.From.Type = obj.TYPE_CONST + + // Registers + case ssa.OpAMD64VPMINSW256, + ssa.OpAMD64VPMULLD256, + ssa.OpAMD64VORPD512, + ssa.OpAMD64VPMINSDMasked128, + ssa.OpAMD64VDIVPD256, + ssa.OpAMD64VPCMPEQW256, + ssa.OpAMD64VHADDPS128, + ssa.OpAMD64VPXOR128, + ssa.OpAMD64VPADDQ128, + ssa.OpAMD64VMULPSMasked128, + ssa.OpAMD64VPANDQMasked256, + ssa.OpAMD64VSQRTPDMasked128, + ssa.OpAMD64VPMAXUB128, + ssa.OpAMD64VPSUBW256, + ssa.OpAMD64VPMAXSQ512, + ssa.OpAMD64VANDNPS512, + ssa.OpAMD64VPMULHUWMasked512, + ssa.OpAMD64VPABSW512, + ssa.OpAMD64VRSQRT14PD256, + ssa.OpAMD64VPHADDW128, + ssa.OpAMD64VPCMPGTQMasked256, + ssa.OpAMD64VPMULLQMasked256, + ssa.OpAMD64VPADDW512, + ssa.OpAMD64VPMULLDMasked128, + ssa.OpAMD64VPCMPEQQ128, + ssa.OpAMD64VPAVGW128, + ssa.OpAMD64VPOR256, + ssa.OpAMD64VPADDQMasked512, + ssa.OpAMD64VPMINUBMasked128, + ssa.OpAMD64VPMULLWMasked128, + ssa.OpAMD64VPOPCNTW512, + ssa.OpAMD64VORPD256, + ssa.OpAMD64VPANDN256, + ssa.OpAMD64VPANDD512, + ssa.OpAMD64VPORQ512, + ssa.OpAMD64VPMULLWMasked256, + ssa.OpAMD64VPSUBSW128, + ssa.OpAMD64VPADDSBMasked256, + ssa.OpAMD64VPCMPGTB256, + ssa.OpAMD64VMULPSMasked256, + ssa.OpAMD64VPCMPEQD256, + ssa.OpAMD64VPSUBSW512, + ssa.OpAMD64VPABSD512, + 
ssa.OpAMD64VPADDD512, + ssa.OpAMD64VPADDBMasked512, + ssa.OpAMD64VPADDSBMasked512, + ssa.OpAMD64VPMULUDQ512, + ssa.OpAMD64VPADDSW512, + ssa.OpAMD64VPORQMasked128, + ssa.OpAMD64VANDPD128, + ssa.OpAMD64VPCMPEQD128, + ssa.OpAMD64VPHSUBSW128, + ssa.OpAMD64VPADDSW256, + ssa.OpAMD64VPSUBSBMasked512, + ssa.OpAMD64VPMULHUW128, + ssa.OpAMD64VPCMPGTW512, + ssa.OpAMD64VSQRTPDMasked256, + ssa.OpAMD64VPOPCNTB256, + ssa.OpAMD64VPAVGWMasked256, + ssa.OpAMD64VPMULUDQMasked256, + ssa.OpAMD64VPMINSD512, + ssa.OpAMD64VPADDWMasked512, + ssa.OpAMD64VPOPCNTB128, + ssa.OpAMD64VPOPCNTWMasked128, + ssa.OpAMD64VMAXPS128, + ssa.OpAMD64VPMULLD128, + ssa.OpAMD64VPSUBB256, + ssa.OpAMD64VMINPD128, + ssa.OpAMD64VPANDNQMasked512, + ssa.OpAMD64VANDPS512, + ssa.OpAMD64VPOPCNTDMasked256, + ssa.OpAMD64VANDNPD256, + ssa.OpAMD64VPAND128, + ssa.OpAMD64VPANDN128, + ssa.OpAMD64VXORPD256, + ssa.OpAMD64VMAXPDMasked512, + ssa.OpAMD64VPMAXUWMasked256, + ssa.OpAMD64VPMINSQMasked128, + ssa.OpAMD64VPMULDQ256, + ssa.OpAMD64VPSUBSWMasked512, + ssa.OpAMD64VPXORQMasked128, + ssa.OpAMD64VPCMPEQW128, + ssa.OpAMD64VPABSWMasked128, + ssa.OpAMD64VPMAXSWMasked256, + ssa.OpAMD64VPMULDQ128, + ssa.OpAMD64VPMULLWMasked512, + ssa.OpAMD64VDIVPS512, + ssa.OpAMD64VPMULDQMasked512, + ssa.OpAMD64VRCP14PS512, + ssa.OpAMD64VRSQRT14PDMasked128, + ssa.OpAMD64VXORPSMasked128, + ssa.OpAMD64VPADDW256, + ssa.OpAMD64VANDNPSMasked256, + ssa.OpAMD64VPMULUDQMasked128, + ssa.OpAMD64VPCMPEQWMasked128, + ssa.OpAMD64VHSUBPS128, + ssa.OpAMD64VPMAXSDMasked512, + ssa.OpAMD64VPABSQMasked256, + ssa.OpAMD64VADDPS256, + ssa.OpAMD64VHSUBPS256, + ssa.OpAMD64VPSUBB128, + ssa.OpAMD64VMAXPD512, + ssa.OpAMD64VPAVGBMasked512, + ssa.OpAMD64VPHSUBW256, + ssa.OpAMD64VPANDNDMasked256, + ssa.OpAMD64VPMAXUWMasked128, + ssa.OpAMD64VXORPS128, + ssa.OpAMD64VMINPS128, + ssa.OpAMD64VADDPDMasked128, + ssa.OpAMD64VPMULLQMasked512, + ssa.OpAMD64VPORQMasked256, + ssa.OpAMD64VPMULHW128, + ssa.OpAMD64VSCALEFPDMasked256, + ssa.OpAMD64VPSUBDMasked512, + ssa.OpAMD64VPSUBQ512, + ssa.OpAMD64VPADDB128, + ssa.OpAMD64VPSUBSB256, + ssa.OpAMD64VDIVPSMasked512, + ssa.OpAMD64VSCALEFPS128, + ssa.OpAMD64VSQRTPS512, + ssa.OpAMD64VPSIGND128, + ssa.OpAMD64VRSQRT14PD512, + ssa.OpAMD64VDIVPDMasked128, + ssa.OpAMD64VXORPDMasked128, + ssa.OpAMD64VPMINSWMasked512, + ssa.OpAMD64VPXORD512, + ssa.OpAMD64VHADDPD256, + ssa.OpAMD64VPMAXSB128, + ssa.OpAMD64VPHSUBD128, + ssa.OpAMD64VPANDDMasked256, + ssa.OpAMD64VRCP14PSMasked128, + ssa.OpAMD64VMULPDMasked512, + ssa.OpAMD64VPSUBD512, + ssa.OpAMD64VANDNPD128, + ssa.OpAMD64VPHADDD256, + ssa.OpAMD64VMINPS512, + ssa.OpAMD64VPMULDQ512, + ssa.OpAMD64VORPSMasked512, + ssa.OpAMD64VPORDMasked256, + ssa.OpAMD64VPABSW256, + ssa.OpAMD64VPMAXSQMasked512, + ssa.OpAMD64VPADDDMasked256, + ssa.OpAMD64VPSUBQMasked256, + ssa.OpAMD64VXORPSMasked512, + ssa.OpAMD64VPCMPEQB128, + ssa.OpAMD64VPMULLW256, + ssa.OpAMD64VDIVPDMasked512, + ssa.OpAMD64VPMAXUDMasked128, + ssa.OpAMD64VPADDB256, + ssa.OpAMD64VPABSWMasked256, + ssa.OpAMD64VPOPCNTWMasked512, + ssa.OpAMD64VPSUBD128, + ssa.OpAMD64VPXORQMasked512, + ssa.OpAMD64VMINPDMasked256, + ssa.OpAMD64VPADDWMasked256, + ssa.OpAMD64VPMAXSQ256, + ssa.OpAMD64VPSUBWMasked512, + ssa.OpAMD64VMULPS512, + ssa.OpAMD64VPMULLQMasked128, + ssa.OpAMD64VPADDSB128, + ssa.OpAMD64VMINPD512, + ssa.OpAMD64VPMAXSD512, + ssa.OpAMD64VPMINUWMasked128, + ssa.OpAMD64VPOPCNTQMasked128, + ssa.OpAMD64VPMINUB256, + ssa.OpAMD64VPXORDMasked256, + ssa.OpAMD64VPABSB256, + ssa.OpAMD64VPORQMasked512, + ssa.OpAMD64VORPDMasked512, + ssa.OpAMD64VPOPCNTQ128, + ssa.OpAMD64VPMINSD256, + 
ssa.OpAMD64VPOPCNTDMasked512, + ssa.OpAMD64VPMINSWMasked128, + ssa.OpAMD64VPOPCNTD256, + ssa.OpAMD64VPXORDMasked512, + ssa.OpAMD64VPABSQ256, + ssa.OpAMD64VPOPCNTW256, + ssa.OpAMD64VDIVPS256, + ssa.OpAMD64VPHADDSW128, + ssa.OpAMD64VPSUBD256, + ssa.OpAMD64VRSQRT14PD128, + ssa.OpAMD64VDIVPD128, + ssa.OpAMD64VPCMPEQQMasked512, + ssa.OpAMD64VRCP14PDMasked256, + ssa.OpAMD64VPMULUDQMasked512, + ssa.OpAMD64VMAXPSMasked256, + ssa.OpAMD64VPMULLQ256, + ssa.OpAMD64VANDNPDMasked256, + ssa.OpAMD64VPADDWMasked128, + ssa.OpAMD64VMULPDMasked128, + ssa.OpAMD64VPCMPGTWMasked512, + ssa.OpAMD64VPOR128, + ssa.OpAMD64VANDNPDMasked512, + ssa.OpAMD64VPSUBBMasked512, + ssa.OpAMD64VPANDNDMasked128, + ssa.OpAMD64VPMINUDMasked128, + ssa.OpAMD64VXORPD128, + ssa.OpAMD64VPAVGWMasked128, + ssa.OpAMD64VPMULLDMasked256, + ssa.OpAMD64VPSUBBMasked256, + ssa.OpAMD64VRSQRT14PSMasked128, + ssa.OpAMD64VPADDBMasked128, + ssa.OpAMD64VPMAXUBMasked512, + ssa.OpAMD64VPMULLW128, + ssa.OpAMD64VPSUBW128, + ssa.OpAMD64VPXOR256, + ssa.OpAMD64VDIVPSMasked256, + ssa.OpAMD64VPOPCNTBMasked256, + ssa.OpAMD64VPADDD128, + ssa.OpAMD64VRSQRTPS128, + ssa.OpAMD64VHADDPD128, + ssa.OpAMD64VPSUBDMasked256, + ssa.OpAMD64VPOPCNTD512, + ssa.OpAMD64VPANDNQ512, + ssa.OpAMD64VPAVGWMasked512, + ssa.OpAMD64VSCALEFPSMasked128, + ssa.OpAMD64VMINPSMasked512, + ssa.OpAMD64VPSUBQMasked512, + ssa.OpAMD64VSQRTPSMasked512, + ssa.OpAMD64VPMINSDMasked256, + ssa.OpAMD64VANDPSMasked512, + ssa.OpAMD64VPMAXUBMasked256, + ssa.OpAMD64VPSUBWMasked256, + ssa.OpAMD64VSQRTPD512, + ssa.OpAMD64VPADDSWMasked256, + ssa.OpAMD64VPANDDMasked512, + ssa.OpAMD64VRCP14PD512, + ssa.OpAMD64VPABSWMasked512, + ssa.OpAMD64VPSUBSWMasked256, + ssa.OpAMD64VPSUBQMasked128, + ssa.OpAMD64VPABSD128, + ssa.OpAMD64VPMINUD256, + ssa.OpAMD64VPMINUBMasked512, + ssa.OpAMD64VPSUBQ256, + ssa.OpAMD64VPMINSD128, + ssa.OpAMD64VPADDSB256, + ssa.OpAMD64VPMULLDMasked512, + ssa.OpAMD64VANDPD512, + ssa.OpAMD64VMULPDMasked256, + ssa.OpAMD64VPHADDSW256, + ssa.OpAMD64VPAND256, + ssa.OpAMD64VADDPS512, + ssa.OpAMD64VPMINUQ256, + ssa.OpAMD64VADDPD256, + ssa.OpAMD64VPABSB128, + ssa.OpAMD64VPANDND512, + ssa.OpAMD64VPSUBSWMasked128, + ssa.OpAMD64VPMAXSW256, + ssa.OpAMD64VMAXPD256, + ssa.OpAMD64VMULPD128, + ssa.OpAMD64VPOPCNTQMasked256, + ssa.OpAMD64VPADDD256, + ssa.OpAMD64VPOPCNTQ512, + ssa.OpAMD64VMINPD256, + ssa.OpAMD64VPXORQMasked256, + ssa.OpAMD64VPOPCNTBMasked512, + ssa.OpAMD64VPANDQMasked512, + ssa.OpAMD64VPMINSW512, + ssa.OpAMD64VPORDMasked512, + ssa.OpAMD64VPAVGB128, + ssa.OpAMD64VADDPD512, + ssa.OpAMD64VPMULHW512, + ssa.OpAMD64VPADDQ256, + ssa.OpAMD64VPMINUQ512, + ssa.OpAMD64VORPS512, + ssa.OpAMD64VPMAXUWMasked512, + ssa.OpAMD64VPMAXSDMasked128, + ssa.OpAMD64VPMULDQMasked128, + ssa.OpAMD64VSCALEFPD128, + ssa.OpAMD64VPCMPGTW256, + ssa.OpAMD64VPMAXUW256, + ssa.OpAMD64VPMAXUD512, + ssa.OpAMD64VPMAXUQ256, + ssa.OpAMD64VPMINUDMasked256, + ssa.OpAMD64VPABSBMasked512, + ssa.OpAMD64VSQRTPD128, + ssa.OpAMD64VPANDNDMasked512, + ssa.OpAMD64VPMINUDMasked512, + ssa.OpAMD64VPABSBMasked256, + ssa.OpAMD64VXORPDMasked256, + ssa.OpAMD64VMAXPSMasked512, + ssa.OpAMD64VPMINUD512, + ssa.OpAMD64VPABSBMasked128, + ssa.OpAMD64VPSUBBMasked128, + ssa.OpAMD64VPMINSDMasked512, + ssa.OpAMD64VPSUBSBMasked256, + ssa.OpAMD64VORPSMasked256, + ssa.OpAMD64VADDPSMasked128, + ssa.OpAMD64VPMAXSB512, + ssa.OpAMD64VPABSB512, + ssa.OpAMD64VXORPDMasked512, + ssa.OpAMD64VADDPS128, + ssa.OpAMD64VPMINUQMasked512, + ssa.OpAMD64VANDNPS256, + ssa.OpAMD64VPMAXSB256, + ssa.OpAMD64VDIVPDMasked256, + ssa.OpAMD64VPHSUBW128, + ssa.OpAMD64VPADDQMasked256, + 
ssa.OpAMD64VPMAXSD256, + ssa.OpAMD64VPABSDMasked512, + ssa.OpAMD64VPADDQ512, + ssa.OpAMD64VPMAXUDMasked256, + ssa.OpAMD64VPCMPGTB128, + ssa.OpAMD64VPMAXUQMasked128, + ssa.OpAMD64VPCMPGTQ128, + ssa.OpAMD64VPANDQ512, + ssa.OpAMD64VRCP14PSMasked512, + ssa.OpAMD64VANDPS256, + ssa.OpAMD64VPHSUBD256, + ssa.OpAMD64VPSUBW512, + ssa.OpAMD64VHADDPS256, + ssa.OpAMD64VMULPD256, + ssa.OpAMD64VRCP14PDMasked512, + ssa.OpAMD64VPSUBDMasked128, + ssa.OpAMD64VPXORDMasked128, + ssa.OpAMD64VHSUBPD128, + ssa.OpAMD64VPMAXUW128, + ssa.OpAMD64VORPDMasked128, + ssa.OpAMD64VPHADDD128, + ssa.OpAMD64VPMINUD128, + ssa.OpAMD64VPSIGND256, + ssa.OpAMD64VPADDSW128, + ssa.OpAMD64VPMAXUQ512, + ssa.OpAMD64VPCMPGTQ512, + ssa.OpAMD64VADDPDMasked256, + ssa.OpAMD64VHSUBPD256, + ssa.OpAMD64VPCMPGTWMasked256, + ssa.OpAMD64VPOPCNTW128, + ssa.OpAMD64VPSUBSB512, + ssa.OpAMD64VRSQRT14PDMasked256, + ssa.OpAMD64VPMAXSD128, + ssa.OpAMD64VPADDQMasked128, + ssa.OpAMD64VPOPCNTQ256, + ssa.OpAMD64VPMAXSQ128, + ssa.OpAMD64VANDPD256, + ssa.OpAMD64VPSUBSBMasked128, + ssa.OpAMD64VSQRTPS128, + ssa.OpAMD64VPCMPGTQ256, + ssa.OpAMD64VPADDSBMasked128, + ssa.OpAMD64VXORPD512, + ssa.OpAMD64VPCMPGTWMasked128, + ssa.OpAMD64VPMULLQ128, + ssa.OpAMD64VPSUBWMasked128, + ssa.OpAMD64VSCALEFPDMasked128, + ssa.OpAMD64VPMAXUD256, + ssa.OpAMD64VANDNPDMasked128, + ssa.OpAMD64VPMINUWMasked512, + ssa.OpAMD64VRSQRTPS256, + ssa.OpAMD64VPXORQ512, + ssa.OpAMD64VPMAXSDMasked256, + ssa.OpAMD64VPMINUWMasked256, + ssa.OpAMD64VPMINSQ256, + ssa.OpAMD64VPMULDQMasked256, + ssa.OpAMD64VPMULUDQ128, + ssa.OpAMD64VPMAXUB256, + ssa.OpAMD64VPMINUW256, + ssa.OpAMD64VPADDSWMasked512, + ssa.OpAMD64VPADDSB512, + ssa.OpAMD64VPMINUQMasked128, + ssa.OpAMD64VPMULUDQ256, + ssa.OpAMD64VANDPSMasked256, + ssa.OpAMD64VPADDDMasked128, + ssa.OpAMD64VPABSD256, + ssa.OpAMD64VANDNPSMasked128, + ssa.OpAMD64VPABSDMasked128, + ssa.OpAMD64VPADDSWMasked128, + ssa.OpAMD64VPMULHUW512, + ssa.OpAMD64VPSUBQ128, + ssa.OpAMD64VPADDDMasked512, + ssa.OpAMD64VRCP14PS128, + ssa.OpAMD64VMULPSMasked512, + ssa.OpAMD64VORPDMasked256, + ssa.OpAMD64VPMULHUWMasked256, + ssa.OpAMD64VPORDMasked128, + ssa.OpAMD64VSCALEFPDMasked512, + ssa.OpAMD64VSCALEFPD512, + ssa.OpAMD64VPMAXSBMasked512, + ssa.OpAMD64VPMINSQMasked256, + ssa.OpAMD64VXORPS512, + ssa.OpAMD64VPSUBSB128, + ssa.OpAMD64VPMAXSW128, + ssa.OpAMD64VPMINSBMasked512, + ssa.OpAMD64VADDPDMasked512, + ssa.OpAMD64VPSUBB512, + ssa.OpAMD64VPMULHWMasked512, + ssa.OpAMD64VRSQRT14PSMasked256, + ssa.OpAMD64VPOPCNTBMasked128, + ssa.OpAMD64VPOPCNTD128, + ssa.OpAMD64VPMAXUQMasked256, + ssa.OpAMD64VPMINSB256, + ssa.OpAMD64VPABSQMasked512, + ssa.OpAMD64VDIVPSMasked128, + ssa.OpAMD64VPAVGBMasked128, + ssa.OpAMD64VPCMPEQW512, + ssa.OpAMD64VPMULHUW256, + ssa.OpAMD64VPMULLW512, + ssa.OpAMD64VPADDB512, + ssa.OpAMD64VPOPCNTB512, + ssa.OpAMD64VPORD512, + ssa.OpAMD64VMAXPDMasked128, + ssa.OpAMD64VPMAXSW512, + ssa.OpAMD64VPMINUW512, + ssa.OpAMD64VADDPSMasked256, + ssa.OpAMD64VPCMPGTQMasked512, + ssa.OpAMD64VRCP14PD128, + ssa.OpAMD64VPHSUBSW256, + ssa.OpAMD64VRSQRT14PSMasked512, + ssa.OpAMD64VORPSMasked128, + ssa.OpAMD64VMAXPS256, + ssa.OpAMD64VPANDQMasked128, + ssa.OpAMD64VPCMPEQWMasked512, + ssa.OpAMD64VRCP14PDMasked128, + ssa.OpAMD64VADDPSMasked512, + ssa.OpAMD64VPMAXSQMasked128, + ssa.OpAMD64VPOPCNTDMasked128, + ssa.OpAMD64VPHADDW256, + ssa.OpAMD64VXORPSMasked256, + ssa.OpAMD64VPMINSQMasked512, + ssa.OpAMD64VRCP14PS256, + ssa.OpAMD64VPSIGNB256, + ssa.OpAMD64VPSUBSW256, + ssa.OpAMD64VDIVPD512, + ssa.OpAMD64VPADDW128, + ssa.OpAMD64VXORPS256, + ssa.OpAMD64VANDNPSMasked512, + ssa.OpAMD64VPAVGB512, + 
ssa.OpAMD64VPMAXUW512, + ssa.OpAMD64VPMAXSWMasked512, + ssa.OpAMD64VPSIGNW256, + ssa.OpAMD64VSQRTPSMasked128, + ssa.OpAMD64VPCMPEQQMasked128, + ssa.OpAMD64VPOPCNTWMasked256, + ssa.OpAMD64VPCMPGTD128, + ssa.OpAMD64VMAXPDMasked256, + ssa.OpAMD64VPANDNQMasked256, + ssa.OpAMD64VPMINSB512, + ssa.OpAMD64VPMULHUWMasked128, + ssa.OpAMD64VPMINUW128, + ssa.OpAMD64VMINPDMasked512, + ssa.OpAMD64VMAXPSMasked128, + ssa.OpAMD64VPMAXSBMasked256, + ssa.OpAMD64VPANDDMasked128, + ssa.OpAMD64VSQRTPD256, + ssa.OpAMD64VPCMPGTD256, + ssa.OpAMD64VPOPCNTQMasked512, + ssa.OpAMD64VPMAXUB512, + ssa.OpAMD64VANDPDMasked128, + ssa.OpAMD64VPANDNQMasked128, + ssa.OpAMD64VSCALEFPS256, + ssa.OpAMD64VPCMPEQQMasked256, + ssa.OpAMD64VSCALEFPSMasked512, + ssa.OpAMD64VANDPDMasked512, + ssa.OpAMD64VORPS128, + ssa.OpAMD64VPMINUB128, + ssa.OpAMD64VPMULHWMasked128, + ssa.OpAMD64VPAVGB256, + ssa.OpAMD64VPMINSQ128, + ssa.OpAMD64VPCMPEQQ256, + ssa.OpAMD64VMULPD512, + ssa.OpAMD64VPABSQ512, + ssa.OpAMD64VPABSDMasked256, + ssa.OpAMD64VPADDBMasked256, + ssa.OpAMD64VPSIGNW128, + ssa.OpAMD64VPABSQ128, + ssa.OpAMD64VPMINUQMasked256, + ssa.OpAMD64VPMULHW256, + ssa.OpAMD64VSCALEFPS512, + ssa.OpAMD64VRSQRT14PDMasked512, + ssa.OpAMD64VPMINUBMasked256, + ssa.OpAMD64VADDPD128, + ssa.OpAMD64VSCALEFPD256, + ssa.OpAMD64VANDPS128, + ssa.OpAMD64VMULPS256, + ssa.OpAMD64VPMINSW128, + ssa.OpAMD64VPMAXSBMasked128, + ssa.OpAMD64VPMAXSWMasked128, + ssa.OpAMD64VMINPS256, + ssa.OpAMD64VPMAXUQ128, + ssa.OpAMD64VPMINSBMasked256, + ssa.OpAMD64VPAVGW256, + ssa.OpAMD64VMAXPD128, + ssa.OpAMD64VPSIGNB128, + ssa.OpAMD64VPMINUB512, + ssa.OpAMD64VPABSW128, + ssa.OpAMD64VPCMPGTW128, + ssa.OpAMD64VORPS256, + ssa.OpAMD64VPMINSB128, + ssa.OpAMD64VPMINUQ128, + ssa.OpAMD64VPMINSQ512, + ssa.OpAMD64VSQRTPDMasked512, + ssa.OpAMD64VPMINSWMasked256, + ssa.OpAMD64VMINPSMasked256, + ssa.OpAMD64VPMAXUBMasked128, + ssa.OpAMD64VPCMPEQB256, + ssa.OpAMD64VANDNPD512, + ssa.OpAMD64VPCMPGTQMasked128, + ssa.OpAMD64VANDPDMasked256, + ssa.OpAMD64VORPD128, + ssa.OpAMD64VMAXPS512, + ssa.OpAMD64VPMULLD512, + ssa.OpAMD64VMINPDMasked128, + ssa.OpAMD64VANDNPS128, + ssa.OpAMD64VMULPS128, + ssa.OpAMD64VPMULLQ512, + ssa.OpAMD64VRSQRT14PS512, + ssa.OpAMD64VMINPSMasked128, + ssa.OpAMD64VRCP14PSMasked256, + ssa.OpAMD64VPMAXUD128, + ssa.OpAMD64VPMINSBMasked128, + ssa.OpAMD64VPCMPEQQ512, + ssa.OpAMD64VSQRTPS256, + ssa.OpAMD64VPMULHWMasked256, + ssa.OpAMD64VSQRTPSMasked256, + ssa.OpAMD64VDIVPS128, + ssa.OpAMD64VRCP14PD256, + ssa.OpAMD64VPMAXUDMasked512, + ssa.OpAMD64VPMAXUQMasked512, + ssa.OpAMD64VANDPSMasked128, + ssa.OpAMD64VPABSQMasked128, + ssa.OpAMD64VPMAXSQMasked256, + ssa.OpAMD64VPAVGBMasked256, + ssa.OpAMD64VPCMPEQWMasked256, + ssa.OpAMD64VSCALEFPSMasked256, + ssa.OpAMD64VPAVGW512: + p.From.Type = obj.TYPE_REG + p.From.Reg = simdReg(v.Args[0]) + default: + // At least one arg is required. 
return false } + + // Second arg + switch v.Op { + // Registers + case ssa.OpAMD64VPMINSW256, + ssa.OpAMD64VPMULLD256, + ssa.OpAMD64VORPD512, + ssa.OpAMD64VPCMPUBMasked512, + ssa.OpAMD64VPCMPUQ256, + ssa.OpAMD64VPMINSDMasked128, + ssa.OpAMD64VDIVPD256, + ssa.OpAMD64VPCMPEQW256, + ssa.OpAMD64VPCMPBMasked128, + ssa.OpAMD64VPCMPUWMasked128, + ssa.OpAMD64VHADDPS128, + ssa.OpAMD64VPXOR128, + ssa.OpAMD64VPADDQ128, + ssa.OpAMD64VMULPSMasked128, + ssa.OpAMD64VPANDQMasked256, + ssa.OpAMD64VPCMPBMasked512, + ssa.OpAMD64VSQRTPDMasked128, + ssa.OpAMD64VPMAXUB128, + ssa.OpAMD64VPSUBW256, + ssa.OpAMD64VPMAXSQ512, + ssa.OpAMD64VANDNPS512, + ssa.OpAMD64VPCMPUWMasked512, + ssa.OpAMD64VPMULHUWMasked512, + ssa.OpAMD64VPCMPD128, + ssa.OpAMD64VPCMPDMasked256, + ssa.OpAMD64VPHADDW128, + ssa.OpAMD64VPCMPGTQMasked256, + ssa.OpAMD64VPMULLQMasked256, + ssa.OpAMD64VCMPPD128, + ssa.OpAMD64VCMPPS256, + ssa.OpAMD64VPADDW512, + ssa.OpAMD64VPMULLDMasked128, + ssa.OpAMD64VPCMPEQQ128, + ssa.OpAMD64VPAVGW128, + ssa.OpAMD64VPOR256, + ssa.OpAMD64VPADDQMasked512, + ssa.OpAMD64VPCMPDMasked128, + ssa.OpAMD64VPCMPUW256, + ssa.OpAMD64VPMINUBMasked128, + ssa.OpAMD64VPMULLWMasked128, + ssa.OpAMD64VORPD256, + ssa.OpAMD64VPANDN256, + ssa.OpAMD64VPANDD512, + ssa.OpAMD64VCMPPSMasked256, + ssa.OpAMD64VPCMPDMasked512, + ssa.OpAMD64VPORQ512, + ssa.OpAMD64VPCMPQ256, + ssa.OpAMD64VPMULLWMasked256, + ssa.OpAMD64VPSUBSW128, + ssa.OpAMD64VPCMPQMasked512, + ssa.OpAMD64VPADDSBMasked256, + ssa.OpAMD64VPCMPUD128, + ssa.OpAMD64VPCMPGTB256, + ssa.OpAMD64VMULPSMasked256, + ssa.OpAMD64VPCMPUW128, + ssa.OpAMD64VPCMPEQD256, + ssa.OpAMD64VPSUBSW512, + ssa.OpAMD64VPADDD512, + ssa.OpAMD64VPADDBMasked512, + ssa.OpAMD64VPADDSBMasked512, + ssa.OpAMD64VPCMPBMasked256, + ssa.OpAMD64VPMULUDQ512, + ssa.OpAMD64VPADDSW512, + ssa.OpAMD64VPCMPB256, + ssa.OpAMD64VPCMPUWMasked256, + ssa.OpAMD64VPORQMasked128, + ssa.OpAMD64VANDPD128, + ssa.OpAMD64VPCMPEQD128, + ssa.OpAMD64VPHSUBSW128, + ssa.OpAMD64VPADDSW256, + ssa.OpAMD64VPCMPUDMasked512, + ssa.OpAMD64VPSUBSBMasked512, + ssa.OpAMD64VPMULHUW128, + ssa.OpAMD64VPCMPGTW512, + ssa.OpAMD64VPCMPWMasked128, + ssa.OpAMD64VSQRTPDMasked256, + ssa.OpAMD64VCMPPD256, + ssa.OpAMD64VPAVGWMasked256, + ssa.OpAMD64VPMULUDQMasked256, + ssa.OpAMD64VPMINSD512, + ssa.OpAMD64VPADDWMasked512, + ssa.OpAMD64VPOPCNTWMasked128, + ssa.OpAMD64VCMPPDMasked256, + ssa.OpAMD64VMAXPS128, + ssa.OpAMD64VPMULLD128, + ssa.OpAMD64VPSUBB256, + ssa.OpAMD64VMINPD128, + ssa.OpAMD64VPANDNQMasked512, + ssa.OpAMD64VANDPS512, + ssa.OpAMD64VPCMPQMasked256, + ssa.OpAMD64VPOPCNTDMasked256, + ssa.OpAMD64VANDNPD256, + ssa.OpAMD64VPAND128, + ssa.OpAMD64VPANDN128, + ssa.OpAMD64VXORPD256, + ssa.OpAMD64VMAXPDMasked512, + ssa.OpAMD64VPCMPUQ512, + ssa.OpAMD64VPCMPUW512, + ssa.OpAMD64VPMAXUWMasked256, + ssa.OpAMD64VCMPPSMasked512, + ssa.OpAMD64VPMINSQMasked128, + ssa.OpAMD64VPMULDQ256, + ssa.OpAMD64VPSUBSWMasked512, + ssa.OpAMD64VPXORQMasked128, + ssa.OpAMD64VPCMPEQW128, + ssa.OpAMD64VPABSWMasked128, + ssa.OpAMD64VPMAXSWMasked256, + ssa.OpAMD64VPMULDQ128, + ssa.OpAMD64VPMULLWMasked512, + ssa.OpAMD64VDIVPS512, + ssa.OpAMD64VPMULDQMasked512, + ssa.OpAMD64VPCMPQMasked128, + ssa.OpAMD64VPCMPUBMasked256, + ssa.OpAMD64VRSQRT14PDMasked128, + ssa.OpAMD64VPCMPUD512, + ssa.OpAMD64VXORPSMasked128, + ssa.OpAMD64VPADDW256, + ssa.OpAMD64VANDNPSMasked256, + ssa.OpAMD64VPCMPUQ128, + ssa.OpAMD64VPMULUDQMasked128, + ssa.OpAMD64VCMPPS128, + ssa.OpAMD64VPCMPEQWMasked128, + ssa.OpAMD64VHSUBPS128, + ssa.OpAMD64VPMAXSDMasked512, + ssa.OpAMD64VPABSQMasked256, + ssa.OpAMD64VADDPS256, + 
ssa.OpAMD64VHSUBPS256, + ssa.OpAMD64VPSUBB128, + ssa.OpAMD64VPCMPUB128, + ssa.OpAMD64VMAXPD512, + ssa.OpAMD64VPAVGBMasked512, + ssa.OpAMD64VPCMPUQMasked128, + ssa.OpAMD64VPHSUBW256, + ssa.OpAMD64VPANDNDMasked256, + ssa.OpAMD64VPMAXUWMasked128, + ssa.OpAMD64VXORPS128, + ssa.OpAMD64VMINPS128, + ssa.OpAMD64VADDPDMasked128, + ssa.OpAMD64VPMULLQMasked512, + ssa.OpAMD64VPORQMasked256, + ssa.OpAMD64VPMULHW128, + ssa.OpAMD64VSCALEFPDMasked256, + ssa.OpAMD64VPSUBDMasked512, + ssa.OpAMD64VPSUBQ512, + ssa.OpAMD64VPADDB128, + ssa.OpAMD64VPSUBSB256, + ssa.OpAMD64VDIVPSMasked512, + ssa.OpAMD64VPCMPUB512, + ssa.OpAMD64VSCALEFPS128, + ssa.OpAMD64VPSIGND128, + ssa.OpAMD64VDIVPDMasked128, + ssa.OpAMD64VXORPDMasked128, + ssa.OpAMD64VPCMPUB256, + ssa.OpAMD64VPMINSWMasked512, + ssa.OpAMD64VPXORD512, + ssa.OpAMD64VHADDPD256, + ssa.OpAMD64VPMAXSB128, + ssa.OpAMD64VPHSUBD128, + ssa.OpAMD64VPCMPUQMasked512, + ssa.OpAMD64VPANDDMasked256, + ssa.OpAMD64VPCMPUDMasked256, + ssa.OpAMD64VRCP14PSMasked128, + ssa.OpAMD64VMULPDMasked512, + ssa.OpAMD64VPSUBD512, + ssa.OpAMD64VANDNPD128, + ssa.OpAMD64VPHADDD256, + ssa.OpAMD64VMINPS512, + ssa.OpAMD64VPMULDQ512, + ssa.OpAMD64VCMPPSMasked128, + ssa.OpAMD64VORPSMasked512, + ssa.OpAMD64VPCMPB128, + ssa.OpAMD64VPORDMasked256, + ssa.OpAMD64VPMAXSQMasked512, + ssa.OpAMD64VPADDDMasked256, + ssa.OpAMD64VPSUBQMasked256, + ssa.OpAMD64VXORPSMasked512, + ssa.OpAMD64VPCMPEQB128, + ssa.OpAMD64VPMULLW256, + ssa.OpAMD64VDIVPDMasked512, + ssa.OpAMD64VPMAXUDMasked128, + ssa.OpAMD64VPADDB256, + ssa.OpAMD64VPABSWMasked256, + ssa.OpAMD64VPOPCNTWMasked512, + ssa.OpAMD64VPSUBD128, + ssa.OpAMD64VPXORQMasked512, + ssa.OpAMD64VMINPDMasked256, + ssa.OpAMD64VPADDWMasked256, + ssa.OpAMD64VPMAXSQ256, + ssa.OpAMD64VPSUBWMasked512, + ssa.OpAMD64VMULPS512, + ssa.OpAMD64VPCMPUD256, + ssa.OpAMD64VPMULLQMasked128, + ssa.OpAMD64VPADDSB128, + ssa.OpAMD64VMINPD512, + ssa.OpAMD64VPMAXSD512, + ssa.OpAMD64VPMINUWMasked128, + ssa.OpAMD64VPOPCNTQMasked128, + ssa.OpAMD64VPCMPQ512, + ssa.OpAMD64VPMINUB256, + ssa.OpAMD64VPCMPWMasked512, + ssa.OpAMD64VPXORDMasked256, + ssa.OpAMD64VPORQMasked512, + ssa.OpAMD64VORPDMasked512, + ssa.OpAMD64VPMINSD256, + ssa.OpAMD64VPOPCNTDMasked512, + ssa.OpAMD64VPMINSWMasked128, + ssa.OpAMD64VPXORDMasked512, + ssa.OpAMD64VDIVPS256, + ssa.OpAMD64VPHADDSW128, + ssa.OpAMD64VPSUBD256, + ssa.OpAMD64VDIVPD128, + ssa.OpAMD64VPCMPUDMasked128, + ssa.OpAMD64VPCMPEQQMasked512, + ssa.OpAMD64VRCP14PDMasked256, + ssa.OpAMD64VPMULUDQMasked512, + ssa.OpAMD64VMAXPSMasked256, + ssa.OpAMD64VPMULLQ256, + ssa.OpAMD64VANDNPDMasked256, + ssa.OpAMD64VPADDWMasked128, + ssa.OpAMD64VCMPPDMasked128, + ssa.OpAMD64VMULPDMasked128, + ssa.OpAMD64VPCMPGTWMasked512, + ssa.OpAMD64VPOR128, + ssa.OpAMD64VANDNPDMasked512, + ssa.OpAMD64VPSUBBMasked512, + ssa.OpAMD64VPANDNDMasked128, + ssa.OpAMD64VPCMPQ128, + ssa.OpAMD64VPMINUDMasked128, + ssa.OpAMD64VXORPD128, + ssa.OpAMD64VPAVGWMasked128, + ssa.OpAMD64VPCMPUQMasked256, + ssa.OpAMD64VPMULLDMasked256, + ssa.OpAMD64VPSUBBMasked256, + ssa.OpAMD64VRSQRT14PSMasked128, + ssa.OpAMD64VPADDBMasked128, + ssa.OpAMD64VPMAXUBMasked512, + ssa.OpAMD64VPMULLW128, + ssa.OpAMD64VPSUBW128, + ssa.OpAMD64VPXOR256, + ssa.OpAMD64VPCMPD512, + ssa.OpAMD64VDIVPSMasked256, + ssa.OpAMD64VPOPCNTBMasked256, + ssa.OpAMD64VPADDD128, + ssa.OpAMD64VHADDPD128, + ssa.OpAMD64VPSUBDMasked256, + ssa.OpAMD64VPANDNQ512, + ssa.OpAMD64VPAVGWMasked512, + ssa.OpAMD64VSCALEFPSMasked128, + ssa.OpAMD64VMINPSMasked512, + ssa.OpAMD64VPSUBQMasked512, + ssa.OpAMD64VSQRTPSMasked512, + ssa.OpAMD64VPCMPD256, + 
ssa.OpAMD64VPMINSDMasked256, + ssa.OpAMD64VANDPSMasked512, + ssa.OpAMD64VPMAXUBMasked256, + ssa.OpAMD64VPSUBWMasked256, + ssa.OpAMD64VPADDSWMasked256, + ssa.OpAMD64VPCMPB512, + ssa.OpAMD64VPANDDMasked512, + ssa.OpAMD64VPABSWMasked512, + ssa.OpAMD64VPCMPWMasked256, + ssa.OpAMD64VPSUBSWMasked256, + ssa.OpAMD64VPSUBQMasked128, + ssa.OpAMD64VPCMPW256, + ssa.OpAMD64VPMINUD256, + ssa.OpAMD64VCMPPDMasked512, + ssa.OpAMD64VCMPPD512, + ssa.OpAMD64VPMINUBMasked512, + ssa.OpAMD64VPSUBQ256, + ssa.OpAMD64VPMINSD128, + ssa.OpAMD64VPADDSB256, + ssa.OpAMD64VPMULLDMasked512, + ssa.OpAMD64VANDPD512, + ssa.OpAMD64VMULPDMasked256, + ssa.OpAMD64VPHADDSW256, + ssa.OpAMD64VPAND256, + ssa.OpAMD64VADDPS512, + ssa.OpAMD64VPMINUQ256, + ssa.OpAMD64VADDPD256, + ssa.OpAMD64VPANDND512, + ssa.OpAMD64VPSUBSWMasked128, + ssa.OpAMD64VPMAXSW256, + ssa.OpAMD64VMAXPD256, + ssa.OpAMD64VMULPD128, + ssa.OpAMD64VPOPCNTQMasked256, + ssa.OpAMD64VPADDD256, + ssa.OpAMD64VMINPD256, + ssa.OpAMD64VPXORQMasked256, + ssa.OpAMD64VPOPCNTBMasked512, + ssa.OpAMD64VPANDQMasked512, + ssa.OpAMD64VPCMPUBMasked128, + ssa.OpAMD64VPMINSW512, + ssa.OpAMD64VPORDMasked512, + ssa.OpAMD64VPAVGB128, + ssa.OpAMD64VADDPD512, + ssa.OpAMD64VPMULHW512, + ssa.OpAMD64VPADDQ256, + ssa.OpAMD64VPMINUQ512, + ssa.OpAMD64VORPS512, + ssa.OpAMD64VPMAXUWMasked512, + ssa.OpAMD64VPMAXSDMasked128, + ssa.OpAMD64VPMULDQMasked128, + ssa.OpAMD64VSCALEFPD128, + ssa.OpAMD64VPCMPGTW256, + ssa.OpAMD64VPMAXUW256, + ssa.OpAMD64VPMAXUD512, + ssa.OpAMD64VPMAXUQ256, + ssa.OpAMD64VPMINUDMasked256, + ssa.OpAMD64VPABSBMasked512, + ssa.OpAMD64VPANDNDMasked512, + ssa.OpAMD64VPMINUDMasked512, + ssa.OpAMD64VPABSBMasked256, + ssa.OpAMD64VXORPDMasked256, + ssa.OpAMD64VMAXPSMasked512, + ssa.OpAMD64VPMINUD512, + ssa.OpAMD64VPABSBMasked128, + ssa.OpAMD64VPSUBBMasked128, + ssa.OpAMD64VPMINSDMasked512, + ssa.OpAMD64VPSUBSBMasked256, + ssa.OpAMD64VORPSMasked256, + ssa.OpAMD64VADDPSMasked128, + ssa.OpAMD64VPMAXSB512, + ssa.OpAMD64VXORPDMasked512, + ssa.OpAMD64VADDPS128, + ssa.OpAMD64VPMINUQMasked512, + ssa.OpAMD64VANDNPS256, + ssa.OpAMD64VPMAXSB256, + ssa.OpAMD64VDIVPDMasked256, + ssa.OpAMD64VPHSUBW128, + ssa.OpAMD64VPADDQMasked256, + ssa.OpAMD64VPMAXSD256, + ssa.OpAMD64VPABSDMasked512, + ssa.OpAMD64VPADDQ512, + ssa.OpAMD64VPMAXUDMasked256, + ssa.OpAMD64VPCMPGTB128, + ssa.OpAMD64VPMAXUQMasked128, + ssa.OpAMD64VPCMPW128, + ssa.OpAMD64VPCMPGTQ128, + ssa.OpAMD64VPANDQ512, + ssa.OpAMD64VRCP14PSMasked512, + ssa.OpAMD64VANDPS256, + ssa.OpAMD64VPHSUBD256, + ssa.OpAMD64VPSUBW512, + ssa.OpAMD64VHADDPS256, + ssa.OpAMD64VMULPD256, + ssa.OpAMD64VRCP14PDMasked512, + ssa.OpAMD64VPSUBDMasked128, + ssa.OpAMD64VPXORDMasked128, + ssa.OpAMD64VHSUBPD128, + ssa.OpAMD64VPMAXUW128, + ssa.OpAMD64VORPDMasked128, + ssa.OpAMD64VPHADDD128, + ssa.OpAMD64VPMINUD128, + ssa.OpAMD64VPSIGND256, + ssa.OpAMD64VPADDSW128, + ssa.OpAMD64VCMPPS512, + ssa.OpAMD64VPMAXUQ512, + ssa.OpAMD64VPCMPGTQ512, + ssa.OpAMD64VADDPDMasked256, + ssa.OpAMD64VHSUBPD256, + ssa.OpAMD64VPCMPGTWMasked256, + ssa.OpAMD64VPSUBSB512, + ssa.OpAMD64VRSQRT14PDMasked256, + ssa.OpAMD64VPMAXSD128, + ssa.OpAMD64VPADDQMasked128, + ssa.OpAMD64VPMAXSQ128, + ssa.OpAMD64VANDPD256, + ssa.OpAMD64VPSUBSBMasked128, + ssa.OpAMD64VPCMPGTQ256, + ssa.OpAMD64VPADDSBMasked128, + ssa.OpAMD64VXORPD512, + ssa.OpAMD64VPCMPGTWMasked128, + ssa.OpAMD64VPMULLQ128, + ssa.OpAMD64VPSUBWMasked128, + ssa.OpAMD64VSCALEFPDMasked128, + ssa.OpAMD64VPMAXUD256, + ssa.OpAMD64VANDNPDMasked128, + ssa.OpAMD64VPMINUWMasked512, + ssa.OpAMD64VPXORQ512, + ssa.OpAMD64VPMAXSDMasked256, + ssa.OpAMD64VPMINUWMasked256, + 
ssa.OpAMD64VPMINSQ256, + ssa.OpAMD64VPMULDQMasked256, + ssa.OpAMD64VPMULUDQ128, + ssa.OpAMD64VPMAXUB256, + ssa.OpAMD64VPMINUW256, + ssa.OpAMD64VPADDSWMasked512, + ssa.OpAMD64VPADDSB512, + ssa.OpAMD64VPMINUQMasked128, + ssa.OpAMD64VPMULUDQ256, + ssa.OpAMD64VANDPSMasked256, + ssa.OpAMD64VPADDDMasked128, + ssa.OpAMD64VPCMPW512, + ssa.OpAMD64VANDNPSMasked128, + ssa.OpAMD64VPABSDMasked128, + ssa.OpAMD64VPADDSWMasked128, + ssa.OpAMD64VPMULHUW512, + ssa.OpAMD64VPSUBQ128, + ssa.OpAMD64VPADDDMasked512, + ssa.OpAMD64VMULPSMasked512, + ssa.OpAMD64VORPDMasked256, + ssa.OpAMD64VPMULHUWMasked256, + ssa.OpAMD64VPORDMasked128, + ssa.OpAMD64VSCALEFPDMasked512, + ssa.OpAMD64VSCALEFPD512, + ssa.OpAMD64VPMAXSBMasked512, + ssa.OpAMD64VPMINSQMasked256, + ssa.OpAMD64VXORPS512, + ssa.OpAMD64VPSUBSB128, + ssa.OpAMD64VPMAXSW128, + ssa.OpAMD64VPMINSBMasked512, + ssa.OpAMD64VADDPDMasked512, + ssa.OpAMD64VPSUBB512, + ssa.OpAMD64VPMULHWMasked512, + ssa.OpAMD64VRSQRT14PSMasked256, + ssa.OpAMD64VPOPCNTBMasked128, + ssa.OpAMD64VPMAXUQMasked256, + ssa.OpAMD64VPMINSB256, + ssa.OpAMD64VPABSQMasked512, + ssa.OpAMD64VDIVPSMasked128, + ssa.OpAMD64VPAVGBMasked128, + ssa.OpAMD64VPCMPEQW512, + ssa.OpAMD64VPMULHUW256, + ssa.OpAMD64VPMULLW512, + ssa.OpAMD64VPADDB512, + ssa.OpAMD64VPORD512, + ssa.OpAMD64VMAXPDMasked128, + ssa.OpAMD64VPMAXSW512, + ssa.OpAMD64VPMINUW512, + ssa.OpAMD64VADDPSMasked256, + ssa.OpAMD64VPCMPGTQMasked512, + ssa.OpAMD64VPHSUBSW256, + ssa.OpAMD64VRSQRT14PSMasked512, + ssa.OpAMD64VORPSMasked128, + ssa.OpAMD64VMAXPS256, + ssa.OpAMD64VPANDQMasked128, + ssa.OpAMD64VPCMPEQWMasked512, + ssa.OpAMD64VRCP14PDMasked128, + ssa.OpAMD64VADDPSMasked512, + ssa.OpAMD64VPMAXSQMasked128, + ssa.OpAMD64VPOPCNTDMasked128, + ssa.OpAMD64VPHADDW256, + ssa.OpAMD64VXORPSMasked256, + ssa.OpAMD64VPMINSQMasked512, + ssa.OpAMD64VPSIGNB256, + ssa.OpAMD64VPSUBSW256, + ssa.OpAMD64VDIVPD512, + ssa.OpAMD64VPADDW128, + ssa.OpAMD64VXORPS256, + ssa.OpAMD64VANDNPSMasked512, + ssa.OpAMD64VPAVGB512, + ssa.OpAMD64VPMAXUW512, + ssa.OpAMD64VPMAXSWMasked512, + ssa.OpAMD64VPSIGNW256, + ssa.OpAMD64VSQRTPSMasked128, + ssa.OpAMD64VPCMPEQQMasked128, + ssa.OpAMD64VPOPCNTWMasked256, + ssa.OpAMD64VPCMPGTD128, + ssa.OpAMD64VMAXPDMasked256, + ssa.OpAMD64VPANDNQMasked256, + ssa.OpAMD64VPMINSB512, + ssa.OpAMD64VPMULHUWMasked128, + ssa.OpAMD64VPMINUW128, + ssa.OpAMD64VMINPDMasked512, + ssa.OpAMD64VMAXPSMasked128, + ssa.OpAMD64VPMAXSBMasked256, + ssa.OpAMD64VPANDDMasked128, + ssa.OpAMD64VPCMPGTD256, + ssa.OpAMD64VPOPCNTQMasked512, + ssa.OpAMD64VPMAXUB512, + ssa.OpAMD64VANDPDMasked128, + ssa.OpAMD64VPANDNQMasked128, + ssa.OpAMD64VSCALEFPS256, + ssa.OpAMD64VPCMPEQQMasked256, + ssa.OpAMD64VSCALEFPSMasked512, + ssa.OpAMD64VANDPDMasked512, + ssa.OpAMD64VORPS128, + ssa.OpAMD64VPMINUB128, + ssa.OpAMD64VPMULHWMasked128, + ssa.OpAMD64VPAVGB256, + ssa.OpAMD64VPMINSQ128, + ssa.OpAMD64VPCMPEQQ256, + ssa.OpAMD64VMULPD512, + ssa.OpAMD64VPABSDMasked256, + ssa.OpAMD64VPADDBMasked256, + ssa.OpAMD64VPSIGNW128, + ssa.OpAMD64VPMINUQMasked256, + ssa.OpAMD64VPMULHW256, + ssa.OpAMD64VSCALEFPS512, + ssa.OpAMD64VRSQRT14PDMasked512, + ssa.OpAMD64VPMINUBMasked256, + ssa.OpAMD64VADDPD128, + ssa.OpAMD64VSCALEFPD256, + ssa.OpAMD64VANDPS128, + ssa.OpAMD64VMULPS256, + ssa.OpAMD64VPMINSW128, + ssa.OpAMD64VPMAXSBMasked128, + ssa.OpAMD64VPMAXSWMasked128, + ssa.OpAMD64VMINPS256, + ssa.OpAMD64VPMAXUQ128, + ssa.OpAMD64VPMINSBMasked256, + ssa.OpAMD64VPAVGW256, + ssa.OpAMD64VMAXPD128, + ssa.OpAMD64VPSIGNB128, + ssa.OpAMD64VPMINUB512, + ssa.OpAMD64VPCMPGTW128, + ssa.OpAMD64VORPS256, + ssa.OpAMD64VPMINSB128, + 
ssa.OpAMD64VPMINUQ128, + ssa.OpAMD64VPMINSQ512, + ssa.OpAMD64VSQRTPDMasked512, + ssa.OpAMD64VPMINSWMasked256, + ssa.OpAMD64VMINPSMasked256, + ssa.OpAMD64VPMAXUBMasked128, + ssa.OpAMD64VPCMPEQB256, + ssa.OpAMD64VANDNPD512, + ssa.OpAMD64VPCMPGTQMasked128, + ssa.OpAMD64VANDPDMasked256, + ssa.OpAMD64VORPD128, + ssa.OpAMD64VMAXPS512, + ssa.OpAMD64VPMULLD512, + ssa.OpAMD64VMINPDMasked128, + ssa.OpAMD64VANDNPS128, + ssa.OpAMD64VMULPS128, + ssa.OpAMD64VPMULLQ512, + ssa.OpAMD64VMINPSMasked128, + ssa.OpAMD64VRCP14PSMasked256, + ssa.OpAMD64VPMAXUD128, + ssa.OpAMD64VPMINSBMasked128, + ssa.OpAMD64VPCMPEQQ512, + ssa.OpAMD64VPMULHWMasked256, + ssa.OpAMD64VSQRTPSMasked256, + ssa.OpAMD64VDIVPS128, + ssa.OpAMD64VPMAXUDMasked512, + ssa.OpAMD64VPMAXUQMasked512, + ssa.OpAMD64VANDPSMasked128, + ssa.OpAMD64VPABSQMasked128, + ssa.OpAMD64VPMAXSQMasked256, + ssa.OpAMD64VPAVGBMasked256, + ssa.OpAMD64VPCMPEQWMasked256, + ssa.OpAMD64VSCALEFPSMasked256, + ssa.OpAMD64VPAVGW512: + if p.From.Type == obj.TYPE_CONST { + p.AddRestSourceReg(simdReg(v.Args[0])) + } else { + p.AddRestSourceReg(simdReg(v.Args[1])) + } + } + + // Third arg + switch v.Op { + // Registers + case ssa.OpAMD64VPCMPUBMasked512, + ssa.OpAMD64VPCMPUQ256, + ssa.OpAMD64VPMINSDMasked128, + ssa.OpAMD64VPCMPBMasked128, + ssa.OpAMD64VPCMPUWMasked128, + ssa.OpAMD64VMULPSMasked128, + ssa.OpAMD64VPANDQMasked256, + ssa.OpAMD64VPCMPBMasked512, + ssa.OpAMD64VPCMPUWMasked512, + ssa.OpAMD64VPMULHUWMasked512, + ssa.OpAMD64VPCMPD128, + ssa.OpAMD64VPCMPDMasked256, + ssa.OpAMD64VPCMPGTQMasked256, + ssa.OpAMD64VPMULLQMasked256, + ssa.OpAMD64VCMPPD128, + ssa.OpAMD64VCMPPS256, + ssa.OpAMD64VPMULLDMasked128, + ssa.OpAMD64VPADDQMasked512, + ssa.OpAMD64VPCMPDMasked128, + ssa.OpAMD64VPCMPUW256, + ssa.OpAMD64VPMINUBMasked128, + ssa.OpAMD64VPMULLWMasked128, + ssa.OpAMD64VCMPPSMasked256, + ssa.OpAMD64VPCMPDMasked512, + ssa.OpAMD64VPCMPQ256, + ssa.OpAMD64VPMULLWMasked256, + ssa.OpAMD64VPCMPQMasked512, + ssa.OpAMD64VPADDSBMasked256, + ssa.OpAMD64VPCMPUD128, + ssa.OpAMD64VMULPSMasked256, + ssa.OpAMD64VPCMPUW128, + ssa.OpAMD64VPADDBMasked512, + ssa.OpAMD64VPADDSBMasked512, + ssa.OpAMD64VPCMPBMasked256, + ssa.OpAMD64VPCMPB256, + ssa.OpAMD64VPCMPUWMasked256, + ssa.OpAMD64VPORQMasked128, + ssa.OpAMD64VPCMPUDMasked512, + ssa.OpAMD64VPSUBSBMasked512, + ssa.OpAMD64VPCMPWMasked128, + ssa.OpAMD64VCMPPD256, + ssa.OpAMD64VPAVGWMasked256, + ssa.OpAMD64VPMULUDQMasked256, + ssa.OpAMD64VPADDWMasked512, + ssa.OpAMD64VCMPPDMasked256, + ssa.OpAMD64VPANDNQMasked512, + ssa.OpAMD64VPCMPQMasked256, + ssa.OpAMD64VMAXPDMasked512, + ssa.OpAMD64VPCMPUQ512, + ssa.OpAMD64VPCMPUW512, + ssa.OpAMD64VPMAXUWMasked256, + ssa.OpAMD64VCMPPSMasked512, + ssa.OpAMD64VPMINSQMasked128, + ssa.OpAMD64VPSUBSWMasked512, + ssa.OpAMD64VPXORQMasked128, + ssa.OpAMD64VPMAXSWMasked256, + ssa.OpAMD64VPMULLWMasked512, + ssa.OpAMD64VPMULDQMasked512, + ssa.OpAMD64VPCMPQMasked128, + ssa.OpAMD64VPCMPUBMasked256, + ssa.OpAMD64VPCMPUD512, + ssa.OpAMD64VXORPSMasked128, + ssa.OpAMD64VANDNPSMasked256, + ssa.OpAMD64VPCMPUQ128, + ssa.OpAMD64VPMULUDQMasked128, + ssa.OpAMD64VCMPPS128, + ssa.OpAMD64VPCMPEQWMasked128, + ssa.OpAMD64VPMAXSDMasked512, + ssa.OpAMD64VPCMPUB128, + ssa.OpAMD64VPAVGBMasked512, + ssa.OpAMD64VPCMPUQMasked128, + ssa.OpAMD64VPANDNDMasked256, + ssa.OpAMD64VPMAXUWMasked128, + ssa.OpAMD64VADDPDMasked128, + ssa.OpAMD64VPMULLQMasked512, + ssa.OpAMD64VPORQMasked256, + ssa.OpAMD64VSCALEFPDMasked256, + ssa.OpAMD64VPSUBDMasked512, + ssa.OpAMD64VDIVPSMasked512, + ssa.OpAMD64VPCMPUB512, + ssa.OpAMD64VDIVPDMasked128, + 
ssa.OpAMD64VXORPDMasked128, + ssa.OpAMD64VPCMPUB256, + ssa.OpAMD64VPMINSWMasked512, + ssa.OpAMD64VPCMPUQMasked512, + ssa.OpAMD64VPANDDMasked256, + ssa.OpAMD64VPCMPUDMasked256, + ssa.OpAMD64VMULPDMasked512, + ssa.OpAMD64VCMPPSMasked128, + ssa.OpAMD64VORPSMasked512, + ssa.OpAMD64VPCMPB128, + ssa.OpAMD64VPORDMasked256, + ssa.OpAMD64VPMAXSQMasked512, + ssa.OpAMD64VPADDDMasked256, + ssa.OpAMD64VPSUBQMasked256, + ssa.OpAMD64VXORPSMasked512, + ssa.OpAMD64VDIVPDMasked512, + ssa.OpAMD64VPMAXUDMasked128, + ssa.OpAMD64VPXORQMasked512, + ssa.OpAMD64VMINPDMasked256, + ssa.OpAMD64VPADDWMasked256, + ssa.OpAMD64VPSUBWMasked512, + ssa.OpAMD64VPCMPUD256, + ssa.OpAMD64VPMULLQMasked128, + ssa.OpAMD64VPMINUWMasked128, + ssa.OpAMD64VPCMPQ512, + ssa.OpAMD64VPCMPWMasked512, + ssa.OpAMD64VPXORDMasked256, + ssa.OpAMD64VPORQMasked512, + ssa.OpAMD64VORPDMasked512, + ssa.OpAMD64VPMINSWMasked128, + ssa.OpAMD64VPXORDMasked512, + ssa.OpAMD64VPCMPUDMasked128, + ssa.OpAMD64VPCMPEQQMasked512, + ssa.OpAMD64VPMULUDQMasked512, + ssa.OpAMD64VMAXPSMasked256, + ssa.OpAMD64VANDNPDMasked256, + ssa.OpAMD64VPADDWMasked128, + ssa.OpAMD64VCMPPDMasked128, + ssa.OpAMD64VMULPDMasked128, + ssa.OpAMD64VPCMPGTWMasked512, + ssa.OpAMD64VANDNPDMasked512, + ssa.OpAMD64VPSUBBMasked512, + ssa.OpAMD64VPANDNDMasked128, + ssa.OpAMD64VPCMPQ128, + ssa.OpAMD64VPMINUDMasked128, + ssa.OpAMD64VPAVGWMasked128, + ssa.OpAMD64VPCMPUQMasked256, + ssa.OpAMD64VPMULLDMasked256, + ssa.OpAMD64VPSUBBMasked256, + ssa.OpAMD64VPADDBMasked128, + ssa.OpAMD64VPMAXUBMasked512, + ssa.OpAMD64VPCMPD512, + ssa.OpAMD64VDIVPSMasked256, + ssa.OpAMD64VPSUBDMasked256, + ssa.OpAMD64VPAVGWMasked512, + ssa.OpAMD64VSCALEFPSMasked128, + ssa.OpAMD64VMINPSMasked512, + ssa.OpAMD64VPSUBQMasked512, + ssa.OpAMD64VPCMPD256, + ssa.OpAMD64VPMINSDMasked256, + ssa.OpAMD64VANDPSMasked512, + ssa.OpAMD64VPMAXUBMasked256, + ssa.OpAMD64VPSUBWMasked256, + ssa.OpAMD64VPADDSWMasked256, + ssa.OpAMD64VPCMPB512, + ssa.OpAMD64VPANDDMasked512, + ssa.OpAMD64VPCMPWMasked256, + ssa.OpAMD64VPSUBSWMasked256, + ssa.OpAMD64VPSUBQMasked128, + ssa.OpAMD64VPCMPW256, + ssa.OpAMD64VCMPPDMasked512, + ssa.OpAMD64VCMPPD512, + ssa.OpAMD64VPMINUBMasked512, + ssa.OpAMD64VPMULLDMasked512, + ssa.OpAMD64VMULPDMasked256, + ssa.OpAMD64VPSUBSWMasked128, + ssa.OpAMD64VPXORQMasked256, + ssa.OpAMD64VPANDQMasked512, + ssa.OpAMD64VPCMPUBMasked128, + ssa.OpAMD64VPORDMasked512, + ssa.OpAMD64VPMAXUWMasked512, + ssa.OpAMD64VPMAXSDMasked128, + ssa.OpAMD64VPMULDQMasked128, + ssa.OpAMD64VPMINUDMasked256, + ssa.OpAMD64VPANDNDMasked512, + ssa.OpAMD64VPMINUDMasked512, + ssa.OpAMD64VXORPDMasked256, + ssa.OpAMD64VMAXPSMasked512, + ssa.OpAMD64VPSUBBMasked128, + ssa.OpAMD64VPMINSDMasked512, + ssa.OpAMD64VPSUBSBMasked256, + ssa.OpAMD64VORPSMasked256, + ssa.OpAMD64VADDPSMasked128, + ssa.OpAMD64VXORPDMasked512, + ssa.OpAMD64VPMINUQMasked512, + ssa.OpAMD64VDIVPDMasked256, + ssa.OpAMD64VPADDQMasked256, + ssa.OpAMD64VPMAXUDMasked256, + ssa.OpAMD64VPMAXUQMasked128, + ssa.OpAMD64VPCMPW128, + ssa.OpAMD64VPSUBDMasked128, + ssa.OpAMD64VPXORDMasked128, + ssa.OpAMD64VORPDMasked128, + ssa.OpAMD64VCMPPS512, + ssa.OpAMD64VADDPDMasked256, + ssa.OpAMD64VPCMPGTWMasked256, + ssa.OpAMD64VPADDQMasked128, + ssa.OpAMD64VPSUBSBMasked128, + ssa.OpAMD64VPADDSBMasked128, + ssa.OpAMD64VPCMPGTWMasked128, + ssa.OpAMD64VPSUBWMasked128, + ssa.OpAMD64VSCALEFPDMasked128, + ssa.OpAMD64VANDNPDMasked128, + ssa.OpAMD64VPMINUWMasked512, + ssa.OpAMD64VPMAXSDMasked256, + ssa.OpAMD64VPMINUWMasked256, + ssa.OpAMD64VPMULDQMasked256, + ssa.OpAMD64VPADDSWMasked512, + ssa.OpAMD64VPMINUQMasked128, + 
ssa.OpAMD64VANDPSMasked256, + ssa.OpAMD64VPADDDMasked128, + ssa.OpAMD64VPCMPW512, + ssa.OpAMD64VANDNPSMasked128, + ssa.OpAMD64VPADDSWMasked128, + ssa.OpAMD64VPADDDMasked512, + ssa.OpAMD64VMULPSMasked512, + ssa.OpAMD64VORPDMasked256, + ssa.OpAMD64VPMULHUWMasked256, + ssa.OpAMD64VPORDMasked128, + ssa.OpAMD64VSCALEFPDMasked512, + ssa.OpAMD64VPMAXSBMasked512, + ssa.OpAMD64VPMINSQMasked256, + ssa.OpAMD64VPMINSBMasked512, + ssa.OpAMD64VADDPDMasked512, + ssa.OpAMD64VPMULHWMasked512, + ssa.OpAMD64VPMAXUQMasked256, + ssa.OpAMD64VDIVPSMasked128, + ssa.OpAMD64VPAVGBMasked128, + ssa.OpAMD64VMAXPDMasked128, + ssa.OpAMD64VADDPSMasked256, + ssa.OpAMD64VPCMPGTQMasked512, + ssa.OpAMD64VORPSMasked128, + ssa.OpAMD64VPANDQMasked128, + ssa.OpAMD64VPCMPEQWMasked512, + ssa.OpAMD64VADDPSMasked512, + ssa.OpAMD64VPMAXSQMasked128, + ssa.OpAMD64VXORPSMasked256, + ssa.OpAMD64VPMINSQMasked512, + ssa.OpAMD64VANDNPSMasked512, + ssa.OpAMD64VPMAXSWMasked512, + ssa.OpAMD64VPCMPEQQMasked128, + ssa.OpAMD64VMAXPDMasked256, + ssa.OpAMD64VPANDNQMasked256, + ssa.OpAMD64VPMULHUWMasked128, + ssa.OpAMD64VMINPDMasked512, + ssa.OpAMD64VMAXPSMasked128, + ssa.OpAMD64VPMAXSBMasked256, + ssa.OpAMD64VPANDDMasked128, + ssa.OpAMD64VANDPDMasked128, + ssa.OpAMD64VPANDNQMasked128, + ssa.OpAMD64VPCMPEQQMasked256, + ssa.OpAMD64VSCALEFPSMasked512, + ssa.OpAMD64VANDPDMasked512, + ssa.OpAMD64VPMULHWMasked128, + ssa.OpAMD64VPADDBMasked256, + ssa.OpAMD64VPMINUQMasked256, + ssa.OpAMD64VPMINUBMasked256, + ssa.OpAMD64VPMAXSBMasked128, + ssa.OpAMD64VPMAXSWMasked128, + ssa.OpAMD64VPMINSBMasked256, + ssa.OpAMD64VPMINSWMasked256, + ssa.OpAMD64VMINPSMasked256, + ssa.OpAMD64VPMAXUBMasked128, + ssa.OpAMD64VPCMPGTQMasked128, + ssa.OpAMD64VANDPDMasked256, + ssa.OpAMD64VMINPDMasked128, + ssa.OpAMD64VMINPSMasked128, + ssa.OpAMD64VPMINSBMasked128, + ssa.OpAMD64VPMULHWMasked256, + ssa.OpAMD64VPMAXUDMasked512, + ssa.OpAMD64VPMAXUQMasked512, + ssa.OpAMD64VANDPSMasked128, + ssa.OpAMD64VPMAXSQMasked256, + ssa.OpAMD64VPAVGBMasked256, + ssa.OpAMD64VPCMPEQWMasked256, + ssa.OpAMD64VSCALEFPSMasked256: + if p.From.Type == obj.TYPE_CONST { + p.AddRestSourceReg(simdReg(v.Args[1])) + } else { + p.AddRestSourceReg(simdReg(v.Args[2])) + } + } + + // Fourth arg + switch v.Op { + case ssa.OpAMD64VPCMPUBMasked512, + ssa.OpAMD64VPCMPBMasked128, + ssa.OpAMD64VPCMPUWMasked128, + ssa.OpAMD64VPCMPBMasked512, + ssa.OpAMD64VPCMPUWMasked512, + ssa.OpAMD64VPCMPDMasked256, + ssa.OpAMD64VPCMPDMasked128, + ssa.OpAMD64VCMPPSMasked256, + ssa.OpAMD64VPCMPDMasked512, + ssa.OpAMD64VPCMPQMasked512, + ssa.OpAMD64VPCMPBMasked256, + ssa.OpAMD64VPCMPUWMasked256, + ssa.OpAMD64VPCMPUDMasked512, + ssa.OpAMD64VPCMPWMasked128, + ssa.OpAMD64VCMPPDMasked256, + ssa.OpAMD64VPCMPQMasked256, + ssa.OpAMD64VCMPPSMasked512, + ssa.OpAMD64VPCMPQMasked128, + ssa.OpAMD64VPCMPUBMasked256, + ssa.OpAMD64VPCMPUQMasked128, + ssa.OpAMD64VPCMPUQMasked512, + ssa.OpAMD64VPCMPUDMasked256, + ssa.OpAMD64VCMPPSMasked128, + ssa.OpAMD64VPCMPWMasked512, + ssa.OpAMD64VPCMPUDMasked128, + ssa.OpAMD64VCMPPDMasked128, + ssa.OpAMD64VPCMPUQMasked256, + ssa.OpAMD64VPCMPWMasked256, + ssa.OpAMD64VCMPPDMasked512, + ssa.OpAMD64VPCMPUBMasked128: + if p.From.Type == obj.TYPE_CONST { + p.AddRestSourceReg(simdReg(v.Args[2])) + } else { + p.AddRestSourceReg(simdReg(v.Args[3])) + } + } + + // Output + switch v.Op { + case ssa.OpAMD64VPMINSW256, + ssa.OpAMD64VPMULLD256, + ssa.OpAMD64VORPD512, + ssa.OpAMD64VPCMPUBMasked512, + ssa.OpAMD64VPCMPUQ256, + ssa.OpAMD64VPMINSDMasked128, + ssa.OpAMD64VDIVPD256, + ssa.OpAMD64VPCMPEQW256, + ssa.OpAMD64VPCMPBMasked128, + 
ssa.OpAMD64VPCMPUWMasked128, + ssa.OpAMD64VHADDPS128, + ssa.OpAMD64VPXOR128, + ssa.OpAMD64VPADDQ128, + ssa.OpAMD64VMULPSMasked128, + ssa.OpAMD64VPANDQMasked256, + ssa.OpAMD64VPCMPBMasked512, + ssa.OpAMD64VSQRTPDMasked128, + ssa.OpAMD64VPMAXUB128, + ssa.OpAMD64VPSUBW256, + ssa.OpAMD64VPMAXSQ512, + ssa.OpAMD64VANDNPS512, + ssa.OpAMD64VPCMPUWMasked512, + ssa.OpAMD64VPMULHUWMasked512, + ssa.OpAMD64VPABSW512, + ssa.OpAMD64VPCMPD128, + ssa.OpAMD64VPCMPDMasked256, + ssa.OpAMD64VRSQRT14PD256, + ssa.OpAMD64VPHADDW128, + ssa.OpAMD64VPCMPGTQMasked256, + ssa.OpAMD64VPMULLQMasked256, + ssa.OpAMD64VCMPPD128, + ssa.OpAMD64VCMPPS256, + ssa.OpAMD64VPADDW512, + ssa.OpAMD64VPMULLDMasked128, + ssa.OpAMD64VPCMPEQQ128, + ssa.OpAMD64VPAVGW128, + ssa.OpAMD64VPOR256, + ssa.OpAMD64VPADDQMasked512, + ssa.OpAMD64VPCMPDMasked128, + ssa.OpAMD64VPCMPUW256, + ssa.OpAMD64VPMINUBMasked128, + ssa.OpAMD64VPMULLWMasked128, + ssa.OpAMD64VPOPCNTW512, + ssa.OpAMD64VORPD256, + ssa.OpAMD64VPANDN256, + ssa.OpAMD64VPANDD512, + ssa.OpAMD64VCMPPSMasked256, + ssa.OpAMD64VPCMPDMasked512, + ssa.OpAMD64VPORQ512, + ssa.OpAMD64VPCMPQ256, + ssa.OpAMD64VPMULLWMasked256, + ssa.OpAMD64VPSUBSW128, + ssa.OpAMD64VPCMPQMasked512, + ssa.OpAMD64VPADDSBMasked256, + ssa.OpAMD64VPCMPUD128, + ssa.OpAMD64VPCMPGTB256, + ssa.OpAMD64VMULPSMasked256, + ssa.OpAMD64VPCMPUW128, + ssa.OpAMD64VPCMPEQD256, + ssa.OpAMD64VPSUBSW512, + ssa.OpAMD64VPABSD512, + ssa.OpAMD64VPADDD512, + ssa.OpAMD64VPADDBMasked512, + ssa.OpAMD64VPADDSBMasked512, + ssa.OpAMD64VPCMPBMasked256, + ssa.OpAMD64VPMULUDQ512, + ssa.OpAMD64VPADDSW512, + ssa.OpAMD64VPCMPB256, + ssa.OpAMD64VPCMPUWMasked256, + ssa.OpAMD64VPORQMasked128, + ssa.OpAMD64VANDPD128, + ssa.OpAMD64VPCMPEQD128, + ssa.OpAMD64VPHSUBSW128, + ssa.OpAMD64VPADDSW256, + ssa.OpAMD64VPCMPUDMasked512, + ssa.OpAMD64VPSUBSBMasked512, + ssa.OpAMD64VPMULHUW128, + ssa.OpAMD64VPCMPGTW512, + ssa.OpAMD64VPCMPWMasked128, + ssa.OpAMD64VSQRTPDMasked256, + ssa.OpAMD64VPOPCNTB256, + ssa.OpAMD64VCMPPD256, + ssa.OpAMD64VPAVGWMasked256, + ssa.OpAMD64VPMULUDQMasked256, + ssa.OpAMD64VPMINSD512, + ssa.OpAMD64VPADDWMasked512, + ssa.OpAMD64VPOPCNTB128, + ssa.OpAMD64VPOPCNTWMasked128, + ssa.OpAMD64VCMPPDMasked256, + ssa.OpAMD64VMAXPS128, + ssa.OpAMD64VPMULLD128, + ssa.OpAMD64VPSUBB256, + ssa.OpAMD64VMINPD128, + ssa.OpAMD64VPANDNQMasked512, + ssa.OpAMD64VANDPS512, + ssa.OpAMD64VPCMPQMasked256, + ssa.OpAMD64VPOPCNTDMasked256, + ssa.OpAMD64VANDNPD256, + ssa.OpAMD64VPAND128, + ssa.OpAMD64VPANDN128, + ssa.OpAMD64VXORPD256, + ssa.OpAMD64VMAXPDMasked512, + ssa.OpAMD64VPCMPUQ512, + ssa.OpAMD64VPCMPUW512, + ssa.OpAMD64VPMAXUWMasked256, + ssa.OpAMD64VCMPPSMasked512, + ssa.OpAMD64VPMINSQMasked128, + ssa.OpAMD64VPMULDQ256, + ssa.OpAMD64VPSUBSWMasked512, + ssa.OpAMD64VPXORQMasked128, + ssa.OpAMD64VPCMPEQW128, + ssa.OpAMD64VPABSWMasked128, + ssa.OpAMD64VPMAXSWMasked256, + ssa.OpAMD64VPMULDQ128, + ssa.OpAMD64VPMULLWMasked512, + ssa.OpAMD64VDIVPS512, + ssa.OpAMD64VPMULDQMasked512, + ssa.OpAMD64VRCP14PS512, + ssa.OpAMD64VPCMPQMasked128, + ssa.OpAMD64VPCMPUBMasked256, + ssa.OpAMD64VRSQRT14PDMasked128, + ssa.OpAMD64VPCMPUD512, + ssa.OpAMD64VXORPSMasked128, + ssa.OpAMD64VPADDW256, + ssa.OpAMD64VANDNPSMasked256, + ssa.OpAMD64VPCMPUQ128, + ssa.OpAMD64VPMULUDQMasked128, + ssa.OpAMD64VCMPPS128, + ssa.OpAMD64VPCMPEQWMasked128, + ssa.OpAMD64VHSUBPS128, + ssa.OpAMD64VPMAXSDMasked512, + ssa.OpAMD64VPABSQMasked256, + ssa.OpAMD64VADDPS256, + ssa.OpAMD64VHSUBPS256, + ssa.OpAMD64VPSUBB128, + ssa.OpAMD64VPCMPUB128, + ssa.OpAMD64VMAXPD512, + ssa.OpAMD64VPAVGBMasked512, + 
ssa.OpAMD64VPCMPUQMasked128, + ssa.OpAMD64VPHSUBW256, + ssa.OpAMD64VPANDNDMasked256, + ssa.OpAMD64VPMAXUWMasked128, + ssa.OpAMD64VXORPS128, + ssa.OpAMD64VMINPS128, + ssa.OpAMD64VADDPDMasked128, + ssa.OpAMD64VPMULLQMasked512, + ssa.OpAMD64VPORQMasked256, + ssa.OpAMD64VPMULHW128, + ssa.OpAMD64VSCALEFPDMasked256, + ssa.OpAMD64VPSUBDMasked512, + ssa.OpAMD64VPSUBQ512, + ssa.OpAMD64VPADDB128, + ssa.OpAMD64VPSUBSB256, + ssa.OpAMD64VDIVPSMasked512, + ssa.OpAMD64VPCMPUB512, + ssa.OpAMD64VSCALEFPS128, + ssa.OpAMD64VSQRTPS512, + ssa.OpAMD64VPSIGND128, + ssa.OpAMD64VRSQRT14PD512, + ssa.OpAMD64VDIVPDMasked128, + ssa.OpAMD64VXORPDMasked128, + ssa.OpAMD64VPCMPUB256, + ssa.OpAMD64VPMINSWMasked512, + ssa.OpAMD64VPXORD512, + ssa.OpAMD64VHADDPD256, + ssa.OpAMD64VPMAXSB128, + ssa.OpAMD64VPHSUBD128, + ssa.OpAMD64VPCMPUQMasked512, + ssa.OpAMD64VPANDDMasked256, + ssa.OpAMD64VPCMPUDMasked256, + ssa.OpAMD64VRCP14PSMasked128, + ssa.OpAMD64VMULPDMasked512, + ssa.OpAMD64VPSUBD512, + ssa.OpAMD64VANDNPD128, + ssa.OpAMD64VPHADDD256, + ssa.OpAMD64VMINPS512, + ssa.OpAMD64VPMULDQ512, + ssa.OpAMD64VCMPPSMasked128, + ssa.OpAMD64VORPSMasked512, + ssa.OpAMD64VPCMPB128, + ssa.OpAMD64VPORDMasked256, + ssa.OpAMD64VPABSW256, + ssa.OpAMD64VPMAXSQMasked512, + ssa.OpAMD64VPADDDMasked256, + ssa.OpAMD64VPSUBQMasked256, + ssa.OpAMD64VXORPSMasked512, + ssa.OpAMD64VPCMPEQB128, + ssa.OpAMD64VPMULLW256, + ssa.OpAMD64VDIVPDMasked512, + ssa.OpAMD64VPMAXUDMasked128, + ssa.OpAMD64VPADDB256, + ssa.OpAMD64VPABSWMasked256, + ssa.OpAMD64VPOPCNTWMasked512, + ssa.OpAMD64VPSUBD128, + ssa.OpAMD64VPXORQMasked512, + ssa.OpAMD64VMINPDMasked256, + ssa.OpAMD64VPADDWMasked256, + ssa.OpAMD64VPMAXSQ256, + ssa.OpAMD64VPSUBWMasked512, + ssa.OpAMD64VMULPS512, + ssa.OpAMD64VPCMPUD256, + ssa.OpAMD64VPMULLQMasked128, + ssa.OpAMD64VPADDSB128, + ssa.OpAMD64VMINPD512, + ssa.OpAMD64VPMAXSD512, + ssa.OpAMD64VPMINUWMasked128, + ssa.OpAMD64VPOPCNTQMasked128, + ssa.OpAMD64VPCMPQ512, + ssa.OpAMD64VPMINUB256, + ssa.OpAMD64VPCMPWMasked512, + ssa.OpAMD64VPXORDMasked256, + ssa.OpAMD64VPABSB256, + ssa.OpAMD64VPORQMasked512, + ssa.OpAMD64VORPDMasked512, + ssa.OpAMD64VPOPCNTQ128, + ssa.OpAMD64VPMINSD256, + ssa.OpAMD64VPOPCNTDMasked512, + ssa.OpAMD64VPMINSWMasked128, + ssa.OpAMD64VPOPCNTD256, + ssa.OpAMD64VPXORDMasked512, + ssa.OpAMD64VPABSQ256, + ssa.OpAMD64VPOPCNTW256, + ssa.OpAMD64VDIVPS256, + ssa.OpAMD64VPHADDSW128, + ssa.OpAMD64VPSUBD256, + ssa.OpAMD64VRSQRT14PD128, + ssa.OpAMD64VDIVPD128, + ssa.OpAMD64VPCMPUDMasked128, + ssa.OpAMD64VPCMPEQQMasked512, + ssa.OpAMD64VRCP14PDMasked256, + ssa.OpAMD64VPMULUDQMasked512, + ssa.OpAMD64VMAXPSMasked256, + ssa.OpAMD64VPMULLQ256, + ssa.OpAMD64VANDNPDMasked256, + ssa.OpAMD64VPADDWMasked128, + ssa.OpAMD64VCMPPDMasked128, + ssa.OpAMD64VMULPDMasked128, + ssa.OpAMD64VPCMPGTWMasked512, + ssa.OpAMD64VPOR128, + ssa.OpAMD64VANDNPDMasked512, + ssa.OpAMD64VPSUBBMasked512, + ssa.OpAMD64VPANDNDMasked128, + ssa.OpAMD64VPCMPQ128, + ssa.OpAMD64VPMINUDMasked128, + ssa.OpAMD64VXORPD128, + ssa.OpAMD64VPAVGWMasked128, + ssa.OpAMD64VPCMPUQMasked256, + ssa.OpAMD64VPMULLDMasked256, + ssa.OpAMD64VPSUBBMasked256, + ssa.OpAMD64VRSQRT14PSMasked128, + ssa.OpAMD64VPADDBMasked128, + ssa.OpAMD64VPMAXUBMasked512, + ssa.OpAMD64VPMULLW128, + ssa.OpAMD64VPSUBW128, + ssa.OpAMD64VPXOR256, + ssa.OpAMD64VPCMPD512, + ssa.OpAMD64VDIVPSMasked256, + ssa.OpAMD64VPOPCNTBMasked256, + ssa.OpAMD64VPADDD128, + ssa.OpAMD64VRSQRTPS128, + ssa.OpAMD64VHADDPD128, + ssa.OpAMD64VPSUBDMasked256, + ssa.OpAMD64VPOPCNTD512, + ssa.OpAMD64VPANDNQ512, + ssa.OpAMD64VPAVGWMasked512, + 
ssa.OpAMD64VSCALEFPSMasked128, + ssa.OpAMD64VMINPSMasked512, + ssa.OpAMD64VPSUBQMasked512, + ssa.OpAMD64VSQRTPSMasked512, + ssa.OpAMD64VPCMPD256, + ssa.OpAMD64VPMINSDMasked256, + ssa.OpAMD64VANDPSMasked512, + ssa.OpAMD64VPMAXUBMasked256, + ssa.OpAMD64VPSUBWMasked256, + ssa.OpAMD64VSQRTPD512, + ssa.OpAMD64VPADDSWMasked256, + ssa.OpAMD64VPCMPB512, + ssa.OpAMD64VPANDDMasked512, + ssa.OpAMD64VRCP14PD512, + ssa.OpAMD64VPABSWMasked512, + ssa.OpAMD64VPCMPWMasked256, + ssa.OpAMD64VPSUBSWMasked256, + ssa.OpAMD64VPSUBQMasked128, + ssa.OpAMD64VPCMPW256, + ssa.OpAMD64VPABSD128, + ssa.OpAMD64VPMINUD256, + ssa.OpAMD64VCMPPDMasked512, + ssa.OpAMD64VCMPPD512, + ssa.OpAMD64VPMINUBMasked512, + ssa.OpAMD64VPSUBQ256, + ssa.OpAMD64VPMINSD128, + ssa.OpAMD64VPADDSB256, + ssa.OpAMD64VPMULLDMasked512, + ssa.OpAMD64VANDPD512, + ssa.OpAMD64VMULPDMasked256, + ssa.OpAMD64VPHADDSW256, + ssa.OpAMD64VPAND256, + ssa.OpAMD64VADDPS512, + ssa.OpAMD64VPMINUQ256, + ssa.OpAMD64VADDPD256, + ssa.OpAMD64VPABSB128, + ssa.OpAMD64VPANDND512, + ssa.OpAMD64VPSUBSWMasked128, + ssa.OpAMD64VPMAXSW256, + ssa.OpAMD64VMAXPD256, + ssa.OpAMD64VMULPD128, + ssa.OpAMD64VPOPCNTQMasked256, + ssa.OpAMD64VPADDD256, + ssa.OpAMD64VPOPCNTQ512, + ssa.OpAMD64VMINPD256, + ssa.OpAMD64VPXORQMasked256, + ssa.OpAMD64VPOPCNTBMasked512, + ssa.OpAMD64VPANDQMasked512, + ssa.OpAMD64VPCMPUBMasked128, + ssa.OpAMD64VPMINSW512, + ssa.OpAMD64VPORDMasked512, + ssa.OpAMD64VPAVGB128, + ssa.OpAMD64VADDPD512, + ssa.OpAMD64VPMULHW512, + ssa.OpAMD64VPADDQ256, + ssa.OpAMD64VPMINUQ512, + ssa.OpAMD64VORPS512, + ssa.OpAMD64VPMAXUWMasked512, + ssa.OpAMD64VPMAXSDMasked128, + ssa.OpAMD64VPMULDQMasked128, + ssa.OpAMD64VSCALEFPD128, + ssa.OpAMD64VPCMPGTW256, + ssa.OpAMD64VPMAXUW256, + ssa.OpAMD64VPMAXUD512, + ssa.OpAMD64VPMAXUQ256, + ssa.OpAMD64VPMINUDMasked256, + ssa.OpAMD64VPABSBMasked512, + ssa.OpAMD64VSQRTPD128, + ssa.OpAMD64VPANDNDMasked512, + ssa.OpAMD64VPMINUDMasked512, + ssa.OpAMD64VPABSBMasked256, + ssa.OpAMD64VXORPDMasked256, + ssa.OpAMD64VMAXPSMasked512, + ssa.OpAMD64VPMINUD512, + ssa.OpAMD64VPABSBMasked128, + ssa.OpAMD64VPSUBBMasked128, + ssa.OpAMD64VPMINSDMasked512, + ssa.OpAMD64VPSUBSBMasked256, + ssa.OpAMD64VORPSMasked256, + ssa.OpAMD64VADDPSMasked128, + ssa.OpAMD64VPMAXSB512, + ssa.OpAMD64VPABSB512, + ssa.OpAMD64VXORPDMasked512, + ssa.OpAMD64VADDPS128, + ssa.OpAMD64VPMINUQMasked512, + ssa.OpAMD64VANDNPS256, + ssa.OpAMD64VPMAXSB256, + ssa.OpAMD64VDIVPDMasked256, + ssa.OpAMD64VPHSUBW128, + ssa.OpAMD64VPADDQMasked256, + ssa.OpAMD64VPMAXSD256, + ssa.OpAMD64VPABSDMasked512, + ssa.OpAMD64VPADDQ512, + ssa.OpAMD64VPMAXUDMasked256, + ssa.OpAMD64VPCMPGTB128, + ssa.OpAMD64VPMAXUQMasked128, + ssa.OpAMD64VPCMPW128, + ssa.OpAMD64VPCMPGTQ128, + ssa.OpAMD64VPANDQ512, + ssa.OpAMD64VRCP14PSMasked512, + ssa.OpAMD64VANDPS256, + ssa.OpAMD64VPHSUBD256, + ssa.OpAMD64VPSUBW512, + ssa.OpAMD64VHADDPS256, + ssa.OpAMD64VMULPD256, + ssa.OpAMD64VRCP14PDMasked512, + ssa.OpAMD64VPSUBDMasked128, + ssa.OpAMD64VPXORDMasked128, + ssa.OpAMD64VHSUBPD128, + ssa.OpAMD64VPMAXUW128, + ssa.OpAMD64VORPDMasked128, + ssa.OpAMD64VPHADDD128, + ssa.OpAMD64VPMINUD128, + ssa.OpAMD64VPSIGND256, + ssa.OpAMD64VPADDSW128, + ssa.OpAMD64VCMPPS512, + ssa.OpAMD64VPMAXUQ512, + ssa.OpAMD64VPCMPGTQ512, + ssa.OpAMD64VADDPDMasked256, + ssa.OpAMD64VHSUBPD256, + ssa.OpAMD64VPCMPGTWMasked256, + ssa.OpAMD64VPOPCNTW128, + ssa.OpAMD64VPSUBSB512, + ssa.OpAMD64VRSQRT14PDMasked256, + ssa.OpAMD64VPMAXSD128, + ssa.OpAMD64VPADDQMasked128, + ssa.OpAMD64VPOPCNTQ256, + ssa.OpAMD64VPMAXSQ128, + ssa.OpAMD64VANDPD256, + ssa.OpAMD64VPSUBSBMasked128, + 
ssa.OpAMD64VSQRTPS128, + ssa.OpAMD64VPCMPGTQ256, + ssa.OpAMD64VPADDSBMasked128, + ssa.OpAMD64VXORPD512, + ssa.OpAMD64VPCMPGTWMasked128, + ssa.OpAMD64VPMULLQ128, + ssa.OpAMD64VPSUBWMasked128, + ssa.OpAMD64VSCALEFPDMasked128, + ssa.OpAMD64VPMAXUD256, + ssa.OpAMD64VANDNPDMasked128, + ssa.OpAMD64VPMINUWMasked512, + ssa.OpAMD64VRSQRTPS256, + ssa.OpAMD64VPXORQ512, + ssa.OpAMD64VPMAXSDMasked256, + ssa.OpAMD64VPMINUWMasked256, + ssa.OpAMD64VPMINSQ256, + ssa.OpAMD64VPMULDQMasked256, + ssa.OpAMD64VPMULUDQ128, + ssa.OpAMD64VPMAXUB256, + ssa.OpAMD64VPMINUW256, + ssa.OpAMD64VPADDSWMasked512, + ssa.OpAMD64VPADDSB512, + ssa.OpAMD64VPMINUQMasked128, + ssa.OpAMD64VPMULUDQ256, + ssa.OpAMD64VANDPSMasked256, + ssa.OpAMD64VPADDDMasked128, + ssa.OpAMD64VPCMPW512, + ssa.OpAMD64VPABSD256, + ssa.OpAMD64VANDNPSMasked128, + ssa.OpAMD64VPABSDMasked128, + ssa.OpAMD64VPADDSWMasked128, + ssa.OpAMD64VPMULHUW512, + ssa.OpAMD64VPSUBQ128, + ssa.OpAMD64VPADDDMasked512, + ssa.OpAMD64VRCP14PS128, + ssa.OpAMD64VMULPSMasked512, + ssa.OpAMD64VORPDMasked256, + ssa.OpAMD64VPMULHUWMasked256, + ssa.OpAMD64VPORDMasked128, + ssa.OpAMD64VSCALEFPDMasked512, + ssa.OpAMD64VSCALEFPD512, + ssa.OpAMD64VPMAXSBMasked512, + ssa.OpAMD64VPMINSQMasked256, + ssa.OpAMD64VXORPS512, + ssa.OpAMD64VPSUBSB128, + ssa.OpAMD64VPMAXSW128, + ssa.OpAMD64VPMINSBMasked512, + ssa.OpAMD64VADDPDMasked512, + ssa.OpAMD64VPSUBB512, + ssa.OpAMD64VPMULHWMasked512, + ssa.OpAMD64VRSQRT14PSMasked256, + ssa.OpAMD64VPOPCNTBMasked128, + ssa.OpAMD64VPOPCNTD128, + ssa.OpAMD64VPMAXUQMasked256, + ssa.OpAMD64VPMINSB256, + ssa.OpAMD64VPABSQMasked512, + ssa.OpAMD64VDIVPSMasked128, + ssa.OpAMD64VPAVGBMasked128, + ssa.OpAMD64VPCMPEQW512, + ssa.OpAMD64VPMULHUW256, + ssa.OpAMD64VPMULLW512, + ssa.OpAMD64VPADDB512, + ssa.OpAMD64VPOPCNTB512, + ssa.OpAMD64VPORD512, + ssa.OpAMD64VMAXPDMasked128, + ssa.OpAMD64VPMAXSW512, + ssa.OpAMD64VPMINUW512, + ssa.OpAMD64VADDPSMasked256, + ssa.OpAMD64VPCMPGTQMasked512, + ssa.OpAMD64VRCP14PD128, + ssa.OpAMD64VPHSUBSW256, + ssa.OpAMD64VRSQRT14PSMasked512, + ssa.OpAMD64VORPSMasked128, + ssa.OpAMD64VMAXPS256, + ssa.OpAMD64VPANDQMasked128, + ssa.OpAMD64VPCMPEQWMasked512, + ssa.OpAMD64VRCP14PDMasked128, + ssa.OpAMD64VADDPSMasked512, + ssa.OpAMD64VPMAXSQMasked128, + ssa.OpAMD64VPOPCNTDMasked128, + ssa.OpAMD64VPHADDW256, + ssa.OpAMD64VXORPSMasked256, + ssa.OpAMD64VPMINSQMasked512, + ssa.OpAMD64VRCP14PS256, + ssa.OpAMD64VPSIGNB256, + ssa.OpAMD64VPSUBSW256, + ssa.OpAMD64VDIVPD512, + ssa.OpAMD64VPADDW128, + ssa.OpAMD64VXORPS256, + ssa.OpAMD64VANDNPSMasked512, + ssa.OpAMD64VPAVGB512, + ssa.OpAMD64VPMAXUW512, + ssa.OpAMD64VPMAXSWMasked512, + ssa.OpAMD64VPSIGNW256, + ssa.OpAMD64VSQRTPSMasked128, + ssa.OpAMD64VPCMPEQQMasked128, + ssa.OpAMD64VPOPCNTWMasked256, + ssa.OpAMD64VPCMPGTD128, + ssa.OpAMD64VMAXPDMasked256, + ssa.OpAMD64VPANDNQMasked256, + ssa.OpAMD64VPMINSB512, + ssa.OpAMD64VPMULHUWMasked128, + ssa.OpAMD64VPMINUW128, + ssa.OpAMD64VMINPDMasked512, + ssa.OpAMD64VMAXPSMasked128, + ssa.OpAMD64VPMAXSBMasked256, + ssa.OpAMD64VPANDDMasked128, + ssa.OpAMD64VSQRTPD256, + ssa.OpAMD64VPCMPGTD256, + ssa.OpAMD64VPOPCNTQMasked512, + ssa.OpAMD64VPMAXUB512, + ssa.OpAMD64VANDPDMasked128, + ssa.OpAMD64VPANDNQMasked128, + ssa.OpAMD64VSCALEFPS256, + ssa.OpAMD64VPCMPEQQMasked256, + ssa.OpAMD64VSCALEFPSMasked512, + ssa.OpAMD64VANDPDMasked512, + ssa.OpAMD64VORPS128, + ssa.OpAMD64VPMINUB128, + ssa.OpAMD64VPMULHWMasked128, + ssa.OpAMD64VPAVGB256, + ssa.OpAMD64VPMINSQ128, + ssa.OpAMD64VPCMPEQQ256, + ssa.OpAMD64VMULPD512, + ssa.OpAMD64VPABSQ512, + ssa.OpAMD64VPABSDMasked256, + 
ssa.OpAMD64VPADDBMasked256, + ssa.OpAMD64VPSIGNW128, + ssa.OpAMD64VPABSQ128, + ssa.OpAMD64VPMINUQMasked256, + ssa.OpAMD64VPMULHW256, + ssa.OpAMD64VSCALEFPS512, + ssa.OpAMD64VRSQRT14PDMasked512, + ssa.OpAMD64VPMINUBMasked256, + ssa.OpAMD64VADDPD128, + ssa.OpAMD64VSCALEFPD256, + ssa.OpAMD64VANDPS128, + ssa.OpAMD64VMULPS256, + ssa.OpAMD64VPMINSW128, + ssa.OpAMD64VPMAXSBMasked128, + ssa.OpAMD64VPMAXSWMasked128, + ssa.OpAMD64VMINPS256, + ssa.OpAMD64VPMAXUQ128, + ssa.OpAMD64VPMINSBMasked256, + ssa.OpAMD64VPAVGW256, + ssa.OpAMD64VMAXPD128, + ssa.OpAMD64VPSIGNB128, + ssa.OpAMD64VPMINUB512, + ssa.OpAMD64VPABSW128, + ssa.OpAMD64VPCMPGTW128, + ssa.OpAMD64VORPS256, + ssa.OpAMD64VPMINSB128, + ssa.OpAMD64VPMINUQ128, + ssa.OpAMD64VPMINSQ512, + ssa.OpAMD64VSQRTPDMasked512, + ssa.OpAMD64VPMINSWMasked256, + ssa.OpAMD64VMINPSMasked256, + ssa.OpAMD64VPMAXUBMasked128, + ssa.OpAMD64VPCMPEQB256, + ssa.OpAMD64VANDNPD512, + ssa.OpAMD64VPCMPGTQMasked128, + ssa.OpAMD64VANDPDMasked256, + ssa.OpAMD64VORPD128, + ssa.OpAMD64VMAXPS512, + ssa.OpAMD64VPMULLD512, + ssa.OpAMD64VMINPDMasked128, + ssa.OpAMD64VANDNPS128, + ssa.OpAMD64VMULPS128, + ssa.OpAMD64VPMULLQ512, + ssa.OpAMD64VRSQRT14PS512, + ssa.OpAMD64VMINPSMasked128, + ssa.OpAMD64VRCP14PSMasked256, + ssa.OpAMD64VPMAXUD128, + ssa.OpAMD64VPMINSBMasked128, + ssa.OpAMD64VPCMPEQQ512, + ssa.OpAMD64VSQRTPS256, + ssa.OpAMD64VPMULHWMasked256, + ssa.OpAMD64VSQRTPSMasked256, + ssa.OpAMD64VDIVPS128, + ssa.OpAMD64VRCP14PD256, + ssa.OpAMD64VPMAXUDMasked512, + ssa.OpAMD64VPMAXUQMasked512, + ssa.OpAMD64VANDPSMasked128, + ssa.OpAMD64VPABSQMasked128, + ssa.OpAMD64VPMAXSQMasked256, + ssa.OpAMD64VPAVGBMasked256, + ssa.OpAMD64VPCMPEQWMasked256, + ssa.OpAMD64VSCALEFPSMasked256, + ssa.OpAMD64VPAVGW512: + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + + default: + // One result is required. + return false + } + + // Masked operations are always compiled with zeroing.
+ switch v.Op { + case ssa.OpAMD64VPMINSDMasked128, + ssa.OpAMD64VMULPSMasked128, + ssa.OpAMD64VPANDQMasked256, + ssa.OpAMD64VSQRTPDMasked128, + ssa.OpAMD64VPMULHUWMasked512, + ssa.OpAMD64VPMULLQMasked256, + ssa.OpAMD64VPMULLDMasked128, + ssa.OpAMD64VPADDQMasked512, + ssa.OpAMD64VPMINUBMasked128, + ssa.OpAMD64VPMULLWMasked128, + ssa.OpAMD64VPMULLWMasked256, + ssa.OpAMD64VPADDSBMasked256, + ssa.OpAMD64VMULPSMasked256, + ssa.OpAMD64VPADDBMasked512, + ssa.OpAMD64VPADDSBMasked512, + ssa.OpAMD64VPORQMasked128, + ssa.OpAMD64VPSUBSBMasked512, + ssa.OpAMD64VSQRTPDMasked256, + ssa.OpAMD64VPAVGWMasked256, + ssa.OpAMD64VPMULUDQMasked256, + ssa.OpAMD64VPADDWMasked512, + ssa.OpAMD64VPOPCNTWMasked128, + ssa.OpAMD64VPANDNQMasked512, + ssa.OpAMD64VPOPCNTDMasked256, + ssa.OpAMD64VMAXPDMasked512, + ssa.OpAMD64VPMAXUWMasked256, + ssa.OpAMD64VPMINSQMasked128, + ssa.OpAMD64VPSUBSWMasked512, + ssa.OpAMD64VPXORQMasked128, + ssa.OpAMD64VPABSWMasked128, + ssa.OpAMD64VPMAXSWMasked256, + ssa.OpAMD64VPMULLWMasked512, + ssa.OpAMD64VPMULDQMasked512, + ssa.OpAMD64VRSQRT14PDMasked128, + ssa.OpAMD64VXORPSMasked128, + ssa.OpAMD64VANDNPSMasked256, + ssa.OpAMD64VPMULUDQMasked128, + ssa.OpAMD64VPMAXSDMasked512, + ssa.OpAMD64VPABSQMasked256, + ssa.OpAMD64VPAVGBMasked512, + ssa.OpAMD64VPANDNDMasked256, + ssa.OpAMD64VPMAXUWMasked128, + ssa.OpAMD64VADDPDMasked128, + ssa.OpAMD64VPMULLQMasked512, + ssa.OpAMD64VPORQMasked256, + ssa.OpAMD64VSCALEFPDMasked256, + ssa.OpAMD64VPSUBDMasked512, + ssa.OpAMD64VDIVPSMasked512, + ssa.OpAMD64VDIVPDMasked128, + ssa.OpAMD64VXORPDMasked128, + ssa.OpAMD64VPMINSWMasked512, + ssa.OpAMD64VPANDDMasked256, + ssa.OpAMD64VRCP14PSMasked128, + ssa.OpAMD64VMULPDMasked512, + ssa.OpAMD64VORPSMasked512, + ssa.OpAMD64VPORDMasked256, + ssa.OpAMD64VPMAXSQMasked512, + ssa.OpAMD64VPADDDMasked256, + ssa.OpAMD64VPSUBQMasked256, + ssa.OpAMD64VXORPSMasked512, + ssa.OpAMD64VDIVPDMasked512, + ssa.OpAMD64VPMAXUDMasked128, + ssa.OpAMD64VPABSWMasked256, + ssa.OpAMD64VPOPCNTWMasked512, + ssa.OpAMD64VPXORQMasked512, + ssa.OpAMD64VMINPDMasked256, + ssa.OpAMD64VPADDWMasked256, + ssa.OpAMD64VPSUBWMasked512, + ssa.OpAMD64VPMULLQMasked128, + ssa.OpAMD64VPMINUWMasked128, + ssa.OpAMD64VPOPCNTQMasked128, + ssa.OpAMD64VPXORDMasked256, + ssa.OpAMD64VPORQMasked512, + ssa.OpAMD64VORPDMasked512, + ssa.OpAMD64VPOPCNTDMasked512, + ssa.OpAMD64VPMINSWMasked128, + ssa.OpAMD64VPXORDMasked512, + ssa.OpAMD64VRCP14PDMasked256, + ssa.OpAMD64VPMULUDQMasked512, + ssa.OpAMD64VMAXPSMasked256, + ssa.OpAMD64VANDNPDMasked256, + ssa.OpAMD64VPADDWMasked128, + ssa.OpAMD64VMULPDMasked128, + ssa.OpAMD64VANDNPDMasked512, + ssa.OpAMD64VPSUBBMasked512, + ssa.OpAMD64VPANDNDMasked128, + ssa.OpAMD64VPMINUDMasked128, + ssa.OpAMD64VPAVGWMasked128, + ssa.OpAMD64VPMULLDMasked256, + ssa.OpAMD64VPSUBBMasked256, + ssa.OpAMD64VRSQRT14PSMasked128, + ssa.OpAMD64VPADDBMasked128, + ssa.OpAMD64VPMAXUBMasked512, + ssa.OpAMD64VDIVPSMasked256, + ssa.OpAMD64VPOPCNTBMasked256, + ssa.OpAMD64VPSUBDMasked256, + ssa.OpAMD64VPAVGWMasked512, + ssa.OpAMD64VSCALEFPSMasked128, + ssa.OpAMD64VMINPSMasked512, + ssa.OpAMD64VPSUBQMasked512, + ssa.OpAMD64VSQRTPSMasked512, + ssa.OpAMD64VPMINSDMasked256, + ssa.OpAMD64VANDPSMasked512, + ssa.OpAMD64VPMAXUBMasked256, + ssa.OpAMD64VPSUBWMasked256, + ssa.OpAMD64VPADDSWMasked256, + ssa.OpAMD64VPANDDMasked512, + ssa.OpAMD64VPABSWMasked512, + ssa.OpAMD64VPSUBSWMasked256, + ssa.OpAMD64VPSUBQMasked128, + ssa.OpAMD64VPMINUBMasked512, + ssa.OpAMD64VPMULLDMasked512, + ssa.OpAMD64VMULPDMasked256, + ssa.OpAMD64VPSUBSWMasked128, + ssa.OpAMD64VPOPCNTQMasked256, + 
ssa.OpAMD64VPXORQMasked256, + ssa.OpAMD64VPOPCNTBMasked512, + ssa.OpAMD64VPANDQMasked512, + ssa.OpAMD64VPORDMasked512, + ssa.OpAMD64VPMAXUWMasked512, + ssa.OpAMD64VPMAXSDMasked128, + ssa.OpAMD64VPMULDQMasked128, + ssa.OpAMD64VPMINUDMasked256, + ssa.OpAMD64VPABSBMasked512, + ssa.OpAMD64VPANDNDMasked512, + ssa.OpAMD64VPMINUDMasked512, + ssa.OpAMD64VPABSBMasked256, + ssa.OpAMD64VXORPDMasked256, + ssa.OpAMD64VMAXPSMasked512, + ssa.OpAMD64VPABSBMasked128, + ssa.OpAMD64VPSUBBMasked128, + ssa.OpAMD64VPMINSDMasked512, + ssa.OpAMD64VPSUBSBMasked256, + ssa.OpAMD64VORPSMasked256, + ssa.OpAMD64VADDPSMasked128, + ssa.OpAMD64VXORPDMasked512, + ssa.OpAMD64VPMINUQMasked512, + ssa.OpAMD64VDIVPDMasked256, + ssa.OpAMD64VPADDQMasked256, + ssa.OpAMD64VPABSDMasked512, + ssa.OpAMD64VPMAXUDMasked256, + ssa.OpAMD64VPMAXUQMasked128, + ssa.OpAMD64VRCP14PSMasked512, + ssa.OpAMD64VRCP14PDMasked512, + ssa.OpAMD64VPSUBDMasked128, + ssa.OpAMD64VPXORDMasked128, + ssa.OpAMD64VORPDMasked128, + ssa.OpAMD64VADDPDMasked256, + ssa.OpAMD64VRSQRT14PDMasked256, + ssa.OpAMD64VPADDQMasked128, + ssa.OpAMD64VPSUBSBMasked128, + ssa.OpAMD64VPADDSBMasked128, + ssa.OpAMD64VPSUBWMasked128, + ssa.OpAMD64VSCALEFPDMasked128, + ssa.OpAMD64VANDNPDMasked128, + ssa.OpAMD64VPMINUWMasked512, + ssa.OpAMD64VPMAXSDMasked256, + ssa.OpAMD64VPMINUWMasked256, + ssa.OpAMD64VPMULDQMasked256, + ssa.OpAMD64VPADDSWMasked512, + ssa.OpAMD64VPMINUQMasked128, + ssa.OpAMD64VANDPSMasked256, + ssa.OpAMD64VPADDDMasked128, + ssa.OpAMD64VANDNPSMasked128, + ssa.OpAMD64VPABSDMasked128, + ssa.OpAMD64VPADDSWMasked128, + ssa.OpAMD64VPADDDMasked512, + ssa.OpAMD64VMULPSMasked512, + ssa.OpAMD64VORPDMasked256, + ssa.OpAMD64VPMULHUWMasked256, + ssa.OpAMD64VPORDMasked128, + ssa.OpAMD64VSCALEFPDMasked512, + ssa.OpAMD64VPMAXSBMasked512, + ssa.OpAMD64VPMINSQMasked256, + ssa.OpAMD64VPMINSBMasked512, + ssa.OpAMD64VADDPDMasked512, + ssa.OpAMD64VPMULHWMasked512, + ssa.OpAMD64VRSQRT14PSMasked256, + ssa.OpAMD64VPOPCNTBMasked128, + ssa.OpAMD64VPMAXUQMasked256, + ssa.OpAMD64VPABSQMasked512, + ssa.OpAMD64VDIVPSMasked128, + ssa.OpAMD64VPAVGBMasked128, + ssa.OpAMD64VMAXPDMasked128, + ssa.OpAMD64VADDPSMasked256, + ssa.OpAMD64VRSQRT14PSMasked512, + ssa.OpAMD64VORPSMasked128, + ssa.OpAMD64VPANDQMasked128, + ssa.OpAMD64VRCP14PDMasked128, + ssa.OpAMD64VADDPSMasked512, + ssa.OpAMD64VPMAXSQMasked128, + ssa.OpAMD64VPOPCNTDMasked128, + ssa.OpAMD64VXORPSMasked256, + ssa.OpAMD64VPMINSQMasked512, + ssa.OpAMD64VANDNPSMasked512, + ssa.OpAMD64VPMAXSWMasked512, + ssa.OpAMD64VSQRTPSMasked128, + ssa.OpAMD64VPOPCNTWMasked256, + ssa.OpAMD64VMAXPDMasked256, + ssa.OpAMD64VPANDNQMasked256, + ssa.OpAMD64VPMULHUWMasked128, + ssa.OpAMD64VMINPDMasked512, + ssa.OpAMD64VMAXPSMasked128, + ssa.OpAMD64VPMAXSBMasked256, + ssa.OpAMD64VPANDDMasked128, + ssa.OpAMD64VPOPCNTQMasked512, + ssa.OpAMD64VANDPDMasked128, + ssa.OpAMD64VPANDNQMasked128, + ssa.OpAMD64VSCALEFPSMasked512, + ssa.OpAMD64VANDPDMasked512, + ssa.OpAMD64VPMULHWMasked128, + ssa.OpAMD64VPABSDMasked256, + ssa.OpAMD64VPADDBMasked256, + ssa.OpAMD64VPMINUQMasked256, + ssa.OpAMD64VRSQRT14PDMasked512, + ssa.OpAMD64VPMINUBMasked256, + ssa.OpAMD64VPMAXSBMasked128, + ssa.OpAMD64VPMAXSWMasked128, + ssa.OpAMD64VPMINSBMasked256, + ssa.OpAMD64VSQRTPDMasked512, + ssa.OpAMD64VPMINSWMasked256, + ssa.OpAMD64VMINPSMasked256, + ssa.OpAMD64VPMAXUBMasked128, + ssa.OpAMD64VANDPDMasked256, + ssa.OpAMD64VMINPDMasked128, + ssa.OpAMD64VMINPSMasked128, + ssa.OpAMD64VRCP14PSMasked256, + ssa.OpAMD64VPMINSBMasked128, + ssa.OpAMD64VPMULHWMasked256, + ssa.OpAMD64VSQRTPSMasked256, + 
ssa.OpAMD64VPMAXUDMasked512, + ssa.OpAMD64VPMAXUQMasked512, + ssa.OpAMD64VANDPSMasked128, + ssa.OpAMD64VPABSQMasked128, + ssa.OpAMD64VPMAXSQMasked256, + ssa.OpAMD64VPAVGBMasked256, + ssa.OpAMD64VSCALEFPSMasked256: + x86.ParseSuffix(p, "Z") + } + return true } diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 3c6be4ccef85d9..a273131d469613 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -1,4 +1,1081 @@ -// Code generated by internal/simd/_gen using 'go run .'; DO NOT EDIT. +// Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. -// (AddInt8x16 ...) => (VPADDB ...) -// etc +// The AVX instruction encoding orders vector registers from right to left, for example: +// VSUBPS X Y Z means Z=Y-X +// The rules here swap the order of such X and Y because the ssa-to-prog lowering in simdssa.go assumes a +// left-to-right order. +// TODO: we should offload the logic to simdssa.go instead of handling it here. +// +// Masks are always at the end, immediates always at the beginning. +(AddFloat32x16 x y) => (VADDPS512 y x) +(AndFloat32x16 x y) => (VANDPS512 y x) +(AndNotFloat32x16 x y) => (VANDNPS512 y x) +(ApproximateReciprocalFloat32x16 x) => (VRCP14PS512 x) +(ApproximateReciprocalOfSqrtFloat32x16 x) => (VRSQRT14PS512 x) +(DivFloat32x16 x y) => (VDIVPS512 y x) +(MaxFloat32x16 x y) => (VMAXPS512 y x) +(MinFloat32x16 x y) => (VMINPS512 y x) +(MulFloat32x16 x y) => (VMULPS512 y x) +(MulByPowOf2Float32x16 x y) => (VSCALEFPS512 y x) +(OrFloat32x16 x y) => (VORPS512 y x) +(SqrtFloat32x16 x) => (VSQRTPS512 x) +(SubFloat32x16 x y) => (VADDPS512 y x) +(XorFloat32x16 x y) => (VXORPS512 y x) +(AddFloat32x4 x y) => (VADDPS128 y x) +(AndFloat32x4 x y) => (VANDPS128 y x) +(AndNotFloat32x4 x y) => (VANDNPS128 y x) +(ApproximateReciprocalFloat32x4 x) => (VRCP14PS128 x) +(ApproximateReciprocalOfSqrtFloat32x4 x) => (VRSQRTPS128 x) +(DivFloat32x4 x y) => (VDIVPS128 y x) +(MaxFloat32x4 x y) => (VMAXPS128 y x) +(MinFloat32x4 x y) => (VMINPS128 y x) +(MulFloat32x4 x y) => (VMULPS128 y x) +(MulByPowOf2Float32x4 x y) => (VSCALEFPS128 y x) +(OrFloat32x4 x y) => (VORPS128 y x) +(PairwiseAddFloat32x4 x y) => (VHADDPS128 y x) +(PairwiseSubFloat32x4 x y) => (VHSUBPS128 y x) +(SqrtFloat32x4 x) => (VSQRTPS128 x) +(SubFloat32x4 x y) => (VADDPS128 y x) +(XorFloat32x4 x y) => (VXORPS128 y x) +(AddFloat32x8 x y) => (VADDPS256 y x) +(AndFloat32x8 x y) => (VANDPS256 y x) +(AndNotFloat32x8 x y) => (VANDNPS256 y x) +(ApproximateReciprocalFloat32x8 x) => (VRCP14PS256 x) +(ApproximateReciprocalOfSqrtFloat32x8 x) => (VRSQRTPS256 x) +(DivFloat32x8 x y) => (VDIVPS256 y x) +(MaxFloat32x8 x y) => (VMAXPS256 y x) +(MinFloat32x8 x y) => (VMINPS256 y x) +(MulFloat32x8 x y) => (VMULPS256 y x) +(MulByPowOf2Float32x8 x y) => (VSCALEFPS256 y x) +(OrFloat32x8 x y) => (VORPS256 y x) +(PairwiseAddFloat32x8 x y) => (VHADDPS256 y x) +(PairwiseSubFloat32x8 x y) => (VHSUBPS256 y x) +(SqrtFloat32x8 x) => (VSQRTPS256 x) +(SubFloat32x8 x y) => (VADDPS256 y x) +(XorFloat32x8 x y) => (VXORPS256 y x) +(AddFloat64x2 x y) => (VADDPD128 y x) +(AndFloat64x2 x y) => (VANDPD128 y x) +(AndNotFloat64x2 x y) => (VANDNPD128 y x) +(ApproximateReciprocalFloat64x2 x) => (VRCP14PD128 x) +(ApproximateReciprocalOfSqrtFloat64x2 x) => (VRSQRT14PD128 x) +(DivFloat64x2 x y) => (VDIVPD128 y x) +(MaxFloat64x2 x y) => (VMAXPD128 y x) +(MinFloat64x2 x y) =>
(VMINPD128 y x) +(MulFloat64x2 x y) => (VMULPD128 y x) +(MulByPowOf2Float64x2 x y) => (VSCALEFPD128 y x) +(OrFloat64x2 x y) => (VORPD128 y x) +(PairwiseAddFloat64x2 x y) => (VHADDPD128 y x) +(PairwiseSubFloat64x2 x y) => (VHSUBPD128 y x) +(SqrtFloat64x2 x) => (VSQRTPD128 x) +(SubFloat64x2 x y) => (VADDPD128 y x) +(XorFloat64x2 x y) => (VXORPD128 y x) +(AddFloat64x4 x y) => (VADDPD256 y x) +(AndFloat64x4 x y) => (VANDPD256 y x) +(AndNotFloat64x4 x y) => (VANDNPD256 y x) +(ApproximateReciprocalFloat64x4 x) => (VRCP14PD256 x) +(ApproximateReciprocalOfSqrtFloat64x4 x) => (VRSQRT14PD256 x) +(DivFloat64x4 x y) => (VDIVPD256 y x) +(MaxFloat64x4 x y) => (VMAXPD256 y x) +(MinFloat64x4 x y) => (VMINPD256 y x) +(MulFloat64x4 x y) => (VMULPD256 y x) +(MulByPowOf2Float64x4 x y) => (VSCALEFPD256 y x) +(OrFloat64x4 x y) => (VORPD256 y x) +(PairwiseAddFloat64x4 x y) => (VHADDPD256 y x) +(PairwiseSubFloat64x4 x y) => (VHSUBPD256 y x) +(SqrtFloat64x4 x) => (VSQRTPD256 x) +(SubFloat64x4 x y) => (VADDPD256 y x) +(XorFloat64x4 x y) => (VXORPD256 y x) +(AddFloat64x8 x y) => (VADDPD512 y x) +(AndFloat64x8 x y) => (VANDPD512 y x) +(AndNotFloat64x8 x y) => (VANDNPD512 y x) +(ApproximateReciprocalFloat64x8 x) => (VRCP14PD512 x) +(ApproximateReciprocalOfSqrtFloat64x8 x) => (VRSQRT14PD512 x) +(DivFloat64x8 x y) => (VDIVPD512 y x) +(MaxFloat64x8 x y) => (VMAXPD512 y x) +(MinFloat64x8 x y) => (VMINPD512 y x) +(MulFloat64x8 x y) => (VMULPD512 y x) +(MulByPowOf2Float64x8 x y) => (VSCALEFPD512 y x) +(OrFloat64x8 x y) => (VORPD512 y x) +(SqrtFloat64x8 x) => (VSQRTPD512 x) +(SubFloat64x8 x y) => (VADDPD512 y x) +(XorFloat64x8 x y) => (VXORPD512 y x) +(AbsoluteInt16x16 x) => (VPABSW256 x) +(AddInt16x16 x y) => (VPADDW256 y x) +(AndInt16x16 x y) => (VPAND256 y x) +(AndNotInt16x16 x y) => (VPANDN256 y x) +(EqualInt16x16 x y) => (VPCMPEQW256 y x) +(GreaterInt16x16 x y) => (VPCMPGTW256 y x) +(MaxInt16x16 x y) => (VPMAXSW256 y x) +(MinInt16x16 x y) => (VPMINSW256 y x) +(MulHighInt16x16 x y) => (VPMULHW256 y x) +(MulLowInt16x16 x y) => (VPMULLW256 y x) +(OrInt16x16 x y) => (VPOR256 y x) +(PairwiseAddInt16x16 x y) => (VPHADDW256 y x) +(PairwiseSubInt16x16 x y) => (VPHSUBW256 y x) +(PopCountInt16x16 x) => (VPOPCNTW256 x) +(SaturatedAddInt16x16 x y) => (VPADDSW256 y x) +(SaturatedPairwiseAddInt16x16 x y) => (VPHADDSW256 y x) +(SaturatedPairwiseSubInt16x16 x y) => (VPHSUBSW256 y x) +(SaturatedSubInt16x16 x y) => (VPSUBSW256 y x) +(SignInt16x16 x y) => (VPSIGNW256 y x) +(SubInt16x16 x y) => (VPSUBW256 y x) +(XorInt16x16 x y) => (VPXOR256 y x) +(AbsoluteInt16x32 x) => (VPABSW512 x) +(AddInt16x32 x y) => (VPADDW512 y x) +(MaxInt16x32 x y) => (VPMAXSW512 y x) +(MinInt16x32 x y) => (VPMINSW512 y x) +(MulHighInt16x32 x y) => (VPMULHW512 y x) +(MulLowInt16x32 x y) => (VPMULLW512 y x) +(PopCountInt16x32 x) => (VPOPCNTW512 x) +(SaturatedAddInt16x32 x y) => (VPADDSW512 y x) +(SaturatedSubInt16x32 x y) => (VPSUBSW512 y x) +(SubInt16x32 x y) => (VPSUBW512 y x) +(AbsoluteInt16x8 x) => (VPABSW128 x) +(AddInt16x8 x y) => (VPADDW128 y x) +(AndInt16x8 x y) => (VPAND128 y x) +(AndNotInt16x8 x y) => (VPANDN128 y x) +(EqualInt16x8 x y) => (VPCMPEQW128 y x) +(GreaterInt16x8 x y) => (VPCMPGTW128 y x) +(MaxInt16x8 x y) => (VPMAXSW128 y x) +(MinInt16x8 x y) => (VPMINSW128 y x) +(MulHighInt16x8 x y) => (VPMULHW128 y x) +(MulLowInt16x8 x y) => (VPMULLW128 y x) +(OrInt16x8 x y) => (VPOR128 y x) +(PairwiseAddInt16x8 x y) => (VPHADDW128 y x) +(PairwiseSubInt16x8 x y) => (VPHSUBW128 y x) +(PopCountInt16x8 x) => (VPOPCNTW128 x) +(SaturatedAddInt16x8 x y) => 
(VPADDSW128 y x) +(SaturatedPairwiseAddInt16x8 x y) => (VPHADDSW128 y x) +(SaturatedPairwiseSubInt16x8 x y) => (VPHSUBSW128 y x) +(SaturatedSubInt16x8 x y) => (VPSUBSW128 y x) +(SignInt16x8 x y) => (VPSIGNW128 y x) +(SubInt16x8 x y) => (VPSUBW128 y x) +(XorInt16x8 x y) => (VPXOR128 y x) +(AbsoluteInt32x16 x) => (VPABSD512 x) +(AddInt32x16 x y) => (VPADDD512 y x) +(AndInt32x16 x y) => (VPANDD512 y x) +(AndNotInt32x16 x y) => (VPANDND512 y x) +(MaxInt32x16 x y) => (VPMAXSD512 y x) +(MinInt32x16 x y) => (VPMINSD512 y x) +(MulLowInt32x16 x y) => (VPMULLD512 y x) +(OrInt32x16 x y) => (VPORD512 y x) +(PopCountInt32x16 x) => (VPOPCNTD512 x) +(SubInt32x16 x y) => (VPSUBD512 y x) +(XorInt32x16 x y) => (VPXORD512 y x) +(AbsoluteInt32x4 x) => (VPABSD128 x) +(AddInt32x4 x y) => (VPADDD128 y x) +(AndInt32x4 x y) => (VPAND128 y x) +(AndNotInt32x4 x y) => (VPANDN128 y x) +(EqualInt32x4 x y) => (VPCMPEQD128 y x) +(GreaterInt32x4 x y) => (VPCMPGTD128 y x) +(MaxInt32x4 x y) => (VPMAXSD128 y x) +(MinInt32x4 x y) => (VPMINSD128 y x) +(MulEvenWidenInt32x4 x y) => (VPMULDQ128 y x) +(MulLowInt32x4 x y) => (VPMULLD128 y x) +(OrInt32x4 x y) => (VPOR128 y x) +(PairwiseAddInt32x4 x y) => (VPHADDD128 y x) +(PairwiseSubInt32x4 x y) => (VPHSUBD128 y x) +(PopCountInt32x4 x) => (VPOPCNTD128 x) +(SignInt32x4 x y) => (VPSIGND128 y x) +(SubInt32x4 x y) => (VPSUBD128 y x) +(XorInt32x4 x y) => (VPXOR128 y x) +(AbsoluteInt32x8 x) => (VPABSD256 x) +(AddInt32x8 x y) => (VPADDD256 y x) +(AndInt32x8 x y) => (VPAND256 y x) +(AndNotInt32x8 x y) => (VPANDN256 y x) +(EqualInt32x8 x y) => (VPCMPEQD256 y x) +(GreaterInt32x8 x y) => (VPCMPGTD256 y x) +(MaxInt32x8 x y) => (VPMAXSD256 y x) +(MinInt32x8 x y) => (VPMINSD256 y x) +(MulEvenWidenInt32x8 x y) => (VPMULDQ256 y x) +(MulLowInt32x8 x y) => (VPMULLD256 y x) +(OrInt32x8 x y) => (VPOR256 y x) +(PairwiseAddInt32x8 x y) => (VPHADDD256 y x) +(PairwiseSubInt32x8 x y) => (VPHSUBD256 y x) +(PopCountInt32x8 x) => (VPOPCNTD256 x) +(SignInt32x8 x y) => (VPSIGND256 y x) +(SubInt32x8 x y) => (VPSUBD256 y x) +(XorInt32x8 x y) => (VPXOR256 y x) +(AbsoluteInt64x2 x) => (VPABSQ128 x) +(AddInt64x2 x y) => (VPADDQ128 y x) +(AndInt64x2 x y) => (VPAND128 y x) +(AndNotInt64x2 x y) => (VPANDN128 y x) +(EqualInt64x2 x y) => (VPCMPEQQ128 y x) +(MaxInt64x2 x y) => (VPMAXSQ128 y x) +(MinInt64x2 x y) => (VPMINSQ128 y x) +(MulEvenWidenInt64x2 x y) => (VPMULDQ128 y x) +(MulLowInt64x2 x y) => (VPMULLQ128 y x) +(OrInt64x2 x y) => (VPOR128 y x) +(PopCountInt64x2 x) => (VPOPCNTQ128 x) +(SubInt64x2 x y) => (VPSUBQ128 y x) +(XorInt64x2 x y) => (VPXOR128 y x) +(AbsoluteInt64x4 x) => (VPABSQ256 x) +(AddInt64x4 x y) => (VPADDQ256 y x) +(AndInt64x4 x y) => (VPAND256 y x) +(AndNotInt64x4 x y) => (VPANDN256 y x) +(EqualInt64x4 x y) => (VPCMPEQQ256 y x) +(GreaterInt64x4 x y) => (VPCMPGTQ256 y x) +(MaxInt64x4 x y) => (VPMAXSQ256 y x) +(MinInt64x4 x y) => (VPMINSQ256 y x) +(MulEvenWidenInt64x4 x y) => (VPMULDQ256 y x) +(MulLowInt64x4 x y) => (VPMULLQ256 y x) +(OrInt64x4 x y) => (VPOR256 y x) +(PopCountInt64x4 x) => (VPOPCNTQ256 x) +(SubInt64x4 x y) => (VPSUBQ256 y x) +(XorInt64x4 x y) => (VPXOR256 y x) +(AbsoluteInt64x8 x) => (VPABSQ512 x) +(AddInt64x8 x y) => (VPADDQ512 y x) +(AndInt64x8 x y) => (VPANDQ512 y x) +(AndNotInt64x8 x y) => (VPANDNQ512 y x) +(MaxInt64x8 x y) => (VPMAXSQ512 y x) +(MinInt64x8 x y) => (VPMINSQ512 y x) +(MulEvenWidenInt64x8 x y) => (VPMULDQ512 y x) +(MulLowInt64x8 x y) => (VPMULLQ512 y x) +(OrInt64x8 x y) => (VPORQ512 y x) +(PopCountInt64x8 x) => (VPOPCNTQ512 x) +(SubInt64x8 x y) => (VPSUBQ512 y x) 
+(XorInt64x8 x y) => (VPXORQ512 y x) +(AbsoluteInt8x16 x) => (VPABSB128 x) +(AddInt8x16 x y) => (VPADDB128 y x) +(AndInt8x16 x y) => (VPAND128 y x) +(AndNotInt8x16 x y) => (VPANDN128 y x) +(EqualInt8x16 x y) => (VPCMPEQB128 y x) +(GreaterInt8x16 x y) => (VPCMPGTB128 y x) +(MaxInt8x16 x y) => (VPMAXSB128 y x) +(MinInt8x16 x y) => (VPMINSB128 y x) +(OrInt8x16 x y) => (VPOR128 y x) +(PopCountInt8x16 x) => (VPOPCNTB128 x) +(SaturatedAddInt8x16 x y) => (VPADDSB128 y x) +(SaturatedSubInt8x16 x y) => (VPSUBSB128 y x) +(SignInt8x16 x y) => (VPSIGNB128 y x) +(SubInt8x16 x y) => (VPSUBB128 y x) +(XorInt8x16 x y) => (VPXOR128 y x) +(AbsoluteInt8x32 x) => (VPABSB256 x) +(AddInt8x32 x y) => (VPADDB256 y x) +(AndInt8x32 x y) => (VPAND256 y x) +(AndNotInt8x32 x y) => (VPANDN256 y x) +(EqualInt8x32 x y) => (VPCMPEQB256 y x) +(GreaterInt8x32 x y) => (VPCMPGTB256 y x) +(MaxInt8x32 x y) => (VPMAXSB256 y x) +(MinInt8x32 x y) => (VPMINSB256 y x) +(OrInt8x32 x y) => (VPOR256 y x) +(PopCountInt8x32 x) => (VPOPCNTB256 x) +(SaturatedAddInt8x32 x y) => (VPADDSB256 y x) +(SaturatedSubInt8x32 x y) => (VPSUBSB256 y x) +(SignInt8x32 x y) => (VPSIGNB256 y x) +(SubInt8x32 x y) => (VPSUBB256 y x) +(XorInt8x32 x y) => (VPXOR256 y x) +(AbsoluteInt8x64 x) => (VPABSB512 x) +(AddInt8x64 x y) => (VPADDB512 y x) +(MaxInt8x64 x y) => (VPMAXSB512 y x) +(MinInt8x64 x y) => (VPMINSB512 y x) +(PopCountInt8x64 x) => (VPOPCNTB512 x) +(SaturatedAddInt8x64 x y) => (VPADDSB512 y x) +(SaturatedSubInt8x64 x y) => (VPSUBSB512 y x) +(SubInt8x64 x y) => (VPSUBB512 y x) +(AddUint16x16 x y) => (VPADDW256 y x) +(AndUint16x16 x y) => (VPAND256 y x) +(AndNotUint16x16 x y) => (VPANDN256 y x) +(AverageUint16x16 x y) => (VPAVGW256 y x) +(MaxUint16x16 x y) => (VPMAXUW256 y x) +(MinUint16x16 x y) => (VPMINUW256 y x) +(MulHighUint16x16 x y) => (VPMULHUW256 y x) +(OrUint16x16 x y) => (VPOR256 y x) +(PairwiseAddUint16x16 x y) => (VPHADDW256 y x) +(PairwiseSubUint16x16 x y) => (VPHSUBW256 y x) +(PopCountUint16x16 x) => (VPOPCNTW256 x) +(SaturatedAddUint16x16 x y) => (VPADDSW256 y x) +(SaturatedSubUint16x16 x y) => (VPSUBSW256 y x) +(SubUint16x16 x y) => (VPSUBW256 y x) +(XorUint16x16 x y) => (VPXOR256 y x) +(AddUint16x32 x y) => (VPADDW512 y x) +(AverageUint16x32 x y) => (VPAVGW512 y x) +(MaxUint16x32 x y) => (VPMAXUW512 y x) +(MinUint16x32 x y) => (VPMINUW512 y x) +(MulHighUint16x32 x y) => (VPMULHUW512 y x) +(PopCountUint16x32 x) => (VPOPCNTW512 x) +(SaturatedAddUint16x32 x y) => (VPADDSW512 y x) +(SaturatedSubUint16x32 x y) => (VPSUBSW512 y x) +(SubUint16x32 x y) => (VPSUBW512 y x) +(AddUint16x8 x y) => (VPADDW128 y x) +(AndUint16x8 x y) => (VPAND128 y x) +(AndNotUint16x8 x y) => (VPANDN128 y x) +(AverageUint16x8 x y) => (VPAVGW128 y x) +(MaxUint16x8 x y) => (VPMAXUW128 y x) +(MinUint16x8 x y) => (VPMINUW128 y x) +(MulHighUint16x8 x y) => (VPMULHUW128 y x) +(OrUint16x8 x y) => (VPOR128 y x) +(PairwiseAddUint16x8 x y) => (VPHADDW128 y x) +(PairwiseSubUint16x8 x y) => (VPHSUBW128 y x) +(PopCountUint16x8 x) => (VPOPCNTW128 x) +(SaturatedAddUint16x8 x y) => (VPADDSW128 y x) +(SaturatedSubUint16x8 x y) => (VPSUBSW128 y x) +(SubUint16x8 x y) => (VPSUBW128 y x) +(XorUint16x8 x y) => (VPXOR128 y x) +(AddUint32x16 x y) => (VPADDD512 y x) +(AndUint32x16 x y) => (VPANDD512 y x) +(AndNotUint32x16 x y) => (VPANDND512 y x) +(MaxUint32x16 x y) => (VPMAXUD512 y x) +(MinUint32x16 x y) => (VPMINUD512 y x) +(OrUint32x16 x y) => (VPORD512 y x) +(PopCountUint32x16 x) => (VPOPCNTD512 x) +(SubUint32x16 x y) => (VPSUBD512 y x) +(XorUint32x16 x y) => (VPXORD512 y x) 
+(AddUint32x4 x y) => (VPADDD128 y x) +(AndUint32x4 x y) => (VPAND128 y x) +(AndNotUint32x4 x y) => (VPANDN128 y x) +(MaxUint32x4 x y) => (VPMAXUD128 y x) +(MinUint32x4 x y) => (VPMINUD128 y x) +(MulEvenWidenUint32x4 x y) => (VPMULUDQ128 y x) +(OrUint32x4 x y) => (VPOR128 y x) +(PairwiseAddUint32x4 x y) => (VPHADDD128 y x) +(PairwiseSubUint32x4 x y) => (VPHSUBD128 y x) +(PopCountUint32x4 x) => (VPOPCNTD128 x) +(SubUint32x4 x y) => (VPSUBD128 y x) +(XorUint32x4 x y) => (VPXOR128 y x) +(AddUint32x8 x y) => (VPADDD256 y x) +(AndUint32x8 x y) => (VPAND256 y x) +(AndNotUint32x8 x y) => (VPANDN256 y x) +(MaxUint32x8 x y) => (VPMAXUD256 y x) +(MinUint32x8 x y) => (VPMINUD256 y x) +(MulEvenWidenUint32x8 x y) => (VPMULUDQ256 y x) +(OrUint32x8 x y) => (VPOR256 y x) +(PairwiseAddUint32x8 x y) => (VPHADDD256 y x) +(PairwiseSubUint32x8 x y) => (VPHSUBD256 y x) +(PopCountUint32x8 x) => (VPOPCNTD256 x) +(SubUint32x8 x y) => (VPSUBD256 y x) +(XorUint32x8 x y) => (VPXOR256 y x) +(AddUint64x2 x y) => (VPADDQ128 y x) +(AndUint64x2 x y) => (VPAND128 y x) +(AndNotUint64x2 x y) => (VPANDN128 y x) +(MaxUint64x2 x y) => (VPMAXUQ128 y x) +(MinUint64x2 x y) => (VPMINUQ128 y x) +(MulEvenWidenUint64x2 x y) => (VPMULUDQ128 y x) +(OrUint64x2 x y) => (VPOR128 y x) +(PopCountUint64x2 x) => (VPOPCNTQ128 x) +(SubUint64x2 x y) => (VPSUBQ128 y x) +(XorUint64x2 x y) => (VPXOR128 y x) +(AddUint64x4 x y) => (VPADDQ256 y x) +(AndUint64x4 x y) => (VPAND256 y x) +(AndNotUint64x4 x y) => (VPANDN256 y x) +(MaxUint64x4 x y) => (VPMAXUQ256 y x) +(MinUint64x4 x y) => (VPMINUQ256 y x) +(MulEvenWidenUint64x4 x y) => (VPMULUDQ256 y x) +(OrUint64x4 x y) => (VPOR256 y x) +(PopCountUint64x4 x) => (VPOPCNTQ256 x) +(SubUint64x4 x y) => (VPSUBQ256 y x) +(XorUint64x4 x y) => (VPXOR256 y x) +(AddUint64x8 x y) => (VPADDQ512 y x) +(AndUint64x8 x y) => (VPANDQ512 y x) +(AndNotUint64x8 x y) => (VPANDNQ512 y x) +(MaxUint64x8 x y) => (VPMAXUQ512 y x) +(MinUint64x8 x y) => (VPMINUQ512 y x) +(MulEvenWidenUint64x8 x y) => (VPMULUDQ512 y x) +(OrUint64x8 x y) => (VPORQ512 y x) +(PopCountUint64x8 x) => (VPOPCNTQ512 x) +(SubUint64x8 x y) => (VPSUBQ512 y x) +(XorUint64x8 x y) => (VPXORQ512 y x) +(AddUint8x16 x y) => (VPADDB128 y x) +(AndUint8x16 x y) => (VPAND128 y x) +(AndNotUint8x16 x y) => (VPANDN128 y x) +(AverageUint8x16 x y) => (VPAVGB128 y x) +(MaxUint8x16 x y) => (VPMAXUB128 y x) +(MinUint8x16 x y) => (VPMINUB128 y x) +(OrUint8x16 x y) => (VPOR128 y x) +(PopCountUint8x16 x) => (VPOPCNTB128 x) +(SaturatedAddUint8x16 x y) => (VPADDSB128 y x) +(SaturatedSubUint8x16 x y) => (VPSUBSB128 y x) +(SubUint8x16 x y) => (VPSUBB128 y x) +(XorUint8x16 x y) => (VPXOR128 y x) +(AddUint8x32 x y) => (VPADDB256 y x) +(AndUint8x32 x y) => (VPAND256 y x) +(AndNotUint8x32 x y) => (VPANDN256 y x) +(AverageUint8x32 x y) => (VPAVGB256 y x) +(MaxUint8x32 x y) => (VPMAXUB256 y x) +(MinUint8x32 x y) => (VPMINUB256 y x) +(OrUint8x32 x y) => (VPOR256 y x) +(PopCountUint8x32 x) => (VPOPCNTB256 x) +(SaturatedAddUint8x32 x y) => (VPADDSB256 y x) +(SaturatedSubUint8x32 x y) => (VPSUBSB256 y x) +(SubUint8x32 x y) => (VPSUBB256 y x) +(XorUint8x32 x y) => (VPXOR256 y x) +(AddUint8x64 x y) => (VPADDB512 y x) +(AverageUint8x64 x y) => (VPAVGB512 y x) +(MaxUint8x64 x y) => (VPMAXUB512 y x) +(MinUint8x64 x y) => (VPMINUB512 y x) +(PopCountUint8x64 x) => (VPOPCNTB512 x) +(SaturatedAddUint8x64 x y) => (VPADDSB512 y x) +(SaturatedSubUint8x64 x y) => (VPSUBSB512 y x) +(SubUint8x64 x y) => (VPSUBB512 y x) +(EqualFloat32x4 x y) => (VCMPPS128 [0] y x) +(EqualFloat64x4 x y) => (VCMPPD256 [0] y x) 
+(EqualFloat32x8 x y) => (VCMPPS256 [0] y x) +(EqualFloat64x2 x y) => (VCMPPD128 [0] y x) +(GreaterFloat32x8 x y) => (VCMPPS256 [6] y x) +(GreaterFloat64x4 x y) => (VCMPPD256 [6] y x) +(GreaterFloat64x2 x y) => (VCMPPD128 [6] y x) +(GreaterFloat32x4 x y) => (VCMPPS128 [6] y x) +(GreaterEqualFloat64x4 x y) => (VCMPPD256 [5] y x) +(GreaterEqualFloat32x8 x y) => (VCMPPS256 [5] y x) +(GreaterEqualFloat32x4 x y) => (VCMPPS128 [5] y x) +(GreaterEqualFloat64x2 x y) => (VCMPPD128 [5] y x) +(IsNanFloat32x8 x y) => (VCMPPS256 [3] y x) +(IsNanFloat64x2 x y) => (VCMPPD128 [3] y x) +(IsNanFloat32x4 x y) => (VCMPPS128 [3] y x) +(IsNanFloat64x4 x y) => (VCMPPD256 [3] y x) +(LessFloat32x4 x y) => (VCMPPS128 [1] y x) +(LessFloat64x4 x y) => (VCMPPD256 [1] y x) +(LessFloat64x2 x y) => (VCMPPD128 [1] y x) +(LessFloat32x8 x y) => (VCMPPS256 [1] y x) +(LessEqualFloat32x4 x y) => (VCMPPS128 [2] y x) +(LessEqualFloat64x4 x y) => (VCMPPD256 [2] y x) +(LessEqualFloat64x2 x y) => (VCMPPD128 [2] y x) +(LessEqualFloat32x8 x y) => (VCMPPS256 [2] y x) +(NotEqualFloat64x2 x y) => (VCMPPD128 [4] y x) +(NotEqualFloat32x4 x y) => (VCMPPS128 [4] y x) +(NotEqualFloat32x8 x y) => (VCMPPS256 [4] y x) +(NotEqualFloat64x4 x y) => (VCMPPD256 [4] y x) +(MaskedAddFloat32x16 x y mask) => (VADDPSMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedAndFloat32x16 x y mask) => (VANDPSMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedAndNotFloat32x16 x y mask) => (VANDNPSMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedApproximateReciprocalFloat32x16 x mask) => (VRCP14PSMasked512 x (VPMOVVec32x16ToM mask)) +(MaskedApproximateReciprocalOfSqrtFloat32x16 x mask) => (VRSQRT14PSMasked512 x (VPMOVVec32x16ToM mask)) +(MaskedDivFloat32x16 x y mask) => (VDIVPSMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedMaxFloat32x16 x y mask) => (VMAXPSMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedMinFloat32x16 x y mask) => (VMINPSMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedMulFloat32x16 x y mask) => (VMULPSMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedMulByPowOf2Float32x16 x y mask) => (VSCALEFPSMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedOrFloat32x16 x y mask) => (VORPSMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedSqrtFloat32x16 x mask) => (VSQRTPSMasked512 x (VPMOVVec32x16ToM mask)) +(MaskedSubFloat32x16 x y mask) => (VADDPSMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedXorFloat32x16 x y mask) => (VXORPSMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedAddFloat32x4 x y mask) => (VADDPSMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedAndFloat32x4 x y mask) => (VANDPSMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedAndNotFloat32x4 x y mask) => (VANDNPSMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedApproximateReciprocalFloat32x4 x mask) => (VRCP14PSMasked128 x (VPMOVVec32x4ToM mask)) +(MaskedApproximateReciprocalOfSqrtFloat32x4 x mask) => (VRSQRT14PSMasked128 x (VPMOVVec32x4ToM mask)) +(MaskedDivFloat32x4 x y mask) => (VDIVPSMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedMaxFloat32x4 x y mask) => (VMAXPSMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedMinFloat32x4 x y mask) => (VMINPSMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedMulFloat32x4 x y mask) => (VMULPSMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedMulByPowOf2Float32x4 x y mask) => (VSCALEFPSMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedOrFloat32x4 x y mask) => (VORPSMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedSqrtFloat32x4 x mask) => (VSQRTPSMasked128 x (VPMOVVec32x4ToM mask)) +(MaskedSubFloat32x4 x y mask) => (VADDPSMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedXorFloat32x4 x y mask) => (VXORPSMasked128 y x 
(VPMOVVec32x4ToM mask)) +(MaskedAddFloat32x8 x y mask) => (VADDPSMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedAndFloat32x8 x y mask) => (VANDPSMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedAndNotFloat32x8 x y mask) => (VANDNPSMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedApproximateReciprocalFloat32x8 x mask) => (VRCP14PSMasked256 x (VPMOVVec32x8ToM mask)) +(MaskedApproximateReciprocalOfSqrtFloat32x8 x mask) => (VRSQRT14PSMasked256 x (VPMOVVec32x8ToM mask)) +(MaskedDivFloat32x8 x y mask) => (VDIVPSMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedMaxFloat32x8 x y mask) => (VMAXPSMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedMinFloat32x8 x y mask) => (VMINPSMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedMulFloat32x8 x y mask) => (VMULPSMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedMulByPowOf2Float32x8 x y mask) => (VSCALEFPSMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedOrFloat32x8 x y mask) => (VORPSMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedSqrtFloat32x8 x mask) => (VSQRTPSMasked256 x (VPMOVVec32x8ToM mask)) +(MaskedSubFloat32x8 x y mask) => (VADDPSMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedXorFloat32x8 x y mask) => (VXORPSMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedAddFloat64x2 x y mask) => (VADDPDMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedAndFloat64x2 x y mask) => (VANDPDMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedAndNotFloat64x2 x y mask) => (VANDNPDMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedApproximateReciprocalFloat64x2 x mask) => (VRCP14PDMasked128 x (VPMOVVec64x2ToM mask)) +(MaskedApproximateReciprocalOfSqrtFloat64x2 x mask) => (VRSQRT14PDMasked128 x (VPMOVVec64x2ToM mask)) +(MaskedDivFloat64x2 x y mask) => (VDIVPDMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedMaxFloat64x2 x y mask) => (VMAXPDMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedMinFloat64x2 x y mask) => (VMINPDMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedMulFloat64x2 x y mask) => (VMULPDMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedMulByPowOf2Float64x2 x y mask) => (VSCALEFPDMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedOrFloat64x2 x y mask) => (VORPDMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedSqrtFloat64x2 x mask) => (VSQRTPDMasked128 x (VPMOVVec64x2ToM mask)) +(MaskedSubFloat64x2 x y mask) => (VADDPDMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedXorFloat64x2 x y mask) => (VXORPDMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedAddFloat64x4 x y mask) => (VADDPDMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedAndFloat64x4 x y mask) => (VANDPDMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedAndNotFloat64x4 x y mask) => (VANDNPDMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedApproximateReciprocalFloat64x4 x mask) => (VRCP14PDMasked256 x (VPMOVVec64x4ToM mask)) +(MaskedApproximateReciprocalOfSqrtFloat64x4 x mask) => (VRSQRT14PDMasked256 x (VPMOVVec64x4ToM mask)) +(MaskedDivFloat64x4 x y mask) => (VDIVPDMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedMaxFloat64x4 x y mask) => (VMAXPDMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedMinFloat64x4 x y mask) => (VMINPDMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedMulFloat64x4 x y mask) => (VMULPDMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedMulByPowOf2Float64x4 x y mask) => (VSCALEFPDMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedOrFloat64x4 x y mask) => (VORPDMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedSqrtFloat64x4 x mask) => (VSQRTPDMasked256 x (VPMOVVec64x4ToM mask)) +(MaskedSubFloat64x4 x y mask) => (VADDPDMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedXorFloat64x4 x y mask) => (VXORPDMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedAddFloat64x8 x y mask) => (VADDPDMasked512 y x 
(VPMOVVec64x8ToM mask)) +(MaskedAndFloat64x8 x y mask) => (VANDPDMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedAndNotFloat64x8 x y mask) => (VANDNPDMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedApproximateReciprocalFloat64x8 x mask) => (VRCP14PDMasked512 x (VPMOVVec64x8ToM mask)) +(MaskedApproximateReciprocalOfSqrtFloat64x8 x mask) => (VRSQRT14PDMasked512 x (VPMOVVec64x8ToM mask)) +(MaskedDivFloat64x8 x y mask) => (VDIVPDMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedMaxFloat64x8 x y mask) => (VMAXPDMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedMinFloat64x8 x y mask) => (VMINPDMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedMulFloat64x8 x y mask) => (VMULPDMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedMulByPowOf2Float64x8 x y mask) => (VSCALEFPDMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedOrFloat64x8 x y mask) => (VORPDMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedSqrtFloat64x8 x mask) => (VSQRTPDMasked512 x (VPMOVVec64x8ToM mask)) +(MaskedSubFloat64x8 x y mask) => (VADDPDMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedXorFloat64x8 x y mask) => (VXORPDMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedAbsoluteInt16x16 x mask) => (VPABSWMasked256 x (VPMOVVec16x16ToM mask)) +(MaskedAddInt16x16 x y mask) => (VPADDWMasked256 y x (VPMOVVec16x16ToM mask)) +(MaskedMaxInt16x16 x y mask) => (VPMAXSWMasked256 y x (VPMOVVec16x16ToM mask)) +(MaskedMinInt16x16 x y mask) => (VPMINSWMasked256 y x (VPMOVVec16x16ToM mask)) +(MaskedMulHighInt16x16 x y mask) => (VPMULHWMasked256 y x (VPMOVVec16x16ToM mask)) +(MaskedMulLowInt16x16 x y mask) => (VPMULLWMasked256 y x (VPMOVVec16x16ToM mask)) +(MaskedPopCountInt16x16 x mask) => (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) +(MaskedSaturatedAddInt16x16 x y mask) => (VPADDSWMasked256 y x (VPMOVVec16x16ToM mask)) +(MaskedSaturatedSubInt16x16 x y mask) => (VPSUBSWMasked256 y x (VPMOVVec16x16ToM mask)) +(MaskedSubInt16x16 x y mask) => (VPSUBWMasked256 y x (VPMOVVec16x16ToM mask)) +(MaskedAbsoluteInt16x32 x mask) => (VPABSWMasked512 x (VPMOVVec16x32ToM mask)) +(MaskedAddInt16x32 x y mask) => (VPADDWMasked512 y x (VPMOVVec16x32ToM mask)) +(MaskedMaxInt16x32 x y mask) => (VPMAXSWMasked512 y x (VPMOVVec16x32ToM mask)) +(MaskedMinInt16x32 x y mask) => (VPMINSWMasked512 y x (VPMOVVec16x32ToM mask)) +(MaskedMulHighInt16x32 x y mask) => (VPMULHWMasked512 y x (VPMOVVec16x32ToM mask)) +(MaskedMulLowInt16x32 x y mask) => (VPMULLWMasked512 y x (VPMOVVec16x32ToM mask)) +(MaskedPopCountInt16x32 x mask) => (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) +(MaskedSaturatedAddInt16x32 x y mask) => (VPADDSWMasked512 y x (VPMOVVec16x32ToM mask)) +(MaskedSaturatedSubInt16x32 x y mask) => (VPSUBSWMasked512 y x (VPMOVVec16x32ToM mask)) +(MaskedSubInt16x32 x y mask) => (VPSUBWMasked512 y x (VPMOVVec16x32ToM mask)) +(MaskedAbsoluteInt16x8 x mask) => (VPABSWMasked128 x (VPMOVVec16x8ToM mask)) +(MaskedAddInt16x8 x y mask) => (VPADDWMasked128 y x (VPMOVVec16x8ToM mask)) +(MaskedMaxInt16x8 x y mask) => (VPMAXSWMasked128 y x (VPMOVVec16x8ToM mask)) +(MaskedMinInt16x8 x y mask) => (VPMINSWMasked128 y x (VPMOVVec16x8ToM mask)) +(MaskedMulHighInt16x8 x y mask) => (VPMULHWMasked128 y x (VPMOVVec16x8ToM mask)) +(MaskedMulLowInt16x8 x y mask) => (VPMULLWMasked128 y x (VPMOVVec16x8ToM mask)) +(MaskedPopCountInt16x8 x mask) => (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) +(MaskedSaturatedAddInt16x8 x y mask) => (VPADDSWMasked128 y x (VPMOVVec16x8ToM mask)) +(MaskedSaturatedSubInt16x8 x y mask) => (VPSUBSWMasked128 y x (VPMOVVec16x8ToM mask)) +(MaskedSubInt16x8 x y mask) => (VPSUBWMasked128 y x (VPMOVVec16x8ToM 
mask)) +(MaskedAbsoluteInt32x16 x mask) => (VPABSDMasked512 x (VPMOVVec32x16ToM mask)) +(MaskedAddInt32x16 x y mask) => (VPADDDMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedAndInt32x16 x y mask) => (VPANDDMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedAndNotInt32x16 x y mask) => (VPANDNDMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedMaxInt32x16 x y mask) => (VPMAXSDMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedMinInt32x16 x y mask) => (VPMINSDMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedMulLowInt32x16 x y mask) => (VPMULLDMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedOrInt32x16 x y mask) => (VPORDMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedPopCountInt32x16 x mask) => (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) +(MaskedSubInt32x16 x y mask) => (VPSUBDMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedXorInt32x16 x y mask) => (VPXORDMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedAbsoluteInt32x4 x mask) => (VPABSDMasked128 x (VPMOVVec32x4ToM mask)) +(MaskedAddInt32x4 x y mask) => (VPADDDMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedAndInt32x4 x y mask) => (VPANDDMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedAndNotInt32x4 x y mask) => (VPANDNDMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedMaxInt32x4 x y mask) => (VPMAXSDMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedMinInt32x4 x y mask) => (VPMINSDMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedMulLowInt32x4 x y mask) => (VPMULLDMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedOrInt32x4 x y mask) => (VPORDMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedPopCountInt32x4 x mask) => (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) +(MaskedSubInt32x4 x y mask) => (VPSUBDMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedXorInt32x4 x y mask) => (VPXORDMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedAbsoluteInt32x8 x mask) => (VPABSDMasked256 x (VPMOVVec32x8ToM mask)) +(MaskedAddInt32x8 x y mask) => (VPADDDMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedAndInt32x8 x y mask) => (VPANDDMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedAndNotInt32x8 x y mask) => (VPANDNDMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedMaxInt32x8 x y mask) => (VPMAXSDMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedMinInt32x8 x y mask) => (VPMINSDMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedMulLowInt32x8 x y mask) => (VPMULLDMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedOrInt32x8 x y mask) => (VPORDMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedPopCountInt32x8 x mask) => (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) +(MaskedSubInt32x8 x y mask) => (VPSUBDMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedXorInt32x8 x y mask) => (VPXORDMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedAbsoluteInt64x2 x mask) => (VPABSQMasked128 x (VPMOVVec64x2ToM mask)) +(MaskedAddInt64x2 x y mask) => (VPADDQMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedAndInt64x2 x y mask) => (VPANDQMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedAndNotInt64x2 x y mask) => (VPANDNQMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedMaxInt64x2 x y mask) => (VPMAXSQMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedMinInt64x2 x y mask) => (VPMINSQMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedMulEvenWidenInt64x2 x y mask) => (VPMULDQMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedMulLowInt64x2 x y mask) => (VPMULLQMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedOrInt64x2 x y mask) => (VPORQMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedPopCountInt64x2 x mask) => (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) +(MaskedSubInt64x2 x y mask) => (VPSUBQMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedXorInt64x2 x y mask) => (VPXORQMasked128 y x (VPMOVVec64x2ToM mask)) 
+(MaskedAbsoluteInt64x4 x mask) => (VPABSQMasked256 x (VPMOVVec64x4ToM mask)) +(MaskedAddInt64x4 x y mask) => (VPADDQMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedAndInt64x4 x y mask) => (VPANDQMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedAndNotInt64x4 x y mask) => (VPANDNQMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedMaxInt64x4 x y mask) => (VPMAXSQMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedMinInt64x4 x y mask) => (VPMINSQMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedMulEvenWidenInt64x4 x y mask) => (VPMULDQMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedMulLowInt64x4 x y mask) => (VPMULLQMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedOrInt64x4 x y mask) => (VPORQMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedPopCountInt64x4 x mask) => (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) +(MaskedSubInt64x4 x y mask) => (VPSUBQMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedXorInt64x4 x y mask) => (VPXORQMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedAbsoluteInt64x8 x mask) => (VPABSQMasked512 x (VPMOVVec64x8ToM mask)) +(MaskedAddInt64x8 x y mask) => (VPADDQMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedAndInt64x8 x y mask) => (VPANDQMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedAndNotInt64x8 x y mask) => (VPANDNQMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedMaxInt64x8 x y mask) => (VPMAXSQMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedMinInt64x8 x y mask) => (VPMINSQMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedMulEvenWidenInt64x8 x y mask) => (VPMULDQMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedMulLowInt64x8 x y mask) => (VPMULLQMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedOrInt64x8 x y mask) => (VPORQMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedPopCountInt64x8 x mask) => (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) +(MaskedSubInt64x8 x y mask) => (VPSUBQMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedXorInt64x8 x y mask) => (VPXORQMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedAbsoluteInt8x16 x mask) => (VPABSBMasked128 x (VPMOVVec8x16ToM mask)) +(MaskedAddInt8x16 x y mask) => (VPADDBMasked128 y x (VPMOVVec8x16ToM mask)) +(MaskedMaxInt8x16 x y mask) => (VPMAXSBMasked128 y x (VPMOVVec8x16ToM mask)) +(MaskedMinInt8x16 x y mask) => (VPMINSBMasked128 y x (VPMOVVec8x16ToM mask)) +(MaskedPopCountInt8x16 x mask) => (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) +(MaskedSaturatedAddInt8x16 x y mask) => (VPADDSBMasked128 y x (VPMOVVec8x16ToM mask)) +(MaskedSaturatedSubInt8x16 x y mask) => (VPSUBSBMasked128 y x (VPMOVVec8x16ToM mask)) +(MaskedSubInt8x16 x y mask) => (VPSUBBMasked128 y x (VPMOVVec8x16ToM mask)) +(MaskedAbsoluteInt8x32 x mask) => (VPABSBMasked256 x (VPMOVVec8x32ToM mask)) +(MaskedAddInt8x32 x y mask) => (VPADDBMasked256 y x (VPMOVVec8x32ToM mask)) +(MaskedMaxInt8x32 x y mask) => (VPMAXSBMasked256 y x (VPMOVVec8x32ToM mask)) +(MaskedMinInt8x32 x y mask) => (VPMINSBMasked256 y x (VPMOVVec8x32ToM mask)) +(MaskedPopCountInt8x32 x mask) => (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) +(MaskedSaturatedAddInt8x32 x y mask) => (VPADDSBMasked256 y x (VPMOVVec8x32ToM mask)) +(MaskedSaturatedSubInt8x32 x y mask) => (VPSUBSBMasked256 y x (VPMOVVec8x32ToM mask)) +(MaskedSubInt8x32 x y mask) => (VPSUBBMasked256 y x (VPMOVVec8x32ToM mask)) +(MaskedAbsoluteInt8x64 x mask) => (VPABSBMasked512 x (VPMOVVec8x64ToM mask)) +(MaskedAddInt8x64 x y mask) => (VPADDBMasked512 y x (VPMOVVec8x64ToM mask)) +(MaskedMaxInt8x64 x y mask) => (VPMAXSBMasked512 y x (VPMOVVec8x64ToM mask)) +(MaskedMinInt8x64 x y mask) => (VPMINSBMasked512 y x (VPMOVVec8x64ToM mask)) +(MaskedPopCountInt8x64 x mask) => (VPOPCNTBMasked512 x 
(VPMOVVec8x64ToM mask)) +(MaskedSaturatedAddInt8x64 x y mask) => (VPADDSBMasked512 y x (VPMOVVec8x64ToM mask)) +(MaskedSaturatedSubInt8x64 x y mask) => (VPSUBSBMasked512 y x (VPMOVVec8x64ToM mask)) +(MaskedSubInt8x64 x y mask) => (VPSUBBMasked512 y x (VPMOVVec8x64ToM mask)) +(MaskedAddUint16x16 x y mask) => (VPADDWMasked256 y x (VPMOVVec16x16ToM mask)) +(MaskedAverageUint16x16 x y mask) => (VPAVGWMasked256 y x (VPMOVVec16x16ToM mask)) +(MaskedMaxUint16x16 x y mask) => (VPMAXUWMasked256 y x (VPMOVVec16x16ToM mask)) +(MaskedMinUint16x16 x y mask) => (VPMINUWMasked256 y x (VPMOVVec16x16ToM mask)) +(MaskedMulHighUint16x16 x y mask) => (VPMULHUWMasked256 y x (VPMOVVec16x16ToM mask)) +(MaskedPopCountUint16x16 x mask) => (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) +(MaskedSaturatedAddUint16x16 x y mask) => (VPADDSWMasked256 y x (VPMOVVec16x16ToM mask)) +(MaskedSaturatedSubUint16x16 x y mask) => (VPSUBSWMasked256 y x (VPMOVVec16x16ToM mask)) +(MaskedSubUint16x16 x y mask) => (VPSUBWMasked256 y x (VPMOVVec16x16ToM mask)) +(MaskedAddUint16x32 x y mask) => (VPADDWMasked512 y x (VPMOVVec16x32ToM mask)) +(MaskedAverageUint16x32 x y mask) => (VPAVGWMasked512 y x (VPMOVVec16x32ToM mask)) +(MaskedMaxUint16x32 x y mask) => (VPMAXUWMasked512 y x (VPMOVVec16x32ToM mask)) +(MaskedMinUint16x32 x y mask) => (VPMINUWMasked512 y x (VPMOVVec16x32ToM mask)) +(MaskedMulHighUint16x32 x y mask) => (VPMULHUWMasked512 y x (VPMOVVec16x32ToM mask)) +(MaskedPopCountUint16x32 x mask) => (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) +(MaskedSaturatedAddUint16x32 x y mask) => (VPADDSWMasked512 y x (VPMOVVec16x32ToM mask)) +(MaskedSaturatedSubUint16x32 x y mask) => (VPSUBSWMasked512 y x (VPMOVVec16x32ToM mask)) +(MaskedSubUint16x32 x y mask) => (VPSUBWMasked512 y x (VPMOVVec16x32ToM mask)) +(MaskedAddUint16x8 x y mask) => (VPADDWMasked128 y x (VPMOVVec16x8ToM mask)) +(MaskedAverageUint16x8 x y mask) => (VPAVGWMasked128 y x (VPMOVVec16x8ToM mask)) +(MaskedMaxUint16x8 x y mask) => (VPMAXUWMasked128 y x (VPMOVVec16x8ToM mask)) +(MaskedMinUint16x8 x y mask) => (VPMINUWMasked128 y x (VPMOVVec16x8ToM mask)) +(MaskedMulHighUint16x8 x y mask) => (VPMULHUWMasked128 y x (VPMOVVec16x8ToM mask)) +(MaskedPopCountUint16x8 x mask) => (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) +(MaskedSaturatedAddUint16x8 x y mask) => (VPADDSWMasked128 y x (VPMOVVec16x8ToM mask)) +(MaskedSaturatedSubUint16x8 x y mask) => (VPSUBSWMasked128 y x (VPMOVVec16x8ToM mask)) +(MaskedSubUint16x8 x y mask) => (VPSUBWMasked128 y x (VPMOVVec16x8ToM mask)) +(MaskedAddUint32x16 x y mask) => (VPADDDMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedAndUint32x16 x y mask) => (VPANDDMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedAndNotUint32x16 x y mask) => (VPANDNDMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedMaxUint32x16 x y mask) => (VPMAXUDMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedMinUint32x16 x y mask) => (VPMINUDMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedOrUint32x16 x y mask) => (VPORDMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedPopCountUint32x16 x mask) => (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) +(MaskedSubUint32x16 x y mask) => (VPSUBDMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedXorUint32x16 x y mask) => (VPXORDMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedAddUint32x4 x y mask) => (VPADDDMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedAndUint32x4 x y mask) => (VPANDDMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedAndNotUint32x4 x y mask) => (VPANDNDMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedMaxUint32x4 x y mask) => (VPMAXUDMasked128 y x 
(VPMOVVec32x4ToM mask)) +(MaskedMinUint32x4 x y mask) => (VPMINUDMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedOrUint32x4 x y mask) => (VPORDMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedPopCountUint32x4 x mask) => (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) +(MaskedSubUint32x4 x y mask) => (VPSUBDMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedXorUint32x4 x y mask) => (VPXORDMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedAddUint32x8 x y mask) => (VPADDDMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedAndUint32x8 x y mask) => (VPANDDMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedAndNotUint32x8 x y mask) => (VPANDNDMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedMaxUint32x8 x y mask) => (VPMAXUDMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedMinUint32x8 x y mask) => (VPMINUDMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedOrUint32x8 x y mask) => (VPORDMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedPopCountUint32x8 x mask) => (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) +(MaskedSubUint32x8 x y mask) => (VPSUBDMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedXorUint32x8 x y mask) => (VPXORDMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedAddUint64x2 x y mask) => (VPADDQMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedAndUint64x2 x y mask) => (VPANDQMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedAndNotUint64x2 x y mask) => (VPANDNQMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedMaxUint64x2 x y mask) => (VPMAXUQMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedMinUint64x2 x y mask) => (VPMINUQMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedMulEvenWidenUint64x2 x y mask) => (VPMULUDQMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedOrUint64x2 x y mask) => (VPORQMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedPopCountUint64x2 x mask) => (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) +(MaskedSubUint64x2 x y mask) => (VPSUBQMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedXorUint64x2 x y mask) => (VPXORQMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedAddUint64x4 x y mask) => (VPADDQMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedAndUint64x4 x y mask) => (VPANDQMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedAndNotUint64x4 x y mask) => (VPANDNQMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedMaxUint64x4 x y mask) => (VPMAXUQMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedMinUint64x4 x y mask) => (VPMINUQMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedMulEvenWidenUint64x4 x y mask) => (VPMULUDQMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedOrUint64x4 x y mask) => (VPORQMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedPopCountUint64x4 x mask) => (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) +(MaskedSubUint64x4 x y mask) => (VPSUBQMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedXorUint64x4 x y mask) => (VPXORQMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedAddUint64x8 x y mask) => (VPADDQMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedAndUint64x8 x y mask) => (VPANDQMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedAndNotUint64x8 x y mask) => (VPANDNQMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedMaxUint64x8 x y mask) => (VPMAXUQMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedMinUint64x8 x y mask) => (VPMINUQMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedMulEvenWidenUint64x8 x y mask) => (VPMULUDQMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedOrUint64x8 x y mask) => (VPORQMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedPopCountUint64x8 x mask) => (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) +(MaskedSubUint64x8 x y mask) => (VPSUBQMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedXorUint64x8 x y mask) => (VPXORQMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedAddUint8x16 x y mask) => 
(VPADDBMasked128 y x (VPMOVVec8x16ToM mask)) +(MaskedAverageUint8x16 x y mask) => (VPAVGBMasked128 y x (VPMOVVec8x16ToM mask)) +(MaskedMaxUint8x16 x y mask) => (VPMAXUBMasked128 y x (VPMOVVec8x16ToM mask)) +(MaskedMinUint8x16 x y mask) => (VPMINUBMasked128 y x (VPMOVVec8x16ToM mask)) +(MaskedPopCountUint8x16 x mask) => (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) +(MaskedSaturatedAddUint8x16 x y mask) => (VPADDSBMasked128 y x (VPMOVVec8x16ToM mask)) +(MaskedSaturatedSubUint8x16 x y mask) => (VPSUBSBMasked128 y x (VPMOVVec8x16ToM mask)) +(MaskedSubUint8x16 x y mask) => (VPSUBBMasked128 y x (VPMOVVec8x16ToM mask)) +(MaskedAddUint8x32 x y mask) => (VPADDBMasked256 y x (VPMOVVec8x32ToM mask)) +(MaskedAverageUint8x32 x y mask) => (VPAVGBMasked256 y x (VPMOVVec8x32ToM mask)) +(MaskedMaxUint8x32 x y mask) => (VPMAXUBMasked256 y x (VPMOVVec8x32ToM mask)) +(MaskedMinUint8x32 x y mask) => (VPMINUBMasked256 y x (VPMOVVec8x32ToM mask)) +(MaskedPopCountUint8x32 x mask) => (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) +(MaskedSaturatedAddUint8x32 x y mask) => (VPADDSBMasked256 y x (VPMOVVec8x32ToM mask)) +(MaskedSaturatedSubUint8x32 x y mask) => (VPSUBSBMasked256 y x (VPMOVVec8x32ToM mask)) +(MaskedSubUint8x32 x y mask) => (VPSUBBMasked256 y x (VPMOVVec8x32ToM mask)) +(MaskedAddUint8x64 x y mask) => (VPADDBMasked512 y x (VPMOVVec8x64ToM mask)) +(MaskedAverageUint8x64 x y mask) => (VPAVGBMasked512 y x (VPMOVVec8x64ToM mask)) +(MaskedMaxUint8x64 x y mask) => (VPMAXUBMasked512 y x (VPMOVVec8x64ToM mask)) +(MaskedMinUint8x64 x y mask) => (VPMINUBMasked512 y x (VPMOVVec8x64ToM mask)) +(MaskedPopCountUint8x64 x mask) => (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) +(MaskedSaturatedAddUint8x64 x y mask) => (VPADDSBMasked512 y x (VPMOVVec8x64ToM mask)) +(MaskedSaturatedSubUint8x64 x y mask) => (VPSUBSBMasked512 y x (VPMOVVec8x64ToM mask)) +(MaskedSubUint8x64 x y mask) => (VPSUBBMasked512 y x (VPMOVVec8x64ToM mask)) +(EqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPEQW512 y x)) +(GreaterInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPGTW512 y x)) +(GreaterInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPGTQ128 y x)) +(EqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPEQQ512 y x)) +(GreaterInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPGTQ512 y x)) +(EqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [0] y x)) +(EqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [0] y x)) +(EqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [0] y x)) +(EqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [0] y x)) +(EqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [0] y x)) +(EqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [0] y x)) +(EqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [0] y x)) +(EqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [0] y x)) +(EqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [0] y x)) +(EqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [0] y x)) +(EqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [0] y x)) +(EqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [0] y x)) +(EqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [0] y x)) +(EqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [0] y x)) +(EqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [0] y x)) +(EqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [0] y x)) +(GreaterInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [6] y x)) +(GreaterUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [6] y x)) +(GreaterUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [6] y x)) +(GreaterInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [6] y x)) +(GreaterUint8x64 x y) => (VPMOVMToVec8x64 
(VPCMPUB512 [6] y x)) +(GreaterUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [6] y x)) +(GreaterUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [6] y x)) +(GreaterUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [6] y x)) +(GreaterUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [6] y x)) +(GreaterFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [6] y x)) +(GreaterFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [6] y x)) +(GreaterUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [6] y x)) +(GreaterUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [6] y x)) +(GreaterUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [6] y x)) +(GreaterUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [6] y x)) +(GreaterUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [6] y x)) +(GreaterEqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [5] y x)) +(GreaterEqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [5] y x)) +(GreaterEqualInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [5] y x)) +(GreaterEqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [5] y x)) +(GreaterEqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [5] y x)) +(GreaterEqualInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [5] y x)) +(GreaterEqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [5] y x)) +(GreaterEqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [5] y x)) +(GreaterEqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [5] y x)) +(GreaterEqualInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [5] y x)) +(GreaterEqualInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [5] y x)) +(GreaterEqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [5] y x)) +(GreaterEqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [5] y x)) +(GreaterEqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [5] y x)) +(GreaterEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [5] y x)) +(GreaterEqualInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [5] y x)) +(GreaterEqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [5] y x)) +(GreaterEqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [5] y x)) +(GreaterEqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [5] y x)) +(GreaterEqualInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [5] y x)) +(GreaterEqualInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [5] y x)) +(GreaterEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [5] y x)) +(GreaterEqualInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [5] y x)) +(GreaterEqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [5] y x)) +(GreaterEqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [5] y x)) +(GreaterEqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [5] y x)) +(IsNanFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [3] y x)) +(IsNanFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [3] y x)) +(LessInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [1] y x)) +(LessInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [1] y x)) +(LessInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [1] y x)) +(LessUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [1] y x)) +(LessUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [1] y x)) +(LessInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [1] y x)) +(LessInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [1] y x)) +(LessUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [1] y x)) +(LessFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [1] y x)) +(LessUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [1] y x)) +(LessUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [1] y x)) +(LessUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [1] y x)) +(LessUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [1] y x)) +(LessInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [1] y x)) 
+(LessUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [1] y x)) +(LessFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [1] y x)) +(LessInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [1] y x)) +(LessInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [1] y x)) +(LessUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [1] y x)) +(LessInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [1] y x)) +(LessUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [1] y x)) +(LessInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [1] y x)) +(LessInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [1] y x)) +(LessInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [1] y x)) +(LessUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [1] y x)) +(LessUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [1] y x)) +(LessEqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [2] y x)) +(LessEqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [2] y x)) +(LessEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [2] y x)) +(LessEqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [2] y x)) +(LessEqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [2] y x)) +(LessEqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [2] y x)) +(LessEqualInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [2] y x)) +(LessEqualInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [2] y x)) +(LessEqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [2] y x)) +(LessEqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [2] y x)) +(LessEqualInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [2] y x)) +(LessEqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [2] y x)) +(LessEqualInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [2] y x)) +(LessEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [2] y x)) +(LessEqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [2] y x)) +(LessEqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [2] y x)) +(LessEqualInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [2] y x)) +(LessEqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [2] y x)) +(LessEqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [2] y x)) +(LessEqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [2] y x)) +(LessEqualInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [2] y x)) +(LessEqualInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [2] y x)) +(LessEqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [2] y x)) +(LessEqualInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [2] y x)) +(LessEqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [2] y x)) +(LessEqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [2] y x)) +(NotEqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [4] y x)) +(NotEqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [4] y x)) +(NotEqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [4] y x)) +(NotEqualInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [4] y x)) +(NotEqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [4] y x)) +(NotEqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [4] y x)) +(NotEqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [4] y x)) +(NotEqualInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [4] y x)) +(NotEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [4] y x)) +(NotEqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [4] y x)) +(NotEqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [4] y x)) +(NotEqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [4] y x)) +(NotEqualInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [4] y x)) +(NotEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [4] y x)) +(NotEqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [4] y x)) +(NotEqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [4] y x)) +(NotEqualUint64x2 x y) => 
(VPMOVMToVec64x2 (VPCMPUQ128 [4] y x)) +(NotEqualInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [4] y x)) +(NotEqualInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [4] y x)) +(NotEqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [4] y x)) +(NotEqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [4] y x)) +(NotEqualInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [4] y x)) +(NotEqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [4] y x)) +(NotEqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [4] y x)) +(NotEqualInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [4] y x)) +(NotEqualInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [4] y x)) +(MaskedEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPEQWMasked256 y x (VPMOVVec16x16ToM mask))) +(MaskedGreaterInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPGTWMasked256 y x (VPMOVVec16x16ToM mask))) +(MaskedEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPEQWMasked512 y x (VPMOVVec16x32ToM mask))) +(MaskedGreaterInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPGTWMasked512 y x (VPMOVVec16x32ToM mask))) +(MaskedEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPEQWMasked128 y x (VPMOVVec16x8ToM mask))) +(MaskedGreaterInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPGTWMasked128 y x (VPMOVVec16x8ToM mask))) +(MaskedEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPEQQMasked128 y x (VPMOVVec64x2ToM mask))) +(MaskedGreaterInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPGTQMasked128 y x (VPMOVVec64x2ToM mask))) +(MaskedEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPEQQMasked256 y x (VPMOVVec64x4ToM mask))) +(MaskedGreaterInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPGTQMasked256 y x (VPMOVVec64x4ToM mask))) +(MaskedEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPEQQMasked512 y x (VPMOVVec64x8ToM mask))) +(MaskedGreaterInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPGTQMasked512 y x (VPMOVVec64x8ToM mask))) +(MaskedEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [0] y x (VPMOVVec8x64ToM mask))) +(MaskedEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [0] y x (VPMOVVec64x4ToM mask))) +(MaskedEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [0] y x (VPMOVVec64x8ToM mask))) +(MaskedEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [0] y x (VPMOVVec32x8ToM mask))) +(MaskedEqualUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [0] y x (VPMOVVec32x4ToM mask))) +(MaskedEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [0] y x (VPMOVVec32x8ToM mask))) +(MaskedEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [0] y x (VPMOVVec64x2ToM mask))) +(MaskedEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [0] y x (VPMOVVec64x2ToM mask))) +(MaskedEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [0] y x (VPMOVVec32x16ToM mask))) +(MaskedEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [0] y x (VPMOVVec8x16ToM mask))) +(MaskedEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [0] y x (VPMOVVec8x16ToM mask))) +(MaskedEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [0] y x (VPMOVVec8x32ToM mask))) +(MaskedEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [0] y x (VPMOVVec64x8ToM mask))) +(MaskedEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [0] y x (VPMOVVec16x32ToM mask))) +(MaskedEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [0] y x (VPMOVVec32x16ToM mask))) +(MaskedEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [0] y x (VPMOVVec32x8ToM mask))) +(MaskedEqualInt8x32 x y mask) => (VPMOVMToVec8x32 
(VPCMPBMasked256 [0] y x (VPMOVVec8x32ToM mask))) +(MaskedEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [0] y x (VPMOVVec16x8ToM mask))) +(MaskedEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [0] y x (VPMOVVec16x16ToM mask))) +(MaskedEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [0] y x (VPMOVVec64x4ToM mask))) +(MaskedEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [0] y x (VPMOVVec32x16ToM mask))) +(MaskedEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [0] y x (VPMOVVec32x4ToM mask))) +(MaskedEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [0] y x (VPMOVVec32x4ToM mask))) +(MaskedEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [0] y x (VPMOVVec8x64ToM mask))) +(MaskedGreaterFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [6] y x (VPMOVVec64x2ToM mask))) +(MaskedGreaterUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [6] y x (VPMOVVec16x16ToM mask))) +(MaskedGreaterUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [6] y x (VPMOVVec32x16ToM mask))) +(MaskedGreaterUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [6] y x (VPMOVVec16x32ToM mask))) +(MaskedGreaterUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [6] y x (VPMOVVec8x16ToM mask))) +(MaskedGreaterFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [6] y x (VPMOVVec64x4ToM mask))) +(MaskedGreaterUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [6] y x (VPMOVVec32x8ToM mask))) +(MaskedGreaterFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [6] y x (VPMOVVec32x16ToM mask))) +(MaskedGreaterInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [6] y x (VPMOVVec8x16ToM mask))) +(MaskedGreaterInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [6] y x (VPMOVVec32x4ToM mask))) +(MaskedGreaterInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [6] y x (VPMOVVec32x16ToM mask))) +(MaskedGreaterUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [6] y x (VPMOVVec16x8ToM mask))) +(MaskedGreaterFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [6] y x (VPMOVVec32x4ToM mask))) +(MaskedGreaterUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [6] y x (VPMOVVec64x2ToM mask))) +(MaskedGreaterUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [6] y x (VPMOVVec8x64ToM mask))) +(MaskedGreaterUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [6] y x (VPMOVVec64x8ToM mask))) +(MaskedGreaterFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [6] y x (VPMOVVec32x8ToM mask))) +(MaskedGreaterInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [6] y x (VPMOVVec32x8ToM mask))) +(MaskedGreaterUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [6] y x (VPMOVVec32x4ToM mask))) +(MaskedGreaterFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [6] y x (VPMOVVec64x8ToM mask))) +(MaskedGreaterUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [6] y x (VPMOVVec8x32ToM mask))) +(MaskedGreaterInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [6] y x (VPMOVVec8x64ToM mask))) +(MaskedGreaterUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [6] y x (VPMOVVec64x4ToM mask))) +(MaskedGreaterInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [6] y x (VPMOVVec8x32ToM mask))) +(MaskedGreaterEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [5] y x (VPMOVVec32x8ToM mask))) +(MaskedGreaterEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [5] y x (VPMOVVec32x4ToM mask))) +(MaskedGreaterEqualInt16x16 x y mask) 
=> (VPMOVMToVec16x16 (VPCMPWMasked256 [5] y x (VPMOVVec16x16ToM mask))) +(MaskedGreaterEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [5] y x (VPMOVVec8x16ToM mask))) +(MaskedGreaterEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [5] y x (VPMOVVec16x8ToM mask))) +(MaskedGreaterEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [5] y x (VPMOVVec16x8ToM mask))) +(MaskedGreaterEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [5] y x (VPMOVVec32x8ToM mask))) +(MaskedGreaterEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [5] y x (VPMOVVec16x16ToM mask))) +(MaskedGreaterEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [5] y x (VPMOVVec64x8ToM mask))) +(MaskedGreaterEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [5] y x (VPMOVVec32x16ToM mask))) +(MaskedGreaterEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [5] y x (VPMOVVec32x16ToM mask))) +(MaskedGreaterEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [5] y x (VPMOVVec8x32ToM mask))) +(MaskedGreaterEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [5] y x (VPMOVVec8x64ToM mask))) +(MaskedGreaterEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [5] y x (VPMOVVec32x4ToM mask))) +(MaskedGreaterEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [5] y x (VPMOVVec32x8ToM mask))) +(MaskedGreaterEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [5] y x (VPMOVVec8x64ToM mask))) +(MaskedGreaterEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [5] y x (VPMOVVec64x2ToM mask))) +(MaskedGreaterEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [5] y x (VPMOVVec16x32ToM mask))) +(MaskedGreaterEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [5] y x (VPMOVVec64x8ToM mask))) +(MaskedGreaterEqualUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [5] y x (VPMOVVec32x4ToM mask))) +(MaskedGreaterEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [5] y x (VPMOVVec64x4ToM mask))) +(MaskedGreaterEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [5] y x (VPMOVVec64x2ToM mask))) +(MaskedGreaterEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [5] y x (VPMOVVec64x8ToM mask))) +(MaskedGreaterEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [5] y x (VPMOVVec32x16ToM mask))) +(MaskedGreaterEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [5] y x (VPMOVVec8x32ToM mask))) +(MaskedGreaterEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [5] y x (VPMOVVec64x4ToM mask))) +(MaskedGreaterEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [5] y x (VPMOVVec64x2ToM mask))) +(MaskedGreaterEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [5] y x (VPMOVVec8x16ToM mask))) +(MaskedGreaterEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [5] y x (VPMOVVec16x32ToM mask))) +(MaskedGreaterEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [5] y x (VPMOVVec64x4ToM mask))) +(MaskedIsNanFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [3] y x (VPMOVVec32x16ToM mask))) +(MaskedIsNanFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [3] y x (VPMOVVec64x2ToM mask))) +(MaskedIsNanFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [3] y x (VPMOVVec32x8ToM mask))) +(MaskedIsNanFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [3] y x (VPMOVVec32x4ToM mask))) +(MaskedIsNanFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [3] y x (VPMOVVec64x4ToM 
mask))) +(MaskedIsNanFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [3] y x (VPMOVVec64x8ToM mask))) +(MaskedLessFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [1] y x (VPMOVVec64x4ToM mask))) +(MaskedLessInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [1] y x (VPMOVVec32x8ToM mask))) +(MaskedLessInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [1] y x (VPMOVVec8x64ToM mask))) +(MaskedLessUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [1] y x (VPMOVVec32x16ToM mask))) +(MaskedLessInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [1] y x (VPMOVVec16x16ToM mask))) +(MaskedLessUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [1] y x (VPMOVVec8x32ToM mask))) +(MaskedLessInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [1] y x (VPMOVVec32x4ToM mask))) +(MaskedLessInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [1] y x (VPMOVVec32x16ToM mask))) +(MaskedLessUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [1] y x (VPMOVVec64x8ToM mask))) +(MaskedLessFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [1] y x (VPMOVVec64x2ToM mask))) +(MaskedLessUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [1] y x (VPMOVVec8x16ToM mask))) +(MaskedLessUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [1] y x (VPMOVVec16x8ToM mask))) +(MaskedLessUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [1] y x (VPMOVVec16x32ToM mask))) +(MaskedLessUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [1] y x (VPMOVVec32x4ToM mask))) +(MaskedLessInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [1] y x (VPMOVVec8x32ToM mask))) +(MaskedLessInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [1] y x (VPMOVVec64x2ToM mask))) +(MaskedLessUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [1] y x (VPMOVVec64x2ToM mask))) +(MaskedLessFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [1] y x (VPMOVVec32x16ToM mask))) +(MaskedLessInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [1] y x (VPMOVVec64x4ToM mask))) +(MaskedLessUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [1] y x (VPMOVVec8x64ToM mask))) +(MaskedLessUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [1] y x (VPMOVVec16x16ToM mask))) +(MaskedLessUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [1] y x (VPMOVVec64x4ToM mask))) +(MaskedLessFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [1] y x (VPMOVVec64x8ToM mask))) +(MaskedLessInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [1] y x (VPMOVVec8x16ToM mask))) +(MaskedLessFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [1] y x (VPMOVVec32x8ToM mask))) +(MaskedLessFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [1] y x (VPMOVVec32x4ToM mask))) +(MaskedLessUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [1] y x (VPMOVVec32x8ToM mask))) +(MaskedLessInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [1] y x (VPMOVVec16x32ToM mask))) +(MaskedLessInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [1] y x (VPMOVVec64x8ToM mask))) +(MaskedLessInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [1] y x (VPMOVVec16x8ToM mask))) +(MaskedLessEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [2] y x (VPMOVVec32x4ToM mask))) +(MaskedLessEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [2] y x (VPMOVVec16x32ToM mask))) +(MaskedLessEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [2] y x (VPMOVVec64x4ToM mask))) +(MaskedLessEqualUint32x4 x y mask) => (VPMOVMToVec32x4 
(VPCMPUDMasked128 [2] y x (VPMOVVec32x4ToM mask))) +(MaskedLessEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [2] y x (VPMOVVec64x8ToM mask))) +(MaskedLessEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [2] y x (VPMOVVec32x16ToM mask))) +(MaskedLessEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [2] y x (VPMOVVec64x2ToM mask))) +(MaskedLessEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [2] y x (VPMOVVec32x16ToM mask))) +(MaskedLessEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [2] y x (VPMOVVec64x8ToM mask))) +(MaskedLessEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [2] y x (VPMOVVec16x8ToM mask))) +(MaskedLessEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [2] y x (VPMOVVec32x4ToM mask))) +(MaskedLessEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [2] y x (VPMOVVec64x4ToM mask))) +(MaskedLessEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [2] y x (VPMOVVec64x2ToM mask))) +(MaskedLessEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [2] y x (VPMOVVec16x8ToM mask))) +(MaskedLessEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [2] y x (VPMOVVec32x16ToM mask))) +(MaskedLessEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [2] y x (VPMOVVec8x32ToM mask))) +(MaskedLessEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [2] y x (VPMOVVec8x64ToM mask))) +(MaskedLessEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [2] y x (VPMOVVec32x8ToM mask))) +(MaskedLessEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [2] y x (VPMOVVec16x16ToM mask))) +(MaskedLessEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [2] y x (VPMOVVec16x16ToM mask))) +(MaskedLessEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [2] y x (VPMOVVec8x32ToM mask))) +(MaskedLessEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [2] y x (VPMOVVec64x4ToM mask))) +(MaskedLessEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [2] y x (VPMOVVec8x64ToM mask))) +(MaskedLessEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [2] y x (VPMOVVec32x8ToM mask))) +(MaskedLessEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [2] y x (VPMOVVec8x16ToM mask))) +(MaskedLessEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [2] y x (VPMOVVec64x2ToM mask))) +(MaskedLessEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [2] y x (VPMOVVec16x32ToM mask))) +(MaskedLessEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [2] y x (VPMOVVec32x8ToM mask))) +(MaskedLessEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [2] y x (VPMOVVec8x16ToM mask))) +(MaskedLessEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [2] y x (VPMOVVec64x8ToM mask))) +(MaskedNotEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [4] y x (VPMOVVec64x2ToM mask))) +(MaskedNotEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [4] y x (VPMOVVec32x8ToM mask))) +(MaskedNotEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [4] y x (VPMOVVec8x64ToM mask))) +(MaskedNotEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [4] y x (VPMOVVec64x8ToM mask))) +(MaskedNotEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [4] y x (VPMOVVec16x32ToM mask))) +(MaskedNotEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [4] y x (VPMOVVec32x16ToM mask))) +(MaskedNotEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [4] y x 
(VPMOVVec32x16ToM mask)))
+(MaskedNotEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [4] y x (VPMOVVec8x32ToM mask)))
+(MaskedNotEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [4] y x (VPMOVVec8x16ToM mask)))
+(MaskedNotEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [4] y x (VPMOVVec16x16ToM mask)))
+(MaskedNotEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [4] y x (VPMOVVec8x32ToM mask)))
+(MaskedNotEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [4] y x (VPMOVVec32x4ToM mask)))
+(MaskedNotEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [4] y x (VPMOVVec64x4ToM mask)))
+(MaskedNotEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [4] y x (VPMOVVec16x8ToM mask)))
+(MaskedNotEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [4] y x (VPMOVVec8x64ToM mask)))
+(MaskedNotEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [4] y x (VPMOVVec16x8ToM mask)))
+(MaskedNotEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [4] y x (VPMOVVec16x16ToM mask)))
+(MaskedNotEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [4] y x (VPMOVVec64x4ToM mask)))
+(MaskedNotEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [4] y x (VPMOVVec16x32ToM mask)))
+(MaskedNotEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [4] y x (VPMOVVec32x8ToM mask)))
+(MaskedNotEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [4] y x (VPMOVVec32x16ToM mask)))
+(MaskedNotEqualUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [4] y x (VPMOVVec32x4ToM mask)))
+(MaskedNotEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [4] y x (VPMOVVec64x2ToM mask)))
+(MaskedNotEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [4] y x (VPMOVVec64x8ToM mask)))
+(MaskedNotEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [4] y x (VPMOVVec64x2ToM mask)))
+(MaskedNotEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [4] y x (VPMOVVec32x8ToM mask)))
+(MaskedNotEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [4] y x (VPMOVVec64x4ToM mask)))
+(MaskedNotEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [4] y x (VPMOVVec32x4ToM mask)))
+(MaskedNotEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [4] y x (VPMOVVec8x16ToM mask)))
+(MaskedNotEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [4] y x (VPMOVVec64x8ToM mask)))
diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go
index ff53e46e6ce6f2..b08c5f230ffa44 100644
--- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go
+++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go
@@ -1,10 +1,591 @@
-// Code generated by internal/simd/_gen using 'go run .'; DO NOT EDIT.
-
+// Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT.
 package main
-func simdAMD64Ops(fp11, fp21, fp2m1, fp1m1fp1, fp2m1fp1, fp2m1m1 regInfo) []opData {
+func simdAMD64Ops(fp1fp1, fp2fp1, fp2m1, fp1m1fp1, fp2m1fp1, fp2m1m1 regInfo) []opData {
 	return []opData{
-	// {name: "VPADDB", argLength: 2, reg: fp21, asm: "VPADDB", commutative: true},
-	// etc, generated
+		{name: "VADDPS512", argLength: 2, reg: fp2fp1, asm: "VADDPS", commutative: true, typ: "Vec512"},
+		{name: "VANDPS512", argLength: 2, reg: fp2fp1, asm: "VANDPS", commutative: true, typ: "Vec512"},
+		{name: "VANDNPS512", argLength: 2, reg: fp2fp1, asm: "VANDNPS", commutative: true, typ: "Vec512"},
+		{name: "VRCP14PS512", argLength: 1, reg: fp1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec512"},
+		{name: "VRSQRT14PS512", argLength: 1, reg: fp1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec512"},
+		{name: "VDIVPS512", argLength: 2, reg: fp2fp1, asm: "VDIVPS", commutative: false, typ: "Vec512"},
+		{name: "VANDPSMasked512", argLength: 3, reg: fp2m1fp1, asm: "VANDPS", commutative: true, typ: "Vec512"},
+		{name: "VANDNPSMasked512", argLength: 3, reg: fp2m1fp1, asm: "VANDNPS", commutative: true, typ: "Vec512"},
+		{name: "VRCP14PSMasked512", argLength: 2, reg: fp1m1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec512"},
+		{name: "VRSQRT14PSMasked512", argLength: 2, reg: fp1m1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec512"},
+		{name: "VDIVPSMasked512", argLength: 3, reg: fp2m1fp1, asm: "VDIVPS", commutative: false, typ: "Vec512"},
+		{name: "VMAXPSMasked512", argLength: 3, reg: fp2m1fp1, asm: "VMAXPS", commutative: true, typ: "Vec512"},
+		{name: "VMINPSMasked512", argLength: 3, reg: fp2m1fp1, asm: "VMINPS", commutative: true, typ: "Vec512"},
+		{name: "VMULPSMasked512", argLength: 3, reg: fp2m1fp1, asm: "VMULPS", commutative: true, typ: "Vec512"},
+		{name: "VSCALEFPSMasked512", argLength: 3, reg: fp2m1fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec512"},
+		{name: "VORPSMasked512", argLength: 3, reg: fp2m1fp1, asm: "VORPS", commutative: true, typ: "Vec512"},
+		{name: "VSQRTPSMasked512", argLength: 2, reg: fp1m1fp1, asm: "VSQRTPS", commutative: false, typ: "Vec512"},
+		{name: "VADDPSMasked512", argLength: 3, reg: fp2m1fp1, asm: "VADDPS", commutative: false, typ: "Vec512"},
+		{name: "VXORPSMasked512", argLength: 3, reg: fp2m1fp1, asm: "VXORPS", commutative: true, typ: "Vec512"},
+		{name: "VMAXPS512", argLength: 2, reg: fp2fp1, asm: "VMAXPS", commutative: true, typ: "Vec512"},
+		{name: "VMINPS512", argLength: 2, reg: fp2fp1, asm: "VMINPS", commutative: true, typ: "Vec512"},
+		{name: "VMULPS512", argLength: 2, reg: fp2fp1, asm: "VMULPS", commutative: true, typ: "Vec512"},
+		{name: "VSCALEFPS512", argLength: 2, reg: fp2fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec512"},
+		{name: "VORPS512", argLength: 2, reg: fp2fp1, asm: "VORPS", commutative: true, typ: "Vec512"},
+		{name: "VSQRTPS512", argLength: 1, reg: fp1fp1, asm: "VSQRTPS", commutative: false, typ: "Vec512"},
+		{name: "VXORPS512", argLength: 2, reg: fp2fp1, asm: "VXORPS", commutative: true, typ: "Vec512"},
+		{name: "VANDPS128", argLength: 2, reg: fp2fp1, asm: "VANDPS", commutative: true, typ: "Vec128"},
+		{name: "VANDNPS128", argLength: 2, reg: fp2fp1, asm: "VANDNPS", commutative: true, typ: "Vec128"},
+		{name: "VRCP14PS128", argLength: 1, reg: fp1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec128"},
+		{name: "VRSQRTPS128", argLength: 1, reg: fp1fp1, asm: "VRSQRTPS", commutative: false, typ: "Vec128"},
+		{name: "VDIVPS128", argLength: 2, reg: fp2fp1, asm: "VDIVPS", commutative: false, typ: "Vec128"},
+		{name: 
"VADDPSMasked128", argLength: 3, reg: fp2m1fp1, asm: "VADDPS", commutative: true, typ: "Vec128"}, + {name: "VANDPSMasked128", argLength: 3, reg: fp2m1fp1, asm: "VANDPS", commutative: true, typ: "Vec128"}, + {name: "VANDNPSMasked128", argLength: 3, reg: fp2m1fp1, asm: "VANDNPS", commutative: true, typ: "Vec128"}, + {name: "VRCP14PSMasked128", argLength: 2, reg: fp1m1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec128"}, + {name: "VRSQRT14PSMasked128", argLength: 2, reg: fp1m1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec128"}, + {name: "VDIVPSMasked128", argLength: 3, reg: fp2m1fp1, asm: "VDIVPS", commutative: false, typ: "Vec128"}, + {name: "VMAXPSMasked128", argLength: 3, reg: fp2m1fp1, asm: "VMAXPS", commutative: true, typ: "Vec128"}, + {name: "VMINPSMasked128", argLength: 3, reg: fp2m1fp1, asm: "VMINPS", commutative: true, typ: "Vec128"}, + {name: "VMULPSMasked128", argLength: 3, reg: fp2m1fp1, asm: "VMULPS", commutative: true, typ: "Vec128"}, + {name: "VSCALEFPSMasked128", argLength: 3, reg: fp2m1fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec128"}, + {name: "VORPSMasked128", argLength: 3, reg: fp2m1fp1, asm: "VORPS", commutative: true, typ: "Vec128"}, + {name: "VSQRTPSMasked128", argLength: 2, reg: fp1m1fp1, asm: "VSQRTPS", commutative: false, typ: "Vec128"}, + {name: "VXORPSMasked128", argLength: 3, reg: fp2m1fp1, asm: "VXORPS", commutative: true, typ: "Vec128"}, + {name: "VMAXPS128", argLength: 2, reg: fp2fp1, asm: "VMAXPS", commutative: true, typ: "Vec128"}, + {name: "VMINPS128", argLength: 2, reg: fp2fp1, asm: "VMINPS", commutative: true, typ: "Vec128"}, + {name: "VMULPS128", argLength: 2, reg: fp2fp1, asm: "VMULPS", commutative: true, typ: "Vec128"}, + {name: "VSCALEFPS128", argLength: 2, reg: fp2fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec128"}, + {name: "VORPS128", argLength: 2, reg: fp2fp1, asm: "VORPS", commutative: true, typ: "Vec128"}, + {name: "VHADDPS128", argLength: 2, reg: fp2fp1, asm: "VHADDPS", commutative: false, typ: "Vec128"}, + {name: "VHSUBPS128", argLength: 2, reg: fp2fp1, asm: "VHSUBPS", commutative: false, typ: "Vec128"}, + {name: "VSQRTPS128", argLength: 1, reg: fp1fp1, asm: "VSQRTPS", commutative: false, typ: "Vec128"}, + {name: "VADDPS128", argLength: 2, reg: fp2fp1, asm: "VADDPS", commutative: false, typ: "Vec128"}, + {name: "VXORPS128", argLength: 2, reg: fp2fp1, asm: "VXORPS", commutative: true, typ: "Vec128"}, + {name: "VADDPS256", argLength: 2, reg: fp2fp1, asm: "VADDPS", commutative: true, typ: "Vec256"}, + {name: "VANDPS256", argLength: 2, reg: fp2fp1, asm: "VANDPS", commutative: true, typ: "Vec256"}, + {name: "VANDNPS256", argLength: 2, reg: fp2fp1, asm: "VANDNPS", commutative: true, typ: "Vec256"}, + {name: "VRCP14PS256", argLength: 1, reg: fp1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec256"}, + {name: "VRSQRTPS256", argLength: 1, reg: fp1fp1, asm: "VRSQRTPS", commutative: false, typ: "Vec256"}, + {name: "VDIVPS256", argLength: 2, reg: fp2fp1, asm: "VDIVPS", commutative: false, typ: "Vec256"}, + {name: "VANDPSMasked256", argLength: 3, reg: fp2m1fp1, asm: "VANDPS", commutative: true, typ: "Vec256"}, + {name: "VANDNPSMasked256", argLength: 3, reg: fp2m1fp1, asm: "VANDNPS", commutative: true, typ: "Vec256"}, + {name: "VRCP14PSMasked256", argLength: 2, reg: fp1m1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec256"}, + {name: "VRSQRT14PSMasked256", argLength: 2, reg: fp1m1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec256"}, + {name: "VDIVPSMasked256", argLength: 3, reg: fp2m1fp1, asm: "VDIVPS", commutative: false, 
typ: "Vec256"}, + {name: "VMAXPSMasked256", argLength: 3, reg: fp2m1fp1, asm: "VMAXPS", commutative: true, typ: "Vec256"}, + {name: "VMINPSMasked256", argLength: 3, reg: fp2m1fp1, asm: "VMINPS", commutative: true, typ: "Vec256"}, + {name: "VMULPSMasked256", argLength: 3, reg: fp2m1fp1, asm: "VMULPS", commutative: true, typ: "Vec256"}, + {name: "VSCALEFPSMasked256", argLength: 3, reg: fp2m1fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec256"}, + {name: "VORPSMasked256", argLength: 3, reg: fp2m1fp1, asm: "VORPS", commutative: true, typ: "Vec256"}, + {name: "VSQRTPSMasked256", argLength: 2, reg: fp1m1fp1, asm: "VSQRTPS", commutative: false, typ: "Vec256"}, + {name: "VADDPSMasked256", argLength: 3, reg: fp2m1fp1, asm: "VADDPS", commutative: false, typ: "Vec256"}, + {name: "VXORPSMasked256", argLength: 3, reg: fp2m1fp1, asm: "VXORPS", commutative: true, typ: "Vec256"}, + {name: "VMAXPS256", argLength: 2, reg: fp2fp1, asm: "VMAXPS", commutative: true, typ: "Vec256"}, + {name: "VMINPS256", argLength: 2, reg: fp2fp1, asm: "VMINPS", commutative: true, typ: "Vec256"}, + {name: "VMULPS256", argLength: 2, reg: fp2fp1, asm: "VMULPS", commutative: true, typ: "Vec256"}, + {name: "VSCALEFPS256", argLength: 2, reg: fp2fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec256"}, + {name: "VORPS256", argLength: 2, reg: fp2fp1, asm: "VORPS", commutative: true, typ: "Vec256"}, + {name: "VHADDPS256", argLength: 2, reg: fp2fp1, asm: "VHADDPS", commutative: false, typ: "Vec256"}, + {name: "VHSUBPS256", argLength: 2, reg: fp2fp1, asm: "VHSUBPS", commutative: false, typ: "Vec256"}, + {name: "VSQRTPS256", argLength: 1, reg: fp1fp1, asm: "VSQRTPS", commutative: false, typ: "Vec256"}, + {name: "VXORPS256", argLength: 2, reg: fp2fp1, asm: "VXORPS", commutative: true, typ: "Vec256"}, + {name: "VADDPD128", argLength: 2, reg: fp2fp1, asm: "VADDPD", commutative: true, typ: "Vec128"}, + {name: "VANDPD128", argLength: 2, reg: fp2fp1, asm: "VANDPD", commutative: true, typ: "Vec128"}, + {name: "VANDNPD128", argLength: 2, reg: fp2fp1, asm: "VANDNPD", commutative: true, typ: "Vec128"}, + {name: "VRCP14PD128", argLength: 1, reg: fp1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec128"}, + {name: "VRSQRT14PD128", argLength: 1, reg: fp1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec128"}, + {name: "VDIVPD128", argLength: 2, reg: fp2fp1, asm: "VDIVPD", commutative: false, typ: "Vec128"}, + {name: "VADDPDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VADDPD", commutative: true, typ: "Vec128"}, + {name: "VANDPDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VANDPD", commutative: true, typ: "Vec128"}, + {name: "VANDNPDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VANDNPD", commutative: true, typ: "Vec128"}, + {name: "VRCP14PDMasked128", argLength: 2, reg: fp1m1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec128"}, + {name: "VRSQRT14PDMasked128", argLength: 2, reg: fp1m1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec128"}, + {name: "VDIVPDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VDIVPD", commutative: false, typ: "Vec128"}, + {name: "VMAXPDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VMAXPD", commutative: true, typ: "Vec128"}, + {name: "VMINPDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VMINPD", commutative: true, typ: "Vec128"}, + {name: "VMULPDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VMULPD", commutative: true, typ: "Vec128"}, + {name: "VSCALEFPDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VSCALEFPD", commutative: false, typ: "Vec128"}, + {name: "VORPDMasked128", argLength: 3, reg: fp2m1fp1, asm: 
"VORPD", commutative: true, typ: "Vec128"}, + {name: "VSQRTPDMasked128", argLength: 2, reg: fp1m1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec128"}, + {name: "VXORPDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VXORPD", commutative: true, typ: "Vec128"}, + {name: "VMAXPD128", argLength: 2, reg: fp2fp1, asm: "VMAXPD", commutative: true, typ: "Vec128"}, + {name: "VMINPD128", argLength: 2, reg: fp2fp1, asm: "VMINPD", commutative: true, typ: "Vec128"}, + {name: "VMULPD128", argLength: 2, reg: fp2fp1, asm: "VMULPD", commutative: true, typ: "Vec128"}, + {name: "VSCALEFPD128", argLength: 2, reg: fp2fp1, asm: "VSCALEFPD", commutative: false, typ: "Vec128"}, + {name: "VORPD128", argLength: 2, reg: fp2fp1, asm: "VORPD", commutative: true, typ: "Vec128"}, + {name: "VHADDPD128", argLength: 2, reg: fp2fp1, asm: "VHADDPD", commutative: false, typ: "Vec128"}, + {name: "VHSUBPD128", argLength: 2, reg: fp2fp1, asm: "VHSUBPD", commutative: false, typ: "Vec128"}, + {name: "VSQRTPD128", argLength: 1, reg: fp1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec128"}, + {name: "VXORPD128", argLength: 2, reg: fp2fp1, asm: "VXORPD", commutative: true, typ: "Vec128"}, + {name: "VADDPD256", argLength: 2, reg: fp2fp1, asm: "VADDPD", commutative: true, typ: "Vec256"}, + {name: "VANDPD256", argLength: 2, reg: fp2fp1, asm: "VANDPD", commutative: true, typ: "Vec256"}, + {name: "VANDNPD256", argLength: 2, reg: fp2fp1, asm: "VANDNPD", commutative: true, typ: "Vec256"}, + {name: "VRCP14PD256", argLength: 1, reg: fp1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec256"}, + {name: "VRSQRT14PD256", argLength: 1, reg: fp1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec256"}, + {name: "VDIVPD256", argLength: 2, reg: fp2fp1, asm: "VDIVPD", commutative: false, typ: "Vec256"}, + {name: "VANDPDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VANDPD", commutative: true, typ: "Vec256"}, + {name: "VANDNPDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VANDNPD", commutative: true, typ: "Vec256"}, + {name: "VRCP14PDMasked256", argLength: 2, reg: fp1m1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec256"}, + {name: "VRSQRT14PDMasked256", argLength: 2, reg: fp1m1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec256"}, + {name: "VDIVPDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VDIVPD", commutative: false, typ: "Vec256"}, + {name: "VMAXPDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VMAXPD", commutative: true, typ: "Vec256"}, + {name: "VMINPDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VMINPD", commutative: true, typ: "Vec256"}, + {name: "VMULPDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VMULPD", commutative: true, typ: "Vec256"}, + {name: "VSCALEFPDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VSCALEFPD", commutative: false, typ: "Vec256"}, + {name: "VORPDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VORPD", commutative: true, typ: "Vec256"}, + {name: "VSQRTPDMasked256", argLength: 2, reg: fp1m1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec256"}, + {name: "VADDPDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VADDPD", commutative: false, typ: "Vec256"}, + {name: "VXORPDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VXORPD", commutative: true, typ: "Vec256"}, + {name: "VMAXPD256", argLength: 2, reg: fp2fp1, asm: "VMAXPD", commutative: true, typ: "Vec256"}, + {name: "VMINPD256", argLength: 2, reg: fp2fp1, asm: "VMINPD", commutative: true, typ: "Vec256"}, + {name: "VMULPD256", argLength: 2, reg: fp2fp1, asm: "VMULPD", commutative: true, typ: "Vec256"}, + {name: "VSCALEFPD256", argLength: 2, reg: fp2fp1, asm: 
"VSCALEFPD", commutative: false, typ: "Vec256"}, + {name: "VORPD256", argLength: 2, reg: fp2fp1, asm: "VORPD", commutative: true, typ: "Vec256"}, + {name: "VHADDPD256", argLength: 2, reg: fp2fp1, asm: "VHADDPD", commutative: false, typ: "Vec256"}, + {name: "VHSUBPD256", argLength: 2, reg: fp2fp1, asm: "VHSUBPD", commutative: false, typ: "Vec256"}, + {name: "VSQRTPD256", argLength: 1, reg: fp1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec256"}, + {name: "VXORPD256", argLength: 2, reg: fp2fp1, asm: "VXORPD", commutative: true, typ: "Vec256"}, + {name: "VANDPD512", argLength: 2, reg: fp2fp1, asm: "VANDPD", commutative: true, typ: "Vec512"}, + {name: "VANDNPD512", argLength: 2, reg: fp2fp1, asm: "VANDNPD", commutative: true, typ: "Vec512"}, + {name: "VRCP14PD512", argLength: 1, reg: fp1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec512"}, + {name: "VRSQRT14PD512", argLength: 1, reg: fp1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec512"}, + {name: "VDIVPD512", argLength: 2, reg: fp2fp1, asm: "VDIVPD", commutative: false, typ: "Vec512"}, + {name: "VANDPDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VANDPD", commutative: true, typ: "Vec512"}, + {name: "VANDNPDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VANDNPD", commutative: true, typ: "Vec512"}, + {name: "VRCP14PDMasked512", argLength: 2, reg: fp1m1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec512"}, + {name: "VRSQRT14PDMasked512", argLength: 2, reg: fp1m1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec512"}, + {name: "VDIVPDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VDIVPD", commutative: false, typ: "Vec512"}, + {name: "VMAXPDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VMAXPD", commutative: true, typ: "Vec512"}, + {name: "VMINPDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VMINPD", commutative: true, typ: "Vec512"}, + {name: "VMULPDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VMULPD", commutative: true, typ: "Vec512"}, + {name: "VSCALEFPDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VSCALEFPD", commutative: false, typ: "Vec512"}, + {name: "VORPDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VORPD", commutative: true, typ: "Vec512"}, + {name: "VSQRTPDMasked512", argLength: 2, reg: fp1m1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec512"}, + {name: "VADDPDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VADDPD", commutative: false, typ: "Vec512"}, + {name: "VXORPDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VXORPD", commutative: true, typ: "Vec512"}, + {name: "VMAXPD512", argLength: 2, reg: fp2fp1, asm: "VMAXPD", commutative: true, typ: "Vec512"}, + {name: "VMINPD512", argLength: 2, reg: fp2fp1, asm: "VMINPD", commutative: true, typ: "Vec512"}, + {name: "VMULPD512", argLength: 2, reg: fp2fp1, asm: "VMULPD", commutative: true, typ: "Vec512"}, + {name: "VSCALEFPD512", argLength: 2, reg: fp2fp1, asm: "VSCALEFPD", commutative: false, typ: "Vec512"}, + {name: "VORPD512", argLength: 2, reg: fp2fp1, asm: "VORPD", commutative: true, typ: "Vec512"}, + {name: "VSQRTPD512", argLength: 1, reg: fp1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec512"}, + {name: "VADDPD512", argLength: 2, reg: fp2fp1, asm: "VADDPD", commutative: false, typ: "Vec512"}, + {name: "VXORPD512", argLength: 2, reg: fp2fp1, asm: "VXORPD", commutative: true, typ: "Vec512"}, + {name: "VPABSW256", argLength: 1, reg: fp1fp1, asm: "VPABSW", commutative: false, typ: "Vec256"}, + {name: "VPADDW256", argLength: 2, reg: fp2fp1, asm: "VPADDW", commutative: true, typ: "Vec256"}, + {name: "VPCMPEQW256", argLength: 2, reg: fp2fp1, asm: "VPCMPEQW", 
commutative: true, typ: "Vec256"}, + {name: "VPCMPGTW256", argLength: 2, reg: fp2fp1, asm: "VPCMPGTW", commutative: false, typ: "Vec256"}, + {name: "VPABSWMasked256", argLength: 2, reg: fp1m1fp1, asm: "VPABSW", commutative: false, typ: "Vec256"}, + {name: "VPADDWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPADDW", commutative: true, typ: "Vec256"}, + {name: "VPCMPEQWMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPEQW", commutative: true, typ: "Mask"}, + {name: "VPCMPGTWMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPGTW", commutative: false, typ: "Mask"}, + {name: "VPMAXSWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSW", commutative: true, typ: "Vec256"}, + {name: "VPMINSWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMINSW", commutative: true, typ: "Vec256"}, + {name: "VPMULHWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMULHW", commutative: true, typ: "Vec256"}, + {name: "VPMULLWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMULLW", commutative: true, typ: "Vec256"}, + {name: "VPADDSWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPADDSW", commutative: true, typ: "Vec256"}, + {name: "VPSUBSWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPSUBSW", commutative: false, typ: "Vec256"}, + {name: "VPSUBWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPSUBW", commutative: false, typ: "Vec256"}, + {name: "VPMAXSW256", argLength: 2, reg: fp2fp1, asm: "VPMAXSW", commutative: true, typ: "Vec256"}, + {name: "VPMINSW256", argLength: 2, reg: fp2fp1, asm: "VPMINSW", commutative: true, typ: "Vec256"}, + {name: "VPMULHW256", argLength: 2, reg: fp2fp1, asm: "VPMULHW", commutative: true, typ: "Vec256"}, + {name: "VPMULLW256", argLength: 2, reg: fp2fp1, asm: "VPMULLW", commutative: true, typ: "Vec256"}, + {name: "VPHSUBW256", argLength: 2, reg: fp2fp1, asm: "VPHSUBW", commutative: false, typ: "Vec256"}, + {name: "VPHADDSW256", argLength: 2, reg: fp2fp1, asm: "VPHADDSW", commutative: false, typ: "Vec256"}, + {name: "VPHSUBSW256", argLength: 2, reg: fp2fp1, asm: "VPHSUBSW", commutative: false, typ: "Vec256"}, + {name: "VPSUBSW256", argLength: 2, reg: fp2fp1, asm: "VPSUBSW", commutative: false, typ: "Vec256"}, + {name: "VPSIGNW256", argLength: 2, reg: fp2fp1, asm: "VPSIGNW", commutative: false, typ: "Vec256"}, + {name: "VPSUBW256", argLength: 2, reg: fp2fp1, asm: "VPSUBW", commutative: false, typ: "Vec256"}, + {name: "VPABSW512", argLength: 1, reg: fp1fp1, asm: "VPABSW", commutative: false, typ: "Vec512"}, + {name: "VPADDW512", argLength: 2, reg: fp2fp1, asm: "VPADDW", commutative: true, typ: "Vec512"}, + {name: "VPCMPEQW512", argLength: 2, reg: fp2m1, asm: "VPCMPEQW", commutative: true, typ: "Mask"}, + {name: "VPCMPGTW512", argLength: 2, reg: fp2m1, asm: "VPCMPGTW", commutative: false, typ: "Mask"}, + {name: "VPABSWMasked512", argLength: 2, reg: fp1m1fp1, asm: "VPABSW", commutative: false, typ: "Vec512"}, + {name: "VPCMPEQWMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPEQW", commutative: true, typ: "Mask"}, + {name: "VPCMPGTWMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPGTW", commutative: false, typ: "Mask"}, + {name: "VPMAXSWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSW", commutative: true, typ: "Vec512"}, + {name: "VPMINSWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMINSW", commutative: true, typ: "Vec512"}, + {name: "VPMULHWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMULHW", commutative: true, typ: "Vec512"}, + {name: "VPMULLWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMULLW", commutative: true, typ: "Vec512"}, + {name: "VPMAXSW512", argLength: 2, reg: 
fp2fp1, asm: "VPMAXSW", commutative: true, typ: "Vec512"}, + {name: "VPMINSW512", argLength: 2, reg: fp2fp1, asm: "VPMINSW", commutative: true, typ: "Vec512"}, + {name: "VPMULHW512", argLength: 2, reg: fp2fp1, asm: "VPMULHW", commutative: true, typ: "Vec512"}, + {name: "VPMULLW512", argLength: 2, reg: fp2fp1, asm: "VPMULLW", commutative: true, typ: "Vec512"}, + {name: "VPSUBSW512", argLength: 2, reg: fp2fp1, asm: "VPSUBSW", commutative: false, typ: "Vec512"}, + {name: "VPABSW128", argLength: 1, reg: fp1fp1, asm: "VPABSW", commutative: false, typ: "Vec128"}, + {name: "VPADDW128", argLength: 2, reg: fp2fp1, asm: "VPADDW", commutative: true, typ: "Vec128"}, + {name: "VPCMPEQW128", argLength: 2, reg: fp2fp1, asm: "VPCMPEQW", commutative: true, typ: "Vec128"}, + {name: "VPCMPGTW128", argLength: 2, reg: fp2fp1, asm: "VPCMPGTW", commutative: false, typ: "Vec128"}, + {name: "VPABSWMasked128", argLength: 2, reg: fp1m1fp1, asm: "VPABSW", commutative: false, typ: "Vec128"}, + {name: "VPCMPEQWMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPEQW", commutative: true, typ: "Mask"}, + {name: "VPCMPGTWMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPGTW", commutative: false, typ: "Mask"}, + {name: "VPMAXSWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSW", commutative: true, typ: "Vec128"}, + {name: "VPMINSWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMINSW", commutative: true, typ: "Vec128"}, + {name: "VPMULHWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMULHW", commutative: true, typ: "Vec128"}, + {name: "VPMULLWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMULLW", commutative: true, typ: "Vec128"}, + {name: "VPOPCNTWMasked128", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTW", commutative: false, typ: "Vec128"}, + {name: "VPSUBSWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPSUBSW", commutative: false, typ: "Vec128"}, + {name: "VPMAXSW128", argLength: 2, reg: fp2fp1, asm: "VPMAXSW", commutative: true, typ: "Vec128"}, + {name: "VPMINSW128", argLength: 2, reg: fp2fp1, asm: "VPMINSW", commutative: true, typ: "Vec128"}, + {name: "VPMULHW128", argLength: 2, reg: fp2fp1, asm: "VPMULHW", commutative: true, typ: "Vec128"}, + {name: "VPMULLW128", argLength: 2, reg: fp2fp1, asm: "VPMULLW", commutative: true, typ: "Vec128"}, + {name: "VPHSUBW128", argLength: 2, reg: fp2fp1, asm: "VPHSUBW", commutative: false, typ: "Vec128"}, + {name: "VPHADDSW128", argLength: 2, reg: fp2fp1, asm: "VPHADDSW", commutative: false, typ: "Vec128"}, + {name: "VPHSUBSW128", argLength: 2, reg: fp2fp1, asm: "VPHSUBSW", commutative: false, typ: "Vec128"}, + {name: "VPSIGNW128", argLength: 2, reg: fp2fp1, asm: "VPSIGNW", commutative: false, typ: "Vec128"}, + {name: "VPABSD512", argLength: 1, reg: fp1fp1, asm: "VPABSD", commutative: false, typ: "Vec512"}, + {name: "VPANDD512", argLength: 2, reg: fp2fp1, asm: "VPANDD", commutative: true, typ: "Vec512"}, + {name: "VPABSDMasked512", argLength: 2, reg: fp1m1fp1, asm: "VPABSD", commutative: false, typ: "Vec512"}, + {name: "VPMAXSDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSD", commutative: true, typ: "Vec512"}, + {name: "VPMINSDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMINSD", commutative: true, typ: "Vec512"}, + {name: "VPMULLDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMULLD", commutative: true, typ: "Vec512"}, + {name: "VPOPCNTDMasked512", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTD", commutative: false, typ: "Vec512"}, + {name: "VPSUBDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPSUBD", commutative: false, typ: "Vec512"}, + {name: "VPXORDMasked512", 
argLength: 3, reg: fp2m1fp1, asm: "VPXORD", commutative: true, typ: "Vec512"}, + {name: "VPMAXSD512", argLength: 2, reg: fp2fp1, asm: "VPMAXSD", commutative: true, typ: "Vec512"}, + {name: "VPMINSD512", argLength: 2, reg: fp2fp1, asm: "VPMINSD", commutative: true, typ: "Vec512"}, + {name: "VPMULLD512", argLength: 2, reg: fp2fp1, asm: "VPMULLD", commutative: true, typ: "Vec512"}, + {name: "VPORD512", argLength: 2, reg: fp2fp1, asm: "VPORD", commutative: true, typ: "Vec512"}, + {name: "VPXORD512", argLength: 2, reg: fp2fp1, asm: "VPXORD", commutative: true, typ: "Vec512"}, + {name: "VPABSD128", argLength: 1, reg: fp1fp1, asm: "VPABSD", commutative: false, typ: "Vec128"}, + {name: "VPCMPEQD128", argLength: 2, reg: fp2fp1, asm: "VPCMPEQD", commutative: true, typ: "Vec128"}, + {name: "VPCMPGTD128", argLength: 2, reg: fp2fp1, asm: "VPCMPGTD", commutative: false, typ: "Vec128"}, + {name: "VPABSDMasked128", argLength: 2, reg: fp1m1fp1, asm: "VPABSD", commutative: false, typ: "Vec128"}, + {name: "VPANDDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPANDD", commutative: true, typ: "Vec128"}, + {name: "VPMAXSDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSD", commutative: true, typ: "Vec128"}, + {name: "VPMINSDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMINSD", commutative: true, typ: "Vec128"}, + {name: "VPMULLDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMULLD", commutative: true, typ: "Vec128"}, + {name: "VPORDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPORD", commutative: true, typ: "Vec128"}, + {name: "VPOPCNTDMasked128", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTD", commutative: false, typ: "Vec128"}, + {name: "VPSUBDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPSUBD", commutative: false, typ: "Vec128"}, + {name: "VPXORDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPXORD", commutative: true, typ: "Vec128"}, + {name: "VPMAXSD128", argLength: 2, reg: fp2fp1, asm: "VPMAXSD", commutative: true, typ: "Vec128"}, + {name: "VPMINSD128", argLength: 2, reg: fp2fp1, asm: "VPMINSD", commutative: true, typ: "Vec128"}, + {name: "VPMULLD128", argLength: 2, reg: fp2fp1, asm: "VPMULLD", commutative: true, typ: "Vec128"}, + {name: "VPHSUBD128", argLength: 2, reg: fp2fp1, asm: "VPHSUBD", commutative: false, typ: "Vec128"}, + {name: "VPSIGND128", argLength: 2, reg: fp2fp1, asm: "VPSIGND", commutative: false, typ: "Vec128"}, + {name: "VPSUBD128", argLength: 2, reg: fp2fp1, asm: "VPSUBD", commutative: false, typ: "Vec128"}, + {name: "VPABSD256", argLength: 1, reg: fp1fp1, asm: "VPABSD", commutative: false, typ: "Vec256"}, + {name: "VPAND256", argLength: 2, reg: fp2fp1, asm: "VPAND", commutative: true, typ: "Vec256"}, + {name: "VPCMPEQD256", argLength: 2, reg: fp2fp1, asm: "VPCMPEQD", commutative: true, typ: "Vec256"}, + {name: "VPCMPGTD256", argLength: 2, reg: fp2fp1, asm: "VPCMPGTD", commutative: false, typ: "Vec256"}, + {name: "VPABSDMasked256", argLength: 2, reg: fp1m1fp1, asm: "VPABSD", commutative: false, typ: "Vec256"}, + {name: "VPMAXSDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSD", commutative: true, typ: "Vec256"}, + {name: "VPMINSDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMINSD", commutative: true, typ: "Vec256"}, + {name: "VPMULLDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMULLD", commutative: true, typ: "Vec256"}, + {name: "VPORDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPORD", commutative: true, typ: "Vec256"}, + {name: "VPSUBDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPSUBD", commutative: false, typ: "Vec256"}, + {name: "VPMAXSD256", argLength: 2, 
reg: fp2fp1, asm: "VPMAXSD", commutative: true, typ: "Vec256"}, + {name: "VPMINSD256", argLength: 2, reg: fp2fp1, asm: "VPMINSD", commutative: true, typ: "Vec256"}, + {name: "VPMULLD256", argLength: 2, reg: fp2fp1, asm: "VPMULLD", commutative: true, typ: "Vec256"}, + {name: "VPHSUBD256", argLength: 2, reg: fp2fp1, asm: "VPHSUBD", commutative: false, typ: "Vec256"}, + {name: "VPOPCNTD256", argLength: 1, reg: fp1fp1, asm: "VPOPCNTD", commutative: false, typ: "Vec256"}, + {name: "VPSIGND256", argLength: 2, reg: fp2fp1, asm: "VPSIGND", commutative: false, typ: "Vec256"}, + {name: "VPSUBD256", argLength: 2, reg: fp2fp1, asm: "VPSUBD", commutative: false, typ: "Vec256"}, + {name: "VPABSQ128", argLength: 1, reg: fp1fp1, asm: "VPABSQ", commutative: false, typ: "Vec128"}, + {name: "VPCMPEQQ128", argLength: 2, reg: fp2fp1, asm: "VPCMPEQQ", commutative: true, typ: "Vec128"}, + {name: "VPCMPGTQ128", argLength: 2, reg: fp2m1, asm: "VPCMPGTQ", commutative: false, typ: "Mask"}, + {name: "VPABSQMasked128", argLength: 2, reg: fp1m1fp1, asm: "VPABSQ", commutative: false, typ: "Vec128"}, + {name: "VPANDQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPANDQ", commutative: true, typ: "Vec128"}, + {name: "VPANDNQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPANDNQ", commutative: true, typ: "Vec128"}, + {name: "VPCMPEQQMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPEQQ", commutative: true, typ: "Mask"}, + {name: "VPCMPGTQMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPGTQ", commutative: false, typ: "Mask"}, + {name: "VPMAXSQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec128"}, + {name: "VPMINSQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMINSQ", commutative: true, typ: "Vec128"}, + {name: "VPMULDQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMULDQ", commutative: true, typ: "Vec128"}, + {name: "VPMULLQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMULLQ", commutative: true, typ: "Vec128"}, + {name: "VPSUBQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPSUBQ", commutative: false, typ: "Vec128"}, + {name: "VPMAXSQ128", argLength: 2, reg: fp2fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec128"}, + {name: "VPMINSQ128", argLength: 2, reg: fp2fp1, asm: "VPMINSQ", commutative: true, typ: "Vec128"}, + {name: "VPMULDQ128", argLength: 2, reg: fp2fp1, asm: "VPMULDQ", commutative: true, typ: "Vec128"}, + {name: "VPMULLQ128", argLength: 2, reg: fp2fp1, asm: "VPMULLQ", commutative: true, typ: "Vec128"}, + {name: "VPOR128", argLength: 2, reg: fp2fp1, asm: "VPOR", commutative: true, typ: "Vec128"}, + {name: "VPABSQ256", argLength: 1, reg: fp1fp1, asm: "VPABSQ", commutative: false, typ: "Vec256"}, + {name: "VPADDQ256", argLength: 2, reg: fp2fp1, asm: "VPADDQ", commutative: true, typ: "Vec256"}, + {name: "VPCMPEQQ256", argLength: 2, reg: fp2fp1, asm: "VPCMPEQQ", commutative: true, typ: "Vec256"}, + {name: "VPCMPGTQ256", argLength: 2, reg: fp2fp1, asm: "VPCMPGTQ", commutative: false, typ: "Vec256"}, + {name: "VPABSQMasked256", argLength: 2, reg: fp1m1fp1, asm: "VPABSQ", commutative: false, typ: "Vec256"}, + {name: "VPANDQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPANDQ", commutative: true, typ: "Vec256"}, + {name: "VPANDNQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPANDNQ", commutative: true, typ: "Vec256"}, + {name: "VPCMPEQQMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPEQQ", commutative: true, typ: "Mask"}, + {name: "VPCMPGTQMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPGTQ", commutative: false, typ: "Mask"}, + {name: "VPMAXSQMasked256", argLength: 3, 
reg: fp2m1fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec256"}, + {name: "VPMINSQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMINSQ", commutative: true, typ: "Vec256"}, + {name: "VPMULDQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMULDQ", commutative: true, typ: "Vec256"}, + {name: "VPMULLQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMULLQ", commutative: true, typ: "Vec256"}, + {name: "VPORQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPORQ", commutative: true, typ: "Vec256"}, + {name: "VPOPCNTQMasked256", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTQ", commutative: false, typ: "Vec256"}, + {name: "VPSUBQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPSUBQ", commutative: false, typ: "Vec256"}, + {name: "VPMAXSQ256", argLength: 2, reg: fp2fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec256"}, + {name: "VPMINSQ256", argLength: 2, reg: fp2fp1, asm: "VPMINSQ", commutative: true, typ: "Vec256"}, + {name: "VPMULDQ256", argLength: 2, reg: fp2fp1, asm: "VPMULDQ", commutative: true, typ: "Vec256"}, + {name: "VPMULLQ256", argLength: 2, reg: fp2fp1, asm: "VPMULLQ", commutative: true, typ: "Vec256"}, + {name: "VPOR256", argLength: 2, reg: fp2fp1, asm: "VPOR", commutative: true, typ: "Vec256"}, + {name: "VPOPCNTQ256", argLength: 1, reg: fp1fp1, asm: "VPOPCNTQ", commutative: false, typ: "Vec256"}, + {name: "VPSUBQ256", argLength: 2, reg: fp2fp1, asm: "VPSUBQ", commutative: false, typ: "Vec256"}, + {name: "VPABSQ512", argLength: 1, reg: fp1fp1, asm: "VPABSQ", commutative: false, typ: "Vec512"}, + {name: "VPANDQ512", argLength: 2, reg: fp2fp1, asm: "VPANDQ", commutative: true, typ: "Vec512"}, + {name: "VPCMPEQQ512", argLength: 2, reg: fp2m1, asm: "VPCMPEQQ", commutative: true, typ: "Mask"}, + {name: "VPCMPGTQ512", argLength: 2, reg: fp2m1, asm: "VPCMPGTQ", commutative: false, typ: "Mask"}, + {name: "VPABSQMasked512", argLength: 2, reg: fp1m1fp1, asm: "VPABSQ", commutative: false, typ: "Vec512"}, + {name: "VPADDQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPADDQ", commutative: true, typ: "Vec512"}, + {name: "VPANDNQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPANDNQ", commutative: true, typ: "Vec512"}, + {name: "VPCMPEQQMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPEQQ", commutative: true, typ: "Mask"}, + {name: "VPCMPGTQMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPGTQ", commutative: false, typ: "Mask"}, + {name: "VPMAXSQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec512"}, + {name: "VPMINSQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMINSQ", commutative: true, typ: "Vec512"}, + {name: "VPMULDQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMULDQ", commutative: true, typ: "Vec512"}, + {name: "VPMULLQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMULLQ", commutative: true, typ: "Vec512"}, + {name: "VPMAXSQ512", argLength: 2, reg: fp2fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec512"}, + {name: "VPMINSQ512", argLength: 2, reg: fp2fp1, asm: "VPMINSQ", commutative: true, typ: "Vec512"}, + {name: "VPMULDQ512", argLength: 2, reg: fp2fp1, asm: "VPMULDQ", commutative: true, typ: "Vec512"}, + {name: "VPMULLQ512", argLength: 2, reg: fp2fp1, asm: "VPMULLQ", commutative: true, typ: "Vec512"}, + {name: "VPOPCNTQ512", argLength: 1, reg: fp1fp1, asm: "VPOPCNTQ", commutative: false, typ: "Vec512"}, + {name: "VPSUBQ512", argLength: 2, reg: fp2fp1, asm: "VPSUBQ", commutative: false, typ: "Vec512"}, + {name: "VPXORQ512", argLength: 2, reg: fp2fp1, asm: "VPXORQ", commutative: true, typ: "Vec512"}, + {name: "VPABSB128", argLength: 1, reg: fp1fp1, 
asm: "VPABSB", commutative: false, typ: "Vec128"}, + {name: "VPADDB128", argLength: 2, reg: fp2fp1, asm: "VPADDB", commutative: true, typ: "Vec128"}, + {name: "VPAND128", argLength: 2, reg: fp2fp1, asm: "VPAND", commutative: true, typ: "Vec128"}, + {name: "VPCMPEQB128", argLength: 2, reg: fp2fp1, asm: "VPCMPEQB", commutative: true, typ: "Vec128"}, + {name: "VPCMPGTB128", argLength: 2, reg: fp2fp1, asm: "VPCMPGTB", commutative: false, typ: "Vec128"}, + {name: "VPABSBMasked128", argLength: 2, reg: fp1m1fp1, asm: "VPABSB", commutative: false, typ: "Vec128"}, + {name: "VPADDBMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPADDB", commutative: true, typ: "Vec128"}, + {name: "VPMAXSBMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSB", commutative: true, typ: "Vec128"}, + {name: "VPMINSBMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMINSB", commutative: true, typ: "Vec128"}, + {name: "VPSUBSBMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPSUBSB", commutative: false, typ: "Vec128"}, + {name: "VPMAXSB128", argLength: 2, reg: fp2fp1, asm: "VPMAXSB", commutative: true, typ: "Vec128"}, + {name: "VPMINSB128", argLength: 2, reg: fp2fp1, asm: "VPMINSB", commutative: true, typ: "Vec128"}, + {name: "VPSIGNB128", argLength: 2, reg: fp2fp1, asm: "VPSIGNB", commutative: false, typ: "Vec128"}, + {name: "VPSUBB128", argLength: 2, reg: fp2fp1, asm: "VPSUBB", commutative: false, typ: "Vec128"}, + {name: "VPABSB256", argLength: 1, reg: fp1fp1, asm: "VPABSB", commutative: false, typ: "Vec256"}, + {name: "VPADDB256", argLength: 2, reg: fp2fp1, asm: "VPADDB", commutative: true, typ: "Vec256"}, + {name: "VPANDN256", argLength: 2, reg: fp2fp1, asm: "VPANDN", commutative: true, typ: "Vec256"}, + {name: "VPCMPEQB256", argLength: 2, reg: fp2fp1, asm: "VPCMPEQB", commutative: true, typ: "Vec256"}, + {name: "VPCMPGTB256", argLength: 2, reg: fp2fp1, asm: "VPCMPGTB", commutative: false, typ: "Vec256"}, + {name: "VPABSBMasked256", argLength: 2, reg: fp1m1fp1, asm: "VPABSB", commutative: false, typ: "Vec256"}, + {name: "VPMAXSBMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSB", commutative: true, typ: "Vec256"}, + {name: "VPMINSBMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMINSB", commutative: true, typ: "Vec256"}, + {name: "VPSUBSBMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPSUBSB", commutative: false, typ: "Vec256"}, + {name: "VPMAXSB256", argLength: 2, reg: fp2fp1, asm: "VPMAXSB", commutative: true, typ: "Vec256"}, + {name: "VPMINSB256", argLength: 2, reg: fp2fp1, asm: "VPMINSB", commutative: true, typ: "Vec256"}, + {name: "VPOPCNTB256", argLength: 1, reg: fp1fp1, asm: "VPOPCNTB", commutative: false, typ: "Vec256"}, + {name: "VPSIGNB256", argLength: 2, reg: fp2fp1, asm: "VPSIGNB", commutative: false, typ: "Vec256"}, + {name: "VPABSB512", argLength: 1, reg: fp1fp1, asm: "VPABSB", commutative: false, typ: "Vec512"}, + {name: "VPABSBMasked512", argLength: 2, reg: fp1m1fp1, asm: "VPABSB", commutative: false, typ: "Vec512"}, + {name: "VPMAXSBMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSB", commutative: true, typ: "Vec512"}, + {name: "VPMINSBMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMINSB", commutative: true, typ: "Vec512"}, + {name: "VPADDSBMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPADDSB", commutative: true, typ: "Vec512"}, + {name: "VPMAXSB512", argLength: 2, reg: fp2fp1, asm: "VPMAXSB", commutative: true, typ: "Vec512"}, + {name: "VPMINSB512", argLength: 2, reg: fp2fp1, asm: "VPMINSB", commutative: true, typ: "Vec512"}, + {name: "VPOPCNTB512", argLength: 1, reg: fp1fp1, asm: "VPOPCNTB", 
commutative: false, typ: "Vec512"}, + {name: "VPSUBSB512", argLength: 2, reg: fp2fp1, asm: "VPSUBSB", commutative: false, typ: "Vec512"}, + {name: "VPSUBB512", argLength: 2, reg: fp2fp1, asm: "VPSUBB", commutative: false, typ: "Vec512"}, + {name: "VPAVGW256", argLength: 2, reg: fp2fp1, asm: "VPAVGW", commutative: true, typ: "Vec256"}, + {name: "VPAVGWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPAVGW", commutative: true, typ: "Vec256"}, + {name: "VPMAXUWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUW", commutative: true, typ: "Vec256"}, + {name: "VPMINUWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMINUW", commutative: true, typ: "Vec256"}, + {name: "VPMULHUWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMULHUW", commutative: true, typ: "Vec256"}, + {name: "VPOPCNTWMasked256", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTW", commutative: false, typ: "Vec256"}, + {name: "VPMAXUW256", argLength: 2, reg: fp2fp1, asm: "VPMAXUW", commutative: true, typ: "Vec256"}, + {name: "VPMINUW256", argLength: 2, reg: fp2fp1, asm: "VPMINUW", commutative: true, typ: "Vec256"}, + {name: "VPMULHUW256", argLength: 2, reg: fp2fp1, asm: "VPMULHUW", commutative: true, typ: "Vec256"}, + {name: "VPHADDW256", argLength: 2, reg: fp2fp1, asm: "VPHADDW", commutative: false, typ: "Vec256"}, + {name: "VPOPCNTW256", argLength: 1, reg: fp1fp1, asm: "VPOPCNTW", commutative: false, typ: "Vec256"}, + {name: "VPADDSW256", argLength: 2, reg: fp2fp1, asm: "VPADDSW", commutative: true, typ: "Vec256"}, + {name: "VPAVGW512", argLength: 2, reg: fp2fp1, asm: "VPAVGW", commutative: true, typ: "Vec512"}, + {name: "VPADDWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPADDW", commutative: true, typ: "Vec512"}, + {name: "VPAVGWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPAVGW", commutative: true, typ: "Vec512"}, + {name: "VPMAXUWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUW", commutative: true, typ: "Vec512"}, + {name: "VPMINUWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMINUW", commutative: true, typ: "Vec512"}, + {name: "VPMULHUWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMULHUW", commutative: true, typ: "Vec512"}, + {name: "VPOPCNTWMasked512", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTW", commutative: false, typ: "Vec512"}, + {name: "VPADDSWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPADDSW", commutative: true, typ: "Vec512"}, + {name: "VPSUBSWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPSUBSW", commutative: false, typ: "Vec512"}, + {name: "VPSUBWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPSUBW", commutative: false, typ: "Vec512"}, + {name: "VPMAXUW512", argLength: 2, reg: fp2fp1, asm: "VPMAXUW", commutative: true, typ: "Vec512"}, + {name: "VPMINUW512", argLength: 2, reg: fp2fp1, asm: "VPMINUW", commutative: true, typ: "Vec512"}, + {name: "VPMULHUW512", argLength: 2, reg: fp2fp1, asm: "VPMULHUW", commutative: true, typ: "Vec512"}, + {name: "VPOPCNTW512", argLength: 1, reg: fp1fp1, asm: "VPOPCNTW", commutative: false, typ: "Vec512"}, + {name: "VPADDSW512", argLength: 2, reg: fp2fp1, asm: "VPADDSW", commutative: true, typ: "Vec512"}, + {name: "VPSUBW512", argLength: 2, reg: fp2fp1, asm: "VPSUBW", commutative: false, typ: "Vec512"}, + {name: "VPAVGW128", argLength: 2, reg: fp2fp1, asm: "VPAVGW", commutative: true, typ: "Vec128"}, + {name: "VPADDWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPADDW", commutative: true, typ: "Vec128"}, + {name: "VPAVGWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPAVGW", commutative: true, typ: "Vec128"}, + {name: "VPMAXUWMasked128", argLength: 3, reg: 
fp2m1fp1, asm: "VPMAXUW", commutative: true, typ: "Vec128"}, + {name: "VPMINUWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMINUW", commutative: true, typ: "Vec128"}, + {name: "VPMULHUWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMULHUW", commutative: true, typ: "Vec128"}, + {name: "VPADDSWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPADDSW", commutative: true, typ: "Vec128"}, + {name: "VPSUBWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPSUBW", commutative: false, typ: "Vec128"}, + {name: "VPMAXUW128", argLength: 2, reg: fp2fp1, asm: "VPMAXUW", commutative: true, typ: "Vec128"}, + {name: "VPMINUW128", argLength: 2, reg: fp2fp1, asm: "VPMINUW", commutative: true, typ: "Vec128"}, + {name: "VPMULHUW128", argLength: 2, reg: fp2fp1, asm: "VPMULHUW", commutative: true, typ: "Vec128"}, + {name: "VPHADDW128", argLength: 2, reg: fp2fp1, asm: "VPHADDW", commutative: false, typ: "Vec128"}, + {name: "VPOPCNTW128", argLength: 1, reg: fp1fp1, asm: "VPOPCNTW", commutative: false, typ: "Vec128"}, + {name: "VPADDSW128", argLength: 2, reg: fp2fp1, asm: "VPADDSW", commutative: true, typ: "Vec128"}, + {name: "VPSUBSW128", argLength: 2, reg: fp2fp1, asm: "VPSUBSW", commutative: false, typ: "Vec128"}, + {name: "VPSUBW128", argLength: 2, reg: fp2fp1, asm: "VPSUBW", commutative: false, typ: "Vec128"}, + {name: "VPADDD512", argLength: 2, reg: fp2fp1, asm: "VPADDD", commutative: true, typ: "Vec512"}, + {name: "VPANDND512", argLength: 2, reg: fp2fp1, asm: "VPANDND", commutative: true, typ: "Vec512"}, + {name: "VPADDDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPADDD", commutative: true, typ: "Vec512"}, + {name: "VPANDDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPANDD", commutative: true, typ: "Vec512"}, + {name: "VPANDNDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPANDND", commutative: true, typ: "Vec512"}, + {name: "VPMAXUDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUD", commutative: true, typ: "Vec512"}, + {name: "VPMINUDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMINUD", commutative: true, typ: "Vec512"}, + {name: "VPORDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPORD", commutative: true, typ: "Vec512"}, + {name: "VPMAXUD512", argLength: 2, reg: fp2fp1, asm: "VPMAXUD", commutative: true, typ: "Vec512"}, + {name: "VPMINUD512", argLength: 2, reg: fp2fp1, asm: "VPMINUD", commutative: true, typ: "Vec512"}, + {name: "VPOPCNTD512", argLength: 1, reg: fp1fp1, asm: "VPOPCNTD", commutative: false, typ: "Vec512"}, + {name: "VPSUBD512", argLength: 2, reg: fp2fp1, asm: "VPSUBD", commutative: false, typ: "Vec512"}, + {name: "VPADDD128", argLength: 2, reg: fp2fp1, asm: "VPADDD", commutative: true, typ: "Vec128"}, + {name: "VPADDDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPADDD", commutative: true, typ: "Vec128"}, + {name: "VPANDNDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPANDND", commutative: true, typ: "Vec128"}, + {name: "VPMAXUDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUD", commutative: true, typ: "Vec128"}, + {name: "VPMINUDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMINUD", commutative: true, typ: "Vec128"}, + {name: "VPMAXUD128", argLength: 2, reg: fp2fp1, asm: "VPMAXUD", commutative: true, typ: "Vec128"}, + {name: "VPMINUD128", argLength: 2, reg: fp2fp1, asm: "VPMINUD", commutative: true, typ: "Vec128"}, + {name: "VPHADDD128", argLength: 2, reg: fp2fp1, asm: "VPHADDD", commutative: false, typ: "Vec128"}, + {name: "VPOPCNTD128", argLength: 1, reg: fp1fp1, asm: "VPOPCNTD", commutative: false, typ: "Vec128"}, + {name: "VPADDD256", argLength: 2, reg: fp2fp1, 
asm: "VPADDD", commutative: true, typ: "Vec256"}, + {name: "VPADDDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPADDD", commutative: true, typ: "Vec256"}, + {name: "VPANDDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPANDD", commutative: true, typ: "Vec256"}, + {name: "VPANDNDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPANDND", commutative: true, typ: "Vec256"}, + {name: "VPMAXUDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUD", commutative: true, typ: "Vec256"}, + {name: "VPMINUDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMINUD", commutative: true, typ: "Vec256"}, + {name: "VPOPCNTDMasked256", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTD", commutative: false, typ: "Vec256"}, + {name: "VPXORDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPXORD", commutative: true, typ: "Vec256"}, + {name: "VPMAXUD256", argLength: 2, reg: fp2fp1, asm: "VPMAXUD", commutative: true, typ: "Vec256"}, + {name: "VPMINUD256", argLength: 2, reg: fp2fp1, asm: "VPMINUD", commutative: true, typ: "Vec256"}, + {name: "VPMULUDQ256", argLength: 2, reg: fp2fp1, asm: "VPMULUDQ", commutative: true, typ: "Vec256"}, + {name: "VPHADDD256", argLength: 2, reg: fp2fp1, asm: "VPHADDD", commutative: false, typ: "Vec256"}, + {name: "VPXOR256", argLength: 2, reg: fp2fp1, asm: "VPXOR", commutative: true, typ: "Vec256"}, + {name: "VPADDQ128", argLength: 2, reg: fp2fp1, asm: "VPADDQ", commutative: true, typ: "Vec128"}, + {name: "VPADDQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPADDQ", commutative: true, typ: "Vec128"}, + {name: "VPMAXUQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUQ", commutative: true, typ: "Vec128"}, + {name: "VPMINUQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMINUQ", commutative: true, typ: "Vec128"}, + {name: "VPMULUDQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMULUDQ", commutative: true, typ: "Vec128"}, + {name: "VPORQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPORQ", commutative: true, typ: "Vec128"}, + {name: "VPOPCNTQMasked128", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTQ", commutative: false, typ: "Vec128"}, + {name: "VPXORQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPXORQ", commutative: true, typ: "Vec128"}, + {name: "VPMAXUQ128", argLength: 2, reg: fp2fp1, asm: "VPMAXUQ", commutative: true, typ: "Vec128"}, + {name: "VPMINUQ128", argLength: 2, reg: fp2fp1, asm: "VPMINUQ", commutative: true, typ: "Vec128"}, + {name: "VPMULUDQ128", argLength: 2, reg: fp2fp1, asm: "VPMULUDQ", commutative: true, typ: "Vec128"}, + {name: "VPOPCNTQ128", argLength: 1, reg: fp1fp1, asm: "VPOPCNTQ", commutative: false, typ: "Vec128"}, + {name: "VPSUBQ128", argLength: 2, reg: fp2fp1, asm: "VPSUBQ", commutative: false, typ: "Vec128"}, + {name: "VPXOR128", argLength: 2, reg: fp2fp1, asm: "VPXOR", commutative: true, typ: "Vec128"}, + {name: "VPADDQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPADDQ", commutative: true, typ: "Vec256"}, + {name: "VPMAXUQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUQ", commutative: true, typ: "Vec256"}, + {name: "VPMINUQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMINUQ", commutative: true, typ: "Vec256"}, + {name: "VPMULUDQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMULUDQ", commutative: true, typ: "Vec256"}, + {name: "VPXORQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPXORQ", commutative: true, typ: "Vec256"}, + {name: "VPMAXUQ256", argLength: 2, reg: fp2fp1, asm: "VPMAXUQ", commutative: true, typ: "Vec256"}, + {name: "VPMINUQ256", argLength: 2, reg: fp2fp1, asm: "VPMINUQ", commutative: true, typ: "Vec256"}, + {name: "VPADDQ512", 
argLength: 2, reg: fp2fp1, asm: "VPADDQ", commutative: true, typ: "Vec512"}, + {name: "VPANDNQ512", argLength: 2, reg: fp2fp1, asm: "VPANDNQ", commutative: true, typ: "Vec512"}, + {name: "VPANDQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPANDQ", commutative: true, typ: "Vec512"}, + {name: "VPMAXUQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUQ", commutative: true, typ: "Vec512"}, + {name: "VPMINUQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMINUQ", commutative: true, typ: "Vec512"}, + {name: "VPMULUDQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMULUDQ", commutative: true, typ: "Vec512"}, + {name: "VPORQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPORQ", commutative: true, typ: "Vec512"}, + {name: "VPOPCNTQMasked512", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTQ", commutative: false, typ: "Vec512"}, + {name: "VPSUBQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPSUBQ", commutative: false, typ: "Vec512"}, + {name: "VPXORQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPXORQ", commutative: true, typ: "Vec512"}, + {name: "VPMAXUQ512", argLength: 2, reg: fp2fp1, asm: "VPMAXUQ", commutative: true, typ: "Vec512"}, + {name: "VPMINUQ512", argLength: 2, reg: fp2fp1, asm: "VPMINUQ", commutative: true, typ: "Vec512"}, + {name: "VPMULUDQ512", argLength: 2, reg: fp2fp1, asm: "VPMULUDQ", commutative: true, typ: "Vec512"}, + {name: "VPORQ512", argLength: 2, reg: fp2fp1, asm: "VPORQ", commutative: true, typ: "Vec512"}, + {name: "VPANDN128", argLength: 2, reg: fp2fp1, asm: "VPANDN", commutative: true, typ: "Vec128"}, + {name: "VPAVGB128", argLength: 2, reg: fp2fp1, asm: "VPAVGB", commutative: true, typ: "Vec128"}, + {name: "VPAVGBMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPAVGB", commutative: true, typ: "Vec128"}, + {name: "VPMAXUBMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUB", commutative: true, typ: "Vec128"}, + {name: "VPMINUBMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMINUB", commutative: true, typ: "Vec128"}, + {name: "VPOPCNTBMasked128", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTB", commutative: false, typ: "Vec128"}, + {name: "VPADDSBMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPADDSB", commutative: true, typ: "Vec128"}, + {name: "VPSUBBMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPSUBB", commutative: false, typ: "Vec128"}, + {name: "VPMAXUB128", argLength: 2, reg: fp2fp1, asm: "VPMAXUB", commutative: true, typ: "Vec128"}, + {name: "VPMINUB128", argLength: 2, reg: fp2fp1, asm: "VPMINUB", commutative: true, typ: "Vec128"}, + {name: "VPOPCNTB128", argLength: 1, reg: fp1fp1, asm: "VPOPCNTB", commutative: false, typ: "Vec128"}, + {name: "VPADDSB128", argLength: 2, reg: fp2fp1, asm: "VPADDSB", commutative: true, typ: "Vec128"}, + {name: "VPSUBSB128", argLength: 2, reg: fp2fp1, asm: "VPSUBSB", commutative: false, typ: "Vec128"}, + {name: "VPAVGB256", argLength: 2, reg: fp2fp1, asm: "VPAVGB", commutative: true, typ: "Vec256"}, + {name: "VPADDBMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPADDB", commutative: true, typ: "Vec256"}, + {name: "VPAVGBMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPAVGB", commutative: true, typ: "Vec256"}, + {name: "VPMAXUBMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUB", commutative: true, typ: "Vec256"}, + {name: "VPMINUBMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMINUB", commutative: true, typ: "Vec256"}, + {name: "VPOPCNTBMasked256", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTB", commutative: false, typ: "Vec256"}, + {name: "VPADDSBMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPADDSB", commutative: true, typ: 
"Vec256"}, + {name: "VPSUBBMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPSUBB", commutative: false, typ: "Vec256"}, + {name: "VPMAXUB256", argLength: 2, reg: fp2fp1, asm: "VPMAXUB", commutative: true, typ: "Vec256"}, + {name: "VPMINUB256", argLength: 2, reg: fp2fp1, asm: "VPMINUB", commutative: true, typ: "Vec256"}, + {name: "VPADDSB256", argLength: 2, reg: fp2fp1, asm: "VPADDSB", commutative: true, typ: "Vec256"}, + {name: "VPSUBSB256", argLength: 2, reg: fp2fp1, asm: "VPSUBSB", commutative: false, typ: "Vec256"}, + {name: "VPSUBB256", argLength: 2, reg: fp2fp1, asm: "VPSUBB", commutative: false, typ: "Vec256"}, + {name: "VPADDB512", argLength: 2, reg: fp2fp1, asm: "VPADDB", commutative: true, typ: "Vec512"}, + {name: "VPAVGB512", argLength: 2, reg: fp2fp1, asm: "VPAVGB", commutative: true, typ: "Vec512"}, + {name: "VPADDBMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPADDB", commutative: true, typ: "Vec512"}, + {name: "VPAVGBMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPAVGB", commutative: true, typ: "Vec512"}, + {name: "VPMAXUBMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUB", commutative: true, typ: "Vec512"}, + {name: "VPMINUBMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMINUB", commutative: true, typ: "Vec512"}, + {name: "VPOPCNTBMasked512", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTB", commutative: false, typ: "Vec512"}, + {name: "VPSUBSBMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPSUBSB", commutative: false, typ: "Vec512"}, + {name: "VPSUBBMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPSUBB", commutative: false, typ: "Vec512"}, + {name: "VPMAXUB512", argLength: 2, reg: fp2fp1, asm: "VPMAXUB", commutative: true, typ: "Vec512"}, + {name: "VPMINUB512", argLength: 2, reg: fp2fp1, asm: "VPMINUB", commutative: true, typ: "Vec512"}, + {name: "VPADDSB512", argLength: 2, reg: fp2fp1, asm: "VPADDSB", commutative: true, typ: "Vec512"}, + {name: "VCMPPS512", argLength: 2, reg: fp2m1, asm: "VCMPPS", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VCMPPSMasked512", argLength: 3, reg: fp2m1m1, asm: "VCMPPS", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VCMPPS128", argLength: 2, reg: fp2fp1, asm: "VCMPPS", aux: "Int8", commutative: false, typ: "Vec128"}, + {name: "VCMPPSMasked128", argLength: 3, reg: fp2m1m1, asm: "VCMPPS", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VCMPPS256", argLength: 2, reg: fp2fp1, asm: "VCMPPS", aux: "Int8", commutative: false, typ: "Vec256"}, + {name: "VCMPPSMasked256", argLength: 3, reg: fp2m1m1, asm: "VCMPPS", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VCMPPD128", argLength: 2, reg: fp2fp1, asm: "VCMPPD", aux: "Int8", commutative: false, typ: "Vec128"}, + {name: "VCMPPDMasked128", argLength: 3, reg: fp2m1m1, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask"}, + {name: "VCMPPD256", argLength: 2, reg: fp2fp1, asm: "VCMPPD", aux: "Int8", commutative: false, typ: "Vec256"}, + {name: "VCMPPDMasked256", argLength: 3, reg: fp2m1m1, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask"}, + {name: "VCMPPD512", argLength: 2, reg: fp2m1, asm: "VCMPPD", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VCMPPDMasked512", argLength: 3, reg: fp2m1m1, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask"}, + {name: "VPCMPW256", argLength: 2, reg: fp2m1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPWMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPWMasked512", argLength: 3, reg: fp2m1m1, asm: 
"VPCMPW", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPW512", argLength: 2, reg: fp2m1, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask"}, + {name: "VPCMPW128", argLength: 2, reg: fp2m1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPWMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask"}, + {name: "VPCMPD512", argLength: 2, reg: fp2m1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPDMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask"}, + {name: "VPCMPDMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPD128", argLength: 2, reg: fp2m1, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask"}, + {name: "VPCMPD256", argLength: 2, reg: fp2m1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPDMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask"}, + {name: "VPCMPQ128", argLength: 2, reg: fp2m1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPQMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPQ256", argLength: 2, reg: fp2m1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPQMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPQMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask"}, + {name: "VPCMPQ512", argLength: 2, reg: fp2m1, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask"}, + {name: "VPCMPBMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPB128", argLength: 2, reg: fp2m1, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask"}, + {name: "VPCMPBMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPB256", argLength: 2, reg: fp2m1, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask"}, + {name: "VPCMPB512", argLength: 2, reg: fp2m1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPBMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPUW256", argLength: 2, reg: fp2m1, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPUWMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPUW512", argLength: 2, reg: fp2m1, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask"}, + {name: "VPCMPUWMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPUW128", argLength: 2, reg: fp2m1, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPUWMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask"}, + {name: "VPCMPUDMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPUD", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPUD512", argLength: 2, reg: fp2m1, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask"}, + {name: "VPCMPUD128", argLength: 2, reg: fp2m1, asm: "VPCMPUD", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPUDMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPUD", aux: 
"Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPUDMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPUD", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPUD256", argLength: 2, reg: fp2m1, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask"}, + {name: "VPCMPUQ128", argLength: 2, reg: fp2m1, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPUQMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPUQMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPUQ256", argLength: 2, reg: fp2m1, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask"}, + {name: "VPCMPUQ512", argLength: 2, reg: fp2m1, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPUQMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPUB128", argLength: 2, reg: fp2m1, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPUBMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPUB256", argLength: 2, reg: fp2m1, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPUBMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPUB512", argLength: 2, reg: fp2m1, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPUBMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask"}, } } diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 666d6879d69198..529ec09de92d5c 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -1,10 +1,1079 @@ -// Code generated by internal/simd/_gen using 'go run .'; DO NOT EDIT. - +// Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. 
package main func simdGenericOps() []opData { return []opData{ - // {name: "AddInt8x16", argLength: 2, commutative: true}, - // etc + {name: "AddFloat32x16", argLength: 2, commutative: true}, + {name: "AndFloat32x16", argLength: 2, commutative: true}, + {name: "AndNotFloat32x16", argLength: 2, commutative: true}, + {name: "ApproximateReciprocalFloat32x16", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalOfSqrtFloat32x16", argLength: 1, commutative: false}, + {name: "DivFloat32x16", argLength: 2, commutative: false}, + {name: "EqualFloat32x16", argLength: 2, commutative: true}, + {name: "GreaterFloat32x16", argLength: 2, commutative: false}, + {name: "GreaterEqualFloat32x16", argLength: 2, commutative: false}, + {name: "IsNanFloat32x16", argLength: 2, commutative: true}, + {name: "LessFloat32x16", argLength: 2, commutative: false}, + {name: "LessEqualFloat32x16", argLength: 2, commutative: false}, + {name: "MaskedAddFloat32x16", argLength: 3, commutative: true}, + {name: "MaskedAndFloat32x16", argLength: 3, commutative: true}, + {name: "MaskedAndNotFloat32x16", argLength: 3, commutative: true}, + {name: "MaskedApproximateReciprocalFloat32x16", argLength: 2, commutative: false}, + {name: "MaskedApproximateReciprocalOfSqrtFloat32x16", argLength: 2, commutative: false}, + {name: "MaskedDivFloat32x16", argLength: 3, commutative: false}, + {name: "MaskedEqualFloat32x16", argLength: 3, commutative: true}, + {name: "MaskedGreaterFloat32x16", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualFloat32x16", argLength: 3, commutative: false}, + {name: "MaskedIsNanFloat32x16", argLength: 3, commutative: true}, + {name: "MaskedLessFloat32x16", argLength: 3, commutative: false}, + {name: "MaskedLessEqualFloat32x16", argLength: 3, commutative: false}, + {name: "MaskedMaxFloat32x16", argLength: 3, commutative: true}, + {name: "MaskedMinFloat32x16", argLength: 3, commutative: true}, + {name: "MaskedMulFloat32x16", argLength: 3, commutative: true}, + {name: "MaskedMulByPowOf2Float32x16", argLength: 3, commutative: false}, + {name: "MaskedNotEqualFloat32x16", argLength: 3, commutative: true}, + {name: "MaskedOrFloat32x16", argLength: 3, commutative: true}, + {name: "MaskedSqrtFloat32x16", argLength: 2, commutative: false}, + {name: "MaskedSubFloat32x16", argLength: 3, commutative: false}, + {name: "MaskedXorFloat32x16", argLength: 3, commutative: true}, + {name: "MaxFloat32x16", argLength: 2, commutative: true}, + {name: "MinFloat32x16", argLength: 2, commutative: true}, + {name: "MulFloat32x16", argLength: 2, commutative: true}, + {name: "MulByPowOf2Float32x16", argLength: 2, commutative: false}, + {name: "NotEqualFloat32x16", argLength: 2, commutative: true}, + {name: "OrFloat32x16", argLength: 2, commutative: true}, + {name: "SqrtFloat32x16", argLength: 1, commutative: false}, + {name: "SubFloat32x16", argLength: 2, commutative: false}, + {name: "XorFloat32x16", argLength: 2, commutative: true}, + {name: "AddFloat32x4", argLength: 2, commutative: true}, + {name: "AndFloat32x4", argLength: 2, commutative: true}, + {name: "AndNotFloat32x4", argLength: 2, commutative: true}, + {name: "ApproximateReciprocalFloat32x4", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalOfSqrtFloat32x4", argLength: 1, commutative: false}, + {name: "DivFloat32x4", argLength: 2, commutative: false}, + {name: "EqualFloat32x4", argLength: 2, commutative: true}, + {name: "GreaterFloat32x4", argLength: 2, commutative: false}, + {name: "GreaterEqualFloat32x4", argLength: 2, commutative: 
false}, + {name: "IsNanFloat32x4", argLength: 2, commutative: true}, + {name: "LessFloat32x4", argLength: 2, commutative: false}, + {name: "LessEqualFloat32x4", argLength: 2, commutative: false}, + {name: "MaskedAddFloat32x4", argLength: 3, commutative: true}, + {name: "MaskedAndFloat32x4", argLength: 3, commutative: true}, + {name: "MaskedAndNotFloat32x4", argLength: 3, commutative: true}, + {name: "MaskedApproximateReciprocalFloat32x4", argLength: 2, commutative: false}, + {name: "MaskedApproximateReciprocalOfSqrtFloat32x4", argLength: 2, commutative: false}, + {name: "MaskedDivFloat32x4", argLength: 3, commutative: false}, + {name: "MaskedEqualFloat32x4", argLength: 3, commutative: true}, + {name: "MaskedGreaterFloat32x4", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualFloat32x4", argLength: 3, commutative: false}, + {name: "MaskedIsNanFloat32x4", argLength: 3, commutative: true}, + {name: "MaskedLessFloat32x4", argLength: 3, commutative: false}, + {name: "MaskedLessEqualFloat32x4", argLength: 3, commutative: false}, + {name: "MaskedMaxFloat32x4", argLength: 3, commutative: true}, + {name: "MaskedMinFloat32x4", argLength: 3, commutative: true}, + {name: "MaskedMulFloat32x4", argLength: 3, commutative: true}, + {name: "MaskedMulByPowOf2Float32x4", argLength: 3, commutative: false}, + {name: "MaskedNotEqualFloat32x4", argLength: 3, commutative: true}, + {name: "MaskedOrFloat32x4", argLength: 3, commutative: true}, + {name: "MaskedSqrtFloat32x4", argLength: 2, commutative: false}, + {name: "MaskedSubFloat32x4", argLength: 3, commutative: false}, + {name: "MaskedXorFloat32x4", argLength: 3, commutative: true}, + {name: "MaxFloat32x4", argLength: 2, commutative: true}, + {name: "MinFloat32x4", argLength: 2, commutative: true}, + {name: "MulFloat32x4", argLength: 2, commutative: true}, + {name: "MulByPowOf2Float32x4", argLength: 2, commutative: false}, + {name: "NotEqualFloat32x4", argLength: 2, commutative: true}, + {name: "OrFloat32x4", argLength: 2, commutative: true}, + {name: "PairwiseAddFloat32x4", argLength: 2, commutative: false}, + {name: "PairwiseSubFloat32x4", argLength: 2, commutative: false}, + {name: "SqrtFloat32x4", argLength: 1, commutative: false}, + {name: "SubFloat32x4", argLength: 2, commutative: false}, + {name: "XorFloat32x4", argLength: 2, commutative: true}, + {name: "AddFloat32x8", argLength: 2, commutative: true}, + {name: "AndFloat32x8", argLength: 2, commutative: true}, + {name: "AndNotFloat32x8", argLength: 2, commutative: true}, + {name: "ApproximateReciprocalFloat32x8", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalOfSqrtFloat32x8", argLength: 1, commutative: false}, + {name: "DivFloat32x8", argLength: 2, commutative: false}, + {name: "EqualFloat32x8", argLength: 2, commutative: true}, + {name: "GreaterFloat32x8", argLength: 2, commutative: false}, + {name: "GreaterEqualFloat32x8", argLength: 2, commutative: false}, + {name: "IsNanFloat32x8", argLength: 2, commutative: true}, + {name: "LessFloat32x8", argLength: 2, commutative: false}, + {name: "LessEqualFloat32x8", argLength: 2, commutative: false}, + {name: "MaskedAddFloat32x8", argLength: 3, commutative: true}, + {name: "MaskedAndFloat32x8", argLength: 3, commutative: true}, + {name: "MaskedAndNotFloat32x8", argLength: 3, commutative: true}, + {name: "MaskedApproximateReciprocalFloat32x8", argLength: 2, commutative: false}, + {name: "MaskedApproximateReciprocalOfSqrtFloat32x8", argLength: 2, commutative: false}, + {name: "MaskedDivFloat32x8", argLength: 3, commutative: 
false}, + {name: "MaskedEqualFloat32x8", argLength: 3, commutative: true}, + {name: "MaskedGreaterFloat32x8", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualFloat32x8", argLength: 3, commutative: false}, + {name: "MaskedIsNanFloat32x8", argLength: 3, commutative: true}, + {name: "MaskedLessFloat32x8", argLength: 3, commutative: false}, + {name: "MaskedLessEqualFloat32x8", argLength: 3, commutative: false}, + {name: "MaskedMaxFloat32x8", argLength: 3, commutative: true}, + {name: "MaskedMinFloat32x8", argLength: 3, commutative: true}, + {name: "MaskedMulFloat32x8", argLength: 3, commutative: true}, + {name: "MaskedMulByPowOf2Float32x8", argLength: 3, commutative: false}, + {name: "MaskedNotEqualFloat32x8", argLength: 3, commutative: true}, + {name: "MaskedOrFloat32x8", argLength: 3, commutative: true}, + {name: "MaskedSqrtFloat32x8", argLength: 2, commutative: false}, + {name: "MaskedSubFloat32x8", argLength: 3, commutative: false}, + {name: "MaskedXorFloat32x8", argLength: 3, commutative: true}, + {name: "MaxFloat32x8", argLength: 2, commutative: true}, + {name: "MinFloat32x8", argLength: 2, commutative: true}, + {name: "MulFloat32x8", argLength: 2, commutative: true}, + {name: "MulByPowOf2Float32x8", argLength: 2, commutative: false}, + {name: "NotEqualFloat32x8", argLength: 2, commutative: true}, + {name: "OrFloat32x8", argLength: 2, commutative: true}, + {name: "PairwiseAddFloat32x8", argLength: 2, commutative: false}, + {name: "PairwiseSubFloat32x8", argLength: 2, commutative: false}, + {name: "SqrtFloat32x8", argLength: 1, commutative: false}, + {name: "SubFloat32x8", argLength: 2, commutative: false}, + {name: "XorFloat32x8", argLength: 2, commutative: true}, + {name: "AddFloat64x2", argLength: 2, commutative: true}, + {name: "AndFloat64x2", argLength: 2, commutative: true}, + {name: "AndNotFloat64x2", argLength: 2, commutative: true}, + {name: "ApproximateReciprocalFloat64x2", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalOfSqrtFloat64x2", argLength: 1, commutative: false}, + {name: "DivFloat64x2", argLength: 2, commutative: false}, + {name: "EqualFloat64x2", argLength: 2, commutative: true}, + {name: "GreaterFloat64x2", argLength: 2, commutative: false}, + {name: "GreaterEqualFloat64x2", argLength: 2, commutative: false}, + {name: "IsNanFloat64x2", argLength: 2, commutative: true}, + {name: "LessFloat64x2", argLength: 2, commutative: false}, + {name: "LessEqualFloat64x2", argLength: 2, commutative: false}, + {name: "MaskedAddFloat64x2", argLength: 3, commutative: true}, + {name: "MaskedAndFloat64x2", argLength: 3, commutative: true}, + {name: "MaskedAndNotFloat64x2", argLength: 3, commutative: true}, + {name: "MaskedApproximateReciprocalFloat64x2", argLength: 2, commutative: false}, + {name: "MaskedApproximateReciprocalOfSqrtFloat64x2", argLength: 2, commutative: false}, + {name: "MaskedDivFloat64x2", argLength: 3, commutative: false}, + {name: "MaskedEqualFloat64x2", argLength: 3, commutative: true}, + {name: "MaskedGreaterFloat64x2", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualFloat64x2", argLength: 3, commutative: false}, + {name: "MaskedIsNanFloat64x2", argLength: 3, commutative: true}, + {name: "MaskedLessFloat64x2", argLength: 3, commutative: false}, + {name: "MaskedLessEqualFloat64x2", argLength: 3, commutative: false}, + {name: "MaskedMaxFloat64x2", argLength: 3, commutative: true}, + {name: "MaskedMinFloat64x2", argLength: 3, commutative: true}, + {name: "MaskedMulFloat64x2", argLength: 3, commutative: true}, + {name: 
"MaskedMulByPowOf2Float64x2", argLength: 3, commutative: false}, + {name: "MaskedNotEqualFloat64x2", argLength: 3, commutative: true}, + {name: "MaskedOrFloat64x2", argLength: 3, commutative: true}, + {name: "MaskedSqrtFloat64x2", argLength: 2, commutative: false}, + {name: "MaskedSubFloat64x2", argLength: 3, commutative: false}, + {name: "MaskedXorFloat64x2", argLength: 3, commutative: true}, + {name: "MaxFloat64x2", argLength: 2, commutative: true}, + {name: "MinFloat64x2", argLength: 2, commutative: true}, + {name: "MulFloat64x2", argLength: 2, commutative: true}, + {name: "MulByPowOf2Float64x2", argLength: 2, commutative: false}, + {name: "NotEqualFloat64x2", argLength: 2, commutative: true}, + {name: "OrFloat64x2", argLength: 2, commutative: true}, + {name: "PairwiseAddFloat64x2", argLength: 2, commutative: false}, + {name: "PairwiseSubFloat64x2", argLength: 2, commutative: false}, + {name: "SqrtFloat64x2", argLength: 1, commutative: false}, + {name: "SubFloat64x2", argLength: 2, commutative: false}, + {name: "XorFloat64x2", argLength: 2, commutative: true}, + {name: "AddFloat64x4", argLength: 2, commutative: true}, + {name: "AndFloat64x4", argLength: 2, commutative: true}, + {name: "AndNotFloat64x4", argLength: 2, commutative: true}, + {name: "ApproximateReciprocalFloat64x4", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalOfSqrtFloat64x4", argLength: 1, commutative: false}, + {name: "DivFloat64x4", argLength: 2, commutative: false}, + {name: "EqualFloat64x4", argLength: 2, commutative: true}, + {name: "GreaterFloat64x4", argLength: 2, commutative: false}, + {name: "GreaterEqualFloat64x4", argLength: 2, commutative: false}, + {name: "IsNanFloat64x4", argLength: 2, commutative: true}, + {name: "LessFloat64x4", argLength: 2, commutative: false}, + {name: "LessEqualFloat64x4", argLength: 2, commutative: false}, + {name: "MaskedAddFloat64x4", argLength: 3, commutative: true}, + {name: "MaskedAndFloat64x4", argLength: 3, commutative: true}, + {name: "MaskedAndNotFloat64x4", argLength: 3, commutative: true}, + {name: "MaskedApproximateReciprocalFloat64x4", argLength: 2, commutative: false}, + {name: "MaskedApproximateReciprocalOfSqrtFloat64x4", argLength: 2, commutative: false}, + {name: "MaskedDivFloat64x4", argLength: 3, commutative: false}, + {name: "MaskedEqualFloat64x4", argLength: 3, commutative: true}, + {name: "MaskedGreaterFloat64x4", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualFloat64x4", argLength: 3, commutative: false}, + {name: "MaskedIsNanFloat64x4", argLength: 3, commutative: true}, + {name: "MaskedLessFloat64x4", argLength: 3, commutative: false}, + {name: "MaskedLessEqualFloat64x4", argLength: 3, commutative: false}, + {name: "MaskedMaxFloat64x4", argLength: 3, commutative: true}, + {name: "MaskedMinFloat64x4", argLength: 3, commutative: true}, + {name: "MaskedMulFloat64x4", argLength: 3, commutative: true}, + {name: "MaskedMulByPowOf2Float64x4", argLength: 3, commutative: false}, + {name: "MaskedNotEqualFloat64x4", argLength: 3, commutative: true}, + {name: "MaskedOrFloat64x4", argLength: 3, commutative: true}, + {name: "MaskedSqrtFloat64x4", argLength: 2, commutative: false}, + {name: "MaskedSubFloat64x4", argLength: 3, commutative: false}, + {name: "MaskedXorFloat64x4", argLength: 3, commutative: true}, + {name: "MaxFloat64x4", argLength: 2, commutative: true}, + {name: "MinFloat64x4", argLength: 2, commutative: true}, + {name: "MulFloat64x4", argLength: 2, commutative: true}, + {name: "MulByPowOf2Float64x4", argLength: 2, 
commutative: false}, + {name: "NotEqualFloat64x4", argLength: 2, commutative: true}, + {name: "OrFloat64x4", argLength: 2, commutative: true}, + {name: "PairwiseAddFloat64x4", argLength: 2, commutative: false}, + {name: "PairwiseSubFloat64x4", argLength: 2, commutative: false}, + {name: "SqrtFloat64x4", argLength: 1, commutative: false}, + {name: "SubFloat64x4", argLength: 2, commutative: false}, + {name: "XorFloat64x4", argLength: 2, commutative: true}, + {name: "AddFloat64x8", argLength: 2, commutative: true}, + {name: "AndFloat64x8", argLength: 2, commutative: true}, + {name: "AndNotFloat64x8", argLength: 2, commutative: true}, + {name: "ApproximateReciprocalFloat64x8", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalOfSqrtFloat64x8", argLength: 1, commutative: false}, + {name: "DivFloat64x8", argLength: 2, commutative: false}, + {name: "EqualFloat64x8", argLength: 2, commutative: true}, + {name: "GreaterFloat64x8", argLength: 2, commutative: false}, + {name: "GreaterEqualFloat64x8", argLength: 2, commutative: false}, + {name: "IsNanFloat64x8", argLength: 2, commutative: true}, + {name: "LessFloat64x8", argLength: 2, commutative: false}, + {name: "LessEqualFloat64x8", argLength: 2, commutative: false}, + {name: "MaskedAddFloat64x8", argLength: 3, commutative: true}, + {name: "MaskedAndFloat64x8", argLength: 3, commutative: true}, + {name: "MaskedAndNotFloat64x8", argLength: 3, commutative: true}, + {name: "MaskedApproximateReciprocalFloat64x8", argLength: 2, commutative: false}, + {name: "MaskedApproximateReciprocalOfSqrtFloat64x8", argLength: 2, commutative: false}, + {name: "MaskedDivFloat64x8", argLength: 3, commutative: false}, + {name: "MaskedEqualFloat64x8", argLength: 3, commutative: true}, + {name: "MaskedGreaterFloat64x8", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualFloat64x8", argLength: 3, commutative: false}, + {name: "MaskedIsNanFloat64x8", argLength: 3, commutative: true}, + {name: "MaskedLessFloat64x8", argLength: 3, commutative: false}, + {name: "MaskedLessEqualFloat64x8", argLength: 3, commutative: false}, + {name: "MaskedMaxFloat64x8", argLength: 3, commutative: true}, + {name: "MaskedMinFloat64x8", argLength: 3, commutative: true}, + {name: "MaskedMulFloat64x8", argLength: 3, commutative: true}, + {name: "MaskedMulByPowOf2Float64x8", argLength: 3, commutative: false}, + {name: "MaskedNotEqualFloat64x8", argLength: 3, commutative: true}, + {name: "MaskedOrFloat64x8", argLength: 3, commutative: true}, + {name: "MaskedSqrtFloat64x8", argLength: 2, commutative: false}, + {name: "MaskedSubFloat64x8", argLength: 3, commutative: false}, + {name: "MaskedXorFloat64x8", argLength: 3, commutative: true}, + {name: "MaxFloat64x8", argLength: 2, commutative: true}, + {name: "MinFloat64x8", argLength: 2, commutative: true}, + {name: "MulFloat64x8", argLength: 2, commutative: true}, + {name: "MulByPowOf2Float64x8", argLength: 2, commutative: false}, + {name: "NotEqualFloat64x8", argLength: 2, commutative: true}, + {name: "OrFloat64x8", argLength: 2, commutative: true}, + {name: "SqrtFloat64x8", argLength: 1, commutative: false}, + {name: "SubFloat64x8", argLength: 2, commutative: false}, + {name: "XorFloat64x8", argLength: 2, commutative: true}, + {name: "AbsoluteInt16x16", argLength: 1, commutative: false}, + {name: "AddInt16x16", argLength: 2, commutative: true}, + {name: "AndInt16x16", argLength: 2, commutative: true}, + {name: "AndNotInt16x16", argLength: 2, commutative: true}, + {name: "EqualInt16x16", argLength: 2, commutative: true}, + 
{name: "GreaterInt16x16", argLength: 2, commutative: false}, + {name: "GreaterEqualInt16x16", argLength: 2, commutative: false}, + {name: "LessInt16x16", argLength: 2, commutative: false}, + {name: "LessEqualInt16x16", argLength: 2, commutative: false}, + {name: "MaskedAbsoluteInt16x16", argLength: 2, commutative: false}, + {name: "MaskedAddInt16x16", argLength: 3, commutative: true}, + {name: "MaskedEqualInt16x16", argLength: 3, commutative: true}, + {name: "MaskedGreaterInt16x16", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualInt16x16", argLength: 3, commutative: false}, + {name: "MaskedLessInt16x16", argLength: 3, commutative: false}, + {name: "MaskedLessEqualInt16x16", argLength: 3, commutative: false}, + {name: "MaskedMaxInt16x16", argLength: 3, commutative: true}, + {name: "MaskedMinInt16x16", argLength: 3, commutative: true}, + {name: "MaskedMulHighInt16x16", argLength: 3, commutative: true}, + {name: "MaskedMulLowInt16x16", argLength: 3, commutative: true}, + {name: "MaskedNotEqualInt16x16", argLength: 3, commutative: true}, + {name: "MaskedPopCountInt16x16", argLength: 2, commutative: false}, + {name: "MaskedSaturatedAddInt16x16", argLength: 3, commutative: true}, + {name: "MaskedSaturatedSubInt16x16", argLength: 3, commutative: false}, + {name: "MaskedSubInt16x16", argLength: 3, commutative: false}, + {name: "MaxInt16x16", argLength: 2, commutative: true}, + {name: "MinInt16x16", argLength: 2, commutative: true}, + {name: "MulHighInt16x16", argLength: 2, commutative: true}, + {name: "MulLowInt16x16", argLength: 2, commutative: true}, + {name: "NotEqualInt16x16", argLength: 2, commutative: true}, + {name: "OrInt16x16", argLength: 2, commutative: true}, + {name: "PairwiseAddInt16x16", argLength: 2, commutative: false}, + {name: "PairwiseSubInt16x16", argLength: 2, commutative: false}, + {name: "PopCountInt16x16", argLength: 1, commutative: false}, + {name: "SaturatedAddInt16x16", argLength: 2, commutative: true}, + {name: "SaturatedPairwiseAddInt16x16", argLength: 2, commutative: false}, + {name: "SaturatedPairwiseSubInt16x16", argLength: 2, commutative: false}, + {name: "SaturatedSubInt16x16", argLength: 2, commutative: false}, + {name: "SignInt16x16", argLength: 2, commutative: false}, + {name: "SubInt16x16", argLength: 2, commutative: false}, + {name: "XorInt16x16", argLength: 2, commutative: true}, + {name: "AbsoluteInt16x32", argLength: 1, commutative: false}, + {name: "AddInt16x32", argLength: 2, commutative: true}, + {name: "EqualInt16x32", argLength: 2, commutative: true}, + {name: "GreaterInt16x32", argLength: 2, commutative: false}, + {name: "GreaterEqualInt16x32", argLength: 2, commutative: false}, + {name: "LessInt16x32", argLength: 2, commutative: false}, + {name: "LessEqualInt16x32", argLength: 2, commutative: false}, + {name: "MaskedAbsoluteInt16x32", argLength: 2, commutative: false}, + {name: "MaskedAddInt16x32", argLength: 3, commutative: true}, + {name: "MaskedEqualInt16x32", argLength: 3, commutative: true}, + {name: "MaskedGreaterInt16x32", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualInt16x32", argLength: 3, commutative: false}, + {name: "MaskedLessInt16x32", argLength: 3, commutative: false}, + {name: "MaskedLessEqualInt16x32", argLength: 3, commutative: false}, + {name: "MaskedMaxInt16x32", argLength: 3, commutative: true}, + {name: "MaskedMinInt16x32", argLength: 3, commutative: true}, + {name: "MaskedMulHighInt16x32", argLength: 3, commutative: true}, + {name: "MaskedMulLowInt16x32", argLength: 3, commutative: true}, + 
{name: "MaskedNotEqualInt16x32", argLength: 3, commutative: true}, + {name: "MaskedPopCountInt16x32", argLength: 2, commutative: false}, + {name: "MaskedSaturatedAddInt16x32", argLength: 3, commutative: true}, + {name: "MaskedSaturatedSubInt16x32", argLength: 3, commutative: false}, + {name: "MaskedSubInt16x32", argLength: 3, commutative: false}, + {name: "MaxInt16x32", argLength: 2, commutative: true}, + {name: "MinInt16x32", argLength: 2, commutative: true}, + {name: "MulHighInt16x32", argLength: 2, commutative: true}, + {name: "MulLowInt16x32", argLength: 2, commutative: true}, + {name: "NotEqualInt16x32", argLength: 2, commutative: true}, + {name: "PopCountInt16x32", argLength: 1, commutative: false}, + {name: "SaturatedAddInt16x32", argLength: 2, commutative: true}, + {name: "SaturatedSubInt16x32", argLength: 2, commutative: false}, + {name: "SubInt16x32", argLength: 2, commutative: false}, + {name: "AbsoluteInt16x8", argLength: 1, commutative: false}, + {name: "AddInt16x8", argLength: 2, commutative: true}, + {name: "AndInt16x8", argLength: 2, commutative: true}, + {name: "AndNotInt16x8", argLength: 2, commutative: true}, + {name: "EqualInt16x8", argLength: 2, commutative: true}, + {name: "GreaterInt16x8", argLength: 2, commutative: false}, + {name: "GreaterEqualInt16x8", argLength: 2, commutative: false}, + {name: "LessInt16x8", argLength: 2, commutative: false}, + {name: "LessEqualInt16x8", argLength: 2, commutative: false}, + {name: "MaskedAbsoluteInt16x8", argLength: 2, commutative: false}, + {name: "MaskedAddInt16x8", argLength: 3, commutative: true}, + {name: "MaskedEqualInt16x8", argLength: 3, commutative: true}, + {name: "MaskedGreaterInt16x8", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualInt16x8", argLength: 3, commutative: false}, + {name: "MaskedLessInt16x8", argLength: 3, commutative: false}, + {name: "MaskedLessEqualInt16x8", argLength: 3, commutative: false}, + {name: "MaskedMaxInt16x8", argLength: 3, commutative: true}, + {name: "MaskedMinInt16x8", argLength: 3, commutative: true}, + {name: "MaskedMulHighInt16x8", argLength: 3, commutative: true}, + {name: "MaskedMulLowInt16x8", argLength: 3, commutative: true}, + {name: "MaskedNotEqualInt16x8", argLength: 3, commutative: true}, + {name: "MaskedPopCountInt16x8", argLength: 2, commutative: false}, + {name: "MaskedSaturatedAddInt16x8", argLength: 3, commutative: true}, + {name: "MaskedSaturatedSubInt16x8", argLength: 3, commutative: false}, + {name: "MaskedSubInt16x8", argLength: 3, commutative: false}, + {name: "MaxInt16x8", argLength: 2, commutative: true}, + {name: "MinInt16x8", argLength: 2, commutative: true}, + {name: "MulHighInt16x8", argLength: 2, commutative: true}, + {name: "MulLowInt16x8", argLength: 2, commutative: true}, + {name: "NotEqualInt16x8", argLength: 2, commutative: true}, + {name: "OrInt16x8", argLength: 2, commutative: true}, + {name: "PairwiseAddInt16x8", argLength: 2, commutative: false}, + {name: "PairwiseSubInt16x8", argLength: 2, commutative: false}, + {name: "PopCountInt16x8", argLength: 1, commutative: false}, + {name: "SaturatedAddInt16x8", argLength: 2, commutative: true}, + {name: "SaturatedPairwiseAddInt16x8", argLength: 2, commutative: false}, + {name: "SaturatedPairwiseSubInt16x8", argLength: 2, commutative: false}, + {name: "SaturatedSubInt16x8", argLength: 2, commutative: false}, + {name: "SignInt16x8", argLength: 2, commutative: false}, + {name: "SubInt16x8", argLength: 2, commutative: false}, + {name: "XorInt16x8", argLength: 2, commutative: true}, + {name: 
"AbsoluteInt32x16", argLength: 1, commutative: false}, + {name: "AddInt32x16", argLength: 2, commutative: true}, + {name: "AndInt32x16", argLength: 2, commutative: true}, + {name: "AndNotInt32x16", argLength: 2, commutative: true}, + {name: "EqualInt32x16", argLength: 2, commutative: true}, + {name: "GreaterInt32x16", argLength: 2, commutative: false}, + {name: "GreaterEqualInt32x16", argLength: 2, commutative: false}, + {name: "LessInt32x16", argLength: 2, commutative: false}, + {name: "LessEqualInt32x16", argLength: 2, commutative: false}, + {name: "MaskedAbsoluteInt32x16", argLength: 2, commutative: false}, + {name: "MaskedAddInt32x16", argLength: 3, commutative: true}, + {name: "MaskedAndInt32x16", argLength: 3, commutative: true}, + {name: "MaskedAndNotInt32x16", argLength: 3, commutative: true}, + {name: "MaskedEqualInt32x16", argLength: 3, commutative: true}, + {name: "MaskedGreaterInt32x16", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualInt32x16", argLength: 3, commutative: false}, + {name: "MaskedLessInt32x16", argLength: 3, commutative: false}, + {name: "MaskedLessEqualInt32x16", argLength: 3, commutative: false}, + {name: "MaskedMaxInt32x16", argLength: 3, commutative: true}, + {name: "MaskedMinInt32x16", argLength: 3, commutative: true}, + {name: "MaskedMulLowInt32x16", argLength: 3, commutative: true}, + {name: "MaskedNotEqualInt32x16", argLength: 3, commutative: true}, + {name: "MaskedOrInt32x16", argLength: 3, commutative: true}, + {name: "MaskedPopCountInt32x16", argLength: 2, commutative: false}, + {name: "MaskedSubInt32x16", argLength: 3, commutative: false}, + {name: "MaskedXorInt32x16", argLength: 3, commutative: true}, + {name: "MaxInt32x16", argLength: 2, commutative: true}, + {name: "MinInt32x16", argLength: 2, commutative: true}, + {name: "MulLowInt32x16", argLength: 2, commutative: true}, + {name: "NotEqualInt32x16", argLength: 2, commutative: true}, + {name: "OrInt32x16", argLength: 2, commutative: true}, + {name: "PopCountInt32x16", argLength: 1, commutative: false}, + {name: "SubInt32x16", argLength: 2, commutative: false}, + {name: "XorInt32x16", argLength: 2, commutative: true}, + {name: "AbsoluteInt32x4", argLength: 1, commutative: false}, + {name: "AddInt32x4", argLength: 2, commutative: true}, + {name: "AndInt32x4", argLength: 2, commutative: true}, + {name: "AndNotInt32x4", argLength: 2, commutative: true}, + {name: "EqualInt32x4", argLength: 2, commutative: true}, + {name: "GreaterInt32x4", argLength: 2, commutative: false}, + {name: "GreaterEqualInt32x4", argLength: 2, commutative: false}, + {name: "LessInt32x4", argLength: 2, commutative: false}, + {name: "LessEqualInt32x4", argLength: 2, commutative: false}, + {name: "MaskedAbsoluteInt32x4", argLength: 2, commutative: false}, + {name: "MaskedAddInt32x4", argLength: 3, commutative: true}, + {name: "MaskedAndInt32x4", argLength: 3, commutative: true}, + {name: "MaskedAndNotInt32x4", argLength: 3, commutative: true}, + {name: "MaskedEqualInt32x4", argLength: 3, commutative: true}, + {name: "MaskedGreaterInt32x4", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualInt32x4", argLength: 3, commutative: false}, + {name: "MaskedLessInt32x4", argLength: 3, commutative: false}, + {name: "MaskedLessEqualInt32x4", argLength: 3, commutative: false}, + {name: "MaskedMaxInt32x4", argLength: 3, commutative: true}, + {name: "MaskedMinInt32x4", argLength: 3, commutative: true}, + {name: "MaskedMulLowInt32x4", argLength: 3, commutative: true}, + {name: "MaskedNotEqualInt32x4", argLength: 3, 
commutative: true}, + {name: "MaskedOrInt32x4", argLength: 3, commutative: true}, + {name: "MaskedPopCountInt32x4", argLength: 2, commutative: false}, + {name: "MaskedSubInt32x4", argLength: 3, commutative: false}, + {name: "MaskedXorInt32x4", argLength: 3, commutative: true}, + {name: "MaxInt32x4", argLength: 2, commutative: true}, + {name: "MinInt32x4", argLength: 2, commutative: true}, + {name: "MulEvenWidenInt32x4", argLength: 2, commutative: true}, + {name: "MulLowInt32x4", argLength: 2, commutative: true}, + {name: "NotEqualInt32x4", argLength: 2, commutative: true}, + {name: "OrInt32x4", argLength: 2, commutative: true}, + {name: "PairwiseAddInt32x4", argLength: 2, commutative: false}, + {name: "PairwiseSubInt32x4", argLength: 2, commutative: false}, + {name: "PopCountInt32x4", argLength: 1, commutative: false}, + {name: "SignInt32x4", argLength: 2, commutative: false}, + {name: "SubInt32x4", argLength: 2, commutative: false}, + {name: "XorInt32x4", argLength: 2, commutative: true}, + {name: "AbsoluteInt32x8", argLength: 1, commutative: false}, + {name: "AddInt32x8", argLength: 2, commutative: true}, + {name: "AndInt32x8", argLength: 2, commutative: true}, + {name: "AndNotInt32x8", argLength: 2, commutative: true}, + {name: "EqualInt32x8", argLength: 2, commutative: true}, + {name: "GreaterInt32x8", argLength: 2, commutative: false}, + {name: "GreaterEqualInt32x8", argLength: 2, commutative: false}, + {name: "LessInt32x8", argLength: 2, commutative: false}, + {name: "LessEqualInt32x8", argLength: 2, commutative: false}, + {name: "MaskedAbsoluteInt32x8", argLength: 2, commutative: false}, + {name: "MaskedAddInt32x8", argLength: 3, commutative: true}, + {name: "MaskedAndInt32x8", argLength: 3, commutative: true}, + {name: "MaskedAndNotInt32x8", argLength: 3, commutative: true}, + {name: "MaskedEqualInt32x8", argLength: 3, commutative: true}, + {name: "MaskedGreaterInt32x8", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualInt32x8", argLength: 3, commutative: false}, + {name: "MaskedLessInt32x8", argLength: 3, commutative: false}, + {name: "MaskedLessEqualInt32x8", argLength: 3, commutative: false}, + {name: "MaskedMaxInt32x8", argLength: 3, commutative: true}, + {name: "MaskedMinInt32x8", argLength: 3, commutative: true}, + {name: "MaskedMulLowInt32x8", argLength: 3, commutative: true}, + {name: "MaskedNotEqualInt32x8", argLength: 3, commutative: true}, + {name: "MaskedOrInt32x8", argLength: 3, commutative: true}, + {name: "MaskedPopCountInt32x8", argLength: 2, commutative: false}, + {name: "MaskedSubInt32x8", argLength: 3, commutative: false}, + {name: "MaskedXorInt32x8", argLength: 3, commutative: true}, + {name: "MaxInt32x8", argLength: 2, commutative: true}, + {name: "MinInt32x8", argLength: 2, commutative: true}, + {name: "MulEvenWidenInt32x8", argLength: 2, commutative: true}, + {name: "MulLowInt32x8", argLength: 2, commutative: true}, + {name: "NotEqualInt32x8", argLength: 2, commutative: true}, + {name: "OrInt32x8", argLength: 2, commutative: true}, + {name: "PairwiseAddInt32x8", argLength: 2, commutative: false}, + {name: "PairwiseSubInt32x8", argLength: 2, commutative: false}, + {name: "PopCountInt32x8", argLength: 1, commutative: false}, + {name: "SignInt32x8", argLength: 2, commutative: false}, + {name: "SubInt32x8", argLength: 2, commutative: false}, + {name: "XorInt32x8", argLength: 2, commutative: true}, + {name: "AbsoluteInt64x2", argLength: 1, commutative: false}, + {name: "AddInt64x2", argLength: 2, commutative: true}, + {name: "AndInt64x2", argLength: 
2, commutative: true}, + {name: "AndNotInt64x2", argLength: 2, commutative: true}, + {name: "EqualInt64x2", argLength: 2, commutative: true}, + {name: "GreaterInt64x2", argLength: 2, commutative: false}, + {name: "GreaterEqualInt64x2", argLength: 2, commutative: false}, + {name: "LessInt64x2", argLength: 2, commutative: false}, + {name: "LessEqualInt64x2", argLength: 2, commutative: false}, + {name: "MaskedAbsoluteInt64x2", argLength: 2, commutative: false}, + {name: "MaskedAddInt64x2", argLength: 3, commutative: true}, + {name: "MaskedAndInt64x2", argLength: 3, commutative: true}, + {name: "MaskedAndNotInt64x2", argLength: 3, commutative: true}, + {name: "MaskedEqualInt64x2", argLength: 3, commutative: true}, + {name: "MaskedGreaterInt64x2", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualInt64x2", argLength: 3, commutative: false}, + {name: "MaskedLessInt64x2", argLength: 3, commutative: false}, + {name: "MaskedLessEqualInt64x2", argLength: 3, commutative: false}, + {name: "MaskedMaxInt64x2", argLength: 3, commutative: true}, + {name: "MaskedMinInt64x2", argLength: 3, commutative: true}, + {name: "MaskedMulEvenWidenInt64x2", argLength: 3, commutative: true}, + {name: "MaskedMulLowInt64x2", argLength: 3, commutative: true}, + {name: "MaskedNotEqualInt64x2", argLength: 3, commutative: true}, + {name: "MaskedOrInt64x2", argLength: 3, commutative: true}, + {name: "MaskedPopCountInt64x2", argLength: 2, commutative: false}, + {name: "MaskedSubInt64x2", argLength: 3, commutative: false}, + {name: "MaskedXorInt64x2", argLength: 3, commutative: true}, + {name: "MaxInt64x2", argLength: 2, commutative: true}, + {name: "MinInt64x2", argLength: 2, commutative: true}, + {name: "MulEvenWidenInt64x2", argLength: 2, commutative: true}, + {name: "MulLowInt64x2", argLength: 2, commutative: true}, + {name: "NotEqualInt64x2", argLength: 2, commutative: true}, + {name: "OrInt64x2", argLength: 2, commutative: true}, + {name: "PopCountInt64x2", argLength: 1, commutative: false}, + {name: "SubInt64x2", argLength: 2, commutative: false}, + {name: "XorInt64x2", argLength: 2, commutative: true}, + {name: "AbsoluteInt64x4", argLength: 1, commutative: false}, + {name: "AddInt64x4", argLength: 2, commutative: true}, + {name: "AndInt64x4", argLength: 2, commutative: true}, + {name: "AndNotInt64x4", argLength: 2, commutative: true}, + {name: "EqualInt64x4", argLength: 2, commutative: true}, + {name: "GreaterInt64x4", argLength: 2, commutative: false}, + {name: "GreaterEqualInt64x4", argLength: 2, commutative: false}, + {name: "LessInt64x4", argLength: 2, commutative: false}, + {name: "LessEqualInt64x4", argLength: 2, commutative: false}, + {name: "MaskedAbsoluteInt64x4", argLength: 2, commutative: false}, + {name: "MaskedAddInt64x4", argLength: 3, commutative: true}, + {name: "MaskedAndInt64x4", argLength: 3, commutative: true}, + {name: "MaskedAndNotInt64x4", argLength: 3, commutative: true}, + {name: "MaskedEqualInt64x4", argLength: 3, commutative: true}, + {name: "MaskedGreaterInt64x4", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualInt64x4", argLength: 3, commutative: false}, + {name: "MaskedLessInt64x4", argLength: 3, commutative: false}, + {name: "MaskedLessEqualInt64x4", argLength: 3, commutative: false}, + {name: "MaskedMaxInt64x4", argLength: 3, commutative: true}, + {name: "MaskedMinInt64x4", argLength: 3, commutative: true}, + {name: "MaskedMulEvenWidenInt64x4", argLength: 3, commutative: true}, + {name: "MaskedMulLowInt64x4", argLength: 3, commutative: true}, + {name: 
"MaskedNotEqualInt64x4", argLength: 3, commutative: true}, + {name: "MaskedOrInt64x4", argLength: 3, commutative: true}, + {name: "MaskedPopCountInt64x4", argLength: 2, commutative: false}, + {name: "MaskedSubInt64x4", argLength: 3, commutative: false}, + {name: "MaskedXorInt64x4", argLength: 3, commutative: true}, + {name: "MaxInt64x4", argLength: 2, commutative: true}, + {name: "MinInt64x4", argLength: 2, commutative: true}, + {name: "MulEvenWidenInt64x4", argLength: 2, commutative: true}, + {name: "MulLowInt64x4", argLength: 2, commutative: true}, + {name: "NotEqualInt64x4", argLength: 2, commutative: true}, + {name: "OrInt64x4", argLength: 2, commutative: true}, + {name: "PopCountInt64x4", argLength: 1, commutative: false}, + {name: "SubInt64x4", argLength: 2, commutative: false}, + {name: "XorInt64x4", argLength: 2, commutative: true}, + {name: "AbsoluteInt64x8", argLength: 1, commutative: false}, + {name: "AddInt64x8", argLength: 2, commutative: true}, + {name: "AndInt64x8", argLength: 2, commutative: true}, + {name: "AndNotInt64x8", argLength: 2, commutative: true}, + {name: "EqualInt64x8", argLength: 2, commutative: true}, + {name: "GreaterInt64x8", argLength: 2, commutative: false}, + {name: "GreaterEqualInt64x8", argLength: 2, commutative: false}, + {name: "LessInt64x8", argLength: 2, commutative: false}, + {name: "LessEqualInt64x8", argLength: 2, commutative: false}, + {name: "MaskedAbsoluteInt64x8", argLength: 2, commutative: false}, + {name: "MaskedAddInt64x8", argLength: 3, commutative: true}, + {name: "MaskedAndInt64x8", argLength: 3, commutative: true}, + {name: "MaskedAndNotInt64x8", argLength: 3, commutative: true}, + {name: "MaskedEqualInt64x8", argLength: 3, commutative: true}, + {name: "MaskedGreaterInt64x8", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualInt64x8", argLength: 3, commutative: false}, + {name: "MaskedLessInt64x8", argLength: 3, commutative: false}, + {name: "MaskedLessEqualInt64x8", argLength: 3, commutative: false}, + {name: "MaskedMaxInt64x8", argLength: 3, commutative: true}, + {name: "MaskedMinInt64x8", argLength: 3, commutative: true}, + {name: "MaskedMulEvenWidenInt64x8", argLength: 3, commutative: true}, + {name: "MaskedMulLowInt64x8", argLength: 3, commutative: true}, + {name: "MaskedNotEqualInt64x8", argLength: 3, commutative: true}, + {name: "MaskedOrInt64x8", argLength: 3, commutative: true}, + {name: "MaskedPopCountInt64x8", argLength: 2, commutative: false}, + {name: "MaskedSubInt64x8", argLength: 3, commutative: false}, + {name: "MaskedXorInt64x8", argLength: 3, commutative: true}, + {name: "MaxInt64x8", argLength: 2, commutative: true}, + {name: "MinInt64x8", argLength: 2, commutative: true}, + {name: "MulEvenWidenInt64x8", argLength: 2, commutative: true}, + {name: "MulLowInt64x8", argLength: 2, commutative: true}, + {name: "NotEqualInt64x8", argLength: 2, commutative: true}, + {name: "OrInt64x8", argLength: 2, commutative: true}, + {name: "PopCountInt64x8", argLength: 1, commutative: false}, + {name: "SubInt64x8", argLength: 2, commutative: false}, + {name: "XorInt64x8", argLength: 2, commutative: true}, + {name: "AbsoluteInt8x16", argLength: 1, commutative: false}, + {name: "AddInt8x16", argLength: 2, commutative: true}, + {name: "AndInt8x16", argLength: 2, commutative: true}, + {name: "AndNotInt8x16", argLength: 2, commutative: true}, + {name: "EqualInt8x16", argLength: 2, commutative: true}, + {name: "GreaterInt8x16", argLength: 2, commutative: false}, + {name: "GreaterEqualInt8x16", argLength: 2, commutative: false}, 
+ {name: "LessInt8x16", argLength: 2, commutative: false}, + {name: "LessEqualInt8x16", argLength: 2, commutative: false}, + {name: "MaskedAbsoluteInt8x16", argLength: 2, commutative: false}, + {name: "MaskedAddInt8x16", argLength: 3, commutative: true}, + {name: "MaskedEqualInt8x16", argLength: 3, commutative: true}, + {name: "MaskedGreaterInt8x16", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualInt8x16", argLength: 3, commutative: false}, + {name: "MaskedLessInt8x16", argLength: 3, commutative: false}, + {name: "MaskedLessEqualInt8x16", argLength: 3, commutative: false}, + {name: "MaskedMaxInt8x16", argLength: 3, commutative: true}, + {name: "MaskedMinInt8x16", argLength: 3, commutative: true}, + {name: "MaskedNotEqualInt8x16", argLength: 3, commutative: true}, + {name: "MaskedPopCountInt8x16", argLength: 2, commutative: false}, + {name: "MaskedSaturatedAddInt8x16", argLength: 3, commutative: true}, + {name: "MaskedSaturatedSubInt8x16", argLength: 3, commutative: false}, + {name: "MaskedSubInt8x16", argLength: 3, commutative: false}, + {name: "MaxInt8x16", argLength: 2, commutative: true}, + {name: "MinInt8x16", argLength: 2, commutative: true}, + {name: "NotEqualInt8x16", argLength: 2, commutative: true}, + {name: "OrInt8x16", argLength: 2, commutative: true}, + {name: "PopCountInt8x16", argLength: 1, commutative: false}, + {name: "SaturatedAddInt8x16", argLength: 2, commutative: true}, + {name: "SaturatedSubInt8x16", argLength: 2, commutative: false}, + {name: "SignInt8x16", argLength: 2, commutative: false}, + {name: "SubInt8x16", argLength: 2, commutative: false}, + {name: "XorInt8x16", argLength: 2, commutative: true}, + {name: "AbsoluteInt8x32", argLength: 1, commutative: false}, + {name: "AddInt8x32", argLength: 2, commutative: true}, + {name: "AndInt8x32", argLength: 2, commutative: true}, + {name: "AndNotInt8x32", argLength: 2, commutative: true}, + {name: "EqualInt8x32", argLength: 2, commutative: true}, + {name: "GreaterInt8x32", argLength: 2, commutative: false}, + {name: "GreaterEqualInt8x32", argLength: 2, commutative: false}, + {name: "LessInt8x32", argLength: 2, commutative: false}, + {name: "LessEqualInt8x32", argLength: 2, commutative: false}, + {name: "MaskedAbsoluteInt8x32", argLength: 2, commutative: false}, + {name: "MaskedAddInt8x32", argLength: 3, commutative: true}, + {name: "MaskedEqualInt8x32", argLength: 3, commutative: true}, + {name: "MaskedGreaterInt8x32", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualInt8x32", argLength: 3, commutative: false}, + {name: "MaskedLessInt8x32", argLength: 3, commutative: false}, + {name: "MaskedLessEqualInt8x32", argLength: 3, commutative: false}, + {name: "MaskedMaxInt8x32", argLength: 3, commutative: true}, + {name: "MaskedMinInt8x32", argLength: 3, commutative: true}, + {name: "MaskedNotEqualInt8x32", argLength: 3, commutative: true}, + {name: "MaskedPopCountInt8x32", argLength: 2, commutative: false}, + {name: "MaskedSaturatedAddInt8x32", argLength: 3, commutative: true}, + {name: "MaskedSaturatedSubInt8x32", argLength: 3, commutative: false}, + {name: "MaskedSubInt8x32", argLength: 3, commutative: false}, + {name: "MaxInt8x32", argLength: 2, commutative: true}, + {name: "MinInt8x32", argLength: 2, commutative: true}, + {name: "NotEqualInt8x32", argLength: 2, commutative: true}, + {name: "OrInt8x32", argLength: 2, commutative: true}, + {name: "PopCountInt8x32", argLength: 1, commutative: false}, + {name: "SaturatedAddInt8x32", argLength: 2, commutative: true}, + {name: 
"SaturatedSubInt8x32", argLength: 2, commutative: false}, + {name: "SignInt8x32", argLength: 2, commutative: false}, + {name: "SubInt8x32", argLength: 2, commutative: false}, + {name: "XorInt8x32", argLength: 2, commutative: true}, + {name: "AbsoluteInt8x64", argLength: 1, commutative: false}, + {name: "AddInt8x64", argLength: 2, commutative: true}, + {name: "EqualInt8x64", argLength: 2, commutative: true}, + {name: "GreaterInt8x64", argLength: 2, commutative: false}, + {name: "GreaterEqualInt8x64", argLength: 2, commutative: false}, + {name: "LessInt8x64", argLength: 2, commutative: false}, + {name: "LessEqualInt8x64", argLength: 2, commutative: false}, + {name: "MaskedAbsoluteInt8x64", argLength: 2, commutative: false}, + {name: "MaskedAddInt8x64", argLength: 3, commutative: true}, + {name: "MaskedEqualInt8x64", argLength: 3, commutative: true}, + {name: "MaskedGreaterInt8x64", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualInt8x64", argLength: 3, commutative: false}, + {name: "MaskedLessInt8x64", argLength: 3, commutative: false}, + {name: "MaskedLessEqualInt8x64", argLength: 3, commutative: false}, + {name: "MaskedMaxInt8x64", argLength: 3, commutative: true}, + {name: "MaskedMinInt8x64", argLength: 3, commutative: true}, + {name: "MaskedNotEqualInt8x64", argLength: 3, commutative: true}, + {name: "MaskedPopCountInt8x64", argLength: 2, commutative: false}, + {name: "MaskedSaturatedAddInt8x64", argLength: 3, commutative: true}, + {name: "MaskedSaturatedSubInt8x64", argLength: 3, commutative: false}, + {name: "MaskedSubInt8x64", argLength: 3, commutative: false}, + {name: "MaxInt8x64", argLength: 2, commutative: true}, + {name: "MinInt8x64", argLength: 2, commutative: true}, + {name: "NotEqualInt8x64", argLength: 2, commutative: true}, + {name: "PopCountInt8x64", argLength: 1, commutative: false}, + {name: "SaturatedAddInt8x64", argLength: 2, commutative: true}, + {name: "SaturatedSubInt8x64", argLength: 2, commutative: false}, + {name: "SubInt8x64", argLength: 2, commutative: false}, + {name: "AddUint16x16", argLength: 2, commutative: true}, + {name: "AndUint16x16", argLength: 2, commutative: true}, + {name: "AndNotUint16x16", argLength: 2, commutative: true}, + {name: "AverageUint16x16", argLength: 2, commutative: true}, + {name: "EqualUint16x16", argLength: 2, commutative: true}, + {name: "GreaterUint16x16", argLength: 2, commutative: false}, + {name: "GreaterEqualUint16x16", argLength: 2, commutative: false}, + {name: "LessUint16x16", argLength: 2, commutative: false}, + {name: "LessEqualUint16x16", argLength: 2, commutative: false}, + {name: "MaskedAddUint16x16", argLength: 3, commutative: true}, + {name: "MaskedAverageUint16x16", argLength: 3, commutative: true}, + {name: "MaskedEqualUint16x16", argLength: 3, commutative: true}, + {name: "MaskedGreaterUint16x16", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualUint16x16", argLength: 3, commutative: false}, + {name: "MaskedLessUint16x16", argLength: 3, commutative: false}, + {name: "MaskedLessEqualUint16x16", argLength: 3, commutative: false}, + {name: "MaskedMaxUint16x16", argLength: 3, commutative: true}, + {name: "MaskedMinUint16x16", argLength: 3, commutative: true}, + {name: "MaskedMulHighUint16x16", argLength: 3, commutative: true}, + {name: "MaskedNotEqualUint16x16", argLength: 3, commutative: true}, + {name: "MaskedPopCountUint16x16", argLength: 2, commutative: false}, + {name: "MaskedSaturatedAddUint16x16", argLength: 3, commutative: true}, + {name: "MaskedSaturatedSubUint16x16", argLength: 3, 
commutative: false}, + {name: "MaskedSubUint16x16", argLength: 3, commutative: false}, + {name: "MaxUint16x16", argLength: 2, commutative: true}, + {name: "MinUint16x16", argLength: 2, commutative: true}, + {name: "MulHighUint16x16", argLength: 2, commutative: true}, + {name: "NotEqualUint16x16", argLength: 2, commutative: true}, + {name: "OrUint16x16", argLength: 2, commutative: true}, + {name: "PairwiseAddUint16x16", argLength: 2, commutative: false}, + {name: "PairwiseSubUint16x16", argLength: 2, commutative: false}, + {name: "PopCountUint16x16", argLength: 1, commutative: false}, + {name: "SaturatedAddUint16x16", argLength: 2, commutative: true}, + {name: "SaturatedSubUint16x16", argLength: 2, commutative: false}, + {name: "SubUint16x16", argLength: 2, commutative: false}, + {name: "XorUint16x16", argLength: 2, commutative: true}, + {name: "AddUint16x32", argLength: 2, commutative: true}, + {name: "AverageUint16x32", argLength: 2, commutative: true}, + {name: "EqualUint16x32", argLength: 2, commutative: true}, + {name: "GreaterUint16x32", argLength: 2, commutative: false}, + {name: "GreaterEqualUint16x32", argLength: 2, commutative: false}, + {name: "LessUint16x32", argLength: 2, commutative: false}, + {name: "LessEqualUint16x32", argLength: 2, commutative: false}, + {name: "MaskedAddUint16x32", argLength: 3, commutative: true}, + {name: "MaskedAverageUint16x32", argLength: 3, commutative: true}, + {name: "MaskedEqualUint16x32", argLength: 3, commutative: true}, + {name: "MaskedGreaterUint16x32", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualUint16x32", argLength: 3, commutative: false}, + {name: "MaskedLessUint16x32", argLength: 3, commutative: false}, + {name: "MaskedLessEqualUint16x32", argLength: 3, commutative: false}, + {name: "MaskedMaxUint16x32", argLength: 3, commutative: true}, + {name: "MaskedMinUint16x32", argLength: 3, commutative: true}, + {name: "MaskedMulHighUint16x32", argLength: 3, commutative: true}, + {name: "MaskedNotEqualUint16x32", argLength: 3, commutative: true}, + {name: "MaskedPopCountUint16x32", argLength: 2, commutative: false}, + {name: "MaskedSaturatedAddUint16x32", argLength: 3, commutative: true}, + {name: "MaskedSaturatedSubUint16x32", argLength: 3, commutative: false}, + {name: "MaskedSubUint16x32", argLength: 3, commutative: false}, + {name: "MaxUint16x32", argLength: 2, commutative: true}, + {name: "MinUint16x32", argLength: 2, commutative: true}, + {name: "MulHighUint16x32", argLength: 2, commutative: true}, + {name: "NotEqualUint16x32", argLength: 2, commutative: true}, + {name: "PopCountUint16x32", argLength: 1, commutative: false}, + {name: "SaturatedAddUint16x32", argLength: 2, commutative: true}, + {name: "SaturatedSubUint16x32", argLength: 2, commutative: false}, + {name: "SubUint16x32", argLength: 2, commutative: false}, + {name: "AddUint16x8", argLength: 2, commutative: true}, + {name: "AndUint16x8", argLength: 2, commutative: true}, + {name: "AndNotUint16x8", argLength: 2, commutative: true}, + {name: "AverageUint16x8", argLength: 2, commutative: true}, + {name: "EqualUint16x8", argLength: 2, commutative: true}, + {name: "GreaterUint16x8", argLength: 2, commutative: false}, + {name: "GreaterEqualUint16x8", argLength: 2, commutative: false}, + {name: "LessUint16x8", argLength: 2, commutative: false}, + {name: "LessEqualUint16x8", argLength: 2, commutative: false}, + {name: "MaskedAddUint16x8", argLength: 3, commutative: true}, + {name: "MaskedAverageUint16x8", argLength: 3, commutative: true}, + {name: 
"MaskedEqualUint16x8", argLength: 3, commutative: true}, + {name: "MaskedGreaterUint16x8", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualUint16x8", argLength: 3, commutative: false}, + {name: "MaskedLessUint16x8", argLength: 3, commutative: false}, + {name: "MaskedLessEqualUint16x8", argLength: 3, commutative: false}, + {name: "MaskedMaxUint16x8", argLength: 3, commutative: true}, + {name: "MaskedMinUint16x8", argLength: 3, commutative: true}, + {name: "MaskedMulHighUint16x8", argLength: 3, commutative: true}, + {name: "MaskedNotEqualUint16x8", argLength: 3, commutative: true}, + {name: "MaskedPopCountUint16x8", argLength: 2, commutative: false}, + {name: "MaskedSaturatedAddUint16x8", argLength: 3, commutative: true}, + {name: "MaskedSaturatedSubUint16x8", argLength: 3, commutative: false}, + {name: "MaskedSubUint16x8", argLength: 3, commutative: false}, + {name: "MaxUint16x8", argLength: 2, commutative: true}, + {name: "MinUint16x8", argLength: 2, commutative: true}, + {name: "MulHighUint16x8", argLength: 2, commutative: true}, + {name: "NotEqualUint16x8", argLength: 2, commutative: true}, + {name: "OrUint16x8", argLength: 2, commutative: true}, + {name: "PairwiseAddUint16x8", argLength: 2, commutative: false}, + {name: "PairwiseSubUint16x8", argLength: 2, commutative: false}, + {name: "PopCountUint16x8", argLength: 1, commutative: false}, + {name: "SaturatedAddUint16x8", argLength: 2, commutative: true}, + {name: "SaturatedSubUint16x8", argLength: 2, commutative: false}, + {name: "SubUint16x8", argLength: 2, commutative: false}, + {name: "XorUint16x8", argLength: 2, commutative: true}, + {name: "AddUint32x16", argLength: 2, commutative: true}, + {name: "AndUint32x16", argLength: 2, commutative: true}, + {name: "AndNotUint32x16", argLength: 2, commutative: true}, + {name: "EqualUint32x16", argLength: 2, commutative: true}, + {name: "GreaterUint32x16", argLength: 2, commutative: false}, + {name: "GreaterEqualUint32x16", argLength: 2, commutative: false}, + {name: "LessUint32x16", argLength: 2, commutative: false}, + {name: "LessEqualUint32x16", argLength: 2, commutative: false}, + {name: "MaskedAddUint32x16", argLength: 3, commutative: true}, + {name: "MaskedAndUint32x16", argLength: 3, commutative: true}, + {name: "MaskedAndNotUint32x16", argLength: 3, commutative: true}, + {name: "MaskedEqualUint32x16", argLength: 3, commutative: true}, + {name: "MaskedGreaterUint32x16", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualUint32x16", argLength: 3, commutative: false}, + {name: "MaskedLessUint32x16", argLength: 3, commutative: false}, + {name: "MaskedLessEqualUint32x16", argLength: 3, commutative: false}, + {name: "MaskedMaxUint32x16", argLength: 3, commutative: true}, + {name: "MaskedMinUint32x16", argLength: 3, commutative: true}, + {name: "MaskedNotEqualUint32x16", argLength: 3, commutative: true}, + {name: "MaskedOrUint32x16", argLength: 3, commutative: true}, + {name: "MaskedPopCountUint32x16", argLength: 2, commutative: false}, + {name: "MaskedSubUint32x16", argLength: 3, commutative: false}, + {name: "MaskedXorUint32x16", argLength: 3, commutative: true}, + {name: "MaxUint32x16", argLength: 2, commutative: true}, + {name: "MinUint32x16", argLength: 2, commutative: true}, + {name: "NotEqualUint32x16", argLength: 2, commutative: true}, + {name: "OrUint32x16", argLength: 2, commutative: true}, + {name: "PopCountUint32x16", argLength: 1, commutative: false}, + {name: "SubUint32x16", argLength: 2, commutative: false}, + {name: "XorUint32x16", argLength: 2, 
commutative: true}, + {name: "AddUint32x4", argLength: 2, commutative: true}, + {name: "AndUint32x4", argLength: 2, commutative: true}, + {name: "AndNotUint32x4", argLength: 2, commutative: true}, + {name: "EqualUint32x4", argLength: 2, commutative: true}, + {name: "GreaterUint32x4", argLength: 2, commutative: false}, + {name: "GreaterEqualUint32x4", argLength: 2, commutative: false}, + {name: "LessUint32x4", argLength: 2, commutative: false}, + {name: "LessEqualUint32x4", argLength: 2, commutative: false}, + {name: "MaskedAddUint32x4", argLength: 3, commutative: true}, + {name: "MaskedAndUint32x4", argLength: 3, commutative: true}, + {name: "MaskedAndNotUint32x4", argLength: 3, commutative: true}, + {name: "MaskedEqualUint32x4", argLength: 3, commutative: true}, + {name: "MaskedGreaterUint32x4", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualUint32x4", argLength: 3, commutative: false}, + {name: "MaskedLessUint32x4", argLength: 3, commutative: false}, + {name: "MaskedLessEqualUint32x4", argLength: 3, commutative: false}, + {name: "MaskedMaxUint32x4", argLength: 3, commutative: true}, + {name: "MaskedMinUint32x4", argLength: 3, commutative: true}, + {name: "MaskedNotEqualUint32x4", argLength: 3, commutative: true}, + {name: "MaskedOrUint32x4", argLength: 3, commutative: true}, + {name: "MaskedPopCountUint32x4", argLength: 2, commutative: false}, + {name: "MaskedSubUint32x4", argLength: 3, commutative: false}, + {name: "MaskedXorUint32x4", argLength: 3, commutative: true}, + {name: "MaxUint32x4", argLength: 2, commutative: true}, + {name: "MinUint32x4", argLength: 2, commutative: true}, + {name: "MulEvenWidenUint32x4", argLength: 2, commutative: true}, + {name: "NotEqualUint32x4", argLength: 2, commutative: true}, + {name: "OrUint32x4", argLength: 2, commutative: true}, + {name: "PairwiseAddUint32x4", argLength: 2, commutative: false}, + {name: "PairwiseSubUint32x4", argLength: 2, commutative: false}, + {name: "PopCountUint32x4", argLength: 1, commutative: false}, + {name: "SubUint32x4", argLength: 2, commutative: false}, + {name: "XorUint32x4", argLength: 2, commutative: true}, + {name: "AddUint32x8", argLength: 2, commutative: true}, + {name: "AndUint32x8", argLength: 2, commutative: true}, + {name: "AndNotUint32x8", argLength: 2, commutative: true}, + {name: "EqualUint32x8", argLength: 2, commutative: true}, + {name: "GreaterUint32x8", argLength: 2, commutative: false}, + {name: "GreaterEqualUint32x8", argLength: 2, commutative: false}, + {name: "LessUint32x8", argLength: 2, commutative: false}, + {name: "LessEqualUint32x8", argLength: 2, commutative: false}, + {name: "MaskedAddUint32x8", argLength: 3, commutative: true}, + {name: "MaskedAndUint32x8", argLength: 3, commutative: true}, + {name: "MaskedAndNotUint32x8", argLength: 3, commutative: true}, + {name: "MaskedEqualUint32x8", argLength: 3, commutative: true}, + {name: "MaskedGreaterUint32x8", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualUint32x8", argLength: 3, commutative: false}, + {name: "MaskedLessUint32x8", argLength: 3, commutative: false}, + {name: "MaskedLessEqualUint32x8", argLength: 3, commutative: false}, + {name: "MaskedMaxUint32x8", argLength: 3, commutative: true}, + {name: "MaskedMinUint32x8", argLength: 3, commutative: true}, + {name: "MaskedNotEqualUint32x8", argLength: 3, commutative: true}, + {name: "MaskedOrUint32x8", argLength: 3, commutative: true}, + {name: "MaskedPopCountUint32x8", argLength: 2, commutative: false}, + {name: "MaskedSubUint32x8", argLength: 3, commutative: 
false}, + {name: "MaskedXorUint32x8", argLength: 3, commutative: true}, + {name: "MaxUint32x8", argLength: 2, commutative: true}, + {name: "MinUint32x8", argLength: 2, commutative: true}, + {name: "MulEvenWidenUint32x8", argLength: 2, commutative: true}, + {name: "NotEqualUint32x8", argLength: 2, commutative: true}, + {name: "OrUint32x8", argLength: 2, commutative: true}, + {name: "PairwiseAddUint32x8", argLength: 2, commutative: false}, + {name: "PairwiseSubUint32x8", argLength: 2, commutative: false}, + {name: "PopCountUint32x8", argLength: 1, commutative: false}, + {name: "SubUint32x8", argLength: 2, commutative: false}, + {name: "XorUint32x8", argLength: 2, commutative: true}, + {name: "AddUint64x2", argLength: 2, commutative: true}, + {name: "AndUint64x2", argLength: 2, commutative: true}, + {name: "AndNotUint64x2", argLength: 2, commutative: true}, + {name: "EqualUint64x2", argLength: 2, commutative: true}, + {name: "GreaterUint64x2", argLength: 2, commutative: false}, + {name: "GreaterEqualUint64x2", argLength: 2, commutative: false}, + {name: "LessUint64x2", argLength: 2, commutative: false}, + {name: "LessEqualUint64x2", argLength: 2, commutative: false}, + {name: "MaskedAddUint64x2", argLength: 3, commutative: true}, + {name: "MaskedAndUint64x2", argLength: 3, commutative: true}, + {name: "MaskedAndNotUint64x2", argLength: 3, commutative: true}, + {name: "MaskedEqualUint64x2", argLength: 3, commutative: true}, + {name: "MaskedGreaterUint64x2", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualUint64x2", argLength: 3, commutative: false}, + {name: "MaskedLessUint64x2", argLength: 3, commutative: false}, + {name: "MaskedLessEqualUint64x2", argLength: 3, commutative: false}, + {name: "MaskedMaxUint64x2", argLength: 3, commutative: true}, + {name: "MaskedMinUint64x2", argLength: 3, commutative: true}, + {name: "MaskedMulEvenWidenUint64x2", argLength: 3, commutative: true}, + {name: "MaskedNotEqualUint64x2", argLength: 3, commutative: true}, + {name: "MaskedOrUint64x2", argLength: 3, commutative: true}, + {name: "MaskedPopCountUint64x2", argLength: 2, commutative: false}, + {name: "MaskedSubUint64x2", argLength: 3, commutative: false}, + {name: "MaskedXorUint64x2", argLength: 3, commutative: true}, + {name: "MaxUint64x2", argLength: 2, commutative: true}, + {name: "MinUint64x2", argLength: 2, commutative: true}, + {name: "MulEvenWidenUint64x2", argLength: 2, commutative: true}, + {name: "NotEqualUint64x2", argLength: 2, commutative: true}, + {name: "OrUint64x2", argLength: 2, commutative: true}, + {name: "PopCountUint64x2", argLength: 1, commutative: false}, + {name: "SubUint64x2", argLength: 2, commutative: false}, + {name: "XorUint64x2", argLength: 2, commutative: true}, + {name: "AddUint64x4", argLength: 2, commutative: true}, + {name: "AndUint64x4", argLength: 2, commutative: true}, + {name: "AndNotUint64x4", argLength: 2, commutative: true}, + {name: "EqualUint64x4", argLength: 2, commutative: true}, + {name: "GreaterUint64x4", argLength: 2, commutative: false}, + {name: "GreaterEqualUint64x4", argLength: 2, commutative: false}, + {name: "LessUint64x4", argLength: 2, commutative: false}, + {name: "LessEqualUint64x4", argLength: 2, commutative: false}, + {name: "MaskedAddUint64x4", argLength: 3, commutative: true}, + {name: "MaskedAndUint64x4", argLength: 3, commutative: true}, + {name: "MaskedAndNotUint64x4", argLength: 3, commutative: true}, + {name: "MaskedEqualUint64x4", argLength: 3, commutative: true}, + {name: "MaskedGreaterUint64x4", argLength: 3, commutative: 
false}, + {name: "MaskedGreaterEqualUint64x4", argLength: 3, commutative: false}, + {name: "MaskedLessUint64x4", argLength: 3, commutative: false}, + {name: "MaskedLessEqualUint64x4", argLength: 3, commutative: false}, + {name: "MaskedMaxUint64x4", argLength: 3, commutative: true}, + {name: "MaskedMinUint64x4", argLength: 3, commutative: true}, + {name: "MaskedMulEvenWidenUint64x4", argLength: 3, commutative: true}, + {name: "MaskedNotEqualUint64x4", argLength: 3, commutative: true}, + {name: "MaskedOrUint64x4", argLength: 3, commutative: true}, + {name: "MaskedPopCountUint64x4", argLength: 2, commutative: false}, + {name: "MaskedSubUint64x4", argLength: 3, commutative: false}, + {name: "MaskedXorUint64x4", argLength: 3, commutative: true}, + {name: "MaxUint64x4", argLength: 2, commutative: true}, + {name: "MinUint64x4", argLength: 2, commutative: true}, + {name: "MulEvenWidenUint64x4", argLength: 2, commutative: true}, + {name: "NotEqualUint64x4", argLength: 2, commutative: true}, + {name: "OrUint64x4", argLength: 2, commutative: true}, + {name: "PopCountUint64x4", argLength: 1, commutative: false}, + {name: "SubUint64x4", argLength: 2, commutative: false}, + {name: "XorUint64x4", argLength: 2, commutative: true}, + {name: "AddUint64x8", argLength: 2, commutative: true}, + {name: "AndUint64x8", argLength: 2, commutative: true}, + {name: "AndNotUint64x8", argLength: 2, commutative: true}, + {name: "EqualUint64x8", argLength: 2, commutative: true}, + {name: "GreaterUint64x8", argLength: 2, commutative: false}, + {name: "GreaterEqualUint64x8", argLength: 2, commutative: false}, + {name: "LessUint64x8", argLength: 2, commutative: false}, + {name: "LessEqualUint64x8", argLength: 2, commutative: false}, + {name: "MaskedAddUint64x8", argLength: 3, commutative: true}, + {name: "MaskedAndUint64x8", argLength: 3, commutative: true}, + {name: "MaskedAndNotUint64x8", argLength: 3, commutative: true}, + {name: "MaskedEqualUint64x8", argLength: 3, commutative: true}, + {name: "MaskedGreaterUint64x8", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualUint64x8", argLength: 3, commutative: false}, + {name: "MaskedLessUint64x8", argLength: 3, commutative: false}, + {name: "MaskedLessEqualUint64x8", argLength: 3, commutative: false}, + {name: "MaskedMaxUint64x8", argLength: 3, commutative: true}, + {name: "MaskedMinUint64x8", argLength: 3, commutative: true}, + {name: "MaskedMulEvenWidenUint64x8", argLength: 3, commutative: true}, + {name: "MaskedNotEqualUint64x8", argLength: 3, commutative: true}, + {name: "MaskedOrUint64x8", argLength: 3, commutative: true}, + {name: "MaskedPopCountUint64x8", argLength: 2, commutative: false}, + {name: "MaskedSubUint64x8", argLength: 3, commutative: false}, + {name: "MaskedXorUint64x8", argLength: 3, commutative: true}, + {name: "MaxUint64x8", argLength: 2, commutative: true}, + {name: "MinUint64x8", argLength: 2, commutative: true}, + {name: "MulEvenWidenUint64x8", argLength: 2, commutative: true}, + {name: "NotEqualUint64x8", argLength: 2, commutative: true}, + {name: "OrUint64x8", argLength: 2, commutative: true}, + {name: "PopCountUint64x8", argLength: 1, commutative: false}, + {name: "SubUint64x8", argLength: 2, commutative: false}, + {name: "XorUint64x8", argLength: 2, commutative: true}, + {name: "AddUint8x16", argLength: 2, commutative: true}, + {name: "AndUint8x16", argLength: 2, commutative: true}, + {name: "AndNotUint8x16", argLength: 2, commutative: true}, + {name: "AverageUint8x16", argLength: 2, commutative: true}, + {name: "EqualUint8x16", 
argLength: 2, commutative: true}, + {name: "GreaterUint8x16", argLength: 2, commutative: false}, + {name: "GreaterEqualUint8x16", argLength: 2, commutative: false}, + {name: "LessUint8x16", argLength: 2, commutative: false}, + {name: "LessEqualUint8x16", argLength: 2, commutative: false}, + {name: "MaskedAddUint8x16", argLength: 3, commutative: true}, + {name: "MaskedAverageUint8x16", argLength: 3, commutative: true}, + {name: "MaskedEqualUint8x16", argLength: 3, commutative: true}, + {name: "MaskedGreaterUint8x16", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualUint8x16", argLength: 3, commutative: false}, + {name: "MaskedLessUint8x16", argLength: 3, commutative: false}, + {name: "MaskedLessEqualUint8x16", argLength: 3, commutative: false}, + {name: "MaskedMaxUint8x16", argLength: 3, commutative: true}, + {name: "MaskedMinUint8x16", argLength: 3, commutative: true}, + {name: "MaskedNotEqualUint8x16", argLength: 3, commutative: true}, + {name: "MaskedPopCountUint8x16", argLength: 2, commutative: false}, + {name: "MaskedSaturatedAddUint8x16", argLength: 3, commutative: true}, + {name: "MaskedSaturatedSubUint8x16", argLength: 3, commutative: false}, + {name: "MaskedSubUint8x16", argLength: 3, commutative: false}, + {name: "MaxUint8x16", argLength: 2, commutative: true}, + {name: "MinUint8x16", argLength: 2, commutative: true}, + {name: "NotEqualUint8x16", argLength: 2, commutative: true}, + {name: "OrUint8x16", argLength: 2, commutative: true}, + {name: "PopCountUint8x16", argLength: 1, commutative: false}, + {name: "SaturatedAddUint8x16", argLength: 2, commutative: true}, + {name: "SaturatedSubUint8x16", argLength: 2, commutative: false}, + {name: "SubUint8x16", argLength: 2, commutative: false}, + {name: "XorUint8x16", argLength: 2, commutative: true}, + {name: "AddUint8x32", argLength: 2, commutative: true}, + {name: "AndUint8x32", argLength: 2, commutative: true}, + {name: "AndNotUint8x32", argLength: 2, commutative: true}, + {name: "AverageUint8x32", argLength: 2, commutative: true}, + {name: "EqualUint8x32", argLength: 2, commutative: true}, + {name: "GreaterUint8x32", argLength: 2, commutative: false}, + {name: "GreaterEqualUint8x32", argLength: 2, commutative: false}, + {name: "LessUint8x32", argLength: 2, commutative: false}, + {name: "LessEqualUint8x32", argLength: 2, commutative: false}, + {name: "MaskedAddUint8x32", argLength: 3, commutative: true}, + {name: "MaskedAverageUint8x32", argLength: 3, commutative: true}, + {name: "MaskedEqualUint8x32", argLength: 3, commutative: true}, + {name: "MaskedGreaterUint8x32", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualUint8x32", argLength: 3, commutative: false}, + {name: "MaskedLessUint8x32", argLength: 3, commutative: false}, + {name: "MaskedLessEqualUint8x32", argLength: 3, commutative: false}, + {name: "MaskedMaxUint8x32", argLength: 3, commutative: true}, + {name: "MaskedMinUint8x32", argLength: 3, commutative: true}, + {name: "MaskedNotEqualUint8x32", argLength: 3, commutative: true}, + {name: "MaskedPopCountUint8x32", argLength: 2, commutative: false}, + {name: "MaskedSaturatedAddUint8x32", argLength: 3, commutative: true}, + {name: "MaskedSaturatedSubUint8x32", argLength: 3, commutative: false}, + {name: "MaskedSubUint8x32", argLength: 3, commutative: false}, + {name: "MaxUint8x32", argLength: 2, commutative: true}, + {name: "MinUint8x32", argLength: 2, commutative: true}, + {name: "NotEqualUint8x32", argLength: 2, commutative: true}, + {name: "OrUint8x32", argLength: 2, commutative: true}, + 
{name: "PopCountUint8x32", argLength: 1, commutative: false}, + {name: "SaturatedAddUint8x32", argLength: 2, commutative: true}, + {name: "SaturatedSubUint8x32", argLength: 2, commutative: false}, + {name: "SubUint8x32", argLength: 2, commutative: false}, + {name: "XorUint8x32", argLength: 2, commutative: true}, + {name: "AddUint8x64", argLength: 2, commutative: true}, + {name: "AverageUint8x64", argLength: 2, commutative: true}, + {name: "EqualUint8x64", argLength: 2, commutative: true}, + {name: "GreaterUint8x64", argLength: 2, commutative: false}, + {name: "GreaterEqualUint8x64", argLength: 2, commutative: false}, + {name: "LessUint8x64", argLength: 2, commutative: false}, + {name: "LessEqualUint8x64", argLength: 2, commutative: false}, + {name: "MaskedAddUint8x64", argLength: 3, commutative: true}, + {name: "MaskedAverageUint8x64", argLength: 3, commutative: true}, + {name: "MaskedEqualUint8x64", argLength: 3, commutative: true}, + {name: "MaskedGreaterUint8x64", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualUint8x64", argLength: 3, commutative: false}, + {name: "MaskedLessUint8x64", argLength: 3, commutative: false}, + {name: "MaskedLessEqualUint8x64", argLength: 3, commutative: false}, + {name: "MaskedMaxUint8x64", argLength: 3, commutative: true}, + {name: "MaskedMinUint8x64", argLength: 3, commutative: true}, + {name: "MaskedNotEqualUint8x64", argLength: 3, commutative: true}, + {name: "MaskedPopCountUint8x64", argLength: 2, commutative: false}, + {name: "MaskedSaturatedAddUint8x64", argLength: 3, commutative: true}, + {name: "MaskedSaturatedSubUint8x64", argLength: 3, commutative: false}, + {name: "MaskedSubUint8x64", argLength: 3, commutative: false}, + {name: "MaxUint8x64", argLength: 2, commutative: true}, + {name: "MinUint8x64", argLength: 2, commutative: true}, + {name: "NotEqualUint8x64", argLength: 2, commutative: true}, + {name: "PopCountUint8x64", argLength: 1, commutative: false}, + {name: "SaturatedAddUint8x64", argLength: 2, commutative: true}, + {name: "SaturatedSubUint8x64", argLength: 2, commutative: false}, + {name: "SubUint8x64", argLength: 2, commutative: false}, } } diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 9b80b77118163f..97a4a4825342db 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1196,6 +1196,590 @@ const ( OpAMD64Zero128 OpAMD64Zero256 OpAMD64Zero512 + OpAMD64VADDPS512 + OpAMD64VANDPS512 + OpAMD64VANDNPS512 + OpAMD64VRCP14PS512 + OpAMD64VRSQRT14PS512 + OpAMD64VDIVPS512 + OpAMD64VANDPSMasked512 + OpAMD64VANDNPSMasked512 + OpAMD64VRCP14PSMasked512 + OpAMD64VRSQRT14PSMasked512 + OpAMD64VDIVPSMasked512 + OpAMD64VMAXPSMasked512 + OpAMD64VMINPSMasked512 + OpAMD64VMULPSMasked512 + OpAMD64VSCALEFPSMasked512 + OpAMD64VORPSMasked512 + OpAMD64VSQRTPSMasked512 + OpAMD64VADDPSMasked512 + OpAMD64VXORPSMasked512 + OpAMD64VMAXPS512 + OpAMD64VMINPS512 + OpAMD64VMULPS512 + OpAMD64VSCALEFPS512 + OpAMD64VORPS512 + OpAMD64VSQRTPS512 + OpAMD64VXORPS512 + OpAMD64VANDPS128 + OpAMD64VANDNPS128 + OpAMD64VRCP14PS128 + OpAMD64VRSQRTPS128 + OpAMD64VDIVPS128 + OpAMD64VADDPSMasked128 + OpAMD64VANDPSMasked128 + OpAMD64VANDNPSMasked128 + OpAMD64VRCP14PSMasked128 + OpAMD64VRSQRT14PSMasked128 + OpAMD64VDIVPSMasked128 + OpAMD64VMAXPSMasked128 + OpAMD64VMINPSMasked128 + OpAMD64VMULPSMasked128 + OpAMD64VSCALEFPSMasked128 + OpAMD64VORPSMasked128 + OpAMD64VSQRTPSMasked128 + OpAMD64VXORPSMasked128 + OpAMD64VMAXPS128 + OpAMD64VMINPS128 + OpAMD64VMULPS128 + OpAMD64VSCALEFPS128 
+ OpAMD64VORPS128 + OpAMD64VHADDPS128 + OpAMD64VHSUBPS128 + OpAMD64VSQRTPS128 + OpAMD64VADDPS128 + OpAMD64VXORPS128 + OpAMD64VADDPS256 + OpAMD64VANDPS256 + OpAMD64VANDNPS256 + OpAMD64VRCP14PS256 + OpAMD64VRSQRTPS256 + OpAMD64VDIVPS256 + OpAMD64VANDPSMasked256 + OpAMD64VANDNPSMasked256 + OpAMD64VRCP14PSMasked256 + OpAMD64VRSQRT14PSMasked256 + OpAMD64VDIVPSMasked256 + OpAMD64VMAXPSMasked256 + OpAMD64VMINPSMasked256 + OpAMD64VMULPSMasked256 + OpAMD64VSCALEFPSMasked256 + OpAMD64VORPSMasked256 + OpAMD64VSQRTPSMasked256 + OpAMD64VADDPSMasked256 + OpAMD64VXORPSMasked256 + OpAMD64VMAXPS256 + OpAMD64VMINPS256 + OpAMD64VMULPS256 + OpAMD64VSCALEFPS256 + OpAMD64VORPS256 + OpAMD64VHADDPS256 + OpAMD64VHSUBPS256 + OpAMD64VSQRTPS256 + OpAMD64VXORPS256 + OpAMD64VADDPD128 + OpAMD64VANDPD128 + OpAMD64VANDNPD128 + OpAMD64VRCP14PD128 + OpAMD64VRSQRT14PD128 + OpAMD64VDIVPD128 + OpAMD64VADDPDMasked128 + OpAMD64VANDPDMasked128 + OpAMD64VANDNPDMasked128 + OpAMD64VRCP14PDMasked128 + OpAMD64VRSQRT14PDMasked128 + OpAMD64VDIVPDMasked128 + OpAMD64VMAXPDMasked128 + OpAMD64VMINPDMasked128 + OpAMD64VMULPDMasked128 + OpAMD64VSCALEFPDMasked128 + OpAMD64VORPDMasked128 + OpAMD64VSQRTPDMasked128 + OpAMD64VXORPDMasked128 + OpAMD64VMAXPD128 + OpAMD64VMINPD128 + OpAMD64VMULPD128 + OpAMD64VSCALEFPD128 + OpAMD64VORPD128 + OpAMD64VHADDPD128 + OpAMD64VHSUBPD128 + OpAMD64VSQRTPD128 + OpAMD64VXORPD128 + OpAMD64VADDPD256 + OpAMD64VANDPD256 + OpAMD64VANDNPD256 + OpAMD64VRCP14PD256 + OpAMD64VRSQRT14PD256 + OpAMD64VDIVPD256 + OpAMD64VANDPDMasked256 + OpAMD64VANDNPDMasked256 + OpAMD64VRCP14PDMasked256 + OpAMD64VRSQRT14PDMasked256 + OpAMD64VDIVPDMasked256 + OpAMD64VMAXPDMasked256 + OpAMD64VMINPDMasked256 + OpAMD64VMULPDMasked256 + OpAMD64VSCALEFPDMasked256 + OpAMD64VORPDMasked256 + OpAMD64VSQRTPDMasked256 + OpAMD64VADDPDMasked256 + OpAMD64VXORPDMasked256 + OpAMD64VMAXPD256 + OpAMD64VMINPD256 + OpAMD64VMULPD256 + OpAMD64VSCALEFPD256 + OpAMD64VORPD256 + OpAMD64VHADDPD256 + OpAMD64VHSUBPD256 + OpAMD64VSQRTPD256 + OpAMD64VXORPD256 + OpAMD64VANDPD512 + OpAMD64VANDNPD512 + OpAMD64VRCP14PD512 + OpAMD64VRSQRT14PD512 + OpAMD64VDIVPD512 + OpAMD64VANDPDMasked512 + OpAMD64VANDNPDMasked512 + OpAMD64VRCP14PDMasked512 + OpAMD64VRSQRT14PDMasked512 + OpAMD64VDIVPDMasked512 + OpAMD64VMAXPDMasked512 + OpAMD64VMINPDMasked512 + OpAMD64VMULPDMasked512 + OpAMD64VSCALEFPDMasked512 + OpAMD64VORPDMasked512 + OpAMD64VSQRTPDMasked512 + OpAMD64VADDPDMasked512 + OpAMD64VXORPDMasked512 + OpAMD64VMAXPD512 + OpAMD64VMINPD512 + OpAMD64VMULPD512 + OpAMD64VSCALEFPD512 + OpAMD64VORPD512 + OpAMD64VSQRTPD512 + OpAMD64VADDPD512 + OpAMD64VXORPD512 + OpAMD64VPABSW256 + OpAMD64VPADDW256 + OpAMD64VPCMPEQW256 + OpAMD64VPCMPGTW256 + OpAMD64VPABSWMasked256 + OpAMD64VPADDWMasked256 + OpAMD64VPCMPEQWMasked256 + OpAMD64VPCMPGTWMasked256 + OpAMD64VPMAXSWMasked256 + OpAMD64VPMINSWMasked256 + OpAMD64VPMULHWMasked256 + OpAMD64VPMULLWMasked256 + OpAMD64VPADDSWMasked256 + OpAMD64VPSUBSWMasked256 + OpAMD64VPSUBWMasked256 + OpAMD64VPMAXSW256 + OpAMD64VPMINSW256 + OpAMD64VPMULHW256 + OpAMD64VPMULLW256 + OpAMD64VPHSUBW256 + OpAMD64VPHADDSW256 + OpAMD64VPHSUBSW256 + OpAMD64VPSUBSW256 + OpAMD64VPSIGNW256 + OpAMD64VPSUBW256 + OpAMD64VPABSW512 + OpAMD64VPADDW512 + OpAMD64VPCMPEQW512 + OpAMD64VPCMPGTW512 + OpAMD64VPABSWMasked512 + OpAMD64VPCMPEQWMasked512 + OpAMD64VPCMPGTWMasked512 + OpAMD64VPMAXSWMasked512 + OpAMD64VPMINSWMasked512 + OpAMD64VPMULHWMasked512 + OpAMD64VPMULLWMasked512 + OpAMD64VPMAXSW512 + OpAMD64VPMINSW512 + OpAMD64VPMULHW512 + OpAMD64VPMULLW512 + OpAMD64VPSUBSW512 + OpAMD64VPABSW128 + 
OpAMD64VPADDW128 + OpAMD64VPCMPEQW128 + OpAMD64VPCMPGTW128 + OpAMD64VPABSWMasked128 + OpAMD64VPCMPEQWMasked128 + OpAMD64VPCMPGTWMasked128 + OpAMD64VPMAXSWMasked128 + OpAMD64VPMINSWMasked128 + OpAMD64VPMULHWMasked128 + OpAMD64VPMULLWMasked128 + OpAMD64VPOPCNTWMasked128 + OpAMD64VPSUBSWMasked128 + OpAMD64VPMAXSW128 + OpAMD64VPMINSW128 + OpAMD64VPMULHW128 + OpAMD64VPMULLW128 + OpAMD64VPHSUBW128 + OpAMD64VPHADDSW128 + OpAMD64VPHSUBSW128 + OpAMD64VPSIGNW128 + OpAMD64VPABSD512 + OpAMD64VPANDD512 + OpAMD64VPABSDMasked512 + OpAMD64VPMAXSDMasked512 + OpAMD64VPMINSDMasked512 + OpAMD64VPMULLDMasked512 + OpAMD64VPOPCNTDMasked512 + OpAMD64VPSUBDMasked512 + OpAMD64VPXORDMasked512 + OpAMD64VPMAXSD512 + OpAMD64VPMINSD512 + OpAMD64VPMULLD512 + OpAMD64VPORD512 + OpAMD64VPXORD512 + OpAMD64VPABSD128 + OpAMD64VPCMPEQD128 + OpAMD64VPCMPGTD128 + OpAMD64VPABSDMasked128 + OpAMD64VPANDDMasked128 + OpAMD64VPMAXSDMasked128 + OpAMD64VPMINSDMasked128 + OpAMD64VPMULLDMasked128 + OpAMD64VPORDMasked128 + OpAMD64VPOPCNTDMasked128 + OpAMD64VPSUBDMasked128 + OpAMD64VPXORDMasked128 + OpAMD64VPMAXSD128 + OpAMD64VPMINSD128 + OpAMD64VPMULLD128 + OpAMD64VPHSUBD128 + OpAMD64VPSIGND128 + OpAMD64VPSUBD128 + OpAMD64VPABSD256 + OpAMD64VPAND256 + OpAMD64VPCMPEQD256 + OpAMD64VPCMPGTD256 + OpAMD64VPABSDMasked256 + OpAMD64VPMAXSDMasked256 + OpAMD64VPMINSDMasked256 + OpAMD64VPMULLDMasked256 + OpAMD64VPORDMasked256 + OpAMD64VPSUBDMasked256 + OpAMD64VPMAXSD256 + OpAMD64VPMINSD256 + OpAMD64VPMULLD256 + OpAMD64VPHSUBD256 + OpAMD64VPOPCNTD256 + OpAMD64VPSIGND256 + OpAMD64VPSUBD256 + OpAMD64VPABSQ128 + OpAMD64VPCMPEQQ128 + OpAMD64VPCMPGTQ128 + OpAMD64VPABSQMasked128 + OpAMD64VPANDQMasked128 + OpAMD64VPANDNQMasked128 + OpAMD64VPCMPEQQMasked128 + OpAMD64VPCMPGTQMasked128 + OpAMD64VPMAXSQMasked128 + OpAMD64VPMINSQMasked128 + OpAMD64VPMULDQMasked128 + OpAMD64VPMULLQMasked128 + OpAMD64VPSUBQMasked128 + OpAMD64VPMAXSQ128 + OpAMD64VPMINSQ128 + OpAMD64VPMULDQ128 + OpAMD64VPMULLQ128 + OpAMD64VPOR128 + OpAMD64VPABSQ256 + OpAMD64VPADDQ256 + OpAMD64VPCMPEQQ256 + OpAMD64VPCMPGTQ256 + OpAMD64VPABSQMasked256 + OpAMD64VPANDQMasked256 + OpAMD64VPANDNQMasked256 + OpAMD64VPCMPEQQMasked256 + OpAMD64VPCMPGTQMasked256 + OpAMD64VPMAXSQMasked256 + OpAMD64VPMINSQMasked256 + OpAMD64VPMULDQMasked256 + OpAMD64VPMULLQMasked256 + OpAMD64VPORQMasked256 + OpAMD64VPOPCNTQMasked256 + OpAMD64VPSUBQMasked256 + OpAMD64VPMAXSQ256 + OpAMD64VPMINSQ256 + OpAMD64VPMULDQ256 + OpAMD64VPMULLQ256 + OpAMD64VPOR256 + OpAMD64VPOPCNTQ256 + OpAMD64VPSUBQ256 + OpAMD64VPABSQ512 + OpAMD64VPANDQ512 + OpAMD64VPCMPEQQ512 + OpAMD64VPCMPGTQ512 + OpAMD64VPABSQMasked512 + OpAMD64VPADDQMasked512 + OpAMD64VPANDNQMasked512 + OpAMD64VPCMPEQQMasked512 + OpAMD64VPCMPGTQMasked512 + OpAMD64VPMAXSQMasked512 + OpAMD64VPMINSQMasked512 + OpAMD64VPMULDQMasked512 + OpAMD64VPMULLQMasked512 + OpAMD64VPMAXSQ512 + OpAMD64VPMINSQ512 + OpAMD64VPMULDQ512 + OpAMD64VPMULLQ512 + OpAMD64VPOPCNTQ512 + OpAMD64VPSUBQ512 + OpAMD64VPXORQ512 + OpAMD64VPABSB128 + OpAMD64VPADDB128 + OpAMD64VPAND128 + OpAMD64VPCMPEQB128 + OpAMD64VPCMPGTB128 + OpAMD64VPABSBMasked128 + OpAMD64VPADDBMasked128 + OpAMD64VPMAXSBMasked128 + OpAMD64VPMINSBMasked128 + OpAMD64VPSUBSBMasked128 + OpAMD64VPMAXSB128 + OpAMD64VPMINSB128 + OpAMD64VPSIGNB128 + OpAMD64VPSUBB128 + OpAMD64VPABSB256 + OpAMD64VPADDB256 + OpAMD64VPANDN256 + OpAMD64VPCMPEQB256 + OpAMD64VPCMPGTB256 + OpAMD64VPABSBMasked256 + OpAMD64VPMAXSBMasked256 + OpAMD64VPMINSBMasked256 + OpAMD64VPSUBSBMasked256 + OpAMD64VPMAXSB256 + OpAMD64VPMINSB256 + OpAMD64VPOPCNTB256 + OpAMD64VPSIGNB256 + 
OpAMD64VPABSB512 + OpAMD64VPABSBMasked512 + OpAMD64VPMAXSBMasked512 + OpAMD64VPMINSBMasked512 + OpAMD64VPADDSBMasked512 + OpAMD64VPMAXSB512 + OpAMD64VPMINSB512 + OpAMD64VPOPCNTB512 + OpAMD64VPSUBSB512 + OpAMD64VPSUBB512 + OpAMD64VPAVGW256 + OpAMD64VPAVGWMasked256 + OpAMD64VPMAXUWMasked256 + OpAMD64VPMINUWMasked256 + OpAMD64VPMULHUWMasked256 + OpAMD64VPOPCNTWMasked256 + OpAMD64VPMAXUW256 + OpAMD64VPMINUW256 + OpAMD64VPMULHUW256 + OpAMD64VPHADDW256 + OpAMD64VPOPCNTW256 + OpAMD64VPADDSW256 + OpAMD64VPAVGW512 + OpAMD64VPADDWMasked512 + OpAMD64VPAVGWMasked512 + OpAMD64VPMAXUWMasked512 + OpAMD64VPMINUWMasked512 + OpAMD64VPMULHUWMasked512 + OpAMD64VPOPCNTWMasked512 + OpAMD64VPADDSWMasked512 + OpAMD64VPSUBSWMasked512 + OpAMD64VPSUBWMasked512 + OpAMD64VPMAXUW512 + OpAMD64VPMINUW512 + OpAMD64VPMULHUW512 + OpAMD64VPOPCNTW512 + OpAMD64VPADDSW512 + OpAMD64VPSUBW512 + OpAMD64VPAVGW128 + OpAMD64VPADDWMasked128 + OpAMD64VPAVGWMasked128 + OpAMD64VPMAXUWMasked128 + OpAMD64VPMINUWMasked128 + OpAMD64VPMULHUWMasked128 + OpAMD64VPADDSWMasked128 + OpAMD64VPSUBWMasked128 + OpAMD64VPMAXUW128 + OpAMD64VPMINUW128 + OpAMD64VPMULHUW128 + OpAMD64VPHADDW128 + OpAMD64VPOPCNTW128 + OpAMD64VPADDSW128 + OpAMD64VPSUBSW128 + OpAMD64VPSUBW128 + OpAMD64VPADDD512 + OpAMD64VPANDND512 + OpAMD64VPADDDMasked512 + OpAMD64VPANDDMasked512 + OpAMD64VPANDNDMasked512 + OpAMD64VPMAXUDMasked512 + OpAMD64VPMINUDMasked512 + OpAMD64VPORDMasked512 + OpAMD64VPMAXUD512 + OpAMD64VPMINUD512 + OpAMD64VPOPCNTD512 + OpAMD64VPSUBD512 + OpAMD64VPADDD128 + OpAMD64VPADDDMasked128 + OpAMD64VPANDNDMasked128 + OpAMD64VPMAXUDMasked128 + OpAMD64VPMINUDMasked128 + OpAMD64VPMAXUD128 + OpAMD64VPMINUD128 + OpAMD64VPHADDD128 + OpAMD64VPOPCNTD128 + OpAMD64VPADDD256 + OpAMD64VPADDDMasked256 + OpAMD64VPANDDMasked256 + OpAMD64VPANDNDMasked256 + OpAMD64VPMAXUDMasked256 + OpAMD64VPMINUDMasked256 + OpAMD64VPOPCNTDMasked256 + OpAMD64VPXORDMasked256 + OpAMD64VPMAXUD256 + OpAMD64VPMINUD256 + OpAMD64VPMULUDQ256 + OpAMD64VPHADDD256 + OpAMD64VPXOR256 + OpAMD64VPADDQ128 + OpAMD64VPADDQMasked128 + OpAMD64VPMAXUQMasked128 + OpAMD64VPMINUQMasked128 + OpAMD64VPMULUDQMasked128 + OpAMD64VPORQMasked128 + OpAMD64VPOPCNTQMasked128 + OpAMD64VPXORQMasked128 + OpAMD64VPMAXUQ128 + OpAMD64VPMINUQ128 + OpAMD64VPMULUDQ128 + OpAMD64VPOPCNTQ128 + OpAMD64VPSUBQ128 + OpAMD64VPXOR128 + OpAMD64VPADDQMasked256 + OpAMD64VPMAXUQMasked256 + OpAMD64VPMINUQMasked256 + OpAMD64VPMULUDQMasked256 + OpAMD64VPXORQMasked256 + OpAMD64VPMAXUQ256 + OpAMD64VPMINUQ256 + OpAMD64VPADDQ512 + OpAMD64VPANDNQ512 + OpAMD64VPANDQMasked512 + OpAMD64VPMAXUQMasked512 + OpAMD64VPMINUQMasked512 + OpAMD64VPMULUDQMasked512 + OpAMD64VPORQMasked512 + OpAMD64VPOPCNTQMasked512 + OpAMD64VPSUBQMasked512 + OpAMD64VPXORQMasked512 + OpAMD64VPMAXUQ512 + OpAMD64VPMINUQ512 + OpAMD64VPMULUDQ512 + OpAMD64VPORQ512 + OpAMD64VPANDN128 + OpAMD64VPAVGB128 + OpAMD64VPAVGBMasked128 + OpAMD64VPMAXUBMasked128 + OpAMD64VPMINUBMasked128 + OpAMD64VPOPCNTBMasked128 + OpAMD64VPADDSBMasked128 + OpAMD64VPSUBBMasked128 + OpAMD64VPMAXUB128 + OpAMD64VPMINUB128 + OpAMD64VPOPCNTB128 + OpAMD64VPADDSB128 + OpAMD64VPSUBSB128 + OpAMD64VPAVGB256 + OpAMD64VPADDBMasked256 + OpAMD64VPAVGBMasked256 + OpAMD64VPMAXUBMasked256 + OpAMD64VPMINUBMasked256 + OpAMD64VPOPCNTBMasked256 + OpAMD64VPADDSBMasked256 + OpAMD64VPSUBBMasked256 + OpAMD64VPMAXUB256 + OpAMD64VPMINUB256 + OpAMD64VPADDSB256 + OpAMD64VPSUBSB256 + OpAMD64VPSUBB256 + OpAMD64VPADDB512 + OpAMD64VPAVGB512 + OpAMD64VPADDBMasked512 + OpAMD64VPAVGBMasked512 + OpAMD64VPMAXUBMasked512 + OpAMD64VPMINUBMasked512 + 
OpAMD64VPOPCNTBMasked512 + OpAMD64VPSUBSBMasked512 + OpAMD64VPSUBBMasked512 + OpAMD64VPMAXUB512 + OpAMD64VPMINUB512 + OpAMD64VPADDSB512 + OpAMD64VCMPPS512 + OpAMD64VCMPPSMasked512 + OpAMD64VCMPPS128 + OpAMD64VCMPPSMasked128 + OpAMD64VCMPPS256 + OpAMD64VCMPPSMasked256 + OpAMD64VCMPPD128 + OpAMD64VCMPPDMasked128 + OpAMD64VCMPPD256 + OpAMD64VCMPPDMasked256 + OpAMD64VCMPPD512 + OpAMD64VCMPPDMasked512 + OpAMD64VPCMPW256 + OpAMD64VPCMPWMasked256 + OpAMD64VPCMPWMasked512 + OpAMD64VPCMPW512 + OpAMD64VPCMPW128 + OpAMD64VPCMPWMasked128 + OpAMD64VPCMPD512 + OpAMD64VPCMPDMasked512 + OpAMD64VPCMPDMasked128 + OpAMD64VPCMPD128 + OpAMD64VPCMPD256 + OpAMD64VPCMPDMasked256 + OpAMD64VPCMPQ128 + OpAMD64VPCMPQMasked128 + OpAMD64VPCMPQ256 + OpAMD64VPCMPQMasked256 + OpAMD64VPCMPQMasked512 + OpAMD64VPCMPQ512 + OpAMD64VPCMPBMasked128 + OpAMD64VPCMPB128 + OpAMD64VPCMPBMasked256 + OpAMD64VPCMPB256 + OpAMD64VPCMPB512 + OpAMD64VPCMPBMasked512 + OpAMD64VPCMPUW256 + OpAMD64VPCMPUWMasked256 + OpAMD64VPCMPUW512 + OpAMD64VPCMPUWMasked512 + OpAMD64VPCMPUW128 + OpAMD64VPCMPUWMasked128 + OpAMD64VPCMPUDMasked512 + OpAMD64VPCMPUD512 + OpAMD64VPCMPUD128 + OpAMD64VPCMPUDMasked128 + OpAMD64VPCMPUDMasked256 + OpAMD64VPCMPUD256 + OpAMD64VPCMPUQ128 + OpAMD64VPCMPUQMasked128 + OpAMD64VPCMPUQMasked256 + OpAMD64VPCMPUQ256 + OpAMD64VPCMPUQ512 + OpAMD64VPCMPUQMasked512 + OpAMD64VPCMPUB128 + OpAMD64VPCMPUBMasked128 + OpAMD64VPCMPUB256 + OpAMD64VPCMPUBMasked256 + OpAMD64VPCMPUB512 + OpAMD64VPCMPUBMasked512 OpARMADD OpARMADDconst @@ -3422,6 +4006,1078 @@ const ( OpPrefetchCacheStreamed OpAdd32x4 OpZeroSIMD + OpAddFloat32x16 + OpAndFloat32x16 + OpAndNotFloat32x16 + OpApproximateReciprocalFloat32x16 + OpApproximateReciprocalOfSqrtFloat32x16 + OpDivFloat32x16 + OpEqualFloat32x16 + OpGreaterFloat32x16 + OpGreaterEqualFloat32x16 + OpIsNanFloat32x16 + OpLessFloat32x16 + OpLessEqualFloat32x16 + OpMaskedAddFloat32x16 + OpMaskedAndFloat32x16 + OpMaskedAndNotFloat32x16 + OpMaskedApproximateReciprocalFloat32x16 + OpMaskedApproximateReciprocalOfSqrtFloat32x16 + OpMaskedDivFloat32x16 + OpMaskedEqualFloat32x16 + OpMaskedGreaterFloat32x16 + OpMaskedGreaterEqualFloat32x16 + OpMaskedIsNanFloat32x16 + OpMaskedLessFloat32x16 + OpMaskedLessEqualFloat32x16 + OpMaskedMaxFloat32x16 + OpMaskedMinFloat32x16 + OpMaskedMulFloat32x16 + OpMaskedMulByPowOf2Float32x16 + OpMaskedNotEqualFloat32x16 + OpMaskedOrFloat32x16 + OpMaskedSqrtFloat32x16 + OpMaskedSubFloat32x16 + OpMaskedXorFloat32x16 + OpMaxFloat32x16 + OpMinFloat32x16 + OpMulFloat32x16 + OpMulByPowOf2Float32x16 + OpNotEqualFloat32x16 + OpOrFloat32x16 + OpSqrtFloat32x16 + OpSubFloat32x16 + OpXorFloat32x16 + OpAddFloat32x4 + OpAndFloat32x4 + OpAndNotFloat32x4 + OpApproximateReciprocalFloat32x4 + OpApproximateReciprocalOfSqrtFloat32x4 + OpDivFloat32x4 + OpEqualFloat32x4 + OpGreaterFloat32x4 + OpGreaterEqualFloat32x4 + OpIsNanFloat32x4 + OpLessFloat32x4 + OpLessEqualFloat32x4 + OpMaskedAddFloat32x4 + OpMaskedAndFloat32x4 + OpMaskedAndNotFloat32x4 + OpMaskedApproximateReciprocalFloat32x4 + OpMaskedApproximateReciprocalOfSqrtFloat32x4 + OpMaskedDivFloat32x4 + OpMaskedEqualFloat32x4 + OpMaskedGreaterFloat32x4 + OpMaskedGreaterEqualFloat32x4 + OpMaskedIsNanFloat32x4 + OpMaskedLessFloat32x4 + OpMaskedLessEqualFloat32x4 + OpMaskedMaxFloat32x4 + OpMaskedMinFloat32x4 + OpMaskedMulFloat32x4 + OpMaskedMulByPowOf2Float32x4 + OpMaskedNotEqualFloat32x4 + OpMaskedOrFloat32x4 + OpMaskedSqrtFloat32x4 + OpMaskedSubFloat32x4 + OpMaskedXorFloat32x4 + OpMaxFloat32x4 + OpMinFloat32x4 + OpMulFloat32x4 + OpMulByPowOf2Float32x4 + 
OpNotEqualFloat32x4 + OpOrFloat32x4 + OpPairwiseAddFloat32x4 + OpPairwiseSubFloat32x4 + OpSqrtFloat32x4 + OpSubFloat32x4 + OpXorFloat32x4 + OpAddFloat32x8 + OpAndFloat32x8 + OpAndNotFloat32x8 + OpApproximateReciprocalFloat32x8 + OpApproximateReciprocalOfSqrtFloat32x8 + OpDivFloat32x8 + OpEqualFloat32x8 + OpGreaterFloat32x8 + OpGreaterEqualFloat32x8 + OpIsNanFloat32x8 + OpLessFloat32x8 + OpLessEqualFloat32x8 + OpMaskedAddFloat32x8 + OpMaskedAndFloat32x8 + OpMaskedAndNotFloat32x8 + OpMaskedApproximateReciprocalFloat32x8 + OpMaskedApproximateReciprocalOfSqrtFloat32x8 + OpMaskedDivFloat32x8 + OpMaskedEqualFloat32x8 + OpMaskedGreaterFloat32x8 + OpMaskedGreaterEqualFloat32x8 + OpMaskedIsNanFloat32x8 + OpMaskedLessFloat32x8 + OpMaskedLessEqualFloat32x8 + OpMaskedMaxFloat32x8 + OpMaskedMinFloat32x8 + OpMaskedMulFloat32x8 + OpMaskedMulByPowOf2Float32x8 + OpMaskedNotEqualFloat32x8 + OpMaskedOrFloat32x8 + OpMaskedSqrtFloat32x8 + OpMaskedSubFloat32x8 + OpMaskedXorFloat32x8 + OpMaxFloat32x8 + OpMinFloat32x8 + OpMulFloat32x8 + OpMulByPowOf2Float32x8 + OpNotEqualFloat32x8 + OpOrFloat32x8 + OpPairwiseAddFloat32x8 + OpPairwiseSubFloat32x8 + OpSqrtFloat32x8 + OpSubFloat32x8 + OpXorFloat32x8 + OpAddFloat64x2 + OpAndFloat64x2 + OpAndNotFloat64x2 + OpApproximateReciprocalFloat64x2 + OpApproximateReciprocalOfSqrtFloat64x2 + OpDivFloat64x2 + OpEqualFloat64x2 + OpGreaterFloat64x2 + OpGreaterEqualFloat64x2 + OpIsNanFloat64x2 + OpLessFloat64x2 + OpLessEqualFloat64x2 + OpMaskedAddFloat64x2 + OpMaskedAndFloat64x2 + OpMaskedAndNotFloat64x2 + OpMaskedApproximateReciprocalFloat64x2 + OpMaskedApproximateReciprocalOfSqrtFloat64x2 + OpMaskedDivFloat64x2 + OpMaskedEqualFloat64x2 + OpMaskedGreaterFloat64x2 + OpMaskedGreaterEqualFloat64x2 + OpMaskedIsNanFloat64x2 + OpMaskedLessFloat64x2 + OpMaskedLessEqualFloat64x2 + OpMaskedMaxFloat64x2 + OpMaskedMinFloat64x2 + OpMaskedMulFloat64x2 + OpMaskedMulByPowOf2Float64x2 + OpMaskedNotEqualFloat64x2 + OpMaskedOrFloat64x2 + OpMaskedSqrtFloat64x2 + OpMaskedSubFloat64x2 + OpMaskedXorFloat64x2 + OpMaxFloat64x2 + OpMinFloat64x2 + OpMulFloat64x2 + OpMulByPowOf2Float64x2 + OpNotEqualFloat64x2 + OpOrFloat64x2 + OpPairwiseAddFloat64x2 + OpPairwiseSubFloat64x2 + OpSqrtFloat64x2 + OpSubFloat64x2 + OpXorFloat64x2 + OpAddFloat64x4 + OpAndFloat64x4 + OpAndNotFloat64x4 + OpApproximateReciprocalFloat64x4 + OpApproximateReciprocalOfSqrtFloat64x4 + OpDivFloat64x4 + OpEqualFloat64x4 + OpGreaterFloat64x4 + OpGreaterEqualFloat64x4 + OpIsNanFloat64x4 + OpLessFloat64x4 + OpLessEqualFloat64x4 + OpMaskedAddFloat64x4 + OpMaskedAndFloat64x4 + OpMaskedAndNotFloat64x4 + OpMaskedApproximateReciprocalFloat64x4 + OpMaskedApproximateReciprocalOfSqrtFloat64x4 + OpMaskedDivFloat64x4 + OpMaskedEqualFloat64x4 + OpMaskedGreaterFloat64x4 + OpMaskedGreaterEqualFloat64x4 + OpMaskedIsNanFloat64x4 + OpMaskedLessFloat64x4 + OpMaskedLessEqualFloat64x4 + OpMaskedMaxFloat64x4 + OpMaskedMinFloat64x4 + OpMaskedMulFloat64x4 + OpMaskedMulByPowOf2Float64x4 + OpMaskedNotEqualFloat64x4 + OpMaskedOrFloat64x4 + OpMaskedSqrtFloat64x4 + OpMaskedSubFloat64x4 + OpMaskedXorFloat64x4 + OpMaxFloat64x4 + OpMinFloat64x4 + OpMulFloat64x4 + OpMulByPowOf2Float64x4 + OpNotEqualFloat64x4 + OpOrFloat64x4 + OpPairwiseAddFloat64x4 + OpPairwiseSubFloat64x4 + OpSqrtFloat64x4 + OpSubFloat64x4 + OpXorFloat64x4 + OpAddFloat64x8 + OpAndFloat64x8 + OpAndNotFloat64x8 + OpApproximateReciprocalFloat64x8 + OpApproximateReciprocalOfSqrtFloat64x8 + OpDivFloat64x8 + OpEqualFloat64x8 + OpGreaterFloat64x8 + OpGreaterEqualFloat64x8 + OpIsNanFloat64x8 + OpLessFloat64x8 + 
OpLessEqualFloat64x8 + OpMaskedAddFloat64x8 + OpMaskedAndFloat64x8 + OpMaskedAndNotFloat64x8 + OpMaskedApproximateReciprocalFloat64x8 + OpMaskedApproximateReciprocalOfSqrtFloat64x8 + OpMaskedDivFloat64x8 + OpMaskedEqualFloat64x8 + OpMaskedGreaterFloat64x8 + OpMaskedGreaterEqualFloat64x8 + OpMaskedIsNanFloat64x8 + OpMaskedLessFloat64x8 + OpMaskedLessEqualFloat64x8 + OpMaskedMaxFloat64x8 + OpMaskedMinFloat64x8 + OpMaskedMulFloat64x8 + OpMaskedMulByPowOf2Float64x8 + OpMaskedNotEqualFloat64x8 + OpMaskedOrFloat64x8 + OpMaskedSqrtFloat64x8 + OpMaskedSubFloat64x8 + OpMaskedXorFloat64x8 + OpMaxFloat64x8 + OpMinFloat64x8 + OpMulFloat64x8 + OpMulByPowOf2Float64x8 + OpNotEqualFloat64x8 + OpOrFloat64x8 + OpSqrtFloat64x8 + OpSubFloat64x8 + OpXorFloat64x8 + OpAbsoluteInt16x16 + OpAddInt16x16 + OpAndInt16x16 + OpAndNotInt16x16 + OpEqualInt16x16 + OpGreaterInt16x16 + OpGreaterEqualInt16x16 + OpLessInt16x16 + OpLessEqualInt16x16 + OpMaskedAbsoluteInt16x16 + OpMaskedAddInt16x16 + OpMaskedEqualInt16x16 + OpMaskedGreaterInt16x16 + OpMaskedGreaterEqualInt16x16 + OpMaskedLessInt16x16 + OpMaskedLessEqualInt16x16 + OpMaskedMaxInt16x16 + OpMaskedMinInt16x16 + OpMaskedMulHighInt16x16 + OpMaskedMulLowInt16x16 + OpMaskedNotEqualInt16x16 + OpMaskedPopCountInt16x16 + OpMaskedSaturatedAddInt16x16 + OpMaskedSaturatedSubInt16x16 + OpMaskedSubInt16x16 + OpMaxInt16x16 + OpMinInt16x16 + OpMulHighInt16x16 + OpMulLowInt16x16 + OpNotEqualInt16x16 + OpOrInt16x16 + OpPairwiseAddInt16x16 + OpPairwiseSubInt16x16 + OpPopCountInt16x16 + OpSaturatedAddInt16x16 + OpSaturatedPairwiseAddInt16x16 + OpSaturatedPairwiseSubInt16x16 + OpSaturatedSubInt16x16 + OpSignInt16x16 + OpSubInt16x16 + OpXorInt16x16 + OpAbsoluteInt16x32 + OpAddInt16x32 + OpEqualInt16x32 + OpGreaterInt16x32 + OpGreaterEqualInt16x32 + OpLessInt16x32 + OpLessEqualInt16x32 + OpMaskedAbsoluteInt16x32 + OpMaskedAddInt16x32 + OpMaskedEqualInt16x32 + OpMaskedGreaterInt16x32 + OpMaskedGreaterEqualInt16x32 + OpMaskedLessInt16x32 + OpMaskedLessEqualInt16x32 + OpMaskedMaxInt16x32 + OpMaskedMinInt16x32 + OpMaskedMulHighInt16x32 + OpMaskedMulLowInt16x32 + OpMaskedNotEqualInt16x32 + OpMaskedPopCountInt16x32 + OpMaskedSaturatedAddInt16x32 + OpMaskedSaturatedSubInt16x32 + OpMaskedSubInt16x32 + OpMaxInt16x32 + OpMinInt16x32 + OpMulHighInt16x32 + OpMulLowInt16x32 + OpNotEqualInt16x32 + OpPopCountInt16x32 + OpSaturatedAddInt16x32 + OpSaturatedSubInt16x32 + OpSubInt16x32 + OpAbsoluteInt16x8 + OpAddInt16x8 + OpAndInt16x8 + OpAndNotInt16x8 + OpEqualInt16x8 + OpGreaterInt16x8 + OpGreaterEqualInt16x8 + OpLessInt16x8 + OpLessEqualInt16x8 + OpMaskedAbsoluteInt16x8 + OpMaskedAddInt16x8 + OpMaskedEqualInt16x8 + OpMaskedGreaterInt16x8 + OpMaskedGreaterEqualInt16x8 + OpMaskedLessInt16x8 + OpMaskedLessEqualInt16x8 + OpMaskedMaxInt16x8 + OpMaskedMinInt16x8 + OpMaskedMulHighInt16x8 + OpMaskedMulLowInt16x8 + OpMaskedNotEqualInt16x8 + OpMaskedPopCountInt16x8 + OpMaskedSaturatedAddInt16x8 + OpMaskedSaturatedSubInt16x8 + OpMaskedSubInt16x8 + OpMaxInt16x8 + OpMinInt16x8 + OpMulHighInt16x8 + OpMulLowInt16x8 + OpNotEqualInt16x8 + OpOrInt16x8 + OpPairwiseAddInt16x8 + OpPairwiseSubInt16x8 + OpPopCountInt16x8 + OpSaturatedAddInt16x8 + OpSaturatedPairwiseAddInt16x8 + OpSaturatedPairwiseSubInt16x8 + OpSaturatedSubInt16x8 + OpSignInt16x8 + OpSubInt16x8 + OpXorInt16x8 + OpAbsoluteInt32x16 + OpAddInt32x16 + OpAndInt32x16 + OpAndNotInt32x16 + OpEqualInt32x16 + OpGreaterInt32x16 + OpGreaterEqualInt32x16 + OpLessInt32x16 + OpLessEqualInt32x16 + OpMaskedAbsoluteInt32x16 + OpMaskedAddInt32x16 + OpMaskedAndInt32x16 + 
OpMaskedAndNotInt32x16 + OpMaskedEqualInt32x16 + OpMaskedGreaterInt32x16 + OpMaskedGreaterEqualInt32x16 + OpMaskedLessInt32x16 + OpMaskedLessEqualInt32x16 + OpMaskedMaxInt32x16 + OpMaskedMinInt32x16 + OpMaskedMulLowInt32x16 + OpMaskedNotEqualInt32x16 + OpMaskedOrInt32x16 + OpMaskedPopCountInt32x16 + OpMaskedSubInt32x16 + OpMaskedXorInt32x16 + OpMaxInt32x16 + OpMinInt32x16 + OpMulLowInt32x16 + OpNotEqualInt32x16 + OpOrInt32x16 + OpPopCountInt32x16 + OpSubInt32x16 + OpXorInt32x16 + OpAbsoluteInt32x4 + OpAddInt32x4 + OpAndInt32x4 + OpAndNotInt32x4 + OpEqualInt32x4 + OpGreaterInt32x4 + OpGreaterEqualInt32x4 + OpLessInt32x4 + OpLessEqualInt32x4 + OpMaskedAbsoluteInt32x4 + OpMaskedAddInt32x4 + OpMaskedAndInt32x4 + OpMaskedAndNotInt32x4 + OpMaskedEqualInt32x4 + OpMaskedGreaterInt32x4 + OpMaskedGreaterEqualInt32x4 + OpMaskedLessInt32x4 + OpMaskedLessEqualInt32x4 + OpMaskedMaxInt32x4 + OpMaskedMinInt32x4 + OpMaskedMulLowInt32x4 + OpMaskedNotEqualInt32x4 + OpMaskedOrInt32x4 + OpMaskedPopCountInt32x4 + OpMaskedSubInt32x4 + OpMaskedXorInt32x4 + OpMaxInt32x4 + OpMinInt32x4 + OpMulEvenWidenInt32x4 + OpMulLowInt32x4 + OpNotEqualInt32x4 + OpOrInt32x4 + OpPairwiseAddInt32x4 + OpPairwiseSubInt32x4 + OpPopCountInt32x4 + OpSignInt32x4 + OpSubInt32x4 + OpXorInt32x4 + OpAbsoluteInt32x8 + OpAddInt32x8 + OpAndInt32x8 + OpAndNotInt32x8 + OpEqualInt32x8 + OpGreaterInt32x8 + OpGreaterEqualInt32x8 + OpLessInt32x8 + OpLessEqualInt32x8 + OpMaskedAbsoluteInt32x8 + OpMaskedAddInt32x8 + OpMaskedAndInt32x8 + OpMaskedAndNotInt32x8 + OpMaskedEqualInt32x8 + OpMaskedGreaterInt32x8 + OpMaskedGreaterEqualInt32x8 + OpMaskedLessInt32x8 + OpMaskedLessEqualInt32x8 + OpMaskedMaxInt32x8 + OpMaskedMinInt32x8 + OpMaskedMulLowInt32x8 + OpMaskedNotEqualInt32x8 + OpMaskedOrInt32x8 + OpMaskedPopCountInt32x8 + OpMaskedSubInt32x8 + OpMaskedXorInt32x8 + OpMaxInt32x8 + OpMinInt32x8 + OpMulEvenWidenInt32x8 + OpMulLowInt32x8 + OpNotEqualInt32x8 + OpOrInt32x8 + OpPairwiseAddInt32x8 + OpPairwiseSubInt32x8 + OpPopCountInt32x8 + OpSignInt32x8 + OpSubInt32x8 + OpXorInt32x8 + OpAbsoluteInt64x2 + OpAddInt64x2 + OpAndInt64x2 + OpAndNotInt64x2 + OpEqualInt64x2 + OpGreaterInt64x2 + OpGreaterEqualInt64x2 + OpLessInt64x2 + OpLessEqualInt64x2 + OpMaskedAbsoluteInt64x2 + OpMaskedAddInt64x2 + OpMaskedAndInt64x2 + OpMaskedAndNotInt64x2 + OpMaskedEqualInt64x2 + OpMaskedGreaterInt64x2 + OpMaskedGreaterEqualInt64x2 + OpMaskedLessInt64x2 + OpMaskedLessEqualInt64x2 + OpMaskedMaxInt64x2 + OpMaskedMinInt64x2 + OpMaskedMulEvenWidenInt64x2 + OpMaskedMulLowInt64x2 + OpMaskedNotEqualInt64x2 + OpMaskedOrInt64x2 + OpMaskedPopCountInt64x2 + OpMaskedSubInt64x2 + OpMaskedXorInt64x2 + OpMaxInt64x2 + OpMinInt64x2 + OpMulEvenWidenInt64x2 + OpMulLowInt64x2 + OpNotEqualInt64x2 + OpOrInt64x2 + OpPopCountInt64x2 + OpSubInt64x2 + OpXorInt64x2 + OpAbsoluteInt64x4 + OpAddInt64x4 + OpAndInt64x4 + OpAndNotInt64x4 + OpEqualInt64x4 + OpGreaterInt64x4 + OpGreaterEqualInt64x4 + OpLessInt64x4 + OpLessEqualInt64x4 + OpMaskedAbsoluteInt64x4 + OpMaskedAddInt64x4 + OpMaskedAndInt64x4 + OpMaskedAndNotInt64x4 + OpMaskedEqualInt64x4 + OpMaskedGreaterInt64x4 + OpMaskedGreaterEqualInt64x4 + OpMaskedLessInt64x4 + OpMaskedLessEqualInt64x4 + OpMaskedMaxInt64x4 + OpMaskedMinInt64x4 + OpMaskedMulEvenWidenInt64x4 + OpMaskedMulLowInt64x4 + OpMaskedNotEqualInt64x4 + OpMaskedOrInt64x4 + OpMaskedPopCountInt64x4 + OpMaskedSubInt64x4 + OpMaskedXorInt64x4 + OpMaxInt64x4 + OpMinInt64x4 + OpMulEvenWidenInt64x4 + OpMulLowInt64x4 + OpNotEqualInt64x4 + OpOrInt64x4 + OpPopCountInt64x4 + OpSubInt64x4 + OpXorInt64x4 + 
OpAbsoluteInt64x8 + OpAddInt64x8 + OpAndInt64x8 + OpAndNotInt64x8 + OpEqualInt64x8 + OpGreaterInt64x8 + OpGreaterEqualInt64x8 + OpLessInt64x8 + OpLessEqualInt64x8 + OpMaskedAbsoluteInt64x8 + OpMaskedAddInt64x8 + OpMaskedAndInt64x8 + OpMaskedAndNotInt64x8 + OpMaskedEqualInt64x8 + OpMaskedGreaterInt64x8 + OpMaskedGreaterEqualInt64x8 + OpMaskedLessInt64x8 + OpMaskedLessEqualInt64x8 + OpMaskedMaxInt64x8 + OpMaskedMinInt64x8 + OpMaskedMulEvenWidenInt64x8 + OpMaskedMulLowInt64x8 + OpMaskedNotEqualInt64x8 + OpMaskedOrInt64x8 + OpMaskedPopCountInt64x8 + OpMaskedSubInt64x8 + OpMaskedXorInt64x8 + OpMaxInt64x8 + OpMinInt64x8 + OpMulEvenWidenInt64x8 + OpMulLowInt64x8 + OpNotEqualInt64x8 + OpOrInt64x8 + OpPopCountInt64x8 + OpSubInt64x8 + OpXorInt64x8 + OpAbsoluteInt8x16 + OpAddInt8x16 + OpAndInt8x16 + OpAndNotInt8x16 + OpEqualInt8x16 + OpGreaterInt8x16 + OpGreaterEqualInt8x16 + OpLessInt8x16 + OpLessEqualInt8x16 + OpMaskedAbsoluteInt8x16 + OpMaskedAddInt8x16 + OpMaskedEqualInt8x16 + OpMaskedGreaterInt8x16 + OpMaskedGreaterEqualInt8x16 + OpMaskedLessInt8x16 + OpMaskedLessEqualInt8x16 + OpMaskedMaxInt8x16 + OpMaskedMinInt8x16 + OpMaskedNotEqualInt8x16 + OpMaskedPopCountInt8x16 + OpMaskedSaturatedAddInt8x16 + OpMaskedSaturatedSubInt8x16 + OpMaskedSubInt8x16 + OpMaxInt8x16 + OpMinInt8x16 + OpNotEqualInt8x16 + OpOrInt8x16 + OpPopCountInt8x16 + OpSaturatedAddInt8x16 + OpSaturatedSubInt8x16 + OpSignInt8x16 + OpSubInt8x16 + OpXorInt8x16 + OpAbsoluteInt8x32 + OpAddInt8x32 + OpAndInt8x32 + OpAndNotInt8x32 + OpEqualInt8x32 + OpGreaterInt8x32 + OpGreaterEqualInt8x32 + OpLessInt8x32 + OpLessEqualInt8x32 + OpMaskedAbsoluteInt8x32 + OpMaskedAddInt8x32 + OpMaskedEqualInt8x32 + OpMaskedGreaterInt8x32 + OpMaskedGreaterEqualInt8x32 + OpMaskedLessInt8x32 + OpMaskedLessEqualInt8x32 + OpMaskedMaxInt8x32 + OpMaskedMinInt8x32 + OpMaskedNotEqualInt8x32 + OpMaskedPopCountInt8x32 + OpMaskedSaturatedAddInt8x32 + OpMaskedSaturatedSubInt8x32 + OpMaskedSubInt8x32 + OpMaxInt8x32 + OpMinInt8x32 + OpNotEqualInt8x32 + OpOrInt8x32 + OpPopCountInt8x32 + OpSaturatedAddInt8x32 + OpSaturatedSubInt8x32 + OpSignInt8x32 + OpSubInt8x32 + OpXorInt8x32 + OpAbsoluteInt8x64 + OpAddInt8x64 + OpEqualInt8x64 + OpGreaterInt8x64 + OpGreaterEqualInt8x64 + OpLessInt8x64 + OpLessEqualInt8x64 + OpMaskedAbsoluteInt8x64 + OpMaskedAddInt8x64 + OpMaskedEqualInt8x64 + OpMaskedGreaterInt8x64 + OpMaskedGreaterEqualInt8x64 + OpMaskedLessInt8x64 + OpMaskedLessEqualInt8x64 + OpMaskedMaxInt8x64 + OpMaskedMinInt8x64 + OpMaskedNotEqualInt8x64 + OpMaskedPopCountInt8x64 + OpMaskedSaturatedAddInt8x64 + OpMaskedSaturatedSubInt8x64 + OpMaskedSubInt8x64 + OpMaxInt8x64 + OpMinInt8x64 + OpNotEqualInt8x64 + OpPopCountInt8x64 + OpSaturatedAddInt8x64 + OpSaturatedSubInt8x64 + OpSubInt8x64 + OpAddUint16x16 + OpAndUint16x16 + OpAndNotUint16x16 + OpAverageUint16x16 + OpEqualUint16x16 + OpGreaterUint16x16 + OpGreaterEqualUint16x16 + OpLessUint16x16 + OpLessEqualUint16x16 + OpMaskedAddUint16x16 + OpMaskedAverageUint16x16 + OpMaskedEqualUint16x16 + OpMaskedGreaterUint16x16 + OpMaskedGreaterEqualUint16x16 + OpMaskedLessUint16x16 + OpMaskedLessEqualUint16x16 + OpMaskedMaxUint16x16 + OpMaskedMinUint16x16 + OpMaskedMulHighUint16x16 + OpMaskedNotEqualUint16x16 + OpMaskedPopCountUint16x16 + OpMaskedSaturatedAddUint16x16 + OpMaskedSaturatedSubUint16x16 + OpMaskedSubUint16x16 + OpMaxUint16x16 + OpMinUint16x16 + OpMulHighUint16x16 + OpNotEqualUint16x16 + OpOrUint16x16 + OpPairwiseAddUint16x16 + OpPairwiseSubUint16x16 + OpPopCountUint16x16 + OpSaturatedAddUint16x16 + OpSaturatedSubUint16x16 + 
OpSubUint16x16 + OpXorUint16x16 + OpAddUint16x32 + OpAverageUint16x32 + OpEqualUint16x32 + OpGreaterUint16x32 + OpGreaterEqualUint16x32 + OpLessUint16x32 + OpLessEqualUint16x32 + OpMaskedAddUint16x32 + OpMaskedAverageUint16x32 + OpMaskedEqualUint16x32 + OpMaskedGreaterUint16x32 + OpMaskedGreaterEqualUint16x32 + OpMaskedLessUint16x32 + OpMaskedLessEqualUint16x32 + OpMaskedMaxUint16x32 + OpMaskedMinUint16x32 + OpMaskedMulHighUint16x32 + OpMaskedNotEqualUint16x32 + OpMaskedPopCountUint16x32 + OpMaskedSaturatedAddUint16x32 + OpMaskedSaturatedSubUint16x32 + OpMaskedSubUint16x32 + OpMaxUint16x32 + OpMinUint16x32 + OpMulHighUint16x32 + OpNotEqualUint16x32 + OpPopCountUint16x32 + OpSaturatedAddUint16x32 + OpSaturatedSubUint16x32 + OpSubUint16x32 + OpAddUint16x8 + OpAndUint16x8 + OpAndNotUint16x8 + OpAverageUint16x8 + OpEqualUint16x8 + OpGreaterUint16x8 + OpGreaterEqualUint16x8 + OpLessUint16x8 + OpLessEqualUint16x8 + OpMaskedAddUint16x8 + OpMaskedAverageUint16x8 + OpMaskedEqualUint16x8 + OpMaskedGreaterUint16x8 + OpMaskedGreaterEqualUint16x8 + OpMaskedLessUint16x8 + OpMaskedLessEqualUint16x8 + OpMaskedMaxUint16x8 + OpMaskedMinUint16x8 + OpMaskedMulHighUint16x8 + OpMaskedNotEqualUint16x8 + OpMaskedPopCountUint16x8 + OpMaskedSaturatedAddUint16x8 + OpMaskedSaturatedSubUint16x8 + OpMaskedSubUint16x8 + OpMaxUint16x8 + OpMinUint16x8 + OpMulHighUint16x8 + OpNotEqualUint16x8 + OpOrUint16x8 + OpPairwiseAddUint16x8 + OpPairwiseSubUint16x8 + OpPopCountUint16x8 + OpSaturatedAddUint16x8 + OpSaturatedSubUint16x8 + OpSubUint16x8 + OpXorUint16x8 + OpAddUint32x16 + OpAndUint32x16 + OpAndNotUint32x16 + OpEqualUint32x16 + OpGreaterUint32x16 + OpGreaterEqualUint32x16 + OpLessUint32x16 + OpLessEqualUint32x16 + OpMaskedAddUint32x16 + OpMaskedAndUint32x16 + OpMaskedAndNotUint32x16 + OpMaskedEqualUint32x16 + OpMaskedGreaterUint32x16 + OpMaskedGreaterEqualUint32x16 + OpMaskedLessUint32x16 + OpMaskedLessEqualUint32x16 + OpMaskedMaxUint32x16 + OpMaskedMinUint32x16 + OpMaskedNotEqualUint32x16 + OpMaskedOrUint32x16 + OpMaskedPopCountUint32x16 + OpMaskedSubUint32x16 + OpMaskedXorUint32x16 + OpMaxUint32x16 + OpMinUint32x16 + OpNotEqualUint32x16 + OpOrUint32x16 + OpPopCountUint32x16 + OpSubUint32x16 + OpXorUint32x16 + OpAddUint32x4 + OpAndUint32x4 + OpAndNotUint32x4 + OpEqualUint32x4 + OpGreaterUint32x4 + OpGreaterEqualUint32x4 + OpLessUint32x4 + OpLessEqualUint32x4 + OpMaskedAddUint32x4 + OpMaskedAndUint32x4 + OpMaskedAndNotUint32x4 + OpMaskedEqualUint32x4 + OpMaskedGreaterUint32x4 + OpMaskedGreaterEqualUint32x4 + OpMaskedLessUint32x4 + OpMaskedLessEqualUint32x4 + OpMaskedMaxUint32x4 + OpMaskedMinUint32x4 + OpMaskedNotEqualUint32x4 + OpMaskedOrUint32x4 + OpMaskedPopCountUint32x4 + OpMaskedSubUint32x4 + OpMaskedXorUint32x4 + OpMaxUint32x4 + OpMinUint32x4 + OpMulEvenWidenUint32x4 + OpNotEqualUint32x4 + OpOrUint32x4 + OpPairwiseAddUint32x4 + OpPairwiseSubUint32x4 + OpPopCountUint32x4 + OpSubUint32x4 + OpXorUint32x4 + OpAddUint32x8 + OpAndUint32x8 + OpAndNotUint32x8 + OpEqualUint32x8 + OpGreaterUint32x8 + OpGreaterEqualUint32x8 + OpLessUint32x8 + OpLessEqualUint32x8 + OpMaskedAddUint32x8 + OpMaskedAndUint32x8 + OpMaskedAndNotUint32x8 + OpMaskedEqualUint32x8 + OpMaskedGreaterUint32x8 + OpMaskedGreaterEqualUint32x8 + OpMaskedLessUint32x8 + OpMaskedLessEqualUint32x8 + OpMaskedMaxUint32x8 + OpMaskedMinUint32x8 + OpMaskedNotEqualUint32x8 + OpMaskedOrUint32x8 + OpMaskedPopCountUint32x8 + OpMaskedSubUint32x8 + OpMaskedXorUint32x8 + OpMaxUint32x8 + OpMinUint32x8 + OpMulEvenWidenUint32x8 + OpNotEqualUint32x8 + OpOrUint32x8 + 
OpPairwiseAddUint32x8 + OpPairwiseSubUint32x8 + OpPopCountUint32x8 + OpSubUint32x8 + OpXorUint32x8 + OpAddUint64x2 + OpAndUint64x2 + OpAndNotUint64x2 + OpEqualUint64x2 + OpGreaterUint64x2 + OpGreaterEqualUint64x2 + OpLessUint64x2 + OpLessEqualUint64x2 + OpMaskedAddUint64x2 + OpMaskedAndUint64x2 + OpMaskedAndNotUint64x2 + OpMaskedEqualUint64x2 + OpMaskedGreaterUint64x2 + OpMaskedGreaterEqualUint64x2 + OpMaskedLessUint64x2 + OpMaskedLessEqualUint64x2 + OpMaskedMaxUint64x2 + OpMaskedMinUint64x2 + OpMaskedMulEvenWidenUint64x2 + OpMaskedNotEqualUint64x2 + OpMaskedOrUint64x2 + OpMaskedPopCountUint64x2 + OpMaskedSubUint64x2 + OpMaskedXorUint64x2 + OpMaxUint64x2 + OpMinUint64x2 + OpMulEvenWidenUint64x2 + OpNotEqualUint64x2 + OpOrUint64x2 + OpPopCountUint64x2 + OpSubUint64x2 + OpXorUint64x2 + OpAddUint64x4 + OpAndUint64x4 + OpAndNotUint64x4 + OpEqualUint64x4 + OpGreaterUint64x4 + OpGreaterEqualUint64x4 + OpLessUint64x4 + OpLessEqualUint64x4 + OpMaskedAddUint64x4 + OpMaskedAndUint64x4 + OpMaskedAndNotUint64x4 + OpMaskedEqualUint64x4 + OpMaskedGreaterUint64x4 + OpMaskedGreaterEqualUint64x4 + OpMaskedLessUint64x4 + OpMaskedLessEqualUint64x4 + OpMaskedMaxUint64x4 + OpMaskedMinUint64x4 + OpMaskedMulEvenWidenUint64x4 + OpMaskedNotEqualUint64x4 + OpMaskedOrUint64x4 + OpMaskedPopCountUint64x4 + OpMaskedSubUint64x4 + OpMaskedXorUint64x4 + OpMaxUint64x4 + OpMinUint64x4 + OpMulEvenWidenUint64x4 + OpNotEqualUint64x4 + OpOrUint64x4 + OpPopCountUint64x4 + OpSubUint64x4 + OpXorUint64x4 + OpAddUint64x8 + OpAndUint64x8 + OpAndNotUint64x8 + OpEqualUint64x8 + OpGreaterUint64x8 + OpGreaterEqualUint64x8 + OpLessUint64x8 + OpLessEqualUint64x8 + OpMaskedAddUint64x8 + OpMaskedAndUint64x8 + OpMaskedAndNotUint64x8 + OpMaskedEqualUint64x8 + OpMaskedGreaterUint64x8 + OpMaskedGreaterEqualUint64x8 + OpMaskedLessUint64x8 + OpMaskedLessEqualUint64x8 + OpMaskedMaxUint64x8 + OpMaskedMinUint64x8 + OpMaskedMulEvenWidenUint64x8 + OpMaskedNotEqualUint64x8 + OpMaskedOrUint64x8 + OpMaskedPopCountUint64x8 + OpMaskedSubUint64x8 + OpMaskedXorUint64x8 + OpMaxUint64x8 + OpMinUint64x8 + OpMulEvenWidenUint64x8 + OpNotEqualUint64x8 + OpOrUint64x8 + OpPopCountUint64x8 + OpSubUint64x8 + OpXorUint64x8 + OpAddUint8x16 + OpAndUint8x16 + OpAndNotUint8x16 + OpAverageUint8x16 + OpEqualUint8x16 + OpGreaterUint8x16 + OpGreaterEqualUint8x16 + OpLessUint8x16 + OpLessEqualUint8x16 + OpMaskedAddUint8x16 + OpMaskedAverageUint8x16 + OpMaskedEqualUint8x16 + OpMaskedGreaterUint8x16 + OpMaskedGreaterEqualUint8x16 + OpMaskedLessUint8x16 + OpMaskedLessEqualUint8x16 + OpMaskedMaxUint8x16 + OpMaskedMinUint8x16 + OpMaskedNotEqualUint8x16 + OpMaskedPopCountUint8x16 + OpMaskedSaturatedAddUint8x16 + OpMaskedSaturatedSubUint8x16 + OpMaskedSubUint8x16 + OpMaxUint8x16 + OpMinUint8x16 + OpNotEqualUint8x16 + OpOrUint8x16 + OpPopCountUint8x16 + OpSaturatedAddUint8x16 + OpSaturatedSubUint8x16 + OpSubUint8x16 + OpXorUint8x16 + OpAddUint8x32 + OpAndUint8x32 + OpAndNotUint8x32 + OpAverageUint8x32 + OpEqualUint8x32 + OpGreaterUint8x32 + OpGreaterEqualUint8x32 + OpLessUint8x32 + OpLessEqualUint8x32 + OpMaskedAddUint8x32 + OpMaskedAverageUint8x32 + OpMaskedEqualUint8x32 + OpMaskedGreaterUint8x32 + OpMaskedGreaterEqualUint8x32 + OpMaskedLessUint8x32 + OpMaskedLessEqualUint8x32 + OpMaskedMaxUint8x32 + OpMaskedMinUint8x32 + OpMaskedNotEqualUint8x32 + OpMaskedPopCountUint8x32 + OpMaskedSaturatedAddUint8x32 + OpMaskedSaturatedSubUint8x32 + OpMaskedSubUint8x32 + OpMaxUint8x32 + OpMinUint8x32 + OpNotEqualUint8x32 + OpOrUint8x32 + OpPopCountUint8x32 + OpSaturatedAddUint8x32 + 
OpSaturatedSubUint8x32 + OpSubUint8x32 + OpXorUint8x32 + OpAddUint8x64 + OpAverageUint8x64 + OpEqualUint8x64 + OpGreaterUint8x64 + OpGreaterEqualUint8x64 + OpLessUint8x64 + OpLessEqualUint8x64 + OpMaskedAddUint8x64 + OpMaskedAverageUint8x64 + OpMaskedEqualUint8x64 + OpMaskedGreaterUint8x64 + OpMaskedGreaterEqualUint8x64 + OpMaskedLessUint8x64 + OpMaskedLessEqualUint8x64 + OpMaskedMaxUint8x64 + OpMaskedMinUint8x64 + OpMaskedNotEqualUint8x64 + OpMaskedPopCountUint8x64 + OpMaskedSaturatedAddUint8x64 + OpMaskedSaturatedSubUint8x64 + OpMaskedSubUint8x64 + OpMaxUint8x64 + OpMinUint8x64 + OpNotEqualUint8x64 + OpPopCountUint8x64 + OpSaturatedAddUint8x64 + OpSaturatedSubUint8x64 + OpSubUint8x64 ) var opcodeTable = [...]opInfo{ @@ -16017,8730 +17673,9349 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "ADD", + name: "VADDPS512", argLen: 2, commutative: true, - asm: arm.AADD, + asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDconst", - auxType: auxInt32, - argLen: 1, - asm: arm.AADD, + name: "VANDPS512", + argLen: 2, + commutative: true, + asm: x86.AVANDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 30719}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUB", - argLen: 2, - asm: arm.ASUB, + name: "VANDNPS512", + argLen: 2, + commutative: true, + asm: x86.AVANDNPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBconst", - auxType: auxInt32, - argLen: 1, - asm: arm.ASUB, + name: "VRCP14PS512", + argLen: 1, + asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RSB", - argLen: 2, - asm: arm.ARSB, + name: "VRSQRT14PS512", + argLen: 1, + asm: x86.AVRSQRT14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RSBconst", - auxType: auxInt32, - argLen: 1, - asm: arm.ARSB, + name: "VDIVPS512", + argLen: 2, + asm: 
x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MUL", - argLen: 2, + name: "VANDPSMasked512", + argLen: 3, commutative: true, - asm: arm.AMUL, + asm: x86.AVANDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "HMUL", - argLen: 2, + name: "VANDNPSMasked512", + argLen: 3, commutative: true, - asm: arm.AMULL, + asm: x86.AVANDNPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "HMULU", - argLen: 2, - commutative: true, - asm: arm.AMULLU, + name: "VRCP14PSMasked512", + argLen: 2, + asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CALLudiv", - argLen: 2, - clobberFlags: true, + name: "VRSQRT14PSMasked512", + argLen: 2, + asm: x86.AVRSQRT14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 2}, // R1 - {1, 1}, // R0 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, - clobbers: 20492, // R2 R3 R12 R14 outputs: []outputInfo{ - {0, 1}, // R0 - {1, 2}, // R1 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDS", - argLen: 2, - commutative: true, - asm: arm.AADD, + name: "VDIVPSMasked512", + argLen: 3, + asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDSconst", - auxType: auxInt32, - argLen: 1, - asm: arm.AADD, + name: "VMAXPSMasked512", + argLen: 3, + commutative: true, + asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ - {0, 
22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADC", + name: "VMINPSMasked512", argLen: 3, commutative: true, - asm: arm.AADC, + asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADCconst", - auxType: auxInt32, - argLen: 2, - asm: arm.AADC, + name: "VMULPSMasked512", + argLen: 3, + commutative: true, + asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBS", - argLen: 2, - asm: arm.ASUB, + name: "VSCALEFPSMasked512", + argLen: 3, + asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBSconst", - auxType: auxInt32, - argLen: 1, - asm: arm.ASUB, + name: "VORPSMasked512", + argLen: 3, + commutative: true, + asm: x86.AVORPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RSBSconst", - auxType: auxInt32, - argLen: 1, - asm: arm.ARSB, + name: "VSQRTPSMasked512", + argLen: 2, + asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SBC", + name: "VADDPSMasked512", argLen: 3, - asm: arm.ASBC, - reg: regInfo{ - inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 
R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - }, - outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - }, - }, - }, - { - name: "SBCconst", - auxType: auxInt32, - argLen: 2, - asm: arm.ASBC, + asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RSCconst", - auxType: auxInt32, - argLen: 2, - asm: arm.ARSC, + name: "VXORPSMasked512", + argLen: 3, + commutative: true, + asm: x86.AVXORPS, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MULLU", + name: "VMAXPS512", argLen: 2, commutative: true, - asm: arm.AMULLU, + asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MULA", - argLen: 3, - asm: arm.AMULA, + name: "VMINPS512", + argLen: 2, + commutative: true, + asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MULS", - argLen: 3, - asm: arm.AMULS, + name: "VMULPS512", + argLen: 2, + commutative: true, + asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDF", - argLen: 2, - commutative: true, - asm: arm.AADDF, + name: "VSCALEFPS512", + argLen: 2, + asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDD", + name: "VORPS512", argLen: 2, commutative: true, - asm: arm.AADDD, + asm: x86.AVORPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBF", - argLen: 2, - asm: arm.ASUBF, + name: "VSQRTPS512", + argLen: 1, + asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBD", - argLen: 2, - asm: arm.ASUBD, + name: "VXORPS512", + argLen: 2, + commutative: true, + asm: x86.AVXORPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MULF", + name: "VANDPS128", argLen: 2, commutative: true, - asm: arm.AMULF, + asm: x86.AVANDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MULD", + name: "VANDNPS128", argLen: 2, commutative: true, - asm: arm.AMULD, + asm: x86.AVANDNPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "NMULF", - argLen: 2, - commutative: true, - asm: arm.ANMULF, + name: "VRCP14PS128", + argLen: 1, + asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 
F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "NMULD", - argLen: 2, - commutative: true, - asm: arm.ANMULD, + name: "VRSQRTPS128", + argLen: 1, + asm: x86.AVRSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "DIVF", + name: "VDIVPS128", argLen: 2, - asm: arm.ADIVF, + asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "DIVD", - argLen: 2, - asm: arm.ADIVD, + name: "VADDPSMasked128", + argLen: 3, + commutative: true, + asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MULAF", - argLen: 3, - resultInArg0: true, - asm: arm.AMULAF, + name: "VANDPSMasked128", + argLen: 3, + commutative: true, + asm: x86.AVANDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MULAD", - argLen: 3, - resultInArg0: true, - asm: arm.AMULAD, + name: "VANDNPSMasked128", + argLen: 3, + commutative: true, + asm: x86.AVANDNPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MULSF", - argLen: 3, - resultInArg0: true, - asm: arm.AMULSF, + name: "VRCP14PSMasked128", + argLen: 2, + asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MULSD", - argLen: 3, - resultInArg0: true, - asm: arm.AMULSD, + name: "VRSQRT14PSMasked128", + argLen: 2, + asm: x86.AVRSQRT14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMULAD", - argLen: 3, - resultInArg0: true, - asm: arm.AFMULAD, + name: "VDIVPSMasked128", + argLen: 3, + asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "AND", - argLen: 2, + name: "VMAXPSMasked128", + argLen: 3, commutative: true, - asm: arm.AAND, + asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ANDconst", - auxType: auxInt32, - argLen: 1, - asm: arm.AAND, + name: "VMINPSMasked128", + argLen: 3, + commutative: true, + asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "OR", - argLen: 2, + name: "VMULPSMasked128", + argLen: 3, commutative: true, - asm: arm.AORR, + asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ORconst", - auxType: auxInt32, - argLen: 1, - asm: arm.AORR, + name: "VSCALEFPSMasked128", + argLen: 3, + asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "XOR", - argLen: 2, + name: "VORPSMasked128", + argLen: 3, commutative: true, - asm: arm.AEOR, + asm: x86.AVORPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "XORconst", - auxType: auxInt32, - argLen: 1, - asm: arm.AEOR, + name: "VSQRTPSMasked128", + argLen: 2, + asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "BIC", - argLen: 2, - asm: arm.ABIC, + name: "VXORPSMasked128", + argLen: 3, + commutative: true, + asm: x86.AVXORPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "BICconst", - auxType: auxInt32, - argLen: 1, - asm: arm.ABIC, + name: "VMAXPS128", + argLen: 2, + commutative: true, + asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, 
}, }, { - name: "BFX", - auxType: auxInt32, - argLen: 1, - asm: arm.ABFX, + name: "VMINPS128", + argLen: 2, + commutative: true, + asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "BFXU", - auxType: auxInt32, - argLen: 1, - asm: arm.ABFXU, + name: "VMULPS128", + argLen: 2, + commutative: true, + asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MVN", - argLen: 1, - asm: arm.AMVN, + name: "VSCALEFPS128", + argLen: 2, + asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "NEGF", - argLen: 1, - asm: arm.ANEGF, + name: "VORPS128", + argLen: 2, + commutative: true, + asm: x86.AVORPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "NEGD", - argLen: 1, - asm: arm.ANEGD, + name: "VHADDPS128", + argLen: 2, + asm: x86.AVHADDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SQRTD", - argLen: 1, - asm: arm.ASQRTD, + name: "VHSUBPS128", + argLen: 2, + asm: x86.AVHSUBPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SQRTF", + name: "VSQRTPS128", argLen: 1, - asm: arm.ASQRTF, + asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 
4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ABSD", - argLen: 1, - asm: arm.AABSD, + name: "VADDPS128", + argLen: 2, + asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CLZ", - argLen: 1, - asm: arm.ACLZ, + name: "VXORPS128", + argLen: 2, + commutative: true, + asm: x86.AVXORPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "REV", - argLen: 1, - asm: arm.AREV, + name: "VADDPS256", + argLen: 2, + commutative: true, + asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "REV16", - argLen: 1, - asm: arm.AREV16, + name: "VANDPS256", + argLen: 2, + commutative: true, + asm: x86.AVANDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RBIT", - argLen: 1, - asm: arm.ARBIT, + name: "VANDNPS256", + argLen: 2, + commutative: true, + asm: x86.AVANDNPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SLL", - argLen: 2, - asm: arm.ASLL, + name: "VRCP14PS256", + argLen: 1, + asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SLLconst", - auxType: auxInt32, - argLen: 1, - asm: arm.ASLL, + name: "VRSQRTPS256", + argLen: 1, + asm: x86.AVRSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SRL", + name: "VDIVPS256", argLen: 2, - asm: arm.ASRL, + asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SRLconst", - auxType: auxInt32, - argLen: 1, - asm: arm.ASRL, + name: "VANDPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVANDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SRA", - argLen: 2, - asm: arm.ASRA, + name: "VANDNPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVANDNPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SRAconst", - auxType: auxInt32, - argLen: 1, - asm: arm.ASRA, + name: "VRCP14PSMasked256", + argLen: 2, + asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SRR", + name: "VRSQRT14PSMasked256", argLen: 2, + asm: x86.AVRSQRT14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SRRconst", - auxType: auxInt32, - argLen: 1, + name: "VDIVPSMasked256", + argLen: 3, + asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDshiftLL", - 
auxType: auxInt32, - argLen: 2, - asm: arm.AADD, + name: "VMAXPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDshiftRL", - auxType: auxInt32, - argLen: 2, - asm: arm.AADD, + name: "VMINPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDshiftRA", - auxType: auxInt32, - argLen: 2, - asm: arm.AADD, + name: "VMULPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBshiftLL", - auxType: auxInt32, - argLen: 2, - asm: arm.ASUB, + name: "VSCALEFPSMasked256", + argLen: 3, + asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBshiftRL", - auxType: auxInt32, - argLen: 2, - asm: arm.ASUB, + name: "VORPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVORPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBshiftRA", - auxType: auxInt32, - argLen: 2, - asm: arm.ASUB, + name: "VSQRTPSMasked256", + argLen: 2, + asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 1090921693184}, // 
K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RSBshiftLL", - auxType: auxInt32, - argLen: 2, - asm: arm.ARSB, + name: "VADDPSMasked256", + argLen: 3, + asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RSBshiftRL", - auxType: auxInt32, - argLen: 2, - asm: arm.ARSB, + name: "VXORPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVXORPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RSBshiftRA", - auxType: auxInt32, - argLen: 2, - asm: arm.ARSB, + name: "VMAXPS256", + argLen: 2, + commutative: true, + asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ANDshiftLL", - auxType: auxInt32, - argLen: 2, - asm: arm.AAND, + name: "VMINPS256", + argLen: 2, + commutative: true, + asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ANDshiftRL", - auxType: auxInt32, - argLen: 2, - asm: arm.AAND, + name: "VMULPS256", + argLen: 2, + commutative: true, + asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ANDshiftRA", - auxType: auxInt32, - argLen: 2, - asm: arm.AAND, + name: "VSCALEFPS256", + argLen: 2, + asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 
R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ORshiftLL", - auxType: auxInt32, - argLen: 2, - asm: arm.AORR, + name: "VORPS256", + argLen: 2, + commutative: true, + asm: x86.AVORPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ORshiftRL", - auxType: auxInt32, - argLen: 2, - asm: arm.AORR, + name: "VHADDPS256", + argLen: 2, + asm: x86.AVHADDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ORshiftRA", - auxType: auxInt32, - argLen: 2, - asm: arm.AORR, + name: "VHSUBPS256", + argLen: 2, + asm: x86.AVHSUBPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "XORshiftLL", - auxType: auxInt32, - argLen: 2, - asm: arm.AEOR, + name: "VSQRTPS256", + argLen: 1, + asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "XORshiftRL", - auxType: auxInt32, - argLen: 2, - asm: arm.AEOR, + name: "VXORPS256", + argLen: 2, + commutative: true, + asm: x86.AVXORPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "XORshiftRA", - auxType: auxInt32, - argLen: 2, - asm: arm.AEOR, + name: "VADDPD128", + argLen: 2, + commutative: true, + asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 
R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "XORshiftRR", - auxType: auxInt32, - argLen: 2, - asm: arm.AEOR, + name: "VANDPD128", + argLen: 2, + commutative: true, + asm: x86.AVANDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "BICshiftLL", - auxType: auxInt32, - argLen: 2, - asm: arm.ABIC, + name: "VANDNPD128", + argLen: 2, + commutative: true, + asm: x86.AVANDNPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "BICshiftRL", - auxType: auxInt32, - argLen: 2, - asm: arm.ABIC, + name: "VRCP14PD128", + argLen: 1, + asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "BICshiftRA", - auxType: auxInt32, - argLen: 2, - asm: arm.ABIC, + name: "VRSQRT14PD128", + argLen: 1, + asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MVNshiftLL", - auxType: auxInt32, - argLen: 1, - asm: arm.AMVN, + name: "VDIVPD128", + argLen: 2, + asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MVNshiftRL", - auxType: auxInt32, - argLen: 1, - asm: arm.AMVN, + name: "VADDPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MVNshiftRA", - auxType: auxInt32, - argLen: 1, - asm: arm.AMVN, + name: "VANDPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVANDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADCshiftLL", - auxType: auxInt32, - argLen: 3, - asm: arm.AADC, + name: "VANDNPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVANDNPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADCshiftRL", - auxType: auxInt32, - argLen: 3, - asm: arm.AADC, + name: "VRCP14PDMasked128", + argLen: 2, + asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADCshiftRA", - auxType: auxInt32, - argLen: 3, - asm: arm.AADC, + name: "VRSQRT14PDMasked128", + argLen: 2, + asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SBCshiftLL", - auxType: auxInt32, - argLen: 3, - asm: arm.ASBC, + name: "VDIVPDMasked128", + argLen: 3, + asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SBCshiftRL", - auxType: auxInt32, - argLen: 3, - asm: arm.ASBC, + name: "VMAXPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SBCshiftRA", - auxType: auxInt32, - argLen: 3, - asm: arm.ASBC, + name: "VMINPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RSCshiftLL", - auxType: auxInt32, - argLen: 3, - asm: arm.ARSC, + name: "VMULPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RSCshiftRL", - auxType: auxInt32, - argLen: 3, - asm: arm.ARSC, + name: "VSCALEFPDMasked128", + argLen: 3, + asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RSCshiftRA", - auxType: auxInt32, - argLen: 3, - asm: arm.ARSC, + name: "VORPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVORPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDSshiftLL", - auxType: auxInt32, - argLen: 2, - asm: arm.AADD, + name: "VSQRTPDMasked128", + argLen: 2, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDSshiftRL", - auxType: auxInt32, - argLen: 2, - asm: arm.AADD, + name: 
"VXORPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVXORPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDSshiftRA", - auxType: auxInt32, - argLen: 2, - asm: arm.AADD, + name: "VMAXPD128", + argLen: 2, + commutative: true, + asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBSshiftLL", - auxType: auxInt32, - argLen: 2, - asm: arm.ASUB, + name: "VMINPD128", + argLen: 2, + commutative: true, + asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBSshiftRL", - auxType: auxInt32, - argLen: 2, - asm: arm.ASUB, + name: "VMULPD128", + argLen: 2, + commutative: true, + asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBSshiftRA", - auxType: auxInt32, - argLen: 2, - asm: arm.ASUB, + name: "VSCALEFPD128", + argLen: 2, + asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RSBSshiftLL", - auxType: auxInt32, - argLen: 2, - asm: arm.ARSB, + name: "VORPD128", + argLen: 2, + commutative: true, + asm: x86.AVORPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 
R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RSBSshiftRL", - auxType: auxInt32, - argLen: 2, - asm: arm.ARSB, + name: "VHADDPD128", + argLen: 2, + asm: x86.AVHADDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RSBSshiftRA", - auxType: auxInt32, - argLen: 2, - asm: arm.ARSB, + name: "VHSUBPD128", + argLen: 2, + asm: x86.AVHSUBPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDshiftLLreg", - argLen: 3, - asm: arm.AADD, + name: "VSQRTPD128", + argLen: 1, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDshiftRLreg", - argLen: 3, - asm: arm.AADD, + name: "VXORPD128", + argLen: 2, + commutative: true, + asm: x86.AVXORPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDshiftRAreg", - argLen: 3, - asm: arm.AADD, + name: "VADDPD256", + argLen: 2, + commutative: true, + asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBshiftLLreg", - argLen: 3, - asm: arm.ASUB, + name: "VANDPD256", + argLen: 2, + commutative: true, + asm: x86.AVANDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBshiftRLreg", - argLen: 3, - asm: arm.ASUB, + name: "VANDNPD256", + argLen: 2, + commutative: true, + asm: x86.AVANDNPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBshiftRAreg", - argLen: 3, - asm: arm.ASUB, + name: "VRCP14PD256", + argLen: 1, + asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RSBshiftLLreg", - argLen: 3, - asm: arm.ARSB, + name: "VRSQRT14PD256", + argLen: 1, + asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RSBshiftRLreg", - argLen: 3, - asm: arm.ARSB, + name: "VDIVPD256", + argLen: 2, + asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RSBshiftRAreg", - argLen: 3, - asm: arm.ARSB, + name: "VANDPDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVANDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ANDshiftLLreg", - argLen: 3, - asm: arm.AAND, + name: "VANDNPDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVANDNPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 
R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ANDshiftRLreg", - argLen: 3, - asm: arm.AAND, + name: "VRCP14PDMasked256", + argLen: 2, + asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ANDshiftRAreg", - argLen: 3, - asm: arm.AAND, + name: "VRSQRT14PDMasked256", + argLen: 2, + asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ORshiftLLreg", + name: "VDIVPDMasked256", argLen: 3, - asm: arm.AORR, + asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ORshiftRLreg", - argLen: 3, - asm: arm.AORR, + name: "VMAXPDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ORshiftRAreg", - argLen: 3, - asm: arm.AORR, + name: "VMINPDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "XORshiftLLreg", - argLen: 3, - asm: arm.AEOR, + name: "VMULPDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "XORshiftRLreg", + name: "VSCALEFPDMasked256", argLen: 3, - asm: arm.AEOR, + asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "XORshiftRAreg", - argLen: 3, - asm: arm.AEOR, + name: "VORPDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVORPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "BICshiftLLreg", - argLen: 3, - asm: arm.ABIC, + name: "VSQRTPDMasked256", + argLen: 2, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "BICshiftRLreg", + name: "VADDPDMasked256", argLen: 3, - asm: arm.ABIC, + asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "BICshiftRAreg", - argLen: 3, - asm: arm.ABIC, + name: "VXORPDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVXORPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, 
// R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MVNshiftLLreg", - argLen: 2, - asm: arm.AMVN, + name: "VMAXPD256", + argLen: 2, + commutative: true, + asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MVNshiftRLreg", - argLen: 2, - asm: arm.AMVN, + name: "VMINPD256", + argLen: 2, + commutative: true, + asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MVNshiftRAreg", - argLen: 2, - asm: arm.AMVN, + name: "VMULPD256", + argLen: 2, + commutative: true, + asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADCshiftLLreg", - argLen: 4, - asm: arm.AADC, + name: "VSCALEFPD256", + argLen: 2, + asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADCshiftRLreg", - argLen: 4, - asm: arm.AADC, + name: "VORPD256", + argLen: 2, + commutative: true, + asm: x86.AVORPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADCshiftRAreg", - 
argLen: 4, - asm: arm.AADC, + name: "VHADDPD256", + argLen: 2, + asm: x86.AVHADDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SBCshiftLLreg", - argLen: 4, - asm: arm.ASBC, + name: "VHSUBPD256", + argLen: 2, + asm: x86.AVHSUBPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SBCshiftRLreg", - argLen: 4, - asm: arm.ASBC, + name: "VSQRTPD256", + argLen: 1, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SBCshiftRAreg", - argLen: 4, - asm: arm.ASBC, + name: "VXORPD256", + argLen: 2, + commutative: true, + asm: x86.AVXORPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RSCshiftLLreg", - argLen: 4, - asm: arm.ARSC, + name: "VANDPD512", + argLen: 2, + commutative: true, + asm: x86.AVANDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RSCshiftRLreg", - argLen: 4, - asm: arm.ARSC, + name: "VANDNPD512", + argLen: 2, + commutative: true, + asm: x86.AVANDNPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // 
R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RSCshiftRAreg", - argLen: 4, - asm: arm.ARSC, + name: "VRCP14PD512", + argLen: 1, + asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDSshiftLLreg", - argLen: 3, - asm: arm.AADD, + name: "VRSQRT14PD512", + argLen: 1, + asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDSshiftRLreg", - argLen: 3, - asm: arm.AADD, + name: "VDIVPD512", + argLen: 2, + asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDSshiftRAreg", - argLen: 3, - asm: arm.AADD, + name: "VANDPDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVANDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBSshiftLLreg", - argLen: 3, - asm: arm.ASUB, + name: "VANDNPDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVANDNPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBSshiftRLreg", - argLen: 3, - asm: arm.ASUB, + name: "VRCP14PDMasked512", + argLen: 2, + asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 
R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBSshiftRAreg", - argLen: 3, - asm: arm.ASUB, + name: "VRSQRT14PDMasked512", + argLen: 2, + asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RSBSshiftLLreg", + name: "VDIVPDMasked512", argLen: 3, - asm: arm.ARSB, + asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RSBSshiftRLreg", - argLen: 3, - asm: arm.ARSB, + name: "VMAXPDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RSBSshiftRAreg", - argLen: 3, - asm: arm.ARSB, + name: "VMINPDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMP", - argLen: 2, - asm: arm.ACMP, + name: "VMULPDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 }, }, }, { - name: "CMPconst", - auxType: auxInt32, - argLen: 1, - asm: arm.ACMP, + name: "VSCALEFPDMasked512", + argLen: 3, + asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMN", - argLen: 2, + name: "VORPDMasked512", + argLen: 3, commutative: true, - asm: arm.ACMN, + asm: x86.AVORPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMNconst", - auxType: auxInt32, - argLen: 1, - asm: arm.ACMN, + name: "VSQRTPDMasked512", + argLen: 2, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "TST", - argLen: 2, - commutative: true, - asm: arm.ATST, + name: "VADDPDMasked512", + argLen: 3, + asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "TSTconst", - auxType: auxInt32, - argLen: 1, - asm: arm.ATST, + name: "VXORPDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVXORPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "TEQ", + name: "VMAXPD512", argLen: 2, commutative: true, - asm: arm.ATEQ, + asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "TEQconst", - auxType: auxInt32, - argLen: 1, - asm: arm.ATEQ, + name: "VMINPD512", + argLen: 2, + commutative: true, + asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMPF", - argLen: 2, - asm: arm.ACMPF, + name: "VMULPD512", + argLen: 2, + commutative: true, + asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMPD", + name: "VSCALEFPD512", argLen: 2, - asm: arm.ACMPD, + asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMPshiftLL", - auxType: auxInt32, - argLen: 2, - asm: arm.ACMP, + name: "VORPD512", + argLen: 2, + commutative: true, + asm: x86.AVORPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMPshiftRL", - auxType: auxInt32, - argLen: 2, - asm: arm.ACMP, + name: "VSQRTPD512", + argLen: 1, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMPshiftRA", - auxType: auxInt32, - argLen: 2, - asm: arm.ACMP, + name: "VADDPD512", + argLen: 2, + asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMNshiftLL", - auxType: auxInt32, - argLen: 2, - asm: arm.ACMN, + name: "VXORPD512", + argLen: 2, + commutative: true, + asm: x86.AVXORPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMNshiftRL", - auxType: auxInt32, - argLen: 2, - asm: arm.ACMN, + name: "VPABSW256", + argLen: 1, + asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ - {0, 
22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMNshiftRA", - auxType: auxInt32, - argLen: 2, - asm: arm.ACMN, + name: "VPADDW256", + argLen: 2, + commutative: true, + asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "TSTshiftLL", - auxType: auxInt32, - argLen: 2, - asm: arm.ATST, + name: "VPCMPEQW256", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "TSTshiftRL", - auxType: auxInt32, - argLen: 2, - asm: arm.ATST, + name: "VPCMPGTW256", + argLen: 2, + asm: x86.AVPCMPGTW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "TSTshiftRA", - auxType: auxInt32, - argLen: 2, - asm: arm.ATST, + name: "VPABSWMasked256", + argLen: 2, + asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "TEQshiftLL", - auxType: auxInt32, - argLen: 2, - asm: arm.ATEQ, + name: "VPADDWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "TEQshiftRL", - auxType: auxInt32, - argLen: 2, - asm: arm.ATEQ, + name: "VPCMPEQWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPCMPEQW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "TEQshiftRA", - auxType: auxInt32, - argLen: 2, - asm: arm.ATEQ, + name: "VPCMPGTWMasked256", + argLen: 3, + asm: x86.AVPCMPGTW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "CMPshiftLLreg", - argLen: 3, - asm: arm.ACMP, + name: "VPMAXSWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSW, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMPshiftRLreg", - argLen: 3, - asm: arm.ACMP, + name: "VPMINSWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMINSW, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMPshiftRAreg", - argLen: 3, - asm: arm.ACMP, + name: "VPMULHWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMNshiftLLreg", - argLen: 3, - asm: arm.ACMN, + name: "VPMULLWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMNshiftRLreg", - argLen: 3, - asm: arm.ACMN, + name: "VPADDSWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 
R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMNshiftRAreg", + name: "VPSUBSWMasked256", argLen: 3, - asm: arm.ACMN, + asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "TSTshiftLLreg", + name: "VPSUBWMasked256", argLen: 3, - asm: arm.ATST, + asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "TSTshiftRLreg", - argLen: 3, - asm: arm.ATST, + name: "VPMAXSW256", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSW, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "TSTshiftRAreg", - argLen: 3, - asm: arm.ATST, + name: "VPMINSW256", + argLen: 2, + commutative: true, + asm: x86.AVPMINSW, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "TEQshiftLLreg", - argLen: 3, - asm: arm.ATEQ, + name: "VPMULHW256", + argLen: 2, + commutative: true, + asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "TEQshiftRLreg", - argLen: 3, - asm: arm.ATEQ, + name: "VPMULLW256", + argLen: 2, + commutative: true, + asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 
R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "TEQshiftRAreg", - argLen: 3, - asm: arm.ATEQ, + name: "VPHSUBW256", + argLen: 2, + asm: x86.AVPHSUBW, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMPF0", - argLen: 1, - asm: arm.ACMPF, + name: "VPHADDSW256", + argLen: 2, + asm: x86.AVPHADDSW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMPD0", - argLen: 1, - asm: arm.ACMPD, + name: "VPHSUBSW256", + argLen: 2, + asm: x86.AVPHSUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, - }, - }, - { - name: "MOVWconst", - auxType: auxInt32, - argLen: 0, - rematerializeable: true, - asm: arm.AMOVW, - reg: regInfo{ outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVFconst", - auxType: auxFloat64, - argLen: 0, - rematerializeable: true, - asm: arm.AMOVF, + name: "VPSUBSW256", + argLen: 2, + asm: x86.AVPSUBSW, reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVDconst", - auxType: auxFloat64, - argLen: 0, - rematerializeable: true, - asm: arm.AMOVD, + name: "VPSIGNW256", + argLen: 2, + asm: x86.AVPSIGNW, reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVWaddr", - auxType: auxSymOff, - argLen: 1, - rematerializeable: true, - symEffect: SymAddr, - asm: arm.AMOVW, + name: "VPSUBW256", + argLen: 2, + asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294975488}, // SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVBload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm.AMOVB, + name: "VPABSW512", + argLen: 1, + asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVBUload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm.AMOVBU, + name: "VPADDW512", + argLen: 2, + commutative: true, + asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVHload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm.AMOVH, + name: "VPCMPEQW512", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "MOVHUload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm.AMOVHU, + name: "VPCMPGTW512", + argLen: 2, + asm: x86.AVPCMPGTW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "MOVWload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm.AMOVW, + name: "VPABSWMasked512", + argLen: 2, + asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVFload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm.AMOVF, + name: "VPCMPEQWMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPCMPEQW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: 
"MOVDload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm.AMOVD, + name: "VPCMPGTWMasked512", + argLen: 3, + asm: x86.AVPCMPGTW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "MOVBstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm.AMOVB, + name: "VPMAXSWMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSW, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVHstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm.AMOVH, + name: "VPMINSWMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMINSW, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVWstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm.AMOVW, + name: "VPMULHWMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVFstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm.AMOVF, + name: "VPMULLWMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVDstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm.AMOVD, + name: "VPMAXSW512", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSW, reg: regInfo{ inputs: []inputInfo{ - {0, 
4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVWloadidx", - argLen: 3, - asm: arm.AMOVW, + name: "VPMINSW512", + argLen: 2, + commutative: true, + asm: x86.AVPMINSW, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVWloadshiftLL", - auxType: auxInt32, - argLen: 3, - asm: arm.AMOVW, + name: "VPMULHW512", + argLen: 2, + commutative: true, + asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVWloadshiftRL", - auxType: auxInt32, - argLen: 3, - asm: arm.AMOVW, + name: "VPMULLW512", + argLen: 2, + commutative: true, + asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVWloadshiftRA", - auxType: auxInt32, - argLen: 3, - asm: arm.AMOVW, + name: "VPSUBSW512", + argLen: 2, + asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVBUloadidx", - argLen: 3, - asm: arm.AMOVBU, + name: "VPABSW128", + argLen: 1, + asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVBloadidx", - argLen: 3, - asm: arm.AMOVB, + name: "VPADDW128", + argLen: 2, + commutative: true, + asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 
R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVHUloadidx", - argLen: 3, - asm: arm.AMOVHU, + name: "VPCMPEQW128", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQW, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVHloadidx", - argLen: 3, - asm: arm.AMOVH, + name: "VPCMPGTW128", + argLen: 2, + asm: x86.AVPCMPGTW, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVWstoreidx", - argLen: 4, - asm: arm.AMOVW, + name: "VPABSWMasked128", + argLen: 2, + asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVWstoreshiftLL", - auxType: auxInt32, - argLen: 4, - asm: arm.AMOVW, + name: "VPCMPEQWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPCMPEQW, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "MOVWstoreshiftRL", - auxType: auxInt32, - argLen: 4, - asm: arm.AMOVW, + name: "VPCMPGTWMasked128", + argLen: 3, + asm: x86.AVPCMPGTW, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "MOVWstoreshiftRA", - auxType: auxInt32, - argLen: 4, - asm: arm.AMOVW, + name: "VPMAXSWMasked128", + 
argLen: 3, + commutative: true, + asm: x86.AVPMAXSW, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVBstoreidx", - argLen: 4, - asm: arm.AMOVB, + name: "VPMINSWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMINSW, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVHstoreidx", - argLen: 4, - asm: arm.AMOVH, + name: "VPMULHWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVBreg", - argLen: 1, - asm: arm.AMOVBS, + name: "VPMULLWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVBUreg", - argLen: 1, - asm: arm.AMOVBU, + name: "VPOPCNTWMasked128", + argLen: 2, + asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVHreg", - argLen: 1, - asm: arm.AMOVHS, + name: "VPSUBSWMasked128", + argLen: 3, + asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVHUreg", - argLen: 1, - 
asm: arm.AMOVHU, + name: "VPMAXSW128", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVWreg", - argLen: 1, - asm: arm.AMOVW, + name: "VPMINSW128", + argLen: 2, + commutative: true, + asm: x86.AVPMINSW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVWnop", - argLen: 1, - resultInArg0: true, + name: "VPMULHW128", + argLen: 2, + commutative: true, + asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVWF", - argLen: 1, - asm: arm.AMOVWF, + name: "VPMULLW128", + argLen: 2, + commutative: true, + asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, - clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVWD", - argLen: 1, - asm: arm.AMOVWD, + name: "VPHSUBW128", + argLen: 2, + asm: x86.AVPHSUBW, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, - clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVWUF", - argLen: 1, - asm: arm.AMOVWF, + name: "VPHADDSW128", + argLen: 2, + asm: x86.AVPHADDSW, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, - clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVWUD", - argLen: 1, - asm: arm.AMOVWD, + name: "VPHSUBSW128", + argLen: 2, + asm: x86.AVPHSUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 }, - clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVFW", - argLen: 1, - asm: arm.AMOVFW, + name: "VPSIGNW128", + argLen: 2, + asm: x86.AVPSIGNW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, - clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVDW", + name: "VPABSD512", argLen: 1, - asm: arm.AMOVDW, + asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, - clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVFWU", - argLen: 1, - asm: arm.AMOVFW, + name: "VPANDD512", + argLen: 2, + commutative: true, + asm: x86.AVPANDD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, - clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVDWU", - argLen: 1, - asm: arm.AMOVDW, + name: "VPABSDMasked512", + argLen: 2, + asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, - clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVFD", - argLen: 1, - asm: arm.AMOVFD, + name: "VPMAXSDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVDF", - argLen: 1, - asm: arm.AMOVDF, + name: "VPMINSDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMOVWHSconst", - auxType: auxInt32, - argLen: 2, - resultInArg0: true, - asm: arm.AMOVW, + name: "VPMULLDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMOVWLSconst", - auxType: auxInt32, - argLen: 2, - resultInArg0: true, - asm: arm.AMOVW, + name: "VPOPCNTDMasked512", + argLen: 2, + asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SRAcond", + name: "VPSUBDMasked512", argLen: 3, - asm: arm.ASRA, + asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CALLstatic", - auxType: auxCallOff, - argLen: 1, - clobberFlags: true, - call: true, - reg: regInfo{ - clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - }, - { - name: "CALLtail", - auxType: auxCallOff, - argLen: 1, - clobberFlags: true, - call: true, - tailCall: true, - reg: regInfo{ - clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - }, - { - name: "CALLclosure", - auxType: auxCallOff, - argLen: 3, - clobberFlags: true, - call: true, + name: "VPXORDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPXORD, reg: regInfo{ inputs: []inputInfo{ - {1, 128}, // R7 - {0, 29695}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, - clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, { - name: "CALLinter", - auxType: auxCallOff, - argLen: 2, - clobberFlags: true, - call: true, + name: "VPMAXSD512", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, - clobbers: 
4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, { - name: "LoweredNilCheck", - argLen: 2, - nilCheck: true, - faultOnNilArg0: true, + name: "VPMINSD512", + argLen: 2, + commutative: true, + asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, - }, - }, - { - name: "Equal", - argLen: 1, - reg: regInfo{ outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "NotEqual", - argLen: 1, + name: "VPMULLD512", + argLen: 2, + commutative: true, + asm: x86.AVPMULLD, reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "LessThan", - argLen: 1, + name: "VPORD512", + argLen: 2, + commutative: true, + asm: x86.AVPORD, reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "LessEqual", - argLen: 1, + name: "VPXORD512", + argLen: 2, + commutative: true, + asm: x86.AVPXORD, reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "GreaterThan", + name: "VPABSD128", argLen: 1, + asm: x86.AVPABSD, reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "GreaterEqual", - argLen: 1, + name: "VPCMPEQD128", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQD, reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "LessThanU", - argLen: 1, + name: "VPCMPGTD128", + argLen: 2, + asm: x86.AVPCMPGTD, reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "LessEqualU", - argLen: 1, + name: "VPABSDMasked128", + argLen: 2, + asm: x86.AVPABSD, reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "GreaterThanU", - argLen: 1, + name: "VPANDDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPANDD, reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "GreaterEqualU", - argLen: 1, + name: "VPMAXSDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSD, reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "DUFFZERO", - auxType: auxInt64, - argLen: 3, - faultOnNilArg0: true, + name: "VPMINSDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ - {0, 2}, // R1 - {1, 1}, // R0 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, - clobbers: 20482, // R1 R12 R14 }, }, { - name: "DUFFCOPY", - auxType: auxInt64, - argLen: 3, - faultOnNilArg0: true, - faultOnNilArg1: true, + name: "VPMULLDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ - {0, 4}, // R2 - {1, 2}, // R1 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, - clobbers: 20487, // R0 R1 R2 R12 R14 }, }, { - name: "LoweredZero", - auxType: auxInt64, - argLen: 4, - clobberFlags: true, - faultOnNilArg0: true, + name: "VPORDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPORD, reg: regInfo{ inputs: []inputInfo{ - {0, 2}, // R1 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, - clobbers: 2, // R1 }, }, { - name: "LoweredMove", - auxType: auxInt64, - argLen: 4, - clobberFlags: true, - faultOnNilArg0: true, - faultOnNilArg1: true, + name: "VPOPCNTDMasked128", + argLen: 2, + asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {0, 4}, // R2 - {1, 2}, // R1 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, - clobbers: 6, // R1 R2 - }, - }, - { - name: 
"LoweredGetClosurePtr", - argLen: 0, - zeroWidth: true, - reg: regInfo{ outputs: []outputInfo{ - {0, 128}, // R7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "LoweredGetCallerSP", - argLen: 1, - rematerializeable: true, + name: "VPSUBDMasked128", + argLen: 3, + asm: x86.AVPSUBD, reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "LoweredGetCallerPC", - argLen: 0, - rematerializeable: true, + name: "VPXORDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPXORD, reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "LoweredPanicBoundsA", - auxType: auxInt64, - argLen: 3, - call: true, + name: "VPMAXSD128", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ - {0, 4}, // R2 - {1, 8}, // R3 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "LoweredPanicBoundsB", - auxType: auxInt64, - argLen: 3, - call: true, + name: "VPMINSD128", + argLen: 2, + commutative: true, + asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ - {0, 2}, // R1 - {1, 4}, // R2 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "LoweredPanicBoundsC", - auxType: auxInt64, - argLen: 3, - call: true, + name: "VPMULLD128", + argLen: 2, + commutative: true, + asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ - {0, 1}, // R0 - {1, 2}, // R1 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "LoweredPanicExtendA", - auxType: auxInt64, - argLen: 4, - call: true, + name: "VPHSUBD128", + argLen: 2, + asm: x86.AVPHSUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 16}, // R4 - {1, 4}, // R2 - {2, 8}, // R3 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "LoweredPanicExtendB", - auxType: auxInt64, - argLen: 4, - call: true, + name: "VPSIGND128", + argLen: 2, + asm: x86.AVPSIGND, reg: regInfo{ inputs: []inputInfo{ - {0, 16}, // R4 - {1, 2}, // R1 - {2, 4}, // R2 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + 
outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "LoweredPanicExtendC", - auxType: auxInt64, - argLen: 4, - call: true, + name: "VPSUBD128", + argLen: 2, + asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 16}, // R4 - {1, 1}, // R0 - {2, 2}, // R1 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FlagConstant", - auxType: auxFlagConstant, - argLen: 0, - reg: regInfo{}, - }, - { - name: "InvertFlags", + name: "VPABSD256", argLen: 1, - reg: regInfo{}, - }, - { - name: "LoweredWB", - auxType: auxInt64, - argLen: 1, - clobberFlags: true, + asm: x86.AVPABSD, reg: regInfo{ - clobbers: 4294922240, // R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 256}, // R8 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, - { - name: "ADCSflags", - argLen: 3, + name: "VPAND256", + argLen: 2, commutative: true, - asm: arm64.AADCS, + asm: x86.AVPAND, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADCzerocarry", - argLen: 1, - asm: arm64.AADC, + name: "VPCMPEQD256", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQD, reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADD", - argLen: 2, - commutative: true, - asm: arm64.AADD, + name: "VPCMPGTD256", + argLen: 2, + asm: x86.AVPCMPGTD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDconst", - auxType: auxInt64, - argLen: 1, - asm: arm64.AADD, + name: "VPABSDMasked256", + argLen: 2, + asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ - {0, 1476395007}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 
R24 R25 R26 g R30 SP + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDSconstflags", - auxType: auxInt64, - argLen: 1, - asm: arm64.AADDS, + name: "VPMAXSDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDSflags", - argLen: 2, + name: "VPMINSDMasked256", + argLen: 3, commutative: true, - asm: arm64.AADDS, + asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUB", - argLen: 2, - asm: arm64.ASUB, + name: "VPMULLDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBconst", - auxType: auxInt64, - argLen: 1, - asm: arm64.ASUB, + name: "VPORDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPORD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SBCSflags", + name: "VPSUBDMasked256", 
argLen: 3, - asm: arm64.ASBCS, + asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBSflags", - argLen: 2, - asm: arm64.ASUBS, + name: "VPMAXSD256", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MUL", + name: "VPMINSD256", argLen: 2, commutative: true, - asm: arm64.AMUL, + asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MULW", + name: "VPMULLD256", argLen: 2, commutative: true, - asm: arm64.AMULW, + asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MNEG", - argLen: 2, - commutative: true, - asm: arm64.AMNEG, + name: "VPHSUBD256", + argLen: 2, + asm: x86.AVPHSUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MNEGW", - argLen: 2, - commutative: true, - asm: arm64.AMNEGW, + name: "VPOPCNTD256", + argLen: 1, + asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MULH", - argLen: 2, - commutative: true, - asm: arm64.ASMULH, + name: "VPSIGND256", + argLen: 2, + asm: x86.AVPSIGND, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "UMULH", - argLen: 2, - commutative: true, - asm: arm64.AUMULH, + name: "VPSUBD256", + argLen: 2, + asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MULL", - argLen: 2, - commutative: true, - asm: arm64.ASMULL, + name: "VPABSQ128", + argLen: 1, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "UMULL", + name: "VPCMPEQQ128", argLen: 2, commutative: true, - asm: arm64.AUMULL, + asm: x86.AVPCMPEQQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 
R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "DIV", + name: "VPCMPGTQ128", argLen: 2, - asm: arm64.ASDIV, + asm: x86.AVPCMPGTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "UDIV", + name: "VPABSQMasked128", argLen: 2, - asm: arm64.AUDIV, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "DIVW", - argLen: 2, - asm: arm64.ASDIVW, + name: "VPANDQMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPANDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "UDIVW", - argLen: 2, - asm: arm64.AUDIVW, + name: "VPANDNQMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPANDNQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOD", - argLen: 
2, - asm: arm64.AREM, + name: "VPCMPEQQMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPCMPEQQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "UMOD", - argLen: 2, - asm: arm64.AUREM, + name: "VPCMPGTQMasked128", + argLen: 3, + asm: x86.AVPCMPGTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "MODW", - argLen: 2, - asm: arm64.AREMW, + name: "VPMAXSQMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "UMODW", - argLen: 2, - asm: arm64.AUREMW, + name: "VPMINSQMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FADDS", - argLen: 2, + name: "VPMULDQMasked128", + argLen: 3, commutative: true, - asm: arm64.AFADDS, + asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 
9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FADDD", - argLen: 2, + name: "VPMULLQMasked128", + argLen: 3, commutative: true, - asm: arm64.AFADDD, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FSUBS", - argLen: 2, - asm: arm64.AFSUBS, + name: "VPSUBQMasked128", + argLen: 3, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FSUBD", - argLen: 2, - asm: arm64.AFSUBD, + name: "VPMAXSQ128", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMULS", + name: "VPMINSQ128", argLen: 2, commutative: true, - asm: arm64.AFMULS, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 
- {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMULD", + name: "VPMULDQ128", argLen: 2, commutative: true, - asm: arm64.AFMULD, + asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FNMULS", + name: "VPMULLQ128", argLen: 2, commutative: true, - asm: arm64.AFNMULS, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FNMULD", + name: "VPOR128", argLen: 2, commutative: true, - asm: arm64.AFNMULD, + asm: x86.AVPOR, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FDIVS", - argLen: 2, - asm: arm64.AFDIVS, + name: "VPABSQ256", + argLen: 1, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FDIVD", - argLen: 2, - asm: arm64.AFDIVD, + name: "VPADDQ256", + argLen: 2, + commutative: true, + asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "AND", + name: "VPCMPEQQ256", argLen: 2, commutative: true, - asm: arm64.AAND, + asm: x86.AVPCMPEQQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ANDconst", - auxType: auxInt64, - argLen: 1, - asm: arm64.AAND, + name: "VPCMPGTQ256", + argLen: 2, + asm: x86.AVPCMPGTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "OR", - argLen: 2, - commutative: true, - asm: arm64.AORR, + name: "VPABSQMasked256", + argLen: 2, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ORconst", - auxType: auxInt64, - argLen: 1, - asm: arm64.AORR, + name: "VPANDQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPANDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 
R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "XOR", - argLen: 2, + name: "VPANDNQMasked256", + argLen: 3, commutative: true, - asm: arm64.AEOR, + asm: x86.AVPANDNQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "XORconst", - auxType: auxInt64, - argLen: 1, - asm: arm64.AEOR, + name: "VPCMPEQQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPCMPEQQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "BIC", - argLen: 2, - asm: arm64.ABIC, + name: "VPCMPGTQMasked256", + argLen: 3, + asm: x86.AVPCMPGTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "EON", - argLen: 2, - asm: arm64.AEON, + name: "VPMAXSQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ORN", - argLen: 2, - asm: arm64.AORN, + name: "VPMINSQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MVN", - argLen: 1, - asm: arm64.AMVN, + name: "VPMULDQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "NEG", - argLen: 1, - asm: arm64.ANEG, + name: "VPMULLQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "NEGSflags", - argLen: 1, - asm: arm64.ANEGS, + name: "VPORQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "NGCzerocarry", - argLen: 1, - asm: arm64.ANGC, + name: "VPOPCNTQMasked256", + argLen: 2, + asm: x86.AVPOPCNTQ, reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FABSD", 
- argLen: 1, - asm: arm64.AFABSD, + name: "VPSUBQMasked256", + argLen: 3, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FNEGS", - argLen: 1, - asm: arm64.AFNEGS, + name: "VPMAXSQ256", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FNEGD", - argLen: 1, - asm: arm64.AFNEGD, + name: "VPMINSQ256", + argLen: 2, + commutative: true, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FSQRTD", - argLen: 1, - asm: arm64.AFSQRTD, + name: "VPMULDQ256", + argLen: 2, + commutative: true, + asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FSQRTS", - argLen: 1, - asm: arm64.AFSQRTS, + name: "VPMULLQ256", + argLen: 2, + commutative: true, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 }, }, }, { - name: "FMIND", - argLen: 2, - asm: arm64.AFMIND, + name: "VPOR256", + argLen: 2, + commutative: true, + asm: x86.AVPOR, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMINS", - argLen: 2, - asm: arm64.AFMINS, + name: "VPOPCNTQ256", + argLen: 1, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMAXD", + name: "VPSUBQ256", argLen: 2, - asm: arm64.AFMAXD, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMAXS", - argLen: 2, - asm: arm64.AFMAXS, + name: "VPABSQ512", + argLen: 1, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "REV", - argLen: 1, - asm: arm64.AREV, + name: "VPANDQ512", + argLen: 2, + commutative: true, + asm: x86.AVPANDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "REVW", - argLen: 1, - asm: arm64.AREVW, + name: "VPCMPEQQ512", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "REV16", - argLen: 1, - asm: arm64.AREV16, + name: "VPCMPGTQ512", + argLen: 2, + asm: x86.AVPCMPGTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "REV16W", - argLen: 1, - asm: arm64.AREV16W, + name: "VPABSQMasked512", + argLen: 2, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RBIT", - argLen: 1, - asm: arm64.ARBIT, + name: "VPADDQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RBITW", - argLen: 1, - asm: arm64.ARBITW, + name: "VPANDNQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPANDNQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CLZ", - argLen: 1, - asm: arm64.ACLZ, + name: "VPCMPEQQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPCMPEQQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "CLZW", - argLen: 1, - asm: arm64.ACLZW, + name: "VPCMPGTQMasked512", + argLen: 3, + asm: x86.AVPCMPGTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VCNT", - argLen: 1, - asm: arm64.AVCNT, + name: "VPMAXSQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VUADDLV", - argLen: 1, - asm: arm64.AVUADDLV, + name: "VPMINSQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "LoweredRound32F", - argLen: 1, - resultInArg0: true, - zeroWidth: true, + name: "VPMULDQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 
F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "LoweredRound64F", - argLen: 1, - resultInArg0: true, - zeroWidth: true, + name: "VPMULLQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMADDS", - argLen: 3, - asm: arm64.AFMADDS, + name: "VPMAXSQ512", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMADDD", - argLen: 3, - asm: arm64.AFMADDD, + name: "VPMINSQ512", + argLen: 2, + commutative: true, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FNMADDS", - argLen: 3, - asm: arm64.AFNMADDS, + name: "VPMULDQ512", + argLen: 2, + commutative: true, + asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 
F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FNMADDD", - argLen: 3, - asm: arm64.AFNMADDD, + name: "VPMULLQ512", + argLen: 2, + commutative: true, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMSUBS", - argLen: 3, - asm: arm64.AFMSUBS, + name: "VPOPCNTQ512", + argLen: 1, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMSUBD", - argLen: 3, - asm: arm64.AFMSUBD, + name: "VPSUBQ512", + argLen: 2, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FNMSUBS", - argLen: 3, - asm: arm64.AFNMSUBS, + name: "VPXORQ512", + argLen: 2, + commutative: true, + asm: x86.AVPXORQ, reg: regInfo{ inputs: 
[]inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FNMSUBD", - argLen: 3, - asm: arm64.AFNMSUBD, + name: "VPABSB128", + argLen: 1, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MADD", - argLen: 3, - asm: arm64.AMADD, + name: "VPADDB128", + argLen: 2, + commutative: true, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {2, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MADDW", - argLen: 3, - asm: arm64.AMADDW, + name: "VPAND128", + argLen: 2, + commutative: true, + asm: x86.AVPAND, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {2, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 }, }, }, { - name: "MSUB", - argLen: 3, - asm: arm64.AMSUB, + name: "VPCMPEQB128", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {2, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MSUBW", - argLen: 3, - asm: arm64.AMSUBW, + name: "VPCMPGTB128", + argLen: 2, + asm: x86.AVPCMPGTB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {2, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SLL", + name: "VPABSBMasked128", argLen: 2, - asm: arm64.ALSL, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SLLconst", - auxType: auxInt64, - argLen: 1, - asm: arm64.ALSL, + name: "VPADDBMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SRL", - argLen: 2, - asm: arm64.ALSR, + name: "VPMAXSBMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 
R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SRLconst", - auxType: auxInt64, - argLen: 1, - asm: arm64.ALSR, + name: "VPMINSBMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SRA", - argLen: 2, - asm: arm64.AASR, + name: "VPSUBSBMasked128", + argLen: 3, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SRAconst", - auxType: auxInt64, - argLen: 1, - asm: arm64.AASR, + name: "VPMAXSB128", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ROR", - argLen: 2, - asm: arm64.AROR, + name: "VPMINSB128", + argLen: 2, + commutative: true, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RORW", + name: "VPSIGNB128", argLen: 2, - asm: 
arm64.ARORW, + asm: x86.AVPSIGNB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RORconst", - auxType: auxInt64, - argLen: 1, - asm: arm64.AROR, + name: "VPSUBB128", + argLen: 2, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RORWconst", - auxType: auxInt64, - argLen: 1, - asm: arm64.ARORW, + name: "VPABSB256", + argLen: 1, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "EXTRconst", - auxType: auxInt64, - argLen: 2, - asm: arm64.AEXTR, + name: "VPADDB256", + argLen: 2, + commutative: true, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "EXTRWconst", - auxType: auxInt64, - argLen: 2, - asm: arm64.AEXTRW, + name: "VPANDN256", + argLen: 2, + commutative: true, + asm: x86.AVPANDN, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMP", - argLen: 2, - 
asm: arm64.ACMP, + name: "VPCMPEQB256", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMPconst", - auxType: auxInt64, - argLen: 1, - asm: arm64.ACMP, + name: "VPCMPGTB256", + argLen: 2, + asm: x86.AVPCMPGTB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMPW", + name: "VPABSBMasked256", argLen: 2, - asm: arm64.ACMPW, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMPWconst", - auxType: auxInt32, - argLen: 1, - asm: arm64.ACMPW, + name: "VPMAXSBMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMN", - argLen: 2, + name: "VPMINSBMasked256", + argLen: 3, commutative: true, - asm: arm64.ACMN, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMNconst", - auxType: auxInt64, - argLen: 1, - asm: arm64.ACMN, + name: "VPSUBSBMasked256", + argLen: 3, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMNW", + name: "VPMAXSB256", argLen: 2, commutative: true, - asm: arm64.ACMNW, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, - }, - }, - { - name: "CMNWconst", - auxType: auxInt32, - argLen: 1, - asm: arm64.ACMNW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "TST", + name: "VPMINSB256", argLen: 2, commutative: true, - asm: arm64.ATST, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "TSTconst", - auxType: auxInt64, - argLen: 1, - asm: arm64.ATST, + name: "VPOPCNTB256", + argLen: 1, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "TSTW", - argLen: 2, - commutative: true, - asm: arm64.ATSTW, + name: "VPSIGNB256", + argLen: 2, + asm: x86.AVPSIGNB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "TSTWconst", - auxType: auxInt32, - argLen: 1, - asm: arm64.ATSTW, + name: "VPABSB512", + argLen: 1, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FCMPS", + name: "VPABSBMasked512", argLen: 2, - asm: arm64.AFCMPS, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 
- {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FCMPD", - argLen: 2, - asm: arm64.AFCMPD, + name: "VPMAXSBMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FCMPS0", - argLen: 1, - asm: arm64.AFCMPS, + name: "VPMINSBMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FCMPD0", - argLen: 1, - asm: arm64.AFCMPD, + name: "VPADDSBMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MVNshiftLL", - auxType: auxInt64, - argLen: 1, - asm: arm64.AMVN, + name: "VPMAXSB512", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MVNshiftRL", - auxType: auxInt64, - argLen: 1, - asm: arm64.AMVN, + name: "VPMINSB512", + argLen: 2, + commutative: true, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - 
{0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MVNshiftRA", - auxType: auxInt64, - argLen: 1, - asm: arm64.AMVN, + name: "VPOPCNTB512", + argLen: 1, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MVNshiftRO", - auxType: auxInt64, - argLen: 1, - asm: arm64.AMVN, + name: "VPSUBSB512", + argLen: 2, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "NEGshiftLL", - auxType: auxInt64, - argLen: 1, - asm: arm64.ANEG, + name: "VPSUBB512", + argLen: 2, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "NEGshiftRL", - auxType: auxInt64, - argLen: 1, - asm: arm64.ANEG, + name: "VPAVGW256", + argLen: 2, + commutative: true, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "NEGshiftRA", - auxType: auxInt64, - argLen: 1, - asm: arm64.ANEG, + name: "VPAVGWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDshiftLL", - auxType: 
auxInt64, - argLen: 2, - asm: arm64.AADD, + name: "VPMAXUWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDshiftRL", - auxType: auxInt64, - argLen: 2, - asm: arm64.AADD, + name: "VPMINUWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDshiftRA", - auxType: auxInt64, - argLen: 2, - asm: arm64.AADD, + name: "VPMULHUWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBshiftLL", - auxType: auxInt64, - argLen: 2, - asm: arm64.ASUB, + name: "VPOPCNTWMasked256", + argLen: 2, + asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBshiftRL", - auxType: auxInt64, - argLen: 2, - asm: arm64.ASUB, + name: "VPMAXUW256", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 
R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBshiftRA", - auxType: auxInt64, - argLen: 2, - asm: arm64.ASUB, + name: "VPMINUW256", + argLen: 2, + commutative: true, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ANDshiftLL", - auxType: auxInt64, - argLen: 2, - asm: arm64.AAND, + name: "VPMULHUW256", + argLen: 2, + commutative: true, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ANDshiftRL", - auxType: auxInt64, - argLen: 2, - asm: arm64.AAND, + name: "VPHADDW256", + argLen: 2, + asm: x86.AVPHADDW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ANDshiftRA", - auxType: auxInt64, - argLen: 2, - asm: arm64.AAND, + name: "VPOPCNTW256", + argLen: 1, + asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + 
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ANDshiftRO", - auxType: auxInt64, - argLen: 2, - asm: arm64.AAND, + name: "VPADDSW256", + argLen: 2, + commutative: true, + asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ORshiftLL", - auxType: auxInt64, - argLen: 2, - asm: arm64.AORR, + name: "VPAVGW512", + argLen: 2, + commutative: true, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ORshiftRL", - auxType: auxInt64, - argLen: 2, - asm: arm64.AORR, + name: "VPADDWMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ORshiftRA", - auxType: auxInt64, - argLen: 2, - asm: arm64.AORR, + name: "VPAVGWMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ORshiftRO", - auxType: auxInt64, - argLen: 2, - asm: arm64.AORR, + name: "VPMAXUWMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUW, reg: regInfo{ 
inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "XORshiftLL", - auxType: auxInt64, - argLen: 2, - asm: arm64.AEOR, + name: "VPMINUWMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "XORshiftRL", - auxType: auxInt64, - argLen: 2, - asm: arm64.AEOR, + name: "VPMULHUWMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "XORshiftRA", - auxType: auxInt64, - argLen: 2, - asm: arm64.AEOR, + name: "VPOPCNTWMasked512", + argLen: 2, + asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "XORshiftRO", - auxType: auxInt64, - argLen: 2, - asm: arm64.AEOR, + name: "VPADDSWMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 
R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "BICshiftLL", - auxType: auxInt64, - argLen: 2, - asm: arm64.ABIC, + name: "VPSUBSWMasked512", + argLen: 3, + asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "BICshiftRL", - auxType: auxInt64, - argLen: 2, - asm: arm64.ABIC, + name: "VPSUBWMasked512", + argLen: 3, + asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "BICshiftRA", - auxType: auxInt64, - argLen: 2, - asm: arm64.ABIC, + name: "VPMAXUW512", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "BICshiftRO", - auxType: auxInt64, - argLen: 2, - asm: arm64.ABIC, + name: "VPMINUW512", + argLen: 2, + commutative: true, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 
R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "EONshiftLL", - auxType: auxInt64, - argLen: 2, - asm: arm64.AEON, + name: "VPMULHUW512", + argLen: 2, + commutative: true, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "EONshiftRL", - auxType: auxInt64, - argLen: 2, - asm: arm64.AEON, + name: "VPOPCNTW512", + argLen: 1, + asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "EONshiftRA", - auxType: auxInt64, - argLen: 2, - asm: arm64.AEON, + name: "VPADDSW512", + argLen: 2, + commutative: true, + asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "EONshiftRO", - auxType: auxInt64, - argLen: 2, - asm: arm64.AEON, + name: "VPSUBW512", + argLen: 2, + asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ORNshiftLL", - auxType: auxInt64, - argLen: 2, - asm: arm64.AORN, + name: "VPAVGW128", + argLen: 2, + commutative: true, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 
g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ORNshiftRL", - auxType: auxInt64, - argLen: 2, - asm: arm64.AORN, + name: "VPADDWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ORNshiftRA", - auxType: auxInt64, - argLen: 2, - asm: arm64.AORN, + name: "VPAVGWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ORNshiftRO", - auxType: auxInt64, - argLen: 2, - asm: arm64.AORN, + name: "VPMAXUWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMPshiftLL", - auxType: auxInt64, - argLen: 2, - asm: arm64.ACMP, + name: "VPMINUWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMPshiftRL", - auxType: auxInt64, - argLen: 2, - asm: arm64.ACMP, + name: "VPMULHUWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMPshiftRA", - auxType: auxInt64, - argLen: 2, - asm: arm64.ACMP, + name: "VPADDSWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMNshiftLL", - auxType: auxInt64, - argLen: 2, - asm: arm64.ACMN, + name: "VPSUBWMasked128", + argLen: 3, + asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMNshiftRL", - auxType: auxInt64, - argLen: 2, - asm: arm64.ACMN, + name: "VPMAXUW128", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMNshiftRA", - auxType: auxInt64, - argLen: 2, - asm: arm64.ACMN, + name: "VPMINUW128", + argLen: 2, + commutative: true, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "TSTshiftLL", - auxType: auxInt64, - argLen: 2, - asm: arm64.ATST, + name: "VPMULHUW128", + argLen: 2, + commutative: true, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "TSTshiftRL", - auxType: auxInt64, - argLen: 2, - asm: arm64.ATST, + name: "VPHADDW128", + argLen: 2, + asm: x86.AVPHADDW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "TSTshiftRA", - auxType: auxInt64, - argLen: 2, - asm: arm64.ATST, + name: "VPOPCNTW128", + argLen: 1, + asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "TSTshiftRO", - auxType: auxInt64, - argLen: 2, - asm: arm64.ATST, - reg: regInfo{ + name: "VPADDSW128", + argLen: 2, + commutative: true, + asm: x86.AVPADDSW, + reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "BFI", - auxType: auxARM64BitField, - argLen: 2, - resultInArg0: true, - asm: arm64.ABFI, + name: "VPSUBSW128", + argLen: 2, + asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 
R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "BFXIL", - auxType: auxARM64BitField, - argLen: 2, - resultInArg0: true, - asm: arm64.ABFXIL, + name: "VPSUBW128", + argLen: 2, + asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SBFIZ", - auxType: auxARM64BitField, - argLen: 1, - asm: arm64.ASBFIZ, + name: "VPADDD512", + argLen: 2, + commutative: true, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SBFX", - auxType: auxARM64BitField, - argLen: 1, - asm: arm64.ASBFX, + name: "VPANDND512", + argLen: 2, + commutative: true, + asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "UBFIZ", - auxType: auxARM64BitField, - argLen: 1, - asm: arm64.AUBFIZ, + name: "VPADDDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "UBFX", - auxType: auxARM64BitField, - argLen: 1, - asm: arm64.AUBFX, + name: "VPANDDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPANDD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
}, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVDconst", - auxType: auxInt64, - argLen: 0, - rematerializeable: true, - asm: arm64.AMOVD, + name: "VPANDNDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPANDND, reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMOVSconst", - auxType: auxFloat64, - argLen: 0, - rematerializeable: true, - asm: arm64.AFMOVS, + name: "VPMAXUDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUD, reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMOVDconst", - auxType: auxFloat64, - argLen: 0, - rematerializeable: true, - asm: arm64.AFMOVD, + name: "VPMINUDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMINUD, reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVDaddr", - auxType: auxSymOff, - argLen: 1, - rematerializeable: true, - symEffect: SymAddr, - asm: arm64.AMOVD, + name: "VPORDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPORD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037928517632}, // SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVBload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.AMOVB, + name: "VPMAXUD512", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 
R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVBUload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.AMOVBU, + name: "VPMINUD512", + argLen: 2, + commutative: true, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVHload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.AMOVH, + name: "VPOPCNTD512", + argLen: 1, + asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVHUload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.AMOVHU, + name: "VPSUBD512", + argLen: 2, + asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVWload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.AMOVW, + name: "VPADDD128", + argLen: 2, + commutative: true, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVWUload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.AMOVWU, + name: "VPADDDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - 
{0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVDload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.AMOVD, + name: "VPANDNDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMOVSload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.AFMOVS, + name: "VPMAXUDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMOVDload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.AFMOVD, + name: "VPMINUDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "LDP", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.ALDP, + name: "VPMAXUD128", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "LDPW", - auxType: auxSymOff, - argLen: 
2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.ALDPW, + name: "VPMINUD128", + argLen: 2, + commutative: true, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "LDPSW", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.ALDPSW, + name: "VPHADDD128", + argLen: 2, + asm: x86.AVPHADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FLDPD", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.AFLDPD, + name: "VPOPCNTD128", + argLen: 1, + asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FLDPS", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.AFLDPS, + name: "VPADDD256", + argLen: 2, + commutative: true, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVDloadidx", - argLen: 3, - asm: arm64.AMOVD, + name: "VPADDDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPADDD, reg: regInfo{ 
inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVWloadidx", - argLen: 3, - asm: arm64.AMOVW, + name: "VPANDDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPANDD, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVWUloadidx", - argLen: 3, - asm: arm64.AMOVWU, + name: "VPANDNDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVHloadidx", - argLen: 3, - asm: arm64.AMOVH, + name: "VPMAXUDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVHUloadidx", - argLen: 3, - asm: arm64.AMOVHU, + name: "VPMINUDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 
9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVBloadidx", - argLen: 3, - asm: arm64.AMOVB, + name: "VPOPCNTDMasked256", + argLen: 2, + asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVBUloadidx", - argLen: 3, - asm: arm64.AMOVBU, + name: "VPXORDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPXORD, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMOVSloadidx", - argLen: 3, - asm: arm64.AFMOVS, + name: "VPMAXUD256", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMOVDloadidx", - argLen: 3, - asm: arm64.AFMOVD, + name: "VPMINUD256", + argLen: 2, + commutative: true, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVHloadidx2", - argLen: 3, - asm: arm64.AMOVH, + name: "VPMULUDQ256", + argLen: 2, + commutative: true, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVHUloadidx2", - argLen: 3, - asm: arm64.AMOVHU, + name: "VPHADDD256", + argLen: 2, + asm: x86.AVPHADDD, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVWloadidx4", - argLen: 3, - asm: arm64.AMOVW, + name: "VPXOR256", + argLen: 2, + commutative: true, + asm: x86.AVPXOR, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVWUloadidx4", - argLen: 3, - asm: arm64.AMOVWU, + name: "VPADDQ128", + argLen: 2, + commutative: true, + asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVDloadidx8", - argLen: 3, - asm: arm64.AMOVD, + name: "VPADDQMasked128", + argLen: 3, + commutative: 
true, + asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMOVSloadidx4", - argLen: 3, - asm: arm64.AFMOVS, + name: "VPMAXUQMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMOVDloadidx8", - argLen: 3, - asm: arm64.AFMOVD, + name: "VPMINUQMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVBstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm64.AMOVB, + name: "VPMULUDQMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVHstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm64.AMOVH, - reg: regInfo{ + name: "VPORQMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPORQ, + reg: regInfo{ inputs: 
[]inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVWstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm64.AMOVW, + name: "VPOPCNTQMasked128", + argLen: 2, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVDstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm64.AMOVD, + name: "VPXORQMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMOVSstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm64.AFMOVS, + name: "VPMAXUQ128", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMOVDstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm64.AFMOVD, + name: "VPMINUQ128", + argLen: 2, + commutative: true, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + 
outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "STP", - auxType: auxSymOff, - argLen: 4, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm64.ASTP, + name: "VPMULUDQ128", + argLen: 2, + commutative: true, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "STPW", - auxType: auxSymOff, - argLen: 4, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm64.ASTPW, + name: "VPOPCNTQ128", + argLen: 1, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FSTPD", - auxType: auxSymOff, - argLen: 4, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm64.AFSTPD, + name: "VPSUBQ128", + argLen: 2, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FSTPS", - auxType: auxSymOff, - argLen: 4, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm64.AFSTPS, + name: "VPXOR128", + argLen: 2, + commutative: true, + asm: x86.AVPXOR, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 
X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVBstoreidx", - argLen: 4, - asm: arm64.AMOVB, + name: "VPADDQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVHstoreidx", - argLen: 4, - asm: arm64.AMOVH, + name: "VPMAXUQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVWstoreidx", - argLen: 4, - asm: arm64.AMOVW, + name: "VPMINUQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVDstoreidx", - argLen: 4, - asm: arm64.AMOVD, + name: "VPMULUDQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMOVSstoreidx", - argLen: 4, - asm: 
arm64.AFMOVS, + name: "VPXORQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMOVDstoreidx", - argLen: 4, - asm: arm64.AFMOVD, + name: "VPMAXUQ256", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVHstoreidx2", - argLen: 4, - asm: arm64.AMOVH, + name: "VPMINUQ256", + argLen: 2, + commutative: true, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVWstoreidx4", - argLen: 4, - asm: arm64.AMOVW, + name: "VPADDQ512", + argLen: 2, + commutative: true, + asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVDstoreidx8", - argLen: 4, - asm: arm64.AMOVD, + name: "VPANDNQ512", + argLen: 2, + commutative: true, + asm: x86.AVPANDNQ, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 
R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMOVSstoreidx4", - argLen: 4, - asm: arm64.AFMOVS, + name: "VPANDQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPANDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMOVDstoreidx8", - argLen: 4, - asm: arm64.AFMOVD, + name: "VPMAXUQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMOVDgpfp", - argLen: 1, - asm: arm64.AFMOVD, + name: "VPMINUQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMOVDfpgp", - argLen: 1, - asm: arm64.AFMOVD, + name: "VPMULUDQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMOVSgpfp", - argLen: 1, - asm: arm64.AFMOVS, + name: "VPORQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMOVSfpgp", - argLen: 1, - asm: arm64.AFMOVS, + name: "VPOPCNTQMasked512", + argLen: 2, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVBreg", - argLen: 1, - asm: arm64.AMOVB, + name: "VPSUBQMasked512", + argLen: 3, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVBUreg", - argLen: 1, - asm: arm64.AMOVBU, + name: "VPXORQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVHreg", - argLen: 1, - asm: arm64.AMOVH, + name: "VPMAXUQ512", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 
R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVHUreg", - argLen: 1, - asm: arm64.AMOVHU, + name: "VPMINUQ512", + argLen: 2, + commutative: true, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVWreg", - argLen: 1, - asm: arm64.AMOVW, + name: "VPMULUDQ512", + argLen: 2, + commutative: true, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVWUreg", - argLen: 1, - asm: arm64.AMOVWU, + name: "VPORQ512", + argLen: 2, + commutative: true, + asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVDreg", - argLen: 1, - asm: arm64.AMOVD, + name: "VPANDN128", + argLen: 2, + commutative: true, + asm: x86.AVPANDN, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVDnop", - argLen: 1, - resultInArg0: true, + name: "VPAVGB128", + argLen: 2, + commutative: true, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SCVTFWS", - argLen: 1, - asm: arm64.ASCVTFWS, + name: "VPAVGBMasked128", + argLen: 3, + commutative: true, + 
asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SCVTFWD", - argLen: 1, - asm: arm64.ASCVTFWD, + name: "VPMAXUBMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "UCVTFWS", - argLen: 1, - asm: arm64.AUCVTFWS, + name: "VPMINUBMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "UCVTFWD", - argLen: 1, - asm: arm64.AUCVTFWD, + name: "VPOPCNTBMasked128", + argLen: 2, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SCVTFS", - argLen: 1, - asm: arm64.ASCVTFS, + name: "VPADDSBMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SCVTFD", - argLen: 1, - asm: arm64.ASCVTFD, + name: 
"VPSUBBMasked128", + argLen: 3, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "UCVTFS", - argLen: 1, - asm: arm64.AUCVTFS, + name: "VPMAXUB128", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "UCVTFD", - argLen: 1, - asm: arm64.AUCVTFD, + name: "VPMINUB128", + argLen: 2, + commutative: true, + asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FCVTZSSW", + name: "VPOPCNTB128", argLen: 1, - asm: arm64.AFCVTZSSW, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FCVTZSDW", - argLen: 1, - asm: arm64.AFCVTZSDW, + name: "VPADDSB128", + argLen: 2, + commutative: true, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FCVTZUSW", - argLen: 1, - asm: arm64.AFCVTZUSW, + name: "VPSUBSB128", + argLen: 2, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 
F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FCVTZUDW", - argLen: 1, - asm: arm64.AFCVTZUDW, + name: "VPAVGB256", + argLen: 2, + commutative: true, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FCVTZSS", - argLen: 1, - asm: arm64.AFCVTZSS, + name: "VPADDBMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FCVTZSD", - argLen: 1, - asm: arm64.AFCVTZSD, + name: "VPAVGBMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FCVTZUS", - argLen: 1, - asm: arm64.AFCVTZUS, + name: "VPMAXUBMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FCVTZUD", - argLen: 1, - asm: arm64.AFCVTZUD, + name: "VPMINUBMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 
F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FCVTSD", - argLen: 1, - asm: arm64.AFCVTSD, + name: "VPOPCNTBMasked256", + argLen: 2, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FCVTDS", - argLen: 1, - asm: arm64.AFCVTDS, + name: "VPADDSBMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FRINTAD", - argLen: 1, - asm: arm64.AFRINTAD, + name: "VPSUBBMasked256", + argLen: 3, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FRINTMD", - argLen: 1, - asm: arm64.AFRINTMD, + name: "VPMAXUB256", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FRINTND", - argLen: 1, - asm: arm64.AFRINTND, + name: "VPMINUB256", + argLen: 2, + commutative: true, + asm: x86.AVPMINUB, reg: 
regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FRINTPD", - argLen: 1, - asm: arm64.AFRINTPD, + name: "VPADDSB256", + argLen: 2, + commutative: true, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FRINTZD", - argLen: 1, - asm: arm64.AFRINTZD, + name: "VPSUBSB256", + argLen: 2, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CSEL", - auxType: auxCCop, - argLen: 3, - asm: arm64.ACSEL, + name: "VPSUBB256", + argLen: 2, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CSEL0", - auxType: auxCCop, - argLen: 2, - asm: arm64.ACSEL, + name: "VPADDB512", + argLen: 2, + commutative: true, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CSINC", - auxType: auxCCop, - argLen: 3, - asm: arm64.ACSINC, + name: "VPAVGB512", + argLen: 2, + 
commutative: true, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CSINV", - auxType: auxCCop, - argLen: 3, - asm: arm64.ACSINV, + name: "VPADDBMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CSNEG", - auxType: auxCCop, - argLen: 3, - asm: arm64.ACSNEG, + name: "VPAVGBMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CSETM", - auxType: auxCCop, - argLen: 1, - asm: arm64.ACSETM, + name: "VPMAXUBMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUB, reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CALLstatic", - auxType: auxCallOff, - argLen: -1, - clobberFlags: true, - call: true, + name: "VPMINUBMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMINUB, reg: regInfo{ - clobbers: 9223372035109945343, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, }, }, { - name: "CALLtail", - auxType: auxCallOff, - argLen: -1, - clobberFlags: true, - call: true, - tailCall: true, + name: "VPOPCNTBMasked512", + argLen: 2, + asm: x86.AVPOPCNTB, reg: regInfo{ - clobbers: 9223372035109945343, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, }, }, { - name: "CALLclosure", - auxType: auxCallOff, - argLen: -1, - clobberFlags: true, - call: true, + name: "VPSUBSBMasked512", + argLen: 3, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ - {1, 33554432}, // R26 - {0, 1409286143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 SP + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, - clobbers: 9223372035109945343, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { - name: "CALLinter", - auxType: auxCallOff, - argLen: -1, - clobberFlags: true, - call: true, + name: "VPSUBBMasked512", + argLen: 3, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, - clobbers: 9223372035109945343, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { - name: "LoweredNilCheck", - argLen: 2, - nilCheck: true, - faultOnNilArg0: true, + name: "VPMAXUB512", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "Equal", - argLen: 1, + name: "VPMINUB512", + argLen: 2, + commutative: true, + asm: x86.AVPMINUB, reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - 
{0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "NotEqual", - argLen: 1, + name: "VPADDSB512", + argLen: 2, + commutative: true, + asm: x86.AVPADDSB, reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "LessThan", - argLen: 1, + name: "VCMPPS512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVCMPPS, reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LessEqual", - argLen: 1, + name: "VCMPPSMasked512", + auxType: auxInt8, + argLen: 3, + asm: x86.AVCMPPS, reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "GreaterThan", - argLen: 1, + name: "VCMPPS128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVCMPPS, reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "GreaterEqual", - argLen: 1, + name: "VCMPPSMasked128", + auxType: auxInt8, + argLen: 3, + asm: x86.AVCMPPS, reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LessThanU", - argLen: 1, + name: "VCMPPS256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVCMPPS, reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "LessEqualU", - argLen: 1, + name: "VCMPPSMasked256", + auxType: auxInt8, + argLen: 3, + asm: x86.AVCMPPS, reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "GreaterThanU", - argLen: 1, + name: "VCMPPD128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVCMPPD, reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "GreaterEqualU", - argLen: 1, + name: "VCMPPDMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVCMPPD, reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LessThanF", - argLen: 1, + name: "VCMPPD256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVCMPPD, reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "LessEqualF", - argLen: 1, + name: "VCMPPDMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVCMPPD, reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "GreaterThanF", - argLen: 1, + name: "VCMPPD512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVCMPPD, reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "GreaterEqualF", - argLen: 1, + name: "VCMPPDMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVCMPPD, reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // 
K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "NotLessThanF", - argLen: 1, + name: "VPCMPW256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPW, reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "NotLessEqualF", - argLen: 1, + name: "VPCMPWMasked256", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPW, reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "NotGreaterThanF", - argLen: 1, + name: "VPCMPWMasked512", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPW, reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "NotGreaterEqualF", - argLen: 1, + name: "VPCMPW512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPW, reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LessThanNoov", - argLen: 1, + name: "VPCMPW128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPW, reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "GreaterEqualNoov", - argLen: 1, + name: "VPCMPWMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPW, reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "DUFFZERO", - auxType: auxInt64, - argLen: 2, - unsafePoint: true, + name: "VPCMPD512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {0, 524288}, // R20 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, - clobbers: 269156352, // R16 R17 R20 R30 }, }, { - name: "LoweredZero", - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, + name: "VPCMPDMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {0, 65536}, // R16 - {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, - clobbers: 65536, // R16 }, }, { - name: "DUFFCOPY", - auxType: auxInt64, - argLen: 3, - unsafePoint: true, + name: "VPCMPDMasked128", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {0, 1048576}, // R21 - {1, 524288}, // R20 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, - clobbers: 303759360, // R16 R17 R20 R21 R26 R30 }, }, { - name: "LoweredMove", - argLen: 4, - clobberFlags: true, - faultOnNilArg0: true, - faultOnNilArg1: true, + name: "VPCMPD128", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {0, 131072}, // R17 - {1, 65536}, // R16 - {2, 318767103}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, - clobbers: 16973824, // R16 R17 R25 }, }, { - name: "LoweredGetClosurePtr", - argLen: 0, - zeroWidth: true, + name: "VPCMPD256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPD, reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 33554432}, // R26 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredGetCallerSP", - argLen: 1, - rematerializeable: true, + name: "VPCMPDMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPD, reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredGetCallerPC", - argLen: 0, - rematerializeable: true, + name: "VPCMPQ128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPQ, reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, 
// K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "FlagConstant", - auxType: auxFlagConstant, - argLen: 0, - reg: regInfo{}, - }, - { - name: "InvertFlags", - argLen: 1, - reg: regInfo{}, - }, - { - name: "LDAR", - argLen: 2, - faultOnNilArg0: true, - asm: arm64.ALDAR, + name: "VPCMPQMasked128", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LDARB", - argLen: 2, - faultOnNilArg0: true, - asm: arm64.ALDARB, + name: "VPCMPQ256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LDARW", - argLen: 2, - faultOnNilArg0: true, - asm: arm64.ALDARW, + name: "VPCMPQMasked256", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "STLRB", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, - asm: arm64.ASTLRB, + name: "VPCMPQMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "STLR", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, - asm: arm64.ASTLR, + name: "VPCMPQ512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP 
SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "STLRW", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, - asm: arm64.ASTLRW, + name: "VPCMPBMasked128", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredAtomicExchange64", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "VPCMPB128", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredAtomicExchange32", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "VPCMPBMasked256", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredAtomicExchange8", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "VPCMPB256", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 
R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredAtomicExchange64Variant", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "VPCMPB512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredAtomicExchange32Variant", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "VPCMPBMasked512", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredAtomicExchange8Variant", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "VPCMPUW256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredAtomicAdd64", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "VPCMPUWMasked256", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 
R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredAtomicAdd32", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "VPCMPUW512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredAtomicAdd64Variant", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "VPCMPUWMasked512", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredAtomicAdd32Variant", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "VPCMPUW128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredAtomicCas64", - argLen: 4, - resultNotInArgs: true, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "VPCMPUWMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredAtomicCas32", - argLen: 4, - resultNotInArgs: true, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "VPCMPUDMasked512", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredAtomicCas64Variant", - argLen: 4, - resultNotInArgs: true, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "VPCMPUD512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredAtomicCas32Variant", - argLen: 4, - resultNotInArgs: true, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "VPCMPUD128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredAtomicAnd8", - argLen: 3, - resultNotInArgs: true, - needIntTemp: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, - asm: arm64.AAND, + name: "VPCMPUDMasked128", + auxType: auxInt8, + argLen: 
3, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredAtomicOr8", - argLen: 3, - resultNotInArgs: true, - needIntTemp: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, - asm: arm64.AORR, + name: "VPCMPUDMasked256", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredAtomicAnd64", - argLen: 3, - resultNotInArgs: true, - needIntTemp: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, - asm: arm64.AAND, + name: "VPCMPUD256", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredAtomicOr64", - argLen: 3, - resultNotInArgs: true, - needIntTemp: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, - asm: arm64.AORR, + name: "VPCMPUQ128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredAtomicAnd32", - argLen: 3, - resultNotInArgs: true, - 
needIntTemp: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, - asm: arm64.AAND, + name: "VPCMPUQMasked128", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredAtomicOr32", - argLen: 3, - resultNotInArgs: true, - needIntTemp: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, - asm: arm64.AORR, + name: "VPCMPUQMasked256", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredAtomicAnd8Variant", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "VPCMPUQ256", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredAtomicOr8Variant", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "VPCMPUQ512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - 
name: "LoweredAtomicAnd64Variant", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "VPCMPUQMasked512", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredAtomicOr64Variant", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "VPCMPUB128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredAtomicAnd32Variant", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "VPCMPUBMasked128", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredAtomicOr32Variant", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "VPCMPUB256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredWB", - auxType: 
auxInt64, - argLen: 1, - clobberFlags: true, + name: "VPCMPUBMasked256", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPUB, reg: regInfo{ - clobbers: 9223372034975924224, // R16 R17 R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 16777216}, // R25 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredPanicBoundsA", - auxType: auxInt64, - argLen: 3, - call: true, + name: "VPCMPUB512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ - {0, 4}, // R2 - {1, 8}, // R3 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredPanicBoundsB", - auxType: auxInt64, + name: "VPCMPUBMasked512", + auxType: auxInt8, argLen: 3, - call: true, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ - {0, 2}, // R1 - {1, 4}, // R2 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, + { - name: "LoweredPanicBoundsC", - auxType: auxInt64, - argLen: 3, - call: true, + name: "ADD", + argLen: 2, + commutative: true, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 1}, // R0 - {1, 2}, // R1 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, - }, - }, - { - name: "PRFM", - auxType: auxInt64, - argLen: 2, - hasSideEffects: true, - asm: arm64.APRFM, - reg: regInfo{ - inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "DMB", - auxType: auxInt64, - argLen: 1, - hasSideEffects: true, - asm: arm64.ADMB, - reg: regInfo{}, - }, - { - name: "ZERO", - argLen: 0, - zeroWidth: true, - fixedReg: true, - reg: regInfo{}, - }, - - { - name: "NEGV", - argLen: 1, + name: "ADDconst", + auxType: auxInt32, + argLen: 1, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 30719}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "NEGF", - argLen: 1, - asm: loong64.ANEGF, + name: "SUB", + argLen: 2, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 
F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "NEGD", - argLen: 1, - asm: loong64.ANEGD, + name: "SUBconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SQRTD", - argLen: 1, - asm: loong64.ASQRTD, + name: "RSB", + argLen: 2, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SQRTF", - argLen: 1, - asm: loong64.ASQRTF, + name: "RSBconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "ABSD", - argLen: 1, - asm: loong64.AABSD, + name: "MUL", + argLen: 2, + commutative: true, + asm: arm.AMUL, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CLZW", - argLen: 1, - asm: loong64.ACLZW, + name: "HMUL", + argLen: 2, + commutative: true, + asm: arm.AMULL, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CLZV", - argLen: 1, - asm: loong64.ACLZV, + name: "HMULU", + argLen: 2, + commutative: true, + asm: arm.AMULLU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 
22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CTZW", - argLen: 1, - asm: loong64.ACTZW, + name: "CALLudiv", + argLen: 2, + clobberFlags: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 2}, // R1 + {1, 1}, // R0 }, + clobbers: 20492, // R2 R3 R12 R14 outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1}, // R0 + {1, 2}, // R1 }, }, }, { - name: "CTZV", - argLen: 1, - asm: loong64.ACTZV, + name: "ADDS", + argLen: 2, + commutative: true, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "REVB2H", - argLen: 1, - asm: loong64.AREVB2H, + name: "ADDSconst", + auxType: auxInt32, + argLen: 1, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "REVB2W", - argLen: 1, - asm: loong64.AREVB2W, + name: "ADC", + argLen: 3, + commutative: true, + asm: arm.AADC, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "REVBV", - argLen: 1, - asm: loong64.AREVBV, + name: "ADCconst", + auxType: auxInt32, + argLen: 2, + asm: arm.AADC, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "BITREV4B", - argLen: 1, - asm: loong64.ABITREV4B, + name: "SUBS", + argLen: 2, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 
R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "BITREVW", - argLen: 1, - asm: loong64.ABITREVW, + name: "SUBSconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "BITREVV", - argLen: 1, - asm: loong64.ABITREVV, + name: "RSBSconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "VPCNT64", - argLen: 1, - asm: loong64.AVPCNTV, + name: "SBC", + argLen: 3, + asm: arm.ASBC, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "VPCNT32", - argLen: 1, - asm: loong64.AVPCNTW, + name: "SBCconst", + auxType: auxInt32, + argLen: 2, + asm: arm.ASBC, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "VPCNT16", - argLen: 1, - asm: loong64.AVPCNTH, + name: "RSCconst", + auxType: auxInt32, + argLen: 2, + asm: arm.ARSC, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "ADDV", + name: "MULLU", argLen: 2, commutative: true, - asm: loong64.AADDVU, - reg: regInfo{ - inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 
R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 - }, - }, - }, - { - name: "ADDVconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.AADDVU, + asm: arm.AMULLU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741820}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SUBV", - argLen: 2, - asm: loong64.ASUBVU, + name: "MULA", + argLen: 3, + asm: arm.AMULA, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SUBVconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.ASUBVU, + name: "MULS", + argLen: 3, + asm: arm.AMULS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MULV", + name: "ADDF", argLen: 2, commutative: true, - asm: loong64.AMULV, + asm: arm.AADDF, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "MULHV", + name: "ADDD", argLen: 2, commutative: true, - asm: loong64.AMULHV, + asm: arm.AADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 
R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "MULHVU", - argLen: 2, - commutative: true, - asm: loong64.AMULHVU, + name: "SUBF", + argLen: 2, + asm: arm.ASUBF, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "DIVV", + name: "SUBD", argLen: 2, - asm: loong64.ADIVV, + asm: arm.ASUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "DIVVU", - argLen: 2, - asm: loong64.ADIVVU, + name: "MULF", + argLen: 2, + commutative: true, + asm: arm.AMULF, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "REMV", - argLen: 2, - asm: loong64.AREMV, + name: "MULD", + argLen: 2, + commutative: true, + asm: arm.AMULD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "REMVU", - argLen: 2, - asm: loong64.AREMVU, + name: "NMULF", + argLen: 2, + commutative: true, + asm: arm.ANMULF, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 
R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "ADDF", + name: "NMULD", argLen: 2, commutative: true, - asm: loong64.AADDF, + asm: arm.ANMULD, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "ADDD", - argLen: 2, - commutative: true, - asm: loong64.AADDD, + name: "DIVF", + argLen: 2, + asm: arm.ADIVF, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "SUBF", + name: "DIVD", argLen: 2, - asm: loong64.ASUBF, + asm: arm.ADIVD, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "SUBD", - argLen: 2, - asm: loong64.ASUBD, + name: "MULAF", + argLen: 3, + resultInArg0: true, + asm: arm.AMULAF, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 
F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "MULF", - argLen: 2, - commutative: true, - asm: loong64.AMULF, + name: "MULAD", + argLen: 3, + resultInArg0: true, + asm: arm.AMULAD, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "MULD", - argLen: 2, - commutative: true, - asm: loong64.AMULD, + name: "MULSF", + argLen: 3, + resultInArg0: true, + asm: arm.AMULSF, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "DIVF", - argLen: 2, - asm: loong64.ADIVF, + name: "MULSD", + argLen: 3, + resultInArg0: true, + asm: arm.AMULSD, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "DIVD", - argLen: 2, - asm: loong64.ADIVD, + name: "FMULAD", + argLen: 3, + resultInArg0: true, + asm: arm.AFMULAD, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 
F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, @@ -24748,28 +27023,28 @@ var opcodeTable = [...]opInfo{ name: "AND", argLen: 2, commutative: true, - asm: loong64.AAND, + asm: arm.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { name: "ANDconst", - auxType: auxInt64, + auxType: auxInt32, argLen: 1, - asm: loong64.AAND, + asm: arm.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, @@ -24777,28 +27052,28 @@ var opcodeTable = [...]opInfo{ name: "OR", argLen: 2, commutative: true, - asm: loong64.AOR, + asm: arm.AORR, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { name: "ORconst", - auxType: auxInt64, + auxType: auxInt32, argLen: 1, - asm: loong64.AOR, + asm: arm.AORR, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, @@ -24806,9066 +27081,8906 @@ var opcodeTable = [...]opInfo{ name: "XOR", argLen: 2, commutative: true, - asm: loong64.AXOR, + asm: arm.AEOR, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // 
R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { name: "XORconst", - auxType: auxInt64, + auxType: auxInt32, argLen: 1, - asm: loong64.AXOR, + asm: arm.AEOR, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "NOR", - argLen: 2, - commutative: true, - asm: loong64.ANOR, + name: "BIC", + argLen: 2, + asm: arm.ABIC, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "NORconst", - auxType: auxInt64, + name: "BICconst", + auxType: auxInt32, argLen: 1, - asm: loong64.ANOR, + asm: arm.ABIC, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "ANDN", - argLen: 2, - asm: loong64.AANDN, + name: "BFX", + auxType: auxInt32, + argLen: 1, + asm: arm.ABFX, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "ORN", - argLen: 2, - asm: loong64.AORN, + name: "BFXU", + auxType: auxInt32, + argLen: 1, + asm: arm.ABFXU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 
R14 }, }, }, { - name: "FMADDF", - argLen: 3, - commutative: true, - asm: loong64.AFMADDF, + name: "MVN", + argLen: 1, + asm: arm.AMVN, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "FMADDD", - argLen: 3, - commutative: true, - asm: loong64.AFMADDD, + name: "NEGF", + argLen: 1, + asm: arm.ANEGF, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "FMSUBF", - argLen: 3, - commutative: true, - asm: loong64.AFMSUBF, + name: "NEGD", + argLen: 1, + asm: arm.ANEGD, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "FMSUBD", - argLen: 3, - commutative: true, - asm: loong64.AFMSUBD, + name: "SQRTD", + argLen: 1, + asm: arm.ASQRTD, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 
F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "FNMADDF", - argLen: 3, - commutative: true, - asm: loong64.AFNMADDF, + name: "SQRTF", + argLen: 1, + asm: arm.ASQRTF, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "FNMADDD", - argLen: 3, - commutative: true, - asm: loong64.AFNMADDD, + name: "ABSD", + argLen: 1, + asm: arm.AABSD, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "FNMSUBF", - argLen: 3, - commutative: true, - asm: loong64.AFNMSUBF, + name: "CLZ", + argLen: 1, + asm: arm.ACLZ, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "FNMSUBD", - argLen: 3, - commutative: true, - asm: loong64.AFNMSUBD, + name: "REV", + argLen: 1, + asm: arm.AREV, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 
F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "FMINF", - argLen: 2, - commutative: true, - resultNotInArgs: true, - asm: loong64.AFMINF, + name: "REV16", + argLen: 1, + asm: arm.AREV16, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "FMIND", - argLen: 2, - commutative: true, - resultNotInArgs: true, - asm: loong64.AFMIND, + name: "RBIT", + argLen: 1, + asm: arm.ARBIT, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "FMAXF", - argLen: 2, - commutative: true, - resultNotInArgs: true, - asm: loong64.AFMAXF, + name: "SLL", + argLen: 2, + asm: arm.ASLL, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "FMAXD", - argLen: 2, - commutative: true, - resultNotInArgs: true, - asm: loong64.AFMAXD, + name: "SLLconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ASLL, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MASKEQZ", + name: 
"SRL", argLen: 2, - asm: loong64.AMASKEQZ, + asm: arm.ASRL, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MASKNEZ", - argLen: 2, - asm: loong64.AMASKNEZ, + name: "SRLconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ASRL, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "FCOPYSGD", + name: "SRA", argLen: 2, - asm: loong64.AFCOPYSGD, + asm: arm.ASRA, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SLL", - argLen: 2, - asm: loong64.ASLL, + name: "SRAconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ASRA, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SLLV", + name: "SRR", argLen: 2, - asm: loong64.ASLLV, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SLLconst", - auxType: auxInt64, + name: "SRRconst", + auxType: auxInt32, argLen: 1, - asm: 
loong64.ASLL, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SLLVconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.ASLLV, + name: "ADDshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SRL", - argLen: 2, - asm: loong64.ASRL, + name: "ADDshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SRLV", - argLen: 2, - asm: loong64.ASRLV, + name: "ADDshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SRLconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.ASRL, + name: "SUBshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SRLVconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.ASRLV, + name: "SUBshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 
R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SRA", - argLen: 2, - asm: loong64.ASRA, + name: "SUBshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SRAV", - argLen: 2, - asm: loong64.ASRAV, + name: "RSBshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SRAconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.ASRA, + name: "RSBshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SRAVconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.ASRAV, + name: "RSBshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "ROTR", - argLen: 2, - asm: loong64.AROTR, + name: "ANDshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 
R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "ROTRV", - argLen: 2, - asm: loong64.AROTRV, + name: "ANDshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "ROTRconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.AROTR, + name: "ANDshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "ROTRVconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.AROTRV, + name: "ORshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.AORR, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SGT", - argLen: 2, - asm: loong64.ASGT, + name: "ORshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.AORR, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SGTconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.ASGT, + name: "ORshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.AORR, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SGTU", - argLen: 2, - asm: loong64.ASGTU, + name: 
"XORshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.AEOR, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SGTUconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.ASGTU, + name: "XORshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.AEOR, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMPEQF", - argLen: 2, - asm: loong64.ACMPEQF, + name: "XORshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.AEOR, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMPEQD", - argLen: 2, - asm: loong64.ACMPEQD, + name: "XORshiftRR", + auxType: auxInt32, + argLen: 2, + asm: arm.AEOR, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMPGEF", - argLen: 2, - asm: loong64.ACMPGEF, + name: "BICshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.ABIC, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMPGED", - argLen: 2, - asm: loong64.ACMPGED, + name: "BICshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.ABIC, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 
F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMPGTF", - argLen: 2, - asm: loong64.ACMPGTF, + name: "BICshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.ABIC, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMPGTD", - argLen: 2, - asm: loong64.ACMPGTD, + name: "MVNshiftLL", + auxType: auxInt32, + argLen: 1, + asm: arm.AMVN, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "BSTRPICKW", - auxType: auxInt64, + name: "MVNshiftRL", + auxType: auxInt32, argLen: 1, - asm: loong64.ABSTRPICKW, + asm: arm.AMVN, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "BSTRPICKV", - auxType: auxInt64, + name: "MVNshiftRA", + auxType: auxInt32, argLen: 1, - asm: loong64.ABSTRPICKV, + asm: arm.AMVN, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVVconst", - auxType: auxInt64, - argLen: 0, - rematerializeable: true, - asm: loong64.AMOVV, + name: "ADCshiftLL", + auxType: auxInt32, + argLen: 3, + asm: arm.AADC, reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVFconst", - auxType: auxFloat64, - argLen: 0, - rematerializeable: true, - asm: loong64.AMOVF, + name: "ADCshiftRL", + auxType: auxInt32, + argLen: 3, + 
asm: arm.AADC, reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVDconst", - auxType: auxFloat64, - argLen: 0, - rematerializeable: true, - asm: loong64.AMOVD, + name: "ADCshiftRA", + auxType: auxInt32, + argLen: 3, + asm: arm.AADC, reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVVaddr", - auxType: auxSymOff, - argLen: 1, - rematerializeable: true, - symEffect: SymAddr, - asm: loong64.AMOVV, + name: "SBCshiftLL", + auxType: auxInt32, + argLen: 3, + asm: arm.ASBC, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018427387908}, // SP SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVBload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: loong64.AMOVB, + name: "SBCshiftRL", + auxType: auxInt32, + argLen: 3, + asm: arm.ASBC, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVBUload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: loong64.AMOVBU, + name: "SBCshiftRA", + auxType: auxInt32, + argLen: 3, + asm: arm.ASBC, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVHload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: loong64.AMOVH, + name: "RSCshiftLL", + auxType: auxInt32, + argLen: 3, + asm: arm.ARSC, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 
R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVHUload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: loong64.AMOVHU, + name: "RSCshiftRL", + auxType: auxInt32, + argLen: 3, + asm: arm.ARSC, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: loong64.AMOVW, + name: "RSCshiftRA", + auxType: auxInt32, + argLen: 3, + asm: arm.ARSC, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWUload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: loong64.AMOVWU, + name: "ADDSshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVVload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: loong64.AMOVV, + name: "ADDSshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVFload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: loong64.AMOVF, + name: "ADDSshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 0}, + {0, 
21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVDload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: loong64.AMOVD, + name: "SUBSshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVVloadidx", - argLen: 3, - asm: loong64.AMOVV, + name: "SUBSshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWloadidx", - argLen: 3, - asm: loong64.AMOVW, + name: "SUBSshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWUloadidx", - argLen: 3, - asm: loong64.AMOVWU, + name: "RSBSshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVHloadidx", - argLen: 3, - asm: loong64.AMOVH, + name: "RSBSshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 
g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVHUloadidx", - argLen: 3, - asm: loong64.AMOVHU, + name: "RSBSshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVBloadidx", + name: "ADDshiftLLreg", argLen: 3, - asm: loong64.AMOVB, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVBUloadidx", + name: "ADDshiftRLreg", argLen: 3, - asm: loong64.AMOVBU, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVFloadidx", + name: "ADDshiftRAreg", argLen: 3, - asm: loong64.AMOVF, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVDloadidx", + name: "SUBshiftLLreg", argLen: 3, - asm: loong64.AMOVD, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 
R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVBstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: loong64.AMOVB, + name: "SUBshiftRLreg", + argLen: 3, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVHstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: loong64.AMOVH, + name: "SUBshiftRAreg", + argLen: 3, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: loong64.AMOVW, + name: "RSBshiftLLreg", + argLen: 3, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVVstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: loong64.AMOVV, + name: "RSBshiftRLreg", + argLen: 3, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 
R12 R14 }, }, }, { - name: "MOVFstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: loong64.AMOVF, + name: "RSBshiftRAreg", + argLen: 3, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVDstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: loong64.AMOVD, + name: "ANDshiftLLreg", + argLen: 3, + asm: arm.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, - }, - }, - { - name: "MOVBstoreidx", - argLen: 4, - asm: loong64.AMOVB, - reg: regInfo{ - inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVHstoreidx", - argLen: 4, - asm: loong64.AMOVH, + name: "ANDshiftRLreg", + argLen: 3, + asm: arm.AAND, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, - }, - }, - { - name: "MOVWstoreidx", - argLen: 4, - asm: loong64.AMOVW, - reg: regInfo{ - inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVVstoreidx", - argLen: 4, - asm: loong64.AMOVV, + name: "ANDshiftRAreg", + argLen: 3, + asm: arm.AAND, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g 
R23 R24 R25 R26 R27 R28 R29 R31 - {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, - }, - }, - { - name: "MOVFstoreidx", - argLen: 4, - asm: loong64.AMOVF, - reg: regInfo{ - inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVDstoreidx", - argLen: 4, - asm: loong64.AMOVD, + name: "ORshiftLLreg", + argLen: 3, + asm: arm.AORR, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVBstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: loong64.AMOVB, + name: "ORshiftRLreg", + argLen: 3, + asm: arm.AORR, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVHstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: loong64.AMOVH, + name: "ORshiftRAreg", + argLen: 3, + asm: arm.AORR, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: loong64.AMOVW, + name: "XORshiftLLreg", + argLen: 3, + asm: arm.AEOR, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 
R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVVstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: loong64.AMOVV, + name: "XORshiftRLreg", + argLen: 3, + asm: arm.AEOR, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVBstorezeroidx", + name: "XORshiftRAreg", argLen: 3, - asm: loong64.AMOVB, + asm: arm.AEOR, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVHstorezeroidx", + name: "BICshiftLLreg", argLen: 3, - asm: loong64.AMOVH, + asm: arm.ABIC, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWstorezeroidx", + name: "BICshiftRLreg", argLen: 3, - asm: loong64.AMOVW, + asm: arm.ABIC, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVVstorezeroidx", + name: "BICshiftRAreg", argLen: 3, - asm: loong64.AMOVV, + asm: arm.ABIC, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWfpgp", - argLen: 1, - asm: loong64.AMOVW, + name: "MVNshiftLLreg", + argLen: 2, + asm: arm.AMVN, reg: 
regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWgpfp", - argLen: 1, - asm: loong64.AMOVW, + name: "MVNshiftRLreg", + argLen: 2, + asm: arm.AMVN, reg: regInfo{ inputs: []inputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVVfpgp", - argLen: 1, - asm: loong64.AMOVV, + name: "MVNshiftRAreg", + argLen: 2, + asm: arm.AMVN, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVVgpfp", - argLen: 1, - asm: loong64.AMOVV, + name: "ADCshiftLLreg", + argLen: 4, + asm: arm.AADC, reg: regInfo{ inputs: []inputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVBreg", - argLen: 1, - asm: loong64.AMOVB, + name: "ADCshiftRLreg", + argLen: 4, + asm: arm.AADC, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVBUreg", - argLen: 1, - asm: loong64.AMOVBU, + name: "ADCshiftRAreg", + argLen: 4, + asm: arm.AADC, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 
R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVHreg", - argLen: 1, - asm: loong64.AMOVH, + name: "SBCshiftLLreg", + argLen: 4, + asm: arm.ASBC, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVHUreg", - argLen: 1, - asm: loong64.AMOVHU, + name: "SBCshiftRLreg", + argLen: 4, + asm: arm.ASBC, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWreg", - argLen: 1, - asm: loong64.AMOVW, + name: "SBCshiftRAreg", + argLen: 4, + asm: arm.ASBC, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWUreg", - argLen: 1, - asm: loong64.AMOVWU, + name: "RSCshiftLLreg", + argLen: 4, + asm: arm.ARSC, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVVreg", - argLen: 1, - asm: loong64.AMOVV, + name: "RSCshiftRLreg", + argLen: 4, + asm: arm.ARSC, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVVnop", - argLen: 1, - resultInArg0: true, + name: "RSCshiftRAreg", + argLen: 4, + asm: arm.ARSC, reg: regInfo{ inputs: []inputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 
R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWF", - argLen: 1, - asm: loong64.AMOVWF, + name: "ADDSshiftLLreg", + argLen: 3, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWD", - argLen: 1, - asm: loong64.AMOVWD, + name: "ADDSshiftRLreg", + argLen: 3, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVVF", - argLen: 1, - asm: loong64.AMOVVF, + name: "ADDSshiftRAreg", + argLen: 3, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVVD", - argLen: 1, - asm: loong64.AMOVVD, + name: "SUBSshiftLLreg", + argLen: 3, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "TRUNCFW", - argLen: 1, - asm: loong64.ATRUNCFW, + name: "SUBSshiftRLreg", + argLen: 3, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 
F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "TRUNCDW", - argLen: 1, - asm: loong64.ATRUNCDW, + name: "SUBSshiftRAreg", + argLen: 3, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "TRUNCFV", - argLen: 1, - asm: loong64.ATRUNCFV, + name: "RSBSshiftLLreg", + argLen: 3, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "TRUNCDV", - argLen: 1, - asm: loong64.ATRUNCDV, + name: "RSBSshiftRLreg", + argLen: 3, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVFD", - argLen: 1, - asm: loong64.AMOVFD, + name: "RSBSshiftRAreg", + argLen: 3, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVDF", - argLen: 1, - asm: loong64.AMOVDF, + name: "CMP", + argLen: 2, + asm: arm.ACMP, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 
F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "LoweredRound32F", - argLen: 1, - resultInArg0: true, + name: "CMPconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ACMP, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "LoweredRound64F", - argLen: 1, - resultInArg0: true, + name: "CMN", + argLen: 2, + commutative: true, + asm: arm.ACMN, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "CALLstatic", - auxType: auxCallOff, - argLen: -1, - clobberFlags: true, - call: true, + name: "CMNconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ACMN, reg: regInfo{ - clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, }, }, { - name: "CALLtail", - auxType: auxCallOff, - argLen: -1, - clobberFlags: true, - call: true, - tailCall: true, - reg: regInfo{ - clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - }, - { - name: "CALLclosure", - auxType: auxCallOff, - argLen: -1, - clobberFlags: true, - call: true, + name: "TST", + argLen: 2, + commutative: true, + asm: arm.ATST, reg: regInfo{ inputs: []inputInfo{ - {1, 268435456}, // R29 - {0, 1071644668}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, - clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { - name: "CALLinter", - auxType: auxCallOff, - argLen: -1, - clobberFlags: true, - call: true, + name: "TSTconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ATST, reg: regInfo{ inputs: []inputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, - clobbers: 
4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { - name: "DUFFZERO", - auxType: auxInt64, - argLen: 2, - faultOnNilArg0: true, + name: "TEQ", + argLen: 2, + commutative: true, + asm: arm.ATEQ, reg: regInfo{ inputs: []inputInfo{ - {0, 524288}, // R20 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, - clobbers: 524290, // R1 R20 }, }, { - name: "DUFFCOPY", - auxType: auxInt64, - argLen: 3, - faultOnNilArg0: true, - faultOnNilArg1: true, + name: "TEQconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ATEQ, reg: regInfo{ inputs: []inputInfo{ - {0, 1048576}, // R21 - {1, 524288}, // R20 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, - clobbers: 1572866, // R1 R20 R21 }, }, { - name: "LoweredZero", - auxType: auxInt64, - argLen: 3, - faultOnNilArg0: true, + name: "CMPF", + argLen: 2, + asm: arm.ACMPF, reg: regInfo{ inputs: []inputInfo{ - {0, 524288}, // R20 - {1, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, - clobbers: 524288, // R20 }, }, { - name: "LoweredMove", - auxType: auxInt64, - argLen: 4, - faultOnNilArg0: true, - faultOnNilArg1: true, + name: "CMPD", + argLen: 2, + asm: arm.ACMPD, reg: regInfo{ inputs: []inputInfo{ - {0, 1048576}, // R21 - {1, 524288}, // R20 - {2, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, - clobbers: 1572864, // R20 R21 }, }, { - name: "LoweredAtomicLoad8", - argLen: 2, - faultOnNilArg0: true, + name: "CMPshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.ACMP, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "LoweredAtomicLoad32", - argLen: 2, - faultOnNilArg0: true, + name: "CMPshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.ACMP, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "LoweredAtomicLoad64", - argLen: 2, - faultOnNilArg0: true, + name: "CMPshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.ACMP, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 
R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "LoweredAtomicStore8", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, + name: "CMNshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.ACMN, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "LoweredAtomicStore32", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, + name: "CMNshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.ACMN, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "LoweredAtomicStore64", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, + name: "CMNshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.ACMN, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "LoweredAtomicStore8Variant", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, + name: "TSTshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.ATST, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "LoweredAtomicStore32Variant", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, + name: "TSTshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.ATST, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "LoweredAtomicStore64Variant", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, + name: "TSTshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.ATST, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 
R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "LoweredAtomicExchange32", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "TEQshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.ATEQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "LoweredAtomicExchange64", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "TEQshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.ATEQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "LoweredAtomicExchange8Variant", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "TEQshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.ATEQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "LoweredAtomicAdd32", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "CMPshiftLLreg", + argLen: 3, + asm: arm.ACMP, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LoweredAtomicAdd64", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "CMPshiftRLreg", + argLen: 3, + asm: arm.ACMP, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 
R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LoweredAtomicCas32", - argLen: 4, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "CMPshiftRAreg", + argLen: 3, + asm: arm.ACMP, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LoweredAtomicCas64", - argLen: 4, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "CMNshiftLLreg", + argLen: 3, + asm: arm.ACMN, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LoweredAtomicCas64Variant", - argLen: 4, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "CMNshiftRLreg", + argLen: 3, + asm: arm.ACMN, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LoweredAtomicCas32Variant", - argLen: 4, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "CMNshiftRAreg", + argLen: 3, + asm: arm.ACMN, reg: regInfo{ inputs: 
[]inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LoweredAtomicAnd32", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - asm: loong64.AAMANDDBW, + name: "TSTshiftLLreg", + argLen: 3, + asm: arm.ATST, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LoweredAtomicOr32", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - asm: loong64.AAMORDBW, + name: "TSTshiftRLreg", + argLen: 3, + asm: arm.ATST, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LoweredAtomicAnd32value", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - asm: loong64.AAMANDDBW, + name: "TSTshiftRAreg", + argLen: 3, + asm: arm.ATST, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LoweredAtomicAnd64value", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - asm: loong64.AAMANDDBV, + name: "TEQshiftLLreg", + argLen: 3, + asm: arm.ATEQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 
4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LoweredAtomicOr32value", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - asm: loong64.AAMORDBW, + name: "TEQshiftRLreg", + argLen: 3, + asm: arm.ATEQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LoweredAtomicOr64value", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - asm: loong64.AAMORDBV, + name: "TEQshiftRAreg", + argLen: 3, + asm: arm.ATEQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LoweredNilCheck", - argLen: 2, - nilCheck: true, - faultOnNilArg0: true, + name: "CMPF0", + argLen: 1, + asm: arm.ACMPF, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "FPFlagTrue", + name: "CMPD0", argLen: 1, + asm: arm.ACMPD, reg: regInfo{ - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "FPFlagFalse", - argLen: 1, + name: "MOVWconst", + auxType: auxInt32, + argLen: 0, + rematerializeable: true, + asm: arm.AMOVW, reg: regInfo{ outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LoweredGetClosurePtr", - argLen: 0, - zeroWidth: true, + name: "MOVFconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: arm.AMOVF, reg: regInfo{ outputs: []outputInfo{ - {0, 268435456}, // R29 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "LoweredGetCallerSP", - argLen: 1, + name: "MOVDconst", + 
auxType: auxFloat64, + argLen: 0, rematerializeable: true, + asm: arm.AMOVD, reg: regInfo{ outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "LoweredGetCallerPC", - argLen: 0, + name: "MOVWaddr", + auxType: auxSymOff, + argLen: 1, rematerializeable: true, + symEffect: SymAddr, + asm: arm.AMOVW, reg: regInfo{ + inputs: []inputInfo{ + {0, 4294975488}, // SP SB + }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LoweredWB", - auxType: auxInt64, - argLen: 1, - clobberFlags: true, + name: "MOVBload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm.AMOVB, reg: regInfo{ - clobbers: 4611686017353646082, // R1 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + inputs: []inputInfo{ + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + }, outputs: []outputInfo{ - {0, 268435456}, // R29 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LoweredPubBarrier", - argLen: 1, - hasSideEffects: true, - asm: loong64.ADBAR, - reg: regInfo{}, - }, - { - name: "LoweredPanicBoundsA", - auxType: auxInt64, - argLen: 3, - call: true, + name: "MOVBUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 4194304}, // R23 - {1, 8388608}, // R24 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, - }, - }, - { - name: "LoweredPanicBoundsB", - auxType: auxInt64, - argLen: 3, - call: true, - reg: regInfo{ - inputs: []inputInfo{ - {0, 1048576}, // R21 - {1, 4194304}, // R23 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LoweredPanicBoundsC", - auxType: auxInt64, - argLen: 3, - call: true, + name: "MOVHload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 524288}, // R20 - {1, 1048576}, // R21 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "PRELD", - auxType: auxInt64, + name: "MOVHUload", + auxType: auxSymOff, argLen: 2, - hasSideEffects: true, - asm: loong64.APRELD, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741820}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "PRELDX", - auxType: auxInt64, + name: "MOVWload", + auxType: auxSymOff, argLen: 2, - hasSideEffects: true, - asm: loong64.APRELDX, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741820}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 
R12 R14 }, }, }, - { - name: "ADD", - argLen: 2, - commutative: true, - asm: mips.AADDU, + name: "MOVFload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm.AMOVF, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "ADDconst", - auxType: auxInt32, - argLen: 1, - asm: mips.AADDU, + name: "MOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 536870910}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "SUB", - argLen: 2, - asm: mips.ASUBU, + name: "MOVBstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, }, }, { - name: "SUBconst", - auxType: auxInt32, - argLen: 1, - asm: mips.ASUBU, + name: "MOVHstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, }, }, { - name: "MUL", - argLen: 2, - commutative: true, - asm: mips.AMUL, + name: "MOVWstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - clobbers: 105553116266496, // HI LO - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, }, }, { - 
name: "MULT", - argLen: 2, - commutative: true, - asm: mips.AMUL, + name: "MOVFstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm.AMOVF, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 35184372088832}, // HI - {1, 70368744177664}, // LO + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "MULTU", - argLen: 2, - commutative: true, - asm: mips.AMULU, + name: "MOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 35184372088832}, // HI - {1, 70368744177664}, // LO + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "DIV", - argLen: 2, - asm: mips.ADIV, + name: "MOVWloadidx", + argLen: 3, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, outputs: []outputInfo{ - {0, 35184372088832}, // HI - {1, 70368744177664}, // LO + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "DIVU", - argLen: 2, - asm: mips.ADIVU, + name: "MOVWloadshiftLL", + auxType: auxInt32, + argLen: 3, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, outputs: []outputInfo{ - {0, 35184372088832}, // HI - {1, 70368744177664}, // LO + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "ADDF", - argLen: 2, - commutative: true, - asm: mips.AADDF, + name: "MOVWloadshiftRL", + auxType: auxInt32, + argLen: 3, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "ADDD", - argLen: 2, - commutative: true, - asm: mips.AADDD, + name: "MOVWloadshiftRA", + auxType: auxInt32, + argLen: 3, + asm: arm.AMOVW, reg: regInfo{ inputs: 
[]inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SUBF", - argLen: 2, - asm: mips.ASUBF, + name: "MOVBUloadidx", + argLen: 3, + asm: arm.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SUBD", - argLen: 2, - asm: mips.ASUBD, + name: "MOVBloadidx", + argLen: 3, + asm: arm.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MULF", - argLen: 2, - commutative: true, - asm: mips.AMULF, + name: "MOVHUloadidx", + argLen: 3, + asm: arm.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MULD", - argLen: 2, - commutative: true, - asm: mips.AMULD, + name: "MOVHloadidx", + argLen: 3, + asm: arm.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "DIVF", - argLen: 2, - asm: mips.ADIVF, + name: "MOVWstoreidx", + argLen: 4, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - }, - outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 
R9 g R12 SP R14 SB }, }, }, { - name: "DIVD", - argLen: 2, - asm: mips.ADIVD, + name: "MOVWstoreshiftLL", + auxType: auxInt32, + argLen: 4, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - }, - outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, }, }, { - name: "AND", - argLen: 2, - commutative: true, - asm: mips.AAND, + name: "MOVWstoreshiftRL", + auxType: auxInt32, + argLen: 4, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, }, }, { - name: "ANDconst", + name: "MOVWstoreshiftRA", auxType: auxInt32, - argLen: 1, - asm: mips.AAND, + argLen: 4, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, }, }, { - name: "OR", - argLen: 2, - commutative: true, - asm: mips.AOR, + name: "MOVBstoreidx", + argLen: 4, + asm: arm.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, }, }, { - name: "ORconst", - auxType: auxInt32, - argLen: 1, - asm: mips.AOR, + name: "MOVHstoreidx", + argLen: 4, + asm: arm.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, }, }, { - name: "XOR", - argLen: 2, - commutative: true, - asm: mips.AXOR, + name: "MOVBreg", + argLen: 1, + asm: arm.AMOVBS, reg: regInfo{ inputs: []inputInfo{ - {0, 
469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "XORconst", - auxType: auxInt32, - argLen: 1, - asm: mips.AXOR, + name: "MOVBUreg", + argLen: 1, + asm: arm.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "NOR", - argLen: 2, - commutative: true, - asm: mips.ANOR, + name: "MOVHreg", + argLen: 1, + asm: arm.AMOVHS, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "NORconst", - auxType: auxInt32, - argLen: 1, - asm: mips.ANOR, + name: "MOVHUreg", + argLen: 1, + asm: arm.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "NEG", + name: "MOVWreg", argLen: 1, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "NEGF", - argLen: 1, - asm: mips.ANEGF, + name: "MOVWnop", + argLen: 1, + resultInArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "NEGD", + name: "MOVWF", argLen: 1, - asm: mips.ANEGD, + asm: arm.AMOVWF, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, + clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 
F26 F28 F30 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "ABSD", + name: "MOVWD", argLen: 1, - asm: mips.AABSD, + asm: arm.AMOVWD, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, + clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "SQRTD", + name: "MOVWUF", argLen: 1, - asm: mips.ASQRTD, + asm: arm.AMOVWF, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, + clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "SQRTF", + name: "MOVWUD", argLen: 1, - asm: mips.ASQRTF, + asm: arm.AMOVWD, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, + clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "SLL", - argLen: 2, - asm: mips.ASLL, + name: "MOVFW", + argLen: 1, + asm: arm.AMOVFW, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, + clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SLLconst", - auxType: auxInt32, - argLen: 1, - asm: mips.ASLL, + name: "MOVDW", + argLen: 1, + asm: arm.AMOVDW, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, + clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SRL", - argLen: 2, - asm: mips.ASRL, + name: "MOVFWU", + argLen: 1, + asm: arm.AMOVFW, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, + clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SRLconst", - auxType: 
auxInt32, - argLen: 1, - asm: mips.ASRL, + name: "MOVDWU", + argLen: 1, + asm: arm.AMOVDW, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, + clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SRA", - argLen: 2, - asm: mips.ASRA, + name: "MOVFD", + argLen: 1, + asm: arm.AMOVFD, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "SRAconst", - auxType: auxInt32, - argLen: 1, - asm: mips.ASRA, + name: "MOVDF", + argLen: 1, + asm: arm.AMOVDF, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "CLZ", - argLen: 1, - asm: mips.ACLZ, + name: "CMOVWHSconst", + auxType: auxInt32, + argLen: 2, + resultInArg0: true, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SGT", - argLen: 2, - asm: mips.ASGT, + name: "CMOVWLSconst", + auxType: auxInt32, + argLen: 2, + resultInArg0: true, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SGTconst", - auxType: auxInt32, - argLen: 1, - asm: mips.ASGT, + name: "SRAcond", + argLen: 3, + asm: arm.ASRA, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 
R22 R24 R25 R28 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SGTzero", - argLen: 1, - asm: mips.ASGT, + name: "CALLstatic", + auxType: auxCallOff, + argLen: 1, + clobberFlags: true, + call: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 - }, + clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, { - name: "SGTU", - argLen: 2, - asm: mips.ASGTU, + name: "CALLtail", + auxType: auxCallOff, + argLen: 1, + clobberFlags: true, + call: true, + tailCall: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 - }, + clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, { - name: "SGTUconst", - auxType: auxInt32, - argLen: 1, - asm: mips.ASGTU, + name: "CALLclosure", + auxType: auxCallOff, + argLen: 3, + clobberFlags: true, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 128}, // R7 + {0, 29695}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP R14 }, + clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, { - name: "SGTUzero", - argLen: 1, - asm: mips.ASGTU, + name: "CALLinter", + auxType: auxCallOff, + argLen: 2, + clobberFlags: true, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, + clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, { - name: "CMPEQF", - argLen: 2, - asm: mips.ACMPEQF, + name: "LoweredNilCheck", + argLen: 2, + nilCheck: true, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "CMPEQD", - argLen: 2, - asm: mips.ACMPEQD, + name: "Equal", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMPGEF", - argLen: 2, - asm: mips.ACMPGEF, + name: "NotEqual", + argLen: 1, 
reg: regInfo{ - inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMPGED", - argLen: 2, - asm: mips.ACMPGED, + name: "LessThan", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMPGTF", - argLen: 2, - asm: mips.ACMPGTF, + name: "LessEqual", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMPGTD", - argLen: 2, - asm: mips.ACMPGTD, + name: "GreaterThan", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWconst", - auxType: auxInt32, - argLen: 0, - rematerializeable: true, - asm: mips.AMOVW, + name: "GreaterEqual", + argLen: 1, reg: regInfo{ outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVFconst", - auxType: auxFloat32, - argLen: 0, - rematerializeable: true, - asm: mips.AMOVF, + name: "LessThanU", + argLen: 1, reg: regInfo{ outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVDconst", - auxType: auxFloat64, - argLen: 0, - rematerializeable: true, - asm: mips.AMOVD, + name: "LessEqualU", + argLen: 1, reg: regInfo{ outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWaddr", - auxType: auxSymOff, - argLen: 1, - rematerializeable: true, - symEffect: SymAddr, - asm: mips.AMOVW, + name: "GreaterThanU", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 140737555464192}, // SP SB - }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVBload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: mips.AMOVB, + name: "GreaterEqualU", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB - }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVBUload", - auxType: auxSymOff, - argLen: 2, + name: "DUFFZERO", + auxType: auxInt64, + argLen: 3, 
faultOnNilArg0: true, - symEffect: SymRead, - asm: mips.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 2}, // R1 + {1, 1}, // R0 }, + clobbers: 20482, // R1 R12 R14 }, }, { - name: "MOVHload", - auxType: auxSymOff, - argLen: 2, + name: "DUFFCOPY", + auxType: auxInt64, + argLen: 3, faultOnNilArg0: true, - symEffect: SymRead, - asm: mips.AMOVH, + faultOnNilArg1: true, reg: regInfo{ inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 4}, // R2 + {1, 2}, // R1 }, + clobbers: 20487, // R0 R1 R2 R12 R14 }, }, { - name: "MOVHUload", - auxType: auxSymOff, - argLen: 2, + name: "LoweredZero", + auxType: auxInt64, + argLen: 4, + clobberFlags: true, faultOnNilArg0: true, - symEffect: SymRead, - asm: mips.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 2}, // R1 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, + clobbers: 2, // R1 }, }, { - name: "MOVWload", - auxType: auxSymOff, - argLen: 2, + name: "LoweredMove", + auxType: auxInt64, + argLen: 4, + clobberFlags: true, faultOnNilArg0: true, - symEffect: SymRead, - asm: mips.AMOVW, + faultOnNilArg1: true, reg: regInfo{ inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 4}, // R2 + {1, 2}, // R1 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, + clobbers: 6, // R1 R2 }, }, { - name: "MOVFload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: mips.AMOVF, + name: "LoweredGetClosurePtr", + argLen: 0, + zeroWidth: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB - }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 128}, // R7 }, }, }, { - name: "MOVDload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: mips.AMOVD, + name: "LoweredGetCallerSP", + argLen: 1, + rematerializeable: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB - }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVBstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVB, + name: "LoweredGetCallerPC", + argLen: 
0, + rematerializeable: true, reg: regInfo{ - inputs: []inputInfo{ - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVHstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVH, + name: "LoweredPanicBoundsA", + auxType: auxInt64, + argLen: 3, + call: true, reg: regInfo{ inputs: []inputInfo{ - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 4}, // R2 + {1, 8}, // R3 }, }, }, { - name: "MOVWstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVW, + name: "LoweredPanicBoundsB", + auxType: auxInt64, + argLen: 3, + call: true, reg: regInfo{ inputs: []inputInfo{ - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 2}, // R1 + {1, 4}, // R2 }, }, }, { - name: "MOVFstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVF, + name: "LoweredPanicBoundsC", + auxType: auxInt64, + argLen: 3, + call: true, reg: regInfo{ inputs: []inputInfo{ - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 1}, // R0 + {1, 2}, // R1 }, }, }, { - name: "MOVDstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVD, + name: "LoweredPanicExtendA", + auxType: auxInt64, + argLen: 4, + call: true, reg: regInfo{ inputs: []inputInfo{ - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 16}, // R4 + {1, 4}, // R2 + {2, 8}, // R3 }, }, }, { - name: "MOVBstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVB, + name: "LoweredPanicExtendB", + auxType: auxInt64, + argLen: 4, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 16}, // R4 + {1, 2}, // R1 + {2, 4}, // R2 }, }, }, { - name: "MOVHstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVH, + name: "LoweredPanicExtendC", + auxType: auxInt64, + argLen: 4, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 16}, // R4 + {1, 1}, // R0 + {2, 2}, // R1 }, }, }, { - name: "MOVWstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 
R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB - }, - }, + name: "FlagConstant", + auxType: auxFlagConstant, + argLen: 0, + reg: regInfo{}, }, { - name: "MOVWfpgp", + name: "InvertFlags", argLen: 1, - asm: mips.AMOVW, + reg: regInfo{}, + }, + { + name: "LoweredWB", + auxType: auxInt64, + argLen: 1, + clobberFlags: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - }, + clobbers: 4294922240, // R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 256}, // R8 }, }, }, + { - name: "MOVWgpfp", - argLen: 1, - asm: mips.AMOVW, + name: "ADCSflags", + argLen: 3, + commutative: true, + asm: arm64.AADCS, reg: regInfo{ inputs: []inputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 0}, + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVBreg", + name: "ADCzerocarry", argLen: 1, - asm: mips.AMOVB, + asm: arm64.AADC, reg: regInfo{ - inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVBUreg", - argLen: 1, - asm: mips.AMOVBU, + name: "ADD", + argLen: 2, + commutative: true, + asm: arm64.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVHreg", - argLen: 1, - asm: mips.AMOVH, + name: "ADDconst", + auxType: auxInt64, + argLen: 1, + asm: arm64.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 1476395007}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVHUreg", - argLen: 1, - asm: mips.AMOVHU, + name: 
"ADDSconstflags", + auxType: auxInt64, + argLen: 1, + asm: arm64.AADDS, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 0}, + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVWreg", - argLen: 1, - asm: mips.AMOVW, + name: "ADDSflags", + argLen: 2, + commutative: true, + asm: arm64.AADDS, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 0}, + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVWnop", - argLen: 1, - resultInArg0: true, + name: "SUB", + argLen: 2, + asm: arm64.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "CMOVZ", - argLen: 3, - resultInArg0: true, - asm: mips.ACMOVZ, + name: "SUBconst", + auxType: auxInt64, + argLen: 1, + asm: arm64.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 - {1, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 - {2, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "CMOVZzero", - argLen: 2, - resultInArg0: true, - asm: mips.ACMOVZ, + name: "SBCSflags", + argLen: 3, + asm: arm64.ASBCS, reg: regInfo{ inputs: []inputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 
R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 0}, + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVWF", - argLen: 1, - asm: mips.AMOVWF, + name: "SUBSflags", + argLen: 2, + asm: arm64.ASUBS, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 0}, + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVWD", - argLen: 1, - asm: mips.AMOVWD, + name: "MUL", + argLen: 2, + commutative: true, + asm: arm64.AMUL, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "TRUNCFW", - argLen: 1, - asm: mips.ATRUNCFW, + name: "MULW", + argLen: 2, + commutative: true, + asm: arm64.AMULW, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "TRUNCDW", - argLen: 1, - asm: mips.ATRUNCDW, + name: "MNEG", + argLen: 2, + commutative: true, + asm: arm64.AMNEG, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVFD", - argLen: 1, - asm: mips.AMOVFD, + name: "MNEGW", + argLen: 2, + commutative: true, + asm: arm64.AMNEGW, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 
F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVDF", - argLen: 1, - asm: mips.AMOVDF, + name: "MULH", + argLen: 2, + commutative: true, + asm: arm64.ASMULH, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "CALLstatic", - auxType: auxCallOff, - argLen: 1, - clobberFlags: true, - call: true, + name: "UMULH", + argLen: 2, + commutative: true, + asm: arm64.AUMULH, reg: regInfo{ - clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, }, }, { - name: "CALLtail", - auxType: auxCallOff, - argLen: 1, - clobberFlags: true, - call: true, - tailCall: true, + name: "MULL", + argLen: 2, + commutative: true, + asm: arm64.ASMULL, reg: regInfo{ - clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, }, }, { - name: "CALLclosure", - auxType: auxCallOff, - argLen: 3, - clobberFlags: true, - call: true, + name: "UMULL", + argLen: 2, + commutative: true, + asm: arm64.AUMULL, reg: regInfo{ inputs: []inputInfo{ - {1, 4194304}, // R22 - {0, 402653182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 
R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, - clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO }, }, { - name: "CALLinter", - auxType: auxCallOff, - argLen: 2, - clobberFlags: true, - call: true, + name: "DIV", + argLen: 2, + asm: arm64.ASDIV, reg: regInfo{ inputs: []inputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, - clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO }, }, { - name: "LoweredAtomicLoad8", - argLen: 2, - faultOnNilArg0: true, + name: "UDIV", + argLen: 2, + asm: arm64.AUDIV, reg: regInfo{ inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredAtomicLoad32", - argLen: 2, - faultOnNilArg0: true, + name: "DIVW", + argLen: 2, + asm: arm64.ASDIVW, reg: regInfo{ inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredAtomicStore8", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, + name: "UDIVW", + argLen: 2, + asm: arm64.AUDIVW, reg: regInfo{ inputs: []inputInfo{ - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 
R26 R30 }, }, }, { - name: "LoweredAtomicStore32", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, + name: "MOD", + argLen: 2, + asm: arm64.AREM, reg: regInfo{ inputs: []inputInfo{ - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredAtomicStorezero", - argLen: 2, - faultOnNilArg0: true, - hasSideEffects: true, + name: "UMOD", + argLen: 2, + asm: arm64.AUREM, reg: regInfo{ inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredAtomicExchange", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "MODW", + argLen: 2, + asm: arm64.AREMW, reg: regInfo{ inputs: []inputInfo{ - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredAtomicAdd", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "UMODW", + argLen: 2, + asm: arm64.AUREMW, reg: regInfo{ inputs: []inputInfo{ - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: 
"LoweredAtomicAddconst", - auxType: auxInt32, - argLen: 2, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "FADDS", + argLen: 2, + commutative: true, + asm: arm64.AFADDS, reg: regInfo{ inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicCas", - argLen: 4, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "FADDD", + argLen: 2, + commutative: true, + asm: arm64.AFADDD, reg: regInfo{ inputs: []inputInfo{ - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {2, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicAnd", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, - asm: mips.AAND, + name: "FSUBS", + argLen: 2, + asm: arm64.AFSUBS, reg: regInfo{ inputs: []inputInfo{ - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicOr", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, - asm: mips.AOR, + name: "FSUBD", + argLen: 2, + asm: arm64.AFSUBD, reg: regInfo{ inputs: []inputInfo{ - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 
R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredZero", - auxType: auxInt32, - argLen: 3, - faultOnNilArg0: true, + name: "FMULS", + argLen: 2, + commutative: true, + asm: arm64.AFMULS, reg: regInfo{ inputs: []inputInfo{ - {0, 2}, // R1 - {1, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, - clobbers: 2, // R1 }, }, { - name: "LoweredMove", - auxType: auxInt32, - argLen: 4, - faultOnNilArg0: true, - faultOnNilArg1: true, + name: "FMULD", + argLen: 2, + commutative: true, + asm: arm64.AFMULD, reg: regInfo{ inputs: []inputInfo{ - {0, 4}, // R2 - {1, 2}, // R1 - {2, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, - clobbers: 6, // R1 R2 }, }, { - name: "LoweredNilCheck", - argLen: 2, - nilCheck: true, - faultOnNilArg0: true, + name: "FNMULS", + argLen: 2, + commutative: true, + asm: arm64.AFNMULS, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "FPFlagTrue", - argLen: 1, + name: "FNMULD", + argLen: 2, + commutative: true, + asm: arm64.AFNMULD, reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 
R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "FPFlagFalse", - argLen: 1, + name: "FDIVS", + argLen: 2, + asm: arm64.AFDIVS, reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredGetClosurePtr", - argLen: 0, - zeroWidth: true, + name: "FDIVD", + argLen: 2, + asm: arm64.AFDIVD, reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, outputs: []outputInfo{ - {0, 4194304}, // R22 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredGetCallerSP", - argLen: 1, - rematerializeable: true, + name: "AND", + argLen: 2, + commutative: true, + asm: arm64.AAND, reg: regInfo{ + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredGetCallerPC", - argLen: 0, - rematerializeable: true, + name: "ANDconst", + auxType: auxInt64, + argLen: 1, + asm: arm64.AAND, reg: regInfo{ + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredWB", - auxType: auxInt64, - argLen: 1, - clobberFlags: true, + name: "OR", + argLen: 2, + commutative: true, + asm: arm64.AORR, reg: regInfo{ - clobbers: 140737219919872, // R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, outputs: []outputInfo{ - {0, 16777216}, // R25 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 
R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredPubBarrier", - argLen: 1, - hasSideEffects: true, - asm: mips.ASYNC, - reg: regInfo{}, - }, - { - name: "LoweredPanicBoundsA", + name: "ORconst", auxType: auxInt64, - argLen: 3, - call: true, + argLen: 1, + asm: arm64.AORR, reg: regInfo{ inputs: []inputInfo{ - {0, 8}, // R3 - {1, 16}, // R4 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, - }, - }, - { - name: "LoweredPanicBoundsB", - auxType: auxInt64, - argLen: 3, - call: true, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4}, // R2 - {1, 8}, // R3 + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredPanicBoundsC", - auxType: auxInt64, - argLen: 3, - call: true, + name: "XOR", + argLen: 2, + commutative: true, + asm: arm64.AEOR, reg: regInfo{ inputs: []inputInfo{ - {0, 2}, // R1 - {1, 4}, // R2 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, - }, - }, - { - name: "LoweredPanicExtendA", - auxType: auxInt64, - argLen: 4, - call: true, - reg: regInfo{ - inputs: []inputInfo{ - {0, 32}, // R5 - {1, 8}, // R3 - {2, 16}, // R4 + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredPanicExtendB", + name: "XORconst", auxType: auxInt64, - argLen: 4, - call: true, + argLen: 1, + asm: arm64.AEOR, reg: regInfo{ inputs: []inputInfo{ - {0, 32}, // R5 - {1, 4}, // R2 - {2, 8}, // R3 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, - }, - }, - { - name: "LoweredPanicExtendC", - auxType: auxInt64, - argLen: 4, - call: true, - reg: regInfo{ - inputs: []inputInfo{ - {0, 32}, // R5 - {1, 2}, // R1 - {2, 4}, // R2 + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, - { - name: "ADDV", - argLen: 2, - commutative: true, - asm: mips.AADDVU, + name: "BIC", + argLen: 2, + asm: arm64.ABIC, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "ADDVconst", - auxType: auxInt64, - argLen: 1, - asm: mips.AADDVU, + name: "EON", + argLen: 2, + asm: arm64.AEON, reg: regInfo{ inputs: []inputInfo{ - {0, 268435454}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + 
{1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "SUBV", + name: "ORN", argLen: 2, - asm: mips.ASUBVU, + asm: arm64.AORN, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "SUBVconst", - auxType: auxInt64, - argLen: 1, - asm: mips.ASUBVU, + name: "MVN", + argLen: 1, + asm: arm64.AMVN, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MULV", - argLen: 2, - commutative: true, - asm: mips.AMULV, + name: "NEG", + argLen: 1, + asm: arm64.ANEG, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 1152921504606846976}, // HI - {1, 2305843009213693952}, // LO + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MULVU", - argLen: 2, - commutative: true, - asm: mips.AMULVU, + name: "NEGSflags", + argLen: 1, + asm: arm64.ANEGS, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 1152921504606846976}, // HI - {1, 2305843009213693952}, // LO + {1, 0}, + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "DIVV", - argLen: 2, - asm: mips.ADIVV, + name: "NGCzerocarry", + argLen: 1, + asm: arm64.ANGC, reg: regInfo{ - inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 
R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - }, outputs: []outputInfo{ - {0, 1152921504606846976}, // HI - {1, 2305843009213693952}, // LO + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "DIVVU", - argLen: 2, - asm: mips.ADIVVU, + name: "FABSD", + argLen: 1, + asm: arm64.AFABSD, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1152921504606846976}, // HI - {1, 2305843009213693952}, // LO + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ADDF", - argLen: 2, - commutative: true, - asm: mips.AADDF, + name: "FNEGS", + argLen: 1, + asm: arm64.AFNEGS, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ADDD", - argLen: 2, - commutative: true, - asm: mips.AADDD, + name: "FNEGD", + argLen: 1, + asm: arm64.AFNEGD, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SUBF", - argLen: 2, - asm: mips.ASUBF, + name: "FSQRTD", + argLen: 1, + asm: arm64.AFSQRTD, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 
F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SUBD", - argLen: 2, - asm: mips.ASUBD, + name: "FSQRTS", + argLen: 1, + asm: arm64.AFSQRTS, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MULF", - argLen: 2, - commutative: true, - asm: mips.AMULF, + name: "FMIND", + argLen: 2, + asm: arm64.AFMIND, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MULD", - argLen: 2, - commutative: true, - asm: mips.AMULD, + name: "FMINS", + argLen: 2, + asm: arm64.AFMINS, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "DIVF", + name: "FMAXD", argLen: 2, - 
asm: mips.ADIVF, + asm: arm64.AFMAXD, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "DIVD", + name: "FMAXS", argLen: 2, - asm: mips.ADIVD, + asm: arm64.AFMAXS, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "AND", - argLen: 2, - commutative: true, - asm: mips.AAND, + name: "REV", + argLen: 1, + asm: arm64.AREV, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "ANDconst", - auxType: auxInt64, - argLen: 1, - asm: mips.AAND, + name: "REVW", + argLen: 1, + asm: arm64.AREVW, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "OR", - argLen: 2, - commutative: true, - asm: mips.AOR, + name: 
"REV16", + argLen: 1, + asm: arm64.AREV16, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "ORconst", - auxType: auxInt64, - argLen: 1, - asm: mips.AOR, + name: "REV16W", + argLen: 1, + asm: arm64.AREV16W, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "XOR", - argLen: 2, - commutative: true, - asm: mips.AXOR, + name: "RBIT", + argLen: 1, + asm: arm64.ARBIT, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "XORconst", - auxType: auxInt64, - argLen: 1, - asm: mips.AXOR, + name: "RBITW", + argLen: 1, + asm: arm64.ARBITW, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "NOR", - argLen: 2, - commutative: true, - asm: mips.ANOR, + name: "CLZ", + argLen: 1, + asm: arm64.ACLZ, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - 
name: "NORconst", - auxType: auxInt64, - argLen: 1, - asm: mips.ANOR, + name: "CLZW", + argLen: 1, + asm: arm64.ACLZW, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "NEGV", + name: "VCNT", argLen: 1, + asm: arm64.AVCNT, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "NEGF", + name: "VUADDLV", argLen: 1, - asm: mips.ANEGF, + asm: arm64.AVUADDLV, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "NEGD", - argLen: 1, - asm: mips.ANEGD, + name: "LoweredRound32F", + argLen: 1, + resultInArg0: true, + zeroWidth: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ABSD", - argLen: 1, - asm: mips.AABSD, + name: "LoweredRound64F", + argLen: 1, + resultInArg0: true, + zeroWidth: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 
F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SQRTD", - argLen: 1, - asm: mips.ASQRTD, + name: "FMADDS", + argLen: 3, + asm: arm64.AFMADDS, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SQRTF", - argLen: 1, - asm: mips.ASQRTF, + name: "FMADDD", + argLen: 3, + asm: arm64.AFMADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SLLV", - argLen: 2, - asm: mips.ASLLV, + name: "FNMADDS", + argLen: 3, + asm: arm64.AFNMADDS, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SLLVconst", - auxType: auxInt64, - argLen: 1, - asm: mips.ASLLV, + name: "FNMADDD", + argLen: 3, + asm: arm64.AFNMADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 
R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SRLV", - argLen: 2, - asm: mips.ASRLV, + name: "FMSUBS", + argLen: 3, + asm: arm64.AFMSUBS, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SRLVconst", - auxType: auxInt64, - argLen: 1, - asm: mips.ASRLV, + name: "FMSUBD", + argLen: 3, + asm: arm64.AFMSUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SRAV", - argLen: 2, - asm: mips.ASRAV, + name: "FNMSUBS", + argLen: 3, + asm: arm64.AFNMSUBS, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 
F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SRAVconst", - auxType: auxInt64, - argLen: 1, - asm: mips.ASRAV, + name: "FNMSUBD", + argLen: 3, + asm: arm64.AFNMSUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SGT", - argLen: 2, - asm: mips.ASGT, + name: "MADD", + argLen: 3, + asm: arm64.AMADD, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "SGTconst", - auxType: auxInt64, - argLen: 1, - asm: mips.ASGT, + name: "MADDW", + argLen: 3, + asm: arm64.AMADDW, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "SGTU", - argLen: 2, - asm: mips.ASGTU, + name: "MSUB", + argLen: 3, + asm: arm64.AMSUB, reg: regInfo{ inputs: 
[]inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "SGTUconst", - auxType: auxInt64, - argLen: 1, - asm: mips.ASGTU, + name: "MSUBW", + argLen: 3, + asm: arm64.AMSUBW, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "CMPEQF", + name: "SLL", argLen: 2, - asm: mips.ACMPEQF, + asm: arm64.ALSL, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, - }, - }, - { - name: "CMPEQD", - argLen: 2, - asm: mips.ACMPEQD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "CMPGEF", - argLen: 2, - asm: mips.ACMPGEF, + name: "SLLconst", + auxType: auxInt64, + argLen: 1, + asm: arm64.ALSL, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 
R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "CMPGED", + name: "SRL", argLen: 2, - asm: mips.ACMPGED, + asm: arm64.ALSR, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "CMPGTF", - argLen: 2, - asm: mips.ACMPGTF, + name: "SRLconst", + auxType: auxInt64, + argLen: 1, + asm: arm64.ALSR, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "CMPGTD", + name: "SRA", argLen: 2, - asm: mips.ACMPGTD, + asm: arm64.AASR, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, - }, - }, - { - name: "MOVVconst", - auxType: auxInt64, - argLen: 0, - rematerializeable: true, - asm: mips.AMOVV, - reg: regInfo{ outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVFconst", - auxType: auxFloat64, - argLen: 0, - rematerializeable: true, - asm: mips.AMOVF, + name: "SRAconst", + auxType: auxInt64, + argLen: 1, + asm: arm64.AASR, reg: regInfo{ - outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, - }, - }, - { - name: "MOVDconst", - auxType: auxFloat64, - argLen: 0, - rematerializeable: true, - asm: mips.AMOVD, - reg: regInfo{ outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 
F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVVaddr", - auxType: auxSymOff, - argLen: 1, - rematerializeable: true, - symEffect: SymAddr, - asm: mips.AMOVV, + name: "ROR", + argLen: 2, + asm: arm64.AROR, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018460942336}, // SP SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVBload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: mips.AMOVB, + name: "RORW", + argLen: 2, + asm: arm64.ARORW, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVBUload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: mips.AMOVBU, + name: "RORconst", + auxType: auxInt64, + argLen: 1, + asm: arm64.AROR, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVHload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: mips.AMOVH, + name: "RORWconst", + auxType: auxInt64, + argLen: 1, + asm: arm64.ARORW, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVHUload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: mips.AMOVHU, + name: "EXTRconst", + auxType: auxInt64, + argLen: 2, + asm: arm64.AEXTR, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 
R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVWload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: mips.AMOVW, + name: "EXTRWconst", + auxType: auxInt64, + argLen: 2, + asm: arm64.AEXTRW, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVWUload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: mips.AMOVWU, + name: "CMP", + argLen: 2, + asm: arm64.ACMP, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB - }, - outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "MOVVload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: mips.AMOVV, + name: "CMPconst", + auxType: auxInt64, + argLen: 1, + asm: arm64.ACMP, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB - }, - outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "MOVFload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: mips.AMOVF, + name: "CMPW", + argLen: 2, + asm: arm64.ACMPW, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB - }, - outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 
R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "MOVDload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: mips.AMOVD, + name: "CMPWconst", + auxType: auxInt32, + argLen: 1, + asm: arm64.ACMPW, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB - }, - outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "MOVBstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVB, + name: "CMN", + argLen: 2, + commutative: true, + asm: arm64.ACMN, reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "MOVHstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVH, + name: "CMNconst", + auxType: auxInt64, + argLen: 1, + asm: arm64.ACMN, reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "MOVWstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVW, + name: "CMNW", + argLen: 2, + commutative: true, + asm: arm64.ACMNW, reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "MOVVstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVV, + name: "CMNWconst", + auxType: auxInt32, + argLen: 1, + asm: arm64.ACMNW, reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "MOVFstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVF, + name: "TST", + argLen: 2, + commutative: true, + 
asm: arm64.ATST, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "MOVDstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVD, + name: "TSTconst", + auxType: auxInt64, + argLen: 1, + asm: arm64.ATST, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "MOVBstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVB, + name: "TSTW", + argLen: 2, + commutative: true, + asm: arm64.ATSTW, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "MOVHstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVH, + name: "TSTWconst", + auxType: auxInt32, + argLen: 1, + asm: arm64.ATSTW, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "MOVWstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVW, + name: "FCMPS", + argLen: 2, + asm: arm64.AFCMPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVVstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVV, + name: "FCMPD", + argLen: 2, + asm: arm64.AFCMPD, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 
F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVWfpgp", + name: "FCMPS0", argLen: 1, - asm: mips.AMOVW, + asm: arm64.AFCMPS, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVWgpfp", + name: "FCMPD0", argLen: 1, - asm: mips.AMOVW, + asm: arm64.AFCMPD, reg: regInfo{ inputs: []inputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 - }, - outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVVfpgp", - argLen: 1, - asm: mips.AMOVV, + name: "MVNshiftLL", + auxType: auxInt64, + argLen: 1, + asm: arm64.AMVN, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVVgpfp", - argLen: 1, - asm: mips.AMOVV, + name: "MVNshiftRL", + auxType: auxInt64, + argLen: 1, + asm: arm64.AMVN, reg: regInfo{ inputs: []inputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVBreg", - argLen: 1, - asm: mips.AMOVB, + name: "MVNshiftRA", + auxType: auxInt64, + argLen: 1, + asm: arm64.AMVN, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVBUreg", - argLen: 1, - asm: mips.AMOVBU, + name: "MVNshiftRO", + auxType: auxInt64, + argLen: 1, + asm: arm64.AMVN, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 
R20 R21 R22 R24 R25 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVHreg", - argLen: 1, - asm: mips.AMOVH, + name: "NEGshiftLL", + auxType: auxInt64, + argLen: 1, + asm: arm64.ANEG, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVHUreg", - argLen: 1, - asm: mips.AMOVHU, + name: "NEGshiftRL", + auxType: auxInt64, + argLen: 1, + asm: arm64.ANEG, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVWreg", - argLen: 1, - asm: mips.AMOVW, + name: "NEGshiftRA", + auxType: auxInt64, + argLen: 1, + asm: arm64.ANEG, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVWUreg", - argLen: 1, - asm: mips.AMOVWU, - reg: regInfo{ + name: "ADDshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AADD, + reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVVreg", - argLen: 1, - asm: mips.AMOVV, + name: "ADDshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 
R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVVnop", - argLen: 1, - resultInArg0: true, + name: "ADDshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVWF", - argLen: 1, - asm: mips.AMOVWF, + name: "SUBshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVWD", - argLen: 1, - asm: mips.AMOVWD, + name: "SUBshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVVF", - argLen: 1, - asm: mips.AMOVVF, + name: "SUBshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // 
F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVVD", - argLen: 1, - asm: mips.AMOVVD, + name: "ANDshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "TRUNCFW", - argLen: 1, - asm: mips.ATRUNCFW, + name: "ANDshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "TRUNCDW", - argLen: 1, - asm: mips.ATRUNCDW, + name: "ANDshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "TRUNCFV", - argLen: 1, - asm: mips.ATRUNCFV, + name: "ANDshiftRO", + auxType: auxInt64, + argLen: 2, + asm: arm64.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 
+ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "TRUNCDV", - argLen: 1, - asm: mips.ATRUNCDV, + name: "ORshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AORR, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVFD", - argLen: 1, - asm: mips.AMOVFD, + name: "ORshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AORR, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVDF", - argLen: 1, - asm: mips.AMOVDF, + name: "ORshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.AORR, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "CALLstatic", - auxType: auxCallOff, - argLen: 1, - clobberFlags: true, - call: true, + name: "ORshiftRO", + auxType: auxInt64, + argLen: 2, + asm: arm64.AORR, reg: regInfo{ - clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, 
}, }, { - name: "CALLtail", - auxType: auxCallOff, - argLen: 1, - clobberFlags: true, - call: true, - tailCall: true, + name: "XORshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AEOR, reg: regInfo{ - clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, }, }, { - name: "CALLclosure", - auxType: auxCallOff, - argLen: 3, - clobberFlags: true, - call: true, + name: "XORshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AEOR, reg: regInfo{ inputs: []inputInfo{ - {1, 4194304}, // R22 - {0, 201326590}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, - clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO }, }, { - name: "CALLinter", - auxType: auxCallOff, - argLen: 2, - clobberFlags: true, - call: true, + name: "XORshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.AEOR, reg: regInfo{ inputs: []inputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, - clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO }, }, { - name: "DUFFZERO", - auxType: auxInt64, - argLen: 2, - faultOnNilArg0: true, + name: "XORshiftRO", + auxType: auxInt64, + argLen: 2, + asm: arm64.AEOR, reg: regInfo{ inputs: []inputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, - 
clobbers: 134217730, // R1 R31 }, }, { - name: "DUFFCOPY", - auxType: auxInt64, - argLen: 3, - faultOnNilArg0: true, - faultOnNilArg1: true, + name: "BICshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.ABIC, reg: regInfo{ inputs: []inputInfo{ - {0, 4}, // R2 - {1, 2}, // R1 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, - clobbers: 134217734, // R1 R2 R31 }, }, { - name: "LoweredZero", - auxType: auxInt64, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, + name: "BICshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.ABIC, reg: regInfo{ inputs: []inputInfo{ - {0, 2}, // R1 - {1, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, - clobbers: 2, // R1 }, }, { - name: "LoweredMove", - auxType: auxInt64, - argLen: 4, - clobberFlags: true, - faultOnNilArg0: true, - faultOnNilArg1: true, + name: "BICshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.ABIC, reg: regInfo{ inputs: []inputInfo{ - {0, 4}, // R2 - {1, 2}, // R1 - {2, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, - clobbers: 6, // R1 R2 }, }, { - name: "LoweredAtomicAnd32", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, - asm: mips.AAND, + name: "BICshiftRO", + auxType: auxInt64, + argLen: 2, + asm: arm64.ABIC, reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredAtomicOr32", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, - asm: mips.AOR, + name: "EONshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AEON, reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 
4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredAtomicLoad8", - argLen: 2, - faultOnNilArg0: true, + name: "EONshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AEON, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredAtomicLoad32", - argLen: 2, - faultOnNilArg0: true, + name: "EONshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.AEON, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredAtomicLoad64", - argLen: 2, - faultOnNilArg0: true, + name: "EONshiftRO", + auxType: auxInt64, + argLen: 2, + asm: arm64.AEON, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredAtomicStore8", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, + name: "ORNshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AORN, reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 
402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredAtomicStore32", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, + name: "ORNshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AORN, reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredAtomicStore64", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, + name: "ORNshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.AORN, reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredAtomicStorezero32", - argLen: 2, - faultOnNilArg0: true, - hasSideEffects: true, + name: "ORNshiftRO", + auxType: auxInt64, + argLen: 2, + asm: arm64.AORN, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredAtomicStorezero64", - argLen: 2, - faultOnNilArg0: true, - hasSideEffects: true, + name: "CMPshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.ACMP, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "LoweredAtomicExchange32", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "CMPshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.ACMP, reg: regInfo{ inputs: []inputInfo{ 
- {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB - }, - outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "LoweredAtomicExchange64", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "CMPshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.ACMP, reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB - }, - outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "LoweredAtomicAdd32", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "CMNshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.ACMN, reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB - }, - outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "LoweredAtomicAdd64", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "CMNshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.ACMN, reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB - }, - outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "LoweredAtomicAddconst32", - auxType: auxInt32, - argLen: 2, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "CMNshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.ACMN, reg: regInfo{ 
inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, - outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + { + name: "TSTshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.ATST, + reg: regInfo{ + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "LoweredAtomicAddconst64", - auxType: auxInt64, - argLen: 2, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "TSTshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.ATST, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, - outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + { + name: "TSTshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.ATST, + reg: regInfo{ + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "LoweredAtomicCas32", - argLen: 4, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "TSTshiftRO", + auxType: auxInt64, + argLen: 2, + asm: arm64.ATST, reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {2, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + }, + }, + { + name: "BFI", + auxType: auxARM64BitField, + argLen: 2, + resultInArg0: true, + asm: arm64.ABFI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 
R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredAtomicCas64", - argLen: 4, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "BFXIL", + auxType: auxARM64BitField, + argLen: 2, + resultInArg0: true, + asm: arm64.ABFXIL, reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {2, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredNilCheck", - argLen: 2, - nilCheck: true, - faultOnNilArg0: true, + name: "SBFIZ", + auxType: auxARM64BitField, + argLen: 1, + asm: arm64.ASBFIZ, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FPFlagTrue", - argLen: 1, + name: "SBFX", + auxType: auxARM64BitField, + argLen: 1, + asm: arm64.ASBFX, reg: regInfo{ + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FPFlagFalse", - argLen: 1, + name: "UBFIZ", + auxType: auxARM64BitField, + argLen: 1, + asm: arm64.AUBFIZ, reg: regInfo{ + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredGetClosurePtr", - argLen: 0, - zeroWidth: true, + name: "UBFX", + auxType: auxARM64BitField, + argLen: 1, + asm: arm64.AUBFX, reg: regInfo{ + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, outputs: []outputInfo{ - {0, 4194304}, // R22 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredGetCallerSP", - argLen: 1, + name: "MOVDconst", + auxType: auxInt64, + argLen: 0, rematerializeable: true, + asm: arm64.AMOVD, reg: regInfo{ outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 
R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredGetCallerPC", + name: "FMOVSconst", + auxType: auxFloat64, argLen: 0, rematerializeable: true, + asm: arm64.AFMOVS, reg: regInfo{ outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredWB", - auxType: auxInt64, - argLen: 1, - clobberFlags: true, + name: "FMOVDconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: arm64.AFMOVD, reg: regInfo{ - clobbers: 4611686018293170176, // R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO outputs: []outputInfo{ - {0, 16777216}, // R25 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredPubBarrier", - argLen: 1, - hasSideEffects: true, - asm: mips.ASYNC, - reg: regInfo{}, - }, - { - name: "LoweredPanicBoundsA", - auxType: auxInt64, - argLen: 3, - call: true, + name: "MOVDaddr", + auxType: auxSymOff, + argLen: 1, + rematerializeable: true, + symEffect: SymAddr, + asm: arm64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 8}, // R3 - {1, 16}, // R4 + {0, 9223372037928517632}, // SP SB + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredPanicBoundsB", - auxType: auxInt64, - argLen: 3, - call: true, + name: "MOVBload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 4}, // R2 - {1, 8}, // R3 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredPanicBoundsC", - auxType: auxInt64, - argLen: 3, - call: true, + name: "MOVBUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 2}, // R1 - {1, 4}, // R2 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, - { - name: "ADD", - argLen: 2, - commutative: true, - asm: ppc64.AADD, + name: "MOVHload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: 
[]outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "ADDCC", - argLen: 2, - commutative: true, - asm: ppc64.AADDCC, + name: "MOVHUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "ADDconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.AADD, + name: "MOVWload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "ADDCCconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.AADDCCC, + name: "MOVWUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.AMOVWU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, - clobbers: 9223372036854775808, // XER outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FADD", - argLen: 2, - commutative: true, - asm: ppc64.AFADD, + name: "MOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 
F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FADDS", - argLen: 2, - commutative: true, - asm: ppc64.AFADDS, + name: "FMOVSload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.AFMOVS, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SUB", - argLen: 2, - asm: ppc64.ASUB, + name: "FMOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.AFMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SUBCC", - argLen: 2, - asm: ppc64.ASUBCC, + name: "LDP", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.ALDP, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "SUBFCconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ASUBC, + name: "LDPW", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.ALDPW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, - clobbers: 9223372036854775808, // XER outputs: 
[]outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "FSUB", - argLen: 2, - asm: ppc64.AFSUB, + name: "LDPSW", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.ALDPSW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "FSUBS", - argLen: 2, - asm: ppc64.AFSUBS, + name: "FLDPD", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.AFLDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "XSMINJDP", - argLen: 2, - asm: ppc64.AXSMINJDP, + name: "FLDPS", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.AFLDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 
F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "XSMAXJDP", - argLen: 2, - asm: ppc64.AXSMAXJDP, + name: "MOVDloadidx", + argLen: 3, + asm: arm64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MULLD", - argLen: 2, - commutative: true, - asm: ppc64.AMULLD, + name: "MOVWloadidx", + argLen: 3, + asm: arm64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MULLW", - argLen: 2, - commutative: true, - asm: ppc64.AMULLW, + name: "MOVWUloadidx", + argLen: 3, + asm: arm64.AMOVWU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MULLDconst", - auxType: auxInt32, - argLen: 1, - asm: ppc64.AMULLD, + name: "MOVHloadidx", + argLen: 3, + asm: arm64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 
SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MULLWconst", - auxType: auxInt32, - argLen: 1, - asm: ppc64.AMULLW, + name: "MOVHUloadidx", + argLen: 3, + asm: arm64.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MADDLD", + name: "MOVBloadidx", argLen: 3, - asm: ppc64.AMADDLD, + asm: arm64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MULHD", - argLen: 2, - commutative: true, - asm: ppc64.AMULHD, + name: "MOVBUloadidx", + argLen: 3, + asm: arm64.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MULHW", - argLen: 2, - commutative: true, - asm: ppc64.AMULHW, + name: "FMOVSloadidx", + argLen: 3, + asm: arm64.AFMOVS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 
R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MULHDU", - argLen: 2, - commutative: true, - asm: ppc64.AMULHDU, + name: "FMOVDloadidx", + argLen: 3, + asm: arm64.AFMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MULHDUCC", - argLen: 2, - commutative: true, - asm: ppc64.AMULHDUCC, + name: "MOVHloadidx2", + argLen: 3, + asm: arm64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MULHWU", - argLen: 2, - commutative: true, - asm: ppc64.AMULHWU, + name: "MOVHUloadidx2", + argLen: 3, + asm: arm64.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FMUL", - argLen: 2, - commutative: true, - asm: ppc64.AFMUL, + name: "MOVWloadidx4", + argLen: 3, + asm: arm64.AMOVW, reg: regInfo{ inputs: 
[]inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FMULS", - argLen: 2, - commutative: true, - asm: ppc64.AFMULS, + name: "MOVWUloadidx4", + argLen: 3, + asm: arm64.AMOVWU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FMADD", + name: "MOVDloadidx8", argLen: 3, - asm: ppc64.AFMADD, + asm: arm64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FMADDS", + name: "FMOVSloadidx4", argLen: 3, - asm: ppc64.AFMADDS, + asm: arm64.AFMOVS, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 
F28 F29 F30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "FMSUB", + name: "FMOVDloadidx8", argLen: 3, - asm: ppc64.AFMSUB, + asm: arm64.AFMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "FMSUBS", - argLen: 3, - asm: ppc64.AFMSUBS, + name: "MOVBstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - }, - outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "SRAD", - argLen: 2, - asm: ppc64.ASRAD, + name: "MOVHstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - clobbers: 9223372036854775808, // XER - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 
R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "SRAW", - argLen: 2, - asm: ppc64.ASRAW, + name: "MOVWstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - clobbers: 9223372036854775808, // XER - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "SRD", - argLen: 2, - asm: ppc64.ASRD, + name: "MOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "SRW", - argLen: 2, - asm: ppc64.ASRW, + name: "FMOVSstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.AFMOVS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SLD", - argLen: 2, - asm: ppc64.ASLD, + name: "FMOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.AFMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372038331170815}, // 
R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SLW", - argLen: 2, - asm: ppc64.ASLW, + name: "STP", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.ASTP, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "ROTL", - argLen: 2, - asm: ppc64.AROTL, + name: "STPW", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.ASTPW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "ROTLW", - argLen: 2, - asm: ppc64.AROTLW, + name: "FSTPD", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.AFSTPD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CLRLSLWI", - auxType: auxInt32, - argLen: 1, - asm: ppc64.ACLRLSLWI, + name: "FSTPS", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.AFSTPS, reg: regInfo{ inputs: []inputInfo{ - {0, 
1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CLRLSLDI", - auxType: auxInt32, - argLen: 1, - asm: ppc64.ACLRLSLDI, + name: "MOVBstoreidx", + argLen: 4, + asm: arm64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "ADDC", - argLen: 2, - commutative: true, - asm: ppc64.AADDC, + name: "MOVHstoreidx", + argLen: 4, + asm: arm64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - clobbers: 9223372036854775808, // XER - outputs: []outputInfo{ - {1, 9223372036854775808}, // XER - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "SUBC", - argLen: 2, - asm: ppc64.ASUBC, + name: "MOVWstoreidx", + argLen: 4, + asm: arm64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - clobbers: 9223372036854775808, // XER - outputs: []outputInfo{ - {1, 9223372036854775808}, // XER - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 
R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "ADDCconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.AADDC, + name: "MOVDstoreidx", + argLen: 4, + asm: arm64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {1, 9223372036854775808}, // XER - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "SUBCconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ASUBC, + name: "FMOVSstoreidx", + argLen: 4, + asm: arm64.AFMOVS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {1, 9223372036854775808}, // XER - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ADDE", - argLen: 3, - commutative: true, - asm: ppc64.AADDE, + name: "FMOVDstoreidx", + argLen: 4, + asm: arm64.AFMOVD, reg: regInfo{ inputs: []inputInfo{ - {2, 9223372036854775808}, // XER - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - clobbers: 9223372036854775808, // XER - outputs: []outputInfo{ - {1, 9223372036854775808}, // XER - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ADDZE", - argLen: 2, - asm: ppc64.AADDZE, + name: "MOVHstoreidx2", + argLen: 4, + asm: arm64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {1, 9223372036854775808}, // XER - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - clobbers: 9223372036854775808, // XER - outputs: []outputInfo{ - {1, 9223372036854775808}, // XER - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 
R25 R26 g R30 ZERO + {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "SUBE", - argLen: 3, - asm: ppc64.ASUBE, + name: "MOVWstoreidx4", + argLen: 4, + asm: arm64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {2, 9223372036854775808}, // XER - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - clobbers: 9223372036854775808, // XER - outputs: []outputInfo{ - {1, 9223372036854775808}, // XER - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "ADDZEzero", - argLen: 1, - asm: ppc64.AADDZE, + name: "MOVDstoreidx8", + argLen: 4, + asm: arm64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372036854775808}, // XER - }, - clobbers: 9223372036854775808, // XER - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "SUBZEzero", - argLen: 1, - asm: ppc64.ASUBZE, + name: "FMOVSstoreidx4", + argLen: 4, + asm: arm64.AFMOVS, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372036854775808}, // XER - }, - clobbers: 9223372036854775808, // XER - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SRADconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ASRAD, + name: "FMOVDstoreidx8", + argLen: 4, + asm: arm64.AFMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - clobbers: 9223372036854775808, // XER - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038331170815}, // R0 R1 R2 
R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SRAWconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ASRAW, + name: "FMOVDgpfp", + argLen: 1, + asm: arm64.AFMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, - clobbers: 9223372036854775808, // XER outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SRDconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ASRD, + name: "FMOVDfpgp", + argLen: 1, + asm: arm64.AFMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "SRWconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ASRW, + name: "FMOVSgpfp", + argLen: 1, + asm: arm64.AFMOVS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SLDconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ASLD, + name: "FMOVSfpgp", + argLen: 1, + asm: arm64.AFMOVS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "SLWconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ASLW, + name: "MOVBreg", + argLen: 1, + asm: arm64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 
}, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "ROTLconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.AROTL, + name: "MOVBUreg", + argLen: 1, + asm: arm64.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "ROTLWconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.AROTLW, + name: "MOVHreg", + argLen: 1, + asm: arm64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "EXTSWSLconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.AEXTSWSLI, + name: "MOVHUreg", + argLen: 1, + asm: arm64.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "RLWINM", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ARLWNM, + name: "MOVWreg", + argLen: 1, + asm: arm64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "RLWNM", - auxType: auxInt64, - argLen: 2, - asm: ppc64.ARLWNM, + name: "MOVWUreg", + argLen: 1, + asm: arm64.AMOVWU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, 
outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "RLWMI", - auxType: auxInt64, - argLen: 2, - resultInArg0: true, - asm: ppc64.ARLWMI, + name: "MOVDreg", + argLen: 1, + asm: arm64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "RLDICL", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ARLDICL, + name: "MOVDnop", + argLen: 1, + resultInArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "RLDICLCC", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ARLDICLCC, + name: "SCVTFWS", + argLen: 1, + asm: arm64.ASCVTFWS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "RLDICR", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ARLDICR, + name: "SCVTFWD", + argLen: 1, + asm: arm64.ASCVTFWD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CNTLZD", + name: "UCVTFWS", argLen: 1, - asm: ppc64.ACNTLZD, + asm: arm64.AUCVTFWS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 
R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CNTLZDCC", + name: "UCVTFWD", argLen: 1, - asm: ppc64.ACNTLZDCC, + asm: arm64.AUCVTFWD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CNTLZW", + name: "SCVTFS", argLen: 1, - asm: ppc64.ACNTLZW, + asm: arm64.ASCVTFS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CNTTZD", + name: "SCVTFD", argLen: 1, - asm: ppc64.ACNTTZD, + asm: arm64.ASCVTFD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CNTTZW", + name: "UCVTFS", argLen: 1, - asm: ppc64.ACNTTZW, + asm: arm64.AUCVTFS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "POPCNTD", + name: "UCVTFD", argLen: 1, - asm: ppc64.APOPCNTD, + asm: arm64.AUCVTFD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + 
{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "POPCNTW", + name: "FCVTZSSW", argLen: 1, - asm: ppc64.APOPCNTW, + asm: arm64.AFCVTZSSW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "POPCNTB", + name: "FCVTZSDW", argLen: 1, - asm: ppc64.APOPCNTB, + asm: arm64.AFCVTZSDW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FDIV", - argLen: 2, - asm: ppc64.AFDIV, + name: "FCVTZUSW", + argLen: 1, + asm: arm64.AFCVTZUSW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FDIVS", - argLen: 2, - asm: ppc64.AFDIVS, + name: "FCVTZUDW", + argLen: 1, + asm: arm64.AFCVTZUDW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "DIVD", - argLen: 2, - asm: ppc64.ADIVD, + name: "FCVTZSS", + argLen: 1, + asm: arm64.AFCVTZSS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 
1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "DIVW", - argLen: 2, - asm: ppc64.ADIVW, + name: "FCVTZSD", + argLen: 1, + asm: arm64.AFCVTZSD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "DIVDU", - argLen: 2, - asm: ppc64.ADIVDU, + name: "FCVTZUS", + argLen: 1, + asm: arm64.AFCVTZUS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "DIVWU", - argLen: 2, - asm: ppc64.ADIVWU, + name: "FCVTZUD", + argLen: 1, + asm: arm64.AFCVTZUD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MODUD", - argLen: 2, - asm: ppc64.AMODUD, + name: "FCVTSD", + argLen: 1, + asm: arm64.AFCVTSD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - 
{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MODSD", - argLen: 2, - asm: ppc64.AMODSD, + name: "FCVTDS", + argLen: 1, + asm: arm64.AFCVTDS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MODUW", - argLen: 2, - asm: ppc64.AMODUW, + name: "FRINTAD", + argLen: 1, + asm: arm64.AFRINTAD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MODSW", - argLen: 2, - asm: ppc64.AMODSW, + name: "FRINTMD", + argLen: 1, + asm: arm64.AFRINTMD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "FCTIDZ", + name: "FRINTND", argLen: 1, - asm: ppc64.AFCTIDZ, + asm: arm64.AFRINTND, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, 
}, }, { - name: "FCTIWZ", + name: "FRINTPD", argLen: 1, - asm: ppc64.AFCTIWZ, + asm: arm64.AFRINTPD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "FCFID", + name: "FRINTZD", argLen: 1, - asm: ppc64.AFCFID, + asm: arm64.AFRINTZD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "FCFIDS", - argLen: 1, - asm: ppc64.AFCFIDS, + name: "CSEL", + auxType: auxCCop, + argLen: 3, + asm: arm64.ACSEL, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FRSP", - argLen: 1, - asm: ppc64.AFRSP, + name: "CSEL0", + auxType: auxCCop, + argLen: 2, + asm: arm64.ACSEL, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MFVSRD", - argLen: 1, - asm: ppc64.AMFVSRD, + name: "CSINC", + auxType: auxCCop, + argLen: 3, + asm: arm64.ACSINC, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 
R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MTVSRD", - argLen: 1, - asm: ppc64.AMTVSRD, + name: "CSINV", + auxType: auxCCop, + argLen: 3, + asm: arm64.ACSINV, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "AND", - argLen: 2, - commutative: true, - asm: ppc64.AAND, + name: "CSNEG", + auxType: auxCCop, + argLen: 3, + asm: arm64.ACSNEG, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "ANDN", - argLen: 2, - asm: ppc64.AANDN, + name: "CSETM", + auxType: auxCCop, + argLen: 1, + asm: arm64.ACSETM, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "ANDNCC", - argLen: 2, - asm: ppc64.AANDNCC, + name: "CALLstatic", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, + clobbers: 9223372035109945343, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 
F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { - name: "ANDCC", - argLen: 2, - commutative: true, - asm: ppc64.AANDCC, + name: "CALLtail", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, + tailCall: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, + clobbers: 9223372035109945343, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { - name: "OR", - argLen: 2, - commutative: true, - asm: ppc64.AOR, + name: "CALLclosure", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 33554432}, // R26 + {0, 1409286143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 SP }, + clobbers: 9223372035109945343, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { - name: "ORN", - argLen: 2, - asm: ppc64.AORN, + name: "CALLinter", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, + clobbers: 9223372035109945343, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { - name: "ORCC", - argLen: 2, - commutative: true, - asm: ppc64.AORCC, + name: "LoweredNilCheck", + argLen: 2, + nilCheck: true, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 
R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "NOR", - argLen: 2, - commutative: true, - asm: ppc64.ANOR, + name: "Equal", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "NORCC", - argLen: 2, - commutative: true, - asm: ppc64.ANORCC, + name: "NotEqual", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "XOR", - argLen: 2, - commutative: true, - asm: ppc64.AXOR, + name: "LessThan", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "XORCC", - argLen: 2, - commutative: true, - asm: ppc64.AXORCC, + name: "LessEqual", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "EQV", - argLen: 2, - commutative: true, - asm: ppc64.AEQV, + name: "GreaterThan", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "NEG", + name: "GreaterEqual", argLen: 1, - asm: ppc64.ANEG, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // 
SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "NEGCC", + name: "LessThanU", argLen: 1, - asm: ppc64.ANEGCC, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "BRD", + name: "LessEqualU", argLen: 1, - asm: ppc64.ABRD, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "BRW", + name: "GreaterThanU", argLen: 1, - asm: ppc64.ABRW, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "BRH", + name: "GreaterEqualU", argLen: 1, - asm: ppc64.ABRH, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FNEG", + name: "LessThanF", argLen: 1, - asm: ppc64.AFNEG, reg: regInfo{ - inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FSQRT", + name: "LessEqualF", argLen: 1, - asm: ppc64.AFSQRT, reg: regInfo{ - inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FSQRTS", + name: 
"GreaterThanF", argLen: 1, - asm: ppc64.AFSQRTS, reg: regInfo{ - inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FFLOOR", + name: "GreaterEqualF", argLen: 1, - asm: ppc64.AFRIM, reg: regInfo{ - inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FCEIL", + name: "NotLessThanF", argLen: 1, - asm: ppc64.AFRIP, reg: regInfo{ - inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FTRUNC", + name: "NotLessEqualF", argLen: 1, - asm: ppc64.AFRIZ, reg: regInfo{ - inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FROUND", + name: "NotGreaterThanF", argLen: 1, - asm: ppc64.AFRIN, reg: regInfo{ - inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FABS", + name: "NotGreaterEqualF", argLen: 1, - asm: ppc64.AFABS, reg: regInfo{ - inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FNABS", + name: "LessThanNoov", argLen: 1, - asm: ppc64.AFNABS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 
F23 F24 F25 F26 F27 F28 F29 F30 - }, - outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - }, - }, - }, - { - name: "FCPSGN", - argLen: 2, - asm: ppc64.AFCPSGN, reg: regInfo{ - inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "ORconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.AOR, + name: "GreaterEqualNoov", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "XORconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.AXOR, + name: "DUFFZERO", + auxType: auxInt64, + argLen: 2, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 524288}, // R20 }, + clobbers: 269156352, // R16 R17 R20 R30 }, }, { - name: "ANDCCconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.AANDCC, + name: "LoweredZero", + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 65536}, // R16 + {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, + clobbers: 65536, // R16 }, }, { - name: "ANDconst", - auxType: auxInt64, - argLen: 1, - clobberFlags: true, - asm: ppc64.AANDCC, + name: "DUFFCOPY", + auxType: auxInt64, + argLen: 3, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1048576}, // R21 + {1, 524288}, // R20 }, + clobbers: 303759360, // R16 R17 R20 R21 R26 R30 }, }, { - name: "MOVBreg", - argLen: 1, - asm: ppc64.AMOVB, + name: "LoweredMove", + argLen: 4, + clobberFlags: true, + faultOnNilArg0: true, + faultOnNilArg1: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - 
outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 131072}, // R17 + {1, 65536}, // R16 + {2, 318767103}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R26 R30 }, + clobbers: 16973824, // R16 R17 R25 }, }, { - name: "MOVBZreg", - argLen: 1, - asm: ppc64.AMOVBZ, + name: "LoweredGetClosurePtr", + argLen: 0, + zeroWidth: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 33554432}, // R26 }, }, }, { - name: "MOVHreg", - argLen: 1, - asm: ppc64.AMOVH, + name: "LoweredGetCallerSP", + argLen: 1, + rematerializeable: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVHZreg", - argLen: 1, - asm: ppc64.AMOVHZ, + name: "LoweredGetCallerPC", + argLen: 0, + rematerializeable: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVWreg", - argLen: 1, - asm: ppc64.AMOVW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - }, + name: "FlagConstant", + auxType: auxFlagConstant, + argLen: 0, + reg: regInfo{}, }, { - name: "MOVWZreg", + name: "InvertFlags", argLen: 1, - asm: ppc64.AMOVWZ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - }, + reg: regInfo{}, }, { - name: "MOVBZload", - auxType: auxSymOff, + name: "LDAR", argLen: 2, faultOnNilArg0: true, - symEffect: SymRead, - asm: ppc64.AMOVBZ, + asm: arm64.ALDAR, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: 
"MOVHload", - auxType: auxSymOff, + name: "LDARB", argLen: 2, faultOnNilArg0: true, - symEffect: SymRead, - asm: ppc64.AMOVH, + asm: arm64.ALDARB, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVHZload", - auxType: auxSymOff, + name: "LDARW", argLen: 2, faultOnNilArg0: true, - symEffect: SymRead, - asm: ppc64.AMOVHZ, + asm: arm64.ALDARW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVWload", - auxType: auxSymOff, - argLen: 2, + name: "STLRB", + argLen: 3, faultOnNilArg0: true, - symEffect: SymRead, - asm: ppc64.AMOVW, + hasSideEffects: true, + asm: arm64.ASTLRB, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "MOVWZload", - auxType: auxSymOff, - argLen: 2, + name: "STLR", + argLen: 3, faultOnNilArg0: true, - symEffect: SymRead, - asm: ppc64.AMOVWZ, + hasSideEffects: true, + asm: arm64.ASTLR, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "MOVDload", - auxType: auxSymOff, - argLen: 2, + name: "STLRW", + argLen: 3, faultOnNilArg0: true, - symEffect: SymRead, - asm: ppc64.AMOVD, + hasSideEffects: true, + asm: arm64.ASTLRW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 
R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "MOVDBRload", - argLen: 2, - faultOnNilArg0: true, - asm: ppc64.AMOVDBR, + name: "LoweredAtomicExchange64", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVWBRload", - argLen: 2, - faultOnNilArg0: true, - asm: ppc64.AMOVWBR, + name: "LoweredAtomicExchange32", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVHBRload", - argLen: 2, - faultOnNilArg0: true, - asm: ppc64.AMOVHBR, + name: "LoweredAtomicExchange8", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVBZloadidx", - argLen: 3, - asm: ppc64.AMOVBZ, + name: "LoweredAtomicExchange64Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 
R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVHloadidx", - argLen: 3, - asm: ppc64.AMOVH, + name: "LoweredAtomicExchange32Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVHZloadidx", - argLen: 3, - asm: ppc64.AMOVHZ, + name: "LoweredAtomicExchange8Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVWloadidx", - argLen: 3, - asm: ppc64.AMOVW, + name: "LoweredAtomicAdd64", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVWZloadidx", - argLen: 3, - asm: ppc64.AMOVWZ, + name: 
"LoweredAtomicAdd32", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVDloadidx", - argLen: 3, - asm: ppc64.AMOVD, + name: "LoweredAtomicAdd64Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVHBRloadidx", - argLen: 3, - asm: ppc64.AMOVHBR, + name: "LoweredAtomicAdd32Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVWBRloadidx", - argLen: 3, - asm: ppc64.AMOVWBR, + name: "LoweredAtomicCas64", + argLen: 4, + resultNotInArgs: true, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 
g R30 ZERO + {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVDBRloadidx", - argLen: 3, - asm: ppc64.AMOVDBR, + name: "LoweredAtomicCas32", + argLen: 4, + resultNotInArgs: true, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FMOVDloadidx", - argLen: 3, - asm: ppc64.AFMOVD, + name: "LoweredAtomicCas64Variant", + argLen: 4, + resultNotInArgs: true, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FMOVSloadidx", - argLen: 3, - asm: ppc64.AFMOVS, + name: "LoweredAtomicCas32Variant", + argLen: 4, + resultNotInArgs: true, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {2, 
939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "DCBT", - auxType: auxInt64, - argLen: 2, - hasSideEffects: true, - asm: ppc64.ADCBT, + name: "LoweredAtomicAnd8", + argLen: 3, + resultNotInArgs: true, + needIntTemp: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + asm: arm64.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVDBRstore", - argLen: 3, - faultOnNilArg0: true, - asm: ppc64.AMOVDBR, + name: "LoweredAtomicOr8", + argLen: 3, + resultNotInArgs: true, + needIntTemp: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + asm: arm64.AORR, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVWBRstore", - argLen: 3, - faultOnNilArg0: true, - asm: ppc64.AMOVWBR, + name: "LoweredAtomicAnd64", + argLen: 3, + resultNotInArgs: true, + needIntTemp: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + asm: arm64.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVHBRstore", - argLen: 3, - faultOnNilArg0: true, - asm: ppc64.AMOVHBR, + name: "LoweredAtomicOr64", + argLen: 3, + resultNotInArgs: true, + needIntTemp: true, + faultOnNilArg0: true, + hasSideEffects: 
true, + unsafePoint: true, + asm: arm64.AORR, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FMOVDload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: ppc64.AFMOVD, + name: "LoweredAtomicAnd32", + argLen: 3, + resultNotInArgs: true, + needIntTemp: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + asm: arm64.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FMOVSload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: ppc64.AFMOVS, + name: "LoweredAtomicOr32", + argLen: 3, + resultNotInArgs: true, + needIntTemp: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + asm: arm64.AORR, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVBstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: ppc64.AMOVB, + name: "LoweredAtomicAnd8Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 
R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVHstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: ppc64.AMOVH, + name: "LoweredAtomicOr8Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVWstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: ppc64.AMOVW, + name: "LoweredAtomicAnd64Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVDstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: ppc64.AMOVD, + name: "LoweredAtomicOr64Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FMOVDstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: ppc64.AFMOVD, + name: "LoweredAtomicAnd32Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 
9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FMOVSstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: ppc64.AFMOVS, + name: "LoweredAtomicOr32Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVBstoreidx", - argLen: 4, - asm: ppc64.AMOVB, + name: "LoweredWB", + auxType: auxInt64, + argLen: 1, + clobberFlags: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + clobbers: 9223372034975924224, // R16 R17 R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + outputs: []outputInfo{ + {0, 16777216}, // R25 }, }, }, { - name: "MOVHstoreidx", - argLen: 4, - asm: ppc64.AMOVH, + name: "LoweredPanicBoundsA", + auxType: auxInt64, + argLen: 3, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4}, // R2 + {1, 8}, // R3 }, }, }, { - name: "MOVWstoreidx", - argLen: 4, - asm: ppc64.AMOVW, + name: "LoweredPanicBoundsB", + auxType: auxInt64, + argLen: 3, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 2}, // R1 + {1, 4}, // R2 }, }, }, { - name: "MOVDstoreidx", - argLen: 4, - asm: ppc64.AMOVD, + name: 
"LoweredPanicBoundsC", + auxType: auxInt64, + argLen: 3, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1}, // R0 + {1, 2}, // R1 }, }, }, { - name: "FMOVDstoreidx", - argLen: 4, - asm: ppc64.AFMOVD, + name: "PRFM", + auxType: auxInt64, + argLen: 2, + hasSideEffects: true, + asm: arm64.APRFM, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "FMOVSstoreidx", - argLen: 4, - asm: ppc64.AFMOVS, + name: "DMB", + auxType: auxInt64, + argLen: 1, + hasSideEffects: true, + asm: arm64.ADMB, + reg: regInfo{}, + }, + { + name: "ZERO", + argLen: 0, + zeroWidth: true, + fixedReg: true, + reg: regInfo{}, + }, + + { + name: "NEGV", + argLen: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVHBRstoreidx", - argLen: 4, - asm: ppc64.AMOVHBR, + name: "NEGF", + argLen: 1, + asm: loong64.ANEGF, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVWBRstoreidx", - argLen: 4, - asm: ppc64.AMOVWBR, + name: "NEGD", + argLen: 1, + asm: loong64.ANEGD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {2, 1073733630}, // SP 
SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVDBRstoreidx", - argLen: 4, - asm: ppc64.AMOVDBR, + name: "SQRTD", + argLen: 1, + asm: loong64.ASQRTD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVBstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: ppc64.AMOVB, + name: "SQRTF", + argLen: 1, + asm: loong64.ASQRTF, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVHstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: ppc64.AMOVH, + name: "ABSD", + argLen: 1, + asm: loong64.AABSD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVWstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: ppc64.AMOVW, + name: "CLZW", + argLen: 1, + asm: loong64.ACLZW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVDstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: ppc64.AMOVD, + name: "CLZV", + argLen: 1, + asm: loong64.ACLZV, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 
R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVDaddr", - auxType: auxSymOff, - argLen: 1, - rematerializeable: true, - symEffect: SymAddr, - asm: ppc64.AMOVD, + name: "CTZW", + argLen: 1, + asm: loong64.ACTZW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVDconst", - auxType: auxInt64, - argLen: 0, - rematerializeable: true, - asm: ppc64.AMOVD, + name: "CTZV", + argLen: 1, + asm: loong64.ACTZV, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "FMOVDconst", - auxType: auxFloat64, - argLen: 0, - rematerializeable: true, - asm: ppc64.AFMOVD, + name: "REVB2H", + argLen: 1, + asm: loong64.AREVB2H, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "FMOVSconst", - auxType: auxFloat32, - argLen: 0, - rematerializeable: true, - asm: ppc64.AFMOVS, + name: "REVB2W", + argLen: 1, + asm: loong64.AREVB2W, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "FCMPU", - argLen: 2, - asm: ppc64.AFCMPU, + name: "REVBV", + argLen: 1, + asm: loong64.AREVBV, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 
R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "CMP", - argLen: 2, - asm: ppc64.ACMP, + name: "BITREV4B", + argLen: 1, + asm: loong64.ABITREV4B, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "CMPU", - argLen: 2, - asm: ppc64.ACMPU, + name: "BITREVW", + argLen: 1, + asm: loong64.ABITREVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "CMPW", - argLen: 2, - asm: ppc64.ACMPW, + name: "BITREVV", + argLen: 1, + asm: loong64.ABITREVV, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "CMPWU", - argLen: 2, - asm: ppc64.ACMPWU, + name: "VPCNT64", + argLen: 1, + asm: loong64.AVPCNTV, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CMPconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ACMP, + name: "VPCNT32", + argLen: 1, + asm: loong64.AVPCNTW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CMPUconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ACMPU, + name: "VPCNT16", + 
argLen: 1, + asm: loong64.AVPCNTH, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CMPWconst", - auxType: auxInt32, - argLen: 1, - asm: ppc64.ACMPW, + name: "ADDV", + argLen: 2, + commutative: true, + asm: loong64.AADDVU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "CMPWUconst", - auxType: auxInt32, + name: "ADDVconst", + auxType: auxInt64, argLen: 1, - asm: ppc64.ACMPWU, + asm: loong64.AADDVU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073741820}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "ISEL", - auxType: auxInt32, - argLen: 3, - asm: ppc64.AISEL, + name: "SUBV", + argLen: 2, + asm: loong64.ASUBVU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "ISELZ", - auxType: auxInt32, - argLen: 2, - asm: ppc64.AISEL, + name: "SUBVconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.ASUBVU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "SETBC", - auxType: auxInt32, - argLen: 1, - asm: ppc64.ASETBC, + name: "MULV", + argLen: 2, + 
commutative: true, + asm: loong64.AMULV, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "SETBCR", - auxType: auxInt32, - argLen: 1, - asm: ppc64.ASETBCR, + name: "MULHV", + argLen: 2, + commutative: true, + asm: loong64.AMULHV, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "Equal", - argLen: 1, + name: "MULHVU", + argLen: 2, + commutative: true, + asm: loong64.AMULHVU, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "NotEqual", - argLen: 1, + name: "DIVV", + argLen: 2, + asm: loong64.ADIVV, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LessThan", - argLen: 1, + name: "DIVVU", + argLen: 2, + asm: loong64.ADIVVU, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "FLessThan", - argLen: 1, + name: "REMV", + argLen: 2, + asm: loong64.AREMV, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 
R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LessEqual", - argLen: 1, + name: "REMVU", + argLen: 2, + asm: loong64.AREMVU, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "FLessEqual", - argLen: 1, + name: "ADDF", + argLen: 2, + commutative: true, + asm: loong64.AADDF, reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "GreaterThan", - argLen: 1, + name: "ADDD", + argLen: 2, + commutative: true, + asm: loong64.AADDD, reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "FGreaterThan", - argLen: 1, + name: "SUBF", + argLen: 2, + asm: loong64.ASUBF, reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "GreaterEqual", - argLen: 1, + name: "SUBD", + argLen: 2, + asm: loong64.ASUBD, reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 
F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "FGreaterEqual", - argLen: 1, + name: "MULF", + argLen: 2, + commutative: true, + asm: loong64.AMULF, reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredGetClosurePtr", - argLen: 0, - zeroWidth: true, + name: "MULD", + argLen: 2, + commutative: true, + asm: loong64.AMULD, reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, outputs: []outputInfo{ - {0, 2048}, // R11 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredGetCallerSP", - argLen: 1, - rematerializeable: true, + name: "DIVF", + argLen: 2, + asm: loong64.ADIVF, reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredGetCallerPC", - argLen: 0, - rematerializeable: true, + name: "DIVD", + argLen: 2, + asm: loong64.ADIVD, reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredNilCheck", - argLen: 2, - clobberFlags: true, - nilCheck: true, - faultOnNilArg0: true, + name: "AND", + argLen: 2, + commutative: true, + asm: loong64.AAND, reg: regInfo{ 
inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, - clobbers: 2147483648, // R31 }, }, { - name: "LoweredRound32F", - argLen: 1, - resultInArg0: true, - zeroWidth: true, + name: "ANDconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredRound64F", - argLen: 1, - resultInArg0: true, - zeroWidth: true, + name: "OR", + argLen: 2, + commutative: true, + asm: loong64.AOR, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "CALLstatic", - auxType: auxCallOff, - argLen: -1, - clobberFlags: true, - call: true, + name: "ORconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.AOR, reg: regInfo{ - clobbers: 18446744071562059768, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 XER + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, }, }, { - name: "CALLtail", - auxType: auxCallOff, - argLen: -1, - clobberFlags: true, - call: true, - tailCall: true, + name: "XOR", + argLen: 2, + commutative: true, + asm: loong64.AXOR, reg: regInfo{ - clobbers: 18446744071562059768, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 XER + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, }, }, { - name: "CALLclosure", - auxType: auxCallOff, - argLen: -1, - clobberFlags: true, - call: true, + name: "XORconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.AXOR, reg: regInfo{ inputs: []inputInfo{ - {0, 4096}, // R12 - {1, 2048}, // R11 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, - clobbers: 18446744071562059768, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 XER }, }, { - name: "CALLinter", - auxType: auxCallOff, - argLen: -1, - clobberFlags: true, - call: true, + name: "NOR", + argLen: 2, + commutative: true, + asm: loong64.ANOR, reg: regInfo{ inputs: []inputInfo{ - {0, 4096}, // R12 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, - clobbers: 18446744071562059768, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 XER }, }, { - name: "LoweredZero", - auxType: auxInt64, - argLen: 2, - clobberFlags: true, - faultOnNilArg0: true, - unsafePoint: true, + name: "NORconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.ANOR, reg: regInfo{ inputs: []inputInfo{ - {0, 1048576}, // R20 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, - clobbers: 1048576, // R20 }, }, { - name: "LoweredZeroShort", - auxType: auxInt64, - argLen: 2, - faultOnNilArg0: true, - unsafePoint: true, + name: "ANDN", + argLen: 2, + asm: loong64.AANDN, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredQuadZeroShort", - auxType: auxInt64, - argLen: 2, - faultOnNilArg0: true, - unsafePoint: true, + name: "ORN", + argLen: 2, + asm: loong64.AORN, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 
1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredQuadZero", - auxType: auxInt64, - argLen: 2, - clobberFlags: true, - faultOnNilArg0: true, - unsafePoint: true, + name: "FMADDF", + argLen: 3, + commutative: true, + asm: loong64.AFMADDF, reg: regInfo{ inputs: []inputInfo{ - {0, 1048576}, // R20 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, - clobbers: 1048576, // R20 }, }, { - name: "LoweredMove", - auxType: auxInt64, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, - faultOnNilArg1: true, - unsafePoint: true, + name: "FMADDD", + argLen: 3, + commutative: true, + asm: loong64.AFMADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 1048576}, // R20 - {1, 2097152}, // R21 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, - clobbers: 3145728, // R20 R21 }, }, { - name: "LoweredMoveShort", - auxType: auxInt64, - argLen: 3, - faultOnNilArg0: true, - faultOnNilArg1: true, - unsafePoint: true, + name: "FMSUBF", + argLen: 3, + commutative: true, + asm: loong64.AFMSUBF, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredQuadMove", - auxType: auxInt64, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, - faultOnNilArg1: true, - 
unsafePoint: true, + name: "FMSUBD", + argLen: 3, + commutative: true, + asm: loong64.AFMSUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 1048576}, // R20 - {1, 2097152}, // R21 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, - clobbers: 3145728, // R20 R21 }, }, { - name: "LoweredQuadMoveShort", - auxType: auxInt64, - argLen: 3, - faultOnNilArg0: true, - faultOnNilArg1: true, - unsafePoint: true, + name: "FNMADDF", + argLen: 3, + commutative: true, + asm: loong64.AFNMADDF, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicStore8", - auxType: auxInt64, - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, - reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - }, - }, - { - name: "LoweredAtomicStore32", - auxType: auxInt64, - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, + name: "FNMADDD", + argLen: 3, + commutative: true, + asm: loong64.AFNMADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, - }, - }, - { - name: "LoweredAtomicStore64", - auxType: auxInt64, - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, - reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 
R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicLoad8", - auxType: auxInt64, - argLen: 2, - clobberFlags: true, - faultOnNilArg0: true, + name: "FNMSUBF", + argLen: 3, + commutative: true, + asm: loong64.AFNMSUBF, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicLoad32", - auxType: auxInt64, - argLen: 2, - clobberFlags: true, - faultOnNilArg0: true, + name: "FNMSUBD", + argLen: 3, + commutative: true, + asm: loong64.AFNMSUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicLoad64", - auxType: auxInt64, - argLen: 2, - clobberFlags: true, - faultOnNilArg0: true, + name: "FMINF", + argLen: 2, + commutative: true, + resultNotInArgs: true, + asm: loong64.AFMINF, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: 
"LoweredAtomicLoadPtr", - auxType: auxInt64, - argLen: 2, - clobberFlags: true, - faultOnNilArg0: true, + name: "FMIND", + argLen: 2, + commutative: true, + resultNotInArgs: true, + asm: loong64.AFMIND, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicAdd32", - argLen: 3, + name: "FMAXF", + argLen: 2, + commutative: true, resultNotInArgs: true, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, + asm: loong64.AFMAXF, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicAdd64", - argLen: 3, + name: "FMAXD", + argLen: 2, + commutative: true, resultNotInArgs: true, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, + asm: loong64.AFMAXD, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicExchange8", - argLen: 3, - resultNotInArgs: true, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "MASKEQZ", + argLen: 2, + asm: loong64.AMASKEQZ, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 
R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredAtomicExchange32", - argLen: 3, - resultNotInArgs: true, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "MASKNEZ", + argLen: 2, + asm: loong64.AMASKNEZ, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredAtomicExchange64", - argLen: 3, - resultNotInArgs: true, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "FCOPYSGD", + argLen: 2, + asm: loong64.AFCOPYSGD, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicCas64", - auxType: auxInt64, - argLen: 4, - resultNotInArgs: true, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "SLL", + argLen: 2, + asm: loong64.ASLL, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {2, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ 
- {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredAtomicCas32", - auxType: auxInt64, - argLen: 4, - resultNotInArgs: true, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "SLLV", + argLen: 2, + asm: loong64.ASLLV, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {2, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredAtomicAnd8", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, - asm: ppc64.AAND, + name: "SLLconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.ASLL, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredAtomicAnd32", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, - asm: ppc64.AAND, + name: "SLLVconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.ASLLV, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredAtomicOr8", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, - asm: ppc64.AOR, + name: "SRL", + argLen: 2, + asm: loong64.ASRL, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 
R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredAtomicOr32", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, - asm: ppc64.AOR, + name: "SRLV", + argLen: 2, + asm: loong64.ASRLV, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, - }, - }, - { - name: "LoweredWB", - auxType: auxInt64, - argLen: 1, - clobberFlags: true, - reg: regInfo{ - clobbers: 18446744072632408064, // R11 R12 R18 R19 R22 R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 XER outputs: []outputInfo{ - {0, 536870912}, // R29 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredPubBarrier", - argLen: 1, - hasSideEffects: true, - asm: ppc64.ALWSYNC, - reg: regInfo{}, - }, - { - name: "LoweredPanicBoundsA", + name: "SRLconst", auxType: auxInt64, - argLen: 3, - call: true, + argLen: 1, + asm: loong64.ASRL, reg: regInfo{ inputs: []inputInfo{ - {0, 32}, // R5 - {1, 64}, // R6 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredPanicBoundsB", + name: "SRLVconst", auxType: auxInt64, - argLen: 3, - call: true, + argLen: 1, + asm: loong64.ASRLV, reg: regInfo{ inputs: []inputInfo{ - {0, 16}, // R4 - {1, 32}, // R5 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredPanicBoundsC", - auxType: auxInt64, - argLen: 3, - call: true, + name: "SRA", + argLen: 2, + asm: loong64.ASRA, reg: regInfo{ inputs: []inputInfo{ - {0, 8}, // R3 - {1, 16}, // R4 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "InvertFlags", - argLen: 1, - reg: regInfo{}, - }, - { - name: "FlagEQ", - argLen: 0, - reg: regInfo{}, - }, - { - name: "FlagLT", - argLen: 0, - reg: regInfo{}, - }, - { - name: "FlagGT", - argLen: 0, - reg: regInfo{}, - }, - - { - name: "ADD", - argLen: 2, - commutative: true, - asm: riscv.AADD, + name: "SRAV", + argLen: 2, + asm: loong64.ASRAV, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 
- {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "ADDI", + name: "SRAconst", auxType: auxInt64, argLen: 1, - asm: riscv.AADDI, + asm: loong64.ASRA, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "ADDIW", + name: "SRAVconst", auxType: auxInt64, argLen: 1, - asm: riscv.AADDIW, + asm: loong64.ASRAV, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "NEG", - argLen: 1, - asm: riscv.ANEG, + name: "ROTR", + argLen: 2, + asm: loong64.AROTR, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "NEGW", - argLen: 1, - asm: riscv.ANEGW, + name: "ROTRV", + argLen: 2, + asm: loong64.AROTRV, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "SUB", - argLen: 2, - asm: riscv.ASUB, + 
name: "ROTRconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.AROTR, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "SUBW", - argLen: 2, - asm: riscv.ASUBW, + name: "ROTRVconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.AROTRV, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MUL", - argLen: 2, - commutative: true, - asm: riscv.AMUL, + name: "SGT", + argLen: 2, + asm: loong64.ASGT, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MULW", - argLen: 2, - commutative: true, - asm: riscv.AMULW, + name: "SGTconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.ASGT, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MULH", - argLen: 2, - commutative: true, - asm: riscv.AMULH, + name: "SGTU", + argLen: 2, + asm: loong64.ASGTU, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 
X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MULHU", - argLen: 2, - commutative: true, - asm: riscv.AMULHU, + name: "SGTUconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.ASGTU, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredMuluhilo", - argLen: 2, - resultNotInArgs: true, + name: "CMPEQF", + argLen: 2, + asm: loong64.ACMPEQF, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredMuluover", - argLen: 2, - resultNotInArgs: true, + name: "CMPEQD", + argLen: 2, + asm: loong64.ACMPEQD, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "DIV", + name: "CMPGEF", argLen: 2, - asm: riscv.ADIV, + asm: loong64.ACMPGEF, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 
X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "DIVU", + name: "CMPGED", argLen: 2, - asm: riscv.ADIVU, + asm: loong64.ACMPGED, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "DIVW", + name: "CMPGTF", argLen: 2, - asm: riscv.ADIVW, + asm: loong64.ACMPGTF, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "DIVUW", + name: "CMPGTD", argLen: 2, - asm: riscv.ADIVUW, + asm: loong64.ACMPGTD, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "REM", - argLen: 2, - asm: riscv.AREM, + name: "BSTRPICKW", + auxType: auxInt64, + argLen: 1, + asm: loong64.ABSTRPICKW, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 
R29 R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "REMU", - argLen: 2, - asm: riscv.AREMU, + name: "BSTRPICKV", + auxType: auxInt64, + argLen: 1, + asm: loong64.ABSTRPICKV, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "REMW", - argLen: 2, - asm: riscv.AREMW, + name: "MOVVconst", + auxType: auxInt64, + argLen: 0, + rematerializeable: true, + asm: loong64.AMOVV, reg: regInfo{ - inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "REMUW", - argLen: 2, - asm: riscv.AREMUW, + name: "MOVFconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: loong64.AMOVF, reg: regInfo{ - inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVaddr", - auxType: auxSymOff, - argLen: 1, + name: "MOVDconst", + auxType: auxFloat64, + argLen: 0, rematerializeable: true, - symEffect: SymAddr, - asm: riscv.AMOV, + asm: loong64.AMOVD, reg: regInfo{ - inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB - }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVDconst", - auxType: auxInt64, - argLen: 0, + name: "MOVVaddr", + auxType: auxSymOff, + argLen: 1, rematerializeable: true, - asm: riscv.AMOV, + symEffect: SymAddr, + asm: loong64.AMOVV, reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686018427387908}, // SP SB + }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 
X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -33875,1318 +35990,1298 @@ var opcodeTable = [...]opInfo{ argLen: 2, faultOnNilArg0: true, symEffect: SymRead, - asm: riscv.AMOVB, + asm: loong64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVHload", + name: "MOVBUload", auxType: auxSymOff, argLen: 2, faultOnNilArg0: true, symEffect: SymRead, - asm: riscv.AMOVH, + asm: loong64.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVWload", + name: "MOVHload", auxType: auxSymOff, argLen: 2, faultOnNilArg0: true, symEffect: SymRead, - asm: riscv.AMOVW, + asm: loong64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVDload", + name: "MOVHUload", auxType: auxSymOff, argLen: 2, faultOnNilArg0: true, symEffect: SymRead, - asm: riscv.AMOV, + asm: loong64.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVBUload", + name: "MOVWload", auxType: auxSymOff, argLen: 2, faultOnNilArg0: true, symEffect: SymRead, - asm: riscv.AMOVBU, + asm: loong64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 
R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVHUload", + name: "MOVWUload", auxType: auxSymOff, argLen: 2, faultOnNilArg0: true, symEffect: SymRead, - asm: riscv.AMOVHU, + asm: loong64.AMOVWU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVWUload", + name: "MOVVload", auxType: auxSymOff, argLen: 2, faultOnNilArg0: true, symEffect: SymRead, - asm: riscv.AMOVWU, + asm: loong64.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVBstore", + name: "MOVFload", auxType: auxSymOff, - argLen: 3, + argLen: 2, faultOnNilArg0: true, - symEffect: SymWrite, - asm: riscv.AMOVB, + symEffect: SymRead, + asm: loong64.AMOVF, reg: regInfo{ inputs: []inputInfo{ - {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, - }, - }, - { - name: "MOVHstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: riscv.AMOVH, - reg: regInfo{ - inputs: []inputInfo{ - {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVWstore", + name: "MOVDload", auxType: auxSymOff, - argLen: 3, + argLen: 2, faultOnNilArg0: true, - symEffect: SymWrite, - asm: riscv.AMOVW, + symEffect: SymRead, + asm: loong64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 
R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVDstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: riscv.AMOV, + name: "MOVVloadidx", + argLen: 3, + asm: loong64.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVBstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: riscv.AMOVB, + name: "MOVWloadidx", + argLen: 3, + asm: loong64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVHstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: riscv.AMOVH, + name: "MOVWUloadidx", + argLen: 3, + asm: loong64.AMOVWU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVWstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: riscv.AMOVW, + name: "MOVHloadidx", + argLen: 3, + asm: loong64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVDstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - 
symEffect: SymWrite, - asm: riscv.AMOV, + name: "MOVHUloadidx", + argLen: 3, + asm: loong64.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVBreg", - argLen: 1, - asm: riscv.AMOVB, + name: "MOVBloadidx", + argLen: 3, + asm: loong64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVHreg", - argLen: 1, - asm: riscv.AMOVH, + name: "MOVBUloadidx", + argLen: 3, + asm: loong64.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVWreg", - argLen: 1, - asm: riscv.AMOVW, + name: "MOVFloadidx", + argLen: 3, + asm: loong64.AMOVF, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVDreg", - argLen: 1, - asm: riscv.AMOV, + name: "MOVDloadidx", + argLen: 3, + asm: loong64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 
R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVBUreg", - argLen: 1, - asm: riscv.AMOVBU, + name: "MOVBstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "MOVHUreg", - argLen: 1, - asm: riscv.AMOVHU, + name: "MOVHstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "MOVWUreg", - argLen: 1, - asm: riscv.AMOVWU, + name: "MOVWstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "MOVDnop", - argLen: 1, - resultInArg0: true, + name: "MOVVstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "SLL", - argLen: 2, - asm: riscv.ASLL, + name: "MOVFstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVF, reg: regInfo{ 
inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SLLW", - argLen: 2, - asm: riscv.ASLLW, + name: "MOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SRA", - argLen: 2, - asm: riscv.ASRA, + name: "MOVBstoreidx", + argLen: 4, + asm: loong64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "SRAW", - argLen: 2, - asm: riscv.ASRAW, + name: "MOVHstoreidx", + argLen: 4, + asm: loong64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "SRL", - argLen: 2, - asm: riscv.ASRL, + name: "MOVWstoreidx", + argLen: 4, + asm: loong64.AMOVW, reg: regInfo{ inputs: 
[]inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "SRLW", - argLen: 2, - asm: riscv.ASRLW, + name: "MOVVstoreidx", + argLen: 4, + asm: loong64.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "SLLI", - auxType: auxInt64, - argLen: 1, - asm: riscv.ASLLI, + name: "MOVFstoreidx", + argLen: 4, + asm: loong64.AMOVF, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SLLIW", - auxType: auxInt64, - argLen: 1, - asm: riscv.ASLLIW, + name: "MOVDstoreidx", + argLen: 4, + asm: loong64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SRAI", - auxType: auxInt64, - argLen: 1, - asm: riscv.ASRAI, + name: "MOVBstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + 
symEffect: SymWrite, + asm: loong64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "SRAIW", - auxType: auxInt64, - argLen: 1, - asm: riscv.ASRAIW, + name: "MOVHstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "SRLI", - auxType: auxInt64, - argLen: 1, - asm: riscv.ASRLI, + name: "MOVWstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "SRLIW", - auxType: auxInt64, - argLen: 1, - asm: riscv.ASRLIW, + name: "MOVVstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "SH1ADD", - argLen: 2, - asm: riscv.ASH1ADD, + name: "MOVBstorezeroidx", + argLen: 3, + asm: loong64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "SH2ADD", - argLen: 2, - asm: riscv.ASH2ADD, + name: "MOVHstorezeroidx", + argLen: 3, + asm: loong64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "SH3ADD", - argLen: 2, - asm: riscv.ASH3ADD, + name: "MOVWstorezeroidx", + argLen: 3, + asm: loong64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "AND", - argLen: 2, - commutative: true, - asm: riscv.AAND, + name: "MOVVstorezeroidx", + argLen: 3, + asm: loong64.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "ANDN", - argLen: 2, - asm: riscv.AANDN, + name: "MOVWfpgp", + argLen: 1, + asm: loong64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "ANDI", - auxType: auxInt64, - argLen: 1, - asm: riscv.AANDI, + name: "MOVWgpfp", + argLen: 1, + asm: loong64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 
F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CLZ", + name: "MOVVfpgp", argLen: 1, - asm: riscv.ACLZ, + asm: loong64.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "CLZW", + name: "MOVVgpfp", argLen: 1, - asm: riscv.ACLZW, + asm: loong64.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CPOP", + name: "MOVBreg", argLen: 1, - asm: riscv.ACPOP, + asm: loong64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "CPOPW", + name: "MOVBUreg", argLen: 1, - asm: riscv.ACPOPW, + asm: loong64.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "CTZ", + name: "MOVHreg", argLen: 1, - asm: riscv.ACTZ, + asm: loong64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "CTZW", + name: "MOVHUreg", argLen: 1, - asm: riscv.ACTZW, + asm: loong64.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073741816}, // R4 R5 R6 R7 
R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "NOT", + name: "MOVWreg", argLen: 1, - asm: riscv.ANOT, + asm: loong64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "OR", - argLen: 2, - commutative: true, - asm: riscv.AOR, + name: "MOVWUreg", + argLen: 1, + asm: loong64.AMOVWU, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "ORN", - argLen: 2, - asm: riscv.AORN, + name: "MOVVreg", + argLen: 1, + asm: loong64.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "ORI", - auxType: auxInt64, - argLen: 1, - asm: riscv.AORI, + name: "MOVVnop", + argLen: 1, + resultInArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "REV8", + name: "MOVWF", argLen: 1, - asm: riscv.AREV8, + asm: loong64.AMOVWF, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 
F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ROL", - argLen: 2, - asm: riscv.AROL, + name: "MOVWD", + argLen: 1, + asm: loong64.AMOVWD, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ROLW", - argLen: 2, - asm: riscv.AROLW, + name: "MOVVF", + argLen: 1, + asm: loong64.AMOVVF, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ROR", - argLen: 2, - asm: riscv.AROR, + name: "MOVVD", + argLen: 1, + asm: loong64.AMOVVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "RORI", - auxType: auxInt64, - argLen: 1, - asm: riscv.ARORI, + name: "TRUNCFW", + argLen: 1, + asm: loong64.ATRUNCFW, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - 
name: "RORIW", - auxType: auxInt64, - argLen: 1, - asm: riscv.ARORIW, + name: "TRUNCDW", + argLen: 1, + asm: loong64.ATRUNCDW, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "RORW", - argLen: 2, - asm: riscv.ARORW, + name: "TRUNCFV", + argLen: 1, + asm: loong64.ATRUNCFV, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "XNOR", - argLen: 2, - commutative: true, - asm: riscv.AXNOR, + name: "TRUNCDV", + argLen: 1, + asm: loong64.ATRUNCDV, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "XOR", - argLen: 2, - commutative: true, - asm: riscv.AXOR, + name: "MOVFD", + argLen: 1, + asm: loong64.AMOVFD, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "XORI", - auxType: auxInt64, - argLen: 1, - asm: riscv.AXORI, + name: "MOVDF", + argLen: 1, + asm: loong64.AMOVDF, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // 
F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MIN", - argLen: 2, - commutative: true, - asm: riscv.AMIN, + name: "LoweredRound32F", + argLen: 1, + resultInArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MAX", - argLen: 2, - commutative: true, - asm: riscv.AMAX, + name: "LoweredRound64F", + argLen: 1, + resultInArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MINU", - argLen: 2, - commutative: true, - asm: riscv.AMINU, + name: "CALLstatic", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, + clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { - name: "MAXU", - argLen: 2, - commutative: true, - asm: riscv.AMAXU, + name: "CALLtail", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, + tailCall: true, + reg: regInfo{ + clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { + name: "CALLclosure", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, reg: regInfo{ 
inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 268435456}, // R29 + {0, 1071644668}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, + clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { - name: "SEQZ", - argLen: 1, - asm: riscv.ASEQZ, + name: "CALLinter", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, + clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { - name: "SNEZ", - argLen: 1, - asm: riscv.ASNEZ, + name: "DUFFZERO", + auxType: auxInt64, + argLen: 2, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 524288}, // R20 }, + clobbers: 524290, // R1 R20 }, }, { - name: "SLT", - argLen: 2, - asm: riscv.ASLT, + name: "DUFFCOPY", + auxType: auxInt64, + argLen: 3, + faultOnNilArg0: true, + faultOnNilArg1: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1048576}, // R21 + {1, 524288}, // R20 }, + clobbers: 1572866, // R1 R20 R21 }, }, { - name: "SLTI", - auxType: auxInt64, - argLen: 1, - asm: riscv.ASLTI, + name: "LoweredZero", + auxType: auxInt64, + argLen: 3, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 524288}, // R20 + {1, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, + clobbers: 524288, // R20 }, }, { - name: "SLTU", - argLen: 2, - asm: riscv.ASLTU, + name: "LoweredMove", + auxType: auxInt64, + argLen: 4, + faultOnNilArg0: true, + 
faultOnNilArg1: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1048576}, // R21 + {1, 524288}, // R20 + {2, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, + clobbers: 1572864, // R20 R21 }, }, { - name: "SLTIU", - auxType: auxInt64, - argLen: 1, - asm: riscv.ASLTIU, + name: "LoweredAtomicLoad8", + argLen: 2, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredRound32F", - argLen: 1, - resultInArg0: true, + name: "LoweredAtomicLoad32", + argLen: 2, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredRound64F", - argLen: 1, - resultInArg0: true, + name: "LoweredAtomicLoad64", + argLen: 2, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "CALLstatic", - auxType: auxCallOff, - argLen: -1, - call: true, + name: "LoweredAtomicStore8", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ - clobbers: 9223372035781033968, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + inputs: []inputInfo{ + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 
R27 R28 R29 R31 SB + }, }, }, { - name: "CALLtail", - auxType: auxCallOff, - argLen: -1, - call: true, - tailCall: true, - reg: regInfo{ - clobbers: 9223372035781033968, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - }, - { - name: "CALLclosure", - auxType: auxCallOff, - argLen: -1, - call: true, + name: "LoweredAtomicStore32", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {1, 33554432}, // X26 - {0, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, - clobbers: 9223372035781033968, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { - name: "CALLinter", - auxType: auxCallOff, - argLen: -1, - call: true, + name: "LoweredAtomicStore64", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, - clobbers: 9223372035781033968, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { - name: "DUFFZERO", - auxType: auxInt64, - argLen: 2, + name: "LoweredAtomicStore8Variant", + argLen: 3, faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 16777216}, // X25 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, - clobbers: 16777216, // X25 }, }, { - name: "DUFFCOPY", - auxType: auxInt64, + name: "LoweredAtomicStore32Variant", argLen: 3, faultOnNilArg0: true, - faultOnNilArg1: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 16777216}, // X25 - {1, 8388608}, // X24 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, - clobbers: 25165824, // X24 X25 }, }, { - name: "LoweredZero", - auxType: auxInt64, + name: "LoweredAtomicStore64Variant", argLen: 3, faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 16}, // X5 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 
R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, - clobbers: 16, // X5 }, }, { - name: "LoweredMove", - auxType: auxInt64, - argLen: 4, - faultOnNilArg0: true, - faultOnNilArg1: true, + name: "LoweredAtomicExchange32", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 16}, // X5 - {1, 32}, // X6 - {2, 1006632880}, // X5 X6 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, - clobbers: 112, // X5 X6 X7 }, }, { - name: "LoweredAtomicLoad8", - argLen: 2, - faultOnNilArg0: true, + name: "LoweredAtomicExchange64", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredAtomicLoad32", - argLen: 2, - faultOnNilArg0: true, + name: "LoweredAtomicExchange8Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredAtomicLoad64", - argLen: 2, - faultOnNilArg0: true, + name: "LoweredAtomicAdd32", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 
X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredAtomicStore8", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, + name: "LoweredAtomicAdd64", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredAtomicStore32", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, + name: "LoweredAtomicCas32", + argLen: 4, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredAtomicStore64", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, + name: "LoweredAtomicCas64", + argLen: 4, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredAtomicExchange32", - argLen: 3, + name: "LoweredAtomicCas64Variant", + argLen: 4, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 - 
{0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredAtomicExchange64", - argLen: 3, + name: "LoweredAtomicCas32Variant", + argLen: 4, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 - {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredAtomicAdd32", + name: "LoweredAtomicAnd32", argLen: 3, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, - unsafePoint: true, + asm: loong64.AAMANDDBW, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 - {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredAtomicAdd64", + name: "LoweredAtomicOr32", argLen: 3, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, - unsafePoint: true, + asm: loong64.AAMORDBW, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 - {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 
R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredAtomicCas32", - argLen: 4, + name: "LoweredAtomicAnd32value", + argLen: 3, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, - unsafePoint: true, + asm: loong64.AAMANDDBW, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 - {2, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 - {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredAtomicCas64", - argLen: 4, + name: "LoweredAtomicAnd64value", + argLen: 3, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, - unsafePoint: true, + asm: loong64.AAMANDDBV, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 - {2, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 - {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredAtomicAnd32", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, - asm: riscv.AAMOANDW, + name: "LoweredAtomicOr32value", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + asm: loong64.AAMORDBW, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 - {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 
R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredAtomicOr32", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, - asm: riscv.AAMOORW, + name: "LoweredAtomicOr64value", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + asm: loong64.AAMORDBV, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 - {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -35197,16 +37292,35 @@ var opcodeTable = [...]opInfo{ faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredGetClosurePtr", - argLen: 0, + name: "FPFlagTrue", + argLen: 1, reg: regInfo{ outputs: []outputInfo{ - {0, 33554432}, // X26 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "FPFlagFalse", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "LoweredGetClosurePtr", + argLen: 0, + zeroWidth: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 268435456}, // R29 }, }, }, @@ -35216,7 +37330,7 @@ var opcodeTable = [...]opInfo{ rematerializeable: true, reg: regInfo{ outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -35226,7 +37340,7 @@ var opcodeTable = [...]opInfo{ rematerializeable: true, reg: regInfo{ outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -35236,9 +37350,9 @@ var opcodeTable = [...]opInfo{ argLen: 1, clobberFlags: true, reg: regInfo{ - clobbers: 9223372034707292160, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + clobbers: 4611686017353646082, // R1 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 outputs: []outputInfo{ - {0, 8388608}, // X24 + {0, 268435456}, // R29 }, }, }, @@ -35246,7 +37360,7 @@ var opcodeTable = [...]opInfo{ name: "LoweredPubBarrier", argLen: 1, hasSideEffects: true, - asm: riscv.AFENCE, + asm: loong64.ADBAR, reg: regInfo{}, }, { @@ -35256,8 +37370,8 @@ var opcodeTable = [...]opInfo{ call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 64}, // X7 
- {1, 134217728}, // X28 + {0, 4194304}, // R23 + {1, 8388608}, // R24 }, }, }, @@ -35268,8 +37382,8 @@ var opcodeTable = [...]opInfo{ call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 32}, // X6 - {1, 64}, // X7 + {0, 1048576}, // R21 + {1, 4194304}, // R23 }, }, }, @@ -35280,7893 +37394,22157 @@ var opcodeTable = [...]opInfo{ call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 16}, // X5 - {1, 32}, // X6 + {0, 524288}, // R20 + {1, 1048576}, // R21 }, }, }, { - name: "FADDS", - argLen: 2, - commutative: true, - asm: riscv.AFADDS, + name: "PRELD", + auxType: auxInt64, + argLen: 2, + hasSideEffects: true, + asm: loong64.APRELD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1073741820}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { + name: "PRELDX", + auxType: auxInt64, + argLen: 2, + hasSideEffects: true, + asm: loong64.APRELDX, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741820}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, + { - name: "FSUBS", - argLen: 2, - asm: riscv.AFSUBS, + name: "ADD", + argLen: 2, + commutative: true, + asm: mips.AADDU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FMULS", - argLen: 2, - commutative: true, - asm: riscv.AFMULS, + name: "ADDconst", + auxType: auxInt32, + argLen: 1, + asm: mips.AADDU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 536870910}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FDIVS", + name: "SUB", argLen: 2, - asm: riscv.AFDIVS, + asm: mips.ASUBU, reg: regInfo{ 
inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FMADDS", - argLen: 3, - commutative: true, - asm: riscv.AFMADDS, + name: "SUBconst", + auxType: auxInt32, + argLen: 1, + asm: mips.ASUBU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FMSUBS", - argLen: 3, + name: "MUL", + argLen: 2, commutative: true, - asm: riscv.AFMSUBS, + asm: mips.AMUL, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, + clobbers: 105553116266496, // HI LO outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FNMADDS", - argLen: 3, + name: "MULT", + argLen: 2, commutative: true, - asm: riscv.AFNMADDS, + asm: mips.AMUL, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 
9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35184372088832}, // HI + {1, 70368744177664}, // LO }, }, }, { - name: "FNMSUBS", - argLen: 3, + name: "MULTU", + argLen: 2, commutative: true, - asm: riscv.AFNMSUBS, + asm: mips.AMULU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35184372088832}, // HI + {1, 70368744177664}, // LO }, }, }, { - name: "FSQRTS", - argLen: 1, - asm: riscv.AFSQRTS, + name: "DIV", + argLen: 2, + asm: mips.ADIV, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35184372088832}, // HI + {1, 70368744177664}, // LO }, }, }, { - name: "FNEGS", - argLen: 1, - asm: riscv.AFNEGS, + name: "DIVU", + argLen: 2, + asm: mips.ADIVU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35184372088832}, // HI + {1, 70368744177664}, // LO }, }, }, { - name: "FMVSX", - argLen: 1, - asm: riscv.AFMVSX, + name: "ADDF", + argLen: 2, + commutative: true, + asm: mips.AADDF, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 
F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "FCVTSW", - argLen: 1, - asm: riscv.AFCVTSW, + name: "ADDD", + argLen: 2, + commutative: true, + asm: mips.AADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "FCVTSL", - argLen: 1, - asm: riscv.AFCVTSL, + name: "SUBF", + argLen: 2, + asm: mips.ASUBF, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "FCVTWS", - argLen: 1, - asm: riscv.AFCVTWS, + name: "SUBD", + argLen: 2, + asm: mips.ASUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "FCVTLS", - argLen: 1, - asm: riscv.AFCVTLS, + name: "MULF", + argLen: 2, + commutative: true, + asm: mips.AMULF, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "FMOVWload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: riscv.AMOVF, + name: "MULD", + argLen: 2, + commutative: true, + asm: mips.AMULD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 
X28 X29 X30 SB + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "FMOVWstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: riscv.AMOVF, + name: "DIVF", + argLen: 2, + asm: mips.ADIVF, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + outputs: []outputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "FEQS", - argLen: 2, - commutative: true, - asm: riscv.AFEQS, + name: "DIVD", + argLen: 2, + asm: mips.ADIVD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "FNES", + name: "AND", argLen: 2, commutative: true, - asm: riscv.AFNES, + asm: mips.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FLTS", - argLen: 2, - asm: riscv.AFLTS, + name: "ANDconst", + auxType: auxInt32, + argLen: 1, + asm: mips.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 
R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FLES", - argLen: 2, - asm: riscv.AFLES, + name: "OR", + argLen: 2, + commutative: true, + asm: mips.AOR, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "LoweredFMAXS", - argLen: 2, - commutative: true, - resultNotInArgs: true, - asm: riscv.AFMAXS, + name: "ORconst", + auxType: auxInt32, + argLen: 1, + asm: mips.AOR, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "LoweredFMINS", - argLen: 2, - commutative: true, - resultNotInArgs: true, - asm: riscv.AFMINS, + name: "XOR", + argLen: 2, + commutative: true, + asm: mips.AXOR, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FADDD", - argLen: 2, - commutative: true, - asm: riscv.AFADDD, + name: "XORconst", + auxType: auxInt32, + argLen: 1, + asm: mips.AXOR, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 
F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FSUBD", - argLen: 2, - asm: riscv.AFSUBD, + name: "NOR", + argLen: 2, + commutative: true, + asm: mips.ANOR, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FMULD", - argLen: 2, - commutative: true, - asm: riscv.AFMULD, + name: "NORconst", + auxType: auxInt32, + argLen: 1, + asm: mips.ANOR, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FDIVD", - argLen: 2, - asm: riscv.AFDIVD, + name: "NEG", + argLen: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FMADDD", - argLen: 3, - commutative: true, - asm: riscv.AFMADDD, + name: "NEGF", + argLen: 1, + asm: mips.ANEGF, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 
F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "FMSUBD", - argLen: 3, - commutative: true, - asm: riscv.AFMSUBD, + name: "NEGD", + argLen: 1, + asm: mips.ANEGD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "FNMADDD", - argLen: 3, - commutative: true, - asm: riscv.AFNMADDD, + name: "ABSD", + argLen: 1, + asm: mips.AABSD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "FNMSUBD", - argLen: 3, - commutative: true, - asm: riscv.AFNMSUBD, + name: "SQRTD", + argLen: 1, + asm: mips.ASQRTD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 
F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "FSQRTD", + name: "SQRTF", argLen: 1, - asm: riscv.AFSQRTD, + asm: mips.ASQRTF, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "FNEGD", - argLen: 1, - asm: riscv.AFNEGD, + name: "SLL", + argLen: 2, + asm: mips.ASLL, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FABSD", - argLen: 1, - asm: riscv.AFABSD, + name: "SLLconst", + auxType: auxInt32, + argLen: 1, + asm: mips.ASLL, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FSGNJD", + name: "SRL", argLen: 2, - asm: riscv.AFSGNJD, + asm: mips.ASRL, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FMVDX", - argLen: 1, - asm: riscv.AFMVDX, + name: "SRLconst", + auxType: auxInt32, + argLen: 1, + asm: mips.ASRL, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 
R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FCVTDW", - argLen: 1, - asm: riscv.AFCVTDW, + name: "SRA", + argLen: 2, + asm: mips.ASRA, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FCVTDL", - argLen: 1, - asm: riscv.AFCVTDL, + name: "SRAconst", + auxType: auxInt32, + argLen: 1, + asm: mips.ASRA, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FCVTWD", + name: "CLZ", argLen: 1, - asm: riscv.AFCVTWD, + asm: mips.ACLZ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FCVTLD", - argLen: 1, - asm: riscv.AFCVTLD, + name: "SGT", + argLen: 2, + asm: mips.ASGT, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FCVTDS", - argLen: 1, - asm: riscv.AFCVTDS, + name: "SGTconst", + auxType: auxInt32, + argLen: 1, + asm: mips.ASGT, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 
F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FCVTSD", + name: "SGTzero", argLen: 1, - asm: riscv.AFCVTSD, + asm: mips.ASGT, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FMOVDload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: riscv.AMOVD, + name: "SGTU", + argLen: 2, + asm: mips.ASGTU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FMOVDstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: riscv.AMOVD, + name: "SGTUconst", + auxType: auxInt32, + argLen: 1, + asm: mips.ASGTU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FEQD", - argLen: 2, - commutative: true, - asm: riscv.AFEQD, + name: "SGTUzero", + argLen: 1, + asm: mips.ASGTU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 
R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FNED", - argLen: 2, - commutative: true, - asm: riscv.AFNED, + name: "CMPEQF", + argLen: 2, + asm: mips.ACMPEQF, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "FLTD", + name: "CMPEQD", argLen: 2, - asm: riscv.AFLTD, + asm: mips.ACMPEQD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "FLED", + name: "CMPGEF", argLen: 2, - asm: riscv.AFLED, + asm: mips.ACMPGEF, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "LoweredFMIND", - argLen: 2, - commutative: true, - resultNotInArgs: true, - asm: riscv.AFMIND, + name: "CMPGED", + argLen: 2, + asm: mips.ACMPGED, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "LoweredFMAXD", - argLen: 2, - commutative: true, - resultNotInArgs: true, - asm: riscv.AFMAXD, + name: "CMPGTF", + argLen: 2, + asm: mips.ACMPGTF, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 
9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, - { - name: "FADDS", - argLen: 2, - commutative: true, - resultInArg0: true, - asm: s390x.AFADDS, + name: "CMPGTD", + argLen: 2, + asm: mips.ACMPGTD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, + }, + }, + { + name: "MOVWconst", + auxType: auxInt32, + argLen: 0, + rematerializeable: true, + asm: mips.AMOVW, + reg: regInfo{ outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FADD", - argLen: 2, - commutative: true, - resultInArg0: true, - asm: s390x.AFADD, + name: "MOVFconst", + auxType: auxFloat32, + argLen: 0, + rematerializeable: true, + asm: mips.AMOVF, reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "FSUBS", - argLen: 2, - resultInArg0: true, - asm: s390x.AFSUBS, + name: "MOVDconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: mips.AMOVD, reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "FSUB", - argLen: 2, - resultInArg0: true, - asm: s390x.AFSUB, + name: "MOVWaddr", + auxType: auxSymOff, + argLen: 1, + rematerializeable: true, + symEffect: SymAddr, + asm: mips.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 140737555464192}, // SP SB }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FMULS", - argLen: 2, - commutative: true, - resultInArg0: true, - asm: s390x.AFMULS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - 
}, - }, - }, - { - name: "FMUL", - argLen: 2, - commutative: true, - resultInArg0: true, - asm: s390x.AFMUL, + name: "MOVBload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FDIVS", - argLen: 2, - resultInArg0: true, - asm: s390x.AFDIVS, + name: "MOVBUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FDIV", - argLen: 2, - resultInArg0: true, - asm: s390x.AFDIV, + name: "MOVHload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FNEGS", - argLen: 1, - clobberFlags: true, - asm: s390x.AFNEGS, + name: "MOVHUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FNEG", - argLen: 1, - clobberFlags: true, - asm: s390x.AFNEG, + name: "MOVWload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 
R24 R25 R28 R31 }, }, }, { - name: "FMADDS", - argLen: 3, - resultInArg0: true, - asm: s390x.AFMADDS, + name: "MOVFload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVF, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "FMADD", - argLen: 3, - resultInArg0: true, - asm: s390x.AFMADD, + name: "MOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "FMSUBS", - argLen: 3, - resultInArg0: true, - asm: s390x.AFMSUBS, + name: "MOVBstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, }, }, { - name: "FMSUB", - argLen: 3, - resultInArg0: true, - asm: s390x.AFMSUB, + name: "MOVHstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, }, }, { - name: "LPDFR", - argLen: 1, - asm: s390x.ALPDFR, + name: "MOVWstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: 
[]outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, }, }, { - name: "LNDFR", - argLen: 1, - asm: s390x.ALNDFR, + name: "MOVFstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVF, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, }, }, { - name: "CPSDR", - argLen: 2, - asm: s390x.ACPSDR, + name: "MOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, }, }, { - name: "FIDBR", - auxType: auxInt8, - argLen: 1, - asm: s390x.AFIDBR, + name: "MOVBstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, }, }, { - name: "FMOVSload", + name: "MOVHstorezero", auxType: auxSymOff, argLen: 2, faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AFMOVS, + symEffect: SymWrite, + asm: mips.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - }, - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, }, }, { - name: "FMOVDload", + name: "MOVWstorezero", auxType: auxSymOff, argLen: 2, faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AFMOVD, + symEffect: SymWrite, + asm: mips.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - }, - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, }, }, { - name: "FMOVSconst", - auxType: auxFloat32, - argLen: 0, - rematerializeable: true, - asm: s390x.AFMOVS, + name: "MOVWfpgp", + argLen: 1, + asm: mips.AMOVW, reg: regInfo{ + inputs: []inputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 
F30 + }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FMOVDconst", - auxType: auxFloat64, - argLen: 0, - rematerializeable: true, - asm: s390x.AFMOVD, + name: "MOVWgpfp", + argLen: 1, + asm: mips.AMOVW, reg: regInfo{ + inputs: []inputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "FMOVSloadidx", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: s390x.AFMOVS, + name: "MOVBreg", + argLen: 1, + asm: mips.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FMOVDloadidx", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: s390x.AFMOVD, + name: "MOVBUreg", + argLen: 1, + asm: mips.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FMOVSstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.AFMOVS, + name: "MOVHreg", + argLen: 1, + asm: mips.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FMOVDstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.AFMOVD, + name: "MOVHUreg", + argLen: 1, + asm: mips.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FMOVSstoreidx", - auxType: auxSymOff, - argLen: 4, - symEffect: SymWrite, - asm: s390x.AFMOVS, + name: "MOVWreg", + argLen: 1, + asm: mips.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 
R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FMOVDstoreidx", - auxType: auxSymOff, - argLen: 4, - symEffect: SymWrite, - asm: s390x.AFMOVD, + name: "MOVWnop", + argLen: 1, + resultInArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "ADD", - argLen: 2, - commutative: true, - clobberFlags: true, - asm: s390x.AADD, + name: "CMOVZ", + argLen: 3, + resultInArg0: true, + asm: mips.ACMOVZ, reg: regInfo{ inputs: []inputInfo{ - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {2, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "ADDW", + name: "CMOVZzero", argLen: 2, - commutative: true, - clobberFlags: true, - asm: s390x.AADDW, + resultInArg0: true, + asm: mips.ACMOVZ, reg: regInfo{ inputs: []inputInfo{ - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "ADDconst", - auxType: auxInt32, - argLen: 1, - clobberFlags: true, - asm: s390x.AADD, + name: "MOVWF", + argLen: 1, + asm: mips.AMOVWF, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "ADDWconst", - auxType: auxInt32, - argLen: 1, - clobberFlags: true, - asm: s390x.AADDW, + name: "MOVWD", + argLen: 1, + asm: mips.AMOVWD, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, 
outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "ADDload", - auxType: auxSymOff, - argLen: 3, - resultInArg0: true, - clobberFlags: true, - faultOnNilArg1: true, - symEffect: SymRead, - asm: s390x.AADD, + name: "TRUNCFW", + argLen: 1, + asm: mips.ATRUNCFW, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "ADDWload", - auxType: auxSymOff, - argLen: 3, - resultInArg0: true, - clobberFlags: true, - faultOnNilArg1: true, - symEffect: SymRead, - asm: s390x.AADDW, + name: "TRUNCDW", + argLen: 1, + asm: mips.ATRUNCDW, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "SUB", - argLen: 2, - clobberFlags: true, - asm: s390x.ASUB, + name: "MOVFD", + argLen: 1, + asm: mips.AMOVFD, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "SUBW", - argLen: 2, - clobberFlags: true, - asm: s390x.ASUBW, + name: "MOVDF", + argLen: 1, + asm: mips.AMOVDF, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "SUBconst", - auxType: auxInt32, + name: "CALLstatic", + auxType: auxCallOff, argLen: 1, - resultInArg0: true, clobberFlags: true, - asm: s390x.ASUB, + call: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, + clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO }, }, { - name: "SUBWconst", - auxType: auxInt32, + name: "CALLtail", + auxType: auxCallOff, argLen: 1, - resultInArg0: true, clobberFlags: true, - asm: s390x.ASUBW, + call: true, + tailCall: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, + clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 
R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO }, }, { - name: "SUBload", - auxType: auxSymOff, - argLen: 3, - resultInArg0: true, - clobberFlags: true, - faultOnNilArg1: true, - symEffect: SymRead, - asm: s390x.ASUB, + name: "CALLclosure", + auxType: auxCallOff, + argLen: 3, + clobberFlags: true, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 4194304}, // R22 + {0, 402653182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP R31 }, + clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO }, }, { - name: "SUBWload", - auxType: auxSymOff, - argLen: 3, - resultInArg0: true, - clobberFlags: true, - faultOnNilArg1: true, - symEffect: SymRead, - asm: s390x.ASUBW, + name: "CALLinter", + auxType: auxCallOff, + argLen: 2, + clobberFlags: true, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, + clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO }, }, { - name: "MULLD", - argLen: 2, - commutative: true, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AMULLD, + name: "LoweredAtomicLoad8", + argLen: 2, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "MULLW", - argLen: 2, - commutative: true, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AMULLW, + name: "LoweredAtomicLoad32", + argLen: 2, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "MULLDconst", - auxType: auxInt32, - argLen: 1, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AMULLD, + name: "LoweredAtomicStore8", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 
469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, }, }, { - name: "MULLWconst", - auxType: auxInt32, - argLen: 1, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AMULLW, + name: "LoweredAtomicStore32", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, }, }, { - name: "MULLDload", - auxType: auxSymOff, - argLen: 3, - resultInArg0: true, - clobberFlags: true, - faultOnNilArg1: true, - symEffect: SymRead, - asm: s390x.AMULLD, + name: "LoweredAtomicStorezero", + argLen: 2, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, }, }, { - name: "MULLWload", - auxType: auxSymOff, - argLen: 3, - resultInArg0: true, - clobberFlags: true, - faultOnNilArg1: true, - symEffect: SymRead, - asm: s390x.AMULLW, + name: "LoweredAtomicExchange", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "MULHD", - argLen: 2, - commutative: true, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AMULHD, + name: "LoweredAtomicAdd", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, - clobbers: 2048, // R11 outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "MULHDU", - argLen: 2, - commutative: true, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AMULHDU, + name: "LoweredAtomicAddconst", + auxType: auxInt32, + argLen: 2, + resultNotInArgs: 
true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, - clobbers: 2048, // R11 outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "DIVD", - argLen: 2, - resultInArg0: true, - clobberFlags: true, - asm: s390x.ADIVD, + name: "LoweredAtomicCas", + argLen: 4, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {2, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, - clobbers: 2048, // R11 outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "DIVW", - argLen: 2, - resultInArg0: true, - clobberFlags: true, - asm: s390x.ADIVW, + name: "LoweredAtomicAnd", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + asm: mips.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - }, - clobbers: 2048, // R11 - outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, }, }, { - name: "DIVDU", - argLen: 2, - resultInArg0: true, - clobberFlags: true, - asm: s390x.ADIVDU, + name: "LoweredAtomicOr", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + asm: mips.AOR, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - }, - clobbers: 2048, // R11 - outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, }, }, { - name: "DIVWU", - argLen: 2, - resultInArg0: true, - clobberFlags: true, - asm: s390x.ADIVWU, + name: "LoweredZero", + auxType: auxInt32, + argLen: 3, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - }, - clobbers: 2048, // R11 - outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2}, // R1 + {1, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 
R18 R19 R20 R21 R22 R24 R25 R28 R31 }, + clobbers: 2, // R1 }, }, { - name: "MODD", - argLen: 2, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AMODD, + name: "LoweredMove", + auxType: auxInt32, + argLen: 4, + faultOnNilArg0: true, + faultOnNilArg1: true, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - }, - clobbers: 2048, // R11 - outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4}, // R2 + {1, 2}, // R1 + {2, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, + clobbers: 6, // R1 R2 }, }, { - name: "MODW", - argLen: 2, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AMODW, + name: "LoweredNilCheck", + argLen: 2, + nilCheck: true, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, - clobbers: 2048, // R11 + }, + }, + { + name: "FPFlagTrue", + argLen: 1, + reg: regInfo{ outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "MODDU", - argLen: 2, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AMODDU, + name: "FPFlagFalse", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - }, - clobbers: 2048, // R11 outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "MODWU", - argLen: 2, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AMODWU, + name: "LoweredGetClosurePtr", + argLen: 0, + zeroWidth: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - }, - clobbers: 2048, // R11 outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4194304}, // R22 }, }, }, { - name: "AND", - argLen: 2, - commutative: true, - clobberFlags: true, - asm: s390x.AAND, + name: "LoweredGetCallerSP", + argLen: 1, + rematerializeable: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "ANDW", - argLen: 2, - commutative: true, - clobberFlags: true, - asm: s390x.AANDW, + name: "LoweredGetCallerPC", + argLen: 0, + rematerializeable: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "ANDconst", + name: "LoweredWB", auxType: auxInt64, argLen: 1, - resultInArg0: true, 
clobberFlags: true, - asm: s390x.AAND, reg: regInfo{ - inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, + clobbers: 140737219919872, // R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 16777216}, // R25 }, }, }, { - name: "ANDWconst", - auxType: auxInt32, - argLen: 1, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AANDW, + name: "LoweredPubBarrier", + argLen: 1, + hasSideEffects: true, + asm: mips.ASYNC, + reg: regInfo{}, + }, + { + name: "LoweredPanicBoundsA", + auxType: auxInt64, + argLen: 3, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 8}, // R3 + {1, 16}, // R4 }, }, }, { - name: "ANDload", - auxType: auxSymOff, - argLen: 3, - resultInArg0: true, - clobberFlags: true, - faultOnNilArg1: true, - symEffect: SymRead, - asm: s390x.AAND, + name: "LoweredPanicBoundsB", + auxType: auxInt64, + argLen: 3, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 4}, // R2 + {1, 8}, // R3 }, }, }, { - name: "ANDWload", - auxType: auxSymOff, - argLen: 3, - resultInArg0: true, - clobberFlags: true, - faultOnNilArg1: true, - symEffect: SymRead, - asm: s390x.AANDW, + name: "LoweredPanicBoundsC", + auxType: auxInt64, + argLen: 3, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 2}, // R1 + {1, 4}, // R2 }, }, }, { - name: "OR", - argLen: 2, - commutative: true, - clobberFlags: true, - asm: s390x.AOR, + name: "LoweredPanicExtendA", + auxType: auxInt64, + argLen: 4, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 32}, // R5 + {1, 8}, // R3 + {2, 16}, // R4 }, }, }, { - name: "ORW", - argLen: 2, - commutative: true, - clobberFlags: true, - asm: s390x.AORW, + name: "LoweredPanicExtendB", + auxType: auxInt64, + argLen: 4, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 32}, // R5 + {1, 4}, // R2 + {2, 8}, // R3 }, }, }, { - name: "ORconst", - auxType: auxInt64, - argLen: 1, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AOR, + name: "LoweredPanicExtendC", + auxType: auxInt64, + argLen: 4, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 32}, // R5 + {1, 2}, // R1 + {2, 4}, // R2 }, }, }, + { - name: "ORWconst", - auxType: auxInt32, - argLen: 1, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AORW, + name: "ADDV", + argLen: 2, + commutative: true, + asm: mips.AADDVU, reg: regInfo{ inputs: []inputInfo{ - 
{0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "ORload", - auxType: auxSymOff, - argLen: 3, - resultInArg0: true, - clobberFlags: true, - faultOnNilArg1: true, - symEffect: SymRead, - asm: s390x.AOR, + name: "ADDVconst", + auxType: auxInt64, + argLen: 1, + asm: mips.AADDVU, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 268435454}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "ORWload", - auxType: auxSymOff, - argLen: 3, - resultInArg0: true, - clobberFlags: true, - faultOnNilArg1: true, - symEffect: SymRead, - asm: s390x.AORW, + name: "SUBV", + argLen: 2, + asm: mips.ASUBVU, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "XOR", - argLen: 2, - commutative: true, - clobberFlags: true, - asm: s390x.AXOR, + name: "SUBVconst", + auxType: auxInt64, + argLen: 1, + asm: mips.ASUBVU, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "XORW", - argLen: 2, - commutative: true, - clobberFlags: true, - asm: s390x.AXORW, + name: "MULV", + argLen: 2, + commutative: true, + asm: mips.AMULV, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504606846976}, // HI + {1, 2305843009213693952}, // LO }, }, }, { - name: "XORconst", - auxType: auxInt64, - argLen: 1, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AXOR, + name: "MULVU", + argLen: 2, + commutative: true, + asm: mips.AMULVU, reg: 
regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504606846976}, // HI + {1, 2305843009213693952}, // LO }, }, }, { - name: "XORWconst", - auxType: auxInt32, - argLen: 1, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AXORW, + name: "DIVV", + argLen: 2, + asm: mips.ADIVV, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504606846976}, // HI + {1, 2305843009213693952}, // LO }, }, }, { - name: "XORload", - auxType: auxSymOff, - argLen: 3, - resultInArg0: true, - clobberFlags: true, - faultOnNilArg1: true, - symEffect: SymRead, - asm: s390x.AXOR, + name: "DIVVU", + argLen: 2, + asm: mips.ADIVVU, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504606846976}, // HI + {1, 2305843009213693952}, // LO }, }, }, { - name: "XORWload", - auxType: auxSymOff, - argLen: 3, - resultInArg0: true, - clobberFlags: true, - faultOnNilArg1: true, - symEffect: SymRead, - asm: s390x.AXORW, + name: "ADDF", + argLen: 2, + commutative: true, + asm: mips.AADDF, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ADDC", + name: "ADDD", argLen: 2, commutative: true, - asm: s390x.AADDC, + asm: mips.AADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 
F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ADDCconst", - auxType: auxInt16, - argLen: 1, - asm: s390x.AADDC, + name: "SUBF", + argLen: 2, + asm: mips.ASUBF, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ADDE", - argLen: 3, - commutative: true, - resultInArg0: true, - asm: s390x.AADDE, + name: "SUBD", + argLen: 2, + asm: mips.ASUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SUBC", - argLen: 2, - asm: s390x.ASUBC, + name: "MULF", + argLen: 2, + commutative: true, + asm: mips.AMULF, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SUBE", - argLen: 3, - resultInArg0: true, - asm: s390x.ASUBE, + name: "MULD", + argLen: 2, + commutative: true, + asm: mips.AMULD, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CMP", + name: "DIVF", argLen: 2, - asm: s390x.ACMP, + asm: mips.ADIVF, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + 
{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CMPW", + name: "DIVD", argLen: 2, - asm: s390x.ACMPW, + asm: mips.ADIVD, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CMPU", - argLen: 2, - asm: s390x.ACMPU, + name: "AND", + argLen: 2, + commutative: true, + asm: mips.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "CMPWU", - argLen: 2, - asm: s390x.ACMPWU, + name: "ANDconst", + auxType: auxInt64, + argLen: 1, + asm: mips.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "CMPconst", - auxType: auxInt32, - argLen: 1, - asm: s390x.ACMP, + name: "OR", + argLen: 2, + commutative: true, + asm: mips.AOR, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "CMPWconst", - auxType: auxInt32, + name: "ORconst", + auxType: auxInt64, argLen: 1, - asm: s390x.ACMPW, + asm: mips.AOR, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "CMPUconst", - 
auxType: auxInt32, - argLen: 1, - asm: s390x.ACMPU, + name: "XOR", + argLen: 2, + commutative: true, + asm: mips.AXOR, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "CMPWUconst", - auxType: auxInt32, + name: "XORconst", + auxType: auxInt64, argLen: 1, - asm: s390x.ACMPWU, + asm: mips.AXOR, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "FCMPS", - argLen: 2, - asm: s390x.ACEBR, + name: "NOR", + argLen: 2, + commutative: true, + asm: mips.ANOR, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "FCMP", - argLen: 2, - asm: s390x.AFCMPU, + name: "NORconst", + auxType: auxInt64, + argLen: 1, + asm: mips.ANOR, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "LTDBR", + name: "NEGV", argLen: 1, - asm: s390x.ALTDBR, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "LTEBR", + name: "NEGF", argLen: 1, - asm: s390x.ALTEBR, + asm: mips.ANEGF, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SLD", - argLen: 2, - asm: s390x.ASLD, + name: "NEGD", + argLen: 1, + asm: mips.ANEGD, reg: regInfo{ inputs: []inputInfo{ - {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {0, 23551}, // R0 R1 R2 R3 R4 R5 
R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SLW", - argLen: 2, - asm: s390x.ASLW, + name: "ABSD", + argLen: 1, + asm: mips.AABSD, reg: regInfo{ inputs: []inputInfo{ - {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SLDconst", - auxType: auxUInt8, - argLen: 1, - asm: s390x.ASLD, + name: "SQRTD", + argLen: 1, + asm: mips.ASQRTD, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SLWconst", - auxType: auxUInt8, - argLen: 1, - asm: s390x.ASLW, + name: "SQRTF", + argLen: 1, + asm: mips.ASQRTF, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SRD", + name: "SLLV", argLen: 2, - asm: s390x.ASRD, + asm: mips.ASLLV, reg: regInfo{ inputs: []inputInfo{ - {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "SRW", - argLen: 2, - asm: s390x.ASRW, + name: "SLLVconst", + auxType: auxInt64, + argLen: 1, + asm: mips.ASLLV, reg: regInfo{ inputs: []inputInfo{ - {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: 
"SRDconst", - auxType: auxUInt8, - argLen: 1, - asm: s390x.ASRD, + name: "SRLV", + argLen: 2, + asm: mips.ASRLV, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "SRWconst", - auxType: auxUInt8, + name: "SRLVconst", + auxType: auxInt64, argLen: 1, - asm: s390x.ASRW, + asm: mips.ASRLV, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "SRAD", - argLen: 2, - clobberFlags: true, - asm: s390x.ASRAD, + name: "SRAV", + argLen: 2, + asm: mips.ASRAV, reg: regInfo{ inputs: []inputInfo{ - {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "SRAW", - argLen: 2, - clobberFlags: true, - asm: s390x.ASRAW, + name: "SRAVconst", + auxType: auxInt64, + argLen: 1, + asm: mips.ASRAV, reg: regInfo{ inputs: []inputInfo{ - {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "SRADconst", - auxType: auxUInt8, - argLen: 1, - clobberFlags: true, - asm: s390x.ASRAD, + name: "SGT", + argLen: 2, + asm: mips.ASGT, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "SRAWconst", - auxType: auxUInt8, - argLen: 1, - clobberFlags: true, - asm: s390x.ASRAW, + name: "SGTconst", + auxType: auxInt64, + argLen: 1, + asm: mips.ASGT, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 
R25 g R31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "RLLG", + name: "SGTU", argLen: 2, - asm: s390x.ARLLG, + asm: mips.ASGTU, reg: regInfo{ inputs: []inputInfo{ - {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "RLL", - argLen: 2, - asm: s390x.ARLL, + name: "SGTUconst", + auxType: auxInt64, + argLen: 1, + asm: mips.ASGTU, reg: regInfo{ inputs: []inputInfo{ - {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "RLLconst", - auxType: auxUInt8, - argLen: 1, - asm: s390x.ARLL, + name: "CMPEQF", + argLen: 2, + asm: mips.ACMPEQF, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "RXSBG", - auxType: auxS390XRotateParams, - argLen: 2, - resultInArg0: true, - clobberFlags: true, - asm: s390x.ARXSBG, + name: "CMPEQD", + argLen: 2, + asm: mips.ACMPEQD, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "RISBGZ", - auxType: auxS390XRotateParams, - argLen: 1, - clobberFlags: true, - asm: s390x.ARISBGZ, + name: "CMPGEF", + argLen: 2, + asm: mips.ACMPGEF, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "NEG", - argLen: 1, - clobberFlags: true, - asm: s390x.ANEG, + 
name: "CMPGED", + argLen: 2, + asm: mips.ACMPGED, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "NEGW", - argLen: 1, - clobberFlags: true, - asm: s390x.ANEGW, + name: "CMPGTF", + argLen: 2, + asm: mips.ACMPGTF, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "NOT", - argLen: 1, - resultInArg0: true, - clobberFlags: true, + name: "CMPGTD", + argLen: 2, + asm: mips.ACMPGTD, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "NOTW", - argLen: 1, - resultInArg0: true, - clobberFlags: true, + name: "MOVVconst", + auxType: auxInt64, + argLen: 0, + rematerializeable: true, + asm: mips.AMOVV, reg: regInfo{ - inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "FSQRT", - argLen: 1, - asm: s390x.AFSQRT, + name: "MOVFconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: mips.AMOVF, reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "FSQRTS", - argLen: 1, - asm: s390x.AFSQRTS, + name: "MOVDconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: mips.AMOVD, reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LOCGR", - auxType: auxS390XCCMask, - argLen: 3, - resultInArg0: true, - asm: s390x.ALOCGR, + name: "MOVVaddr", + auxType: auxSymOff, + argLen: 1, + rematerializeable: true, + symEffect: SymAddr, + asm: mips.AMOVV, reg: regInfo{ 
inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 4611686018460942336}, // SP SB }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MOVBreg", - argLen: 1, - asm: s390x.AMOVB, + name: "MOVBload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MOVBZreg", - argLen: 1, - asm: s390x.AMOVBZ, + name: "MOVBUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MOVHreg", - argLen: 1, - asm: s390x.AMOVH, + name: "MOVHload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MOVHZreg", - argLen: 1, - asm: s390x.AMOVHZ, + name: "MOVHUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MOVWreg", - argLen: 1, - asm: s390x.AMOVW, + name: "MOVWload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MOVWZreg", - argLen: 1, - asm: s390x.AMOVWZ, + name: "MOVWUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: 
true, + symEffect: SymRead, + asm: mips.AMOVWU, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MOVDconst", - auxType: auxInt64, - argLen: 0, - rematerializeable: true, - asm: s390x.AMOVD, + name: "MOVVload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVV, reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "LDGR", - argLen: 1, - asm: s390x.ALDGR, + name: "MOVFload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVF, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LGDR", - argLen: 1, - asm: s390x.ALGDR, + name: "MOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CFDBRA", - argLen: 1, - clobberFlags: true, - asm: s390x.ACFDBRA, + name: "MOVBstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "CGDBRA", - argLen: 1, - clobberFlags: true, - asm: s390x.ACGDBRA, + name: "MOVHstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "CFEBRA", - argLen: 1, - clobberFlags: true, - asm: s390x.ACFEBRA, + name: "MOVWstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "CGEBRA", - argLen: 1, - clobberFlags: true, - asm: s390x.ACGEBRA, + name: "MOVVstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "CEFBRA", - argLen: 1, - clobberFlags: true, - asm: s390x.ACEFBRA, + name: "MOVFstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVF, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CDFBRA", - argLen: 1, - clobberFlags: true, - asm: s390x.ACDFBRA, + name: "MOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CEGBRA", - argLen: 1, - clobberFlags: true, - asm: s390x.ACEGBRA, + name: "MOVBstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "CDGBRA", - argLen: 1, - clobberFlags: true, - asm: s390x.ACDGBRA, + name: "MOVHstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: 
mips.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "CLFEBR", - argLen: 1, - clobberFlags: true, - asm: s390x.ACLFEBR, + name: "MOVWstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "CLFDBR", - argLen: 1, - clobberFlags: true, - asm: s390x.ACLFDBR, + name: "MOVVstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "CLGEBR", - argLen: 1, - clobberFlags: true, - asm: s390x.ACLGEBR, + name: "MOVWfpgp", + argLen: 1, + asm: mips.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "CLGDBR", - argLen: 1, - clobberFlags: true, - asm: s390x.ACLGDBR, + name: "MOVWgpfp", + argLen: 1, + asm: mips.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CELFBR", - argLen: 1, - clobberFlags: true, - asm: s390x.ACELFBR, + name: "MOVVfpgp", + argLen: 1, + asm: mips.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "CDLFBR", - argLen: 1, - clobberFlags: true, - asm: s390x.ACDLFBR, + name: "MOVVgpfp", + argLen: 1, + asm: mips.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 
}, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CELGBR", - argLen: 1, - clobberFlags: true, - asm: s390x.ACELGBR, + name: "MOVBreg", + argLen: 1, + asm: mips.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "CDLGBR", - argLen: 1, - clobberFlags: true, - asm: s390x.ACDLGBR, + name: "MOVBUreg", + argLen: 1, + asm: mips.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "LEDBR", + name: "MOVHreg", argLen: 1, - asm: s390x.ALEDBR, + asm: mips.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "LDEBR", + name: "MOVHUreg", argLen: 1, - asm: s390x.ALDEBR, + asm: mips.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MOVDaddr", - auxType: auxSymOff, - argLen: 1, - rematerializeable: true, - symEffect: SymAddr, + name: "MOVWreg", + argLen: 1, + asm: mips.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295000064}, // SP SB + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MOVDaddridx", - auxType: auxSymOff, - argLen: 2, - symEffect: SymAddr, + name: "MOVWUreg", + argLen: 1, + asm: mips.AMOVWU, reg: regInfo{ inputs: []inputInfo{ - {0, 4295000064}, // SP SB - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 
R24 R25 R31 }, }, }, { - name: "MOVBZload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AMOVBZ, + name: "MOVVreg", + argLen: 1, + asm: mips.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MOVBload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AMOVB, + name: "MOVVnop", + argLen: 1, + resultInArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MOVHZload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AMOVHZ, + name: "MOVWF", + argLen: 1, + asm: mips.AMOVWF, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVHload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AMOVH, + name: "MOVWD", + argLen: 1, + asm: mips.AMOVWD, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVWZload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AMOVWZ, + name: "MOVVF", + argLen: 1, + asm: mips.AMOVVF, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVWload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AMOVW, + name: "MOVVD", + argLen: 1, + asm: mips.AMOVVD, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 
F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVDload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AMOVD, + name: "TRUNCFW", + argLen: 1, + asm: mips.ATRUNCFW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVWBR", + name: "TRUNCDW", argLen: 1, - asm: s390x.AMOVWBR, + asm: mips.ATRUNCDW, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVDBR", + name: "TRUNCFV", argLen: 1, - asm: s390x.AMOVDBR, + asm: mips.ATRUNCFV, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVHBRload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AMOVHBR, + name: "TRUNCDV", + argLen: 1, + asm: mips.ATRUNCDV, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVWBRload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AMOVWBR, + name: "MOVFD", + argLen: 1, + asm: mips.AMOVFD, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVDBRload", - auxType: 
auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AMOVDBR, + name: "MOVDF", + argLen: 1, + asm: mips.AMOVDF, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVBstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.AMOVB, + name: "CALLstatic", + auxType: auxCallOff, + argLen: 1, + clobberFlags: true, + call: true, + reg: regInfo{ + clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO + }, + }, + { + name: "CALLtail", + auxType: auxCallOff, + argLen: 1, + clobberFlags: true, + call: true, + tailCall: true, + reg: regInfo{ + clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO + }, + }, + { + name: "CALLclosure", + auxType: auxCallOff, + argLen: 3, + clobberFlags: true, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 4194304}, // R22 + {0, 201326590}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP R31 }, + clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO }, }, { - name: "MOVHstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.AMOVH, + name: "CALLinter", + auxType: auxCallOff, + argLen: 2, + clobberFlags: true, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, + clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO }, }, { - name: "MOVWstore", - auxType: auxSymOff, - argLen: 3, + name: "DUFFZERO", + auxType: auxInt64, + argLen: 2, faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, + clobbers: 134217730, // R1 R31 }, }, { - name: "MOVDstore", - auxType: auxSymOff, + name: "DUFFCOPY", + auxType: auxInt64, argLen: 3, faultOnNilArg0: 
true, - symEffect: SymWrite, - asm: s390x.AMOVD, + faultOnNilArg1: true, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4}, // R2 + {1, 2}, // R1 }, + clobbers: 134217734, // R1 R2 R31 }, }, { - name: "MOVHBRstore", - auxType: auxSymOff, + name: "LoweredZero", + auxType: auxInt64, argLen: 3, + clobberFlags: true, faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.AMOVHBR, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 2}, // R1 + {1, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, + clobbers: 2, // R1 }, }, { - name: "MOVWBRstore", - auxType: auxSymOff, - argLen: 3, + name: "LoweredMove", + auxType: auxInt64, + argLen: 4, + clobberFlags: true, faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.AMOVWBR, + faultOnNilArg1: true, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4}, // R2 + {1, 2}, // R1 + {2, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, + clobbers: 6, // R1 R2 }, }, { - name: "MOVDBRstore", - auxType: auxSymOff, + name: "LoweredAtomicAnd32", argLen: 3, faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.AMOVDBR, + hasSideEffects: true, + unsafePoint: true, + asm: mips.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "MVC", - auxType: auxSymValAndOff, + name: "LoweredAtomicOr32", argLen: 3, - clobberFlags: true, faultOnNilArg0: true, - faultOnNilArg1: true, - symEffect: SymNone, - asm: s390x.AMVC, + hasSideEffects: true, + unsafePoint: true, + asm: mips.AOR, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "MOVBZloadidx", - auxType: auxSymOff, - argLen: 3, - commutative: true, - symEffect: SymRead, - asm: s390x.AMOVBZ, + name: "LoweredAtomicLoad8", + argLen: 2, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MOVBloadidx", - auxType: auxSymOff, - argLen: 3, - commutative: true, - symEffect: SymRead, - asm: s390x.AMOVB, + name: "LoweredAtomicLoad32", + argLen: 2, + faultOnNilArg0: true, 
reg: regInfo{ inputs: []inputInfo{ - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MOVHZloadidx", - auxType: auxSymOff, - argLen: 3, - commutative: true, - symEffect: SymRead, - asm: s390x.AMOVHZ, + name: "LoweredAtomicLoad64", + argLen: 2, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MOVHloadidx", - auxType: auxSymOff, - argLen: 3, - commutative: true, - symEffect: SymRead, - asm: s390x.AMOVH, + name: "LoweredAtomicStore8", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "MOVWZloadidx", - auxType: auxSymOff, - argLen: 3, - commutative: true, - symEffect: SymRead, - asm: s390x.AMOVWZ, + name: "LoweredAtomicStore32", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "MOVWloadidx", - auxType: auxSymOff, - argLen: 3, - commutative: true, - symEffect: SymRead, - asm: s390x.AMOVW, + name: "LoweredAtomicStore64", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "MOVDloadidx", - auxType: auxSymOff, - argLen: 3, - commutative: true, - symEffect: SymRead, - asm: s390x.AMOVD, + name: "LoweredAtomicStorezero32", + argLen: 2, + faultOnNilArg0: true, + 
hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + { + name: "LoweredAtomicStorezero64", + argLen: 2, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "MOVHBRloadidx", - auxType: auxSymOff, - argLen: 3, - commutative: true, - symEffect: SymRead, - asm: s390x.AMOVHBR, + name: "LoweredAtomicExchange32", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MOVWBRloadidx", - auxType: auxSymOff, - argLen: 3, - commutative: true, - symEffect: SymRead, - asm: s390x.AMOVWBR, + name: "LoweredAtomicExchange64", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MOVDBRloadidx", - auxType: auxSymOff, - argLen: 3, - commutative: true, - symEffect: SymRead, - asm: s390x.AMOVDBR, + name: "LoweredAtomicAdd32", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MOVBstoreidx", - auxType: auxSymOff, - argLen: 4, - commutative: true, - symEffect: SymWrite, - asm: s390x.AMOVB, + name: "LoweredAtomicAdd64", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + 
hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MOVHstoreidx", - auxType: auxSymOff, - argLen: 4, - commutative: true, - symEffect: SymWrite, - asm: s390x.AMOVH, + name: "LoweredAtomicAddconst32", + auxType: auxInt32, + argLen: 2, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MOVWstoreidx", - auxType: auxSymOff, - argLen: 4, - commutative: true, - symEffect: SymWrite, - asm: s390x.AMOVW, + name: "LoweredAtomicAddconst64", + auxType: auxInt64, + argLen: 2, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MOVDstoreidx", - auxType: auxSymOff, - argLen: 4, - commutative: true, - symEffect: SymWrite, - asm: s390x.AMOVD, + name: "LoweredAtomicCas32", + argLen: 4, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {2, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MOVHBRstoreidx", - auxType: auxSymOff, - argLen: 4, - commutative: true, - symEffect: SymWrite, - asm: s390x.AMOVHBR, + name: "LoweredAtomicCas64", + argLen: 4, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 
R11 R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {2, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MOVWBRstoreidx", - auxType: auxSymOff, - argLen: 4, - commutative: true, - symEffect: SymWrite, - asm: s390x.AMOVWBR, + name: "LoweredNilCheck", + argLen: 2, + nilCheck: true, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, }, }, { - name: "MOVDBRstoreidx", - auxType: auxSymOff, - argLen: 4, - commutative: true, - symEffect: SymWrite, - asm: s390x.AMOVDBR, + name: "FPFlagTrue", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MOVBstoreconst", - auxType: auxSymValAndOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.AMOVB, + name: "FPFlagFalse", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MOVHstoreconst", - auxType: auxSymValAndOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.AMOVH, + name: "LoweredGetClosurePtr", + argLen: 0, + zeroWidth: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + outputs: []outputInfo{ + {0, 4194304}, // R22 }, }, }, { - name: "MOVWstoreconst", - auxType: auxSymValAndOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.AMOVW, + name: "LoweredGetCallerSP", + argLen: 1, + rematerializeable: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MOVDstoreconst", - auxType: auxSymValAndOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.AMOVD, + name: "LoweredGetCallerPC", + argLen: 0, + rematerializeable: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "CLEAR", - auxType: auxSymValAndOff, - argLen: 2, - clobberFlags: true, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: 
s390x.ACLEAR, + name: "LoweredWB", + auxType: auxInt64, + argLen: 1, + clobberFlags: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + clobbers: 4611686018293170176, // R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO + outputs: []outputInfo{ + {0, 16777216}, // R25 }, }, }, { - name: "CALLstatic", - auxType: auxCallOff, - argLen: 1, - clobberFlags: true, - call: true, - reg: regInfo{ - clobbers: 4294933503, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 g R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, + name: "LoweredPubBarrier", + argLen: 1, + hasSideEffects: true, + asm: mips.ASYNC, + reg: regInfo{}, }, { - name: "CALLtail", - auxType: auxCallOff, - argLen: 1, - clobberFlags: true, - call: true, - tailCall: true, + name: "LoweredPanicBoundsA", + auxType: auxInt64, + argLen: 3, + call: true, reg: regInfo{ - clobbers: 4294933503, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 g R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + inputs: []inputInfo{ + {0, 8}, // R3 + {1, 16}, // R4 + }, }, }, { - name: "CALLclosure", - auxType: auxCallOff, - argLen: 3, - clobberFlags: true, - call: true, + name: "LoweredPanicBoundsB", + auxType: auxInt64, + argLen: 3, + call: true, reg: regInfo{ inputs: []inputInfo{ - {1, 4096}, // R12 - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4}, // R2 + {1, 8}, // R3 }, - clobbers: 4294933503, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 g R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, { - name: "CALLinter", - auxType: auxCallOff, - argLen: 2, - clobberFlags: true, - call: true, + name: "LoweredPanicBoundsC", + auxType: auxInt64, + argLen: 3, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 2}, // R1 + {1, 4}, // R2 }, - clobbers: 4294933503, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 g R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, + { - name: "InvertFlags", - argLen: 1, - reg: regInfo{}, - }, - { - name: "LoweredGetG", - argLen: 1, + name: "ADD", + argLen: 2, + commutative: true, + asm: ppc64.AADD, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredGetClosurePtr", - argLen: 0, - zeroWidth: true, + name: "ADDCC", + argLen: 2, + commutative: true, + asm: ppc64.AADDCC, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, outputs: []outputInfo{ - {0, 4096}, // R12 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredGetCallerSP", - argLen: 1, - rematerializeable: true, + name: "ADDconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.AADD, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 
R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredGetCallerPC", - argLen: 0, - rematerializeable: true, + name: "ADDCCconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.AADDCCC, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + clobbers: 9223372036854775808, // XER outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredNilCheck", - argLen: 2, - clobberFlags: true, - nilCheck: true, - faultOnNilArg0: true, + name: "FADD", + argLen: 2, + commutative: true, + asm: ppc64.AFADD, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "LoweredRound32F", - argLen: 1, - resultInArg0: true, - zeroWidth: true, + name: "FADDS", + argLen: 2, + commutative: true, + asm: ppc64.AFADDS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "LoweredRound64F", - argLen: 1, - resultInArg0: true, - zeroWidth: true, + name: "SUB", + argLen: 2, + asm: ppc64.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredWB", - auxType: auxInt64, - argLen: 1, - clobberFlags: true, + name: "SUBCC", + argLen: 2, + asm: ppc64.ASUBCC, reg: regInfo{ - clobbers: 4294918146, // R1 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 
R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, outputs: []outputInfo{ - {0, 512}, // R9 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredPanicBoundsA", + name: "SUBFCconst", auxType: auxInt64, - argLen: 3, - call: true, + argLen: 1, + asm: ppc64.ASUBC, reg: regInfo{ inputs: []inputInfo{ - {0, 4}, // R2 - {1, 8}, // R3 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + clobbers: 9223372036854775808, // XER + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredPanicBoundsB", - auxType: auxInt64, - argLen: 3, - call: true, + name: "FSUB", + argLen: 2, + asm: ppc64.AFSUB, reg: regInfo{ inputs: []inputInfo{ - {0, 2}, // R1 - {1, 4}, // R2 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "LoweredPanicBoundsC", - auxType: auxInt64, - argLen: 3, - call: true, + name: "FSUBS", + argLen: 2, + asm: ppc64.AFSUBS, reg: regInfo{ inputs: []inputInfo{ - {0, 1}, // R0 - {1, 2}, // R1 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "FlagEQ", - argLen: 0, - reg: regInfo{}, - }, - { - name: "FlagLT", - argLen: 0, - reg: regInfo{}, - }, - { - name: "FlagGT", - argLen: 0, - reg: regInfo{}, - }, - { - name: "FlagOV", - argLen: 0, - reg: regInfo{}, - }, - { - name: "SYNC", - argLen: 1, - asm: s390x.ASYNC, - reg: regInfo{}, - }, - { - name: "MOVBZatomicload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AMOVBZ, + name: "XSMINJDP", + argLen: 2, + asm: ppc64.AXSMINJDP, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "MOVWZatomicload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AMOVWZ, + name: "XSMAXJDP", + argLen: 2, + asm: ppc64.AXSMAXJDP, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 
9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "MOVDatomicload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AMOVD, + name: "MULLD", + argLen: 2, + commutative: true, + asm: ppc64.AMULLD, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVBatomicstore", - auxType: auxSymOff, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - symEffect: SymWrite, - asm: s390x.AMOVB, + name: "MULLW", + argLen: 2, + commutative: true, + asm: ppc64.AMULLW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVWatomicstore", - auxType: auxSymOff, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - symEffect: SymWrite, - asm: s390x.AMOVW, + name: "MULLDconst", + auxType: auxInt32, + argLen: 1, + asm: ppc64.AMULLD, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVDatomicstore", - auxType: auxSymOff, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - symEffect: SymWrite, - asm: s390x.AMOVD, + name: "MULLWconst", + auxType: auxInt32, + argLen: 1, + asm: ppc64.AMULLW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LAA", - auxType: auxSymOff, - 
argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - symEffect: SymRdWr, - asm: s390x.ALAA, + name: "MADDLD", + argLen: 3, + asm: ppc64.AMADDLD, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LAAG", - auxType: auxSymOff, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - symEffect: SymRdWr, - asm: s390x.ALAAG, + name: "MULHD", + argLen: 2, + commutative: true, + asm: ppc64.AMULHD, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "AddTupleFirst32", - argLen: 2, - reg: regInfo{}, - }, - { - name: "AddTupleFirst64", - argLen: 2, - reg: regInfo{}, - }, - { - name: "LAN", - argLen: 3, - clobberFlags: true, - hasSideEffects: true, - asm: s390x.ALAN, + name: "MULHW", + argLen: 2, + commutative: true, + asm: ppc64.AMULHW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LANfloor", - argLen: 3, - clobberFlags: true, - hasSideEffects: true, - asm: s390x.ALAN, + name: "MULHDU", + argLen: 2, + commutative: true, + asm: ppc64.AMULHDU, reg: regInfo{ inputs: []inputInfo{ - {0, 2}, // R1 - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - clobbers: 2, // R1 }, }, { - name: "LAO", - argLen: 3, - clobberFlags: true, - hasSideEffects: true, - asm: s390x.ALAO, + name: "MULHDUCC", + argLen: 2, + commutative: true, + asm: ppc64.AMULHDUCC, 
reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LAOfloor", - argLen: 3, - clobberFlags: true, - hasSideEffects: true, - asm: s390x.ALAO, - reg: regInfo{ + name: "MULHWU", + argLen: 2, + commutative: true, + asm: ppc64.AMULHWU, + reg: regInfo{ inputs: []inputInfo{ - {0, 2}, // R1 - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - clobbers: 2, // R1 }, }, { - name: "LoweredAtomicCas32", - auxType: auxSymOff, - argLen: 4, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - symEffect: SymRdWr, - asm: s390x.ACS, + name: "FMUL", + argLen: 2, + commutative: true, + asm: ppc64.AFMUL, reg: regInfo{ inputs: []inputInfo{ - {1, 1}, // R0 - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, - clobbers: 1, // R0 outputs: []outputInfo{ - {1, 0}, - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "LoweredAtomicCas64", - auxType: auxSymOff, - argLen: 4, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - symEffect: SymRdWr, - asm: s390x.ACSG, + name: "FMULS", + argLen: 2, + commutative: true, + asm: ppc64.AFMULS, reg: regInfo{ inputs: []inputInfo{ - {1, 1}, // R0 - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, - clobbers: 1, // R0 outputs: []outputInfo{ - {1, 0}, - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "LoweredAtomicExchange32", - auxType: auxSymOff, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - symEffect: SymRdWr, - asm: s390x.ACS, + name: "FMADD", + argLen: 3, + asm: ppc64.AFMADD, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 
R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {1, 0}, - {0, 1}, // R0 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "LoweredAtomicExchange64", - auxType: auxSymOff, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - symEffect: SymRdWr, - asm: s390x.ACSG, + name: "FMADDS", + argLen: 3, + asm: ppc64.AFMADDS, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {1, 0}, - {0, 1}, // R0 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "FLOGR", - argLen: 1, - clobberFlags: true, - asm: s390x.AFLOGR, + name: "FMSUB", + argLen: 3, + asm: ppc64.AFMSUB, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, - clobbers: 2, // R1 outputs: []outputInfo{ - {0, 1}, // R0 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "POPCNT", - argLen: 1, - clobberFlags: true, - asm: s390x.APOPCNT, + name: "FMSUBS", + argLen: 3, + asm: ppc64.AFMSUBS, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: 
"MLGR", + name: "SRAD", argLen: 2, - asm: s390x.AMLGR, + asm: ppc64.ASRAD, reg: regInfo{ inputs: []inputInfo{ - {1, 8}, // R3 - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, + clobbers: 9223372036854775808, // XER outputs: []outputInfo{ - {0, 4}, // R2 - {1, 8}, // R3 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "SumBytes2", - argLen: 1, - reg: regInfo{}, - }, - { - name: "SumBytes4", - argLen: 1, - reg: regInfo{}, - }, - { - name: "SumBytes8", - argLen: 1, - reg: regInfo{}, - }, - { - name: "STMG2", - auxType: auxSymOff, - argLen: 4, - clobberFlags: true, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.ASTMG, + name: "SRAW", + argLen: 2, + asm: ppc64.ASRAW, reg: regInfo{ inputs: []inputInfo{ - {1, 2}, // R1 - {2, 4}, // R2 - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + clobbers: 9223372036854775808, // XER + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "STMG3", - auxType: auxSymOff, - argLen: 5, - clobberFlags: true, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.ASTMG, + name: "SRD", + argLen: 2, + asm: ppc64.ASRD, reg: regInfo{ inputs: []inputInfo{ - {1, 2}, // R1 - {2, 4}, // R2 - {3, 8}, // R3 - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "STMG4", - auxType: auxSymOff, - argLen: 6, - clobberFlags: true, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.ASTMG, + name: "SRW", + argLen: 2, + asm: ppc64.ASRW, reg: regInfo{ inputs: []inputInfo{ - {1, 2}, // R1 - {2, 4}, // R2 - {3, 8}, // R3 - {4, 16}, // R4 - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "STM2", - auxType: auxSymOff, - argLen: 4, - clobberFlags: true, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.ASTMY, + name: "SLD", + argLen: 2, + asm: ppc64.ASLD, reg: regInfo{ inputs: []inputInfo{ - {1, 2}, // R1 - {2, 4}, // R2 - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 
1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "STM3", - auxType: auxSymOff, - argLen: 5, - clobberFlags: true, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.ASTMY, + name: "SLW", + argLen: 2, + asm: ppc64.ASLW, reg: regInfo{ inputs: []inputInfo{ - {1, 2}, // R1 - {2, 4}, // R2 - {3, 8}, // R3 - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "STM4", - auxType: auxSymOff, - argLen: 6, - clobberFlags: true, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.ASTMY, + name: "ROTL", + argLen: 2, + asm: ppc64.AROTL, reg: regInfo{ inputs: []inputInfo{ - {1, 2}, // R1 - {2, 4}, // R2 - {3, 8}, // R3 - {4, 16}, // R4 - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredMove", - auxType: auxInt64, - argLen: 4, - clobberFlags: true, - faultOnNilArg0: true, - faultOnNilArg1: true, + name: "ROTLW", + argLen: 2, + asm: ppc64.AROTLW, reg: regInfo{ inputs: []inputInfo{ - {0, 2}, // R1 - {1, 4}, // R2 - {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - clobbers: 6, // R1 R2 }, }, { - name: "LoweredZero", - auxType: auxInt64, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, + name: "CLRLSLWI", + auxType: auxInt32, + argLen: 1, + asm: ppc64.ACLRLSLWI, reg: regInfo{ inputs: []inputInfo{ - {0, 2}, // R1 - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - clobbers: 2, // R1 }, }, - { - name: "LoweredStaticCall", - auxType: auxCallOff, + name: "CLRLSLDI", + auxType: auxInt32, argLen: 1, - call: true, - reg: regInfo{ - clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g - }, - }, - { - name: "LoweredTailCall", - auxType: auxCallOff, - argLen: 1, - call: true, - 
tailCall: true, - reg: regInfo{ - clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g - }, - }, - { - name: "LoweredClosureCall", - auxType: auxCallOff, - argLen: 3, - call: true, + asm: ppc64.ACLRLSLDI, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g }, }, { - name: "LoweredInterCall", - auxType: auxCallOff, - argLen: 2, - call: true, + name: "ADDC", + argLen: 2, + commutative: true, + asm: ppc64.AADDC, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + clobbers: 9223372036854775808, // XER + outputs: []outputInfo{ + {1, 9223372036854775808}, // XER + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g }, }, { - name: "LoweredAddr", - auxType: auxSymOff, - argLen: 1, - rematerializeable: true, - symEffect: SymAddr, + name: "SUBC", + argLen: 2, + asm: ppc64.ASUBC, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, + clobbers: 9223372036854775808, // XER outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {1, 9223372036854775808}, // XER + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredMove", + name: "ADDCconst", auxType: auxInt64, - argLen: 3, + argLen: 1, + asm: ppc64.AADDC, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {1, 9223372036854775808}, // XER + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredZero", + name: "SUBCconst", auxType: auxInt64, - argLen: 2, + argLen: 1, + asm: ppc64.ASUBC, reg: regInfo{ inputs: []inputInfo{ - {0, 
65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - }, - }, - { - name: "LoweredGetClosurePtr", - argLen: 0, - reg: regInfo{ outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {1, 9223372036854775808}, // XER + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredGetCallerPC", - argLen: 0, - rematerializeable: true, + name: "ADDE", + argLen: 3, + commutative: true, + asm: ppc64.AADDE, reg: regInfo{ + inputs: []inputInfo{ + {2, 9223372036854775808}, // XER + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + clobbers: 9223372036854775808, // XER outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {1, 9223372036854775808}, // XER + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredGetCallerSP", - argLen: 1, - rematerializeable: true, + name: "ADDZE", + argLen: 2, + asm: ppc64.AADDZE, reg: regInfo{ + inputs: []inputInfo{ + {1, 9223372036854775808}, // XER + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + clobbers: 9223372036854775808, // XER outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {1, 9223372036854775808}, // XER + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredNilCheck", - argLen: 2, - nilCheck: true, - faultOnNilArg0: true, + name: "SUBE", + argLen: 3, + asm: ppc64.ASUBE, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {2, 9223372036854775808}, // XER + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - }, - }, - { - name: "LoweredWB", - auxType: auxInt64, - argLen: 1, - reg: regInfo{ - clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g + clobbers: 9223372036854775808, // XER outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {1, 9223372036854775808}, // XER + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredConvert", - argLen: 2, + name: "ADDZEzero", + argLen: 1, + asm: ppc64.AADDZE, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 9223372036854775808}, // XER }, + clobbers: 9223372036854775808, // XER outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "Select", - 
argLen: 3, - asm: wasm.ASelect, + name: "SUBZEzero", + argLen: 1, + asm: ppc64.ASUBZE, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {2, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 9223372036854775808}, // XER }, + clobbers: 9223372036854775808, // XER outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64Load8U", + name: "SRADconst", auxType: auxInt64, - argLen: 2, - asm: wasm.AI64Load8U, + argLen: 1, + asm: ppc64.ASRAD, reg: regInfo{ inputs: []inputInfo{ - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, + clobbers: 9223372036854775808, // XER outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64Load8S", + name: "SRAWconst", auxType: auxInt64, - argLen: 2, - asm: wasm.AI64Load8S, + argLen: 1, + asm: ppc64.ASRAW, reg: regInfo{ inputs: []inputInfo{ - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, + clobbers: 9223372036854775808, // XER outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64Load16U", + name: "SRDconst", auxType: auxInt64, - argLen: 2, - asm: wasm.AI64Load16U, + argLen: 1, + asm: ppc64.ASRD, reg: regInfo{ inputs: []inputInfo{ - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64Load16S", + name: "SRWconst", auxType: auxInt64, - argLen: 2, - asm: wasm.AI64Load16S, + argLen: 1, + asm: ppc64.ASRW, reg: regInfo{ inputs: []inputInfo{ - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64Load32U", + name: "SLDconst", auxType: auxInt64, - argLen: 2, - asm: wasm.AI64Load32U, - reg: regInfo{ + argLen: 1, + asm: ppc64.ASLD, + reg: regInfo{ inputs: []inputInfo{ - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 
R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64Load32S", + name: "SLWconst", auxType: auxInt64, - argLen: 2, - asm: wasm.AI64Load32S, + argLen: 1, + asm: ppc64.ASLW, reg: regInfo{ inputs: []inputInfo{ - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64Load", + name: "ROTLconst", auxType: auxInt64, - argLen: 2, - asm: wasm.AI64Load, + argLen: 1, + asm: ppc64.AROTL, reg: regInfo{ inputs: []inputInfo{ - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64Store8", + name: "ROTLWconst", auxType: auxInt64, - argLen: 3, - asm: wasm.AI64Store8, + argLen: 1, + asm: ppc64.AROTLW, reg: regInfo{ inputs: []inputInfo{ - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - }, - }, - { - name: "I64Store16", - auxType: auxInt64, - argLen: 3, - asm: wasm.AI64Store16, - reg: regInfo{ - inputs: []inputInfo{ - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64Store32", + name: "EXTSWSLconst", auxType: auxInt64, - argLen: 3, - asm: wasm.AI64Store32, + argLen: 1, + asm: ppc64.AEXTSWSLI, reg: regInfo{ inputs: []inputInfo{ - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64Store", + name: "RLWINM", auxType: auxInt64, - argLen: 3, - asm: wasm.AI64Store, + argLen: 1, + asm: ppc64.ARLWNM, reg: regInfo{ inputs: []inputInfo{ - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 
R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F32Load", + name: "RLWNM", auxType: auxInt64, argLen: 2, - asm: wasm.AF32Load, + asm: ppc64.ARLWNM, reg: regInfo{ inputs: []inputInfo{ - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F64Load", - auxType: auxInt64, - argLen: 2, - asm: wasm.AF64Load, + name: "RLWMI", + auxType: auxInt64, + argLen: 2, + resultInArg0: true, + asm: ppc64.ARLWMI, reg: regInfo{ inputs: []inputInfo{ - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F32Store", + name: "RLDICL", auxType: auxInt64, - argLen: 3, - asm: wasm.AF32Store, + argLen: 1, + asm: ppc64.ARLDICL, reg: regInfo{ inputs: []inputInfo{ - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F64Store", + name: "RLDICLCC", auxType: auxInt64, - argLen: 3, - asm: wasm.AF64Store, + argLen: 1, + asm: ppc64.ARLDICLCC, reg: regInfo{ inputs: []inputInfo{ - {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - }, - }, - { - name: "I64Const", - auxType: auxInt64, - argLen: 0, - rematerializeable: true, - reg: regInfo{ outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F32Const", - auxType: auxFloat32, - argLen: 0, - rematerializeable: true, + name: "RLDICR", + auxType: auxInt64, + argLen: 1, + asm: ppc64.ARLDICR, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, 
}, { - name: "F64Const", - auxType: auxFloat64, - argLen: 0, - rematerializeable: true, + name: "CNTLZD", + argLen: 1, + asm: ppc64.ACNTLZD, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64Eqz", + name: "CNTLZDCC", argLen: 1, - asm: wasm.AI64Eqz, + asm: ppc64.ACNTLZDCC, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64Eq", - argLen: 2, - asm: wasm.AI64Eq, + name: "CNTLZW", + argLen: 1, + asm: ppc64.ACNTLZW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64Ne", - argLen: 2, - asm: wasm.AI64Ne, + name: "CNTTZD", + argLen: 1, + asm: ppc64.ACNTTZD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64LtS", - argLen: 2, - asm: wasm.AI64LtS, + name: "CNTTZW", + argLen: 1, + asm: ppc64.ACNTTZW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64LtU", - argLen: 2, - asm: wasm.AI64LtU, + name: "POPCNTD", + argLen: 1, + asm: ppc64.APOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ 
- {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64GtS", - argLen: 2, - asm: wasm.AI64GtS, + name: "POPCNTW", + argLen: 1, + asm: ppc64.APOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64GtU", - argLen: 2, - asm: wasm.AI64GtU, + name: "POPCNTB", + argLen: 1, + asm: ppc64.APOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64LeS", + name: "FDIV", argLen: 2, - asm: wasm.AI64LeS, + asm: ppc64.AFDIV, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "I64LeU", + name: "FDIVS", argLen: 2, - asm: wasm.AI64LeU, + asm: ppc64.AFDIVS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "I64GeS", + name: "DIVD", argLen: 2, - asm: wasm.AI64GeS, + asm: ppc64.ADIVD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 
R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64GeU", + name: "DIVW", argLen: 2, - asm: wasm.AI64GeU, + asm: ppc64.ADIVW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F32Eq", + name: "DIVDU", argLen: 2, - asm: wasm.AF32Eq, + asm: ppc64.ADIVDU, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F32Ne", + name: "DIVWU", argLen: 2, - asm: wasm.AF32Ne, + asm: ppc64.ADIVWU, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F32Lt", + name: "MODUD", argLen: 2, - asm: wasm.AF32Lt, + asm: ppc64.AMODUD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F32Gt", + name: "MODSD", argLen: 2, - asm: wasm.AF32Gt, + asm: ppc64.AMODSD, reg: regInfo{ inputs: []inputInfo{ - {0, 
4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F32Le", + name: "MODUW", argLen: 2, - asm: wasm.AF32Le, + asm: ppc64.AMODUW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F32Ge", + name: "MODSW", argLen: 2, - asm: wasm.AF32Ge, + asm: ppc64.AMODSW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F64Eq", - argLen: 2, - asm: wasm.AF64Eq, + name: "FCTIDZ", + argLen: 1, + asm: ppc64.AFCTIDZ, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "F64Ne", - argLen: 2, - asm: wasm.AF64Ne, + name: "FCTIWZ", + argLen: 1, + asm: ppc64.AFCTIWZ, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, 
}, { - name: "F64Lt", - argLen: 2, - asm: wasm.AF64Lt, + name: "FCFID", + argLen: 1, + asm: ppc64.AFCFID, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "F64Gt", - argLen: 2, - asm: wasm.AF64Gt, + name: "FCFIDS", + argLen: 1, + asm: ppc64.AFCFIDS, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "F64Le", - argLen: 2, - asm: wasm.AF64Le, + name: "FRSP", + argLen: 1, + asm: ppc64.AFRSP, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "F64Ge", - argLen: 2, - asm: wasm.AF64Ge, + name: "MFVSRD", + argLen: 1, + asm: ppc64.AMFVSRD, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64Add", - argLen: 2, - asm: wasm.AI64Add, + name: "MTVSRD", + argLen: 1, + asm: ppc64.AMTVSRD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "I64AddConst", - 
auxType: auxInt64, - argLen: 1, - asm: wasm.AI64Add, + name: "AND", + argLen: 2, + commutative: true, + asm: ppc64.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64Sub", + name: "ANDN", argLen: 2, - asm: wasm.AI64Sub, + asm: ppc64.AANDN, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64Mul", + name: "ANDNCC", argLen: 2, - asm: wasm.AI64Mul, + asm: ppc64.AANDNCC, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64DivS", - argLen: 2, - asm: wasm.AI64DivS, + name: "ANDCC", + argLen: 2, + commutative: true, + asm: ppc64.AANDCC, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64DivU", - argLen: 2, - asm: wasm.AI64DivU, + name: "OR", + argLen: 2, + commutative: true, + asm: ppc64.AOR, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 
R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64RemS", + name: "ORN", argLen: 2, - asm: wasm.AI64RemS, + asm: ppc64.AORN, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64RemU", - argLen: 2, - asm: wasm.AI64RemU, + name: "ORCC", + argLen: 2, + commutative: true, + asm: ppc64.AORCC, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64And", - argLen: 2, - asm: wasm.AI64And, + name: "NOR", + argLen: 2, + commutative: true, + asm: ppc64.ANOR, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64Or", - argLen: 2, - asm: wasm.AI64Or, + name: "NORCC", + argLen: 2, + commutative: true, + asm: ppc64.ANORCC, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64Xor", - argLen: 2, - asm: wasm.AI64Xor, + name: "XOR", + argLen: 
2, + commutative: true, + asm: ppc64.AXOR, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64Shl", - argLen: 2, - asm: wasm.AI64Shl, + name: "XORCC", + argLen: 2, + commutative: true, + asm: ppc64.AXORCC, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64ShrS", - argLen: 2, - asm: wasm.AI64ShrS, + name: "EQV", + argLen: 2, + commutative: true, + asm: ppc64.AEQV, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64ShrU", - argLen: 2, - asm: wasm.AI64ShrU, + name: "NEG", + argLen: 1, + asm: ppc64.ANEG, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F32Neg", + name: "NEGCC", argLen: 1, - asm: wasm.AF32Neg, + asm: ppc64.ANEGCC, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: 
"F32Add", - argLen: 2, - asm: wasm.AF32Add, + name: "BRD", + argLen: 1, + asm: ppc64.ABRD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F32Sub", - argLen: 2, - asm: wasm.AF32Sub, + name: "BRW", + argLen: 1, + asm: ppc64.ABRW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F32Mul", - argLen: 2, - asm: wasm.AF32Mul, + name: "BRH", + argLen: 1, + asm: ppc64.ABRH, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F32Div", - argLen: 2, - asm: wasm.AF32Div, + name: "FNEG", + argLen: 1, + asm: ppc64.AFNEG, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "F64Neg", + name: "FSQRT", argLen: 1, - asm: wasm.AF64Neg, + asm: ppc64.AFSQRT, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "F64Add", - argLen: 2, - asm: wasm.AF64Add, + name: "FSQRTS", + argLen: 1, + asm: ppc64.AFSQRTS, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 
F29 F30 F31 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "F64Sub", - argLen: 2, - asm: wasm.AF64Sub, + name: "FFLOOR", + argLen: 1, + asm: ppc64.AFRIM, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "F64Mul", - argLen: 2, - asm: wasm.AF64Mul, + name: "FCEIL", + argLen: 1, + asm: ppc64.AFRIP, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "F64Div", - argLen: 2, - asm: wasm.AF64Div, + name: "FTRUNC", + argLen: 1, + asm: ppc64.AFRIZ, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "I64TruncSatF64S", + name: "FROUND", argLen: 1, - asm: wasm.AI64TruncSatF64S, + asm: ppc64.AFRIN, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "I64TruncSatF64U", + name: "FABS", argLen: 1, - asm: wasm.AI64TruncSatF64U, + asm: ppc64.AFABS, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 
F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "I64TruncSatF32S", + name: "FNABS", argLen: 1, - asm: wasm.AI64TruncSatF32S, + asm: ppc64.AFNABS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "I64TruncSatF32U", - argLen: 1, - asm: wasm.AI64TruncSatF32U, + name: "FCPSGN", + argLen: 2, + asm: ppc64.AFCPSGN, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "F32ConvertI64S", - argLen: 1, - asm: wasm.AF32ConvertI64S, + name: "ORconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.AOR, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F32ConvertI64U", - argLen: 1, - asm: wasm.AF32ConvertI64U, + name: "XORconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.AXOR, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F64ConvertI64S", - argLen: 1, - asm: wasm.AF64ConvertI64S, + name: "ANDCCconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.AANDCC, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 
R29 }, }, }, { - name: "F64ConvertI64U", - argLen: 1, - asm: wasm.AF64ConvertI64U, + name: "ANDconst", + auxType: auxInt64, + argLen: 1, + clobberFlags: true, + asm: ppc64.AANDCC, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F32DemoteF64", + name: "MOVBreg", argLen: 1, - asm: wasm.AF32DemoteF64, + asm: ppc64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F64PromoteF32", + name: "MOVBZreg", argLen: 1, - asm: wasm.AF64PromoteF32, + asm: ppc64.AMOVBZ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64Extend8S", + name: "MOVHreg", argLen: 1, - asm: wasm.AI64Extend8S, + asm: ppc64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64Extend16S", + name: "MOVHZreg", argLen: 1, - asm: wasm.AI64Extend16S, + asm: ppc64.AMOVHZ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64Extend32S", + name: "MOVWreg", argLen: 1, - asm: wasm.AI64Extend32S, + asm: ppc64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: 
"F32Sqrt", + name: "MOVWZreg", argLen: 1, - asm: wasm.AF32Sqrt, + asm: ppc64.AMOVWZ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F32Trunc", - argLen: 1, - asm: wasm.AF32Trunc, + name: "MOVBZload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: ppc64.AMOVBZ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F32Ceil", - argLen: 1, - asm: wasm.AF32Ceil, + name: "MOVHload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: ppc64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F32Floor", - argLen: 1, - asm: wasm.AF32Floor, + name: "MOVHZload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: ppc64.AMOVHZ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F32Nearest", - argLen: 1, - asm: wasm.AF32Nearest, + name: "MOVWload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: ppc64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F32Abs", - argLen: 1, - asm: wasm.AF32Abs, + name: "MOVWZload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: ppc64.AMOVWZ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, 
outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F32Copysign", - argLen: 2, - asm: wasm.AF32Copysign, + name: "MOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: ppc64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F64Sqrt", - argLen: 1, - asm: wasm.AF64Sqrt, + name: "MOVDBRload", + argLen: 2, + faultOnNilArg0: true, + asm: ppc64.AMOVDBR, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F64Trunc", - argLen: 1, - asm: wasm.AF64Trunc, + name: "MOVWBRload", + argLen: 2, + faultOnNilArg0: true, + asm: ppc64.AMOVWBR, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F64Ceil", - argLen: 1, - asm: wasm.AF64Ceil, + name: "MOVHBRload", + argLen: 2, + faultOnNilArg0: true, + asm: ppc64.AMOVHBR, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F64Floor", - argLen: 1, - asm: wasm.AF64Floor, + name: "MOVBZloadidx", + argLen: 3, + asm: ppc64.AMOVBZ, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 
R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F64Nearest", - argLen: 1, - asm: wasm.AF64Nearest, + name: "MOVHloadidx", + argLen: 3, + asm: ppc64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F64Abs", - argLen: 1, - asm: wasm.AF64Abs, + name: "MOVHZloadidx", + argLen: 3, + asm: ppc64.AMOVHZ, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F64Copysign", - argLen: 2, - asm: wasm.AF64Copysign, + name: "MOVWloadidx", + argLen: 3, + asm: ppc64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64Ctz", - argLen: 1, - asm: wasm.AI64Ctz, + name: "MOVWZloadidx", + argLen: 3, + asm: ppc64.AMOVWZ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64Clz", - argLen: 1, - asm: wasm.AI64Clz, + name: "MOVDloadidx", + argLen: 3, + asm: ppc64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 
R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I32Rotl", - argLen: 2, - asm: wasm.AI32Rotl, + name: "MOVHBRloadidx", + argLen: 3, + asm: ppc64.AMOVHBR, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64Rotl", - argLen: 2, - asm: wasm.AI64Rotl, + name: "MOVWBRloadidx", + argLen: 3, + asm: ppc64.AMOVWBR, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVDBRloadidx", + argLen: 3, + asm: ppc64.AMOVDBR, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "FMOVDloadidx", + argLen: 3, + asm: ppc64.AFMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "I64Popcnt", - argLen: 1, - asm: wasm.AI64Popcnt, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - }, - outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, + name: "FMOVSloadidx", + argLen: 3, + asm: ppc64.AFMOVS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 
F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "DCBT", + auxType: auxInt64, + argLen: 2, + hasSideEffects: true, + asm: ppc64.ADCBT, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVDBRstore", + argLen: 3, + faultOnNilArg0: true, + asm: ppc64.AMOVDBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVWBRstore", + argLen: 3, + faultOnNilArg0: true, + asm: ppc64.AMOVWBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVHBRstore", + argLen: 3, + faultOnNilArg0: true, + asm: ppc64.AMOVHBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "FMOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: ppc64.AFMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "FMOVSload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: ppc64.AFMOVS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "MOVBstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVHstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVWstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 
R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "FMOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AFMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "FMOVSstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AFMOVS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "MOVBstoreidx", + argLen: 4, + asm: ppc64.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVHstoreidx", + argLen: 4, + asm: ppc64.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVWstoreidx", + argLen: 4, + asm: ppc64.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVDstoreidx", + argLen: 4, + asm: ppc64.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "FMOVDstoreidx", + argLen: 
4, + asm: ppc64.AFMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "FMOVSstoreidx", + argLen: 4, + asm: ppc64.AFMOVS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "MOVHBRstoreidx", + argLen: 4, + asm: ppc64.AMOVHBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVWBRstoreidx", + argLen: 4, + asm: ppc64.AMOVWBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVDBRstoreidx", + argLen: 4, + asm: ppc64.AMOVDBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVBstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVHstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVWstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVDstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AMOVD, + reg: regInfo{ + inputs: 
[]inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVDaddr", + auxType: auxSymOff, + argLen: 1, + rematerializeable: true, + symEffect: SymAddr, + asm: ppc64.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVDconst", + auxType: auxInt64, + argLen: 0, + rematerializeable: true, + asm: ppc64.AMOVD, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "FMOVDconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: ppc64.AFMOVD, + reg: regInfo{ + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "FMOVSconst", + auxType: auxFloat32, + argLen: 0, + rematerializeable: true, + asm: ppc64.AFMOVS, + reg: regInfo{ + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "FCMPU", + argLen: 2, + asm: ppc64.AFCMPU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "CMP", + argLen: 2, + asm: ppc64.ACMP, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "CMPU", + argLen: 2, + asm: ppc64.ACMPU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "CMPW", + argLen: 2, + asm: ppc64.ACMPW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "CMPWU", + argLen: 2, + asm: ppc64.ACMPWU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "CMPconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.ACMP, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 
R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "CMPUconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.ACMPU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "CMPWconst", + auxType: auxInt32, + argLen: 1, + asm: ppc64.ACMPW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "CMPWUconst", + auxType: auxInt32, + argLen: 1, + asm: ppc64.ACMPWU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "ISEL", + auxType: auxInt32, + argLen: 3, + asm: ppc64.AISEL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "ISELZ", + auxType: auxInt32, + argLen: 2, + asm: ppc64.AISEL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "SETBC", + auxType: auxInt32, + argLen: 1, + asm: ppc64.ASETBC, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "SETBCR", + auxType: auxInt32, + argLen: 1, + asm: ppc64.ASETBCR, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "Equal", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "NotEqual", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LessThan", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "FLessThan", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LessEqual", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "FLessEqual", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "GreaterThan", + argLen: 1, + reg: 
regInfo{ + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "FGreaterThan", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "GreaterEqual", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "FGreaterEqual", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredGetClosurePtr", + argLen: 0, + zeroWidth: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 2048}, // R11 + }, + }, + }, + { + name: "LoweredGetCallerSP", + argLen: 1, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredGetCallerPC", + argLen: 0, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredNilCheck", + argLen: 2, + clobberFlags: true, + nilCheck: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + clobbers: 2147483648, // R31 + }, + }, + { + name: "LoweredRound32F", + argLen: 1, + resultInArg0: true, + zeroWidth: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "LoweredRound64F", + argLen: 1, + resultInArg0: true, + zeroWidth: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "CALLstatic", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, + reg: regInfo{ + clobbers: 18446744071562059768, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 XER + }, + }, + { + name: "CALLtail", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, + tailCall: true, + reg: regInfo{ + clobbers: 18446744071562059768, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 XER + }, + }, + { + name: "CALLclosure", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, 
+ call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4096}, // R12 + {1, 2048}, // R11 + }, + clobbers: 18446744071562059768, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 XER + }, + }, + { + name: "CALLinter", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4096}, // R12 + }, + clobbers: 18446744071562059768, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 XER + }, + }, + { + name: "LoweredZero", + auxType: auxInt64, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1048576}, // R20 + }, + clobbers: 1048576, // R20 + }, + }, + { + name: "LoweredZeroShort", + auxType: auxInt64, + argLen: 2, + faultOnNilArg0: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredQuadZeroShort", + auxType: auxInt64, + argLen: 2, + faultOnNilArg0: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredQuadZero", + auxType: auxInt64, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1048576}, // R20 + }, + clobbers: 1048576, // R20 + }, + }, + { + name: "LoweredMove", + auxType: auxInt64, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + faultOnNilArg1: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1048576}, // R20 + {1, 2097152}, // R21 + }, + clobbers: 3145728, // R20 R21 + }, + }, + { + name: "LoweredMoveShort", + auxType: auxInt64, + argLen: 3, + faultOnNilArg0: true, + faultOnNilArg1: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredQuadMove", + auxType: auxInt64, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + faultOnNilArg1: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1048576}, // R20 + {1, 2097152}, // R21 + }, + clobbers: 3145728, // R20 R21 + }, + }, + { + name: "LoweredQuadMoveShort", + auxType: auxInt64, + argLen: 3, + faultOnNilArg0: true, + faultOnNilArg1: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicStore8", + auxType: auxInt64, + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 
1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicStore32", + auxType: auxInt64, + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicStore64", + auxType: auxInt64, + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicLoad8", + auxType: auxInt64, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicLoad32", + auxType: auxInt64, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicLoad64", + auxType: auxInt64, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicLoadPtr", + auxType: auxInt64, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicAdd32", + argLen: 3, + resultNotInArgs: true, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicAdd64", + argLen: 3, + resultNotInArgs: true, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073733624}, // R3 R4 R5 R6 
R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicExchange8", + argLen: 3, + resultNotInArgs: true, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicExchange32", + argLen: 3, + resultNotInArgs: true, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicExchange64", + argLen: 3, + resultNotInArgs: true, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicCas64", + auxType: auxInt64, + argLen: 4, + resultNotInArgs: true, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicCas32", + auxType: auxInt64, + argLen: 4, + resultNotInArgs: true, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 
R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicAnd8", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + asm: ppc64.AAND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicAnd32", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + asm: ppc64.AAND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicOr8", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + asm: ppc64.AOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicOr32", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + asm: ppc64.AOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredWB", + auxType: auxInt64, + argLen: 1, + clobberFlags: true, + reg: regInfo{ + clobbers: 18446744072632408064, // R11 R12 R18 R19 R22 R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 XER + outputs: []outputInfo{ + {0, 536870912}, // R29 + }, + }, + }, + { + name: "LoweredPubBarrier", + argLen: 1, + hasSideEffects: true, + asm: ppc64.ALWSYNC, + reg: regInfo{}, + }, + { + name: "LoweredPanicBoundsA", + auxType: auxInt64, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 32}, // R5 + {1, 64}, // R6 + }, + }, + }, + { + name: "LoweredPanicBoundsB", + auxType: auxInt64, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 16}, // R4 + {1, 32}, // R5 + }, + }, + }, + { + name: "LoweredPanicBoundsC", + auxType: auxInt64, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 8}, // R3 + {1, 16}, // R4 + }, + }, + }, + { + name: "InvertFlags", + argLen: 1, + reg: regInfo{}, + }, + { + name: "FlagEQ", + argLen: 0, + reg: regInfo{}, + }, + { + name: "FlagLT", + argLen: 0, + reg: regInfo{}, + }, + { + name: "FlagGT", + argLen: 0, + reg: regInfo{}, + }, + + { + name: "ADD", + argLen: 2, + commutative: true, + asm: riscv.AADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "ADDI", + auxType: auxInt64, + argLen: 1, + asm: riscv.AADDI, + reg: 
regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "ADDIW", + auxType: auxInt64, + argLen: 1, + asm: riscv.AADDIW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "NEG", + argLen: 1, + asm: riscv.ANEG, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "NEGW", + argLen: 1, + asm: riscv.ANEGW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SUB", + argLen: 2, + asm: riscv.ASUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SUBW", + argLen: 2, + asm: riscv.ASUBW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MUL", + argLen: 2, + commutative: true, + asm: riscv.AMUL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MULW", + argLen: 2, + commutative: true, + asm: riscv.AMULW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MULH", + argLen: 2, + commutative: true, + asm: riscv.AMULH, + reg: regInfo{ + inputs: 
[]inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MULHU", + argLen: 2, + commutative: true, + asm: riscv.AMULHU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "LoweredMuluhilo", + argLen: 2, + resultNotInArgs: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "LoweredMuluover", + argLen: 2, + resultNotInArgs: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "DIV", + argLen: 2, + asm: riscv.ADIV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "DIVU", + argLen: 2, + asm: riscv.ADIVU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "DIVW", + argLen: 2, + asm: riscv.ADIVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 
X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "DIVUW", + argLen: 2, + asm: riscv.ADIVUW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "REM", + argLen: 2, + asm: riscv.AREM, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "REMU", + argLen: 2, + asm: riscv.AREMU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "REMW", + argLen: 2, + asm: riscv.AREMW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "REMUW", + argLen: 2, + asm: riscv.AREMUW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVaddr", + auxType: auxSymOff, + argLen: 1, + rematerializeable: true, + symEffect: SymAddr, + asm: riscv.AMOV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVDconst", + auxType: auxInt64, + argLen: 0, + rematerializeable: true, + asm: riscv.AMOV, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVBload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + 
outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVHload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVWload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVBUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOVBU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVHUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOVHU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVWUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOVWU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVBstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + }, + }, + { + name: "MOVHstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + }, + }, + { + name: "MOVWstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + }, + }, + { + name: "MOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOV, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + }, + }, + { + name: "MOVBstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + }, + }, + { + name: "MOVHstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + }, + }, + { + name: "MOVWstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + }, + }, + { + name: "MOVDstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + }, + }, + { + name: "MOVBreg", + argLen: 1, + asm: riscv.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVHreg", + argLen: 1, + asm: riscv.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVWreg", + argLen: 1, + asm: riscv.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVDreg", + argLen: 1, + asm: riscv.AMOV, + reg: regInfo{ + inputs: 
[]inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVBUreg", + argLen: 1, + asm: riscv.AMOVBU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVHUreg", + argLen: 1, + asm: riscv.AMOVHU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVWUreg", + argLen: 1, + asm: riscv.AMOVWU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVDnop", + argLen: 1, + resultInArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SLL", + argLen: 2, + asm: riscv.ASLL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SLLW", + argLen: 2, + asm: riscv.ASLLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SRA", + argLen: 2, + asm: riscv.ASRA, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SRAW", + argLen: 2, + asm: riscv.ASRAW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 
X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SRL", + argLen: 2, + asm: riscv.ASRL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SRLW", + argLen: 2, + asm: riscv.ASRLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SLLI", + auxType: auxInt64, + argLen: 1, + asm: riscv.ASLLI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SLLIW", + auxType: auxInt64, + argLen: 1, + asm: riscv.ASLLIW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SRAI", + auxType: auxInt64, + argLen: 1, + asm: riscv.ASRAI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SRAIW", + auxType: auxInt64, + argLen: 1, + asm: riscv.ASRAIW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SRLI", + auxType: auxInt64, + argLen: 1, + asm: riscv.ASRLI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SRLIW", + auxType: auxInt64, + argLen: 1, + asm: riscv.ASRLIW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SH1ADD", + argLen: 
2, + asm: riscv.ASH1ADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SH2ADD", + argLen: 2, + asm: riscv.ASH2ADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SH3ADD", + argLen: 2, + asm: riscv.ASH3ADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "AND", + argLen: 2, + commutative: true, + asm: riscv.AAND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "ANDN", + argLen: 2, + asm: riscv.AANDN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "ANDI", + auxType: auxInt64, + argLen: 1, + asm: riscv.AANDI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "CLZ", + argLen: 1, + asm: riscv.ACLZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "CLZW", + argLen: 1, + asm: riscv.ACLZW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 
X30 + }, + }, + }, + { + name: "CPOP", + argLen: 1, + asm: riscv.ACPOP, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "CPOPW", + argLen: 1, + asm: riscv.ACPOPW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "CTZ", + argLen: 1, + asm: riscv.ACTZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "CTZW", + argLen: 1, + asm: riscv.ACTZW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "NOT", + argLen: 1, + asm: riscv.ANOT, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "OR", + argLen: 2, + commutative: true, + asm: riscv.AOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "ORN", + argLen: 2, + asm: riscv.AORN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "ORI", + auxType: auxInt64, + argLen: 1, + asm: riscv.AORI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "REV8", + argLen: 1, + asm: riscv.AREV8, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 
X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "ROL", + argLen: 2, + asm: riscv.AROL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "ROLW", + argLen: 2, + asm: riscv.AROLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "ROR", + argLen: 2, + asm: riscv.AROR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "RORI", + auxType: auxInt64, + argLen: 1, + asm: riscv.ARORI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "RORIW", + auxType: auxInt64, + argLen: 1, + asm: riscv.ARORIW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "RORW", + argLen: 2, + asm: riscv.ARORW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "XNOR", + argLen: 2, + commutative: true, + asm: riscv.AXNOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "XOR", + argLen: 2, + commutative: true, + asm: riscv.AXOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 
1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "XORI", + auxType: auxInt64, + argLen: 1, + asm: riscv.AXORI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MIN", + argLen: 2, + commutative: true, + asm: riscv.AMIN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MAX", + argLen: 2, + commutative: true, + asm: riscv.AMAX, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MINU", + argLen: 2, + commutative: true, + asm: riscv.AMINU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MAXU", + argLen: 2, + commutative: true, + asm: riscv.AMAXU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SEQZ", + argLen: 1, + asm: riscv.ASEQZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SNEZ", + argLen: 1, + asm: riscv.ASNEZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SLT", + argLen: 2, + asm: riscv.ASLT, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SLTI", + auxType: auxInt64, + argLen: 1, + asm: riscv.ASLTI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SLTU", + argLen: 2, + asm: riscv.ASLTU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SLTIU", + auxType: auxInt64, + argLen: 1, + asm: riscv.ASLTIU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "LoweredRound32F", + argLen: 1, + resultInArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "LoweredRound64F", + argLen: 1, + resultInArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "CALLstatic", + auxType: auxCallOff, + argLen: -1, + call: true, + reg: regInfo{ + clobbers: 9223372035781033968, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { + name: "CALLtail", + auxType: auxCallOff, + argLen: -1, + call: true, + tailCall: true, + reg: regInfo{ + clobbers: 9223372035781033968, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { + name: "CALLclosure", + auxType: auxCallOff, + argLen: -1, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 33554432}, // X26 + {0, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + clobbers: 
9223372035781033968, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { + name: "CALLinter", + auxType: auxCallOff, + argLen: -1, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + clobbers: 9223372035781033968, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { + name: "DUFFZERO", + auxType: auxInt64, + argLen: 2, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 16777216}, // X25 + }, + clobbers: 16777216, // X25 + }, + }, + { + name: "DUFFCOPY", + auxType: auxInt64, + argLen: 3, + faultOnNilArg0: true, + faultOnNilArg1: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 16777216}, // X25 + {1, 8388608}, // X24 + }, + clobbers: 25165824, // X24 X25 + }, + }, + { + name: "LoweredZero", + auxType: auxInt64, + argLen: 3, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 16}, // X5 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + clobbers: 16, // X5 + }, + }, + { + name: "LoweredMove", + auxType: auxInt64, + argLen: 4, + faultOnNilArg0: true, + faultOnNilArg1: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 16}, // X5 + {1, 32}, // X6 + {2, 1006632880}, // X5 X6 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + clobbers: 112, // X5 X6 X7 + }, + }, + { + name: "LoweredAtomicLoad8", + argLen: 2, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "LoweredAtomicLoad32", + argLen: 2, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "LoweredAtomicLoad64", + argLen: 2, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "LoweredAtomicStore8", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + }, + }, + { + name: "LoweredAtomicStore32", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: 
[]inputInfo{ + {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + }, + }, + { + name: "LoweredAtomicStore64", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + }, + }, + { + name: "LoweredAtomicExchange32", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "LoweredAtomicExchange64", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "LoweredAtomicAdd32", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "LoweredAtomicAdd64", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "LoweredAtomicCas32", + argLen: 4, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {2, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 
X24 X25 X26 g X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "LoweredAtomicCas64", + argLen: 4, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {2, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "LoweredAtomicAnd32", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + asm: riscv.AAMOANDW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + }, + }, + }, + { + name: "LoweredAtomicOr32", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + asm: riscv.AAMOORW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + }, + }, + }, + { + name: "LoweredNilCheck", + argLen: 2, + nilCheck: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "LoweredGetClosurePtr", + argLen: 0, + reg: regInfo{ + outputs: []outputInfo{ + {0, 33554432}, // X26 + }, + }, + }, + { + name: "LoweredGetCallerSP", + argLen: 1, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "LoweredGetCallerPC", + argLen: 0, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "LoweredWB", + auxType: auxInt64, + argLen: 1, + clobberFlags: true, + reg: regInfo{ + clobbers: 9223372034707292160, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + outputs: []outputInfo{ + {0, 8388608}, // X24 + }, + }, + }, + { + name: "LoweredPubBarrier", + argLen: 1, + hasSideEffects: true, + asm: riscv.AFENCE, + reg: regInfo{}, + }, + { + name: "LoweredPanicBoundsA", + auxType: auxInt64, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 64}, // X7 + {1, 134217728}, // X28 + }, + }, + }, + { + name: "LoweredPanicBoundsB", + auxType: auxInt64, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 32}, // X6 + {1, 64}, // X7 + }, + }, + }, + { + name: "LoweredPanicBoundsC", + auxType: auxInt64, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 16}, // X5 + 
{1, 32}, // X6 + }, + }, + }, + { + name: "FADDS", + argLen: 2, + commutative: true, + asm: riscv.AFADDS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FSUBS", + argLen: 2, + asm: riscv.AFSUBS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMULS", + argLen: 2, + commutative: true, + asm: riscv.AFMULS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FDIVS", + argLen: 2, + asm: riscv.AFDIVS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMADDS", + argLen: 3, + commutative: true, + asm: riscv.AFMADDS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMSUBS", + argLen: 3, + commutative: true, + asm: riscv.AFMSUBS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 
F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FNMADDS", + argLen: 3, + commutative: true, + asm: riscv.AFNMADDS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FNMSUBS", + argLen: 3, + commutative: true, + asm: riscv.AFNMSUBS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FSQRTS", + argLen: 1, + asm: riscv.AFSQRTS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FNEGS", + argLen: 1, + asm: riscv.AFNEGS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMVSX", + argLen: 1, + asm: riscv.AFMVSX, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FCVTSW", + argLen: 1, + asm: riscv.AFCVTSW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 
F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FCVTSL", + argLen: 1, + asm: riscv.AFCVTSL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FCVTWS", + argLen: 1, + asm: riscv.AFCVTWS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "FCVTLS", + argLen: 1, + asm: riscv.AFCVTLS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "FMOVWload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOVF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMOVWstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOVF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FEQS", + argLen: 2, + commutative: true, + asm: riscv.AFEQS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "FNES", + argLen: 2, + commutative: true, + asm: riscv.AFNES, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "FLTS", + argLen: 2, + asm: riscv.AFLTS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 
F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "FLES", + argLen: 2, + asm: riscv.AFLES, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "LoweredFMAXS", + argLen: 2, + commutative: true, + resultNotInArgs: true, + asm: riscv.AFMAXS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "LoweredFMINS", + argLen: 2, + commutative: true, + resultNotInArgs: true, + asm: riscv.AFMINS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FADDD", + argLen: 2, + commutative: true, + asm: riscv.AFADDD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FSUBD", + argLen: 2, + asm: riscv.AFSUBD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMULD", + argLen: 2, + commutative: true, + asm: riscv.AFMULD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 
F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FDIVD", + argLen: 2, + asm: riscv.AFDIVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMADDD", + argLen: 3, + commutative: true, + asm: riscv.AFMADDD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMSUBD", + argLen: 3, + commutative: true, + asm: riscv.AFMSUBD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FNMADDD", + argLen: 3, + commutative: true, + asm: riscv.AFNMADDD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FNMSUBD", + argLen: 3, + commutative: true, + asm: riscv.AFNMSUBD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 
F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FSQRTD", + argLen: 1, + asm: riscv.AFSQRTD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FNEGD", + argLen: 1, + asm: riscv.AFNEGD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FABSD", + argLen: 1, + asm: riscv.AFABSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FSGNJD", + argLen: 2, + asm: riscv.AFSGNJD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMVDX", + argLen: 1, + asm: riscv.AFMVDX, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FCVTDW", + argLen: 1, + asm: riscv.AFCVTDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FCVTDL", + argLen: 1, + asm: riscv.AFCVTDL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 
F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FCVTWD", + argLen: 1, + asm: riscv.AFCVTWD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "FCVTLD", + argLen: 1, + asm: riscv.AFCVTLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "FCVTDS", + argLen: 1, + asm: riscv.AFCVTDS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FCVTSD", + argLen: 1, + asm: riscv.AFCVTSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FEQD", + argLen: 2, + commutative: true, + asm: riscv.AFEQD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "FNED", + argLen: 2, + commutative: true, + asm: riscv.AFNED, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 
9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "FLTD", + argLen: 2, + asm: riscv.AFLTD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "FLED", + argLen: 2, + asm: riscv.AFLED, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "LoweredFMIND", + argLen: 2, + commutative: true, + resultNotInArgs: true, + asm: riscv.AFMIND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "LoweredFMAXD", + argLen: 2, + commutative: true, + resultNotInArgs: true, + asm: riscv.AFMAXD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + + { + name: "FADDS", + argLen: 2, + commutative: true, + resultInArg0: true, + asm: s390x.AFADDS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FADD", + argLen: 2, + commutative: true, + resultInArg0: true, + asm: s390x.AFADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FSUBS", + argLen: 2, + 
resultInArg0: true, + asm: s390x.AFSUBS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FSUB", + argLen: 2, + resultInArg0: true, + asm: s390x.AFSUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FMULS", + argLen: 2, + commutative: true, + resultInArg0: true, + asm: s390x.AFMULS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FMUL", + argLen: 2, + commutative: true, + resultInArg0: true, + asm: s390x.AFMUL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FDIVS", + argLen: 2, + resultInArg0: true, + asm: s390x.AFDIVS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FDIV", + argLen: 2, + resultInArg0: true, + asm: s390x.AFDIV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FNEGS", + argLen: 1, + clobberFlags: true, + asm: s390x.AFNEGS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FNEG", + argLen: 1, + clobberFlags: true, + asm: s390x.AFNEG, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FMADDS", + argLen: 3, + resultInArg0: true, + asm: s390x.AFMADDS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FMADD", + argLen: 3, + resultInArg0: true, + asm: s390x.AFMADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 
4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FMSUBS", + argLen: 3, + resultInArg0: true, + asm: s390x.AFMSUBS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FMSUB", + argLen: 3, + resultInArg0: true, + asm: s390x.AFMSUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "LPDFR", + argLen: 1, + asm: s390x.ALPDFR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "LNDFR", + argLen: 1, + asm: s390x.ALNDFR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "CPSDR", + argLen: 2, + asm: s390x.ACPSDR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FIDBR", + auxType: auxInt8, + argLen: 1, + asm: s390x.AFIDBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FMOVSload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AFMOVS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FMOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AFMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FMOVSconst", + auxType: auxFloat32, + argLen: 0, + rematerializeable: true, + asm: s390x.AFMOVS, + reg: regInfo{ + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FMOVDconst", + auxType: auxFloat64, + argLen: 0, + 
rematerializeable: true, + asm: s390x.AFMOVD, + reg: regInfo{ + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FMOVSloadidx", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: s390x.AFMOVS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FMOVDloadidx", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: s390x.AFMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FMOVSstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AFMOVS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FMOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AFMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FMOVSstoreidx", + auxType: auxSymOff, + argLen: 4, + symEffect: SymWrite, + asm: s390x.AFMOVS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FMOVDstoreidx", + auxType: auxSymOff, + argLen: 4, + symEffect: SymWrite, + asm: s390x.AFMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "ADD", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: s390x.AADD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ADDW", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: s390x.AADDW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ADDconst", + auxType: auxInt32, + argLen: 1, + clobberFlags: true, + asm: s390x.AADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ADDWconst", + auxType: auxInt32, + argLen: 1, + clobberFlags: true, + asm: s390x.AADDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: 
[]outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ADDload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.AADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ADDWload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.AADDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SUB", + argLen: 2, + clobberFlags: true, + asm: s390x.ASUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SUBW", + argLen: 2, + clobberFlags: true, + asm: s390x.ASUBW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SUBconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.ASUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SUBWconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.ASUBW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SUBload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.ASUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SUBWload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.ASUBW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MULLD", + argLen: 2, + commutative: true, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMULLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MULLW", + argLen: 2, + commutative: 
true, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMULLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MULLDconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMULLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MULLWconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMULLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MULLDload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.AMULLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MULLWload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.AMULLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MULHD", + argLen: 2, + commutative: true, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMULHD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + clobbers: 2048, // R11 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MULHDU", + argLen: 2, + commutative: true, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMULHDU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + clobbers: 2048, // R11 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "DIVD", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.ADIVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + clobbers: 2048, // R11 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "DIVW", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.ADIVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + clobbers: 2048, // R11 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "DIVDU", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.ADIVDU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 
R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + clobbers: 2048, // R11 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "DIVWU", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.ADIVWU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + clobbers: 2048, // R11 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MODD", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMODD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + clobbers: 2048, // R11 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MODW", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMODW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + clobbers: 2048, // R11 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MODDU", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMODDU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + clobbers: 2048, // R11 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MODWU", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMODWU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + clobbers: 2048, // R11 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "AND", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: s390x.AAND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ANDW", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: s390x.AANDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ANDconst", + auxType: auxInt64, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AAND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ANDWconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AANDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ANDload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + 
symEffect: SymRead, + asm: s390x.AAND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ANDWload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.AANDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "OR", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: s390x.AOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ORW", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: s390x.AORW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ORconst", + auxType: auxInt64, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ORWconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AORW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ORload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.AOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ORWload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.AORW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "XOR", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: s390x.AXOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "XORW", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: s390x.AXORW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + 
outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "XORconst", + auxType: auxInt64, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AXOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "XORWconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AXORW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "XORload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.AXOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "XORWload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.AXORW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ADDC", + argLen: 2, + commutative: true, + asm: s390x.AADDC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ADDCconst", + auxType: auxInt16, + argLen: 1, + asm: s390x.AADDC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ADDE", + argLen: 3, + commutative: true, + resultInArg0: true, + asm: s390x.AADDE, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SUBC", + argLen: 2, + asm: s390x.ASUBC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SUBE", + argLen: 3, + resultInArg0: true, + asm: s390x.ASUBE, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "CMP", + argLen: 2, + asm: s390x.ACMP, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "CMPW", + argLen: 2, + asm: s390x.ACMPW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 
56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "CMPU", + argLen: 2, + asm: s390x.ACMPU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "CMPWU", + argLen: 2, + asm: s390x.ACMPWU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "CMPconst", + auxType: auxInt32, + argLen: 1, + asm: s390x.ACMP, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "CMPWconst", + auxType: auxInt32, + argLen: 1, + asm: s390x.ACMPW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "CMPUconst", + auxType: auxInt32, + argLen: 1, + asm: s390x.ACMPU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "CMPWUconst", + auxType: auxInt32, + argLen: 1, + asm: s390x.ACMPWU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "FCMPS", + argLen: 2, + asm: s390x.ACEBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FCMP", + argLen: 2, + asm: s390x.AFCMPU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "LTDBR", + argLen: 1, + asm: s390x.ALTDBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "LTEBR", + argLen: 1, + asm: s390x.ALTEBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "SLD", + argLen: 2, + asm: s390x.ASLD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SLW", + argLen: 2, + asm: s390x.ASLW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SLDconst", + auxType: auxUInt8, + argLen: 1, + asm: s390x.ASLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SLWconst", + auxType: auxUInt8, + argLen: 1, + asm: s390x.ASLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SRD", + argLen: 2, + asm: s390x.ASRD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 23550}, // R1 
R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SRW", + argLen: 2, + asm: s390x.ASRW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SRDconst", + auxType: auxUInt8, + argLen: 1, + asm: s390x.ASRD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SRWconst", + auxType: auxUInt8, + argLen: 1, + asm: s390x.ASRW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SRAD", + argLen: 2, + clobberFlags: true, + asm: s390x.ASRAD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SRAW", + argLen: 2, + clobberFlags: true, + asm: s390x.ASRAW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SRADconst", + auxType: auxUInt8, + argLen: 1, + clobberFlags: true, + asm: s390x.ASRAD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SRAWconst", + auxType: auxUInt8, + argLen: 1, + clobberFlags: true, + asm: s390x.ASRAW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "RLLG", + argLen: 2, + asm: s390x.ARLLG, + reg: regInfo{ + inputs: []inputInfo{ + {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "RLL", + argLen: 2, + asm: s390x.ARLL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "RLLconst", + auxType: auxUInt8, + argLen: 1, + asm: s390x.ARLL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "RXSBG", + auxType: auxS390XRotateParams, + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.ARXSBG, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: 
[]outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "RISBGZ", + auxType: auxS390XRotateParams, + argLen: 1, + clobberFlags: true, + asm: s390x.ARISBGZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "NEG", + argLen: 1, + clobberFlags: true, + asm: s390x.ANEG, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "NEGW", + argLen: 1, + clobberFlags: true, + asm: s390x.ANEGW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "NOT", + argLen: 1, + resultInArg0: true, + clobberFlags: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "NOTW", + argLen: 1, + resultInArg0: true, + clobberFlags: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "FSQRT", + argLen: 1, + asm: s390x.AFSQRT, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FSQRTS", + argLen: 1, + asm: s390x.AFSQRTS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "LOCGR", + auxType: auxS390XCCMask, + argLen: 3, + resultInArg0: true, + asm: s390x.ALOCGR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVBreg", + argLen: 1, + asm: s390x.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVBZreg", + argLen: 1, + asm: s390x.AMOVBZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVHreg", + argLen: 1, + asm: s390x.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVHZreg", + argLen: 1, + asm: s390x.AMOVHZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVWreg", + argLen: 1, + asm: 
s390x.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVWZreg", + argLen: 1, + asm: s390x.AMOVWZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVDconst", + auxType: auxInt64, + argLen: 0, + rematerializeable: true, + asm: s390x.AMOVD, + reg: regInfo{ + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "LDGR", + argLen: 1, + asm: s390x.ALDGR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "LGDR", + argLen: 1, + asm: s390x.ALGDR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "CFDBRA", + argLen: 1, + clobberFlags: true, + asm: s390x.ACFDBRA, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "CGDBRA", + argLen: 1, + clobberFlags: true, + asm: s390x.ACGDBRA, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "CFEBRA", + argLen: 1, + clobberFlags: true, + asm: s390x.ACFEBRA, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "CGEBRA", + argLen: 1, + clobberFlags: true, + asm: s390x.ACGEBRA, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "CEFBRA", + argLen: 1, + clobberFlags: true, + asm: s390x.ACEFBRA, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "CDFBRA", + argLen: 1, + clobberFlags: true, + asm: s390x.ACDFBRA, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "CEGBRA", + argLen: 1, + clobberFlags: true, + asm: s390x.ACEGBRA, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "CDGBRA", + argLen: 1, + clobberFlags: true, + asm: s390x.ACDGBRA, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 
4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "CLFEBR", + argLen: 1, + clobberFlags: true, + asm: s390x.ACLFEBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "CLFDBR", + argLen: 1, + clobberFlags: true, + asm: s390x.ACLFDBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "CLGEBR", + argLen: 1, + clobberFlags: true, + asm: s390x.ACLGEBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "CLGDBR", + argLen: 1, + clobberFlags: true, + asm: s390x.ACLGDBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "CELFBR", + argLen: 1, + clobberFlags: true, + asm: s390x.ACELFBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "CDLFBR", + argLen: 1, + clobberFlags: true, + asm: s390x.ACDLFBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "CELGBR", + argLen: 1, + clobberFlags: true, + asm: s390x.ACELGBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "CDLGBR", + argLen: 1, + clobberFlags: true, + asm: s390x.ACDLGBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "LEDBR", + argLen: 1, + asm: s390x.ALEDBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "LDEBR", + argLen: 1, + asm: s390x.ALDEBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "MOVDaddr", + auxType: auxSymOff, + argLen: 1, + rematerializeable: true, + symEffect: SymAddr, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295000064}, // SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVDaddridx", + auxType: auxSymOff, + argLen: 2, + symEffect: SymAddr, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295000064}, // SP SB + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + 
outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVBZload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVBZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVBload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVHZload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVHZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVHload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVWZload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVWZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVWload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVWBR", + argLen: 1, + asm: s390x.AMOVWBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVDBR", + argLen: 1, + asm: s390x.AMOVDBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVHBRload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVHBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVWBRload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVWBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + 
outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVDBRload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVDBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVBstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "MOVHstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "MOVWstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "MOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "MOVHBRstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVHBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "MOVWBRstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVWBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "MOVDBRstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVDBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "MVC", + auxType: auxSymValAndOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + faultOnNilArg1: true, + symEffect: SymNone, + asm: s390x.AMVC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "MOVBZloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: s390x.AMOVBZ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVBloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: s390x.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 
SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVHZloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: s390x.AMOVHZ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVHloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: s390x.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVWZloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: s390x.AMOVWZ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVWloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: s390x.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVDloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: s390x.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVHBRloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: s390x.AMOVHBR, + reg: regInfo{ + inputs: []inputInfo{ + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVWBRloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: s390x.AMOVWBR, + reg: regInfo{ + inputs: []inputInfo{ + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVDBRloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: s390x.AMOVDBR, + reg: regInfo{ + inputs: []inputInfo{ + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVBstoreidx", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: s390x.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 
R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "MOVHstoreidx", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: s390x.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "MOVWstoreidx", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: s390x.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "MOVDstoreidx", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: s390x.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "MOVHBRstoreidx", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: s390x.AMOVHBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "MOVWBRstoreidx", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: s390x.AMOVWBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "MOVDBRstoreidx", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: s390x.AMOVDBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "MOVBstoreconst", + auxType: auxSymValAndOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + }, + }, + { + name: "MOVHstoreconst", + auxType: auxSymValAndOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + }, + }, + { + name: "MOVWstoreconst", + auxType: auxSymValAndOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + }, + }, + { + name: "MOVDstoreconst", + auxType: auxSymValAndOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + }, + }, + { + name: "CLEAR", + auxType: auxSymValAndOff, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.ACLEAR, + reg: regInfo{ + inputs: 
[]inputInfo{ + {0, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "CALLstatic", + auxType: auxCallOff, + argLen: 1, + clobberFlags: true, + call: true, + reg: regInfo{ + clobbers: 4294933503, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 g R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + { + name: "CALLtail", + auxType: auxCallOff, + argLen: 1, + clobberFlags: true, + call: true, + tailCall: true, + reg: regInfo{ + clobbers: 4294933503, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 g R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + { + name: "CALLclosure", + auxType: auxCallOff, + argLen: 3, + clobberFlags: true, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 4096}, // R12 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + clobbers: 4294933503, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 g R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + { + name: "CALLinter", + auxType: auxCallOff, + argLen: 2, + clobberFlags: true, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + clobbers: 4294933503, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 g R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + { + name: "InvertFlags", + argLen: 1, + reg: regInfo{}, + }, + { + name: "LoweredGetG", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "LoweredGetClosurePtr", + argLen: 0, + zeroWidth: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 4096}, // R12 + }, + }, + }, + { + name: "LoweredGetCallerSP", + argLen: 1, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "LoweredGetCallerPC", + argLen: 0, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "LoweredNilCheck", + argLen: 2, + clobberFlags: true, + nilCheck: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "LoweredRound32F", + argLen: 1, + resultInArg0: true, + zeroWidth: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "LoweredRound64F", + argLen: 1, + resultInArg0: true, + zeroWidth: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "LoweredWB", + auxType: auxInt64, + argLen: 1, + clobberFlags: true, + reg: regInfo{ + clobbers: 4294918146, // R1 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + outputs: []outputInfo{ + {0, 512}, // R9 + }, + }, + }, + { + name: "LoweredPanicBoundsA", + auxType: auxInt64, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4}, // R2 + {1, 8}, // R3 + }, + }, + }, + { + name: "LoweredPanicBoundsB", + auxType: auxInt64, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2}, // R1 + {1, 4}, // R2 + }, + }, + }, + { + name: "LoweredPanicBoundsC", + auxType: auxInt64, + argLen: 3, + call: true, 
+ reg: regInfo{ + inputs: []inputInfo{ + {0, 1}, // R0 + {1, 2}, // R1 + }, + }, + }, + { + name: "FlagEQ", + argLen: 0, + reg: regInfo{}, + }, + { + name: "FlagLT", + argLen: 0, + reg: regInfo{}, + }, + { + name: "FlagGT", + argLen: 0, + reg: regInfo{}, + }, + { + name: "FlagOV", + argLen: 0, + reg: regInfo{}, + }, + { + name: "SYNC", + argLen: 1, + asm: s390x.ASYNC, + reg: regInfo{}, + }, + { + name: "MOVBZatomicload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVBZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVWZatomicload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVWZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVDatomicload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVBatomicstore", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + symEffect: SymWrite, + asm: s390x.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "MOVWatomicstore", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + symEffect: SymWrite, + asm: s390x.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "MOVDatomicstore", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + symEffect: SymWrite, + asm: s390x.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "LAA", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + symEffect: SymRdWr, + asm: s390x.ALAA, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "LAAG", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + symEffect: SymRdWr, + asm: s390x.ALAAG, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "AddTupleFirst32", + argLen: 2, + reg: regInfo{}, + }, + { + name: "AddTupleFirst64", + argLen: 2, + reg: regInfo{}, + }, + { + name: "LAN", + argLen: 3, + 
clobberFlags: true, + hasSideEffects: true, + asm: s390x.ALAN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "LANfloor", + argLen: 3, + clobberFlags: true, + hasSideEffects: true, + asm: s390x.ALAN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2}, // R1 + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + clobbers: 2, // R1 + }, + }, + { + name: "LAO", + argLen: 3, + clobberFlags: true, + hasSideEffects: true, + asm: s390x.ALAO, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "LAOfloor", + argLen: 3, + clobberFlags: true, + hasSideEffects: true, + asm: s390x.ALAO, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2}, // R1 + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + clobbers: 2, // R1 + }, + }, + { + name: "LoweredAtomicCas32", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + symEffect: SymRdWr, + asm: s390x.ACS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1}, // R0 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + clobbers: 1, // R0 + outputs: []outputInfo{ + {1, 0}, + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "LoweredAtomicCas64", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + symEffect: SymRdWr, + asm: s390x.ACSG, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1}, // R0 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + clobbers: 1, // R0 + outputs: []outputInfo{ + {1, 0}, + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "LoweredAtomicExchange32", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + symEffect: SymRdWr, + asm: s390x.ACS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {1, 0}, + {0, 1}, // R0 + }, + }, + }, + { + name: "LoweredAtomicExchange64", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + symEffect: SymRdWr, + asm: s390x.ACSG, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {1, 0}, + {0, 1}, // R0 + }, + }, + }, + { + name: "FLOGR", + argLen: 1, + clobberFlags: true, + asm: s390x.AFLOGR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + clobbers: 2, // R1 + outputs: []outputInfo{ + {0, 1}, // R0 + }, + }, + }, + { + name: "POPCNT", + argLen: 1, + clobberFlags: true, + asm: s390x.APOPCNT, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MLGR", + argLen: 2, + asm: s390x.AMLGR, + reg: regInfo{ + inputs: []inputInfo{ + {1, 8}, // R3 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + 
outputs: []outputInfo{ + {0, 4}, // R2 + {1, 8}, // R3 + }, + }, + }, + { + name: "SumBytes2", + argLen: 1, + reg: regInfo{}, + }, + { + name: "SumBytes4", + argLen: 1, + reg: regInfo{}, + }, + { + name: "SumBytes8", + argLen: 1, + reg: regInfo{}, + }, + { + name: "STMG2", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.ASTMG, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // R1 + {2, 4}, // R2 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "STMG3", + auxType: auxSymOff, + argLen: 5, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.ASTMG, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // R1 + {2, 4}, // R2 + {3, 8}, // R3 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "STMG4", + auxType: auxSymOff, + argLen: 6, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.ASTMG, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // R1 + {2, 4}, // R2 + {3, 8}, // R3 + {4, 16}, // R4 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "STM2", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.ASTMY, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // R1 + {2, 4}, // R2 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "STM3", + auxType: auxSymOff, + argLen: 5, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.ASTMY, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // R1 + {2, 4}, // R2 + {3, 8}, // R3 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "STM4", + auxType: auxSymOff, + argLen: 6, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.ASTMY, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // R1 + {2, 4}, // R2 + {3, 8}, // R3 + {4, 16}, // R4 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "LoweredMove", + auxType: auxInt64, + argLen: 4, + clobberFlags: true, + faultOnNilArg0: true, + faultOnNilArg1: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2}, // R1 + {1, 4}, // R2 + {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + clobbers: 6, // R1 R2 + }, + }, + { + name: "LoweredZero", + auxType: auxInt64, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2}, // R1 + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + clobbers: 2, // R1 + }, + }, + + { + name: "LoweredStaticCall", + auxType: auxCallOff, + argLen: 1, + call: true, + reg: regInfo{ + clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g + }, + }, + { + name: "LoweredTailCall", + auxType: auxCallOff, + argLen: 1, + call: true, + tailCall: true, + reg: regInfo{ + clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g + }, + }, + { + name: "LoweredClosureCall", + auxType: auxCallOff, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {1, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 
R13 R14 R15 + }, + clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g + }, + }, + { + name: "LoweredInterCall", + auxType: auxCallOff, + argLen: 2, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g + }, + }, + { + name: "LoweredAddr", + auxType: auxSymOff, + argLen: 1, + rematerializeable: true, + symEffect: SymAddr, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "LoweredMove", + auxType: auxInt64, + argLen: 3, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {1, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "LoweredZero", + auxType: auxInt64, + argLen: 2, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "LoweredGetClosurePtr", + argLen: 0, + reg: regInfo{ + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "LoweredGetCallerPC", + argLen: 0, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "LoweredGetCallerSP", + argLen: 1, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "LoweredNilCheck", + argLen: 2, + nilCheck: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "LoweredWB", + auxType: auxInt64, + argLen: 1, + reg: regInfo{ + clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "LoweredConvert", + argLen: 2, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "Select", + argLen: 3, + asm: wasm.ASelect, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {2, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Load8U", + auxType: auxInt64, + argLen: 2, + asm: wasm.AI64Load8U, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 
R15 SP SB + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Load8S", + auxType: auxInt64, + argLen: 2, + asm: wasm.AI64Load8S, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Load16U", + auxType: auxInt64, + argLen: 2, + asm: wasm.AI64Load16U, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Load16S", + auxType: auxInt64, + argLen: 2, + asm: wasm.AI64Load16S, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Load32U", + auxType: auxInt64, + argLen: 2, + asm: wasm.AI64Load32U, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Load32S", + auxType: auxInt64, + argLen: 2, + asm: wasm.AI64Load32S, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Load", + auxType: auxInt64, + argLen: 2, + asm: wasm.AI64Load, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Store8", + auxType: auxInt64, + argLen: 3, + asm: wasm.AI64Store8, + reg: regInfo{ + inputs: []inputInfo{ + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + }, + }, + { + name: "I64Store16", + auxType: auxInt64, + argLen: 3, + asm: wasm.AI64Store16, + reg: regInfo{ + inputs: []inputInfo{ + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + }, + }, + { + name: "I64Store32", + auxType: auxInt64, + argLen: 3, + asm: wasm.AI64Store32, + reg: regInfo{ + inputs: []inputInfo{ + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + }, + }, + { + name: "I64Store", + auxType: auxInt64, + argLen: 3, + asm: wasm.AI64Store, + reg: regInfo{ + inputs: []inputInfo{ + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + }, + }, + { + name: "F32Load", + auxType: auxInt64, + argLen: 2, + asm: wasm.AF32Load, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + outputs: 
[]outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F64Load", + auxType: auxInt64, + argLen: 2, + asm: wasm.AF64Load, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F32Store", + auxType: auxInt64, + argLen: 3, + asm: wasm.AF32Store, + reg: regInfo{ + inputs: []inputInfo{ + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + }, + }, + { + name: "F64Store", + auxType: auxInt64, + argLen: 3, + asm: wasm.AF64Store, + reg: regInfo{ + inputs: []inputInfo{ + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + }, + }, + { + name: "I64Const", + auxType: auxInt64, + argLen: 0, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F32Const", + auxType: auxFloat32, + argLen: 0, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F64Const", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "I64Eqz", + argLen: 1, + asm: wasm.AI64Eqz, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Eq", + argLen: 2, + asm: wasm.AI64Eq, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Ne", + argLen: 2, + asm: wasm.AI64Ne, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64LtS", + argLen: 2, + asm: wasm.AI64LtS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64LtU", + argLen: 2, + asm: wasm.AI64LtU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64GtS", 
+ argLen: 2, + asm: wasm.AI64GtS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64GtU", + argLen: 2, + asm: wasm.AI64GtU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64LeS", + argLen: 2, + asm: wasm.AI64LeS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64LeU", + argLen: 2, + asm: wasm.AI64LeU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64GeS", + argLen: 2, + asm: wasm.AI64GeS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64GeU", + argLen: 2, + asm: wasm.AI64GeU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F32Eq", + argLen: 2, + asm: wasm.AF32Eq, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F32Ne", + argLen: 2, + asm: wasm.AF32Ne, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F32Lt", + argLen: 2, + asm: wasm.AF32Lt, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F32Gt", + argLen: 2, + asm: wasm.AF32Gt, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 
F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F32Le", + argLen: 2, + asm: wasm.AF32Le, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F32Ge", + argLen: 2, + asm: wasm.AF32Ge, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F64Eq", + argLen: 2, + asm: wasm.AF64Eq, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F64Ne", + argLen: 2, + asm: wasm.AF64Ne, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F64Lt", + argLen: 2, + asm: wasm.AF64Lt, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F64Gt", + argLen: 2, + asm: wasm.AF64Gt, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F64Le", + argLen: 2, + asm: wasm.AF64Le, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F64Ge", + argLen: 2, + asm: wasm.AF64Ge, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Add", + argLen: 2, + asm: wasm.AI64Add, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 
R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64AddConst", + auxType: auxInt64, + argLen: 1, + asm: wasm.AI64Add, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Sub", + argLen: 2, + asm: wasm.AI64Sub, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Mul", + argLen: 2, + asm: wasm.AI64Mul, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64DivS", + argLen: 2, + asm: wasm.AI64DivS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64DivU", + argLen: 2, + asm: wasm.AI64DivU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64RemS", + argLen: 2, + asm: wasm.AI64RemS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64RemU", + argLen: 2, + asm: wasm.AI64RemU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64And", + argLen: 2, + asm: wasm.AI64And, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Or", + argLen: 2, + asm: wasm.AI64Or, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Xor", + argLen: 2, + asm: wasm.AI64Xor, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 
R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Shl", + argLen: 2, + asm: wasm.AI64Shl, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64ShrS", + argLen: 2, + asm: wasm.AI64ShrS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64ShrU", + argLen: 2, + asm: wasm.AI64ShrU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F32Neg", + argLen: 1, + asm: wasm.AF32Neg, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32Add", + argLen: 2, + asm: wasm.AF32Add, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32Sub", + argLen: 2, + asm: wasm.AF32Sub, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32Mul", + argLen: 2, + asm: wasm.AF32Mul, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32Div", + argLen: 2, + asm: wasm.AF32Div, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F64Neg", + argLen: 1, + asm: wasm.AF64Neg, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64Add", + argLen: 2, + asm: wasm.AF64Add, + reg: regInfo{ + inputs: []inputInfo{ + {0, 
281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64Sub", + argLen: 2, + asm: wasm.AF64Sub, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64Mul", + argLen: 2, + asm: wasm.AF64Mul, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64Div", + argLen: 2, + asm: wasm.AF64Div, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "I64TruncSatF64S", + argLen: 1, + asm: wasm.AI64TruncSatF64S, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64TruncSatF64U", + argLen: 1, + asm: wasm.AI64TruncSatF64U, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64TruncSatF32S", + argLen: 1, + asm: wasm.AI64TruncSatF32S, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64TruncSatF32U", + argLen: 1, + asm: wasm.AI64TruncSatF32U, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F32ConvertI64S", + argLen: 1, + asm: wasm.AF32ConvertI64S, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32ConvertI64U", + argLen: 1, + asm: wasm.AF32ConvertI64U, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F64ConvertI64S", + argLen: 1, + asm: wasm.AF64ConvertI64S, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 
R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64ConvertI64U", + argLen: 1, + asm: wasm.AF64ConvertI64U, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F32DemoteF64", + argLen: 1, + asm: wasm.AF32DemoteF64, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F64PromoteF32", + argLen: 1, + asm: wasm.AF64PromoteF32, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "I64Extend8S", + argLen: 1, + asm: wasm.AI64Extend8S, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Extend16S", + argLen: 1, + asm: wasm.AI64Extend16S, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Extend32S", + argLen: 1, + asm: wasm.AI64Extend32S, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F32Sqrt", + argLen: 1, + asm: wasm.AF32Sqrt, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32Trunc", + argLen: 1, + asm: wasm.AF32Trunc, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32Ceil", + argLen: 1, + asm: wasm.AF32Ceil, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32Floor", + argLen: 1, + asm: wasm.AF32Floor, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32Nearest", + argLen: 1, + asm: wasm.AF32Nearest, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32Abs", + 
argLen: 1, + asm: wasm.AF32Abs, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32Copysign", + argLen: 2, + asm: wasm.AF32Copysign, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F64Sqrt", + argLen: 1, + asm: wasm.AF64Sqrt, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64Trunc", + argLen: 1, + asm: wasm.AF64Trunc, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64Ceil", + argLen: 1, + asm: wasm.AF64Ceil, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64Floor", + argLen: 1, + asm: wasm.AF64Floor, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64Nearest", + argLen: 1, + asm: wasm.AF64Nearest, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64Abs", + argLen: 1, + asm: wasm.AF64Abs, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64Copysign", + argLen: 2, + asm: wasm.AF64Copysign, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "I64Ctz", + argLen: 1, + asm: wasm.AI64Ctz, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Clz", + argLen: 1, + asm: wasm.AI64Clz, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 
R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I32Rotl", + argLen: 2, + asm: wasm.AI32Rotl, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Rotl", + argLen: 2, + asm: wasm.AI64Rotl, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Popcnt", + argLen: 1, + asm: wasm.AI64Popcnt, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + + { + name: "Add8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Add16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Add32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Add64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AddPtr", + argLen: 2, + generic: true, + }, + { + name: "Add32F", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Add64F", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Sub8", + argLen: 2, + generic: true, + }, + { + name: "Sub16", + argLen: 2, + generic: true, + }, + { + name: "Sub32", + argLen: 2, + generic: true, + }, + { + name: "Sub64", + argLen: 2, + generic: true, + }, + { + name: "SubPtr", + argLen: 2, + generic: true, + }, + { + name: "Sub32F", + argLen: 2, + generic: true, + }, + { + name: "Sub64F", + argLen: 2, + generic: true, + }, + { + name: "Mul8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Mul16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Mul32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Mul64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Mul32F", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Mul64F", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Div32F", + argLen: 2, + generic: true, + }, + { + name: "Div64F", + argLen: 2, + generic: true, + }, + { + name: "Hmul32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Hmul32u", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Hmul64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Hmul64u", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Mul32uhilo", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Mul64uhilo", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Mul32uover", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Mul64uover", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Avg32u", + argLen: 2, + generic: true, + }, + { + name: "Avg64u", + argLen: 2, + generic: true, + }, + { + name: "Div8", + argLen: 2, + generic: true, + }, + { + name: "Div8u", + argLen: 2, + generic: true, + }, + { + name: "Div16", + auxType: auxBool, + argLen: 2, + generic: true, + 
}, + { + name: "Div16u", + argLen: 2, + generic: true, + }, + { + name: "Div32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Div32u", + argLen: 2, + generic: true, + }, + { + name: "Div64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Div64u", + argLen: 2, + generic: true, + }, + { + name: "Div128u", + argLen: 3, + generic: true, + }, + { + name: "Mod8", + argLen: 2, + generic: true, + }, + { + name: "Mod8u", + argLen: 2, + generic: true, + }, + { + name: "Mod16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Mod16u", + argLen: 2, + generic: true, + }, + { + name: "Mod32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Mod32u", + argLen: 2, + generic: true, + }, + { + name: "Mod64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Mod64u", + argLen: 2, + generic: true, + }, + { + name: "And8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "And16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "And32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "And64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Or8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Or16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Or32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Or64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Xor8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Xor16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Xor32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Xor64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Lsh8x8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh8x16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh8x32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh8x64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh16x8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh16x16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh16x32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh16x64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh32x8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh32x16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh32x32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh32x64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh64x8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh64x16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh64x32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh64x64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh8x8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh8x16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh8x32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh8x64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh16x8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh16x16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: 
"Rsh16x32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh16x64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh32x8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh32x16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh32x32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh32x64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh64x8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh64x16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh64x32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh64x64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh8Ux8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh8Ux16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh8Ux32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh8Ux64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh16Ux8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh16Ux16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh16Ux32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh16Ux64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh32Ux8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh32Ux16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh32Ux32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh32Ux64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh64Ux8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh64Ux16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh64Ux32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh64Ux64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Eq8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Eq16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Eq32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Eq64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "EqPtr", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "EqInter", + argLen: 2, + generic: true, + }, + { + name: "EqSlice", + argLen: 2, + generic: true, + }, + { + name: "Eq32F", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Eq64F", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Neq8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Neq16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Neq32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Neq64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "NeqPtr", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "NeqInter", + argLen: 2, + generic: true, + }, + { + name: "NeqSlice", + argLen: 2, + generic: true, + }, + { + name: "Neq32F", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Neq64F", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Less8", + argLen: 2, + generic: true, + }, + { + name: "Less8U", + argLen: 2, + generic: true, + }, + { + name: "Less16", + argLen: 2, + generic: true, + }, + { + name: "Less16U", + 
argLen: 2, + generic: true, + }, + { + name: "Less32", + argLen: 2, + generic: true, + }, + { + name: "Less32U", + argLen: 2, + generic: true, + }, + { + name: "Less64", + argLen: 2, + generic: true, + }, + { + name: "Less64U", + argLen: 2, + generic: true, + }, + { + name: "Less32F", + argLen: 2, + generic: true, + }, + { + name: "Less64F", + argLen: 2, + generic: true, + }, + { + name: "Leq8", + argLen: 2, + generic: true, + }, + { + name: "Leq8U", + argLen: 2, + generic: true, + }, + { + name: "Leq16", + argLen: 2, + generic: true, + }, + { + name: "Leq16U", + argLen: 2, + generic: true, + }, + { + name: "Leq32", + argLen: 2, + generic: true, + }, + { + name: "Leq32U", + argLen: 2, + generic: true, + }, + { + name: "Leq64", + argLen: 2, + generic: true, + }, + { + name: "Leq64U", + argLen: 2, + generic: true, + }, + { + name: "Leq32F", + argLen: 2, + generic: true, + }, + { + name: "Leq64F", + argLen: 2, + generic: true, + }, + { + name: "CondSelect", + argLen: 3, + generic: true, + }, + { + name: "AndB", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "OrB", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "EqB", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "NeqB", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Not", + argLen: 1, + generic: true, + }, + { + name: "Neg8", + argLen: 1, + generic: true, + }, + { + name: "Neg16", + argLen: 1, + generic: true, + }, + { + name: "Neg32", + argLen: 1, + generic: true, + }, + { + name: "Neg64", + argLen: 1, + generic: true, + }, + { + name: "Neg32F", + argLen: 1, + generic: true, + }, + { + name: "Neg64F", + argLen: 1, + generic: true, + }, + { + name: "Com8", + argLen: 1, + generic: true, + }, + { + name: "Com16", + argLen: 1, + generic: true, + }, + { + name: "Com32", + argLen: 1, + generic: true, + }, + { + name: "Com64", + argLen: 1, + generic: true, + }, + { + name: "Ctz8", + argLen: 1, + generic: true, + }, + { + name: "Ctz16", + argLen: 1, + generic: true, + }, + { + name: "Ctz32", + argLen: 1, + generic: true, + }, + { + name: "Ctz64", + argLen: 1, + generic: true, + }, + { + name: "Ctz64On32", + argLen: 2, + generic: true, + }, + { + name: "Ctz8NonZero", + argLen: 1, + generic: true, + }, + { + name: "Ctz16NonZero", + argLen: 1, + generic: true, + }, + { + name: "Ctz32NonZero", + argLen: 1, + generic: true, + }, + { + name: "Ctz64NonZero", + argLen: 1, + generic: true, + }, + { + name: "BitLen8", + argLen: 1, + generic: true, + }, + { + name: "BitLen16", + argLen: 1, + generic: true, + }, + { + name: "BitLen32", + argLen: 1, + generic: true, + }, + { + name: "BitLen64", + argLen: 1, + generic: true, + }, + { + name: "Bswap16", + argLen: 1, + generic: true, + }, + { + name: "Bswap32", + argLen: 1, + generic: true, + }, + { + name: "Bswap64", + argLen: 1, + generic: true, + }, + { + name: "BitRev8", + argLen: 1, + generic: true, + }, + { + name: "BitRev16", + argLen: 1, + generic: true, + }, + { + name: "BitRev32", + argLen: 1, + generic: true, + }, + { + name: "BitRev64", + argLen: 1, + generic: true, + }, + { + name: "PopCount8", + argLen: 1, + generic: true, + }, + { + name: "PopCount16", + argLen: 1, + generic: true, + }, + { + name: "PopCount32", + argLen: 1, + generic: true, + }, + { + name: "PopCount64", + argLen: 1, + generic: true, + }, + { + name: "RotateLeft64", + argLen: 2, + generic: true, + }, + { + name: "RotateLeft32", + argLen: 2, + generic: true, + }, + { + name: "RotateLeft16", + argLen: 2, + generic: true, + }, + { + name: 
"RotateLeft8", + argLen: 2, + generic: true, + }, + { + name: "Sqrt", + argLen: 1, + generic: true, + }, + { + name: "Sqrt32", + argLen: 1, + generic: true, + }, + { + name: "Floor", + argLen: 1, + generic: true, + }, + { + name: "Ceil", + argLen: 1, + generic: true, + }, + { + name: "Trunc", + argLen: 1, + generic: true, + }, + { + name: "Round", + argLen: 1, + generic: true, + }, + { + name: "RoundToEven", + argLen: 1, + generic: true, + }, + { + name: "Abs", + argLen: 1, + generic: true, + }, + { + name: "Copysign", + argLen: 2, + generic: true, + }, + { + name: "Min64", + argLen: 2, + generic: true, + }, + { + name: "Max64", + argLen: 2, + generic: true, + }, + { + name: "Min64u", + argLen: 2, + generic: true, + }, + { + name: "Max64u", + argLen: 2, + generic: true, + }, + { + name: "Min64F", + argLen: 2, + generic: true, + }, + { + name: "Min32F", + argLen: 2, + generic: true, + }, + { + name: "Max64F", + argLen: 2, + generic: true, + }, + { + name: "Max32F", + argLen: 2, + generic: true, + }, + { + name: "FMA", + argLen: 3, + generic: true, + }, + { + name: "Phi", + argLen: -1, + zeroWidth: true, + generic: true, + }, + { + name: "Copy", + argLen: 1, + generic: true, + }, + { + name: "Convert", + argLen: 2, + resultInArg0: true, + zeroWidth: true, + generic: true, + }, + { + name: "ConstBool", + auxType: auxBool, + argLen: 0, + generic: true, + }, + { + name: "ConstString", + auxType: auxString, + argLen: 0, + generic: true, + }, + { + name: "ConstNil", + argLen: 0, + generic: true, + }, + { + name: "Const8", + auxType: auxInt8, + argLen: 0, + generic: true, + }, + { + name: "Const16", + auxType: auxInt16, + argLen: 0, + generic: true, + }, + { + name: "Const32", + auxType: auxInt32, + argLen: 0, + generic: true, + }, + { + name: "Const64", + auxType: auxInt64, + argLen: 0, + generic: true, + }, + { + name: "Const32F", + auxType: auxFloat32, + argLen: 0, + generic: true, + }, + { + name: "Const64F", + auxType: auxFloat64, + argLen: 0, + generic: true, + }, + { + name: "ConstInterface", + argLen: 0, + generic: true, + }, + { + name: "ConstSlice", + argLen: 0, + generic: true, + }, + { + name: "InitMem", + argLen: 0, + zeroWidth: true, + generic: true, + }, + { + name: "Arg", + auxType: auxSymOff, + argLen: 0, + zeroWidth: true, + symEffect: SymRead, + generic: true, + }, + { + name: "ArgIntReg", + auxType: auxNameOffsetInt8, + argLen: 0, + zeroWidth: true, + generic: true, + }, + { + name: "ArgFloatReg", + auxType: auxNameOffsetInt8, + argLen: 0, + zeroWidth: true, + generic: true, + }, + { + name: "Addr", + auxType: auxSym, + argLen: 1, + symEffect: SymAddr, + generic: true, + }, + { + name: "LocalAddr", + auxType: auxSym, + argLen: 2, + symEffect: SymAddr, + generic: true, + }, + { + name: "SP", + argLen: 0, + zeroWidth: true, + fixedReg: true, + generic: true, + }, + { + name: "SB", + argLen: 0, + zeroWidth: true, + fixedReg: true, + generic: true, + }, + { + name: "SPanchored", + argLen: 2, + zeroWidth: true, + generic: true, + }, + { + name: "Load", + argLen: 2, + generic: true, + }, + { + name: "Dereference", + argLen: 2, + generic: true, + }, + { + name: "Store", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "Move", + auxType: auxTypSize, + argLen: 3, + generic: true, + }, + { + name: "Zero", + auxType: auxTypSize, + argLen: 2, + generic: true, + }, + { + name: "StoreWB", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "MoveWB", + auxType: auxTypSize, + argLen: 3, + generic: true, + }, + { + name: "ZeroWB", + auxType: auxTypSize, + argLen: 
2, + generic: true, + }, + { + name: "WBend", + argLen: 1, + generic: true, + }, + { + name: "WB", + auxType: auxInt64, + argLen: 1, + generic: true, + }, + { + name: "HasCPUFeature", + auxType: auxSym, + argLen: 0, + symEffect: SymNone, + generic: true, + }, + { + name: "PanicBounds", + auxType: auxInt64, + argLen: 3, + call: true, + generic: true, + }, + { + name: "PanicExtend", + auxType: auxInt64, + argLen: 4, + call: true, + generic: true, + }, + { + name: "ClosureCall", + auxType: auxCallOff, + argLen: -1, + call: true, + generic: true, + }, + { + name: "StaticCall", + auxType: auxCallOff, + argLen: -1, + call: true, + generic: true, + }, + { + name: "InterCall", + auxType: auxCallOff, + argLen: -1, + call: true, + generic: true, + }, + { + name: "TailCall", + auxType: auxCallOff, + argLen: -1, + call: true, + generic: true, + }, + { + name: "ClosureLECall", + auxType: auxCallOff, + argLen: -1, + call: true, + generic: true, + }, + { + name: "StaticLECall", + auxType: auxCallOff, + argLen: -1, + call: true, + generic: true, + }, + { + name: "InterLECall", + auxType: auxCallOff, + argLen: -1, + call: true, + generic: true, + }, + { + name: "TailLECall", + auxType: auxCallOff, + argLen: -1, + call: true, + generic: true, + }, + { + name: "SignExt8to16", + argLen: 1, + generic: true, + }, + { + name: "SignExt8to32", + argLen: 1, + generic: true, + }, + { + name: "SignExt8to64", + argLen: 1, + generic: true, + }, + { + name: "SignExt16to32", + argLen: 1, + generic: true, + }, + { + name: "SignExt16to64", + argLen: 1, + generic: true, + }, + { + name: "SignExt32to64", + argLen: 1, + generic: true, + }, + { + name: "ZeroExt8to16", + argLen: 1, + generic: true, + }, + { + name: "ZeroExt8to32", + argLen: 1, + generic: true, + }, + { + name: "ZeroExt8to64", + argLen: 1, + generic: true, + }, + { + name: "ZeroExt16to32", + argLen: 1, + generic: true, + }, + { + name: "ZeroExt16to64", + argLen: 1, + generic: true, + }, + { + name: "ZeroExt32to64", + argLen: 1, + generic: true, + }, + { + name: "Trunc16to8", + argLen: 1, + generic: true, + }, + { + name: "Trunc32to8", + argLen: 1, + generic: true, + }, + { + name: "Trunc32to16", + argLen: 1, + generic: true, + }, + { + name: "Trunc64to8", + argLen: 1, + generic: true, + }, + { + name: "Trunc64to16", + argLen: 1, + generic: true, + }, + { + name: "Trunc64to32", + argLen: 1, + generic: true, + }, + { + name: "Cvt32to32F", + argLen: 1, + generic: true, + }, + { + name: "Cvt32to64F", + argLen: 1, + generic: true, + }, + { + name: "Cvt64to32F", + argLen: 1, + generic: true, + }, + { + name: "Cvt64to64F", + argLen: 1, + generic: true, + }, + { + name: "Cvt32Fto32", + argLen: 1, + generic: true, + }, + { + name: "Cvt32Fto64", + argLen: 1, + generic: true, + }, + { + name: "Cvt64Fto32", + argLen: 1, + generic: true, + }, + { + name: "Cvt64Fto64", + argLen: 1, + generic: true, + }, + { + name: "Cvt32Fto64F", + argLen: 1, + generic: true, + }, + { + name: "Cvt64Fto32F", + argLen: 1, + generic: true, + }, + { + name: "CvtBoolToUint8", + argLen: 1, + generic: true, + }, + { + name: "Round32F", + argLen: 1, + generic: true, + }, + { + name: "Round64F", + argLen: 1, + generic: true, + }, + { + name: "IsNonNil", + argLen: 1, + generic: true, + }, + { + name: "IsInBounds", + argLen: 2, + generic: true, + }, + { + name: "IsSliceInBounds", + argLen: 2, + generic: true, + }, + { + name: "NilCheck", + argLen: 2, + nilCheck: true, + generic: true, + }, + { + name: "GetG", + argLen: 1, + zeroWidth: true, + generic: true, + }, + { + name: "GetClosurePtr", + argLen: 0, 
+ generic: true, + }, + { + name: "GetCallerPC", + argLen: 0, + generic: true, + }, + { + name: "GetCallerSP", + argLen: 1, + generic: true, + }, + { + name: "PtrIndex", + argLen: 2, + generic: true, + }, + { + name: "OffPtr", + auxType: auxInt64, + argLen: 1, + generic: true, + }, + { + name: "SliceMake", + argLen: 3, + generic: true, + }, + { + name: "SlicePtr", + argLen: 1, + generic: true, + }, + { + name: "SliceLen", + argLen: 1, + generic: true, + }, + { + name: "SliceCap", + argLen: 1, + generic: true, + }, + { + name: "SlicePtrUnchecked", + argLen: 1, + generic: true, + }, + { + name: "ComplexMake", + argLen: 2, + generic: true, + }, + { + name: "ComplexReal", + argLen: 1, + generic: true, + }, + { + name: "ComplexImag", + argLen: 1, + generic: true, + }, + { + name: "StringMake", + argLen: 2, + generic: true, + }, + { + name: "StringPtr", + argLen: 1, + generic: true, + }, + { + name: "StringLen", + argLen: 1, + generic: true, + }, + { + name: "IMake", + argLen: 2, + generic: true, + }, + { + name: "ITab", + argLen: 1, + generic: true, + }, + { + name: "IData", + argLen: 1, + generic: true, + }, + { + name: "StructMake", + argLen: -1, + generic: true, + }, + { + name: "StructSelect", + auxType: auxInt64, + argLen: 1, + generic: true, + }, + { + name: "ArrayMake0", + argLen: 0, + generic: true, + }, + { + name: "ArrayMake1", + argLen: 1, + generic: true, + }, + { + name: "ArraySelect", + auxType: auxInt64, + argLen: 1, + generic: true, + }, + { + name: "StoreReg", + argLen: 1, + generic: true, + }, + { + name: "LoadReg", + argLen: 1, + generic: true, + }, + { + name: "FwdRef", + auxType: auxSym, + argLen: 0, + symEffect: SymNone, + generic: true, + }, + { + name: "Unknown", + argLen: 0, + generic: true, + }, + { + name: "VarDef", + auxType: auxSym, + argLen: 1, + zeroWidth: true, + symEffect: SymNone, + generic: true, + }, + { + name: "VarLive", + auxType: auxSym, + argLen: 1, + zeroWidth: true, + symEffect: SymRead, + generic: true, + }, + { + name: "KeepAlive", + argLen: 2, + zeroWidth: true, + generic: true, + }, + { + name: "InlMark", + auxType: auxInt32, + argLen: 1, + generic: true, + }, + { + name: "Int64Make", + argLen: 2, + generic: true, + }, + { + name: "Int64Hi", + argLen: 1, + generic: true, + }, + { + name: "Int64Lo", + argLen: 1, + generic: true, + }, + { + name: "Add32carry", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Add32withcarry", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "Sub32carry", + argLen: 2, + generic: true, + }, + { + name: "Sub32withcarry", + argLen: 3, + generic: true, + }, + { + name: "Add64carry", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "Sub64borrow", + argLen: 3, + generic: true, + }, + { + name: "Signmask", + argLen: 1, + generic: true, + }, + { + name: "Zeromask", + argLen: 1, + generic: true, + }, + { + name: "Slicemask", + argLen: 1, + generic: true, + }, + { + name: "SpectreIndex", + argLen: 2, + generic: true, + }, + { + name: "SpectreSliceIndex", + argLen: 2, + generic: true, + }, + { + name: "Cvt32Uto32F", + argLen: 1, + generic: true, + }, + { + name: "Cvt32Uto64F", + argLen: 1, + generic: true, + }, + { + name: "Cvt32Fto32U", + argLen: 1, + generic: true, + }, + { + name: "Cvt64Fto32U", + argLen: 1, + generic: true, + }, + { + name: "Cvt64Uto32F", + argLen: 1, + generic: true, + }, + { + name: "Cvt64Uto64F", + argLen: 1, + generic: true, + }, + { + name: "Cvt32Fto64U", + argLen: 1, + generic: true, + }, + { + name: "Cvt64Fto64U", + argLen: 1, + generic: true, + 
}, + { + name: "Select0", + argLen: 1, + zeroWidth: true, + generic: true, + }, + { + name: "Select1", + argLen: 1, + zeroWidth: true, + generic: true, + }, + { + name: "MakeTuple", + argLen: 2, + generic: true, + }, + { + name: "SelectN", + auxType: auxInt64, + argLen: 1, + generic: true, + }, + { + name: "SelectNAddr", + auxType: auxInt64, + argLen: 1, + generic: true, + }, + { + name: "MakeResult", + argLen: -1, + generic: true, + }, + { + name: "AtomicLoad8", + argLen: 2, + generic: true, + }, + { + name: "AtomicLoad32", + argLen: 2, + generic: true, + }, + { + name: "AtomicLoad64", + argLen: 2, + generic: true, + }, + { + name: "AtomicLoadPtr", + argLen: 2, + generic: true, + }, + { + name: "AtomicLoadAcq32", + argLen: 2, + generic: true, + }, + { + name: "AtomicLoadAcq64", + argLen: 2, + generic: true, + }, + { + name: "AtomicStore8", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicStore32", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicStore64", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicStorePtrNoWB", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicStoreRel32", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicStoreRel64", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicExchange8", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicExchange32", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicExchange64", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicAdd32", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicAdd64", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicCompareAndSwap32", + argLen: 4, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicCompareAndSwap64", + argLen: 4, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicCompareAndSwapRel32", + argLen: 4, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicAnd8", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicOr8", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicAnd32", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicOr32", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicAnd64value", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicAnd32value", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicAnd8value", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicOr64value", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicOr32value", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicOr8value", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicStore8Variant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicStore32Variant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicStore64Variant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicAdd32Variant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicAdd64Variant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicExchange8Variant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + 
name: "AtomicExchange32Variant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicExchange64Variant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicCompareAndSwap32Variant", + argLen: 4, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicCompareAndSwap64Variant", + argLen: 4, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicAnd64valueVariant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicOr64valueVariant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicAnd32valueVariant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicOr32valueVariant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicAnd8valueVariant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicOr8valueVariant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "PubBarrier", + argLen: 1, + hasSideEffects: true, + generic: true, + }, + { + name: "Clobber", + auxType: auxSymOff, + argLen: 0, + symEffect: SymNone, + generic: true, + }, + { + name: "ClobberReg", + argLen: 0, + generic: true, + }, + { + name: "PrefetchCache", + argLen: 2, + hasSideEffects: true, + generic: true, + }, + { + name: "PrefetchCacheStreamed", + argLen: 2, + hasSideEffects: true, + generic: true, + }, + { + name: "Add32x4", + argLen: 2, + generic: true, + }, + { + name: "ZeroSIMD", + argLen: 0, + generic: true, + }, + { + name: "AddFloat32x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndFloat32x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndNotFloat32x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "ApproximateReciprocalFloat32x16", + argLen: 1, + generic: true, + }, + { + name: "ApproximateReciprocalOfSqrtFloat32x16", + argLen: 1, + generic: true, + }, + { + name: "DivFloat32x16", + argLen: 2, + generic: true, + }, + { + name: "EqualFloat32x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "GreaterFloat32x16", + argLen: 2, + generic: true, + }, + { + name: "GreaterEqualFloat32x16", + argLen: 2, + generic: true, + }, + { + name: "IsNanFloat32x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "LessFloat32x16", + argLen: 2, + generic: true, + }, + { + name: "LessEqualFloat32x16", + argLen: 2, + generic: true, + }, + { + name: "MaskedAddFloat32x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndFloat32x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndNotFloat32x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedApproximateReciprocalFloat32x16", + argLen: 2, + generic: true, + }, + { + name: "MaskedApproximateReciprocalOfSqrtFloat32x16", + argLen: 2, + generic: true, + }, + { + name: "MaskedDivFloat32x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedEqualFloat32x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterFloat32x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedGreaterEqualFloat32x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedIsNanFloat32x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedLessFloat32x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessEqualFloat32x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedMaxFloat32x16", + argLen: 3, + commutative: true, 
+ generic: true, + }, + { + name: "MaskedMinFloat32x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulFloat32x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulByPowOf2Float32x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedNotEqualFloat32x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedOrFloat32x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedSqrtFloat32x16", + argLen: 2, + generic: true, + }, + { + name: "MaskedSubFloat32x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedXorFloat32x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaxFloat32x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MinFloat32x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulFloat32x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulByPowOf2Float32x16", + argLen: 2, + generic: true, + }, + { + name: "NotEqualFloat32x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "OrFloat32x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "SqrtFloat32x16", + argLen: 1, + generic: true, + }, + { + name: "SubFloat32x16", + argLen: 2, + generic: true, + }, + { + name: "XorFloat32x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AddFloat32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndFloat32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndNotFloat32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "ApproximateReciprocalFloat32x4", + argLen: 1, + generic: true, + }, + { + name: "ApproximateReciprocalOfSqrtFloat32x4", + argLen: 1, + generic: true, + }, + { + name: "DivFloat32x4", + argLen: 2, + generic: true, + }, + { + name: "EqualFloat32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "GreaterFloat32x4", + argLen: 2, + generic: true, + }, + { + name: "GreaterEqualFloat32x4", + argLen: 2, + generic: true, + }, + { + name: "IsNanFloat32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "LessFloat32x4", + argLen: 2, + generic: true, + }, + { + name: "LessEqualFloat32x4", + argLen: 2, + generic: true, + }, + { + name: "MaskedAddFloat32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndFloat32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndNotFloat32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedApproximateReciprocalFloat32x4", + argLen: 2, + generic: true, + }, + { + name: "MaskedApproximateReciprocalOfSqrtFloat32x4", + argLen: 2, + generic: true, + }, + { + name: "MaskedDivFloat32x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedEqualFloat32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterFloat32x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedGreaterEqualFloat32x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedIsNanFloat32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedLessFloat32x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessEqualFloat32x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedMaxFloat32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMinFloat32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: 
"MaskedMulFloat32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulByPowOf2Float32x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedNotEqualFloat32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedOrFloat32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedSqrtFloat32x4", + argLen: 2, + generic: true, + }, + { + name: "MaskedSubFloat32x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedXorFloat32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaxFloat32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MinFloat32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulFloat32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulByPowOf2Float32x4", + argLen: 2, + generic: true, + }, + { + name: "NotEqualFloat32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "OrFloat32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "PairwiseAddFloat32x4", + argLen: 2, + generic: true, + }, + { + name: "PairwiseSubFloat32x4", + argLen: 2, + generic: true, + }, + { + name: "SqrtFloat32x4", + argLen: 1, + generic: true, + }, + { + name: "SubFloat32x4", + argLen: 2, + generic: true, + }, + { + name: "XorFloat32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AddFloat32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndFloat32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndNotFloat32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "ApproximateReciprocalFloat32x8", + argLen: 1, + generic: true, + }, + { + name: "ApproximateReciprocalOfSqrtFloat32x8", + argLen: 1, + generic: true, + }, + { + name: "DivFloat32x8", + argLen: 2, + generic: true, + }, + { + name: "EqualFloat32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "GreaterFloat32x8", + argLen: 2, + generic: true, + }, + { + name: "GreaterEqualFloat32x8", + argLen: 2, + generic: true, + }, + { + name: "IsNanFloat32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "LessFloat32x8", + argLen: 2, + generic: true, + }, + { + name: "LessEqualFloat32x8", + argLen: 2, + generic: true, + }, + { + name: "MaskedAddFloat32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndFloat32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndNotFloat32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedApproximateReciprocalFloat32x8", + argLen: 2, + generic: true, + }, + { + name: "MaskedApproximateReciprocalOfSqrtFloat32x8", + argLen: 2, + generic: true, + }, + { + name: "MaskedDivFloat32x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedEqualFloat32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterFloat32x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedGreaterEqualFloat32x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedIsNanFloat32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedLessFloat32x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessEqualFloat32x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedMaxFloat32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMinFloat32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: 
"MaskedMulFloat32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulByPowOf2Float32x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedNotEqualFloat32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedOrFloat32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedSqrtFloat32x8", + argLen: 2, + generic: true, + }, + { + name: "MaskedSubFloat32x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedXorFloat32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaxFloat32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MinFloat32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulFloat32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulByPowOf2Float32x8", + argLen: 2, + generic: true, + }, + { + name: "NotEqualFloat32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "OrFloat32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "PairwiseAddFloat32x8", + argLen: 2, + generic: true, + }, + { + name: "PairwiseSubFloat32x8", + argLen: 2, + generic: true, + }, + { + name: "SqrtFloat32x8", + argLen: 1, + generic: true, + }, + { + name: "SubFloat32x8", + argLen: 2, + generic: true, + }, + { + name: "XorFloat32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AddFloat64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndFloat64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndNotFloat64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "ApproximateReciprocalFloat64x2", + argLen: 1, + generic: true, + }, + { + name: "ApproximateReciprocalOfSqrtFloat64x2", + argLen: 1, + generic: true, + }, + { + name: "DivFloat64x2", + argLen: 2, + generic: true, + }, + { + name: "EqualFloat64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "GreaterFloat64x2", + argLen: 2, + generic: true, + }, + { + name: "GreaterEqualFloat64x2", + argLen: 2, + generic: true, + }, + { + name: "IsNanFloat64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "LessFloat64x2", + argLen: 2, + generic: true, + }, + { + name: "LessEqualFloat64x2", + argLen: 2, + generic: true, + }, + { + name: "MaskedAddFloat64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndFloat64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndNotFloat64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedApproximateReciprocalFloat64x2", + argLen: 2, + generic: true, + }, + { + name: "MaskedApproximateReciprocalOfSqrtFloat64x2", + argLen: 2, + generic: true, + }, + { + name: "MaskedDivFloat64x2", + argLen: 3, + generic: true, + }, + { + name: "MaskedEqualFloat64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterFloat64x2", + argLen: 3, + generic: true, + }, + { + name: "MaskedGreaterEqualFloat64x2", + argLen: 3, + generic: true, + }, + { + name: "MaskedIsNanFloat64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedLessFloat64x2", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessEqualFloat64x2", + argLen: 3, + generic: true, + }, + { + name: "MaskedMaxFloat64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMinFloat64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: 
"MaskedMulFloat64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulByPowOf2Float64x2", + argLen: 3, + generic: true, + }, + { + name: "MaskedNotEqualFloat64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedOrFloat64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedSqrtFloat64x2", + argLen: 2, + generic: true, + }, + { + name: "MaskedSubFloat64x2", + argLen: 3, + generic: true, + }, + { + name: "MaskedXorFloat64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaxFloat64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MinFloat64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulFloat64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulByPowOf2Float64x2", + argLen: 2, + generic: true, + }, + { + name: "NotEqualFloat64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "OrFloat64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "PairwiseAddFloat64x2", + argLen: 2, + generic: true, + }, + { + name: "PairwiseSubFloat64x2", + argLen: 2, + generic: true, + }, + { + name: "SqrtFloat64x2", + argLen: 1, + generic: true, + }, + { + name: "SubFloat64x2", + argLen: 2, + generic: true, + }, + { + name: "XorFloat64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AddFloat64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndFloat64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndNotFloat64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "ApproximateReciprocalFloat64x4", + argLen: 1, + generic: true, + }, + { + name: "ApproximateReciprocalOfSqrtFloat64x4", + argLen: 1, + generic: true, + }, + { + name: "DivFloat64x4", + argLen: 2, + generic: true, + }, + { + name: "EqualFloat64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "GreaterFloat64x4", + argLen: 2, + generic: true, + }, + { + name: "GreaterEqualFloat64x4", + argLen: 2, + generic: true, + }, + { + name: "IsNanFloat64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "LessFloat64x4", + argLen: 2, + generic: true, + }, + { + name: "LessEqualFloat64x4", + argLen: 2, + generic: true, + }, + { + name: "MaskedAddFloat64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndFloat64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndNotFloat64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedApproximateReciprocalFloat64x4", + argLen: 2, + generic: true, + }, + { + name: "MaskedApproximateReciprocalOfSqrtFloat64x4", + argLen: 2, + generic: true, + }, + { + name: "MaskedDivFloat64x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedEqualFloat64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterFloat64x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedGreaterEqualFloat64x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedIsNanFloat64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedLessFloat64x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessEqualFloat64x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedMaxFloat64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMinFloat64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: 
"MaskedMulFloat64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulByPowOf2Float64x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedNotEqualFloat64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedOrFloat64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedSqrtFloat64x4", + argLen: 2, + generic: true, + }, + { + name: "MaskedSubFloat64x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedXorFloat64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaxFloat64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MinFloat64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulFloat64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulByPowOf2Float64x4", + argLen: 2, + generic: true, + }, + { + name: "NotEqualFloat64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "OrFloat64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "PairwiseAddFloat64x4", + argLen: 2, + generic: true, + }, + { + name: "PairwiseSubFloat64x4", + argLen: 2, + generic: true, + }, + { + name: "SqrtFloat64x4", + argLen: 1, + generic: true, + }, + { + name: "SubFloat64x4", + argLen: 2, + generic: true, + }, + { + name: "XorFloat64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AddFloat64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndFloat64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndNotFloat64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "ApproximateReciprocalFloat64x8", + argLen: 1, + generic: true, + }, + { + name: "ApproximateReciprocalOfSqrtFloat64x8", + argLen: 1, + generic: true, + }, + { + name: "DivFloat64x8", + argLen: 2, + generic: true, + }, + { + name: "EqualFloat64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "GreaterFloat64x8", + argLen: 2, + generic: true, + }, + { + name: "GreaterEqualFloat64x8", + argLen: 2, + generic: true, + }, + { + name: "IsNanFloat64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "LessFloat64x8", + argLen: 2, + generic: true, + }, + { + name: "LessEqualFloat64x8", + argLen: 2, + generic: true, + }, + { + name: "MaskedAddFloat64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndFloat64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndNotFloat64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedApproximateReciprocalFloat64x8", + argLen: 2, + generic: true, + }, + { + name: "MaskedApproximateReciprocalOfSqrtFloat64x8", + argLen: 2, + generic: true, + }, + { + name: "MaskedDivFloat64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedEqualFloat64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterFloat64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedGreaterEqualFloat64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedIsNanFloat64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedLessFloat64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessEqualFloat64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedMaxFloat64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMinFloat64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: 
"MaskedMulFloat64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulByPowOf2Float64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedNotEqualFloat64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedOrFloat64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedSqrtFloat64x8", + argLen: 2, + generic: true, + }, + { + name: "MaskedSubFloat64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedXorFloat64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaxFloat64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MinFloat64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulFloat64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulByPowOf2Float64x8", + argLen: 2, + generic: true, + }, + { + name: "NotEqualFloat64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "OrFloat64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "SqrtFloat64x8", + argLen: 1, + generic: true, + }, + { + name: "SubFloat64x8", + argLen: 2, + generic: true, + }, + { + name: "XorFloat64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AbsoluteInt16x16", + argLen: 1, + generic: true, + }, + { + name: "AddInt16x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndInt16x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndNotInt16x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "EqualInt16x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "GreaterInt16x16", + argLen: 2, + generic: true, + }, + { + name: "GreaterEqualInt16x16", + argLen: 2, + generic: true, + }, + { + name: "LessInt16x16", + argLen: 2, + generic: true, + }, + { + name: "LessEqualInt16x16", + argLen: 2, + generic: true, + }, + { + name: "MaskedAbsoluteInt16x16", + argLen: 2, + generic: true, + }, + { + name: "MaskedAddInt16x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedEqualInt16x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterInt16x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedGreaterEqualInt16x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessInt16x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessEqualInt16x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedMaxInt16x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMinInt16x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulHighInt16x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulLowInt16x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedNotEqualInt16x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedPopCountInt16x16", + argLen: 2, + generic: true, + }, + { + name: "MaskedSaturatedAddInt16x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedSaturatedSubInt16x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedSubInt16x16", + argLen: 3, + generic: true, + }, + { + name: "MaxInt16x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MinInt16x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulHighInt16x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulLowInt16x16", + 
argLen: 2, + commutative: true, + generic: true, + }, + { + name: "NotEqualInt16x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "OrInt16x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "PairwiseAddInt16x16", + argLen: 2, + generic: true, + }, + { + name: "PairwiseSubInt16x16", + argLen: 2, + generic: true, + }, + { + name: "PopCountInt16x16", + argLen: 1, + generic: true, + }, + { + name: "SaturatedAddInt16x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "SaturatedPairwiseAddInt16x16", + argLen: 2, + generic: true, + }, + { + name: "SaturatedPairwiseSubInt16x16", + argLen: 2, + generic: true, + }, + { + name: "SaturatedSubInt16x16", + argLen: 2, + generic: true, + }, + { + name: "SignInt16x16", + argLen: 2, + generic: true, + }, + { + name: "SubInt16x16", + argLen: 2, + generic: true, + }, + { + name: "XorInt16x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AbsoluteInt16x32", + argLen: 1, + generic: true, + }, + { + name: "AddInt16x32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "EqualInt16x32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "GreaterInt16x32", + argLen: 2, + generic: true, + }, + { + name: "GreaterEqualInt16x32", + argLen: 2, + generic: true, + }, + { + name: "LessInt16x32", + argLen: 2, + generic: true, + }, + { + name: "LessEqualInt16x32", + argLen: 2, + generic: true, + }, + { + name: "MaskedAbsoluteInt16x32", + argLen: 2, + generic: true, + }, + { + name: "MaskedAddInt16x32", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedEqualInt16x32", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterInt16x32", + argLen: 3, + generic: true, + }, + { + name: "MaskedGreaterEqualInt16x32", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessInt16x32", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessEqualInt16x32", + argLen: 3, + generic: true, + }, + { + name: "MaskedMaxInt16x32", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMinInt16x32", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulHighInt16x32", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulLowInt16x32", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedNotEqualInt16x32", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedPopCountInt16x32", + argLen: 2, + generic: true, + }, + { + name: "MaskedSaturatedAddInt16x32", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedSaturatedSubInt16x32", + argLen: 3, + generic: true, + }, + { + name: "MaskedSubInt16x32", + argLen: 3, + generic: true, + }, + { + name: "MaxInt16x32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MinInt16x32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulHighInt16x32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulLowInt16x32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "NotEqualInt16x32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "PopCountInt16x32", + argLen: 1, + generic: true, + }, + { + name: "SaturatedAddInt16x32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "SaturatedSubInt16x32", + argLen: 2, + generic: true, + }, + { + name: "SubInt16x32", + argLen: 2, + generic: true, + }, + { + name: "AbsoluteInt16x8", + argLen: 1, + generic: 
true, + }, + { + name: "AddInt16x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndInt16x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndNotInt16x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "EqualInt16x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "GreaterInt16x8", + argLen: 2, + generic: true, + }, + { + name: "GreaterEqualInt16x8", + argLen: 2, + generic: true, + }, + { + name: "LessInt16x8", + argLen: 2, + generic: true, + }, + { + name: "LessEqualInt16x8", + argLen: 2, + generic: true, + }, + { + name: "MaskedAbsoluteInt16x8", + argLen: 2, + generic: true, + }, + { + name: "MaskedAddInt16x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedEqualInt16x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterInt16x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedGreaterEqualInt16x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessInt16x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessEqualInt16x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedMaxInt16x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMinInt16x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulHighInt16x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulLowInt16x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedNotEqualInt16x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedPopCountInt16x8", + argLen: 2, + generic: true, + }, + { + name: "MaskedSaturatedAddInt16x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedSaturatedSubInt16x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedSubInt16x8", + argLen: 3, + generic: true, + }, + { + name: "MaxInt16x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MinInt16x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulHighInt16x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulLowInt16x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "NotEqualInt16x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "OrInt16x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "PairwiseAddInt16x8", + argLen: 2, + generic: true, + }, + { + name: "PairwiseSubInt16x8", + argLen: 2, + generic: true, + }, + { + name: "PopCountInt16x8", + argLen: 1, + generic: true, + }, + { + name: "SaturatedAddInt16x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "SaturatedPairwiseAddInt16x8", + argLen: 2, + generic: true, + }, + { + name: "SaturatedPairwiseSubInt16x8", + argLen: 2, + generic: true, + }, + { + name: "SaturatedSubInt16x8", + argLen: 2, + generic: true, + }, + { + name: "SignInt16x8", + argLen: 2, + generic: true, + }, + { + name: "SubInt16x8", + argLen: 2, + generic: true, + }, + { + name: "XorInt16x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AbsoluteInt32x16", + argLen: 1, + generic: true, + }, + { + name: "AddInt32x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndInt32x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndNotInt32x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "EqualInt32x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: 
"GreaterInt32x16", + argLen: 2, + generic: true, + }, + { + name: "GreaterEqualInt32x16", + argLen: 2, + generic: true, + }, + { + name: "LessInt32x16", + argLen: 2, + generic: true, + }, + { + name: "LessEqualInt32x16", + argLen: 2, + generic: true, + }, + { + name: "MaskedAbsoluteInt32x16", + argLen: 2, + generic: true, + }, + { + name: "MaskedAddInt32x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndInt32x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndNotInt32x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedEqualInt32x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterInt32x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedGreaterEqualInt32x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessInt32x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessEqualInt32x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedMaxInt32x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMinInt32x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulLowInt32x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedNotEqualInt32x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedOrInt32x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedPopCountInt32x16", + argLen: 2, + generic: true, + }, + { + name: "MaskedSubInt32x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedXorInt32x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaxInt32x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MinInt32x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulLowInt32x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "NotEqualInt32x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "OrInt32x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "PopCountInt32x16", + argLen: 1, + generic: true, + }, + { + name: "SubInt32x16", + argLen: 2, + generic: true, + }, + { + name: "XorInt32x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AbsoluteInt32x4", + argLen: 1, + generic: true, + }, + { + name: "AddInt32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndInt32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndNotInt32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "EqualInt32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "GreaterInt32x4", + argLen: 2, + generic: true, + }, + { + name: "GreaterEqualInt32x4", + argLen: 2, + generic: true, + }, + { + name: "LessInt32x4", + argLen: 2, + generic: true, + }, + { + name: "LessEqualInt32x4", + argLen: 2, + generic: true, + }, + { + name: "MaskedAbsoluteInt32x4", + argLen: 2, + generic: true, + }, + { + name: "MaskedAddInt32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndInt32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndNotInt32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedEqualInt32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterInt32x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedGreaterEqualInt32x4", + argLen: 3, + generic: true, + }, + { + name: 
"MaskedLessInt32x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessEqualInt32x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedMaxInt32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMinInt32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulLowInt32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedNotEqualInt32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedOrInt32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedPopCountInt32x4", + argLen: 2, + generic: true, + }, + { + name: "MaskedSubInt32x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedXorInt32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaxInt32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MinInt32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulEvenWidenInt32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulLowInt32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "NotEqualInt32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "OrInt32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "PairwiseAddInt32x4", + argLen: 2, + generic: true, + }, + { + name: "PairwiseSubInt32x4", + argLen: 2, + generic: true, + }, + { + name: "PopCountInt32x4", + argLen: 1, + generic: true, + }, + { + name: "SignInt32x4", + argLen: 2, + generic: true, + }, + { + name: "SubInt32x4", + argLen: 2, + generic: true, + }, + { + name: "XorInt32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AbsoluteInt32x8", + argLen: 1, + generic: true, + }, + { + name: "AddInt32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndInt32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndNotInt32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "EqualInt32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "GreaterInt32x8", + argLen: 2, + generic: true, + }, + { + name: "GreaterEqualInt32x8", + argLen: 2, + generic: true, + }, + { + name: "LessInt32x8", + argLen: 2, + generic: true, + }, + { + name: "LessEqualInt32x8", + argLen: 2, + generic: true, + }, + { + name: "MaskedAbsoluteInt32x8", + argLen: 2, + generic: true, + }, + { + name: "MaskedAddInt32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndInt32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndNotInt32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedEqualInt32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterInt32x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedGreaterEqualInt32x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessInt32x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessEqualInt32x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedMaxInt32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMinInt32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulLowInt32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedNotEqualInt32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedOrInt32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + 
name: "MaskedPopCountInt32x8", + argLen: 2, + generic: true, + }, + { + name: "MaskedSubInt32x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedXorInt32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaxInt32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MinInt32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulEvenWidenInt32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulLowInt32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "NotEqualInt32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "OrInt32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "PairwiseAddInt32x8", + argLen: 2, + generic: true, + }, + { + name: "PairwiseSubInt32x8", + argLen: 2, + generic: true, + }, + { + name: "PopCountInt32x8", + argLen: 1, + generic: true, + }, + { + name: "SignInt32x8", + argLen: 2, + generic: true, + }, + { + name: "SubInt32x8", + argLen: 2, + generic: true, + }, + { + name: "XorInt32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AbsoluteInt64x2", + argLen: 1, + generic: true, + }, + { + name: "AddInt64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndInt64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndNotInt64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "EqualInt64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "GreaterInt64x2", + argLen: 2, + generic: true, + }, + { + name: "GreaterEqualInt64x2", + argLen: 2, + generic: true, + }, + { + name: "LessInt64x2", + argLen: 2, + generic: true, + }, + { + name: "LessEqualInt64x2", + argLen: 2, + generic: true, + }, + { + name: "MaskedAbsoluteInt64x2", + argLen: 2, + generic: true, + }, + { + name: "MaskedAddInt64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndInt64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndNotInt64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedEqualInt64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterInt64x2", + argLen: 3, + generic: true, + }, + { + name: "MaskedGreaterEqualInt64x2", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessInt64x2", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessEqualInt64x2", + argLen: 3, + generic: true, + }, + { + name: "MaskedMaxInt64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMinInt64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulEvenWidenInt64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulLowInt64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedNotEqualInt64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedOrInt64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedPopCountInt64x2", + argLen: 2, + generic: true, + }, + { + name: "MaskedSubInt64x2", + argLen: 3, + generic: true, + }, + { + name: "MaskedXorInt64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaxInt64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MinInt64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulEvenWidenInt64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + 
name: "MulLowInt64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "NotEqualInt64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "OrInt64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "PopCountInt64x2", + argLen: 1, + generic: true, + }, + { + name: "SubInt64x2", + argLen: 2, + generic: true, + }, + { + name: "XorInt64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AbsoluteInt64x4", + argLen: 1, + generic: true, + }, + { + name: "AddInt64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndInt64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndNotInt64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "EqualInt64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "GreaterInt64x4", + argLen: 2, + generic: true, + }, + { + name: "GreaterEqualInt64x4", + argLen: 2, + generic: true, + }, + { + name: "LessInt64x4", + argLen: 2, + generic: true, + }, + { + name: "LessEqualInt64x4", + argLen: 2, + generic: true, + }, + { + name: "MaskedAbsoluteInt64x4", + argLen: 2, + generic: true, + }, + { + name: "MaskedAddInt64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndInt64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndNotInt64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedEqualInt64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterInt64x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedGreaterEqualInt64x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessInt64x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessEqualInt64x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedMaxInt64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMinInt64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulEvenWidenInt64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulLowInt64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedNotEqualInt64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedOrInt64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedPopCountInt64x4", + argLen: 2, + generic: true, + }, + { + name: "MaskedSubInt64x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedXorInt64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaxInt64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MinInt64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulEvenWidenInt64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulLowInt64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "NotEqualInt64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "OrInt64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "PopCountInt64x4", + argLen: 1, + generic: true, + }, + { + name: "SubInt64x4", + argLen: 2, + generic: true, + }, + { + name: "XorInt64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AbsoluteInt64x8", + argLen: 1, + generic: true, + }, + { + name: "AddInt64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndInt64x8", + argLen: 2, + commutative: true, + generic: true, + }, 
+ { + name: "AndNotInt64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "EqualInt64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "GreaterInt64x8", + argLen: 2, + generic: true, + }, + { + name: "GreaterEqualInt64x8", + argLen: 2, + generic: true, + }, + { + name: "LessInt64x8", + argLen: 2, + generic: true, + }, + { + name: "LessEqualInt64x8", + argLen: 2, + generic: true, + }, + { + name: "MaskedAbsoluteInt64x8", + argLen: 2, + generic: true, + }, + { + name: "MaskedAddInt64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndInt64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndNotInt64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedEqualInt64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterInt64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedGreaterEqualInt64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessInt64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessEqualInt64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedMaxInt64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMinInt64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulEvenWidenInt64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulLowInt64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedNotEqualInt64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedOrInt64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedPopCountInt64x8", + argLen: 2, + generic: true, + }, + { + name: "MaskedSubInt64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedXorInt64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaxInt64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MinInt64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulEvenWidenInt64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulLowInt64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "NotEqualInt64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "OrInt64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "PopCountInt64x8", + argLen: 1, + generic: true, + }, + { + name: "SubInt64x8", + argLen: 2, + generic: true, + }, + { + name: "XorInt64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AbsoluteInt8x16", + argLen: 1, + generic: true, + }, + { + name: "AddInt8x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndInt8x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndNotInt8x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "EqualInt8x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "GreaterInt8x16", + argLen: 2, + generic: true, + }, + { + name: "GreaterEqualInt8x16", + argLen: 2, + generic: true, + }, + { + name: "LessInt8x16", + argLen: 2, + generic: true, + }, + { + name: "LessEqualInt8x16", + argLen: 2, + generic: true, + }, + { + name: "MaskedAbsoluteInt8x16", + argLen: 2, + generic: true, + }, + { + name: "MaskedAddInt8x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedEqualInt8x16", + argLen: 3, + commutative: true, + generic: true, + }, + { 
+ name: "MaskedGreaterInt8x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedGreaterEqualInt8x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessInt8x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessEqualInt8x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedMaxInt8x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMinInt8x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedNotEqualInt8x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedPopCountInt8x16", + argLen: 2, + generic: true, + }, + { + name: "MaskedSaturatedAddInt8x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedSaturatedSubInt8x16", + argLen: 3, + generic: true, }, - { - name: "Add8", + name: "MaskedSubInt8x16", + argLen: 3, + generic: true, + }, + { + name: "MaxInt8x16", argLen: 2, commutative: true, generic: true, }, { - name: "Add16", + name: "MinInt8x16", argLen: 2, commutative: true, generic: true, }, { - name: "Add32", + name: "NotEqualInt8x16", argLen: 2, commutative: true, generic: true, }, { - name: "Add64", + name: "OrInt8x16", argLen: 2, commutative: true, generic: true, }, { - name: "AddPtr", - argLen: 2, + name: "PopCountInt8x16", + argLen: 1, generic: true, }, { - name: "Add32F", + name: "SaturatedAddInt8x16", argLen: 2, commutative: true, generic: true, }, { - name: "Add64F", - argLen: 2, - commutative: true, - generic: true, + name: "SaturatedSubInt8x16", + argLen: 2, + generic: true, }, { - name: "Sub8", + name: "SignInt8x16", argLen: 2, generic: true, }, { - name: "Sub16", + name: "SubInt8x16", argLen: 2, generic: true, }, { - name: "Sub32", + name: "XorInt8x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AbsoluteInt8x32", + argLen: 1, + generic: true, + }, + { + name: "AddInt8x32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndInt8x32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndNotInt8x32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "EqualInt8x32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "GreaterInt8x32", argLen: 2, generic: true, }, { - name: "Sub64", + name: "GreaterEqualInt8x32", argLen: 2, generic: true, }, { - name: "SubPtr", + name: "LessInt8x32", argLen: 2, generic: true, }, { - name: "Sub32F", + name: "LessEqualInt8x32", argLen: 2, generic: true, }, { - name: "Sub64F", + name: "MaskedAbsoluteInt8x32", argLen: 2, generic: true, }, { - name: "Mul8", - argLen: 2, + name: "MaskedAddInt8x32", + argLen: 3, commutative: true, generic: true, }, { - name: "Mul16", - argLen: 2, + name: "MaskedEqualInt8x32", + argLen: 3, commutative: true, generic: true, }, { - name: "Mul32", - argLen: 2, + name: "MaskedGreaterInt8x32", + argLen: 3, + generic: true, + }, + { + name: "MaskedGreaterEqualInt8x32", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessInt8x32", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessEqualInt8x32", + argLen: 3, + generic: true, + }, + { + name: "MaskedMaxInt8x32", + argLen: 3, commutative: true, generic: true, }, { - name: "Mul64", - argLen: 2, + name: "MaskedMinInt8x32", + argLen: 3, commutative: true, generic: true, }, { - name: "Mul32F", - argLen: 2, + name: "MaskedNotEqualInt8x32", + argLen: 3, commutative: true, generic: true, }, { - name: "Mul64F", - argLen: 2, + name: "MaskedPopCountInt8x32", + argLen: 2, + generic: true, + }, + { + name: "MaskedSaturatedAddInt8x32", + argLen: 3, 
commutative: true, generic: true, }, { - name: "Div32F", - argLen: 2, + name: "MaskedSaturatedSubInt8x32", + argLen: 3, generic: true, }, { - name: "Div64F", - argLen: 2, + name: "MaskedSubInt8x32", + argLen: 3, generic: true, }, { - name: "Hmul32", + name: "MaxInt8x32", argLen: 2, commutative: true, generic: true, }, { - name: "Hmul32u", + name: "MinInt8x32", argLen: 2, commutative: true, generic: true, }, { - name: "Hmul64", + name: "NotEqualInt8x32", argLen: 2, commutative: true, generic: true, }, { - name: "Hmul64u", + name: "OrInt8x32", argLen: 2, commutative: true, generic: true, }, { - name: "Mul32uhilo", + name: "PopCountInt8x32", + argLen: 1, + generic: true, + }, + { + name: "SaturatedAddInt8x32", argLen: 2, commutative: true, generic: true, }, { - name: "Mul64uhilo", + name: "SaturatedSubInt8x32", + argLen: 2, + generic: true, + }, + { + name: "SignInt8x32", + argLen: 2, + generic: true, + }, + { + name: "SubInt8x32", + argLen: 2, + generic: true, + }, + { + name: "XorInt8x32", argLen: 2, commutative: true, generic: true, }, { - name: "Mul32uover", + name: "AbsoluteInt8x64", + argLen: 1, + generic: true, + }, + { + name: "AddInt8x64", argLen: 2, commutative: true, generic: true, }, { - name: "Mul64uover", + name: "EqualInt8x64", argLen: 2, commutative: true, generic: true, }, { - name: "Avg32u", + name: "GreaterInt8x64", argLen: 2, generic: true, }, { - name: "Avg64u", + name: "GreaterEqualInt8x64", argLen: 2, generic: true, }, { - name: "Div8", + name: "LessInt8x64", argLen: 2, generic: true, }, { - name: "Div8u", + name: "LessEqualInt8x64", argLen: 2, generic: true, }, { - name: "Div16", - auxType: auxBool, + name: "MaskedAbsoluteInt8x64", argLen: 2, generic: true, }, { - name: "Div16u", - argLen: 2, + name: "MaskedAddInt8x64", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedEqualInt8x64", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterInt8x64", + argLen: 3, generic: true, }, { - name: "Div32", - auxType: auxBool, - argLen: 2, + name: "MaskedGreaterEqualInt8x64", + argLen: 3, generic: true, }, { - name: "Div32u", - argLen: 2, + name: "MaskedLessInt8x64", + argLen: 3, generic: true, }, { - name: "Div64", - auxType: auxBool, - argLen: 2, + name: "MaskedLessEqualInt8x64", + argLen: 3, generic: true, }, { - name: "Div64u", + name: "MaskedMaxInt8x64", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMinInt8x64", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedNotEqualInt8x64", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedPopCountInt8x64", argLen: 2, generic: true, }, { - name: "Div128u", + name: "MaskedSaturatedAddInt8x64", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedSaturatedSubInt8x64", argLen: 3, generic: true, }, { - name: "Mod8", - argLen: 2, + name: "MaskedSubInt8x64", + argLen: 3, generic: true, }, { - name: "Mod8u", - argLen: 2, + name: "MaxInt8x64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MinInt8x64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "NotEqualInt8x64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "PopCountInt8x64", + argLen: 1, generic: true, }, { - name: "Mod16", - auxType: auxBool, + name: "SaturatedAddInt8x64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "SaturatedSubInt8x64", argLen: 2, generic: true, }, { - name: "Mod16u", + name: "SubInt8x64", argLen: 2, generic: true, }, { - name: 
"Mod32", - auxType: auxBool, + name: "AddUint16x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndUint16x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndNotUint16x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AverageUint16x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "EqualUint16x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "GreaterUint16x16", argLen: 2, generic: true, }, { - name: "Mod32u", + name: "GreaterEqualUint16x16", argLen: 2, generic: true, }, { - name: "Mod64", - auxType: auxBool, + name: "LessUint16x16", argLen: 2, generic: true, }, { - name: "Mod64u", + name: "LessEqualUint16x16", argLen: 2, generic: true, }, { - name: "And8", - argLen: 2, + name: "MaskedAddUint16x16", + argLen: 3, commutative: true, generic: true, }, { - name: "And16", - argLen: 2, + name: "MaskedAverageUint16x16", + argLen: 3, commutative: true, generic: true, }, { - name: "And32", - argLen: 2, + name: "MaskedEqualUint16x16", + argLen: 3, commutative: true, generic: true, }, { - name: "And64", - argLen: 2, + name: "MaskedGreaterUint16x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedGreaterEqualUint16x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessUint16x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessEqualUint16x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedMaxUint16x16", + argLen: 3, commutative: true, generic: true, }, { - name: "Or8", - argLen: 2, + name: "MaskedMinUint16x16", + argLen: 3, commutative: true, generic: true, }, { - name: "Or16", - argLen: 2, + name: "MaskedMulHighUint16x16", + argLen: 3, commutative: true, generic: true, }, { - name: "Or32", - argLen: 2, + name: "MaskedNotEqualUint16x16", + argLen: 3, commutative: true, generic: true, }, { - name: "Or64", + name: "MaskedPopCountUint16x16", + argLen: 2, + generic: true, + }, + { + name: "MaskedSaturatedAddUint16x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedSaturatedSubUint16x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedSubUint16x16", + argLen: 3, + generic: true, + }, + { + name: "MaxUint16x16", argLen: 2, commutative: true, generic: true, }, { - name: "Xor8", + name: "MinUint16x16", argLen: 2, commutative: true, generic: true, }, { - name: "Xor16", + name: "MulHighUint16x16", argLen: 2, commutative: true, generic: true, }, { - name: "Xor32", + name: "NotEqualUint16x16", argLen: 2, commutative: true, generic: true, }, { - name: "Xor64", + name: "OrUint16x16", argLen: 2, commutative: true, generic: true, }, { - name: "Lsh8x8", - auxType: auxBool, + name: "PairwiseAddUint16x16", argLen: 2, generic: true, }, { - name: "Lsh8x16", - auxType: auxBool, + name: "PairwiseSubUint16x16", argLen: 2, generic: true, }, { - name: "Lsh8x32", - auxType: auxBool, - argLen: 2, + name: "PopCountUint16x16", + argLen: 1, generic: true, }, { - name: "Lsh8x64", - auxType: auxBool, - argLen: 2, - generic: true, + name: "SaturatedAddUint16x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Lsh16x8", - auxType: auxBool, + name: "SaturatedSubUint16x16", argLen: 2, generic: true, }, { - name: "Lsh16x16", - auxType: auxBool, + name: "SubUint16x16", argLen: 2, generic: true, }, { - name: "Lsh16x32", - auxType: auxBool, - argLen: 2, - generic: true, + name: "XorUint16x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Lsh16x64", - auxType: auxBool, - argLen: 2, - generic: true, + name: 
"AddUint16x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Lsh32x8", - auxType: auxBool, - argLen: 2, - generic: true, + name: "AverageUint16x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Lsh32x16", - auxType: auxBool, - argLen: 2, - generic: true, + name: "EqualUint16x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Lsh32x32", - auxType: auxBool, + name: "GreaterUint16x32", argLen: 2, generic: true, }, { - name: "Lsh32x64", - auxType: auxBool, + name: "GreaterEqualUint16x32", argLen: 2, generic: true, }, { - name: "Lsh64x8", - auxType: auxBool, + name: "LessUint16x32", argLen: 2, generic: true, }, { - name: "Lsh64x16", - auxType: auxBool, + name: "LessEqualUint16x32", argLen: 2, generic: true, }, { - name: "Lsh64x32", - auxType: auxBool, - argLen: 2, - generic: true, + name: "MaskedAddUint16x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Lsh64x64", - auxType: auxBool, - argLen: 2, - generic: true, + name: "MaskedAverageUint16x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Rsh8x8", - auxType: auxBool, - argLen: 2, - generic: true, + name: "MaskedEqualUint16x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Rsh8x16", - auxType: auxBool, - argLen: 2, + name: "MaskedGreaterUint16x32", + argLen: 3, generic: true, }, { - name: "Rsh8x32", - auxType: auxBool, - argLen: 2, + name: "MaskedGreaterEqualUint16x32", + argLen: 3, generic: true, }, { - name: "Rsh8x64", - auxType: auxBool, - argLen: 2, + name: "MaskedLessUint16x32", + argLen: 3, generic: true, }, { - name: "Rsh16x8", - auxType: auxBool, - argLen: 2, + name: "MaskedLessEqualUint16x32", + argLen: 3, generic: true, }, { - name: "Rsh16x16", - auxType: auxBool, - argLen: 2, - generic: true, + name: "MaskedMaxUint16x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Rsh16x32", - auxType: auxBool, - argLen: 2, - generic: true, + name: "MaskedMinUint16x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Rsh16x64", - auxType: auxBool, - argLen: 2, - generic: true, + name: "MaskedMulHighUint16x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Rsh32x8", - auxType: auxBool, - argLen: 2, - generic: true, + name: "MaskedNotEqualUint16x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Rsh32x16", - auxType: auxBool, + name: "MaskedPopCountUint16x32", argLen: 2, generic: true, }, { - name: "Rsh32x32", - auxType: auxBool, - argLen: 2, - generic: true, + name: "MaskedSaturatedAddUint16x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Rsh32x64", - auxType: auxBool, - argLen: 2, + name: "MaskedSaturatedSubUint16x32", + argLen: 3, generic: true, }, { - name: "Rsh64x8", - auxType: auxBool, - argLen: 2, + name: "MaskedSubUint16x32", + argLen: 3, generic: true, }, { - name: "Rsh64x16", - auxType: auxBool, - argLen: 2, - generic: true, + name: "MaxUint16x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Rsh64x32", - auxType: auxBool, - argLen: 2, - generic: true, + name: "MinUint16x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Rsh64x64", - auxType: auxBool, - argLen: 2, - generic: true, + name: "MulHighUint16x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Rsh8Ux8", - auxType: auxBool, - argLen: 2, - generic: true, + name: "NotEqualUint16x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Rsh8Ux16", - auxType: auxBool, - argLen: 2, + name: 
"PopCountUint16x32", + argLen: 1, generic: true, }, { - name: "Rsh8Ux32", - auxType: auxBool, - argLen: 2, - generic: true, + name: "SaturatedAddUint16x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Rsh8Ux64", - auxType: auxBool, + name: "SaturatedSubUint16x32", argLen: 2, generic: true, }, { - name: "Rsh16Ux8", - auxType: auxBool, + name: "SubUint16x32", argLen: 2, generic: true, }, { - name: "Rsh16Ux16", - auxType: auxBool, - argLen: 2, - generic: true, + name: "AddUint16x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndUint16x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndNotUint16x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AverageUint16x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "EqualUint16x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Rsh16Ux32", - auxType: auxBool, + name: "GreaterUint16x8", argLen: 2, generic: true, }, { - name: "Rsh16Ux64", - auxType: auxBool, + name: "GreaterEqualUint16x8", argLen: 2, generic: true, }, { - name: "Rsh32Ux8", - auxType: auxBool, + name: "LessUint16x8", argLen: 2, generic: true, }, { - name: "Rsh32Ux16", - auxType: auxBool, + name: "LessEqualUint16x8", argLen: 2, generic: true, }, { - name: "Rsh32Ux32", - auxType: auxBool, - argLen: 2, - generic: true, + name: "MaskedAddUint16x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Rsh32Ux64", - auxType: auxBool, - argLen: 2, - generic: true, + name: "MaskedAverageUint16x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Rsh64Ux8", - auxType: auxBool, - argLen: 2, + name: "MaskedEqualUint16x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterUint16x8", + argLen: 3, generic: true, }, { - name: "Rsh64Ux16", - auxType: auxBool, - argLen: 2, + name: "MaskedGreaterEqualUint16x8", + argLen: 3, generic: true, }, { - name: "Rsh64Ux32", - auxType: auxBool, - argLen: 2, + name: "MaskedLessUint16x8", + argLen: 3, generic: true, }, { - name: "Rsh64Ux64", - auxType: auxBool, - argLen: 2, + name: "MaskedLessEqualUint16x8", + argLen: 3, generic: true, }, { - name: "Eq8", - argLen: 2, + name: "MaskedMaxUint16x8", + argLen: 3, commutative: true, generic: true, }, { - name: "Eq16", - argLen: 2, + name: "MaskedMinUint16x8", + argLen: 3, commutative: true, generic: true, }, { - name: "Eq32", - argLen: 2, + name: "MaskedMulHighUint16x8", + argLen: 3, commutative: true, generic: true, }, { - name: "Eq64", - argLen: 2, + name: "MaskedNotEqualUint16x8", + argLen: 3, commutative: true, generic: true, }, { - name: "EqPtr", - argLen: 2, + name: "MaskedPopCountUint16x8", + argLen: 2, + generic: true, + }, + { + name: "MaskedSaturatedAddUint16x8", + argLen: 3, commutative: true, generic: true, }, { - name: "EqInter", - argLen: 2, + name: "MaskedSaturatedSubUint16x8", + argLen: 3, generic: true, }, { - name: "EqSlice", - argLen: 2, + name: "MaskedSubUint16x8", + argLen: 3, generic: true, }, { - name: "Eq32F", + name: "MaxUint16x8", argLen: 2, commutative: true, generic: true, }, { - name: "Eq64F", + name: "MinUint16x8", argLen: 2, commutative: true, generic: true, }, { - name: "Neq8", + name: "MulHighUint16x8", argLen: 2, commutative: true, generic: true, }, { - name: "Neq16", + name: "NotEqualUint16x8", argLen: 2, commutative: true, generic: true, }, { - name: "Neq32", + name: "OrUint16x8", argLen: 2, commutative: true, generic: true, }, { - name: "Neq64", - argLen: 2, - commutative: true, - generic: true, 
+ name: "PairwiseAddUint16x8", + argLen: 2, + generic: true, }, { - name: "NeqPtr", + name: "PairwiseSubUint16x8", + argLen: 2, + generic: true, + }, + { + name: "PopCountUint16x8", + argLen: 1, + generic: true, + }, + { + name: "SaturatedAddUint16x8", argLen: 2, commutative: true, generic: true, }, { - name: "NeqInter", + name: "SaturatedSubUint16x8", argLen: 2, generic: true, }, { - name: "NeqSlice", + name: "SubUint16x8", argLen: 2, generic: true, }, { - name: "Neq32F", + name: "XorUint16x8", argLen: 2, commutative: true, generic: true, }, { - name: "Neq64F", + name: "AddUint32x16", argLen: 2, commutative: true, generic: true, }, { - name: "Less8", - argLen: 2, - generic: true, + name: "AndUint32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Less8U", - argLen: 2, - generic: true, + name: "AndNotUint32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Less16", - argLen: 2, - generic: true, + name: "EqualUint32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Less16U", + name: "GreaterUint32x16", argLen: 2, generic: true, }, { - name: "Less32", + name: "GreaterEqualUint32x16", argLen: 2, generic: true, }, { - name: "Less32U", + name: "LessUint32x16", argLen: 2, generic: true, }, { - name: "Less64", + name: "LessEqualUint32x16", argLen: 2, generic: true, }, { - name: "Less64U", - argLen: 2, - generic: true, + name: "MaskedAddUint32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Less32F", - argLen: 2, - generic: true, + name: "MaskedAndUint32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Less64F", - argLen: 2, - generic: true, + name: "MaskedAndNotUint32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Leq8", - argLen: 2, - generic: true, + name: "MaskedEqualUint32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Leq8U", - argLen: 2, + name: "MaskedGreaterUint32x16", + argLen: 3, generic: true, }, { - name: "Leq16", - argLen: 2, + name: "MaskedGreaterEqualUint32x16", + argLen: 3, generic: true, }, { - name: "Leq16U", - argLen: 2, + name: "MaskedLessUint32x16", + argLen: 3, generic: true, }, { - name: "Leq32", - argLen: 2, + name: "MaskedLessEqualUint32x16", + argLen: 3, generic: true, }, { - name: "Leq32U", - argLen: 2, - generic: true, + name: "MaskedMaxUint32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Leq64", - argLen: 2, - generic: true, + name: "MaskedMinUint32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Leq64U", - argLen: 2, - generic: true, + name: "MaskedNotEqualUint32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Leq32F", - argLen: 2, - generic: true, + name: "MaskedOrUint32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Leq64F", + name: "MaskedPopCountUint32x16", argLen: 2, generic: true, }, { - name: "CondSelect", + name: "MaskedSubUint32x16", argLen: 3, generic: true, }, { - name: "AndB", - argLen: 2, + name: "MaskedXorUint32x16", + argLen: 3, commutative: true, generic: true, }, { - name: "OrB", + name: "MaxUint32x16", argLen: 2, commutative: true, generic: true, }, { - name: "EqB", + name: "MinUint32x16", argLen: 2, commutative: true, generic: true, }, { - name: "NeqB", + name: "NotEqualUint32x16", argLen: 2, commutative: true, generic: true, }, { - name: "Not", - argLen: 1, - generic: true, - }, - { - name: "Neg8", - argLen: 1, - generic: true, - }, - { - name: "Neg16", - argLen: 1, - generic: true, - }, - { - name: "Neg32", - 
argLen: 1, - generic: true, - }, - { - name: "Neg64", - argLen: 1, - generic: true, - }, - { - name: "Neg32F", - argLen: 1, - generic: true, + name: "OrUint32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Neg64F", + name: "PopCountUint32x16", argLen: 1, generic: true, }, { - name: "Com8", - argLen: 1, + name: "SubUint32x16", + argLen: 2, generic: true, }, { - name: "Com16", - argLen: 1, - generic: true, + name: "XorUint32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Com32", - argLen: 1, - generic: true, + name: "AddUint32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Com64", - argLen: 1, - generic: true, + name: "AndUint32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Ctz8", - argLen: 1, - generic: true, + name: "AndNotUint32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Ctz16", - argLen: 1, - generic: true, + name: "EqualUint32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Ctz32", - argLen: 1, + name: "GreaterUint32x4", + argLen: 2, generic: true, }, { - name: "Ctz64", - argLen: 1, + name: "GreaterEqualUint32x4", + argLen: 2, generic: true, }, { - name: "Ctz64On32", + name: "LessUint32x4", argLen: 2, generic: true, }, { - name: "Ctz8NonZero", - argLen: 1, + name: "LessEqualUint32x4", + argLen: 2, generic: true, }, { - name: "Ctz16NonZero", - argLen: 1, - generic: true, + name: "MaskedAddUint32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Ctz32NonZero", - argLen: 1, - generic: true, + name: "MaskedAndUint32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Ctz64NonZero", - argLen: 1, - generic: true, + name: "MaskedAndNotUint32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "BitLen8", - argLen: 1, - generic: true, + name: "MaskedEqualUint32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "BitLen16", - argLen: 1, + name: "MaskedGreaterUint32x4", + argLen: 3, generic: true, }, { - name: "BitLen32", - argLen: 1, + name: "MaskedGreaterEqualUint32x4", + argLen: 3, generic: true, }, { - name: "BitLen64", - argLen: 1, + name: "MaskedLessUint32x4", + argLen: 3, generic: true, }, { - name: "Bswap16", - argLen: 1, + name: "MaskedLessEqualUint32x4", + argLen: 3, generic: true, }, { - name: "Bswap32", - argLen: 1, - generic: true, + name: "MaskedMaxUint32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Bswap64", - argLen: 1, - generic: true, + name: "MaskedMinUint32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "BitRev8", - argLen: 1, - generic: true, + name: "MaskedNotEqualUint32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "BitRev16", - argLen: 1, - generic: true, + name: "MaskedOrUint32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "BitRev32", - argLen: 1, + name: "MaskedPopCountUint32x4", + argLen: 2, generic: true, }, { - name: "BitRev64", - argLen: 1, + name: "MaskedSubUint32x4", + argLen: 3, generic: true, }, { - name: "PopCount8", - argLen: 1, - generic: true, + name: "MaskedXorUint32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "PopCount16", - argLen: 1, - generic: true, + name: "MaxUint32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "PopCount32", - argLen: 1, - generic: true, + name: "MinUint32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "PopCount64", - argLen: 1, - generic: true, + name: "MulEvenWidenUint32x4", + 
argLen: 2, + commutative: true, + generic: true, }, { - name: "RotateLeft64", - argLen: 2, - generic: true, + name: "NotEqualUint32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "RotateLeft32", - argLen: 2, - generic: true, + name: "OrUint32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "RotateLeft16", + name: "PairwiseAddUint32x4", argLen: 2, generic: true, }, { - name: "RotateLeft8", + name: "PairwiseSubUint32x4", argLen: 2, generic: true, }, { - name: "Sqrt", + name: "PopCountUint32x4", argLen: 1, generic: true, }, { - name: "Sqrt32", - argLen: 1, + name: "SubUint32x4", + argLen: 2, generic: true, }, { - name: "Floor", - argLen: 1, - generic: true, + name: "XorUint32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Ceil", - argLen: 1, - generic: true, + name: "AddUint32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Trunc", - argLen: 1, - generic: true, + name: "AndUint32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Round", - argLen: 1, - generic: true, + name: "AndNotUint32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "RoundToEven", - argLen: 1, - generic: true, + name: "EqualUint32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Abs", - argLen: 1, + name: "GreaterUint32x8", + argLen: 2, generic: true, }, { - name: "Copysign", + name: "GreaterEqualUint32x8", argLen: 2, generic: true, }, { - name: "Min64", + name: "LessUint32x8", argLen: 2, generic: true, }, { - name: "Max64", + name: "LessEqualUint32x8", argLen: 2, generic: true, }, { - name: "Min64u", - argLen: 2, - generic: true, + name: "MaskedAddUint32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Max64u", - argLen: 2, - generic: true, + name: "MaskedAndUint32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Min64F", - argLen: 2, - generic: true, + name: "MaskedAndNotUint32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Min32F", - argLen: 2, + name: "MaskedEqualUint32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterUint32x8", + argLen: 3, generic: true, }, { - name: "Max64F", - argLen: 2, + name: "MaskedGreaterEqualUint32x8", + argLen: 3, generic: true, }, { - name: "Max32F", - argLen: 2, + name: "MaskedLessUint32x8", + argLen: 3, generic: true, }, { - name: "FMA", + name: "MaskedLessEqualUint32x8", argLen: 3, generic: true, }, { - name: "Phi", - argLen: -1, - zeroWidth: true, - generic: true, + name: "MaskedMaxUint32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Copy", - argLen: 1, - generic: true, + name: "MaskedMinUint32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Convert", - argLen: 2, - resultInArg0: true, - zeroWidth: true, - generic: true, + name: "MaskedNotEqualUint32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ConstBool", - auxType: auxBool, - argLen: 0, - generic: true, + name: "MaskedOrUint32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ConstString", - auxType: auxString, - argLen: 0, + name: "MaskedPopCountUint32x8", + argLen: 2, generic: true, }, { - name: "ConstNil", - argLen: 0, + name: "MaskedSubUint32x8", + argLen: 3, generic: true, }, { - name: "Const8", - auxType: auxInt8, - argLen: 0, - generic: true, + name: "MaskedXorUint32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Const16", - auxType: auxInt16, - argLen: 0, - generic: true, + name: 
"MaxUint32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Const32", - auxType: auxInt32, - argLen: 0, - generic: true, + name: "MinUint32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Const64", - auxType: auxInt64, - argLen: 0, - generic: true, + name: "MulEvenWidenUint32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Const32F", - auxType: auxFloat32, - argLen: 0, - generic: true, + name: "NotEqualUint32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Const64F", - auxType: auxFloat64, - argLen: 0, - generic: true, + name: "OrUint32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "ConstInterface", - argLen: 0, + name: "PairwiseAddUint32x8", + argLen: 2, generic: true, }, { - name: "ConstSlice", - argLen: 0, + name: "PairwiseSubUint32x8", + argLen: 2, generic: true, }, { - name: "InitMem", - argLen: 0, - zeroWidth: true, - generic: true, + name: "PopCountUint32x8", + argLen: 1, + generic: true, }, { - name: "Arg", - auxType: auxSymOff, - argLen: 0, - zeroWidth: true, - symEffect: SymRead, - generic: true, + name: "SubUint32x8", + argLen: 2, + generic: true, }, { - name: "ArgIntReg", - auxType: auxNameOffsetInt8, - argLen: 0, - zeroWidth: true, - generic: true, + name: "XorUint32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "ArgFloatReg", - auxType: auxNameOffsetInt8, - argLen: 0, - zeroWidth: true, - generic: true, + name: "AddUint64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Addr", - auxType: auxSym, - argLen: 1, - symEffect: SymAddr, - generic: true, + name: "AndUint64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "LocalAddr", - auxType: auxSym, - argLen: 2, - symEffect: SymAddr, - generic: true, + name: "AndNotUint64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SP", - argLen: 0, - zeroWidth: true, - fixedReg: true, - generic: true, + name: "EqualUint64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SB", - argLen: 0, - zeroWidth: true, - fixedReg: true, - generic: true, + name: "GreaterUint64x2", + argLen: 2, + generic: true, }, { - name: "SPanchored", - argLen: 2, - zeroWidth: true, - generic: true, + name: "GreaterEqualUint64x2", + argLen: 2, + generic: true, }, { - name: "Load", + name: "LessUint64x2", argLen: 2, generic: true, }, { - name: "Dereference", + name: "LessEqualUint64x2", argLen: 2, generic: true, }, { - name: "Store", - auxType: auxTyp, - argLen: 3, - generic: true, + name: "MaskedAddUint64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Move", - auxType: auxTypSize, - argLen: 3, - generic: true, + name: "MaskedAndUint64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Zero", - auxType: auxTypSize, - argLen: 2, - generic: true, + name: "MaskedAndNotUint64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "StoreWB", - auxType: auxTyp, + name: "MaskedEqualUint64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterUint64x2", argLen: 3, generic: true, }, { - name: "MoveWB", - auxType: auxTypSize, + name: "MaskedGreaterEqualUint64x2", argLen: 3, generic: true, }, { - name: "ZeroWB", - auxType: auxTypSize, - argLen: 2, + name: "MaskedLessUint64x2", + argLen: 3, generic: true, }, { - name: "WBend", - argLen: 1, + name: "MaskedLessEqualUint64x2", + argLen: 3, generic: true, }, { - name: "WB", - auxType: auxInt64, - argLen: 1, - generic: true, + name: 
"MaskedMaxUint64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "HasCPUFeature", - auxType: auxSym, - argLen: 0, - symEffect: SymNone, - generic: true, + name: "MaskedMinUint64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "PanicBounds", - auxType: auxInt64, - argLen: 3, - call: true, - generic: true, + name: "MaskedMulEvenWidenUint64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "PanicExtend", - auxType: auxInt64, - argLen: 4, - call: true, - generic: true, + name: "MaskedNotEqualUint64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ClosureCall", - auxType: auxCallOff, - argLen: -1, - call: true, - generic: true, + name: "MaskedOrUint64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "StaticCall", - auxType: auxCallOff, - argLen: -1, - call: true, + name: "MaskedPopCountUint64x2", + argLen: 2, generic: true, }, { - name: "InterCall", - auxType: auxCallOff, - argLen: -1, - call: true, + name: "MaskedSubUint64x2", + argLen: 3, generic: true, }, { - name: "TailCall", - auxType: auxCallOff, - argLen: -1, - call: true, - generic: true, + name: "MaskedXorUint64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ClosureLECall", - auxType: auxCallOff, - argLen: -1, - call: true, - generic: true, + name: "MaxUint64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "StaticLECall", - auxType: auxCallOff, - argLen: -1, - call: true, - generic: true, + name: "MinUint64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "InterLECall", - auxType: auxCallOff, - argLen: -1, - call: true, - generic: true, + name: "MulEvenWidenUint64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "TailLECall", - auxType: auxCallOff, - argLen: -1, - call: true, - generic: true, + name: "NotEqualUint64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SignExt8to16", - argLen: 1, - generic: true, + name: "OrUint64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SignExt8to32", + name: "PopCountUint64x2", argLen: 1, generic: true, }, { - name: "SignExt8to64", - argLen: 1, + name: "SubUint64x2", + argLen: 2, generic: true, }, { - name: "SignExt16to32", - argLen: 1, - generic: true, + name: "XorUint64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SignExt16to64", - argLen: 1, - generic: true, + name: "AddUint64x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SignExt32to64", - argLen: 1, - generic: true, + name: "AndUint64x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "ZeroExt8to16", - argLen: 1, - generic: true, + name: "AndNotUint64x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "ZeroExt8to32", - argLen: 1, - generic: true, + name: "EqualUint64x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "ZeroExt8to64", - argLen: 1, + name: "GreaterUint64x4", + argLen: 2, generic: true, }, { - name: "ZeroExt16to32", - argLen: 1, + name: "GreaterEqualUint64x4", + argLen: 2, generic: true, }, { - name: "ZeroExt16to64", - argLen: 1, + name: "LessUint64x4", + argLen: 2, generic: true, }, { - name: "ZeroExt32to64", - argLen: 1, + name: "LessEqualUint64x4", + argLen: 2, generic: true, }, { - name: "Trunc16to8", - argLen: 1, - generic: true, + name: "MaskedAddUint64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Trunc32to8", - argLen: 1, - generic: true, + name: "MaskedAndUint64x4", + argLen: 3, + 
commutative: true, + generic: true, }, { - name: "Trunc32to16", - argLen: 1, - generic: true, + name: "MaskedAndNotUint64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Trunc64to8", - argLen: 1, - generic: true, + name: "MaskedEqualUint64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Trunc64to16", - argLen: 1, + name: "MaskedGreaterUint64x4", + argLen: 3, generic: true, }, { - name: "Trunc64to32", - argLen: 1, + name: "MaskedGreaterEqualUint64x4", + argLen: 3, generic: true, }, { - name: "Cvt32to32F", - argLen: 1, + name: "MaskedLessUint64x4", + argLen: 3, generic: true, }, { - name: "Cvt32to64F", - argLen: 1, + name: "MaskedLessEqualUint64x4", + argLen: 3, generic: true, }, { - name: "Cvt64to32F", - argLen: 1, - generic: true, + name: "MaskedMaxUint64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Cvt64to64F", - argLen: 1, - generic: true, + name: "MaskedMinUint64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Cvt32Fto32", - argLen: 1, - generic: true, + name: "MaskedMulEvenWidenUint64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Cvt32Fto64", - argLen: 1, - generic: true, + name: "MaskedNotEqualUint64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Cvt64Fto32", - argLen: 1, - generic: true, + name: "MaskedOrUint64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Cvt64Fto64", - argLen: 1, + name: "MaskedPopCountUint64x4", + argLen: 2, generic: true, }, { - name: "Cvt32Fto64F", - argLen: 1, + name: "MaskedSubUint64x4", + argLen: 3, generic: true, }, { - name: "Cvt64Fto32F", - argLen: 1, - generic: true, + name: "MaskedXorUint64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "CvtBoolToUint8", - argLen: 1, - generic: true, + name: "MaxUint64x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Round32F", - argLen: 1, - generic: true, + name: "MinUint64x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Round64F", - argLen: 1, - generic: true, + name: "MulEvenWidenUint64x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "IsNonNil", + name: "NotEqualUint64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "OrUint64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "PopCountUint64x4", argLen: 1, generic: true, }, { - name: "IsInBounds", + name: "SubUint64x4", argLen: 2, generic: true, }, { - name: "IsSliceInBounds", - argLen: 2, - generic: true, + name: "XorUint64x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "NilCheck", - argLen: 2, - nilCheck: true, - generic: true, + name: "AddUint64x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "GetG", - argLen: 1, - zeroWidth: true, - generic: true, + name: "AndUint64x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "GetClosurePtr", - argLen: 0, - generic: true, + name: "AndNotUint64x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "GetCallerPC", - argLen: 0, - generic: true, + name: "EqualUint64x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "GetCallerSP", - argLen: 1, + name: "GreaterUint64x8", + argLen: 2, generic: true, }, { - name: "PtrIndex", + name: "GreaterEqualUint64x8", argLen: 2, generic: true, }, { - name: "OffPtr", - auxType: auxInt64, - argLen: 1, + name: "LessUint64x8", + argLen: 2, generic: true, }, { - name: "SliceMake", - argLen: 3, + name: "LessEqualUint64x8", + argLen: 
2, generic: true, }, { - name: "SlicePtr", - argLen: 1, - generic: true, + name: "MaskedAddUint64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "SliceLen", - argLen: 1, - generic: true, + name: "MaskedAndUint64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "SliceCap", - argLen: 1, - generic: true, + name: "MaskedAndNotUint64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "SlicePtrUnchecked", - argLen: 1, - generic: true, + name: "MaskedEqualUint64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ComplexMake", - argLen: 2, + name: "MaskedGreaterUint64x8", + argLen: 3, generic: true, }, { - name: "ComplexReal", - argLen: 1, + name: "MaskedGreaterEqualUint64x8", + argLen: 3, generic: true, }, { - name: "ComplexImag", - argLen: 1, + name: "MaskedLessUint64x8", + argLen: 3, generic: true, }, { - name: "StringMake", - argLen: 2, + name: "MaskedLessEqualUint64x8", + argLen: 3, generic: true, }, { - name: "StringPtr", - argLen: 1, - generic: true, + name: "MaskedMaxUint64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "StringLen", - argLen: 1, - generic: true, + name: "MaskedMinUint64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "IMake", + name: "MaskedMulEvenWidenUint64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedNotEqualUint64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedOrUint64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedPopCountUint64x8", argLen: 2, generic: true, }, { - name: "ITab", - argLen: 1, + name: "MaskedSubUint64x8", + argLen: 3, generic: true, }, { - name: "IData", - argLen: 1, - generic: true, + name: "MaskedXorUint64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "StructMake", - argLen: -1, - generic: true, + name: "MaxUint64x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "StructSelect", - auxType: auxInt64, - argLen: 1, - generic: true, + name: "MinUint64x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "ArrayMake0", - argLen: 0, - generic: true, + name: "MulEvenWidenUint64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "NotEqualUint64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "OrUint64x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "ArrayMake1", + name: "PopCountUint64x8", argLen: 1, generic: true, }, { - name: "ArraySelect", - auxType: auxInt64, - argLen: 1, + name: "SubUint64x8", + argLen: 2, generic: true, }, { - name: "StoreReg", - argLen: 1, - generic: true, + name: "XorUint64x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "LoadReg", - argLen: 1, - generic: true, + name: "AddUint8x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "FwdRef", - auxType: auxSym, - argLen: 0, - symEffect: SymNone, - generic: true, + name: "AndUint8x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Unknown", - argLen: 0, - generic: true, + name: "AndNotUint8x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "VarDef", - auxType: auxSym, - argLen: 1, - zeroWidth: true, - symEffect: SymNone, - generic: true, + name: "AverageUint8x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "VarLive", - auxType: auxSym, - argLen: 1, - zeroWidth: true, - symEffect: SymRead, - generic: true, + name: "EqualUint8x16", + argLen: 2, + commutative: true, + 
generic: true, }, { - name: "KeepAlive", - argLen: 2, - zeroWidth: true, - generic: true, + name: "GreaterUint8x16", + argLen: 2, + generic: true, }, { - name: "InlMark", - auxType: auxInt32, - argLen: 1, + name: "GreaterEqualUint8x16", + argLen: 2, generic: true, }, { - name: "Int64Make", + name: "LessUint8x16", argLen: 2, generic: true, }, { - name: "Int64Hi", - argLen: 1, + name: "LessEqualUint8x16", + argLen: 2, generic: true, }, { - name: "Int64Lo", - argLen: 1, - generic: true, + name: "MaskedAddUint8x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Add32carry", - argLen: 2, + name: "MaskedAverageUint8x16", + argLen: 3, commutative: true, generic: true, }, { - name: "Add32withcarry", + name: "MaskedEqualUint8x16", argLen: 3, commutative: true, generic: true, }, { - name: "Sub32carry", - argLen: 2, + name: "MaskedGreaterUint8x16", + argLen: 3, generic: true, }, { - name: "Sub32withcarry", + name: "MaskedGreaterEqualUint8x16", argLen: 3, generic: true, }, { - name: "Add64carry", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedLessUint8x16", + argLen: 3, + generic: true, }, { - name: "Sub64borrow", + name: "MaskedLessEqualUint8x16", argLen: 3, generic: true, }, { - name: "Signmask", - argLen: 1, - generic: true, + name: "MaskedMaxUint8x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Zeromask", - argLen: 1, - generic: true, + name: "MaskedMinUint8x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Slicemask", - argLen: 1, - generic: true, + name: "MaskedNotEqualUint8x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "SpectreIndex", + name: "MaskedPopCountUint8x16", argLen: 2, generic: true, }, { - name: "SpectreSliceIndex", - argLen: 2, - generic: true, + name: "MaskedSaturatedAddUint8x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Cvt32Uto32F", - argLen: 1, + name: "MaskedSaturatedSubUint8x16", + argLen: 3, generic: true, }, { - name: "Cvt32Uto64F", - argLen: 1, + name: "MaskedSubUint8x16", + argLen: 3, generic: true, }, { - name: "Cvt32Fto32U", - argLen: 1, - generic: true, + name: "MaxUint8x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Cvt64Fto32U", - argLen: 1, - generic: true, + name: "MinUint8x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Cvt64Uto32F", - argLen: 1, - generic: true, + name: "NotEqualUint8x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Cvt64Uto64F", + name: "OrUint8x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "PopCountUint8x16", argLen: 1, generic: true, }, { - name: "Cvt32Fto64U", - argLen: 1, + name: "SaturatedAddUint8x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "SaturatedSubUint8x16", + argLen: 2, generic: true, }, { - name: "Cvt64Fto64U", - argLen: 1, + name: "SubUint8x16", + argLen: 2, generic: true, }, { - name: "Select0", - argLen: 1, - zeroWidth: true, - generic: true, + name: "XorUint8x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Select1", - argLen: 1, - zeroWidth: true, - generic: true, + name: "AddUint8x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MakeTuple", - argLen: 2, - generic: true, + name: "AndUint8x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SelectN", - auxType: auxInt64, - argLen: 1, - generic: true, + name: "AndNotUint8x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SelectNAddr", - auxType: auxInt64, - 
argLen: 1, - generic: true, + name: "AverageUint8x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MakeResult", - argLen: -1, - generic: true, + name: "EqualUint8x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicLoad8", + name: "GreaterUint8x32", argLen: 2, generic: true, }, { - name: "AtomicLoad32", + name: "GreaterEqualUint8x32", argLen: 2, generic: true, }, { - name: "AtomicLoad64", + name: "LessUint8x32", argLen: 2, generic: true, }, { - name: "AtomicLoadPtr", + name: "LessEqualUint8x32", argLen: 2, generic: true, }, { - name: "AtomicLoadAcq32", - argLen: 2, - generic: true, + name: "MaskedAddUint8x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AtomicLoadAcq64", - argLen: 2, + name: "MaskedAverageUint8x32", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedEqualUint8x32", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterUint8x32", + argLen: 3, generic: true, }, { - name: "AtomicStore8", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedGreaterEqualUint8x32", + argLen: 3, + generic: true, }, { - name: "AtomicStore32", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedLessUint8x32", + argLen: 3, + generic: true, }, { - name: "AtomicStore64", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedLessEqualUint8x32", + argLen: 3, + generic: true, }, { - name: "AtomicStorePtrNoWB", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedMaxUint8x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AtomicStoreRel32", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedMinUint8x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AtomicStoreRel64", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedNotEqualUint8x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AtomicExchange8", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedPopCountUint8x32", + argLen: 2, + generic: true, }, { - name: "AtomicExchange32", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedSaturatedAddUint8x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AtomicExchange64", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedSaturatedSubUint8x32", + argLen: 3, + generic: true, }, { - name: "AtomicAdd32", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedSubUint8x32", + argLen: 3, + generic: true, }, { - name: "AtomicAdd64", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaxUint8x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicCompareAndSwap32", - argLen: 4, - hasSideEffects: true, - generic: true, + name: "MinUint8x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicCompareAndSwap64", - argLen: 4, - hasSideEffects: true, - generic: true, + name: "NotEqualUint8x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicCompareAndSwapRel32", - argLen: 4, - hasSideEffects: true, - generic: true, + name: "OrUint8x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicAnd8", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "PopCountUint8x32", + argLen: 1, + generic: true, }, { - name: "AtomicOr8", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "SaturatedAddUint8x32", + argLen: 2, + commutative: true, + generic: true, }, { - 
name: "AtomicAnd32", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "SaturatedSubUint8x32", + argLen: 2, + generic: true, }, { - name: "AtomicOr32", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "SubUint8x32", + argLen: 2, + generic: true, }, { - name: "AtomicAnd64value", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "XorUint8x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicAnd32value", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "AddUint8x64", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicAnd8value", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "AverageUint8x64", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicOr64value", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "EqualUint8x64", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicOr32value", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "GreaterUint8x64", + argLen: 2, + generic: true, }, { - name: "AtomicOr8value", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "GreaterEqualUint8x64", + argLen: 2, + generic: true, }, { - name: "AtomicStore8Variant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "LessUint8x64", + argLen: 2, + generic: true, }, { - name: "AtomicStore32Variant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "LessEqualUint8x64", + argLen: 2, + generic: true, }, { - name: "AtomicStore64Variant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedAddUint8x64", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AtomicAdd32Variant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedAverageUint8x64", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AtomicAdd64Variant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedEqualUint8x64", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AtomicExchange8Variant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedGreaterUint8x64", + argLen: 3, + generic: true, }, { - name: "AtomicExchange32Variant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedGreaterEqualUint8x64", + argLen: 3, + generic: true, }, { - name: "AtomicExchange64Variant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedLessUint8x64", + argLen: 3, + generic: true, }, { - name: "AtomicCompareAndSwap32Variant", - argLen: 4, - hasSideEffects: true, - generic: true, + name: "MaskedLessEqualUint8x64", + argLen: 3, + generic: true, }, { - name: "AtomicCompareAndSwap64Variant", - argLen: 4, - hasSideEffects: true, - generic: true, + name: "MaskedMaxUint8x64", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AtomicAnd64valueVariant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedMinUint8x64", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AtomicOr64valueVariant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedNotEqualUint8x64", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AtomicAnd32valueVariant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedPopCountUint8x64", + argLen: 2, + generic: true, }, { - name: "AtomicOr32valueVariant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedSaturatedAddUint8x64", + argLen: 3, + commutative: true, + generic: true, }, { - name: 
"AtomicAnd8valueVariant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedSaturatedSubUint8x64", + argLen: 3, + generic: true, }, { - name: "AtomicOr8valueVariant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedSubUint8x64", + argLen: 3, + generic: true, }, { - name: "PubBarrier", - argLen: 1, - hasSideEffects: true, - generic: true, + name: "MaxUint8x64", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Clobber", - auxType: auxSymOff, - argLen: 0, - symEffect: SymNone, - generic: true, + name: "MinUint8x64", + argLen: 2, + commutative: true, + generic: true, }, { - name: "ClobberReg", - argLen: 0, - generic: true, + name: "NotEqualUint8x64", + argLen: 2, + commutative: true, + generic: true, }, { - name: "PrefetchCache", - argLen: 2, - hasSideEffects: true, - generic: true, + name: "PopCountUint8x64", + argLen: 1, + generic: true, }, { - name: "PrefetchCacheStreamed", - argLen: 2, - hasSideEffects: true, - generic: true, + name: "SaturatedAddUint8x64", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Add32x4", + name: "SaturatedSubUint8x64", argLen: 2, generic: true, }, { - name: "ZeroSIMD", - argLen: 0, + name: "SubUint8x64", + argLen: 2, generic: true, }, } diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 3afcfe153a1654..88c90dce82ae21 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -553,6 +553,30 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64XORQload(v) case OpAMD64XORQmodify: return rewriteValueAMD64_OpAMD64XORQmodify(v) + case OpAbsoluteInt16x16: + return rewriteValueAMD64_OpAbsoluteInt16x16(v) + case OpAbsoluteInt16x32: + return rewriteValueAMD64_OpAbsoluteInt16x32(v) + case OpAbsoluteInt16x8: + return rewriteValueAMD64_OpAbsoluteInt16x8(v) + case OpAbsoluteInt32x16: + return rewriteValueAMD64_OpAbsoluteInt32x16(v) + case OpAbsoluteInt32x4: + return rewriteValueAMD64_OpAbsoluteInt32x4(v) + case OpAbsoluteInt32x8: + return rewriteValueAMD64_OpAbsoluteInt32x8(v) + case OpAbsoluteInt64x2: + return rewriteValueAMD64_OpAbsoluteInt64x2(v) + case OpAbsoluteInt64x4: + return rewriteValueAMD64_OpAbsoluteInt64x4(v) + case OpAbsoluteInt64x8: + return rewriteValueAMD64_OpAbsoluteInt64x8(v) + case OpAbsoluteInt8x16: + return rewriteValueAMD64_OpAbsoluteInt8x16(v) + case OpAbsoluteInt8x32: + return rewriteValueAMD64_OpAbsoluteInt8x32(v) + case OpAbsoluteInt8x64: + return rewriteValueAMD64_OpAbsoluteInt8x64(v) case OpAdd16: v.Op = OpAMD64ADDL return true @@ -571,9 +595,69 @@ func rewriteValueAMD64(v *Value) bool { case OpAdd8: v.Op = OpAMD64ADDL return true + case OpAddFloat32x16: + return rewriteValueAMD64_OpAddFloat32x16(v) + case OpAddFloat32x4: + return rewriteValueAMD64_OpAddFloat32x4(v) + case OpAddFloat32x8: + return rewriteValueAMD64_OpAddFloat32x8(v) + case OpAddFloat64x2: + return rewriteValueAMD64_OpAddFloat64x2(v) + case OpAddFloat64x4: + return rewriteValueAMD64_OpAddFloat64x4(v) + case OpAddFloat64x8: + return rewriteValueAMD64_OpAddFloat64x8(v) + case OpAddInt16x16: + return rewriteValueAMD64_OpAddInt16x16(v) + case OpAddInt16x32: + return rewriteValueAMD64_OpAddInt16x32(v) + case OpAddInt16x8: + return rewriteValueAMD64_OpAddInt16x8(v) + case OpAddInt32x16: + return rewriteValueAMD64_OpAddInt32x16(v) + case OpAddInt32x4: + return rewriteValueAMD64_OpAddInt32x4(v) + case OpAddInt32x8: + return rewriteValueAMD64_OpAddInt32x8(v) + case OpAddInt64x2: + return 
rewriteValueAMD64_OpAddInt64x2(v) + case OpAddInt64x4: + return rewriteValueAMD64_OpAddInt64x4(v) + case OpAddInt64x8: + return rewriteValueAMD64_OpAddInt64x8(v) + case OpAddInt8x16: + return rewriteValueAMD64_OpAddInt8x16(v) + case OpAddInt8x32: + return rewriteValueAMD64_OpAddInt8x32(v) + case OpAddInt8x64: + return rewriteValueAMD64_OpAddInt8x64(v) case OpAddPtr: v.Op = OpAMD64ADDQ return true + case OpAddUint16x16: + return rewriteValueAMD64_OpAddUint16x16(v) + case OpAddUint16x32: + return rewriteValueAMD64_OpAddUint16x32(v) + case OpAddUint16x8: + return rewriteValueAMD64_OpAddUint16x8(v) + case OpAddUint32x16: + return rewriteValueAMD64_OpAddUint32x16(v) + case OpAddUint32x4: + return rewriteValueAMD64_OpAddUint32x4(v) + case OpAddUint32x8: + return rewriteValueAMD64_OpAddUint32x8(v) + case OpAddUint64x2: + return rewriteValueAMD64_OpAddUint64x2(v) + case OpAddUint64x4: + return rewriteValueAMD64_OpAddUint64x4(v) + case OpAddUint64x8: + return rewriteValueAMD64_OpAddUint64x8(v) + case OpAddUint8x16: + return rewriteValueAMD64_OpAddUint8x16(v) + case OpAddUint8x32: + return rewriteValueAMD64_OpAddUint8x32(v) + case OpAddUint8x64: + return rewriteValueAMD64_OpAddUint8x64(v) case OpAddr: return rewriteValueAMD64_OpAddr(v) case OpAnd16: @@ -591,6 +675,134 @@ func rewriteValueAMD64(v *Value) bool { case OpAndB: v.Op = OpAMD64ANDL return true + case OpAndFloat32x16: + return rewriteValueAMD64_OpAndFloat32x16(v) + case OpAndFloat32x4: + return rewriteValueAMD64_OpAndFloat32x4(v) + case OpAndFloat32x8: + return rewriteValueAMD64_OpAndFloat32x8(v) + case OpAndFloat64x2: + return rewriteValueAMD64_OpAndFloat64x2(v) + case OpAndFloat64x4: + return rewriteValueAMD64_OpAndFloat64x4(v) + case OpAndFloat64x8: + return rewriteValueAMD64_OpAndFloat64x8(v) + case OpAndInt16x16: + return rewriteValueAMD64_OpAndInt16x16(v) + case OpAndInt16x8: + return rewriteValueAMD64_OpAndInt16x8(v) + case OpAndInt32x16: + return rewriteValueAMD64_OpAndInt32x16(v) + case OpAndInt32x4: + return rewriteValueAMD64_OpAndInt32x4(v) + case OpAndInt32x8: + return rewriteValueAMD64_OpAndInt32x8(v) + case OpAndInt64x2: + return rewriteValueAMD64_OpAndInt64x2(v) + case OpAndInt64x4: + return rewriteValueAMD64_OpAndInt64x4(v) + case OpAndInt64x8: + return rewriteValueAMD64_OpAndInt64x8(v) + case OpAndInt8x16: + return rewriteValueAMD64_OpAndInt8x16(v) + case OpAndInt8x32: + return rewriteValueAMD64_OpAndInt8x32(v) + case OpAndNotFloat32x16: + return rewriteValueAMD64_OpAndNotFloat32x16(v) + case OpAndNotFloat32x4: + return rewriteValueAMD64_OpAndNotFloat32x4(v) + case OpAndNotFloat32x8: + return rewriteValueAMD64_OpAndNotFloat32x8(v) + case OpAndNotFloat64x2: + return rewriteValueAMD64_OpAndNotFloat64x2(v) + case OpAndNotFloat64x4: + return rewriteValueAMD64_OpAndNotFloat64x4(v) + case OpAndNotFloat64x8: + return rewriteValueAMD64_OpAndNotFloat64x8(v) + case OpAndNotInt16x16: + return rewriteValueAMD64_OpAndNotInt16x16(v) + case OpAndNotInt16x8: + return rewriteValueAMD64_OpAndNotInt16x8(v) + case OpAndNotInt32x16: + return rewriteValueAMD64_OpAndNotInt32x16(v) + case OpAndNotInt32x4: + return rewriteValueAMD64_OpAndNotInt32x4(v) + case OpAndNotInt32x8: + return rewriteValueAMD64_OpAndNotInt32x8(v) + case OpAndNotInt64x2: + return rewriteValueAMD64_OpAndNotInt64x2(v) + case OpAndNotInt64x4: + return rewriteValueAMD64_OpAndNotInt64x4(v) + case OpAndNotInt64x8: + return rewriteValueAMD64_OpAndNotInt64x8(v) + case OpAndNotInt8x16: + return rewriteValueAMD64_OpAndNotInt8x16(v) + case OpAndNotInt8x32: + return 
rewriteValueAMD64_OpAndNotInt8x32(v) + case OpAndNotUint16x16: + return rewriteValueAMD64_OpAndNotUint16x16(v) + case OpAndNotUint16x8: + return rewriteValueAMD64_OpAndNotUint16x8(v) + case OpAndNotUint32x16: + return rewriteValueAMD64_OpAndNotUint32x16(v) + case OpAndNotUint32x4: + return rewriteValueAMD64_OpAndNotUint32x4(v) + case OpAndNotUint32x8: + return rewriteValueAMD64_OpAndNotUint32x8(v) + case OpAndNotUint64x2: + return rewriteValueAMD64_OpAndNotUint64x2(v) + case OpAndNotUint64x4: + return rewriteValueAMD64_OpAndNotUint64x4(v) + case OpAndNotUint64x8: + return rewriteValueAMD64_OpAndNotUint64x8(v) + case OpAndNotUint8x16: + return rewriteValueAMD64_OpAndNotUint8x16(v) + case OpAndNotUint8x32: + return rewriteValueAMD64_OpAndNotUint8x32(v) + case OpAndUint16x16: + return rewriteValueAMD64_OpAndUint16x16(v) + case OpAndUint16x8: + return rewriteValueAMD64_OpAndUint16x8(v) + case OpAndUint32x16: + return rewriteValueAMD64_OpAndUint32x16(v) + case OpAndUint32x4: + return rewriteValueAMD64_OpAndUint32x4(v) + case OpAndUint32x8: + return rewriteValueAMD64_OpAndUint32x8(v) + case OpAndUint64x2: + return rewriteValueAMD64_OpAndUint64x2(v) + case OpAndUint64x4: + return rewriteValueAMD64_OpAndUint64x4(v) + case OpAndUint64x8: + return rewriteValueAMD64_OpAndUint64x8(v) + case OpAndUint8x16: + return rewriteValueAMD64_OpAndUint8x16(v) + case OpAndUint8x32: + return rewriteValueAMD64_OpAndUint8x32(v) + case OpApproximateReciprocalFloat32x16: + return rewriteValueAMD64_OpApproximateReciprocalFloat32x16(v) + case OpApproximateReciprocalFloat32x4: + return rewriteValueAMD64_OpApproximateReciprocalFloat32x4(v) + case OpApproximateReciprocalFloat32x8: + return rewriteValueAMD64_OpApproximateReciprocalFloat32x8(v) + case OpApproximateReciprocalFloat64x2: + return rewriteValueAMD64_OpApproximateReciprocalFloat64x2(v) + case OpApproximateReciprocalFloat64x4: + return rewriteValueAMD64_OpApproximateReciprocalFloat64x4(v) + case OpApproximateReciprocalFloat64x8: + return rewriteValueAMD64_OpApproximateReciprocalFloat64x8(v) + case OpApproximateReciprocalOfSqrtFloat32x16: + return rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat32x16(v) + case OpApproximateReciprocalOfSqrtFloat32x4: + return rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat32x4(v) + case OpApproximateReciprocalOfSqrtFloat32x8: + return rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat32x8(v) + case OpApproximateReciprocalOfSqrtFloat64x2: + return rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat64x2(v) + case OpApproximateReciprocalOfSqrtFloat64x4: + return rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat64x4(v) + case OpApproximateReciprocalOfSqrtFloat64x8: + return rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat64x8(v) case OpAtomicAdd32: return rewriteValueAMD64_OpAtomicAdd32(v) case OpAtomicAdd64: @@ -637,6 +849,18 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAtomicStore8(v) case OpAtomicStorePtrNoWB: return rewriteValueAMD64_OpAtomicStorePtrNoWB(v) + case OpAverageUint16x16: + return rewriteValueAMD64_OpAverageUint16x16(v) + case OpAverageUint16x32: + return rewriteValueAMD64_OpAverageUint16x32(v) + case OpAverageUint16x8: + return rewriteValueAMD64_OpAverageUint16x8(v) + case OpAverageUint8x16: + return rewriteValueAMD64_OpAverageUint8x16(v) + case OpAverageUint8x32: + return rewriteValueAMD64_OpAverageUint8x32(v) + case OpAverageUint8x64: + return rewriteValueAMD64_OpAverageUint8x64(v) case OpAvg64u: v.Op = OpAMD64AVGQU return true @@ -769,6 +993,18 @@ func rewriteValueAMD64(v 
*Value) bool { return rewriteValueAMD64_OpDiv8(v) case OpDiv8u: return rewriteValueAMD64_OpDiv8u(v) + case OpDivFloat32x16: + return rewriteValueAMD64_OpDivFloat32x16(v) + case OpDivFloat32x4: + return rewriteValueAMD64_OpDivFloat32x4(v) + case OpDivFloat32x8: + return rewriteValueAMD64_OpDivFloat32x8(v) + case OpDivFloat64x2: + return rewriteValueAMD64_OpDivFloat64x2(v) + case OpDivFloat64x4: + return rewriteValueAMD64_OpDivFloat64x4(v) + case OpDivFloat64x8: + return rewriteValueAMD64_OpDivFloat64x8(v) case OpEq16: return rewriteValueAMD64_OpEq16(v) case OpEq32: @@ -785,6 +1021,66 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpEqB(v) case OpEqPtr: return rewriteValueAMD64_OpEqPtr(v) + case OpEqualFloat32x16: + return rewriteValueAMD64_OpEqualFloat32x16(v) + case OpEqualFloat32x4: + return rewriteValueAMD64_OpEqualFloat32x4(v) + case OpEqualFloat32x8: + return rewriteValueAMD64_OpEqualFloat32x8(v) + case OpEqualFloat64x2: + return rewriteValueAMD64_OpEqualFloat64x2(v) + case OpEqualFloat64x4: + return rewriteValueAMD64_OpEqualFloat64x4(v) + case OpEqualFloat64x8: + return rewriteValueAMD64_OpEqualFloat64x8(v) + case OpEqualInt16x16: + return rewriteValueAMD64_OpEqualInt16x16(v) + case OpEqualInt16x32: + return rewriteValueAMD64_OpEqualInt16x32(v) + case OpEqualInt16x8: + return rewriteValueAMD64_OpEqualInt16x8(v) + case OpEqualInt32x16: + return rewriteValueAMD64_OpEqualInt32x16(v) + case OpEqualInt32x4: + return rewriteValueAMD64_OpEqualInt32x4(v) + case OpEqualInt32x8: + return rewriteValueAMD64_OpEqualInt32x8(v) + case OpEqualInt64x2: + return rewriteValueAMD64_OpEqualInt64x2(v) + case OpEqualInt64x4: + return rewriteValueAMD64_OpEqualInt64x4(v) + case OpEqualInt64x8: + return rewriteValueAMD64_OpEqualInt64x8(v) + case OpEqualInt8x16: + return rewriteValueAMD64_OpEqualInt8x16(v) + case OpEqualInt8x32: + return rewriteValueAMD64_OpEqualInt8x32(v) + case OpEqualInt8x64: + return rewriteValueAMD64_OpEqualInt8x64(v) + case OpEqualUint16x16: + return rewriteValueAMD64_OpEqualUint16x16(v) + case OpEqualUint16x32: + return rewriteValueAMD64_OpEqualUint16x32(v) + case OpEqualUint16x8: + return rewriteValueAMD64_OpEqualUint16x8(v) + case OpEqualUint32x16: + return rewriteValueAMD64_OpEqualUint32x16(v) + case OpEqualUint32x4: + return rewriteValueAMD64_OpEqualUint32x4(v) + case OpEqualUint32x8: + return rewriteValueAMD64_OpEqualUint32x8(v) + case OpEqualUint64x2: + return rewriteValueAMD64_OpEqualUint64x2(v) + case OpEqualUint64x4: + return rewriteValueAMD64_OpEqualUint64x4(v) + case OpEqualUint64x8: + return rewriteValueAMD64_OpEqualUint64x8(v) + case OpEqualUint8x16: + return rewriteValueAMD64_OpEqualUint8x16(v) + case OpEqualUint8x32: + return rewriteValueAMD64_OpEqualUint8x32(v) + case OpEqualUint8x64: + return rewriteValueAMD64_OpEqualUint8x64(v) case OpFMA: return rewriteValueAMD64_OpFMA(v) case OpFloor: @@ -800,6 +1096,126 @@ func rewriteValueAMD64(v *Value) bool { return true case OpGetG: return rewriteValueAMD64_OpGetG(v) + case OpGreaterEqualFloat32x16: + return rewriteValueAMD64_OpGreaterEqualFloat32x16(v) + case OpGreaterEqualFloat32x4: + return rewriteValueAMD64_OpGreaterEqualFloat32x4(v) + case OpGreaterEqualFloat32x8: + return rewriteValueAMD64_OpGreaterEqualFloat32x8(v) + case OpGreaterEqualFloat64x2: + return rewriteValueAMD64_OpGreaterEqualFloat64x2(v) + case OpGreaterEqualFloat64x4: + return rewriteValueAMD64_OpGreaterEqualFloat64x4(v) + case OpGreaterEqualFloat64x8: + return rewriteValueAMD64_OpGreaterEqualFloat64x8(v) + case OpGreaterEqualInt16x16: + 
return rewriteValueAMD64_OpGreaterEqualInt16x16(v) + case OpGreaterEqualInt16x32: + return rewriteValueAMD64_OpGreaterEqualInt16x32(v) + case OpGreaterEqualInt16x8: + return rewriteValueAMD64_OpGreaterEqualInt16x8(v) + case OpGreaterEqualInt32x16: + return rewriteValueAMD64_OpGreaterEqualInt32x16(v) + case OpGreaterEqualInt32x4: + return rewriteValueAMD64_OpGreaterEqualInt32x4(v) + case OpGreaterEqualInt32x8: + return rewriteValueAMD64_OpGreaterEqualInt32x8(v) + case OpGreaterEqualInt64x2: + return rewriteValueAMD64_OpGreaterEqualInt64x2(v) + case OpGreaterEqualInt64x4: + return rewriteValueAMD64_OpGreaterEqualInt64x4(v) + case OpGreaterEqualInt64x8: + return rewriteValueAMD64_OpGreaterEqualInt64x8(v) + case OpGreaterEqualInt8x16: + return rewriteValueAMD64_OpGreaterEqualInt8x16(v) + case OpGreaterEqualInt8x32: + return rewriteValueAMD64_OpGreaterEqualInt8x32(v) + case OpGreaterEqualInt8x64: + return rewriteValueAMD64_OpGreaterEqualInt8x64(v) + case OpGreaterEqualUint16x16: + return rewriteValueAMD64_OpGreaterEqualUint16x16(v) + case OpGreaterEqualUint16x32: + return rewriteValueAMD64_OpGreaterEqualUint16x32(v) + case OpGreaterEqualUint16x8: + return rewriteValueAMD64_OpGreaterEqualUint16x8(v) + case OpGreaterEqualUint32x16: + return rewriteValueAMD64_OpGreaterEqualUint32x16(v) + case OpGreaterEqualUint32x4: + return rewriteValueAMD64_OpGreaterEqualUint32x4(v) + case OpGreaterEqualUint32x8: + return rewriteValueAMD64_OpGreaterEqualUint32x8(v) + case OpGreaterEqualUint64x2: + return rewriteValueAMD64_OpGreaterEqualUint64x2(v) + case OpGreaterEqualUint64x4: + return rewriteValueAMD64_OpGreaterEqualUint64x4(v) + case OpGreaterEqualUint64x8: + return rewriteValueAMD64_OpGreaterEqualUint64x8(v) + case OpGreaterEqualUint8x16: + return rewriteValueAMD64_OpGreaterEqualUint8x16(v) + case OpGreaterEqualUint8x32: + return rewriteValueAMD64_OpGreaterEqualUint8x32(v) + case OpGreaterEqualUint8x64: + return rewriteValueAMD64_OpGreaterEqualUint8x64(v) + case OpGreaterFloat32x16: + return rewriteValueAMD64_OpGreaterFloat32x16(v) + case OpGreaterFloat32x4: + return rewriteValueAMD64_OpGreaterFloat32x4(v) + case OpGreaterFloat32x8: + return rewriteValueAMD64_OpGreaterFloat32x8(v) + case OpGreaterFloat64x2: + return rewriteValueAMD64_OpGreaterFloat64x2(v) + case OpGreaterFloat64x4: + return rewriteValueAMD64_OpGreaterFloat64x4(v) + case OpGreaterFloat64x8: + return rewriteValueAMD64_OpGreaterFloat64x8(v) + case OpGreaterInt16x16: + return rewriteValueAMD64_OpGreaterInt16x16(v) + case OpGreaterInt16x32: + return rewriteValueAMD64_OpGreaterInt16x32(v) + case OpGreaterInt16x8: + return rewriteValueAMD64_OpGreaterInt16x8(v) + case OpGreaterInt32x16: + return rewriteValueAMD64_OpGreaterInt32x16(v) + case OpGreaterInt32x4: + return rewriteValueAMD64_OpGreaterInt32x4(v) + case OpGreaterInt32x8: + return rewriteValueAMD64_OpGreaterInt32x8(v) + case OpGreaterInt64x2: + return rewriteValueAMD64_OpGreaterInt64x2(v) + case OpGreaterInt64x4: + return rewriteValueAMD64_OpGreaterInt64x4(v) + case OpGreaterInt64x8: + return rewriteValueAMD64_OpGreaterInt64x8(v) + case OpGreaterInt8x16: + return rewriteValueAMD64_OpGreaterInt8x16(v) + case OpGreaterInt8x32: + return rewriteValueAMD64_OpGreaterInt8x32(v) + case OpGreaterInt8x64: + return rewriteValueAMD64_OpGreaterInt8x64(v) + case OpGreaterUint16x16: + return rewriteValueAMD64_OpGreaterUint16x16(v) + case OpGreaterUint16x32: + return rewriteValueAMD64_OpGreaterUint16x32(v) + case OpGreaterUint16x8: + return rewriteValueAMD64_OpGreaterUint16x8(v) + case OpGreaterUint32x16: + 
return rewriteValueAMD64_OpGreaterUint32x16(v) + case OpGreaterUint32x4: + return rewriteValueAMD64_OpGreaterUint32x4(v) + case OpGreaterUint32x8: + return rewriteValueAMD64_OpGreaterUint32x8(v) + case OpGreaterUint64x2: + return rewriteValueAMD64_OpGreaterUint64x2(v) + case OpGreaterUint64x4: + return rewriteValueAMD64_OpGreaterUint64x4(v) + case OpGreaterUint64x8: + return rewriteValueAMD64_OpGreaterUint64x8(v) + case OpGreaterUint8x16: + return rewriteValueAMD64_OpGreaterUint8x16(v) + case OpGreaterUint8x32: + return rewriteValueAMD64_OpGreaterUint8x32(v) + case OpGreaterUint8x64: + return rewriteValueAMD64_OpGreaterUint8x64(v) case OpHasCPUFeature: return rewriteValueAMD64_OpHasCPUFeature(v) case OpHmul32: @@ -819,6 +1235,18 @@ func rewriteValueAMD64(v *Value) bool { return true case OpIsInBounds: return rewriteValueAMD64_OpIsInBounds(v) + case OpIsNanFloat32x16: + return rewriteValueAMD64_OpIsNanFloat32x16(v) + case OpIsNanFloat32x4: + return rewriteValueAMD64_OpIsNanFloat32x4(v) + case OpIsNanFloat32x8: + return rewriteValueAMD64_OpIsNanFloat32x8(v) + case OpIsNanFloat64x2: + return rewriteValueAMD64_OpIsNanFloat64x2(v) + case OpIsNanFloat64x4: + return rewriteValueAMD64_OpIsNanFloat64x4(v) + case OpIsNanFloat64x8: + return rewriteValueAMD64_OpIsNanFloat64x8(v) case OpIsNonNil: return rewriteValueAMD64_OpIsNonNil(v) case OpIsSliceInBounds: @@ -863,6 +1291,126 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpLess8(v) case OpLess8U: return rewriteValueAMD64_OpLess8U(v) + case OpLessEqualFloat32x16: + return rewriteValueAMD64_OpLessEqualFloat32x16(v) + case OpLessEqualFloat32x4: + return rewriteValueAMD64_OpLessEqualFloat32x4(v) + case OpLessEqualFloat32x8: + return rewriteValueAMD64_OpLessEqualFloat32x8(v) + case OpLessEqualFloat64x2: + return rewriteValueAMD64_OpLessEqualFloat64x2(v) + case OpLessEqualFloat64x4: + return rewriteValueAMD64_OpLessEqualFloat64x4(v) + case OpLessEqualFloat64x8: + return rewriteValueAMD64_OpLessEqualFloat64x8(v) + case OpLessEqualInt16x16: + return rewriteValueAMD64_OpLessEqualInt16x16(v) + case OpLessEqualInt16x32: + return rewriteValueAMD64_OpLessEqualInt16x32(v) + case OpLessEqualInt16x8: + return rewriteValueAMD64_OpLessEqualInt16x8(v) + case OpLessEqualInt32x16: + return rewriteValueAMD64_OpLessEqualInt32x16(v) + case OpLessEqualInt32x4: + return rewriteValueAMD64_OpLessEqualInt32x4(v) + case OpLessEqualInt32x8: + return rewriteValueAMD64_OpLessEqualInt32x8(v) + case OpLessEqualInt64x2: + return rewriteValueAMD64_OpLessEqualInt64x2(v) + case OpLessEqualInt64x4: + return rewriteValueAMD64_OpLessEqualInt64x4(v) + case OpLessEqualInt64x8: + return rewriteValueAMD64_OpLessEqualInt64x8(v) + case OpLessEqualInt8x16: + return rewriteValueAMD64_OpLessEqualInt8x16(v) + case OpLessEqualInt8x32: + return rewriteValueAMD64_OpLessEqualInt8x32(v) + case OpLessEqualInt8x64: + return rewriteValueAMD64_OpLessEqualInt8x64(v) + case OpLessEqualUint16x16: + return rewriteValueAMD64_OpLessEqualUint16x16(v) + case OpLessEqualUint16x32: + return rewriteValueAMD64_OpLessEqualUint16x32(v) + case OpLessEqualUint16x8: + return rewriteValueAMD64_OpLessEqualUint16x8(v) + case OpLessEqualUint32x16: + return rewriteValueAMD64_OpLessEqualUint32x16(v) + case OpLessEqualUint32x4: + return rewriteValueAMD64_OpLessEqualUint32x4(v) + case OpLessEqualUint32x8: + return rewriteValueAMD64_OpLessEqualUint32x8(v) + case OpLessEqualUint64x2: + return rewriteValueAMD64_OpLessEqualUint64x2(v) + case OpLessEqualUint64x4: + return rewriteValueAMD64_OpLessEqualUint64x4(v) + 
case OpLessEqualUint64x8: + return rewriteValueAMD64_OpLessEqualUint64x8(v) + case OpLessEqualUint8x16: + return rewriteValueAMD64_OpLessEqualUint8x16(v) + case OpLessEqualUint8x32: + return rewriteValueAMD64_OpLessEqualUint8x32(v) + case OpLessEqualUint8x64: + return rewriteValueAMD64_OpLessEqualUint8x64(v) + case OpLessFloat32x16: + return rewriteValueAMD64_OpLessFloat32x16(v) + case OpLessFloat32x4: + return rewriteValueAMD64_OpLessFloat32x4(v) + case OpLessFloat32x8: + return rewriteValueAMD64_OpLessFloat32x8(v) + case OpLessFloat64x2: + return rewriteValueAMD64_OpLessFloat64x2(v) + case OpLessFloat64x4: + return rewriteValueAMD64_OpLessFloat64x4(v) + case OpLessFloat64x8: + return rewriteValueAMD64_OpLessFloat64x8(v) + case OpLessInt16x16: + return rewriteValueAMD64_OpLessInt16x16(v) + case OpLessInt16x32: + return rewriteValueAMD64_OpLessInt16x32(v) + case OpLessInt16x8: + return rewriteValueAMD64_OpLessInt16x8(v) + case OpLessInt32x16: + return rewriteValueAMD64_OpLessInt32x16(v) + case OpLessInt32x4: + return rewriteValueAMD64_OpLessInt32x4(v) + case OpLessInt32x8: + return rewriteValueAMD64_OpLessInt32x8(v) + case OpLessInt64x2: + return rewriteValueAMD64_OpLessInt64x2(v) + case OpLessInt64x4: + return rewriteValueAMD64_OpLessInt64x4(v) + case OpLessInt64x8: + return rewriteValueAMD64_OpLessInt64x8(v) + case OpLessInt8x16: + return rewriteValueAMD64_OpLessInt8x16(v) + case OpLessInt8x32: + return rewriteValueAMD64_OpLessInt8x32(v) + case OpLessInt8x64: + return rewriteValueAMD64_OpLessInt8x64(v) + case OpLessUint16x16: + return rewriteValueAMD64_OpLessUint16x16(v) + case OpLessUint16x32: + return rewriteValueAMD64_OpLessUint16x32(v) + case OpLessUint16x8: + return rewriteValueAMD64_OpLessUint16x8(v) + case OpLessUint32x16: + return rewriteValueAMD64_OpLessUint32x16(v) + case OpLessUint32x4: + return rewriteValueAMD64_OpLessUint32x4(v) + case OpLessUint32x8: + return rewriteValueAMD64_OpLessUint32x8(v) + case OpLessUint64x2: + return rewriteValueAMD64_OpLessUint64x2(v) + case OpLessUint64x4: + return rewriteValueAMD64_OpLessUint64x4(v) + case OpLessUint64x8: + return rewriteValueAMD64_OpLessUint64x8(v) + case OpLessUint8x16: + return rewriteValueAMD64_OpLessUint8x16(v) + case OpLessUint8x32: + return rewriteValueAMD64_OpLessUint8x32(v) + case OpLessUint8x64: + return rewriteValueAMD64_OpLessUint8x64(v) case OpLoad: return rewriteValueAMD64_OpLoad(v) case OpLocalAddr: @@ -899,14 +1447,1136 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpLsh8x64(v) case OpLsh8x8: return rewriteValueAMD64_OpLsh8x8(v) + case OpMaskedAbsoluteInt16x16: + return rewriteValueAMD64_OpMaskedAbsoluteInt16x16(v) + case OpMaskedAbsoluteInt16x32: + return rewriteValueAMD64_OpMaskedAbsoluteInt16x32(v) + case OpMaskedAbsoluteInt16x8: + return rewriteValueAMD64_OpMaskedAbsoluteInt16x8(v) + case OpMaskedAbsoluteInt32x16: + return rewriteValueAMD64_OpMaskedAbsoluteInt32x16(v) + case OpMaskedAbsoluteInt32x4: + return rewriteValueAMD64_OpMaskedAbsoluteInt32x4(v) + case OpMaskedAbsoluteInt32x8: + return rewriteValueAMD64_OpMaskedAbsoluteInt32x8(v) + case OpMaskedAbsoluteInt64x2: + return rewriteValueAMD64_OpMaskedAbsoluteInt64x2(v) + case OpMaskedAbsoluteInt64x4: + return rewriteValueAMD64_OpMaskedAbsoluteInt64x4(v) + case OpMaskedAbsoluteInt64x8: + return rewriteValueAMD64_OpMaskedAbsoluteInt64x8(v) + case OpMaskedAbsoluteInt8x16: + return rewriteValueAMD64_OpMaskedAbsoluteInt8x16(v) + case OpMaskedAbsoluteInt8x32: + return rewriteValueAMD64_OpMaskedAbsoluteInt8x32(v) + case 
OpMaskedAbsoluteInt8x64: + return rewriteValueAMD64_OpMaskedAbsoluteInt8x64(v) + case OpMaskedAddFloat32x16: + return rewriteValueAMD64_OpMaskedAddFloat32x16(v) + case OpMaskedAddFloat32x4: + return rewriteValueAMD64_OpMaskedAddFloat32x4(v) + case OpMaskedAddFloat32x8: + return rewriteValueAMD64_OpMaskedAddFloat32x8(v) + case OpMaskedAddFloat64x2: + return rewriteValueAMD64_OpMaskedAddFloat64x2(v) + case OpMaskedAddFloat64x4: + return rewriteValueAMD64_OpMaskedAddFloat64x4(v) + case OpMaskedAddFloat64x8: + return rewriteValueAMD64_OpMaskedAddFloat64x8(v) + case OpMaskedAddInt16x16: + return rewriteValueAMD64_OpMaskedAddInt16x16(v) + case OpMaskedAddInt16x32: + return rewriteValueAMD64_OpMaskedAddInt16x32(v) + case OpMaskedAddInt16x8: + return rewriteValueAMD64_OpMaskedAddInt16x8(v) + case OpMaskedAddInt32x16: + return rewriteValueAMD64_OpMaskedAddInt32x16(v) + case OpMaskedAddInt32x4: + return rewriteValueAMD64_OpMaskedAddInt32x4(v) + case OpMaskedAddInt32x8: + return rewriteValueAMD64_OpMaskedAddInt32x8(v) + case OpMaskedAddInt64x2: + return rewriteValueAMD64_OpMaskedAddInt64x2(v) + case OpMaskedAddInt64x4: + return rewriteValueAMD64_OpMaskedAddInt64x4(v) + case OpMaskedAddInt64x8: + return rewriteValueAMD64_OpMaskedAddInt64x8(v) + case OpMaskedAddInt8x16: + return rewriteValueAMD64_OpMaskedAddInt8x16(v) + case OpMaskedAddInt8x32: + return rewriteValueAMD64_OpMaskedAddInt8x32(v) + case OpMaskedAddInt8x64: + return rewriteValueAMD64_OpMaskedAddInt8x64(v) + case OpMaskedAddUint16x16: + return rewriteValueAMD64_OpMaskedAddUint16x16(v) + case OpMaskedAddUint16x32: + return rewriteValueAMD64_OpMaskedAddUint16x32(v) + case OpMaskedAddUint16x8: + return rewriteValueAMD64_OpMaskedAddUint16x8(v) + case OpMaskedAddUint32x16: + return rewriteValueAMD64_OpMaskedAddUint32x16(v) + case OpMaskedAddUint32x4: + return rewriteValueAMD64_OpMaskedAddUint32x4(v) + case OpMaskedAddUint32x8: + return rewriteValueAMD64_OpMaskedAddUint32x8(v) + case OpMaskedAddUint64x2: + return rewriteValueAMD64_OpMaskedAddUint64x2(v) + case OpMaskedAddUint64x4: + return rewriteValueAMD64_OpMaskedAddUint64x4(v) + case OpMaskedAddUint64x8: + return rewriteValueAMD64_OpMaskedAddUint64x8(v) + case OpMaskedAddUint8x16: + return rewriteValueAMD64_OpMaskedAddUint8x16(v) + case OpMaskedAddUint8x32: + return rewriteValueAMD64_OpMaskedAddUint8x32(v) + case OpMaskedAddUint8x64: + return rewriteValueAMD64_OpMaskedAddUint8x64(v) + case OpMaskedAndFloat32x16: + return rewriteValueAMD64_OpMaskedAndFloat32x16(v) + case OpMaskedAndFloat32x4: + return rewriteValueAMD64_OpMaskedAndFloat32x4(v) + case OpMaskedAndFloat32x8: + return rewriteValueAMD64_OpMaskedAndFloat32x8(v) + case OpMaskedAndFloat64x2: + return rewriteValueAMD64_OpMaskedAndFloat64x2(v) + case OpMaskedAndFloat64x4: + return rewriteValueAMD64_OpMaskedAndFloat64x4(v) + case OpMaskedAndFloat64x8: + return rewriteValueAMD64_OpMaskedAndFloat64x8(v) + case OpMaskedAndInt32x16: + return rewriteValueAMD64_OpMaskedAndInt32x16(v) + case OpMaskedAndInt32x4: + return rewriteValueAMD64_OpMaskedAndInt32x4(v) + case OpMaskedAndInt32x8: + return rewriteValueAMD64_OpMaskedAndInt32x8(v) + case OpMaskedAndInt64x2: + return rewriteValueAMD64_OpMaskedAndInt64x2(v) + case OpMaskedAndInt64x4: + return rewriteValueAMD64_OpMaskedAndInt64x4(v) + case OpMaskedAndInt64x8: + return rewriteValueAMD64_OpMaskedAndInt64x8(v) + case OpMaskedAndNotFloat32x16: + return rewriteValueAMD64_OpMaskedAndNotFloat32x16(v) + case OpMaskedAndNotFloat32x4: + return rewriteValueAMD64_OpMaskedAndNotFloat32x4(v) + case 
OpMaskedAndNotFloat32x8: + return rewriteValueAMD64_OpMaskedAndNotFloat32x8(v) + case OpMaskedAndNotFloat64x2: + return rewriteValueAMD64_OpMaskedAndNotFloat64x2(v) + case OpMaskedAndNotFloat64x4: + return rewriteValueAMD64_OpMaskedAndNotFloat64x4(v) + case OpMaskedAndNotFloat64x8: + return rewriteValueAMD64_OpMaskedAndNotFloat64x8(v) + case OpMaskedAndNotInt32x16: + return rewriteValueAMD64_OpMaskedAndNotInt32x16(v) + case OpMaskedAndNotInt32x4: + return rewriteValueAMD64_OpMaskedAndNotInt32x4(v) + case OpMaskedAndNotInt32x8: + return rewriteValueAMD64_OpMaskedAndNotInt32x8(v) + case OpMaskedAndNotInt64x2: + return rewriteValueAMD64_OpMaskedAndNotInt64x2(v) + case OpMaskedAndNotInt64x4: + return rewriteValueAMD64_OpMaskedAndNotInt64x4(v) + case OpMaskedAndNotInt64x8: + return rewriteValueAMD64_OpMaskedAndNotInt64x8(v) + case OpMaskedAndNotUint32x16: + return rewriteValueAMD64_OpMaskedAndNotUint32x16(v) + case OpMaskedAndNotUint32x4: + return rewriteValueAMD64_OpMaskedAndNotUint32x4(v) + case OpMaskedAndNotUint32x8: + return rewriteValueAMD64_OpMaskedAndNotUint32x8(v) + case OpMaskedAndNotUint64x2: + return rewriteValueAMD64_OpMaskedAndNotUint64x2(v) + case OpMaskedAndNotUint64x4: + return rewriteValueAMD64_OpMaskedAndNotUint64x4(v) + case OpMaskedAndNotUint64x8: + return rewriteValueAMD64_OpMaskedAndNotUint64x8(v) + case OpMaskedAndUint32x16: + return rewriteValueAMD64_OpMaskedAndUint32x16(v) + case OpMaskedAndUint32x4: + return rewriteValueAMD64_OpMaskedAndUint32x4(v) + case OpMaskedAndUint32x8: + return rewriteValueAMD64_OpMaskedAndUint32x8(v) + case OpMaskedAndUint64x2: + return rewriteValueAMD64_OpMaskedAndUint64x2(v) + case OpMaskedAndUint64x4: + return rewriteValueAMD64_OpMaskedAndUint64x4(v) + case OpMaskedAndUint64x8: + return rewriteValueAMD64_OpMaskedAndUint64x8(v) + case OpMaskedApproximateReciprocalFloat32x16: + return rewriteValueAMD64_OpMaskedApproximateReciprocalFloat32x16(v) + case OpMaskedApproximateReciprocalFloat32x4: + return rewriteValueAMD64_OpMaskedApproximateReciprocalFloat32x4(v) + case OpMaskedApproximateReciprocalFloat32x8: + return rewriteValueAMD64_OpMaskedApproximateReciprocalFloat32x8(v) + case OpMaskedApproximateReciprocalFloat64x2: + return rewriteValueAMD64_OpMaskedApproximateReciprocalFloat64x2(v) + case OpMaskedApproximateReciprocalFloat64x4: + return rewriteValueAMD64_OpMaskedApproximateReciprocalFloat64x4(v) + case OpMaskedApproximateReciprocalFloat64x8: + return rewriteValueAMD64_OpMaskedApproximateReciprocalFloat64x8(v) + case OpMaskedApproximateReciprocalOfSqrtFloat32x16: + return rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat32x16(v) + case OpMaskedApproximateReciprocalOfSqrtFloat32x4: + return rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat32x4(v) + case OpMaskedApproximateReciprocalOfSqrtFloat32x8: + return rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat32x8(v) + case OpMaskedApproximateReciprocalOfSqrtFloat64x2: + return rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat64x2(v) + case OpMaskedApproximateReciprocalOfSqrtFloat64x4: + return rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat64x4(v) + case OpMaskedApproximateReciprocalOfSqrtFloat64x8: + return rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat64x8(v) + case OpMaskedAverageUint16x16: + return rewriteValueAMD64_OpMaskedAverageUint16x16(v) + case OpMaskedAverageUint16x32: + return rewriteValueAMD64_OpMaskedAverageUint16x32(v) + case OpMaskedAverageUint16x8: + return rewriteValueAMD64_OpMaskedAverageUint16x8(v) + case 
OpMaskedAverageUint8x16: + return rewriteValueAMD64_OpMaskedAverageUint8x16(v) + case OpMaskedAverageUint8x32: + return rewriteValueAMD64_OpMaskedAverageUint8x32(v) + case OpMaskedAverageUint8x64: + return rewriteValueAMD64_OpMaskedAverageUint8x64(v) + case OpMaskedDivFloat32x16: + return rewriteValueAMD64_OpMaskedDivFloat32x16(v) + case OpMaskedDivFloat32x4: + return rewriteValueAMD64_OpMaskedDivFloat32x4(v) + case OpMaskedDivFloat32x8: + return rewriteValueAMD64_OpMaskedDivFloat32x8(v) + case OpMaskedDivFloat64x2: + return rewriteValueAMD64_OpMaskedDivFloat64x2(v) + case OpMaskedDivFloat64x4: + return rewriteValueAMD64_OpMaskedDivFloat64x4(v) + case OpMaskedDivFloat64x8: + return rewriteValueAMD64_OpMaskedDivFloat64x8(v) + case OpMaskedEqualFloat32x16: + return rewriteValueAMD64_OpMaskedEqualFloat32x16(v) + case OpMaskedEqualFloat32x4: + return rewriteValueAMD64_OpMaskedEqualFloat32x4(v) + case OpMaskedEqualFloat32x8: + return rewriteValueAMD64_OpMaskedEqualFloat32x8(v) + case OpMaskedEqualFloat64x2: + return rewriteValueAMD64_OpMaskedEqualFloat64x2(v) + case OpMaskedEqualFloat64x4: + return rewriteValueAMD64_OpMaskedEqualFloat64x4(v) + case OpMaskedEqualFloat64x8: + return rewriteValueAMD64_OpMaskedEqualFloat64x8(v) + case OpMaskedEqualInt16x16: + return rewriteValueAMD64_OpMaskedEqualInt16x16(v) + case OpMaskedEqualInt16x32: + return rewriteValueAMD64_OpMaskedEqualInt16x32(v) + case OpMaskedEqualInt16x8: + return rewriteValueAMD64_OpMaskedEqualInt16x8(v) + case OpMaskedEqualInt32x16: + return rewriteValueAMD64_OpMaskedEqualInt32x16(v) + case OpMaskedEqualInt32x4: + return rewriteValueAMD64_OpMaskedEqualInt32x4(v) + case OpMaskedEqualInt32x8: + return rewriteValueAMD64_OpMaskedEqualInt32x8(v) + case OpMaskedEqualInt64x2: + return rewriteValueAMD64_OpMaskedEqualInt64x2(v) + case OpMaskedEqualInt64x4: + return rewriteValueAMD64_OpMaskedEqualInt64x4(v) + case OpMaskedEqualInt64x8: + return rewriteValueAMD64_OpMaskedEqualInt64x8(v) + case OpMaskedEqualInt8x16: + return rewriteValueAMD64_OpMaskedEqualInt8x16(v) + case OpMaskedEqualInt8x32: + return rewriteValueAMD64_OpMaskedEqualInt8x32(v) + case OpMaskedEqualInt8x64: + return rewriteValueAMD64_OpMaskedEqualInt8x64(v) + case OpMaskedEqualUint16x16: + return rewriteValueAMD64_OpMaskedEqualUint16x16(v) + case OpMaskedEqualUint16x32: + return rewriteValueAMD64_OpMaskedEqualUint16x32(v) + case OpMaskedEqualUint16x8: + return rewriteValueAMD64_OpMaskedEqualUint16x8(v) + case OpMaskedEqualUint32x16: + return rewriteValueAMD64_OpMaskedEqualUint32x16(v) + case OpMaskedEqualUint32x4: + return rewriteValueAMD64_OpMaskedEqualUint32x4(v) + case OpMaskedEqualUint32x8: + return rewriteValueAMD64_OpMaskedEqualUint32x8(v) + case OpMaskedEqualUint64x2: + return rewriteValueAMD64_OpMaskedEqualUint64x2(v) + case OpMaskedEqualUint64x4: + return rewriteValueAMD64_OpMaskedEqualUint64x4(v) + case OpMaskedEqualUint64x8: + return rewriteValueAMD64_OpMaskedEqualUint64x8(v) + case OpMaskedEqualUint8x16: + return rewriteValueAMD64_OpMaskedEqualUint8x16(v) + case OpMaskedEqualUint8x32: + return rewriteValueAMD64_OpMaskedEqualUint8x32(v) + case OpMaskedEqualUint8x64: + return rewriteValueAMD64_OpMaskedEqualUint8x64(v) + case OpMaskedGreaterEqualFloat32x16: + return rewriteValueAMD64_OpMaskedGreaterEqualFloat32x16(v) + case OpMaskedGreaterEqualFloat32x4: + return rewriteValueAMD64_OpMaskedGreaterEqualFloat32x4(v) + case OpMaskedGreaterEqualFloat32x8: + return rewriteValueAMD64_OpMaskedGreaterEqualFloat32x8(v) + case OpMaskedGreaterEqualFloat64x2: + return 
rewriteValueAMD64_OpMaskedGreaterEqualFloat64x2(v) + case OpMaskedGreaterEqualFloat64x4: + return rewriteValueAMD64_OpMaskedGreaterEqualFloat64x4(v) + case OpMaskedGreaterEqualFloat64x8: + return rewriteValueAMD64_OpMaskedGreaterEqualFloat64x8(v) + case OpMaskedGreaterEqualInt16x16: + return rewriteValueAMD64_OpMaskedGreaterEqualInt16x16(v) + case OpMaskedGreaterEqualInt16x32: + return rewriteValueAMD64_OpMaskedGreaterEqualInt16x32(v) + case OpMaskedGreaterEqualInt16x8: + return rewriteValueAMD64_OpMaskedGreaterEqualInt16x8(v) + case OpMaskedGreaterEqualInt32x16: + return rewriteValueAMD64_OpMaskedGreaterEqualInt32x16(v) + case OpMaskedGreaterEqualInt32x4: + return rewriteValueAMD64_OpMaskedGreaterEqualInt32x4(v) + case OpMaskedGreaterEqualInt32x8: + return rewriteValueAMD64_OpMaskedGreaterEqualInt32x8(v) + case OpMaskedGreaterEqualInt64x2: + return rewriteValueAMD64_OpMaskedGreaterEqualInt64x2(v) + case OpMaskedGreaterEqualInt64x4: + return rewriteValueAMD64_OpMaskedGreaterEqualInt64x4(v) + case OpMaskedGreaterEqualInt64x8: + return rewriteValueAMD64_OpMaskedGreaterEqualInt64x8(v) + case OpMaskedGreaterEqualInt8x16: + return rewriteValueAMD64_OpMaskedGreaterEqualInt8x16(v) + case OpMaskedGreaterEqualInt8x32: + return rewriteValueAMD64_OpMaskedGreaterEqualInt8x32(v) + case OpMaskedGreaterEqualInt8x64: + return rewriteValueAMD64_OpMaskedGreaterEqualInt8x64(v) + case OpMaskedGreaterEqualUint16x16: + return rewriteValueAMD64_OpMaskedGreaterEqualUint16x16(v) + case OpMaskedGreaterEqualUint16x32: + return rewriteValueAMD64_OpMaskedGreaterEqualUint16x32(v) + case OpMaskedGreaterEqualUint16x8: + return rewriteValueAMD64_OpMaskedGreaterEqualUint16x8(v) + case OpMaskedGreaterEqualUint32x16: + return rewriteValueAMD64_OpMaskedGreaterEqualUint32x16(v) + case OpMaskedGreaterEqualUint32x4: + return rewriteValueAMD64_OpMaskedGreaterEqualUint32x4(v) + case OpMaskedGreaterEqualUint32x8: + return rewriteValueAMD64_OpMaskedGreaterEqualUint32x8(v) + case OpMaskedGreaterEqualUint64x2: + return rewriteValueAMD64_OpMaskedGreaterEqualUint64x2(v) + case OpMaskedGreaterEqualUint64x4: + return rewriteValueAMD64_OpMaskedGreaterEqualUint64x4(v) + case OpMaskedGreaterEqualUint64x8: + return rewriteValueAMD64_OpMaskedGreaterEqualUint64x8(v) + case OpMaskedGreaterEqualUint8x16: + return rewriteValueAMD64_OpMaskedGreaterEqualUint8x16(v) + case OpMaskedGreaterEqualUint8x32: + return rewriteValueAMD64_OpMaskedGreaterEqualUint8x32(v) + case OpMaskedGreaterEqualUint8x64: + return rewriteValueAMD64_OpMaskedGreaterEqualUint8x64(v) + case OpMaskedGreaterFloat32x16: + return rewriteValueAMD64_OpMaskedGreaterFloat32x16(v) + case OpMaskedGreaterFloat32x4: + return rewriteValueAMD64_OpMaskedGreaterFloat32x4(v) + case OpMaskedGreaterFloat32x8: + return rewriteValueAMD64_OpMaskedGreaterFloat32x8(v) + case OpMaskedGreaterFloat64x2: + return rewriteValueAMD64_OpMaskedGreaterFloat64x2(v) + case OpMaskedGreaterFloat64x4: + return rewriteValueAMD64_OpMaskedGreaterFloat64x4(v) + case OpMaskedGreaterFloat64x8: + return rewriteValueAMD64_OpMaskedGreaterFloat64x8(v) + case OpMaskedGreaterInt16x16: + return rewriteValueAMD64_OpMaskedGreaterInt16x16(v) + case OpMaskedGreaterInt16x32: + return rewriteValueAMD64_OpMaskedGreaterInt16x32(v) + case OpMaskedGreaterInt16x8: + return rewriteValueAMD64_OpMaskedGreaterInt16x8(v) + case OpMaskedGreaterInt32x16: + return rewriteValueAMD64_OpMaskedGreaterInt32x16(v) + case OpMaskedGreaterInt32x4: + return rewriteValueAMD64_OpMaskedGreaterInt32x4(v) + case OpMaskedGreaterInt32x8: + return 
rewriteValueAMD64_OpMaskedGreaterInt32x8(v) + case OpMaskedGreaterInt64x2: + return rewriteValueAMD64_OpMaskedGreaterInt64x2(v) + case OpMaskedGreaterInt64x4: + return rewriteValueAMD64_OpMaskedGreaterInt64x4(v) + case OpMaskedGreaterInt64x8: + return rewriteValueAMD64_OpMaskedGreaterInt64x8(v) + case OpMaskedGreaterInt8x16: + return rewriteValueAMD64_OpMaskedGreaterInt8x16(v) + case OpMaskedGreaterInt8x32: + return rewriteValueAMD64_OpMaskedGreaterInt8x32(v) + case OpMaskedGreaterInt8x64: + return rewriteValueAMD64_OpMaskedGreaterInt8x64(v) + case OpMaskedGreaterUint16x16: + return rewriteValueAMD64_OpMaskedGreaterUint16x16(v) + case OpMaskedGreaterUint16x32: + return rewriteValueAMD64_OpMaskedGreaterUint16x32(v) + case OpMaskedGreaterUint16x8: + return rewriteValueAMD64_OpMaskedGreaterUint16x8(v) + case OpMaskedGreaterUint32x16: + return rewriteValueAMD64_OpMaskedGreaterUint32x16(v) + case OpMaskedGreaterUint32x4: + return rewriteValueAMD64_OpMaskedGreaterUint32x4(v) + case OpMaskedGreaterUint32x8: + return rewriteValueAMD64_OpMaskedGreaterUint32x8(v) + case OpMaskedGreaterUint64x2: + return rewriteValueAMD64_OpMaskedGreaterUint64x2(v) + case OpMaskedGreaterUint64x4: + return rewriteValueAMD64_OpMaskedGreaterUint64x4(v) + case OpMaskedGreaterUint64x8: + return rewriteValueAMD64_OpMaskedGreaterUint64x8(v) + case OpMaskedGreaterUint8x16: + return rewriteValueAMD64_OpMaskedGreaterUint8x16(v) + case OpMaskedGreaterUint8x32: + return rewriteValueAMD64_OpMaskedGreaterUint8x32(v) + case OpMaskedGreaterUint8x64: + return rewriteValueAMD64_OpMaskedGreaterUint8x64(v) + case OpMaskedIsNanFloat32x16: + return rewriteValueAMD64_OpMaskedIsNanFloat32x16(v) + case OpMaskedIsNanFloat32x4: + return rewriteValueAMD64_OpMaskedIsNanFloat32x4(v) + case OpMaskedIsNanFloat32x8: + return rewriteValueAMD64_OpMaskedIsNanFloat32x8(v) + case OpMaskedIsNanFloat64x2: + return rewriteValueAMD64_OpMaskedIsNanFloat64x2(v) + case OpMaskedIsNanFloat64x4: + return rewriteValueAMD64_OpMaskedIsNanFloat64x4(v) + case OpMaskedIsNanFloat64x8: + return rewriteValueAMD64_OpMaskedIsNanFloat64x8(v) + case OpMaskedLessEqualFloat32x16: + return rewriteValueAMD64_OpMaskedLessEqualFloat32x16(v) + case OpMaskedLessEqualFloat32x4: + return rewriteValueAMD64_OpMaskedLessEqualFloat32x4(v) + case OpMaskedLessEqualFloat32x8: + return rewriteValueAMD64_OpMaskedLessEqualFloat32x8(v) + case OpMaskedLessEqualFloat64x2: + return rewriteValueAMD64_OpMaskedLessEqualFloat64x2(v) + case OpMaskedLessEqualFloat64x4: + return rewriteValueAMD64_OpMaskedLessEqualFloat64x4(v) + case OpMaskedLessEqualFloat64x8: + return rewriteValueAMD64_OpMaskedLessEqualFloat64x8(v) + case OpMaskedLessEqualInt16x16: + return rewriteValueAMD64_OpMaskedLessEqualInt16x16(v) + case OpMaskedLessEqualInt16x32: + return rewriteValueAMD64_OpMaskedLessEqualInt16x32(v) + case OpMaskedLessEqualInt16x8: + return rewriteValueAMD64_OpMaskedLessEqualInt16x8(v) + case OpMaskedLessEqualInt32x16: + return rewriteValueAMD64_OpMaskedLessEqualInt32x16(v) + case OpMaskedLessEqualInt32x4: + return rewriteValueAMD64_OpMaskedLessEqualInt32x4(v) + case OpMaskedLessEqualInt32x8: + return rewriteValueAMD64_OpMaskedLessEqualInt32x8(v) + case OpMaskedLessEqualInt64x2: + return rewriteValueAMD64_OpMaskedLessEqualInt64x2(v) + case OpMaskedLessEqualInt64x4: + return rewriteValueAMD64_OpMaskedLessEqualInt64x4(v) + case OpMaskedLessEqualInt64x8: + return rewriteValueAMD64_OpMaskedLessEqualInt64x8(v) + case OpMaskedLessEqualInt8x16: + return rewriteValueAMD64_OpMaskedLessEqualInt8x16(v) + case 
OpMaskedLessEqualInt8x32: + return rewriteValueAMD64_OpMaskedLessEqualInt8x32(v) + case OpMaskedLessEqualInt8x64: + return rewriteValueAMD64_OpMaskedLessEqualInt8x64(v) + case OpMaskedLessEqualUint16x16: + return rewriteValueAMD64_OpMaskedLessEqualUint16x16(v) + case OpMaskedLessEqualUint16x32: + return rewriteValueAMD64_OpMaskedLessEqualUint16x32(v) + case OpMaskedLessEqualUint16x8: + return rewriteValueAMD64_OpMaskedLessEqualUint16x8(v) + case OpMaskedLessEqualUint32x16: + return rewriteValueAMD64_OpMaskedLessEqualUint32x16(v) + case OpMaskedLessEqualUint32x4: + return rewriteValueAMD64_OpMaskedLessEqualUint32x4(v) + case OpMaskedLessEqualUint32x8: + return rewriteValueAMD64_OpMaskedLessEqualUint32x8(v) + case OpMaskedLessEqualUint64x2: + return rewriteValueAMD64_OpMaskedLessEqualUint64x2(v) + case OpMaskedLessEqualUint64x4: + return rewriteValueAMD64_OpMaskedLessEqualUint64x4(v) + case OpMaskedLessEqualUint64x8: + return rewriteValueAMD64_OpMaskedLessEqualUint64x8(v) + case OpMaskedLessEqualUint8x16: + return rewriteValueAMD64_OpMaskedLessEqualUint8x16(v) + case OpMaskedLessEqualUint8x32: + return rewriteValueAMD64_OpMaskedLessEqualUint8x32(v) + case OpMaskedLessEqualUint8x64: + return rewriteValueAMD64_OpMaskedLessEqualUint8x64(v) + case OpMaskedLessFloat32x16: + return rewriteValueAMD64_OpMaskedLessFloat32x16(v) + case OpMaskedLessFloat32x4: + return rewriteValueAMD64_OpMaskedLessFloat32x4(v) + case OpMaskedLessFloat32x8: + return rewriteValueAMD64_OpMaskedLessFloat32x8(v) + case OpMaskedLessFloat64x2: + return rewriteValueAMD64_OpMaskedLessFloat64x2(v) + case OpMaskedLessFloat64x4: + return rewriteValueAMD64_OpMaskedLessFloat64x4(v) + case OpMaskedLessFloat64x8: + return rewriteValueAMD64_OpMaskedLessFloat64x8(v) + case OpMaskedLessInt16x16: + return rewriteValueAMD64_OpMaskedLessInt16x16(v) + case OpMaskedLessInt16x32: + return rewriteValueAMD64_OpMaskedLessInt16x32(v) + case OpMaskedLessInt16x8: + return rewriteValueAMD64_OpMaskedLessInt16x8(v) + case OpMaskedLessInt32x16: + return rewriteValueAMD64_OpMaskedLessInt32x16(v) + case OpMaskedLessInt32x4: + return rewriteValueAMD64_OpMaskedLessInt32x4(v) + case OpMaskedLessInt32x8: + return rewriteValueAMD64_OpMaskedLessInt32x8(v) + case OpMaskedLessInt64x2: + return rewriteValueAMD64_OpMaskedLessInt64x2(v) + case OpMaskedLessInt64x4: + return rewriteValueAMD64_OpMaskedLessInt64x4(v) + case OpMaskedLessInt64x8: + return rewriteValueAMD64_OpMaskedLessInt64x8(v) + case OpMaskedLessInt8x16: + return rewriteValueAMD64_OpMaskedLessInt8x16(v) + case OpMaskedLessInt8x32: + return rewriteValueAMD64_OpMaskedLessInt8x32(v) + case OpMaskedLessInt8x64: + return rewriteValueAMD64_OpMaskedLessInt8x64(v) + case OpMaskedLessUint16x16: + return rewriteValueAMD64_OpMaskedLessUint16x16(v) + case OpMaskedLessUint16x32: + return rewriteValueAMD64_OpMaskedLessUint16x32(v) + case OpMaskedLessUint16x8: + return rewriteValueAMD64_OpMaskedLessUint16x8(v) + case OpMaskedLessUint32x16: + return rewriteValueAMD64_OpMaskedLessUint32x16(v) + case OpMaskedLessUint32x4: + return rewriteValueAMD64_OpMaskedLessUint32x4(v) + case OpMaskedLessUint32x8: + return rewriteValueAMD64_OpMaskedLessUint32x8(v) + case OpMaskedLessUint64x2: + return rewriteValueAMD64_OpMaskedLessUint64x2(v) + case OpMaskedLessUint64x4: + return rewriteValueAMD64_OpMaskedLessUint64x4(v) + case OpMaskedLessUint64x8: + return rewriteValueAMD64_OpMaskedLessUint64x8(v) + case OpMaskedLessUint8x16: + return rewriteValueAMD64_OpMaskedLessUint8x16(v) + case OpMaskedLessUint8x32: + return 
rewriteValueAMD64_OpMaskedLessUint8x32(v) + case OpMaskedLessUint8x64: + return rewriteValueAMD64_OpMaskedLessUint8x64(v) + case OpMaskedMaxFloat32x16: + return rewriteValueAMD64_OpMaskedMaxFloat32x16(v) + case OpMaskedMaxFloat32x4: + return rewriteValueAMD64_OpMaskedMaxFloat32x4(v) + case OpMaskedMaxFloat32x8: + return rewriteValueAMD64_OpMaskedMaxFloat32x8(v) + case OpMaskedMaxFloat64x2: + return rewriteValueAMD64_OpMaskedMaxFloat64x2(v) + case OpMaskedMaxFloat64x4: + return rewriteValueAMD64_OpMaskedMaxFloat64x4(v) + case OpMaskedMaxFloat64x8: + return rewriteValueAMD64_OpMaskedMaxFloat64x8(v) + case OpMaskedMaxInt16x16: + return rewriteValueAMD64_OpMaskedMaxInt16x16(v) + case OpMaskedMaxInt16x32: + return rewriteValueAMD64_OpMaskedMaxInt16x32(v) + case OpMaskedMaxInt16x8: + return rewriteValueAMD64_OpMaskedMaxInt16x8(v) + case OpMaskedMaxInt32x16: + return rewriteValueAMD64_OpMaskedMaxInt32x16(v) + case OpMaskedMaxInt32x4: + return rewriteValueAMD64_OpMaskedMaxInt32x4(v) + case OpMaskedMaxInt32x8: + return rewriteValueAMD64_OpMaskedMaxInt32x8(v) + case OpMaskedMaxInt64x2: + return rewriteValueAMD64_OpMaskedMaxInt64x2(v) + case OpMaskedMaxInt64x4: + return rewriteValueAMD64_OpMaskedMaxInt64x4(v) + case OpMaskedMaxInt64x8: + return rewriteValueAMD64_OpMaskedMaxInt64x8(v) + case OpMaskedMaxInt8x16: + return rewriteValueAMD64_OpMaskedMaxInt8x16(v) + case OpMaskedMaxInt8x32: + return rewriteValueAMD64_OpMaskedMaxInt8x32(v) + case OpMaskedMaxInt8x64: + return rewriteValueAMD64_OpMaskedMaxInt8x64(v) + case OpMaskedMaxUint16x16: + return rewriteValueAMD64_OpMaskedMaxUint16x16(v) + case OpMaskedMaxUint16x32: + return rewriteValueAMD64_OpMaskedMaxUint16x32(v) + case OpMaskedMaxUint16x8: + return rewriteValueAMD64_OpMaskedMaxUint16x8(v) + case OpMaskedMaxUint32x16: + return rewriteValueAMD64_OpMaskedMaxUint32x16(v) + case OpMaskedMaxUint32x4: + return rewriteValueAMD64_OpMaskedMaxUint32x4(v) + case OpMaskedMaxUint32x8: + return rewriteValueAMD64_OpMaskedMaxUint32x8(v) + case OpMaskedMaxUint64x2: + return rewriteValueAMD64_OpMaskedMaxUint64x2(v) + case OpMaskedMaxUint64x4: + return rewriteValueAMD64_OpMaskedMaxUint64x4(v) + case OpMaskedMaxUint64x8: + return rewriteValueAMD64_OpMaskedMaxUint64x8(v) + case OpMaskedMaxUint8x16: + return rewriteValueAMD64_OpMaskedMaxUint8x16(v) + case OpMaskedMaxUint8x32: + return rewriteValueAMD64_OpMaskedMaxUint8x32(v) + case OpMaskedMaxUint8x64: + return rewriteValueAMD64_OpMaskedMaxUint8x64(v) + case OpMaskedMinFloat32x16: + return rewriteValueAMD64_OpMaskedMinFloat32x16(v) + case OpMaskedMinFloat32x4: + return rewriteValueAMD64_OpMaskedMinFloat32x4(v) + case OpMaskedMinFloat32x8: + return rewriteValueAMD64_OpMaskedMinFloat32x8(v) + case OpMaskedMinFloat64x2: + return rewriteValueAMD64_OpMaskedMinFloat64x2(v) + case OpMaskedMinFloat64x4: + return rewriteValueAMD64_OpMaskedMinFloat64x4(v) + case OpMaskedMinFloat64x8: + return rewriteValueAMD64_OpMaskedMinFloat64x8(v) + case OpMaskedMinInt16x16: + return rewriteValueAMD64_OpMaskedMinInt16x16(v) + case OpMaskedMinInt16x32: + return rewriteValueAMD64_OpMaskedMinInt16x32(v) + case OpMaskedMinInt16x8: + return rewriteValueAMD64_OpMaskedMinInt16x8(v) + case OpMaskedMinInt32x16: + return rewriteValueAMD64_OpMaskedMinInt32x16(v) + case OpMaskedMinInt32x4: + return rewriteValueAMD64_OpMaskedMinInt32x4(v) + case OpMaskedMinInt32x8: + return rewriteValueAMD64_OpMaskedMinInt32x8(v) + case OpMaskedMinInt64x2: + return rewriteValueAMD64_OpMaskedMinInt64x2(v) + case OpMaskedMinInt64x4: + return 
rewriteValueAMD64_OpMaskedMinInt64x4(v) + case OpMaskedMinInt64x8: + return rewriteValueAMD64_OpMaskedMinInt64x8(v) + case OpMaskedMinInt8x16: + return rewriteValueAMD64_OpMaskedMinInt8x16(v) + case OpMaskedMinInt8x32: + return rewriteValueAMD64_OpMaskedMinInt8x32(v) + case OpMaskedMinInt8x64: + return rewriteValueAMD64_OpMaskedMinInt8x64(v) + case OpMaskedMinUint16x16: + return rewriteValueAMD64_OpMaskedMinUint16x16(v) + case OpMaskedMinUint16x32: + return rewriteValueAMD64_OpMaskedMinUint16x32(v) + case OpMaskedMinUint16x8: + return rewriteValueAMD64_OpMaskedMinUint16x8(v) + case OpMaskedMinUint32x16: + return rewriteValueAMD64_OpMaskedMinUint32x16(v) + case OpMaskedMinUint32x4: + return rewriteValueAMD64_OpMaskedMinUint32x4(v) + case OpMaskedMinUint32x8: + return rewriteValueAMD64_OpMaskedMinUint32x8(v) + case OpMaskedMinUint64x2: + return rewriteValueAMD64_OpMaskedMinUint64x2(v) + case OpMaskedMinUint64x4: + return rewriteValueAMD64_OpMaskedMinUint64x4(v) + case OpMaskedMinUint64x8: + return rewriteValueAMD64_OpMaskedMinUint64x8(v) + case OpMaskedMinUint8x16: + return rewriteValueAMD64_OpMaskedMinUint8x16(v) + case OpMaskedMinUint8x32: + return rewriteValueAMD64_OpMaskedMinUint8x32(v) + case OpMaskedMinUint8x64: + return rewriteValueAMD64_OpMaskedMinUint8x64(v) + case OpMaskedMulByPowOf2Float32x16: + return rewriteValueAMD64_OpMaskedMulByPowOf2Float32x16(v) + case OpMaskedMulByPowOf2Float32x4: + return rewriteValueAMD64_OpMaskedMulByPowOf2Float32x4(v) + case OpMaskedMulByPowOf2Float32x8: + return rewriteValueAMD64_OpMaskedMulByPowOf2Float32x8(v) + case OpMaskedMulByPowOf2Float64x2: + return rewriteValueAMD64_OpMaskedMulByPowOf2Float64x2(v) + case OpMaskedMulByPowOf2Float64x4: + return rewriteValueAMD64_OpMaskedMulByPowOf2Float64x4(v) + case OpMaskedMulByPowOf2Float64x8: + return rewriteValueAMD64_OpMaskedMulByPowOf2Float64x8(v) + case OpMaskedMulEvenWidenInt64x2: + return rewriteValueAMD64_OpMaskedMulEvenWidenInt64x2(v) + case OpMaskedMulEvenWidenInt64x4: + return rewriteValueAMD64_OpMaskedMulEvenWidenInt64x4(v) + case OpMaskedMulEvenWidenInt64x8: + return rewriteValueAMD64_OpMaskedMulEvenWidenInt64x8(v) + case OpMaskedMulEvenWidenUint64x2: + return rewriteValueAMD64_OpMaskedMulEvenWidenUint64x2(v) + case OpMaskedMulEvenWidenUint64x4: + return rewriteValueAMD64_OpMaskedMulEvenWidenUint64x4(v) + case OpMaskedMulEvenWidenUint64x8: + return rewriteValueAMD64_OpMaskedMulEvenWidenUint64x8(v) + case OpMaskedMulFloat32x16: + return rewriteValueAMD64_OpMaskedMulFloat32x16(v) + case OpMaskedMulFloat32x4: + return rewriteValueAMD64_OpMaskedMulFloat32x4(v) + case OpMaskedMulFloat32x8: + return rewriteValueAMD64_OpMaskedMulFloat32x8(v) + case OpMaskedMulFloat64x2: + return rewriteValueAMD64_OpMaskedMulFloat64x2(v) + case OpMaskedMulFloat64x4: + return rewriteValueAMD64_OpMaskedMulFloat64x4(v) + case OpMaskedMulFloat64x8: + return rewriteValueAMD64_OpMaskedMulFloat64x8(v) + case OpMaskedMulHighInt16x16: + return rewriteValueAMD64_OpMaskedMulHighInt16x16(v) + case OpMaskedMulHighInt16x32: + return rewriteValueAMD64_OpMaskedMulHighInt16x32(v) + case OpMaskedMulHighInt16x8: + return rewriteValueAMD64_OpMaskedMulHighInt16x8(v) + case OpMaskedMulHighUint16x16: + return rewriteValueAMD64_OpMaskedMulHighUint16x16(v) + case OpMaskedMulHighUint16x32: + return rewriteValueAMD64_OpMaskedMulHighUint16x32(v) + case OpMaskedMulHighUint16x8: + return rewriteValueAMD64_OpMaskedMulHighUint16x8(v) + case OpMaskedMulLowInt16x16: + return rewriteValueAMD64_OpMaskedMulLowInt16x16(v) + case OpMaskedMulLowInt16x32: + 
return rewriteValueAMD64_OpMaskedMulLowInt16x32(v) + case OpMaskedMulLowInt16x8: + return rewriteValueAMD64_OpMaskedMulLowInt16x8(v) + case OpMaskedMulLowInt32x16: + return rewriteValueAMD64_OpMaskedMulLowInt32x16(v) + case OpMaskedMulLowInt32x4: + return rewriteValueAMD64_OpMaskedMulLowInt32x4(v) + case OpMaskedMulLowInt32x8: + return rewriteValueAMD64_OpMaskedMulLowInt32x8(v) + case OpMaskedMulLowInt64x2: + return rewriteValueAMD64_OpMaskedMulLowInt64x2(v) + case OpMaskedMulLowInt64x4: + return rewriteValueAMD64_OpMaskedMulLowInt64x4(v) + case OpMaskedMulLowInt64x8: + return rewriteValueAMD64_OpMaskedMulLowInt64x8(v) + case OpMaskedNotEqualFloat32x16: + return rewriteValueAMD64_OpMaskedNotEqualFloat32x16(v) + case OpMaskedNotEqualFloat32x4: + return rewriteValueAMD64_OpMaskedNotEqualFloat32x4(v) + case OpMaskedNotEqualFloat32x8: + return rewriteValueAMD64_OpMaskedNotEqualFloat32x8(v) + case OpMaskedNotEqualFloat64x2: + return rewriteValueAMD64_OpMaskedNotEqualFloat64x2(v) + case OpMaskedNotEqualFloat64x4: + return rewriteValueAMD64_OpMaskedNotEqualFloat64x4(v) + case OpMaskedNotEqualFloat64x8: + return rewriteValueAMD64_OpMaskedNotEqualFloat64x8(v) + case OpMaskedNotEqualInt16x16: + return rewriteValueAMD64_OpMaskedNotEqualInt16x16(v) + case OpMaskedNotEqualInt16x32: + return rewriteValueAMD64_OpMaskedNotEqualInt16x32(v) + case OpMaskedNotEqualInt16x8: + return rewriteValueAMD64_OpMaskedNotEqualInt16x8(v) + case OpMaskedNotEqualInt32x16: + return rewriteValueAMD64_OpMaskedNotEqualInt32x16(v) + case OpMaskedNotEqualInt32x4: + return rewriteValueAMD64_OpMaskedNotEqualInt32x4(v) + case OpMaskedNotEqualInt32x8: + return rewriteValueAMD64_OpMaskedNotEqualInt32x8(v) + case OpMaskedNotEqualInt64x2: + return rewriteValueAMD64_OpMaskedNotEqualInt64x2(v) + case OpMaskedNotEqualInt64x4: + return rewriteValueAMD64_OpMaskedNotEqualInt64x4(v) + case OpMaskedNotEqualInt64x8: + return rewriteValueAMD64_OpMaskedNotEqualInt64x8(v) + case OpMaskedNotEqualInt8x16: + return rewriteValueAMD64_OpMaskedNotEqualInt8x16(v) + case OpMaskedNotEqualInt8x32: + return rewriteValueAMD64_OpMaskedNotEqualInt8x32(v) + case OpMaskedNotEqualInt8x64: + return rewriteValueAMD64_OpMaskedNotEqualInt8x64(v) + case OpMaskedNotEqualUint16x16: + return rewriteValueAMD64_OpMaskedNotEqualUint16x16(v) + case OpMaskedNotEqualUint16x32: + return rewriteValueAMD64_OpMaskedNotEqualUint16x32(v) + case OpMaskedNotEqualUint16x8: + return rewriteValueAMD64_OpMaskedNotEqualUint16x8(v) + case OpMaskedNotEqualUint32x16: + return rewriteValueAMD64_OpMaskedNotEqualUint32x16(v) + case OpMaskedNotEqualUint32x4: + return rewriteValueAMD64_OpMaskedNotEqualUint32x4(v) + case OpMaskedNotEqualUint32x8: + return rewriteValueAMD64_OpMaskedNotEqualUint32x8(v) + case OpMaskedNotEqualUint64x2: + return rewriteValueAMD64_OpMaskedNotEqualUint64x2(v) + case OpMaskedNotEqualUint64x4: + return rewriteValueAMD64_OpMaskedNotEqualUint64x4(v) + case OpMaskedNotEqualUint64x8: + return rewriteValueAMD64_OpMaskedNotEqualUint64x8(v) + case OpMaskedNotEqualUint8x16: + return rewriteValueAMD64_OpMaskedNotEqualUint8x16(v) + case OpMaskedNotEqualUint8x32: + return rewriteValueAMD64_OpMaskedNotEqualUint8x32(v) + case OpMaskedNotEqualUint8x64: + return rewriteValueAMD64_OpMaskedNotEqualUint8x64(v) + case OpMaskedOrFloat32x16: + return rewriteValueAMD64_OpMaskedOrFloat32x16(v) + case OpMaskedOrFloat32x4: + return rewriteValueAMD64_OpMaskedOrFloat32x4(v) + case OpMaskedOrFloat32x8: + return rewriteValueAMD64_OpMaskedOrFloat32x8(v) + case OpMaskedOrFloat64x2: + return 
rewriteValueAMD64_OpMaskedOrFloat64x2(v) + case OpMaskedOrFloat64x4: + return rewriteValueAMD64_OpMaskedOrFloat64x4(v) + case OpMaskedOrFloat64x8: + return rewriteValueAMD64_OpMaskedOrFloat64x8(v) + case OpMaskedOrInt32x16: + return rewriteValueAMD64_OpMaskedOrInt32x16(v) + case OpMaskedOrInt32x4: + return rewriteValueAMD64_OpMaskedOrInt32x4(v) + case OpMaskedOrInt32x8: + return rewriteValueAMD64_OpMaskedOrInt32x8(v) + case OpMaskedOrInt64x2: + return rewriteValueAMD64_OpMaskedOrInt64x2(v) + case OpMaskedOrInt64x4: + return rewriteValueAMD64_OpMaskedOrInt64x4(v) + case OpMaskedOrInt64x8: + return rewriteValueAMD64_OpMaskedOrInt64x8(v) + case OpMaskedOrUint32x16: + return rewriteValueAMD64_OpMaskedOrUint32x16(v) + case OpMaskedOrUint32x4: + return rewriteValueAMD64_OpMaskedOrUint32x4(v) + case OpMaskedOrUint32x8: + return rewriteValueAMD64_OpMaskedOrUint32x8(v) + case OpMaskedOrUint64x2: + return rewriteValueAMD64_OpMaskedOrUint64x2(v) + case OpMaskedOrUint64x4: + return rewriteValueAMD64_OpMaskedOrUint64x4(v) + case OpMaskedOrUint64x8: + return rewriteValueAMD64_OpMaskedOrUint64x8(v) + case OpMaskedPopCountInt16x16: + return rewriteValueAMD64_OpMaskedPopCountInt16x16(v) + case OpMaskedPopCountInt16x32: + return rewriteValueAMD64_OpMaskedPopCountInt16x32(v) + case OpMaskedPopCountInt16x8: + return rewriteValueAMD64_OpMaskedPopCountInt16x8(v) + case OpMaskedPopCountInt32x16: + return rewriteValueAMD64_OpMaskedPopCountInt32x16(v) + case OpMaskedPopCountInt32x4: + return rewriteValueAMD64_OpMaskedPopCountInt32x4(v) + case OpMaskedPopCountInt32x8: + return rewriteValueAMD64_OpMaskedPopCountInt32x8(v) + case OpMaskedPopCountInt64x2: + return rewriteValueAMD64_OpMaskedPopCountInt64x2(v) + case OpMaskedPopCountInt64x4: + return rewriteValueAMD64_OpMaskedPopCountInt64x4(v) + case OpMaskedPopCountInt64x8: + return rewriteValueAMD64_OpMaskedPopCountInt64x8(v) + case OpMaskedPopCountInt8x16: + return rewriteValueAMD64_OpMaskedPopCountInt8x16(v) + case OpMaskedPopCountInt8x32: + return rewriteValueAMD64_OpMaskedPopCountInt8x32(v) + case OpMaskedPopCountInt8x64: + return rewriteValueAMD64_OpMaskedPopCountInt8x64(v) + case OpMaskedPopCountUint16x16: + return rewriteValueAMD64_OpMaskedPopCountUint16x16(v) + case OpMaskedPopCountUint16x32: + return rewriteValueAMD64_OpMaskedPopCountUint16x32(v) + case OpMaskedPopCountUint16x8: + return rewriteValueAMD64_OpMaskedPopCountUint16x8(v) + case OpMaskedPopCountUint32x16: + return rewriteValueAMD64_OpMaskedPopCountUint32x16(v) + case OpMaskedPopCountUint32x4: + return rewriteValueAMD64_OpMaskedPopCountUint32x4(v) + case OpMaskedPopCountUint32x8: + return rewriteValueAMD64_OpMaskedPopCountUint32x8(v) + case OpMaskedPopCountUint64x2: + return rewriteValueAMD64_OpMaskedPopCountUint64x2(v) + case OpMaskedPopCountUint64x4: + return rewriteValueAMD64_OpMaskedPopCountUint64x4(v) + case OpMaskedPopCountUint64x8: + return rewriteValueAMD64_OpMaskedPopCountUint64x8(v) + case OpMaskedPopCountUint8x16: + return rewriteValueAMD64_OpMaskedPopCountUint8x16(v) + case OpMaskedPopCountUint8x32: + return rewriteValueAMD64_OpMaskedPopCountUint8x32(v) + case OpMaskedPopCountUint8x64: + return rewriteValueAMD64_OpMaskedPopCountUint8x64(v) + case OpMaskedSaturatedAddInt16x16: + return rewriteValueAMD64_OpMaskedSaturatedAddInt16x16(v) + case OpMaskedSaturatedAddInt16x32: + return rewriteValueAMD64_OpMaskedSaturatedAddInt16x32(v) + case OpMaskedSaturatedAddInt16x8: + return rewriteValueAMD64_OpMaskedSaturatedAddInt16x8(v) + case OpMaskedSaturatedAddInt8x16: + return 
rewriteValueAMD64_OpMaskedSaturatedAddInt8x16(v) + case OpMaskedSaturatedAddInt8x32: + return rewriteValueAMD64_OpMaskedSaturatedAddInt8x32(v) + case OpMaskedSaturatedAddInt8x64: + return rewriteValueAMD64_OpMaskedSaturatedAddInt8x64(v) + case OpMaskedSaturatedAddUint16x16: + return rewriteValueAMD64_OpMaskedSaturatedAddUint16x16(v) + case OpMaskedSaturatedAddUint16x32: + return rewriteValueAMD64_OpMaskedSaturatedAddUint16x32(v) + case OpMaskedSaturatedAddUint16x8: + return rewriteValueAMD64_OpMaskedSaturatedAddUint16x8(v) + case OpMaskedSaturatedAddUint8x16: + return rewriteValueAMD64_OpMaskedSaturatedAddUint8x16(v) + case OpMaskedSaturatedAddUint8x32: + return rewriteValueAMD64_OpMaskedSaturatedAddUint8x32(v) + case OpMaskedSaturatedAddUint8x64: + return rewriteValueAMD64_OpMaskedSaturatedAddUint8x64(v) + case OpMaskedSaturatedSubInt16x16: + return rewriteValueAMD64_OpMaskedSaturatedSubInt16x16(v) + case OpMaskedSaturatedSubInt16x32: + return rewriteValueAMD64_OpMaskedSaturatedSubInt16x32(v) + case OpMaskedSaturatedSubInt16x8: + return rewriteValueAMD64_OpMaskedSaturatedSubInt16x8(v) + case OpMaskedSaturatedSubInt8x16: + return rewriteValueAMD64_OpMaskedSaturatedSubInt8x16(v) + case OpMaskedSaturatedSubInt8x32: + return rewriteValueAMD64_OpMaskedSaturatedSubInt8x32(v) + case OpMaskedSaturatedSubInt8x64: + return rewriteValueAMD64_OpMaskedSaturatedSubInt8x64(v) + case OpMaskedSaturatedSubUint16x16: + return rewriteValueAMD64_OpMaskedSaturatedSubUint16x16(v) + case OpMaskedSaturatedSubUint16x32: + return rewriteValueAMD64_OpMaskedSaturatedSubUint16x32(v) + case OpMaskedSaturatedSubUint16x8: + return rewriteValueAMD64_OpMaskedSaturatedSubUint16x8(v) + case OpMaskedSaturatedSubUint8x16: + return rewriteValueAMD64_OpMaskedSaturatedSubUint8x16(v) + case OpMaskedSaturatedSubUint8x32: + return rewriteValueAMD64_OpMaskedSaturatedSubUint8x32(v) + case OpMaskedSaturatedSubUint8x64: + return rewriteValueAMD64_OpMaskedSaturatedSubUint8x64(v) + case OpMaskedSqrtFloat32x16: + return rewriteValueAMD64_OpMaskedSqrtFloat32x16(v) + case OpMaskedSqrtFloat32x4: + return rewriteValueAMD64_OpMaskedSqrtFloat32x4(v) + case OpMaskedSqrtFloat32x8: + return rewriteValueAMD64_OpMaskedSqrtFloat32x8(v) + case OpMaskedSqrtFloat64x2: + return rewriteValueAMD64_OpMaskedSqrtFloat64x2(v) + case OpMaskedSqrtFloat64x4: + return rewriteValueAMD64_OpMaskedSqrtFloat64x4(v) + case OpMaskedSqrtFloat64x8: + return rewriteValueAMD64_OpMaskedSqrtFloat64x8(v) + case OpMaskedSubFloat32x16: + return rewriteValueAMD64_OpMaskedSubFloat32x16(v) + case OpMaskedSubFloat32x4: + return rewriteValueAMD64_OpMaskedSubFloat32x4(v) + case OpMaskedSubFloat32x8: + return rewriteValueAMD64_OpMaskedSubFloat32x8(v) + case OpMaskedSubFloat64x2: + return rewriteValueAMD64_OpMaskedSubFloat64x2(v) + case OpMaskedSubFloat64x4: + return rewriteValueAMD64_OpMaskedSubFloat64x4(v) + case OpMaskedSubFloat64x8: + return rewriteValueAMD64_OpMaskedSubFloat64x8(v) + case OpMaskedSubInt16x16: + return rewriteValueAMD64_OpMaskedSubInt16x16(v) + case OpMaskedSubInt16x32: + return rewriteValueAMD64_OpMaskedSubInt16x32(v) + case OpMaskedSubInt16x8: + return rewriteValueAMD64_OpMaskedSubInt16x8(v) + case OpMaskedSubInt32x16: + return rewriteValueAMD64_OpMaskedSubInt32x16(v) + case OpMaskedSubInt32x4: + return rewriteValueAMD64_OpMaskedSubInt32x4(v) + case OpMaskedSubInt32x8: + return rewriteValueAMD64_OpMaskedSubInt32x8(v) + case OpMaskedSubInt64x2: + return rewriteValueAMD64_OpMaskedSubInt64x2(v) + case OpMaskedSubInt64x4: + return rewriteValueAMD64_OpMaskedSubInt64x4(v) 
+ case OpMaskedSubInt64x8: + return rewriteValueAMD64_OpMaskedSubInt64x8(v) + case OpMaskedSubInt8x16: + return rewriteValueAMD64_OpMaskedSubInt8x16(v) + case OpMaskedSubInt8x32: + return rewriteValueAMD64_OpMaskedSubInt8x32(v) + case OpMaskedSubInt8x64: + return rewriteValueAMD64_OpMaskedSubInt8x64(v) + case OpMaskedSubUint16x16: + return rewriteValueAMD64_OpMaskedSubUint16x16(v) + case OpMaskedSubUint16x32: + return rewriteValueAMD64_OpMaskedSubUint16x32(v) + case OpMaskedSubUint16x8: + return rewriteValueAMD64_OpMaskedSubUint16x8(v) + case OpMaskedSubUint32x16: + return rewriteValueAMD64_OpMaskedSubUint32x16(v) + case OpMaskedSubUint32x4: + return rewriteValueAMD64_OpMaskedSubUint32x4(v) + case OpMaskedSubUint32x8: + return rewriteValueAMD64_OpMaskedSubUint32x8(v) + case OpMaskedSubUint64x2: + return rewriteValueAMD64_OpMaskedSubUint64x2(v) + case OpMaskedSubUint64x4: + return rewriteValueAMD64_OpMaskedSubUint64x4(v) + case OpMaskedSubUint64x8: + return rewriteValueAMD64_OpMaskedSubUint64x8(v) + case OpMaskedSubUint8x16: + return rewriteValueAMD64_OpMaskedSubUint8x16(v) + case OpMaskedSubUint8x32: + return rewriteValueAMD64_OpMaskedSubUint8x32(v) + case OpMaskedSubUint8x64: + return rewriteValueAMD64_OpMaskedSubUint8x64(v) + case OpMaskedXorFloat32x16: + return rewriteValueAMD64_OpMaskedXorFloat32x16(v) + case OpMaskedXorFloat32x4: + return rewriteValueAMD64_OpMaskedXorFloat32x4(v) + case OpMaskedXorFloat32x8: + return rewriteValueAMD64_OpMaskedXorFloat32x8(v) + case OpMaskedXorFloat64x2: + return rewriteValueAMD64_OpMaskedXorFloat64x2(v) + case OpMaskedXorFloat64x4: + return rewriteValueAMD64_OpMaskedXorFloat64x4(v) + case OpMaskedXorFloat64x8: + return rewriteValueAMD64_OpMaskedXorFloat64x8(v) + case OpMaskedXorInt32x16: + return rewriteValueAMD64_OpMaskedXorInt32x16(v) + case OpMaskedXorInt32x4: + return rewriteValueAMD64_OpMaskedXorInt32x4(v) + case OpMaskedXorInt32x8: + return rewriteValueAMD64_OpMaskedXorInt32x8(v) + case OpMaskedXorInt64x2: + return rewriteValueAMD64_OpMaskedXorInt64x2(v) + case OpMaskedXorInt64x4: + return rewriteValueAMD64_OpMaskedXorInt64x4(v) + case OpMaskedXorInt64x8: + return rewriteValueAMD64_OpMaskedXorInt64x8(v) + case OpMaskedXorUint32x16: + return rewriteValueAMD64_OpMaskedXorUint32x16(v) + case OpMaskedXorUint32x4: + return rewriteValueAMD64_OpMaskedXorUint32x4(v) + case OpMaskedXorUint32x8: + return rewriteValueAMD64_OpMaskedXorUint32x8(v) + case OpMaskedXorUint64x2: + return rewriteValueAMD64_OpMaskedXorUint64x2(v) + case OpMaskedXorUint64x4: + return rewriteValueAMD64_OpMaskedXorUint64x4(v) + case OpMaskedXorUint64x8: + return rewriteValueAMD64_OpMaskedXorUint64x8(v) case OpMax32F: return rewriteValueAMD64_OpMax32F(v) case OpMax64F: return rewriteValueAMD64_OpMax64F(v) + case OpMaxFloat32x16: + return rewriteValueAMD64_OpMaxFloat32x16(v) + case OpMaxFloat32x4: + return rewriteValueAMD64_OpMaxFloat32x4(v) + case OpMaxFloat32x8: + return rewriteValueAMD64_OpMaxFloat32x8(v) + case OpMaxFloat64x2: + return rewriteValueAMD64_OpMaxFloat64x2(v) + case OpMaxFloat64x4: + return rewriteValueAMD64_OpMaxFloat64x4(v) + case OpMaxFloat64x8: + return rewriteValueAMD64_OpMaxFloat64x8(v) + case OpMaxInt16x16: + return rewriteValueAMD64_OpMaxInt16x16(v) + case OpMaxInt16x32: + return rewriteValueAMD64_OpMaxInt16x32(v) + case OpMaxInt16x8: + return rewriteValueAMD64_OpMaxInt16x8(v) + case OpMaxInt32x16: + return rewriteValueAMD64_OpMaxInt32x16(v) + case OpMaxInt32x4: + return rewriteValueAMD64_OpMaxInt32x4(v) + case OpMaxInt32x8: + return 
rewriteValueAMD64_OpMaxInt32x8(v) + case OpMaxInt64x2: + return rewriteValueAMD64_OpMaxInt64x2(v) + case OpMaxInt64x4: + return rewriteValueAMD64_OpMaxInt64x4(v) + case OpMaxInt64x8: + return rewriteValueAMD64_OpMaxInt64x8(v) + case OpMaxInt8x16: + return rewriteValueAMD64_OpMaxInt8x16(v) + case OpMaxInt8x32: + return rewriteValueAMD64_OpMaxInt8x32(v) + case OpMaxInt8x64: + return rewriteValueAMD64_OpMaxInt8x64(v) + case OpMaxUint16x16: + return rewriteValueAMD64_OpMaxUint16x16(v) + case OpMaxUint16x32: + return rewriteValueAMD64_OpMaxUint16x32(v) + case OpMaxUint16x8: + return rewriteValueAMD64_OpMaxUint16x8(v) + case OpMaxUint32x16: + return rewriteValueAMD64_OpMaxUint32x16(v) + case OpMaxUint32x4: + return rewriteValueAMD64_OpMaxUint32x4(v) + case OpMaxUint32x8: + return rewriteValueAMD64_OpMaxUint32x8(v) + case OpMaxUint64x2: + return rewriteValueAMD64_OpMaxUint64x2(v) + case OpMaxUint64x4: + return rewriteValueAMD64_OpMaxUint64x4(v) + case OpMaxUint64x8: + return rewriteValueAMD64_OpMaxUint64x8(v) + case OpMaxUint8x16: + return rewriteValueAMD64_OpMaxUint8x16(v) + case OpMaxUint8x32: + return rewriteValueAMD64_OpMaxUint8x32(v) + case OpMaxUint8x64: + return rewriteValueAMD64_OpMaxUint8x64(v) case OpMin32F: return rewriteValueAMD64_OpMin32F(v) case OpMin64F: return rewriteValueAMD64_OpMin64F(v) + case OpMinFloat32x16: + return rewriteValueAMD64_OpMinFloat32x16(v) + case OpMinFloat32x4: + return rewriteValueAMD64_OpMinFloat32x4(v) + case OpMinFloat32x8: + return rewriteValueAMD64_OpMinFloat32x8(v) + case OpMinFloat64x2: + return rewriteValueAMD64_OpMinFloat64x2(v) + case OpMinFloat64x4: + return rewriteValueAMD64_OpMinFloat64x4(v) + case OpMinFloat64x8: + return rewriteValueAMD64_OpMinFloat64x8(v) + case OpMinInt16x16: + return rewriteValueAMD64_OpMinInt16x16(v) + case OpMinInt16x32: + return rewriteValueAMD64_OpMinInt16x32(v) + case OpMinInt16x8: + return rewriteValueAMD64_OpMinInt16x8(v) + case OpMinInt32x16: + return rewriteValueAMD64_OpMinInt32x16(v) + case OpMinInt32x4: + return rewriteValueAMD64_OpMinInt32x4(v) + case OpMinInt32x8: + return rewriteValueAMD64_OpMinInt32x8(v) + case OpMinInt64x2: + return rewriteValueAMD64_OpMinInt64x2(v) + case OpMinInt64x4: + return rewriteValueAMD64_OpMinInt64x4(v) + case OpMinInt64x8: + return rewriteValueAMD64_OpMinInt64x8(v) + case OpMinInt8x16: + return rewriteValueAMD64_OpMinInt8x16(v) + case OpMinInt8x32: + return rewriteValueAMD64_OpMinInt8x32(v) + case OpMinInt8x64: + return rewriteValueAMD64_OpMinInt8x64(v) + case OpMinUint16x16: + return rewriteValueAMD64_OpMinUint16x16(v) + case OpMinUint16x32: + return rewriteValueAMD64_OpMinUint16x32(v) + case OpMinUint16x8: + return rewriteValueAMD64_OpMinUint16x8(v) + case OpMinUint32x16: + return rewriteValueAMD64_OpMinUint32x16(v) + case OpMinUint32x4: + return rewriteValueAMD64_OpMinUint32x4(v) + case OpMinUint32x8: + return rewriteValueAMD64_OpMinUint32x8(v) + case OpMinUint64x2: + return rewriteValueAMD64_OpMinUint64x2(v) + case OpMinUint64x4: + return rewriteValueAMD64_OpMinUint64x4(v) + case OpMinUint64x8: + return rewriteValueAMD64_OpMinUint64x8(v) + case OpMinUint8x16: + return rewriteValueAMD64_OpMinUint8x16(v) + case OpMinUint8x32: + return rewriteValueAMD64_OpMinUint8x32(v) + case OpMinUint8x64: + return rewriteValueAMD64_OpMinUint8x64(v) case OpMod16: return rewriteValueAMD64_OpMod16(v) case OpMod16u: @@ -946,6 +2616,80 @@ func rewriteValueAMD64(v *Value) bool { case OpMul8: v.Op = OpAMD64MULL return true + case OpMulByPowOf2Float32x16: + return 
rewriteValueAMD64_OpMulByPowOf2Float32x16(v) + case OpMulByPowOf2Float32x4: + return rewriteValueAMD64_OpMulByPowOf2Float32x4(v) + case OpMulByPowOf2Float32x8: + return rewriteValueAMD64_OpMulByPowOf2Float32x8(v) + case OpMulByPowOf2Float64x2: + return rewriteValueAMD64_OpMulByPowOf2Float64x2(v) + case OpMulByPowOf2Float64x4: + return rewriteValueAMD64_OpMulByPowOf2Float64x4(v) + case OpMulByPowOf2Float64x8: + return rewriteValueAMD64_OpMulByPowOf2Float64x8(v) + case OpMulEvenWidenInt32x4: + return rewriteValueAMD64_OpMulEvenWidenInt32x4(v) + case OpMulEvenWidenInt32x8: + return rewriteValueAMD64_OpMulEvenWidenInt32x8(v) + case OpMulEvenWidenInt64x2: + return rewriteValueAMD64_OpMulEvenWidenInt64x2(v) + case OpMulEvenWidenInt64x4: + return rewriteValueAMD64_OpMulEvenWidenInt64x4(v) + case OpMulEvenWidenInt64x8: + return rewriteValueAMD64_OpMulEvenWidenInt64x8(v) + case OpMulEvenWidenUint32x4: + return rewriteValueAMD64_OpMulEvenWidenUint32x4(v) + case OpMulEvenWidenUint32x8: + return rewriteValueAMD64_OpMulEvenWidenUint32x8(v) + case OpMulEvenWidenUint64x2: + return rewriteValueAMD64_OpMulEvenWidenUint64x2(v) + case OpMulEvenWidenUint64x4: + return rewriteValueAMD64_OpMulEvenWidenUint64x4(v) + case OpMulEvenWidenUint64x8: + return rewriteValueAMD64_OpMulEvenWidenUint64x8(v) + case OpMulFloat32x16: + return rewriteValueAMD64_OpMulFloat32x16(v) + case OpMulFloat32x4: + return rewriteValueAMD64_OpMulFloat32x4(v) + case OpMulFloat32x8: + return rewriteValueAMD64_OpMulFloat32x8(v) + case OpMulFloat64x2: + return rewriteValueAMD64_OpMulFloat64x2(v) + case OpMulFloat64x4: + return rewriteValueAMD64_OpMulFloat64x4(v) + case OpMulFloat64x8: + return rewriteValueAMD64_OpMulFloat64x8(v) + case OpMulHighInt16x16: + return rewriteValueAMD64_OpMulHighInt16x16(v) + case OpMulHighInt16x32: + return rewriteValueAMD64_OpMulHighInt16x32(v) + case OpMulHighInt16x8: + return rewriteValueAMD64_OpMulHighInt16x8(v) + case OpMulHighUint16x16: + return rewriteValueAMD64_OpMulHighUint16x16(v) + case OpMulHighUint16x32: + return rewriteValueAMD64_OpMulHighUint16x32(v) + case OpMulHighUint16x8: + return rewriteValueAMD64_OpMulHighUint16x8(v) + case OpMulLowInt16x16: + return rewriteValueAMD64_OpMulLowInt16x16(v) + case OpMulLowInt16x32: + return rewriteValueAMD64_OpMulLowInt16x32(v) + case OpMulLowInt16x8: + return rewriteValueAMD64_OpMulLowInt16x8(v) + case OpMulLowInt32x16: + return rewriteValueAMD64_OpMulLowInt32x16(v) + case OpMulLowInt32x4: + return rewriteValueAMD64_OpMulLowInt32x4(v) + case OpMulLowInt32x8: + return rewriteValueAMD64_OpMulLowInt32x8(v) + case OpMulLowInt64x2: + return rewriteValueAMD64_OpMulLowInt64x2(v) + case OpMulLowInt64x4: + return rewriteValueAMD64_OpMulLowInt64x4(v) + case OpMulLowInt64x8: + return rewriteValueAMD64_OpMulLowInt64x8(v) case OpNeg16: v.Op = OpAMD64NEGL return true @@ -983,6 +2727,66 @@ func rewriteValueAMD64(v *Value) bool { return true case OpNot: return rewriteValueAMD64_OpNot(v) + case OpNotEqualFloat32x16: + return rewriteValueAMD64_OpNotEqualFloat32x16(v) + case OpNotEqualFloat32x4: + return rewriteValueAMD64_OpNotEqualFloat32x4(v) + case OpNotEqualFloat32x8: + return rewriteValueAMD64_OpNotEqualFloat32x8(v) + case OpNotEqualFloat64x2: + return rewriteValueAMD64_OpNotEqualFloat64x2(v) + case OpNotEqualFloat64x4: + return rewriteValueAMD64_OpNotEqualFloat64x4(v) + case OpNotEqualFloat64x8: + return rewriteValueAMD64_OpNotEqualFloat64x8(v) + case OpNotEqualInt16x16: + return rewriteValueAMD64_OpNotEqualInt16x16(v) + case OpNotEqualInt16x32: + return 
rewriteValueAMD64_OpNotEqualInt16x32(v) + case OpNotEqualInt16x8: + return rewriteValueAMD64_OpNotEqualInt16x8(v) + case OpNotEqualInt32x16: + return rewriteValueAMD64_OpNotEqualInt32x16(v) + case OpNotEqualInt32x4: + return rewriteValueAMD64_OpNotEqualInt32x4(v) + case OpNotEqualInt32x8: + return rewriteValueAMD64_OpNotEqualInt32x8(v) + case OpNotEqualInt64x2: + return rewriteValueAMD64_OpNotEqualInt64x2(v) + case OpNotEqualInt64x4: + return rewriteValueAMD64_OpNotEqualInt64x4(v) + case OpNotEqualInt64x8: + return rewriteValueAMD64_OpNotEqualInt64x8(v) + case OpNotEqualInt8x16: + return rewriteValueAMD64_OpNotEqualInt8x16(v) + case OpNotEqualInt8x32: + return rewriteValueAMD64_OpNotEqualInt8x32(v) + case OpNotEqualInt8x64: + return rewriteValueAMD64_OpNotEqualInt8x64(v) + case OpNotEqualUint16x16: + return rewriteValueAMD64_OpNotEqualUint16x16(v) + case OpNotEqualUint16x32: + return rewriteValueAMD64_OpNotEqualUint16x32(v) + case OpNotEqualUint16x8: + return rewriteValueAMD64_OpNotEqualUint16x8(v) + case OpNotEqualUint32x16: + return rewriteValueAMD64_OpNotEqualUint32x16(v) + case OpNotEqualUint32x4: + return rewriteValueAMD64_OpNotEqualUint32x4(v) + case OpNotEqualUint32x8: + return rewriteValueAMD64_OpNotEqualUint32x8(v) + case OpNotEqualUint64x2: + return rewriteValueAMD64_OpNotEqualUint64x2(v) + case OpNotEqualUint64x4: + return rewriteValueAMD64_OpNotEqualUint64x4(v) + case OpNotEqualUint64x8: + return rewriteValueAMD64_OpNotEqualUint64x8(v) + case OpNotEqualUint8x16: + return rewriteValueAMD64_OpNotEqualUint8x16(v) + case OpNotEqualUint8x32: + return rewriteValueAMD64_OpNotEqualUint8x32(v) + case OpNotEqualUint8x64: + return rewriteValueAMD64_OpNotEqualUint8x64(v) case OpOffPtr: return rewriteValueAMD64_OpOffPtr(v) case OpOr16: @@ -1000,6 +2804,106 @@ func rewriteValueAMD64(v *Value) bool { case OpOrB: v.Op = OpAMD64ORL return true + case OpOrFloat32x16: + return rewriteValueAMD64_OpOrFloat32x16(v) + case OpOrFloat32x4: + return rewriteValueAMD64_OpOrFloat32x4(v) + case OpOrFloat32x8: + return rewriteValueAMD64_OpOrFloat32x8(v) + case OpOrFloat64x2: + return rewriteValueAMD64_OpOrFloat64x2(v) + case OpOrFloat64x4: + return rewriteValueAMD64_OpOrFloat64x4(v) + case OpOrFloat64x8: + return rewriteValueAMD64_OpOrFloat64x8(v) + case OpOrInt16x16: + return rewriteValueAMD64_OpOrInt16x16(v) + case OpOrInt16x8: + return rewriteValueAMD64_OpOrInt16x8(v) + case OpOrInt32x16: + return rewriteValueAMD64_OpOrInt32x16(v) + case OpOrInt32x4: + return rewriteValueAMD64_OpOrInt32x4(v) + case OpOrInt32x8: + return rewriteValueAMD64_OpOrInt32x8(v) + case OpOrInt64x2: + return rewriteValueAMD64_OpOrInt64x2(v) + case OpOrInt64x4: + return rewriteValueAMD64_OpOrInt64x4(v) + case OpOrInt64x8: + return rewriteValueAMD64_OpOrInt64x8(v) + case OpOrInt8x16: + return rewriteValueAMD64_OpOrInt8x16(v) + case OpOrInt8x32: + return rewriteValueAMD64_OpOrInt8x32(v) + case OpOrUint16x16: + return rewriteValueAMD64_OpOrUint16x16(v) + case OpOrUint16x8: + return rewriteValueAMD64_OpOrUint16x8(v) + case OpOrUint32x16: + return rewriteValueAMD64_OpOrUint32x16(v) + case OpOrUint32x4: + return rewriteValueAMD64_OpOrUint32x4(v) + case OpOrUint32x8: + return rewriteValueAMD64_OpOrUint32x8(v) + case OpOrUint64x2: + return rewriteValueAMD64_OpOrUint64x2(v) + case OpOrUint64x4: + return rewriteValueAMD64_OpOrUint64x4(v) + case OpOrUint64x8: + return rewriteValueAMD64_OpOrUint64x8(v) + case OpOrUint8x16: + return rewriteValueAMD64_OpOrUint8x16(v) + case OpOrUint8x32: + return rewriteValueAMD64_OpOrUint8x32(v) + case 
OpPairwiseAddFloat32x4: + return rewriteValueAMD64_OpPairwiseAddFloat32x4(v) + case OpPairwiseAddFloat32x8: + return rewriteValueAMD64_OpPairwiseAddFloat32x8(v) + case OpPairwiseAddFloat64x2: + return rewriteValueAMD64_OpPairwiseAddFloat64x2(v) + case OpPairwiseAddFloat64x4: + return rewriteValueAMD64_OpPairwiseAddFloat64x4(v) + case OpPairwiseAddInt16x16: + return rewriteValueAMD64_OpPairwiseAddInt16x16(v) + case OpPairwiseAddInt16x8: + return rewriteValueAMD64_OpPairwiseAddInt16x8(v) + case OpPairwiseAddInt32x4: + return rewriteValueAMD64_OpPairwiseAddInt32x4(v) + case OpPairwiseAddInt32x8: + return rewriteValueAMD64_OpPairwiseAddInt32x8(v) + case OpPairwiseAddUint16x16: + return rewriteValueAMD64_OpPairwiseAddUint16x16(v) + case OpPairwiseAddUint16x8: + return rewriteValueAMD64_OpPairwiseAddUint16x8(v) + case OpPairwiseAddUint32x4: + return rewriteValueAMD64_OpPairwiseAddUint32x4(v) + case OpPairwiseAddUint32x8: + return rewriteValueAMD64_OpPairwiseAddUint32x8(v) + case OpPairwiseSubFloat32x4: + return rewriteValueAMD64_OpPairwiseSubFloat32x4(v) + case OpPairwiseSubFloat32x8: + return rewriteValueAMD64_OpPairwiseSubFloat32x8(v) + case OpPairwiseSubFloat64x2: + return rewriteValueAMD64_OpPairwiseSubFloat64x2(v) + case OpPairwiseSubFloat64x4: + return rewriteValueAMD64_OpPairwiseSubFloat64x4(v) + case OpPairwiseSubInt16x16: + return rewriteValueAMD64_OpPairwiseSubInt16x16(v) + case OpPairwiseSubInt16x8: + return rewriteValueAMD64_OpPairwiseSubInt16x8(v) + case OpPairwiseSubInt32x4: + return rewriteValueAMD64_OpPairwiseSubInt32x4(v) + case OpPairwiseSubInt32x8: + return rewriteValueAMD64_OpPairwiseSubInt32x8(v) + case OpPairwiseSubUint16x16: + return rewriteValueAMD64_OpPairwiseSubUint16x16(v) + case OpPairwiseSubUint16x8: + return rewriteValueAMD64_OpPairwiseSubUint16x8(v) + case OpPairwiseSubUint32x4: + return rewriteValueAMD64_OpPairwiseSubUint32x4(v) + case OpPairwiseSubUint32x8: + return rewriteValueAMD64_OpPairwiseSubUint32x8(v) case OpPanicBounds: return rewriteValueAMD64_OpPanicBounds(v) case OpPopCount16: @@ -1012,6 +2916,54 @@ func rewriteValueAMD64(v *Value) bool { return true case OpPopCount8: return rewriteValueAMD64_OpPopCount8(v) + case OpPopCountInt16x16: + return rewriteValueAMD64_OpPopCountInt16x16(v) + case OpPopCountInt16x32: + return rewriteValueAMD64_OpPopCountInt16x32(v) + case OpPopCountInt16x8: + return rewriteValueAMD64_OpPopCountInt16x8(v) + case OpPopCountInt32x16: + return rewriteValueAMD64_OpPopCountInt32x16(v) + case OpPopCountInt32x4: + return rewriteValueAMD64_OpPopCountInt32x4(v) + case OpPopCountInt32x8: + return rewriteValueAMD64_OpPopCountInt32x8(v) + case OpPopCountInt64x2: + return rewriteValueAMD64_OpPopCountInt64x2(v) + case OpPopCountInt64x4: + return rewriteValueAMD64_OpPopCountInt64x4(v) + case OpPopCountInt64x8: + return rewriteValueAMD64_OpPopCountInt64x8(v) + case OpPopCountInt8x16: + return rewriteValueAMD64_OpPopCountInt8x16(v) + case OpPopCountInt8x32: + return rewriteValueAMD64_OpPopCountInt8x32(v) + case OpPopCountInt8x64: + return rewriteValueAMD64_OpPopCountInt8x64(v) + case OpPopCountUint16x16: + return rewriteValueAMD64_OpPopCountUint16x16(v) + case OpPopCountUint16x32: + return rewriteValueAMD64_OpPopCountUint16x32(v) + case OpPopCountUint16x8: + return rewriteValueAMD64_OpPopCountUint16x8(v) + case OpPopCountUint32x16: + return rewriteValueAMD64_OpPopCountUint32x16(v) + case OpPopCountUint32x4: + return rewriteValueAMD64_OpPopCountUint32x4(v) + case OpPopCountUint32x8: + return rewriteValueAMD64_OpPopCountUint32x8(v) + case 
OpPopCountUint64x2: + return rewriteValueAMD64_OpPopCountUint64x2(v) + case OpPopCountUint64x4: + return rewriteValueAMD64_OpPopCountUint64x4(v) + case OpPopCountUint64x8: + return rewriteValueAMD64_OpPopCountUint64x8(v) + case OpPopCountUint8x16: + return rewriteValueAMD64_OpPopCountUint8x16(v) + case OpPopCountUint8x32: + return rewriteValueAMD64_OpPopCountUint8x32(v) + case OpPopCountUint8x64: + return rewriteValueAMD64_OpPopCountUint8x64(v) case OpPrefetchCache: v.Op = OpAMD64PrefetchT0 return true @@ -1102,6 +3054,62 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpRsh8x64(v) case OpRsh8x8: return rewriteValueAMD64_OpRsh8x8(v) + case OpSaturatedAddInt16x16: + return rewriteValueAMD64_OpSaturatedAddInt16x16(v) + case OpSaturatedAddInt16x32: + return rewriteValueAMD64_OpSaturatedAddInt16x32(v) + case OpSaturatedAddInt16x8: + return rewriteValueAMD64_OpSaturatedAddInt16x8(v) + case OpSaturatedAddInt8x16: + return rewriteValueAMD64_OpSaturatedAddInt8x16(v) + case OpSaturatedAddInt8x32: + return rewriteValueAMD64_OpSaturatedAddInt8x32(v) + case OpSaturatedAddInt8x64: + return rewriteValueAMD64_OpSaturatedAddInt8x64(v) + case OpSaturatedAddUint16x16: + return rewriteValueAMD64_OpSaturatedAddUint16x16(v) + case OpSaturatedAddUint16x32: + return rewriteValueAMD64_OpSaturatedAddUint16x32(v) + case OpSaturatedAddUint16x8: + return rewriteValueAMD64_OpSaturatedAddUint16x8(v) + case OpSaturatedAddUint8x16: + return rewriteValueAMD64_OpSaturatedAddUint8x16(v) + case OpSaturatedAddUint8x32: + return rewriteValueAMD64_OpSaturatedAddUint8x32(v) + case OpSaturatedAddUint8x64: + return rewriteValueAMD64_OpSaturatedAddUint8x64(v) + case OpSaturatedPairwiseAddInt16x16: + return rewriteValueAMD64_OpSaturatedPairwiseAddInt16x16(v) + case OpSaturatedPairwiseAddInt16x8: + return rewriteValueAMD64_OpSaturatedPairwiseAddInt16x8(v) + case OpSaturatedPairwiseSubInt16x16: + return rewriteValueAMD64_OpSaturatedPairwiseSubInt16x16(v) + case OpSaturatedPairwiseSubInt16x8: + return rewriteValueAMD64_OpSaturatedPairwiseSubInt16x8(v) + case OpSaturatedSubInt16x16: + return rewriteValueAMD64_OpSaturatedSubInt16x16(v) + case OpSaturatedSubInt16x32: + return rewriteValueAMD64_OpSaturatedSubInt16x32(v) + case OpSaturatedSubInt16x8: + return rewriteValueAMD64_OpSaturatedSubInt16x8(v) + case OpSaturatedSubInt8x16: + return rewriteValueAMD64_OpSaturatedSubInt8x16(v) + case OpSaturatedSubInt8x32: + return rewriteValueAMD64_OpSaturatedSubInt8x32(v) + case OpSaturatedSubInt8x64: + return rewriteValueAMD64_OpSaturatedSubInt8x64(v) + case OpSaturatedSubUint16x16: + return rewriteValueAMD64_OpSaturatedSubUint16x16(v) + case OpSaturatedSubUint16x32: + return rewriteValueAMD64_OpSaturatedSubUint16x32(v) + case OpSaturatedSubUint16x8: + return rewriteValueAMD64_OpSaturatedSubUint16x8(v) + case OpSaturatedSubUint8x16: + return rewriteValueAMD64_OpSaturatedSubUint8x16(v) + case OpSaturatedSubUint8x32: + return rewriteValueAMD64_OpSaturatedSubUint8x32(v) + case OpSaturatedSubUint8x64: + return rewriteValueAMD64_OpSaturatedSubUint8x64(v) case OpSelect0: return rewriteValueAMD64_OpSelect0(v) case OpSelect1: @@ -1126,6 +3134,18 @@ func rewriteValueAMD64(v *Value) bool { case OpSignExt8to64: v.Op = OpAMD64MOVBQSX return true + case OpSignInt16x16: + return rewriteValueAMD64_OpSignInt16x16(v) + case OpSignInt16x8: + return rewriteValueAMD64_OpSignInt16x8(v) + case OpSignInt32x4: + return rewriteValueAMD64_OpSignInt32x4(v) + case OpSignInt32x8: + return rewriteValueAMD64_OpSignInt32x8(v) + case OpSignInt8x16: + return 
rewriteValueAMD64_OpSignInt8x16(v) + case OpSignInt8x32: + return rewriteValueAMD64_OpSignInt8x32(v) case OpSlicemask: return rewriteValueAMD64_OpSlicemask(v) case OpSpectreIndex: @@ -1138,6 +3158,18 @@ func rewriteValueAMD64(v *Value) bool { case OpSqrt32: v.Op = OpAMD64SQRTSS return true + case OpSqrtFloat32x16: + return rewriteValueAMD64_OpSqrtFloat32x16(v) + case OpSqrtFloat32x4: + return rewriteValueAMD64_OpSqrtFloat32x4(v) + case OpSqrtFloat32x8: + return rewriteValueAMD64_OpSqrtFloat32x8(v) + case OpSqrtFloat64x2: + return rewriteValueAMD64_OpSqrtFloat64x2(v) + case OpSqrtFloat64x4: + return rewriteValueAMD64_OpSqrtFloat64x4(v) + case OpSqrtFloat64x8: + return rewriteValueAMD64_OpSqrtFloat64x8(v) case OpStaticCall: v.Op = OpAMD64CALLstatic return true @@ -1161,9 +3193,69 @@ func rewriteValueAMD64(v *Value) bool { case OpSub8: v.Op = OpAMD64SUBL return true + case OpSubFloat32x16: + return rewriteValueAMD64_OpSubFloat32x16(v) + case OpSubFloat32x4: + return rewriteValueAMD64_OpSubFloat32x4(v) + case OpSubFloat32x8: + return rewriteValueAMD64_OpSubFloat32x8(v) + case OpSubFloat64x2: + return rewriteValueAMD64_OpSubFloat64x2(v) + case OpSubFloat64x4: + return rewriteValueAMD64_OpSubFloat64x4(v) + case OpSubFloat64x8: + return rewriteValueAMD64_OpSubFloat64x8(v) + case OpSubInt16x16: + return rewriteValueAMD64_OpSubInt16x16(v) + case OpSubInt16x32: + return rewriteValueAMD64_OpSubInt16x32(v) + case OpSubInt16x8: + return rewriteValueAMD64_OpSubInt16x8(v) + case OpSubInt32x16: + return rewriteValueAMD64_OpSubInt32x16(v) + case OpSubInt32x4: + return rewriteValueAMD64_OpSubInt32x4(v) + case OpSubInt32x8: + return rewriteValueAMD64_OpSubInt32x8(v) + case OpSubInt64x2: + return rewriteValueAMD64_OpSubInt64x2(v) + case OpSubInt64x4: + return rewriteValueAMD64_OpSubInt64x4(v) + case OpSubInt64x8: + return rewriteValueAMD64_OpSubInt64x8(v) + case OpSubInt8x16: + return rewriteValueAMD64_OpSubInt8x16(v) + case OpSubInt8x32: + return rewriteValueAMD64_OpSubInt8x32(v) + case OpSubInt8x64: + return rewriteValueAMD64_OpSubInt8x64(v) case OpSubPtr: v.Op = OpAMD64SUBQ return true + case OpSubUint16x16: + return rewriteValueAMD64_OpSubUint16x16(v) + case OpSubUint16x32: + return rewriteValueAMD64_OpSubUint16x32(v) + case OpSubUint16x8: + return rewriteValueAMD64_OpSubUint16x8(v) + case OpSubUint32x16: + return rewriteValueAMD64_OpSubUint32x16(v) + case OpSubUint32x4: + return rewriteValueAMD64_OpSubUint32x4(v) + case OpSubUint32x8: + return rewriteValueAMD64_OpSubUint32x8(v) + case OpSubUint64x2: + return rewriteValueAMD64_OpSubUint64x2(v) + case OpSubUint64x4: + return rewriteValueAMD64_OpSubUint64x4(v) + case OpSubUint64x8: + return rewriteValueAMD64_OpSubUint64x8(v) + case OpSubUint8x16: + return rewriteValueAMD64_OpSubUint8x16(v) + case OpSubUint8x32: + return rewriteValueAMD64_OpSubUint8x32(v) + case OpSubUint8x64: + return rewriteValueAMD64_OpSubUint8x64(v) case OpTailCall: v.Op = OpAMD64CALLtail return true @@ -1202,6 +3294,58 @@ func rewriteValueAMD64(v *Value) bool { case OpXor8: v.Op = OpAMD64XORL return true + case OpXorFloat32x16: + return rewriteValueAMD64_OpXorFloat32x16(v) + case OpXorFloat32x4: + return rewriteValueAMD64_OpXorFloat32x4(v) + case OpXorFloat32x8: + return rewriteValueAMD64_OpXorFloat32x8(v) + case OpXorFloat64x2: + return rewriteValueAMD64_OpXorFloat64x2(v) + case OpXorFloat64x4: + return rewriteValueAMD64_OpXorFloat64x4(v) + case OpXorFloat64x8: + return rewriteValueAMD64_OpXorFloat64x8(v) + case OpXorInt16x16: + return rewriteValueAMD64_OpXorInt16x16(v) + case 
OpXorInt16x8: + return rewriteValueAMD64_OpXorInt16x8(v) + case OpXorInt32x16: + return rewriteValueAMD64_OpXorInt32x16(v) + case OpXorInt32x4: + return rewriteValueAMD64_OpXorInt32x4(v) + case OpXorInt32x8: + return rewriteValueAMD64_OpXorInt32x8(v) + case OpXorInt64x2: + return rewriteValueAMD64_OpXorInt64x2(v) + case OpXorInt64x4: + return rewriteValueAMD64_OpXorInt64x4(v) + case OpXorInt64x8: + return rewriteValueAMD64_OpXorInt64x8(v) + case OpXorInt8x16: + return rewriteValueAMD64_OpXorInt8x16(v) + case OpXorInt8x32: + return rewriteValueAMD64_OpXorInt8x32(v) + case OpXorUint16x16: + return rewriteValueAMD64_OpXorUint16x16(v) + case OpXorUint16x8: + return rewriteValueAMD64_OpXorUint16x8(v) + case OpXorUint32x16: + return rewriteValueAMD64_OpXorUint32x16(v) + case OpXorUint32x4: + return rewriteValueAMD64_OpXorUint32x4(v) + case OpXorUint32x8: + return rewriteValueAMD64_OpXorUint32x8(v) + case OpXorUint64x2: + return rewriteValueAMD64_OpXorUint64x2(v) + case OpXorUint64x4: + return rewriteValueAMD64_OpXorUint64x4(v) + case OpXorUint64x8: + return rewriteValueAMD64_OpXorUint64x8(v) + case OpXorUint8x16: + return rewriteValueAMD64_OpXorUint8x16(v) + case OpXorUint8x32: + return rewriteValueAMD64_OpXorUint8x32(v) case OpZero: return rewriteValueAMD64_OpZero(v) case OpZeroExt16to32: @@ -23906,4100 +26050,20295 @@ func rewriteValueAMD64_OpAMD64XORQmodify(v *Value) bool { } return false } -func rewriteValueAMD64_OpAddr(v *Value) bool { +func rewriteValueAMD64_OpAbsoluteInt16x16(v *Value) bool { v_0 := v.Args[0] - // match: (Addr {sym} base) - // result: (LEAQ {sym} base) + // match: (AbsoluteInt16x16 x) + // result: (VPABSW256 x) for { - sym := auxToSym(v.Aux) - base := v_0 - v.reset(OpAMD64LEAQ) - v.Aux = symToAux(sym) - v.AddArg(base) + x := v_0 + v.reset(OpAMD64VPABSW256) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpAtomicAdd32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpAbsoluteInt16x32(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (AtomicAdd32 ptr val mem) - // result: (AddTupleFirst32 val (XADDLlock val ptr mem)) + // match: (AbsoluteInt16x32 x) + // result: (VPABSW512 x) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64AddTupleFirst32) - v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, types.NewTuple(typ.UInt32, types.TypeMem)) - v0.AddArg3(val, ptr, mem) - v.AddArg2(val, v0) + x := v_0 + v.reset(OpAMD64VPABSW512) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpAtomicAdd64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpAbsoluteInt16x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (AtomicAdd64 ptr val mem) - // result: (AddTupleFirst64 val (XADDQlock val ptr mem)) + // match: (AbsoluteInt16x8 x) + // result: (VPABSW128 x) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64AddTupleFirst64) - v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem)) - v0.AddArg3(val, ptr, mem) - v.AddArg2(val, v0) + x := v_0 + v.reset(OpAMD64VPABSW128) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpAtomicAnd32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpAbsoluteInt32x16(v *Value) bool { v_0 := v.Args[0] - // match: (AtomicAnd32 ptr val mem) - // result: (ANDLlock ptr val mem) + // match: (AbsoluteInt32x16 x) + // result: (VPABSD512 x) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64ANDLlock) - v.AddArg3(ptr, val, mem) + x 
:= v_0 + v.reset(OpAMD64VPABSD512) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpAtomicAnd32value(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpAbsoluteInt32x4(v *Value) bool { v_0 := v.Args[0] - // match: (AtomicAnd32value ptr val mem) - // result: (LoweredAtomicAnd32 ptr val mem) + // match: (AbsoluteInt32x4 x) + // result: (VPABSD128 x) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64LoweredAtomicAnd32) - v.AddArg3(ptr, val, mem) + x := v_0 + v.reset(OpAMD64VPABSD128) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpAtomicAnd64value(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpAbsoluteInt32x8(v *Value) bool { v_0 := v.Args[0] - // match: (AtomicAnd64value ptr val mem) - // result: (LoweredAtomicAnd64 ptr val mem) + // match: (AbsoluteInt32x8 x) + // result: (VPABSD256 x) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64LoweredAtomicAnd64) - v.AddArg3(ptr, val, mem) + x := v_0 + v.reset(OpAMD64VPABSD256) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpAtomicAnd8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpAbsoluteInt64x2(v *Value) bool { v_0 := v.Args[0] - // match: (AtomicAnd8 ptr val mem) - // result: (ANDBlock ptr val mem) + // match: (AbsoluteInt64x2 x) + // result: (VPABSQ128 x) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64ANDBlock) - v.AddArg3(ptr, val, mem) + x := v_0 + v.reset(OpAMD64VPABSQ128) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpAtomicCompareAndSwap32(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpAbsoluteInt64x4(v *Value) bool { v_0 := v.Args[0] - // match: (AtomicCompareAndSwap32 ptr old new_ mem) - // result: (CMPXCHGLlock ptr old new_ mem) + // match: (AbsoluteInt64x4 x) + // result: (VPABSQ256 x) for { - ptr := v_0 - old := v_1 - new_ := v_2 - mem := v_3 - v.reset(OpAMD64CMPXCHGLlock) - v.AddArg4(ptr, old, new_, mem) + x := v_0 + v.reset(OpAMD64VPABSQ256) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpAtomicCompareAndSwap64(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpAbsoluteInt64x8(v *Value) bool { v_0 := v.Args[0] - // match: (AtomicCompareAndSwap64 ptr old new_ mem) - // result: (CMPXCHGQlock ptr old new_ mem) + // match: (AbsoluteInt64x8 x) + // result: (VPABSQ512 x) for { - ptr := v_0 - old := v_1 - new_ := v_2 - mem := v_3 - v.reset(OpAMD64CMPXCHGQlock) - v.AddArg4(ptr, old, new_, mem) + x := v_0 + v.reset(OpAMD64VPABSQ512) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpAtomicExchange32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpAbsoluteInt8x16(v *Value) bool { v_0 := v.Args[0] - // match: (AtomicExchange32 ptr val mem) - // result: (XCHGL val ptr mem) + // match: (AbsoluteInt8x16 x) + // result: (VPABSB128 x) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64XCHGL) - v.AddArg3(val, ptr, mem) + x := v_0 + v.reset(OpAMD64VPABSB128) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpAtomicExchange64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpAbsoluteInt8x32(v *Value) bool { v_0 := v.Args[0] - // match: (AtomicExchange64 ptr val mem) - // result: (XCHGQ val ptr mem) + // match: (AbsoluteInt8x32 x) + // result: (VPABSB256 x) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64XCHGQ) - v.AddArg3(val, ptr, mem) + x := v_0 + v.reset(OpAMD64VPABSB256) + 
v.AddArg(x) return true } } -func rewriteValueAMD64_OpAtomicExchange8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpAbsoluteInt8x64(v *Value) bool { v_0 := v.Args[0] - // match: (AtomicExchange8 ptr val mem) - // result: (XCHGB val ptr mem) + // match: (AbsoluteInt8x64 x) + // result: (VPABSB512 x) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64XCHGB) - v.AddArg3(val, ptr, mem) + x := v_0 + v.reset(OpAMD64VPABSB512) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpAtomicLoad32(v *Value) bool { +func rewriteValueAMD64_OpAddFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicLoad32 ptr mem) - // result: (MOVLatomicload ptr mem) + // match: (AddFloat32x16 x y) + // result: (VADDPS512 y x) for { - ptr := v_0 - mem := v_1 - v.reset(OpAMD64MOVLatomicload) - v.AddArg2(ptr, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VADDPS512) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpAtomicLoad64(v *Value) bool { +func rewriteValueAMD64_OpAddFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicLoad64 ptr mem) - // result: (MOVQatomicload ptr mem) + // match: (AddFloat32x4 x y) + // result: (VADDPS128 y x) for { - ptr := v_0 - mem := v_1 - v.reset(OpAMD64MOVQatomicload) - v.AddArg2(ptr, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VADDPS128) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpAtomicLoad8(v *Value) bool { +func rewriteValueAMD64_OpAddFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicLoad8 ptr mem) - // result: (MOVBatomicload ptr mem) + // match: (AddFloat32x8 x y) + // result: (VADDPS256 y x) for { - ptr := v_0 - mem := v_1 - v.reset(OpAMD64MOVBatomicload) - v.AddArg2(ptr, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VADDPS256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpAtomicLoadPtr(v *Value) bool { +func rewriteValueAMD64_OpAddFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicLoadPtr ptr mem) - // result: (MOVQatomicload ptr mem) + // match: (AddFloat64x2 x y) + // result: (VADDPD128 y x) for { - ptr := v_0 - mem := v_1 - v.reset(OpAMD64MOVQatomicload) - v.AddArg2(ptr, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VADDPD128) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpAtomicOr32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAddFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicOr32 ptr val mem) - // result: (ORLlock ptr val mem) + // match: (AddFloat64x4 x y) + // result: (VADDPD256 y x) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64ORLlock) - v.AddArg3(ptr, val, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VADDPD256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpAtomicOr32value(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAddFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicOr32value ptr val mem) - // result: (LoweredAtomicOr32 ptr val mem) + // match: (AddFloat64x8 x y) + // result: (VADDPD512 y x) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64LoweredAtomicOr32) - v.AddArg3(ptr, val, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VADDPD512) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpAtomicOr64value(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAddInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicOr64value ptr val mem) - // result: (LoweredAtomicOr64 ptr val mem) + // match: 
(AddInt16x16 x y) + // result: (VPADDW256 y x) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64LoweredAtomicOr64) - v.AddArg3(ptr, val, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VPADDW256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpAtomicOr8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAddInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicOr8 ptr val mem) - // result: (ORBlock ptr val mem) + // match: (AddInt16x32 x y) + // result: (VPADDW512 y x) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64ORBlock) - v.AddArg3(ptr, val, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VPADDW512) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpAtomicStore32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAddInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (AtomicStore32 ptr val mem) - // result: (Select1 (XCHGL val ptr mem)) + // match: (AddInt16x8 x y) + // result: (VPADDW128 y x) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem)) - v0.AddArg3(val, ptr, mem) - v.AddArg(v0) + x := v_0 + y := v_1 + v.reset(OpAMD64VPADDW128) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpAtomicStore64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAddInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (AtomicStore64 ptr val mem) - // result: (Select1 (XCHGQ val ptr mem)) + // match: (AddInt32x16 x y) + // result: (VPADDD512 y x) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem)) - v0.AddArg3(val, ptr, mem) - v.AddArg(v0) + x := v_0 + y := v_1 + v.reset(OpAMD64VPADDD512) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpAtomicStore8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAddInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (AtomicStore8 ptr val mem) - // result: (Select1 (XCHGB val ptr mem)) + // match: (AddInt32x4 x y) + // result: (VPADDD128 y x) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64XCHGB, types.NewTuple(typ.UInt8, types.TypeMem)) - v0.AddArg3(val, ptr, mem) - v.AddArg(v0) + x := v_0 + y := v_1 + v.reset(OpAMD64VPADDD128) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpAtomicStorePtrNoWB(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAddInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (AtomicStorePtrNoWB ptr val mem) - // result: (Select1 (XCHGQ val ptr mem)) + // match: (AddInt32x8 x y) + // result: (VPADDD256 y x) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem)) - v0.AddArg3(val, ptr, mem) - v.AddArg(v0) + x := v_0 + y := v_1 + v.reset(OpAMD64VPADDD256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpBitLen16(v *Value) bool { +func rewriteValueAMD64_OpAddInt64x2(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (BitLen16 x) - // cond: buildcfg.GOAMD64 < 3 - // result: (BSRL (LEAL1 [1] (MOVWQZX x) (MOVWQZX x))) + // match: (AddInt64x2 
x y) + // result: (VPADDQ128 y x) for { x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpAMD64BSRL) - v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32) - v0.AuxInt = int32ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32) - v1.AddArg(x) - v0.AddArg2(v1, v1) - v.AddArg(v0) + y := v_1 + v.reset(OpAMD64VPADDQ128) + v.AddArg2(y, x) return true } - // match: (BitLen16 x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (NEGQ (ADDQconst [-32] (LZCNTL (MOVWQZX x)))) +} +func rewriteValueAMD64_OpAddInt64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AddInt64x4 x y) + // result: (VPADDQ256 y x) for { - t := v.Type x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64NEGQ) - v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) - v0.AuxInt = int32ToAuxInt(-32) - v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, x.Type) - v2.AddArg(x) - v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + y := v_1 + v.reset(OpAMD64VPADDQ256) + v.AddArg2(y, x) return true } - return false } -func rewriteValueAMD64_OpBitLen32(v *Value) bool { +func rewriteValueAMD64_OpAddInt64x8(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (BitLen32 x) - // cond: buildcfg.GOAMD64 < 3 - // result: (Select0 (BSRQ (LEAQ1 [1] (MOVLQZX x) (MOVLQZX x)))) + // match: (AddInt64x8 x y) + // result: (VPADDQ512 y x) for { x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v1 := b.NewValue0(v.Pos, OpAMD64LEAQ1, typ.UInt64) - v1.AuxInt = int32ToAuxInt(1) - v2 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64) - v2.AddArg(x) - v1.AddArg2(v2, v2) - v0.AddArg(v1) - v.AddArg(v0) + y := v_1 + v.reset(OpAMD64VPADDQ512) + v.AddArg2(y, x) return true } - // match: (BitLen32 x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (NEGQ (ADDQconst [-32] (LZCNTL x))) +} +func rewriteValueAMD64_OpAddInt8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AddInt8x16 x y) + // result: (VPADDB128 y x) for { - t := v.Type x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64NEGQ) - v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) - v0.AuxInt = int32ToAuxInt(-32) - v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) - v1.AddArg(x) - v0.AddArg(v1) - v.AddArg(v0) + y := v_1 + v.reset(OpAMD64VPADDB128) + v.AddArg2(y, x) return true } - return false } -func rewriteValueAMD64_OpBitLen64(v *Value) bool { +func rewriteValueAMD64_OpAddInt8x32(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (BitLen64 x) - // cond: buildcfg.GOAMD64 < 3 - // result: (ADDQconst [1] (CMOVQEQ (Select0 (BSRQ x)) (MOVQconst [-1]) (Select1 (BSRQ x)))) + // match: (AddInt8x32 x y) + // result: (VPADDB256 y x) for { - t := v.Type x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpAMD64ADDQconst) - v.AuxInt = int32ToAuxInt(1) - v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t) - v1 := b.NewValue0(v.Pos, OpSelect0, t) - v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v2.AddArg(x) - v1.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t) - v3.AuxInt = int64ToAuxInt(-1) - v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v4.AddArg(v2) - v0.AddArg3(v1, v3, v4) - v.AddArg(v0) + y := v_1 + v.reset(OpAMD64VPADDB256) + v.AddArg2(y, x) return true } - // match: (BitLen64 x) - // cond: 
buildcfg.GOAMD64 >= 3 - // result: (NEGQ (ADDQconst [-64] (LZCNTQ x))) +} +func rewriteValueAMD64_OpAddInt8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AddInt8x64 x y) + // result: (VPADDB512 y x) for { - t := v.Type x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64NEGQ) - v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) - v0.AuxInt = int32ToAuxInt(-64) - v1 := b.NewValue0(v.Pos, OpAMD64LZCNTQ, typ.UInt64) - v1.AddArg(x) - v0.AddArg(v1) - v.AddArg(v0) + y := v_1 + v.reset(OpAMD64VPADDB512) + v.AddArg2(y, x) return true } - return false } -func rewriteValueAMD64_OpBitLen8(v *Value) bool { +func rewriteValueAMD64_OpAddUint16x16(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (BitLen8 x) - // cond: buildcfg.GOAMD64 < 3 - // result: (BSRL (LEAL1 [1] (MOVBQZX x) (MOVBQZX x))) + // match: (AddUint16x16 x y) + // result: (VPADDW256 y x) for { x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpAMD64BSRL) - v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32) - v0.AuxInt = int32ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32) - v1.AddArg(x) - v0.AddArg2(v1, v1) - v.AddArg(v0) + y := v_1 + v.reset(OpAMD64VPADDW256) + v.AddArg2(y, x) return true } - // match: (BitLen8 x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (NEGQ (ADDQconst [-32] (LZCNTL (MOVBQZX x)))) +} +func rewriteValueAMD64_OpAddUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AddUint16x32 x y) + // result: (VPADDW512 y x) for { - t := v.Type x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64NEGQ) - v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) - v0.AuxInt = int32ToAuxInt(-32) - v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, x.Type) - v2.AddArg(x) - v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + y := v_1 + v.reset(OpAMD64VPADDW512) + v.AddArg2(y, x) return true } - return false } -func rewriteValueAMD64_OpBswap16(v *Value) bool { +func rewriteValueAMD64_OpAddUint16x8(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Bswap16 x) - // result: (ROLWconst [8] x) + // match: (AddUint16x8 x y) + // result: (VPADDW128 y x) for { x := v_0 - v.reset(OpAMD64ROLWconst) - v.AuxInt = int8ToAuxInt(8) - v.AddArg(x) + y := v_1 + v.reset(OpAMD64VPADDW128) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpCeil(v *Value) bool { +func rewriteValueAMD64_OpAddUint32x16(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Ceil x) - // result: (ROUNDSD [2] x) + // match: (AddUint32x16 x y) + // result: (VPADDD512 y x) for { x := v_0 - v.reset(OpAMD64ROUNDSD) - v.AuxInt = int8ToAuxInt(2) - v.AddArg(x) + y := v_1 + v.reset(OpAMD64VPADDD512) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpCondSelect(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAddUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (CondSelect x y (SETEQ cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQEQ y x cond) + // match: (AddUint32x4 x y) + // result: (VPADDD128 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETEQ { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQEQ) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPADDD128) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETNE cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: 
(CMOVQNE y x cond) +} +func rewriteValueAMD64_OpAddUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AddUint32x8 x y) + // result: (VPADDD256 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETNE { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQNE) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPADDD256) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETL cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQLT y x cond) +} +func rewriteValueAMD64_OpAddUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AddUint64x2 x y) + // result: (VPADDQ128 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETL { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQLT) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPADDQ128) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETG cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQGT y x cond) +} +func rewriteValueAMD64_OpAddUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AddUint64x4 x y) + // result: (VPADDQ256 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETG { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQGT) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPADDQ256) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETLE cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQLE y x cond) +} +func rewriteValueAMD64_OpAddUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AddUint64x8 x y) + // result: (VPADDQ512 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETLE { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQLE) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPADDQ512) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETGE cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQGE y x cond) +} +func rewriteValueAMD64_OpAddUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AddUint8x16 x y) + // result: (VPADDB128 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGE { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQGE) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPADDB128) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETA cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQHI y x cond) +} +func rewriteValueAMD64_OpAddUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AddUint8x32 x y) + // result: (VPADDB256 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETA { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQHI) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPADDB256) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETB cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQCS y x cond) +} +func rewriteValueAMD64_OpAddUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AddUint8x64 x y) + // result: (VPADDB512 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETB { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQCS) - v.AddArg3(y, x, cond) + 
v.reset(OpAMD64VPADDB512) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETAE cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQCC y x cond) +} +func rewriteValueAMD64_OpAddr(v *Value) bool { + v_0 := v.Args[0] + // match: (Addr {sym} base) + // result: (LEAQ {sym} base) + for { + sym := auxToSym(v.Aux) + base := v_0 + v.reset(OpAMD64LEAQ) + v.Aux = symToAux(sym) + v.AddArg(base) + return true + } +} +func rewriteValueAMD64_OpAndFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndFloat32x16 x y) + // result: (VANDPS512 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETAE { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQCC) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VANDPS512) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETBE cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQLS y x cond) +} +func rewriteValueAMD64_OpAndFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndFloat32x4 x y) + // result: (VANDPS128 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETBE { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQLS) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VANDPS128) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETEQF cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQEQF y x cond) +} +func rewriteValueAMD64_OpAndFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndFloat32x8 x y) + // result: (VANDPS256 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETEQF { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQEQF) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VANDPS256) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETNEF cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQNEF y x cond) +} +func rewriteValueAMD64_OpAndFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndFloat64x2 x y) + // result: (VANDPD128 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETNEF { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQNEF) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VANDPD128) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETGF cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQGTF y x cond) +} +func rewriteValueAMD64_OpAndFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndFloat64x4 x y) + // result: (VANDPD256 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGF { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQGTF) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VANDPD256) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETGEF cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQGEF y x cond) +} +func rewriteValueAMD64_OpAndFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndFloat64x8 x y) + // result: (VANDPD512 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGEF { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQGEF) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VANDPD512) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETEQ 
cond)) - // cond: is32BitInt(t) - // result: (CMOVLEQ y x cond) +} +func rewriteValueAMD64_OpAndInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndInt16x16 x y) + // result: (VPAND256 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETEQ { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLEQ) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPAND256) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETNE cond)) - // cond: is32BitInt(t) - // result: (CMOVLNE y x cond) +} +func rewriteValueAMD64_OpAndInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndInt16x8 x y) + // result: (VPAND128 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETNE { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLNE) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPAND128) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETL cond)) - // cond: is32BitInt(t) - // result: (CMOVLLT y x cond) +} +func rewriteValueAMD64_OpAndInt32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndInt32x16 x y) + // result: (VPANDD512 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETL { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLLT) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPANDD512) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETG cond)) - // cond: is32BitInt(t) - // result: (CMOVLGT y x cond) +} +func rewriteValueAMD64_OpAndInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndInt32x4 x y) + // result: (VPAND128 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETG { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLGT) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPAND128) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETLE cond)) - // cond: is32BitInt(t) - // result: (CMOVLLE y x cond) +} +func rewriteValueAMD64_OpAndInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndInt32x8 x y) + // result: (VPAND256 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETLE { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLLE) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPAND256) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETGE cond)) - // cond: is32BitInt(t) - // result: (CMOVLGE y x cond) +} +func rewriteValueAMD64_OpAndInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndInt64x2 x y) + // result: (VPAND128 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGE { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLGE) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPAND128) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETA cond)) - // cond: is32BitInt(t) - // result: (CMOVLHI y x cond) +} +func rewriteValueAMD64_OpAndInt64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndInt64x4 x y) + // result: (VPAND256 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETA { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLHI) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPAND256) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETB cond)) - // cond: is32BitInt(t) - // result: (CMOVLCS y x cond) +} 
+func rewriteValueAMD64_OpAndInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndInt64x8 x y) + // result: (VPANDQ512 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETB { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLCS) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPANDQ512) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETAE cond)) - // cond: is32BitInt(t) - // result: (CMOVLCC y x cond) +} +func rewriteValueAMD64_OpAndInt8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndInt8x16 x y) + // result: (VPAND128 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETAE { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLCC) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPAND128) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETBE cond)) - // cond: is32BitInt(t) - // result: (CMOVLLS y x cond) +} +func rewriteValueAMD64_OpAndInt8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndInt8x32 x y) + // result: (VPAND256 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETBE { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLLS) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPAND256) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETEQF cond)) - // cond: is32BitInt(t) - // result: (CMOVLEQF y x cond) +} +func rewriteValueAMD64_OpAndNotFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotFloat32x16 x y) + // result: (VANDNPS512 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETEQF { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLEQF) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VANDNPS512) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETNEF cond)) - // cond: is32BitInt(t) - // result: (CMOVLNEF y x cond) +} +func rewriteValueAMD64_OpAndNotFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotFloat32x4 x y) + // result: (VANDNPS128 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETNEF { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLNEF) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VANDNPS128) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETGF cond)) - // cond: is32BitInt(t) - // result: (CMOVLGTF y x cond) +} +func rewriteValueAMD64_OpAndNotFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotFloat32x8 x y) + // result: (VANDNPS256 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGF { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLGTF) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VANDNPS256) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETGEF cond)) - // cond: is32BitInt(t) - // result: (CMOVLGEF y x cond) +} +func rewriteValueAMD64_OpAndNotFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotFloat64x2 x y) + // result: (VANDNPD128 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGEF { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLGEF) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VANDNPD128) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETEQ cond)) - // cond: is16BitInt(t) - // result: (CMOVWEQ y x 
cond) +} +func rewriteValueAMD64_OpAndNotFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotFloat64x4 x y) + // result: (VANDNPD256 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETEQ { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWEQ) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VANDNPD256) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETNE cond)) - // cond: is16BitInt(t) - // result: (CMOVWNE y x cond) +} +func rewriteValueAMD64_OpAndNotFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotFloat64x8 x y) + // result: (VANDNPD512 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETNE { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWNE) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VANDNPD512) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETL cond)) - // cond: is16BitInt(t) - // result: (CMOVWLT y x cond) +} +func rewriteValueAMD64_OpAndNotInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotInt16x16 x y) + // result: (VPANDN256 y x) for { - t := v.Type + x := v_0 + y := v_1 + v.reset(OpAMD64VPANDN256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndNotInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotInt16x8 x y) + // result: (VPANDN128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPANDN128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndNotInt32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotInt32x16 x y) + // result: (VPANDND512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPANDND512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndNotInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotInt32x4 x y) + // result: (VPANDN128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPANDN128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndNotInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotInt32x8 x y) + // result: (VPANDN256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPANDN256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndNotInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotInt64x2 x y) + // result: (VPANDN128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPANDN128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndNotInt64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotInt64x4 x y) + // result: (VPANDN256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPANDN256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndNotInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotInt64x8 x y) + // result: (VPANDNQ512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPANDNQ512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndNotInt8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotInt8x16 x y) + // result: (VPANDN128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPANDN128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndNotInt8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotInt8x32 x y) + // result: (VPANDN256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPANDN256) + 
v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndNotUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotUint16x16 x y) + // result: (VPANDN256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPANDN256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndNotUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotUint16x8 x y) + // result: (VPANDN128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPANDN128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndNotUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotUint32x16 x y) + // result: (VPANDND512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPANDND512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndNotUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotUint32x4 x y) + // result: (VPANDN128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPANDN128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndNotUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotUint32x8 x y) + // result: (VPANDN256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPANDN256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndNotUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotUint64x2 x y) + // result: (VPANDN128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPANDN128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndNotUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotUint64x4 x y) + // result: (VPANDN256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPANDN256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndNotUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotUint64x8 x y) + // result: (VPANDNQ512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPANDNQ512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndNotUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotUint8x16 x y) + // result: (VPANDN128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPANDN128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndNotUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotUint8x32 x y) + // result: (VPANDN256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPANDN256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndUint16x16 x y) + // result: (VPAND256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPAND256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndUint16x8 x y) + // result: (VPAND128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPAND128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndUint32x16 x y) + // result: (VPANDD512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPANDD512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndUint32x4 x y) + // result: (VPAND128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPAND128) + 
v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndUint32x8 x y) + // result: (VPAND256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPAND256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndUint64x2 x y) + // result: (VPAND128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPAND128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndUint64x4 x y) + // result: (VPAND256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPAND256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndUint64x8 x y) + // result: (VPANDQ512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPANDQ512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndUint8x16 x y) + // result: (VPAND128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPAND128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndUint8x32 x y) + // result: (VPAND256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPAND256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpApproximateReciprocalFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (ApproximateReciprocalFloat32x16 x) + // result: (VRCP14PS512 x) + for { + x := v_0 + v.reset(OpAMD64VRCP14PS512) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpApproximateReciprocalFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (ApproximateReciprocalFloat32x4 x) + // result: (VRCP14PS128 x) + for { + x := v_0 + v.reset(OpAMD64VRCP14PS128) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpApproximateReciprocalFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (ApproximateReciprocalFloat32x8 x) + // result: (VRCP14PS256 x) + for { + x := v_0 + v.reset(OpAMD64VRCP14PS256) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpApproximateReciprocalFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (ApproximateReciprocalFloat64x2 x) + // result: (VRCP14PD128 x) + for { + x := v_0 + v.reset(OpAMD64VRCP14PD128) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpApproximateReciprocalFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (ApproximateReciprocalFloat64x4 x) + // result: (VRCP14PD256 x) + for { + x := v_0 + v.reset(OpAMD64VRCP14PD256) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpApproximateReciprocalFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (ApproximateReciprocalFloat64x8 x) + // result: (VRCP14PD512 x) + for { + x := v_0 + v.reset(OpAMD64VRCP14PD512) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (ApproximateReciprocalOfSqrtFloat32x16 x) + // result: (VRSQRT14PS512 x) + for { + x := v_0 + v.reset(OpAMD64VRSQRT14PS512) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (ApproximateReciprocalOfSqrtFloat32x4 x) + // result: (VRSQRTPS128 x) + for { + x := v_0 + v.reset(OpAMD64VRSQRTPS128) + 
v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (ApproximateReciprocalOfSqrtFloat32x8 x) + // result: (VRSQRTPS256 x) + for { + x := v_0 + v.reset(OpAMD64VRSQRTPS256) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (ApproximateReciprocalOfSqrtFloat64x2 x) + // result: (VRSQRT14PD128 x) + for { + x := v_0 + v.reset(OpAMD64VRSQRT14PD128) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (ApproximateReciprocalOfSqrtFloat64x4 x) + // result: (VRSQRT14PD256 x) + for { + x := v_0 + v.reset(OpAMD64VRSQRT14PD256) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (ApproximateReciprocalOfSqrtFloat64x8 x) + // result: (VRSQRT14PD512 x) + for { + x := v_0 + v.reset(OpAMD64VRSQRT14PD512) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpAtomicAdd32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicAdd32 ptr val mem) + // result: (AddTupleFirst32 val (XADDLlock val ptr mem)) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64AddTupleFirst32) + v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, types.NewTuple(typ.UInt32, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg2(val, v0) + return true + } +} +func rewriteValueAMD64_OpAtomicAdd64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicAdd64 ptr val mem) + // result: (AddTupleFirst64 val (XADDQlock val ptr mem)) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64AddTupleFirst64) + v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg2(val, v0) + return true + } +} +func rewriteValueAMD64_OpAtomicAnd32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicAnd32 ptr val mem) + // result: (ANDLlock ptr val mem) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64ANDLlock) + v.AddArg3(ptr, val, mem) + return true + } +} +func rewriteValueAMD64_OpAtomicAnd32value(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicAnd32value ptr val mem) + // result: (LoweredAtomicAnd32 ptr val mem) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64LoweredAtomicAnd32) + v.AddArg3(ptr, val, mem) + return true + } +} +func rewriteValueAMD64_OpAtomicAnd64value(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicAnd64value ptr val mem) + // result: (LoweredAtomicAnd64 ptr val mem) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64LoweredAtomicAnd64) + v.AddArg3(ptr, val, mem) + return true + } +} +func rewriteValueAMD64_OpAtomicAnd8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicAnd8 ptr val mem) + // result: (ANDBlock ptr val mem) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64ANDBlock) + v.AddArg3(ptr, val, mem) + return true + } +} +func rewriteValueAMD64_OpAtomicCompareAndSwap32(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: 
(AtomicCompareAndSwap32 ptr old new_ mem) + // result: (CMPXCHGLlock ptr old new_ mem) + for { + ptr := v_0 + old := v_1 + new_ := v_2 + mem := v_3 + v.reset(OpAMD64CMPXCHGLlock) + v.AddArg4(ptr, old, new_, mem) + return true + } +} +func rewriteValueAMD64_OpAtomicCompareAndSwap64(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicCompareAndSwap64 ptr old new_ mem) + // result: (CMPXCHGQlock ptr old new_ mem) + for { + ptr := v_0 + old := v_1 + new_ := v_2 + mem := v_3 + v.reset(OpAMD64CMPXCHGQlock) + v.AddArg4(ptr, old, new_, mem) + return true + } +} +func rewriteValueAMD64_OpAtomicExchange32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicExchange32 ptr val mem) + // result: (XCHGL val ptr mem) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64XCHGL) + v.AddArg3(val, ptr, mem) + return true + } +} +func rewriteValueAMD64_OpAtomicExchange64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicExchange64 ptr val mem) + // result: (XCHGQ val ptr mem) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64XCHGQ) + v.AddArg3(val, ptr, mem) + return true + } +} +func rewriteValueAMD64_OpAtomicExchange8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicExchange8 ptr val mem) + // result: (XCHGB val ptr mem) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64XCHGB) + v.AddArg3(val, ptr, mem) + return true + } +} +func rewriteValueAMD64_OpAtomicLoad32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicLoad32 ptr mem) + // result: (MOVLatomicload ptr mem) + for { + ptr := v_0 + mem := v_1 + v.reset(OpAMD64MOVLatomicload) + v.AddArg2(ptr, mem) + return true + } +} +func rewriteValueAMD64_OpAtomicLoad64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicLoad64 ptr mem) + // result: (MOVQatomicload ptr mem) + for { + ptr := v_0 + mem := v_1 + v.reset(OpAMD64MOVQatomicload) + v.AddArg2(ptr, mem) + return true + } +} +func rewriteValueAMD64_OpAtomicLoad8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicLoad8 ptr mem) + // result: (MOVBatomicload ptr mem) + for { + ptr := v_0 + mem := v_1 + v.reset(OpAMD64MOVBatomicload) + v.AddArg2(ptr, mem) + return true + } +} +func rewriteValueAMD64_OpAtomicLoadPtr(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicLoadPtr ptr mem) + // result: (MOVQatomicload ptr mem) + for { + ptr := v_0 + mem := v_1 + v.reset(OpAMD64MOVQatomicload) + v.AddArg2(ptr, mem) + return true + } +} +func rewriteValueAMD64_OpAtomicOr32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicOr32 ptr val mem) + // result: (ORLlock ptr val mem) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64ORLlock) + v.AddArg3(ptr, val, mem) + return true + } +} +func rewriteValueAMD64_OpAtomicOr32value(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicOr32value ptr val mem) + // result: (LoweredAtomicOr32 ptr val mem) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64LoweredAtomicOr32) + v.AddArg3(ptr, val, mem) + return true + } +} +func rewriteValueAMD64_OpAtomicOr64value(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicOr64value ptr val mem) + // result: (LoweredAtomicOr64 ptr val mem) + for { + ptr := v_0 + val := v_1 + mem := v_2 + 
v.reset(OpAMD64LoweredAtomicOr64) + v.AddArg3(ptr, val, mem) + return true + } +} +func rewriteValueAMD64_OpAtomicOr8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicOr8 ptr val mem) + // result: (ORBlock ptr val mem) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64ORBlock) + v.AddArg3(ptr, val, mem) + return true + } +} +func rewriteValueAMD64_OpAtomicStore32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicStore32 ptr val mem) + // result: (Select1 (XCHGL val ptr mem)) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpAtomicStore64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicStore64 ptr val mem) + // result: (Select1 (XCHGQ val ptr mem)) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpAtomicStore8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicStore8 ptr val mem) + // result: (Select1 (XCHGB val ptr mem)) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64XCHGB, types.NewTuple(typ.UInt8, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpAtomicStorePtrNoWB(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicStorePtrNoWB ptr val mem) + // result: (Select1 (XCHGQ val ptr mem)) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpAverageUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AverageUint16x16 x y) + // result: (VPAVGW256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPAVGW256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAverageUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AverageUint16x32 x y) + // result: (VPAVGW512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPAVGW512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAverageUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AverageUint16x8 x y) + // result: (VPAVGW128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPAVGW128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAverageUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AverageUint8x16 x y) + // result: (VPAVGB128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPAVGB128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAverageUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AverageUint8x32 x y) + // result: (VPAVGB256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPAVGB256) + v.AddArg2(y, x) + return true + } +} +func 
rewriteValueAMD64_OpAverageUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AverageUint8x64 x y) + // result: (VPAVGB512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPAVGB512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpBitLen16(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (BitLen16 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (BSRL (LEAL1 [1] (MOVWQZX x) (MOVWQZX x))) + for { + x := v_0 + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64BSRL) + v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32) + v0.AuxInt = int32ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32) + v1.AddArg(x) + v0.AddArg2(v1, v1) + v.AddArg(v0) + return true + } + // match: (BitLen16 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (NEGQ (ADDQconst [-32] (LZCNTL (MOVWQZX x)))) + for { + t := v.Type + x := v_0 + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64NEGQ) + v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) + v0.AuxInt = int32ToAuxInt(-32) + v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, x.Type) + v2.AddArg(x) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpBitLen32(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (BitLen32 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (Select0 (BSRQ (LEAQ1 [1] (MOVLQZX x) (MOVLQZX x)))) + for { + x := v_0 + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v1 := b.NewValue0(v.Pos, OpAMD64LEAQ1, typ.UInt64) + v1.AuxInt = int32ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64) + v2.AddArg(x) + v1.AddArg2(v2, v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (BitLen32 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (NEGQ (ADDQconst [-32] (LZCNTL x))) + for { + t := v.Type + x := v_0 + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64NEGQ) + v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) + v0.AuxInt = int32ToAuxInt(-32) + v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpBitLen64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (BitLen64 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (ADDQconst [1] (CMOVQEQ (Select0 (BSRQ x)) (MOVQconst [-1]) (Select1 (BSRQ x)))) + for { + t := v.Type + x := v_0 + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64ADDQconst) + v.AuxInt = int32ToAuxInt(1) + v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t) + v1 := b.NewValue0(v.Pos, OpSelect0, t) + v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v2.AddArg(x) + v1.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t) + v3.AuxInt = int64ToAuxInt(-1) + v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v4.AddArg(v2) + v0.AddArg3(v1, v3, v4) + v.AddArg(v0) + return true + } + // match: (BitLen64 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (NEGQ (ADDQconst [-64] (LZCNTQ x))) + for { + t := v.Type + x := v_0 + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64NEGQ) + v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) + v0.AuxInt = int32ToAuxInt(-64) + v1 := b.NewValue0(v.Pos, OpAMD64LZCNTQ, typ.UInt64) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + 
return false +} +func rewriteValueAMD64_OpBitLen8(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (BitLen8 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (BSRL (LEAL1 [1] (MOVBQZX x) (MOVBQZX x))) + for { + x := v_0 + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64BSRL) + v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32) + v0.AuxInt = int32ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32) + v1.AddArg(x) + v0.AddArg2(v1, v1) + v.AddArg(v0) + return true + } + // match: (BitLen8 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (NEGQ (ADDQconst [-32] (LZCNTL (MOVBQZX x)))) + for { + t := v.Type + x := v_0 + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64NEGQ) + v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) + v0.AuxInt = int32ToAuxInt(-32) + v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, x.Type) + v2.AddArg(x) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpBswap16(v *Value) bool { + v_0 := v.Args[0] + // match: (Bswap16 x) + // result: (ROLWconst [8] x) + for { + x := v_0 + v.reset(OpAMD64ROLWconst) + v.AuxInt = int8ToAuxInt(8) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpCeil(v *Value) bool { + v_0 := v.Args[0] + // match: (Ceil x) + // result: (ROUNDSD [2] x) + for { + x := v_0 + v.reset(OpAMD64ROUNDSD) + v.AuxInt = int8ToAuxInt(2) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpCondSelect(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (CondSelect x y (SETEQ cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQEQ y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETEQ { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQEQ) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETNE cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQNE y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETNE { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQNE) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETL cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQLT y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETL { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQLT) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETG cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQGT y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETG { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQGT) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETLE cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQLE y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETLE { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQLE) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETGE cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQGE y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETGE { + break + } + cond := 
v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQGE) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETA cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQHI y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETA { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQHI) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETB cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQCS y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETB { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQCS) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETAE cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQCC y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETAE { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQCC) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETBE cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQLS y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETBE { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQLS) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETEQF cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQEQF y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETEQF { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQEQF) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETNEF cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQNEF y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETNEF { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQNEF) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETGF cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQGTF y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETGF { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQGTF) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETGEF cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQGEF y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETGEF { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQGEF) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETEQ cond)) + // cond: is32BitInt(t) + // result: (CMOVLEQ y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETEQ { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLEQ) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETNE cond)) + // cond: is32BitInt(t) + // result: (CMOVLNE y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETNE { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLNE) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETL cond)) + // cond: is32BitInt(t) + // result: (CMOVLLT y x cond) 
+ for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETL { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLLT) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETG cond)) + // cond: is32BitInt(t) + // result: (CMOVLGT y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETG { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLGT) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETLE cond)) + // cond: is32BitInt(t) + // result: (CMOVLLE y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETLE { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLLE) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETGE cond)) + // cond: is32BitInt(t) + // result: (CMOVLGE y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETGE { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLGE) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETA cond)) + // cond: is32BitInt(t) + // result: (CMOVLHI y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETA { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLHI) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETB cond)) + // cond: is32BitInt(t) + // result: (CMOVLCS y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETB { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLCS) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETAE cond)) + // cond: is32BitInt(t) + // result: (CMOVLCC y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETAE { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLCC) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETBE cond)) + // cond: is32BitInt(t) + // result: (CMOVLLS y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETBE { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLLS) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETEQF cond)) + // cond: is32BitInt(t) + // result: (CMOVLEQF y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETEQF { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLEQF) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETNEF cond)) + // cond: is32BitInt(t) + // result: (CMOVLNEF y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETNEF { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLNEF) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETGF cond)) + // cond: is32BitInt(t) + // result: (CMOVLGTF y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETGF { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLGTF) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETGEF cond)) + // cond: is32BitInt(t) + // result: (CMOVLGEF y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETGEF { + break + } + cond := v_2.Args[0] + if 
!(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLGEF) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETEQ cond)) + // cond: is16BitInt(t) + // result: (CMOVWEQ y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETEQ { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWEQ) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETNE cond)) + // cond: is16BitInt(t) + // result: (CMOVWNE y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETNE { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWNE) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETL cond)) + // cond: is16BitInt(t) + // result: (CMOVWLT y x cond) + for { + t := v.Type x := v_0 y := v_1 if v_2.Op != OpAMD64SETL { break } - cond := v_2.Args[0] - if !(is16BitInt(t)) { + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWLT) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETG cond)) + // cond: is16BitInt(t) + // result: (CMOVWGT y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETG { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWGT) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETLE cond)) + // cond: is16BitInt(t) + // result: (CMOVWLE y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETLE { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWLE) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETGE cond)) + // cond: is16BitInt(t) + // result: (CMOVWGE y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETGE { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWGE) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETA cond)) + // cond: is16BitInt(t) + // result: (CMOVWHI y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETA { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWHI) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETB cond)) + // cond: is16BitInt(t) + // result: (CMOVWCS y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETB { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWCS) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETAE cond)) + // cond: is16BitInt(t) + // result: (CMOVWCC y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETAE { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWCC) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETBE cond)) + // cond: is16BitInt(t) + // result: (CMOVWLS y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETBE { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWLS) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETEQF cond)) + // cond: is16BitInt(t) + // result: (CMOVWEQF y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETEQF { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWEQF) + v.AddArg3(y, x, cond) + return true + } + 
// match: (CondSelect x y (SETNEF cond)) + // cond: is16BitInt(t) + // result: (CMOVWNEF y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETNEF { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWNEF) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETGF cond)) + // cond: is16BitInt(t) + // result: (CMOVWGTF y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETGF { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWGTF) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETGEF cond)) + // cond: is16BitInt(t) + // result: (CMOVWGEF y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETGEF { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWGEF) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 1 + // result: (CondSelect x y (MOVBQZX check)) + for { + t := v.Type + x := v_0 + y := v_1 + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 1) { + break + } + v.reset(OpCondSelect) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt64) + v0.AddArg(check) + v.AddArg3(x, y, v0) + return true + } + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 2 + // result: (CondSelect x y (MOVWQZX check)) + for { + t := v.Type + x := v_0 + y := v_1 + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 2) { + break + } + v.reset(OpCondSelect) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt64) + v0.AddArg(check) + v.AddArg3(x, y, v0) + return true + } + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 4 + // result: (CondSelect x y (MOVLQZX check)) + for { + t := v.Type + x := v_0 + y := v_1 + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 4) { + break + } + v.reset(OpCondSelect) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64) + v0.AddArg(check) + v.AddArg3(x, y, v0) + return true + } + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t)) + // result: (CMOVQNE y x (CMPQconst [0] check)) + for { + t := v.Type + x := v_0 + y := v_1 + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))) { + break + } + v.reset(OpAMD64CMOVQNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(0) + v0.AddArg(check) + v.AddArg3(y, x, v0) + return true + } + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t) + // result: (CMOVLNE y x (CMPQconst [0] check)) + for { + t := v.Type + x := v_0 + y := v_1 + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(0) + v0.AddArg(check) + v.AddArg3(y, x, v0) + return true + } + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t) + // result: (CMOVWNE y x (CMPQconst [0] check)) + for { + t := v.Type + x := v_0 + y := v_1 + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWNE) + v0 := 
b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(0) + v0.AddArg(check) + v.AddArg3(y, x, v0) + return true + } + return false +} +func rewriteValueAMD64_OpConst16(v *Value) bool { + // match: (Const16 [c]) + // result: (MOVLconst [int32(c)]) + for { + c := auxIntToInt16(v.AuxInt) + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(int32(c)) + return true + } +} +func rewriteValueAMD64_OpConst8(v *Value) bool { + // match: (Const8 [c]) + // result: (MOVLconst [int32(c)]) + for { + c := auxIntToInt8(v.AuxInt) + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(int32(c)) + return true + } +} +func rewriteValueAMD64_OpConstBool(v *Value) bool { + // match: (ConstBool [c]) + // result: (MOVLconst [b2i32(c)]) + for { + c := auxIntToBool(v.AuxInt) + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(b2i32(c)) + return true + } +} +func rewriteValueAMD64_OpConstNil(v *Value) bool { + // match: (ConstNil ) + // result: (MOVQconst [0]) + for { + v.reset(OpAMD64MOVQconst) + v.AuxInt = int64ToAuxInt(0) + return true + } +} +func rewriteValueAMD64_OpCtz16(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Ctz16 x) + // result: (BSFL (ORLconst [1<<16] x)) + for { + x := v_0 + v.reset(OpAMD64BSFL) + v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(1 << 16) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpCtz16NonZero(v *Value) bool { + v_0 := v.Args[0] + // match: (Ctz16NonZero x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTL x) + for { + x := v_0 + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTL) + v.AddArg(x) + return true + } + // match: (Ctz16NonZero x) + // cond: buildcfg.GOAMD64 < 3 + // result: (BSFL x) + for { + x := v_0 + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64BSFL) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpCtz32(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Ctz32 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTL x) + for { + x := v_0 + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTL) + v.AddArg(x) + return true + } + // match: (Ctz32 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (Select0 (BSFQ (BTSQconst [32] x))) + for { + x := v_0 + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v1 := b.NewValue0(v.Pos, OpAMD64BTSQconst, typ.UInt64) + v1.AuxInt = int8ToAuxInt(32) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpCtz32NonZero(v *Value) bool { + v_0 := v.Args[0] + // match: (Ctz32NonZero x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTL x) + for { + x := v_0 + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTL) + v.AddArg(x) + return true + } + // match: (Ctz32NonZero x) + // cond: buildcfg.GOAMD64 < 3 + // result: (BSFL x) + for { + x := v_0 + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64BSFL) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpCtz64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Ctz64 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTQ x) + for { + x := v_0 + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTQ) + v.AddArg(x) + return true + } + // match: (Ctz64 x) + // cond: buildcfg.GOAMD64 < 3 + // 
result: (CMOVQEQ (Select0 (BSFQ x)) (MOVQconst [64]) (Select1 (BSFQ x))) + for { + t := v.Type + x := v_0 + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64CMOVQEQ) + v0 := b.NewValue0(v.Pos, OpSelect0, t) + v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v3.AddArg(v1) + v.AddArg3(v0, v2, v3) + return true + } + return false +} +func rewriteValueAMD64_OpCtz64NonZero(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Ctz64NonZero x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTQ x) + for { + x := v_0 + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTQ) + v.AddArg(x) + return true + } + // match: (Ctz64NonZero x) + // cond: buildcfg.GOAMD64 < 3 + // result: (Select0 (BSFQ x)) + for { + x := v_0 + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v0.AddArg(x) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpCtz8(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Ctz8 x) + // result: (BSFL (ORLconst [1<<8 ] x)) + for { + x := v_0 + v.reset(OpAMD64BSFL) + v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(1 << 8) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpCtz8NonZero(v *Value) bool { + v_0 := v.Args[0] + // match: (Ctz8NonZero x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTL x) + for { + x := v_0 + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTL) + v.AddArg(x) + return true + } + // match: (Ctz8NonZero x) + // cond: buildcfg.GOAMD64 < 3 + // result: (BSFL x) + for { + x := v_0 + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64BSFL) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpDiv16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div16 [a] x y) + // result: (Select0 (DIVW [a] x y)) + for { + a := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) + v0.AuxInt = boolToAuxInt(a) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpDiv16u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div16u x y) + // result: (Select0 (DIVWU x y)) + for { + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpDiv32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div32 [a] x y) + // result: (Select0 (DIVL [a] x y)) + for { + a := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) + v0.AuxInt = boolToAuxInt(a) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpDiv32u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div32u x y) + // result: (Select0 (DIVLU x y)) + for { + x := v_0 + y := v_1 + 
v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32)) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpDiv64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div64 [a] x y) + // result: (Select0 (DIVQ [a] x y)) + for { + a := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64)) + v0.AuxInt = boolToAuxInt(a) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpDiv64u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div64u x y) + // result: (Select0 (DIVQU x y)) + for { + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpDiv8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div8 x y) + // result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y))) + for { + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) + v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpDiv8u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div8u x y) + // result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) + for { + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) + v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpDivFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (DivFloat32x16 x y) + // result: (VDIVPS512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VDIVPS512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpDivFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (DivFloat32x4 x y) + // result: (VDIVPS128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VDIVPS128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpDivFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (DivFloat32x8 x y) + // result: (VDIVPS256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VDIVPS256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpDivFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (DivFloat64x2 x y) + // result: (VDIVPD128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VDIVPD128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpDivFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (DivFloat64x4 x y) + // result: (VDIVPD256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VDIVPD256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpDivFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (DivFloat64x8 x y) + // result: (VDIVPD512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VDIVPD512) + v.AddArg2(y, 
x) + return true + } +} +func rewriteValueAMD64_OpEq16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Eq16 x y) + // result: (SETEQ (CMPW x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEq32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Eq32 x y) + // result: (SETEQ (CMPL x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEq32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Eq32F x y) + // result: (SETEQF (UCOMISS x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETEQF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEq64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Eq64 x y) + // result: (SETEQ (CMPQ x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEq64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Eq64F x y) + // result: (SETEQF (UCOMISD x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETEQF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEq8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Eq8 x y) + // result: (SETEQ (CMPB x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEqB(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (EqB x y) + // result: (SETEQ (CMPB x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEqPtr(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (EqPtr x y) + // result: (SETEQ (CMPQ x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEqualFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [0] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEqualFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (EqualFloat32x4 x y) + // result: (VCMPPS128 [0] y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPS128) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpEqualFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (EqualFloat32x8 x y) + // result: (VCMPPS256 [0] y x) + for { + x := v_0 + y := v_1 + 
v.reset(OpAMD64VCMPPS256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpEqualFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (EqualFloat64x2 x y) + // result: (VCMPPD128 [0] y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPD128) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpEqualFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (EqualFloat64x4 x y) + // result: (VCMPPD256 [0] y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPD256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpEqualFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [0] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEqualInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (EqualInt16x16 x y) + // result: (VPCMPEQW256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPCMPEQW256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpEqualInt16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualInt16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPEQW512 y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQW512, typ.Mask) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEqualInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (EqualInt16x8 x y) + // result: (VPCMPEQW128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPCMPEQW128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpEqualInt32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualInt32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPD512 [0] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEqualInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (EqualInt32x4 x y) + // result: (VPCMPEQD128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPCMPEQD128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpEqualInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (EqualInt32x8 x y) + // result: (VPCMPEQD256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPCMPEQD256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpEqualInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (EqualInt64x2 x y) + // result: (VPCMPEQQ128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPCMPEQQ128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpEqualInt64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (EqualInt64x4 x y) + // result: (VPCMPEQQ256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPCMPEQQ256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpEqualInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + 
b := v.Block + typ := &b.Func.Config.Types + // match: (EqualInt64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPEQQ512 y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQQ512, typ.Mask) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEqualInt8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (EqualInt8x16 x y) + // result: (VPCMPEQB128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPCMPEQB128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpEqualInt8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (EqualInt8x32 x y) + // result: (VPCMPEQB256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPCMPEQB256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpEqualInt8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualInt8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPB512 [0] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEqualUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualUint16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPUW256 [0] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEqualUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualUint16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPUW512 [0] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEqualUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualUint16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPUW128 [0] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEqualUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualUint32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [0] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEqualUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualUint32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPUD128 [0] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEqualUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: 
(EqualUint32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPUD256 [0] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEqualUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualUint64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPUQ128 [0] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEqualUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualUint64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPUQ256 [0] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEqualUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualUint64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPUQ512 [0] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEqualUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualUint8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPUB128 [0] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEqualUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualUint8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPUB256 [0] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEqualUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualUint8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [0] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpFMA(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FMA x y z) + // result: (VFMADD231SD z x y) + for { + x := v_0 + y := v_1 + z := v_2 + v.reset(OpAMD64VFMADD231SD) + v.AddArg3(z, x, y) + return true + } +} +func rewriteValueAMD64_OpFloor(v *Value) bool { + v_0 := v.Args[0] + // match: (Floor x) + // result: (ROUNDSD [1] x) + for { + x := v_0 + v.reset(OpAMD64ROUNDSD) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetG(v *Value) bool { + v_0 := v.Args[0] + // match: (GetG mem) + // cond: v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal + // result: (LoweredGetG mem) + for { + mem := v_0 
+ if !(v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal) { + break + } + v.reset(OpAMD64LoweredGetG) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpGreaterEqualFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterEqualFloat32x4 x y) + // result: (VCMPPS128 [5] y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPS128) + v.AuxInt = int8ToAuxInt(5) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterEqualFloat32x8 x y) + // result: (VCMPPS256 [5] y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPS256) + v.AuxInt = int8ToAuxInt(5) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterEqualFloat64x2 x y) + // result: (VCMPPD128 [5] y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPD128) + v.AuxInt = int8ToAuxInt(5) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterEqualFloat64x4 x y) + // result: (VCMPPD256 [5] y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPD256) + v.AuxInt = int8ToAuxInt(5) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPW256 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualInt16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPW512 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPW128 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) 
+ return true + } +} +func rewriteValueAMD64_OpGreaterEqualInt32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPD512 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPD128 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPD256 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPQ128 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualInt64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPQ256 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPQ512 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualInt8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPB128 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualInt8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPB256 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func 
rewriteValueAMD64_OpGreaterEqualInt8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPB512 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPUW256 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPUW512 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPUW128 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPUD128 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPUD256 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPUQ128 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func 
rewriteValueAMD64_OpGreaterEqualUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPUQ256 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPUQ512 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPUB128 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPUB256 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [6] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterFloat32x4 x y) + // result: (VCMPPS128 [6] y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPS128) + v.AuxInt = int8ToAuxInt(6) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpGreaterFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterFloat32x8 x y) + // result: (VCMPPS256 [6] y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPS256) + v.AuxInt = int8ToAuxInt(6) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpGreaterFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterFloat64x2 x y) + // result: (VCMPPD128 [6] y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPD128) + v.AuxInt = int8ToAuxInt(6) + v.AddArg2(y, x) + return true + } +} +func 
rewriteValueAMD64_OpGreaterFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterFloat64x4 x y) + // result: (VCMPPD256 [6] y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPD256) + v.AuxInt = int8ToAuxInt(6) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpGreaterFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [6] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterInt16x16 x y) + // result: (VPCMPGTW256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPCMPGTW256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpGreaterInt16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterInt16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPGTW512 y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTW512, typ.Mask) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterInt16x8 x y) + // result: (VPCMPGTW128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPCMPGTW128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpGreaterInt32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterInt32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPD512 [6] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterInt32x4 x y) + // result: (VPCMPGTD128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPCMPGTD128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpGreaterInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterInt32x8 x y) + // result: (VPCMPGTD256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPCMPGTD256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpGreaterInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterInt64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPGTQ128 y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQ128, typ.Mask) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterInt64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterInt64x4 x y) + // result: (VPCMPGTQ256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPCMPGTQ256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpGreaterInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterInt64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPGTQ512 y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := 
b.NewValue0(v.Pos, OpAMD64VPCMPGTQ512, typ.Mask) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterInt8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterInt8x16 x y) + // result: (VPCMPGTB128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPCMPGTB128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpGreaterInt8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterInt8x32 x y) + // result: (VPCMPGTB256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPCMPGTB256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpGreaterInt8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterInt8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPB512 [6] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPUW256 [6] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPUW512 [6] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPUW128 [6] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [6] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPUD128 [6] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPUD256 [6] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPCMPUD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPUQ128 [6] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPUQ256 [6] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPUQ512 [6] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPUB128 [6] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPUB256 [6] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [6] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpHasCPUFeature(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (HasCPUFeature {s}) + // result: (SETNE (CMPLconst [0] (LoweredHasCPUFeature {s}))) + for { + s := auxToSym(v.Aux) + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64LoweredHasCPUFeature, typ.UInt64) + v1.Aux = symToAux(s) + v0.AddArg(v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpIsInBounds(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (IsInBounds idx len) + // result: (SETB (CMPQ idx len)) + for { + idx := v_0 + len := v_1 + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(idx, len) + v.AddArg(v0) + return true + } +} +func 
rewriteValueAMD64_OpIsNanFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (IsNanFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [3] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpIsNanFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (IsNanFloat32x4 x y) + // result: (VCMPPS128 [3] y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPS128) + v.AuxInt = int8ToAuxInt(3) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpIsNanFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (IsNanFloat32x8 x y) + // result: (VCMPPS256 [3] y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPS256) + v.AuxInt = int8ToAuxInt(3) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpIsNanFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (IsNanFloat64x2 x y) + // result: (VCMPPD128 [3] y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPD128) + v.AuxInt = int8ToAuxInt(3) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpIsNanFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (IsNanFloat64x4 x y) + // result: (VCMPPD256 [3] y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPD256) + v.AuxInt = int8ToAuxInt(3) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpIsNanFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (IsNanFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [3] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpIsNonNil(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (IsNonNil p) + // result: (SETNE (TESTQ p p)) + for { + p := v_0 + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags) + v0.AddArg2(p, p) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpIsSliceInBounds(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (IsSliceInBounds idx len) + // result: (SETBE (CMPQ idx len)) + for { + idx := v_0 + len := v_1 + v.reset(OpAMD64SETBE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(idx, len) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLeq16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq16 x y) + // result: (SETLE (CMPW x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETLE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLeq16U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq16U x y) + // result: (SETBE (CMPW x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETBE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLeq32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq32 x y) + // result: (SETLE (CMPL x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETLE) + v0 := 
b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLeq32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq32F x y) + // result: (SETGEF (UCOMISS y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETGEF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLeq32U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq32U x y) + // result: (SETBE (CMPL x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETBE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLeq64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq64 x y) + // result: (SETLE (CMPQ x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETLE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLeq64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq64F x y) + // result: (SETGEF (UCOMISD y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETGEF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLeq64U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq64U x y) + // result: (SETBE (CMPQ x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETBE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLeq8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq8 x y) + // result: (SETLE (CMPB x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETLE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLeq8U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq8U x y) + // result: (SETBE (CMPB x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETBE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLess16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less16 x y) + // result: (SETL (CMPW x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETL) + v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLess16U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less16U x y) + // result: (SETB (CMPW x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLess32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less32 x y) + // result: (SETL (CMPL x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETL) + v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLess32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less32F x y) + // result: (SETGF (UCOMISS y x)) + for { + x := v_0 + 
y := v_1 + v.reset(OpAMD64SETGF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLess32U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less32U x y) + // result: (SETB (CMPL x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLess64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less64 x y) + // result: (SETL (CMPQ x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETL) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLess64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less64F x y) + // result: (SETGF (UCOMISD y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETGF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLess64U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less64U x y) + // result: (SETB (CMPQ x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLess8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less8 x y) + // result: (SETL (CMPB x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETL) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLess8U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less8U x y) + // result: (SETB (CMPB x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (LessEqualFloat32x4 x y) + // result: (VCMPPS128 [2] y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPS128) + v.AuxInt = int8ToAuxInt(2) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpLessEqualFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (LessEqualFloat32x8 x y) + // result: (VCMPPS256 [2] y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPS256) + v.AuxInt = int8ToAuxInt(2) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpLessEqualFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (LessEqualFloat64x2 x y) + // result: (VCMPPD128 [2] y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPD128) + v.AuxInt = int8ToAuxInt(2) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpLessEqualFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: 
(LessEqualFloat64x4 x y) + // result: (VCMPPD256 [2] y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPD256) + v.AuxInt = int8ToAuxInt(2) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpLessEqualFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualInt16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPW256 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualInt16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualInt16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPW512 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualInt16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPW128 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualInt32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualInt32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPD512 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualInt32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPD128 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualInt32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPD256 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualInt64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPQ128 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPCMPQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualInt64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualInt64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPQ256 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualInt64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPQ512 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualInt8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualInt8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPB128 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualInt8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualInt8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPB256 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualInt8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualInt8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPB512 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualUint16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPUW256 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualUint16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPUW512 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualUint16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPUW128 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + 
v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualUint32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualUint32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPUD128 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualUint32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPUD256 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualUint64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPUQ128 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualUint64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPUQ256 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualUint64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPUQ512 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualUint8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPUB128 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualUint8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPUB256 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} 
+func rewriteValueAMD64_OpLessEqualUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualUint8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [1] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (LessFloat32x4 x y) + // result: (VCMPPS128 [1] y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPS128) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpLessFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (LessFloat32x8 x y) + // result: (VCMPPS256 [1] y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPS256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpLessFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (LessFloat64x2 x y) + // result: (VCMPPD128 [1] y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPD128) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpLessFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (LessFloat64x4 x y) + // result: (VCMPPD256 [1] y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPD256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpLessFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [1] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessInt16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPW256 [1] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessInt16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessInt16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPW512 [1] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessInt16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPW128 [1] y x)) + for { + x := v_0 + y := v_1 + 
v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessInt32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessInt32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPD512 [1] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessInt32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPD128 [1] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessInt32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPD256 [1] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessInt64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPQ128 [1] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessInt64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessInt64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPQ256 [1] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessInt64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPQ512 [1] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessInt8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessInt8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPB128 [1] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessInt8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessInt8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPB256 [1] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + 
} +} +func rewriteValueAMD64_OpLessInt8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessInt8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPB512 [1] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessUint16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPUW256 [1] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessUint16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPUW512 [1] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessUint16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPUW128 [1] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessUint32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [1] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessUint32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPUD128 [1] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessUint32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPUD256 [1] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessUint64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPUQ128 [1] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := 
&b.Func.Config.Types + // match: (LessUint64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPUQ256 [1] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessUint64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPUQ512 [1] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessUint8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPUB128 [1] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessUint8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPUB256 [1] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessUint8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [1] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLoad(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Load ptr mem) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (MOVQload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64MOVQload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is32BitInt(t) + // result: (MOVLload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64MOVLload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is16BitInt(t) + // result: (MOVWload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64MOVWload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: (t.IsBoolean() || is8BitInt(t)) + // result: (MOVBload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.IsBoolean() || is8BitInt(t)) { + break + } + v.reset(OpAMD64MOVBload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is32BitFloat(t) + // result: (MOVSSload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is32BitFloat(t)) { + break + } + v.reset(OpAMD64MOVSSload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is64BitFloat(t) + // result: (MOVSDload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is64BitFloat(t)) { + break + } + v.reset(OpAMD64MOVSDload) + 
v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: t.Size() == 16 + // result: (VMOVDQUload128 ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.Size() == 16) { + break + } + v.reset(OpAMD64VMOVDQUload128) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: t.Size() == 32 + // result: (VMOVDQUload256 ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.Size() == 32) { + break + } + v.reset(OpAMD64VMOVDQUload256) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: t.Size() == 64 + // result: (VMOVDQUload512 ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.Size() == 64) { + break + } + v.reset(OpAMD64VMOVDQUload512) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpLocalAddr(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LocalAddr {sym} base mem) + // cond: t.Elem().HasPointers() + // result: (LEAQ {sym} (SPanchored base mem)) + for { + t := v.Type + sym := auxToSym(v.Aux) + base := v_0 + mem := v_1 + if !(t.Elem().HasPointers()) { + break + } + v.reset(OpAMD64LEAQ) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpSPanchored, typ.Uintptr) + v0.AddArg2(base, mem) + v.AddArg(v0) + return true + } + // match: (LocalAddr {sym} base _) + // cond: !t.Elem().HasPointers() + // result: (LEAQ {sym} base) + for { + t := v.Type + sym := auxToSym(v.Aux) + base := v_0 + if !(!t.Elem().HasPointers()) { + break + } + v.reset(OpAMD64LEAQ) + v.Aux = symToAux(sym) + v.AddArg(base) + return true + } + return false +} +func rewriteValueAMD64_OpLsh16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh16x16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh16x16 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpLsh16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh16x32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh16x32 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpLsh16x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh16x64 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) 
+ for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh16x64 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpLsh16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh16x8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh16x8 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpLsh32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh32x16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { break } - v.reset(OpAMD64CMOVWLT) - v.AddArg3(y, x, cond) + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh32x16 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpLsh32x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh32x32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh32x32 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpLsh32x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh32x64 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + 
v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh32x64 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpLsh32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh32x8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh32x8 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpLsh64x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh64x16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPWconst y [64]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh64x16 x y) + // cond: shiftIsBounded(v) + // result: (SHLQ x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLQ) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpLsh64x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh64x32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPLconst y [64]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh64x32 x y) + // cond: shiftIsBounded(v) + // result: (SHLQ x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLQ) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpLsh64x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh64x64 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPQconst y [64]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + 
v.AddArg2(v0, v1) + return true + } + // match: (Lsh64x64 x y) + // cond: shiftIsBounded(v) + // result: (SHLQ x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLQ) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpLsh64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh64x8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPBconst y [64]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh64x8 x y) + // cond: shiftIsBounded(v) + // result: (SHLQ x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLQ) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpLsh8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh8x16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh8x16 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpLsh8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh8x32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh8x32 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpLsh8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh8x64 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh8x64 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + 
v.reset(OpAMD64SHLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpLsh8x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh8x8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh8x8 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpMaskedAbsoluteInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAbsoluteInt16x16 x mask) + // result: (VPABSWMasked256 x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPABSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAbsoluteInt16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAbsoluteInt16x32 x mask) + // result: (VPABSWMasked512 x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPABSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAbsoluteInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAbsoluteInt16x8 x mask) + // result: (VPABSWMasked128 x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPABSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAbsoluteInt32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAbsoluteInt32x16 x mask) + // result: (VPABSDMasked512 x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPABSDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAbsoluteInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAbsoluteInt32x4 x mask) + // result: (VPABSDMasked128 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPABSDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAbsoluteInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAbsoluteInt32x8 x mask) + // result: (VPABSDMasked256 x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPABSDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAbsoluteInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAbsoluteInt64x2 x mask) + // result: 
(VPABSQMasked128 x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPABSQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAbsoluteInt64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAbsoluteInt64x4 x mask) + // result: (VPABSQMasked256 x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPABSQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAbsoluteInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAbsoluteInt64x8 x mask) + // result: (VPABSQMasked512 x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPABSQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAbsoluteInt8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAbsoluteInt8x16 x mask) + // result: (VPABSBMasked128 x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPABSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAbsoluteInt8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAbsoluteInt8x32 x mask) + // result: (VPABSBMasked256 x (VPMOVVec8x32ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPABSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAbsoluteInt8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAbsoluteInt8x64 x mask) + // result: (VPABSBMasked512 x (VPMOVVec8x64ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPABSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddFloat32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddFloat32x16 x y mask) + // result: (VADDPSMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VADDPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddFloat32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddFloat32x4 x y mask) + // result: (VADDPSMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VADDPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddFloat32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddFloat32x8 x y mask) + // result: (VADDPSMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VADDPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + 
v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddFloat64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddFloat64x2 x y mask) + // result: (VADDPDMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VADDPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddFloat64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddFloat64x4 x y mask) + // result: (VADDPDMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VADDPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddFloat64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddFloat64x8 x y mask) + // result: (VADDPDMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VADDPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddInt16x16 x y mask) + // result: (VPADDWMasked256 y x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddInt16x32 x y mask) + // result: (VPADDWMasked512 y x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddInt16x8 x y mask) + // result: (VPADDWMasked128 y x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddInt32x16 x y mask) + // result: (VPADDDMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddInt32x4 x y mask) + // result: (VPADDDMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + 
return true + } +} +func rewriteValueAMD64_OpMaskedAddInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddInt32x8 x y mask) + // result: (VPADDDMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddInt64x2 x y mask) + // result: (VPADDQMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddInt64x4 x y mask) + // result: (VPADDQMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddInt64x8 x y mask) + // result: (VPADDQMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddInt8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddInt8x16 x y mask) + // result: (VPADDBMasked128 y x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddInt8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddInt8x32 x y mask) + // result: (VPADDBMasked256 y x (VPMOVVec8x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddInt8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddInt8x64 x y mask) + // result: (VPADDBMasked512 y x (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddUint16x16 x y mask) + // result: (VPADDWMasked256 y x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func 
rewriteValueAMD64_OpMaskedAddUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddUint16x32 x y mask) + // result: (VPADDWMasked512 y x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddUint16x8 x y mask) + // result: (VPADDWMasked128 y x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddUint32x16 x y mask) + // result: (VPADDDMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddUint32x4 x y mask) + // result: (VPADDDMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddUint32x8 x y mask) + // result: (VPADDDMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddUint64x2 x y mask) + // result: (VPADDQMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddUint64x4 x y mask) + // result: (VPADDQMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddUint64x8 x y mask) + // result: (VPADDQMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddUint8x16(v 
*Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddUint8x16 x y mask) + // result: (VPADDBMasked128 y x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddUint8x32 x y mask) + // result: (VPADDBMasked256 y x (VPMOVVec8x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddUint8x64 x y mask) + // result: (VPADDBMasked512 y x (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndFloat32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndFloat32x16 x y mask) + // result: (VANDPSMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VANDPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndFloat32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndFloat32x4 x y mask) + // result: (VANDPSMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VANDPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndFloat32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndFloat32x8 x y mask) + // result: (VANDPSMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VANDPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndFloat64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndFloat64x2 x y mask) + // result: (VANDPDMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VANDPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndFloat64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndFloat64x4 x y mask) + // result: (VANDPDMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VANDPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndFloat64x8(v *Value) bool { + v_2 := v.Args[2] + 
v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndFloat64x8 x y mask) + // result: (VANDPDMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VANDPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndInt32x16 x y mask) + // result: (VPANDDMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPANDDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndInt32x4 x y mask) + // result: (VPANDDMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPANDDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndInt32x8 x y mask) + // result: (VPANDDMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPANDDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndInt64x2 x y mask) + // result: (VPANDQMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPANDQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndInt64x4 x y mask) + // result: (VPANDQMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPANDQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndInt64x8 x y mask) + // result: (VPANDQMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPANDQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndNotFloat32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndNotFloat32x16 x y mask) + // result: (VANDNPSMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VANDNPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndNotFloat32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b 
:= v.Block + // match: (MaskedAndNotFloat32x4 x y mask) + // result: (VANDNPSMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VANDNPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndNotFloat32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndNotFloat32x8 x y mask) + // result: (VANDNPSMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VANDNPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndNotFloat64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndNotFloat64x2 x y mask) + // result: (VANDNPDMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VANDNPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndNotFloat64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndNotFloat64x4 x y mask) + // result: (VANDNPDMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VANDNPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndNotFloat64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndNotFloat64x8 x y mask) + // result: (VANDNPDMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VANDNPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndNotInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndNotInt32x16 x y mask) + // result: (VPANDNDMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPANDNDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndNotInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndNotInt32x4 x y mask) + // result: (VPANDNDMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPANDNDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndNotInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndNotInt32x8 x y mask) + // result: (VPANDNDMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPANDNDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndNotInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + 
v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndNotInt64x2 x y mask) + // result: (VPANDNQMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPANDNQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndNotInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndNotInt64x4 x y mask) + // result: (VPANDNQMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPANDNQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndNotInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndNotInt64x8 x y mask) + // result: (VPANDNQMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPANDNQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndNotUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndNotUint32x16 x y mask) + // result: (VPANDNDMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPANDNDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndNotUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndNotUint32x4 x y mask) + // result: (VPANDNDMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPANDNDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndNotUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndNotUint32x8 x y mask) + // result: (VPANDNDMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPANDNDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndNotUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndNotUint64x2 x y mask) + // result: (VPANDNQMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPANDNQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndNotUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndNotUint64x4 x y mask) + // result: (VPANDNQMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPANDNQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndNotUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := 
v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndNotUint64x8 x y mask) + // result: (VPANDNQMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPANDNQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndUint32x16 x y mask) + // result: (VPANDDMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPANDDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndUint32x4 x y mask) + // result: (VPANDDMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPANDDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndUint32x8 x y mask) + // result: (VPANDDMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPANDDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndUint64x2 x y mask) + // result: (VPANDQMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPANDQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndUint64x4 x y mask) + // result: (VPANDQMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPANDQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndUint64x8 x y mask) + // result: (VPANDQMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPANDQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedApproximateReciprocalFloat32x16 x mask) + // result: (VRCP14PSMasked512 x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VRCP14PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b 
:= v.Block + // match: (MaskedApproximateReciprocalFloat32x4 x mask) + // result: (VRCP14PSMasked128 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VRCP14PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedApproximateReciprocalFloat32x8 x mask) + // result: (VRCP14PSMasked256 x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VRCP14PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedApproximateReciprocalFloat64x2 x mask) + // result: (VRCP14PDMasked128 x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VRCP14PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedApproximateReciprocalFloat64x4 x mask) + // result: (VRCP14PDMasked256 x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VRCP14PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedApproximateReciprocalFloat64x8 x mask) + // result: (VRCP14PDMasked512 x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VRCP14PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedApproximateReciprocalOfSqrtFloat32x16 x mask) + // result: (VRSQRT14PSMasked512 x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VRSQRT14PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedApproximateReciprocalOfSqrtFloat32x4 x mask) + // result: (VRSQRT14PSMasked128 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VRSQRT14PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedApproximateReciprocalOfSqrtFloat32x8 x mask) + // result: (VRSQRT14PSMasked256 x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VRSQRT14PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat64x2(v *Value) 
bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedApproximateReciprocalOfSqrtFloat64x2 x mask) + // result: (VRSQRT14PDMasked128 x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VRSQRT14PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedApproximateReciprocalOfSqrtFloat64x4 x mask) + // result: (VRSQRT14PDMasked256 x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VRSQRT14PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedApproximateReciprocalOfSqrtFloat64x8 x mask) + // result: (VRSQRT14PDMasked512 x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VRSQRT14PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAverageUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAverageUint16x16 x y mask) + // result: (VPAVGWMasked256 y x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPAVGWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAverageUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAverageUint16x32 x y mask) + // result: (VPAVGWMasked512 y x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPAVGWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAverageUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAverageUint16x8 x y mask) + // result: (VPAVGWMasked128 y x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPAVGWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAverageUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAverageUint8x16 x y mask) + // result: (VPAVGBMasked128 y x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPAVGBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAverageUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAverageUint8x32 x y mask) + // result: (VPAVGBMasked256 y x (VPMOVVec8x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPAVGBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func 
rewriteValueAMD64_OpMaskedAverageUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAverageUint8x64 x y mask) + // result: (VPAVGBMasked512 y x (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPAVGBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDivFloat32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDivFloat32x16 x y mask) + // result: (VDIVPSMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VDIVPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDivFloat32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDivFloat32x4 x y mask) + // result: (VDIVPSMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VDIVPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDivFloat32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDivFloat32x8 x y mask) + // result: (VDIVPSMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VDIVPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDivFloat64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDivFloat64x2 x y mask) + // result: (VDIVPDMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VDIVPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDivFloat64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDivFloat64x4 x y mask) + // result: (VDIVPDMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VDIVPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDivFloat64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDivFloat64x8 x y mask) + // result: (VDIVPDMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VDIVPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualFloat32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualFloat32x16 x y mask) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [0] y x (VPMOVVec32x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 
:= b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualFloat32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualFloat32x4 x y mask) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [0] y x (VPMOVVec32x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualFloat32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualFloat32x8 x y mask) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [0] y x (VPMOVVec32x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualFloat64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualFloat64x2 x y mask) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [0] y x (VPMOVVec64x2ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualFloat64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualFloat64x4 x y mask) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [0] y x (VPMOVVec64x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualFloat64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualFloat64x8 x y mask) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [0] y x (VPMOVVec64x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualInt16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPEQWMasked256 y x (VPMOVVec16x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPCMPEQWMasked256, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualInt16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPEQWMasked512 y x (VPMOVVec16x32ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQWMasked512, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualInt16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPEQWMasked128 y x (VPMOVVec16x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQWMasked128, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualInt32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [0] y x (VPMOVVec32x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualInt32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [0] y x (VPMOVVec32x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualInt32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [0] y x (VPMOVVec32x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualInt64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPEQQMasked128 y x (VPMOVVec64x2ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQQMasked128, typ.Mask) + 
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualInt64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPEQQMasked256 y x (VPMOVVec64x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQQMasked256, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualInt64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPEQQMasked512 y x (VPMOVVec64x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQQMasked512, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualInt8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualInt8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [0] y x (VPMOVVec8x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualInt8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualInt8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [0] y x (VPMOVVec8x32ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualInt8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualInt8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [0] y x (VPMOVVec8x64ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualUint16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [0] y x (VPMOVVec16x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualUint16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [0] y x (VPMOVVec16x32ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualUint16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [0] y x (VPMOVVec16x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualUint32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [0] y x (VPMOVVec32x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualUint32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [0] y x (VPMOVVec32x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualUint32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [0] y x (VPMOVVec32x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualUint64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [0] y x (VPMOVVec64x2ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := 
b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualUint64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [0] y x (VPMOVVec64x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualUint64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [0] y x (VPMOVVec64x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualUint8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [0] y x (VPMOVVec8x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualUint8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [0] y x (VPMOVVec8x32ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualUint8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [0] y x (VPMOVVec8x64ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualFloat32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualFloat32x16 x y mask) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [5] y x (VPMOVVec32x16ToM mask))) + 
for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualFloat32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualFloat32x4 x y mask) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [5] y x (VPMOVVec32x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualFloat32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualFloat32x8 x y mask) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [5] y x (VPMOVVec32x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualFloat64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualFloat64x2 x y mask) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [5] y x (VPMOVVec64x2ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualFloat64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualFloat64x4 x y mask) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [5] y x (VPMOVVec64x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualFloat64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualFloat64x8 x y mask) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [5] y x (VPMOVVec64x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := 
&b.Func.Config.Types + // match: (MaskedGreaterEqualInt16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [5] y x (VPMOVVec16x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [5] y x (VPMOVVec16x32ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [5] y x (VPMOVVec16x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [5] y x (VPMOVVec32x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [5] y x (VPMOVVec32x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [5] y x (VPMOVVec32x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func 
rewriteValueAMD64_OpMaskedGreaterEqualInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [5] y x (VPMOVVec64x2ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [5] y x (VPMOVVec64x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [5] y x (VPMOVVec64x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualInt8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [5] y x (VPMOVVec8x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualInt8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [5] y x (VPMOVVec8x32ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualInt8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [5] y x (VPMOVVec8x64ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [5] y x (VPMOVVec16x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [5] y x (VPMOVVec16x32ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [5] y x (VPMOVVec16x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [5] y x (VPMOVVec32x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [5] y x (VPMOVVec32x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [5] y x (VPMOVVec32x8ToM mask))) + for { + x := v_0 + y := v_1 + 
mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [5] y x (VPMOVVec64x2ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [5] y x (VPMOVVec64x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [5] y x (VPMOVVec64x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [5] y x (VPMOVVec8x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [5] y x (VPMOVVec8x32ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: 
(MaskedGreaterEqualUint8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [5] y x (VPMOVVec8x64ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterFloat32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterFloat32x16 x y mask) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [6] y x (VPMOVVec32x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterFloat32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterFloat32x4 x y mask) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [6] y x (VPMOVVec32x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterFloat32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterFloat32x8 x y mask) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [6] y x (VPMOVVec32x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterFloat64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterFloat64x2 x y mask) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [6] y x (VPMOVVec64x2ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterFloat64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterFloat64x4 x y mask) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [6] y x (VPMOVVec64x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterFloat64x8(v *Value) bool { + v_2 := 
v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterFloat64x8 x y mask) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [6] y x (VPMOVVec64x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPGTWMasked256 y x (VPMOVVec16x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTWMasked256, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPGTWMasked512 y x (VPMOVVec16x32ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTWMasked512, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPGTWMasked128 y x (VPMOVVec16x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTWMasked128, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [6] y x (VPMOVVec32x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [6] y x (VPMOVVec32x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterInt32x8(v *Value) bool { + v_2 := 
v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [6] y x (VPMOVVec32x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPGTQMasked128 y x (VPMOVVec64x2ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQMasked128, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPGTQMasked256 y x (VPMOVVec64x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQMasked256, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPGTQMasked512 y x (VPMOVVec64x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQMasked512, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterInt8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [6] y x (VPMOVVec8x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterInt8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [6] y x (VPMOVVec8x32ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterInt8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := 
v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [6] y x (VPMOVVec8x64ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [6] y x (VPMOVVec16x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [6] y x (VPMOVVec16x32ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [6] y x (VPMOVVec16x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [6] y x (VPMOVVec32x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [6] y x (VPMOVVec32x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return 
true + } +} +func rewriteValueAMD64_OpMaskedGreaterUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [6] y x (VPMOVVec32x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [6] y x (VPMOVVec64x2ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [6] y x (VPMOVVec64x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [6] y x (VPMOVVec64x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [6] y x (VPMOVVec8x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [6] y x (VPMOVVec8x32ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [6] y x (VPMOVVec8x64ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedIsNanFloat32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedIsNanFloat32x16 x y mask) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [3] y x (VPMOVVec32x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedIsNanFloat32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedIsNanFloat32x4 x y mask) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [3] y x (VPMOVVec32x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedIsNanFloat32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedIsNanFloat32x8 x y mask) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [3] y x (VPMOVVec32x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedIsNanFloat64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedIsNanFloat64x2 x y mask) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [3] y x (VPMOVVec64x2ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedIsNanFloat64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedIsNanFloat64x4 x y mask) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [3] y x (VPMOVVec64x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, 
OpAMD64VCMPPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedIsNanFloat64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedIsNanFloat64x8 x y mask) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [3] y x (VPMOVVec64x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualFloat32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualFloat32x16 x y mask) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [2] y x (VPMOVVec32x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualFloat32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualFloat32x4 x y mask) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [2] y x (VPMOVVec32x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualFloat32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualFloat32x8 x y mask) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [2] y x (VPMOVVec32x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualFloat64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualFloat64x2 x y mask) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [2] y x (VPMOVVec64x2ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualFloat64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualFloat64x4 x y mask) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [2] y x (VPMOVVec64x4ToM 
mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualFloat64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualFloat64x8 x y mask) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [2] y x (VPMOVVec64x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualInt16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [2] y x (VPMOVVec16x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualInt16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [2] y x (VPMOVVec16x32ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualInt16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [2] y x (VPMOVVec16x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualInt32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [2] y x (VPMOVVec32x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // 
match: (MaskedLessEqualInt32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [2] y x (VPMOVVec32x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualInt32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [2] y x (VPMOVVec32x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualInt64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [2] y x (VPMOVVec64x2ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualInt64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [2] y x (VPMOVVec64x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualInt64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [2] y x (VPMOVVec64x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualInt8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualInt8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [2] y x (VPMOVVec8x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualInt8x32(v *Value) bool { + v_2 := v.Args[2] 
+ v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualInt8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [2] y x (VPMOVVec8x32ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualInt8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualInt8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [2] y x (VPMOVVec8x64ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualUint16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [2] y x (VPMOVVec16x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualUint16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [2] y x (VPMOVVec16x32ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualUint16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [2] y x (VPMOVVec16x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualUint32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [2] y x (VPMOVVec32x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) 
+ v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualUint32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [2] y x (VPMOVVec32x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualUint32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [2] y x (VPMOVVec32x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualUint64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [2] y x (VPMOVVec64x2ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualUint64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [2] y x (VPMOVVec64x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualUint64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [2] y x (VPMOVVec64x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualUint8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [2] y x (VPMOVVec8x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) + v0.AuxInt = 
int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualUint8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [2] y x (VPMOVVec8x32ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualUint8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [2] y x (VPMOVVec8x64ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessFloat32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessFloat32x16 x y mask) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [1] y x (VPMOVVec32x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessFloat32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessFloat32x4 x y mask) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [1] y x (VPMOVVec32x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessFloat32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessFloat32x8 x y mask) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [1] y x (VPMOVVec32x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessFloat64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessFloat64x2 x y mask) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [1] y x (VPMOVVec64x2ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + 
v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessFloat64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessFloat64x4 x y mask) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [1] y x (VPMOVVec64x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessFloat64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessFloat64x8 x y mask) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [1] y x (VPMOVVec64x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessInt16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [1] y x (VPMOVVec16x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessInt16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [1] y x (VPMOVVec16x32ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessInt16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [1] y x (VPMOVVec16x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessInt32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [1] y x (VPMOVVec32x16ToM 
mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessInt32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [1] y x (VPMOVVec32x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessInt32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [1] y x (VPMOVVec32x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessInt64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [1] y x (VPMOVVec64x2ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessInt64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [1] y x (VPMOVVec64x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessInt64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [1] y x (VPMOVVec64x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessInt8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessInt8x16 x y mask) + // result: (VPMOVMToVec8x16 
(VPCMPBMasked128 [1] y x (VPMOVVec8x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessInt8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessInt8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [1] y x (VPMOVVec8x32ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessInt8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessInt8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [1] y x (VPMOVVec8x64ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessUint16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [1] y x (VPMOVVec16x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessUint16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [1] y x (VPMOVVec16x32ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessUint16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [1] y x (VPMOVVec16x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: 
(MaskedLessUint32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [1] y x (VPMOVVec32x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessUint32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [1] y x (VPMOVVec32x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessUint32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [1] y x (VPMOVVec32x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessUint64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [1] y x (VPMOVVec64x2ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessUint64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [1] y x (VPMOVVec64x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessUint64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [1] y x (VPMOVVec64x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] 
+ b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessUint8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [1] y x (VPMOVVec8x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessUint8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [1] y x (VPMOVVec8x32ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessUint8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [1] y x (VPMOVVec8x64ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxFloat32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxFloat32x16 x y mask) + // result: (VMAXPSMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VMAXPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxFloat32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxFloat32x4 x y mask) + // result: (VMAXPSMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VMAXPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxFloat32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxFloat32x8 x y mask) + // result: (VMAXPSMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VMAXPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxFloat64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxFloat64x2 x y mask) + // result: (VMAXPDMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VMAXPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxFloat64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 
:= v.Args[0] + b := v.Block + // match: (MaskedMaxFloat64x4 x y mask) + // result: (VMAXPDMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VMAXPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxFloat64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxFloat64x8 x y mask) + // result: (VMAXPDMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VMAXPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxInt16x16 x y mask) + // result: (VPMAXSWMasked256 y x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMAXSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxInt16x32 x y mask) + // result: (VPMAXSWMasked512 y x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMAXSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxInt16x8 x y mask) + // result: (VPMAXSWMasked128 y x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMAXSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxInt32x16 x y mask) + // result: (VPMAXSDMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMAXSDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxInt32x4 x y mask) + // result: (VPMAXSDMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMAXSDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxInt32x8 x y mask) + // result: (VPMAXSDMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMAXSDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // 
match: (MaskedMaxInt64x2 x y mask) + // result: (VPMAXSQMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMAXSQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxInt64x4 x y mask) + // result: (VPMAXSQMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMAXSQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxInt64x8 x y mask) + // result: (VPMAXSQMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMAXSQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxInt8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxInt8x16 x y mask) + // result: (VPMAXSBMasked128 y x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMAXSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxInt8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxInt8x32 x y mask) + // result: (VPMAXSBMasked256 y x (VPMOVVec8x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMAXSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxInt8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxInt8x64 x y mask) + // result: (VPMAXSBMasked512 y x (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMAXSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxUint16x16 x y mask) + // result: (VPMAXUWMasked256 y x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMAXUWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxUint16x32 x y mask) + // result: (VPMAXUWMasked512 y x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMAXUWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxUint16x8 x y mask) + 
// result: (VPMAXUWMasked128 y x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMAXUWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxUint32x16 x y mask) + // result: (VPMAXUDMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMAXUDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxUint32x4 x y mask) + // result: (VPMAXUDMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMAXUDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxUint32x8 x y mask) + // result: (VPMAXUDMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMAXUDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxUint64x2 x y mask) + // result: (VPMAXUQMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMAXUQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxUint64x4 x y mask) + // result: (VPMAXUQMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMAXUQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxUint64x8 x y mask) + // result: (VPMAXUQMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMAXUQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxUint8x16 x y mask) + // result: (VPMAXUBMasked128 y x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMAXUBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxUint8x32 x y mask) + // result: (VPMAXUBMasked256 y 
x (VPMOVVec8x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMAXUBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxUint8x64 x y mask) + // result: (VPMAXUBMasked512 y x (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMAXUBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinFloat32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinFloat32x16 x y mask) + // result: (VMINPSMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VMINPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinFloat32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinFloat32x4 x y mask) + // result: (VMINPSMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VMINPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinFloat32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinFloat32x8 x y mask) + // result: (VMINPSMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VMINPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinFloat64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinFloat64x2 x y mask) + // result: (VMINPDMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VMINPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinFloat64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinFloat64x4 x y mask) + // result: (VMINPDMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VMINPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinFloat64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinFloat64x8 x y mask) + // result: (VMINPDMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VMINPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinInt16x16 x y mask) + // result: (VPMINSWMasked256 y x (VPMOVVec16x16ToM mask)) + 
for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMINSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinInt16x32 x y mask) + // result: (VPMINSWMasked512 y x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMINSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinInt16x8 x y mask) + // result: (VPMINSWMasked128 y x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMINSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinInt32x16 x y mask) + // result: (VPMINSDMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMINSDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinInt32x4 x y mask) + // result: (VPMINSDMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMINSDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinInt32x8 x y mask) + // result: (VPMINSDMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMINSDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinInt64x2 x y mask) + // result: (VPMINSQMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMINSQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinInt64x4 x y mask) + // result: (VPMINSQMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMINSQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinInt64x8 x y mask) + // result: (VPMINSQMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 
+ v.reset(OpAMD64VPMINSQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinInt8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinInt8x16 x y mask) + // result: (VPMINSBMasked128 y x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMINSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinInt8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinInt8x32 x y mask) + // result: (VPMINSBMasked256 y x (VPMOVVec8x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMINSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinInt8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinInt8x64 x y mask) + // result: (VPMINSBMasked512 y x (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMINSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinUint16x16 x y mask) + // result: (VPMINUWMasked256 y x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMINUWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinUint16x32 x y mask) + // result: (VPMINUWMasked512 y x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMINUWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinUint16x8 x y mask) + // result: (VPMINUWMasked128 y x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMINUWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinUint32x16 x y mask) + // result: (VPMINUDMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMINUDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinUint32x4 x y mask) + // result: (VPMINUDMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + 
v.reset(OpAMD64VPMINUDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinUint32x8 x y mask) + // result: (VPMINUDMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMINUDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinUint64x2 x y mask) + // result: (VPMINUQMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMINUQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinUint64x4 x y mask) + // result: (VPMINUQMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMINUQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinUint64x8 x y mask) + // result: (VPMINUQMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMINUQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinUint8x16 x y mask) + // result: (VPMINUBMasked128 y x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMINUBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinUint8x32 x y mask) + // result: (VPMINUBMasked256 y x (VPMOVVec8x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMINUBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinUint8x64 x y mask) + // result: (VPMINUBMasked512 y x (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMINUBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulByPowOf2Float32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulByPowOf2Float32x16 x y mask) + // result: (VSCALEFPSMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + 
v.reset(OpAMD64VSCALEFPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulByPowOf2Float32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulByPowOf2Float32x4 x y mask) + // result: (VSCALEFPSMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VSCALEFPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulByPowOf2Float32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulByPowOf2Float32x8 x y mask) + // result: (VSCALEFPSMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VSCALEFPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulByPowOf2Float64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulByPowOf2Float64x2 x y mask) + // result: (VSCALEFPDMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VSCALEFPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulByPowOf2Float64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulByPowOf2Float64x4 x y mask) + // result: (VSCALEFPDMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VSCALEFPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulByPowOf2Float64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulByPowOf2Float64x8 x y mask) + // result: (VSCALEFPDMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VSCALEFPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulEvenWidenInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulEvenWidenInt64x2 x y mask) + // result: (VPMULDQMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULDQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulEvenWidenInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulEvenWidenInt64x4 x y mask) + // result: (VPMULDQMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULDQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulEvenWidenInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: 
(MaskedMulEvenWidenInt64x8 x y mask) + // result: (VPMULDQMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULDQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulEvenWidenUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulEvenWidenUint64x2 x y mask) + // result: (VPMULUDQMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULUDQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulEvenWidenUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulEvenWidenUint64x4 x y mask) + // result: (VPMULUDQMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULUDQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulEvenWidenUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulEvenWidenUint64x8 x y mask) + // result: (VPMULUDQMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULUDQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulFloat32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulFloat32x16 x y mask) + // result: (VMULPSMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VMULPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulFloat32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulFloat32x4 x y mask) + // result: (VMULPSMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VMULPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulFloat32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulFloat32x8 x y mask) + // result: (VMULPSMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VMULPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulFloat64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulFloat64x2 x y mask) + // result: (VMULPDMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VMULPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulFloat64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 
:= v.Args[0] + b := v.Block + // match: (MaskedMulFloat64x4 x y mask) + // result: (VMULPDMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VMULPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulFloat64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulFloat64x8 x y mask) + // result: (VMULPDMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VMULPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulHighInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulHighInt16x16 x y mask) + // result: (VPMULHWMasked256 y x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULHWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulHighInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulHighInt16x32 x y mask) + // result: (VPMULHWMasked512 y x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULHWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulHighInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulHighInt16x8 x y mask) + // result: (VPMULHWMasked128 y x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULHWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulHighUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulHighUint16x16 x y mask) + // result: (VPMULHUWMasked256 y x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULHUWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulHighUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulHighUint16x32 x y mask) + // result: (VPMULHUWMasked512 y x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULHUWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulHighUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulHighUint16x8 x y mask) + // result: (VPMULHUWMasked128 y x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULHUWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulLowInt16x16(v *Value) bool { + v_2 := 
v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulLowInt16x16 x y mask) + // result: (VPMULLWMasked256 y x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulLowInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulLowInt16x32 x y mask) + // result: (VPMULLWMasked512 y x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulLowInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulLowInt16x8 x y mask) + // result: (VPMULLWMasked128 y x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulLowInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulLowInt32x16 x y mask) + // result: (VPMULLDMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulLowInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulLowInt32x4 x y mask) + // result: (VPMULLDMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulLowInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulLowInt32x8 x y mask) + // result: (VPMULLDMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulLowInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulLowInt64x2 x y mask) + // result: (VPMULLQMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulLowInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulLowInt64x4 x y mask) + // result: (VPMULLQMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulLowInt64x8(v *Value) bool { 
+ v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulLowInt64x8 x y mask) + // result: (VPMULLQMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualFloat32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualFloat32x16 x y mask) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [4] y x (VPMOVVec32x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualFloat32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualFloat32x4 x y mask) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [4] y x (VPMOVVec32x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualFloat32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualFloat32x8 x y mask) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [4] y x (VPMOVVec32x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualFloat64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualFloat64x2 x y mask) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [4] y x (VPMOVVec64x2ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualFloat64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualFloat64x4 x y mask) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [4] y x (VPMOVVec64x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualFloat64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := 
v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualFloat64x8 x y mask) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [4] y x (VPMOVVec64x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualInt16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [4] y x (VPMOVVec16x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualInt16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [4] y x (VPMOVVec16x32ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualInt16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [4] y x (VPMOVVec16x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualInt32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [4] y x (VPMOVVec32x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualInt32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [4] y x (VPMOVVec32x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func 
rewriteValueAMD64_OpMaskedNotEqualInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualInt32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [4] y x (VPMOVVec32x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualInt64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [4] y x (VPMOVVec64x2ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualInt64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [4] y x (VPMOVVec64x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualInt64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [4] y x (VPMOVVec64x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualInt8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualInt8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [4] y x (VPMOVVec8x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualInt8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualInt8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [4] y x (VPMOVVec8x32ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + 
v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualInt8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualInt8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [4] y x (VPMOVVec8x64ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualUint16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [4] y x (VPMOVVec16x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualUint16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [4] y x (VPMOVVec16x32ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualUint16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [4] y x (VPMOVVec16x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualUint32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [4] y x (VPMOVVec32x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualUint32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [4] y x (VPMOVVec32x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPCMPUDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualUint32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [4] y x (VPMOVVec32x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualUint64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [4] y x (VPMOVVec64x2ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualUint64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [4] y x (VPMOVVec64x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualUint64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [4] y x (VPMOVVec64x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualUint8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [4] y x (VPMOVVec8x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualUint8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [4] y x (VPMOVVec8x32ToM mask))) + for 
{ + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualUint8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [4] y x (VPMOVVec8x64ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedOrFloat32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedOrFloat32x16 x y mask) + // result: (VORPSMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VORPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedOrFloat32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedOrFloat32x4 x y mask) + // result: (VORPSMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VORPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedOrFloat32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedOrFloat32x8 x y mask) + // result: (VORPSMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VORPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedOrFloat64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedOrFloat64x2 x y mask) + // result: (VORPDMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VORPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedOrFloat64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedOrFloat64x4 x y mask) + // result: (VORPDMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VORPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedOrFloat64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedOrFloat64x8 x y mask) + // result: (VORPDMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VORPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func 
rewriteValueAMD64_OpMaskedOrInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedOrInt32x16 x y mask) + // result: (VPORDMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPORDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedOrInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedOrInt32x4 x y mask) + // result: (VPORDMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPORDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedOrInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedOrInt32x8 x y mask) + // result: (VPORDMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPORDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedOrInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedOrInt64x2 x y mask) + // result: (VPORQMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPORQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedOrInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedOrInt64x4 x y mask) + // result: (VPORQMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPORQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedOrInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedOrInt64x8 x y mask) + // result: (VPORQMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPORQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedOrUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedOrUint32x16 x y mask) + // result: (VPORDMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPORDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedOrUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedOrUint32x4 x y mask) + // result: (VPORDMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPORDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedOrUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := 
v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedOrUint32x8 x y mask) + // result: (VPORDMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPORDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedOrUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedOrUint64x2 x y mask) + // result: (VPORQMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPORQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedOrUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedOrUint64x4 x y mask) + // result: (VPORQMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPORQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedOrUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedOrUint64x8 x y mask) + // result: (VPORQMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPORQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPopCountInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPopCountInt16x16 x mask) + // result: (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPopCountInt16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPopCountInt16x32 x mask) + // result: (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPopCountInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPopCountInt16x8 x mask) + // result: (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPopCountInt32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPopCountInt32x16 x mask) + // result: (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPopCountInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPopCountInt32x4 x mask) + // result: (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + 
mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPopCountInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPopCountInt32x8 x mask) + // result: (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPopCountInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPopCountInt64x2 x mask) + // result: (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPopCountInt64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPopCountInt64x4 x mask) + // result: (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPopCountInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPopCountInt64x8 x mask) + // result: (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPopCountInt8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPopCountInt8x16 x mask) + // result: (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPopCountInt8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPopCountInt8x32 x mask) + // result: (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPopCountInt8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPopCountInt8x64 x mask) + // result: (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPopCountUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPopCountUint16x16 x mask) + // result: (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPopCountUint16x32(v *Value) bool { + 
v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPopCountUint16x32 x mask) + // result: (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPopCountUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPopCountUint16x8 x mask) + // result: (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPopCountUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPopCountUint32x16 x mask) + // result: (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPopCountUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPopCountUint32x4 x mask) + // result: (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPopCountUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPopCountUint32x8 x mask) + // result: (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPopCountUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPopCountUint64x2 x mask) + // result: (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPopCountUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPopCountUint64x4 x mask) + // result: (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPopCountUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPopCountUint64x8 x mask) + // result: (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPopCountUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPopCountUint8x16 x mask) + // result: (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked128) + v0 
:= b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPopCountUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPopCountUint8x32 x mask) + // result: (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPopCountUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPopCountUint8x64 x mask) + // result: (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedAddInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedAddInt16x16 x y mask) + // result: (VPADDSWMasked256 y x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedAddInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedAddInt16x32 x y mask) + // result: (VPADDSWMasked512 y x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedAddInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedAddInt16x8 x y mask) + // result: (VPADDSWMasked128 y x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedAddInt8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedAddInt8x16 x y mask) + // result: (VPADDSBMasked128 y x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedAddInt8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedAddInt8x32 x y mask) + // result: (VPADDSBMasked256 y x (VPMOVVec8x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedAddInt8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedAddInt8x64 x y mask) + // result: (VPADDSBMasked512 y x (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + 
v.reset(OpAMD64VPADDSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedAddUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedAddUint16x16 x y mask) + // result: (VPADDSWMasked256 y x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedAddUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedAddUint16x32 x y mask) + // result: (VPADDSWMasked512 y x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedAddUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedAddUint16x8 x y mask) + // result: (VPADDSWMasked128 y x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedAddUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedAddUint8x16 x y mask) + // result: (VPADDSBMasked128 y x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedAddUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedAddUint8x32 x y mask) + // result: (VPADDSBMasked256 y x (VPMOVVec8x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedAddUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedAddUint8x64 x y mask) + // result: (VPADDSBMasked512 y x (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedSubInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedSubInt16x16 x y mask) + // result: (VPSUBSWMasked256 y x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedSubInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: 
(MaskedSaturatedSubInt16x32 x y mask) + // result: (VPSUBSWMasked512 y x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedSubInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedSubInt16x8 x y mask) + // result: (VPSUBSWMasked128 y x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedSubInt8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedSubInt8x16 x y mask) + // result: (VPSUBSBMasked128 y x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedSubInt8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedSubInt8x32 x y mask) + // result: (VPSUBSBMasked256 y x (VPMOVVec8x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedSubInt8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedSubInt8x64 x y mask) + // result: (VPSUBSBMasked512 y x (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedSubUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedSubUint16x16 x y mask) + // result: (VPSUBSWMasked256 y x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedSubUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedSubUint16x32 x y mask) + // result: (VPSUBSWMasked512 y x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedSubUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedSubUint16x8 x y mask) + // result: (VPSUBSWMasked128 y x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func 
rewriteValueAMD64_OpMaskedSaturatedSubUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedSubUint8x16 x y mask) + // result: (VPSUBSBMasked128 y x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedSubUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedSubUint8x32 x y mask) + // result: (VPSUBSBMasked256 y x (VPMOVVec8x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedSubUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedSubUint8x64 x y mask) + // result: (VPSUBSBMasked512 y x (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSqrtFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSqrtFloat32x16 x mask) + // result: (VSQRTPSMasked512 x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VSQRTPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSqrtFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSqrtFloat32x4 x mask) + // result: (VSQRTPSMasked128 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VSQRTPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSqrtFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSqrtFloat32x8 x mask) + // result: (VSQRTPSMasked256 x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VSQRTPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSqrtFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSqrtFloat64x2 x mask) + // result: (VSQRTPDMasked128 x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VSQRTPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSqrtFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSqrtFloat64x4 x mask) + // result: (VSQRTPDMasked256 x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VSQRTPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSqrtFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: 
(MaskedSqrtFloat64x8 x mask) + // result: (VSQRTPDMasked512 x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VSQRTPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubFloat32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubFloat32x16 x y mask) + // result: (VADDPSMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VADDPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubFloat32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubFloat32x4 x y mask) + // result: (VADDPSMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VADDPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubFloat32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubFloat32x8 x y mask) + // result: (VADDPSMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VADDPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubFloat64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubFloat64x2 x y mask) + // result: (VADDPDMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VADDPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubFloat64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubFloat64x4 x y mask) + // result: (VADDPDMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VADDPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubFloat64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubFloat64x8 x y mask) + // result: (VADDPDMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VADDPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubInt16x16 x y mask) + // result: (VPSUBWMasked256 y x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubInt16x32 x y mask) + // result: 
(VPSUBWMasked512 y x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubInt16x8 x y mask) + // result: (VPSUBWMasked128 y x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubInt32x16 x y mask) + // result: (VPSUBDMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubInt32x4 x y mask) + // result: (VPSUBDMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubInt32x8 x y mask) + // result: (VPSUBDMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubInt64x2 x y mask) + // result: (VPSUBQMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubInt64x4 x y mask) + // result: (VPSUBQMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubInt64x8 x y mask) + // result: (VPSUBQMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubInt8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubInt8x16 x y mask) + // result: (VPSUBBMasked128 y x (VPMOVVec8x16ToM mask)) + for { + x := 
v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubInt8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubInt8x32 x y mask) + // result: (VPSUBBMasked256 y x (VPMOVVec8x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubInt8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubInt8x64 x y mask) + // result: (VPSUBBMasked512 y x (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubUint16x16 x y mask) + // result: (VPSUBWMasked256 y x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubUint16x32 x y mask) + // result: (VPSUBWMasked512 y x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubUint16x8 x y mask) + // result: (VPSUBWMasked128 y x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubUint32x16 x y mask) + // result: (VPSUBDMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubUint32x4 x y mask) + // result: (VPSUBDMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubUint32x8 x y mask) + // result: (VPSUBDMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + 
v.reset(OpAMD64VPSUBDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubUint64x2 x y mask) + // result: (VPSUBQMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubUint64x4 x y mask) + // result: (VPSUBQMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubUint64x8 x y mask) + // result: (VPSUBQMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubUint8x16 x y mask) + // result: (VPSUBBMasked128 y x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubUint8x32 x y mask) + // result: (VPSUBBMasked256 y x (VPMOVVec8x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubUint8x64 x y mask) + // result: (VPSUBBMasked512 y x (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedXorFloat32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedXorFloat32x16 x y mask) + // result: (VXORPSMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VXORPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedXorFloat32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedXorFloat32x4 x y mask) + // result: (VXORPSMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VXORPSMasked128) + v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedXorFloat32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedXorFloat32x8 x y mask) + // result: (VXORPSMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VXORPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedXorFloat64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedXorFloat64x2 x y mask) + // result: (VXORPDMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VXORPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedXorFloat64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedXorFloat64x4 x y mask) + // result: (VXORPDMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VXORPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedXorFloat64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedXorFloat64x8 x y mask) + // result: (VXORPDMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VXORPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedXorInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedXorInt32x16 x y mask) + // result: (VPXORDMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedXorInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedXorInt32x4 x y mask) + // result: (VPXORDMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedXorInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedXorInt32x8 x y mask) + // result: (VPXORDMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedXorInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedXorInt64x2 x y mask) + // result: (VPXORQMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, 
types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedXorInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedXorInt64x4 x y mask) + // result: (VPXORQMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedXorInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedXorInt64x8 x y mask) + // result: (VPXORQMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedXorUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedXorUint32x16 x y mask) + // result: (VPXORDMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedXorUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedXorUint32x4 x y mask) + // result: (VPXORDMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedXorUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedXorUint32x8 x y mask) + // result: (VPXORDMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedXorUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedXorUint64x2 x y mask) + // result: (VPXORQMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedXorUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedXorUint64x4 x y mask) + // result: (VPXORQMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedXorUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedXorUint64x8 x y mask) + // result: (VPXORQMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, 
x, v0) + return true + } +} +func rewriteValueAMD64_OpMax32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Max32F x y) + // result: (Neg32F (Min32F (Neg32F x) (Neg32F y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpNeg32F) + v.Type = t + v0 := b.NewValue0(v.Pos, OpMin32F, t) + v1 := b.NewValue0(v.Pos, OpNeg32F, t) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpNeg32F, t) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMax64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Max64F x y) + // result: (Neg64F (Min64F (Neg64F x) (Neg64F y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpNeg64F) + v.Type = t + v0 := b.NewValue0(v.Pos, OpMin64F, t) + v1 := b.NewValue0(v.Pos, OpNeg64F, t) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpNeg64F, t) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaxFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxFloat32x16 x y) + // result: (VMAXPS512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VMAXPS512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxFloat32x4 x y) + // result: (VMAXPS128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VMAXPS128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxFloat32x8 x y) + // result: (VMAXPS256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VMAXPS256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxFloat64x2 x y) + // result: (VMAXPD128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VMAXPD128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxFloat64x4 x y) + // result: (VMAXPD256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VMAXPD256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxFloat64x8 x y) + // result: (VMAXPD512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VMAXPD512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxInt16x16 x y) + // result: (VPMAXSW256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMAXSW256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxInt16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxInt16x32 x y) + // result: (VPMAXSW512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMAXSW512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxInt16x8 x y) + // result: (VPMAXSW128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMAXSW128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxInt32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxInt32x16 x y) + // result: (VPMAXSD512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMAXSD512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := 
v.Args[0] + // match: (MaxInt32x4 x y) + // result: (VPMAXSD128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMAXSD128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxInt32x8 x y) + // result: (VPMAXSD256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMAXSD256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxInt64x2 x y) + // result: (VPMAXSQ128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMAXSQ128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxInt64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxInt64x4 x y) + // result: (VPMAXSQ256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMAXSQ256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxInt64x8 x y) + // result: (VPMAXSQ512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMAXSQ512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxInt8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxInt8x16 x y) + // result: (VPMAXSB128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMAXSB128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxInt8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxInt8x32 x y) + // result: (VPMAXSB256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMAXSB256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxInt8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxInt8x64 x y) + // result: (VPMAXSB512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMAXSB512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxUint16x16 x y) + // result: (VPMAXUW256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMAXUW256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxUint16x32 x y) + // result: (VPMAXUW512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMAXUW512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxUint16x8 x y) + // result: (VPMAXUW128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMAXUW128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxUint32x16 x y) + // result: (VPMAXUD512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMAXUD512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxUint32x4 x y) + // result: (VPMAXUD128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMAXUD128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxUint32x8 x y) + // result: (VPMAXUD256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMAXUD256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxUint64x2 x y) + // 
result: (VPMAXUQ128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMAXUQ128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxUint64x4 x y) + // result: (VPMAXUQ256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMAXUQ256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxUint64x8 x y) + // result: (VPMAXUQ512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMAXUQ512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxUint8x16 x y) + // result: (VPMAXUB128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMAXUB128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxUint8x32 x y) + // result: (VPMAXUB256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMAXUB256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxUint8x64 x y) + // result: (VPMAXUB512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMAXUB512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMin32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Min32F x y) + // result: (POR (MINSS (MINSS x y) x) (MINSS x y)) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpAMD64POR) + v0 := b.NewValue0(v.Pos, OpAMD64MINSS, t) + v1 := b.NewValue0(v.Pos, OpAMD64MINSS, t) + v1.AddArg2(x, y) + v0.AddArg2(v1, x) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueAMD64_OpMin64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Min64F x y) + // result: (POR (MINSD (MINSD x y) x) (MINSD x y)) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpAMD64POR) + v0 := b.NewValue0(v.Pos, OpAMD64MINSD, t) + v1 := b.NewValue0(v.Pos, OpAMD64MINSD, t) + v1.AddArg2(x, y) + v0.AddArg2(v1, x) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueAMD64_OpMinFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinFloat32x16 x y) + // result: (VMINPS512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VMINPS512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMinFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinFloat32x4 x y) + // result: (VMINPS128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VMINPS128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMinFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinFloat32x8 x y) + // result: (VMINPS256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VMINPS256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMinFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinFloat64x2 x y) + // result: (VMINPD128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VMINPD128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMinFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinFloat64x4 x y) + // result: (VMINPD256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VMINPD256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMinFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := 
v.Args[0] + // match: (MinFloat64x8 x y) + // result: (VMINPD512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VMINPD512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMinInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinInt16x16 x y) + // result: (VPMINSW256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMINSW256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMinInt16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinInt16x32 x y) + // result: (VPMINSW512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMINSW512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMinInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinInt16x8 x y) + // result: (VPMINSW128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMINSW128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMinInt32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinInt32x16 x y) + // result: (VPMINSD512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMINSD512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMinInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinInt32x4 x y) + // result: (VPMINSD128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMINSD128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMinInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinInt32x8 x y) + // result: (VPMINSD256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMINSD256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMinInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinInt64x2 x y) + // result: (VPMINSQ128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMINSQ128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMinInt64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinInt64x4 x y) + // result: (VPMINSQ256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMINSQ256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMinInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinInt64x8 x y) + // result: (VPMINSQ512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMINSQ512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMinInt8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinInt8x16 x y) + // result: (VPMINSB128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMINSB128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMinInt8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinInt8x32 x y) + // result: (VPMINSB256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMINSB256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMinInt8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinInt8x64 x y) + // result: (VPMINSB512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMINSB512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMinUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinUint16x16 x y) + // result: (VPMINUW256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMINUW256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMinUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinUint16x32 x y) + // result: 
(VPMINUW512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMINUW512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMinUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinUint16x8 x y) + // result: (VPMINUW128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMINUW128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMinUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinUint32x16 x y) + // result: (VPMINUD512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMINUD512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMinUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinUint32x4 x y) + // result: (VPMINUD128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMINUD128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMinUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinUint32x8 x y) + // result: (VPMINUD256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMINUD256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMinUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinUint64x2 x y) + // result: (VPMINUQ128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMINUQ128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMinUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinUint64x4 x y) + // result: (VPMINUQ256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMINUQ256) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETG cond)) - // cond: is16BitInt(t) - // result: (CMOVWGT y x cond) +} +func rewriteValueAMD64_OpMinUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinUint64x8 x y) + // result: (VPMINUQ512 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETG { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWGT) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMINUQ512) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETLE cond)) - // cond: is16BitInt(t) - // result: (CMOVWLE y x cond) +} +func rewriteValueAMD64_OpMinUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinUint8x16 x y) + // result: (VPMINUB128 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETLE { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWLE) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMINUB128) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETGE cond)) - // cond: is16BitInt(t) - // result: (CMOVWGE y x cond) +} +func rewriteValueAMD64_OpMinUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinUint8x32 x y) + // result: (VPMINUB256 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGE { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWGE) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMINUB256) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETA cond)) - // cond: is16BitInt(t) - // result: (CMOVWHI y x cond) +} +func rewriteValueAMD64_OpMinUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinUint8x64 x y) + // result: (VPMINUB512 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETA { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWHI) - 
v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMINUB512) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETB cond)) - // cond: is16BitInt(t) - // result: (CMOVWCS y x cond) +} +func rewriteValueAMD64_OpMod16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod16 [a] x y) + // result: (Select1 (DIVW [a] x y)) for { - t := v.Type + a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - if v_2.Op != OpAMD64SETB { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWCS) - v.AddArg3(y, x, cond) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) + v0.AuxInt = boolToAuxInt(a) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETAE cond)) - // cond: is16BitInt(t) - // result: (CMOVWCC y x cond) +} +func rewriteValueAMD64_OpMod16u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod16u x y) + // result: (Select1 (DIVWU x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETAE { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWCC) - v.AddArg3(y, x, cond) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETBE cond)) - // cond: is16BitInt(t) - // result: (CMOVWLS y x cond) +} +func rewriteValueAMD64_OpMod32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod32 [a] x y) + // result: (Select1 (DIVL [a] x y)) for { - t := v.Type + a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - if v_2.Op != OpAMD64SETBE { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWLS) - v.AddArg3(y, x, cond) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) + v0.AuxInt = boolToAuxInt(a) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETEQF cond)) - // cond: is16BitInt(t) - // result: (CMOVWEQF y x cond) +} +func rewriteValueAMD64_OpMod32u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod32u x y) + // result: (Select1 (DIVLU x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETEQF { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWEQF) - v.AddArg3(y, x, cond) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32)) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETNEF cond)) - // cond: is16BitInt(t) - // result: (CMOVWNEF y x cond) +} +func rewriteValueAMD64_OpMod64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod64 [a] x y) + // result: (Select1 (DIVQ [a] x y)) for { - t := v.Type + a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - if v_2.Op != OpAMD64SETNEF { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWNEF) - v.AddArg3(y, x, cond) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64)) + v0.AuxInt = boolToAuxInt(a) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETGF cond)) - // cond: is16BitInt(t) - // result: (CMOVWGTF y x cond) +} +func 
rewriteValueAMD64_OpMod64u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod64u x y) + // result: (Select1 (DIVQU x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGF { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWGTF) - v.AddArg3(y, x, cond) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETGEF cond)) - // cond: is16BitInt(t) - // result: (CMOVWGEF y x cond) +} +func rewriteValueAMD64_OpMod8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod8 x y) + // result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y))) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGEF { + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) + v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMod8u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod8u x y) + // result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) + for { + x := v_0 + y := v_1 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) + v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMove(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Move [0] _ _ mem) + // result: mem + for { + if auxIntToInt64(v.AuxInt) != 0 { break } - cond := v_2.Args[0] - if !(is16BitInt(t)) { + mem := v_2 + v.copyOf(mem) + return true + } + // match: (Move [1] dst src mem) + // result: (MOVBstore dst (MOVBload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 1 { break } - v.reset(OpAMD64CMOVWGEF) - v.AddArg3(y, x, cond) + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVBstore) + v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } - // match: (CondSelect x y check) - // cond: !check.Type.IsFlags() && check.Type.Size() == 1 - // result: (CondSelect x y (MOVBQZX check)) + // match: (Move [2] dst src mem) + // result: (MOVWstore dst (MOVWload src mem) mem) for { - t := v.Type - x := v_0 - y := v_1 - check := v_2 - if !(!check.Type.IsFlags() && check.Type.Size() == 1) { + if auxIntToInt64(v.AuxInt) != 2 { break } - v.reset(OpCondSelect) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt64) - v0.AddArg(check) - v.AddArg3(x, y, v0) + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVWstore) + v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } - // match: (CondSelect x y check) - // cond: !check.Type.IsFlags() && check.Type.Size() == 2 - // result: (CondSelect x y (MOVWQZX check)) + // match: (Move [4] dst src mem) + // result: (MOVLstore dst (MOVLload src mem) mem) for { - t := v.Type - x := v_0 - y := v_1 - check := v_2 - if !(!check.Type.IsFlags() && check.Type.Size() == 2) { + if auxIntToInt64(v.AuxInt) 
!= 4 { break } - v.reset(OpCondSelect) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt64) - v0.AddArg(check) - v.AddArg3(x, y, v0) + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVLstore) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } - // match: (CondSelect x y check) - // cond: !check.Type.IsFlags() && check.Type.Size() == 4 - // result: (CondSelect x y (MOVLQZX check)) + // match: (Move [8] dst src mem) + // result: (MOVQstore dst (MOVQload src mem) mem) for { - t := v.Type - x := v_0 - y := v_1 - check := v_2 - if !(!check.Type.IsFlags() && check.Type.Size() == 4) { + if auxIntToInt64(v.AuxInt) != 8 { break } - v.reset(OpCondSelect) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64) - v0.AddArg(check) - v.AddArg3(x, y, v0) + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVQstore) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } - // match: (CondSelect x y check) - // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t)) - // result: (CMOVQNE y x (CMPQconst [0] check)) + // match: (Move [16] dst src mem) + // result: (MOVOstore dst (MOVOload src mem) mem) for { - t := v.Type - x := v_0 - y := v_1 - check := v_2 - if !(!check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))) { + if auxIntToInt64(v.AuxInt) != 16 { break } - v.reset(OpAMD64CMOVQNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v0.AuxInt = int32ToAuxInt(0) - v0.AddArg(check) - v.AddArg3(y, x, v0) + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVOstore) + v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } - // match: (CondSelect x y check) - // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t) - // result: (CMOVLNE y x (CMPQconst [0] check)) + // match: (Move [32] dst src mem) + // result: (Move [16] (OffPtr dst [16]) (OffPtr src [16]) (Move [16] dst src mem)) for { - t := v.Type - x := v_0 - y := v_1 - check := v_2 - if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)) { + if auxIntToInt64(v.AuxInt) != 32 { break } - v.reset(OpAMD64CMOVLNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v0.AuxInt = int32ToAuxInt(0) - v0.AddArg(check) - v.AddArg3(y, x, v0) + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(16) + v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) + v0.AuxInt = int64ToAuxInt(16) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) + v1.AuxInt = int64ToAuxInt(16) + v1.AddArg(src) + v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) + v2.AuxInt = int64ToAuxInt(16) + v2.AddArg3(dst, src, mem) + v.AddArg3(v0, v1, v2) return true } - // match: (CondSelect x y check) - // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t) - // result: (CMOVWNE y x (CMPQconst [0] check)) + // match: (Move [48] dst src mem) + // result: (Move [32] (OffPtr dst [16]) (OffPtr src [16]) (Move [16] dst src mem)) for { - t := v.Type - x := v_0 - y := v_1 - check := v_2 - if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)) { + if auxIntToInt64(v.AuxInt) != 48 { break } - v.reset(OpAMD64CMOVWNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v0.AuxInt = int32ToAuxInt(0) - v0.AddArg(check) - v.AddArg3(y, x, v0) + dst := v_0 + src := v_1 + mem := v_2 + 
v.reset(OpMove) + v.AuxInt = int64ToAuxInt(32) + v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) + v0.AuxInt = int64ToAuxInt(16) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) + v1.AuxInt = int64ToAuxInt(16) + v1.AddArg(src) + v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) + v2.AuxInt = int64ToAuxInt(16) + v2.AddArg3(dst, src, mem) + v.AddArg3(v0, v1, v2) return true } - return false -} -func rewriteValueAMD64_OpConst16(v *Value) bool { - // match: (Const16 [c]) - // result: (MOVLconst [int32(c)]) + // match: (Move [64] dst src mem) + // result: (Move [32] (OffPtr dst [32]) (OffPtr src [32]) (Move [32] dst src mem)) for { - c := auxIntToInt16(v.AuxInt) - v.reset(OpAMD64MOVLconst) - v.AuxInt = int32ToAuxInt(int32(c)) + if auxIntToInt64(v.AuxInt) != 64 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(32) + v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) + v0.AuxInt = int64ToAuxInt(32) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) + v1.AuxInt = int64ToAuxInt(32) + v1.AddArg(src) + v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) + v2.AuxInt = int64ToAuxInt(32) + v2.AddArg3(dst, src, mem) + v.AddArg3(v0, v1, v2) return true } -} -func rewriteValueAMD64_OpConst8(v *Value) bool { - // match: (Const8 [c]) - // result: (MOVLconst [int32(c)]) + // match: (Move [3] dst src mem) + // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 3 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) + v0.AuxInt = int32ToAuxInt(2) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [5] dst src mem) + // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) for { - c := auxIntToInt8(v.AuxInt) - v.reset(OpAMD64MOVLconst) - v.AuxInt = int32ToAuxInt(int32(c)) + if auxIntToInt64(v.AuxInt) != 5 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(4) + v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) + v0.AuxInt = int32ToAuxInt(4) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } -} -func rewriteValueAMD64_OpConstBool(v *Value) bool { - // match: (ConstBool [c]) - // result: (MOVLconst [b2i32(c)]) + // match: (Move [6] dst src mem) + // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) for { - c := auxIntToBool(v.AuxInt) - v.reset(OpAMD64MOVLconst) - v.AuxInt = int32ToAuxInt(b2i32(c)) + if auxIntToInt64(v.AuxInt) != 6 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVWstore) + v.AuxInt = int32ToAuxInt(4) + v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v0.AuxInt = int32ToAuxInt(4) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } -} -func rewriteValueAMD64_OpConstNil(v *Value) bool { - // match: (ConstNil ) - // result: (MOVQconst [0]) + // match: 
(Move [7] dst src mem) + // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem)) for { - v.reset(OpAMD64MOVQconst) - v.AuxInt = int64ToAuxInt(0) + if auxIntToInt64(v.AuxInt) != 7 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVLstore) + v.AuxInt = int32ToAuxInt(3) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v0.AuxInt = int32ToAuxInt(3) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } -} -func rewriteValueAMD64_OpCtz16(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Ctz16 x) - // result: (BSFL (ORLconst [1<<16] x)) + // match: (Move [9] dst src mem) + // result: (MOVBstore [8] dst (MOVBload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) for { - x := v_0 - v.reset(OpAMD64BSFL) - v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32) - v0.AuxInt = int32ToAuxInt(1 << 16) - v0.AddArg(x) - v.AddArg(v0) + if auxIntToInt64(v.AuxInt) != 9 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) + v0.AuxInt = int32ToAuxInt(8) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } -} -func rewriteValueAMD64_OpCtz16NonZero(v *Value) bool { - v_0 := v.Args[0] - // match: (Ctz16NonZero x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (TZCNTL x) + // match: (Move [10] dst src mem) + // result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) for { - x := v_0 - if !(buildcfg.GOAMD64 >= 3) { + if auxIntToInt64(v.AuxInt) != 10 { break } - v.reset(OpAMD64TZCNTL) - v.AddArg(x) + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVWstore) + v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v0.AuxInt = int32ToAuxInt(8) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } - // match: (Ctz16NonZero x) - // cond: buildcfg.GOAMD64 < 3 - // result: (BSFL x) + // match: (Move [11] dst src mem) + // result: (MOVLstore [7] dst (MOVLload [7] src mem) (MOVQstore dst (MOVQload src mem) mem)) for { - x := v_0 - if !(buildcfg.GOAMD64 < 3) { + if auxIntToInt64(v.AuxInt) != 11 { break } - v.reset(OpAMD64BSFL) - v.AddArg(x) + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVLstore) + v.AuxInt = int32ToAuxInt(7) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v0.AuxInt = int32ToAuxInt(7) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } - return false -} -func rewriteValueAMD64_OpCtz32(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Ctz32 x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (TZCNTL x) + // match: (Move [12] dst src mem) + // result: (MOVLstore [8] dst (MOVLload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) for { - x := v_0 - if !(buildcfg.GOAMD64 >= 
3) { + if auxIntToInt64(v.AuxInt) != 12 { break } - v.reset(OpAMD64TZCNTL) - v.AddArg(x) + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVLstore) + v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v0.AuxInt = int32ToAuxInt(8) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } - // match: (Ctz32 x) - // cond: buildcfg.GOAMD64 < 3 - // result: (Select0 (BSFQ (BTSQconst [32] x))) + // match: (Move [s] dst src mem) + // cond: s >= 13 && s <= 15 + // result: (MOVQstore [int32(s-8)] dst (MOVQload [int32(s-8)] src mem) (MOVQstore dst (MOVQload src mem) mem)) for { - x := v_0 - if !(buildcfg.GOAMD64 < 3) { + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s >= 13 && s <= 15) { break } - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v1 := b.NewValue0(v.Pos, OpAMD64BTSQconst, typ.UInt64) - v1.AuxInt = int8ToAuxInt(32) - v1.AddArg(x) - v0.AddArg(v1) - v.AddArg(v0) + v.reset(OpAMD64MOVQstore) + v.AuxInt = int32ToAuxInt(int32(s - 8)) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v0.AuxInt = int32ToAuxInt(int32(s - 8)) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } - return false -} -func rewriteValueAMD64_OpCtz32NonZero(v *Value) bool { - v_0 := v.Args[0] - // match: (Ctz32NonZero x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (TZCNTL x) + // match: (Move [s] dst src mem) + // cond: s > 16 && s%16 != 0 && s%16 <= 8 + // result: (Move [s-s%16] (OffPtr dst [s%16]) (OffPtr src [s%16]) (MOVQstore dst (MOVQload src mem) mem)) for { - x := v_0 - if !(buildcfg.GOAMD64 >= 3) { + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s > 16 && s%16 != 0 && s%16 <= 8) { break } - v.reset(OpAMD64TZCNTL) - v.AddArg(x) + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(s - s%16) + v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) + v0.AuxInt = int64ToAuxInt(s % 16) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) + v1.AuxInt = int64ToAuxInt(s % 16) + v1.AddArg(src) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v3.AddArg2(src, mem) + v2.AddArg3(dst, v3, mem) + v.AddArg3(v0, v1, v2) return true } - // match: (Ctz32NonZero x) - // cond: buildcfg.GOAMD64 < 3 - // result: (BSFL x) + // match: (Move [s] dst src mem) + // cond: s > 16 && s%16 != 0 && s%16 > 8 + // result: (Move [s-s%16] (OffPtr dst [s%16]) (OffPtr src [s%16]) (MOVOstore dst (MOVOload src mem) mem)) for { - x := v_0 - if !(buildcfg.GOAMD64 < 3) { + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s > 16 && s%16 != 0 && s%16 > 8) { break } - v.reset(OpAMD64BSFL) - v.AddArg(x) + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(s - s%16) + v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) + v0.AuxInt = int64ToAuxInt(s % 16) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) + v1.AuxInt = int64ToAuxInt(s % 16) + v1.AddArg(src) + v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) + v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) + v3.AddArg2(src, mem) + v2.AddArg3(dst, v3, mem) + v.AddArg3(v0, v1, v2) return true } - return 
false -} -func rewriteValueAMD64_OpCtz64(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Ctz64 x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (TZCNTQ x) + // match: (Move [s] dst src mem) + // cond: s > 64 && s <= 16*64 && s%16 == 0 && logLargeCopy(v, s) + // result: (DUFFCOPY [s] dst src mem) for { - x := v_0 - if !(buildcfg.GOAMD64 >= 3) { + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s > 64 && s <= 16*64 && s%16 == 0 && logLargeCopy(v, s)) { break } - v.reset(OpAMD64TZCNTQ) - v.AddArg(x) + v.reset(OpAMD64DUFFCOPY) + v.AuxInt = int64ToAuxInt(s) + v.AddArg3(dst, src, mem) return true } - // match: (Ctz64 x) - // cond: buildcfg.GOAMD64 < 3 - // result: (CMOVQEQ (Select0 (BSFQ x)) (MOVQconst [64]) (Select1 (BSFQ x))) + // match: (Move [s] dst src mem) + // cond: s > 16*64 && s%8 == 0 && logLargeCopy(v, s) + // result: (REPMOVSQ dst src (MOVQconst [s/8]) mem) for { - t := v.Type - x := v_0 - if !(buildcfg.GOAMD64 < 3) { + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s > 16*64 && s%8 == 0 && logLargeCopy(v, s)) { break } - v.reset(OpAMD64CMOVQEQ) - v0 := b.NewValue0(v.Pos, OpSelect0, t) - v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t) - v2.AuxInt = int64ToAuxInt(64) - v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v3.AddArg(v1) - v.AddArg3(v0, v2, v3) + v.reset(OpAMD64REPMOVSQ) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(s / 8) + v.AddArg4(dst, src, v0, mem) return true } return false } -func rewriteValueAMD64_OpCtz64NonZero(v *Value) bool { +func rewriteValueAMD64_OpMulByPowOf2Float32x16(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Ctz64NonZero x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (TZCNTQ x) + // match: (MulByPowOf2Float32x16 x y) + // result: (VSCALEFPS512 y x) for { x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64TZCNTQ) - v.AddArg(x) + y := v_1 + v.reset(OpAMD64VSCALEFPS512) + v.AddArg2(y, x) return true } - // match: (Ctz64NonZero x) - // cond: buildcfg.GOAMD64 < 3 - // result: (Select0 (BSFQ x)) +} +func rewriteValueAMD64_OpMulByPowOf2Float32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MulByPowOf2Float32x4 x y) + // result: (VSCALEFPS128 y x) for { x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v0.AddArg(x) - v.AddArg(v0) + y := v_1 + v.reset(OpAMD64VSCALEFPS128) + v.AddArg2(y, x) return true } - return false } -func rewriteValueAMD64_OpCtz8(v *Value) bool { +func rewriteValueAMD64_OpMulByPowOf2Float32x8(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Ctz8 x) - // result: (BSFL (ORLconst [1<<8 ] x)) + // match: (MulByPowOf2Float32x8 x y) + // result: (VSCALEFPS256 y x) for { x := v_0 - v.reset(OpAMD64BSFL) - v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32) - v0.AuxInt = int32ToAuxInt(1 << 8) - v0.AddArg(x) - v.AddArg(v0) + y := v_1 + v.reset(OpAMD64VSCALEFPS256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpCtz8NonZero(v *Value) bool { +func rewriteValueAMD64_OpMulByPowOf2Float64x2(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Ctz8NonZero x) - // cond: buildcfg.GOAMD64 >= 3 - // 
result: (TZCNTL x) + // match: (MulByPowOf2Float64x2 x y) + // result: (VSCALEFPD128 y x) for { x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64TZCNTL) - v.AddArg(x) + y := v_1 + v.reset(OpAMD64VSCALEFPD128) + v.AddArg2(y, x) return true } - // match: (Ctz8NonZero x) - // cond: buildcfg.GOAMD64 < 3 - // result: (BSFL x) +} +func rewriteValueAMD64_OpMulByPowOf2Float64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MulByPowOf2Float64x4 x y) + // result: (VSCALEFPD256 y x) for { x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpAMD64BSFL) - v.AddArg(x) + y := v_1 + v.reset(OpAMD64VSCALEFPD256) + v.AddArg2(y, x) return true } - return false } -func rewriteValueAMD64_OpDiv16(v *Value) bool { +func rewriteValueAMD64_OpMulByPowOf2Float64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Div16 [a] x y) - // result: (Select0 (DIVW [a] x y)) + // match: (MulByPowOf2Float64x8 x y) + // result: (VSCALEFPD512 y x) for { - a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) - v0.AuxInt = boolToAuxInt(a) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VSCALEFPD512) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpDiv16u(v *Value) bool { +func rewriteValueAMD64_OpMulEvenWidenInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Div16u x y) - // result: (Select0 (DIVWU x y)) + // match: (MulEvenWidenInt32x4 x y) + // result: (VPMULDQ128 y x) for { x := v_0 y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPMULDQ128) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpDiv32(v *Value) bool { +func rewriteValueAMD64_OpMulEvenWidenInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Div32 [a] x y) - // result: (Select0 (DIVL [a] x y)) + // match: (MulEvenWidenInt32x8 x y) + // result: (VPMULDQ256 y x) for { - a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) - v0.AuxInt = boolToAuxInt(a) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPMULDQ256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpDiv32u(v *Value) bool { +func rewriteValueAMD64_OpMulEvenWidenInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Div32u x y) - // result: (Select0 (DIVLU x y)) + // match: (MulEvenWidenInt64x2 x y) + // result: (VPMULDQ128 y x) for { x := v_0 y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32)) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPMULDQ128) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpDiv64(v *Value) bool { +func rewriteValueAMD64_OpMulEvenWidenInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Div64 [a] x y) - // result: (Select0 (DIVQ [a] x y)) + // match: (MulEvenWidenInt64x4 x y) + // result: (VPMULDQ256 y x) for { - a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64)) - v0.AuxInt = boolToAuxInt(a) - v0.AddArg2(x, y) - v.AddArg(v0) + 
v.reset(OpAMD64VPMULDQ256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpDiv64u(v *Value) bool { +func rewriteValueAMD64_OpMulEvenWidenInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Div64u x y) - // result: (Select0 (DIVQU x y)) + // match: (MulEvenWidenInt64x8 x y) + // result: (VPMULDQ512 y x) for { x := v_0 y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPMULDQ512) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpDiv8(v *Value) bool { +func rewriteValueAMD64_OpMulEvenWidenUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Div8 x y) - // result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y))) + // match: (MulEvenWidenUint32x4 x y) + // result: (VPMULUDQ128 y x) for { x := v_0 y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) - v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) - v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) - v2.AddArg(y) - v0.AddArg2(v1, v2) - v.AddArg(v0) + v.reset(OpAMD64VPMULUDQ128) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpDiv8u(v *Value) bool { +func rewriteValueAMD64_OpMulEvenWidenUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Div8u x y) - // result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) + // match: (MulEvenWidenUint32x8 x y) + // result: (VPMULUDQ256 y x) for { x := v_0 y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) - v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) - v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) - v2.AddArg(y) - v0.AddArg2(v1, v2) - v.AddArg(v0) + v.reset(OpAMD64VPMULUDQ256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpEq16(v *Value) bool { +func rewriteValueAMD64_OpMulEvenWidenUint64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Eq16 x y) - // result: (SETEQ (CMPW x y)) + // match: (MulEvenWidenUint64x2 x y) + // result: (VPMULUDQ128 y x) for { x := v_0 y := v_1 - v.reset(OpAMD64SETEQ) - v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPMULUDQ128) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpEq32(v *Value) bool { +func rewriteValueAMD64_OpMulEvenWidenUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Eq32 x y) - // result: (SETEQ (CMPL x y)) + // match: (MulEvenWidenUint64x4 x y) + // result: (VPMULUDQ256 y x) for { x := v_0 y := v_1 - v.reset(OpAMD64SETEQ) - v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPMULUDQ256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpEq32F(v *Value) bool { +func rewriteValueAMD64_OpMulEvenWidenUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Eq32F x y) - // result: (SETEQF (UCOMISS x y)) + // match: (MulEvenWidenUint64x8 x y) + // result: (VPMULUDQ512 y x) for { x := v_0 y := v_1 - v.reset(OpAMD64SETEQF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPMULUDQ512) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpEq64(v *Value) bool { 
+func rewriteValueAMD64_OpMulFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Eq64 x y) - // result: (SETEQ (CMPQ x y)) + // match: (MulFloat32x16 x y) + // result: (VMULPS512 y x) for { x := v_0 y := v_1 - v.reset(OpAMD64SETEQ) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VMULPS512) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpEq64F(v *Value) bool { +func rewriteValueAMD64_OpMulFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Eq64F x y) - // result: (SETEQF (UCOMISD x y)) + // match: (MulFloat32x4 x y) + // result: (VMULPS128 y x) for { x := v_0 y := v_1 - v.reset(OpAMD64SETEQF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VMULPS128) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpEq8(v *Value) bool { +func rewriteValueAMD64_OpMulFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Eq8 x y) - // result: (SETEQ (CMPB x y)) + // match: (MulFloat32x8 x y) + // result: (VMULPS256 y x) for { x := v_0 y := v_1 - v.reset(OpAMD64SETEQ) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VMULPS256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpEqB(v *Value) bool { +func rewriteValueAMD64_OpMulFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (EqB x y) - // result: (SETEQ (CMPB x y)) + // match: (MulFloat64x2 x y) + // result: (VMULPD128 y x) for { x := v_0 y := v_1 - v.reset(OpAMD64SETEQ) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VMULPD128) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpEqPtr(v *Value) bool { +func rewriteValueAMD64_OpMulFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (EqPtr x y) - // result: (SETEQ (CMPQ x y)) + // match: (MulFloat64x4 x y) + // result: (VMULPD256 y x) for { x := v_0 y := v_1 - v.reset(OpAMD64SETEQ) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VMULPD256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpFMA(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMulFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (FMA x y z) - // result: (VFMADD231SD z x y) + // match: (MulFloat64x8 x y) + // result: (VMULPD512 y x) for { x := v_0 y := v_1 - z := v_2 - v.reset(OpAMD64VFMADD231SD) - v.AddArg3(z, x, y) + v.reset(OpAMD64VMULPD512) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpFloor(v *Value) bool { +func rewriteValueAMD64_OpMulHighInt16x16(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Floor x) - // result: (ROUNDSD [1] x) + // match: (MulHighInt16x16 x y) + // result: (VPMULHW256 y x) for { x := v_0 - v.reset(OpAMD64ROUNDSD) - v.AuxInt = int8ToAuxInt(1) - v.AddArg(x) + y := v_1 + v.reset(OpAMD64VPMULHW256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpGetG(v *Value) bool { +func rewriteValueAMD64_OpMulHighInt16x32(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (GetG mem) - // cond: v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal - // result: (LoweredGetG mem) + // match: (MulHighInt16x32 x y) + // result: (VPMULHW512 y x) for { - mem := v_0 - if !(v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal) { - break - } 
- v.reset(OpAMD64LoweredGetG) - v.AddArg(mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VPMULHW512) + v.AddArg2(y, x) return true } - return false } -func rewriteValueAMD64_OpHasCPUFeature(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (HasCPUFeature {s}) - // result: (SETNE (CMPLconst [0] (LoweredHasCPUFeature {s}))) +func rewriteValueAMD64_OpMulHighInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MulHighInt16x8 x y) + // result: (VPMULHW128 y x) for { - s := auxToSym(v.Aux) - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v0.AuxInt = int32ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64LoweredHasCPUFeature, typ.UInt64) - v1.Aux = symToAux(s) - v0.AddArg(v1) - v.AddArg(v0) + x := v_0 + y := v_1 + v.reset(OpAMD64VPMULHW128) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpIsInBounds(v *Value) bool { +func rewriteValueAMD64_OpMulHighUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (IsInBounds idx len) - // result: (SETB (CMPQ idx len)) + // match: (MulHighUint16x16 x y) + // result: (VPMULHUW256 y x) for { - idx := v_0 - len := v_1 - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(idx, len) - v.AddArg(v0) + x := v_0 + y := v_1 + v.reset(OpAMD64VPMULHUW256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpIsNonNil(v *Value) bool { +func rewriteValueAMD64_OpMulHighUint16x32(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (IsNonNil p) - // result: (SETNE (TESTQ p p)) + // match: (MulHighUint16x32 x y) + // result: (VPMULHUW512 y x) for { - p := v_0 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags) - v0.AddArg2(p, p) - v.AddArg(v0) + x := v_0 + y := v_1 + v.reset(OpAMD64VPMULHUW512) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpIsSliceInBounds(v *Value) bool { +func rewriteValueAMD64_OpMulHighUint16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (IsSliceInBounds idx len) - // result: (SETBE (CMPQ idx len)) + // match: (MulHighUint16x8 x y) + // result: (VPMULHUW128 y x) for { - idx := v_0 - len := v_1 - v.reset(OpAMD64SETBE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(idx, len) - v.AddArg(v0) + x := v_0 + y := v_1 + v.reset(OpAMD64VPMULHUW128) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpLeq16(v *Value) bool { +func rewriteValueAMD64_OpMulLowInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Leq16 x y) - // result: (SETLE (CMPW x y)) + // match: (MulLowInt16x16 x y) + // result: (VPMULLW256 y x) for { x := v_0 y := v_1 - v.reset(OpAMD64SETLE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPMULLW256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpLeq16U(v *Value) bool { +func rewriteValueAMD64_OpMulLowInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Leq16U x y) - // result: (SETBE (CMPW x y)) + // match: (MulLowInt16x32 x y) + // result: (VPMULLW512 y x) for { x := v_0 y := v_1 - v.reset(OpAMD64SETBE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPMULLW512) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpLeq32(v *Value) bool { +func rewriteValueAMD64_OpMulLowInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // 
match: (Leq32 x y) - // result: (SETLE (CMPL x y)) + // match: (MulLowInt16x8 x y) + // result: (VPMULLW128 y x) for { x := v_0 y := v_1 - v.reset(OpAMD64SETLE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPMULLW128) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpLeq32F(v *Value) bool { +func rewriteValueAMD64_OpMulLowInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Leq32F x y) - // result: (SETGEF (UCOMISS y x)) + // match: (MulLowInt32x16 x y) + // result: (VPMULLD512 y x) for { x := v_0 y := v_1 - v.reset(OpAMD64SETGEF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) - v0.AddArg2(y, x) - v.AddArg(v0) + v.reset(OpAMD64VPMULLD512) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpLeq32U(v *Value) bool { +func rewriteValueAMD64_OpMulLowInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Leq32U x y) - // result: (SETBE (CMPL x y)) + // match: (MulLowInt32x4 x y) + // result: (VPMULLD128 y x) for { x := v_0 y := v_1 - v.reset(OpAMD64SETBE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPMULLD128) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpLeq64(v *Value) bool { +func rewriteValueAMD64_OpMulLowInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Leq64 x y) - // result: (SETLE (CMPQ x y)) + // match: (MulLowInt32x8 x y) + // result: (VPMULLD256 y x) for { x := v_0 y := v_1 - v.reset(OpAMD64SETLE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPMULLD256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpLeq64F(v *Value) bool { +func rewriteValueAMD64_OpMulLowInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Leq64F x y) - // result: (SETGEF (UCOMISD y x)) + // match: (MulLowInt64x2 x y) + // result: (VPMULLQ128 y x) for { x := v_0 y := v_1 - v.reset(OpAMD64SETGEF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) - v0.AddArg2(y, x) - v.AddArg(v0) + v.reset(OpAMD64VPMULLQ128) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpLeq64U(v *Value) bool { +func rewriteValueAMD64_OpMulLowInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Leq64U x y) - // result: (SETBE (CMPQ x y)) + // match: (MulLowInt64x4 x y) + // result: (VPMULLQ256 y x) for { x := v_0 y := v_1 - v.reset(OpAMD64SETBE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPMULLQ256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpLeq8(v *Value) bool { +func rewriteValueAMD64_OpMulLowInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Leq8 x y) - // result: (SETLE (CMPB x y)) + // match: (MulLowInt64x8 x y) + // result: (VPMULLQ512 y x) for { x := v_0 y := v_1 - v.reset(OpAMD64SETLE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPMULLQ512) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpLeq8U(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpNeg32F(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (Leq8U x y) - // result: (SETBE (CMPB x y)) + typ := &b.Func.Config.Types + // match: (Neg32F x) + // result: (PXOR x (MOVSSconst [float32(math.Copysign(0, -1))])) for { x := v_0 - y := v_1 - 
v.reset(OpAMD64SETBE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64PXOR) + v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32) + v0.AuxInt = float32ToAuxInt(float32(math.Copysign(0, -1))) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLess16(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpNeg64F(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (Less16 x y) - // result: (SETL (CMPW x y)) + typ := &b.Func.Config.Types + // match: (Neg64F x) + // result: (PXOR x (MOVSDconst [math.Copysign(0, -1)])) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETL) - v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64PXOR) + v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64) + v0.AuxInt = float64ToAuxInt(math.Copysign(0, -1)) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLess16U(v *Value) bool { +func rewriteValueAMD64_OpNeq16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less16U x y) - // result: (SETB (CMPW x y)) + // match: (Neq16 x y) + // result: (SETNE (CMPW x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETB) + v.reset(OpAMD64SETNE) v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLess32(v *Value) bool { +func rewriteValueAMD64_OpNeq32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less32 x y) - // result: (SETL (CMPL x y)) + // match: (Neq32 x y) + // result: (SETNE (CMPL x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETL) + v.reset(OpAMD64SETNE) v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLess32F(v *Value) bool { +func rewriteValueAMD64_OpNeq32F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less32F x y) - // result: (SETGF (UCOMISS y x)) + // match: (Neq32F x y) + // result: (SETNEF (UCOMISS x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETGF) + v.reset(OpAMD64SETNEF) v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) - v0.AddArg2(y, x) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLess32U(v *Value) bool { +func rewriteValueAMD64_OpNeq64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less32U x y) - // result: (SETB (CMPL x y)) + // match: (Neq64 x y) + // result: (SETNE (CMPQ x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLess64(v *Value) bool { +func rewriteValueAMD64_OpNeq64F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less64 x y) - // result: (SETL (CMPQ x y)) + // match: (Neq64F x y) + // result: (SETNEF (UCOMISD x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETL) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v.reset(OpAMD64SETNEF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLess64F(v *Value) bool { +func rewriteValueAMD64_OpNeq8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less64F x y) - // result: (SETGF (UCOMISD y x)) + // match: (Neq8 x y) + // result: (SETNE (CMPB x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETGF) - v0 := 
b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) - v0.AddArg2(y, x) + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLess64U(v *Value) bool { +func rewriteValueAMD64_OpNeqB(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less64U x y) - // result: (SETB (CMPQ x y)) + // match: (NeqB x y) + // result: (SETNE (CMPB x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLess8(v *Value) bool { +func rewriteValueAMD64_OpNeqPtr(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less8 x y) - // result: (SETL (CMPB x y)) + // match: (NeqPtr x y) + // result: (SETNE (CMPQ x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETL) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLess8U(v *Value) bool { +func rewriteValueAMD64_OpNot(v *Value) bool { + v_0 := v.Args[0] + // match: (Not x) + // result: (XORLconst [1] x) + for { + x := v_0 + v.reset(OpAMD64XORLconst) + v.AuxInt = int32ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpNotEqualFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less8U x y) - // result: (SETB (CMPB x y)) + typ := &b.Func.Config.Types + // match: (NotEqualFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [4] y x)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLoad(v *Value) bool { +func rewriteValueAMD64_OpNotEqualFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Load ptr mem) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (MOVQload ptr mem) - for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64MOVQload) - v.AddArg2(ptr, mem) - return true - } - // match: (Load ptr mem) - // cond: is32BitInt(t) - // result: (MOVLload ptr mem) - for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64MOVLload) - v.AddArg2(ptr, mem) - return true - } - // match: (Load ptr mem) - // cond: is16BitInt(t) - // result: (MOVWload ptr mem) - for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64MOVWload) - v.AddArg2(ptr, mem) - return true - } - // match: (Load ptr mem) - // cond: (t.IsBoolean() || is8BitInt(t)) - // result: (MOVBload ptr mem) - for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(t.IsBoolean() || is8BitInt(t)) { - break - } - v.reset(OpAMD64MOVBload) - v.AddArg2(ptr, mem) - return true - } - // match: (Load ptr mem) - // cond: is32BitFloat(t) - // result: (MOVSSload ptr mem) - for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(is32BitFloat(t)) { - break - } - v.reset(OpAMD64MOVSSload) - v.AddArg2(ptr, mem) - return true - } - // match: (Load ptr mem) - // cond: is64BitFloat(t) - // result: (MOVSDload ptr mem) - for { - t := v.Type - ptr := v_0 - mem := v_1 - 
if !(is64BitFloat(t)) { - break - } - v.reset(OpAMD64MOVSDload) - v.AddArg2(ptr, mem) - return true - } - // match: (Load ptr mem) - // cond: t.Size() == 16 - // result: (VMOVDQUload128 ptr mem) - for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(t.Size() == 16) { - break - } - v.reset(OpAMD64VMOVDQUload128) - v.AddArg2(ptr, mem) - return true - } - // match: (Load ptr mem) - // cond: t.Size() == 32 - // result: (VMOVDQUload256 ptr mem) - for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(t.Size() == 32) { - break - } - v.reset(OpAMD64VMOVDQUload256) - v.AddArg2(ptr, mem) - return true - } - // match: (Load ptr mem) - // cond: t.Size() == 64 - // result: (VMOVDQUload512 ptr mem) + // match: (NotEqualFloat32x4 x y) + // result: (VCMPPS128 [4] y x) for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(t.Size() == 64) { - break - } - v.reset(OpAMD64VMOVDQUload512) - v.AddArg2(ptr, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPS128) + v.AuxInt = int8ToAuxInt(4) + v.AddArg2(y, x) return true } - return false } -func rewriteValueAMD64_OpLocalAddr(v *Value) bool { +func rewriteValueAMD64_OpNotEqualFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LocalAddr {sym} base mem) - // cond: t.Elem().HasPointers() - // result: (LEAQ {sym} (SPanchored base mem)) - for { - t := v.Type - sym := auxToSym(v.Aux) - base := v_0 - mem := v_1 - if !(t.Elem().HasPointers()) { - break - } - v.reset(OpAMD64LEAQ) - v.Aux = symToAux(sym) - v0 := b.NewValue0(v.Pos, OpSPanchored, typ.Uintptr) - v0.AddArg2(base, mem) - v.AddArg(v0) - return true - } - // match: (LocalAddr {sym} base _) - // cond: !t.Elem().HasPointers() - // result: (LEAQ {sym} base) + // match: (NotEqualFloat32x8 x y) + // result: (VCMPPS256 [4] y x) for { - t := v.Type - sym := auxToSym(v.Aux) - base := v_0 - if !(!t.Elem().HasPointers()) { - break - } - v.reset(OpAMD64LEAQ) - v.Aux = symToAux(sym) - v.AddArg(base) + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPS256) + v.AuxInt = int8ToAuxInt(4) + v.AddArg2(y, x) return true } - return false } -func rewriteValueAMD64_OpLsh16x16(v *Value) bool { +func rewriteValueAMD64_OpNotEqualFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Lsh16x16 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) + // match: (NotEqualFloat64x2 x y) + // result: (VCMPPD128 [4] y x) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v2.AuxInt = int16ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.reset(OpAMD64VCMPPD128) + v.AuxInt = int8ToAuxInt(4) + v.AddArg2(y, x) return true } - // match: (Lsh16x16 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpNotEqualFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (NotEqualFloat64x4 x y) + // result: (VCMPPD256 [4] y x) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + v.reset(OpAMD64VCMPPD256) + v.AuxInt = int8ToAuxInt(4) + v.AddArg2(y, x) return true } - return false } -func rewriteValueAMD64_OpLsh16x32(v *Value) bool { +func rewriteValueAMD64_OpNotEqualFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh16x32 x y) - // cond: 
!shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) + typ := &b.Func.Config.Types + // match: (NotEqualFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [4] y x)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - // match: (Lsh16x32 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpNotEqualInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualInt16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPW256 [4] y x)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpLsh16x64(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh16x64 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) + typ := &b.Func.Config.Types + // match: (NotEqualInt16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPW512 [4] y x)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - // match: (Lsh16x64 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpNotEqualInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualInt16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPW128 [4] y x)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpLsh16x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh16x8 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) + typ := &b.Func.Config.Types + // match: (NotEqualInt32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPD512 [4] y x)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v2.AuxInt = 
int8ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - // match: (Lsh16x8 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpNotEqualInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualInt32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPD128 [4] y x)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpLsh32x16(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh32x16 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) + typ := &b.Func.Config.Types + // match: (NotEqualInt32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPD256 [4] y x)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v2.AuxInt = int16ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - // match: (Lsh32x16 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpNotEqualInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualInt64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPQ128 [4] y x)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpLsh32x32(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh32x32 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) + typ := &b.Func.Config.Types + // match: (NotEqualInt64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPQ256 [4] y x)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - // match: (Lsh32x32 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpNotEqualInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualInt64x8 x y) + // result: 
(VPMOVMToVec64x8 (VPCMPQ512 [4] y x)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpLsh32x64(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh32x64 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) + typ := &b.Func.Config.Types + // match: (NotEqualInt8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPB128 [4] y x)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - // match: (Lsh32x64 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpNotEqualInt8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualInt8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPB256 [4] y x)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpLsh32x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh32x8 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) + typ := &b.Func.Config.Types + // match: (NotEqualInt8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPB512 [4] y x)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v2.AuxInt = int8ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - // match: (Lsh32x8 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpNotEqualUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualUint16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPUW256 [4] y x)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpLsh64x16(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: 
(Lsh64x16 x y) - // cond: !shiftIsBounded(v) - // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPWconst y [64]))) + typ := &b.Func.Config.Types + // match: (NotEqualUint16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPUW512 [4] y x)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDQ) - v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v2.AuxInt = int16ToAuxInt(64) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - // match: (Lsh64x16 x y) - // cond: shiftIsBounded(v) - // result: (SHLQ x y) +} +func rewriteValueAMD64_OpNotEqualUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualUint16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPUW128 [4] y x)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLQ) - v.AddArg2(x, y) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpLsh64x32(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh64x32 x y) - // cond: !shiftIsBounded(v) - // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPLconst y [64]))) + typ := &b.Func.Config.Types + // match: (NotEqualUint32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [4] y x)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDQ) - v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(64) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - // match: (Lsh64x32 x y) - // cond: shiftIsBounded(v) - // result: (SHLQ x y) +} +func rewriteValueAMD64_OpNotEqualUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualUint32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPUD128 [4] y x)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLQ) - v.AddArg2(x, y) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpLsh64x64(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh64x64 x y) - // cond: !shiftIsBounded(v) - // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPQconst y [64]))) + typ := &b.Func.Config.Types + // match: (NotEqualUint32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPUD256 [4] y x)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDQ) - v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) - v2 := b.NewValue0(v.Pos, 
OpAMD64CMPQconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(64) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - // match: (Lsh64x64 x y) - // cond: shiftIsBounded(v) - // result: (SHLQ x y) +} +func rewriteValueAMD64_OpNotEqualUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualUint64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPUQ128 [4] y x)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLQ) - v.AddArg2(x, y) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpLsh64x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh64x8 x y) - // cond: !shiftIsBounded(v) - // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPBconst y [64]))) + typ := &b.Func.Config.Types + // match: (NotEqualUint64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPUQ256 [4] y x)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDQ) - v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v2.AuxInt = int8ToAuxInt(64) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - // match: (Lsh64x8 x y) - // cond: shiftIsBounded(v) - // result: (SHLQ x y) +} +func rewriteValueAMD64_OpNotEqualUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualUint64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPUQ512 [4] y x)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLQ) - v.AddArg2(x, y) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpLsh8x16(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh8x16 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) + typ := &b.Func.Config.Types + // match: (NotEqualUint8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPUB128 [4] y x)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v2.AuxInt = int16ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - // match: (Lsh8x16 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpNotEqualUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := 
&b.Func.Config.Types + // match: (NotEqualUint8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPUB256 [4] y x)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpLsh8x32(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh8x32 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) + typ := &b.Func.Config.Types + // match: (NotEqualUint8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [4] y x)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - // match: (Lsh8x32 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpOffPtr(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (OffPtr [off] ptr) + // cond: is32Bit(off) + // result: (ADDQconst [int32(off)] ptr) for { - x := v_0 - y := v_1 - if !(shiftIsBounded(v)) { + off := auxIntToInt64(v.AuxInt) + ptr := v_0 + if !(is32Bit(off)) { break } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + v.reset(OpAMD64ADDQconst) + v.AuxInt = int32ToAuxInt(int32(off)) + v.AddArg(ptr) + return true + } + // match: (OffPtr [off] ptr) + // result: (ADDQ (MOVQconst [off]) ptr) + for { + off := auxIntToInt64(v.AuxInt) + ptr := v_0 + v.reset(OpAMD64ADDQ) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(off) + v.AddArg2(v0, ptr) return true } - return false } -func rewriteValueAMD64_OpLsh8x64(v *Value) bool { +func rewriteValueAMD64_OpOrFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Lsh8x64 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) + // match: (OrFloat32x16 x y) + // result: (VORPS512 y x) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.reset(OpAMD64VORPS512) + v.AddArg2(y, x) return true } - // match: (Lsh8x64 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpOrFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (OrFloat32x4 x y) + // result: (VORPS128 y x) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + v.reset(OpAMD64VORPS128) + v.AddArg2(y, x) return true } - return false } -func rewriteValueAMD64_OpLsh8x8(v *Value) bool { +func rewriteValueAMD64_OpOrFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Lsh8x8 x y) - // cond: 
!shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) + // match: (OrFloat32x8 x y) + // result: (VORPS256 y x) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v2.AuxInt = int8ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.reset(OpAMD64VORPS256) + v.AddArg2(y, x) return true } - // match: (Lsh8x8 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpOrFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (OrFloat64x2 x y) + // result: (VORPD128 y x) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + v.reset(OpAMD64VORPD128) + v.AddArg2(y, x) return true } - return false } -func rewriteValueAMD64_OpMax32F(v *Value) bool { +func rewriteValueAMD64_OpOrFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Max32F x y) - // result: (Neg32F (Min32F (Neg32F x) (Neg32F y))) + // match: (OrFloat64x4 x y) + // result: (VORPD256 y x) for { - t := v.Type x := v_0 y := v_1 - v.reset(OpNeg32F) - v.Type = t - v0 := b.NewValue0(v.Pos, OpMin32F, t) - v1 := b.NewValue0(v.Pos, OpNeg32F, t) - v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpNeg32F, t) - v2.AddArg(y) - v0.AddArg2(v1, v2) - v.AddArg(v0) + v.reset(OpAMD64VORPD256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpMax64F(v *Value) bool { +func rewriteValueAMD64_OpOrFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Max64F x y) - // result: (Neg64F (Min64F (Neg64F x) (Neg64F y))) + // match: (OrFloat64x8 x y) + // result: (VORPD512 y x) for { - t := v.Type x := v_0 y := v_1 - v.reset(OpNeg64F) - v.Type = t - v0 := b.NewValue0(v.Pos, OpMin64F, t) - v1 := b.NewValue0(v.Pos, OpNeg64F, t) - v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpNeg64F, t) - v2.AddArg(y) - v0.AddArg2(v1, v2) - v.AddArg(v0) + v.reset(OpAMD64VORPD512) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpMin32F(v *Value) bool { +func rewriteValueAMD64_OpOrInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Min32F x y) - // result: (POR (MINSS (MINSS x y) x) (MINSS x y)) + // match: (OrInt16x16 x y) + // result: (VPOR256 y x) for { - t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64POR) - v0 := b.NewValue0(v.Pos, OpAMD64MINSS, t) - v1 := b.NewValue0(v.Pos, OpAMD64MINSS, t) - v1.AddArg2(x, y) - v0.AddArg2(v1, x) - v.AddArg2(v0, v1) + v.reset(OpAMD64VPOR256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpMin64F(v *Value) bool { +func rewriteValueAMD64_OpOrInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Min64F x y) - // result: (POR (MINSD (MINSD x y) x) (MINSD x y)) + // match: (OrInt16x8 x y) + // result: (VPOR128 y x) for { - t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64POR) - v0 := b.NewValue0(v.Pos, OpAMD64MINSD, t) - v1 := b.NewValue0(v.Pos, OpAMD64MINSD, t) - v1.AddArg2(x, y) - v0.AddArg2(v1, x) - v.AddArg2(v0, v1) + v.reset(OpAMD64VPOR128) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpMod16(v *Value) bool { +func rewriteValueAMD64_OpOrInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Mod16 [a] x y) - // result: (Select1 (DIVW [a] x y)) + // 
match: (OrInt32x16 x y) + // result: (VPORD512 y x) for { - a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) - v0.AuxInt = boolToAuxInt(a) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPORD512) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpMod16u(v *Value) bool { +func rewriteValueAMD64_OpOrInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Mod16u x y) - // result: (Select1 (DIVWU x y)) + // match: (OrInt32x4 x y) + // result: (VPOR128 y x) for { x := v_0 y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPOR128) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpMod32(v *Value) bool { +func rewriteValueAMD64_OpOrInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Mod32 [a] x y) - // result: (Select1 (DIVL [a] x y)) + // match: (OrInt32x8 x y) + // result: (VPOR256 y x) for { - a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) - v0.AuxInt = boolToAuxInt(a) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPOR256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpMod32u(v *Value) bool { +func rewriteValueAMD64_OpOrInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Mod32u x y) - // result: (Select1 (DIVLU x y)) + // match: (OrInt64x2 x y) + // result: (VPOR128 y x) for { x := v_0 y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32)) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPOR128) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpMod64(v *Value) bool { +func rewriteValueAMD64_OpOrInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Mod64 [a] x y) - // result: (Select1 (DIVQ [a] x y)) + // match: (OrInt64x4 x y) + // result: (VPOR256 y x) for { - a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64)) - v0.AuxInt = boolToAuxInt(a) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPOR256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpMod64u(v *Value) bool { +func rewriteValueAMD64_OpOrInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Mod64u x y) - // result: (Select1 (DIVQU x y)) + // match: (OrInt64x8 x y) + // result: (VPORQ512 y x) for { x := v_0 y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPORQ512) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpMod8(v *Value) bool { +func rewriteValueAMD64_OpOrInt8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Mod8 x y) - // result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y))) + // match: (OrInt8x16 x y) + // result: (VPOR128 y x) for { x := v_0 y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) - v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) - v1.AddArg(x) 
- v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) - v2.AddArg(y) - v0.AddArg2(v1, v2) - v.AddArg(v0) + v.reset(OpAMD64VPOR128) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpMod8u(v *Value) bool { +func rewriteValueAMD64_OpOrInt8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Mod8u x y) - // result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) + // match: (OrInt8x32 x y) + // result: (VPOR256 y x) for { x := v_0 y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) - v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) - v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) - v2.AddArg(y) - v0.AddArg2(v1, v2) - v.AddArg(v0) + v.reset(OpAMD64VPOR256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpMove(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpOrUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Move [0] _ _ mem) - // result: mem + // match: (OrUint16x16 x y) + // result: (VPOR256 y x) for { - if auxIntToInt64(v.AuxInt) != 0 { - break - } - mem := v_2 - v.copyOf(mem) - return true - } - // match: (Move [1] dst src mem) - // result: (MOVBstore dst (MOVBload src mem) mem) - for { - if auxIntToInt64(v.AuxInt) != 1 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVBstore) - v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) - v0.AddArg2(src, mem) - v.AddArg3(dst, v0, mem) - return true - } - // match: (Move [2] dst src mem) - // result: (MOVWstore dst (MOVWload src mem) mem) - for { - if auxIntToInt64(v.AuxInt) != 2 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVWstore) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) - v0.AddArg2(src, mem) - v.AddArg3(dst, v0, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VPOR256) + v.AddArg2(y, x) return true } - // match: (Move [4] dst src mem) - // result: (MOVLstore dst (MOVLload src mem) mem) +} +func rewriteValueAMD64_OpOrUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (OrUint16x8 x y) + // result: (VPOR128 y x) for { - if auxIntToInt64(v.AuxInt) != 4 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVLstore) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v0.AddArg2(src, mem) - v.AddArg3(dst, v0, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VPOR128) + v.AddArg2(y, x) return true } - // match: (Move [8] dst src mem) - // result: (MOVQstore dst (MOVQload src mem) mem) +} +func rewriteValueAMD64_OpOrUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (OrUint32x16 x y) + // result: (VPORD512 y x) for { - if auxIntToInt64(v.AuxInt) != 8 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVQstore) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v0.AddArg2(src, mem) - v.AddArg3(dst, v0, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VPORD512) + v.AddArg2(y, x) return true } - // match: (Move [16] dst src mem) - // result: (MOVOstore dst (MOVOload src mem) mem) +} +func rewriteValueAMD64_OpOrUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (OrUint32x4 x y) + // result: (VPOR128 y x) for { - if auxIntToInt64(v.AuxInt) != 16 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVOstore) - v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) - v0.AddArg2(src, mem) - v.AddArg3(dst, v0, mem) + x 
:= v_0 + y := v_1 + v.reset(OpAMD64VPOR128) + v.AddArg2(y, x) return true } - // match: (Move [32] dst src mem) - // result: (Move [16] (OffPtr dst [16]) (OffPtr src [16]) (Move [16] dst src mem)) +} +func rewriteValueAMD64_OpOrUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (OrUint32x8 x y) + // result: (VPOR256 y x) for { - if auxIntToInt64(v.AuxInt) != 32 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpMove) - v.AuxInt = int64ToAuxInt(16) - v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) - v0.AuxInt = int64ToAuxInt(16) - v0.AddArg(dst) - v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) - v1.AuxInt = int64ToAuxInt(16) - v1.AddArg(src) - v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) - v2.AuxInt = int64ToAuxInt(16) - v2.AddArg3(dst, src, mem) - v.AddArg3(v0, v1, v2) + x := v_0 + y := v_1 + v.reset(OpAMD64VPOR256) + v.AddArg2(y, x) return true } - // match: (Move [48] dst src mem) - // result: (Move [32] (OffPtr dst [16]) (OffPtr src [16]) (Move [16] dst src mem)) +} +func rewriteValueAMD64_OpOrUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (OrUint64x2 x y) + // result: (VPOR128 y x) for { - if auxIntToInt64(v.AuxInt) != 48 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpMove) - v.AuxInt = int64ToAuxInt(32) - v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) - v0.AuxInt = int64ToAuxInt(16) - v0.AddArg(dst) - v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) - v1.AuxInt = int64ToAuxInt(16) - v1.AddArg(src) - v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) - v2.AuxInt = int64ToAuxInt(16) - v2.AddArg3(dst, src, mem) - v.AddArg3(v0, v1, v2) + x := v_0 + y := v_1 + v.reset(OpAMD64VPOR128) + v.AddArg2(y, x) return true } - // match: (Move [64] dst src mem) - // result: (Move [32] (OffPtr dst [32]) (OffPtr src [32]) (Move [32] dst src mem)) +} +func rewriteValueAMD64_OpOrUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (OrUint64x4 x y) + // result: (VPOR256 y x) for { - if auxIntToInt64(v.AuxInt) != 64 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpMove) - v.AuxInt = int64ToAuxInt(32) - v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) - v0.AuxInt = int64ToAuxInt(32) - v0.AddArg(dst) - v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) - v1.AuxInt = int64ToAuxInt(32) - v1.AddArg(src) - v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) - v2.AuxInt = int64ToAuxInt(32) - v2.AddArg3(dst, src, mem) - v.AddArg3(v0, v1, v2) + x := v_0 + y := v_1 + v.reset(OpAMD64VPOR256) + v.AddArg2(y, x) return true } - // match: (Move [3] dst src mem) - // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem)) +} +func rewriteValueAMD64_OpOrUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (OrUint64x8 x y) + // result: (VPORQ512 y x) for { - if auxIntToInt64(v.AuxInt) != 3 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVBstore) - v.AuxInt = int32ToAuxInt(2) - v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) - v0.AuxInt = int32ToAuxInt(2) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) + x := v_0 + y := v_1 + v.reset(OpAMD64VPORQ512) + v.AddArg2(y, x) return true } - // match: (Move [5] dst src mem) - // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) +} +func rewriteValueAMD64_OpOrUint8x16(v *Value) bool { + v_1 := 
v.Args[1] + v_0 := v.Args[0] + // match: (OrUint8x16 x y) + // result: (VPOR128 y x) for { - if auxIntToInt64(v.AuxInt) != 5 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVBstore) - v.AuxInt = int32ToAuxInt(4) - v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) - v0.AuxInt = int32ToAuxInt(4) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) + x := v_0 + y := v_1 + v.reset(OpAMD64VPOR128) + v.AddArg2(y, x) return true } - // match: (Move [6] dst src mem) - // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) +} +func rewriteValueAMD64_OpOrUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (OrUint8x32 x y) + // result: (VPOR256 y x) for { - if auxIntToInt64(v.AuxInt) != 6 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVWstore) - v.AuxInt = int32ToAuxInt(4) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) - v0.AuxInt = int32ToAuxInt(4) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) + x := v_0 + y := v_1 + v.reset(OpAMD64VPOR256) + v.AddArg2(y, x) return true } - // match: (Move [7] dst src mem) - // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem)) +} +func rewriteValueAMD64_OpPairwiseAddFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (PairwiseAddFloat32x4 x y) + // result: (VHADDPS128 y x) for { - if auxIntToInt64(v.AuxInt) != 7 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVLstore) - v.AuxInt = int32ToAuxInt(3) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v0.AuxInt = int32ToAuxInt(3) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) + x := v_0 + y := v_1 + v.reset(OpAMD64VHADDPS128) + v.AddArg2(y, x) return true } - // match: (Move [9] dst src mem) - // result: (MOVBstore [8] dst (MOVBload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) +} +func rewriteValueAMD64_OpPairwiseAddFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (PairwiseAddFloat32x8 x y) + // result: (VHADDPS256 y x) for { - if auxIntToInt64(v.AuxInt) != 9 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVBstore) - v.AuxInt = int32ToAuxInt(8) - v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) - v0.AuxInt = int32ToAuxInt(8) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) + x := v_0 + y := v_1 + v.reset(OpAMD64VHADDPS256) + v.AddArg2(y, x) return true } - // match: (Move [10] dst src mem) - // result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) +} +func rewriteValueAMD64_OpPairwiseAddFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (PairwiseAddFloat64x2 x y) + // result: (VHADDPD128 y x) for { - if auxIntToInt64(v.AuxInt) != 10 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVWstore) - v.AuxInt = 
int32ToAuxInt(8) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) - v0.AuxInt = int32ToAuxInt(8) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) + x := v_0 + y := v_1 + v.reset(OpAMD64VHADDPD128) + v.AddArg2(y, x) return true } - // match: (Move [11] dst src mem) - // result: (MOVLstore [7] dst (MOVLload [7] src mem) (MOVQstore dst (MOVQload src mem) mem)) +} +func rewriteValueAMD64_OpPairwiseAddFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (PairwiseAddFloat64x4 x y) + // result: (VHADDPD256 y x) for { - if auxIntToInt64(v.AuxInt) != 11 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVLstore) - v.AuxInt = int32ToAuxInt(7) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v0.AuxInt = int32ToAuxInt(7) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) + x := v_0 + y := v_1 + v.reset(OpAMD64VHADDPD256) + v.AddArg2(y, x) return true } - // match: (Move [12] dst src mem) - // result: (MOVLstore [8] dst (MOVLload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) +} +func rewriteValueAMD64_OpPairwiseAddInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (PairwiseAddInt16x16 x y) + // result: (VPHADDW256 y x) for { - if auxIntToInt64(v.AuxInt) != 12 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVLstore) - v.AuxInt = int32ToAuxInt(8) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v0.AuxInt = int32ToAuxInt(8) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) + x := v_0 + y := v_1 + v.reset(OpAMD64VPHADDW256) + v.AddArg2(y, x) return true } - // match: (Move [s] dst src mem) - // cond: s >= 13 && s <= 15 - // result: (MOVQstore [int32(s-8)] dst (MOVQload [int32(s-8)] src mem) (MOVQstore dst (MOVQload src mem) mem)) +} +func rewriteValueAMD64_OpPairwiseAddInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (PairwiseAddInt16x8 x y) + // result: (VPHADDW128 y x) for { - s := auxIntToInt64(v.AuxInt) - dst := v_0 - src := v_1 - mem := v_2 - if !(s >= 13 && s <= 15) { - break - } - v.reset(OpAMD64MOVQstore) - v.AuxInt = int32ToAuxInt(int32(s - 8)) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v0.AuxInt = int32ToAuxInt(int32(s - 8)) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) + x := v_0 + y := v_1 + v.reset(OpAMD64VPHADDW128) + v.AddArg2(y, x) return true } - // match: (Move [s] dst src mem) - // cond: s > 16 && s%16 != 0 && s%16 <= 8 - // result: (Move [s-s%16] (OffPtr dst [s%16]) (OffPtr src [s%16]) (MOVQstore dst (MOVQload src mem) mem)) +} +func rewriteValueAMD64_OpPairwiseAddInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (PairwiseAddInt32x4 x y) + // result: (VPHADDD128 y x) for { - s := auxIntToInt64(v.AuxInt) - dst := v_0 - src := v_1 - mem := v_2 - if !(s > 16 && s%16 != 0 && s%16 <= 8) { - break - } - v.reset(OpMove) - v.AuxInt = int64ToAuxInt(s - s%16) - 
v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) - v0.AuxInt = int64ToAuxInt(s % 16) - v0.AddArg(dst) - v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) - v1.AuxInt = int64ToAuxInt(s % 16) - v1.AddArg(src) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v3.AddArg2(src, mem) - v2.AddArg3(dst, v3, mem) - v.AddArg3(v0, v1, v2) + x := v_0 + y := v_1 + v.reset(OpAMD64VPHADDD128) + v.AddArg2(y, x) return true } - // match: (Move [s] dst src mem) - // cond: s > 16 && s%16 != 0 && s%16 > 8 - // result: (Move [s-s%16] (OffPtr dst [s%16]) (OffPtr src [s%16]) (MOVOstore dst (MOVOload src mem) mem)) +} +func rewriteValueAMD64_OpPairwiseAddInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (PairwiseAddInt32x8 x y) + // result: (VPHADDD256 y x) for { - s := auxIntToInt64(v.AuxInt) - dst := v_0 - src := v_1 - mem := v_2 - if !(s > 16 && s%16 != 0 && s%16 > 8) { - break - } - v.reset(OpMove) - v.AuxInt = int64ToAuxInt(s - s%16) - v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) - v0.AuxInt = int64ToAuxInt(s % 16) - v0.AddArg(dst) - v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) - v1.AuxInt = int64ToAuxInt(s % 16) - v1.AddArg(src) - v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) - v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) - v3.AddArg2(src, mem) - v2.AddArg3(dst, v3, mem) - v.AddArg3(v0, v1, v2) + x := v_0 + y := v_1 + v.reset(OpAMD64VPHADDD256) + v.AddArg2(y, x) return true } - // match: (Move [s] dst src mem) - // cond: s > 64 && s <= 16*64 && s%16 == 0 && logLargeCopy(v, s) - // result: (DUFFCOPY [s] dst src mem) +} +func rewriteValueAMD64_OpPairwiseAddUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (PairwiseAddUint16x16 x y) + // result: (VPHADDW256 y x) for { - s := auxIntToInt64(v.AuxInt) - dst := v_0 - src := v_1 - mem := v_2 - if !(s > 64 && s <= 16*64 && s%16 == 0 && logLargeCopy(v, s)) { - break - } - v.reset(OpAMD64DUFFCOPY) - v.AuxInt = int64ToAuxInt(s) - v.AddArg3(dst, src, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VPHADDW256) + v.AddArg2(y, x) return true } - // match: (Move [s] dst src mem) - // cond: s > 16*64 && s%8 == 0 && logLargeCopy(v, s) - // result: (REPMOVSQ dst src (MOVQconst [s/8]) mem) +} +func rewriteValueAMD64_OpPairwiseAddUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (PairwiseAddUint16x8 x y) + // result: (VPHADDW128 y x) for { - s := auxIntToInt64(v.AuxInt) - dst := v_0 - src := v_1 - mem := v_2 - if !(s > 16*64 && s%8 == 0 && logLargeCopy(v, s)) { - break - } - v.reset(OpAMD64REPMOVSQ) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) - v0.AuxInt = int64ToAuxInt(s / 8) - v.AddArg4(dst, src, v0, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VPHADDW128) + v.AddArg2(y, x) return true } - return false } -func rewriteValueAMD64_OpNeg32F(v *Value) bool { +func rewriteValueAMD64_OpPairwiseAddUint32x4(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Neg32F x) - // result: (PXOR x (MOVSSconst [float32(math.Copysign(0, -1))])) + // match: (PairwiseAddUint32x4 x y) + // result: (VPHADDD128 y x) for { x := v_0 - v.reset(OpAMD64PXOR) - v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32) - v0.AuxInt = float32ToAuxInt(float32(math.Copysign(0, -1))) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPHADDD128) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpNeg64F(v *Value) bool { +func rewriteValueAMD64_OpPairwiseAddUint32x8(v *Value) bool { + 
v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Neg64F x) - // result: (PXOR x (MOVSDconst [math.Copysign(0, -1)])) + // match: (PairwiseAddUint32x8 x y) + // result: (VPHADDD256 y x) for { x := v_0 - v.reset(OpAMD64PXOR) - v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64) - v0.AuxInt = float64ToAuxInt(math.Copysign(0, -1)) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPHADDD256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpNeq16(v *Value) bool { +func rewriteValueAMD64_OpPairwiseSubFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Neq16 x y) - // result: (SETNE (CMPW x y)) + // match: (PairwiseSubFloat32x4 x y) + // result: (VHSUBPS128 y x) for { x := v_0 y := v_1 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VHSUBPS128) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpNeq32(v *Value) bool { +func rewriteValueAMD64_OpPairwiseSubFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Neq32 x y) - // result: (SETNE (CMPL x y)) + // match: (PairwiseSubFloat32x8 x y) + // result: (VHSUBPS256 y x) for { x := v_0 y := v_1 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VHSUBPS256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpNeq32F(v *Value) bool { +func rewriteValueAMD64_OpPairwiseSubFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Neq32F x y) - // result: (SETNEF (UCOMISS x y)) + // match: (PairwiseSubFloat64x2 x y) + // result: (VHSUBPD128 y x) for { x := v_0 y := v_1 - v.reset(OpAMD64SETNEF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VHSUBPD128) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpNeq64(v *Value) bool { +func rewriteValueAMD64_OpPairwiseSubFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Neq64 x y) - // result: (SETNE (CMPQ x y)) + // match: (PairwiseSubFloat64x4 x y) + // result: (VHSUBPD256 y x) for { x := v_0 y := v_1 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VHSUBPD256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpNeq64F(v *Value) bool { +func rewriteValueAMD64_OpPairwiseSubInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Neq64F x y) - // result: (SETNEF (UCOMISD x y)) + // match: (PairwiseSubInt16x16 x y) + // result: (VPHSUBW256 y x) for { x := v_0 y := v_1 - v.reset(OpAMD64SETNEF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPHSUBW256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpNeq8(v *Value) bool { +func rewriteValueAMD64_OpPairwiseSubInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Neq8 x y) - // result: (SETNE (CMPB x y)) + // match: (PairwiseSubInt16x8 x y) + // result: (VPHSUBW128 y x) for { x := v_0 y := v_1 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPHSUBW128) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpNeqB(v *Value) bool { +func rewriteValueAMD64_OpPairwiseSubInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := 
v.Args[0] - b := v.Block - // match: (NeqB x y) - // result: (SETNE (CMPB x y)) + // match: (PairwiseSubInt32x4 x y) + // result: (VPHSUBD128 y x) for { x := v_0 y := v_1 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPHSUBD128) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpNeqPtr(v *Value) bool { +func rewriteValueAMD64_OpPairwiseSubInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (NeqPtr x y) - // result: (SETNE (CMPQ x y)) + // match: (PairwiseSubInt32x8 x y) + // result: (VPHSUBD256 y x) for { x := v_0 y := v_1 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPHSUBD256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpNot(v *Value) bool { +func rewriteValueAMD64_OpPairwiseSubUint16x16(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Not x) - // result: (XORLconst [1] x) + // match: (PairwiseSubUint16x16 x y) + // result: (VPHSUBW256 y x) for { x := v_0 - v.reset(OpAMD64XORLconst) - v.AuxInt = int32ToAuxInt(1) - v.AddArg(x) + y := v_1 + v.reset(OpAMD64VPHSUBW256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpOffPtr(v *Value) bool { +func rewriteValueAMD64_OpPairwiseSubUint16x8(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (OffPtr [off] ptr) - // cond: is32Bit(off) - // result: (ADDQconst [int32(off)] ptr) + // match: (PairwiseSubUint16x8 x y) + // result: (VPHSUBW128 y x) for { - off := auxIntToInt64(v.AuxInt) - ptr := v_0 - if !(is32Bit(off)) { - break - } - v.reset(OpAMD64ADDQconst) - v.AuxInt = int32ToAuxInt(int32(off)) - v.AddArg(ptr) + x := v_0 + y := v_1 + v.reset(OpAMD64VPHSUBW128) + v.AddArg2(y, x) return true } - // match: (OffPtr [off] ptr) - // result: (ADDQ (MOVQconst [off]) ptr) +} +func rewriteValueAMD64_OpPairwiseSubUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (PairwiseSubUint32x4 x y) + // result: (VPHSUBD128 y x) for { - off := auxIntToInt64(v.AuxInt) - ptr := v_0 - v.reset(OpAMD64ADDQ) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) - v0.AuxInt = int64ToAuxInt(off) - v.AddArg2(v0, ptr) + x := v_0 + y := v_1 + v.reset(OpAMD64VPHSUBD128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpPairwiseSubUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (PairwiseSubUint32x8 x y) + // result: (VPHSUBD256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPHSUBD256) + v.AddArg2(y, x) return true } } @@ -28087,6 +46426,270 @@ func rewriteValueAMD64_OpPopCount8(v *Value) bool { return true } } +func rewriteValueAMD64_OpPopCountInt16x16(v *Value) bool { + v_0 := v.Args[0] + // match: (PopCountInt16x16 x) + // result: (VPOPCNTW256 x) + for { + x := v_0 + v.reset(OpAMD64VPOPCNTW256) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpPopCountInt16x32(v *Value) bool { + v_0 := v.Args[0] + // match: (PopCountInt16x32 x) + // result: (VPOPCNTW512 x) + for { + x := v_0 + v.reset(OpAMD64VPOPCNTW512) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpPopCountInt16x8(v *Value) bool { + v_0 := v.Args[0] + // match: (PopCountInt16x8 x) + // result: (VPOPCNTW128 x) + for { + x := v_0 + v.reset(OpAMD64VPOPCNTW128) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpPopCountInt32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (PopCountInt32x16 x) + // result: 
(VPOPCNTD512 x) + for { + x := v_0 + v.reset(OpAMD64VPOPCNTD512) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpPopCountInt32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (PopCountInt32x4 x) + // result: (VPOPCNTD128 x) + for { + x := v_0 + v.reset(OpAMD64VPOPCNTD128) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpPopCountInt32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (PopCountInt32x8 x) + // result: (VPOPCNTD256 x) + for { + x := v_0 + v.reset(OpAMD64VPOPCNTD256) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpPopCountInt64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (PopCountInt64x2 x) + // result: (VPOPCNTQ128 x) + for { + x := v_0 + v.reset(OpAMD64VPOPCNTQ128) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpPopCountInt64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (PopCountInt64x4 x) + // result: (VPOPCNTQ256 x) + for { + x := v_0 + v.reset(OpAMD64VPOPCNTQ256) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpPopCountInt64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (PopCountInt64x8 x) + // result: (VPOPCNTQ512 x) + for { + x := v_0 + v.reset(OpAMD64VPOPCNTQ512) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpPopCountInt8x16(v *Value) bool { + v_0 := v.Args[0] + // match: (PopCountInt8x16 x) + // result: (VPOPCNTB128 x) + for { + x := v_0 + v.reset(OpAMD64VPOPCNTB128) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpPopCountInt8x32(v *Value) bool { + v_0 := v.Args[0] + // match: (PopCountInt8x32 x) + // result: (VPOPCNTB256 x) + for { + x := v_0 + v.reset(OpAMD64VPOPCNTB256) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpPopCountInt8x64(v *Value) bool { + v_0 := v.Args[0] + // match: (PopCountInt8x64 x) + // result: (VPOPCNTB512 x) + for { + x := v_0 + v.reset(OpAMD64VPOPCNTB512) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpPopCountUint16x16(v *Value) bool { + v_0 := v.Args[0] + // match: (PopCountUint16x16 x) + // result: (VPOPCNTW256 x) + for { + x := v_0 + v.reset(OpAMD64VPOPCNTW256) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpPopCountUint16x32(v *Value) bool { + v_0 := v.Args[0] + // match: (PopCountUint16x32 x) + // result: (VPOPCNTW512 x) + for { + x := v_0 + v.reset(OpAMD64VPOPCNTW512) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpPopCountUint16x8(v *Value) bool { + v_0 := v.Args[0] + // match: (PopCountUint16x8 x) + // result: (VPOPCNTW128 x) + for { + x := v_0 + v.reset(OpAMD64VPOPCNTW128) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpPopCountUint32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (PopCountUint32x16 x) + // result: (VPOPCNTD512 x) + for { + x := v_0 + v.reset(OpAMD64VPOPCNTD512) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpPopCountUint32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (PopCountUint32x4 x) + // result: (VPOPCNTD128 x) + for { + x := v_0 + v.reset(OpAMD64VPOPCNTD128) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpPopCountUint32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (PopCountUint32x8 x) + // result: (VPOPCNTD256 x) + for { + x := v_0 + v.reset(OpAMD64VPOPCNTD256) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpPopCountUint64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (PopCountUint64x2 x) + // result: (VPOPCNTQ128 x) + for { + x := v_0 + v.reset(OpAMD64VPOPCNTQ128) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpPopCountUint64x4(v *Value) bool { + v_0 := v.Args[0] + // 
match: (PopCountUint64x4 x) + // result: (VPOPCNTQ256 x) + for { + x := v_0 + v.reset(OpAMD64VPOPCNTQ256) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpPopCountUint64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (PopCountUint64x8 x) + // result: (VPOPCNTQ512 x) + for { + x := v_0 + v.reset(OpAMD64VPOPCNTQ512) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpPopCountUint8x16(v *Value) bool { + v_0 := v.Args[0] + // match: (PopCountUint8x16 x) + // result: (VPOPCNTB128 x) + for { + x := v_0 + v.reset(OpAMD64VPOPCNTB128) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpPopCountUint8x32(v *Value) bool { + v_0 := v.Args[0] + // match: (PopCountUint8x32 x) + // result: (VPOPCNTB256 x) + for { + x := v_0 + v.reset(OpAMD64VPOPCNTB256) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpPopCountUint8x64(v *Value) bool { + v_0 := v.Args[0] + // match: (PopCountUint8x64 x) + // result: (VPOPCNTB512 x) + for { + x := v_0 + v.reset(OpAMD64VPOPCNTB512) + v.AddArg(x) + return true + } +} func rewriteValueAMD64_OpRoundToEven(v *Value) bool { v_0 := v.Args[0] // match: (RoundToEven x) @@ -29427,6 +48030,370 @@ func rewriteValueAMD64_OpRsh8x8(v *Value) bool { } return false } +func rewriteValueAMD64_OpSaturatedAddInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedAddInt16x16 x y) + // result: (VPADDSW256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPADDSW256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedAddInt16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedAddInt16x32 x y) + // result: (VPADDSW512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPADDSW512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedAddInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedAddInt16x8 x y) + // result: (VPADDSW128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPADDSW128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedAddInt8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedAddInt8x16 x y) + // result: (VPADDSB128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPADDSB128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedAddInt8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedAddInt8x32 x y) + // result: (VPADDSB256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPADDSB256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedAddInt8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedAddInt8x64 x y) + // result: (VPADDSB512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPADDSB512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedAddUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedAddUint16x16 x y) + // result: (VPADDSW256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPADDSW256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedAddUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedAddUint16x32 x y) + // result: (VPADDSW512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPADDSW512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedAddUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedAddUint16x8 x y) + // result: (VPADDSW128 y x) + for 
{ + x := v_0 + y := v_1 + v.reset(OpAMD64VPADDSW128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedAddUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedAddUint8x16 x y) + // result: (VPADDSB128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPADDSB128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedAddUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedAddUint8x32 x y) + // result: (VPADDSB256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPADDSB256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedAddUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedAddUint8x64 x y) + // result: (VPADDSB512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPADDSB512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedPairwiseAddInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedPairwiseAddInt16x16 x y) + // result: (VPHADDSW256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPHADDSW256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedPairwiseAddInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedPairwiseAddInt16x8 x y) + // result: (VPHADDSW128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPHADDSW128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedPairwiseSubInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedPairwiseSubInt16x16 x y) + // result: (VPHSUBSW256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPHSUBSW256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedPairwiseSubInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedPairwiseSubInt16x8 x y) + // result: (VPHSUBSW128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPHSUBSW128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedSubInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedSubInt16x16 x y) + // result: (VPSUBSW256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBSW256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedSubInt16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedSubInt16x32 x y) + // result: (VPSUBSW512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBSW512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedSubInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedSubInt16x8 x y) + // result: (VPSUBSW128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBSW128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedSubInt8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedSubInt8x16 x y) + // result: (VPSUBSB128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBSB128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedSubInt8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedSubInt8x32 x y) + // result: (VPSUBSB256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBSB256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedSubInt8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedSubInt8x64 x y) + // result: (VPSUBSB512 y x) + 
for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBSB512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedSubUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedSubUint16x16 x y) + // result: (VPSUBSW256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBSW256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedSubUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedSubUint16x32 x y) + // result: (VPSUBSW512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBSW512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedSubUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedSubUint16x8 x y) + // result: (VPSUBSW128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBSW128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedSubUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedSubUint8x16 x y) + // result: (VPSUBSB128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBSB128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedSubUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedSubUint8x32 x y) + // result: (VPSUBSB256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBSB256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedSubUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedSubUint8x64 x y) + // result: (VPSUBSB512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBSB512) + v.AddArg2(y, x) + return true + } +} func rewriteValueAMD64_OpSelect0(v *Value) bool { v_0 := v.Args[0] b := v.Block @@ -29852,6 +48819,84 @@ func rewriteValueAMD64_OpSelectN(v *Value) bool { } return false } +func rewriteValueAMD64_OpSignInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SignInt16x16 x y) + // result: (VPSIGNW256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSIGNW256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSignInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SignInt16x8 x y) + // result: (VPSIGNW128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSIGNW128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSignInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SignInt32x4 x y) + // result: (VPSIGND128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSIGND128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSignInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SignInt32x8 x y) + // result: (VPSIGND256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSIGND256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSignInt8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SignInt8x16 x y) + // result: (VPSIGNB128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSIGNB128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSignInt8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SignInt8x32 x y) + // result: (VPSIGNB256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSIGNB256) + v.AddArg2(y, x) + return true + } +} func rewriteValueAMD64_OpSlicemask(v *Value) bool { v_0 := v.Args[0] b := v.Block @@ -29896,13 +48941,79 @@ func rewriteValueAMD64_OpSpectreSliceIndex(v *Value) 
bool { // result: (CMOVQHI x (MOVQconst [0]) (CMPQ x y)) for { x := v_0 - y := v_1 - v.reset(OpAMD64CMOVQHI) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) - v0.AuxInt = int64ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v1.AddArg2(x, y) - v.AddArg3(x, v0, v1) + y := v_1 + v.reset(OpAMD64CMOVQHI) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v1.AddArg2(x, y) + v.AddArg3(x, v0, v1) + return true + } +} +func rewriteValueAMD64_OpSqrtFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (SqrtFloat32x16 x) + // result: (VSQRTPS512 x) + for { + x := v_0 + v.reset(OpAMD64VSQRTPS512) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpSqrtFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (SqrtFloat32x4 x) + // result: (VSQRTPS128 x) + for { + x := v_0 + v.reset(OpAMD64VSQRTPS128) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpSqrtFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (SqrtFloat32x8 x) + // result: (VSQRTPS256 x) + for { + x := v_0 + v.reset(OpAMD64VSQRTPS256) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpSqrtFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (SqrtFloat64x2 x) + // result: (VSQRTPD128 x) + for { + x := v_0 + v.reset(OpAMD64VSQRTPD128) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpSqrtFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (SqrtFloat64x4 x) + // result: (VSQRTPD256 x) + for { + x := v_0 + v.reset(OpAMD64VSQRTPD256) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpSqrtFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (SqrtFloat64x8 x) + // result: (VSQRTPD512 x) + for { + x := v_0 + v.reset(OpAMD64VSQRTPD512) + v.AddArg(x) return true } } @@ -30047,6 +49158,396 @@ func rewriteValueAMD64_OpStore(v *Value) bool { } return false } +func rewriteValueAMD64_OpSubFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubFloat32x16 x y) + // result: (VADDPS512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VADDPS512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubFloat32x4 x y) + // result: (VADDPS128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VADDPS128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubFloat32x8 x y) + // result: (VADDPS256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VADDPS256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubFloat64x2 x y) + // result: (VADDPD128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VADDPD128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubFloat64x4 x y) + // result: (VADDPD256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VADDPD256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubFloat64x8 x y) + // result: (VADDPD512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VADDPD512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: 
(SubInt16x16 x y) + // result: (VPSUBW256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBW256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubInt16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubInt16x32 x y) + // result: (VPSUBW512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBW512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubInt16x8 x y) + // result: (VPSUBW128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBW128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubInt32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubInt32x16 x y) + // result: (VPSUBD512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBD512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubInt32x4 x y) + // result: (VPSUBD128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBD128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubInt32x8 x y) + // result: (VPSUBD256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBD256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubInt64x2 x y) + // result: (VPSUBQ128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBQ128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubInt64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubInt64x4 x y) + // result: (VPSUBQ256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBQ256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubInt64x8 x y) + // result: (VPSUBQ512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBQ512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubInt8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubInt8x16 x y) + // result: (VPSUBB128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBB128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubInt8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubInt8x32 x y) + // result: (VPSUBB256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBB256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubInt8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubInt8x64 x y) + // result: (VPSUBB512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBB512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubUint16x16 x y) + // result: (VPSUBW256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBW256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubUint16x32 x y) + // result: (VPSUBW512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBW512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubUint16x8 x y) + // result: (VPSUBW128 y x) + for { + x := v_0 + y := v_1 + 
v.reset(OpAMD64VPSUBW128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubUint32x16 x y) + // result: (VPSUBD512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBD512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubUint32x4 x y) + // result: (VPSUBD128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBD128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubUint32x8 x y) + // result: (VPSUBD256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBD256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubUint64x2 x y) + // result: (VPSUBQ128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBQ128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubUint64x4 x y) + // result: (VPSUBQ256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBQ256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubUint64x8 x y) + // result: (VPSUBQ512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBQ512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubUint8x16 x y) + // result: (VPSUBB128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBB128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubUint8x32 x y) + // result: (VPSUBB256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBB256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubUint8x64 x y) + // result: (VPSUBB512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBB512) + v.AddArg2(y, x) + return true + } +} func rewriteValueAMD64_OpTrunc(v *Value) bool { v_0 := v.Args[0] // match: (Trunc x) @@ -30059,6 +49560,344 @@ func rewriteValueAMD64_OpTrunc(v *Value) bool { return true } } +func rewriteValueAMD64_OpXorFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XorFloat32x16 x y) + // result: (VXORPS512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VXORPS512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpXorFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XorFloat32x4 x y) + // result: (VXORPS128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VXORPS128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpXorFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XorFloat32x8 x y) + // result: (VXORPS256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VXORPS256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpXorFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XorFloat64x2 x y) + // result: (VXORPD128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VXORPD128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpXorFloat64x4(v *Value) bool { + v_1 := 
v.Args[1] + v_0 := v.Args[0] + // match: (XorFloat64x4 x y) + // result: (VXORPD256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VXORPD256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpXorFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XorFloat64x8 x y) + // result: (VXORPD512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VXORPD512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpXorInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XorInt16x16 x y) + // result: (VPXOR256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPXOR256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpXorInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XorInt16x8 x y) + // result: (VPXOR128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPXOR128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpXorInt32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XorInt32x16 x y) + // result: (VPXORD512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPXORD512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpXorInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XorInt32x4 x y) + // result: (VPXOR128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPXOR128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpXorInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XorInt32x8 x y) + // result: (VPXOR256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPXOR256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpXorInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XorInt64x2 x y) + // result: (VPXOR128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPXOR128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpXorInt64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XorInt64x4 x y) + // result: (VPXOR256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPXOR256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpXorInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XorInt64x8 x y) + // result: (VPXORQ512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPXORQ512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpXorInt8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XorInt8x16 x y) + // result: (VPXOR128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPXOR128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpXorInt8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XorInt8x32 x y) + // result: (VPXOR256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPXOR256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpXorUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XorUint16x16 x y) + // result: (VPXOR256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPXOR256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpXorUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XorUint16x8 x y) + // result: (VPXOR128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPXOR128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpXorUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XorUint32x16 x y) + // result: (VPXORD512 y x) + for { + 
x := v_0 + y := v_1 + v.reset(OpAMD64VPXORD512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpXorUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XorUint32x4 x y) + // result: (VPXOR128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPXOR128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpXorUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XorUint32x8 x y) + // result: (VPXOR256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPXOR256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpXorUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XorUint64x2 x y) + // result: (VPXOR128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPXOR128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpXorUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XorUint64x4 x y) + // result: (VPXOR256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPXOR256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpXorUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XorUint64x8 x y) + // result: (VPXORQ512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPXORQ512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpXorUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XorUint8x16 x y) + // result: (VPXOR128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPXOR128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpXorUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XorUint8x32 x y) + // result: (VPXOR256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPXOR256) + v.AddArg2(y, x) + return true + } +} func rewriteValueAMD64_OpZero(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index c185a956674b83..cf3c1813e47740 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -1,15 +1,1519 @@ -// Code generated by internal/simd/_gen using 'go run .'; DO NOT EDIT. - +// Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. 
package ssagen import ( - // "cmd/compile/internal/ir" - // "cmd/compile/internal/ssa" - // "cmd/compile/internal/types" + "cmd/compile/internal/ir" + "cmd/compile/internal/ssa" + "cmd/compile/internal/types" "cmd/internal/sys" ) +const simdPackage = "simd" + func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies ...sys.ArchFamily)) { - // addF("internal/simd", "Int32x4.Uint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - // etc + addF(simdPackage, "Float32x16.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.Sqrt", opLen1(ssa.OpSqrtFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.Sqrt", opLen1(ssa.OpSqrtFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.Sqrt", opLen1(ssa.OpSqrtFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x2.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.Sqrt", opLen1(ssa.OpSqrtFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.Sqrt", opLen1(ssa.OpSqrtFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.Sqrt", opLen1(ssa.OpSqrtFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x16.Absolute", opLen1(ssa.OpAbsoluteInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.PopCount", opLen1(ssa.OpPopCountInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.Absolute", opLen1(ssa.OpAbsoluteInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.PopCount", opLen1(ssa.OpPopCountInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.Absolute", opLen1(ssa.OpAbsoluteInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.PopCount", opLen1(ssa.OpPopCountInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x16.Absolute", opLen1(ssa.OpAbsoluteInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.PopCount", opLen1(ssa.OpPopCountInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, 
"Int32x4.Absolute", opLen1(ssa.OpAbsoluteInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.PopCount", opLen1(ssa.OpPopCountInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.Absolute", opLen1(ssa.OpAbsoluteInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.PopCount", opLen1(ssa.OpPopCountInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x2.Absolute", opLen1(ssa.OpAbsoluteInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.PopCount", opLen1(ssa.OpPopCountInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.Absolute", opLen1(ssa.OpAbsoluteInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.PopCount", opLen1(ssa.OpPopCountInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.Absolute", opLen1(ssa.OpAbsoluteInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.PopCount", opLen1(ssa.OpPopCountInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.Absolute", opLen1(ssa.OpAbsoluteInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.PopCount", opLen1(ssa.OpPopCountInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.Absolute", opLen1(ssa.OpAbsoluteInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.PopCount", opLen1(ssa.OpPopCountInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.Absolute", opLen1(ssa.OpAbsoluteInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.PopCount", opLen1(ssa.OpPopCountInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x16.PopCount", opLen1(ssa.OpPopCountUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.PopCount", opLen1(ssa.OpPopCountUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.PopCount", opLen1(ssa.OpPopCountUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x16.PopCount", opLen1(ssa.OpPopCountUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.PopCount", opLen1(ssa.OpPopCountUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.PopCount", opLen1(ssa.OpPopCountUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x2.PopCount", opLen1(ssa.OpPopCountUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.PopCount", opLen1(ssa.OpPopCountUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.PopCount", opLen1(ssa.OpPopCountUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.PopCount", opLen1(ssa.OpPopCountUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.PopCount", opLen1(ssa.OpPopCountUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.PopCount", opLen1(ssa.OpPopCountUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.Add", opLen2(ssa.OpAddFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.And", opLen2(ssa.OpAndFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.AndNot", opLen2(ssa.OpAndNotFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.Div", opLen2(ssa.OpDivFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.Equal", opLen2(ssa.OpEqualFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.Greater", opLen2(ssa.OpGreaterFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.GreaterEqual", opLen2(ssa.OpGreaterEqualFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, 
"Float32x16.IsNan", opLen2(ssa.OpIsNanFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.Less", opLen2(ssa.OpLessFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.LessEqual", opLen2(ssa.OpLessEqualFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedApproximateReciprocal", opLen2(ssa.OpMaskedApproximateReciprocalFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedApproximateReciprocalOfSqrt", opLen2(ssa.OpMaskedApproximateReciprocalOfSqrtFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.Max", opLen2(ssa.OpMaxFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.Min", opLen2(ssa.OpMinFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.Mul", opLen2(ssa.OpMulFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.NotEqual", opLen2(ssa.OpNotEqualFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.Or", opLen2(ssa.OpOrFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.Sub", opLen2(ssa.OpSubFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.Xor", opLen2(ssa.OpXorFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.Add", opLen2(ssa.OpAddFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.And", opLen2(ssa.OpAndFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.AndNot", opLen2(ssa.OpAndNotFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.Div", opLen2(ssa.OpDivFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.Equal", opLen2(ssa.OpEqualFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.Greater", opLen2(ssa.OpGreaterFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.GreaterEqual", opLen2(ssa.OpGreaterEqualFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.IsNan", opLen2(ssa.OpIsNanFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.Less", opLen2(ssa.OpLessFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.LessEqual", opLen2(ssa.OpLessEqualFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedApproximateReciprocal", opLen2(ssa.OpMaskedApproximateReciprocalFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedApproximateReciprocalOfSqrt", opLen2(ssa.OpMaskedApproximateReciprocalOfSqrtFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.Max", opLen2(ssa.OpMaxFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.Min", opLen2(ssa.OpMinFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.Mul", opLen2(ssa.OpMulFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.NotEqual", opLen2(ssa.OpNotEqualFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.Or", opLen2(ssa.OpOrFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.PairwiseAdd", opLen2(ssa.OpPairwiseAddFloat32x4, 
types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.PairwiseSub", opLen2(ssa.OpPairwiseSubFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.Sub", opLen2(ssa.OpSubFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.Xor", opLen2(ssa.OpXorFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.Add", opLen2(ssa.OpAddFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.And", opLen2(ssa.OpAndFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.AndNot", opLen2(ssa.OpAndNotFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.Div", opLen2(ssa.OpDivFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.Equal", opLen2(ssa.OpEqualFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.Greater", opLen2(ssa.OpGreaterFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.GreaterEqual", opLen2(ssa.OpGreaterEqualFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.IsNan", opLen2(ssa.OpIsNanFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.Less", opLen2(ssa.OpLessFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.LessEqual", opLen2(ssa.OpLessEqualFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedApproximateReciprocal", opLen2(ssa.OpMaskedApproximateReciprocalFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedApproximateReciprocalOfSqrt", opLen2(ssa.OpMaskedApproximateReciprocalOfSqrtFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.Max", opLen2(ssa.OpMaxFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.Min", opLen2(ssa.OpMinFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.Mul", opLen2(ssa.OpMulFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.NotEqual", opLen2(ssa.OpNotEqualFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.Or", opLen2(ssa.OpOrFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.PairwiseAdd", opLen2(ssa.OpPairwiseAddFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.PairwiseSub", opLen2(ssa.OpPairwiseSubFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.Sub", opLen2(ssa.OpSubFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.Xor", opLen2(ssa.OpXorFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x2.Add", opLen2(ssa.OpAddFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.And", opLen2(ssa.OpAndFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.AndNot", opLen2(ssa.OpAndNotFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.Div", opLen2(ssa.OpDivFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.Equal", opLen2(ssa.OpEqualFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.Greater", opLen2(ssa.OpGreaterFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.GreaterEqual", opLen2(ssa.OpGreaterEqualFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.IsNan", opLen2(ssa.OpIsNanFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.Less", 
opLen2(ssa.OpLessFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.LessEqual", opLen2(ssa.OpLessEqualFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedApproximateReciprocal", opLen2(ssa.OpMaskedApproximateReciprocalFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedApproximateReciprocalOfSqrt", opLen2(ssa.OpMaskedApproximateReciprocalOfSqrtFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.Max", opLen2(ssa.OpMaxFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.Min", opLen2(ssa.OpMinFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.Mul", opLen2(ssa.OpMulFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.NotEqual", opLen2(ssa.OpNotEqualFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.Or", opLen2(ssa.OpOrFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.PairwiseAdd", opLen2(ssa.OpPairwiseAddFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.PairwiseSub", opLen2(ssa.OpPairwiseSubFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.Sub", opLen2(ssa.OpSubFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.Xor", opLen2(ssa.OpXorFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.Add", opLen2(ssa.OpAddFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.And", opLen2(ssa.OpAndFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.AndNot", opLen2(ssa.OpAndNotFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.Div", opLen2(ssa.OpDivFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.Equal", opLen2(ssa.OpEqualFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.Greater", opLen2(ssa.OpGreaterFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.GreaterEqual", opLen2(ssa.OpGreaterEqualFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.IsNan", opLen2(ssa.OpIsNanFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.Less", opLen2(ssa.OpLessFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.LessEqual", opLen2(ssa.OpLessEqualFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedApproximateReciprocal", opLen2(ssa.OpMaskedApproximateReciprocalFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedApproximateReciprocalOfSqrt", opLen2(ssa.OpMaskedApproximateReciprocalOfSqrtFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.Max", opLen2(ssa.OpMaxFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.Min", opLen2(ssa.OpMinFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.Mul", opLen2(ssa.OpMulFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.NotEqual", opLen2(ssa.OpNotEqualFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.Or", opLen2(ssa.OpOrFloat64x4, types.TypeVec256), sys.AMD64) + 
addF(simdPackage, "Float64x4.PairwiseAdd", opLen2(ssa.OpPairwiseAddFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.PairwiseSub", opLen2(ssa.OpPairwiseSubFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.Sub", opLen2(ssa.OpSubFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.Xor", opLen2(ssa.OpXorFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.Add", opLen2(ssa.OpAddFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.And", opLen2(ssa.OpAndFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.AndNot", opLen2(ssa.OpAndNotFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.Div", opLen2(ssa.OpDivFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.Equal", opLen2(ssa.OpEqualFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.Greater", opLen2(ssa.OpGreaterFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.GreaterEqual", opLen2(ssa.OpGreaterEqualFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.IsNan", opLen2(ssa.OpIsNanFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.Less", opLen2(ssa.OpLessFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.LessEqual", opLen2(ssa.OpLessEqualFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedApproximateReciprocal", opLen2(ssa.OpMaskedApproximateReciprocalFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedApproximateReciprocalOfSqrt", opLen2(ssa.OpMaskedApproximateReciprocalOfSqrtFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.Max", opLen2(ssa.OpMaxFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.Min", opLen2(ssa.OpMinFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.Mul", opLen2(ssa.OpMulFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.NotEqual", opLen2(ssa.OpNotEqualFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.Or", opLen2(ssa.OpOrFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.Sub", opLen2(ssa.OpSubFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.Xor", opLen2(ssa.OpXorFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x16.Add", opLen2(ssa.OpAddInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.And", opLen2(ssa.OpAndInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.AndNot", opLen2(ssa.OpAndNotInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.Equal", opLen2(ssa.OpEqualInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.Greater", opLen2(ssa.OpGreaterInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.GreaterEqual", opLen2(ssa.OpGreaterEqualInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.Less", opLen2(ssa.OpLessInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.LessEqual", opLen2(ssa.OpLessEqualInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedPopCount", 
opLen2(ssa.OpMaskedPopCountInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.Max", opLen2(ssa.OpMaxInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.Min", opLen2(ssa.OpMinInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.MulHigh", opLen2(ssa.OpMulHighInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.MulLow", opLen2(ssa.OpMulLowInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.NotEqual", opLen2(ssa.OpNotEqualInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.Or", opLen2(ssa.OpOrInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.PairwiseAdd", opLen2(ssa.OpPairwiseAddInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.PairwiseSub", opLen2(ssa.OpPairwiseSubInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.SaturatedPairwiseAdd", opLen2(ssa.OpSaturatedPairwiseAddInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.SaturatedPairwiseSub", opLen2(ssa.OpSaturatedPairwiseSubInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.SaturatedSub", opLen2(ssa.OpSaturatedSubInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.Sign", opLen2(ssa.OpSignInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.Sub", opLen2(ssa.OpSubInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.Xor", opLen2(ssa.OpXorInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.Add", opLen2(ssa.OpAddInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.Equal", opLen2(ssa.OpEqualInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.Greater", opLen2(ssa.OpGreaterInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.GreaterEqual", opLen2(ssa.OpGreaterEqualInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.Less", opLen2(ssa.OpLessInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.LessEqual", opLen2(ssa.OpLessEqualInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.Max", opLen2(ssa.OpMaxInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.Min", opLen2(ssa.OpMinInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.MulHigh", opLen2(ssa.OpMulHighInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.MulLow", opLen2(ssa.OpMulLowInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.NotEqual", opLen2(ssa.OpNotEqualInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.SaturatedSub", opLen2(ssa.OpSaturatedSubInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.Sub", opLen2(ssa.OpSubInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.Add", opLen2(ssa.OpAddInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.And", opLen2(ssa.OpAndInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.AndNot", opLen2(ssa.OpAndNotInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.Equal", 
opLen2(ssa.OpEqualInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.Greater", opLen2(ssa.OpGreaterInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.GreaterEqual", opLen2(ssa.OpGreaterEqualInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.Less", opLen2(ssa.OpLessInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.LessEqual", opLen2(ssa.OpLessEqualInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.Max", opLen2(ssa.OpMaxInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.Min", opLen2(ssa.OpMinInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.MulHigh", opLen2(ssa.OpMulHighInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.MulLow", opLen2(ssa.OpMulLowInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.NotEqual", opLen2(ssa.OpNotEqualInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.Or", opLen2(ssa.OpOrInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.PairwiseAdd", opLen2(ssa.OpPairwiseAddInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.PairwiseSub", opLen2(ssa.OpPairwiseSubInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.SaturatedPairwiseAdd", opLen2(ssa.OpSaturatedPairwiseAddInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.SaturatedPairwiseSub", opLen2(ssa.OpSaturatedPairwiseSubInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.SaturatedSub", opLen2(ssa.OpSaturatedSubInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.Sign", opLen2(ssa.OpSignInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.Sub", opLen2(ssa.OpSubInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.Xor", opLen2(ssa.OpXorInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x16.Add", opLen2(ssa.OpAddInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.AndNot", opLen2(ssa.OpAndNotInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.Equal", opLen2(ssa.OpEqualInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.Greater", opLen2(ssa.OpGreaterInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.GreaterEqual", opLen2(ssa.OpGreaterEqualInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.Less", opLen2(ssa.OpLessInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.LessEqual", opLen2(ssa.OpLessEqualInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.Max", opLen2(ssa.OpMaxInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.Min", opLen2(ssa.OpMinInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.MulLow", opLen2(ssa.OpMulLowInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.NotEqual", 
opLen2(ssa.OpNotEqualInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.Sub", opLen2(ssa.OpSubInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.Xor", opLen2(ssa.OpXorInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.Add", opLen2(ssa.OpAddInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.AndNot", opLen2(ssa.OpAndNotInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.Equal", opLen2(ssa.OpEqualInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.Greater", opLen2(ssa.OpGreaterInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.GreaterEqual", opLen2(ssa.OpGreaterEqualInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.Less", opLen2(ssa.OpLessInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.LessEqual", opLen2(ssa.OpLessEqualInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.Max", opLen2(ssa.OpMaxInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.Min", opLen2(ssa.OpMinInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.MulLow", opLen2(ssa.OpMulLowInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.NotEqual", opLen2(ssa.OpNotEqualInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.PairwiseAdd", opLen2(ssa.OpPairwiseAddInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.PairwiseSub", opLen2(ssa.OpPairwiseSubInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.Sign", opLen2(ssa.OpSignInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.Sub", opLen2(ssa.OpSubInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.Xor", opLen2(ssa.OpXorInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.Add", opLen2(ssa.OpAddInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.AndNot", opLen2(ssa.OpAndNotInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.Equal", opLen2(ssa.OpEqualInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.Greater", opLen2(ssa.OpGreaterInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.GreaterEqual", opLen2(ssa.OpGreaterEqualInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.Less", opLen2(ssa.OpLessInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.LessEqual", opLen2(ssa.OpLessEqualInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.Max", opLen2(ssa.OpMaxInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.Min", opLen2(ssa.OpMinInt32x8, types.TypeVec256), 
sys.AMD64) + addF(simdPackage, "Int32x8.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.MulLow", opLen2(ssa.OpMulLowInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.NotEqual", opLen2(ssa.OpNotEqualInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.PairwiseAdd", opLen2(ssa.OpPairwiseAddInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.PairwiseSub", opLen2(ssa.OpPairwiseSubInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.Sign", opLen2(ssa.OpSignInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.Sub", opLen2(ssa.OpSubInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.Xor", opLen2(ssa.OpXorInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x2.Add", opLen2(ssa.OpAddInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.And", opLen2(ssa.OpAndInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.AndNot", opLen2(ssa.OpAndNotInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.Equal", opLen2(ssa.OpEqualInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.Greater", opLen2(ssa.OpGreaterInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.GreaterEqual", opLen2(ssa.OpGreaterEqualInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.Less", opLen2(ssa.OpLessInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.LessEqual", opLen2(ssa.OpLessEqualInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.Max", opLen2(ssa.OpMaxInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.Min", opLen2(ssa.OpMinInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.MulLow", opLen2(ssa.OpMulLowInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.NotEqual", opLen2(ssa.OpNotEqualInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.Or", opLen2(ssa.OpOrInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.Sub", opLen2(ssa.OpSubInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.Xor", opLen2(ssa.OpXorInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.Add", opLen2(ssa.OpAddInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.And", opLen2(ssa.OpAndInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.AndNot", opLen2(ssa.OpAndNotInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.Equal", opLen2(ssa.OpEqualInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.Greater", opLen2(ssa.OpGreaterInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.GreaterEqual", opLen2(ssa.OpGreaterEqualInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.Less", opLen2(ssa.OpLessInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.LessEqual", opLen2(ssa.OpLessEqualInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, 
"Int64x4.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.Max", opLen2(ssa.OpMaxInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.Min", opLen2(ssa.OpMinInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.MulLow", opLen2(ssa.OpMulLowInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.NotEqual", opLen2(ssa.OpNotEqualInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.Or", opLen2(ssa.OpOrInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.Sub", opLen2(ssa.OpSubInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.Xor", opLen2(ssa.OpXorInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.Add", opLen2(ssa.OpAddInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.And", opLen2(ssa.OpAndInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.AndNot", opLen2(ssa.OpAndNotInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.Equal", opLen2(ssa.OpEqualInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.Greater", opLen2(ssa.OpGreaterInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.GreaterEqual", opLen2(ssa.OpGreaterEqualInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.Less", opLen2(ssa.OpLessInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.LessEqual", opLen2(ssa.OpLessEqualInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.Max", opLen2(ssa.OpMaxInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.Min", opLen2(ssa.OpMinInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.MulLow", opLen2(ssa.OpMulLowInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.NotEqual", opLen2(ssa.OpNotEqualInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.Or", opLen2(ssa.OpOrInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.Sub", opLen2(ssa.OpSubInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.Xor", opLen2(ssa.OpXorInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.Add", opLen2(ssa.OpAddInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.And", opLen2(ssa.OpAndInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.AndNot", opLen2(ssa.OpAndNotInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.Equal", opLen2(ssa.OpEqualInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.Greater", opLen2(ssa.OpGreaterInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.GreaterEqual", opLen2(ssa.OpGreaterEqualInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.Less", opLen2(ssa.OpLessInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.LessEqual", opLen2(ssa.OpLessEqualInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt8x16, 
types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.Max", opLen2(ssa.OpMaxInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.Min", opLen2(ssa.OpMinInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.NotEqual", opLen2(ssa.OpNotEqualInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.Or", opLen2(ssa.OpOrInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.SaturatedSub", opLen2(ssa.OpSaturatedSubInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.Sign", opLen2(ssa.OpSignInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.Sub", opLen2(ssa.OpSubInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.Xor", opLen2(ssa.OpXorInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.Add", opLen2(ssa.OpAddInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.And", opLen2(ssa.OpAndInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.AndNot", opLen2(ssa.OpAndNotInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.Equal", opLen2(ssa.OpEqualInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.Greater", opLen2(ssa.OpGreaterInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.GreaterEqual", opLen2(ssa.OpGreaterEqualInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.Less", opLen2(ssa.OpLessInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.LessEqual", opLen2(ssa.OpLessEqualInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.Max", opLen2(ssa.OpMaxInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.Min", opLen2(ssa.OpMinInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.NotEqual", opLen2(ssa.OpNotEqualInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.Or", opLen2(ssa.OpOrInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.SaturatedSub", opLen2(ssa.OpSaturatedSubInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.Sign", opLen2(ssa.OpSignInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.Sub", opLen2(ssa.OpSubInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.Xor", opLen2(ssa.OpXorInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.Add", opLen2(ssa.OpAddInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.Equal", opLen2(ssa.OpEqualInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.Greater", opLen2(ssa.OpGreaterInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.GreaterEqual", opLen2(ssa.OpGreaterEqualInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.Less", opLen2(ssa.OpLessInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.LessEqual", opLen2(ssa.OpLessEqualInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt8x64, types.TypeVec512), sys.AMD64) + 
addF(simdPackage, "Int8x64.Max", opLen2(ssa.OpMaxInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.Min", opLen2(ssa.OpMinInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.NotEqual", opLen2(ssa.OpNotEqualInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.SaturatedSub", opLen2(ssa.OpSaturatedSubInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.Sub", opLen2(ssa.OpSubInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x16.Add", opLen2(ssa.OpAddUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.And", opLen2(ssa.OpAndUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.AndNot", opLen2(ssa.OpAndNotUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.Average", opLen2(ssa.OpAverageUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.Equal", opLen2(ssa.OpEqualUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.Greater", opLen2(ssa.OpGreaterUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.GreaterEqual", opLen2(ssa.OpGreaterEqualUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.Less", opLen2(ssa.OpLessUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.LessEqual", opLen2(ssa.OpLessEqualUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.Max", opLen2(ssa.OpMaxUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.Min", opLen2(ssa.OpMinUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.MulHigh", opLen2(ssa.OpMulHighUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.NotEqual", opLen2(ssa.OpNotEqualUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.Or", opLen2(ssa.OpOrUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.PairwiseAdd", opLen2(ssa.OpPairwiseAddUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.PairwiseSub", opLen2(ssa.OpPairwiseSubUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.SaturatedSub", opLen2(ssa.OpSaturatedSubUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.Sub", opLen2(ssa.OpSubUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.Xor", opLen2(ssa.OpXorUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.Add", opLen2(ssa.OpAddUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.Average", opLen2(ssa.OpAverageUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.Equal", opLen2(ssa.OpEqualUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.Greater", opLen2(ssa.OpGreaterUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.GreaterEqual", opLen2(ssa.OpGreaterEqualUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.Less", opLen2(ssa.OpLessUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.LessEqual", opLen2(ssa.OpLessEqualUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint16x32, types.TypeVec512), sys.AMD64) + 
addF(simdPackage, "Uint16x32.Max", opLen2(ssa.OpMaxUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.Min", opLen2(ssa.OpMinUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.MulHigh", opLen2(ssa.OpMulHighUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.NotEqual", opLen2(ssa.OpNotEqualUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.SaturatedSub", opLen2(ssa.OpSaturatedSubUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.Sub", opLen2(ssa.OpSubUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.Add", opLen2(ssa.OpAddUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.And", opLen2(ssa.OpAndUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.AndNot", opLen2(ssa.OpAndNotUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.Average", opLen2(ssa.OpAverageUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.Equal", opLen2(ssa.OpEqualUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.Greater", opLen2(ssa.OpGreaterUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.GreaterEqual", opLen2(ssa.OpGreaterEqualUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.Less", opLen2(ssa.OpLessUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.LessEqual", opLen2(ssa.OpLessEqualUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.Max", opLen2(ssa.OpMaxUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.Min", opLen2(ssa.OpMinUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.MulHigh", opLen2(ssa.OpMulHighUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.NotEqual", opLen2(ssa.OpNotEqualUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.Or", opLen2(ssa.OpOrUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.PairwiseAdd", opLen2(ssa.OpPairwiseAddUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.PairwiseSub", opLen2(ssa.OpPairwiseSubUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.SaturatedSub", opLen2(ssa.OpSaturatedSubUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.Sub", opLen2(ssa.OpSubUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.Xor", opLen2(ssa.OpXorUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x16.Add", opLen2(ssa.OpAddUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.And", opLen2(ssa.OpAndUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.AndNot", opLen2(ssa.OpAndNotUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.Equal", opLen2(ssa.OpEqualUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.Greater", opLen2(ssa.OpGreaterUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.GreaterEqual", opLen2(ssa.OpGreaterEqualUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.Less", opLen2(ssa.OpLessUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.LessEqual", 
opLen2(ssa.OpLessEqualUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.Max", opLen2(ssa.OpMaxUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.Min", opLen2(ssa.OpMinUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.NotEqual", opLen2(ssa.OpNotEqualUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.Or", opLen2(ssa.OpOrUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.Sub", opLen2(ssa.OpSubUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.Xor", opLen2(ssa.OpXorUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.Add", opLen2(ssa.OpAddUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.And", opLen2(ssa.OpAndUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.AndNot", opLen2(ssa.OpAndNotUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.Equal", opLen2(ssa.OpEqualUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.Greater", opLen2(ssa.OpGreaterUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.GreaterEqual", opLen2(ssa.OpGreaterEqualUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.Less", opLen2(ssa.OpLessUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.LessEqual", opLen2(ssa.OpLessEqualUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.Max", opLen2(ssa.OpMaxUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.Min", opLen2(ssa.OpMinUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.MulEvenWiden", opLen2(ssa.OpMulEvenWidenUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.NotEqual", opLen2(ssa.OpNotEqualUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.Or", opLen2(ssa.OpOrUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.PairwiseAdd", opLen2(ssa.OpPairwiseAddUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.PairwiseSub", opLen2(ssa.OpPairwiseSubUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.Sub", opLen2(ssa.OpSubUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.Xor", opLen2(ssa.OpXorUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.Add", opLen2(ssa.OpAddUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.And", opLen2(ssa.OpAndUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.AndNot", opLen2(ssa.OpAndNotUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.Equal", opLen2(ssa.OpEqualUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.Greater", opLen2(ssa.OpGreaterUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.GreaterEqual", opLen2(ssa.OpGreaterEqualUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.Less", opLen2(ssa.OpLessUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.LessEqual", opLen2(ssa.OpLessEqualUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.Max", opLen2(ssa.OpMaxUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, 
"Uint32x8.Min", opLen2(ssa.OpMinUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.MulEvenWiden", opLen2(ssa.OpMulEvenWidenUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.NotEqual", opLen2(ssa.OpNotEqualUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.Or", opLen2(ssa.OpOrUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.PairwiseAdd", opLen2(ssa.OpPairwiseAddUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.PairwiseSub", opLen2(ssa.OpPairwiseSubUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.Sub", opLen2(ssa.OpSubUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.Xor", opLen2(ssa.OpXorUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x2.Add", opLen2(ssa.OpAddUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.And", opLen2(ssa.OpAndUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.AndNot", opLen2(ssa.OpAndNotUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.Equal", opLen2(ssa.OpEqualUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.Greater", opLen2(ssa.OpGreaterUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.GreaterEqual", opLen2(ssa.OpGreaterEqualUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.Less", opLen2(ssa.OpLessUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.LessEqual", opLen2(ssa.OpLessEqualUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.Max", opLen2(ssa.OpMaxUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.Min", opLen2(ssa.OpMinUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.MulEvenWiden", opLen2(ssa.OpMulEvenWidenUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.NotEqual", opLen2(ssa.OpNotEqualUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.Or", opLen2(ssa.OpOrUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.Sub", opLen2(ssa.OpSubUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.Xor", opLen2(ssa.OpXorUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.Add", opLen2(ssa.OpAddUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.And", opLen2(ssa.OpAndUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.AndNot", opLen2(ssa.OpAndNotUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.Equal", opLen2(ssa.OpEqualUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.Greater", opLen2(ssa.OpGreaterUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.GreaterEqual", opLen2(ssa.OpGreaterEqualUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.Less", opLen2(ssa.OpLessUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.LessEqual", opLen2(ssa.OpLessEqualUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.Max", opLen2(ssa.OpMaxUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.Min", opLen2(ssa.OpMinUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.MulEvenWiden", opLen2(ssa.OpMulEvenWidenUint64x4, types.TypeVec256), sys.AMD64) + 
addF(simdPackage, "Uint64x4.NotEqual", opLen2(ssa.OpNotEqualUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.Or", opLen2(ssa.OpOrUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.Sub", opLen2(ssa.OpSubUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.Xor", opLen2(ssa.OpXorUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.Add", opLen2(ssa.OpAddUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.And", opLen2(ssa.OpAndUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.AndNot", opLen2(ssa.OpAndNotUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.Equal", opLen2(ssa.OpEqualUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.Greater", opLen2(ssa.OpGreaterUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.GreaterEqual", opLen2(ssa.OpGreaterEqualUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.Less", opLen2(ssa.OpLessUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.LessEqual", opLen2(ssa.OpLessEqualUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.Max", opLen2(ssa.OpMaxUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.Min", opLen2(ssa.OpMinUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.MulEvenWiden", opLen2(ssa.OpMulEvenWidenUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.NotEqual", opLen2(ssa.OpNotEqualUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.Or", opLen2(ssa.OpOrUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.Sub", opLen2(ssa.OpSubUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.Xor", opLen2(ssa.OpXorUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.Add", opLen2(ssa.OpAddUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.And", opLen2(ssa.OpAndUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.AndNot", opLen2(ssa.OpAndNotUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.Average", opLen2(ssa.OpAverageUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.Equal", opLen2(ssa.OpEqualUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.Greater", opLen2(ssa.OpGreaterUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.GreaterEqual", opLen2(ssa.OpGreaterEqualUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.Less", opLen2(ssa.OpLessUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.LessEqual", opLen2(ssa.OpLessEqualUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.Max", opLen2(ssa.OpMaxUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.Min", opLen2(ssa.OpMinUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.NotEqual", opLen2(ssa.OpNotEqualUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.Or", opLen2(ssa.OpOrUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.SaturatedSub", opLen2(ssa.OpSaturatedSubUint8x16, types.TypeVec128), sys.AMD64) + 
addF(simdPackage, "Uint8x16.Sub", opLen2(ssa.OpSubUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.Xor", opLen2(ssa.OpXorUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.Add", opLen2(ssa.OpAddUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.And", opLen2(ssa.OpAndUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.AndNot", opLen2(ssa.OpAndNotUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.Average", opLen2(ssa.OpAverageUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.Equal", opLen2(ssa.OpEqualUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.Greater", opLen2(ssa.OpGreaterUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.GreaterEqual", opLen2(ssa.OpGreaterEqualUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.Less", opLen2(ssa.OpLessUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.LessEqual", opLen2(ssa.OpLessEqualUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.Max", opLen2(ssa.OpMaxUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.Min", opLen2(ssa.OpMinUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.NotEqual", opLen2(ssa.OpNotEqualUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.Or", opLen2(ssa.OpOrUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.SaturatedSub", opLen2(ssa.OpSaturatedSubUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.Sub", opLen2(ssa.OpSubUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.Xor", opLen2(ssa.OpXorUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.Add", opLen2(ssa.OpAddUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.Average", opLen2(ssa.OpAverageUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.Equal", opLen2(ssa.OpEqualUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.Greater", opLen2(ssa.OpGreaterUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.GreaterEqual", opLen2(ssa.OpGreaterEqualUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.Less", opLen2(ssa.OpLessUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.LessEqual", opLen2(ssa.OpLessEqualUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.Max", opLen2(ssa.OpMaxUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.Min", opLen2(ssa.OpMinUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.NotEqual", opLen2(ssa.OpNotEqualUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.SaturatedSub", opLen2(ssa.OpSaturatedSubUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.Sub", opLen2(ssa.OpSubUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedAdd", opLen3(ssa.OpMaskedAddFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedAnd", 
opLen3(ssa.OpMaskedAndFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiv", opLen3(ssa.OpMaskedDivFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedEqual", opLen3(ssa.OpMaskedEqualFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedIsNan", opLen3(ssa.OpMaskedIsNanFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedLess", opLen3(ssa.OpMaskedLessFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedMax", opLen3(ssa.OpMaskedMaxFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedMin", opLen3(ssa.OpMaskedMinFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedMul", opLen3(ssa.OpMaskedMulFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedMulByPowOf2", opLen3(ssa.OpMaskedMulByPowOf2Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedOr", opLen3(ssa.OpMaskedOrFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedSub", opLen3(ssa.OpMaskedSubFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedXor", opLen3(ssa.OpMaskedXorFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedAdd", opLen3(ssa.OpMaskedAddFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedAnd", opLen3(ssa.OpMaskedAndFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedDiv", opLen3(ssa.OpMaskedDivFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedEqual", opLen3(ssa.OpMaskedEqualFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedIsNan", opLen3(ssa.OpMaskedIsNanFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedLess", opLen3(ssa.OpMaskedLessFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedMax", opLen3(ssa.OpMaskedMaxFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedMin", opLen3(ssa.OpMaskedMinFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedMul", opLen3(ssa.OpMaskedMulFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedMulByPowOf2", opLen3(ssa.OpMaskedMulByPowOf2Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualFloat32x4, types.TypeVec128), sys.AMD64) + 
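
The Masked* registrations switch to opLen3, which presumably threads the mask through as a third operand; a sketch under the same assumptions as the opLen2 one above:

	// Sketch only: three-operand variant, mask assumed to be the last
	// SSA argument.
	func opLen3(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
		return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
			return s.newValue3(op, t, args[0], args[1], args[2])
		}
	}
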
addF(simdPackage, "Float32x4.MaskedOr", opLen3(ssa.OpMaskedOrFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedSub", opLen3(ssa.OpMaskedSubFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedXor", opLen3(ssa.OpMaskedXorFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedAdd", opLen3(ssa.OpMaskedAddFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedAnd", opLen3(ssa.OpMaskedAndFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedDiv", opLen3(ssa.OpMaskedDivFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedEqual", opLen3(ssa.OpMaskedEqualFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedIsNan", opLen3(ssa.OpMaskedIsNanFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedLess", opLen3(ssa.OpMaskedLessFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedMax", opLen3(ssa.OpMaskedMaxFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedMin", opLen3(ssa.OpMaskedMinFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedMul", opLen3(ssa.OpMaskedMulFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedMulByPowOf2", opLen3(ssa.OpMaskedMulByPowOf2Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedOr", opLen3(ssa.OpMaskedOrFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedSub", opLen3(ssa.OpMaskedSubFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedXor", opLen3(ssa.OpMaskedXorFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedAdd", opLen3(ssa.OpMaskedAddFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedAnd", opLen3(ssa.OpMaskedAndFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedDiv", opLen3(ssa.OpMaskedDivFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedEqual", opLen3(ssa.OpMaskedEqualFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedIsNan", opLen3(ssa.OpMaskedIsNanFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedLess", opLen3(ssa.OpMaskedLessFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedMax", opLen3(ssa.OpMaskedMaxFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, 
"Float64x2.MaskedMin", opLen3(ssa.OpMaskedMinFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedMul", opLen3(ssa.OpMaskedMulFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedMulByPowOf2", opLen3(ssa.OpMaskedMulByPowOf2Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedOr", opLen3(ssa.OpMaskedOrFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedSub", opLen3(ssa.OpMaskedSubFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedXor", opLen3(ssa.OpMaskedXorFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedAdd", opLen3(ssa.OpMaskedAddFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedAnd", opLen3(ssa.OpMaskedAndFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedDiv", opLen3(ssa.OpMaskedDivFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedEqual", opLen3(ssa.OpMaskedEqualFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedIsNan", opLen3(ssa.OpMaskedIsNanFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedLess", opLen3(ssa.OpMaskedLessFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedMax", opLen3(ssa.OpMaskedMaxFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedMin", opLen3(ssa.OpMaskedMinFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedMul", opLen3(ssa.OpMaskedMulFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedMulByPowOf2", opLen3(ssa.OpMaskedMulByPowOf2Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedOr", opLen3(ssa.OpMaskedOrFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedSub", opLen3(ssa.OpMaskedSubFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedXor", opLen3(ssa.OpMaskedXorFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedAdd", opLen3(ssa.OpMaskedAddFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedAnd", opLen3(ssa.OpMaskedAndFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedDiv", opLen3(ssa.OpMaskedDivFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedEqual", opLen3(ssa.OpMaskedEqualFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedIsNan", 
opLen3(ssa.OpMaskedIsNanFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedLess", opLen3(ssa.OpMaskedLessFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedMax", opLen3(ssa.OpMaskedMaxFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedMin", opLen3(ssa.OpMaskedMinFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedMul", opLen3(ssa.OpMaskedMulFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedMulByPowOf2", opLen3(ssa.OpMaskedMulByPowOf2Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedOr", opLen3(ssa.OpMaskedOrFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedSub", opLen3(ssa.OpMaskedSubFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedXor", opLen3(ssa.OpMaskedXorFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedAdd", opLen3(ssa.OpMaskedAddInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedEqual", opLen3(ssa.OpMaskedEqualInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedLess", opLen3(ssa.OpMaskedLessInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedMax", opLen3(ssa.OpMaskedMaxInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedMin", opLen3(ssa.OpMaskedMinInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedSub", opLen3(ssa.OpMaskedSubInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedAdd", opLen3(ssa.OpMaskedAddInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedEqual", opLen3(ssa.OpMaskedEqualInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedLess", opLen3(ssa.OpMaskedLessInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedMax", opLen3(ssa.OpMaskedMaxInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedMin", 
opLen3(ssa.OpMaskedMinInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedSub", opLen3(ssa.OpMaskedSubInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedAdd", opLen3(ssa.OpMaskedAddInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedEqual", opLen3(ssa.OpMaskedEqualInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedLess", opLen3(ssa.OpMaskedLessInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedMax", opLen3(ssa.OpMaskedMaxInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedMin", opLen3(ssa.OpMaskedMinInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedSub", opLen3(ssa.OpMaskedSubInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedAdd", opLen3(ssa.OpMaskedAddInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedAnd", opLen3(ssa.OpMaskedAndInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedEqual", opLen3(ssa.OpMaskedEqualInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedLess", opLen3(ssa.OpMaskedLessInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedMax", opLen3(ssa.OpMaskedMaxInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedMin", opLen3(ssa.OpMaskedMinInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedNotEqual", 
opLen3(ssa.OpMaskedNotEqualInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedOr", opLen3(ssa.OpMaskedOrInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedSub", opLen3(ssa.OpMaskedSubInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedXor", opLen3(ssa.OpMaskedXorInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedAdd", opLen3(ssa.OpMaskedAddInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedAnd", opLen3(ssa.OpMaskedAndInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedEqual", opLen3(ssa.OpMaskedEqualInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedLess", opLen3(ssa.OpMaskedLessInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedMax", opLen3(ssa.OpMaskedMaxInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedMin", opLen3(ssa.OpMaskedMinInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedOr", opLen3(ssa.OpMaskedOrInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedSub", opLen3(ssa.OpMaskedSubInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedXor", opLen3(ssa.OpMaskedXorInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedAdd", opLen3(ssa.OpMaskedAddInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedAnd", opLen3(ssa.OpMaskedAndInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedEqual", opLen3(ssa.OpMaskedEqualInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedLess", opLen3(ssa.OpMaskedLessInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedMax", opLen3(ssa.OpMaskedMaxInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedMin", opLen3(ssa.OpMaskedMinInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedOr", opLen3(ssa.OpMaskedOrInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedSub", opLen3(ssa.OpMaskedSubInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedXor", opLen3(ssa.OpMaskedXorInt32x8, types.TypeVec256), 
sys.AMD64) + addF(simdPackage, "Int64x2.MaskedAdd", opLen3(ssa.OpMaskedAddInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedAnd", opLen3(ssa.OpMaskedAndInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedEqual", opLen3(ssa.OpMaskedEqualInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedLess", opLen3(ssa.OpMaskedLessInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedMax", opLen3(ssa.OpMaskedMaxInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedMin", opLen3(ssa.OpMaskedMinInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedMulEvenWiden", opLen3(ssa.OpMaskedMulEvenWidenInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedOr", opLen3(ssa.OpMaskedOrInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedSub", opLen3(ssa.OpMaskedSubInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedXor", opLen3(ssa.OpMaskedXorInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedAdd", opLen3(ssa.OpMaskedAddInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedAnd", opLen3(ssa.OpMaskedAndInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedEqual", opLen3(ssa.OpMaskedEqualInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedLess", opLen3(ssa.OpMaskedLessInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedMax", opLen3(ssa.OpMaskedMaxInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedMin", opLen3(ssa.OpMaskedMinInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedMulEvenWiden", opLen3(ssa.OpMaskedMulEvenWidenInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedOr", opLen3(ssa.OpMaskedOrInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedSub", opLen3(ssa.OpMaskedSubInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedXor", opLen3(ssa.OpMaskedXorInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedAdd", opLen3(ssa.OpMaskedAddInt64x8, types.TypeVec512), sys.AMD64) + 
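
Each entry ties one method on a vector type to one SSA op, so a call site should lower to a single instruction rather than a function call. A hypothetical caller of the Int64x4.MaskedAdd intrinsic registered above (the mask type name and the method signature are assumptions; internal/simd is importable only from within the standard library):

	package example

	import "internal/simd" // experimental; guarded by GOEXPERIMENT=simd

	// maskedSum is illustrative only: Mask64x4 and the MaskedAdd
	// signature are assumed, not confirmed by this patch.
	func maskedSum(a, b simd.Int64x4, m simd.Mask64x4) simd.Int64x4 {
		return a.MaskedAdd(b, m) // expected to lower to one masked vector add
	}
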
addF(simdPackage, "Int64x8.MaskedAnd", opLen3(ssa.OpMaskedAndInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedEqual", opLen3(ssa.OpMaskedEqualInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedLess", opLen3(ssa.OpMaskedLessInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedMax", opLen3(ssa.OpMaskedMaxInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedMin", opLen3(ssa.OpMaskedMinInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedMulEvenWiden", opLen3(ssa.OpMaskedMulEvenWidenInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedOr", opLen3(ssa.OpMaskedOrInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedSub", opLen3(ssa.OpMaskedSubInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedXor", opLen3(ssa.OpMaskedXorInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.MaskedAdd", opLen3(ssa.OpMaskedAddInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.MaskedEqual", opLen3(ssa.OpMaskedEqualInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.MaskedLess", opLen3(ssa.OpMaskedLessInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.MaskedMax", opLen3(ssa.OpMaskedMaxInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.MaskedMin", opLen3(ssa.OpMaskedMinInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.MaskedSub", opLen3(ssa.OpMaskedSubInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.MaskedAdd", opLen3(ssa.OpMaskedAddInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.MaskedEqual", opLen3(ssa.OpMaskedEqualInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.MaskedLess", opLen3(ssa.OpMaskedLessInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt8x32, 
types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.MaskedMax", opLen3(ssa.OpMaskedMaxInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.MaskedMin", opLen3(ssa.OpMaskedMinInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.MaskedSub", opLen3(ssa.OpMaskedSubInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.MaskedAdd", opLen3(ssa.OpMaskedAddInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.MaskedEqual", opLen3(ssa.OpMaskedEqualInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.MaskedLess", opLen3(ssa.OpMaskedLessInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.MaskedMax", opLen3(ssa.OpMaskedMaxInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.MaskedMin", opLen3(ssa.OpMaskedMinInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.MaskedSub", opLen3(ssa.OpMaskedSubInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedAdd", opLen3(ssa.OpMaskedAddUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedAverage", opLen3(ssa.OpMaskedAverageUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedEqual", opLen3(ssa.OpMaskedEqualUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedLess", opLen3(ssa.OpMaskedLessUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedMax", opLen3(ssa.OpMaskedMaxUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedMin", opLen3(ssa.OpMaskedMinUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedSub", 
opLen3(ssa.OpMaskedSubUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedAdd", opLen3(ssa.OpMaskedAddUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedAverage", opLen3(ssa.OpMaskedAverageUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedEqual", opLen3(ssa.OpMaskedEqualUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedLess", opLen3(ssa.OpMaskedLessUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedMax", opLen3(ssa.OpMaskedMaxUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedMin", opLen3(ssa.OpMaskedMinUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedSub", opLen3(ssa.OpMaskedSubUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedAdd", opLen3(ssa.OpMaskedAddUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedAverage", opLen3(ssa.OpMaskedAverageUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedEqual", opLen3(ssa.OpMaskedEqualUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedLess", opLen3(ssa.OpMaskedLessUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedMax", opLen3(ssa.OpMaskedMaxUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedMin", opLen3(ssa.OpMaskedMinUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedSub", opLen3(ssa.OpMaskedSubUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedAdd", opLen3(ssa.OpMaskedAddUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedAnd", opLen3(ssa.OpMaskedAndUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint32x16, types.TypeVec512), sys.AMD64) + 
addF(simdPackage, "Uint32x16.MaskedEqual", opLen3(ssa.OpMaskedEqualUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedLess", opLen3(ssa.OpMaskedLessUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedMax", opLen3(ssa.OpMaskedMaxUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedMin", opLen3(ssa.OpMaskedMinUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedOr", opLen3(ssa.OpMaskedOrUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedSub", opLen3(ssa.OpMaskedSubUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedXor", opLen3(ssa.OpMaskedXorUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedAdd", opLen3(ssa.OpMaskedAddUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedAnd", opLen3(ssa.OpMaskedAndUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedEqual", opLen3(ssa.OpMaskedEqualUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedLess", opLen3(ssa.OpMaskedLessUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedMax", opLen3(ssa.OpMaskedMaxUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedMin", opLen3(ssa.OpMaskedMinUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedOr", opLen3(ssa.OpMaskedOrUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedSub", opLen3(ssa.OpMaskedSubUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedXor", opLen3(ssa.OpMaskedXorUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedAdd", opLen3(ssa.OpMaskedAddUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedAnd", opLen3(ssa.OpMaskedAndUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedEqual", opLen3(ssa.OpMaskedEqualUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedLess", opLen3(ssa.OpMaskedLessUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedLessEqual", 
opLen3(ssa.OpMaskedLessEqualUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedMax", opLen3(ssa.OpMaskedMaxUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedMin", opLen3(ssa.OpMaskedMinUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedOr", opLen3(ssa.OpMaskedOrUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedSub", opLen3(ssa.OpMaskedSubUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedXor", opLen3(ssa.OpMaskedXorUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedAdd", opLen3(ssa.OpMaskedAddUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedAnd", opLen3(ssa.OpMaskedAndUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedEqual", opLen3(ssa.OpMaskedEqualUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedLess", opLen3(ssa.OpMaskedLessUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedMax", opLen3(ssa.OpMaskedMaxUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedMin", opLen3(ssa.OpMaskedMinUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedMulEvenWiden", opLen3(ssa.OpMaskedMulEvenWidenUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedOr", opLen3(ssa.OpMaskedOrUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedSub", opLen3(ssa.OpMaskedSubUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedXor", opLen3(ssa.OpMaskedXorUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedAdd", opLen3(ssa.OpMaskedAddUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedAnd", opLen3(ssa.OpMaskedAndUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedEqual", opLen3(ssa.OpMaskedEqualUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedLess", opLen3(ssa.OpMaskedLessUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedMax", opLen3(ssa.OpMaskedMaxUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedMin", opLen3(ssa.OpMaskedMinUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedMulEvenWiden", opLen3(ssa.OpMaskedMulEvenWidenUint64x4, types.TypeVec256), sys.AMD64) + 
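
None of these registrations should take effect unless the simd experiment is enabled; a hedged sketch of the guard that the caller of this registration table presumably uses (the function name and experiment-flag field here are assumptions):

	// Sketch only: install the simd intrinsics under GOEXPERIMENT=simd.
	if buildcfg.Experiment.SIMD {
		simdIntrinsics(addF)
	}
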
addF(simdPackage, "Uint64x4.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedOr", opLen3(ssa.OpMaskedOrUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedSub", opLen3(ssa.OpMaskedSubUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedXor", opLen3(ssa.OpMaskedXorUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedAdd", opLen3(ssa.OpMaskedAddUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedAnd", opLen3(ssa.OpMaskedAndUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedEqual", opLen3(ssa.OpMaskedEqualUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedLess", opLen3(ssa.OpMaskedLessUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedMax", opLen3(ssa.OpMaskedMaxUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedMin", opLen3(ssa.OpMaskedMinUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedMulEvenWiden", opLen3(ssa.OpMaskedMulEvenWidenUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedOr", opLen3(ssa.OpMaskedOrUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedSub", opLen3(ssa.OpMaskedSubUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedXor", opLen3(ssa.OpMaskedXorUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedAdd", opLen3(ssa.OpMaskedAddUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedAverage", opLen3(ssa.OpMaskedAverageUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedEqual", opLen3(ssa.OpMaskedEqualUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedLess", opLen3(ssa.OpMaskedLessUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedMax", opLen3(ssa.OpMaskedMaxUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedMin", opLen3(ssa.OpMaskedMinUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedSub", opLen3(ssa.OpMaskedSubUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedAdd", 
opLen3(ssa.OpMaskedAddUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedAverage", opLen3(ssa.OpMaskedAverageUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedEqual", opLen3(ssa.OpMaskedEqualUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedLess", opLen3(ssa.OpMaskedLessUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedMax", opLen3(ssa.OpMaskedMaxUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedMin", opLen3(ssa.OpMaskedMinUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedSub", opLen3(ssa.OpMaskedSubUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedAdd", opLen3(ssa.OpMaskedAddUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedAverage", opLen3(ssa.OpMaskedAverageUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedEqual", opLen3(ssa.OpMaskedEqualUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedLess", opLen3(ssa.OpMaskedLessUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedMax", opLen3(ssa.OpMaskedMaxUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedMin", opLen3(ssa.OpMaskedMinUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedSub", opLen3(ssa.OpMaskedSubUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x16.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x16.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x16.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x16.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x16.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, 
"Int16x16.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x16.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x16.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x16.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x8.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x8.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x8.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x8.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x8.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x8.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x8.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x8.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x8.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x4.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x4.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x4.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x4.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x4.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x4.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x4.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x4.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x4.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x4.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x4.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x4.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x4.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x4.AsFloat32x8", func(s *state, n *ir.CallExpr, 
args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x4.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x4.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x4.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x4.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x4.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x4.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x4.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x4.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x4.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x4.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x4.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x4.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x4.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x8.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x8.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x8.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x8.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x8.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x8.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x8.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x8.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x8.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x16.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x16.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x16.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x16.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value 
{ return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x16.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x16.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x16.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x16.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x16.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x8.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x8.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x8.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + 
addF(simdPackage, "Uint32x8.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x8.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x8.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x8.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x8.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x8.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x8.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x8.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x8.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x8.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x8.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x8.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x8.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x8.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x8.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, 
"Uint8x64.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsInt64x8", 
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x32.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x32.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x32.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x32.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x32.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x32.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x32.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x32.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x32.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x32.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x32.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x32.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x32.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x32.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x32.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x32.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x32.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x32.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x8.AsFloat64x8", func(s *state, n 
*ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x8.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x8.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x8.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x8.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x8.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x8.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x8.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x8.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x16.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x16.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x16.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x16.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x16.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x16.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x16.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x16.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x16.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) 
*ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, 
"Int16x8.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x4.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x4.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x4.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x4.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x4.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x4.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x4.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x4.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x4.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x2.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x2.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x2.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x2.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x2.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x2.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x2.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x2.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x2.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x2.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x2.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x2.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x2.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x2.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x2.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x2.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x2.AsUint8x16", func(s 
*state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x2.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x4.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x4.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x4.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x4.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x4.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x4.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x4.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { 
return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x4.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x4.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "LoadInt32x4", simdLoad(), sys.AMD64) + addF(simdPackage, "Int32x4.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask32x4", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask32x4.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt8x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Int8x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask8x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask8x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint16x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint16x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask16x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask16x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt16x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Int16x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadFloat32x4", simdLoad(), sys.AMD64) + addF(simdPackage, "Float32x4.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint64x2", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint64x2.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadFloat64x2", simdLoad(), sys.AMD64) + addF(simdPackage, "Float64x2.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask64x2", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask64x2.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt64x2", simdLoad(), sys.AMD64) + addF(simdPackage, "Int64x2.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint8x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint8x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint32x4", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint32x4.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt16x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Int16x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt32x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Int32x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint64x4", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint64x4.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask64x4", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask64x4.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt64x4", simdLoad(), sys.AMD64) + addF(simdPackage, "Int64x4.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadFloat64x4", simdLoad(), sys.AMD64) + addF(simdPackage, "Float64x4.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask16x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask16x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask32x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask32x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadFloat32x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Float32x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint16x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint16x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt8x32", simdLoad(), sys.AMD64) + addF(simdPackage, "Int8x32.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint8x32", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint8x32.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask8x32", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask8x32.Store", simdStore(), 
sys.AMD64) + addF(simdPackage, "LoadUint32x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint32x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadFloat64x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Float64x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask64x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask64x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt64x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Int64x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint8x64", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint8x64.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask8x64", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask8x64.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt8x64", simdLoad(), sys.AMD64) + addF(simdPackage, "Int8x64.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadFloat32x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Float32x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask32x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask32x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt32x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Int32x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint16x32", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint16x32.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask16x32", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask16x32.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt16x32", simdLoad(), sys.AMD64) + addF(simdPackage, "Int16x32.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint64x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint64x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint32x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint32x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "Mask64x4.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x4.AsMask64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask64x4.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Mask64x4.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Mask16x16.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x16.AsMask16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask16x16.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Mask16x16.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Mask32x8.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x8.AsMask32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask32x8.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Mask32x8.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Mask8x32.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsMask8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask8x32.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) + 
addF(simdPackage, "Mask8x32.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Mask64x8.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsMask64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask64x8.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Mask64x8.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Mask8x64.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsMask8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask8x64.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Mask8x64.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Mask32x16.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsMask32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask32x16.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Mask32x16.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Mask16x32.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x32.AsMask16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask16x32.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Mask16x32.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Mask32x4.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsMask32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask32x4.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Mask32x4.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Mask8x16.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsMask8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask8x16.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Mask8x16.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Mask16x8.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsMask16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask16x8.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Mask16x8.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Mask64x2.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsMask64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask64x2.And", 
opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Mask64x2.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64)
+}
+
+func opLen1(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+	return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+		return s.newValue1(op, t, args[0])
+	}
+}
+
+func opLen2(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+	return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+		return s.newValue2(op, t, args[0], args[1])
+	}
+}
+
+func opLen3(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+	return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+		return s.newValue3(op, t, args[0], args[1], args[2])
+	}
+}
+
+func simdLoad() func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+	return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+		return s.newValue2(ssa.OpLoad, n.Type(), args[0], s.mem())
+	}
+}
+
+func simdStore() func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+	return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+		s.store(args[0].Type, args[1], args[0])
+		return nil
+	}
+}
diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go
new file mode 100644
index 00000000000000..e611092c4335c2
--- /dev/null
+++ b/src/simd/simd_test.go
@@ -0,0 +1,165 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build goexperiment.simd
+
+package simd_test
+
+import (
+	"simd"
+	"testing"
+)
+
+func TestType(t *testing.T) {
+	// Testing:
+	// - Defined as another struct's field is safe
+	// - Pointer is safe.
+	// - typedef is safe
+	// - type alias is safe
+	// - type conversion is safe
+	type alias = simd.Int32x4
+	type maskT simd.Mask32x4
+	type myStruct struct {
+		x alias
+		y *simd.Int32x4
+		z maskT
+	}
+	vals := [4]int32{1, 2, 3, 4}
+	v := myStruct{x: simd.LoadInt32x4(&vals)}
+	// masking elements 1 and 2.
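	// With maskv below, the all-ones (-1) lanes select the first two elements and
	// the zero lanes mask off the rest, so adding vals to itself under this mask
	// should yield {1+1, 2+2, 0, 0} = {2, 4, 0, 0}, which is what want holds.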
+	maskv := [4]int32{-1, -1, 0, 0}
+	want := []int32{2, 4, 0, 0}
+	y := simd.LoadInt32x4(&vals)
+	v.y = &y
+
+	if !simd.HasAVX512BW() || !simd.HasAVX512VL() {
+		t.Skip("Test requires HasAVX512BW+VL, not available on this hardware")
+		return
+	}
+	v.z = maskT(simd.LoadInt32x4(&maskv).AsMask32x4())
+	*v.y = v.y.MaskedAdd(v.x, simd.Mask32x4(v.z))
+
+	got := [4]int32{}
+	v.y.Store(&got)
+	for i := range 4 {
+		if want[i] != got[i] {
+			t.Errorf("Result at %d incorrect: want %d, got %d", i, want[i], got[i])
+		}
+	}
+}
+
+func TestAdd(t *testing.T) {
+	xv := [4]int32{1, 2, 3, 4}
+	yv := [4]int32{5, 6, 7, 8}
+	want := []int32{6, 8, 10, 12}
+	x := simd.LoadInt32x4(&xv)
+	y := simd.LoadInt32x4(&yv)
+	x = x.Add(y)
+	got := [4]int32{}
+	x.Store(&got)
+	for i := range 4 {
+		if want[i] != got[i] {
+			t.Errorf("Result at %d incorrect: want %d, got %d", i, want[i], got[i])
+		}
+	}
+}
+
+func TestVectorConversion(t *testing.T) {
+	if !simd.HasAVX512BW() || !simd.HasAVX512VL() {
+		t.Skip("Test requires HasAVX512BW+VL, not available on this hardware")
+		return
+	}
+	xv := [4]int32{1, 2, 3, 4}
+	x := simd.LoadInt32x4(&xv)
+	xPromoted := x.AsInt64x2()
+	xPromotedDemoted := xPromoted.AsInt32x4()
+	got := [4]int32{}
+	xPromotedDemoted.Store(&got)
+	for i := range 4 {
+		if xv[i] != got[i] {
+			t.Errorf("Result at %d incorrect: want %d, got %d", i, xv[i], got[i])
+		}
+	}
+}
+
+func TestMaskConversion(t *testing.T) {
+	if !simd.HasAVX512BW() || !simd.HasAVX512VL() {
+		t.Skip("Test requires HasAVX512BW+VL, not available on this hardware")
+		return
+	}
+	v := [4]int32{1, 0, 1, 0}
+	x := simd.LoadInt32x4(&v)
+	var y simd.Int32x4
+	mask := y.Sub(x).AsMask32x4()
+	v = [4]int32{5, 6, 7, 8}
+	y = simd.LoadInt32x4(&v)
+	y = y.MaskedAdd(x, mask)
+	got := [4]int32{6, 0, 8, 0}
+	y.Store(&v)
+	for i := range 4 {
+		if v[i] != got[i] {
+			t.Errorf("Result at %d incorrect: want %d, got %d", i, v[i], got[i])
+		}
+	}
+}
+
+func TestMaskedAdd(t *testing.T) {
+	if !simd.HasAVX512BW() || !simd.HasAVX512VL() {
+		t.Skip("Test requires HasAVX512BW+VL, not available on this hardware")
+		return
+	}
+	xv := [4]int32{1, 2, 3, 4}
+	yv := [4]int32{5, 6, 7, 8}
+	// masking elements 1 and 2.
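	// Note that want below is {6, 8, 0, 0} rather than {6, 8, 3, 4}: per this
	// test's expectations, lanes excluded by the mask come back zeroed instead
	// of keeping x's original values.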
+	maskv := [4]int32{-1, -1, 0, 0}
+	want := []int32{6, 8, 0, 0}
+	x := simd.LoadInt32x4(&xv)
+	y := simd.LoadInt32x4(&yv)
+	mask := simd.LoadInt32x4(&maskv).AsMask32x4()
+	x = x.MaskedAdd(y, mask)
+	got := [4]int32{}
+	x.Store(&got)
+	for i := range 4 {
+		if want[i] != got[i] {
+			t.Errorf("Result at %d incorrect: want %d, got %d", i, want[i], got[i])
+		}
+	}
+}
+
+func TestCompare(t *testing.T) {
+	xv := [4]int32{5, 1, 5, 3}
+	yv := [4]int32{3, 3, 3, 3}
+	want := []int32{8, 0, 8, 0}
+	x := simd.LoadInt32x4(&xv)
+	y := simd.LoadInt32x4(&yv)
+	if !simd.HasAVX512BW() {
+		t.Skip("Test requires HasAVX512BW, not available on this hardware")
+		return
+	}
+	mask := x.Greater(y)
+	x = x.MaskedAdd(y, mask)
+	got := [4]int32{}
+	x.Store(&got)
+	for i := range 4 {
+		if want[i] != got[i] {
+			t.Errorf("Result at %d incorrect: want %d, got %d", i, want[i], got[i])
+		}
+	}
+}
+
+func TestSub(t *testing.T) {
+	xv := [4]int32{5, 5, 5, 3}
+	yv := [4]int32{3, 3, 3, 3}
+	want := []int32{2, 2, 2, 0}
+	x := simd.LoadInt32x4(&xv)
+	y := simd.LoadInt32x4(&yv)
+	x = x.Sub(y)
+	got := [4]int32{}
+	x.Store(&got)
+	for i := range 4 {
+		if want[i] != got[i] {
+			t.Errorf("Result at %d incorrect: want %d, got %d", i, want[i], got[i])
+		}
+	}
+}
diff --git a/src/simd/stubs_amd64.go b/src/simd/stubs_amd64.go
new file mode 100644
index 00000000000000..5fd4a78ee7cb26
--- /dev/null
+++ b/src/simd/stubs_amd64.go
@@ -0,0 +1,4151 @@
+// Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT.
+
+//go:build goexperiment.simd
+
+package simd
+
+// Asm: VRCP14PS, Arch: AVX512EVEX
+func (x Float32x16) ApproximateReciprocal() Float32x16
+
+// Asm: VRSQRT14PS, Arch: AVX512EVEX
+func (x Float32x16) ApproximateReciprocalOfSqrt() Float32x16
+
+// Asm: VSQRTPS, Arch: AVX512EVEX
+func (x Float32x16) Sqrt() Float32x16
+
+// Asm: VRCP14PS, Arch: AVX512EVEX
+func (x Float32x4) ApproximateReciprocal() Float32x4
+
+// Asm: VRSQRTPS, Arch: AVX
+func (x Float32x4) ApproximateReciprocalOfSqrt() Float32x4
+
+// Asm: VSQRTPS, Arch: AVX
+func (x Float32x4) Sqrt() Float32x4
+
+// Asm: VRCP14PS, Arch: AVX512EVEX
+func (x Float32x8) ApproximateReciprocal() Float32x8
+
+// Asm: VRSQRTPS, Arch: AVX
+func (x Float32x8) ApproximateReciprocalOfSqrt() Float32x8
+
+// Asm: VSQRTPS, Arch: AVX
+func (x Float32x8) Sqrt() Float32x8
+
+// Asm: VRCP14PD, Arch: AVX512EVEX
+func (x Float64x2) ApproximateReciprocal() Float64x2
+
+// Asm: VRSQRT14PD, Arch: AVX512EVEX
+func (x Float64x2) ApproximateReciprocalOfSqrt() Float64x2
+
+// Asm: VSQRTPD, Arch: AVX
+func (x Float64x2) Sqrt() Float64x2
+
+// Asm: VRCP14PD, Arch: AVX512EVEX
+func (x Float64x4) ApproximateReciprocal() Float64x4
+
+// Asm: VRSQRT14PD, Arch: AVX512EVEX
+func (x Float64x4) ApproximateReciprocalOfSqrt() Float64x4
+
+// Asm: VSQRTPD, Arch: AVX
+func (x Float64x4) Sqrt() Float64x4
+
+// Asm: VRCP14PD, Arch: AVX512EVEX
+func (x Float64x8) ApproximateReciprocal() Float64x8
+
+// Asm: VRSQRT14PD, Arch: AVX512EVEX
+func (x Float64x8) ApproximateReciprocalOfSqrt() Float64x8
+
+// Asm: VSQRTPD, Arch: AVX512EVEX
+func (x Float64x8) Sqrt() Float64x8
+
+// Asm: VPABSW, Arch: AVX2
+func (x Int16x16) Absolute() Int16x16
+
+// Asm: VPOPCNTW, Arch: AVX512EVEX
+func (x Int16x16) PopCount() Int16x16
+
+// Asm: VPABSW, Arch: AVX512EVEX
+func (x Int16x32) Absolute() Int16x32
+
+// Asm: VPOPCNTW, Arch: AVX512EVEX
+func (x Int16x32) PopCount() Int16x32
+
+// Asm: VPABSW, Arch: AVX
+func (x Int16x8) Absolute() Int16x8
+
+// Asm: 
VPOPCNTW, Arch: AVX512EVEX +func (x Int16x8) PopCount() Int16x8 + +// Asm: VPABSD, Arch: AVX512EVEX +func (x Int32x16) Absolute() Int32x16 + +// Asm: VPOPCNTD, Arch: AVX512EVEX +func (x Int32x16) PopCount() Int32x16 + +// Asm: VPABSD, Arch: AVX +func (x Int32x4) Absolute() Int32x4 + +// Asm: VPOPCNTD, Arch: AVX512EVEX +func (x Int32x4) PopCount() Int32x4 + +// Asm: VPABSD, Arch: AVX2 +func (x Int32x8) Absolute() Int32x8 + +// Asm: VPOPCNTD, Arch: AVX512EVEX +func (x Int32x8) PopCount() Int32x8 + +// Asm: VPABSQ, Arch: AVX512EVEX +func (x Int64x2) Absolute() Int64x2 + +// Asm: VPOPCNTQ, Arch: AVX512EVEX +func (x Int64x2) PopCount() Int64x2 + +// Asm: VPABSQ, Arch: AVX512EVEX +func (x Int64x4) Absolute() Int64x4 + +// Asm: VPOPCNTQ, Arch: AVX512EVEX +func (x Int64x4) PopCount() Int64x4 + +// Asm: VPABSQ, Arch: AVX512EVEX +func (x Int64x8) Absolute() Int64x8 + +// Asm: VPOPCNTQ, Arch: AVX512EVEX +func (x Int64x8) PopCount() Int64x8 + +// Asm: VPABSB, Arch: AVX +func (x Int8x16) Absolute() Int8x16 + +// Asm: VPOPCNTB, Arch: AVX512EVEX +func (x Int8x16) PopCount() Int8x16 + +// Asm: VPABSB, Arch: AVX2 +func (x Int8x32) Absolute() Int8x32 + +// Asm: VPOPCNTB, Arch: AVX512EVEX +func (x Int8x32) PopCount() Int8x32 + +// Asm: VPABSB, Arch: AVX512EVEX +func (x Int8x64) Absolute() Int8x64 + +// Asm: VPOPCNTB, Arch: AVX512EVEX +func (x Int8x64) PopCount() Int8x64 + +// Asm: VPOPCNTW, Arch: AVX512EVEX +func (x Uint16x16) PopCount() Uint16x16 + +// Asm: VPOPCNTW, Arch: AVX512EVEX +func (x Uint16x32) PopCount() Uint16x32 + +// Asm: VPOPCNTW, Arch: AVX512EVEX +func (x Uint16x8) PopCount() Uint16x8 + +// Asm: VPOPCNTD, Arch: AVX512EVEX +func (x Uint32x16) PopCount() Uint32x16 + +// Asm: VPOPCNTD, Arch: AVX512EVEX +func (x Uint32x4) PopCount() Uint32x4 + +// Asm: VPOPCNTD, Arch: AVX512EVEX +func (x Uint32x8) PopCount() Uint32x8 + +// Asm: VPOPCNTQ, Arch: AVX512EVEX +func (x Uint64x2) PopCount() Uint64x2 + +// Asm: VPOPCNTQ, Arch: AVX512EVEX +func (x Uint64x4) PopCount() Uint64x4 + +// Asm: VPOPCNTQ, Arch: AVX512EVEX +func (x Uint64x8) PopCount() Uint64x8 + +// Asm: VPOPCNTB, Arch: AVX512EVEX +func (x Uint8x16) PopCount() Uint8x16 + +// Asm: VPOPCNTB, Arch: AVX512EVEX +func (x Uint8x32) PopCount() Uint8x32 + +// Asm: VPOPCNTB, Arch: AVX512EVEX +func (x Uint8x64) PopCount() Uint8x64 + +// Asm: VADDPS, Arch: AVX512EVEX +func (x Float32x16) Add(y Float32x16) Float32x16 + +// Asm: VANDPS, Arch: AVX512EVEX +func (x Float32x16) And(y Float32x16) Float32x16 + +// Asm: VANDNPS, Arch: AVX512EVEX +func (x Float32x16) AndNot(y Float32x16) Float32x16 + +// Asm: VDIVPS, Arch: AVX512EVEX +func (x Float32x16) Div(y Float32x16) Float32x16 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Float32x16) Equal(y Float32x16) Mask32x16 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Float32x16) Greater(y Float32x16) Mask32x16 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Float32x16) GreaterEqual(y Float32x16) Mask32x16 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 3 if it has; Returns mask element True if either one of the input\'s element is Nan; Please use this method as x\.IsNan\(x\) to check x only; +func (x Float32x16) IsNan(y Float32x16) Mask32x16 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Float32x16) Less(y Float32x16) Mask32x16 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Float32x16) LessEqual(y 
Float32x16) Mask32x16 + +// Asm: VRCP14PS, Arch: AVX512EVEX +func (x Float32x16) MaskedApproximateReciprocal(y Mask32x16) Float32x16 + +// Asm: VRSQRT14PS, Arch: AVX512EVEX +func (x Float32x16) MaskedApproximateReciprocalOfSqrt(y Mask32x16) Float32x16 + +// Asm: VSQRTPS, Arch: AVX512EVEX +func (x Float32x16) MaskedSqrt(y Mask32x16) Float32x16 + +// Asm: VMAXPS, Arch: AVX512EVEX +func (x Float32x16) Max(y Float32x16) Float32x16 + +// Asm: VMINPS, Arch: AVX512EVEX +func (x Float32x16) Min(y Float32x16) Float32x16 + +// Asm: VMULPS, Arch: AVX512EVEX +func (x Float32x16) Mul(y Float32x16) Float32x16 + +// Asm: VSCALEFPS, Arch: AVX512EVEX +func (x Float32x16) MulByPowOf2(y Float32x16) Float32x16 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Float32x16) NotEqual(y Float32x16) Mask32x16 + +// Asm: VORPS, Arch: AVX512EVEX +func (x Float32x16) Or(y Float32x16) Float32x16 + +// Asm: VADDPS, Arch: AVX512EVEX +func (x Float32x16) Sub(y Float32x16) Float32x16 + +// Asm: VXORPS, Arch: AVX512EVEX +func (x Float32x16) Xor(y Float32x16) Float32x16 + +// Asm: VADDPS, Arch: AVX +func (x Float32x4) Add(y Float32x4) Float32x4 + +// Asm: VANDPS, Arch: AVX +func (x Float32x4) And(y Float32x4) Float32x4 + +// Asm: VANDNPS, Arch: AVX +func (x Float32x4) AndNot(y Float32x4) Float32x4 + +// Asm: VDIVPS, Arch: AVX +func (x Float32x4) Div(y Float32x4) Float32x4 + +// Asm: VCMPPS, Arch: AVX, Doc: Predicate immediate is 0 if it has; +func (x Float32x4) Equal(y Float32x4) Mask32x4 + +// Asm: VCMPPS, Arch: AVX, Doc: Predicate immediate is 6 if it has; +func (x Float32x4) Greater(y Float32x4) Mask32x4 + +// Asm: VCMPPS, Arch: AVX, Doc: Predicate immediate is 5 if it has; +func (x Float32x4) GreaterEqual(y Float32x4) Mask32x4 + +// Asm: VCMPPS, Arch: AVX, Doc: Predicate immediate is 3 if it has; Returns mask element True if either one of the input\'s element is Nan; Please use this method as x\.IsNan\(x\) to check x only; +func (x Float32x4) IsNan(y Float32x4) Mask32x4 + +// Asm: VCMPPS, Arch: AVX, Doc: Predicate immediate is 1 if it has; +func (x Float32x4) Less(y Float32x4) Mask32x4 + +// Asm: VCMPPS, Arch: AVX, Doc: Predicate immediate is 2 if it has; +func (x Float32x4) LessEqual(y Float32x4) Mask32x4 + +// Asm: VRCP14PS, Arch: AVX512EVEX +func (x Float32x4) MaskedApproximateReciprocal(y Mask32x4) Float32x4 + +// Asm: VRSQRT14PS, Arch: AVX512EVEX +func (x Float32x4) MaskedApproximateReciprocalOfSqrt(y Mask32x4) Float32x4 + +// Asm: VSQRTPS, Arch: AVX512EVEX +func (x Float32x4) MaskedSqrt(y Mask32x4) Float32x4 + +// Asm: VMAXPS, Arch: AVX +func (x Float32x4) Max(y Float32x4) Float32x4 + +// Asm: VMINPS, Arch: AVX +func (x Float32x4) Min(y Float32x4) Float32x4 + +// Asm: VMULPS, Arch: AVX +func (x Float32x4) Mul(y Float32x4) Float32x4 + +// Asm: VSCALEFPS, Arch: AVX512EVEX +func (x Float32x4) MulByPowOf2(y Float32x4) Float32x4 + +// Asm: VCMPPS, Arch: AVX, Doc: Predicate immediate is 4 if it has; +func (x Float32x4) NotEqual(y Float32x4) Mask32x4 + +// Asm: VORPS, Arch: AVX +func (x Float32x4) Or(y Float32x4) Float32x4 + +// Asm: VHADDPS, Arch: AVX, Doc: Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target +func (x Float32x4) PairwiseAdd(y Float32x4) Float32x4 + +// Asm: VHSUBPS, Arch: AVX, Doc: Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target +func (x Float32x4) 
PairwiseSub(y Float32x4) Float32x4 + +// Asm: VADDPS, Arch: AVX +func (x Float32x4) Sub(y Float32x4) Float32x4 + +// Asm: VXORPS, Arch: AVX +func (x Float32x4) Xor(y Float32x4) Float32x4 + +// Asm: VADDPS, Arch: AVX +func (x Float32x8) Add(y Float32x8) Float32x8 + +// Asm: VANDPS, Arch: AVX +func (x Float32x8) And(y Float32x8) Float32x8 + +// Asm: VANDNPS, Arch: AVX +func (x Float32x8) AndNot(y Float32x8) Float32x8 + +// Asm: VDIVPS, Arch: AVX +func (x Float32x8) Div(y Float32x8) Float32x8 + +// Asm: VCMPPS, Arch: AVX, Doc: Predicate immediate is 0 if it has; +func (x Float32x8) Equal(y Float32x8) Mask32x8 + +// Asm: VCMPPS, Arch: AVX, Doc: Predicate immediate is 6 if it has; +func (x Float32x8) Greater(y Float32x8) Mask32x8 + +// Asm: VCMPPS, Arch: AVX, Doc: Predicate immediate is 5 if it has; +func (x Float32x8) GreaterEqual(y Float32x8) Mask32x8 + +// Asm: VCMPPS, Arch: AVX, Doc: Predicate immediate is 3 if it has; Returns mask element True if either one of the input\'s element is Nan; Please use this method as x\.IsNan\(x\) to check x only; +func (x Float32x8) IsNan(y Float32x8) Mask32x8 + +// Asm: VCMPPS, Arch: AVX, Doc: Predicate immediate is 1 if it has; +func (x Float32x8) Less(y Float32x8) Mask32x8 + +// Asm: VCMPPS, Arch: AVX, Doc: Predicate immediate is 2 if it has; +func (x Float32x8) LessEqual(y Float32x8) Mask32x8 + +// Asm: VRCP14PS, Arch: AVX512EVEX +func (x Float32x8) MaskedApproximateReciprocal(y Mask32x8) Float32x8 + +// Asm: VRSQRT14PS, Arch: AVX512EVEX +func (x Float32x8) MaskedApproximateReciprocalOfSqrt(y Mask32x8) Float32x8 + +// Asm: VSQRTPS, Arch: AVX512EVEX +func (x Float32x8) MaskedSqrt(y Mask32x8) Float32x8 + +// Asm: VMAXPS, Arch: AVX +func (x Float32x8) Max(y Float32x8) Float32x8 + +// Asm: VMINPS, Arch: AVX +func (x Float32x8) Min(y Float32x8) Float32x8 + +// Asm: VMULPS, Arch: AVX +func (x Float32x8) Mul(y Float32x8) Float32x8 + +// Asm: VSCALEFPS, Arch: AVX512EVEX +func (x Float32x8) MulByPowOf2(y Float32x8) Float32x8 + +// Asm: VCMPPS, Arch: AVX, Doc: Predicate immediate is 4 if it has; +func (x Float32x8) NotEqual(y Float32x8) Mask32x8 + +// Asm: VORPS, Arch: AVX +func (x Float32x8) Or(y Float32x8) Float32x8 + +// Asm: VHADDPS, Arch: AVX, Doc: Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target +func (x Float32x8) PairwiseAdd(y Float32x8) Float32x8 + +// Asm: VHSUBPS, Arch: AVX, Doc: Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target +func (x Float32x8) PairwiseSub(y Float32x8) Float32x8 + +// Asm: VADDPS, Arch: AVX +func (x Float32x8) Sub(y Float32x8) Float32x8 + +// Asm: VXORPS, Arch: AVX +func (x Float32x8) Xor(y Float32x8) Float32x8 + +// Asm: VADDPD, Arch: AVX +func (x Float64x2) Add(y Float64x2) Float64x2 + +// Asm: VANDPD, Arch: AVX +func (x Float64x2) And(y Float64x2) Float64x2 + +// Asm: VANDNPD, Arch: AVX +func (x Float64x2) AndNot(y Float64x2) Float64x2 + +// Asm: VDIVPD, Arch: AVX +func (x Float64x2) Div(y Float64x2) Float64x2 + +// Asm: VCMPPD, Arch: AVX, Doc: Predicate immediate is 0 if it has; +func (x Float64x2) Equal(y Float64x2) Mask64x2 + +// Asm: VCMPPD, Arch: AVX, Doc: Predicate immediate is 6 if it has; +func (x Float64x2) Greater(y Float64x2) Mask64x2 + +// Asm: VCMPPD, Arch: AVX, Doc: Predicate immediate is 5 if it has; +func (x Float64x2) GreaterEqual(y Float64x2) Mask64x2 + +// Asm: VCMPPD, Arch: AVX, Doc: Predicate 
immediate is 3 if it has; Returns mask element True if either one of the input\'s element is Nan; Please use this method as x\.IsNan\(x\) to check x only; +func (x Float64x2) IsNan(y Float64x2) Mask64x2 + +// Asm: VCMPPD, Arch: AVX, Doc: Predicate immediate is 1 if it has; +func (x Float64x2) Less(y Float64x2) Mask64x2 + +// Asm: VCMPPD, Arch: AVX, Doc: Predicate immediate is 2 if it has; +func (x Float64x2) LessEqual(y Float64x2) Mask64x2 + +// Asm: VRCP14PD, Arch: AVX512EVEX +func (x Float64x2) MaskedApproximateReciprocal(y Mask64x2) Float64x2 + +// Asm: VRSQRT14PD, Arch: AVX512EVEX +func (x Float64x2) MaskedApproximateReciprocalOfSqrt(y Mask64x2) Float64x2 + +// Asm: VSQRTPD, Arch: AVX512EVEX +func (x Float64x2) MaskedSqrt(y Mask64x2) Float64x2 + +// Asm: VMAXPD, Arch: AVX +func (x Float64x2) Max(y Float64x2) Float64x2 + +// Asm: VMINPD, Arch: AVX +func (x Float64x2) Min(y Float64x2) Float64x2 + +// Asm: VMULPD, Arch: AVX +func (x Float64x2) Mul(y Float64x2) Float64x2 + +// Asm: VSCALEFPD, Arch: AVX512EVEX +func (x Float64x2) MulByPowOf2(y Float64x2) Float64x2 + +// Asm: VCMPPD, Arch: AVX, Doc: Predicate immediate is 4 if it has; +func (x Float64x2) NotEqual(y Float64x2) Mask64x2 + +// Asm: VORPD, Arch: AVX +func (x Float64x2) Or(y Float64x2) Float64x2 + +// Asm: VHADDPD, Arch: AVX, Doc: Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target +func (x Float64x2) PairwiseAdd(y Float64x2) Float64x2 + +// Asm: VHSUBPD, Arch: AVX, Doc: Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target +func (x Float64x2) PairwiseSub(y Float64x2) Float64x2 + +// Asm: VADDPD, Arch: AVX +func (x Float64x2) Sub(y Float64x2) Float64x2 + +// Asm: VXORPD, Arch: AVX +func (x Float64x2) Xor(y Float64x2) Float64x2 + +// Asm: VADDPD, Arch: AVX +func (x Float64x4) Add(y Float64x4) Float64x4 + +// Asm: VANDPD, Arch: AVX +func (x Float64x4) And(y Float64x4) Float64x4 + +// Asm: VANDNPD, Arch: AVX +func (x Float64x4) AndNot(y Float64x4) Float64x4 + +// Asm: VDIVPD, Arch: AVX +func (x Float64x4) Div(y Float64x4) Float64x4 + +// Asm: VCMPPD, Arch: AVX, Doc: Predicate immediate is 0 if it has; +func (x Float64x4) Equal(y Float64x4) Mask64x4 + +// Asm: VCMPPD, Arch: AVX, Doc: Predicate immediate is 6 if it has; +func (x Float64x4) Greater(y Float64x4) Mask64x4 + +// Asm: VCMPPD, Arch: AVX, Doc: Predicate immediate is 5 if it has; +func (x Float64x4) GreaterEqual(y Float64x4) Mask64x4 + +// Asm: VCMPPD, Arch: AVX, Doc: Predicate immediate is 3 if it has; Returns mask element True if either one of the input\'s element is Nan; Please use this method as x\.IsNan\(x\) to check x only; +func (x Float64x4) IsNan(y Float64x4) Mask64x4 + +// Asm: VCMPPD, Arch: AVX, Doc: Predicate immediate is 1 if it has; +func (x Float64x4) Less(y Float64x4) Mask64x4 + +// Asm: VCMPPD, Arch: AVX, Doc: Predicate immediate is 2 if it has; +func (x Float64x4) LessEqual(y Float64x4) Mask64x4 + +// Asm: VRCP14PD, Arch: AVX512EVEX +func (x Float64x4) MaskedApproximateReciprocal(y Mask64x4) Float64x4 + +// Asm: VRSQRT14PD, Arch: AVX512EVEX +func (x Float64x4) MaskedApproximateReciprocalOfSqrt(y Mask64x4) Float64x4 + +// Asm: VSQRTPD, Arch: AVX512EVEX +func (x Float64x4) MaskedSqrt(y Mask64x4) Float64x4 + +// Asm: VMAXPD, Arch: AVX +func (x Float64x4) Max(y Float64x4) Float64x4 + +// Asm: VMINPD, Arch: AVX +func (x Float64x4) Min(y Float64x4) Float64x4 + 
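// Editor's sketch (not part of the generated stubs): the comparison methods
// above lower to VCMPPD/VCMPPS with the predicate immediate named in each
// doc comment (Equal=0, Less=1, LessEqual=2, IsNan=3, NotEqual=4,
// GreaterEqual=5, Greater=6), and the Mask64x4 they return feeds the
// Masked* variants. A minimal illustration, assuming a LoadFloat64x4 loader
// and a Store method analogous to the Int32x4 ones used in the sample tests
// earlier in this series; the zeroing of masked-off lanes mirrors what those
// tests observe for MaskedAdd on Int32x4 and is an assumption here.
//
//	func sqrtWhereLess(a, b *[4]float64) [4]float64 {
//		x := simd.LoadFloat64x4(a) // assumed loader, analogous to LoadInt32x4
//		y := simd.LoadFloat64x4(b)
//		m := x.Less(y)      // VCMPPD, predicate immediate 1
//		x = x.MaskedSqrt(m) // VSQRTPD, active only in lanes where m is set
//		var got [4]float64
//		x.Store(&got) // lanes with m clear come back as 0 in the sample tests
//		return got
//	}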
+// Asm: VMULPD, Arch: AVX +func (x Float64x4) Mul(y Float64x4) Float64x4 + +// Asm: VSCALEFPD, Arch: AVX512EVEX +func (x Float64x4) MulByPowOf2(y Float64x4) Float64x4 + +// Asm: VCMPPD, Arch: AVX, Doc: Predicate immediate is 4 if it has; +func (x Float64x4) NotEqual(y Float64x4) Mask64x4 + +// Asm: VORPD, Arch: AVX +func (x Float64x4) Or(y Float64x4) Float64x4 + +// Asm: VHADDPD, Arch: AVX, Doc: Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target +func (x Float64x4) PairwiseAdd(y Float64x4) Float64x4 + +// Asm: VHSUBPD, Arch: AVX, Doc: Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target +func (x Float64x4) PairwiseSub(y Float64x4) Float64x4 + +// Asm: VADDPD, Arch: AVX +func (x Float64x4) Sub(y Float64x4) Float64x4 + +// Asm: VXORPD, Arch: AVX +func (x Float64x4) Xor(y Float64x4) Float64x4 + +// Asm: VADDPD, Arch: AVX512EVEX +func (x Float64x8) Add(y Float64x8) Float64x8 + +// Asm: VANDPD, Arch: AVX512EVEX +func (x Float64x8) And(y Float64x8) Float64x8 + +// Asm: VANDNPD, Arch: AVX512EVEX +func (x Float64x8) AndNot(y Float64x8) Float64x8 + +// Asm: VDIVPD, Arch: AVX512EVEX +func (x Float64x8) Div(y Float64x8) Float64x8 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Float64x8) Equal(y Float64x8) Mask64x8 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Float64x8) Greater(y Float64x8) Mask64x8 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Float64x8) GreaterEqual(y Float64x8) Mask64x8 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 3 if it has; Returns mask element True if either one of the input\'s element is Nan; Please use this method as x\.IsNan\(x\) to check x only; +func (x Float64x8) IsNan(y Float64x8) Mask64x8 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Float64x8) Less(y Float64x8) Mask64x8 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Float64x8) LessEqual(y Float64x8) Mask64x8 + +// Asm: VRCP14PD, Arch: AVX512EVEX +func (x Float64x8) MaskedApproximateReciprocal(y Mask64x8) Float64x8 + +// Asm: VRSQRT14PD, Arch: AVX512EVEX +func (x Float64x8) MaskedApproximateReciprocalOfSqrt(y Mask64x8) Float64x8 + +// Asm: VSQRTPD, Arch: AVX512EVEX +func (x Float64x8) MaskedSqrt(y Mask64x8) Float64x8 + +// Asm: VMAXPD, Arch: AVX512EVEX +func (x Float64x8) Max(y Float64x8) Float64x8 + +// Asm: VMINPD, Arch: AVX512EVEX +func (x Float64x8) Min(y Float64x8) Float64x8 + +// Asm: VMULPD, Arch: AVX512EVEX +func (x Float64x8) Mul(y Float64x8) Float64x8 + +// Asm: VSCALEFPD, Arch: AVX512EVEX +func (x Float64x8) MulByPowOf2(y Float64x8) Float64x8 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Float64x8) NotEqual(y Float64x8) Mask64x8 + +// Asm: VORPD, Arch: AVX512EVEX +func (x Float64x8) Or(y Float64x8) Float64x8 + +// Asm: VADDPD, Arch: AVX512EVEX +func (x Float64x8) Sub(y Float64x8) Float64x8 + +// Asm: VXORPD, Arch: AVX512EVEX +func (x Float64x8) Xor(y Float64x8) Float64x8 + +// Asm: VPADDW, Arch: AVX2 +func (x Int16x16) Add(y Int16x16) Int16x16 + +// Asm: VPAND, Arch: AVX2 +func (x Int16x16) And(y Int16x16) Int16x16 + +// Asm: VPANDN, Arch: AVX2 +func (x Int16x16) AndNot(y Int16x16) Int16x16 + +// Asm: VPCMPEQW, Arch: AVX2, Doc: Predicate 
immediate is 0 if it has; +func (x Int16x16) Equal(y Int16x16) Mask16x16 + +// Asm: VPCMPGTW, Arch: AVX2, Doc: Predicate immediate is 6 if it has; +func (x Int16x16) Greater(y Int16x16) Mask16x16 + +// Asm: VPCMPW, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Int16x16) GreaterEqual(y Int16x16) Mask16x16 + +// Asm: VPCMPW, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Int16x16) Less(y Int16x16) Mask16x16 + +// Asm: VPCMPW, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Int16x16) LessEqual(y Int16x16) Mask16x16 + +// Asm: VPABSW, Arch: AVX512EVEX +func (x Int16x16) MaskedAbsolute(y Mask16x16) Int16x16 + +// Asm: VPOPCNTW, Arch: AVX512EVEX +func (x Int16x16) MaskedPopCount(y Mask16x16) Int16x16 + +// Asm: VPMAXSW, Arch: AVX2 +func (x Int16x16) Max(y Int16x16) Int16x16 + +// Asm: VPMINSW, Arch: AVX2 +func (x Int16x16) Min(y Int16x16) Int16x16 + +// Asm: VPMULHW, Arch: AVX2, Doc: Multiplies the elements from the two sources of size X at index i, store the high X bits of the result of size 2X at index i +func (x Int16x16) MulHigh(y Int16x16) Int16x16 + +// Asm: VPMULLW, Arch: AVX2, Doc: Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +func (x Int16x16) MulLow(y Int16x16) Int16x16 + +// Asm: VPCMPW, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Int16x16) NotEqual(y Int16x16) Mask16x16 + +// Asm: VPOR, Arch: AVX2 +func (x Int16x16) Or(y Int16x16) Int16x16 + +// Asm: VPHADDW, Arch: AVX2, Doc: Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target +func (x Int16x16) PairwiseAdd(y Int16x16) Int16x16 + +// Asm: VPHSUBW, Arch: AVX2, Doc: Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target +func (x Int16x16) PairwiseSub(y Int16x16) Int16x16 + +// Asm: VPADDSW, Arch: AVX2 +func (x Int16x16) SaturatedAdd(y Int16x16) Int16x16 + +// Asm: VPHADDSW, Arch: AVX2, Doc: Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target; With saturation +func (x Int16x16) SaturatedPairwiseAdd(y Int16x16) Int16x16 + +// Asm: VPHSUBSW, Arch: AVX2, Doc: Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target; With saturation +func (x Int16x16) SaturatedPairwiseSub(y Int16x16) Int16x16 + +// Asm: VPSUBSW, Arch: AVX2 +func (x Int16x16) SaturatedSub(y Int16x16) Int16x16 + +// Asm: VPSIGNW, Arch: AVX2 +func (x Int16x16) Sign(y Int16x16) Int16x16 + +// Asm: VPSUBW, Arch: AVX2 +func (x Int16x16) Sub(y Int16x16) Int16x16 + +// Asm: VPXOR, Arch: AVX2 +func (x Int16x16) Xor(y Int16x16) Int16x16 + +// Asm: VPADDW, Arch: AVX512EVEX +func (x Int16x32) Add(y Int16x32) Int16x32 + +// Asm: VPCMPEQW, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Int16x32) Equal(y Int16x32) Mask16x32 + +// Asm: VPCMPGTW, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Int16x32) Greater(y Int16x32) Mask16x32 + +// Asm: VPCMPW, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Int16x32) GreaterEqual(y Int16x32) Mask16x32 + +// Asm: VPCMPW, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Int16x32) Less(y Int16x32) Mask16x32 + 
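// Editor's note (not part of the generated stubs): the MulHigh/MulLow doc
// comments above describe a widening multiply that keeps only half of the
// 2X-bit product. A purely illustrative scalar reference for one int16 lane
// of VPMULHW/VPMULLW:
//
//	func mulHighLane(a, b int16) int16 {
//		wide := int32(a) * int32(b) // full 32-bit signed product
//		return int16(wide >> 16)    // MulHigh keeps the high 16 bits (VPMULHW)
//	}
//
//	func mulLowLane(a, b int16) int16 {
//		wide := int32(a) * int32(b)
//		return int16(wide) // MulLow keeps the low 16 bits (VPMULLW)
//	}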
+// Asm: VPCMPW, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Int16x32) LessEqual(y Int16x32) Mask16x32 + +// Asm: VPABSW, Arch: AVX512EVEX +func (x Int16x32) MaskedAbsolute(y Mask16x32) Int16x32 + +// Asm: VPOPCNTW, Arch: AVX512EVEX +func (x Int16x32) MaskedPopCount(y Mask16x32) Int16x32 + +// Asm: VPMAXSW, Arch: AVX512EVEX +func (x Int16x32) Max(y Int16x32) Int16x32 + +// Asm: VPMINSW, Arch: AVX512EVEX +func (x Int16x32) Min(y Int16x32) Int16x32 + +// Asm: VPMULHW, Arch: AVX512EVEX, Doc: Multiplies the elements from the two sources of size X at index i, store the high X bits of the result of size 2X at index i +func (x Int16x32) MulHigh(y Int16x32) Int16x32 + +// Asm: VPMULLW, Arch: AVX512EVEX, Doc: Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +func (x Int16x32) MulLow(y Int16x32) Int16x32 + +// Asm: VPCMPW, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Int16x32) NotEqual(y Int16x32) Mask16x32 + +// Asm: VPADDSW, Arch: AVX512EVEX +func (x Int16x32) SaturatedAdd(y Int16x32) Int16x32 + +// Asm: VPSUBSW, Arch: AVX512EVEX +func (x Int16x32) SaturatedSub(y Int16x32) Int16x32 + +// Asm: VPSUBW, Arch: AVX512EVEX +func (x Int16x32) Sub(y Int16x32) Int16x32 + +// Asm: VPADDW, Arch: AVX +func (x Int16x8) Add(y Int16x8) Int16x8 + +// Asm: VPAND, Arch: AVX +func (x Int16x8) And(y Int16x8) Int16x8 + +// Asm: VPANDN, Arch: AVX +func (x Int16x8) AndNot(y Int16x8) Int16x8 + +// Asm: VPCMPEQW, Arch: AVX, Doc: Predicate immediate is 0 if it has; +func (x Int16x8) Equal(y Int16x8) Mask16x8 + +// Asm: VPCMPGTW, Arch: AVX, Doc: Predicate immediate is 6 if it has; +func (x Int16x8) Greater(y Int16x8) Mask16x8 + +// Asm: VPCMPW, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Int16x8) GreaterEqual(y Int16x8) Mask16x8 + +// Asm: VPCMPW, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Int16x8) Less(y Int16x8) Mask16x8 + +// Asm: VPCMPW, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Int16x8) LessEqual(y Int16x8) Mask16x8 + +// Asm: VPABSW, Arch: AVX512EVEX +func (x Int16x8) MaskedAbsolute(y Mask16x8) Int16x8 + +// Asm: VPOPCNTW, Arch: AVX512EVEX +func (x Int16x8) MaskedPopCount(y Mask16x8) Int16x8 + +// Asm: VPMAXSW, Arch: AVX +func (x Int16x8) Max(y Int16x8) Int16x8 + +// Asm: VPMINSW, Arch: AVX +func (x Int16x8) Min(y Int16x8) Int16x8 + +// Asm: VPMULHW, Arch: AVX, Doc: Multiplies the elements from the two sources of size X at index i, store the high X bits of the result of size 2X at index i +func (x Int16x8) MulHigh(y Int16x8) Int16x8 + +// Asm: VPMULLW, Arch: AVX, Doc: Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +func (x Int16x8) MulLow(y Int16x8) Int16x8 + +// Asm: VPCMPW, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Int16x8) NotEqual(y Int16x8) Mask16x8 + +// Asm: VPOR, Arch: AVX +func (x Int16x8) Or(y Int16x8) Int16x8 + +// Asm: VPHADDW, Arch: AVX, Doc: Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target +func (x Int16x8) PairwiseAdd(y Int16x8) Int16x8 + +// Asm: VPHSUBW, Arch: AVX, Doc: Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target +func (x Int16x8) PairwiseSub(y Int16x8) Int16x8 + +// Asm: VPADDSW, Arch: AVX 
+func (x Int16x8) SaturatedAdd(y Int16x8) Int16x8 + +// Asm: VPHADDSW, Arch: AVX, Doc: Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target; With saturation +func (x Int16x8) SaturatedPairwiseAdd(y Int16x8) Int16x8 + +// Asm: VPHSUBSW, Arch: AVX, Doc: Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target; With saturation +func (x Int16x8) SaturatedPairwiseSub(y Int16x8) Int16x8 + +// Asm: VPSUBSW, Arch: AVX +func (x Int16x8) SaturatedSub(y Int16x8) Int16x8 + +// Asm: VPSIGNW, Arch: AVX +func (x Int16x8) Sign(y Int16x8) Int16x8 + +// Asm: VPSUBW, Arch: AVX +func (x Int16x8) Sub(y Int16x8) Int16x8 + +// Asm: VPXOR, Arch: AVX +func (x Int16x8) Xor(y Int16x8) Int16x8 + +// Asm: VPADDD, Arch: AVX512EVEX +func (x Int32x16) Add(y Int32x16) Int32x16 + +// Asm: VPANDD, Arch: AVX512EVEX +func (x Int32x16) And(y Int32x16) Int32x16 + +// Asm: VPANDND, Arch: AVX512EVEX +func (x Int32x16) AndNot(y Int32x16) Int32x16 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Int32x16) Equal(y Int32x16) Mask32x16 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Int32x16) Greater(y Int32x16) Mask32x16 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Int32x16) GreaterEqual(y Int32x16) Mask32x16 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Int32x16) Less(y Int32x16) Mask32x16 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Int32x16) LessEqual(y Int32x16) Mask32x16 + +// Asm: VPABSD, Arch: AVX512EVEX +func (x Int32x16) MaskedAbsolute(y Mask32x16) Int32x16 + +// Asm: VPOPCNTD, Arch: AVX512EVEX +func (x Int32x16) MaskedPopCount(y Mask32x16) Int32x16 + +// Asm: VPMAXSD, Arch: AVX512EVEX +func (x Int32x16) Max(y Int32x16) Int32x16 + +// Asm: VPMINSD, Arch: AVX512EVEX +func (x Int32x16) Min(y Int32x16) Int32x16 + +// Asm: VPMULLD, Arch: AVX512EVEX, Doc: Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +func (x Int32x16) MulLow(y Int32x16) Int32x16 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Int32x16) NotEqual(y Int32x16) Mask32x16 + +// Asm: VPORD, Arch: AVX512EVEX +func (x Int32x16) Or(y Int32x16) Int32x16 + +// Asm: VPSUBD, Arch: AVX512EVEX +func (x Int32x16) Sub(y Int32x16) Int32x16 + +// Asm: VPXORD, Arch: AVX512EVEX +func (x Int32x16) Xor(y Int32x16) Int32x16 + +// Asm: VPADDD, Arch: AVX +func (x Int32x4) Add(y Int32x4) Int32x4 + +// Asm: VPAND, Arch: AVX +func (x Int32x4) And(y Int32x4) Int32x4 + +// Asm: VPANDN, Arch: AVX +func (x Int32x4) AndNot(y Int32x4) Int32x4 + +// Asm: VPCMPEQD, Arch: AVX, Doc: Predicate immediate is 0 if it has; +func (x Int32x4) Equal(y Int32x4) Mask32x4 + +// Asm: VPCMPGTD, Arch: AVX, Doc: Predicate immediate is 6 if it has; +func (x Int32x4) Greater(y Int32x4) Mask32x4 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Int32x4) GreaterEqual(y Int32x4) Mask32x4 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Int32x4) Less(y Int32x4) Mask32x4 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Int32x4) LessEqual(y Int32x4) Mask32x4 + +// Asm: VPABSD, Arch: AVX512EVEX +func (x 
Int32x4) MaskedAbsolute(y Mask32x4) Int32x4 + +// Asm: VPOPCNTD, Arch: AVX512EVEX +func (x Int32x4) MaskedPopCount(y Mask32x4) Int32x4 + +// Asm: VPMAXSD, Arch: AVX +func (x Int32x4) Max(y Int32x4) Int32x4 + +// Asm: VPMINSD, Arch: AVX +func (x Int32x4) Min(y Int32x4) Int32x4 + +// Asm: VPMULDQ, Arch: AVX, Doc: Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +func (x Int32x4) MulEvenWiden(y Int32x4) Int64x2 + +// Asm: VPMULLD, Arch: AVX, Doc: Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +func (x Int32x4) MulLow(y Int32x4) Int32x4 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Int32x4) NotEqual(y Int32x4) Mask32x4 + +// Asm: VPOR, Arch: AVX +func (x Int32x4) Or(y Int32x4) Int32x4 + +// Asm: VPHADDD, Arch: AVX, Doc: Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target +func (x Int32x4) PairwiseAdd(y Int32x4) Int32x4 + +// Asm: VPHSUBD, Arch: AVX, Doc: Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target +func (x Int32x4) PairwiseSub(y Int32x4) Int32x4 + +// Asm: VPSIGND, Arch: AVX +func (x Int32x4) Sign(y Int32x4) Int32x4 + +// Asm: VPSUBD, Arch: AVX +func (x Int32x4) Sub(y Int32x4) Int32x4 + +// Asm: VPXOR, Arch: AVX +func (x Int32x4) Xor(y Int32x4) Int32x4 + +// Asm: VPADDD, Arch: AVX2 +func (x Int32x8) Add(y Int32x8) Int32x8 + +// Asm: VPAND, Arch: AVX2 +func (x Int32x8) And(y Int32x8) Int32x8 + +// Asm: VPANDN, Arch: AVX2 +func (x Int32x8) AndNot(y Int32x8) Int32x8 + +// Asm: VPCMPEQD, Arch: AVX2, Doc: Predicate immediate is 0 if it has; +func (x Int32x8) Equal(y Int32x8) Mask32x8 + +// Asm: VPCMPGTD, Arch: AVX2, Doc: Predicate immediate is 6 if it has; +func (x Int32x8) Greater(y Int32x8) Mask32x8 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Int32x8) GreaterEqual(y Int32x8) Mask32x8 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Int32x8) Less(y Int32x8) Mask32x8 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Int32x8) LessEqual(y Int32x8) Mask32x8 + +// Asm: VPABSD, Arch: AVX512EVEX +func (x Int32x8) MaskedAbsolute(y Mask32x8) Int32x8 + +// Asm: VPOPCNTD, Arch: AVX512EVEX +func (x Int32x8) MaskedPopCount(y Mask32x8) Int32x8 + +// Asm: VPMAXSD, Arch: AVX2 +func (x Int32x8) Max(y Int32x8) Int32x8 + +// Asm: VPMINSD, Arch: AVX2 +func (x Int32x8) Min(y Int32x8) Int32x8 + +// Asm: VPMULDQ, Arch: AVX2, Doc: Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +func (x Int32x8) MulEvenWiden(y Int32x8) Int64x4 + +// Asm: VPMULLD, Arch: AVX2, Doc: Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +func (x Int32x8) MulLow(y Int32x8) Int32x8 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Int32x8) NotEqual(y Int32x8) Mask32x8 + +// Asm: VPOR, Arch: AVX2 +func (x Int32x8) Or(y Int32x8) Int32x8 + +// Asm: VPHADDD, Arch: AVX2, Doc: Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target +func (x Int32x8) PairwiseAdd(y 
Int32x8) Int32x8 + +// Asm: VPHSUBD, Arch: AVX2, Doc: Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target +func (x Int32x8) PairwiseSub(y Int32x8) Int32x8 + +// Asm: VPSIGND, Arch: AVX2 +func (x Int32x8) Sign(y Int32x8) Int32x8 + +// Asm: VPSUBD, Arch: AVX2 +func (x Int32x8) Sub(y Int32x8) Int32x8 + +// Asm: VPXOR, Arch: AVX2 +func (x Int32x8) Xor(y Int32x8) Int32x8 + +// Asm: VPADDQ, Arch: AVX +func (x Int64x2) Add(y Int64x2) Int64x2 + +// Asm: VPAND, Arch: AVX +func (x Int64x2) And(y Int64x2) Int64x2 + +// Asm: VPANDN, Arch: AVX +func (x Int64x2) AndNot(y Int64x2) Int64x2 + +// Asm: VPCMPEQQ, Arch: AVX, Doc: Predicate immediate is 0 if it has; +func (x Int64x2) Equal(y Int64x2) Mask64x2 + +// Asm: VPCMPGTQ, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Int64x2) Greater(y Int64x2) Mask64x2 + +// Asm: VPCMPQ, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Int64x2) GreaterEqual(y Int64x2) Mask64x2 + +// Asm: VPCMPQ, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Int64x2) Less(y Int64x2) Mask64x2 + +// Asm: VPCMPQ, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Int64x2) LessEqual(y Int64x2) Mask64x2 + +// Asm: VPABSQ, Arch: AVX512EVEX +func (x Int64x2) MaskedAbsolute(y Mask64x2) Int64x2 + +// Asm: VPOPCNTQ, Arch: AVX512EVEX +func (x Int64x2) MaskedPopCount(y Mask64x2) Int64x2 + +// Asm: VPMAXSQ, Arch: AVX512EVEX +func (x Int64x2) Max(y Int64x2) Int64x2 + +// Asm: VPMINSQ, Arch: AVX512EVEX +func (x Int64x2) Min(y Int64x2) Int64x2 + +// Asm: VPMULDQ, Arch: AVX512EVEX, Doc: Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +func (x Int64x2) MulEvenWiden(y Int64x2) Int64x2 + +// Asm: VPMULLQ, Arch: AVX512EVEX, Doc: Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +func (x Int64x2) MulLow(y Int64x2) Int64x2 + +// Asm: VPCMPQ, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Int64x2) NotEqual(y Int64x2) Mask64x2 + +// Asm: VPOR, Arch: AVX +func (x Int64x2) Or(y Int64x2) Int64x2 + +// Asm: VPSUBQ, Arch: AVX +func (x Int64x2) Sub(y Int64x2) Int64x2 + +// Asm: VPXOR, Arch: AVX +func (x Int64x2) Xor(y Int64x2) Int64x2 + +// Asm: VPADDQ, Arch: AVX2 +func (x Int64x4) Add(y Int64x4) Int64x4 + +// Asm: VPAND, Arch: AVX2 +func (x Int64x4) And(y Int64x4) Int64x4 + +// Asm: VPANDN, Arch: AVX2 +func (x Int64x4) AndNot(y Int64x4) Int64x4 + +// Asm: VPCMPEQQ, Arch: AVX2, Doc: Predicate immediate is 0 if it has; +func (x Int64x4) Equal(y Int64x4) Mask64x4 + +// Asm: VPCMPGTQ, Arch: AVX2, Doc: Predicate immediate is 6 if it has; +func (x Int64x4) Greater(y Int64x4) Mask64x4 + +// Asm: VPCMPQ, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Int64x4) GreaterEqual(y Int64x4) Mask64x4 + +// Asm: VPCMPQ, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Int64x4) Less(y Int64x4) Mask64x4 + +// Asm: VPCMPQ, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Int64x4) LessEqual(y Int64x4) Mask64x4 + +// Asm: VPABSQ, Arch: AVX512EVEX +func (x Int64x4) MaskedAbsolute(y Mask64x4) Int64x4 + +// Asm: VPOPCNTQ, Arch: AVX512EVEX +func (x Int64x4) MaskedPopCount(y Mask64x4) Int64x4 + +// Asm: VPMAXSQ, Arch: AVX512EVEX +func (x Int64x4) Max(y Int64x4) Int64x4 + +// Asm: VPMINSQ, Arch: AVX512EVEX +func (x Int64x4) Min(y Int64x4) 
Int64x4 + +// Asm: VPMULDQ, Arch: AVX512EVEX, Doc: Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +func (x Int64x4) MulEvenWiden(y Int64x4) Int64x4 + +// Asm: VPMULLQ, Arch: AVX512EVEX, Doc: Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +func (x Int64x4) MulLow(y Int64x4) Int64x4 + +// Asm: VPCMPQ, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Int64x4) NotEqual(y Int64x4) Mask64x4 + +// Asm: VPOR, Arch: AVX2 +func (x Int64x4) Or(y Int64x4) Int64x4 + +// Asm: VPSUBQ, Arch: AVX2 +func (x Int64x4) Sub(y Int64x4) Int64x4 + +// Asm: VPXOR, Arch: AVX2 +func (x Int64x4) Xor(y Int64x4) Int64x4 + +// Asm: VPADDQ, Arch: AVX512EVEX +func (x Int64x8) Add(y Int64x8) Int64x8 + +// Asm: VPANDQ, Arch: AVX512EVEX +func (x Int64x8) And(y Int64x8) Int64x8 + +// Asm: VPANDNQ, Arch: AVX512EVEX +func (x Int64x8) AndNot(y Int64x8) Int64x8 + +// Asm: VPCMPEQQ, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Int64x8) Equal(y Int64x8) Mask64x8 + +// Asm: VPCMPGTQ, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Int64x8) Greater(y Int64x8) Mask64x8 + +// Asm: VPCMPQ, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Int64x8) GreaterEqual(y Int64x8) Mask64x8 + +// Asm: VPCMPQ, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Int64x8) Less(y Int64x8) Mask64x8 + +// Asm: VPCMPQ, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Int64x8) LessEqual(y Int64x8) Mask64x8 + +// Asm: VPABSQ, Arch: AVX512EVEX +func (x Int64x8) MaskedAbsolute(y Mask64x8) Int64x8 + +// Asm: VPOPCNTQ, Arch: AVX512EVEX +func (x Int64x8) MaskedPopCount(y Mask64x8) Int64x8 + +// Asm: VPMAXSQ, Arch: AVX512EVEX +func (x Int64x8) Max(y Int64x8) Int64x8 + +// Asm: VPMINSQ, Arch: AVX512EVEX +func (x Int64x8) Min(y Int64x8) Int64x8 + +// Asm: VPMULDQ, Arch: AVX512EVEX, Doc: Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +func (x Int64x8) MulEvenWiden(y Int64x8) Int64x8 + +// Asm: VPMULLQ, Arch: AVX512EVEX, Doc: Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +func (x Int64x8) MulLow(y Int64x8) Int64x8 + +// Asm: VPCMPQ, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Int64x8) NotEqual(y Int64x8) Mask64x8 + +// Asm: VPORQ, Arch: AVX512EVEX +func (x Int64x8) Or(y Int64x8) Int64x8 + +// Asm: VPSUBQ, Arch: AVX512EVEX +func (x Int64x8) Sub(y Int64x8) Int64x8 + +// Asm: VPXORQ, Arch: AVX512EVEX +func (x Int64x8) Xor(y Int64x8) Int64x8 + +// Asm: VPADDB, Arch: AVX +func (x Int8x16) Add(y Int8x16) Int8x16 + +// Asm: VPAND, Arch: AVX +func (x Int8x16) And(y Int8x16) Int8x16 + +// Asm: VPANDN, Arch: AVX +func (x Int8x16) AndNot(y Int8x16) Int8x16 + +// Asm: VPCMPEQB, Arch: AVX, Doc: Predicate immediate is 0 if it has; +func (x Int8x16) Equal(y Int8x16) Mask8x16 + +// Asm: VPCMPGTB, Arch: AVX, Doc: Predicate immediate is 6 if it has; +func (x Int8x16) Greater(y Int8x16) Mask8x16 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Int8x16) GreaterEqual(y Int8x16) Mask8x16 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Int8x16) Less(y Int8x16) Mask8x16 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Int8x16) LessEqual(y Int8x16) 
Mask8x16 + +// Asm: VPABSB, Arch: AVX512EVEX +func (x Int8x16) MaskedAbsolute(y Mask8x16) Int8x16 + +// Asm: VPOPCNTB, Arch: AVX512EVEX +func (x Int8x16) MaskedPopCount(y Mask8x16) Int8x16 + +// Asm: VPMAXSB, Arch: AVX +func (x Int8x16) Max(y Int8x16) Int8x16 + +// Asm: VPMINSB, Arch: AVX +func (x Int8x16) Min(y Int8x16) Int8x16 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Int8x16) NotEqual(y Int8x16) Mask8x16 + +// Asm: VPOR, Arch: AVX +func (x Int8x16) Or(y Int8x16) Int8x16 + +// Asm: VPADDSB, Arch: AVX +func (x Int8x16) SaturatedAdd(y Int8x16) Int8x16 + +// Asm: VPSUBSB, Arch: AVX +func (x Int8x16) SaturatedSub(y Int8x16) Int8x16 + +// Asm: VPSIGNB, Arch: AVX +func (x Int8x16) Sign(y Int8x16) Int8x16 + +// Asm: VPSUBB, Arch: AVX +func (x Int8x16) Sub(y Int8x16) Int8x16 + +// Asm: VPXOR, Arch: AVX +func (x Int8x16) Xor(y Int8x16) Int8x16 + +// Asm: VPADDB, Arch: AVX2 +func (x Int8x32) Add(y Int8x32) Int8x32 + +// Asm: VPAND, Arch: AVX2 +func (x Int8x32) And(y Int8x32) Int8x32 + +// Asm: VPANDN, Arch: AVX2 +func (x Int8x32) AndNot(y Int8x32) Int8x32 + +// Asm: VPCMPEQB, Arch: AVX2, Doc: Predicate immediate is 0 if it has; +func (x Int8x32) Equal(y Int8x32) Mask8x32 + +// Asm: VPCMPGTB, Arch: AVX2, Doc: Predicate immediate is 6 if it has; +func (x Int8x32) Greater(y Int8x32) Mask8x32 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Int8x32) GreaterEqual(y Int8x32) Mask8x32 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Int8x32) Less(y Int8x32) Mask8x32 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Int8x32) LessEqual(y Int8x32) Mask8x32 + +// Asm: VPABSB, Arch: AVX512EVEX +func (x Int8x32) MaskedAbsolute(y Mask8x32) Int8x32 + +// Asm: VPOPCNTB, Arch: AVX512EVEX +func (x Int8x32) MaskedPopCount(y Mask8x32) Int8x32 + +// Asm: VPMAXSB, Arch: AVX2 +func (x Int8x32) Max(y Int8x32) Int8x32 + +// Asm: VPMINSB, Arch: AVX2 +func (x Int8x32) Min(y Int8x32) Int8x32 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Int8x32) NotEqual(y Int8x32) Mask8x32 + +// Asm: VPOR, Arch: AVX2 +func (x Int8x32) Or(y Int8x32) Int8x32 + +// Asm: VPADDSB, Arch: AVX2 +func (x Int8x32) SaturatedAdd(y Int8x32) Int8x32 + +// Asm: VPSUBSB, Arch: AVX2 +func (x Int8x32) SaturatedSub(y Int8x32) Int8x32 + +// Asm: VPSIGNB, Arch: AVX2 +func (x Int8x32) Sign(y Int8x32) Int8x32 + +// Asm: VPSUBB, Arch: AVX2 +func (x Int8x32) Sub(y Int8x32) Int8x32 + +// Asm: VPXOR, Arch: AVX2 +func (x Int8x32) Xor(y Int8x32) Int8x32 + +// Asm: VPADDB, Arch: AVX512EVEX +func (x Int8x64) Add(y Int8x64) Int8x64 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Int8x64) Equal(y Int8x64) Mask8x64 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Int8x64) Greater(y Int8x64) Mask8x64 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Int8x64) GreaterEqual(y Int8x64) Mask8x64 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Int8x64) Less(y Int8x64) Mask8x64 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Int8x64) LessEqual(y Int8x64) Mask8x64 + +// Asm: VPABSB, Arch: AVX512EVEX +func (x Int8x64) MaskedAbsolute(y Mask8x64) Int8x64 + +// Asm: VPOPCNTB, Arch: AVX512EVEX +func (x Int8x64) MaskedPopCount(y Mask8x64) Int8x64 + +// Asm: VPMAXSB, Arch: AVX512EVEX +func (x Int8x64) 
Max(y Int8x64) Int8x64 + +// Asm: VPMINSB, Arch: AVX512EVEX +func (x Int8x64) Min(y Int8x64) Int8x64 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Int8x64) NotEqual(y Int8x64) Mask8x64 + +// Asm: VPADDSB, Arch: AVX512EVEX +func (x Int8x64) SaturatedAdd(y Int8x64) Int8x64 + +// Asm: VPSUBSB, Arch: AVX512EVEX +func (x Int8x64) SaturatedSub(y Int8x64) Int8x64 + +// Asm: VPSUBB, Arch: AVX512EVEX +func (x Int8x64) Sub(y Int8x64) Int8x64 + +// Asm: VPADDW, Arch: AVX2 +func (x Uint16x16) Add(y Uint16x16) Uint16x16 + +// Asm: VPAND, Arch: AVX2 +func (x Uint16x16) And(y Uint16x16) Uint16x16 + +// Asm: VPANDN, Arch: AVX2 +func (x Uint16x16) AndNot(y Uint16x16) Uint16x16 + +// Asm: VPAVGW, Arch: AVX2 +func (x Uint16x16) Average(y Uint16x16) Uint16x16 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Uint16x16) Equal(y Uint16x16) Mask16x16 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Uint16x16) Greater(y Uint16x16) Mask16x16 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Uint16x16) GreaterEqual(y Uint16x16) Mask16x16 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Uint16x16) Less(y Uint16x16) Mask16x16 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Uint16x16) LessEqual(y Uint16x16) Mask16x16 + +// Asm: VPOPCNTW, Arch: AVX512EVEX +func (x Uint16x16) MaskedPopCount(y Mask16x16) Uint16x16 + +// Asm: VPMAXUW, Arch: AVX2 +func (x Uint16x16) Max(y Uint16x16) Uint16x16 + +// Asm: VPMINUW, Arch: AVX2 +func (x Uint16x16) Min(y Uint16x16) Uint16x16 + +// Asm: VPMULHUW, Arch: AVX2, Doc: Multiplies the elements from the two sources of size X at index i, store the high X bits of the result of size 2X at index i +func (x Uint16x16) MulHigh(y Uint16x16) Uint16x16 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Uint16x16) NotEqual(y Uint16x16) Mask16x16 + +// Asm: VPOR, Arch: AVX2 +func (x Uint16x16) Or(y Uint16x16) Uint16x16 + +// Asm: VPHADDW, Arch: AVX2, Doc: Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target +func (x Uint16x16) PairwiseAdd(y Uint16x16) Uint16x16 + +// Asm: VPHSUBW, Arch: AVX2, Doc: Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target +func (x Uint16x16) PairwiseSub(y Uint16x16) Uint16x16 + +// Asm: VPADDSW, Arch: AVX2 +func (x Uint16x16) SaturatedAdd(y Uint16x16) Uint16x16 + +// Asm: VPSUBSW, Arch: AVX2 +func (x Uint16x16) SaturatedSub(y Uint16x16) Uint16x16 + +// Asm: VPSUBW, Arch: AVX2 +func (x Uint16x16) Sub(y Uint16x16) Uint16x16 + +// Asm: VPXOR, Arch: AVX2 +func (x Uint16x16) Xor(y Uint16x16) Uint16x16 + +// Asm: VPADDW, Arch: AVX512EVEX +func (x Uint16x32) Add(y Uint16x32) Uint16x32 + +// Asm: VPAVGW, Arch: AVX512EVEX +func (x Uint16x32) Average(y Uint16x32) Uint16x32 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Uint16x32) Equal(y Uint16x32) Mask16x32 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Uint16x32) Greater(y Uint16x32) Mask16x32 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Uint16x32) GreaterEqual(y Uint16x32) Mask16x32 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate 
immediate is 1 if it has; +func (x Uint16x32) Less(y Uint16x32) Mask16x32 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Uint16x32) LessEqual(y Uint16x32) Mask16x32 + +// Asm: VPOPCNTW, Arch: AVX512EVEX +func (x Uint16x32) MaskedPopCount(y Mask16x32) Uint16x32 + +// Asm: VPMAXUW, Arch: AVX512EVEX +func (x Uint16x32) Max(y Uint16x32) Uint16x32 + +// Asm: VPMINUW, Arch: AVX512EVEX +func (x Uint16x32) Min(y Uint16x32) Uint16x32 + +// Asm: VPMULHUW, Arch: AVX512EVEX, Doc: Multiplies the elements from the two sources of size X at index i, store the high X bits of the result of size 2X at index i +func (x Uint16x32) MulHigh(y Uint16x32) Uint16x32 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Uint16x32) NotEqual(y Uint16x32) Mask16x32 + +// Asm: VPADDSW, Arch: AVX512EVEX +func (x Uint16x32) SaturatedAdd(y Uint16x32) Uint16x32 + +// Asm: VPSUBSW, Arch: AVX512EVEX +func (x Uint16x32) SaturatedSub(y Uint16x32) Uint16x32 + +// Asm: VPSUBW, Arch: AVX512EVEX +func (x Uint16x32) Sub(y Uint16x32) Uint16x32 + +// Asm: VPADDW, Arch: AVX +func (x Uint16x8) Add(y Uint16x8) Uint16x8 + +// Asm: VPAND, Arch: AVX +func (x Uint16x8) And(y Uint16x8) Uint16x8 + +// Asm: VPANDN, Arch: AVX +func (x Uint16x8) AndNot(y Uint16x8) Uint16x8 + +// Asm: VPAVGW, Arch: AVX +func (x Uint16x8) Average(y Uint16x8) Uint16x8 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Uint16x8) Equal(y Uint16x8) Mask16x8 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Uint16x8) Greater(y Uint16x8) Mask16x8 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Uint16x8) GreaterEqual(y Uint16x8) Mask16x8 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Uint16x8) Less(y Uint16x8) Mask16x8 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Uint16x8) LessEqual(y Uint16x8) Mask16x8 + +// Asm: VPOPCNTW, Arch: AVX512EVEX +func (x Uint16x8) MaskedPopCount(y Mask16x8) Uint16x8 + +// Asm: VPMAXUW, Arch: AVX +func (x Uint16x8) Max(y Uint16x8) Uint16x8 + +// Asm: VPMINUW, Arch: AVX +func (x Uint16x8) Min(y Uint16x8) Uint16x8 + +// Asm: VPMULHUW, Arch: AVX, Doc: Multiplies the elements from the two sources of size X at index i, store the high X bits of the result of size 2X at index i +func (x Uint16x8) MulHigh(y Uint16x8) Uint16x8 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Uint16x8) NotEqual(y Uint16x8) Mask16x8 + +// Asm: VPOR, Arch: AVX +func (x Uint16x8) Or(y Uint16x8) Uint16x8 + +// Asm: VPHADDW, Arch: AVX, Doc: Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target +func (x Uint16x8) PairwiseAdd(y Uint16x8) Uint16x8 + +// Asm: VPHSUBW, Arch: AVX, Doc: Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target +func (x Uint16x8) PairwiseSub(y Uint16x8) Uint16x8 + +// Asm: VPADDSW, Arch: AVX +func (x Uint16x8) SaturatedAdd(y Uint16x8) Uint16x8 + +// Asm: VPSUBSW, Arch: AVX +func (x Uint16x8) SaturatedSub(y Uint16x8) Uint16x8 + +// Asm: VPSUBW, Arch: AVX +func (x Uint16x8) Sub(y Uint16x8) Uint16x8 + +// Asm: VPXOR, Arch: AVX +func (x Uint16x8) Xor(y Uint16x8) Uint16x8 + +// Asm: VPADDD, Arch: AVX512EVEX +func (x Uint32x16) Add(y Uint32x16) 
Uint32x16 + +// Asm: VPANDD, Arch: AVX512EVEX +func (x Uint32x16) And(y Uint32x16) Uint32x16 + +// Asm: VPANDND, Arch: AVX512EVEX +func (x Uint32x16) AndNot(y Uint32x16) Uint32x16 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Uint32x16) Equal(y Uint32x16) Mask32x16 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Uint32x16) Greater(y Uint32x16) Mask32x16 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Uint32x16) GreaterEqual(y Uint32x16) Mask32x16 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Uint32x16) Less(y Uint32x16) Mask32x16 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Uint32x16) LessEqual(y Uint32x16) Mask32x16 + +// Asm: VPOPCNTD, Arch: AVX512EVEX +func (x Uint32x16) MaskedPopCount(y Mask32x16) Uint32x16 + +// Asm: VPMAXUD, Arch: AVX512EVEX +func (x Uint32x16) Max(y Uint32x16) Uint32x16 + +// Asm: VPMINUD, Arch: AVX512EVEX +func (x Uint32x16) Min(y Uint32x16) Uint32x16 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Uint32x16) NotEqual(y Uint32x16) Mask32x16 + +// Asm: VPORD, Arch: AVX512EVEX +func (x Uint32x16) Or(y Uint32x16) Uint32x16 + +// Asm: VPSUBD, Arch: AVX512EVEX +func (x Uint32x16) Sub(y Uint32x16) Uint32x16 + +// Asm: VPXORD, Arch: AVX512EVEX +func (x Uint32x16) Xor(y Uint32x16) Uint32x16 + +// Asm: VPADDD, Arch: AVX +func (x Uint32x4) Add(y Uint32x4) Uint32x4 + +// Asm: VPAND, Arch: AVX +func (x Uint32x4) And(y Uint32x4) Uint32x4 + +// Asm: VPANDN, Arch: AVX +func (x Uint32x4) AndNot(y Uint32x4) Uint32x4 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Uint32x4) Equal(y Uint32x4) Mask32x4 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Uint32x4) Greater(y Uint32x4) Mask32x4 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Uint32x4) GreaterEqual(y Uint32x4) Mask32x4 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Uint32x4) Less(y Uint32x4) Mask32x4 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Uint32x4) LessEqual(y Uint32x4) Mask32x4 + +// Asm: VPOPCNTD, Arch: AVX512EVEX +func (x Uint32x4) MaskedPopCount(y Mask32x4) Uint32x4 + +// Asm: VPMAXUD, Arch: AVX +func (x Uint32x4) Max(y Uint32x4) Uint32x4 + +// Asm: VPMINUD, Arch: AVX +func (x Uint32x4) Min(y Uint32x4) Uint32x4 + +// Asm: VPMULUDQ, Arch: AVX, Doc: Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +func (x Uint32x4) MulEvenWiden(y Uint32x4) Uint64x2 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Uint32x4) NotEqual(y Uint32x4) Mask32x4 + +// Asm: VPOR, Arch: AVX +func (x Uint32x4) Or(y Uint32x4) Uint32x4 + +// Asm: VPHADDD, Arch: AVX, Doc: Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target +func (x Uint32x4) PairwiseAdd(y Uint32x4) Uint32x4 + +// Asm: VPHSUBD, Arch: AVX, Doc: Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target +func (x Uint32x4) PairwiseSub(y Uint32x4) Uint32x4 + +// Asm: VPSUBD, Arch: AVX +func (x Uint32x4) Sub(y Uint32x4) Uint32x4 + +// Asm: 
VPXOR, Arch: AVX +func (x Uint32x4) Xor(y Uint32x4) Uint32x4 + +// Asm: VPADDD, Arch: AVX2 +func (x Uint32x8) Add(y Uint32x8) Uint32x8 + +// Asm: VPAND, Arch: AVX2 +func (x Uint32x8) And(y Uint32x8) Uint32x8 + +// Asm: VPANDN, Arch: AVX2 +func (x Uint32x8) AndNot(y Uint32x8) Uint32x8 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Uint32x8) Equal(y Uint32x8) Mask32x8 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Uint32x8) Greater(y Uint32x8) Mask32x8 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Uint32x8) GreaterEqual(y Uint32x8) Mask32x8 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Uint32x8) Less(y Uint32x8) Mask32x8 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Uint32x8) LessEqual(y Uint32x8) Mask32x8 + +// Asm: VPOPCNTD, Arch: AVX512EVEX +func (x Uint32x8) MaskedPopCount(y Mask32x8) Uint32x8 + +// Asm: VPMAXUD, Arch: AVX2 +func (x Uint32x8) Max(y Uint32x8) Uint32x8 + +// Asm: VPMINUD, Arch: AVX2 +func (x Uint32x8) Min(y Uint32x8) Uint32x8 + +// Asm: VPMULUDQ, Arch: AVX2, Doc: Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +func (x Uint32x8) MulEvenWiden(y Uint32x8) Uint64x4 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Uint32x8) NotEqual(y Uint32x8) Mask32x8 + +// Asm: VPOR, Arch: AVX2 +func (x Uint32x8) Or(y Uint32x8) Uint32x8 + +// Asm: VPHADDD, Arch: AVX2, Doc: Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target +func (x Uint32x8) PairwiseAdd(y Uint32x8) Uint32x8 + +// Asm: VPHSUBD, Arch: AVX2, Doc: Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target +func (x Uint32x8) PairwiseSub(y Uint32x8) Uint32x8 + +// Asm: VPSUBD, Arch: AVX2 +func (x Uint32x8) Sub(y Uint32x8) Uint32x8 + +// Asm: VPXOR, Arch: AVX2 +func (x Uint32x8) Xor(y Uint32x8) Uint32x8 + +// Asm: VPADDQ, Arch: AVX +func (x Uint64x2) Add(y Uint64x2) Uint64x2 + +// Asm: VPAND, Arch: AVX +func (x Uint64x2) And(y Uint64x2) Uint64x2 + +// Asm: VPANDN, Arch: AVX +func (x Uint64x2) AndNot(y Uint64x2) Uint64x2 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Uint64x2) Equal(y Uint64x2) Mask64x2 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Uint64x2) Greater(y Uint64x2) Mask64x2 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Uint64x2) GreaterEqual(y Uint64x2) Mask64x2 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Uint64x2) Less(y Uint64x2) Mask64x2 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Uint64x2) LessEqual(y Uint64x2) Mask64x2 + +// Asm: VPOPCNTQ, Arch: AVX512EVEX +func (x Uint64x2) MaskedPopCount(y Mask64x2) Uint64x2 + +// Asm: VPMAXUQ, Arch: AVX512EVEX +func (x Uint64x2) Max(y Uint64x2) Uint64x2 + +// Asm: VPMINUQ, Arch: AVX512EVEX +func (x Uint64x2) Min(y Uint64x2) Uint64x2 + +// Asm: VPMULUDQ, Arch: AVX512EVEX, Doc: Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +func (x Uint64x2) MulEvenWiden(y Uint64x2) Uint64x2 + +// Asm: 
VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Uint64x2) NotEqual(y Uint64x2) Mask64x2 + +// Asm: VPOR, Arch: AVX +func (x Uint64x2) Or(y Uint64x2) Uint64x2 + +// Asm: VPSUBQ, Arch: AVX +func (x Uint64x2) Sub(y Uint64x2) Uint64x2 + +// Asm: VPXOR, Arch: AVX +func (x Uint64x2) Xor(y Uint64x2) Uint64x2 + +// Asm: VPADDQ, Arch: AVX2 +func (x Uint64x4) Add(y Uint64x4) Uint64x4 + +// Asm: VPAND, Arch: AVX2 +func (x Uint64x4) And(y Uint64x4) Uint64x4 + +// Asm: VPANDN, Arch: AVX2 +func (x Uint64x4) AndNot(y Uint64x4) Uint64x4 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Uint64x4) Equal(y Uint64x4) Mask64x4 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Uint64x4) Greater(y Uint64x4) Mask64x4 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Uint64x4) GreaterEqual(y Uint64x4) Mask64x4 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Uint64x4) Less(y Uint64x4) Mask64x4 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Uint64x4) LessEqual(y Uint64x4) Mask64x4 + +// Asm: VPOPCNTQ, Arch: AVX512EVEX +func (x Uint64x4) MaskedPopCount(y Mask64x4) Uint64x4 + +// Asm: VPMAXUQ, Arch: AVX512EVEX +func (x Uint64x4) Max(y Uint64x4) Uint64x4 + +// Asm: VPMINUQ, Arch: AVX512EVEX +func (x Uint64x4) Min(y Uint64x4) Uint64x4 + +// Asm: VPMULUDQ, Arch: AVX512EVEX, Doc: Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +func (x Uint64x4) MulEvenWiden(y Uint64x4) Uint64x4 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Uint64x4) NotEqual(y Uint64x4) Mask64x4 + +// Asm: VPOR, Arch: AVX2 +func (x Uint64x4) Or(y Uint64x4) Uint64x4 + +// Asm: VPSUBQ, Arch: AVX2 +func (x Uint64x4) Sub(y Uint64x4) Uint64x4 + +// Asm: VPXOR, Arch: AVX2 +func (x Uint64x4) Xor(y Uint64x4) Uint64x4 + +// Asm: VPADDQ, Arch: AVX512EVEX +func (x Uint64x8) Add(y Uint64x8) Uint64x8 + +// Asm: VPANDQ, Arch: AVX512EVEX +func (x Uint64x8) And(y Uint64x8) Uint64x8 + +// Asm: VPANDNQ, Arch: AVX512EVEX +func (x Uint64x8) AndNot(y Uint64x8) Uint64x8 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Uint64x8) Equal(y Uint64x8) Mask64x8 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Uint64x8) Greater(y Uint64x8) Mask64x8 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Uint64x8) GreaterEqual(y Uint64x8) Mask64x8 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Uint64x8) Less(y Uint64x8) Mask64x8 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Uint64x8) LessEqual(y Uint64x8) Mask64x8 + +// Asm: VPOPCNTQ, Arch: AVX512EVEX +func (x Uint64x8) MaskedPopCount(y Mask64x8) Uint64x8 + +// Asm: VPMAXUQ, Arch: AVX512EVEX +func (x Uint64x8) Max(y Uint64x8) Uint64x8 + +// Asm: VPMINUQ, Arch: AVX512EVEX +func (x Uint64x8) Min(y Uint64x8) Uint64x8 + +// Asm: VPMULUDQ, Arch: AVX512EVEX, Doc: Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +func (x Uint64x8) MulEvenWiden(y Uint64x8) Uint64x8 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Uint64x8) NotEqual(y Uint64x8) Mask64x8 + +// Asm: VPORQ, Arch: AVX512EVEX +func (x Uint64x8) Or(y 
Uint64x8) Uint64x8 + +// Asm: VPSUBQ, Arch: AVX512EVEX +func (x Uint64x8) Sub(y Uint64x8) Uint64x8 + +// Asm: VPXORQ, Arch: AVX512EVEX +func (x Uint64x8) Xor(y Uint64x8) Uint64x8 + +// Asm: VPADDB, Arch: AVX +func (x Uint8x16) Add(y Uint8x16) Uint8x16 + +// Asm: VPAND, Arch: AVX +func (x Uint8x16) And(y Uint8x16) Uint8x16 + +// Asm: VPANDN, Arch: AVX +func (x Uint8x16) AndNot(y Uint8x16) Uint8x16 + +// Asm: VPAVGB, Arch: AVX +func (x Uint8x16) Average(y Uint8x16) Uint8x16 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Uint8x16) Equal(y Uint8x16) Mask8x16 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Uint8x16) Greater(y Uint8x16) Mask8x16 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Uint8x16) GreaterEqual(y Uint8x16) Mask8x16 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Uint8x16) Less(y Uint8x16) Mask8x16 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Uint8x16) LessEqual(y Uint8x16) Mask8x16 + +// Asm: VPOPCNTB, Arch: AVX512EVEX +func (x Uint8x16) MaskedPopCount(y Mask8x16) Uint8x16 + +// Asm: VPMAXUB, Arch: AVX +func (x Uint8x16) Max(y Uint8x16) Uint8x16 + +// Asm: VPMINUB, Arch: AVX +func (x Uint8x16) Min(y Uint8x16) Uint8x16 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Uint8x16) NotEqual(y Uint8x16) Mask8x16 + +// Asm: VPOR, Arch: AVX +func (x Uint8x16) Or(y Uint8x16) Uint8x16 + +// Asm: VPADDSB, Arch: AVX +func (x Uint8x16) SaturatedAdd(y Uint8x16) Uint8x16 + +// Asm: VPSUBSB, Arch: AVX +func (x Uint8x16) SaturatedSub(y Uint8x16) Uint8x16 + +// Asm: VPSUBB, Arch: AVX +func (x Uint8x16) Sub(y Uint8x16) Uint8x16 + +// Asm: VPXOR, Arch: AVX +func (x Uint8x16) Xor(y Uint8x16) Uint8x16 + +// Asm: VPADDB, Arch: AVX2 +func (x Uint8x32) Add(y Uint8x32) Uint8x32 + +// Asm: VPAND, Arch: AVX2 +func (x Uint8x32) And(y Uint8x32) Uint8x32 + +// Asm: VPANDN, Arch: AVX2 +func (x Uint8x32) AndNot(y Uint8x32) Uint8x32 + +// Asm: VPAVGB, Arch: AVX2 +func (x Uint8x32) Average(y Uint8x32) Uint8x32 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Uint8x32) Equal(y Uint8x32) Mask8x32 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Uint8x32) Greater(y Uint8x32) Mask8x32 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Uint8x32) GreaterEqual(y Uint8x32) Mask8x32 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Uint8x32) Less(y Uint8x32) Mask8x32 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Uint8x32) LessEqual(y Uint8x32) Mask8x32 + +// Asm: VPOPCNTB, Arch: AVX512EVEX +func (x Uint8x32) MaskedPopCount(y Mask8x32) Uint8x32 + +// Asm: VPMAXUB, Arch: AVX2 +func (x Uint8x32) Max(y Uint8x32) Uint8x32 + +// Asm: VPMINUB, Arch: AVX2 +func (x Uint8x32) Min(y Uint8x32) Uint8x32 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Uint8x32) NotEqual(y Uint8x32) Mask8x32 + +// Asm: VPOR, Arch: AVX2 +func (x Uint8x32) Or(y Uint8x32) Uint8x32 + +// Asm: VPADDSB, Arch: AVX2 +func (x Uint8x32) SaturatedAdd(y Uint8x32) Uint8x32 + +// Asm: VPSUBSB, Arch: AVX2 +func (x Uint8x32) SaturatedSub(y Uint8x32) Uint8x32 + +// Asm: VPSUBB, Arch: AVX2 +func (x Uint8x32) Sub(y Uint8x32) Uint8x32 + +// Asm: VPXOR, Arch: AVX2 +func (x Uint8x32) Xor(y Uint8x32) 
Uint8x32 + +// Asm: VPADDB, Arch: AVX512EVEX +func (x Uint8x64) Add(y Uint8x64) Uint8x64 + +// Asm: VPAVGB, Arch: AVX512EVEX +func (x Uint8x64) Average(y Uint8x64) Uint8x64 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Uint8x64) Equal(y Uint8x64) Mask8x64 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Uint8x64) Greater(y Uint8x64) Mask8x64 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Uint8x64) GreaterEqual(y Uint8x64) Mask8x64 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Uint8x64) Less(y Uint8x64) Mask8x64 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Uint8x64) LessEqual(y Uint8x64) Mask8x64 + +// Asm: VPOPCNTB, Arch: AVX512EVEX +func (x Uint8x64) MaskedPopCount(y Mask8x64) Uint8x64 + +// Asm: VPMAXUB, Arch: AVX512EVEX +func (x Uint8x64) Max(y Uint8x64) Uint8x64 + +// Asm: VPMINUB, Arch: AVX512EVEX +func (x Uint8x64) Min(y Uint8x64) Uint8x64 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Uint8x64) NotEqual(y Uint8x64) Mask8x64 + +// Asm: VPADDSB, Arch: AVX512EVEX +func (x Uint8x64) SaturatedAdd(y Uint8x64) Uint8x64 + +// Asm: VPSUBSB, Arch: AVX512EVEX +func (x Uint8x64) SaturatedSub(y Uint8x64) Uint8x64 + +// Asm: VPSUBB, Arch: AVX512EVEX +func (x Uint8x64) Sub(y Uint8x64) Uint8x64 + +// Asm: VADDPS, Arch: AVX512EVEX +func (x Float32x16) MaskedAdd(y Float32x16, z Mask32x16) Float32x16 + +// Asm: VANDPS, Arch: AVX512EVEX +func (x Float32x16) MaskedAnd(y Float32x16, z Mask32x16) Float32x16 + +// Asm: VANDNPS, Arch: AVX512EVEX +func (x Float32x16) MaskedAndNot(y Float32x16, z Mask32x16) Float32x16 + +// Asm: VDIVPS, Arch: AVX512EVEX +func (x Float32x16) MaskedDiv(y Float32x16, z Mask32x16) Float32x16 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Float32x16) MaskedEqual(y Float32x16, z Mask32x16) Mask32x16 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Float32x16) MaskedGreater(y Float32x16, z Mask32x16) Mask32x16 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Float32x16) MaskedGreaterEqual(y Float32x16, z Mask32x16) Mask32x16 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 3 if it has; Returns mask element True if either one of the input\'s element is Nan; Please use this method as x\.IsNan\(x\) to check x only; +func (x Float32x16) MaskedIsNan(y Float32x16, z Mask32x16) Mask32x16 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Float32x16) MaskedLess(y Float32x16, z Mask32x16) Mask32x16 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Float32x16) MaskedLessEqual(y Float32x16, z Mask32x16) Mask32x16 + +// Asm: VMAXPS, Arch: AVX512EVEX +func (x Float32x16) MaskedMax(y Float32x16, z Mask32x16) Float32x16 + +// Asm: VMINPS, Arch: AVX512EVEX +func (x Float32x16) MaskedMin(y Float32x16, z Mask32x16) Float32x16 + +// Asm: VMULPS, Arch: AVX512EVEX +func (x Float32x16) MaskedMul(y Float32x16, z Mask32x16) Float32x16 + +// Asm: VSCALEFPS, Arch: AVX512EVEX +func (x Float32x16) MaskedMulByPowOf2(y Float32x16, z Mask32x16) Float32x16 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Float32x16) MaskedNotEqual(y Float32x16, z Mask32x16) Mask32x16 + +// Asm: VORPS, Arch: AVX512EVEX +func (x Float32x16) MaskedOr(y 
Float32x16, z Mask32x16) Float32x16 + +// Asm: VADDPS, Arch: AVX512EVEX +func (x Float32x16) MaskedSub(y Float32x16, z Mask32x16) Float32x16 + +// Asm: VXORPS, Arch: AVX512EVEX +func (x Float32x16) MaskedXor(y Float32x16, z Mask32x16) Float32x16 + +// Asm: VADDPS, Arch: AVX512EVEX +func (x Float32x4) MaskedAdd(y Float32x4, z Mask32x4) Float32x4 + +// Asm: VANDPS, Arch: AVX512EVEX +func (x Float32x4) MaskedAnd(y Float32x4, z Mask32x4) Float32x4 + +// Asm: VANDNPS, Arch: AVX512EVEX +func (x Float32x4) MaskedAndNot(y Float32x4, z Mask32x4) Float32x4 + +// Asm: VDIVPS, Arch: AVX512EVEX +func (x Float32x4) MaskedDiv(y Float32x4, z Mask32x4) Float32x4 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Float32x4) MaskedEqual(y Float32x4, z Mask32x4) Mask32x4 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Float32x4) MaskedGreater(y Float32x4, z Mask32x4) Mask32x4 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Float32x4) MaskedGreaterEqual(y Float32x4, z Mask32x4) Mask32x4 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 3 if it has; Returns mask element True if either one of the input\'s element is Nan; Please use this method as x\.IsNan\(x\) to check x only; +func (x Float32x4) MaskedIsNan(y Float32x4, z Mask32x4) Mask32x4 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Float32x4) MaskedLess(y Float32x4, z Mask32x4) Mask32x4 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Float32x4) MaskedLessEqual(y Float32x4, z Mask32x4) Mask32x4 + +// Asm: VMAXPS, Arch: AVX512EVEX +func (x Float32x4) MaskedMax(y Float32x4, z Mask32x4) Float32x4 + +// Asm: VMINPS, Arch: AVX512EVEX +func (x Float32x4) MaskedMin(y Float32x4, z Mask32x4) Float32x4 + +// Asm: VMULPS, Arch: AVX512EVEX +func (x Float32x4) MaskedMul(y Float32x4, z Mask32x4) Float32x4 + +// Asm: VSCALEFPS, Arch: AVX512EVEX +func (x Float32x4) MaskedMulByPowOf2(y Float32x4, z Mask32x4) Float32x4 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Float32x4) MaskedNotEqual(y Float32x4, z Mask32x4) Mask32x4 + +// Asm: VORPS, Arch: AVX512EVEX +func (x Float32x4) MaskedOr(y Float32x4, z Mask32x4) Float32x4 + +// Asm: VADDPS, Arch: AVX512EVEX +func (x Float32x4) MaskedSub(y Float32x4, z Mask32x4) Float32x4 + +// Asm: VXORPS, Arch: AVX512EVEX +func (x Float32x4) MaskedXor(y Float32x4, z Mask32x4) Float32x4 + +// Asm: VADDPS, Arch: AVX512EVEX +func (x Float32x8) MaskedAdd(y Float32x8, z Mask32x8) Float32x8 + +// Asm: VANDPS, Arch: AVX512EVEX +func (x Float32x8) MaskedAnd(y Float32x8, z Mask32x8) Float32x8 + +// Asm: VANDNPS, Arch: AVX512EVEX +func (x Float32x8) MaskedAndNot(y Float32x8, z Mask32x8) Float32x8 + +// Asm: VDIVPS, Arch: AVX512EVEX +func (x Float32x8) MaskedDiv(y Float32x8, z Mask32x8) Float32x8 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Float32x8) MaskedEqual(y Float32x8, z Mask32x8) Mask32x8 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Float32x8) MaskedGreater(y Float32x8, z Mask32x8) Mask32x8 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Float32x8) MaskedGreaterEqual(y Float32x8, z Mask32x8) Mask32x8 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 3 if it has; Returns mask element True if either one of the input\'s element is Nan; Please use this method as 
x\.IsNan\(x\) to check x only; +func (x Float32x8) MaskedIsNan(y Float32x8, z Mask32x8) Mask32x8 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Float32x8) MaskedLess(y Float32x8, z Mask32x8) Mask32x8 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Float32x8) MaskedLessEqual(y Float32x8, z Mask32x8) Mask32x8 + +// Asm: VMAXPS, Arch: AVX512EVEX +func (x Float32x8) MaskedMax(y Float32x8, z Mask32x8) Float32x8 + +// Asm: VMINPS, Arch: AVX512EVEX +func (x Float32x8) MaskedMin(y Float32x8, z Mask32x8) Float32x8 + +// Asm: VMULPS, Arch: AVX512EVEX +func (x Float32x8) MaskedMul(y Float32x8, z Mask32x8) Float32x8 + +// Asm: VSCALEFPS, Arch: AVX512EVEX +func (x Float32x8) MaskedMulByPowOf2(y Float32x8, z Mask32x8) Float32x8 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Float32x8) MaskedNotEqual(y Float32x8, z Mask32x8) Mask32x8 + +// Asm: VORPS, Arch: AVX512EVEX +func (x Float32x8) MaskedOr(y Float32x8, z Mask32x8) Float32x8 + +// Asm: VADDPS, Arch: AVX512EVEX +func (x Float32x8) MaskedSub(y Float32x8, z Mask32x8) Float32x8 + +// Asm: VXORPS, Arch: AVX512EVEX +func (x Float32x8) MaskedXor(y Float32x8, z Mask32x8) Float32x8 + +// Asm: VADDPD, Arch: AVX512EVEX +func (x Float64x2) MaskedAdd(y Float64x2, z Mask64x2) Float64x2 + +// Asm: VANDPD, Arch: AVX512EVEX +func (x Float64x2) MaskedAnd(y Float64x2, z Mask64x2) Float64x2 + +// Asm: VANDNPD, Arch: AVX512EVEX +func (x Float64x2) MaskedAndNot(y Float64x2, z Mask64x2) Float64x2 + +// Asm: VDIVPD, Arch: AVX512EVEX +func (x Float64x2) MaskedDiv(y Float64x2, z Mask64x2) Float64x2 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Float64x2) MaskedEqual(y Float64x2, z Mask64x2) Mask64x2 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Float64x2) MaskedGreater(y Float64x2, z Mask64x2) Mask64x2 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Float64x2) MaskedGreaterEqual(y Float64x2, z Mask64x2) Mask64x2 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 3 if it has; Returns mask element True if either one of the input\'s element is Nan; Please use this method as x\.IsNan\(x\) to check x only; +func (x Float64x2) MaskedIsNan(y Float64x2, z Mask64x2) Mask64x2 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Float64x2) MaskedLess(y Float64x2, z Mask64x2) Mask64x2 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Float64x2) MaskedLessEqual(y Float64x2, z Mask64x2) Mask64x2 + +// Asm: VMAXPD, Arch: AVX512EVEX +func (x Float64x2) MaskedMax(y Float64x2, z Mask64x2) Float64x2 + +// Asm: VMINPD, Arch: AVX512EVEX +func (x Float64x2) MaskedMin(y Float64x2, z Mask64x2) Float64x2 + +// Asm: VMULPD, Arch: AVX512EVEX +func (x Float64x2) MaskedMul(y Float64x2, z Mask64x2) Float64x2 + +// Asm: VSCALEFPD, Arch: AVX512EVEX +func (x Float64x2) MaskedMulByPowOf2(y Float64x2, z Mask64x2) Float64x2 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Float64x2) MaskedNotEqual(y Float64x2, z Mask64x2) Mask64x2 + +// Asm: VORPD, Arch: AVX512EVEX +func (x Float64x2) MaskedOr(y Float64x2, z Mask64x2) Float64x2 + +// Asm: VADDPD, Arch: AVX512EVEX +func (x Float64x2) MaskedSub(y Float64x2, z Mask64x2) Float64x2 + +// Asm: VXORPD, Arch: AVX512EVEX +func (x Float64x2) MaskedXor(y Float64x2, z Mask64x2) Float64x2 + +// Asm: VADDPD, Arch: 
AVX512EVEX +func (x Float64x4) MaskedAdd(y Float64x4, z Mask64x4) Float64x4 + +// Asm: VANDPD, Arch: AVX512EVEX +func (x Float64x4) MaskedAnd(y Float64x4, z Mask64x4) Float64x4 + +// Asm: VANDNPD, Arch: AVX512EVEX +func (x Float64x4) MaskedAndNot(y Float64x4, z Mask64x4) Float64x4 + +// Asm: VDIVPD, Arch: AVX512EVEX +func (x Float64x4) MaskedDiv(y Float64x4, z Mask64x4) Float64x4 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Float64x4) MaskedEqual(y Float64x4, z Mask64x4) Mask64x4 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Float64x4) MaskedGreater(y Float64x4, z Mask64x4) Mask64x4 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Float64x4) MaskedGreaterEqual(y Float64x4, z Mask64x4) Mask64x4 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 3 if it has; Returns mask element True if either one of the input\'s element is Nan; Please use this method as x\.IsNan\(x\) to check x only; +func (x Float64x4) MaskedIsNan(y Float64x4, z Mask64x4) Mask64x4 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Float64x4) MaskedLess(y Float64x4, z Mask64x4) Mask64x4 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Float64x4) MaskedLessEqual(y Float64x4, z Mask64x4) Mask64x4 + +// Asm: VMAXPD, Arch: AVX512EVEX +func (x Float64x4) MaskedMax(y Float64x4, z Mask64x4) Float64x4 + +// Asm: VMINPD, Arch: AVX512EVEX +func (x Float64x4) MaskedMin(y Float64x4, z Mask64x4) Float64x4 + +// Asm: VMULPD, Arch: AVX512EVEX +func (x Float64x4) MaskedMul(y Float64x4, z Mask64x4) Float64x4 + +// Asm: VSCALEFPD, Arch: AVX512EVEX +func (x Float64x4) MaskedMulByPowOf2(y Float64x4, z Mask64x4) Float64x4 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Float64x4) MaskedNotEqual(y Float64x4, z Mask64x4) Mask64x4 + +// Asm: VORPD, Arch: AVX512EVEX +func (x Float64x4) MaskedOr(y Float64x4, z Mask64x4) Float64x4 + +// Asm: VADDPD, Arch: AVX512EVEX +func (x Float64x4) MaskedSub(y Float64x4, z Mask64x4) Float64x4 + +// Asm: VXORPD, Arch: AVX512EVEX +func (x Float64x4) MaskedXor(y Float64x4, z Mask64x4) Float64x4 + +// Asm: VADDPD, Arch: AVX512EVEX +func (x Float64x8) MaskedAdd(y Float64x8, z Mask64x8) Float64x8 + +// Asm: VANDPD, Arch: AVX512EVEX +func (x Float64x8) MaskedAnd(y Float64x8, z Mask64x8) Float64x8 + +// Asm: VANDNPD, Arch: AVX512EVEX +func (x Float64x8) MaskedAndNot(y Float64x8, z Mask64x8) Float64x8 + +// Asm: VDIVPD, Arch: AVX512EVEX +func (x Float64x8) MaskedDiv(y Float64x8, z Mask64x8) Float64x8 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Float64x8) MaskedEqual(y Float64x8, z Mask64x8) Mask64x8 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Float64x8) MaskedGreater(y Float64x8, z Mask64x8) Mask64x8 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Float64x8) MaskedGreaterEqual(y Float64x8, z Mask64x8) Mask64x8 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 3 if it has; Returns mask element True if either one of the input\'s element is Nan; Please use this method as x\.IsNan\(x\) to check x only; +func (x Float64x8) MaskedIsNan(y Float64x8, z Mask64x8) Mask64x8 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Float64x8) MaskedLess(y Float64x8, z Mask64x8) Mask64x8 + +// Asm: VCMPPD, Arch: AVX512EVEX, 
Doc: Predicate immediate is 2 if it has; +func (x Float64x8) MaskedLessEqual(y Float64x8, z Mask64x8) Mask64x8 + +// Asm: VMAXPD, Arch: AVX512EVEX +func (x Float64x8) MaskedMax(y Float64x8, z Mask64x8) Float64x8 + +// Asm: VMINPD, Arch: AVX512EVEX +func (x Float64x8) MaskedMin(y Float64x8, z Mask64x8) Float64x8 + +// Asm: VMULPD, Arch: AVX512EVEX +func (x Float64x8) MaskedMul(y Float64x8, z Mask64x8) Float64x8 + +// Asm: VSCALEFPD, Arch: AVX512EVEX +func (x Float64x8) MaskedMulByPowOf2(y Float64x8, z Mask64x8) Float64x8 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Float64x8) MaskedNotEqual(y Float64x8, z Mask64x8) Mask64x8 + +// Asm: VORPD, Arch: AVX512EVEX +func (x Float64x8) MaskedOr(y Float64x8, z Mask64x8) Float64x8 + +// Asm: VADDPD, Arch: AVX512EVEX +func (x Float64x8) MaskedSub(y Float64x8, z Mask64x8) Float64x8 + +// Asm: VXORPD, Arch: AVX512EVEX +func (x Float64x8) MaskedXor(y Float64x8, z Mask64x8) Float64x8 + +// Asm: VPADDW, Arch: AVX512EVEX +func (x Int16x16) MaskedAdd(y Int16x16, z Mask16x16) Int16x16 + +// Asm: VPCMPEQW, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Int16x16) MaskedEqual(y Int16x16, z Mask16x16) Mask16x16 + +// Asm: VPCMPGTW, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Int16x16) MaskedGreater(y Int16x16, z Mask16x16) Mask16x16 + +// Asm: VPCMPW, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Int16x16) MaskedGreaterEqual(y Int16x16, z Mask16x16) Mask16x16 + +// Asm: VPCMPW, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Int16x16) MaskedLess(y Int16x16, z Mask16x16) Mask16x16 + +// Asm: VPCMPW, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Int16x16) MaskedLessEqual(y Int16x16, z Mask16x16) Mask16x16 + +// Asm: VPMAXSW, Arch: AVX512EVEX +func (x Int16x16) MaskedMax(y Int16x16, z Mask16x16) Int16x16 + +// Asm: VPMINSW, Arch: AVX512EVEX +func (x Int16x16) MaskedMin(y Int16x16, z Mask16x16) Int16x16 + +// Asm: VPMULHW, Arch: AVX512EVEX, Doc: Multiplies the elements from the two sources of size X at index i, store the high X bits of the result of size 2X at index i +func (x Int16x16) MaskedMulHigh(y Int16x16, z Mask16x16) Int16x16 + +// Asm: VPMULLW, Arch: AVX512EVEX, Doc: Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +func (x Int16x16) MaskedMulLow(y Int16x16, z Mask16x16) Int16x16 + +// Asm: VPCMPW, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Int16x16) MaskedNotEqual(y Int16x16, z Mask16x16) Mask16x16 + +// Asm: VPADDSW, Arch: AVX512EVEX +func (x Int16x16) MaskedSaturatedAdd(y Int16x16, z Mask16x16) Int16x16 + +// Asm: VPSUBSW, Arch: AVX512EVEX +func (x Int16x16) MaskedSaturatedSub(y Int16x16, z Mask16x16) Int16x16 + +// Asm: VPSUBW, Arch: AVX512EVEX +func (x Int16x16) MaskedSub(y Int16x16, z Mask16x16) Int16x16 + +// Asm: VPADDW, Arch: AVX512EVEX +func (x Int16x32) MaskedAdd(y Int16x32, z Mask16x32) Int16x32 + +// Asm: VPCMPEQW, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Int16x32) MaskedEqual(y Int16x32, z Mask16x32) Mask16x32 + +// Asm: VPCMPGTW, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Int16x32) MaskedGreater(y Int16x32, z Mask16x32) Mask16x32 + +// Asm: VPCMPW, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Int16x32) MaskedGreaterEqual(y Int16x32, z Mask16x32) Mask16x32 + +// Asm: VPCMPW, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if 
it has; +func (x Int16x32) MaskedLess(y Int16x32, z Mask16x32) Mask16x32 + +// Asm: VPCMPW, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Int16x32) MaskedLessEqual(y Int16x32, z Mask16x32) Mask16x32 + +// Asm: VPMAXSW, Arch: AVX512EVEX +func (x Int16x32) MaskedMax(y Int16x32, z Mask16x32) Int16x32 + +// Asm: VPMINSW, Arch: AVX512EVEX +func (x Int16x32) MaskedMin(y Int16x32, z Mask16x32) Int16x32 + +// Asm: VPMULHW, Arch: AVX512EVEX, Doc: Multiplies the elements from the two sources of size X at index i, store the high X bits of the result of size 2X at index i +func (x Int16x32) MaskedMulHigh(y Int16x32, z Mask16x32) Int16x32 + +// Asm: VPMULLW, Arch: AVX512EVEX, Doc: Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +func (x Int16x32) MaskedMulLow(y Int16x32, z Mask16x32) Int16x32 + +// Asm: VPCMPW, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Int16x32) MaskedNotEqual(y Int16x32, z Mask16x32) Mask16x32 + +// Asm: VPADDSW, Arch: AVX512EVEX +func (x Int16x32) MaskedSaturatedAdd(y Int16x32, z Mask16x32) Int16x32 + +// Asm: VPSUBSW, Arch: AVX512EVEX +func (x Int16x32) MaskedSaturatedSub(y Int16x32, z Mask16x32) Int16x32 + +// Asm: VPSUBW, Arch: AVX512EVEX +func (x Int16x32) MaskedSub(y Int16x32, z Mask16x32) Int16x32 + +// Asm: VPADDW, Arch: AVX512EVEX +func (x Int16x8) MaskedAdd(y Int16x8, z Mask16x8) Int16x8 + +// Asm: VPCMPEQW, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Int16x8) MaskedEqual(y Int16x8, z Mask16x8) Mask16x8 + +// Asm: VPCMPGTW, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Int16x8) MaskedGreater(y Int16x8, z Mask16x8) Mask16x8 + +// Asm: VPCMPW, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Int16x8) MaskedGreaterEqual(y Int16x8, z Mask16x8) Mask16x8 + +// Asm: VPCMPW, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Int16x8) MaskedLess(y Int16x8, z Mask16x8) Mask16x8 + +// Asm: VPCMPW, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Int16x8) MaskedLessEqual(y Int16x8, z Mask16x8) Mask16x8 + +// Asm: VPMAXSW, Arch: AVX512EVEX +func (x Int16x8) MaskedMax(y Int16x8, z Mask16x8) Int16x8 + +// Asm: VPMINSW, Arch: AVX512EVEX +func (x Int16x8) MaskedMin(y Int16x8, z Mask16x8) Int16x8 + +// Asm: VPMULHW, Arch: AVX512EVEX, Doc: Multiplies the elements from the two sources of size X at index i, store the high X bits of the result of size 2X at index i +func (x Int16x8) MaskedMulHigh(y Int16x8, z Mask16x8) Int16x8 + +// Asm: VPMULLW, Arch: AVX512EVEX, Doc: Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +func (x Int16x8) MaskedMulLow(y Int16x8, z Mask16x8) Int16x8 + +// Asm: VPCMPW, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Int16x8) MaskedNotEqual(y Int16x8, z Mask16x8) Mask16x8 + +// Asm: VPADDSW, Arch: AVX512EVEX +func (x Int16x8) MaskedSaturatedAdd(y Int16x8, z Mask16x8) Int16x8 + +// Asm: VPSUBSW, Arch: AVX512EVEX +func (x Int16x8) MaskedSaturatedSub(y Int16x8, z Mask16x8) Int16x8 + +// Asm: VPSUBW, Arch: AVX512EVEX +func (x Int16x8) MaskedSub(y Int16x8, z Mask16x8) Int16x8 + +// Asm: VPADDD, Arch: AVX512EVEX +func (x Int32x16) MaskedAdd(y Int32x16, z Mask32x16) Int32x16 + +// Asm: VPANDD, Arch: AVX512EVEX +func (x Int32x16) MaskedAnd(y Int32x16, z Mask32x16) Int32x16 + +// Asm: VPANDND, Arch: AVX512EVEX +func (x Int32x16) MaskedAndNot(y Int32x16, z 
Mask32x16) Int32x16 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Int32x16) MaskedEqual(y Int32x16, z Mask32x16) Mask32x16 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Int32x16) MaskedGreater(y Int32x16, z Mask32x16) Mask32x16 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Int32x16) MaskedGreaterEqual(y Int32x16, z Mask32x16) Mask32x16 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Int32x16) MaskedLess(y Int32x16, z Mask32x16) Mask32x16 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Int32x16) MaskedLessEqual(y Int32x16, z Mask32x16) Mask32x16 + +// Asm: VPMAXSD, Arch: AVX512EVEX +func (x Int32x16) MaskedMax(y Int32x16, z Mask32x16) Int32x16 + +// Asm: VPMINSD, Arch: AVX512EVEX +func (x Int32x16) MaskedMin(y Int32x16, z Mask32x16) Int32x16 + +// Asm: VPMULLD, Arch: AVX512EVEX, Doc: Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +func (x Int32x16) MaskedMulLow(y Int32x16, z Mask32x16) Int32x16 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Int32x16) MaskedNotEqual(y Int32x16, z Mask32x16) Mask32x16 + +// Asm: VPORD, Arch: AVX512EVEX +func (x Int32x16) MaskedOr(y Int32x16, z Mask32x16) Int32x16 + +// Asm: VPSUBD, Arch: AVX512EVEX +func (x Int32x16) MaskedSub(y Int32x16, z Mask32x16) Int32x16 + +// Asm: VPXORD, Arch: AVX512EVEX +func (x Int32x16) MaskedXor(y Int32x16, z Mask32x16) Int32x16 + +// Asm: VPADDD, Arch: AVX512EVEX +func (x Int32x4) MaskedAdd(y Int32x4, z Mask32x4) Int32x4 + +// Asm: VPANDD, Arch: AVX512EVEX +func (x Int32x4) MaskedAnd(y Int32x4, z Mask32x4) Int32x4 + +// Asm: VPANDND, Arch: AVX512EVEX +func (x Int32x4) MaskedAndNot(y Int32x4, z Mask32x4) Int32x4 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Int32x4) MaskedEqual(y Int32x4, z Mask32x4) Mask32x4 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Int32x4) MaskedGreater(y Int32x4, z Mask32x4) Mask32x4 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Int32x4) MaskedGreaterEqual(y Int32x4, z Mask32x4) Mask32x4 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Int32x4) MaskedLess(y Int32x4, z Mask32x4) Mask32x4 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Int32x4) MaskedLessEqual(y Int32x4, z Mask32x4) Mask32x4 + +// Asm: VPMAXSD, Arch: AVX512EVEX +func (x Int32x4) MaskedMax(y Int32x4, z Mask32x4) Int32x4 + +// Asm: VPMINSD, Arch: AVX512EVEX +func (x Int32x4) MaskedMin(y Int32x4, z Mask32x4) Int32x4 + +// Asm: VPMULLD, Arch: AVX512EVEX, Doc: Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +func (x Int32x4) MaskedMulLow(y Int32x4, z Mask32x4) Int32x4 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Int32x4) MaskedNotEqual(y Int32x4, z Mask32x4) Mask32x4 + +// Asm: VPORD, Arch: AVX512EVEX +func (x Int32x4) MaskedOr(y Int32x4, z Mask32x4) Int32x4 + +// Asm: VPSUBD, Arch: AVX512EVEX +func (x Int32x4) MaskedSub(y Int32x4, z Mask32x4) Int32x4 + +// Asm: VPXORD, Arch: AVX512EVEX +func (x Int32x4) MaskedXor(y Int32x4, z Mask32x4) Int32x4 + +// Asm: VPADDD, Arch: AVX512EVEX +func (x Int32x8) MaskedAdd(y Int32x8, z 
Mask32x8) Int32x8 + +// Asm: VPANDD, Arch: AVX512EVEX +func (x Int32x8) MaskedAnd(y Int32x8, z Mask32x8) Int32x8 + +// Asm: VPANDND, Arch: AVX512EVEX +func (x Int32x8) MaskedAndNot(y Int32x8, z Mask32x8) Int32x8 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Int32x8) MaskedEqual(y Int32x8, z Mask32x8) Mask32x8 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Int32x8) MaskedGreater(y Int32x8, z Mask32x8) Mask32x8 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Int32x8) MaskedGreaterEqual(y Int32x8, z Mask32x8) Mask32x8 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Int32x8) MaskedLess(y Int32x8, z Mask32x8) Mask32x8 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Int32x8) MaskedLessEqual(y Int32x8, z Mask32x8) Mask32x8 + +// Asm: VPMAXSD, Arch: AVX512EVEX +func (x Int32x8) MaskedMax(y Int32x8, z Mask32x8) Int32x8 + +// Asm: VPMINSD, Arch: AVX512EVEX +func (x Int32x8) MaskedMin(y Int32x8, z Mask32x8) Int32x8 + +// Asm: VPMULLD, Arch: AVX512EVEX, Doc: Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +func (x Int32x8) MaskedMulLow(y Int32x8, z Mask32x8) Int32x8 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Int32x8) MaskedNotEqual(y Int32x8, z Mask32x8) Mask32x8 + +// Asm: VPORD, Arch: AVX512EVEX +func (x Int32x8) MaskedOr(y Int32x8, z Mask32x8) Int32x8 + +// Asm: VPSUBD, Arch: AVX512EVEX +func (x Int32x8) MaskedSub(y Int32x8, z Mask32x8) Int32x8 + +// Asm: VPXORD, Arch: AVX512EVEX +func (x Int32x8) MaskedXor(y Int32x8, z Mask32x8) Int32x8 + +// Asm: VPADDQ, Arch: AVX512EVEX +func (x Int64x2) MaskedAdd(y Int64x2, z Mask64x2) Int64x2 + +// Asm: VPANDQ, Arch: AVX512EVEX +func (x Int64x2) MaskedAnd(y Int64x2, z Mask64x2) Int64x2 + +// Asm: VPANDNQ, Arch: AVX512EVEX +func (x Int64x2) MaskedAndNot(y Int64x2, z Mask64x2) Int64x2 + +// Asm: VPCMPEQQ, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Int64x2) MaskedEqual(y Int64x2, z Mask64x2) Mask64x2 + +// Asm: VPCMPGTQ, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Int64x2) MaskedGreater(y Int64x2, z Mask64x2) Mask64x2 + +// Asm: VPCMPQ, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Int64x2) MaskedGreaterEqual(y Int64x2, z Mask64x2) Mask64x2 + +// Asm: VPCMPQ, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Int64x2) MaskedLess(y Int64x2, z Mask64x2) Mask64x2 + +// Asm: VPCMPQ, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Int64x2) MaskedLessEqual(y Int64x2, z Mask64x2) Mask64x2 + +// Asm: VPMAXSQ, Arch: AVX512EVEX +func (x Int64x2) MaskedMax(y Int64x2, z Mask64x2) Int64x2 + +// Asm: VPMINSQ, Arch: AVX512EVEX +func (x Int64x2) MaskedMin(y Int64x2, z Mask64x2) Int64x2 + +// Asm: VPMULDQ, Arch: AVX512EVEX, Doc: Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +func (x Int64x2) MaskedMulEvenWiden(y Int64x2, z Mask64x2) Int64x2 + +// Asm: VPMULLQ, Arch: AVX512EVEX, Doc: Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +func (x Int64x2) MaskedMulLow(y Int64x2, z Mask64x2) Int64x2 + +// Asm: VPCMPQ, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Int64x2) MaskedNotEqual(y Int64x2, z 
Mask64x2) Mask64x2 + +// Asm: VPORQ, Arch: AVX512EVEX +func (x Int64x2) MaskedOr(y Int64x2, z Mask64x2) Int64x2 + +// Asm: VPSUBQ, Arch: AVX512EVEX +func (x Int64x2) MaskedSub(y Int64x2, z Mask64x2) Int64x2 + +// Asm: VPXORQ, Arch: AVX512EVEX +func (x Int64x2) MaskedXor(y Int64x2, z Mask64x2) Int64x2 + +// Asm: VPADDQ, Arch: AVX512EVEX +func (x Int64x4) MaskedAdd(y Int64x4, z Mask64x4) Int64x4 + +// Asm: VPANDQ, Arch: AVX512EVEX +func (x Int64x4) MaskedAnd(y Int64x4, z Mask64x4) Int64x4 + +// Asm: VPANDNQ, Arch: AVX512EVEX +func (x Int64x4) MaskedAndNot(y Int64x4, z Mask64x4) Int64x4 + +// Asm: VPCMPEQQ, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Int64x4) MaskedEqual(y Int64x4, z Mask64x4) Mask64x4 + +// Asm: VPCMPGTQ, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Int64x4) MaskedGreater(y Int64x4, z Mask64x4) Mask64x4 + +// Asm: VPCMPQ, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Int64x4) MaskedGreaterEqual(y Int64x4, z Mask64x4) Mask64x4 + +// Asm: VPCMPQ, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Int64x4) MaskedLess(y Int64x4, z Mask64x4) Mask64x4 + +// Asm: VPCMPQ, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Int64x4) MaskedLessEqual(y Int64x4, z Mask64x4) Mask64x4 + +// Asm: VPMAXSQ, Arch: AVX512EVEX +func (x Int64x4) MaskedMax(y Int64x4, z Mask64x4) Int64x4 + +// Asm: VPMINSQ, Arch: AVX512EVEX +func (x Int64x4) MaskedMin(y Int64x4, z Mask64x4) Int64x4 + +// Asm: VPMULDQ, Arch: AVX512EVEX, Doc: Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +func (x Int64x4) MaskedMulEvenWiden(y Int64x4, z Mask64x4) Int64x4 + +// Asm: VPMULLQ, Arch: AVX512EVEX, Doc: Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +func (x Int64x4) MaskedMulLow(y Int64x4, z Mask64x4) Int64x4 + +// Asm: VPCMPQ, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Int64x4) MaskedNotEqual(y Int64x4, z Mask64x4) Mask64x4 + +// Asm: VPORQ, Arch: AVX512EVEX +func (x Int64x4) MaskedOr(y Int64x4, z Mask64x4) Int64x4 + +// Asm: VPSUBQ, Arch: AVX512EVEX +func (x Int64x4) MaskedSub(y Int64x4, z Mask64x4) Int64x4 + +// Asm: VPXORQ, Arch: AVX512EVEX +func (x Int64x4) MaskedXor(y Int64x4, z Mask64x4) Int64x4 + +// Asm: VPADDQ, Arch: AVX512EVEX +func (x Int64x8) MaskedAdd(y Int64x8, z Mask64x8) Int64x8 + +// Asm: VPANDQ, Arch: AVX512EVEX +func (x Int64x8) MaskedAnd(y Int64x8, z Mask64x8) Int64x8 + +// Asm: VPANDNQ, Arch: AVX512EVEX +func (x Int64x8) MaskedAndNot(y Int64x8, z Mask64x8) Int64x8 + +// Asm: VPCMPEQQ, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Int64x8) MaskedEqual(y Int64x8, z Mask64x8) Mask64x8 + +// Asm: VPCMPGTQ, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Int64x8) MaskedGreater(y Int64x8, z Mask64x8) Mask64x8 + +// Asm: VPCMPQ, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Int64x8) MaskedGreaterEqual(y Int64x8, z Mask64x8) Mask64x8 + +// Asm: VPCMPQ, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Int64x8) MaskedLess(y Int64x8, z Mask64x8) Mask64x8 + +// Asm: VPCMPQ, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Int64x8) MaskedLessEqual(y Int64x8, z Mask64x8) Mask64x8 + +// Asm: VPMAXSQ, Arch: AVX512EVEX +func (x Int64x8) MaskedMax(y Int64x8, z Mask64x8) Int64x8 + +// Asm: VPMINSQ, Arch: AVX512EVEX +func (x Int64x8) MaskedMin(y 
Int64x8, z Mask64x8) Int64x8 + +// Asm: VPMULDQ, Arch: AVX512EVEX, Doc: Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +func (x Int64x8) MaskedMulEvenWiden(y Int64x8, z Mask64x8) Int64x8 + +// Asm: VPMULLQ, Arch: AVX512EVEX, Doc: Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +func (x Int64x8) MaskedMulLow(y Int64x8, z Mask64x8) Int64x8 + +// Asm: VPCMPQ, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Int64x8) MaskedNotEqual(y Int64x8, z Mask64x8) Mask64x8 + +// Asm: VPORQ, Arch: AVX512EVEX +func (x Int64x8) MaskedOr(y Int64x8, z Mask64x8) Int64x8 + +// Asm: VPSUBQ, Arch: AVX512EVEX +func (x Int64x8) MaskedSub(y Int64x8, z Mask64x8) Int64x8 + +// Asm: VPXORQ, Arch: AVX512EVEX +func (x Int64x8) MaskedXor(y Int64x8, z Mask64x8) Int64x8 + +// Asm: VPADDB, Arch: AVX512EVEX +func (x Int8x16) MaskedAdd(y Int8x16, z Mask8x16) Int8x16 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Int8x16) MaskedEqual(y Int8x16, z Mask8x16) Mask8x16 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Int8x16) MaskedGreater(y Int8x16, z Mask8x16) Mask8x16 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Int8x16) MaskedGreaterEqual(y Int8x16, z Mask8x16) Mask8x16 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Int8x16) MaskedLess(y Int8x16, z Mask8x16) Mask8x16 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Int8x16) MaskedLessEqual(y Int8x16, z Mask8x16) Mask8x16 + +// Asm: VPMAXSB, Arch: AVX512EVEX +func (x Int8x16) MaskedMax(y Int8x16, z Mask8x16) Int8x16 + +// Asm: VPMINSB, Arch: AVX512EVEX +func (x Int8x16) MaskedMin(y Int8x16, z Mask8x16) Int8x16 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Int8x16) MaskedNotEqual(y Int8x16, z Mask8x16) Mask8x16 + +// Asm: VPADDSB, Arch: AVX512EVEX +func (x Int8x16) MaskedSaturatedAdd(y Int8x16, z Mask8x16) Int8x16 + +// Asm: VPSUBSB, Arch: AVX512EVEX +func (x Int8x16) MaskedSaturatedSub(y Int8x16, z Mask8x16) Int8x16 + +// Asm: VPSUBB, Arch: AVX512EVEX +func (x Int8x16) MaskedSub(y Int8x16, z Mask8x16) Int8x16 + +// Asm: VPADDB, Arch: AVX512EVEX +func (x Int8x32) MaskedAdd(y Int8x32, z Mask8x32) Int8x32 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Int8x32) MaskedEqual(y Int8x32, z Mask8x32) Mask8x32 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Int8x32) MaskedGreater(y Int8x32, z Mask8x32) Mask8x32 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Int8x32) MaskedGreaterEqual(y Int8x32, z Mask8x32) Mask8x32 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Int8x32) MaskedLess(y Int8x32, z Mask8x32) Mask8x32 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Int8x32) MaskedLessEqual(y Int8x32, z Mask8x32) Mask8x32 + +// Asm: VPMAXSB, Arch: AVX512EVEX +func (x Int8x32) MaskedMax(y Int8x32, z Mask8x32) Int8x32 + +// Asm: VPMINSB, Arch: AVX512EVEX +func (x Int8x32) MaskedMin(y Int8x32, z Mask8x32) Int8x32 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Int8x32) MaskedNotEqual(y Int8x32, z Mask8x32) Mask8x32 + +// Asm: VPADDSB, Arch: AVX512EVEX +func (x 
Int8x32) MaskedSaturatedAdd(y Int8x32, z Mask8x32) Int8x32 + +// Asm: VPSUBSB, Arch: AVX512EVEX +func (x Int8x32) MaskedSaturatedSub(y Int8x32, z Mask8x32) Int8x32 + +// Asm: VPSUBB, Arch: AVX512EVEX +func (x Int8x32) MaskedSub(y Int8x32, z Mask8x32) Int8x32 + +// Asm: VPADDB, Arch: AVX512EVEX +func (x Int8x64) MaskedAdd(y Int8x64, z Mask8x64) Int8x64 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Int8x64) MaskedEqual(y Int8x64, z Mask8x64) Mask8x64 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Int8x64) MaskedGreater(y Int8x64, z Mask8x64) Mask8x64 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Int8x64) MaskedGreaterEqual(y Int8x64, z Mask8x64) Mask8x64 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Int8x64) MaskedLess(y Int8x64, z Mask8x64) Mask8x64 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Int8x64) MaskedLessEqual(y Int8x64, z Mask8x64) Mask8x64 + +// Asm: VPMAXSB, Arch: AVX512EVEX +func (x Int8x64) MaskedMax(y Int8x64, z Mask8x64) Int8x64 + +// Asm: VPMINSB, Arch: AVX512EVEX +func (x Int8x64) MaskedMin(y Int8x64, z Mask8x64) Int8x64 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Int8x64) MaskedNotEqual(y Int8x64, z Mask8x64) Mask8x64 + +// Asm: VPADDSB, Arch: AVX512EVEX +func (x Int8x64) MaskedSaturatedAdd(y Int8x64, z Mask8x64) Int8x64 + +// Asm: VPSUBSB, Arch: AVX512EVEX +func (x Int8x64) MaskedSaturatedSub(y Int8x64, z Mask8x64) Int8x64 + +// Asm: VPSUBB, Arch: AVX512EVEX +func (x Int8x64) MaskedSub(y Int8x64, z Mask8x64) Int8x64 + +// Asm: VPADDW, Arch: AVX512EVEX +func (x Uint16x16) MaskedAdd(y Uint16x16, z Mask16x16) Uint16x16 + +// Asm: VPAVGW, Arch: AVX512EVEX +func (x Uint16x16) MaskedAverage(y Uint16x16, z Mask16x16) Uint16x16 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Uint16x16) MaskedEqual(y Uint16x16, z Mask16x16) Mask16x16 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Uint16x16) MaskedGreater(y Uint16x16, z Mask16x16) Mask16x16 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Uint16x16) MaskedGreaterEqual(y Uint16x16, z Mask16x16) Mask16x16 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Uint16x16) MaskedLess(y Uint16x16, z Mask16x16) Mask16x16 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Uint16x16) MaskedLessEqual(y Uint16x16, z Mask16x16) Mask16x16 + +// Asm: VPMAXUW, Arch: AVX512EVEX +func (x Uint16x16) MaskedMax(y Uint16x16, z Mask16x16) Uint16x16 + +// Asm: VPMINUW, Arch: AVX512EVEX +func (x Uint16x16) MaskedMin(y Uint16x16, z Mask16x16) Uint16x16 + +// Asm: VPMULHUW, Arch: AVX512EVEX, Doc: Multiplies the elements from the two sources of size X at index i, store the high X bits of the result of size 2X at index i +func (x Uint16x16) MaskedMulHigh(y Uint16x16, z Mask16x16) Uint16x16 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Uint16x16) MaskedNotEqual(y Uint16x16, z Mask16x16) Mask16x16 + +// Asm: VPADDSW, Arch: AVX512EVEX +func (x Uint16x16) MaskedSaturatedAdd(y Uint16x16, z Mask16x16) Uint16x16 + +// Asm: VPSUBSW, Arch: AVX512EVEX +func (x Uint16x16) MaskedSaturatedSub(y Uint16x16, z Mask16x16) Uint16x16 + +// Asm: VPSUBW, Arch: AVX512EVEX +func (x Uint16x16) MaskedSub(y 
Uint16x16, z Mask16x16) Uint16x16 + +// Asm: VPADDW, Arch: AVX512EVEX +func (x Uint16x32) MaskedAdd(y Uint16x32, z Mask16x32) Uint16x32 + +// Asm: VPAVGW, Arch: AVX512EVEX +func (x Uint16x32) MaskedAverage(y Uint16x32, z Mask16x32) Uint16x32 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Uint16x32) MaskedEqual(y Uint16x32, z Mask16x32) Mask16x32 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Uint16x32) MaskedGreater(y Uint16x32, z Mask16x32) Mask16x32 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Uint16x32) MaskedGreaterEqual(y Uint16x32, z Mask16x32) Mask16x32 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Uint16x32) MaskedLess(y Uint16x32, z Mask16x32) Mask16x32 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Uint16x32) MaskedLessEqual(y Uint16x32, z Mask16x32) Mask16x32 + +// Asm: VPMAXUW, Arch: AVX512EVEX +func (x Uint16x32) MaskedMax(y Uint16x32, z Mask16x32) Uint16x32 + +// Asm: VPMINUW, Arch: AVX512EVEX +func (x Uint16x32) MaskedMin(y Uint16x32, z Mask16x32) Uint16x32 + +// Asm: VPMULHUW, Arch: AVX512EVEX, Doc: Multiplies the elements from the two sources of size X at index i, store the high X bits of the result of size 2X at index i +func (x Uint16x32) MaskedMulHigh(y Uint16x32, z Mask16x32) Uint16x32 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Uint16x32) MaskedNotEqual(y Uint16x32, z Mask16x32) Mask16x32 + +// Asm: VPADDSW, Arch: AVX512EVEX +func (x Uint16x32) MaskedSaturatedAdd(y Uint16x32, z Mask16x32) Uint16x32 + +// Asm: VPSUBSW, Arch: AVX512EVEX +func (x Uint16x32) MaskedSaturatedSub(y Uint16x32, z Mask16x32) Uint16x32 + +// Asm: VPSUBW, Arch: AVX512EVEX +func (x Uint16x32) MaskedSub(y Uint16x32, z Mask16x32) Uint16x32 + +// Asm: VPADDW, Arch: AVX512EVEX +func (x Uint16x8) MaskedAdd(y Uint16x8, z Mask16x8) Uint16x8 + +// Asm: VPAVGW, Arch: AVX512EVEX +func (x Uint16x8) MaskedAverage(y Uint16x8, z Mask16x8) Uint16x8 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Uint16x8) MaskedEqual(y Uint16x8, z Mask16x8) Mask16x8 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Uint16x8) MaskedGreater(y Uint16x8, z Mask16x8) Mask16x8 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Uint16x8) MaskedGreaterEqual(y Uint16x8, z Mask16x8) Mask16x8 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Uint16x8) MaskedLess(y Uint16x8, z Mask16x8) Mask16x8 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Uint16x8) MaskedLessEqual(y Uint16x8, z Mask16x8) Mask16x8 + +// Asm: VPMAXUW, Arch: AVX512EVEX +func (x Uint16x8) MaskedMax(y Uint16x8, z Mask16x8) Uint16x8 + +// Asm: VPMINUW, Arch: AVX512EVEX +func (x Uint16x8) MaskedMin(y Uint16x8, z Mask16x8) Uint16x8 + +// Asm: VPMULHUW, Arch: AVX512EVEX, Doc: Multiplies the elements from the two sources of size X at index i, store the high X bits of the result of size 2X at index i +func (x Uint16x8) MaskedMulHigh(y Uint16x8, z Mask16x8) Uint16x8 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Uint16x8) MaskedNotEqual(y Uint16x8, z Mask16x8) Mask16x8 + +// Asm: VPADDSW, Arch: AVX512EVEX +func (x Uint16x8) MaskedSaturatedAdd(y Uint16x8, z Mask16x8) Uint16x8 + +// Asm: VPSUBSW, Arch: 
AVX512EVEX +func (x Uint16x8) MaskedSaturatedSub(y Uint16x8, z Mask16x8) Uint16x8 + +// Asm: VPSUBW, Arch: AVX512EVEX +func (x Uint16x8) MaskedSub(y Uint16x8, z Mask16x8) Uint16x8 + +// Asm: VPADDD, Arch: AVX512EVEX +func (x Uint32x16) MaskedAdd(y Uint32x16, z Mask32x16) Uint32x16 + +// Asm: VPANDD, Arch: AVX512EVEX +func (x Uint32x16) MaskedAnd(y Uint32x16, z Mask32x16) Uint32x16 + +// Asm: VPANDND, Arch: AVX512EVEX +func (x Uint32x16) MaskedAndNot(y Uint32x16, z Mask32x16) Uint32x16 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Uint32x16) MaskedEqual(y Uint32x16, z Mask32x16) Mask32x16 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Uint32x16) MaskedGreater(y Uint32x16, z Mask32x16) Mask32x16 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Uint32x16) MaskedGreaterEqual(y Uint32x16, z Mask32x16) Mask32x16 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Uint32x16) MaskedLess(y Uint32x16, z Mask32x16) Mask32x16 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Uint32x16) MaskedLessEqual(y Uint32x16, z Mask32x16) Mask32x16 + +// Asm: VPMAXUD, Arch: AVX512EVEX +func (x Uint32x16) MaskedMax(y Uint32x16, z Mask32x16) Uint32x16 + +// Asm: VPMINUD, Arch: AVX512EVEX +func (x Uint32x16) MaskedMin(y Uint32x16, z Mask32x16) Uint32x16 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Uint32x16) MaskedNotEqual(y Uint32x16, z Mask32x16) Mask32x16 + +// Asm: VPORD, Arch: AVX512EVEX +func (x Uint32x16) MaskedOr(y Uint32x16, z Mask32x16) Uint32x16 + +// Asm: VPSUBD, Arch: AVX512EVEX +func (x Uint32x16) MaskedSub(y Uint32x16, z Mask32x16) Uint32x16 + +// Asm: VPXORD, Arch: AVX512EVEX +func (x Uint32x16) MaskedXor(y Uint32x16, z Mask32x16) Uint32x16 + +// Asm: VPADDD, Arch: AVX512EVEX +func (x Uint32x4) MaskedAdd(y Uint32x4, z Mask32x4) Uint32x4 + +// Asm: VPANDD, Arch: AVX512EVEX +func (x Uint32x4) MaskedAnd(y Uint32x4, z Mask32x4) Uint32x4 + +// Asm: VPANDND, Arch: AVX512EVEX +func (x Uint32x4) MaskedAndNot(y Uint32x4, z Mask32x4) Uint32x4 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Uint32x4) MaskedEqual(y Uint32x4, z Mask32x4) Mask32x4 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Uint32x4) MaskedGreater(y Uint32x4, z Mask32x4) Mask32x4 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Uint32x4) MaskedGreaterEqual(y Uint32x4, z Mask32x4) Mask32x4 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Uint32x4) MaskedLess(y Uint32x4, z Mask32x4) Mask32x4 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Uint32x4) MaskedLessEqual(y Uint32x4, z Mask32x4) Mask32x4 + +// Asm: VPMAXUD, Arch: AVX512EVEX +func (x Uint32x4) MaskedMax(y Uint32x4, z Mask32x4) Uint32x4 + +// Asm: VPMINUD, Arch: AVX512EVEX +func (x Uint32x4) MaskedMin(y Uint32x4, z Mask32x4) Uint32x4 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Uint32x4) MaskedNotEqual(y Uint32x4, z Mask32x4) Mask32x4 + +// Asm: VPORD, Arch: AVX512EVEX +func (x Uint32x4) MaskedOr(y Uint32x4, z Mask32x4) Uint32x4 + +// Asm: VPSUBD, Arch: AVX512EVEX +func (x Uint32x4) MaskedSub(y Uint32x4, z Mask32x4) Uint32x4 + +// Asm: VPXORD, Arch: AVX512EVEX +func (x Uint32x4) MaskedXor(y Uint32x4, z Mask32x4) 
Uint32x4 + +// Asm: VPADDD, Arch: AVX512EVEX +func (x Uint32x8) MaskedAdd(y Uint32x8, z Mask32x8) Uint32x8 + +// Asm: VPANDD, Arch: AVX512EVEX +func (x Uint32x8) MaskedAnd(y Uint32x8, z Mask32x8) Uint32x8 + +// Asm: VPANDND, Arch: AVX512EVEX +func (x Uint32x8) MaskedAndNot(y Uint32x8, z Mask32x8) Uint32x8 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Uint32x8) MaskedEqual(y Uint32x8, z Mask32x8) Mask32x8 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Uint32x8) MaskedGreater(y Uint32x8, z Mask32x8) Mask32x8 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Uint32x8) MaskedGreaterEqual(y Uint32x8, z Mask32x8) Mask32x8 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Uint32x8) MaskedLess(y Uint32x8, z Mask32x8) Mask32x8 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Uint32x8) MaskedLessEqual(y Uint32x8, z Mask32x8) Mask32x8 + +// Asm: VPMAXUD, Arch: AVX512EVEX +func (x Uint32x8) MaskedMax(y Uint32x8, z Mask32x8) Uint32x8 + +// Asm: VPMINUD, Arch: AVX512EVEX +func (x Uint32x8) MaskedMin(y Uint32x8, z Mask32x8) Uint32x8 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Uint32x8) MaskedNotEqual(y Uint32x8, z Mask32x8) Mask32x8 + +// Asm: VPORD, Arch: AVX512EVEX +func (x Uint32x8) MaskedOr(y Uint32x8, z Mask32x8) Uint32x8 + +// Asm: VPSUBD, Arch: AVX512EVEX +func (x Uint32x8) MaskedSub(y Uint32x8, z Mask32x8) Uint32x8 + +// Asm: VPXORD, Arch: AVX512EVEX +func (x Uint32x8) MaskedXor(y Uint32x8, z Mask32x8) Uint32x8 + +// Asm: VPADDQ, Arch: AVX512EVEX +func (x Uint64x2) MaskedAdd(y Uint64x2, z Mask64x2) Uint64x2 + +// Asm: VPANDQ, Arch: AVX512EVEX +func (x Uint64x2) MaskedAnd(y Uint64x2, z Mask64x2) Uint64x2 + +// Asm: VPANDNQ, Arch: AVX512EVEX +func (x Uint64x2) MaskedAndNot(y Uint64x2, z Mask64x2) Uint64x2 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Uint64x2) MaskedEqual(y Uint64x2, z Mask64x2) Mask64x2 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Uint64x2) MaskedGreater(y Uint64x2, z Mask64x2) Mask64x2 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Uint64x2) MaskedGreaterEqual(y Uint64x2, z Mask64x2) Mask64x2 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Uint64x2) MaskedLess(y Uint64x2, z Mask64x2) Mask64x2 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Uint64x2) MaskedLessEqual(y Uint64x2, z Mask64x2) Mask64x2 + +// Asm: VPMAXUQ, Arch: AVX512EVEX +func (x Uint64x2) MaskedMax(y Uint64x2, z Mask64x2) Uint64x2 + +// Asm: VPMINUQ, Arch: AVX512EVEX +func (x Uint64x2) MaskedMin(y Uint64x2, z Mask64x2) Uint64x2 + +// Asm: VPMULUDQ, Arch: AVX512EVEX, Doc: Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +func (x Uint64x2) MaskedMulEvenWiden(y Uint64x2, z Mask64x2) Uint64x2 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Uint64x2) MaskedNotEqual(y Uint64x2, z Mask64x2) Mask64x2 + +// Asm: VPORQ, Arch: AVX512EVEX +func (x Uint64x2) MaskedOr(y Uint64x2, z Mask64x2) Uint64x2 + +// Asm: VPSUBQ, Arch: AVX512EVEX +func (x Uint64x2) MaskedSub(y Uint64x2, z Mask64x2) Uint64x2 + +// Asm: VPXORQ, Arch: AVX512EVEX +func (x Uint64x2) MaskedXor(y Uint64x2, z 
Mask64x2) Uint64x2 + +// Asm: VPADDQ, Arch: AVX512EVEX +func (x Uint64x4) MaskedAdd(y Uint64x4, z Mask64x4) Uint64x4 + +// Asm: VPANDQ, Arch: AVX512EVEX +func (x Uint64x4) MaskedAnd(y Uint64x4, z Mask64x4) Uint64x4 + +// Asm: VPANDNQ, Arch: AVX512EVEX +func (x Uint64x4) MaskedAndNot(y Uint64x4, z Mask64x4) Uint64x4 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Uint64x4) MaskedEqual(y Uint64x4, z Mask64x4) Mask64x4 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Uint64x4) MaskedGreater(y Uint64x4, z Mask64x4) Mask64x4 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Uint64x4) MaskedGreaterEqual(y Uint64x4, z Mask64x4) Mask64x4 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Uint64x4) MaskedLess(y Uint64x4, z Mask64x4) Mask64x4 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Uint64x4) MaskedLessEqual(y Uint64x4, z Mask64x4) Mask64x4 + +// Asm: VPMAXUQ, Arch: AVX512EVEX +func (x Uint64x4) MaskedMax(y Uint64x4, z Mask64x4) Uint64x4 + +// Asm: VPMINUQ, Arch: AVX512EVEX +func (x Uint64x4) MaskedMin(y Uint64x4, z Mask64x4) Uint64x4 + +// Asm: VPMULUDQ, Arch: AVX512EVEX, Doc: Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +func (x Uint64x4) MaskedMulEvenWiden(y Uint64x4, z Mask64x4) Uint64x4 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Uint64x4) MaskedNotEqual(y Uint64x4, z Mask64x4) Mask64x4 + +// Asm: VPORQ, Arch: AVX512EVEX +func (x Uint64x4) MaskedOr(y Uint64x4, z Mask64x4) Uint64x4 + +// Asm: VPSUBQ, Arch: AVX512EVEX +func (x Uint64x4) MaskedSub(y Uint64x4, z Mask64x4) Uint64x4 + +// Asm: VPXORQ, Arch: AVX512EVEX +func (x Uint64x4) MaskedXor(y Uint64x4, z Mask64x4) Uint64x4 + +// Asm: VPADDQ, Arch: AVX512EVEX +func (x Uint64x8) MaskedAdd(y Uint64x8, z Mask64x8) Uint64x8 + +// Asm: VPANDQ, Arch: AVX512EVEX +func (x Uint64x8) MaskedAnd(y Uint64x8, z Mask64x8) Uint64x8 + +// Asm: VPANDNQ, Arch: AVX512EVEX +func (x Uint64x8) MaskedAndNot(y Uint64x8, z Mask64x8) Uint64x8 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Uint64x8) MaskedEqual(y Uint64x8, z Mask64x8) Mask64x8 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Uint64x8) MaskedGreater(y Uint64x8, z Mask64x8) Mask64x8 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Uint64x8) MaskedGreaterEqual(y Uint64x8, z Mask64x8) Mask64x8 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Uint64x8) MaskedLess(y Uint64x8, z Mask64x8) Mask64x8 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Uint64x8) MaskedLessEqual(y Uint64x8, z Mask64x8) Mask64x8 + +// Asm: VPMAXUQ, Arch: AVX512EVEX +func (x Uint64x8) MaskedMax(y Uint64x8, z Mask64x8) Uint64x8 + +// Asm: VPMINUQ, Arch: AVX512EVEX +func (x Uint64x8) MaskedMin(y Uint64x8, z Mask64x8) Uint64x8 + +// Asm: VPMULUDQ, Arch: AVX512EVEX, Doc: Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +func (x Uint64x8) MaskedMulEvenWiden(y Uint64x8, z Mask64x8) Uint64x8 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Uint64x8) MaskedNotEqual(y Uint64x8, z Mask64x8) Mask64x8 + +// Asm: VPORQ, Arch: AVX512EVEX 
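// Editor's sketch (not part of the patch): a plain-Go scalar model of the
// lane-wise behavior suggested by the masked methods above, for example
// Uint64x8.MaskedAdd(y Uint64x8, z Mask64x8). The helper name and the
// [8]bool stand-in for Mask64x8 are invented for illustration only, and the
// assumption that unselected lanes come back zeroed (AVX-512 style zeroing
// masking) is not something these stubs state.
package main

import "fmt"

// maskedAddUint64x8 models x.MaskedAdd(y, m): an element-wise add (VPADDQ)
// on lanes whose mask bit is set, zero elsewhere (assumed).
func maskedAddUint64x8(x, y [8]uint64, m [8]bool) [8]uint64 {
	var r [8]uint64
	for i := range r {
		if m[i] {
			r[i] = x[i] + y[i]
		}
	}
	return r
}

func main() {
	x := [8]uint64{1, 2, 3, 4, 5, 6, 7, 8}
	y := [8]uint64{10, 20, 30, 40, 50, 60, 70, 80}
	m := [8]bool{true, false, true, false, true, false, true, false}
	fmt.Println(maskedAddUint64x8(x, y, m)) // [11 0 33 0 55 0 77 0]
}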
+func (x Uint64x8) MaskedOr(y Uint64x8, z Mask64x8) Uint64x8 + +// Asm: VPSUBQ, Arch: AVX512EVEX +func (x Uint64x8) MaskedSub(y Uint64x8, z Mask64x8) Uint64x8 + +// Asm: VPXORQ, Arch: AVX512EVEX +func (x Uint64x8) MaskedXor(y Uint64x8, z Mask64x8) Uint64x8 + +// Asm: VPADDB, Arch: AVX512EVEX +func (x Uint8x16) MaskedAdd(y Uint8x16, z Mask8x16) Uint8x16 + +// Asm: VPAVGB, Arch: AVX512EVEX +func (x Uint8x16) MaskedAverage(y Uint8x16, z Mask8x16) Uint8x16 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Uint8x16) MaskedEqual(y Uint8x16, z Mask8x16) Mask8x16 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Uint8x16) MaskedGreater(y Uint8x16, z Mask8x16) Mask8x16 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Uint8x16) MaskedGreaterEqual(y Uint8x16, z Mask8x16) Mask8x16 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Uint8x16) MaskedLess(y Uint8x16, z Mask8x16) Mask8x16 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Uint8x16) MaskedLessEqual(y Uint8x16, z Mask8x16) Mask8x16 + +// Asm: VPMAXUB, Arch: AVX512EVEX +func (x Uint8x16) MaskedMax(y Uint8x16, z Mask8x16) Uint8x16 + +// Asm: VPMINUB, Arch: AVX512EVEX +func (x Uint8x16) MaskedMin(y Uint8x16, z Mask8x16) Uint8x16 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Uint8x16) MaskedNotEqual(y Uint8x16, z Mask8x16) Mask8x16 + +// Asm: VPADDSB, Arch: AVX512EVEX +func (x Uint8x16) MaskedSaturatedAdd(y Uint8x16, z Mask8x16) Uint8x16 + +// Asm: VPSUBSB, Arch: AVX512EVEX +func (x Uint8x16) MaskedSaturatedSub(y Uint8x16, z Mask8x16) Uint8x16 + +// Asm: VPSUBB, Arch: AVX512EVEX +func (x Uint8x16) MaskedSub(y Uint8x16, z Mask8x16) Uint8x16 + +// Asm: VPADDB, Arch: AVX512EVEX +func (x Uint8x32) MaskedAdd(y Uint8x32, z Mask8x32) Uint8x32 + +// Asm: VPAVGB, Arch: AVX512EVEX +func (x Uint8x32) MaskedAverage(y Uint8x32, z Mask8x32) Uint8x32 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Uint8x32) MaskedEqual(y Uint8x32, z Mask8x32) Mask8x32 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Uint8x32) MaskedGreater(y Uint8x32, z Mask8x32) Mask8x32 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Uint8x32) MaskedGreaterEqual(y Uint8x32, z Mask8x32) Mask8x32 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Uint8x32) MaskedLess(y Uint8x32, z Mask8x32) Mask8x32 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Uint8x32) MaskedLessEqual(y Uint8x32, z Mask8x32) Mask8x32 + +// Asm: VPMAXUB, Arch: AVX512EVEX +func (x Uint8x32) MaskedMax(y Uint8x32, z Mask8x32) Uint8x32 + +// Asm: VPMINUB, Arch: AVX512EVEX +func (x Uint8x32) MaskedMin(y Uint8x32, z Mask8x32) Uint8x32 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Uint8x32) MaskedNotEqual(y Uint8x32, z Mask8x32) Mask8x32 + +// Asm: VPADDSB, Arch: AVX512EVEX +func (x Uint8x32) MaskedSaturatedAdd(y Uint8x32, z Mask8x32) Uint8x32 + +// Asm: VPSUBSB, Arch: AVX512EVEX +func (x Uint8x32) MaskedSaturatedSub(y Uint8x32, z Mask8x32) Uint8x32 + +// Asm: VPSUBB, Arch: AVX512EVEX +func (x Uint8x32) MaskedSub(y Uint8x32, z Mask8x32) Uint8x32 + +// Asm: VPADDB, Arch: AVX512EVEX +func (x Uint8x64) MaskedAdd(y Uint8x64, z Mask8x64) Uint8x64 + +// Asm: VPAVGB, Arch: 
AVX512EVEX +func (x Uint8x64) MaskedAverage(y Uint8x64, z Mask8x64) Uint8x64 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Uint8x64) MaskedEqual(y Uint8x64, z Mask8x64) Mask8x64 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Uint8x64) MaskedGreater(y Uint8x64, z Mask8x64) Mask8x64 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Uint8x64) MaskedGreaterEqual(y Uint8x64, z Mask8x64) Mask8x64 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Uint8x64) MaskedLess(y Uint8x64, z Mask8x64) Mask8x64 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Uint8x64) MaskedLessEqual(y Uint8x64, z Mask8x64) Mask8x64 + +// Asm: VPMAXUB, Arch: AVX512EVEX +func (x Uint8x64) MaskedMax(y Uint8x64, z Mask8x64) Uint8x64 + +// Asm: VPMINUB, Arch: AVX512EVEX +func (x Uint8x64) MaskedMin(y Uint8x64, z Mask8x64) Uint8x64 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Uint8x64) MaskedNotEqual(y Uint8x64, z Mask8x64) Mask8x64 + +// Asm: VPADDSB, Arch: AVX512EVEX +func (x Uint8x64) MaskedSaturatedAdd(y Uint8x64, z Mask8x64) Uint8x64 + +// Asm: VPSUBSB, Arch: AVX512EVEX +func (x Uint8x64) MaskedSaturatedSub(y Uint8x64, z Mask8x64) Uint8x64 + +// Asm: VPSUBB, Arch: AVX512EVEX +func (x Uint8x64) MaskedSub(y Uint8x64, z Mask8x64) Uint8x64 + +// Int32x8 converts from Int16x16 to Int32x8 +func (from Int16x16) AsInt32x8() (to Int32x8) + +// Uint64x4 converts from Int16x16 to Uint64x4 +func (from Int16x16) AsUint64x4() (to Uint64x4) + +// Int64x4 converts from Int16x16 to Int64x4 +func (from Int16x16) AsInt64x4() (to Int64x4) + +// Float64x4 converts from Int16x16 to Float64x4 +func (from Int16x16) AsFloat64x4() (to Float64x4) + +// Float32x8 converts from Int16x16 to Float32x8 +func (from Int16x16) AsFloat32x8() (to Float32x8) + +// Uint16x16 converts from Int16x16 to Uint16x16 +func (from Int16x16) AsUint16x16() (to Uint16x16) + +// Int8x32 converts from Int16x16 to Int8x32 +func (from Int16x16) AsInt8x32() (to Int8x32) + +// Uint8x32 converts from Int16x16 to Uint8x32 +func (from Int16x16) AsUint8x32() (to Uint8x32) + +// Uint32x8 converts from Int16x16 to Uint32x8 +func (from Int16x16) AsUint32x8() (to Uint32x8) + +// Int16x16 converts from Int32x8 to Int16x16 +func (from Int32x8) AsInt16x16() (to Int16x16) + +// Uint64x4 converts from Int32x8 to Uint64x4 +func (from Int32x8) AsUint64x4() (to Uint64x4) + +// Int64x4 converts from Int32x8 to Int64x4 +func (from Int32x8) AsInt64x4() (to Int64x4) + +// Float64x4 converts from Int32x8 to Float64x4 +func (from Int32x8) AsFloat64x4() (to Float64x4) + +// Float32x8 converts from Int32x8 to Float32x8 +func (from Int32x8) AsFloat32x8() (to Float32x8) + +// Uint16x16 converts from Int32x8 to Uint16x16 +func (from Int32x8) AsUint16x16() (to Uint16x16) + +// Int8x32 converts from Int32x8 to Int8x32 +func (from Int32x8) AsInt8x32() (to Int8x32) + +// Uint8x32 converts from Int32x8 to Uint8x32 +func (from Int32x8) AsUint8x32() (to Uint8x32) + +// Uint32x8 converts from Int32x8 to Uint32x8 +func (from Int32x8) AsUint32x8() (to Uint32x8) + +// Int16x16 converts from Uint64x4 to Int16x16 +func (from Uint64x4) AsInt16x16() (to Int16x16) + +// Int32x8 converts from Uint64x4 to Int32x8 +func (from Uint64x4) AsInt32x8() (to Int32x8) + +// Int64x4 converts from Uint64x4 to Int64x4 +func (from Uint64x4) AsInt64x4() (to Int64x4) + +// Float64x4 converts from 
Uint64x4 to Float64x4 +func (from Uint64x4) AsFloat64x4() (to Float64x4) + +// Float32x8 converts from Uint64x4 to Float32x8 +func (from Uint64x4) AsFloat32x8() (to Float32x8) + +// Uint16x16 converts from Uint64x4 to Uint16x16 +func (from Uint64x4) AsUint16x16() (to Uint16x16) + +// Int8x32 converts from Uint64x4 to Int8x32 +func (from Uint64x4) AsInt8x32() (to Int8x32) + +// Uint8x32 converts from Uint64x4 to Uint8x32 +func (from Uint64x4) AsUint8x32() (to Uint8x32) + +// Uint32x8 converts from Uint64x4 to Uint32x8 +func (from Uint64x4) AsUint32x8() (to Uint32x8) + +// Int16x16 converts from Int64x4 to Int16x16 +func (from Int64x4) AsInt16x16() (to Int16x16) + +// Int32x8 converts from Int64x4 to Int32x8 +func (from Int64x4) AsInt32x8() (to Int32x8) + +// Uint64x4 converts from Int64x4 to Uint64x4 +func (from Int64x4) AsUint64x4() (to Uint64x4) + +// Float64x4 converts from Int64x4 to Float64x4 +func (from Int64x4) AsFloat64x4() (to Float64x4) + +// Float32x8 converts from Int64x4 to Float32x8 +func (from Int64x4) AsFloat32x8() (to Float32x8) + +// Uint16x16 converts from Int64x4 to Uint16x16 +func (from Int64x4) AsUint16x16() (to Uint16x16) + +// Int8x32 converts from Int64x4 to Int8x32 +func (from Int64x4) AsInt8x32() (to Int8x32) + +// Uint8x32 converts from Int64x4 to Uint8x32 +func (from Int64x4) AsUint8x32() (to Uint8x32) + +// Uint32x8 converts from Int64x4 to Uint32x8 +func (from Int64x4) AsUint32x8() (to Uint32x8) + +// Int16x16 converts from Float64x4 to Int16x16 +func (from Float64x4) AsInt16x16() (to Int16x16) + +// Int32x8 converts from Float64x4 to Int32x8 +func (from Float64x4) AsInt32x8() (to Int32x8) + +// Uint64x4 converts from Float64x4 to Uint64x4 +func (from Float64x4) AsUint64x4() (to Uint64x4) + +// Int64x4 converts from Float64x4 to Int64x4 +func (from Float64x4) AsInt64x4() (to Int64x4) + +// Float32x8 converts from Float64x4 to Float32x8 +func (from Float64x4) AsFloat32x8() (to Float32x8) + +// Uint16x16 converts from Float64x4 to Uint16x16 +func (from Float64x4) AsUint16x16() (to Uint16x16) + +// Int8x32 converts from Float64x4 to Int8x32 +func (from Float64x4) AsInt8x32() (to Int8x32) + +// Uint8x32 converts from Float64x4 to Uint8x32 +func (from Float64x4) AsUint8x32() (to Uint8x32) + +// Uint32x8 converts from Float64x4 to Uint32x8 +func (from Float64x4) AsUint32x8() (to Uint32x8) + +// Int16x16 converts from Float32x8 to Int16x16 +func (from Float32x8) AsInt16x16() (to Int16x16) + +// Int32x8 converts from Float32x8 to Int32x8 +func (from Float32x8) AsInt32x8() (to Int32x8) + +// Uint64x4 converts from Float32x8 to Uint64x4 +func (from Float32x8) AsUint64x4() (to Uint64x4) + +// Int64x4 converts from Float32x8 to Int64x4 +func (from Float32x8) AsInt64x4() (to Int64x4) + +// Float64x4 converts from Float32x8 to Float64x4 +func (from Float32x8) AsFloat64x4() (to Float64x4) + +// Uint16x16 converts from Float32x8 to Uint16x16 +func (from Float32x8) AsUint16x16() (to Uint16x16) + +// Int8x32 converts from Float32x8 to Int8x32 +func (from Float32x8) AsInt8x32() (to Int8x32) + +// Uint8x32 converts from Float32x8 to Uint8x32 +func (from Float32x8) AsUint8x32() (to Uint8x32) + +// Uint32x8 converts from Float32x8 to Uint32x8 +func (from Float32x8) AsUint32x8() (to Uint32x8) + +// Int16x16 converts from Uint16x16 to Int16x16 +func (from Uint16x16) AsInt16x16() (to Int16x16) + +// Int32x8 converts from Uint16x16 to Int32x8 +func (from Uint16x16) AsInt32x8() (to Int32x8) + +// Uint64x4 converts from Uint16x16 to Uint64x4 +func (from Uint16x16) AsUint64x4() (to 
Uint64x4) + +// Int64x4 converts from Uint16x16 to Int64x4 +func (from Uint16x16) AsInt64x4() (to Int64x4) + +// Float64x4 converts from Uint16x16 to Float64x4 +func (from Uint16x16) AsFloat64x4() (to Float64x4) + +// Float32x8 converts from Uint16x16 to Float32x8 +func (from Uint16x16) AsFloat32x8() (to Float32x8) + +// Int8x32 converts from Uint16x16 to Int8x32 +func (from Uint16x16) AsInt8x32() (to Int8x32) + +// Uint8x32 converts from Uint16x16 to Uint8x32 +func (from Uint16x16) AsUint8x32() (to Uint8x32) + +// Uint32x8 converts from Uint16x16 to Uint32x8 +func (from Uint16x16) AsUint32x8() (to Uint32x8) + +// Int16x16 converts from Int8x32 to Int16x16 +func (from Int8x32) AsInt16x16() (to Int16x16) + +// Int32x8 converts from Int8x32 to Int32x8 +func (from Int8x32) AsInt32x8() (to Int32x8) + +// Uint64x4 converts from Int8x32 to Uint64x4 +func (from Int8x32) AsUint64x4() (to Uint64x4) + +// Int64x4 converts from Int8x32 to Int64x4 +func (from Int8x32) AsInt64x4() (to Int64x4) + +// Float64x4 converts from Int8x32 to Float64x4 +func (from Int8x32) AsFloat64x4() (to Float64x4) + +// Float32x8 converts from Int8x32 to Float32x8 +func (from Int8x32) AsFloat32x8() (to Float32x8) + +// Uint16x16 converts from Int8x32 to Uint16x16 +func (from Int8x32) AsUint16x16() (to Uint16x16) + +// Uint8x32 converts from Int8x32 to Uint8x32 +func (from Int8x32) AsUint8x32() (to Uint8x32) + +// Uint32x8 converts from Int8x32 to Uint32x8 +func (from Int8x32) AsUint32x8() (to Uint32x8) + +// Int16x16 converts from Uint8x32 to Int16x16 +func (from Uint8x32) AsInt16x16() (to Int16x16) + +// Int32x8 converts from Uint8x32 to Int32x8 +func (from Uint8x32) AsInt32x8() (to Int32x8) + +// Uint64x4 converts from Uint8x32 to Uint64x4 +func (from Uint8x32) AsUint64x4() (to Uint64x4) + +// Int64x4 converts from Uint8x32 to Int64x4 +func (from Uint8x32) AsInt64x4() (to Int64x4) + +// Float64x4 converts from Uint8x32 to Float64x4 +func (from Uint8x32) AsFloat64x4() (to Float64x4) + +// Float32x8 converts from Uint8x32 to Float32x8 +func (from Uint8x32) AsFloat32x8() (to Float32x8) + +// Uint16x16 converts from Uint8x32 to Uint16x16 +func (from Uint8x32) AsUint16x16() (to Uint16x16) + +// Int8x32 converts from Uint8x32 to Int8x32 +func (from Uint8x32) AsInt8x32() (to Int8x32) + +// Uint32x8 converts from Uint8x32 to Uint32x8 +func (from Uint8x32) AsUint32x8() (to Uint32x8) + +// Int16x16 converts from Uint32x8 to Int16x16 +func (from Uint32x8) AsInt16x16() (to Int16x16) + +// Int32x8 converts from Uint32x8 to Int32x8 +func (from Uint32x8) AsInt32x8() (to Int32x8) + +// Uint64x4 converts from Uint32x8 to Uint64x4 +func (from Uint32x8) AsUint64x4() (to Uint64x4) + +// Int64x4 converts from Uint32x8 to Int64x4 +func (from Uint32x8) AsInt64x4() (to Int64x4) + +// Float64x4 converts from Uint32x8 to Float64x4 +func (from Uint32x8) AsFloat64x4() (to Float64x4) + +// Float32x8 converts from Uint32x8 to Float32x8 +func (from Uint32x8) AsFloat32x8() (to Float32x8) + +// Uint16x16 converts from Uint32x8 to Uint16x16 +func (from Uint32x8) AsUint16x16() (to Uint16x16) + +// Int8x32 converts from Uint32x8 to Int8x32 +func (from Uint32x8) AsInt8x32() (to Int8x32) + +// Uint8x32 converts from Uint32x8 to Uint8x32 +func (from Uint32x8) AsUint8x32() (to Uint8x32) + +// Int64x8 converts from Float64x8 to Int64x8 +func (from Float64x8) AsInt64x8() (to Int64x8) + +// Uint8x64 converts from Float64x8 to Uint8x64 +func (from Float64x8) AsUint8x64() (to Uint8x64) + +// Int8x64 converts from Float64x8 to Int8x64 +func (from Float64x8) 
AsInt8x64() (to Int8x64) + +// Float32x16 converts from Float64x8 to Float32x16 +func (from Float64x8) AsFloat32x16() (to Float32x16) + +// Int32x16 converts from Float64x8 to Int32x16 +func (from Float64x8) AsInt32x16() (to Int32x16) + +// Uint16x32 converts from Float64x8 to Uint16x32 +func (from Float64x8) AsUint16x32() (to Uint16x32) + +// Int16x32 converts from Float64x8 to Int16x32 +func (from Float64x8) AsInt16x32() (to Int16x32) + +// Uint64x8 converts from Float64x8 to Uint64x8 +func (from Float64x8) AsUint64x8() (to Uint64x8) + +// Uint32x16 converts from Float64x8 to Uint32x16 +func (from Float64x8) AsUint32x16() (to Uint32x16) + +// Float64x8 converts from Int64x8 to Float64x8 +func (from Int64x8) AsFloat64x8() (to Float64x8) + +// Uint8x64 converts from Int64x8 to Uint8x64 +func (from Int64x8) AsUint8x64() (to Uint8x64) + +// Int8x64 converts from Int64x8 to Int8x64 +func (from Int64x8) AsInt8x64() (to Int8x64) + +// Float32x16 converts from Int64x8 to Float32x16 +func (from Int64x8) AsFloat32x16() (to Float32x16) + +// Int32x16 converts from Int64x8 to Int32x16 +func (from Int64x8) AsInt32x16() (to Int32x16) + +// Uint16x32 converts from Int64x8 to Uint16x32 +func (from Int64x8) AsUint16x32() (to Uint16x32) + +// Int16x32 converts from Int64x8 to Int16x32 +func (from Int64x8) AsInt16x32() (to Int16x32) + +// Uint64x8 converts from Int64x8 to Uint64x8 +func (from Int64x8) AsUint64x8() (to Uint64x8) + +// Uint32x16 converts from Int64x8 to Uint32x16 +func (from Int64x8) AsUint32x16() (to Uint32x16) + +// Float64x8 converts from Uint8x64 to Float64x8 +func (from Uint8x64) AsFloat64x8() (to Float64x8) + +// Int64x8 converts from Uint8x64 to Int64x8 +func (from Uint8x64) AsInt64x8() (to Int64x8) + +// Int8x64 converts from Uint8x64 to Int8x64 +func (from Uint8x64) AsInt8x64() (to Int8x64) + +// Float32x16 converts from Uint8x64 to Float32x16 +func (from Uint8x64) AsFloat32x16() (to Float32x16) + +// Int32x16 converts from Uint8x64 to Int32x16 +func (from Uint8x64) AsInt32x16() (to Int32x16) + +// Uint16x32 converts from Uint8x64 to Uint16x32 +func (from Uint8x64) AsUint16x32() (to Uint16x32) + +// Int16x32 converts from Uint8x64 to Int16x32 +func (from Uint8x64) AsInt16x32() (to Int16x32) + +// Uint64x8 converts from Uint8x64 to Uint64x8 +func (from Uint8x64) AsUint64x8() (to Uint64x8) + +// Uint32x16 converts from Uint8x64 to Uint32x16 +func (from Uint8x64) AsUint32x16() (to Uint32x16) + +// Float64x8 converts from Int8x64 to Float64x8 +func (from Int8x64) AsFloat64x8() (to Float64x8) + +// Int64x8 converts from Int8x64 to Int64x8 +func (from Int8x64) AsInt64x8() (to Int64x8) + +// Uint8x64 converts from Int8x64 to Uint8x64 +func (from Int8x64) AsUint8x64() (to Uint8x64) + +// Float32x16 converts from Int8x64 to Float32x16 +func (from Int8x64) AsFloat32x16() (to Float32x16) + +// Int32x16 converts from Int8x64 to Int32x16 +func (from Int8x64) AsInt32x16() (to Int32x16) + +// Uint16x32 converts from Int8x64 to Uint16x32 +func (from Int8x64) AsUint16x32() (to Uint16x32) + +// Int16x32 converts from Int8x64 to Int16x32 +func (from Int8x64) AsInt16x32() (to Int16x32) + +// Uint64x8 converts from Int8x64 to Uint64x8 +func (from Int8x64) AsUint64x8() (to Uint64x8) + +// Uint32x16 converts from Int8x64 to Uint32x16 +func (from Int8x64) AsUint32x16() (to Uint32x16) + +// Float64x8 converts from Float32x16 to Float64x8 +func (from Float32x16) AsFloat64x8() (to Float64x8) + +// Int64x8 converts from Float32x16 to Int64x8 +func (from Float32x16) AsInt64x8() (to Int64x8) + +// Uint8x64 
converts from Float32x16 to Uint8x64 +func (from Float32x16) AsUint8x64() (to Uint8x64) + +// Int8x64 converts from Float32x16 to Int8x64 +func (from Float32x16) AsInt8x64() (to Int8x64) + +// Int32x16 converts from Float32x16 to Int32x16 +func (from Float32x16) AsInt32x16() (to Int32x16) + +// Uint16x32 converts from Float32x16 to Uint16x32 +func (from Float32x16) AsUint16x32() (to Uint16x32) + +// Int16x32 converts from Float32x16 to Int16x32 +func (from Float32x16) AsInt16x32() (to Int16x32) + +// Uint64x8 converts from Float32x16 to Uint64x8 +func (from Float32x16) AsUint64x8() (to Uint64x8) + +// Uint32x16 converts from Float32x16 to Uint32x16 +func (from Float32x16) AsUint32x16() (to Uint32x16) + +// Float64x8 converts from Int32x16 to Float64x8 +func (from Int32x16) AsFloat64x8() (to Float64x8) + +// Int64x8 converts from Int32x16 to Int64x8 +func (from Int32x16) AsInt64x8() (to Int64x8) + +// Uint8x64 converts from Int32x16 to Uint8x64 +func (from Int32x16) AsUint8x64() (to Uint8x64) + +// Int8x64 converts from Int32x16 to Int8x64 +func (from Int32x16) AsInt8x64() (to Int8x64) + +// Float32x16 converts from Int32x16 to Float32x16 +func (from Int32x16) AsFloat32x16() (to Float32x16) + +// Uint16x32 converts from Int32x16 to Uint16x32 +func (from Int32x16) AsUint16x32() (to Uint16x32) + +// Int16x32 converts from Int32x16 to Int16x32 +func (from Int32x16) AsInt16x32() (to Int16x32) + +// Uint64x8 converts from Int32x16 to Uint64x8 +func (from Int32x16) AsUint64x8() (to Uint64x8) + +// Uint32x16 converts from Int32x16 to Uint32x16 +func (from Int32x16) AsUint32x16() (to Uint32x16) + +// Float64x8 converts from Uint16x32 to Float64x8 +func (from Uint16x32) AsFloat64x8() (to Float64x8) + +// Int64x8 converts from Uint16x32 to Int64x8 +func (from Uint16x32) AsInt64x8() (to Int64x8) + +// Uint8x64 converts from Uint16x32 to Uint8x64 +func (from Uint16x32) AsUint8x64() (to Uint8x64) + +// Int8x64 converts from Uint16x32 to Int8x64 +func (from Uint16x32) AsInt8x64() (to Int8x64) + +// Float32x16 converts from Uint16x32 to Float32x16 +func (from Uint16x32) AsFloat32x16() (to Float32x16) + +// Int32x16 converts from Uint16x32 to Int32x16 +func (from Uint16x32) AsInt32x16() (to Int32x16) + +// Int16x32 converts from Uint16x32 to Int16x32 +func (from Uint16x32) AsInt16x32() (to Int16x32) + +// Uint64x8 converts from Uint16x32 to Uint64x8 +func (from Uint16x32) AsUint64x8() (to Uint64x8) + +// Uint32x16 converts from Uint16x32 to Uint32x16 +func (from Uint16x32) AsUint32x16() (to Uint32x16) + +// Float64x8 converts from Int16x32 to Float64x8 +func (from Int16x32) AsFloat64x8() (to Float64x8) + +// Int64x8 converts from Int16x32 to Int64x8 +func (from Int16x32) AsInt64x8() (to Int64x8) + +// Uint8x64 converts from Int16x32 to Uint8x64 +func (from Int16x32) AsUint8x64() (to Uint8x64) + +// Int8x64 converts from Int16x32 to Int8x64 +func (from Int16x32) AsInt8x64() (to Int8x64) + +// Float32x16 converts from Int16x32 to Float32x16 +func (from Int16x32) AsFloat32x16() (to Float32x16) + +// Int32x16 converts from Int16x32 to Int32x16 +func (from Int16x32) AsInt32x16() (to Int32x16) + +// Uint16x32 converts from Int16x32 to Uint16x32 +func (from Int16x32) AsUint16x32() (to Uint16x32) + +// Uint64x8 converts from Int16x32 to Uint64x8 +func (from Int16x32) AsUint64x8() (to Uint64x8) + +// Uint32x16 converts from Int16x32 to Uint32x16 +func (from Int16x32) AsUint32x16() (to Uint32x16) + +// Float64x8 converts from Uint64x8 to Float64x8 +func (from Uint64x8) AsFloat64x8() (to Float64x8) + +// Int64x8 
converts from Uint64x8 to Int64x8 +func (from Uint64x8) AsInt64x8() (to Int64x8) + +// Uint8x64 converts from Uint64x8 to Uint8x64 +func (from Uint64x8) AsUint8x64() (to Uint8x64) + +// Int8x64 converts from Uint64x8 to Int8x64 +func (from Uint64x8) AsInt8x64() (to Int8x64) + +// Float32x16 converts from Uint64x8 to Float32x16 +func (from Uint64x8) AsFloat32x16() (to Float32x16) + +// Int32x16 converts from Uint64x8 to Int32x16 +func (from Uint64x8) AsInt32x16() (to Int32x16) + +// Uint16x32 converts from Uint64x8 to Uint16x32 +func (from Uint64x8) AsUint16x32() (to Uint16x32) + +// Int16x32 converts from Uint64x8 to Int16x32 +func (from Uint64x8) AsInt16x32() (to Int16x32) + +// Uint32x16 converts from Uint64x8 to Uint32x16 +func (from Uint64x8) AsUint32x16() (to Uint32x16) + +// Float64x8 converts from Uint32x16 to Float64x8 +func (from Uint32x16) AsFloat64x8() (to Float64x8) + +// Int64x8 converts from Uint32x16 to Int64x8 +func (from Uint32x16) AsInt64x8() (to Int64x8) + +// Uint8x64 converts from Uint32x16 to Uint8x64 +func (from Uint32x16) AsUint8x64() (to Uint8x64) + +// Int8x64 converts from Uint32x16 to Int8x64 +func (from Uint32x16) AsInt8x64() (to Int8x64) + +// Float32x16 converts from Uint32x16 to Float32x16 +func (from Uint32x16) AsFloat32x16() (to Float32x16) + +// Int32x16 converts from Uint32x16 to Int32x16 +func (from Uint32x16) AsInt32x16() (to Int32x16) + +// Uint16x32 converts from Uint32x16 to Uint16x32 +func (from Uint32x16) AsUint16x32() (to Uint16x32) + +// Int16x32 converts from Uint32x16 to Int16x32 +func (from Uint32x16) AsInt16x32() (to Int16x32) + +// Uint64x8 converts from Uint32x16 to Uint64x8 +func (from Uint32x16) AsUint64x8() (to Uint64x8) + +// Int8x16 converts from Int32x4 to Int8x16 +func (from Int32x4) AsInt8x16() (to Int8x16) + +// Uint16x8 converts from Int32x4 to Uint16x8 +func (from Int32x4) AsUint16x8() (to Uint16x8) + +// Int16x8 converts from Int32x4 to Int16x8 +func (from Int32x4) AsInt16x8() (to Int16x8) + +// Float32x4 converts from Int32x4 to Float32x4 +func (from Int32x4) AsFloat32x4() (to Float32x4) + +// Uint64x2 converts from Int32x4 to Uint64x2 +func (from Int32x4) AsUint64x2() (to Uint64x2) + +// Float64x2 converts from Int32x4 to Float64x2 +func (from Int32x4) AsFloat64x2() (to Float64x2) + +// Int64x2 converts from Int32x4 to Int64x2 +func (from Int32x4) AsInt64x2() (to Int64x2) + +// Uint8x16 converts from Int32x4 to Uint8x16 +func (from Int32x4) AsUint8x16() (to Uint8x16) + +// Uint32x4 converts from Int32x4 to Uint32x4 +func (from Int32x4) AsUint32x4() (to Uint32x4) + +// Int32x4 converts from Int8x16 to Int32x4 +func (from Int8x16) AsInt32x4() (to Int32x4) + +// Uint16x8 converts from Int8x16 to Uint16x8 +func (from Int8x16) AsUint16x8() (to Uint16x8) + +// Int16x8 converts from Int8x16 to Int16x8 +func (from Int8x16) AsInt16x8() (to Int16x8) + +// Float32x4 converts from Int8x16 to Float32x4 +func (from Int8x16) AsFloat32x4() (to Float32x4) + +// Uint64x2 converts from Int8x16 to Uint64x2 +func (from Int8x16) AsUint64x2() (to Uint64x2) + +// Float64x2 converts from Int8x16 to Float64x2 +func (from Int8x16) AsFloat64x2() (to Float64x2) + +// Int64x2 converts from Int8x16 to Int64x2 +func (from Int8x16) AsInt64x2() (to Int64x2) + +// Uint8x16 converts from Int8x16 to Uint8x16 +func (from Int8x16) AsUint8x16() (to Uint8x16) + +// Uint32x4 converts from Int8x16 to Uint32x4 +func (from Int8x16) AsUint32x4() (to Uint32x4) + +// Int32x4 converts from Uint16x8 to Int32x4 +func (from Uint16x8) AsInt32x4() (to Int32x4) + +// Int8x16 
converts from Uint16x8 to Int8x16 +func (from Uint16x8) AsInt8x16() (to Int8x16) + +// Int16x8 converts from Uint16x8 to Int16x8 +func (from Uint16x8) AsInt16x8() (to Int16x8) + +// Float32x4 converts from Uint16x8 to Float32x4 +func (from Uint16x8) AsFloat32x4() (to Float32x4) + +// Uint64x2 converts from Uint16x8 to Uint64x2 +func (from Uint16x8) AsUint64x2() (to Uint64x2) + +// Float64x2 converts from Uint16x8 to Float64x2 +func (from Uint16x8) AsFloat64x2() (to Float64x2) + +// Int64x2 converts from Uint16x8 to Int64x2 +func (from Uint16x8) AsInt64x2() (to Int64x2) + +// Uint8x16 converts from Uint16x8 to Uint8x16 +func (from Uint16x8) AsUint8x16() (to Uint8x16) + +// Uint32x4 converts from Uint16x8 to Uint32x4 +func (from Uint16x8) AsUint32x4() (to Uint32x4) + +// Int32x4 converts from Int16x8 to Int32x4 +func (from Int16x8) AsInt32x4() (to Int32x4) + +// Int8x16 converts from Int16x8 to Int8x16 +func (from Int16x8) AsInt8x16() (to Int8x16) + +// Uint16x8 converts from Int16x8 to Uint16x8 +func (from Int16x8) AsUint16x8() (to Uint16x8) + +// Float32x4 converts from Int16x8 to Float32x4 +func (from Int16x8) AsFloat32x4() (to Float32x4) + +// Uint64x2 converts from Int16x8 to Uint64x2 +func (from Int16x8) AsUint64x2() (to Uint64x2) + +// Float64x2 converts from Int16x8 to Float64x2 +func (from Int16x8) AsFloat64x2() (to Float64x2) + +// Int64x2 converts from Int16x8 to Int64x2 +func (from Int16x8) AsInt64x2() (to Int64x2) + +// Uint8x16 converts from Int16x8 to Uint8x16 +func (from Int16x8) AsUint8x16() (to Uint8x16) + +// Uint32x4 converts from Int16x8 to Uint32x4 +func (from Int16x8) AsUint32x4() (to Uint32x4) + +// Int32x4 converts from Float32x4 to Int32x4 +func (from Float32x4) AsInt32x4() (to Int32x4) + +// Int8x16 converts from Float32x4 to Int8x16 +func (from Float32x4) AsInt8x16() (to Int8x16) + +// Uint16x8 converts from Float32x4 to Uint16x8 +func (from Float32x4) AsUint16x8() (to Uint16x8) + +// Int16x8 converts from Float32x4 to Int16x8 +func (from Float32x4) AsInt16x8() (to Int16x8) + +// Uint64x2 converts from Float32x4 to Uint64x2 +func (from Float32x4) AsUint64x2() (to Uint64x2) + +// Float64x2 converts from Float32x4 to Float64x2 +func (from Float32x4) AsFloat64x2() (to Float64x2) + +// Int64x2 converts from Float32x4 to Int64x2 +func (from Float32x4) AsInt64x2() (to Int64x2) + +// Uint8x16 converts from Float32x4 to Uint8x16 +func (from Float32x4) AsUint8x16() (to Uint8x16) + +// Uint32x4 converts from Float32x4 to Uint32x4 +func (from Float32x4) AsUint32x4() (to Uint32x4) + +// Int32x4 converts from Uint64x2 to Int32x4 +func (from Uint64x2) AsInt32x4() (to Int32x4) + +// Int8x16 converts from Uint64x2 to Int8x16 +func (from Uint64x2) AsInt8x16() (to Int8x16) + +// Uint16x8 converts from Uint64x2 to Uint16x8 +func (from Uint64x2) AsUint16x8() (to Uint16x8) + +// Int16x8 converts from Uint64x2 to Int16x8 +func (from Uint64x2) AsInt16x8() (to Int16x8) + +// Float32x4 converts from Uint64x2 to Float32x4 +func (from Uint64x2) AsFloat32x4() (to Float32x4) + +// Float64x2 converts from Uint64x2 to Float64x2 +func (from Uint64x2) AsFloat64x2() (to Float64x2) + +// Int64x2 converts from Uint64x2 to Int64x2 +func (from Uint64x2) AsInt64x2() (to Int64x2) + +// Uint8x16 converts from Uint64x2 to Uint8x16 +func (from Uint64x2) AsUint8x16() (to Uint8x16) + +// Uint32x4 converts from Uint64x2 to Uint32x4 +func (from Uint64x2) AsUint32x4() (to Uint32x4) + +// Int32x4 converts from Float64x2 to Int32x4 +func (from Float64x2) AsInt32x4() (to Int32x4) + +// Int8x16 converts from 
Float64x2 to Int8x16 +func (from Float64x2) AsInt8x16() (to Int8x16) + +// Uint16x8 converts from Float64x2 to Uint16x8 +func (from Float64x2) AsUint16x8() (to Uint16x8) + +// Int16x8 converts from Float64x2 to Int16x8 +func (from Float64x2) AsInt16x8() (to Int16x8) + +// Float32x4 converts from Float64x2 to Float32x4 +func (from Float64x2) AsFloat32x4() (to Float32x4) + +// Uint64x2 converts from Float64x2 to Uint64x2 +func (from Float64x2) AsUint64x2() (to Uint64x2) + +// Int64x2 converts from Float64x2 to Int64x2 +func (from Float64x2) AsInt64x2() (to Int64x2) + +// Uint8x16 converts from Float64x2 to Uint8x16 +func (from Float64x2) AsUint8x16() (to Uint8x16) + +// Uint32x4 converts from Float64x2 to Uint32x4 +func (from Float64x2) AsUint32x4() (to Uint32x4) + +// Int32x4 converts from Int64x2 to Int32x4 +func (from Int64x2) AsInt32x4() (to Int32x4) + +// Int8x16 converts from Int64x2 to Int8x16 +func (from Int64x2) AsInt8x16() (to Int8x16) + +// Uint16x8 converts from Int64x2 to Uint16x8 +func (from Int64x2) AsUint16x8() (to Uint16x8) + +// Int16x8 converts from Int64x2 to Int16x8 +func (from Int64x2) AsInt16x8() (to Int16x8) + +// Float32x4 converts from Int64x2 to Float32x4 +func (from Int64x2) AsFloat32x4() (to Float32x4) + +// Uint64x2 converts from Int64x2 to Uint64x2 +func (from Int64x2) AsUint64x2() (to Uint64x2) + +// Float64x2 converts from Int64x2 to Float64x2 +func (from Int64x2) AsFloat64x2() (to Float64x2) + +// Uint8x16 converts from Int64x2 to Uint8x16 +func (from Int64x2) AsUint8x16() (to Uint8x16) + +// Uint32x4 converts from Int64x2 to Uint32x4 +func (from Int64x2) AsUint32x4() (to Uint32x4) + +// Int32x4 converts from Uint8x16 to Int32x4 +func (from Uint8x16) AsInt32x4() (to Int32x4) + +// Int8x16 converts from Uint8x16 to Int8x16 +func (from Uint8x16) AsInt8x16() (to Int8x16) + +// Uint16x8 converts from Uint8x16 to Uint16x8 +func (from Uint8x16) AsUint16x8() (to Uint16x8) + +// Int16x8 converts from Uint8x16 to Int16x8 +func (from Uint8x16) AsInt16x8() (to Int16x8) + +// Float32x4 converts from Uint8x16 to Float32x4 +func (from Uint8x16) AsFloat32x4() (to Float32x4) + +// Uint64x2 converts from Uint8x16 to Uint64x2 +func (from Uint8x16) AsUint64x2() (to Uint64x2) + +// Float64x2 converts from Uint8x16 to Float64x2 +func (from Uint8x16) AsFloat64x2() (to Float64x2) + +// Int64x2 converts from Uint8x16 to Int64x2 +func (from Uint8x16) AsInt64x2() (to Int64x2) + +// Uint32x4 converts from Uint8x16 to Uint32x4 +func (from Uint8x16) AsUint32x4() (to Uint32x4) + +// Int32x4 converts from Uint32x4 to Int32x4 +func (from Uint32x4) AsInt32x4() (to Int32x4) + +// Int8x16 converts from Uint32x4 to Int8x16 +func (from Uint32x4) AsInt8x16() (to Int8x16) + +// Uint16x8 converts from Uint32x4 to Uint16x8 +func (from Uint32x4) AsUint16x8() (to Uint16x8) + +// Int16x8 converts from Uint32x4 to Int16x8 +func (from Uint32x4) AsInt16x8() (to Int16x8) + +// Float32x4 converts from Uint32x4 to Float32x4 +func (from Uint32x4) AsFloat32x4() (to Float32x4) + +// Uint64x2 converts from Uint32x4 to Uint64x2 +func (from Uint32x4) AsUint64x2() (to Uint64x2) + +// Float64x2 converts from Uint32x4 to Float64x2 +func (from Uint32x4) AsFloat64x2() (to Float64x2) + +// Int64x2 converts from Uint32x4 to Int64x2 +func (from Uint32x4) AsInt64x2() (to Int64x2) + +// Uint8x16 converts from Uint32x4 to Uint8x16 +func (from Uint32x4) AsUint8x16() (to Uint8x16) + +// converts from Mask64x4 to Int64x4 +func (from Mask64x4) AsInt64x4() (to Int64x4) + +// converts from Int64x4 to Mask64x4 +func (from Int64x4) 
AsMask64x4() (to Mask64x4) + +func (x Mask64x4) And(y Mask64x4) Mask64x4 + +func (x Mask64x4) Or(y Mask64x4) Mask64x4 + +// converts from Mask16x16 to Int16x16 +func (from Mask16x16) AsInt16x16() (to Int16x16) + +// converts from Int16x16 to Mask16x16 +func (from Int16x16) AsMask16x16() (to Mask16x16) + +func (x Mask16x16) And(y Mask16x16) Mask16x16 + +func (x Mask16x16) Or(y Mask16x16) Mask16x16 + +// converts from Mask32x8 to Int32x8 +func (from Mask32x8) AsInt32x8() (to Int32x8) + +// converts from Int32x8 to Mask32x8 +func (from Int32x8) AsMask32x8() (to Mask32x8) + +func (x Mask32x8) And(y Mask32x8) Mask32x8 + +func (x Mask32x8) Or(y Mask32x8) Mask32x8 + +// converts from Mask8x32 to Int8x32 +func (from Mask8x32) AsInt8x32() (to Int8x32) + +// converts from Int8x32 to Mask8x32 +func (from Int8x32) AsMask8x32() (to Mask8x32) + +func (x Mask8x32) And(y Mask8x32) Mask8x32 + +func (x Mask8x32) Or(y Mask8x32) Mask8x32 + +// converts from Mask64x8 to Int64x8 +func (from Mask64x8) AsInt64x8() (to Int64x8) + +// converts from Int64x8 to Mask64x8 +func (from Int64x8) AsMask64x8() (to Mask64x8) + +func (x Mask64x8) And(y Mask64x8) Mask64x8 + +func (x Mask64x8) Or(y Mask64x8) Mask64x8 + +// converts from Mask8x64 to Int8x64 +func (from Mask8x64) AsInt8x64() (to Int8x64) + +// converts from Int8x64 to Mask8x64 +func (from Int8x64) AsMask8x64() (to Mask8x64) + +func (x Mask8x64) And(y Mask8x64) Mask8x64 + +func (x Mask8x64) Or(y Mask8x64) Mask8x64 + +// converts from Mask32x16 to Int32x16 +func (from Mask32x16) AsInt32x16() (to Int32x16) + +// converts from Int32x16 to Mask32x16 +func (from Int32x16) AsMask32x16() (to Mask32x16) + +func (x Mask32x16) And(y Mask32x16) Mask32x16 + +func (x Mask32x16) Or(y Mask32x16) Mask32x16 + +// converts from Mask16x32 to Int16x32 +func (from Mask16x32) AsInt16x32() (to Int16x32) + +// converts from Int16x32 to Mask16x32 +func (from Int16x32) AsMask16x32() (to Mask16x32) + +func (x Mask16x32) And(y Mask16x32) Mask16x32 + +func (x Mask16x32) Or(y Mask16x32) Mask16x32 + +// converts from Mask32x4 to Int32x4 +func (from Mask32x4) AsInt32x4() (to Int32x4) + +// converts from Int32x4 to Mask32x4 +func (from Int32x4) AsMask32x4() (to Mask32x4) + +func (x Mask32x4) And(y Mask32x4) Mask32x4 + +func (x Mask32x4) Or(y Mask32x4) Mask32x4 + +// converts from Mask8x16 to Int8x16 +func (from Mask8x16) AsInt8x16() (to Int8x16) + +// converts from Int8x16 to Mask8x16 +func (from Int8x16) AsMask8x16() (to Mask8x16) + +func (x Mask8x16) And(y Mask8x16) Mask8x16 + +func (x Mask8x16) Or(y Mask8x16) Mask8x16 + +// converts from Mask16x8 to Int16x8 +func (from Mask16x8) AsInt16x8() (to Int16x8) + +// converts from Int16x8 to Mask16x8 +func (from Int16x8) AsMask16x8() (to Mask16x8) + +func (x Mask16x8) And(y Mask16x8) Mask16x8 + +func (x Mask16x8) Or(y Mask16x8) Mask16x8 + +// converts from Mask64x2 to Int64x2 +func (from Mask64x2) AsInt64x2() (to Int64x2) + +// converts from Int64x2 to Mask64x2 +func (from Int64x2) AsMask64x2() (to Mask64x2) + +func (x Mask64x2) And(y Mask64x2) Mask64x2 + +func (x Mask64x2) Or(y Mask64x2) Mask64x2 diff --git a/src/simd/types_amd64.go b/src/simd/types_amd64.go new file mode 100644 index 00000000000000..28322fe3bf3873 --- /dev/null +++ b/src/simd/types_amd64.go @@ -0,0 +1,662 @@ +// Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. 
+ +//go:build goexperiment.simd + +package simd + +// v128 is a tag type that tells the compiler that this is really 128-bit SIMD +type v128 struct { + _128 struct{} +} + +// Int32x4 is a 128-bit SIMD vector of 4 int32 +type Int32x4 struct { + int32x4 v128 + vals [4]int32 +} + +// Len returns the number of elements in a Int32x4 +func (x Int32x4) Len() int { return 4 } + +// LoadInt32x4 loads a Int32x4 from an array +// +//go:noescape +func LoadInt32x4(y *[4]int32) Int32x4 + +// Store stores a Int32x4 to an array +// +//go:noescape +func (x Int32x4) Store(y *[4]int32) + +// Mask32x4 is a 128-bit SIMD vector of 4 int32 +type Mask32x4 struct { + int32x4 v128 + vals [4]int32 +} + +// Int8x16 is a 128-bit SIMD vector of 16 int8 +type Int8x16 struct { + int8x16 v128 + vals [16]int8 +} + +// Len returns the number of elements in a Int8x16 +func (x Int8x16) Len() int { return 16 } + +// LoadInt8x16 loads a Int8x16 from an array +// +//go:noescape +func LoadInt8x16(y *[16]int8) Int8x16 + +// Store stores a Int8x16 to an array +// +//go:noescape +func (x Int8x16) Store(y *[16]int8) + +// Mask8x16 is a 128-bit SIMD vector of 16 int8 +type Mask8x16 struct { + int8x16 v128 + vals [16]int8 +} + +// Uint16x8 is a 128-bit SIMD vector of 8 uint16 +type Uint16x8 struct { + uint16x8 v128 + vals [8]uint16 +} + +// Len returns the number of elements in a Uint16x8 +func (x Uint16x8) Len() int { return 8 } + +// LoadUint16x8 loads a Uint16x8 from an array +// +//go:noescape +func LoadUint16x8(y *[8]uint16) Uint16x8 + +// Store stores a Uint16x8 to an array +// +//go:noescape +func (x Uint16x8) Store(y *[8]uint16) + +// Mask16x8 is a 128-bit SIMD vector of 8 int16 +type Mask16x8 struct { + int16x8 v128 + vals [8]int16 +} + +// Int16x8 is a 128-bit SIMD vector of 8 int16 +type Int16x8 struct { + int16x8 v128 + vals [8]int16 +} + +// Len returns the number of elements in a Int16x8 +func (x Int16x8) Len() int { return 8 } + +// LoadInt16x8 loads a Int16x8 from an array +// +//go:noescape +func LoadInt16x8(y *[8]int16) Int16x8 + +// Store stores a Int16x8 to an array +// +//go:noescape +func (x Int16x8) Store(y *[8]int16) + +// Float32x4 is a 128-bit SIMD vector of 4 float32 +type Float32x4 struct { + float32x4 v128 + vals [4]float32 +} + +// Len returns the number of elements in a Float32x4 +func (x Float32x4) Len() int { return 4 } + +// LoadFloat32x4 loads a Float32x4 from an array +// +//go:noescape +func LoadFloat32x4(y *[4]float32) Float32x4 + +// Store stores a Float32x4 to an array +// +//go:noescape +func (x Float32x4) Store(y *[4]float32) + +// Uint64x2 is a 128-bit SIMD vector of 2 uint64 +type Uint64x2 struct { + uint64x2 v128 + vals [2]uint64 +} + +// Len returns the number of elements in a Uint64x2 +func (x Uint64x2) Len() int { return 2 } + +// LoadUint64x2 loads a Uint64x2 from an array +// +//go:noescape +func LoadUint64x2(y *[2]uint64) Uint64x2 + +// Store stores a Uint64x2 to an array +// +//go:noescape +func (x Uint64x2) Store(y *[2]uint64) + +// Float64x2 is a 128-bit SIMD vector of 2 float64 +type Float64x2 struct { + float64x2 v128 + vals [2]float64 +} + +// Len returns the number of elements in a Float64x2 +func (x Float64x2) Len() int { return 2 } + +// LoadFloat64x2 loads a Float64x2 from an array +// +//go:noescape +func LoadFloat64x2(y *[2]float64) Float64x2 + +// Store stores a Float64x2 to an array +// +//go:noescape +func (x Float64x2) Store(y *[2]float64) + +// Mask64x2 is a 128-bit SIMD vector of 2 int64 +type Mask64x2 struct { + int64x2 v128 + vals [2]int64 +} + +// Int64x2 is a 
128-bit SIMD vector of 2 int64 +type Int64x2 struct { + int64x2 v128 + vals [2]int64 +} + +// Len returns the number of elements in a Int64x2 +func (x Int64x2) Len() int { return 2 } + +// LoadInt64x2 loads a Int64x2 from an array +// +//go:noescape +func LoadInt64x2(y *[2]int64) Int64x2 + +// Store stores a Int64x2 to an array +// +//go:noescape +func (x Int64x2) Store(y *[2]int64) + +// Uint8x16 is a 128-bit SIMD vector of 16 uint8 +type Uint8x16 struct { + uint8x16 v128 + vals [16]uint8 +} + +// Len returns the number of elements in a Uint8x16 +func (x Uint8x16) Len() int { return 16 } + +// LoadUint8x16 loads a Uint8x16 from an array +// +//go:noescape +func LoadUint8x16(y *[16]uint8) Uint8x16 + +// Store stores a Uint8x16 to an array +// +//go:noescape +func (x Uint8x16) Store(y *[16]uint8) + +// Uint32x4 is a 128-bit SIMD vector of 4 uint32 +type Uint32x4 struct { + uint32x4 v128 + vals [4]uint32 +} + +// Len returns the number of elements in a Uint32x4 +func (x Uint32x4) Len() int { return 4 } + +// LoadUint32x4 loads a Uint32x4 from an array +// +//go:noescape +func LoadUint32x4(y *[4]uint32) Uint32x4 + +// Store stores a Uint32x4 to an array +// +//go:noescape +func (x Uint32x4) Store(y *[4]uint32) + +// v256 is a tag type that tells the compiler that this is really 256-bit SIMD +type v256 struct { + _256 struct{} +} + +// Int16x16 is a 256-bit SIMD vector of 16 int16 +type Int16x16 struct { + int16x16 v256 + vals [16]int16 +} + +// Len returns the number of elements in a Int16x16 +func (x Int16x16) Len() int { return 16 } + +// LoadInt16x16 loads a Int16x16 from an array +// +//go:noescape +func LoadInt16x16(y *[16]int16) Int16x16 + +// Store stores a Int16x16 to an array +// +//go:noescape +func (x Int16x16) Store(y *[16]int16) + +// Int32x8 is a 256-bit SIMD vector of 8 int32 +type Int32x8 struct { + int32x8 v256 + vals [8]int32 +} + +// Len returns the number of elements in a Int32x8 +func (x Int32x8) Len() int { return 8 } + +// LoadInt32x8 loads a Int32x8 from an array +// +//go:noescape +func LoadInt32x8(y *[8]int32) Int32x8 + +// Store stores a Int32x8 to an array +// +//go:noescape +func (x Int32x8) Store(y *[8]int32) + +// Uint64x4 is a 256-bit SIMD vector of 4 uint64 +type Uint64x4 struct { + uint64x4 v256 + vals [4]uint64 +} + +// Len returns the number of elements in a Uint64x4 +func (x Uint64x4) Len() int { return 4 } + +// LoadUint64x4 loads a Uint64x4 from an array +// +//go:noescape +func LoadUint64x4(y *[4]uint64) Uint64x4 + +// Store stores a Uint64x4 to an array +// +//go:noescape +func (x Uint64x4) Store(y *[4]uint64) + +// Mask64x4 is a 256-bit SIMD vector of 4 int64 +type Mask64x4 struct { + int64x4 v256 + vals [4]int64 +} + +// Int64x4 is a 256-bit SIMD vector of 4 int64 +type Int64x4 struct { + int64x4 v256 + vals [4]int64 +} + +// Len returns the number of elements in a Int64x4 +func (x Int64x4) Len() int { return 4 } + +// LoadInt64x4 loads a Int64x4 from an array +// +//go:noescape +func LoadInt64x4(y *[4]int64) Int64x4 + +// Store stores a Int64x4 to an array +// +//go:noescape +func (x Int64x4) Store(y *[4]int64) + +// Float64x4 is a 256-bit SIMD vector of 4 float64 +type Float64x4 struct { + float64x4 v256 + vals [4]float64 +} + +// Len returns the number of elements in a Float64x4 +func (x Float64x4) Len() int { return 4 } + +// LoadFloat64x4 loads a Float64x4 from an array +// +//go:noescape +func LoadFloat64x4(y *[4]float64) Float64x4 + +// Store stores a Float64x4 to an array +// +//go:noescape +func (x Float64x4) Store(y *[4]float64) + +// 
Mask16x16 is a 256-bit SIMD vector of 16 int16 +type Mask16x16 struct { + int16x16 v256 + vals [16]int16 +} + +// Mask32x8 is a 256-bit SIMD vector of 8 int32 +type Mask32x8 struct { + int32x8 v256 + vals [8]int32 +} + +// Float32x8 is a 256-bit SIMD vector of 8 float32 +type Float32x8 struct { + float32x8 v256 + vals [8]float32 +} + +// Len returns the number of elements in a Float32x8 +func (x Float32x8) Len() int { return 8 } + +// LoadFloat32x8 loads a Float32x8 from an array +// +//go:noescape +func LoadFloat32x8(y *[8]float32) Float32x8 + +// Store stores a Float32x8 to an array +// +//go:noescape +func (x Float32x8) Store(y *[8]float32) + +// Uint16x16 is a 256-bit SIMD vector of 16 uint16 +type Uint16x16 struct { + uint16x16 v256 + vals [16]uint16 +} + +// Len returns the number of elements in a Uint16x16 +func (x Uint16x16) Len() int { return 16 } + +// LoadUint16x16 loads a Uint16x16 from an array +// +//go:noescape +func LoadUint16x16(y *[16]uint16) Uint16x16 + +// Store stores a Uint16x16 to an array +// +//go:noescape +func (x Uint16x16) Store(y *[16]uint16) + +// Int8x32 is a 256-bit SIMD vector of 32 int8 +type Int8x32 struct { + int8x32 v256 + vals [32]int8 +} + +// Len returns the number of elements in a Int8x32 +func (x Int8x32) Len() int { return 32 } + +// LoadInt8x32 loads a Int8x32 from an array +// +//go:noescape +func LoadInt8x32(y *[32]int8) Int8x32 + +// Store stores a Int8x32 to an array +// +//go:noescape +func (x Int8x32) Store(y *[32]int8) + +// Uint8x32 is a 256-bit SIMD vector of 32 uint8 +type Uint8x32 struct { + uint8x32 v256 + vals [32]uint8 +} + +// Len returns the number of elements in a Uint8x32 +func (x Uint8x32) Len() int { return 32 } + +// LoadUint8x32 loads a Uint8x32 from an array +// +//go:noescape +func LoadUint8x32(y *[32]uint8) Uint8x32 + +// Store stores a Uint8x32 to an array +// +//go:noescape +func (x Uint8x32) Store(y *[32]uint8) + +// Mask8x32 is a 256-bit SIMD vector of 32 int8 +type Mask8x32 struct { + int8x32 v256 + vals [32]int8 +} + +// Uint32x8 is a 256-bit SIMD vector of 8 uint32 +type Uint32x8 struct { + uint32x8 v256 + vals [8]uint32 +} + +// Len returns the number of elements in a Uint32x8 +func (x Uint32x8) Len() int { return 8 } + +// LoadUint32x8 loads a Uint32x8 from an array +// +//go:noescape +func LoadUint32x8(y *[8]uint32) Uint32x8 + +// Store stores a Uint32x8 to an array +// +//go:noescape +func (x Uint32x8) Store(y *[8]uint32) + +// v512 is a tag type that tells the compiler that this is really 512-bit SIMD +type v512 struct { + _512 struct{} +} + +// Float64x8 is a 512-bit SIMD vector of 8 float64 +type Float64x8 struct { + float64x8 v512 + vals [8]float64 +} + +// Len returns the number of elements in a Float64x8 +func (x Float64x8) Len() int { return 8 } + +// LoadFloat64x8 loads a Float64x8 from an array +// +//go:noescape +func LoadFloat64x8(y *[8]float64) Float64x8 + +// Store stores a Float64x8 to an array +// +//go:noescape +func (x Float64x8) Store(y *[8]float64) + +// Mask64x8 is a 512-bit SIMD vector of 8 int64 +type Mask64x8 struct { + int64x8 v512 + vals [8]int64 +} + +// Int64x8 is a 512-bit SIMD vector of 8 int64 +type Int64x8 struct { + int64x8 v512 + vals [8]int64 +} + +// Len returns the number of elements in a Int64x8 +func (x Int64x8) Len() int { return 8 } + +// LoadInt64x8 loads a Int64x8 from an array +// +//go:noescape +func LoadInt64x8(y *[8]int64) Int64x8 + +// Store stores a Int64x8 to an array +// +//go:noescape +func (x Int64x8) Store(y *[8]int64) + +// Uint8x64 is a 512-bit SIMD vector 
of 64 uint8 +type Uint8x64 struct { + uint8x64 v512 + vals [64]uint8 +} + +// Len returns the number of elements in a Uint8x64 +func (x Uint8x64) Len() int { return 64 } + +// LoadUint8x64 loads a Uint8x64 from an array +// +//go:noescape +func LoadUint8x64(y *[64]uint8) Uint8x64 + +// Store stores a Uint8x64 to an array +// +//go:noescape +func (x Uint8x64) Store(y *[64]uint8) + +// Mask8x64 is a 512-bit SIMD vector of 64 int8 +type Mask8x64 struct { + int8x64 v512 + vals [64]int8 +} + +// Int8x64 is a 512-bit SIMD vector of 64 int8 +type Int8x64 struct { + int8x64 v512 + vals [64]int8 +} + +// Len returns the number of elements in a Int8x64 +func (x Int8x64) Len() int { return 64 } + +// LoadInt8x64 loads a Int8x64 from an array +// +//go:noescape +func LoadInt8x64(y *[64]int8) Int8x64 + +// Store stores a Int8x64 to an array +// +//go:noescape +func (x Int8x64) Store(y *[64]int8) + +// Float32x16 is a 512-bit SIMD vector of 16 float32 +type Float32x16 struct { + float32x16 v512 + vals [16]float32 +} + +// Len returns the number of elements in a Float32x16 +func (x Float32x16) Len() int { return 16 } + +// LoadFloat32x16 loads a Float32x16 from an array +// +//go:noescape +func LoadFloat32x16(y *[16]float32) Float32x16 + +// Store stores a Float32x16 to an array +// +//go:noescape +func (x Float32x16) Store(y *[16]float32) + +// Mask32x16 is a 512-bit SIMD vector of 16 int32 +type Mask32x16 struct { + int32x16 v512 + vals [16]int32 +} + +// Int32x16 is a 512-bit SIMD vector of 16 int32 +type Int32x16 struct { + int32x16 v512 + vals [16]int32 +} + +// Len returns the number of elements in a Int32x16 +func (x Int32x16) Len() int { return 16 } + +// LoadInt32x16 loads a Int32x16 from an array +// +//go:noescape +func LoadInt32x16(y *[16]int32) Int32x16 + +// Store stores a Int32x16 to an array +// +//go:noescape +func (x Int32x16) Store(y *[16]int32) + +// Uint16x32 is a 512-bit SIMD vector of 32 uint16 +type Uint16x32 struct { + uint16x32 v512 + vals [32]uint16 +} + +// Len returns the number of elements in a Uint16x32 +func (x Uint16x32) Len() int { return 32 } + +// LoadUint16x32 loads a Uint16x32 from an array +// +//go:noescape +func LoadUint16x32(y *[32]uint16) Uint16x32 + +// Store stores a Uint16x32 to an array +// +//go:noescape +func (x Uint16x32) Store(y *[32]uint16) + +// Mask16x32 is a 512-bit SIMD vector of 32 int16 +type Mask16x32 struct { + int16x32 v512 + vals [32]int16 +} + +// Int16x32 is a 512-bit SIMD vector of 32 int16 +type Int16x32 struct { + int16x32 v512 + vals [32]int16 +} + +// Len returns the number of elements in a Int16x32 +func (x Int16x32) Len() int { return 32 } + +// LoadInt16x32 loads a Int16x32 from an array +// +//go:noescape +func LoadInt16x32(y *[32]int16) Int16x32 + +// Store stores a Int16x32 to an array +// +//go:noescape +func (x Int16x32) Store(y *[32]int16) + +// Uint64x8 is a 512-bit SIMD vector of 8 uint64 +type Uint64x8 struct { + uint64x8 v512 + vals [8]uint64 +} + +// Len returns the number of elements in a Uint64x8 +func (x Uint64x8) Len() int { return 8 } + +// LoadUint64x8 loads a Uint64x8 from an array +// +//go:noescape +func LoadUint64x8(y *[8]uint64) Uint64x8 + +// Store stores a Uint64x8 to an array +// +//go:noescape +func (x Uint64x8) Store(y *[8]uint64) + +// Uint32x16 is a 512-bit SIMD vector of 16 uint32 +type Uint32x16 struct { + uint32x16 v512 + vals [16]uint32 +} + +// Len returns the number of elements in a Uint32x16 +func (x Uint32x16) Len() int { return 16 } + +// LoadUint32x16 loads a Uint32x16 from an array +// 
+//go:noescape +func LoadUint32x16(y *[16]uint32) Uint32x16 + +// Store stores a Uint32x16 to an array +// +//go:noescape +func (x Uint32x16) Store(y *[16]uint32) From 2eaa5a0703167635287457ec562a6005e3397dbf Mon Sep 17 00:00:00 2001 From: David Chase Date: Thu, 5 Jun 2025 15:09:19 -0400 Subject: [PATCH 014/139] [dev.simd] simd: add functions+methods to load-from/store-to slices Includes the generator (which is short and uncomplicated) and a few tests. Change-Id: Icba9de042935a59bee34b278306c241b7651f5b4 Reviewed-on: https://go-review.googlesource.com/c/go/+/679258 Auto-Submit: David Chase LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui --- src/go/doc/comment/std_test.go | 5 - src/simd/cpu.go | 2 +- src/simd/genslice.go | 117 +++++++++++++ src/simd/no_tag.go | 9 + src/simd/simd_test.go | 63 +++++++ src/simd/slice_amd64.go | 308 +++++++++++++++++++++++++++++++++ 6 files changed, 498 insertions(+), 6 deletions(-) create mode 100644 src/simd/genslice.go create mode 100644 src/simd/no_tag.go create mode 100644 src/simd/slice_amd64.go diff --git a/src/go/doc/comment/std_test.go b/src/go/doc/comment/std_test.go index 9a40d1d09a73b4..bd0379856a4d8c 100644 --- a/src/go/doc/comment/std_test.go +++ b/src/go/doc/comment/std_test.go @@ -5,7 +5,6 @@ package comment import ( - "internal/buildcfg" "internal/diff" "internal/testenv" "slices" @@ -25,10 +24,6 @@ func TestStd(t *testing.T) { list = append(list, pkg) } } - // TODO remove this when simd is the default, for now fake its existence - if !buildcfg.Experiment.SIMD { - list = append(list, "simd") - } slices.Sort(list) have := strings.Join(stdPkgs, "\n") + "\n" diff --git a/src/simd/cpu.go b/src/simd/cpu.go index 84bf03cfb03ade..52a5614e68eac4 100644 --- a/src/simd/cpu.go +++ b/src/simd/cpu.go @@ -4,7 +4,7 @@ //go:build goexperiment.simd -// the build condition == if the experiment is not on, cmd/api TestCheck will see this and complain +// The build condition == if the experiment is not on, cmd/api TestCheck will see this and complain // see also go/doc/comment, where "simd" is inserted to the package list of the experiment is not on. package simd diff --git a/src/simd/genslice.go b/src/simd/genslice.go new file mode 100644 index 00000000000000..77b9b41c09754e --- /dev/null +++ b/src/simd/genslice.go @@ -0,0 +1,117 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore + +package main + +// this generates all the code to load and store simd +// vectors to/from slices. 
+ +import ( + "bytes" + "flag" + "fmt" + "go/format" + "io" + "os" + "strings" +) + +// //go:noescape +// func LoadUint8x16Slice(s []uint8) Uint8x16 { +// return LoadUint8x16((*[16]uint8)(s[:16])) +// } + +// //go:noescape +// func (x Uint8x16) StoreSlice(s []uint8) { +// x.Store((*[16]uint8)(s[:16])) +// } + +func slice(e string, w, c int, out io.Writer) { + b := w * c + if b < 128 || b > 512 { + return + } + E := strings.ToUpper(e[:1]) + e[1:] + t := fmt.Sprintf("%s%d", e, w) + v := fmt.Sprintf("%s%dx%d", E, w, c) + a := "a" + if strings.Contains("aeiou", e[:1]) { + a = "an" + } + fmt.Fprintf(out, + ` +// Load%sSlice loads %s %s from a slice of at least %d %ss +func Load%sSlice(s []%s) %s { + return Load%s((*[%d]%s)(s)) +} +`, v, a, v, c, t, v, t, v, v, c, t) + + fmt.Fprintf(out, + ` +// StoreSlice stores x into a slice of at least %d %ss +func (x %s) StoreSlice(s []%s) { + x.Store((*[%d]%s)(s)) +} +`, c, t, v, t, c, t) + +} + +func prologue(s string, out io.Writer) { + fmt.Fprintf(out, + `// Code generated by '%s'; DO NOT EDIT. + +//go:build goexperiment.simd + +// The build condition == if the experiment is not on, cmd/api TestCheck will see this and complain +// see also go/doc/comment, where "simd" is inserted to the package list of the experiment is not on. + +package simd + +`, s) +} + +func main() { + filename := flag.String("o", "", "write generated code to this file") + flag.Parse() + + ofile := os.Stdout + + if *filename != "" { + var err error + ofile, err = os.Create(*filename) + if err != nil { + fmt.Fprintf(os.Stderr, "Could not create the output file for the generated code, %v", err) + os.Exit(1) + } + } + + out := new(bytes.Buffer) + + prologue("go run genslice.go -o slice_amd64.go", out) + + vecs := []int{128, 256, 512} + ints := []int{8, 16, 32, 64} + floats := []int{32, 64} + for _, v := range vecs { + for _, w := range ints { + c := v / w + slice("int", w, c, out) + slice("uint", w, c, out) + } + for _, w := range floats { + c := v / w + slice("float", w, c, out) + } + } + b, err := format.Source(out.Bytes()) + if err != nil { + fmt.Fprintf(os.Stderr, "There was a problem formatting the generated code, %v", err) + os.Exit(1) + } else { + ofile.Write(b) + ofile.Close() + } +} diff --git a/src/simd/no_tag.go b/src/simd/no_tag.go new file mode 100644 index 00000000000000..c11fd51b2345d6 --- /dev/null +++ b/src/simd/no_tag.go @@ -0,0 +1,9 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package simd + +// This file has no build tag, so that go generate can run without a build tag. + +//go:generate go run genslice.go -o slice_amd64.go diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index e611092c4335c2..37e07c96d78618 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -163,3 +163,66 @@ func TestSub(t *testing.T) { } } } + +// checkInt8Slices ensures that b and a are equal, to the end of b. +// also serves to use the slices, to prevent accidental optimization. 
+func checkInt8Slices(t *testing.T, a, b []int8) { + for i := range b { + if a[i] != b[i] { + t.Errorf("a and b differ at index %d, a=%d, b=%d", i, a[i], b[i]) + } + } +} + +func TestSlicesInt8(t *testing.T) { + a := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} + v := simd.LoadInt8x32Slice(a) + b := make([]int8, 32, 32) + v.StoreSlice(b) + checkInt8Slices(t, a, b) +} + +func TestSlicesInt8TooShortLoad(t *testing.T) { + defer func() { + if r := recover(); r != nil { + t.Logf("Saw EXPECTED panic %v", r) + } else { + t.Errorf("Did not see expected panic") + } + }() + a := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31} // TOO SHORT, should panic + v := simd.LoadInt8x32Slice(a) + b := make([]int8, 32, 32) + v.StoreSlice(b) + checkInt8Slices(t, a, b) +} + +func TestSlicesInt8TooShortStore(t *testing.T) { + defer func() { + if r := recover(); r != nil { + t.Logf("Saw EXPECTED panic %v", r) + } else { + t.Errorf("Did not see expected panic") + } + }() + a := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} + v := simd.LoadInt8x32Slice(a) + b := make([]int8, 31) // TOO SHORT, should panic + v.StoreSlice(b) + checkInt8Slices(t, a, b) +} + +func TestSlicesFloat64(t *testing.T) { + a := []float64{1, 2, 3, 4, 5, 6, 7, 8} // too long, should be fine + v := simd.LoadFloat64x4Slice(a) + b := make([]float64, 4, 4) + v.StoreSlice(b) + for i := range b { + if a[i] != b[i] { + t.Errorf("a and b differ at index %d, a=%f, b=%f", i, a[i], b[i]) + } + } +} diff --git a/src/simd/slice_amd64.go b/src/simd/slice_amd64.go new file mode 100644 index 00000000000000..10050e6b9f82a2 --- /dev/null +++ b/src/simd/slice_amd64.go @@ -0,0 +1,308 @@ +// Code generated by 'go run genslice.go -o slice_amd64.go'; DO NOT EDIT. + +//go:build goexperiment.simd + +// The build condition == if the experiment is not on, cmd/api TestCheck will see this and complain +// see also go/doc/comment, where "simd" is inserted to the package list of the experiment is not on. 
+ +package simd + +// LoadInt8x16Slice loads an Int8x16 from a slice of at least 16 int8s +func LoadInt8x16Slice(s []int8) Int8x16 { + return LoadInt8x16((*[16]int8)(s)) +} + +// StoreSlice stores x into a slice of at least 16 int8s +func (x Int8x16) StoreSlice(s []int8) { + x.Store((*[16]int8)(s)) +} + +// LoadUint8x16Slice loads an Uint8x16 from a slice of at least 16 uint8s +func LoadUint8x16Slice(s []uint8) Uint8x16 { + return LoadUint8x16((*[16]uint8)(s)) +} + +// StoreSlice stores x into a slice of at least 16 uint8s +func (x Uint8x16) StoreSlice(s []uint8) { + x.Store((*[16]uint8)(s)) +} + +// LoadInt16x8Slice loads an Int16x8 from a slice of at least 8 int16s +func LoadInt16x8Slice(s []int16) Int16x8 { + return LoadInt16x8((*[8]int16)(s)) +} + +// StoreSlice stores x into a slice of at least 8 int16s +func (x Int16x8) StoreSlice(s []int16) { + x.Store((*[8]int16)(s)) +} + +// LoadUint16x8Slice loads an Uint16x8 from a slice of at least 8 uint16s +func LoadUint16x8Slice(s []uint16) Uint16x8 { + return LoadUint16x8((*[8]uint16)(s)) +} + +// StoreSlice stores x into a slice of at least 8 uint16s +func (x Uint16x8) StoreSlice(s []uint16) { + x.Store((*[8]uint16)(s)) +} + +// LoadInt32x4Slice loads an Int32x4 from a slice of at least 4 int32s +func LoadInt32x4Slice(s []int32) Int32x4 { + return LoadInt32x4((*[4]int32)(s)) +} + +// StoreSlice stores x into a slice of at least 4 int32s +func (x Int32x4) StoreSlice(s []int32) { + x.Store((*[4]int32)(s)) +} + +// LoadUint32x4Slice loads an Uint32x4 from a slice of at least 4 uint32s +func LoadUint32x4Slice(s []uint32) Uint32x4 { + return LoadUint32x4((*[4]uint32)(s)) +} + +// StoreSlice stores x into a slice of at least 4 uint32s +func (x Uint32x4) StoreSlice(s []uint32) { + x.Store((*[4]uint32)(s)) +} + +// LoadInt64x2Slice loads an Int64x2 from a slice of at least 2 int64s +func LoadInt64x2Slice(s []int64) Int64x2 { + return LoadInt64x2((*[2]int64)(s)) +} + +// StoreSlice stores x into a slice of at least 2 int64s +func (x Int64x2) StoreSlice(s []int64) { + x.Store((*[2]int64)(s)) +} + +// LoadUint64x2Slice loads an Uint64x2 from a slice of at least 2 uint64s +func LoadUint64x2Slice(s []uint64) Uint64x2 { + return LoadUint64x2((*[2]uint64)(s)) +} + +// StoreSlice stores x into a slice of at least 2 uint64s +func (x Uint64x2) StoreSlice(s []uint64) { + x.Store((*[2]uint64)(s)) +} + +// LoadFloat32x4Slice loads a Float32x4 from a slice of at least 4 float32s +func LoadFloat32x4Slice(s []float32) Float32x4 { + return LoadFloat32x4((*[4]float32)(s)) +} + +// StoreSlice stores x into a slice of at least 4 float32s +func (x Float32x4) StoreSlice(s []float32) { + x.Store((*[4]float32)(s)) +} + +// LoadFloat64x2Slice loads a Float64x2 from a slice of at least 2 float64s +func LoadFloat64x2Slice(s []float64) Float64x2 { + return LoadFloat64x2((*[2]float64)(s)) +} + +// StoreSlice stores x into a slice of at least 2 float64s +func (x Float64x2) StoreSlice(s []float64) { + x.Store((*[2]float64)(s)) +} + +// LoadInt8x32Slice loads an Int8x32 from a slice of at least 32 int8s +func LoadInt8x32Slice(s []int8) Int8x32 { + return LoadInt8x32((*[32]int8)(s)) +} + +// StoreSlice stores x into a slice of at least 32 int8s +func (x Int8x32) StoreSlice(s []int8) { + x.Store((*[32]int8)(s)) +} + +// LoadUint8x32Slice loads an Uint8x32 from a slice of at least 32 uint8s +func LoadUint8x32Slice(s []uint8) Uint8x32 { + return LoadUint8x32((*[32]uint8)(s)) +} + +// StoreSlice stores x into a slice of at least 32 uint8s +func (x Uint8x32) StoreSlice(s []uint8) { + 
x.Store((*[32]uint8)(s)) +} + +// LoadInt16x16Slice loads an Int16x16 from a slice of at least 16 int16s +func LoadInt16x16Slice(s []int16) Int16x16 { + return LoadInt16x16((*[16]int16)(s)) +} + +// StoreSlice stores x into a slice of at least 16 int16s +func (x Int16x16) StoreSlice(s []int16) { + x.Store((*[16]int16)(s)) +} + +// LoadUint16x16Slice loads an Uint16x16 from a slice of at least 16 uint16s +func LoadUint16x16Slice(s []uint16) Uint16x16 { + return LoadUint16x16((*[16]uint16)(s)) +} + +// StoreSlice stores x into a slice of at least 16 uint16s +func (x Uint16x16) StoreSlice(s []uint16) { + x.Store((*[16]uint16)(s)) +} + +// LoadInt32x8Slice loads an Int32x8 from a slice of at least 8 int32s +func LoadInt32x8Slice(s []int32) Int32x8 { + return LoadInt32x8((*[8]int32)(s)) +} + +// StoreSlice stores x into a slice of at least 8 int32s +func (x Int32x8) StoreSlice(s []int32) { + x.Store((*[8]int32)(s)) +} + +// LoadUint32x8Slice loads an Uint32x8 from a slice of at least 8 uint32s +func LoadUint32x8Slice(s []uint32) Uint32x8 { + return LoadUint32x8((*[8]uint32)(s)) +} + +// StoreSlice stores x into a slice of at least 8 uint32s +func (x Uint32x8) StoreSlice(s []uint32) { + x.Store((*[8]uint32)(s)) +} + +// LoadInt64x4Slice loads an Int64x4 from a slice of at least 4 int64s +func LoadInt64x4Slice(s []int64) Int64x4 { + return LoadInt64x4((*[4]int64)(s)) +} + +// StoreSlice stores x into a slice of at least 4 int64s +func (x Int64x4) StoreSlice(s []int64) { + x.Store((*[4]int64)(s)) +} + +// LoadUint64x4Slice loads an Uint64x4 from a slice of at least 4 uint64s +func LoadUint64x4Slice(s []uint64) Uint64x4 { + return LoadUint64x4((*[4]uint64)(s)) +} + +// StoreSlice stores x into a slice of at least 4 uint64s +func (x Uint64x4) StoreSlice(s []uint64) { + x.Store((*[4]uint64)(s)) +} + +// LoadFloat32x8Slice loads a Float32x8 from a slice of at least 8 float32s +func LoadFloat32x8Slice(s []float32) Float32x8 { + return LoadFloat32x8((*[8]float32)(s)) +} + +// StoreSlice stores x into a slice of at least 8 float32s +func (x Float32x8) StoreSlice(s []float32) { + x.Store((*[8]float32)(s)) +} + +// LoadFloat64x4Slice loads a Float64x4 from a slice of at least 4 float64s +func LoadFloat64x4Slice(s []float64) Float64x4 { + return LoadFloat64x4((*[4]float64)(s)) +} + +// StoreSlice stores x into a slice of at least 4 float64s +func (x Float64x4) StoreSlice(s []float64) { + x.Store((*[4]float64)(s)) +} + +// LoadInt8x64Slice loads an Int8x64 from a slice of at least 64 int8s +func LoadInt8x64Slice(s []int8) Int8x64 { + return LoadInt8x64((*[64]int8)(s)) +} + +// StoreSlice stores x into a slice of at least 64 int8s +func (x Int8x64) StoreSlice(s []int8) { + x.Store((*[64]int8)(s)) +} + +// LoadUint8x64Slice loads an Uint8x64 from a slice of at least 64 uint8s +func LoadUint8x64Slice(s []uint8) Uint8x64 { + return LoadUint8x64((*[64]uint8)(s)) +} + +// StoreSlice stores x into a slice of at least 64 uint8s +func (x Uint8x64) StoreSlice(s []uint8) { + x.Store((*[64]uint8)(s)) +} + +// LoadInt16x32Slice loads an Int16x32 from a slice of at least 32 int16s +func LoadInt16x32Slice(s []int16) Int16x32 { + return LoadInt16x32((*[32]int16)(s)) +} + +// StoreSlice stores x into a slice of at least 32 int16s +func (x Int16x32) StoreSlice(s []int16) { + x.Store((*[32]int16)(s)) +} + +// LoadUint16x32Slice loads an Uint16x32 from a slice of at least 32 uint16s +func LoadUint16x32Slice(s []uint16) Uint16x32 { + return LoadUint16x32((*[32]uint16)(s)) +} + +// StoreSlice stores x into a slice of at least 32 
uint16s
+func (x Uint16x32) StoreSlice(s []uint16) {
+	x.Store((*[32]uint16)(s))
+}
+
+// LoadInt32x16Slice loads an Int32x16 from a slice of at least 16 int32s
+func LoadInt32x16Slice(s []int32) Int32x16 {
+	return LoadInt32x16((*[16]int32)(s))
+}
+
+// StoreSlice stores x into a slice of at least 16 int32s
+func (x Int32x16) StoreSlice(s []int32) {
+	x.Store((*[16]int32)(s))
+}
+
+// LoadUint32x16Slice loads an Uint32x16 from a slice of at least 16 uint32s
+func LoadUint32x16Slice(s []uint32) Uint32x16 {
+	return LoadUint32x16((*[16]uint32)(s))
+}
+
+// StoreSlice stores x into a slice of at least 16 uint32s
+func (x Uint32x16) StoreSlice(s []uint32) {
+	x.Store((*[16]uint32)(s))
+}
+
+// LoadInt64x8Slice loads an Int64x8 from a slice of at least 8 int64s
+func LoadInt64x8Slice(s []int64) Int64x8 {
+	return LoadInt64x8((*[8]int64)(s))
+}
+
+// StoreSlice stores x into a slice of at least 8 int64s
+func (x Int64x8) StoreSlice(s []int64) {
+	x.Store((*[8]int64)(s))
+}
+
+// LoadUint64x8Slice loads an Uint64x8 from a slice of at least 8 uint64s
+func LoadUint64x8Slice(s []uint64) Uint64x8 {
+	return LoadUint64x8((*[8]uint64)(s))
+}
+
+// StoreSlice stores x into a slice of at least 8 uint64s
+func (x Uint64x8) StoreSlice(s []uint64) {
+	x.Store((*[8]uint64)(s))
+}
+
+// LoadFloat32x16Slice loads a Float32x16 from a slice of at least 16 float32s
+func LoadFloat32x16Slice(s []float32) Float32x16 {
+	return LoadFloat32x16((*[16]float32)(s))
+}
+
+// StoreSlice stores x into a slice of at least 16 float32s
+func (x Float32x16) StoreSlice(s []float32) {
+	x.Store((*[16]float32)(s))
+}
+
+// LoadFloat64x8Slice loads a Float64x8 from a slice of at least 8 float64s
+func LoadFloat64x8Slice(s []float64) Float64x8 {
+	return LoadFloat64x8((*[8]float64)(s))
+}
+
+// StoreSlice stores x into a slice of at least 8 float64s
+func (x Float64x8) StoreSlice(s []float64) {
+	x.Store((*[8]float64)(s))
+}
From 6bc35057730590ce6d01c589d3ef51400d832981 Mon Sep 17 00:00:00 2001
From: Junyang Shao
Date: Mon, 9 Jun 2025 16:57:38 +0000
Subject: [PATCH 015/139] [dev.simd] cmd/compile: add fp3fp1 register shape

This is to accommodate dot product instructions.
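For illustration, the new shape describes three vector-register inputs and one
vector-register output. The regInfo line below is the one this patch adds; the
opData entry is only a hypothetical sketch of how a three-input op would refer
to it (the name is a placeholder, not an op added by this patch):

	fp3fp1 = regInfo{inputs: []regMask{fp, fp, fp}, outputs: fponly}

	// Hypothetical three-input SIMD op declaration using the new shape:
	{name: "VDPEXAMPLE512", argLength: 3, reg: fp3fp1, asm: "VDPEXAMPLE512", typ: "Vec512"},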
Change-Id: I88b21f848d7a51ad036bb3555c30f12b72571b2b Reviewed-on: https://go-review.googlesource.com/c/go/+/680235 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI Auto-Submit: Junyang Shao --- src/cmd/compile/internal/ssa/_gen/AMD64Ops.go | 3 ++- src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go index 2b610674844725..e5cc261bcf625d 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go @@ -188,6 +188,7 @@ func init() { fp1m1fp1 = regInfo{inputs: []regMask{fp, mask}, outputs: fponly} fp2m1fp1 = regInfo{inputs: []regMask{fp, fp, mask}, outputs: fponly} fp2m1m1 = regInfo{inputs: []regMask{fp, fp, mask}, outputs: maskonly} + fp3fp1 = regInfo{inputs: []regMask{fp, fp, fp}, outputs: fponly} prefreg = regInfo{inputs: []regMask{gpspsbg}} ) @@ -1298,7 +1299,7 @@ func init() { pkg: "cmd/internal/obj/x86", genfile: "../../amd64/ssa.go", genSIMDfile: "../../amd64/simdssa.go", - ops: append(AMD64ops, simdAMD64Ops(fp11, fp21, fp2m1, fp1m1fp1, fp2m1fp1, fp2m1m1)...), // AMD64ops, + ops: append(AMD64ops, simdAMD64Ops(fp11, fp21, fp2m1, fp1m1fp1, fp2m1fp1, fp2m1m1, fp3fp1)...), // AMD64ops, blocks: AMD64blocks, regnames: regNamesAMD64, ParamIntRegNames: "AX BX CX DI SI R8 R9 R10 R11", diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index b08c5f230ffa44..c7ab52399299f9 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -1,7 +1,7 @@ // Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. 
package main -func simdAMD64Ops(fp1fp1, fp2fp1, fp2m1, fp1m1fp1, fp2m1fp1, fp2m1m1 regInfo) []opData { +func simdAMD64Ops(fp1fp1, fp2fp1, fp2m1, fp1m1fp1, fp2m1fp1, fp2m1m1, fp3fp1 regInfo) []opData { return []opData{ {name: "VADDPS512", argLength: 2, reg: fp2fp1, asm: "VADDPS", commutative: true, typ: "Vec512"}, {name: "VANDPS512", argLength: 2, reg: fp2fp1, asm: "VANDPS", commutative: true, typ: "Vec512"}, From 884f646966efdc1b2ee6dc7728bade7ceef33ace Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Mon, 9 Jun 2025 20:05:57 +0000 Subject: [PATCH 016/139] [dev.simd] cmd/compile: add fp3m1fp1 shape to regalloc Change-Id: Ie89cf521f5ae59de1934f6f49bb5fd3f63cc5883 Reviewed-on: https://go-review.googlesource.com/c/go/+/680236 Auto-Submit: Junyang Shao LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/ssa/_gen/AMD64Ops.go | 3 ++- src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go index e5cc261bcf625d..fbc3129de622ed 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go @@ -189,6 +189,7 @@ func init() { fp2m1fp1 = regInfo{inputs: []regMask{fp, fp, mask}, outputs: fponly} fp2m1m1 = regInfo{inputs: []regMask{fp, fp, mask}, outputs: maskonly} fp3fp1 = regInfo{inputs: []regMask{fp, fp, fp}, outputs: fponly} + fp3m1fp1 = regInfo{inputs: []regMask{fp, fp, fp, mask}, outputs: fponly} prefreg = regInfo{inputs: []regMask{gpspsbg}} ) @@ -1299,7 +1300,7 @@ func init() { pkg: "cmd/internal/obj/x86", genfile: "../../amd64/ssa.go", genSIMDfile: "../../amd64/simdssa.go", - ops: append(AMD64ops, simdAMD64Ops(fp11, fp21, fp2m1, fp1m1fp1, fp2m1fp1, fp2m1m1, fp3fp1)...), // AMD64ops, + ops: append(AMD64ops, simdAMD64Ops(fp11, fp21, fp2m1, fp1m1fp1, fp2m1fp1, fp2m1m1, fp3fp1, fp3m1fp1)...), // AMD64ops, blocks: AMD64blocks, regnames: regNamesAMD64, ParamIntRegNames: "AX BX CX DI SI R8 R9 R10 R11", diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index c7ab52399299f9..a27ed4afb9ef70 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -1,7 +1,7 @@ // Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. package main -func simdAMD64Ops(fp1fp1, fp2fp1, fp2m1, fp1m1fp1, fp2m1fp1, fp2m1m1, fp3fp1 regInfo) []opData { +func simdAMD64Ops(fp1fp1, fp2fp1, fp2m1, fp1m1fp1, fp2m1fp1, fp2m1m1, fp3fp1, fp3m1fp1 regInfo) []opData { return []opData{ {name: "VADDPS512", argLength: 2, reg: fp2fp1, asm: "VADDPS", commutative: true, typ: "Vec512"}, {name: "VANDPS512", argLength: 2, reg: fp2fp1, asm: "VANDPS", commutative: true, typ: "Vec512"}, From dfa6c7426316fb81c5f29b260b2de7822680ffd3 Mon Sep 17 00:00:00 2001 From: Austin Clements Date: Thu, 12 Jun 2025 18:37:01 -0400 Subject: [PATCH 017/139] [dev.simd] runtime: eliminate global state in mkpreempt.go We're going to start writing two files, so having a single global file we're writing will be a problem. This has no effect on the generated code. 
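In outline, the refactor replaces the package-level writer with a small
generator value threaded through every architecture function (the type below
is copied from the diff; the genAMD64 line is only a shortened sketch of the
new signature):

	type gen struct {
		w      io.Writer
		goarch string
	}

	// Every arch generator now takes the generator explicitly:
	//	func genAMD64(g *gen) { p := g.p; ... }
	// so emitting a second output file only needs a second gen value.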
Change-Id: I49897ea0c6500a29eac89b597d75c0eb3e9b6706 Reviewed-on: https://go-review.googlesource.com/c/go/+/680897 Reviewed-by: Cherry Mui LUCI-TryBot-Result: Go LUCI --- src/runtime/mkpreempt.go | 166 ++++++++++++++++++++++----------------- 1 file changed, 94 insertions(+), 72 deletions(-) diff --git a/src/runtime/mkpreempt.go b/src/runtime/mkpreempt.go index 6a9cf77a43fcf0..ec900a23d257e9 100644 --- a/src/runtime/mkpreempt.go +++ b/src/runtime/mkpreempt.go @@ -73,16 +73,14 @@ var regNamesAMD64 = []string{ "X15", } -var out io.Writer - -var arches = map[string]func(){ +var arches = map[string]func(g *gen){ "386": gen386, "amd64": genAMD64, "arm": genARM, "arm64": genARM64, "loong64": genLoong64, - "mips64x": func() { genMIPS(true) }, - "mipsx": func() { genMIPS(false) }, + "mips64x": func(g *gen) { genMIPS(g, true) }, + "mipsx": func(g *gen) { genMIPS(g, false) }, "ppc64x": genPPC64, "riscv64": genRISCV64, "s390x": genS390X, @@ -93,53 +91,58 @@ var beLe = map[string]bool{"mips64x": true, "mipsx": true, "ppc64x": true} func main() { flag.Parse() if flag.NArg() > 0 { - out = os.Stdout for _, arch := range flag.Args() { - gen, ok := arches[arch] + genFn, ok := arches[arch] if !ok { log.Fatalf("unknown arch %s", arch) } - header(arch) - gen() + g := gen{os.Stdout, arch} + g.asmHeader() + genFn(&g) } return } - for arch, gen := range arches { + for arch, genFn := range arches { f, err := os.Create(fmt.Sprintf("preempt_%s.s", arch)) if err != nil { log.Fatal(err) } - out = f - header(arch) - gen() + g := gen{f, arch} + g.asmHeader() + genFn(&g) if err := f.Close(); err != nil { log.Fatal(err) } } } -func header(arch string) { - fmt.Fprintf(out, "// Code generated by mkpreempt.go; DO NOT EDIT.\n\n") - if beLe[arch] { - base := arch[:len(arch)-1] - fmt.Fprintf(out, "//go:build %s || %sle\n\n", base, base) +type gen struct { + w io.Writer + goarch string +} + +func (g *gen) asmHeader() { + fmt.Fprintf(g.w, "// Code generated by mkpreempt.go; DO NOT EDIT.\n\n") + if beLe[g.goarch] { + base := g.goarch[:len(g.goarch)-1] + fmt.Fprintf(g.w, "//go:build %s || %sle\n\n", base, base) } - fmt.Fprintf(out, "#include \"go_asm.h\"\n") - if arch == "amd64" { - fmt.Fprintf(out, "#include \"asm_amd64.h\"\n") + fmt.Fprintf(g.w, "#include \"go_asm.h\"\n") + if g.goarch == "amd64" { + fmt.Fprintf(g.w, "#include \"asm_amd64.h\"\n") } - fmt.Fprintf(out, "#include \"textflag.h\"\n\n") - fmt.Fprintf(out, "TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0\n") + fmt.Fprintf(g.w, "#include \"textflag.h\"\n\n") + fmt.Fprintf(g.w, "TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0\n") } -func p(f string, args ...any) { +func (g *gen) p(f string, args ...any) { fmted := fmt.Sprintf(f, args...) 
- fmt.Fprintf(out, "\t%s\n", strings.ReplaceAll(fmted, "\n", "\n\t")) + fmt.Fprintf(g.w, "\t%s\n", strings.ReplaceAll(fmted, "\n", "\n\t")) } -func label(l string) { - fmt.Fprintf(out, "%s\n", l) +func (g *gen) label(l string) { + fmt.Fprintf(g.w, "%s\n", l) } type layout struct { @@ -176,28 +179,30 @@ func (l *layout) addSpecial(save, restore string, size int) { l.stack += size } -func (l *layout) save() { +func (l *layout) save(g *gen) { for _, reg := range l.regs { if reg.save != "" { - p(reg.save, reg.pos) + g.p(reg.save, reg.pos) } else { - p("%s %s, %d(%s)", reg.saveOp, reg.reg, reg.pos, l.sp) + g.p("%s %s, %d(%s)", reg.saveOp, reg.reg, reg.pos, l.sp) } } } -func (l *layout) restore() { +func (l *layout) restore(g *gen) { for i := len(l.regs) - 1; i >= 0; i-- { reg := l.regs[i] if reg.restore != "" { - p(reg.restore, reg.pos) + g.p(reg.restore, reg.pos) } else { - p("%s %d(%s), %s", reg.restoreOp, reg.pos, l.sp, reg.reg) + g.p("%s %d(%s), %s", reg.restoreOp, reg.pos, l.sp, reg.reg) } } } -func gen386() { +func gen386(g *gen) { + p := g.p + p("PUSHFL") // Save general purpose registers. var l = layout{sp: "SP"} @@ -218,22 +223,24 @@ func gen386() { p("ADJSP $%d", lSSE.stack) p("NOP SP") - l.save() + l.save(g) p("#ifndef %s", softfloat) - lSSE.save() + lSSE.save(g) p("#endif") p("CALL ·asyncPreempt2(SB)") p("#ifndef %s", softfloat) - lSSE.restore() + lSSE.restore(g) p("#endif") - l.restore() + l.restore(g) p("ADJSP $%d", -lSSE.stack) p("POPFL") p("RET") } -func genAMD64() { +func genAMD64(g *gen) { + p := g.p + // Assign stack offsets. var l = layout{sp: "SP"} for _, reg := range regNamesAMD64 { @@ -262,19 +269,21 @@ func genAMD64() { p("// But vet doesn't know ADJSP, so suppress vet stack checking") p("NOP SP") - l.save() + l.save(g) - lSSE.save() + lSSE.save(g) p("CALL ·asyncPreempt2(SB)") - lSSE.restore() - l.restore() + lSSE.restore(g) + l.restore(g) p("ADJSP $%d", -lSSE.stack) p("POPFQ") p("POPQ BP") p("RET") } -func genARM() { +func genARM(g *gen) { + p := g.p + // Add integer registers R0-R12. // R13 (SP), R14 (LR), R15 (PC) are special and not saved here. var l = layout{sp: "R13", stack: 4} // add LR slot @@ -303,22 +312,23 @@ func genARM() { } p("MOVW.W R14, -%d(R13)", lfp.stack) // allocate frame, save LR - l.save() + l.save(g) p("MOVB ·goarmsoftfp(SB), R0\nCMP $0, R0\nBNE nofp") // test goarmsoftfp, and skip FP registers if goarmsoftfp!=0. - lfp.save() - label("nofp:") + lfp.save(g) + g.label("nofp:") p("CALL ·asyncPreempt2(SB)") p("MOVB ·goarmsoftfp(SB), R0\nCMP $0, R0\nBNE nofp2") // test goarmsoftfp, and skip FP registers if goarmsoftfp!=0. - lfp.restore() - label("nofp2:") - l.restore() + lfp.restore(g) + g.label("nofp2:") + l.restore(g) p("MOVW %d(R13), R14", lfp.stack) // sigctxt.pushCall pushes LR on stack, restore it p("MOVW.P %d(R13), R15", lfp.stack+4) // load PC, pop frame (including the space pushed by sigctxt.pushCall) p("UNDEF") // shouldn't get here } -func genARM64() { +func genARM64(g *gen) { + p := g.p // Add integer registers R0-R26 // R27 (REGTMP), R28 (g), R29 (FP), R30 (LR), R31 (SP) are special // and not saved here. 
@@ -362,9 +372,9 @@ func genARM64() { p("MOVD R30, (RSP)") p("#endif") - l.save() + l.save(g) p("CALL ·asyncPreempt2(SB)") - l.restore() + l.restore(g) p("MOVD %d(RSP), R30", l.stack) // sigctxt.pushCall has pushed LR (at interrupt) on stack, restore it p("MOVD -8(RSP), R29") // restore frame pointer @@ -373,7 +383,9 @@ func genARM64() { p("RET (R27)") } -func genMIPS(_64bit bool) { +func genMIPS(g *gen, _64bit bool) { + p := g.p + mov := "MOVW" movf := "MOVF" add := "ADD" @@ -428,15 +440,15 @@ func genMIPS(_64bit bool) { p(mov+" R31, -%d(R29)", lfp.stack) p(sub+" $%d, R29", lfp.stack) - l.save() + l.save(g) p("#ifndef %s", softfloat) - lfp.save() + lfp.save(g) p("#endif") p("CALL ·asyncPreempt2(SB)") p("#ifndef %s", softfloat) - lfp.restore() + lfp.restore(g) p("#endif") - l.restore() + l.restore(g) p(mov+" %d(R29), R31", lfp.stack) // sigctxt.pushCall has pushed LR (at interrupt) on stack, restore it p(mov + " (R29), R23") // load PC to REGTMP @@ -444,7 +456,9 @@ func genMIPS(_64bit bool) { p("JMP (R23)") } -func genLoong64() { +func genLoong64(g *gen) { + p := g.p + mov := "MOVV" movf := "MOVD" add := "ADDV" @@ -478,9 +492,9 @@ func genLoong64() { p(mov+" R1, -%d(R3)", l.stack) p(sub+" $%d, R3", l.stack) - l.save() + l.save(g) p("CALL ·asyncPreempt2(SB)") - l.restore() + l.restore(g) p(mov+" %d(R3), R1", l.stack) // sigctxt.pushCall has pushed LR (at interrupt) on stack, restore it p(mov + " (R3), R30") // load PC to REGTMP @@ -488,7 +502,9 @@ func genLoong64() { p("JMP (R30)") } -func genPPC64() { +func genPPC64(g *gen) { + p := g.p + // Add integer registers R3-R29 // R0 (zero), R1 (SP), R30 (g) are special and not saved here. // R2 (TOC pointer in PIC mode), R12 (function entry address in PIC mode) have been saved in sigctxt.pushCall. @@ -528,9 +544,9 @@ func genPPC64() { p("MOVD LR, R31") p("MOVDU R31, -%d(R1)", l.stack) // allocate frame, save PC of interrupted instruction (in LR) - l.save() + l.save(g) p("CALL ·asyncPreempt2(SB)") - l.restore() + l.restore(g) p("MOVD %d(R1), R31", l.stack) // sigctxt.pushCall has pushed LR, R2, R12 (at interrupt) on stack, restore them p("MOVD R31, LR") @@ -543,7 +559,9 @@ func genPPC64() { p("JMP (CTR)") } -func genRISCV64() { +func genRISCV64(g *gen) { + p := g.p + // X0 (zero), X1 (LR), X2 (SP), X3 (GP), X4 (TP), X27 (g), X31 (TMP) are special. var l = layout{sp: "X2", stack: 8} @@ -564,16 +582,18 @@ func genRISCV64() { p("MOV X1, -%d(X2)", l.stack) p("SUB $%d, X2", l.stack) - l.save() + l.save(g) p("CALL ·asyncPreempt2(SB)") - l.restore() + l.restore(g) p("MOV %d(X2), X1", l.stack) p("MOV (X2), X31") p("ADD $%d, X2", l.stack+8) p("JMP (X31)") } -func genS390X() { +func genS390X(g *gen) { + p := g.p + // Add integer registers R0-R12 // R13 (g), R14 (LR), R15 (SP) are special, and not saved here. // Saving R10 (REGTMP) is not necessary, but it is saved anyway. 
@@ -594,9 +614,9 @@ func genS390X() { p("ADD $-%d, R15", l.stack) p("MOVW R10, 8(R15)") // save flags - l.save() + l.save(g) p("CALL ·asyncPreempt2(SB)") - l.restore() + l.restore(g) p("MOVD %d(R15), R14", l.stack) // sigctxt.pushCall has pushed LR (at interrupt) on stack, restore it p("ADD $%d, R15", l.stack+8) // pop frame (including the space pushed by sigctxt.pushCall) @@ -606,12 +626,14 @@ func genS390X() { p("JMP (R10)") } -func genWasm() { +func genWasm(g *gen) { + p := g.p p("// No async preemption on wasm") p("UNDEF") } -func notImplemented() { +func notImplemented(g *gen) { + p := g.p p("// Not implemented yet") p("JMP ·abort(SB)") } From 9b9af3d6386d7564d71ff61468cea597bf0511bc Mon Sep 17 00:00:00 2001 From: Austin Clements Date: Thu, 12 Jun 2025 15:24:22 -0400 Subject: [PATCH 018/139] [dev.simd] internal/cpu: add AVX-512-CD and DQ, and derived "basic AVX-512" This adds detection for the CD and DQ sub-features of x86 AVX-512. Building on these, we also add a "derived" AVX-512 feature that bundles together the basic usable subset of subfeatures. Despite the F in AVX-512-F standing for "foundation", AVX-512-F+BW+DQ+VL together really form the basic usable subset of AVX-512 functionality. These have also all been supported together by almost every CPU, and are guaranteed by GOAMD64=v4, so there's little point in separating them out. Change-Id: I34356502bd1853ba2372e48db0b10d55cffe07a1 Reviewed-on: https://go-review.googlesource.com/c/go/+/680899 Reviewed-by: Cherry Mui LUCI-TryBot-Result: Go LUCI --- src/internal/cpu/cpu.go | 10 ++++++++++ src/internal/cpu/cpu_x86.go | 17 +++++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/src/internal/cpu/cpu.go b/src/internal/cpu/cpu.go index 760dc0b469d83d..a93eb54ddf0cd7 100644 --- a/src/internal/cpu/cpu.go +++ b/src/internal/cpu/cpu.go @@ -31,8 +31,11 @@ var X86 struct { HasADX bool HasAVX bool HasAVX2 bool + HasAVX512 bool // Virtual feature: F+CD+BW+DQ+VL HasAVX512F bool + HasAVX512CD bool HasAVX512BW bool + HasAVX512DQ bool HasAVX512VL bool HasBMI1 bool HasBMI2 bool @@ -160,6 +163,10 @@ var RISCV64 struct { //go:linkname S390X //go:linkname RISCV64 +// doDerived, if non-nil, is called after processing GODEBUG to set "derived" +// feature flags. +var doDerived func() + // Initialize examines the processor and sets the relevant variables above. // This is called by the runtime package early in program initialization, // before normal init functions are run. env is set by runtime if the OS supports @@ -167,6 +174,9 @@ var RISCV64 struct { func Initialize(env string) { doinit() processOptions(env) + if doDerived != nil { + doDerived() + } } // options contains the cpu debug options that can be used in GODEBUG. diff --git a/src/internal/cpu/cpu_x86.go b/src/internal/cpu/cpu_x86.go index ee812076e96c49..7d6f40c1326759 100644 --- a/src/internal/cpu/cpu_x86.go +++ b/src/internal/cpu/cpu_x86.go @@ -36,7 +36,9 @@ const ( cpuid_BMI2 = 1 << 8 cpuid_ERMS = 1 << 9 cpuid_AVX512F = 1 << 16 + cpuid_AVX512DQ = 1 << 17 cpuid_ADX = 1 << 19 + cpuid_AVX512CD = 1 << 28 cpuid_SHA = 1 << 29 cpuid_AVX512BW = 1 << 30 cpuid_AVX512VL = 1 << 31 @@ -84,7 +86,9 @@ func doinit() { // they can be turned off. 
options = append(options, option{Name: "avx512f", Feature: &X86.HasAVX512F}, + option{Name: "avx512cd", Feature: &X86.HasAVX512CD}, option{Name: "avx512bw", Feature: &X86.HasAVX512BW}, + option{Name: "avx512dq", Feature: &X86.HasAVX512DQ}, option{Name: "avx512vl", Feature: &X86.HasAVX512VL}, ) } @@ -149,7 +153,9 @@ func doinit() { X86.HasAVX512F = isSet(ebx7, cpuid_AVX512F) && osSupportsAVX512 if X86.HasAVX512F { + X86.HasAVX512CD = isSet(ebx7, cpuid_AVX512CD) X86.HasAVX512BW = isSet(ebx7, cpuid_AVX512BW) + X86.HasAVX512DQ = isSet(ebx7, cpuid_AVX512DQ) X86.HasAVX512VL = isSet(ebx7, cpuid_AVX512VL) } @@ -164,6 +170,17 @@ func doinit() { _, _, _, edxExt1 := cpuid(0x80000001, 0) X86.HasRDTSCP = isSet(edxExt1, cpuid_RDTSCP) + + doDerived = func() { + // Rather than carefully gating on fundamental AVX-512 features, we have + // a virtual "AVX512" feature that captures F+CD+BW+DQ+VL. BW, DQ, and + // VL have a huge effect on which AVX-512 instructions are available, + // and these have all been supported on everything except the earliest + // Phi chips with AVX-512. No CPU has had CD without F, so we include + // it. GOAMD64=v4 also implies exactly this set, and these are all + // included in AVX10.1. + X86.HasAVX512 = X86.HasAVX512F && X86.HasAVX512CD && X86.HasAVX512BW && X86.HasAVX512DQ && X86.HasAVX512VL + } } func isSet(hwc uint32, value uint32) bool { From c81cb05e3ef0da39f87f85f4817dea73d587256a Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Wed, 11 Jun 2025 17:32:00 +0000 Subject: [PATCH 019/139] [dev.simd] cmd/compile: add simdGen prog writer This CL is a synergy between simdgen refactor CL 681195. Change-Id: I365becf515a261bd22c46824613c2dce309cac45 Reviewed-on: https://go-review.googlesource.com/c/go/+/681036 Reviewed-by: Cherry Mui Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/amd64/ssa.go | 143 +++++++++++++++++- src/cmd/compile/internal/ssa/_gen/AMD64Ops.go | 64 ++++---- 2 files changed, 171 insertions(+), 36 deletions(-) diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index dcc4e30e1e65c2..2962fe1698e164 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -1517,24 +1517,101 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { } } -func simdGenUnary(s *ssagen.State, v *ssa.Value) { +// Example instruction: VRSQRTPS X1, X1 +func simdFp11(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = simdReg(v.Args[0]) p.To.Type = obj.TYPE_REG p.To.Reg = simdReg(v) + return p +} + +// Example instruction: VPSUBD X1, X2, X3 +func simdFp21(s *ssagen.State, v *ssa.Value) *obj.Prog { + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + // Vector registers operands follows a right-to-left order. + // e.g. VPSUBD X1, X2, X3 means X3 = X2 - X1. 
+ p.From.Reg = simdReg(v.Args[1]) + p.AddRestSourceReg(simdReg(v.Args[0])) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + return p +} + +// Example instruction: VPCMPEQW Z26, Z30, K4 +func simdFp2k1(s *ssagen.State, v *ssa.Value) *obj.Prog { + // simdReg handles mask and vector registers altogether + return simdFp21(s, v) } -func simdGenBinary(s *ssagen.State, v *ssa.Value) { +// Example instruction: VPMINUQ X21, X3, K3, X31 +func simdFp2k1fp1(s *ssagen.State, v *ssa.Value) *obj.Prog { + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = simdReg(v.Args[1]) + p.AddRestSourceReg(simdReg(v.Args[0])) + // These "simd*" series of functions assumes: + // Any "K" register that serves as the write-mask + // or "predicate" for "predicated AVX512 instructions" + // sits right at the end of the operand list. + // TODO: verify this assumption. + p.AddRestSourceReg(simdReg(v.Args[2])) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + return p +} + +// Example instruction: VPCMPEQW Z26, Z30, K1, K4 +func simdFp2k1k1(s *ssagen.State, v *ssa.Value) *obj.Prog { + return simdFp2k1fp1(s, v) +} + +// Example instruction: VPOPCNTB X14, K4, X16 +func simdFp1k1fp1(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = simdReg(v.Args[0]) p.AddRestSourceReg(simdReg(v.Args[1])) p.To.Type = obj.TYPE_REG p.To.Reg = simdReg(v) + return p +} + +// Example instruction: VROUNDPD $7, X2, X2 +func simdFp11Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog { + p := s.Prog(v.Op.Asm()) + imm := v.AuxInt + if imm < 0 || imm > 255 { + v.Fatalf("Invalid source selection immediate") + } + p.From.Offset = imm + p.From.Type = obj.TYPE_CONST + p.AddRestSourceReg(simdReg(v.Args[0])) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + return p +} + +// Example instruction: VREDUCEPD $126, X1, K3, X31 +func simdFp1k1fp1Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog { + p := s.Prog(v.Op.Asm()) + imm := v.AuxInt + if imm < 0 || imm > 255 { + v.Fatalf("Invalid source selection immediate") + } + p.From.Offset = imm + p.From.Type = obj.TYPE_CONST + p.AddRestSourceReg(simdReg(v.Args[0])) + p.AddRestSourceReg(simdReg(v.Args[1])) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + return p } -func simdGenUnaryImmUint8(s *ssagen.State, v *ssa.Value) { +// Example instruction: VCMPPS $7, X2, X9, X2 +func simdFp21Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) imm := v.AuxInt if imm < 0 || imm > 255 { @@ -1542,12 +1619,20 @@ func simdGenUnaryImmUint8(s *ssagen.State, v *ssa.Value) { } p.From.Offset = imm p.From.Type = obj.TYPE_CONST + p.AddRestSourceReg(simdReg(v.Args[1])) p.AddRestSourceReg(simdReg(v.Args[0])) p.To.Type = obj.TYPE_REG p.To.Reg = simdReg(v) + return p +} + +// Example instruction: VPCMPD $1, Z1, Z2, K1 +func simdFp2k1Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog { + return simdFp21Imm8(s, v) } -func simdGenBinaryImmUint8(s *ssagen.State, v *ssa.Value) { +// Example instruction: VPCMPD $1, Z1, Z2, K2, K1 +func simdFp2k1k1Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) imm := v.AuxInt if imm < 0 || imm > 255 { @@ -1555,10 +1640,60 @@ func simdGenBinaryImmUint8(s *ssagen.State, v *ssa.Value) { } p.From.Offset = imm p.From.Type = obj.TYPE_CONST + p.AddRestSourceReg(simdReg(v.Args[1])) + p.AddRestSourceReg(simdReg(v.Args[0])) + p.AddRestSourceReg(simdReg(v.Args[2])) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + return p +} + +// Example instruction: VFMADD213PD Z2, Z1, Z0 +func simdFp31ResultInArg0(s 
*ssagen.State, v *ssa.Value) *obj.Prog { + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = simdReg(v.Args[2]) + p.AddRestSourceReg(simdReg(v.Args[1])) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + return p +} + +// Example instruction: VFMADD213PD Z2, Z1, K1, Z0 +func simdFp3k1fp1ResultInArg0(s *ssagen.State, v *ssa.Value) *obj.Prog { + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = simdReg(v.Args[2]) + p.AddRestSourceReg(simdReg(v.Args[1])) + p.AddRestSourceReg(simdReg(v.Args[3])) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + return p +} + +// Currently unused +func simdFp31(s *ssagen.State, v *ssa.Value) *obj.Prog { + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = simdReg(v.Args[2]) + p.AddRestSourceReg(simdReg(v.Args[1])) p.AddRestSourceReg(simdReg(v.Args[0])) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + return p +} + +// Currently unused +func simdFp3k1fp1(s *ssagen.State, v *ssa.Value) *obj.Prog { + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = simdReg(v.Args[2]) p.AddRestSourceReg(simdReg(v.Args[1])) + p.AddRestSourceReg(simdReg(v.Args[0])) + p.AddRestSourceReg(simdReg(v.Args[3])) p.To.Type = obj.TYPE_REG p.To.Reg = simdReg(v) + return p } var blockJump = [...]struct { diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go index fbc3129de622ed..99d0d0ec740063 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go @@ -182,14 +182,14 @@ func init() { fpstore = regInfo{inputs: []regMask{gpspsb, fp, 0}} fpstoreidx = regInfo{inputs: []regMask{gpspsb, gpsp, fp, 0}} - fp1m1 = regInfo{inputs: fponly, outputs: maskonly} - m1fp1 = regInfo{inputs: maskonly, outputs: fponly} - fp2m1 = regInfo{inputs: []regMask{fp, fp}, outputs: maskonly} - fp1m1fp1 = regInfo{inputs: []regMask{fp, mask}, outputs: fponly} - fp2m1fp1 = regInfo{inputs: []regMask{fp, fp, mask}, outputs: fponly} - fp2m1m1 = regInfo{inputs: []regMask{fp, fp, mask}, outputs: maskonly} + fp1k1 = regInfo{inputs: fponly, outputs: maskonly} + k1fp1 = regInfo{inputs: maskonly, outputs: fponly} + fp2k1 = regInfo{inputs: []regMask{fp, fp}, outputs: maskonly} + fp1k1fp1 = regInfo{inputs: []regMask{fp, mask}, outputs: fponly} + fp2k1fp1 = regInfo{inputs: []regMask{fp, fp, mask}, outputs: fponly} + fp2k1k1 = regInfo{inputs: []regMask{fp, fp, mask}, outputs: maskonly} fp3fp1 = regInfo{inputs: []regMask{fp, fp, fp}, outputs: fponly} - fp3m1fp1 = regInfo{inputs: []regMask{fp, fp, fp, mask}, outputs: fponly} + fp3k1fp1 = regInfo{inputs: []regMask{fp, fp, fp, mask}, outputs: fponly} prefreg = regInfo{inputs: []regMask{gpspsbg}} ) @@ -1233,37 +1233,37 @@ func init() { {name: "VMOVDQUload512", argLength: 2, reg: fpload, asm: "VMOVDQU64", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0+auxint+aux, arg1 = mem {name: "VMOVDQUstore512", argLength: 3, reg: fpstore, asm: "VMOVDQU64", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // store, *(arg0+auxint+aux) = arg1, arg2 = mem - {name: "VPMOVMToVec8x16", argLength: 1, reg: m1fp1, asm: "VPMOVM2B"}, - {name: "VPMOVMToVec8x32", argLength: 1, reg: m1fp1, asm: "VPMOVM2B"}, - {name: "VPMOVMToVec8x64", argLength: 1, reg: m1fp1, asm: "VPMOVM2B"}, + {name: "VPMOVMToVec8x16", argLength: 1, reg: k1fp1, asm: "VPMOVM2B"}, + {name: "VPMOVMToVec8x32", argLength: 1, reg: k1fp1, asm: "VPMOVM2B"}, + {name: "VPMOVMToVec8x64", argLength: 1, reg: k1fp1, asm: "VPMOVM2B"}, - {name: 
"VPMOVMToVec16x8", argLength: 1, reg: m1fp1, asm: "VPMOVM2W"}, - {name: "VPMOVMToVec16x16", argLength: 1, reg: m1fp1, asm: "VPMOVM2W"}, - {name: "VPMOVMToVec16x32", argLength: 1, reg: m1fp1, asm: "VPMOVM2W"}, + {name: "VPMOVMToVec16x8", argLength: 1, reg: k1fp1, asm: "VPMOVM2W"}, + {name: "VPMOVMToVec16x16", argLength: 1, reg: k1fp1, asm: "VPMOVM2W"}, + {name: "VPMOVMToVec16x32", argLength: 1, reg: k1fp1, asm: "VPMOVM2W"}, - {name: "VPMOVMToVec32x4", argLength: 1, reg: m1fp1, asm: "VPMOVM2D"}, - {name: "VPMOVMToVec32x8", argLength: 1, reg: m1fp1, asm: "VPMOVM2D"}, - {name: "VPMOVMToVec32x16", argLength: 1, reg: m1fp1, asm: "VPMOVM2D"}, + {name: "VPMOVMToVec32x4", argLength: 1, reg: k1fp1, asm: "VPMOVM2D"}, + {name: "VPMOVMToVec32x8", argLength: 1, reg: k1fp1, asm: "VPMOVM2D"}, + {name: "VPMOVMToVec32x16", argLength: 1, reg: k1fp1, asm: "VPMOVM2D"}, - {name: "VPMOVMToVec64x2", argLength: 1, reg: m1fp1, asm: "VPMOVM2Q"}, - {name: "VPMOVMToVec64x4", argLength: 1, reg: m1fp1, asm: "VPMOVM2Q"}, - {name: "VPMOVMToVec64x8", argLength: 1, reg: m1fp1, asm: "VPMOVM2Q"}, + {name: "VPMOVMToVec64x2", argLength: 1, reg: k1fp1, asm: "VPMOVM2Q"}, + {name: "VPMOVMToVec64x4", argLength: 1, reg: k1fp1, asm: "VPMOVM2Q"}, + {name: "VPMOVMToVec64x8", argLength: 1, reg: k1fp1, asm: "VPMOVM2Q"}, - {name: "VPMOVVec8x16ToM", argLength: 1, reg: fp1m1, asm: "VPMOVB2M"}, - {name: "VPMOVVec8x32ToM", argLength: 1, reg: fp1m1, asm: "VPMOVB2M"}, - {name: "VPMOVVec8x64ToM", argLength: 1, reg: fp1m1, asm: "VPMOVB2M"}, + {name: "VPMOVVec8x16ToM", argLength: 1, reg: fp1k1, asm: "VPMOVB2M"}, + {name: "VPMOVVec8x32ToM", argLength: 1, reg: fp1k1, asm: "VPMOVB2M"}, + {name: "VPMOVVec8x64ToM", argLength: 1, reg: fp1k1, asm: "VPMOVB2M"}, - {name: "VPMOVVec16x8ToM", argLength: 1, reg: fp1m1, asm: "VPMOVW2M"}, - {name: "VPMOVVec16x16ToM", argLength: 1, reg: fp1m1, asm: "VPMOVW2M"}, - {name: "VPMOVVec16x32ToM", argLength: 1, reg: fp1m1, asm: "VPMOVW2M"}, + {name: "VPMOVVec16x8ToM", argLength: 1, reg: fp1k1, asm: "VPMOVW2M"}, + {name: "VPMOVVec16x16ToM", argLength: 1, reg: fp1k1, asm: "VPMOVW2M"}, + {name: "VPMOVVec16x32ToM", argLength: 1, reg: fp1k1, asm: "VPMOVW2M"}, - {name: "VPMOVVec32x4ToM", argLength: 1, reg: fp1m1, asm: "VPMOVD2M"}, - {name: "VPMOVVec32x8ToM", argLength: 1, reg: fp1m1, asm: "VPMOVD2M"}, - {name: "VPMOVVec32x16ToM", argLength: 1, reg: fp1m1, asm: "VPMOVD2M"}, + {name: "VPMOVVec32x4ToM", argLength: 1, reg: fp1k1, asm: "VPMOVD2M"}, + {name: "VPMOVVec32x8ToM", argLength: 1, reg: fp1k1, asm: "VPMOVD2M"}, + {name: "VPMOVVec32x16ToM", argLength: 1, reg: fp1k1, asm: "VPMOVD2M"}, - {name: "VPMOVVec64x2ToM", argLength: 1, reg: fp1m1, asm: "VPMOVQ2M"}, - {name: "VPMOVVec64x4ToM", argLength: 1, reg: fp1m1, asm: "VPMOVQ2M"}, - {name: "VPMOVVec64x8ToM", argLength: 1, reg: fp1m1, asm: "VPMOVQ2M"}, + {name: "VPMOVVec64x2ToM", argLength: 1, reg: fp1k1, asm: "VPMOVQ2M"}, + {name: "VPMOVVec64x4ToM", argLength: 1, reg: fp1k1, asm: "VPMOVQ2M"}, + {name: "VPMOVVec64x8ToM", argLength: 1, reg: fp1k1, asm: "VPMOVQ2M"}, {name: "Zero128", argLength: 0, reg: fp01, asm: "VPXOR"}, {name: "Zero256", argLength: 0, reg: fp01, asm: "VPXOR"}, @@ -1300,7 +1300,7 @@ func init() { pkg: "cmd/internal/obj/x86", genfile: "../../amd64/ssa.go", genSIMDfile: "../../amd64/simdssa.go", - ops: append(AMD64ops, simdAMD64Ops(fp11, fp21, fp2m1, fp1m1fp1, fp2m1fp1, fp2m1m1, fp3fp1, fp3m1fp1)...), // AMD64ops, + ops: append(AMD64ops, simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp3fp1, fp3k1fp1)...), // AMD64ops, blocks: AMD64blocks, regnames: 
regNamesAMD64, ParamIntRegNames: "AX BX CX DI SI R8 R9 R10 R11", From 5289e0f24e568fc2aad4a15334464ce760cd1655 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Thu, 12 Jun 2025 03:54:34 +0000 Subject: [PATCH 020/139] [dev.simd] cmd/compile: updates simd ordering and docs This CL is generated by CL 681395. Change-Id: Ic930aeeb24fc7f95a4d74c77403532d0b0eb39ff Reviewed-on: https://go-review.googlesource.com/c/go/+/681215 Auto-Submit: Junyang Shao Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui --- src/cmd/compile/internal/amd64/simdssa.go | 3033 +- .../compile/internal/ssa/_gen/simdAMD64.rules | 2089 +- .../compile/internal/ssa/_gen/simdAMD64ops.go | 1186 +- src/cmd/compile/internal/ssa/opGen.go | 3597 +-- src/cmd/compile/internal/ssa/rewriteAMD64.go | 23824 +++++++--------- .../compile/internal/ssagen/simdintrinsics.go | 704 +- src/simd/stubs_amd64.go | 4210 +-- src/simd/types_amd64.go | 480 +- 8 files changed, 17115 insertions(+), 22008 deletions(-) diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index d8d1a4c1a46e76..253bec09ca5024 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -10,2311 +10,870 @@ import ( ) func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { - p := s.Prog(v.Op.Asm()) - // First arg + var p *obj.Prog switch v.Op { - // Immediates - case ssa.OpAMD64VPCMPUBMasked512, - ssa.OpAMD64VPCMPUQ256, - ssa.OpAMD64VPCMPBMasked128, - ssa.OpAMD64VPCMPUWMasked128, - ssa.OpAMD64VPCMPBMasked512, - ssa.OpAMD64VPCMPUWMasked512, - ssa.OpAMD64VPCMPD128, - ssa.OpAMD64VPCMPDMasked256, - ssa.OpAMD64VCMPPD128, - ssa.OpAMD64VCMPPS256, - ssa.OpAMD64VPCMPDMasked128, - ssa.OpAMD64VPCMPUW256, - ssa.OpAMD64VCMPPSMasked256, - ssa.OpAMD64VPCMPDMasked512, - ssa.OpAMD64VPCMPQ256, - ssa.OpAMD64VPCMPQMasked512, - ssa.OpAMD64VPCMPUD128, - ssa.OpAMD64VPCMPUW128, - ssa.OpAMD64VPCMPBMasked256, - ssa.OpAMD64VPCMPB256, - ssa.OpAMD64VPCMPUWMasked256, - ssa.OpAMD64VPCMPUDMasked512, - ssa.OpAMD64VPCMPWMasked128, - ssa.OpAMD64VCMPPD256, - ssa.OpAMD64VCMPPDMasked256, - ssa.OpAMD64VPCMPQMasked256, - ssa.OpAMD64VPCMPUQ512, - ssa.OpAMD64VPCMPUW512, - ssa.OpAMD64VCMPPSMasked512, - ssa.OpAMD64VPCMPQMasked128, - ssa.OpAMD64VPCMPUBMasked256, - ssa.OpAMD64VPCMPUD512, - ssa.OpAMD64VPCMPUQ128, - ssa.OpAMD64VCMPPS128, - ssa.OpAMD64VPCMPUB128, - ssa.OpAMD64VPCMPUQMasked128, - ssa.OpAMD64VPCMPUB512, - ssa.OpAMD64VPCMPUB256, - ssa.OpAMD64VPCMPUQMasked512, - ssa.OpAMD64VPCMPUDMasked256, - ssa.OpAMD64VCMPPSMasked128, - ssa.OpAMD64VPCMPB128, - ssa.OpAMD64VPCMPUD256, - ssa.OpAMD64VPCMPQ512, - ssa.OpAMD64VPCMPWMasked512, - ssa.OpAMD64VPCMPUDMasked128, - ssa.OpAMD64VCMPPDMasked128, - ssa.OpAMD64VPCMPQ128, - ssa.OpAMD64VPCMPUQMasked256, - ssa.OpAMD64VPCMPD512, - ssa.OpAMD64VPCMPD256, - ssa.OpAMD64VPCMPB512, - ssa.OpAMD64VPCMPWMasked256, - ssa.OpAMD64VPCMPW256, - ssa.OpAMD64VCMPPDMasked512, - ssa.OpAMD64VCMPPD512, - ssa.OpAMD64VPCMPUBMasked128, - ssa.OpAMD64VPCMPW128, - ssa.OpAMD64VCMPPS512, - ssa.OpAMD64VPCMPW512: - imm := v.AuxInt - if imm < 0 || imm > 255 { - v.Fatalf("Invalid source selection immediate") - } - p.From.Offset = imm - p.From.Type = obj.TYPE_CONST - - // Registers - case ssa.OpAMD64VPMINSW256, - ssa.OpAMD64VPMULLD256, - ssa.OpAMD64VORPD512, - ssa.OpAMD64VPMINSDMasked128, - ssa.OpAMD64VDIVPD256, - ssa.OpAMD64VPCMPEQW256, - ssa.OpAMD64VHADDPS128, - ssa.OpAMD64VPXOR128, - ssa.OpAMD64VPADDQ128, - ssa.OpAMD64VMULPSMasked128, - ssa.OpAMD64VPANDQMasked256, - ssa.OpAMD64VSQRTPDMasked128, 
- ssa.OpAMD64VPMAXUB128, - ssa.OpAMD64VPSUBW256, - ssa.OpAMD64VPMAXSQ512, - ssa.OpAMD64VANDNPS512, - ssa.OpAMD64VPMULHUWMasked512, + case ssa.OpAMD64VPABSW256, + ssa.OpAMD64VPABSW128, + ssa.OpAMD64VPABSD128, + ssa.OpAMD64VPABSD256, + ssa.OpAMD64VPABSB128, + ssa.OpAMD64VPABSB256, ssa.OpAMD64VPABSW512, + ssa.OpAMD64VPABSD512, + ssa.OpAMD64VPABSQ128, + ssa.OpAMD64VPABSQ256, + ssa.OpAMD64VPABSQ512, + ssa.OpAMD64VPABSB512, + ssa.OpAMD64VRCP14PS512, + ssa.OpAMD64VRCP14PS128, + ssa.OpAMD64VRCP14PS256, + ssa.OpAMD64VRCP14PD128, + ssa.OpAMD64VRCP14PD256, + ssa.OpAMD64VRCP14PD512, + ssa.OpAMD64VRSQRTPS128, + ssa.OpAMD64VRSQRTPS256, + ssa.OpAMD64VRSQRT14PS512, + ssa.OpAMD64VRSQRT14PD128, ssa.OpAMD64VRSQRT14PD256, - ssa.OpAMD64VPHADDW128, - ssa.OpAMD64VPCMPGTQMasked256, - ssa.OpAMD64VPMULLQMasked256, - ssa.OpAMD64VPADDW512, - ssa.OpAMD64VPMULLDMasked128, - ssa.OpAMD64VPCMPEQQ128, - ssa.OpAMD64VPAVGW128, - ssa.OpAMD64VPOR256, - ssa.OpAMD64VPADDQMasked512, - ssa.OpAMD64VPMINUBMasked128, - ssa.OpAMD64VPMULLWMasked128, + ssa.OpAMD64VRSQRT14PD512, + ssa.OpAMD64VPOPCNTW256, ssa.OpAMD64VPOPCNTW512, - ssa.OpAMD64VORPD256, - ssa.OpAMD64VPANDN256, - ssa.OpAMD64VPANDD512, - ssa.OpAMD64VPORQ512, - ssa.OpAMD64VPMULLWMasked256, - ssa.OpAMD64VPSUBSW128, - ssa.OpAMD64VPADDSBMasked256, - ssa.OpAMD64VPCMPGTB256, - ssa.OpAMD64VMULPSMasked256, - ssa.OpAMD64VPCMPEQD256, - ssa.OpAMD64VPSUBSW512, - ssa.OpAMD64VPABSD512, + ssa.OpAMD64VPOPCNTW128, + ssa.OpAMD64VPOPCNTD512, + ssa.OpAMD64VPOPCNTD128, + ssa.OpAMD64VPOPCNTD256, + ssa.OpAMD64VPOPCNTQ128, + ssa.OpAMD64VPOPCNTQ256, + ssa.OpAMD64VPOPCNTQ512, + ssa.OpAMD64VPOPCNTB128, + ssa.OpAMD64VPOPCNTB256, + ssa.OpAMD64VPOPCNTB512, + ssa.OpAMD64VSQRTPS128, + ssa.OpAMD64VSQRTPS256, + ssa.OpAMD64VSQRTPD128, + ssa.OpAMD64VSQRTPD256, + ssa.OpAMD64VSQRTPS512, + ssa.OpAMD64VSQRTPD512: + p = simdFp11(s, v) + + case ssa.OpAMD64VADDPS128, + ssa.OpAMD64VADDPS256, + ssa.OpAMD64VADDPD128, + ssa.OpAMD64VADDPD256, + ssa.OpAMD64VPADDW256, + ssa.OpAMD64VPADDW128, + ssa.OpAMD64VPADDD128, + ssa.OpAMD64VPADDD256, + ssa.OpAMD64VPADDQ128, + ssa.OpAMD64VPADDQ256, + ssa.OpAMD64VPADDB128, + ssa.OpAMD64VPADDB256, + ssa.OpAMD64VADDPS512, + ssa.OpAMD64VADDPD512, + ssa.OpAMD64VPADDW512, ssa.OpAMD64VPADDD512, - ssa.OpAMD64VPADDBMasked512, - ssa.OpAMD64VPADDSBMasked512, - ssa.OpAMD64VPMULUDQ512, - ssa.OpAMD64VPADDSW512, - ssa.OpAMD64VPORQMasked128, + ssa.OpAMD64VPADDQ512, + ssa.OpAMD64VPADDB512, + ssa.OpAMD64VANDPS128, + ssa.OpAMD64VANDPS256, ssa.OpAMD64VANDPD128, - ssa.OpAMD64VPCMPEQD128, - ssa.OpAMD64VPHSUBSW128, - ssa.OpAMD64VPADDSW256, - ssa.OpAMD64VPSUBSBMasked512, - ssa.OpAMD64VPMULHUW128, - ssa.OpAMD64VPCMPGTW512, - ssa.OpAMD64VSQRTPDMasked256, - ssa.OpAMD64VPOPCNTB256, - ssa.OpAMD64VPAVGWMasked256, - ssa.OpAMD64VPMULUDQMasked256, - ssa.OpAMD64VPMINSD512, - ssa.OpAMD64VPADDWMasked512, - ssa.OpAMD64VPOPCNTB128, - ssa.OpAMD64VPOPCNTWMasked128, - ssa.OpAMD64VMAXPS128, - ssa.OpAMD64VPMULLD128, - ssa.OpAMD64VPSUBB256, - ssa.OpAMD64VMINPD128, - ssa.OpAMD64VPANDNQMasked512, + ssa.OpAMD64VANDPD256, + ssa.OpAMD64VPAND256, + ssa.OpAMD64VPAND128, ssa.OpAMD64VANDPS512, - ssa.OpAMD64VPOPCNTDMasked256, + ssa.OpAMD64VANDPD512, + ssa.OpAMD64VPANDD512, + ssa.OpAMD64VPANDQ512, + ssa.OpAMD64VANDNPS128, + ssa.OpAMD64VANDNPS256, + ssa.OpAMD64VANDNPD128, ssa.OpAMD64VANDNPD256, - ssa.OpAMD64VPAND128, + ssa.OpAMD64VPANDN256, ssa.OpAMD64VPANDN128, - ssa.OpAMD64VXORPD256, - ssa.OpAMD64VMAXPDMasked512, - ssa.OpAMD64VPMAXUWMasked256, - ssa.OpAMD64VPMINSQMasked128, - ssa.OpAMD64VPMULDQ256, - ssa.OpAMD64VPSUBSWMasked512, - 
ssa.OpAMD64VPXORQMasked128, - ssa.OpAMD64VPCMPEQW128, - ssa.OpAMD64VPABSWMasked128, - ssa.OpAMD64VPMAXSWMasked256, - ssa.OpAMD64VPMULDQ128, - ssa.OpAMD64VPMULLWMasked512, + ssa.OpAMD64VANDNPS512, + ssa.OpAMD64VANDNPD512, + ssa.OpAMD64VPANDND512, + ssa.OpAMD64VPANDNQ512, + ssa.OpAMD64VPAVGW256, + ssa.OpAMD64VPAVGW128, + ssa.OpAMD64VPAVGB128, + ssa.OpAMD64VPAVGB256, + ssa.OpAMD64VPAVGW512, + ssa.OpAMD64VPAVGB512, + ssa.OpAMD64VDIVPS128, + ssa.OpAMD64VDIVPS256, + ssa.OpAMD64VDIVPD128, + ssa.OpAMD64VDIVPD256, ssa.OpAMD64VDIVPS512, - ssa.OpAMD64VPMULDQMasked512, - ssa.OpAMD64VRCP14PS512, - ssa.OpAMD64VRSQRT14PDMasked128, - ssa.OpAMD64VXORPSMasked128, - ssa.OpAMD64VPADDW256, - ssa.OpAMD64VANDNPSMasked256, - ssa.OpAMD64VPMULUDQMasked128, - ssa.OpAMD64VPCMPEQWMasked128, - ssa.OpAMD64VHSUBPS128, - ssa.OpAMD64VPMAXSDMasked512, - ssa.OpAMD64VPABSQMasked256, - ssa.OpAMD64VADDPS256, - ssa.OpAMD64VHSUBPS256, - ssa.OpAMD64VPSUBB128, + ssa.OpAMD64VDIVPD512, + ssa.OpAMD64VPCMPEQW256, + ssa.OpAMD64VPCMPEQW128, + ssa.OpAMD64VPCMPEQD128, + ssa.OpAMD64VPCMPEQD256, + ssa.OpAMD64VPCMPEQQ128, + ssa.OpAMD64VPCMPEQQ256, + ssa.OpAMD64VPCMPEQB128, + ssa.OpAMD64VPCMPEQB256, + ssa.OpAMD64VPCMPGTW256, + ssa.OpAMD64VPCMPGTW128, + ssa.OpAMD64VPCMPGTD128, + ssa.OpAMD64VPCMPGTD256, + ssa.OpAMD64VPCMPGTQ256, + ssa.OpAMD64VPCMPGTB128, + ssa.OpAMD64VPCMPGTB256, + ssa.OpAMD64VMAXPS128, + ssa.OpAMD64VMAXPS256, + ssa.OpAMD64VMAXPD128, + ssa.OpAMD64VMAXPD256, + ssa.OpAMD64VPMAXSW256, + ssa.OpAMD64VPMAXSW128, + ssa.OpAMD64VPMAXSD128, + ssa.OpAMD64VPMAXSD256, + ssa.OpAMD64VPMAXSB128, + ssa.OpAMD64VPMAXSB256, + ssa.OpAMD64VPMAXUW256, + ssa.OpAMD64VPMAXUW128, + ssa.OpAMD64VPMAXUD128, + ssa.OpAMD64VPMAXUD256, + ssa.OpAMD64VPMAXUB128, + ssa.OpAMD64VPMAXUB256, + ssa.OpAMD64VMAXPS512, ssa.OpAMD64VMAXPD512, - ssa.OpAMD64VPAVGBMasked512, - ssa.OpAMD64VPHSUBW256, - ssa.OpAMD64VPANDNDMasked256, - ssa.OpAMD64VPMAXUWMasked128, - ssa.OpAMD64VXORPS128, + ssa.OpAMD64VPMAXSW512, + ssa.OpAMD64VPMAXSD512, + ssa.OpAMD64VPMAXSQ128, + ssa.OpAMD64VPMAXSQ256, + ssa.OpAMD64VPMAXSQ512, + ssa.OpAMD64VPMAXSB512, + ssa.OpAMD64VPMAXUW512, + ssa.OpAMD64VPMAXUD512, + ssa.OpAMD64VPMAXUQ128, + ssa.OpAMD64VPMAXUQ256, + ssa.OpAMD64VPMAXUQ512, + ssa.OpAMD64VPMAXUB512, ssa.OpAMD64VMINPS128, - ssa.OpAMD64VADDPDMasked128, - ssa.OpAMD64VPMULLQMasked512, - ssa.OpAMD64VPORQMasked256, - ssa.OpAMD64VPMULHW128, - ssa.OpAMD64VSCALEFPDMasked256, - ssa.OpAMD64VPSUBDMasked512, - ssa.OpAMD64VPSUBQ512, - ssa.OpAMD64VPADDB128, - ssa.OpAMD64VPSUBSB256, - ssa.OpAMD64VDIVPSMasked512, - ssa.OpAMD64VSCALEFPS128, - ssa.OpAMD64VSQRTPS512, - ssa.OpAMD64VPSIGND128, - ssa.OpAMD64VRSQRT14PD512, - ssa.OpAMD64VDIVPDMasked128, - ssa.OpAMD64VXORPDMasked128, - ssa.OpAMD64VPMINSWMasked512, - ssa.OpAMD64VPXORD512, - ssa.OpAMD64VHADDPD256, - ssa.OpAMD64VPMAXSB128, - ssa.OpAMD64VPHSUBD128, - ssa.OpAMD64VPANDDMasked256, - ssa.OpAMD64VRCP14PSMasked128, - ssa.OpAMD64VMULPDMasked512, - ssa.OpAMD64VPSUBD512, - ssa.OpAMD64VANDNPD128, - ssa.OpAMD64VPHADDD256, + ssa.OpAMD64VMINPS256, + ssa.OpAMD64VMINPD128, + ssa.OpAMD64VMINPD256, + ssa.OpAMD64VPMINSW256, + ssa.OpAMD64VPMINSW128, + ssa.OpAMD64VPMINSD128, + ssa.OpAMD64VPMINSD256, + ssa.OpAMD64VPMINSB128, + ssa.OpAMD64VPMINSB256, + ssa.OpAMD64VPMINUW256, + ssa.OpAMD64VPMINUW128, + ssa.OpAMD64VPMINUD128, + ssa.OpAMD64VPMINUD256, + ssa.OpAMD64VPMINUB128, + ssa.OpAMD64VPMINUB256, ssa.OpAMD64VMINPS512, + ssa.OpAMD64VMINPD512, + ssa.OpAMD64VPMINSW512, + ssa.OpAMD64VPMINSD512, + ssa.OpAMD64VPMINSQ128, + ssa.OpAMD64VPMINSQ256, + ssa.OpAMD64VPMINSQ512, + 
ssa.OpAMD64VPMINSB512, + ssa.OpAMD64VPMINUW512, + ssa.OpAMD64VPMINUD512, + ssa.OpAMD64VPMINUQ128, + ssa.OpAMD64VPMINUQ256, + ssa.OpAMD64VPMINUQ512, + ssa.OpAMD64VPMINUB512, + ssa.OpAMD64VMULPS128, + ssa.OpAMD64VMULPS256, + ssa.OpAMD64VMULPD128, + ssa.OpAMD64VMULPD256, + ssa.OpAMD64VMULPS512, + ssa.OpAMD64VMULPD512, + ssa.OpAMD64VSCALEFPS512, + ssa.OpAMD64VSCALEFPS128, + ssa.OpAMD64VSCALEFPS256, + ssa.OpAMD64VSCALEFPD128, + ssa.OpAMD64VSCALEFPD256, + ssa.OpAMD64VSCALEFPD512, + ssa.OpAMD64VPMULDQ128, + ssa.OpAMD64VPMULDQ256, + ssa.OpAMD64VPMULUDQ128, + ssa.OpAMD64VPMULUDQ256, ssa.OpAMD64VPMULDQ512, - ssa.OpAMD64VORPSMasked512, - ssa.OpAMD64VPORDMasked256, - ssa.OpAMD64VPABSW256, - ssa.OpAMD64VPMAXSQMasked512, - ssa.OpAMD64VPADDDMasked256, - ssa.OpAMD64VPSUBQMasked256, - ssa.OpAMD64VXORPSMasked512, - ssa.OpAMD64VPCMPEQB128, + ssa.OpAMD64VPMULUDQ512, + ssa.OpAMD64VPMULHW256, + ssa.OpAMD64VPMULHW128, + ssa.OpAMD64VPMULHUW256, + ssa.OpAMD64VPMULHUW128, + ssa.OpAMD64VPMULHW512, + ssa.OpAMD64VPMULHUW512, ssa.OpAMD64VPMULLW256, - ssa.OpAMD64VDIVPDMasked512, - ssa.OpAMD64VPMAXUDMasked128, - ssa.OpAMD64VPADDB256, - ssa.OpAMD64VPABSWMasked256, - ssa.OpAMD64VPOPCNTWMasked512, - ssa.OpAMD64VPSUBD128, - ssa.OpAMD64VPXORQMasked512, - ssa.OpAMD64VMINPDMasked256, - ssa.OpAMD64VPADDWMasked256, - ssa.OpAMD64VPMAXSQ256, - ssa.OpAMD64VPSUBWMasked512, - ssa.OpAMD64VMULPS512, - ssa.OpAMD64VPMULLQMasked128, + ssa.OpAMD64VPMULLW128, + ssa.OpAMD64VPMULLD128, + ssa.OpAMD64VPMULLD256, + ssa.OpAMD64VPMULLW512, + ssa.OpAMD64VPMULLD512, + ssa.OpAMD64VPMULLQ128, + ssa.OpAMD64VPMULLQ256, + ssa.OpAMD64VPMULLQ512, + ssa.OpAMD64VORPS128, + ssa.OpAMD64VORPS256, + ssa.OpAMD64VORPD128, + ssa.OpAMD64VORPD256, + ssa.OpAMD64VPOR256, + ssa.OpAMD64VPOR128, + ssa.OpAMD64VORPS512, + ssa.OpAMD64VORPD512, + ssa.OpAMD64VPORD512, + ssa.OpAMD64VPORQ512, + ssa.OpAMD64VHADDPS128, + ssa.OpAMD64VHADDPS256, + ssa.OpAMD64VHADDPD128, + ssa.OpAMD64VHADDPD256, + ssa.OpAMD64VPHADDW256, + ssa.OpAMD64VPHADDW128, + ssa.OpAMD64VPHADDD128, + ssa.OpAMD64VPHADDD256, + ssa.OpAMD64VHSUBPS128, + ssa.OpAMD64VHSUBPS256, + ssa.OpAMD64VHSUBPD128, + ssa.OpAMD64VHSUBPD256, + ssa.OpAMD64VPHSUBW256, + ssa.OpAMD64VPHSUBW128, + ssa.OpAMD64VPHSUBD128, + ssa.OpAMD64VPHSUBD256, + ssa.OpAMD64VPADDSW256, + ssa.OpAMD64VPADDSW128, ssa.OpAMD64VPADDSB128, - ssa.OpAMD64VMINPD512, - ssa.OpAMD64VPMAXSD512, - ssa.OpAMD64VPMINUWMasked128, - ssa.OpAMD64VPOPCNTQMasked128, - ssa.OpAMD64VPMINUB256, - ssa.OpAMD64VPXORDMasked256, - ssa.OpAMD64VPABSB256, - ssa.OpAMD64VPORQMasked512, - ssa.OpAMD64VORPDMasked512, - ssa.OpAMD64VPOPCNTQ128, - ssa.OpAMD64VPMINSD256, - ssa.OpAMD64VPOPCNTDMasked512, - ssa.OpAMD64VPMINSWMasked128, - ssa.OpAMD64VPOPCNTD256, - ssa.OpAMD64VPXORDMasked512, - ssa.OpAMD64VPABSQ256, - ssa.OpAMD64VPOPCNTW256, - ssa.OpAMD64VDIVPS256, + ssa.OpAMD64VPADDSB256, + ssa.OpAMD64VPADDSW512, + ssa.OpAMD64VPADDSB512, + ssa.OpAMD64VPHADDSW256, ssa.OpAMD64VPHADDSW128, + ssa.OpAMD64VPHSUBSW256, + ssa.OpAMD64VPHSUBSW128, + ssa.OpAMD64VPSUBSW256, + ssa.OpAMD64VPSUBSW128, + ssa.OpAMD64VPSUBSB128, + ssa.OpAMD64VPSUBSB256, + ssa.OpAMD64VPSUBSW512, + ssa.OpAMD64VPSUBSB512, + ssa.OpAMD64VPSIGNW256, + ssa.OpAMD64VPSIGNW128, + ssa.OpAMD64VPSIGND128, + ssa.OpAMD64VPSIGND256, + ssa.OpAMD64VPSIGNB128, + ssa.OpAMD64VPSIGNB256, + ssa.OpAMD64VPSUBW256, + ssa.OpAMD64VPSUBW128, + ssa.OpAMD64VPSUBD128, ssa.OpAMD64VPSUBD256, - ssa.OpAMD64VRSQRT14PD128, - ssa.OpAMD64VDIVPD128, - ssa.OpAMD64VPCMPEQQMasked512, - ssa.OpAMD64VRCP14PDMasked256, - ssa.OpAMD64VPMULUDQMasked512, - ssa.OpAMD64VMAXPSMasked256, - 
ssa.OpAMD64VPMULLQ256, - ssa.OpAMD64VANDNPDMasked256, + ssa.OpAMD64VPSUBQ128, + ssa.OpAMD64VPSUBQ256, + ssa.OpAMD64VPSUBB128, + ssa.OpAMD64VPSUBB256, + ssa.OpAMD64VPSUBW512, + ssa.OpAMD64VPSUBD512, + ssa.OpAMD64VPSUBQ512, + ssa.OpAMD64VPSUBB512, + ssa.OpAMD64VXORPS128, + ssa.OpAMD64VXORPS256, + ssa.OpAMD64VXORPD128, + ssa.OpAMD64VXORPD256, + ssa.OpAMD64VPXOR256, + ssa.OpAMD64VPXOR128, + ssa.OpAMD64VXORPS512, + ssa.OpAMD64VXORPD512, + ssa.OpAMD64VPXORD512, + ssa.OpAMD64VPXORQ512: + p = simdFp21(s, v) + + case ssa.OpAMD64VPCMPEQW512, + ssa.OpAMD64VPCMPEQD512, + ssa.OpAMD64VPCMPEQQ512, + ssa.OpAMD64VPCMPEQB512, + ssa.OpAMD64VPCMPGTW512, + ssa.OpAMD64VPCMPGTD512, + ssa.OpAMD64VPCMPGTQ128, + ssa.OpAMD64VPCMPGTQ512, + ssa.OpAMD64VPCMPGTB512: + p = simdFp2k1(s, v) + + case ssa.OpAMD64VADDPSMasked512, + ssa.OpAMD64VADDPSMasked128, + ssa.OpAMD64VADDPSMasked256, + ssa.OpAMD64VADDPDMasked128, + ssa.OpAMD64VADDPDMasked256, + ssa.OpAMD64VADDPDMasked512, + ssa.OpAMD64VPADDWMasked256, + ssa.OpAMD64VPADDWMasked512, ssa.OpAMD64VPADDWMasked128, - ssa.OpAMD64VMULPDMasked128, - ssa.OpAMD64VPCMPGTWMasked512, - ssa.OpAMD64VPOR128, + ssa.OpAMD64VPADDDMasked512, + ssa.OpAMD64VPADDDMasked128, + ssa.OpAMD64VPADDDMasked256, + ssa.OpAMD64VPADDQMasked128, + ssa.OpAMD64VPADDQMasked256, + ssa.OpAMD64VPADDQMasked512, + ssa.OpAMD64VPADDBMasked128, + ssa.OpAMD64VPADDBMasked256, + ssa.OpAMD64VPADDBMasked512, + ssa.OpAMD64VANDPSMasked512, + ssa.OpAMD64VANDPSMasked128, + ssa.OpAMD64VANDPSMasked256, + ssa.OpAMD64VANDPDMasked128, + ssa.OpAMD64VANDPDMasked256, + ssa.OpAMD64VANDPDMasked512, + ssa.OpAMD64VPANDDMasked512, + ssa.OpAMD64VPANDDMasked128, + ssa.OpAMD64VPANDDMasked256, + ssa.OpAMD64VPANDQMasked128, + ssa.OpAMD64VPANDQMasked256, + ssa.OpAMD64VPANDQMasked512, + ssa.OpAMD64VANDNPSMasked512, + ssa.OpAMD64VANDNPSMasked128, + ssa.OpAMD64VANDNPSMasked256, + ssa.OpAMD64VANDNPDMasked128, + ssa.OpAMD64VANDNPDMasked256, ssa.OpAMD64VANDNPDMasked512, - ssa.OpAMD64VPSUBBMasked512, + ssa.OpAMD64VPANDNDMasked512, ssa.OpAMD64VPANDNDMasked128, - ssa.OpAMD64VPMINUDMasked128, - ssa.OpAMD64VXORPD128, + ssa.OpAMD64VPANDNDMasked256, + ssa.OpAMD64VPANDNQMasked128, + ssa.OpAMD64VPANDNQMasked256, + ssa.OpAMD64VPANDNQMasked512, + ssa.OpAMD64VPAVGWMasked256, + ssa.OpAMD64VPAVGWMasked512, ssa.OpAMD64VPAVGWMasked128, - ssa.OpAMD64VPMULLDMasked256, - ssa.OpAMD64VPSUBBMasked256, - ssa.OpAMD64VRSQRT14PSMasked128, - ssa.OpAMD64VPADDBMasked128, - ssa.OpAMD64VPMAXUBMasked512, - ssa.OpAMD64VPMULLW128, - ssa.OpAMD64VPSUBW128, - ssa.OpAMD64VPXOR256, + ssa.OpAMD64VPAVGBMasked128, + ssa.OpAMD64VPAVGBMasked256, + ssa.OpAMD64VPAVGBMasked512, + ssa.OpAMD64VDIVPSMasked512, + ssa.OpAMD64VDIVPSMasked128, ssa.OpAMD64VDIVPSMasked256, - ssa.OpAMD64VPOPCNTBMasked256, - ssa.OpAMD64VPADDD128, - ssa.OpAMD64VRSQRTPS128, - ssa.OpAMD64VHADDPD128, - ssa.OpAMD64VPSUBDMasked256, - ssa.OpAMD64VPOPCNTD512, - ssa.OpAMD64VPANDNQ512, - ssa.OpAMD64VPAVGWMasked512, - ssa.OpAMD64VSCALEFPSMasked128, - ssa.OpAMD64VMINPSMasked512, - ssa.OpAMD64VPSUBQMasked512, - ssa.OpAMD64VSQRTPSMasked512, - ssa.OpAMD64VPMINSDMasked256, - ssa.OpAMD64VANDPSMasked512, - ssa.OpAMD64VPMAXUBMasked256, - ssa.OpAMD64VPSUBWMasked256, - ssa.OpAMD64VSQRTPD512, - ssa.OpAMD64VPADDSWMasked256, - ssa.OpAMD64VPANDDMasked512, - ssa.OpAMD64VRCP14PD512, - ssa.OpAMD64VPABSWMasked512, - ssa.OpAMD64VPSUBSWMasked256, - ssa.OpAMD64VPSUBQMasked128, - ssa.OpAMD64VPABSD128, - ssa.OpAMD64VPMINUD256, - ssa.OpAMD64VPMINUBMasked512, - ssa.OpAMD64VPSUBQ256, - ssa.OpAMD64VPMINSD128, - ssa.OpAMD64VPADDSB256, - 
ssa.OpAMD64VPMULLDMasked512, - ssa.OpAMD64VANDPD512, - ssa.OpAMD64VMULPDMasked256, - ssa.OpAMD64VPHADDSW256, - ssa.OpAMD64VPAND256, - ssa.OpAMD64VADDPS512, - ssa.OpAMD64VPMINUQ256, - ssa.OpAMD64VADDPD256, - ssa.OpAMD64VPABSB128, - ssa.OpAMD64VPANDND512, - ssa.OpAMD64VPSUBSWMasked128, - ssa.OpAMD64VPMAXSW256, - ssa.OpAMD64VMAXPD256, - ssa.OpAMD64VMULPD128, - ssa.OpAMD64VPOPCNTQMasked256, - ssa.OpAMD64VPADDD256, - ssa.OpAMD64VPOPCNTQ512, - ssa.OpAMD64VMINPD256, - ssa.OpAMD64VPXORQMasked256, - ssa.OpAMD64VPOPCNTBMasked512, - ssa.OpAMD64VPANDQMasked512, - ssa.OpAMD64VPMINSW512, - ssa.OpAMD64VPORDMasked512, - ssa.OpAMD64VPAVGB128, - ssa.OpAMD64VADDPD512, - ssa.OpAMD64VPMULHW512, - ssa.OpAMD64VPADDQ256, - ssa.OpAMD64VPMINUQ512, - ssa.OpAMD64VORPS512, - ssa.OpAMD64VPMAXUWMasked512, - ssa.OpAMD64VPMAXSDMasked128, - ssa.OpAMD64VPMULDQMasked128, - ssa.OpAMD64VSCALEFPD128, - ssa.OpAMD64VPCMPGTW256, - ssa.OpAMD64VPMAXUW256, - ssa.OpAMD64VPMAXUD512, - ssa.OpAMD64VPMAXUQ256, - ssa.OpAMD64VPMINUDMasked256, - ssa.OpAMD64VPABSBMasked512, - ssa.OpAMD64VSQRTPD128, - ssa.OpAMD64VPANDNDMasked512, - ssa.OpAMD64VPMINUDMasked512, - ssa.OpAMD64VPABSBMasked256, - ssa.OpAMD64VXORPDMasked256, - ssa.OpAMD64VMAXPSMasked512, - ssa.OpAMD64VPMINUD512, - ssa.OpAMD64VPABSBMasked128, - ssa.OpAMD64VPSUBBMasked128, - ssa.OpAMD64VPMINSDMasked512, - ssa.OpAMD64VPSUBSBMasked256, - ssa.OpAMD64VORPSMasked256, - ssa.OpAMD64VADDPSMasked128, - ssa.OpAMD64VPMAXSB512, - ssa.OpAMD64VPABSB512, - ssa.OpAMD64VXORPDMasked512, - ssa.OpAMD64VADDPS128, - ssa.OpAMD64VPMINUQMasked512, - ssa.OpAMD64VANDNPS256, - ssa.OpAMD64VPMAXSB256, + ssa.OpAMD64VDIVPDMasked128, ssa.OpAMD64VDIVPDMasked256, - ssa.OpAMD64VPHSUBW128, - ssa.OpAMD64VPADDQMasked256, - ssa.OpAMD64VPMAXSD256, - ssa.OpAMD64VPABSDMasked512, - ssa.OpAMD64VPADDQ512, - ssa.OpAMD64VPMAXUDMasked256, - ssa.OpAMD64VPCMPGTB128, - ssa.OpAMD64VPMAXUQMasked128, - ssa.OpAMD64VPCMPGTQ128, - ssa.OpAMD64VPANDQ512, - ssa.OpAMD64VRCP14PSMasked512, - ssa.OpAMD64VANDPS256, - ssa.OpAMD64VPHSUBD256, - ssa.OpAMD64VPSUBW512, - ssa.OpAMD64VHADDPS256, - ssa.OpAMD64VMULPD256, - ssa.OpAMD64VRCP14PDMasked512, - ssa.OpAMD64VPSUBDMasked128, - ssa.OpAMD64VPXORDMasked128, - ssa.OpAMD64VHSUBPD128, - ssa.OpAMD64VPMAXUW128, - ssa.OpAMD64VORPDMasked128, - ssa.OpAMD64VPHADDD128, - ssa.OpAMD64VPMINUD128, - ssa.OpAMD64VPSIGND256, - ssa.OpAMD64VPADDSW128, - ssa.OpAMD64VPMAXUQ512, - ssa.OpAMD64VPCMPGTQ512, - ssa.OpAMD64VADDPDMasked256, - ssa.OpAMD64VHSUBPD256, - ssa.OpAMD64VPCMPGTWMasked256, - ssa.OpAMD64VPOPCNTW128, - ssa.OpAMD64VPSUBSB512, - ssa.OpAMD64VRSQRT14PDMasked256, - ssa.OpAMD64VPMAXSD128, - ssa.OpAMD64VPADDQMasked128, - ssa.OpAMD64VPOPCNTQ256, - ssa.OpAMD64VPMAXSQ128, - ssa.OpAMD64VANDPD256, - ssa.OpAMD64VPSUBSBMasked128, - ssa.OpAMD64VSQRTPS128, - ssa.OpAMD64VPCMPGTQ256, - ssa.OpAMD64VPADDSBMasked128, - ssa.OpAMD64VXORPD512, - ssa.OpAMD64VPCMPGTWMasked128, - ssa.OpAMD64VPMULLQ128, - ssa.OpAMD64VPSUBWMasked128, - ssa.OpAMD64VSCALEFPDMasked128, - ssa.OpAMD64VPMAXUD256, - ssa.OpAMD64VANDNPDMasked128, - ssa.OpAMD64VPMINUWMasked512, - ssa.OpAMD64VRSQRTPS256, - ssa.OpAMD64VPXORQ512, + ssa.OpAMD64VDIVPDMasked512, + ssa.OpAMD64VMAXPSMasked512, + ssa.OpAMD64VMAXPSMasked128, + ssa.OpAMD64VMAXPSMasked256, + ssa.OpAMD64VMAXPDMasked128, + ssa.OpAMD64VMAXPDMasked256, + ssa.OpAMD64VMAXPDMasked512, + ssa.OpAMD64VPMAXSWMasked256, + ssa.OpAMD64VPMAXSWMasked512, + ssa.OpAMD64VPMAXSWMasked128, + ssa.OpAMD64VPMAXSDMasked512, + ssa.OpAMD64VPMAXSDMasked128, ssa.OpAMD64VPMAXSDMasked256, - ssa.OpAMD64VPMINUWMasked256, - 
ssa.OpAMD64VPMINSQ256, - ssa.OpAMD64VPMULDQMasked256, - ssa.OpAMD64VPMULUDQ128, - ssa.OpAMD64VPMAXUB256, - ssa.OpAMD64VPMINUW256, - ssa.OpAMD64VPADDSWMasked512, - ssa.OpAMD64VPADDSB512, - ssa.OpAMD64VPMINUQMasked128, - ssa.OpAMD64VPMULUDQ256, - ssa.OpAMD64VANDPSMasked256, - ssa.OpAMD64VPADDDMasked128, - ssa.OpAMD64VPABSD256, - ssa.OpAMD64VANDNPSMasked128, - ssa.OpAMD64VPABSDMasked128, - ssa.OpAMD64VPADDSWMasked128, - ssa.OpAMD64VPMULHUW512, - ssa.OpAMD64VPSUBQ128, - ssa.OpAMD64VPADDDMasked512, - ssa.OpAMD64VRCP14PS128, - ssa.OpAMD64VMULPSMasked512, - ssa.OpAMD64VORPDMasked256, - ssa.OpAMD64VPMULHUWMasked256, - ssa.OpAMD64VPORDMasked128, - ssa.OpAMD64VSCALEFPDMasked512, - ssa.OpAMD64VSCALEFPD512, + ssa.OpAMD64VPMAXSQMasked128, + ssa.OpAMD64VPMAXSQMasked256, + ssa.OpAMD64VPMAXSQMasked512, + ssa.OpAMD64VPMAXSBMasked128, + ssa.OpAMD64VPMAXSBMasked256, ssa.OpAMD64VPMAXSBMasked512, + ssa.OpAMD64VPMAXUWMasked256, + ssa.OpAMD64VPMAXUWMasked512, + ssa.OpAMD64VPMAXUWMasked128, + ssa.OpAMD64VPMAXUDMasked512, + ssa.OpAMD64VPMAXUDMasked128, + ssa.OpAMD64VPMAXUDMasked256, + ssa.OpAMD64VPMAXUQMasked128, + ssa.OpAMD64VPMAXUQMasked256, + ssa.OpAMD64VPMAXUQMasked512, + ssa.OpAMD64VPMAXUBMasked128, + ssa.OpAMD64VPMAXUBMasked256, + ssa.OpAMD64VPMAXUBMasked512, + ssa.OpAMD64VMINPSMasked512, + ssa.OpAMD64VMINPSMasked128, + ssa.OpAMD64VMINPSMasked256, + ssa.OpAMD64VMINPDMasked128, + ssa.OpAMD64VMINPDMasked256, + ssa.OpAMD64VMINPDMasked512, + ssa.OpAMD64VPMINSWMasked256, + ssa.OpAMD64VPMINSWMasked512, + ssa.OpAMD64VPMINSWMasked128, + ssa.OpAMD64VPMINSDMasked512, + ssa.OpAMD64VPMINSDMasked128, + ssa.OpAMD64VPMINSDMasked256, + ssa.OpAMD64VPMINSQMasked128, ssa.OpAMD64VPMINSQMasked256, - ssa.OpAMD64VXORPS512, - ssa.OpAMD64VPSUBSB128, - ssa.OpAMD64VPMAXSW128, + ssa.OpAMD64VPMINSQMasked512, + ssa.OpAMD64VPMINSBMasked128, + ssa.OpAMD64VPMINSBMasked256, ssa.OpAMD64VPMINSBMasked512, - ssa.OpAMD64VADDPDMasked512, - ssa.OpAMD64VPSUBB512, - ssa.OpAMD64VPMULHWMasked512, - ssa.OpAMD64VRSQRT14PSMasked256, - ssa.OpAMD64VPOPCNTBMasked128, - ssa.OpAMD64VPOPCNTD128, - ssa.OpAMD64VPMAXUQMasked256, - ssa.OpAMD64VPMINSB256, - ssa.OpAMD64VPABSQMasked512, - ssa.OpAMD64VDIVPSMasked128, - ssa.OpAMD64VPAVGBMasked128, - ssa.OpAMD64VPCMPEQW512, - ssa.OpAMD64VPMULHUW256, - ssa.OpAMD64VPMULLW512, - ssa.OpAMD64VPADDB512, - ssa.OpAMD64VPOPCNTB512, - ssa.OpAMD64VPORD512, - ssa.OpAMD64VMAXPDMasked128, - ssa.OpAMD64VPMAXSW512, - ssa.OpAMD64VPMINUW512, - ssa.OpAMD64VADDPSMasked256, - ssa.OpAMD64VPCMPGTQMasked512, - ssa.OpAMD64VRCP14PD128, - ssa.OpAMD64VPHSUBSW256, - ssa.OpAMD64VRSQRT14PSMasked512, - ssa.OpAMD64VORPSMasked128, - ssa.OpAMD64VMAXPS256, - ssa.OpAMD64VPANDQMasked128, - ssa.OpAMD64VPCMPEQWMasked512, - ssa.OpAMD64VRCP14PDMasked128, - ssa.OpAMD64VADDPSMasked512, - ssa.OpAMD64VPMAXSQMasked128, - ssa.OpAMD64VPOPCNTDMasked128, - ssa.OpAMD64VPHADDW256, - ssa.OpAMD64VXORPSMasked256, - ssa.OpAMD64VPMINSQMasked512, - ssa.OpAMD64VRCP14PS256, - ssa.OpAMD64VPSIGNB256, - ssa.OpAMD64VPSUBSW256, - ssa.OpAMD64VDIVPD512, - ssa.OpAMD64VPADDW128, - ssa.OpAMD64VXORPS256, - ssa.OpAMD64VANDNPSMasked512, - ssa.OpAMD64VPAVGB512, - ssa.OpAMD64VPMAXUW512, - ssa.OpAMD64VPMAXSWMasked512, - ssa.OpAMD64VPSIGNW256, - ssa.OpAMD64VSQRTPSMasked128, - ssa.OpAMD64VPCMPEQQMasked128, - ssa.OpAMD64VPOPCNTWMasked256, - ssa.OpAMD64VPCMPGTD128, - ssa.OpAMD64VMAXPDMasked256, - ssa.OpAMD64VPANDNQMasked256, - ssa.OpAMD64VPMINSB512, - ssa.OpAMD64VPMULHUWMasked128, - ssa.OpAMD64VPMINUW128, - ssa.OpAMD64VMINPDMasked512, - ssa.OpAMD64VMAXPSMasked128, - ssa.OpAMD64VPMAXSBMasked256, - 
ssa.OpAMD64VPANDDMasked128, - ssa.OpAMD64VSQRTPD256, - ssa.OpAMD64VPCMPGTD256, - ssa.OpAMD64VPOPCNTQMasked512, - ssa.OpAMD64VPMAXUB512, - ssa.OpAMD64VANDPDMasked128, - ssa.OpAMD64VPANDNQMasked128, - ssa.OpAMD64VSCALEFPS256, - ssa.OpAMD64VPCMPEQQMasked256, - ssa.OpAMD64VSCALEFPSMasked512, - ssa.OpAMD64VANDPDMasked512, - ssa.OpAMD64VORPS128, - ssa.OpAMD64VPMINUB128, - ssa.OpAMD64VPMULHWMasked128, - ssa.OpAMD64VPAVGB256, - ssa.OpAMD64VPMINSQ128, - ssa.OpAMD64VPCMPEQQ256, - ssa.OpAMD64VMULPD512, - ssa.OpAMD64VPABSQ512, - ssa.OpAMD64VPABSDMasked256, - ssa.OpAMD64VPADDBMasked256, - ssa.OpAMD64VPSIGNW128, - ssa.OpAMD64VPABSQ128, - ssa.OpAMD64VPMINUQMasked256, - ssa.OpAMD64VPMULHW256, - ssa.OpAMD64VSCALEFPS512, - ssa.OpAMD64VRSQRT14PDMasked512, - ssa.OpAMD64VPMINUBMasked256, - ssa.OpAMD64VADDPD128, - ssa.OpAMD64VSCALEFPD256, - ssa.OpAMD64VANDPS128, - ssa.OpAMD64VMULPS256, - ssa.OpAMD64VPMINSW128, - ssa.OpAMD64VPMAXSBMasked128, - ssa.OpAMD64VPMAXSWMasked128, - ssa.OpAMD64VMINPS256, - ssa.OpAMD64VPMAXUQ128, - ssa.OpAMD64VPMINSBMasked256, - ssa.OpAMD64VPAVGW256, - ssa.OpAMD64VMAXPD128, - ssa.OpAMD64VPSIGNB128, - ssa.OpAMD64VPMINUB512, - ssa.OpAMD64VPABSW128, - ssa.OpAMD64VPCMPGTW128, - ssa.OpAMD64VORPS256, - ssa.OpAMD64VPMINSB128, - ssa.OpAMD64VPMINUQ128, - ssa.OpAMD64VPMINSQ512, - ssa.OpAMD64VSQRTPDMasked512, - ssa.OpAMD64VPMINSWMasked256, - ssa.OpAMD64VMINPSMasked256, - ssa.OpAMD64VPMAXUBMasked128, - ssa.OpAMD64VPCMPEQB256, - ssa.OpAMD64VANDNPD512, - ssa.OpAMD64VPCMPGTQMasked128, - ssa.OpAMD64VANDPDMasked256, - ssa.OpAMD64VORPD128, - ssa.OpAMD64VMAXPS512, - ssa.OpAMD64VPMULLD512, - ssa.OpAMD64VMINPDMasked128, - ssa.OpAMD64VANDNPS128, - ssa.OpAMD64VMULPS128, - ssa.OpAMD64VPMULLQ512, - ssa.OpAMD64VRSQRT14PS512, - ssa.OpAMD64VMINPSMasked128, - ssa.OpAMD64VRCP14PSMasked256, - ssa.OpAMD64VPMAXUD128, - ssa.OpAMD64VPMINSBMasked128, - ssa.OpAMD64VPCMPEQQ512, - ssa.OpAMD64VSQRTPS256, - ssa.OpAMD64VPMULHWMasked256, - ssa.OpAMD64VSQRTPSMasked256, - ssa.OpAMD64VDIVPS128, - ssa.OpAMD64VRCP14PD256, - ssa.OpAMD64VPMAXUDMasked512, - ssa.OpAMD64VPMAXUQMasked512, - ssa.OpAMD64VANDPSMasked128, - ssa.OpAMD64VPABSQMasked128, - ssa.OpAMD64VPMAXSQMasked256, - ssa.OpAMD64VPAVGBMasked256, - ssa.OpAMD64VPCMPEQWMasked256, - ssa.OpAMD64VSCALEFPSMasked256, - ssa.OpAMD64VPAVGW512: - p.From.Type = obj.TYPE_REG - p.From.Reg = simdReg(v.Args[0]) - - default: - // At least one arg is required. 
- return false - } - - // Second arg - switch v.Op { - // Registers - case ssa.OpAMD64VPMINSW256, - ssa.OpAMD64VPMULLD256, - ssa.OpAMD64VORPD512, - ssa.OpAMD64VPCMPUBMasked512, - ssa.OpAMD64VPCMPUQ256, - ssa.OpAMD64VPMINSDMasked128, - ssa.OpAMD64VDIVPD256, - ssa.OpAMD64VPCMPEQW256, - ssa.OpAMD64VPCMPBMasked128, - ssa.OpAMD64VPCMPUWMasked128, - ssa.OpAMD64VHADDPS128, - ssa.OpAMD64VPXOR128, - ssa.OpAMD64VPADDQ128, - ssa.OpAMD64VMULPSMasked128, - ssa.OpAMD64VPANDQMasked256, - ssa.OpAMD64VPCMPBMasked512, - ssa.OpAMD64VSQRTPDMasked128, - ssa.OpAMD64VPMAXUB128, - ssa.OpAMD64VPSUBW256, - ssa.OpAMD64VPMAXSQ512, - ssa.OpAMD64VANDNPS512, - ssa.OpAMD64VPCMPUWMasked512, - ssa.OpAMD64VPMULHUWMasked512, - ssa.OpAMD64VPCMPD128, - ssa.OpAMD64VPCMPDMasked256, - ssa.OpAMD64VPHADDW128, - ssa.OpAMD64VPCMPGTQMasked256, - ssa.OpAMD64VPMULLQMasked256, - ssa.OpAMD64VCMPPD128, - ssa.OpAMD64VCMPPS256, - ssa.OpAMD64VPADDW512, - ssa.OpAMD64VPMULLDMasked128, - ssa.OpAMD64VPCMPEQQ128, - ssa.OpAMD64VPAVGW128, - ssa.OpAMD64VPOR256, - ssa.OpAMD64VPADDQMasked512, - ssa.OpAMD64VPCMPDMasked128, - ssa.OpAMD64VPCMPUW256, - ssa.OpAMD64VPMINUBMasked128, - ssa.OpAMD64VPMULLWMasked128, - ssa.OpAMD64VORPD256, - ssa.OpAMD64VPANDN256, - ssa.OpAMD64VPANDD512, - ssa.OpAMD64VCMPPSMasked256, - ssa.OpAMD64VPCMPDMasked512, - ssa.OpAMD64VPORQ512, - ssa.OpAMD64VPCMPQ256, - ssa.OpAMD64VPMULLWMasked256, - ssa.OpAMD64VPSUBSW128, - ssa.OpAMD64VPCMPQMasked512, - ssa.OpAMD64VPADDSBMasked256, - ssa.OpAMD64VPCMPUD128, - ssa.OpAMD64VPCMPGTB256, - ssa.OpAMD64VMULPSMasked256, - ssa.OpAMD64VPCMPUW128, - ssa.OpAMD64VPCMPEQD256, - ssa.OpAMD64VPSUBSW512, - ssa.OpAMD64VPADDD512, - ssa.OpAMD64VPADDBMasked512, - ssa.OpAMD64VPADDSBMasked512, - ssa.OpAMD64VPCMPBMasked256, - ssa.OpAMD64VPMULUDQ512, - ssa.OpAMD64VPADDSW512, - ssa.OpAMD64VPCMPB256, - ssa.OpAMD64VPCMPUWMasked256, - ssa.OpAMD64VPORQMasked128, - ssa.OpAMD64VANDPD128, - ssa.OpAMD64VPCMPEQD128, - ssa.OpAMD64VPHSUBSW128, - ssa.OpAMD64VPADDSW256, - ssa.OpAMD64VPCMPUDMasked512, - ssa.OpAMD64VPSUBSBMasked512, - ssa.OpAMD64VPMULHUW128, - ssa.OpAMD64VPCMPGTW512, - ssa.OpAMD64VPCMPWMasked128, - ssa.OpAMD64VSQRTPDMasked256, - ssa.OpAMD64VCMPPD256, - ssa.OpAMD64VPAVGWMasked256, - ssa.OpAMD64VPMULUDQMasked256, - ssa.OpAMD64VPMINSD512, - ssa.OpAMD64VPADDWMasked512, - ssa.OpAMD64VPOPCNTWMasked128, - ssa.OpAMD64VCMPPDMasked256, - ssa.OpAMD64VMAXPS128, - ssa.OpAMD64VPMULLD128, - ssa.OpAMD64VPSUBB256, - ssa.OpAMD64VMINPD128, - ssa.OpAMD64VPANDNQMasked512, - ssa.OpAMD64VANDPS512, - ssa.OpAMD64VPCMPQMasked256, - ssa.OpAMD64VPOPCNTDMasked256, - ssa.OpAMD64VANDNPD256, - ssa.OpAMD64VPAND128, - ssa.OpAMD64VPANDN128, - ssa.OpAMD64VXORPD256, - ssa.OpAMD64VMAXPDMasked512, - ssa.OpAMD64VPCMPUQ512, - ssa.OpAMD64VPCMPUW512, - ssa.OpAMD64VPMAXUWMasked256, - ssa.OpAMD64VCMPPSMasked512, - ssa.OpAMD64VPMINSQMasked128, - ssa.OpAMD64VPMULDQ256, - ssa.OpAMD64VPSUBSWMasked512, - ssa.OpAMD64VPXORQMasked128, - ssa.OpAMD64VPCMPEQW128, - ssa.OpAMD64VPABSWMasked128, - ssa.OpAMD64VPMAXSWMasked256, - ssa.OpAMD64VPMULDQ128, - ssa.OpAMD64VPMULLWMasked512, - ssa.OpAMD64VDIVPS512, - ssa.OpAMD64VPMULDQMasked512, - ssa.OpAMD64VPCMPQMasked128, - ssa.OpAMD64VPCMPUBMasked256, - ssa.OpAMD64VRSQRT14PDMasked128, - ssa.OpAMD64VPCMPUD512, - ssa.OpAMD64VXORPSMasked128, - ssa.OpAMD64VPADDW256, - ssa.OpAMD64VANDNPSMasked256, - ssa.OpAMD64VPCMPUQ128, - ssa.OpAMD64VPMULUDQMasked128, - ssa.OpAMD64VCMPPS128, - ssa.OpAMD64VPCMPEQWMasked128, - ssa.OpAMD64VHSUBPS128, - ssa.OpAMD64VPMAXSDMasked512, - ssa.OpAMD64VPABSQMasked256, - ssa.OpAMD64VADDPS256, - 
ssa.OpAMD64VHSUBPS256, - ssa.OpAMD64VPSUBB128, - ssa.OpAMD64VPCMPUB128, - ssa.OpAMD64VMAXPD512, - ssa.OpAMD64VPAVGBMasked512, - ssa.OpAMD64VPCMPUQMasked128, - ssa.OpAMD64VPHSUBW256, - ssa.OpAMD64VPANDNDMasked256, - ssa.OpAMD64VPMAXUWMasked128, - ssa.OpAMD64VXORPS128, - ssa.OpAMD64VMINPS128, - ssa.OpAMD64VADDPDMasked128, - ssa.OpAMD64VPMULLQMasked512, - ssa.OpAMD64VPORQMasked256, - ssa.OpAMD64VPMULHW128, - ssa.OpAMD64VSCALEFPDMasked256, - ssa.OpAMD64VPSUBDMasked512, - ssa.OpAMD64VPSUBQ512, - ssa.OpAMD64VPADDB128, - ssa.OpAMD64VPSUBSB256, - ssa.OpAMD64VDIVPSMasked512, - ssa.OpAMD64VPCMPUB512, - ssa.OpAMD64VSCALEFPS128, - ssa.OpAMD64VPSIGND128, - ssa.OpAMD64VDIVPDMasked128, - ssa.OpAMD64VXORPDMasked128, - ssa.OpAMD64VPCMPUB256, - ssa.OpAMD64VPMINSWMasked512, - ssa.OpAMD64VPXORD512, - ssa.OpAMD64VHADDPD256, - ssa.OpAMD64VPMAXSB128, - ssa.OpAMD64VPHSUBD128, - ssa.OpAMD64VPCMPUQMasked512, - ssa.OpAMD64VPANDDMasked256, - ssa.OpAMD64VPCMPUDMasked256, - ssa.OpAMD64VRCP14PSMasked128, - ssa.OpAMD64VMULPDMasked512, - ssa.OpAMD64VPSUBD512, - ssa.OpAMD64VANDNPD128, - ssa.OpAMD64VPHADDD256, - ssa.OpAMD64VMINPS512, - ssa.OpAMD64VPMULDQ512, - ssa.OpAMD64VCMPPSMasked128, - ssa.OpAMD64VORPSMasked512, - ssa.OpAMD64VPCMPB128, - ssa.OpAMD64VPORDMasked256, - ssa.OpAMD64VPMAXSQMasked512, - ssa.OpAMD64VPADDDMasked256, - ssa.OpAMD64VPSUBQMasked256, - ssa.OpAMD64VXORPSMasked512, - ssa.OpAMD64VPCMPEQB128, - ssa.OpAMD64VPMULLW256, - ssa.OpAMD64VDIVPDMasked512, - ssa.OpAMD64VPMAXUDMasked128, - ssa.OpAMD64VPADDB256, - ssa.OpAMD64VPABSWMasked256, - ssa.OpAMD64VPOPCNTWMasked512, - ssa.OpAMD64VPSUBD128, - ssa.OpAMD64VPXORQMasked512, - ssa.OpAMD64VMINPDMasked256, - ssa.OpAMD64VPADDWMasked256, - ssa.OpAMD64VPMAXSQ256, - ssa.OpAMD64VPSUBWMasked512, - ssa.OpAMD64VMULPS512, - ssa.OpAMD64VPCMPUD256, - ssa.OpAMD64VPMULLQMasked128, - ssa.OpAMD64VPADDSB128, - ssa.OpAMD64VMINPD512, - ssa.OpAMD64VPMAXSD512, - ssa.OpAMD64VPMINUWMasked128, - ssa.OpAMD64VPOPCNTQMasked128, - ssa.OpAMD64VPCMPQ512, - ssa.OpAMD64VPMINUB256, - ssa.OpAMD64VPCMPWMasked512, - ssa.OpAMD64VPXORDMasked256, - ssa.OpAMD64VPORQMasked512, - ssa.OpAMD64VORPDMasked512, - ssa.OpAMD64VPMINSD256, - ssa.OpAMD64VPOPCNTDMasked512, - ssa.OpAMD64VPMINSWMasked128, - ssa.OpAMD64VPXORDMasked512, - ssa.OpAMD64VDIVPS256, - ssa.OpAMD64VPHADDSW128, - ssa.OpAMD64VPSUBD256, - ssa.OpAMD64VDIVPD128, - ssa.OpAMD64VPCMPUDMasked128, - ssa.OpAMD64VPCMPEQQMasked512, - ssa.OpAMD64VRCP14PDMasked256, - ssa.OpAMD64VPMULUDQMasked512, - ssa.OpAMD64VMAXPSMasked256, - ssa.OpAMD64VPMULLQ256, - ssa.OpAMD64VANDNPDMasked256, - ssa.OpAMD64VPADDWMasked128, - ssa.OpAMD64VCMPPDMasked128, - ssa.OpAMD64VMULPDMasked128, - ssa.OpAMD64VPCMPGTWMasked512, - ssa.OpAMD64VPOR128, - ssa.OpAMD64VANDNPDMasked512, - ssa.OpAMD64VPSUBBMasked512, - ssa.OpAMD64VPANDNDMasked128, - ssa.OpAMD64VPCMPQ128, - ssa.OpAMD64VPMINUDMasked128, - ssa.OpAMD64VXORPD128, - ssa.OpAMD64VPAVGWMasked128, - ssa.OpAMD64VPCMPUQMasked256, - ssa.OpAMD64VPMULLDMasked256, - ssa.OpAMD64VPSUBBMasked256, - ssa.OpAMD64VRSQRT14PSMasked128, - ssa.OpAMD64VPADDBMasked128, - ssa.OpAMD64VPMAXUBMasked512, - ssa.OpAMD64VPMULLW128, - ssa.OpAMD64VPSUBW128, - ssa.OpAMD64VPXOR256, - ssa.OpAMD64VPCMPD512, - ssa.OpAMD64VDIVPSMasked256, - ssa.OpAMD64VPOPCNTBMasked256, - ssa.OpAMD64VPADDD128, - ssa.OpAMD64VHADDPD128, - ssa.OpAMD64VPSUBDMasked256, - ssa.OpAMD64VPANDNQ512, - ssa.OpAMD64VPAVGWMasked512, - ssa.OpAMD64VSCALEFPSMasked128, - ssa.OpAMD64VMINPSMasked512, - ssa.OpAMD64VPSUBQMasked512, - ssa.OpAMD64VSQRTPSMasked512, - ssa.OpAMD64VPCMPD256, - 
ssa.OpAMD64VPMINSDMasked256, - ssa.OpAMD64VANDPSMasked512, - ssa.OpAMD64VPMAXUBMasked256, - ssa.OpAMD64VPSUBWMasked256, - ssa.OpAMD64VPADDSWMasked256, - ssa.OpAMD64VPCMPB512, - ssa.OpAMD64VPANDDMasked512, - ssa.OpAMD64VPABSWMasked512, - ssa.OpAMD64VPCMPWMasked256, - ssa.OpAMD64VPSUBSWMasked256, - ssa.OpAMD64VPSUBQMasked128, - ssa.OpAMD64VPCMPW256, - ssa.OpAMD64VPMINUD256, - ssa.OpAMD64VCMPPDMasked512, - ssa.OpAMD64VCMPPD512, - ssa.OpAMD64VPMINUBMasked512, - ssa.OpAMD64VPSUBQ256, - ssa.OpAMD64VPMINSD128, - ssa.OpAMD64VPADDSB256, - ssa.OpAMD64VPMULLDMasked512, - ssa.OpAMD64VANDPD512, - ssa.OpAMD64VMULPDMasked256, - ssa.OpAMD64VPHADDSW256, - ssa.OpAMD64VPAND256, - ssa.OpAMD64VADDPS512, - ssa.OpAMD64VPMINUQ256, - ssa.OpAMD64VADDPD256, - ssa.OpAMD64VPANDND512, - ssa.OpAMD64VPSUBSWMasked128, - ssa.OpAMD64VPMAXSW256, - ssa.OpAMD64VMAXPD256, - ssa.OpAMD64VMULPD128, - ssa.OpAMD64VPOPCNTQMasked256, - ssa.OpAMD64VPADDD256, - ssa.OpAMD64VMINPD256, - ssa.OpAMD64VPXORQMasked256, - ssa.OpAMD64VPOPCNTBMasked512, - ssa.OpAMD64VPANDQMasked512, - ssa.OpAMD64VPCMPUBMasked128, - ssa.OpAMD64VPMINSW512, - ssa.OpAMD64VPORDMasked512, - ssa.OpAMD64VPAVGB128, - ssa.OpAMD64VADDPD512, - ssa.OpAMD64VPMULHW512, - ssa.OpAMD64VPADDQ256, - ssa.OpAMD64VPMINUQ512, - ssa.OpAMD64VORPS512, - ssa.OpAMD64VPMAXUWMasked512, - ssa.OpAMD64VPMAXSDMasked128, - ssa.OpAMD64VPMULDQMasked128, - ssa.OpAMD64VSCALEFPD128, - ssa.OpAMD64VPCMPGTW256, - ssa.OpAMD64VPMAXUW256, - ssa.OpAMD64VPMAXUD512, - ssa.OpAMD64VPMAXUQ256, - ssa.OpAMD64VPMINUDMasked256, - ssa.OpAMD64VPABSBMasked512, - ssa.OpAMD64VPANDNDMasked512, - ssa.OpAMD64VPMINUDMasked512, - ssa.OpAMD64VPABSBMasked256, - ssa.OpAMD64VXORPDMasked256, - ssa.OpAMD64VMAXPSMasked512, - ssa.OpAMD64VPMINUD512, - ssa.OpAMD64VPABSBMasked128, - ssa.OpAMD64VPSUBBMasked128, - ssa.OpAMD64VPMINSDMasked512, - ssa.OpAMD64VPSUBSBMasked256, - ssa.OpAMD64VORPSMasked256, - ssa.OpAMD64VADDPSMasked128, - ssa.OpAMD64VPMAXSB512, - ssa.OpAMD64VXORPDMasked512, - ssa.OpAMD64VADDPS128, - ssa.OpAMD64VPMINUQMasked512, - ssa.OpAMD64VANDNPS256, - ssa.OpAMD64VPMAXSB256, - ssa.OpAMD64VDIVPDMasked256, - ssa.OpAMD64VPHSUBW128, - ssa.OpAMD64VPADDQMasked256, - ssa.OpAMD64VPMAXSD256, - ssa.OpAMD64VPABSDMasked512, - ssa.OpAMD64VPADDQ512, - ssa.OpAMD64VPMAXUDMasked256, - ssa.OpAMD64VPCMPGTB128, - ssa.OpAMD64VPMAXUQMasked128, - ssa.OpAMD64VPCMPW128, - ssa.OpAMD64VPCMPGTQ128, - ssa.OpAMD64VPANDQ512, - ssa.OpAMD64VRCP14PSMasked512, - ssa.OpAMD64VANDPS256, - ssa.OpAMD64VPHSUBD256, - ssa.OpAMD64VPSUBW512, - ssa.OpAMD64VHADDPS256, - ssa.OpAMD64VMULPD256, - ssa.OpAMD64VRCP14PDMasked512, - ssa.OpAMD64VPSUBDMasked128, - ssa.OpAMD64VPXORDMasked128, - ssa.OpAMD64VHSUBPD128, - ssa.OpAMD64VPMAXUW128, - ssa.OpAMD64VORPDMasked128, - ssa.OpAMD64VPHADDD128, - ssa.OpAMD64VPMINUD128, - ssa.OpAMD64VPSIGND256, - ssa.OpAMD64VPADDSW128, - ssa.OpAMD64VCMPPS512, - ssa.OpAMD64VPMAXUQ512, - ssa.OpAMD64VPCMPGTQ512, - ssa.OpAMD64VADDPDMasked256, - ssa.OpAMD64VHSUBPD256, - ssa.OpAMD64VPCMPGTWMasked256, - ssa.OpAMD64VPSUBSB512, - ssa.OpAMD64VRSQRT14PDMasked256, - ssa.OpAMD64VPMAXSD128, - ssa.OpAMD64VPADDQMasked128, - ssa.OpAMD64VPMAXSQ128, - ssa.OpAMD64VANDPD256, - ssa.OpAMD64VPSUBSBMasked128, - ssa.OpAMD64VPCMPGTQ256, - ssa.OpAMD64VPADDSBMasked128, - ssa.OpAMD64VXORPD512, - ssa.OpAMD64VPCMPGTWMasked128, - ssa.OpAMD64VPMULLQ128, - ssa.OpAMD64VPSUBWMasked128, - ssa.OpAMD64VSCALEFPDMasked128, - ssa.OpAMD64VPMAXUD256, - ssa.OpAMD64VANDNPDMasked128, - ssa.OpAMD64VPMINUWMasked512, - ssa.OpAMD64VPXORQ512, - ssa.OpAMD64VPMAXSDMasked256, - ssa.OpAMD64VPMINUWMasked256, - 
ssa.OpAMD64VPMINSQ256, - ssa.OpAMD64VPMULDQMasked256, - ssa.OpAMD64VPMULUDQ128, - ssa.OpAMD64VPMAXUB256, - ssa.OpAMD64VPMINUW256, - ssa.OpAMD64VPADDSWMasked512, - ssa.OpAMD64VPADDSB512, - ssa.OpAMD64VPMINUQMasked128, - ssa.OpAMD64VPMULUDQ256, - ssa.OpAMD64VANDPSMasked256, - ssa.OpAMD64VPADDDMasked128, - ssa.OpAMD64VPCMPW512, - ssa.OpAMD64VANDNPSMasked128, - ssa.OpAMD64VPABSDMasked128, - ssa.OpAMD64VPADDSWMasked128, - ssa.OpAMD64VPMULHUW512, - ssa.OpAMD64VPSUBQ128, - ssa.OpAMD64VPADDDMasked512, - ssa.OpAMD64VMULPSMasked512, - ssa.OpAMD64VORPDMasked256, - ssa.OpAMD64VPMULHUWMasked256, - ssa.OpAMD64VPORDMasked128, - ssa.OpAMD64VSCALEFPDMasked512, - ssa.OpAMD64VSCALEFPD512, - ssa.OpAMD64VPMAXSBMasked512, - ssa.OpAMD64VPMINSQMasked256, - ssa.OpAMD64VXORPS512, - ssa.OpAMD64VPSUBSB128, - ssa.OpAMD64VPMAXSW128, - ssa.OpAMD64VPMINSBMasked512, - ssa.OpAMD64VADDPDMasked512, - ssa.OpAMD64VPSUBB512, - ssa.OpAMD64VPMULHWMasked512, - ssa.OpAMD64VRSQRT14PSMasked256, - ssa.OpAMD64VPOPCNTBMasked128, - ssa.OpAMD64VPMAXUQMasked256, - ssa.OpAMD64VPMINSB256, - ssa.OpAMD64VPABSQMasked512, - ssa.OpAMD64VDIVPSMasked128, - ssa.OpAMD64VPAVGBMasked128, - ssa.OpAMD64VPCMPEQW512, - ssa.OpAMD64VPMULHUW256, - ssa.OpAMD64VPMULLW512, - ssa.OpAMD64VPADDB512, - ssa.OpAMD64VPORD512, - ssa.OpAMD64VMAXPDMasked128, - ssa.OpAMD64VPMAXSW512, - ssa.OpAMD64VPMINUW512, - ssa.OpAMD64VADDPSMasked256, - ssa.OpAMD64VPCMPGTQMasked512, - ssa.OpAMD64VPHSUBSW256, - ssa.OpAMD64VRSQRT14PSMasked512, - ssa.OpAMD64VORPSMasked128, - ssa.OpAMD64VMAXPS256, - ssa.OpAMD64VPANDQMasked128, - ssa.OpAMD64VPCMPEQWMasked512, - ssa.OpAMD64VRCP14PDMasked128, - ssa.OpAMD64VADDPSMasked512, - ssa.OpAMD64VPMAXSQMasked128, - ssa.OpAMD64VPOPCNTDMasked128, - ssa.OpAMD64VPHADDW256, - ssa.OpAMD64VXORPSMasked256, - ssa.OpAMD64VPMINSQMasked512, - ssa.OpAMD64VPSIGNB256, - ssa.OpAMD64VPSUBSW256, - ssa.OpAMD64VDIVPD512, - ssa.OpAMD64VPADDW128, - ssa.OpAMD64VXORPS256, - ssa.OpAMD64VANDNPSMasked512, - ssa.OpAMD64VPAVGB512, - ssa.OpAMD64VPMAXUW512, - ssa.OpAMD64VPMAXSWMasked512, - ssa.OpAMD64VPSIGNW256, - ssa.OpAMD64VSQRTPSMasked128, - ssa.OpAMD64VPCMPEQQMasked128, - ssa.OpAMD64VPOPCNTWMasked256, - ssa.OpAMD64VPCMPGTD128, - ssa.OpAMD64VMAXPDMasked256, - ssa.OpAMD64VPANDNQMasked256, - ssa.OpAMD64VPMINSB512, - ssa.OpAMD64VPMULHUWMasked128, - ssa.OpAMD64VPMINUW128, - ssa.OpAMD64VMINPDMasked512, - ssa.OpAMD64VMAXPSMasked128, - ssa.OpAMD64VPMAXSBMasked256, - ssa.OpAMD64VPANDDMasked128, - ssa.OpAMD64VPCMPGTD256, - ssa.OpAMD64VPOPCNTQMasked512, - ssa.OpAMD64VPMAXUB512, - ssa.OpAMD64VANDPDMasked128, - ssa.OpAMD64VPANDNQMasked128, - ssa.OpAMD64VSCALEFPS256, - ssa.OpAMD64VPCMPEQQMasked256, - ssa.OpAMD64VSCALEFPSMasked512, - ssa.OpAMD64VANDPDMasked512, - ssa.OpAMD64VORPS128, - ssa.OpAMD64VPMINUB128, - ssa.OpAMD64VPMULHWMasked128, - ssa.OpAMD64VPAVGB256, - ssa.OpAMD64VPMINSQ128, - ssa.OpAMD64VPCMPEQQ256, - ssa.OpAMD64VMULPD512, - ssa.OpAMD64VPABSDMasked256, - ssa.OpAMD64VPADDBMasked256, - ssa.OpAMD64VPSIGNW128, - ssa.OpAMD64VPMINUQMasked256, - ssa.OpAMD64VPMULHW256, - ssa.OpAMD64VSCALEFPS512, - ssa.OpAMD64VRSQRT14PDMasked512, - ssa.OpAMD64VPMINUBMasked256, - ssa.OpAMD64VADDPD128, - ssa.OpAMD64VSCALEFPD256, - ssa.OpAMD64VANDPS128, - ssa.OpAMD64VMULPS256, - ssa.OpAMD64VPMINSW128, - ssa.OpAMD64VPMAXSBMasked128, - ssa.OpAMD64VPMAXSWMasked128, - ssa.OpAMD64VMINPS256, - ssa.OpAMD64VPMAXUQ128, - ssa.OpAMD64VPMINSBMasked256, - ssa.OpAMD64VPAVGW256, - ssa.OpAMD64VMAXPD128, - ssa.OpAMD64VPSIGNB128, - ssa.OpAMD64VPMINUB512, - ssa.OpAMD64VPCMPGTW128, - ssa.OpAMD64VORPS256, - ssa.OpAMD64VPMINSB128, - 
ssa.OpAMD64VPMINUQ128, - ssa.OpAMD64VPMINSQ512, - ssa.OpAMD64VSQRTPDMasked512, - ssa.OpAMD64VPMINSWMasked256, - ssa.OpAMD64VMINPSMasked256, - ssa.OpAMD64VPMAXUBMasked128, - ssa.OpAMD64VPCMPEQB256, - ssa.OpAMD64VANDNPD512, - ssa.OpAMD64VPCMPGTQMasked128, - ssa.OpAMD64VANDPDMasked256, - ssa.OpAMD64VORPD128, - ssa.OpAMD64VMAXPS512, - ssa.OpAMD64VPMULLD512, - ssa.OpAMD64VMINPDMasked128, - ssa.OpAMD64VANDNPS128, - ssa.OpAMD64VMULPS128, - ssa.OpAMD64VPMULLQ512, - ssa.OpAMD64VMINPSMasked128, - ssa.OpAMD64VRCP14PSMasked256, - ssa.OpAMD64VPMAXUD128, - ssa.OpAMD64VPMINSBMasked128, - ssa.OpAMD64VPCMPEQQ512, - ssa.OpAMD64VPMULHWMasked256, - ssa.OpAMD64VSQRTPSMasked256, - ssa.OpAMD64VDIVPS128, - ssa.OpAMD64VPMAXUDMasked512, - ssa.OpAMD64VPMAXUQMasked512, - ssa.OpAMD64VANDPSMasked128, - ssa.OpAMD64VPABSQMasked128, - ssa.OpAMD64VPMAXSQMasked256, - ssa.OpAMD64VPAVGBMasked256, - ssa.OpAMD64VPCMPEQWMasked256, - ssa.OpAMD64VSCALEFPSMasked256, - ssa.OpAMD64VPAVGW512: - if p.From.Type == obj.TYPE_CONST { - p.AddRestSourceReg(simdReg(v.Args[0])) - } else { - p.AddRestSourceReg(simdReg(v.Args[1])) - } - } - - // Third arg - switch v.Op { - // Registers - case ssa.OpAMD64VPCMPUBMasked512, - ssa.OpAMD64VPCMPUQ256, - ssa.OpAMD64VPMINSDMasked128, - ssa.OpAMD64VPCMPBMasked128, - ssa.OpAMD64VPCMPUWMasked128, - ssa.OpAMD64VMULPSMasked128, - ssa.OpAMD64VPANDQMasked256, - ssa.OpAMD64VPCMPBMasked512, - ssa.OpAMD64VPCMPUWMasked512, - ssa.OpAMD64VPMULHUWMasked512, - ssa.OpAMD64VPCMPD128, - ssa.OpAMD64VPCMPDMasked256, - ssa.OpAMD64VPCMPGTQMasked256, - ssa.OpAMD64VPMULLQMasked256, - ssa.OpAMD64VCMPPD128, - ssa.OpAMD64VCMPPS256, - ssa.OpAMD64VPMULLDMasked128, - ssa.OpAMD64VPADDQMasked512, - ssa.OpAMD64VPCMPDMasked128, - ssa.OpAMD64VPCMPUW256, - ssa.OpAMD64VPMINUBMasked128, - ssa.OpAMD64VPMULLWMasked128, - ssa.OpAMD64VCMPPSMasked256, - ssa.OpAMD64VPCMPDMasked512, - ssa.OpAMD64VPCMPQ256, - ssa.OpAMD64VPMULLWMasked256, - ssa.OpAMD64VPCMPQMasked512, - ssa.OpAMD64VPADDSBMasked256, - ssa.OpAMD64VPCMPUD128, - ssa.OpAMD64VMULPSMasked256, - ssa.OpAMD64VPCMPUW128, - ssa.OpAMD64VPADDBMasked512, - ssa.OpAMD64VPADDSBMasked512, - ssa.OpAMD64VPCMPBMasked256, - ssa.OpAMD64VPCMPB256, - ssa.OpAMD64VPCMPUWMasked256, - ssa.OpAMD64VPORQMasked128, - ssa.OpAMD64VPCMPUDMasked512, - ssa.OpAMD64VPSUBSBMasked512, - ssa.OpAMD64VPCMPWMasked128, - ssa.OpAMD64VCMPPD256, - ssa.OpAMD64VPAVGWMasked256, - ssa.OpAMD64VPMULUDQMasked256, - ssa.OpAMD64VPADDWMasked512, - ssa.OpAMD64VCMPPDMasked256, - ssa.OpAMD64VPANDNQMasked512, - ssa.OpAMD64VPCMPQMasked256, - ssa.OpAMD64VMAXPDMasked512, - ssa.OpAMD64VPCMPUQ512, - ssa.OpAMD64VPCMPUW512, - ssa.OpAMD64VPMAXUWMasked256, - ssa.OpAMD64VCMPPSMasked512, - ssa.OpAMD64VPMINSQMasked128, - ssa.OpAMD64VPSUBSWMasked512, - ssa.OpAMD64VPXORQMasked128, - ssa.OpAMD64VPMAXSWMasked256, - ssa.OpAMD64VPMULLWMasked512, - ssa.OpAMD64VPMULDQMasked512, - ssa.OpAMD64VPCMPQMasked128, - ssa.OpAMD64VPCMPUBMasked256, - ssa.OpAMD64VPCMPUD512, - ssa.OpAMD64VXORPSMasked128, - ssa.OpAMD64VANDNPSMasked256, - ssa.OpAMD64VPCMPUQ128, - ssa.OpAMD64VPMULUDQMasked128, - ssa.OpAMD64VCMPPS128, - ssa.OpAMD64VPCMPEQWMasked128, - ssa.OpAMD64VPMAXSDMasked512, - ssa.OpAMD64VPCMPUB128, - ssa.OpAMD64VPAVGBMasked512, - ssa.OpAMD64VPCMPUQMasked128, - ssa.OpAMD64VPANDNDMasked256, - ssa.OpAMD64VPMAXUWMasked128, - ssa.OpAMD64VADDPDMasked128, - ssa.OpAMD64VPMULLQMasked512, - ssa.OpAMD64VPORQMasked256, - ssa.OpAMD64VSCALEFPDMasked256, - ssa.OpAMD64VPSUBDMasked512, - ssa.OpAMD64VDIVPSMasked512, - ssa.OpAMD64VPCMPUB512, - ssa.OpAMD64VDIVPDMasked128, - 
ssa.OpAMD64VXORPDMasked128, - ssa.OpAMD64VPCMPUB256, - ssa.OpAMD64VPMINSWMasked512, - ssa.OpAMD64VPCMPUQMasked512, - ssa.OpAMD64VPANDDMasked256, - ssa.OpAMD64VPCMPUDMasked256, - ssa.OpAMD64VMULPDMasked512, - ssa.OpAMD64VCMPPSMasked128, - ssa.OpAMD64VORPSMasked512, - ssa.OpAMD64VPCMPB128, - ssa.OpAMD64VPORDMasked256, - ssa.OpAMD64VPMAXSQMasked512, - ssa.OpAMD64VPADDDMasked256, - ssa.OpAMD64VPSUBQMasked256, - ssa.OpAMD64VXORPSMasked512, - ssa.OpAMD64VDIVPDMasked512, - ssa.OpAMD64VPMAXUDMasked128, - ssa.OpAMD64VPXORQMasked512, - ssa.OpAMD64VMINPDMasked256, - ssa.OpAMD64VPADDWMasked256, - ssa.OpAMD64VPSUBWMasked512, - ssa.OpAMD64VPCMPUD256, - ssa.OpAMD64VPMULLQMasked128, - ssa.OpAMD64VPMINUWMasked128, - ssa.OpAMD64VPCMPQ512, - ssa.OpAMD64VPCMPWMasked512, - ssa.OpAMD64VPXORDMasked256, - ssa.OpAMD64VPORQMasked512, - ssa.OpAMD64VORPDMasked512, - ssa.OpAMD64VPMINSWMasked128, - ssa.OpAMD64VPXORDMasked512, - ssa.OpAMD64VPCMPUDMasked128, - ssa.OpAMD64VPCMPEQQMasked512, - ssa.OpAMD64VPMULUDQMasked512, - ssa.OpAMD64VMAXPSMasked256, - ssa.OpAMD64VANDNPDMasked256, - ssa.OpAMD64VPADDWMasked128, - ssa.OpAMD64VCMPPDMasked128, - ssa.OpAMD64VMULPDMasked128, - ssa.OpAMD64VPCMPGTWMasked512, - ssa.OpAMD64VANDNPDMasked512, - ssa.OpAMD64VPSUBBMasked512, - ssa.OpAMD64VPANDNDMasked128, - ssa.OpAMD64VPCMPQ128, - ssa.OpAMD64VPMINUDMasked128, - ssa.OpAMD64VPAVGWMasked128, - ssa.OpAMD64VPCMPUQMasked256, - ssa.OpAMD64VPMULLDMasked256, - ssa.OpAMD64VPSUBBMasked256, - ssa.OpAMD64VPADDBMasked128, - ssa.OpAMD64VPMAXUBMasked512, - ssa.OpAMD64VPCMPD512, - ssa.OpAMD64VDIVPSMasked256, - ssa.OpAMD64VPSUBDMasked256, - ssa.OpAMD64VPAVGWMasked512, - ssa.OpAMD64VSCALEFPSMasked128, - ssa.OpAMD64VMINPSMasked512, - ssa.OpAMD64VPSUBQMasked512, - ssa.OpAMD64VPCMPD256, - ssa.OpAMD64VPMINSDMasked256, - ssa.OpAMD64VANDPSMasked512, - ssa.OpAMD64VPMAXUBMasked256, - ssa.OpAMD64VPSUBWMasked256, - ssa.OpAMD64VPADDSWMasked256, - ssa.OpAMD64VPCMPB512, - ssa.OpAMD64VPANDDMasked512, - ssa.OpAMD64VPCMPWMasked256, - ssa.OpAMD64VPSUBSWMasked256, - ssa.OpAMD64VPSUBQMasked128, - ssa.OpAMD64VPCMPW256, - ssa.OpAMD64VCMPPDMasked512, - ssa.OpAMD64VCMPPD512, - ssa.OpAMD64VPMINUBMasked512, - ssa.OpAMD64VPMULLDMasked512, - ssa.OpAMD64VMULPDMasked256, - ssa.OpAMD64VPSUBSWMasked128, - ssa.OpAMD64VPXORQMasked256, - ssa.OpAMD64VPANDQMasked512, - ssa.OpAMD64VPCMPUBMasked128, - ssa.OpAMD64VPORDMasked512, - ssa.OpAMD64VPMAXUWMasked512, - ssa.OpAMD64VPMAXSDMasked128, - ssa.OpAMD64VPMULDQMasked128, - ssa.OpAMD64VPMINUDMasked256, - ssa.OpAMD64VPANDNDMasked512, - ssa.OpAMD64VPMINUDMasked512, - ssa.OpAMD64VXORPDMasked256, - ssa.OpAMD64VMAXPSMasked512, - ssa.OpAMD64VPSUBBMasked128, - ssa.OpAMD64VPMINSDMasked512, - ssa.OpAMD64VPSUBSBMasked256, - ssa.OpAMD64VORPSMasked256, - ssa.OpAMD64VADDPSMasked128, - ssa.OpAMD64VXORPDMasked512, - ssa.OpAMD64VPMINUQMasked512, - ssa.OpAMD64VDIVPDMasked256, - ssa.OpAMD64VPADDQMasked256, - ssa.OpAMD64VPMAXUDMasked256, - ssa.OpAMD64VPMAXUQMasked128, - ssa.OpAMD64VPCMPW128, - ssa.OpAMD64VPSUBDMasked128, - ssa.OpAMD64VPXORDMasked128, - ssa.OpAMD64VORPDMasked128, - ssa.OpAMD64VCMPPS512, - ssa.OpAMD64VADDPDMasked256, - ssa.OpAMD64VPCMPGTWMasked256, - ssa.OpAMD64VPADDQMasked128, - ssa.OpAMD64VPSUBSBMasked128, - ssa.OpAMD64VPADDSBMasked128, - ssa.OpAMD64VPCMPGTWMasked128, - ssa.OpAMD64VPSUBWMasked128, - ssa.OpAMD64VSCALEFPDMasked128, - ssa.OpAMD64VANDNPDMasked128, - ssa.OpAMD64VPMINUWMasked512, - ssa.OpAMD64VPMAXSDMasked256, - ssa.OpAMD64VPMINUWMasked256, - ssa.OpAMD64VPMULDQMasked256, - ssa.OpAMD64VPADDSWMasked512, - ssa.OpAMD64VPMINUQMasked128, - 
ssa.OpAMD64VANDPSMasked256, - ssa.OpAMD64VPADDDMasked128, - ssa.OpAMD64VPCMPW512, - ssa.OpAMD64VANDNPSMasked128, - ssa.OpAMD64VPADDSWMasked128, - ssa.OpAMD64VPADDDMasked512, - ssa.OpAMD64VMULPSMasked512, - ssa.OpAMD64VORPDMasked256, - ssa.OpAMD64VPMULHUWMasked256, - ssa.OpAMD64VPORDMasked128, - ssa.OpAMD64VSCALEFPDMasked512, - ssa.OpAMD64VPMAXSBMasked512, - ssa.OpAMD64VPMINSQMasked256, - ssa.OpAMD64VPMINSBMasked512, - ssa.OpAMD64VADDPDMasked512, - ssa.OpAMD64VPMULHWMasked512, - ssa.OpAMD64VPMAXUQMasked256, - ssa.OpAMD64VDIVPSMasked128, - ssa.OpAMD64VPAVGBMasked128, - ssa.OpAMD64VMAXPDMasked128, - ssa.OpAMD64VADDPSMasked256, - ssa.OpAMD64VPCMPGTQMasked512, - ssa.OpAMD64VORPSMasked128, - ssa.OpAMD64VPANDQMasked128, - ssa.OpAMD64VPCMPEQWMasked512, - ssa.OpAMD64VADDPSMasked512, - ssa.OpAMD64VPMAXSQMasked128, - ssa.OpAMD64VXORPSMasked256, - ssa.OpAMD64VPMINSQMasked512, - ssa.OpAMD64VANDNPSMasked512, - ssa.OpAMD64VPMAXSWMasked512, - ssa.OpAMD64VPCMPEQQMasked128, - ssa.OpAMD64VMAXPDMasked256, - ssa.OpAMD64VPANDNQMasked256, - ssa.OpAMD64VPMULHUWMasked128, - ssa.OpAMD64VMINPDMasked512, - ssa.OpAMD64VMAXPSMasked128, - ssa.OpAMD64VPMAXSBMasked256, - ssa.OpAMD64VPANDDMasked128, - ssa.OpAMD64VANDPDMasked128, - ssa.OpAMD64VPANDNQMasked128, - ssa.OpAMD64VPCMPEQQMasked256, - ssa.OpAMD64VSCALEFPSMasked512, - ssa.OpAMD64VANDPDMasked512, - ssa.OpAMD64VPMULHWMasked128, - ssa.OpAMD64VPADDBMasked256, - ssa.OpAMD64VPMINUQMasked256, - ssa.OpAMD64VPMINUBMasked256, - ssa.OpAMD64VPMAXSBMasked128, - ssa.OpAMD64VPMAXSWMasked128, - ssa.OpAMD64VPMINSBMasked256, - ssa.OpAMD64VPMINSWMasked256, - ssa.OpAMD64VMINPSMasked256, - ssa.OpAMD64VPMAXUBMasked128, - ssa.OpAMD64VPCMPGTQMasked128, - ssa.OpAMD64VANDPDMasked256, - ssa.OpAMD64VMINPDMasked128, - ssa.OpAMD64VMINPSMasked128, - ssa.OpAMD64VPMINSBMasked128, - ssa.OpAMD64VPMULHWMasked256, - ssa.OpAMD64VPMAXUDMasked512, - ssa.OpAMD64VPMAXUQMasked512, - ssa.OpAMD64VANDPSMasked128, - ssa.OpAMD64VPMAXSQMasked256, - ssa.OpAMD64VPAVGBMasked256, - ssa.OpAMD64VPCMPEQWMasked256, - ssa.OpAMD64VSCALEFPSMasked256: - if p.From.Type == obj.TYPE_CONST { - p.AddRestSourceReg(simdReg(v.Args[1])) - } else { - p.AddRestSourceReg(simdReg(v.Args[2])) - } - } - - // Fourth arg - switch v.Op { - case ssa.OpAMD64VPCMPUBMasked512, - ssa.OpAMD64VPCMPBMasked128, - ssa.OpAMD64VPCMPUWMasked128, - ssa.OpAMD64VPCMPBMasked512, - ssa.OpAMD64VPCMPUWMasked512, - ssa.OpAMD64VPCMPDMasked256, - ssa.OpAMD64VPCMPDMasked128, - ssa.OpAMD64VCMPPSMasked256, - ssa.OpAMD64VPCMPDMasked512, - ssa.OpAMD64VPCMPQMasked512, - ssa.OpAMD64VPCMPBMasked256, - ssa.OpAMD64VPCMPUWMasked256, - ssa.OpAMD64VPCMPUDMasked512, - ssa.OpAMD64VPCMPWMasked128, - ssa.OpAMD64VCMPPDMasked256, - ssa.OpAMD64VPCMPQMasked256, - ssa.OpAMD64VCMPPSMasked512, - ssa.OpAMD64VPCMPQMasked128, - ssa.OpAMD64VPCMPUBMasked256, - ssa.OpAMD64VPCMPUQMasked128, - ssa.OpAMD64VPCMPUQMasked512, - ssa.OpAMD64VPCMPUDMasked256, - ssa.OpAMD64VCMPPSMasked128, - ssa.OpAMD64VPCMPWMasked512, - ssa.OpAMD64VPCMPUDMasked128, - ssa.OpAMD64VCMPPDMasked128, - ssa.OpAMD64VPCMPUQMasked256, - ssa.OpAMD64VPCMPWMasked256, - ssa.OpAMD64VCMPPDMasked512, - ssa.OpAMD64VPCMPUBMasked128: - if p.From.Type == obj.TYPE_CONST { - p.AddRestSourceReg(simdReg(v.Args[2])) - } else { - p.AddRestSourceReg(simdReg(v.Args[3])) - } - } - - // Output - switch v.Op { - case ssa.OpAMD64VPMINSW256, - ssa.OpAMD64VPMULLD256, - ssa.OpAMD64VORPD512, - ssa.OpAMD64VPCMPUBMasked512, - ssa.OpAMD64VPCMPUQ256, - ssa.OpAMD64VPMINSDMasked128, - ssa.OpAMD64VDIVPD256, - ssa.OpAMD64VPCMPEQW256, - ssa.OpAMD64VPCMPBMasked128, - 
ssa.OpAMD64VPCMPUWMasked128, - ssa.OpAMD64VHADDPS128, - ssa.OpAMD64VPXOR128, - ssa.OpAMD64VPADDQ128, - ssa.OpAMD64VMULPSMasked128, - ssa.OpAMD64VPANDQMasked256, - ssa.OpAMD64VPCMPBMasked512, - ssa.OpAMD64VSQRTPDMasked128, - ssa.OpAMD64VPMAXUB128, - ssa.OpAMD64VPSUBW256, - ssa.OpAMD64VPMAXSQ512, - ssa.OpAMD64VANDNPS512, - ssa.OpAMD64VPCMPUWMasked512, - ssa.OpAMD64VPMULHUWMasked512, - ssa.OpAMD64VPABSW512, - ssa.OpAMD64VPCMPD128, - ssa.OpAMD64VPCMPDMasked256, - ssa.OpAMD64VRSQRT14PD256, - ssa.OpAMD64VPHADDW128, - ssa.OpAMD64VPCMPGTQMasked256, - ssa.OpAMD64VPMULLQMasked256, - ssa.OpAMD64VCMPPD128, - ssa.OpAMD64VCMPPS256, - ssa.OpAMD64VPADDW512, - ssa.OpAMD64VPMULLDMasked128, - ssa.OpAMD64VPCMPEQQ128, - ssa.OpAMD64VPAVGW128, - ssa.OpAMD64VPOR256, - ssa.OpAMD64VPADDQMasked512, - ssa.OpAMD64VPCMPDMasked128, - ssa.OpAMD64VPCMPUW256, - ssa.OpAMD64VPMINUBMasked128, - ssa.OpAMD64VPMULLWMasked128, - ssa.OpAMD64VPOPCNTW512, - ssa.OpAMD64VORPD256, - ssa.OpAMD64VPANDN256, - ssa.OpAMD64VPANDD512, - ssa.OpAMD64VCMPPSMasked256, - ssa.OpAMD64VPCMPDMasked512, - ssa.OpAMD64VPORQ512, - ssa.OpAMD64VPCMPQ256, - ssa.OpAMD64VPMULLWMasked256, - ssa.OpAMD64VPSUBSW128, - ssa.OpAMD64VPCMPQMasked512, - ssa.OpAMD64VPADDSBMasked256, - ssa.OpAMD64VPCMPUD128, - ssa.OpAMD64VPCMPGTB256, - ssa.OpAMD64VMULPSMasked256, - ssa.OpAMD64VPCMPUW128, - ssa.OpAMD64VPCMPEQD256, - ssa.OpAMD64VPSUBSW512, - ssa.OpAMD64VPABSD512, - ssa.OpAMD64VPADDD512, - ssa.OpAMD64VPADDBMasked512, - ssa.OpAMD64VPADDSBMasked512, - ssa.OpAMD64VPCMPBMasked256, - ssa.OpAMD64VPMULUDQ512, - ssa.OpAMD64VPADDSW512, - ssa.OpAMD64VPCMPB256, - ssa.OpAMD64VPCMPUWMasked256, - ssa.OpAMD64VPORQMasked128, - ssa.OpAMD64VANDPD128, - ssa.OpAMD64VPCMPEQD128, - ssa.OpAMD64VPHSUBSW128, - ssa.OpAMD64VPADDSW256, - ssa.OpAMD64VPCMPUDMasked512, - ssa.OpAMD64VPSUBSBMasked512, - ssa.OpAMD64VPMULHUW128, - ssa.OpAMD64VPCMPGTW512, - ssa.OpAMD64VPCMPWMasked128, - ssa.OpAMD64VSQRTPDMasked256, - ssa.OpAMD64VPOPCNTB256, - ssa.OpAMD64VCMPPD256, - ssa.OpAMD64VPAVGWMasked256, - ssa.OpAMD64VPMULUDQMasked256, - ssa.OpAMD64VPMINSD512, - ssa.OpAMD64VPADDWMasked512, - ssa.OpAMD64VPOPCNTB128, - ssa.OpAMD64VPOPCNTWMasked128, - ssa.OpAMD64VCMPPDMasked256, - ssa.OpAMD64VMAXPS128, - ssa.OpAMD64VPMULLD128, - ssa.OpAMD64VPSUBB256, - ssa.OpAMD64VMINPD128, - ssa.OpAMD64VPANDNQMasked512, - ssa.OpAMD64VANDPS512, - ssa.OpAMD64VPCMPQMasked256, - ssa.OpAMD64VPOPCNTDMasked256, - ssa.OpAMD64VANDNPD256, - ssa.OpAMD64VPAND128, - ssa.OpAMD64VPANDN128, - ssa.OpAMD64VXORPD256, - ssa.OpAMD64VMAXPDMasked512, - ssa.OpAMD64VPCMPUQ512, - ssa.OpAMD64VPCMPUW512, - ssa.OpAMD64VPMAXUWMasked256, - ssa.OpAMD64VCMPPSMasked512, - ssa.OpAMD64VPMINSQMasked128, - ssa.OpAMD64VPMULDQ256, - ssa.OpAMD64VPSUBSWMasked512, - ssa.OpAMD64VPXORQMasked128, - ssa.OpAMD64VPCMPEQW128, - ssa.OpAMD64VPABSWMasked128, - ssa.OpAMD64VPMAXSWMasked256, - ssa.OpAMD64VPMULDQ128, - ssa.OpAMD64VPMULLWMasked512, - ssa.OpAMD64VDIVPS512, - ssa.OpAMD64VPMULDQMasked512, - ssa.OpAMD64VRCP14PS512, - ssa.OpAMD64VPCMPQMasked128, - ssa.OpAMD64VPCMPUBMasked256, - ssa.OpAMD64VRSQRT14PDMasked128, - ssa.OpAMD64VPCMPUD512, - ssa.OpAMD64VXORPSMasked128, - ssa.OpAMD64VPADDW256, - ssa.OpAMD64VANDNPSMasked256, - ssa.OpAMD64VPCMPUQ128, - ssa.OpAMD64VPMULUDQMasked128, - ssa.OpAMD64VCMPPS128, - ssa.OpAMD64VPCMPEQWMasked128, - ssa.OpAMD64VHSUBPS128, - ssa.OpAMD64VPMAXSDMasked512, - ssa.OpAMD64VPABSQMasked256, - ssa.OpAMD64VADDPS256, - ssa.OpAMD64VHSUBPS256, - ssa.OpAMD64VPSUBB128, - ssa.OpAMD64VPCMPUB128, - ssa.OpAMD64VMAXPD512, - ssa.OpAMD64VPAVGBMasked512, - 
ssa.OpAMD64VPCMPUQMasked128, - ssa.OpAMD64VPHSUBW256, - ssa.OpAMD64VPANDNDMasked256, - ssa.OpAMD64VPMAXUWMasked128, - ssa.OpAMD64VXORPS128, - ssa.OpAMD64VMINPS128, - ssa.OpAMD64VADDPDMasked128, - ssa.OpAMD64VPMULLQMasked512, - ssa.OpAMD64VPORQMasked256, - ssa.OpAMD64VPMULHW128, - ssa.OpAMD64VSCALEFPDMasked256, - ssa.OpAMD64VPSUBDMasked512, - ssa.OpAMD64VPSUBQ512, - ssa.OpAMD64VPADDB128, - ssa.OpAMD64VPSUBSB256, - ssa.OpAMD64VDIVPSMasked512, - ssa.OpAMD64VPCMPUB512, - ssa.OpAMD64VSCALEFPS128, - ssa.OpAMD64VSQRTPS512, - ssa.OpAMD64VPSIGND128, - ssa.OpAMD64VRSQRT14PD512, - ssa.OpAMD64VDIVPDMasked128, - ssa.OpAMD64VXORPDMasked128, - ssa.OpAMD64VPCMPUB256, - ssa.OpAMD64VPMINSWMasked512, - ssa.OpAMD64VPXORD512, - ssa.OpAMD64VHADDPD256, - ssa.OpAMD64VPMAXSB128, - ssa.OpAMD64VPHSUBD128, - ssa.OpAMD64VPCMPUQMasked512, - ssa.OpAMD64VPANDDMasked256, - ssa.OpAMD64VPCMPUDMasked256, - ssa.OpAMD64VRCP14PSMasked128, - ssa.OpAMD64VMULPDMasked512, - ssa.OpAMD64VPSUBD512, - ssa.OpAMD64VANDNPD128, - ssa.OpAMD64VPHADDD256, - ssa.OpAMD64VMINPS512, - ssa.OpAMD64VPMULDQ512, - ssa.OpAMD64VCMPPSMasked128, - ssa.OpAMD64VORPSMasked512, - ssa.OpAMD64VPCMPB128, - ssa.OpAMD64VPORDMasked256, - ssa.OpAMD64VPABSW256, - ssa.OpAMD64VPMAXSQMasked512, - ssa.OpAMD64VPADDDMasked256, - ssa.OpAMD64VPSUBQMasked256, - ssa.OpAMD64VXORPSMasked512, - ssa.OpAMD64VPCMPEQB128, - ssa.OpAMD64VPMULLW256, - ssa.OpAMD64VDIVPDMasked512, - ssa.OpAMD64VPMAXUDMasked128, - ssa.OpAMD64VPADDB256, - ssa.OpAMD64VPABSWMasked256, - ssa.OpAMD64VPOPCNTWMasked512, - ssa.OpAMD64VPSUBD128, - ssa.OpAMD64VPXORQMasked512, - ssa.OpAMD64VMINPDMasked256, - ssa.OpAMD64VPADDWMasked256, - ssa.OpAMD64VPMAXSQ256, - ssa.OpAMD64VPSUBWMasked512, - ssa.OpAMD64VMULPS512, - ssa.OpAMD64VPCMPUD256, - ssa.OpAMD64VPMULLQMasked128, - ssa.OpAMD64VPADDSB128, - ssa.OpAMD64VMINPD512, - ssa.OpAMD64VPMAXSD512, - ssa.OpAMD64VPMINUWMasked128, - ssa.OpAMD64VPOPCNTQMasked128, - ssa.OpAMD64VPCMPQ512, - ssa.OpAMD64VPMINUB256, - ssa.OpAMD64VPCMPWMasked512, - ssa.OpAMD64VPXORDMasked256, - ssa.OpAMD64VPABSB256, - ssa.OpAMD64VPORQMasked512, - ssa.OpAMD64VORPDMasked512, - ssa.OpAMD64VPOPCNTQ128, - ssa.OpAMD64VPMINSD256, - ssa.OpAMD64VPOPCNTDMasked512, - ssa.OpAMD64VPMINSWMasked128, - ssa.OpAMD64VPOPCNTD256, - ssa.OpAMD64VPXORDMasked512, - ssa.OpAMD64VPABSQ256, - ssa.OpAMD64VPOPCNTW256, - ssa.OpAMD64VDIVPS256, - ssa.OpAMD64VPHADDSW128, - ssa.OpAMD64VPSUBD256, - ssa.OpAMD64VRSQRT14PD128, - ssa.OpAMD64VDIVPD128, - ssa.OpAMD64VPCMPUDMasked128, - ssa.OpAMD64VPCMPEQQMasked512, - ssa.OpAMD64VRCP14PDMasked256, - ssa.OpAMD64VPMULUDQMasked512, - ssa.OpAMD64VMAXPSMasked256, - ssa.OpAMD64VPMULLQ256, - ssa.OpAMD64VANDNPDMasked256, - ssa.OpAMD64VPADDWMasked128, - ssa.OpAMD64VCMPPDMasked128, - ssa.OpAMD64VMULPDMasked128, - ssa.OpAMD64VPCMPGTWMasked512, - ssa.OpAMD64VPOR128, - ssa.OpAMD64VANDNPDMasked512, - ssa.OpAMD64VPSUBBMasked512, - ssa.OpAMD64VPANDNDMasked128, - ssa.OpAMD64VPCMPQ128, - ssa.OpAMD64VPMINUDMasked128, - ssa.OpAMD64VXORPD128, - ssa.OpAMD64VPAVGWMasked128, - ssa.OpAMD64VPCMPUQMasked256, - ssa.OpAMD64VPMULLDMasked256, - ssa.OpAMD64VPSUBBMasked256, - ssa.OpAMD64VRSQRT14PSMasked128, - ssa.OpAMD64VPADDBMasked128, - ssa.OpAMD64VPMAXUBMasked512, - ssa.OpAMD64VPMULLW128, - ssa.OpAMD64VPSUBW128, - ssa.OpAMD64VPXOR256, - ssa.OpAMD64VPCMPD512, - ssa.OpAMD64VDIVPSMasked256, - ssa.OpAMD64VPOPCNTBMasked256, - ssa.OpAMD64VPADDD128, - ssa.OpAMD64VRSQRTPS128, - ssa.OpAMD64VHADDPD128, - ssa.OpAMD64VPSUBDMasked256, - ssa.OpAMD64VPOPCNTD512, - ssa.OpAMD64VPANDNQ512, - ssa.OpAMD64VPAVGWMasked512, - 
ssa.OpAMD64VSCALEFPSMasked128, - ssa.OpAMD64VMINPSMasked512, - ssa.OpAMD64VPSUBQMasked512, - ssa.OpAMD64VSQRTPSMasked512, - ssa.OpAMD64VPCMPD256, - ssa.OpAMD64VPMINSDMasked256, - ssa.OpAMD64VANDPSMasked512, - ssa.OpAMD64VPMAXUBMasked256, - ssa.OpAMD64VPSUBWMasked256, - ssa.OpAMD64VSQRTPD512, - ssa.OpAMD64VPADDSWMasked256, - ssa.OpAMD64VPCMPB512, - ssa.OpAMD64VPANDDMasked512, - ssa.OpAMD64VRCP14PD512, - ssa.OpAMD64VPABSWMasked512, - ssa.OpAMD64VPCMPWMasked256, - ssa.OpAMD64VPSUBSWMasked256, - ssa.OpAMD64VPSUBQMasked128, - ssa.OpAMD64VPCMPW256, - ssa.OpAMD64VPABSD128, - ssa.OpAMD64VPMINUD256, - ssa.OpAMD64VCMPPDMasked512, - ssa.OpAMD64VCMPPD512, - ssa.OpAMD64VPMINUBMasked512, - ssa.OpAMD64VPSUBQ256, - ssa.OpAMD64VPMINSD128, - ssa.OpAMD64VPADDSB256, - ssa.OpAMD64VPMULLDMasked512, - ssa.OpAMD64VANDPD512, - ssa.OpAMD64VMULPDMasked256, - ssa.OpAMD64VPHADDSW256, - ssa.OpAMD64VPAND256, - ssa.OpAMD64VADDPS512, - ssa.OpAMD64VPMINUQ256, - ssa.OpAMD64VADDPD256, - ssa.OpAMD64VPABSB128, - ssa.OpAMD64VPANDND512, - ssa.OpAMD64VPSUBSWMasked128, - ssa.OpAMD64VPMAXSW256, - ssa.OpAMD64VMAXPD256, - ssa.OpAMD64VMULPD128, - ssa.OpAMD64VPOPCNTQMasked256, - ssa.OpAMD64VPADDD256, - ssa.OpAMD64VPOPCNTQ512, - ssa.OpAMD64VMINPD256, - ssa.OpAMD64VPXORQMasked256, - ssa.OpAMD64VPOPCNTBMasked512, - ssa.OpAMD64VPANDQMasked512, - ssa.OpAMD64VPCMPUBMasked128, - ssa.OpAMD64VPMINSW512, - ssa.OpAMD64VPORDMasked512, - ssa.OpAMD64VPAVGB128, - ssa.OpAMD64VADDPD512, - ssa.OpAMD64VPMULHW512, - ssa.OpAMD64VPADDQ256, - ssa.OpAMD64VPMINUQ512, - ssa.OpAMD64VORPS512, - ssa.OpAMD64VPMAXUWMasked512, - ssa.OpAMD64VPMAXSDMasked128, - ssa.OpAMD64VPMULDQMasked128, - ssa.OpAMD64VSCALEFPD128, - ssa.OpAMD64VPCMPGTW256, - ssa.OpAMD64VPMAXUW256, - ssa.OpAMD64VPMAXUD512, - ssa.OpAMD64VPMAXUQ256, - ssa.OpAMD64VPMINUDMasked256, - ssa.OpAMD64VPABSBMasked512, - ssa.OpAMD64VSQRTPD128, - ssa.OpAMD64VPANDNDMasked512, - ssa.OpAMD64VPMINUDMasked512, - ssa.OpAMD64VPABSBMasked256, - ssa.OpAMD64VXORPDMasked256, - ssa.OpAMD64VMAXPSMasked512, - ssa.OpAMD64VPMINUD512, - ssa.OpAMD64VPABSBMasked128, - ssa.OpAMD64VPSUBBMasked128, - ssa.OpAMD64VPMINSDMasked512, - ssa.OpAMD64VPSUBSBMasked256, - ssa.OpAMD64VORPSMasked256, - ssa.OpAMD64VADDPSMasked128, - ssa.OpAMD64VPMAXSB512, - ssa.OpAMD64VPABSB512, - ssa.OpAMD64VXORPDMasked512, - ssa.OpAMD64VADDPS128, - ssa.OpAMD64VPMINUQMasked512, - ssa.OpAMD64VANDNPS256, - ssa.OpAMD64VPMAXSB256, - ssa.OpAMD64VDIVPDMasked256, - ssa.OpAMD64VPHSUBW128, - ssa.OpAMD64VPADDQMasked256, - ssa.OpAMD64VPMAXSD256, - ssa.OpAMD64VPABSDMasked512, - ssa.OpAMD64VPADDQ512, - ssa.OpAMD64VPMAXUDMasked256, - ssa.OpAMD64VPCMPGTB128, - ssa.OpAMD64VPMAXUQMasked128, - ssa.OpAMD64VPCMPW128, - ssa.OpAMD64VPCMPGTQ128, - ssa.OpAMD64VPANDQ512, - ssa.OpAMD64VRCP14PSMasked512, - ssa.OpAMD64VANDPS256, - ssa.OpAMD64VPHSUBD256, - ssa.OpAMD64VPSUBW512, - ssa.OpAMD64VHADDPS256, - ssa.OpAMD64VMULPD256, - ssa.OpAMD64VRCP14PDMasked512, - ssa.OpAMD64VPSUBDMasked128, - ssa.OpAMD64VPXORDMasked128, - ssa.OpAMD64VHSUBPD128, - ssa.OpAMD64VPMAXUW128, - ssa.OpAMD64VORPDMasked128, - ssa.OpAMD64VPHADDD128, - ssa.OpAMD64VPMINUD128, - ssa.OpAMD64VPSIGND256, - ssa.OpAMD64VPADDSW128, - ssa.OpAMD64VCMPPS512, - ssa.OpAMD64VPMAXUQ512, - ssa.OpAMD64VPCMPGTQ512, - ssa.OpAMD64VADDPDMasked256, - ssa.OpAMD64VHSUBPD256, - ssa.OpAMD64VPCMPGTWMasked256, - ssa.OpAMD64VPOPCNTW128, - ssa.OpAMD64VPSUBSB512, - ssa.OpAMD64VRSQRT14PDMasked256, - ssa.OpAMD64VPMAXSD128, - ssa.OpAMD64VPADDQMasked128, - ssa.OpAMD64VPOPCNTQ256, - ssa.OpAMD64VPMAXSQ128, - ssa.OpAMD64VANDPD256, - ssa.OpAMD64VPSUBSBMasked128, - 
ssa.OpAMD64VSQRTPS128, - ssa.OpAMD64VPCMPGTQ256, - ssa.OpAMD64VPADDSBMasked128, - ssa.OpAMD64VXORPD512, - ssa.OpAMD64VPCMPGTWMasked128, - ssa.OpAMD64VPMULLQ128, - ssa.OpAMD64VPSUBWMasked128, - ssa.OpAMD64VSCALEFPDMasked128, - ssa.OpAMD64VPMAXUD256, - ssa.OpAMD64VANDNPDMasked128, - ssa.OpAMD64VPMINUWMasked512, - ssa.OpAMD64VRSQRTPS256, - ssa.OpAMD64VPXORQ512, - ssa.OpAMD64VPMAXSDMasked256, - ssa.OpAMD64VPMINUWMasked256, - ssa.OpAMD64VPMINSQ256, - ssa.OpAMD64VPMULDQMasked256, - ssa.OpAMD64VPMULUDQ128, - ssa.OpAMD64VPMAXUB256, - ssa.OpAMD64VPMINUW256, - ssa.OpAMD64VPADDSWMasked512, - ssa.OpAMD64VPADDSB512, + ssa.OpAMD64VPMINUWMasked256, + ssa.OpAMD64VPMINUWMasked512, + ssa.OpAMD64VPMINUWMasked128, + ssa.OpAMD64VPMINUDMasked512, + ssa.OpAMD64VPMINUDMasked128, + ssa.OpAMD64VPMINUDMasked256, ssa.OpAMD64VPMINUQMasked128, - ssa.OpAMD64VPMULUDQ256, - ssa.OpAMD64VANDPSMasked256, - ssa.OpAMD64VPADDDMasked128, - ssa.OpAMD64VPCMPW512, - ssa.OpAMD64VPABSD256, - ssa.OpAMD64VANDNPSMasked128, - ssa.OpAMD64VPABSDMasked128, - ssa.OpAMD64VPADDSWMasked128, - ssa.OpAMD64VPMULHUW512, - ssa.OpAMD64VPSUBQ128, - ssa.OpAMD64VPADDDMasked512, - ssa.OpAMD64VRCP14PS128, + ssa.OpAMD64VPMINUQMasked256, + ssa.OpAMD64VPMINUQMasked512, + ssa.OpAMD64VPMINUBMasked128, + ssa.OpAMD64VPMINUBMasked256, + ssa.OpAMD64VPMINUBMasked512, ssa.OpAMD64VMULPSMasked512, - ssa.OpAMD64VORPDMasked256, - ssa.OpAMD64VPMULHUWMasked256, - ssa.OpAMD64VPORDMasked128, + ssa.OpAMD64VMULPSMasked128, + ssa.OpAMD64VMULPSMasked256, + ssa.OpAMD64VMULPDMasked128, + ssa.OpAMD64VMULPDMasked256, + ssa.OpAMD64VMULPDMasked512, + ssa.OpAMD64VSCALEFPSMasked512, + ssa.OpAMD64VSCALEFPSMasked128, + ssa.OpAMD64VSCALEFPSMasked256, + ssa.OpAMD64VSCALEFPDMasked128, + ssa.OpAMD64VSCALEFPDMasked256, ssa.OpAMD64VSCALEFPDMasked512, - ssa.OpAMD64VSCALEFPD512, - ssa.OpAMD64VPMAXSBMasked512, - ssa.OpAMD64VPMINSQMasked256, - ssa.OpAMD64VXORPS512, - ssa.OpAMD64VPSUBSB128, - ssa.OpAMD64VPMAXSW128, - ssa.OpAMD64VPMINSBMasked512, - ssa.OpAMD64VADDPDMasked512, - ssa.OpAMD64VPSUBB512, + ssa.OpAMD64VPMULDQMasked128, + ssa.OpAMD64VPMULDQMasked256, + ssa.OpAMD64VPMULDQMasked512, + ssa.OpAMD64VPMULUDQMasked128, + ssa.OpAMD64VPMULUDQMasked256, + ssa.OpAMD64VPMULUDQMasked512, + ssa.OpAMD64VPMULHWMasked256, ssa.OpAMD64VPMULHWMasked512, - ssa.OpAMD64VRSQRT14PSMasked256, - ssa.OpAMD64VPOPCNTBMasked128, - ssa.OpAMD64VPOPCNTD128, - ssa.OpAMD64VPMAXUQMasked256, - ssa.OpAMD64VPMINSB256, - ssa.OpAMD64VPABSQMasked512, - ssa.OpAMD64VDIVPSMasked128, - ssa.OpAMD64VPAVGBMasked128, - ssa.OpAMD64VPCMPEQW512, - ssa.OpAMD64VPMULHUW256, - ssa.OpAMD64VPMULLW512, - ssa.OpAMD64VPADDB512, - ssa.OpAMD64VPOPCNTB512, - ssa.OpAMD64VPORD512, - ssa.OpAMD64VMAXPDMasked128, - ssa.OpAMD64VPMAXSW512, - ssa.OpAMD64VPMINUW512, - ssa.OpAMD64VADDPSMasked256, - ssa.OpAMD64VPCMPGTQMasked512, - ssa.OpAMD64VRCP14PD128, - ssa.OpAMD64VPHSUBSW256, - ssa.OpAMD64VRSQRT14PSMasked512, + ssa.OpAMD64VPMULHWMasked128, + ssa.OpAMD64VPMULHUWMasked256, + ssa.OpAMD64VPMULHUWMasked512, + ssa.OpAMD64VPMULHUWMasked128, + ssa.OpAMD64VPMULLWMasked256, + ssa.OpAMD64VPMULLWMasked512, + ssa.OpAMD64VPMULLWMasked128, + ssa.OpAMD64VPMULLDMasked512, + ssa.OpAMD64VPMULLDMasked128, + ssa.OpAMD64VPMULLDMasked256, + ssa.OpAMD64VPMULLQMasked128, + ssa.OpAMD64VPMULLQMasked256, + ssa.OpAMD64VPMULLQMasked512, + ssa.OpAMD64VORPSMasked512, ssa.OpAMD64VORPSMasked128, - ssa.OpAMD64VMAXPS256, - ssa.OpAMD64VPANDQMasked128, - ssa.OpAMD64VPCMPEQWMasked512, - ssa.OpAMD64VRCP14PDMasked128, - ssa.OpAMD64VADDPSMasked512, - ssa.OpAMD64VPMAXSQMasked128, - 
ssa.OpAMD64VPOPCNTDMasked128, - ssa.OpAMD64VPHADDW256, + ssa.OpAMD64VORPSMasked256, + ssa.OpAMD64VORPDMasked128, + ssa.OpAMD64VORPDMasked256, + ssa.OpAMD64VORPDMasked512, + ssa.OpAMD64VPORDMasked512, + ssa.OpAMD64VPORDMasked128, + ssa.OpAMD64VPORDMasked256, + ssa.OpAMD64VPORQMasked128, + ssa.OpAMD64VPORQMasked256, + ssa.OpAMD64VPORQMasked512, + ssa.OpAMD64VPADDSWMasked256, + ssa.OpAMD64VPADDSWMasked512, + ssa.OpAMD64VPADDSWMasked128, + ssa.OpAMD64VPADDSBMasked128, + ssa.OpAMD64VPADDSBMasked256, + ssa.OpAMD64VPADDSBMasked512, + ssa.OpAMD64VPSUBSWMasked256, + ssa.OpAMD64VPSUBSWMasked512, + ssa.OpAMD64VPSUBSWMasked128, + ssa.OpAMD64VPSUBSBMasked128, + ssa.OpAMD64VPSUBSBMasked256, + ssa.OpAMD64VPSUBSBMasked512, + ssa.OpAMD64VPSUBWMasked256, + ssa.OpAMD64VPSUBWMasked512, + ssa.OpAMD64VPSUBWMasked128, + ssa.OpAMD64VPSUBDMasked512, + ssa.OpAMD64VPSUBDMasked128, + ssa.OpAMD64VPSUBDMasked256, + ssa.OpAMD64VPSUBQMasked128, + ssa.OpAMD64VPSUBQMasked256, + ssa.OpAMD64VPSUBQMasked512, + ssa.OpAMD64VPSUBBMasked128, + ssa.OpAMD64VPSUBBMasked256, + ssa.OpAMD64VPSUBBMasked512, + ssa.OpAMD64VXORPSMasked512, + ssa.OpAMD64VXORPSMasked128, ssa.OpAMD64VXORPSMasked256, - ssa.OpAMD64VPMINSQMasked512, - ssa.OpAMD64VRCP14PS256, - ssa.OpAMD64VPSIGNB256, - ssa.OpAMD64VPSUBSW256, - ssa.OpAMD64VDIVPD512, - ssa.OpAMD64VPADDW128, - ssa.OpAMD64VXORPS256, - ssa.OpAMD64VANDNPSMasked512, - ssa.OpAMD64VPAVGB512, - ssa.OpAMD64VPMAXUW512, - ssa.OpAMD64VPMAXSWMasked512, - ssa.OpAMD64VPSIGNW256, - ssa.OpAMD64VSQRTPSMasked128, + ssa.OpAMD64VXORPDMasked128, + ssa.OpAMD64VXORPDMasked256, + ssa.OpAMD64VXORPDMasked512, + ssa.OpAMD64VPXORDMasked512, + ssa.OpAMD64VPXORDMasked128, + ssa.OpAMD64VPXORDMasked256, + ssa.OpAMD64VPXORQMasked128, + ssa.OpAMD64VPXORQMasked256, + ssa.OpAMD64VPXORQMasked512: + p = simdFp2k1fp1(s, v) + + case ssa.OpAMD64VPCMPEQWMasked256, + ssa.OpAMD64VPCMPEQWMasked512, + ssa.OpAMD64VPCMPEQWMasked128, + ssa.OpAMD64VPCMPEQDMasked512, + ssa.OpAMD64VPCMPEQDMasked128, + ssa.OpAMD64VPCMPEQDMasked256, ssa.OpAMD64VPCMPEQQMasked128, - ssa.OpAMD64VPOPCNTWMasked256, - ssa.OpAMD64VPCMPGTD128, - ssa.OpAMD64VMAXPDMasked256, - ssa.OpAMD64VPANDNQMasked256, - ssa.OpAMD64VPMINSB512, - ssa.OpAMD64VPMULHUWMasked128, - ssa.OpAMD64VPMINUW128, - ssa.OpAMD64VMINPDMasked512, - ssa.OpAMD64VMAXPSMasked128, - ssa.OpAMD64VPMAXSBMasked256, - ssa.OpAMD64VPANDDMasked128, - ssa.OpAMD64VSQRTPD256, - ssa.OpAMD64VPCMPGTD256, - ssa.OpAMD64VPOPCNTQMasked512, - ssa.OpAMD64VPMAXUB512, - ssa.OpAMD64VANDPDMasked128, - ssa.OpAMD64VPANDNQMasked128, - ssa.OpAMD64VSCALEFPS256, ssa.OpAMD64VPCMPEQQMasked256, - ssa.OpAMD64VSCALEFPSMasked512, - ssa.OpAMD64VANDPDMasked512, - ssa.OpAMD64VORPS128, - ssa.OpAMD64VPMINUB128, - ssa.OpAMD64VPMULHWMasked128, - ssa.OpAMD64VPAVGB256, - ssa.OpAMD64VPMINSQ128, - ssa.OpAMD64VPCMPEQQ256, - ssa.OpAMD64VMULPD512, - ssa.OpAMD64VPABSQ512, - ssa.OpAMD64VPABSDMasked256, - ssa.OpAMD64VPADDBMasked256, - ssa.OpAMD64VPSIGNW128, - ssa.OpAMD64VPABSQ128, - ssa.OpAMD64VPMINUQMasked256, - ssa.OpAMD64VPMULHW256, - ssa.OpAMD64VSCALEFPS512, - ssa.OpAMD64VRSQRT14PDMasked512, - ssa.OpAMD64VPMINUBMasked256, - ssa.OpAMD64VADDPD128, - ssa.OpAMD64VSCALEFPD256, - ssa.OpAMD64VANDPS128, - ssa.OpAMD64VMULPS256, - ssa.OpAMD64VPMINSW128, - ssa.OpAMD64VPMAXSBMasked128, - ssa.OpAMD64VPMAXSWMasked128, - ssa.OpAMD64VMINPS256, - ssa.OpAMD64VPMAXUQ128, - ssa.OpAMD64VPMINSBMasked256, - ssa.OpAMD64VPAVGW256, - ssa.OpAMD64VMAXPD128, - ssa.OpAMD64VPSIGNB128, - ssa.OpAMD64VPMINUB512, - ssa.OpAMD64VPABSW128, - ssa.OpAMD64VPCMPGTW128, - ssa.OpAMD64VORPS256, - 
ssa.OpAMD64VPMINSB128, - ssa.OpAMD64VPMINUQ128, - ssa.OpAMD64VPMINSQ512, - ssa.OpAMD64VSQRTPDMasked512, - ssa.OpAMD64VPMINSWMasked256, - ssa.OpAMD64VMINPSMasked256, - ssa.OpAMD64VPMAXUBMasked128, - ssa.OpAMD64VPCMPEQB256, - ssa.OpAMD64VANDNPD512, + ssa.OpAMD64VPCMPEQQMasked512, + ssa.OpAMD64VPCMPEQBMasked128, + ssa.OpAMD64VPCMPEQBMasked256, + ssa.OpAMD64VPCMPEQBMasked512, + ssa.OpAMD64VPCMPGTWMasked256, + ssa.OpAMD64VPCMPGTWMasked512, + ssa.OpAMD64VPCMPGTWMasked128, + ssa.OpAMD64VPCMPGTDMasked512, + ssa.OpAMD64VPCMPGTDMasked128, + ssa.OpAMD64VPCMPGTDMasked256, ssa.OpAMD64VPCMPGTQMasked128, - ssa.OpAMD64VANDPDMasked256, - ssa.OpAMD64VORPD128, - ssa.OpAMD64VMAXPS512, - ssa.OpAMD64VPMULLD512, - ssa.OpAMD64VMINPDMasked128, - ssa.OpAMD64VANDNPS128, - ssa.OpAMD64VMULPS128, - ssa.OpAMD64VPMULLQ512, - ssa.OpAMD64VRSQRT14PS512, - ssa.OpAMD64VMINPSMasked128, + ssa.OpAMD64VPCMPGTQMasked256, + ssa.OpAMD64VPCMPGTQMasked512, + ssa.OpAMD64VPCMPGTBMasked128, + ssa.OpAMD64VPCMPGTBMasked256, + ssa.OpAMD64VPCMPGTBMasked512: + p = simdFp2k1k1(s, v) + + case ssa.OpAMD64VPABSWMasked256, + ssa.OpAMD64VPABSWMasked512, + ssa.OpAMD64VPABSWMasked128, + ssa.OpAMD64VPABSDMasked512, + ssa.OpAMD64VPABSDMasked128, + ssa.OpAMD64VPABSDMasked256, + ssa.OpAMD64VPABSQMasked128, + ssa.OpAMD64VPABSQMasked256, + ssa.OpAMD64VPABSQMasked512, + ssa.OpAMD64VPABSBMasked128, + ssa.OpAMD64VPABSBMasked256, + ssa.OpAMD64VPABSBMasked512, + ssa.OpAMD64VRCP14PSMasked512, + ssa.OpAMD64VRCP14PSMasked128, ssa.OpAMD64VRCP14PSMasked256, - ssa.OpAMD64VPMAXUD128, - ssa.OpAMD64VPMINSBMasked128, - ssa.OpAMD64VPCMPEQQ512, - ssa.OpAMD64VSQRTPS256, - ssa.OpAMD64VPMULHWMasked256, + ssa.OpAMD64VRCP14PDMasked128, + ssa.OpAMD64VRCP14PDMasked256, + ssa.OpAMD64VRCP14PDMasked512, + ssa.OpAMD64VRSQRT14PSMasked512, + ssa.OpAMD64VRSQRT14PSMasked128, + ssa.OpAMD64VRSQRT14PSMasked256, + ssa.OpAMD64VRSQRT14PDMasked128, + ssa.OpAMD64VRSQRT14PDMasked256, + ssa.OpAMD64VRSQRT14PDMasked512, + ssa.OpAMD64VPOPCNTWMasked256, + ssa.OpAMD64VPOPCNTWMasked512, + ssa.OpAMD64VPOPCNTWMasked128, + ssa.OpAMD64VPOPCNTDMasked512, + ssa.OpAMD64VPOPCNTDMasked128, + ssa.OpAMD64VPOPCNTDMasked256, + ssa.OpAMD64VPOPCNTQMasked128, + ssa.OpAMD64VPOPCNTQMasked256, + ssa.OpAMD64VPOPCNTQMasked512, + ssa.OpAMD64VPOPCNTBMasked128, + ssa.OpAMD64VPOPCNTBMasked256, + ssa.OpAMD64VPOPCNTBMasked512, + ssa.OpAMD64VSQRTPSMasked512, + ssa.OpAMD64VSQRTPSMasked128, ssa.OpAMD64VSQRTPSMasked256, - ssa.OpAMD64VDIVPS128, - ssa.OpAMD64VRCP14PD256, - ssa.OpAMD64VPMAXUDMasked512, - ssa.OpAMD64VPMAXUQMasked512, - ssa.OpAMD64VANDPSMasked128, - ssa.OpAMD64VPABSQMasked128, - ssa.OpAMD64VPMAXSQMasked256, - ssa.OpAMD64VPAVGBMasked256, - ssa.OpAMD64VPCMPEQWMasked256, - ssa.OpAMD64VSCALEFPSMasked256, - ssa.OpAMD64VPAVGW512: - p.To.Type = obj.TYPE_REG - p.To.Reg = simdReg(v) + ssa.OpAMD64VSQRTPDMasked128, + ssa.OpAMD64VSQRTPDMasked256, + ssa.OpAMD64VSQRTPDMasked512: + p = simdFp1k1fp1(s, v) + + case ssa.OpAMD64VCMPPS128, + ssa.OpAMD64VCMPPS256, + ssa.OpAMD64VCMPPD128, + ssa.OpAMD64VCMPPD256: + p = simdFp21Imm8(s, v) + + case ssa.OpAMD64VCMPPS512, + ssa.OpAMD64VCMPPD512, + ssa.OpAMD64VPCMPUW256, + ssa.OpAMD64VPCMPUW512, + ssa.OpAMD64VPCMPUW128, + ssa.OpAMD64VPCMPUD512, + ssa.OpAMD64VPCMPUD128, + ssa.OpAMD64VPCMPUD256, + ssa.OpAMD64VPCMPUQ128, + ssa.OpAMD64VPCMPUQ256, + ssa.OpAMD64VPCMPUQ512, + ssa.OpAMD64VPCMPUB128, + ssa.OpAMD64VPCMPUB256, + ssa.OpAMD64VPCMPUB512, + ssa.OpAMD64VPCMPW256, + ssa.OpAMD64VPCMPW512, + ssa.OpAMD64VPCMPW128, + ssa.OpAMD64VPCMPD512, + ssa.OpAMD64VPCMPD128, + ssa.OpAMD64VPCMPD256, + 
ssa.OpAMD64VPCMPQ128, + ssa.OpAMD64VPCMPQ256, + ssa.OpAMD64VPCMPQ512, + ssa.OpAMD64VPCMPB128, + ssa.OpAMD64VPCMPB256, + ssa.OpAMD64VPCMPB512: + p = simdFp2k1Imm8(s, v) + + case ssa.OpAMD64VCMPPSMasked512, + ssa.OpAMD64VCMPPSMasked128, + ssa.OpAMD64VCMPPSMasked256, + ssa.OpAMD64VCMPPDMasked128, + ssa.OpAMD64VCMPPDMasked256, + ssa.OpAMD64VCMPPDMasked512, + ssa.OpAMD64VPCMPUWMasked256, + ssa.OpAMD64VPCMPUWMasked512, + ssa.OpAMD64VPCMPUWMasked128, + ssa.OpAMD64VPCMPUDMasked512, + ssa.OpAMD64VPCMPUDMasked128, + ssa.OpAMD64VPCMPUDMasked256, + ssa.OpAMD64VPCMPUQMasked128, + ssa.OpAMD64VPCMPUQMasked256, + ssa.OpAMD64VPCMPUQMasked512, + ssa.OpAMD64VPCMPUBMasked128, + ssa.OpAMD64VPCMPUBMasked256, + ssa.OpAMD64VPCMPUBMasked512, + ssa.OpAMD64VPCMPWMasked256, + ssa.OpAMD64VPCMPWMasked512, + ssa.OpAMD64VPCMPWMasked128, + ssa.OpAMD64VPCMPDMasked512, + ssa.OpAMD64VPCMPDMasked128, + ssa.OpAMD64VPCMPDMasked256, + ssa.OpAMD64VPCMPQMasked128, + ssa.OpAMD64VPCMPQMasked256, + ssa.OpAMD64VPCMPQMasked512, + ssa.OpAMD64VPCMPBMasked128, + ssa.OpAMD64VPCMPBMasked256, + ssa.OpAMD64VPCMPBMasked512: + p = simdFp2k1k1Imm8(s, v) default: - // One result is required. + // Unknown reg shape return false } // Masked operation are always compiled with zeroing. switch v.Op { - case ssa.OpAMD64VPMINSDMasked128, - ssa.OpAMD64VMULPSMasked128, - ssa.OpAMD64VPANDQMasked256, - ssa.OpAMD64VSQRTPDMasked128, - ssa.OpAMD64VPMULHUWMasked512, - ssa.OpAMD64VPMULLQMasked256, - ssa.OpAMD64VPMULLDMasked128, - ssa.OpAMD64VPADDQMasked512, - ssa.OpAMD64VPMINUBMasked128, - ssa.OpAMD64VPMULLWMasked128, - ssa.OpAMD64VPMULLWMasked256, - ssa.OpAMD64VPADDSBMasked256, - ssa.OpAMD64VMULPSMasked256, - ssa.OpAMD64VPADDBMasked512, - ssa.OpAMD64VPADDSBMasked512, - ssa.OpAMD64VPORQMasked128, - ssa.OpAMD64VPSUBSBMasked512, - ssa.OpAMD64VSQRTPDMasked256, - ssa.OpAMD64VPAVGWMasked256, - ssa.OpAMD64VPMULUDQMasked256, - ssa.OpAMD64VPADDWMasked512, - ssa.OpAMD64VPOPCNTWMasked128, - ssa.OpAMD64VPANDNQMasked512, - ssa.OpAMD64VPOPCNTDMasked256, - ssa.OpAMD64VMAXPDMasked512, - ssa.OpAMD64VPMAXUWMasked256, - ssa.OpAMD64VPMINSQMasked128, - ssa.OpAMD64VPSUBSWMasked512, - ssa.OpAMD64VPXORQMasked128, + case ssa.OpAMD64VPABSWMasked256, + ssa.OpAMD64VPABSWMasked512, ssa.OpAMD64VPABSWMasked128, - ssa.OpAMD64VPMAXSWMasked256, - ssa.OpAMD64VPMULLWMasked512, - ssa.OpAMD64VPMULDQMasked512, - ssa.OpAMD64VRSQRT14PDMasked128, - ssa.OpAMD64VXORPSMasked128, - ssa.OpAMD64VANDNPSMasked256, - ssa.OpAMD64VPMULUDQMasked128, - ssa.OpAMD64VPMAXSDMasked512, + ssa.OpAMD64VPABSDMasked512, + ssa.OpAMD64VPABSDMasked128, + ssa.OpAMD64VPABSDMasked256, + ssa.OpAMD64VPABSQMasked128, ssa.OpAMD64VPABSQMasked256, - ssa.OpAMD64VPAVGBMasked512, - ssa.OpAMD64VPANDNDMasked256, - ssa.OpAMD64VPMAXUWMasked128, + ssa.OpAMD64VPABSQMasked512, + ssa.OpAMD64VPABSBMasked128, + ssa.OpAMD64VPABSBMasked256, + ssa.OpAMD64VPABSBMasked512, + ssa.OpAMD64VADDPSMasked512, + ssa.OpAMD64VADDPSMasked128, + ssa.OpAMD64VADDPSMasked256, ssa.OpAMD64VADDPDMasked128, - ssa.OpAMD64VPMULLQMasked512, - ssa.OpAMD64VPORQMasked256, - ssa.OpAMD64VSCALEFPDMasked256, - ssa.OpAMD64VPSUBDMasked512, - ssa.OpAMD64VDIVPSMasked512, - ssa.OpAMD64VDIVPDMasked128, - ssa.OpAMD64VXORPDMasked128, - ssa.OpAMD64VPMINSWMasked512, - ssa.OpAMD64VPANDDMasked256, - ssa.OpAMD64VRCP14PSMasked128, - ssa.OpAMD64VMULPDMasked512, - ssa.OpAMD64VORPSMasked512, - ssa.OpAMD64VPORDMasked256, - ssa.OpAMD64VPMAXSQMasked512, - ssa.OpAMD64VPADDDMasked256, - ssa.OpAMD64VPSUBQMasked256, - ssa.OpAMD64VXORPSMasked512, - ssa.OpAMD64VDIVPDMasked512, - ssa.OpAMD64VPMAXUDMasked128, 
- ssa.OpAMD64VPABSWMasked256, - ssa.OpAMD64VPOPCNTWMasked512, - ssa.OpAMD64VPXORQMasked512, - ssa.OpAMD64VMINPDMasked256, + ssa.OpAMD64VADDPDMasked256, + ssa.OpAMD64VADDPDMasked512, ssa.OpAMD64VPADDWMasked256, - ssa.OpAMD64VPSUBWMasked512, - ssa.OpAMD64VPMULLQMasked128, - ssa.OpAMD64VPMINUWMasked128, - ssa.OpAMD64VPOPCNTQMasked128, - ssa.OpAMD64VPXORDMasked256, - ssa.OpAMD64VPORQMasked512, - ssa.OpAMD64VORPDMasked512, - ssa.OpAMD64VPOPCNTDMasked512, - ssa.OpAMD64VPMINSWMasked128, - ssa.OpAMD64VPXORDMasked512, - ssa.OpAMD64VRCP14PDMasked256, - ssa.OpAMD64VPMULUDQMasked512, - ssa.OpAMD64VMAXPSMasked256, - ssa.OpAMD64VANDNPDMasked256, + ssa.OpAMD64VPADDWMasked512, ssa.OpAMD64VPADDWMasked128, - ssa.OpAMD64VMULPDMasked128, - ssa.OpAMD64VANDNPDMasked512, - ssa.OpAMD64VPSUBBMasked512, - ssa.OpAMD64VPANDNDMasked128, - ssa.OpAMD64VPMINUDMasked128, - ssa.OpAMD64VPAVGWMasked128, - ssa.OpAMD64VPMULLDMasked256, - ssa.OpAMD64VPSUBBMasked256, - ssa.OpAMD64VRSQRT14PSMasked128, + ssa.OpAMD64VPADDDMasked512, + ssa.OpAMD64VPADDDMasked128, + ssa.OpAMD64VPADDDMasked256, + ssa.OpAMD64VPADDQMasked128, + ssa.OpAMD64VPADDQMasked256, + ssa.OpAMD64VPADDQMasked512, ssa.OpAMD64VPADDBMasked128, - ssa.OpAMD64VPMAXUBMasked512, - ssa.OpAMD64VDIVPSMasked256, - ssa.OpAMD64VPOPCNTBMasked256, - ssa.OpAMD64VPSUBDMasked256, - ssa.OpAMD64VPAVGWMasked512, - ssa.OpAMD64VSCALEFPSMasked128, - ssa.OpAMD64VMINPSMasked512, - ssa.OpAMD64VPSUBQMasked512, - ssa.OpAMD64VSQRTPSMasked512, - ssa.OpAMD64VPMINSDMasked256, + ssa.OpAMD64VPADDBMasked256, + ssa.OpAMD64VPADDBMasked512, ssa.OpAMD64VANDPSMasked512, - ssa.OpAMD64VPMAXUBMasked256, - ssa.OpAMD64VPSUBWMasked256, - ssa.OpAMD64VPADDSWMasked256, + ssa.OpAMD64VANDPSMasked128, + ssa.OpAMD64VANDPSMasked256, + ssa.OpAMD64VANDPDMasked128, + ssa.OpAMD64VANDPDMasked256, + ssa.OpAMD64VANDPDMasked512, ssa.OpAMD64VPANDDMasked512, - ssa.OpAMD64VPABSWMasked512, - ssa.OpAMD64VPSUBSWMasked256, - ssa.OpAMD64VPSUBQMasked128, - ssa.OpAMD64VPMINUBMasked512, - ssa.OpAMD64VPMULLDMasked512, - ssa.OpAMD64VMULPDMasked256, - ssa.OpAMD64VPSUBSWMasked128, - ssa.OpAMD64VPOPCNTQMasked256, - ssa.OpAMD64VPXORQMasked256, - ssa.OpAMD64VPOPCNTBMasked512, + ssa.OpAMD64VPANDDMasked128, + ssa.OpAMD64VPANDDMasked256, + ssa.OpAMD64VPANDQMasked128, + ssa.OpAMD64VPANDQMasked256, ssa.OpAMD64VPANDQMasked512, - ssa.OpAMD64VPORDMasked512, - ssa.OpAMD64VPMAXUWMasked512, - ssa.OpAMD64VPMAXSDMasked128, - ssa.OpAMD64VPMULDQMasked128, - ssa.OpAMD64VPMINUDMasked256, - ssa.OpAMD64VPABSBMasked512, - ssa.OpAMD64VPANDNDMasked512, - ssa.OpAMD64VPMINUDMasked512, - ssa.OpAMD64VPABSBMasked256, - ssa.OpAMD64VXORPDMasked256, - ssa.OpAMD64VMAXPSMasked512, - ssa.OpAMD64VPABSBMasked128, - ssa.OpAMD64VPSUBBMasked128, - ssa.OpAMD64VPMINSDMasked512, - ssa.OpAMD64VPSUBSBMasked256, - ssa.OpAMD64VORPSMasked256, - ssa.OpAMD64VADDPSMasked128, - ssa.OpAMD64VXORPDMasked512, - ssa.OpAMD64VPMINUQMasked512, - ssa.OpAMD64VDIVPDMasked256, - ssa.OpAMD64VPADDQMasked256, - ssa.OpAMD64VPABSDMasked512, - ssa.OpAMD64VPMAXUDMasked256, - ssa.OpAMD64VPMAXUQMasked128, + ssa.OpAMD64VANDNPSMasked512, + ssa.OpAMD64VANDNPSMasked128, + ssa.OpAMD64VANDNPSMasked256, + ssa.OpAMD64VANDNPDMasked128, + ssa.OpAMD64VANDNPDMasked256, + ssa.OpAMD64VANDNPDMasked512, + ssa.OpAMD64VPANDNDMasked512, + ssa.OpAMD64VPANDNDMasked128, + ssa.OpAMD64VPANDNDMasked256, + ssa.OpAMD64VPANDNQMasked128, + ssa.OpAMD64VPANDNQMasked256, + ssa.OpAMD64VPANDNQMasked512, ssa.OpAMD64VRCP14PSMasked512, + ssa.OpAMD64VRCP14PSMasked128, + ssa.OpAMD64VRCP14PSMasked256, + ssa.OpAMD64VRCP14PDMasked128, + 
ssa.OpAMD64VRCP14PDMasked256, ssa.OpAMD64VRCP14PDMasked512, - ssa.OpAMD64VPSUBDMasked128, - ssa.OpAMD64VPXORDMasked128, - ssa.OpAMD64VORPDMasked128, - ssa.OpAMD64VADDPDMasked256, + ssa.OpAMD64VRSQRT14PSMasked512, + ssa.OpAMD64VRSQRT14PSMasked128, + ssa.OpAMD64VRSQRT14PSMasked256, + ssa.OpAMD64VRSQRT14PDMasked128, ssa.OpAMD64VRSQRT14PDMasked256, - ssa.OpAMD64VPADDQMasked128, - ssa.OpAMD64VPSUBSBMasked128, - ssa.OpAMD64VPADDSBMasked128, - ssa.OpAMD64VPSUBWMasked128, - ssa.OpAMD64VSCALEFPDMasked128, - ssa.OpAMD64VANDNPDMasked128, - ssa.OpAMD64VPMINUWMasked512, + ssa.OpAMD64VRSQRT14PDMasked512, + ssa.OpAMD64VPAVGWMasked256, + ssa.OpAMD64VPAVGWMasked512, + ssa.OpAMD64VPAVGWMasked128, + ssa.OpAMD64VPAVGBMasked128, + ssa.OpAMD64VPAVGBMasked256, + ssa.OpAMD64VPAVGBMasked512, + ssa.OpAMD64VDIVPSMasked512, + ssa.OpAMD64VDIVPSMasked128, + ssa.OpAMD64VDIVPSMasked256, + ssa.OpAMD64VDIVPDMasked128, + ssa.OpAMD64VDIVPDMasked256, + ssa.OpAMD64VDIVPDMasked512, + ssa.OpAMD64VMAXPSMasked512, + ssa.OpAMD64VMAXPSMasked128, + ssa.OpAMD64VMAXPSMasked256, + ssa.OpAMD64VMAXPDMasked128, + ssa.OpAMD64VMAXPDMasked256, + ssa.OpAMD64VMAXPDMasked512, + ssa.OpAMD64VPMAXSWMasked256, + ssa.OpAMD64VPMAXSWMasked512, + ssa.OpAMD64VPMAXSWMasked128, + ssa.OpAMD64VPMAXSDMasked512, + ssa.OpAMD64VPMAXSDMasked128, ssa.OpAMD64VPMAXSDMasked256, + ssa.OpAMD64VPMAXSQMasked128, + ssa.OpAMD64VPMAXSQMasked256, + ssa.OpAMD64VPMAXSQMasked512, + ssa.OpAMD64VPMAXSBMasked128, + ssa.OpAMD64VPMAXSBMasked256, + ssa.OpAMD64VPMAXSBMasked512, + ssa.OpAMD64VPMAXUWMasked256, + ssa.OpAMD64VPMAXUWMasked512, + ssa.OpAMD64VPMAXUWMasked128, + ssa.OpAMD64VPMAXUDMasked512, + ssa.OpAMD64VPMAXUDMasked128, + ssa.OpAMD64VPMAXUDMasked256, + ssa.OpAMD64VPMAXUQMasked128, + ssa.OpAMD64VPMAXUQMasked256, + ssa.OpAMD64VPMAXUQMasked512, + ssa.OpAMD64VPMAXUBMasked128, + ssa.OpAMD64VPMAXUBMasked256, + ssa.OpAMD64VPMAXUBMasked512, + ssa.OpAMD64VMINPSMasked512, + ssa.OpAMD64VMINPSMasked128, + ssa.OpAMD64VMINPSMasked256, + ssa.OpAMD64VMINPDMasked128, + ssa.OpAMD64VMINPDMasked256, + ssa.OpAMD64VMINPDMasked512, + ssa.OpAMD64VPMINSWMasked256, + ssa.OpAMD64VPMINSWMasked512, + ssa.OpAMD64VPMINSWMasked128, + ssa.OpAMD64VPMINSDMasked512, + ssa.OpAMD64VPMINSDMasked128, + ssa.OpAMD64VPMINSDMasked256, + ssa.OpAMD64VPMINSQMasked128, + ssa.OpAMD64VPMINSQMasked256, + ssa.OpAMD64VPMINSQMasked512, + ssa.OpAMD64VPMINSBMasked128, + ssa.OpAMD64VPMINSBMasked256, + ssa.OpAMD64VPMINSBMasked512, ssa.OpAMD64VPMINUWMasked256, - ssa.OpAMD64VPMULDQMasked256, - ssa.OpAMD64VPADDSWMasked512, + ssa.OpAMD64VPMINUWMasked512, + ssa.OpAMD64VPMINUWMasked128, + ssa.OpAMD64VPMINUDMasked512, + ssa.OpAMD64VPMINUDMasked128, + ssa.OpAMD64VPMINUDMasked256, ssa.OpAMD64VPMINUQMasked128, - ssa.OpAMD64VANDPSMasked256, - ssa.OpAMD64VPADDDMasked128, - ssa.OpAMD64VANDNPSMasked128, - ssa.OpAMD64VPABSDMasked128, - ssa.OpAMD64VPADDSWMasked128, - ssa.OpAMD64VPADDDMasked512, + ssa.OpAMD64VPMINUQMasked256, + ssa.OpAMD64VPMINUQMasked512, + ssa.OpAMD64VPMINUBMasked128, + ssa.OpAMD64VPMINUBMasked256, + ssa.OpAMD64VPMINUBMasked512, ssa.OpAMD64VMULPSMasked512, - ssa.OpAMD64VORPDMasked256, - ssa.OpAMD64VPMULHUWMasked256, - ssa.OpAMD64VPORDMasked128, + ssa.OpAMD64VMULPSMasked128, + ssa.OpAMD64VMULPSMasked256, + ssa.OpAMD64VMULPDMasked128, + ssa.OpAMD64VMULPDMasked256, + ssa.OpAMD64VMULPDMasked512, + ssa.OpAMD64VSCALEFPSMasked512, + ssa.OpAMD64VSCALEFPSMasked128, + ssa.OpAMD64VSCALEFPSMasked256, + ssa.OpAMD64VSCALEFPDMasked128, + ssa.OpAMD64VSCALEFPDMasked256, ssa.OpAMD64VSCALEFPDMasked512, - ssa.OpAMD64VPMAXSBMasked512, - 
ssa.OpAMD64VPMINSQMasked256, - ssa.OpAMD64VPMINSBMasked512, - ssa.OpAMD64VADDPDMasked512, + ssa.OpAMD64VPMULDQMasked128, + ssa.OpAMD64VPMULDQMasked256, + ssa.OpAMD64VPMULDQMasked512, + ssa.OpAMD64VPMULUDQMasked128, + ssa.OpAMD64VPMULUDQMasked256, + ssa.OpAMD64VPMULUDQMasked512, + ssa.OpAMD64VPMULHWMasked256, ssa.OpAMD64VPMULHWMasked512, - ssa.OpAMD64VRSQRT14PSMasked256, - ssa.OpAMD64VPOPCNTBMasked128, - ssa.OpAMD64VPMAXUQMasked256, - ssa.OpAMD64VPABSQMasked512, - ssa.OpAMD64VDIVPSMasked128, - ssa.OpAMD64VPAVGBMasked128, - ssa.OpAMD64VMAXPDMasked128, - ssa.OpAMD64VADDPSMasked256, - ssa.OpAMD64VRSQRT14PSMasked512, + ssa.OpAMD64VPMULHWMasked128, + ssa.OpAMD64VPMULHUWMasked256, + ssa.OpAMD64VPMULHUWMasked512, + ssa.OpAMD64VPMULHUWMasked128, + ssa.OpAMD64VPMULLWMasked256, + ssa.OpAMD64VPMULLWMasked512, + ssa.OpAMD64VPMULLWMasked128, + ssa.OpAMD64VPMULLDMasked512, + ssa.OpAMD64VPMULLDMasked128, + ssa.OpAMD64VPMULLDMasked256, + ssa.OpAMD64VPMULLQMasked128, + ssa.OpAMD64VPMULLQMasked256, + ssa.OpAMD64VPMULLQMasked512, + ssa.OpAMD64VORPSMasked512, ssa.OpAMD64VORPSMasked128, - ssa.OpAMD64VPANDQMasked128, - ssa.OpAMD64VRCP14PDMasked128, - ssa.OpAMD64VADDPSMasked512, - ssa.OpAMD64VPMAXSQMasked128, - ssa.OpAMD64VPOPCNTDMasked128, - ssa.OpAMD64VXORPSMasked256, - ssa.OpAMD64VPMINSQMasked512, - ssa.OpAMD64VANDNPSMasked512, - ssa.OpAMD64VPMAXSWMasked512, - ssa.OpAMD64VSQRTPSMasked128, + ssa.OpAMD64VORPSMasked256, + ssa.OpAMD64VORPDMasked128, + ssa.OpAMD64VORPDMasked256, + ssa.OpAMD64VORPDMasked512, + ssa.OpAMD64VPORDMasked512, + ssa.OpAMD64VPORDMasked128, + ssa.OpAMD64VPORDMasked256, + ssa.OpAMD64VPORQMasked128, + ssa.OpAMD64VPORQMasked256, + ssa.OpAMD64VPORQMasked512, ssa.OpAMD64VPOPCNTWMasked256, - ssa.OpAMD64VMAXPDMasked256, - ssa.OpAMD64VPANDNQMasked256, - ssa.OpAMD64VPMULHUWMasked128, - ssa.OpAMD64VMINPDMasked512, - ssa.OpAMD64VMAXPSMasked128, - ssa.OpAMD64VPMAXSBMasked256, - ssa.OpAMD64VPANDDMasked128, + ssa.OpAMD64VPOPCNTWMasked512, + ssa.OpAMD64VPOPCNTWMasked128, + ssa.OpAMD64VPOPCNTDMasked512, + ssa.OpAMD64VPOPCNTDMasked128, + ssa.OpAMD64VPOPCNTDMasked256, + ssa.OpAMD64VPOPCNTQMasked128, + ssa.OpAMD64VPOPCNTQMasked256, ssa.OpAMD64VPOPCNTQMasked512, - ssa.OpAMD64VANDPDMasked128, - ssa.OpAMD64VPANDNQMasked128, - ssa.OpAMD64VSCALEFPSMasked512, - ssa.OpAMD64VANDPDMasked512, - ssa.OpAMD64VPMULHWMasked128, - ssa.OpAMD64VPABSDMasked256, - ssa.OpAMD64VPADDBMasked256, - ssa.OpAMD64VPMINUQMasked256, - ssa.OpAMD64VRSQRT14PDMasked512, - ssa.OpAMD64VPMINUBMasked256, - ssa.OpAMD64VPMAXSBMasked128, - ssa.OpAMD64VPMAXSWMasked128, - ssa.OpAMD64VPMINSBMasked256, - ssa.OpAMD64VSQRTPDMasked512, - ssa.OpAMD64VPMINSWMasked256, - ssa.OpAMD64VMINPSMasked256, - ssa.OpAMD64VPMAXUBMasked128, - ssa.OpAMD64VANDPDMasked256, - ssa.OpAMD64VMINPDMasked128, - ssa.OpAMD64VMINPSMasked128, - ssa.OpAMD64VRCP14PSMasked256, - ssa.OpAMD64VPMINSBMasked128, - ssa.OpAMD64VPMULHWMasked256, + ssa.OpAMD64VPOPCNTBMasked128, + ssa.OpAMD64VPOPCNTBMasked256, + ssa.OpAMD64VPOPCNTBMasked512, + ssa.OpAMD64VPADDSWMasked256, + ssa.OpAMD64VPADDSWMasked512, + ssa.OpAMD64VPADDSWMasked128, + ssa.OpAMD64VPADDSBMasked128, + ssa.OpAMD64VPADDSBMasked256, + ssa.OpAMD64VPADDSBMasked512, + ssa.OpAMD64VPSUBSWMasked256, + ssa.OpAMD64VPSUBSWMasked512, + ssa.OpAMD64VPSUBSWMasked128, + ssa.OpAMD64VPSUBSBMasked128, + ssa.OpAMD64VPSUBSBMasked256, + ssa.OpAMD64VPSUBSBMasked512, + ssa.OpAMD64VSQRTPSMasked512, + ssa.OpAMD64VSQRTPSMasked128, ssa.OpAMD64VSQRTPSMasked256, - ssa.OpAMD64VPMAXUDMasked512, - ssa.OpAMD64VPMAXUQMasked512, - ssa.OpAMD64VANDPSMasked128, - 
ssa.OpAMD64VPABSQMasked128, - ssa.OpAMD64VPMAXSQMasked256, - ssa.OpAMD64VPAVGBMasked256, - ssa.OpAMD64VSCALEFPSMasked256: + ssa.OpAMD64VSQRTPDMasked128, + ssa.OpAMD64VSQRTPDMasked256, + ssa.OpAMD64VSQRTPDMasked512, + ssa.OpAMD64VPSUBWMasked256, + ssa.OpAMD64VPSUBWMasked512, + ssa.OpAMD64VPSUBWMasked128, + ssa.OpAMD64VPSUBDMasked512, + ssa.OpAMD64VPSUBDMasked128, + ssa.OpAMD64VPSUBDMasked256, + ssa.OpAMD64VPSUBQMasked128, + ssa.OpAMD64VPSUBQMasked256, + ssa.OpAMD64VPSUBQMasked512, + ssa.OpAMD64VPSUBBMasked128, + ssa.OpAMD64VPSUBBMasked256, + ssa.OpAMD64VPSUBBMasked512, + ssa.OpAMD64VXORPSMasked512, + ssa.OpAMD64VXORPSMasked128, + ssa.OpAMD64VXORPSMasked256, + ssa.OpAMD64VXORPDMasked128, + ssa.OpAMD64VXORPDMasked256, + ssa.OpAMD64VXORPDMasked512, + ssa.OpAMD64VPXORDMasked512, + ssa.OpAMD64VPXORDMasked128, + ssa.OpAMD64VPXORDMasked256, + ssa.OpAMD64VPXORQMasked128, + ssa.OpAMD64VPXORQMasked256, + ssa.OpAMD64VPXORQMasked512: x86.ParseSuffix(p, "Z") } diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index a273131d469613..a9daf275484491 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -1,1081 +1,1074 @@ // Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. -// The AVX instruction encodings orders vector register from right to left, for example: -// VSUBPS X Y Z means Z=Y-X -// The rules here swapped the order of such X and Y because the ssa to prog lowering in simdssa.go assumes a -// left to right order. -// TODO: we should offload the logic to simdssa.go, instead of here. -// -// Masks are always at the end, immediates always at the beginning. 
-(AddFloat32x16 x y) => (VADDPS512 y x) -(AndFloat32x16 x y) => (VANDPS512 y x) -(AndNotFloat32x16 x y) => (VANDNPS512 y x) -(ApproximateReciprocalFloat32x16 x) => (VRCP14PS512 x) -(ApproximateReciprocalOfSqrtFloat32x16 x) => (VRSQRT14PS512 x) -(DivFloat32x16 x y) => (VDIVPS512 y x) -(MaxFloat32x16 x y) => (VMAXPS512 y x) -(MinFloat32x16 x y) => (VMINPS512 y x) -(MulFloat32x16 x y) => (VMULPS512 y x) -(MulByPowOf2Float32x16 x y) => (VSCALEFPS512 y x) -(OrFloat32x16 x y) => (VORPS512 y x) -(SqrtFloat32x16 x) => (VSQRTPS512 x) -(SubFloat32x16 x y) => (VADDPS512 y x) -(XorFloat32x16 x y) => (VXORPS512 y x) -(AddFloat32x4 x y) => (VADDPS128 y x) -(AndFloat32x4 x y) => (VANDPS128 y x) -(AndNotFloat32x4 x y) => (VANDNPS128 y x) -(ApproximateReciprocalFloat32x4 x) => (VRCP14PS128 x) -(ApproximateReciprocalOfSqrtFloat32x4 x) => (VRSQRTPS128 x) -(DivFloat32x4 x y) => (VDIVPS128 y x) -(MaxFloat32x4 x y) => (VMAXPS128 y x) -(MinFloat32x4 x y) => (VMINPS128 y x) -(MulFloat32x4 x y) => (VMULPS128 y x) -(MulByPowOf2Float32x4 x y) => (VSCALEFPS128 y x) -(OrFloat32x4 x y) => (VORPS128 y x) -(PairwiseAddFloat32x4 x y) => (VHADDPS128 y x) -(PairwiseSubFloat32x4 x y) => (VHSUBPS128 y x) -(SqrtFloat32x4 x) => (VSQRTPS128 x) -(SubFloat32x4 x y) => (VADDPS128 y x) -(XorFloat32x4 x y) => (VXORPS128 y x) -(AddFloat32x8 x y) => (VADDPS256 y x) -(AndFloat32x8 x y) => (VANDPS256 y x) -(AndNotFloat32x8 x y) => (VANDNPS256 y x) -(ApproximateReciprocalFloat32x8 x) => (VRCP14PS256 x) -(ApproximateReciprocalOfSqrtFloat32x8 x) => (VRSQRTPS256 x) -(DivFloat32x8 x y) => (VDIVPS256 y x) -(MaxFloat32x8 x y) => (VMAXPS256 y x) -(MinFloat32x8 x y) => (VMINPS256 y x) -(MulFloat32x8 x y) => (VMULPS256 y x) -(MulByPowOf2Float32x8 x y) => (VSCALEFPS256 y x) -(OrFloat32x8 x y) => (VORPS256 y x) -(PairwiseAddFloat32x8 x y) => (VHADDPS256 y x) -(PairwiseSubFloat32x8 x y) => (VHSUBPS256 y x) -(SqrtFloat32x8 x) => (VSQRTPS256 x) -(SubFloat32x8 x y) => (VADDPS256 y x) -(XorFloat32x8 x y) => (VXORPS256 y x) -(AddFloat64x2 x y) => (VADDPD128 y x) -(AndFloat64x2 x y) => (VANDPD128 y x) -(AndNotFloat64x2 x y) => (VANDNPD128 y x) -(ApproximateReciprocalFloat64x2 x) => (VRCP14PD128 x) -(ApproximateReciprocalOfSqrtFloat64x2 x) => (VRSQRT14PD128 x) -(DivFloat64x2 x y) => (VDIVPD128 y x) -(MaxFloat64x2 x y) => (VMAXPD128 y x) -(MinFloat64x2 x y) => (VMINPD128 y x) -(MulFloat64x2 x y) => (VMULPD128 y x) -(MulByPowOf2Float64x2 x y) => (VSCALEFPD128 y x) -(OrFloat64x2 x y) => (VORPD128 y x) -(PairwiseAddFloat64x2 x y) => (VHADDPD128 y x) -(PairwiseSubFloat64x2 x y) => (VHSUBPD128 y x) -(SqrtFloat64x2 x) => (VSQRTPD128 x) -(SubFloat64x2 x y) => (VADDPD128 y x) -(XorFloat64x2 x y) => (VXORPD128 y x) -(AddFloat64x4 x y) => (VADDPD256 y x) -(AndFloat64x4 x y) => (VANDPD256 y x) -(AndNotFloat64x4 x y) => (VANDNPD256 y x) -(ApproximateReciprocalFloat64x4 x) => (VRCP14PD256 x) -(ApproximateReciprocalOfSqrtFloat64x4 x) => (VRSQRT14PD256 x) -(DivFloat64x4 x y) => (VDIVPD256 y x) -(MaxFloat64x4 x y) => (VMAXPD256 y x) -(MinFloat64x4 x y) => (VMINPD256 y x) -(MulFloat64x4 x y) => (VMULPD256 y x) -(MulByPowOf2Float64x4 x y) => (VSCALEFPD256 y x) -(OrFloat64x4 x y) => (VORPD256 y x) -(PairwiseAddFloat64x4 x y) => (VHADDPD256 y x) -(PairwiseSubFloat64x4 x y) => (VHSUBPD256 y x) -(SqrtFloat64x4 x) => (VSQRTPD256 x) -(SubFloat64x4 x y) => (VADDPD256 y x) -(XorFloat64x4 x y) => (VXORPD256 y x) -(AddFloat64x8 x y) => (VADDPD512 y x) -(AndFloat64x8 x y) => (VANDPD512 y x) -(AndNotFloat64x8 x y) => (VANDNPD512 y x) -(ApproximateReciprocalFloat64x8 x) => (VRCP14PD512 x) 
-(ApproximateReciprocalOfSqrtFloat64x8 x) => (VRSQRT14PD512 x) -(DivFloat64x8 x y) => (VDIVPD512 y x) -(MaxFloat64x8 x y) => (VMAXPD512 y x) -(MinFloat64x8 x y) => (VMINPD512 y x) -(MulFloat64x8 x y) => (VMULPD512 y x) -(MulByPowOf2Float64x8 x y) => (VSCALEFPD512 y x) -(OrFloat64x8 x y) => (VORPD512 y x) -(SqrtFloat64x8 x) => (VSQRTPD512 x) -(SubFloat64x8 x y) => (VADDPD512 y x) -(XorFloat64x8 x y) => (VXORPD512 y x) -(AbsoluteInt16x16 x) => (VPABSW256 x) -(AddInt16x16 x y) => (VPADDW256 y x) -(AndInt16x16 x y) => (VPAND256 y x) -(AndNotInt16x16 x y) => (VPANDN256 y x) -(EqualInt16x16 x y) => (VPCMPEQW256 y x) -(GreaterInt16x16 x y) => (VPCMPGTW256 y x) -(MaxInt16x16 x y) => (VPMAXSW256 y x) -(MinInt16x16 x y) => (VPMINSW256 y x) -(MulHighInt16x16 x y) => (VPMULHW256 y x) -(MulLowInt16x16 x y) => (VPMULLW256 y x) -(OrInt16x16 x y) => (VPOR256 y x) -(PairwiseAddInt16x16 x y) => (VPHADDW256 y x) -(PairwiseSubInt16x16 x y) => (VPHSUBW256 y x) -(PopCountInt16x16 x) => (VPOPCNTW256 x) -(SaturatedAddInt16x16 x y) => (VPADDSW256 y x) -(SaturatedPairwiseAddInt16x16 x y) => (VPHADDSW256 y x) -(SaturatedPairwiseSubInt16x16 x y) => (VPHSUBSW256 y x) -(SaturatedSubInt16x16 x y) => (VPSUBSW256 y x) -(SignInt16x16 x y) => (VPSIGNW256 y x) -(SubInt16x16 x y) => (VPSUBW256 y x) -(XorInt16x16 x y) => (VPXOR256 y x) -(AbsoluteInt16x32 x) => (VPABSW512 x) -(AddInt16x32 x y) => (VPADDW512 y x) -(MaxInt16x32 x y) => (VPMAXSW512 y x) -(MinInt16x32 x y) => (VPMINSW512 y x) -(MulHighInt16x32 x y) => (VPMULHW512 y x) -(MulLowInt16x32 x y) => (VPMULLW512 y x) -(PopCountInt16x32 x) => (VPOPCNTW512 x) -(SaturatedAddInt16x32 x y) => (VPADDSW512 y x) -(SaturatedSubInt16x32 x y) => (VPSUBSW512 y x) -(SubInt16x32 x y) => (VPSUBW512 y x) -(AbsoluteInt16x8 x) => (VPABSW128 x) -(AddInt16x8 x y) => (VPADDW128 y x) -(AndInt16x8 x y) => (VPAND128 y x) -(AndNotInt16x8 x y) => (VPANDN128 y x) -(EqualInt16x8 x y) => (VPCMPEQW128 y x) -(GreaterInt16x8 x y) => (VPCMPGTW128 y x) -(MaxInt16x8 x y) => (VPMAXSW128 y x) -(MinInt16x8 x y) => (VPMINSW128 y x) -(MulHighInt16x8 x y) => (VPMULHW128 y x) -(MulLowInt16x8 x y) => (VPMULLW128 y x) -(OrInt16x8 x y) => (VPOR128 y x) -(PairwiseAddInt16x8 x y) => (VPHADDW128 y x) -(PairwiseSubInt16x8 x y) => (VPHSUBW128 y x) -(PopCountInt16x8 x) => (VPOPCNTW128 x) -(SaturatedAddInt16x8 x y) => (VPADDSW128 y x) -(SaturatedPairwiseAddInt16x8 x y) => (VPHADDSW128 y x) -(SaturatedPairwiseSubInt16x8 x y) => (VPHSUBSW128 y x) -(SaturatedSubInt16x8 x y) => (VPSUBSW128 y x) -(SignInt16x8 x y) => (VPSIGNW128 y x) -(SubInt16x8 x y) => (VPSUBW128 y x) -(XorInt16x8 x y) => (VPXOR128 y x) -(AbsoluteInt32x16 x) => (VPABSD512 x) -(AddInt32x16 x y) => (VPADDD512 y x) -(AndInt32x16 x y) => (VPANDD512 y x) -(AndNotInt32x16 x y) => (VPANDND512 y x) -(MaxInt32x16 x y) => (VPMAXSD512 y x) -(MinInt32x16 x y) => (VPMINSD512 y x) -(MulLowInt32x16 x y) => (VPMULLD512 y x) -(OrInt32x16 x y) => (VPORD512 y x) -(PopCountInt32x16 x) => (VPOPCNTD512 x) -(SubInt32x16 x y) => (VPSUBD512 y x) -(XorInt32x16 x y) => (VPXORD512 y x) -(AbsoluteInt32x4 x) => (VPABSD128 x) -(AddInt32x4 x y) => (VPADDD128 y x) -(AndInt32x4 x y) => (VPAND128 y x) -(AndNotInt32x4 x y) => (VPANDN128 y x) -(EqualInt32x4 x y) => (VPCMPEQD128 y x) -(GreaterInt32x4 x y) => (VPCMPGTD128 y x) -(MaxInt32x4 x y) => (VPMAXSD128 y x) -(MinInt32x4 x y) => (VPMINSD128 y x) -(MulEvenWidenInt32x4 x y) => (VPMULDQ128 y x) -(MulLowInt32x4 x y) => (VPMULLD128 y x) -(OrInt32x4 x y) => (VPOR128 y x) -(PairwiseAddInt32x4 x y) => (VPHADDD128 y x) -(PairwiseSubInt32x4 x y) => 
(VPHSUBD128 y x) -(PopCountInt32x4 x) => (VPOPCNTD128 x) -(SignInt32x4 x y) => (VPSIGND128 y x) -(SubInt32x4 x y) => (VPSUBD128 y x) -(XorInt32x4 x y) => (VPXOR128 y x) -(AbsoluteInt32x8 x) => (VPABSD256 x) -(AddInt32x8 x y) => (VPADDD256 y x) -(AndInt32x8 x y) => (VPAND256 y x) -(AndNotInt32x8 x y) => (VPANDN256 y x) -(EqualInt32x8 x y) => (VPCMPEQD256 y x) -(GreaterInt32x8 x y) => (VPCMPGTD256 y x) -(MaxInt32x8 x y) => (VPMAXSD256 y x) -(MinInt32x8 x y) => (VPMINSD256 y x) -(MulEvenWidenInt32x8 x y) => (VPMULDQ256 y x) -(MulLowInt32x8 x y) => (VPMULLD256 y x) -(OrInt32x8 x y) => (VPOR256 y x) -(PairwiseAddInt32x8 x y) => (VPHADDD256 y x) -(PairwiseSubInt32x8 x y) => (VPHSUBD256 y x) -(PopCountInt32x8 x) => (VPOPCNTD256 x) -(SignInt32x8 x y) => (VPSIGND256 y x) -(SubInt32x8 x y) => (VPSUBD256 y x) -(XorInt32x8 x y) => (VPXOR256 y x) -(AbsoluteInt64x2 x) => (VPABSQ128 x) -(AddInt64x2 x y) => (VPADDQ128 y x) -(AndInt64x2 x y) => (VPAND128 y x) -(AndNotInt64x2 x y) => (VPANDN128 y x) -(EqualInt64x2 x y) => (VPCMPEQQ128 y x) -(MaxInt64x2 x y) => (VPMAXSQ128 y x) -(MinInt64x2 x y) => (VPMINSQ128 y x) -(MulEvenWidenInt64x2 x y) => (VPMULDQ128 y x) -(MulLowInt64x2 x y) => (VPMULLQ128 y x) -(OrInt64x2 x y) => (VPOR128 y x) -(PopCountInt64x2 x) => (VPOPCNTQ128 x) -(SubInt64x2 x y) => (VPSUBQ128 y x) -(XorInt64x2 x y) => (VPXOR128 y x) -(AbsoluteInt64x4 x) => (VPABSQ256 x) -(AddInt64x4 x y) => (VPADDQ256 y x) -(AndInt64x4 x y) => (VPAND256 y x) -(AndNotInt64x4 x y) => (VPANDN256 y x) -(EqualInt64x4 x y) => (VPCMPEQQ256 y x) -(GreaterInt64x4 x y) => (VPCMPGTQ256 y x) -(MaxInt64x4 x y) => (VPMAXSQ256 y x) -(MinInt64x4 x y) => (VPMINSQ256 y x) -(MulEvenWidenInt64x4 x y) => (VPMULDQ256 y x) -(MulLowInt64x4 x y) => (VPMULLQ256 y x) -(OrInt64x4 x y) => (VPOR256 y x) -(PopCountInt64x4 x) => (VPOPCNTQ256 x) -(SubInt64x4 x y) => (VPSUBQ256 y x) -(XorInt64x4 x y) => (VPXOR256 y x) -(AbsoluteInt64x8 x) => (VPABSQ512 x) -(AddInt64x8 x y) => (VPADDQ512 y x) -(AndInt64x8 x y) => (VPANDQ512 y x) -(AndNotInt64x8 x y) => (VPANDNQ512 y x) -(MaxInt64x8 x y) => (VPMAXSQ512 y x) -(MinInt64x8 x y) => (VPMINSQ512 y x) -(MulEvenWidenInt64x8 x y) => (VPMULDQ512 y x) -(MulLowInt64x8 x y) => (VPMULLQ512 y x) -(OrInt64x8 x y) => (VPORQ512 y x) -(PopCountInt64x8 x) => (VPOPCNTQ512 x) -(SubInt64x8 x y) => (VPSUBQ512 y x) -(XorInt64x8 x y) => (VPXORQ512 y x) -(AbsoluteInt8x16 x) => (VPABSB128 x) -(AddInt8x16 x y) => (VPADDB128 y x) -(AndInt8x16 x y) => (VPAND128 y x) -(AndNotInt8x16 x y) => (VPANDN128 y x) -(EqualInt8x16 x y) => (VPCMPEQB128 y x) -(GreaterInt8x16 x y) => (VPCMPGTB128 y x) -(MaxInt8x16 x y) => (VPMAXSB128 y x) -(MinInt8x16 x y) => (VPMINSB128 y x) -(OrInt8x16 x y) => (VPOR128 y x) -(PopCountInt8x16 x) => (VPOPCNTB128 x) -(SaturatedAddInt8x16 x y) => (VPADDSB128 y x) -(SaturatedSubInt8x16 x y) => (VPSUBSB128 y x) -(SignInt8x16 x y) => (VPSIGNB128 y x) -(SubInt8x16 x y) => (VPSUBB128 y x) -(XorInt8x16 x y) => (VPXOR128 y x) -(AbsoluteInt8x32 x) => (VPABSB256 x) -(AddInt8x32 x y) => (VPADDB256 y x) -(AndInt8x32 x y) => (VPAND256 y x) -(AndNotInt8x32 x y) => (VPANDN256 y x) -(EqualInt8x32 x y) => (VPCMPEQB256 y x) -(GreaterInt8x32 x y) => (VPCMPGTB256 y x) -(MaxInt8x32 x y) => (VPMAXSB256 y x) -(MinInt8x32 x y) => (VPMINSB256 y x) -(OrInt8x32 x y) => (VPOR256 y x) -(PopCountInt8x32 x) => (VPOPCNTB256 x) -(SaturatedAddInt8x32 x y) => (VPADDSB256 y x) -(SaturatedSubInt8x32 x y) => (VPSUBSB256 y x) -(SignInt8x32 x y) => (VPSIGNB256 y x) -(SubInt8x32 x y) => (VPSUBB256 y x) -(XorInt8x32 x y) => (VPXOR256 y x) 
-(AbsoluteInt8x64 x) => (VPABSB512 x) -(AddInt8x64 x y) => (VPADDB512 y x) -(MaxInt8x64 x y) => (VPMAXSB512 y x) -(MinInt8x64 x y) => (VPMINSB512 y x) -(PopCountInt8x64 x) => (VPOPCNTB512 x) -(SaturatedAddInt8x64 x y) => (VPADDSB512 y x) -(SaturatedSubInt8x64 x y) => (VPSUBSB512 y x) -(SubInt8x64 x y) => (VPSUBB512 y x) -(AddUint16x16 x y) => (VPADDW256 y x) -(AndUint16x16 x y) => (VPAND256 y x) -(AndNotUint16x16 x y) => (VPANDN256 y x) -(AverageUint16x16 x y) => (VPAVGW256 y x) -(MaxUint16x16 x y) => (VPMAXUW256 y x) -(MinUint16x16 x y) => (VPMINUW256 y x) -(MulHighUint16x16 x y) => (VPMULHUW256 y x) -(OrUint16x16 x y) => (VPOR256 y x) -(PairwiseAddUint16x16 x y) => (VPHADDW256 y x) -(PairwiseSubUint16x16 x y) => (VPHSUBW256 y x) -(PopCountUint16x16 x) => (VPOPCNTW256 x) -(SaturatedAddUint16x16 x y) => (VPADDSW256 y x) -(SaturatedSubUint16x16 x y) => (VPSUBSW256 y x) -(SubUint16x16 x y) => (VPSUBW256 y x) -(XorUint16x16 x y) => (VPXOR256 y x) -(AddUint16x32 x y) => (VPADDW512 y x) -(AverageUint16x32 x y) => (VPAVGW512 y x) -(MaxUint16x32 x y) => (VPMAXUW512 y x) -(MinUint16x32 x y) => (VPMINUW512 y x) -(MulHighUint16x32 x y) => (VPMULHUW512 y x) -(PopCountUint16x32 x) => (VPOPCNTW512 x) -(SaturatedAddUint16x32 x y) => (VPADDSW512 y x) -(SaturatedSubUint16x32 x y) => (VPSUBSW512 y x) -(SubUint16x32 x y) => (VPSUBW512 y x) -(AddUint16x8 x y) => (VPADDW128 y x) -(AndUint16x8 x y) => (VPAND128 y x) -(AndNotUint16x8 x y) => (VPANDN128 y x) -(AverageUint16x8 x y) => (VPAVGW128 y x) -(MaxUint16x8 x y) => (VPMAXUW128 y x) -(MinUint16x8 x y) => (VPMINUW128 y x) -(MulHighUint16x8 x y) => (VPMULHUW128 y x) -(OrUint16x8 x y) => (VPOR128 y x) -(PairwiseAddUint16x8 x y) => (VPHADDW128 y x) -(PairwiseSubUint16x8 x y) => (VPHSUBW128 y x) -(PopCountUint16x8 x) => (VPOPCNTW128 x) -(SaturatedAddUint16x8 x y) => (VPADDSW128 y x) -(SaturatedSubUint16x8 x y) => (VPSUBSW128 y x) -(SubUint16x8 x y) => (VPSUBW128 y x) -(XorUint16x8 x y) => (VPXOR128 y x) -(AddUint32x16 x y) => (VPADDD512 y x) -(AndUint32x16 x y) => (VPANDD512 y x) -(AndNotUint32x16 x y) => (VPANDND512 y x) -(MaxUint32x16 x y) => (VPMAXUD512 y x) -(MinUint32x16 x y) => (VPMINUD512 y x) -(OrUint32x16 x y) => (VPORD512 y x) -(PopCountUint32x16 x) => (VPOPCNTD512 x) -(SubUint32x16 x y) => (VPSUBD512 y x) -(XorUint32x16 x y) => (VPXORD512 y x) -(AddUint32x4 x y) => (VPADDD128 y x) -(AndUint32x4 x y) => (VPAND128 y x) -(AndNotUint32x4 x y) => (VPANDN128 y x) -(MaxUint32x4 x y) => (VPMAXUD128 y x) -(MinUint32x4 x y) => (VPMINUD128 y x) -(MulEvenWidenUint32x4 x y) => (VPMULUDQ128 y x) -(OrUint32x4 x y) => (VPOR128 y x) -(PairwiseAddUint32x4 x y) => (VPHADDD128 y x) -(PairwiseSubUint32x4 x y) => (VPHSUBD128 y x) -(PopCountUint32x4 x) => (VPOPCNTD128 x) -(SubUint32x4 x y) => (VPSUBD128 y x) -(XorUint32x4 x y) => (VPXOR128 y x) -(AddUint32x8 x y) => (VPADDD256 y x) -(AndUint32x8 x y) => (VPAND256 y x) -(AndNotUint32x8 x y) => (VPANDN256 y x) -(MaxUint32x8 x y) => (VPMAXUD256 y x) -(MinUint32x8 x y) => (VPMINUD256 y x) -(MulEvenWidenUint32x8 x y) => (VPMULUDQ256 y x) -(OrUint32x8 x y) => (VPOR256 y x) -(PairwiseAddUint32x8 x y) => (VPHADDD256 y x) -(PairwiseSubUint32x8 x y) => (VPHSUBD256 y x) -(PopCountUint32x8 x) => (VPOPCNTD256 x) -(SubUint32x8 x y) => (VPSUBD256 y x) -(XorUint32x8 x y) => (VPXOR256 y x) -(AddUint64x2 x y) => (VPADDQ128 y x) -(AndUint64x2 x y) => (VPAND128 y x) -(AndNotUint64x2 x y) => (VPANDN128 y x) -(MaxUint64x2 x y) => (VPMAXUQ128 y x) -(MinUint64x2 x y) => (VPMINUQ128 y x) -(MulEvenWidenUint64x2 x y) => (VPMULUDQ128 y x) -(OrUint64x2 
x y) => (VPOR128 y x) -(PopCountUint64x2 x) => (VPOPCNTQ128 x) -(SubUint64x2 x y) => (VPSUBQ128 y x) -(XorUint64x2 x y) => (VPXOR128 y x) -(AddUint64x4 x y) => (VPADDQ256 y x) -(AndUint64x4 x y) => (VPAND256 y x) -(AndNotUint64x4 x y) => (VPANDN256 y x) -(MaxUint64x4 x y) => (VPMAXUQ256 y x) -(MinUint64x4 x y) => (VPMINUQ256 y x) -(MulEvenWidenUint64x4 x y) => (VPMULUDQ256 y x) -(OrUint64x4 x y) => (VPOR256 y x) -(PopCountUint64x4 x) => (VPOPCNTQ256 x) -(SubUint64x4 x y) => (VPSUBQ256 y x) -(XorUint64x4 x y) => (VPXOR256 y x) -(AddUint64x8 x y) => (VPADDQ512 y x) -(AndUint64x8 x y) => (VPANDQ512 y x) -(AndNotUint64x8 x y) => (VPANDNQ512 y x) -(MaxUint64x8 x y) => (VPMAXUQ512 y x) -(MinUint64x8 x y) => (VPMINUQ512 y x) -(MulEvenWidenUint64x8 x y) => (VPMULUDQ512 y x) -(OrUint64x8 x y) => (VPORQ512 y x) -(PopCountUint64x8 x) => (VPOPCNTQ512 x) -(SubUint64x8 x y) => (VPSUBQ512 y x) -(XorUint64x8 x y) => (VPXORQ512 y x) -(AddUint8x16 x y) => (VPADDB128 y x) -(AndUint8x16 x y) => (VPAND128 y x) -(AndNotUint8x16 x y) => (VPANDN128 y x) -(AverageUint8x16 x y) => (VPAVGB128 y x) -(MaxUint8x16 x y) => (VPMAXUB128 y x) -(MinUint8x16 x y) => (VPMINUB128 y x) -(OrUint8x16 x y) => (VPOR128 y x) -(PopCountUint8x16 x) => (VPOPCNTB128 x) -(SaturatedAddUint8x16 x y) => (VPADDSB128 y x) -(SaturatedSubUint8x16 x y) => (VPSUBSB128 y x) -(SubUint8x16 x y) => (VPSUBB128 y x) -(XorUint8x16 x y) => (VPXOR128 y x) -(AddUint8x32 x y) => (VPADDB256 y x) -(AndUint8x32 x y) => (VPAND256 y x) -(AndNotUint8x32 x y) => (VPANDN256 y x) -(AverageUint8x32 x y) => (VPAVGB256 y x) -(MaxUint8x32 x y) => (VPMAXUB256 y x) -(MinUint8x32 x y) => (VPMINUB256 y x) -(OrUint8x32 x y) => (VPOR256 y x) -(PopCountUint8x32 x) => (VPOPCNTB256 x) -(SaturatedAddUint8x32 x y) => (VPADDSB256 y x) -(SaturatedSubUint8x32 x y) => (VPSUBSB256 y x) -(SubUint8x32 x y) => (VPSUBB256 y x) -(XorUint8x32 x y) => (VPXOR256 y x) -(AddUint8x64 x y) => (VPADDB512 y x) -(AverageUint8x64 x y) => (VPAVGB512 y x) -(MaxUint8x64 x y) => (VPMAXUB512 y x) -(MinUint8x64 x y) => (VPMINUB512 y x) -(PopCountUint8x64 x) => (VPOPCNTB512 x) -(SaturatedAddUint8x64 x y) => (VPADDSB512 y x) -(SaturatedSubUint8x64 x y) => (VPSUBSB512 y x) -(SubUint8x64 x y) => (VPSUBB512 y x) -(EqualFloat32x4 x y) => (VCMPPS128 [0] y x) -(EqualFloat64x4 x y) => (VCMPPD256 [0] y x) -(EqualFloat32x8 x y) => (VCMPPS256 [0] y x) -(EqualFloat64x2 x y) => (VCMPPD128 [0] y x) -(GreaterFloat32x8 x y) => (VCMPPS256 [6] y x) -(GreaterFloat64x4 x y) => (VCMPPD256 [6] y x) -(GreaterFloat64x2 x y) => (VCMPPD128 [6] y x) -(GreaterFloat32x4 x y) => (VCMPPS128 [6] y x) -(GreaterEqualFloat64x4 x y) => (VCMPPD256 [5] y x) -(GreaterEqualFloat32x8 x y) => (VCMPPS256 [5] y x) -(GreaterEqualFloat32x4 x y) => (VCMPPS128 [5] y x) -(GreaterEqualFloat64x2 x y) => (VCMPPD128 [5] y x) -(IsNanFloat32x8 x y) => (VCMPPS256 [3] y x) -(IsNanFloat64x2 x y) => (VCMPPD128 [3] y x) -(IsNanFloat32x4 x y) => (VCMPPS128 [3] y x) -(IsNanFloat64x4 x y) => (VCMPPD256 [3] y x) -(LessFloat32x4 x y) => (VCMPPS128 [1] y x) -(LessFloat64x4 x y) => (VCMPPD256 [1] y x) -(LessFloat64x2 x y) => (VCMPPD128 [1] y x) -(LessFloat32x8 x y) => (VCMPPS256 [1] y x) -(LessEqualFloat32x4 x y) => (VCMPPS128 [2] y x) -(LessEqualFloat64x4 x y) => (VCMPPD256 [2] y x) -(LessEqualFloat64x2 x y) => (VCMPPD128 [2] y x) -(LessEqualFloat32x8 x y) => (VCMPPS256 [2] y x) -(NotEqualFloat64x2 x y) => (VCMPPD128 [4] y x) -(NotEqualFloat32x4 x y) => (VCMPPS128 [4] y x) -(NotEqualFloat32x8 x y) => (VCMPPS256 [4] y x) -(NotEqualFloat64x4 x y) => (VCMPPD256 [4] y x) 
-(MaskedAddFloat32x16 x y mask) => (VADDPSMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedAndFloat32x16 x y mask) => (VANDPSMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedAndNotFloat32x16 x y mask) => (VANDNPSMasked512 y x (VPMOVVec32x16ToM mask)) +(AbsoluteInt16x16 ...) => (VPABSW256 ...) +(AbsoluteInt16x32 ...) => (VPABSW512 ...) +(AbsoluteInt16x8 ...) => (VPABSW128 ...) +(AbsoluteInt32x16 ...) => (VPABSD512 ...) +(AbsoluteInt32x4 ...) => (VPABSD128 ...) +(AbsoluteInt32x8 ...) => (VPABSD256 ...) +(AbsoluteInt64x2 ...) => (VPABSQ128 ...) +(AbsoluteInt64x4 ...) => (VPABSQ256 ...) +(AbsoluteInt64x8 ...) => (VPABSQ512 ...) +(AbsoluteInt8x16 ...) => (VPABSB128 ...) +(AbsoluteInt8x32 ...) => (VPABSB256 ...) +(AbsoluteInt8x64 ...) => (VPABSB512 ...) +(AddFloat32x16 ...) => (VADDPS512 ...) +(AddFloat32x4 ...) => (VADDPS128 ...) +(AddFloat32x8 ...) => (VADDPS256 ...) +(AddFloat64x2 ...) => (VADDPD128 ...) +(AddFloat64x4 ...) => (VADDPD256 ...) +(AddFloat64x8 ...) => (VADDPD512 ...) +(AddInt16x16 ...) => (VPADDW256 ...) +(AddInt16x32 ...) => (VPADDW512 ...) +(AddInt16x8 ...) => (VPADDW128 ...) +(AddInt32x16 ...) => (VPADDD512 ...) +(AddInt32x4 ...) => (VPADDD128 ...) +(AddInt32x8 ...) => (VPADDD256 ...) +(AddInt64x2 ...) => (VPADDQ128 ...) +(AddInt64x4 ...) => (VPADDQ256 ...) +(AddInt64x8 ...) => (VPADDQ512 ...) +(AddInt8x16 ...) => (VPADDB128 ...) +(AddInt8x32 ...) => (VPADDB256 ...) +(AddInt8x64 ...) => (VPADDB512 ...) +(AddUint16x16 ...) => (VPADDW256 ...) +(AddUint16x32 ...) => (VPADDW512 ...) +(AddUint16x8 ...) => (VPADDW128 ...) +(AddUint32x16 ...) => (VPADDD512 ...) +(AddUint32x4 ...) => (VPADDD128 ...) +(AddUint32x8 ...) => (VPADDD256 ...) +(AddUint64x2 ...) => (VPADDQ128 ...) +(AddUint64x4 ...) => (VPADDQ256 ...) +(AddUint64x8 ...) => (VPADDQ512 ...) +(AddUint8x16 ...) => (VPADDB128 ...) +(AddUint8x32 ...) => (VPADDB256 ...) +(AddUint8x64 ...) => (VPADDB512 ...) +(AndFloat32x16 ...) => (VANDPS512 ...) +(AndFloat32x4 ...) => (VANDPS128 ...) +(AndFloat32x8 ...) => (VANDPS256 ...) +(AndFloat64x2 ...) => (VANDPD128 ...) +(AndFloat64x4 ...) => (VANDPD256 ...) +(AndFloat64x8 ...) => (VANDPD512 ...) +(AndInt16x16 ...) => (VPAND256 ...) +(AndInt16x8 ...) => (VPAND128 ...) +(AndInt32x16 ...) => (VPANDD512 ...) +(AndInt32x4 ...) => (VPAND128 ...) +(AndInt32x8 ...) => (VPAND256 ...) +(AndInt64x2 ...) => (VPAND128 ...) +(AndInt64x4 ...) => (VPAND256 ...) +(AndInt64x8 ...) => (VPANDQ512 ...) +(AndInt8x16 ...) => (VPAND128 ...) +(AndInt8x32 ...) => (VPAND256 ...) +(AndUint16x16 ...) => (VPAND256 ...) +(AndUint16x8 ...) => (VPAND128 ...) +(AndUint32x16 ...) => (VPANDD512 ...) +(AndUint32x4 ...) => (VPAND128 ...) +(AndUint32x8 ...) => (VPAND256 ...) +(AndUint64x2 ...) => (VPAND128 ...) +(AndUint64x4 ...) => (VPAND256 ...) +(AndUint64x8 ...) => (VPANDQ512 ...) +(AndUint8x16 ...) => (VPAND128 ...) +(AndUint8x32 ...) => (VPAND256 ...) +(AndNotFloat32x16 ...) => (VANDNPS512 ...) +(AndNotFloat32x4 ...) => (VANDNPS128 ...) +(AndNotFloat32x8 ...) => (VANDNPS256 ...) +(AndNotFloat64x2 ...) => (VANDNPD128 ...) +(AndNotFloat64x4 ...) => (VANDNPD256 ...) +(AndNotFloat64x8 ...) => (VANDNPD512 ...) +(AndNotInt16x16 ...) => (VPANDN256 ...) +(AndNotInt16x8 ...) => (VPANDN128 ...) +(AndNotInt32x16 ...) => (VPANDND512 ...) +(AndNotInt32x4 ...) => (VPANDN128 ...) +(AndNotInt32x8 ...) => (VPANDN256 ...) +(AndNotInt64x2 ...) => (VPANDN128 ...) +(AndNotInt64x4 ...) => (VPANDN256 ...) +(AndNotInt64x8 ...) => (VPANDNQ512 ...) +(AndNotInt8x16 ...) => (VPANDN128 ...) +(AndNotInt8x32 ...) => (VPANDN256 ...) +(AndNotUint16x16 ...) 
=> (VPANDN256 ...) +(AndNotUint16x8 ...) => (VPANDN128 ...) +(AndNotUint32x16 ...) => (VPANDND512 ...) +(AndNotUint32x4 ...) => (VPANDN128 ...) +(AndNotUint32x8 ...) => (VPANDN256 ...) +(AndNotUint64x2 ...) => (VPANDN128 ...) +(AndNotUint64x4 ...) => (VPANDN256 ...) +(AndNotUint64x8 ...) => (VPANDNQ512 ...) +(AndNotUint8x16 ...) => (VPANDN128 ...) +(AndNotUint8x32 ...) => (VPANDN256 ...) +(ApproximateReciprocalFloat32x16 ...) => (VRCP14PS512 ...) +(ApproximateReciprocalFloat32x4 ...) => (VRCP14PS128 ...) +(ApproximateReciprocalFloat32x8 ...) => (VRCP14PS256 ...) +(ApproximateReciprocalFloat64x2 ...) => (VRCP14PD128 ...) +(ApproximateReciprocalFloat64x4 ...) => (VRCP14PD256 ...) +(ApproximateReciprocalFloat64x8 ...) => (VRCP14PD512 ...) +(ApproximateReciprocalOfSqrtFloat32x16 ...) => (VRSQRT14PS512 ...) +(ApproximateReciprocalOfSqrtFloat32x4 ...) => (VRSQRTPS128 ...) +(ApproximateReciprocalOfSqrtFloat32x8 ...) => (VRSQRTPS256 ...) +(ApproximateReciprocalOfSqrtFloat64x2 ...) => (VRSQRT14PD128 ...) +(ApproximateReciprocalOfSqrtFloat64x4 ...) => (VRSQRT14PD256 ...) +(ApproximateReciprocalOfSqrtFloat64x8 ...) => (VRSQRT14PD512 ...) +(AverageUint16x16 ...) => (VPAVGW256 ...) +(AverageUint16x32 ...) => (VPAVGW512 ...) +(AverageUint16x8 ...) => (VPAVGW128 ...) +(AverageUint8x16 ...) => (VPAVGB128 ...) +(AverageUint8x32 ...) => (VPAVGB256 ...) +(AverageUint8x64 ...) => (VPAVGB512 ...) +(DivFloat32x16 ...) => (VDIVPS512 ...) +(DivFloat32x4 ...) => (VDIVPS128 ...) +(DivFloat32x8 ...) => (VDIVPS256 ...) +(DivFloat64x2 ...) => (VDIVPD128 ...) +(DivFloat64x4 ...) => (VDIVPD256 ...) +(DivFloat64x8 ...) => (VDIVPD512 ...) +(EqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [0] x y)) +(EqualFloat32x4 x y) => (VCMPPS128 [0] x y) +(EqualFloat32x8 x y) => (VCMPPS256 [0] x y) +(EqualFloat64x2 x y) => (VCMPPD128 [0] x y) +(EqualFloat64x4 x y) => (VCMPPD256 [0] x y) +(EqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [0] x y)) +(EqualInt16x16 ...) => (VPCMPEQW256 ...) +(EqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPEQW512 x y)) +(EqualInt16x8 ...) => (VPCMPEQW128 ...) +(EqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPEQD512 x y)) +(EqualInt32x4 ...) => (VPCMPEQD128 ...) +(EqualInt32x8 ...) => (VPCMPEQD256 ...) +(EqualInt64x2 ...) => (VPCMPEQQ128 ...) +(EqualInt64x4 ...) => (VPCMPEQQ256 ...) +(EqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPEQQ512 x y)) +(EqualInt8x16 ...) => (VPCMPEQB128 ...) +(EqualInt8x32 ...) => (VPCMPEQB256 ...) 
+(EqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPEQB512 x y)) +(EqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [0] x y)) +(EqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [0] x y)) +(EqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [0] x y)) +(EqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [0] x y)) +(EqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [0] x y)) +(EqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [0] x y)) +(EqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [0] x y)) +(EqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [0] x y)) +(EqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [0] x y)) +(EqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [0] x y)) +(EqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [0] x y)) +(EqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [0] x y)) +(GreaterFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [6] x y)) +(GreaterFloat32x4 x y) => (VCMPPS128 [6] x y) +(GreaterFloat32x8 x y) => (VCMPPS256 [6] x y) +(GreaterFloat64x2 x y) => (VCMPPD128 [6] x y) +(GreaterFloat64x4 x y) => (VCMPPD256 [6] x y) +(GreaterFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [6] x y)) +(GreaterInt16x16 ...) => (VPCMPGTW256 ...) +(GreaterInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPGTW512 x y)) +(GreaterInt16x8 ...) => (VPCMPGTW128 ...) +(GreaterInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPGTD512 x y)) +(GreaterInt32x4 ...) => (VPCMPGTD128 ...) +(GreaterInt32x8 ...) => (VPCMPGTD256 ...) +(GreaterInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPGTQ128 x y)) +(GreaterInt64x4 ...) => (VPCMPGTQ256 ...) +(GreaterInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPGTQ512 x y)) +(GreaterInt8x16 ...) => (VPCMPGTB128 ...) +(GreaterInt8x32 ...) => (VPCMPGTB256 ...) +(GreaterInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPGTB512 x y)) +(GreaterUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [6] x y)) +(GreaterUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [6] x y)) +(GreaterUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [6] x y)) +(GreaterUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [6] x y)) +(GreaterUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [6] x y)) +(GreaterUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [6] x y)) +(GreaterUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [6] x y)) +(GreaterUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [6] x y)) +(GreaterUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [6] x y)) +(GreaterUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [6] x y)) +(GreaterUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [6] x y)) +(GreaterUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [6] x y)) +(GreaterEqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [5] x y)) +(GreaterEqualFloat32x4 x y) => (VCMPPS128 [5] x y) +(GreaterEqualFloat32x8 x y) => (VCMPPS256 [5] x y) +(GreaterEqualFloat64x2 x y) => (VCMPPD128 [5] x y) +(GreaterEqualFloat64x4 x y) => (VCMPPD256 [5] x y) +(GreaterEqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [5] x y)) +(GreaterEqualInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [5] x y)) +(GreaterEqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [5] x y)) +(GreaterEqualInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [5] x y)) +(GreaterEqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [5] x y)) +(GreaterEqualInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [5] x y)) +(GreaterEqualInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [5] x y)) +(GreaterEqualInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [5] x y)) +(GreaterEqualInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [5] x y)) +(GreaterEqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [5] x y)) +(GreaterEqualInt8x16 x y) => (VPMOVMToVec8x16 
(VPCMPB128 [5] x y)) +(GreaterEqualInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [5] x y)) +(GreaterEqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [5] x y)) +(GreaterEqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [5] x y)) +(GreaterEqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [5] x y)) +(GreaterEqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [5] x y)) +(GreaterEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [5] x y)) +(GreaterEqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [5] x y)) +(GreaterEqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [5] x y)) +(GreaterEqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [5] x y)) +(GreaterEqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [5] x y)) +(GreaterEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [5] x y)) +(GreaterEqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [5] x y)) +(GreaterEqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [5] x y)) +(GreaterEqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [5] x y)) +(IsNanFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [3] x y)) +(IsNanFloat32x4 x y) => (VCMPPS128 [3] x y) +(IsNanFloat32x8 x y) => (VCMPPS256 [3] x y) +(IsNanFloat64x2 x y) => (VCMPPD128 [3] x y) +(IsNanFloat64x4 x y) => (VCMPPD256 [3] x y) +(IsNanFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [3] x y)) +(LessFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [1] x y)) +(LessFloat32x4 x y) => (VCMPPS128 [1] x y) +(LessFloat32x8 x y) => (VCMPPS256 [1] x y) +(LessFloat64x2 x y) => (VCMPPD128 [1] x y) +(LessFloat64x4 x y) => (VCMPPD256 [1] x y) +(LessFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [1] x y)) +(LessInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [1] x y)) +(LessInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [1] x y)) +(LessInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [1] x y)) +(LessInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [1] x y)) +(LessInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [1] x y)) +(LessInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [1] x y)) +(LessInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [1] x y)) +(LessInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [1] x y)) +(LessInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [1] x y)) +(LessInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [1] x y)) +(LessInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [1] x y)) +(LessInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [1] x y)) +(LessUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [1] x y)) +(LessUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [1] x y)) +(LessUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [1] x y)) +(LessUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [1] x y)) +(LessUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [1] x y)) +(LessUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [1] x y)) +(LessUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [1] x y)) +(LessUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [1] x y)) +(LessUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [1] x y)) +(LessUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [1] x y)) +(LessUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [1] x y)) +(LessUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [1] x y)) +(LessEqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [2] x y)) +(LessEqualFloat32x4 x y) => (VCMPPS128 [2] x y) +(LessEqualFloat32x8 x y) => (VCMPPS256 [2] x y) +(LessEqualFloat64x2 x y) => (VCMPPD128 [2] x y) +(LessEqualFloat64x4 x y) => (VCMPPD256 [2] x y) +(LessEqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [2] x y)) +(LessEqualInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [2] x y)) +(LessEqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [2] x y)) 
+(LessEqualInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [2] x y)) +(LessEqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [2] x y)) +(LessEqualInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [2] x y)) +(LessEqualInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [2] x y)) +(LessEqualInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [2] x y)) +(LessEqualInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [2] x y)) +(LessEqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [2] x y)) +(LessEqualInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [2] x y)) +(LessEqualInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [2] x y)) +(LessEqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [2] x y)) +(LessEqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [2] x y)) +(LessEqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [2] x y)) +(LessEqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [2] x y)) +(LessEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [2] x y)) +(LessEqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [2] x y)) +(LessEqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [2] x y)) +(LessEqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [2] x y)) +(LessEqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [2] x y)) +(LessEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [2] x y)) +(LessEqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [2] x y)) +(LessEqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [2] x y)) +(LessEqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [2] x y)) +(MaskedAbsoluteInt16x16 x mask) => (VPABSWMasked256 x (VPMOVVec16x16ToM mask)) +(MaskedAbsoluteInt16x32 x mask) => (VPABSWMasked512 x (VPMOVVec16x32ToM mask)) +(MaskedAbsoluteInt16x8 x mask) => (VPABSWMasked128 x (VPMOVVec16x8ToM mask)) +(MaskedAbsoluteInt32x16 x mask) => (VPABSDMasked512 x (VPMOVVec32x16ToM mask)) +(MaskedAbsoluteInt32x4 x mask) => (VPABSDMasked128 x (VPMOVVec32x4ToM mask)) +(MaskedAbsoluteInt32x8 x mask) => (VPABSDMasked256 x (VPMOVVec32x8ToM mask)) +(MaskedAbsoluteInt64x2 x mask) => (VPABSQMasked128 x (VPMOVVec64x2ToM mask)) +(MaskedAbsoluteInt64x4 x mask) => (VPABSQMasked256 x (VPMOVVec64x4ToM mask)) +(MaskedAbsoluteInt64x8 x mask) => (VPABSQMasked512 x (VPMOVVec64x8ToM mask)) +(MaskedAbsoluteInt8x16 x mask) => (VPABSBMasked128 x (VPMOVVec8x16ToM mask)) +(MaskedAbsoluteInt8x32 x mask) => (VPABSBMasked256 x (VPMOVVec8x32ToM mask)) +(MaskedAbsoluteInt8x64 x mask) => (VPABSBMasked512 x (VPMOVVec8x64ToM mask)) +(MaskedAddFloat32x16 x y mask) => (VADDPSMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedAddFloat32x4 x y mask) => (VADDPSMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedAddFloat32x8 x y mask) => (VADDPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedAddFloat64x2 x y mask) => (VADDPDMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedAddFloat64x4 x y mask) => (VADDPDMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedAddFloat64x8 x y mask) => (VADDPDMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedAddInt16x16 x y mask) => (VPADDWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedAddInt16x32 x y mask) => (VPADDWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedAddInt16x8 x y mask) => (VPADDWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedAddInt32x16 x y mask) => (VPADDDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedAddInt32x4 x y mask) => (VPADDDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedAddInt32x8 x y mask) => (VPADDDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedAddInt64x2 x y mask) => (VPADDQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedAddInt64x4 x y mask) => (VPADDQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedAddInt64x8 x y mask) => (VPADDQMasked512 x y 
(VPMOVVec64x8ToM mask)) +(MaskedAddInt8x16 x y mask) => (VPADDBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaskedAddInt8x32 x y mask) => (VPADDBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaskedAddInt8x64 x y mask) => (VPADDBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedAddUint16x16 x y mask) => (VPADDWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedAddUint16x32 x y mask) => (VPADDWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedAddUint16x8 x y mask) => (VPADDWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedAddUint32x16 x y mask) => (VPADDDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedAddUint32x4 x y mask) => (VPADDDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedAddUint32x8 x y mask) => (VPADDDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedAddUint64x2 x y mask) => (VPADDQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedAddUint64x4 x y mask) => (VPADDQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedAddUint64x8 x y mask) => (VPADDQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedAddUint8x16 x y mask) => (VPADDBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaskedAddUint8x32 x y mask) => (VPADDBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaskedAddUint8x64 x y mask) => (VPADDBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedAndFloat32x16 x y mask) => (VANDPSMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedAndFloat32x4 x y mask) => (VANDPSMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedAndFloat32x8 x y mask) => (VANDPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedAndFloat64x2 x y mask) => (VANDPDMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedAndFloat64x4 x y mask) => (VANDPDMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedAndFloat64x8 x y mask) => (VANDPDMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedAndInt32x16 x y mask) => (VPANDDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedAndInt32x4 x y mask) => (VPANDDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedAndInt32x8 x y mask) => (VPANDDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedAndInt64x2 x y mask) => (VPANDQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedAndInt64x4 x y mask) => (VPANDQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedAndInt64x8 x y mask) => (VPANDQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedAndUint32x16 x y mask) => (VPANDDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedAndUint32x4 x y mask) => (VPANDDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedAndUint32x8 x y mask) => (VPANDDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedAndUint64x2 x y mask) => (VPANDQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedAndUint64x4 x y mask) => (VPANDQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedAndUint64x8 x y mask) => (VPANDQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedAndNotFloat32x16 x y mask) => (VANDNPSMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedAndNotFloat32x4 x y mask) => (VANDNPSMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedAndNotFloat32x8 x y mask) => (VANDNPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedAndNotFloat64x2 x y mask) => (VANDNPDMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedAndNotFloat64x4 x y mask) => (VANDNPDMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedAndNotFloat64x8 x y mask) => (VANDNPDMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedAndNotInt32x16 x y mask) => (VPANDNDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedAndNotInt32x4 x y mask) => (VPANDNDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedAndNotInt32x8 x y mask) => (VPANDNDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedAndNotInt64x2 x y mask) => (VPANDNQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedAndNotInt64x4 x y mask) => (VPANDNQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedAndNotInt64x8 x y mask) => 
(VPANDNQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedAndNotUint32x16 x y mask) => (VPANDNDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedAndNotUint32x4 x y mask) => (VPANDNDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedAndNotUint32x8 x y mask) => (VPANDNDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedAndNotUint64x2 x y mask) => (VPANDNQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedAndNotUint64x4 x y mask) => (VPANDNQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedAndNotUint64x8 x y mask) => (VPANDNQMasked512 x y (VPMOVVec64x8ToM mask)) (MaskedApproximateReciprocalFloat32x16 x mask) => (VRCP14PSMasked512 x (VPMOVVec32x16ToM mask)) -(MaskedApproximateReciprocalOfSqrtFloat32x16 x mask) => (VRSQRT14PSMasked512 x (VPMOVVec32x16ToM mask)) -(MaskedDivFloat32x16 x y mask) => (VDIVPSMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedMaxFloat32x16 x y mask) => (VMAXPSMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedMinFloat32x16 x y mask) => (VMINPSMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedMulFloat32x16 x y mask) => (VMULPSMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedMulByPowOf2Float32x16 x y mask) => (VSCALEFPSMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedOrFloat32x16 x y mask) => (VORPSMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedSqrtFloat32x16 x mask) => (VSQRTPSMasked512 x (VPMOVVec32x16ToM mask)) -(MaskedSubFloat32x16 x y mask) => (VADDPSMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedXorFloat32x16 x y mask) => (VXORPSMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedAddFloat32x4 x y mask) => (VADDPSMasked128 y x (VPMOVVec32x4ToM mask)) -(MaskedAndFloat32x4 x y mask) => (VANDPSMasked128 y x (VPMOVVec32x4ToM mask)) -(MaskedAndNotFloat32x4 x y mask) => (VANDNPSMasked128 y x (VPMOVVec32x4ToM mask)) (MaskedApproximateReciprocalFloat32x4 x mask) => (VRCP14PSMasked128 x (VPMOVVec32x4ToM mask)) -(MaskedApproximateReciprocalOfSqrtFloat32x4 x mask) => (VRSQRT14PSMasked128 x (VPMOVVec32x4ToM mask)) -(MaskedDivFloat32x4 x y mask) => (VDIVPSMasked128 y x (VPMOVVec32x4ToM mask)) -(MaskedMaxFloat32x4 x y mask) => (VMAXPSMasked128 y x (VPMOVVec32x4ToM mask)) -(MaskedMinFloat32x4 x y mask) => (VMINPSMasked128 y x (VPMOVVec32x4ToM mask)) -(MaskedMulFloat32x4 x y mask) => (VMULPSMasked128 y x (VPMOVVec32x4ToM mask)) -(MaskedMulByPowOf2Float32x4 x y mask) => (VSCALEFPSMasked128 y x (VPMOVVec32x4ToM mask)) -(MaskedOrFloat32x4 x y mask) => (VORPSMasked128 y x (VPMOVVec32x4ToM mask)) -(MaskedSqrtFloat32x4 x mask) => (VSQRTPSMasked128 x (VPMOVVec32x4ToM mask)) -(MaskedSubFloat32x4 x y mask) => (VADDPSMasked128 y x (VPMOVVec32x4ToM mask)) -(MaskedXorFloat32x4 x y mask) => (VXORPSMasked128 y x (VPMOVVec32x4ToM mask)) -(MaskedAddFloat32x8 x y mask) => (VADDPSMasked256 y x (VPMOVVec32x8ToM mask)) -(MaskedAndFloat32x8 x y mask) => (VANDPSMasked256 y x (VPMOVVec32x8ToM mask)) -(MaskedAndNotFloat32x8 x y mask) => (VANDNPSMasked256 y x (VPMOVVec32x8ToM mask)) (MaskedApproximateReciprocalFloat32x8 x mask) => (VRCP14PSMasked256 x (VPMOVVec32x8ToM mask)) -(MaskedApproximateReciprocalOfSqrtFloat32x8 x mask) => (VRSQRT14PSMasked256 x (VPMOVVec32x8ToM mask)) -(MaskedDivFloat32x8 x y mask) => (VDIVPSMasked256 y x (VPMOVVec32x8ToM mask)) -(MaskedMaxFloat32x8 x y mask) => (VMAXPSMasked256 y x (VPMOVVec32x8ToM mask)) -(MaskedMinFloat32x8 x y mask) => (VMINPSMasked256 y x (VPMOVVec32x8ToM mask)) -(MaskedMulFloat32x8 x y mask) => (VMULPSMasked256 y x (VPMOVVec32x8ToM mask)) -(MaskedMulByPowOf2Float32x8 x y mask) => (VSCALEFPSMasked256 y x (VPMOVVec32x8ToM mask)) -(MaskedOrFloat32x8 x y mask) => (VORPSMasked256 y x (VPMOVVec32x8ToM mask)) 
-(MaskedSqrtFloat32x8 x mask) => (VSQRTPSMasked256 x (VPMOVVec32x8ToM mask)) -(MaskedSubFloat32x8 x y mask) => (VADDPSMasked256 y x (VPMOVVec32x8ToM mask)) -(MaskedXorFloat32x8 x y mask) => (VXORPSMasked256 y x (VPMOVVec32x8ToM mask)) -(MaskedAddFloat64x2 x y mask) => (VADDPDMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedAndFloat64x2 x y mask) => (VANDPDMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedAndNotFloat64x2 x y mask) => (VANDNPDMasked128 y x (VPMOVVec64x2ToM mask)) (MaskedApproximateReciprocalFloat64x2 x mask) => (VRCP14PDMasked128 x (VPMOVVec64x2ToM mask)) -(MaskedApproximateReciprocalOfSqrtFloat64x2 x mask) => (VRSQRT14PDMasked128 x (VPMOVVec64x2ToM mask)) -(MaskedDivFloat64x2 x y mask) => (VDIVPDMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedMaxFloat64x2 x y mask) => (VMAXPDMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedMinFloat64x2 x y mask) => (VMINPDMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedMulFloat64x2 x y mask) => (VMULPDMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedMulByPowOf2Float64x2 x y mask) => (VSCALEFPDMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedOrFloat64x2 x y mask) => (VORPDMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedSqrtFloat64x2 x mask) => (VSQRTPDMasked128 x (VPMOVVec64x2ToM mask)) -(MaskedSubFloat64x2 x y mask) => (VADDPDMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedXorFloat64x2 x y mask) => (VXORPDMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedAddFloat64x4 x y mask) => (VADDPDMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedAndFloat64x4 x y mask) => (VANDPDMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedAndNotFloat64x4 x y mask) => (VANDNPDMasked256 y x (VPMOVVec64x4ToM mask)) (MaskedApproximateReciprocalFloat64x4 x mask) => (VRCP14PDMasked256 x (VPMOVVec64x4ToM mask)) -(MaskedApproximateReciprocalOfSqrtFloat64x4 x mask) => (VRSQRT14PDMasked256 x (VPMOVVec64x4ToM mask)) -(MaskedDivFloat64x4 x y mask) => (VDIVPDMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedMaxFloat64x4 x y mask) => (VMAXPDMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedMinFloat64x4 x y mask) => (VMINPDMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedMulFloat64x4 x y mask) => (VMULPDMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedMulByPowOf2Float64x4 x y mask) => (VSCALEFPDMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedOrFloat64x4 x y mask) => (VORPDMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedSqrtFloat64x4 x mask) => (VSQRTPDMasked256 x (VPMOVVec64x4ToM mask)) -(MaskedSubFloat64x4 x y mask) => (VADDPDMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedXorFloat64x4 x y mask) => (VXORPDMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedAddFloat64x8 x y mask) => (VADDPDMasked512 y x (VPMOVVec64x8ToM mask)) -(MaskedAndFloat64x8 x y mask) => (VANDPDMasked512 y x (VPMOVVec64x8ToM mask)) -(MaskedAndNotFloat64x8 x y mask) => (VANDNPDMasked512 y x (VPMOVVec64x8ToM mask)) (MaskedApproximateReciprocalFloat64x8 x mask) => (VRCP14PDMasked512 x (VPMOVVec64x8ToM mask)) +(MaskedApproximateReciprocalOfSqrtFloat32x16 x mask) => (VRSQRT14PSMasked512 x (VPMOVVec32x16ToM mask)) +(MaskedApproximateReciprocalOfSqrtFloat32x4 x mask) => (VRSQRT14PSMasked128 x (VPMOVVec32x4ToM mask)) +(MaskedApproximateReciprocalOfSqrtFloat32x8 x mask) => (VRSQRT14PSMasked256 x (VPMOVVec32x8ToM mask)) +(MaskedApproximateReciprocalOfSqrtFloat64x2 x mask) => (VRSQRT14PDMasked128 x (VPMOVVec64x2ToM mask)) +(MaskedApproximateReciprocalOfSqrtFloat64x4 x mask) => (VRSQRT14PDMasked256 x (VPMOVVec64x4ToM mask)) (MaskedApproximateReciprocalOfSqrtFloat64x8 x mask) => (VRSQRT14PDMasked512 x (VPMOVVec64x8ToM mask)) -(MaskedDivFloat64x8 x y mask) => (VDIVPDMasked512 y 
x (VPMOVVec64x8ToM mask))
-(MaskedMaxFloat64x8 x y mask) => (VMAXPDMasked512 y x (VPMOVVec64x8ToM mask))
-(MaskedMinFloat64x8 x y mask) => (VMINPDMasked512 y x (VPMOVVec64x8ToM mask))
-(MaskedMulFloat64x8 x y mask) => (VMULPDMasked512 y x (VPMOVVec64x8ToM mask))
-(MaskedMulByPowOf2Float64x8 x y mask) => (VSCALEFPDMasked512 y x (VPMOVVec64x8ToM mask))
-(MaskedOrFloat64x8 x y mask) => (VORPDMasked512 y x (VPMOVVec64x8ToM mask))
-(MaskedSqrtFloat64x8 x mask) => (VSQRTPDMasked512 x (VPMOVVec64x8ToM mask))
-(MaskedSubFloat64x8 x y mask) => (VADDPDMasked512 y x (VPMOVVec64x8ToM mask))
-(MaskedXorFloat64x8 x y mask) => (VXORPDMasked512 y x (VPMOVVec64x8ToM mask))
-(MaskedAbsoluteInt16x16 x mask) => (VPABSWMasked256 x (VPMOVVec16x16ToM mask))
-(MaskedAddInt16x16 x y mask) => (VPADDWMasked256 y x (VPMOVVec16x16ToM mask))
-(MaskedMaxInt16x16 x y mask) => (VPMAXSWMasked256 y x (VPMOVVec16x16ToM mask))
-(MaskedMinInt16x16 x y mask) => (VPMINSWMasked256 y x (VPMOVVec16x16ToM mask))
-(MaskedMulHighInt16x16 x y mask) => (VPMULHWMasked256 y x (VPMOVVec16x16ToM mask))
-(MaskedMulLowInt16x16 x y mask) => (VPMULLWMasked256 y x (VPMOVVec16x16ToM mask))
+(MaskedAverageUint16x16 x y mask) => (VPAVGWMasked256 x y (VPMOVVec16x16ToM mask))
+(MaskedAverageUint16x32 x y mask) => (VPAVGWMasked512 x y (VPMOVVec16x32ToM mask))
+(MaskedAverageUint16x8 x y mask) => (VPAVGWMasked128 x y (VPMOVVec16x8ToM mask))
+(MaskedAverageUint8x16 x y mask) => (VPAVGBMasked128 x y (VPMOVVec8x16ToM mask))
+(MaskedAverageUint8x32 x y mask) => (VPAVGBMasked256 x y (VPMOVVec8x32ToM mask))
+(MaskedAverageUint8x64 x y mask) => (VPAVGBMasked512 x y (VPMOVVec8x64ToM mask))
+(MaskedDivFloat32x16 x y mask) => (VDIVPSMasked512 x y (VPMOVVec32x16ToM mask))
+(MaskedDivFloat32x4 x y mask) => (VDIVPSMasked128 x y (VPMOVVec32x4ToM mask))
+(MaskedDivFloat32x8 x y mask) => (VDIVPSMasked256 x y (VPMOVVec32x8ToM mask))
+(MaskedDivFloat64x2 x y mask) => (VDIVPDMasked128 x y (VPMOVVec64x2ToM mask))
+(MaskedDivFloat64x4 x y mask) => (VDIVPDMasked256 x y (VPMOVVec64x4ToM mask))
+(MaskedDivFloat64x8 x y mask) => (VDIVPDMasked512 x y (VPMOVVec64x8ToM mask))
+(MaskedEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [0] x y (VPMOVVec32x16ToM mask)))
+(MaskedEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [0] x y (VPMOVVec32x4ToM mask)))
+(MaskedEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [0] x y (VPMOVVec32x8ToM mask)))
+(MaskedEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [0] x y (VPMOVVec64x2ToM mask)))
+(MaskedEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [0] x y (VPMOVVec64x4ToM mask)))
+(MaskedEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [0] x y (VPMOVVec64x8ToM mask)))
+(MaskedEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPEQWMasked256 x y (VPMOVVec16x16ToM mask)))
+(MaskedEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPEQWMasked512 x y (VPMOVVec16x32ToM mask)))
+(MaskedEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPEQWMasked128 x y (VPMOVVec16x8ToM mask)))
+(MaskedEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPEQDMasked512 x y (VPMOVVec32x16ToM mask)))
+(MaskedEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPEQDMasked128 x y (VPMOVVec32x4ToM mask)))
+(MaskedEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPEQDMasked256 x y (VPMOVVec32x8ToM mask)))
+(MaskedEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPEQQMasked128 x y (VPMOVVec64x2ToM mask)))
+(MaskedEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPEQQMasked256 x y (VPMOVVec64x4ToM mask)))
+(MaskedEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPEQQMasked512 x y (VPMOVVec64x8ToM mask)))
+(MaskedEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPEQBMasked128 x y (VPMOVVec8x16ToM mask)))
+(MaskedEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPEQBMasked256 x y (VPMOVVec8x32ToM mask)))
+(MaskedEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPEQBMasked512 x y (VPMOVVec8x64ToM mask)))
+(MaskedEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [0] x y (VPMOVVec16x16ToM mask)))
+(MaskedEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [0] x y (VPMOVVec16x32ToM mask)))
+(MaskedEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [0] x y (VPMOVVec16x8ToM mask)))
+(MaskedEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [0] x y (VPMOVVec32x16ToM mask)))
+(MaskedEqualUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [0] x y (VPMOVVec32x4ToM mask)))
+(MaskedEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [0] x y (VPMOVVec32x8ToM mask)))
+(MaskedEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [0] x y (VPMOVVec64x2ToM mask)))
+(MaskedEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [0] x y (VPMOVVec64x4ToM mask)))
+(MaskedEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [0] x y (VPMOVVec64x8ToM mask)))
+(MaskedEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [0] x y (VPMOVVec8x16ToM mask)))
+(MaskedEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [0] x y (VPMOVVec8x32ToM mask)))
+(MaskedEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [0] x y (VPMOVVec8x64ToM mask)))
+(MaskedGreaterFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [6] x y (VPMOVVec32x16ToM mask)))
+(MaskedGreaterFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [6] x y (VPMOVVec32x4ToM mask)))
+(MaskedGreaterFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [6] x y (VPMOVVec32x8ToM mask)))
+(MaskedGreaterFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [6] x y (VPMOVVec64x2ToM mask)))
+(MaskedGreaterFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [6] x y (VPMOVVec64x4ToM mask)))
+(MaskedGreaterFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [6] x y (VPMOVVec64x8ToM mask)))
+(MaskedGreaterInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPGTWMasked256 x y (VPMOVVec16x16ToM mask)))
+(MaskedGreaterInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPGTWMasked512 x y (VPMOVVec16x32ToM mask)))
+(MaskedGreaterInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPGTWMasked128 x y (VPMOVVec16x8ToM mask)))
+(MaskedGreaterInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPGTDMasked512 x y (VPMOVVec32x16ToM mask)))
+(MaskedGreaterInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPGTDMasked128 x y (VPMOVVec32x4ToM mask)))
+(MaskedGreaterInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPGTDMasked256 x y (VPMOVVec32x8ToM mask)))
+(MaskedGreaterInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPGTQMasked128 x y (VPMOVVec64x2ToM mask)))
+(MaskedGreaterInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPGTQMasked256 x y (VPMOVVec64x4ToM mask)))
+(MaskedGreaterInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPGTQMasked512 x y (VPMOVVec64x8ToM mask)))
+(MaskedGreaterInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPGTBMasked128 x y (VPMOVVec8x16ToM mask)))
+(MaskedGreaterInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPGTBMasked256 x y (VPMOVVec8x32ToM mask)))
+(MaskedGreaterInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPGTBMasked512 x y (VPMOVVec8x64ToM mask)))
+(MaskedGreaterUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [6] x y (VPMOVVec16x16ToM mask)))
+(MaskedGreaterUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [6] x y (VPMOVVec16x32ToM mask)))
+(MaskedGreaterUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [6] x y (VPMOVVec16x8ToM mask)))
+(MaskedGreaterUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [6] x y (VPMOVVec32x16ToM mask)))
+(MaskedGreaterUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [6] x y (VPMOVVec32x4ToM mask)))
+(MaskedGreaterUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [6] x y (VPMOVVec32x8ToM mask)))
+(MaskedGreaterUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [6] x y (VPMOVVec64x2ToM mask)))
+(MaskedGreaterUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [6] x y (VPMOVVec64x4ToM mask)))
+(MaskedGreaterUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [6] x y (VPMOVVec64x8ToM mask)))
+(MaskedGreaterUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [6] x y (VPMOVVec8x16ToM mask)))
+(MaskedGreaterUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [6] x y (VPMOVVec8x32ToM mask)))
+(MaskedGreaterUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [6] x y (VPMOVVec8x64ToM mask)))
+(MaskedGreaterEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [5] x y (VPMOVVec32x16ToM mask)))
+(MaskedGreaterEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [5] x y (VPMOVVec32x4ToM mask)))
+(MaskedGreaterEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [5] x y (VPMOVVec32x8ToM mask)))
+(MaskedGreaterEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [5] x y (VPMOVVec64x2ToM mask)))
+(MaskedGreaterEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [5] x y (VPMOVVec64x4ToM mask)))
+(MaskedGreaterEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [5] x y (VPMOVVec64x8ToM mask)))
+(MaskedGreaterEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [5] x y (VPMOVVec16x16ToM mask)))
+(MaskedGreaterEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [5] x y (VPMOVVec16x32ToM mask)))
+(MaskedGreaterEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [5] x y (VPMOVVec16x8ToM mask)))
+(MaskedGreaterEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [5] x y (VPMOVVec32x16ToM mask)))
+(MaskedGreaterEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [5] x y (VPMOVVec32x4ToM mask)))
+(MaskedGreaterEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [5] x y (VPMOVVec32x8ToM mask)))
+(MaskedGreaterEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [5] x y (VPMOVVec64x2ToM mask)))
+(MaskedGreaterEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [5] x y (VPMOVVec64x4ToM mask)))
+(MaskedGreaterEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [5] x y (VPMOVVec64x8ToM mask)))
+(MaskedGreaterEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [5] x y (VPMOVVec8x16ToM mask)))
+(MaskedGreaterEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [5] x y (VPMOVVec8x32ToM mask)))
+(MaskedGreaterEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [5] x y (VPMOVVec8x64ToM mask)))
+(MaskedGreaterEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [5] x y (VPMOVVec16x16ToM mask)))
+(MaskedGreaterEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [5] x y (VPMOVVec16x32ToM mask)))
+(MaskedGreaterEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [5] x y (VPMOVVec16x8ToM mask)))
+(MaskedGreaterEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [5] x y (VPMOVVec32x16ToM mask)))
+(MaskedGreaterEqualUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [5] x y (VPMOVVec32x4ToM mask)))
+(MaskedGreaterEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [5] x y (VPMOVVec32x8ToM mask)))
+(MaskedGreaterEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [5] x y (VPMOVVec64x2ToM mask)))
+(MaskedGreaterEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [5] x y (VPMOVVec64x4ToM mask)))
+(MaskedGreaterEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [5] x y (VPMOVVec64x8ToM mask)))
+(MaskedGreaterEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [5] x y (VPMOVVec8x16ToM mask)))
+(MaskedGreaterEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [5] x y (VPMOVVec8x32ToM mask)))
+(MaskedGreaterEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [5] x y (VPMOVVec8x64ToM mask)))
+(MaskedIsNanFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [3] x y (VPMOVVec32x16ToM mask)))
+(MaskedIsNanFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [3] x y (VPMOVVec32x4ToM mask)))
+(MaskedIsNanFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [3] x y (VPMOVVec32x8ToM mask)))
+(MaskedIsNanFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [3] x y (VPMOVVec64x2ToM mask)))
+(MaskedIsNanFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [3] x y (VPMOVVec64x4ToM mask)))
+(MaskedIsNanFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [3] x y (VPMOVVec64x8ToM mask)))
+(MaskedLessFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [1] x y (VPMOVVec32x16ToM mask)))
+(MaskedLessFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [1] x y (VPMOVVec32x4ToM mask)))
+(MaskedLessFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [1] x y (VPMOVVec32x8ToM mask)))
+(MaskedLessFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [1] x y (VPMOVVec64x2ToM mask)))
+(MaskedLessFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [1] x y (VPMOVVec64x4ToM mask)))
+(MaskedLessFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [1] x y (VPMOVVec64x8ToM mask)))
+(MaskedLessInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [1] x y (VPMOVVec16x16ToM mask)))
+(MaskedLessInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [1] x y (VPMOVVec16x32ToM mask)))
+(MaskedLessInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [1] x y (VPMOVVec16x8ToM mask)))
+(MaskedLessInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [1] x y (VPMOVVec32x16ToM mask)))
+(MaskedLessInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [1] x y (VPMOVVec32x4ToM mask)))
+(MaskedLessInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [1] x y (VPMOVVec32x8ToM mask)))
+(MaskedLessInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [1] x y (VPMOVVec64x2ToM mask)))
+(MaskedLessInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [1] x y (VPMOVVec64x4ToM mask)))
+(MaskedLessInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [1] x y (VPMOVVec64x8ToM mask)))
+(MaskedLessInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [1] x y (VPMOVVec8x16ToM mask)))
+(MaskedLessInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [1] x y (VPMOVVec8x32ToM mask)))
+(MaskedLessInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [1] x y (VPMOVVec8x64ToM mask)))
+(MaskedLessUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [1] x y (VPMOVVec16x16ToM mask)))
+(MaskedLessUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [1] x y (VPMOVVec16x32ToM mask)))
+(MaskedLessUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [1] x y (VPMOVVec16x8ToM mask)))
+(MaskedLessUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [1] x y (VPMOVVec32x16ToM mask)))
+(MaskedLessUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [1] x y (VPMOVVec32x4ToM mask)))
+(MaskedLessUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [1] x y (VPMOVVec32x8ToM mask)))
+(MaskedLessUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [1] x y (VPMOVVec64x2ToM mask)))
+(MaskedLessUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [1] x y (VPMOVVec64x4ToM mask)))
+(MaskedLessUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [1] x y (VPMOVVec64x8ToM mask)))
+(MaskedLessUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [1] x y (VPMOVVec8x16ToM mask)))
+(MaskedLessUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [1] x y (VPMOVVec8x32ToM mask)))
+(MaskedLessUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [1] x y (VPMOVVec8x64ToM mask)))
+(MaskedLessEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [2] x y (VPMOVVec32x16ToM mask)))
+(MaskedLessEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [2] x y (VPMOVVec32x4ToM mask)))
+(MaskedLessEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [2] x y (VPMOVVec32x8ToM mask)))
+(MaskedLessEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [2] x y (VPMOVVec64x2ToM mask)))
+(MaskedLessEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [2] x y (VPMOVVec64x4ToM mask)))
+(MaskedLessEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [2] x y (VPMOVVec64x8ToM mask)))
+(MaskedLessEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [2] x y (VPMOVVec16x16ToM mask)))
+(MaskedLessEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [2] x y (VPMOVVec16x32ToM mask)))
+(MaskedLessEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [2] x y (VPMOVVec16x8ToM mask)))
+(MaskedLessEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [2] x y (VPMOVVec32x16ToM mask)))
+(MaskedLessEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [2] x y (VPMOVVec32x4ToM mask)))
+(MaskedLessEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [2] x y (VPMOVVec32x8ToM mask)))
+(MaskedLessEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [2] x y (VPMOVVec64x2ToM mask)))
+(MaskedLessEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [2] x y (VPMOVVec64x4ToM mask)))
+(MaskedLessEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [2] x y (VPMOVVec64x8ToM mask)))
+(MaskedLessEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [2] x y (VPMOVVec8x16ToM mask)))
+(MaskedLessEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [2] x y (VPMOVVec8x32ToM mask)))
+(MaskedLessEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [2] x y (VPMOVVec8x64ToM mask)))
+(MaskedLessEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [2] x y (VPMOVVec16x16ToM mask)))
+(MaskedLessEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [2] x y (VPMOVVec16x32ToM mask)))
+(MaskedLessEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [2] x y (VPMOVVec16x8ToM mask)))
+(MaskedLessEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [2] x y (VPMOVVec32x16ToM mask)))
+(MaskedLessEqualUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [2] x y (VPMOVVec32x4ToM mask)))
+(MaskedLessEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [2] x y (VPMOVVec32x8ToM mask)))
+(MaskedLessEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [2] x y (VPMOVVec64x2ToM mask)))
+(MaskedLessEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [2] x y (VPMOVVec64x4ToM mask)))
+(MaskedLessEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [2] x y (VPMOVVec64x8ToM mask)))
+(MaskedLessEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [2] x y (VPMOVVec8x16ToM mask)))
+(MaskedLessEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [2] x y (VPMOVVec8x32ToM mask)))
+(MaskedLessEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [2] x y (VPMOVVec8x64ToM mask)))
+(MaskedMaxFloat32x16 x y mask) => (VMAXPSMasked512 x y (VPMOVVec32x16ToM mask))
+(MaskedMaxFloat32x4 x y mask) => (VMAXPSMasked128 x y (VPMOVVec32x4ToM mask))
+(MaskedMaxFloat32x8 x y mask) => (VMAXPSMasked256 x y (VPMOVVec32x8ToM mask))
+(MaskedMaxFloat64x2 x y mask) => (VMAXPDMasked128 x y (VPMOVVec64x2ToM mask))
+(MaskedMaxFloat64x4 x y mask) => (VMAXPDMasked256 x y (VPMOVVec64x4ToM mask))
+(MaskedMaxFloat64x8 x y mask) => (VMAXPDMasked512 x y (VPMOVVec64x8ToM mask))
+(MaskedMaxInt16x16 x y mask) => (VPMAXSWMasked256 x y (VPMOVVec16x16ToM mask))
+(MaskedMaxInt16x32 x y mask) => (VPMAXSWMasked512 x y (VPMOVVec16x32ToM mask))
+(MaskedMaxInt16x8 x y mask) => (VPMAXSWMasked128 x y (VPMOVVec16x8ToM mask))
+(MaskedMaxInt32x16 x y mask) => (VPMAXSDMasked512 x y (VPMOVVec32x16ToM mask))
+(MaskedMaxInt32x4 x y mask) => (VPMAXSDMasked128 x y (VPMOVVec32x4ToM mask))
+(MaskedMaxInt32x8 x y mask) => (VPMAXSDMasked256 x y (VPMOVVec32x8ToM mask))
+(MaskedMaxInt64x2 x y mask) => (VPMAXSQMasked128 x y (VPMOVVec64x2ToM mask))
+(MaskedMaxInt64x4 x y mask) => (VPMAXSQMasked256 x y (VPMOVVec64x4ToM mask))
+(MaskedMaxInt64x8 x y mask) => (VPMAXSQMasked512 x y (VPMOVVec64x8ToM mask))
+(MaskedMaxInt8x16 x y mask) => (VPMAXSBMasked128 x y (VPMOVVec8x16ToM mask))
+(MaskedMaxInt8x32 x y mask) => (VPMAXSBMasked256 x y (VPMOVVec8x32ToM mask))
+(MaskedMaxInt8x64 x y mask) => (VPMAXSBMasked512 x y (VPMOVVec8x64ToM mask))
+(MaskedMaxUint16x16 x y mask) => (VPMAXUWMasked256 x y (VPMOVVec16x16ToM mask))
+(MaskedMaxUint16x32 x y mask) => (VPMAXUWMasked512 x y (VPMOVVec16x32ToM mask))
+(MaskedMaxUint16x8 x y mask) => (VPMAXUWMasked128 x y (VPMOVVec16x8ToM mask))
+(MaskedMaxUint32x16 x y mask) => (VPMAXUDMasked512 x y (VPMOVVec32x16ToM mask))
+(MaskedMaxUint32x4 x y mask) => (VPMAXUDMasked128 x y (VPMOVVec32x4ToM mask))
+(MaskedMaxUint32x8 x y mask) => (VPMAXUDMasked256 x y (VPMOVVec32x8ToM mask))
+(MaskedMaxUint64x2 x y mask) => (VPMAXUQMasked128 x y (VPMOVVec64x2ToM mask))
+(MaskedMaxUint64x4 x y mask) => (VPMAXUQMasked256 x y (VPMOVVec64x4ToM mask))
+(MaskedMaxUint64x8 x y mask) => (VPMAXUQMasked512 x y (VPMOVVec64x8ToM mask))
+(MaskedMaxUint8x16 x y mask) => (VPMAXUBMasked128 x y (VPMOVVec8x16ToM mask))
+(MaskedMaxUint8x32 x y mask) => (VPMAXUBMasked256 x y (VPMOVVec8x32ToM mask))
+(MaskedMaxUint8x64 x y mask) => (VPMAXUBMasked512 x y (VPMOVVec8x64ToM mask))
+(MaskedMinFloat32x16 x y mask) => (VMINPSMasked512 x y (VPMOVVec32x16ToM mask))
+(MaskedMinFloat32x4 x y mask) => (VMINPSMasked128 x y (VPMOVVec32x4ToM mask))
+(MaskedMinFloat32x8 x y mask) => (VMINPSMasked256 x y (VPMOVVec32x8ToM mask))
+(MaskedMinFloat64x2 x y mask) => (VMINPDMasked128 x y (VPMOVVec64x2ToM mask))
+(MaskedMinFloat64x4 x y mask) => (VMINPDMasked256 x y (VPMOVVec64x4ToM mask))
+(MaskedMinFloat64x8 x y mask) =>
(VMINPDMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedMinInt16x16 x y mask) => (VPMINSWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedMinInt16x32 x y mask) => (VPMINSWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedMinInt16x8 x y mask) => (VPMINSWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedMinInt32x16 x y mask) => (VPMINSDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedMinInt32x4 x y mask) => (VPMINSDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedMinInt32x8 x y mask) => (VPMINSDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedMinInt64x2 x y mask) => (VPMINSQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedMinInt64x4 x y mask) => (VPMINSQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedMinInt64x8 x y mask) => (VPMINSQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedMinInt8x16 x y mask) => (VPMINSBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaskedMinInt8x32 x y mask) => (VPMINSBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaskedMinInt8x64 x y mask) => (VPMINSBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedMinUint16x16 x y mask) => (VPMINUWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedMinUint16x32 x y mask) => (VPMINUWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedMinUint16x8 x y mask) => (VPMINUWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedMinUint32x16 x y mask) => (VPMINUDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedMinUint32x4 x y mask) => (VPMINUDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedMinUint32x8 x y mask) => (VPMINUDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedMinUint64x2 x y mask) => (VPMINUQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedMinUint64x4 x y mask) => (VPMINUQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedMinUint64x8 x y mask) => (VPMINUQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedMinUint8x16 x y mask) => (VPMINUBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaskedMinUint8x32 x y mask) => (VPMINUBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaskedMinUint8x64 x y mask) => (VPMINUBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedMulFloat32x16 x y mask) => (VMULPSMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedMulFloat32x4 x y mask) => (VMULPSMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedMulFloat32x8 x y mask) => (VMULPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedMulFloat64x2 x y mask) => (VMULPDMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedMulFloat64x4 x y mask) => (VMULPDMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedMulFloat64x8 x y mask) => (VMULPDMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedMulByPowOf2Float32x16 x y mask) => (VSCALEFPSMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedMulByPowOf2Float32x4 x y mask) => (VSCALEFPSMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedMulByPowOf2Float32x8 x y mask) => (VSCALEFPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedMulByPowOf2Float64x2 x y mask) => (VSCALEFPDMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedMulByPowOf2Float64x4 x y mask) => (VSCALEFPDMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedMulByPowOf2Float64x8 x y mask) => (VSCALEFPDMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedMulEvenWidenInt64x2 x y mask) => (VPMULDQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedMulEvenWidenInt64x4 x y mask) => (VPMULDQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedMulEvenWidenInt64x8 x y mask) => (VPMULDQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedMulEvenWidenUint64x2 x y mask) => (VPMULUDQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedMulEvenWidenUint64x4 x y mask) => (VPMULUDQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedMulEvenWidenUint64x8 x y mask) => (VPMULUDQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedMulHighInt16x16 x y mask) => (VPMULHWMasked256 x y 
(VPMOVVec16x16ToM mask)) +(MaskedMulHighInt16x32 x y mask) => (VPMULHWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedMulHighInt16x8 x y mask) => (VPMULHWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedMulHighUint16x16 x y mask) => (VPMULHUWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedMulHighUint16x32 x y mask) => (VPMULHUWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedMulHighUint16x8 x y mask) => (VPMULHUWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedMulLowInt16x16 x y mask) => (VPMULLWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedMulLowInt16x32 x y mask) => (VPMULLWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedMulLowInt16x8 x y mask) => (VPMULLWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedMulLowInt32x16 x y mask) => (VPMULLDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedMulLowInt32x4 x y mask) => (VPMULLDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedMulLowInt32x8 x y mask) => (VPMULLDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedMulLowInt64x2 x y mask) => (VPMULLQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedMulLowInt64x4 x y mask) => (VPMULLQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedMulLowInt64x8 x y mask) => (VPMULLQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedNotEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [4] x y (VPMOVVec32x16ToM mask))) +(MaskedNotEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [4] x y (VPMOVVec32x4ToM mask))) +(MaskedNotEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [4] x y (VPMOVVec32x8ToM mask))) +(MaskedNotEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [4] x y (VPMOVVec64x2ToM mask))) +(MaskedNotEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [4] x y (VPMOVVec64x4ToM mask))) +(MaskedNotEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [4] x y (VPMOVVec64x8ToM mask))) +(MaskedNotEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [4] x y (VPMOVVec16x16ToM mask))) +(MaskedNotEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [4] x y (VPMOVVec16x32ToM mask))) +(MaskedNotEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [4] x y (VPMOVVec16x8ToM mask))) +(MaskedNotEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [4] x y (VPMOVVec32x16ToM mask))) +(MaskedNotEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [4] x y (VPMOVVec32x4ToM mask))) +(MaskedNotEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [4] x y (VPMOVVec32x8ToM mask))) +(MaskedNotEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [4] x y (VPMOVVec64x2ToM mask))) +(MaskedNotEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [4] x y (VPMOVVec64x4ToM mask))) +(MaskedNotEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [4] x y (VPMOVVec64x8ToM mask))) +(MaskedNotEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [4] x y (VPMOVVec8x16ToM mask))) +(MaskedNotEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [4] x y (VPMOVVec8x32ToM mask))) +(MaskedNotEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [4] x y (VPMOVVec8x64ToM mask))) +(MaskedNotEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [4] x y (VPMOVVec16x16ToM mask))) +(MaskedNotEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [4] x y (VPMOVVec16x32ToM mask))) +(MaskedNotEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [4] x y (VPMOVVec16x8ToM mask))) +(MaskedNotEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [4] x y (VPMOVVec32x16ToM mask))) +(MaskedNotEqualUint32x4 x y mask) => 
(VPMOVMToVec32x4 (VPCMPUDMasked128 [4] x y (VPMOVVec32x4ToM mask))) +(MaskedNotEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [4] x y (VPMOVVec32x8ToM mask))) +(MaskedNotEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [4] x y (VPMOVVec64x2ToM mask))) +(MaskedNotEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [4] x y (VPMOVVec64x4ToM mask))) +(MaskedNotEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [4] x y (VPMOVVec64x8ToM mask))) +(MaskedNotEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [4] x y (VPMOVVec8x16ToM mask))) +(MaskedNotEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [4] x y (VPMOVVec8x32ToM mask))) +(MaskedNotEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [4] x y (VPMOVVec8x64ToM mask))) +(MaskedOrFloat32x16 x y mask) => (VORPSMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedOrFloat32x4 x y mask) => (VORPSMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedOrFloat32x8 x y mask) => (VORPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedOrFloat64x2 x y mask) => (VORPDMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedOrFloat64x4 x y mask) => (VORPDMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedOrFloat64x8 x y mask) => (VORPDMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedOrInt32x16 x y mask) => (VPORDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedOrInt32x4 x y mask) => (VPORDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedOrInt32x8 x y mask) => (VPORDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedOrInt64x2 x y mask) => (VPORQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedOrInt64x4 x y mask) => (VPORQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedOrInt64x8 x y mask) => (VPORQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedOrUint32x16 x y mask) => (VPORDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedOrUint32x4 x y mask) => (VPORDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedOrUint32x8 x y mask) => (VPORDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedOrUint64x2 x y mask) => (VPORQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedOrUint64x4 x y mask) => (VPORQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedOrUint64x8 x y mask) => (VPORQMasked512 x y (VPMOVVec64x8ToM mask)) (MaskedPopCountInt16x16 x mask) => (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) -(MaskedSaturatedAddInt16x16 x y mask) => (VPADDSWMasked256 y x (VPMOVVec16x16ToM mask)) -(MaskedSaturatedSubInt16x16 x y mask) => (VPSUBSWMasked256 y x (VPMOVVec16x16ToM mask)) -(MaskedSubInt16x16 x y mask) => (VPSUBWMasked256 y x (VPMOVVec16x16ToM mask)) -(MaskedAbsoluteInt16x32 x mask) => (VPABSWMasked512 x (VPMOVVec16x32ToM mask)) -(MaskedAddInt16x32 x y mask) => (VPADDWMasked512 y x (VPMOVVec16x32ToM mask)) -(MaskedMaxInt16x32 x y mask) => (VPMAXSWMasked512 y x (VPMOVVec16x32ToM mask)) -(MaskedMinInt16x32 x y mask) => (VPMINSWMasked512 y x (VPMOVVec16x32ToM mask)) -(MaskedMulHighInt16x32 x y mask) => (VPMULHWMasked512 y x (VPMOVVec16x32ToM mask)) -(MaskedMulLowInt16x32 x y mask) => (VPMULLWMasked512 y x (VPMOVVec16x32ToM mask)) (MaskedPopCountInt16x32 x mask) => (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) -(MaskedSaturatedAddInt16x32 x y mask) => (VPADDSWMasked512 y x (VPMOVVec16x32ToM mask)) -(MaskedSaturatedSubInt16x32 x y mask) => (VPSUBSWMasked512 y x (VPMOVVec16x32ToM mask)) -(MaskedSubInt16x32 x y mask) => (VPSUBWMasked512 y x (VPMOVVec16x32ToM mask)) -(MaskedAbsoluteInt16x8 x mask) => (VPABSWMasked128 x (VPMOVVec16x8ToM mask)) -(MaskedAddInt16x8 x y mask) => (VPADDWMasked128 y x (VPMOVVec16x8ToM mask)) -(MaskedMaxInt16x8 x y mask) => (VPMAXSWMasked128 y x 
(VPMOVVec16x8ToM mask)) -(MaskedMinInt16x8 x y mask) => (VPMINSWMasked128 y x (VPMOVVec16x8ToM mask)) -(MaskedMulHighInt16x8 x y mask) => (VPMULHWMasked128 y x (VPMOVVec16x8ToM mask)) -(MaskedMulLowInt16x8 x y mask) => (VPMULLWMasked128 y x (VPMOVVec16x8ToM mask)) (MaskedPopCountInt16x8 x mask) => (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) -(MaskedSaturatedAddInt16x8 x y mask) => (VPADDSWMasked128 y x (VPMOVVec16x8ToM mask)) -(MaskedSaturatedSubInt16x8 x y mask) => (VPSUBSWMasked128 y x (VPMOVVec16x8ToM mask)) -(MaskedSubInt16x8 x y mask) => (VPSUBWMasked128 y x (VPMOVVec16x8ToM mask)) -(MaskedAbsoluteInt32x16 x mask) => (VPABSDMasked512 x (VPMOVVec32x16ToM mask)) -(MaskedAddInt32x16 x y mask) => (VPADDDMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedAndInt32x16 x y mask) => (VPANDDMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedAndNotInt32x16 x y mask) => (VPANDNDMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedMaxInt32x16 x y mask) => (VPMAXSDMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedMinInt32x16 x y mask) => (VPMINSDMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedMulLowInt32x16 x y mask) => (VPMULLDMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedOrInt32x16 x y mask) => (VPORDMasked512 y x (VPMOVVec32x16ToM mask)) (MaskedPopCountInt32x16 x mask) => (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) -(MaskedSubInt32x16 x y mask) => (VPSUBDMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedXorInt32x16 x y mask) => (VPXORDMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedAbsoluteInt32x4 x mask) => (VPABSDMasked128 x (VPMOVVec32x4ToM mask)) -(MaskedAddInt32x4 x y mask) => (VPADDDMasked128 y x (VPMOVVec32x4ToM mask)) -(MaskedAndInt32x4 x y mask) => (VPANDDMasked128 y x (VPMOVVec32x4ToM mask)) -(MaskedAndNotInt32x4 x y mask) => (VPANDNDMasked128 y x (VPMOVVec32x4ToM mask)) -(MaskedMaxInt32x4 x y mask) => (VPMAXSDMasked128 y x (VPMOVVec32x4ToM mask)) -(MaskedMinInt32x4 x y mask) => (VPMINSDMasked128 y x (VPMOVVec32x4ToM mask)) -(MaskedMulLowInt32x4 x y mask) => (VPMULLDMasked128 y x (VPMOVVec32x4ToM mask)) -(MaskedOrInt32x4 x y mask) => (VPORDMasked128 y x (VPMOVVec32x4ToM mask)) (MaskedPopCountInt32x4 x mask) => (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) -(MaskedSubInt32x4 x y mask) => (VPSUBDMasked128 y x (VPMOVVec32x4ToM mask)) -(MaskedXorInt32x4 x y mask) => (VPXORDMasked128 y x (VPMOVVec32x4ToM mask)) -(MaskedAbsoluteInt32x8 x mask) => (VPABSDMasked256 x (VPMOVVec32x8ToM mask)) -(MaskedAddInt32x8 x y mask) => (VPADDDMasked256 y x (VPMOVVec32x8ToM mask)) -(MaskedAndInt32x8 x y mask) => (VPANDDMasked256 y x (VPMOVVec32x8ToM mask)) -(MaskedAndNotInt32x8 x y mask) => (VPANDNDMasked256 y x (VPMOVVec32x8ToM mask)) -(MaskedMaxInt32x8 x y mask) => (VPMAXSDMasked256 y x (VPMOVVec32x8ToM mask)) -(MaskedMinInt32x8 x y mask) => (VPMINSDMasked256 y x (VPMOVVec32x8ToM mask)) -(MaskedMulLowInt32x8 x y mask) => (VPMULLDMasked256 y x (VPMOVVec32x8ToM mask)) -(MaskedOrInt32x8 x y mask) => (VPORDMasked256 y x (VPMOVVec32x8ToM mask)) (MaskedPopCountInt32x8 x mask) => (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) -(MaskedSubInt32x8 x y mask) => (VPSUBDMasked256 y x (VPMOVVec32x8ToM mask)) -(MaskedXorInt32x8 x y mask) => (VPXORDMasked256 y x (VPMOVVec32x8ToM mask)) -(MaskedAbsoluteInt64x2 x mask) => (VPABSQMasked128 x (VPMOVVec64x2ToM mask)) -(MaskedAddInt64x2 x y mask) => (VPADDQMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedAndInt64x2 x y mask) => (VPANDQMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedAndNotInt64x2 x y mask) => (VPANDNQMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedMaxInt64x2 x y mask) => (VPMAXSQMasked128 
y x (VPMOVVec64x2ToM mask)) -(MaskedMinInt64x2 x y mask) => (VPMINSQMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedMulEvenWidenInt64x2 x y mask) => (VPMULDQMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedMulLowInt64x2 x y mask) => (VPMULLQMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedOrInt64x2 x y mask) => (VPORQMasked128 y x (VPMOVVec64x2ToM mask)) (MaskedPopCountInt64x2 x mask) => (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) -(MaskedSubInt64x2 x y mask) => (VPSUBQMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedXorInt64x2 x y mask) => (VPXORQMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedAbsoluteInt64x4 x mask) => (VPABSQMasked256 x (VPMOVVec64x4ToM mask)) -(MaskedAddInt64x4 x y mask) => (VPADDQMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedAndInt64x4 x y mask) => (VPANDQMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedAndNotInt64x4 x y mask) => (VPANDNQMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedMaxInt64x4 x y mask) => (VPMAXSQMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedMinInt64x4 x y mask) => (VPMINSQMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedMulEvenWidenInt64x4 x y mask) => (VPMULDQMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedMulLowInt64x4 x y mask) => (VPMULLQMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedOrInt64x4 x y mask) => (VPORQMasked256 y x (VPMOVVec64x4ToM mask)) (MaskedPopCountInt64x4 x mask) => (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) -(MaskedSubInt64x4 x y mask) => (VPSUBQMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedXorInt64x4 x y mask) => (VPXORQMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedAbsoluteInt64x8 x mask) => (VPABSQMasked512 x (VPMOVVec64x8ToM mask)) -(MaskedAddInt64x8 x y mask) => (VPADDQMasked512 y x (VPMOVVec64x8ToM mask)) -(MaskedAndInt64x8 x y mask) => (VPANDQMasked512 y x (VPMOVVec64x8ToM mask)) -(MaskedAndNotInt64x8 x y mask) => (VPANDNQMasked512 y x (VPMOVVec64x8ToM mask)) -(MaskedMaxInt64x8 x y mask) => (VPMAXSQMasked512 y x (VPMOVVec64x8ToM mask)) -(MaskedMinInt64x8 x y mask) => (VPMINSQMasked512 y x (VPMOVVec64x8ToM mask)) -(MaskedMulEvenWidenInt64x8 x y mask) => (VPMULDQMasked512 y x (VPMOVVec64x8ToM mask)) -(MaskedMulLowInt64x8 x y mask) => (VPMULLQMasked512 y x (VPMOVVec64x8ToM mask)) -(MaskedOrInt64x8 x y mask) => (VPORQMasked512 y x (VPMOVVec64x8ToM mask)) (MaskedPopCountInt64x8 x mask) => (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) -(MaskedSubInt64x8 x y mask) => (VPSUBQMasked512 y x (VPMOVVec64x8ToM mask)) -(MaskedXorInt64x8 x y mask) => (VPXORQMasked512 y x (VPMOVVec64x8ToM mask)) -(MaskedAbsoluteInt8x16 x mask) => (VPABSBMasked128 x (VPMOVVec8x16ToM mask)) -(MaskedAddInt8x16 x y mask) => (VPADDBMasked128 y x (VPMOVVec8x16ToM mask)) -(MaskedMaxInt8x16 x y mask) => (VPMAXSBMasked128 y x (VPMOVVec8x16ToM mask)) -(MaskedMinInt8x16 x y mask) => (VPMINSBMasked128 y x (VPMOVVec8x16ToM mask)) (MaskedPopCountInt8x16 x mask) => (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) -(MaskedSaturatedAddInt8x16 x y mask) => (VPADDSBMasked128 y x (VPMOVVec8x16ToM mask)) -(MaskedSaturatedSubInt8x16 x y mask) => (VPSUBSBMasked128 y x (VPMOVVec8x16ToM mask)) -(MaskedSubInt8x16 x y mask) => (VPSUBBMasked128 y x (VPMOVVec8x16ToM mask)) -(MaskedAbsoluteInt8x32 x mask) => (VPABSBMasked256 x (VPMOVVec8x32ToM mask)) -(MaskedAddInt8x32 x y mask) => (VPADDBMasked256 y x (VPMOVVec8x32ToM mask)) -(MaskedMaxInt8x32 x y mask) => (VPMAXSBMasked256 y x (VPMOVVec8x32ToM mask)) -(MaskedMinInt8x32 x y mask) => (VPMINSBMasked256 y x (VPMOVVec8x32ToM mask)) (MaskedPopCountInt8x32 x mask) => (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) -(MaskedSaturatedAddInt8x32 x y mask) => 
(VPADDSBMasked256 y x (VPMOVVec8x32ToM mask)) -(MaskedSaturatedSubInt8x32 x y mask) => (VPSUBSBMasked256 y x (VPMOVVec8x32ToM mask)) -(MaskedSubInt8x32 x y mask) => (VPSUBBMasked256 y x (VPMOVVec8x32ToM mask)) -(MaskedAbsoluteInt8x64 x mask) => (VPABSBMasked512 x (VPMOVVec8x64ToM mask)) -(MaskedAddInt8x64 x y mask) => (VPADDBMasked512 y x (VPMOVVec8x64ToM mask)) -(MaskedMaxInt8x64 x y mask) => (VPMAXSBMasked512 y x (VPMOVVec8x64ToM mask)) -(MaskedMinInt8x64 x y mask) => (VPMINSBMasked512 y x (VPMOVVec8x64ToM mask)) (MaskedPopCountInt8x64 x mask) => (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) -(MaskedSaturatedAddInt8x64 x y mask) => (VPADDSBMasked512 y x (VPMOVVec8x64ToM mask)) -(MaskedSaturatedSubInt8x64 x y mask) => (VPSUBSBMasked512 y x (VPMOVVec8x64ToM mask)) -(MaskedSubInt8x64 x y mask) => (VPSUBBMasked512 y x (VPMOVVec8x64ToM mask)) -(MaskedAddUint16x16 x y mask) => (VPADDWMasked256 y x (VPMOVVec16x16ToM mask)) -(MaskedAverageUint16x16 x y mask) => (VPAVGWMasked256 y x (VPMOVVec16x16ToM mask)) -(MaskedMaxUint16x16 x y mask) => (VPMAXUWMasked256 y x (VPMOVVec16x16ToM mask)) -(MaskedMinUint16x16 x y mask) => (VPMINUWMasked256 y x (VPMOVVec16x16ToM mask)) -(MaskedMulHighUint16x16 x y mask) => (VPMULHUWMasked256 y x (VPMOVVec16x16ToM mask)) (MaskedPopCountUint16x16 x mask) => (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) -(MaskedSaturatedAddUint16x16 x y mask) => (VPADDSWMasked256 y x (VPMOVVec16x16ToM mask)) -(MaskedSaturatedSubUint16x16 x y mask) => (VPSUBSWMasked256 y x (VPMOVVec16x16ToM mask)) -(MaskedSubUint16x16 x y mask) => (VPSUBWMasked256 y x (VPMOVVec16x16ToM mask)) -(MaskedAddUint16x32 x y mask) => (VPADDWMasked512 y x (VPMOVVec16x32ToM mask)) -(MaskedAverageUint16x32 x y mask) => (VPAVGWMasked512 y x (VPMOVVec16x32ToM mask)) -(MaskedMaxUint16x32 x y mask) => (VPMAXUWMasked512 y x (VPMOVVec16x32ToM mask)) -(MaskedMinUint16x32 x y mask) => (VPMINUWMasked512 y x (VPMOVVec16x32ToM mask)) -(MaskedMulHighUint16x32 x y mask) => (VPMULHUWMasked512 y x (VPMOVVec16x32ToM mask)) (MaskedPopCountUint16x32 x mask) => (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) -(MaskedSaturatedAddUint16x32 x y mask) => (VPADDSWMasked512 y x (VPMOVVec16x32ToM mask)) -(MaskedSaturatedSubUint16x32 x y mask) => (VPSUBSWMasked512 y x (VPMOVVec16x32ToM mask)) -(MaskedSubUint16x32 x y mask) => (VPSUBWMasked512 y x (VPMOVVec16x32ToM mask)) -(MaskedAddUint16x8 x y mask) => (VPADDWMasked128 y x (VPMOVVec16x8ToM mask)) -(MaskedAverageUint16x8 x y mask) => (VPAVGWMasked128 y x (VPMOVVec16x8ToM mask)) -(MaskedMaxUint16x8 x y mask) => (VPMAXUWMasked128 y x (VPMOVVec16x8ToM mask)) -(MaskedMinUint16x8 x y mask) => (VPMINUWMasked128 y x (VPMOVVec16x8ToM mask)) -(MaskedMulHighUint16x8 x y mask) => (VPMULHUWMasked128 y x (VPMOVVec16x8ToM mask)) (MaskedPopCountUint16x8 x mask) => (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) -(MaskedSaturatedAddUint16x8 x y mask) => (VPADDSWMasked128 y x (VPMOVVec16x8ToM mask)) -(MaskedSaturatedSubUint16x8 x y mask) => (VPSUBSWMasked128 y x (VPMOVVec16x8ToM mask)) -(MaskedSubUint16x8 x y mask) => (VPSUBWMasked128 y x (VPMOVVec16x8ToM mask)) -(MaskedAddUint32x16 x y mask) => (VPADDDMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedAndUint32x16 x y mask) => (VPANDDMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedAndNotUint32x16 x y mask) => (VPANDNDMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedMaxUint32x16 x y mask) => (VPMAXUDMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedMinUint32x16 x y mask) => (VPMINUDMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedOrUint32x16 x y mask) => (VPORDMasked512 y x 
(VPMOVVec32x16ToM mask)) (MaskedPopCountUint32x16 x mask) => (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) -(MaskedSubUint32x16 x y mask) => (VPSUBDMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedXorUint32x16 x y mask) => (VPXORDMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedAddUint32x4 x y mask) => (VPADDDMasked128 y x (VPMOVVec32x4ToM mask)) -(MaskedAndUint32x4 x y mask) => (VPANDDMasked128 y x (VPMOVVec32x4ToM mask)) -(MaskedAndNotUint32x4 x y mask) => (VPANDNDMasked128 y x (VPMOVVec32x4ToM mask)) -(MaskedMaxUint32x4 x y mask) => (VPMAXUDMasked128 y x (VPMOVVec32x4ToM mask)) -(MaskedMinUint32x4 x y mask) => (VPMINUDMasked128 y x (VPMOVVec32x4ToM mask)) -(MaskedOrUint32x4 x y mask) => (VPORDMasked128 y x (VPMOVVec32x4ToM mask)) (MaskedPopCountUint32x4 x mask) => (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) -(MaskedSubUint32x4 x y mask) => (VPSUBDMasked128 y x (VPMOVVec32x4ToM mask)) -(MaskedXorUint32x4 x y mask) => (VPXORDMasked128 y x (VPMOVVec32x4ToM mask)) -(MaskedAddUint32x8 x y mask) => (VPADDDMasked256 y x (VPMOVVec32x8ToM mask)) -(MaskedAndUint32x8 x y mask) => (VPANDDMasked256 y x (VPMOVVec32x8ToM mask)) -(MaskedAndNotUint32x8 x y mask) => (VPANDNDMasked256 y x (VPMOVVec32x8ToM mask)) -(MaskedMaxUint32x8 x y mask) => (VPMAXUDMasked256 y x (VPMOVVec32x8ToM mask)) -(MaskedMinUint32x8 x y mask) => (VPMINUDMasked256 y x (VPMOVVec32x8ToM mask)) -(MaskedOrUint32x8 x y mask) => (VPORDMasked256 y x (VPMOVVec32x8ToM mask)) (MaskedPopCountUint32x8 x mask) => (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) -(MaskedSubUint32x8 x y mask) => (VPSUBDMasked256 y x (VPMOVVec32x8ToM mask)) -(MaskedXorUint32x8 x y mask) => (VPXORDMasked256 y x (VPMOVVec32x8ToM mask)) -(MaskedAddUint64x2 x y mask) => (VPADDQMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedAndUint64x2 x y mask) => (VPANDQMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedAndNotUint64x2 x y mask) => (VPANDNQMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedMaxUint64x2 x y mask) => (VPMAXUQMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedMinUint64x2 x y mask) => (VPMINUQMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedMulEvenWidenUint64x2 x y mask) => (VPMULUDQMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedOrUint64x2 x y mask) => (VPORQMasked128 y x (VPMOVVec64x2ToM mask)) (MaskedPopCountUint64x2 x mask) => (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) -(MaskedSubUint64x2 x y mask) => (VPSUBQMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedXorUint64x2 x y mask) => (VPXORQMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedAddUint64x4 x y mask) => (VPADDQMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedAndUint64x4 x y mask) => (VPANDQMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedAndNotUint64x4 x y mask) => (VPANDNQMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedMaxUint64x4 x y mask) => (VPMAXUQMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedMinUint64x4 x y mask) => (VPMINUQMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedMulEvenWidenUint64x4 x y mask) => (VPMULUDQMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedOrUint64x4 x y mask) => (VPORQMasked256 y x (VPMOVVec64x4ToM mask)) (MaskedPopCountUint64x4 x mask) => (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) -(MaskedSubUint64x4 x y mask) => (VPSUBQMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedXorUint64x4 x y mask) => (VPXORQMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedAddUint64x8 x y mask) => (VPADDQMasked512 y x (VPMOVVec64x8ToM mask)) -(MaskedAndUint64x8 x y mask) => (VPANDQMasked512 y x (VPMOVVec64x8ToM mask)) -(MaskedAndNotUint64x8 x y mask) => (VPANDNQMasked512 y x (VPMOVVec64x8ToM mask)) -(MaskedMaxUint64x8 x y mask) => 
(VPMAXUQMasked512 y x (VPMOVVec64x8ToM mask)) -(MaskedMinUint64x8 x y mask) => (VPMINUQMasked512 y x (VPMOVVec64x8ToM mask)) -(MaskedMulEvenWidenUint64x8 x y mask) => (VPMULUDQMasked512 y x (VPMOVVec64x8ToM mask)) -(MaskedOrUint64x8 x y mask) => (VPORQMasked512 y x (VPMOVVec64x8ToM mask)) (MaskedPopCountUint64x8 x mask) => (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) -(MaskedSubUint64x8 x y mask) => (VPSUBQMasked512 y x (VPMOVVec64x8ToM mask)) -(MaskedXorUint64x8 x y mask) => (VPXORQMasked512 y x (VPMOVVec64x8ToM mask)) -(MaskedAddUint8x16 x y mask) => (VPADDBMasked128 y x (VPMOVVec8x16ToM mask)) -(MaskedAverageUint8x16 x y mask) => (VPAVGBMasked128 y x (VPMOVVec8x16ToM mask)) -(MaskedMaxUint8x16 x y mask) => (VPMAXUBMasked128 y x (VPMOVVec8x16ToM mask)) -(MaskedMinUint8x16 x y mask) => (VPMINUBMasked128 y x (VPMOVVec8x16ToM mask)) (MaskedPopCountUint8x16 x mask) => (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) -(MaskedSaturatedAddUint8x16 x y mask) => (VPADDSBMasked128 y x (VPMOVVec8x16ToM mask)) -(MaskedSaturatedSubUint8x16 x y mask) => (VPSUBSBMasked128 y x (VPMOVVec8x16ToM mask)) -(MaskedSubUint8x16 x y mask) => (VPSUBBMasked128 y x (VPMOVVec8x16ToM mask)) -(MaskedAddUint8x32 x y mask) => (VPADDBMasked256 y x (VPMOVVec8x32ToM mask)) -(MaskedAverageUint8x32 x y mask) => (VPAVGBMasked256 y x (VPMOVVec8x32ToM mask)) -(MaskedMaxUint8x32 x y mask) => (VPMAXUBMasked256 y x (VPMOVVec8x32ToM mask)) -(MaskedMinUint8x32 x y mask) => (VPMINUBMasked256 y x (VPMOVVec8x32ToM mask)) (MaskedPopCountUint8x32 x mask) => (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) -(MaskedSaturatedAddUint8x32 x y mask) => (VPADDSBMasked256 y x (VPMOVVec8x32ToM mask)) -(MaskedSaturatedSubUint8x32 x y mask) => (VPSUBSBMasked256 y x (VPMOVVec8x32ToM mask)) -(MaskedSubUint8x32 x y mask) => (VPSUBBMasked256 y x (VPMOVVec8x32ToM mask)) -(MaskedAddUint8x64 x y mask) => (VPADDBMasked512 y x (VPMOVVec8x64ToM mask)) -(MaskedAverageUint8x64 x y mask) => (VPAVGBMasked512 y x (VPMOVVec8x64ToM mask)) -(MaskedMaxUint8x64 x y mask) => (VPMAXUBMasked512 y x (VPMOVVec8x64ToM mask)) -(MaskedMinUint8x64 x y mask) => (VPMINUBMasked512 y x (VPMOVVec8x64ToM mask)) (MaskedPopCountUint8x64 x mask) => (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) -(MaskedSaturatedAddUint8x64 x y mask) => (VPADDSBMasked512 y x (VPMOVVec8x64ToM mask)) -(MaskedSaturatedSubUint8x64 x y mask) => (VPSUBSBMasked512 y x (VPMOVVec8x64ToM mask)) -(MaskedSubUint8x64 x y mask) => (VPSUBBMasked512 y x (VPMOVVec8x64ToM mask)) -(EqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPEQW512 y x)) -(GreaterInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPGTW512 y x)) -(GreaterInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPGTQ128 y x)) -(EqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPEQQ512 y x)) -(GreaterInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPGTQ512 y x)) -(EqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [0] y x)) -(EqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [0] y x)) -(EqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [0] y x)) -(EqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [0] y x)) -(EqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [0] y x)) -(EqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [0] y x)) -(EqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [0] y x)) -(EqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [0] y x)) -(EqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [0] y x)) -(EqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [0] y x)) -(EqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [0] y x)) -(EqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [0] y x)) 
-(EqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [0] y x)) -(EqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [0] y x)) -(EqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [0] y x)) -(EqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [0] y x)) -(GreaterInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [6] y x)) -(GreaterUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [6] y x)) -(GreaterUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [6] y x)) -(GreaterInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [6] y x)) -(GreaterUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [6] y x)) -(GreaterUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [6] y x)) -(GreaterUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [6] y x)) -(GreaterUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [6] y x)) -(GreaterUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [6] y x)) -(GreaterFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [6] y x)) -(GreaterFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [6] y x)) -(GreaterUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [6] y x)) -(GreaterUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [6] y x)) -(GreaterUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [6] y x)) -(GreaterUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [6] y x)) -(GreaterUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [6] y x)) -(GreaterEqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [5] y x)) -(GreaterEqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [5] y x)) -(GreaterEqualInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [5] y x)) -(GreaterEqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [5] y x)) -(GreaterEqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [5] y x)) -(GreaterEqualInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [5] y x)) -(GreaterEqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [5] y x)) -(GreaterEqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [5] y x)) -(GreaterEqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [5] y x)) -(GreaterEqualInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [5] y x)) -(GreaterEqualInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [5] y x)) -(GreaterEqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [5] y x)) -(GreaterEqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [5] y x)) -(GreaterEqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [5] y x)) -(GreaterEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [5] y x)) -(GreaterEqualInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [5] y x)) -(GreaterEqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [5] y x)) -(GreaterEqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [5] y x)) -(GreaterEqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [5] y x)) -(GreaterEqualInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [5] y x)) -(GreaterEqualInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [5] y x)) -(GreaterEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [5] y x)) -(GreaterEqualInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [5] y x)) -(GreaterEqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [5] y x)) -(GreaterEqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [5] y x)) -(GreaterEqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [5] y x)) -(IsNanFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [3] y x)) -(IsNanFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [3] y x)) -(LessInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [1] y x)) -(LessInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [1] y x)) -(LessInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [1] y x)) -(LessUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [1] y x)) -(LessUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [1] y x)) 
-(LessInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [1] y x)) -(LessInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [1] y x)) -(LessUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [1] y x)) -(LessFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [1] y x)) -(LessUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [1] y x)) -(LessUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [1] y x)) -(LessUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [1] y x)) -(LessUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [1] y x)) -(LessInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [1] y x)) -(LessUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [1] y x)) -(LessFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [1] y x)) -(LessInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [1] y x)) -(LessInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [1] y x)) -(LessUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [1] y x)) -(LessInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [1] y x)) -(LessUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [1] y x)) -(LessInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [1] y x)) -(LessInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [1] y x)) -(LessInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [1] y x)) -(LessUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [1] y x)) -(LessUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [1] y x)) -(LessEqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [2] y x)) -(LessEqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [2] y x)) -(LessEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [2] y x)) -(LessEqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [2] y x)) -(LessEqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [2] y x)) -(LessEqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [2] y x)) -(LessEqualInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [2] y x)) -(LessEqualInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [2] y x)) -(LessEqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [2] y x)) -(LessEqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [2] y x)) -(LessEqualInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [2] y x)) -(LessEqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [2] y x)) -(LessEqualInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [2] y x)) -(LessEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [2] y x)) -(LessEqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [2] y x)) -(LessEqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [2] y x)) -(LessEqualInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [2] y x)) -(LessEqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [2] y x)) -(LessEqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [2] y x)) -(LessEqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [2] y x)) -(LessEqualInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [2] y x)) -(LessEqualInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [2] y x)) -(LessEqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [2] y x)) -(LessEqualInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [2] y x)) -(LessEqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [2] y x)) -(LessEqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [2] y x)) -(NotEqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [4] y x)) -(NotEqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [4] y x)) -(NotEqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [4] y x)) -(NotEqualInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [4] y x)) -(NotEqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [4] y x)) -(NotEqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [4] y x)) -(NotEqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [4] y x)) -(NotEqualInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [4] y x)) 
-(NotEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [4] y x)) -(NotEqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [4] y x)) -(NotEqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [4] y x)) -(NotEqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [4] y x)) -(NotEqualInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [4] y x)) -(NotEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [4] y x)) -(NotEqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [4] y x)) -(NotEqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [4] y x)) -(NotEqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [4] y x)) -(NotEqualInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [4] y x)) -(NotEqualInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [4] y x)) -(NotEqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [4] y x)) -(NotEqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [4] y x)) -(NotEqualInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [4] y x)) -(NotEqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [4] y x)) -(NotEqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [4] y x)) -(NotEqualInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [4] y x)) -(NotEqualInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [4] y x)) -(MaskedEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPEQWMasked256 y x (VPMOVVec16x16ToM mask))) -(MaskedGreaterInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPGTWMasked256 y x (VPMOVVec16x16ToM mask))) -(MaskedEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPEQWMasked512 y x (VPMOVVec16x32ToM mask))) -(MaskedGreaterInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPGTWMasked512 y x (VPMOVVec16x32ToM mask))) -(MaskedEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPEQWMasked128 y x (VPMOVVec16x8ToM mask))) -(MaskedGreaterInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPGTWMasked128 y x (VPMOVVec16x8ToM mask))) -(MaskedEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPEQQMasked128 y x (VPMOVVec64x2ToM mask))) -(MaskedGreaterInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPGTQMasked128 y x (VPMOVVec64x2ToM mask))) -(MaskedEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPEQQMasked256 y x (VPMOVVec64x4ToM mask))) -(MaskedGreaterInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPGTQMasked256 y x (VPMOVVec64x4ToM mask))) -(MaskedEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPEQQMasked512 y x (VPMOVVec64x8ToM mask))) -(MaskedGreaterInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPGTQMasked512 y x (VPMOVVec64x8ToM mask))) -(MaskedEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [0] y x (VPMOVVec8x64ToM mask))) -(MaskedEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [0] y x (VPMOVVec64x4ToM mask))) -(MaskedEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [0] y x (VPMOVVec64x8ToM mask))) -(MaskedEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [0] y x (VPMOVVec32x8ToM mask))) -(MaskedEqualUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [0] y x (VPMOVVec32x4ToM mask))) -(MaskedEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [0] y x (VPMOVVec32x8ToM mask))) -(MaskedEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [0] y x (VPMOVVec64x2ToM mask))) -(MaskedEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [0] y x (VPMOVVec64x2ToM mask))) -(MaskedEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [0] y x (VPMOVVec32x16ToM mask))) -(MaskedEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [0] y x (VPMOVVec8x16ToM mask))) -(MaskedEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [0] y x (VPMOVVec8x16ToM mask))) -(MaskedEqualUint8x32 x y mask) 
=> (VPMOVMToVec8x32 (VPCMPUBMasked256 [0] y x (VPMOVVec8x32ToM mask))) -(MaskedEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [0] y x (VPMOVVec64x8ToM mask))) -(MaskedEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [0] y x (VPMOVVec16x32ToM mask))) -(MaskedEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [0] y x (VPMOVVec32x16ToM mask))) -(MaskedEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [0] y x (VPMOVVec32x8ToM mask))) -(MaskedEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [0] y x (VPMOVVec8x32ToM mask))) -(MaskedEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [0] y x (VPMOVVec16x8ToM mask))) -(MaskedEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [0] y x (VPMOVVec16x16ToM mask))) -(MaskedEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [0] y x (VPMOVVec64x4ToM mask))) -(MaskedEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [0] y x (VPMOVVec32x16ToM mask))) -(MaskedEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [0] y x (VPMOVVec32x4ToM mask))) -(MaskedEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [0] y x (VPMOVVec32x4ToM mask))) -(MaskedEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [0] y x (VPMOVVec8x64ToM mask))) -(MaskedGreaterFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [6] y x (VPMOVVec64x2ToM mask))) -(MaskedGreaterUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [6] y x (VPMOVVec16x16ToM mask))) -(MaskedGreaterUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [6] y x (VPMOVVec32x16ToM mask))) -(MaskedGreaterUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [6] y x (VPMOVVec16x32ToM mask))) -(MaskedGreaterUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [6] y x (VPMOVVec8x16ToM mask))) -(MaskedGreaterFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [6] y x (VPMOVVec64x4ToM mask))) -(MaskedGreaterUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [6] y x (VPMOVVec32x8ToM mask))) -(MaskedGreaterFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [6] y x (VPMOVVec32x16ToM mask))) -(MaskedGreaterInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [6] y x (VPMOVVec8x16ToM mask))) -(MaskedGreaterInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [6] y x (VPMOVVec32x4ToM mask))) -(MaskedGreaterInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [6] y x (VPMOVVec32x16ToM mask))) -(MaskedGreaterUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [6] y x (VPMOVVec16x8ToM mask))) -(MaskedGreaterFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [6] y x (VPMOVVec32x4ToM mask))) -(MaskedGreaterUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [6] y x (VPMOVVec64x2ToM mask))) -(MaskedGreaterUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [6] y x (VPMOVVec8x64ToM mask))) -(MaskedGreaterUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [6] y x (VPMOVVec64x8ToM mask))) -(MaskedGreaterFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [6] y x (VPMOVVec32x8ToM mask))) -(MaskedGreaterInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [6] y x (VPMOVVec32x8ToM mask))) -(MaskedGreaterUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [6] y x (VPMOVVec32x4ToM mask))) -(MaskedGreaterFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [6] y x (VPMOVVec64x8ToM mask))) -(MaskedGreaterUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [6] y x (VPMOVVec8x32ToM mask))) -(MaskedGreaterInt8x64 x y 
mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [6] y x (VPMOVVec8x64ToM mask))) -(MaskedGreaterUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [6] y x (VPMOVVec64x4ToM mask))) -(MaskedGreaterInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [6] y x (VPMOVVec8x32ToM mask))) -(MaskedGreaterEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [5] y x (VPMOVVec32x8ToM mask))) -(MaskedGreaterEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [5] y x (VPMOVVec32x4ToM mask))) -(MaskedGreaterEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [5] y x (VPMOVVec16x16ToM mask))) -(MaskedGreaterEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [5] y x (VPMOVVec8x16ToM mask))) -(MaskedGreaterEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [5] y x (VPMOVVec16x8ToM mask))) -(MaskedGreaterEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [5] y x (VPMOVVec16x8ToM mask))) -(MaskedGreaterEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [5] y x (VPMOVVec32x8ToM mask))) -(MaskedGreaterEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [5] y x (VPMOVVec16x16ToM mask))) -(MaskedGreaterEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [5] y x (VPMOVVec64x8ToM mask))) -(MaskedGreaterEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [5] y x (VPMOVVec32x16ToM mask))) -(MaskedGreaterEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [5] y x (VPMOVVec32x16ToM mask))) -(MaskedGreaterEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [5] y x (VPMOVVec8x32ToM mask))) -(MaskedGreaterEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [5] y x (VPMOVVec8x64ToM mask))) -(MaskedGreaterEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [5] y x (VPMOVVec32x4ToM mask))) -(MaskedGreaterEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [5] y x (VPMOVVec32x8ToM mask))) -(MaskedGreaterEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [5] y x (VPMOVVec8x64ToM mask))) -(MaskedGreaterEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [5] y x (VPMOVVec64x2ToM mask))) -(MaskedGreaterEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [5] y x (VPMOVVec16x32ToM mask))) -(MaskedGreaterEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [5] y x (VPMOVVec64x8ToM mask))) -(MaskedGreaterEqualUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [5] y x (VPMOVVec32x4ToM mask))) -(MaskedGreaterEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [5] y x (VPMOVVec64x4ToM mask))) -(MaskedGreaterEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [5] y x (VPMOVVec64x2ToM mask))) -(MaskedGreaterEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [5] y x (VPMOVVec64x8ToM mask))) -(MaskedGreaterEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [5] y x (VPMOVVec32x16ToM mask))) -(MaskedGreaterEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [5] y x (VPMOVVec8x32ToM mask))) -(MaskedGreaterEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [5] y x (VPMOVVec64x4ToM mask))) -(MaskedGreaterEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [5] y x (VPMOVVec64x2ToM mask))) -(MaskedGreaterEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [5] y x (VPMOVVec8x16ToM mask))) -(MaskedGreaterEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [5] y x (VPMOVVec16x32ToM mask))) -(MaskedGreaterEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [5] y 
x (VPMOVVec64x4ToM mask))) -(MaskedIsNanFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [3] y x (VPMOVVec32x16ToM mask))) -(MaskedIsNanFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [3] y x (VPMOVVec64x2ToM mask))) -(MaskedIsNanFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [3] y x (VPMOVVec32x8ToM mask))) -(MaskedIsNanFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [3] y x (VPMOVVec32x4ToM mask))) -(MaskedIsNanFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [3] y x (VPMOVVec64x4ToM mask))) -(MaskedIsNanFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [3] y x (VPMOVVec64x8ToM mask))) -(MaskedLessFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [1] y x (VPMOVVec64x4ToM mask))) -(MaskedLessInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [1] y x (VPMOVVec32x8ToM mask))) -(MaskedLessInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [1] y x (VPMOVVec8x64ToM mask))) -(MaskedLessUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [1] y x (VPMOVVec32x16ToM mask))) -(MaskedLessInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [1] y x (VPMOVVec16x16ToM mask))) -(MaskedLessUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [1] y x (VPMOVVec8x32ToM mask))) -(MaskedLessInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [1] y x (VPMOVVec32x4ToM mask))) -(MaskedLessInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [1] y x (VPMOVVec32x16ToM mask))) -(MaskedLessUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [1] y x (VPMOVVec64x8ToM mask))) -(MaskedLessFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [1] y x (VPMOVVec64x2ToM mask))) -(MaskedLessUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [1] y x (VPMOVVec8x16ToM mask))) -(MaskedLessUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [1] y x (VPMOVVec16x8ToM mask))) -(MaskedLessUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [1] y x (VPMOVVec16x32ToM mask))) -(MaskedLessUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [1] y x (VPMOVVec32x4ToM mask))) -(MaskedLessInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [1] y x (VPMOVVec8x32ToM mask))) -(MaskedLessInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [1] y x (VPMOVVec64x2ToM mask))) -(MaskedLessUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [1] y x (VPMOVVec64x2ToM mask))) -(MaskedLessFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [1] y x (VPMOVVec32x16ToM mask))) -(MaskedLessInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [1] y x (VPMOVVec64x4ToM mask))) -(MaskedLessUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [1] y x (VPMOVVec8x64ToM mask))) -(MaskedLessUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [1] y x (VPMOVVec16x16ToM mask))) -(MaskedLessUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [1] y x (VPMOVVec64x4ToM mask))) -(MaskedLessFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [1] y x (VPMOVVec64x8ToM mask))) -(MaskedLessInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [1] y x (VPMOVVec8x16ToM mask))) -(MaskedLessFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [1] y x (VPMOVVec32x8ToM mask))) -(MaskedLessFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [1] y x (VPMOVVec32x4ToM mask))) -(MaskedLessUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [1] y x (VPMOVVec32x8ToM mask))) -(MaskedLessInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [1] y x (VPMOVVec16x32ToM mask))) -(MaskedLessInt64x8 x y mask) => (VPMOVMToVec64x8 
(VPCMPQMasked512 [1] y x (VPMOVVec64x8ToM mask))) -(MaskedLessInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [1] y x (VPMOVVec16x8ToM mask))) -(MaskedLessEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [2] y x (VPMOVVec32x4ToM mask))) -(MaskedLessEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [2] y x (VPMOVVec16x32ToM mask))) -(MaskedLessEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [2] y x (VPMOVVec64x4ToM mask))) -(MaskedLessEqualUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [2] y x (VPMOVVec32x4ToM mask))) -(MaskedLessEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [2] y x (VPMOVVec64x8ToM mask))) -(MaskedLessEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [2] y x (VPMOVVec32x16ToM mask))) -(MaskedLessEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [2] y x (VPMOVVec64x2ToM mask))) -(MaskedLessEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [2] y x (VPMOVVec32x16ToM mask))) -(MaskedLessEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [2] y x (VPMOVVec64x8ToM mask))) -(MaskedLessEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [2] y x (VPMOVVec16x8ToM mask))) -(MaskedLessEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [2] y x (VPMOVVec32x4ToM mask))) -(MaskedLessEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [2] y x (VPMOVVec64x4ToM mask))) -(MaskedLessEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [2] y x (VPMOVVec64x2ToM mask))) -(MaskedLessEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [2] y x (VPMOVVec16x8ToM mask))) -(MaskedLessEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [2] y x (VPMOVVec32x16ToM mask))) -(MaskedLessEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [2] y x (VPMOVVec8x32ToM mask))) -(MaskedLessEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [2] y x (VPMOVVec8x64ToM mask))) -(MaskedLessEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [2] y x (VPMOVVec32x8ToM mask))) -(MaskedLessEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [2] y x (VPMOVVec16x16ToM mask))) -(MaskedLessEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [2] y x (VPMOVVec16x16ToM mask))) -(MaskedLessEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [2] y x (VPMOVVec8x32ToM mask))) -(MaskedLessEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [2] y x (VPMOVVec64x4ToM mask))) -(MaskedLessEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [2] y x (VPMOVVec8x64ToM mask))) -(MaskedLessEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [2] y x (VPMOVVec32x8ToM mask))) -(MaskedLessEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [2] y x (VPMOVVec8x16ToM mask))) -(MaskedLessEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [2] y x (VPMOVVec64x2ToM mask))) -(MaskedLessEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [2] y x (VPMOVVec16x32ToM mask))) -(MaskedLessEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [2] y x (VPMOVVec32x8ToM mask))) -(MaskedLessEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [2] y x (VPMOVVec8x16ToM mask))) -(MaskedLessEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [2] y x (VPMOVVec64x8ToM mask))) -(MaskedNotEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [4] y x (VPMOVVec64x2ToM mask))) -(MaskedNotEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [4] y x 
(VPMOVVec32x8ToM mask))) -(MaskedNotEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [4] y x (VPMOVVec8x64ToM mask))) -(MaskedNotEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [4] y x (VPMOVVec64x8ToM mask))) -(MaskedNotEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [4] y x (VPMOVVec16x32ToM mask))) -(MaskedNotEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [4] y x (VPMOVVec32x16ToM mask))) -(MaskedNotEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [4] y x (VPMOVVec32x16ToM mask))) -(MaskedNotEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [4] y x (VPMOVVec8x32ToM mask))) -(MaskedNotEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [4] y x (VPMOVVec8x16ToM mask))) -(MaskedNotEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [4] y x (VPMOVVec16x16ToM mask))) -(MaskedNotEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [4] y x (VPMOVVec8x32ToM mask))) -(MaskedNotEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [4] y x (VPMOVVec32x4ToM mask))) -(MaskedNotEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [4] y x (VPMOVVec64x4ToM mask))) -(MaskedNotEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [4] y x (VPMOVVec16x8ToM mask))) -(MaskedNotEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [4] y x (VPMOVVec8x64ToM mask))) -(MaskedNotEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [4] y x (VPMOVVec16x8ToM mask))) -(MaskedNotEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [4] y x (VPMOVVec16x16ToM mask))) -(MaskedNotEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [4] y x (VPMOVVec64x4ToM mask))) -(MaskedNotEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [4] y x (VPMOVVec16x32ToM mask))) -(MaskedNotEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [4] y x (VPMOVVec32x8ToM mask))) -(MaskedNotEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [4] y x (VPMOVVec32x16ToM mask))) -(MaskedNotEqualUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [4] y x (VPMOVVec32x4ToM mask))) -(MaskedNotEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [4] y x (VPMOVVec64x2ToM mask))) -(MaskedNotEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [4] y x (VPMOVVec64x8ToM mask))) -(MaskedNotEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [4] y x (VPMOVVec64x2ToM mask))) -(MaskedNotEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [4] y x (VPMOVVec32x8ToM mask))) -(MaskedNotEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [4] y x (VPMOVVec64x4ToM mask))) -(MaskedNotEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [4] y x (VPMOVVec32x4ToM mask))) -(MaskedNotEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [4] y x (VPMOVVec8x16ToM mask))) -(MaskedNotEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [4] y x (VPMOVVec64x8ToM mask))) +(MaskedSaturatedAddInt16x16 x y mask) => (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedSaturatedAddInt16x32 x y mask) => (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedSaturatedAddInt16x8 x y mask) => (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedSaturatedAddInt8x16 x y mask) => (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaskedSaturatedAddInt8x32 x y mask) => (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaskedSaturatedAddInt8x64 x y mask) => (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedSaturatedAddUint16x16 x y 
mask) => (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedSaturatedAddUint16x32 x y mask) => (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedSaturatedAddUint16x8 x y mask) => (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedSaturatedAddUint8x16 x y mask) => (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaskedSaturatedAddUint8x32 x y mask) => (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaskedSaturatedAddUint8x64 x y mask) => (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedSaturatedSubInt16x16 x y mask) => (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedSaturatedSubInt16x32 x y mask) => (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedSaturatedSubInt16x8 x y mask) => (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedSaturatedSubInt8x16 x y mask) => (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaskedSaturatedSubInt8x32 x y mask) => (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaskedSaturatedSubInt8x64 x y mask) => (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedSaturatedSubUint16x16 x y mask) => (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedSaturatedSubUint16x32 x y mask) => (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedSaturatedSubUint16x8 x y mask) => (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedSaturatedSubUint8x16 x y mask) => (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaskedSaturatedSubUint8x32 x y mask) => (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaskedSaturatedSubUint8x64 x y mask) => (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedSqrtFloat32x16 x mask) => (VSQRTPSMasked512 x (VPMOVVec32x16ToM mask)) +(MaskedSqrtFloat32x4 x mask) => (VSQRTPSMasked128 x (VPMOVVec32x4ToM mask)) +(MaskedSqrtFloat32x8 x mask) => (VSQRTPSMasked256 x (VPMOVVec32x8ToM mask)) +(MaskedSqrtFloat64x2 x mask) => (VSQRTPDMasked128 x (VPMOVVec64x2ToM mask)) +(MaskedSqrtFloat64x4 x mask) => (VSQRTPDMasked256 x (VPMOVVec64x4ToM mask)) +(MaskedSqrtFloat64x8 x mask) => (VSQRTPDMasked512 x (VPMOVVec64x8ToM mask)) +(MaskedSubFloat32x16 x y mask) => (VADDPSMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedSubFloat32x4 x y mask) => (VADDPSMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedSubFloat32x8 x y mask) => (VADDPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedSubFloat64x2 x y mask) => (VADDPDMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedSubFloat64x4 x y mask) => (VADDPDMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedSubFloat64x8 x y mask) => (VADDPDMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedSubInt16x16 x y mask) => (VPSUBWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedSubInt16x32 x y mask) => (VPSUBWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedSubInt16x8 x y mask) => (VPSUBWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedSubInt32x16 x y mask) => (VPSUBDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedSubInt32x4 x y mask) => (VPSUBDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedSubInt32x8 x y mask) => (VPSUBDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedSubInt64x2 x y mask) => (VPSUBQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedSubInt64x4 x y mask) => (VPSUBQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedSubInt64x8 x y mask) => (VPSUBQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedSubInt8x16 x y mask) => (VPSUBBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaskedSubInt8x32 x y mask) => (VPSUBBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaskedSubInt8x64 x y mask) => (VPSUBBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedSubUint16x16 x y mask) => (VPSUBWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedSubUint16x32 x y mask) => 
(VPSUBWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedSubUint16x8 x y mask) => (VPSUBWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedSubUint32x16 x y mask) => (VPSUBDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedSubUint32x4 x y mask) => (VPSUBDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedSubUint32x8 x y mask) => (VPSUBDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedSubUint64x2 x y mask) => (VPSUBQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedSubUint64x4 x y mask) => (VPSUBQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedSubUint64x8 x y mask) => (VPSUBQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedSubUint8x16 x y mask) => (VPSUBBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaskedSubUint8x32 x y mask) => (VPSUBBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaskedSubUint8x64 x y mask) => (VPSUBBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedXorFloat32x16 x y mask) => (VXORPSMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedXorFloat32x4 x y mask) => (VXORPSMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedXorFloat32x8 x y mask) => (VXORPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedXorFloat64x2 x y mask) => (VXORPDMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedXorFloat64x4 x y mask) => (VXORPDMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedXorFloat64x8 x y mask) => (VXORPDMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedXorInt32x16 x y mask) => (VPXORDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedXorInt32x4 x y mask) => (VPXORDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedXorInt32x8 x y mask) => (VPXORDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedXorInt64x2 x y mask) => (VPXORQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedXorInt64x4 x y mask) => (VPXORQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedXorInt64x8 x y mask) => (VPXORQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedXorUint32x16 x y mask) => (VPXORDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedXorUint32x4 x y mask) => (VPXORDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedXorUint32x8 x y mask) => (VPXORDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedXorUint64x2 x y mask) => (VPXORQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedXorUint64x4 x y mask) => (VPXORQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedXorUint64x8 x y mask) => (VPXORQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaxFloat32x16 ...) => (VMAXPS512 ...) +(MaxFloat32x4 ...) => (VMAXPS128 ...) +(MaxFloat32x8 ...) => (VMAXPS256 ...) +(MaxFloat64x2 ...) => (VMAXPD128 ...) +(MaxFloat64x4 ...) => (VMAXPD256 ...) +(MaxFloat64x8 ...) => (VMAXPD512 ...) +(MaxInt16x16 ...) => (VPMAXSW256 ...) +(MaxInt16x32 ...) => (VPMAXSW512 ...) +(MaxInt16x8 ...) => (VPMAXSW128 ...) +(MaxInt32x16 ...) => (VPMAXSD512 ...) +(MaxInt32x4 ...) => (VPMAXSD128 ...) +(MaxInt32x8 ...) => (VPMAXSD256 ...) +(MaxInt64x2 ...) => (VPMAXSQ128 ...) +(MaxInt64x4 ...) => (VPMAXSQ256 ...) +(MaxInt64x8 ...) => (VPMAXSQ512 ...) +(MaxInt8x16 ...) => (VPMAXSB128 ...) +(MaxInt8x32 ...) => (VPMAXSB256 ...) +(MaxInt8x64 ...) => (VPMAXSB512 ...) +(MaxUint16x16 ...) => (VPMAXUW256 ...) +(MaxUint16x32 ...) => (VPMAXUW512 ...) +(MaxUint16x8 ...) => (VPMAXUW128 ...) +(MaxUint32x16 ...) => (VPMAXUD512 ...) +(MaxUint32x4 ...) => (VPMAXUD128 ...) +(MaxUint32x8 ...) => (VPMAXUD256 ...) +(MaxUint64x2 ...) => (VPMAXUQ128 ...) +(MaxUint64x4 ...) => (VPMAXUQ256 ...) +(MaxUint64x8 ...) => (VPMAXUQ512 ...) +(MaxUint8x16 ...) => (VPMAXUB128 ...) +(MaxUint8x32 ...) => (VPMAXUB256 ...) +(MaxUint8x64 ...) => (VPMAXUB512 ...) +(MinFloat32x16 ...) => (VMINPS512 ...) +(MinFloat32x4 ...) => (VMINPS128 ...) +(MinFloat32x8 ...) => (VMINPS256 ...) +(MinFloat64x2 ...) 
=> (VMINPD128 ...) +(MinFloat64x4 ...) => (VMINPD256 ...) +(MinFloat64x8 ...) => (VMINPD512 ...) +(MinInt16x16 ...) => (VPMINSW256 ...) +(MinInt16x32 ...) => (VPMINSW512 ...) +(MinInt16x8 ...) => (VPMINSW128 ...) +(MinInt32x16 ...) => (VPMINSD512 ...) +(MinInt32x4 ...) => (VPMINSD128 ...) +(MinInt32x8 ...) => (VPMINSD256 ...) +(MinInt64x2 ...) => (VPMINSQ128 ...) +(MinInt64x4 ...) => (VPMINSQ256 ...) +(MinInt64x8 ...) => (VPMINSQ512 ...) +(MinInt8x16 ...) => (VPMINSB128 ...) +(MinInt8x32 ...) => (VPMINSB256 ...) +(MinInt8x64 ...) => (VPMINSB512 ...) +(MinUint16x16 ...) => (VPMINUW256 ...) +(MinUint16x32 ...) => (VPMINUW512 ...) +(MinUint16x8 ...) => (VPMINUW128 ...) +(MinUint32x16 ...) => (VPMINUD512 ...) +(MinUint32x4 ...) => (VPMINUD128 ...) +(MinUint32x8 ...) => (VPMINUD256 ...) +(MinUint64x2 ...) => (VPMINUQ128 ...) +(MinUint64x4 ...) => (VPMINUQ256 ...) +(MinUint64x8 ...) => (VPMINUQ512 ...) +(MinUint8x16 ...) => (VPMINUB128 ...) +(MinUint8x32 ...) => (VPMINUB256 ...) +(MinUint8x64 ...) => (VPMINUB512 ...) +(MulFloat32x16 ...) => (VMULPS512 ...) +(MulFloat32x4 ...) => (VMULPS128 ...) +(MulFloat32x8 ...) => (VMULPS256 ...) +(MulFloat64x2 ...) => (VMULPD128 ...) +(MulFloat64x4 ...) => (VMULPD256 ...) +(MulFloat64x8 ...) => (VMULPD512 ...) +(MulByPowOf2Float32x16 ...) => (VSCALEFPS512 ...) +(MulByPowOf2Float32x4 ...) => (VSCALEFPS128 ...) +(MulByPowOf2Float32x8 ...) => (VSCALEFPS256 ...) +(MulByPowOf2Float64x2 ...) => (VSCALEFPD128 ...) +(MulByPowOf2Float64x4 ...) => (VSCALEFPD256 ...) +(MulByPowOf2Float64x8 ...) => (VSCALEFPD512 ...) +(MulEvenWidenInt32x4 ...) => (VPMULDQ128 ...) +(MulEvenWidenInt32x8 ...) => (VPMULDQ256 ...) +(MulEvenWidenInt64x2 ...) => (VPMULDQ128 ...) +(MulEvenWidenInt64x4 ...) => (VPMULDQ256 ...) +(MulEvenWidenInt64x8 ...) => (VPMULDQ512 ...) +(MulEvenWidenUint32x4 ...) => (VPMULUDQ128 ...) +(MulEvenWidenUint32x8 ...) => (VPMULUDQ256 ...) +(MulEvenWidenUint64x2 ...) => (VPMULUDQ128 ...) +(MulEvenWidenUint64x4 ...) => (VPMULUDQ256 ...) +(MulEvenWidenUint64x8 ...) => (VPMULUDQ512 ...) +(MulHighInt16x16 ...) => (VPMULHW256 ...) +(MulHighInt16x32 ...) => (VPMULHW512 ...) +(MulHighInt16x8 ...) => (VPMULHW128 ...) +(MulHighUint16x16 ...) => (VPMULHUW256 ...) +(MulHighUint16x32 ...) => (VPMULHUW512 ...) +(MulHighUint16x8 ...) => (VPMULHUW128 ...) +(MulLowInt16x16 ...) => (VPMULLW256 ...) +(MulLowInt16x32 ...) => (VPMULLW512 ...) +(MulLowInt16x8 ...) => (VPMULLW128 ...) +(MulLowInt32x16 ...) => (VPMULLD512 ...) +(MulLowInt32x4 ...) => (VPMULLD128 ...) +(MulLowInt32x8 ...) => (VPMULLD256 ...) +(MulLowInt64x2 ...) => (VPMULLQ128 ...) +(MulLowInt64x4 ...) => (VPMULLQ256 ...) +(MulLowInt64x8 ...) => (VPMULLQ512 ...) 
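The Masked* rules earlier in this hunk all lower the same way: the generic mask argument is first converted into an AVX-512 K-register value with a VPMOVVec*ToM op, and that conversion becomes the trailing argument of the masked machine op. As a rough sketch of the matcher the rule generator would emit for one such rule, (MaskedSubInt32x4 x y mask) => (VPSUBDMasked128 x y (VPMOVVec32x4ToM mask)); the mask-type constant (types.TypeMask) and the destination file (rewriteAMD64.go) are assumptions here, not taken from this patch.

// Sketch only (assumed to land in rewriteAMD64.go); types.TypeMask is an
// assumed placeholder for the SSA type of AVX-512 mask (K) registers.
func rewriteValueAMD64_OpMaskedSubInt32x4(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (MaskedSubInt32x4 x y mask)
	// result: (VPSUBDMasked128 x y (VPMOVVec32x4ToM mask))
	for {
		x := v_0
		y := v_1
		mask := v_2
		v.reset(OpAMD64VPSUBDMasked128)
		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
		v0.AddArg(mask)
		v.AddArg3(x, y, v0)
		return true
	}
}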
+(NotEqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [4] x y)) +(NotEqualFloat32x4 x y) => (VCMPPS128 [4] x y) +(NotEqualFloat32x8 x y) => (VCMPPS256 [4] x y) +(NotEqualFloat64x2 x y) => (VCMPPD128 [4] x y) +(NotEqualFloat64x4 x y) => (VCMPPD256 [4] x y) +(NotEqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [4] x y)) +(NotEqualInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [4] x y)) +(NotEqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [4] x y)) +(NotEqualInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [4] x y)) +(NotEqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [4] x y)) +(NotEqualInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [4] x y)) +(NotEqualInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [4] x y)) +(NotEqualInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [4] x y)) +(NotEqualInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [4] x y)) +(NotEqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [4] x y)) +(NotEqualInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [4] x y)) +(NotEqualInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [4] x y)) +(NotEqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [4] x y)) +(NotEqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [4] x y)) +(NotEqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [4] x y)) +(NotEqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [4] x y)) +(NotEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [4] x y)) +(NotEqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [4] x y)) +(NotEqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [4] x y)) +(NotEqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [4] x y)) +(NotEqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [4] x y)) +(NotEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [4] x y)) +(NotEqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [4] x y)) +(NotEqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [4] x y)) +(NotEqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [4] x y)) +(OrFloat32x16 ...) => (VORPS512 ...) +(OrFloat32x4 ...) => (VORPS128 ...) +(OrFloat32x8 ...) => (VORPS256 ...) +(OrFloat64x2 ...) => (VORPD128 ...) +(OrFloat64x4 ...) => (VORPD256 ...) +(OrFloat64x8 ...) => (VORPD512 ...) +(OrInt16x16 ...) => (VPOR256 ...) +(OrInt16x8 ...) => (VPOR128 ...) +(OrInt32x16 ...) => (VPORD512 ...) +(OrInt32x4 ...) => (VPOR128 ...) +(OrInt32x8 ...) => (VPOR256 ...) +(OrInt64x2 ...) => (VPOR128 ...) +(OrInt64x4 ...) => (VPOR256 ...) +(OrInt64x8 ...) => (VPORQ512 ...) +(OrInt8x16 ...) => (VPOR128 ...) +(OrInt8x32 ...) => (VPOR256 ...) +(OrUint16x16 ...) => (VPOR256 ...) +(OrUint16x8 ...) => (VPOR128 ...) +(OrUint32x16 ...) => (VPORD512 ...) +(OrUint32x4 ...) => (VPOR128 ...) +(OrUint32x8 ...) => (VPOR256 ...) +(OrUint64x2 ...) => (VPOR128 ...) +(OrUint64x4 ...) => (VPOR256 ...) +(OrUint64x8 ...) => (VPORQ512 ...) +(OrUint8x16 ...) => (VPOR128 ...) +(OrUint8x32 ...) => (VPOR256 ...) +(PairwiseAddFloat32x4 ...) => (VHADDPS128 ...) +(PairwiseAddFloat32x8 ...) => (VHADDPS256 ...) +(PairwiseAddFloat64x2 ...) => (VHADDPD128 ...) +(PairwiseAddFloat64x4 ...) => (VHADDPD256 ...) +(PairwiseAddInt16x16 ...) => (VPHADDW256 ...) +(PairwiseAddInt16x8 ...) => (VPHADDW128 ...) +(PairwiseAddInt32x4 ...) => (VPHADDD128 ...) +(PairwiseAddInt32x8 ...) => (VPHADDD256 ...) +(PairwiseAddUint16x16 ...) => (VPHADDW256 ...) +(PairwiseAddUint16x8 ...) => (VPHADDW128 ...) +(PairwiseAddUint32x4 ...) => (VPHADDD128 ...) +(PairwiseAddUint32x8 ...) => (VPHADDD256 ...) +(PairwiseSubFloat32x4 ...) => (VHSUBPS128 ...) +(PairwiseSubFloat32x8 ...) => (VHSUBPS256 ...) +(PairwiseSubFloat64x2 ...) => (VHSUBPD128 ...) 
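The small AuxInt immediates on the VCMPP*/VPCMP* ops above select the comparison predicate, matching the hardware predicate encodings. The constants below merely restate the mapping used throughout this rules file; the names are illustrative only and do not appear in the generated code.

// Predicate immediates as used by the VCMPP*/VPCMP* rules in this file.
const (
	cmpLT    = 1 // Less
	cmpLE    = 2 // LessEqual
	cmpUNORD = 3 // IsNan (unordered compare, floating point only)
	cmpNEQ   = 4 // NotEqual
	cmpGE    = 5 // GreaterEqual
	cmpGT    = 6 // Greater
)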
+(PairwiseSubFloat64x4 ...) => (VHSUBPD256 ...) +(PairwiseSubInt16x16 ...) => (VPHSUBW256 ...) +(PairwiseSubInt16x8 ...) => (VPHSUBW128 ...) +(PairwiseSubInt32x4 ...) => (VPHSUBD128 ...) +(PairwiseSubInt32x8 ...) => (VPHSUBD256 ...) +(PairwiseSubUint16x16 ...) => (VPHSUBW256 ...) +(PairwiseSubUint16x8 ...) => (VPHSUBW128 ...) +(PairwiseSubUint32x4 ...) => (VPHSUBD128 ...) +(PairwiseSubUint32x8 ...) => (VPHSUBD256 ...) +(PopCountInt16x16 ...) => (VPOPCNTW256 ...) +(PopCountInt16x32 ...) => (VPOPCNTW512 ...) +(PopCountInt16x8 ...) => (VPOPCNTW128 ...) +(PopCountInt32x16 ...) => (VPOPCNTD512 ...) +(PopCountInt32x4 ...) => (VPOPCNTD128 ...) +(PopCountInt32x8 ...) => (VPOPCNTD256 ...) +(PopCountInt64x2 ...) => (VPOPCNTQ128 ...) +(PopCountInt64x4 ...) => (VPOPCNTQ256 ...) +(PopCountInt64x8 ...) => (VPOPCNTQ512 ...) +(PopCountInt8x16 ...) => (VPOPCNTB128 ...) +(PopCountInt8x32 ...) => (VPOPCNTB256 ...) +(PopCountInt8x64 ...) => (VPOPCNTB512 ...) +(PopCountUint16x16 ...) => (VPOPCNTW256 ...) +(PopCountUint16x32 ...) => (VPOPCNTW512 ...) +(PopCountUint16x8 ...) => (VPOPCNTW128 ...) +(PopCountUint32x16 ...) => (VPOPCNTD512 ...) +(PopCountUint32x4 ...) => (VPOPCNTD128 ...) +(PopCountUint32x8 ...) => (VPOPCNTD256 ...) +(PopCountUint64x2 ...) => (VPOPCNTQ128 ...) +(PopCountUint64x4 ...) => (VPOPCNTQ256 ...) +(PopCountUint64x8 ...) => (VPOPCNTQ512 ...) +(PopCountUint8x16 ...) => (VPOPCNTB128 ...) +(PopCountUint8x32 ...) => (VPOPCNTB256 ...) +(PopCountUint8x64 ...) => (VPOPCNTB512 ...) +(SaturatedAddInt16x16 ...) => (VPADDSW256 ...) +(SaturatedAddInt16x32 ...) => (VPADDSW512 ...) +(SaturatedAddInt16x8 ...) => (VPADDSW128 ...) +(SaturatedAddInt8x16 ...) => (VPADDSB128 ...) +(SaturatedAddInt8x32 ...) => (VPADDSB256 ...) +(SaturatedAddInt8x64 ...) => (VPADDSB512 ...) +(SaturatedAddUint16x16 ...) => (VPADDSW256 ...) +(SaturatedAddUint16x32 ...) => (VPADDSW512 ...) +(SaturatedAddUint16x8 ...) => (VPADDSW128 ...) +(SaturatedAddUint8x16 ...) => (VPADDSB128 ...) +(SaturatedAddUint8x32 ...) => (VPADDSB256 ...) +(SaturatedAddUint8x64 ...) => (VPADDSB512 ...) +(SaturatedPairwiseAddInt16x16 ...) => (VPHADDSW256 ...) +(SaturatedPairwiseAddInt16x8 ...) => (VPHADDSW128 ...) +(SaturatedPairwiseSubInt16x16 ...) => (VPHSUBSW256 ...) +(SaturatedPairwiseSubInt16x8 ...) => (VPHSUBSW128 ...) +(SaturatedSubInt16x16 ...) => (VPSUBSW256 ...) +(SaturatedSubInt16x32 ...) => (VPSUBSW512 ...) +(SaturatedSubInt16x8 ...) => (VPSUBSW128 ...) +(SaturatedSubInt8x16 ...) => (VPSUBSB128 ...) +(SaturatedSubInt8x32 ...) => (VPSUBSB256 ...) +(SaturatedSubInt8x64 ...) => (VPSUBSB512 ...) +(SaturatedSubUint16x16 ...) => (VPSUBSW256 ...) +(SaturatedSubUint16x32 ...) => (VPSUBSW512 ...) +(SaturatedSubUint16x8 ...) => (VPSUBSW128 ...) +(SaturatedSubUint8x16 ...) => (VPSUBSB128 ...) +(SaturatedSubUint8x32 ...) => (VPSUBSB256 ...) +(SaturatedSubUint8x64 ...) => (VPSUBSB512 ...) +(SignInt16x16 ...) => (VPSIGNW256 ...) +(SignInt16x8 ...) => (VPSIGNW128 ...) +(SignInt32x4 ...) => (VPSIGND128 ...) +(SignInt32x8 ...) => (VPSIGND256 ...) +(SignInt8x16 ...) => (VPSIGNB128 ...) +(SignInt8x32 ...) => (VPSIGNB256 ...) +(SqrtFloat32x16 ...) => (VSQRTPS512 ...) +(SqrtFloat32x4 ...) => (VSQRTPS128 ...) +(SqrtFloat32x8 ...) => (VSQRTPS256 ...) +(SqrtFloat64x2 ...) => (VSQRTPD128 ...) +(SqrtFloat64x4 ...) => (VSQRTPD256 ...) +(SqrtFloat64x8 ...) => (VSQRTPD512 ...) +(SubFloat32x16 ...) => (VADDPS512 ...) +(SubFloat32x4 ...) => (VADDPS128 ...) +(SubFloat32x8 ...) => (VADDPS256 ...) +(SubFloat64x2 ...) => (VADDPD128 ...) +(SubFloat64x4 ...) => (VADDPD256 ...) 
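The SaturatedAdd*/SaturatedSub* ops above differ from plain Add/Sub in that results are clamped to the element type's range instead of wrapping. A scalar model of one lane of VPADDSW (the signed 16-bit case used by SaturatedAddInt16x8 and friends), runnable on its own:

package main

import (
	"fmt"
	"math"
)

// satAddInt16 models one lane of VPADDSW: add with clamping to [-32768, 32767].
func satAddInt16(a, b int16) int16 {
	s := int32(a) + int32(b)
	if s > math.MaxInt16 {
		return math.MaxInt16
	}
	if s < math.MinInt16 {
		return math.MinInt16
	}
	return int16(s)
}

func main() {
	fmt.Println(satAddInt16(30000, 10000)) // prints 32767: clamped, not wrapped
}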
+(SubFloat64x8 ...) => (VADDPD512 ...) +(SubInt16x16 ...) => (VPSUBW256 ...) +(SubInt16x32 ...) => (VPSUBW512 ...) +(SubInt16x8 ...) => (VPSUBW128 ...) +(SubInt32x16 ...) => (VPSUBD512 ...) +(SubInt32x4 ...) => (VPSUBD128 ...) +(SubInt32x8 ...) => (VPSUBD256 ...) +(SubInt64x2 ...) => (VPSUBQ128 ...) +(SubInt64x4 ...) => (VPSUBQ256 ...) +(SubInt64x8 ...) => (VPSUBQ512 ...) +(SubInt8x16 ...) => (VPSUBB128 ...) +(SubInt8x32 ...) => (VPSUBB256 ...) +(SubInt8x64 ...) => (VPSUBB512 ...) +(SubUint16x16 ...) => (VPSUBW256 ...) +(SubUint16x32 ...) => (VPSUBW512 ...) +(SubUint16x8 ...) => (VPSUBW128 ...) +(SubUint32x16 ...) => (VPSUBD512 ...) +(SubUint32x4 ...) => (VPSUBD128 ...) +(SubUint32x8 ...) => (VPSUBD256 ...) +(SubUint64x2 ...) => (VPSUBQ128 ...) +(SubUint64x4 ...) => (VPSUBQ256 ...) +(SubUint64x8 ...) => (VPSUBQ512 ...) +(SubUint8x16 ...) => (VPSUBB128 ...) +(SubUint8x32 ...) => (VPSUBB256 ...) +(SubUint8x64 ...) => (VPSUBB512 ...) +(XorFloat32x16 ...) => (VXORPS512 ...) +(XorFloat32x4 ...) => (VXORPS128 ...) +(XorFloat32x8 ...) => (VXORPS256 ...) +(XorFloat64x2 ...) => (VXORPD128 ...) +(XorFloat64x4 ...) => (VXORPD256 ...) +(XorFloat64x8 ...) => (VXORPD512 ...) +(XorInt16x16 ...) => (VPXOR256 ...) +(XorInt16x8 ...) => (VPXOR128 ...) +(XorInt32x16 ...) => (VPXORD512 ...) +(XorInt32x4 ...) => (VPXOR128 ...) +(XorInt32x8 ...) => (VPXOR256 ...) +(XorInt64x2 ...) => (VPXOR128 ...) +(XorInt64x4 ...) => (VPXOR256 ...) +(XorInt64x8 ...) => (VPXORQ512 ...) +(XorInt8x16 ...) => (VPXOR128 ...) +(XorInt8x32 ...) => (VPXOR256 ...) +(XorUint16x16 ...) => (VPXOR256 ...) +(XorUint16x8 ...) => (VPXOR128 ...) +(XorUint32x16 ...) => (VPXORD512 ...) +(XorUint32x4 ...) => (VPXOR128 ...) +(XorUint32x8 ...) => (VPXOR256 ...) +(XorUint64x2 ...) => (VPXOR128 ...) +(XorUint64x4 ...) => (VPXOR256 ...) +(XorUint64x8 ...) => (VPXORQ512 ...) +(XorUint8x16 ...) => (VPXOR128 ...) +(XorUint8x32 ...) => (VPXOR256 ...) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index a27ed4afb9ef70..b9709ca819210f 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -1,591 +1,607 @@ // Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. 
package main -func simdAMD64Ops(fp1fp1, fp2fp1, fp2m1, fp1m1fp1, fp2m1fp1, fp2m1m1, fp3fp1, fp3m1fp1 regInfo) []opData { +func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 regInfo) []opData { return []opData{ - {name: "VADDPS512", argLength: 2, reg: fp2fp1, asm: "VADDPS", commutative: true, typ: "Vec512"}, - {name: "VANDPS512", argLength: 2, reg: fp2fp1, asm: "VANDPS", commutative: true, typ: "Vec512"}, - {name: "VANDNPS512", argLength: 2, reg: fp2fp1, asm: "VANDNPS", commutative: true, typ: "Vec512"}, - {name: "VRCP14PS512", argLength: 1, reg: fp1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec512"}, - {name: "VRSQRT14PS512", argLength: 1, reg: fp1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec512"}, - {name: "VDIVPS512", argLength: 2, reg: fp2fp1, asm: "VDIVPS", commutative: false, typ: "Vec512"}, - {name: "VANDPSMasked512", argLength: 3, reg: fp2m1fp1, asm: "VANDPS", commutative: true, typ: "Vec512"}, - {name: "VANDNPSMasked512", argLength: 3, reg: fp2m1fp1, asm: "VANDNPS", commutative: true, typ: "Vec512"}, - {name: "VRCP14PSMasked512", argLength: 2, reg: fp1m1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec512"}, - {name: "VRSQRT14PSMasked512", argLength: 2, reg: fp1m1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec512"}, - {name: "VDIVPSMasked512", argLength: 3, reg: fp2m1fp1, asm: "VDIVPS", commutative: false, typ: "Vec512"}, - {name: "VMAXPSMasked512", argLength: 3, reg: fp2m1fp1, asm: "VMAXPS", commutative: true, typ: "Vec512"}, - {name: "VMINPSMasked512", argLength: 3, reg: fp2m1fp1, asm: "VMINPS", commutative: true, typ: "Vec512"}, - {name: "VMULPSMasked512", argLength: 3, reg: fp2m1fp1, asm: "VMULPS", commutative: true, typ: "Vec512"}, - {name: "VSCALEFPSMasked512", argLength: 3, reg: fp2m1fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec512"}, - {name: "VORPSMasked512", argLength: 3, reg: fp2m1fp1, asm: "VORPS", commutative: true, typ: "Vec512"}, - {name: "VSQRTPSMasked512", argLength: 2, reg: fp1m1fp1, asm: "VSQRTPS", commutative: false, typ: "Vec512"}, - {name: "VADDPSMasked512", argLength: 3, reg: fp2m1fp1, asm: "VADDPS", commutative: false, typ: "Vec512"}, - {name: "VXORPSMasked512", argLength: 3, reg: fp2m1fp1, asm: "VXORPS", commutative: true, typ: "Vec512"}, - {name: "VMAXPS512", argLength: 2, reg: fp2fp1, asm: "VMAXPS", commutative: true, typ: "Vec512"}, - {name: "VMINPS512", argLength: 2, reg: fp2fp1, asm: "VMINPS", commutative: true, typ: "Vec512"}, - {name: "VMULPS512", argLength: 2, reg: fp2fp1, asm: "VMULPS", commutative: true, typ: "Vec512"}, - {name: "VSCALEFPS512", argLength: 2, reg: fp2fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec512"}, - {name: "VORPS512", argLength: 2, reg: fp2fp1, asm: "VORPS", commutative: true, typ: "Vec512"}, - {name: "VSQRTPS512", argLength: 1, reg: fp1fp1, asm: "VSQRTPS", commutative: false, typ: "Vec512"}, - {name: "VXORPS512", argLength: 2, reg: fp2fp1, asm: "VXORPS", commutative: true, typ: "Vec512"}, - {name: "VANDPS128", argLength: 2, reg: fp2fp1, asm: "VANDPS", commutative: true, typ: "Vec128"}, - {name: "VANDNPS128", argLength: 2, reg: fp2fp1, asm: "VANDNPS", commutative: true, typ: "Vec128"}, - {name: "VRCP14PS128", argLength: 1, reg: fp1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec128"}, - {name: "VRSQRTPS128", argLength: 1, reg: fp1fp1, asm: "VRSQRTPS", commutative: false, typ: "Vec128"}, - {name: "VDIVPS128", argLength: 2, reg: fp2fp1, asm: "VDIVPS", commutative: false, typ: "Vec128"}, - {name: "VADDPSMasked128", argLength: 3, reg: fp2m1fp1, asm: "VADDPS", 
commutative: true, typ: "Vec128"}, - {name: "VANDPSMasked128", argLength: 3, reg: fp2m1fp1, asm: "VANDPS", commutative: true, typ: "Vec128"}, - {name: "VANDNPSMasked128", argLength: 3, reg: fp2m1fp1, asm: "VANDNPS", commutative: true, typ: "Vec128"}, - {name: "VRCP14PSMasked128", argLength: 2, reg: fp1m1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec128"}, - {name: "VRSQRT14PSMasked128", argLength: 2, reg: fp1m1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec128"}, - {name: "VDIVPSMasked128", argLength: 3, reg: fp2m1fp1, asm: "VDIVPS", commutative: false, typ: "Vec128"}, - {name: "VMAXPSMasked128", argLength: 3, reg: fp2m1fp1, asm: "VMAXPS", commutative: true, typ: "Vec128"}, - {name: "VMINPSMasked128", argLength: 3, reg: fp2m1fp1, asm: "VMINPS", commutative: true, typ: "Vec128"}, - {name: "VMULPSMasked128", argLength: 3, reg: fp2m1fp1, asm: "VMULPS", commutative: true, typ: "Vec128"}, - {name: "VSCALEFPSMasked128", argLength: 3, reg: fp2m1fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec128"}, - {name: "VORPSMasked128", argLength: 3, reg: fp2m1fp1, asm: "VORPS", commutative: true, typ: "Vec128"}, - {name: "VSQRTPSMasked128", argLength: 2, reg: fp1m1fp1, asm: "VSQRTPS", commutative: false, typ: "Vec128"}, - {name: "VXORPSMasked128", argLength: 3, reg: fp2m1fp1, asm: "VXORPS", commutative: true, typ: "Vec128"}, - {name: "VMAXPS128", argLength: 2, reg: fp2fp1, asm: "VMAXPS", commutative: true, typ: "Vec128"}, - {name: "VMINPS128", argLength: 2, reg: fp2fp1, asm: "VMINPS", commutative: true, typ: "Vec128"}, - {name: "VMULPS128", argLength: 2, reg: fp2fp1, asm: "VMULPS", commutative: true, typ: "Vec128"}, - {name: "VSCALEFPS128", argLength: 2, reg: fp2fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec128"}, - {name: "VORPS128", argLength: 2, reg: fp2fp1, asm: "VORPS", commutative: true, typ: "Vec128"}, - {name: "VHADDPS128", argLength: 2, reg: fp2fp1, asm: "VHADDPS", commutative: false, typ: "Vec128"}, - {name: "VHSUBPS128", argLength: 2, reg: fp2fp1, asm: "VHSUBPS", commutative: false, typ: "Vec128"}, - {name: "VSQRTPS128", argLength: 1, reg: fp1fp1, asm: "VSQRTPS", commutative: false, typ: "Vec128"}, - {name: "VADDPS128", argLength: 2, reg: fp2fp1, asm: "VADDPS", commutative: false, typ: "Vec128"}, - {name: "VXORPS128", argLength: 2, reg: fp2fp1, asm: "VXORPS", commutative: true, typ: "Vec128"}, - {name: "VADDPS256", argLength: 2, reg: fp2fp1, asm: "VADDPS", commutative: true, typ: "Vec256"}, - {name: "VANDPS256", argLength: 2, reg: fp2fp1, asm: "VANDPS", commutative: true, typ: "Vec256"}, - {name: "VANDNPS256", argLength: 2, reg: fp2fp1, asm: "VANDNPS", commutative: true, typ: "Vec256"}, - {name: "VRCP14PS256", argLength: 1, reg: fp1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec256"}, - {name: "VRSQRTPS256", argLength: 1, reg: fp1fp1, asm: "VRSQRTPS", commutative: false, typ: "Vec256"}, - {name: "VDIVPS256", argLength: 2, reg: fp2fp1, asm: "VDIVPS", commutative: false, typ: "Vec256"}, - {name: "VANDPSMasked256", argLength: 3, reg: fp2m1fp1, asm: "VANDPS", commutative: true, typ: "Vec256"}, - {name: "VANDNPSMasked256", argLength: 3, reg: fp2m1fp1, asm: "VANDNPS", commutative: true, typ: "Vec256"}, - {name: "VRCP14PSMasked256", argLength: 2, reg: fp1m1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec256"}, - {name: "VRSQRT14PSMasked256", argLength: 2, reg: fp1m1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec256"}, - {name: "VDIVPSMasked256", argLength: 3, reg: fp2m1fp1, asm: "VDIVPS", commutative: false, typ: "Vec256"}, - {name: "VMAXPSMasked256", argLength: 3, reg: 
fp2m1fp1, asm: "VMAXPS", commutative: true, typ: "Vec256"}, - {name: "VMINPSMasked256", argLength: 3, reg: fp2m1fp1, asm: "VMINPS", commutative: true, typ: "Vec256"}, - {name: "VMULPSMasked256", argLength: 3, reg: fp2m1fp1, asm: "VMULPS", commutative: true, typ: "Vec256"}, - {name: "VSCALEFPSMasked256", argLength: 3, reg: fp2m1fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec256"}, - {name: "VORPSMasked256", argLength: 3, reg: fp2m1fp1, asm: "VORPS", commutative: true, typ: "Vec256"}, - {name: "VSQRTPSMasked256", argLength: 2, reg: fp1m1fp1, asm: "VSQRTPS", commutative: false, typ: "Vec256"}, - {name: "VADDPSMasked256", argLength: 3, reg: fp2m1fp1, asm: "VADDPS", commutative: false, typ: "Vec256"}, - {name: "VXORPSMasked256", argLength: 3, reg: fp2m1fp1, asm: "VXORPS", commutative: true, typ: "Vec256"}, - {name: "VMAXPS256", argLength: 2, reg: fp2fp1, asm: "VMAXPS", commutative: true, typ: "Vec256"}, - {name: "VMINPS256", argLength: 2, reg: fp2fp1, asm: "VMINPS", commutative: true, typ: "Vec256"}, - {name: "VMULPS256", argLength: 2, reg: fp2fp1, asm: "VMULPS", commutative: true, typ: "Vec256"}, - {name: "VSCALEFPS256", argLength: 2, reg: fp2fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec256"}, - {name: "VORPS256", argLength: 2, reg: fp2fp1, asm: "VORPS", commutative: true, typ: "Vec256"}, - {name: "VHADDPS256", argLength: 2, reg: fp2fp1, asm: "VHADDPS", commutative: false, typ: "Vec256"}, - {name: "VHSUBPS256", argLength: 2, reg: fp2fp1, asm: "VHSUBPS", commutative: false, typ: "Vec256"}, - {name: "VSQRTPS256", argLength: 1, reg: fp1fp1, asm: "VSQRTPS", commutative: false, typ: "Vec256"}, - {name: "VXORPS256", argLength: 2, reg: fp2fp1, asm: "VXORPS", commutative: true, typ: "Vec256"}, - {name: "VADDPD128", argLength: 2, reg: fp2fp1, asm: "VADDPD", commutative: true, typ: "Vec128"}, - {name: "VANDPD128", argLength: 2, reg: fp2fp1, asm: "VANDPD", commutative: true, typ: "Vec128"}, - {name: "VANDNPD128", argLength: 2, reg: fp2fp1, asm: "VANDNPD", commutative: true, typ: "Vec128"}, - {name: "VRCP14PD128", argLength: 1, reg: fp1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec128"}, - {name: "VRSQRT14PD128", argLength: 1, reg: fp1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec128"}, - {name: "VDIVPD128", argLength: 2, reg: fp2fp1, asm: "VDIVPD", commutative: false, typ: "Vec128"}, - {name: "VADDPDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VADDPD", commutative: true, typ: "Vec128"}, - {name: "VANDPDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VANDPD", commutative: true, typ: "Vec128"}, - {name: "VANDNPDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VANDNPD", commutative: true, typ: "Vec128"}, - {name: "VRCP14PDMasked128", argLength: 2, reg: fp1m1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec128"}, - {name: "VRSQRT14PDMasked128", argLength: 2, reg: fp1m1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec128"}, - {name: "VDIVPDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VDIVPD", commutative: false, typ: "Vec128"}, - {name: "VMAXPDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VMAXPD", commutative: true, typ: "Vec128"}, - {name: "VMINPDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VMINPD", commutative: true, typ: "Vec128"}, - {name: "VMULPDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VMULPD", commutative: true, typ: "Vec128"}, - {name: "VSCALEFPDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VSCALEFPD", commutative: false, typ: "Vec128"}, - {name: "VORPDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VORPD", commutative: true, typ: "Vec128"}, - {name: 
"VSQRTPDMasked128", argLength: 2, reg: fp1m1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec128"}, - {name: "VXORPDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VXORPD", commutative: true, typ: "Vec128"}, - {name: "VMAXPD128", argLength: 2, reg: fp2fp1, asm: "VMAXPD", commutative: true, typ: "Vec128"}, - {name: "VMINPD128", argLength: 2, reg: fp2fp1, asm: "VMINPD", commutative: true, typ: "Vec128"}, - {name: "VMULPD128", argLength: 2, reg: fp2fp1, asm: "VMULPD", commutative: true, typ: "Vec128"}, - {name: "VSCALEFPD128", argLength: 2, reg: fp2fp1, asm: "VSCALEFPD", commutative: false, typ: "Vec128"}, - {name: "VORPD128", argLength: 2, reg: fp2fp1, asm: "VORPD", commutative: true, typ: "Vec128"}, - {name: "VHADDPD128", argLength: 2, reg: fp2fp1, asm: "VHADDPD", commutative: false, typ: "Vec128"}, - {name: "VHSUBPD128", argLength: 2, reg: fp2fp1, asm: "VHSUBPD", commutative: false, typ: "Vec128"}, - {name: "VSQRTPD128", argLength: 1, reg: fp1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec128"}, - {name: "VXORPD128", argLength: 2, reg: fp2fp1, asm: "VXORPD", commutative: true, typ: "Vec128"}, - {name: "VADDPD256", argLength: 2, reg: fp2fp1, asm: "VADDPD", commutative: true, typ: "Vec256"}, - {name: "VANDPD256", argLength: 2, reg: fp2fp1, asm: "VANDPD", commutative: true, typ: "Vec256"}, - {name: "VANDNPD256", argLength: 2, reg: fp2fp1, asm: "VANDNPD", commutative: true, typ: "Vec256"}, - {name: "VRCP14PD256", argLength: 1, reg: fp1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec256"}, - {name: "VRSQRT14PD256", argLength: 1, reg: fp1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec256"}, - {name: "VDIVPD256", argLength: 2, reg: fp2fp1, asm: "VDIVPD", commutative: false, typ: "Vec256"}, - {name: "VANDPDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VANDPD", commutative: true, typ: "Vec256"}, - {name: "VANDNPDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VANDNPD", commutative: true, typ: "Vec256"}, - {name: "VRCP14PDMasked256", argLength: 2, reg: fp1m1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec256"}, - {name: "VRSQRT14PDMasked256", argLength: 2, reg: fp1m1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec256"}, - {name: "VDIVPDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VDIVPD", commutative: false, typ: "Vec256"}, - {name: "VMAXPDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VMAXPD", commutative: true, typ: "Vec256"}, - {name: "VMINPDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VMINPD", commutative: true, typ: "Vec256"}, - {name: "VMULPDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VMULPD", commutative: true, typ: "Vec256"}, - {name: "VSCALEFPDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VSCALEFPD", commutative: false, typ: "Vec256"}, - {name: "VORPDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VORPD", commutative: true, typ: "Vec256"}, - {name: "VSQRTPDMasked256", argLength: 2, reg: fp1m1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec256"}, - {name: "VADDPDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VADDPD", commutative: false, typ: "Vec256"}, - {name: "VXORPDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VXORPD", commutative: true, typ: "Vec256"}, - {name: "VMAXPD256", argLength: 2, reg: fp2fp1, asm: "VMAXPD", commutative: true, typ: "Vec256"}, - {name: "VMINPD256", argLength: 2, reg: fp2fp1, asm: "VMINPD", commutative: true, typ: "Vec256"}, - {name: "VMULPD256", argLength: 2, reg: fp2fp1, asm: "VMULPD", commutative: true, typ: "Vec256"}, - {name: "VSCALEFPD256", argLength: 2, reg: fp2fp1, asm: "VSCALEFPD", commutative: false, typ: "Vec256"}, - {name: 
"VORPD256", argLength: 2, reg: fp2fp1, asm: "VORPD", commutative: true, typ: "Vec256"}, - {name: "VHADDPD256", argLength: 2, reg: fp2fp1, asm: "VHADDPD", commutative: false, typ: "Vec256"}, - {name: "VHSUBPD256", argLength: 2, reg: fp2fp1, asm: "VHSUBPD", commutative: false, typ: "Vec256"}, - {name: "VSQRTPD256", argLength: 1, reg: fp1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec256"}, - {name: "VXORPD256", argLength: 2, reg: fp2fp1, asm: "VXORPD", commutative: true, typ: "Vec256"}, - {name: "VANDPD512", argLength: 2, reg: fp2fp1, asm: "VANDPD", commutative: true, typ: "Vec512"}, - {name: "VANDNPD512", argLength: 2, reg: fp2fp1, asm: "VANDNPD", commutative: true, typ: "Vec512"}, - {name: "VRCP14PD512", argLength: 1, reg: fp1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec512"}, - {name: "VRSQRT14PD512", argLength: 1, reg: fp1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec512"}, - {name: "VDIVPD512", argLength: 2, reg: fp2fp1, asm: "VDIVPD", commutative: false, typ: "Vec512"}, - {name: "VANDPDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VANDPD", commutative: true, typ: "Vec512"}, - {name: "VANDNPDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VANDNPD", commutative: true, typ: "Vec512"}, - {name: "VRCP14PDMasked512", argLength: 2, reg: fp1m1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec512"}, - {name: "VRSQRT14PDMasked512", argLength: 2, reg: fp1m1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec512"}, - {name: "VDIVPDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VDIVPD", commutative: false, typ: "Vec512"}, - {name: "VMAXPDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VMAXPD", commutative: true, typ: "Vec512"}, - {name: "VMINPDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VMINPD", commutative: true, typ: "Vec512"}, - {name: "VMULPDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VMULPD", commutative: true, typ: "Vec512"}, - {name: "VSCALEFPDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VSCALEFPD", commutative: false, typ: "Vec512"}, - {name: "VORPDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VORPD", commutative: true, typ: "Vec512"}, - {name: "VSQRTPDMasked512", argLength: 2, reg: fp1m1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec512"}, - {name: "VADDPDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VADDPD", commutative: false, typ: "Vec512"}, - {name: "VXORPDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VXORPD", commutative: true, typ: "Vec512"}, - {name: "VMAXPD512", argLength: 2, reg: fp2fp1, asm: "VMAXPD", commutative: true, typ: "Vec512"}, - {name: "VMINPD512", argLength: 2, reg: fp2fp1, asm: "VMINPD", commutative: true, typ: "Vec512"}, - {name: "VMULPD512", argLength: 2, reg: fp2fp1, asm: "VMULPD", commutative: true, typ: "Vec512"}, - {name: "VSCALEFPD512", argLength: 2, reg: fp2fp1, asm: "VSCALEFPD", commutative: false, typ: "Vec512"}, - {name: "VORPD512", argLength: 2, reg: fp2fp1, asm: "VORPD", commutative: true, typ: "Vec512"}, - {name: "VSQRTPD512", argLength: 1, reg: fp1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec512"}, - {name: "VADDPD512", argLength: 2, reg: fp2fp1, asm: "VADDPD", commutative: false, typ: "Vec512"}, - {name: "VXORPD512", argLength: 2, reg: fp2fp1, asm: "VXORPD", commutative: true, typ: "Vec512"}, - {name: "VPABSW256", argLength: 1, reg: fp1fp1, asm: "VPABSW", commutative: false, typ: "Vec256"}, - {name: "VPADDW256", argLength: 2, reg: fp2fp1, asm: "VPADDW", commutative: true, typ: "Vec256"}, - {name: "VPCMPEQW256", argLength: 2, reg: fp2fp1, asm: "VPCMPEQW", commutative: true, typ: "Vec256"}, - {name: "VPCMPGTW256", 
argLength: 2, reg: fp2fp1, asm: "VPCMPGTW", commutative: false, typ: "Vec256"}, - {name: "VPABSWMasked256", argLength: 2, reg: fp1m1fp1, asm: "VPABSW", commutative: false, typ: "Vec256"}, - {name: "VPADDWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPADDW", commutative: true, typ: "Vec256"}, - {name: "VPCMPEQWMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPEQW", commutative: true, typ: "Mask"}, - {name: "VPCMPGTWMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPGTW", commutative: false, typ: "Mask"}, - {name: "VPMAXSWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSW", commutative: true, typ: "Vec256"}, - {name: "VPMINSWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMINSW", commutative: true, typ: "Vec256"}, - {name: "VPMULHWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMULHW", commutative: true, typ: "Vec256"}, - {name: "VPMULLWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMULLW", commutative: true, typ: "Vec256"}, - {name: "VPADDSWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPADDSW", commutative: true, typ: "Vec256"}, - {name: "VPSUBSWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPSUBSW", commutative: false, typ: "Vec256"}, - {name: "VPSUBWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPSUBW", commutative: false, typ: "Vec256"}, - {name: "VPMAXSW256", argLength: 2, reg: fp2fp1, asm: "VPMAXSW", commutative: true, typ: "Vec256"}, - {name: "VPMINSW256", argLength: 2, reg: fp2fp1, asm: "VPMINSW", commutative: true, typ: "Vec256"}, - {name: "VPMULHW256", argLength: 2, reg: fp2fp1, asm: "VPMULHW", commutative: true, typ: "Vec256"}, - {name: "VPMULLW256", argLength: 2, reg: fp2fp1, asm: "VPMULLW", commutative: true, typ: "Vec256"}, - {name: "VPHSUBW256", argLength: 2, reg: fp2fp1, asm: "VPHSUBW", commutative: false, typ: "Vec256"}, - {name: "VPHADDSW256", argLength: 2, reg: fp2fp1, asm: "VPHADDSW", commutative: false, typ: "Vec256"}, - {name: "VPHSUBSW256", argLength: 2, reg: fp2fp1, asm: "VPHSUBSW", commutative: false, typ: "Vec256"}, - {name: "VPSUBSW256", argLength: 2, reg: fp2fp1, asm: "VPSUBSW", commutative: false, typ: "Vec256"}, - {name: "VPSIGNW256", argLength: 2, reg: fp2fp1, asm: "VPSIGNW", commutative: false, typ: "Vec256"}, - {name: "VPSUBW256", argLength: 2, reg: fp2fp1, asm: "VPSUBW", commutative: false, typ: "Vec256"}, - {name: "VPABSW512", argLength: 1, reg: fp1fp1, asm: "VPABSW", commutative: false, typ: "Vec512"}, - {name: "VPADDW512", argLength: 2, reg: fp2fp1, asm: "VPADDW", commutative: true, typ: "Vec512"}, - {name: "VPCMPEQW512", argLength: 2, reg: fp2m1, asm: "VPCMPEQW", commutative: true, typ: "Mask"}, - {name: "VPCMPGTW512", argLength: 2, reg: fp2m1, asm: "VPCMPGTW", commutative: false, typ: "Mask"}, - {name: "VPABSWMasked512", argLength: 2, reg: fp1m1fp1, asm: "VPABSW", commutative: false, typ: "Vec512"}, - {name: "VPCMPEQWMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPEQW", commutative: true, typ: "Mask"}, - {name: "VPCMPGTWMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPGTW", commutative: false, typ: "Mask"}, - {name: "VPMAXSWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSW", commutative: true, typ: "Vec512"}, - {name: "VPMINSWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMINSW", commutative: true, typ: "Vec512"}, - {name: "VPMULHWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMULHW", commutative: true, typ: "Vec512"}, - {name: "VPMULLWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMULLW", commutative: true, typ: "Vec512"}, - {name: "VPMAXSW512", argLength: 2, reg: fp2fp1, asm: "VPMAXSW", commutative: true, typ: "Vec512"}, 
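The reg field of each opData entry names a regInfo describing the operand register classes; the parameter rename at the top of this hunk (fp2fp1→fp21, fp2m1→fp2k1, fp2m1fp1→fp2k1fp1, fp2m1m1→fp2k1k1, and so on) only shortens those names, with k marking AVX-512 mask (K) register operands. A rough sketch of how such regInfo values could be spelled with the _gen package's buildReg helper; the register lists and variable names below are assumptions, and the authoritative definitions live in AMD64Ops.go.

// Hypothetical sketch; the real definitions are in AMD64Ops.go.
var (
	v = buildReg("X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15") // vector registers (assumed list)
	k = buildReg("K1 K2 K3 K4 K5 K6 K7")                                  // AVX-512 mask registers (assumed list)

	fp21     = regInfo{inputs: []regMask{v, v}, outputs: []regMask{v}}    // e.g. VADDPS512: two vectors in, one vector out
	fp2k1    = regInfo{inputs: []regMask{v, v}, outputs: []regMask{k}}    // e.g. VPCMPEQW512: two vectors in, mask out
	fp2k1fp1 = regInfo{inputs: []regMask{v, v, k}, outputs: []regMask{v}} // e.g. VPADDWMasked256: two vectors plus mask in, vector out
	fp2k1k1  = regInfo{inputs: []regMask{v, v, k}, outputs: []regMask{k}} // e.g. VPCMPEQWMasked256: two vectors plus mask in, mask out
)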
- {name: "VPMINSW512", argLength: 2, reg: fp2fp1, asm: "VPMINSW", commutative: true, typ: "Vec512"}, - {name: "VPMULHW512", argLength: 2, reg: fp2fp1, asm: "VPMULHW", commutative: true, typ: "Vec512"}, - {name: "VPMULLW512", argLength: 2, reg: fp2fp1, asm: "VPMULLW", commutative: true, typ: "Vec512"}, - {name: "VPSUBSW512", argLength: 2, reg: fp2fp1, asm: "VPSUBSW", commutative: false, typ: "Vec512"}, - {name: "VPABSW128", argLength: 1, reg: fp1fp1, asm: "VPABSW", commutative: false, typ: "Vec128"}, - {name: "VPADDW128", argLength: 2, reg: fp2fp1, asm: "VPADDW", commutative: true, typ: "Vec128"}, - {name: "VPCMPEQW128", argLength: 2, reg: fp2fp1, asm: "VPCMPEQW", commutative: true, typ: "Vec128"}, - {name: "VPCMPGTW128", argLength: 2, reg: fp2fp1, asm: "VPCMPGTW", commutative: false, typ: "Vec128"}, - {name: "VPABSWMasked128", argLength: 2, reg: fp1m1fp1, asm: "VPABSW", commutative: false, typ: "Vec128"}, - {name: "VPCMPEQWMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPEQW", commutative: true, typ: "Mask"}, - {name: "VPCMPGTWMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPGTW", commutative: false, typ: "Mask"}, - {name: "VPMAXSWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSW", commutative: true, typ: "Vec128"}, - {name: "VPMINSWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMINSW", commutative: true, typ: "Vec128"}, - {name: "VPMULHWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMULHW", commutative: true, typ: "Vec128"}, - {name: "VPMULLWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMULLW", commutative: true, typ: "Vec128"}, - {name: "VPOPCNTWMasked128", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTW", commutative: false, typ: "Vec128"}, - {name: "VPSUBSWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPSUBSW", commutative: false, typ: "Vec128"}, - {name: "VPMAXSW128", argLength: 2, reg: fp2fp1, asm: "VPMAXSW", commutative: true, typ: "Vec128"}, - {name: "VPMINSW128", argLength: 2, reg: fp2fp1, asm: "VPMINSW", commutative: true, typ: "Vec128"}, - {name: "VPMULHW128", argLength: 2, reg: fp2fp1, asm: "VPMULHW", commutative: true, typ: "Vec128"}, - {name: "VPMULLW128", argLength: 2, reg: fp2fp1, asm: "VPMULLW", commutative: true, typ: "Vec128"}, - {name: "VPHSUBW128", argLength: 2, reg: fp2fp1, asm: "VPHSUBW", commutative: false, typ: "Vec128"}, - {name: "VPHADDSW128", argLength: 2, reg: fp2fp1, asm: "VPHADDSW", commutative: false, typ: "Vec128"}, - {name: "VPHSUBSW128", argLength: 2, reg: fp2fp1, asm: "VPHSUBSW", commutative: false, typ: "Vec128"}, - {name: "VPSIGNW128", argLength: 2, reg: fp2fp1, asm: "VPSIGNW", commutative: false, typ: "Vec128"}, - {name: "VPABSD512", argLength: 1, reg: fp1fp1, asm: "VPABSD", commutative: false, typ: "Vec512"}, - {name: "VPANDD512", argLength: 2, reg: fp2fp1, asm: "VPANDD", commutative: true, typ: "Vec512"}, - {name: "VPABSDMasked512", argLength: 2, reg: fp1m1fp1, asm: "VPABSD", commutative: false, typ: "Vec512"}, - {name: "VPMAXSDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSD", commutative: true, typ: "Vec512"}, - {name: "VPMINSDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMINSD", commutative: true, typ: "Vec512"}, - {name: "VPMULLDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMULLD", commutative: true, typ: "Vec512"}, - {name: "VPOPCNTDMasked512", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTD", commutative: false, typ: "Vec512"}, - {name: "VPSUBDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPSUBD", commutative: false, typ: "Vec512"}, - {name: "VPXORDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPXORD", commutative: true, 
typ: "Vec512"}, - {name: "VPMAXSD512", argLength: 2, reg: fp2fp1, asm: "VPMAXSD", commutative: true, typ: "Vec512"}, - {name: "VPMINSD512", argLength: 2, reg: fp2fp1, asm: "VPMINSD", commutative: true, typ: "Vec512"}, - {name: "VPMULLD512", argLength: 2, reg: fp2fp1, asm: "VPMULLD", commutative: true, typ: "Vec512"}, - {name: "VPORD512", argLength: 2, reg: fp2fp1, asm: "VPORD", commutative: true, typ: "Vec512"}, - {name: "VPXORD512", argLength: 2, reg: fp2fp1, asm: "VPXORD", commutative: true, typ: "Vec512"}, - {name: "VPABSD128", argLength: 1, reg: fp1fp1, asm: "VPABSD", commutative: false, typ: "Vec128"}, - {name: "VPCMPEQD128", argLength: 2, reg: fp2fp1, asm: "VPCMPEQD", commutative: true, typ: "Vec128"}, - {name: "VPCMPGTD128", argLength: 2, reg: fp2fp1, asm: "VPCMPGTD", commutative: false, typ: "Vec128"}, - {name: "VPABSDMasked128", argLength: 2, reg: fp1m1fp1, asm: "VPABSD", commutative: false, typ: "Vec128"}, - {name: "VPANDDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPANDD", commutative: true, typ: "Vec128"}, - {name: "VPMAXSDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSD", commutative: true, typ: "Vec128"}, - {name: "VPMINSDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMINSD", commutative: true, typ: "Vec128"}, - {name: "VPMULLDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMULLD", commutative: true, typ: "Vec128"}, - {name: "VPORDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPORD", commutative: true, typ: "Vec128"}, - {name: "VPOPCNTDMasked128", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTD", commutative: false, typ: "Vec128"}, - {name: "VPSUBDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPSUBD", commutative: false, typ: "Vec128"}, - {name: "VPXORDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPXORD", commutative: true, typ: "Vec128"}, - {name: "VPMAXSD128", argLength: 2, reg: fp2fp1, asm: "VPMAXSD", commutative: true, typ: "Vec128"}, - {name: "VPMINSD128", argLength: 2, reg: fp2fp1, asm: "VPMINSD", commutative: true, typ: "Vec128"}, - {name: "VPMULLD128", argLength: 2, reg: fp2fp1, asm: "VPMULLD", commutative: true, typ: "Vec128"}, - {name: "VPHSUBD128", argLength: 2, reg: fp2fp1, asm: "VPHSUBD", commutative: false, typ: "Vec128"}, - {name: "VPSIGND128", argLength: 2, reg: fp2fp1, asm: "VPSIGND", commutative: false, typ: "Vec128"}, - {name: "VPSUBD128", argLength: 2, reg: fp2fp1, asm: "VPSUBD", commutative: false, typ: "Vec128"}, - {name: "VPABSD256", argLength: 1, reg: fp1fp1, asm: "VPABSD", commutative: false, typ: "Vec256"}, - {name: "VPAND256", argLength: 2, reg: fp2fp1, asm: "VPAND", commutative: true, typ: "Vec256"}, - {name: "VPCMPEQD256", argLength: 2, reg: fp2fp1, asm: "VPCMPEQD", commutative: true, typ: "Vec256"}, - {name: "VPCMPGTD256", argLength: 2, reg: fp2fp1, asm: "VPCMPGTD", commutative: false, typ: "Vec256"}, - {name: "VPABSDMasked256", argLength: 2, reg: fp1m1fp1, asm: "VPABSD", commutative: false, typ: "Vec256"}, - {name: "VPMAXSDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSD", commutative: true, typ: "Vec256"}, - {name: "VPMINSDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMINSD", commutative: true, typ: "Vec256"}, - {name: "VPMULLDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMULLD", commutative: true, typ: "Vec256"}, - {name: "VPORDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPORD", commutative: true, typ: "Vec256"}, - {name: "VPSUBDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPSUBD", commutative: false, typ: "Vec256"}, - {name: "VPMAXSD256", argLength: 2, reg: fp2fp1, asm: "VPMAXSD", commutative: true, typ: 
"Vec256"}, - {name: "VPMINSD256", argLength: 2, reg: fp2fp1, asm: "VPMINSD", commutative: true, typ: "Vec256"}, - {name: "VPMULLD256", argLength: 2, reg: fp2fp1, asm: "VPMULLD", commutative: true, typ: "Vec256"}, - {name: "VPHSUBD256", argLength: 2, reg: fp2fp1, asm: "VPHSUBD", commutative: false, typ: "Vec256"}, - {name: "VPOPCNTD256", argLength: 1, reg: fp1fp1, asm: "VPOPCNTD", commutative: false, typ: "Vec256"}, - {name: "VPSIGND256", argLength: 2, reg: fp2fp1, asm: "VPSIGND", commutative: false, typ: "Vec256"}, - {name: "VPSUBD256", argLength: 2, reg: fp2fp1, asm: "VPSUBD", commutative: false, typ: "Vec256"}, - {name: "VPABSQ128", argLength: 1, reg: fp1fp1, asm: "VPABSQ", commutative: false, typ: "Vec128"}, - {name: "VPCMPEQQ128", argLength: 2, reg: fp2fp1, asm: "VPCMPEQQ", commutative: true, typ: "Vec128"}, - {name: "VPCMPGTQ128", argLength: 2, reg: fp2m1, asm: "VPCMPGTQ", commutative: false, typ: "Mask"}, - {name: "VPABSQMasked128", argLength: 2, reg: fp1m1fp1, asm: "VPABSQ", commutative: false, typ: "Vec128"}, - {name: "VPANDQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPANDQ", commutative: true, typ: "Vec128"}, - {name: "VPANDNQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPANDNQ", commutative: true, typ: "Vec128"}, - {name: "VPCMPEQQMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPEQQ", commutative: true, typ: "Mask"}, - {name: "VPCMPGTQMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPGTQ", commutative: false, typ: "Mask"}, - {name: "VPMAXSQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec128"}, - {name: "VPMINSQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMINSQ", commutative: true, typ: "Vec128"}, - {name: "VPMULDQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMULDQ", commutative: true, typ: "Vec128"}, - {name: "VPMULLQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMULLQ", commutative: true, typ: "Vec128"}, - {name: "VPSUBQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPSUBQ", commutative: false, typ: "Vec128"}, - {name: "VPMAXSQ128", argLength: 2, reg: fp2fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec128"}, - {name: "VPMINSQ128", argLength: 2, reg: fp2fp1, asm: "VPMINSQ", commutative: true, typ: "Vec128"}, - {name: "VPMULDQ128", argLength: 2, reg: fp2fp1, asm: "VPMULDQ", commutative: true, typ: "Vec128"}, - {name: "VPMULLQ128", argLength: 2, reg: fp2fp1, asm: "VPMULLQ", commutative: true, typ: "Vec128"}, - {name: "VPOR128", argLength: 2, reg: fp2fp1, asm: "VPOR", commutative: true, typ: "Vec128"}, - {name: "VPABSQ256", argLength: 1, reg: fp1fp1, asm: "VPABSQ", commutative: false, typ: "Vec256"}, - {name: "VPADDQ256", argLength: 2, reg: fp2fp1, asm: "VPADDQ", commutative: true, typ: "Vec256"}, - {name: "VPCMPEQQ256", argLength: 2, reg: fp2fp1, asm: "VPCMPEQQ", commutative: true, typ: "Vec256"}, - {name: "VPCMPGTQ256", argLength: 2, reg: fp2fp1, asm: "VPCMPGTQ", commutative: false, typ: "Vec256"}, - {name: "VPABSQMasked256", argLength: 2, reg: fp1m1fp1, asm: "VPABSQ", commutative: false, typ: "Vec256"}, - {name: "VPANDQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPANDQ", commutative: true, typ: "Vec256"}, - {name: "VPANDNQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPANDNQ", commutative: true, typ: "Vec256"}, - {name: "VPCMPEQQMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPEQQ", commutative: true, typ: "Mask"}, - {name: "VPCMPGTQMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPGTQ", commutative: false, typ: "Mask"}, - {name: "VPMAXSQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSQ", commutative: true, typ: 
"Vec256"}, - {name: "VPMINSQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMINSQ", commutative: true, typ: "Vec256"}, - {name: "VPMULDQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMULDQ", commutative: true, typ: "Vec256"}, - {name: "VPMULLQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMULLQ", commutative: true, typ: "Vec256"}, - {name: "VPORQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPORQ", commutative: true, typ: "Vec256"}, - {name: "VPOPCNTQMasked256", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTQ", commutative: false, typ: "Vec256"}, - {name: "VPSUBQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPSUBQ", commutative: false, typ: "Vec256"}, - {name: "VPMAXSQ256", argLength: 2, reg: fp2fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec256"}, - {name: "VPMINSQ256", argLength: 2, reg: fp2fp1, asm: "VPMINSQ", commutative: true, typ: "Vec256"}, - {name: "VPMULDQ256", argLength: 2, reg: fp2fp1, asm: "VPMULDQ", commutative: true, typ: "Vec256"}, - {name: "VPMULLQ256", argLength: 2, reg: fp2fp1, asm: "VPMULLQ", commutative: true, typ: "Vec256"}, - {name: "VPOR256", argLength: 2, reg: fp2fp1, asm: "VPOR", commutative: true, typ: "Vec256"}, - {name: "VPOPCNTQ256", argLength: 1, reg: fp1fp1, asm: "VPOPCNTQ", commutative: false, typ: "Vec256"}, - {name: "VPSUBQ256", argLength: 2, reg: fp2fp1, asm: "VPSUBQ", commutative: false, typ: "Vec256"}, - {name: "VPABSQ512", argLength: 1, reg: fp1fp1, asm: "VPABSQ", commutative: false, typ: "Vec512"}, - {name: "VPANDQ512", argLength: 2, reg: fp2fp1, asm: "VPANDQ", commutative: true, typ: "Vec512"}, - {name: "VPCMPEQQ512", argLength: 2, reg: fp2m1, asm: "VPCMPEQQ", commutative: true, typ: "Mask"}, - {name: "VPCMPGTQ512", argLength: 2, reg: fp2m1, asm: "VPCMPGTQ", commutative: false, typ: "Mask"}, - {name: "VPABSQMasked512", argLength: 2, reg: fp1m1fp1, asm: "VPABSQ", commutative: false, typ: "Vec512"}, - {name: "VPADDQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPADDQ", commutative: true, typ: "Vec512"}, - {name: "VPANDNQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPANDNQ", commutative: true, typ: "Vec512"}, - {name: "VPCMPEQQMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPEQQ", commutative: true, typ: "Mask"}, - {name: "VPCMPGTQMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPGTQ", commutative: false, typ: "Mask"}, - {name: "VPMAXSQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec512"}, - {name: "VPMINSQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMINSQ", commutative: true, typ: "Vec512"}, - {name: "VPMULDQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMULDQ", commutative: true, typ: "Vec512"}, - {name: "VPMULLQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMULLQ", commutative: true, typ: "Vec512"}, - {name: "VPMAXSQ512", argLength: 2, reg: fp2fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec512"}, - {name: "VPMINSQ512", argLength: 2, reg: fp2fp1, asm: "VPMINSQ", commutative: true, typ: "Vec512"}, - {name: "VPMULDQ512", argLength: 2, reg: fp2fp1, asm: "VPMULDQ", commutative: true, typ: "Vec512"}, - {name: "VPMULLQ512", argLength: 2, reg: fp2fp1, asm: "VPMULLQ", commutative: true, typ: "Vec512"}, - {name: "VPOPCNTQ512", argLength: 1, reg: fp1fp1, asm: "VPOPCNTQ", commutative: false, typ: "Vec512"}, - {name: "VPSUBQ512", argLength: 2, reg: fp2fp1, asm: "VPSUBQ", commutative: false, typ: "Vec512"}, - {name: "VPXORQ512", argLength: 2, reg: fp2fp1, asm: "VPXORQ", commutative: true, typ: "Vec512"}, - {name: "VPABSB128", argLength: 1, reg: fp1fp1, asm: "VPABSB", commutative: false, typ: "Vec128"}, - 
{name: "VPADDB128", argLength: 2, reg: fp2fp1, asm: "VPADDB", commutative: true, typ: "Vec128"}, - {name: "VPAND128", argLength: 2, reg: fp2fp1, asm: "VPAND", commutative: true, typ: "Vec128"}, - {name: "VPCMPEQB128", argLength: 2, reg: fp2fp1, asm: "VPCMPEQB", commutative: true, typ: "Vec128"}, - {name: "VPCMPGTB128", argLength: 2, reg: fp2fp1, asm: "VPCMPGTB", commutative: false, typ: "Vec128"}, - {name: "VPABSBMasked128", argLength: 2, reg: fp1m1fp1, asm: "VPABSB", commutative: false, typ: "Vec128"}, - {name: "VPADDBMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPADDB", commutative: true, typ: "Vec128"}, - {name: "VPMAXSBMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSB", commutative: true, typ: "Vec128"}, - {name: "VPMINSBMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMINSB", commutative: true, typ: "Vec128"}, - {name: "VPSUBSBMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPSUBSB", commutative: false, typ: "Vec128"}, - {name: "VPMAXSB128", argLength: 2, reg: fp2fp1, asm: "VPMAXSB", commutative: true, typ: "Vec128"}, - {name: "VPMINSB128", argLength: 2, reg: fp2fp1, asm: "VPMINSB", commutative: true, typ: "Vec128"}, - {name: "VPSIGNB128", argLength: 2, reg: fp2fp1, asm: "VPSIGNB", commutative: false, typ: "Vec128"}, - {name: "VPSUBB128", argLength: 2, reg: fp2fp1, asm: "VPSUBB", commutative: false, typ: "Vec128"}, - {name: "VPABSB256", argLength: 1, reg: fp1fp1, asm: "VPABSB", commutative: false, typ: "Vec256"}, - {name: "VPADDB256", argLength: 2, reg: fp2fp1, asm: "VPADDB", commutative: true, typ: "Vec256"}, - {name: "VPANDN256", argLength: 2, reg: fp2fp1, asm: "VPANDN", commutative: true, typ: "Vec256"}, - {name: "VPCMPEQB256", argLength: 2, reg: fp2fp1, asm: "VPCMPEQB", commutative: true, typ: "Vec256"}, - {name: "VPCMPGTB256", argLength: 2, reg: fp2fp1, asm: "VPCMPGTB", commutative: false, typ: "Vec256"}, - {name: "VPABSBMasked256", argLength: 2, reg: fp1m1fp1, asm: "VPABSB", commutative: false, typ: "Vec256"}, - {name: "VPMAXSBMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSB", commutative: true, typ: "Vec256"}, - {name: "VPMINSBMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMINSB", commutative: true, typ: "Vec256"}, - {name: "VPSUBSBMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPSUBSB", commutative: false, typ: "Vec256"}, - {name: "VPMAXSB256", argLength: 2, reg: fp2fp1, asm: "VPMAXSB", commutative: true, typ: "Vec256"}, - {name: "VPMINSB256", argLength: 2, reg: fp2fp1, asm: "VPMINSB", commutative: true, typ: "Vec256"}, - {name: "VPOPCNTB256", argLength: 1, reg: fp1fp1, asm: "VPOPCNTB", commutative: false, typ: "Vec256"}, - {name: "VPSIGNB256", argLength: 2, reg: fp2fp1, asm: "VPSIGNB", commutative: false, typ: "Vec256"}, - {name: "VPABSB512", argLength: 1, reg: fp1fp1, asm: "VPABSB", commutative: false, typ: "Vec512"}, - {name: "VPABSBMasked512", argLength: 2, reg: fp1m1fp1, asm: "VPABSB", commutative: false, typ: "Vec512"}, - {name: "VPMAXSBMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSB", commutative: true, typ: "Vec512"}, - {name: "VPMINSBMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMINSB", commutative: true, typ: "Vec512"}, - {name: "VPADDSBMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPADDSB", commutative: true, typ: "Vec512"}, - {name: "VPMAXSB512", argLength: 2, reg: fp2fp1, asm: "VPMAXSB", commutative: true, typ: "Vec512"}, - {name: "VPMINSB512", argLength: 2, reg: fp2fp1, asm: "VPMINSB", commutative: true, typ: "Vec512"}, - {name: "VPOPCNTB512", argLength: 1, reg: fp1fp1, asm: "VPOPCNTB", commutative: false, typ: "Vec512"}, - {name: 
"VPSUBSB512", argLength: 2, reg: fp2fp1, asm: "VPSUBSB", commutative: false, typ: "Vec512"}, - {name: "VPSUBB512", argLength: 2, reg: fp2fp1, asm: "VPSUBB", commutative: false, typ: "Vec512"}, - {name: "VPAVGW256", argLength: 2, reg: fp2fp1, asm: "VPAVGW", commutative: true, typ: "Vec256"}, - {name: "VPAVGWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPAVGW", commutative: true, typ: "Vec256"}, - {name: "VPMAXUWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUW", commutative: true, typ: "Vec256"}, - {name: "VPMINUWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMINUW", commutative: true, typ: "Vec256"}, - {name: "VPMULHUWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMULHUW", commutative: true, typ: "Vec256"}, - {name: "VPOPCNTWMasked256", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTW", commutative: false, typ: "Vec256"}, - {name: "VPMAXUW256", argLength: 2, reg: fp2fp1, asm: "VPMAXUW", commutative: true, typ: "Vec256"}, - {name: "VPMINUW256", argLength: 2, reg: fp2fp1, asm: "VPMINUW", commutative: true, typ: "Vec256"}, - {name: "VPMULHUW256", argLength: 2, reg: fp2fp1, asm: "VPMULHUW", commutative: true, typ: "Vec256"}, - {name: "VPHADDW256", argLength: 2, reg: fp2fp1, asm: "VPHADDW", commutative: false, typ: "Vec256"}, - {name: "VPOPCNTW256", argLength: 1, reg: fp1fp1, asm: "VPOPCNTW", commutative: false, typ: "Vec256"}, - {name: "VPADDSW256", argLength: 2, reg: fp2fp1, asm: "VPADDSW", commutative: true, typ: "Vec256"}, - {name: "VPAVGW512", argLength: 2, reg: fp2fp1, asm: "VPAVGW", commutative: true, typ: "Vec512"}, - {name: "VPADDWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPADDW", commutative: true, typ: "Vec512"}, - {name: "VPAVGWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPAVGW", commutative: true, typ: "Vec512"}, - {name: "VPMAXUWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUW", commutative: true, typ: "Vec512"}, - {name: "VPMINUWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMINUW", commutative: true, typ: "Vec512"}, - {name: "VPMULHUWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMULHUW", commutative: true, typ: "Vec512"}, - {name: "VPOPCNTWMasked512", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTW", commutative: false, typ: "Vec512"}, - {name: "VPADDSWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPADDSW", commutative: true, typ: "Vec512"}, - {name: "VPSUBSWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPSUBSW", commutative: false, typ: "Vec512"}, - {name: "VPSUBWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPSUBW", commutative: false, typ: "Vec512"}, - {name: "VPMAXUW512", argLength: 2, reg: fp2fp1, asm: "VPMAXUW", commutative: true, typ: "Vec512"}, - {name: "VPMINUW512", argLength: 2, reg: fp2fp1, asm: "VPMINUW", commutative: true, typ: "Vec512"}, - {name: "VPMULHUW512", argLength: 2, reg: fp2fp1, asm: "VPMULHUW", commutative: true, typ: "Vec512"}, - {name: "VPOPCNTW512", argLength: 1, reg: fp1fp1, asm: "VPOPCNTW", commutative: false, typ: "Vec512"}, - {name: "VPADDSW512", argLength: 2, reg: fp2fp1, asm: "VPADDSW", commutative: true, typ: "Vec512"}, - {name: "VPSUBW512", argLength: 2, reg: fp2fp1, asm: "VPSUBW", commutative: false, typ: "Vec512"}, - {name: "VPAVGW128", argLength: 2, reg: fp2fp1, asm: "VPAVGW", commutative: true, typ: "Vec128"}, - {name: "VPADDWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPADDW", commutative: true, typ: "Vec128"}, - {name: "VPAVGWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPAVGW", commutative: true, typ: "Vec128"}, - {name: "VPMAXUWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUW", commutative: true, typ: 
"Vec128"}, - {name: "VPMINUWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMINUW", commutative: true, typ: "Vec128"}, - {name: "VPMULHUWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMULHUW", commutative: true, typ: "Vec128"}, - {name: "VPADDSWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPADDSW", commutative: true, typ: "Vec128"}, - {name: "VPSUBWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPSUBW", commutative: false, typ: "Vec128"}, - {name: "VPMAXUW128", argLength: 2, reg: fp2fp1, asm: "VPMAXUW", commutative: true, typ: "Vec128"}, - {name: "VPMINUW128", argLength: 2, reg: fp2fp1, asm: "VPMINUW", commutative: true, typ: "Vec128"}, - {name: "VPMULHUW128", argLength: 2, reg: fp2fp1, asm: "VPMULHUW", commutative: true, typ: "Vec128"}, - {name: "VPHADDW128", argLength: 2, reg: fp2fp1, asm: "VPHADDW", commutative: false, typ: "Vec128"}, - {name: "VPOPCNTW128", argLength: 1, reg: fp1fp1, asm: "VPOPCNTW", commutative: false, typ: "Vec128"}, - {name: "VPADDSW128", argLength: 2, reg: fp2fp1, asm: "VPADDSW", commutative: true, typ: "Vec128"}, - {name: "VPSUBSW128", argLength: 2, reg: fp2fp1, asm: "VPSUBSW", commutative: false, typ: "Vec128"}, - {name: "VPSUBW128", argLength: 2, reg: fp2fp1, asm: "VPSUBW", commutative: false, typ: "Vec128"}, - {name: "VPADDD512", argLength: 2, reg: fp2fp1, asm: "VPADDD", commutative: true, typ: "Vec512"}, - {name: "VPANDND512", argLength: 2, reg: fp2fp1, asm: "VPANDND", commutative: true, typ: "Vec512"}, - {name: "VPADDDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPADDD", commutative: true, typ: "Vec512"}, - {name: "VPANDDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPANDD", commutative: true, typ: "Vec512"}, - {name: "VPANDNDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPANDND", commutative: true, typ: "Vec512"}, - {name: "VPMAXUDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUD", commutative: true, typ: "Vec512"}, - {name: "VPMINUDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMINUD", commutative: true, typ: "Vec512"}, - {name: "VPORDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPORD", commutative: true, typ: "Vec512"}, - {name: "VPMAXUD512", argLength: 2, reg: fp2fp1, asm: "VPMAXUD", commutative: true, typ: "Vec512"}, - {name: "VPMINUD512", argLength: 2, reg: fp2fp1, asm: "VPMINUD", commutative: true, typ: "Vec512"}, - {name: "VPOPCNTD512", argLength: 1, reg: fp1fp1, asm: "VPOPCNTD", commutative: false, typ: "Vec512"}, - {name: "VPSUBD512", argLength: 2, reg: fp2fp1, asm: "VPSUBD", commutative: false, typ: "Vec512"}, - {name: "VPADDD128", argLength: 2, reg: fp2fp1, asm: "VPADDD", commutative: true, typ: "Vec128"}, - {name: "VPADDDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPADDD", commutative: true, typ: "Vec128"}, - {name: "VPANDNDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPANDND", commutative: true, typ: "Vec128"}, - {name: "VPMAXUDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUD", commutative: true, typ: "Vec128"}, - {name: "VPMINUDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMINUD", commutative: true, typ: "Vec128"}, - {name: "VPMAXUD128", argLength: 2, reg: fp2fp1, asm: "VPMAXUD", commutative: true, typ: "Vec128"}, - {name: "VPMINUD128", argLength: 2, reg: fp2fp1, asm: "VPMINUD", commutative: true, typ: "Vec128"}, - {name: "VPHADDD128", argLength: 2, reg: fp2fp1, asm: "VPHADDD", commutative: false, typ: "Vec128"}, - {name: "VPOPCNTD128", argLength: 1, reg: fp1fp1, asm: "VPOPCNTD", commutative: false, typ: "Vec128"}, - {name: "VPADDD256", argLength: 2, reg: fp2fp1, asm: "VPADDD", commutative: true, typ: "Vec256"}, 
- {name: "VPADDDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPADDD", commutative: true, typ: "Vec256"}, - {name: "VPANDDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPANDD", commutative: true, typ: "Vec256"}, - {name: "VPANDNDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPANDND", commutative: true, typ: "Vec256"}, - {name: "VPMAXUDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUD", commutative: true, typ: "Vec256"}, - {name: "VPMINUDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMINUD", commutative: true, typ: "Vec256"}, - {name: "VPOPCNTDMasked256", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTD", commutative: false, typ: "Vec256"}, - {name: "VPXORDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPXORD", commutative: true, typ: "Vec256"}, - {name: "VPMAXUD256", argLength: 2, reg: fp2fp1, asm: "VPMAXUD", commutative: true, typ: "Vec256"}, - {name: "VPMINUD256", argLength: 2, reg: fp2fp1, asm: "VPMINUD", commutative: true, typ: "Vec256"}, - {name: "VPMULUDQ256", argLength: 2, reg: fp2fp1, asm: "VPMULUDQ", commutative: true, typ: "Vec256"}, - {name: "VPHADDD256", argLength: 2, reg: fp2fp1, asm: "VPHADDD", commutative: false, typ: "Vec256"}, - {name: "VPXOR256", argLength: 2, reg: fp2fp1, asm: "VPXOR", commutative: true, typ: "Vec256"}, - {name: "VPADDQ128", argLength: 2, reg: fp2fp1, asm: "VPADDQ", commutative: true, typ: "Vec128"}, - {name: "VPADDQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPADDQ", commutative: true, typ: "Vec128"}, - {name: "VPMAXUQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUQ", commutative: true, typ: "Vec128"}, - {name: "VPMINUQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMINUQ", commutative: true, typ: "Vec128"}, - {name: "VPMULUDQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMULUDQ", commutative: true, typ: "Vec128"}, - {name: "VPORQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPORQ", commutative: true, typ: "Vec128"}, - {name: "VPOPCNTQMasked128", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTQ", commutative: false, typ: "Vec128"}, - {name: "VPXORQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPXORQ", commutative: true, typ: "Vec128"}, - {name: "VPMAXUQ128", argLength: 2, reg: fp2fp1, asm: "VPMAXUQ", commutative: true, typ: "Vec128"}, - {name: "VPMINUQ128", argLength: 2, reg: fp2fp1, asm: "VPMINUQ", commutative: true, typ: "Vec128"}, - {name: "VPMULUDQ128", argLength: 2, reg: fp2fp1, asm: "VPMULUDQ", commutative: true, typ: "Vec128"}, - {name: "VPOPCNTQ128", argLength: 1, reg: fp1fp1, asm: "VPOPCNTQ", commutative: false, typ: "Vec128"}, - {name: "VPSUBQ128", argLength: 2, reg: fp2fp1, asm: "VPSUBQ", commutative: false, typ: "Vec128"}, - {name: "VPXOR128", argLength: 2, reg: fp2fp1, asm: "VPXOR", commutative: true, typ: "Vec128"}, - {name: "VPADDQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPADDQ", commutative: true, typ: "Vec256"}, - {name: "VPMAXUQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUQ", commutative: true, typ: "Vec256"}, - {name: "VPMINUQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMINUQ", commutative: true, typ: "Vec256"}, - {name: "VPMULUDQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMULUDQ", commutative: true, typ: "Vec256"}, - {name: "VPXORQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPXORQ", commutative: true, typ: "Vec256"}, - {name: "VPMAXUQ256", argLength: 2, reg: fp2fp1, asm: "VPMAXUQ", commutative: true, typ: "Vec256"}, - {name: "VPMINUQ256", argLength: 2, reg: fp2fp1, asm: "VPMINUQ", commutative: true, typ: "Vec256"}, - {name: "VPADDQ512", argLength: 2, reg: fp2fp1, asm: "VPADDQ", commutative: 
true, typ: "Vec512"}, - {name: "VPANDNQ512", argLength: 2, reg: fp2fp1, asm: "VPANDNQ", commutative: true, typ: "Vec512"}, - {name: "VPANDQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPANDQ", commutative: true, typ: "Vec512"}, - {name: "VPMAXUQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUQ", commutative: true, typ: "Vec512"}, - {name: "VPMINUQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMINUQ", commutative: true, typ: "Vec512"}, - {name: "VPMULUDQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMULUDQ", commutative: true, typ: "Vec512"}, - {name: "VPORQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPORQ", commutative: true, typ: "Vec512"}, - {name: "VPOPCNTQMasked512", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTQ", commutative: false, typ: "Vec512"}, - {name: "VPSUBQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPSUBQ", commutative: false, typ: "Vec512"}, - {name: "VPXORQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPXORQ", commutative: true, typ: "Vec512"}, - {name: "VPMAXUQ512", argLength: 2, reg: fp2fp1, asm: "VPMAXUQ", commutative: true, typ: "Vec512"}, - {name: "VPMINUQ512", argLength: 2, reg: fp2fp1, asm: "VPMINUQ", commutative: true, typ: "Vec512"}, - {name: "VPMULUDQ512", argLength: 2, reg: fp2fp1, asm: "VPMULUDQ", commutative: true, typ: "Vec512"}, - {name: "VPORQ512", argLength: 2, reg: fp2fp1, asm: "VPORQ", commutative: true, typ: "Vec512"}, - {name: "VPANDN128", argLength: 2, reg: fp2fp1, asm: "VPANDN", commutative: true, typ: "Vec128"}, - {name: "VPAVGB128", argLength: 2, reg: fp2fp1, asm: "VPAVGB", commutative: true, typ: "Vec128"}, - {name: "VPAVGBMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPAVGB", commutative: true, typ: "Vec128"}, - {name: "VPMAXUBMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUB", commutative: true, typ: "Vec128"}, - {name: "VPMINUBMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMINUB", commutative: true, typ: "Vec128"}, - {name: "VPOPCNTBMasked128", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTB", commutative: false, typ: "Vec128"}, - {name: "VPADDSBMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPADDSB", commutative: true, typ: "Vec128"}, - {name: "VPSUBBMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPSUBB", commutative: false, typ: "Vec128"}, - {name: "VPMAXUB128", argLength: 2, reg: fp2fp1, asm: "VPMAXUB", commutative: true, typ: "Vec128"}, - {name: "VPMINUB128", argLength: 2, reg: fp2fp1, asm: "VPMINUB", commutative: true, typ: "Vec128"}, - {name: "VPOPCNTB128", argLength: 1, reg: fp1fp1, asm: "VPOPCNTB", commutative: false, typ: "Vec128"}, - {name: "VPADDSB128", argLength: 2, reg: fp2fp1, asm: "VPADDSB", commutative: true, typ: "Vec128"}, - {name: "VPSUBSB128", argLength: 2, reg: fp2fp1, asm: "VPSUBSB", commutative: false, typ: "Vec128"}, - {name: "VPAVGB256", argLength: 2, reg: fp2fp1, asm: "VPAVGB", commutative: true, typ: "Vec256"}, - {name: "VPADDBMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPADDB", commutative: true, typ: "Vec256"}, - {name: "VPAVGBMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPAVGB", commutative: true, typ: "Vec256"}, - {name: "VPMAXUBMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUB", commutative: true, typ: "Vec256"}, - {name: "VPMINUBMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMINUB", commutative: true, typ: "Vec256"}, - {name: "VPOPCNTBMasked256", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTB", commutative: false, typ: "Vec256"}, - {name: "VPADDSBMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPADDSB", commutative: true, typ: "Vec256"}, - {name: "VPSUBBMasked256", argLength: 3, reg: 
fp2m1fp1, asm: "VPSUBB", commutative: false, typ: "Vec256"}, - {name: "VPMAXUB256", argLength: 2, reg: fp2fp1, asm: "VPMAXUB", commutative: true, typ: "Vec256"}, - {name: "VPMINUB256", argLength: 2, reg: fp2fp1, asm: "VPMINUB", commutative: true, typ: "Vec256"}, - {name: "VPADDSB256", argLength: 2, reg: fp2fp1, asm: "VPADDSB", commutative: true, typ: "Vec256"}, - {name: "VPSUBSB256", argLength: 2, reg: fp2fp1, asm: "VPSUBSB", commutative: false, typ: "Vec256"}, - {name: "VPSUBB256", argLength: 2, reg: fp2fp1, asm: "VPSUBB", commutative: false, typ: "Vec256"}, - {name: "VPADDB512", argLength: 2, reg: fp2fp1, asm: "VPADDB", commutative: true, typ: "Vec512"}, - {name: "VPAVGB512", argLength: 2, reg: fp2fp1, asm: "VPAVGB", commutative: true, typ: "Vec512"}, - {name: "VPADDBMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPADDB", commutative: true, typ: "Vec512"}, - {name: "VPAVGBMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPAVGB", commutative: true, typ: "Vec512"}, - {name: "VPMAXUBMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUB", commutative: true, typ: "Vec512"}, - {name: "VPMINUBMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMINUB", commutative: true, typ: "Vec512"}, - {name: "VPOPCNTBMasked512", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTB", commutative: false, typ: "Vec512"}, - {name: "VPSUBSBMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPSUBSB", commutative: false, typ: "Vec512"}, - {name: "VPSUBBMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPSUBB", commutative: false, typ: "Vec512"}, - {name: "VPMAXUB512", argLength: 2, reg: fp2fp1, asm: "VPMAXUB", commutative: true, typ: "Vec512"}, - {name: "VPMINUB512", argLength: 2, reg: fp2fp1, asm: "VPMINUB", commutative: true, typ: "Vec512"}, - {name: "VPADDSB512", argLength: 2, reg: fp2fp1, asm: "VPADDSB", commutative: true, typ: "Vec512"}, - {name: "VCMPPS512", argLength: 2, reg: fp2m1, asm: "VCMPPS", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VCMPPSMasked512", argLength: 3, reg: fp2m1m1, asm: "VCMPPS", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VCMPPS128", argLength: 2, reg: fp2fp1, asm: "VCMPPS", aux: "Int8", commutative: false, typ: "Vec128"}, - {name: "VCMPPSMasked128", argLength: 3, reg: fp2m1m1, asm: "VCMPPS", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VCMPPS256", argLength: 2, reg: fp2fp1, asm: "VCMPPS", aux: "Int8", commutative: false, typ: "Vec256"}, - {name: "VCMPPSMasked256", argLength: 3, reg: fp2m1m1, asm: "VCMPPS", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VCMPPD128", argLength: 2, reg: fp2fp1, asm: "VCMPPD", aux: "Int8", commutative: false, typ: "Vec128"}, - {name: "VCMPPDMasked128", argLength: 3, reg: fp2m1m1, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask"}, - {name: "VCMPPD256", argLength: 2, reg: fp2fp1, asm: "VCMPPD", aux: "Int8", commutative: false, typ: "Vec256"}, - {name: "VCMPPDMasked256", argLength: 3, reg: fp2m1m1, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask"}, - {name: "VCMPPD512", argLength: 2, reg: fp2m1, asm: "VCMPPD", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VCMPPDMasked512", argLength: 3, reg: fp2m1m1, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask"}, - {name: "VPCMPW256", argLength: 2, reg: fp2m1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPWMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPWMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask"}, - 
{name: "VPCMPW512", argLength: 2, reg: fp2m1, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask"}, - {name: "VPCMPW128", argLength: 2, reg: fp2m1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPWMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask"}, - {name: "VPCMPD512", argLength: 2, reg: fp2m1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPDMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask"}, - {name: "VPCMPDMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPD128", argLength: 2, reg: fp2m1, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask"}, - {name: "VPCMPD256", argLength: 2, reg: fp2m1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPDMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask"}, - {name: "VPCMPQ128", argLength: 2, reg: fp2m1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPQMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPQ256", argLength: 2, reg: fp2m1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPQMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPQMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask"}, - {name: "VPCMPQ512", argLength: 2, reg: fp2m1, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask"}, - {name: "VPCMPBMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPB128", argLength: 2, reg: fp2m1, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask"}, - {name: "VPCMPBMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPB256", argLength: 2, reg: fp2m1, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask"}, - {name: "VPCMPB512", argLength: 2, reg: fp2m1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPBMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUW256", argLength: 2, reg: fp2m1, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUWMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUW512", argLength: 2, reg: fp2m1, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask"}, - {name: "VPCMPUWMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUW128", argLength: 2, reg: fp2m1, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUWMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask"}, - {name: "VPCMPUDMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPUD", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUD512", argLength: 2, reg: fp2m1, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask"}, - {name: "VPCMPUD128", argLength: 2, reg: fp2m1, asm: "VPCMPUD", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUDMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPUD", aux: "Int8", commutative: false, typ: "Mask"}, - {name: 
"VPCMPUDMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPUD", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUD256", argLength: 2, reg: fp2m1, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask"}, - {name: "VPCMPUQ128", argLength: 2, reg: fp2m1, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUQMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUQMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUQ256", argLength: 2, reg: fp2m1, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask"}, - {name: "VPCMPUQ512", argLength: 2, reg: fp2m1, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUQMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUB128", argLength: 2, reg: fp2m1, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUBMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUB256", argLength: 2, reg: fp2m1, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUBMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUB512", argLength: 2, reg: fp2m1, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUBMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VADDPS512", argLength: 2, reg: fp21, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VANDPS512", argLength: 2, reg: fp21, asm: "VANDPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VANDNPS512", argLength: 2, reg: fp21, asm: "VANDNPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VRCP14PS512", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRSQRT14PS512", argLength: 1, reg: fp11, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VDIVPS512", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VADDPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VANDPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VANDNPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDNPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VRCP14PSMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRSQRT14PSMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VDIVPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VMAXPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMINPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMULPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VSCALEFPSMasked512", argLength: 3, 
reg: fp2k1fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VORPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VORPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VSQRTPSMasked512", argLength: 2, reg: fp1k1fp1, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VXORPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VXORPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMAXPS512", argLength: 2, reg: fp21, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMINPS512", argLength: 2, reg: fp21, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMULPS512", argLength: 2, reg: fp21, asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VSCALEFPS512", argLength: 2, reg: fp21, asm: "VSCALEFPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VORPS512", argLength: 2, reg: fp21, asm: "VORPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VSQRTPS512", argLength: 1, reg: fp11, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VXORPS512", argLength: 2, reg: fp21, asm: "VXORPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VADDPS128", argLength: 2, reg: fp21, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VANDPS128", argLength: 2, reg: fp21, asm: "VANDPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VANDNPS128", argLength: 2, reg: fp21, asm: "VANDNPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VRCP14PS128", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRSQRTPS128", argLength: 1, reg: fp11, asm: "VRSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VDIVPS128", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VADDPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VANDPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VANDNPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDNPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VRCP14PSMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRSQRT14PSMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VDIVPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VMAXPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMAXPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMINPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMULPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VSCALEFPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VORPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VORPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VSQRTPSMasked128", argLength: 2, reg: fp1k1fp1, asm: "VSQRTPS", commutative: false, 
typ: "Vec128", resultInArg0: false}, + {name: "VXORPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VXORPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMAXPS128", argLength: 2, reg: fp21, asm: "VMAXPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMINPS128", argLength: 2, reg: fp21, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMULPS128", argLength: 2, reg: fp21, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VSCALEFPS128", argLength: 2, reg: fp21, asm: "VSCALEFPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VORPS128", argLength: 2, reg: fp21, asm: "VORPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VHADDPS128", argLength: 2, reg: fp21, asm: "VHADDPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VHSUBPS128", argLength: 2, reg: fp21, asm: "VHSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSQRTPS128", argLength: 1, reg: fp11, asm: "VSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VXORPS128", argLength: 2, reg: fp21, asm: "VXORPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VADDPS256", argLength: 2, reg: fp21, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VANDPS256", argLength: 2, reg: fp21, asm: "VANDPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VANDNPS256", argLength: 2, reg: fp21, asm: "VANDNPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VRCP14PS256", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRSQRTPS256", argLength: 1, reg: fp11, asm: "VRSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VDIVPS256", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VADDPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VANDPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VANDNPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDNPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VRCP14PSMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRSQRT14PSMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VDIVPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VMAXPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMINPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMULPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VSCALEFPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VORPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VORPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VSQRTPSMasked256", argLength: 2, reg: fp1k1fp1, asm: "VSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VXORPSMasked256", 
argLength: 3, reg: fp2k1fp1, asm: "VXORPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMAXPS256", argLength: 2, reg: fp21, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMINPS256", argLength: 2, reg: fp21, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMULPS256", argLength: 2, reg: fp21, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VSCALEFPS256", argLength: 2, reg: fp21, asm: "VSCALEFPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VORPS256", argLength: 2, reg: fp21, asm: "VORPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VHADDPS256", argLength: 2, reg: fp21, asm: "VHADDPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VHSUBPS256", argLength: 2, reg: fp21, asm: "VHSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSQRTPS256", argLength: 1, reg: fp11, asm: "VSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VXORPS256", argLength: 2, reg: fp21, asm: "VXORPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VADDPD128", argLength: 2, reg: fp21, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VANDPD128", argLength: 2, reg: fp21, asm: "VANDPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VANDNPD128", argLength: 2, reg: fp21, asm: "VANDNPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VRCP14PD128", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRSQRT14PD128", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VDIVPD128", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VADDPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VANDPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VANDNPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDNPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VRCP14PDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRSQRT14PDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VDIVPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VMAXPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMAXPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMINPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMULPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VSCALEFPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VSCALEFPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VORPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VORPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VSQRTPDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VXORPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VXORPD", commutative: true, 
typ: "Vec128", resultInArg0: false}, + {name: "VMAXPD128", argLength: 2, reg: fp21, asm: "VMAXPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMINPD128", argLength: 2, reg: fp21, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMULPD128", argLength: 2, reg: fp21, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VSCALEFPD128", argLength: 2, reg: fp21, asm: "VSCALEFPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VORPD128", argLength: 2, reg: fp21, asm: "VORPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VHADDPD128", argLength: 2, reg: fp21, asm: "VHADDPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VHSUBPD128", argLength: 2, reg: fp21, asm: "VHSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSQRTPD128", argLength: 1, reg: fp11, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VXORPD128", argLength: 2, reg: fp21, asm: "VXORPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VADDPD256", argLength: 2, reg: fp21, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VANDPD256", argLength: 2, reg: fp21, asm: "VANDPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VANDNPD256", argLength: 2, reg: fp21, asm: "VANDNPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VRCP14PD256", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRSQRT14PD256", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VDIVPD256", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VADDPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VANDPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VANDNPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDNPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VRCP14PDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRSQRT14PDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VDIVPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VMAXPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMAXPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMINPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMULPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VSCALEFPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VSCALEFPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VORPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VORPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VSQRTPDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VXORPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VXORPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMAXPD256", 
argLength: 2, reg: fp21, asm: "VMAXPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMINPD256", argLength: 2, reg: fp21, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMULPD256", argLength: 2, reg: fp21, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VSCALEFPD256", argLength: 2, reg: fp21, asm: "VSCALEFPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VORPD256", argLength: 2, reg: fp21, asm: "VORPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VHADDPD256", argLength: 2, reg: fp21, asm: "VHADDPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VHSUBPD256", argLength: 2, reg: fp21, asm: "VHSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSQRTPD256", argLength: 1, reg: fp11, asm: "VSQRTPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VXORPD256", argLength: 2, reg: fp21, asm: "VXORPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VADDPD512", argLength: 2, reg: fp21, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VANDPD512", argLength: 2, reg: fp21, asm: "VANDPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VANDNPD512", argLength: 2, reg: fp21, asm: "VANDNPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VRCP14PD512", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRSQRT14PD512", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VDIVPD512", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VADDPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VANDPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VANDNPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDNPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VRCP14PDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRSQRT14PDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VDIVPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VMAXPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMAXPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMINPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMULPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VSCALEFPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VSCALEFPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VORPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VORPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VSQRTPDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VXORPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VXORPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMAXPD512", argLength: 2, reg: fp21, asm: "VMAXPD", commutative: true, typ: 
"Vec512", resultInArg0: false}, + {name: "VMINPD512", argLength: 2, reg: fp21, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMULPD512", argLength: 2, reg: fp21, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VSCALEFPD512", argLength: 2, reg: fp21, asm: "VSCALEFPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VORPD512", argLength: 2, reg: fp21, asm: "VORPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VSQRTPD512", argLength: 1, reg: fp11, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VXORPD512", argLength: 2, reg: fp21, asm: "VXORPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPABSW256", argLength: 1, reg: fp11, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDW256", argLength: 2, reg: fp21, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPAND256", argLength: 2, reg: fp21, asm: "VPAND", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPANDN256", argLength: 2, reg: fp21, asm: "VPANDN", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPEQW256", argLength: 2, reg: fp21, asm: "VPCMPEQW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPGTW256", argLength: 2, reg: fp21, asm: "VPCMPGTW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSWMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPEQWMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQW", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTWMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTW", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPMAXSWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULHWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMULLW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTWMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDSWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPADDSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBSWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSW256", argLength: 2, reg: fp21, asm: "VPMAXSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSW256", argLength: 2, reg: fp21, asm: "VPMINSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULHW256", argLength: 2, reg: fp21, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLW256", argLength: 2, reg: fp21, asm: "VPMULLW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPOR256", argLength: 2, reg: fp21, asm: 
"VPOR", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPHADDW256", argLength: 2, reg: fp21, asm: "VPHADDW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPHSUBW256", argLength: 2, reg: fp21, asm: "VPHSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTW256", argLength: 1, reg: fp11, asm: "VPOPCNTW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDSW256", argLength: 2, reg: fp21, asm: "VPADDSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPHADDSW256", argLength: 2, reg: fp21, asm: "VPHADDSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPHSUBSW256", argLength: 2, reg: fp21, asm: "VPHSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBSW256", argLength: 2, reg: fp21, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSIGNW256", argLength: 2, reg: fp21, asm: "VPSIGNW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBW256", argLength: 2, reg: fp21, asm: "VPSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPXOR256", argLength: 2, reg: fp21, asm: "VPXOR", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPABSW512", argLength: 1, reg: fp11, asm: "VPABSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDW512", argLength: 2, reg: fp21, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPEQW512", argLength: 2, reg: fp2k1, asm: "VPCMPEQW", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTW512", argLength: 2, reg: fp2k1, asm: "VPCMPGTW", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPABSWMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPABSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPEQWMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQW", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTWMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTW", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPMAXSWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULHWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMULLW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTWMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDSWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPADDSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBSWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSW512", argLength: 2, reg: fp21, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSW512", argLength: 2, reg: fp21, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false}, + 
{name: "VPMULHW512", argLength: 2, reg: fp21, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLW512", argLength: 2, reg: fp21, asm: "VPMULLW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTW512", argLength: 1, reg: fp11, asm: "VPOPCNTW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDSW512", argLength: 2, reg: fp21, asm: "VPADDSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBSW512", argLength: 2, reg: fp21, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBW512", argLength: 2, reg: fp21, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPABSW128", argLength: 1, reg: fp11, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDW128", argLength: 2, reg: fp21, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPAND128", argLength: 2, reg: fp21, asm: "VPAND", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPANDN128", argLength: 2, reg: fp21, asm: "VPANDN", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPEQW128", argLength: 2, reg: fp21, asm: "VPCMPEQW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPGTW128", argLength: 2, reg: fp21, asm: "VPCMPGTW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSWMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPEQWMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQW", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTWMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTW", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPMAXSWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULHWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULLW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTWMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDSWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPADDSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBSWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSW128", argLength: 2, reg: fp21, asm: "VPMAXSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSW128", argLength: 2, reg: fp21, asm: "VPMINSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULHW128", argLength: 2, reg: fp21, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLW128", argLength: 2, reg: fp21, asm: "VPMULLW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPOR128", argLength: 2, reg: fp21, asm: "VPOR", commutative: true, 
typ: "Vec128", resultInArg0: false}, + {name: "VPHADDW128", argLength: 2, reg: fp21, asm: "VPHADDW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPHSUBW128", argLength: 2, reg: fp21, asm: "VPHSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTW128", argLength: 1, reg: fp11, asm: "VPOPCNTW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDSW128", argLength: 2, reg: fp21, asm: "VPADDSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPHADDSW128", argLength: 2, reg: fp21, asm: "VPHADDSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPHSUBSW128", argLength: 2, reg: fp21, asm: "VPHSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBSW128", argLength: 2, reg: fp21, asm: "VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSIGNW128", argLength: 2, reg: fp21, asm: "VPSIGNW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBW128", argLength: 2, reg: fp21, asm: "VPSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPXOR128", argLength: 2, reg: fp21, asm: "VPXOR", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPABSD512", argLength: 1, reg: fp11, asm: "VPABSD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDD512", argLength: 2, reg: fp21, asm: "VPADDD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDD512", argLength: 2, reg: fp21, asm: "VPANDD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDND512", argLength: 2, reg: fp21, asm: "VPANDND", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPEQD512", argLength: 2, reg: fp2k1, asm: "VPCMPEQD", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTD512", argLength: 2, reg: fp2k1, asm: "VPCMPGTD", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPABSDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPABSD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPADDD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPANDD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDNDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPANDND", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPEQDMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQD", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTDMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTD", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPMAXSDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPORDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPORD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPXORDMasked512", argLength: 
3, reg: fp2k1fp1, asm: "VPXORD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSD512", argLength: 2, reg: fp21, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSD512", argLength: 2, reg: fp21, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLD512", argLength: 2, reg: fp21, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPORD512", argLength: 2, reg: fp21, asm: "VPORD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTD512", argLength: 1, reg: fp11, asm: "VPOPCNTD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBD512", argLength: 2, reg: fp21, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPXORD512", argLength: 2, reg: fp21, asm: "VPXORD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPABSD128", argLength: 1, reg: fp11, asm: "VPABSD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDD128", argLength: 2, reg: fp21, asm: "VPADDD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPEQD128", argLength: 2, reg: fp21, asm: "VPCMPEQD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPGTD128", argLength: 2, reg: fp21, asm: "VPCMPGTD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPABSD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPADDD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPANDDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPANDD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPANDNDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPANDND", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPEQDMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQD", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTDMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTD", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPMAXSDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINSD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULLD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPORDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPORD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPXORDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPXORD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSD128", argLength: 2, reg: fp21, asm: "VPMAXSD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSD128", argLength: 2, reg: fp21, asm: "VPMINSD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULDQ128", argLength: 2, reg: fp21, asm: "VPMULDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLD128", argLength: 2, reg: fp21, asm: "VPMULLD", commutative: true, typ: "Vec128", 
resultInArg0: false}, + {name: "VPHADDD128", argLength: 2, reg: fp21, asm: "VPHADDD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPHSUBD128", argLength: 2, reg: fp21, asm: "VPHSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTD128", argLength: 1, reg: fp11, asm: "VPOPCNTD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSIGND128", argLength: 2, reg: fp21, asm: "VPSIGND", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBD128", argLength: 2, reg: fp21, asm: "VPSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSD256", argLength: 1, reg: fp11, asm: "VPABSD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDD256", argLength: 2, reg: fp21, asm: "VPADDD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPEQD256", argLength: 2, reg: fp21, asm: "VPCMPEQD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPGTD256", argLength: 2, reg: fp21, asm: "VPCMPGTD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPABSD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPADDD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPANDDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPANDD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPANDNDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPANDND", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPEQDMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQD", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTDMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTD", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPMAXSDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINSD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMULLD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPORDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPORD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPXORDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPXORD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSD256", argLength: 2, reg: fp21, asm: "VPMAXSD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSD256", argLength: 2, reg: fp21, asm: "VPMINSD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULDQ256", argLength: 2, reg: fp21, asm: "VPMULDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLD256", argLength: 2, reg: fp21, asm: "VPMULLD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPHADDD256", argLength: 2, reg: fp21, asm: "VPHADDD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPHSUBD256", argLength: 2, reg: fp21, asm: "VPHSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTD256", argLength: 1, reg: 
fp11, asm: "VPOPCNTD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSIGND256", argLength: 2, reg: fp21, asm: "VPSIGND", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBD256", argLength: 2, reg: fp21, asm: "VPSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSQ128", argLength: 1, reg: fp11, asm: "VPABSQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDQ128", argLength: 2, reg: fp21, asm: "VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPEQQ128", argLength: 2, reg: fp21, asm: "VPCMPEQQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPGTQ128", argLength: 2, reg: fp2k1, asm: "VPCMPGTQ", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPABSQMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPABSQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPANDQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPANDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPANDNQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPANDNQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPEQQMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQQ", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTQMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTQ", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPMAXSQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINSQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULDQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULLQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPORQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPORQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTQMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPSUBQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPXORQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPXORQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSQ128", argLength: 2, reg: fp21, asm: "VPMAXSQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSQ128", argLength: 2, reg: fp21, asm: "VPMINSQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLQ128", argLength: 2, reg: fp21, asm: "VPMULLQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTQ128", argLength: 1, reg: fp11, asm: "VPOPCNTQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBQ128", argLength: 2, reg: fp21, asm: "VPSUBQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSQ256", argLength: 1, reg: fp11, asm: "VPABSQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDQ256", argLength: 2, reg: fp21, asm: "VPADDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPEQQ256", argLength: 2, reg: fp21, asm: "VPCMPEQQ", commutative: true, typ: "Vec256", 
resultInArg0: false}, + {name: "VPCMPGTQ256", argLength: 2, reg: fp21, asm: "VPCMPGTQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSQMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPABSQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPADDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPANDQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPANDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPANDNQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPANDNQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPEQQMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQQ", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTQMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTQ", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPMAXSQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINSQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULDQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMULDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMULLQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPORQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPORQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTQMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPSUBQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPXORQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPXORQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSQ256", argLength: 2, reg: fp21, asm: "VPMAXSQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSQ256", argLength: 2, reg: fp21, asm: "VPMINSQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLQ256", argLength: 2, reg: fp21, asm: "VPMULLQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTQ256", argLength: 1, reg: fp11, asm: "VPOPCNTQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBQ256", argLength: 2, reg: fp21, asm: "VPSUBQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSQ512", argLength: 1, reg: fp11, asm: "VPABSQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDQ512", argLength: 2, reg: fp21, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDQ512", argLength: 2, reg: fp21, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDNQ512", argLength: 2, reg: fp21, asm: "VPANDNQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPEQQ512", argLength: 2, reg: fp2k1, asm: "VPCMPEQQ", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTQ512", argLength: 2, reg: fp2k1, asm: "VPCMPGTQ", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPABSQMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPABSQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: 
"VPANDQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDNQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPANDNQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPEQQMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQQ", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTQMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTQ", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPMAXSQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINSQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULDQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMULDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMULLQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPORQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPORQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTQMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPSUBQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPXORQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPXORQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSQ512", argLength: 2, reg: fp21, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSQ512", argLength: 2, reg: fp21, asm: "VPMINSQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULDQ512", argLength: 2, reg: fp21, asm: "VPMULDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLQ512", argLength: 2, reg: fp21, asm: "VPMULLQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPORQ512", argLength: 2, reg: fp21, asm: "VPORQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTQ512", argLength: 1, reg: fp11, asm: "VPOPCNTQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBQ512", argLength: 2, reg: fp21, asm: "VPSUBQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPXORQ512", argLength: 2, reg: fp21, asm: "VPXORQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPABSB128", argLength: 1, reg: fp11, asm: "VPABSB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDB128", argLength: 2, reg: fp21, asm: "VPADDB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPEQB128", argLength: 2, reg: fp21, asm: "VPCMPEQB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPGTB128", argLength: 2, reg: fp21, asm: "VPCMPGTB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSBMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPABSB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPADDB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPEQBMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQB", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTBMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTB", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPMAXSBMasked128", argLength: 3, reg: 
fp2k1fp1, asm: "VPMAXSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTBMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDSBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPADDSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBSBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPSUBSB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPSUBB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSB128", argLength: 2, reg: fp21, asm: "VPMAXSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSB128", argLength: 2, reg: fp21, asm: "VPMINSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTB128", argLength: 1, reg: fp11, asm: "VPOPCNTB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDSB128", argLength: 2, reg: fp21, asm: "VPADDSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBSB128", argLength: 2, reg: fp21, asm: "VPSUBSB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSIGNB128", argLength: 2, reg: fp21, asm: "VPSIGNB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBB128", argLength: 2, reg: fp21, asm: "VPSUBB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSB256", argLength: 1, reg: fp11, asm: "VPABSB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDB256", argLength: 2, reg: fp21, asm: "VPADDB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPEQB256", argLength: 2, reg: fp21, asm: "VPCMPEQB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPGTB256", argLength: 2, reg: fp21, asm: "VPCMPGTB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSBMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPABSB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPADDB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPEQBMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQB", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTBMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTB", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPMAXSBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTBMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDSBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPADDSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBSBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPSUBSB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPSUBB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSB256", argLength: 2, reg: fp21, asm: "VPMAXSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSB256", argLength: 2, reg: fp21, asm: "VPMINSB", 
commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTB256", argLength: 1, reg: fp11, asm: "VPOPCNTB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDSB256", argLength: 2, reg: fp21, asm: "VPADDSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBSB256", argLength: 2, reg: fp21, asm: "VPSUBSB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSIGNB256", argLength: 2, reg: fp21, asm: "VPSIGNB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBB256", argLength: 2, reg: fp21, asm: "VPSUBB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSB512", argLength: 1, reg: fp11, asm: "VPABSB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDB512", argLength: 2, reg: fp21, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPEQB512", argLength: 2, reg: fp2k1, asm: "VPCMPEQB", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTB512", argLength: 2, reg: fp2k1, asm: "VPCMPGTB", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPABSBMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPABSB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPEQBMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQB", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTBMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTB", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPMAXSBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTBMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDSBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPADDSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBSBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPSUBSB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPSUBB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSB512", argLength: 2, reg: fp21, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSB512", argLength: 2, reg: fp21, asm: "VPMINSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTB512", argLength: 1, reg: fp11, asm: "VPOPCNTB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDSB512", argLength: 2, reg: fp21, asm: "VPADDSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBSB512", argLength: 2, reg: fp21, asm: "VPSUBSB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBB512", argLength: 2, reg: fp21, asm: "VPSUBB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPAVGW256", argLength: 2, reg: fp21, asm: "VPAVGW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPAVGWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPAVGW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: 
"VPMINUWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULHUWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUW256", argLength: 2, reg: fp21, asm: "VPMAXUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUW256", argLength: 2, reg: fp21, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULHUW256", argLength: 2, reg: fp21, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPAVGW512", argLength: 2, reg: fp21, asm: "VPAVGW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPAVGWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPAVGW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULHUWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUW512", argLength: 2, reg: fp21, asm: "VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUW512", argLength: 2, reg: fp21, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULHUW512", argLength: 2, reg: fp21, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPAVGW128", argLength: 2, reg: fp21, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPAVGWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULHUWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUW128", argLength: 2, reg: fp21, asm: "VPMAXUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUW128", argLength: 2, reg: fp21, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULHUW128", argLength: 2, reg: fp21, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUD512", argLength: 2, reg: fp21, asm: "VPMAXUD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUD512", argLength: 2, reg: fp21, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUD128", argLength: 2, reg: fp21, asm: "VPMAXUD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUD128", argLength: 2, reg: fp21, asm: 
"VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULUDQ128", argLength: 2, reg: fp21, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUD256", argLength: 2, reg: fp21, asm: "VPMAXUD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUD256", argLength: 2, reg: fp21, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULUDQ256", argLength: 2, reg: fp21, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINUQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULUDQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUQ128", argLength: 2, reg: fp21, asm: "VPMAXUQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUQ128", argLength: 2, reg: fp21, asm: "VPMINUQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULUDQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUQ256", argLength: 2, reg: fp21, asm: "VPMAXUQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUQ256", argLength: 2, reg: fp21, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULUDQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUQ512", argLength: 2, reg: fp21, asm: "VPMAXUQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUQ512", argLength: 2, reg: fp21, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULUDQ512", argLength: 2, reg: fp21, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPAVGB128", argLength: 2, reg: fp21, asm: "VPAVGB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPAVGBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPAVGB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUB128", argLength: 2, reg: fp21, asm: "VPMAXUB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUB128", argLength: 2, reg: fp21, asm: "VPMINUB", commutative: true, typ: "Vec128", 
resultInArg0: false}, + {name: "VPAVGB256", argLength: 2, reg: fp21, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPAVGBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUB256", argLength: 2, reg: fp21, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUB256", argLength: 2, reg: fp21, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPAVGB512", argLength: 2, reg: fp21, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPAVGBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUB512", argLength: 2, reg: fp21, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUB512", argLength: 2, reg: fp21, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VCMPPS512", argLength: 2, reg: fp2k1, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPSMasked512", argLength: 3, reg: fp2k1k1, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPS128", argLength: 2, reg: fp21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VCMPPSMasked128", argLength: 3, reg: fp2k1k1, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPS256", argLength: 2, reg: fp21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VCMPPSMasked256", argLength: 3, reg: fp2k1k1, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPD128", argLength: 2, reg: fp21, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VCMPPDMasked128", argLength: 3, reg: fp2k1k1, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPD256", argLength: 2, reg: fp21, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VCMPPDMasked256", argLength: 3, reg: fp2k1k1, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPD512", argLength: 2, reg: fp2k1, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPDMasked512", argLength: 3, reg: fp2k1k1, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPW256", argLength: 2, reg: fp2k1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPWMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPW512", argLength: 2, reg: fp2k1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPWMasked512", 
argLength: 3, reg: fp2k1k1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPW128", argLength: 2, reg: fp2k1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPWMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPD512", argLength: 2, reg: fp2k1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPDMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPD128", argLength: 2, reg: fp2k1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPDMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPD256", argLength: 2, reg: fp2k1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPDMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQ128", argLength: 2, reg: fp2k1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQ256", argLength: 2, reg: fp2k1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQ512", argLength: 2, reg: fp2k1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPB128", argLength: 2, reg: fp2k1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPBMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPB256", argLength: 2, reg: fp2k1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPBMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPB512", argLength: 2, reg: fp2k1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPBMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUW256", argLength: 2, reg: fp2k1, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUWMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUW512", argLength: 2, reg: fp2k1, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUWMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUW128", argLength: 2, reg: fp2k1, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUWMasked128", argLength: 3, reg: fp2k1k1, asm: 
"VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUD512", argLength: 2, reg: fp2k1, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUDMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUD128", argLength: 2, reg: fp2k1, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUDMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUD256", argLength: 2, reg: fp2k1, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUDMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQ128", argLength: 2, reg: fp2k1, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQ256", argLength: 2, reg: fp2k1, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQ512", argLength: 2, reg: fp2k1, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUB128", argLength: 2, reg: fp2k1, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUBMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUB256", argLength: 2, reg: fp2k1, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUBMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUB512", argLength: 2, reg: fp2k1, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUBMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, } } diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 97a4a4825342db..c7abca814e9a00 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1202,6 +1202,7 @@ const ( OpAMD64VRCP14PS512 OpAMD64VRSQRT14PS512 OpAMD64VDIVPS512 + OpAMD64VADDPSMasked512 OpAMD64VANDPSMasked512 OpAMD64VANDNPSMasked512 OpAMD64VRCP14PSMasked512 @@ -1213,7 +1214,6 @@ const ( OpAMD64VSCALEFPSMasked512 OpAMD64VORPSMasked512 OpAMD64VSQRTPSMasked512 - OpAMD64VADDPSMasked512 OpAMD64VXORPSMasked512 OpAMD64VMAXPS512 OpAMD64VMINPS512 @@ -1222,6 +1222,7 @@ const ( OpAMD64VORPS512 OpAMD64VSQRTPS512 OpAMD64VXORPS512 + OpAMD64VADDPS128 OpAMD64VANDPS128 OpAMD64VANDNPS128 OpAMD64VRCP14PS128 @@ -1248,7 +1249,6 @@ const ( OpAMD64VHADDPS128 OpAMD64VHSUBPS128 OpAMD64VSQRTPS128 - OpAMD64VADDPS128 OpAMD64VXORPS128 OpAMD64VADDPS256 OpAMD64VANDPS256 @@ -1256,6 +1256,7 @@ const ( OpAMD64VRCP14PS256 OpAMD64VRSQRTPS256 OpAMD64VDIVPS256 + OpAMD64VADDPSMasked256 
OpAMD64VANDPSMasked256 OpAMD64VANDNPSMasked256 OpAMD64VRCP14PSMasked256 @@ -1267,7 +1268,6 @@ const ( OpAMD64VSCALEFPSMasked256 OpAMD64VORPSMasked256 OpAMD64VSQRTPSMasked256 - OpAMD64VADDPSMasked256 OpAMD64VXORPSMasked256 OpAMD64VMAXPS256 OpAMD64VMINPS256 @@ -1312,6 +1312,7 @@ const ( OpAMD64VRCP14PD256 OpAMD64VRSQRT14PD256 OpAMD64VDIVPD256 + OpAMD64VADDPDMasked256 OpAMD64VANDPDMasked256 OpAMD64VANDNPDMasked256 OpAMD64VRCP14PDMasked256 @@ -1323,7 +1324,6 @@ const ( OpAMD64VSCALEFPDMasked256 OpAMD64VORPDMasked256 OpAMD64VSQRTPDMasked256 - OpAMD64VADDPDMasked256 OpAMD64VXORPDMasked256 OpAMD64VMAXPD256 OpAMD64VMINPD256 @@ -1334,11 +1334,13 @@ const ( OpAMD64VHSUBPD256 OpAMD64VSQRTPD256 OpAMD64VXORPD256 + OpAMD64VADDPD512 OpAMD64VANDPD512 OpAMD64VANDNPD512 OpAMD64VRCP14PD512 OpAMD64VRSQRT14PD512 OpAMD64VDIVPD512 + OpAMD64VADDPDMasked512 OpAMD64VANDPDMasked512 OpAMD64VANDNPDMasked512 OpAMD64VRCP14PDMasked512 @@ -1350,7 +1352,6 @@ const ( OpAMD64VSCALEFPDMasked512 OpAMD64VORPDMasked512 OpAMD64VSQRTPDMasked512 - OpAMD64VADDPDMasked512 OpAMD64VXORPDMasked512 OpAMD64VMAXPD512 OpAMD64VMINPD512 @@ -1358,10 +1359,11 @@ const ( OpAMD64VSCALEFPD512 OpAMD64VORPD512 OpAMD64VSQRTPD512 - OpAMD64VADDPD512 OpAMD64VXORPD512 OpAMD64VPABSW256 OpAMD64VPADDW256 + OpAMD64VPAND256 + OpAMD64VPANDN256 OpAMD64VPCMPEQW256 OpAMD64VPCMPGTW256 OpAMD64VPABSWMasked256 @@ -1372,6 +1374,7 @@ const ( OpAMD64VPMINSWMasked256 OpAMD64VPMULHWMasked256 OpAMD64VPMULLWMasked256 + OpAMD64VPOPCNTWMasked256 OpAMD64VPADDSWMasked256 OpAMD64VPSUBSWMasked256 OpAMD64VPSUBWMasked256 @@ -1379,33 +1382,49 @@ const ( OpAMD64VPMINSW256 OpAMD64VPMULHW256 OpAMD64VPMULLW256 + OpAMD64VPOR256 + OpAMD64VPHADDW256 OpAMD64VPHSUBW256 + OpAMD64VPOPCNTW256 + OpAMD64VPADDSW256 OpAMD64VPHADDSW256 OpAMD64VPHSUBSW256 OpAMD64VPSUBSW256 OpAMD64VPSIGNW256 OpAMD64VPSUBW256 + OpAMD64VPXOR256 OpAMD64VPABSW512 OpAMD64VPADDW512 OpAMD64VPCMPEQW512 OpAMD64VPCMPGTW512 OpAMD64VPABSWMasked512 + OpAMD64VPADDWMasked512 OpAMD64VPCMPEQWMasked512 OpAMD64VPCMPGTWMasked512 OpAMD64VPMAXSWMasked512 OpAMD64VPMINSWMasked512 OpAMD64VPMULHWMasked512 OpAMD64VPMULLWMasked512 + OpAMD64VPOPCNTWMasked512 + OpAMD64VPADDSWMasked512 + OpAMD64VPSUBSWMasked512 + OpAMD64VPSUBWMasked512 OpAMD64VPMAXSW512 OpAMD64VPMINSW512 OpAMD64VPMULHW512 OpAMD64VPMULLW512 + OpAMD64VPOPCNTW512 + OpAMD64VPADDSW512 OpAMD64VPSUBSW512 + OpAMD64VPSUBW512 OpAMD64VPABSW128 OpAMD64VPADDW128 + OpAMD64VPAND128 + OpAMD64VPANDN128 OpAMD64VPCMPEQW128 OpAMD64VPCMPGTW128 OpAMD64VPABSWMasked128 + OpAMD64VPADDWMasked128 OpAMD64VPCMPEQWMasked128 OpAMD64VPCMPGTWMasked128 OpAMD64VPMAXSWMasked128 @@ -1413,21 +1432,40 @@ const ( OpAMD64VPMULHWMasked128 OpAMD64VPMULLWMasked128 OpAMD64VPOPCNTWMasked128 + OpAMD64VPADDSWMasked128 OpAMD64VPSUBSWMasked128 + OpAMD64VPSUBWMasked128 OpAMD64VPMAXSW128 OpAMD64VPMINSW128 OpAMD64VPMULHW128 OpAMD64VPMULLW128 + OpAMD64VPOR128 + OpAMD64VPHADDW128 OpAMD64VPHSUBW128 + OpAMD64VPOPCNTW128 + OpAMD64VPADDSW128 OpAMD64VPHADDSW128 OpAMD64VPHSUBSW128 + OpAMD64VPSUBSW128 OpAMD64VPSIGNW128 + OpAMD64VPSUBW128 + OpAMD64VPXOR128 OpAMD64VPABSD512 + OpAMD64VPADDD512 OpAMD64VPANDD512 + OpAMD64VPANDND512 + OpAMD64VPCMPEQD512 + OpAMD64VPCMPGTD512 OpAMD64VPABSDMasked512 + OpAMD64VPADDDMasked512 + OpAMD64VPANDDMasked512 + OpAMD64VPANDNDMasked512 + OpAMD64VPCMPEQDMasked512 + OpAMD64VPCMPGTDMasked512 OpAMD64VPMAXSDMasked512 OpAMD64VPMINSDMasked512 OpAMD64VPMULLDMasked512 + OpAMD64VPORDMasked512 OpAMD64VPOPCNTDMasked512 OpAMD64VPSUBDMasked512 OpAMD64VPXORDMasked512 @@ -1435,12 +1473,19 @@ const ( OpAMD64VPMINSD512 
OpAMD64VPMULLD512 OpAMD64VPORD512 + OpAMD64VPOPCNTD512 + OpAMD64VPSUBD512 OpAMD64VPXORD512 OpAMD64VPABSD128 + OpAMD64VPADDD128 OpAMD64VPCMPEQD128 OpAMD64VPCMPGTD128 OpAMD64VPABSDMasked128 + OpAMD64VPADDDMasked128 OpAMD64VPANDDMasked128 + OpAMD64VPANDNDMasked128 + OpAMD64VPCMPEQDMasked128 + OpAMD64VPCMPGTDMasked128 OpAMD64VPMAXSDMasked128 OpAMD64VPMINSDMasked128 OpAMD64VPMULLDMasked128 @@ -1450,31 +1495,45 @@ const ( OpAMD64VPXORDMasked128 OpAMD64VPMAXSD128 OpAMD64VPMINSD128 + OpAMD64VPMULDQ128 OpAMD64VPMULLD128 + OpAMD64VPHADDD128 OpAMD64VPHSUBD128 + OpAMD64VPOPCNTD128 OpAMD64VPSIGND128 OpAMD64VPSUBD128 OpAMD64VPABSD256 - OpAMD64VPAND256 + OpAMD64VPADDD256 OpAMD64VPCMPEQD256 OpAMD64VPCMPGTD256 OpAMD64VPABSDMasked256 + OpAMD64VPADDDMasked256 + OpAMD64VPANDDMasked256 + OpAMD64VPANDNDMasked256 + OpAMD64VPCMPEQDMasked256 + OpAMD64VPCMPGTDMasked256 OpAMD64VPMAXSDMasked256 OpAMD64VPMINSDMasked256 OpAMD64VPMULLDMasked256 OpAMD64VPORDMasked256 + OpAMD64VPOPCNTDMasked256 OpAMD64VPSUBDMasked256 + OpAMD64VPXORDMasked256 OpAMD64VPMAXSD256 OpAMD64VPMINSD256 + OpAMD64VPMULDQ256 OpAMD64VPMULLD256 + OpAMD64VPHADDD256 OpAMD64VPHSUBD256 OpAMD64VPOPCNTD256 OpAMD64VPSIGND256 OpAMD64VPSUBD256 OpAMD64VPABSQ128 + OpAMD64VPADDQ128 OpAMD64VPCMPEQQ128 OpAMD64VPCMPGTQ128 OpAMD64VPABSQMasked128 + OpAMD64VPADDQMasked128 OpAMD64VPANDQMasked128 OpAMD64VPANDNQMasked128 OpAMD64VPCMPEQQMasked128 @@ -1483,17 +1542,21 @@ const ( OpAMD64VPMINSQMasked128 OpAMD64VPMULDQMasked128 OpAMD64VPMULLQMasked128 + OpAMD64VPORQMasked128 + OpAMD64VPOPCNTQMasked128 OpAMD64VPSUBQMasked128 + OpAMD64VPXORQMasked128 OpAMD64VPMAXSQ128 OpAMD64VPMINSQ128 - OpAMD64VPMULDQ128 OpAMD64VPMULLQ128 - OpAMD64VPOR128 + OpAMD64VPOPCNTQ128 + OpAMD64VPSUBQ128 OpAMD64VPABSQ256 OpAMD64VPADDQ256 OpAMD64VPCMPEQQ256 OpAMD64VPCMPGTQ256 OpAMD64VPABSQMasked256 + OpAMD64VPADDQMasked256 OpAMD64VPANDQMasked256 OpAMD64VPANDNQMasked256 OpAMD64VPCMPEQQMasked256 @@ -1505,19 +1568,21 @@ const ( OpAMD64VPORQMasked256 OpAMD64VPOPCNTQMasked256 OpAMD64VPSUBQMasked256 + OpAMD64VPXORQMasked256 OpAMD64VPMAXSQ256 OpAMD64VPMINSQ256 - OpAMD64VPMULDQ256 OpAMD64VPMULLQ256 - OpAMD64VPOR256 OpAMD64VPOPCNTQ256 OpAMD64VPSUBQ256 OpAMD64VPABSQ512 + OpAMD64VPADDQ512 OpAMD64VPANDQ512 + OpAMD64VPANDNQ512 OpAMD64VPCMPEQQ512 OpAMD64VPCMPGTQ512 OpAMD64VPABSQMasked512 OpAMD64VPADDQMasked512 + OpAMD64VPANDQMasked512 OpAMD64VPANDNQMasked512 OpAMD64VPCMPEQQMasked512 OpAMD64VPCMPGTQMasked512 @@ -1525,48 +1590,78 @@ const ( OpAMD64VPMINSQMasked512 OpAMD64VPMULDQMasked512 OpAMD64VPMULLQMasked512 + OpAMD64VPORQMasked512 + OpAMD64VPOPCNTQMasked512 + OpAMD64VPSUBQMasked512 + OpAMD64VPXORQMasked512 OpAMD64VPMAXSQ512 OpAMD64VPMINSQ512 OpAMD64VPMULDQ512 OpAMD64VPMULLQ512 + OpAMD64VPORQ512 OpAMD64VPOPCNTQ512 OpAMD64VPSUBQ512 OpAMD64VPXORQ512 OpAMD64VPABSB128 OpAMD64VPADDB128 - OpAMD64VPAND128 OpAMD64VPCMPEQB128 OpAMD64VPCMPGTB128 OpAMD64VPABSBMasked128 OpAMD64VPADDBMasked128 + OpAMD64VPCMPEQBMasked128 + OpAMD64VPCMPGTBMasked128 OpAMD64VPMAXSBMasked128 OpAMD64VPMINSBMasked128 + OpAMD64VPOPCNTBMasked128 + OpAMD64VPADDSBMasked128 OpAMD64VPSUBSBMasked128 + OpAMD64VPSUBBMasked128 OpAMD64VPMAXSB128 OpAMD64VPMINSB128 + OpAMD64VPOPCNTB128 + OpAMD64VPADDSB128 + OpAMD64VPSUBSB128 OpAMD64VPSIGNB128 OpAMD64VPSUBB128 OpAMD64VPABSB256 OpAMD64VPADDB256 - OpAMD64VPANDN256 OpAMD64VPCMPEQB256 OpAMD64VPCMPGTB256 OpAMD64VPABSBMasked256 + OpAMD64VPADDBMasked256 + OpAMD64VPCMPEQBMasked256 + OpAMD64VPCMPGTBMasked256 OpAMD64VPMAXSBMasked256 OpAMD64VPMINSBMasked256 + OpAMD64VPOPCNTBMasked256 + OpAMD64VPADDSBMasked256 
OpAMD64VPSUBSBMasked256 + OpAMD64VPSUBBMasked256 OpAMD64VPMAXSB256 OpAMD64VPMINSB256 OpAMD64VPOPCNTB256 + OpAMD64VPADDSB256 + OpAMD64VPSUBSB256 OpAMD64VPSIGNB256 + OpAMD64VPSUBB256 OpAMD64VPABSB512 + OpAMD64VPADDB512 + OpAMD64VPCMPEQB512 + OpAMD64VPCMPGTB512 OpAMD64VPABSBMasked512 + OpAMD64VPADDBMasked512 + OpAMD64VPCMPEQBMasked512 + OpAMD64VPCMPGTBMasked512 OpAMD64VPMAXSBMasked512 OpAMD64VPMINSBMasked512 + OpAMD64VPOPCNTBMasked512 OpAMD64VPADDSBMasked512 + OpAMD64VPSUBSBMasked512 + OpAMD64VPSUBBMasked512 OpAMD64VPMAXSB512 OpAMD64VPMINSB512 OpAMD64VPOPCNTB512 + OpAMD64VPADDSB512 OpAMD64VPSUBSB512 OpAMD64VPSUBB512 OpAMD64VPAVGW256 @@ -1574,152 +1669,73 @@ const ( OpAMD64VPMAXUWMasked256 OpAMD64VPMINUWMasked256 OpAMD64VPMULHUWMasked256 - OpAMD64VPOPCNTWMasked256 OpAMD64VPMAXUW256 OpAMD64VPMINUW256 OpAMD64VPMULHUW256 - OpAMD64VPHADDW256 - OpAMD64VPOPCNTW256 - OpAMD64VPADDSW256 OpAMD64VPAVGW512 - OpAMD64VPADDWMasked512 OpAMD64VPAVGWMasked512 OpAMD64VPMAXUWMasked512 OpAMD64VPMINUWMasked512 OpAMD64VPMULHUWMasked512 - OpAMD64VPOPCNTWMasked512 - OpAMD64VPADDSWMasked512 - OpAMD64VPSUBSWMasked512 - OpAMD64VPSUBWMasked512 OpAMD64VPMAXUW512 OpAMD64VPMINUW512 OpAMD64VPMULHUW512 - OpAMD64VPOPCNTW512 - OpAMD64VPADDSW512 - OpAMD64VPSUBW512 OpAMD64VPAVGW128 - OpAMD64VPADDWMasked128 OpAMD64VPAVGWMasked128 OpAMD64VPMAXUWMasked128 OpAMD64VPMINUWMasked128 OpAMD64VPMULHUWMasked128 - OpAMD64VPADDSWMasked128 - OpAMD64VPSUBWMasked128 OpAMD64VPMAXUW128 OpAMD64VPMINUW128 OpAMD64VPMULHUW128 - OpAMD64VPHADDW128 - OpAMD64VPOPCNTW128 - OpAMD64VPADDSW128 - OpAMD64VPSUBSW128 - OpAMD64VPSUBW128 - OpAMD64VPADDD512 - OpAMD64VPANDND512 - OpAMD64VPADDDMasked512 - OpAMD64VPANDDMasked512 - OpAMD64VPANDNDMasked512 OpAMD64VPMAXUDMasked512 OpAMD64VPMINUDMasked512 - OpAMD64VPORDMasked512 OpAMD64VPMAXUD512 OpAMD64VPMINUD512 - OpAMD64VPOPCNTD512 - OpAMD64VPSUBD512 - OpAMD64VPADDD128 - OpAMD64VPADDDMasked128 - OpAMD64VPANDNDMasked128 OpAMD64VPMAXUDMasked128 OpAMD64VPMINUDMasked128 OpAMD64VPMAXUD128 OpAMD64VPMINUD128 - OpAMD64VPHADDD128 - OpAMD64VPOPCNTD128 - OpAMD64VPADDD256 - OpAMD64VPADDDMasked256 - OpAMD64VPANDDMasked256 - OpAMD64VPANDNDMasked256 + OpAMD64VPMULUDQ128 OpAMD64VPMAXUDMasked256 OpAMD64VPMINUDMasked256 - OpAMD64VPOPCNTDMasked256 - OpAMD64VPXORDMasked256 OpAMD64VPMAXUD256 OpAMD64VPMINUD256 OpAMD64VPMULUDQ256 - OpAMD64VPHADDD256 - OpAMD64VPXOR256 - OpAMD64VPADDQ128 - OpAMD64VPADDQMasked128 OpAMD64VPMAXUQMasked128 OpAMD64VPMINUQMasked128 OpAMD64VPMULUDQMasked128 - OpAMD64VPORQMasked128 - OpAMD64VPOPCNTQMasked128 - OpAMD64VPXORQMasked128 OpAMD64VPMAXUQ128 OpAMD64VPMINUQ128 - OpAMD64VPMULUDQ128 - OpAMD64VPOPCNTQ128 - OpAMD64VPSUBQ128 - OpAMD64VPXOR128 - OpAMD64VPADDQMasked256 OpAMD64VPMAXUQMasked256 OpAMD64VPMINUQMasked256 OpAMD64VPMULUDQMasked256 - OpAMD64VPXORQMasked256 OpAMD64VPMAXUQ256 OpAMD64VPMINUQ256 - OpAMD64VPADDQ512 - OpAMD64VPANDNQ512 - OpAMD64VPANDQMasked512 OpAMD64VPMAXUQMasked512 OpAMD64VPMINUQMasked512 OpAMD64VPMULUDQMasked512 - OpAMD64VPORQMasked512 - OpAMD64VPOPCNTQMasked512 - OpAMD64VPSUBQMasked512 - OpAMD64VPXORQMasked512 OpAMD64VPMAXUQ512 OpAMD64VPMINUQ512 OpAMD64VPMULUDQ512 - OpAMD64VPORQ512 - OpAMD64VPANDN128 OpAMD64VPAVGB128 OpAMD64VPAVGBMasked128 OpAMD64VPMAXUBMasked128 OpAMD64VPMINUBMasked128 - OpAMD64VPOPCNTBMasked128 - OpAMD64VPADDSBMasked128 - OpAMD64VPSUBBMasked128 OpAMD64VPMAXUB128 OpAMD64VPMINUB128 - OpAMD64VPOPCNTB128 - OpAMD64VPADDSB128 - OpAMD64VPSUBSB128 OpAMD64VPAVGB256 - OpAMD64VPADDBMasked256 OpAMD64VPAVGBMasked256 OpAMD64VPMAXUBMasked256 OpAMD64VPMINUBMasked256 - OpAMD64VPOPCNTBMasked256 
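
In the opInfo entries above and below, each input and output is a pair of (argument index, register bitmask), and the bit position of each register follows the register allocator's numbering. The two literals that recur in these SIMD entries decode to the register lists given in their comments; the standalone snippet below just checks that arithmetic, assuming the 16 general-purpose registers occupy bits 0-15 so that X0 lands at bit 16 and K0 at bit 32.

package main

import "fmt"

func main() {
	// Assumed AMD64 register numbering: GPRs in bits 0-15, X0 at bit 16,
	// K0 at bit 32. Build the two masks that appear in the table.
	var x0to14 uint64
	for r := 16; r <= 30; r++ { // X0 ... X14
		x0to14 |= 1 << r
	}
	var k1to7 uint64
	for r := 33; r <= 39; r++ { // K1 ... K7
		k1to7 |= 1 << r
	}
	fmt.Println(x0to14) // 2147418112, the vector-register mask in the table
	fmt.Println(k1to7)  // 1090921693184, the K1-K7 mask-register mask
}
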
- OpAMD64VPADDSBMasked256 - OpAMD64VPSUBBMasked256 OpAMD64VPMAXUB256 OpAMD64VPMINUB256 - OpAMD64VPADDSB256 - OpAMD64VPSUBSB256 - OpAMD64VPSUBB256 - OpAMD64VPADDB512 OpAMD64VPAVGB512 - OpAMD64VPADDBMasked512 OpAMD64VPAVGBMasked512 OpAMD64VPMAXUBMasked512 OpAMD64VPMINUBMasked512 - OpAMD64VPOPCNTBMasked512 - OpAMD64VPSUBSBMasked512 - OpAMD64VPSUBBMasked512 OpAMD64VPMAXUB512 OpAMD64VPMINUB512 - OpAMD64VPADDSB512 OpAMD64VCMPPS512 OpAMD64VCMPPSMasked512 OpAMD64VCMPPS128 @@ -1734,26 +1750,26 @@ const ( OpAMD64VCMPPDMasked512 OpAMD64VPCMPW256 OpAMD64VPCMPWMasked256 - OpAMD64VPCMPWMasked512 OpAMD64VPCMPW512 + OpAMD64VPCMPWMasked512 OpAMD64VPCMPW128 OpAMD64VPCMPWMasked128 OpAMD64VPCMPD512 OpAMD64VPCMPDMasked512 - OpAMD64VPCMPDMasked128 OpAMD64VPCMPD128 + OpAMD64VPCMPDMasked128 OpAMD64VPCMPD256 OpAMD64VPCMPDMasked256 OpAMD64VPCMPQ128 OpAMD64VPCMPQMasked128 OpAMD64VPCMPQ256 OpAMD64VPCMPQMasked256 - OpAMD64VPCMPQMasked512 OpAMD64VPCMPQ512 - OpAMD64VPCMPBMasked128 + OpAMD64VPCMPQMasked512 OpAMD64VPCMPB128 - OpAMD64VPCMPBMasked256 + OpAMD64VPCMPBMasked128 OpAMD64VPCMPB256 + OpAMD64VPCMPBMasked256 OpAMD64VPCMPB512 OpAMD64VPCMPBMasked512 OpAMD64VPCMPUW256 @@ -1762,16 +1778,16 @@ const ( OpAMD64VPCMPUWMasked512 OpAMD64VPCMPUW128 OpAMD64VPCMPUWMasked128 - OpAMD64VPCMPUDMasked512 OpAMD64VPCMPUD512 + OpAMD64VPCMPUDMasked512 OpAMD64VPCMPUD128 OpAMD64VPCMPUDMasked128 - OpAMD64VPCMPUDMasked256 OpAMD64VPCMPUD256 + OpAMD64VPCMPUDMasked256 OpAMD64VPCMPUQ128 OpAMD64VPCMPUQMasked128 - OpAMD64VPCMPUQMasked256 OpAMD64VPCMPUQ256 + OpAMD64VPCMPUQMasked256 OpAMD64VPCMPUQ512 OpAMD64VPCMPUQMasked512 OpAMD64VPCMPUB128 @@ -17758,6 +17774,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VADDPSMasked512", + argLen: 3, + commutative: true, + asm: x86.AVADDPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VANDPSMasked512", argLen: 3, @@ -17926,21 +17958,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VADDPSMasked512", - argLen: 3, - asm: x86.AVADDPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VXORPSMasked512", argLen: 3, @@ -18059,6 +18076,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VADDPS128", + argLen: 2, + commutative: true, + asm: x86.AVADDPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VANDPS128", argLen: 2, @@ -18444,20 +18476,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VADDPS128", - argLen: 2, - asm: x86.AVADDPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, 
{ name: "VXORPS128", argLen: 2, @@ -18558,6 +18576,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VADDPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVADDPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VANDPSMasked256", argLen: 3, @@ -18726,21 +18760,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VADDPSMasked256", - argLen: 3, - asm: x86.AVADDPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VXORPSMasked256", argLen: 3, @@ -19387,6 +19406,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VADDPDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVADDPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VANDPDMasked256", argLen: 3, @@ -19555,21 +19590,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VADDPDMasked256", - argLen: 3, - asm: x86.AVADDPD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VXORPDMasked256", argLen: 3, @@ -19716,6 +19736,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VADDPD512", + argLen: 2, + commutative: true, + asm: x86.AVADDPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VANDPD512", argLen: 2, @@ -19786,6 +19821,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VADDPDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVADDPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VANDPDMasked512", argLen: 3, @@ -19954,21 +20005,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VADDPDMasked512", - argLen: 3, - asm: x86.AVADDPD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VXORPDMasked512", argLen: 3, @@ -20073,9 +20109,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPD512", - argLen: 2, - asm: x86.AVADDPD, + name: "VXORPD512", + argLen: 2, + commutative: true, + asm: x86.AVXORPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20087,10 +20124,23 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VXORPD512", + name: "VPABSW256", + argLen: 1, + asm: x86.AVPABSW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPADDW256", argLen: 2, commutative: true, - asm: x86.AVXORPD, + asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20102,12 +20152,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSW256", - argLen: 1, - asm: x86.AVPABSW, + name: "VPAND256", + argLen: 2, + commutative: true, + asm: x86.AVPAND, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20115,10 +20167,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDW256", + name: "VPANDN256", argLen: 2, commutative: true, - asm: x86.AVPADDW, + asm: x86.AVPANDN, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20283,6 +20335,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPOPCNTWMasked256", + argLen: 2, + asm: x86.AVPOPCNTW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPADDSWMasked256", argLen: 3, @@ -20389,6 +20455,35 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPOR256", + argLen: 2, + commutative: true, + asm: x86.AVPOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPHADDW256", + argLen: 2, + asm: x86.AVPHADDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPHSUBW256", argLen: 2, @@ -20403,6 +20498,34 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPOPCNTW256", + argLen: 1, + asm: x86.AVPOPCNTW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPADDSW256", + argLen: 2, + commutative: true, + asm: x86.AVPADDSW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPHADDSW256", argLen: 2, @@ -20473,6 +20596,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPXOR256", + argLen: 2, + commutative: true, + asm: x86.AVPXOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPABSW512", argLen: 1, @@ -20544,6 +20682,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPADDWMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPADDW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPEQWMasked512", argLen: 3, @@ -20640,14 +20794,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSW512", - argLen: 2, - commutative: true, - asm: x86.AVPMAXSW, + name: "VPOPCNTWMasked512", + argLen: 2, + asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20655,14 +20808,75 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSW512", - argLen: 2, + name: "VPADDSWMasked512", + argLen: 3, commutative: true, - asm: x86.AVPMINSW, + asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSUBSWMasked512", + argLen: 3, + asm: x86.AVPSUBSW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSUBWMasked512", + argLen: 3, + asm: x86.AVPSUBW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMAXSW512", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMINSW512", + argLen: 2, + commutative: true, + asm: x86.AVPMINSW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20699,6 +20913,34 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPOPCNTW512", + argLen: 1, + asm: x86.AVPOPCNTW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPADDSW512", + argLen: 2, + commutative: true, + asm: x86.AVPADDSW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSUBSW512", argLen: 2, @@ -20713,6 +20955,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSUBW512", + argLen: 2, + asm: x86.AVPSUBW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPABSW128", argLen: 1, @@ -20741,6 +20997,36 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPAND128", + argLen: 2, + commutative: true, + asm: x86.AVPAND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPANDN128", + argLen: 2, + commutative: true, + asm: x86.AVPANDN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPEQW128", argLen: 2, @@ -20784,6 +21070,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPADDWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPADDW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPEQWMasked128", argLen: 3, @@ -20893,6 +21195,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPADDSWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPADDSW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSUBSWMasked128", argLen: 3, @@ -20908,6 +21226,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSUBWMasked128", + argLen: 3, + asm: x86.AVPSUBW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMAXSW128", argLen: 2, @@ -20969,9 +21302,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHSUBW128", - argLen: 2, - asm: x86.AVPHSUBW, + name: "VPOR128", + argLen: 2, + commutative: true, + asm: x86.AVPOR, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20983,9 +21317,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHADDSW128", + name: "VPHADDW128", argLen: 2, - asm: x86.AVPHADDSW, + asm: x86.AVPHADDW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20997,9 +21331,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHSUBSW128", + name: "VPHSUBW128", argLen: 2, - asm: x86.AVPHSUBSW, + asm: x86.AVPHSUBW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21011,13 +21345,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSIGNW128", - argLen: 2, - asm: x86.AVPSIGNW, + name: "VPOPCNTW128", + argLen: 1, + asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21025,12 +21358,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSD512", - argLen: 1, - asm: x86.AVPABSD, + name: "VPADDSW128", + argLen: 2, + commutative: true, + asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21038,10 +21373,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDD512", - argLen: 2, - commutative: true, - asm: x86.AVPANDD, + name: "VPHADDSW128", + argLen: 2, + asm: x86.AVPHADDSW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21053,13 +21387,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSDMasked512", + name: "VPHSUBSW128", argLen: 2, - asm: x86.AVPABSD, + asm: x86.AVPHSUBSW, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21067,15 +21401,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMAXSD, + name: "VPSUBSW128", + argLen: 2, + asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21083,15 +21415,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMINSD, + name: "VPSIGNW128", + argLen: 2, + asm: x86.AVPSIGNW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21099,15 +21429,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMULLD, + name: "VPSUBW128", + argLen: 2, + asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21115,13 +21443,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTDMasked512", - argLen: 2, - asm: x86.AVPOPCNTD, + name: "VPXOR128", + argLen: 2, + commutative: true, + asm: x86.AVPXOR, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21129,14 +21458,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBDMasked512", - argLen: 3, - asm: x86.AVPSUBD, + name: "VPABSD512", + argLen: 1, + asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21144,15 +21471,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORDMasked512", - argLen: 3, + name: "VPADDD512", + argLen: 2, commutative: true, - asm: x86.AVPXORD, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21160,10 +21486,10 @@ var opcodeTable = 
[...]opInfo{ }, }, { - name: "VPMAXSD512", + name: "VPANDD512", argLen: 2, commutative: true, - asm: x86.AVPMAXSD, + asm: x86.AVPANDD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21175,10 +21501,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSD512", + name: "VPANDND512", argLen: 2, commutative: true, - asm: x86.AVPMINSD, + asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21190,44 +21516,42 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLD512", + name: "VPCMPEQD512", argLen: 2, commutative: true, - asm: x86.AVPMULLD, + asm: x86.AVPCMPEQD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPORD512", - argLen: 2, - commutative: true, - asm: x86.AVPORD, + name: "VPCMPGTD512", + argLen: 2, + asm: x86.AVPCMPGTD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPXORD512", - argLen: 2, - commutative: true, - asm: x86.AVPXORD, + name: "VPABSDMasked512", + argLen: 2, + asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21235,12 +21559,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSD128", - argLen: 1, - asm: x86.AVPABSD, + name: "VPADDDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21248,14 +21575,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQD128", - argLen: 2, + name: "VPANDDMasked512", + argLen: 3, commutative: true, - asm: x86.AVPCMPEQD, + asm: x86.AVPANDD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21263,13 +21591,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPGTD128", - argLen: 2, - asm: x86.AVPCMPGTD, + name: "VPANDNDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21277,24 +21607,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSDMasked128", - argLen: 2, - asm: x86.AVPABSD, + name: "VPCMPEQDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPCMPEQD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPANDDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPANDD, + name: "VPCMPGTDMasked512", + argLen: 3, + asm: x86.AVPCMPGTD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -21302,12 +21633,12 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPMAXSDMasked128", + name: "VPMAXSDMasked512", argLen: 3, commutative: true, asm: x86.AVPMAXSD, @@ -21323,7 +21654,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSDMasked128", + name: "VPMINSDMasked512", argLen: 3, commutative: true, asm: x86.AVPMINSD, @@ -21339,7 +21670,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLDMasked128", + name: "VPMULLDMasked512", argLen: 3, commutative: true, asm: x86.AVPMULLD, @@ -21355,7 +21686,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORDMasked128", + name: "VPORDMasked512", argLen: 3, commutative: true, asm: x86.AVPORD, @@ -21371,7 +21702,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTDMasked128", + name: "VPOPCNTDMasked512", argLen: 2, asm: x86.AVPOPCNTD, reg: regInfo{ @@ -21385,7 +21716,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBDMasked128", + name: "VPSUBDMasked512", argLen: 3, asm: x86.AVPSUBD, reg: regInfo{ @@ -21400,7 +21731,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORDMasked128", + name: "VPXORDMasked512", argLen: 3, commutative: true, asm: x86.AVPXORD, @@ -21416,7 +21747,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSD128", + name: "VPMAXSD512", argLen: 2, commutative: true, asm: x86.AVPMAXSD, @@ -21431,7 +21762,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSD128", + name: "VPMINSD512", argLen: 2, commutative: true, asm: x86.AVPMINSD, @@ -21446,7 +21777,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLD128", + name: "VPMULLD512", argLen: 2, commutative: true, asm: x86.AVPMULLD, @@ -21461,9 +21792,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHSUBD128", - argLen: 2, - asm: x86.AVPHSUBD, + name: "VPORD512", + argLen: 2, + commutative: true, + asm: x86.AVPORD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21475,13 +21807,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSIGND128", - argLen: 2, - asm: x86.AVPSIGND, + name: "VPOPCNTD512", + 
argLen: 1, + asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21489,7 +21820,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBD128", + name: "VPSUBD512", argLen: 2, asm: x86.AVPSUBD, reg: regInfo{ @@ -21503,7 +21834,22 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSD256", + name: "VPXORD512", + argLen: 2, + commutative: true, + asm: x86.AVPXORD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPABSD128", argLen: 1, asm: x86.AVPABSD, reg: regInfo{ @@ -21516,10 +21862,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAND256", + name: "VPADDD128", argLen: 2, commutative: true, - asm: x86.AVPAND, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21531,7 +21877,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQD256", + name: "VPCMPEQD128", argLen: 2, commutative: true, asm: x86.AVPCMPEQD, @@ -21546,7 +21892,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPGTD256", + name: "VPCMPGTD128", argLen: 2, asm: x86.AVPCMPGTD, reg: regInfo{ @@ -21560,7 +21906,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSDMasked256", + name: "VPABSDMasked128", argLen: 2, asm: x86.AVPABSD, reg: regInfo{ @@ -21574,10 +21920,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSDMasked256", + name: "VPADDDMasked128", argLen: 3, commutative: true, - asm: x86.AVPMAXSD, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -21590,10 +21936,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSDMasked256", + name: "VPANDDMasked128", argLen: 3, commutative: true, - asm: x86.AVPMINSD, + asm: x86.AVPANDD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -21606,10 +21952,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLDMasked256", + name: "VPANDNDMasked128", argLen: 3, commutative: true, - asm: x86.AVPMULLD, + asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -21622,10 +21968,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORDMasked256", + name: "VPCMPEQDMasked128", argLen: 3, commutative: true, - asm: x86.AVPORD, + asm: x86.AVPCMPEQD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -21633,14 +21979,14 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSUBDMasked256", + name: "VPCMPGTDMasked128", argLen: 3, - asm: x86.AVPSUBD, + asm: x86.AVPCMPGTD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -21648,19 +21994,20 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 
K7 }, }, }, { - name: "VPMAXSD256", - argLen: 2, + name: "VPMAXSDMasked128", + argLen: 3, commutative: true, asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21668,14 +22015,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSD256", - argLen: 2, + name: "VPMINSDMasked128", + argLen: 3, commutative: true, asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21683,14 +22031,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLD256", - argLen: 2, + name: "VPMULLDMasked128", + argLen: 3, commutative: true, asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21698,13 +22047,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHSUBD256", - argLen: 2, - asm: x86.AVPHSUBD, + name: "VPORDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPORD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21712,12 +22063,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTD256", - argLen: 1, + name: "VPOPCNTDMasked128", + argLen: 2, asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21725,94 +22077,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSIGND256", - argLen: 2, - asm: x86.AVPSIGND, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSUBD256", - argLen: 2, + name: "VPSUBDMasked128", + argLen: 3, asm: x86.AVPSUBD, - reg: 
regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPABSQ128", - argLen: 1, - asm: x86.AVPABSQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPCMPEQQ128", - argLen: 2, - commutative: true, - asm: x86.AVPCMPEQQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPCMPGTQ128", - argLen: 2, - asm: x86.AVPCMPGTQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPABSQMasked128", - argLen: 2, - asm: x86.AVPABSQ, - reg: regInfo{ - inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPANDQMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPANDQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -21825,10 +22092,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNQMasked128", + name: "VPXORDMasked128", argLen: 3, commutative: true, - asm: x86.AVPANDNQ, + asm: x86.AVPXORD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -21841,46 +22108,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQQMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPCMPEQQ, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPCMPGTQMasked128", - argLen: 3, - asm: x86.AVPCMPGTQ, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPMAXSQMasked128", - argLen: 3, + name: "VPMAXSD128", + argLen: 2, commutative: true, - asm: x86.AVPMAXSQ, + asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21888,15 
+22123,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSQMasked128", - argLen: 3, + name: "VPMINSD128", + argLen: 2, commutative: true, - asm: x86.AVPMINSQ, + asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21904,15 +22138,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULDQMasked128", - argLen: 3, + name: "VPMULDQ128", + argLen: 2, commutative: true, asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21920,30 +22153,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQMasked128", - argLen: 3, + name: "VPMULLD128", + argLen: 2, commutative: true, - asm: x86.AVPMULLQ, + asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSUBQMasked128", - argLen: 3, - asm: x86.AVPSUBQ, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21951,10 +22168,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSQ128", - argLen: 2, - commutative: true, - asm: x86.AVPMAXSQ, + name: "VPHADDD128", + argLen: 2, + asm: x86.AVPHADDD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21966,10 +22182,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSQ128", - argLen: 2, - commutative: true, - asm: x86.AVPMINSQ, + name: "VPHSUBD128", + argLen: 2, + asm: x86.AVPHSUBD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21981,14 +22196,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULDQ128", - argLen: 2, - commutative: true, - asm: x86.AVPMULDQ, + name: "VPOPCNTD128", + argLen: 1, + asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21996,10 +22209,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQ128", - argLen: 2, - commutative: true, - asm: x86.AVPMULLQ, + name: "VPSIGND128", + argLen: 2, 
+ asm: x86.AVPSIGND, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22011,10 +22223,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOR128", - argLen: 2, - commutative: true, - asm: x86.AVPOR, + name: "VPSUBD128", + argLen: 2, + asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22026,9 +22237,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSQ256", + name: "VPABSD256", argLen: 1, - asm: x86.AVPABSQ, + asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22039,10 +22250,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDQ256", + name: "VPADDD256", argLen: 2, commutative: true, - asm: x86.AVPADDQ, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22054,10 +22265,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQQ256", + name: "VPCMPEQD256", argLen: 2, commutative: true, - asm: x86.AVPCMPEQQ, + asm: x86.AVPCMPEQD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22069,9 +22280,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPGTQ256", + name: "VPCMPGTD256", argLen: 2, - asm: x86.AVPCMPGTQ, + asm: x86.AVPCMPGTD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22083,9 +22294,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSQMasked256", + name: "VPABSDMasked256", argLen: 2, - asm: x86.AVPABSQ, + asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22097,10 +22308,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDQMasked256", + name: "VPADDDMasked256", argLen: 3, commutative: true, - asm: x86.AVPANDQ, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22113,10 +22324,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNQMasked256", + name: "VPANDDMasked256", argLen: 3, commutative: true, - asm: x86.AVPANDNQ, + asm: x86.AVPANDD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22129,10 +22340,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQQMasked256", + name: "VPANDNDMasked256", argLen: 3, commutative: true, - asm: x86.AVPCMPEQQ, + asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22140,14 +22351,15 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPGTQMasked256", - argLen: 3, - asm: x86.AVPCMPGTQ, + name: "VPCMPEQDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPCMPEQD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22160,10 +22372,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSQMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMAXSQ, + name: "VPCMPGTDMasked256", + argLen: 3, + asm: x86.AVPCMPGTD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22171,15 +22382,15 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPMINSQMasked256", + name: "VPMAXSDMasked256", argLen: 3, commutative: true, - asm: x86.AVPMINSQ, + asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22192,10 +22403,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULDQMasked256", + name: "VPMINSDMasked256", argLen: 3, commutative: true, - asm: x86.AVPMULDQ, + asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22208,10 +22419,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQMasked256", + name: "VPMULLDMasked256", argLen: 3, commutative: true, - asm: x86.AVPMULLQ, + asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22224,10 +22435,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORQMasked256", + name: "VPORDMasked256", argLen: 3, commutative: true, - asm: x86.AVPORQ, + asm: x86.AVPORD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22240,9 +22451,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTQMasked256", + name: "VPOPCNTDMasked256", argLen: 2, - asm: x86.AVPOPCNTQ, + asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22254,9 +22465,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBQMasked256", + name: "VPSUBDMasked256", argLen: 3, - asm: x86.AVPSUBQ, + asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22269,10 +22480,26 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSQ256", + name: "VPXORDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPXORD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMAXSD256", argLen: 2, commutative: true, - asm: x86.AVPMAXSQ, + asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22284,10 +22511,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSQ256", + name: "VPMINSD256", argLen: 2, commutative: true, - asm: x86.AVPMINSQ, + asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22314,10 +22541,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQ256", + name: "VPMULLD256", argLen: 2, commutative: true, - asm: x86.AVPMULLQ, + asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22329,10 +22556,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOR256", - argLen: 2, - commutative: true, - asm: x86.AVPOR, + name: "VPHADDD256", + argLen: 2, + asm: x86.AVPHADDD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22344,9 +22570,23 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTQ256", + name: "VPHSUBD256", + argLen: 2, + asm: x86.AVPHSUBD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: 
[]outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPOPCNTD256", argLen: 1, - asm: x86.AVPOPCNTQ, + asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22357,9 +22597,23 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBQ256", + name: "VPSIGND256", + argLen: 2, + asm: x86.AVPSIGND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSUBD256", argLen: 2, - asm: x86.AVPSUBQ, + asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22371,7 +22625,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSQ512", + name: "VPABSQ128", argLen: 1, asm: x86.AVPABSQ, reg: regInfo{ @@ -22384,10 +22638,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDQ512", + name: "VPADDQ128", argLen: 2, commutative: true, - asm: x86.AVPANDQ, + asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22399,7 +22653,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQQ512", + name: "VPCMPEQQ128", argLen: 2, commutative: true, asm: x86.AVPCMPEQQ, @@ -22409,12 +22663,12 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPGTQ512", + name: "VPCMPGTQ128", argLen: 2, asm: x86.AVPCMPGTQ, reg: regInfo{ @@ -22428,7 +22682,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSQMasked512", + name: "VPABSQMasked128", argLen: 2, asm: x86.AVPABSQ, reg: regInfo{ @@ -22442,7 +22696,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDQMasked512", + name: "VPADDQMasked128", argLen: 3, commutative: true, asm: x86.AVPADDQ, @@ -22458,7 +22712,23 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNQMasked512", + name: "VPANDQMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPANDQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPANDNQMasked128", argLen: 3, commutative: true, asm: x86.AVPANDNQ, @@ -22474,7 +22744,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQQMasked512", + name: "VPCMPEQQMasked128", argLen: 3, commutative: true, asm: x86.AVPCMPEQQ, @@ -22490,7 +22760,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPGTQMasked512", + name: "VPCMPGTQMasked128", argLen: 3, asm: x86.AVPCMPGTQ, reg: regInfo{ @@ -22505,7 +22775,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSQMasked512", + name: "VPMAXSQMasked128", argLen: 3, commutative: true, asm: x86.AVPMAXSQ, @@ -22521,7 +22791,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSQMasked512", + name: "VPMINSQMasked128", argLen: 3, commutative: true, asm: x86.AVPMINSQ, @@ -22537,7 +22807,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: 
"VPMULDQMasked512", + name: "VPMULDQMasked128", argLen: 3, commutative: true, asm: x86.AVPMULDQ, @@ -22553,7 +22823,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQMasked512", + name: "VPMULLQMasked128", argLen: 3, commutative: true, asm: x86.AVPMULLQ, @@ -22569,173 +22839,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSQ512", - argLen: 2, - commutative: true, - asm: x86.AVPMAXSQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPMINSQ512", - argLen: 2, - commutative: true, - asm: x86.AVPMINSQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPMULDQ512", - argLen: 2, - commutative: true, - asm: x86.AVPMULDQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPMULLQ512", - argLen: 2, - commutative: true, - asm: x86.AVPMULLQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPOPCNTQ512", - argLen: 1, - asm: x86.AVPOPCNTQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSUBQ512", - argLen: 2, - asm: x86.AVPSUBQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPXORQ512", - argLen: 2, - commutative: true, - asm: x86.AVPXORQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPABSB128", - argLen: 1, - asm: x86.AVPABSB, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPADDB128", - argLen: 2, - commutative: true, - asm: x86.AVPADDB, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: 
"VPAND128", - argLen: 2, - commutative: true, - asm: x86.AVPAND, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPCMPEQB128", - argLen: 2, + name: "VPORQMasked128", + argLen: 3, commutative: true, - asm: x86.AVPCMPEQB, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPCMPGTB128", - argLen: 2, - asm: x86.AVPCMPGTB, + asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22743,9 +22855,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSBMasked128", + name: "VPOPCNTQMasked128", argLen: 2, - asm: x86.AVPABSB, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22757,10 +22869,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDBMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPADDB, + name: "VPSUBQMasked128", + argLen: 3, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22773,10 +22884,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSBMasked128", + name: "VPXORQMasked128", argLen: 3, commutative: true, - asm: x86.AVPMAXSB, + asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22789,30 +22900,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSBMasked128", - argLen: 3, + name: "VPMAXSQ128", + argLen: 2, commutative: true, - asm: x86.AVPMINSB, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSUBSBMasked128", - argLen: 3, - asm: x86.AVPSUBSB, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22820,10 +22915,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSB128", + name: "VPMINSQ128", argLen: 2, commutative: true, - asm: x86.AVPMAXSB, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22835,10 +22930,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSB128", + name: "VPMULLQ128", argLen: 2, 
commutative: true, - asm: x86.AVPMINSB, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22850,13 +22945,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSIGNB128", - argLen: 2, - asm: x86.AVPSIGNB, + name: "VPOPCNTQ128", + argLen: 1, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22864,9 +22958,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBB128", + name: "VPSUBQ128", argLen: 2, - asm: x86.AVPSUBB, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22878,9 +22972,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSB256", + name: "VPABSQ256", argLen: 1, - asm: x86.AVPABSB, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22891,10 +22985,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDB256", + name: "VPADDQ256", argLen: 2, commutative: true, - asm: x86.AVPADDB, + asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22906,10 +23000,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDN256", + name: "VPCMPEQQ256", argLen: 2, commutative: true, - asm: x86.AVPANDN, + asm: x86.AVPCMPEQQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22921,10 +23015,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQB256", - argLen: 2, - commutative: true, - asm: x86.AVPCMPEQB, + name: "VPCMPGTQ256", + argLen: 2, + asm: x86.AVPCMPGTQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22936,13 +23029,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPGTB256", + name: "VPABSQMasked256", argLen: 2, - asm: x86.AVPCMPGTB, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22950,13 +23043,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSBMasked256", - argLen: 2, - asm: x86.AVPABSB, + name: "VPADDQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22964,10 +23059,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSBMasked256", + name: "VPANDQMasked256", argLen: 3, commutative: true, - asm: x86.AVPMAXSB, + asm: x86.AVPANDQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22980,10 +23075,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSBMasked256", + name: "VPANDNQMasked256", argLen: 3, 
commutative: true, - asm: x86.AVPMINSB, + asm: x86.AVPANDNQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22996,9 +23091,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSBMasked256", + name: "VPCMPEQQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPCMPEQQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPCMPGTQMasked256", argLen: 3, - asm: x86.AVPSUBSB, + asm: x86.AVPCMPGTQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23006,19 +23117,20 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPMAXSB256", - argLen: 2, + name: "VPMAXSQMasked256", + argLen: 3, commutative: true, - asm: x86.AVPMAXSB, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23026,14 +23138,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSB256", - argLen: 2, + name: "VPMINSQMasked256", + argLen: 3, commutative: true, - asm: x86.AVPMINSB, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23041,12 +23154,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTB256", - argLen: 1, - asm: x86.AVPOPCNTB, + name: "VPMULDQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23054,13 +23170,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSIGNB256", - argLen: 2, - asm: x86.AVPSIGNB, + name: "VPMULLQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ 
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23068,12 +23186,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSB512", - argLen: 1, - asm: x86.AVPABSB, + name: "VPORQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23081,9 +23202,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSBMasked512", + name: "VPOPCNTQMasked256", argLen: 2, - asm: x86.AVPABSB, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23095,10 +23216,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSBMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMAXSB, + name: "VPSUBQMasked256", + argLen: 3, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23111,10 +23231,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSBMasked512", + name: "VPXORQMasked256", argLen: 3, commutative: true, - asm: x86.AVPMINSB, + asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23127,15 +23247,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSBMasked512", - argLen: 3, + name: "VPMAXSQ256", + argLen: 2, commutative: true, - asm: x86.AVPADDSB, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23143,10 +23262,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSB512", + name: "VPMINSQ256", argLen: 2, commutative: true, - asm: x86.AVPMAXSB, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23158,10 +23277,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSB512", + name: "VPMULLQ256", argLen: 2, commutative: true, - asm: x86.AVPMINSB, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23173,9 +23292,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTB512", + name: "VPOPCNTQ256", argLen: 1, - asm: x86.AVPOPCNTB, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23186,42 +23305,114 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSB512", + name: "VPSUBQ256", argLen: 2, - asm: x86.AVPSUBSB, + asm: x86.AVPSUBQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPABSQ512", + argLen: 1, + asm: x86.AVPABSQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPADDQ512", + argLen: 2, + commutative: true, + asm: x86.AVPADDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPANDQ512", + argLen: 2, + commutative: true, + asm: x86.AVPANDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPANDNQ512", + argLen: 2, + commutative: true, + asm: x86.AVPANDNQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPCMPEQQ512", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSUBB512", + name: "VPCMPGTQ512", argLen: 2, - asm: x86.AVPSUBB, + asm: x86.AVPCMPGTQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPAVGW256", - argLen: 2, - commutative: true, - asm: x86.AVPAVGW, + name: "VPABSQMasked512", + argLen: 2, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23229,10 +23420,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGWMasked256", + name: "VPADDQMasked512", argLen: 3, commutative: true, - asm: x86.AVPAVGW, + asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23245,10 +23436,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUWMasked256", + name: "VPANDQMasked512", argLen: 3, commutative: true, - asm: x86.AVPMAXUW, + asm: x86.AVPANDQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23261,10 +23452,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUWMasked256", + name: "VPANDNQMasked512", argLen: 3, commutative: true, - asm: x86.AVPMINUW, + asm: x86.AVPANDNQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23277,10 +23468,10 @@ var opcodeTable = [...]opInfo{ }, }, { - 
name: "VPMULHUWMasked256", + name: "VPCMPEQQMasked512", argLen: 3, commutative: true, - asm: x86.AVPMULHUW, + asm: x86.AVPCMPEQQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23288,33 +23479,35 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPOPCNTWMasked256", - argLen: 2, - asm: x86.AVPOPCNTW, + name: "VPCMPGTQMasked512", + argLen: 3, + asm: x86.AVPCMPGTQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPMAXUW256", - argLen: 2, + name: "VPMAXSQMasked512", + argLen: 3, commutative: true, - asm: x86.AVPMAXUW, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23322,14 +23515,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUW256", - argLen: 2, + name: "VPMINSQMasked512", + argLen: 3, commutative: true, - asm: x86.AVPMINUW, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23337,14 +23531,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHUW256", - argLen: 2, + name: "VPMULDQMasked512", + argLen: 3, commutative: true, - asm: x86.AVPMULHUW, + asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23352,13 +23547,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHADDW256", - argLen: 2, - asm: x86.AVPHADDW, + name: "VPMULLQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
}, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23366,12 +23563,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTW256", - argLen: 1, - asm: x86.AVPOPCNTW, + name: "VPORQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23379,14 +23579,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSW256", - argLen: 2, - commutative: true, - asm: x86.AVPADDSW, + name: "VPOPCNTQMasked512", + argLen: 2, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23394,14 +23593,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGW512", - argLen: 2, - commutative: true, - asm: x86.AVPAVGW, + name: "VPSUBQMasked512", + argLen: 3, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23409,10 +23608,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDWMasked512", + name: "VPXORQMasked512", argLen: 3, commutative: true, - asm: x86.AVPADDW, + asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23425,15 +23624,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGWMasked512", - argLen: 3, + name: "VPMAXSQ512", + argLen: 2, commutative: true, - asm: x86.AVPAVGW, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23441,15 +23639,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUWMasked512", - argLen: 3, + name: "VPMINSQ512", + argLen: 2, commutative: true, - asm: x86.AVPMAXUW, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
@@ -23457,15 +23654,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUWMasked512", - argLen: 3, + name: "VPMULDQ512", + argLen: 2, commutative: true, - asm: x86.AVPMINUW, + asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23473,15 +23669,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHUWMasked512", - argLen: 3, + name: "VPMULLQ512", + argLen: 2, commutative: true, - asm: x86.AVPMULHUW, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23489,13 +23684,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTWMasked512", - argLen: 2, - asm: x86.AVPOPCNTW, + name: "VPORQ512", + argLen: 2, + commutative: true, + asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23503,15 +23699,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSWMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPADDSW, + name: "VPOPCNTQ512", + argLen: 1, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23519,14 +23712,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSWMasked512", - argLen: 3, - asm: x86.AVPSUBSW, + name: "VPSUBQ512", + argLen: 2, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23534,14 +23726,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBWMasked512", - argLen: 3, - asm: x86.AVPSUBW, + name: "VPXORQ512", + argLen: 2, + commutative: true, + asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23549,14 +23741,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUW512", - argLen: 2, - commutative: true, - asm: x86.AVPMAXUW, + name: "VPABSB128", + argLen: 1, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23564,10 +23754,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUW512", + name: "VPADDB128", argLen: 2, commutative: true, - asm: x86.AVPMINUW, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23579,10 +23769,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHUW512", + name: "VPCMPEQB128", argLen: 2, commutative: true, - asm: x86.AVPMULHUW, + asm: x86.AVPCMPEQB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23594,12 +23784,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTW512", - argLen: 1, - asm: x86.AVPOPCNTW, + name: "VPCMPGTB128", + argLen: 2, + asm: x86.AVPCMPGTB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23607,14 +23798,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSW512", - argLen: 2, - commutative: true, - asm: x86.AVPADDSW, + name: "VPABSBMasked128", + argLen: 2, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23622,13 +23812,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBW512", - argLen: 2, - asm: x86.AVPSUBW, + name: "VPADDBMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23636,25 +23828,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGW128", - argLen: 2, + name: "VPCMPEQBMasked128", + argLen: 3, commutative: true, - asm: x86.AVPAVGW, + asm: x86.AVPCMPEQB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPADDWMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPADDW, + name: "VPCMPGTBMasked128", + argLen: 3, + asm: x86.AVPCMPGTB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23662,15 +23854,15 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPAVGWMasked128", + name: "VPMAXSBMasked128", argLen: 3, commutative: true, - asm: x86.AVPAVGW, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23683,10 +23875,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUWMasked128", + name: "VPMINSBMasked128", argLen: 3, commutative: true, - asm: x86.AVPMAXUW, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23699,15 +23891,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUWMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMINUW, + name: "VPOPCNTBMasked128", + argLen: 2, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23715,10 +23905,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHUWMasked128", + name: "VPADDSBMasked128", argLen: 3, commutative: true, - asm: x86.AVPMULHUW, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23731,10 +23921,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSWMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPADDSW, + name: "VPSUBSBMasked128", + argLen: 3, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23747,9 +23936,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBWMasked128", + name: "VPSUBBMasked128", argLen: 3, - asm: x86.AVPSUBW, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23762,10 +23951,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUW128", + name: "VPMAXSB128", argLen: 2, commutative: true, - asm: x86.AVPMAXUW, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23777,10 +23966,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUW128", + name: "VPMINSB128", argLen: 2, commutative: true, - asm: x86.AVPMINUW, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23792,10 +23981,23 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHUW128", + name: "VPOPCNTB128", + argLen: 1, + asm: x86.AVPOPCNTB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: 
"VPADDSB128", argLen: 2, commutative: true, - asm: x86.AVPMULHUW, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23807,9 +24009,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHADDW128", + name: "VPSUBSB128", argLen: 2, - asm: x86.AVPHADDW, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23821,12 +24023,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTW128", - argLen: 1, - asm: x86.AVPOPCNTW, + name: "VPSIGNB128", + argLen: 2, + asm: x86.AVPSIGNB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23834,10 +24037,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSW128", - argLen: 2, - commutative: true, - asm: x86.AVPADDSW, + name: "VPSUBB128", + argLen: 2, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23849,13 +24051,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSW128", - argLen: 2, - asm: x86.AVPSUBSW, + name: "VPABSB256", + argLen: 1, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23863,9 +24064,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBW128", - argLen: 2, - asm: x86.AVPSUBW, + name: "VPADDB256", + argLen: 2, + commutative: true, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23877,10 +24079,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDD512", + name: "VPCMPEQB256", argLen: 2, commutative: true, - asm: x86.AVPADDD, + asm: x86.AVPCMPEQB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23892,10 +24094,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDND512", - argLen: 2, - commutative: true, - asm: x86.AVPANDND, + name: "VPCMPGTB256", + argLen: 2, + asm: x86.AVPCMPGTB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23907,15 +24108,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPADDD, + name: "VPABSBMasked256", + argLen: 2, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23923,10 +24122,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDDMasked512", + name: "VPADDBMasked256", argLen: 3, commutative: true, - asm: x86.AVPANDD, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23939,10 +24138,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNDMasked512", + name: "VPCMPEQBMasked256", argLen: 3, commutative: true, - asm: 
x86.AVPANDND, + asm: x86.AVPCMPEQB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23950,15 +24149,14 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPMAXUDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMAXUD, + name: "VPCMPGTBMasked256", + argLen: 3, + asm: x86.AVPCMPGTB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23966,15 +24164,15 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPMINUDMasked512", + name: "VPMAXSBMasked256", argLen: 3, commutative: true, - asm: x86.AVPMINUD, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23987,10 +24185,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORDMasked512", + name: "VPMINSBMasked256", argLen: 3, commutative: true, - asm: x86.AVPORD, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24003,14 +24201,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUD512", - argLen: 2, - commutative: true, - asm: x86.AVPMAXUD, + name: "VPOPCNTBMasked256", + argLen: 2, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24018,14 +24215,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUD512", - argLen: 2, + name: "VPADDSBMasked256", + argLen: 3, commutative: true, - asm: x86.AVPMINUD, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24033,22 +24231,40 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTD512", - argLen: 1, - asm: x86.AVPOPCNTD, + name: "VPSUBSBMasked256", + argLen: 3, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, + }, + }, + { + name: "VPSUBBMasked256", + argLen: 3, + asm: x86.AVPSUBB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 }, }, }, { - name: "VPSUBD512", - argLen: 2, - asm: x86.AVPSUBD, + name: "VPMAXSB256", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24060,10 +24276,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDD128", + name: "VPMINSB256", argLen: 2, commutative: true, - asm: x86.AVPADDD, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24075,15 +24291,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPADDD, + name: "VPOPCNTB256", + argLen: 1, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24091,15 +24304,28 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNDMasked128", - argLen: 3, + name: "VPADDSB256", + argLen: 2, commutative: true, - asm: x86.AVPANDND, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSUBSB256", + argLen: 2, + asm: x86.AVPSUBSB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24107,15 +24333,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMAXUD, + name: "VPSIGNB256", + argLen: 2, + asm: x86.AVPSIGNB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24123,15 +24347,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMINUD, + name: "VPSUBB256", + argLen: 2, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24139,14 +24361,12 @@ var opcodeTable = 
[...]opInfo{ }, }, { - name: "VPMAXUD128", - argLen: 2, - commutative: true, - asm: x86.AVPMAXUD, + name: "VPABSB512", + argLen: 1, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24154,10 +24374,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUD128", + name: "VPADDB512", argLen: 2, commutative: true, - asm: x86.AVPMINUD, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24169,41 +24389,42 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHADDD128", - argLen: 2, - asm: x86.AVPHADDD, + name: "VPCMPEQB512", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPOPCNTD128", - argLen: 1, - asm: x86.AVPOPCNTD, + name: "VPCMPGTB512", + argLen: 2, + asm: x86.AVPCMPGTB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPADDD256", - argLen: 2, - commutative: true, - asm: x86.AVPADDD, + name: "VPABSBMasked512", + argLen: 2, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24211,10 +24432,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDDMasked256", + name: "VPADDBMasked512", argLen: 3, commutative: true, - asm: x86.AVPADDD, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24227,10 +24448,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDDMasked256", + name: "VPCMPEQBMasked512", argLen: 3, commutative: true, - asm: x86.AVPANDD, + asm: x86.AVPCMPEQB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24238,15 +24459,14 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPANDNDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPANDND, + name: "VPCMPGTBMasked512", + argLen: 3, + asm: x86.AVPCMPGTB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24254,15 +24474,15 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: 
"VPMAXUDMasked256", + name: "VPMAXSBMasked512", argLen: 3, commutative: true, - asm: x86.AVPMAXUD, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24275,10 +24495,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUDMasked256", + name: "VPMINSBMasked512", argLen: 3, commutative: true, - asm: x86.AVPMINUD, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24291,9 +24511,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTDMasked256", + name: "VPOPCNTBMasked512", argLen: 2, - asm: x86.AVPOPCNTD, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24305,10 +24525,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORDMasked256", + name: "VPADDSBMasked512", argLen: 3, commutative: true, - asm: x86.AVPXORD, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24321,14 +24541,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUD256", - argLen: 2, - commutative: true, - asm: x86.AVPMAXUD, + name: "VPSUBSBMasked512", + argLen: 3, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24336,14 +24556,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUD256", - argLen: 2, - commutative: true, - asm: x86.AVPMINUD, + name: "VPSUBBMasked512", + argLen: 3, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24351,10 +24571,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULUDQ256", + name: "VPMAXSB512", argLen: 2, commutative: true, - asm: x86.AVPMULUDQ, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24366,9 +24586,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHADDD256", - argLen: 2, - asm: x86.AVPHADDD, + name: "VPMINSB512", + argLen: 2, + commutative: true, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24380,14 +24601,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXOR256", - argLen: 2, - commutative: true, - asm: x86.AVPXOR, + name: "VPOPCNTB512", + argLen: 1, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24395,10 +24614,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDQ128", + name: "VPADDSB512", argLen: 2, commutative: true, - asm: x86.AVPADDQ, + 
asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24410,15 +24629,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDQMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPADDQ, + name: "VPSUBSB512", + argLen: 2, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24426,15 +24643,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUQMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMAXUQ, + name: "VPSUBB512", + argLen: 2, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24442,15 +24657,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUQMasked128", - argLen: 3, + name: "VPAVGW256", + argLen: 2, commutative: true, - asm: x86.AVPMINUQ, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24458,10 +24672,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULUDQMasked128", + name: "VPAVGWMasked256", argLen: 3, commutative: true, - asm: x86.AVPMULUDQ, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24474,10 +24688,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORQMasked128", + name: "VPMAXUWMasked256", argLen: 3, commutative: true, - asm: x86.AVPORQ, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24490,24 +24704,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTQMasked128", - argLen: 2, - asm: x86.AVPOPCNTQ, - reg: regInfo{ - inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPXORQMasked128", + name: "VPMINUWMasked256", argLen: 3, commutative: true, - asm: x86.AVPXORQ, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24520,14 +24720,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUQ128", - argLen: 2, + name: "VPMULHUWMasked256", + argLen: 3, commutative: true, - asm: x86.AVPMAXUQ, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24535,10 +24736,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUQ128", + name: "VPMAXUW256", argLen: 2, commutative: true, - asm: x86.AVPMINUQ, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24550,37 +24751,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULUDQ128", + name: "VPMINUW256", argLen: 2, commutative: true, - asm: x86.AVPMULUDQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPOPCNTQ128", - argLen: 1, - asm: x86.AVPOPCNTQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSUBQ128", - argLen: 2, - asm: x86.AVPSUBQ, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24592,10 +24766,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXOR128", + name: "VPMULHUW256", argLen: 2, commutative: true, - asm: x86.AVPXOR, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24607,15 +24781,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDQMasked256", - argLen: 3, + name: "VPAVGW512", + argLen: 2, commutative: true, - asm: x86.AVPADDQ, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24623,10 +24796,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUQMasked256", + name: "VPAVGWMasked512", argLen: 3, commutative: true, - asm: x86.AVPMAXUQ, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24639,10 +24812,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUQMasked256", + name: "VPMAXUWMasked512", argLen: 3, commutative: true, - asm: x86.AVPMINUQ, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24655,10 +24828,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULUDQMasked256", + name: "VPMINUWMasked512", argLen: 3, commutative: true, - asm: x86.AVPMULUDQ, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24671,10 +24844,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORQMasked256", + name: "VPMULHUWMasked512", argLen: 3, commutative: true, - asm: x86.AVPXORQ, + asm: 
x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24687,10 +24860,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUQ256", + name: "VPMAXUW512", argLen: 2, commutative: true, - asm: x86.AVPMAXUQ, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24702,10 +24875,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUQ256", + name: "VPMINUW512", argLen: 2, commutative: true, - asm: x86.AVPMINUQ, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24717,10 +24890,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDQ512", + name: "VPMULHUW512", argLen: 2, commutative: true, - asm: x86.AVPADDQ, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24732,10 +24905,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNQ512", + name: "VPAVGW128", argLen: 2, commutative: true, - asm: x86.AVPANDNQ, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24747,10 +24920,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDQMasked512", + name: "VPAVGWMasked128", argLen: 3, commutative: true, - asm: x86.AVPANDQ, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24763,10 +24936,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUQMasked512", + name: "VPMAXUWMasked128", argLen: 3, commutative: true, - asm: x86.AVPMAXUQ, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24779,10 +24952,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUQMasked512", + name: "VPMINUWMasked128", argLen: 3, commutative: true, - asm: x86.AVPMINUQ, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24795,10 +24968,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULUDQMasked512", + name: "VPMULHUWMasked128", argLen: 3, commutative: true, - asm: x86.AVPMULUDQ, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24811,15 +24984,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORQMasked512", - argLen: 3, + name: "VPMAXUW128", + argLen: 2, commutative: true, - asm: x86.AVPORQ, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24827,13 +24999,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTQMasked512", - argLen: 2, - asm: x86.AVPOPCNTQ, + name: "VPMINUW128", + argLen: 2, + commutative: true, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24841,9 +25014,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBQMasked512", - argLen: 3, - asm: x86.AVPSUBQ, + name: "VPMULHUW128", + argLen: 2, + commutative: true, + asm: x86.AVPMULHUW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMAXUDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24856,10 +25045,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORQMasked512", + name: "VPMINUDMasked512", argLen: 3, commutative: true, - asm: x86.AVPXORQ, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24872,10 +25061,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUQ512", + name: "VPMAXUD512", argLen: 2, commutative: true, - asm: x86.AVPMAXUQ, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24887,10 +25076,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUQ512", + name: "VPMINUD512", argLen: 2, commutative: true, - asm: x86.AVPMINUQ, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24902,14 +25091,31 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULUDQ512", - argLen: 2, + name: "VPMAXUDMasked128", + argLen: 3, commutative: true, - asm: x86.AVPMULUDQ, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMINUDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMINUD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24917,10 +25123,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORQ512", + name: "VPMAXUD128", argLen: 2, commutative: true, - asm: x86.AVPORQ, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24932,10 +25138,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDN128", + name: "VPMINUD128", argLen: 2, commutative: true, - asm: x86.AVPANDN, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24947,10 +25153,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGB128", + name: "VPMULUDQ128", argLen: 2, commutative: true, - asm: x86.AVPAVGB, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24962,10 +25168,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGBMasked128", + name: "VPMAXUDMasked256", argLen: 3, 
commutative: true, - asm: x86.AVPAVGB, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24978,10 +25184,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUBMasked128", + name: "VPMINUDMasked256", argLen: 3, commutative: true, - asm: x86.AVPMAXUB, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24994,15 +25200,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUBMasked128", - argLen: 3, + name: "VPMAXUD256", + argLen: 2, commutative: true, - asm: x86.AVPMINUB, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25010,13 +25215,45 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTBMasked128", - argLen: 2, - asm: x86.AVPOPCNTB, + name: "VPMINUD256", + argLen: 2, + commutative: true, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMULUDQ256", + argLen: 2, + commutative: true, + asm: x86.AVPMULUDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMAXUQMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25024,10 +25261,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSBMasked128", + name: "VPMINUQMasked128", argLen: 3, commutative: true, - asm: x86.AVPADDSB, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25040,9 +25277,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBBMasked128", - argLen: 3, - asm: x86.AVPSUBB, + name: "VPMULUDQMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25055,10 +25293,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUB128", + name: "VPMAXUQ128", argLen: 2, commutative: true, - asm: x86.AVPMAXUB, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25070,10 +25308,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUB128", + name: "VPMINUQ128", argLen: 2, commutative: true, - asm: x86.AVPMINUB, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ 
-25085,27 +25323,47 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTB128", - argLen: 1, - asm: x86.AVPOPCNTB, + name: "VPMAXUQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, + }, + }, + { + name: "VPMINUQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMINUQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPADDSB128", - argLen: 2, + name: "VPMULUDQMasked256", + argLen: 3, commutative: true, - asm: x86.AVPADDSB, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25113,9 +25371,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSB128", - argLen: 2, - asm: x86.AVPSUBSB, + name: "VPMAXUQ256", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25127,10 +25386,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGB256", + name: "VPMINUQ256", argLen: 2, commutative: true, - asm: x86.AVPAVGB, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25142,10 +25401,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDBMasked256", + name: "VPMAXUQMasked512", argLen: 3, commutative: true, - asm: x86.AVPADDB, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25158,10 +25417,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGBMasked256", + name: "VPMINUQMasked512", argLen: 3, commutative: true, - asm: x86.AVPAVGB, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25174,10 +25433,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUBMasked256", + name: "VPMULUDQMasked512", argLen: 3, commutative: true, - asm: x86.AVPMAXUB, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25190,15 +25449,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUBMasked256", - argLen: 3, + name: "VPMAXUQ512", + argLen: 2, commutative: true, - asm: x86.AVPMINUB, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: 
[]outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25206,13 +25464,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTBMasked256", - argLen: 2, - asm: x86.AVPOPCNTB, + name: "VPMINUQ512", + argLen: 2, + commutative: true, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25220,15 +25479,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSBMasked256", - argLen: 3, + name: "VPMULUDQ512", + argLen: 2, commutative: true, - asm: x86.AVPADDSB, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25236,14 +25494,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBBMasked256", - argLen: 3, - asm: x86.AVPSUBB, + name: "VPAVGB128", + argLen: 2, + commutative: true, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25251,14 +25509,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUB256", - argLen: 2, + name: "VPAVGBMasked128", + argLen: 3, commutative: true, - asm: x86.AVPMAXUB, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25266,14 +25525,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUB256", - argLen: 2, + name: "VPMAXUBMasked128", + argLen: 3, commutative: true, - asm: x86.AVPMINUB, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25281,14 +25541,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSB256", - argLen: 2, + name: "VPMINUBMasked128", + argLen: 3, commutative: true, - asm: x86.AVPADDSB, + asm: x86.AVPMINUB, 
reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25296,9 +25557,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSB256", - argLen: 2, - asm: x86.AVPSUBSB, + name: "VPMAXUB128", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25310,9 +25572,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBB256", - argLen: 2, - asm: x86.AVPSUBB, + name: "VPMINUB128", + argLen: 2, + commutative: true, + asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25324,10 +25587,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDB512", + name: "VPAVGB256", argLen: 2, commutative: true, - asm: x86.AVPADDB, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25339,14 +25602,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGB512", - argLen: 2, + name: "VPAVGBMasked256", + argLen: 3, commutative: true, asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25354,10 +25618,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDBMasked512", + name: "VPMAXUBMasked256", argLen: 3, commutative: true, - asm: x86.AVPADDB, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25370,10 +25634,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGBMasked512", + name: "VPMINUBMasked256", argLen: 3, commutative: true, - asm: x86.AVPAVGB, + asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25386,15 +25650,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUBMasked512", - argLen: 3, + name: "VPMAXUB256", + argLen: 2, commutative: true, asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25402,15 +25665,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUBMasked512", - argLen: 3, + name: "VPMINUB256", + argLen: 2, commutative: true, asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 
X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25418,13 +25680,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTBMasked512", - argLen: 2, - asm: x86.AVPOPCNTB, + name: "VPAVGB512", + argLen: 2, + commutative: true, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25432,9 +25695,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSBMasked512", - argLen: 3, - asm: x86.AVPSUBSB, + name: "VPAVGBMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25447,9 +25711,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBBMasked512", - argLen: 3, - asm: x86.AVPSUBB, + name: "VPMAXUBMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25462,14 +25727,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUB512", - argLen: 2, + name: "VPMINUBMasked512", + argLen: 3, commutative: true, - asm: x86.AVPMAXUB, + asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25477,10 +25743,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUB512", + name: "VPMAXUB512", argLen: 2, commutative: true, - asm: x86.AVPMINUB, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25492,10 +25758,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSB512", + name: "VPMINUB512", argLen: 2, commutative: true, - asm: x86.AVPADDSB, + asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25507,10 +25773,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPS512", - auxType: auxInt8, - argLen: 2, - asm: x86.AVCMPPS, + name: "VCMPPS512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25522,10 +25789,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPSMasked512", - auxType: auxInt8, - argLen: 3, - asm: x86.AVCMPPS, + name: "VCMPPSMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25538,10 +25806,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPS128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVCMPPS, + name: "VCMPPS128", + auxType: auxInt8, + argLen: 2, + commutative: true, + 
asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25553,10 +25822,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPSMasked128", - auxType: auxInt8, - argLen: 3, - asm: x86.AVCMPPS, + name: "VCMPPSMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25569,10 +25839,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPS256", - auxType: auxInt8, - argLen: 2, - asm: x86.AVCMPPS, + name: "VCMPPS256", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25584,10 +25855,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPSMasked256", - auxType: auxInt8, - argLen: 3, - asm: x86.AVCMPPS, + name: "VCMPPSMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25600,10 +25872,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPD128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVCMPPD, + name: "VCMPPD128", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25632,10 +25905,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPD256", - auxType: auxInt8, - argLen: 2, - asm: x86.AVCMPPD, + name: "VCMPPD256", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25664,10 +25938,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPD512", - auxType: auxInt8, - argLen: 2, - asm: x86.AVCMPPD, + name: "VCMPPD512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25727,15 +26002,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPWMasked512", + name: "VPCMPW512", auxType: auxInt8, - argLen: 3, + argLen: 2, asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25743,15 +26017,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPW512", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVPCMPW, + name: "VPCMPWMasked512", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25774,11 +26048,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPWMasked128", - auxType: auxInt8, - argLen: 3, - 
commutative: true, - asm: x86.AVPCMPW, + name: "VPCMPWMasked128", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25806,11 +26079,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPDMasked512", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPD, + name: "VPCMPDMasked512", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25823,15 +26095,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPDMasked128", + name: "VPCMPD128", auxType: auxInt8, - argLen: 3, + argLen: 2, asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25839,15 +26110,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPD128", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVPCMPD, + name: "VPCMPDMasked128", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25870,11 +26141,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPDMasked256", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPD, + name: "VPCMPDMasked256", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25949,11 +26219,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPQMasked512", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPQ, + name: "VPCMPQ512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPCMPQMasked512", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25966,11 +26250,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPQ512", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVPCMPQ, + name: "VPCMPB128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25998,11 +26281,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPB128", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVPCMPB, + name: "VPCMPB256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26029,22 +26311,6 @@ var opcodeTable = [...]opInfo{ }, 
}, }, - { - name: "VPCMPB256", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVPCMPB, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VPCMPB512", auxType: auxInt8, @@ -26077,10 +26343,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUW256", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPUW, + name: "VPCMPUW256", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26092,10 +26359,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUWMasked256", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPUW, + name: "VPCMPUWMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26124,10 +26392,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUWMasked512", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPUW, + name: "VPCMPUWMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26140,10 +26409,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUW128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPUW, + name: "VPCMPUW128", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26172,15 +26442,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUDMasked512", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPUD, + name: "VPCMPUD512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26188,15 +26458,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUD512", + name: "VPCMPUDMasked512", auxType: auxInt8, - argLen: 2, + argLen: 3, commutative: true, asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26204,10 +26475,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUD128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPUD, + name: "VPCMPUD128", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26219,10 +26491,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUDMasked128", - auxType: auxInt8, - argLen: 3, - 
asm: x86.AVPCMPUD, + name: "VPCMPUDMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26235,15 +26508,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUDMasked256", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPUD, + name: "VPCMPUD256", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26251,15 +26524,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUD256", + name: "VPCMPUDMasked256", auxType: auxInt8, - argLen: 2, + argLen: 3, commutative: true, asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26267,10 +26541,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUQ128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPUQ, + name: "VPCMPUQ128", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26282,10 +26557,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUQMasked128", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPUQ, + name: "VPCMPUQMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26298,15 +26574,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUQMasked256", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPUQ, + name: "VPCMPUQ256", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26314,15 +26590,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUQ256", + name: "VPCMPUQMasked256", auxType: auxInt8, - argLen: 2, + argLen: 3, commutative: true, asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26330,10 +26607,11 @@ var opcodeTable = 
[...]opInfo{ }, }, { - name: "VPCMPUQ512", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPUQ, + name: "VPCMPUQ512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26345,10 +26623,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUQMasked512", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPUQ, + name: "VPCMPUQMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26361,10 +26640,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUB128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPUB, + name: "VPCMPUB128", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26376,10 +26656,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUBMasked128", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPUB, + name: "VPCMPUBMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26392,10 +26673,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUB256", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPUB, + name: "VPCMPUB256", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26407,10 +26689,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUBMasked256", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPUB, + name: "VPCMPUBMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26423,10 +26706,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUB512", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPUB, + name: "VPCMPUB512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26438,10 +26722,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUBMasked512", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPUB, + name: "VPCMPUBMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 88c90dce82ae21..86fbc988cfad2f 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -554,29 +554,41 @@ func rewriteValueAMD64(v *Value) bool { case OpAMD64XORQmodify: return rewriteValueAMD64_OpAMD64XORQmodify(v) case OpAbsoluteInt16x16: - return rewriteValueAMD64_OpAbsoluteInt16x16(v) + v.Op = OpAMD64VPABSW256 + return true case OpAbsoluteInt16x32: - return rewriteValueAMD64_OpAbsoluteInt16x32(v) + v.Op = OpAMD64VPABSW512 + return true case OpAbsoluteInt16x8: - return rewriteValueAMD64_OpAbsoluteInt16x8(v) + v.Op = OpAMD64VPABSW128 + return true case OpAbsoluteInt32x16: - return rewriteValueAMD64_OpAbsoluteInt32x16(v) + v.Op = OpAMD64VPABSD512 + return true case OpAbsoluteInt32x4: - return rewriteValueAMD64_OpAbsoluteInt32x4(v) + 
v.Op = OpAMD64VPABSD128 + return true case OpAbsoluteInt32x8: - return rewriteValueAMD64_OpAbsoluteInt32x8(v) + v.Op = OpAMD64VPABSD256 + return true case OpAbsoluteInt64x2: - return rewriteValueAMD64_OpAbsoluteInt64x2(v) + v.Op = OpAMD64VPABSQ128 + return true case OpAbsoluteInt64x4: - return rewriteValueAMD64_OpAbsoluteInt64x4(v) + v.Op = OpAMD64VPABSQ256 + return true case OpAbsoluteInt64x8: - return rewriteValueAMD64_OpAbsoluteInt64x8(v) + v.Op = OpAMD64VPABSQ512 + return true case OpAbsoluteInt8x16: - return rewriteValueAMD64_OpAbsoluteInt8x16(v) + v.Op = OpAMD64VPABSB128 + return true case OpAbsoluteInt8x32: - return rewriteValueAMD64_OpAbsoluteInt8x32(v) + v.Op = OpAMD64VPABSB256 + return true case OpAbsoluteInt8x64: - return rewriteValueAMD64_OpAbsoluteInt8x64(v) + v.Op = OpAMD64VPABSB512 + return true case OpAdd16: v.Op = OpAMD64ADDL return true @@ -596,68 +608,98 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64ADDL return true case OpAddFloat32x16: - return rewriteValueAMD64_OpAddFloat32x16(v) + v.Op = OpAMD64VADDPS512 + return true case OpAddFloat32x4: - return rewriteValueAMD64_OpAddFloat32x4(v) + v.Op = OpAMD64VADDPS128 + return true case OpAddFloat32x8: - return rewriteValueAMD64_OpAddFloat32x8(v) + v.Op = OpAMD64VADDPS256 + return true case OpAddFloat64x2: - return rewriteValueAMD64_OpAddFloat64x2(v) + v.Op = OpAMD64VADDPD128 + return true case OpAddFloat64x4: - return rewriteValueAMD64_OpAddFloat64x4(v) + v.Op = OpAMD64VADDPD256 + return true case OpAddFloat64x8: - return rewriteValueAMD64_OpAddFloat64x8(v) + v.Op = OpAMD64VADDPD512 + return true case OpAddInt16x16: - return rewriteValueAMD64_OpAddInt16x16(v) + v.Op = OpAMD64VPADDW256 + return true case OpAddInt16x32: - return rewriteValueAMD64_OpAddInt16x32(v) + v.Op = OpAMD64VPADDW512 + return true case OpAddInt16x8: - return rewriteValueAMD64_OpAddInt16x8(v) + v.Op = OpAMD64VPADDW128 + return true case OpAddInt32x16: - return rewriteValueAMD64_OpAddInt32x16(v) + v.Op = OpAMD64VPADDD512 + return true case OpAddInt32x4: - return rewriteValueAMD64_OpAddInt32x4(v) + v.Op = OpAMD64VPADDD128 + return true case OpAddInt32x8: - return rewriteValueAMD64_OpAddInt32x8(v) + v.Op = OpAMD64VPADDD256 + return true case OpAddInt64x2: - return rewriteValueAMD64_OpAddInt64x2(v) + v.Op = OpAMD64VPADDQ128 + return true case OpAddInt64x4: - return rewriteValueAMD64_OpAddInt64x4(v) + v.Op = OpAMD64VPADDQ256 + return true case OpAddInt64x8: - return rewriteValueAMD64_OpAddInt64x8(v) + v.Op = OpAMD64VPADDQ512 + return true case OpAddInt8x16: - return rewriteValueAMD64_OpAddInt8x16(v) + v.Op = OpAMD64VPADDB128 + return true case OpAddInt8x32: - return rewriteValueAMD64_OpAddInt8x32(v) + v.Op = OpAMD64VPADDB256 + return true case OpAddInt8x64: - return rewriteValueAMD64_OpAddInt8x64(v) + v.Op = OpAMD64VPADDB512 + return true case OpAddPtr: v.Op = OpAMD64ADDQ return true case OpAddUint16x16: - return rewriteValueAMD64_OpAddUint16x16(v) + v.Op = OpAMD64VPADDW256 + return true case OpAddUint16x32: - return rewriteValueAMD64_OpAddUint16x32(v) + v.Op = OpAMD64VPADDW512 + return true case OpAddUint16x8: - return rewriteValueAMD64_OpAddUint16x8(v) + v.Op = OpAMD64VPADDW128 + return true case OpAddUint32x16: - return rewriteValueAMD64_OpAddUint32x16(v) + v.Op = OpAMD64VPADDD512 + return true case OpAddUint32x4: - return rewriteValueAMD64_OpAddUint32x4(v) + v.Op = OpAMD64VPADDD128 + return true case OpAddUint32x8: - return rewriteValueAMD64_OpAddUint32x8(v) + v.Op = OpAMD64VPADDD256 + return true case OpAddUint64x2: - return 
rewriteValueAMD64_OpAddUint64x2(v) + v.Op = OpAMD64VPADDQ128 + return true case OpAddUint64x4: - return rewriteValueAMD64_OpAddUint64x4(v) + v.Op = OpAMD64VPADDQ256 + return true case OpAddUint64x8: - return rewriteValueAMD64_OpAddUint64x8(v) + v.Op = OpAMD64VPADDQ512 + return true case OpAddUint8x16: - return rewriteValueAMD64_OpAddUint8x16(v) + v.Op = OpAMD64VPADDB128 + return true case OpAddUint8x32: - return rewriteValueAMD64_OpAddUint8x32(v) + v.Op = OpAMD64VPADDB256 + return true case OpAddUint8x64: - return rewriteValueAMD64_OpAddUint8x64(v) + v.Op = OpAMD64VPADDB512 + return true case OpAddr: return rewriteValueAMD64_OpAddr(v) case OpAnd16: @@ -676,133 +718,197 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64ANDL return true case OpAndFloat32x16: - return rewriteValueAMD64_OpAndFloat32x16(v) + v.Op = OpAMD64VANDPS512 + return true case OpAndFloat32x4: - return rewriteValueAMD64_OpAndFloat32x4(v) + v.Op = OpAMD64VANDPS128 + return true case OpAndFloat32x8: - return rewriteValueAMD64_OpAndFloat32x8(v) + v.Op = OpAMD64VANDPS256 + return true case OpAndFloat64x2: - return rewriteValueAMD64_OpAndFloat64x2(v) + v.Op = OpAMD64VANDPD128 + return true case OpAndFloat64x4: - return rewriteValueAMD64_OpAndFloat64x4(v) + v.Op = OpAMD64VANDPD256 + return true case OpAndFloat64x8: - return rewriteValueAMD64_OpAndFloat64x8(v) + v.Op = OpAMD64VANDPD512 + return true case OpAndInt16x16: - return rewriteValueAMD64_OpAndInt16x16(v) + v.Op = OpAMD64VPAND256 + return true case OpAndInt16x8: - return rewriteValueAMD64_OpAndInt16x8(v) + v.Op = OpAMD64VPAND128 + return true case OpAndInt32x16: - return rewriteValueAMD64_OpAndInt32x16(v) + v.Op = OpAMD64VPANDD512 + return true case OpAndInt32x4: - return rewriteValueAMD64_OpAndInt32x4(v) + v.Op = OpAMD64VPAND128 + return true case OpAndInt32x8: - return rewriteValueAMD64_OpAndInt32x8(v) + v.Op = OpAMD64VPAND256 + return true case OpAndInt64x2: - return rewriteValueAMD64_OpAndInt64x2(v) + v.Op = OpAMD64VPAND128 + return true case OpAndInt64x4: - return rewriteValueAMD64_OpAndInt64x4(v) + v.Op = OpAMD64VPAND256 + return true case OpAndInt64x8: - return rewriteValueAMD64_OpAndInt64x8(v) + v.Op = OpAMD64VPANDQ512 + return true case OpAndInt8x16: - return rewriteValueAMD64_OpAndInt8x16(v) + v.Op = OpAMD64VPAND128 + return true case OpAndInt8x32: - return rewriteValueAMD64_OpAndInt8x32(v) + v.Op = OpAMD64VPAND256 + return true case OpAndNotFloat32x16: - return rewriteValueAMD64_OpAndNotFloat32x16(v) + v.Op = OpAMD64VANDNPS512 + return true case OpAndNotFloat32x4: - return rewriteValueAMD64_OpAndNotFloat32x4(v) + v.Op = OpAMD64VANDNPS128 + return true case OpAndNotFloat32x8: - return rewriteValueAMD64_OpAndNotFloat32x8(v) + v.Op = OpAMD64VANDNPS256 + return true case OpAndNotFloat64x2: - return rewriteValueAMD64_OpAndNotFloat64x2(v) + v.Op = OpAMD64VANDNPD128 + return true case OpAndNotFloat64x4: - return rewriteValueAMD64_OpAndNotFloat64x4(v) + v.Op = OpAMD64VANDNPD256 + return true case OpAndNotFloat64x8: - return rewriteValueAMD64_OpAndNotFloat64x8(v) + v.Op = OpAMD64VANDNPD512 + return true case OpAndNotInt16x16: - return rewriteValueAMD64_OpAndNotInt16x16(v) + v.Op = OpAMD64VPANDN256 + return true case OpAndNotInt16x8: - return rewriteValueAMD64_OpAndNotInt16x8(v) + v.Op = OpAMD64VPANDN128 + return true case OpAndNotInt32x16: - return rewriteValueAMD64_OpAndNotInt32x16(v) + v.Op = OpAMD64VPANDND512 + return true case OpAndNotInt32x4: - return rewriteValueAMD64_OpAndNotInt32x4(v) + v.Op = OpAMD64VPANDN128 + return true case OpAndNotInt32x8: - return 
rewriteValueAMD64_OpAndNotInt32x8(v) + v.Op = OpAMD64VPANDN256 + return true case OpAndNotInt64x2: - return rewriteValueAMD64_OpAndNotInt64x2(v) + v.Op = OpAMD64VPANDN128 + return true case OpAndNotInt64x4: - return rewriteValueAMD64_OpAndNotInt64x4(v) + v.Op = OpAMD64VPANDN256 + return true case OpAndNotInt64x8: - return rewriteValueAMD64_OpAndNotInt64x8(v) + v.Op = OpAMD64VPANDNQ512 + return true case OpAndNotInt8x16: - return rewriteValueAMD64_OpAndNotInt8x16(v) + v.Op = OpAMD64VPANDN128 + return true case OpAndNotInt8x32: - return rewriteValueAMD64_OpAndNotInt8x32(v) + v.Op = OpAMD64VPANDN256 + return true case OpAndNotUint16x16: - return rewriteValueAMD64_OpAndNotUint16x16(v) + v.Op = OpAMD64VPANDN256 + return true case OpAndNotUint16x8: - return rewriteValueAMD64_OpAndNotUint16x8(v) + v.Op = OpAMD64VPANDN128 + return true case OpAndNotUint32x16: - return rewriteValueAMD64_OpAndNotUint32x16(v) + v.Op = OpAMD64VPANDND512 + return true case OpAndNotUint32x4: - return rewriteValueAMD64_OpAndNotUint32x4(v) + v.Op = OpAMD64VPANDN128 + return true case OpAndNotUint32x8: - return rewriteValueAMD64_OpAndNotUint32x8(v) + v.Op = OpAMD64VPANDN256 + return true case OpAndNotUint64x2: - return rewriteValueAMD64_OpAndNotUint64x2(v) + v.Op = OpAMD64VPANDN128 + return true case OpAndNotUint64x4: - return rewriteValueAMD64_OpAndNotUint64x4(v) + v.Op = OpAMD64VPANDN256 + return true case OpAndNotUint64x8: - return rewriteValueAMD64_OpAndNotUint64x8(v) + v.Op = OpAMD64VPANDNQ512 + return true case OpAndNotUint8x16: - return rewriteValueAMD64_OpAndNotUint8x16(v) + v.Op = OpAMD64VPANDN128 + return true case OpAndNotUint8x32: - return rewriteValueAMD64_OpAndNotUint8x32(v) + v.Op = OpAMD64VPANDN256 + return true case OpAndUint16x16: - return rewriteValueAMD64_OpAndUint16x16(v) + v.Op = OpAMD64VPAND256 + return true case OpAndUint16x8: - return rewriteValueAMD64_OpAndUint16x8(v) + v.Op = OpAMD64VPAND128 + return true case OpAndUint32x16: - return rewriteValueAMD64_OpAndUint32x16(v) + v.Op = OpAMD64VPANDD512 + return true case OpAndUint32x4: - return rewriteValueAMD64_OpAndUint32x4(v) + v.Op = OpAMD64VPAND128 + return true case OpAndUint32x8: - return rewriteValueAMD64_OpAndUint32x8(v) + v.Op = OpAMD64VPAND256 + return true case OpAndUint64x2: - return rewriteValueAMD64_OpAndUint64x2(v) + v.Op = OpAMD64VPAND128 + return true case OpAndUint64x4: - return rewriteValueAMD64_OpAndUint64x4(v) + v.Op = OpAMD64VPAND256 + return true case OpAndUint64x8: - return rewriteValueAMD64_OpAndUint64x8(v) + v.Op = OpAMD64VPANDQ512 + return true case OpAndUint8x16: - return rewriteValueAMD64_OpAndUint8x16(v) + v.Op = OpAMD64VPAND128 + return true case OpAndUint8x32: - return rewriteValueAMD64_OpAndUint8x32(v) + v.Op = OpAMD64VPAND256 + return true case OpApproximateReciprocalFloat32x16: - return rewriteValueAMD64_OpApproximateReciprocalFloat32x16(v) + v.Op = OpAMD64VRCP14PS512 + return true case OpApproximateReciprocalFloat32x4: - return rewriteValueAMD64_OpApproximateReciprocalFloat32x4(v) + v.Op = OpAMD64VRCP14PS128 + return true case OpApproximateReciprocalFloat32x8: - return rewriteValueAMD64_OpApproximateReciprocalFloat32x8(v) + v.Op = OpAMD64VRCP14PS256 + return true case OpApproximateReciprocalFloat64x2: - return rewriteValueAMD64_OpApproximateReciprocalFloat64x2(v) + v.Op = OpAMD64VRCP14PD128 + return true case OpApproximateReciprocalFloat64x4: - return rewriteValueAMD64_OpApproximateReciprocalFloat64x4(v) + v.Op = OpAMD64VRCP14PD256 + return true case OpApproximateReciprocalFloat64x8: - return 
rewriteValueAMD64_OpApproximateReciprocalFloat64x8(v) + v.Op = OpAMD64VRCP14PD512 + return true case OpApproximateReciprocalOfSqrtFloat32x16: - return rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat32x16(v) + v.Op = OpAMD64VRSQRT14PS512 + return true case OpApproximateReciprocalOfSqrtFloat32x4: - return rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat32x4(v) + v.Op = OpAMD64VRSQRTPS128 + return true case OpApproximateReciprocalOfSqrtFloat32x8: - return rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat32x8(v) + v.Op = OpAMD64VRSQRTPS256 + return true case OpApproximateReciprocalOfSqrtFloat64x2: - return rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat64x2(v) + v.Op = OpAMD64VRSQRT14PD128 + return true case OpApproximateReciprocalOfSqrtFloat64x4: - return rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat64x4(v) + v.Op = OpAMD64VRSQRT14PD256 + return true case OpApproximateReciprocalOfSqrtFloat64x8: - return rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat64x8(v) + v.Op = OpAMD64VRSQRT14PD512 + return true case OpAtomicAdd32: return rewriteValueAMD64_OpAtomicAdd32(v) case OpAtomicAdd64: @@ -850,17 +956,23 @@ func rewriteValueAMD64(v *Value) bool { case OpAtomicStorePtrNoWB: return rewriteValueAMD64_OpAtomicStorePtrNoWB(v) case OpAverageUint16x16: - return rewriteValueAMD64_OpAverageUint16x16(v) + v.Op = OpAMD64VPAVGW256 + return true case OpAverageUint16x32: - return rewriteValueAMD64_OpAverageUint16x32(v) + v.Op = OpAMD64VPAVGW512 + return true case OpAverageUint16x8: - return rewriteValueAMD64_OpAverageUint16x8(v) + v.Op = OpAMD64VPAVGW128 + return true case OpAverageUint8x16: - return rewriteValueAMD64_OpAverageUint8x16(v) + v.Op = OpAMD64VPAVGB128 + return true case OpAverageUint8x32: - return rewriteValueAMD64_OpAverageUint8x32(v) + v.Op = OpAMD64VPAVGB256 + return true case OpAverageUint8x64: - return rewriteValueAMD64_OpAverageUint8x64(v) + v.Op = OpAMD64VPAVGB512 + return true case OpAvg64u: v.Op = OpAMD64AVGQU return true @@ -994,17 +1106,23 @@ func rewriteValueAMD64(v *Value) bool { case OpDiv8u: return rewriteValueAMD64_OpDiv8u(v) case OpDivFloat32x16: - return rewriteValueAMD64_OpDivFloat32x16(v) + v.Op = OpAMD64VDIVPS512 + return true case OpDivFloat32x4: - return rewriteValueAMD64_OpDivFloat32x4(v) + v.Op = OpAMD64VDIVPS128 + return true case OpDivFloat32x8: - return rewriteValueAMD64_OpDivFloat32x8(v) + v.Op = OpAMD64VDIVPS256 + return true case OpDivFloat64x2: - return rewriteValueAMD64_OpDivFloat64x2(v) + v.Op = OpAMD64VDIVPD128 + return true case OpDivFloat64x4: - return rewriteValueAMD64_OpDivFloat64x4(v) + v.Op = OpAMD64VDIVPD256 + return true case OpDivFloat64x8: - return rewriteValueAMD64_OpDivFloat64x8(v) + v.Op = OpAMD64VDIVPD512 + return true case OpEq16: return rewriteValueAMD64_OpEq16(v) case OpEq32: @@ -1034,27 +1152,35 @@ func rewriteValueAMD64(v *Value) bool { case OpEqualFloat64x8: return rewriteValueAMD64_OpEqualFloat64x8(v) case OpEqualInt16x16: - return rewriteValueAMD64_OpEqualInt16x16(v) + v.Op = OpAMD64VPCMPEQW256 + return true case OpEqualInt16x32: return rewriteValueAMD64_OpEqualInt16x32(v) case OpEqualInt16x8: - return rewriteValueAMD64_OpEqualInt16x8(v) + v.Op = OpAMD64VPCMPEQW128 + return true case OpEqualInt32x16: return rewriteValueAMD64_OpEqualInt32x16(v) case OpEqualInt32x4: - return rewriteValueAMD64_OpEqualInt32x4(v) + v.Op = OpAMD64VPCMPEQD128 + return true case OpEqualInt32x8: - return rewriteValueAMD64_OpEqualInt32x8(v) + v.Op = OpAMD64VPCMPEQD256 + return true case OpEqualInt64x2: - return 
rewriteValueAMD64_OpEqualInt64x2(v) + v.Op = OpAMD64VPCMPEQQ128 + return true case OpEqualInt64x4: - return rewriteValueAMD64_OpEqualInt64x4(v) + v.Op = OpAMD64VPCMPEQQ256 + return true case OpEqualInt64x8: return rewriteValueAMD64_OpEqualInt64x8(v) case OpEqualInt8x16: - return rewriteValueAMD64_OpEqualInt8x16(v) + v.Op = OpAMD64VPCMPEQB128 + return true case OpEqualInt8x32: - return rewriteValueAMD64_OpEqualInt8x32(v) + v.Op = OpAMD64VPCMPEQB256 + return true case OpEqualInt8x64: return rewriteValueAMD64_OpEqualInt8x64(v) case OpEqualUint16x16: @@ -1169,27 +1295,34 @@ func rewriteValueAMD64(v *Value) bool { case OpGreaterFloat64x8: return rewriteValueAMD64_OpGreaterFloat64x8(v) case OpGreaterInt16x16: - return rewriteValueAMD64_OpGreaterInt16x16(v) + v.Op = OpAMD64VPCMPGTW256 + return true case OpGreaterInt16x32: return rewriteValueAMD64_OpGreaterInt16x32(v) case OpGreaterInt16x8: - return rewriteValueAMD64_OpGreaterInt16x8(v) + v.Op = OpAMD64VPCMPGTW128 + return true case OpGreaterInt32x16: return rewriteValueAMD64_OpGreaterInt32x16(v) case OpGreaterInt32x4: - return rewriteValueAMD64_OpGreaterInt32x4(v) + v.Op = OpAMD64VPCMPGTD128 + return true case OpGreaterInt32x8: - return rewriteValueAMD64_OpGreaterInt32x8(v) + v.Op = OpAMD64VPCMPGTD256 + return true case OpGreaterInt64x2: return rewriteValueAMD64_OpGreaterInt64x2(v) case OpGreaterInt64x4: - return rewriteValueAMD64_OpGreaterInt64x4(v) + v.Op = OpAMD64VPCMPGTQ256 + return true case OpGreaterInt64x8: return rewriteValueAMD64_OpGreaterInt64x8(v) case OpGreaterInt8x16: - return rewriteValueAMD64_OpGreaterInt8x16(v) + v.Op = OpAMD64VPCMPGTB128 + return true case OpGreaterInt8x32: - return rewriteValueAMD64_OpGreaterInt8x32(v) + v.Op = OpAMD64VPCMPGTB256 + return true case OpGreaterInt8x64: return rewriteValueAMD64_OpGreaterInt8x64(v) case OpGreaterUint16x16: @@ -2454,129 +2587,189 @@ func rewriteValueAMD64(v *Value) bool { case OpMax64F: return rewriteValueAMD64_OpMax64F(v) case OpMaxFloat32x16: - return rewriteValueAMD64_OpMaxFloat32x16(v) + v.Op = OpAMD64VMAXPS512 + return true case OpMaxFloat32x4: - return rewriteValueAMD64_OpMaxFloat32x4(v) + v.Op = OpAMD64VMAXPS128 + return true case OpMaxFloat32x8: - return rewriteValueAMD64_OpMaxFloat32x8(v) + v.Op = OpAMD64VMAXPS256 + return true case OpMaxFloat64x2: - return rewriteValueAMD64_OpMaxFloat64x2(v) + v.Op = OpAMD64VMAXPD128 + return true case OpMaxFloat64x4: - return rewriteValueAMD64_OpMaxFloat64x4(v) + v.Op = OpAMD64VMAXPD256 + return true case OpMaxFloat64x8: - return rewriteValueAMD64_OpMaxFloat64x8(v) + v.Op = OpAMD64VMAXPD512 + return true case OpMaxInt16x16: - return rewriteValueAMD64_OpMaxInt16x16(v) + v.Op = OpAMD64VPMAXSW256 + return true case OpMaxInt16x32: - return rewriteValueAMD64_OpMaxInt16x32(v) + v.Op = OpAMD64VPMAXSW512 + return true case OpMaxInt16x8: - return rewriteValueAMD64_OpMaxInt16x8(v) + v.Op = OpAMD64VPMAXSW128 + return true case OpMaxInt32x16: - return rewriteValueAMD64_OpMaxInt32x16(v) + v.Op = OpAMD64VPMAXSD512 + return true case OpMaxInt32x4: - return rewriteValueAMD64_OpMaxInt32x4(v) + v.Op = OpAMD64VPMAXSD128 + return true case OpMaxInt32x8: - return rewriteValueAMD64_OpMaxInt32x8(v) + v.Op = OpAMD64VPMAXSD256 + return true case OpMaxInt64x2: - return rewriteValueAMD64_OpMaxInt64x2(v) + v.Op = OpAMD64VPMAXSQ128 + return true case OpMaxInt64x4: - return rewriteValueAMD64_OpMaxInt64x4(v) + v.Op = OpAMD64VPMAXSQ256 + return true case OpMaxInt64x8: - return rewriteValueAMD64_OpMaxInt64x8(v) + v.Op = OpAMD64VPMAXSQ512 + return true case OpMaxInt8x16: - 
return rewriteValueAMD64_OpMaxInt8x16(v) + v.Op = OpAMD64VPMAXSB128 + return true case OpMaxInt8x32: - return rewriteValueAMD64_OpMaxInt8x32(v) + v.Op = OpAMD64VPMAXSB256 + return true case OpMaxInt8x64: - return rewriteValueAMD64_OpMaxInt8x64(v) + v.Op = OpAMD64VPMAXSB512 + return true case OpMaxUint16x16: - return rewriteValueAMD64_OpMaxUint16x16(v) + v.Op = OpAMD64VPMAXUW256 + return true case OpMaxUint16x32: - return rewriteValueAMD64_OpMaxUint16x32(v) + v.Op = OpAMD64VPMAXUW512 + return true case OpMaxUint16x8: - return rewriteValueAMD64_OpMaxUint16x8(v) + v.Op = OpAMD64VPMAXUW128 + return true case OpMaxUint32x16: - return rewriteValueAMD64_OpMaxUint32x16(v) + v.Op = OpAMD64VPMAXUD512 + return true case OpMaxUint32x4: - return rewriteValueAMD64_OpMaxUint32x4(v) + v.Op = OpAMD64VPMAXUD128 + return true case OpMaxUint32x8: - return rewriteValueAMD64_OpMaxUint32x8(v) + v.Op = OpAMD64VPMAXUD256 + return true case OpMaxUint64x2: - return rewriteValueAMD64_OpMaxUint64x2(v) + v.Op = OpAMD64VPMAXUQ128 + return true case OpMaxUint64x4: - return rewriteValueAMD64_OpMaxUint64x4(v) + v.Op = OpAMD64VPMAXUQ256 + return true case OpMaxUint64x8: - return rewriteValueAMD64_OpMaxUint64x8(v) + v.Op = OpAMD64VPMAXUQ512 + return true case OpMaxUint8x16: - return rewriteValueAMD64_OpMaxUint8x16(v) + v.Op = OpAMD64VPMAXUB128 + return true case OpMaxUint8x32: - return rewriteValueAMD64_OpMaxUint8x32(v) + v.Op = OpAMD64VPMAXUB256 + return true case OpMaxUint8x64: - return rewriteValueAMD64_OpMaxUint8x64(v) + v.Op = OpAMD64VPMAXUB512 + return true case OpMin32F: return rewriteValueAMD64_OpMin32F(v) case OpMin64F: return rewriteValueAMD64_OpMin64F(v) case OpMinFloat32x16: - return rewriteValueAMD64_OpMinFloat32x16(v) + v.Op = OpAMD64VMINPS512 + return true case OpMinFloat32x4: - return rewriteValueAMD64_OpMinFloat32x4(v) + v.Op = OpAMD64VMINPS128 + return true case OpMinFloat32x8: - return rewriteValueAMD64_OpMinFloat32x8(v) + v.Op = OpAMD64VMINPS256 + return true case OpMinFloat64x2: - return rewriteValueAMD64_OpMinFloat64x2(v) + v.Op = OpAMD64VMINPD128 + return true case OpMinFloat64x4: - return rewriteValueAMD64_OpMinFloat64x4(v) + v.Op = OpAMD64VMINPD256 + return true case OpMinFloat64x8: - return rewriteValueAMD64_OpMinFloat64x8(v) + v.Op = OpAMD64VMINPD512 + return true case OpMinInt16x16: - return rewriteValueAMD64_OpMinInt16x16(v) + v.Op = OpAMD64VPMINSW256 + return true case OpMinInt16x32: - return rewriteValueAMD64_OpMinInt16x32(v) + v.Op = OpAMD64VPMINSW512 + return true case OpMinInt16x8: - return rewriteValueAMD64_OpMinInt16x8(v) + v.Op = OpAMD64VPMINSW128 + return true case OpMinInt32x16: - return rewriteValueAMD64_OpMinInt32x16(v) + v.Op = OpAMD64VPMINSD512 + return true case OpMinInt32x4: - return rewriteValueAMD64_OpMinInt32x4(v) + v.Op = OpAMD64VPMINSD128 + return true case OpMinInt32x8: - return rewriteValueAMD64_OpMinInt32x8(v) + v.Op = OpAMD64VPMINSD256 + return true case OpMinInt64x2: - return rewriteValueAMD64_OpMinInt64x2(v) + v.Op = OpAMD64VPMINSQ128 + return true case OpMinInt64x4: - return rewriteValueAMD64_OpMinInt64x4(v) + v.Op = OpAMD64VPMINSQ256 + return true case OpMinInt64x8: - return rewriteValueAMD64_OpMinInt64x8(v) + v.Op = OpAMD64VPMINSQ512 + return true case OpMinInt8x16: - return rewriteValueAMD64_OpMinInt8x16(v) + v.Op = OpAMD64VPMINSB128 + return true case OpMinInt8x32: - return rewriteValueAMD64_OpMinInt8x32(v) + v.Op = OpAMD64VPMINSB256 + return true case OpMinInt8x64: - return rewriteValueAMD64_OpMinInt8x64(v) + v.Op = OpAMD64VPMINSB512 + return true case 
OpMinUint16x16: - return rewriteValueAMD64_OpMinUint16x16(v) + v.Op = OpAMD64VPMINUW256 + return true case OpMinUint16x32: - return rewriteValueAMD64_OpMinUint16x32(v) + v.Op = OpAMD64VPMINUW512 + return true case OpMinUint16x8: - return rewriteValueAMD64_OpMinUint16x8(v) + v.Op = OpAMD64VPMINUW128 + return true case OpMinUint32x16: - return rewriteValueAMD64_OpMinUint32x16(v) + v.Op = OpAMD64VPMINUD512 + return true case OpMinUint32x4: - return rewriteValueAMD64_OpMinUint32x4(v) + v.Op = OpAMD64VPMINUD128 + return true case OpMinUint32x8: - return rewriteValueAMD64_OpMinUint32x8(v) + v.Op = OpAMD64VPMINUD256 + return true case OpMinUint64x2: - return rewriteValueAMD64_OpMinUint64x2(v) + v.Op = OpAMD64VPMINUQ128 + return true case OpMinUint64x4: - return rewriteValueAMD64_OpMinUint64x4(v) + v.Op = OpAMD64VPMINUQ256 + return true case OpMinUint64x8: - return rewriteValueAMD64_OpMinUint64x8(v) + v.Op = OpAMD64VPMINUQ512 + return true case OpMinUint8x16: - return rewriteValueAMD64_OpMinUint8x16(v) + v.Op = OpAMD64VPMINUB128 + return true case OpMinUint8x32: - return rewriteValueAMD64_OpMinUint8x32(v) + v.Op = OpAMD64VPMINUB256 + return true case OpMinUint8x64: - return rewriteValueAMD64_OpMinUint8x64(v) + v.Op = OpAMD64VPMINUB512 + return true case OpMod16: return rewriteValueAMD64_OpMod16(v) case OpMod16u: @@ -2617,79 +2810,116 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64MULL return true case OpMulByPowOf2Float32x16: - return rewriteValueAMD64_OpMulByPowOf2Float32x16(v) + v.Op = OpAMD64VSCALEFPS512 + return true case OpMulByPowOf2Float32x4: - return rewriteValueAMD64_OpMulByPowOf2Float32x4(v) + v.Op = OpAMD64VSCALEFPS128 + return true case OpMulByPowOf2Float32x8: - return rewriteValueAMD64_OpMulByPowOf2Float32x8(v) + v.Op = OpAMD64VSCALEFPS256 + return true case OpMulByPowOf2Float64x2: - return rewriteValueAMD64_OpMulByPowOf2Float64x2(v) + v.Op = OpAMD64VSCALEFPD128 + return true case OpMulByPowOf2Float64x4: - return rewriteValueAMD64_OpMulByPowOf2Float64x4(v) + v.Op = OpAMD64VSCALEFPD256 + return true case OpMulByPowOf2Float64x8: - return rewriteValueAMD64_OpMulByPowOf2Float64x8(v) + v.Op = OpAMD64VSCALEFPD512 + return true case OpMulEvenWidenInt32x4: - return rewriteValueAMD64_OpMulEvenWidenInt32x4(v) + v.Op = OpAMD64VPMULDQ128 + return true case OpMulEvenWidenInt32x8: - return rewriteValueAMD64_OpMulEvenWidenInt32x8(v) + v.Op = OpAMD64VPMULDQ256 + return true case OpMulEvenWidenInt64x2: - return rewriteValueAMD64_OpMulEvenWidenInt64x2(v) + v.Op = OpAMD64VPMULDQ128 + return true case OpMulEvenWidenInt64x4: - return rewriteValueAMD64_OpMulEvenWidenInt64x4(v) + v.Op = OpAMD64VPMULDQ256 + return true case OpMulEvenWidenInt64x8: - return rewriteValueAMD64_OpMulEvenWidenInt64x8(v) + v.Op = OpAMD64VPMULDQ512 + return true case OpMulEvenWidenUint32x4: - return rewriteValueAMD64_OpMulEvenWidenUint32x4(v) + v.Op = OpAMD64VPMULUDQ128 + return true case OpMulEvenWidenUint32x8: - return rewriteValueAMD64_OpMulEvenWidenUint32x8(v) + v.Op = OpAMD64VPMULUDQ256 + return true case OpMulEvenWidenUint64x2: - return rewriteValueAMD64_OpMulEvenWidenUint64x2(v) + v.Op = OpAMD64VPMULUDQ128 + return true case OpMulEvenWidenUint64x4: - return rewriteValueAMD64_OpMulEvenWidenUint64x4(v) + v.Op = OpAMD64VPMULUDQ256 + return true case OpMulEvenWidenUint64x8: - return rewriteValueAMD64_OpMulEvenWidenUint64x8(v) + v.Op = OpAMD64VPMULUDQ512 + return true case OpMulFloat32x16: - return rewriteValueAMD64_OpMulFloat32x16(v) + v.Op = OpAMD64VMULPS512 + return true case OpMulFloat32x4: - return 
rewriteValueAMD64_OpMulFloat32x4(v) + v.Op = OpAMD64VMULPS128 + return true case OpMulFloat32x8: - return rewriteValueAMD64_OpMulFloat32x8(v) + v.Op = OpAMD64VMULPS256 + return true case OpMulFloat64x2: - return rewriteValueAMD64_OpMulFloat64x2(v) + v.Op = OpAMD64VMULPD128 + return true case OpMulFloat64x4: - return rewriteValueAMD64_OpMulFloat64x4(v) + v.Op = OpAMD64VMULPD256 + return true case OpMulFloat64x8: - return rewriteValueAMD64_OpMulFloat64x8(v) + v.Op = OpAMD64VMULPD512 + return true case OpMulHighInt16x16: - return rewriteValueAMD64_OpMulHighInt16x16(v) + v.Op = OpAMD64VPMULHW256 + return true case OpMulHighInt16x32: - return rewriteValueAMD64_OpMulHighInt16x32(v) + v.Op = OpAMD64VPMULHW512 + return true case OpMulHighInt16x8: - return rewriteValueAMD64_OpMulHighInt16x8(v) + v.Op = OpAMD64VPMULHW128 + return true case OpMulHighUint16x16: - return rewriteValueAMD64_OpMulHighUint16x16(v) + v.Op = OpAMD64VPMULHUW256 + return true case OpMulHighUint16x32: - return rewriteValueAMD64_OpMulHighUint16x32(v) + v.Op = OpAMD64VPMULHUW512 + return true case OpMulHighUint16x8: - return rewriteValueAMD64_OpMulHighUint16x8(v) + v.Op = OpAMD64VPMULHUW128 + return true case OpMulLowInt16x16: - return rewriteValueAMD64_OpMulLowInt16x16(v) + v.Op = OpAMD64VPMULLW256 + return true case OpMulLowInt16x32: - return rewriteValueAMD64_OpMulLowInt16x32(v) + v.Op = OpAMD64VPMULLW512 + return true case OpMulLowInt16x8: - return rewriteValueAMD64_OpMulLowInt16x8(v) + v.Op = OpAMD64VPMULLW128 + return true case OpMulLowInt32x16: - return rewriteValueAMD64_OpMulLowInt32x16(v) + v.Op = OpAMD64VPMULLD512 + return true case OpMulLowInt32x4: - return rewriteValueAMD64_OpMulLowInt32x4(v) + v.Op = OpAMD64VPMULLD128 + return true case OpMulLowInt32x8: - return rewriteValueAMD64_OpMulLowInt32x8(v) + v.Op = OpAMD64VPMULLD256 + return true case OpMulLowInt64x2: - return rewriteValueAMD64_OpMulLowInt64x2(v) + v.Op = OpAMD64VPMULLQ128 + return true case OpMulLowInt64x4: - return rewriteValueAMD64_OpMulLowInt64x4(v) + v.Op = OpAMD64VPMULLQ256 + return true case OpMulLowInt64x8: - return rewriteValueAMD64_OpMulLowInt64x8(v) + v.Op = OpAMD64VPMULLQ512 + return true case OpNeg16: v.Op = OpAMD64NEGL return true @@ -2805,105 +3035,155 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64ORL return true case OpOrFloat32x16: - return rewriteValueAMD64_OpOrFloat32x16(v) + v.Op = OpAMD64VORPS512 + return true case OpOrFloat32x4: - return rewriteValueAMD64_OpOrFloat32x4(v) + v.Op = OpAMD64VORPS128 + return true case OpOrFloat32x8: - return rewriteValueAMD64_OpOrFloat32x8(v) + v.Op = OpAMD64VORPS256 + return true case OpOrFloat64x2: - return rewriteValueAMD64_OpOrFloat64x2(v) + v.Op = OpAMD64VORPD128 + return true case OpOrFloat64x4: - return rewriteValueAMD64_OpOrFloat64x4(v) + v.Op = OpAMD64VORPD256 + return true case OpOrFloat64x8: - return rewriteValueAMD64_OpOrFloat64x8(v) + v.Op = OpAMD64VORPD512 + return true case OpOrInt16x16: - return rewriteValueAMD64_OpOrInt16x16(v) + v.Op = OpAMD64VPOR256 + return true case OpOrInt16x8: - return rewriteValueAMD64_OpOrInt16x8(v) + v.Op = OpAMD64VPOR128 + return true case OpOrInt32x16: - return rewriteValueAMD64_OpOrInt32x16(v) + v.Op = OpAMD64VPORD512 + return true case OpOrInt32x4: - return rewriteValueAMD64_OpOrInt32x4(v) + v.Op = OpAMD64VPOR128 + return true case OpOrInt32x8: - return rewriteValueAMD64_OpOrInt32x8(v) + v.Op = OpAMD64VPOR256 + return true case OpOrInt64x2: - return rewriteValueAMD64_OpOrInt64x2(v) + v.Op = OpAMD64VPOR128 + return true case OpOrInt64x4: - return 
rewriteValueAMD64_OpOrInt64x4(v) + v.Op = OpAMD64VPOR256 + return true case OpOrInt64x8: - return rewriteValueAMD64_OpOrInt64x8(v) + v.Op = OpAMD64VPORQ512 + return true case OpOrInt8x16: - return rewriteValueAMD64_OpOrInt8x16(v) + v.Op = OpAMD64VPOR128 + return true case OpOrInt8x32: - return rewriteValueAMD64_OpOrInt8x32(v) + v.Op = OpAMD64VPOR256 + return true case OpOrUint16x16: - return rewriteValueAMD64_OpOrUint16x16(v) + v.Op = OpAMD64VPOR256 + return true case OpOrUint16x8: - return rewriteValueAMD64_OpOrUint16x8(v) + v.Op = OpAMD64VPOR128 + return true case OpOrUint32x16: - return rewriteValueAMD64_OpOrUint32x16(v) + v.Op = OpAMD64VPORD512 + return true case OpOrUint32x4: - return rewriteValueAMD64_OpOrUint32x4(v) + v.Op = OpAMD64VPOR128 + return true case OpOrUint32x8: - return rewriteValueAMD64_OpOrUint32x8(v) + v.Op = OpAMD64VPOR256 + return true case OpOrUint64x2: - return rewriteValueAMD64_OpOrUint64x2(v) + v.Op = OpAMD64VPOR128 + return true case OpOrUint64x4: - return rewriteValueAMD64_OpOrUint64x4(v) + v.Op = OpAMD64VPOR256 + return true case OpOrUint64x8: - return rewriteValueAMD64_OpOrUint64x8(v) + v.Op = OpAMD64VPORQ512 + return true case OpOrUint8x16: - return rewriteValueAMD64_OpOrUint8x16(v) + v.Op = OpAMD64VPOR128 + return true case OpOrUint8x32: - return rewriteValueAMD64_OpOrUint8x32(v) + v.Op = OpAMD64VPOR256 + return true case OpPairwiseAddFloat32x4: - return rewriteValueAMD64_OpPairwiseAddFloat32x4(v) + v.Op = OpAMD64VHADDPS128 + return true case OpPairwiseAddFloat32x8: - return rewriteValueAMD64_OpPairwiseAddFloat32x8(v) + v.Op = OpAMD64VHADDPS256 + return true case OpPairwiseAddFloat64x2: - return rewriteValueAMD64_OpPairwiseAddFloat64x2(v) + v.Op = OpAMD64VHADDPD128 + return true case OpPairwiseAddFloat64x4: - return rewriteValueAMD64_OpPairwiseAddFloat64x4(v) + v.Op = OpAMD64VHADDPD256 + return true case OpPairwiseAddInt16x16: - return rewriteValueAMD64_OpPairwiseAddInt16x16(v) + v.Op = OpAMD64VPHADDW256 + return true case OpPairwiseAddInt16x8: - return rewriteValueAMD64_OpPairwiseAddInt16x8(v) + v.Op = OpAMD64VPHADDW128 + return true case OpPairwiseAddInt32x4: - return rewriteValueAMD64_OpPairwiseAddInt32x4(v) + v.Op = OpAMD64VPHADDD128 + return true case OpPairwiseAddInt32x8: - return rewriteValueAMD64_OpPairwiseAddInt32x8(v) + v.Op = OpAMD64VPHADDD256 + return true case OpPairwiseAddUint16x16: - return rewriteValueAMD64_OpPairwiseAddUint16x16(v) + v.Op = OpAMD64VPHADDW256 + return true case OpPairwiseAddUint16x8: - return rewriteValueAMD64_OpPairwiseAddUint16x8(v) + v.Op = OpAMD64VPHADDW128 + return true case OpPairwiseAddUint32x4: - return rewriteValueAMD64_OpPairwiseAddUint32x4(v) + v.Op = OpAMD64VPHADDD128 + return true case OpPairwiseAddUint32x8: - return rewriteValueAMD64_OpPairwiseAddUint32x8(v) + v.Op = OpAMD64VPHADDD256 + return true case OpPairwiseSubFloat32x4: - return rewriteValueAMD64_OpPairwiseSubFloat32x4(v) + v.Op = OpAMD64VHSUBPS128 + return true case OpPairwiseSubFloat32x8: - return rewriteValueAMD64_OpPairwiseSubFloat32x8(v) + v.Op = OpAMD64VHSUBPS256 + return true case OpPairwiseSubFloat64x2: - return rewriteValueAMD64_OpPairwiseSubFloat64x2(v) + v.Op = OpAMD64VHSUBPD128 + return true case OpPairwiseSubFloat64x4: - return rewriteValueAMD64_OpPairwiseSubFloat64x4(v) + v.Op = OpAMD64VHSUBPD256 + return true case OpPairwiseSubInt16x16: - return rewriteValueAMD64_OpPairwiseSubInt16x16(v) + v.Op = OpAMD64VPHSUBW256 + return true case OpPairwiseSubInt16x8: - return rewriteValueAMD64_OpPairwiseSubInt16x8(v) + v.Op = OpAMD64VPHSUBW128 + return 
true case OpPairwiseSubInt32x4: - return rewriteValueAMD64_OpPairwiseSubInt32x4(v) + v.Op = OpAMD64VPHSUBD128 + return true case OpPairwiseSubInt32x8: - return rewriteValueAMD64_OpPairwiseSubInt32x8(v) + v.Op = OpAMD64VPHSUBD256 + return true case OpPairwiseSubUint16x16: - return rewriteValueAMD64_OpPairwiseSubUint16x16(v) + v.Op = OpAMD64VPHSUBW256 + return true case OpPairwiseSubUint16x8: - return rewriteValueAMD64_OpPairwiseSubUint16x8(v) + v.Op = OpAMD64VPHSUBW128 + return true case OpPairwiseSubUint32x4: - return rewriteValueAMD64_OpPairwiseSubUint32x4(v) + v.Op = OpAMD64VPHSUBD128 + return true case OpPairwiseSubUint32x8: - return rewriteValueAMD64_OpPairwiseSubUint32x8(v) + v.Op = OpAMD64VPHSUBD256 + return true case OpPanicBounds: return rewriteValueAMD64_OpPanicBounds(v) case OpPopCount16: @@ -2917,53 +3197,77 @@ func rewriteValueAMD64(v *Value) bool { case OpPopCount8: return rewriteValueAMD64_OpPopCount8(v) case OpPopCountInt16x16: - return rewriteValueAMD64_OpPopCountInt16x16(v) + v.Op = OpAMD64VPOPCNTW256 + return true case OpPopCountInt16x32: - return rewriteValueAMD64_OpPopCountInt16x32(v) + v.Op = OpAMD64VPOPCNTW512 + return true case OpPopCountInt16x8: - return rewriteValueAMD64_OpPopCountInt16x8(v) + v.Op = OpAMD64VPOPCNTW128 + return true case OpPopCountInt32x16: - return rewriteValueAMD64_OpPopCountInt32x16(v) + v.Op = OpAMD64VPOPCNTD512 + return true case OpPopCountInt32x4: - return rewriteValueAMD64_OpPopCountInt32x4(v) + v.Op = OpAMD64VPOPCNTD128 + return true case OpPopCountInt32x8: - return rewriteValueAMD64_OpPopCountInt32x8(v) + v.Op = OpAMD64VPOPCNTD256 + return true case OpPopCountInt64x2: - return rewriteValueAMD64_OpPopCountInt64x2(v) + v.Op = OpAMD64VPOPCNTQ128 + return true case OpPopCountInt64x4: - return rewriteValueAMD64_OpPopCountInt64x4(v) + v.Op = OpAMD64VPOPCNTQ256 + return true case OpPopCountInt64x8: - return rewriteValueAMD64_OpPopCountInt64x8(v) + v.Op = OpAMD64VPOPCNTQ512 + return true case OpPopCountInt8x16: - return rewriteValueAMD64_OpPopCountInt8x16(v) + v.Op = OpAMD64VPOPCNTB128 + return true case OpPopCountInt8x32: - return rewriteValueAMD64_OpPopCountInt8x32(v) + v.Op = OpAMD64VPOPCNTB256 + return true case OpPopCountInt8x64: - return rewriteValueAMD64_OpPopCountInt8x64(v) + v.Op = OpAMD64VPOPCNTB512 + return true case OpPopCountUint16x16: - return rewriteValueAMD64_OpPopCountUint16x16(v) + v.Op = OpAMD64VPOPCNTW256 + return true case OpPopCountUint16x32: - return rewriteValueAMD64_OpPopCountUint16x32(v) + v.Op = OpAMD64VPOPCNTW512 + return true case OpPopCountUint16x8: - return rewriteValueAMD64_OpPopCountUint16x8(v) + v.Op = OpAMD64VPOPCNTW128 + return true case OpPopCountUint32x16: - return rewriteValueAMD64_OpPopCountUint32x16(v) + v.Op = OpAMD64VPOPCNTD512 + return true case OpPopCountUint32x4: - return rewriteValueAMD64_OpPopCountUint32x4(v) + v.Op = OpAMD64VPOPCNTD128 + return true case OpPopCountUint32x8: - return rewriteValueAMD64_OpPopCountUint32x8(v) + v.Op = OpAMD64VPOPCNTD256 + return true case OpPopCountUint64x2: - return rewriteValueAMD64_OpPopCountUint64x2(v) + v.Op = OpAMD64VPOPCNTQ128 + return true case OpPopCountUint64x4: - return rewriteValueAMD64_OpPopCountUint64x4(v) + v.Op = OpAMD64VPOPCNTQ256 + return true case OpPopCountUint64x8: - return rewriteValueAMD64_OpPopCountUint64x8(v) + v.Op = OpAMD64VPOPCNTQ512 + return true case OpPopCountUint8x16: - return rewriteValueAMD64_OpPopCountUint8x16(v) + v.Op = OpAMD64VPOPCNTB128 + return true case OpPopCountUint8x32: - return rewriteValueAMD64_OpPopCountUint8x32(v) + v.Op = 
OpAMD64VPOPCNTB256 + return true case OpPopCountUint8x64: - return rewriteValueAMD64_OpPopCountUint8x64(v) + v.Op = OpAMD64VPOPCNTB512 + return true case OpPrefetchCache: v.Op = OpAMD64PrefetchT0 return true @@ -3055,61 +3359,89 @@ func rewriteValueAMD64(v *Value) bool { case OpRsh8x8: return rewriteValueAMD64_OpRsh8x8(v) case OpSaturatedAddInt16x16: - return rewriteValueAMD64_OpSaturatedAddInt16x16(v) + v.Op = OpAMD64VPADDSW256 + return true case OpSaturatedAddInt16x32: - return rewriteValueAMD64_OpSaturatedAddInt16x32(v) + v.Op = OpAMD64VPADDSW512 + return true case OpSaturatedAddInt16x8: - return rewriteValueAMD64_OpSaturatedAddInt16x8(v) + v.Op = OpAMD64VPADDSW128 + return true case OpSaturatedAddInt8x16: - return rewriteValueAMD64_OpSaturatedAddInt8x16(v) + v.Op = OpAMD64VPADDSB128 + return true case OpSaturatedAddInt8x32: - return rewriteValueAMD64_OpSaturatedAddInt8x32(v) + v.Op = OpAMD64VPADDSB256 + return true case OpSaturatedAddInt8x64: - return rewriteValueAMD64_OpSaturatedAddInt8x64(v) + v.Op = OpAMD64VPADDSB512 + return true case OpSaturatedAddUint16x16: - return rewriteValueAMD64_OpSaturatedAddUint16x16(v) + v.Op = OpAMD64VPADDSW256 + return true case OpSaturatedAddUint16x32: - return rewriteValueAMD64_OpSaturatedAddUint16x32(v) + v.Op = OpAMD64VPADDSW512 + return true case OpSaturatedAddUint16x8: - return rewriteValueAMD64_OpSaturatedAddUint16x8(v) + v.Op = OpAMD64VPADDSW128 + return true case OpSaturatedAddUint8x16: - return rewriteValueAMD64_OpSaturatedAddUint8x16(v) + v.Op = OpAMD64VPADDSB128 + return true case OpSaturatedAddUint8x32: - return rewriteValueAMD64_OpSaturatedAddUint8x32(v) + v.Op = OpAMD64VPADDSB256 + return true case OpSaturatedAddUint8x64: - return rewriteValueAMD64_OpSaturatedAddUint8x64(v) + v.Op = OpAMD64VPADDSB512 + return true case OpSaturatedPairwiseAddInt16x16: - return rewriteValueAMD64_OpSaturatedPairwiseAddInt16x16(v) + v.Op = OpAMD64VPHADDSW256 + return true case OpSaturatedPairwiseAddInt16x8: - return rewriteValueAMD64_OpSaturatedPairwiseAddInt16x8(v) + v.Op = OpAMD64VPHADDSW128 + return true case OpSaturatedPairwiseSubInt16x16: - return rewriteValueAMD64_OpSaturatedPairwiseSubInt16x16(v) + v.Op = OpAMD64VPHSUBSW256 + return true case OpSaturatedPairwiseSubInt16x8: - return rewriteValueAMD64_OpSaturatedPairwiseSubInt16x8(v) + v.Op = OpAMD64VPHSUBSW128 + return true case OpSaturatedSubInt16x16: - return rewriteValueAMD64_OpSaturatedSubInt16x16(v) + v.Op = OpAMD64VPSUBSW256 + return true case OpSaturatedSubInt16x32: - return rewriteValueAMD64_OpSaturatedSubInt16x32(v) + v.Op = OpAMD64VPSUBSW512 + return true case OpSaturatedSubInt16x8: - return rewriteValueAMD64_OpSaturatedSubInt16x8(v) + v.Op = OpAMD64VPSUBSW128 + return true case OpSaturatedSubInt8x16: - return rewriteValueAMD64_OpSaturatedSubInt8x16(v) + v.Op = OpAMD64VPSUBSB128 + return true case OpSaturatedSubInt8x32: - return rewriteValueAMD64_OpSaturatedSubInt8x32(v) + v.Op = OpAMD64VPSUBSB256 + return true case OpSaturatedSubInt8x64: - return rewriteValueAMD64_OpSaturatedSubInt8x64(v) + v.Op = OpAMD64VPSUBSB512 + return true case OpSaturatedSubUint16x16: - return rewriteValueAMD64_OpSaturatedSubUint16x16(v) + v.Op = OpAMD64VPSUBSW256 + return true case OpSaturatedSubUint16x32: - return rewriteValueAMD64_OpSaturatedSubUint16x32(v) + v.Op = OpAMD64VPSUBSW512 + return true case OpSaturatedSubUint16x8: - return rewriteValueAMD64_OpSaturatedSubUint16x8(v) + v.Op = OpAMD64VPSUBSW128 + return true case OpSaturatedSubUint8x16: - return rewriteValueAMD64_OpSaturatedSubUint8x16(v) + v.Op = 
OpAMD64VPSUBSB128 + return true case OpSaturatedSubUint8x32: - return rewriteValueAMD64_OpSaturatedSubUint8x32(v) + v.Op = OpAMD64VPSUBSB256 + return true case OpSaturatedSubUint8x64: - return rewriteValueAMD64_OpSaturatedSubUint8x64(v) + v.Op = OpAMD64VPSUBSB512 + return true case OpSelect0: return rewriteValueAMD64_OpSelect0(v) case OpSelect1: @@ -3135,17 +3467,23 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64MOVBQSX return true case OpSignInt16x16: - return rewriteValueAMD64_OpSignInt16x16(v) + v.Op = OpAMD64VPSIGNW256 + return true case OpSignInt16x8: - return rewriteValueAMD64_OpSignInt16x8(v) + v.Op = OpAMD64VPSIGNW128 + return true case OpSignInt32x4: - return rewriteValueAMD64_OpSignInt32x4(v) + v.Op = OpAMD64VPSIGND128 + return true case OpSignInt32x8: - return rewriteValueAMD64_OpSignInt32x8(v) + v.Op = OpAMD64VPSIGND256 + return true case OpSignInt8x16: - return rewriteValueAMD64_OpSignInt8x16(v) + v.Op = OpAMD64VPSIGNB128 + return true case OpSignInt8x32: - return rewriteValueAMD64_OpSignInt8x32(v) + v.Op = OpAMD64VPSIGNB256 + return true case OpSlicemask: return rewriteValueAMD64_OpSlicemask(v) case OpSpectreIndex: @@ -3159,17 +3497,23 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64SQRTSS return true case OpSqrtFloat32x16: - return rewriteValueAMD64_OpSqrtFloat32x16(v) + v.Op = OpAMD64VSQRTPS512 + return true case OpSqrtFloat32x4: - return rewriteValueAMD64_OpSqrtFloat32x4(v) + v.Op = OpAMD64VSQRTPS128 + return true case OpSqrtFloat32x8: - return rewriteValueAMD64_OpSqrtFloat32x8(v) + v.Op = OpAMD64VSQRTPS256 + return true case OpSqrtFloat64x2: - return rewriteValueAMD64_OpSqrtFloat64x2(v) + v.Op = OpAMD64VSQRTPD128 + return true case OpSqrtFloat64x4: - return rewriteValueAMD64_OpSqrtFloat64x4(v) + v.Op = OpAMD64VSQRTPD256 + return true case OpSqrtFloat64x8: - return rewriteValueAMD64_OpSqrtFloat64x8(v) + v.Op = OpAMD64VSQRTPD512 + return true case OpStaticCall: v.Op = OpAMD64CALLstatic return true @@ -3194,68 +3538,98 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64SUBL return true case OpSubFloat32x16: - return rewriteValueAMD64_OpSubFloat32x16(v) + v.Op = OpAMD64VADDPS512 + return true case OpSubFloat32x4: - return rewriteValueAMD64_OpSubFloat32x4(v) + v.Op = OpAMD64VADDPS128 + return true case OpSubFloat32x8: - return rewriteValueAMD64_OpSubFloat32x8(v) + v.Op = OpAMD64VADDPS256 + return true case OpSubFloat64x2: - return rewriteValueAMD64_OpSubFloat64x2(v) + v.Op = OpAMD64VADDPD128 + return true case OpSubFloat64x4: - return rewriteValueAMD64_OpSubFloat64x4(v) + v.Op = OpAMD64VADDPD256 + return true case OpSubFloat64x8: - return rewriteValueAMD64_OpSubFloat64x8(v) + v.Op = OpAMD64VADDPD512 + return true case OpSubInt16x16: - return rewriteValueAMD64_OpSubInt16x16(v) + v.Op = OpAMD64VPSUBW256 + return true case OpSubInt16x32: - return rewriteValueAMD64_OpSubInt16x32(v) + v.Op = OpAMD64VPSUBW512 + return true case OpSubInt16x8: - return rewriteValueAMD64_OpSubInt16x8(v) + v.Op = OpAMD64VPSUBW128 + return true case OpSubInt32x16: - return rewriteValueAMD64_OpSubInt32x16(v) + v.Op = OpAMD64VPSUBD512 + return true case OpSubInt32x4: - return rewriteValueAMD64_OpSubInt32x4(v) + v.Op = OpAMD64VPSUBD128 + return true case OpSubInt32x8: - return rewriteValueAMD64_OpSubInt32x8(v) + v.Op = OpAMD64VPSUBD256 + return true case OpSubInt64x2: - return rewriteValueAMD64_OpSubInt64x2(v) + v.Op = OpAMD64VPSUBQ128 + return true case OpSubInt64x4: - return rewriteValueAMD64_OpSubInt64x4(v) + v.Op = OpAMD64VPSUBQ256 + return true case OpSubInt64x8: - return 
rewriteValueAMD64_OpSubInt64x8(v) + v.Op = OpAMD64VPSUBQ512 + return true case OpSubInt8x16: - return rewriteValueAMD64_OpSubInt8x16(v) + v.Op = OpAMD64VPSUBB128 + return true case OpSubInt8x32: - return rewriteValueAMD64_OpSubInt8x32(v) + v.Op = OpAMD64VPSUBB256 + return true case OpSubInt8x64: - return rewriteValueAMD64_OpSubInt8x64(v) + v.Op = OpAMD64VPSUBB512 + return true case OpSubPtr: v.Op = OpAMD64SUBQ return true case OpSubUint16x16: - return rewriteValueAMD64_OpSubUint16x16(v) + v.Op = OpAMD64VPSUBW256 + return true case OpSubUint16x32: - return rewriteValueAMD64_OpSubUint16x32(v) + v.Op = OpAMD64VPSUBW512 + return true case OpSubUint16x8: - return rewriteValueAMD64_OpSubUint16x8(v) + v.Op = OpAMD64VPSUBW128 + return true case OpSubUint32x16: - return rewriteValueAMD64_OpSubUint32x16(v) + v.Op = OpAMD64VPSUBD512 + return true case OpSubUint32x4: - return rewriteValueAMD64_OpSubUint32x4(v) + v.Op = OpAMD64VPSUBD128 + return true case OpSubUint32x8: - return rewriteValueAMD64_OpSubUint32x8(v) + v.Op = OpAMD64VPSUBD256 + return true case OpSubUint64x2: - return rewriteValueAMD64_OpSubUint64x2(v) + v.Op = OpAMD64VPSUBQ128 + return true case OpSubUint64x4: - return rewriteValueAMD64_OpSubUint64x4(v) + v.Op = OpAMD64VPSUBQ256 + return true case OpSubUint64x8: - return rewriteValueAMD64_OpSubUint64x8(v) + v.Op = OpAMD64VPSUBQ512 + return true case OpSubUint8x16: - return rewriteValueAMD64_OpSubUint8x16(v) + v.Op = OpAMD64VPSUBB128 + return true case OpSubUint8x32: - return rewriteValueAMD64_OpSubUint8x32(v) + v.Op = OpAMD64VPSUBB256 + return true case OpSubUint8x64: - return rewriteValueAMD64_OpSubUint8x64(v) + v.Op = OpAMD64VPSUBB512 + return true case OpTailCall: v.Op = OpAMD64CALLtail return true @@ -3295,57 +3669,83 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64XORL return true case OpXorFloat32x16: - return rewriteValueAMD64_OpXorFloat32x16(v) + v.Op = OpAMD64VXORPS512 + return true case OpXorFloat32x4: - return rewriteValueAMD64_OpXorFloat32x4(v) + v.Op = OpAMD64VXORPS128 + return true case OpXorFloat32x8: - return rewriteValueAMD64_OpXorFloat32x8(v) + v.Op = OpAMD64VXORPS256 + return true case OpXorFloat64x2: - return rewriteValueAMD64_OpXorFloat64x2(v) + v.Op = OpAMD64VXORPD128 + return true case OpXorFloat64x4: - return rewriteValueAMD64_OpXorFloat64x4(v) + v.Op = OpAMD64VXORPD256 + return true case OpXorFloat64x8: - return rewriteValueAMD64_OpXorFloat64x8(v) + v.Op = OpAMD64VXORPD512 + return true case OpXorInt16x16: - return rewriteValueAMD64_OpXorInt16x16(v) + v.Op = OpAMD64VPXOR256 + return true case OpXorInt16x8: - return rewriteValueAMD64_OpXorInt16x8(v) + v.Op = OpAMD64VPXOR128 + return true case OpXorInt32x16: - return rewriteValueAMD64_OpXorInt32x16(v) + v.Op = OpAMD64VPXORD512 + return true case OpXorInt32x4: - return rewriteValueAMD64_OpXorInt32x4(v) + v.Op = OpAMD64VPXOR128 + return true case OpXorInt32x8: - return rewriteValueAMD64_OpXorInt32x8(v) + v.Op = OpAMD64VPXOR256 + return true case OpXorInt64x2: - return rewriteValueAMD64_OpXorInt64x2(v) + v.Op = OpAMD64VPXOR128 + return true case OpXorInt64x4: - return rewriteValueAMD64_OpXorInt64x4(v) + v.Op = OpAMD64VPXOR256 + return true case OpXorInt64x8: - return rewriteValueAMD64_OpXorInt64x8(v) + v.Op = OpAMD64VPXORQ512 + return true case OpXorInt8x16: - return rewriteValueAMD64_OpXorInt8x16(v) + v.Op = OpAMD64VPXOR128 + return true case OpXorInt8x32: - return rewriteValueAMD64_OpXorInt8x32(v) + v.Op = OpAMD64VPXOR256 + return true case OpXorUint16x16: - return rewriteValueAMD64_OpXorUint16x16(v) + v.Op 
= OpAMD64VPXOR256 + return true case OpXorUint16x8: - return rewriteValueAMD64_OpXorUint16x8(v) + v.Op = OpAMD64VPXOR128 + return true case OpXorUint32x16: - return rewriteValueAMD64_OpXorUint32x16(v) + v.Op = OpAMD64VPXORD512 + return true case OpXorUint32x4: - return rewriteValueAMD64_OpXorUint32x4(v) + v.Op = OpAMD64VPXOR128 + return true case OpXorUint32x8: - return rewriteValueAMD64_OpXorUint32x8(v) + v.Op = OpAMD64VPXOR256 + return true case OpXorUint64x2: - return rewriteValueAMD64_OpXorUint64x2(v) + v.Op = OpAMD64VPXOR128 + return true case OpXorUint64x4: - return rewriteValueAMD64_OpXorUint64x4(v) + v.Op = OpAMD64VPXOR256 + return true case OpXorUint64x8: - return rewriteValueAMD64_OpXorUint64x8(v) + v.Op = OpAMD64VPXORQ512 + return true case OpXorUint8x16: - return rewriteValueAMD64_OpXorUint8x16(v) + v.Op = OpAMD64VPXOR128 + return true case OpXorUint8x32: - return rewriteValueAMD64_OpXorUint8x32(v) + v.Op = OpAMD64VPXOR256 + return true case OpZero: return rewriteValueAMD64_OpZero(v) case OpZeroExt16to32: @@ -26050,20295 +26450,16687 @@ func rewriteValueAMD64_OpAMD64XORQmodify(v *Value) bool { } return false } -func rewriteValueAMD64_OpAbsoluteInt16x16(v *Value) bool { - v_0 := v.Args[0] - // match: (AbsoluteInt16x16 x) - // result: (VPABSW256 x) - for { - x := v_0 - v.reset(OpAMD64VPABSW256) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpAbsoluteInt16x32(v *Value) bool { - v_0 := v.Args[0] - // match: (AbsoluteInt16x32 x) - // result: (VPABSW512 x) - for { - x := v_0 - v.reset(OpAMD64VPABSW512) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpAbsoluteInt16x8(v *Value) bool { - v_0 := v.Args[0] - // match: (AbsoluteInt16x8 x) - // result: (VPABSW128 x) - for { - x := v_0 - v.reset(OpAMD64VPABSW128) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpAbsoluteInt32x16(v *Value) bool { +func rewriteValueAMD64_OpAddr(v *Value) bool { v_0 := v.Args[0] - // match: (AbsoluteInt32x16 x) - // result: (VPABSD512 x) + // match: (Addr {sym} base) + // result: (LEAQ {sym} base) for { - x := v_0 - v.reset(OpAMD64VPABSD512) - v.AddArg(x) + sym := auxToSym(v.Aux) + base := v_0 + v.reset(OpAMD64LEAQ) + v.Aux = symToAux(sym) + v.AddArg(base) return true } } -func rewriteValueAMD64_OpAbsoluteInt32x4(v *Value) bool { +func rewriteValueAMD64_OpAtomicAdd32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AbsoluteInt32x4 x) - // result: (VPABSD128 x) + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicAdd32 ptr val mem) + // result: (AddTupleFirst32 val (XADDLlock val ptr mem)) for { - x := v_0 - v.reset(OpAMD64VPABSD128) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64AddTupleFirst32) + v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, types.NewTuple(typ.UInt32, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg2(val, v0) return true } } -func rewriteValueAMD64_OpAbsoluteInt32x8(v *Value) bool { +func rewriteValueAMD64_OpAtomicAdd64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AbsoluteInt32x8 x) - // result: (VPABSD256 x) + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicAdd64 ptr val mem) + // result: (AddTupleFirst64 val (XADDQlock val ptr mem)) for { - x := v_0 - v.reset(OpAMD64VPABSD256) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64AddTupleFirst64) + v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg2(val, v0) return true } } -func 
rewriteValueAMD64_OpAbsoluteInt64x2(v *Value) bool { +func rewriteValueAMD64_OpAtomicAnd32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AbsoluteInt64x2 x) - // result: (VPABSQ128 x) + // match: (AtomicAnd32 ptr val mem) + // result: (ANDLlock ptr val mem) for { - x := v_0 - v.reset(OpAMD64VPABSQ128) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64ANDLlock) + v.AddArg3(ptr, val, mem) return true } } -func rewriteValueAMD64_OpAbsoluteInt64x4(v *Value) bool { +func rewriteValueAMD64_OpAtomicAnd32value(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AbsoluteInt64x4 x) - // result: (VPABSQ256 x) + // match: (AtomicAnd32value ptr val mem) + // result: (LoweredAtomicAnd32 ptr val mem) for { - x := v_0 - v.reset(OpAMD64VPABSQ256) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64LoweredAtomicAnd32) + v.AddArg3(ptr, val, mem) return true } } -func rewriteValueAMD64_OpAbsoluteInt64x8(v *Value) bool { +func rewriteValueAMD64_OpAtomicAnd64value(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AbsoluteInt64x8 x) - // result: (VPABSQ512 x) + // match: (AtomicAnd64value ptr val mem) + // result: (LoweredAtomicAnd64 ptr val mem) for { - x := v_0 - v.reset(OpAMD64VPABSQ512) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64LoweredAtomicAnd64) + v.AddArg3(ptr, val, mem) return true } } -func rewriteValueAMD64_OpAbsoluteInt8x16(v *Value) bool { +func rewriteValueAMD64_OpAtomicAnd8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AbsoluteInt8x16 x) - // result: (VPABSB128 x) + // match: (AtomicAnd8 ptr val mem) + // result: (ANDBlock ptr val mem) for { - x := v_0 - v.reset(OpAMD64VPABSB128) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64ANDBlock) + v.AddArg3(ptr, val, mem) return true } } -func rewriteValueAMD64_OpAbsoluteInt8x32(v *Value) bool { +func rewriteValueAMD64_OpAtomicCompareAndSwap32(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AbsoluteInt8x32 x) - // result: (VPABSB256 x) + // match: (AtomicCompareAndSwap32 ptr old new_ mem) + // result: (CMPXCHGLlock ptr old new_ mem) for { - x := v_0 - v.reset(OpAMD64VPABSB256) - v.AddArg(x) + ptr := v_0 + old := v_1 + new_ := v_2 + mem := v_3 + v.reset(OpAMD64CMPXCHGLlock) + v.AddArg4(ptr, old, new_, mem) return true } } -func rewriteValueAMD64_OpAbsoluteInt8x64(v *Value) bool { +func rewriteValueAMD64_OpAtomicCompareAndSwap64(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AbsoluteInt8x64 x) - // result: (VPABSB512 x) + // match: (AtomicCompareAndSwap64 ptr old new_ mem) + // result: (CMPXCHGQlock ptr old new_ mem) for { - x := v_0 - v.reset(OpAMD64VPABSB512) - v.AddArg(x) + ptr := v_0 + old := v_1 + new_ := v_2 + mem := v_3 + v.reset(OpAMD64CMPXCHGQlock) + v.AddArg4(ptr, old, new_, mem) return true } } -func rewriteValueAMD64_OpAddFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpAtomicExchange32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AddFloat32x16 x y) - // result: (VADDPS512 y x) + // match: (AtomicExchange32 ptr val mem) + // result: (XCHGL val ptr mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VADDPS512) - v.AddArg2(y, x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64XCHGL) + v.AddArg3(val, ptr, mem) return true } } -func 
rewriteValueAMD64_OpAddFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpAtomicExchange64(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AddFloat32x4 x y) - // result: (VADDPS128 y x) + // match: (AtomicExchange64 ptr val mem) + // result: (XCHGQ val ptr mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VADDPS128) - v.AddArg2(y, x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64XCHGQ) + v.AddArg3(val, ptr, mem) return true } } -func rewriteValueAMD64_OpAddFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpAtomicExchange8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AddFloat32x8 x y) - // result: (VADDPS256 y x) + // match: (AtomicExchange8 ptr val mem) + // result: (XCHGB val ptr mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VADDPS256) - v.AddArg2(y, x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64XCHGB) + v.AddArg3(val, ptr, mem) return true } } -func rewriteValueAMD64_OpAddFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpAtomicLoad32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AddFloat64x2 x y) - // result: (VADDPD128 y x) + // match: (AtomicLoad32 ptr mem) + // result: (MOVLatomicload ptr mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VADDPD128) - v.AddArg2(y, x) + ptr := v_0 + mem := v_1 + v.reset(OpAMD64MOVLatomicload) + v.AddArg2(ptr, mem) return true } } -func rewriteValueAMD64_OpAddFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpAtomicLoad64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AddFloat64x4 x y) - // result: (VADDPD256 y x) + // match: (AtomicLoad64 ptr mem) + // result: (MOVQatomicload ptr mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VADDPD256) - v.AddArg2(y, x) + ptr := v_0 + mem := v_1 + v.reset(OpAMD64MOVQatomicload) + v.AddArg2(ptr, mem) return true } } -func rewriteValueAMD64_OpAddFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpAtomicLoad8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AddFloat64x8 x y) - // result: (VADDPD512 y x) + // match: (AtomicLoad8 ptr mem) + // result: (MOVBatomicload ptr mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VADDPD512) - v.AddArg2(y, x) + ptr := v_0 + mem := v_1 + v.reset(OpAMD64MOVBatomicload) + v.AddArg2(ptr, mem) return true } } -func rewriteValueAMD64_OpAddInt16x16(v *Value) bool { +func rewriteValueAMD64_OpAtomicLoadPtr(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AddInt16x16 x y) - // result: (VPADDW256 y x) + // match: (AtomicLoadPtr ptr mem) + // result: (MOVQatomicload ptr mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDW256) - v.AddArg2(y, x) + ptr := v_0 + mem := v_1 + v.reset(OpAMD64MOVQatomicload) + v.AddArg2(ptr, mem) return true } } -func rewriteValueAMD64_OpAddInt16x32(v *Value) bool { +func rewriteValueAMD64_OpAtomicOr32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AddInt16x32 x y) - // result: (VPADDW512 y x) + // match: (AtomicOr32 ptr val mem) + // result: (ORLlock ptr val mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDW512) - v.AddArg2(y, x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64ORLlock) + v.AddArg3(ptr, val, mem) return true } } -func rewriteValueAMD64_OpAddInt16x8(v *Value) bool { +func rewriteValueAMD64_OpAtomicOr32value(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AddInt16x8 x y) - // result: (VPADDW128 y x) + // match: (AtomicOr32value ptr val mem) + // result: (LoweredAtomicOr32 ptr val mem) for { - 
x := v_0 - y := v_1 - v.reset(OpAMD64VPADDW128) - v.AddArg2(y, x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64LoweredAtomicOr32) + v.AddArg3(ptr, val, mem) return true } } -func rewriteValueAMD64_OpAddInt32x16(v *Value) bool { +func rewriteValueAMD64_OpAtomicOr64value(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AddInt32x16 x y) - // result: (VPADDD512 y x) + // match: (AtomicOr64value ptr val mem) + // result: (LoweredAtomicOr64 ptr val mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDD512) - v.AddArg2(y, x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64LoweredAtomicOr64) + v.AddArg3(ptr, val, mem) return true } } -func rewriteValueAMD64_OpAddInt32x4(v *Value) bool { +func rewriteValueAMD64_OpAtomicOr8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AddInt32x4 x y) - // result: (VPADDD128 y x) + // match: (AtomicOr8 ptr val mem) + // result: (ORBlock ptr val mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDD128) - v.AddArg2(y, x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64ORBlock) + v.AddArg3(ptr, val, mem) return true } } -func rewriteValueAMD64_OpAddInt32x8(v *Value) bool { +func rewriteValueAMD64_OpAtomicStore32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AddInt32x8 x y) - // result: (VPADDD256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicStore32 ptr val mem) + // result: (Select1 (XCHGL val ptr mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDD256) - v.AddArg2(y, x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAddInt64x2(v *Value) bool { +func rewriteValueAMD64_OpAtomicStore64(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AddInt64x2 x y) - // result: (VPADDQ128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicStore64 ptr val mem) + // result: (Select1 (XCHGQ val ptr mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDQ128) - v.AddArg2(y, x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAddInt64x4(v *Value) bool { +func rewriteValueAMD64_OpAtomicStore8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AddInt64x4 x y) - // result: (VPADDQ256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicStore8 ptr val mem) + // result: (Select1 (XCHGB val ptr mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDQ256) - v.AddArg2(y, x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64XCHGB, types.NewTuple(typ.UInt8, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAddInt64x8(v *Value) bool { +func rewriteValueAMD64_OpAtomicStorePtrNoWB(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AddInt64x8 x y) - // result: (VPADDQ512 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicStorePtrNoWB ptr val mem) + // result: (Select1 (XCHGQ val ptr mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDQ512) - v.AddArg2(y, x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpSelect1) + v0 := 
b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAddInt8x16(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpBitLen16(v *Value) bool { v_0 := v.Args[0] - // match: (AddInt8x16 x y) - // result: (VPADDB128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (BitLen16 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (BSRL (LEAL1 [1] (MOVWQZX x) (MOVWQZX x))) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPADDB128) - v.AddArg2(y, x) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64BSRL) + v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32) + v0.AuxInt = int32ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32) + v1.AddArg(x) + v0.AddArg2(v1, v1) + v.AddArg(v0) return true } -} -func rewriteValueAMD64_OpAddInt8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AddInt8x32 x y) - // result: (VPADDB256 y x) + // match: (BitLen16 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (NEGQ (ADDQconst [-32] (LZCNTL (MOVWQZX x)))) for { + t := v.Type x := v_0 - y := v_1 - v.reset(OpAMD64VPADDB256) - v.AddArg2(y, x) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64NEGQ) + v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) + v0.AuxInt = int32ToAuxInt(-32) + v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, x.Type) + v2.AddArg(x) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) return true } + return false } -func rewriteValueAMD64_OpAddInt8x64(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpBitLen32(v *Value) bool { v_0 := v.Args[0] - // match: (AddInt8x64 x y) - // result: (VPADDB512 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (BitLen32 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (Select0 (BSRQ (LEAQ1 [1] (MOVLQZX x) (MOVLQZX x)))) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPADDB512) - v.AddArg2(y, x) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v1 := b.NewValue0(v.Pos, OpAMD64LEAQ1, typ.UInt64) + v1.AuxInt = int32ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64) + v2.AddArg(x) + v1.AddArg2(v2, v2) + v0.AddArg(v1) + v.AddArg(v0) return true } -} -func rewriteValueAMD64_OpAddUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AddUint16x16 x y) - // result: (VPADDW256 y x) + // match: (BitLen32 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (NEGQ (ADDQconst [-32] (LZCNTL x))) for { + t := v.Type x := v_0 - y := v_1 - v.reset(OpAMD64VPADDW256) - v.AddArg2(y, x) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64NEGQ) + v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) + v0.AuxInt = int32ToAuxInt(-32) + v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) return true } + return false } -func rewriteValueAMD64_OpAddUint16x32(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpBitLen64(v *Value) bool { v_0 := v.Args[0] - // match: (AddUint16x32 x y) - // result: (VPADDW512 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (BitLen64 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (ADDQconst [1] (CMOVQEQ (Select0 (BSRQ x)) (MOVQconst [-1]) (Select1 (BSRQ x)))) for { + t := v.Type x := v_0 - y := v_1 - v.reset(OpAMD64VPADDW512) - v.AddArg2(y, x) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64ADDQconst) + v.AuxInt = 
int32ToAuxInt(1) + v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t) + v1 := b.NewValue0(v.Pos, OpSelect0, t) + v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v2.AddArg(x) + v1.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t) + v3.AuxInt = int64ToAuxInt(-1) + v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v4.AddArg(v2) + v0.AddArg3(v1, v3, v4) + v.AddArg(v0) return true } -} -func rewriteValueAMD64_OpAddUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AddUint16x8 x y) - // result: (VPADDW128 y x) + // match: (BitLen64 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (NEGQ (ADDQconst [-64] (LZCNTQ x))) for { + t := v.Type x := v_0 - y := v_1 - v.reset(OpAMD64VPADDW128) - v.AddArg2(y, x) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64NEGQ) + v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) + v0.AuxInt = int32ToAuxInt(-64) + v1 := b.NewValue0(v.Pos, OpAMD64LZCNTQ, typ.UInt64) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) return true } + return false } -func rewriteValueAMD64_OpAddUint32x16(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpBitLen8(v *Value) bool { v_0 := v.Args[0] - // match: (AddUint32x16 x y) - // result: (VPADDD512 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (BitLen8 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (BSRL (LEAL1 [1] (MOVBQZX x) (MOVBQZX x))) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPADDD512) - v.AddArg2(y, x) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64BSRL) + v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32) + v0.AuxInt = int32ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32) + v1.AddArg(x) + v0.AddArg2(v1, v1) + v.AddArg(v0) return true } -} -func rewriteValueAMD64_OpAddUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AddUint32x4 x y) - // result: (VPADDD128 y x) + // match: (BitLen8 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (NEGQ (ADDQconst [-32] (LZCNTL (MOVBQZX x)))) for { + t := v.Type x := v_0 - y := v_1 - v.reset(OpAMD64VPADDD128) - v.AddArg2(y, x) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64NEGQ) + v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) + v0.AuxInt = int32ToAuxInt(-32) + v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, x.Type) + v2.AddArg(x) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) return true } + return false } -func rewriteValueAMD64_OpAddUint32x8(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpBswap16(v *Value) bool { v_0 := v.Args[0] - // match: (AddUint32x8 x y) - // result: (VPADDD256 y x) + // match: (Bswap16 x) + // result: (ROLWconst [8] x) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPADDD256) - v.AddArg2(y, x) + v.reset(OpAMD64ROLWconst) + v.AuxInt = int8ToAuxInt(8) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpAddUint64x2(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpCeil(v *Value) bool { v_0 := v.Args[0] - // match: (AddUint64x2 x y) - // result: (VPADDQ128 y x) + // match: (Ceil x) + // result: (ROUNDSD [2] x) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPADDQ128) - v.AddArg2(y, x) + v.reset(OpAMD64ROUNDSD) + v.AuxInt = int8ToAuxInt(2) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpAddUint64x4(v *Value) bool { +func rewriteValueAMD64_OpCondSelect(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AddUint64x4 x y) - // result: (VPADDQ256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: 
(CondSelect x y (SETEQ cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQEQ y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPADDQ256) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETEQ { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQEQ) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAddUint64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AddUint64x8 x y) - // result: (VPADDQ512 y x) + // match: (CondSelect x y (SETNE cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQNE y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPADDQ512) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETNE { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQNE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAddUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AddUint8x16 x y) - // result: (VPADDB128 y x) + // match: (CondSelect x y (SETL cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQLT y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPADDB128) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETL { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQLT) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAddUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AddUint8x32 x y) - // result: (VPADDB256 y x) + // match: (CondSelect x y (SETG cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQGT y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPADDB256) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETG { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQGT) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAddUint8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AddUint8x64 x y) - // result: (VPADDB512 y x) + // match: (CondSelect x y (SETLE cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQLE y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPADDB512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpAddr(v *Value) bool { - v_0 := v.Args[0] - // match: (Addr {sym} base) - // result: (LEAQ {sym} base) - for { - sym := auxToSym(v.Aux) - base := v_0 - v.reset(OpAMD64LEAQ) - v.Aux = symToAux(sym) - v.AddArg(base) + if v_2.Op != OpAMD64SETLE { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQLE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndFloat32x16 x y) - // result: (VANDPS512 y x) + // match: (CondSelect x y (SETGE cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQGE y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VANDPS512) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETGE { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQGE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndFloat32x4 x y) - // result: (VANDPS128 y x) + // match: (CondSelect x y (SETA cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQHI y x cond) for { + t := v.Type x := 
v_0 y := v_1 - v.reset(OpAMD64VANDPS128) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETA { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQHI) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndFloat32x8 x y) - // result: (VANDPS256 y x) + // match: (CondSelect x y (SETB cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQCS y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VANDPS256) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETB { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQCS) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndFloat64x2 x y) - // result: (VANDPD128 y x) + // match: (CondSelect x y (SETAE cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQCC y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VANDPD128) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETAE { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQCC) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndFloat64x4 x y) - // result: (VANDPD256 y x) + // match: (CondSelect x y (SETBE cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQLS y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VANDPD256) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETBE { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQLS) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndFloat64x8 x y) - // result: (VANDPD512 y x) + // match: (CondSelect x y (SETEQF cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQEQF y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VANDPD512) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETEQF { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQEQF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndInt16x16 x y) - // result: (VPAND256 y x) + // match: (CondSelect x y (SETNEF cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQNEF y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPAND256) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETNEF { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQNEF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndInt16x8 x y) - // result: (VPAND128 y x) + // match: (CondSelect x y (SETGF cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQGTF y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPAND128) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETGF { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQGTF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndInt32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndInt32x16 x y) - // result: 
(VPANDD512 y x) + // match: (CondSelect x y (SETGEF cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQGEF y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDD512) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETGEF { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQGEF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndInt32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndInt32x4 x y) - // result: (VPAND128 y x) + // match: (CondSelect x y (SETEQ cond)) + // cond: is32BitInt(t) + // result: (CMOVLEQ y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPAND128) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETEQ { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLEQ) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndInt32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndInt32x8 x y) - // result: (VPAND256 y x) + // match: (CondSelect x y (SETNE cond)) + // cond: is32BitInt(t) + // result: (CMOVLNE y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPAND256) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETNE { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLNE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndInt64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndInt64x2 x y) - // result: (VPAND128 y x) + // match: (CondSelect x y (SETL cond)) + // cond: is32BitInt(t) + // result: (CMOVLLT y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPAND128) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETL { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLLT) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndInt64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndInt64x4 x y) - // result: (VPAND256 y x) + // match: (CondSelect x y (SETG cond)) + // cond: is32BitInt(t) + // result: (CMOVLGT y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPAND256) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETG { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLGT) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndInt64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndInt64x8 x y) - // result: (VPANDQ512 y x) + // match: (CondSelect x y (SETLE cond)) + // cond: is32BitInt(t) + // result: (CMOVLLE y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDQ512) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETLE { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLLE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndInt8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndInt8x16 x y) - // result: (VPAND128 y x) + // match: (CondSelect x y (SETGE cond)) + // cond: is32BitInt(t) + // result: (CMOVLGE y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPAND128) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETGE { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLGE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndInt8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndInt8x32 x y) - // result: (VPAND256 y x) + // match: (CondSelect 
x y (SETA cond)) + // cond: is32BitInt(t) + // result: (CMOVLHI y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPAND256) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETA { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLHI) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotFloat32x16 x y) - // result: (VANDNPS512 y x) + // match: (CondSelect x y (SETB cond)) + // cond: is32BitInt(t) + // result: (CMOVLCS y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VANDNPS512) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETB { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLCS) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotFloat32x4 x y) - // result: (VANDNPS128 y x) + // match: (CondSelect x y (SETAE cond)) + // cond: is32BitInt(t) + // result: (CMOVLCC y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VANDNPS128) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETAE { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLCC) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotFloat32x8 x y) - // result: (VANDNPS256 y x) + // match: (CondSelect x y (SETBE cond)) + // cond: is32BitInt(t) + // result: (CMOVLLS y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VANDNPS256) - v.AddArg2(y, x) - return true + if v_2.Op != OpAMD64SETBE { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLLS) + v.AddArg3(y, x, cond) + return true } -} -func rewriteValueAMD64_OpAndNotFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotFloat64x2 x y) - // result: (VANDNPD128 y x) + // match: (CondSelect x y (SETEQF cond)) + // cond: is32BitInt(t) + // result: (CMOVLEQF y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VANDNPD128) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETEQF { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLEQF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotFloat64x4 x y) - // result: (VANDNPD256 y x) + // match: (CondSelect x y (SETNEF cond)) + // cond: is32BitInt(t) + // result: (CMOVLNEF y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VANDNPD256) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETNEF { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLNEF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotFloat64x8 x y) - // result: (VANDNPD512 y x) + // match: (CondSelect x y (SETGF cond)) + // cond: is32BitInt(t) + // result: (CMOVLGTF y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VANDNPD512) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETGF { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLGTF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotInt16x16 x y) - // 
result: (VPANDN256 y x) + // match: (CondSelect x y (SETGEF cond)) + // cond: is32BitInt(t) + // result: (CMOVLGEF y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDN256) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETGEF { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLGEF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotInt16x8 x y) - // result: (VPANDN128 y x) + // match: (CondSelect x y (SETEQ cond)) + // cond: is16BitInt(t) + // result: (CMOVWEQ y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDN128) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETEQ { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWEQ) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotInt32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotInt32x16 x y) - // result: (VPANDND512 y x) + // match: (CondSelect x y (SETNE cond)) + // cond: is16BitInt(t) + // result: (CMOVWNE y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDND512) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETNE { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWNE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotInt32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotInt32x4 x y) - // result: (VPANDN128 y x) + // match: (CondSelect x y (SETL cond)) + // cond: is16BitInt(t) + // result: (CMOVWLT y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDN128) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETL { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWLT) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotInt32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotInt32x8 x y) - // result: (VPANDN256 y x) + // match: (CondSelect x y (SETG cond)) + // cond: is16BitInt(t) + // result: (CMOVWGT y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDN256) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETG { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWGT) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotInt64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotInt64x2 x y) - // result: (VPANDN128 y x) + // match: (CondSelect x y (SETLE cond)) + // cond: is16BitInt(t) + // result: (CMOVWLE y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDN128) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETLE { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWLE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotInt64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotInt64x4 x y) - // result: (VPANDN256 y x) + // match: (CondSelect x y (SETGE cond)) + // cond: is16BitInt(t) + // result: (CMOVWGE y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDN256) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETGE { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWGE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotInt64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotInt64x8 x y) - // result: 
(VPANDNQ512 y x) + // match: (CondSelect x y (SETA cond)) + // cond: is16BitInt(t) + // result: (CMOVWHI y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDNQ512) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETA { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWHI) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotInt8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotInt8x16 x y) - // result: (VPANDN128 y x) + // match: (CondSelect x y (SETB cond)) + // cond: is16BitInt(t) + // result: (CMOVWCS y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDN128) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETB { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWCS) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotInt8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotInt8x32 x y) - // result: (VPANDN256 y x) + // match: (CondSelect x y (SETAE cond)) + // cond: is16BitInt(t) + // result: (CMOVWCC y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDN256) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETAE { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWCC) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotUint16x16 x y) - // result: (VPANDN256 y x) + // match: (CondSelect x y (SETBE cond)) + // cond: is16BitInt(t) + // result: (CMOVWLS y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDN256) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETBE { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWLS) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotUint16x8 x y) - // result: (VPANDN128 y x) + // match: (CondSelect x y (SETEQF cond)) + // cond: is16BitInt(t) + // result: (CMOVWEQF y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDN128) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETEQF { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWEQF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotUint32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotUint32x16 x y) - // result: (VPANDND512 y x) + // match: (CondSelect x y (SETNEF cond)) + // cond: is16BitInt(t) + // result: (CMOVWNEF y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDND512) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETNEF { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWNEF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotUint32x4 x y) - // result: (VPANDN128 y x) + // match: (CondSelect x y (SETGF cond)) + // cond: is16BitInt(t) + // result: (CMOVWGTF y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDN128) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETGF { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWGTF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotUint32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotUint32x8 x y) - 
// result: (VPANDN256 y x) + // match: (CondSelect x y (SETGEF cond)) + // cond: is16BitInt(t) + // result: (CMOVWGEF y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDN256) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETGEF { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWGEF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotUint64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotUint64x2 x y) - // result: (VPANDN128 y x) + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 1 + // result: (CondSelect x y (MOVBQZX check)) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDN128) - v.AddArg2(y, x) + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 1) { + break + } + v.reset(OpCondSelect) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt64) + v0.AddArg(check) + v.AddArg3(x, y, v0) return true } -} -func rewriteValueAMD64_OpAndNotUint64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotUint64x4 x y) - // result: (VPANDN256 y x) + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 2 + // result: (CondSelect x y (MOVWQZX check)) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDN256) - v.AddArg2(y, x) + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 2) { + break + } + v.reset(OpCondSelect) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt64) + v0.AddArg(check) + v.AddArg3(x, y, v0) return true } -} -func rewriteValueAMD64_OpAndNotUint64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotUint64x8 x y) - // result: (VPANDNQ512 y x) + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 4 + // result: (CondSelect x y (MOVLQZX check)) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDNQ512) - v.AddArg2(y, x) + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 4) { + break + } + v.reset(OpCondSelect) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64) + v0.AddArg(check) + v.AddArg3(x, y, v0) return true } -} -func rewriteValueAMD64_OpAndNotUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotUint8x16 x y) - // result: (VPANDN128 y x) + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t)) + // result: (CMOVQNE y x (CMPQconst [0] check)) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDN128) - v.AddArg2(y, x) + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))) { + break + } + v.reset(OpAMD64CMOVQNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(0) + v0.AddArg(check) + v.AddArg3(y, x, v0) return true } -} -func rewriteValueAMD64_OpAndNotUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotUint8x32 x y) - // result: (VPANDN256 y x) + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t) + // result: (CMOVLNE y x (CMPQconst [0] check)) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDN256) - v.AddArg2(y, x) + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v0.AuxInt = 
int32ToAuxInt(0) + v0.AddArg(check) + v.AddArg3(y, x, v0) return true } -} -func rewriteValueAMD64_OpAndUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndUint16x16 x y) - // result: (VPAND256 y x) + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t) + // result: (CMOVWNE y x (CMPQconst [0] check)) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPAND256) - v.AddArg2(y, x) + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(0) + v0.AddArg(check) + v.AddArg3(y, x, v0) return true } + return false } -func rewriteValueAMD64_OpAndUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndUint16x8 x y) - // result: (VPAND128 y x) +func rewriteValueAMD64_OpConst16(v *Value) bool { + // match: (Const16 [c]) + // result: (MOVLconst [int32(c)]) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPAND128) - v.AddArg2(y, x) + c := auxIntToInt16(v.AuxInt) + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(int32(c)) return true } } -func rewriteValueAMD64_OpAndUint32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndUint32x16 x y) - // result: (VPANDD512 y x) +func rewriteValueAMD64_OpConst8(v *Value) bool { + // match: (Const8 [c]) + // result: (MOVLconst [int32(c)]) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPANDD512) - v.AddArg2(y, x) + c := auxIntToInt8(v.AuxInt) + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(int32(c)) return true } } -func rewriteValueAMD64_OpAndUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndUint32x4 x y) - // result: (VPAND128 y x) +func rewriteValueAMD64_OpConstBool(v *Value) bool { + // match: (ConstBool [c]) + // result: (MOVLconst [b2i32(c)]) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPAND128) - v.AddArg2(y, x) + c := auxIntToBool(v.AuxInt) + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(b2i32(c)) return true } } -func rewriteValueAMD64_OpAndUint32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndUint32x8 x y) - // result: (VPAND256 y x) +func rewriteValueAMD64_OpConstNil(v *Value) bool { + // match: (ConstNil ) + // result: (MOVQconst [0]) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPAND256) - v.AddArg2(y, x) + v.reset(OpAMD64MOVQconst) + v.AuxInt = int64ToAuxInt(0) return true } } -func rewriteValueAMD64_OpAndUint64x2(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpCtz16(v *Value) bool { v_0 := v.Args[0] - // match: (AndUint64x2 x y) - // result: (VPAND128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (Ctz16 x) + // result: (BSFL (ORLconst [1<<16] x)) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPAND128) - v.AddArg2(y, x) + v.reset(OpAMD64BSFL) + v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(1 << 16) + v0.AddArg(x) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAndUint64x4(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpCtz16NonZero(v *Value) bool { v_0 := v.Args[0] - // match: (AndUint64x4 x y) - // result: (VPAND256 y x) + // match: (Ctz16NonZero x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTL x) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPAND256) - v.AddArg2(y, x) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTL) + v.AddArg(x) return true } -} -func 
rewriteValueAMD64_OpAndUint64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndUint64x8 x y) - // result: (VPANDQ512 y x) + // match: (Ctz16NonZero x) + // cond: buildcfg.GOAMD64 < 3 + // result: (BSFL x) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPANDQ512) - v.AddArg2(y, x) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64BSFL) + v.AddArg(x) return true } + return false } -func rewriteValueAMD64_OpAndUint8x16(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpCtz32(v *Value) bool { v_0 := v.Args[0] - // match: (AndUint8x16 x y) - // result: (VPAND128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (Ctz32 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTL x) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPAND128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpAndUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndUint8x32 x y) - // result: (VPAND256 y x) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTL) + v.AddArg(x) + return true + } + // match: (Ctz32 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (Select0 (BSFQ (BTSQconst [32] x))) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPAND256) - v.AddArg2(y, x) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v1 := b.NewValue0(v.Pos, OpAMD64BTSQconst, typ.UInt64) + v1.AuxInt = int8ToAuxInt(32) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) return true } + return false } -func rewriteValueAMD64_OpApproximateReciprocalFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpCtz32NonZero(v *Value) bool { v_0 := v.Args[0] - // match: (ApproximateReciprocalFloat32x16 x) - // result: (VRCP14PS512 x) + // match: (Ctz32NonZero x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTL x) for { x := v_0 - v.reset(OpAMD64VRCP14PS512) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTL) v.AddArg(x) return true } -} -func rewriteValueAMD64_OpApproximateReciprocalFloat32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (ApproximateReciprocalFloat32x4 x) - // result: (VRCP14PS128 x) + // match: (Ctz32NonZero x) + // cond: buildcfg.GOAMD64 < 3 + // result: (BSFL x) for { x := v_0 - v.reset(OpAMD64VRCP14PS128) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64BSFL) v.AddArg(x) return true } + return false } -func rewriteValueAMD64_OpApproximateReciprocalFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpCtz64(v *Value) bool { v_0 := v.Args[0] - // match: (ApproximateReciprocalFloat32x8 x) - // result: (VRCP14PS256 x) + b := v.Block + typ := &b.Func.Config.Types + // match: (Ctz64 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTQ x) for { x := v_0 - v.reset(OpAMD64VRCP14PS256) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTQ) v.AddArg(x) return true } -} -func rewriteValueAMD64_OpApproximateReciprocalFloat64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (ApproximateReciprocalFloat64x2 x) - // result: (VRCP14PD128 x) + // match: (Ctz64 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (CMOVQEQ (Select0 (BSFQ x)) (MOVQconst [64]) (Select1 (BSFQ x))) for { + t := v.Type x := v_0 - v.reset(OpAMD64VRCP14PD128) - v.AddArg(x) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64CMOVQEQ) + v0 := b.NewValue0(v.Pos, OpSelect0, t) + v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, 
t) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v3.AddArg(v1) + v.AddArg3(v0, v2, v3) return true } + return false } -func rewriteValueAMD64_OpApproximateReciprocalFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpCtz64NonZero(v *Value) bool { v_0 := v.Args[0] - // match: (ApproximateReciprocalFloat64x4 x) - // result: (VRCP14PD256 x) + b := v.Block + typ := &b.Func.Config.Types + // match: (Ctz64NonZero x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTQ x) for { x := v_0 - v.reset(OpAMD64VRCP14PD256) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTQ) v.AddArg(x) return true } -} -func rewriteValueAMD64_OpApproximateReciprocalFloat64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (ApproximateReciprocalFloat64x8 x) - // result: (VRCP14PD512 x) + // match: (Ctz64NonZero x) + // cond: buildcfg.GOAMD64 < 3 + // result: (Select0 (BSFQ x)) for { x := v_0 - v.reset(OpAMD64VRCP14PD512) - v.AddArg(x) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v0.AddArg(x) + v.AddArg(v0) return true } + return false } -func rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpCtz8(v *Value) bool { v_0 := v.Args[0] - // match: (ApproximateReciprocalOfSqrtFloat32x16 x) - // result: (VRSQRT14PS512 x) + b := v.Block + typ := &b.Func.Config.Types + // match: (Ctz8 x) + // result: (BSFL (ORLconst [1<<8 ] x)) for { x := v_0 - v.reset(OpAMD64VRSQRT14PS512) - v.AddArg(x) + v.reset(OpAMD64BSFL) + v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(1 << 8) + v0.AddArg(x) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpCtz8NonZero(v *Value) bool { v_0 := v.Args[0] - // match: (ApproximateReciprocalOfSqrtFloat32x4 x) - // result: (VRSQRTPS128 x) + // match: (Ctz8NonZero x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTL x) for { x := v_0 - v.reset(OpAMD64VRSQRTPS128) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTL) v.AddArg(x) return true } -} -func rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (ApproximateReciprocalOfSqrtFloat32x8 x) - // result: (VRSQRTPS256 x) + // match: (Ctz8NonZero x) + // cond: buildcfg.GOAMD64 < 3 + // result: (BSFL x) for { x := v_0 - v.reset(OpAMD64VRSQRTPS256) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64BSFL) v.AddArg(x) return true } + return false } -func rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpDiv16(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ApproximateReciprocalOfSqrtFloat64x2 x) - // result: (VRSQRT14PD128 x) + b := v.Block + typ := &b.Func.Config.Types + // match: (Div16 [a] x y) + // result: (Select0 (DIVW [a] x y)) for { + a := auxIntToBool(v.AuxInt) x := v_0 - v.reset(OpAMD64VRSQRT14PD128) - v.AddArg(x) + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) + v0.AuxInt = boolToAuxInt(a) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpDiv16u(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ApproximateReciprocalOfSqrtFloat64x4 x) - // result: (VRSQRT14PD256 x) + b := v.Block + typ := 
&b.Func.Config.Types + // match: (Div16u x y) + // result: (Select0 (DIVWU x y)) for { x := v_0 - v.reset(OpAMD64VRSQRT14PD256) - v.AddArg(x) + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpDiv32(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ApproximateReciprocalOfSqrtFloat64x8 x) - // result: (VRSQRT14PD512 x) + b := v.Block + typ := &b.Func.Config.Types + // match: (Div32 [a] x y) + // result: (Select0 (DIVL [a] x y)) for { + a := auxIntToBool(v.AuxInt) x := v_0 - v.reset(OpAMD64VRSQRT14PD512) - v.AddArg(x) + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) + v0.AuxInt = boolToAuxInt(a) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicAdd32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpDiv32u(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (AtomicAdd32 ptr val mem) - // result: (AddTupleFirst32 val (XADDLlock val ptr mem)) + // match: (Div32u x y) + // result: (Select0 (DIVLU x y)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64AddTupleFirst32) - v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, types.NewTuple(typ.UInt32, types.TypeMem)) - v0.AddArg3(val, ptr, mem) - v.AddArg2(val, v0) + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32)) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicAdd64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpDiv64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (AtomicAdd64 ptr val mem) - // result: (AddTupleFirst64 val (XADDQlock val ptr mem)) + // match: (Div64 [a] x y) + // result: (Select0 (DIVQ [a] x y)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64AddTupleFirst64) - v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem)) - v0.AddArg3(val, ptr, mem) - v.AddArg2(val, v0) + a := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64)) + v0.AuxInt = boolToAuxInt(a) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicAnd32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpDiv64u(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicAnd32 ptr val mem) - // result: (ANDLlock ptr val mem) + b := v.Block + typ := &b.Func.Config.Types + // match: (Div64u x y) + // result: (Select0 (DIVQU x y)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64ANDLlock) - v.AddArg3(ptr, val, mem) + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicAnd32value(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpDiv8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicAnd32value ptr val mem) - // result: (LoweredAtomicAnd32 ptr val mem) + b := v.Block + typ := &b.Func.Config.Types + // match: (Div8 x y) + // result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y))) for { - ptr := v_0 - val := v_1 - mem := v_2 - 
v.reset(OpAMD64LoweredAtomicAnd32) - v.AddArg3(ptr, val, mem) + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) + v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicAnd64value(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpDiv8u(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicAnd64value ptr val mem) - // result: (LoweredAtomicAnd64 ptr val mem) + b := v.Block + typ := &b.Func.Config.Types + // match: (Div8u x y) + // result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64LoweredAtomicAnd64) - v.AddArg3(ptr, val, mem) + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) + v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicAnd8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEq16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicAnd8 ptr val mem) - // result: (ANDBlock ptr val mem) + b := v.Block + // match: (Eq16 x y) + // result: (SETEQ (CMPW x y)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64ANDBlock) - v.AddArg3(ptr, val, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicCompareAndSwap32(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpEq32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicCompareAndSwap32 ptr old new_ mem) - // result: (CMPXCHGLlock ptr old new_ mem) + b := v.Block + // match: (Eq32 x y) + // result: (SETEQ (CMPL x y)) for { - ptr := v_0 - old := v_1 - new_ := v_2 - mem := v_3 - v.reset(OpAMD64CMPXCHGLlock) - v.AddArg4(ptr, old, new_, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicCompareAndSwap64(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpEq32F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicCompareAndSwap64 ptr old new_ mem) - // result: (CMPXCHGQlock ptr old new_ mem) + b := v.Block + // match: (Eq32F x y) + // result: (SETEQF (UCOMISS x y)) for { - ptr := v_0 - old := v_1 - new_ := v_2 - mem := v_3 - v.reset(OpAMD64CMPXCHGQlock) - v.AddArg4(ptr, old, new_, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64SETEQF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicExchange32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEq64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicExchange32 ptr val mem) - // result: (XCHGL val ptr mem) + b := v.Block + // match: (Eq64 x y) + // result: (SETEQ (CMPQ x y)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64XCHGL) - v.AddArg3(val, ptr, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return 
true } } -func rewriteValueAMD64_OpAtomicExchange64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEq64F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicExchange64 ptr val mem) - // result: (XCHGQ val ptr mem) + b := v.Block + // match: (Eq64F x y) + // result: (SETEQF (UCOMISD x y)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64XCHGQ) - v.AddArg3(val, ptr, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64SETEQF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicExchange8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEq8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicExchange8 ptr val mem) - // result: (XCHGB val ptr mem) + b := v.Block + // match: (Eq8 x y) + // result: (SETEQ (CMPB x y)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64XCHGB) - v.AddArg3(val, ptr, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicLoad32(v *Value) bool { +func rewriteValueAMD64_OpEqB(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicLoad32 ptr mem) - // result: (MOVLatomicload ptr mem) + b := v.Block + // match: (EqB x y) + // result: (SETEQ (CMPB x y)) for { - ptr := v_0 - mem := v_1 - v.reset(OpAMD64MOVLatomicload) - v.AddArg2(ptr, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicLoad64(v *Value) bool { +func rewriteValueAMD64_OpEqPtr(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicLoad64 ptr mem) - // result: (MOVQatomicload ptr mem) + b := v.Block + // match: (EqPtr x y) + // result: (SETEQ (CMPQ x y)) for { - ptr := v_0 - mem := v_1 - v.reset(OpAMD64MOVQatomicload) - v.AddArg2(ptr, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicLoad8(v *Value) bool { +func rewriteValueAMD64_OpEqualFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicLoad8 ptr mem) - // result: (MOVBatomicload ptr mem) + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [0] x y)) for { - ptr := v_0 - mem := v_1 - v.reset(OpAMD64MOVBatomicload) - v.AddArg2(ptr, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicLoadPtr(v *Value) bool { +func rewriteValueAMD64_OpEqualFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicLoadPtr ptr mem) - // result: (MOVQatomicload ptr mem) + // match: (EqualFloat32x4 x y) + // result: (VCMPPS128 [0] x y) for { - ptr := v_0 - mem := v_1 - v.reset(OpAMD64MOVQatomicload) - v.AddArg2(ptr, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPS128) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpAtomicOr32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEqualFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicOr32 ptr val mem) - // result: (ORLlock ptr val mem) + // match: (EqualFloat32x8 x 
y) + // result: (VCMPPS256 [0] x y) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64ORLlock) - v.AddArg3(ptr, val, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPS256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpAtomicOr32value(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEqualFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicOr32value ptr val mem) - // result: (LoweredAtomicOr32 ptr val mem) + // match: (EqualFloat64x2 x y) + // result: (VCMPPD128 [0] x y) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64LoweredAtomicOr32) - v.AddArg3(ptr, val, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPD128) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpAtomicOr64value(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEqualFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicOr64value ptr val mem) - // result: (LoweredAtomicOr64 ptr val mem) + // match: (EqualFloat64x4 x y) + // result: (VCMPPD256 [0] x y) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64LoweredAtomicOr64) - v.AddArg3(ptr, val, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPD256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpAtomicOr8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEqualFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicOr8 ptr val mem) - // result: (ORBlock ptr val mem) + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [0] x y)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64ORBlock) - v.AddArg3(ptr, val, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicStore32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEqualInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (AtomicStore32 ptr val mem) - // result: (Select1 (XCHGL val ptr mem)) + // match: (EqualInt16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPEQW512 x y)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem)) - v0.AddArg3(val, ptr, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQW512, typ.Mask) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicStore64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEqualInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (AtomicStore64 ptr val mem) - // result: (Select1 (XCHGQ val ptr mem)) + // match: (EqualInt32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPEQD512 x y)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem)) - v0.AddArg3(val, ptr, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQD512, typ.Mask) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicStore8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEqualInt64x8(v *Value) bool { v_1 := 
v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (AtomicStore8 ptr val mem) - // result: (Select1 (XCHGB val ptr mem)) + // match: (EqualInt64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPEQQ512 x y)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64XCHGB, types.NewTuple(typ.UInt8, types.TypeMem)) - v0.AddArg3(val, ptr, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQQ512, typ.Mask) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicStorePtrNoWB(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEqualInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (AtomicStorePtrNoWB ptr val mem) - // result: (Select1 (XCHGQ val ptr mem)) + // match: (EqualInt8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPEQB512 x y)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem)) - v0.AddArg3(val, ptr, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQB512, typ.Mask) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAverageUint16x16(v *Value) bool { +func rewriteValueAMD64_OpEqualUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AverageUint16x16 x y) - // result: (VPAVGW256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualUint16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPUW256 [0] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPAVGW256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAverageUint16x32(v *Value) bool { +func rewriteValueAMD64_OpEqualUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AverageUint16x32 x y) - // result: (VPAVGW512 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualUint16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPUW512 [0] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPAVGW512) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAverageUint16x8(v *Value) bool { +func rewriteValueAMD64_OpEqualUint16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AverageUint16x8 x y) - // result: (VPAVGW128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualUint16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPUW128 [0] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPAVGW128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAverageUint8x16(v *Value) bool { +func rewriteValueAMD64_OpEqualUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AverageUint8x16 x y) - // result: (VPAVGB128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualUint32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [0] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPAVGB128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) + 
v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAverageUint8x32(v *Value) bool { +func rewriteValueAMD64_OpEqualUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AverageUint8x32 x y) - // result: (VPAVGB256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualUint32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPUD128 [0] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPAVGB256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAverageUint8x64(v *Value) bool { +func rewriteValueAMD64_OpEqualUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AverageUint8x64 x y) - // result: (VPAVGB512 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualUint32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPUD256 [0] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPAVGB512) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpBitLen16(v *Value) bool { +func rewriteValueAMD64_OpEqualUint64x2(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (BitLen16 x) - // cond: buildcfg.GOAMD64 < 3 - // result: (BSRL (LEAL1 [1] (MOVWQZX x) (MOVWQZX x))) - for { - x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpAMD64BSRL) - v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32) - v0.AuxInt = int32ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32) - v1.AddArg(x) - v0.AddArg2(v1, v1) - v.AddArg(v0) - return true - } - // match: (BitLen16 x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (NEGQ (ADDQconst [-32] (LZCNTL (MOVWQZX x)))) + // match: (EqualUint64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPUQ128 [0] x y)) for { - t := v.Type x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64NEGQ) - v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) - v0.AuxInt = int32ToAuxInt(-32) - v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, x.Type) - v2.AddArg(x) - v1.AddArg(v2) - v0.AddArg(v1) + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(x, y) v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpBitLen32(v *Value) bool { +func rewriteValueAMD64_OpEqualUint64x4(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (BitLen32 x) - // cond: buildcfg.GOAMD64 < 3 - // result: (Select0 (BSRQ (LEAQ1 [1] (MOVLQZX x) (MOVLQZX x)))) + // match: (EqualUint64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPUQ256 [0] x y)) for { x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v1 := b.NewValue0(v.Pos, OpAMD64LEAQ1, typ.UInt64) - v1.AuxInt = int32ToAuxInt(1) - v2 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64) - v2.AddArg(x) - v1.AddArg2(v2, v2) - v0.AddArg(v1) + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(x, y) v.AddArg(v0) return true } - // match: (BitLen32 x) - // cond: buildcfg.GOAMD64 >= 3 - // result: 
(NEGQ (ADDQconst [-32] (LZCNTL x))) +} +func rewriteValueAMD64_OpEqualUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualUint64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPUQ512 [0] x y)) for { - t := v.Type x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64NEGQ) - v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) - v0.AuxInt = int32ToAuxInt(-32) - v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) - v1.AddArg(x) - v0.AddArg(v1) + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(x, y) v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpBitLen64(v *Value) bool { +func rewriteValueAMD64_OpEqualUint8x16(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (BitLen64 x) - // cond: buildcfg.GOAMD64 < 3 - // result: (ADDQconst [1] (CMOVQEQ (Select0 (BSRQ x)) (MOVQconst [-1]) (Select1 (BSRQ x)))) + // match: (EqualUint8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPUB128 [0] x y)) for { - t := v.Type x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpAMD64ADDQconst) - v.AuxInt = int32ToAuxInt(1) - v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t) - v1 := b.NewValue0(v.Pos, OpSelect0, t) - v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v2.AddArg(x) - v1.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t) - v3.AuxInt = int64ToAuxInt(-1) - v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v4.AddArg(v2) - v0.AddArg3(v1, v3, v4) + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(x, y) v.AddArg(v0) return true } - // match: (BitLen64 x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (NEGQ (ADDQconst [-64] (LZCNTQ x))) +} +func rewriteValueAMD64_OpEqualUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualUint8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPUB256 [0] x y)) for { - t := v.Type x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64NEGQ) - v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) - v0.AuxInt = int32ToAuxInt(-64) - v1 := b.NewValue0(v.Pos, OpAMD64LZCNTQ, typ.UInt64) - v1.AddArg(x) - v0.AddArg(v1) + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(x, y) v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpBitLen8(v *Value) bool { +func rewriteValueAMD64_OpEqualUint8x64(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (BitLen8 x) - // cond: buildcfg.GOAMD64 < 3 - // result: (BSRL (LEAL1 [1] (MOVBQZX x) (MOVBQZX x))) + // match: (EqualUint8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [0] x y)) for { x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpAMD64BSRL) - v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32) - v0.AuxInt = int32ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32) - v1.AddArg(x) - v0.AddArg2(v1, v1) + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(x, y) v.AddArg(v0) return true } - // match: (BitLen8 x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (NEGQ (ADDQconst [-32] (LZCNTL (MOVBQZX x)))) +} +func 
rewriteValueAMD64_OpFMA(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FMA x y z) + // result: (VFMADD231SD z x y) for { - t := v.Type x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64NEGQ) - v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) - v0.AuxInt = int32ToAuxInt(-32) - v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, x.Type) - v2.AddArg(x) - v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + y := v_1 + z := v_2 + v.reset(OpAMD64VFMADD231SD) + v.AddArg3(z, x, y) return true } - return false } -func rewriteValueAMD64_OpBswap16(v *Value) bool { +func rewriteValueAMD64_OpFloor(v *Value) bool { v_0 := v.Args[0] - // match: (Bswap16 x) - // result: (ROLWconst [8] x) + // match: (Floor x) + // result: (ROUNDSD [1] x) for { x := v_0 - v.reset(OpAMD64ROLWconst) - v.AuxInt = int8ToAuxInt(8) + v.reset(OpAMD64ROUNDSD) + v.AuxInt = int8ToAuxInt(1) v.AddArg(x) return true } } -func rewriteValueAMD64_OpCeil(v *Value) bool { +func rewriteValueAMD64_OpGetG(v *Value) bool { v_0 := v.Args[0] - // match: (Ceil x) - // result: (ROUNDSD [2] x) + // match: (GetG mem) + // cond: v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal + // result: (LoweredGetG mem) for { - x := v_0 - v.reset(OpAMD64ROUNDSD) - v.AuxInt = int8ToAuxInt(2) - v.AddArg(x) + mem := v_0 + if !(v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal) { + break + } + v.reset(OpAMD64LoweredGetG) + v.AddArg(mem) return true } + return false } -func rewriteValueAMD64_OpCondSelect(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (CondSelect x y (SETEQ cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQEQ y x cond) + // match: (GreaterEqualFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETEQ { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQEQ) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETNE cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQNE y x cond) +} +func rewriteValueAMD64_OpGreaterEqualFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterEqualFloat32x4 x y) + // result: (VCMPPS128 [5] x y) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETNE { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQNE) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VCMPPS128) + v.AuxInt = int8ToAuxInt(5) + v.AddArg2(x, y) return true } - // match: (CondSelect x y (SETL cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQLT y x cond) +} +func rewriteValueAMD64_OpGreaterEqualFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterEqualFloat32x8 x y) + // result: (VCMPPS256 [5] x y) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETL { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQLT) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VCMPPS256) + v.AuxInt = int8ToAuxInt(5) + v.AddArg2(x, y) return true } - // match: (CondSelect x y (SETG cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQGT y 
x cond) +} +func rewriteValueAMD64_OpGreaterEqualFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterEqualFloat64x2 x y) + // result: (VCMPPD128 [5] x y) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETG { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQGT) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VCMPPD128) + v.AuxInt = int8ToAuxInt(5) + v.AddArg2(x, y) return true } - // match: (CondSelect x y (SETLE cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQLE y x cond) +} +func rewriteValueAMD64_OpGreaterEqualFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterEqualFloat64x4 x y) + // result: (VCMPPD256 [5] x y) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETLE { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQLE) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VCMPPD256) + v.AuxInt = int8ToAuxInt(5) + v.AddArg2(x, y) return true } - // match: (CondSelect x y (SETGE cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQGE y x cond) +} +func rewriteValueAMD64_OpGreaterEqualFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGE { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQGE) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETA cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQHI y x cond) +} +func rewriteValueAMD64_OpGreaterEqualInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPW256 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETA { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQHI) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETB cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQCS y x cond) +} +func rewriteValueAMD64_OpGreaterEqualInt16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPW512 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETB { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQCS) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETAE cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQCC y x cond) +} +func rewriteValueAMD64_OpGreaterEqualInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt16x8 x y) + // result: (VPMOVMToVec16x8 
(VPCMPW128 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETAE { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQCC) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETBE cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQLS y x cond) +} +func rewriteValueAMD64_OpGreaterEqualInt32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPD512 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETBE { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQLS) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETEQF cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQEQF y x cond) +} +func rewriteValueAMD64_OpGreaterEqualInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPD128 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETEQF { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQEQF) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETNEF cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQNEF y x cond) +} +func rewriteValueAMD64_OpGreaterEqualInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPD256 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETNEF { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQNEF) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETGF cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQGTF y x cond) +} +func rewriteValueAMD64_OpGreaterEqualInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPQ128 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGF { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQGTF) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETGEF cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQGEF y x cond) +} +func rewriteValueAMD64_OpGreaterEqualInt64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: 
(GreaterEqualInt64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPQ256 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGEF { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQGEF) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETEQ cond)) - // cond: is32BitInt(t) - // result: (CMOVLEQ y x cond) +} +func rewriteValueAMD64_OpGreaterEqualInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPQ512 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETEQ { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLEQ) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETNE cond)) - // cond: is32BitInt(t) - // result: (CMOVLNE y x cond) +} +func rewriteValueAMD64_OpGreaterEqualInt8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPB128 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETNE { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLNE) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETL cond)) - // cond: is32BitInt(t) - // result: (CMOVLLT y x cond) +} +func rewriteValueAMD64_OpGreaterEqualInt8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPB256 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETL { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLLT) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETG cond)) - // cond: is32BitInt(t) - // result: (CMOVLGT y x cond) +} +func rewriteValueAMD64_OpGreaterEqualInt8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPB512 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETG { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLGT) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETLE cond)) - // cond: is32BitInt(t) - // result: (CMOVLLE y x cond) +} +func rewriteValueAMD64_OpGreaterEqualUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPUW256 [5] x y)) 
for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETLE { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLLE) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETGE cond)) - // cond: is32BitInt(t) - // result: (CMOVLGE y x cond) +} +func rewriteValueAMD64_OpGreaterEqualUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPUW512 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGE { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLGE) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETA cond)) - // cond: is32BitInt(t) - // result: (CMOVLHI y x cond) +} +func rewriteValueAMD64_OpGreaterEqualUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPUW128 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETA { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLHI) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETB cond)) - // cond: is32BitInt(t) - // result: (CMOVLCS y x cond) +} +func rewriteValueAMD64_OpGreaterEqualUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETB { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLCS) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETAE cond)) - // cond: is32BitInt(t) - // result: (CMOVLCC y x cond) +} +func rewriteValueAMD64_OpGreaterEqualUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPUD128 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETAE { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLCC) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETBE cond)) - // cond: is32BitInt(t) - // result: (CMOVLLS y x cond) +} +func rewriteValueAMD64_OpGreaterEqualUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPUD256 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETBE { - 
break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLLS) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETEQF cond)) - // cond: is32BitInt(t) - // result: (CMOVLEQF y x cond) +} +func rewriteValueAMD64_OpGreaterEqualUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPUQ128 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETEQF { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLEQF) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETNEF cond)) - // cond: is32BitInt(t) - // result: (CMOVLNEF y x cond) +} +func rewriteValueAMD64_OpGreaterEqualUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPUQ256 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETNEF { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLNEF) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETGF cond)) - // cond: is32BitInt(t) - // result: (CMOVLGTF y x cond) +} +func rewriteValueAMD64_OpGreaterEqualUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPUQ512 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGF { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLGTF) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETGEF cond)) - // cond: is32BitInt(t) - // result: (CMOVLGEF y x cond) +} +func rewriteValueAMD64_OpGreaterEqualUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPUB128 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGEF { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLGEF) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETEQ cond)) - // cond: is16BitInt(t) - // result: (CMOVWEQ y x cond) +} +func rewriteValueAMD64_OpGreaterEqualUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPUB256 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETEQ { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - 
break - } - v.reset(OpAMD64CMOVWEQ) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETNE cond)) - // cond: is16BitInt(t) - // result: (CMOVWNE y x cond) +} +func rewriteValueAMD64_OpGreaterEqualUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETNE { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWNE) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETL cond)) - // cond: is16BitInt(t) - // result: (CMOVWLT y x cond) +} +func rewriteValueAMD64_OpGreaterFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [6] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETL { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWLT) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETG cond)) - // cond: is16BitInt(t) - // result: (CMOVWGT y x cond) +} +func rewriteValueAMD64_OpGreaterFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterFloat32x4 x y) + // result: (VCMPPS128 [6] x y) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETG { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWGT) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VCMPPS128) + v.AuxInt = int8ToAuxInt(6) + v.AddArg2(x, y) return true } - // match: (CondSelect x y (SETLE cond)) - // cond: is16BitInt(t) - // result: (CMOVWLE y x cond) +} +func rewriteValueAMD64_OpGreaterFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterFloat32x8 x y) + // result: (VCMPPS256 [6] x y) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETLE { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWLE) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VCMPPS256) + v.AuxInt = int8ToAuxInt(6) + v.AddArg2(x, y) return true } - // match: (CondSelect x y (SETGE cond)) - // cond: is16BitInt(t) - // result: (CMOVWGE y x cond) +} +func rewriteValueAMD64_OpGreaterFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterFloat64x2 x y) + // result: (VCMPPD128 [6] x y) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGE { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWGE) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VCMPPD128) + v.AuxInt = int8ToAuxInt(6) + v.AddArg2(x, y) return true } - // match: (CondSelect x y (SETA cond)) - // cond: is16BitInt(t) - // result: (CMOVWHI y x cond) +} +func rewriteValueAMD64_OpGreaterFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterFloat64x4 x y) + // result: (VCMPPD256 [6] x y) for { - t := v.Type x := v_0 y := v_1 - if 
v_2.Op != OpAMD64SETA { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWHI) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VCMPPD256) + v.AuxInt = int8ToAuxInt(6) + v.AddArg2(x, y) return true } - // match: (CondSelect x y (SETB cond)) - // cond: is16BitInt(t) - // result: (CMOVWCS y x cond) +} +func rewriteValueAMD64_OpGreaterFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [6] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETB { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWCS) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETAE cond)) - // cond: is16BitInt(t) - // result: (CMOVWCC y x cond) +} +func rewriteValueAMD64_OpGreaterInt16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterInt16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPGTW512 x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETAE { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWCC) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTW512, typ.Mask) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETBE cond)) - // cond: is16BitInt(t) - // result: (CMOVWLS y x cond) +} +func rewriteValueAMD64_OpGreaterInt32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterInt32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPGTD512 x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETBE { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWLS) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTD512, typ.Mask) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETEQF cond)) - // cond: is16BitInt(t) - // result: (CMOVWEQF y x cond) +} +func rewriteValueAMD64_OpGreaterInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterInt64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPGTQ128 x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETEQF { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWEQF) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQ128, typ.Mask) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETNEF cond)) - // cond: is16BitInt(t) - // result: (CMOVWNEF y x cond) +} +func rewriteValueAMD64_OpGreaterInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterInt64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPGTQ512 x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETNEF { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWNEF) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQ512, typ.Mask) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: 
(CondSelect x y (SETGF cond)) - // cond: is16BitInt(t) - // result: (CMOVWGTF y x cond) +} +func rewriteValueAMD64_OpGreaterInt8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterInt8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPGTB512 x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGF { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWGTF) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTB512, typ.Mask) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETGEF cond)) - // cond: is16BitInt(t) - // result: (CMOVWGEF y x cond) +} +func rewriteValueAMD64_OpGreaterUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPUW256 [6] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGEF { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWGEF) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y check) - // cond: !check.Type.IsFlags() && check.Type.Size() == 1 - // result: (CondSelect x y (MOVBQZX check)) +} +func rewriteValueAMD64_OpGreaterUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPUW512 [6] x y)) for { - t := v.Type x := v_0 y := v_1 - check := v_2 - if !(!check.Type.IsFlags() && check.Type.Size() == 1) { - break - } - v.reset(OpCondSelect) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt64) - v0.AddArg(check) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y check) - // cond: !check.Type.IsFlags() && check.Type.Size() == 2 - // result: (CondSelect x y (MOVWQZX check)) +} +func rewriteValueAMD64_OpGreaterUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPUW128 [6] x y)) for { - t := v.Type x := v_0 y := v_1 - check := v_2 - if !(!check.Type.IsFlags() && check.Type.Size() == 2) { - break - } - v.reset(OpCondSelect) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt64) - v0.AddArg(check) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y check) - // cond: !check.Type.IsFlags() && check.Type.Size() == 4 - // result: (CondSelect x y (MOVLQZX check)) +} +func rewriteValueAMD64_OpGreaterUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [6] x y)) for { - t := v.Type x := v_0 y := v_1 - check := v_2 - if !(!check.Type.IsFlags() && check.Type.Size() == 4) { - break - } - v.reset(OpCondSelect) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64) - v0.AddArg(check) - v.AddArg3(x, 
y, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y check) - // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t)) - // result: (CMOVQNE y x (CMPQconst [0] check)) +} +func rewriteValueAMD64_OpGreaterUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPUD128 [6] x y)) for { - t := v.Type x := v_0 y := v_1 - check := v_2 - if !(!check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))) { - break - } - v.reset(OpAMD64CMOVQNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v0.AuxInt = int32ToAuxInt(0) - v0.AddArg(check) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y check) - // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t) - // result: (CMOVLNE y x (CMPQconst [0] check)) +} +func rewriteValueAMD64_OpGreaterUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPUD256 [6] x y)) for { - t := v.Type x := v_0 y := v_1 - check := v_2 - if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v0.AuxInt = int32ToAuxInt(0) - v0.AddArg(check) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y check) - // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t) - // result: (CMOVWNE y x (CMPQconst [0] check)) +} +func rewriteValueAMD64_OpGreaterUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPUQ128 [6] x y)) for { - t := v.Type x := v_0 y := v_1 - check := v_2 - if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v0.AuxInt = int32ToAuxInt(0) - v0.AddArg(check) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpConst16(v *Value) bool { - // match: (Const16 [c]) - // result: (MOVLconst [int32(c)]) +func rewriteValueAMD64_OpGreaterUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPUQ256 [6] x y)) for { - c := auxIntToInt16(v.AuxInt) - v.reset(OpAMD64MOVLconst) - v.AuxInt = int32ToAuxInt(int32(c)) - return true - } -} -func rewriteValueAMD64_OpConst8(v *Value) bool { - // match: (Const8 [c]) - // result: (MOVLconst [int32(c)]) - for { - c := auxIntToInt8(v.AuxInt) - v.reset(OpAMD64MOVLconst) - v.AuxInt = int32ToAuxInt(int32(c)) - return true - } -} -func rewriteValueAMD64_OpConstBool(v *Value) bool { - // 
match: (ConstBool [c]) - // result: (MOVLconst [b2i32(c)]) - for { - c := auxIntToBool(v.AuxInt) - v.reset(OpAMD64MOVLconst) - v.AuxInt = int32ToAuxInt(b2i32(c)) - return true - } -} -func rewriteValueAMD64_OpConstNil(v *Value) bool { - // match: (ConstNil ) - // result: (MOVQconst [0]) - for { - v.reset(OpAMD64MOVQconst) - v.AuxInt = int64ToAuxInt(0) + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpCtz16(v *Value) bool { +func rewriteValueAMD64_OpGreaterUint64x8(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Ctz16 x) - // result: (BSFL (ORLconst [1<<16] x)) + // match: (GreaterUint64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPUQ512 [6] x y)) for { x := v_0 - v.reset(OpAMD64BSFL) - v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32) - v0.AuxInt = int32ToAuxInt(1 << 16) - v0.AddArg(x) + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpCtz16NonZero(v *Value) bool { +func rewriteValueAMD64_OpGreaterUint8x16(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Ctz16NonZero x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (TZCNTL x) - for { - x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64TZCNTL) - v.AddArg(x) - return true - } - // match: (Ctz16NonZero x) - // cond: buildcfg.GOAMD64 < 3 - // result: (BSFL x) + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPUB128 [6] x y)) for { x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpAMD64BSFL) - v.AddArg(x) + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpCtz32(v *Value) bool { +func rewriteValueAMD64_OpGreaterUint8x32(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Ctz32 x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (TZCNTL x) - for { - x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64TZCNTL) - v.AddArg(x) - return true - } - // match: (Ctz32 x) - // cond: buildcfg.GOAMD64 < 3 - // result: (Select0 (BSFQ (BTSQconst [32] x))) + // match: (GreaterUint8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPUB256 [6] x y)) for { x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v1 := b.NewValue0(v.Pos, OpAMD64BTSQconst, typ.UInt64) - v1.AuxInt = int8ToAuxInt(32) - v1.AddArg(x) - v0.AddArg(v1) + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(x, y) v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpCtz32NonZero(v *Value) bool { +func rewriteValueAMD64_OpGreaterUint8x64(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Ctz32NonZero x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (TZCNTL x) - for { - x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64TZCNTL) - v.AddArg(x) - return true - } - // match: (Ctz32NonZero x) - // cond: buildcfg.GOAMD64 < 3 - // result: (BSFL 
x) + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [6] x y)) for { x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpAMD64BSFL) - v.AddArg(x) + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpCtz64(v *Value) bool { - v_0 := v.Args[0] +func rewriteValueAMD64_OpHasCPUFeature(v *Value) bool { b := v.Block typ := &b.Func.Config.Types - // match: (Ctz64 x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (TZCNTQ x) + // match: (HasCPUFeature {s}) + // result: (SETNE (CMPLconst [0] (LoweredHasCPUFeature {s}))) for { - x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64TZCNTQ) - v.AddArg(x) + s := auxToSym(v.Aux) + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64LoweredHasCPUFeature, typ.UInt64) + v1.Aux = symToAux(s) + v0.AddArg(v1) + v.AddArg(v0) return true } - // match: (Ctz64 x) - // cond: buildcfg.GOAMD64 < 3 - // result: (CMOVQEQ (Select0 (BSFQ x)) (MOVQconst [64]) (Select1 (BSFQ x))) +} +func rewriteValueAMD64_OpIsInBounds(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (IsInBounds idx len) + // result: (SETB (CMPQ idx len)) for { - t := v.Type - x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpAMD64CMOVQEQ) - v0 := b.NewValue0(v.Pos, OpSelect0, t) - v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t) - v2.AuxInt = int64ToAuxInt(64) - v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v3.AddArg(v1) - v.AddArg3(v0, v2, v3) + idx := v_0 + len := v_1 + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(idx, len) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpCtz64NonZero(v *Value) bool { +func rewriteValueAMD64_OpIsNanFloat32x16(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Ctz64NonZero x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (TZCNTQ x) + // match: (IsNanFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [3] x y)) for { x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64TZCNTQ) - v.AddArg(x) + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (Ctz64NonZero x) - // cond: buildcfg.GOAMD64 < 3 - // result: (Select0 (BSFQ x)) +} +func rewriteValueAMD64_OpIsNanFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (IsNanFloat32x4 x y) + // result: (VCMPPS128 [3] x y) for { x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v0.AddArg(x) - v.AddArg(v0) + y := v_1 + v.reset(OpAMD64VCMPPS128) + v.AuxInt = int8ToAuxInt(3) + v.AddArg2(x, y) return true } - return false } -func rewriteValueAMD64_OpCtz8(v *Value) bool { +func rewriteValueAMD64_OpIsNanFloat32x8(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Ctz8 x) - // result: (BSFL (ORLconst [1<<8 ] x)) + // match: (IsNanFloat32x8 x y) + 
// result: (VCMPPS256 [3] x y) for { x := v_0 - v.reset(OpAMD64BSFL) - v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32) - v0.AuxInt = int32ToAuxInt(1 << 8) - v0.AddArg(x) - v.AddArg(v0) + y := v_1 + v.reset(OpAMD64VCMPPS256) + v.AuxInt = int8ToAuxInt(3) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpCtz8NonZero(v *Value) bool { +func rewriteValueAMD64_OpIsNanFloat64x2(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Ctz8NonZero x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (TZCNTL x) + // match: (IsNanFloat64x2 x y) + // result: (VCMPPD128 [3] x y) for { x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64TZCNTL) - v.AddArg(x) + y := v_1 + v.reset(OpAMD64VCMPPD128) + v.AuxInt = int8ToAuxInt(3) + v.AddArg2(x, y) return true } - // match: (Ctz8NonZero x) - // cond: buildcfg.GOAMD64 < 3 - // result: (BSFL x) +} +func rewriteValueAMD64_OpIsNanFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (IsNanFloat64x4 x y) + // result: (VCMPPD256 [3] x y) for { x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpAMD64BSFL) - v.AddArg(x) + y := v_1 + v.reset(OpAMD64VCMPPD256) + v.AuxInt = int8ToAuxInt(3) + v.AddArg2(x, y) return true } - return false } -func rewriteValueAMD64_OpDiv16(v *Value) bool { +func rewriteValueAMD64_OpIsNanFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Div16 [a] x y) - // result: (Select0 (DIVW [a] x y)) + // match: (IsNanFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [3] x y)) for { - a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) - v0.AuxInt = boolToAuxInt(a) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpDiv16u(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpIsNonNil(v *Value) bool { v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Div16u x y) - // result: (Select0 (DIVWU x y)) + // match: (IsNonNil p) + // result: (SETNE (TESTQ p p)) for { - x := v_0 - y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) - v0.AddArg2(x, y) + p := v_0 + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags) + v0.AddArg2(p, p) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpDiv32(v *Value) bool { +func rewriteValueAMD64_OpIsSliceInBounds(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Div32 [a] x y) - // result: (Select0 (DIVL [a] x y)) + // match: (IsSliceInBounds idx len) + // result: (SETBE (CMPQ idx len)) for { - a := auxIntToBool(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) - v0.AuxInt = boolToAuxInt(a) - v0.AddArg2(x, y) + idx := v_0 + len := v_1 + v.reset(OpAMD64SETBE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(idx, len) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpDiv32u(v *Value) bool { +func rewriteValueAMD64_OpLeq16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Div32u x y) - // result: (Select0 (DIVLU x y)) + // match: (Leq16 x y) + // result: (SETLE (CMPW x y)) for { x := v_0 y := v_1 - v.reset(OpSelect0) - v0 := 
b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32)) + v.reset(OpAMD64SETLE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpDiv64(v *Value) bool { +func rewriteValueAMD64_OpLeq16U(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Div64 [a] x y) - // result: (Select0 (DIVQ [a] x y)) + // match: (Leq16U x y) + // result: (SETBE (CMPW x y)) for { - a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64)) - v0.AuxInt = boolToAuxInt(a) + v.reset(OpAMD64SETBE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpDiv64u(v *Value) bool { +func rewriteValueAMD64_OpLeq32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Div64u x y) - // result: (Select0 (DIVQU x y)) + // match: (Leq32 x y) + // result: (SETLE (CMPL x y)) for { x := v_0 y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) + v.reset(OpAMD64SETLE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpDiv8(v *Value) bool { +func rewriteValueAMD64_OpLeq32F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Div8 x y) - // result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y))) + // match: (Leq32F x y) + // result: (SETGEF (UCOMISS y x)) for { x := v_0 y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) - v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) - v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) - v2.AddArg(y) - v0.AddArg2(v1, v2) + v.reset(OpAMD64SETGEF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) + v0.AddArg2(y, x) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpDiv8u(v *Value) bool { +func rewriteValueAMD64_OpLeq32U(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Div8u x y) - // result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) + // match: (Leq32U x y) + // result: (SETBE (CMPL x y)) for { x := v_0 y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) - v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) - v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) - v2.AddArg(y) - v0.AddArg2(v1, v2) + v.reset(OpAMD64SETBE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpDivFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpLeq64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DivFloat32x16 x y) - // result: (VDIVPS512 y x) + b := v.Block + // match: (Leq64 x y) + // result: (SETLE (CMPQ x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VDIVPS512) - v.AddArg2(y, x) + v.reset(OpAMD64SETLE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpDivFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpLeq64F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DivFloat32x4 x y) - // result: (VDIVPS128 y x) + b := v.Block + // match: (Leq64F x y) + // 
result: (SETGEF (UCOMISD y x)) for { x := v_0 y := v_1 - v.reset(OpAMD64VDIVPS128) - v.AddArg2(y, x) + v.reset(OpAMD64SETGEF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpDivFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpLeq64U(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DivFloat32x8 x y) - // result: (VDIVPS256 y x) + b := v.Block + // match: (Leq64U x y) + // result: (SETBE (CMPQ x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VDIVPS256) - v.AddArg2(y, x) + v.reset(OpAMD64SETBE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpDivFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpLeq8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DivFloat64x2 x y) - // result: (VDIVPD128 y x) + b := v.Block + // match: (Leq8 x y) + // result: (SETLE (CMPB x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VDIVPD128) - v.AddArg2(y, x) + v.reset(OpAMD64SETLE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpDivFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpLeq8U(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DivFloat64x4 x y) - // result: (VDIVPD256 y x) + b := v.Block + // match: (Leq8U x y) + // result: (SETBE (CMPB x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VDIVPD256) - v.AddArg2(y, x) + v.reset(OpAMD64SETBE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpDivFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpLess16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DivFloat64x8 x y) - // result: (VDIVPD512 y x) + b := v.Block + // match: (Less16 x y) + // result: (SETL (CMPW x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VDIVPD512) - v.AddArg2(y, x) + v.reset(OpAMD64SETL) + v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEq16(v *Value) bool { +func rewriteValueAMD64_OpLess16U(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Eq16 x y) - // result: (SETEQ (CMPW x y)) + // match: (Less16U x y) + // result: (SETB (CMPW x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETEQ) + v.reset(OpAMD64SETB) v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEq32(v *Value) bool { +func rewriteValueAMD64_OpLess32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Eq32 x y) - // result: (SETEQ (CMPL x y)) + // match: (Less32 x y) + // result: (SETL (CMPL x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETEQ) + v.reset(OpAMD64SETL) v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEq32F(v *Value) bool { +func rewriteValueAMD64_OpLess32F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Eq32F x y) - // result: (SETEQF (UCOMISS x y)) + // match: (Less32F x y) + // result: (SETGF (UCOMISS y x)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETEQF) + v.reset(OpAMD64SETGF) v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) - v0.AddArg2(x, y) + v0.AddArg2(y, x) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEq64(v *Value) bool { +func rewriteValueAMD64_OpLess32U(v *Value) bool { v_1 := 
v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Eq64 x y) - // result: (SETEQ (CMPQ x y)) + // match: (Less32U x y) + // result: (SETB (CMPL x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETEQ) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEq64F(v *Value) bool { +func rewriteValueAMD64_OpLess64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Eq64F x y) - // result: (SETEQF (UCOMISD x y)) + // match: (Less64 x y) + // result: (SETL (CMPQ x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETEQF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) + v.reset(OpAMD64SETL) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEq8(v *Value) bool { +func rewriteValueAMD64_OpLess64F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Eq8 x y) - // result: (SETEQ (CMPB x y)) + // match: (Less64F x y) + // result: (SETGF (UCOMISD y x)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETEQ) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v.reset(OpAMD64SETGF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLess64U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less64U x y) + // result: (SETB (CMPQ x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqB(v *Value) bool { +func rewriteValueAMD64_OpLess8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (EqB x y) - // result: (SETEQ (CMPB x y)) + // match: (Less8 x y) + // result: (SETL (CMPB x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETEQ) + v.reset(OpAMD64SETL) v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqPtr(v *Value) bool { +func rewriteValueAMD64_OpLess8U(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (EqPtr x y) - // result: (SETEQ (CMPQ x y)) + // match: (Less8U x y) + // result: (SETB (CMPB x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETEQ) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpLessEqualFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualFloat32x16 x y) - // result: (VPMOVMToVec32x16 (VCMPPS512 [0] y x)) + // match: (LessEqualFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpLessEqualFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (EqualFloat32x4 x y) - // result: (VCMPPS128 [0] y x) + // match: (LessEqualFloat32x4 x y) + // result: (VCMPPS128 [2] x y) for { x := v_0 y := v_1 
v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(0) - v.AddArg2(y, x) + v.AuxInt = int8ToAuxInt(2) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpEqualFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpLessEqualFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (EqualFloat32x8 x y) - // result: (VCMPPS256 [0] y x) + // match: (LessEqualFloat32x8 x y) + // result: (VCMPPS256 [2] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(0) - v.AddArg2(y, x) + v.AuxInt = int8ToAuxInt(2) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpEqualFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpLessEqualFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (EqualFloat64x2 x y) - // result: (VCMPPD128 [0] y x) + // match: (LessEqualFloat64x2 x y) + // result: (VCMPPD128 [2] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(0) - v.AddArg2(y, x) + v.AuxInt = int8ToAuxInt(2) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpEqualFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpLessEqualFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (EqualFloat64x4 x y) - // result: (VCMPPD256 [0] y x) + // match: (LessEqualFloat64x4 x y) + // result: (VCMPPD256 [2] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(0) - v.AddArg2(y, x) + v.AuxInt = int8ToAuxInt(2) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpEqualFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpLessEqualFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualFloat64x8 x y) - // result: (VPMOVMToVec64x8 (VCMPPD512 [0] y x)) + // match: (LessEqualFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualInt16x16(v *Value) bool { +func rewriteValueAMD64_OpLessEqualInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (EqualInt16x16 x y) - // result: (VPCMPEQW256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualInt16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPW256 [2] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPCMPEQW256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualInt16x32(v *Value) bool { +func rewriteValueAMD64_OpLessEqualInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualInt16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPEQW512 y x)) + // match: (LessEqualInt16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPW512 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQW512, typ.Mask) - v0.AddArg2(y, x) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualInt16x8(v *Value) bool { +func rewriteValueAMD64_OpLessEqualInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (EqualInt16x8 x y) - // result: (VPCMPEQW128 y x) + b := v.Block + typ := 
&b.Func.Config.Types + // match: (LessEqualInt16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPW128 [2] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPCMPEQW128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualInt32x16(v *Value) bool { +func rewriteValueAMD64_OpLessEqualInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualInt32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPD512 [0] y x)) + // match: (LessEqualInt32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPD512 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualInt32x4(v *Value) bool { +func rewriteValueAMD64_OpLessEqualInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (EqualInt32x4 x y) - // result: (VPCMPEQD128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualInt32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPD128 [2] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPCMPEQD128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualInt32x8(v *Value) bool { +func rewriteValueAMD64_OpLessEqualInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (EqualInt32x8 x y) - // result: (VPCMPEQD256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualInt32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPD256 [2] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPCMPEQD256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualInt64x2(v *Value) bool { +func rewriteValueAMD64_OpLessEqualInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (EqualInt64x2 x y) - // result: (VPCMPEQQ128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualInt64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPQ128 [2] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPCMPEQQ128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualInt64x4(v *Value) bool { +func rewriteValueAMD64_OpLessEqualInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (EqualInt64x4 x y) - // result: (VPCMPEQQ256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualInt64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPQ256 [2] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPCMPEQQ256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualInt64x8(v *Value) bool { +func rewriteValueAMD64_OpLessEqualInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualInt64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPEQQ512 y 
x)) + // match: (LessEqualInt64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPQ512 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQQ512, typ.Mask) - v0.AddArg2(y, x) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualInt8x16(v *Value) bool { +func rewriteValueAMD64_OpLessEqualInt8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (EqualInt8x16 x y) - // result: (VPCMPEQB128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualInt8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPB128 [2] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPCMPEQB128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualInt8x32(v *Value) bool { +func rewriteValueAMD64_OpLessEqualInt8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (EqualInt8x32 x y) - // result: (VPCMPEQB256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualInt8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPB256 [2] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPCMPEQB256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualInt8x64(v *Value) bool { +func rewriteValueAMD64_OpLessEqualInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualInt8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPB512 [0] y x)) + // match: (LessEqualInt8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPB512 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualUint16x16(v *Value) bool { +func rewriteValueAMD64_OpLessEqualUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualUint16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPUW256 [0] y x)) + // match: (LessEqualUint16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPUW256 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualUint16x32(v *Value) bool { +func rewriteValueAMD64_OpLessEqualUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualUint16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPUW512 [0] y x)) + // match: (LessEqualUint16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPUW512 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualUint16x8(v *Value) bool { +func rewriteValueAMD64_OpLessEqualUint16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: 
(EqualUint16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPUW128 [0] y x)) + // match: (LessEqualUint16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPUW128 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualUint32x16(v *Value) bool { +func rewriteValueAMD64_OpLessEqualUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualUint32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPUD512 [0] y x)) + // match: (LessEqualUint32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualUint32x4(v *Value) bool { +func rewriteValueAMD64_OpLessEqualUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualUint32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPUD128 [0] y x)) + // match: (LessEqualUint32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPUD128 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualUint32x8(v *Value) bool { +func rewriteValueAMD64_OpLessEqualUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualUint32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPUD256 [0] y x)) + // match: (LessEqualUint32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPUD256 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualUint64x2(v *Value) bool { +func rewriteValueAMD64_OpLessEqualUint64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualUint64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPUQ128 [0] y x)) + // match: (LessEqualUint64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPUQ128 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualUint64x4(v *Value) bool { +func rewriteValueAMD64_OpLessEqualUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualUint64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPUQ256 [0] y x)) + // match: (LessEqualUint64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPUQ256 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualUint64x8(v *Value) bool { +func rewriteValueAMD64_OpLessEqualUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 
:= v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualUint64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPUQ512 [0] y x)) + // match: (LessEqualUint64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPUQ512 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualUint8x16(v *Value) bool { +func rewriteValueAMD64_OpLessEqualUint8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualUint8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPUB128 [0] y x)) + // match: (LessEqualUint8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPUB128 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualUint8x32(v *Value) bool { +func rewriteValueAMD64_OpLessEqualUint8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualUint8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPUB256 [0] y x)) + // match: (LessEqualUint8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPUB256 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualUint8x64(v *Value) bool { +func rewriteValueAMD64_OpLessEqualUint8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualUint8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPUB512 [0] y x)) + // match: (LessEqualUint8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpFMA(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (FMA x y z) - // result: (VFMADD231SD z x y) - for { - x := v_0 - y := v_1 - z := v_2 - v.reset(OpAMD64VFMADD231SD) - v.AddArg3(z, x, y) - return true - } -} -func rewriteValueAMD64_OpFloor(v *Value) bool { - v_0 := v.Args[0] - // match: (Floor x) - // result: (ROUNDSD [1] x) - for { - x := v_0 - v.reset(OpAMD64ROUNDSD) - v.AuxInt = int8ToAuxInt(1) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetG(v *Value) bool { - v_0 := v.Args[0] - // match: (GetG mem) - // cond: v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal - // result: (LoweredGetG mem) - for { - mem := v_0 - if !(v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal) { - break - } - v.reset(OpAMD64LoweredGetG) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpGreaterEqualFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpLessFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualFloat32x16 x y) - // result: (VPMOVMToVec32x16 (VCMPPS512 [5] y x)) + // match: (LessFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [1] x y)) for { x := v_0 y := v_1 
v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpLessFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (GreaterEqualFloat32x4 x y) - // result: (VCMPPS128 [5] y x) + // match: (LessFloat32x4 x y) + // result: (VCMPPS128 [1] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(5) - v.AddArg2(y, x) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpGreaterEqualFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpLessFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (GreaterEqualFloat32x8 x y) - // result: (VCMPPS256 [5] y x) + // match: (LessFloat32x8 x y) + // result: (VCMPPS256 [1] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(5) - v.AddArg2(y, x) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpGreaterEqualFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpLessFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (GreaterEqualFloat64x2 x y) - // result: (VCMPPD128 [5] y x) + // match: (LessFloat64x2 x y) + // result: (VCMPPD128 [1] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(5) - v.AddArg2(y, x) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpGreaterEqualFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpLessFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (GreaterEqualFloat64x4 x y) - // result: (VCMPPD256 [5] y x) + // match: (LessFloat64x4 x y) + // result: (VCMPPD256 [1] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(5) - v.AddArg2(y, x) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpGreaterEqualFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpLessFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualFloat64x8 x y) - // result: (VPMOVMToVec64x8 (VCMPPD512 [5] y x)) + // match: (LessFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualInt16x16(v *Value) bool { +func rewriteValueAMD64_OpLessInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualInt16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPW256 [5] y x)) + // match: (LessInt16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPW256 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualInt16x32(v *Value) bool { +func rewriteValueAMD64_OpLessInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualInt16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPW512 [5] y x)) + // match: (LessInt16x32 
x y) + // result: (VPMOVMToVec16x32 (VPCMPW512 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualInt16x8(v *Value) bool { +func rewriteValueAMD64_OpLessInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualInt16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPW128 [5] y x)) + // match: (LessInt16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPW128 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualInt32x16(v *Value) bool { +func rewriteValueAMD64_OpLessInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualInt32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPD512 [5] y x)) + // match: (LessInt32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPD512 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualInt32x4(v *Value) bool { +func rewriteValueAMD64_OpLessInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualInt32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPD128 [5] y x)) + // match: (LessInt32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPD128 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualInt32x8(v *Value) bool { +func rewriteValueAMD64_OpLessInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualInt32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPD256 [5] y x)) + // match: (LessInt32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPD256 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualInt64x2(v *Value) bool { +func rewriteValueAMD64_OpLessInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualInt64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPQ128 [5] y x)) + // match: (LessInt64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPQ128 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualInt64x4(v *Value) bool { +func rewriteValueAMD64_OpLessInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualInt64x4 x y) - // result: 
(VPMOVMToVec64x4 (VPCMPQ256 [5] y x)) + // match: (LessInt64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPQ256 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualInt64x8(v *Value) bool { +func rewriteValueAMD64_OpLessInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualInt64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPQ512 [5] y x)) + // match: (LessInt64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPQ512 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualInt8x16(v *Value) bool { +func rewriteValueAMD64_OpLessInt8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualInt8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPB128 [5] y x)) + // match: (LessInt8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPB128 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualInt8x32(v *Value) bool { +func rewriteValueAMD64_OpLessInt8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualInt8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPB256 [5] y x)) + // match: (LessInt8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPB256 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualInt8x64(v *Value) bool { +func rewriteValueAMD64_OpLessInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualInt8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPB512 [5] y x)) + // match: (LessInt8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPB512 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualUint16x16(v *Value) bool { +func rewriteValueAMD64_OpLessUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualUint16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPUW256 [5] y x)) + // match: (LessUint16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPUW256 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualUint16x32(v *Value) bool { +func rewriteValueAMD64_OpLessUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := 
&b.Func.Config.Types - // match: (GreaterEqualUint16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPUW512 [5] y x)) + // match: (LessUint16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPUW512 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualUint16x8(v *Value) bool { +func rewriteValueAMD64_OpLessUint16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualUint16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPUW128 [5] y x)) + // match: (LessUint16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPUW128 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) - v.AddArg(v0) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualUint32x16(v *Value) bool { +func rewriteValueAMD64_OpLessUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualUint32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPUD512 [5] y x)) + // match: (LessUint32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualUint32x4(v *Value) bool { +func rewriteValueAMD64_OpLessUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualUint32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPUD128 [5] y x)) + // match: (LessUint32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPUD128 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualUint32x8(v *Value) bool { +func rewriteValueAMD64_OpLessUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualUint32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPUD256 [5] y x)) + // match: (LessUint32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPUD256 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualUint64x2(v *Value) bool { +func rewriteValueAMD64_OpLessUint64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualUint64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPUQ128 [5] y x)) + // match: (LessUint64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPUQ128 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualUint64x4(v *Value) bool { 
+func rewriteValueAMD64_OpLessUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualUint64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPUQ256 [5] y x)) + // match: (LessUint64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPUQ256 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualUint64x8(v *Value) bool { +func rewriteValueAMD64_OpLessUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualUint64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPUQ512 [5] y x)) + // match: (LessUint64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPUQ512 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualUint8x16(v *Value) bool { +func rewriteValueAMD64_OpLessUint8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualUint8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPUB128 [5] y x)) + // match: (LessUint8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPUB128 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualUint8x32(v *Value) bool { +func rewriteValueAMD64_OpLessUint8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualUint8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPUB256 [5] y x)) + // match: (LessUint8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPUB256 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualUint8x64(v *Value) bool { +func rewriteValueAMD64_OpLessUint8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualUint8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPUB512 [5] y x)) + // match: (LessUint8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpLoad(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterFloat32x16 x y) - // result: (VPMOVMToVec32x16 (VCMPPS512 [6] y x)) + // match: (Load ptr mem) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (MOVQload ptr mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v0.AddArg2(y, x) - v.AddArg(v0) + t := v.Type + ptr := v_0 + mem 
:= v_1 + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64MOVQload) + v.AddArg2(ptr, mem) return true } -} -func rewriteValueAMD64_OpGreaterFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GreaterFloat32x4 x y) - // result: (VCMPPS128 [6] y x) + // match: (Load ptr mem) + // cond: is32BitInt(t) + // result: (MOVLload ptr mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(6) - v.AddArg2(y, x) + t := v.Type + ptr := v_0 + mem := v_1 + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64MOVLload) + v.AddArg2(ptr, mem) return true } -} -func rewriteValueAMD64_OpGreaterFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GreaterFloat32x8 x y) - // result: (VCMPPS256 [6] y x) + // match: (Load ptr mem) + // cond: is16BitInt(t) + // result: (MOVWload ptr mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(6) - v.AddArg2(y, x) + t := v.Type + ptr := v_0 + mem := v_1 + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64MOVWload) + v.AddArg2(ptr, mem) return true } -} -func rewriteValueAMD64_OpGreaterFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GreaterFloat64x2 x y) - // result: (VCMPPD128 [6] y x) + // match: (Load ptr mem) + // cond: (t.IsBoolean() || is8BitInt(t)) + // result: (MOVBload ptr mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(6) - v.AddArg2(y, x) + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.IsBoolean() || is8BitInt(t)) { + break + } + v.reset(OpAMD64MOVBload) + v.AddArg2(ptr, mem) return true } -} -func rewriteValueAMD64_OpGreaterFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GreaterFloat64x4 x y) - // result: (VCMPPD256 [6] y x) + // match: (Load ptr mem) + // cond: is32BitFloat(t) + // result: (MOVSSload ptr mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(6) - v.AddArg2(y, x) + t := v.Type + ptr := v_0 + mem := v_1 + if !(is32BitFloat(t)) { + break + } + v.reset(OpAMD64MOVSSload) + v.AddArg2(ptr, mem) return true } -} -func rewriteValueAMD64_OpGreaterFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterFloat64x8 x y) - // result: (VPMOVMToVec64x8 (VCMPPD512 [6] y x)) + // match: (Load ptr mem) + // cond: is64BitFloat(t) + // result: (MOVSDload ptr mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v0.AddArg2(y, x) - v.AddArg(v0) + t := v.Type + ptr := v_0 + mem := v_1 + if !(is64BitFloat(t)) { + break + } + v.reset(OpAMD64MOVSDload) + v.AddArg2(ptr, mem) return true } -} -func rewriteValueAMD64_OpGreaterInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GreaterInt16x16 x y) - // result: (VPCMPGTW256 y x) + // match: (Load ptr mem) + // cond: t.Size() == 16 + // result: (VMOVDQUload128 ptr mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPCMPGTW256) - v.AddArg2(y, x) + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.Size() == 16) { + break + } + v.reset(OpAMD64VMOVDQUload128) + v.AddArg2(ptr, mem) return true } -} -func rewriteValueAMD64_OpGreaterInt16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterInt16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPGTW512 y x)) + // match: (Load ptr mem) + // cond: t.Size() == 32 + // result: 
(VMOVDQUload256 ptr mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTW512, typ.Mask) - v0.AddArg2(y, x) - v.AddArg(v0) + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.Size() == 32) { + break + } + v.reset(OpAMD64VMOVDQUload256) + v.AddArg2(ptr, mem) return true } -} -func rewriteValueAMD64_OpGreaterInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GreaterInt16x8 x y) - // result: (VPCMPGTW128 y x) + // match: (Load ptr mem) + // cond: t.Size() == 64 + // result: (VMOVDQUload512 ptr mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPCMPGTW128) - v.AddArg2(y, x) + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.Size() == 64) { + break + } + v.reset(OpAMD64VMOVDQUload512) + v.AddArg2(ptr, mem) return true } + return false } -func rewriteValueAMD64_OpGreaterInt32x16(v *Value) bool { +func rewriteValueAMD64_OpLocalAddr(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterInt32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPD512 [6] y x)) + // match: (LocalAddr {sym} base mem) + // cond: t.Elem().HasPointers() + // result: (LEAQ {sym} (SPanchored base mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v0.AddArg2(y, x) + t := v.Type + sym := auxToSym(v.Aux) + base := v_0 + mem := v_1 + if !(t.Elem().HasPointers()) { + break + } + v.reset(OpAMD64LEAQ) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpSPanchored, typ.Uintptr) + v0.AddArg2(base, mem) v.AddArg(v0) return true } + // match: (LocalAddr {sym} base _) + // cond: !t.Elem().HasPointers() + // result: (LEAQ {sym} base) + for { + t := v.Type + sym := auxToSym(v.Aux) + base := v_0 + if !(!t.Elem().HasPointers()) { + break + } + v.reset(OpAMD64LEAQ) + v.Aux = symToAux(sym) + v.AddArg(base) + return true + } + return false } -func rewriteValueAMD64_OpGreaterInt32x4(v *Value) bool { +func rewriteValueAMD64_OpLsh16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (GreaterInt32x4 x y) - // result: (VPCMPGTD128 y x) + b := v.Block + // match: (Lsh16x16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPCMPGTD128) - v.AddArg2(y, x) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpGreaterInt32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GreaterInt32x8 x y) - // result: (VPCMPGTD256 y x) + // match: (Lsh16x16 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPCMPGTD256) - v.AddArg2(y, x) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpGreaterInt64x2(v *Value) bool { +func rewriteValueAMD64_OpLsh16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterInt64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPGTQ128 y x)) + // match: (Lsh16x32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) for { + t := 
v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQ128, typ.Mask) - v0.AddArg2(y, x) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpGreaterInt64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GreaterInt64x4 x y) - // result: (VPCMPGTQ256 y x) + // match: (Lsh16x32 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPCMPGTQ256) - v.AddArg2(y, x) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpGreaterInt64x8(v *Value) bool { +func rewriteValueAMD64_OpLsh16x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterInt64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPGTQ512 y x)) + // match: (Lsh16x64 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQ512, typ.Mask) - v0.AddArg2(y, x) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpGreaterInt8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GreaterInt8x16 x y) - // result: (VPCMPGTB128 y x) + // match: (Lsh16x64 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPCMPGTB128) - v.AddArg2(y, x) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpGreaterInt8x32(v *Value) bool { +func rewriteValueAMD64_OpLsh16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (GreaterInt8x32 x y) - // result: (VPCMPGTB256 y x) + b := v.Block + // match: (Lsh16x8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPCMPGTB256) - v.AddArg2(y, x) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpGreaterInt8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterInt8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPB512 [6] y x)) + // match: (Lsh16x8 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v0.AddArg2(y, x) - v.AddArg(v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + 
v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpGreaterUint16x16(v *Value) bool { +func rewriteValueAMD64_OpLsh32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPUW256 [6] y x)) + // match: (Lsh32x16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v0.AddArg2(y, x) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpGreaterUint16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPUW512 [6] y x)) + // match: (Lsh32x16 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v0.AddArg2(y, x) - v.AddArg(v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpGreaterUint16x8(v *Value) bool { +func rewriteValueAMD64_OpLsh32x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPUW128 [6] y x)) + // match: (Lsh32x32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v0.AddArg2(y, x) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterUint32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPUD512 [6] y x)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v0.AddArg2(y, x) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpGreaterUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPUD128 [6] y x)) + // match: (Lsh32x32 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v0.AddArg2(y, x) - v.AddArg(v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func 
rewriteValueAMD64_OpGreaterUint32x8(v *Value) bool { +func rewriteValueAMD64_OpLsh32x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPUD256 [6] y x)) + // match: (Lsh32x64 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v0.AddArg2(y, x) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpGreaterUint64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPUQ128 [6] y x)) + // match: (Lsh32x64 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v0.AddArg2(y, x) - v.AddArg(v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpGreaterUint64x4(v *Value) bool { +func rewriteValueAMD64_OpLsh32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPUQ256 [6] y x)) + // match: (Lsh32x8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v0.AddArg2(y, x) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpGreaterUint64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPUQ512 [6] y x)) + // match: (Lsh32x8 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v0.AddArg2(y, x) - v.AddArg(v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpGreaterUint8x16(v *Value) bool { +func rewriteValueAMD64_OpLsh64x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPUB128 [6] y x)) + // match: (Lsh64x16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPWconst y [64]))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v0.AddArg2(y, x) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpGreaterUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPUB256 [6] y x)) + // match: (Lsh64x16 x y) + // cond: shiftIsBounded(v) + // result: (SHLQ x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v0.AddArg2(y, x) - v.AddArg(v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLQ) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpGreaterUint8x64(v *Value) bool { +func rewriteValueAMD64_OpLsh64x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPUB512 [6] y x)) + // match: (Lsh64x32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPLconst y [64]))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v0.AddArg2(y, x) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpHasCPUFeature(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (HasCPUFeature {s}) - // result: (SETNE (CMPLconst [0] (LoweredHasCPUFeature {s}))) + // match: (Lsh64x32 x y) + // cond: shiftIsBounded(v) + // result: (SHLQ x y) for { - s := auxToSym(v.Aux) - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v0.AuxInt = int32ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64LoweredHasCPUFeature, typ.UInt64) - v1.Aux = symToAux(s) - v0.AddArg(v1) - v.AddArg(v0) + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLQ) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpIsInBounds(v *Value) bool { +func rewriteValueAMD64_OpLsh64x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (IsInBounds idx len) - // result: (SETB (CMPQ idx len)) + // match: (Lsh64x64 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPQconst y [64]))) for { - idx := v_0 - len := v_1 - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(idx, len) - v.AddArg(v0) + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpIsNanFloat32x16(v 
*Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (IsNanFloat32x16 x y) - // result: (VPMOVMToVec32x16 (VCMPPS512 [3] y x)) + // match: (Lsh64x64 x y) + // cond: shiftIsBounded(v) + // result: (SHLQ x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) - v0.AddArg2(y, x) - v.AddArg(v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLQ) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpIsNanFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpLsh64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (IsNanFloat32x4 x y) - // result: (VCMPPS128 [3] y x) + b := v.Block + // match: (Lsh64x8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPBconst y [64]))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(3) - v.AddArg2(y, x) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpIsNanFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (IsNanFloat32x8 x y) - // result: (VCMPPS256 [3] y x) + // match: (Lsh64x8 x y) + // cond: shiftIsBounded(v) + // result: (SHLQ x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(3) - v.AddArg2(y, x) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLQ) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpIsNanFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpLsh8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (IsNanFloat64x2 x y) - // result: (VCMPPD128 [3] y x) + b := v.Block + // match: (Lsh8x16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(3) - v.AddArg2(y, x) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpIsNanFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (IsNanFloat64x4 x y) - // result: (VCMPPD256 [3] y x) + // match: (Lsh8x16 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(3) - v.AddArg2(y, x) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpIsNanFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpLsh8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (IsNanFloat64x8 x y) - // result: (VPMOVMToVec64x8 (VCMPPD512 [3] y x)) + // match: (Lsh8x32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, 
OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) - v0.AddArg2(y, x) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpIsNonNil(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - // match: (IsNonNil p) - // result: (SETNE (TESTQ p p)) + // match: (Lsh8x32 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { - p := v_0 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags) - v0.AddArg2(p, p) - v.AddArg(v0) + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpIsSliceInBounds(v *Value) bool { +func rewriteValueAMD64_OpLsh8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (IsSliceInBounds idx len) - // result: (SETBE (CMPQ idx len)) + // match: (Lsh8x64 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) for { - idx := v_0 - len := v_1 - v.reset(OpAMD64SETBE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(idx, len) - v.AddArg(v0) + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpLeq16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Leq16 x y) - // result: (SETLE (CMPW x y)) + // match: (Lsh8x64 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - v.reset(OpAMD64SETLE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpLeq16U(v *Value) bool { +func rewriteValueAMD64_OpLsh8x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Leq16U x y) - // result: (SETBE (CMPW x y)) + // match: (Lsh8x8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64SETBE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) v0.AddArg2(x, y) - v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpLeq32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Leq32 x y) - // result: (SETLE (CMPL x y)) + // match: (Lsh8x8 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - v.reset(OpAMD64SETLE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true 
} + return false } -func rewriteValueAMD64_OpLeq32F(v *Value) bool { +func rewriteValueAMD64_OpMaskedAbsoluteInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Leq32F x y) - // result: (SETGEF (UCOMISS y x)) + // match: (MaskedAbsoluteInt16x16 x mask) + // result: (VPABSWMasked256 x (VPMOVVec16x16ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETGEF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VPABSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLeq32U(v *Value) bool { +func rewriteValueAMD64_OpMaskedAbsoluteInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Leq32U x y) - // result: (SETBE (CMPL x y)) + // match: (MaskedAbsoluteInt16x32 x mask) + // result: (VPABSWMasked512 x (VPMOVVec16x32ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETBE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VPABSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLeq64(v *Value) bool { +func rewriteValueAMD64_OpMaskedAbsoluteInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Leq64 x y) - // result: (SETLE (CMPQ x y)) + // match: (MaskedAbsoluteInt16x8 x mask) + // result: (VPABSWMasked128 x (VPMOVVec16x8ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETLE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VPABSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLeq64F(v *Value) bool { +func rewriteValueAMD64_OpMaskedAbsoluteInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Leq64F x y) - // result: (SETGEF (UCOMISD y x)) + // match: (MaskedAbsoluteInt32x16 x mask) + // result: (VPABSDMasked512 x (VPMOVVec32x16ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETGEF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VPABSDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLeq64U(v *Value) bool { +func rewriteValueAMD64_OpMaskedAbsoluteInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Leq64U x y) - // result: (SETBE (CMPQ x y)) + // match: (MaskedAbsoluteInt32x4 x mask) + // result: (VPABSDMasked128 x (VPMOVVec32x4ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETBE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VPABSDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLeq8(v *Value) bool { +func rewriteValueAMD64_OpMaskedAbsoluteInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Leq8 x y) - // result: (SETLE (CMPB x y)) + // match: (MaskedAbsoluteInt32x8 x mask) + // result: (VPABSDMasked256 x (VPMOVVec32x8ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETLE) - v0 := 
b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VPABSDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLeq8U(v *Value) bool { +func rewriteValueAMD64_OpMaskedAbsoluteInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Leq8U x y) - // result: (SETBE (CMPB x y)) + // match: (MaskedAbsoluteInt64x2 x mask) + // result: (VPABSQMasked128 x (VPMOVVec64x2ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETBE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VPABSQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLess16(v *Value) bool { +func rewriteValueAMD64_OpMaskedAbsoluteInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less16 x y) - // result: (SETL (CMPW x y)) + // match: (MaskedAbsoluteInt64x4 x mask) + // result: (VPABSQMasked256 x (VPMOVVec64x4ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETL) - v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VPABSQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLess16U(v *Value) bool { +func rewriteValueAMD64_OpMaskedAbsoluteInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less16U x y) - // result: (SETB (CMPW x y)) + // match: (MaskedAbsoluteInt64x8 x mask) + // result: (VPABSQMasked512 x (VPMOVVec64x8ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VPABSQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLess32(v *Value) bool { +func rewriteValueAMD64_OpMaskedAbsoluteInt8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less32 x y) - // result: (SETL (CMPL x y)) + // match: (MaskedAbsoluteInt8x16 x mask) + // result: (VPABSBMasked128 x (VPMOVVec8x16ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETL) - v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VPABSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLess32F(v *Value) bool { +func rewriteValueAMD64_OpMaskedAbsoluteInt8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less32F x y) - // result: (SETGF (UCOMISS y x)) + // match: (MaskedAbsoluteInt8x32 x mask) + // result: (VPABSBMasked256 x (VPMOVVec8x32ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETGF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VPABSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLess32U(v *Value) bool { +func rewriteValueAMD64_OpMaskedAbsoluteInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - 
// match: (Less32U x y) - // result: (SETB (CMPL x y)) + // match: (MaskedAbsoluteInt8x64 x mask) + // result: (VPABSBMasked512 x (VPMOVVec8x64ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VPABSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLess64(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddFloat32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less64 x y) - // result: (SETL (CMPQ x y)) + // match: (MaskedAddFloat32x16 x y mask) + // result: (VADDPSMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETL) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VADDPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLess64F(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddFloat32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less64F x y) - // result: (SETGF (UCOMISD y x)) + // match: (MaskedAddFloat32x4 x y mask) + // result: (VADDPSMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETGF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VADDPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLess64U(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddFloat32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less64U x y) - // result: (SETB (CMPQ x y)) + // match: (MaskedAddFloat32x8 x y mask) + // result: (VADDPSMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VADDPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLess8(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddFloat64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less8 x y) - // result: (SETL (CMPB x y)) + // match: (MaskedAddFloat64x2 x y mask) + // result: (VADDPDMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETL) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VADDPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLess8U(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddFloat64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less8U x y) - // result: (SETB (CMPB x y)) + // match: (MaskedAddFloat64x4 x y mask) + // result: (VADDPDMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + 
v.reset(OpAMD64VADDPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddFloat64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualFloat32x16 x y) - // result: (VPMOVMToVec32x16 (VCMPPS512 [2] y x)) + // match: (MaskedAddFloat64x8 x y mask) + // result: (VADDPDMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VADDPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddInt16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (LessEqualFloat32x4 x y) - // result: (VCMPPS128 [2] y x) + b := v.Block + // match: (MaskedAddInt16x16 x y mask) + // result: (VPADDWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(2) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPADDWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddInt16x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (LessEqualFloat32x8 x y) - // result: (VCMPPS256 [2] y x) + b := v.Block + // match: (MaskedAddInt16x32 x y mask) + // result: (VPADDWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(2) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPADDWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddInt16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (LessEqualFloat64x2 x y) - // result: (VCMPPD128 [2] y x) + b := v.Block + // match: (MaskedAddInt16x8 x y mask) + // result: (VPADDWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(2) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPADDWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddInt32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (LessEqualFloat64x4 x y) - // result: (VCMPPD256 [2] y x) + b := v.Block + // match: (MaskedAddInt32x16 x y mask) + // result: (VPADDDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(2) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPADDDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddInt32x4(v *Value) 
bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualFloat64x8 x y) - // result: (VPMOVMToVec64x8 (VCMPPD512 [2] y x)) + // match: (MaskedAddInt32x4 x y mask) + // result: (VPADDDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualInt16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddInt32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPW256 [2] y x)) + // match: (MaskedAddInt32x8 x y mask) + // result: (VPADDDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddInt64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPW512 [2] y x)) + // match: (MaskedAddInt64x2 x y mask) + // result: (VPADDQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddInt64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPW128 [2] y x)) + // match: (MaskedAddInt64x4 x y mask) + // result: (VPADDQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualInt32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddInt64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPD512 [2] y x)) + // match: (MaskedAddInt64x8 x y mask) + // result: (VPADDQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDQMasked512) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddInt8x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPD128 [2] y x)) + // match: (MaskedAddInt8x16 x y mask) + // result: (VPADDBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddInt8x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPD256 [2] y x)) + // match: (MaskedAddInt8x32 x y mask) + // result: (VPADDBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddInt8x64(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPQ128 [2] y x)) + // match: (MaskedAddInt8x64 x y mask) + // result: (VPADDBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddUint16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPQ256 [2] y x)) + // match: (MaskedAddUint16x16 x y mask) + // result: (VPADDWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddUint16x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPQ512 [2] y x)) + // match: (MaskedAddUint16x32 x y mask) + // result: (VPADDWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 - 
v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualInt8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddUint16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPB128 [2] y x)) + // match: (MaskedAddUint16x8 x y mask) + // result: (VPADDWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualInt8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddUint32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPB256 [2] y x)) + // match: (MaskedAddUint32x16 x y mask) + // result: (VPADDDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualInt8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddUint32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPB512 [2] y x)) + // match: (MaskedAddUint32x4 x y mask) + // result: (VPADDDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualUint16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddUint32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPUW256 [2] y x)) + // match: (MaskedAddUint32x8 x y mask) + // result: (VPADDDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualUint16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddUint64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types 
- // match: (LessEqualUint16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPUW512 [2] y x)) + // match: (MaskedAddUint64x2 x y mask) + // result: (VPADDQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualUint16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddUint64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPUW128 [2] y x)) + // match: (MaskedAddUint64x4 x y mask) + // result: (VPADDQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualUint32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddUint64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPUD512 [2] y x)) + // match: (MaskedAddUint64x8 x y mask) + // result: (VPADDQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualUint32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddUint8x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPUD128 [2] y x)) + // match: (MaskedAddUint8x16 x y mask) + // result: (VPADDBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualUint32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddUint8x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPUD256 [2] y x)) + // match: (MaskedAddUint8x32 x y mask) + // result: (VPADDBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true 
} } -func rewriteValueAMD64_OpLessEqualUint64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddUint8x64(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPUQ128 [2] y x)) + // match: (MaskedAddUint8x64 x y mask) + // result: (VPADDBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualUint64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndFloat32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPUQ256 [2] y x)) + // match: (MaskedAndFloat32x16 x y mask) + // result: (VANDPSMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VANDPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualUint64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndFloat32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPUQ512 [2] y x)) + // match: (MaskedAndFloat32x4 x y mask) + // result: (VANDPSMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VANDPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualUint8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndFloat32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPUB128 [2] y x)) + // match: (MaskedAndFloat32x8 x y mask) + // result: (VANDPSMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VANDPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualUint8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndFloat64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPUB256 [2] y x)) + // match: (MaskedAndFloat64x2 x y mask) + // result: (VANDPDMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - 
v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VANDPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualUint8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndFloat64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPUB512 [2] y x)) + // match: (MaskedAndFloat64x4 x y mask) + // result: (VANDPDMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VANDPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndFloat64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessFloat32x16 x y) - // result: (VPMOVMToVec32x16 (VCMPPS512 [1] y x)) + // match: (MaskedAndFloat64x8 x y mask) + // result: (VANDPDMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VANDPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndInt32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (LessFloat32x4 x y) - // result: (VCMPPS128 [1] y x) + b := v.Block + // match: (MaskedAndInt32x16 x y mask) + // result: (VPANDDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(1) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPANDDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndInt32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (LessFloat32x8 x y) - // result: (VCMPPS256 [1] y x) + b := v.Block + // match: (MaskedAndInt32x4 x y mask) + // result: (VPANDDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(1) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPANDDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndInt32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (LessFloat64x2 x y) - // result: (VCMPPD128 [1] y x) + b := v.Block + // match: (MaskedAndInt32x8 x y mask) + // result: (VPANDDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(1) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPANDDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + 
v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndInt64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (LessFloat64x4 x y) - // result: (VCMPPD256 [1] y x) + b := v.Block + // match: (MaskedAndInt64x2 x y mask) + // result: (VPANDQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(1) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPANDQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndInt64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessFloat64x8 x y) - // result: (VPMOVMToVec64x8 (VCMPPD512 [1] y x)) + // match: (MaskedAndInt64x4 x y mask) + // result: (VPANDQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPANDQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessInt16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndInt64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessInt16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPW256 [1] y x)) + // match: (MaskedAndInt64x8 x y mask) + // result: (VPANDQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPANDQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndNotFloat32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessInt16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPW512 [1] y x)) + // match: (MaskedAndNotFloat32x16 x y mask) + // result: (VANDNPSMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VANDNPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndNotFloat32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessInt16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPW128 [1] y x)) + // match: (MaskedAndNotFloat32x4 x y mask) + // result: (VANDNPSMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VANDNPSMasked128) + v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessInt32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndNotFloat32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessInt32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPD512 [1] y x)) + // match: (MaskedAndNotFloat32x8 x y mask) + // result: (VANDNPSMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VANDNPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndNotFloat64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessInt32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPD128 [1] y x)) + // match: (MaskedAndNotFloat64x2 x y mask) + // result: (VANDNPDMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VANDNPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndNotFloat64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessInt32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPD256 [1] y x)) + // match: (MaskedAndNotFloat64x4 x y mask) + // result: (VANDNPDMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VANDNPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndNotFloat64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessInt64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPQ128 [1] y x)) + // match: (MaskedAndNotFloat64x8 x y mask) + // result: (VANDNPDMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VANDNPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndNotInt32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessInt64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPQ256 [1] y x)) + // match: (MaskedAndNotInt32x16 x y mask) + // result: (VPANDNDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - 
v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPANDNDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndNotInt32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessInt64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPQ512 [1] y x)) + // match: (MaskedAndNotInt32x4 x y mask) + // result: (VPANDNDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPANDNDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessInt8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndNotInt32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessInt8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPB128 [1] y x)) + // match: (MaskedAndNotInt32x8 x y mask) + // result: (VPANDNDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPANDNDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessInt8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndNotInt64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessInt8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPB256 [1] y x)) + // match: (MaskedAndNotInt64x2 x y mask) + // result: (VPANDNQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPANDNQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessInt8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndNotInt64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessInt8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPB512 [1] y x)) + // match: (MaskedAndNotInt64x4 x y mask) + // result: (VPANDNQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPANDNQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessUint16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndNotInt64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessUint16x16 x 
y) - // result: (VPMOVMToVec16x16 (VPCMPUW256 [1] y x)) + // match: (MaskedAndNotInt64x8 x y mask) + // result: (VPANDNQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessUint16x32(v *Value) bool { + mask := v_2 + v.reset(OpAMD64VPANDNQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndNotUint32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessUint16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPUW512 [1] y x)) + // match: (MaskedAndNotUint32x16 x y mask) + // result: (VPANDNDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPANDNDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessUint16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndNotUint32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessUint16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPUW128 [1] y x)) + // match: (MaskedAndNotUint32x4 x y mask) + // result: (VPANDNDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPANDNDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessUint32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndNotUint32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessUint32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPUD512 [1] y x)) + // match: (MaskedAndNotUint32x8 x y mask) + // result: (VPANDNDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPANDNDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessUint32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndNotUint64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessUint32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPUD128 [1] y x)) + // match: (MaskedAndNotUint64x2 x y mask) + // result: (VPANDNQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPANDNQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } 
-func rewriteValueAMD64_OpLessUint32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndNotUint64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessUint32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPUD256 [1] y x)) + // match: (MaskedAndNotUint64x4 x y mask) + // result: (VPANDNQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPANDNQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessUint64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndNotUint64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessUint64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPUQ128 [1] y x)) + // match: (MaskedAndNotUint64x8 x y mask) + // result: (VPANDNQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPANDNQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessUint64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndUint32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessUint64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPUQ256 [1] y x)) + // match: (MaskedAndUint32x16 x y mask) + // result: (VPANDDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPANDDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessUint64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndUint32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessUint64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPUQ512 [1] y x)) + // match: (MaskedAndUint32x4 x y mask) + // result: (VPANDDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPANDDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessUint8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndUint32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessUint8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPUB128 [1] y x)) + // match: (MaskedAndUint32x8 x y mask) + // result: (VPANDDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) 
- v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPANDDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessUint8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndUint64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessUint8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPUB256 [1] y x)) + // match: (MaskedAndUint64x2 x y mask) + // result: (VPANDQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPANDQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessUint8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndUint64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessUint8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPUB512 [1] y x)) + // match: (MaskedAndUint64x4 x y mask) + // result: (VPANDQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPANDQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLoad(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndUint64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Load ptr mem) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (MOVQload ptr mem) + b := v.Block + // match: (MaskedAndUint64x8 x y mask) + // result: (VPANDQMasked512 x y (VPMOVVec64x8ToM mask)) for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64MOVQload) - v.AddArg2(ptr, mem) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPANDQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Load ptr mem) - // cond: is32BitInt(t) - // result: (MOVLload ptr mem) +} +func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedApproximateReciprocalFloat32x16 x mask) + // result: (VRCP14PSMasked512 x (VPMOVVec32x16ToM mask)) for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64MOVLload) - v.AddArg2(ptr, mem) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRCP14PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - // match: (Load ptr mem) - // cond: is16BitInt(t) - // result: (MOVWload ptr mem) +} +func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedApproximateReciprocalFloat32x4 x mask) + // result: (VRCP14PSMasked128 x (VPMOVVec32x4ToM mask)) for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64MOVWload) - v.AddArg2(ptr, mem) + x := v_0 + mask := v_1 + 
v.reset(OpAMD64VRCP14PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - // match: (Load ptr mem) - // cond: (t.IsBoolean() || is8BitInt(t)) - // result: (MOVBload ptr mem) +} +func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedApproximateReciprocalFloat32x8 x mask) + // result: (VRCP14PSMasked256 x (VPMOVVec32x8ToM mask)) for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(t.IsBoolean() || is8BitInt(t)) { - break - } - v.reset(OpAMD64MOVBload) - v.AddArg2(ptr, mem) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRCP14PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - // match: (Load ptr mem) - // cond: is32BitFloat(t) - // result: (MOVSSload ptr mem) +} +func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedApproximateReciprocalFloat64x2 x mask) + // result: (VRCP14PDMasked128 x (VPMOVVec64x2ToM mask)) for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(is32BitFloat(t)) { - break - } - v.reset(OpAMD64MOVSSload) - v.AddArg2(ptr, mem) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRCP14PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - // match: (Load ptr mem) - // cond: is64BitFloat(t) - // result: (MOVSDload ptr mem) +} +func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedApproximateReciprocalFloat64x4 x mask) + // result: (VRCP14PDMasked256 x (VPMOVVec64x4ToM mask)) for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(is64BitFloat(t)) { - break - } - v.reset(OpAMD64MOVSDload) - v.AddArg2(ptr, mem) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRCP14PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - // match: (Load ptr mem) - // cond: t.Size() == 16 - // result: (VMOVDQUload128 ptr mem) +} +func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedApproximateReciprocalFloat64x8 x mask) + // result: (VRCP14PDMasked512 x (VPMOVVec64x8ToM mask)) for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(t.Size() == 16) { - break - } - v.reset(OpAMD64VMOVDQUload128) - v.AddArg2(ptr, mem) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRCP14PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - // match: (Load ptr mem) - // cond: t.Size() == 32 - // result: (VMOVDQUload256 ptr mem) +} +func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedApproximateReciprocalOfSqrtFloat32x16 x mask) + // result: (VRSQRT14PSMasked512 x (VPMOVVec32x16ToM mask)) for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(t.Size() == 32) { - break - } - v.reset(OpAMD64VMOVDQUload256) - v.AddArg2(ptr, mem) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRSQRT14PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - // match: (Load ptr mem) - // cond: t.Size() == 64 - // result: 
(VMOVDQUload512 ptr mem) +} +func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedApproximateReciprocalOfSqrtFloat32x4 x mask) + // result: (VRSQRT14PSMasked128 x (VPMOVVec32x4ToM mask)) for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(t.Size() == 64) { - break - } - v.reset(OpAMD64VMOVDQUload512) - v.AddArg2(ptr, mem) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRSQRT14PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - return false } -func rewriteValueAMD64_OpLocalAddr(v *Value) bool { +func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LocalAddr {sym} base mem) - // cond: t.Elem().HasPointers() - // result: (LEAQ {sym} (SPanchored base mem)) + // match: (MaskedApproximateReciprocalOfSqrtFloat32x8 x mask) + // result: (VRSQRT14PSMasked256 x (VPMOVVec32x8ToM mask)) for { - t := v.Type - sym := auxToSym(v.Aux) - base := v_0 - mem := v_1 - if !(t.Elem().HasPointers()) { - break - } - v.reset(OpAMD64LEAQ) - v.Aux = symToAux(sym) - v0 := b.NewValue0(v.Pos, OpSPanchored, typ.Uintptr) - v0.AddArg2(base, mem) - v.AddArg(v0) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRSQRT14PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - // match: (LocalAddr {sym} base _) - // cond: !t.Elem().HasPointers() - // result: (LEAQ {sym} base) +} +func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedApproximateReciprocalOfSqrtFloat64x2 x mask) + // result: (VRSQRT14PDMasked128 x (VPMOVVec64x2ToM mask)) for { - t := v.Type - sym := auxToSym(v.Aux) - base := v_0 - if !(!t.Elem().HasPointers()) { - break - } - v.reset(OpAMD64LEAQ) - v.Aux = symToAux(sym) - v.AddArg(base) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRSQRT14PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - return false } -func rewriteValueAMD64_OpLsh16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh16x16 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) + // match: (MaskedApproximateReciprocalOfSqrtFloat64x4 x mask) + // result: (VRSQRT14PDMasked256 x (VPMOVVec64x4ToM mask)) for { - t := v.Type x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v2.AuxInt = int16ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_1 + v.reset(OpAMD64VRSQRT14PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - // match: (Lsh16x16 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedApproximateReciprocalOfSqrtFloat64x8 x mask) + // result: (VRSQRT14PDMasked512 x 
(VPMOVVec64x8ToM mask)) for { x := v_0 - y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + mask := v_1 + v.reset(OpAMD64VRSQRT14PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - return false } -func rewriteValueAMD64_OpLsh16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedAverageUint16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh16x32 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) + // match: (MaskedAverageUint16x16 x y mask) + // result: (VPAVGWMasked256 x y (VPMOVVec16x16ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VPAVGWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Lsh16x32 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpMaskedAverageUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAverageUint16x32 x y mask) + // result: (VPAVGWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPAVGWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpLsh16x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedAverageUint16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh16x64 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) + // match: (MaskedAverageUint16x8 x y mask) + // result: (VPAVGWMasked128 x y (VPMOVVec16x8ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VPAVGWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Lsh16x64 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpMaskedAverageUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAverageUint8x16 x y mask) + // result: (VPAVGBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPAVGBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpLsh16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedAverageUint8x32(v *Value) bool { + v_2 
:= v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh16x8 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) + // match: (MaskedAverageUint8x32 x y mask) + // result: (VPAVGBMasked256 x y (VPMOVVec8x32ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v2.AuxInt = int8ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VPAVGBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Lsh16x8 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpMaskedAverageUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAverageUint8x64 x y mask) + // result: (VPAVGBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPAVGBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpLsh32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedDivFloat32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh32x16 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) + // match: (MaskedDivFloat32x16 x y mask) + // result: (VDIVPSMasked512 x y (VPMOVVec32x16ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v2.AuxInt = int16ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VDIVPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Lsh32x16 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpMaskedDivFloat32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDivFloat32x4 x y mask) + // result: (VDIVPSMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VDIVPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpLsh32x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedDivFloat32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh32x32 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) + // match: (MaskedDivFloat32x8 x y mask) + // result: (VDIVPSMasked256 x y (VPMOVVec32x8ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := 
b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VDIVPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Lsh32x32 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpMaskedDivFloat64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDivFloat64x2 x y mask) + // result: (VDIVPDMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VDIVPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpLsh32x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedDivFloat64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh32x64 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) + // match: (MaskedDivFloat64x4 x y mask) + // result: (VDIVPDMasked256 x y (VPMOVVec64x4ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VDIVPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Lsh32x64 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpMaskedDivFloat64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDivFloat64x8 x y mask) + // result: (VDIVPDMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VDIVPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpLsh32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualFloat32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh32x8 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) + typ := &b.Func.Config.Types + // match: (MaskedEqualFloat32x16 x y mask) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [0] x y (VPMOVVec32x16ToM mask))) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v2.AuxInt = int8ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) 
+ v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } - // match: (Lsh32x8 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpMaskedEqualFloat32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualFloat32x4 x y mask) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [0] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpLsh64x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualFloat32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh64x16 x y) - // cond: !shiftIsBounded(v) - // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPWconst y [64]))) + typ := &b.Func.Config.Types + // match: (MaskedEqualFloat32x8 x y mask) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [0] x y (VPMOVVec32x8ToM mask))) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDQ) - v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v2.AuxInt = int16ToAuxInt(64) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } - // match: (Lsh64x16 x y) - // cond: shiftIsBounded(v) - // result: (SHLQ x y) +} +func rewriteValueAMD64_OpMaskedEqualFloat64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualFloat64x2 x y mask) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [0] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLQ) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpLsh64x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualFloat64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh64x32 x y) - // cond: !shiftIsBounded(v) - // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPLconst y [64]))) + typ := &b.Func.Config.Types + // match: (MaskedEqualFloat64x4 x y mask) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [0] x y (VPMOVVec64x4ToM mask))) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDQ) - v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(64) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + 
v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } - // match: (Lsh64x32 x y) - // cond: shiftIsBounded(v) - // result: (SHLQ x y) +} +func rewriteValueAMD64_OpMaskedEqualFloat64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualFloat64x8 x y mask) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [0] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLQ) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpLsh64x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualInt16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh64x64 x y) - // cond: !shiftIsBounded(v) - // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPQconst y [64]))) + typ := &b.Func.Config.Types + // match: (MaskedEqualInt16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPEQWMasked256 x y (VPMOVVec16x16ToM mask))) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDQ) - v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(64) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQWMasked256, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } - // match: (Lsh64x64 x y) - // cond: shiftIsBounded(v) - // result: (SHLQ x y) +} +func rewriteValueAMD64_OpMaskedEqualInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualInt16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPEQWMasked512 x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLQ) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQWMasked512, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpLsh64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualInt16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh64x8 x y) - // cond: !shiftIsBounded(v) - // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPBconst y [64]))) + typ := &b.Func.Config.Types + // match: (MaskedEqualInt16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPEQWMasked128 x y (VPMOVVec16x8ToM mask))) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDQ) - v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, 
types.TypeFlags) - v2.AuxInt = int8ToAuxInt(64) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQWMasked128, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } - // match: (Lsh64x8 x y) - // cond: shiftIsBounded(v) - // result: (SHLQ x y) - for { - x := v_0 - y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLQ) - v.AddArg2(x, y) - return true - } - return false } -func rewriteValueAMD64_OpLsh8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualInt32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh8x16 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) - for { - t := v.Type - x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v2.AuxInt = int16ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) - return true - } - // match: (Lsh8x16 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) + typ := &b.Func.Config.Types + // match: (MaskedEqualInt32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPEQDMasked512 x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQDMasked512, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpLsh8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualInt32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh8x32 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) - for { - t := v.Type - x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) - return true - } - // match: (Lsh8x32 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) + typ := &b.Func.Config.Types + // match: (MaskedEqualInt32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPEQDMasked128 x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQDMasked128, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpLsh8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualInt32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh8x64 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) - for { - t := v.Type - x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - 
v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) - return true - } - // match: (Lsh8x64 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) + typ := &b.Func.Config.Types + // match: (MaskedEqualInt32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPEQDMasked256 x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQDMasked256, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpLsh8x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualInt64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh8x8 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) + typ := &b.Func.Config.Types + // match: (MaskedEqualInt64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPEQQMasked128 x y (VPMOVVec64x2ToM mask))) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v2.AuxInt = int8ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQQMasked128, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } - // match: (Lsh8x8 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpMaskedEqualInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualInt64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPEQQMasked256 x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQQMasked256, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpMaskedAbsoluteInt16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualInt64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt16x16 x mask) - // result: (VPABSWMasked256 x (VPMOVVec16x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedEqualInt64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPEQQMasked512 x y (VPMOVVec64x8ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQQMasked512, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return 
true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualInt8x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt16x32 x mask) - // result: (VPABSWMasked512 x (VPMOVVec16x32ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedEqualInt8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPEQBMasked128 x y (VPMOVVec8x16ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQBMasked128, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualInt8x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt16x8 x mask) - // result: (VPABSWMasked128 x (VPMOVVec16x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedEqualInt8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPEQBMasked256 x y (VPMOVVec8x32ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQBMasked256, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualInt8x64(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt32x16 x mask) - // result: (VPABSDMasked512 x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedEqualInt8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPEQBMasked512 x y (VPMOVVec8x64ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQBMasked512, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualUint16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt32x4 x mask) - // result: (VPABSDMasked128 x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedEqualUint16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [0] x y (VPMOVVec16x16ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func 
rewriteValueAMD64_OpMaskedAbsoluteInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualUint16x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt32x8 x mask) - // result: (VPABSDMasked256 x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedEqualUint16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [0] x y (VPMOVVec16x32ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualUint16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt64x2 x mask) - // result: (VPABSQMasked128 x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedEqualUint16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [0] x y (VPMOVVec16x8ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualUint32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt64x4 x mask) - // result: (VPABSQMasked256 x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedEqualUint32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [0] x y (VPMOVVec32x16ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualUint32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt64x8 x mask) - // result: (VPABSQMasked512 x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedEqualUint32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [0] x y (VPMOVVec32x4ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + 
v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualUint32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt8x16 x mask) - // result: (VPABSBMasked128 x (VPMOVVec8x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedEqualUint32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [0] x y (VPMOVVec32x8ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualUint64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt8x32 x mask) - // result: (VPABSBMasked256 x (VPMOVVec8x32ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedEqualUint64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [0] x y (VPMOVVec64x2ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualUint64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt8x64 x mask) - // result: (VPABSBMasked512 x (VPMOVVec8x64ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedEqualUint64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [0] x y (VPMOVVec64x4ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddFloat32x16 x y mask) - // result: (VADDPSMasked512 y x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedEqualUint64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [0] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VADDPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddFloat32x4 x y mask) - // result: (VADDPSMasked128 y x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedEqualUint8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [0] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VADDPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddFloat32x8 x y mask) - // result: (VADDPSMasked256 y x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedEqualUint8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [0] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VADDPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualUint8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddFloat64x2 x y mask) - // result: (VADDPDMasked128 y x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedEqualUint8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [0] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VADDPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddFloat64x4 x y mask) - // result: (VADDPDMasked256 y x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualFloat32x16 x y mask) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [5] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VADDPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + 
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualFloat32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddFloat64x8 x y mask) - // result: (VADDPDMasked512 y x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualFloat32x4 x y mask) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [5] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VADDPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddInt16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualFloat32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddInt16x16 x y mask) - // result: (VPADDWMasked256 y x (VPMOVVec16x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualFloat32x8 x y mask) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [5] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualFloat64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddInt16x32 x y mask) - // result: (VPADDWMasked512 y x (VPMOVVec16x32ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualFloat64x2 x y mask) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [5] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualFloat64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddInt16x8 x y mask) - // result: (VPADDWMasked128 y x (VPMOVVec16x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualFloat64x4 x y mask) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [5] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, 
typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddInt32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualFloat64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddInt32x16 x y mask) - // result: (VPADDDMasked512 y x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualFloat64x8 x y mask) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [5] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddInt32x4 x y mask) - // result: (VPADDDMasked128 y x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [5] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddInt32x8 x y mask) - // result: (VPADDDMasked256 y x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [5] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddInt64x2 x y mask) - // result: (VPADDQMasked128 y x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [5] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := 
b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddInt64x4 x y mask) - // result: (VPADDQMasked256 y x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [5] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddInt64x8 x y mask) - // result: (VPADDQMasked512 y x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [5] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddInt8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddInt8x16 x y mask) - // result: (VPADDBMasked128 y x (VPMOVVec8x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [5] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddInt8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddInt8x32 x y mask) - // result: (VPADDBMasked256 y x (VPMOVVec8x32ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [5] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + 
v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddInt8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddInt8x64 x y mask) - // result: (VPADDBMasked512 y x (VPMOVVec8x64ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [5] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint16x16 x y mask) - // result: (VPADDWMasked256 y x (VPMOVVec16x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [5] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualInt8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint16x32 x y mask) - // result: (VPADDWMasked512 y x (VPMOVVec16x32ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [5] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualInt8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint16x8 x y mask) - // result: (VPADDWMasked128 y x (VPMOVVec16x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [5] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) 
- v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualInt8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint32x16 x y mask) - // result: (VPADDDMasked512 y x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [5] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint32x4 x y mask) - // result: (VPADDDMasked128 y x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [5] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualUint16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint32x8 x y mask) - // result: (VPADDDMasked256 y x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [5] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualUint16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint64x2 x y mask) - // result: (VPADDQMasked128 y x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [5] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDQMasked128) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint64x4 x y mask) - // result: (VPADDQMasked256 y x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [5] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint64x8 x y mask) - // result: (VPADDQMasked512 y x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [5] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint8x16 x y mask) - // result: (VPADDBMasked128 y x (VPMOVVec8x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [5] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint8x32 x y mask) - // result: (VPADDBMasked256 y x (VPMOVVec8x32ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [5] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 - 
v.reset(OpAMD64VPADDBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint8x64 x y mask) - // result: (VPADDBMasked512 y x (VPMOVVec8x64ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [5] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndFloat32x16 x y mask) - // result: (VANDPSMasked512 y x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [5] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VANDPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndFloat32x4 x y mask) - // result: (VANDPSMasked128 y x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [5] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VANDPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndFloat32x8 x y mask) - // result: (VANDPSMasked256 y x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [5] x y (VPMOVVec8x32ToM 
mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VANDPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualUint8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndFloat64x2 x y mask) - // result: (VANDPDMasked128 y x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [5] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VANDPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndFloat64x4 x y mask) - // result: (VANDPDMasked256 y x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterFloat32x16 x y mask) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [6] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VANDPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterFloat32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndFloat64x8 x y mask) - // result: (VANDPDMasked512 y x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterFloat32x4 x y mask) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [6] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VANDPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndInt32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterFloat32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndInt32x16 x y mask) - // result: (VPANDDMasked512 y x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterFloat32x8 x y mask) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [6] x y 
(VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterFloat64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndInt32x4 x y mask) - // result: (VPANDDMasked128 y x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterFloat64x2 x y mask) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [6] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterFloat64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndInt32x8 x y mask) - // result: (VPANDDMasked256 y x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterFloat64x4 x y mask) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [6] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterFloat64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndInt64x2 x y mask) - // result: (VPANDQMasked128 y x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterFloat64x8 x y mask) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [6] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndInt64x4 x y mask) - // result: (VPANDQMasked256 y x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPGTWMasked256 x y 
(VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTWMasked256, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndInt64x8 x y mask) - // result: (VPANDQMasked512 y x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPGTWMasked512 x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTWMasked512, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotFloat32x16 x y mask) - // result: (VANDNPSMasked512 y x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPGTWMasked128 x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VANDNPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTWMasked128, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotFloat32x4 x y mask) - // result: (VANDNPSMasked128 y x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPGTDMasked512 x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VANDNPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTDMasked512, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotFloat32x8 x y mask) - // result: (VANDNPSMasked256 y x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPGTDMasked128 x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VANDNPSMasked256) 
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTDMasked128, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotFloat64x2 x y mask) - // result: (VANDNPDMasked128 y x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPGTDMasked256 x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VANDNPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTDMasked256, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotFloat64x4 x y mask) - // result: (VANDNPDMasked256 y x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPGTQMasked128 x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VANDNPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQMasked128, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotFloat64x8 x y mask) - // result: (VANDNPDMasked512 y x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPGTQMasked256 x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VANDNPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQMasked256, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotInt32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotInt32x16 x y mask) - // result: (VPANDNDMasked512 y x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPGTQMasked512 x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, 
v0) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQMasked512, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterInt8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotInt32x4 x y mask) - // result: (VPANDNDMasked128 y x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPGTBMasked128 x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTBMasked128, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterInt8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotInt32x8 x y mask) - // result: (VPANDNDMasked256 y x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPGTBMasked256 x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTBMasked256, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterInt8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotInt64x2 x y mask) - // result: (VPANDNQMasked128 y x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPGTBMasked512 x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTBMasked512, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotInt64x4 x y mask) - // result: (VPANDNQMasked256 y x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [6] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) + 
v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterUint16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotInt64x8 x y mask) - // result: (VPANDNQMasked512 y x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [6] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotUint32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterUint16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotUint32x16 x y mask) - // result: (VPANDNDMasked512 y x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [6] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotUint32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotUint32x4 x y mask) - // result: (VPANDNDMasked128 y x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [6] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotUint32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotUint32x8 x y mask) - // result: (VPANDNDMasked256 y x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [6] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := 
b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotUint64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotUint64x2 x y mask) - // result: (VPANDNQMasked128 y x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [6] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotUint64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotUint64x4 x y mask) - // result: (VPANDNQMasked256 y x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [6] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotUint64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotUint64x8 x y mask) - // result: (VPANDNQMasked512 y x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [6] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndUint32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndUint32x16 x y mask) - // result: (VPANDDMasked512 y x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [6] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + 
v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndUint32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndUint32x4 x y mask) - // result: (VPANDDMasked128 y x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [6] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndUint32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndUint32x8 x y mask) - // result: (VPANDDMasked256 y x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [6] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndUint64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterUint8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndUint64x2 x y mask) - // result: (VPANDQMasked128 y x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [6] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndUint64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedIsNanFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndUint64x4 x y mask) - // result: (VPANDQMasked256 y x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedIsNanFloat32x16 x y mask) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [3] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + 
v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndUint64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedIsNanFloat32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndUint64x8 x y mask) - // result: (VPANDQMasked512 y x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedIsNanFloat32x4 x y mask) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [3] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedIsNanFloat32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedApproximateReciprocalFloat32x16 x mask) - // result: (VRCP14PSMasked512 x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedIsNanFloat32x8 x y mask) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [3] x y (VPMOVVec32x8ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VRCP14PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedIsNanFloat64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedApproximateReciprocalFloat32x4 x mask) - // result: (VRCP14PSMasked128 x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedIsNanFloat64x2 x y mask) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [3] x y (VPMOVVec64x2ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VRCP14PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedIsNanFloat64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedApproximateReciprocalFloat32x8 x mask) - // result: (VRCP14PSMasked256 x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedIsNanFloat64x4 x y mask) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [3] x y (VPMOVVec64x4ToM mask))) for { x := v_0 - mask := v_1 - 
v.reset(OpAMD64VRCP14PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedIsNanFloat64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedApproximateReciprocalFloat64x2 x mask) - // result: (VRCP14PDMasked128 x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedIsNanFloat64x8 x y mask) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [3] x y (VPMOVVec64x8ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VRCP14PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedApproximateReciprocalFloat64x4 x mask) - // result: (VRCP14PDMasked256 x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VRCP14PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedApproximateReciprocalFloat64x8 x mask) - // result: (VRCP14PDMasked512 x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VRCP14PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedApproximateReciprocalOfSqrtFloat32x16 x mask) - // result: (VRSQRT14PSMasked512 x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VRSQRT14PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedApproximateReciprocalOfSqrtFloat32x4 x mask) - // result: (VRSQRT14PSMasked128 x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VRSQRT14PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedApproximateReciprocalOfSqrtFloat32x8 x mask) - // result: (VRSQRT14PSMasked256 x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VRSQRT14PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: 
(MaskedApproximateReciprocalOfSqrtFloat64x2 x mask) - // result: (VRSQRT14PDMasked128 x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VRSQRT14PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedApproximateReciprocalOfSqrtFloat64x4 x mask) - // result: (VRSQRT14PDMasked256 x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VRSQRT14PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedApproximateReciprocalOfSqrtFloat64x8 x mask) - // result: (VRSQRT14PDMasked512 x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VRSQRT14PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAverageUint16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAverageUint16x16 x y mask) - // result: (VPAVGWMasked256 y x (VPMOVVec16x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedLessEqualFloat32x16 x y mask) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [2] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPAVGWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAverageUint16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualFloat32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAverageUint16x32 x y mask) - // result: (VPAVGWMasked512 y x (VPMOVVec16x32ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedLessEqualFloat32x4 x y mask) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [2] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPAVGWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAverageUint16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualFloat32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAverageUint16x8 x y mask) - // 
result: (VPAVGWMasked128 y x (VPMOVVec16x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedLessEqualFloat32x8 x y mask) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [2] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPAVGWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAverageUint8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualFloat64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAverageUint8x16 x y mask) - // result: (VPAVGBMasked128 y x (VPMOVVec8x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedLessEqualFloat64x2 x y mask) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [2] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPAVGBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAverageUint8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualFloat64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAverageUint8x32 x y mask) - // result: (VPAVGBMasked256 y x (VPMOVVec8x32ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedLessEqualFloat64x4 x y mask) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [2] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPAVGBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAverageUint8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualFloat64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAverageUint8x64 x y mask) - // result: (VPAVGBMasked512 y x (VPMOVVec8x64ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedLessEqualFloat64x8 x y mask) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [2] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPAVGBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDivFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: 
(MaskedDivFloat32x16 x y mask) - // result: (VDIVPSMasked512 y x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedLessEqualInt16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [2] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VDIVPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDivFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDivFloat32x4 x y mask) - // result: (VDIVPSMasked128 y x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedLessEqualInt16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [2] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VDIVPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDivFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDivFloat32x8 x y mask) - // result: (VDIVPSMasked256 y x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedLessEqualInt16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [2] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VDIVPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDivFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDivFloat64x2 x y mask) - // result: (VDIVPDMasked128 y x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedLessEqualInt32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [2] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VDIVPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDivFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b 
:= v.Block - // match: (MaskedDivFloat64x4 x y mask) - // result: (VDIVPDMasked256 y x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedLessEqualInt32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [2] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VDIVPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDivFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDivFloat64x8 x y mask) - // result: (VDIVPDMasked512 y x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedLessEqualInt32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [2] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VDIVPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualFloat32x16 x y mask) - // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [0] y x (VPMOVVec32x16ToM mask))) + // match: (MaskedLessEqualInt64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [2] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualFloat32x4 x y mask) - // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [0] y x (VPMOVVec32x4ToM mask))) + // match: (MaskedLessEqualInt64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [2] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) 
return true } } -func rewriteValueAMD64_OpMaskedEqualFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualFloat32x8 x y mask) - // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [0] y x (VPMOVVec32x8ToM mask))) + // match: (MaskedLessEqualInt64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [2] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualInt8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualFloat64x2 x y mask) - // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [0] y x (VPMOVVec64x2ToM mask))) + // match: (MaskedLessEqualInt8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [2] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualInt8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualFloat64x4 x y mask) - // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [0] y x (VPMOVVec64x4ToM mask))) + // match: (MaskedLessEqualInt8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [2] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualInt8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualFloat64x8 x y mask) - // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [0] y x (VPMOVVec64x8ToM mask))) + // match: (MaskedLessEqualInt8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [2] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, 
OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPEQWMasked256 y x (VPMOVVec16x16ToM mask))) + // match: (MaskedLessEqualUint16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [2] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQWMasked256, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualUint16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPEQWMasked512 y x (VPMOVVec16x32ToM mask))) + // match: (MaskedLessEqualUint16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [2] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQWMasked512, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualUint16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPEQWMasked128 y x (VPMOVVec16x8ToM mask))) + // match: (MaskedLessEqualUint16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [2] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQWMasked128, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [0] y x (VPMOVVec32x16ToM mask))) + // match: (MaskedLessEqualUint32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [2] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [0] y x (VPMOVVec32x4ToM mask))) + // match: (MaskedLessEqualUint32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [2] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [0] y x (VPMOVVec32x8ToM mask))) + // match: (MaskedLessEqualUint32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [2] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPEQQMasked128 y x (VPMOVVec64x2ToM mask))) + // match: (MaskedLessEqualUint64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [2] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQQMasked128, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPEQQMasked256 y x (VPMOVVec64x4ToM mask))) + // match: (MaskedLessEqualUint64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [2] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQQMasked256, typ.Mask) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPCMPUQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPEQQMasked512 y x (VPMOVVec64x8ToM mask))) + // match: (MaskedLessEqualUint64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [2] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQQMasked512, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [0] y x (VPMOVVec8x16ToM mask))) + // match: (MaskedLessEqualUint8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [2] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [0] y x (VPMOVVec8x32ToM mask))) + // match: (MaskedLessEqualUint8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [2] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualUint8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [0] y x (VPMOVVec8x64ToM mask))) + // match: (MaskedLessEqualUint8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [2] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) v1 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualUint16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [0] y x (VPMOVVec16x16ToM mask))) + // match: (MaskedLessFloat32x16 x y mask) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [1] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualUint16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessFloat32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [0] y x (VPMOVVec16x32ToM mask))) + // match: (MaskedLessFloat32x4 x y mask) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [1] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualUint16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessFloat32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [0] y x (VPMOVVec16x8ToM mask))) + // match: (MaskedLessFloat32x8 x y mask) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [1] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualUint32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessFloat64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [0] y x (VPMOVVec32x16ToM mask))) + // match: (MaskedLessFloat64x2 x y mask) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [1] x y 
(VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualUint32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessFloat64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [0] y x (VPMOVVec32x4ToM mask))) + // match: (MaskedLessFloat64x4 x y mask) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [1] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualUint32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessFloat64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [0] y x (VPMOVVec32x8ToM mask))) + // match: (MaskedLessFloat64x8 x y mask) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [1] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualUint64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [0] y x (VPMOVVec64x2ToM mask))) + // match: (MaskedLessInt16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [1] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualUint64x4(v *Value) bool { +func 
rewriteValueAMD64_OpMaskedLessInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [0] y x (VPMOVVec64x4ToM mask))) + // match: (MaskedLessInt16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [1] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualUint64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [0] y x (VPMOVVec64x8ToM mask))) + // match: (MaskedLessInt16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [1] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualUint8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [0] y x (VPMOVVec8x16ToM mask))) + // match: (MaskedLessInt32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [1] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualUint8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [0] y x (VPMOVVec8x32ToM mask))) + // match: (MaskedLessInt32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [1] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + 
v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualUint8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [0] y x (VPMOVVec8x64ToM mask))) + // match: (MaskedLessInt32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [1] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualFloat32x16 x y mask) - // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [5] y x (VPMOVVec32x16ToM mask))) + // match: (MaskedLessInt64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [1] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualFloat32x4 x y mask) - // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [5] y x (VPMOVVec32x4ToM mask))) + // match: (MaskedLessInt64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [1] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualFloat32x8 x y mask) - // result: (VPMOVMToVec32x8 
(VCMPPSMasked256 [5] y x (VPMOVVec32x8ToM mask))) + // match: (MaskedLessInt64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [1] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessInt8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualFloat64x2 x y mask) - // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [5] y x (VPMOVVec64x2ToM mask))) + // match: (MaskedLessInt8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [1] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessInt8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualFloat64x4 x y mask) - // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [5] y x (VPMOVVec64x4ToM mask))) + // match: (MaskedLessInt8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [1] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessInt8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualFloat64x8 x y mask) - // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [5] y x (VPMOVVec64x8ToM mask))) + // match: (MaskedLessInt8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [1] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) 
v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [5] y x (VPMOVVec16x16ToM mask))) + // match: (MaskedLessUint16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [1] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessUint16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [5] y x (VPMOVVec16x32ToM mask))) + // match: (MaskedLessUint16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [1] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessUint16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [5] y x (VPMOVVec16x8ToM mask))) + // match: (MaskedLessUint16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [1] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [5] y x (VPMOVVec32x16ToM mask))) + // match: (MaskedLessUint32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [1] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, 
types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [5] y x (VPMOVVec32x4ToM mask))) + // match: (MaskedLessUint32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [1] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [5] y x (VPMOVVec32x8ToM mask))) + // match: (MaskedLessUint32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [1] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [5] y x (VPMOVVec64x2ToM mask))) + // match: (MaskedLessUint64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [1] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [5] y x (VPMOVVec64x4ToM mask))) + // match: (MaskedLessUint64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [1] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) 
v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [5] y x (VPMOVVec64x8ToM mask))) + // match: (MaskedLessUint64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [1] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [5] y x (VPMOVVec8x16ToM mask))) + // match: (MaskedLessUint8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [1] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [5] y x (VPMOVVec8x32ToM mask))) + // match: (MaskedLessUint8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [1] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessUint8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [5] y x (VPMOVVec8x64ToM mask))) + // match: (MaskedLessUint8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [1] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) - 
v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [5] y x (VPMOVVec16x16ToM mask))) + // match: (MaskedMaxFloat32x16 x y mask) + // result: (VMAXPSMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMAXPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxFloat32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [5] y x (VPMOVVec16x32ToM mask))) + // match: (MaskedMaxFloat32x4 x y mask) + // result: (VMAXPSMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMAXPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxFloat32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [5] y x (VPMOVVec16x8ToM mask))) + // match: (MaskedMaxFloat32x8 x y mask) + // result: (VMAXPSMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMAXPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxFloat64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [5] y x (VPMOVVec32x16ToM mask))) + // match: (MaskedMaxFloat64x2 x y mask) + // result: (VMAXPDMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + 
v.reset(OpAMD64VMAXPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxFloat64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [5] y x (VPMOVVec32x4ToM mask))) + // match: (MaskedMaxFloat64x4 x y mask) + // result: (VMAXPDMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMAXPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxFloat64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [5] y x (VPMOVVec32x8ToM mask))) + // match: (MaskedMaxFloat64x8 x y mask) + // result: (VMAXPDMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMAXPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [5] y x (VPMOVVec64x2ToM mask))) + // match: (MaskedMaxInt16x16 x y mask) + // result: (VPMAXSWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [5] y x (VPMOVVec64x4ToM mask))) + // match: (MaskedMaxInt16x32 x y mask) + // result: (VPMAXSWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - 
v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [5] y x (VPMOVVec64x8ToM mask))) + // match: (MaskedMaxInt16x8 x y mask) + // result: (VPMAXSWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [5] y x (VPMOVVec8x16ToM mask))) + // match: (MaskedMaxInt32x16 x y mask) + // result: (VPMAXSDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXSDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [5] y x (VPMOVVec8x32ToM mask))) + // match: (MaskedMaxInt32x4 x y mask) + // result: (VPMAXSDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXSDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [5] y x (VPMOVVec8x64ToM mask))) + // match: (MaskedMaxInt32x8 x y mask) + // result: (VPMAXSDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXSDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterFloat32x16 x y mask) - // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [6] y x (VPMOVVec32x16ToM mask))) + // match: (MaskedMaxInt64x2 x y mask) + // result: (VPMAXSQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXSQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterFloat32x4 x y mask) - // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [6] y x (VPMOVVec32x4ToM mask))) + // match: (MaskedMaxInt64x4 x y mask) + // result: (VPMAXSQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXSQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterFloat32x8 x y mask) - // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [6] y x (VPMOVVec32x8ToM mask))) + // match: (MaskedMaxInt64x8 x y mask) + // result: (VPMAXSQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXSQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxInt8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterFloat64x2 x y mask) - // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [6] y x (VPMOVVec64x2ToM mask))) + // match: (MaskedMaxInt8x16 x y mask) + // result: (VPMAXSBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxInt8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterFloat64x4 x y mask) - // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [6] y x (VPMOVVec64x4ToM mask))) + // match: (MaskedMaxInt8x32 x y mask) + // result: (VPMAXSBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxInt8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterFloat64x8 x y mask) - // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [6] y x (VPMOVVec64x8ToM mask))) + // match: (MaskedMaxInt8x64 x y mask) + // result: (VPMAXSBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterInt16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPGTWMasked256 y x (VPMOVVec16x16ToM mask))) + // match: (MaskedMaxUint16x16 x y mask) + // result: (VPMAXUWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTWMasked256, typ.Mask) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXUWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxUint16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterInt16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPGTWMasked512 y x (VPMOVVec16x32ToM mask))) + // match: (MaskedMaxUint16x32 x y mask) + // result: (VPMAXUWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTWMasked512, typ.Mask) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - 
v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXUWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxUint16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterInt16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPGTWMasked128 y x (VPMOVVec16x8ToM mask))) + // match: (MaskedMaxUint16x8 x y mask) + // result: (VPMAXUWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTWMasked128, typ.Mask) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXUWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterInt32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [6] y x (VPMOVVec32x16ToM mask))) + // match: (MaskedMaxUint32x16 x y mask) + // result: (VPMAXUDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXUDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterInt32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [6] y x (VPMOVVec32x4ToM mask))) + // match: (MaskedMaxUint32x4 x y mask) + // result: (VPMAXUDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXUDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterInt32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [6] y x (VPMOVVec32x8ToM mask))) + // match: (MaskedMaxUint32x8 x y mask) + // result: (VPMAXUDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, 
x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXUDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterInt64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPGTQMasked128 y x (VPMOVVec64x2ToM mask))) + // match: (MaskedMaxUint64x2 x y mask) + // result: (VPMAXUQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQMasked128, typ.Mask) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXUQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterInt64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPGTQMasked256 y x (VPMOVVec64x4ToM mask))) + // match: (MaskedMaxUint64x4 x y mask) + // result: (VPMAXUQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQMasked256, typ.Mask) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXUQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterInt64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPGTQMasked512 y x (VPMOVVec64x8ToM mask))) + // match: (MaskedMaxUint64x8 x y mask) + // result: (VPMAXUQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQMasked512, typ.Mask) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXUQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterInt8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [6] y x (VPMOVVec8x16ToM mask))) + // match: (MaskedMaxUint8x16 x y mask) + // result: (VPMAXUBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXUBMasked128) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterInt8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [6] y x (VPMOVVec8x32ToM mask))) + // match: (MaskedMaxUint8x32 x y mask) + // result: (VPMAXUBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXUBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxUint8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterInt8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [6] y x (VPMOVVec8x64ToM mask))) + // match: (MaskedMaxUint8x64 x y mask) + // result: (VPMAXUBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXUBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterUint16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterUint16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [6] y x (VPMOVVec16x16ToM mask))) + // match: (MaskedMinFloat32x16 x y mask) + // result: (VMINPSMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMINPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterUint16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinFloat32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterUint16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [6] y x (VPMOVVec16x32ToM mask))) + // match: (MaskedMinFloat32x4 x y mask) + // result: (VMINPSMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMINPSMasked128) + v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterUint16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinFloat32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterUint16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [6] y x (VPMOVVec16x8ToM mask))) + // match: (MaskedMinFloat32x8 x y mask) + // result: (VMINPSMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMINPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterUint32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinFloat64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterUint32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [6] y x (VPMOVVec32x16ToM mask))) + // match: (MaskedMinFloat64x2 x y mask) + // result: (VMINPDMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMINPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterUint32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinFloat64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterUint32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [6] y x (VPMOVVec32x4ToM mask))) + // match: (MaskedMinFloat64x4 x y mask) + // result: (VMINPDMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMINPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterUint32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinFloat64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterUint32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [6] y x (VPMOVVec32x8ToM mask))) + // match: (MaskedMinFloat64x8 x y mask) + // result: (VMINPDMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMINPDMasked512) 
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterUint64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterUint64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [6] y x (VPMOVVec64x2ToM mask))) + // match: (MaskedMinInt16x16 x y mask) + // result: (VPMINSWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterUint64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterUint64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [6] y x (VPMOVVec64x4ToM mask))) + // match: (MaskedMinInt16x32 x y mask) + // result: (VPMINSWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterUint64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterUint64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [6] y x (VPMOVVec64x8ToM mask))) + // match: (MaskedMinInt16x8 x y mask) + // result: (VPMINSWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterUint8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterUint8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [6] y x (VPMOVVec8x16ToM mask))) + // match: (MaskedMinInt32x16 x y mask) + // result: (VPMINSDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + 
v.reset(OpAMD64VPMINSDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterUint8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterUint8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [6] y x (VPMOVVec8x32ToM mask))) + // match: (MaskedMinInt32x4 x y mask) + // result: (VPMINSDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINSDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterUint8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterUint8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [6] y x (VPMOVVec8x64ToM mask))) + // match: (MaskedMinInt32x8 x y mask) + // result: (VPMINSDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINSDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedIsNanFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedIsNanFloat32x16 x y mask) - // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [3] y x (VPMOVVec32x16ToM mask))) + // match: (MaskedMinInt64x2 x y mask) + // result: (VPMINSQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINSQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedIsNanFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedIsNanFloat32x4 x y mask) - // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [3] y x (VPMOVVec32x4ToM mask))) + // match: (MaskedMinInt64x4 x y mask) + // result: (VPMINSQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + 
v.reset(OpAMD64VPMINSQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedIsNanFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedIsNanFloat32x8 x y mask) - // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [3] y x (VPMOVVec32x8ToM mask))) + // match: (MaskedMinInt64x8 x y mask) + // result: (VPMINSQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINSQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedIsNanFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinInt8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedIsNanFloat64x2 x y mask) - // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [3] y x (VPMOVVec64x2ToM mask))) + // match: (MaskedMinInt8x16 x y mask) + // result: (VPMINSBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true + v.reset(OpAMD64VPMINSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true } } -func rewriteValueAMD64_OpMaskedIsNanFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinInt8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedIsNanFloat64x4 x y mask) - // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [3] y x (VPMOVVec64x4ToM mask))) + // match: (MaskedMinInt8x32 x y mask) + // result: (VPMINSBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedIsNanFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinInt8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedIsNanFloat64x8 x y mask) - // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [3] y x (VPMOVVec64x8ToM mask))) + // match: (MaskedMinInt8x64 x y mask) + // result: (VPMINSBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + 
v.reset(OpAMD64VPMINSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualFloat32x16 x y mask) - // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [2] y x (VPMOVVec32x16ToM mask))) + // match: (MaskedMinUint16x16 x y mask) + // result: (VPMINUWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINUWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinUint16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualFloat32x4 x y mask) - // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [2] y x (VPMOVVec32x4ToM mask))) + // match: (MaskedMinUint16x32 x y mask) + // result: (VPMINUWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINUWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinUint16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualFloat32x8 x y mask) - // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [2] y x (VPMOVVec32x8ToM mask))) + // match: (MaskedMinUint16x8 x y mask) + // result: (VPMINUWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINUWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualFloat64x2 x y mask) - // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [2] y x (VPMOVVec64x2ToM mask))) + // match: (MaskedMinUint32x16 x y mask) + // result: (VPMINUDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - 
v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINUDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualFloat64x4 x y mask) - // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [2] y x (VPMOVVec64x4ToM mask))) + // match: (MaskedMinUint32x4 x y mask) + // result: (VPMINUDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINUDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualFloat64x8 x y mask) - // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [2] y x (VPMOVVec64x8ToM mask))) + // match: (MaskedMinUint32x8 x y mask) + // result: (VPMINUDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINUDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [2] y x (VPMOVVec16x16ToM mask))) + // match: (MaskedMinUint64x2 x y mask) + // result: (VPMINUQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINUQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [2] y x (VPMOVVec16x32ToM mask))) + // match: (MaskedMinUint64x4 x y mask) + // result: (VPMINUQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, 
types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINUQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [2] y x (VPMOVVec16x8ToM mask))) + // match: (MaskedMinUint64x8 x y mask) + // result: (VPMINUQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINUQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [2] y x (VPMOVVec32x16ToM mask))) + // match: (MaskedMinUint8x16 x y mask) + // result: (VPMINUBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINUBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [2] y x (VPMOVVec32x4ToM mask))) + // match: (MaskedMinUint8x32 x y mask) + // result: (VPMINUBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINUBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinUint8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [2] y x (VPMOVVec32x8ToM mask))) + // match: (MaskedMinUint8x64 x y mask) + // result: (VPMINUBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINUBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulByPowOf2Float32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [2] y x (VPMOVVec64x2ToM mask))) + // match: (MaskedMulByPowOf2Float32x16 x y mask) + // result: (VSCALEFPSMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VSCALEFPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulByPowOf2Float32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [2] y x (VPMOVVec64x4ToM mask))) + // match: (MaskedMulByPowOf2Float32x4 x y mask) + // result: (VSCALEFPSMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VSCALEFPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulByPowOf2Float32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [2] y x (VPMOVVec64x8ToM mask))) + // match: (MaskedMulByPowOf2Float32x8 x y mask) + // result: (VSCALEFPSMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VSCALEFPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulByPowOf2Float64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [2] y x (VPMOVVec8x16ToM mask))) + // match: (MaskedMulByPowOf2Float64x2 x y mask) + // result: (VSCALEFPDMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPCMPBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VSCALEFPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulByPowOf2Float64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [2] y x (VPMOVVec8x32ToM mask))) + // match: (MaskedMulByPowOf2Float64x4 x y mask) + // result: (VSCALEFPDMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VSCALEFPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulByPowOf2Float64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [2] y x (VPMOVVec8x64ToM mask))) + // match: (MaskedMulByPowOf2Float64x8 x y mask) + // result: (VSCALEFPDMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VSCALEFPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulEvenWidenInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [2] y x (VPMOVVec16x16ToM mask))) + // match: (MaskedMulEvenWidenInt64x2 x y mask) + // result: (VPMULDQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMULDQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulEvenWidenInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [2] y x (VPMOVVec16x32ToM mask))) + // match: (MaskedMulEvenWidenInt64x4 x y mask) + // result: (VPMULDQMasked256 x y (VPMOVVec64x4ToM mask)) 
for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMULDQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulEvenWidenInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [2] y x (VPMOVVec16x8ToM mask))) + // match: (MaskedMulEvenWidenInt64x8 x y mask) + // result: (VPMULDQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMULDQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulEvenWidenUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [2] y x (VPMOVVec32x16ToM mask))) + // match: (MaskedMulEvenWidenUint64x2 x y mask) + // result: (VPMULUDQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMULUDQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulEvenWidenUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [2] y x (VPMOVVec32x4ToM mask))) + // match: (MaskedMulEvenWidenUint64x4 x y mask) + // result: (VPMULUDQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMULUDQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulEvenWidenUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [2] y x (VPMOVVec32x8ToM mask))) + // 
match: (MaskedMulEvenWidenUint64x8 x y mask) + // result: (VPMULUDQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMULUDQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [2] y x (VPMOVVec64x2ToM mask))) + // match: (MaskedMulFloat32x16 x y mask) + // result: (VMULPSMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMULPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulFloat32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [2] y x (VPMOVVec64x4ToM mask))) + // match: (MaskedMulFloat32x4 x y mask) + // result: (VMULPSMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMULPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulFloat32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [2] y x (VPMOVVec64x8ToM mask))) + // match: (MaskedMulFloat32x8 x y mask) + // result: (VMULPSMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMULPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulFloat64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [2] 
y x (VPMOVVec8x16ToM mask))) + // match: (MaskedMulFloat64x2 x y mask) + // result: (VMULPDMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMULPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulFloat64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [2] y x (VPMOVVec8x32ToM mask))) + // match: (MaskedMulFloat64x4 x y mask) + // result: (VMULPDMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMULPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulFloat64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [2] y x (VPMOVVec8x64ToM mask))) + // match: (MaskedMulFloat64x8 x y mask) + // result: (VMULPDMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMULPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulHighInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessFloat32x16 x y mask) - // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [1] y x (VPMOVVec32x16ToM mask))) + // match: (MaskedMulHighInt16x16 x y mask) + // result: (VPMULHWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMULHWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulHighInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessFloat32x4 x y mask) - // result: (VPMOVMToVec32x4 
(VCMPPSMasked128 [1] y x (VPMOVVec32x4ToM mask))) + // match: (MaskedMulHighInt16x32 x y mask) + // result: (VPMULHWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true + v.reset(OpAMD64VPMULHWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true } } -func rewriteValueAMD64_OpMaskedLessFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulHighInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulHighInt16x8 x y mask) + // result: (VPMULHWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULHWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulHighUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulHighUint16x16 x y mask) + // result: (VPMULHUWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULHUWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulHighUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulHighUint16x32 x y mask) + // result: (VPMULHUWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULHUWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulHighUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulHighUint16x8 x y mask) + // result: (VPMULHUWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULHUWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulLowInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulLowInt16x16 x y mask) + // result: (VPMULLWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulLowInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulLowInt16x32 x y mask) + // result: (VPMULLWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulLowInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulLowInt16x8 x y mask) 
+ // result: (VPMULLWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulLowInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulLowInt32x16 x y mask) + // result: (VPMULLDMasked512 x y (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulLowInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulLowInt32x4 x y mask) + // result: (VPMULLDMasked128 x y (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulLowInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulLowInt32x8 x y mask) + // result: (VPMULLDMasked256 x y (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulLowInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulLowInt64x2 x y mask) + // result: (VPMULLQMasked128 x y (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulLowInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulLowInt64x4 x y mask) + // result: (VPMULLQMasked256 x y (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulLowInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulLowInt64x8 x y mask) + // result: (VPMULLQMasked512 x y (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessFloat32x8 x y mask) - // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [1] y x (VPMOVVec32x8ToM mask))) + // match: (MaskedNotEqualFloat32x16 x y mask) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [4] x y (VPMOVVec32x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualFloat32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualFloat32x4 x y mask) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [4] x y (VPMOVVec32x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualFloat32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualFloat32x8 x y mask) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [4] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualFloat64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessFloat64x2 x y mask) - // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [1] y x (VPMOVVec64x2ToM mask))) + // match: (MaskedNotEqualFloat64x2 x y mask) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [4] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualFloat64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessFloat64x4 x y mask) - // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [1] y x (VPMOVVec64x4ToM mask))) + // match: (MaskedNotEqualFloat64x4 x y mask) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [4] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualFloat64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessFloat64x8 x y mask) - // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [1] y x (VPMOVVec64x8ToM mask))) + // match: (MaskedNotEqualFloat64x8 x y mask) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [4] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := 
b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessInt16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessInt16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [1] y x (VPMOVVec16x16ToM mask))) + // match: (MaskedNotEqualInt16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [4] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessInt16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [1] y x (VPMOVVec16x32ToM mask))) + // match: (MaskedNotEqualInt16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [4] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessInt16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [1] y x (VPMOVVec16x8ToM mask))) + // match: (MaskedNotEqualInt16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [4] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessInt32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessInt32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [1] y x (VPMOVVec32x16ToM mask))) + // match: (MaskedNotEqualInt32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [4] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func 
rewriteValueAMD64_OpMaskedLessInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessInt32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [1] y x (VPMOVVec32x4ToM mask))) + // match: (MaskedNotEqualInt32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [4] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessInt32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [1] y x (VPMOVVec32x8ToM mask))) + // match: (MaskedNotEqualInt32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [4] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessInt64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [1] y x (VPMOVVec64x2ToM mask))) + // match: (MaskedNotEqualInt64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [4] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessInt64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [1] y x (VPMOVVec64x4ToM mask))) + // match: (MaskedNotEqualInt64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [4] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessInt64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [1] y x (VPMOVVec64x8ToM 
mask))) + // match: (MaskedNotEqualInt64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [4] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessInt8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualInt8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessInt8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [1] y x (VPMOVVec8x16ToM mask))) + // match: (MaskedNotEqualInt8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [4] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessInt8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualInt8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessInt8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [1] y x (VPMOVVec8x32ToM mask))) + // match: (MaskedNotEqualInt8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [4] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessInt8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualInt8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessInt8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [1] y x (VPMOVVec8x64ToM mask))) + // match: (MaskedNotEqualInt8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [4] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessUint16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessUint16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [1] y x (VPMOVVec16x16ToM mask))) + // match: (MaskedNotEqualUint16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [4] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = 
int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessUint16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualUint16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessUint16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [1] y x (VPMOVVec16x32ToM mask))) + // match: (MaskedNotEqualUint16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [4] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessUint16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualUint16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessUint16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [1] y x (VPMOVVec16x8ToM mask))) + // match: (MaskedNotEqualUint16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [4] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessUint32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessUint32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [1] y x (VPMOVVec32x16ToM mask))) + // match: (MaskedNotEqualUint32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [4] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessUint32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessUint32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [1] y x (VPMOVVec32x4ToM mask))) + // match: (MaskedNotEqualUint32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [4] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessUint32x8(v *Value) bool { +func 
rewriteValueAMD64_OpMaskedNotEqualUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessUint32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [1] y x (VPMOVVec32x8ToM mask))) + // match: (MaskedNotEqualUint32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [4] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessUint64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessUint64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [1] y x (VPMOVVec64x2ToM mask))) + // match: (MaskedNotEqualUint64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [4] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessUint64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessUint64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [1] y x (VPMOVVec64x4ToM mask))) + // match: (MaskedNotEqualUint64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [4] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessUint64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessUint64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [1] y x (VPMOVVec64x8ToM mask))) + // match: (MaskedNotEqualUint64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [4] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessUint8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessUint8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [1] y x (VPMOVVec8x16ToM mask))) + // match: 
(MaskedNotEqualUint8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [4] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessUint8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessUint8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [1] y x (VPMOVVec8x32ToM mask))) + // match: (MaskedNotEqualUint8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [4] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessUint8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualUint8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessUint8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [1] y x (VPMOVVec8x64ToM mask))) + // match: (MaskedNotEqualUint8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [4] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedMaxFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxFloat32x16 x y mask) - // result: (VMAXPSMasked512 y x (VPMOVVec32x16ToM mask)) + // match: (MaskedOrFloat32x16 x y mask) + // result: (VORPSMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VMAXPSMasked512) + v.reset(OpAMD64VORPSMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrFloat32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxFloat32x4 x y mask) - // result: (VMAXPSMasked128 y x (VPMOVVec32x4ToM mask)) + // match: (MaskedOrFloat32x4 x y mask) + // result: (VORPSMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VMAXPSMasked128) + v.reset(OpAMD64VORPSMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrFloat32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: 
(MaskedMaxFloat32x8 x y mask) - // result: (VMAXPSMasked256 y x (VPMOVVec32x8ToM mask)) + // match: (MaskedOrFloat32x8 x y mask) + // result: (VORPSMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VMAXPSMasked256) + v.reset(OpAMD64VORPSMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrFloat64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxFloat64x2 x y mask) - // result: (VMAXPDMasked128 y x (VPMOVVec64x2ToM mask)) + // match: (MaskedOrFloat64x2 x y mask) + // result: (VORPDMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VMAXPDMasked128) + v.reset(OpAMD64VORPDMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrFloat64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxFloat64x4 x y mask) - // result: (VMAXPDMasked256 y x (VPMOVVec64x4ToM mask)) + // match: (MaskedOrFloat64x4 x y mask) + // result: (VORPDMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VMAXPDMasked256) + v.reset(OpAMD64VORPDMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrFloat64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxFloat64x8 x y mask) - // result: (VMAXPDMasked512 y x (VPMOVVec64x8ToM mask)) + // match: (MaskedOrFloat64x8 x y mask) + // result: (VORPDMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VMAXPDMasked512) + v.reset(OpAMD64VORPDMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxInt16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt16x16 x y mask) - // result: (VPMAXSWMasked256 y x (VPMOVVec16x16ToM mask)) + // match: (MaskedOrInt32x16 x y mask) + // result: (VPORDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMAXSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v.reset(OpAMD64VPORDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt16x32 x y mask) - // result: (VPMAXSWMasked512 y x (VPMOVVec16x32ToM mask)) + // match: (MaskedOrInt32x4 x y mask) + // result: (VPORDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMAXSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + 
v.reset(OpAMD64VPORDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt16x8 x y mask) - // result: (VPMAXSWMasked128 y x (VPMOVVec16x8ToM mask)) + // match: (MaskedOrInt32x8 x y mask) + // result: (VPORDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMAXSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v.reset(OpAMD64VPORDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxInt32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt32x16 x y mask) - // result: (VPMAXSDMasked512 y x (VPMOVVec32x16ToM mask)) + // match: (MaskedOrInt64x2 x y mask) + // result: (VPORQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMAXSDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v.reset(OpAMD64VPORQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt32x4 x y mask) - // result: (VPMAXSDMasked128 y x (VPMOVVec32x4ToM mask)) + // match: (MaskedOrInt64x4 x y mask) + // result: (VPORQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMAXSDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VPORQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt32x8 x y mask) - // result: (VPMAXSDMasked256 y x (VPMOVVec32x8ToM mask)) + // match: (MaskedOrInt64x8 x y mask) + // result: (VPORQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMAXSDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VPORQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt64x2 x y mask) - // result: (VPMAXSQMasked128 y x (VPMOVVec64x2ToM mask)) + // match: (MaskedOrUint32x16 x y mask) + // result: (VPORDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMAXSQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPORDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, 
types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt64x4 x y mask) - // result: (VPMAXSQMasked256 y x (VPMOVVec64x4ToM mask)) + // match: (MaskedOrUint32x4 x y mask) + // result: (VPORDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMAXSQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPORDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt64x8 x y mask) - // result: (VPMAXSQMasked512 y x (VPMOVVec64x8ToM mask)) + // match: (MaskedOrUint32x8 x y mask) + // result: (VPORDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMAXSQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v.reset(OpAMD64VPORDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxInt8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt8x16 x y mask) - // result: (VPMAXSBMasked128 y x (VPMOVVec8x16ToM mask)) + // match: (MaskedOrUint64x2 x y mask) + // result: (VPORQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMAXSBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v.reset(OpAMD64VPORQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxInt8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt8x32 x y mask) - // result: (VPMAXSBMasked256 y x (VPMOVVec8x32ToM mask)) + // match: (MaskedOrUint64x4 x y mask) + // result: (VPORQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMAXSBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v.reset(OpAMD64VPORQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxInt8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt8x64 x y mask) - // result: (VPMAXSBMasked512 y x (VPMOVVec8x64ToM mask)) + // match: (MaskedOrUint64x8 x y mask) + // result: (VPORQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMAXSBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v.reset(OpAMD64VPORQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return 
true } } -func rewriteValueAMD64_OpMaskedMaxUint16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxUint16x16 x y mask) - // result: (VPMAXUWMasked256 y x (VPMOVVec16x16ToM mask)) + // match: (MaskedPopCountInt16x16 x mask) + // result: (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUWMasked256) + mask := v_1 + v.reset(OpAMD64VPOPCNTWMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxUint16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxUint16x32 x y mask) - // result: (VPMAXUWMasked512 y x (VPMOVVec16x32ToM mask)) + // match: (MaskedPopCountInt16x32 x mask) + // result: (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUWMasked512) + mask := v_1 + v.reset(OpAMD64VPOPCNTWMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxUint16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxUint16x8 x y mask) - // result: (VPMAXUWMasked128 y x (VPMOVVec16x8ToM mask)) + // match: (MaskedPopCountInt16x8 x mask) + // result: (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUWMasked128) + mask := v_1 + v.reset(OpAMD64VPOPCNTWMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxUint32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxUint32x16 x y mask) - // result: (VPMAXUDMasked512 y x (VPMOVVec32x16ToM mask)) + // match: (MaskedPopCountInt32x16 x mask) + // result: (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUDMasked512) + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxUint32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxUint32x4 x y mask) - // result: (VPMAXUDMasked128 y x (VPMOVVec32x4ToM mask)) + // match: (MaskedPopCountInt32x4 x mask) + // result: (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUDMasked128) + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxUint32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxUint32x8 x y mask) - // 
result: (VPMAXUDMasked256 y x (VPMOVVec32x8ToM mask)) + // match: (MaskedPopCountInt32x8 x mask) + // result: (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUDMasked256) + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxUint64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxUint64x2 x y mask) - // result: (VPMAXUQMasked128 y x (VPMOVVec64x2ToM mask)) + // match: (MaskedPopCountInt64x2 x mask) + // result: (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUQMasked128) + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxUint64x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxUint64x4 x y mask) - // result: (VPMAXUQMasked256 y x (VPMOVVec64x4ToM mask)) + // match: (MaskedPopCountInt64x4 x mask) + // result: (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUQMasked256) + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxUint64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxUint64x8 x y mask) - // result: (VPMAXUQMasked512 y x (VPMOVVec64x8ToM mask)) + // match: (MaskedPopCountInt64x8 x mask) + // result: (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUQMasked512) + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxUint8x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountInt8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxUint8x16 x y mask) - // result: (VPMAXUBMasked128 y x (VPMOVVec8x16ToM mask)) + // match: (MaskedPopCountInt8x16 x mask) + // result: (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUBMasked128) + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxUint8x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountInt8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxUint8x32 x y mask) - // result: (VPMAXUBMasked256 y x (VPMOVVec8x32ToM mask)) + // match: (MaskedPopCountInt8x32 x mask) + // result: (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUBMasked256) + mask := v_1 + 
v.reset(OpAMD64VPOPCNTBMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxUint8x64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxUint8x64 x y mask) - // result: (VPMAXUBMasked512 y x (VPMOVVec8x64ToM mask)) + // match: (MaskedPopCountInt8x64 x mask) + // result: (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUBMasked512) + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMinFloat32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinFloat32x16 x y mask) - // result: (VMINPSMasked512 y x (VPMOVVec32x16ToM mask)) + // match: (MaskedPopCountUint16x16 x mask) + // result: (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMINPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMinFloat32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinFloat32x4 x y mask) - // result: (VMINPSMasked128 y x (VPMOVVec32x4ToM mask)) + // match: (MaskedPopCountUint16x32 x mask) + // result: (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMINPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMinFloat32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountUint16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinFloat32x8 x y mask) - // result: (VMINPSMasked256 y x (VPMOVVec32x8ToM mask)) + // match: (MaskedPopCountUint16x8 x mask) + // result: (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMINPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMinFloat64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinFloat64x2 x y mask) - // result: (VMINPDMasked128 y x (VPMOVVec64x2ToM mask)) + // match: (MaskedPopCountUint32x16 x mask) + // result: (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMINPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, 
types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMinFloat64x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinFloat64x4 x y mask) - // result: (VMINPDMasked256 y x (VPMOVVec64x4ToM mask)) + // match: (MaskedPopCountUint32x4 x mask) + // result: (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMINPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMinFloat64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinFloat64x8 x y mask) - // result: (VMINPDMasked512 y x (VPMOVVec64x8ToM mask)) + // match: (MaskedPopCountUint32x8 x mask) + // result: (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMINPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMinInt16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountUint64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinInt16x16 x y mask) - // result: (VPMINSWMasked256 y x (VPMOVVec16x16ToM mask)) + // match: (MaskedPopCountUint64x2 x mask) + // result: (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMinInt16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinInt16x32 x y mask) - // result: (VPMINSWMasked512 y x (VPMOVVec16x32ToM mask)) + // match: (MaskedPopCountUint64x4 x mask) + // result: (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMinInt16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinInt16x8 x y mask) - // result: (VPMINSWMasked128 y x (VPMOVVec16x8ToM mask)) + // match: (MaskedPopCountUint64x8 x mask) + // result: (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - 
v.reset(OpAMD64VPMINSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMinInt32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountUint8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinInt32x16 x y mask) - // result: (VPMINSDMasked512 y x (VPMOVVec32x16ToM mask)) + // match: (MaskedPopCountUint8x16 x mask) + // result: (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINSDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMinInt32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountUint8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinInt32x4 x y mask) - // result: (VPMINSDMasked128 y x (VPMOVVec32x4ToM mask)) + // match: (MaskedPopCountUint8x32 x mask) + // result: (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINSDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMinInt32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountUint8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinInt32x8 x y mask) - // result: (VPMINSDMasked256 y x (VPMOVVec32x8ToM mask)) + // match: (MaskedPopCountUint8x64 x mask) + // result: (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINSDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMinInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedAddInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinInt64x2 x y mask) - // result: (VPMINSQMasked128 y x (VPMOVVec64x2ToM mask)) + // match: (MaskedSaturatedAddInt16x16 x y mask) + // result: (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINSQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPADDSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedAddInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinInt64x4 x y mask) - // result: (VPMINSQMasked256 y x (VPMOVVec64x4ToM mask)) + // match: (MaskedSaturatedAddInt16x32 x y mask) + // result: 
(VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINSQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPADDSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedAddInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinInt64x8 x y mask) - // result: (VPMINSQMasked512 y x (VPMOVVec64x8ToM mask)) + // match: (MaskedSaturatedAddInt16x8 x y mask) + // result: (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINSQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v.reset(OpAMD64VPADDSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinInt8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedAddInt8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinInt8x16 x y mask) - // result: (VPMINSBMasked128 y x (VPMOVVec8x16ToM mask)) + // match: (MaskedSaturatedAddInt8x16 x y mask) + // result: (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINSBMasked128) + v.reset(OpAMD64VPADDSBMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinInt8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedAddInt8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinInt8x32 x y mask) - // result: (VPMINSBMasked256 y x (VPMOVVec8x32ToM mask)) + // match: (MaskedSaturatedAddInt8x32 x y mask) + // result: (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINSBMasked256) + v.reset(OpAMD64VPADDSBMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinInt8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedAddInt8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinInt8x64 x y mask) - // result: (VPMINSBMasked512 y x (VPMOVVec8x64ToM mask)) + // match: (MaskedSaturatedAddInt8x64 x y mask) + // result: (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINSBMasked512) + v.reset(OpAMD64VPADDSBMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinUint16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedAddUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinUint16x16 x y mask) - // result: (VPMINUWMasked256 y x (VPMOVVec16x16ToM mask)) + // match: (MaskedSaturatedAddUint16x16 x y mask) + // result: (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINUWMasked256) + v.reset(OpAMD64VPADDSWMasked256) v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinUint16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedAddUint16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinUint16x32 x y mask) - // result: (VPMINUWMasked512 y x (VPMOVVec16x32ToM mask)) + // match: (MaskedSaturatedAddUint16x32 x y mask) + // result: (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINUWMasked512) + v.reset(OpAMD64VPADDSWMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinUint16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedAddUint16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinUint16x8 x y mask) - // result: (VPMINUWMasked128 y x (VPMOVVec16x8ToM mask)) + // match: (MaskedSaturatedAddUint16x8 x y mask) + // result: (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINUWMasked128) + v.reset(OpAMD64VPADDSWMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMinUint32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMinUint32x16 x y mask) - // result: (VPMINUDMasked512 y x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINUDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMinUint32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMinUint32x4 x y mask) - // result: (VPMINUDMasked128 y x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINUDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMinUint32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMinUint32x8 x y mask) - // result: (VPMINUDMasked256 y x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINUDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMinUint64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMinUint64x2 x y mask) - // result: (VPMINUQMasked128 y x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINUQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMinUint64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMinUint64x4 x y mask) - // result: (VPMINUQMasked256 y x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINUQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, 
types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMinUint64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMinUint64x8 x y mask) - // result: (VPMINUQMasked512 y x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINUQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinUint8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedAddUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinUint8x16 x y mask) - // result: (VPMINUBMasked128 y x (VPMOVVec8x16ToM mask)) + // match: (MaskedSaturatedAddUint8x16 x y mask) + // result: (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINUBMasked128) + v.reset(OpAMD64VPADDSBMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinUint8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedAddUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinUint8x32 x y mask) - // result: (VPMINUBMasked256 y x (VPMOVVec8x32ToM mask)) + // match: (MaskedSaturatedAddUint8x32 x y mask) + // result: (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINUBMasked256) + v.reset(OpAMD64VPADDSBMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinUint8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedAddUint8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinUint8x64 x y mask) - // result: (VPMINUBMasked512 y x (VPMOVVec8x64ToM mask)) + // match: (MaskedSaturatedAddUint8x64 x y mask) + // result: (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINUBMasked512) + v.reset(OpAMD64VPADDSBMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulByPowOf2Float32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulByPowOf2Float32x16 x y mask) - // result: (VSCALEFPSMasked512 y x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VSCALEFPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulByPowOf2Float32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulByPowOf2Float32x4 x y mask) - // result: (VSCALEFPSMasked128 y x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VSCALEFPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulByPowOf2Float32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: 
(MaskedMulByPowOf2Float32x8 x y mask) - // result: (VSCALEFPSMasked256 y x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VSCALEFPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulByPowOf2Float64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulByPowOf2Float64x2 x y mask) - // result: (VSCALEFPDMasked128 y x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VSCALEFPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulByPowOf2Float64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulByPowOf2Float64x4 x y mask) - // result: (VSCALEFPDMasked256 y x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VSCALEFPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulByPowOf2Float64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulByPowOf2Float64x8 x y mask) - // result: (VSCALEFPDMasked512 y x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VSCALEFPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulEvenWidenInt64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulEvenWidenInt64x2 x y mask) - // result: (VPMULDQMasked128 y x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULDQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulEvenWidenInt64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulEvenWidenInt64x4 x y mask) - // result: (VPMULDQMasked256 y x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULDQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulEvenWidenInt64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulEvenWidenInt64x8 x y mask) - // result: (VPMULDQMasked512 y x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULDQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulEvenWidenUint64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulEvenWidenUint64x2 x y mask) - // result: (VPMULUDQMasked128 y x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULUDQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func 
rewriteValueAMD64_OpMaskedMulEvenWidenUint64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulEvenWidenUint64x4 x y mask) - // result: (VPMULUDQMasked256 y x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULUDQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulEvenWidenUint64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulEvenWidenUint64x8 x y mask) - // result: (VPMULUDQMasked512 y x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULUDQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulFloat32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulFloat32x16 x y mask) - // result: (VMULPSMasked512 y x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMULPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulFloat32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulFloat32x4 x y mask) - // result: (VMULPSMasked128 y x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMULPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulFloat32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulFloat32x8 x y mask) - // result: (VMULPSMasked256 y x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMULPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulFloat64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulFloat64x2 x y mask) - // result: (VMULPDMasked128 y x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMULPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulFloat64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulFloat64x4 x y mask) - // result: (VMULPDMasked256 y x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMULPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulFloat64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulFloat64x8 x y mask) - // result: (VMULPDMasked512 y x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMULPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, 
v0) return true } } -func rewriteValueAMD64_OpMaskedMulHighInt16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedSubInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulHighInt16x16 x y mask) - // result: (VPMULHWMasked256 y x (VPMOVVec16x16ToM mask)) + // match: (MaskedSaturatedSubInt16x16 x y mask) + // result: (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULHWMasked256) + v.reset(OpAMD64VPSUBSWMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulHighInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedSubInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulHighInt16x32 x y mask) - // result: (VPMULHWMasked512 y x (VPMOVVec16x32ToM mask)) + // match: (MaskedSaturatedSubInt16x32 x y mask) + // result: (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULHWMasked512) + v.reset(OpAMD64VPSUBSWMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulHighInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedSubInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulHighInt16x8 x y mask) - // result: (VPMULHWMasked128 y x (VPMOVVec16x8ToM mask)) + // match: (MaskedSaturatedSubInt16x8 x y mask) + // result: (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULHWMasked128) + v.reset(OpAMD64VPSUBSWMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulHighUint16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedSubInt8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulHighUint16x16 x y mask) - // result: (VPMULHUWMasked256 y x (VPMOVVec16x16ToM mask)) + // match: (MaskedSaturatedSubInt8x16 x y mask) + // result: (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULHUWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v.reset(OpAMD64VPSUBSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulHighUint16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedSubInt8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulHighUint16x32 x y mask) - // result: (VPMULHUWMasked512 y x (VPMOVVec16x32ToM mask)) + // match: (MaskedSaturatedSubInt8x32 x y mask) + // result: (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULHUWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v.reset(OpAMD64VPSUBSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulHighUint16x8(v *Value) bool { +func 
rewriteValueAMD64_OpMaskedSaturatedSubInt8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulHighUint16x8 x y mask) - // result: (VPMULHUWMasked128 y x (VPMOVVec16x8ToM mask)) + // match: (MaskedSaturatedSubInt8x64 x y mask) + // result: (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULHUWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v.reset(OpAMD64VPSUBSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulLowInt16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulLowInt16x16 x y mask) - // result: (VPMULLWMasked256 y x (VPMOVVec16x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulLowInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulLowInt16x32 x y mask) - // result: (VPMULLWMasked512 y x (VPMOVVec16x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulLowInt16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulLowInt16x8 x y mask) - // result: (VPMULLWMasked128 y x (VPMOVVec16x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulLowInt32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulLowInt32x16 x y mask) - // result: (VPMULLDMasked512 y x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulLowInt32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulLowInt32x4 x y mask) - // result: (VPMULLDMasked128 y x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulLowInt32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulLowInt32x8 x y mask) - // result: (VPMULLDMasked256 y x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulLowInt64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulLowInt64x2 x y mask) - // result: (VPMULLQMasked128 y x (VPMOVVec64x2ToM mask)) - 
for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulLowInt64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulLowInt64x4 x y mask) - // result: (VPMULLQMasked256 y x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulLowInt64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulLowInt64x8 x y mask) - // result: (VPMULLQMasked512 y x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualFloat32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualFloat32x16 x y mask) - // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [4] y x (VPMOVVec32x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualFloat32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualFloat32x4 x y mask) - // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [4] y x (VPMOVVec32x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualFloat32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualFloat32x8 x y mask) - // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [4] y x (VPMOVVec32x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualFloat64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualFloat64x2 x y mask) - // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [4] y x (VPMOVVec64x2ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func 
rewriteValueAMD64_OpMaskedNotEqualFloat64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualFloat64x4 x y mask) - // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [4] y x (VPMOVVec64x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualFloat64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualFloat64x8 x y mask) - // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [4] y x (VPMOVVec64x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualInt16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [4] y x (VPMOVVec16x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [4] y x (VPMOVVec16x32ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualInt16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [4] y x (VPMOVVec16x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualInt32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [4] y x (VPMOVVec32x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualInt32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [4] y x (VPMOVVec32x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualInt32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [4] y x (VPMOVVec32x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualInt64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [4] y x (VPMOVVec64x2ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualInt64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [4] y x (VPMOVVec64x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualInt64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [4] y x (VPMOVVec64x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualInt8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [4] y x (VPMOVVec8x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPCMPBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualInt8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [4] y x (VPMOVVec8x32ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualInt8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [4] y x (VPMOVVec8x64ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualUint16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [4] y x (VPMOVVec16x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualUint16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [4] y x (VPMOVVec16x32ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualUint16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [4] y x (VPMOVVec16x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualUint32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [4] y x (VPMOVVec32x16ToM 
mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualUint32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [4] y x (VPMOVVec32x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualUint32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [4] y x (VPMOVVec32x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualUint64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [4] y x (VPMOVVec64x2ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualUint64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [4] y x (VPMOVVec64x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualUint64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [4] y x (VPMOVVec64x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualUint8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: 
(MaskedNotEqualUint8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [4] y x (VPMOVVec8x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualUint8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [4] y x (VPMOVVec8x32ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualUint8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [4] y x (VPMOVVec8x64ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrFloat32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrFloat32x16 x y mask) - // result: (VORPSMasked512 y x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VORPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrFloat32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrFloat32x4 x y mask) - // result: (VORPSMasked128 y x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VORPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrFloat32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrFloat32x8 x y mask) - // result: (VORPSMasked256 y x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VORPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrFloat64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrFloat64x2 x y mask) - // result: (VORPDMasked128 y x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VORPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrFloat64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: 
(MaskedOrFloat64x4 x y mask) - // result: (VORPDMasked256 y x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VORPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrFloat64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrFloat64x8 x y mask) - // result: (VORPDMasked512 y x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VORPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrInt32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrInt32x16 x y mask) - // result: (VPORDMasked512 y x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPORDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrInt32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrInt32x4 x y mask) - // result: (VPORDMasked128 y x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPORDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrInt32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrInt32x8 x y mask) - // result: (VPORDMasked256 y x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPORDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrInt64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrInt64x2 x y mask) - // result: (VPORQMasked128 y x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPORQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrInt64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrInt64x4 x y mask) - // result: (VPORQMasked256 y x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPORQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrInt64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrInt64x8 x y mask) - // result: (VPORQMasked512 y x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPORQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrUint32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrUint32x16 x y mask) - // result: (VPORDMasked512 y x (VPMOVVec32x16ToM mask)) 
- for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPORDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrUint32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrUint32x4 x y mask) - // result: (VPORDMasked128 y x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPORDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrUint32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrUint32x8 x y mask) - // result: (VPORDMasked256 y x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPORDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrUint64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrUint64x2 x y mask) - // result: (VPORQMasked128 y x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPORQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrUint64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrUint64x4 x y mask) - // result: (VPORQMasked256 y x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPORQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrUint64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrUint64x8 x y mask) - // result: (VPORQMasked512 y x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPORQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountInt16x16 x mask) - // result: (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountInt16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountInt16x32 x mask) - // result: (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountInt16x8 x mask) - // result: (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, 
types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountInt32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountInt32x16 x mask) - // result: (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountInt32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountInt32x4 x mask) - // result: (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountInt32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountInt32x8 x mask) - // result: (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountInt64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountInt64x2 x mask) - // result: (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountInt64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountInt64x4 x mask) - // result: (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountInt64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountInt64x8 x mask) - // result: (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountInt8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountInt8x16 x mask) - // result: (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountInt8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountInt8x32 x mask) - // result: (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountInt8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountInt8x64 x mask) - // 
result: (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountUint16x16 x mask) - // result: (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountUint16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountUint16x32 x mask) - // result: (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountUint16x8 x mask) - // result: (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountUint32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountUint32x16 x mask) - // result: (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountUint32x4 x mask) - // result: (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountUint32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountUint32x8 x mask) - // result: (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountUint64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountUint64x2 x mask) - // result: (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountUint64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountUint64x4 x mask) - // result: (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, 
v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountUint64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountUint64x8 x mask) - // result: (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountUint8x16 x mask) - // result: (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountUint8x32 x mask) - // result: (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountUint8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountUint8x64 x mask) - // result: (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSaturatedAddInt16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedAddInt16x16 x y mask) - // result: (VPADDSWMasked256 y x (VPMOVVec16x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSaturatedAddInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedAddInt16x32 x y mask) - // result: (VPADDSWMasked512 y x (VPMOVVec16x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSaturatedAddInt16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedAddInt16x8 x y mask) - // result: (VPADDSWMasked128 y x (VPMOVVec16x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSaturatedAddInt8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedAddInt8x16 x y mask) - // result: (VPADDSBMasked128 y x (VPMOVVec8x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func 
rewriteValueAMD64_OpMaskedSaturatedAddInt8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedAddInt8x32 x y mask) - // result: (VPADDSBMasked256 y x (VPMOVVec8x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSaturatedAddInt8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedAddInt8x64 x y mask) - // result: (VPADDSBMasked512 y x (VPMOVVec8x64ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSaturatedAddUint16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedAddUint16x16 x y mask) - // result: (VPADDSWMasked256 y x (VPMOVVec16x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSaturatedAddUint16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedAddUint16x32 x y mask) - // result: (VPADDSWMasked512 y x (VPMOVVec16x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSaturatedAddUint16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedAddUint16x8 x y mask) - // result: (VPADDSWMasked128 y x (VPMOVVec16x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSaturatedAddUint8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedAddUint8x16 x y mask) - // result: (VPADDSBMasked128 y x (VPMOVVec8x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSaturatedAddUint8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedAddUint8x32 x y mask) - // result: (VPADDSBMasked256 y x (VPMOVVec8x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSaturatedAddUint8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedAddUint8x64 x y mask) - // result: (VPADDSBMasked512 y x (VPMOVVec8x64ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked512) - v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSaturatedSubInt16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedSubInt16x16 x y mask) - // result: (VPSUBSWMasked256 y x (VPMOVVec16x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSaturatedSubInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedSubInt16x32 x y mask) - // result: (VPSUBSWMasked512 y x (VPMOVVec16x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSaturatedSubInt16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedSubInt16x8 x y mask) - // result: (VPSUBSWMasked128 y x (VPMOVVec16x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSaturatedSubInt8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedSubInt8x16 x y mask) - // result: (VPSUBSBMasked128 y x (VPMOVVec8x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSaturatedSubInt8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedSubInt8x32 x y mask) - // result: (VPSUBSBMasked256 y x (VPMOVVec8x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSaturatedSubInt8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedSubInt8x64 x y mask) - // result: (VPSUBSBMasked512 y x (VPMOVVec8x64ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSaturatedSubUint16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedSubUint16x16 x y mask) - // result: (VPSUBSWMasked256 y x (VPMOVVec16x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSaturatedSubUint16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedSubUint16x32 x y mask) - // result: (VPSUBSWMasked512 y x 
(VPMOVVec16x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSaturatedSubUint16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedSubUint16x8 x y mask) - // result: (VPSUBSWMasked128 y x (VPMOVVec16x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSaturatedSubUint8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedSubUint8x16 x y mask) - // result: (VPSUBSBMasked128 y x (VPMOVVec8x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSaturatedSubUint8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedSubUint8x32 x y mask) - // result: (VPSUBSBMasked256 y x (VPMOVVec8x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSaturatedSubUint8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedSubUint8x64 x y mask) - // result: (VPSUBSBMasked512 y x (VPMOVVec8x64ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSqrtFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSqrtFloat32x16 x mask) - // result: (VSQRTPSMasked512 x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VSQRTPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSqrtFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSqrtFloat32x4 x mask) - // result: (VSQRTPSMasked128 x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VSQRTPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSqrtFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSqrtFloat32x8 x mask) - // result: (VSQRTPSMasked256 x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VSQRTPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSqrtFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSqrtFloat64x2 x mask) - // result: (VSQRTPDMasked128 x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - mask := v_1 - 
v.reset(OpAMD64VSQRTPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSqrtFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSqrtFloat64x4 x mask) - // result: (VSQRTPDMasked256 x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VSQRTPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSqrtFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSqrtFloat64x8 x mask) - // result: (VSQRTPDMasked512 x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VSQRTPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubFloat32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubFloat32x16 x y mask) - // result: (VADDPSMasked512 y x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VADDPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubFloat32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubFloat32x4 x y mask) - // result: (VADDPSMasked128 y x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VADDPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubFloat32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubFloat32x8 x y mask) - // result: (VADDPSMasked256 y x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VADDPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubFloat64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubFloat64x2 x y mask) - // result: (VADDPDMasked128 y x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VADDPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubFloat64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubFloat64x4 x y mask) - // result: (VADDPDMasked256 y x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VADDPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubFloat64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubFloat64x8 x y mask) - // result: (VADDPDMasked512 y x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VADDPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) 
- v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubInt16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubInt16x16 x y mask) - // result: (VPSUBWMasked256 y x (VPMOVVec16x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubInt16x32 x y mask) - // result: (VPSUBWMasked512 y x (VPMOVVec16x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubInt16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubInt16x8 x y mask) - // result: (VPSUBWMasked128 y x (VPMOVVec16x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubInt32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubInt32x16 x y mask) - // result: (VPSUBDMasked512 y x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubInt32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubInt32x4 x y mask) - // result: (VPSUBDMasked128 y x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubInt32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubInt32x8 x y mask) - // result: (VPSUBDMasked256 y x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubInt64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubInt64x2 x y mask) - // result: (VPSUBQMasked128 y x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubInt64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubInt64x4 x y mask) - // result: (VPSUBQMasked256 y x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true 
- } -} -func rewriteValueAMD64_OpMaskedSubInt64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubInt64x8 x y mask) - // result: (VPSUBQMasked512 y x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubInt8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubInt8x16 x y mask) - // result: (VPSUBBMasked128 y x (VPMOVVec8x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubInt8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubInt8x32 x y mask) - // result: (VPSUBBMasked256 y x (VPMOVVec8x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubInt8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubInt8x64 x y mask) - // result: (VPSUBBMasked512 y x (VPMOVVec8x64ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubUint16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubUint16x16 x y mask) - // result: (VPSUBWMasked256 y x (VPMOVVec16x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubUint16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubUint16x32 x y mask) - // result: (VPSUBWMasked512 y x (VPMOVVec16x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubUint16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubUint16x8 x y mask) - // result: (VPSUBWMasked128 y x (VPMOVVec16x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubUint32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubUint32x16 x y mask) - // result: (VPSUBDMasked512 y x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func 
rewriteValueAMD64_OpMaskedSubUint32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubUint32x4 x y mask) - // result: (VPSUBDMasked128 y x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubUint32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubUint32x8 x y mask) - // result: (VPSUBDMasked256 y x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubUint64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubUint64x2 x y mask) - // result: (VPSUBQMasked128 y x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubUint64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubUint64x4 x y mask) - // result: (VPSUBQMasked256 y x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubUint64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubUint64x8 x y mask) - // result: (VPSUBQMasked512 y x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubUint8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubUint8x16 x y mask) - // result: (VPSUBBMasked128 y x (VPMOVVec8x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubUint8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubUint8x32 x y mask) - // result: (VPSUBBMasked256 y x (VPMOVVec8x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubUint8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubUint8x64 x y mask) - // result: (VPSUBBMasked512 y x (VPMOVVec8x64ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedXorFloat32x16(v *Value) 
bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorFloat32x16 x y mask) - // result: (VXORPSMasked512 y x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VXORPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedXorFloat32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorFloat32x4 x y mask) - // result: (VXORPSMasked128 y x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VXORPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedXorFloat32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorFloat32x8 x y mask) - // result: (VXORPSMasked256 y x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VXORPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedXorFloat64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorFloat64x2 x y mask) - // result: (VXORPDMasked128 y x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VXORPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedXorFloat64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorFloat64x4 x y mask) - // result: (VXORPDMasked256 y x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VXORPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedXorFloat64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorFloat64x8 x y mask) - // result: (VXORPDMasked512 y x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VXORPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedXorInt32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorInt32x16 x y mask) - // result: (VPXORDMasked512 y x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedXorInt32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorInt32x4 x y mask) - // result: (VPXORDMasked128 y x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedXorInt32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := 
v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorInt32x8 x y mask) - // result: (VPXORDMasked256 y x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedXorInt64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorInt64x2 x y mask) - // result: (VPXORQMasked128 y x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedXorInt64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorInt64x4 x y mask) - // result: (VPXORQMasked256 y x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedXorInt64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorInt64x8 x y mask) - // result: (VPXORQMasked512 y x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedXorUint32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorUint32x16 x y mask) - // result: (VPXORDMasked512 y x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedXorUint32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorUint32x4 x y mask) - // result: (VPXORDMasked128 y x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedXorUint32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorUint32x8 x y mask) - // result: (VPXORDMasked256 y x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedXorUint64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorUint64x2 x y mask) - // result: (VPXORQMasked128 y x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedXorUint64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // 
match: (MaskedXorUint64x4 x y mask) - // result: (VPXORQMasked256 y x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedXorUint64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorUint64x8 x y mask) - // result: (VPXORQMasked512 y x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMax32F(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Max32F x y) - // result: (Neg32F (Min32F (Neg32F x) (Neg32F y))) - for { - t := v.Type - x := v_0 - y := v_1 - v.reset(OpNeg32F) - v.Type = t - v0 := b.NewValue0(v.Pos, OpMin32F, t) - v1 := b.NewValue0(v.Pos, OpNeg32F, t) - v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpNeg32F, t) - v2.AddArg(y) - v0.AddArg2(v1, v2) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMax64F(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Max64F x y) - // result: (Neg64F (Min64F (Neg64F x) (Neg64F y))) - for { - t := v.Type - x := v_0 - y := v_1 - v.reset(OpNeg64F) - v.Type = t - v0 := b.NewValue0(v.Pos, OpMin64F, t) - v1 := b.NewValue0(v.Pos, OpNeg64F, t) - v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpNeg64F, t) - v2.AddArg(y) - v0.AddArg2(v1, v2) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaxFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxFloat32x16 x y) - // result: (VMAXPS512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VMAXPS512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxFloat32x4 x y) - // result: (VMAXPS128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VMAXPS128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxFloat32x8 x y) - // result: (VMAXPS256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VMAXPS256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxFloat64x2 x y) - // result: (VMAXPD128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VMAXPD128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxFloat64x4 x y) - // result: (VMAXPD256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VMAXPD256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxFloat64x8 x y) - // result: (VMAXPD512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VMAXPD512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxInt16x16 x y) - // result: (VPMAXSW256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMAXSW256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxInt16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxInt16x32 x y) - // 
result: (VPMAXSW512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMAXSW512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxInt16x8 x y) - // result: (VPMAXSW128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMAXSW128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxInt32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxInt32x16 x y) - // result: (VPMAXSD512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMAXSD512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxInt32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxInt32x4 x y) - // result: (VPMAXSD128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMAXSD128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxInt32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxInt32x8 x y) - // result: (VPMAXSD256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMAXSD256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxInt64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxInt64x2 x y) - // result: (VPMAXSQ128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMAXSQ128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxInt64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxInt64x4 x y) - // result: (VPMAXSQ256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMAXSQ256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxInt64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxInt64x8 x y) - // result: (VPMAXSQ512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMAXSQ512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxInt8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxInt8x16 x y) - // result: (VPMAXSB128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMAXSB128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxInt8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxInt8x32 x y) - // result: (VPMAXSB256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMAXSB256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxInt8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxInt8x64 x y) - // result: (VPMAXSB512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMAXSB512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxUint16x16 x y) - // result: (VPMAXUW256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMAXUW256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxUint16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxUint16x32 x y) - // result: (VPMAXUW512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMAXUW512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxUint16x8 x y) - // result: (VPMAXUW128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMAXUW128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxUint32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxUint32x16 x y) - // result: (VPMAXUD512 y x) - for { - x := v_0 - y := 
v_1 - v.reset(OpAMD64VPMAXUD512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxUint32x4 x y) - // result: (VPMAXUD128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMAXUD128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxUint32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxUint32x8 x y) - // result: (VPMAXUD256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMAXUD256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxUint64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxUint64x2 x y) - // result: (VPMAXUQ128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMAXUQ128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxUint64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxUint64x4 x y) - // result: (VPMAXUQ256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMAXUQ256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxUint64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxUint64x8 x y) - // result: (VPMAXUQ512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMAXUQ512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxUint8x16 x y) - // result: (VPMAXUB128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMAXUB128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxUint8x32 x y) - // result: (VPMAXUB256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMAXUB256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxUint8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxUint8x64 x y) - // result: (VPMAXUB512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMAXUB512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMin32F(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Min32F x y) - // result: (POR (MINSS (MINSS x y) x) (MINSS x y)) - for { - t := v.Type - x := v_0 - y := v_1 - v.reset(OpAMD64POR) - v0 := b.NewValue0(v.Pos, OpAMD64MINSS, t) - v1 := b.NewValue0(v.Pos, OpAMD64MINSS, t) - v1.AddArg2(x, y) - v0.AddArg2(v1, x) - v.AddArg2(v0, v1) - return true - } -} -func rewriteValueAMD64_OpMin64F(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Min64F x y) - // result: (POR (MINSD (MINSD x y) x) (MINSD x y)) - for { - t := v.Type - x := v_0 - y := v_1 - v.reset(OpAMD64POR) - v0 := b.NewValue0(v.Pos, OpAMD64MINSD, t) - v1 := b.NewValue0(v.Pos, OpAMD64MINSD, t) - v1.AddArg2(x, y) - v0.AddArg2(v1, x) - v.AddArg2(v0, v1) - return true - } -} -func rewriteValueAMD64_OpMinFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinFloat32x16 x y) - // result: (VMINPS512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VMINPS512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinFloat32x4 x y) - // result: (VMINPS128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VMINPS128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinFloat32x8 x y) - // result: 
(VMINPS256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VMINPS256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinFloat64x2 x y) - // result: (VMINPD128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VMINPD128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinFloat64x4 x y) - // result: (VMINPD256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VMINPD256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinFloat64x8 x y) - // result: (VMINPD512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VMINPD512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinInt16x16 x y) - // result: (VPMINSW256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMINSW256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinInt16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinInt16x32 x y) - // result: (VPMINSW512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMINSW512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinInt16x8 x y) - // result: (VPMINSW128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMINSW128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinInt32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinInt32x16 x y) - // result: (VPMINSD512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMINSD512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinInt32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinInt32x4 x y) - // result: (VPMINSD128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMINSD128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinInt32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinInt32x8 x y) - // result: (VPMINSD256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMINSD256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinInt64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinInt64x2 x y) - // result: (VPMINSQ128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMINSQ128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinInt64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinInt64x4 x y) - // result: (VPMINSQ256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMINSQ256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinInt64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinInt64x8 x y) - // result: (VPMINSQ512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMINSQ512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinInt8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinInt8x16 x y) - // result: (VPMINSB128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMINSB128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinInt8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinInt8x32 x y) - // result: (VPMINSB256 y x) - for { - x := v_0 - y := v_1 - 
v.reset(OpAMD64VPMINSB256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinInt8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinInt8x64 x y) - // result: (VPMINSB512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMINSB512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinUint16x16 x y) - // result: (VPMINUW256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMINUW256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinUint16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinUint16x32 x y) - // result: (VPMINUW512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMINUW512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinUint16x8 x y) - // result: (VPMINUW128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMINUW128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinUint32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinUint32x16 x y) - // result: (VPMINUD512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMINUD512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinUint32x4 x y) - // result: (VPMINUD128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMINUD128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinUint32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinUint32x8 x y) - // result: (VPMINUD256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMINUD256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinUint64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinUint64x2 x y) - // result: (VPMINUQ128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMINUQ128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinUint64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinUint64x4 x y) - // result: (VPMINUQ256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMINUQ256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinUint64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinUint64x8 x y) - // result: (VPMINUQ512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMINUQ512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinUint8x16 x y) - // result: (VPMINUB128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMINUB128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinUint8x32 x y) - // result: (VPMINUB256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMINUB256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinUint8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinUint8x64 x y) - // result: (VPMINUB512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMINUB512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMod16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Mod16 [a] x y) - // result: (Select1 (DIVW [a] x y)) - for { - a := 
auxIntToBool(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) - v0.AuxInt = boolToAuxInt(a) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMod16u(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Mod16u x y) - // result: (Select1 (DIVWU x y)) - for { - x := v_0 - y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMod32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Mod32 [a] x y) - // result: (Select1 (DIVL [a] x y)) - for { - a := auxIntToBool(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) - v0.AuxInt = boolToAuxInt(a) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMod32u(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Mod32u x y) - // result: (Select1 (DIVLU x y)) - for { - x := v_0 - y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32)) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMod64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Mod64 [a] x y) - // result: (Select1 (DIVQ [a] x y)) - for { - a := auxIntToBool(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64)) - v0.AuxInt = boolToAuxInt(a) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMod64u(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Mod64u x y) - // result: (Select1 (DIVQU x y)) - for { - x := v_0 - y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMod8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Mod8 x y) - // result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y))) - for { - x := v_0 - y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) - v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) - v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) - v2.AddArg(y) - v0.AddArg2(v1, v2) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMod8u(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Mod8u x y) - // result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) - for { - x := v_0 - y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) - v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) - v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) - v2.AddArg(y) - v0.AddArg2(v1, v2) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMove(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Move [0] _ _ mem) - // result: mem - for { - 
if auxIntToInt64(v.AuxInt) != 0 { - break - } - mem := v_2 - v.copyOf(mem) - return true - } - // match: (Move [1] dst src mem) - // result: (MOVBstore dst (MOVBload src mem) mem) - for { - if auxIntToInt64(v.AuxInt) != 1 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVBstore) - v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) - v0.AddArg2(src, mem) - v.AddArg3(dst, v0, mem) - return true - } - // match: (Move [2] dst src mem) - // result: (MOVWstore dst (MOVWload src mem) mem) - for { - if auxIntToInt64(v.AuxInt) != 2 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVWstore) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) - v0.AddArg2(src, mem) - v.AddArg3(dst, v0, mem) - return true - } - // match: (Move [4] dst src mem) - // result: (MOVLstore dst (MOVLload src mem) mem) - for { - if auxIntToInt64(v.AuxInt) != 4 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVLstore) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v0.AddArg2(src, mem) - v.AddArg3(dst, v0, mem) - return true - } - // match: (Move [8] dst src mem) - // result: (MOVQstore dst (MOVQload src mem) mem) - for { - if auxIntToInt64(v.AuxInt) != 8 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVQstore) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v0.AddArg2(src, mem) - v.AddArg3(dst, v0, mem) - return true - } - // match: (Move [16] dst src mem) - // result: (MOVOstore dst (MOVOload src mem) mem) - for { - if auxIntToInt64(v.AuxInt) != 16 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVOstore) - v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) - v0.AddArg2(src, mem) - v.AddArg3(dst, v0, mem) - return true - } - // match: (Move [32] dst src mem) - // result: (Move [16] (OffPtr dst [16]) (OffPtr src [16]) (Move [16] dst src mem)) - for { - if auxIntToInt64(v.AuxInt) != 32 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpMove) - v.AuxInt = int64ToAuxInt(16) - v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) - v0.AuxInt = int64ToAuxInt(16) - v0.AddArg(dst) - v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) - v1.AuxInt = int64ToAuxInt(16) - v1.AddArg(src) - v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) - v2.AuxInt = int64ToAuxInt(16) - v2.AddArg3(dst, src, mem) - v.AddArg3(v0, v1, v2) - return true - } - // match: (Move [48] dst src mem) - // result: (Move [32] (OffPtr dst [16]) (OffPtr src [16]) (Move [16] dst src mem)) - for { - if auxIntToInt64(v.AuxInt) != 48 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpMove) - v.AuxInt = int64ToAuxInt(32) - v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) - v0.AuxInt = int64ToAuxInt(16) - v0.AddArg(dst) - v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) - v1.AuxInt = int64ToAuxInt(16) - v1.AddArg(src) - v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) - v2.AuxInt = int64ToAuxInt(16) - v2.AddArg3(dst, src, mem) - v.AddArg3(v0, v1, v2) - return true - } - // match: (Move [64] dst src mem) - // result: (Move [32] (OffPtr dst [32]) (OffPtr src [32]) (Move [32] dst src mem)) - for { - if auxIntToInt64(v.AuxInt) != 64 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpMove) - v.AuxInt = int64ToAuxInt(32) - v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) - v0.AuxInt = int64ToAuxInt(32) - v0.AddArg(dst) - v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) - v1.AuxInt = int64ToAuxInt(32) - v1.AddArg(src) - v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) - v2.AuxInt = int64ToAuxInt(32) - v2.AddArg3(dst, src, mem) - 
v.AddArg3(v0, v1, v2) - return true - } - // match: (Move [3] dst src mem) - // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem)) - for { - if auxIntToInt64(v.AuxInt) != 3 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVBstore) - v.AuxInt = int32ToAuxInt(2) - v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) - v0.AuxInt = int32ToAuxInt(2) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) - return true - } - // match: (Move [5] dst src mem) - // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) - for { - if auxIntToInt64(v.AuxInt) != 5 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVBstore) - v.AuxInt = int32ToAuxInt(4) - v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) - v0.AuxInt = int32ToAuxInt(4) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) - return true - } - // match: (Move [6] dst src mem) - // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) - for { - if auxIntToInt64(v.AuxInt) != 6 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVWstore) - v.AuxInt = int32ToAuxInt(4) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) - v0.AuxInt = int32ToAuxInt(4) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) - return true - } - // match: (Move [7] dst src mem) - // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem)) - for { - if auxIntToInt64(v.AuxInt) != 7 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVLstore) - v.AuxInt = int32ToAuxInt(3) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v0.AuxInt = int32ToAuxInt(3) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) - return true - } - // match: (Move [9] dst src mem) - // result: (MOVBstore [8] dst (MOVBload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) - for { - if auxIntToInt64(v.AuxInt) != 9 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVBstore) - v.AuxInt = int32ToAuxInt(8) - v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) - v0.AuxInt = int32ToAuxInt(8) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) - return true - } - // match: (Move [10] dst src mem) - // result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) - for { - if auxIntToInt64(v.AuxInt) != 10 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVWstore) - v.AuxInt = int32ToAuxInt(8) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) - v0.AuxInt = int32ToAuxInt(8) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, 
typ.UInt64) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) - return true - } - // match: (Move [11] dst src mem) - // result: (MOVLstore [7] dst (MOVLload [7] src mem) (MOVQstore dst (MOVQload src mem) mem)) - for { - if auxIntToInt64(v.AuxInt) != 11 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVLstore) - v.AuxInt = int32ToAuxInt(7) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v0.AuxInt = int32ToAuxInt(7) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) - return true - } - // match: (Move [12] dst src mem) - // result: (MOVLstore [8] dst (MOVLload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) - for { - if auxIntToInt64(v.AuxInt) != 12 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVLstore) - v.AuxInt = int32ToAuxInt(8) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v0.AuxInt = int32ToAuxInt(8) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) - return true - } - // match: (Move [s] dst src mem) - // cond: s >= 13 && s <= 15 - // result: (MOVQstore [int32(s-8)] dst (MOVQload [int32(s-8)] src mem) (MOVQstore dst (MOVQload src mem) mem)) - for { - s := auxIntToInt64(v.AuxInt) - dst := v_0 - src := v_1 - mem := v_2 - if !(s >= 13 && s <= 15) { - break - } - v.reset(OpAMD64MOVQstore) - v.AuxInt = int32ToAuxInt(int32(s - 8)) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v0.AuxInt = int32ToAuxInt(int32(s - 8)) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) - return true - } - // match: (Move [s] dst src mem) - // cond: s > 16 && s%16 != 0 && s%16 <= 8 - // result: (Move [s-s%16] (OffPtr dst [s%16]) (OffPtr src [s%16]) (MOVQstore dst (MOVQload src mem) mem)) - for { - s := auxIntToInt64(v.AuxInt) - dst := v_0 - src := v_1 - mem := v_2 - if !(s > 16 && s%16 != 0 && s%16 <= 8) { - break - } - v.reset(OpMove) - v.AuxInt = int64ToAuxInt(s - s%16) - v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) - v0.AuxInt = int64ToAuxInt(s % 16) - v0.AddArg(dst) - v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) - v1.AuxInt = int64ToAuxInt(s % 16) - v1.AddArg(src) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v3.AddArg2(src, mem) - v2.AddArg3(dst, v3, mem) - v.AddArg3(v0, v1, v2) - return true - } - // match: (Move [s] dst src mem) - // cond: s > 16 && s%16 != 0 && s%16 > 8 - // result: (Move [s-s%16] (OffPtr dst [s%16]) (OffPtr src [s%16]) (MOVOstore dst (MOVOload src mem) mem)) - for { - s := auxIntToInt64(v.AuxInt) - dst := v_0 - src := v_1 - mem := v_2 - if !(s > 16 && s%16 != 0 && s%16 > 8) { - break - } - v.reset(OpMove) - v.AuxInt = int64ToAuxInt(s - s%16) - v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) - v0.AuxInt = int64ToAuxInt(s % 16) - v0.AddArg(dst) - v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) - v1.AuxInt = int64ToAuxInt(s % 16) - v1.AddArg(src) - v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) - v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) - v3.AddArg2(src, mem) - v2.AddArg3(dst, v3, mem) - v.AddArg3(v0, 
v1, v2) - return true - } - // match: (Move [s] dst src mem) - // cond: s > 64 && s <= 16*64 && s%16 == 0 && logLargeCopy(v, s) - // result: (DUFFCOPY [s] dst src mem) - for { - s := auxIntToInt64(v.AuxInt) - dst := v_0 - src := v_1 - mem := v_2 - if !(s > 64 && s <= 16*64 && s%16 == 0 && logLargeCopy(v, s)) { - break - } - v.reset(OpAMD64DUFFCOPY) - v.AuxInt = int64ToAuxInt(s) - v.AddArg3(dst, src, mem) - return true - } - // match: (Move [s] dst src mem) - // cond: s > 16*64 && s%8 == 0 && logLargeCopy(v, s) - // result: (REPMOVSQ dst src (MOVQconst [s/8]) mem) - for { - s := auxIntToInt64(v.AuxInt) - dst := v_0 - src := v_1 - mem := v_2 - if !(s > 16*64 && s%8 == 0 && logLargeCopy(v, s)) { - break - } - v.reset(OpAMD64REPMOVSQ) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) - v0.AuxInt = int64ToAuxInt(s / 8) - v.AddArg4(dst, src, v0, mem) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpMulByPowOf2Float32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedSubUint16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulByPowOf2Float32x16 x y) - // result: (VSCALEFPS512 y x) + b := v.Block + // match: (MaskedSaturatedSubUint16x16 x y mask) + // result: (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VSCALEFPS512) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulByPowOf2Float32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedSubUint16x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulByPowOf2Float32x4 x y) - // result: (VSCALEFPS128 y x) + b := v.Block + // match: (MaskedSaturatedSubUint16x32 x y mask) + // result: (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VSCALEFPS128) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulByPowOf2Float32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedSubUint16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulByPowOf2Float32x8 x y) - // result: (VSCALEFPS256 y x) + b := v.Block + // match: (MaskedSaturatedSubUint16x8 x y mask) + // result: (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VSCALEFPS256) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulByPowOf2Float64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedSubUint8x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulByPowOf2Float64x2 x y) - // result: (VSCALEFPD128 y x) + b := v.Block + // match: (MaskedSaturatedSubUint8x16 x y mask) + // result: (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VSCALEFPD128) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulByPowOf2Float64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedSubUint8x32(v 
*Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulByPowOf2Float64x4 x y) - // result: (VSCALEFPD256 y x) + b := v.Block + // match: (MaskedSaturatedSubUint8x32 x y mask) + // result: (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VSCALEFPD256) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulByPowOf2Float64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedSubUint8x64(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulByPowOf2Float64x8 x y) - // result: (VSCALEFPD512 y x) + b := v.Block + // match: (MaskedSaturatedSubUint8x64 x y mask) + // result: (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VSCALEFPD512) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulEvenWidenInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedSqrtFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulEvenWidenInt32x4 x y) - // result: (VPMULDQ128 y x) + b := v.Block + // match: (MaskedSqrtFloat32x16 x mask) + // result: (VSQRTPSMasked512 x (VPMOVVec32x16ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPMULDQ128) - v.AddArg2(y, x) + mask := v_1 + v.reset(OpAMD64VSQRTPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMulEvenWidenInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedSqrtFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulEvenWidenInt32x8 x y) - // result: (VPMULDQ256 y x) + b := v.Block + // match: (MaskedSqrtFloat32x4 x mask) + // result: (VSQRTPSMasked128 x (VPMOVVec32x4ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPMULDQ256) - v.AddArg2(y, x) + mask := v_1 + v.reset(OpAMD64VSQRTPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMulEvenWidenInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedSqrtFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulEvenWidenInt64x2 x y) - // result: (VPMULDQ128 y x) + b := v.Block + // match: (MaskedSqrtFloat32x8 x mask) + // result: (VSQRTPSMasked256 x (VPMOVVec32x8ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPMULDQ128) - v.AddArg2(y, x) + mask := v_1 + v.reset(OpAMD64VSQRTPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMulEvenWidenInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedSqrtFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulEvenWidenInt64x4 x y) - // result: (VPMULDQ256 y x) + b := v.Block + // match: (MaskedSqrtFloat64x2 x mask) + // result: (VSQRTPDMasked128 x (VPMOVVec64x2ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPMULDQ256) - v.AddArg2(y, x) + mask := v_1 + v.reset(OpAMD64VSQRTPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMulEvenWidenInt64x8(v *Value) bool { +func 
rewriteValueAMD64_OpMaskedSqrtFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulEvenWidenInt64x8 x y) - // result: (VPMULDQ512 y x) + b := v.Block + // match: (MaskedSqrtFloat64x4 x mask) + // result: (VSQRTPDMasked256 x (VPMOVVec64x4ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPMULDQ512) - v.AddArg2(y, x) + mask := v_1 + v.reset(OpAMD64VSQRTPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMulEvenWidenUint32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedSqrtFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulEvenWidenUint32x4 x y) - // result: (VPMULUDQ128 y x) + b := v.Block + // match: (MaskedSqrtFloat64x8 x mask) + // result: (VSQRTPDMasked512 x (VPMOVVec64x8ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPMULUDQ128) - v.AddArg2(y, x) + mask := v_1 + v.reset(OpAMD64VSQRTPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMulEvenWidenUint32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubFloat32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulEvenWidenUint32x8 x y) - // result: (VPMULUDQ256 y x) + b := v.Block + // match: (MaskedSubFloat32x16 x y mask) + // result: (VADDPSMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULUDQ256) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VADDPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulEvenWidenUint64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubFloat32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulEvenWidenUint64x2 x y) - // result: (VPMULUDQ128 y x) + b := v.Block + // match: (MaskedSubFloat32x4 x y mask) + // result: (VADDPSMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULUDQ128) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VADDPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulEvenWidenUint64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubFloat32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulEvenWidenUint64x4 x y) - // result: (VPMULUDQ256 y x) + b := v.Block + // match: (MaskedSubFloat32x8 x y mask) + // result: (VADDPSMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULUDQ256) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VADDPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulEvenWidenUint64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubFloat64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulEvenWidenUint64x8 x y) - // result: (VPMULUDQ512 y x) + b := v.Block + // match: (MaskedSubFloat64x2 x y mask) + // result: (VADDPDMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULUDQ512) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VADDPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func 
rewriteValueAMD64_OpMulFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubFloat64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulFloat32x16 x y) - // result: (VMULPS512 y x) + b := v.Block + // match: (MaskedSubFloat64x4 x y mask) + // result: (VADDPDMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VMULPS512) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VADDPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubFloat64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulFloat32x4 x y) - // result: (VMULPS128 y x) + b := v.Block + // match: (MaskedSubFloat64x8 x y mask) + // result: (VADDPDMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VMULPS128) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VADDPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubInt16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulFloat32x8 x y) - // result: (VMULPS256 y x) + b := v.Block + // match: (MaskedSubInt16x16 x y mask) + // result: (VPSUBWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VMULPS256) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubInt16x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulFloat64x2 x y) - // result: (VMULPD128 y x) + b := v.Block + // match: (MaskedSubInt16x32 x y mask) + // result: (VPSUBWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VMULPD128) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubInt16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulFloat64x4 x y) - // result: (VMULPD256 y x) + b := v.Block + // match: (MaskedSubInt16x8 x y mask) + // result: (VPSUBWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VMULPD256) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubInt32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulFloat64x8 x y) - // result: (VMULPD512 y x) + b := v.Block + // match: (MaskedSubInt32x16 x y mask) + // result: (VPSUBDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VMULPD512) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func 
rewriteValueAMD64_OpMulHighInt16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubInt32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulHighInt16x16 x y) - // result: (VPMULHW256 y x) + b := v.Block + // match: (MaskedSubInt32x4 x y mask) + // result: (VPSUBDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULHW256) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulHighInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubInt32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulHighInt16x32 x y) - // result: (VPMULHW512 y x) + b := v.Block + // match: (MaskedSubInt32x8 x y mask) + // result: (VPSUBDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULHW512) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulHighInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubInt64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulHighInt16x8 x y) - // result: (VPMULHW128 y x) + b := v.Block + // match: (MaskedSubInt64x2 x y mask) + // result: (VPSUBQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULHW128) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulHighUint16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubInt64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulHighUint16x16 x y) - // result: (VPMULHUW256 y x) + b := v.Block + // match: (MaskedSubInt64x4 x y mask) + // result: (VPSUBQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULHUW256) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulHighUint16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubInt64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulHighUint16x32 x y) - // result: (VPMULHUW512 y x) + b := v.Block + // match: (MaskedSubInt64x8 x y mask) + // result: (VPSUBQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULHUW512) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulHighUint16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubInt8x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulHighUint16x8 x y) - // result: (VPMULHUW128 y x) + b := v.Block + // match: (MaskedSubInt8x16 x y mask) + // result: (VPSUBBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULHUW128) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func 
rewriteValueAMD64_OpMulLowInt16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubInt8x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulLowInt16x16 x y) - // result: (VPMULLW256 y x) + b := v.Block + // match: (MaskedSubInt8x32 x y mask) + // result: (VPSUBBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULLW256) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulLowInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubInt8x64(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulLowInt16x32 x y) - // result: (VPMULLW512 y x) + b := v.Block + // match: (MaskedSubInt8x64 x y mask) + // result: (VPSUBBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULLW512) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulLowInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubUint16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulLowInt16x8 x y) - // result: (VPMULLW128 y x) + b := v.Block + // match: (MaskedSubUint16x16 x y mask) + // result: (VPSUBWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULLW128) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulLowInt32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubUint16x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulLowInt32x16 x y) - // result: (VPMULLD512 y x) + b := v.Block + // match: (MaskedSubUint16x32 x y mask) + // result: (VPSUBWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULLD512) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulLowInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubUint16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulLowInt32x4 x y) - // result: (VPMULLD128 y x) + b := v.Block + // match: (MaskedSubUint16x8 x y mask) + // result: (VPSUBWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULLD128) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulLowInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubUint32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulLowInt32x8 x y) - // result: (VPMULLD256 y x) + b := v.Block + // match: (MaskedSubUint32x16 x y mask) + // result: (VPSUBDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULLD256) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func 
rewriteValueAMD64_OpMulLowInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubUint32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulLowInt64x2 x y) - // result: (VPMULLQ128 y x) + b := v.Block + // match: (MaskedSubUint32x4 x y mask) + // result: (VPSUBDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULLQ128) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulLowInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubUint32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulLowInt64x4 x y) - // result: (VPMULLQ256 y x) + b := v.Block + // match: (MaskedSubUint32x8 x y mask) + // result: (VPSUBDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULLQ256) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulLowInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubUint64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulLowInt64x8 x y) - // result: (VPMULLQ512 y x) + b := v.Block + // match: (MaskedSubUint64x2 x y mask) + // result: (VPSUBQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULLQ512) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNeg32F(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Neg32F x) - // result: (PXOR x (MOVSSconst [float32(math.Copysign(0, -1))])) + // match: (MaskedSubUint64x4 x y mask) + // result: (VPSUBQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 - v.reset(OpAMD64PXOR) - v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32) - v0.AuxInt = float32ToAuxInt(float32(math.Copysign(0, -1))) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNeg64F(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Neg64F x) - // result: (PXOR x (MOVSDconst [math.Copysign(0, -1)])) + // match: (MaskedSubUint64x8 x y mask) + // result: (VPSUBQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 - v.reset(OpAMD64PXOR) - v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64) - v0.AuxInt = float64ToAuxInt(math.Copysign(0, -1)) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNeq16(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubUint8x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Neq16 x y) - // result: (SETNE (CMPW x y)) + // match: (MaskedSubUint8x16 x y mask) + // result: 
(VPSUBBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPSUBBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNeq32(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubUint8x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Neq32 x y) - // result: (SETNE (CMPL x y)) + // match: (MaskedSubUint8x32 x y mask) + // result: (VPSUBBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPSUBBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNeq32F(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubUint8x64(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Neq32F x y) - // result: (SETNEF (UCOMISS x y)) + // match: (MaskedSubUint8x64 x y mask) + // result: (VPSUBBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETNEF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPSUBBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNeq64(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorFloat32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Neq64 x y) - // result: (SETNE (CMPQ x y)) + // match: (MaskedXorFloat32x16 x y mask) + // result: (VXORPSMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VXORPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNeq64F(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorFloat32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Neq64F x y) - // result: (SETNEF (UCOMISD x y)) + // match: (MaskedXorFloat32x4 x y mask) + // result: (VXORPSMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETNEF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VXORPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNeq8(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorFloat32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Neq8 x y) - // result: (SETNE (CMPB x y)) + // match: (MaskedXorFloat32x8 x y mask) + // result: (VXORPSMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VXORPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + 
v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedXorFloat64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedXorFloat64x2 x y mask) + // result: (VXORPDMasked128 x y (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VXORPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNeqB(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorFloat64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (NeqB x y) - // result: (SETNE (CMPB x y)) + // match: (MaskedXorFloat64x4 x y mask) + // result: (VXORPDMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VXORPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNeqPtr(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorFloat64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (NeqPtr x y) - // result: (SETNE (CMPQ x y)) + // match: (MaskedXorFloat64x8 x y mask) + // result: (VXORPDMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VXORPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNot(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Not x) - // result: (XORLconst [1] x) + b := v.Block + // match: (MaskedXorInt32x16 x y mask) + // result: (VPXORDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 - v.reset(OpAMD64XORLconst) - v.AuxInt = int32ToAuxInt(1) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorInt32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualFloat32x16 x y) - // result: (VPMOVMToVec32x16 (VCMPPS512 [4] y x)) + // match: (MaskedXorInt32x4 x y mask) + // result: (VPXORDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPXORDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorInt32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (NotEqualFloat32x4 x y) - // result: (VCMPPS128 [4] y x) + b := v.Block + // match: (MaskedXorInt32x8 x y mask) + // result: (VPXORDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(4) - 
v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPXORDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorInt64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (NotEqualFloat32x8 x y) - // result: (VCMPPS256 [4] y x) + b := v.Block + // match: (MaskedXorInt64x2 x y mask) + // result: (VPXORQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(4) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPXORQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorInt64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (NotEqualFloat64x2 x y) - // result: (VCMPPD128 [4] y x) + b := v.Block + // match: (MaskedXorInt64x4 x y mask) + // result: (VPXORQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(4) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPXORQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorInt64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (NotEqualFloat64x4 x y) - // result: (VCMPPD256 [4] y x) + b := v.Block + // match: (MaskedXorInt64x8 x y mask) + // result: (VPXORQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(4) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPXORQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorUint32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualFloat64x8 x y) - // result: (VPMOVMToVec64x8 (VCMPPD512 [4] y x)) + // match: (MaskedXorUint32x16 x y mask) + // result: (VPXORDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPXORDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualInt16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorUint32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPW256 [4] y x)) + // match: (MaskedXorUint32x4 x y mask) + // result: (VPXORDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPXORDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, 
v0) return true } } -func rewriteValueAMD64_OpNotEqualInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorUint32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPW512 [4] y x)) + // match: (MaskedXorUint32x8 x y mask) + // result: (VPXORDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPXORDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorUint64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPW128 [4] y x)) + // match: (MaskedXorUint64x2 x y mask) + // result: (VPXORQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPXORQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualInt32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorUint64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPD512 [4] y x)) + // match: (MaskedXorUint64x4 x y mask) + // result: (VPXORQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPXORQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorUint64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPD128 [4] y x)) + // match: (MaskedXorUint64x8 x y mask) + // result: (VPXORQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPXORQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMax32F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPD256 [4] y x)) + // match: (Max32F x y) + // result: (Neg32F (Min32F (Neg32F x) (Neg32F y))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) + 
v.reset(OpNeg32F) + v.Type = t + v0 := b.NewValue0(v.Pos, OpMin32F, t) + v1 := b.NewValue0(v.Pos, OpNeg32F, t) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpNeg32F, t) + v2.AddArg(y) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpNotEqualInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMax64F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPQ128 [4] y x)) + // match: (Max64F x y) + // result: (Neg64F (Min64F (Neg64F x) (Neg64F y))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) + v.reset(OpNeg64F) + v.Type = t + v0 := b.NewValue0(v.Pos, OpMin64F, t) + v1 := b.NewValue0(v.Pos, OpNeg64F, t) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpNeg64F, t) + v2.AddArg(y) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpNotEqualInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMin32F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPQ256 [4] y x)) + // match: (Min32F x y) + // result: (POR (MINSS (MINSS x y) x) (MINSS x y)) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) - v.AddArg(v0) + v.reset(OpAMD64POR) + v0 := b.NewValue0(v.Pos, OpAMD64MINSS, t) + v1 := b.NewValue0(v.Pos, OpAMD64MINSS, t) + v1.AddArg2(x, y) + v0.AddArg2(v1, x) + v.AddArg2(v0, v1) return true } } -func rewriteValueAMD64_OpNotEqualInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMin64F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPQ512 [4] y x)) + // match: (Min64F x y) + // result: (POR (MINSD (MINSD x y) x) (MINSD x y)) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) - v.AddArg(v0) + v.reset(OpAMD64POR) + v0 := b.NewValue0(v.Pos, OpAMD64MINSD, t) + v1 := b.NewValue0(v.Pos, OpAMD64MINSD, t) + v1.AddArg2(x, y) + v0.AddArg2(v1, x) + v.AddArg2(v0, v1) return true } } -func rewriteValueAMD64_OpNotEqualInt8x16(v *Value) bool { +func rewriteValueAMD64_OpMod16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (NotEqualInt8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPB128 [4] y x)) + // match: (Mod16 [a] x y) + // result: (Select1 (DIVW [a] x y)) for { + a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) + v0.AuxInt = boolToAuxInt(a) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpNotEqualInt8x32(v *Value) bool { +func rewriteValueAMD64_OpMod16u(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (NotEqualInt8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPB256 [4] y x)) + // match: (Mod16u x y) + // result: (Select1 (DIVWU x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := 
b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpNotEqualInt8x64(v *Value) bool { +func rewriteValueAMD64_OpMod32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (NotEqualInt8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPB512 [4] y x)) + // match: (Mod32 [a] x y) + // result: (Select1 (DIVL [a] x y)) for { + a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) + v0.AuxInt = boolToAuxInt(a) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpNotEqualUint16x16(v *Value) bool { +func rewriteValueAMD64_OpMod32u(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (NotEqualUint16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPUW256 [4] y x)) + // match: (Mod32u x y) + // result: (Select1 (DIVLU x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32)) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpNotEqualUint16x32(v *Value) bool { +func rewriteValueAMD64_OpMod64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (NotEqualUint16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPUW512 [4] y x)) + // match: (Mod64 [a] x y) + // result: (Select1 (DIVQ [a] x y)) for { + a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64)) + v0.AuxInt = boolToAuxInt(a) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpNotEqualUint16x8(v *Value) bool { +func rewriteValueAMD64_OpMod64u(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (NotEqualUint16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPUW128 [4] y x)) + // match: (Mod64u x y) + // result: (Select1 (DIVQU x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpNotEqualUint32x16(v *Value) bool { +func rewriteValueAMD64_OpMod8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (NotEqualUint32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPUD512 [4] y x)) + // match: (Mod8 x y) + // result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y))) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVW, 
types.NewTuple(typ.Int16, typ.Int16)) + v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) + v2.AddArg(y) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpNotEqualUint32x4(v *Value) bool { +func rewriteValueAMD64_OpMod8u(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (NotEqualUint32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPUD128 [4] y x)) + // match: (Mod8u x y) + // result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) + v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) + v2.AddArg(y) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpNotEqualUint32x8(v *Value) bool { +func rewriteValueAMD64_OpMove(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (NotEqualUint32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPUD256 [4] y x)) + // match: (Move [0] _ _ mem) + // result: mem for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) - v.AddArg(v0) + if auxIntToInt64(v.AuxInt) != 0 { + break + } + mem := v_2 + v.copyOf(mem) + return true + } + // match: (Move [1] dst src mem) + // result: (MOVBstore dst (MOVBload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 1 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVBstore) + v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [2] dst src mem) + // result: (MOVWstore dst (MOVWload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 2 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVWstore) + v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [4] dst src mem) + // result: (MOVLstore dst (MOVLload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 4 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVLstore) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [8] dst src mem) + // result: (MOVQstore dst (MOVQload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 8 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVQstore) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [16] dst src mem) + // result: (MOVOstore dst (MOVOload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 16 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVOstore) + v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [32] dst src mem) + // result: (Move [16] (OffPtr dst [16]) (OffPtr src [16]) (Move [16] dst src mem)) + for { + if auxIntToInt64(v.AuxInt) != 32 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + 
v.reset(OpMove) + v.AuxInt = int64ToAuxInt(16) + v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) + v0.AuxInt = int64ToAuxInt(16) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) + v1.AuxInt = int64ToAuxInt(16) + v1.AddArg(src) + v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) + v2.AuxInt = int64ToAuxInt(16) + v2.AddArg3(dst, src, mem) + v.AddArg3(v0, v1, v2) return true } -} -func rewriteValueAMD64_OpNotEqualUint64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPUQ128 [4] y x)) + // match: (Move [48] dst src mem) + // result: (Move [32] (OffPtr dst [16]) (OffPtr src [16]) (Move [16] dst src mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) - v.AddArg(v0) + if auxIntToInt64(v.AuxInt) != 48 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(32) + v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) + v0.AuxInt = int64ToAuxInt(16) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) + v1.AuxInt = int64ToAuxInt(16) + v1.AddArg(src) + v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) + v2.AuxInt = int64ToAuxInt(16) + v2.AddArg3(dst, src, mem) + v.AddArg3(v0, v1, v2) return true } -} -func rewriteValueAMD64_OpNotEqualUint64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPUQ256 [4] y x)) + // match: (Move [64] dst src mem) + // result: (Move [32] (OffPtr dst [32]) (OffPtr src [32]) (Move [32] dst src mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) - v.AddArg(v0) + if auxIntToInt64(v.AuxInt) != 64 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(32) + v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) + v0.AuxInt = int64ToAuxInt(32) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) + v1.AuxInt = int64ToAuxInt(32) + v1.AddArg(src) + v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) + v2.AuxInt = int64ToAuxInt(32) + v2.AddArg3(dst, src, mem) + v.AddArg3(v0, v1, v2) return true } -} -func rewriteValueAMD64_OpNotEqualUint64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPUQ512 [4] y x)) + // match: (Move [3] dst src mem) + // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) - v.AddArg(v0) + if auxIntToInt64(v.AuxInt) != 3 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) + v0.AuxInt = int32ToAuxInt(2) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } -} -func rewriteValueAMD64_OpNotEqualUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := 
&b.Func.Config.Types - // match: (NotEqualUint8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPUB128 [4] y x)) + // match: (Move [5] dst src mem) + // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) - v.AddArg(v0) + if auxIntToInt64(v.AuxInt) != 5 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(4) + v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) + v0.AuxInt = int32ToAuxInt(4) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } -} -func rewriteValueAMD64_OpNotEqualUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPUB256 [4] y x)) + // match: (Move [6] dst src mem) + // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) - v.AddArg(v0) + if auxIntToInt64(v.AuxInt) != 6 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVWstore) + v.AuxInt = int32ToAuxInt(4) + v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v0.AuxInt = int32ToAuxInt(4) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } -} -func rewriteValueAMD64_OpNotEqualUint8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPUB512 [4] y x)) + // match: (Move [7] dst src mem) + // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) - v.AddArg(v0) + if auxIntToInt64(v.AuxInt) != 7 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVLstore) + v.AuxInt = int32ToAuxInt(3) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v0.AuxInt = int32ToAuxInt(3) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } -} -func rewriteValueAMD64_OpOffPtr(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (OffPtr [off] ptr) - // cond: is32Bit(off) - // result: (ADDQconst [int32(off)] ptr) + // match: (Move [9] dst src mem) + // result: (MOVBstore [8] dst (MOVBload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) for { - off := auxIntToInt64(v.AuxInt) - ptr := v_0 - if !(is32Bit(off)) { + if auxIntToInt64(v.AuxInt) != 9 { break } - v.reset(OpAMD64ADDQconst) - v.AuxInt = int32ToAuxInt(int32(off)) - v.AddArg(ptr) + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(8) + v0 := 
b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) + v0.AuxInt = int32ToAuxInt(8) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } - // match: (OffPtr [off] ptr) - // result: (ADDQ (MOVQconst [off]) ptr) + // match: (Move [10] dst src mem) + // result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) for { - off := auxIntToInt64(v.AuxInt) - ptr := v_0 - v.reset(OpAMD64ADDQ) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) - v0.AuxInt = int64ToAuxInt(off) - v.AddArg2(v0, ptr) + if auxIntToInt64(v.AuxInt) != 10 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVWstore) + v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v0.AuxInt = int32ToAuxInt(8) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } -} -func rewriteValueAMD64_OpOrFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (OrFloat32x16 x y) - // result: (VORPS512 y x) + // match: (Move [11] dst src mem) + // result: (MOVLstore [7] dst (MOVLload [7] src mem) (MOVQstore dst (MOVQload src mem) mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VORPS512) - v.AddArg2(y, x) + if auxIntToInt64(v.AuxInt) != 11 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVLstore) + v.AuxInt = int32ToAuxInt(7) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v0.AuxInt = int32ToAuxInt(7) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } -} -func rewriteValueAMD64_OpOrFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (OrFloat32x4 x y) - // result: (VORPS128 y x) + // match: (Move [12] dst src mem) + // result: (MOVLstore [8] dst (MOVLload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VORPS128) - v.AddArg2(y, x) + if auxIntToInt64(v.AuxInt) != 12 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVLstore) + v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v0.AuxInt = int32ToAuxInt(8) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } -} -func rewriteValueAMD64_OpOrFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (OrFloat32x8 x y) - // result: (VORPS256 y x) + // match: (Move [s] dst src mem) + // cond: s >= 13 && s <= 15 + // result: (MOVQstore [int32(s-8)] dst (MOVQload [int32(s-8)] src mem) (MOVQstore dst (MOVQload src mem) mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VORPS256) - v.AddArg2(y, x) + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s >= 13 && s <= 15) { + break + } + v.reset(OpAMD64MOVQstore) + v.AuxInt = int32ToAuxInt(int32(s - 8)) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v0.AuxInt = int32ToAuxInt(int32(s - 8)) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, 
types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } -} -func rewriteValueAMD64_OpOrFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (OrFloat64x2 x y) - // result: (VORPD128 y x) + // match: (Move [s] dst src mem) + // cond: s > 16 && s%16 != 0 && s%16 <= 8 + // result: (Move [s-s%16] (OffPtr dst [s%16]) (OffPtr src [s%16]) (MOVQstore dst (MOVQload src mem) mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VORPD128) - v.AddArg2(y, x) + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s > 16 && s%16 != 0 && s%16 <= 8) { + break + } + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(s - s%16) + v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) + v0.AuxInt = int64ToAuxInt(s % 16) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) + v1.AuxInt = int64ToAuxInt(s % 16) + v1.AddArg(src) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v3.AddArg2(src, mem) + v2.AddArg3(dst, v3, mem) + v.AddArg3(v0, v1, v2) return true } -} -func rewriteValueAMD64_OpOrFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (OrFloat64x4 x y) - // result: (VORPD256 y x) + // match: (Move [s] dst src mem) + // cond: s > 16 && s%16 != 0 && s%16 > 8 + // result: (Move [s-s%16] (OffPtr dst [s%16]) (OffPtr src [s%16]) (MOVOstore dst (MOVOload src mem) mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VORPD256) - v.AddArg2(y, x) + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s > 16 && s%16 != 0 && s%16 > 8) { + break + } + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(s - s%16) + v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) + v0.AuxInt = int64ToAuxInt(s % 16) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) + v1.AuxInt = int64ToAuxInt(s % 16) + v1.AddArg(src) + v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) + v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) + v3.AddArg2(src, mem) + v2.AddArg3(dst, v3, mem) + v.AddArg3(v0, v1, v2) return true } -} -func rewriteValueAMD64_OpOrFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (OrFloat64x8 x y) - // result: (VORPD512 y x) + // match: (Move [s] dst src mem) + // cond: s > 64 && s <= 16*64 && s%16 == 0 && logLargeCopy(v, s) + // result: (DUFFCOPY [s] dst src mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VORPD512) - v.AddArg2(y, x) + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s > 64 && s <= 16*64 && s%16 == 0 && logLargeCopy(v, s)) { + break + } + v.reset(OpAMD64DUFFCOPY) + v.AuxInt = int64ToAuxInt(s) + v.AddArg3(dst, src, mem) return true } -} -func rewriteValueAMD64_OpOrInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (OrInt16x16 x y) - // result: (VPOR256 y x) + // match: (Move [s] dst src mem) + // cond: s > 16*64 && s%8 == 0 && logLargeCopy(v, s) + // result: (REPMOVSQ dst src (MOVQconst [s/8]) mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPOR256) - v.AddArg2(y, x) + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s > 16*64 && s%8 == 0 && logLargeCopy(v, s)) { + break + } + v.reset(OpAMD64REPMOVSQ) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(s / 8) + v.AddArg4(dst, src, v0, mem) return true } + return false } -func rewriteValueAMD64_OpOrInt16x8(v *Value) bool { - v_1 := v.Args[1] +func 
rewriteValueAMD64_OpNeg32F(v *Value) bool { v_0 := v.Args[0] - // match: (OrInt16x8 x y) - // result: (VPOR128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (Neg32F x) + // result: (PXOR x (MOVSSconst [float32(math.Copysign(0, -1))])) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPOR128) - v.AddArg2(y, x) + v.reset(OpAMD64PXOR) + v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32) + v0.AuxInt = float32ToAuxInt(float32(math.Copysign(0, -1))) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpOrInt32x16(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpNeg64F(v *Value) bool { v_0 := v.Args[0] - // match: (OrInt32x16 x y) - // result: (VPORD512 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (Neg64F x) + // result: (PXOR x (MOVSDconst [math.Copysign(0, -1)])) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPORD512) - v.AddArg2(y, x) + v.reset(OpAMD64PXOR) + v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64) + v0.AuxInt = float64ToAuxInt(math.Copysign(0, -1)) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpOrInt32x4(v *Value) bool { +func rewriteValueAMD64_OpNeq16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (OrInt32x4 x y) - // result: (VPOR128 y x) + b := v.Block + // match: (Neq16 x y) + // result: (SETNE (CMPW x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPOR128) - v.AddArg2(y, x) + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpOrInt32x8(v *Value) bool { +func rewriteValueAMD64_OpNeq32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (OrInt32x8 x y) - // result: (VPOR256 y x) + b := v.Block + // match: (Neq32 x y) + // result: (SETNE (CMPL x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPOR256) - v.AddArg2(y, x) + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpOrInt64x2(v *Value) bool { +func rewriteValueAMD64_OpNeq32F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (OrInt64x2 x y) - // result: (VPOR128 y x) + b := v.Block + // match: (Neq32F x y) + // result: (SETNEF (UCOMISS x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPOR128) - v.AddArg2(y, x) + v.reset(OpAMD64SETNEF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpOrInt64x4(v *Value) bool { +func rewriteValueAMD64_OpNeq64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (OrInt64x4 x y) - // result: (VPOR256 y x) + b := v.Block + // match: (Neq64 x y) + // result: (SETNE (CMPQ x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPOR256) - v.AddArg2(y, x) + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpOrInt64x8(v *Value) bool { +func rewriteValueAMD64_OpNeq64F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (OrInt64x8 x y) - // result: (VPORQ512 y x) + b := v.Block + // match: (Neq64F x y) + // result: (SETNEF (UCOMISD x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPORQ512) - v.AddArg2(y, x) + v.reset(OpAMD64SETNEF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpOrInt8x16(v *Value) bool { +func rewriteValueAMD64_OpNeq8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (OrInt8x16 x y) - 
// result: (VPOR128 y x) + b := v.Block + // match: (Neq8 x y) + // result: (SETNE (CMPB x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPOR128) - v.AddArg2(y, x) + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpOrInt8x32(v *Value) bool { +func rewriteValueAMD64_OpNeqB(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (OrInt8x32 x y) - // result: (VPOR256 y x) + b := v.Block + // match: (NeqB x y) + // result: (SETNE (CMPB x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPOR256) - v.AddArg2(y, x) + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpOrUint16x16(v *Value) bool { +func rewriteValueAMD64_OpNeqPtr(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (OrUint16x16 x y) - // result: (VPOR256 y x) + b := v.Block + // match: (NeqPtr x y) + // result: (SETNE (CMPQ x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPOR256) - v.AddArg2(y, x) + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpOrUint16x8(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpNot(v *Value) bool { v_0 := v.Args[0] - // match: (OrUint16x8 x y) - // result: (VPOR128 y x) + // match: (Not x) + // result: (XORLconst [1] x) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPOR128) - v.AddArg2(y, x) + v.reset(OpAMD64XORLconst) + v.AuxInt = int32ToAuxInt(1) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpOrUint32x16(v *Value) bool { +func rewriteValueAMD64_OpNotEqualFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (OrUint32x16 x y) - // result: (VPORD512 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPORD512) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpOrUint32x4(v *Value) bool { +func rewriteValueAMD64_OpNotEqualFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (OrUint32x4 x y) - // result: (VPOR128 y x) + // match: (NotEqualFloat32x4 x y) + // result: (VCMPPS128 [4] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPOR128) - v.AddArg2(y, x) + v.reset(OpAMD64VCMPPS128) + v.AuxInt = int8ToAuxInt(4) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpOrUint32x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (OrUint32x8 x y) - // result: (VPOR256 y x) + // match: (NotEqualFloat32x8 x y) + // result: (VCMPPS256 [4] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPOR256) - v.AddArg2(y, x) + v.reset(OpAMD64VCMPPS256) + v.AuxInt = int8ToAuxInt(4) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpOrUint64x2(v *Value) bool { +func rewriteValueAMD64_OpNotEqualFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (OrUint64x2 x y) - // result: (VPOR128 y x) + // match: (NotEqualFloat64x2 x y) + // result: (VCMPPD128 [4] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPOR128) - v.AddArg2(y, x) + v.reset(OpAMD64VCMPPD128) + v.AuxInt = int8ToAuxInt(4) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpOrUint64x4(v *Value) bool 
{ +func rewriteValueAMD64_OpNotEqualFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (OrUint64x4 x y) - // result: (VPOR256 y x) + // match: (NotEqualFloat64x4 x y) + // result: (VCMPPD256 [4] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPOR256) - v.AddArg2(y, x) + v.reset(OpAMD64VCMPPD256) + v.AuxInt = int8ToAuxInt(4) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpOrUint64x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (OrUint64x8 x y) - // result: (VPORQ512 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPORQ512) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpOrUint8x16(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (OrUint8x16 x y) - // result: (VPOR128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualInt16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPW256 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPOR128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpOrUint8x32(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (OrUint8x32 x y) - // result: (VPOR256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualInt16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPW512 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPOR256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseAddFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseAddFloat32x4 x y) - // result: (VHADDPS128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualInt16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPW128 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VHADDPS128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseAddFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseAddFloat32x8 x y) - // result: (VHADDPS256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualInt32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPD512 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VHADDPS256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseAddFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseAddFloat64x2 x y) - // result: (VHADDPD128 y x) + b := v.Block + typ := 
&b.Func.Config.Types + // match: (NotEqualInt32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPD128 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VHADDPD128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseAddFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseAddFloat64x4 x y) - // result: (VHADDPD256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualInt32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPD256 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VHADDPD256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseAddInt16x16(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseAddInt16x16 x y) - // result: (VPHADDW256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualInt64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPQ128 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPHADDW256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseAddInt16x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseAddInt16x8 x y) - // result: (VPHADDW128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualInt64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPQ256 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPHADDW128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseAddInt32x4(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseAddInt32x4 x y) - // result: (VPHADDD128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualInt64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPQ512 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPHADDD128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseAddInt32x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseAddInt32x8 x y) - // result: (VPHADDD256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualInt8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPB128 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPHADDD256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseAddUint16x16(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseAddUint16x16 x y) - // result: (VPHADDW256 y x) + b := v.Block + 
typ := &b.Func.Config.Types + // match: (NotEqualInt8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPB256 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPHADDW256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseAddUint16x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseAddUint16x8 x y) - // result: (VPHADDW128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualInt8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPB512 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPHADDW128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseAddUint32x4(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseAddUint32x4 x y) - // result: (VPHADDD128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualUint16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPUW256 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPHADDD128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseAddUint32x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseAddUint32x8 x y) - // result: (VPHADDD256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualUint16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPUW512 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPHADDD256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseSubFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseSubFloat32x4 x y) - // result: (VHSUBPS128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualUint16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPUW128 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VHSUBPS128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseSubFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseSubFloat32x8 x y) - // result: (VHSUBPS256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualUint32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VHSUBPS256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseSubFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseSubFloat64x2 x y) - 
// result: (VHSUBPD128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualUint32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPUD128 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VHSUBPD128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseSubFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseSubFloat64x4 x y) - // result: (VHSUBPD256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualUint32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPUD256 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VHSUBPD256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseSubInt16x16(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseSubInt16x16 x y) - // result: (VPHSUBW256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualUint64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPUQ128 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPHSUBW256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseSubInt16x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseSubInt16x8 x y) - // result: (VPHSUBW128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualUint64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPUQ256 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPHSUBW128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseSubInt32x4(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseSubInt32x4 x y) - // result: (VPHSUBD128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualUint64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPUQ512 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPHSUBD128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseSubInt32x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseSubInt32x8 x y) - // result: (VPHSUBD256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualUint8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPUB128 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPHSUBD256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseSubUint16x16(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // 
match: (PairwiseSubUint16x16 x y) - // result: (VPHSUBW256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualUint8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPUB256 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPHSUBW256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseSubUint16x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseSubUint16x8 x y) - // result: (VPHSUBW128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualUint8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPHSUBW128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseSubUint32x4(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpOffPtr(v *Value) bool { v_0 := v.Args[0] - // match: (PairwiseSubUint32x4 x y) - // result: (VPHSUBD128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (OffPtr [off] ptr) + // cond: is32Bit(off) + // result: (ADDQconst [int32(off)] ptr) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPHSUBD128) - v.AddArg2(y, x) + off := auxIntToInt64(v.AuxInt) + ptr := v_0 + if !(is32Bit(off)) { + break + } + v.reset(OpAMD64ADDQconst) + v.AuxInt = int32ToAuxInt(int32(off)) + v.AddArg(ptr) return true } -} -func rewriteValueAMD64_OpPairwiseSubUint32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (PairwiseSubUint32x8 x y) - // result: (VPHSUBD256 y x) + // match: (OffPtr [off] ptr) + // result: (ADDQ (MOVQconst [off]) ptr) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPHSUBD256) - v.AddArg2(y, x) + off := auxIntToInt64(v.AuxInt) + ptr := v_0 + v.reset(OpAMD64ADDQ) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(off) + v.AddArg2(v0, ptr) return true } } @@ -46426,270 +43218,6 @@ func rewriteValueAMD64_OpPopCount8(v *Value) bool { return true } } -func rewriteValueAMD64_OpPopCountInt16x16(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountInt16x16 x) - // result: (VPOPCNTW256 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTW256) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountInt16x32(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountInt16x32 x) - // result: (VPOPCNTW512 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTW512) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountInt16x8(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountInt16x8 x) - // result: (VPOPCNTW128 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTW128) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountInt32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountInt32x16 x) - // result: (VPOPCNTD512 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTD512) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountInt32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountInt32x4 x) - // result: (VPOPCNTD128 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTD128) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountInt32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountInt32x8 x) - // result: (VPOPCNTD256 x) - for { - x := v_0 - 
v.reset(OpAMD64VPOPCNTD256) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountInt64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountInt64x2 x) - // result: (VPOPCNTQ128 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTQ128) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountInt64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountInt64x4 x) - // result: (VPOPCNTQ256 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTQ256) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountInt64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountInt64x8 x) - // result: (VPOPCNTQ512 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTQ512) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountInt8x16(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountInt8x16 x) - // result: (VPOPCNTB128 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTB128) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountInt8x32(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountInt8x32 x) - // result: (VPOPCNTB256 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTB256) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountInt8x64(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountInt8x64 x) - // result: (VPOPCNTB512 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTB512) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountUint16x16(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountUint16x16 x) - // result: (VPOPCNTW256 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTW256) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountUint16x32(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountUint16x32 x) - // result: (VPOPCNTW512 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTW512) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountUint16x8(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountUint16x8 x) - // result: (VPOPCNTW128 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTW128) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountUint32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountUint32x16 x) - // result: (VPOPCNTD512 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTD512) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountUint32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountUint32x4 x) - // result: (VPOPCNTD128 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTD128) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountUint32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountUint32x8 x) - // result: (VPOPCNTD256 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTD256) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountUint64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountUint64x2 x) - // result: (VPOPCNTQ128 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTQ128) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountUint64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountUint64x4 x) - // result: (VPOPCNTQ256 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTQ256) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountUint64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountUint64x8 x) - // result: (VPOPCNTQ512 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTQ512) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountUint8x16(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountUint8x16 x) - // 
result: (VPOPCNTB128 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTB128) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountUint8x32(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountUint8x32 x) - // result: (VPOPCNTB256 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTB256) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountUint8x64(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountUint8x64 x) - // result: (VPOPCNTB512 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTB512) - v.AddArg(x) - return true - } -} func rewriteValueAMD64_OpRoundToEven(v *Value) bool { v_0 := v.Args[0] // match: (RoundToEven x) @@ -48030,370 +44558,6 @@ func rewriteValueAMD64_OpRsh8x8(v *Value) bool { } return false } -func rewriteValueAMD64_OpSaturatedAddInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedAddInt16x16 x y) - // result: (VPADDSW256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDSW256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddInt16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedAddInt16x32 x y) - // result: (VPADDSW512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDSW512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedAddInt16x8 x y) - // result: (VPADDSW128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDSW128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddInt8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedAddInt8x16 x y) - // result: (VPADDSB128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDSB128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddInt8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedAddInt8x32 x y) - // result: (VPADDSB256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDSB256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddInt8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedAddInt8x64 x y) - // result: (VPADDSB512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDSB512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedAddUint16x16 x y) - // result: (VPADDSW256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDSW256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddUint16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedAddUint16x32 x y) - // result: (VPADDSW512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDSW512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedAddUint16x8 x y) - // result: (VPADDSW128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDSW128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedAddUint8x16 x y) - // result: (VPADDSB128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDSB128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: 
(SaturatedAddUint8x32 x y) - // result: (VPADDSB256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDSB256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddUint8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedAddUint8x64 x y) - // result: (VPADDSB512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDSB512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedPairwiseAddInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedPairwiseAddInt16x16 x y) - // result: (VPHADDSW256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPHADDSW256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedPairwiseAddInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedPairwiseAddInt16x8 x y) - // result: (VPHADDSW128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPHADDSW128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedPairwiseSubInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedPairwiseSubInt16x16 x y) - // result: (VPHSUBSW256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPHSUBSW256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedPairwiseSubInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedPairwiseSubInt16x8 x y) - // result: (VPHSUBSW128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPHSUBSW128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedSubInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedSubInt16x16 x y) - // result: (VPSUBSW256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBSW256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedSubInt16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedSubInt16x32 x y) - // result: (VPSUBSW512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBSW512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedSubInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedSubInt16x8 x y) - // result: (VPSUBSW128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBSW128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedSubInt8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedSubInt8x16 x y) - // result: (VPSUBSB128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBSB128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedSubInt8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedSubInt8x32 x y) - // result: (VPSUBSB256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBSB256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedSubInt8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedSubInt8x64 x y) - // result: (VPSUBSB512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBSB512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedSubUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedSubUint16x16 x y) - // result: (VPSUBSW256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBSW256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedSubUint16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // 
match: (SaturatedSubUint16x32 x y) - // result: (VPSUBSW512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBSW512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedSubUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedSubUint16x8 x y) - // result: (VPSUBSW128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBSW128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedSubUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedSubUint8x16 x y) - // result: (VPSUBSB128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBSB128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedSubUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedSubUint8x32 x y) - // result: (VPSUBSB256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBSB256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedSubUint8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedSubUint8x64 x y) - // result: (VPSUBSB512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBSB512) - v.AddArg2(y, x) - return true - } -} func rewriteValueAMD64_OpSelect0(v *Value) bool { v_0 := v.Args[0] b := v.Block @@ -48819,84 +44983,6 @@ func rewriteValueAMD64_OpSelectN(v *Value) bool { } return false } -func rewriteValueAMD64_OpSignInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SignInt16x16 x y) - // result: (VPSIGNW256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSIGNW256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSignInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SignInt16x8 x y) - // result: (VPSIGNW128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSIGNW128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSignInt32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SignInt32x4 x y) - // result: (VPSIGND128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSIGND128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSignInt32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SignInt32x8 x y) - // result: (VPSIGND256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSIGND256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSignInt8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SignInt8x16 x y) - // result: (VPSIGNB128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSIGNB128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSignInt8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SignInt8x32 x y) - // result: (VPSIGNB256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSIGNB256) - v.AddArg2(y, x) - return true - } -} func rewriteValueAMD64_OpSlicemask(v *Value) bool { v_0 := v.Args[0] b := v.Block @@ -48941,79 +45027,13 @@ func rewriteValueAMD64_OpSpectreSliceIndex(v *Value) bool { // result: (CMOVQHI x (MOVQconst [0]) (CMPQ x y)) for { x := v_0 - y := v_1 - v.reset(OpAMD64CMOVQHI) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) - v0.AuxInt = int64ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v1.AddArg2(x, y) - v.AddArg3(x, v0, v1) - return true - } -} -func rewriteValueAMD64_OpSqrtFloat32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (SqrtFloat32x16 x) - // result: (VSQRTPS512 x) - for { - x := v_0 - 
v.reset(OpAMD64VSQRTPS512) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpSqrtFloat32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (SqrtFloat32x4 x) - // result: (VSQRTPS128 x) - for { - x := v_0 - v.reset(OpAMD64VSQRTPS128) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpSqrtFloat32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (SqrtFloat32x8 x) - // result: (VSQRTPS256 x) - for { - x := v_0 - v.reset(OpAMD64VSQRTPS256) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpSqrtFloat64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (SqrtFloat64x2 x) - // result: (VSQRTPD128 x) - for { - x := v_0 - v.reset(OpAMD64VSQRTPD128) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpSqrtFloat64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (SqrtFloat64x4 x) - // result: (VSQRTPD256 x) - for { - x := v_0 - v.reset(OpAMD64VSQRTPD256) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpSqrtFloat64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (SqrtFloat64x8 x) - // result: (VSQRTPD512 x) - for { - x := v_0 - v.reset(OpAMD64VSQRTPD512) - v.AddArg(x) + y := v_1 + v.reset(OpAMD64CMOVQHI) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v1.AddArg2(x, y) + v.AddArg3(x, v0, v1) return true } } @@ -49158,396 +45178,6 @@ func rewriteValueAMD64_OpStore(v *Value) bool { } return false } -func rewriteValueAMD64_OpSubFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubFloat32x16 x y) - // result: (VADDPS512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VADDPS512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubFloat32x4 x y) - // result: (VADDPS128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VADDPS128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubFloat32x8 x y) - // result: (VADDPS256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VADDPS256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubFloat64x2 x y) - // result: (VADDPD128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VADDPD128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubFloat64x4 x y) - // result: (VADDPD256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VADDPD256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubFloat64x8 x y) - // result: (VADDPD512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VADDPD512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubInt16x16 x y) - // result: (VPSUBW256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBW256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubInt16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubInt16x32 x y) - // result: (VPSUBW512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBW512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] 
- // match: (SubInt16x8 x y) - // result: (VPSUBW128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBW128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubInt32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubInt32x16 x y) - // result: (VPSUBD512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBD512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubInt32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubInt32x4 x y) - // result: (VPSUBD128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBD128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubInt32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubInt32x8 x y) - // result: (VPSUBD256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBD256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubInt64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubInt64x2 x y) - // result: (VPSUBQ128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBQ128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubInt64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubInt64x4 x y) - // result: (VPSUBQ256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBQ256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubInt64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubInt64x8 x y) - // result: (VPSUBQ512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBQ512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubInt8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubInt8x16 x y) - // result: (VPSUBB128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBB128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubInt8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubInt8x32 x y) - // result: (VPSUBB256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBB256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubInt8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubInt8x64 x y) - // result: (VPSUBB512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBB512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubUint16x16 x y) - // result: (VPSUBW256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBW256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubUint16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubUint16x32 x y) - // result: (VPSUBW512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBW512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubUint16x8 x y) - // result: (VPSUBW128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBW128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubUint32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubUint32x16 x y) - // result: (VPSUBD512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBD512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubUint32x4 x y) - // result: (VPSUBD128 y x) - for { - x := v_0 - 
y := v_1 - v.reset(OpAMD64VPSUBD128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubUint32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubUint32x8 x y) - // result: (VPSUBD256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBD256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubUint64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubUint64x2 x y) - // result: (VPSUBQ128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBQ128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubUint64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubUint64x4 x y) - // result: (VPSUBQ256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBQ256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubUint64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubUint64x8 x y) - // result: (VPSUBQ512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBQ512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubUint8x16 x y) - // result: (VPSUBB128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBB128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubUint8x32 x y) - // result: (VPSUBB256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBB256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubUint8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubUint8x64 x y) - // result: (VPSUBB512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBB512) - v.AddArg2(y, x) - return true - } -} func rewriteValueAMD64_OpTrunc(v *Value) bool { v_0 := v.Args[0] // match: (Trunc x) @@ -49560,344 +45190,6 @@ func rewriteValueAMD64_OpTrunc(v *Value) bool { return true } } -func rewriteValueAMD64_OpXorFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorFloat32x16 x y) - // result: (VXORPS512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VXORPS512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorFloat32x4 x y) - // result: (VXORPS128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VXORPS128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorFloat32x8 x y) - // result: (VXORPS256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VXORPS256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorFloat64x2 x y) - // result: (VXORPD128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VXORPD128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorFloat64x4 x y) - // result: (VXORPD256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VXORPD256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorFloat64x8 x y) - // result: (VXORPD512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VXORPD512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorInt16x16(v *Value) bool 
{ - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorInt16x16 x y) - // result: (VPXOR256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXOR256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorInt16x8 x y) - // result: (VPXOR128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXOR128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorInt32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorInt32x16 x y) - // result: (VPXORD512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXORD512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorInt32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorInt32x4 x y) - // result: (VPXOR128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXOR128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorInt32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorInt32x8 x y) - // result: (VPXOR256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXOR256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorInt64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorInt64x2 x y) - // result: (VPXOR128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXOR128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorInt64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorInt64x4 x y) - // result: (VPXOR256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXOR256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorInt64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorInt64x8 x y) - // result: (VPXORQ512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXORQ512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorInt8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorInt8x16 x y) - // result: (VPXOR128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXOR128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorInt8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorInt8x32 x y) - // result: (VPXOR256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXOR256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorUint16x16 x y) - // result: (VPXOR256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXOR256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorUint16x8 x y) - // result: (VPXOR128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXOR128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorUint32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorUint32x16 x y) - // result: (VPXORD512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXORD512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorUint32x4 x y) - // result: (VPXOR128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXOR128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorUint32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorUint32x8 x y) - // result: (VPXOR256 y x) - 
for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXOR256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorUint64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorUint64x2 x y) - // result: (VPXOR128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXOR128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorUint64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorUint64x4 x y) - // result: (VPXOR256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXOR256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorUint64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorUint64x8 x y) - // result: (VPXORQ512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXORQ512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorUint8x16 x y) - // result: (VPXOR128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXOR128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorUint8x32 x y) - // result: (VPXOR256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXOR256) - v.AddArg2(y, x) - return true - } -} func rewriteValueAMD64_OpZero(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index cf3c1813e47740..3c8104ec2c033c 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -1083,408 +1083,408 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint8x64.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x64.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x64.MaskedSub", opLen3(ssa.OpMaskedSubUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x4.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x4.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x4.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x4.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x4.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x4.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x4.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x4.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x4.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x8.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x8.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x8.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x8.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x8.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x8.AsUint16x16", func(s *state, n *ir.CallExpr, 
args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x8.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x8.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x8.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x2.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x2.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x2.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x2.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x2.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x2.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x2.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x2.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x2.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x4.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x4.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x4.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x4.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x4.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x4.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x4.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x4.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x4.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x8.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x8.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x8.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x8.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x8.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) 
*ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x8.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x8.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x8.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x8.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x16.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x16.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x16.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x16.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x16.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x16.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x16.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x16.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x16.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x16.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x16.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x16.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x16.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x16.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x32.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x32.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x32.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x32.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x32.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x32.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x32.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x32.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, 
sys.AMD64) + addF(simdPackage, "Int16x32.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsUint64x2", func(s 
*state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x8.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x8.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x8.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x8.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x8.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x8.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x8.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x8.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x8.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x8.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x8.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x8.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x4.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x4.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x4.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x4.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x4.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x4.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x4.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x4.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x4.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x8.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x8.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return 
args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x4.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x4.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x4.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x4.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x4.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x4.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x4.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x4.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x4.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x4.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x4.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x4.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x4.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x4.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x4.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x4.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x4.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x4.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x4.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, 
"Float64x4.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x4.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x8.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x8.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x8.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x8.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x8.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x8.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x8.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x8.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x8.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x16.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x16.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x16.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x16.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x16.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x16.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x16.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x16.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x16.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x4.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x4.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsInt8x64", func(s 
*state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x32.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x32.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x32.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x32.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x32.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x32.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x32.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x32.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x32.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x32.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x32.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, 
sys.AMD64) - addF(simdPackage, "Uint8x32.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x32.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x32.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x32.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x32.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x32.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x32.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x8.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x8.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x8.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x8.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x8.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x8.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x8.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x8.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x8.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x8.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x8.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x8.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x8.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x8.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x8.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x8.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x8.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x8.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x8.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, 
"Int64x8.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x8.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x8.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x8.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x8.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x8.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x8.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x8.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x64.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x64.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x64.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x64.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x64.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x64.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x64.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x64.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x64.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x64.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x64.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x64.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x64.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x64.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsInt64x8", func(s *state, n *ir.CallExpr, args 
[]*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x64.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x64.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x64.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x64.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x16.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x16.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x16.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x16.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x16.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x16.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x16.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x16.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x16.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x16.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x16.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x16.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x16.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x16.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x16.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x16.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x16.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x16.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x16.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x16.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { 
return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x16.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x16.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x16.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x16.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x16.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x16.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x16.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x32.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint16x32.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x32.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x32.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint16x32.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x32.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint16x32.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x32.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x32.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x32.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x32.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint16x32.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x32.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x32.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x32.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x32.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x32.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x32.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x32.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - 
addF(simdPackage, "Int16x32.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x32.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x8.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x8.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x8.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x8.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x8.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x8.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x8.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x8.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x8.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x32.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x32.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x16.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x16.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x16.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x16.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, 
"Uint32x16.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x16.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x16.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x16.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x16.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x16.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x16.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x16.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x4.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x4.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x4.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x4.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x4.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x4.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x4.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x4.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x4.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x16.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x16.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x16.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x16.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x16.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x16.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x16.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x16.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x16.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x8.AsInt32x4", func(s *state, n *ir.CallExpr, args 
[]*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x8.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x8.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x8.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x8.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x8.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x8.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x8.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x8.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x8.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x8.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x8.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x8.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x8.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x8.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x8.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x8.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x8.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x4.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x4.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x4.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x4.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x4.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x4.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x4.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x4.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x4.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, 
sys.AMD64) - addF(simdPackage, "Uint64x2.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x2.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x2.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x2.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x2.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x2.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x2.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x2.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x2.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x2.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x2.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x2.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x2.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x2.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x2.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x2.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x2.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x2.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x2.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x2.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x2.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x2.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x2.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x2.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x2.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x2.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, 
"Int64x2.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x16.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x16.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x16.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x16.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x16.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x16.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x16.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x16.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x16.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x16.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x4.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x4.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x4.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x4.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x4.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x4.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x4.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x4.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x4.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x4.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x4.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x4.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x4.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x8.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x8.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x8.AsInt16x16", func(s *state, n *ir.CallExpr, 
args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x8.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x8.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x8.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x8.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x8.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x8.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x2.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x2.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x2.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x2.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x2.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x2.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x2.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x2.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x2.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x4.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x4.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x4.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x4.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x4.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x4.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x4.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x4.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x4.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x8.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x8.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return 
args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x8.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x8.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x8.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x8.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x8.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x8.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x8.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, 
"Uint8x64.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "LoadInt16x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Int16x8.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadInt32x4", simdLoad(), sys.AMD64) addF(simdPackage, "Int32x4.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask32x4", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask32x4.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadInt8x16", simdLoad(), sys.AMD64) addF(simdPackage, "Int8x16.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask8x16", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask8x16.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadUint16x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Uint16x8.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask16x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask16x8.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadInt16x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Int16x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt64x2", simdLoad(), sys.AMD64) + addF(simdPackage, "Int64x2.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask64x2", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask64x2.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadFloat32x4", simdLoad(), sys.AMD64) addF(simdPackage, "Float32x4.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadUint64x2", simdLoad(), sys.AMD64) - addF(simdPackage, "Uint64x2.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadFloat64x2", simdLoad(), sys.AMD64) addF(simdPackage, "Float64x2.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask64x2", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask64x2.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadInt64x2", simdLoad(), sys.AMD64) - addF(simdPackage, "Int64x2.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadUint8x16", simdLoad(), sys.AMD64) - addF(simdPackage, "Uint8x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint16x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint16x8.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadUint32x4", simdLoad(), sys.AMD64) addF(simdPackage, "Uint32x4.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint64x2", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint64x2.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint8x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint8x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask32x4", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask32x4.Store", 
simdStore(), sys.AMD64)
+	addF(simdPackage, "LoadMask16x8", simdLoad(), sys.AMD64)
+	addF(simdPackage, "Mask16x8.Store", simdStore(), sys.AMD64)
+	addF(simdPackage, "LoadMask8x16", simdLoad(), sys.AMD64)
+	addF(simdPackage, "Mask8x16.Store", simdStore(), sys.AMD64)
 	addF(simdPackage, "LoadInt16x16", simdLoad(), sys.AMD64)
 	addF(simdPackage, "Int16x16.Store", simdStore(), sys.AMD64)
 	addF(simdPackage, "LoadInt32x8", simdLoad(), sys.AMD64)
 	addF(simdPackage, "Int32x8.Store", simdStore(), sys.AMD64)
-	addF(simdPackage, "LoadUint64x4", simdLoad(), sys.AMD64)
-	addF(simdPackage, "Uint64x4.Store", simdStore(), sys.AMD64)
-	addF(simdPackage, "LoadMask64x4", simdLoad(), sys.AMD64)
-	addF(simdPackage, "Mask64x4.Store", simdStore(), sys.AMD64)
+	addF(simdPackage, "LoadInt8x32", simdLoad(), sys.AMD64)
+	addF(simdPackage, "Int8x32.Store", simdStore(), sys.AMD64)
 	addF(simdPackage, "LoadInt64x4", simdLoad(), sys.AMD64)
 	addF(simdPackage, "Int64x4.Store", simdStore(), sys.AMD64)
-	addF(simdPackage, "LoadFloat64x4", simdLoad(), sys.AMD64)
-	addF(simdPackage, "Float64x4.Store", simdStore(), sys.AMD64)
-	addF(simdPackage, "LoadMask16x16", simdLoad(), sys.AMD64)
-	addF(simdPackage, "Mask16x16.Store", simdStore(), sys.AMD64)
-	addF(simdPackage, "LoadMask32x8", simdLoad(), sys.AMD64)
-	addF(simdPackage, "Mask32x8.Store", simdStore(), sys.AMD64)
+	addF(simdPackage, "LoadMask64x4", simdLoad(), sys.AMD64)
+	addF(simdPackage, "Mask64x4.Store", simdStore(), sys.AMD64)
 	addF(simdPackage, "LoadFloat32x8", simdLoad(), sys.AMD64)
 	addF(simdPackage, "Float32x8.Store", simdStore(), sys.AMD64)
+	addF(simdPackage, "LoadFloat64x4", simdLoad(), sys.AMD64)
+	addF(simdPackage, "Float64x4.Store", simdStore(), sys.AMD64)
 	addF(simdPackage, "LoadUint16x16", simdLoad(), sys.AMD64)
 	addF(simdPackage, "Uint16x16.Store", simdStore(), sys.AMD64)
-	addF(simdPackage, "LoadInt8x32", simdLoad(), sys.AMD64)
-	addF(simdPackage, "Int8x32.Store", simdStore(), sys.AMD64)
+	addF(simdPackage, "LoadUint32x8", simdLoad(), sys.AMD64)
+	addF(simdPackage, "Uint32x8.Store", simdStore(), sys.AMD64)
+	addF(simdPackage, "LoadUint64x4", simdLoad(), sys.AMD64)
+	addF(simdPackage, "Uint64x4.Store", simdStore(), sys.AMD64)
 	addF(simdPackage, "LoadUint8x32", simdLoad(), sys.AMD64)
 	addF(simdPackage, "Uint8x32.Store", simdStore(), sys.AMD64)
+	addF(simdPackage, "LoadMask32x8", simdLoad(), sys.AMD64)
+	addF(simdPackage, "Mask32x8.Store", simdStore(), sys.AMD64)
+	addF(simdPackage, "LoadMask16x16", simdLoad(), sys.AMD64)
+	addF(simdPackage, "Mask16x16.Store", simdStore(), sys.AMD64)
 	addF(simdPackage, "LoadMask8x32", simdLoad(), sys.AMD64)
 	addF(simdPackage, "Mask8x32.Store", simdStore(), sys.AMD64)
-	addF(simdPackage, "LoadUint32x8", simdLoad(), sys.AMD64)
-	addF(simdPackage, "Uint32x8.Store", simdStore(), sys.AMD64)
-	addF(simdPackage, "LoadFloat64x8", simdLoad(), sys.AMD64)
-	addF(simdPackage, "Float64x8.Store", simdStore(), sys.AMD64)
-	addF(simdPackage, "LoadMask64x8", simdLoad(), sys.AMD64)
-	addF(simdPackage, "Mask64x8.Store", simdStore(), sys.AMD64)
+	addF(simdPackage, "LoadInt16x32", simdLoad(), sys.AMD64)
+	addF(simdPackage, "Int16x32.Store", simdStore(), sys.AMD64)
+	addF(simdPackage, "LoadMask16x32", simdLoad(), sys.AMD64)
+	addF(simdPackage, "Mask16x32.Store", simdStore(), sys.AMD64)
+	addF(simdPackage, "LoadInt32x16", simdLoad(), sys.AMD64)
+	addF(simdPackage, "Int32x16.Store", simdStore(), sys.AMD64)
+	addF(simdPackage, "LoadMask32x16", simdLoad(), sys.AMD64)
+	addF(simdPackage, "Mask32x16.Store", simdStore(), sys.AMD64)
 	addF(simdPackage, "LoadInt64x8", simdLoad(), sys.AMD64)
 	addF(simdPackage, "Int64x8.Store", simdStore(), sys.AMD64)
-	addF(simdPackage, "LoadUint8x64", simdLoad(), sys.AMD64)
-	addF(simdPackage, "Uint8x64.Store", simdStore(), sys.AMD64)
-	addF(simdPackage, "LoadMask8x64", simdLoad(), sys.AMD64)
-	addF(simdPackage, "Mask8x64.Store", simdStore(), sys.AMD64)
+	addF(simdPackage, "LoadMask64x8", simdLoad(), sys.AMD64)
+	addF(simdPackage, "Mask64x8.Store", simdStore(), sys.AMD64)
 	addF(simdPackage, "LoadInt8x64", simdLoad(), sys.AMD64)
 	addF(simdPackage, "Int8x64.Store", simdStore(), sys.AMD64)
+	addF(simdPackage, "LoadMask8x64", simdLoad(), sys.AMD64)
+	addF(simdPackage, "Mask8x64.Store", simdStore(), sys.AMD64)
 	addF(simdPackage, "LoadFloat32x16", simdLoad(), sys.AMD64)
 	addF(simdPackage, "Float32x16.Store", simdStore(), sys.AMD64)
-	addF(simdPackage, "LoadMask32x16", simdLoad(), sys.AMD64)
-	addF(simdPackage, "Mask32x16.Store", simdStore(), sys.AMD64)
-	addF(simdPackage, "LoadInt32x16", simdLoad(), sys.AMD64)
-	addF(simdPackage, "Int32x16.Store", simdStore(), sys.AMD64)
+	addF(simdPackage, "LoadFloat64x8", simdLoad(), sys.AMD64)
+	addF(simdPackage, "Float64x8.Store", simdStore(), sys.AMD64)
 	addF(simdPackage, "LoadUint16x32", simdLoad(), sys.AMD64)
 	addF(simdPackage, "Uint16x32.Store", simdStore(), sys.AMD64)
-	addF(simdPackage, "LoadMask16x32", simdLoad(), sys.AMD64)
-	addF(simdPackage, "Mask16x32.Store", simdStore(), sys.AMD64)
-	addF(simdPackage, "LoadInt16x32", simdLoad(), sys.AMD64)
-	addF(simdPackage, "Int16x32.Store", simdStore(), sys.AMD64)
-	addF(simdPackage, "LoadUint64x8", simdLoad(), sys.AMD64)
-	addF(simdPackage, "Uint64x8.Store", simdStore(), sys.AMD64)
 	addF(simdPackage, "LoadUint32x16", simdLoad(), sys.AMD64)
 	addF(simdPackage, "Uint32x16.Store", simdStore(), sys.AMD64)
-	addF(simdPackage, "Mask64x4.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
-	addF(simdPackage, "Int64x4.AsMask64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
-	addF(simdPackage, "Mask64x4.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Mask64x4.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "LoadUint64x8", simdLoad(), sys.AMD64)
+	addF(simdPackage, "Uint64x8.Store", simdStore(), sys.AMD64)
+	addF(simdPackage, "LoadUint8x64", simdLoad(), sys.AMD64)
+	addF(simdPackage, "Uint8x64.Store", simdStore(), sys.AMD64)
 	addF(simdPackage, "Mask16x16.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
 	addF(simdPackage, "Int16x16.AsMask16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
 	addF(simdPackage, "Mask16x16.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64)
 	addF(simdPackage, "Mask16x16.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Mask32x8.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
-	addF(simdPackage, "Int32x8.AsMask32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
-	addF(simdPackage, "Mask32x8.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Mask32x8.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Mask8x32.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
-	addF(simdPackage, "Int8x32.AsMask8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
-	addF(simdPackage, "Mask8x32.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Mask8x32.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Mask64x8.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
-	addF(simdPackage, "Int64x8.AsMask64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
-	addF(simdPackage, "Mask64x8.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Mask64x8.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Mask8x64.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
-	addF(simdPackage, "Int8x64.AsMask8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
-	addF(simdPackage, "Mask8x64.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Mask8x64.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Mask32x16.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
-	addF(simdPackage, "Int32x16.AsMask32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
-	addF(simdPackage, "Mask32x16.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Mask32x16.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64)
 	addF(simdPackage, "Mask16x32.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
 	addF(simdPackage, "Int16x32.AsMask16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
 	addF(simdPackage, "Mask16x32.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64)
 	addF(simdPackage, "Mask16x32.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Mask32x4.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
-	addF(simdPackage, "Int32x4.AsMask32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
-	addF(simdPackage, "Mask32x4.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Mask32x4.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Mask8x16.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
-	addF(simdPackage, "Int8x16.AsMask8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
-	addF(simdPackage, "Mask8x16.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Mask8x16.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64)
 	addF(simdPackage, "Mask16x8.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
 	addF(simdPackage, "Int16x8.AsMask16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
 	addF(simdPackage, "Mask16x8.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64)
 	addF(simdPackage, "Mask16x8.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Mask32x16.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
+	addF(simdPackage,
"Int32x16.AsMask32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask32x16.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Mask32x16.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Mask32x4.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsMask32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask32x4.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Mask32x4.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Mask32x8.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x8.AsMask32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask32x8.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Mask32x8.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Mask64x2.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x2.AsMask64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask64x2.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Mask64x2.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Mask64x4.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x4.AsMask64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask64x4.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Mask64x4.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Mask64x8.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsMask64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask64x8.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Mask64x8.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Mask8x16.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsMask8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask8x16.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Mask8x16.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Mask8x32.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsMask8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask8x32.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Mask8x32.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Mask8x64.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, 
sys.AMD64) + addF(simdPackage, "Int8x64.AsMask8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask8x64.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Mask8x64.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) } func opLen1(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { @@ -1505,6 +1505,76 @@ func opLen3(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa } } +func opLen4(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue4(op, t, args[0], args[1], args[2], args[3]) + } +} + +func plainPanicSimdImm(s *state) { + cmp := s.newValue0(ssa.OpConstBool, types.Types[types.TBOOL]) + cmp.AuxInt = 1 + // TODO: make this a standalone panic instead of reusing the overflow panic. + // Or maybe after we implement the switch table this will be obsolete anyway. + s.check(cmp, ir.Syms.Panicoverflow) +} + +func opLen1Imm8(op ssa.Op, t *types.Type, offset int) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + if args[1].Op == ssa.OpConst8 { + return s.newValue1I(op, t, args[1].AuxInt< Date: Thu, 12 Jun 2025 16:21:35 +0000 Subject: [PATCH 021/139] [dev.simd] cmd/compile: add round simd ops This CL is generated by CL 678195. Change-Id: Ica600229a4e9623fa45f3b5aa370cdd6d9c31686 Reviewed-on: https://go-review.googlesource.com/c/go/+/681295 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/amd64/simdssa.go | 48 + .../compile/internal/ssa/_gen/simdAMD64.rules | 212 + .../compile/internal/ssa/_gen/simdAMD64ops.go | 32 + .../internal/ssa/_gen/simdgenericOps.go | 212 + src/cmd/compile/internal/ssa/opGen.go | 1956 +++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 3596 +++++++++++++++++ .../compile/internal/ssagen/simdintrinsics.go | 212 + src/simd/stubs_amd64.go | 636 +++ 8 files changed, 6904 insertions(+) diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 253bec09ca5024..f5bc26fe742e83 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -74,6 +74,10 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPADDD512, ssa.OpAMD64VPADDQ512, ssa.OpAMD64VPADDB512, + ssa.OpAMD64VADDSUBPS128, + ssa.OpAMD64VADDSUBPS256, + ssa.OpAMD64VADDSUBPD128, + ssa.OpAMD64VADDSUBPD256, ssa.OpAMD64VANDPS128, ssa.OpAMD64VANDPS256, ssa.OpAMD64VANDPD128, @@ -564,6 +568,38 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VSQRTPDMasked512: p = simdFp1k1fp1(s, v) + case ssa.OpAMD64VROUNDPS128, + ssa.OpAMD64VROUNDPS256, + ssa.OpAMD64VROUNDPD128, + ssa.OpAMD64VROUNDPD256, + ssa.OpAMD64VRNDSCALEPS512, + ssa.OpAMD64VRNDSCALEPS128, + ssa.OpAMD64VRNDSCALEPS256, + ssa.OpAMD64VRNDSCALEPD128, + ssa.OpAMD64VRNDSCALEPD256, + ssa.OpAMD64VRNDSCALEPD512, + ssa.OpAMD64VREDUCEPS512, + ssa.OpAMD64VREDUCEPS128, + ssa.OpAMD64VREDUCEPS256, + ssa.OpAMD64VREDUCEPD128, + ssa.OpAMD64VREDUCEPD256, + ssa.OpAMD64VREDUCEPD512: + p = simdFp11Imm8(s, v) + + case ssa.OpAMD64VRNDSCALEPSMasked512, + ssa.OpAMD64VRNDSCALEPSMasked128, + ssa.OpAMD64VRNDSCALEPSMasked256, + ssa.OpAMD64VRNDSCALEPDMasked128, + ssa.OpAMD64VRNDSCALEPDMasked256, + ssa.OpAMD64VRNDSCALEPDMasked512, + ssa.OpAMD64VREDUCEPSMasked512, + 
ssa.OpAMD64VREDUCEPSMasked128, + ssa.OpAMD64VREDUCEPSMasked256, + ssa.OpAMD64VREDUCEPDMasked128, + ssa.OpAMD64VREDUCEPDMasked256, + ssa.OpAMD64VREDUCEPDMasked512: + p = simdFp1k1fp1Imm8(s, v) + case ssa.OpAMD64VCMPPS128, ssa.OpAMD64VCMPPS256, ssa.OpAMD64VCMPPD128, @@ -709,6 +745,18 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPAVGBMasked128, ssa.OpAMD64VPAVGBMasked256, ssa.OpAMD64VPAVGBMasked512, + ssa.OpAMD64VRNDSCALEPSMasked512, + ssa.OpAMD64VRNDSCALEPSMasked128, + ssa.OpAMD64VRNDSCALEPSMasked256, + ssa.OpAMD64VRNDSCALEPDMasked128, + ssa.OpAMD64VRNDSCALEPDMasked256, + ssa.OpAMD64VRNDSCALEPDMasked512, + ssa.OpAMD64VREDUCEPSMasked512, + ssa.OpAMD64VREDUCEPSMasked128, + ssa.OpAMD64VREDUCEPSMasked256, + ssa.OpAMD64VREDUCEPDMasked128, + ssa.OpAMD64VREDUCEPDMasked256, + ssa.OpAMD64VREDUCEPDMasked512, ssa.OpAMD64VDIVPSMasked512, ssa.OpAMD64VDIVPSMasked128, ssa.OpAMD64VDIVPSMasked256, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index a9daf275484491..8bf896afb26a3c 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -42,6 +42,10 @@ (AddUint8x16 ...) => (VPADDB128 ...) (AddUint8x32 ...) => (VPADDB256 ...) (AddUint8x64 ...) => (VPADDB512 ...) +(AddSubFloat32x4 ...) => (VADDSUBPS128 ...) +(AddSubFloat32x8 ...) => (VADDSUBPS256 ...) +(AddSubFloat64x2 ...) => (VADDSUBPD128 ...) +(AddSubFloat64x4 ...) => (VADDSUBPD256 ...) (AndFloat32x16 ...) => (VANDPS512 ...) (AndFloat32x4 ...) => (VANDPS128 ...) (AndFloat32x8 ...) => (VANDPS256 ...) @@ -112,6 +116,70 @@ (AverageUint8x16 ...) => (VPAVGB128 ...) (AverageUint8x32 ...) => (VPAVGB256 ...) (AverageUint8x64 ...) => (VPAVGB512 ...) +(CeilFloat32x4 x) => (VROUNDPS128 [2] x) +(CeilFloat32x8 x) => (VROUNDPS256 [2] x) +(CeilFloat64x2 x) => (VROUNDPD128 [2] x) +(CeilFloat64x4 x) => (VROUNDPD256 [2] x) +(CeilSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+10] x) +(CeilSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+10] x) +(CeilSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+10] x) +(CeilSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+10] x) +(CeilSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+10] x) +(CeilSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+10] x) +(CeilWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+2] x) +(CeilWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+2] x) +(CeilWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+2] x) +(CeilWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+2] x) +(CeilWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+2] x) +(CeilWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+2] x) +(DiffWithCeilSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+10] x) +(DiffWithCeilSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+10] x) +(DiffWithCeilSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+10] x) +(DiffWithCeilSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+10] x) +(DiffWithCeilSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+10] x) +(DiffWithCeilSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+10] x) +(DiffWithCeilWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+2] x) +(DiffWithCeilWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+2] x) +(DiffWithCeilWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+2] x) 
+(DiffWithCeilWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+2] x) +(DiffWithCeilWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+2] x) +(DiffWithCeilWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+2] x) +(DiffWithFloorSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+9] x) +(DiffWithFloorSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+9] x) +(DiffWithFloorSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+9] x) +(DiffWithFloorSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+9] x) +(DiffWithFloorSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+9] x) +(DiffWithFloorSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+9] x) +(DiffWithFloorWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+1] x) +(DiffWithFloorWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+1] x) +(DiffWithFloorWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+1] x) +(DiffWithFloorWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+1] x) +(DiffWithFloorWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+1] x) +(DiffWithFloorWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+1] x) +(DiffWithRoundSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+8] x) +(DiffWithRoundSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+8] x) +(DiffWithRoundSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+8] x) +(DiffWithRoundSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+8] x) +(DiffWithRoundSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+8] x) +(DiffWithRoundSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+8] x) +(DiffWithRoundWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+0] x) +(DiffWithRoundWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+0] x) +(DiffWithRoundWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+0] x) +(DiffWithRoundWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+0] x) +(DiffWithRoundWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+0] x) +(DiffWithRoundWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+0] x) +(DiffWithTruncSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+11] x) +(DiffWithTruncSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+11] x) +(DiffWithTruncSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+11] x) +(DiffWithTruncSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+11] x) +(DiffWithTruncSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+11] x) +(DiffWithTruncSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+11] x) +(DiffWithTruncWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+3] x) +(DiffWithTruncWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+3] x) +(DiffWithTruncWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+3] x) +(DiffWithTruncWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+3] x) +(DiffWithTruncWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+3] x) +(DiffWithTruncWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+3] x) (DivFloat32x16 ...) => (VDIVPS512 ...) (DivFloat32x4 ...) => (VDIVPS128 ...) (DivFloat32x8 ...) => (VDIVPS256 ...) 
@@ -148,6 +216,22 @@ (EqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [0] x y)) (EqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [0] x y)) (EqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [0] x y)) +(FloorFloat32x4 x) => (VROUNDPS128 [1] x) +(FloorFloat32x8 x) => (VROUNDPS256 [1] x) +(FloorFloat64x2 x) => (VROUNDPD128 [1] x) +(FloorFloat64x4 x) => (VROUNDPD256 [1] x) +(FloorSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+9] x) +(FloorSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+9] x) +(FloorSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+9] x) +(FloorSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+9] x) +(FloorSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+9] x) +(FloorSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+9] x) +(FloorWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+1] x) +(FloorWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+1] x) +(FloorWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+1] x) +(FloorWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+1] x) +(FloorWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+1] x) +(FloorWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+1] x) (GreaterFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [6] x y)) (GreaterFloat32x4 x y) => (VCMPPS128 [6] x y) (GreaterFloat32x8 x y) => (VCMPPS256 [6] x y) @@ -370,6 +454,66 @@ (MaskedAverageUint8x16 x y mask) => (VPAVGBMasked128 x y (VPMOVVec8x16ToM mask)) (MaskedAverageUint8x32 x y mask) => (VPAVGBMasked256 x y (VPMOVVec8x32ToM mask)) (MaskedAverageUint8x64 x y mask) => (VPAVGBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedCeilSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+10] x (VPMOVVec32x16ToM mask)) +(MaskedCeilSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+10] x (VPMOVVec32x4ToM mask)) +(MaskedCeilSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+10] x (VPMOVVec32x8ToM mask)) +(MaskedCeilSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+10] x (VPMOVVec64x2ToM mask)) +(MaskedCeilSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+10] x (VPMOVVec64x4ToM mask)) +(MaskedCeilSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+10] x (VPMOVVec64x8ToM mask)) +(MaskedCeilWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) +(MaskedCeilWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) +(MaskedCeilWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) +(MaskedCeilWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) +(MaskedCeilWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) +(MaskedCeilWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) +(MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+10] x (VPMOVVec32x16ToM mask)) +(MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+10] x (VPMOVVec32x4ToM mask)) +(MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+10] x (VPMOVVec32x8ToM mask)) +(MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+10] x (VPMOVVec64x2ToM 
mask)) +(MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+10] x (VPMOVVec64x4ToM mask)) +(MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+10] x (VPMOVVec64x8ToM mask)) +(MaskedDiffWithCeilWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) +(MaskedDiffWithCeilWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) +(MaskedDiffWithCeilWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) +(MaskedDiffWithCeilWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) +(MaskedDiffWithCeilWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) +(MaskedDiffWithCeilWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) +(MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+9] x (VPMOVVec32x16ToM mask)) +(MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+9] x (VPMOVVec32x4ToM mask)) +(MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+9] x (VPMOVVec32x8ToM mask)) +(MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+9] x (VPMOVVec64x2ToM mask)) +(MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+9] x (VPMOVVec64x4ToM mask)) +(MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+9] x (VPMOVVec64x8ToM mask)) +(MaskedDiffWithFloorWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) +(MaskedDiffWithFloorWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) +(MaskedDiffWithFloorWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) +(MaskedDiffWithFloorWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) +(MaskedDiffWithFloorWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) +(MaskedDiffWithFloorWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) +(MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+8] x (VPMOVVec32x16ToM mask)) +(MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+8] x (VPMOVVec32x4ToM mask)) +(MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+8] x (VPMOVVec32x8ToM mask)) +(MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+8] x (VPMOVVec64x2ToM mask)) +(MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+8] x (VPMOVVec64x4ToM mask)) +(MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+8] x (VPMOVVec64x8ToM mask)) +(MaskedDiffWithRoundWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) +(MaskedDiffWithRoundWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) +(MaskedDiffWithRoundWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) +(MaskedDiffWithRoundWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+0] x 
(VPMOVVec64x2ToM mask)) +(MaskedDiffWithRoundWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) +(MaskedDiffWithRoundWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) +(MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+11] x (VPMOVVec32x16ToM mask)) +(MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+11] x (VPMOVVec32x4ToM mask)) +(MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+11] x (VPMOVVec32x8ToM mask)) +(MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+11] x (VPMOVVec64x2ToM mask)) +(MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+11] x (VPMOVVec64x4ToM mask)) +(MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+11] x (VPMOVVec64x8ToM mask)) +(MaskedDiffWithTruncWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) +(MaskedDiffWithTruncWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) +(MaskedDiffWithTruncWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) +(MaskedDiffWithTruncWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) +(MaskedDiffWithTruncWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) +(MaskedDiffWithTruncWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) (MaskedDivFloat32x16 x y mask) => (VDIVPSMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedDivFloat32x4 x y mask) => (VDIVPSMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedDivFloat32x8 x y mask) => (VDIVPSMasked256 x y (VPMOVVec32x8ToM mask)) @@ -406,6 +550,18 @@ (MaskedEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [0] x y (VPMOVVec8x16ToM mask))) (MaskedEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [0] x y (VPMOVVec8x32ToM mask))) (MaskedEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [0] x y (VPMOVVec8x64ToM mask))) +(MaskedFloorSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+9] x (VPMOVVec32x16ToM mask)) +(MaskedFloorSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+9] x (VPMOVVec32x4ToM mask)) +(MaskedFloorSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+9] x (VPMOVVec32x8ToM mask)) +(MaskedFloorSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+9] x (VPMOVVec64x2ToM mask)) +(MaskedFloorSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+9] x (VPMOVVec64x4ToM mask)) +(MaskedFloorSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+9] x (VPMOVVec64x8ToM mask)) +(MaskedFloorWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) +(MaskedFloorWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) +(MaskedFloorWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) +(MaskedFloorWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) +(MaskedFloorWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) +(MaskedFloorWithPrecisionFloat64x8 [a] 
x mask) => (VRNDSCALEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) (MaskedGreaterFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [6] x y (VPMOVVec32x16ToM mask))) (MaskedGreaterFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [6] x y (VPMOVVec32x4ToM mask))) (MaskedGreaterFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [6] x y (VPMOVVec32x8ToM mask))) @@ -697,6 +853,18 @@ (MaskedPopCountUint8x16 x mask) => (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) (MaskedPopCountUint8x32 x mask) => (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) (MaskedPopCountUint8x64 x mask) => (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) +(MaskedRoundSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+8] x (VPMOVVec32x16ToM mask)) +(MaskedRoundSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+8] x (VPMOVVec32x4ToM mask)) +(MaskedRoundSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+8] x (VPMOVVec32x8ToM mask)) +(MaskedRoundSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+8] x (VPMOVVec64x2ToM mask)) +(MaskedRoundSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+8] x (VPMOVVec64x4ToM mask)) +(MaskedRoundSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+8] x (VPMOVVec64x8ToM mask)) +(MaskedRoundWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) +(MaskedRoundWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) +(MaskedRoundWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) +(MaskedRoundWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) +(MaskedRoundWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) +(MaskedRoundWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) (MaskedSaturatedAddInt16x16 x y mask) => (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) (MaskedSaturatedAddInt16x32 x y mask) => (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) (MaskedSaturatedAddInt16x8 x y mask) => (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) @@ -757,6 +925,18 @@ (MaskedSubUint8x16 x y mask) => (VPSUBBMasked128 x y (VPMOVVec8x16ToM mask)) (MaskedSubUint8x32 x y mask) => (VPSUBBMasked256 x y (VPMOVVec8x32ToM mask)) (MaskedSubUint8x64 x y mask) => (VPSUBBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedTruncSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+11] x (VPMOVVec32x16ToM mask)) +(MaskedTruncSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+11] x (VPMOVVec32x4ToM mask)) +(MaskedTruncSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+11] x (VPMOVVec32x8ToM mask)) +(MaskedTruncSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+11] x (VPMOVVec64x2ToM mask)) +(MaskedTruncSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+11] x (VPMOVVec64x4ToM mask)) +(MaskedTruncSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+11] x (VPMOVVec64x8ToM mask)) +(MaskedTruncWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) +(MaskedTruncWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) +(MaskedTruncWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+3] x 
(VPMOVVec32x8ToM mask)) +(MaskedTruncWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) +(MaskedTruncWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) +(MaskedTruncWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) (MaskedXorFloat32x16 x y mask) => (VXORPSMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedXorFloat32x4 x y mask) => (VXORPSMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedXorFloat32x8 x y mask) => (VXORPSMasked256 x y (VPMOVVec32x8ToM mask)) @@ -976,6 +1156,22 @@ (PopCountUint8x16 ...) => (VPOPCNTB128 ...) (PopCountUint8x32 ...) => (VPOPCNTB256 ...) (PopCountUint8x64 ...) => (VPOPCNTB512 ...) +(RoundFloat32x4 x) => (VROUNDPS128 [0] x) +(RoundFloat32x8 x) => (VROUNDPS256 [0] x) +(RoundFloat64x2 x) => (VROUNDPD128 [0] x) +(RoundFloat64x4 x) => (VROUNDPD256 [0] x) +(RoundSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+8] x) +(RoundSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+8] x) +(RoundSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+8] x) +(RoundSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+8] x) +(RoundSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+8] x) +(RoundSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+8] x) +(RoundWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+0] x) +(RoundWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+0] x) +(RoundWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+0] x) +(RoundWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+0] x) +(RoundWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+0] x) +(RoundWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+0] x) (SaturatedAddInt16x16 ...) => (VPADDSW256 ...) (SaturatedAddInt16x32 ...) => (VPADDSW512 ...) (SaturatedAddInt16x8 ...) => (VPADDSW128 ...) @@ -1046,6 +1242,22 @@ (SubUint8x16 ...) => (VPSUBB128 ...) (SubUint8x32 ...) => (VPSUBB256 ...) (SubUint8x64 ...) => (VPSUBB512 ...) +(TruncFloat32x4 x) => (VROUNDPS128 [3] x) +(TruncFloat32x8 x) => (VROUNDPS256 [3] x) +(TruncFloat64x2 x) => (VROUNDPD128 [3] x) +(TruncFloat64x4 x) => (VROUNDPD256 [3] x) +(TruncSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+11] x) +(TruncSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+11] x) +(TruncSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+11] x) +(TruncSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+11] x) +(TruncSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+11] x) +(TruncSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+11] x) +(TruncWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+3] x) +(TruncWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+3] x) +(TruncWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+3] x) +(TruncWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+3] x) +(TruncWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+3] x) +(TruncWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+3] x) (XorFloat32x16 ...) => (VXORPS512 ...) (XorFloat32x4 ...) => (VXORPS128 ...) (XorFloat32x8 ...) => (VXORPS256 ...) 
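The Ceil/Floor/Round/Trunc rules above compose the x86 rounding-control immediate directly: the low two bits select the mode (0 = round to nearest, 1 = floor, 2 = ceil, 3 = trunc), and the SuppressException variants add 8 to set the precision-exception-suppression bit, which is why the VRNDSCALE/VREDUCE rewrites use [a+0]..[a+3] and [a+8]..[a+11]. The caller-supplied precision a is assumed to arrive already shifted into the high nibble by the opLen1Imm8 glue shown earlier in this series. The Go sketch below is illustrative only and not part of the patch; the package, constant, and function names are hypothetical.

package roundimm

// Rounding-mode bits as used by the rewrite rules above (VROUNDP*,
// VRNDSCALEP*, VREDUCEP*). Names are hypothetical.
const (
	rcNearest  = 0 // Round*  => [a+0], VROUNDP* [0]
	rcFloor    = 1 // Floor*  => [a+1], VROUNDP* [1]
	rcCeil     = 2 // Ceil*   => [a+2], VROUNDP* [2]
	rcTrunc    = 3 // Trunc*  => [a+3], VROUNDP* [3]
	rcSuppress = 8 // *SuppressException* variants add this bit
)

// roundScaleImm mirrors how the rules build the VRNDSCALE/VREDUCE
// immediate: precHighNibble is the precision assumed to be shifted left
// by 4 already (as opLen1Imm8 appears to do), and mode/suppress supply
// the low bits.
func roundScaleImm(precHighNibble, mode int64, suppress bool) int64 {
	if suppress {
		mode += rcSuppress // e.g. CeilSuppressException... => [a+10]
	}
	return precHighNibble + mode
}
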
diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index b9709ca819210f..6881757d1a1bab 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -30,6 +30,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VSQRTPS512", argLength: 1, reg: fp11, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VXORPS512", argLength: 2, reg: fp21, asm: "VXORPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VADDPS128", argLength: 2, reg: fp21, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VADDSUBPS128", argLength: 2, reg: fp21, asm: "VADDSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VANDPS128", argLength: 2, reg: fp21, asm: "VANDPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VANDNPS128", argLength: 2, reg: fp21, asm: "VANDNPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VRCP14PS128", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -58,6 +59,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VSQRTPS128", argLength: 1, reg: fp11, asm: "VSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VXORPS128", argLength: 2, reg: fp21, asm: "VXORPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VADDPS256", argLength: 2, reg: fp21, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VADDSUBPS256", argLength: 2, reg: fp21, asm: "VADDSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VANDPS256", argLength: 2, reg: fp21, asm: "VANDPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VANDNPS256", argLength: 2, reg: fp21, asm: "VANDNPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VRCP14PS256", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -86,6 +88,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VSQRTPS256", argLength: 1, reg: fp11, asm: "VSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VXORPS256", argLength: 2, reg: fp21, asm: "VXORPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VADDPD128", argLength: 2, reg: fp21, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VADDSUBPD128", argLength: 2, reg: fp21, asm: "VADDSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VANDPD128", argLength: 2, reg: fp21, asm: "VANDPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VANDNPD128", argLength: 2, reg: fp21, asm: "VANDNPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VRCP14PD128", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -114,6 +117,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VSQRTPD128", argLength: 1, reg: fp11, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VXORPD128", argLength: 2, reg: fp21, asm: "VXORPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VADDPD256", argLength: 2, reg: fp21, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VADDSUBPD256", argLength: 2, 
reg: fp21, asm: "VADDSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VANDPD256", argLength: 2, reg: fp21, asm: "VANDPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VANDNPD256", argLength: 2, reg: fp21, asm: "VANDNPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VRCP14PD256", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -543,17 +547,45 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPMINUBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXUB512", argLength: 2, reg: fp21, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINUB512", argLength: 2, reg: fp21, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VRNDSCALEPS512", argLength: 1, reg: fp11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VREDUCEPS512", argLength: 1, reg: fp11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VCMPPS512", argLength: 2, reg: fp2k1, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VRNDSCALEPSMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VREDUCEPSMasked512", argLength: 2, reg: fp1k1fp1, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VCMPPSMasked512", argLength: 3, reg: fp2k1k1, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VROUNDPS128", argLength: 1, reg: fp11, asm: "VROUNDPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRNDSCALEPS128", argLength: 1, reg: fp11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VREDUCEPS128", argLength: 1, reg: fp11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VCMPPS128", argLength: 2, reg: fp21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VRNDSCALEPSMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VREDUCEPSMasked128", argLength: 2, reg: fp1k1fp1, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VCMPPSMasked128", argLength: 3, reg: fp2k1k1, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VROUNDPS256", argLength: 1, reg: fp11, asm: "VROUNDPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRNDSCALEPS256", argLength: 1, reg: fp11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPS256", argLength: 1, reg: fp11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VCMPPS256", argLength: 2, reg: fp21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VRNDSCALEPSMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPSMasked256", argLength: 2, reg: fp1k1fp1, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: 
"Vec256", resultInArg0: false}, {name: "VCMPPSMasked256", argLength: 3, reg: fp2k1k1, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VROUNDPD128", argLength: 1, reg: fp11, asm: "VROUNDPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRNDSCALEPD128", argLength: 1, reg: fp11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VREDUCEPD128", argLength: 1, reg: fp11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VCMPPD128", argLength: 2, reg: fp21, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VRNDSCALEPDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VREDUCEPDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VCMPPDMasked128", argLength: 3, reg: fp2k1k1, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VROUNDPD256", argLength: 1, reg: fp11, asm: "VROUNDPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRNDSCALEPD256", argLength: 1, reg: fp11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPD256", argLength: 1, reg: fp11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VCMPPD256", argLength: 2, reg: fp21, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VRNDSCALEPDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VCMPPDMasked256", argLength: 3, reg: fp2k1k1, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VRNDSCALEPD512", argLength: 1, reg: fp11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VREDUCEPD512", argLength: 1, reg: fp11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VCMPPD512", argLength: 2, reg: fp2k1, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VRNDSCALEPDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VREDUCEPDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VCMPPDMasked512", argLength: 3, reg: fp2k1k1, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPW256", argLength: 2, reg: fp2k1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPWMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 529ec09de92d5c..25a496c52f0dea 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -46,12 +46,15 @@ func 
simdGenericOps() []opData { {name: "SubFloat32x16", argLength: 2, commutative: false}, {name: "XorFloat32x16", argLength: 2, commutative: true}, {name: "AddFloat32x4", argLength: 2, commutative: true}, + {name: "AddSubFloat32x4", argLength: 2, commutative: false}, {name: "AndFloat32x4", argLength: 2, commutative: true}, {name: "AndNotFloat32x4", argLength: 2, commutative: true}, {name: "ApproximateReciprocalFloat32x4", argLength: 1, commutative: false}, {name: "ApproximateReciprocalOfSqrtFloat32x4", argLength: 1, commutative: false}, + {name: "CeilFloat32x4", argLength: 1, commutative: false}, {name: "DivFloat32x4", argLength: 2, commutative: false}, {name: "EqualFloat32x4", argLength: 2, commutative: true}, + {name: "FloorFloat32x4", argLength: 1, commutative: false}, {name: "GreaterFloat32x4", argLength: 2, commutative: false}, {name: "GreaterEqualFloat32x4", argLength: 2, commutative: false}, {name: "IsNanFloat32x4", argLength: 2, commutative: true}, @@ -86,16 +89,21 @@ func simdGenericOps() []opData { {name: "OrFloat32x4", argLength: 2, commutative: true}, {name: "PairwiseAddFloat32x4", argLength: 2, commutative: false}, {name: "PairwiseSubFloat32x4", argLength: 2, commutative: false}, + {name: "RoundFloat32x4", argLength: 1, commutative: false}, {name: "SqrtFloat32x4", argLength: 1, commutative: false}, {name: "SubFloat32x4", argLength: 2, commutative: false}, + {name: "TruncFloat32x4", argLength: 1, commutative: false}, {name: "XorFloat32x4", argLength: 2, commutative: true}, {name: "AddFloat32x8", argLength: 2, commutative: true}, + {name: "AddSubFloat32x8", argLength: 2, commutative: false}, {name: "AndFloat32x8", argLength: 2, commutative: true}, {name: "AndNotFloat32x8", argLength: 2, commutative: true}, {name: "ApproximateReciprocalFloat32x8", argLength: 1, commutative: false}, {name: "ApproximateReciprocalOfSqrtFloat32x8", argLength: 1, commutative: false}, + {name: "CeilFloat32x8", argLength: 1, commutative: false}, {name: "DivFloat32x8", argLength: 2, commutative: false}, {name: "EqualFloat32x8", argLength: 2, commutative: true}, + {name: "FloorFloat32x8", argLength: 1, commutative: false}, {name: "GreaterFloat32x8", argLength: 2, commutative: false}, {name: "GreaterEqualFloat32x8", argLength: 2, commutative: false}, {name: "IsNanFloat32x8", argLength: 2, commutative: true}, @@ -130,16 +138,21 @@ func simdGenericOps() []opData { {name: "OrFloat32x8", argLength: 2, commutative: true}, {name: "PairwiseAddFloat32x8", argLength: 2, commutative: false}, {name: "PairwiseSubFloat32x8", argLength: 2, commutative: false}, + {name: "RoundFloat32x8", argLength: 1, commutative: false}, {name: "SqrtFloat32x8", argLength: 1, commutative: false}, {name: "SubFloat32x8", argLength: 2, commutative: false}, + {name: "TruncFloat32x8", argLength: 1, commutative: false}, {name: "XorFloat32x8", argLength: 2, commutative: true}, {name: "AddFloat64x2", argLength: 2, commutative: true}, + {name: "AddSubFloat64x2", argLength: 2, commutative: false}, {name: "AndFloat64x2", argLength: 2, commutative: true}, {name: "AndNotFloat64x2", argLength: 2, commutative: true}, {name: "ApproximateReciprocalFloat64x2", argLength: 1, commutative: false}, {name: "ApproximateReciprocalOfSqrtFloat64x2", argLength: 1, commutative: false}, + {name: "CeilFloat64x2", argLength: 1, commutative: false}, {name: "DivFloat64x2", argLength: 2, commutative: false}, {name: "EqualFloat64x2", argLength: 2, commutative: true}, + {name: "FloorFloat64x2", argLength: 1, commutative: false}, {name: "GreaterFloat64x2", argLength: 2, 
commutative: false}, {name: "GreaterEqualFloat64x2", argLength: 2, commutative: false}, {name: "IsNanFloat64x2", argLength: 2, commutative: true}, @@ -174,16 +187,21 @@ func simdGenericOps() []opData { {name: "OrFloat64x2", argLength: 2, commutative: true}, {name: "PairwiseAddFloat64x2", argLength: 2, commutative: false}, {name: "PairwiseSubFloat64x2", argLength: 2, commutative: false}, + {name: "RoundFloat64x2", argLength: 1, commutative: false}, {name: "SqrtFloat64x2", argLength: 1, commutative: false}, {name: "SubFloat64x2", argLength: 2, commutative: false}, + {name: "TruncFloat64x2", argLength: 1, commutative: false}, {name: "XorFloat64x2", argLength: 2, commutative: true}, {name: "AddFloat64x4", argLength: 2, commutative: true}, + {name: "AddSubFloat64x4", argLength: 2, commutative: false}, {name: "AndFloat64x4", argLength: 2, commutative: true}, {name: "AndNotFloat64x4", argLength: 2, commutative: true}, {name: "ApproximateReciprocalFloat64x4", argLength: 1, commutative: false}, {name: "ApproximateReciprocalOfSqrtFloat64x4", argLength: 1, commutative: false}, + {name: "CeilFloat64x4", argLength: 1, commutative: false}, {name: "DivFloat64x4", argLength: 2, commutative: false}, {name: "EqualFloat64x4", argLength: 2, commutative: true}, + {name: "FloorFloat64x4", argLength: 1, commutative: false}, {name: "GreaterFloat64x4", argLength: 2, commutative: false}, {name: "GreaterEqualFloat64x4", argLength: 2, commutative: false}, {name: "IsNanFloat64x4", argLength: 2, commutative: true}, @@ -218,8 +236,10 @@ func simdGenericOps() []opData { {name: "OrFloat64x4", argLength: 2, commutative: true}, {name: "PairwiseAddFloat64x4", argLength: 2, commutative: false}, {name: "PairwiseSubFloat64x4", argLength: 2, commutative: false}, + {name: "RoundFloat64x4", argLength: 1, commutative: false}, {name: "SqrtFloat64x4", argLength: 1, commutative: false}, {name: "SubFloat64x4", argLength: 2, commutative: false}, + {name: "TruncFloat64x4", argLength: 1, commutative: false}, {name: "XorFloat64x4", argLength: 2, commutative: true}, {name: "AddFloat64x8", argLength: 2, commutative: true}, {name: "AndFloat64x8", argLength: 2, commutative: true}, @@ -1075,5 +1095,197 @@ func simdGenericOps() []opData { {name: "SaturatedAddUint8x64", argLength: 2, commutative: true}, {name: "SaturatedSubUint8x64", argLength: 2, commutative: false}, {name: "SubUint8x64", argLength: 2, commutative: false}, + {name: "CeilSuppressExceptionWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilSuppressExceptionWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorSuppressExceptionWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundSuppressExceptionWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncSuppressExceptionWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorSuppressExceptionWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + 
{name: "FloorWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "MaskedCeilSuppressExceptionWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedCeilWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithCeilWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithFloorWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithRoundWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithTruncWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedFloorSuppressExceptionWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedFloorWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRoundSuppressExceptionWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRoundWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedTruncSuppressExceptionWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedTruncWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundSuppressExceptionWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncSuppressExceptionWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilSuppressExceptionWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilSuppressExceptionWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorSuppressExceptionWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundSuppressExceptionWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncSuppressExceptionWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorSuppressExceptionWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "MaskedCeilSuppressExceptionWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: 
"MaskedCeilWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithCeilWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithFloorWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithRoundWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithTruncWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedFloorSuppressExceptionWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedFloorWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRoundSuppressExceptionWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRoundWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedTruncSuppressExceptionWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedTruncWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundSuppressExceptionWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncSuppressExceptionWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilSuppressExceptionWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilSuppressExceptionWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorSuppressExceptionWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundSuppressExceptionWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncSuppressExceptionWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorSuppressExceptionWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "MaskedCeilSuppressExceptionWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedCeilWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: 
"MaskedDiffWithCeilWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithFloorWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithRoundWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithTruncWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedFloorSuppressExceptionWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedFloorWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRoundSuppressExceptionWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRoundWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedTruncSuppressExceptionWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedTruncWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundSuppressExceptionWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncSuppressExceptionWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilSuppressExceptionWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilSuppressExceptionWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorSuppressExceptionWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundSuppressExceptionWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncSuppressExceptionWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorSuppressExceptionWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "MaskedCeilSuppressExceptionWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedCeilWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithCeilWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: 
"MaskedDiffWithFloorWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithRoundWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithTruncWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedFloorSuppressExceptionWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedFloorWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRoundSuppressExceptionWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRoundWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedTruncSuppressExceptionWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedTruncWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundSuppressExceptionWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncSuppressExceptionWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilSuppressExceptionWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilSuppressExceptionWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorSuppressExceptionWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundSuppressExceptionWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncSuppressExceptionWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorSuppressExceptionWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "MaskedCeilSuppressExceptionWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedCeilWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithCeilWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithFloorWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: 
"MaskedDiffWithRoundWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithTruncWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedFloorSuppressExceptionWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedFloorWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRoundSuppressExceptionWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRoundWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedTruncSuppressExceptionWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedTruncWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundSuppressExceptionWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncSuppressExceptionWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilSuppressExceptionWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilSuppressExceptionWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorSuppressExceptionWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundSuppressExceptionWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncSuppressExceptionWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorSuppressExceptionWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "MaskedCeilSuppressExceptionWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedCeilWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithCeilWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithFloorWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithRoundWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: 
"MaskedDiffWithTruncWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedFloorSuppressExceptionWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedFloorWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRoundSuppressExceptionWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRoundWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedTruncSuppressExceptionWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedTruncWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundSuppressExceptionWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncSuppressExceptionWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, } } diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index c7abca814e9a00..090cf6903218f9 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1223,6 +1223,7 @@ const ( OpAMD64VSQRTPS512 OpAMD64VXORPS512 OpAMD64VADDPS128 + OpAMD64VADDSUBPS128 OpAMD64VANDPS128 OpAMD64VANDNPS128 OpAMD64VRCP14PS128 @@ -1251,6 +1252,7 @@ const ( OpAMD64VSQRTPS128 OpAMD64VXORPS128 OpAMD64VADDPS256 + OpAMD64VADDSUBPS256 OpAMD64VANDPS256 OpAMD64VANDNPS256 OpAMD64VRCP14PS256 @@ -1279,6 +1281,7 @@ const ( OpAMD64VSQRTPS256 OpAMD64VXORPS256 OpAMD64VADDPD128 + OpAMD64VADDSUBPD128 OpAMD64VANDPD128 OpAMD64VANDNPD128 OpAMD64VRCP14PD128 @@ -1307,6 +1310,7 @@ const ( OpAMD64VSQRTPD128 OpAMD64VXORPD128 OpAMD64VADDPD256 + OpAMD64VADDSUBPD256 OpAMD64VANDPD256 OpAMD64VANDNPD256 OpAMD64VRCP14PD256 @@ -1736,17 +1740,45 @@ const ( OpAMD64VPMINUBMasked512 OpAMD64VPMAXUB512 OpAMD64VPMINUB512 + OpAMD64VRNDSCALEPS512 + OpAMD64VREDUCEPS512 OpAMD64VCMPPS512 + OpAMD64VRNDSCALEPSMasked512 + OpAMD64VREDUCEPSMasked512 OpAMD64VCMPPSMasked512 + OpAMD64VROUNDPS128 + OpAMD64VRNDSCALEPS128 + OpAMD64VREDUCEPS128 OpAMD64VCMPPS128 + OpAMD64VRNDSCALEPSMasked128 + OpAMD64VREDUCEPSMasked128 OpAMD64VCMPPSMasked128 + OpAMD64VROUNDPS256 + OpAMD64VRNDSCALEPS256 + OpAMD64VREDUCEPS256 OpAMD64VCMPPS256 + OpAMD64VRNDSCALEPSMasked256 + OpAMD64VREDUCEPSMasked256 OpAMD64VCMPPSMasked256 + OpAMD64VROUNDPD128 + OpAMD64VRNDSCALEPD128 + OpAMD64VREDUCEPD128 OpAMD64VCMPPD128 + OpAMD64VRNDSCALEPDMasked128 + OpAMD64VREDUCEPDMasked128 OpAMD64VCMPPDMasked128 + OpAMD64VROUNDPD256 + OpAMD64VRNDSCALEPD256 + OpAMD64VREDUCEPD256 OpAMD64VCMPPD256 + OpAMD64VRNDSCALEPDMasked256 + OpAMD64VREDUCEPDMasked256 OpAMD64VCMPPDMasked256 + OpAMD64VRNDSCALEPD512 + OpAMD64VREDUCEPD512 OpAMD64VCMPPD512 + OpAMD64VRNDSCALEPDMasked512 + OpAMD64VREDUCEPDMasked512 OpAMD64VCMPPDMasked512 OpAMD64VPCMPW256 OpAMD64VPCMPWMasked256 @@ -4065,12 +4097,15 @@ const ( OpSubFloat32x16 OpXorFloat32x16 OpAddFloat32x4 + OpAddSubFloat32x4 OpAndFloat32x4 OpAndNotFloat32x4 OpApproximateReciprocalFloat32x4 OpApproximateReciprocalOfSqrtFloat32x4 + OpCeilFloat32x4 OpDivFloat32x4 OpEqualFloat32x4 + OpFloorFloat32x4 OpGreaterFloat32x4 OpGreaterEqualFloat32x4 OpIsNanFloat32x4 @@ -4105,16 +4140,21 @@ const ( OpOrFloat32x4 OpPairwiseAddFloat32x4 OpPairwiseSubFloat32x4 + OpRoundFloat32x4 OpSqrtFloat32x4 OpSubFloat32x4 + OpTruncFloat32x4 OpXorFloat32x4 
OpAddFloat32x8 + OpAddSubFloat32x8 OpAndFloat32x8 OpAndNotFloat32x8 OpApproximateReciprocalFloat32x8 OpApproximateReciprocalOfSqrtFloat32x8 + OpCeilFloat32x8 OpDivFloat32x8 OpEqualFloat32x8 + OpFloorFloat32x8 OpGreaterFloat32x8 OpGreaterEqualFloat32x8 OpIsNanFloat32x8 @@ -4149,16 +4189,21 @@ const ( OpOrFloat32x8 OpPairwiseAddFloat32x8 OpPairwiseSubFloat32x8 + OpRoundFloat32x8 OpSqrtFloat32x8 OpSubFloat32x8 + OpTruncFloat32x8 OpXorFloat32x8 OpAddFloat64x2 + OpAddSubFloat64x2 OpAndFloat64x2 OpAndNotFloat64x2 OpApproximateReciprocalFloat64x2 OpApproximateReciprocalOfSqrtFloat64x2 + OpCeilFloat64x2 OpDivFloat64x2 OpEqualFloat64x2 + OpFloorFloat64x2 OpGreaterFloat64x2 OpGreaterEqualFloat64x2 OpIsNanFloat64x2 @@ -4193,16 +4238,21 @@ const ( OpOrFloat64x2 OpPairwiseAddFloat64x2 OpPairwiseSubFloat64x2 + OpRoundFloat64x2 OpSqrtFloat64x2 OpSubFloat64x2 + OpTruncFloat64x2 OpXorFloat64x2 OpAddFloat64x4 + OpAddSubFloat64x4 OpAndFloat64x4 OpAndNotFloat64x4 OpApproximateReciprocalFloat64x4 OpApproximateReciprocalOfSqrtFloat64x4 + OpCeilFloat64x4 OpDivFloat64x4 OpEqualFloat64x4 + OpFloorFloat64x4 OpGreaterFloat64x4 OpGreaterEqualFloat64x4 OpIsNanFloat64x4 @@ -4237,8 +4287,10 @@ const ( OpOrFloat64x4 OpPairwiseAddFloat64x4 OpPairwiseSubFloat64x4 + OpRoundFloat64x4 OpSqrtFloat64x4 OpSubFloat64x4 + OpTruncFloat64x4 OpXorFloat64x4 OpAddFloat64x8 OpAndFloat64x8 @@ -5094,6 +5146,198 @@ const ( OpSaturatedAddUint8x64 OpSaturatedSubUint8x64 OpSubUint8x64 + OpCeilSuppressExceptionWithPrecisionFloat32x16 + OpCeilWithPrecisionFloat32x16 + OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x16 + OpDiffWithCeilWithPrecisionFloat32x16 + OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x16 + OpDiffWithFloorWithPrecisionFloat32x16 + OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x16 + OpDiffWithRoundWithPrecisionFloat32x16 + OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x16 + OpDiffWithTruncWithPrecisionFloat32x16 + OpFloorSuppressExceptionWithPrecisionFloat32x16 + OpFloorWithPrecisionFloat32x16 + OpMaskedCeilSuppressExceptionWithPrecisionFloat32x16 + OpMaskedCeilWithPrecisionFloat32x16 + OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16 + OpMaskedDiffWithCeilWithPrecisionFloat32x16 + OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16 + OpMaskedDiffWithFloorWithPrecisionFloat32x16 + OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16 + OpMaskedDiffWithRoundWithPrecisionFloat32x16 + OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16 + OpMaskedDiffWithTruncWithPrecisionFloat32x16 + OpMaskedFloorSuppressExceptionWithPrecisionFloat32x16 + OpMaskedFloorWithPrecisionFloat32x16 + OpMaskedRoundSuppressExceptionWithPrecisionFloat32x16 + OpMaskedRoundWithPrecisionFloat32x16 + OpMaskedTruncSuppressExceptionWithPrecisionFloat32x16 + OpMaskedTruncWithPrecisionFloat32x16 + OpRoundSuppressExceptionWithPrecisionFloat32x16 + OpRoundWithPrecisionFloat32x16 + OpTruncSuppressExceptionWithPrecisionFloat32x16 + OpTruncWithPrecisionFloat32x16 + OpCeilSuppressExceptionWithPrecisionFloat32x4 + OpCeilWithPrecisionFloat32x4 + OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x4 + OpDiffWithCeilWithPrecisionFloat32x4 + OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x4 + OpDiffWithFloorWithPrecisionFloat32x4 + OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x4 + OpDiffWithRoundWithPrecisionFloat32x4 + OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x4 + OpDiffWithTruncWithPrecisionFloat32x4 + OpFloorSuppressExceptionWithPrecisionFloat32x4 + OpFloorWithPrecisionFloat32x4 + 
OpMaskedCeilSuppressExceptionWithPrecisionFloat32x4 + OpMaskedCeilWithPrecisionFloat32x4 + OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4 + OpMaskedDiffWithCeilWithPrecisionFloat32x4 + OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4 + OpMaskedDiffWithFloorWithPrecisionFloat32x4 + OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4 + OpMaskedDiffWithRoundWithPrecisionFloat32x4 + OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4 + OpMaskedDiffWithTruncWithPrecisionFloat32x4 + OpMaskedFloorSuppressExceptionWithPrecisionFloat32x4 + OpMaskedFloorWithPrecisionFloat32x4 + OpMaskedRoundSuppressExceptionWithPrecisionFloat32x4 + OpMaskedRoundWithPrecisionFloat32x4 + OpMaskedTruncSuppressExceptionWithPrecisionFloat32x4 + OpMaskedTruncWithPrecisionFloat32x4 + OpRoundSuppressExceptionWithPrecisionFloat32x4 + OpRoundWithPrecisionFloat32x4 + OpTruncSuppressExceptionWithPrecisionFloat32x4 + OpTruncWithPrecisionFloat32x4 + OpCeilSuppressExceptionWithPrecisionFloat32x8 + OpCeilWithPrecisionFloat32x8 + OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x8 + OpDiffWithCeilWithPrecisionFloat32x8 + OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x8 + OpDiffWithFloorWithPrecisionFloat32x8 + OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x8 + OpDiffWithRoundWithPrecisionFloat32x8 + OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x8 + OpDiffWithTruncWithPrecisionFloat32x8 + OpFloorSuppressExceptionWithPrecisionFloat32x8 + OpFloorWithPrecisionFloat32x8 + OpMaskedCeilSuppressExceptionWithPrecisionFloat32x8 + OpMaskedCeilWithPrecisionFloat32x8 + OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8 + OpMaskedDiffWithCeilWithPrecisionFloat32x8 + OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8 + OpMaskedDiffWithFloorWithPrecisionFloat32x8 + OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8 + OpMaskedDiffWithRoundWithPrecisionFloat32x8 + OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8 + OpMaskedDiffWithTruncWithPrecisionFloat32x8 + OpMaskedFloorSuppressExceptionWithPrecisionFloat32x8 + OpMaskedFloorWithPrecisionFloat32x8 + OpMaskedRoundSuppressExceptionWithPrecisionFloat32x8 + OpMaskedRoundWithPrecisionFloat32x8 + OpMaskedTruncSuppressExceptionWithPrecisionFloat32x8 + OpMaskedTruncWithPrecisionFloat32x8 + OpRoundSuppressExceptionWithPrecisionFloat32x8 + OpRoundWithPrecisionFloat32x8 + OpTruncSuppressExceptionWithPrecisionFloat32x8 + OpTruncWithPrecisionFloat32x8 + OpCeilSuppressExceptionWithPrecisionFloat64x2 + OpCeilWithPrecisionFloat64x2 + OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x2 + OpDiffWithCeilWithPrecisionFloat64x2 + OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x2 + OpDiffWithFloorWithPrecisionFloat64x2 + OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x2 + OpDiffWithRoundWithPrecisionFloat64x2 + OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x2 + OpDiffWithTruncWithPrecisionFloat64x2 + OpFloorSuppressExceptionWithPrecisionFloat64x2 + OpFloorWithPrecisionFloat64x2 + OpMaskedCeilSuppressExceptionWithPrecisionFloat64x2 + OpMaskedCeilWithPrecisionFloat64x2 + OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2 + OpMaskedDiffWithCeilWithPrecisionFloat64x2 + OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2 + OpMaskedDiffWithFloorWithPrecisionFloat64x2 + OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2 + OpMaskedDiffWithRoundWithPrecisionFloat64x2 + OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2 + OpMaskedDiffWithTruncWithPrecisionFloat64x2 + 
OpMaskedFloorSuppressExceptionWithPrecisionFloat64x2 + OpMaskedFloorWithPrecisionFloat64x2 + OpMaskedRoundSuppressExceptionWithPrecisionFloat64x2 + OpMaskedRoundWithPrecisionFloat64x2 + OpMaskedTruncSuppressExceptionWithPrecisionFloat64x2 + OpMaskedTruncWithPrecisionFloat64x2 + OpRoundSuppressExceptionWithPrecisionFloat64x2 + OpRoundWithPrecisionFloat64x2 + OpTruncSuppressExceptionWithPrecisionFloat64x2 + OpTruncWithPrecisionFloat64x2 + OpCeilSuppressExceptionWithPrecisionFloat64x4 + OpCeilWithPrecisionFloat64x4 + OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x4 + OpDiffWithCeilWithPrecisionFloat64x4 + OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x4 + OpDiffWithFloorWithPrecisionFloat64x4 + OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x4 + OpDiffWithRoundWithPrecisionFloat64x4 + OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x4 + OpDiffWithTruncWithPrecisionFloat64x4 + OpFloorSuppressExceptionWithPrecisionFloat64x4 + OpFloorWithPrecisionFloat64x4 + OpMaskedCeilSuppressExceptionWithPrecisionFloat64x4 + OpMaskedCeilWithPrecisionFloat64x4 + OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4 + OpMaskedDiffWithCeilWithPrecisionFloat64x4 + OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4 + OpMaskedDiffWithFloorWithPrecisionFloat64x4 + OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4 + OpMaskedDiffWithRoundWithPrecisionFloat64x4 + OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4 + OpMaskedDiffWithTruncWithPrecisionFloat64x4 + OpMaskedFloorSuppressExceptionWithPrecisionFloat64x4 + OpMaskedFloorWithPrecisionFloat64x4 + OpMaskedRoundSuppressExceptionWithPrecisionFloat64x4 + OpMaskedRoundWithPrecisionFloat64x4 + OpMaskedTruncSuppressExceptionWithPrecisionFloat64x4 + OpMaskedTruncWithPrecisionFloat64x4 + OpRoundSuppressExceptionWithPrecisionFloat64x4 + OpRoundWithPrecisionFloat64x4 + OpTruncSuppressExceptionWithPrecisionFloat64x4 + OpTruncWithPrecisionFloat64x4 + OpCeilSuppressExceptionWithPrecisionFloat64x8 + OpCeilWithPrecisionFloat64x8 + OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x8 + OpDiffWithCeilWithPrecisionFloat64x8 + OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x8 + OpDiffWithFloorWithPrecisionFloat64x8 + OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x8 + OpDiffWithRoundWithPrecisionFloat64x8 + OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x8 + OpDiffWithTruncWithPrecisionFloat64x8 + OpFloorSuppressExceptionWithPrecisionFloat64x8 + OpFloorWithPrecisionFloat64x8 + OpMaskedCeilSuppressExceptionWithPrecisionFloat64x8 + OpMaskedCeilWithPrecisionFloat64x8 + OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8 + OpMaskedDiffWithCeilWithPrecisionFloat64x8 + OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8 + OpMaskedDiffWithFloorWithPrecisionFloat64x8 + OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8 + OpMaskedDiffWithRoundWithPrecisionFloat64x8 + OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8 + OpMaskedDiffWithTruncWithPrecisionFloat64x8 + OpMaskedFloorSuppressExceptionWithPrecisionFloat64x8 + OpMaskedFloorWithPrecisionFloat64x8 + OpMaskedRoundSuppressExceptionWithPrecisionFloat64x8 + OpMaskedRoundWithPrecisionFloat64x8 + OpMaskedTruncSuppressExceptionWithPrecisionFloat64x8 + OpMaskedTruncWithPrecisionFloat64x8 + OpRoundSuppressExceptionWithPrecisionFloat64x8 + OpRoundWithPrecisionFloat64x8 + OpTruncSuppressExceptionWithPrecisionFloat64x8 + OpTruncWithPrecisionFloat64x8 ) var opcodeTable = [...]opInfo{ @@ -18091,6 +18335,20 @@ var opcodeTable = [...]opInfo{ }, 
}, }, + { + name: "VADDSUBPS128", + argLen: 2, + asm: x86.AVADDSUBPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VANDPS128", argLen: 2, @@ -18506,6 +18764,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VADDSUBPS256", + argLen: 2, + asm: x86.AVADDSUBPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VANDPS256", argLen: 2, @@ -18921,6 +19193,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VADDSUBPD128", + argLen: 2, + asm: x86.AVADDSUBPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VANDPD128", argLen: 2, @@ -19336,6 +19622,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VADDSUBPD256", + argLen: 2, + asm: x86.AVADDSUBPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VANDPD256", argLen: 2, @@ -25772,6 +26072,34 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VRNDSCALEPS512", + auxType: auxInt8, + argLen: 1, + asm: x86.AVRNDSCALEPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VREDUCEPS512", + auxType: auxInt8, + argLen: 1, + asm: x86.AVREDUCEPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VCMPPS512", auxType: auxInt8, @@ -25788,6 +26116,36 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VRNDSCALEPSMasked512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVRNDSCALEPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VREDUCEPSMasked512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVREDUCEPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VCMPPSMasked512", auxType: auxInt8, @@ -25805,6 +26163,48 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VROUNDPS128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVROUNDPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VRNDSCALEPS128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVRNDSCALEPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VREDUCEPS128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVREDUCEPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VCMPPS128", auxType: auxInt8, @@ -25821,6 +26221,36 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VRNDSCALEPSMasked128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVRNDSCALEPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VREDUCEPSMasked128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVREDUCEPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VCMPPSMasked128", auxType: auxInt8, @@ -25838,6 +26268,48 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VROUNDPS256", + auxType: auxInt8, + argLen: 1, + asm: x86.AVROUNDPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VRNDSCALEPS256", + auxType: auxInt8, + argLen: 1, + asm: x86.AVRNDSCALEPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VREDUCEPS256", + auxType: auxInt8, + argLen: 1, + asm: x86.AVREDUCEPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VCMPPS256", auxType: auxInt8, @@ -25854,6 +26326,36 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VRNDSCALEPSMasked256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVRNDSCALEPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VREDUCEPSMasked256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVREDUCEPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: 
"VCMPPSMasked256", auxType: auxInt8, @@ -25871,6 +26373,48 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VROUNDPD128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVROUNDPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VRNDSCALEPD128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVRNDSCALEPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VREDUCEPD128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVREDUCEPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VCMPPD128", auxType: auxInt8, @@ -25887,6 +26431,36 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VRNDSCALEPDMasked128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVRNDSCALEPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VREDUCEPDMasked128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVREDUCEPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VCMPPDMasked128", auxType: auxInt8, @@ -25904,6 +26478,48 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VROUNDPD256", + auxType: auxInt8, + argLen: 1, + asm: x86.AVROUNDPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VRNDSCALEPD256", + auxType: auxInt8, + argLen: 1, + asm: x86.AVRNDSCALEPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VREDUCEPD256", + auxType: auxInt8, + argLen: 1, + asm: x86.AVREDUCEPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VCMPPD256", auxType: auxInt8, @@ -25920,6 +26536,36 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VRNDSCALEPDMasked256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVRNDSCALEPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VREDUCEPDMasked256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVREDUCEPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 
K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VCMPPDMasked256", auxType: auxInt8, @@ -25937,6 +26583,34 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VRNDSCALEPD512", + auxType: auxInt8, + argLen: 1, + asm: x86.AVRNDSCALEPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VREDUCEPD512", + auxType: auxInt8, + argLen: 1, + asm: x86.AVREDUCEPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VCMPPD512", auxType: auxInt8, @@ -25953,6 +26627,36 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VRNDSCALEPDMasked512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVRNDSCALEPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VREDUCEPDMasked512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVREDUCEPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VCMPPDMasked512", auxType: auxInt8, @@ -54128,6 +54832,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "AddSubFloat32x4", + argLen: 2, + generic: true, + }, { name: "AndFloat32x4", argLen: 2, @@ -54150,6 +54859,11 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "CeilFloat32x4", + argLen: 1, + generic: true, + }, { name: "DivFloat32x4", argLen: 2, @@ -54161,6 +54875,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "FloorFloat32x4", + argLen: 1, + generic: true, + }, { name: "GreaterFloat32x4", argLen: 2, @@ -54348,6 +55067,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "RoundFloat32x4", + argLen: 1, + generic: true, + }, { name: "SqrtFloat32x4", argLen: 1, @@ -54358,6 +55082,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "TruncFloat32x4", + argLen: 1, + generic: true, + }, { name: "XorFloat32x4", argLen: 2, @@ -54370,6 +55099,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "AddSubFloat32x8", + argLen: 2, + generic: true, + }, { name: "AndFloat32x8", argLen: 2, @@ -54392,6 +55126,11 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "CeilFloat32x8", + argLen: 1, + generic: true, + }, { name: "DivFloat32x8", argLen: 2, @@ -54403,6 +55142,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "FloorFloat32x8", + argLen: 1, + generic: true, + }, { name: "GreaterFloat32x8", argLen: 2, @@ -54590,6 +55334,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "RoundFloat32x8", + argLen: 1, + generic: true, + }, { name: "SqrtFloat32x8", argLen: 1, @@ -54600,6 +55349,11 
@@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "TruncFloat32x8", + argLen: 1, + generic: true, + }, { name: "XorFloat32x8", argLen: 2, @@ -54612,6 +55366,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "AddSubFloat64x2", + argLen: 2, + generic: true, + }, { name: "AndFloat64x2", argLen: 2, @@ -54634,6 +55393,11 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "CeilFloat64x2", + argLen: 1, + generic: true, + }, { name: "DivFloat64x2", argLen: 2, @@ -54645,6 +55409,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "FloorFloat64x2", + argLen: 1, + generic: true, + }, { name: "GreaterFloat64x2", argLen: 2, @@ -54832,6 +55601,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "RoundFloat64x2", + argLen: 1, + generic: true, + }, { name: "SqrtFloat64x2", argLen: 1, @@ -54842,6 +55616,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "TruncFloat64x2", + argLen: 1, + generic: true, + }, { name: "XorFloat64x2", argLen: 2, @@ -54854,6 +55633,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "AddSubFloat64x4", + argLen: 2, + generic: true, + }, { name: "AndFloat64x4", argLen: 2, @@ -54876,6 +55660,11 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "CeilFloat64x4", + argLen: 1, + generic: true, + }, { name: "DivFloat64x4", argLen: 2, @@ -54887,6 +55676,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "FloorFloat64x4", + argLen: 1, + generic: true, + }, { name: "GreaterFloat64x4", argLen: 2, @@ -55074,6 +55868,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "RoundFloat64x4", + argLen: 1, + generic: true, + }, { name: "SqrtFloat64x4", argLen: 1, @@ -55084,6 +55883,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "TruncFloat64x4", + argLen: 1, + generic: true, + }, { name: "XorFloat64x4", argLen: 2, @@ -59832,6 +60636,1158 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "CeilSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "CeilWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithCeilSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithCeilWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithFloorSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithFloorWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithRoundSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithRoundWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithTruncSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithTruncWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "FloorSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "FloorWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: 
"MaskedCeilSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedCeilWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithCeilWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithFloorWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithRoundWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithTruncWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedFloorSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedFloorWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedRoundSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedRoundWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedTruncSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedTruncWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "RoundSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "RoundWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "TruncSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "TruncWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "CeilSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "CeilWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithCeilSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithCeilWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithFloorSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithFloorWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithRoundSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithRoundWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithTruncSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithTruncWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "FloorSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: 
"FloorWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "MaskedCeilSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedCeilWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithCeilWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithFloorWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithRoundWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithTruncWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedFloorSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedFloorWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedRoundSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedRoundWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedTruncSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedTruncWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "RoundSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "RoundWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "TruncSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "TruncWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "CeilSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "CeilWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithCeilSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithCeilWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithFloorSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithFloorWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithRoundSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithRoundWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithTruncSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithTruncWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: 
"FloorSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "FloorWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "MaskedCeilSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedCeilWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithCeilWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithFloorWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithRoundWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithTruncWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedFloorSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedFloorWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedRoundSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedRoundWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedTruncSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedTruncWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "RoundSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "RoundWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "TruncSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "TruncWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "CeilSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "CeilWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithCeilSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithCeilWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithFloorSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithFloorWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithRoundSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithRoundWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithTruncSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: 
"DiffWithTruncWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "FloorSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "FloorWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "MaskedCeilSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedCeilWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithCeilWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithFloorWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithRoundWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithTruncWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedFloorSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedFloorWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedRoundSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedRoundWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedTruncSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedTruncWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "RoundSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "RoundWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "TruncSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "TruncWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "CeilSuppressExceptionWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "CeilWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithCeilSuppressExceptionWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithCeilWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithFloorSuppressExceptionWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithFloorWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithRoundSuppressExceptionWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithRoundWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: 
"DiffWithTruncSuppressExceptionWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithTruncWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "FloorSuppressExceptionWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "FloorWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "MaskedCeilSuppressExceptionWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedCeilWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithCeilWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithFloorWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithRoundWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithTruncWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedFloorSuppressExceptionWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedFloorWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedRoundSuppressExceptionWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedRoundWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedTruncSuppressExceptionWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedTruncWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "RoundSuppressExceptionWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "RoundWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "TruncSuppressExceptionWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "TruncWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "CeilSuppressExceptionWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "CeilWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithCeilSuppressExceptionWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithCeilWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithFloorSuppressExceptionWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithFloorWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithRoundSuppressExceptionWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: 
"DiffWithRoundWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithTruncSuppressExceptionWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithTruncWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "FloorSuppressExceptionWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "FloorWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "MaskedCeilSuppressExceptionWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedCeilWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithCeilWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithFloorWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithRoundWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithTruncWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedFloorSuppressExceptionWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedFloorWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedRoundSuppressExceptionWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedRoundWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedTruncSuppressExceptionWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedTruncWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "RoundSuppressExceptionWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "RoundWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "TruncSuppressExceptionWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "TruncWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, } func (o Op) Asm() obj.As { return opcodeTable[o].asm } diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 86fbc988cfad2f..a6cf0a0b7bd0b1 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -664,6 +664,18 @@ func rewriteValueAMD64(v *Value) bool { case OpAddPtr: v.Op = OpAMD64ADDQ return true + case OpAddSubFloat32x4: + v.Op = OpAMD64VADDSUBPS128 + return true + case OpAddSubFloat32x8: + v.Op = OpAMD64VADDSUBPS256 + return true + case OpAddSubFloat64x2: + v.Op = OpAMD64VADDSUBPD128 + return true + case OpAddSubFloat64x4: + v.Op = OpAMD64VADDSUBPD256 + return true case OpAddUint16x16: v.Op = 
OpAMD64VPADDW256 return true @@ -994,6 +1006,38 @@ func rewriteValueAMD64(v *Value) bool { return true case OpCeil: return rewriteValueAMD64_OpCeil(v) + case OpCeilFloat32x4: + return rewriteValueAMD64_OpCeilFloat32x4(v) + case OpCeilFloat32x8: + return rewriteValueAMD64_OpCeilFloat32x8(v) + case OpCeilFloat64x2: + return rewriteValueAMD64_OpCeilFloat64x2(v) + case OpCeilFloat64x4: + return rewriteValueAMD64_OpCeilFloat64x4(v) + case OpCeilSuppressExceptionWithPrecisionFloat32x16: + return rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat32x16(v) + case OpCeilSuppressExceptionWithPrecisionFloat32x4: + return rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat32x4(v) + case OpCeilSuppressExceptionWithPrecisionFloat32x8: + return rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat32x8(v) + case OpCeilSuppressExceptionWithPrecisionFloat64x2: + return rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat64x2(v) + case OpCeilSuppressExceptionWithPrecisionFloat64x4: + return rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat64x4(v) + case OpCeilSuppressExceptionWithPrecisionFloat64x8: + return rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat64x8(v) + case OpCeilWithPrecisionFloat32x16: + return rewriteValueAMD64_OpCeilWithPrecisionFloat32x16(v) + case OpCeilWithPrecisionFloat32x4: + return rewriteValueAMD64_OpCeilWithPrecisionFloat32x4(v) + case OpCeilWithPrecisionFloat32x8: + return rewriteValueAMD64_OpCeilWithPrecisionFloat32x8(v) + case OpCeilWithPrecisionFloat64x2: + return rewriteValueAMD64_OpCeilWithPrecisionFloat64x2(v) + case OpCeilWithPrecisionFloat64x4: + return rewriteValueAMD64_OpCeilWithPrecisionFloat64x4(v) + case OpCeilWithPrecisionFloat64x8: + return rewriteValueAMD64_OpCeilWithPrecisionFloat64x8(v) case OpClosureCall: v.Op = OpAMD64CALLclosure return true @@ -1080,6 +1124,102 @@ func rewriteValueAMD64(v *Value) bool { case OpCvtBoolToUint8: v.Op = OpCopy return true + case OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x16: + return rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x16(v) + case OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x4: + return rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x4(v) + case OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x8: + return rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x8(v) + case OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x2: + return rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x2(v) + case OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x4: + return rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x4(v) + case OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x8: + return rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x8(v) + case OpDiffWithCeilWithPrecisionFloat32x16: + return rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat32x16(v) + case OpDiffWithCeilWithPrecisionFloat32x4: + return rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat32x4(v) + case OpDiffWithCeilWithPrecisionFloat32x8: + return rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat32x8(v) + case OpDiffWithCeilWithPrecisionFloat64x2: + return rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat64x2(v) + case OpDiffWithCeilWithPrecisionFloat64x4: + return rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat64x4(v) + case OpDiffWithCeilWithPrecisionFloat64x8: + return rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat64x8(v) + case 
OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x16: + return rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x16(v) + case OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x4: + return rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x4(v) + case OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x8: + return rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x8(v) + case OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x2: + return rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x2(v) + case OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x4: + return rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x4(v) + case OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x8: + return rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x8(v) + case OpDiffWithFloorWithPrecisionFloat32x16: + return rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat32x16(v) + case OpDiffWithFloorWithPrecisionFloat32x4: + return rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat32x4(v) + case OpDiffWithFloorWithPrecisionFloat32x8: + return rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat32x8(v) + case OpDiffWithFloorWithPrecisionFloat64x2: + return rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat64x2(v) + case OpDiffWithFloorWithPrecisionFloat64x4: + return rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat64x4(v) + case OpDiffWithFloorWithPrecisionFloat64x8: + return rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat64x8(v) + case OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x16: + return rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x16(v) + case OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x4: + return rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x4(v) + case OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x8: + return rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x8(v) + case OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x2: + return rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x2(v) + case OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x4: + return rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x4(v) + case OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x8: + return rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x8(v) + case OpDiffWithRoundWithPrecisionFloat32x16: + return rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat32x16(v) + case OpDiffWithRoundWithPrecisionFloat32x4: + return rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat32x4(v) + case OpDiffWithRoundWithPrecisionFloat32x8: + return rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat32x8(v) + case OpDiffWithRoundWithPrecisionFloat64x2: + return rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat64x2(v) + case OpDiffWithRoundWithPrecisionFloat64x4: + return rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat64x4(v) + case OpDiffWithRoundWithPrecisionFloat64x8: + return rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat64x8(v) + case OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x16: + return rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x16(v) + case OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x4: + return rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x4(v) + case OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x8: + return 
rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x8(v) + case OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x2: + return rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x2(v) + case OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x4: + return rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x4(v) + case OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x8: + return rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x8(v) + case OpDiffWithTruncWithPrecisionFloat32x16: + return rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat32x16(v) + case OpDiffWithTruncWithPrecisionFloat32x4: + return rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat32x4(v) + case OpDiffWithTruncWithPrecisionFloat32x8: + return rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat32x8(v) + case OpDiffWithTruncWithPrecisionFloat64x2: + return rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat64x2(v) + case OpDiffWithTruncWithPrecisionFloat64x4: + return rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat64x4(v) + case OpDiffWithTruncWithPrecisionFloat64x8: + return rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat64x8(v) case OpDiv128u: v.Op = OpAMD64DIVQU2 return true @@ -1211,6 +1351,38 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpFMA(v) case OpFloor: return rewriteValueAMD64_OpFloor(v) + case OpFloorFloat32x4: + return rewriteValueAMD64_OpFloorFloat32x4(v) + case OpFloorFloat32x8: + return rewriteValueAMD64_OpFloorFloat32x8(v) + case OpFloorFloat64x2: + return rewriteValueAMD64_OpFloorFloat64x2(v) + case OpFloorFloat64x4: + return rewriteValueAMD64_OpFloorFloat64x4(v) + case OpFloorSuppressExceptionWithPrecisionFloat32x16: + return rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat32x16(v) + case OpFloorSuppressExceptionWithPrecisionFloat32x4: + return rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat32x4(v) + case OpFloorSuppressExceptionWithPrecisionFloat32x8: + return rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat32x8(v) + case OpFloorSuppressExceptionWithPrecisionFloat64x2: + return rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat64x2(v) + case OpFloorSuppressExceptionWithPrecisionFloat64x4: + return rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat64x4(v) + case OpFloorSuppressExceptionWithPrecisionFloat64x8: + return rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat64x8(v) + case OpFloorWithPrecisionFloat32x16: + return rewriteValueAMD64_OpFloorWithPrecisionFloat32x16(v) + case OpFloorWithPrecisionFloat32x4: + return rewriteValueAMD64_OpFloorWithPrecisionFloat32x4(v) + case OpFloorWithPrecisionFloat32x8: + return rewriteValueAMD64_OpFloorWithPrecisionFloat32x8(v) + case OpFloorWithPrecisionFloat64x2: + return rewriteValueAMD64_OpFloorWithPrecisionFloat64x2(v) + case OpFloorWithPrecisionFloat64x4: + return rewriteValueAMD64_OpFloorWithPrecisionFloat64x4(v) + case OpFloorWithPrecisionFloat64x8: + return rewriteValueAMD64_OpFloorWithPrecisionFloat64x8(v) case OpGetCallerPC: v.Op = OpAMD64LoweredGetCallerPC return true @@ -1772,6 +1944,126 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedAverageUint8x32(v) case OpMaskedAverageUint8x64: return rewriteValueAMD64_OpMaskedAverageUint8x64(v) + case OpMaskedCeilSuppressExceptionWithPrecisionFloat32x16: + return rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat32x16(v) + case OpMaskedCeilSuppressExceptionWithPrecisionFloat32x4: + return 
rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat32x4(v) + case OpMaskedCeilSuppressExceptionWithPrecisionFloat32x8: + return rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat32x8(v) + case OpMaskedCeilSuppressExceptionWithPrecisionFloat64x2: + return rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat64x2(v) + case OpMaskedCeilSuppressExceptionWithPrecisionFloat64x4: + return rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat64x4(v) + case OpMaskedCeilSuppressExceptionWithPrecisionFloat64x8: + return rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat64x8(v) + case OpMaskedCeilWithPrecisionFloat32x16: + return rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat32x16(v) + case OpMaskedCeilWithPrecisionFloat32x4: + return rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat32x4(v) + case OpMaskedCeilWithPrecisionFloat32x8: + return rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat32x8(v) + case OpMaskedCeilWithPrecisionFloat64x2: + return rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat64x2(v) + case OpMaskedCeilWithPrecisionFloat64x4: + return rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat64x4(v) + case OpMaskedCeilWithPrecisionFloat64x8: + return rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat64x8(v) + case OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16: + return rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16(v) + case OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4: + return rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4(v) + case OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8: + return rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8(v) + case OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2: + return rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2(v) + case OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4: + return rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4(v) + case OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8: + return rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8(v) + case OpMaskedDiffWithCeilWithPrecisionFloat32x16: + return rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat32x16(v) + case OpMaskedDiffWithCeilWithPrecisionFloat32x4: + return rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat32x4(v) + case OpMaskedDiffWithCeilWithPrecisionFloat32x8: + return rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat32x8(v) + case OpMaskedDiffWithCeilWithPrecisionFloat64x2: + return rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat64x2(v) + case OpMaskedDiffWithCeilWithPrecisionFloat64x4: + return rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat64x4(v) + case OpMaskedDiffWithCeilWithPrecisionFloat64x8: + return rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat64x8(v) + case OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16: + return rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16(v) + case OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4: + return rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4(v) + case OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8: + return rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8(v) + case OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2: + return 
rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2(v) + case OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4: + return rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4(v) + case OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8: + return rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8(v) + case OpMaskedDiffWithFloorWithPrecisionFloat32x16: + return rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat32x16(v) + case OpMaskedDiffWithFloorWithPrecisionFloat32x4: + return rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat32x4(v) + case OpMaskedDiffWithFloorWithPrecisionFloat32x8: + return rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat32x8(v) + case OpMaskedDiffWithFloorWithPrecisionFloat64x2: + return rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat64x2(v) + case OpMaskedDiffWithFloorWithPrecisionFloat64x4: + return rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat64x4(v) + case OpMaskedDiffWithFloorWithPrecisionFloat64x8: + return rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat64x8(v) + case OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16: + return rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16(v) + case OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4: + return rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4(v) + case OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8: + return rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8(v) + case OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2: + return rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2(v) + case OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4: + return rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4(v) + case OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8: + return rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8(v) + case OpMaskedDiffWithRoundWithPrecisionFloat32x16: + return rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat32x16(v) + case OpMaskedDiffWithRoundWithPrecisionFloat32x4: + return rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat32x4(v) + case OpMaskedDiffWithRoundWithPrecisionFloat32x8: + return rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat32x8(v) + case OpMaskedDiffWithRoundWithPrecisionFloat64x2: + return rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat64x2(v) + case OpMaskedDiffWithRoundWithPrecisionFloat64x4: + return rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat64x4(v) + case OpMaskedDiffWithRoundWithPrecisionFloat64x8: + return rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat64x8(v) + case OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16: + return rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16(v) + case OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4: + return rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4(v) + case OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8: + return rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8(v) + case OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2: + return rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2(v) + case 
OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4: + return rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4(v) + case OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8: + return rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8(v) + case OpMaskedDiffWithTruncWithPrecisionFloat32x16: + return rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat32x16(v) + case OpMaskedDiffWithTruncWithPrecisionFloat32x4: + return rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat32x4(v) + case OpMaskedDiffWithTruncWithPrecisionFloat32x8: + return rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat32x8(v) + case OpMaskedDiffWithTruncWithPrecisionFloat64x2: + return rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat64x2(v) + case OpMaskedDiffWithTruncWithPrecisionFloat64x4: + return rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat64x4(v) + case OpMaskedDiffWithTruncWithPrecisionFloat64x8: + return rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat64x8(v) case OpMaskedDivFloat32x16: return rewriteValueAMD64_OpMaskedDivFloat32x16(v) case OpMaskedDivFloat32x4: @@ -1844,6 +2136,30 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedEqualUint8x32(v) case OpMaskedEqualUint8x64: return rewriteValueAMD64_OpMaskedEqualUint8x64(v) + case OpMaskedFloorSuppressExceptionWithPrecisionFloat32x16: + return rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat32x16(v) + case OpMaskedFloorSuppressExceptionWithPrecisionFloat32x4: + return rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat32x4(v) + case OpMaskedFloorSuppressExceptionWithPrecisionFloat32x8: + return rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat32x8(v) + case OpMaskedFloorSuppressExceptionWithPrecisionFloat64x2: + return rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat64x2(v) + case OpMaskedFloorSuppressExceptionWithPrecisionFloat64x4: + return rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat64x4(v) + case OpMaskedFloorSuppressExceptionWithPrecisionFloat64x8: + return rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat64x8(v) + case OpMaskedFloorWithPrecisionFloat32x16: + return rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat32x16(v) + case OpMaskedFloorWithPrecisionFloat32x4: + return rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat32x4(v) + case OpMaskedFloorWithPrecisionFloat32x8: + return rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat32x8(v) + case OpMaskedFloorWithPrecisionFloat64x2: + return rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat64x2(v) + case OpMaskedFloorWithPrecisionFloat64x4: + return rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat64x4(v) + case OpMaskedFloorWithPrecisionFloat64x8: + return rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat64x8(v) case OpMaskedGreaterEqualFloat32x16: return rewriteValueAMD64_OpMaskedGreaterEqualFloat32x16(v) case OpMaskedGreaterEqualFloat32x4: @@ -2426,6 +2742,30 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedPopCountUint8x32(v) case OpMaskedPopCountUint8x64: return rewriteValueAMD64_OpMaskedPopCountUint8x64(v) + case OpMaskedRoundSuppressExceptionWithPrecisionFloat32x16: + return rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat32x16(v) + case OpMaskedRoundSuppressExceptionWithPrecisionFloat32x4: + return rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat32x4(v) + case 
OpMaskedRoundSuppressExceptionWithPrecisionFloat32x8: + return rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat32x8(v) + case OpMaskedRoundSuppressExceptionWithPrecisionFloat64x2: + return rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat64x2(v) + case OpMaskedRoundSuppressExceptionWithPrecisionFloat64x4: + return rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat64x4(v) + case OpMaskedRoundSuppressExceptionWithPrecisionFloat64x8: + return rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat64x8(v) + case OpMaskedRoundWithPrecisionFloat32x16: + return rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat32x16(v) + case OpMaskedRoundWithPrecisionFloat32x4: + return rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat32x4(v) + case OpMaskedRoundWithPrecisionFloat32x8: + return rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat32x8(v) + case OpMaskedRoundWithPrecisionFloat64x2: + return rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat64x2(v) + case OpMaskedRoundWithPrecisionFloat64x4: + return rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat64x4(v) + case OpMaskedRoundWithPrecisionFloat64x8: + return rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat64x8(v) case OpMaskedSaturatedAddInt16x16: return rewriteValueAMD64_OpMaskedSaturatedAddInt16x16(v) case OpMaskedSaturatedAddInt16x32: @@ -2546,6 +2886,30 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedSubUint8x32(v) case OpMaskedSubUint8x64: return rewriteValueAMD64_OpMaskedSubUint8x64(v) + case OpMaskedTruncSuppressExceptionWithPrecisionFloat32x16: + return rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat32x16(v) + case OpMaskedTruncSuppressExceptionWithPrecisionFloat32x4: + return rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat32x4(v) + case OpMaskedTruncSuppressExceptionWithPrecisionFloat32x8: + return rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat32x8(v) + case OpMaskedTruncSuppressExceptionWithPrecisionFloat64x2: + return rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat64x2(v) + case OpMaskedTruncSuppressExceptionWithPrecisionFloat64x4: + return rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat64x4(v) + case OpMaskedTruncSuppressExceptionWithPrecisionFloat64x8: + return rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat64x8(v) + case OpMaskedTruncWithPrecisionFloat32x16: + return rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat32x16(v) + case OpMaskedTruncWithPrecisionFloat32x4: + return rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat32x4(v) + case OpMaskedTruncWithPrecisionFloat32x8: + return rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat32x8(v) + case OpMaskedTruncWithPrecisionFloat64x2: + return rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat64x2(v) + case OpMaskedTruncWithPrecisionFloat64x4: + return rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat64x4(v) + case OpMaskedTruncWithPrecisionFloat64x8: + return rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat64x8(v) case OpMaskedXorFloat32x16: return rewriteValueAMD64_OpMaskedXorFloat32x16(v) case OpMaskedXorFloat32x4: @@ -3292,8 +3656,40 @@ func rewriteValueAMD64(v *Value) bool { case OpRound64F: v.Op = OpAMD64LoweredRound64F return true + case OpRoundFloat32x4: + return rewriteValueAMD64_OpRoundFloat32x4(v) + case OpRoundFloat32x8: + return rewriteValueAMD64_OpRoundFloat32x8(v) + case OpRoundFloat64x2: + return rewriteValueAMD64_OpRoundFloat64x2(v) + case OpRoundFloat64x4: + return 
rewriteValueAMD64_OpRoundFloat64x4(v) + case OpRoundSuppressExceptionWithPrecisionFloat32x16: + return rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat32x16(v) + case OpRoundSuppressExceptionWithPrecisionFloat32x4: + return rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat32x4(v) + case OpRoundSuppressExceptionWithPrecisionFloat32x8: + return rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat32x8(v) + case OpRoundSuppressExceptionWithPrecisionFloat64x2: + return rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat64x2(v) + case OpRoundSuppressExceptionWithPrecisionFloat64x4: + return rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat64x4(v) + case OpRoundSuppressExceptionWithPrecisionFloat64x8: + return rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat64x8(v) case OpRoundToEven: return rewriteValueAMD64_OpRoundToEven(v) + case OpRoundWithPrecisionFloat32x16: + return rewriteValueAMD64_OpRoundWithPrecisionFloat32x16(v) + case OpRoundWithPrecisionFloat32x4: + return rewriteValueAMD64_OpRoundWithPrecisionFloat32x4(v) + case OpRoundWithPrecisionFloat32x8: + return rewriteValueAMD64_OpRoundWithPrecisionFloat32x8(v) + case OpRoundWithPrecisionFloat64x2: + return rewriteValueAMD64_OpRoundWithPrecisionFloat64x2(v) + case OpRoundWithPrecisionFloat64x4: + return rewriteValueAMD64_OpRoundWithPrecisionFloat64x4(v) + case OpRoundWithPrecisionFloat64x8: + return rewriteValueAMD64_OpRoundWithPrecisionFloat64x8(v) case OpRsh16Ux16: return rewriteValueAMD64_OpRsh16Ux16(v) case OpRsh16Ux32: @@ -3653,6 +4049,38 @@ func rewriteValueAMD64(v *Value) bool { case OpTrunc64to8: v.Op = OpCopy return true + case OpTruncFloat32x4: + return rewriteValueAMD64_OpTruncFloat32x4(v) + case OpTruncFloat32x8: + return rewriteValueAMD64_OpTruncFloat32x8(v) + case OpTruncFloat64x2: + return rewriteValueAMD64_OpTruncFloat64x2(v) + case OpTruncFloat64x4: + return rewriteValueAMD64_OpTruncFloat64x4(v) + case OpTruncSuppressExceptionWithPrecisionFloat32x16: + return rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat32x16(v) + case OpTruncSuppressExceptionWithPrecisionFloat32x4: + return rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat32x4(v) + case OpTruncSuppressExceptionWithPrecisionFloat32x8: + return rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat32x8(v) + case OpTruncSuppressExceptionWithPrecisionFloat64x2: + return rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat64x2(v) + case OpTruncSuppressExceptionWithPrecisionFloat64x4: + return rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat64x4(v) + case OpTruncSuppressExceptionWithPrecisionFloat64x8: + return rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat64x8(v) + case OpTruncWithPrecisionFloat32x16: + return rewriteValueAMD64_OpTruncWithPrecisionFloat32x16(v) + case OpTruncWithPrecisionFloat32x4: + return rewriteValueAMD64_OpTruncWithPrecisionFloat32x4(v) + case OpTruncWithPrecisionFloat32x8: + return rewriteValueAMD64_OpTruncWithPrecisionFloat32x8(v) + case OpTruncWithPrecisionFloat64x2: + return rewriteValueAMD64_OpTruncWithPrecisionFloat64x2(v) + case OpTruncWithPrecisionFloat64x4: + return rewriteValueAMD64_OpTruncWithPrecisionFloat64x4(v) + case OpTruncWithPrecisionFloat64x8: + return rewriteValueAMD64_OpTruncWithPrecisionFloat64x8(v) case OpWB: v.Op = OpAMD64LoweredWB return true @@ -27029,6 +27457,210 @@ func rewriteValueAMD64_OpCeil(v *Value) bool { return true } } +func rewriteValueAMD64_OpCeilFloat32x4(v *Value) bool { + v_0 := v.Args[0] + 
// match: (CeilFloat32x4 x) + // result: (VROUNDPS128 [2] x) + for { + x := v_0 + v.reset(OpAMD64VROUNDPS128) + v.AuxInt = int8ToAuxInt(2) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpCeilFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (CeilFloat32x8 x) + // result: (VROUNDPS256 [2] x) + for { + x := v_0 + v.reset(OpAMD64VROUNDPS256) + v.AuxInt = int8ToAuxInt(2) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpCeilFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (CeilFloat64x2 x) + // result: (VROUNDPD128 [2] x) + for { + x := v_0 + v.reset(OpAMD64VROUNDPD128) + v.AuxInt = int8ToAuxInt(2) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpCeilFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (CeilFloat64x4 x) + // result: (VROUNDPD256 [2] x) + for { + x := v_0 + v.reset(OpAMD64VROUNDPD256) + v.AuxInt = int8ToAuxInt(2) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (CeilSuppressExceptionWithPrecisionFloat32x16 [a] x) + // result: (VRNDSCALEPS512 [a+10] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPS512) + v.AuxInt = int8ToAuxInt(a + 10) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (CeilSuppressExceptionWithPrecisionFloat32x4 [a] x) + // result: (VRNDSCALEPS128 [a+10] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPS128) + v.AuxInt = int8ToAuxInt(a + 10) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (CeilSuppressExceptionWithPrecisionFloat32x8 [a] x) + // result: (VRNDSCALEPS256 [a+10] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPS256) + v.AuxInt = int8ToAuxInt(a + 10) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (CeilSuppressExceptionWithPrecisionFloat64x2 [a] x) + // result: (VRNDSCALEPD128 [a+10] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPD128) + v.AuxInt = int8ToAuxInt(a + 10) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (CeilSuppressExceptionWithPrecisionFloat64x4 [a] x) + // result: (VRNDSCALEPD256 [a+10] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPD256) + v.AuxInt = int8ToAuxInt(a + 10) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (CeilSuppressExceptionWithPrecisionFloat64x8 [a] x) + // result: (VRNDSCALEPD512 [a+10] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPD512) + v.AuxInt = int8ToAuxInt(a + 10) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpCeilWithPrecisionFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (CeilWithPrecisionFloat32x16 [a] x) + // result: (VRNDSCALEPS512 [a+2] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPS512) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpCeilWithPrecisionFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (CeilWithPrecisionFloat32x4 [a] x) 
+ // result: (VRNDSCALEPS128 [a+2] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPS128) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpCeilWithPrecisionFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (CeilWithPrecisionFloat32x8 [a] x) + // result: (VRNDSCALEPS256 [a+2] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPS256) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpCeilWithPrecisionFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (CeilWithPrecisionFloat64x2 [a] x) + // result: (VRNDSCALEPD128 [a+2] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPD128) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpCeilWithPrecisionFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (CeilWithPrecisionFloat64x4 [a] x) + // result: (VRNDSCALEPD256 [a+2] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPD256) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpCeilWithPrecisionFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (CeilWithPrecisionFloat64x8 [a] x) + // result: (VRNDSCALEPD512 [a+2] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPD512) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) + return true + } +} func rewriteValueAMD64_OpCondSelect(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -28162,6 +28794,630 @@ func rewriteValueAMD64_OpCtz8NonZero(v *Value) bool { } return false } +func rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithCeilSuppressExceptionWithPrecisionFloat32x16 [a] x) + // result: (VREDUCEPS512 [a+10] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS512) + v.AuxInt = int8ToAuxInt(a + 10) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithCeilSuppressExceptionWithPrecisionFloat32x4 [a] x) + // result: (VREDUCEPS128 [a+10] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS128) + v.AuxInt = int8ToAuxInt(a + 10) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithCeilSuppressExceptionWithPrecisionFloat32x8 [a] x) + // result: (VREDUCEPS256 [a+10] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS256) + v.AuxInt = int8ToAuxInt(a + 10) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithCeilSuppressExceptionWithPrecisionFloat64x2 [a] x) + // result: (VREDUCEPD128 [a+10] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD128) + v.AuxInt = int8ToAuxInt(a + 10) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithCeilSuppressExceptionWithPrecisionFloat64x4 [a] x) + // result: (VREDUCEPD256 [a+10] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD256) + v.AuxInt = int8ToAuxInt(a + 10) + v.AddArg(x) + return true + } +} +func 
rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithCeilSuppressExceptionWithPrecisionFloat64x8 [a] x) + // result: (VREDUCEPD512 [a+10] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD512) + v.AuxInt = int8ToAuxInt(a + 10) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithCeilWithPrecisionFloat32x16 [a] x) + // result: (VREDUCEPS512 [a+2] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS512) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithCeilWithPrecisionFloat32x4 [a] x) + // result: (VREDUCEPS128 [a+2] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS128) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithCeilWithPrecisionFloat32x8 [a] x) + // result: (VREDUCEPS256 [a+2] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS256) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithCeilWithPrecisionFloat64x2 [a] x) + // result: (VREDUCEPD128 [a+2] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD128) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithCeilWithPrecisionFloat64x4 [a] x) + // result: (VREDUCEPD256 [a+2] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD256) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithCeilWithPrecisionFloat64x8 [a] x) + // result: (VREDUCEPD512 [a+2] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD512) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithFloorSuppressExceptionWithPrecisionFloat32x16 [a] x) + // result: (VREDUCEPS512 [a+9] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS512) + v.AuxInt = int8ToAuxInt(a + 9) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithFloorSuppressExceptionWithPrecisionFloat32x4 [a] x) + // result: (VREDUCEPS128 [a+9] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS128) + v.AuxInt = int8ToAuxInt(a + 9) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithFloorSuppressExceptionWithPrecisionFloat32x8 [a] x) + // result: (VREDUCEPS256 [a+9] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS256) + v.AuxInt = int8ToAuxInt(a + 9) + v.AddArg(x) + return true + } +} +func 
rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithFloorSuppressExceptionWithPrecisionFloat64x2 [a] x) + // result: (VREDUCEPD128 [a+9] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD128) + v.AuxInt = int8ToAuxInt(a + 9) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithFloorSuppressExceptionWithPrecisionFloat64x4 [a] x) + // result: (VREDUCEPD256 [a+9] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD256) + v.AuxInt = int8ToAuxInt(a + 9) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithFloorSuppressExceptionWithPrecisionFloat64x8 [a] x) + // result: (VREDUCEPD512 [a+9] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD512) + v.AuxInt = int8ToAuxInt(a + 9) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithFloorWithPrecisionFloat32x16 [a] x) + // result: (VREDUCEPS512 [a+1] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS512) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithFloorWithPrecisionFloat32x4 [a] x) + // result: (VREDUCEPS128 [a+1] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS128) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithFloorWithPrecisionFloat32x8 [a] x) + // result: (VREDUCEPS256 [a+1] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS256) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithFloorWithPrecisionFloat64x2 [a] x) + // result: (VREDUCEPD128 [a+1] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD128) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithFloorWithPrecisionFloat64x4 [a] x) + // result: (VREDUCEPD256 [a+1] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD256) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithFloorWithPrecisionFloat64x8 [a] x) + // result: (VREDUCEPD512 [a+1] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD512) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithRoundSuppressExceptionWithPrecisionFloat32x16 [a] x) + // result: (VREDUCEPS512 [a+8] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS512) + v.AuxInt = int8ToAuxInt(a + 8) + v.AddArg(x) + return true + } +} +func 
rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithRoundSuppressExceptionWithPrecisionFloat32x4 [a] x) + // result: (VREDUCEPS128 [a+8] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS128) + v.AuxInt = int8ToAuxInt(a + 8) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithRoundSuppressExceptionWithPrecisionFloat32x8 [a] x) + // result: (VREDUCEPS256 [a+8] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS256) + v.AuxInt = int8ToAuxInt(a + 8) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithRoundSuppressExceptionWithPrecisionFloat64x2 [a] x) + // result: (VREDUCEPD128 [a+8] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD128) + v.AuxInt = int8ToAuxInt(a + 8) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithRoundSuppressExceptionWithPrecisionFloat64x4 [a] x) + // result: (VREDUCEPD256 [a+8] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD256) + v.AuxInt = int8ToAuxInt(a + 8) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithRoundSuppressExceptionWithPrecisionFloat64x8 [a] x) + // result: (VREDUCEPD512 [a+8] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD512) + v.AuxInt = int8ToAuxInt(a + 8) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithRoundWithPrecisionFloat32x16 [a] x) + // result: (VREDUCEPS512 [a+0] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS512) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithRoundWithPrecisionFloat32x4 [a] x) + // result: (VREDUCEPS128 [a+0] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS128) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithRoundWithPrecisionFloat32x8 [a] x) + // result: (VREDUCEPS256 [a+0] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS256) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithRoundWithPrecisionFloat64x2 [a] x) + // result: (VREDUCEPD128 [a+0] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD128) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithRoundWithPrecisionFloat64x4 [a] x) + // result: (VREDUCEPD256 [a+0] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD256) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) + return true + } +} +func 
rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithRoundWithPrecisionFloat64x8 [a] x) + // result: (VREDUCEPD512 [a+0] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD512) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithTruncSuppressExceptionWithPrecisionFloat32x16 [a] x) + // result: (VREDUCEPS512 [a+11] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS512) + v.AuxInt = int8ToAuxInt(a + 11) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithTruncSuppressExceptionWithPrecisionFloat32x4 [a] x) + // result: (VREDUCEPS128 [a+11] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS128) + v.AuxInt = int8ToAuxInt(a + 11) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithTruncSuppressExceptionWithPrecisionFloat32x8 [a] x) + // result: (VREDUCEPS256 [a+11] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS256) + v.AuxInt = int8ToAuxInt(a + 11) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithTruncSuppressExceptionWithPrecisionFloat64x2 [a] x) + // result: (VREDUCEPD128 [a+11] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD128) + v.AuxInt = int8ToAuxInt(a + 11) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithTruncSuppressExceptionWithPrecisionFloat64x4 [a] x) + // result: (VREDUCEPD256 [a+11] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD256) + v.AuxInt = int8ToAuxInt(a + 11) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithTruncSuppressExceptionWithPrecisionFloat64x8 [a] x) + // result: (VREDUCEPD512 [a+11] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD512) + v.AuxInt = int8ToAuxInt(a + 11) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithTruncWithPrecisionFloat32x16 [a] x) + // result: (VREDUCEPS512 [a+3] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS512) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithTruncWithPrecisionFloat32x4 [a] x) + // result: (VREDUCEPS128 [a+3] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS128) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithTruncWithPrecisionFloat32x8 [a] x) + // result: (VREDUCEPS256 [a+3] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS256) + v.AuxInt = int8ToAuxInt(a 
+ 3) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithTruncWithPrecisionFloat64x2 [a] x) + // result: (VREDUCEPD128 [a+3] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD128) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithTruncWithPrecisionFloat64x4 [a] x) + // result: (VREDUCEPD256 [a+3] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD256) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithTruncWithPrecisionFloat64x8 [a] x) + // result: (VREDUCEPD512 [a+3] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD512) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) + return true + } +} func rewriteValueAMD64_OpDiv16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -28843,6 +30099,210 @@ func rewriteValueAMD64_OpFloor(v *Value) bool { return true } } +func rewriteValueAMD64_OpFloorFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorFloat32x4 x) + // result: (VROUNDPS128 [1] x) + for { + x := v_0 + v.reset(OpAMD64VROUNDPS128) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpFloorFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorFloat32x8 x) + // result: (VROUNDPS256 [1] x) + for { + x := v_0 + v.reset(OpAMD64VROUNDPS256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpFloorFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorFloat64x2 x) + // result: (VROUNDPD128 [1] x) + for { + x := v_0 + v.reset(OpAMD64VROUNDPD128) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpFloorFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorFloat64x4 x) + // result: (VROUNDPD256 [1] x) + for { + x := v_0 + v.reset(OpAMD64VROUNDPD256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorSuppressExceptionWithPrecisionFloat32x16 [a] x) + // result: (VRNDSCALEPS512 [a+9] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPS512) + v.AuxInt = int8ToAuxInt(a + 9) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorSuppressExceptionWithPrecisionFloat32x4 [a] x) + // result: (VRNDSCALEPS128 [a+9] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPS128) + v.AuxInt = int8ToAuxInt(a + 9) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorSuppressExceptionWithPrecisionFloat32x8 [a] x) + // result: (VRNDSCALEPS256 [a+9] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPS256) + v.AuxInt = int8ToAuxInt(a + 9) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorSuppressExceptionWithPrecisionFloat64x2 [a] x) + // result: (VRNDSCALEPD128 [a+9] x) + for { + a := 
auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPD128) + v.AuxInt = int8ToAuxInt(a + 9) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorSuppressExceptionWithPrecisionFloat64x4 [a] x) + // result: (VRNDSCALEPD256 [a+9] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPD256) + v.AuxInt = int8ToAuxInt(a + 9) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorSuppressExceptionWithPrecisionFloat64x8 [a] x) + // result: (VRNDSCALEPD512 [a+9] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPD512) + v.AuxInt = int8ToAuxInt(a + 9) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpFloorWithPrecisionFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorWithPrecisionFloat32x16 [a] x) + // result: (VRNDSCALEPS512 [a+1] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPS512) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpFloorWithPrecisionFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorWithPrecisionFloat32x4 [a] x) + // result: (VRNDSCALEPS128 [a+1] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPS128) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpFloorWithPrecisionFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorWithPrecisionFloat32x8 [a] x) + // result: (VRNDSCALEPS256 [a+1] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPS256) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpFloorWithPrecisionFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorWithPrecisionFloat64x2 [a] x) + // result: (VRNDSCALEPD128 [a+1] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPD128) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpFloorWithPrecisionFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorWithPrecisionFloat64x4 [a] x) + // result: (VRNDSCALEPD256 [a+1] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPD256) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpFloorWithPrecisionFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorWithPrecisionFloat64x8 [a] x) + // result: (VRNDSCALEPD512 [a+1] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPD512) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) + return true + } +} func rewriteValueAMD64_OpGetG(v *Value) bool { v_0 := v.Args[0] // match: (GetG mem) @@ -33790,6 +35250,1086 @@ func rewriteValueAMD64_OpMaskedAverageUint8x64(v *Value) bool { return true } } +func rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedCeilSuppressExceptionWithPrecisionFloat32x16 [a] x mask) + // result: (VRNDSCALEPSMasked512 [a+10] x (VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 10) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + 
} +} +func rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedCeilSuppressExceptionWithPrecisionFloat32x4 [a] x mask) + // result: (VRNDSCALEPSMasked128 [a+10] x (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 10) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedCeilSuppressExceptionWithPrecisionFloat32x8 [a] x mask) + // result: (VRNDSCALEPSMasked256 [a+10] x (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 10) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedCeilSuppressExceptionWithPrecisionFloat64x2 [a] x mask) + // result: (VRNDSCALEPDMasked128 [a+10] x (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 10) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedCeilSuppressExceptionWithPrecisionFloat64x4 [a] x mask) + // result: (VRNDSCALEPDMasked256 [a+10] x (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 10) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedCeilSuppressExceptionWithPrecisionFloat64x8 [a] x mask) + // result: (VRNDSCALEPDMasked512 [a+10] x (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 10) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedCeilWithPrecisionFloat32x16 [a] x mask) + // result: (VRNDSCALEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedCeilWithPrecisionFloat32x4 [a] x mask) + // result: (VRNDSCALEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) + for { + a := 
auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedCeilWithPrecisionFloat32x8 [a] x mask) + // result: (VRNDSCALEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedCeilWithPrecisionFloat64x2 [a] x mask) + // result: (VRNDSCALEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedCeilWithPrecisionFloat64x4 [a] x mask) + // result: (VRNDSCALEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedCeilWithPrecisionFloat64x8 [a] x mask) + // result: (VRNDSCALEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16 [a] x mask) + // result: (VREDUCEPSMasked512 [a+10] x (VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 10) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4 [a] x mask) + // result: (VREDUCEPSMasked128 [a+10] x (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 10) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 
:= v.Args[0] + b := v.Block + // match: (MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8 [a] x mask) + // result: (VREDUCEPSMasked256 [a+10] x (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 10) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2 [a] x mask) + // result: (VREDUCEPDMasked128 [a+10] x (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 10) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4 [a] x mask) + // result: (VREDUCEPDMasked256 [a+10] x (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 10) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8 [a] x mask) + // result: (VREDUCEPDMasked512 [a+10] x (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 10) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithCeilWithPrecisionFloat32x16 [a] x mask) + // result: (VREDUCEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithCeilWithPrecisionFloat32x4 [a] x mask) + // result: (VREDUCEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithCeilWithPrecisionFloat32x8 [a] x mask) + // result: (VREDUCEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked256) + 
v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithCeilWithPrecisionFloat64x2 [a] x mask) + // result: (VREDUCEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithCeilWithPrecisionFloat64x4 [a] x mask) + // result: (VREDUCEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithCeilWithPrecisionFloat64x8 [a] x mask) + // result: (VREDUCEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16 [a] x mask) + // result: (VREDUCEPSMasked512 [a+9] x (VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 9) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4 [a] x mask) + // result: (VREDUCEPSMasked128 [a+9] x (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 9) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8 [a] x mask) + // result: (VREDUCEPSMasked256 [a+9] x (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 9) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := 
v.Args[0] + b := v.Block + // match: (MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2 [a] x mask) + // result: (VREDUCEPDMasked128 [a+9] x (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 9) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4 [a] x mask) + // result: (VREDUCEPDMasked256 [a+9] x (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 9) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8 [a] x mask) + // result: (VREDUCEPDMasked512 [a+9] x (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 9) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithFloorWithPrecisionFloat32x16 [a] x mask) + // result: (VREDUCEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithFloorWithPrecisionFloat32x4 [a] x mask) + // result: (VREDUCEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithFloorWithPrecisionFloat32x8 [a] x mask) + // result: (VREDUCEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithFloorWithPrecisionFloat64x2 [a] x mask) + // result: (VREDUCEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithFloorWithPrecisionFloat64x4 [a] x mask) + // result: (VREDUCEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithFloorWithPrecisionFloat64x8 [a] x mask) + // result: (VREDUCEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16 [a] x mask) + // result: (VREDUCEPSMasked512 [a+8] x (VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 8) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4 [a] x mask) + // result: (VREDUCEPSMasked128 [a+8] x (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 8) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8 [a] x mask) + // result: (VREDUCEPSMasked256 [a+8] x (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 8) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2 [a] x mask) + // result: (VREDUCEPDMasked128 [a+8] x (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 8) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := 
v.Args[0] + b := v.Block + // match: (MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4 [a] x mask) + // result: (VREDUCEPDMasked256 [a+8] x (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 8) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8 [a] x mask) + // result: (VREDUCEPDMasked512 [a+8] x (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 8) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithRoundWithPrecisionFloat32x16 [a] x mask) + // result: (VREDUCEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithRoundWithPrecisionFloat32x4 [a] x mask) + // result: (VREDUCEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithRoundWithPrecisionFloat32x8 [a] x mask) + // result: (VREDUCEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithRoundWithPrecisionFloat64x2 [a] x mask) + // result: (VREDUCEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithRoundWithPrecisionFloat64x4 [a] x mask) + // result: (VREDUCEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithRoundWithPrecisionFloat64x8 [a] x mask) + // result: (VREDUCEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16 [a] x mask) + // result: (VREDUCEPSMasked512 [a+11] x (VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 11) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4 [a] x mask) + // result: (VREDUCEPSMasked128 [a+11] x (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 11) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8 [a] x mask) + // result: (VREDUCEPSMasked256 [a+11] x (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 11) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2 [a] x mask) + // result: (VREDUCEPDMasked128 [a+11] x (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 11) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4 [a] x mask) + // result: (VREDUCEPDMasked256 [a+11] x (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 11) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { + 
v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8 [a] x mask) + // result: (VREDUCEPDMasked512 [a+11] x (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 11) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithTruncWithPrecisionFloat32x16 [a] x mask) + // result: (VREDUCEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithTruncWithPrecisionFloat32x4 [a] x mask) + // result: (VREDUCEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithTruncWithPrecisionFloat32x8 [a] x mask) + // result: (VREDUCEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithTruncWithPrecisionFloat64x2 [a] x mask) + // result: (VREDUCEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithTruncWithPrecisionFloat64x4 [a] x mask) + // result: (VREDUCEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithTruncWithPrecisionFloat64x8 [a] x mask) + // result: (VREDUCEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} func rewriteValueAMD64_OpMaskedDivFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -34546,6 +37086,222 @@ func rewriteValueAMD64_OpMaskedEqualUint8x64(v *Value) bool { return true } } +func rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFloorSuppressExceptionWithPrecisionFloat32x16 [a] x mask) + // result: (VRNDSCALEPSMasked512 [a+9] x (VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 9) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFloorSuppressExceptionWithPrecisionFloat32x4 [a] x mask) + // result: (VRNDSCALEPSMasked128 [a+9] x (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 9) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFloorSuppressExceptionWithPrecisionFloat32x8 [a] x mask) + // result: (VRNDSCALEPSMasked256 [a+9] x (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 9) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFloorSuppressExceptionWithPrecisionFloat64x2 [a] x mask) + // result: (VRNDSCALEPDMasked128 [a+9] x (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 9) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFloorSuppressExceptionWithPrecisionFloat64x4 [a] x mask) + // result: (VRNDSCALEPDMasked256 [a+9] x (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 9) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFloorSuppressExceptionWithPrecisionFloat64x8 [a] x mask) + // result: (VRNDSCALEPDMasked512 [a+9] x (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 9) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + 
v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFloorWithPrecisionFloat32x16 [a] x mask) + // result: (VRNDSCALEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFloorWithPrecisionFloat32x4 [a] x mask) + // result: (VRNDSCALEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFloorWithPrecisionFloat32x8 [a] x mask) + // result: (VRNDSCALEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFloorWithPrecisionFloat64x2 [a] x mask) + // result: (VRNDSCALEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFloorWithPrecisionFloat64x4 [a] x mask) + // result: (VRNDSCALEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFloorWithPrecisionFloat64x8 [a] x mask) + // result: (VRNDSCALEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} func rewriteValueAMD64_OpMaskedGreaterEqualFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -40348,6 +43104,222 @@ func rewriteValueAMD64_OpMaskedPopCountUint8x64(v *Value) bool { return true } } +func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: 
(MaskedRoundSuppressExceptionWithPrecisionFloat32x16 [a] x mask) + // result: (VRNDSCALEPSMasked512 [a+8] x (VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 8) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRoundSuppressExceptionWithPrecisionFloat32x4 [a] x mask) + // result: (VRNDSCALEPSMasked128 [a+8] x (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 8) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRoundSuppressExceptionWithPrecisionFloat32x8 [a] x mask) + // result: (VRNDSCALEPSMasked256 [a+8] x (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 8) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRoundSuppressExceptionWithPrecisionFloat64x2 [a] x mask) + // result: (VRNDSCALEPDMasked128 [a+8] x (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 8) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRoundSuppressExceptionWithPrecisionFloat64x4 [a] x mask) + // result: (VRNDSCALEPDMasked256 [a+8] x (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 8) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRoundSuppressExceptionWithPrecisionFloat64x8 [a] x mask) + // result: (VRNDSCALEPDMasked512 [a+8] x (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 8) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRoundWithPrecisionFloat32x16 [a] x mask) + // result: (VRNDSCALEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRoundWithPrecisionFloat32x4 [a] x mask) + // result: (VRNDSCALEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRoundWithPrecisionFloat32x8 [a] x mask) + // result: (VRNDSCALEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRoundWithPrecisionFloat64x2 [a] x mask) + // result: (VRNDSCALEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRoundWithPrecisionFloat64x4 [a] x mask) + // result: (VRNDSCALEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRoundWithPrecisionFloat64x8 [a] x mask) + // result: (VRNDSCALEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} func rewriteValueAMD64_OpMaskedSaturatedAddInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -41416,6 +44388,222 @@ func rewriteValueAMD64_OpMaskedSubUint8x64(v *Value) bool { return true } } +func rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedTruncSuppressExceptionWithPrecisionFloat32x16 [a] x mask) + // result: (VRNDSCALEPSMasked512 [a+11] x (VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 11) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { + 
v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedTruncSuppressExceptionWithPrecisionFloat32x4 [a] x mask) + // result: (VRNDSCALEPSMasked128 [a+11] x (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 11) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedTruncSuppressExceptionWithPrecisionFloat32x8 [a] x mask) + // result: (VRNDSCALEPSMasked256 [a+11] x (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 11) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedTruncSuppressExceptionWithPrecisionFloat64x2 [a] x mask) + // result: (VRNDSCALEPDMasked128 [a+11] x (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 11) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedTruncSuppressExceptionWithPrecisionFloat64x4 [a] x mask) + // result: (VRNDSCALEPDMasked256 [a+11] x (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 11) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedTruncSuppressExceptionWithPrecisionFloat64x8 [a] x mask) + // result: (VRNDSCALEPDMasked512 [a+11] x (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 11) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedTruncWithPrecisionFloat32x16 [a] x mask) + // result: (VRNDSCALEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedTruncWithPrecisionFloat32x4 [a] x mask) + // result: (VRNDSCALEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked128) + v.AuxInt = 
int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedTruncWithPrecisionFloat32x8 [a] x mask) + // result: (VRNDSCALEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedTruncWithPrecisionFloat64x2 [a] x mask) + // result: (VRNDSCALEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedTruncWithPrecisionFloat64x4 [a] x mask) + // result: (VRNDSCALEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedTruncWithPrecisionFloat64x8 [a] x mask) + // result: (VRNDSCALEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} func rewriteValueAMD64_OpMaskedXorFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -43218,6 +46406,132 @@ func rewriteValueAMD64_OpPopCount8(v *Value) bool { return true } } +func rewriteValueAMD64_OpRoundFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (RoundFloat32x4 x) + // result: (VROUNDPS128 [0] x) + for { + x := v_0 + v.reset(OpAMD64VROUNDPS128) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRoundFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (RoundFloat32x8 x) + // result: (VROUNDPS256 [0] x) + for { + x := v_0 + v.reset(OpAMD64VROUNDPS256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRoundFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (RoundFloat64x2 x) + // result: (VROUNDPD128 [0] x) + for { + x := v_0 + v.reset(OpAMD64VROUNDPD128) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRoundFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (RoundFloat64x4 x) + // result: (VROUNDPD256 [0] x) + for { + x := v_0 + v.reset(OpAMD64VROUNDPD256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: 
(RoundSuppressExceptionWithPrecisionFloat32x16 [a] x) + // result: (VRNDSCALEPS512 [a+8] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPS512) + v.AuxInt = int8ToAuxInt(a + 8) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (RoundSuppressExceptionWithPrecisionFloat32x4 [a] x) + // result: (VRNDSCALEPS128 [a+8] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPS128) + v.AuxInt = int8ToAuxInt(a + 8) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (RoundSuppressExceptionWithPrecisionFloat32x8 [a] x) + // result: (VRNDSCALEPS256 [a+8] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPS256) + v.AuxInt = int8ToAuxInt(a + 8) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (RoundSuppressExceptionWithPrecisionFloat64x2 [a] x) + // result: (VRNDSCALEPD128 [a+8] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPD128) + v.AuxInt = int8ToAuxInt(a + 8) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (RoundSuppressExceptionWithPrecisionFloat64x4 [a] x) + // result: (VRNDSCALEPD256 [a+8] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPD256) + v.AuxInt = int8ToAuxInt(a + 8) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (RoundSuppressExceptionWithPrecisionFloat64x8 [a] x) + // result: (VRNDSCALEPD512 [a+8] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPD512) + v.AuxInt = int8ToAuxInt(a + 8) + v.AddArg(x) + return true + } +} func rewriteValueAMD64_OpRoundToEven(v *Value) bool { v_0 := v.Args[0] // match: (RoundToEven x) @@ -43230,6 +46544,84 @@ func rewriteValueAMD64_OpRoundToEven(v *Value) bool { return true } } +func rewriteValueAMD64_OpRoundWithPrecisionFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (RoundWithPrecisionFloat32x16 [a] x) + // result: (VRNDSCALEPS512 [a+0] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPS512) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRoundWithPrecisionFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (RoundWithPrecisionFloat32x4 [a] x) + // result: (VRNDSCALEPS128 [a+0] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPS128) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRoundWithPrecisionFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (RoundWithPrecisionFloat32x8 [a] x) + // result: (VRNDSCALEPS256 [a+0] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPS256) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRoundWithPrecisionFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (RoundWithPrecisionFloat64x2 [a] x) + // result: (VRNDSCALEPD128 [a+0] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPD128) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) + return true + } +} +func 
rewriteValueAMD64_OpRoundWithPrecisionFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (RoundWithPrecisionFloat64x4 [a] x) + // result: (VRNDSCALEPD256 [a+0] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPD256) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRoundWithPrecisionFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (RoundWithPrecisionFloat64x8 [a] x) + // result: (VRNDSCALEPD512 [a+0] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPD512) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) + return true + } +} func rewriteValueAMD64_OpRsh16Ux16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -45190,6 +48582,210 @@ func rewriteValueAMD64_OpTrunc(v *Value) bool { return true } } +func rewriteValueAMD64_OpTruncFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (TruncFloat32x4 x) + // result: (VROUNDPS128 [3] x) + for { + x := v_0 + v.reset(OpAMD64VROUNDPS128) + v.AuxInt = int8ToAuxInt(3) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpTruncFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (TruncFloat32x8 x) + // result: (VROUNDPS256 [3] x) + for { + x := v_0 + v.reset(OpAMD64VROUNDPS256) + v.AuxInt = int8ToAuxInt(3) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpTruncFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (TruncFloat64x2 x) + // result: (VROUNDPD128 [3] x) + for { + x := v_0 + v.reset(OpAMD64VROUNDPD128) + v.AuxInt = int8ToAuxInt(3) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpTruncFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (TruncFloat64x4 x) + // result: (VROUNDPD256 [3] x) + for { + x := v_0 + v.reset(OpAMD64VROUNDPD256) + v.AuxInt = int8ToAuxInt(3) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (TruncSuppressExceptionWithPrecisionFloat32x16 [a] x) + // result: (VRNDSCALEPS512 [a+11] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPS512) + v.AuxInt = int8ToAuxInt(a + 11) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (TruncSuppressExceptionWithPrecisionFloat32x4 [a] x) + // result: (VRNDSCALEPS128 [a+11] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPS128) + v.AuxInt = int8ToAuxInt(a + 11) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (TruncSuppressExceptionWithPrecisionFloat32x8 [a] x) + // result: (VRNDSCALEPS256 [a+11] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPS256) + v.AuxInt = int8ToAuxInt(a + 11) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (TruncSuppressExceptionWithPrecisionFloat64x2 [a] x) + // result: (VRNDSCALEPD128 [a+11] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPD128) + v.AuxInt = int8ToAuxInt(a + 11) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (TruncSuppressExceptionWithPrecisionFloat64x4 [a] x) + // result: (VRNDSCALEPD256 [a+11] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + 
v.reset(OpAMD64VRNDSCALEPD256) + v.AuxInt = int8ToAuxInt(a + 11) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (TruncSuppressExceptionWithPrecisionFloat64x8 [a] x) + // result: (VRNDSCALEPD512 [a+11] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPD512) + v.AuxInt = int8ToAuxInt(a + 11) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpTruncWithPrecisionFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (TruncWithPrecisionFloat32x16 [a] x) + // result: (VRNDSCALEPS512 [a+3] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPS512) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpTruncWithPrecisionFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (TruncWithPrecisionFloat32x4 [a] x) + // result: (VRNDSCALEPS128 [a+3] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPS128) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpTruncWithPrecisionFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (TruncWithPrecisionFloat32x8 [a] x) + // result: (VRNDSCALEPS256 [a+3] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPS256) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpTruncWithPrecisionFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (TruncWithPrecisionFloat64x2 [a] x) + // result: (VRNDSCALEPD128 [a+3] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPD128) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpTruncWithPrecisionFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (TruncWithPrecisionFloat64x4 [a] x) + // result: (VRNDSCALEPD256 [a+3] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPD256) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpTruncWithPrecisionFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (TruncWithPrecisionFloat64x8 [a] x) + // result: (VRNDSCALEPD512 [a+3] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPD512) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) + return true + } +} func rewriteValueAMD64_OpZero(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 3c8104ec2c033c..d05d0e2066f7f6 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -16,16 +16,32 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float32x16.Sqrt", opLen1(ssa.OpSqrtFloat32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x4.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.Ceil", opLen1(ssa.OpCeilFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.Floor", opLen1(ssa.OpFloorFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.Round", opLen1(ssa.OpRoundFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x4.Sqrt", opLen1(ssa.OpSqrtFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.Trunc", opLen1(ssa.OpTruncFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x8.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.Ceil", opLen1(ssa.OpCeilFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.Floor", opLen1(ssa.OpFloorFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.Round", opLen1(ssa.OpRoundFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x8.Sqrt", opLen1(ssa.OpSqrtFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.Trunc", opLen1(ssa.OpTruncFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x2.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.Ceil", opLen1(ssa.OpCeilFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.Floor", opLen1(ssa.OpFloorFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.Round", opLen1(ssa.OpRoundFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x2.Sqrt", opLen1(ssa.OpSqrtFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.Trunc", opLen1(ssa.OpTruncFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x4.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.Ceil", opLen1(ssa.OpCeilFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.Floor", opLen1(ssa.OpFloorFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.Round", opLen1(ssa.OpRoundFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x4.Sqrt", opLen1(ssa.OpSqrtFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.Trunc", opLen1(ssa.OpTruncFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x8.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x8.Sqrt", opLen1(ssa.OpSqrtFloat64x8, types.TypeVec512), sys.AMD64) @@ -87,6 +103,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, 
archFamilies . addF(simdPackage, "Float32x16.Sub", opLen2(ssa.OpSubFloat32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x16.Xor", opLen2(ssa.OpXorFloat32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Add", opLen2(ssa.OpAddFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.AddSub", opLen2(ssa.OpAddSubFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x4.And", opLen2(ssa.OpAndFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x4.AndNot", opLen2(ssa.OpAndNotFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x4.Div", opLen2(ssa.OpDivFloat32x4, types.TypeVec128), sys.AMD64) @@ -110,6 +127,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float32x4.Sub", opLen2(ssa.OpSubFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x4.Xor", opLen2(ssa.OpXorFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Add", opLen2(ssa.OpAddFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.AddSub", opLen2(ssa.OpAddSubFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x8.And", opLen2(ssa.OpAndFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x8.AndNot", opLen2(ssa.OpAndNotFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x8.Div", opLen2(ssa.OpDivFloat32x8, types.TypeVec256), sys.AMD64) @@ -133,6 +151,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float32x8.Sub", opLen2(ssa.OpSubFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x8.Xor", opLen2(ssa.OpXorFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.Add", opLen2(ssa.OpAddFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.AddSub", opLen2(ssa.OpAddSubFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x2.And", opLen2(ssa.OpAndFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x2.AndNot", opLen2(ssa.OpAndNotFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x2.Div", opLen2(ssa.OpDivFloat64x2, types.TypeVec128), sys.AMD64) @@ -156,6 +175,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float64x2.Sub", opLen2(ssa.OpSubFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x2.Xor", opLen2(ssa.OpXorFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.Add", opLen2(ssa.OpAddFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.AddSub", opLen2(ssa.OpAddSubFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x4.And", opLen2(ssa.OpAndFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x4.AndNot", opLen2(ssa.OpAndNotFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x4.Div", opLen2(ssa.OpDivFloat64x4, types.TypeVec256), sys.AMD64) @@ -1083,6 +1103,198 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint8x64.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x64.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x64.MaskedSub", opLen3(ssa.OpMaskedSubUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.DiffWithCeilWithPrecision", 
opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x8, 
types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + 
addF(simdPackage, "Float64x4.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.RoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.RoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpRoundSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.RoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpRoundSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.RoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpRoundSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.RoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpRoundSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.RoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpRoundSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.TruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpTruncSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.TruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpTruncSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.TruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpTruncSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.TruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpTruncSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), 
sys.AMD64) + addF(simdPackage, "Float64x4.TruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpTruncSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.TruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpTruncSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedDiffWithCeilSuppressExceptionWithPrecision", 
opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat64x4, 
types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiffWithTruncWithPrecision", 
opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, 
"Float64x4.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x16.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float32x16.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float32x16.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) diff --git 
a/src/simd/stubs_amd64.go b/src/simd/stubs_amd64.go index 5dfb49cf2d5f59..d433b67c9aefc4 100644 --- a/src/simd/stubs_amd64.go +++ b/src/simd/stubs_amd64.go @@ -19,36 +19,84 @@ func (x Float32x4) ApproximateReciprocal() Float32x4 // Asm: VRSQRTPS, CPU Feature: AVX func (x Float32x4) ApproximateReciprocalOfSqrt() Float32x4 +// Asm: VROUNDPS, CPU Feature: AVX +func (x Float32x4) Ceil() Float32x4 + +// Asm: VROUNDPS, CPU Feature: AVX +func (x Float32x4) Floor() Float32x4 + +// Asm: VROUNDPS, CPU Feature: AVX +func (x Float32x4) Round() Float32x4 + // Asm: VSQRTPS, CPU Feature: AVX func (x Float32x4) Sqrt() Float32x4 +// Asm: VROUNDPS, CPU Feature: AVX +func (x Float32x4) Trunc() Float32x4 + // Asm: VRCP14PS, CPU Feature: AVX512EVEX func (x Float32x8) ApproximateReciprocal() Float32x8 // Asm: VRSQRTPS, CPU Feature: AVX func (x Float32x8) ApproximateReciprocalOfSqrt() Float32x8 +// Asm: VROUNDPS, CPU Feature: AVX +func (x Float32x8) Ceil() Float32x8 + +// Asm: VROUNDPS, CPU Feature: AVX +func (x Float32x8) Floor() Float32x8 + +// Asm: VROUNDPS, CPU Feature: AVX +func (x Float32x8) Round() Float32x8 + // Asm: VSQRTPS, CPU Feature: AVX func (x Float32x8) Sqrt() Float32x8 +// Asm: VROUNDPS, CPU Feature: AVX +func (x Float32x8) Trunc() Float32x8 + // Asm: VRCP14PD, CPU Feature: AVX512EVEX func (x Float64x2) ApproximateReciprocal() Float64x2 // Asm: VRSQRT14PD, CPU Feature: AVX512EVEX func (x Float64x2) ApproximateReciprocalOfSqrt() Float64x2 +// Asm: VROUNDPD, CPU Feature: AVX +func (x Float64x2) Ceil() Float64x2 + +// Asm: VROUNDPD, CPU Feature: AVX +func (x Float64x2) Floor() Float64x2 + +// Asm: VROUNDPD, CPU Feature: AVX +func (x Float64x2) Round() Float64x2 + // Asm: VSQRTPD, CPU Feature: AVX func (x Float64x2) Sqrt() Float64x2 +// Asm: VROUNDPD, CPU Feature: AVX +func (x Float64x2) Trunc() Float64x2 + // Asm: VRCP14PD, CPU Feature: AVX512EVEX func (x Float64x4) ApproximateReciprocal() Float64x4 // Asm: VRSQRT14PD, CPU Feature: AVX512EVEX func (x Float64x4) ApproximateReciprocalOfSqrt() Float64x4 +// Asm: VROUNDPD, CPU Feature: AVX +func (x Float64x4) Ceil() Float64x4 + +// Asm: VROUNDPD, CPU Feature: AVX +func (x Float64x4) Floor() Float64x4 + +// Asm: VROUNDPD, CPU Feature: AVX +func (x Float64x4) Round() Float64x4 + // Asm: VSQRTPD, CPU Feature: AVX func (x Float64x4) Sqrt() Float64x4 +// Asm: VROUNDPD, CPU Feature: AVX +func (x Float64x4) Trunc() Float64x4 + // Asm: VRCP14PD, CPU Feature: AVX512EVEX func (x Float64x8) ApproximateReciprocal() Float64x8 @@ -246,6 +294,9 @@ func (x Float32x16) Xor(y Float32x16) Float32x16 // Asm: VADDPS, CPU Feature: AVX func (x Float32x4) Add(y Float32x4) Float32x4 +// Asm: VADDSUBPS, CPU Feature: AVX +func (x Float32x4) AddSub(y Float32x4) Float32x4 + // Asm: VANDPS, CPU Feature: AVX func (x Float32x4) And(y Float32x4) Float32x4 @@ -333,6 +384,9 @@ func (x Float32x4) Xor(y Float32x4) Float32x4 // Asm: VADDPS, CPU Feature: AVX func (x Float32x8) Add(y Float32x8) Float32x8 +// Asm: VADDSUBPS, CPU Feature: AVX +func (x Float32x8) AddSub(y Float32x8) Float32x8 + // Asm: VANDPS, CPU Feature: AVX func (x Float32x8) And(y Float32x8) Float32x8 @@ -420,6 +474,9 @@ func (x Float32x8) Xor(y Float32x8) Float32x8 // Asm: VADDPD, CPU Feature: AVX func (x Float64x2) Add(y Float64x2) Float64x2 +// Asm: VADDSUBPD, CPU Feature: AVX +func (x Float64x2) AddSub(y Float64x2) Float64x2 + // Asm: VANDPD, CPU Feature: AVX func (x Float64x2) And(y Float64x2) Float64x2 @@ -507,6 +564,9 @@ func (x Float64x2) Xor(y Float64x2) Float64x2 // Asm: VADDPD, CPU Feature: AVX func (x Float64x4) 
Add(y Float64x4) Float64x4 +// Asm: VADDSUBPD, CPU Feature: AVX +func (x Float64x4) AddSub(y Float64x4) Float64x4 + // Asm: VANDPD, CPU Feature: AVX func (x Float64x4) And(y Float64x4) Float64x4 @@ -4112,6 +4172,582 @@ func (x Uint8x64) MaskedSaturatedSub(y Uint8x64, z Mask8x64) Uint8x64 // Asm: VPSUBB, CPU Feature: AVX512EVEX func (x Uint8x64) MaskedSub(y Uint8x64, z Mask8x64) Uint8x64 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) CeilSuppressExceptionWithPrecision(imm8 uint8) Float32x16 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) CeilSuppressExceptionWithPrecision(imm8 uint8) Float32x4 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) CeilSuppressExceptionWithPrecision(imm8 uint8) Float32x8 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) CeilSuppressExceptionWithPrecision(imm8 uint8) Float64x2 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) CeilSuppressExceptionWithPrecision(imm8 uint8) Float64x4 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) CeilSuppressExceptionWithPrecision(imm8 uint8) Float64x8 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) CeilWithPrecision(imm8 uint8) Float32x16 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) CeilWithPrecision(imm8 uint8) Float32x4 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) CeilWithPrecision(imm8 uint8) Float32x8 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) CeilWithPrecision(imm8 uint8) Float64x2 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) CeilWithPrecision(imm8 uint8) Float64x4 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) CeilWithPrecision(imm8 uint8) Float64x8 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float32x16 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float32x4 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float32x8 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float64x2 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float64x4 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float64x8 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) DiffWithCeilWithPrecision(imm8 uint8) Float32x16 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) DiffWithCeilWithPrecision(imm8 uint8) Float32x4 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) DiffWithCeilWithPrecision(imm8 uint8) Float32x8 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) DiffWithCeilWithPrecision(imm8 uint8) Float64x2 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) DiffWithCeilWithPrecision(imm8 uint8) Float64x4 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) DiffWithCeilWithPrecision(imm8 uint8) Float64x8 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float32x16 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float32x4 + +// Asm: VREDUCEPS, 
CPU Feature: AVX512EVEX +func (x Float32x8) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float32x8 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float64x2 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float64x4 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float64x8 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) DiffWithFloorWithPrecision(imm8 uint8) Float32x16 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) DiffWithFloorWithPrecision(imm8 uint8) Float32x4 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) DiffWithFloorWithPrecision(imm8 uint8) Float32x8 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) DiffWithFloorWithPrecision(imm8 uint8) Float64x2 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) DiffWithFloorWithPrecision(imm8 uint8) Float64x4 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) DiffWithFloorWithPrecision(imm8 uint8) Float64x8 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float32x16 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float32x4 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float32x8 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float64x2 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float64x4 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float64x8 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) DiffWithRoundWithPrecision(imm8 uint8) Float32x16 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) DiffWithRoundWithPrecision(imm8 uint8) Float32x4 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) DiffWithRoundWithPrecision(imm8 uint8) Float32x8 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) DiffWithRoundWithPrecision(imm8 uint8) Float64x2 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) DiffWithRoundWithPrecision(imm8 uint8) Float64x4 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) DiffWithRoundWithPrecision(imm8 uint8) Float64x8 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float32x16 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float32x4 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float32x8 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float64x2 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float64x4 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float64x8 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) 
DiffWithTruncWithPrecision(imm8 uint8) Float32x16 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) DiffWithTruncWithPrecision(imm8 uint8) Float32x4 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) DiffWithTruncWithPrecision(imm8 uint8) Float32x8 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) DiffWithTruncWithPrecision(imm8 uint8) Float64x2 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) DiffWithTruncWithPrecision(imm8 uint8) Float64x4 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) DiffWithTruncWithPrecision(imm8 uint8) Float64x8 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) FloorSuppressExceptionWithPrecision(imm8 uint8) Float32x16 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) FloorSuppressExceptionWithPrecision(imm8 uint8) Float32x4 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) FloorSuppressExceptionWithPrecision(imm8 uint8) Float32x8 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) FloorSuppressExceptionWithPrecision(imm8 uint8) Float64x2 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) FloorSuppressExceptionWithPrecision(imm8 uint8) Float64x4 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) FloorSuppressExceptionWithPrecision(imm8 uint8) Float64x8 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) FloorWithPrecision(imm8 uint8) Float32x16 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) FloorWithPrecision(imm8 uint8) Float32x4 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) FloorWithPrecision(imm8 uint8) Float32x8 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) FloorWithPrecision(imm8 uint8) Float64x2 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) FloorWithPrecision(imm8 uint8) Float64x4 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) FloorWithPrecision(imm8 uint8) Float64x8 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) RoundSuppressExceptionWithPrecision(imm8 uint8) Float32x16 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) RoundSuppressExceptionWithPrecision(imm8 uint8) Float32x4 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) RoundSuppressExceptionWithPrecision(imm8 uint8) Float32x8 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) RoundSuppressExceptionWithPrecision(imm8 uint8) Float64x2 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) RoundSuppressExceptionWithPrecision(imm8 uint8) Float64x4 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) RoundSuppressExceptionWithPrecision(imm8 uint8) Float64x8 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) RoundWithPrecision(imm8 uint8) Float32x16 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) RoundWithPrecision(imm8 uint8) Float32x4 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) RoundWithPrecision(imm8 uint8) Float32x8 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) RoundWithPrecision(imm8 uint8) Float64x2 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) RoundWithPrecision(imm8 uint8) Float64x4 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) RoundWithPrecision(imm8 uint8) Float64x8 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) 
TruncSuppressExceptionWithPrecision(imm8 uint8) Float32x16 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) TruncSuppressExceptionWithPrecision(imm8 uint8) Float32x4 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) TruncSuppressExceptionWithPrecision(imm8 uint8) Float32x8 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) TruncSuppressExceptionWithPrecision(imm8 uint8) Float64x2 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) TruncSuppressExceptionWithPrecision(imm8 uint8) Float64x4 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) TruncSuppressExceptionWithPrecision(imm8 uint8) Float64x8 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) TruncWithPrecision(imm8 uint8) Float32x16 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) TruncWithPrecision(imm8 uint8) Float32x4 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) TruncWithPrecision(imm8 uint8) Float32x8 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) TruncWithPrecision(imm8 uint8) Float64x2 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) TruncWithPrecision(imm8 uint8) Float64x4 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) TruncWithPrecision(imm8 uint8) Float64x8 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedCeilWithPrecision(imm uint8, y Mask32x16) Float32x16 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedCeilWithPrecision(imm uint8, y Mask32x4) Float32x4 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedCeilWithPrecision(imm uint8, y Mask32x8) Float32x8 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedCeilWithPrecision(imm uint8, y Mask64x2) Float64x2 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedCeilWithPrecision(imm uint8, y Mask64x4) Float64x4 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedCeilWithPrecision(imm uint8, y Mask64x8) Float64x8 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 + +// 
Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask32x16) Float32x16 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask32x4) Float32x4 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask32x8) Float32x8 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask64x2) Float64x2 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask64x4) Float64x4 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask64x8) Float64x8 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask32x16) Float32x16 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask32x4) Float32x4 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask32x8) Float32x8 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask64x2) Float64x2 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask64x4) Float64x4 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask64x8) Float64x8 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 + +// Asm: VREDUCEPD, CPU Feature: 
AVX512EVEX +func (x Float64x8) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask32x16) Float32x16 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask32x4) Float32x4 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask32x8) Float32x8 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask64x2) Float64x2 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask64x4) Float64x4 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask64x8) Float64x8 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask32x16) Float32x16 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask32x4) Float32x4 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask32x8) Float32x8 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask64x2) Float64x2 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask64x4) Float64x4 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask64x8) Float64x8 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) 
MaskedFloorWithPrecision(imm uint8, y Mask32x16) Float32x16 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFloorWithPrecision(imm uint8, y Mask32x4) Float32x4 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFloorWithPrecision(imm uint8, y Mask32x8) Float32x8 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFloorWithPrecision(imm uint8, y Mask64x2) Float64x2 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFloorWithPrecision(imm uint8, y Mask64x4) Float64x4 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFloorWithPrecision(imm uint8, y Mask64x8) Float64x8 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedRoundWithPrecision(imm uint8, y Mask32x16) Float32x16 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedRoundWithPrecision(imm uint8, y Mask32x4) Float32x4 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedRoundWithPrecision(imm uint8, y Mask32x8) Float32x8 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedRoundWithPrecision(imm uint8, y Mask64x2) Float64x2 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedRoundWithPrecision(imm uint8, y Mask64x4) Float64x4 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedRoundWithPrecision(imm uint8, y Mask64x8) Float64x8 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedTruncWithPrecision(imm uint8, y Mask32x16) Float32x16 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedTruncWithPrecision(imm uint8, y Mask32x4) Float32x4 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedTruncWithPrecision(imm uint8, y Mask32x8) Float32x8 + 
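[Editorial aside, not part of the generated stubs above: a minimal sketch of how the Masked*WithPrecision methods declared in this hunk are meant to be called. It assumes the package is importable as "simd" on this branch and that the caller already holds a Float64x4 and a Mask64x4; only the method name and signature are taken from the declarations above, and the treatment of unselected lanes is an assumption about the masked VRNDSCALEPD form rather than documented behavior.]

package sketch

import "simd" // assumed import path for the dev.simd package

// truncSelected truncates the lanes of x selected by m, keeping prec
// fraction bits (the immediate passed through to VRNDSCALEPD). Lanes not
// selected by the mask follow the masked instruction's merging behavior
// (an assumption, not specified by the stub itself).
func truncSelected(x simd.Float64x4, m simd.Mask64x4, prec uint8) simd.Float64x4 {
	return x.MaskedTruncWithPrecision(prec, m)
}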
+// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedTruncWithPrecision(imm uint8, y Mask64x2) Float64x2 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedTruncWithPrecision(imm uint8, y Mask64x4) Float64x4 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedTruncWithPrecision(imm uint8, y Mask64x8) Float64x8 + // Float64x8 converts from Float32x16 to Float64x8 func (from Float32x16) AsFloat64x8() (to Float64x8) From 9ba7db36b5e482923b956975f9e6b30df8117fd7 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Thu, 12 Jun 2025 16:24:24 +0000 Subject: [PATCH 022/139] [dev.simd] cmd/compile: add dot product ops This CL is generated by CL 678515. Change-Id: Iac7c424bbbffc2514dff3495d6c408fa9c998c2f Reviewed-on: https://go-review.googlesource.com/c/go/+/681296 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/simdssa.go | 21 +- .../compile/internal/ssa/_gen/simdAMD64.rules | 15 + .../compile/internal/ssa/_gen/simdAMD64ops.go | 13 + .../internal/ssa/_gen/simdgenericOps.go | 15 + src/cmd/compile/internal/ssa/opGen.go | 294 ++++++++++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 160 ++++++++++ .../compile/internal/ssagen/simdintrinsics.go | 15 + src/simd/stubs_amd64.go | 75 +++++ 8 files changed, 607 insertions(+), 1 deletion(-) diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index f5bc26fe742e83..02353c7f7b47b1 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -228,6 +228,9 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VORPD512, ssa.OpAMD64VPORD512, ssa.OpAMD64VPORQ512, + ssa.OpAMD64VPMADDWD256, + ssa.OpAMD64VPMADDWD128, + ssa.OpAMD64VPMADDWD512, ssa.OpAMD64VHADDPS128, ssa.OpAMD64VHADDPS256, ssa.OpAMD64VHADDPD128, @@ -260,6 +263,9 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSUBSB256, ssa.OpAMD64VPSUBSW512, ssa.OpAMD64VPSUBSB512, + ssa.OpAMD64VPMADDUBSW128, + ssa.OpAMD64VPMADDUBSW256, + ssa.OpAMD64VPMADDUBSW512, ssa.OpAMD64VPSIGNW256, ssa.OpAMD64VPSIGNW128, ssa.OpAMD64VPSIGND128, @@ -460,6 +466,9 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPORQMasked128, ssa.OpAMD64VPORQMasked256, ssa.OpAMD64VPORQMasked512, + ssa.OpAMD64VPMADDWDMasked256, + ssa.OpAMD64VPMADDWDMasked512, + ssa.OpAMD64VPMADDWDMasked128, ssa.OpAMD64VPADDSWMasked256, ssa.OpAMD64VPADDSWMasked512, ssa.OpAMD64VPADDSWMasked128, @@ -472,6 +481,9 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSUBSBMasked128, ssa.OpAMD64VPSUBSBMasked256, ssa.OpAMD64VPSUBSBMasked512, + ssa.OpAMD64VPMADDUBSWMasked256, + ssa.OpAMD64VPMADDUBSWMasked512, + ssa.OpAMD64VPMADDUBSWMasked128, ssa.OpAMD64VPSUBWMasked256, ssa.OpAMD64VPSUBWMasked512, ssa.OpAMD64VPSUBWMasked128, @@ -600,7 +612,8 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VREDUCEPDMasked512: p = simdFp1k1fp1Imm8(s, v) - case ssa.OpAMD64VCMPPS128, + case ssa.OpAMD64VDPPD128, + ssa.OpAMD64VCMPPS128, ssa.OpAMD64VCMPPS256, ssa.OpAMD64VCMPPD128, ssa.OpAMD64VCMPPD256: @@ -868,6 +881,9 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPORQMasked128, ssa.OpAMD64VPORQMasked256, ssa.OpAMD64VPORQMasked512, + ssa.OpAMD64VPMADDWDMasked256, + ssa.OpAMD64VPMADDWDMasked512, + ssa.OpAMD64VPMADDWDMasked128, ssa.OpAMD64VPOPCNTWMasked256, ssa.OpAMD64VPOPCNTWMasked512, ssa.OpAMD64VPOPCNTWMasked128, @@ -892,6 +908,9 @@ func ssaGenSIMDValue(s 
*ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSUBSBMasked128, ssa.OpAMD64VPSUBSBMasked256, ssa.OpAMD64VPSUBSBMasked512, + ssa.OpAMD64VPMADDUBSWMasked256, + ssa.OpAMD64VPMADDUBSWMasked512, + ssa.OpAMD64VPMADDUBSWMasked128, ssa.OpAMD64VSQRTPSMasked512, ssa.OpAMD64VSQRTPSMasked128, ssa.OpAMD64VSQRTPSMasked256, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 8bf896afb26a3c..d5caf09daccd6f 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -186,6 +186,7 @@ (DivFloat64x2 ...) => (VDIVPD128 ...) (DivFloat64x4 ...) => (VDIVPD256 ...) (DivFloat64x8 ...) => (VDIVPD512 ...) +(DotProdBroadcastFloat64x2 x y) => (VDPPD128 [127] x y) (EqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [0] x y)) (EqualFloat32x4 x y) => (VCMPPS128 [0] x y) (EqualFloat32x8 x y) => (VCMPPS256 [0] x y) @@ -829,6 +830,9 @@ (MaskedOrUint64x2 x y mask) => (VPORQMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedOrUint64x4 x y mask) => (VPORQMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedOrUint64x8 x y mask) => (VPORQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedPairDotProdInt16x16 x y mask) => (VPMADDWDMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedPairDotProdInt16x32 x y mask) => (VPMADDWDMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedPairDotProdInt16x8 x y mask) => (VPMADDWDMasked128 x y (VPMOVVec16x8ToM mask)) (MaskedPopCountInt16x16 x mask) => (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) (MaskedPopCountInt16x32 x mask) => (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) (MaskedPopCountInt16x8 x mask) => (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) @@ -889,6 +893,9 @@ (MaskedSaturatedSubUint8x16 x y mask) => (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) (MaskedSaturatedSubUint8x32 x y mask) => (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) (MaskedSaturatedSubUint8x64 x y mask) => (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedSaturatedUnsignedSignedPairDotProdUint16x16 x y mask) => (VPMADDUBSWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedSaturatedUnsignedSignedPairDotProdUint16x32 x y mask) => (VPMADDUBSWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedSaturatedUnsignedSignedPairDotProdUint16x8 x y mask) => (VPMADDUBSWMasked128 x y (VPMOVVec16x8ToM mask)) (MaskedSqrtFloat32x16 x mask) => (VSQRTPSMasked512 x (VPMOVVec32x16ToM mask)) (MaskedSqrtFloat32x4 x mask) => (VSQRTPSMasked128 x (VPMOVVec32x4ToM mask)) (MaskedSqrtFloat32x8 x mask) => (VSQRTPSMasked256 x (VPMOVVec32x8ToM mask)) @@ -1108,6 +1115,9 @@ (OrUint64x8 ...) => (VPORQ512 ...) (OrUint8x16 ...) => (VPOR128 ...) (OrUint8x32 ...) => (VPOR256 ...) +(PairDotProdInt16x16 ...) => (VPMADDWD256 ...) +(PairDotProdInt16x32 ...) => (VPMADDWD512 ...) +(PairDotProdInt16x8 ...) => (VPMADDWD128 ...) (PairwiseAddFloat32x4 ...) => (VHADDPS128 ...) (PairwiseAddFloat32x8 ...) => (VHADDPS256 ...) (PairwiseAddFloat64x2 ...) => (VHADDPD128 ...) @@ -1200,6 +1210,11 @@ (SaturatedSubUint8x16 ...) => (VPSUBSB128 ...) (SaturatedSubUint8x32 ...) => (VPSUBSB256 ...) (SaturatedSubUint8x64 ...) => (VPSUBSB512 ...) +(SaturatedUnsignedSignedPairDotProdUint16x16 ...) => (VPMADDUBSW256 ...) +(SaturatedUnsignedSignedPairDotProdUint16x32 ...) => (VPMADDUBSW512 ...) +(SaturatedUnsignedSignedPairDotProdUint16x8 ...) => (VPMADDUBSW128 ...) +(SaturatedUnsignedSignedPairDotProdUint8x16 ...) => (VPMADDUBSW128 ...) +(SaturatedUnsignedSignedPairDotProdUint8x32 ...) => (VPMADDUBSW256 ...) (SignInt16x16 ...) => (VPSIGNW256 ...) (SignInt16x8 ...) => (VPSIGNW128 ...) 
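[Editorial aside, not part of the patch: the new PairDotProd rules above lower to VPMADDWD, which multiplies adjacent pairs of signed 16-bit elements and adds each pair of products into one signed 32-bit lane. The scalar Go reference below shows that semantics for one 128-bit vector's worth of data; the array-based helper is illustrative only and not an API from this series.]

package sketch

// pairDotProdRef mirrors what Int16x8.PairDotProd / VPMADDWD computes:
// out[i] = a[2i]*b[2i] + a[2i+1]*b[2i+1], with the 16-bit inputs widened
// to 32 bits before multiplying and summing, so no intermediate overflow.
func pairDotProdRef(a, b [8]int16) [4]int32 {
	var out [4]int32
	for i := range out {
		out[i] = int32(a[2*i])*int32(b[2*i]) + int32(a[2*i+1])*int32(b[2*i+1])
	}
	return out
}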
(SignInt32x4 ...) => (VPSIGND128 ...) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 6881757d1a1bab..f580973c9dcb19 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -185,6 +185,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPMINSWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINSW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULHWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULLWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMULLW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMADDWDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMADDWD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPOPCNTWMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPADDSWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPADDSW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPSUBSWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -194,6 +195,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPMULHW256", argLength: 2, reg: fp21, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULLW256", argLength: 2, reg: fp21, asm: "VPMULLW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPOR256", argLength: 2, reg: fp21, asm: "VPOR", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMADDWD256", argLength: 2, reg: fp21, asm: "VPMADDWD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPHADDW256", argLength: 2, reg: fp21, asm: "VPHADDW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPHSUBW256", argLength: 2, reg: fp21, asm: "VPHSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPOPCNTW256", argLength: 1, reg: fp11, asm: "VPOPCNTW", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -216,6 +218,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPMINSWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULHWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULLWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMULLW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMADDWDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMADDWD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPOPCNTWMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPADDSWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPADDSW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPSUBSWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -224,6 +227,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPMINSW512", argLength: 2, reg: fp21, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULHW512", argLength: 2, reg: fp21, asm: "VPMULHW", commutative: true, 
typ: "Vec512", resultInArg0: false}, {name: "VPMULLW512", argLength: 2, reg: fp21, asm: "VPMULLW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMADDWD512", argLength: 2, reg: fp21, asm: "VPMADDWD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPOPCNTW512", argLength: 1, reg: fp11, asm: "VPOPCNTW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPADDSW512", argLength: 2, reg: fp21, asm: "VPADDSW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPSUBSW512", argLength: 2, reg: fp21, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -242,6 +246,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPMINSWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINSW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULHWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULLWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULLW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMADDWDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMADDWD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPOPCNTWMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDSWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPADDSW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPSUBSWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -251,6 +256,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPMULHW128", argLength: 2, reg: fp21, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULLW128", argLength: 2, reg: fp21, asm: "VPMULLW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPOR128", argLength: 2, reg: fp21, asm: "VPOR", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMADDWD128", argLength: 2, reg: fp21, asm: "VPMADDWD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPHADDW128", argLength: 2, reg: fp21, asm: "VPHADDW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPHSUBW128", argLength: 2, reg: fp21, asm: "VPHSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPOPCNTW128", argLength: 1, reg: fp11, asm: "VPOPCNTW", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -480,6 +486,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPMAXUWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULHUWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMADDUBSWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMAXUW256", argLength: 2, reg: fp21, asm: "VPMAXUW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUW256", argLength: 2, reg: fp21, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULHUW256", argLength: 2, reg: fp21, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, 
@@ -488,14 +495,17 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPMAXUWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINUWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULHUWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMADDUBSWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPMAXUW512", argLength: 2, reg: fp21, asm: "VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINUW512", argLength: 2, reg: fp21, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULHUW512", argLength: 2, reg: fp21, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMADDUBSW512", argLength: 2, reg: fp21, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPAVGW128", argLength: 2, reg: fp21, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPAVGWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMAXUWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULHUWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMADDUBSWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMAXUW128", argLength: 2, reg: fp21, asm: "VPMAXUW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUW128", argLength: 2, reg: fp21, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULHUW128", argLength: 2, reg: fp21, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -535,12 +545,14 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPMINUBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMAXUB128", argLength: 2, reg: fp21, asm: "VPMAXUB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUB128", argLength: 2, reg: fp21, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMADDUBSW128", argLength: 2, reg: fp21, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPAVGB256", argLength: 2, reg: fp21, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPAVGBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMAXUBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMAXUB256", argLength: 2, reg: fp21, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUB256", argLength: 2, reg: fp21, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: 
false}, + {name: "VPMADDUBSW256", argLength: 2, reg: fp21, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPAVGB512", argLength: 2, reg: fp21, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPAVGBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXUBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -570,6 +582,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VROUNDPD128", argLength: 1, reg: fp11, asm: "VROUNDPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRNDSCALEPD128", argLength: 1, reg: fp11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VREDUCEPD128", argLength: 1, reg: fp11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VDPPD128", argLength: 2, reg: fp21, asm: "VDPPD", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VCMPPD128", argLength: 2, reg: fp21, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VRNDSCALEPDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VREDUCEPDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 25a496c52f0dea..3e3411e0dfb180 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -151,6 +151,7 @@ func simdGenericOps() []opData { {name: "ApproximateReciprocalOfSqrtFloat64x2", argLength: 1, commutative: false}, {name: "CeilFloat64x2", argLength: 1, commutative: false}, {name: "DivFloat64x2", argLength: 2, commutative: false}, + {name: "DotProdBroadcastFloat64x2", argLength: 2, commutative: true}, {name: "EqualFloat64x2", argLength: 2, commutative: true}, {name: "FloorFloat64x2", argLength: 1, commutative: false}, {name: "GreaterFloat64x2", argLength: 2, commutative: false}, @@ -304,6 +305,7 @@ func simdGenericOps() []opData { {name: "MaskedMulHighInt16x16", argLength: 3, commutative: true}, {name: "MaskedMulLowInt16x16", argLength: 3, commutative: true}, {name: "MaskedNotEqualInt16x16", argLength: 3, commutative: true}, + {name: "MaskedPairDotProdInt16x16", argLength: 3, commutative: false}, {name: "MaskedPopCountInt16x16", argLength: 2, commutative: false}, {name: "MaskedSaturatedAddInt16x16", argLength: 3, commutative: true}, {name: "MaskedSaturatedSubInt16x16", argLength: 3, commutative: false}, @@ -314,6 +316,7 @@ func simdGenericOps() []opData { {name: "MulLowInt16x16", argLength: 2, commutative: true}, {name: "NotEqualInt16x16", argLength: 2, commutative: true}, {name: "OrInt16x16", argLength: 2, commutative: true}, + {name: "PairDotProdInt16x16", argLength: 2, commutative: false}, {name: "PairwiseAddInt16x16", argLength: 2, commutative: false}, {name: "PairwiseSubInt16x16", argLength: 2, commutative: false}, {name: "PopCountInt16x16", argLength: 1, commutative: false}, @@ -343,6 +346,7 @@ func simdGenericOps() []opData { {name: "MaskedMulHighInt16x32", argLength: 3, commutative: true}, {name: "MaskedMulLowInt16x32", argLength: 3, commutative: 
true}, {name: "MaskedNotEqualInt16x32", argLength: 3, commutative: true}, + {name: "MaskedPairDotProdInt16x32", argLength: 3, commutative: false}, {name: "MaskedPopCountInt16x32", argLength: 2, commutative: false}, {name: "MaskedSaturatedAddInt16x32", argLength: 3, commutative: true}, {name: "MaskedSaturatedSubInt16x32", argLength: 3, commutative: false}, @@ -352,6 +356,7 @@ func simdGenericOps() []opData { {name: "MulHighInt16x32", argLength: 2, commutative: true}, {name: "MulLowInt16x32", argLength: 2, commutative: true}, {name: "NotEqualInt16x32", argLength: 2, commutative: true}, + {name: "PairDotProdInt16x32", argLength: 2, commutative: false}, {name: "PopCountInt16x32", argLength: 1, commutative: false}, {name: "SaturatedAddInt16x32", argLength: 2, commutative: true}, {name: "SaturatedSubInt16x32", argLength: 2, commutative: false}, @@ -377,6 +382,7 @@ func simdGenericOps() []opData { {name: "MaskedMulHighInt16x8", argLength: 3, commutative: true}, {name: "MaskedMulLowInt16x8", argLength: 3, commutative: true}, {name: "MaskedNotEqualInt16x8", argLength: 3, commutative: true}, + {name: "MaskedPairDotProdInt16x8", argLength: 3, commutative: false}, {name: "MaskedPopCountInt16x8", argLength: 2, commutative: false}, {name: "MaskedSaturatedAddInt16x8", argLength: 3, commutative: true}, {name: "MaskedSaturatedSubInt16x8", argLength: 3, commutative: false}, @@ -387,6 +393,7 @@ func simdGenericOps() []opData { {name: "MulLowInt16x8", argLength: 2, commutative: true}, {name: "NotEqualInt16x8", argLength: 2, commutative: true}, {name: "OrInt16x8", argLength: 2, commutative: true}, + {name: "PairDotProdInt16x8", argLength: 2, commutative: false}, {name: "PairwiseAddInt16x8", argLength: 2, commutative: false}, {name: "PairwiseSubInt16x8", argLength: 2, commutative: false}, {name: "PopCountInt16x8", argLength: 1, commutative: false}, @@ -732,6 +739,7 @@ func simdGenericOps() []opData { {name: "MaskedPopCountUint16x16", argLength: 2, commutative: false}, {name: "MaskedSaturatedAddUint16x16", argLength: 3, commutative: true}, {name: "MaskedSaturatedSubUint16x16", argLength: 3, commutative: false}, + {name: "MaskedSaturatedUnsignedSignedPairDotProdUint16x16", argLength: 3, commutative: false}, {name: "MaskedSubUint16x16", argLength: 3, commutative: false}, {name: "MaxUint16x16", argLength: 2, commutative: true}, {name: "MinUint16x16", argLength: 2, commutative: true}, @@ -743,6 +751,7 @@ func simdGenericOps() []opData { {name: "PopCountUint16x16", argLength: 1, commutative: false}, {name: "SaturatedAddUint16x16", argLength: 2, commutative: true}, {name: "SaturatedSubUint16x16", argLength: 2, commutative: false}, + {name: "SaturatedUnsignedSignedPairDotProdUint16x16", argLength: 2, commutative: false}, {name: "SubUint16x16", argLength: 2, commutative: false}, {name: "XorUint16x16", argLength: 2, commutative: true}, {name: "AddUint16x32", argLength: 2, commutative: true}, @@ -766,6 +775,7 @@ func simdGenericOps() []opData { {name: "MaskedPopCountUint16x32", argLength: 2, commutative: false}, {name: "MaskedSaturatedAddUint16x32", argLength: 3, commutative: true}, {name: "MaskedSaturatedSubUint16x32", argLength: 3, commutative: false}, + {name: "MaskedSaturatedUnsignedSignedPairDotProdUint16x32", argLength: 3, commutative: false}, {name: "MaskedSubUint16x32", argLength: 3, commutative: false}, {name: "MaxUint16x32", argLength: 2, commutative: true}, {name: "MinUint16x32", argLength: 2, commutative: true}, @@ -774,6 +784,7 @@ func simdGenericOps() []opData { {name: "PopCountUint16x32", argLength: 1, 
commutative: false}, {name: "SaturatedAddUint16x32", argLength: 2, commutative: true}, {name: "SaturatedSubUint16x32", argLength: 2, commutative: false}, + {name: "SaturatedUnsignedSignedPairDotProdUint16x32", argLength: 2, commutative: false}, {name: "SubUint16x32", argLength: 2, commutative: false}, {name: "AddUint16x8", argLength: 2, commutative: true}, {name: "AndUint16x8", argLength: 2, commutative: true}, @@ -798,6 +809,7 @@ func simdGenericOps() []opData { {name: "MaskedPopCountUint16x8", argLength: 2, commutative: false}, {name: "MaskedSaturatedAddUint16x8", argLength: 3, commutative: true}, {name: "MaskedSaturatedSubUint16x8", argLength: 3, commutative: false}, + {name: "MaskedSaturatedUnsignedSignedPairDotProdUint16x8", argLength: 3, commutative: false}, {name: "MaskedSubUint16x8", argLength: 3, commutative: false}, {name: "MaxUint16x8", argLength: 2, commutative: true}, {name: "MinUint16x8", argLength: 2, commutative: true}, @@ -809,6 +821,7 @@ func simdGenericOps() []opData { {name: "PopCountUint16x8", argLength: 1, commutative: false}, {name: "SaturatedAddUint16x8", argLength: 2, commutative: true}, {name: "SaturatedSubUint16x8", argLength: 2, commutative: false}, + {name: "SaturatedUnsignedSignedPairDotProdUint16x8", argLength: 2, commutative: false}, {name: "SubUint16x8", argLength: 2, commutative: false}, {name: "XorUint16x8", argLength: 2, commutative: true}, {name: "AddUint32x16", argLength: 2, commutative: true}, @@ -1033,6 +1046,7 @@ func simdGenericOps() []opData { {name: "PopCountUint8x16", argLength: 1, commutative: false}, {name: "SaturatedAddUint8x16", argLength: 2, commutative: true}, {name: "SaturatedSubUint8x16", argLength: 2, commutative: false}, + {name: "SaturatedUnsignedSignedPairDotProdUint8x16", argLength: 2, commutative: false}, {name: "SubUint8x16", argLength: 2, commutative: false}, {name: "XorUint8x16", argLength: 2, commutative: true}, {name: "AddUint8x32", argLength: 2, commutative: true}, @@ -1065,6 +1079,7 @@ func simdGenericOps() []opData { {name: "PopCountUint8x32", argLength: 1, commutative: false}, {name: "SaturatedAddUint8x32", argLength: 2, commutative: true}, {name: "SaturatedSubUint8x32", argLength: 2, commutative: false}, + {name: "SaturatedUnsignedSignedPairDotProdUint8x32", argLength: 2, commutative: false}, {name: "SubUint8x32", argLength: 2, commutative: false}, {name: "XorUint8x32", argLength: 2, commutative: true}, {name: "AddUint8x64", argLength: 2, commutative: true}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 090cf6903218f9..3ef08ae5559121 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1378,6 +1378,7 @@ const ( OpAMD64VPMINSWMasked256 OpAMD64VPMULHWMasked256 OpAMD64VPMULLWMasked256 + OpAMD64VPMADDWDMasked256 OpAMD64VPOPCNTWMasked256 OpAMD64VPADDSWMasked256 OpAMD64VPSUBSWMasked256 @@ -1387,6 +1388,7 @@ const ( OpAMD64VPMULHW256 OpAMD64VPMULLW256 OpAMD64VPOR256 + OpAMD64VPMADDWD256 OpAMD64VPHADDW256 OpAMD64VPHSUBW256 OpAMD64VPOPCNTW256 @@ -1409,6 +1411,7 @@ const ( OpAMD64VPMINSWMasked512 OpAMD64VPMULHWMasked512 OpAMD64VPMULLWMasked512 + OpAMD64VPMADDWDMasked512 OpAMD64VPOPCNTWMasked512 OpAMD64VPADDSWMasked512 OpAMD64VPSUBSWMasked512 @@ -1417,6 +1420,7 @@ const ( OpAMD64VPMINSW512 OpAMD64VPMULHW512 OpAMD64VPMULLW512 + OpAMD64VPMADDWD512 OpAMD64VPOPCNTW512 OpAMD64VPADDSW512 OpAMD64VPSUBSW512 @@ -1435,6 +1439,7 @@ const ( OpAMD64VPMINSWMasked128 OpAMD64VPMULHWMasked128 OpAMD64VPMULLWMasked128 + OpAMD64VPMADDWDMasked128 
OpAMD64VPOPCNTWMasked128 OpAMD64VPADDSWMasked128 OpAMD64VPSUBSWMasked128 @@ -1444,6 +1449,7 @@ const ( OpAMD64VPMULHW128 OpAMD64VPMULLW128 OpAMD64VPOR128 + OpAMD64VPMADDWD128 OpAMD64VPHADDW128 OpAMD64VPHSUBW128 OpAMD64VPOPCNTW128 @@ -1673,6 +1679,7 @@ const ( OpAMD64VPMAXUWMasked256 OpAMD64VPMINUWMasked256 OpAMD64VPMULHUWMasked256 + OpAMD64VPMADDUBSWMasked256 OpAMD64VPMAXUW256 OpAMD64VPMINUW256 OpAMD64VPMULHUW256 @@ -1681,14 +1688,17 @@ const ( OpAMD64VPMAXUWMasked512 OpAMD64VPMINUWMasked512 OpAMD64VPMULHUWMasked512 + OpAMD64VPMADDUBSWMasked512 OpAMD64VPMAXUW512 OpAMD64VPMINUW512 OpAMD64VPMULHUW512 + OpAMD64VPMADDUBSW512 OpAMD64VPAVGW128 OpAMD64VPAVGWMasked128 OpAMD64VPMAXUWMasked128 OpAMD64VPMINUWMasked128 OpAMD64VPMULHUWMasked128 + OpAMD64VPMADDUBSWMasked128 OpAMD64VPMAXUW128 OpAMD64VPMINUW128 OpAMD64VPMULHUW128 @@ -1728,12 +1738,14 @@ const ( OpAMD64VPMINUBMasked128 OpAMD64VPMAXUB128 OpAMD64VPMINUB128 + OpAMD64VPMADDUBSW128 OpAMD64VPAVGB256 OpAMD64VPAVGBMasked256 OpAMD64VPMAXUBMasked256 OpAMD64VPMINUBMasked256 OpAMD64VPMAXUB256 OpAMD64VPMINUB256 + OpAMD64VPMADDUBSW256 OpAMD64VPAVGB512 OpAMD64VPAVGBMasked512 OpAMD64VPMAXUBMasked512 @@ -1763,6 +1775,7 @@ const ( OpAMD64VROUNDPD128 OpAMD64VRNDSCALEPD128 OpAMD64VREDUCEPD128 + OpAMD64VDPPD128 OpAMD64VCMPPD128 OpAMD64VRNDSCALEPDMasked128 OpAMD64VREDUCEPDMasked128 @@ -4202,6 +4215,7 @@ const ( OpApproximateReciprocalOfSqrtFloat64x2 OpCeilFloat64x2 OpDivFloat64x2 + OpDotProdBroadcastFloat64x2 OpEqualFloat64x2 OpFloorFloat64x2 OpGreaterFloat64x2 @@ -4355,6 +4369,7 @@ const ( OpMaskedMulHighInt16x16 OpMaskedMulLowInt16x16 OpMaskedNotEqualInt16x16 + OpMaskedPairDotProdInt16x16 OpMaskedPopCountInt16x16 OpMaskedSaturatedAddInt16x16 OpMaskedSaturatedSubInt16x16 @@ -4365,6 +4380,7 @@ const ( OpMulLowInt16x16 OpNotEqualInt16x16 OpOrInt16x16 + OpPairDotProdInt16x16 OpPairwiseAddInt16x16 OpPairwiseSubInt16x16 OpPopCountInt16x16 @@ -4394,6 +4410,7 @@ const ( OpMaskedMulHighInt16x32 OpMaskedMulLowInt16x32 OpMaskedNotEqualInt16x32 + OpMaskedPairDotProdInt16x32 OpMaskedPopCountInt16x32 OpMaskedSaturatedAddInt16x32 OpMaskedSaturatedSubInt16x32 @@ -4403,6 +4420,7 @@ const ( OpMulHighInt16x32 OpMulLowInt16x32 OpNotEqualInt16x32 + OpPairDotProdInt16x32 OpPopCountInt16x32 OpSaturatedAddInt16x32 OpSaturatedSubInt16x32 @@ -4428,6 +4446,7 @@ const ( OpMaskedMulHighInt16x8 OpMaskedMulLowInt16x8 OpMaskedNotEqualInt16x8 + OpMaskedPairDotProdInt16x8 OpMaskedPopCountInt16x8 OpMaskedSaturatedAddInt16x8 OpMaskedSaturatedSubInt16x8 @@ -4438,6 +4457,7 @@ const ( OpMulLowInt16x8 OpNotEqualInt16x8 OpOrInt16x8 + OpPairDotProdInt16x8 OpPairwiseAddInt16x8 OpPairwiseSubInt16x8 OpPopCountInt16x8 @@ -4783,6 +4803,7 @@ const ( OpMaskedPopCountUint16x16 OpMaskedSaturatedAddUint16x16 OpMaskedSaturatedSubUint16x16 + OpMaskedSaturatedUnsignedSignedPairDotProdUint16x16 OpMaskedSubUint16x16 OpMaxUint16x16 OpMinUint16x16 @@ -4794,6 +4815,7 @@ const ( OpPopCountUint16x16 OpSaturatedAddUint16x16 OpSaturatedSubUint16x16 + OpSaturatedUnsignedSignedPairDotProdUint16x16 OpSubUint16x16 OpXorUint16x16 OpAddUint16x32 @@ -4817,6 +4839,7 @@ const ( OpMaskedPopCountUint16x32 OpMaskedSaturatedAddUint16x32 OpMaskedSaturatedSubUint16x32 + OpMaskedSaturatedUnsignedSignedPairDotProdUint16x32 OpMaskedSubUint16x32 OpMaxUint16x32 OpMinUint16x32 @@ -4825,6 +4848,7 @@ const ( OpPopCountUint16x32 OpSaturatedAddUint16x32 OpSaturatedSubUint16x32 + OpSaturatedUnsignedSignedPairDotProdUint16x32 OpSubUint16x32 OpAddUint16x8 OpAndUint16x8 @@ -4849,6 +4873,7 @@ const ( OpMaskedPopCountUint16x8 
OpMaskedSaturatedAddUint16x8 OpMaskedSaturatedSubUint16x8 + OpMaskedSaturatedUnsignedSignedPairDotProdUint16x8 OpMaskedSubUint16x8 OpMaxUint16x8 OpMinUint16x8 @@ -4860,6 +4885,7 @@ const ( OpPopCountUint16x8 OpSaturatedAddUint16x8 OpSaturatedSubUint16x8 + OpSaturatedUnsignedSignedPairDotProdUint16x8 OpSubUint16x8 OpXorUint16x8 OpAddUint32x16 @@ -5084,6 +5110,7 @@ const ( OpPopCountUint8x16 OpSaturatedAddUint8x16 OpSaturatedSubUint8x16 + OpSaturatedUnsignedSignedPairDotProdUint8x16 OpSubUint8x16 OpXorUint8x16 OpAddUint8x32 @@ -5116,6 +5143,7 @@ const ( OpPopCountUint8x32 OpSaturatedAddUint8x32 OpSaturatedSubUint8x32 + OpSaturatedUnsignedSignedPairDotProdUint8x32 OpSubUint8x32 OpXorUint8x32 OpAddUint8x64 @@ -20635,6 +20663,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMADDWDMasked256", + argLen: 3, + asm: x86.AVPMADDWD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPOPCNTWMasked256", argLen: 2, @@ -20770,6 +20813,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMADDWD256", + argLen: 2, + asm: x86.AVPMADDWD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPHADDW256", argLen: 2, @@ -21093,6 +21150,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMADDWDMasked512", + argLen: 3, + asm: x86.AVPMADDWD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPOPCNTWMasked512", argLen: 2, @@ -21213,6 +21285,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMADDWD512", + argLen: 2, + asm: x86.AVPMADDWD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPOPCNTW512", argLen: 1, @@ -21481,6 +21567,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMADDWDMasked128", + argLen: 3, + asm: x86.AVPMADDWD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPOPCNTWMasked128", argLen: 2, @@ -21616,6 +21717,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMADDWD128", + argLen: 2, + asm: x86.AVPMADDWD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPHADDW128", argLen: 2, @@ -25035,6 +25150,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMADDUBSWMasked256", + argLen: 3, + asm: x86.AVPMADDUBSW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMAXUW256", argLen: 2, @@ -25159,6 +25289,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMADDUBSWMasked512", + argLen: 3, + asm: x86.AVPMADDUBSW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMAXUW512", argLen: 2, @@ -25204,6 +25349,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMADDUBSW512", + argLen: 2, + asm: x86.AVPMADDUBSW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPAVGW128", argLen: 2, @@ -25283,6 +25442,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMADDUBSWMasked128", + argLen: 3, + asm: x86.AVPMADDUBSW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMAXUW128", argLen: 2, @@ -25886,6 +26060,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMADDUBSW128", + argLen: 2, + asm: x86.AVPMADDUBSW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPAVGB256", argLen: 2, @@ -25979,6 +26167,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMADDUBSW256", + argLen: 2, + asm: x86.AVPMADDUBSW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPAVGB512", argLen: 2, @@ -26415,6 +26617,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VDPPD128", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVDPPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VCMPPD128", auxType: auxInt8, @@ -55403,6 +55621,12 
@@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "DotProdBroadcastFloat64x2", + argLen: 2, + commutative: true, + generic: true, + }, { name: "EqualFloat64x2", argLen: 2, @@ -56242,6 +56466,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MaskedPairDotProdInt16x16", + argLen: 3, + generic: true, + }, { name: "MaskedPopCountInt16x16", argLen: 2, @@ -56299,6 +56528,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "PairDotProdInt16x16", + argLen: 2, + generic: true, + }, { name: "PairwiseAddInt16x16", argLen: 2, @@ -56455,6 +56689,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MaskedPairDotProdInt16x32", + argLen: 3, + generic: true, + }, { name: "MaskedPopCountInt16x32", argLen: 2, @@ -56506,6 +56745,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "PairDotProdInt16x32", + argLen: 2, + generic: true, + }, { name: "PopCountInt16x32", argLen: 1, @@ -56643,6 +56887,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MaskedPairDotProdInt16x8", + argLen: 3, + generic: true, + }, { name: "MaskedPopCountInt16x8", argLen: 2, @@ -56700,6 +56949,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "PairDotProdInt16x8", + argLen: 2, + generic: true, + }, { name: "PairwiseAddInt16x8", argLen: 2, @@ -58612,6 +58866,11 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "MaskedSaturatedUnsignedSignedPairDotProdUint16x16", + argLen: 3, + generic: true, + }, { name: "MaskedSubUint16x16", argLen: 3, @@ -58673,6 +58932,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "SaturatedUnsignedSignedPairDotProdUint16x16", + argLen: 2, + generic: true, + }, { name: "SubUint16x16", argLen: 2, @@ -58800,6 +59064,11 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "MaskedSaturatedUnsignedSignedPairDotProdUint16x32", + argLen: 3, + generic: true, + }, { name: "MaskedSubUint16x32", argLen: 3, @@ -58845,6 +59114,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "SaturatedUnsignedSignedPairDotProdUint16x32", + argLen: 2, + generic: true, + }, { name: "SubUint16x32", argLen: 2, @@ -58978,6 +59252,11 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "MaskedSaturatedUnsignedSignedPairDotProdUint16x8", + argLen: 3, + generic: true, + }, { name: "MaskedSubUint16x8", argLen: 3, @@ -59039,6 +59318,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "SaturatedUnsignedSignedPairDotProdUint16x8", + argLen: 2, + generic: true, + }, { name: "SubUint16x8", argLen: 2, @@ -60293,6 +60577,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "SaturatedUnsignedSignedPairDotProdUint8x16", + argLen: 2, + generic: true, + }, { name: "SubUint8x16", argLen: 2, @@ -60471,6 +60760,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "SaturatedUnsignedSignedPairDotProdUint8x32", + argLen: 2, + generic: true, + }, { name: "SubUint8x32", argLen: 2, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index a6cf0a0b7bd0b1..3605e75213c542 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1263,6 +1263,8 @@ func rewriteValueAMD64(v *Value) bool { case OpDivFloat64x8: v.Op = OpAMD64VDIVPD512 return 
true + case OpDotProdBroadcastFloat64x2: + return rewriteValueAMD64_OpDotProdBroadcastFloat64x2(v) case OpEq16: return rewriteValueAMD64_OpEq16(v) case OpEq32: @@ -2694,6 +2696,12 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedOrUint64x4(v) case OpMaskedOrUint64x8: return rewriteValueAMD64_OpMaskedOrUint64x8(v) + case OpMaskedPairDotProdInt16x16: + return rewriteValueAMD64_OpMaskedPairDotProdInt16x16(v) + case OpMaskedPairDotProdInt16x32: + return rewriteValueAMD64_OpMaskedPairDotProdInt16x32(v) + case OpMaskedPairDotProdInt16x8: + return rewriteValueAMD64_OpMaskedPairDotProdInt16x8(v) case OpMaskedPopCountInt16x16: return rewriteValueAMD64_OpMaskedPopCountInt16x16(v) case OpMaskedPopCountInt16x32: @@ -2814,6 +2822,12 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedSaturatedSubUint8x32(v) case OpMaskedSaturatedSubUint8x64: return rewriteValueAMD64_OpMaskedSaturatedSubUint8x64(v) + case OpMaskedSaturatedUnsignedSignedPairDotProdUint16x16: + return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint16x16(v) + case OpMaskedSaturatedUnsignedSignedPairDotProdUint16x32: + return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint16x32(v) + case OpMaskedSaturatedUnsignedSignedPairDotProdUint16x8: + return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint16x8(v) case OpMaskedSqrtFloat32x16: return rewriteValueAMD64_OpMaskedSqrtFloat32x16(v) case OpMaskedSqrtFloat32x4: @@ -3476,6 +3490,15 @@ func rewriteValueAMD64(v *Value) bool { case OpOrUint8x32: v.Op = OpAMD64VPOR256 return true + case OpPairDotProdInt16x16: + v.Op = OpAMD64VPMADDWD256 + return true + case OpPairDotProdInt16x32: + v.Op = OpAMD64VPMADDWD512 + return true + case OpPairDotProdInt16x8: + v.Op = OpAMD64VPMADDWD128 + return true case OpPairwiseAddFloat32x4: v.Op = OpAMD64VHADDPS128 return true @@ -3838,6 +3861,21 @@ func rewriteValueAMD64(v *Value) bool { case OpSaturatedSubUint8x64: v.Op = OpAMD64VPSUBSB512 return true + case OpSaturatedUnsignedSignedPairDotProdUint16x16: + v.Op = OpAMD64VPMADDUBSW256 + return true + case OpSaturatedUnsignedSignedPairDotProdUint16x32: + v.Op = OpAMD64VPMADDUBSW512 + return true + case OpSaturatedUnsignedSignedPairDotProdUint16x8: + v.Op = OpAMD64VPMADDUBSW128 + return true + case OpSaturatedUnsignedSignedPairDotProdUint8x16: + v.Op = OpAMD64VPMADDUBSW128 + return true + case OpSaturatedUnsignedSignedPairDotProdUint8x32: + v.Op = OpAMD64VPMADDUBSW256 + return true case OpSelect0: return rewriteValueAMD64_OpSelect0(v) case OpSelect1: @@ -29568,6 +29606,20 @@ func rewriteValueAMD64_OpDiv8u(v *Value) bool { return true } } +func rewriteValueAMD64_OpDotProdBroadcastFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (DotProdBroadcastFloat64x2 x y) + // result: (VDPPD128 [127] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VDPPD128) + v.AuxInt = int8ToAuxInt(127) + v.AddArg2(x, y) + return true + } +} func rewriteValueAMD64_OpEq16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -42720,6 +42772,60 @@ func rewriteValueAMD64_OpMaskedOrUint64x8(v *Value) bool { return true } } +func rewriteValueAMD64_OpMaskedPairDotProdInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPairDotProdInt16x16 x y mask) + // result: (VPMADDWDMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMADDWDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + 
v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPairDotProdInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPairDotProdInt16x32 x y mask) + // result: (VPMADDWDMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMADDWDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPairDotProdInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPairDotProdInt16x8 x y mask) + // result: (VPMADDWDMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMADDWDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} func rewriteValueAMD64_OpMaskedPopCountInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -43752,6 +43858,60 @@ func rewriteValueAMD64_OpMaskedSaturatedSubUint8x64(v *Value) bool { return true } } +func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedUnsignedSignedPairDotProdUint16x16 x y mask) + // result: (VPMADDUBSWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMADDUBSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedUnsignedSignedPairDotProdUint16x32 x y mask) + // result: (VPMADDUBSWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMADDUBSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedUnsignedSignedPairDotProdUint16x8 x y mask) + // result: (VPMADDUBSWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMADDUBSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} func rewriteValueAMD64_OpMaskedSqrtFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index d05d0e2066f7f6..7ac5f74246edc8 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -155,6 +155,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float64x2.And", opLen2(ssa.OpAndFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x2.AndNot", opLen2(ssa.OpAndNotFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x2.Div", opLen2(ssa.OpDivFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.DotProdBroadcast", opLen2(ssa.OpDotProdBroadcastFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x2.Equal", opLen2(ssa.OpEqualFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x2.Greater", opLen2(ssa.OpGreaterFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x2.GreaterEqual", opLen2(ssa.OpGreaterEqualFloat64x2, types.TypeVec128), sys.AMD64) @@ -235,6 +236,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Int16x16.MulLow", opLen2(ssa.OpMulLowInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x16.NotEqual", opLen2(ssa.OpNotEqualInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x16.Or", opLen2(ssa.OpOrInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.PairDotProd", opLen2(ssa.OpPairDotProdInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x16.PairwiseAdd", opLen2(ssa.OpPairwiseAddInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x16.PairwiseSub", opLen2(ssa.OpPairwiseSubInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x16.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt16x16, types.TypeVec256), sys.AMD64) @@ -257,6 +259,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Int16x32.MulHigh", opLen2(ssa.OpMulHighInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x32.MulLow", opLen2(ssa.OpMulLowInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x32.NotEqual", opLen2(ssa.OpNotEqualInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.PairDotProd", opLen2(ssa.OpPairDotProdInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x32.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x32.SaturatedSub", opLen2(ssa.OpSaturatedSubInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x32.Sub", opLen2(ssa.OpSubInt16x32, types.TypeVec512), sys.AMD64) @@ -276,6 +279,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Int16x8.MulLow", opLen2(ssa.OpMulLowInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x8.NotEqual", opLen2(ssa.OpNotEqualInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x8.Or", opLen2(ssa.OpOrInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.PairDotProd", opLen2(ssa.OpPairDotProdInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x8.PairwiseAdd", opLen2(ssa.OpPairwiseAddInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x8.PairwiseSub", opLen2(ssa.OpPairwiseSubInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x8.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt16x8, types.TypeVec128), sys.AMD64) @@ -469,6 +473,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint16x16.PairwiseSub", opLen2(ssa.OpPairwiseSubUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x16.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x16.SaturatedSub", opLen2(ssa.OpSaturatedSubUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x16.Sub", opLen2(ssa.OpSubUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x16.Xor", opLen2(ssa.OpXorUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x32.Add", opLen2(ssa.OpAddUint16x32, types.TypeVec512), sys.AMD64) @@ -485,6 +490,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint16x32.NotEqual", opLen2(ssa.OpNotEqualUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint16x32.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint16x32.SaturatedSub", opLen2(ssa.OpSaturatedSubUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint16x32.Sub", opLen2(ssa.OpSubUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint16x8.Add", opLen2(ssa.OpAddUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x8.And", opLen2(ssa.OpAndUint16x8, types.TypeVec128), sys.AMD64) @@ -505,6 +511,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint16x8.PairwiseSub", opLen2(ssa.OpPairwiseSubUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x8.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x8.SaturatedSub", opLen2(ssa.OpSaturatedSubUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x8.Sub", opLen2(ssa.OpSubUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x8.Xor", opLen2(ssa.OpXorUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x16.Add", opLen2(ssa.OpAddUint32x16, types.TypeVec512), sys.AMD64) @@ -622,6 +629,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint8x16.Or", opLen2(ssa.OpOrUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x16.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x16.SaturatedSub", opLen2(ssa.OpSaturatedSubUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x16.Sub", opLen2(ssa.OpSubUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x16.Xor", opLen2(ssa.OpXorUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x32.Add", opLen2(ssa.OpAddUint8x32, types.TypeVec256), sys.AMD64) @@ -640,6 +648,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint8x32.Or", opLen2(ssa.OpOrUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x32.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x32.SaturatedSub", opLen2(ssa.OpSaturatedSubUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x32.Sub", opLen2(ssa.OpSubUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x32.Xor", opLen2(ssa.OpXorUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x64.Add", opLen2(ssa.OpAddUint8x64, types.TypeVec512), sys.AMD64) @@ -775,6 +784,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Int16x16.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x16.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedPairDotProd", opLen3(ssa.OpMaskedPairDotProdInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x16.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x16.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x16.MaskedSub", opLen3(ssa.OpMaskedSubInt16x16, types.TypeVec256), sys.AMD64) @@ -789,6 +799,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Int16x32.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x32.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x32.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedPairDotProd", opLen3(ssa.OpMaskedPairDotProdInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x32.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x32.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x32.MaskedSub", opLen3(ssa.OpMaskedSubInt16x32, types.TypeVec512), sys.AMD64) @@ -803,6 +814,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Int16x8.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x8.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedPairDotProd", opLen3(ssa.OpMaskedPairDotProdInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x8.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x8.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x8.MaskedSub", opLen3(ssa.OpMaskedSubInt16x8, types.TypeVec128), sys.AMD64) @@ -948,6 +960,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint16x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x16.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x16.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedSaturatedUnsignedSignedPairDotProd", opLen3(ssa.OpMaskedSaturatedUnsignedSignedPairDotProdUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x16.MaskedSub", opLen3(ssa.OpMaskedSubUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x32.MaskedAdd", opLen3(ssa.OpMaskedAddUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint16x32.MaskedAverage", opLen3(ssa.OpMaskedAverageUint16x32, types.TypeVec512), sys.AMD64) @@ -962,6 +975,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint16x32.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint16x32.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint16x32.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedSaturatedUnsignedSignedPairDotProd", opLen3(ssa.OpMaskedSaturatedUnsignedSignedPairDotProdUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint16x32.MaskedSub", opLen3(ssa.OpMaskedSubUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint16x8.MaskedAdd", opLen3(ssa.OpMaskedAddUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x8.MaskedAverage", opLen3(ssa.OpMaskedAverageUint16x8, types.TypeVec128), sys.AMD64) @@ -976,6 +990,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint16x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x8.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x8.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedSaturatedUnsignedSignedPairDotProd", opLen3(ssa.OpMaskedSaturatedUnsignedSignedPairDotProdUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x8.MaskedSub", opLen3(ssa.OpMaskedSubUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x16.MaskedAdd", opLen3(ssa.OpMaskedAddUint32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint32x16.MaskedAnd", opLen3(ssa.OpMaskedAndUint32x16, types.TypeVec512), sys.AMD64) diff --git a/src/simd/stubs_amd64.go b/src/simd/stubs_amd64.go index d433b67c9aefc4..aaa647991908ad 100644 --- a/src/simd/stubs_amd64.go +++ b/src/simd/stubs_amd64.go @@ -486,6 +486,11 @@ func (x Float64x2) AndNot(y Float64x2) Float64x2 // Asm: VDIVPD, CPU Feature: AVX func (x Float64x2) Div(y Float64x2) Float64x2 +// Multiply all the elements and add them together; the result is a broadcast of the dot product +// +// Asm: VDPPD, CPU Feature: AVX +func (x Float64x2) DotProdBroadcast(y Float64x2) Float64x2 + // Predicate immediate is 0 if it has; // // Asm: VCMPPD, CPU Feature: AVX @@ -792,6 +797,11 @@ func (x Int16x16) NotEqual(y Int16x16) Mask16x16 // Asm: VPOR, CPU Feature: AVX2 func (x Int16x16) Or(y Int16x16) Int16x16 +// Multiply the elements and add the pairs together, yielding a vector of half as many elements with twice the input element size +// +// Asm: VPMADDWD, CPU Feature: AVX2 +func (x Int16x16) PairDotProd(y Int16x16) Int32x8 + // Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target // // Asm: VPHADDW, CPU Feature: AVX2 @@ -882,6 +892,11 @@ func (x Int16x32) MulLow(y Int16x32) Int16x32 // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) NotEqual(y Int16x32) Mask16x32 +// Multiply the elements and add the pairs together, yielding a vector of half as many elements with twice the input element size +// +// Asm: VPMADDWD, CPU Feature: AVX512EVEX +func (x Int16x32) PairDotProd(y Int16x32) Int32x16 + // Asm: VPADDSW, CPU Feature: AVX512EVEX func (x Int16x32) SaturatedAdd(y Int16x32) Int16x32 @@ -955,6 +970,11 @@ func (x Int16x8) NotEqual(y Int16x8) Mask16x8 // Asm: VPOR, CPU Feature: AVX func (x Int16x8) Or(y Int16x8) Int16x8 +// Multiply the elements and add the pairs together, yielding a vector of half as many elements with twice the input element size +// +// Asm: VPMADDWD, CPU Feature: AVX +func (x Int16x8) PairDotProd(y Int16x8) Int32x4 + // Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target // // Asm: VPHADDW, CPU Feature: AVX @@ -1698,6 +1718,11 @@ func (x Uint16x16) SaturatedAdd(y Uint16x16) Uint16x16 // Asm: VPSUBSW, CPU Feature: AVX2 func (x Uint16x16) SaturatedSub(y Uint16x16) Uint16x16 +// Multiply the elements and add the pairs together with saturation, yielding a vector of half as many elements with twice the input element size +// +// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX +func (x Uint16x16) SaturatedUnsignedSignedPairDotProd(y Int16x16) Int16x16 + // Asm: VPSUBW, CPU Feature: AVX2 func (x Uint16x16) Sub(y Uint16x16) Uint16x16 @@ -1760,6 
+1785,11 @@ func (x Uint16x32) SaturatedAdd(y Uint16x32) Uint16x32 // Asm: VPSUBSW, CPU Feature: AVX512EVEX func (x Uint16x32) SaturatedSub(y Uint16x32) Uint16x32 +// Multiply the elements and add the pairs together with saturation, yielding a vector of half as many elements with twice the input element size +// +// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX +func (x Uint16x32) SaturatedUnsignedSignedPairDotProd(y Int16x32) Int16x32 + // Asm: VPSUBW, CPU Feature: AVX512EVEX func (x Uint16x32) Sub(y Uint16x32) Uint16x32 @@ -1838,6 +1868,11 @@ func (x Uint16x8) SaturatedAdd(y Uint16x8) Uint16x8 // Asm: VPSUBSW, CPU Feature: AVX func (x Uint16x8) SaturatedSub(y Uint16x8) Uint16x8 +// Multiply the elements and add the pairs together with saturation, yielding a vector of half as many elements with twice the input element size +// +// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX +func (x Uint16x8) SaturatedUnsignedSignedPairDotProd(y Int16x8) Int16x8 + // Asm: VPSUBW, CPU Feature: AVX func (x Uint16x8) Sub(y Uint16x8) Uint16x8 @@ -2291,6 +2326,11 @@ func (x Uint8x16) SaturatedAdd(y Uint8x16) Uint8x16 // Asm: VPSUBSB, CPU Feature: AVX func (x Uint8x16) SaturatedSub(y Uint8x16) Uint8x16 +// Multiply the elements and add the pairs together with saturation, yielding a vector of half as many elements with twice the input element size +// +// Asm: VPMADDUBSW, CPU Feature: AVX +func (x Uint8x16) SaturatedUnsignedSignedPairDotProd(y Int8x16) Int16x8 + // Asm: VPSUBB, CPU Feature: AVX func (x Uint8x16) Sub(y Uint8x16) Uint8x16 @@ -2357,6 +2397,11 @@ func (x Uint8x32) SaturatedAdd(y Uint8x32) Uint8x32 // Asm: VPSUBSB, CPU Feature: AVX2 func (x Uint8x32) SaturatedSub(y Uint8x32) Uint8x32 +// Multiply the elements and add the pairs together with saturation, yielding a vector of half as many elements with twice the input element size +// +// Asm: VPMADDUBSW, CPU Feature: AVX2 +func (x Uint8x32) SaturatedUnsignedSignedPairDotProd(y Int8x32) Int16x16 + // Asm: VPSUBB, CPU Feature: AVX2 func (x Uint8x32) Sub(y Uint8x32) Uint8x32 @@ -2874,6 +2919,11 @@ func (x Int16x16) MaskedMulLow(y Int16x16, z Mask16x16) Int16x16 // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x16) MaskedNotEqual(y Int16x16, z Mask16x16) Mask16x16 +// Multiply the elements and add the pairs together, yielding a vector of half as many elements with twice the input element size +// +// Asm: VPMADDWD, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedPairDotProd(y Int16x16, z Mask16x16) Int32x8 + // Asm: VPADDSW, CPU Feature: AVX512EVEX func (x Int16x16) MaskedSaturatedAdd(y Int16x16, z Mask16x16) Int16x16 @@ -2932,6 +2982,11 @@ func (x Int16x32) MaskedMulLow(y Int16x32, z Mask16x32) Int16x32 // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) MaskedNotEqual(y Int16x32, z Mask16x32) Mask16x32 +// Multiply the elements and add the pairs together, yielding a vector of half as many elements with twice the input element size +// +// Asm: VPMADDWD, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedPairDotProd(y Int16x32, z Mask16x32) Int32x16 + // Asm: VPADDSW, CPU Feature: AVX512EVEX func (x Int16x32) MaskedSaturatedAdd(y Int16x32, z Mask16x32) Int16x32 @@ -2990,6 +3045,11 @@ func (x Int16x8) MaskedMulLow(y Int16x8, z Mask16x8) Int16x8 // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x8) MaskedNotEqual(y Int16x8, z Mask16x8) Mask16x8 +// Multiply the elements and add the pairs together, yielding a vector of half as many elements with twice the input element size +// +// Asm: VPMADDWD, CPU Feature: AVX512EVEX +func (x Int16x8) 
MaskedPairDotProd(y Int16x8, z Mask16x8) Int32x4 + // Asm: VPADDSW, CPU Feature: AVX512EVEX func (x Int16x8) MaskedSaturatedAdd(y Int16x8, z Mask16x8) Int16x8 @@ -3565,6 +3625,11 @@ func (x Uint16x16) MaskedSaturatedAdd(y Uint16x16, z Mask16x16) Uint16x16 // Asm: VPSUBSW, CPU Feature: AVX512EVEX func (x Uint16x16) MaskedSaturatedSub(y Uint16x16, z Mask16x16) Uint16x16 +// Multiply the elements and add the pairs together with saturation, yielding a vector of half as many elements with twice the input element size +// +// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedSaturatedUnsignedSignedPairDotProd(y Int16x16, z Mask16x16) Int16x16 + // Asm: VPSUBW, CPU Feature: AVX512EVEX func (x Uint16x16) MaskedSub(y Uint16x16, z Mask16x16) Uint16x16 @@ -3621,6 +3686,11 @@ func (x Uint16x32) MaskedSaturatedAdd(y Uint16x32, z Mask16x32) Uint16x32 // Asm: VPSUBSW, CPU Feature: AVX512EVEX func (x Uint16x32) MaskedSaturatedSub(y Uint16x32, z Mask16x32) Uint16x32 +// Multiply the elements and add the pairs together with saturation, yielding a vector of half as many elements with twice the input element size +// +// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedSaturatedUnsignedSignedPairDotProd(y Int16x32, z Mask16x32) Int16x32 + // Asm: VPSUBW, CPU Feature: AVX512EVEX func (x Uint16x32) MaskedSub(y Uint16x32, z Mask16x32) Uint16x32 @@ -3677,6 +3747,11 @@ func (x Uint16x8) MaskedSaturatedAdd(y Uint16x8, z Mask16x8) Uint16x8 // Asm: VPSUBSW, CPU Feature: AVX512EVEX func (x Uint16x8) MaskedSaturatedSub(y Uint16x8, z Mask16x8) Uint16x8 +// Multiply the elements and add the pairs together with saturation, yielding a vector of half as many elements with twice the input element size +// +// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedSaturatedUnsignedSignedPairDotProd(y Int16x8, z Mask16x8) Int16x8 + // Asm: VPSUBW, CPU Feature: AVX512EVEX func (x Uint16x8) MaskedSub(y Uint16x8, z Mask16x8) Uint16x8 From 3df41c856e09cb0111604865a652f946379aad7a Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Thu, 12 Jun 2025 16:42:02 +0000 Subject: [PATCH 023/139] [dev.simd] simd: update documentations This CL is generated by CL 679955. Change-Id: Iff92222bfb493730e147e5b7d2cd940d7ca50f1d Reviewed-on: https://go-review.googlesource.com/c/go/+/681297 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/simd/stubs_amd64.go | 3242 +++++++++++++++++++++++++++++++++------ 1 file changed, 2781 insertions(+), 461 deletions(-) diff --git a/src/simd/stubs_amd64.go b/src/simd/stubs_amd64.go index aaa647991908ad..83edaf2270025e 100644 --- a/src/simd/stubs_amd64.go +++ b/src/simd/stubs_amd64.go @@ -4,4822 +4,7142 @@ package simd +// ApproximateReciprocal computes an approximate reciprocal of each element. +// // Asm: VRCP14PS, CPU Feature: AVX512EVEX func (x Float32x16) ApproximateReciprocal() Float32x16 +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// // Asm: VRSQRT14PS, CPU Feature: AVX512EVEX func (x Float32x16) ApproximateReciprocalOfSqrt() Float32x16 +// Sqrt computes the square root of each element. +// // Asm: VSQRTPS, CPU Feature: AVX512EVEX func (x Float32x16) Sqrt() Float32x16 +// ApproximateReciprocal computes an approximate reciprocal of each element. +// // Asm: VRCP14PS, CPU Feature: AVX512EVEX func (x Float32x4) ApproximateReciprocal() Float32x4 +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. 
+// // Asm: VRSQRTPS, CPU Feature: AVX func (x Float32x4) ApproximateReciprocalOfSqrt() Float32x4 +// Ceil rounds elements up to the nearest integer. +// Const Immediate = 2. +// // Asm: VROUNDPS, CPU Feature: AVX func (x Float32x4) Ceil() Float32x4 +// Floor rounds elements down to the nearest integer. +// Const Immediate = 1. +// // Asm: VROUNDPS, CPU Feature: AVX func (x Float32x4) Floor() Float32x4 +// Round rounds elements to the nearest integer. +// Const Immediate = 0. +// // Asm: VROUNDPS, CPU Feature: AVX func (x Float32x4) Round() Float32x4 +// Sqrt computes the square root of each element. +// // Asm: VSQRTPS, CPU Feature: AVX func (x Float32x4) Sqrt() Float32x4 +// Trunc truncates elements towards zero. +// Const Immediate = 3. +// // Asm: VROUNDPS, CPU Feature: AVX func (x Float32x4) Trunc() Float32x4 +// ApproximateReciprocal computes an approximate reciprocal of each element. +// // Asm: VRCP14PS, CPU Feature: AVX512EVEX func (x Float32x8) ApproximateReciprocal() Float32x8 +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// // Asm: VRSQRTPS, CPU Feature: AVX func (x Float32x8) ApproximateReciprocalOfSqrt() Float32x8 +// Ceil rounds elements up to the nearest integer. +// Const Immediate = 2. +// // Asm: VROUNDPS, CPU Feature: AVX func (x Float32x8) Ceil() Float32x8 +// Floor rounds elements down to the nearest integer. +// Const Immediate = 1. +// // Asm: VROUNDPS, CPU Feature: AVX func (x Float32x8) Floor() Float32x8 +// Round rounds elements to the nearest integer. +// Const Immediate = 0. +// // Asm: VROUNDPS, CPU Feature: AVX func (x Float32x8) Round() Float32x8 +// Sqrt computes the square root of each element. +// // Asm: VSQRTPS, CPU Feature: AVX func (x Float32x8) Sqrt() Float32x8 +// Trunc truncates elements towards zero. +// Const Immediate = 3. +// // Asm: VROUNDPS, CPU Feature: AVX func (x Float32x8) Trunc() Float32x8 +// ApproximateReciprocal computes an approximate reciprocal of each element. +// // Asm: VRCP14PD, CPU Feature: AVX512EVEX func (x Float64x2) ApproximateReciprocal() Float64x2 +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// // Asm: VRSQRT14PD, CPU Feature: AVX512EVEX func (x Float64x2) ApproximateReciprocalOfSqrt() Float64x2 +// Ceil rounds elements up to the nearest integer. +// Const Immediate = 2. +// // Asm: VROUNDPD, CPU Feature: AVX func (x Float64x2) Ceil() Float64x2 +// Floor rounds elements down to the nearest integer. +// Const Immediate = 1. +// // Asm: VROUNDPD, CPU Feature: AVX func (x Float64x2) Floor() Float64x2 +// Round rounds elements to the nearest integer. +// Const Immediate = 0. +// // Asm: VROUNDPD, CPU Feature: AVX func (x Float64x2) Round() Float64x2 +// Sqrt computes the square root of each element. +// // Asm: VSQRTPD, CPU Feature: AVX func (x Float64x2) Sqrt() Float64x2 +// Trunc truncates elements towards zero. +// Const Immediate = 3. +// // Asm: VROUNDPD, CPU Feature: AVX func (x Float64x2) Trunc() Float64x2 +// ApproximateReciprocal computes an approximate reciprocal of each element. +// // Asm: VRCP14PD, CPU Feature: AVX512EVEX func (x Float64x4) ApproximateReciprocal() Float64x4 +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// // Asm: VRSQRT14PD, CPU Feature: AVX512EVEX func (x Float64x4) ApproximateReciprocalOfSqrt() Float64x4 +// Ceil rounds elements up to the nearest integer. +// Const Immediate = 2. 
+// // Asm: VROUNDPD, CPU Feature: AVX func (x Float64x4) Ceil() Float64x4 +// Floor rounds elements down to the nearest integer. +// Const Immediate = 1. +// // Asm: VROUNDPD, CPU Feature: AVX func (x Float64x4) Floor() Float64x4 +// Round rounds elements to the nearest integer. +// Const Immediate = 0. +// // Asm: VROUNDPD, CPU Feature: AVX func (x Float64x4) Round() Float64x4 +// Sqrt computes the square root of each element. +// // Asm: VSQRTPD, CPU Feature: AVX func (x Float64x4) Sqrt() Float64x4 +// Trunc truncates elements towards zero. +// Const Immediate = 3. +// // Asm: VROUNDPD, CPU Feature: AVX func (x Float64x4) Trunc() Float64x4 +// ApproximateReciprocal computes an approximate reciprocal of each element. +// // Asm: VRCP14PD, CPU Feature: AVX512EVEX func (x Float64x8) ApproximateReciprocal() Float64x8 +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// // Asm: VRSQRT14PD, CPU Feature: AVX512EVEX func (x Float64x8) ApproximateReciprocalOfSqrt() Float64x8 +// Sqrt computes the square root of each element. +// // Asm: VSQRTPD, CPU Feature: AVX512EVEX func (x Float64x8) Sqrt() Float64x8 +// Absolute computes the absolute value of each element. +// // Asm: VPABSW, CPU Feature: AVX2 func (x Int16x16) Absolute() Int16x16 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTW, CPU Feature: AVX512EVEX func (x Int16x16) PopCount() Int16x16 +// Absolute computes the absolute value of each element. +// // Asm: VPABSW, CPU Feature: AVX512EVEX func (x Int16x32) Absolute() Int16x32 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTW, CPU Feature: AVX512EVEX func (x Int16x32) PopCount() Int16x32 +// Absolute computes the absolute value of each element. +// // Asm: VPABSW, CPU Feature: AVX func (x Int16x8) Absolute() Int16x8 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTW, CPU Feature: AVX512EVEX func (x Int16x8) PopCount() Int16x8 +// Absolute computes the absolute value of each element. +// // Asm: VPABSD, CPU Feature: AVX512EVEX func (x Int32x16) Absolute() Int32x16 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTD, CPU Feature: AVX512EVEX func (x Int32x16) PopCount() Int32x16 +// Absolute computes the absolute value of each element. +// // Asm: VPABSD, CPU Feature: AVX func (x Int32x4) Absolute() Int32x4 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTD, CPU Feature: AVX512EVEX func (x Int32x4) PopCount() Int32x4 +// Absolute computes the absolute value of each element. +// // Asm: VPABSD, CPU Feature: AVX2 func (x Int32x8) Absolute() Int32x8 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTD, CPU Feature: AVX512EVEX func (x Int32x8) PopCount() Int32x8 +// Absolute computes the absolute value of each element. +// // Asm: VPABSQ, CPU Feature: AVX512EVEX func (x Int64x2) Absolute() Int64x2 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTQ, CPU Feature: AVX512EVEX func (x Int64x2) PopCount() Int64x2 +// Absolute computes the absolute value of each element. +// // Asm: VPABSQ, CPU Feature: AVX512EVEX func (x Int64x4) Absolute() Int64x4 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTQ, CPU Feature: AVX512EVEX func (x Int64x4) PopCount() Int64x4 +// Absolute computes the absolute value of each element. 
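The Ceil, Floor, Round, and Trunc comments above each record a "Const Immediate" value; these are the rounding-mode immediates passed to VROUNDPS/VROUNDPD. Collected for reference as Go constants (the names are illustrative, not identifiers from the patch):

// Rounding immediates noted in the comments above.
const (
	roundNearest = 0 // Round
	roundDown    = 1 // Floor
	roundUp      = 2 // Ceil
	roundToZero  = 3 // Trunc
)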
+// // Asm: VPABSQ, CPU Feature: AVX512EVEX func (x Int64x8) Absolute() Int64x8 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTQ, CPU Feature: AVX512EVEX func (x Int64x8) PopCount() Int64x8 +// Absolute computes the absolute value of each element. +// // Asm: VPABSB, CPU Feature: AVX func (x Int8x16) Absolute() Int8x16 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTB, CPU Feature: AVX512EVEX func (x Int8x16) PopCount() Int8x16 +// Absolute computes the absolute value of each element. +// // Asm: VPABSB, CPU Feature: AVX2 func (x Int8x32) Absolute() Int8x32 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTB, CPU Feature: AVX512EVEX func (x Int8x32) PopCount() Int8x32 +// Absolute computes the absolute value of each element. +// // Asm: VPABSB, CPU Feature: AVX512EVEX func (x Int8x64) Absolute() Int8x64 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTB, CPU Feature: AVX512EVEX func (x Int8x64) PopCount() Int8x64 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTW, CPU Feature: AVX512EVEX func (x Uint16x16) PopCount() Uint16x16 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTW, CPU Feature: AVX512EVEX func (x Uint16x32) PopCount() Uint16x32 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTW, CPU Feature: AVX512EVEX func (x Uint16x8) PopCount() Uint16x8 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTD, CPU Feature: AVX512EVEX func (x Uint32x16) PopCount() Uint32x16 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTD, CPU Feature: AVX512EVEX func (x Uint32x4) PopCount() Uint32x4 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTD, CPU Feature: AVX512EVEX func (x Uint32x8) PopCount() Uint32x8 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTQ, CPU Feature: AVX512EVEX func (x Uint64x2) PopCount() Uint64x2 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTQ, CPU Feature: AVX512EVEX func (x Uint64x4) PopCount() Uint64x4 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTQ, CPU Feature: AVX512EVEX func (x Uint64x8) PopCount() Uint64x8 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTB, CPU Feature: AVX512EVEX func (x Uint8x16) PopCount() Uint8x16 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTB, CPU Feature: AVX512EVEX func (x Uint8x32) PopCount() Uint8x32 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTB, CPU Feature: AVX512EVEX func (x Uint8x64) PopCount() Uint8x64 +// Add adds corresponding elements of two vectors. +// // Asm: VADDPS, CPU Feature: AVX512EVEX func (x Float32x16) Add(y Float32x16) Float32x16 +// And performs a masked bitwise AND operation between two vectors. +// // Asm: VANDPS, CPU Feature: AVX512EVEX func (x Float32x16) And(y Float32x16) Float32x16 +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// // Asm: VANDNPS, CPU Feature: AVX512EVEX func (x Float32x16) AndNot(y Float32x16) Float32x16 +// Div divides elements of two vectors. +// // Asm: VDIVPS, CPU Feature: AVX512EVEX func (x Float32x16) Div(y Float32x16) Float32x16 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. 
// // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) Equal(y Float32x16) Mask32x16 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) Greater(y Float32x16) Mask32x16 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) GreaterEqual(y Float32x16) Mask32x16 -// Predicate immediate is 3 if it has; Returns mask element True if either one of the input\'s element is Nan; Please use this method as x\.IsNan\(x\) to check x only; +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Const Immediate = 3. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) IsNan(y Float32x16) Mask32x16 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) Less(y Float32x16) Mask32x16 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) LessEqual(y Float32x16) Mask32x16 +// ApproximateReciprocal computes an approximate reciprocal of each element. +// // Asm: VRCP14PS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedApproximateReciprocal(y Mask32x16) Float32x16 +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// // Asm: VRSQRT14PS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedApproximateReciprocalOfSqrt(y Mask32x16) Float32x16 +// Sqrt computes the square root of each element. +// // Asm: VSQRTPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedSqrt(y Mask32x16) Float32x16 +// Max computes the maximum of corresponding elements. +// // Asm: VMAXPS, CPU Feature: AVX512EVEX func (x Float32x16) Max(y Float32x16) Float32x16 +// Min computes the minimum of corresponding elements. +// // Asm: VMINPS, CPU Feature: AVX512EVEX func (x Float32x16) Min(y Float32x16) Float32x16 +// Mul multiplies corresponding elements of two vectors, masked. +// // Asm: VMULPS, CPU Feature: AVX512EVEX func (x Float32x16) Mul(y Float32x16) Float32x16 +// MulByPowOf2 multiplies elements by a power of 2. +// // Asm: VSCALEFPS, CPU Feature: AVX512EVEX func (x Float32x16) MulByPowOf2(y Float32x16) Float32x16 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) NotEqual(y Float32x16) Mask32x16 +// Or performs a masked bitwise OR operation between two vectors. +// // Asm: VORPS, CPU Feature: AVX512EVEX func (x Float32x16) Or(y Float32x16) Float32x16 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VADDPS, CPU Feature: AVX512EVEX func (x Float32x16) Sub(y Float32x16) Float32x16 +// Xor performs a masked bitwise XOR operation between two vectors. +// // Asm: VXORPS, CPU Feature: AVX512EVEX func (x Float32x16) Xor(y Float32x16) Float32x16 +// Add adds corresponding elements of two vectors. +// // Asm: VADDPS, CPU Feature: AVX func (x Float32x4) Add(y Float32x4) Float32x4 +// AddSub subtracts even elements and adds odd elements of two vectors. +// // Asm: VADDSUBPS, CPU Feature: AVX func (x Float32x4) AddSub(y Float32x4) Float32x4 +// And performs a bitwise AND operation between two vectors. 
+// // Asm: VANDPS, CPU Feature: AVX func (x Float32x4) And(y Float32x4) Float32x4 +// AndNot performs a bitwise AND NOT operation between two vectors. +// // Asm: VANDNPS, CPU Feature: AVX func (x Float32x4) AndNot(y Float32x4) Float32x4 +// Div divides elements of two vectors. +// // Asm: VDIVPS, CPU Feature: AVX func (x Float32x4) Div(y Float32x4) Float32x4 -// Predicate immediate is 0 if it has; +// Equal compares for equality. +// Const Immediate = 0. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x4) Equal(y Float32x4) Mask32x4 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x4) Greater(y Float32x4) Mask32x4 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x4) GreaterEqual(y Float32x4) Mask32x4 -// Predicate immediate is 3 if it has; Returns mask element True if either one of the input\'s element is Nan; Please use this method as x\.IsNan\(x\) to check x only; +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Const Immediate = 3. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x4) IsNan(y Float32x4) Mask32x4 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x4) Less(y Float32x4) Mask32x4 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x4) LessEqual(y Float32x4) Mask32x4 +// ApproximateReciprocal computes an approximate reciprocal of each element. +// // Asm: VRCP14PS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedApproximateReciprocal(y Mask32x4) Float32x4 +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// // Asm: VRSQRT14PS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedApproximateReciprocalOfSqrt(y Mask32x4) Float32x4 +// Sqrt computes the square root of each element. +// // Asm: VSQRTPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedSqrt(y Mask32x4) Float32x4 +// Max computes the maximum of corresponding elements. +// // Asm: VMAXPS, CPU Feature: AVX func (x Float32x4) Max(y Float32x4) Float32x4 +// Min computes the minimum of corresponding elements. +// // Asm: VMINPS, CPU Feature: AVX func (x Float32x4) Min(y Float32x4) Float32x4 +// Mul multiplies corresponding elements of two vectors. +// // Asm: VMULPS, CPU Feature: AVX func (x Float32x4) Mul(y Float32x4) Float32x4 +// MulByPowOf2 multiplies elements by a power of 2. +// // Asm: VSCALEFPS, CPU Feature: AVX512EVEX func (x Float32x4) MulByPowOf2(y Float32x4) Float32x4 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x4) NotEqual(y Float32x4) Mask32x4 +// Or performs a bitwise OR operation between two vectors. +// // Asm: VORPS, CPU Feature: AVX func (x Float32x4) Or(y Float32x4) Float32x4 -// Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. 
// // Asm: VHADDPS, CPU Feature: AVX func (x Float32x4) PairwiseAdd(y Float32x4) Float32x4 -// Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. // // Asm: VHSUBPS, CPU Feature: AVX func (x Float32x4) PairwiseSub(y Float32x4) Float32x4 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VADDPS, CPU Feature: AVX func (x Float32x4) Sub(y Float32x4) Float32x4 +// Xor performs a bitwise XOR operation between two vectors. +// // Asm: VXORPS, CPU Feature: AVX func (x Float32x4) Xor(y Float32x4) Float32x4 +// Add adds corresponding elements of two vectors. +// // Asm: VADDPS, CPU Feature: AVX func (x Float32x8) Add(y Float32x8) Float32x8 +// AddSub subtracts even elements and adds odd elements of two vectors. +// // Asm: VADDSUBPS, CPU Feature: AVX func (x Float32x8) AddSub(y Float32x8) Float32x8 +// And performs a bitwise AND operation between two vectors. +// // Asm: VANDPS, CPU Feature: AVX func (x Float32x8) And(y Float32x8) Float32x8 +// AndNot performs a bitwise AND NOT operation between two vectors. +// // Asm: VANDNPS, CPU Feature: AVX func (x Float32x8) AndNot(y Float32x8) Float32x8 +// Div divides elements of two vectors. +// // Asm: VDIVPS, CPU Feature: AVX func (x Float32x8) Div(y Float32x8) Float32x8 -// Predicate immediate is 0 if it has; +// Equal compares for equality. +// Const Immediate = 0. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x8) Equal(y Float32x8) Mask32x8 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x8) Greater(y Float32x8) Mask32x8 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x8) GreaterEqual(y Float32x8) Mask32x8 -// Predicate immediate is 3 if it has; Returns mask element True if either one of the input\'s element is Nan; Please use this method as x\.IsNan\(x\) to check x only; +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Const Immediate = 3. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x8) IsNan(y Float32x8) Mask32x8 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x8) Less(y Float32x8) Mask32x8 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x8) LessEqual(y Float32x8) Mask32x8 +// ApproximateReciprocal computes an approximate reciprocal of each element. +// // Asm: VRCP14PS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedApproximateReciprocal(y Mask32x8) Float32x8 +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// // Asm: VRSQRT14PS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedApproximateReciprocalOfSqrt(y Mask32x8) Float32x8 +// Sqrt computes the square root of each element. +// // Asm: VSQRTPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedSqrt(y Mask32x8) Float32x8 +// Max computes the maximum of corresponding elements. 
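The updated PairwiseAdd and PairwiseSub comments above spell out the element ordering: the sums (or differences) of y's pairs occupy the low positions of the result and those of x's pairs the high positions. For the 4-lane case the statement reads directly as the following sketch (illustrative only):

// pairwiseAdd4 models PairwiseAdd for 4-lane vectors per the comment above:
// result = [y0+y1, y2+y3, x0+x1, x2+x3].
func pairwiseAdd4(x, y [4]float32) [4]float32 {
	return [4]float32{y[0] + y[1], y[2] + y[3], x[0] + x[1], x[2] + x[3]}
}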
+// // Asm: VMAXPS, CPU Feature: AVX func (x Float32x8) Max(y Float32x8) Float32x8 +// Min computes the minimum of corresponding elements. +// // Asm: VMINPS, CPU Feature: AVX func (x Float32x8) Min(y Float32x8) Float32x8 +// Mul multiplies corresponding elements of two vectors. +// // Asm: VMULPS, CPU Feature: AVX func (x Float32x8) Mul(y Float32x8) Float32x8 +// MulByPowOf2 multiplies elements by a power of 2. +// // Asm: VSCALEFPS, CPU Feature: AVX512EVEX func (x Float32x8) MulByPowOf2(y Float32x8) Float32x8 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x8) NotEqual(y Float32x8) Mask32x8 +// Or performs a bitwise OR operation between two vectors. +// // Asm: VORPS, CPU Feature: AVX func (x Float32x8) Or(y Float32x8) Float32x8 -// Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // // Asm: VHADDPS, CPU Feature: AVX func (x Float32x8) PairwiseAdd(y Float32x8) Float32x8 -// Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. // // Asm: VHSUBPS, CPU Feature: AVX func (x Float32x8) PairwiseSub(y Float32x8) Float32x8 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VADDPS, CPU Feature: AVX func (x Float32x8) Sub(y Float32x8) Float32x8 +// Xor performs a bitwise XOR operation between two vectors. +// // Asm: VXORPS, CPU Feature: AVX func (x Float32x8) Xor(y Float32x8) Float32x8 +// Add adds corresponding elements of two vectors. +// // Asm: VADDPD, CPU Feature: AVX func (x Float64x2) Add(y Float64x2) Float64x2 +// AddSub subtracts even elements and adds odd elements of two vectors. +// // Asm: VADDSUBPD, CPU Feature: AVX func (x Float64x2) AddSub(y Float64x2) Float64x2 +// And performs a bitwise AND operation between two vectors. +// // Asm: VANDPD, CPU Feature: AVX func (x Float64x2) And(y Float64x2) Float64x2 +// AndNot performs a bitwise AND NOT operation between two vectors. +// // Asm: VANDNPD, CPU Feature: AVX func (x Float64x2) AndNot(y Float64x2) Float64x2 +// Div divides elements of two vectors. +// // Asm: VDIVPD, CPU Feature: AVX func (x Float64x2) Div(y Float64x2) Float64x2 -// Multiply all the elements and add them together; the result is a broadcast of the dot product +// DotProdBroadcast multiplies all elements and broadcasts the sum. // // Asm: VDPPD, CPU Feature: AVX func (x Float64x2) DotProdBroadcast(y Float64x2) Float64x2 -// Predicate immediate is 0 if it has; +// Equal compares for equality. +// Const Immediate = 0. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x2) Equal(y Float64x2) Mask64x2 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x2) Greater(y Float64x2) Mask64x2 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. 
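The comparison methods above each note a "Const Immediate"; these are the VCMPPS/VCMPPD predicate immediates. Gathered in one place as an illustrative summary of those comments (the variable name is not an identifier from the patch):

// Predicate immediates recorded in the comparison comments.
var cmpPredicate = map[string]uint8{
	"Equal":        0,
	"Less":         1,
	"LessEqual":    2,
	"IsNan":        3, // unordered: true if either element is NaN
	"NotEqual":     4,
	"GreaterEqual": 5,
	"Greater":      6,
}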
// // Asm: VCMPPD, CPU Feature: AVX func (x Float64x2) GreaterEqual(y Float64x2) Mask64x2 -// Predicate immediate is 3 if it has; Returns mask element True if either one of the input\'s element is Nan; Please use this method as x\.IsNan\(x\) to check x only; +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Const Immediate = 3. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x2) IsNan(y Float64x2) Mask64x2 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x2) Less(y Float64x2) Mask64x2 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x2) LessEqual(y Float64x2) Mask64x2 +// ApproximateReciprocal computes an approximate reciprocal of each element. +// // Asm: VRCP14PD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedApproximateReciprocal(y Mask64x2) Float64x2 +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// // Asm: VRSQRT14PD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedApproximateReciprocalOfSqrt(y Mask64x2) Float64x2 +// Sqrt computes the square root of each element. +// // Asm: VSQRTPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedSqrt(y Mask64x2) Float64x2 +// Max computes the maximum of corresponding elements. +// // Asm: VMAXPD, CPU Feature: AVX func (x Float64x2) Max(y Float64x2) Float64x2 +// Min computes the minimum of corresponding elements. +// // Asm: VMINPD, CPU Feature: AVX func (x Float64x2) Min(y Float64x2) Float64x2 +// Mul multiplies corresponding elements of two vectors. +// // Asm: VMULPD, CPU Feature: AVX func (x Float64x2) Mul(y Float64x2) Float64x2 +// MulByPowOf2 multiplies elements by a power of 2. +// // Asm: VSCALEFPD, CPU Feature: AVX512EVEX func (x Float64x2) MulByPowOf2(y Float64x2) Float64x2 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x2) NotEqual(y Float64x2) Mask64x2 +// Or performs a bitwise OR operation between two vectors. +// // Asm: VORPD, CPU Feature: AVX func (x Float64x2) Or(y Float64x2) Float64x2 -// Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // // Asm: VHADDPD, CPU Feature: AVX func (x Float64x2) PairwiseAdd(y Float64x2) Float64x2 -// Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. // // Asm: VHSUBPD, CPU Feature: AVX func (x Float64x2) PairwiseSub(y Float64x2) Float64x2 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VADDPD, CPU Feature: AVX func (x Float64x2) Sub(y Float64x2) Float64x2 +// Xor performs a bitwise XOR operation between two vectors. +// // Asm: VXORPD, CPU Feature: AVX func (x Float64x2) Xor(y Float64x2) Float64x2 +// Add adds corresponding elements of two vectors. 
+// // Asm: VADDPD, CPU Feature: AVX func (x Float64x4) Add(y Float64x4) Float64x4 +// AddSub subtracts even elements and adds odd elements of two vectors. +// // Asm: VADDSUBPD, CPU Feature: AVX func (x Float64x4) AddSub(y Float64x4) Float64x4 +// And performs a bitwise AND operation between two vectors. +// // Asm: VANDPD, CPU Feature: AVX func (x Float64x4) And(y Float64x4) Float64x4 +// AndNot performs a bitwise AND NOT operation between two vectors. +// // Asm: VANDNPD, CPU Feature: AVX func (x Float64x4) AndNot(y Float64x4) Float64x4 +// Div divides elements of two vectors. +// // Asm: VDIVPD, CPU Feature: AVX func (x Float64x4) Div(y Float64x4) Float64x4 -// Predicate immediate is 0 if it has; +// Equal compares for equality. +// Const Immediate = 0. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x4) Equal(y Float64x4) Mask64x4 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x4) Greater(y Float64x4) Mask64x4 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x4) GreaterEqual(y Float64x4) Mask64x4 -// Predicate immediate is 3 if it has; Returns mask element True if either one of the input\'s element is Nan; Please use this method as x\.IsNan\(x\) to check x only; +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Const Immediate = 3. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x4) IsNan(y Float64x4) Mask64x4 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x4) Less(y Float64x4) Mask64x4 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x4) LessEqual(y Float64x4) Mask64x4 +// ApproximateReciprocal computes an approximate reciprocal of each element. +// // Asm: VRCP14PD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedApproximateReciprocal(y Mask64x4) Float64x4 +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// // Asm: VRSQRT14PD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedApproximateReciprocalOfSqrt(y Mask64x4) Float64x4 +// Sqrt computes the square root of each element. +// // Asm: VSQRTPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedSqrt(y Mask64x4) Float64x4 +// Max computes the maximum of corresponding elements. +// // Asm: VMAXPD, CPU Feature: AVX func (x Float64x4) Max(y Float64x4) Float64x4 +// Min computes the minimum of corresponding elements. +// // Asm: VMINPD, CPU Feature: AVX func (x Float64x4) Min(y Float64x4) Float64x4 +// Mul multiplies corresponding elements of two vectors. +// // Asm: VMULPD, CPU Feature: AVX func (x Float64x4) Mul(y Float64x4) Float64x4 +// MulByPowOf2 multiplies elements by a power of 2. +// // Asm: VSCALEFPD, CPU Feature: AVX512EVEX func (x Float64x4) MulByPowOf2(y Float64x4) Float64x4 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x4) NotEqual(y Float64x4) Mask64x4 +// Or performs a bitwise OR operation between two vectors. 
+// // Asm: VORPD, CPU Feature: AVX func (x Float64x4) Or(y Float64x4) Float64x4 -// Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // // Asm: VHADDPD, CPU Feature: AVX func (x Float64x4) PairwiseAdd(y Float64x4) Float64x4 -// Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. // // Asm: VHSUBPD, CPU Feature: AVX func (x Float64x4) PairwiseSub(y Float64x4) Float64x4 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VADDPD, CPU Feature: AVX func (x Float64x4) Sub(y Float64x4) Float64x4 +// Xor performs a bitwise XOR operation between two vectors. +// // Asm: VXORPD, CPU Feature: AVX func (x Float64x4) Xor(y Float64x4) Float64x4 +// Add adds corresponding elements of two vectors. +// // Asm: VADDPD, CPU Feature: AVX512EVEX func (x Float64x8) Add(y Float64x8) Float64x8 +// And performs a masked bitwise AND operation between two vectors. +// // Asm: VANDPD, CPU Feature: AVX512EVEX func (x Float64x8) And(y Float64x8) Float64x8 +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// // Asm: VANDNPD, CPU Feature: AVX512EVEX func (x Float64x8) AndNot(y Float64x8) Float64x8 +// Div divides elements of two vectors. +// // Asm: VDIVPD, CPU Feature: AVX512EVEX func (x Float64x8) Div(y Float64x8) Float64x8 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) Equal(y Float64x8) Mask64x8 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) Greater(y Float64x8) Mask64x8 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) GreaterEqual(y Float64x8) Mask64x8 -// Predicate immediate is 3 if it has; Returns mask element True if either one of the input\'s element is Nan; Please use this method as x\.IsNan\(x\) to check x only; +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Const Immediate = 3. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) IsNan(y Float64x8) Mask64x8 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) Less(y Float64x8) Mask64x8 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) LessEqual(y Float64x8) Mask64x8 +// ApproximateReciprocal computes an approximate reciprocal of each element. +// // Asm: VRCP14PD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedApproximateReciprocal(y Mask64x8) Float64x8 +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. 
+// // Asm: VRSQRT14PD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedApproximateReciprocalOfSqrt(y Mask64x8) Float64x8 +// Sqrt computes the square root of each element. +// // Asm: VSQRTPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedSqrt(y Mask64x8) Float64x8 +// Max computes the maximum of corresponding elements. +// // Asm: VMAXPD, CPU Feature: AVX512EVEX func (x Float64x8) Max(y Float64x8) Float64x8 +// Min computes the minimum of corresponding elements. +// // Asm: VMINPD, CPU Feature: AVX512EVEX func (x Float64x8) Min(y Float64x8) Float64x8 +// Mul multiplies corresponding elements of two vectors, masked. +// // Asm: VMULPD, CPU Feature: AVX512EVEX func (x Float64x8) Mul(y Float64x8) Float64x8 +// MulByPowOf2 multiplies elements by a power of 2. +// // Asm: VSCALEFPD, CPU Feature: AVX512EVEX func (x Float64x8) MulByPowOf2(y Float64x8) Float64x8 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) NotEqual(y Float64x8) Mask64x8 +// Or performs a masked bitwise OR operation between two vectors. +// // Asm: VORPD, CPU Feature: AVX512EVEX func (x Float64x8) Or(y Float64x8) Float64x8 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VADDPD, CPU Feature: AVX512EVEX func (x Float64x8) Sub(y Float64x8) Float64x8 +// Xor performs a masked bitwise XOR operation between two vectors. +// // Asm: VXORPD, CPU Feature: AVX512EVEX func (x Float64x8) Xor(y Float64x8) Float64x8 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDW, CPU Feature: AVX2 func (x Int16x16) Add(y Int16x16) Int16x16 +// And performs a bitwise AND operation between two vectors. +// // Asm: VPAND, CPU Feature: AVX2 func (x Int16x16) And(y Int16x16) Int16x16 +// AndNot performs a bitwise AND NOT operation between two vectors. +// // Asm: VPANDN, CPU Feature: AVX2 func (x Int16x16) AndNot(y Int16x16) Int16x16 -// Predicate immediate is 0 if it has; +// Equal compares for equality. +// Const Immediate = 0. // // Asm: VPCMPEQW, CPU Feature: AVX2 func (x Int16x16) Equal(y Int16x16) Mask16x16 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPGTW, CPU Feature: AVX2 func (x Int16x16) Greater(y Int16x16) Mask16x16 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x16) GreaterEqual(y Int16x16) Mask16x16 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x16) Less(y Int16x16) Mask16x16 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x16) LessEqual(y Int16x16) Mask16x16 +// Absolute computes the absolute value of each element. +// // Asm: VPABSW, CPU Feature: AVX512EVEX func (x Int16x16) MaskedAbsolute(y Mask16x16) Int16x16 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTW, CPU Feature: AVX512EVEX func (x Int16x16) MaskedPopCount(y Mask16x16) Int16x16 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXSW, CPU Feature: AVX2 func (x Int16x16) Max(y Int16x16) Int16x16 +// Min computes the minimum of corresponding elements. 
+// // Asm: VPMINSW, CPU Feature: AVX2 func (x Int16x16) Min(y Int16x16) Int16x16 -// Multiplies the elements from the two sources of size X at index i, store the high X bits of the result of size 2X at index i +// MulHigh multiplies elements and stores the high part of the result. // // Asm: VPMULHW, CPU Feature: AVX2 func (x Int16x16) MulHigh(y Int16x16) Int16x16 -// Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +// MulLow multiplies elements and stores the low part of the result. // // Asm: VPMULLW, CPU Feature: AVX2 func (x Int16x16) MulLow(y Int16x16) Int16x16 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x16) NotEqual(y Int16x16) Mask16x16 +// Or performs a bitwise OR operation between two vectors. +// // Asm: VPOR, CPU Feature: AVX2 func (x Int16x16) Or(y Int16x16) Int16x16 -// Multiply the elements and add the pairs together, yielding a vector of half as many elements with twice the input element size +// PairDotProd multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDWD, CPU Feature: AVX2 func (x Int16x16) PairDotProd(y Int16x16) Int32x8 -// Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // // Asm: VPHADDW, CPU Feature: AVX2 func (x Int16x16) PairwiseAdd(y Int16x16) Int16x16 -// Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. // // Asm: VPHSUBW, CPU Feature: AVX2 func (x Int16x16) PairwiseSub(y Int16x16) Int16x16 +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// // Asm: VPADDSW, CPU Feature: AVX2 func (x Int16x16) SaturatedAdd(y Int16x16) Int16x16 -// Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target; With saturation +// SaturatedPairwiseAdd horizontally adds adjacent pairs of elements with saturation. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // // Asm: VPHADDSW, CPU Feature: AVX2 func (x Int16x16) SaturatedPairwiseAdd(y Int16x16) Int16x16 -// Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target; With saturation +// SaturatedPairwiseSub horizontally subtracts adjacent pairs of elements with saturation. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. // // Asm: VPHSUBSW, CPU Feature: AVX2 func (x Int16x16) SaturatedPairwiseSub(y Int16x16) Int16x16 +// SaturatedSub subtracts corresponding elements of two vectors with saturation. 
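// Illustrative scalar sketch (not part of the generated API) of the
// PairDotProd semantics documented above for Int16x16: adjacent int16 pairs
// are multiplied and summed into one widened int32 lane, halving the lane
// count. The function name is hypothetical.
func pairDotProd(x, y [16]int16) (res [8]int32) {
	for i := range res {
		res[i] = int32(x[2*i])*int32(y[2*i]) + int32(x[2*i+1])*int32(y[2*i+1])
	}
	return res
}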
+// // Asm: VPSUBSW, CPU Feature: AVX2 func (x Int16x16) SaturatedSub(y Int16x16) Int16x16 +// Sign returns the product of the first operand with -1, 0, or 1, +// whichever constant is nearest to the value of the second operand. +// // Asm: VPSIGNW, CPU Feature: AVX2 func (x Int16x16) Sign(y Int16x16) Int16x16 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBW, CPU Feature: AVX2 func (x Int16x16) Sub(y Int16x16) Int16x16 +// Xor performs a bitwise XOR operation between two vectors. +// // Asm: VPXOR, CPU Feature: AVX2 func (x Int16x16) Xor(y Int16x16) Int16x16 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDW, CPU Feature: AVX512EVEX func (x Int16x32) Add(y Int16x32) Int16x32 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPEQW, CPU Feature: AVX512EVEX func (x Int16x32) Equal(y Int16x32) Mask16x32 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPGTW, CPU Feature: AVX512EVEX func (x Int16x32) Greater(y Int16x32) Mask16x32 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) GreaterEqual(y Int16x32) Mask16x32 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) Less(y Int16x32) Mask16x32 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) LessEqual(y Int16x32) Mask16x32 +// Absolute computes the absolute value of each element. +// // Asm: VPABSW, CPU Feature: AVX512EVEX func (x Int16x32) MaskedAbsolute(y Mask16x32) Int16x32 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTW, CPU Feature: AVX512EVEX func (x Int16x32) MaskedPopCount(y Mask16x32) Int16x32 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXSW, CPU Feature: AVX512EVEX func (x Int16x32) Max(y Int16x32) Int16x32 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINSW, CPU Feature: AVX512EVEX func (x Int16x32) Min(y Int16x32) Int16x32 -// Multiplies the elements from the two sources of size X at index i, store the high X bits of the result of size 2X at index i +// MulHigh multiplies elements and stores the high part of the result, masked. // // Asm: VPMULHW, CPU Feature: AVX512EVEX func (x Int16x32) MulHigh(y Int16x32) Int16x32 -// Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +// MulLow multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLW, CPU Feature: AVX512EVEX func (x Int16x32) MulLow(y Int16x32) Int16x32 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) NotEqual(y Int16x32) Mask16x32 -// Multiply the elements and add the pairs together, yielding a vector of half as many elements with twice the input element size +// PairDotProd multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. 
// // Asm: VPMADDWD, CPU Feature: AVX512EVEX func (x Int16x32) PairDotProd(y Int16x32) Int32x16 +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// // Asm: VPADDSW, CPU Feature: AVX512EVEX func (x Int16x32) SaturatedAdd(y Int16x32) Int16x32 +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// // Asm: VPSUBSW, CPU Feature: AVX512EVEX func (x Int16x32) SaturatedSub(y Int16x32) Int16x32 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBW, CPU Feature: AVX512EVEX func (x Int16x32) Sub(y Int16x32) Int16x32 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDW, CPU Feature: AVX func (x Int16x8) Add(y Int16x8) Int16x8 +// And performs a bitwise AND operation between two vectors. +// // Asm: VPAND, CPU Feature: AVX func (x Int16x8) And(y Int16x8) Int16x8 +// AndNot performs a bitwise AND NOT operation between two vectors. +// // Asm: VPANDN, CPU Feature: AVX func (x Int16x8) AndNot(y Int16x8) Int16x8 -// Predicate immediate is 0 if it has; +// Equal compares for equality. +// Const Immediate = 0. // // Asm: VPCMPEQW, CPU Feature: AVX func (x Int16x8) Equal(y Int16x8) Mask16x8 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPGTW, CPU Feature: AVX func (x Int16x8) Greater(y Int16x8) Mask16x8 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x8) GreaterEqual(y Int16x8) Mask16x8 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x8) Less(y Int16x8) Mask16x8 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x8) LessEqual(y Int16x8) Mask16x8 +// Absolute computes the absolute value of each element. +// // Asm: VPABSW, CPU Feature: AVX512EVEX func (x Int16x8) MaskedAbsolute(y Mask16x8) Int16x8 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTW, CPU Feature: AVX512EVEX func (x Int16x8) MaskedPopCount(y Mask16x8) Int16x8 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXSW, CPU Feature: AVX func (x Int16x8) Max(y Int16x8) Int16x8 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINSW, CPU Feature: AVX func (x Int16x8) Min(y Int16x8) Int16x8 -// Multiplies the elements from the two sources of size X at index i, store the high X bits of the result of size 2X at index i +// MulHigh multiplies elements and stores the high part of the result. // // Asm: VPMULHW, CPU Feature: AVX func (x Int16x8) MulHigh(y Int16x8) Int16x8 -// Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +// MulLow multiplies elements and stores the low part of the result. // // Asm: VPMULLW, CPU Feature: AVX func (x Int16x8) MulLow(y Int16x8) Int16x8 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x8) NotEqual(y Int16x8) Mask16x8 +// Or performs a bitwise OR operation between two vectors. 
+// // Asm: VPOR, CPU Feature: AVX func (x Int16x8) Or(y Int16x8) Int16x8 -// Multiply the elements and add the pairs together, yielding a vector of half as many elements with twice the input element size +// PairDotProd multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDWD, CPU Feature: AVX func (x Int16x8) PairDotProd(y Int16x8) Int32x4 -// Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // // Asm: VPHADDW, CPU Feature: AVX func (x Int16x8) PairwiseAdd(y Int16x8) Int16x8 -// Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. // // Asm: VPHSUBW, CPU Feature: AVX func (x Int16x8) PairwiseSub(y Int16x8) Int16x8 +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// // Asm: VPADDSW, CPU Feature: AVX func (x Int16x8) SaturatedAdd(y Int16x8) Int16x8 -// Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target; With saturation +// SaturatedPairwiseAdd horizontally adds adjacent pairs of elements with saturation. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // // Asm: VPHADDSW, CPU Feature: AVX func (x Int16x8) SaturatedPairwiseAdd(y Int16x8) Int16x8 -// Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target; With saturation +// SaturatedPairwiseSub horizontally subtracts adjacent pairs of elements with saturation. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. // // Asm: VPHSUBSW, CPU Feature: AVX func (x Int16x8) SaturatedPairwiseSub(y Int16x8) Int16x8 +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// // Asm: VPSUBSW, CPU Feature: AVX func (x Int16x8) SaturatedSub(y Int16x8) Int16x8 +// Sign returns the product of the first operand with -1, 0, or 1, +// whichever constant is nearest to the value of the second operand. +// // Asm: VPSIGNW, CPU Feature: AVX func (x Int16x8) Sign(y Int16x8) Int16x8 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBW, CPU Feature: AVX func (x Int16x8) Sub(y Int16x8) Int16x8 +// Xor performs a bitwise XOR operation between two vectors. +// // Asm: VPXOR, CPU Feature: AVX func (x Int16x8) Xor(y Int16x8) Int16x8 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDD, CPU Feature: AVX512EVEX func (x Int32x16) Add(y Int32x16) Int32x16 +// And performs a masked bitwise AND operation between two vectors. +// // Asm: VPANDD, CPU Feature: AVX512EVEX func (x Int32x16) And(y Int32x16) Int32x16 +// AndNot performs a masked bitwise AND NOT operation between two vectors. 
+// // Asm: VPANDND, CPU Feature: AVX512EVEX func (x Int32x16) AndNot(y Int32x16) Int32x16 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPEQD, CPU Feature: AVX512EVEX func (x Int32x16) Equal(y Int32x16) Mask32x16 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPGTD, CPU Feature: AVX512EVEX func (x Int32x16) Greater(y Int32x16) Mask32x16 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) GreaterEqual(y Int32x16) Mask32x16 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) Less(y Int32x16) Mask32x16 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) LessEqual(y Int32x16) Mask32x16 +// Absolute computes the absolute value of each element. +// // Asm: VPABSD, CPU Feature: AVX512EVEX func (x Int32x16) MaskedAbsolute(y Mask32x16) Int32x16 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTD, CPU Feature: AVX512EVEX func (x Int32x16) MaskedPopCount(y Mask32x16) Int32x16 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXSD, CPU Feature: AVX512EVEX func (x Int32x16) Max(y Int32x16) Int32x16 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINSD, CPU Feature: AVX512EVEX func (x Int32x16) Min(y Int32x16) Int32x16 -// Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +// MulLow multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLD, CPU Feature: AVX512EVEX func (x Int32x16) MulLow(y Int32x16) Int32x16 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) NotEqual(y Int32x16) Mask32x16 +// Or performs a masked bitwise OR operation between two vectors. +// // Asm: VPORD, CPU Feature: AVX512EVEX func (x Int32x16) Or(y Int32x16) Int32x16 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBD, CPU Feature: AVX512EVEX func (x Int32x16) Sub(y Int32x16) Int32x16 +// Xor performs a masked bitwise XOR operation between two vectors. +// // Asm: VPXORD, CPU Feature: AVX512EVEX func (x Int32x16) Xor(y Int32x16) Int32x16 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDD, CPU Feature: AVX func (x Int32x4) Add(y Int32x4) Int32x4 +// And performs a bitwise AND operation between two vectors. +// // Asm: VPAND, CPU Feature: AVX func (x Int32x4) And(y Int32x4) Int32x4 +// AndNot performs a bitwise AND NOT operation between two vectors. +// // Asm: VPANDN, CPU Feature: AVX func (x Int32x4) AndNot(y Int32x4) Int32x4 -// Predicate immediate is 0 if it has; +// Equal compares for equality. +// Const Immediate = 0. // // Asm: VPCMPEQD, CPU Feature: AVX func (x Int32x4) Equal(y Int32x4) Mask32x4 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPGTD, CPU Feature: AVX func (x Int32x4) Greater(y Int32x4) Mask32x4 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. 
// // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x4) GreaterEqual(y Int32x4) Mask32x4 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x4) Less(y Int32x4) Mask32x4 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x4) LessEqual(y Int32x4) Mask32x4 +// Absolute computes the absolute value of each element. +// // Asm: VPABSD, CPU Feature: AVX512EVEX func (x Int32x4) MaskedAbsolute(y Mask32x4) Int32x4 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTD, CPU Feature: AVX512EVEX func (x Int32x4) MaskedPopCount(y Mask32x4) Int32x4 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXSD, CPU Feature: AVX func (x Int32x4) Max(y Int32x4) Int32x4 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINSD, CPU Feature: AVX func (x Int32x4) Min(y Int32x4) Int32x4 -// Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +// MulEvenWiden multiplies even-indexed elements, widening the result. +// Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULDQ, CPU Feature: AVX func (x Int32x4) MulEvenWiden(y Int32x4) Int64x2 -// Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +// MulLow multiplies elements and stores the low part of the result. // // Asm: VPMULLD, CPU Feature: AVX func (x Int32x4) MulLow(y Int32x4) Int32x4 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x4) NotEqual(y Int32x4) Mask32x4 +// Or performs a bitwise OR operation between two vectors. +// // Asm: VPOR, CPU Feature: AVX func (x Int32x4) Or(y Int32x4) Int32x4 -// Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // // Asm: VPHADDD, CPU Feature: AVX func (x Int32x4) PairwiseAdd(y Int32x4) Int32x4 -// Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. // // Asm: VPHSUBD, CPU Feature: AVX func (x Int32x4) PairwiseSub(y Int32x4) Int32x4 +// Sign returns the product of the first operand with -1, 0, or 1, +// whichever constant is nearest to the value of the second operand. +// // Asm: VPSIGND, CPU Feature: AVX func (x Int32x4) Sign(y Int32x4) Int32x4 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBD, CPU Feature: AVX func (x Int32x4) Sub(y Int32x4) Int32x4 +// Xor performs a bitwise XOR operation between two vectors. +// // Asm: VPXOR, CPU Feature: AVX func (x Int32x4) Xor(y Int32x4) Int32x4 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDD, CPU Feature: AVX2 func (x Int32x8) Add(y Int32x8) Int32x8 +// And performs a bitwise AND operation between two vectors. 
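// Illustrative scalar sketch (not part of the generated API) of the
// PairwiseAdd lane ordering documented above for Int32x4: following the
// comment's formula, the low half of the result holds y's pair sums and the
// high half holds x's pair sums. The function name is hypothetical.
func pairwiseAddInt32x4(x, y [4]int32) [4]int32 {
	return [4]int32{y[0] + y[1], y[2] + y[3], x[0] + x[1], x[2] + x[3]}
}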
+// // Asm: VPAND, CPU Feature: AVX2 func (x Int32x8) And(y Int32x8) Int32x8 +// AndNot performs a bitwise AND NOT operation between two vectors. +// // Asm: VPANDN, CPU Feature: AVX2 func (x Int32x8) AndNot(y Int32x8) Int32x8 -// Predicate immediate is 0 if it has; +// Equal compares for equality. +// Const Immediate = 0. // // Asm: VPCMPEQD, CPU Feature: AVX2 func (x Int32x8) Equal(y Int32x8) Mask32x8 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPGTD, CPU Feature: AVX2 func (x Int32x8) Greater(y Int32x8) Mask32x8 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x8) GreaterEqual(y Int32x8) Mask32x8 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x8) Less(y Int32x8) Mask32x8 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x8) LessEqual(y Int32x8) Mask32x8 +// Absolute computes the absolute value of each element. +// // Asm: VPABSD, CPU Feature: AVX512EVEX func (x Int32x8) MaskedAbsolute(y Mask32x8) Int32x8 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTD, CPU Feature: AVX512EVEX func (x Int32x8) MaskedPopCount(y Mask32x8) Int32x8 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXSD, CPU Feature: AVX2 func (x Int32x8) Max(y Int32x8) Int32x8 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINSD, CPU Feature: AVX2 func (x Int32x8) Min(y Int32x8) Int32x8 -// Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +// MulEvenWiden multiplies even-indexed elements, widening the result. +// Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULDQ, CPU Feature: AVX2 func (x Int32x8) MulEvenWiden(y Int32x8) Int64x4 -// Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +// MulLow multiplies elements and stores the low part of the result. // // Asm: VPMULLD, CPU Feature: AVX2 func (x Int32x8) MulLow(y Int32x8) Int32x8 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x8) NotEqual(y Int32x8) Mask32x8 +// Or performs a bitwise OR operation between two vectors. +// // Asm: VPOR, CPU Feature: AVX2 func (x Int32x8) Or(y Int32x8) Int32x8 -// Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // // Asm: VPHADDD, CPU Feature: AVX2 func (x Int32x8) PairwiseAdd(y Int32x8) Int32x8 -// Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. 
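// Illustrative scalar sketch (not part of the generated API) of the
// MulEvenWiden semantics documented above for Int32x8: only even-indexed
// lanes are multiplied, and each product is widened to int64
// (Result[i] = x.Even[i] * y.Even[i]). The function name is hypothetical.
func mulEvenWiden(x, y [8]int32) (res [4]int64) {
	for i := range res {
		res[i] = int64(x[2*i]) * int64(y[2*i])
	}
	return res
}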
// // Asm: VPHSUBD, CPU Feature: AVX2 func (x Int32x8) PairwiseSub(y Int32x8) Int32x8 +// Sign returns the product of the first operand with -1, 0, or 1, +// whichever constant is nearest to the value of the second operand. +// // Asm: VPSIGND, CPU Feature: AVX2 func (x Int32x8) Sign(y Int32x8) Int32x8 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBD, CPU Feature: AVX2 func (x Int32x8) Sub(y Int32x8) Int32x8 +// Xor performs a bitwise XOR operation between two vectors. +// // Asm: VPXOR, CPU Feature: AVX2 func (x Int32x8) Xor(y Int32x8) Int32x8 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDQ, CPU Feature: AVX func (x Int64x2) Add(y Int64x2) Int64x2 +// And performs a bitwise AND operation between two vectors. +// // Asm: VPAND, CPU Feature: AVX func (x Int64x2) And(y Int64x2) Int64x2 +// AndNot performs a bitwise AND NOT operation between two vectors. +// // Asm: VPANDN, CPU Feature: AVX func (x Int64x2) AndNot(y Int64x2) Int64x2 -// Predicate immediate is 0 if it has; +// Equal compares for equality. +// Const Immediate = 0. // // Asm: VPCMPEQQ, CPU Feature: AVX func (x Int64x2) Equal(y Int64x2) Mask64x2 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPGTQ, CPU Feature: AVX512EVEX func (x Int64x2) Greater(y Int64x2) Mask64x2 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x2) GreaterEqual(y Int64x2) Mask64x2 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x2) Less(y Int64x2) Mask64x2 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x2) LessEqual(y Int64x2) Mask64x2 +// Absolute computes the absolute value of each element. +// // Asm: VPABSQ, CPU Feature: AVX512EVEX func (x Int64x2) MaskedAbsolute(y Mask64x2) Int64x2 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTQ, CPU Feature: AVX512EVEX func (x Int64x2) MaskedPopCount(y Mask64x2) Int64x2 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXSQ, CPU Feature: AVX512EVEX func (x Int64x2) Max(y Int64x2) Int64x2 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINSQ, CPU Feature: AVX512EVEX func (x Int64x2) Min(y Int64x2) Int64x2 -// Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULDQ, CPU Feature: AVX512EVEX func (x Int64x2) MulEvenWiden(y Int64x2) Int64x2 -// Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +// MulLow multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLQ, CPU Feature: AVX512EVEX func (x Int64x2) MulLow(y Int64x2) Int64x2 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x2) NotEqual(y Int64x2) Mask64x2 +// Or performs a bitwise OR operation between two vectors. 
+// // Asm: VPOR, CPU Feature: AVX func (x Int64x2) Or(y Int64x2) Int64x2 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBQ, CPU Feature: AVX func (x Int64x2) Sub(y Int64x2) Int64x2 +// Xor performs a bitwise XOR operation between two vectors. +// // Asm: VPXOR, CPU Feature: AVX func (x Int64x2) Xor(y Int64x2) Int64x2 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDQ, CPU Feature: AVX2 func (x Int64x4) Add(y Int64x4) Int64x4 +// And performs a bitwise AND operation between two vectors. +// // Asm: VPAND, CPU Feature: AVX2 func (x Int64x4) And(y Int64x4) Int64x4 +// AndNot performs a bitwise AND NOT operation between two vectors. +// // Asm: VPANDN, CPU Feature: AVX2 func (x Int64x4) AndNot(y Int64x4) Int64x4 -// Predicate immediate is 0 if it has; +// Equal compares for equality. +// Const Immediate = 0. // // Asm: VPCMPEQQ, CPU Feature: AVX2 func (x Int64x4) Equal(y Int64x4) Mask64x4 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPGTQ, CPU Feature: AVX2 func (x Int64x4) Greater(y Int64x4) Mask64x4 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x4) GreaterEqual(y Int64x4) Mask64x4 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x4) Less(y Int64x4) Mask64x4 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x4) LessEqual(y Int64x4) Mask64x4 +// Absolute computes the absolute value of each element. +// // Asm: VPABSQ, CPU Feature: AVX512EVEX func (x Int64x4) MaskedAbsolute(y Mask64x4) Int64x4 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTQ, CPU Feature: AVX512EVEX func (x Int64x4) MaskedPopCount(y Mask64x4) Int64x4 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXSQ, CPU Feature: AVX512EVEX func (x Int64x4) Max(y Int64x4) Int64x4 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINSQ, CPU Feature: AVX512EVEX func (x Int64x4) Min(y Int64x4) Int64x4 -// Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULDQ, CPU Feature: AVX512EVEX func (x Int64x4) MulEvenWiden(y Int64x4) Int64x4 -// Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +// MulLow multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLQ, CPU Feature: AVX512EVEX func (x Int64x4) MulLow(y Int64x4) Int64x4 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x4) NotEqual(y Int64x4) Mask64x4 +// Or performs a bitwise OR operation between two vectors. +// // Asm: VPOR, CPU Feature: AVX2 func (x Int64x4) Or(y Int64x4) Int64x4 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBQ, CPU Feature: AVX2 func (x Int64x4) Sub(y Int64x4) Int64x4 +// Xor performs a bitwise XOR operation between two vectors. 
+// // Asm: VPXOR, CPU Feature: AVX2 func (x Int64x4) Xor(y Int64x4) Int64x4 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDQ, CPU Feature: AVX512EVEX func (x Int64x8) Add(y Int64x8) Int64x8 +// And performs a masked bitwise AND operation between two vectors. +// // Asm: VPANDQ, CPU Feature: AVX512EVEX func (x Int64x8) And(y Int64x8) Int64x8 +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// // Asm: VPANDNQ, CPU Feature: AVX512EVEX func (x Int64x8) AndNot(y Int64x8) Int64x8 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPEQQ, CPU Feature: AVX512EVEX func (x Int64x8) Equal(y Int64x8) Mask64x8 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPGTQ, CPU Feature: AVX512EVEX func (x Int64x8) Greater(y Int64x8) Mask64x8 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) GreaterEqual(y Int64x8) Mask64x8 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) Less(y Int64x8) Mask64x8 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) LessEqual(y Int64x8) Mask64x8 +// Absolute computes the absolute value of each element. +// // Asm: VPABSQ, CPU Feature: AVX512EVEX func (x Int64x8) MaskedAbsolute(y Mask64x8) Int64x8 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTQ, CPU Feature: AVX512EVEX func (x Int64x8) MaskedPopCount(y Mask64x8) Int64x8 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXSQ, CPU Feature: AVX512EVEX func (x Int64x8) Max(y Int64x8) Int64x8 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINSQ, CPU Feature: AVX512EVEX func (x Int64x8) Min(y Int64x8) Int64x8 -// Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULDQ, CPU Feature: AVX512EVEX func (x Int64x8) MulEvenWiden(y Int64x8) Int64x8 -// Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +// MulLow multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLQ, CPU Feature: AVX512EVEX func (x Int64x8) MulLow(y Int64x8) Int64x8 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) NotEqual(y Int64x8) Mask64x8 +// Or performs a masked bitwise OR operation between two vectors. +// // Asm: VPORQ, CPU Feature: AVX512EVEX func (x Int64x8) Or(y Int64x8) Int64x8 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBQ, CPU Feature: AVX512EVEX func (x Int64x8) Sub(y Int64x8) Int64x8 +// Xor performs a masked bitwise XOR operation between two vectors. +// // Asm: VPXORQ, CPU Feature: AVX512EVEX func (x Int64x8) Xor(y Int64x8) Int64x8 +// Add adds corresponding elements of two vectors. 
+// // Asm: VPADDB, CPU Feature: AVX func (x Int8x16) Add(y Int8x16) Int8x16 +// And performs a bitwise AND operation between two vectors. +// // Asm: VPAND, CPU Feature: AVX func (x Int8x16) And(y Int8x16) Int8x16 +// AndNot performs a bitwise AND NOT operation between two vectors. +// // Asm: VPANDN, CPU Feature: AVX func (x Int8x16) AndNot(y Int8x16) Int8x16 -// Predicate immediate is 0 if it has; +// Equal compares for equality. +// Const Immediate = 0. // // Asm: VPCMPEQB, CPU Feature: AVX func (x Int8x16) Equal(y Int8x16) Mask8x16 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPGTB, CPU Feature: AVX func (x Int8x16) Greater(y Int8x16) Mask8x16 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x16) GreaterEqual(y Int8x16) Mask8x16 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x16) Less(y Int8x16) Mask8x16 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x16) LessEqual(y Int8x16) Mask8x16 +// Absolute computes the absolute value of each element. +// // Asm: VPABSB, CPU Feature: AVX512EVEX func (x Int8x16) MaskedAbsolute(y Mask8x16) Int8x16 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTB, CPU Feature: AVX512EVEX func (x Int8x16) MaskedPopCount(y Mask8x16) Int8x16 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXSB, CPU Feature: AVX func (x Int8x16) Max(y Int8x16) Int8x16 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINSB, CPU Feature: AVX func (x Int8x16) Min(y Int8x16) Int8x16 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x16) NotEqual(y Int8x16) Mask8x16 +// Or performs a bitwise OR operation between two vectors. +// // Asm: VPOR, CPU Feature: AVX func (x Int8x16) Or(y Int8x16) Int8x16 +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// // Asm: VPADDSB, CPU Feature: AVX func (x Int8x16) SaturatedAdd(y Int8x16) Int8x16 +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// // Asm: VPSUBSB, CPU Feature: AVX func (x Int8x16) SaturatedSub(y Int8x16) Int8x16 +// Sign returns the product of the first operand with -1, 0, or 1, +// whichever constant is nearest to the value of the second operand. +// // Asm: VPSIGNB, CPU Feature: AVX func (x Int8x16) Sign(y Int8x16) Int8x16 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBB, CPU Feature: AVX func (x Int8x16) Sub(y Int8x16) Int8x16 +// Xor performs a bitwise XOR operation between two vectors. +// // Asm: VPXOR, CPU Feature: AVX func (x Int8x16) Xor(y Int8x16) Int8x16 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDB, CPU Feature: AVX2 func (x Int8x32) Add(y Int8x32) Int8x32 +// And performs a bitwise AND operation between two vectors. +// // Asm: VPAND, CPU Feature: AVX2 func (x Int8x32) And(y Int8x32) Int8x32 +// AndNot performs a bitwise AND NOT operation between two vectors. +// // Asm: VPANDN, CPU Feature: AVX2 func (x Int8x32) AndNot(y Int8x32) Int8x32 -// Predicate immediate is 0 if it has; +// Equal compares for equality. 
+// Const Immediate = 0. // // Asm: VPCMPEQB, CPU Feature: AVX2 func (x Int8x32) Equal(y Int8x32) Mask8x32 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPGTB, CPU Feature: AVX2 func (x Int8x32) Greater(y Int8x32) Mask8x32 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x32) GreaterEqual(y Int8x32) Mask8x32 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x32) Less(y Int8x32) Mask8x32 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x32) LessEqual(y Int8x32) Mask8x32 +// Absolute computes the absolute value of each element. +// // Asm: VPABSB, CPU Feature: AVX512EVEX func (x Int8x32) MaskedAbsolute(y Mask8x32) Int8x32 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTB, CPU Feature: AVX512EVEX func (x Int8x32) MaskedPopCount(y Mask8x32) Int8x32 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXSB, CPU Feature: AVX2 func (x Int8x32) Max(y Int8x32) Int8x32 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINSB, CPU Feature: AVX2 func (x Int8x32) Min(y Int8x32) Int8x32 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x32) NotEqual(y Int8x32) Mask8x32 +// Or performs a bitwise OR operation between two vectors. +// // Asm: VPOR, CPU Feature: AVX2 func (x Int8x32) Or(y Int8x32) Int8x32 +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// // Asm: VPADDSB, CPU Feature: AVX2 func (x Int8x32) SaturatedAdd(y Int8x32) Int8x32 +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// // Asm: VPSUBSB, CPU Feature: AVX2 func (x Int8x32) SaturatedSub(y Int8x32) Int8x32 +// Sign returns the product of the first operand with -1, 0, or 1, +// whichever constant is nearest to the value of the second operand. +// // Asm: VPSIGNB, CPU Feature: AVX2 func (x Int8x32) Sign(y Int8x32) Int8x32 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBB, CPU Feature: AVX2 func (x Int8x32) Sub(y Int8x32) Int8x32 +// Xor performs a bitwise XOR operation between two vectors. +// // Asm: VPXOR, CPU Feature: AVX2 func (x Int8x32) Xor(y Int8x32) Int8x32 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDB, CPU Feature: AVX512EVEX func (x Int8x64) Add(y Int8x64) Int8x64 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPEQB, CPU Feature: AVX512EVEX func (x Int8x64) Equal(y Int8x64) Mask8x64 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPGTB, CPU Feature: AVX512EVEX func (x Int8x64) Greater(y Int8x64) Mask8x64 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) GreaterEqual(y Int8x64) Mask8x64 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. 
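// Illustrative scalar sketch (not part of the generated API) of the Sign
// semantics documented above for the byte vectors: each result lane is x
// multiplied by -1, 0, or 1 according to the sign of the corresponding lane
// of y. The function name is hypothetical.
func signInt8(x, y [32]int8) (res [32]int8) {
	for i := range x {
		switch {
		case y[i] < 0:
			res[i] = -x[i]
		case y[i] == 0:
			res[i] = 0
		default:
			res[i] = x[i]
		}
	}
	return res
}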
// // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) Less(y Int8x64) Mask8x64 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) LessEqual(y Int8x64) Mask8x64 +// Absolute computes the absolute value of each element. +// // Asm: VPABSB, CPU Feature: AVX512EVEX func (x Int8x64) MaskedAbsolute(y Mask8x64) Int8x64 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTB, CPU Feature: AVX512EVEX func (x Int8x64) MaskedPopCount(y Mask8x64) Int8x64 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXSB, CPU Feature: AVX512EVEX func (x Int8x64) Max(y Int8x64) Int8x64 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINSB, CPU Feature: AVX512EVEX func (x Int8x64) Min(y Int8x64) Int8x64 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) NotEqual(y Int8x64) Mask8x64 +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// // Asm: VPADDSB, CPU Feature: AVX512EVEX func (x Int8x64) SaturatedAdd(y Int8x64) Int8x64 +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// // Asm: VPSUBSB, CPU Feature: AVX512EVEX func (x Int8x64) SaturatedSub(y Int8x64) Int8x64 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBB, CPU Feature: AVX512EVEX func (x Int8x64) Sub(y Int8x64) Int8x64 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDW, CPU Feature: AVX2 func (x Uint16x16) Add(y Uint16x16) Uint16x16 +// And performs a bitwise AND operation between two vectors. +// // Asm: VPAND, CPU Feature: AVX2 func (x Uint16x16) And(y Uint16x16) Uint16x16 +// AndNot performs a bitwise AND NOT operation between two vectors. +// // Asm: VPANDN, CPU Feature: AVX2 func (x Uint16x16) AndNot(y Uint16x16) Uint16x16 +// Average computes the rounded average of corresponding elements. +// // Asm: VPAVGW, CPU Feature: AVX2 func (x Uint16x16) Average(y Uint16x16) Uint16x16 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) Equal(y Uint16x16) Mask16x16 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) Greater(y Uint16x16) Mask16x16 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) GreaterEqual(y Uint16x16) Mask16x16 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) Less(y Uint16x16) Mask16x16 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) LessEqual(y Uint16x16) Mask16x16 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTW, CPU Feature: AVX512EVEX func (x Uint16x16) MaskedPopCount(y Mask16x16) Uint16x16 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXUW, CPU Feature: AVX2 func (x Uint16x16) Max(y Uint16x16) Uint16x16 +// Min computes the minimum of corresponding elements. 
+// // Asm: VPMINUW, CPU Feature: AVX2 func (x Uint16x16) Min(y Uint16x16) Uint16x16 -// Multiplies the elements from the two sources of size X at index i, store the high X bits of the result of size 2X at index i +// MulHigh multiplies elements and stores the high part of the result. // // Asm: VPMULHUW, CPU Feature: AVX2 func (x Uint16x16) MulHigh(y Uint16x16) Uint16x16 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) NotEqual(y Uint16x16) Mask16x16 +// Or performs a bitwise OR operation between two vectors. +// // Asm: VPOR, CPU Feature: AVX2 func (x Uint16x16) Or(y Uint16x16) Uint16x16 -// Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // // Asm: VPHADDW, CPU Feature: AVX2 func (x Uint16x16) PairwiseAdd(y Uint16x16) Uint16x16 -// Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. // // Asm: VPHSUBW, CPU Feature: AVX2 func (x Uint16x16) PairwiseSub(y Uint16x16) Uint16x16 +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// // Asm: VPADDSW, CPU Feature: AVX2 func (x Uint16x16) SaturatedAdd(y Uint16x16) Uint16x16 +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// // Asm: VPSUBSW, CPU Feature: AVX2 func (x Uint16x16) SaturatedSub(y Uint16x16) Uint16x16 -// Multiply the elements and add the pairs together with saturation, yielding a vector of half as many elements with twice the input element size +// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDUBSW, CPU Feature: AVX512EVEX func (x Uint16x16) SaturatedUnsignedSignedPairDotProd(y Int16x16) Int16x16 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBW, CPU Feature: AVX2 func (x Uint16x16) Sub(y Uint16x16) Uint16x16 +// Xor performs a bitwise XOR operation between two vectors. +// // Asm: VPXOR, CPU Feature: AVX2 func (x Uint16x16) Xor(y Uint16x16) Uint16x16 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDW, CPU Feature: AVX512EVEX func (x Uint16x32) Add(y Uint16x32) Uint16x32 +// Average computes the rounded average of corresponding elements. +// // Asm: VPAVGW, CPU Feature: AVX512EVEX func (x Uint16x32) Average(y Uint16x32) Uint16x32 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) Equal(y Uint16x32) Mask16x32 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) Greater(y Uint16x32) Mask16x32 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. 
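// Illustrative scalar sketch (not part of the generated API) of unsigned
// SaturatedAdd as documented above for Uint16x16: sums that would overflow
// uint16 clamp to 65535 instead of wrapping. The function name is
// hypothetical.
func saturatedAddU16(x, y [16]uint16) (res [16]uint16) {
	for i := range x {
		s := uint32(x[i]) + uint32(y[i])
		if s > 0xFFFF {
			s = 0xFFFF
		}
		res[i] = uint16(s)
	}
	return res
}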
// // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) GreaterEqual(y Uint16x32) Mask16x32 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) Less(y Uint16x32) Mask16x32 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) LessEqual(y Uint16x32) Mask16x32 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTW, CPU Feature: AVX512EVEX func (x Uint16x32) MaskedPopCount(y Mask16x32) Uint16x32 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXUW, CPU Feature: AVX512EVEX func (x Uint16x32) Max(y Uint16x32) Uint16x32 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINUW, CPU Feature: AVX512EVEX func (x Uint16x32) Min(y Uint16x32) Uint16x32 -// Multiplies the elements from the two sources of size X at index i, store the high X bits of the result of size 2X at index i +// MulHigh multiplies elements and stores the high part of the result, masked. // // Asm: VPMULHUW, CPU Feature: AVX512EVEX func (x Uint16x32) MulHigh(y Uint16x32) Uint16x32 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) NotEqual(y Uint16x32) Mask16x32 +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// // Asm: VPADDSW, CPU Feature: AVX512EVEX func (x Uint16x32) SaturatedAdd(y Uint16x32) Uint16x32 +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// // Asm: VPSUBSW, CPU Feature: AVX512EVEX func (x Uint16x32) SaturatedSub(y Uint16x32) Uint16x32 -// Multiply the elements and add the pairs together with saturation, yielding a vector of half as many elements with twice the input element size +// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDUBSW, CPU Feature: AVX512EVEX func (x Uint16x32) SaturatedUnsignedSignedPairDotProd(y Int16x32) Int16x32 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBW, CPU Feature: AVX512EVEX func (x Uint16x32) Sub(y Uint16x32) Uint16x32 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDW, CPU Feature: AVX func (x Uint16x8) Add(y Uint16x8) Uint16x8 +// And performs a bitwise AND operation between two vectors. +// // Asm: VPAND, CPU Feature: AVX func (x Uint16x8) And(y Uint16x8) Uint16x8 +// AndNot performs a bitwise AND NOT operation between two vectors. +// // Asm: VPANDN, CPU Feature: AVX func (x Uint16x8) AndNot(y Uint16x8) Uint16x8 +// Average computes the rounded average of corresponding elements. +// // Asm: VPAVGW, CPU Feature: AVX func (x Uint16x8) Average(y Uint16x8) Uint16x8 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) Equal(y Uint16x8) Mask16x8 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) Greater(y Uint16x8) Mask16x8 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. 
// // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) GreaterEqual(y Uint16x8) Mask16x8 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) Less(y Uint16x8) Mask16x8 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) LessEqual(y Uint16x8) Mask16x8 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTW, CPU Feature: AVX512EVEX func (x Uint16x8) MaskedPopCount(y Mask16x8) Uint16x8 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXUW, CPU Feature: AVX func (x Uint16x8) Max(y Uint16x8) Uint16x8 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINUW, CPU Feature: AVX func (x Uint16x8) Min(y Uint16x8) Uint16x8 -// Multiplies the elements from the two sources of size X at index i, store the high X bits of the result of size 2X at index i +// MulHigh multiplies elements and stores the high part of the result. // // Asm: VPMULHUW, CPU Feature: AVX func (x Uint16x8) MulHigh(y Uint16x8) Uint16x8 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) NotEqual(y Uint16x8) Mask16x8 +// Or performs a bitwise OR operation between two vectors. +// // Asm: VPOR, CPU Feature: AVX func (x Uint16x8) Or(y Uint16x8) Uint16x8 -// Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // // Asm: VPHADDW, CPU Feature: AVX func (x Uint16x8) PairwiseAdd(y Uint16x8) Uint16x8 -// Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. // // Asm: VPHSUBW, CPU Feature: AVX func (x Uint16x8) PairwiseSub(y Uint16x8) Uint16x8 +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// // Asm: VPADDSW, CPU Feature: AVX func (x Uint16x8) SaturatedAdd(y Uint16x8) Uint16x8 +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// // Asm: VPSUBSW, CPU Feature: AVX func (x Uint16x8) SaturatedSub(y Uint16x8) Uint16x8 -// Multiply the elements and add the pairs together with saturation, yielding a vector of half as many elements with twice the input element size +// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDUBSW, CPU Feature: AVX512EVEX func (x Uint16x8) SaturatedUnsignedSignedPairDotProd(y Int16x8) Int16x8 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBW, CPU Feature: AVX func (x Uint16x8) Sub(y Uint16x8) Uint16x8 +// Xor performs a bitwise XOR operation between two vectors. +// // Asm: VPXOR, CPU Feature: AVX func (x Uint16x8) Xor(y Uint16x8) Uint16x8 +// Add adds corresponding elements of two vectors. 
+// // Asm: VPADDD, CPU Feature: AVX512EVEX func (x Uint32x16) Add(y Uint32x16) Uint32x16 +// And performs a masked bitwise AND operation between two vectors. +// // Asm: VPANDD, CPU Feature: AVX512EVEX func (x Uint32x16) And(y Uint32x16) Uint32x16 +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// // Asm: VPANDND, CPU Feature: AVX512EVEX func (x Uint32x16) AndNot(y Uint32x16) Uint32x16 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) Equal(y Uint32x16) Mask32x16 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) Greater(y Uint32x16) Mask32x16 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) GreaterEqual(y Uint32x16) Mask32x16 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) Less(y Uint32x16) Mask32x16 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) LessEqual(y Uint32x16) Mask32x16 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTD, CPU Feature: AVX512EVEX func (x Uint32x16) MaskedPopCount(y Mask32x16) Uint32x16 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXUD, CPU Feature: AVX512EVEX func (x Uint32x16) Max(y Uint32x16) Uint32x16 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINUD, CPU Feature: AVX512EVEX func (x Uint32x16) Min(y Uint32x16) Uint32x16 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) NotEqual(y Uint32x16) Mask32x16 +// Or performs a masked bitwise OR operation between two vectors. +// // Asm: VPORD, CPU Feature: AVX512EVEX func (x Uint32x16) Or(y Uint32x16) Uint32x16 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBD, CPU Feature: AVX512EVEX func (x Uint32x16) Sub(y Uint32x16) Uint32x16 +// Xor performs a masked bitwise XOR operation between two vectors. +// // Asm: VPXORD, CPU Feature: AVX512EVEX func (x Uint32x16) Xor(y Uint32x16) Uint32x16 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDD, CPU Feature: AVX func (x Uint32x4) Add(y Uint32x4) Uint32x4 +// And performs a bitwise AND operation between two vectors. +// // Asm: VPAND, CPU Feature: AVX func (x Uint32x4) And(y Uint32x4) Uint32x4 +// AndNot performs a bitwise AND NOT operation between two vectors. +// // Asm: VPANDN, CPU Feature: AVX func (x Uint32x4) AndNot(y Uint32x4) Uint32x4 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) Equal(y Uint32x4) Mask32x4 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) Greater(y Uint32x4) Mask32x4 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. 
// // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) GreaterEqual(y Uint32x4) Mask32x4 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) Less(y Uint32x4) Mask32x4 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) LessEqual(y Uint32x4) Mask32x4 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTD, CPU Feature: AVX512EVEX func (x Uint32x4) MaskedPopCount(y Mask32x4) Uint32x4 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXUD, CPU Feature: AVX func (x Uint32x4) Max(y Uint32x4) Uint32x4 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINUD, CPU Feature: AVX func (x Uint32x4) Min(y Uint32x4) Uint32x4 -// Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +// MulEvenWiden multiplies even-indexed elements, widening the result. +// Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULUDQ, CPU Feature: AVX func (x Uint32x4) MulEvenWiden(y Uint32x4) Uint64x2 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) NotEqual(y Uint32x4) Mask32x4 +// Or performs a bitwise OR operation between two vectors. +// // Asm: VPOR, CPU Feature: AVX func (x Uint32x4) Or(y Uint32x4) Uint32x4 -// Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // // Asm: VPHADDD, CPU Feature: AVX func (x Uint32x4) PairwiseAdd(y Uint32x4) Uint32x4 -// Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. // // Asm: VPHSUBD, CPU Feature: AVX func (x Uint32x4) PairwiseSub(y Uint32x4) Uint32x4 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBD, CPU Feature: AVX func (x Uint32x4) Sub(y Uint32x4) Uint32x4 +// Xor performs a bitwise XOR operation between two vectors. +// // Asm: VPXOR, CPU Feature: AVX func (x Uint32x4) Xor(y Uint32x4) Uint32x4 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDD, CPU Feature: AVX2 func (x Uint32x8) Add(y Uint32x8) Uint32x8 +// And performs a bitwise AND operation between two vectors. +// // Asm: VPAND, CPU Feature: AVX2 func (x Uint32x8) And(y Uint32x8) Uint32x8 +// AndNot performs a bitwise AND NOT operation between two vectors. +// // Asm: VPANDN, CPU Feature: AVX2 func (x Uint32x8) AndNot(y Uint32x8) Uint32x8 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) Equal(y Uint32x8) Mask32x8 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. 
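A sketch of the widening multiply documented above, again written as if inside the simd package; only the even-indexed 32-bit lanes participate, and each product is widened to 64 bits:

func mulEvenSketch(x, y Uint32x4) Uint64x2 {
	// Per the MulEvenWiden comment: Result[i] = x.Even[i] * y.Even[i],
	// so a pair of Uint32x4 inputs yields a Uint64x2 of full products (VPMULUDQ).
	return x.MulEvenWiden(y)
}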
// // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) Greater(y Uint32x8) Mask32x8 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) GreaterEqual(y Uint32x8) Mask32x8 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) Less(y Uint32x8) Mask32x8 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) LessEqual(y Uint32x8) Mask32x8 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTD, CPU Feature: AVX512EVEX func (x Uint32x8) MaskedPopCount(y Mask32x8) Uint32x8 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXUD, CPU Feature: AVX2 func (x Uint32x8) Max(y Uint32x8) Uint32x8 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINUD, CPU Feature: AVX2 func (x Uint32x8) Min(y Uint32x8) Uint32x8 -// Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +// MulEvenWiden multiplies even-indexed elements, widening the result. +// Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULUDQ, CPU Feature: AVX2 func (x Uint32x8) MulEvenWiden(y Uint32x8) Uint64x4 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) NotEqual(y Uint32x8) Mask32x8 +// Or performs a bitwise OR operation between two vectors. +// // Asm: VPOR, CPU Feature: AVX2 func (x Uint32x8) Or(y Uint32x8) Uint32x8 -// Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // // Asm: VPHADDD, CPU Feature: AVX2 func (x Uint32x8) PairwiseAdd(y Uint32x8) Uint32x8 -// Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. // // Asm: VPHSUBD, CPU Feature: AVX2 func (x Uint32x8) PairwiseSub(y Uint32x8) Uint32x8 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBD, CPU Feature: AVX2 func (x Uint32x8) Sub(y Uint32x8) Uint32x8 +// Xor performs a bitwise XOR operation between two vectors. +// // Asm: VPXOR, CPU Feature: AVX2 func (x Uint32x8) Xor(y Uint32x8) Uint32x8 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDQ, CPU Feature: AVX func (x Uint64x2) Add(y Uint64x2) Uint64x2 +// And performs a bitwise AND operation between two vectors. +// // Asm: VPAND, CPU Feature: AVX func (x Uint64x2) And(y Uint64x2) Uint64x2 +// AndNot performs a bitwise AND NOT operation between two vectors. +// // Asm: VPANDN, CPU Feature: AVX func (x Uint64x2) AndNot(y Uint64x2) Uint64x2 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. 
// // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) Equal(y Uint64x2) Mask64x2 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) Greater(y Uint64x2) Mask64x2 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) GreaterEqual(y Uint64x2) Mask64x2 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) Less(y Uint64x2) Mask64x2 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) LessEqual(y Uint64x2) Mask64x2 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTQ, CPU Feature: AVX512EVEX func (x Uint64x2) MaskedPopCount(y Mask64x2) Uint64x2 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXUQ, CPU Feature: AVX512EVEX func (x Uint64x2) Max(y Uint64x2) Uint64x2 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINUQ, CPU Feature: AVX512EVEX func (x Uint64x2) Min(y Uint64x2) Uint64x2 -// Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULUDQ, CPU Feature: AVX512EVEX func (x Uint64x2) MulEvenWiden(y Uint64x2) Uint64x2 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) NotEqual(y Uint64x2) Mask64x2 +// Or performs a bitwise OR operation between two vectors. +// // Asm: VPOR, CPU Feature: AVX func (x Uint64x2) Or(y Uint64x2) Uint64x2 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBQ, CPU Feature: AVX func (x Uint64x2) Sub(y Uint64x2) Uint64x2 +// Xor performs a bitwise XOR operation between two vectors. +// // Asm: VPXOR, CPU Feature: AVX func (x Uint64x2) Xor(y Uint64x2) Uint64x2 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDQ, CPU Feature: AVX2 func (x Uint64x4) Add(y Uint64x4) Uint64x4 +// And performs a bitwise AND operation between two vectors. +// // Asm: VPAND, CPU Feature: AVX2 func (x Uint64x4) And(y Uint64x4) Uint64x4 +// AndNot performs a bitwise AND NOT operation between two vectors. +// // Asm: VPANDN, CPU Feature: AVX2 func (x Uint64x4) AndNot(y Uint64x4) Uint64x4 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) Equal(y Uint64x4) Mask64x4 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) Greater(y Uint64x4) Mask64x4 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) GreaterEqual(y Uint64x4) Mask64x4 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. 
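A sketch of the compare-then-mask pattern these comments describe, assuming it sits inside the simd package; how masked-out lanes are filled (zeroed or merged) is not spelled out by these comments, so the sketch only shows the plumbing:

func maskedPopCountSketch(x, y Uint64x2) Uint64x2 {
	lt := x.Less(y)             // Mask64x2, VPCMPUQ with predicate immediate 1
	return x.MaskedPopCount(lt) // VPOPCNTQ applied under the mask
}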
// // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) Less(y Uint64x4) Mask64x4 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) LessEqual(y Uint64x4) Mask64x4 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTQ, CPU Feature: AVX512EVEX func (x Uint64x4) MaskedPopCount(y Mask64x4) Uint64x4 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXUQ, CPU Feature: AVX512EVEX func (x Uint64x4) Max(y Uint64x4) Uint64x4 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINUQ, CPU Feature: AVX512EVEX func (x Uint64x4) Min(y Uint64x4) Uint64x4 -// Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULUDQ, CPU Feature: AVX512EVEX func (x Uint64x4) MulEvenWiden(y Uint64x4) Uint64x4 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) NotEqual(y Uint64x4) Mask64x4 +// Or performs a bitwise OR operation between two vectors. +// // Asm: VPOR, CPU Feature: AVX2 func (x Uint64x4) Or(y Uint64x4) Uint64x4 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBQ, CPU Feature: AVX2 func (x Uint64x4) Sub(y Uint64x4) Uint64x4 +// Xor performs a bitwise XOR operation between two vectors. +// // Asm: VPXOR, CPU Feature: AVX2 func (x Uint64x4) Xor(y Uint64x4) Uint64x4 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDQ, CPU Feature: AVX512EVEX func (x Uint64x8) Add(y Uint64x8) Uint64x8 +// And performs a masked bitwise AND operation between two vectors. +// // Asm: VPANDQ, CPU Feature: AVX512EVEX func (x Uint64x8) And(y Uint64x8) Uint64x8 +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// // Asm: VPANDNQ, CPU Feature: AVX512EVEX func (x Uint64x8) AndNot(y Uint64x8) Uint64x8 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) Equal(y Uint64x8) Mask64x8 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) Greater(y Uint64x8) Mask64x8 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) GreaterEqual(y Uint64x8) Mask64x8 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) Less(y Uint64x8) Mask64x8 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) LessEqual(y Uint64x8) Mask64x8 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaskedPopCount(y Mask64x8) Uint64x8 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXUQ, CPU Feature: AVX512EVEX func (x Uint64x8) Max(y Uint64x8) Uint64x8 +// Min computes the minimum of corresponding elements. 
+// // Asm: VPMINUQ, CPU Feature: AVX512EVEX func (x Uint64x8) Min(y Uint64x8) Uint64x8 -// Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULUDQ, CPU Feature: AVX512EVEX func (x Uint64x8) MulEvenWiden(y Uint64x8) Uint64x8 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) NotEqual(y Uint64x8) Mask64x8 +// Or performs a masked bitwise OR operation between two vectors. +// // Asm: VPORQ, CPU Feature: AVX512EVEX func (x Uint64x8) Or(y Uint64x8) Uint64x8 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBQ, CPU Feature: AVX512EVEX func (x Uint64x8) Sub(y Uint64x8) Uint64x8 +// Xor performs a masked bitwise XOR operation between two vectors. +// // Asm: VPXORQ, CPU Feature: AVX512EVEX func (x Uint64x8) Xor(y Uint64x8) Uint64x8 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDB, CPU Feature: AVX func (x Uint8x16) Add(y Uint8x16) Uint8x16 +// And performs a bitwise AND operation between two vectors. +// // Asm: VPAND, CPU Feature: AVX func (x Uint8x16) And(y Uint8x16) Uint8x16 +// AndNot performs a bitwise AND NOT operation between two vectors. +// // Asm: VPANDN, CPU Feature: AVX func (x Uint8x16) AndNot(y Uint8x16) Uint8x16 +// Average computes the rounded average of corresponding elements. +// // Asm: VPAVGB, CPU Feature: AVX func (x Uint8x16) Average(y Uint8x16) Uint8x16 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) Equal(y Uint8x16) Mask8x16 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) Greater(y Uint8x16) Mask8x16 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) GreaterEqual(y Uint8x16) Mask8x16 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) Less(y Uint8x16) Mask8x16 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) LessEqual(y Uint8x16) Mask8x16 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTB, CPU Feature: AVX512EVEX func (x Uint8x16) MaskedPopCount(y Mask8x16) Uint8x16 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXUB, CPU Feature: AVX func (x Uint8x16) Max(y Uint8x16) Uint8x16 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINUB, CPU Feature: AVX func (x Uint8x16) Min(y Uint8x16) Uint8x16 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) NotEqual(y Uint8x16) Mask8x16 +// Or performs a bitwise OR operation between two vectors. +// // Asm: VPOR, CPU Feature: AVX func (x Uint8x16) Or(y Uint8x16) Uint8x16 +// SaturatedAdd adds corresponding elements of two vectors with saturation. 
+// // Asm: VPADDSB, CPU Feature: AVX func (x Uint8x16) SaturatedAdd(y Uint8x16) Uint8x16 +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// // Asm: VPSUBSB, CPU Feature: AVX func (x Uint8x16) SaturatedSub(y Uint8x16) Uint8x16 -// Multiply the elements and add the pairs together with saturation, yielding a vector of half as many elements with twice the input element size +// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDUBSW, CPU Feature: AVX func (x Uint8x16) SaturatedUnsignedSignedPairDotProd(y Int8x16) Int16x8 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBB, CPU Feature: AVX func (x Uint8x16) Sub(y Uint8x16) Uint8x16 +// Xor performs a bitwise XOR operation between two vectors. +// // Asm: VPXOR, CPU Feature: AVX func (x Uint8x16) Xor(y Uint8x16) Uint8x16 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDB, CPU Feature: AVX2 func (x Uint8x32) Add(y Uint8x32) Uint8x32 +// And performs a bitwise AND operation between two vectors. +// // Asm: VPAND, CPU Feature: AVX2 func (x Uint8x32) And(y Uint8x32) Uint8x32 +// AndNot performs a bitwise AND NOT operation between two vectors. +// // Asm: VPANDN, CPU Feature: AVX2 func (x Uint8x32) AndNot(y Uint8x32) Uint8x32 +// Average computes the rounded average of corresponding elements. +// // Asm: VPAVGB, CPU Feature: AVX2 func (x Uint8x32) Average(y Uint8x32) Uint8x32 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) Equal(y Uint8x32) Mask8x32 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) Greater(y Uint8x32) Mask8x32 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) GreaterEqual(y Uint8x32) Mask8x32 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) Less(y Uint8x32) Mask8x32 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) LessEqual(y Uint8x32) Mask8x32 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTB, CPU Feature: AVX512EVEX func (x Uint8x32) MaskedPopCount(y Mask8x32) Uint8x32 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXUB, CPU Feature: AVX2 func (x Uint8x32) Max(y Uint8x32) Uint8x32 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINUB, CPU Feature: AVX2 func (x Uint8x32) Min(y Uint8x32) Uint8x32 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) NotEqual(y Uint8x32) Mask8x32 +// Or performs a bitwise OR operation between two vectors. +// // Asm: VPOR, CPU Feature: AVX2 func (x Uint8x32) Or(y Uint8x32) Uint8x32 +// SaturatedAdd adds corresponding elements of two vectors with saturation. 
+// // Asm: VPADDSB, CPU Feature: AVX2 func (x Uint8x32) SaturatedAdd(y Uint8x32) Uint8x32 +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// // Asm: VPSUBSB, CPU Feature: AVX2 func (x Uint8x32) SaturatedSub(y Uint8x32) Uint8x32 -// Multiply the elements and add the pairs together with saturation, yielding a vector of half as many elements with twice the input element size +// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDUBSW, CPU Feature: AVX2 func (x Uint8x32) SaturatedUnsignedSignedPairDotProd(y Int8x32) Int16x16 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBB, CPU Feature: AVX2 func (x Uint8x32) Sub(y Uint8x32) Uint8x32 +// Xor performs a bitwise XOR operation between two vectors. +// // Asm: VPXOR, CPU Feature: AVX2 func (x Uint8x32) Xor(y Uint8x32) Uint8x32 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDB, CPU Feature: AVX512EVEX func (x Uint8x64) Add(y Uint8x64) Uint8x64 +// Average computes the rounded average of corresponding elements. +// // Asm: VPAVGB, CPU Feature: AVX512EVEX func (x Uint8x64) Average(y Uint8x64) Uint8x64 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) Equal(y Uint8x64) Mask8x64 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) Greater(y Uint8x64) Mask8x64 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) GreaterEqual(y Uint8x64) Mask8x64 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) Less(y Uint8x64) Mask8x64 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) LessEqual(y Uint8x64) Mask8x64 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTB, CPU Feature: AVX512EVEX func (x Uint8x64) MaskedPopCount(y Mask8x64) Uint8x64 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXUB, CPU Feature: AVX512EVEX func (x Uint8x64) Max(y Uint8x64) Uint8x64 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINUB, CPU Feature: AVX512EVEX func (x Uint8x64) Min(y Uint8x64) Uint8x64 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) NotEqual(y Uint8x64) Mask8x64 +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// // Asm: VPADDSB, CPU Feature: AVX512EVEX func (x Uint8x64) SaturatedAdd(y Uint8x64) Uint8x64 +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// // Asm: VPSUBSB, CPU Feature: AVX512EVEX func (x Uint8x64) SaturatedSub(y Uint8x64) Uint8x64 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBB, CPU Feature: AVX512EVEX func (x Uint8x64) Sub(y Uint8x64) Uint8x64 +// Add adds corresponding elements of two vectors. 
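A sketch of the unsigned-by-signed pair dot product declared above, written as if inside the simd package; each unsigned byte of the receiver is multiplied by the signed byte of the argument at the same index, and adjacent 16-bit products are added with saturation, halving the element count:

func dotProdSketch(u Uint8x32, s Int8x32) Int16x16 {
	// VPMADDUBSW: 32 byte products collapse pairwise into 16 saturated int16 sums.
	return u.SaturatedUnsignedSignedPairDotProd(s)
}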
+// // Asm: VADDPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedAdd(y Float32x16, z Mask32x16) Float32x16 +// And performs a masked bitwise AND operation between two vectors. +// // Asm: VANDPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedAnd(y Float32x16, z Mask32x16) Float32x16 +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// // Asm: VANDNPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedAndNot(y Float32x16, z Mask32x16) Float32x16 +// Div divides elements of two vectors. +// // Asm: VDIVPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedDiv(y Float32x16, z Mask32x16) Float32x16 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedEqual(y Float32x16, z Mask32x16) Mask32x16 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedGreater(y Float32x16, z Mask32x16) Mask32x16 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedGreaterEqual(y Float32x16, z Mask32x16) Mask32x16 -// Predicate immediate is 3 if it has; Returns mask element True if either one of the input\'s element is Nan; Please use this method as x\.IsNan\(x\) to check x only; +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Const Immediate = 3. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedIsNan(y Float32x16, z Mask32x16) Mask32x16 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedLess(y Float32x16, z Mask32x16) Mask32x16 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedLessEqual(y Float32x16, z Mask32x16) Mask32x16 +// Max computes the maximum of corresponding elements. +// // Asm: VMAXPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedMax(y Float32x16, z Mask32x16) Float32x16 +// Min computes the minimum of corresponding elements. +// // Asm: VMINPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedMin(y Float32x16, z Mask32x16) Float32x16 +// Mul multiplies corresponding elements of two vectors, masked. +// // Asm: VMULPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedMul(y Float32x16, z Mask32x16) Float32x16 +// MulByPowOf2 multiplies elements by a power of 2. +// // Asm: VSCALEFPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedMulByPowOf2(y Float32x16, z Mask32x16) Float32x16 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedNotEqual(y Float32x16, z Mask32x16) Mask32x16 +// Or performs a masked bitwise OR operation between two vectors. +// // Asm: VORPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedOr(y Float32x16, z Mask32x16) Float32x16 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VADDPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedSub(y Float32x16, z Mask32x16) Float32x16 +// Xor performs a masked bitwise XOR operation between two vectors. 
+// // Asm: VXORPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedXor(y Float32x16, z Mask32x16) Float32x16 +// Add adds corresponding elements of two vectors. +// // Asm: VADDPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedAdd(y Float32x4, z Mask32x4) Float32x4 +// And performs a masked bitwise AND operation between two vectors. +// // Asm: VANDPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedAnd(y Float32x4, z Mask32x4) Float32x4 +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// // Asm: VANDNPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedAndNot(y Float32x4, z Mask32x4) Float32x4 +// Div divides elements of two vectors. +// // Asm: VDIVPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedDiv(y Float32x4, z Mask32x4) Float32x4 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedEqual(y Float32x4, z Mask32x4) Mask32x4 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedGreater(y Float32x4, z Mask32x4) Mask32x4 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedGreaterEqual(y Float32x4, z Mask32x4) Mask32x4 -// Predicate immediate is 3 if it has; Returns mask element True if either one of the input\'s element is Nan; Please use this method as x\.IsNan\(x\) to check x only; +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Const Immediate = 3. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedIsNan(y Float32x4, z Mask32x4) Mask32x4 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedLess(y Float32x4, z Mask32x4) Mask32x4 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedLessEqual(y Float32x4, z Mask32x4) Mask32x4 +// Max computes the maximum of corresponding elements. +// // Asm: VMAXPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedMax(y Float32x4, z Mask32x4) Float32x4 +// Min computes the minimum of corresponding elements. +// // Asm: VMINPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedMin(y Float32x4, z Mask32x4) Float32x4 +// Mul multiplies corresponding elements of two vectors, masked. +// // Asm: VMULPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedMul(y Float32x4, z Mask32x4) Float32x4 +// MulByPowOf2 multiplies elements by a power of 2. +// // Asm: VSCALEFPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedMulByPowOf2(y Float32x4, z Mask32x4) Float32x4 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedNotEqual(y Float32x4, z Mask32x4) Mask32x4 +// Or performs a masked bitwise OR operation between two vectors. +// // Asm: VORPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedOr(y Float32x4, z Mask32x4) Float32x4 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VADDPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedSub(y Float32x4, z Mask32x4) Float32x4 +// Xor performs a masked bitwise XOR operation between two vectors. 
+// // Asm: VXORPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedXor(y Float32x4, z Mask32x4) Float32x4 +// Add adds corresponding elements of two vectors. +// // Asm: VADDPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedAdd(y Float32x8, z Mask32x8) Float32x8 +// And performs a masked bitwise AND operation between two vectors. +// // Asm: VANDPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedAnd(y Float32x8, z Mask32x8) Float32x8 +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// // Asm: VANDNPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedAndNot(y Float32x8, z Mask32x8) Float32x8 +// Div divides elements of two vectors. +// // Asm: VDIVPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedDiv(y Float32x8, z Mask32x8) Float32x8 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedEqual(y Float32x8, z Mask32x8) Mask32x8 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedGreater(y Float32x8, z Mask32x8) Mask32x8 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedGreaterEqual(y Float32x8, z Mask32x8) Mask32x8 -// Predicate immediate is 3 if it has; Returns mask element True if either one of the input\'s element is Nan; Please use this method as x\.IsNan\(x\) to check x only; +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Const Immediate = 3. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedIsNan(y Float32x8, z Mask32x8) Mask32x8 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedLess(y Float32x8, z Mask32x8) Mask32x8 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedLessEqual(y Float32x8, z Mask32x8) Mask32x8 +// Max computes the maximum of corresponding elements. +// // Asm: VMAXPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedMax(y Float32x8, z Mask32x8) Float32x8 +// Min computes the minimum of corresponding elements. +// // Asm: VMINPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedMin(y Float32x8, z Mask32x8) Float32x8 +// Mul multiplies corresponding elements of two vectors, masked. +// // Asm: VMULPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedMul(y Float32x8, z Mask32x8) Float32x8 +// MulByPowOf2 multiplies elements by a power of 2. +// // Asm: VSCALEFPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedMulByPowOf2(y Float32x8, z Mask32x8) Float32x8 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedNotEqual(y Float32x8, z Mask32x8) Mask32x8 +// Or performs a masked bitwise OR operation between two vectors. +// // Asm: VORPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedOr(y Float32x8, z Mask32x8) Float32x8 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VADDPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedSub(y Float32x8, z Mask32x8) Float32x8 +// Xor performs a masked bitwise XOR operation between two vectors. 
+// // Asm: VXORPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedXor(y Float32x8, z Mask32x8) Float32x8 +// Add adds corresponding elements of two vectors. +// // Asm: VADDPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedAdd(y Float64x2, z Mask64x2) Float64x2 +// And performs a masked bitwise AND operation between two vectors. +// // Asm: VANDPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedAnd(y Float64x2, z Mask64x2) Float64x2 +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// // Asm: VANDNPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedAndNot(y Float64x2, z Mask64x2) Float64x2 +// Div divides elements of two vectors. +// // Asm: VDIVPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedDiv(y Float64x2, z Mask64x2) Float64x2 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedEqual(y Float64x2, z Mask64x2) Mask64x2 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedGreater(y Float64x2, z Mask64x2) Mask64x2 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedGreaterEqual(y Float64x2, z Mask64x2) Mask64x2 -// Predicate immediate is 3 if it has; Returns mask element True if either one of the input\'s element is Nan; Please use this method as x\.IsNan\(x\) to check x only; +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Const Immediate = 3. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedIsNan(y Float64x2, z Mask64x2) Mask64x2 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedLess(y Float64x2, z Mask64x2) Mask64x2 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedLessEqual(y Float64x2, z Mask64x2) Mask64x2 +// Max computes the maximum of corresponding elements. +// // Asm: VMAXPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedMax(y Float64x2, z Mask64x2) Float64x2 +// Min computes the minimum of corresponding elements. +// // Asm: VMINPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedMin(y Float64x2, z Mask64x2) Float64x2 +// Mul multiplies corresponding elements of two vectors, masked. +// // Asm: VMULPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedMul(y Float64x2, z Mask64x2) Float64x2 +// MulByPowOf2 multiplies elements by a power of 2. +// // Asm: VSCALEFPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedMulByPowOf2(y Float64x2, z Mask64x2) Float64x2 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedNotEqual(y Float64x2, z Mask64x2) Mask64x2 +// Or performs a masked bitwise OR operation between two vectors. +// // Asm: VORPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedOr(y Float64x2, z Mask64x2) Float64x2 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VADDPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedSub(y Float64x2, z Mask64x2) Float64x2 +// Xor performs a masked bitwise XOR operation between two vectors. 
+// // Asm: VXORPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedXor(y Float64x2, z Mask64x2) Float64x2 +// Add adds corresponding elements of two vectors. +// // Asm: VADDPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedAdd(y Float64x4, z Mask64x4) Float64x4 +// And performs a masked bitwise AND operation between two vectors. +// // Asm: VANDPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedAnd(y Float64x4, z Mask64x4) Float64x4 +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// // Asm: VANDNPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedAndNot(y Float64x4, z Mask64x4) Float64x4 +// Div divides elements of two vectors. +// // Asm: VDIVPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedDiv(y Float64x4, z Mask64x4) Float64x4 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedEqual(y Float64x4, z Mask64x4) Mask64x4 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedGreater(y Float64x4, z Mask64x4) Mask64x4 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedGreaterEqual(y Float64x4, z Mask64x4) Mask64x4 -// Predicate immediate is 3 if it has; Returns mask element True if either one of the input\'s element is Nan; Please use this method as x\.IsNan\(x\) to check x only; +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Const Immediate = 3. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedIsNan(y Float64x4, z Mask64x4) Mask64x4 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedLess(y Float64x4, z Mask64x4) Mask64x4 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedLessEqual(y Float64x4, z Mask64x4) Mask64x4 +// Max computes the maximum of corresponding elements. +// // Asm: VMAXPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedMax(y Float64x4, z Mask64x4) Float64x4 +// Min computes the minimum of corresponding elements. +// // Asm: VMINPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedMin(y Float64x4, z Mask64x4) Float64x4 +// Mul multiplies corresponding elements of two vectors, masked. +// // Asm: VMULPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedMul(y Float64x4, z Mask64x4) Float64x4 +// MulByPowOf2 multiplies elements by a power of 2. +// // Asm: VSCALEFPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedMulByPowOf2(y Float64x4, z Mask64x4) Float64x4 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedNotEqual(y Float64x4, z Mask64x4) Mask64x4 +// Or performs a masked bitwise OR operation between two vectors. +// // Asm: VORPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedOr(y Float64x4, z Mask64x4) Float64x4 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VADDPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedSub(y Float64x4, z Mask64x4) Float64x4 +// Xor performs a masked bitwise XOR operation between two vectors. 
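A sketch of the masked floating-point pattern, assuming it sits inside the simd package and that the Mask64x4 is supplied by the caller (these comments show how compares produce masks, not how to construct one directly); the mask is always the final operand:

func maskedMulAddSketch(x, y Float64x4, m Mask64x4) (Float64x4, Mask64x4) {
	prod := x.MaskedMul(y, m)      // VMULPD under m
	sum := prod.MaskedAdd(x, m)    // VADDPD under m
	nan := sum.MaskedIsNan(sum, m) // per the comment: use as v.IsNan(v); predicate immediate 3
	return sum, nan
}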
+// // Asm: VXORPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedXor(y Float64x4, z Mask64x4) Float64x4 +// Add adds corresponding elements of two vectors. +// // Asm: VADDPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedAdd(y Float64x8, z Mask64x8) Float64x8 +// And performs a masked bitwise AND operation between two vectors. +// // Asm: VANDPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedAnd(y Float64x8, z Mask64x8) Float64x8 +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// // Asm: VANDNPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedAndNot(y Float64x8, z Mask64x8) Float64x8 +// Div divides elements of two vectors. +// // Asm: VDIVPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedDiv(y Float64x8, z Mask64x8) Float64x8 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedEqual(y Float64x8, z Mask64x8) Mask64x8 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedGreater(y Float64x8, z Mask64x8) Mask64x8 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedGreaterEqual(y Float64x8, z Mask64x8) Mask64x8 -// Predicate immediate is 3 if it has; Returns mask element True if either one of the input\'s element is Nan; Please use this method as x\.IsNan\(x\) to check x only; +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Const Immediate = 3. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedIsNan(y Float64x8, z Mask64x8) Mask64x8 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedLess(y Float64x8, z Mask64x8) Mask64x8 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedLessEqual(y Float64x8, z Mask64x8) Mask64x8 +// Max computes the maximum of corresponding elements. +// // Asm: VMAXPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedMax(y Float64x8, z Mask64x8) Float64x8 +// Min computes the minimum of corresponding elements. +// // Asm: VMINPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedMin(y Float64x8, z Mask64x8) Float64x8 +// Mul multiplies corresponding elements of two vectors, masked. +// // Asm: VMULPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedMul(y Float64x8, z Mask64x8) Float64x8 +// MulByPowOf2 multiplies elements by a power of 2. +// // Asm: VSCALEFPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedMulByPowOf2(y Float64x8, z Mask64x8) Float64x8 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedNotEqual(y Float64x8, z Mask64x8) Mask64x8 +// Or performs a masked bitwise OR operation between two vectors. +// // Asm: VORPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedOr(y Float64x8, z Mask64x8) Float64x8 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VADDPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedSub(y Float64x8, z Mask64x8) Float64x8 +// Xor performs a masked bitwise XOR operation between two vectors. 
+// // Asm: VXORPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedXor(y Float64x8, z Mask64x8) Float64x8 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDW, CPU Feature: AVX512EVEX func (x Int16x16) MaskedAdd(y Int16x16, z Mask16x16) Int16x16 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPEQW, CPU Feature: AVX512EVEX func (x Int16x16) MaskedEqual(y Int16x16, z Mask16x16) Mask16x16 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPGTW, CPU Feature: AVX512EVEX func (x Int16x16) MaskedGreater(y Int16x16, z Mask16x16) Mask16x16 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x16) MaskedGreaterEqual(y Int16x16, z Mask16x16) Mask16x16 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x16) MaskedLess(y Int16x16, z Mask16x16) Mask16x16 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x16) MaskedLessEqual(y Int16x16, z Mask16x16) Mask16x16 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXSW, CPU Feature: AVX512EVEX func (x Int16x16) MaskedMax(y Int16x16, z Mask16x16) Int16x16 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINSW, CPU Feature: AVX512EVEX func (x Int16x16) MaskedMin(y Int16x16, z Mask16x16) Int16x16 -// Multiplies the elements from the two sources of size X at index i, store the high X bits of the result of size 2X at index i +// MulHigh multiplies elements and stores the high part of the result, masked. // // Asm: VPMULHW, CPU Feature: AVX512EVEX func (x Int16x16) MaskedMulHigh(y Int16x16, z Mask16x16) Int16x16 -// Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +// MulLow multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLW, CPU Feature: AVX512EVEX func (x Int16x16) MaskedMulLow(y Int16x16, z Mask16x16) Int16x16 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x16) MaskedNotEqual(y Int16x16, z Mask16x16) Mask16x16 -// Multiply the elements and add the pairs together, yielding a vector of half as many elements with twice the input element size +// PairDotProd multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDWD, CPU Feature: AVX512EVEX func (x Int16x16) MaskedPairDotProd(y Int16x16, z Mask16x16) Int32x8 +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// // Asm: VPADDSW, CPU Feature: AVX512EVEX func (x Int16x16) MaskedSaturatedAdd(y Int16x16, z Mask16x16) Int16x16 +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// // Asm: VPSUBSW, CPU Feature: AVX512EVEX func (x Int16x16) MaskedSaturatedSub(y Int16x16, z Mask16x16) Int16x16 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBW, CPU Feature: AVX512EVEX func (x Int16x16) MaskedSub(y Int16x16, z Mask16x16) Int16x16 +// Add adds corresponding elements of two vectors. 
+// // Asm: VPADDW, CPU Feature: AVX512EVEX func (x Int16x32) MaskedAdd(y Int16x32, z Mask16x32) Int16x32 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPEQW, CPU Feature: AVX512EVEX func (x Int16x32) MaskedEqual(y Int16x32, z Mask16x32) Mask16x32 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPGTW, CPU Feature: AVX512EVEX func (x Int16x32) MaskedGreater(y Int16x32, z Mask16x32) Mask16x32 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) MaskedGreaterEqual(y Int16x32, z Mask16x32) Mask16x32 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) MaskedLess(y Int16x32, z Mask16x32) Mask16x32 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) MaskedLessEqual(y Int16x32, z Mask16x32) Mask16x32 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXSW, CPU Feature: AVX512EVEX func (x Int16x32) MaskedMax(y Int16x32, z Mask16x32) Int16x32 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINSW, CPU Feature: AVX512EVEX func (x Int16x32) MaskedMin(y Int16x32, z Mask16x32) Int16x32 -// Multiplies the elements from the two sources of size X at index i, store the high X bits of the result of size 2X at index i +// MulHigh multiplies elements and stores the high part of the result, masked. // // Asm: VPMULHW, CPU Feature: AVX512EVEX func (x Int16x32) MaskedMulHigh(y Int16x32, z Mask16x32) Int16x32 -// Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +// MulLow multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLW, CPU Feature: AVX512EVEX func (x Int16x32) MaskedMulLow(y Int16x32, z Mask16x32) Int16x32 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) MaskedNotEqual(y Int16x32, z Mask16x32) Mask16x32 -// Multiply the elements and add the pairs together, yielding a vector of half as many elements with twice the input element size +// PairDotProd multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDWD, CPU Feature: AVX512EVEX func (x Int16x32) MaskedPairDotProd(y Int16x32, z Mask16x32) Int32x16 +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// // Asm: VPADDSW, CPU Feature: AVX512EVEX func (x Int16x32) MaskedSaturatedAdd(y Int16x32, z Mask16x32) Int16x32 +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// // Asm: VPSUBSW, CPU Feature: AVX512EVEX func (x Int16x32) MaskedSaturatedSub(y Int16x32, z Mask16x32) Int16x32 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBW, CPU Feature: AVX512EVEX func (x Int16x32) MaskedSub(y Int16x32, z Mask16x32) Int16x32 +// Add adds corresponding elements of two vectors. 
+// // Asm: VPADDW, CPU Feature: AVX512EVEX func (x Int16x8) MaskedAdd(y Int16x8, z Mask16x8) Int16x8 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPEQW, CPU Feature: AVX512EVEX func (x Int16x8) MaskedEqual(y Int16x8, z Mask16x8) Mask16x8 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPGTW, CPU Feature: AVX512EVEX func (x Int16x8) MaskedGreater(y Int16x8, z Mask16x8) Mask16x8 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x8) MaskedGreaterEqual(y Int16x8, z Mask16x8) Mask16x8 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x8) MaskedLess(y Int16x8, z Mask16x8) Mask16x8 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x8) MaskedLessEqual(y Int16x8, z Mask16x8) Mask16x8 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXSW, CPU Feature: AVX512EVEX func (x Int16x8) MaskedMax(y Int16x8, z Mask16x8) Int16x8 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINSW, CPU Feature: AVX512EVEX func (x Int16x8) MaskedMin(y Int16x8, z Mask16x8) Int16x8 -// Multiplies the elements from the two sources of size X at index i, store the high X bits of the result of size 2X at index i +// MulHigh multiplies elements and stores the high part of the result, masked. // // Asm: VPMULHW, CPU Feature: AVX512EVEX func (x Int16x8) MaskedMulHigh(y Int16x8, z Mask16x8) Int16x8 -// Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +// MulLow multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLW, CPU Feature: AVX512EVEX func (x Int16x8) MaskedMulLow(y Int16x8, z Mask16x8) Int16x8 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x8) MaskedNotEqual(y Int16x8, z Mask16x8) Mask16x8 -// Multiply the elements and add the pairs together, yielding a vector of half as many elements with twice the input element size +// PairDotProd multiplies the elements and adds the pairs together, +// yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDWD, CPU Feature: AVX512EVEX func (x Int16x8) MaskedPairDotProd(y Int16x8, z Mask16x8) Int32x4 +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// // Asm: VPADDSW, CPU Feature: AVX512EVEX func (x Int16x8) MaskedSaturatedAdd(y Int16x8, z Mask16x8) Int16x8 +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// // Asm: VPSUBSW, CPU Feature: AVX512EVEX func (x Int16x8) MaskedSaturatedSub(y Int16x8, z Mask16x8) Int16x8 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBW, CPU Feature: AVX512EVEX func (x Int16x8) MaskedSub(y Int16x8, z Mask16x8) Int16x8 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDD, CPU Feature: AVX512EVEX func (x Int32x16) MaskedAdd(y Int32x16, z Mask32x16) Int32x16 +// And performs a masked bitwise AND operation between two vectors.
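// Editor's illustration, not part of this CL: PairDotProd halves the lane
// count while doubling the lane width. Assuming a package "simd" exposing the
// types documented above, a masked dot-product step could look like:
//
//	// dotStep multiplies paired int16 lanes of x and y and sums each pair
//	// into an int32 lane, only where m selects.
//	func dotStep(x, y simd.Int16x8, m simd.Mask16x8) simd.Int32x4 {
//		return x.MaskedPairDotProd(y, m)
//	}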
+// // Asm: VPANDD, CPU Feature: AVX512EVEX func (x Int32x16) MaskedAnd(y Int32x16, z Mask32x16) Int32x16 +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// // Asm: VPANDND, CPU Feature: AVX512EVEX func (x Int32x16) MaskedAndNot(y Int32x16, z Mask32x16) Int32x16 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPEQD, CPU Feature: AVX512EVEX func (x Int32x16) MaskedEqual(y Int32x16, z Mask32x16) Mask32x16 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPGTD, CPU Feature: AVX512EVEX func (x Int32x16) MaskedGreater(y Int32x16, z Mask32x16) Mask32x16 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) MaskedGreaterEqual(y Int32x16, z Mask32x16) Mask32x16 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) MaskedLess(y Int32x16, z Mask32x16) Mask32x16 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) MaskedLessEqual(y Int32x16, z Mask32x16) Mask32x16 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXSD, CPU Feature: AVX512EVEX func (x Int32x16) MaskedMax(y Int32x16, z Mask32x16) Int32x16 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINSD, CPU Feature: AVX512EVEX func (x Int32x16) MaskedMin(y Int32x16, z Mask32x16) Int32x16 -// Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +// MulLow multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLD, CPU Feature: AVX512EVEX func (x Int32x16) MaskedMulLow(y Int32x16, z Mask32x16) Int32x16 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) MaskedNotEqual(y Int32x16, z Mask32x16) Mask32x16 +// Or performs a masked bitwise OR operation between two vectors. +// // Asm: VPORD, CPU Feature: AVX512EVEX func (x Int32x16) MaskedOr(y Int32x16, z Mask32x16) Int32x16 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBD, CPU Feature: AVX512EVEX func (x Int32x16) MaskedSub(y Int32x16, z Mask32x16) Int32x16 +// Xor performs a masked bitwise XOR operation between two vectors. +// // Asm: VPXORD, CPU Feature: AVX512EVEX func (x Int32x16) MaskedXor(y Int32x16, z Mask32x16) Int32x16 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDD, CPU Feature: AVX512EVEX func (x Int32x4) MaskedAdd(y Int32x4, z Mask32x4) Int32x4 +// And performs a masked bitwise AND operation between two vectors. +// // Asm: VPANDD, CPU Feature: AVX512EVEX func (x Int32x4) MaskedAnd(y Int32x4, z Mask32x4) Int32x4 +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// // Asm: VPANDND, CPU Feature: AVX512EVEX func (x Int32x4) MaskedAndNot(y Int32x4, z Mask32x4) Int32x4 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPEQD, CPU Feature: AVX512EVEX func (x Int32x4) MaskedEqual(y Int32x4, z Mask32x4) Mask32x4 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. 
+// Const Immediate = 6. // // Asm: VPCMPGTD, CPU Feature: AVX512EVEX func (x Int32x4) MaskedGreater(y Int32x4, z Mask32x4) Mask32x4 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x4) MaskedGreaterEqual(y Int32x4, z Mask32x4) Mask32x4 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x4) MaskedLess(y Int32x4, z Mask32x4) Mask32x4 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x4) MaskedLessEqual(y Int32x4, z Mask32x4) Mask32x4 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXSD, CPU Feature: AVX512EVEX func (x Int32x4) MaskedMax(y Int32x4, z Mask32x4) Int32x4 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINSD, CPU Feature: AVX512EVEX func (x Int32x4) MaskedMin(y Int32x4, z Mask32x4) Int32x4 -// Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +// MulLow multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLD, CPU Feature: AVX512EVEX func (x Int32x4) MaskedMulLow(y Int32x4, z Mask32x4) Int32x4 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x4) MaskedNotEqual(y Int32x4, z Mask32x4) Mask32x4 +// Or performs a masked bitwise OR operation between two vectors. +// // Asm: VPORD, CPU Feature: AVX512EVEX func (x Int32x4) MaskedOr(y Int32x4, z Mask32x4) Int32x4 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBD, CPU Feature: AVX512EVEX func (x Int32x4) MaskedSub(y Int32x4, z Mask32x4) Int32x4 +// Xor performs a masked bitwise XOR operation between two vectors. +// // Asm: VPXORD, CPU Feature: AVX512EVEX func (x Int32x4) MaskedXor(y Int32x4, z Mask32x4) Int32x4 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDD, CPU Feature: AVX512EVEX func (x Int32x8) MaskedAdd(y Int32x8, z Mask32x8) Int32x8 +// And performs a masked bitwise AND operation between two vectors. +// // Asm: VPANDD, CPU Feature: AVX512EVEX func (x Int32x8) MaskedAnd(y Int32x8, z Mask32x8) Int32x8 +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// // Asm: VPANDND, CPU Feature: AVX512EVEX func (x Int32x8) MaskedAndNot(y Int32x8, z Mask32x8) Int32x8 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPEQD, CPU Feature: AVX512EVEX func (x Int32x8) MaskedEqual(y Int32x8, z Mask32x8) Mask32x8 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPGTD, CPU Feature: AVX512EVEX func (x Int32x8) MaskedGreater(y Int32x8, z Mask32x8) Mask32x8 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x8) MaskedGreaterEqual(y Int32x8, z Mask32x8) Mask32x8 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. 
// // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x8) MaskedLess(y Int32x8, z Mask32x8) Mask32x8 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x8) MaskedLessEqual(y Int32x8, z Mask32x8) Mask32x8 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXSD, CPU Feature: AVX512EVEX func (x Int32x8) MaskedMax(y Int32x8, z Mask32x8) Int32x8 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINSD, CPU Feature: AVX512EVEX func (x Int32x8) MaskedMin(y Int32x8, z Mask32x8) Int32x8 -// Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +// MulLow multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLD, CPU Feature: AVX512EVEX func (x Int32x8) MaskedMulLow(y Int32x8, z Mask32x8) Int32x8 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x8) MaskedNotEqual(y Int32x8, z Mask32x8) Mask32x8 +// Or performs a masked bitwise OR operation between two vectors. +// // Asm: VPORD, CPU Feature: AVX512EVEX func (x Int32x8) MaskedOr(y Int32x8, z Mask32x8) Int32x8 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBD, CPU Feature: AVX512EVEX func (x Int32x8) MaskedSub(y Int32x8, z Mask32x8) Int32x8 +// Xor performs a masked bitwise XOR operation between two vectors. +// // Asm: VPXORD, CPU Feature: AVX512EVEX func (x Int32x8) MaskedXor(y Int32x8, z Mask32x8) Int32x8 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDQ, CPU Feature: AVX512EVEX func (x Int64x2) MaskedAdd(y Int64x2, z Mask64x2) Int64x2 +// And performs a masked bitwise AND operation between two vectors. +// // Asm: VPANDQ, CPU Feature: AVX512EVEX func (x Int64x2) MaskedAnd(y Int64x2, z Mask64x2) Int64x2 +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// // Asm: VPANDNQ, CPU Feature: AVX512EVEX func (x Int64x2) MaskedAndNot(y Int64x2, z Mask64x2) Int64x2 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPEQQ, CPU Feature: AVX512EVEX func (x Int64x2) MaskedEqual(y Int64x2, z Mask64x2) Mask64x2 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPGTQ, CPU Feature: AVX512EVEX func (x Int64x2) MaskedGreater(y Int64x2, z Mask64x2) Mask64x2 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x2) MaskedGreaterEqual(y Int64x2, z Mask64x2) Mask64x2 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x2) MaskedLess(y Int64x2, z Mask64x2) Mask64x2 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x2) MaskedLessEqual(y Int64x2, z Mask64x2) Mask64x2 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXSQ, CPU Feature: AVX512EVEX func (x Int64x2) MaskedMax(y Int64x2, z Mask64x2) Int64x2 +// Min computes the minimum of corresponding elements. 
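// Editor's sketch, not part of this CL: masked comparisons return Mask values
// that can feed later masked arithmetic. Assuming a package "simd" with the
// signatures documented above, and an existing mask m (for example an
// all-true mask obtained elsewhere in the API):
//
//	// addWhereLess adds y into x only in lanes where x < y and m is set.
//	func addWhereLess(x, y simd.Int32x8, m simd.Mask32x8) simd.Int32x8 {
//		lt := x.MaskedLess(y, m) // VPCMPD with immediate 1
//		return x.MaskedAdd(y, lt)
//	}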
+// // Asm: VPMINSQ, CPU Feature: AVX512EVEX func (x Int64x2) MaskedMin(y Int64x2, z Mask64x2) Int64x2 -// Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULDQ, CPU Feature: AVX512EVEX func (x Int64x2) MaskedMulEvenWiden(y Int64x2, z Mask64x2) Int64x2 -// Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +// MulLow multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLQ, CPU Feature: AVX512EVEX func (x Int64x2) MaskedMulLow(y Int64x2, z Mask64x2) Int64x2 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x2) MaskedNotEqual(y Int64x2, z Mask64x2) Mask64x2 +// Or performs a masked bitwise OR operation between two vectors. +// // Asm: VPORQ, CPU Feature: AVX512EVEX func (x Int64x2) MaskedOr(y Int64x2, z Mask64x2) Int64x2 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBQ, CPU Feature: AVX512EVEX func (x Int64x2) MaskedSub(y Int64x2, z Mask64x2) Int64x2 +// Xor performs a masked bitwise XOR operation between two vectors. +// // Asm: VPXORQ, CPU Feature: AVX512EVEX func (x Int64x2) MaskedXor(y Int64x2, z Mask64x2) Int64x2 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDQ, CPU Feature: AVX512EVEX func (x Int64x4) MaskedAdd(y Int64x4, z Mask64x4) Int64x4 +// And performs a masked bitwise AND operation between two vectors. +// // Asm: VPANDQ, CPU Feature: AVX512EVEX func (x Int64x4) MaskedAnd(y Int64x4, z Mask64x4) Int64x4 +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// // Asm: VPANDNQ, CPU Feature: AVX512EVEX func (x Int64x4) MaskedAndNot(y Int64x4, z Mask64x4) Int64x4 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPEQQ, CPU Feature: AVX512EVEX func (x Int64x4) MaskedEqual(y Int64x4, z Mask64x4) Mask64x4 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPGTQ, CPU Feature: AVX512EVEX func (x Int64x4) MaskedGreater(y Int64x4, z Mask64x4) Mask64x4 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x4) MaskedGreaterEqual(y Int64x4, z Mask64x4) Mask64x4 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x4) MaskedLess(y Int64x4, z Mask64x4) Mask64x4 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x4) MaskedLessEqual(y Int64x4, z Mask64x4) Mask64x4 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXSQ, CPU Feature: AVX512EVEX func (x Int64x4) MaskedMax(y Int64x4, z Mask64x4) Int64x4 +// Min computes the minimum of corresponding elements. 
+// // Asm: VPMINSQ, CPU Feature: AVX512EVEX func (x Int64x4) MaskedMin(y Int64x4, z Mask64x4) Int64x4 -// Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULDQ, CPU Feature: AVX512EVEX func (x Int64x4) MaskedMulEvenWiden(y Int64x4, z Mask64x4) Int64x4 -// Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +// MulLow multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLQ, CPU Feature: AVX512EVEX func (x Int64x4) MaskedMulLow(y Int64x4, z Mask64x4) Int64x4 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x4) MaskedNotEqual(y Int64x4, z Mask64x4) Mask64x4 +// Or performs a masked bitwise OR operation between two vectors. +// // Asm: VPORQ, CPU Feature: AVX512EVEX func (x Int64x4) MaskedOr(y Int64x4, z Mask64x4) Int64x4 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBQ, CPU Feature: AVX512EVEX func (x Int64x4) MaskedSub(y Int64x4, z Mask64x4) Int64x4 +// Xor performs a masked bitwise XOR operation between two vectors. +// // Asm: VPXORQ, CPU Feature: AVX512EVEX func (x Int64x4) MaskedXor(y Int64x4, z Mask64x4) Int64x4 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDQ, CPU Feature: AVX512EVEX func (x Int64x8) MaskedAdd(y Int64x8, z Mask64x8) Int64x8 +// And performs a masked bitwise AND operation between two vectors. +// // Asm: VPANDQ, CPU Feature: AVX512EVEX func (x Int64x8) MaskedAnd(y Int64x8, z Mask64x8) Int64x8 +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// // Asm: VPANDNQ, CPU Feature: AVX512EVEX func (x Int64x8) MaskedAndNot(y Int64x8, z Mask64x8) Int64x8 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPEQQ, CPU Feature: AVX512EVEX func (x Int64x8) MaskedEqual(y Int64x8, z Mask64x8) Mask64x8 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPGTQ, CPU Feature: AVX512EVEX func (x Int64x8) MaskedGreater(y Int64x8, z Mask64x8) Mask64x8 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) MaskedGreaterEqual(y Int64x8, z Mask64x8) Mask64x8 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) MaskedLess(y Int64x8, z Mask64x8) Mask64x8 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) MaskedLessEqual(y Int64x8, z Mask64x8) Mask64x8 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXSQ, CPU Feature: AVX512EVEX func (x Int64x8) MaskedMax(y Int64x8, z Mask64x8) Int64x8 +// Min computes the minimum of corresponding elements. 
+// // Asm: VPMINSQ, CPU Feature: AVX512EVEX func (x Int64x8) MaskedMin(y Int64x8, z Mask64x8) Int64x8 -// Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULDQ, CPU Feature: AVX512EVEX func (x Int64x8) MaskedMulEvenWiden(y Int64x8, z Mask64x8) Int64x8 -// Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +// MulLow multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLQ, CPU Feature: AVX512EVEX func (x Int64x8) MaskedMulLow(y Int64x8, z Mask64x8) Int64x8 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) MaskedNotEqual(y Int64x8, z Mask64x8) Mask64x8 +// Or performs a masked bitwise OR operation between two vectors. +// // Asm: VPORQ, CPU Feature: AVX512EVEX func (x Int64x8) MaskedOr(y Int64x8, z Mask64x8) Int64x8 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBQ, CPU Feature: AVX512EVEX func (x Int64x8) MaskedSub(y Int64x8, z Mask64x8) Int64x8 +// Xor performs a masked bitwise XOR operation between two vectors. +// // Asm: VPXORQ, CPU Feature: AVX512EVEX func (x Int64x8) MaskedXor(y Int64x8, z Mask64x8) Int64x8 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDB, CPU Feature: AVX512EVEX func (x Int8x16) MaskedAdd(y Int8x16, z Mask8x16) Int8x16 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPEQB, CPU Feature: AVX512EVEX func (x Int8x16) MaskedEqual(y Int8x16, z Mask8x16) Mask8x16 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPGTB, CPU Feature: AVX512EVEX func (x Int8x16) MaskedGreater(y Int8x16, z Mask8x16) Mask8x16 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x16) MaskedGreaterEqual(y Int8x16, z Mask8x16) Mask8x16 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x16) MaskedLess(y Int8x16, z Mask8x16) Mask8x16 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x16) MaskedLessEqual(y Int8x16, z Mask8x16) Mask8x16 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXSB, CPU Feature: AVX512EVEX func (x Int8x16) MaskedMax(y Int8x16, z Mask8x16) Int8x16 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINSB, CPU Feature: AVX512EVEX func (x Int8x16) MaskedMin(y Int8x16, z Mask8x16) Int8x16 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x16) MaskedNotEqual(y Int8x16, z Mask8x16) Mask8x16 +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// // Asm: VPADDSB, CPU Feature: AVX512EVEX func (x Int8x16) MaskedSaturatedAdd(y Int8x16, z Mask8x16) Int8x16 +// SaturatedSub subtracts corresponding elements of two vectors with saturation. 
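// Editor's sketch, not part of this CL: the Saturated variants clamp instead
// of wrapping. Assuming a package "simd" with the signatures documented above:
//
//	// clampedAdd adds y to x lane-wise in the lanes selected by m, clamping
//	// each int8 result to [-128, 127] rather than wrapping around.
//	func clampedAdd(x, y simd.Int8x16, m simd.Mask8x16) simd.Int8x16 {
//		return x.MaskedSaturatedAdd(y, m)
//	}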
+// // Asm: VPSUBSB, CPU Feature: AVX512EVEX func (x Int8x16) MaskedSaturatedSub(y Int8x16, z Mask8x16) Int8x16 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBB, CPU Feature: AVX512EVEX func (x Int8x16) MaskedSub(y Int8x16, z Mask8x16) Int8x16 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDB, CPU Feature: AVX512EVEX func (x Int8x32) MaskedAdd(y Int8x32, z Mask8x32) Int8x32 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPEQB, CPU Feature: AVX512EVEX func (x Int8x32) MaskedEqual(y Int8x32, z Mask8x32) Mask8x32 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPGTB, CPU Feature: AVX512EVEX func (x Int8x32) MaskedGreater(y Int8x32, z Mask8x32) Mask8x32 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x32) MaskedGreaterEqual(y Int8x32, z Mask8x32) Mask8x32 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x32) MaskedLess(y Int8x32, z Mask8x32) Mask8x32 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x32) MaskedLessEqual(y Int8x32, z Mask8x32) Mask8x32 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXSB, CPU Feature: AVX512EVEX func (x Int8x32) MaskedMax(y Int8x32, z Mask8x32) Int8x32 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINSB, CPU Feature: AVX512EVEX func (x Int8x32) MaskedMin(y Int8x32, z Mask8x32) Int8x32 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x32) MaskedNotEqual(y Int8x32, z Mask8x32) Mask8x32 +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// // Asm: VPADDSB, CPU Feature: AVX512EVEX func (x Int8x32) MaskedSaturatedAdd(y Int8x32, z Mask8x32) Int8x32 +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// // Asm: VPSUBSB, CPU Feature: AVX512EVEX func (x Int8x32) MaskedSaturatedSub(y Int8x32, z Mask8x32) Int8x32 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBB, CPU Feature: AVX512EVEX func (x Int8x32) MaskedSub(y Int8x32, z Mask8x32) Int8x32 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDB, CPU Feature: AVX512EVEX func (x Int8x64) MaskedAdd(y Int8x64, z Mask8x64) Int8x64 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPEQB, CPU Feature: AVX512EVEX func (x Int8x64) MaskedEqual(y Int8x64, z Mask8x64) Mask8x64 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPGTB, CPU Feature: AVX512EVEX func (x Int8x64) MaskedGreater(y Int8x64, z Mask8x64) Mask8x64 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) MaskedGreaterEqual(y Int8x64, z Mask8x64) Mask8x64 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. 
// // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) MaskedLess(y Int8x64, z Mask8x64) Mask8x64 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) MaskedLessEqual(y Int8x64, z Mask8x64) Mask8x64 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXSB, CPU Feature: AVX512EVEX func (x Int8x64) MaskedMax(y Int8x64, z Mask8x64) Int8x64 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINSB, CPU Feature: AVX512EVEX func (x Int8x64) MaskedMin(y Int8x64, z Mask8x64) Int8x64 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) MaskedNotEqual(y Int8x64, z Mask8x64) Mask8x64 +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// // Asm: VPADDSB, CPU Feature: AVX512EVEX func (x Int8x64) MaskedSaturatedAdd(y Int8x64, z Mask8x64) Int8x64 +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// // Asm: VPSUBSB, CPU Feature: AVX512EVEX func (x Int8x64) MaskedSaturatedSub(y Int8x64, z Mask8x64) Int8x64 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBB, CPU Feature: AVX512EVEX func (x Int8x64) MaskedSub(y Int8x64, z Mask8x64) Int8x64 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDW, CPU Feature: AVX512EVEX func (x Uint16x16) MaskedAdd(y Uint16x16, z Mask16x16) Uint16x16 +// Average computes the rounded average of corresponding elements. +// // Asm: VPAVGW, CPU Feature: AVX512EVEX func (x Uint16x16) MaskedAverage(y Uint16x16, z Mask16x16) Uint16x16 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) MaskedEqual(y Uint16x16, z Mask16x16) Mask16x16 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) MaskedGreater(y Uint16x16, z Mask16x16) Mask16x16 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) MaskedGreaterEqual(y Uint16x16, z Mask16x16) Mask16x16 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) MaskedLess(y Uint16x16, z Mask16x16) Mask16x16 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) MaskedLessEqual(y Uint16x16, z Mask16x16) Mask16x16 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXUW, CPU Feature: AVX512EVEX func (x Uint16x16) MaskedMax(y Uint16x16, z Mask16x16) Uint16x16 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINUW, CPU Feature: AVX512EVEX func (x Uint16x16) MaskedMin(y Uint16x16, z Mask16x16) Uint16x16 -// Multiplies the elements from the two sources of size X at index i, store the high X bits of the result of size 2X at index i +// MulHigh multiplies elements and stores the high part of the result, masked. 
// // Asm: VPMULHUW, CPU Feature: AVX512EVEX func (x Uint16x16) MaskedMulHigh(y Uint16x16, z Mask16x16) Uint16x16 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) MaskedNotEqual(y Uint16x16, z Mask16x16) Mask16x16 +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// // Asm: VPADDSW, CPU Feature: AVX512EVEX func (x Uint16x16) MaskedSaturatedAdd(y Uint16x16, z Mask16x16) Uint16x16 +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// // Asm: VPSUBSW, CPU Feature: AVX512EVEX func (x Uint16x16) MaskedSaturatedSub(y Uint16x16, z Mask16x16) Uint16x16 -// Multiply the elements and add the pairs together with saturation, yielding a vector of half as many elements with twice the input element size +// SaturatedUnsignedSignedPairDotProd multiplies the elements and adds the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDUBSW, CPU Feature: AVX512EVEX func (x Uint16x16) MaskedSaturatedUnsignedSignedPairDotProd(y Int16x16, z Mask16x16) Int16x16 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBW, CPU Feature: AVX512EVEX func (x Uint16x16) MaskedSub(y Uint16x16, z Mask16x16) Uint16x16 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDW, CPU Feature: AVX512EVEX func (x Uint16x32) MaskedAdd(y Uint16x32, z Mask16x32) Uint16x32 +// Average computes the rounded average of corresponding elements. +// // Asm: VPAVGW, CPU Feature: AVX512EVEX func (x Uint16x32) MaskedAverage(y Uint16x32, z Mask16x32) Uint16x32 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) MaskedEqual(y Uint16x32, z Mask16x32) Mask16x32 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) MaskedGreater(y Uint16x32, z Mask16x32) Mask16x32 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) MaskedGreaterEqual(y Uint16x32, z Mask16x32) Mask16x32 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) MaskedLess(y Uint16x32, z Mask16x32) Mask16x32 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) MaskedLessEqual(y Uint16x32, z Mask16x32) Mask16x32 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXUW, CPU Feature: AVX512EVEX func (x Uint16x32) MaskedMax(y Uint16x32, z Mask16x32) Uint16x32 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINUW, CPU Feature: AVX512EVEX func (x Uint16x32) MaskedMin(y Uint16x32, z Mask16x32) Uint16x32 -// Multiplies the elements from the two sources of size X at index i, store the high X bits of the result of size 2X at index i +// MulHigh multiplies elements and stores the high part of the result, masked. // // Asm: VPMULHUW, CPU Feature: AVX512EVEX func (x Uint16x32) MaskedMulHigh(y Uint16x32, z Mask16x32) Uint16x32 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality.
+// Const Immediate = 4. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) MaskedNotEqual(y Uint16x32, z Mask16x32) Mask16x32 +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// // Asm: VPADDSW, CPU Feature: AVX512EVEX func (x Uint16x32) MaskedSaturatedAdd(y Uint16x32, z Mask16x32) Uint16x32 +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// // Asm: VPSUBSW, CPU Feature: AVX512EVEX func (x Uint16x32) MaskedSaturatedSub(y Uint16x32, z Mask16x32) Uint16x32 -// Multiply the elements and add the pairs together with saturation, yielding a vector of half as many elements with twice the input element size +// SaturatedUnsignedSignedPairDotProd multiplies the elements and adds the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDUBSW, CPU Feature: AVX512EVEX func (x Uint16x32) MaskedSaturatedUnsignedSignedPairDotProd(y Int16x32, z Mask16x32) Int16x32 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBW, CPU Feature: AVX512EVEX func (x Uint16x32) MaskedSub(y Uint16x32, z Mask16x32) Uint16x32 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDW, CPU Feature: AVX512EVEX func (x Uint16x8) MaskedAdd(y Uint16x8, z Mask16x8) Uint16x8 +// Average computes the rounded average of corresponding elements. +// // Asm: VPAVGW, CPU Feature: AVX512EVEX func (x Uint16x8) MaskedAverage(y Uint16x8, z Mask16x8) Uint16x8 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) MaskedEqual(y Uint16x8, z Mask16x8) Mask16x8 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) MaskedGreater(y Uint16x8, z Mask16x8) Mask16x8 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) MaskedGreaterEqual(y Uint16x8, z Mask16x8) Mask16x8 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) MaskedLess(y Uint16x8, z Mask16x8) Mask16x8 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) MaskedLessEqual(y Uint16x8, z Mask16x8) Mask16x8 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXUW, CPU Feature: AVX512EVEX func (x Uint16x8) MaskedMax(y Uint16x8, z Mask16x8) Uint16x8 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINUW, CPU Feature: AVX512EVEX func (x Uint16x8) MaskedMin(y Uint16x8, z Mask16x8) Uint16x8 -// Multiplies the elements from the two sources of size X at index i, store the high X bits of the result of size 2X at index i +// MulHigh multiplies elements and stores the high part of the result, masked. // // Asm: VPMULHUW, CPU Feature: AVX512EVEX func (x Uint16x8) MaskedMulHigh(y Uint16x8, z Mask16x8) Uint16x8 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) MaskedNotEqual(y Uint16x8, z Mask16x8) Mask16x8 +// SaturatedAdd adds corresponding elements of two vectors with saturation.
+// // Asm: VPADDSW, CPU Feature: AVX512EVEX func (x Uint16x8) MaskedSaturatedAdd(y Uint16x8, z Mask16x8) Uint16x8 +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// // Asm: VPSUBSW, CPU Feature: AVX512EVEX func (x Uint16x8) MaskedSaturatedSub(y Uint16x8, z Mask16x8) Uint16x8 -// Multiply the elements and add the pairs together with saturation, yielding a vector of half as many elements with twice the input element size +// SaturatedUnsignedSignedPairDotProd multiplies the elements and adds the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDUBSW, CPU Feature: AVX512EVEX func (x Uint16x8) MaskedSaturatedUnsignedSignedPairDotProd(y Int16x8, z Mask16x8) Int16x8 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBW, CPU Feature: AVX512EVEX func (x Uint16x8) MaskedSub(y Uint16x8, z Mask16x8) Uint16x8 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDD, CPU Feature: AVX512EVEX func (x Uint32x16) MaskedAdd(y Uint32x16, z Mask32x16) Uint32x16 +// And performs a masked bitwise AND operation between two vectors. +// // Asm: VPANDD, CPU Feature: AVX512EVEX func (x Uint32x16) MaskedAnd(y Uint32x16, z Mask32x16) Uint32x16 +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// // Asm: VPANDND, CPU Feature: AVX512EVEX func (x Uint32x16) MaskedAndNot(y Uint32x16, z Mask32x16) Uint32x16 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) MaskedEqual(y Uint32x16, z Mask32x16) Mask32x16 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) MaskedGreater(y Uint32x16, z Mask32x16) Mask32x16 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) MaskedGreaterEqual(y Uint32x16, z Mask32x16) Mask32x16 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) MaskedLess(y Uint32x16, z Mask32x16) Mask32x16 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) MaskedLessEqual(y Uint32x16, z Mask32x16) Mask32x16 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXUD, CPU Feature: AVX512EVEX func (x Uint32x16) MaskedMax(y Uint32x16, z Mask32x16) Uint32x16 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINUD, CPU Feature: AVX512EVEX func (x Uint32x16) MaskedMin(y Uint32x16, z Mask32x16) Uint32x16 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) MaskedNotEqual(y Uint32x16, z Mask32x16) Mask32x16 +// Or performs a masked bitwise OR operation between two vectors. +// // Asm: VPORD, CPU Feature: AVX512EVEX func (x Uint32x16) MaskedOr(y Uint32x16, z Mask32x16) Uint32x16 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBD, CPU Feature: AVX512EVEX func (x Uint32x16) MaskedSub(y Uint32x16, z Mask32x16) Uint32x16 +// Xor performs a masked bitwise XOR operation between two vectors.
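// Editor's sketch, not part of this CL: the VPMADDUBSW-backed method mixes an
// unsigned receiver with a signed argument, as declared above. Assuming a
// package "simd" exposing these types:
//
//	// usDot multiplies x's elements by y's signed elements and adds adjacent
//	// pairs with saturation, in the lanes selected by m.
//	func usDot(x simd.Uint16x8, y simd.Int16x8, m simd.Mask16x8) simd.Int16x8 {
//		return x.MaskedSaturatedUnsignedSignedPairDotProd(y, m)
//	}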
+// // Asm: VPXORD, CPU Feature: AVX512EVEX func (x Uint32x16) MaskedXor(y Uint32x16, z Mask32x16) Uint32x16 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDD, CPU Feature: AVX512EVEX func (x Uint32x4) MaskedAdd(y Uint32x4, z Mask32x4) Uint32x4 +// And performs a masked bitwise AND operation between two vectors. +// // Asm: VPANDD, CPU Feature: AVX512EVEX func (x Uint32x4) MaskedAnd(y Uint32x4, z Mask32x4) Uint32x4 +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// // Asm: VPANDND, CPU Feature: AVX512EVEX func (x Uint32x4) MaskedAndNot(y Uint32x4, z Mask32x4) Uint32x4 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) MaskedEqual(y Uint32x4, z Mask32x4) Mask32x4 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) MaskedGreater(y Uint32x4, z Mask32x4) Mask32x4 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) MaskedGreaterEqual(y Uint32x4, z Mask32x4) Mask32x4 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) MaskedLess(y Uint32x4, z Mask32x4) Mask32x4 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) MaskedLessEqual(y Uint32x4, z Mask32x4) Mask32x4 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXUD, CPU Feature: AVX512EVEX func (x Uint32x4) MaskedMax(y Uint32x4, z Mask32x4) Uint32x4 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINUD, CPU Feature: AVX512EVEX func (x Uint32x4) MaskedMin(y Uint32x4, z Mask32x4) Uint32x4 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) MaskedNotEqual(y Uint32x4, z Mask32x4) Mask32x4 +// Or performs a masked bitwise OR operation between two vectors. +// // Asm: VPORD, CPU Feature: AVX512EVEX func (x Uint32x4) MaskedOr(y Uint32x4, z Mask32x4) Uint32x4 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBD, CPU Feature: AVX512EVEX func (x Uint32x4) MaskedSub(y Uint32x4, z Mask32x4) Uint32x4 +// Xor performs a masked bitwise XOR operation between two vectors. +// // Asm: VPXORD, CPU Feature: AVX512EVEX func (x Uint32x4) MaskedXor(y Uint32x4, z Mask32x4) Uint32x4 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDD, CPU Feature: AVX512EVEX func (x Uint32x8) MaskedAdd(y Uint32x8, z Mask32x8) Uint32x8 +// And performs a masked bitwise AND operation between two vectors. +// // Asm: VPANDD, CPU Feature: AVX512EVEX func (x Uint32x8) MaskedAnd(y Uint32x8, z Mask32x8) Uint32x8 +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// // Asm: VPANDND, CPU Feature: AVX512EVEX func (x Uint32x8) MaskedAndNot(y Uint32x8, z Mask32x8) Uint32x8 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. 
// // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) MaskedEqual(y Uint32x8, z Mask32x8) Mask32x8 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) MaskedGreater(y Uint32x8, z Mask32x8) Mask32x8 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) MaskedGreaterEqual(y Uint32x8, z Mask32x8) Mask32x8 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) MaskedLess(y Uint32x8, z Mask32x8) Mask32x8 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) MaskedLessEqual(y Uint32x8, z Mask32x8) Mask32x8 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXUD, CPU Feature: AVX512EVEX func (x Uint32x8) MaskedMax(y Uint32x8, z Mask32x8) Uint32x8 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINUD, CPU Feature: AVX512EVEX func (x Uint32x8) MaskedMin(y Uint32x8, z Mask32x8) Uint32x8 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) MaskedNotEqual(y Uint32x8, z Mask32x8) Mask32x8 +// Or performs a masked bitwise OR operation between two vectors. +// // Asm: VPORD, CPU Feature: AVX512EVEX func (x Uint32x8) MaskedOr(y Uint32x8, z Mask32x8) Uint32x8 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBD, CPU Feature: AVX512EVEX func (x Uint32x8) MaskedSub(y Uint32x8, z Mask32x8) Uint32x8 +// Xor performs a masked bitwise XOR operation between two vectors. +// // Asm: VPXORD, CPU Feature: AVX512EVEX func (x Uint32x8) MaskedXor(y Uint32x8, z Mask32x8) Uint32x8 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDQ, CPU Feature: AVX512EVEX func (x Uint64x2) MaskedAdd(y Uint64x2, z Mask64x2) Uint64x2 +// And performs a masked bitwise AND operation between two vectors. +// // Asm: VPANDQ, CPU Feature: AVX512EVEX func (x Uint64x2) MaskedAnd(y Uint64x2, z Mask64x2) Uint64x2 +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// // Asm: VPANDNQ, CPU Feature: AVX512EVEX func (x Uint64x2) MaskedAndNot(y Uint64x2, z Mask64x2) Uint64x2 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) MaskedEqual(y Uint64x2, z Mask64x2) Mask64x2 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) MaskedGreater(y Uint64x2, z Mask64x2) Mask64x2 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) MaskedGreaterEqual(y Uint64x2, z Mask64x2) Mask64x2 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) MaskedLess(y Uint64x2, z Mask64x2) Mask64x2 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. 
// // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) MaskedLessEqual(y Uint64x2, z Mask64x2) Mask64x2 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXUQ, CPU Feature: AVX512EVEX func (x Uint64x2) MaskedMax(y Uint64x2, z Mask64x2) Uint64x2 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINUQ, CPU Feature: AVX512EVEX func (x Uint64x2) MaskedMin(y Uint64x2, z Mask64x2) Uint64x2 -// Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULUDQ, CPU Feature: AVX512EVEX func (x Uint64x2) MaskedMulEvenWiden(y Uint64x2, z Mask64x2) Uint64x2 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) MaskedNotEqual(y Uint64x2, z Mask64x2) Mask64x2 +// Or performs a masked bitwise OR operation between two vectors. +// // Asm: VPORQ, CPU Feature: AVX512EVEX func (x Uint64x2) MaskedOr(y Uint64x2, z Mask64x2) Uint64x2 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBQ, CPU Feature: AVX512EVEX func (x Uint64x2) MaskedSub(y Uint64x2, z Mask64x2) Uint64x2 +// Xor performs a masked bitwise XOR operation between two vectors. +// // Asm: VPXORQ, CPU Feature: AVX512EVEX func (x Uint64x2) MaskedXor(y Uint64x2, z Mask64x2) Uint64x2 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDQ, CPU Feature: AVX512EVEX func (x Uint64x4) MaskedAdd(y Uint64x4, z Mask64x4) Uint64x4 +// And performs a masked bitwise AND operation between two vectors. +// // Asm: VPANDQ, CPU Feature: AVX512EVEX func (x Uint64x4) MaskedAnd(y Uint64x4, z Mask64x4) Uint64x4 +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// // Asm: VPANDNQ, CPU Feature: AVX512EVEX func (x Uint64x4) MaskedAndNot(y Uint64x4, z Mask64x4) Uint64x4 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) MaskedEqual(y Uint64x4, z Mask64x4) Mask64x4 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) MaskedGreater(y Uint64x4, z Mask64x4) Mask64x4 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) MaskedGreaterEqual(y Uint64x4, z Mask64x4) Mask64x4 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) MaskedLess(y Uint64x4, z Mask64x4) Mask64x4 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) MaskedLessEqual(y Uint64x4, z Mask64x4) Mask64x4 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXUQ, CPU Feature: AVX512EVEX func (x Uint64x4) MaskedMax(y Uint64x4, z Mask64x4) Uint64x4 +// Min computes the minimum of corresponding elements. 
+// // Asm: VPMINUQ, CPU Feature: AVX512EVEX func (x Uint64x4) MaskedMin(y Uint64x4, z Mask64x4) Uint64x4 -// Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULUDQ, CPU Feature: AVX512EVEX func (x Uint64x4) MaskedMulEvenWiden(y Uint64x4, z Mask64x4) Uint64x4 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) MaskedNotEqual(y Uint64x4, z Mask64x4) Mask64x4 +// Or performs a masked bitwise OR operation between two vectors. +// // Asm: VPORQ, CPU Feature: AVX512EVEX func (x Uint64x4) MaskedOr(y Uint64x4, z Mask64x4) Uint64x4 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBQ, CPU Feature: AVX512EVEX func (x Uint64x4) MaskedSub(y Uint64x4, z Mask64x4) Uint64x4 +// Xor performs a masked bitwise XOR operation between two vectors. +// // Asm: VPXORQ, CPU Feature: AVX512EVEX func (x Uint64x4) MaskedXor(y Uint64x4, z Mask64x4) Uint64x4 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaskedAdd(y Uint64x8, z Mask64x8) Uint64x8 +// And performs a masked bitwise AND operation between two vectors. +// // Asm: VPANDQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaskedAnd(y Uint64x8, z Mask64x8) Uint64x8 +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// // Asm: VPANDNQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaskedAndNot(y Uint64x8, z Mask64x8) Uint64x8 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaskedEqual(y Uint64x8, z Mask64x8) Mask64x8 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaskedGreater(y Uint64x8, z Mask64x8) Mask64x8 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaskedGreaterEqual(y Uint64x8, z Mask64x8) Mask64x8 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaskedLess(y Uint64x8, z Mask64x8) Mask64x8 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaskedLessEqual(y Uint64x8, z Mask64x8) Mask64x8 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXUQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaskedMax(y Uint64x8, z Mask64x8) Uint64x8 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINUQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaskedMin(y Uint64x8, z Mask64x8) Uint64x8 -// Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULUDQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaskedMulEvenWiden(y Uint64x8, z Mask64x8) Uint64x8 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. 
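// Editor's sketch, not part of this CL: the masked bitwise operations make
// selective bit manipulation straightforward. Assuming a package "simd" with
// the signatures documented above:
//
//	// toggleUnderMask flips, only in the lanes selected by m, the bits of x
//	// that are set in bits.
//	func toggleUnderMask(x, bits simd.Uint64x4, m simd.Mask64x4) simd.Uint64x4 {
//		return x.MaskedXor(bits, m)
//	}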
+// Const Immediate = 4. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaskedNotEqual(y Uint64x8, z Mask64x8) Mask64x8 +// Or performs a masked bitwise OR operation between two vectors. +// // Asm: VPORQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaskedOr(y Uint64x8, z Mask64x8) Uint64x8 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaskedSub(y Uint64x8, z Mask64x8) Uint64x8 +// Xor performs a masked bitwise XOR operation between two vectors. +// // Asm: VPXORQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaskedXor(y Uint64x8, z Mask64x8) Uint64x8 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDB, CPU Feature: AVX512EVEX func (x Uint8x16) MaskedAdd(y Uint8x16, z Mask8x16) Uint8x16 +// Average computes the rounded average of corresponding elements. +// // Asm: VPAVGB, CPU Feature: AVX512EVEX func (x Uint8x16) MaskedAverage(y Uint8x16, z Mask8x16) Uint8x16 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) MaskedEqual(y Uint8x16, z Mask8x16) Mask8x16 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) MaskedGreater(y Uint8x16, z Mask8x16) Mask8x16 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) MaskedGreaterEqual(y Uint8x16, z Mask8x16) Mask8x16 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) MaskedLess(y Uint8x16, z Mask8x16) Mask8x16 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) MaskedLessEqual(y Uint8x16, z Mask8x16) Mask8x16 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXUB, CPU Feature: AVX512EVEX func (x Uint8x16) MaskedMax(y Uint8x16, z Mask8x16) Uint8x16 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINUB, CPU Feature: AVX512EVEX func (x Uint8x16) MaskedMin(y Uint8x16, z Mask8x16) Uint8x16 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) MaskedNotEqual(y Uint8x16, z Mask8x16) Mask8x16 +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// // Asm: VPADDSB, CPU Feature: AVX512EVEX func (x Uint8x16) MaskedSaturatedAdd(y Uint8x16, z Mask8x16) Uint8x16 +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// // Asm: VPSUBSB, CPU Feature: AVX512EVEX func (x Uint8x16) MaskedSaturatedSub(y Uint8x16, z Mask8x16) Uint8x16 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBB, CPU Feature: AVX512EVEX func (x Uint8x16) MaskedSub(y Uint8x16, z Mask8x16) Uint8x16 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDB, CPU Feature: AVX512EVEX func (x Uint8x32) MaskedAdd(y Uint8x32, z Mask8x32) Uint8x32 +// Average computes the rounded average of corresponding elements. 
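// Editor's sketch, not part of this CL: Average maps to VPAVGB, which rounds
// up, i.e. (a+b+1)>>1 per lane. Assuming a package "simd" with the signatures
// documented above:
//
//	// blendBytes averages two byte vectors in the lanes selected by m.
//	func blendBytes(x, y simd.Uint8x16, m simd.Mask8x16) simd.Uint8x16 {
//		return x.MaskedAverage(y, m)
//	}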
+// // Asm: VPAVGB, CPU Feature: AVX512EVEX func (x Uint8x32) MaskedAverage(y Uint8x32, z Mask8x32) Uint8x32 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) MaskedEqual(y Uint8x32, z Mask8x32) Mask8x32 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) MaskedGreater(y Uint8x32, z Mask8x32) Mask8x32 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) MaskedGreaterEqual(y Uint8x32, z Mask8x32) Mask8x32 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) MaskedLess(y Uint8x32, z Mask8x32) Mask8x32 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) MaskedLessEqual(y Uint8x32, z Mask8x32) Mask8x32 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXUB, CPU Feature: AVX512EVEX func (x Uint8x32) MaskedMax(y Uint8x32, z Mask8x32) Uint8x32 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINUB, CPU Feature: AVX512EVEX func (x Uint8x32) MaskedMin(y Uint8x32, z Mask8x32) Uint8x32 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) MaskedNotEqual(y Uint8x32, z Mask8x32) Mask8x32 +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// // Asm: VPADDSB, CPU Feature: AVX512EVEX func (x Uint8x32) MaskedSaturatedAdd(y Uint8x32, z Mask8x32) Uint8x32 +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// // Asm: VPSUBSB, CPU Feature: AVX512EVEX func (x Uint8x32) MaskedSaturatedSub(y Uint8x32, z Mask8x32) Uint8x32 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBB, CPU Feature: AVX512EVEX func (x Uint8x32) MaskedSub(y Uint8x32, z Mask8x32) Uint8x32 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDB, CPU Feature: AVX512EVEX func (x Uint8x64) MaskedAdd(y Uint8x64, z Mask8x64) Uint8x64 +// Average computes the rounded average of corresponding elements. +// // Asm: VPAVGB, CPU Feature: AVX512EVEX func (x Uint8x64) MaskedAverage(y Uint8x64, z Mask8x64) Uint8x64 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) MaskedEqual(y Uint8x64, z Mask8x64) Mask8x64 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) MaskedGreater(y Uint8x64, z Mask8x64) Mask8x64 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) MaskedGreaterEqual(y Uint8x64, z Mask8x64) Mask8x64 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. 
// // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) MaskedLess(y Uint8x64, z Mask8x64) Mask8x64 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) MaskedLessEqual(y Uint8x64, z Mask8x64) Mask8x64 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXUB, CPU Feature: AVX512EVEX func (x Uint8x64) MaskedMax(y Uint8x64, z Mask8x64) Uint8x64 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINUB, CPU Feature: AVX512EVEX func (x Uint8x64) MaskedMin(y Uint8x64, z Mask8x64) Uint8x64 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) MaskedNotEqual(y Uint8x64, z Mask8x64) Mask8x64 +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// // Asm: VPADDSB, CPU Feature: AVX512EVEX func (x Uint8x64) MaskedSaturatedAdd(y Uint8x64, z Mask8x64) Uint8x64 +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// // Asm: VPSUBSB, CPU Feature: AVX512EVEX func (x Uint8x64) MaskedSaturatedSub(y Uint8x64, z Mask8x64) Uint8x64 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBB, CPU Feature: AVX512EVEX func (x Uint8x64) MaskedSub(y Uint8x64, z Mask8x64) Uint8x64 +// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. +// Const Immediate = 10. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) CeilSuppressExceptionWithPrecision(imm8 uint8) Float32x16 +// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. +// Const Immediate = 10. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) CeilSuppressExceptionWithPrecision(imm8 uint8) Float32x4 +// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. +// Const Immediate = 10. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) CeilSuppressExceptionWithPrecision(imm8 uint8) Float32x8 +// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. +// Const Immediate = 10. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) CeilSuppressExceptionWithPrecision(imm8 uint8) Float64x2 +// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. +// Const Immediate = 10. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) CeilSuppressExceptionWithPrecision(imm8 uint8) Float64x4 +// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. +// Const Immediate = 10. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) CeilSuppressExceptionWithPrecision(imm8 uint8) Float64x8 +// CeilWithPrecision rounds elements up with specified precision, masked. +// Const Immediate = 2. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) CeilWithPrecision(imm8 uint8) Float32x16 +// CeilWithPrecision rounds elements up with specified precision, masked. +// Const Immediate = 2. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) CeilWithPrecision(imm8 uint8) Float32x4 +// CeilWithPrecision rounds elements up with specified precision, masked. +// Const Immediate = 2. 
+// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) CeilWithPrecision(imm8 uint8) Float32x8 +// CeilWithPrecision rounds elements up with specified precision, masked. +// Const Immediate = 2. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) CeilWithPrecision(imm8 uint8) Float64x2 +// CeilWithPrecision rounds elements up with specified precision, masked. +// Const Immediate = 2. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) CeilWithPrecision(imm8 uint8) Float64x4 +// CeilWithPrecision rounds elements up with specified precision, masked. +// Const Immediate = 2. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) CeilWithPrecision(imm8 uint8) Float64x8 +// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. +// Const Immediate = 10. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float32x16 +// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. +// Const Immediate = 10. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float32x4 +// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. +// Const Immediate = 10. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float32x8 +// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. +// Const Immediate = 10. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float64x2 +// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. +// Const Immediate = 10. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float64x4 +// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. +// Const Immediate = 10. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float64x8 +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// Const Immediate = 2. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) DiffWithCeilWithPrecision(imm8 uint8) Float32x16 +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// Const Immediate = 2. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) DiffWithCeilWithPrecision(imm8 uint8) Float32x4 +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// Const Immediate = 2. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) DiffWithCeilWithPrecision(imm8 uint8) Float32x8 +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// Const Immediate = 2. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) DiffWithCeilWithPrecision(imm8 uint8) Float64x2 +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// Const Immediate = 2. 
+// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) DiffWithCeilWithPrecision(imm8 uint8) Float64x4 +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// Const Immediate = 2. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) DiffWithCeilWithPrecision(imm8 uint8) Float64x8 +// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. +// Const Immediate = 9. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float32x16 +// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. +// Const Immediate = 9. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float32x4 +// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. +// Const Immediate = 9. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float32x8 +// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. +// Const Immediate = 9. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float64x2 +// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. +// Const Immediate = 9. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float64x4 +// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. +// Const Immediate = 9. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float64x8 +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// Const Immediate = 1. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) DiffWithFloorWithPrecision(imm8 uint8) Float32x16 +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// Const Immediate = 1. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) DiffWithFloorWithPrecision(imm8 uint8) Float32x4 +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// Const Immediate = 1. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) DiffWithFloorWithPrecision(imm8 uint8) Float32x8 +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// Const Immediate = 1. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) DiffWithFloorWithPrecision(imm8 uint8) Float64x2 +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// Const Immediate = 1. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) DiffWithFloorWithPrecision(imm8 uint8) Float64x4 +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// Const Immediate = 1. 
+// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) DiffWithFloorWithPrecision(imm8 uint8) Float64x8 +// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. +// Const Immediate = 8. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float32x16 +// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. +// Const Immediate = 8. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float32x4 +// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. +// Const Immediate = 8. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float32x8 +// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. +// Const Immediate = 8. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float64x2 +// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. +// Const Immediate = 8. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float64x4 +// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. +// Const Immediate = 8. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float64x8 +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// Const Immediate = 0. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) DiffWithRoundWithPrecision(imm8 uint8) Float32x16 +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// Const Immediate = 0. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) DiffWithRoundWithPrecision(imm8 uint8) Float32x4 +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// Const Immediate = 0. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) DiffWithRoundWithPrecision(imm8 uint8) Float32x8 +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// Const Immediate = 0. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) DiffWithRoundWithPrecision(imm8 uint8) Float64x2 +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// Const Immediate = 0. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) DiffWithRoundWithPrecision(imm8 uint8) Float64x4 +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// Const Immediate = 0. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) DiffWithRoundWithPrecision(imm8 uint8) Float64x8 +// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. +// Const Immediate = 11. 
+// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float32x16 +// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. +// Const Immediate = 11. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float32x4 +// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. +// Const Immediate = 11. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float32x8 +// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. +// Const Immediate = 11. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float64x2 +// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. +// Const Immediate = 11. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float64x4 +// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. +// Const Immediate = 11. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float64x8 +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// Const Immediate = 3. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) DiffWithTruncWithPrecision(imm8 uint8) Float32x16 +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// Const Immediate = 3. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) DiffWithTruncWithPrecision(imm8 uint8) Float32x4 +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// Const Immediate = 3. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) DiffWithTruncWithPrecision(imm8 uint8) Float32x8 +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// Const Immediate = 3. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) DiffWithTruncWithPrecision(imm8 uint8) Float64x2 +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// Const Immediate = 3. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) DiffWithTruncWithPrecision(imm8 uint8) Float64x4 +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// Const Immediate = 3. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) DiffWithTruncWithPrecision(imm8 uint8) Float64x8 +// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. +// Const Immediate = 9. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) FloorSuppressExceptionWithPrecision(imm8 uint8) Float32x16 +// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. +// Const Immediate = 9. 
+// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) FloorSuppressExceptionWithPrecision(imm8 uint8) Float32x4 +// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. +// Const Immediate = 9. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) FloorSuppressExceptionWithPrecision(imm8 uint8) Float32x8 +// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. +// Const Immediate = 9. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) FloorSuppressExceptionWithPrecision(imm8 uint8) Float64x2 +// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. +// Const Immediate = 9. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) FloorSuppressExceptionWithPrecision(imm8 uint8) Float64x4 +// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. +// Const Immediate = 9. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) FloorSuppressExceptionWithPrecision(imm8 uint8) Float64x8 +// FloorWithPrecision rounds elements down with specified precision, masked. +// Const Immediate = 1. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) FloorWithPrecision(imm8 uint8) Float32x16 +// FloorWithPrecision rounds elements down with specified precision, masked. +// Const Immediate = 1. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) FloorWithPrecision(imm8 uint8) Float32x4 +// FloorWithPrecision rounds elements down with specified precision, masked. +// Const Immediate = 1. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) FloorWithPrecision(imm8 uint8) Float32x8 +// FloorWithPrecision rounds elements down with specified precision, masked. +// Const Immediate = 1. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) FloorWithPrecision(imm8 uint8) Float64x2 +// FloorWithPrecision rounds elements down with specified precision, masked. +// Const Immediate = 1. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) FloorWithPrecision(imm8 uint8) Float64x4 +// FloorWithPrecision rounds elements down with specified precision, masked. +// Const Immediate = 1. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) FloorWithPrecision(imm8 uint8) Float64x8 +// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. +// Const Immediate = 8. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) RoundSuppressExceptionWithPrecision(imm8 uint8) Float32x16 +// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. +// Const Immediate = 8. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) RoundSuppressExceptionWithPrecision(imm8 uint8) Float32x4 +// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. +// Const Immediate = 8. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) RoundSuppressExceptionWithPrecision(imm8 uint8) Float32x8 +// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. +// Const Immediate = 8. 
+// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) RoundSuppressExceptionWithPrecision(imm8 uint8) Float64x2 +// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. +// Const Immediate = 8. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) RoundSuppressExceptionWithPrecision(imm8 uint8) Float64x4 +// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. +// Const Immediate = 8. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) RoundSuppressExceptionWithPrecision(imm8 uint8) Float64x8 +// RoundWithPrecision rounds elements with specified precision. +// Const Immediate = 0. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) RoundWithPrecision(imm8 uint8) Float32x16 +// RoundWithPrecision rounds elements with specified precision. +// Const Immediate = 0. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) RoundWithPrecision(imm8 uint8) Float32x4 +// RoundWithPrecision rounds elements with specified precision. +// Const Immediate = 0. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) RoundWithPrecision(imm8 uint8) Float32x8 +// RoundWithPrecision rounds elements with specified precision. +// Const Immediate = 0. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) RoundWithPrecision(imm8 uint8) Float64x2 +// RoundWithPrecision rounds elements with specified precision. +// Const Immediate = 0. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) RoundWithPrecision(imm8 uint8) Float64x4 +// RoundWithPrecision rounds elements with specified precision. +// Const Immediate = 0. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) RoundWithPrecision(imm8 uint8) Float64x8 +// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. +// Const Immediate = 11. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) TruncSuppressExceptionWithPrecision(imm8 uint8) Float32x16 +// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. +// Const Immediate = 11. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) TruncSuppressExceptionWithPrecision(imm8 uint8) Float32x4 +// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. +// Const Immediate = 11. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) TruncSuppressExceptionWithPrecision(imm8 uint8) Float32x8 +// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. +// Const Immediate = 11. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) TruncSuppressExceptionWithPrecision(imm8 uint8) Float64x2 +// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. +// Const Immediate = 11. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) TruncSuppressExceptionWithPrecision(imm8 uint8) Float64x4 +// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. +// Const Immediate = 11. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) TruncSuppressExceptionWithPrecision(imm8 uint8) Float64x8 +// TruncWithPrecision truncates elements with specified precision. +// Const Immediate = 3. 
+// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) TruncWithPrecision(imm8 uint8) Float32x16 +// TruncWithPrecision truncates elements with specified precision. +// Const Immediate = 3. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) TruncWithPrecision(imm8 uint8) Float32x4 +// TruncWithPrecision truncates elements with specified precision. +// Const Immediate = 3. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) TruncWithPrecision(imm8 uint8) Float32x8 +// TruncWithPrecision truncates elements with specified precision. +// Const Immediate = 3. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) TruncWithPrecision(imm8 uint8) Float64x2 +// TruncWithPrecision truncates elements with specified precision. +// Const Immediate = 3. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) TruncWithPrecision(imm8 uint8) Float64x4 +// TruncWithPrecision truncates elements with specified precision. +// Const Immediate = 3. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) TruncWithPrecision(imm8 uint8) Float64x8 +// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. +// Const Immediate = 10. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 +// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. +// Const Immediate = 10. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 +// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. +// Const Immediate = 10. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 +// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. +// Const Immediate = 10. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 +// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. +// Const Immediate = 10. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 +// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. +// Const Immediate = 10. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 +// CeilWithPrecision rounds elements up with specified precision, masked. +// Const Immediate = 2. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedCeilWithPrecision(imm uint8, y Mask32x16) Float32x16 +// CeilWithPrecision rounds elements up with specified precision, masked. +// Const Immediate = 2. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedCeilWithPrecision(imm uint8, y Mask32x4) Float32x4 +// CeilWithPrecision rounds elements up with specified precision, masked. +// Const Immediate = 2. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedCeilWithPrecision(imm uint8, y Mask32x8) Float32x8 +// CeilWithPrecision rounds elements up with specified precision, masked. 
+// Const Immediate = 2. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedCeilWithPrecision(imm uint8, y Mask64x2) Float64x2 +// CeilWithPrecision rounds elements up with specified precision, masked. +// Const Immediate = 2. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedCeilWithPrecision(imm uint8, y Mask64x4) Float64x4 +// CeilWithPrecision rounds elements up with specified precision, masked. +// Const Immediate = 2. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedCeilWithPrecision(imm uint8, y Mask64x8) Float64x8 +// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. +// Const Immediate = 10. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 +// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. +// Const Immediate = 10. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 +// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. +// Const Immediate = 10. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 +// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. +// Const Immediate = 10. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 +// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. +// Const Immediate = 10. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 +// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. +// Const Immediate = 10. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// Const Immediate = 2. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask32x16) Float32x16 +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// Const Immediate = 2. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask32x4) Float32x4 +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// Const Immediate = 2. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask32x8) Float32x8 +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// Const Immediate = 2. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask64x2) Float64x2 +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. 
+// Const Immediate = 2. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask64x4) Float64x4 +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// Const Immediate = 2. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask64x8) Float64x8 +// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. +// Const Immediate = 9. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 +// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. +// Const Immediate = 9. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 +// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. +// Const Immediate = 9. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 +// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. +// Const Immediate = 9. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 +// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. +// Const Immediate = 9. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 +// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. +// Const Immediate = 9. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// Const Immediate = 1. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask32x16) Float32x16 +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// Const Immediate = 1. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask32x4) Float32x4 +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// Const Immediate = 1. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask32x8) Float32x8 +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// Const Immediate = 1. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask64x2) Float64x2 +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// Const Immediate = 1. 
+// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask64x4) Float64x4 +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// Const Immediate = 1. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask64x8) Float64x8 +// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. +// Const Immediate = 8. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 +// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. +// Const Immediate = 8. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 +// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. +// Const Immediate = 8. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 +// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. +// Const Immediate = 8. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 +// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. +// Const Immediate = 8. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 +// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. +// Const Immediate = 8. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// Const Immediate = 0. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask32x16) Float32x16 +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// Const Immediate = 0. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask32x4) Float32x4 +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// Const Immediate = 0. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask32x8) Float32x8 +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// Const Immediate = 0. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask64x2) Float64x2 +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// Const Immediate = 0. 
+// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask64x4) Float64x4 +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// Const Immediate = 0. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask64x8) Float64x8 +// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. +// Const Immediate = 11. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 +// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. +// Const Immediate = 11. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 +// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. +// Const Immediate = 11. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 +// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. +// Const Immediate = 11. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 +// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. +// Const Immediate = 11. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 +// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. +// Const Immediate = 11. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// Const Immediate = 3. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask32x16) Float32x16 +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// Const Immediate = 3. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask32x4) Float32x4 +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// Const Immediate = 3. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask32x8) Float32x8 +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// Const Immediate = 3. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask64x2) Float64x2 +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// Const Immediate = 3. 
+// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask64x4) Float64x4 +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// Const Immediate = 3. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask64x8) Float64x8 +// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. +// Const Immediate = 9. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 +// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. +// Const Immediate = 9. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 +// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. +// Const Immediate = 9. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 +// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. +// Const Immediate = 9. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 +// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. +// Const Immediate = 9. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 +// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. +// Const Immediate = 9. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 +// FloorWithPrecision rounds elements down with specified precision, masked. +// Const Immediate = 1. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedFloorWithPrecision(imm uint8, y Mask32x16) Float32x16 +// FloorWithPrecision rounds elements down with specified precision, masked. +// Const Immediate = 1. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedFloorWithPrecision(imm uint8, y Mask32x4) Float32x4 +// FloorWithPrecision rounds elements down with specified precision, masked. +// Const Immediate = 1. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedFloorWithPrecision(imm uint8, y Mask32x8) Float32x8 +// FloorWithPrecision rounds elements down with specified precision, masked. +// Const Immediate = 1. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedFloorWithPrecision(imm uint8, y Mask64x2) Float64x2 +// FloorWithPrecision rounds elements down with specified precision, masked. +// Const Immediate = 1. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedFloorWithPrecision(imm uint8, y Mask64x4) Float64x4 +// FloorWithPrecision rounds elements down with specified precision, masked. +// Const Immediate = 1. 
+// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedFloorWithPrecision(imm uint8, y Mask64x8) Float64x8 +// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. +// Const Immediate = 8. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 +// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. +// Const Immediate = 8. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 +// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. +// Const Immediate = 8. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 +// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. +// Const Immediate = 8. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 +// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. +// Const Immediate = 8. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 +// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. +// Const Immediate = 8. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 +// RoundWithPrecision rounds elements with specified precision. +// Const Immediate = 0. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedRoundWithPrecision(imm uint8, y Mask32x16) Float32x16 +// RoundWithPrecision rounds elements with specified precision. +// Const Immediate = 0. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedRoundWithPrecision(imm uint8, y Mask32x4) Float32x4 +// RoundWithPrecision rounds elements with specified precision. +// Const Immediate = 0. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedRoundWithPrecision(imm uint8, y Mask32x8) Float32x8 +// RoundWithPrecision rounds elements with specified precision. +// Const Immediate = 0. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedRoundWithPrecision(imm uint8, y Mask64x2) Float64x2 +// RoundWithPrecision rounds elements with specified precision. +// Const Immediate = 0. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedRoundWithPrecision(imm uint8, y Mask64x4) Float64x4 +// RoundWithPrecision rounds elements with specified precision. +// Const Immediate = 0. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedRoundWithPrecision(imm uint8, y Mask64x8) Float64x8 +// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. +// Const Immediate = 11. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 +// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. +// Const Immediate = 11. 
+// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 +// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. +// Const Immediate = 11. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 +// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. +// Const Immediate = 11. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 +// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. +// Const Immediate = 11. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 +// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. +// Const Immediate = 11. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 +// TruncWithPrecision truncates elements with specified precision. +// Const Immediate = 3. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedTruncWithPrecision(imm uint8, y Mask32x16) Float32x16 +// TruncWithPrecision truncates elements with specified precision. +// Const Immediate = 3. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedTruncWithPrecision(imm uint8, y Mask32x4) Float32x4 +// TruncWithPrecision truncates elements with specified precision. +// Const Immediate = 3. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedTruncWithPrecision(imm uint8, y Mask32x8) Float32x8 +// TruncWithPrecision truncates elements with specified precision. +// Const Immediate = 3. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedTruncWithPrecision(imm uint8, y Mask64x2) Float64x2 +// TruncWithPrecision truncates elements with specified precision. +// Const Immediate = 3. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedTruncWithPrecision(imm uint8, y Mask64x4) Float64x4 +// TruncWithPrecision truncates elements with specified precision. +// Const Immediate = 3. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedTruncWithPrecision(imm uint8, y Mask64x8) Float64x8 From ded6e0ac7140403480fa4539ed42ae8577eefbf9 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Thu, 12 Jun 2025 16:43:10 +0000 Subject: [PATCH 024/139] [dev.simd] cmd/compile: add more dot products This CL is generated by CL 680215. 
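(For orientation: the new ops wrap the AVX-VNNI dot-product instructions. A rough scalar reference of what one 32-bit lane of VPDPWSSD, surfaced below as PairDotProdAccumulate, computes — this is a sketch inferred from the instruction semantics, and the helper name pairDotProdAccumulateRef is illustrative rather than part of the CL:

func pairDotProdAccumulateRef(acc []int32, x, y []int16) {
	// Each accumulator lane gains the sum of one pair of adjacent
	// int16 products, widened to int32. VPDPWSSDS behaves the same
	// except the final addition saturates instead of wrapping.
	for i := range acc {
		acc[i] += int32(x[2*i])*int32(y[2*i]) + int32(x[2*i+1])*int32(y[2*i+1])
	}
}
)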
Change-Id: Ie085e65e0473a8e96170702d7265d379ec8812ba Reviewed-on: https://go-review.googlesource.com/c/go/+/681298 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/simdssa.go | 40 ++ .../compile/internal/ssa/_gen/simdAMD64.rules | 36 + .../compile/internal/ssa/_gen/simdAMD64ops.go | 24 + .../internal/ssa/_gen/simdgenericOps.go | 36 + src/cmd/compile/internal/ssa/opGen.go | 636 ++++++++++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 450 +++++++++++++ .../compile/internal/ssagen/simdintrinsics.go | 36 + src/simd/stubs_amd64.go | 181 +++++ 8 files changed, 1439 insertions(+) diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 02353c7f7b47b1..7e9abbd3cbe317 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -679,6 +679,34 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPCMPBMasked512: p = simdFp2k1k1Imm8(s, v) + case ssa.OpAMD64VPDPWSSD128, + ssa.OpAMD64VPDPWSSD256, + ssa.OpAMD64VPDPWSSD512, + ssa.OpAMD64VPDPWSSDS128, + ssa.OpAMD64VPDPWSSDS256, + ssa.OpAMD64VPDPWSSDS512, + ssa.OpAMD64VPDPBUSDS128, + ssa.OpAMD64VPDPBUSDS256, + ssa.OpAMD64VPDPBUSDS512, + ssa.OpAMD64VPDPBUSD128, + ssa.OpAMD64VPDPBUSD256, + ssa.OpAMD64VPDPBUSD512: + p = simdFp31ResultInArg0(s, v) + + case ssa.OpAMD64VPDPWSSDMasked512, + ssa.OpAMD64VPDPWSSDMasked128, + ssa.OpAMD64VPDPWSSDMasked256, + ssa.OpAMD64VPDPWSSDSMasked512, + ssa.OpAMD64VPDPWSSDSMasked128, + ssa.OpAMD64VPDPWSSDSMasked256, + ssa.OpAMD64VPDPBUSDSMasked512, + ssa.OpAMD64VPDPBUSDSMasked128, + ssa.OpAMD64VPDPBUSDSMasked256, + ssa.OpAMD64VPDPBUSDMasked512, + ssa.OpAMD64VPDPBUSDMasked128, + ssa.OpAMD64VPDPBUSDMasked256: + p = simdFp3k1fp1ResultInArg0(s, v) + default: // Unknown reg shape return false @@ -884,6 +912,9 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMADDWDMasked256, ssa.OpAMD64VPMADDWDMasked512, ssa.OpAMD64VPMADDWDMasked128, + ssa.OpAMD64VPDPWSSDMasked512, + ssa.OpAMD64VPDPWSSDMasked128, + ssa.OpAMD64VPDPWSSDMasked256, ssa.OpAMD64VPOPCNTWMasked256, ssa.OpAMD64VPOPCNTWMasked512, ssa.OpAMD64VPOPCNTWMasked128, @@ -902,6 +933,9 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPADDSBMasked128, ssa.OpAMD64VPADDSBMasked256, ssa.OpAMD64VPADDSBMasked512, + ssa.OpAMD64VPDPWSSDSMasked512, + ssa.OpAMD64VPDPWSSDSMasked128, + ssa.OpAMD64VPDPWSSDSMasked256, ssa.OpAMD64VPSUBSWMasked256, ssa.OpAMD64VPSUBSWMasked512, ssa.OpAMD64VPSUBSWMasked128, @@ -911,6 +945,9 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMADDUBSWMasked256, ssa.OpAMD64VPMADDUBSWMasked512, ssa.OpAMD64VPMADDUBSWMasked128, + ssa.OpAMD64VPDPBUSDSMasked512, + ssa.OpAMD64VPDPBUSDSMasked128, + ssa.OpAMD64VPDPBUSDSMasked256, ssa.OpAMD64VSQRTPSMasked512, ssa.OpAMD64VSQRTPSMasked128, ssa.OpAMD64VSQRTPSMasked256, @@ -929,6 +966,9 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSUBBMasked128, ssa.OpAMD64VPSUBBMasked256, ssa.OpAMD64VPSUBBMasked512, + ssa.OpAMD64VPDPBUSDMasked512, + ssa.OpAMD64VPDPBUSDMasked128, + ssa.OpAMD64VPDPBUSDMasked256, ssa.OpAMD64VXORPSMasked512, ssa.OpAMD64VXORPSMasked128, ssa.OpAMD64VXORPSMasked256, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index d5caf09daccd6f..efee484b9993c6 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -833,6 +833,9 @@ 
(MaskedPairDotProdInt16x16 x y mask) => (VPMADDWDMasked256 x y (VPMOVVec16x16ToM mask)) (MaskedPairDotProdInt16x32 x y mask) => (VPMADDWDMasked512 x y (VPMOVVec16x32ToM mask)) (MaskedPairDotProdInt16x8 x y mask) => (VPMADDWDMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedPairDotProdAccumulateInt32x16 x y z mask) => (VPDPWSSDMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedPairDotProdAccumulateInt32x4 x y z mask) => (VPDPWSSDMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedPairDotProdAccumulateInt32x8 x y z mask) => (VPDPWSSDMasked256 x y z (VPMOVVec32x8ToM mask)) (MaskedPopCountInt16x16 x mask) => (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) (MaskedPopCountInt16x32 x mask) => (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) (MaskedPopCountInt16x8 x mask) => (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) @@ -881,6 +884,9 @@ (MaskedSaturatedAddUint8x16 x y mask) => (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) (MaskedSaturatedAddUint8x32 x y mask) => (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) (MaskedSaturatedAddUint8x64 x y mask) => (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedSaturatedPairDotProdAccumulateInt32x16 x y z mask) => (VPDPWSSDSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedSaturatedPairDotProdAccumulateInt32x4 x y z mask) => (VPDPWSSDSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedSaturatedPairDotProdAccumulateInt32x8 x y z mask) => (VPDPWSSDSMasked256 x y z (VPMOVVec32x8ToM mask)) (MaskedSaturatedSubInt16x16 x y mask) => (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) (MaskedSaturatedSubInt16x32 x y mask) => (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) (MaskedSaturatedSubInt16x8 x y mask) => (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) @@ -896,6 +902,12 @@ (MaskedSaturatedUnsignedSignedPairDotProdUint16x16 x y mask) => (VPMADDUBSWMasked256 x y (VPMOVVec16x16ToM mask)) (MaskedSaturatedUnsignedSignedPairDotProdUint16x32 x y mask) => (VPMADDUBSWMasked512 x y (VPMOVVec16x32ToM mask)) (MaskedSaturatedUnsignedSignedPairDotProdUint16x8 x y mask) => (VPMADDUBSWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16 x y z mask) => (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4 x y z mask) => (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8 x y z mask) => (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16 x y z mask) => (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4 x y z mask) => (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8 x y z mask) => (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) (MaskedSqrtFloat32x16 x mask) => (VSQRTPSMasked512 x (VPMOVVec32x16ToM mask)) (MaskedSqrtFloat32x4 x mask) => (VSQRTPSMasked128 x (VPMOVVec32x4ToM mask)) (MaskedSqrtFloat32x8 x mask) => (VSQRTPSMasked256 x (VPMOVVec32x8ToM mask)) @@ -944,6 +956,12 @@ (MaskedTruncWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) (MaskedTruncWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) (MaskedTruncWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) +(MaskedUnsignedSignedQuadDotProdAccumulateInt32x16 x y z mask) => (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedUnsignedSignedQuadDotProdAccumulateInt32x4 x y z mask) => 
(VPDPBUSDMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedUnsignedSignedQuadDotProdAccumulateInt32x8 x y z mask) => (VPDPBUSDMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedUnsignedSignedQuadDotProdAccumulateUint32x16 x y z mask) => (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedUnsignedSignedQuadDotProdAccumulateUint32x4 x y z mask) => (VPDPBUSDMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedUnsignedSignedQuadDotProdAccumulateUint32x8 x y z mask) => (VPDPBUSDMasked256 x y z (VPMOVVec32x8ToM mask)) (MaskedXorFloat32x16 x y mask) => (VXORPSMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedXorFloat32x4 x y mask) => (VXORPSMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedXorFloat32x8 x y mask) => (VXORPSMasked256 x y (VPMOVVec32x8ToM mask)) @@ -1118,6 +1136,9 @@ (PairDotProdInt16x16 ...) => (VPMADDWD256 ...) (PairDotProdInt16x32 ...) => (VPMADDWD512 ...) (PairDotProdInt16x8 ...) => (VPMADDWD128 ...) +(PairDotProdAccumulateInt32x16 ...) => (VPDPWSSD512 ...) +(PairDotProdAccumulateInt32x4 ...) => (VPDPWSSD128 ...) +(PairDotProdAccumulateInt32x8 ...) => (VPDPWSSD256 ...) (PairwiseAddFloat32x4 ...) => (VHADDPS128 ...) (PairwiseAddFloat32x8 ...) => (VHADDPS256 ...) (PairwiseAddFloat64x2 ...) => (VHADDPD128 ...) @@ -1194,6 +1215,9 @@ (SaturatedAddUint8x16 ...) => (VPADDSB128 ...) (SaturatedAddUint8x32 ...) => (VPADDSB256 ...) (SaturatedAddUint8x64 ...) => (VPADDSB512 ...) +(SaturatedPairDotProdAccumulateInt32x16 ...) => (VPDPWSSDS512 ...) +(SaturatedPairDotProdAccumulateInt32x4 ...) => (VPDPWSSDS128 ...) +(SaturatedPairDotProdAccumulateInt32x8 ...) => (VPDPWSSDS256 ...) (SaturatedPairwiseAddInt16x16 ...) => (VPHADDSW256 ...) (SaturatedPairwiseAddInt16x8 ...) => (VPHADDSW128 ...) (SaturatedPairwiseSubInt16x16 ...) => (VPHSUBSW256 ...) @@ -1215,6 +1239,12 @@ (SaturatedUnsignedSignedPairDotProdUint16x8 ...) => (VPMADDUBSW128 ...) (SaturatedUnsignedSignedPairDotProdUint8x16 ...) => (VPMADDUBSW128 ...) (SaturatedUnsignedSignedPairDotProdUint8x32 ...) => (VPMADDUBSW256 ...) +(SaturatedUnsignedSignedQuadDotProdAccumulateInt32x16 ...) => (VPDPBUSDS512 ...) +(SaturatedUnsignedSignedQuadDotProdAccumulateInt32x4 ...) => (VPDPBUSDS128 ...) +(SaturatedUnsignedSignedQuadDotProdAccumulateInt32x8 ...) => (VPDPBUSDS256 ...) +(SaturatedUnsignedSignedQuadDotProdAccumulateUint32x16 ...) => (VPDPBUSDS512 ...) +(SaturatedUnsignedSignedQuadDotProdAccumulateUint32x4 ...) => (VPDPBUSDS128 ...) +(SaturatedUnsignedSignedQuadDotProdAccumulateUint32x8 ...) => (VPDPBUSDS256 ...) (SignInt16x16 ...) => (VPSIGNW256 ...) (SignInt16x8 ...) => (VPSIGNW128 ...) (SignInt32x4 ...) => (VPSIGND128 ...) @@ -1273,6 +1303,12 @@ (TruncWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+3] x) (TruncWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+3] x) (TruncWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+3] x) +(UnsignedSignedQuadDotProdAccumulateInt32x16 ...) => (VPDPBUSD512 ...) +(UnsignedSignedQuadDotProdAccumulateInt32x4 ...) => (VPDPBUSD128 ...) +(UnsignedSignedQuadDotProdAccumulateInt32x8 ...) => (VPDPBUSD256 ...) +(UnsignedSignedQuadDotProdAccumulateUint32x16 ...) => (VPDPBUSD512 ...) +(UnsignedSignedQuadDotProdAccumulateUint32x4 ...) => (VPDPBUSD128 ...) +(UnsignedSignedQuadDotProdAccumulateUint32x8 ...) => (VPDPBUSD256 ...) (XorFloat32x16 ...) => (VXORPS512 ...) (XorFloat32x4 ...) => (VXORPS128 ...) (XorFloat32x8 ...) => (VXORPS256 ...) 
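The rules above also lower the unsigned-signed quad dot products onto VPDPBUSD/VPDPBUSDS, mapped from both the Int32xN and Uint32xN generic ops. A rough scalar sketch of the per-lane semantics (128-bit element counts; the function name is invented for illustration and is not part of the patch):

// unsignedSignedQuadDotProdAccumulateSketch shows, per 32-bit lane, what
// VPDPBUSD computes for the 128-bit form: four uint8*int8 products, summed
// and added into the int32 accumulator (VPDPBUSDS saturates that add).
// Illustrative sketch only.
func unsignedSignedQuadDotProdAccumulateSketch(acc [4]int32, x [16]uint8, y [16]int8) [4]int32 {
	for i := 0; i < 4; i++ {
		var sum int32
		for j := 0; j < 4; j++ {
			sum += int32(x[4*i+j]) * int32(y[4*i+j])
		}
		acc[i] += sum
	}
	return acc
}

Because the instruction itself is agnostic to how the 32-bit accumulator is later interpreted, the Uint32xN rules reuse the same VPDPBUSD* machine ops as the Int32xN rules.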
diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index f580973c9dcb19..6cc405c0300fcb 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -283,15 +283,23 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPMINSDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULLDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPORDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPORD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPDPWSSDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VPDPWSSD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPOPCNTDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPDPWSSDSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPDPBUSDSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VPDPBUSDS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPSUBDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPDPBUSDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VPDPBUSD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPXORDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPXORD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXSD512", argLength: 2, reg: fp21, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINSD512", argLength: 2, reg: fp21, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULLD512", argLength: 2, reg: fp21, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPORD512", argLength: 2, reg: fp21, asm: "VPORD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPDPWSSD512", argLength: 3, reg: fp31, asm: "VPDPWSSD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPOPCNTD512", argLength: 1, reg: fp11, asm: "VPOPCNTD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPDPWSSDS512", argLength: 3, reg: fp31, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPDPBUSDS512", argLength: 3, reg: fp31, asm: "VPDPBUSDS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPSUBD512", argLength: 2, reg: fp21, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPDPBUSD512", argLength: 3, reg: fp31, asm: "VPDPBUSD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPXORD512", argLength: 2, reg: fp21, asm: "VPXORD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPABSD128", argLength: 1, reg: fp11, asm: "VPABSD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDD128", argLength: 2, reg: fp21, asm: "VPADDD", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -307,18 +315,26 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPMINSDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINSD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULLDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULLD", commutative: true, typ: "Vec128", 
resultInArg0: false}, {name: "VPORDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPORD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPDPWSSDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VPDPWSSD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPOPCNTDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPDPWSSDSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VPDPWSSDS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPDPBUSDSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VPDPBUSDS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPSUBDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPDPBUSDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VPDPBUSD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPXORDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPXORD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMAXSD128", argLength: 2, reg: fp21, asm: "VPMAXSD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINSD128", argLength: 2, reg: fp21, asm: "VPMINSD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULDQ128", argLength: 2, reg: fp21, asm: "VPMULDQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULLD128", argLength: 2, reg: fp21, asm: "VPMULLD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPDPWSSD128", argLength: 3, reg: fp31, asm: "VPDPWSSD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPHADDD128", argLength: 2, reg: fp21, asm: "VPHADDD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPHSUBD128", argLength: 2, reg: fp21, asm: "VPHSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPOPCNTD128", argLength: 1, reg: fp11, asm: "VPOPCNTD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPDPWSSDS128", argLength: 3, reg: fp31, asm: "VPDPWSSDS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPDPBUSDS128", argLength: 3, reg: fp31, asm: "VPDPBUSDS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPSIGND128", argLength: 2, reg: fp21, asm: "VPSIGND", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBD128", argLength: 2, reg: fp21, asm: "VPSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPDPBUSD128", argLength: 3, reg: fp31, asm: "VPDPBUSD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPABSD256", argLength: 1, reg: fp11, asm: "VPABSD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPADDD256", argLength: 2, reg: fp21, asm: "VPADDD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPCMPEQD256", argLength: 2, reg: fp21, asm: "VPCMPEQD", commutative: true, typ: "Vec256", resultInArg0: false}, @@ -333,18 +349,26 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPMINSDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINSD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULLDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMULLD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPORDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPORD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPDPWSSDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VPDPWSSD", commutative: false, 
typ: "Vec256", resultInArg0: true}, {name: "VPOPCNTDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPDPWSSDSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VPDPWSSDS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPDPBUSDSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VPDPBUSDS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPSUBDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPDPBUSDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VPDPBUSD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPXORDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPXORD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMAXSD256", argLength: 2, reg: fp21, asm: "VPMAXSD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINSD256", argLength: 2, reg: fp21, asm: "VPMINSD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULDQ256", argLength: 2, reg: fp21, asm: "VPMULDQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULLD256", argLength: 2, reg: fp21, asm: "VPMULLD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPDPWSSD256", argLength: 3, reg: fp31, asm: "VPDPWSSD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPHADDD256", argLength: 2, reg: fp21, asm: "VPHADDD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPHSUBD256", argLength: 2, reg: fp21, asm: "VPHSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPOPCNTD256", argLength: 1, reg: fp11, asm: "VPOPCNTD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPDPWSSDS256", argLength: 3, reg: fp31, asm: "VPDPWSSDS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPDPBUSDS256", argLength: 3, reg: fp31, asm: "VPDPBUSDS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPSIGND256", argLength: 2, reg: fp21, asm: "VPSIGND", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBD256", argLength: 2, reg: fp21, asm: "VPSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPDPBUSD256", argLength: 3, reg: fp31, asm: "VPDPBUSD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPABSQ128", argLength: 1, reg: fp11, asm: "VPABSQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDQ128", argLength: 2, reg: fp21, asm: "VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPCMPEQQ128", argLength: 2, reg: fp21, asm: "VPCMPEQQ", commutative: true, typ: "Vec128", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 3e3411e0dfb180..404f1fc69fd07d 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -427,16 +427,24 @@ func simdGenericOps() []opData { {name: "MaskedMulLowInt32x16", argLength: 3, commutative: true}, {name: "MaskedNotEqualInt32x16", argLength: 3, commutative: true}, {name: "MaskedOrInt32x16", argLength: 3, commutative: true}, + {name: "MaskedPairDotProdAccumulateInt32x16", argLength: 4, commutative: false}, {name: "MaskedPopCountInt32x16", argLength: 2, commutative: false}, + {name: "MaskedSaturatedPairDotProdAccumulateInt32x16", argLength: 4, commutative: false}, + {name: 
"MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16", argLength: 4, commutative: false}, {name: "MaskedSubInt32x16", argLength: 3, commutative: false}, + {name: "MaskedUnsignedSignedQuadDotProdAccumulateInt32x16", argLength: 4, commutative: false}, {name: "MaskedXorInt32x16", argLength: 3, commutative: true}, {name: "MaxInt32x16", argLength: 2, commutative: true}, {name: "MinInt32x16", argLength: 2, commutative: true}, {name: "MulLowInt32x16", argLength: 2, commutative: true}, {name: "NotEqualInt32x16", argLength: 2, commutative: true}, {name: "OrInt32x16", argLength: 2, commutative: true}, + {name: "PairDotProdAccumulateInt32x16", argLength: 3, commutative: false}, {name: "PopCountInt32x16", argLength: 1, commutative: false}, + {name: "SaturatedPairDotProdAccumulateInt32x16", argLength: 3, commutative: false}, + {name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x16", argLength: 3, commutative: false}, {name: "SubInt32x16", argLength: 2, commutative: false}, + {name: "UnsignedSignedQuadDotProdAccumulateInt32x16", argLength: 3, commutative: false}, {name: "XorInt32x16", argLength: 2, commutative: true}, {name: "AbsoluteInt32x4", argLength: 1, commutative: false}, {name: "AddInt32x4", argLength: 2, commutative: true}, @@ -461,8 +469,12 @@ func simdGenericOps() []opData { {name: "MaskedMulLowInt32x4", argLength: 3, commutative: true}, {name: "MaskedNotEqualInt32x4", argLength: 3, commutative: true}, {name: "MaskedOrInt32x4", argLength: 3, commutative: true}, + {name: "MaskedPairDotProdAccumulateInt32x4", argLength: 4, commutative: false}, {name: "MaskedPopCountInt32x4", argLength: 2, commutative: false}, + {name: "MaskedSaturatedPairDotProdAccumulateInt32x4", argLength: 4, commutative: false}, + {name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4", argLength: 4, commutative: false}, {name: "MaskedSubInt32x4", argLength: 3, commutative: false}, + {name: "MaskedUnsignedSignedQuadDotProdAccumulateInt32x4", argLength: 4, commutative: false}, {name: "MaskedXorInt32x4", argLength: 3, commutative: true}, {name: "MaxInt32x4", argLength: 2, commutative: true}, {name: "MinInt32x4", argLength: 2, commutative: true}, @@ -470,11 +482,15 @@ func simdGenericOps() []opData { {name: "MulLowInt32x4", argLength: 2, commutative: true}, {name: "NotEqualInt32x4", argLength: 2, commutative: true}, {name: "OrInt32x4", argLength: 2, commutative: true}, + {name: "PairDotProdAccumulateInt32x4", argLength: 3, commutative: false}, {name: "PairwiseAddInt32x4", argLength: 2, commutative: false}, {name: "PairwiseSubInt32x4", argLength: 2, commutative: false}, {name: "PopCountInt32x4", argLength: 1, commutative: false}, + {name: "SaturatedPairDotProdAccumulateInt32x4", argLength: 3, commutative: false}, + {name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x4", argLength: 3, commutative: false}, {name: "SignInt32x4", argLength: 2, commutative: false}, {name: "SubInt32x4", argLength: 2, commutative: false}, + {name: "UnsignedSignedQuadDotProdAccumulateInt32x4", argLength: 3, commutative: false}, {name: "XorInt32x4", argLength: 2, commutative: true}, {name: "AbsoluteInt32x8", argLength: 1, commutative: false}, {name: "AddInt32x8", argLength: 2, commutative: true}, @@ -499,8 +515,12 @@ func simdGenericOps() []opData { {name: "MaskedMulLowInt32x8", argLength: 3, commutative: true}, {name: "MaskedNotEqualInt32x8", argLength: 3, commutative: true}, {name: "MaskedOrInt32x8", argLength: 3, commutative: true}, + {name: "MaskedPairDotProdAccumulateInt32x8", argLength: 4, commutative: false}, {name: 
"MaskedPopCountInt32x8", argLength: 2, commutative: false}, + {name: "MaskedSaturatedPairDotProdAccumulateInt32x8", argLength: 4, commutative: false}, + {name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8", argLength: 4, commutative: false}, {name: "MaskedSubInt32x8", argLength: 3, commutative: false}, + {name: "MaskedUnsignedSignedQuadDotProdAccumulateInt32x8", argLength: 4, commutative: false}, {name: "MaskedXorInt32x8", argLength: 3, commutative: true}, {name: "MaxInt32x8", argLength: 2, commutative: true}, {name: "MinInt32x8", argLength: 2, commutative: true}, @@ -508,11 +528,15 @@ func simdGenericOps() []opData { {name: "MulLowInt32x8", argLength: 2, commutative: true}, {name: "NotEqualInt32x8", argLength: 2, commutative: true}, {name: "OrInt32x8", argLength: 2, commutative: true}, + {name: "PairDotProdAccumulateInt32x8", argLength: 3, commutative: false}, {name: "PairwiseAddInt32x8", argLength: 2, commutative: false}, {name: "PairwiseSubInt32x8", argLength: 2, commutative: false}, {name: "PopCountInt32x8", argLength: 1, commutative: false}, + {name: "SaturatedPairDotProdAccumulateInt32x8", argLength: 3, commutative: false}, + {name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x8", argLength: 3, commutative: false}, {name: "SignInt32x8", argLength: 2, commutative: false}, {name: "SubInt32x8", argLength: 2, commutative: false}, + {name: "UnsignedSignedQuadDotProdAccumulateInt32x8", argLength: 3, commutative: false}, {name: "XorInt32x8", argLength: 2, commutative: true}, {name: "AbsoluteInt64x2", argLength: 1, commutative: false}, {name: "AddInt64x2", argLength: 2, commutative: true}, @@ -845,14 +869,18 @@ func simdGenericOps() []opData { {name: "MaskedNotEqualUint32x16", argLength: 3, commutative: true}, {name: "MaskedOrUint32x16", argLength: 3, commutative: true}, {name: "MaskedPopCountUint32x16", argLength: 2, commutative: false}, + {name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16", argLength: 4, commutative: false}, {name: "MaskedSubUint32x16", argLength: 3, commutative: false}, + {name: "MaskedUnsignedSignedQuadDotProdAccumulateUint32x16", argLength: 4, commutative: false}, {name: "MaskedXorUint32x16", argLength: 3, commutative: true}, {name: "MaxUint32x16", argLength: 2, commutative: true}, {name: "MinUint32x16", argLength: 2, commutative: true}, {name: "NotEqualUint32x16", argLength: 2, commutative: true}, {name: "OrUint32x16", argLength: 2, commutative: true}, {name: "PopCountUint32x16", argLength: 1, commutative: false}, + {name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x16", argLength: 3, commutative: false}, {name: "SubUint32x16", argLength: 2, commutative: false}, + {name: "UnsignedSignedQuadDotProdAccumulateUint32x16", argLength: 3, commutative: false}, {name: "XorUint32x16", argLength: 2, commutative: true}, {name: "AddUint32x4", argLength: 2, commutative: true}, {name: "AndUint32x4", argLength: 2, commutative: true}, @@ -875,7 +903,9 @@ func simdGenericOps() []opData { {name: "MaskedNotEqualUint32x4", argLength: 3, commutative: true}, {name: "MaskedOrUint32x4", argLength: 3, commutative: true}, {name: "MaskedPopCountUint32x4", argLength: 2, commutative: false}, + {name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4", argLength: 4, commutative: false}, {name: "MaskedSubUint32x4", argLength: 3, commutative: false}, + {name: "MaskedUnsignedSignedQuadDotProdAccumulateUint32x4", argLength: 4, commutative: false}, {name: "MaskedXorUint32x4", argLength: 3, commutative: true}, {name: "MaxUint32x4", argLength: 2, 
commutative: true}, {name: "MinUint32x4", argLength: 2, commutative: true}, @@ -885,7 +915,9 @@ func simdGenericOps() []opData { {name: "PairwiseAddUint32x4", argLength: 2, commutative: false}, {name: "PairwiseSubUint32x4", argLength: 2, commutative: false}, {name: "PopCountUint32x4", argLength: 1, commutative: false}, + {name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x4", argLength: 3, commutative: false}, {name: "SubUint32x4", argLength: 2, commutative: false}, + {name: "UnsignedSignedQuadDotProdAccumulateUint32x4", argLength: 3, commutative: false}, {name: "XorUint32x4", argLength: 2, commutative: true}, {name: "AddUint32x8", argLength: 2, commutative: true}, {name: "AndUint32x8", argLength: 2, commutative: true}, @@ -908,7 +940,9 @@ func simdGenericOps() []opData { {name: "MaskedNotEqualUint32x8", argLength: 3, commutative: true}, {name: "MaskedOrUint32x8", argLength: 3, commutative: true}, {name: "MaskedPopCountUint32x8", argLength: 2, commutative: false}, + {name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8", argLength: 4, commutative: false}, {name: "MaskedSubUint32x8", argLength: 3, commutative: false}, + {name: "MaskedUnsignedSignedQuadDotProdAccumulateUint32x8", argLength: 4, commutative: false}, {name: "MaskedXorUint32x8", argLength: 3, commutative: true}, {name: "MaxUint32x8", argLength: 2, commutative: true}, {name: "MinUint32x8", argLength: 2, commutative: true}, @@ -918,7 +952,9 @@ func simdGenericOps() []opData { {name: "PairwiseAddUint32x8", argLength: 2, commutative: false}, {name: "PairwiseSubUint32x8", argLength: 2, commutative: false}, {name: "PopCountUint32x8", argLength: 1, commutative: false}, + {name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x8", argLength: 3, commutative: false}, {name: "SubUint32x8", argLength: 2, commutative: false}, + {name: "UnsignedSignedQuadDotProdAccumulateUint32x8", argLength: 3, commutative: false}, {name: "XorUint32x8", argLength: 2, commutative: true}, {name: "AddUint64x2", argLength: 2, commutative: true}, {name: "AndUint64x2", argLength: 2, commutative: true}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 3ef08ae5559121..26facad933461b 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1476,15 +1476,23 @@ const ( OpAMD64VPMINSDMasked512 OpAMD64VPMULLDMasked512 OpAMD64VPORDMasked512 + OpAMD64VPDPWSSDMasked512 OpAMD64VPOPCNTDMasked512 + OpAMD64VPDPWSSDSMasked512 + OpAMD64VPDPBUSDSMasked512 OpAMD64VPSUBDMasked512 + OpAMD64VPDPBUSDMasked512 OpAMD64VPXORDMasked512 OpAMD64VPMAXSD512 OpAMD64VPMINSD512 OpAMD64VPMULLD512 OpAMD64VPORD512 + OpAMD64VPDPWSSD512 OpAMD64VPOPCNTD512 + OpAMD64VPDPWSSDS512 + OpAMD64VPDPBUSDS512 OpAMD64VPSUBD512 + OpAMD64VPDPBUSD512 OpAMD64VPXORD512 OpAMD64VPABSD128 OpAMD64VPADDD128 @@ -1500,18 +1508,26 @@ const ( OpAMD64VPMINSDMasked128 OpAMD64VPMULLDMasked128 OpAMD64VPORDMasked128 + OpAMD64VPDPWSSDMasked128 OpAMD64VPOPCNTDMasked128 + OpAMD64VPDPWSSDSMasked128 + OpAMD64VPDPBUSDSMasked128 OpAMD64VPSUBDMasked128 + OpAMD64VPDPBUSDMasked128 OpAMD64VPXORDMasked128 OpAMD64VPMAXSD128 OpAMD64VPMINSD128 OpAMD64VPMULDQ128 OpAMD64VPMULLD128 + OpAMD64VPDPWSSD128 OpAMD64VPHADDD128 OpAMD64VPHSUBD128 OpAMD64VPOPCNTD128 + OpAMD64VPDPWSSDS128 + OpAMD64VPDPBUSDS128 OpAMD64VPSIGND128 OpAMD64VPSUBD128 + OpAMD64VPDPBUSD128 OpAMD64VPABSD256 OpAMD64VPADDD256 OpAMD64VPCMPEQD256 @@ -1526,18 +1542,26 @@ const ( OpAMD64VPMINSDMasked256 OpAMD64VPMULLDMasked256 OpAMD64VPORDMasked256 + OpAMD64VPDPWSSDMasked256 
OpAMD64VPOPCNTDMasked256 + OpAMD64VPDPWSSDSMasked256 + OpAMD64VPDPBUSDSMasked256 OpAMD64VPSUBDMasked256 + OpAMD64VPDPBUSDMasked256 OpAMD64VPXORDMasked256 OpAMD64VPMAXSD256 OpAMD64VPMINSD256 OpAMD64VPMULDQ256 OpAMD64VPMULLD256 + OpAMD64VPDPWSSD256 OpAMD64VPHADDD256 OpAMD64VPHSUBD256 OpAMD64VPOPCNTD256 + OpAMD64VPDPWSSDS256 + OpAMD64VPDPBUSDS256 OpAMD64VPSIGND256 OpAMD64VPSUBD256 + OpAMD64VPDPBUSD256 OpAMD64VPABSQ128 OpAMD64VPADDQ128 OpAMD64VPCMPEQQ128 @@ -4491,16 +4515,24 @@ const ( OpMaskedMulLowInt32x16 OpMaskedNotEqualInt32x16 OpMaskedOrInt32x16 + OpMaskedPairDotProdAccumulateInt32x16 OpMaskedPopCountInt32x16 + OpMaskedSaturatedPairDotProdAccumulateInt32x16 + OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16 OpMaskedSubInt32x16 + OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x16 OpMaskedXorInt32x16 OpMaxInt32x16 OpMinInt32x16 OpMulLowInt32x16 OpNotEqualInt32x16 OpOrInt32x16 + OpPairDotProdAccumulateInt32x16 OpPopCountInt32x16 + OpSaturatedPairDotProdAccumulateInt32x16 + OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16 OpSubInt32x16 + OpUnsignedSignedQuadDotProdAccumulateInt32x16 OpXorInt32x16 OpAbsoluteInt32x4 OpAddInt32x4 @@ -4525,8 +4557,12 @@ const ( OpMaskedMulLowInt32x4 OpMaskedNotEqualInt32x4 OpMaskedOrInt32x4 + OpMaskedPairDotProdAccumulateInt32x4 OpMaskedPopCountInt32x4 + OpMaskedSaturatedPairDotProdAccumulateInt32x4 + OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4 OpMaskedSubInt32x4 + OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x4 OpMaskedXorInt32x4 OpMaxInt32x4 OpMinInt32x4 @@ -4534,11 +4570,15 @@ const ( OpMulLowInt32x4 OpNotEqualInt32x4 OpOrInt32x4 + OpPairDotProdAccumulateInt32x4 OpPairwiseAddInt32x4 OpPairwiseSubInt32x4 OpPopCountInt32x4 + OpSaturatedPairDotProdAccumulateInt32x4 + OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4 OpSignInt32x4 OpSubInt32x4 + OpUnsignedSignedQuadDotProdAccumulateInt32x4 OpXorInt32x4 OpAbsoluteInt32x8 OpAddInt32x8 @@ -4563,8 +4603,12 @@ const ( OpMaskedMulLowInt32x8 OpMaskedNotEqualInt32x8 OpMaskedOrInt32x8 + OpMaskedPairDotProdAccumulateInt32x8 OpMaskedPopCountInt32x8 + OpMaskedSaturatedPairDotProdAccumulateInt32x8 + OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8 OpMaskedSubInt32x8 + OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x8 OpMaskedXorInt32x8 OpMaxInt32x8 OpMinInt32x8 @@ -4572,11 +4616,15 @@ const ( OpMulLowInt32x8 OpNotEqualInt32x8 OpOrInt32x8 + OpPairDotProdAccumulateInt32x8 OpPairwiseAddInt32x8 OpPairwiseSubInt32x8 OpPopCountInt32x8 + OpSaturatedPairDotProdAccumulateInt32x8 + OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8 OpSignInt32x8 OpSubInt32x8 + OpUnsignedSignedQuadDotProdAccumulateInt32x8 OpXorInt32x8 OpAbsoluteInt64x2 OpAddInt64x2 @@ -4909,14 +4957,18 @@ const ( OpMaskedNotEqualUint32x16 OpMaskedOrUint32x16 OpMaskedPopCountUint32x16 + OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16 OpMaskedSubUint32x16 + OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x16 OpMaskedXorUint32x16 OpMaxUint32x16 OpMinUint32x16 OpNotEqualUint32x16 OpOrUint32x16 OpPopCountUint32x16 + OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16 OpSubUint32x16 + OpUnsignedSignedQuadDotProdAccumulateUint32x16 OpXorUint32x16 OpAddUint32x4 OpAndUint32x4 @@ -4939,7 +4991,9 @@ const ( OpMaskedNotEqualUint32x4 OpMaskedOrUint32x4 OpMaskedPopCountUint32x4 + OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4 OpMaskedSubUint32x4 + OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x4 OpMaskedXorUint32x4 OpMaxUint32x4 OpMinUint32x4 @@ -4949,7 +5003,9 @@ const ( 
OpPairwiseAddUint32x4 OpPairwiseSubUint32x4 OpPopCountUint32x4 + OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4 OpSubUint32x4 + OpUnsignedSignedQuadDotProdAccumulateUint32x4 OpXorUint32x4 OpAddUint32x8 OpAndUint32x8 @@ -4972,7 +5028,9 @@ const ( OpMaskedNotEqualUint32x8 OpMaskedOrUint32x8 OpMaskedPopCountUint32x8 + OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8 OpMaskedSubUint32x8 + OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x8 OpMaskedXorUint32x8 OpMaxUint32x8 OpMinUint32x8 @@ -4982,7 +5040,9 @@ const ( OpPairwiseAddUint32x8 OpPairwiseSubUint32x8 OpPopCountUint32x8 + OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8 OpSubUint32x8 + OpUnsignedSignedQuadDotProdAccumulateUint32x8 OpXorUint32x8 OpAddUint64x2 OpAndUint64x2 @@ -22116,6 +22176,23 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPDPWSSDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPWSSD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPOPCNTDMasked512", argLen: 2, @@ -22130,6 +22207,40 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPDPWSSDSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPWSSDS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPDPBUSDSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPBUSDS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSUBDMasked512", argLen: 3, @@ -22145,6 +22256,23 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPDPBUSDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPBUSD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPXORDMasked512", argLen: 3, @@ -22221,6 +22349,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPDPWSSD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPDPWSSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPOPCNTD512", argLen: 1, @@ -22234,6 +22378,38 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPDPWSSDS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPDPWSSDS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPDPBUSDS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPDPBUSDS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSUBD512", argLen: 2, @@ -22248,6 +22424,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPDPBUSD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPDPBUSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPXORD512", argLen: 2, @@ -22477,6 +22669,23 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPDPWSSDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPWSSD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPOPCNTDMasked128", argLen: 2, @@ -22491,6 +22700,40 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPDPWSSDSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPWSSDS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPDPBUSDSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPBUSDS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSUBDMasked128", argLen: 3, @@ -22506,6 +22749,23 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPDPBUSDMasked128", + argLen: 4, + resultInArg0: true, + 
asm: x86.AVPDPBUSD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPXORDMasked128", argLen: 3, @@ -22582,6 +22842,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPDPWSSD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPDPWSSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPHADDD128", argLen: 2, @@ -22623,6 +22899,38 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPDPWSSDS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPDPWSSDS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPDPBUSDS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPDPBUSDS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSIGND128", argLen: 2, @@ -22651,6 +22959,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPDPBUSD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPDPBUSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPABSD256", argLen: 1, @@ -22865,6 +23189,23 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPDPWSSDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPWSSD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPOPCNTDMasked256", argLen: 2, @@ -22879,6 +23220,40 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPDPWSSDSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPWSSDS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPDPBUSDSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPBUSDS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSUBDMasked256", argLen: 3, @@ -22894,6 +23269,23 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPDPBUSDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPBUSD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPXORDMasked256", argLen: 3, @@ -22970,6 +23362,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPDPWSSD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPDPWSSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPHADDD256", argLen: 2, @@ -23011,6 +23419,38 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPDPWSSDS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPDPWSSDS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPDPBUSDS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPDPBUSDS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSIGND256", argLen: 2, @@ -23039,6 +23479,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPDPBUSD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPDPBUSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPABSQ128", argLen: 1, @@ -57134,16 +57590,36 @@ var opcodeTable = 
[...]opInfo{ commutative: true, generic: true, }, + { + name: "MaskedPairDotProdAccumulateInt32x16", + argLen: 4, + generic: true, + }, { name: "MaskedPopCountInt32x16", argLen: 2, generic: true, }, + { + name: "MaskedSaturatedPairDotProdAccumulateInt32x16", + argLen: 4, + generic: true, + }, + { + name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16", + argLen: 4, + generic: true, + }, { name: "MaskedSubInt32x16", argLen: 3, generic: true, }, + { + name: "MaskedUnsignedSignedQuadDotProdAccumulateInt32x16", + argLen: 4, + generic: true, + }, { name: "MaskedXorInt32x16", argLen: 3, @@ -57180,16 +57656,36 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "PairDotProdAccumulateInt32x16", + argLen: 3, + generic: true, + }, { name: "PopCountInt32x16", argLen: 1, generic: true, }, + { + name: "SaturatedPairDotProdAccumulateInt32x16", + argLen: 3, + generic: true, + }, + { + name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x16", + argLen: 3, + generic: true, + }, { name: "SubInt32x16", argLen: 2, generic: true, }, + { + name: "UnsignedSignedQuadDotProdAccumulateInt32x16", + argLen: 3, + generic: true, + }, { name: "XorInt32x16", argLen: 2, @@ -57324,16 +57820,36 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MaskedPairDotProdAccumulateInt32x4", + argLen: 4, + generic: true, + }, { name: "MaskedPopCountInt32x4", argLen: 2, generic: true, }, + { + name: "MaskedSaturatedPairDotProdAccumulateInt32x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4", + argLen: 4, + generic: true, + }, { name: "MaskedSubInt32x4", argLen: 3, generic: true, }, + { + name: "MaskedUnsignedSignedQuadDotProdAccumulateInt32x4", + argLen: 4, + generic: true, + }, { name: "MaskedXorInt32x4", argLen: 3, @@ -57376,6 +57892,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "PairDotProdAccumulateInt32x4", + argLen: 3, + generic: true, + }, { name: "PairwiseAddInt32x4", argLen: 2, @@ -57391,6 +57912,16 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "SaturatedPairDotProdAccumulateInt32x4", + argLen: 3, + generic: true, + }, + { + name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x4", + argLen: 3, + generic: true, + }, { name: "SignInt32x4", argLen: 2, @@ -57401,6 +57932,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "UnsignedSignedQuadDotProdAccumulateInt32x4", + argLen: 3, + generic: true, + }, { name: "XorInt32x4", argLen: 2, @@ -57535,16 +58071,36 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MaskedPairDotProdAccumulateInt32x8", + argLen: 4, + generic: true, + }, { name: "MaskedPopCountInt32x8", argLen: 2, generic: true, }, + { + name: "MaskedSaturatedPairDotProdAccumulateInt32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8", + argLen: 4, + generic: true, + }, { name: "MaskedSubInt32x8", argLen: 3, generic: true, }, + { + name: "MaskedUnsignedSignedQuadDotProdAccumulateInt32x8", + argLen: 4, + generic: true, + }, { name: "MaskedXorInt32x8", argLen: 3, @@ -57587,6 +58143,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "PairDotProdAccumulateInt32x8", + argLen: 3, + generic: true, + }, { name: "PairwiseAddInt32x8", argLen: 2, @@ -57602,6 +58163,16 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: 
"SaturatedPairDotProdAccumulateInt32x8", + argLen: 3, + generic: true, + }, + { + name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x8", + argLen: 3, + generic: true, + }, { name: "SignInt32x8", argLen: 2, @@ -57612,6 +58183,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "UnsignedSignedQuadDotProdAccumulateInt32x8", + argLen: 3, + generic: true, + }, { name: "XorInt32x8", argLen: 2, @@ -59451,11 +60027,21 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16", + argLen: 4, + generic: true, + }, { name: "MaskedSubUint32x16", argLen: 3, generic: true, }, + { + name: "MaskedUnsignedSignedQuadDotProdAccumulateUint32x16", + argLen: 4, + generic: true, + }, { name: "MaskedXorUint32x16", argLen: 3, @@ -59491,11 +60077,21 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x16", + argLen: 3, + generic: true, + }, { name: "SubUint32x16", argLen: 2, generic: true, }, + { + name: "UnsignedSignedQuadDotProdAccumulateUint32x16", + argLen: 3, + generic: true, + }, { name: "XorUint32x16", argLen: 2, @@ -59619,11 +60215,21 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4", + argLen: 4, + generic: true, + }, { name: "MaskedSubUint32x4", argLen: 3, generic: true, }, + { + name: "MaskedUnsignedSignedQuadDotProdAccumulateUint32x4", + argLen: 4, + generic: true, + }, { name: "MaskedXorUint32x4", argLen: 3, @@ -59675,11 +60281,21 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x4", + argLen: 3, + generic: true, + }, { name: "SubUint32x4", argLen: 2, generic: true, }, + { + name: "UnsignedSignedQuadDotProdAccumulateUint32x4", + argLen: 3, + generic: true, + }, { name: "XorUint32x4", argLen: 2, @@ -59803,11 +60419,21 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8", + argLen: 4, + generic: true, + }, { name: "MaskedSubUint32x8", argLen: 3, generic: true, }, + { + name: "MaskedUnsignedSignedQuadDotProdAccumulateUint32x8", + argLen: 4, + generic: true, + }, { name: "MaskedXorUint32x8", argLen: 3, @@ -59859,11 +60485,21 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x8", + argLen: 3, + generic: true, + }, { name: "SubUint32x8", argLen: 2, generic: true, }, + { + name: "UnsignedSignedQuadDotProdAccumulateUint32x8", + argLen: 3, + generic: true, + }, { name: "XorUint32x8", argLen: 2, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 3605e75213c542..60469f49d944da 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -2696,6 +2696,12 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedOrUint64x4(v) case OpMaskedOrUint64x8: return rewriteValueAMD64_OpMaskedOrUint64x8(v) + case OpMaskedPairDotProdAccumulateInt32x16: + return rewriteValueAMD64_OpMaskedPairDotProdAccumulateInt32x16(v) + case OpMaskedPairDotProdAccumulateInt32x4: + return rewriteValueAMD64_OpMaskedPairDotProdAccumulateInt32x4(v) + case OpMaskedPairDotProdAccumulateInt32x8: + return rewriteValueAMD64_OpMaskedPairDotProdAccumulateInt32x8(v) case OpMaskedPairDotProdInt16x16: return 
rewriteValueAMD64_OpMaskedPairDotProdInt16x16(v) case OpMaskedPairDotProdInt16x32: @@ -2798,6 +2804,12 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedSaturatedAddUint8x32(v) case OpMaskedSaturatedAddUint8x64: return rewriteValueAMD64_OpMaskedSaturatedAddUint8x64(v) + case OpMaskedSaturatedPairDotProdAccumulateInt32x16: + return rewriteValueAMD64_OpMaskedSaturatedPairDotProdAccumulateInt32x16(v) + case OpMaskedSaturatedPairDotProdAccumulateInt32x4: + return rewriteValueAMD64_OpMaskedSaturatedPairDotProdAccumulateInt32x4(v) + case OpMaskedSaturatedPairDotProdAccumulateInt32x8: + return rewriteValueAMD64_OpMaskedSaturatedPairDotProdAccumulateInt32x8(v) case OpMaskedSaturatedSubInt16x16: return rewriteValueAMD64_OpMaskedSaturatedSubInt16x16(v) case OpMaskedSaturatedSubInt16x32: @@ -2828,6 +2840,18 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint16x32(v) case OpMaskedSaturatedUnsignedSignedPairDotProdUint16x8: return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint16x8(v) + case OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16: + return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16(v) + case OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4: + return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4(v) + case OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8: + return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8(v) + case OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16: + return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16(v) + case OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4: + return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4(v) + case OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8: + return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8(v) case OpMaskedSqrtFloat32x16: return rewriteValueAMD64_OpMaskedSqrtFloat32x16(v) case OpMaskedSqrtFloat32x4: @@ -2924,6 +2948,18 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat64x4(v) case OpMaskedTruncWithPrecisionFloat64x8: return rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat64x8(v) + case OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x16: + return rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x16(v) + case OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x4: + return rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x4(v) + case OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x8: + return rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x8(v) + case OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x16: + return rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x16(v) + case OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x4: + return rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x4(v) + case OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x8: + return rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x8(v) case OpMaskedXorFloat32x16: return rewriteValueAMD64_OpMaskedXorFloat32x16(v) case OpMaskedXorFloat32x4: @@ -3490,6 +3526,15 @@ func rewriteValueAMD64(v *Value) bool { case OpOrUint8x32: v.Op = OpAMD64VPOR256 return true + case OpPairDotProdAccumulateInt32x16: + v.Op = OpAMD64VPDPWSSD512 + return true + case 
OpPairDotProdAccumulateInt32x4: + v.Op = OpAMD64VPDPWSSD128 + return true + case OpPairDotProdAccumulateInt32x8: + v.Op = OpAMD64VPDPWSSD256 + return true case OpPairDotProdInt16x16: v.Op = OpAMD64VPMADDWD256 return true @@ -3813,6 +3858,15 @@ func rewriteValueAMD64(v *Value) bool { case OpSaturatedAddUint8x64: v.Op = OpAMD64VPADDSB512 return true + case OpSaturatedPairDotProdAccumulateInt32x16: + v.Op = OpAMD64VPDPWSSDS512 + return true + case OpSaturatedPairDotProdAccumulateInt32x4: + v.Op = OpAMD64VPDPWSSDS128 + return true + case OpSaturatedPairDotProdAccumulateInt32x8: + v.Op = OpAMD64VPDPWSSDS256 + return true case OpSaturatedPairwiseAddInt16x16: v.Op = OpAMD64VPHADDSW256 return true @@ -3876,6 +3930,24 @@ func rewriteValueAMD64(v *Value) bool { case OpSaturatedUnsignedSignedPairDotProdUint8x32: v.Op = OpAMD64VPMADDUBSW256 return true + case OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16: + v.Op = OpAMD64VPDPBUSDS512 + return true + case OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4: + v.Op = OpAMD64VPDPBUSDS128 + return true + case OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8: + v.Op = OpAMD64VPDPBUSDS256 + return true + case OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16: + v.Op = OpAMD64VPDPBUSDS512 + return true + case OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4: + v.Op = OpAMD64VPDPBUSDS128 + return true + case OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8: + v.Op = OpAMD64VPDPBUSDS256 + return true case OpSelect0: return rewriteValueAMD64_OpSelect0(v) case OpSelect1: @@ -4119,6 +4191,24 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpTruncWithPrecisionFloat64x4(v) case OpTruncWithPrecisionFloat64x8: return rewriteValueAMD64_OpTruncWithPrecisionFloat64x8(v) + case OpUnsignedSignedQuadDotProdAccumulateInt32x16: + v.Op = OpAMD64VPDPBUSD512 + return true + case OpUnsignedSignedQuadDotProdAccumulateInt32x4: + v.Op = OpAMD64VPDPBUSD128 + return true + case OpUnsignedSignedQuadDotProdAccumulateInt32x8: + v.Op = OpAMD64VPDPBUSD256 + return true + case OpUnsignedSignedQuadDotProdAccumulateUint32x16: + v.Op = OpAMD64VPDPBUSD512 + return true + case OpUnsignedSignedQuadDotProdAccumulateUint32x4: + v.Op = OpAMD64VPDPBUSD128 + return true + case OpUnsignedSignedQuadDotProdAccumulateUint32x8: + v.Op = OpAMD64VPDPBUSD256 + return true case OpWB: v.Op = OpAMD64LoweredWB return true @@ -42772,6 +42862,66 @@ func rewriteValueAMD64_OpMaskedOrUint64x8(v *Value) bool { return true } } +func rewriteValueAMD64_OpMaskedPairDotProdAccumulateInt32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPairDotProdAccumulateInt32x16 x y z mask) + // result: (VPDPWSSDMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPWSSDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPairDotProdAccumulateInt32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPairDotProdAccumulateInt32x4 x y z mask) + // result: (VPDPWSSDMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPWSSDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func 
rewriteValueAMD64_OpMaskedPairDotProdAccumulateInt32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPairDotProdAccumulateInt32x8 x y z mask) + // result: (VPDPWSSDMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPWSSDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} func rewriteValueAMD64_OpMaskedPairDotProdInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -43642,6 +43792,66 @@ func rewriteValueAMD64_OpMaskedSaturatedAddUint8x64(v *Value) bool { return true } } +func rewriteValueAMD64_OpMaskedSaturatedPairDotProdAccumulateInt32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedPairDotProdAccumulateInt32x16 x y z mask) + // result: (VPDPWSSDSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPWSSDSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedPairDotProdAccumulateInt32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedPairDotProdAccumulateInt32x4 x y z mask) + // result: (VPDPWSSDSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPWSSDSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedPairDotProdAccumulateInt32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedPairDotProdAccumulateInt32x8 x y z mask) + // result: (VPDPWSSDSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPWSSDSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} func rewriteValueAMD64_OpMaskedSaturatedSubInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -43912,6 +44122,126 @@ func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint16x8(v *Val return true } } +func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16 x y z mask) + // result: (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4 x y z mask) + // result: (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDSMasked128) + v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8 x y z mask) + // result: (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16 x y z mask) + // result: (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4 x y z mask) + // result: (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8 x y z mask) + // result: (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} func rewriteValueAMD64_OpMaskedSqrtFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -44764,6 +45094,126 @@ func rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat64x8(v *Value) bool { return true } } +func rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedUnsignedSignedQuadDotProdAccumulateInt32x16 x y z mask) + // result: (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedUnsignedSignedQuadDotProdAccumulateInt32x4 x y z mask) + // result: (VPDPBUSDMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDMasked128) + v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedUnsignedSignedQuadDotProdAccumulateInt32x8 x y z mask) + // result: (VPDPBUSDMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedUnsignedSignedQuadDotProdAccumulateUint32x16 x y z mask) + // result: (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedUnsignedSignedQuadDotProdAccumulateUint32x4 x y z mask) + // result: (VPDPBUSDMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedUnsignedSignedQuadDotProdAccumulateUint32x8 x y z mask) + // result: (VPDPBUSDMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} func rewriteValueAMD64_OpMaskedXorFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 7ac5f74246edc8..b7b80a706311ea 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -833,6 +833,10 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Int32x16.MaskedOr", opLen3(ssa.OpMaskedOrInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x16.MaskedSub", opLen3(ssa.OpMaskedSubInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x16.MaskedXor", opLen3(ssa.OpMaskedXorInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.PairDotProdAccumulate", opLen3(ssa.OpPairDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.SaturatedPairDotProdAccumulate", opLen3(ssa.OpSaturatedPairDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.MaskedAdd", opLen3(ssa.OpMaskedAddInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x4.MaskedAnd", opLen3(ssa.OpMaskedAndInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt32x4, types.TypeVec128), sys.AMD64) @@ -848,6 +852,10 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Int32x4.MaskedOr", opLen3(ssa.OpMaskedOrInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x4.MaskedSub", opLen3(ssa.OpMaskedSubInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x4.MaskedXor", opLen3(ssa.OpMaskedXorInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.PairDotProdAccumulate", opLen3(ssa.OpPairDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.SaturatedPairDotProdAccumulate", opLen3(ssa.OpSaturatedPairDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.MaskedAdd", opLen3(ssa.OpMaskedAddInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x8.MaskedAnd", opLen3(ssa.OpMaskedAndInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt32x8, types.TypeVec256), sys.AMD64) @@ -863,6 +871,10 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Int32x8.MaskedOr", opLen3(ssa.OpMaskedOrInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x8.MaskedSub", opLen3(ssa.OpMaskedSubInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x8.MaskedXor", opLen3(ssa.OpMaskedXorInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.PairDotProdAccumulate", opLen3(ssa.OpPairDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.SaturatedPairDotProdAccumulate", opLen3(ssa.OpSaturatedPairDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int64x2.MaskedAdd", opLen3(ssa.OpMaskedAddInt64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int64x2.MaskedAnd", opLen3(ssa.OpMaskedAndInt64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int64x2.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt64x2, types.TypeVec128), sys.AMD64) @@ -1006,6 +1018,8 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint32x16.MaskedOr", opLen3(ssa.OpMaskedOrUint32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint32x16.MaskedSub", opLen3(ssa.OpMaskedSubUint32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint32x16.MaskedXor", opLen3(ssa.OpMaskedXorUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateUint32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint32x4.MaskedAdd", opLen3(ssa.OpMaskedAddUint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x4.MaskedAnd", opLen3(ssa.OpMaskedAndUint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint32x4, types.TypeVec128), sys.AMD64) @@ -1020,6 +1034,8 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint32x4.MaskedOr", opLen3(ssa.OpMaskedOrUint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x4.MaskedSub", opLen3(ssa.OpMaskedSubUint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x4.MaskedXor", opLen3(ssa.OpMaskedXorUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateUint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x8.MaskedAdd", opLen3(ssa.OpMaskedAddUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x8.MaskedAnd", opLen3(ssa.OpMaskedAndUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint32x8, types.TypeVec256), sys.AMD64) @@ -1034,6 +1050,8 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint32x8.MaskedOr", opLen3(ssa.OpMaskedOrUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x8.MaskedSub", opLen3(ssa.OpMaskedSubUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x8.MaskedXor", opLen3(ssa.OpMaskedXorUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x2.MaskedAdd", opLen3(ssa.OpMaskedAddUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x2.MaskedAnd", opLen3(ssa.OpMaskedAndUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x2.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint64x2, types.TypeVec128), sys.AMD64) @@ -1118,6 +1136,24 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint8x64.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x64.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x64.MaskedSub", opLen3(ssa.OpMaskedSubUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedPairDotProdAccumulate", opLen4(ssa.OpMaskedPairDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedSaturatedPairDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedPairDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedPairDotProdAccumulate", opLen4(ssa.OpMaskedPairDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedSaturatedPairDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedPairDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedPairDotProdAccumulate", opLen4(ssa.OpMaskedPairDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedSaturatedPairDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedPairDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, 
"Uint32x16.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) diff --git a/src/simd/stubs_amd64.go b/src/simd/stubs_amd64.go index 83edaf2270025e..49af32bc4fca61 100644 --- a/src/simd/stubs_amd64.go +++ b/src/simd/stubs_amd64.go @@ -766,6 +766,7 @@ func (x Float64x2) AndNot(y Float64x2) Float64x2 func (x Float64x2) Div(y Float64x2) Float64x2 // DotProdBroadcast multiplies all elements and broadcasts the sum. +// Const Immediate = 127. // // Asm: VDPPD, CPU Feature: AVX func (x Float64x2) DotProdBroadcast(y Float64x2) Float64x2 @@ -4437,6 +4438,26 @@ func (x Int32x16) MaskedSub(y Int32x16, z Mask32x16) Int32x16 // Asm: VPXORD, CPU Feature: AVX512EVEX func (x Int32x16) MaskedXor(y Int32x16, z Mask32x16) Int32x16 +// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSD, CPU Feature: AVX512EVEX +func (x Int32x16) PairDotProdAccumulate(y Int16x32, z Int32x16) Int32x16 + +// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX +func (x Int32x16) SaturatedPairDotProdAccumulate(y Int16x32, z Int32x16) Int32x16 + +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +func (x Int32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16) Int32x16 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Int32x16) UnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16) Int32x16 + // Add adds corresponding elements of two vectors. // // Asm: VPADDD, CPU Feature: AVX512EVEX @@ -4518,6 +4539,26 @@ func (x Int32x4) MaskedSub(y Int32x4, z Mask32x4) Int32x4 // Asm: VPXORD, CPU Feature: AVX512EVEX func (x Int32x4) MaskedXor(y Int32x4, z Mask32x4) Int32x4 +// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. 
+// +// Asm: VPDPWSSD, CPU Feature: AVX_VNNI +func (x Int32x4) PairDotProdAccumulate(y Int32x4, z Int32x4) Int32x4 + +// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSDS, CPU Feature: AVX_VNNI +func (x Int32x4) SaturatedPairDotProdAccumulate(y Int32x4, z Int32x4) Int32x4 + +// SaturatedUnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI +func (x Int32x4) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint32x4, z Int32x4) Int32x4 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX_VNNI +func (x Int32x4) UnsignedSignedQuadDotProdAccumulate(y Uint32x4, z Int32x4) Int32x4 + // Add adds corresponding elements of two vectors. // // Asm: VPADDD, CPU Feature: AVX512EVEX @@ -4599,6 +4640,26 @@ func (x Int32x8) MaskedSub(y Int32x8, z Mask32x8) Int32x8 // Asm: VPXORD, CPU Feature: AVX512EVEX func (x Int32x8) MaskedXor(y Int32x8, z Mask32x8) Int32x8 +// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSD, CPU Feature: AVX_VNNI +func (x Int32x8) PairDotProdAccumulate(y Int32x8, z Int32x8) Int32x8 + +// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSDS, CPU Feature: AVX_VNNI +func (x Int32x8) SaturatedPairDotProdAccumulate(y Int32x8, z Int32x8) Int32x8 + +// SaturatedUnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI +func (x Int32x8) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint32x8, z Int32x8) Int32x8 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX_VNNI +func (x Int32x8) UnsignedSignedQuadDotProdAccumulate(y Uint32x8, z Int32x8) Int32x8 + // Add adds corresponding elements of two vectors. // // Asm: VPADDQ, CPU Feature: AVX512EVEX @@ -5380,6 +5441,16 @@ func (x Uint32x16) MaskedSub(y Uint32x16, z Mask32x16) Uint32x16 // Asm: VPXORD, CPU Feature: AVX512EVEX func (x Uint32x16) MaskedXor(y Uint32x16, z Mask32x16) Uint32x16 +// SaturatedUnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +func (x Uint32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16) Uint32x16 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Uint32x16) UnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16) Uint32x16 + // Add adds corresponding elements of two vectors. // // Asm: VPADDD, CPU Feature: AVX512EVEX @@ -5456,6 +5527,16 @@ func (x Uint32x4) MaskedSub(y Uint32x4, z Mask32x4) Uint32x4 // Asm: VPXORD, CPU Feature: AVX512EVEX func (x Uint32x4) MaskedXor(y Uint32x4, z Mask32x4) Uint32x4 +// SaturatedUnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x.
+// +// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI +func (x Uint32x4) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint32x4, z Int32x4) Uint32x4 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX_VNNI +func (x Uint32x4) UnsignedSignedQuadDotProdAccumulate(y Uint32x4, z Int32x4) Uint32x4 + // Add adds corresponding elements of two vectors. // // Asm: VPADDD, CPU Feature: AVX512EVEX @@ -5532,6 +5613,16 @@ func (x Uint32x8) MaskedSub(y Uint32x8, z Mask32x8) Uint32x8 // Asm: VPXORD, CPU Feature: AVX512EVEX func (x Uint32x8) MaskedXor(y Uint32x8, z Mask32x8) Uint32x8 +// SaturatedUnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI +func (x Uint32x8) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint32x8, z Int32x8) Uint32x8 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX_VNNI +func (x Uint32x8) UnsignedSignedQuadDotProdAccumulate(y Uint32x8, z Int32x8) Uint32x8 + // Add adds corresponding elements of two vectors. // // Asm: VPADDQ, CPU Feature: AVX512EVEX @@ -5991,6 +6082,96 @@ func (x Uint8x64) MaskedSaturatedSub(y Uint8x64, z Mask8x64) Uint8x64 // Asm: VPSUBB, CPU Feature: AVX512EVEX func (x Uint8x64) MaskedSub(y Uint8x64, z Mask8x64) Uint8x64 +// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedPairDotProdAccumulate(y Int16x32, z Int32x16, u Mask32x16) Int32x16 + +// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedSaturatedPairDotProdAccumulate(y Int16x32, z Int32x16, u Mask32x16) Int32x16 + +// SaturatedUnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16, u Mask32x16) Int32x16 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16, u Mask32x16) Int32x16 + +// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedPairDotProdAccumulate(y Int16x8, z Int32x4, u Mask32x4) Int32x4 + +// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedSaturatedPairDotProdAccumulate(y Int16x8, z Int32x4, u Mask32x4) Int32x4 + +// SaturatedUnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x.
+// +// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int32x4, u Mask32x4) Int32x4 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int32x4, u Mask32x4) Int32x4 + +// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedPairDotProdAccumulate(y Int16x16, z Int32x8, u Mask32x8) Int32x8 + +// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedSaturatedPairDotProdAccumulate(y Int16x16, z Int32x8, u Mask32x8) Int32x8 + +// SaturatedUnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int32x8, u Mask32x8) Int32x8 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int32x8, u Mask32x8) Int32x8 + +// SaturatedUnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16, u Mask32x16) Uint32x16 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16, u Mask32x16) Uint32x16 + +// SaturatedUnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int32x4, u Mask32x4) Uint32x4 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int32x4, u Mask32x4) Uint32x4 + +// SaturatedUnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int32x8, u Mask32x8) Uint32x8 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int32x8, u Mask32x8) Uint32x8 + // CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. // Const Immediate = 10.
// From ca01eab9c7c9c4987a36f6887e332a1fcba757f0 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Thu, 12 Jun 2025 16:45:00 +0000 Subject: [PATCH 025/139] [dev.simd] cmd/compile: add fused mul add sub ops This CL is generated by CL 680595. Change-Id: I5e06ea9bc6a62593fc3b00fd44c119a5ed0d9e90 Reviewed-on: https://go-review.googlesource.com/c/go/+/681299 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/simdssa.go | 328 +- .../compile/internal/ssa/_gen/simdAMD64.rules | 216 + .../compile/internal/ssa/_gen/simdAMD64ops.go | 216 + .../internal/ssa/_gen/simdgenericOps.go | 216 + src/cmd/compile/internal/ssa/opGen.go | 6254 +++++++++++++++-- src/cmd/compile/internal/ssa/rewriteAMD64.go | 2700 +++++++ .../compile/internal/ssagen/simdintrinsics.go | 216 + src/simd/stubs_amd64.go | 1080 +++ 8 files changed, 10635 insertions(+), 591 deletions(-) diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 7e9abbd3cbe317..5fc068c895c6b7 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -679,7 +679,115 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPCMPBMasked512: p = simdFp2k1k1Imm8(s, v) - case ssa.OpAMD64VPDPWSSD128, + case ssa.OpAMD64VFMADD132PS512, + ssa.OpAMD64VFMADD132PS128, + ssa.OpAMD64VFMADD132PS256, + ssa.OpAMD64VFMADD132PD128, + ssa.OpAMD64VFMADD132PD256, + ssa.OpAMD64VFMADD132PD512, + ssa.OpAMD64VFMADD213PS512, + ssa.OpAMD64VFMADD213PS128, + ssa.OpAMD64VFMADD213PS256, + ssa.OpAMD64VFMADD213PD128, + ssa.OpAMD64VFMADD213PD256, + ssa.OpAMD64VFMADD213PD512, + ssa.OpAMD64VFMADD231PS512, + ssa.OpAMD64VFMADD231PS128, + ssa.OpAMD64VFMADD231PS256, + ssa.OpAMD64VFMADD231PD128, + ssa.OpAMD64VFMADD231PD256, + ssa.OpAMD64VFMADD231PD512, + ssa.OpAMD64VFMADDSUB132PS512, + ssa.OpAMD64VFMADDSUB132PS128, + ssa.OpAMD64VFMADDSUB132PS256, + ssa.OpAMD64VFMADDSUB132PD128, + ssa.OpAMD64VFMADDSUB132PD256, + ssa.OpAMD64VFMADDSUB132PD512, + ssa.OpAMD64VFMADDSUB213PS512, + ssa.OpAMD64VFMADDSUB213PS128, + ssa.OpAMD64VFMADDSUB213PS256, + ssa.OpAMD64VFMADDSUB213PD128, + ssa.OpAMD64VFMADDSUB213PD256, + ssa.OpAMD64VFMADDSUB213PD512, + ssa.OpAMD64VFMADDSUB231PS512, + ssa.OpAMD64VFMADDSUB231PS128, + ssa.OpAMD64VFMADDSUB231PS256, + ssa.OpAMD64VFMADDSUB231PD128, + ssa.OpAMD64VFMADDSUB231PD256, + ssa.OpAMD64VFMADDSUB231PD512, + ssa.OpAMD64VFMSUB132PS512, + ssa.OpAMD64VFMSUB132PS128, + ssa.OpAMD64VFMSUB132PS256, + ssa.OpAMD64VFMSUB132PD128, + ssa.OpAMD64VFMSUB132PD256, + ssa.OpAMD64VFMSUB132PD512, + ssa.OpAMD64VFMSUB213PS512, + ssa.OpAMD64VFMSUB213PS128, + ssa.OpAMD64VFMSUB213PS256, + ssa.OpAMD64VFMSUB213PD128, + ssa.OpAMD64VFMSUB213PD256, + ssa.OpAMD64VFMSUB213PD512, + ssa.OpAMD64VFMSUB231PS512, + ssa.OpAMD64VFMSUB231PS128, + ssa.OpAMD64VFMSUB231PS256, + ssa.OpAMD64VFMSUB231PD128, + ssa.OpAMD64VFMSUB231PD256, + ssa.OpAMD64VFMSUB231PD512, + ssa.OpAMD64VFMSUBADD132PS512, + ssa.OpAMD64VFMSUBADD132PS128, + ssa.OpAMD64VFMSUBADD132PS256, + ssa.OpAMD64VFMSUBADD132PD128, + ssa.OpAMD64VFMSUBADD132PD256, + ssa.OpAMD64VFMSUBADD132PD512, + ssa.OpAMD64VFMSUBADD213PS512, + ssa.OpAMD64VFMSUBADD213PS128, + ssa.OpAMD64VFMSUBADD213PS256, + ssa.OpAMD64VFMSUBADD213PD128, + ssa.OpAMD64VFMSUBADD213PD256, + ssa.OpAMD64VFMSUBADD213PD512, + ssa.OpAMD64VFMSUBADD231PS512, + ssa.OpAMD64VFMSUBADD231PS128, + ssa.OpAMD64VFMSUBADD231PS256, + ssa.OpAMD64VFMSUBADD231PD128, + ssa.OpAMD64VFMSUBADD231PD256, + ssa.OpAMD64VFMSUBADD231PD512, + ssa.OpAMD64VFNMADD132PS512, + ssa.OpAMD64VFNMADD132PS128, + 
ssa.OpAMD64VFNMADD132PS256, + ssa.OpAMD64VFNMADD132PD128, + ssa.OpAMD64VFNMADD132PD256, + ssa.OpAMD64VFNMADD132PD512, + ssa.OpAMD64VFNMADD213PS512, + ssa.OpAMD64VFNMADD213PS128, + ssa.OpAMD64VFNMADD213PS256, + ssa.OpAMD64VFNMADD213PD128, + ssa.OpAMD64VFNMADD213PD256, + ssa.OpAMD64VFNMADD213PD512, + ssa.OpAMD64VFNMADD231PS512, + ssa.OpAMD64VFNMADD231PS128, + ssa.OpAMD64VFNMADD231PS256, + ssa.OpAMD64VFNMADD231PD128, + ssa.OpAMD64VFNMADD231PD256, + ssa.OpAMD64VFNMADD231PD512, + ssa.OpAMD64VFNMSUB132PS512, + ssa.OpAMD64VFNMSUB132PS128, + ssa.OpAMD64VFNMSUB132PS256, + ssa.OpAMD64VFNMSUB132PD128, + ssa.OpAMD64VFNMSUB132PD256, + ssa.OpAMD64VFNMSUB132PD512, + ssa.OpAMD64VFNMSUB213PS512, + ssa.OpAMD64VFNMSUB213PS128, + ssa.OpAMD64VFNMSUB213PS256, + ssa.OpAMD64VFNMSUB213PD128, + ssa.OpAMD64VFNMSUB213PD256, + ssa.OpAMD64VFNMSUB213PD512, + ssa.OpAMD64VFNMSUB231PS512, + ssa.OpAMD64VFNMSUB231PS128, + ssa.OpAMD64VFNMSUB231PS256, + ssa.OpAMD64VFNMSUB231PD128, + ssa.OpAMD64VFNMSUB231PD256, + ssa.OpAMD64VFNMSUB231PD512, + ssa.OpAMD64VPDPWSSD128, ssa.OpAMD64VPDPWSSD256, ssa.OpAMD64VPDPWSSD512, ssa.OpAMD64VPDPWSSDS128, @@ -693,7 +801,115 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPDPBUSD512: p = simdFp31ResultInArg0(s, v) - case ssa.OpAMD64VPDPWSSDMasked512, + case ssa.OpAMD64VFMADD132PSMasked512, + ssa.OpAMD64VFMADD132PSMasked128, + ssa.OpAMD64VFMADD132PSMasked256, + ssa.OpAMD64VFMADD132PDMasked128, + ssa.OpAMD64VFMADD132PDMasked256, + ssa.OpAMD64VFMADD132PDMasked512, + ssa.OpAMD64VFMADD213PSMasked512, + ssa.OpAMD64VFMADD213PSMasked128, + ssa.OpAMD64VFMADD213PSMasked256, + ssa.OpAMD64VFMADD213PDMasked128, + ssa.OpAMD64VFMADD213PDMasked256, + ssa.OpAMD64VFMADD213PDMasked512, + ssa.OpAMD64VFMADD231PSMasked512, + ssa.OpAMD64VFMADD231PSMasked128, + ssa.OpAMD64VFMADD231PSMasked256, + ssa.OpAMD64VFMADD231PDMasked128, + ssa.OpAMD64VFMADD231PDMasked256, + ssa.OpAMD64VFMADD231PDMasked512, + ssa.OpAMD64VFMADDSUB132PSMasked512, + ssa.OpAMD64VFMADDSUB132PSMasked128, + ssa.OpAMD64VFMADDSUB132PSMasked256, + ssa.OpAMD64VFMADDSUB132PDMasked128, + ssa.OpAMD64VFMADDSUB132PDMasked256, + ssa.OpAMD64VFMADDSUB132PDMasked512, + ssa.OpAMD64VFMADDSUB213PSMasked512, + ssa.OpAMD64VFMADDSUB213PSMasked128, + ssa.OpAMD64VFMADDSUB213PSMasked256, + ssa.OpAMD64VFMADDSUB213PDMasked128, + ssa.OpAMD64VFMADDSUB213PDMasked256, + ssa.OpAMD64VFMADDSUB213PDMasked512, + ssa.OpAMD64VFMADDSUB231PSMasked512, + ssa.OpAMD64VFMADDSUB231PSMasked128, + ssa.OpAMD64VFMADDSUB231PSMasked256, + ssa.OpAMD64VFMADDSUB231PDMasked128, + ssa.OpAMD64VFMADDSUB231PDMasked256, + ssa.OpAMD64VFMADDSUB231PDMasked512, + ssa.OpAMD64VFMSUB132PSMasked512, + ssa.OpAMD64VFMSUB132PSMasked128, + ssa.OpAMD64VFMSUB132PSMasked256, + ssa.OpAMD64VFMSUB132PDMasked128, + ssa.OpAMD64VFMSUB132PDMasked256, + ssa.OpAMD64VFMSUB132PDMasked512, + ssa.OpAMD64VFMSUB213PSMasked512, + ssa.OpAMD64VFMSUB213PSMasked128, + ssa.OpAMD64VFMSUB213PSMasked256, + ssa.OpAMD64VFMSUB213PDMasked128, + ssa.OpAMD64VFMSUB213PDMasked256, + ssa.OpAMD64VFMSUB213PDMasked512, + ssa.OpAMD64VFMSUB231PSMasked512, + ssa.OpAMD64VFMSUB231PSMasked128, + ssa.OpAMD64VFMSUB231PSMasked256, + ssa.OpAMD64VFMSUB231PDMasked128, + ssa.OpAMD64VFMSUB231PDMasked256, + ssa.OpAMD64VFMSUB231PDMasked512, + ssa.OpAMD64VFMSUBADD132PSMasked512, + ssa.OpAMD64VFMSUBADD132PSMasked128, + ssa.OpAMD64VFMSUBADD132PSMasked256, + ssa.OpAMD64VFMSUBADD132PDMasked128, + ssa.OpAMD64VFMSUBADD132PDMasked256, + ssa.OpAMD64VFMSUBADD132PDMasked512, + ssa.OpAMD64VFMSUBADD213PSMasked512, + ssa.OpAMD64VFMSUBADD213PSMasked128, + 
ssa.OpAMD64VFMSUBADD213PSMasked256, + ssa.OpAMD64VFMSUBADD213PDMasked128, + ssa.OpAMD64VFMSUBADD213PDMasked256, + ssa.OpAMD64VFMSUBADD213PDMasked512, + ssa.OpAMD64VFMSUBADD231PSMasked512, + ssa.OpAMD64VFMSUBADD231PSMasked128, + ssa.OpAMD64VFMSUBADD231PSMasked256, + ssa.OpAMD64VFMSUBADD231PDMasked128, + ssa.OpAMD64VFMSUBADD231PDMasked256, + ssa.OpAMD64VFMSUBADD231PDMasked512, + ssa.OpAMD64VFNMADD132PSMasked512, + ssa.OpAMD64VFNMADD132PSMasked128, + ssa.OpAMD64VFNMADD132PSMasked256, + ssa.OpAMD64VFNMADD132PDMasked128, + ssa.OpAMD64VFNMADD132PDMasked256, + ssa.OpAMD64VFNMADD132PDMasked512, + ssa.OpAMD64VFNMADD213PSMasked512, + ssa.OpAMD64VFNMADD213PSMasked128, + ssa.OpAMD64VFNMADD213PSMasked256, + ssa.OpAMD64VFNMADD213PDMasked128, + ssa.OpAMD64VFNMADD213PDMasked256, + ssa.OpAMD64VFNMADD213PDMasked512, + ssa.OpAMD64VFNMADD231PSMasked512, + ssa.OpAMD64VFNMADD231PSMasked128, + ssa.OpAMD64VFNMADD231PSMasked256, + ssa.OpAMD64VFNMADD231PDMasked128, + ssa.OpAMD64VFNMADD231PDMasked256, + ssa.OpAMD64VFNMADD231PDMasked512, + ssa.OpAMD64VFNMSUB132PSMasked512, + ssa.OpAMD64VFNMSUB132PSMasked128, + ssa.OpAMD64VFNMSUB132PSMasked256, + ssa.OpAMD64VFNMSUB132PDMasked128, + ssa.OpAMD64VFNMSUB132PDMasked256, + ssa.OpAMD64VFNMSUB132PDMasked512, + ssa.OpAMD64VFNMSUB213PSMasked512, + ssa.OpAMD64VFNMSUB213PSMasked128, + ssa.OpAMD64VFNMSUB213PSMasked256, + ssa.OpAMD64VFNMSUB213PDMasked128, + ssa.OpAMD64VFNMSUB213PDMasked256, + ssa.OpAMD64VFNMSUB213PDMasked512, + ssa.OpAMD64VFNMSUB231PSMasked512, + ssa.OpAMD64VFNMSUB231PSMasked128, + ssa.OpAMD64VFNMSUB231PSMasked256, + ssa.OpAMD64VFNMSUB231PDMasked128, + ssa.OpAMD64VFNMSUB231PDMasked256, + ssa.OpAMD64VFNMSUB231PDMasked512, + ssa.OpAMD64VPDPWSSDMasked512, ssa.OpAMD64VPDPWSSDMasked128, ssa.OpAMD64VPDPWSSDMasked256, ssa.OpAMD64VPDPWSSDSMasked512, @@ -804,6 +1020,114 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VDIVPDMasked128, ssa.OpAMD64VDIVPDMasked256, ssa.OpAMD64VDIVPDMasked512, + ssa.OpAMD64VFMADD132PSMasked512, + ssa.OpAMD64VFMADD132PSMasked128, + ssa.OpAMD64VFMADD132PSMasked256, + ssa.OpAMD64VFMADD132PDMasked128, + ssa.OpAMD64VFMADD132PDMasked256, + ssa.OpAMD64VFMADD132PDMasked512, + ssa.OpAMD64VFMADD213PSMasked512, + ssa.OpAMD64VFMADD213PSMasked128, + ssa.OpAMD64VFMADD213PSMasked256, + ssa.OpAMD64VFMADD213PDMasked128, + ssa.OpAMD64VFMADD213PDMasked256, + ssa.OpAMD64VFMADD213PDMasked512, + ssa.OpAMD64VFMADD231PSMasked512, + ssa.OpAMD64VFMADD231PSMasked128, + ssa.OpAMD64VFMADD231PSMasked256, + ssa.OpAMD64VFMADD231PDMasked128, + ssa.OpAMD64VFMADD231PDMasked256, + ssa.OpAMD64VFMADD231PDMasked512, + ssa.OpAMD64VFMADDSUB132PSMasked512, + ssa.OpAMD64VFMADDSUB132PSMasked128, + ssa.OpAMD64VFMADDSUB132PSMasked256, + ssa.OpAMD64VFMADDSUB132PDMasked128, + ssa.OpAMD64VFMADDSUB132PDMasked256, + ssa.OpAMD64VFMADDSUB132PDMasked512, + ssa.OpAMD64VFMADDSUB213PSMasked512, + ssa.OpAMD64VFMADDSUB213PSMasked128, + ssa.OpAMD64VFMADDSUB213PSMasked256, + ssa.OpAMD64VFMADDSUB213PDMasked128, + ssa.OpAMD64VFMADDSUB213PDMasked256, + ssa.OpAMD64VFMADDSUB213PDMasked512, + ssa.OpAMD64VFMADDSUB231PSMasked512, + ssa.OpAMD64VFMADDSUB231PSMasked128, + ssa.OpAMD64VFMADDSUB231PSMasked256, + ssa.OpAMD64VFMADDSUB231PDMasked128, + ssa.OpAMD64VFMADDSUB231PDMasked256, + ssa.OpAMD64VFMADDSUB231PDMasked512, + ssa.OpAMD64VFMSUB132PSMasked512, + ssa.OpAMD64VFMSUB132PSMasked128, + ssa.OpAMD64VFMSUB132PSMasked256, + ssa.OpAMD64VFMSUB132PDMasked128, + ssa.OpAMD64VFMSUB132PDMasked256, + ssa.OpAMD64VFMSUB132PDMasked512, + ssa.OpAMD64VFMSUB213PSMasked512, + 
ssa.OpAMD64VFMSUB213PSMasked128, + ssa.OpAMD64VFMSUB213PSMasked256, + ssa.OpAMD64VFMSUB213PDMasked128, + ssa.OpAMD64VFMSUB213PDMasked256, + ssa.OpAMD64VFMSUB213PDMasked512, + ssa.OpAMD64VFMSUB231PSMasked512, + ssa.OpAMD64VFMSUB231PSMasked128, + ssa.OpAMD64VFMSUB231PSMasked256, + ssa.OpAMD64VFMSUB231PDMasked128, + ssa.OpAMD64VFMSUB231PDMasked256, + ssa.OpAMD64VFMSUB231PDMasked512, + ssa.OpAMD64VFMSUBADD132PSMasked512, + ssa.OpAMD64VFMSUBADD132PSMasked128, + ssa.OpAMD64VFMSUBADD132PSMasked256, + ssa.OpAMD64VFMSUBADD132PDMasked128, + ssa.OpAMD64VFMSUBADD132PDMasked256, + ssa.OpAMD64VFMSUBADD132PDMasked512, + ssa.OpAMD64VFMSUBADD213PSMasked512, + ssa.OpAMD64VFMSUBADD213PSMasked128, + ssa.OpAMD64VFMSUBADD213PSMasked256, + ssa.OpAMD64VFMSUBADD213PDMasked128, + ssa.OpAMD64VFMSUBADD213PDMasked256, + ssa.OpAMD64VFMSUBADD213PDMasked512, + ssa.OpAMD64VFMSUBADD231PSMasked512, + ssa.OpAMD64VFMSUBADD231PSMasked128, + ssa.OpAMD64VFMSUBADD231PSMasked256, + ssa.OpAMD64VFMSUBADD231PDMasked128, + ssa.OpAMD64VFMSUBADD231PDMasked256, + ssa.OpAMD64VFMSUBADD231PDMasked512, + ssa.OpAMD64VFNMADD132PSMasked512, + ssa.OpAMD64VFNMADD132PSMasked128, + ssa.OpAMD64VFNMADD132PSMasked256, + ssa.OpAMD64VFNMADD132PDMasked128, + ssa.OpAMD64VFNMADD132PDMasked256, + ssa.OpAMD64VFNMADD132PDMasked512, + ssa.OpAMD64VFNMADD213PSMasked512, + ssa.OpAMD64VFNMADD213PSMasked128, + ssa.OpAMD64VFNMADD213PSMasked256, + ssa.OpAMD64VFNMADD213PDMasked128, + ssa.OpAMD64VFNMADD213PDMasked256, + ssa.OpAMD64VFNMADD213PDMasked512, + ssa.OpAMD64VFNMADD231PSMasked512, + ssa.OpAMD64VFNMADD231PSMasked128, + ssa.OpAMD64VFNMADD231PSMasked256, + ssa.OpAMD64VFNMADD231PDMasked128, + ssa.OpAMD64VFNMADD231PDMasked256, + ssa.OpAMD64VFNMADD231PDMasked512, + ssa.OpAMD64VFNMSUB132PSMasked512, + ssa.OpAMD64VFNMSUB132PSMasked128, + ssa.OpAMD64VFNMSUB132PSMasked256, + ssa.OpAMD64VFNMSUB132PDMasked128, + ssa.OpAMD64VFNMSUB132PDMasked256, + ssa.OpAMD64VFNMSUB132PDMasked512, + ssa.OpAMD64VFNMSUB213PSMasked512, + ssa.OpAMD64VFNMSUB213PSMasked128, + ssa.OpAMD64VFNMSUB213PSMasked256, + ssa.OpAMD64VFNMSUB213PDMasked128, + ssa.OpAMD64VFNMSUB213PDMasked256, + ssa.OpAMD64VFNMSUB213PDMasked512, + ssa.OpAMD64VFNMSUB231PSMasked512, + ssa.OpAMD64VFNMSUB231PSMasked128, + ssa.OpAMD64VFNMSUB231PSMasked256, + ssa.OpAMD64VFNMSUB231PDMasked128, + ssa.OpAMD64VFNMSUB231PDMasked256, + ssa.OpAMD64VFNMSUB231PDMasked512, ssa.OpAMD64VMAXPSMasked512, ssa.OpAMD64VMAXPSMasked128, ssa.OpAMD64VMAXPSMasked256, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index efee484b9993c6..add066a3b6dcff 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -233,6 +233,114 @@ (FloorWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+1] x) (FloorWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+1] x) (FloorWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+1] x) +(FusedMultiplyAdd132Float32x16 ...) => (VFMADD132PS512 ...) +(FusedMultiplyAdd132Float32x4 ...) => (VFMADD132PS128 ...) +(FusedMultiplyAdd132Float32x8 ...) => (VFMADD132PS256 ...) +(FusedMultiplyAdd132Float64x2 ...) => (VFMADD132PD128 ...) +(FusedMultiplyAdd132Float64x4 ...) => (VFMADD132PD256 ...) +(FusedMultiplyAdd132Float64x8 ...) => (VFMADD132PD512 ...) +(FusedMultiplyAdd213Float32x16 ...) => (VFMADD213PS512 ...) +(FusedMultiplyAdd213Float32x4 ...) => (VFMADD213PS128 ...) +(FusedMultiplyAdd213Float32x8 ...) => (VFMADD213PS256 ...) +(FusedMultiplyAdd213Float64x2 ...) => (VFMADD213PD128 ...) 
+(FusedMultiplyAdd213Float64x4 ...) => (VFMADD213PD256 ...) +(FusedMultiplyAdd213Float64x8 ...) => (VFMADD213PD512 ...) +(FusedMultiplyAdd231Float32x16 ...) => (VFMADD231PS512 ...) +(FusedMultiplyAdd231Float32x4 ...) => (VFMADD231PS128 ...) +(FusedMultiplyAdd231Float32x8 ...) => (VFMADD231PS256 ...) +(FusedMultiplyAdd231Float64x2 ...) => (VFMADD231PD128 ...) +(FusedMultiplyAdd231Float64x4 ...) => (VFMADD231PD256 ...) +(FusedMultiplyAdd231Float64x8 ...) => (VFMADD231PD512 ...) +(FusedMultiplyAddSub132Float32x16 ...) => (VFMADDSUB132PS512 ...) +(FusedMultiplyAddSub132Float32x4 ...) => (VFMADDSUB132PS128 ...) +(FusedMultiplyAddSub132Float32x8 ...) => (VFMADDSUB132PS256 ...) +(FusedMultiplyAddSub132Float64x2 ...) => (VFMADDSUB132PD128 ...) +(FusedMultiplyAddSub132Float64x4 ...) => (VFMADDSUB132PD256 ...) +(FusedMultiplyAddSub132Float64x8 ...) => (VFMADDSUB132PD512 ...) +(FusedMultiplyAddSub213Float32x16 ...) => (VFMADDSUB213PS512 ...) +(FusedMultiplyAddSub213Float32x4 ...) => (VFMADDSUB213PS128 ...) +(FusedMultiplyAddSub213Float32x8 ...) => (VFMADDSUB213PS256 ...) +(FusedMultiplyAddSub213Float64x2 ...) => (VFMADDSUB213PD128 ...) +(FusedMultiplyAddSub213Float64x4 ...) => (VFMADDSUB213PD256 ...) +(FusedMultiplyAddSub213Float64x8 ...) => (VFMADDSUB213PD512 ...) +(FusedMultiplyAddSub231Float32x16 ...) => (VFMADDSUB231PS512 ...) +(FusedMultiplyAddSub231Float32x4 ...) => (VFMADDSUB231PS128 ...) +(FusedMultiplyAddSub231Float32x8 ...) => (VFMADDSUB231PS256 ...) +(FusedMultiplyAddSub231Float64x2 ...) => (VFMADDSUB231PD128 ...) +(FusedMultiplyAddSub231Float64x4 ...) => (VFMADDSUB231PD256 ...) +(FusedMultiplyAddSub231Float64x8 ...) => (VFMADDSUB231PD512 ...) +(FusedMultiplySub132Float32x16 ...) => (VFMSUB132PS512 ...) +(FusedMultiplySub132Float32x4 ...) => (VFMSUB132PS128 ...) +(FusedMultiplySub132Float32x8 ...) => (VFMSUB132PS256 ...) +(FusedMultiplySub132Float64x2 ...) => (VFMSUB132PD128 ...) +(FusedMultiplySub132Float64x4 ...) => (VFMSUB132PD256 ...) +(FusedMultiplySub132Float64x8 ...) => (VFMSUB132PD512 ...) +(FusedMultiplySub213Float32x16 ...) => (VFMSUB213PS512 ...) +(FusedMultiplySub213Float32x4 ...) => (VFMSUB213PS128 ...) +(FusedMultiplySub213Float32x8 ...) => (VFMSUB213PS256 ...) +(FusedMultiplySub213Float64x2 ...) => (VFMSUB213PD128 ...) +(FusedMultiplySub213Float64x4 ...) => (VFMSUB213PD256 ...) +(FusedMultiplySub213Float64x8 ...) => (VFMSUB213PD512 ...) +(FusedMultiplySub231Float32x16 ...) => (VFMSUB231PS512 ...) +(FusedMultiplySub231Float32x4 ...) => (VFMSUB231PS128 ...) +(FusedMultiplySub231Float32x8 ...) => (VFMSUB231PS256 ...) +(FusedMultiplySub231Float64x2 ...) => (VFMSUB231PD128 ...) +(FusedMultiplySub231Float64x4 ...) => (VFMSUB231PD256 ...) +(FusedMultiplySub231Float64x8 ...) => (VFMSUB231PD512 ...) +(FusedMultiplySubAdd132Float32x16 ...) => (VFMSUBADD132PS512 ...) +(FusedMultiplySubAdd132Float32x4 ...) => (VFMSUBADD132PS128 ...) +(FusedMultiplySubAdd132Float32x8 ...) => (VFMSUBADD132PS256 ...) +(FusedMultiplySubAdd132Float64x2 ...) => (VFMSUBADD132PD128 ...) +(FusedMultiplySubAdd132Float64x4 ...) => (VFMSUBADD132PD256 ...) +(FusedMultiplySubAdd132Float64x8 ...) => (VFMSUBADD132PD512 ...) +(FusedMultiplySubAdd213Float32x16 ...) => (VFMSUBADD213PS512 ...) +(FusedMultiplySubAdd213Float32x4 ...) => (VFMSUBADD213PS128 ...) +(FusedMultiplySubAdd213Float32x8 ...) => (VFMSUBADD213PS256 ...) +(FusedMultiplySubAdd213Float64x2 ...) => (VFMSUBADD213PD128 ...) +(FusedMultiplySubAdd213Float64x4 ...) => (VFMSUBADD213PD256 ...) +(FusedMultiplySubAdd213Float64x8 ...) => (VFMSUBADD213PD512 ...) 
+(FusedMultiplySubAdd231Float32x16 ...) => (VFMSUBADD231PS512 ...) +(FusedMultiplySubAdd231Float32x4 ...) => (VFMSUBADD231PS128 ...) +(FusedMultiplySubAdd231Float32x8 ...) => (VFMSUBADD231PS256 ...) +(FusedMultiplySubAdd231Float64x2 ...) => (VFMSUBADD231PD128 ...) +(FusedMultiplySubAdd231Float64x4 ...) => (VFMSUBADD231PD256 ...) +(FusedMultiplySubAdd231Float64x8 ...) => (VFMSUBADD231PD512 ...) +(FusedNegativeMultiplyAdd132Float32x16 ...) => (VFNMADD132PS512 ...) +(FusedNegativeMultiplyAdd132Float32x4 ...) => (VFNMADD132PS128 ...) +(FusedNegativeMultiplyAdd132Float32x8 ...) => (VFNMADD132PS256 ...) +(FusedNegativeMultiplyAdd132Float64x2 ...) => (VFNMADD132PD128 ...) +(FusedNegativeMultiplyAdd132Float64x4 ...) => (VFNMADD132PD256 ...) +(FusedNegativeMultiplyAdd132Float64x8 ...) => (VFNMADD132PD512 ...) +(FusedNegativeMultiplyAdd213Float32x16 ...) => (VFNMADD213PS512 ...) +(FusedNegativeMultiplyAdd213Float32x4 ...) => (VFNMADD213PS128 ...) +(FusedNegativeMultiplyAdd213Float32x8 ...) => (VFNMADD213PS256 ...) +(FusedNegativeMultiplyAdd213Float64x2 ...) => (VFNMADD213PD128 ...) +(FusedNegativeMultiplyAdd213Float64x4 ...) => (VFNMADD213PD256 ...) +(FusedNegativeMultiplyAdd213Float64x8 ...) => (VFNMADD213PD512 ...) +(FusedNegativeMultiplyAdd231Float32x16 ...) => (VFNMADD231PS512 ...) +(FusedNegativeMultiplyAdd231Float32x4 ...) => (VFNMADD231PS128 ...) +(FusedNegativeMultiplyAdd231Float32x8 ...) => (VFNMADD231PS256 ...) +(FusedNegativeMultiplyAdd231Float64x2 ...) => (VFNMADD231PD128 ...) +(FusedNegativeMultiplyAdd231Float64x4 ...) => (VFNMADD231PD256 ...) +(FusedNegativeMultiplyAdd231Float64x8 ...) => (VFNMADD231PD512 ...) +(FusedNegativeMultiplySub132Float32x16 ...) => (VFNMSUB132PS512 ...) +(FusedNegativeMultiplySub132Float32x4 ...) => (VFNMSUB132PS128 ...) +(FusedNegativeMultiplySub132Float32x8 ...) => (VFNMSUB132PS256 ...) +(FusedNegativeMultiplySub132Float64x2 ...) => (VFNMSUB132PD128 ...) +(FusedNegativeMultiplySub132Float64x4 ...) => (VFNMSUB132PD256 ...) +(FusedNegativeMultiplySub132Float64x8 ...) => (VFNMSUB132PD512 ...) +(FusedNegativeMultiplySub213Float32x16 ...) => (VFNMSUB213PS512 ...) +(FusedNegativeMultiplySub213Float32x4 ...) => (VFNMSUB213PS128 ...) +(FusedNegativeMultiplySub213Float32x8 ...) => (VFNMSUB213PS256 ...) +(FusedNegativeMultiplySub213Float64x2 ...) => (VFNMSUB213PD128 ...) +(FusedNegativeMultiplySub213Float64x4 ...) => (VFNMSUB213PD256 ...) +(FusedNegativeMultiplySub213Float64x8 ...) => (VFNMSUB213PD512 ...) +(FusedNegativeMultiplySub231Float32x16 ...) => (VFNMSUB231PS512 ...) +(FusedNegativeMultiplySub231Float32x4 ...) => (VFNMSUB231PS128 ...) +(FusedNegativeMultiplySub231Float32x8 ...) => (VFNMSUB231PS256 ...) +(FusedNegativeMultiplySub231Float64x2 ...) => (VFNMSUB231PD128 ...) +(FusedNegativeMultiplySub231Float64x4 ...) => (VFNMSUB231PD256 ...) +(FusedNegativeMultiplySub231Float64x8 ...) => (VFNMSUB231PD512 ...) 
(GreaterFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [6] x y)) (GreaterFloat32x4 x y) => (VCMPPS128 [6] x y) (GreaterFloat32x8 x y) => (VCMPPS256 [6] x y) @@ -563,6 +671,114 @@ (MaskedFloorWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) (MaskedFloorWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) (MaskedFloorWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) +(MaskedFusedMultiplyAdd132Float32x16 x y z mask) => (VFMADD132PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedFusedMultiplyAdd132Float32x4 x y z mask) => (VFMADD132PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedFusedMultiplyAdd132Float32x8 x y z mask) => (VFMADD132PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedFusedMultiplyAdd132Float64x2 x y z mask) => (VFMADD132PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(MaskedFusedMultiplyAdd132Float64x4 x y z mask) => (VFMADD132PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(MaskedFusedMultiplyAdd132Float64x8 x y z mask) => (VFMADD132PDMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedFusedMultiplyAdd213Float32x16 x y z mask) => (VFMADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedFusedMultiplyAdd213Float32x4 x y z mask) => (VFMADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedFusedMultiplyAdd213Float32x8 x y z mask) => (VFMADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedFusedMultiplyAdd213Float64x2 x y z mask) => (VFMADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(MaskedFusedMultiplyAdd213Float64x4 x y z mask) => (VFMADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(MaskedFusedMultiplyAdd213Float64x8 x y z mask) => (VFMADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedFusedMultiplyAdd231Float32x16 x y z mask) => (VFMADD231PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedFusedMultiplyAdd231Float32x4 x y z mask) => (VFMADD231PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedFusedMultiplyAdd231Float32x8 x y z mask) => (VFMADD231PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedFusedMultiplyAdd231Float64x2 x y z mask) => (VFMADD231PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(MaskedFusedMultiplyAdd231Float64x4 x y z mask) => (VFMADD231PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(MaskedFusedMultiplyAdd231Float64x8 x y z mask) => (VFMADD231PDMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedFusedMultiplyAddSub132Float32x16 x y z mask) => (VFMADDSUB132PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedFusedMultiplyAddSub132Float32x4 x y z mask) => (VFMADDSUB132PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedFusedMultiplyAddSub132Float32x8 x y z mask) => (VFMADDSUB132PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedFusedMultiplyAddSub132Float64x2 x y z mask) => (VFMADDSUB132PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(MaskedFusedMultiplyAddSub132Float64x4 x y z mask) => (VFMADDSUB132PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(MaskedFusedMultiplyAddSub132Float64x8 x y z mask) => (VFMADDSUB132PDMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedFusedMultiplyAddSub213Float32x16 x y z mask) => (VFMADDSUB213PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedFusedMultiplyAddSub213Float32x4 x y z mask) => (VFMADDSUB213PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedFusedMultiplyAddSub213Float32x8 x y z mask) => (VFMADDSUB213PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedFusedMultiplyAddSub213Float64x2 x y z mask) => (VFMADDSUB213PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(MaskedFusedMultiplyAddSub213Float64x4 x y z mask) => (VFMADDSUB213PDMasked256 
x y z (VPMOVVec64x4ToM mask)) +(MaskedFusedMultiplyAddSub213Float64x8 x y z mask) => (VFMADDSUB213PDMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedFusedMultiplyAddSub231Float32x16 x y z mask) => (VFMADDSUB231PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedFusedMultiplyAddSub231Float32x4 x y z mask) => (VFMADDSUB231PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedFusedMultiplyAddSub231Float32x8 x y z mask) => (VFMADDSUB231PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedFusedMultiplyAddSub231Float64x2 x y z mask) => (VFMADDSUB231PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(MaskedFusedMultiplyAddSub231Float64x4 x y z mask) => (VFMADDSUB231PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(MaskedFusedMultiplyAddSub231Float64x8 x y z mask) => (VFMADDSUB231PDMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedFusedMultiplySub132Float32x16 x y z mask) => (VFMSUB132PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedFusedMultiplySub132Float32x4 x y z mask) => (VFMSUB132PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedFusedMultiplySub132Float32x8 x y z mask) => (VFMSUB132PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedFusedMultiplySub132Float64x2 x y z mask) => (VFMSUB132PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(MaskedFusedMultiplySub132Float64x4 x y z mask) => (VFMSUB132PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(MaskedFusedMultiplySub132Float64x8 x y z mask) => (VFMSUB132PDMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedFusedMultiplySub213Float32x16 x y z mask) => (VFMSUB213PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedFusedMultiplySub213Float32x4 x y z mask) => (VFMSUB213PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedFusedMultiplySub213Float32x8 x y z mask) => (VFMSUB213PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedFusedMultiplySub213Float64x2 x y z mask) => (VFMSUB213PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(MaskedFusedMultiplySub213Float64x4 x y z mask) => (VFMSUB213PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(MaskedFusedMultiplySub213Float64x8 x y z mask) => (VFMSUB213PDMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedFusedMultiplySub231Float32x16 x y z mask) => (VFMSUB231PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedFusedMultiplySub231Float32x4 x y z mask) => (VFMSUB231PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedFusedMultiplySub231Float32x8 x y z mask) => (VFMSUB231PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedFusedMultiplySub231Float64x2 x y z mask) => (VFMSUB231PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(MaskedFusedMultiplySub231Float64x4 x y z mask) => (VFMSUB231PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(MaskedFusedMultiplySub231Float64x8 x y z mask) => (VFMSUB231PDMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedFusedMultiplySubAdd132Float32x16 x y z mask) => (VFMSUBADD132PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedFusedMultiplySubAdd132Float32x4 x y z mask) => (VFMSUBADD132PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedFusedMultiplySubAdd132Float32x8 x y z mask) => (VFMSUBADD132PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedFusedMultiplySubAdd132Float64x2 x y z mask) => (VFMSUBADD132PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(MaskedFusedMultiplySubAdd132Float64x4 x y z mask) => (VFMSUBADD132PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(MaskedFusedMultiplySubAdd132Float64x8 x y z mask) => (VFMSUBADD132PDMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedFusedMultiplySubAdd213Float32x16 x y z mask) => (VFMSUBADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedFusedMultiplySubAdd213Float32x4 x y z mask) => (VFMSUBADD213PSMasked128 x y z 
(VPMOVVec32x4ToM mask)) +(MaskedFusedMultiplySubAdd213Float32x8 x y z mask) => (VFMSUBADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedFusedMultiplySubAdd213Float64x2 x y z mask) => (VFMSUBADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(MaskedFusedMultiplySubAdd213Float64x4 x y z mask) => (VFMSUBADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(MaskedFusedMultiplySubAdd213Float64x8 x y z mask) => (VFMSUBADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedFusedMultiplySubAdd231Float32x16 x y z mask) => (VFMSUBADD231PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedFusedMultiplySubAdd231Float32x4 x y z mask) => (VFMSUBADD231PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedFusedMultiplySubAdd231Float32x8 x y z mask) => (VFMSUBADD231PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedFusedMultiplySubAdd231Float64x2 x y z mask) => (VFMSUBADD231PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(MaskedFusedMultiplySubAdd231Float64x4 x y z mask) => (VFMSUBADD231PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(MaskedFusedMultiplySubAdd231Float64x8 x y z mask) => (VFMSUBADD231PDMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedFusedNegativeMultiplyAdd132Float32x16 x y z mask) => (VFNMADD132PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedFusedNegativeMultiplyAdd132Float32x4 x y z mask) => (VFNMADD132PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedFusedNegativeMultiplyAdd132Float32x8 x y z mask) => (VFNMADD132PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedFusedNegativeMultiplyAdd132Float64x2 x y z mask) => (VFNMADD132PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(MaskedFusedNegativeMultiplyAdd132Float64x4 x y z mask) => (VFNMADD132PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(MaskedFusedNegativeMultiplyAdd132Float64x8 x y z mask) => (VFNMADD132PDMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedFusedNegativeMultiplyAdd213Float32x16 x y z mask) => (VFNMADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedFusedNegativeMultiplyAdd213Float32x4 x y z mask) => (VFNMADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedFusedNegativeMultiplyAdd213Float32x8 x y z mask) => (VFNMADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedFusedNegativeMultiplyAdd213Float64x2 x y z mask) => (VFNMADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(MaskedFusedNegativeMultiplyAdd213Float64x4 x y z mask) => (VFNMADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(MaskedFusedNegativeMultiplyAdd213Float64x8 x y z mask) => (VFNMADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedFusedNegativeMultiplyAdd231Float32x16 x y z mask) => (VFNMADD231PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedFusedNegativeMultiplyAdd231Float32x4 x y z mask) => (VFNMADD231PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedFusedNegativeMultiplyAdd231Float32x8 x y z mask) => (VFNMADD231PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedFusedNegativeMultiplyAdd231Float64x2 x y z mask) => (VFNMADD231PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(MaskedFusedNegativeMultiplyAdd231Float64x4 x y z mask) => (VFNMADD231PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(MaskedFusedNegativeMultiplyAdd231Float64x8 x y z mask) => (VFNMADD231PDMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedFusedNegativeMultiplySub132Float32x16 x y z mask) => (VFNMSUB132PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedFusedNegativeMultiplySub132Float32x4 x y z mask) => (VFNMSUB132PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedFusedNegativeMultiplySub132Float32x8 x y z mask) => (VFNMSUB132PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedFusedNegativeMultiplySub132Float64x2 x 
y z mask) => (VFNMSUB132PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(MaskedFusedNegativeMultiplySub132Float64x4 x y z mask) => (VFNMSUB132PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(MaskedFusedNegativeMultiplySub132Float64x8 x y z mask) => (VFNMSUB132PDMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedFusedNegativeMultiplySub213Float32x16 x y z mask) => (VFNMSUB213PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedFusedNegativeMultiplySub213Float32x4 x y z mask) => (VFNMSUB213PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedFusedNegativeMultiplySub213Float32x8 x y z mask) => (VFNMSUB213PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedFusedNegativeMultiplySub213Float64x2 x y z mask) => (VFNMSUB213PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(MaskedFusedNegativeMultiplySub213Float64x4 x y z mask) => (VFNMSUB213PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(MaskedFusedNegativeMultiplySub213Float64x8 x y z mask) => (VFNMSUB213PDMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedFusedNegativeMultiplySub231Float32x16 x y z mask) => (VFNMSUB231PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedFusedNegativeMultiplySub231Float32x4 x y z mask) => (VFNMSUB231PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedFusedNegativeMultiplySub231Float32x8 x y z mask) => (VFNMSUB231PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedFusedNegativeMultiplySub231Float64x2 x y z mask) => (VFNMSUB231PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(MaskedFusedNegativeMultiplySub231Float64x4 x y z mask) => (VFNMSUB231PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(MaskedFusedNegativeMultiplySub231Float64x8 x y z mask) => (VFNMSUB231PDMasked512 x y z (VPMOVVec64x8ToM mask)) (MaskedGreaterFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [6] x y (VPMOVVec32x16ToM mask))) (MaskedGreaterFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [6] x y (VPMOVVec32x4ToM mask))) (MaskedGreaterFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [6] x y (VPMOVVec32x8ToM mask))) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 6cc405c0300fcb..b9a7bc59a56152 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -9,12 +9,48 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VRCP14PS512", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRSQRT14PS512", argLength: 1, reg: fp11, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VDIVPS512", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VFMADD132PS512", argLength: 3, reg: fp31, asm: "VFMADD132PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADD213PS512", argLength: 3, reg: fp31, asm: "VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADD231PS512", argLength: 3, reg: fp31, asm: "VFMADD231PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB132PS512", argLength: 3, reg: fp31, asm: "VFMADDSUB132PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB213PS512", argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB231PS512", argLength: 3, reg: fp31, asm: "VFMADDSUB231PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUB132PS512", argLength: 3, reg: fp31, asm: 
"VFMSUB132PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUB213PS512", argLength: 3, reg: fp31, asm: "VFMSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUB231PS512", argLength: 3, reg: fp31, asm: "VFMSUB231PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD132PS512", argLength: 3, reg: fp31, asm: "VFMSUBADD132PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD213PS512", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD231PS512", argLength: 3, reg: fp31, asm: "VFMSUBADD231PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFNMADD132PS512", argLength: 3, reg: fp31, asm: "VFNMADD132PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFNMADD213PS512", argLength: 3, reg: fp31, asm: "VFNMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFNMADD231PS512", argLength: 3, reg: fp31, asm: "VFNMADD231PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFNMSUB132PS512", argLength: 3, reg: fp31, asm: "VFNMSUB132PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFNMSUB213PS512", argLength: 3, reg: fp31, asm: "VFNMSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFNMSUB231PS512", argLength: 3, reg: fp31, asm: "VFNMSUB231PS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VADDPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VANDPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VANDNPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDNPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VRCP14PSMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRSQRT14PSMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VDIVPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VFMADD132PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADD132PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADD213PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADD231PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADD231PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB132PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB132PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB213PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB231PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB231PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUB132PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB132PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUB213PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUB231PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB231PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: 
"VFMSUBADD132PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD132PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD213PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD231PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD231PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFNMADD132PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD132PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFNMADD213PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFNMADD231PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD231PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFNMSUB132PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB132PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFNMSUB213PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFNMSUB231PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB231PS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VMAXPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMINPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMULPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -36,12 +72,48 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VRCP14PS128", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRSQRTPS128", argLength: 1, reg: fp11, asm: "VRSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPS128", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VFMADD132PS128", argLength: 3, reg: fp31, asm: "VFMADD132PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADD213PS128", argLength: 3, reg: fp31, asm: "VFMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADD231PS128", argLength: 3, reg: fp31, asm: "VFMADD231PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADDSUB132PS128", argLength: 3, reg: fp31, asm: "VFMADDSUB132PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADDSUB213PS128", argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADDSUB231PS128", argLength: 3, reg: fp31, asm: "VFMADDSUB231PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUB132PS128", argLength: 3, reg: fp31, asm: "VFMSUB132PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUB213PS128", argLength: 3, reg: fp31, asm: "VFMSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUB231PS128", argLength: 3, reg: fp31, asm: "VFMSUB231PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUBADD132PS128", argLength: 3, reg: fp31, asm: "VFMSUBADD132PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUBADD213PS128", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: 
"VFMSUBADD231PS128", argLength: 3, reg: fp31, asm: "VFMSUBADD231PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFNMADD132PS128", argLength: 3, reg: fp31, asm: "VFNMADD132PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFNMADD213PS128", argLength: 3, reg: fp31, asm: "VFNMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFNMADD231PS128", argLength: 3, reg: fp31, asm: "VFNMADD231PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFNMSUB132PS128", argLength: 3, reg: fp31, asm: "VFNMSUB132PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFNMSUB213PS128", argLength: 3, reg: fp31, asm: "VFNMSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFNMSUB231PS128", argLength: 3, reg: fp31, asm: "VFNMSUB231PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VADDPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VANDPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VANDNPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDNPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VRCP14PSMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRSQRT14PSMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VFMADD132PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADD132PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADD213PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADD231PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADD231PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADDSUB132PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB132PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADDSUB213PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADDSUB231PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB231PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUB132PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB132PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUB213PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUB231PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB231PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUBADD132PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD132PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUBADD213PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUBADD231PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD231PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFNMADD132PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD132PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFNMADD213PSMasked128", argLength: 
4, reg: fp3k1fp1, asm: "VFNMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFNMADD231PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD231PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFNMSUB132PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB132PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFNMSUB213PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFNMSUB231PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB231PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VMAXPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMAXPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMINPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMULPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -65,12 +137,48 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VRCP14PS256", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRSQRTPS256", argLength: 1, reg: fp11, asm: "VRSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPS256", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VFMADD132PS256", argLength: 3, reg: fp31, asm: "VFMADD132PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADD213PS256", argLength: 3, reg: fp31, asm: "VFMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADD231PS256", argLength: 3, reg: fp31, asm: "VFMADD231PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB132PS256", argLength: 3, reg: fp31, asm: "VFMADDSUB132PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB213PS256", argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB231PS256", argLength: 3, reg: fp31, asm: "VFMADDSUB231PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUB132PS256", argLength: 3, reg: fp31, asm: "VFMSUB132PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUB213PS256", argLength: 3, reg: fp31, asm: "VFMSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUB231PS256", argLength: 3, reg: fp31, asm: "VFMSUB231PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUBADD132PS256", argLength: 3, reg: fp31, asm: "VFMSUBADD132PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUBADD213PS256", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUBADD231PS256", argLength: 3, reg: fp31, asm: "VFMSUBADD231PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFNMADD132PS256", argLength: 3, reg: fp31, asm: "VFNMADD132PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFNMADD213PS256", argLength: 3, reg: fp31, asm: "VFNMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFNMADD231PS256", argLength: 3, reg: fp31, asm: "VFNMADD231PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFNMSUB132PS256", argLength: 3, reg: fp31, asm: "VFNMSUB132PS", commutative: false, typ: 
"Vec256", resultInArg0: true}, + {name: "VFNMSUB213PS256", argLength: 3, reg: fp31, asm: "VFNMSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFNMSUB231PS256", argLength: 3, reg: fp31, asm: "VFNMSUB231PS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VADDPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VANDPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VANDNPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDNPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VRCP14PSMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRSQRT14PSMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VFMADD132PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADD132PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADD213PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADD231PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADD231PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB132PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB132PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB213PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB231PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB231PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUB132PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB132PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUB213PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUB231PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB231PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUBADD132PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD132PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUBADD213PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUBADD231PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD231PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFNMADD132PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD132PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFNMADD213PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFNMADD231PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD231PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFNMSUB132PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB132PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFNMSUB213PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFNMSUB231PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB231PS", commutative: 
false, typ: "Vec256", resultInArg0: true}, {name: "VMAXPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMINPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMULPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false}, @@ -94,12 +202,48 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VRCP14PD128", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRSQRT14PD128", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPD128", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VFMADD132PD128", argLength: 3, reg: fp31, asm: "VFMADD132PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADD213PD128", argLength: 3, reg: fp31, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADD231PD128", argLength: 3, reg: fp31, asm: "VFMADD231PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADDSUB132PD128", argLength: 3, reg: fp31, asm: "VFMADDSUB132PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADDSUB213PD128", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADDSUB231PD128", argLength: 3, reg: fp31, asm: "VFMADDSUB231PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUB132PD128", argLength: 3, reg: fp31, asm: "VFMSUB132PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUB213PD128", argLength: 3, reg: fp31, asm: "VFMSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUB231PD128", argLength: 3, reg: fp31, asm: "VFMSUB231PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUBADD132PD128", argLength: 3, reg: fp31, asm: "VFMSUBADD132PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUBADD213PD128", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUBADD231PD128", argLength: 3, reg: fp31, asm: "VFMSUBADD231PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFNMADD132PD128", argLength: 3, reg: fp31, asm: "VFNMADD132PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFNMADD213PD128", argLength: 3, reg: fp31, asm: "VFNMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFNMADD231PD128", argLength: 3, reg: fp31, asm: "VFNMADD231PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFNMSUB132PD128", argLength: 3, reg: fp31, asm: "VFNMSUB132PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFNMSUB213PD128", argLength: 3, reg: fp31, asm: "VFNMSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFNMSUB231PD128", argLength: 3, reg: fp31, asm: "VFNMSUB231PD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VADDPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VANDPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VANDNPDMasked128", argLength: 3, reg: fp2k1fp1, asm: 
"VANDNPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VRCP14PDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRSQRT14PDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VFMADD132PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADD132PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADD213PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADD231PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADD231PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADDSUB132PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB132PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADDSUB213PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADDSUB231PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB231PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUB132PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB132PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUB213PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUB231PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB231PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUBADD132PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD132PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUBADD213PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUBADD231PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD231PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFNMADD132PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD132PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFNMADD213PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFNMADD231PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD231PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFNMSUB132PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB132PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFNMSUB213PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFNMSUB231PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB231PD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VMAXPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMAXPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMINPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMULPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -123,12 +267,48 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VRCP14PD256", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: 
false, typ: "Vec256", resultInArg0: false}, {name: "VRSQRT14PD256", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPD256", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VFMADD132PD256", argLength: 3, reg: fp31, asm: "VFMADD132PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADD213PD256", argLength: 3, reg: fp31, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADD231PD256", argLength: 3, reg: fp31, asm: "VFMADD231PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB132PD256", argLength: 3, reg: fp31, asm: "VFMADDSUB132PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB213PD256", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB231PD256", argLength: 3, reg: fp31, asm: "VFMADDSUB231PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUB132PD256", argLength: 3, reg: fp31, asm: "VFMSUB132PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUB213PD256", argLength: 3, reg: fp31, asm: "VFMSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUB231PD256", argLength: 3, reg: fp31, asm: "VFMSUB231PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUBADD132PD256", argLength: 3, reg: fp31, asm: "VFMSUBADD132PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUBADD213PD256", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUBADD231PD256", argLength: 3, reg: fp31, asm: "VFMSUBADD231PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFNMADD132PD256", argLength: 3, reg: fp31, asm: "VFNMADD132PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFNMADD213PD256", argLength: 3, reg: fp31, asm: "VFNMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFNMADD231PD256", argLength: 3, reg: fp31, asm: "VFNMADD231PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFNMSUB132PD256", argLength: 3, reg: fp31, asm: "VFNMSUB132PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFNMSUB213PD256", argLength: 3, reg: fp31, asm: "VFNMSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFNMSUB231PD256", argLength: 3, reg: fp31, asm: "VFNMSUB231PD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VADDPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VANDPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VANDNPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDNPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VRCP14PDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRSQRT14PDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VFMADD132PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADD132PD", commutative: false, typ: "Vec256", resultInArg0: true}, + 
{name: "VFMADD213PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADD231PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADD231PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB132PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB132PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB213PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB231PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB231PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUB132PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB132PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUB213PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUB231PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB231PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUBADD132PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD132PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUBADD213PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUBADD231PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD231PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFNMADD132PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD132PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFNMADD213PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFNMADD231PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD231PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFNMSUB132PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB132PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFNMSUB213PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFNMSUB231PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB231PD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VMAXPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMAXPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMINPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMULPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false}, @@ -151,12 +331,48 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VRCP14PD512", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRSQRT14PD512", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VDIVPD512", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VFMADD132PD512", argLength: 3, reg: fp31, asm: "VFMADD132PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADD213PD512", argLength: 3, reg: fp31, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADD231PD512", argLength: 3, reg: fp31, asm: 
"VFMADD231PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB132PD512", argLength: 3, reg: fp31, asm: "VFMADDSUB132PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB213PD512", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB231PD512", argLength: 3, reg: fp31, asm: "VFMADDSUB231PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUB132PD512", argLength: 3, reg: fp31, asm: "VFMSUB132PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUB213PD512", argLength: 3, reg: fp31, asm: "VFMSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUB231PD512", argLength: 3, reg: fp31, asm: "VFMSUB231PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD132PD512", argLength: 3, reg: fp31, asm: "VFMSUBADD132PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD213PD512", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD231PD512", argLength: 3, reg: fp31, asm: "VFMSUBADD231PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFNMADD132PD512", argLength: 3, reg: fp31, asm: "VFNMADD132PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFNMADD213PD512", argLength: 3, reg: fp31, asm: "VFNMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFNMADD231PD512", argLength: 3, reg: fp31, asm: "VFNMADD231PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFNMSUB132PD512", argLength: 3, reg: fp31, asm: "VFNMSUB132PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFNMSUB213PD512", argLength: 3, reg: fp31, asm: "VFNMSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFNMSUB231PD512", argLength: 3, reg: fp31, asm: "VFNMSUB231PD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VADDPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VANDPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VANDNPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDNPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VRCP14PDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRSQRT14PDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VDIVPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VFMADD132PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADD132PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADD213PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADD231PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADD231PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB132PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB132PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB213PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB231PDMasked512", 
argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB231PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUB132PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB132PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUB213PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUB231PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB231PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD132PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD132PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD213PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD231PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD231PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFNMADD132PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD132PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFNMADD213PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFNMADD231PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD231PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFNMSUB132PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB132PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFNMSUB213PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFNMSUB231PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB231PD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VMAXPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMAXPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMINPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMULPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 404f1fc69fd07d..5c86f280913c7b 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -10,6 +10,24 @@ func simdGenericOps() []opData { {name: "ApproximateReciprocalOfSqrtFloat32x16", argLength: 1, commutative: false}, {name: "DivFloat32x16", argLength: 2, commutative: false}, {name: "EqualFloat32x16", argLength: 2, commutative: true}, + {name: "FusedMultiplyAdd132Float32x16", argLength: 3, commutative: false}, + {name: "FusedMultiplyAdd213Float32x16", argLength: 3, commutative: false}, + {name: "FusedMultiplyAdd231Float32x16", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSub132Float32x16", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSub213Float32x16", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSub231Float32x16", argLength: 3, commutative: false}, + {name: "FusedMultiplySub132Float32x16", argLength: 3, commutative: false}, + {name: "FusedMultiplySub213Float32x16", argLength: 3, commutative: false}, + {name: "FusedMultiplySub231Float32x16", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAdd132Float32x16", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAdd213Float32x16", argLength: 3, 
commutative: false}, + {name: "FusedMultiplySubAdd231Float32x16", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplyAdd132Float32x16", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplyAdd213Float32x16", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplyAdd231Float32x16", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplySub132Float32x16", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplySub213Float32x16", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplySub231Float32x16", argLength: 3, commutative: false}, {name: "GreaterFloat32x16", argLength: 2, commutative: false}, {name: "GreaterEqualFloat32x16", argLength: 2, commutative: false}, {name: "IsNanFloat32x16", argLength: 2, commutative: true}, @@ -22,6 +40,24 @@ func simdGenericOps() []opData { {name: "MaskedApproximateReciprocalOfSqrtFloat32x16", argLength: 2, commutative: false}, {name: "MaskedDivFloat32x16", argLength: 3, commutative: false}, {name: "MaskedEqualFloat32x16", argLength: 3, commutative: true}, + {name: "MaskedFusedMultiplyAdd132Float32x16", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAdd213Float32x16", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAdd231Float32x16", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddSub132Float32x16", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddSub213Float32x16", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddSub231Float32x16", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySub132Float32x16", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySub213Float32x16", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySub231Float32x16", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySubAdd132Float32x16", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySubAdd213Float32x16", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySubAdd231Float32x16", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplyAdd132Float32x16", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplyAdd213Float32x16", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplyAdd231Float32x16", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplySub132Float32x16", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplySub213Float32x16", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplySub231Float32x16", argLength: 4, commutative: false}, {name: "MaskedGreaterFloat32x16", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualFloat32x16", argLength: 3, commutative: false}, {name: "MaskedIsNanFloat32x16", argLength: 3, commutative: true}, @@ -55,6 +91,24 @@ func simdGenericOps() []opData { {name: "DivFloat32x4", argLength: 2, commutative: false}, {name: "EqualFloat32x4", argLength: 2, commutative: true}, {name: "FloorFloat32x4", argLength: 1, commutative: false}, + {name: "FusedMultiplyAdd132Float32x4", argLength: 3, commutative: false}, + {name: "FusedMultiplyAdd213Float32x4", argLength: 3, commutative: false}, + {name: "FusedMultiplyAdd231Float32x4", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSub132Float32x4", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSub213Float32x4", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSub231Float32x4", argLength: 3, commutative: false}, + {name: 
"FusedMultiplySub132Float32x4", argLength: 3, commutative: false}, + {name: "FusedMultiplySub213Float32x4", argLength: 3, commutative: false}, + {name: "FusedMultiplySub231Float32x4", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAdd132Float32x4", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAdd213Float32x4", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAdd231Float32x4", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplyAdd132Float32x4", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplyAdd213Float32x4", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplyAdd231Float32x4", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplySub132Float32x4", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplySub213Float32x4", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplySub231Float32x4", argLength: 3, commutative: false}, {name: "GreaterFloat32x4", argLength: 2, commutative: false}, {name: "GreaterEqualFloat32x4", argLength: 2, commutative: false}, {name: "IsNanFloat32x4", argLength: 2, commutative: true}, @@ -67,6 +121,24 @@ func simdGenericOps() []opData { {name: "MaskedApproximateReciprocalOfSqrtFloat32x4", argLength: 2, commutative: false}, {name: "MaskedDivFloat32x4", argLength: 3, commutative: false}, {name: "MaskedEqualFloat32x4", argLength: 3, commutative: true}, + {name: "MaskedFusedMultiplyAdd132Float32x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAdd213Float32x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAdd231Float32x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddSub132Float32x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddSub213Float32x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddSub231Float32x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySub132Float32x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySub213Float32x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySub231Float32x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySubAdd132Float32x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySubAdd213Float32x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySubAdd231Float32x4", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplyAdd132Float32x4", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplyAdd213Float32x4", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplyAdd231Float32x4", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplySub132Float32x4", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplySub213Float32x4", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplySub231Float32x4", argLength: 4, commutative: false}, {name: "MaskedGreaterFloat32x4", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualFloat32x4", argLength: 3, commutative: false}, {name: "MaskedIsNanFloat32x4", argLength: 3, commutative: true}, @@ -104,6 +176,24 @@ func simdGenericOps() []opData { {name: "DivFloat32x8", argLength: 2, commutative: false}, {name: "EqualFloat32x8", argLength: 2, commutative: true}, {name: "FloorFloat32x8", argLength: 1, commutative: false}, + {name: "FusedMultiplyAdd132Float32x8", argLength: 3, commutative: false}, + {name: "FusedMultiplyAdd213Float32x8", argLength: 3, commutative: false}, + 
{name: "FusedMultiplyAdd231Float32x8", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSub132Float32x8", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSub213Float32x8", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSub231Float32x8", argLength: 3, commutative: false}, + {name: "FusedMultiplySub132Float32x8", argLength: 3, commutative: false}, + {name: "FusedMultiplySub213Float32x8", argLength: 3, commutative: false}, + {name: "FusedMultiplySub231Float32x8", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAdd132Float32x8", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAdd213Float32x8", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAdd231Float32x8", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplyAdd132Float32x8", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplyAdd213Float32x8", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplyAdd231Float32x8", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplySub132Float32x8", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplySub213Float32x8", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplySub231Float32x8", argLength: 3, commutative: false}, {name: "GreaterFloat32x8", argLength: 2, commutative: false}, {name: "GreaterEqualFloat32x8", argLength: 2, commutative: false}, {name: "IsNanFloat32x8", argLength: 2, commutative: true}, @@ -116,6 +206,24 @@ func simdGenericOps() []opData { {name: "MaskedApproximateReciprocalOfSqrtFloat32x8", argLength: 2, commutative: false}, {name: "MaskedDivFloat32x8", argLength: 3, commutative: false}, {name: "MaskedEqualFloat32x8", argLength: 3, commutative: true}, + {name: "MaskedFusedMultiplyAdd132Float32x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAdd213Float32x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAdd231Float32x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddSub132Float32x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddSub213Float32x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddSub231Float32x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySub132Float32x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySub213Float32x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySub231Float32x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySubAdd132Float32x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySubAdd213Float32x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySubAdd231Float32x8", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplyAdd132Float32x8", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplyAdd213Float32x8", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplyAdd231Float32x8", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplySub132Float32x8", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplySub213Float32x8", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplySub231Float32x8", argLength: 4, commutative: false}, {name: "MaskedGreaterFloat32x8", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualFloat32x8", argLength: 3, commutative: false}, {name: "MaskedIsNanFloat32x8", argLength: 3, commutative: true}, @@ -154,6 +262,24 @@ func simdGenericOps() []opData { {name: 
"DotProdBroadcastFloat64x2", argLength: 2, commutative: true}, {name: "EqualFloat64x2", argLength: 2, commutative: true}, {name: "FloorFloat64x2", argLength: 1, commutative: false}, + {name: "FusedMultiplyAdd132Float64x2", argLength: 3, commutative: false}, + {name: "FusedMultiplyAdd213Float64x2", argLength: 3, commutative: false}, + {name: "FusedMultiplyAdd231Float64x2", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSub132Float64x2", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSub213Float64x2", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSub231Float64x2", argLength: 3, commutative: false}, + {name: "FusedMultiplySub132Float64x2", argLength: 3, commutative: false}, + {name: "FusedMultiplySub213Float64x2", argLength: 3, commutative: false}, + {name: "FusedMultiplySub231Float64x2", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAdd132Float64x2", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAdd213Float64x2", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAdd231Float64x2", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplyAdd132Float64x2", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplyAdd213Float64x2", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplyAdd231Float64x2", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplySub132Float64x2", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplySub213Float64x2", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplySub231Float64x2", argLength: 3, commutative: false}, {name: "GreaterFloat64x2", argLength: 2, commutative: false}, {name: "GreaterEqualFloat64x2", argLength: 2, commutative: false}, {name: "IsNanFloat64x2", argLength: 2, commutative: true}, @@ -166,6 +292,24 @@ func simdGenericOps() []opData { {name: "MaskedApproximateReciprocalOfSqrtFloat64x2", argLength: 2, commutative: false}, {name: "MaskedDivFloat64x2", argLength: 3, commutative: false}, {name: "MaskedEqualFloat64x2", argLength: 3, commutative: true}, + {name: "MaskedFusedMultiplyAdd132Float64x2", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAdd213Float64x2", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAdd231Float64x2", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddSub132Float64x2", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddSub213Float64x2", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddSub231Float64x2", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySub132Float64x2", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySub213Float64x2", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySub231Float64x2", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySubAdd132Float64x2", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySubAdd213Float64x2", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySubAdd231Float64x2", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplyAdd132Float64x2", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplyAdd213Float64x2", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplyAdd231Float64x2", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplySub132Float64x2", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplySub213Float64x2", argLength: 4, commutative: false}, + {name: 
"MaskedFusedNegativeMultiplySub231Float64x2", argLength: 4, commutative: false}, {name: "MaskedGreaterFloat64x2", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualFloat64x2", argLength: 3, commutative: false}, {name: "MaskedIsNanFloat64x2", argLength: 3, commutative: true}, @@ -203,6 +347,24 @@ func simdGenericOps() []opData { {name: "DivFloat64x4", argLength: 2, commutative: false}, {name: "EqualFloat64x4", argLength: 2, commutative: true}, {name: "FloorFloat64x4", argLength: 1, commutative: false}, + {name: "FusedMultiplyAdd132Float64x4", argLength: 3, commutative: false}, + {name: "FusedMultiplyAdd213Float64x4", argLength: 3, commutative: false}, + {name: "FusedMultiplyAdd231Float64x4", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSub132Float64x4", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSub213Float64x4", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSub231Float64x4", argLength: 3, commutative: false}, + {name: "FusedMultiplySub132Float64x4", argLength: 3, commutative: false}, + {name: "FusedMultiplySub213Float64x4", argLength: 3, commutative: false}, + {name: "FusedMultiplySub231Float64x4", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAdd132Float64x4", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAdd213Float64x4", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAdd231Float64x4", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplyAdd132Float64x4", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplyAdd213Float64x4", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplyAdd231Float64x4", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplySub132Float64x4", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplySub213Float64x4", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplySub231Float64x4", argLength: 3, commutative: false}, {name: "GreaterFloat64x4", argLength: 2, commutative: false}, {name: "GreaterEqualFloat64x4", argLength: 2, commutative: false}, {name: "IsNanFloat64x4", argLength: 2, commutative: true}, @@ -215,6 +377,24 @@ func simdGenericOps() []opData { {name: "MaskedApproximateReciprocalOfSqrtFloat64x4", argLength: 2, commutative: false}, {name: "MaskedDivFloat64x4", argLength: 3, commutative: false}, {name: "MaskedEqualFloat64x4", argLength: 3, commutative: true}, + {name: "MaskedFusedMultiplyAdd132Float64x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAdd213Float64x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAdd231Float64x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddSub132Float64x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddSub213Float64x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddSub231Float64x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySub132Float64x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySub213Float64x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySub231Float64x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySubAdd132Float64x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySubAdd213Float64x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySubAdd231Float64x4", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplyAdd132Float64x4", argLength: 4, commutative: false}, + {name: 
"MaskedFusedNegativeMultiplyAdd213Float64x4", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplyAdd231Float64x4", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplySub132Float64x4", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplySub213Float64x4", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplySub231Float64x4", argLength: 4, commutative: false}, {name: "MaskedGreaterFloat64x4", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualFloat64x4", argLength: 3, commutative: false}, {name: "MaskedIsNanFloat64x4", argLength: 3, commutative: true}, @@ -249,6 +429,24 @@ func simdGenericOps() []opData { {name: "ApproximateReciprocalOfSqrtFloat64x8", argLength: 1, commutative: false}, {name: "DivFloat64x8", argLength: 2, commutative: false}, {name: "EqualFloat64x8", argLength: 2, commutative: true}, + {name: "FusedMultiplyAdd132Float64x8", argLength: 3, commutative: false}, + {name: "FusedMultiplyAdd213Float64x8", argLength: 3, commutative: false}, + {name: "FusedMultiplyAdd231Float64x8", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSub132Float64x8", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSub213Float64x8", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSub231Float64x8", argLength: 3, commutative: false}, + {name: "FusedMultiplySub132Float64x8", argLength: 3, commutative: false}, + {name: "FusedMultiplySub213Float64x8", argLength: 3, commutative: false}, + {name: "FusedMultiplySub231Float64x8", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAdd132Float64x8", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAdd213Float64x8", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAdd231Float64x8", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplyAdd132Float64x8", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplyAdd213Float64x8", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplyAdd231Float64x8", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplySub132Float64x8", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplySub213Float64x8", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplySub231Float64x8", argLength: 3, commutative: false}, {name: "GreaterFloat64x8", argLength: 2, commutative: false}, {name: "GreaterEqualFloat64x8", argLength: 2, commutative: false}, {name: "IsNanFloat64x8", argLength: 2, commutative: true}, @@ -261,6 +459,24 @@ func simdGenericOps() []opData { {name: "MaskedApproximateReciprocalOfSqrtFloat64x8", argLength: 2, commutative: false}, {name: "MaskedDivFloat64x8", argLength: 3, commutative: false}, {name: "MaskedEqualFloat64x8", argLength: 3, commutative: true}, + {name: "MaskedFusedMultiplyAdd132Float64x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAdd213Float64x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAdd231Float64x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddSub132Float64x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddSub213Float64x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddSub231Float64x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySub132Float64x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySub213Float64x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySub231Float64x8", argLength: 4, commutative: false}, + {name: 
"MaskedFusedMultiplySubAdd132Float64x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySubAdd213Float64x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySubAdd231Float64x8", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplyAdd132Float64x8", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplyAdd213Float64x8", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplyAdd231Float64x8", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplySub132Float64x8", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplySub213Float64x8", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplySub231Float64x8", argLength: 4, commutative: false}, {name: "MaskedGreaterFloat64x8", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualFloat64x8", argLength: 3, commutative: false}, {name: "MaskedIsNanFloat64x8", argLength: 3, commutative: true}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 26facad933461b..106f3e16574733 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1202,12 +1202,48 @@ const ( OpAMD64VRCP14PS512 OpAMD64VRSQRT14PS512 OpAMD64VDIVPS512 + OpAMD64VFMADD132PS512 + OpAMD64VFMADD213PS512 + OpAMD64VFMADD231PS512 + OpAMD64VFMADDSUB132PS512 + OpAMD64VFMADDSUB213PS512 + OpAMD64VFMADDSUB231PS512 + OpAMD64VFMSUB132PS512 + OpAMD64VFMSUB213PS512 + OpAMD64VFMSUB231PS512 + OpAMD64VFMSUBADD132PS512 + OpAMD64VFMSUBADD213PS512 + OpAMD64VFMSUBADD231PS512 + OpAMD64VFNMADD132PS512 + OpAMD64VFNMADD213PS512 + OpAMD64VFNMADD231PS512 + OpAMD64VFNMSUB132PS512 + OpAMD64VFNMSUB213PS512 + OpAMD64VFNMSUB231PS512 OpAMD64VADDPSMasked512 OpAMD64VANDPSMasked512 OpAMD64VANDNPSMasked512 OpAMD64VRCP14PSMasked512 OpAMD64VRSQRT14PSMasked512 OpAMD64VDIVPSMasked512 + OpAMD64VFMADD132PSMasked512 + OpAMD64VFMADD213PSMasked512 + OpAMD64VFMADD231PSMasked512 + OpAMD64VFMADDSUB132PSMasked512 + OpAMD64VFMADDSUB213PSMasked512 + OpAMD64VFMADDSUB231PSMasked512 + OpAMD64VFMSUB132PSMasked512 + OpAMD64VFMSUB213PSMasked512 + OpAMD64VFMSUB231PSMasked512 + OpAMD64VFMSUBADD132PSMasked512 + OpAMD64VFMSUBADD213PSMasked512 + OpAMD64VFMSUBADD231PSMasked512 + OpAMD64VFNMADD132PSMasked512 + OpAMD64VFNMADD213PSMasked512 + OpAMD64VFNMADD231PSMasked512 + OpAMD64VFNMSUB132PSMasked512 + OpAMD64VFNMSUB213PSMasked512 + OpAMD64VFNMSUB231PSMasked512 OpAMD64VMAXPSMasked512 OpAMD64VMINPSMasked512 OpAMD64VMULPSMasked512 @@ -1229,12 +1265,48 @@ const ( OpAMD64VRCP14PS128 OpAMD64VRSQRTPS128 OpAMD64VDIVPS128 + OpAMD64VFMADD132PS128 + OpAMD64VFMADD213PS128 + OpAMD64VFMADD231PS128 + OpAMD64VFMADDSUB132PS128 + OpAMD64VFMADDSUB213PS128 + OpAMD64VFMADDSUB231PS128 + OpAMD64VFMSUB132PS128 + OpAMD64VFMSUB213PS128 + OpAMD64VFMSUB231PS128 + OpAMD64VFMSUBADD132PS128 + OpAMD64VFMSUBADD213PS128 + OpAMD64VFMSUBADD231PS128 + OpAMD64VFNMADD132PS128 + OpAMD64VFNMADD213PS128 + OpAMD64VFNMADD231PS128 + OpAMD64VFNMSUB132PS128 + OpAMD64VFNMSUB213PS128 + OpAMD64VFNMSUB231PS128 OpAMD64VADDPSMasked128 OpAMD64VANDPSMasked128 OpAMD64VANDNPSMasked128 OpAMD64VRCP14PSMasked128 OpAMD64VRSQRT14PSMasked128 OpAMD64VDIVPSMasked128 + OpAMD64VFMADD132PSMasked128 + OpAMD64VFMADD213PSMasked128 + OpAMD64VFMADD231PSMasked128 + OpAMD64VFMADDSUB132PSMasked128 + OpAMD64VFMADDSUB213PSMasked128 + OpAMD64VFMADDSUB231PSMasked128 + OpAMD64VFMSUB132PSMasked128 + OpAMD64VFMSUB213PSMasked128 + OpAMD64VFMSUB231PSMasked128 + OpAMD64VFMSUBADD132PSMasked128 + 
OpAMD64VFMSUBADD213PSMasked128 + OpAMD64VFMSUBADD231PSMasked128 + OpAMD64VFNMADD132PSMasked128 + OpAMD64VFNMADD213PSMasked128 + OpAMD64VFNMADD231PSMasked128 + OpAMD64VFNMSUB132PSMasked128 + OpAMD64VFNMSUB213PSMasked128 + OpAMD64VFNMSUB231PSMasked128 OpAMD64VMAXPSMasked128 OpAMD64VMINPSMasked128 OpAMD64VMULPSMasked128 @@ -1258,12 +1330,48 @@ const ( OpAMD64VRCP14PS256 OpAMD64VRSQRTPS256 OpAMD64VDIVPS256 + OpAMD64VFMADD132PS256 + OpAMD64VFMADD213PS256 + OpAMD64VFMADD231PS256 + OpAMD64VFMADDSUB132PS256 + OpAMD64VFMADDSUB213PS256 + OpAMD64VFMADDSUB231PS256 + OpAMD64VFMSUB132PS256 + OpAMD64VFMSUB213PS256 + OpAMD64VFMSUB231PS256 + OpAMD64VFMSUBADD132PS256 + OpAMD64VFMSUBADD213PS256 + OpAMD64VFMSUBADD231PS256 + OpAMD64VFNMADD132PS256 + OpAMD64VFNMADD213PS256 + OpAMD64VFNMADD231PS256 + OpAMD64VFNMSUB132PS256 + OpAMD64VFNMSUB213PS256 + OpAMD64VFNMSUB231PS256 OpAMD64VADDPSMasked256 OpAMD64VANDPSMasked256 OpAMD64VANDNPSMasked256 OpAMD64VRCP14PSMasked256 OpAMD64VRSQRT14PSMasked256 OpAMD64VDIVPSMasked256 + OpAMD64VFMADD132PSMasked256 + OpAMD64VFMADD213PSMasked256 + OpAMD64VFMADD231PSMasked256 + OpAMD64VFMADDSUB132PSMasked256 + OpAMD64VFMADDSUB213PSMasked256 + OpAMD64VFMADDSUB231PSMasked256 + OpAMD64VFMSUB132PSMasked256 + OpAMD64VFMSUB213PSMasked256 + OpAMD64VFMSUB231PSMasked256 + OpAMD64VFMSUBADD132PSMasked256 + OpAMD64VFMSUBADD213PSMasked256 + OpAMD64VFMSUBADD231PSMasked256 + OpAMD64VFNMADD132PSMasked256 + OpAMD64VFNMADD213PSMasked256 + OpAMD64VFNMADD231PSMasked256 + OpAMD64VFNMSUB132PSMasked256 + OpAMD64VFNMSUB213PSMasked256 + OpAMD64VFNMSUB231PSMasked256 OpAMD64VMAXPSMasked256 OpAMD64VMINPSMasked256 OpAMD64VMULPSMasked256 @@ -1287,12 +1395,48 @@ const ( OpAMD64VRCP14PD128 OpAMD64VRSQRT14PD128 OpAMD64VDIVPD128 + OpAMD64VFMADD132PD128 + OpAMD64VFMADD213PD128 + OpAMD64VFMADD231PD128 + OpAMD64VFMADDSUB132PD128 + OpAMD64VFMADDSUB213PD128 + OpAMD64VFMADDSUB231PD128 + OpAMD64VFMSUB132PD128 + OpAMD64VFMSUB213PD128 + OpAMD64VFMSUB231PD128 + OpAMD64VFMSUBADD132PD128 + OpAMD64VFMSUBADD213PD128 + OpAMD64VFMSUBADD231PD128 + OpAMD64VFNMADD132PD128 + OpAMD64VFNMADD213PD128 + OpAMD64VFNMADD231PD128 + OpAMD64VFNMSUB132PD128 + OpAMD64VFNMSUB213PD128 + OpAMD64VFNMSUB231PD128 OpAMD64VADDPDMasked128 OpAMD64VANDPDMasked128 OpAMD64VANDNPDMasked128 OpAMD64VRCP14PDMasked128 OpAMD64VRSQRT14PDMasked128 OpAMD64VDIVPDMasked128 + OpAMD64VFMADD132PDMasked128 + OpAMD64VFMADD213PDMasked128 + OpAMD64VFMADD231PDMasked128 + OpAMD64VFMADDSUB132PDMasked128 + OpAMD64VFMADDSUB213PDMasked128 + OpAMD64VFMADDSUB231PDMasked128 + OpAMD64VFMSUB132PDMasked128 + OpAMD64VFMSUB213PDMasked128 + OpAMD64VFMSUB231PDMasked128 + OpAMD64VFMSUBADD132PDMasked128 + OpAMD64VFMSUBADD213PDMasked128 + OpAMD64VFMSUBADD231PDMasked128 + OpAMD64VFNMADD132PDMasked128 + OpAMD64VFNMADD213PDMasked128 + OpAMD64VFNMADD231PDMasked128 + OpAMD64VFNMSUB132PDMasked128 + OpAMD64VFNMSUB213PDMasked128 + OpAMD64VFNMSUB231PDMasked128 OpAMD64VMAXPDMasked128 OpAMD64VMINPDMasked128 OpAMD64VMULPDMasked128 @@ -1316,12 +1460,48 @@ const ( OpAMD64VRCP14PD256 OpAMD64VRSQRT14PD256 OpAMD64VDIVPD256 + OpAMD64VFMADD132PD256 + OpAMD64VFMADD213PD256 + OpAMD64VFMADD231PD256 + OpAMD64VFMADDSUB132PD256 + OpAMD64VFMADDSUB213PD256 + OpAMD64VFMADDSUB231PD256 + OpAMD64VFMSUB132PD256 + OpAMD64VFMSUB213PD256 + OpAMD64VFMSUB231PD256 + OpAMD64VFMSUBADD132PD256 + OpAMD64VFMSUBADD213PD256 + OpAMD64VFMSUBADD231PD256 + OpAMD64VFNMADD132PD256 + OpAMD64VFNMADD213PD256 + OpAMD64VFNMADD231PD256 + OpAMD64VFNMSUB132PD256 + OpAMD64VFNMSUB213PD256 + OpAMD64VFNMSUB231PD256 OpAMD64VADDPDMasked256 OpAMD64VANDPDMasked256 
OpAMD64VANDNPDMasked256 OpAMD64VRCP14PDMasked256 OpAMD64VRSQRT14PDMasked256 OpAMD64VDIVPDMasked256 + OpAMD64VFMADD132PDMasked256 + OpAMD64VFMADD213PDMasked256 + OpAMD64VFMADD231PDMasked256 + OpAMD64VFMADDSUB132PDMasked256 + OpAMD64VFMADDSUB213PDMasked256 + OpAMD64VFMADDSUB231PDMasked256 + OpAMD64VFMSUB132PDMasked256 + OpAMD64VFMSUB213PDMasked256 + OpAMD64VFMSUB231PDMasked256 + OpAMD64VFMSUBADD132PDMasked256 + OpAMD64VFMSUBADD213PDMasked256 + OpAMD64VFMSUBADD231PDMasked256 + OpAMD64VFNMADD132PDMasked256 + OpAMD64VFNMADD213PDMasked256 + OpAMD64VFNMADD231PDMasked256 + OpAMD64VFNMSUB132PDMasked256 + OpAMD64VFNMSUB213PDMasked256 + OpAMD64VFNMSUB231PDMasked256 OpAMD64VMAXPDMasked256 OpAMD64VMINPDMasked256 OpAMD64VMULPDMasked256 @@ -1344,12 +1524,48 @@ const ( OpAMD64VRCP14PD512 OpAMD64VRSQRT14PD512 OpAMD64VDIVPD512 + OpAMD64VFMADD132PD512 + OpAMD64VFMADD213PD512 + OpAMD64VFMADD231PD512 + OpAMD64VFMADDSUB132PD512 + OpAMD64VFMADDSUB213PD512 + OpAMD64VFMADDSUB231PD512 + OpAMD64VFMSUB132PD512 + OpAMD64VFMSUB213PD512 + OpAMD64VFMSUB231PD512 + OpAMD64VFMSUBADD132PD512 + OpAMD64VFMSUBADD213PD512 + OpAMD64VFMSUBADD231PD512 + OpAMD64VFNMADD132PD512 + OpAMD64VFNMADD213PD512 + OpAMD64VFNMADD231PD512 + OpAMD64VFNMSUB132PD512 + OpAMD64VFNMSUB213PD512 + OpAMD64VFNMSUB231PD512 OpAMD64VADDPDMasked512 OpAMD64VANDPDMasked512 OpAMD64VANDNPDMasked512 OpAMD64VRCP14PDMasked512 OpAMD64VRSQRT14PDMasked512 OpAMD64VDIVPDMasked512 + OpAMD64VFMADD132PDMasked512 + OpAMD64VFMADD213PDMasked512 + OpAMD64VFMADD231PDMasked512 + OpAMD64VFMADDSUB132PDMasked512 + OpAMD64VFMADDSUB213PDMasked512 + OpAMD64VFMADDSUB231PDMasked512 + OpAMD64VFMSUB132PDMasked512 + OpAMD64VFMSUB213PDMasked512 + OpAMD64VFMSUB231PDMasked512 + OpAMD64VFMSUBADD132PDMasked512 + OpAMD64VFMSUBADD213PDMasked512 + OpAMD64VFMSUBADD231PDMasked512 + OpAMD64VFNMADD132PDMasked512 + OpAMD64VFNMADD213PDMasked512 + OpAMD64VFNMADD231PDMasked512 + OpAMD64VFNMSUB132PDMasked512 + OpAMD64VFNMSUB213PDMasked512 + OpAMD64VFNMSUB231PDMasked512 OpAMD64VMAXPDMasked512 OpAMD64VMINPDMasked512 OpAMD64VMULPDMasked512 @@ -4098,6 +4314,24 @@ const ( OpApproximateReciprocalOfSqrtFloat32x16 OpDivFloat32x16 OpEqualFloat32x16 + OpFusedMultiplyAdd132Float32x16 + OpFusedMultiplyAdd213Float32x16 + OpFusedMultiplyAdd231Float32x16 + OpFusedMultiplyAddSub132Float32x16 + OpFusedMultiplyAddSub213Float32x16 + OpFusedMultiplyAddSub231Float32x16 + OpFusedMultiplySub132Float32x16 + OpFusedMultiplySub213Float32x16 + OpFusedMultiplySub231Float32x16 + OpFusedMultiplySubAdd132Float32x16 + OpFusedMultiplySubAdd213Float32x16 + OpFusedMultiplySubAdd231Float32x16 + OpFusedNegativeMultiplyAdd132Float32x16 + OpFusedNegativeMultiplyAdd213Float32x16 + OpFusedNegativeMultiplyAdd231Float32x16 + OpFusedNegativeMultiplySub132Float32x16 + OpFusedNegativeMultiplySub213Float32x16 + OpFusedNegativeMultiplySub231Float32x16 OpGreaterFloat32x16 OpGreaterEqualFloat32x16 OpIsNanFloat32x16 @@ -4110,6 +4344,24 @@ const ( OpMaskedApproximateReciprocalOfSqrtFloat32x16 OpMaskedDivFloat32x16 OpMaskedEqualFloat32x16 + OpMaskedFusedMultiplyAdd132Float32x16 + OpMaskedFusedMultiplyAdd213Float32x16 + OpMaskedFusedMultiplyAdd231Float32x16 + OpMaskedFusedMultiplyAddSub132Float32x16 + OpMaskedFusedMultiplyAddSub213Float32x16 + OpMaskedFusedMultiplyAddSub231Float32x16 + OpMaskedFusedMultiplySub132Float32x16 + OpMaskedFusedMultiplySub213Float32x16 + OpMaskedFusedMultiplySub231Float32x16 + OpMaskedFusedMultiplySubAdd132Float32x16 + OpMaskedFusedMultiplySubAdd213Float32x16 + OpMaskedFusedMultiplySubAdd231Float32x16 + 
OpMaskedFusedNegativeMultiplyAdd132Float32x16 + OpMaskedFusedNegativeMultiplyAdd213Float32x16 + OpMaskedFusedNegativeMultiplyAdd231Float32x16 + OpMaskedFusedNegativeMultiplySub132Float32x16 + OpMaskedFusedNegativeMultiplySub213Float32x16 + OpMaskedFusedNegativeMultiplySub231Float32x16 OpMaskedGreaterFloat32x16 OpMaskedGreaterEqualFloat32x16 OpMaskedIsNanFloat32x16 @@ -4143,6 +4395,24 @@ const ( OpDivFloat32x4 OpEqualFloat32x4 OpFloorFloat32x4 + OpFusedMultiplyAdd132Float32x4 + OpFusedMultiplyAdd213Float32x4 + OpFusedMultiplyAdd231Float32x4 + OpFusedMultiplyAddSub132Float32x4 + OpFusedMultiplyAddSub213Float32x4 + OpFusedMultiplyAddSub231Float32x4 + OpFusedMultiplySub132Float32x4 + OpFusedMultiplySub213Float32x4 + OpFusedMultiplySub231Float32x4 + OpFusedMultiplySubAdd132Float32x4 + OpFusedMultiplySubAdd213Float32x4 + OpFusedMultiplySubAdd231Float32x4 + OpFusedNegativeMultiplyAdd132Float32x4 + OpFusedNegativeMultiplyAdd213Float32x4 + OpFusedNegativeMultiplyAdd231Float32x4 + OpFusedNegativeMultiplySub132Float32x4 + OpFusedNegativeMultiplySub213Float32x4 + OpFusedNegativeMultiplySub231Float32x4 OpGreaterFloat32x4 OpGreaterEqualFloat32x4 OpIsNanFloat32x4 @@ -4155,6 +4425,24 @@ const ( OpMaskedApproximateReciprocalOfSqrtFloat32x4 OpMaskedDivFloat32x4 OpMaskedEqualFloat32x4 + OpMaskedFusedMultiplyAdd132Float32x4 + OpMaskedFusedMultiplyAdd213Float32x4 + OpMaskedFusedMultiplyAdd231Float32x4 + OpMaskedFusedMultiplyAddSub132Float32x4 + OpMaskedFusedMultiplyAddSub213Float32x4 + OpMaskedFusedMultiplyAddSub231Float32x4 + OpMaskedFusedMultiplySub132Float32x4 + OpMaskedFusedMultiplySub213Float32x4 + OpMaskedFusedMultiplySub231Float32x4 + OpMaskedFusedMultiplySubAdd132Float32x4 + OpMaskedFusedMultiplySubAdd213Float32x4 + OpMaskedFusedMultiplySubAdd231Float32x4 + OpMaskedFusedNegativeMultiplyAdd132Float32x4 + OpMaskedFusedNegativeMultiplyAdd213Float32x4 + OpMaskedFusedNegativeMultiplyAdd231Float32x4 + OpMaskedFusedNegativeMultiplySub132Float32x4 + OpMaskedFusedNegativeMultiplySub213Float32x4 + OpMaskedFusedNegativeMultiplySub231Float32x4 OpMaskedGreaterFloat32x4 OpMaskedGreaterEqualFloat32x4 OpMaskedIsNanFloat32x4 @@ -4192,6 +4480,24 @@ const ( OpDivFloat32x8 OpEqualFloat32x8 OpFloorFloat32x8 + OpFusedMultiplyAdd132Float32x8 + OpFusedMultiplyAdd213Float32x8 + OpFusedMultiplyAdd231Float32x8 + OpFusedMultiplyAddSub132Float32x8 + OpFusedMultiplyAddSub213Float32x8 + OpFusedMultiplyAddSub231Float32x8 + OpFusedMultiplySub132Float32x8 + OpFusedMultiplySub213Float32x8 + OpFusedMultiplySub231Float32x8 + OpFusedMultiplySubAdd132Float32x8 + OpFusedMultiplySubAdd213Float32x8 + OpFusedMultiplySubAdd231Float32x8 + OpFusedNegativeMultiplyAdd132Float32x8 + OpFusedNegativeMultiplyAdd213Float32x8 + OpFusedNegativeMultiplyAdd231Float32x8 + OpFusedNegativeMultiplySub132Float32x8 + OpFusedNegativeMultiplySub213Float32x8 + OpFusedNegativeMultiplySub231Float32x8 OpGreaterFloat32x8 OpGreaterEqualFloat32x8 OpIsNanFloat32x8 @@ -4204,6 +4510,24 @@ const ( OpMaskedApproximateReciprocalOfSqrtFloat32x8 OpMaskedDivFloat32x8 OpMaskedEqualFloat32x8 + OpMaskedFusedMultiplyAdd132Float32x8 + OpMaskedFusedMultiplyAdd213Float32x8 + OpMaskedFusedMultiplyAdd231Float32x8 + OpMaskedFusedMultiplyAddSub132Float32x8 + OpMaskedFusedMultiplyAddSub213Float32x8 + OpMaskedFusedMultiplyAddSub231Float32x8 + OpMaskedFusedMultiplySub132Float32x8 + OpMaskedFusedMultiplySub213Float32x8 + OpMaskedFusedMultiplySub231Float32x8 + OpMaskedFusedMultiplySubAdd132Float32x8 + OpMaskedFusedMultiplySubAdd213Float32x8 + OpMaskedFusedMultiplySubAdd231Float32x8 + 
OpMaskedFusedNegativeMultiplyAdd132Float32x8 + OpMaskedFusedNegativeMultiplyAdd213Float32x8 + OpMaskedFusedNegativeMultiplyAdd231Float32x8 + OpMaskedFusedNegativeMultiplySub132Float32x8 + OpMaskedFusedNegativeMultiplySub213Float32x8 + OpMaskedFusedNegativeMultiplySub231Float32x8 OpMaskedGreaterFloat32x8 OpMaskedGreaterEqualFloat32x8 OpMaskedIsNanFloat32x8 @@ -4242,6 +4566,24 @@ const ( OpDotProdBroadcastFloat64x2 OpEqualFloat64x2 OpFloorFloat64x2 + OpFusedMultiplyAdd132Float64x2 + OpFusedMultiplyAdd213Float64x2 + OpFusedMultiplyAdd231Float64x2 + OpFusedMultiplyAddSub132Float64x2 + OpFusedMultiplyAddSub213Float64x2 + OpFusedMultiplyAddSub231Float64x2 + OpFusedMultiplySub132Float64x2 + OpFusedMultiplySub213Float64x2 + OpFusedMultiplySub231Float64x2 + OpFusedMultiplySubAdd132Float64x2 + OpFusedMultiplySubAdd213Float64x2 + OpFusedMultiplySubAdd231Float64x2 + OpFusedNegativeMultiplyAdd132Float64x2 + OpFusedNegativeMultiplyAdd213Float64x2 + OpFusedNegativeMultiplyAdd231Float64x2 + OpFusedNegativeMultiplySub132Float64x2 + OpFusedNegativeMultiplySub213Float64x2 + OpFusedNegativeMultiplySub231Float64x2 OpGreaterFloat64x2 OpGreaterEqualFloat64x2 OpIsNanFloat64x2 @@ -4254,6 +4596,24 @@ const ( OpMaskedApproximateReciprocalOfSqrtFloat64x2 OpMaskedDivFloat64x2 OpMaskedEqualFloat64x2 + OpMaskedFusedMultiplyAdd132Float64x2 + OpMaskedFusedMultiplyAdd213Float64x2 + OpMaskedFusedMultiplyAdd231Float64x2 + OpMaskedFusedMultiplyAddSub132Float64x2 + OpMaskedFusedMultiplyAddSub213Float64x2 + OpMaskedFusedMultiplyAddSub231Float64x2 + OpMaskedFusedMultiplySub132Float64x2 + OpMaskedFusedMultiplySub213Float64x2 + OpMaskedFusedMultiplySub231Float64x2 + OpMaskedFusedMultiplySubAdd132Float64x2 + OpMaskedFusedMultiplySubAdd213Float64x2 + OpMaskedFusedMultiplySubAdd231Float64x2 + OpMaskedFusedNegativeMultiplyAdd132Float64x2 + OpMaskedFusedNegativeMultiplyAdd213Float64x2 + OpMaskedFusedNegativeMultiplyAdd231Float64x2 + OpMaskedFusedNegativeMultiplySub132Float64x2 + OpMaskedFusedNegativeMultiplySub213Float64x2 + OpMaskedFusedNegativeMultiplySub231Float64x2 OpMaskedGreaterFloat64x2 OpMaskedGreaterEqualFloat64x2 OpMaskedIsNanFloat64x2 @@ -4291,6 +4651,24 @@ const ( OpDivFloat64x4 OpEqualFloat64x4 OpFloorFloat64x4 + OpFusedMultiplyAdd132Float64x4 + OpFusedMultiplyAdd213Float64x4 + OpFusedMultiplyAdd231Float64x4 + OpFusedMultiplyAddSub132Float64x4 + OpFusedMultiplyAddSub213Float64x4 + OpFusedMultiplyAddSub231Float64x4 + OpFusedMultiplySub132Float64x4 + OpFusedMultiplySub213Float64x4 + OpFusedMultiplySub231Float64x4 + OpFusedMultiplySubAdd132Float64x4 + OpFusedMultiplySubAdd213Float64x4 + OpFusedMultiplySubAdd231Float64x4 + OpFusedNegativeMultiplyAdd132Float64x4 + OpFusedNegativeMultiplyAdd213Float64x4 + OpFusedNegativeMultiplyAdd231Float64x4 + OpFusedNegativeMultiplySub132Float64x4 + OpFusedNegativeMultiplySub213Float64x4 + OpFusedNegativeMultiplySub231Float64x4 OpGreaterFloat64x4 OpGreaterEqualFloat64x4 OpIsNanFloat64x4 @@ -4303,6 +4681,24 @@ const ( OpMaskedApproximateReciprocalOfSqrtFloat64x4 OpMaskedDivFloat64x4 OpMaskedEqualFloat64x4 + OpMaskedFusedMultiplyAdd132Float64x4 + OpMaskedFusedMultiplyAdd213Float64x4 + OpMaskedFusedMultiplyAdd231Float64x4 + OpMaskedFusedMultiplyAddSub132Float64x4 + OpMaskedFusedMultiplyAddSub213Float64x4 + OpMaskedFusedMultiplyAddSub231Float64x4 + OpMaskedFusedMultiplySub132Float64x4 + OpMaskedFusedMultiplySub213Float64x4 + OpMaskedFusedMultiplySub231Float64x4 + OpMaskedFusedMultiplySubAdd132Float64x4 + OpMaskedFusedMultiplySubAdd213Float64x4 + OpMaskedFusedMultiplySubAdd231Float64x4 + 
OpMaskedFusedNegativeMultiplyAdd132Float64x4 + OpMaskedFusedNegativeMultiplyAdd213Float64x4 + OpMaskedFusedNegativeMultiplyAdd231Float64x4 + OpMaskedFusedNegativeMultiplySub132Float64x4 + OpMaskedFusedNegativeMultiplySub213Float64x4 + OpMaskedFusedNegativeMultiplySub231Float64x4 OpMaskedGreaterFloat64x4 OpMaskedGreaterEqualFloat64x4 OpMaskedIsNanFloat64x4 @@ -4337,6 +4733,24 @@ const ( OpApproximateReciprocalOfSqrtFloat64x8 OpDivFloat64x8 OpEqualFloat64x8 + OpFusedMultiplyAdd132Float64x8 + OpFusedMultiplyAdd213Float64x8 + OpFusedMultiplyAdd231Float64x8 + OpFusedMultiplyAddSub132Float64x8 + OpFusedMultiplyAddSub213Float64x8 + OpFusedMultiplyAddSub231Float64x8 + OpFusedMultiplySub132Float64x8 + OpFusedMultiplySub213Float64x8 + OpFusedMultiplySub231Float64x8 + OpFusedMultiplySubAdd132Float64x8 + OpFusedMultiplySubAdd213Float64x8 + OpFusedMultiplySubAdd231Float64x8 + OpFusedNegativeMultiplyAdd132Float64x8 + OpFusedNegativeMultiplyAdd213Float64x8 + OpFusedNegativeMultiplyAdd231Float64x8 + OpFusedNegativeMultiplySub132Float64x8 + OpFusedNegativeMultiplySub213Float64x8 + OpFusedNegativeMultiplySub231Float64x8 OpGreaterFloat64x8 OpGreaterEqualFloat64x8 OpIsNanFloat64x8 @@ -4349,6 +4763,24 @@ const ( OpMaskedApproximateReciprocalOfSqrtFloat64x8 OpMaskedDivFloat64x8 OpMaskedEqualFloat64x8 + OpMaskedFusedMultiplyAdd132Float64x8 + OpMaskedFusedMultiplyAdd213Float64x8 + OpMaskedFusedMultiplyAdd231Float64x8 + OpMaskedFusedMultiplyAddSub132Float64x8 + OpMaskedFusedMultiplyAddSub213Float64x8 + OpMaskedFusedMultiplyAddSub231Float64x8 + OpMaskedFusedMultiplySub132Float64x8 + OpMaskedFusedMultiplySub213Float64x8 + OpMaskedFusedMultiplySub231Float64x8 + OpMaskedFusedMultiplySubAdd132Float64x8 + OpMaskedFusedMultiplySubAdd213Float64x8 + OpMaskedFusedMultiplySubAdd231Float64x8 + OpMaskedFusedNegativeMultiplyAdd132Float64x8 + OpMaskedFusedNegativeMultiplyAdd213Float64x8 + OpMaskedFusedNegativeMultiplyAdd231Float64x8 + OpMaskedFusedNegativeMultiplySub132Float64x8 + OpMaskedFusedNegativeMultiplySub213Float64x8 + OpMaskedFusedNegativeMultiplySub231Float64x8 OpMaskedGreaterFloat64x8 OpMaskedGreaterEqualFloat64x8 OpMaskedIsNanFloat64x8 @@ -18107,15 +18539,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPSMasked512", - argLen: 3, - commutative: true, - asm: x86.AVADDPS, + name: "VFMADD132PS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD132PS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18123,15 +18555,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDPSMasked512", - argLen: 3, - commutative: true, - asm: x86.AVANDPS, + name: "VFMADD213PS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD213PS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18139,15 +18571,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPSMasked512", - argLen: 3, - commutative: true, - asm: x86.AVANDNPS, + name: "VFMADD231PS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD231PS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18155,13 +18587,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PSMasked512", - argLen: 2, - asm: x86.AVRCP14PS, + name: "VFMADDSUB132PS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADDSUB132PS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18169,13 +18603,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PSMasked512", - argLen: 2, - asm: x86.AVRSQRT14PS, + name: "VFMADDSUB213PS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADDSUB213PS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18183,14 +18619,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPSMasked512", - argLen: 3, - asm: x86.AVDIVPS, + name: "VFMADDSUB231PS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADDSUB231PS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18198,15 +18635,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPSMasked512", - argLen: 3, - commutative: true, - asm: x86.AVMAXPS, + name: "VFMSUB132PS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUB132PS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18214,15 +18651,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPSMasked512", - argLen: 3, - commutative: true, - asm: x86.AVMINPS, + name: "VFMSUB213PS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUB213PS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18230,15 +18667,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPSMasked512", - argLen: 3, - commutative: true, - asm: x86.AVMULPS, + name: "VFMSUB231PS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUB231PS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18246,14 +18683,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPSMasked512", - argLen: 3, - asm: x86.AVSCALEFPS, + name: "VFMSUBADD132PS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD132PS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18261,15 +18699,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VORPSMasked512", - argLen: 3, - commutative: true, - asm: x86.AVORPS, + name: "VFMSUBADD213PS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD213PS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18277,13 +18715,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPSMasked512", - argLen: 2, - asm: x86.AVSQRTPS, + name: "VFMSUBADD231PS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD231PS, reg: regInfo{ 
inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18291,15 +18731,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VXORPSMasked512", - argLen: 3, - commutative: true, - asm: x86.AVXORPS, + name: "VFNMADD132PS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMADD132PS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18307,14 +18747,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPS512", - argLen: 2, - commutative: true, - asm: x86.AVMAXPS, + name: "VFNMADD213PS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMADD213PS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18322,14 +18763,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPS512", - argLen: 2, - commutative: true, - asm: x86.AVMINPS, + name: "VFNMADD231PS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMADD231PS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18337,14 +18779,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPS512", - argLen: 2, - commutative: true, - asm: x86.AVMULPS, + name: "VFNMSUB132PS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMSUB132PS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18352,13 +18795,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPS512", - argLen: 2, - asm: x86.AVSCALEFPS, + name: "VFNMSUB213PS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMSUB213PS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18366,14 +18811,15 @@ var opcodeTable = [...]opInfo{ }, }, 
{ - name: "VORPS512", - argLen: 2, - commutative: true, - asm: x86.AVORPS, + name: "VFNMSUB231PS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMSUB231PS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18381,12 +18827,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPS512", - argLen: 1, - asm: x86.AVSQRTPS, + name: "VADDPSMasked512", + argLen: 3, + commutative: true, + asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18394,14 +18843,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VXORPS512", - argLen: 2, + name: "VANDPSMasked512", + argLen: 3, commutative: true, - asm: x86.AVXORPS, + asm: x86.AVANDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18409,14 +18859,29 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPS128", - argLen: 2, + name: "VANDNPSMasked512", + argLen: 3, commutative: true, - asm: x86.AVADDPS, + asm: x86.AVANDNPS, reg: regInfo{ inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VRCP14PSMasked512", + argLen: 2, + asm: x86.AVRCP14PS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18424,13 +18889,28 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDSUBPS128", + name: "VRSQRT14PSMasked512", argLen: 2, - asm: x86.AVADDSUBPS, + asm: x86.AVRSQRT14PS, reg: regInfo{ inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VDIVPSMasked512", + argLen: 3, + asm: x86.AVDIVPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18438,14 +18918,50 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDPS128", - argLen: 2, - commutative: true, - asm: x86.AVANDPS, + name: "VFMADD132PSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADD132PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADD213PSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADD213PS, reg: regInfo{ inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADD231PSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADD231PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18453,14 +18969,33 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPS128", - argLen: 2, - commutative: true, - asm: x86.AVANDNPS, + name: "VFMADDSUB132PSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADDSUB132PS, reg: regInfo{ inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADDSUB213PSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADDSUB213PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18468,39 +19003,101 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PS128", - argLen: 1, - asm: x86.AVRCP14PS, + name: "VFMADDSUB231PSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADDSUB231PS, reg: regInfo{ inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, + }, + }, + { + name: "VFMSUB132PSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUB132PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VRSQRTPS128", - argLen: 1, - asm: x86.AVRSQRTPS, + name: "VFMSUB213PSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUB213PS, reg: regInfo{ inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, + }, + }, + { + name: "VFMSUB231PSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUB231PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VDIVPS128", - argLen: 2, - asm: x86.AVDIVPS, + name: "VFMSUBADD132PSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD132PS, reg: regInfo{ inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUBADD213PSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD213PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18508,15 +19105,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPSMasked128", - argLen: 3, - commutative: true, - asm: x86.AVADDPS, + name: "VFMSUBADD231PSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD231PS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18524,15 +19122,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: 
"VANDPSMasked128", - argLen: 3, - commutative: true, - asm: x86.AVANDPS, + name: "VFNMADD132PSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMADD132PS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18540,15 +19139,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPSMasked128", - argLen: 3, - commutative: true, - asm: x86.AVANDNPS, + name: "VFNMADD213PSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMADD213PS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18556,13 +19156,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PSMasked128", - argLen: 2, - asm: x86.AVRCP14PS, + name: "VFNMADD231PSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMADD231PS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18570,13 +19173,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PSMasked128", - argLen: 2, - asm: x86.AVRSQRT14PS, + name: "VFNMSUB132PSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMSUB132PS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18584,14 +19190,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPSMasked128", - argLen: 3, - asm: x86.AVDIVPS, + name: "VFNMSUB213PSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMSUB213PS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18599,7 +19207,24 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPSMasked128", + name: "VFNMSUB231PSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMSUB231PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMAXPSMasked512", argLen: 3, commutative: true, asm: x86.AVMAXPS, @@ -18615,7 +19240,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPSMasked128", + name: "VMINPSMasked512", argLen: 3, commutative: true, asm: x86.AVMINPS, @@ -18631,7 +19256,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPSMasked128", + name: "VMULPSMasked512", argLen: 3, commutative: true, asm: x86.AVMULPS, @@ -18647,7 +19272,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPSMasked128", + name: "VSCALEFPSMasked512", argLen: 3, asm: x86.AVSCALEFPS, reg: regInfo{ @@ -18662,7 +19287,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VORPSMasked128", + name: "VORPSMasked512", argLen: 3, commutative: true, asm: x86.AVORPS, @@ -18678,7 +19303,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPSMasked128", + name: "VSQRTPSMasked512", argLen: 2, asm: x86.AVSQRTPS, reg: regInfo{ @@ -18692,7 +19317,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VXORPSMasked128", + name: "VXORPSMasked512", argLen: 3, commutative: true, asm: x86.AVXORPS, @@ -18708,7 +19333,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPS128", + name: "VMAXPS512", argLen: 2, commutative: true, asm: x86.AVMAXPS, @@ -18723,7 +19348,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPS128", + name: "VMINPS512", argLen: 2, commutative: true, asm: x86.AVMINPS, @@ -18738,7 +19363,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPS128", + name: "VMULPS512", argLen: 2, commutative: true, asm: x86.AVMULPS, @@ -18753,7 +19378,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPS128", + name: "VSCALEFPS512", argLen: 2, asm: x86.AVSCALEFPS, reg: regInfo{ @@ -18767,7 +19392,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VORPS128", + name: "VORPS512", argLen: 2, commutative: true, asm: x86.AVORPS, @@ -18782,13 +19407,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VHADDPS128", - argLen: 2, - asm: x86.AVHADDPS, + name: "VSQRTPS512", + argLen: 1, + asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18796,9 +19420,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VHSUBPS128", - argLen: 2, - asm: x86.AVHSUBPS, + name: "VXORPS512", + argLen: 2, + commutative: true, + asm: x86.AVXORPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18810,12 +19435,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPS128", - argLen: 1, - asm: x86.AVSQRTPS, + name: "VADDPS128", + argLen: 2, + commutative: true, + asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18823,10 +19450,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VXORPS128", - argLen: 2, - commutative: true, - asm: x86.AVXORPS, + name: "VADDSUBPS128", + argLen: 2, + asm: 
x86.AVADDSUBPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18838,10 +19464,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPS256", + name: "VANDPS128", argLen: 2, commutative: true, - asm: x86.AVADDPS, + asm: x86.AVANDPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18853,10 +19479,2993 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDSUBPS256", - argLen: 2, - asm: x86.AVADDSUBPS, - reg: regInfo{ + name: "VANDNPS128", + argLen: 2, + commutative: true, + asm: x86.AVANDNPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VRCP14PS128", + argLen: 1, + asm: x86.AVRCP14PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VRSQRTPS128", + argLen: 1, + asm: x86.AVRSQRTPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VDIVPS128", + argLen: 2, + asm: x86.AVDIVPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADD132PS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD132PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADD213PS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD213PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADD231PS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD231PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADDSUB132PS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADDSUB132PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADDSUB213PS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADDSUB213PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADDSUB231PS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADDSUB231PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUB132PS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUB132PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUB213PS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUB213PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUB231PS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUB231PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUBADD132PS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD132PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUBADD213PS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD213PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUBADD231PS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD231PS, + reg: regInfo{ + inputs: []inputInfo{ 
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMADD132PS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMADD132PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMADD213PS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMADD213PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMADD231PS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMADD231PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMSUB132PS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMSUB132PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMSUB213PS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMSUB213PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMSUB231PS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMSUB231PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VADDPSMasked128", + argLen: 3, + commutative: true, + asm: x86.AVADDPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + 
name: "VANDPSMasked128", + argLen: 3, + commutative: true, + asm: x86.AVANDPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VANDNPSMasked128", + argLen: 3, + commutative: true, + asm: x86.AVANDNPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VRCP14PSMasked128", + argLen: 2, + asm: x86.AVRCP14PS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VRSQRT14PSMasked128", + argLen: 2, + asm: x86.AVRSQRT14PS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VDIVPSMasked128", + argLen: 3, + asm: x86.AVDIVPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADD132PSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADD132PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADD213PSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADD213PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADD231PSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADD231PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADDSUB132PSMasked128", + argLen: 4, + 
resultInArg0: true, + asm: x86.AVFMADDSUB132PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADDSUB213PSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADDSUB213PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADDSUB231PSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADDSUB231PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUB132PSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUB132PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUB213PSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUB213PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUB231PSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUB231PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUBADD132PSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD132PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + 
}, + }, + }, + { + name: "VFMSUBADD213PSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD213PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUBADD231PSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD231PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMADD132PSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMADD132PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMADD213PSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMADD213PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMADD231PSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMADD231PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMSUB132PSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMSUB132PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMSUB213PSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMSUB213PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMSUB231PSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMSUB231PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMAXPSMasked128", + argLen: 3, + commutative: true, + asm: x86.AVMAXPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMINPSMasked128", + argLen: 3, + commutative: true, + asm: x86.AVMINPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMULPSMasked128", + argLen: 3, + commutative: true, + asm: x86.AVMULPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VSCALEFPSMasked128", + argLen: 3, + asm: x86.AVSCALEFPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VORPSMasked128", + argLen: 3, + commutative: true, + asm: x86.AVORPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VSQRTPSMasked128", + argLen: 2, + asm: x86.AVSQRTPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VXORPSMasked128", + argLen: 3, + commutative: true, + asm: x86.AVXORPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMAXPS128", + argLen: 2, + 
commutative: true, + asm: x86.AVMAXPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMINPS128", + argLen: 2, + commutative: true, + asm: x86.AVMINPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMULPS128", + argLen: 2, + commutative: true, + asm: x86.AVMULPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VSCALEFPS128", + argLen: 2, + asm: x86.AVSCALEFPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VORPS128", + argLen: 2, + commutative: true, + asm: x86.AVORPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VHADDPS128", + argLen: 2, + asm: x86.AVHADDPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VHSUBPS128", + argLen: 2, + asm: x86.AVHSUBPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VSQRTPS128", + argLen: 1, + asm: x86.AVSQRTPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VXORPS128", + argLen: 2, + commutative: true, + asm: x86.AVXORPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VADDPS256", + argLen: 2, + commutative: true, + asm: x86.AVADDPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VADDSUBPS256", + argLen: 2, + asm: x86.AVADDSUBPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VANDPS256", + argLen: 2, + commutative: true, + asm: x86.AVANDPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VANDNPS256", + argLen: 2, + commutative: true, + asm: x86.AVANDNPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VRCP14PS256", + argLen: 1, + asm: x86.AVRCP14PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VRSQRTPS256", + argLen: 1, + asm: x86.AVRSQRTPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VDIVPS256", + argLen: 2, + asm: x86.AVDIVPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADD132PS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD132PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADD213PS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD213PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADD231PS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD231PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADDSUB132PS256", + argLen: 3, + 
resultInArg0: true, + asm: x86.AVFMADDSUB132PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADDSUB213PS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADDSUB213PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADDSUB231PS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADDSUB231PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUB132PS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUB132PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUB213PS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUB213PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUB231PS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUB231PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUBADD132PS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD132PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUBADD213PS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD213PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUBADD231PS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD231PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMADD132PS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMADD132PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMADD213PS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMADD213PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMADD231PS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMADD231PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMSUB132PS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMSUB132PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMSUB213PS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMSUB213PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMSUB231PS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMSUB231PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VADDPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVADDPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VANDPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVANDPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VANDNPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVANDNPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VRCP14PSMasked256", + argLen: 2, + asm: x86.AVRCP14PS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VRSQRT14PSMasked256", + argLen: 2, + asm: x86.AVRSQRT14PS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VDIVPSMasked256", + argLen: 3, + asm: x86.AVDIVPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADD132PSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADD132PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADD213PSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADD213PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADD231PSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADD231PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADDSUB132PSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADDSUB132PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADDSUB213PSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADDSUB213PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADDSUB231PSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADDSUB231PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUB132PSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUB132PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUB213PSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUB213PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUB231PSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUB231PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUBADD132PSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD132PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUBADD213PSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD213PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUBADD231PSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD231PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMADD132PSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMADD132PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMADD213PSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMADD213PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMADD231PSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMADD231PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMSUB132PSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMSUB132PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMSUB213PSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMSUB213PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 
K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMSUB231PSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMSUB231PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMAXPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVMAXPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMINPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVMINPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMULPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVMULPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VSCALEFPSMasked256", + argLen: 3, + asm: x86.AVSCALEFPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VORPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVORPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VSQRTPSMasked256", + argLen: 2, + asm: x86.AVSQRTPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VXORPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVXORPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMAXPS256", + argLen: 2, + commutative: true, + asm: x86.AVMAXPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMINPS256", + argLen: 2, + commutative: true, + asm: x86.AVMINPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMULPS256", + argLen: 2, + commutative: true, + asm: x86.AVMULPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VSCALEFPS256", + argLen: 2, + asm: x86.AVSCALEFPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VORPS256", + argLen: 2, + commutative: true, + asm: x86.AVORPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VHADDPS256", + argLen: 2, + asm: x86.AVHADDPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VHSUBPS256", + argLen: 2, + asm: x86.AVHSUBPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VSQRTPS256", + argLen: 1, + asm: x86.AVSQRTPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VXORPS256", + argLen: 2, + commutative: true, + asm: x86.AVXORPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VADDPD128", + argLen: 2, + commutative: true, + asm: x86.AVADDPD, + 
reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VADDSUBPD128", + argLen: 2, + asm: x86.AVADDSUBPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VANDPD128", + argLen: 2, + commutative: true, + asm: x86.AVANDPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VANDNPD128", + argLen: 2, + commutative: true, + asm: x86.AVANDNPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VRCP14PD128", + argLen: 1, + asm: x86.AVRCP14PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VRSQRT14PD128", + argLen: 1, + asm: x86.AVRSQRT14PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VDIVPD128", + argLen: 2, + asm: x86.AVDIVPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADD132PD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD132PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADD213PD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD213PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADD231PD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD231PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADDSUB132PD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADDSUB132PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADDSUB213PD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADDSUB213PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADDSUB231PD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADDSUB231PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUB132PD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUB132PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUB213PD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUB213PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUB231PD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUB231PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUBADD132PD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD132PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUBADD213PD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD213PD, + 
reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUBADD231PD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD231PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMADD132PD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMADD132PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMADD213PD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMADD213PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMADD231PD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMADD231PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMSUB132PD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMSUB132PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMSUB213PD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMSUB213PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMSUB231PD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMSUB231PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VADDPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVADDPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VANDPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVANDPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VANDNPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVANDNPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VRCP14PDMasked128", + argLen: 2, + asm: x86.AVRCP14PD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VRSQRT14PDMasked128", + argLen: 2, + asm: x86.AVRSQRT14PD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VDIVPDMasked128", + argLen: 3, + asm: x86.AVDIVPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADD132PDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADD132PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADD213PDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADD213PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADD231PDMasked128", + argLen: 4, + resultInArg0: 
true, + asm: x86.AVFMADD231PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADDSUB132PDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADDSUB132PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADDSUB213PDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADDSUB213PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADDSUB231PDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADDSUB231PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUB132PDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUB132PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUB213PDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUB213PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUB231PDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUB231PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { 
+ name: "VFMSUBADD132PDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD132PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUBADD213PDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD213PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUBADD231PDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD231PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMADD132PDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMADD132PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMADD213PDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMADD213PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMADD231PDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMADD231PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMSUB132PDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMSUB132PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMSUB213PDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMSUB213PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMSUB231PDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMSUB231PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMAXPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVMAXPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMINPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVMINPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMULPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVMULPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VSCALEFPDMasked128", + argLen: 3, + asm: x86.AVSCALEFPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VORPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVORPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VSQRTPDMasked128", + argLen: 2, + asm: x86.AVSQRTPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 + }, + }, + }, + { + name: "VXORPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVXORPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMAXPD128", + argLen: 2, + commutative: true, + asm: x86.AVMAXPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMINPD128", + argLen: 2, + commutative: true, + asm: x86.AVMINPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMULPD128", + argLen: 2, + commutative: true, + asm: x86.AVMULPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VSCALEFPD128", + argLen: 2, + asm: x86.AVSCALEFPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VORPD128", + argLen: 2, + commutative: true, + asm: x86.AVORPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VHADDPD128", + argLen: 2, + asm: x86.AVHADDPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VHSUBPD128", + argLen: 2, + asm: x86.AVHSUBPD, + reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18867,14 +22476,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDPS256", - argLen: 2, - commutative: true, - asm: x86.AVANDPS, + name: "VSQRTPD128", + argLen: 1, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18882,10 +22489,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPS256", + name: "VXORPD128", argLen: 2, commutative: true, - asm: 
x86.AVANDNPS, + asm: x86.AVXORPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18897,12 +22504,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PS256", - argLen: 1, - asm: x86.AVRCP14PS, + name: "VADDPD256", + argLen: 2, + commutative: true, + asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18910,12 +22519,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRTPS256", - argLen: 1, - asm: x86.AVRSQRTPS, + name: "VADDSUBPD256", + argLen: 2, + asm: x86.AVADDSUBPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18923,9 +22533,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPS256", - argLen: 2, - asm: x86.AVDIVPS, + name: "VANDPD256", + argLen: 2, + commutative: true, + asm: x86.AVANDPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18937,15 +22548,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPSMasked256", - argLen: 3, + name: "VANDNPD256", + argLen: 2, commutative: true, - asm: x86.AVADDPS, + asm: x86.AVANDNPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18953,15 +22563,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDPSMasked256", - argLen: 3, - commutative: true, - asm: x86.AVANDPS, + name: "VRCP14PD256", + argLen: 1, + asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18969,15 +22576,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPSMasked256", - argLen: 3, - commutative: true, - asm: x86.AVANDNPS, + name: "VRSQRT14PD256", + argLen: 1, + asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18985,13 +22589,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PSMasked256", + name: "VDIVPD256", argLen: 2, - asm: x86.AVRCP14PS, + asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + 
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18999,13 +22603,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PSMasked256", - argLen: 2, - asm: x86.AVRSQRT14PS, + name: "VFMADD132PD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD132PD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19013,14 +22619,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPSMasked256", - argLen: 3, - asm: x86.AVDIVPS, + name: "VFMADD213PD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19028,15 +22635,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPSMasked256", - argLen: 3, - commutative: true, - asm: x86.AVMAXPS, + name: "VFMADD231PD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD231PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19044,15 +22651,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPSMasked256", - argLen: 3, - commutative: true, - asm: x86.AVMINPS, + name: "VFMADDSUB132PD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADDSUB132PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19060,15 +22667,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPSMasked256", - argLen: 3, - commutative: true, - asm: x86.AVMULPS, + name: "VFMADDSUB213PD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADDSUB213PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19076,14 +22683,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPSMasked256", - argLen: 3, - asm: x86.AVSCALEFPS, + name: "VFMADDSUB231PD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADDSUB231PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19091,15 +22699,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VORPSMasked256", - argLen: 3, - commutative: true, - asm: x86.AVORPS, + name: "VFMSUB132PD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUB132PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19107,13 +22715,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPSMasked256", - argLen: 2, - asm: x86.AVSQRTPS, + name: "VFMSUB213PD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUB213PD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19121,15 +22731,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VXORPSMasked256", - argLen: 3, - commutative: true, - asm: x86.AVXORPS, + name: "VFMSUB231PD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUB231PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19137,14 +22747,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPS256", - argLen: 2, - commutative: true, - asm: x86.AVMAXPS, + 
name: "VFMSUBADD132PD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD132PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19152,14 +22763,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPS256", - argLen: 2, - commutative: true, - asm: x86.AVMINPS, + name: "VFMSUBADD213PD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19167,14 +22779,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPS256", - argLen: 2, - commutative: true, - asm: x86.AVMULPS, + name: "VFMSUBADD231PD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD231PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19182,13 +22795,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPS256", - argLen: 2, - asm: x86.AVSCALEFPS, + name: "VFNMADD132PD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMADD132PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19196,14 +22811,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VORPS256", - argLen: 2, - commutative: true, - asm: x86.AVORPS, + name: "VFNMADD213PD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMADD213PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19211,13 +22827,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VHADDPS256", - argLen: 2, - asm: x86.AVHADDPS, + name: "VFNMADD231PD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMADD231PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19225,13 +22843,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VHSUBPS256", - argLen: 2, - asm: x86.AVHSUBPS, + name: "VFNMSUB132PD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMSUB132PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19239,12 +22859,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPS256", - argLen: 1, - asm: x86.AVSQRTPS, + name: "VFNMSUB213PD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMSUB213PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19252,14 +22875,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VXORPS256", - argLen: 2, - commutative: true, - asm: x86.AVXORPS, + name: "VFNMSUB231PD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMSUB231PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19267,14 +22891,31 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPD128", - argLen: 2, + name: "VADDPDMasked256", + argLen: 3, commutative: true, asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VANDPDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVANDPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19282,13 +22923,43 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDSUBPD128", + name: "VANDNPDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVANDNPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VRCP14PDMasked256", argLen: 2, - asm: x86.AVADDSUBPD, + asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VRSQRT14PDMasked256", + argLen: 2, + asm: x86.AVRSQRT14PD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19296,14 +22967,31 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDPD128", - argLen: 2, - commutative: true, - asm: x86.AVANDPD, + name: "VDIVPDMasked256", + argLen: 3, + asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADD132PDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADD132PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19311,14 +22999,33 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPD128", - argLen: 2, - commutative: true, - asm: x86.AVANDNPD, + name: "VFMADD213PDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADD231PDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADD231PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19326,39 +23033,118 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PD128", - argLen: 1, - asm: x86.AVRCP14PD, + name: "VFMADDSUB132PDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADDSUB132PD, reg: regInfo{ inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, + }, + }, + { + name: "VFMADDSUB213PDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADDSUB213PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, 
}, { - name: "VRSQRT14PD128", - argLen: 1, - asm: x86.AVRSQRT14PD, + name: "VFMADDSUB231PDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADDSUB231PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUB132PDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUB132PD, reg: regInfo{ inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, + }, + }, + { + name: "VFMSUB213PDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUB213PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VDIVPD128", - argLen: 2, - asm: x86.AVDIVPD, + name: "VFMSUB231PDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUB231PD, reg: regInfo{ inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUBADD132PDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD132PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19366,15 +23152,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVADDPD, + name: "VFMSUBADD213PDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19382,15 +23169,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDPDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVANDPD, + 
name: "VFMSUBADD231PDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD231PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19398,15 +23186,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVANDNPD, + name: "VFNMADD132PDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMADD132PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19414,13 +23203,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PDMasked128", - argLen: 2, - asm: x86.AVRCP14PD, + name: "VFNMADD213PDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMADD213PD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19428,13 +23220,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PDMasked128", - argLen: 2, - asm: x86.AVRSQRT14PD, + name: "VFNMADD231PDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMADD231PD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19442,14 +23237,50 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPDMasked128", - argLen: 3, - asm: x86.AVDIVPD, + name: "VFNMSUB132PDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMSUB132PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMSUB213PDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMSUB213PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMSUB231PDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMSUB231PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19457,7 +23288,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPDMasked128", + name: "VMAXPDMasked256", argLen: 3, commutative: true, asm: x86.AVMAXPD, @@ -19473,7 +23304,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPDMasked128", + name: "VMINPDMasked256", argLen: 3, commutative: true, asm: x86.AVMINPD, @@ -19489,7 +23320,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPDMasked128", + name: "VMULPDMasked256", argLen: 3, commutative: true, asm: x86.AVMULPD, @@ -19505,7 +23336,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPDMasked128", + name: "VSCALEFPDMasked256", argLen: 3, asm: x86.AVSCALEFPD, reg: regInfo{ @@ -19520,7 +23351,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VORPDMasked128", + name: "VORPDMasked256", argLen: 3, commutative: true, asm: x86.AVORPD, @@ -19536,7 +23367,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPDMasked128", + name: "VSQRTPDMasked256", argLen: 2, asm: x86.AVSQRTPD, reg: regInfo{ @@ -19550,7 +23381,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VXORPDMasked128", + name: "VXORPDMasked256", argLen: 3, commutative: true, asm: x86.AVXORPD, @@ -19566,7 +23397,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPD128", + name: "VMAXPD256", argLen: 2, commutative: true, asm: x86.AVMAXPD, @@ -19581,7 +23412,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPD128", + name: "VMINPD256", argLen: 2, commutative: true, asm: x86.AVMINPD, @@ -19596,7 +23427,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPD128", + name: "VMULPD256", argLen: 2, commutative: true, asm: x86.AVMULPD, @@ -19611,7 +23442,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPD128", + name: "VSCALEFPD256", argLen: 2, asm: x86.AVSCALEFPD, reg: regInfo{ @@ -19625,7 +23456,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VORPD128", + name: "VORPD256", argLen: 2, commutative: true, asm: x86.AVORPD, @@ -19640,7 +23471,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VHADDPD128", + name: "VHADDPD256", argLen: 2, asm: x86.AVHADDPD, reg: regInfo{ @@ -19654,7 +23485,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VHSUBPD128", + name: "VHSUBPD256", argLen: 2, asm: x86.AVHSUBPD, reg: regInfo{ @@ -19668,7 +23499,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPD128", + name: "VSQRTPD256", argLen: 1, asm: x86.AVSQRTPD, reg: regInfo{ @@ -19681,7 +23512,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VXORPD128", + name: "VXORPD256", argLen: 2, commutative: true, asm: x86.AVXORPD, @@ -19696,7 +23527,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPD256", + name: "VADDPD512", argLen: 2, commutative: true, asm: x86.AVADDPD, @@ -19711,21 +23542,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDSUBPD256", - argLen: 2, - asm: x86.AVADDSUBPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VANDPD256", + name: "VANDPD512", argLen: 2, commutative: true, asm: x86.AVANDPD, @@ -19740,7 +23557,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPD256", + name: "VANDNPD512", argLen: 2, commutative: true, asm: x86.AVANDNPD, @@ -19755,7 +23572,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PD256", + name: "VRCP14PD512", argLen: 1, asm: x86.AVRCP14PD, reg: regInfo{ @@ -19768,7 +23585,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PD256", + name: "VRSQRT14PD512", argLen: 1, asm: x86.AVRSQRT14PD, reg: regInfo{ @@ -19781,7 +23598,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPD256", + name: "VDIVPD512", argLen: 2, asm: x86.AVDIVPD, reg: regInfo{ @@ -19795,15 +23612,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVADDPD, + name: "VFMADD132PD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD132PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19811,15 +23628,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDPDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVANDPD, + name: "VFMADD213PD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19827,15 +23644,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVANDNPD, + name: "VFMADD231PD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD231PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19843,13 +23660,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PDMasked256", - argLen: 2, - asm: x86.AVRCP14PD, + name: "VFMADDSUB132PD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADDSUB132PD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19857,13 +23676,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PDMasked256", - argLen: 2, - asm: x86.AVRSQRT14PD, + name: "VFMADDSUB213PD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADDSUB213PD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19871,14 +23692,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPDMasked256", - argLen: 3, - asm: x86.AVDIVPD, + name: "VFMADDSUB231PD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADDSUB231PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19886,15 +23708,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVMAXPD, + name: "VFMSUB132PD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUB132PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19902,15 +23724,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVMINPD, + name: "VFMSUB213PD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUB213PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19918,15 +23740,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVMULPD, + name: "VFMSUB231PD512", + argLen: 3, + 
resultInArg0: true, + asm: x86.AVFMSUB231PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19934,14 +23756,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPDMasked256", - argLen: 3, - asm: x86.AVSCALEFPD, + name: "VFMSUBADD132PD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD132PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19949,15 +23772,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VORPDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVORPD, + name: "VFMSUBADD213PD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19965,13 +23788,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPDMasked256", - argLen: 2, - asm: x86.AVSQRTPD, + name: "VFMSUBADD231PD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD231PD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19979,15 +23804,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VXORPDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVXORPD, + name: "VFNMADD132PD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMADD132PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 @@ -19995,14 +23820,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPD256", - argLen: 2, - commutative: true, - asm: x86.AVMAXPD, + name: "VFNMADD213PD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMADD213PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20010,14 +23836,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPD256", - argLen: 2, - commutative: true, - asm: x86.AVMINPD, + name: "VFNMADD231PD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMADD231PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20025,14 +23852,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPD256", - argLen: 2, - commutative: true, - asm: x86.AVMULPD, + name: "VFNMSUB132PD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMSUB132PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20040,13 +23868,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPD256", - argLen: 2, - asm: x86.AVSCALEFPD, + name: "VFNMSUB213PD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMSUB213PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20054,14 +23884,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VORPD256", - argLen: 2, - commutative: true, - asm: x86.AVORPD, + name: "VFNMSUB231PD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMSUB231PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20069,13 +23900,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VHADDPD256", - argLen: 2, - asm: x86.AVHADDPD, + name: "VADDPDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20083,13 +23916,15 @@ var 
opcodeTable = [...]opInfo{ }, }, { - name: "VHSUBPD256", - argLen: 2, - asm: x86.AVHSUBPD, + name: "VANDPDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVANDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20097,27 +23932,58 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPD256", - argLen: 1, - asm: x86.AVSQRTPD, + name: "VANDNPDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVANDNPD, reg: regInfo{ inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, + }, + }, + { + name: "VRCP14PDMasked512", + argLen: 2, + asm: x86.AVRCP14PD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VXORPD256", - argLen: 2, - commutative: true, - asm: x86.AVXORPD, + name: "VRSQRT14PDMasked512", + argLen: 2, + asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VDIVPDMasked512", + argLen: 3, + asm: x86.AVDIVPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20125,14 +23991,33 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPD512", - argLen: 2, - commutative: true, - asm: x86.AVADDPD, + name: "VFMADD132PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADD132PD, reg: regInfo{ inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADD213PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADD213PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20140,14 +24025,33 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDPD512", - argLen: 2, - commutative: true, - asm: x86.AVANDPD, + name: "VFMADD231PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADD231PD, reg: regInfo{ inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADDSUB132PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADDSUB132PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20155,14 +24059,33 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPD512", - argLen: 2, - commutative: true, - asm: x86.AVANDNPD, + name: "VFMADDSUB213PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADDSUB213PD, reg: regInfo{ inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADDSUB231PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADDSUB231PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20170,39 +24093,101 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PD512", - argLen: 1, - asm: x86.AVRCP14PD, + name: "VFMSUB132PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUB132PD, reg: regInfo{ inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, + }, + }, + { + name: "VFMSUB213PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUB213PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ {0, 2147418112}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VRSQRT14PD512", - argLen: 1, - asm: x86.AVRSQRT14PD, + name: "VFMSUB231PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUB231PD, reg: regInfo{ inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, + }, + }, + { + name: "VFMSUBADD132PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD132PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VDIVPD512", - argLen: 2, - asm: x86.AVDIVPD, + name: "VFMSUBADD213PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUBADD231PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD231PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20210,15 +24195,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVADDPD, + name: "VFNMADD132PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMADD132PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20226,15 +24212,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDPDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVANDPD, + name: "VFNMADD213PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMADD213PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20242,15 +24229,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVANDNPD, + name: "VFNMADD231PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMADD231PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20258,13 +24246,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PDMasked512", - argLen: 2, - asm: x86.AVRCP14PD, + name: "VFNMSUB132PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMSUB132PD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20272,13 +24263,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PDMasked512", - argLen: 2, - asm: x86.AVRSQRT14PD, + name: "VFNMSUB213PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMSUB213PD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20286,14 +24280,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPDMasked512", - argLen: 3, - asm: x86.AVDIVPD, + name: "VFNMSUB231PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMSUB231PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -55307,6 +59303,96 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "FusedMultiplyAdd132Float32x16", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAdd213Float32x16", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAdd231Float32x16", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAddSub132Float32x16", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAddSub213Float32x16", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAddSub231Float32x16", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySub132Float32x16", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySub213Float32x16", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySub231Float32x16", + argLen: 3, + generic: true, + }, + { + name: 
"FusedMultiplySubAdd132Float32x16", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySubAdd213Float32x16", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySubAdd231Float32x16", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplyAdd132Float32x16", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplyAdd213Float32x16", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplyAdd231Float32x16", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplySub132Float32x16", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplySub213Float32x16", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplySub231Float32x16", + argLen: 3, + generic: true, + }, { name: "GreaterFloat32x16", argLen: 2, @@ -55372,6 +59458,96 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MaskedFusedMultiplyAdd132Float32x16", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAdd213Float32x16", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAdd231Float32x16", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAddSub132Float32x16", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAddSub213Float32x16", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAddSub231Float32x16", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySub132Float32x16", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySub213Float32x16", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySub231Float32x16", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySubAdd132Float32x16", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySubAdd213Float32x16", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySubAdd231Float32x16", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplyAdd132Float32x16", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplyAdd213Float32x16", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplyAdd231Float32x16", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplySub132Float32x16", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplySub213Float32x16", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplySub231Float32x16", + argLen: 4, + generic: true, + }, { name: "MaskedGreaterFloat32x16", argLen: 3, @@ -55554,6 +59730,96 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "FusedMultiplyAdd132Float32x4", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAdd213Float32x4", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAdd231Float32x4", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAddSub132Float32x4", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAddSub213Float32x4", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAddSub231Float32x4", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySub132Float32x4", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySub213Float32x4", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySub231Float32x4", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySubAdd132Float32x4", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySubAdd213Float32x4", + argLen: 3, + generic: true, + }, + { + name: 
"FusedMultiplySubAdd231Float32x4", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplyAdd132Float32x4", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplyAdd213Float32x4", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplyAdd231Float32x4", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplySub132Float32x4", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplySub213Float32x4", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplySub231Float32x4", + argLen: 3, + generic: true, + }, { name: "GreaterFloat32x4", argLen: 2, @@ -55619,6 +59885,96 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MaskedFusedMultiplyAdd132Float32x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAdd213Float32x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAdd231Float32x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAddSub132Float32x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAddSub213Float32x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAddSub231Float32x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySub132Float32x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySub213Float32x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySub231Float32x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySubAdd132Float32x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySubAdd213Float32x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySubAdd231Float32x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplyAdd132Float32x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplyAdd213Float32x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplyAdd231Float32x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplySub132Float32x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplySub213Float32x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplySub231Float32x4", + argLen: 4, + generic: true, + }, { name: "MaskedGreaterFloat32x4", argLen: 3, @@ -55817,8 +60173,98 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "FloorFloat32x8", - argLen: 1, + name: "FloorFloat32x8", + argLen: 1, + generic: true, + }, + { + name: "FusedMultiplyAdd132Float32x8", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAdd213Float32x8", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAdd231Float32x8", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAddSub132Float32x8", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAddSub213Float32x8", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAddSub231Float32x8", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySub132Float32x8", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySub213Float32x8", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySub231Float32x8", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySubAdd132Float32x8", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySubAdd213Float32x8", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySubAdd231Float32x8", + argLen: 3, + generic: true, + }, + { + name: 
"FusedNegativeMultiplyAdd132Float32x8", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplyAdd213Float32x8", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplyAdd231Float32x8", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplySub132Float32x8", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplySub213Float32x8", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplySub231Float32x8", + argLen: 3, generic: true, }, { @@ -55886,6 +60332,96 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MaskedFusedMultiplyAdd132Float32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAdd213Float32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAdd231Float32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAddSub132Float32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAddSub213Float32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAddSub231Float32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySub132Float32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySub213Float32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySub231Float32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySubAdd132Float32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySubAdd213Float32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySubAdd231Float32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplyAdd132Float32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplyAdd213Float32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplyAdd231Float32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplySub132Float32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplySub213Float32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplySub231Float32x8", + argLen: 4, + generic: true, + }, { name: "MaskedGreaterFloat32x8", argLen: 3, @@ -56094,6 +60630,96 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "FusedMultiplyAdd132Float64x2", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAdd213Float64x2", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAdd231Float64x2", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAddSub132Float64x2", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAddSub213Float64x2", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAddSub231Float64x2", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySub132Float64x2", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySub213Float64x2", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySub231Float64x2", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySubAdd132Float64x2", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySubAdd213Float64x2", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySubAdd231Float64x2", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplyAdd132Float64x2", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplyAdd213Float64x2", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplyAdd231Float64x2", + argLen: 3, + generic: true, + }, 
+ { + name: "FusedNegativeMultiplySub132Float64x2", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplySub213Float64x2", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplySub231Float64x2", + argLen: 3, + generic: true, + }, { name: "GreaterFloat64x2", argLen: 2, @@ -56159,6 +60785,96 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MaskedFusedMultiplyAdd132Float64x2", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAdd213Float64x2", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAdd231Float64x2", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAddSub132Float64x2", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAddSub213Float64x2", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAddSub231Float64x2", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySub132Float64x2", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySub213Float64x2", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySub231Float64x2", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySubAdd132Float64x2", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySubAdd213Float64x2", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySubAdd231Float64x2", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplyAdd132Float64x2", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplyAdd213Float64x2", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplyAdd231Float64x2", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplySub132Float64x2", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplySub213Float64x2", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplySub231Float64x2", + argLen: 4, + generic: true, + }, { name: "MaskedGreaterFloat64x2", argLen: 3, @@ -56361,6 +61077,96 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "FusedMultiplyAdd132Float64x4", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAdd213Float64x4", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAdd231Float64x4", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAddSub132Float64x4", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAddSub213Float64x4", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAddSub231Float64x4", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySub132Float64x4", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySub213Float64x4", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySub231Float64x4", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySubAdd132Float64x4", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySubAdd213Float64x4", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySubAdd231Float64x4", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplyAdd132Float64x4", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplyAdd213Float64x4", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplyAdd231Float64x4", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplySub132Float64x4", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplySub213Float64x4", + argLen: 3, + generic: true, + }, + { + name: 
"FusedNegativeMultiplySub231Float64x4", + argLen: 3, + generic: true, + }, { name: "GreaterFloat64x4", argLen: 2, @@ -56426,6 +61232,96 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MaskedFusedMultiplyAdd132Float64x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAdd213Float64x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAdd231Float64x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAddSub132Float64x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAddSub213Float64x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAddSub231Float64x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySub132Float64x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySub213Float64x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySub231Float64x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySubAdd132Float64x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySubAdd213Float64x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySubAdd231Float64x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplyAdd132Float64x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplyAdd213Float64x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplyAdd231Float64x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplySub132Float64x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplySub213Float64x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplySub231Float64x4", + argLen: 4, + generic: true, + }, { name: "MaskedGreaterFloat64x4", argLen: 3, @@ -56613,6 +61509,96 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "FusedMultiplyAdd132Float64x8", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAdd213Float64x8", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAdd231Float64x8", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAddSub132Float64x8", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAddSub213Float64x8", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAddSub231Float64x8", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySub132Float64x8", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySub213Float64x8", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySub231Float64x8", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySubAdd132Float64x8", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySubAdd213Float64x8", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySubAdd231Float64x8", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplyAdd132Float64x8", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplyAdd213Float64x8", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplyAdd231Float64x8", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplySub132Float64x8", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplySub213Float64x8", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplySub231Float64x8", + argLen: 3, + generic: true, + }, { name: "GreaterFloat64x8", argLen: 2, @@ -56678,6 +61664,96 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: 
true, }, + { + name: "MaskedFusedMultiplyAdd132Float64x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAdd213Float64x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAdd231Float64x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAddSub132Float64x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAddSub213Float64x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAddSub231Float64x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySub132Float64x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySub213Float64x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySub231Float64x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySubAdd132Float64x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySubAdd213Float64x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySubAdd231Float64x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplyAdd132Float64x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplyAdd213Float64x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplyAdd231Float64x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplySub132Float64x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplySub213Float64x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplySub231Float64x8", + argLen: 4, + generic: true, + }, { name: "MaskedGreaterFloat64x8", argLen: 3, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 60469f49d944da..e9bafe2a1b400f 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1385,6 +1385,330 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpFloorWithPrecisionFloat64x4(v) case OpFloorWithPrecisionFloat64x8: return rewriteValueAMD64_OpFloorWithPrecisionFloat64x8(v) + case OpFusedMultiplyAdd132Float32x16: + v.Op = OpAMD64VFMADD132PS512 + return true + case OpFusedMultiplyAdd132Float32x4: + v.Op = OpAMD64VFMADD132PS128 + return true + case OpFusedMultiplyAdd132Float32x8: + v.Op = OpAMD64VFMADD132PS256 + return true + case OpFusedMultiplyAdd132Float64x2: + v.Op = OpAMD64VFMADD132PD128 + return true + case OpFusedMultiplyAdd132Float64x4: + v.Op = OpAMD64VFMADD132PD256 + return true + case OpFusedMultiplyAdd132Float64x8: + v.Op = OpAMD64VFMADD132PD512 + return true + case OpFusedMultiplyAdd213Float32x16: + v.Op = OpAMD64VFMADD213PS512 + return true + case OpFusedMultiplyAdd213Float32x4: + v.Op = OpAMD64VFMADD213PS128 + return true + case OpFusedMultiplyAdd213Float32x8: + v.Op = OpAMD64VFMADD213PS256 + return true + case OpFusedMultiplyAdd213Float64x2: + v.Op = OpAMD64VFMADD213PD128 + return true + case OpFusedMultiplyAdd213Float64x4: + v.Op = OpAMD64VFMADD213PD256 + return true + case OpFusedMultiplyAdd213Float64x8: + v.Op = OpAMD64VFMADD213PD512 + return true + case OpFusedMultiplyAdd231Float32x16: + v.Op = OpAMD64VFMADD231PS512 + return true + case OpFusedMultiplyAdd231Float32x4: + v.Op = OpAMD64VFMADD231PS128 + return true + case OpFusedMultiplyAdd231Float32x8: + v.Op = OpAMD64VFMADD231PS256 + return true + case OpFusedMultiplyAdd231Float64x2: + v.Op = OpAMD64VFMADD231PD128 + return true + case OpFusedMultiplyAdd231Float64x4: + v.Op = OpAMD64VFMADD231PD256 + return true + case 
OpFusedMultiplyAdd231Float64x8: + v.Op = OpAMD64VFMADD231PD512 + return true + case OpFusedMultiplyAddSub132Float32x16: + v.Op = OpAMD64VFMADDSUB132PS512 + return true + case OpFusedMultiplyAddSub132Float32x4: + v.Op = OpAMD64VFMADDSUB132PS128 + return true + case OpFusedMultiplyAddSub132Float32x8: + v.Op = OpAMD64VFMADDSUB132PS256 + return true + case OpFusedMultiplyAddSub132Float64x2: + v.Op = OpAMD64VFMADDSUB132PD128 + return true + case OpFusedMultiplyAddSub132Float64x4: + v.Op = OpAMD64VFMADDSUB132PD256 + return true + case OpFusedMultiplyAddSub132Float64x8: + v.Op = OpAMD64VFMADDSUB132PD512 + return true + case OpFusedMultiplyAddSub213Float32x16: + v.Op = OpAMD64VFMADDSUB213PS512 + return true + case OpFusedMultiplyAddSub213Float32x4: + v.Op = OpAMD64VFMADDSUB213PS128 + return true + case OpFusedMultiplyAddSub213Float32x8: + v.Op = OpAMD64VFMADDSUB213PS256 + return true + case OpFusedMultiplyAddSub213Float64x2: + v.Op = OpAMD64VFMADDSUB213PD128 + return true + case OpFusedMultiplyAddSub213Float64x4: + v.Op = OpAMD64VFMADDSUB213PD256 + return true + case OpFusedMultiplyAddSub213Float64x8: + v.Op = OpAMD64VFMADDSUB213PD512 + return true + case OpFusedMultiplyAddSub231Float32x16: + v.Op = OpAMD64VFMADDSUB231PS512 + return true + case OpFusedMultiplyAddSub231Float32x4: + v.Op = OpAMD64VFMADDSUB231PS128 + return true + case OpFusedMultiplyAddSub231Float32x8: + v.Op = OpAMD64VFMADDSUB231PS256 + return true + case OpFusedMultiplyAddSub231Float64x2: + v.Op = OpAMD64VFMADDSUB231PD128 + return true + case OpFusedMultiplyAddSub231Float64x4: + v.Op = OpAMD64VFMADDSUB231PD256 + return true + case OpFusedMultiplyAddSub231Float64x8: + v.Op = OpAMD64VFMADDSUB231PD512 + return true + case OpFusedMultiplySub132Float32x16: + v.Op = OpAMD64VFMSUB132PS512 + return true + case OpFusedMultiplySub132Float32x4: + v.Op = OpAMD64VFMSUB132PS128 + return true + case OpFusedMultiplySub132Float32x8: + v.Op = OpAMD64VFMSUB132PS256 + return true + case OpFusedMultiplySub132Float64x2: + v.Op = OpAMD64VFMSUB132PD128 + return true + case OpFusedMultiplySub132Float64x4: + v.Op = OpAMD64VFMSUB132PD256 + return true + case OpFusedMultiplySub132Float64x8: + v.Op = OpAMD64VFMSUB132PD512 + return true + case OpFusedMultiplySub213Float32x16: + v.Op = OpAMD64VFMSUB213PS512 + return true + case OpFusedMultiplySub213Float32x4: + v.Op = OpAMD64VFMSUB213PS128 + return true + case OpFusedMultiplySub213Float32x8: + v.Op = OpAMD64VFMSUB213PS256 + return true + case OpFusedMultiplySub213Float64x2: + v.Op = OpAMD64VFMSUB213PD128 + return true + case OpFusedMultiplySub213Float64x4: + v.Op = OpAMD64VFMSUB213PD256 + return true + case OpFusedMultiplySub213Float64x8: + v.Op = OpAMD64VFMSUB213PD512 + return true + case OpFusedMultiplySub231Float32x16: + v.Op = OpAMD64VFMSUB231PS512 + return true + case OpFusedMultiplySub231Float32x4: + v.Op = OpAMD64VFMSUB231PS128 + return true + case OpFusedMultiplySub231Float32x8: + v.Op = OpAMD64VFMSUB231PS256 + return true + case OpFusedMultiplySub231Float64x2: + v.Op = OpAMD64VFMSUB231PD128 + return true + case OpFusedMultiplySub231Float64x4: + v.Op = OpAMD64VFMSUB231PD256 + return true + case OpFusedMultiplySub231Float64x8: + v.Op = OpAMD64VFMSUB231PD512 + return true + case OpFusedMultiplySubAdd132Float32x16: + v.Op = OpAMD64VFMSUBADD132PS512 + return true + case OpFusedMultiplySubAdd132Float32x4: + v.Op = OpAMD64VFMSUBADD132PS128 + return true + case OpFusedMultiplySubAdd132Float32x8: + v.Op = OpAMD64VFMSUBADD132PS256 + return true + case OpFusedMultiplySubAdd132Float64x2: + v.Op = 
OpAMD64VFMSUBADD132PD128 + return true + case OpFusedMultiplySubAdd132Float64x4: + v.Op = OpAMD64VFMSUBADD132PD256 + return true + case OpFusedMultiplySubAdd132Float64x8: + v.Op = OpAMD64VFMSUBADD132PD512 + return true + case OpFusedMultiplySubAdd213Float32x16: + v.Op = OpAMD64VFMSUBADD213PS512 + return true + case OpFusedMultiplySubAdd213Float32x4: + v.Op = OpAMD64VFMSUBADD213PS128 + return true + case OpFusedMultiplySubAdd213Float32x8: + v.Op = OpAMD64VFMSUBADD213PS256 + return true + case OpFusedMultiplySubAdd213Float64x2: + v.Op = OpAMD64VFMSUBADD213PD128 + return true + case OpFusedMultiplySubAdd213Float64x4: + v.Op = OpAMD64VFMSUBADD213PD256 + return true + case OpFusedMultiplySubAdd213Float64x8: + v.Op = OpAMD64VFMSUBADD213PD512 + return true + case OpFusedMultiplySubAdd231Float32x16: + v.Op = OpAMD64VFMSUBADD231PS512 + return true + case OpFusedMultiplySubAdd231Float32x4: + v.Op = OpAMD64VFMSUBADD231PS128 + return true + case OpFusedMultiplySubAdd231Float32x8: + v.Op = OpAMD64VFMSUBADD231PS256 + return true + case OpFusedMultiplySubAdd231Float64x2: + v.Op = OpAMD64VFMSUBADD231PD128 + return true + case OpFusedMultiplySubAdd231Float64x4: + v.Op = OpAMD64VFMSUBADD231PD256 + return true + case OpFusedMultiplySubAdd231Float64x8: + v.Op = OpAMD64VFMSUBADD231PD512 + return true + case OpFusedNegativeMultiplyAdd132Float32x16: + v.Op = OpAMD64VFNMADD132PS512 + return true + case OpFusedNegativeMultiplyAdd132Float32x4: + v.Op = OpAMD64VFNMADD132PS128 + return true + case OpFusedNegativeMultiplyAdd132Float32x8: + v.Op = OpAMD64VFNMADD132PS256 + return true + case OpFusedNegativeMultiplyAdd132Float64x2: + v.Op = OpAMD64VFNMADD132PD128 + return true + case OpFusedNegativeMultiplyAdd132Float64x4: + v.Op = OpAMD64VFNMADD132PD256 + return true + case OpFusedNegativeMultiplyAdd132Float64x8: + v.Op = OpAMD64VFNMADD132PD512 + return true + case OpFusedNegativeMultiplyAdd213Float32x16: + v.Op = OpAMD64VFNMADD213PS512 + return true + case OpFusedNegativeMultiplyAdd213Float32x4: + v.Op = OpAMD64VFNMADD213PS128 + return true + case OpFusedNegativeMultiplyAdd213Float32x8: + v.Op = OpAMD64VFNMADD213PS256 + return true + case OpFusedNegativeMultiplyAdd213Float64x2: + v.Op = OpAMD64VFNMADD213PD128 + return true + case OpFusedNegativeMultiplyAdd213Float64x4: + v.Op = OpAMD64VFNMADD213PD256 + return true + case OpFusedNegativeMultiplyAdd213Float64x8: + v.Op = OpAMD64VFNMADD213PD512 + return true + case OpFusedNegativeMultiplyAdd231Float32x16: + v.Op = OpAMD64VFNMADD231PS512 + return true + case OpFusedNegativeMultiplyAdd231Float32x4: + v.Op = OpAMD64VFNMADD231PS128 + return true + case OpFusedNegativeMultiplyAdd231Float32x8: + v.Op = OpAMD64VFNMADD231PS256 + return true + case OpFusedNegativeMultiplyAdd231Float64x2: + v.Op = OpAMD64VFNMADD231PD128 + return true + case OpFusedNegativeMultiplyAdd231Float64x4: + v.Op = OpAMD64VFNMADD231PD256 + return true + case OpFusedNegativeMultiplyAdd231Float64x8: + v.Op = OpAMD64VFNMADD231PD512 + return true + case OpFusedNegativeMultiplySub132Float32x16: + v.Op = OpAMD64VFNMSUB132PS512 + return true + case OpFusedNegativeMultiplySub132Float32x4: + v.Op = OpAMD64VFNMSUB132PS128 + return true + case OpFusedNegativeMultiplySub132Float32x8: + v.Op = OpAMD64VFNMSUB132PS256 + return true + case OpFusedNegativeMultiplySub132Float64x2: + v.Op = OpAMD64VFNMSUB132PD128 + return true + case OpFusedNegativeMultiplySub132Float64x4: + v.Op = OpAMD64VFNMSUB132PD256 + return true + case OpFusedNegativeMultiplySub132Float64x8: + v.Op = OpAMD64VFNMSUB132PD512 + return true + case 
OpFusedNegativeMultiplySub213Float32x16: + v.Op = OpAMD64VFNMSUB213PS512 + return true + case OpFusedNegativeMultiplySub213Float32x4: + v.Op = OpAMD64VFNMSUB213PS128 + return true + case OpFusedNegativeMultiplySub213Float32x8: + v.Op = OpAMD64VFNMSUB213PS256 + return true + case OpFusedNegativeMultiplySub213Float64x2: + v.Op = OpAMD64VFNMSUB213PD128 + return true + case OpFusedNegativeMultiplySub213Float64x4: + v.Op = OpAMD64VFNMSUB213PD256 + return true + case OpFusedNegativeMultiplySub213Float64x8: + v.Op = OpAMD64VFNMSUB213PD512 + return true + case OpFusedNegativeMultiplySub231Float32x16: + v.Op = OpAMD64VFNMSUB231PS512 + return true + case OpFusedNegativeMultiplySub231Float32x4: + v.Op = OpAMD64VFNMSUB231PS128 + return true + case OpFusedNegativeMultiplySub231Float32x8: + v.Op = OpAMD64VFNMSUB231PS256 + return true + case OpFusedNegativeMultiplySub231Float64x2: + v.Op = OpAMD64VFNMSUB231PD128 + return true + case OpFusedNegativeMultiplySub231Float64x4: + v.Op = OpAMD64VFNMSUB231PD256 + return true + case OpFusedNegativeMultiplySub231Float64x8: + v.Op = OpAMD64VFNMSUB231PD512 + return true case OpGetCallerPC: v.Op = OpAMD64LoweredGetCallerPC return true @@ -2162,6 +2486,222 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat64x4(v) case OpMaskedFloorWithPrecisionFloat64x8: return rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat64x8(v) + case OpMaskedFusedMultiplyAdd132Float32x16: + return rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float32x16(v) + case OpMaskedFusedMultiplyAdd132Float32x4: + return rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float32x4(v) + case OpMaskedFusedMultiplyAdd132Float32x8: + return rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float32x8(v) + case OpMaskedFusedMultiplyAdd132Float64x2: + return rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float64x2(v) + case OpMaskedFusedMultiplyAdd132Float64x4: + return rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float64x4(v) + case OpMaskedFusedMultiplyAdd132Float64x8: + return rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float64x8(v) + case OpMaskedFusedMultiplyAdd213Float32x16: + return rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float32x16(v) + case OpMaskedFusedMultiplyAdd213Float32x4: + return rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float32x4(v) + case OpMaskedFusedMultiplyAdd213Float32x8: + return rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float32x8(v) + case OpMaskedFusedMultiplyAdd213Float64x2: + return rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float64x2(v) + case OpMaskedFusedMultiplyAdd213Float64x4: + return rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float64x4(v) + case OpMaskedFusedMultiplyAdd213Float64x8: + return rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float64x8(v) + case OpMaskedFusedMultiplyAdd231Float32x16: + return rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float32x16(v) + case OpMaskedFusedMultiplyAdd231Float32x4: + return rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float32x4(v) + case OpMaskedFusedMultiplyAdd231Float32x8: + return rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float32x8(v) + case OpMaskedFusedMultiplyAdd231Float64x2: + return rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float64x2(v) + case OpMaskedFusedMultiplyAdd231Float64x4: + return rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float64x4(v) + case OpMaskedFusedMultiplyAdd231Float64x8: + return rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float64x8(v) + case OpMaskedFusedMultiplyAddSub132Float32x16: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float32x16(v) + 
case OpMaskedFusedMultiplyAddSub132Float32x4: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float32x4(v) + case OpMaskedFusedMultiplyAddSub132Float32x8: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float32x8(v) + case OpMaskedFusedMultiplyAddSub132Float64x2: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float64x2(v) + case OpMaskedFusedMultiplyAddSub132Float64x4: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float64x4(v) + case OpMaskedFusedMultiplyAddSub132Float64x8: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float64x8(v) + case OpMaskedFusedMultiplyAddSub213Float32x16: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float32x16(v) + case OpMaskedFusedMultiplyAddSub213Float32x4: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float32x4(v) + case OpMaskedFusedMultiplyAddSub213Float32x8: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float32x8(v) + case OpMaskedFusedMultiplyAddSub213Float64x2: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float64x2(v) + case OpMaskedFusedMultiplyAddSub213Float64x4: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float64x4(v) + case OpMaskedFusedMultiplyAddSub213Float64x8: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float64x8(v) + case OpMaskedFusedMultiplyAddSub231Float32x16: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float32x16(v) + case OpMaskedFusedMultiplyAddSub231Float32x4: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float32x4(v) + case OpMaskedFusedMultiplyAddSub231Float32x8: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float32x8(v) + case OpMaskedFusedMultiplyAddSub231Float64x2: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float64x2(v) + case OpMaskedFusedMultiplyAddSub231Float64x4: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float64x4(v) + case OpMaskedFusedMultiplyAddSub231Float64x8: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float64x8(v) + case OpMaskedFusedMultiplySub132Float32x16: + return rewriteValueAMD64_OpMaskedFusedMultiplySub132Float32x16(v) + case OpMaskedFusedMultiplySub132Float32x4: + return rewriteValueAMD64_OpMaskedFusedMultiplySub132Float32x4(v) + case OpMaskedFusedMultiplySub132Float32x8: + return rewriteValueAMD64_OpMaskedFusedMultiplySub132Float32x8(v) + case OpMaskedFusedMultiplySub132Float64x2: + return rewriteValueAMD64_OpMaskedFusedMultiplySub132Float64x2(v) + case OpMaskedFusedMultiplySub132Float64x4: + return rewriteValueAMD64_OpMaskedFusedMultiplySub132Float64x4(v) + case OpMaskedFusedMultiplySub132Float64x8: + return rewriteValueAMD64_OpMaskedFusedMultiplySub132Float64x8(v) + case OpMaskedFusedMultiplySub213Float32x16: + return rewriteValueAMD64_OpMaskedFusedMultiplySub213Float32x16(v) + case OpMaskedFusedMultiplySub213Float32x4: + return rewriteValueAMD64_OpMaskedFusedMultiplySub213Float32x4(v) + case OpMaskedFusedMultiplySub213Float32x8: + return rewriteValueAMD64_OpMaskedFusedMultiplySub213Float32x8(v) + case OpMaskedFusedMultiplySub213Float64x2: + return rewriteValueAMD64_OpMaskedFusedMultiplySub213Float64x2(v) + case OpMaskedFusedMultiplySub213Float64x4: + return rewriteValueAMD64_OpMaskedFusedMultiplySub213Float64x4(v) + case OpMaskedFusedMultiplySub213Float64x8: + return rewriteValueAMD64_OpMaskedFusedMultiplySub213Float64x8(v) + case OpMaskedFusedMultiplySub231Float32x16: + return rewriteValueAMD64_OpMaskedFusedMultiplySub231Float32x16(v) + case OpMaskedFusedMultiplySub231Float32x4: + return 
rewriteValueAMD64_OpMaskedFusedMultiplySub231Float32x4(v) + case OpMaskedFusedMultiplySub231Float32x8: + return rewriteValueAMD64_OpMaskedFusedMultiplySub231Float32x8(v) + case OpMaskedFusedMultiplySub231Float64x2: + return rewriteValueAMD64_OpMaskedFusedMultiplySub231Float64x2(v) + case OpMaskedFusedMultiplySub231Float64x4: + return rewriteValueAMD64_OpMaskedFusedMultiplySub231Float64x4(v) + case OpMaskedFusedMultiplySub231Float64x8: + return rewriteValueAMD64_OpMaskedFusedMultiplySub231Float64x8(v) + case OpMaskedFusedMultiplySubAdd132Float32x16: + return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float32x16(v) + case OpMaskedFusedMultiplySubAdd132Float32x4: + return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float32x4(v) + case OpMaskedFusedMultiplySubAdd132Float32x8: + return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float32x8(v) + case OpMaskedFusedMultiplySubAdd132Float64x2: + return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float64x2(v) + case OpMaskedFusedMultiplySubAdd132Float64x4: + return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float64x4(v) + case OpMaskedFusedMultiplySubAdd132Float64x8: + return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float64x8(v) + case OpMaskedFusedMultiplySubAdd213Float32x16: + return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float32x16(v) + case OpMaskedFusedMultiplySubAdd213Float32x4: + return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float32x4(v) + case OpMaskedFusedMultiplySubAdd213Float32x8: + return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float32x8(v) + case OpMaskedFusedMultiplySubAdd213Float64x2: + return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float64x2(v) + case OpMaskedFusedMultiplySubAdd213Float64x4: + return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float64x4(v) + case OpMaskedFusedMultiplySubAdd213Float64x8: + return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float64x8(v) + case OpMaskedFusedMultiplySubAdd231Float32x16: + return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float32x16(v) + case OpMaskedFusedMultiplySubAdd231Float32x4: + return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float32x4(v) + case OpMaskedFusedMultiplySubAdd231Float32x8: + return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float32x8(v) + case OpMaskedFusedMultiplySubAdd231Float64x2: + return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float64x2(v) + case OpMaskedFusedMultiplySubAdd231Float64x4: + return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float64x4(v) + case OpMaskedFusedMultiplySubAdd231Float64x8: + return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float64x8(v) + case OpMaskedFusedNegativeMultiplyAdd132Float32x16: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float32x16(v) + case OpMaskedFusedNegativeMultiplyAdd132Float32x4: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float32x4(v) + case OpMaskedFusedNegativeMultiplyAdd132Float32x8: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float32x8(v) + case OpMaskedFusedNegativeMultiplyAdd132Float64x2: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float64x2(v) + case OpMaskedFusedNegativeMultiplyAdd132Float64x4: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float64x4(v) + case OpMaskedFusedNegativeMultiplyAdd132Float64x8: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float64x8(v) + case OpMaskedFusedNegativeMultiplyAdd213Float32x16: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float32x16(v) + case 
OpMaskedFusedNegativeMultiplyAdd213Float32x4: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float32x4(v) + case OpMaskedFusedNegativeMultiplyAdd213Float32x8: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float32x8(v) + case OpMaskedFusedNegativeMultiplyAdd213Float64x2: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float64x2(v) + case OpMaskedFusedNegativeMultiplyAdd213Float64x4: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float64x4(v) + case OpMaskedFusedNegativeMultiplyAdd213Float64x8: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float64x8(v) + case OpMaskedFusedNegativeMultiplyAdd231Float32x16: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float32x16(v) + case OpMaskedFusedNegativeMultiplyAdd231Float32x4: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float32x4(v) + case OpMaskedFusedNegativeMultiplyAdd231Float32x8: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float32x8(v) + case OpMaskedFusedNegativeMultiplyAdd231Float64x2: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float64x2(v) + case OpMaskedFusedNegativeMultiplyAdd231Float64x4: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float64x4(v) + case OpMaskedFusedNegativeMultiplyAdd231Float64x8: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float64x8(v) + case OpMaskedFusedNegativeMultiplySub132Float32x16: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float32x16(v) + case OpMaskedFusedNegativeMultiplySub132Float32x4: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float32x4(v) + case OpMaskedFusedNegativeMultiplySub132Float32x8: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float32x8(v) + case OpMaskedFusedNegativeMultiplySub132Float64x2: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float64x2(v) + case OpMaskedFusedNegativeMultiplySub132Float64x4: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float64x4(v) + case OpMaskedFusedNegativeMultiplySub132Float64x8: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float64x8(v) + case OpMaskedFusedNegativeMultiplySub213Float32x16: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float32x16(v) + case OpMaskedFusedNegativeMultiplySub213Float32x4: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float32x4(v) + case OpMaskedFusedNegativeMultiplySub213Float32x8: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float32x8(v) + case OpMaskedFusedNegativeMultiplySub213Float64x2: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float64x2(v) + case OpMaskedFusedNegativeMultiplySub213Float64x4: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float64x4(v) + case OpMaskedFusedNegativeMultiplySub213Float64x8: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float64x8(v) + case OpMaskedFusedNegativeMultiplySub231Float32x16: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float32x16(v) + case OpMaskedFusedNegativeMultiplySub231Float32x4: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float32x4(v) + case OpMaskedFusedNegativeMultiplySub231Float32x8: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float32x8(v) + case OpMaskedFusedNegativeMultiplySub231Float64x2: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float64x2(v) + case OpMaskedFusedNegativeMultiplySub231Float64x4: + return 
rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float64x4(v) + case OpMaskedFusedNegativeMultiplySub231Float64x8: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float64x8(v) case OpMaskedGreaterEqualFloat32x16: return rewriteValueAMD64_OpMaskedGreaterEqualFloat32x16(v) case OpMaskedGreaterEqualFloat32x4: @@ -37444,6 +37984,2166 @@ func rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat64x8(v *Value) bool { return true } } +func rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAdd132Float32x16 x y z mask) + // result: (VFMADD132PSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD132PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAdd132Float32x4 x y z mask) + // result: (VFMADD132PSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD132PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAdd132Float32x8 x y z mask) + // result: (VFMADD132PSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD132PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAdd132Float64x2 x y z mask) + // result: (VFMADD132PDMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD132PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAdd132Float64x4 x y z mask) + // result: (VFMADD132PDMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD132PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAdd132Float64x8 x y z mask) + // result: (VFMADD132PDMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD132PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return 
true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAdd213Float32x16 x y z mask) + // result: (VFMADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD213PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAdd213Float32x4 x y z mask) + // result: (VFMADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD213PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAdd213Float32x8 x y z mask) + // result: (VFMADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD213PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAdd213Float64x2 x y z mask) + // result: (VFMADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD213PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAdd213Float64x4 x y z mask) + // result: (VFMADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD213PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAdd213Float64x8 x y z mask) + // result: (VFMADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD213PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAdd231Float32x16 x y z mask) + // result: (VFMADD231PSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD231PSMasked512) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAdd231Float32x4 x y z mask) + // result: (VFMADD231PSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD231PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAdd231Float32x8 x y z mask) + // result: (VFMADD231PSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD231PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAdd231Float64x2 x y z mask) + // result: (VFMADD231PDMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD231PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAdd231Float64x4 x y z mask) + // result: (VFMADD231PDMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD231PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAdd231Float64x8 x y z mask) + // result: (VFMADD231PDMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD231PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAddSub132Float32x16 x y z mask) + // result: (VFMADDSUB132PSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB132PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAddSub132Float32x4 x y z mask) + // result: (VFMADDSUB132PSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := 
v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB132PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAddSub132Float32x8 x y z mask) + // result: (VFMADDSUB132PSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB132PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAddSub132Float64x2 x y z mask) + // result: (VFMADDSUB132PDMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB132PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAddSub132Float64x4 x y z mask) + // result: (VFMADDSUB132PDMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB132PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAddSub132Float64x8 x y z mask) + // result: (VFMADDSUB132PDMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB132PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAddSub213Float32x16 x y z mask) + // result: (VFMADDSUB213PSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB213PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAddSub213Float32x4 x y z mask) + // result: (VFMADDSUB213PSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB213PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := 
v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAddSub213Float32x8 x y z mask) + // result: (VFMADDSUB213PSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB213PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAddSub213Float64x2 x y z mask) + // result: (VFMADDSUB213PDMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB213PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAddSub213Float64x4 x y z mask) + // result: (VFMADDSUB213PDMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB213PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAddSub213Float64x8 x y z mask) + // result: (VFMADDSUB213PDMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB213PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAddSub231Float32x16 x y z mask) + // result: (VFMADDSUB231PSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB231PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAddSub231Float32x4 x y z mask) + // result: (VFMADDSUB231PSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB231PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAddSub231Float32x8 x y z mask) + // result: (VFMADDSUB231PSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB231PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + 
return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAddSub231Float64x2 x y z mask) + // result: (VFMADDSUB231PDMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB231PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAddSub231Float64x4 x y z mask) + // result: (VFMADDSUB231PDMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB231PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAddSub231Float64x8 x y z mask) + // result: (VFMADDSUB231PDMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB231PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySub132Float32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySub132Float32x16 x y z mask) + // result: (VFMSUB132PSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUB132PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySub132Float32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySub132Float32x4 x y z mask) + // result: (VFMSUB132PSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUB132PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySub132Float32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySub132Float32x8 x y z mask) + // result: (VFMSUB132PSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUB132PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySub132Float64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySub132Float64x2 x y z mask) + // result: (VFMSUB132PDMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + 
v.reset(OpAMD64VFMSUB132PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySub132Float64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySub132Float64x4 x y z mask) + // result: (VFMSUB132PDMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUB132PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySub132Float64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySub132Float64x8 x y z mask) + // result: (VFMSUB132PDMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUB132PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySub213Float32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySub213Float32x16 x y z mask) + // result: (VFMSUB213PSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUB213PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySub213Float32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySub213Float32x4 x y z mask) + // result: (VFMSUB213PSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUB213PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySub213Float32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySub213Float32x8 x y z mask) + // result: (VFMSUB213PSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUB213PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySub213Float64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySub213Float64x2 x y z mask) + // result: (VFMSUB213PDMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUB213PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySub213Float64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySub213Float64x4 x y z mask) + // result: (VFMSUB213PDMasked256 x y 
z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUB213PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySub213Float64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySub213Float64x8 x y z mask) + // result: (VFMSUB213PDMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUB213PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySub231Float32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySub231Float32x16 x y z mask) + // result: (VFMSUB231PSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUB231PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySub231Float32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySub231Float32x4 x y z mask) + // result: (VFMSUB231PSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUB231PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySub231Float32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySub231Float32x8 x y z mask) + // result: (VFMSUB231PSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUB231PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySub231Float64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySub231Float64x2 x y z mask) + // result: (VFMSUB231PDMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUB231PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySub231Float64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySub231Float64x4 x y z mask) + // result: (VFMSUB231PDMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUB231PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySub231Float64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: 
(MaskedFusedMultiplySub231Float64x8 x y z mask) + // result: (VFMSUB231PDMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUB231PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySubAdd132Float32x16 x y z mask) + // result: (VFMSUBADD132PSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD132PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySubAdd132Float32x4 x y z mask) + // result: (VFMSUBADD132PSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD132PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySubAdd132Float32x8 x y z mask) + // result: (VFMSUBADD132PSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD132PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySubAdd132Float64x2 x y z mask) + // result: (VFMSUBADD132PDMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD132PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySubAdd132Float64x4 x y z mask) + // result: (VFMSUBADD132PDMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD132PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySubAdd132Float64x8 x y z mask) + // result: (VFMSUBADD132PDMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD132PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func 
rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySubAdd213Float32x16 x y z mask) + // result: (VFMSUBADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD213PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySubAdd213Float32x4 x y z mask) + // result: (VFMSUBADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD213PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySubAdd213Float32x8 x y z mask) + // result: (VFMSUBADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD213PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySubAdd213Float64x2 x y z mask) + // result: (VFMSUBADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD213PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySubAdd213Float64x4 x y z mask) + // result: (VFMSUBADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD213PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySubAdd213Float64x8 x y z mask) + // result: (VFMSUBADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD213PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySubAdd231Float32x16 x y z mask) + // result: (VFMSUBADD231PSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + 
v.reset(OpAMD64VFMSUBADD231PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySubAdd231Float32x4 x y z mask) + // result: (VFMSUBADD231PSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD231PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySubAdd231Float32x8 x y z mask) + // result: (VFMSUBADD231PSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD231PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySubAdd231Float64x2 x y z mask) + // result: (VFMSUBADD231PDMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD231PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySubAdd231Float64x4 x y z mask) + // result: (VFMSUBADD231PDMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD231PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySubAdd231Float64x8 x y z mask) + // result: (VFMSUBADD231PDMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD231PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplyAdd132Float32x16 x y z mask) + // result: (VFNMADD132PSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMADD132PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // 
match: (MaskedFusedNegativeMultiplyAdd132Float32x4 x y z mask) + // result: (VFNMADD132PSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMADD132PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplyAdd132Float32x8 x y z mask) + // result: (VFNMADD132PSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMADD132PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplyAdd132Float64x2 x y z mask) + // result: (VFNMADD132PDMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMADD132PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplyAdd132Float64x4 x y z mask) + // result: (VFNMADD132PDMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMADD132PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplyAdd132Float64x8 x y z mask) + // result: (VFNMADD132PDMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMADD132PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplyAdd213Float32x16 x y z mask) + // result: (VFNMADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMADD213PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplyAdd213Float32x4 x y z mask) + // result: (VFNMADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMADD213PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, 
z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplyAdd213Float32x8 x y z mask) + // result: (VFNMADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMADD213PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplyAdd213Float64x2 x y z mask) + // result: (VFNMADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMADD213PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplyAdd213Float64x4 x y z mask) + // result: (VFNMADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMADD213PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplyAdd213Float64x8 x y z mask) + // result: (VFNMADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMADD213PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplyAdd231Float32x16 x y z mask) + // result: (VFNMADD231PSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMADD231PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplyAdd231Float32x4 x y z mask) + // result: (VFNMADD231PSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMADD231PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplyAdd231Float32x8 x y z mask) + // result: (VFNMADD231PSMasked256 x y z 
(VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMADD231PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplyAdd231Float64x2 x y z mask) + // result: (VFNMADD231PDMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMADD231PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplyAdd231Float64x4 x y z mask) + // result: (VFNMADD231PDMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMADD231PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplyAdd231Float64x8 x y z mask) + // result: (VFNMADD231PDMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMADD231PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplySub132Float32x16 x y z mask) + // result: (VFNMSUB132PSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMSUB132PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplySub132Float32x4 x y z mask) + // result: (VFNMSUB132PSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMSUB132PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplySub132Float32x8 x y z mask) + // result: (VFNMSUB132PSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMSUB132PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float64x2(v *Value) 
bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplySub132Float64x2 x y z mask) + // result: (VFNMSUB132PDMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMSUB132PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplySub132Float64x4 x y z mask) + // result: (VFNMSUB132PDMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMSUB132PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplySub132Float64x8 x y z mask) + // result: (VFNMSUB132PDMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMSUB132PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplySub213Float32x16 x y z mask) + // result: (VFNMSUB213PSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMSUB213PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplySub213Float32x4 x y z mask) + // result: (VFNMSUB213PSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMSUB213PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplySub213Float32x8 x y z mask) + // result: (VFNMSUB213PSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMSUB213PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplySub213Float64x2 x y z mask) + // result: (VFNMSUB213PDMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMSUB213PDMasked128) + 
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplySub213Float64x4 x y z mask) + // result: (VFNMSUB213PDMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMSUB213PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplySub213Float64x8 x y z mask) + // result: (VFNMSUB213PDMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMSUB213PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplySub231Float32x16 x y z mask) + // result: (VFNMSUB231PSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMSUB231PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplySub231Float32x4 x y z mask) + // result: (VFNMSUB231PSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMSUB231PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplySub231Float32x8 x y z mask) + // result: (VFNMSUB231PSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMSUB231PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplySub231Float64x2 x y z mask) + // result: (VFNMSUB231PDMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMSUB231PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: 
(MaskedFusedNegativeMultiplySub231Float64x4 x y z mask) + // result: (VFNMSUB231PDMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMSUB231PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplySub231Float64x8 x y z mask) + // result: (VFNMSUB231PDMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMSUB231PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} func rewriteValueAMD64_OpMaskedGreaterEqualFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index b7b80a706311ea..8b9bd92a0cdebe 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -665,6 +665,24 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint8x64.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x64.SaturatedSub", opLen2(ssa.OpSaturatedSubUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x64.Sub", opLen2(ssa.OpSubUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, 
"Float32x16.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x16.MaskedAdd", opLen3(ssa.OpMaskedAddFloat32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x16.MaskedAnd", opLen3(ssa.OpMaskedAndFloat32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x16.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat32x16, types.TypeVec512), sys.AMD64) @@ -683,6 +701,24 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float32x16.MaskedOr", opLen3(ssa.OpMaskedOrFloat32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x16.MaskedSub", opLen3(ssa.OpMaskedSubFloat32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x16.MaskedXor", opLen3(ssa.OpMaskedXorFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float32x4, types.TypeVec128), 
sys.AMD64) + addF(simdPackage, "Float32x4.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x4.MaskedAdd", opLen3(ssa.OpMaskedAddFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x4.MaskedAnd", opLen3(ssa.OpMaskedAndFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat32x4, types.TypeVec128), sys.AMD64) @@ -701,6 +737,24 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float32x4.MaskedOr", opLen3(ssa.OpMaskedOrFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x4.MaskedSub", opLen3(ssa.OpMaskedSubFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x4.MaskedXor", opLen3(ssa.OpMaskedXorFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x8.MaskedAdd", opLen3(ssa.OpMaskedAddFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x8.MaskedAnd", opLen3(ssa.OpMaskedAndFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x8.MaskedAndNot", 
opLen3(ssa.OpMaskedAndNotFloat32x8, types.TypeVec256), sys.AMD64) @@ -719,6 +773,24 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float32x8.MaskedOr", opLen3(ssa.OpMaskedOrFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x8.MaskedSub", opLen3(ssa.OpMaskedSubFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x8.MaskedXor", opLen3(ssa.OpMaskedXorFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x2.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x2.MaskedAdd", opLen3(ssa.OpMaskedAddFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x2.MaskedAnd", opLen3(ssa.OpMaskedAndFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x2.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat64x2, types.TypeVec128), sys.AMD64) @@ -737,6 +809,24 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float64x2.MaskedOr", opLen3(ssa.OpMaskedOrFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x2.MaskedSub", opLen3(ssa.OpMaskedSubFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x2.MaskedXor", opLen3(ssa.OpMaskedXorFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x4.MaskedAdd", opLen3(ssa.OpMaskedAddFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x4.MaskedAnd", opLen3(ssa.OpMaskedAndFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat64x4, types.TypeVec256), sys.AMD64) @@ -755,6 +845,24 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float64x4.MaskedOr", opLen3(ssa.OpMaskedOrFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x4.MaskedSub", opLen3(ssa.OpMaskedSubFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x4.MaskedXor", opLen3(ssa.OpMaskedXorFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x8.MaskedAdd", opLen3(ssa.OpMaskedAddFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x8.MaskedAnd", opLen3(ssa.OpMaskedAndFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat64x8, types.TypeVec512), sys.AMD64) @@ -1136,6 +1244,114 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint8x64.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x64.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x64.MaskedSub", opLen3(ssa.OpMaskedSubUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedMultiplyAdd132", opLen4(ssa.OpMaskedFusedMultiplyAdd132Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedMultiplyAdd132", opLen4(ssa.OpMaskedFusedMultiplyAdd132Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float32x4, types.TypeVec128), sys.AMD64) + 
addF(simdPackage, "Float32x4.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedMultiplyAdd132", opLen4(ssa.OpMaskedFusedMultiplyAdd132Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedMultiplySubAdd213", 
opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedMultiplyAdd132", opLen4(ssa.OpMaskedFusedMultiplyAdd132Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplySub213", 
opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedMultiplyAdd132", opLen4(ssa.OpMaskedFusedMultiplyAdd132Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedFusedMultiplyAdd132", opLen4(ssa.OpMaskedFusedMultiplyAdd132Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float64x8, types.TypeVec512), 
sys.AMD64) + addF(simdPackage, "Float64x8.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x16.MaskedPairDotProdAccumulate", opLen4(ssa.OpMaskedPairDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x16.MaskedSaturatedPairDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedPairDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x16.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) diff --git a/src/simd/stubs_amd64.go b/src/simd/stubs_amd64.go index 49af32bc4fca61..cf37b5efcedd17 100644 --- a/src/simd/stubs_amd64.go +++ b/src/simd/stubs_amd64.go @@ -3529,6 +3529,96 @@ func (x Uint8x64) SaturatedSub(y Uint8x64) Uint8x64 // Asm: VPSUBB, CPU Feature: AVX512EVEX func (x Uint8x64) Sub(y Uint8x64) Uint8x64 +// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. +// +// Asm: VFMADD132PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplyAdd132(y Float32x16, z Float32x16) Float32x16 + +// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// +// Asm: VFMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplyAdd213(y Float32x16, z Float32x16) Float32x16 + +// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. +// +// Asm: VFMADD231PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplyAdd231(y Float32x16, z Float32x16) Float32x16 + +// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. 
+// +// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplyAddSub132(y Float32x16, z Float32x16) Float32x16 + +// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// +// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplyAddSub213(y Float32x16, z Float32x16) Float32x16 + +// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. +// +// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplyAddSub231(y Float32x16, z Float32x16) Float32x16 + +// FusedMultiplySub132 performs `(v1 * v3) - v2`. +// +// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplySub132(y Float32x16, z Float32x16) Float32x16 + +// FusedMultiplySub213 performs `(v2 * v1) - v3`. +// +// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplySub213(y Float32x16, z Float32x16) Float32x16 + +// FusedMultiplySub231 performs `(v2 * v3) - v1`. +// +// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplySub231(y Float32x16, z Float32x16) Float32x16 + +// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. +// +// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplySubAdd132(y Float32x16, z Float32x16) Float32x16 + +// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// +// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplySubAdd213(y Float32x16, z Float32x16) Float32x16 + +// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. +// +// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplySubAdd231(y Float32x16, z Float32x16) Float32x16 + +// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. +// +// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedNegativeMultiplyAdd132(y Float32x16, z Float32x16) Float32x16 + +// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. +// +// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedNegativeMultiplyAdd213(y Float32x16, z Float32x16) Float32x16 + +// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. +// +// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedNegativeMultiplyAdd231(y Float32x16, z Float32x16) Float32x16 + +// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. +// +// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedNegativeMultiplySub132(y Float32x16, z Float32x16) Float32x16 + +// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. +// +// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedNegativeMultiplySub213(y Float32x16, z Float32x16) Float32x16 + +// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. +// +// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedNegativeMultiplySub231(y Float32x16, z Float32x16) Float32x16 + // Add adds corresponding elements of two vectors. 
// // Asm: VADDPS, CPU Feature: AVX512EVEX @@ -3626,6 +3716,96 @@ func (x Float32x16) MaskedSub(y Float32x16, z Mask32x16) Float32x16 // Asm: VXORPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedXor(y Float32x16, z Mask32x16) Float32x16 +// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. +// +// Asm: VFMADD132PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplyAdd132(y Float32x4, z Float32x4) Float32x4 + +// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// +// Asm: VFMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplyAdd213(y Float32x4, z Float32x4) Float32x4 + +// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. +// +// Asm: VFMADD231PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplyAdd231(y Float32x4, z Float32x4) Float32x4 + +// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. +// +// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplyAddSub132(y Float32x4, z Float32x4) Float32x4 + +// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// +// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplyAddSub213(y Float32x4, z Float32x4) Float32x4 + +// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. +// +// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplyAddSub231(y Float32x4, z Float32x4) Float32x4 + +// FusedMultiplySub132 performs `(v1 * v3) - v2`. +// +// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplySub132(y Float32x4, z Float32x4) Float32x4 + +// FusedMultiplySub213 performs `(v2 * v1) - v3`. +// +// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplySub213(y Float32x4, z Float32x4) Float32x4 + +// FusedMultiplySub231 performs `(v2 * v3) - v1`. +// +// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplySub231(y Float32x4, z Float32x4) Float32x4 + +// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. +// +// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplySubAdd132(y Float32x4, z Float32x4) Float32x4 + +// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// +// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplySubAdd213(y Float32x4, z Float32x4) Float32x4 + +// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. +// +// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplySubAdd231(y Float32x4, z Float32x4) Float32x4 + +// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. +// +// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedNegativeMultiplyAdd132(y Float32x4, z Float32x4) Float32x4 + +// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. +// +// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedNegativeMultiplyAdd213(y Float32x4, z Float32x4) Float32x4 + +// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. +// +// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedNegativeMultiplyAdd231(y Float32x4, z Float32x4) Float32x4 + +// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. 
+// +// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedNegativeMultiplySub132(y Float32x4, z Float32x4) Float32x4 + +// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. +// +// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedNegativeMultiplySub213(y Float32x4, z Float32x4) Float32x4 + +// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. +// +// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedNegativeMultiplySub231(y Float32x4, z Float32x4) Float32x4 + // Add adds corresponding elements of two vectors. // // Asm: VADDPS, CPU Feature: AVX512EVEX @@ -3723,6 +3903,96 @@ func (x Float32x4) MaskedSub(y Float32x4, z Mask32x4) Float32x4 // Asm: VXORPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedXor(y Float32x4, z Mask32x4) Float32x4 +// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. +// +// Asm: VFMADD132PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplyAdd132(y Float32x8, z Float32x8) Float32x8 + +// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// +// Asm: VFMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplyAdd213(y Float32x8, z Float32x8) Float32x8 + +// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. +// +// Asm: VFMADD231PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplyAdd231(y Float32x8, z Float32x8) Float32x8 + +// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. +// +// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplyAddSub132(y Float32x8, z Float32x8) Float32x8 + +// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// +// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplyAddSub213(y Float32x8, z Float32x8) Float32x8 + +// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. +// +// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplyAddSub231(y Float32x8, z Float32x8) Float32x8 + +// FusedMultiplySub132 performs `(v1 * v3) - v2`. +// +// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplySub132(y Float32x8, z Float32x8) Float32x8 + +// FusedMultiplySub213 performs `(v2 * v1) - v3`. +// +// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplySub213(y Float32x8, z Float32x8) Float32x8 + +// FusedMultiplySub231 performs `(v2 * v3) - v1`. +// +// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplySub231(y Float32x8, z Float32x8) Float32x8 + +// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. +// +// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplySubAdd132(y Float32x8, z Float32x8) Float32x8 + +// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// +// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplySubAdd213(y Float32x8, z Float32x8) Float32x8 + +// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. +// +// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplySubAdd231(y Float32x8, z Float32x8) Float32x8 + +// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. 
+// +// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedNegativeMultiplyAdd132(y Float32x8, z Float32x8) Float32x8 + +// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. +// +// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedNegativeMultiplyAdd213(y Float32x8, z Float32x8) Float32x8 + +// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. +// +// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedNegativeMultiplyAdd231(y Float32x8, z Float32x8) Float32x8 + +// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. +// +// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedNegativeMultiplySub132(y Float32x8, z Float32x8) Float32x8 + +// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. +// +// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedNegativeMultiplySub213(y Float32x8, z Float32x8) Float32x8 + +// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. +// +// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedNegativeMultiplySub231(y Float32x8, z Float32x8) Float32x8 + // Add adds corresponding elements of two vectors. // // Asm: VADDPS, CPU Feature: AVX512EVEX @@ -3820,6 +4090,96 @@ func (x Float32x8) MaskedSub(y Float32x8, z Mask32x8) Float32x8 // Asm: VXORPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedXor(y Float32x8, z Mask32x8) Float32x8 +// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. +// +// Asm: VFMADD132PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplyAdd132(y Float64x2, z Float64x2) Float64x2 + +// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// +// Asm: VFMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplyAdd213(y Float64x2, z Float64x2) Float64x2 + +// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. +// +// Asm: VFMADD231PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplyAdd231(y Float64x2, z Float64x2) Float64x2 + +// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. +// +// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplyAddSub132(y Float64x2, z Float64x2) Float64x2 + +// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// +// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplyAddSub213(y Float64x2, z Float64x2) Float64x2 + +// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. +// +// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplyAddSub231(y Float64x2, z Float64x2) Float64x2 + +// FusedMultiplySub132 performs `(v1 * v3) - v2`. +// +// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplySub132(y Float64x2, z Float64x2) Float64x2 + +// FusedMultiplySub213 performs `(v2 * v1) - v3`. +// +// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplySub213(y Float64x2, z Float64x2) Float64x2 + +// FusedMultiplySub231 performs `(v2 * v3) - v1`. +// +// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplySub231(y Float64x2, z Float64x2) Float64x2 + +// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. 
+// +// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplySubAdd132(y Float64x2, z Float64x2) Float64x2 + +// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// +// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplySubAdd213(y Float64x2, z Float64x2) Float64x2 + +// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. +// +// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplySubAdd231(y Float64x2, z Float64x2) Float64x2 + +// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. +// +// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedNegativeMultiplyAdd132(y Float64x2, z Float64x2) Float64x2 + +// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. +// +// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedNegativeMultiplyAdd213(y Float64x2, z Float64x2) Float64x2 + +// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. +// +// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedNegativeMultiplyAdd231(y Float64x2, z Float64x2) Float64x2 + +// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. +// +// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedNegativeMultiplySub132(y Float64x2, z Float64x2) Float64x2 + +// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. +// +// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedNegativeMultiplySub213(y Float64x2, z Float64x2) Float64x2 + +// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. +// +// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedNegativeMultiplySub231(y Float64x2, z Float64x2) Float64x2 + // Add adds corresponding elements of two vectors. // // Asm: VADDPD, CPU Feature: AVX512EVEX @@ -3917,6 +4277,96 @@ func (x Float64x2) MaskedSub(y Float64x2, z Mask64x2) Float64x2 // Asm: VXORPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedXor(y Float64x2, z Mask64x2) Float64x2 +// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. +// +// Asm: VFMADD132PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplyAdd132(y Float64x4, z Float64x4) Float64x4 + +// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// +// Asm: VFMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplyAdd213(y Float64x4, z Float64x4) Float64x4 + +// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. +// +// Asm: VFMADD231PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplyAdd231(y Float64x4, z Float64x4) Float64x4 + +// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. +// +// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplyAddSub132(y Float64x4, z Float64x4) Float64x4 + +// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// +// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplyAddSub213(y Float64x4, z Float64x4) Float64x4 + +// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. +// +// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplyAddSub231(y Float64x4, z Float64x4) Float64x4 + +// FusedMultiplySub132 performs `(v1 * v3) - v2`. 
+// +// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplySub132(y Float64x4, z Float64x4) Float64x4 + +// FusedMultiplySub213 performs `(v2 * v1) - v3`. +// +// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplySub213(y Float64x4, z Float64x4) Float64x4 + +// FusedMultiplySub231 performs `(v2 * v3) - v1`. +// +// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplySub231(y Float64x4, z Float64x4) Float64x4 + +// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. +// +// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplySubAdd132(y Float64x4, z Float64x4) Float64x4 + +// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// +// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplySubAdd213(y Float64x4, z Float64x4) Float64x4 + +// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. +// +// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplySubAdd231(y Float64x4, z Float64x4) Float64x4 + +// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. +// +// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedNegativeMultiplyAdd132(y Float64x4, z Float64x4) Float64x4 + +// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. +// +// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedNegativeMultiplyAdd213(y Float64x4, z Float64x4) Float64x4 + +// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. +// +// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedNegativeMultiplyAdd231(y Float64x4, z Float64x4) Float64x4 + +// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. +// +// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedNegativeMultiplySub132(y Float64x4, z Float64x4) Float64x4 + +// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. +// +// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedNegativeMultiplySub213(y Float64x4, z Float64x4) Float64x4 + +// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. +// +// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedNegativeMultiplySub231(y Float64x4, z Float64x4) Float64x4 + // Add adds corresponding elements of two vectors. // // Asm: VADDPD, CPU Feature: AVX512EVEX @@ -4014,6 +4464,96 @@ func (x Float64x4) MaskedSub(y Float64x4, z Mask64x4) Float64x4 // Asm: VXORPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedXor(y Float64x4, z Mask64x4) Float64x4 +// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. +// +// Asm: VFMADD132PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplyAdd132(y Float64x8, z Float64x8) Float64x8 + +// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// +// Asm: VFMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplyAdd213(y Float64x8, z Float64x8) Float64x8 + +// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. +// +// Asm: VFMADD231PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplyAdd231(y Float64x8, z Float64x8) Float64x8 + +// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. 
+// +// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplyAddSub132(y Float64x8, z Float64x8) Float64x8 + +// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// +// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplyAddSub213(y Float64x8, z Float64x8) Float64x8 + +// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. +// +// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplyAddSub231(y Float64x8, z Float64x8) Float64x8 + +// FusedMultiplySub132 performs `(v1 * v3) - v2`. +// +// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplySub132(y Float64x8, z Float64x8) Float64x8 + +// FusedMultiplySub213 performs `(v2 * v1) - v3`. +// +// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplySub213(y Float64x8, z Float64x8) Float64x8 + +// FusedMultiplySub231 performs `(v2 * v3) - v1`. +// +// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplySub231(y Float64x8, z Float64x8) Float64x8 + +// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. +// +// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplySubAdd132(y Float64x8, z Float64x8) Float64x8 + +// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// +// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplySubAdd213(y Float64x8, z Float64x8) Float64x8 + +// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. +// +// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplySubAdd231(y Float64x8, z Float64x8) Float64x8 + +// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. +// +// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedNegativeMultiplyAdd132(y Float64x8, z Float64x8) Float64x8 + +// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. +// +// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedNegativeMultiplyAdd213(y Float64x8, z Float64x8) Float64x8 + +// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. +// +// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedNegativeMultiplyAdd231(y Float64x8, z Float64x8) Float64x8 + +// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. +// +// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedNegativeMultiplySub132(y Float64x8, z Float64x8) Float64x8 + +// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. +// +// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedNegativeMultiplySub213(y Float64x8, z Float64x8) Float64x8 + +// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. +// +// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedNegativeMultiplySub231(y Float64x8, z Float64x8) Float64x8 + // Add adds corresponding elements of two vectors. // // Asm: VADDPD, CPU Feature: AVX512EVEX @@ -6082,6 +6622,546 @@ func (x Uint8x64) MaskedSaturatedSub(y Uint8x64, z Mask8x64) Uint8x64 // Asm: VPSUBB, CPU Feature: AVX512EVEX func (x Uint8x64) MaskedSub(y Uint8x64, z Mask8x64) Uint8x64 +// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. 
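For orientation, a minimal sketch of how the unmasked fused-multiply-add methods above might be called. The import path, package name, and build tag are assumptions (the package is still moving between "simd" and "internal/simd" and is gated by the simd GOEXPERIMENT); only the Float64x4 type and the FusedMultiplyAdd213 method are taken from the declarations in this patch, and nothing is claimed about how the receiver and arguments map onto v1, v2, v3 beyond what the doc comments state.

	//go:build goexperiment.simd

	package fmasketch

	import "internal/simd" // assumed import path; only importable from std while the package is under internal/

	// mulAdd213 shows the call shape of the 213 form. Per the doc comment,
	// FusedMultiplyAdd213 computes (v2 * v1) + v3 element-wise; the mapping
	// of x, y, z onto v1, v2, v3 is deliberately not restated here.
	func mulAdd213(x, y, z simd.Float64x4) simd.Float64x4 {
		return x.FusedMultiplyAdd213(y, z)
	}
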
+// +// Asm: VFMADD132PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedMultiplyAdd132(y Float32x16, z Float32x16, u Mask32x16) Float32x16 + +// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// +// Asm: VFMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedMultiplyAdd213(y Float32x16, z Float32x16, u Mask32x16) Float32x16 + +// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. +// +// Asm: VFMADD231PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedMultiplyAdd231(y Float32x16, z Float32x16, u Mask32x16) Float32x16 + +// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. +// +// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedMultiplyAddSub132(y Float32x16, z Float32x16, u Mask32x16) Float32x16 + +// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// +// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedMultiplyAddSub213(y Float32x16, z Float32x16, u Mask32x16) Float32x16 + +// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. +// +// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedMultiplyAddSub231(y Float32x16, z Float32x16, u Mask32x16) Float32x16 + +// FusedMultiplySub132 performs `(v1 * v3) - v2`. +// +// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedMultiplySub132(y Float32x16, z Float32x16, u Mask32x16) Float32x16 + +// FusedMultiplySub213 performs `(v2 * v1) - v3`. +// +// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedMultiplySub213(y Float32x16, z Float32x16, u Mask32x16) Float32x16 + +// FusedMultiplySub231 performs `(v2 * v3) - v1`. +// +// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedMultiplySub231(y Float32x16, z Float32x16, u Mask32x16) Float32x16 + +// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. +// +// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedMultiplySubAdd132(y Float32x16, z Float32x16, u Mask32x16) Float32x16 + +// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// +// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedMultiplySubAdd213(y Float32x16, z Float32x16, u Mask32x16) Float32x16 + +// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. +// +// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedMultiplySubAdd231(y Float32x16, z Float32x16, u Mask32x16) Float32x16 + +// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. +// +// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedNegativeMultiplyAdd132(y Float32x16, z Float32x16, u Mask32x16) Float32x16 + +// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. +// +// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedNegativeMultiplyAdd213(y Float32x16, z Float32x16, u Mask32x16) Float32x16 + +// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. 
+// +// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedNegativeMultiplyAdd231(y Float32x16, z Float32x16, u Mask32x16) Float32x16 + +// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. +// +// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedNegativeMultiplySub132(y Float32x16, z Float32x16, u Mask32x16) Float32x16 + +// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. +// +// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedNegativeMultiplySub213(y Float32x16, z Float32x16, u Mask32x16) Float32x16 + +// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. +// +// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedNegativeMultiplySub231(y Float32x16, z Float32x16, u Mask32x16) Float32x16 + +// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. +// +// Asm: VFMADD132PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedMultiplyAdd132(y Float32x4, z Float32x4, u Mask32x4) Float32x4 + +// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// +// Asm: VFMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedMultiplyAdd213(y Float32x4, z Float32x4, u Mask32x4) Float32x4 + +// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. +// +// Asm: VFMADD231PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedMultiplyAdd231(y Float32x4, z Float32x4, u Mask32x4) Float32x4 + +// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. +// +// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedMultiplyAddSub132(y Float32x4, z Float32x4, u Mask32x4) Float32x4 + +// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// +// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedMultiplyAddSub213(y Float32x4, z Float32x4, u Mask32x4) Float32x4 + +// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. +// +// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedMultiplyAddSub231(y Float32x4, z Float32x4, u Mask32x4) Float32x4 + +// FusedMultiplySub132 performs `(v1 * v3) - v2`. +// +// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedMultiplySub132(y Float32x4, z Float32x4, u Mask32x4) Float32x4 + +// FusedMultiplySub213 performs `(v2 * v1) - v3`. +// +// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedMultiplySub213(y Float32x4, z Float32x4, u Mask32x4) Float32x4 + +// FusedMultiplySub231 performs `(v2 * v3) - v1`. +// +// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedMultiplySub231(y Float32x4, z Float32x4, u Mask32x4) Float32x4 + +// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. +// +// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedMultiplySubAdd132(y Float32x4, z Float32x4, u Mask32x4) Float32x4 + +// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// +// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedMultiplySubAdd213(y Float32x4, z Float32x4, u Mask32x4) Float32x4 + +// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. 
+// +// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedMultiplySubAdd231(y Float32x4, z Float32x4, u Mask32x4) Float32x4 + +// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. +// +// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedNegativeMultiplyAdd132(y Float32x4, z Float32x4, u Mask32x4) Float32x4 + +// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. +// +// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedNegativeMultiplyAdd213(y Float32x4, z Float32x4, u Mask32x4) Float32x4 + +// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. +// +// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedNegativeMultiplyAdd231(y Float32x4, z Float32x4, u Mask32x4) Float32x4 + +// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. +// +// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedNegativeMultiplySub132(y Float32x4, z Float32x4, u Mask32x4) Float32x4 + +// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. +// +// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedNegativeMultiplySub213(y Float32x4, z Float32x4, u Mask32x4) Float32x4 + +// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. +// +// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedNegativeMultiplySub231(y Float32x4, z Float32x4, u Mask32x4) Float32x4 + +// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. +// +// Asm: VFMADD132PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedMultiplyAdd132(y Float32x8, z Float32x8, u Mask32x8) Float32x8 + +// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// +// Asm: VFMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedMultiplyAdd213(y Float32x8, z Float32x8, u Mask32x8) Float32x8 + +// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. +// +// Asm: VFMADD231PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedMultiplyAdd231(y Float32x8, z Float32x8, u Mask32x8) Float32x8 + +// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. +// +// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedMultiplyAddSub132(y Float32x8, z Float32x8, u Mask32x8) Float32x8 + +// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// +// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedMultiplyAddSub213(y Float32x8, z Float32x8, u Mask32x8) Float32x8 + +// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. +// +// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedMultiplyAddSub231(y Float32x8, z Float32x8, u Mask32x8) Float32x8 + +// FusedMultiplySub132 performs `(v1 * v3) - v2`. +// +// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedMultiplySub132(y Float32x8, z Float32x8, u Mask32x8) Float32x8 + +// FusedMultiplySub213 performs `(v2 * v1) - v3`. +// +// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedMultiplySub213(y Float32x8, z Float32x8, u Mask32x8) Float32x8 + +// FusedMultiplySub231 performs `(v2 * v3) - v1`. 
+// +// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedMultiplySub231(y Float32x8, z Float32x8, u Mask32x8) Float32x8 + +// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. +// +// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedMultiplySubAdd132(y Float32x8, z Float32x8, u Mask32x8) Float32x8 + +// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// +// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedMultiplySubAdd213(y Float32x8, z Float32x8, u Mask32x8) Float32x8 + +// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. +// +// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedMultiplySubAdd231(y Float32x8, z Float32x8, u Mask32x8) Float32x8 + +// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. +// +// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedNegativeMultiplyAdd132(y Float32x8, z Float32x8, u Mask32x8) Float32x8 + +// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. +// +// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedNegativeMultiplyAdd213(y Float32x8, z Float32x8, u Mask32x8) Float32x8 + +// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. +// +// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedNegativeMultiplyAdd231(y Float32x8, z Float32x8, u Mask32x8) Float32x8 + +// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. +// +// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedNegativeMultiplySub132(y Float32x8, z Float32x8, u Mask32x8) Float32x8 + +// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. +// +// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedNegativeMultiplySub213(y Float32x8, z Float32x8, u Mask32x8) Float32x8 + +// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. +// +// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedNegativeMultiplySub231(y Float32x8, z Float32x8, u Mask32x8) Float32x8 + +// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. +// +// Asm: VFMADD132PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedMultiplyAdd132(y Float64x2, z Float64x2, u Mask64x2) Float64x2 + +// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// +// Asm: VFMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedMultiplyAdd213(y Float64x2, z Float64x2, u Mask64x2) Float64x2 + +// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. +// +// Asm: VFMADD231PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedMultiplyAdd231(y Float64x2, z Float64x2, u Mask64x2) Float64x2 + +// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. +// +// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedMultiplyAddSub132(y Float64x2, z Float64x2, u Mask64x2) Float64x2 + +// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// +// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedMultiplyAddSub213(y Float64x2, z Float64x2, u Mask64x2) Float64x2 + +// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. 
+// +// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedMultiplyAddSub231(y Float64x2, z Float64x2, u Mask64x2) Float64x2 + +// FusedMultiplySub132 performs `(v1 * v3) - v2`. +// +// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedMultiplySub132(y Float64x2, z Float64x2, u Mask64x2) Float64x2 + +// FusedMultiplySub213 performs `(v2 * v1) - v3`. +// +// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedMultiplySub213(y Float64x2, z Float64x2, u Mask64x2) Float64x2 + +// FusedMultiplySub231 performs `(v2 * v3) - v1`. +// +// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedMultiplySub231(y Float64x2, z Float64x2, u Mask64x2) Float64x2 + +// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. +// +// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedMultiplySubAdd132(y Float64x2, z Float64x2, u Mask64x2) Float64x2 + +// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// +// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedMultiplySubAdd213(y Float64x2, z Float64x2, u Mask64x2) Float64x2 + +// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. +// +// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedMultiplySubAdd231(y Float64x2, z Float64x2, u Mask64x2) Float64x2 + +// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. +// +// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedNegativeMultiplyAdd132(y Float64x2, z Float64x2, u Mask64x2) Float64x2 + +// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. +// +// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedNegativeMultiplyAdd213(y Float64x2, z Float64x2, u Mask64x2) Float64x2 + +// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. +// +// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedNegativeMultiplyAdd231(y Float64x2, z Float64x2, u Mask64x2) Float64x2 + +// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. +// +// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedNegativeMultiplySub132(y Float64x2, z Float64x2, u Mask64x2) Float64x2 + +// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. +// +// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedNegativeMultiplySub213(y Float64x2, z Float64x2, u Mask64x2) Float64x2 + +// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. +// +// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedNegativeMultiplySub231(y Float64x2, z Float64x2, u Mask64x2) Float64x2 + +// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. +// +// Asm: VFMADD132PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedMultiplyAdd132(y Float64x4, z Float64x4, u Mask64x4) Float64x4 + +// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// +// Asm: VFMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedMultiplyAdd213(y Float64x4, z Float64x4, u Mask64x4) Float64x4 + +// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. 
+// +// Asm: VFMADD231PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedMultiplyAdd231(y Float64x4, z Float64x4, u Mask64x4) Float64x4 + +// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. +// +// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedMultiplyAddSub132(y Float64x4, z Float64x4, u Mask64x4) Float64x4 + +// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// +// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedMultiplyAddSub213(y Float64x4, z Float64x4, u Mask64x4) Float64x4 + +// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. +// +// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedMultiplyAddSub231(y Float64x4, z Float64x4, u Mask64x4) Float64x4 + +// FusedMultiplySub132 performs `(v1 * v3) - v2`. +// +// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedMultiplySub132(y Float64x4, z Float64x4, u Mask64x4) Float64x4 + +// FusedMultiplySub213 performs `(v2 * v1) - v3`. +// +// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedMultiplySub213(y Float64x4, z Float64x4, u Mask64x4) Float64x4 + +// FusedMultiplySub231 performs `(v2 * v3) - v1`. +// +// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedMultiplySub231(y Float64x4, z Float64x4, u Mask64x4) Float64x4 + +// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. +// +// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedMultiplySubAdd132(y Float64x4, z Float64x4, u Mask64x4) Float64x4 + +// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// +// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedMultiplySubAdd213(y Float64x4, z Float64x4, u Mask64x4) Float64x4 + +// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. +// +// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedMultiplySubAdd231(y Float64x4, z Float64x4, u Mask64x4) Float64x4 + +// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. +// +// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedNegativeMultiplyAdd132(y Float64x4, z Float64x4, u Mask64x4) Float64x4 + +// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. +// +// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedNegativeMultiplyAdd213(y Float64x4, z Float64x4, u Mask64x4) Float64x4 + +// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. +// +// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedNegativeMultiplyAdd231(y Float64x4, z Float64x4, u Mask64x4) Float64x4 + +// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. +// +// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedNegativeMultiplySub132(y Float64x4, z Float64x4, u Mask64x4) Float64x4 + +// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. +// +// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedNegativeMultiplySub213(y Float64x4, z Float64x4, u Mask64x4) Float64x4 + +// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. 
+// +// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedNegativeMultiplySub231(y Float64x4, z Float64x4, u Mask64x4) Float64x4 + +// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. +// +// Asm: VFMADD132PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedMultiplyAdd132(y Float64x8, z Float64x8, u Mask64x8) Float64x8 + +// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// +// Asm: VFMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedMultiplyAdd213(y Float64x8, z Float64x8, u Mask64x8) Float64x8 + +// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. +// +// Asm: VFMADD231PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedMultiplyAdd231(y Float64x8, z Float64x8, u Mask64x8) Float64x8 + +// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. +// +// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedMultiplyAddSub132(y Float64x8, z Float64x8, u Mask64x8) Float64x8 + +// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// +// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedMultiplyAddSub213(y Float64x8, z Float64x8, u Mask64x8) Float64x8 + +// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. +// +// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedMultiplyAddSub231(y Float64x8, z Float64x8, u Mask64x8) Float64x8 + +// FusedMultiplySub132 performs `(v1 * v3) - v2`. +// +// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedMultiplySub132(y Float64x8, z Float64x8, u Mask64x8) Float64x8 + +// FusedMultiplySub213 performs `(v2 * v1) - v3`. +// +// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedMultiplySub213(y Float64x8, z Float64x8, u Mask64x8) Float64x8 + +// FusedMultiplySub231 performs `(v2 * v3) - v1`. +// +// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedMultiplySub231(y Float64x8, z Float64x8, u Mask64x8) Float64x8 + +// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. +// +// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedMultiplySubAdd132(y Float64x8, z Float64x8, u Mask64x8) Float64x8 + +// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// +// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedMultiplySubAdd213(y Float64x8, z Float64x8, u Mask64x8) Float64x8 + +// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. +// +// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedMultiplySubAdd231(y Float64x8, z Float64x8, u Mask64x8) Float64x8 + +// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. +// +// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedNegativeMultiplyAdd132(y Float64x8, z Float64x8, u Mask64x8) Float64x8 + +// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. +// +// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedNegativeMultiplyAdd213(y Float64x8, z Float64x8, u Mask64x8) Float64x8 + +// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. 
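A companion sketch for the masked variants, under the same assumptions as the sketch above (import path, build tag). Only the Float64x8 and Mask64x8 types and the MaskedFusedMultiplyAdd213 method are taken from this patch; the declarations quoted here do not spell out the per-lane masking semantics, so none are asserted.

	//go:build goexperiment.simd

	package fmasketch

	import "internal/simd" // assumed import path, as in the sketch above

	// maskedMulAdd213 demonstrates only the signature of a masked variant:
	// the trailing Mask64x8 argument presumably controls which lanes
	// participate, but that behavior is not documented in the quoted
	// declarations, so no claim is made about it here.
	func maskedMulAdd213(x, y, z simd.Float64x8, m simd.Mask64x8) simd.Float64x8 {
		return x.MaskedFusedMultiplyAdd213(y, z, m)
	}
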
+// +// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedNegativeMultiplyAdd231(y Float64x8, z Float64x8, u Mask64x8) Float64x8 + +// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. +// +// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedNegativeMultiplySub132(y Float64x8, z Float64x8, u Mask64x8) Float64x8 + +// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. +// +// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedNegativeMultiplySub213(y Float64x8, z Float64x8, u Mask64x8) Float64x8 + +// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. +// +// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedNegativeMultiplySub231(y Float64x8, z Float64x8, u Mask64x8) Float64x8 + // PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // // Asm: VPDPWSSD, CPU Feature: AVX512EVEX From b9a548775fda6a74de8ab2020b2b95b4ebf1a2a9 Mon Sep 17 00:00:00 2001 From: David Chase Date: Tue, 10 Jun 2025 14:15:46 -0400 Subject: [PATCH 026/139] cmd/compile: add up-to-date test for generated files This runs the ssa/_gen generator writing files into a temporary directory, and then checks that there are no differences with what is currently in the ssa directory, and also checks that any file with the "generated from _gen/..." header was actually generated, and checks that the headers on the generated file match the expected header prefix. Change-Id: Ic8eeb0b06cf6f2e576a013e865b331a12d3a77aa Reviewed-on: https://go-review.googlesource.com/c/go/+/680615 LUCI-TryBot-Result: Go LUCI Reviewed-by: Keith Randall Reviewed-by: Keith Randall (cherry picked from commit d4c6effaa7b95a2ea149ece4a400c0ace2773839) Reviewed-on: https://go-review.googlesource.com/c/go/+/680975 TryBot-Bypass: David Chase Reviewed-by: Junyang Shao --- src/cmd/compile/internal/ssa/_gen/README | 5 + .../compile/internal/ssa/_gen/allocators.go | 2 +- src/cmd/compile/internal/ssa/_gen/main.go | 14 +- src/cmd/compile/internal/ssa/_gen/rulegen.go | 2 +- .../_gen/vendor/golang.org/x/tools/LICENSE | 27 + .../_gen/vendor/golang.org/x/tools/PATENTS | 22 + .../x/tools/go/ast/astutil/enclosing.go | 654 ++++++++++++++++++ .../x/tools/go/ast/astutil/imports.go | 490 +++++++++++++ .../x/tools/go/ast/astutil/rewrite.go | 486 +++++++++++++ .../golang.org/x/tools/go/ast/astutil/util.go | 11 + .../internal/ssa/_gen/vendor/modules.txt | 3 + src/cmd/compile/internal/ssa/generate_test.go | 135 ++++ 12 files changed, 1848 insertions(+), 3 deletions(-) create mode 100644 src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/LICENSE create mode 100644 src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/PATENTS create mode 100644 src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go create mode 100644 src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/go/ast/astutil/imports.go create mode 100644 src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go create mode 100644 src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/go/ast/astutil/util.go create mode 100644 src/cmd/compile/internal/ssa/_gen/vendor/modules.txt create mode 100644 src/cmd/compile/internal/ssa/generate_test.go diff --git a/src/cmd/compile/internal/ssa/_gen/README b/src/cmd/compile/internal/ssa/_gen/README index 74b81c2814330c..a8242f93527097 100644 --- a/src/cmd/compile/internal/ssa/_gen/README +++ b/src/cmd/compile/internal/ssa/_gen/README @@ 
-9,3 +9,8 @@ more information. To regenerate everything, run "go generate" on the ssa package in the parent directory. + +The parent directory contains a test in generate_test.go that will fail +if the generated files are not up-to-date, and to allow that test to +run in no-network environments, golang.org/x/tools/go/ast/astutil is +vendored. diff --git a/src/cmd/compile/internal/ssa/_gen/allocators.go b/src/cmd/compile/internal/ssa/_gen/allocators.go index 682fc5f20217e9..38acc5133abe86 100644 --- a/src/cmd/compile/internal/ssa/_gen/allocators.go +++ b/src/cmd/compile/internal/ssa/_gen/allocators.go @@ -155,7 +155,7 @@ func genAllocators() { panic(err) } - if err := os.WriteFile("../allocators.go", b, 0666); err != nil { + if err := os.WriteFile(outFile("allocators.go"), b, 0666); err != nil { log.Fatalf("can't write output: %v\n", err) } } diff --git a/src/cmd/compile/internal/ssa/_gen/main.go b/src/cmd/compile/internal/ssa/_gen/main.go index 13d3ce6f8f6305..5b85cec79c0e93 100644 --- a/src/cmd/compile/internal/ssa/_gen/main.go +++ b/src/cmd/compile/internal/ssa/_gen/main.go @@ -114,6 +114,7 @@ var archs []arch var cpuprofile = flag.String("cpuprofile", "", "write cpu profile to `file`") var memprofile = flag.String("memprofile", "", "write memory profile to `file`") var tracefile = flag.String("trace", "", "write trace to `file`") +var outDir = flag.String("outdir", "..", "directory in which to write generated files") func main() { flag.Parse() @@ -145,6 +146,13 @@ func main() { defer trace.Stop() } + if *outDir != ".." { + err := os.MkdirAll(*outDir, 0755) + if err != nil { + log.Fatalf("failed to create output directory: %v", err) + } + } + slices.SortFunc(archs, func(a, b arch) int { return strings.Compare(a.name, b.name) }) @@ -194,6 +202,10 @@ func main() { } } +func outFile(file string) string { + return *outDir + "/" + file +} + func genOp() { w := new(bytes.Buffer) fmt.Fprintf(w, "// Code generated from _gen/*Ops.go using 'go generate'; DO NOT EDIT.\n") @@ -501,7 +513,7 @@ func genOp() { panic(err) } - if err := os.WriteFile("../opGen.go", b, 0666); err != nil { + if err := os.WriteFile(outFile("opGen.go"), b, 0666); err != nil { log.Fatalf("can't write output: %v\n", err) } diff --git a/src/cmd/compile/internal/ssa/_gen/rulegen.go b/src/cmd/compile/internal/ssa/_gen/rulegen.go index 558bbab6a75a9d..5e66398927793b 100644 --- a/src/cmd/compile/internal/ssa/_gen/rulegen.go +++ b/src/cmd/compile/internal/ssa/_gen/rulegen.go @@ -331,7 +331,7 @@ func genRulesSuffix(arch arch, suff string) { file = astutil.Apply(file, pre, post).(*ast.File) // Write the well-formatted source to file - f, err := os.Create("../rewrite" + arch.name + suff + ".go") + f, err := os.Create(outFile("rewrite" + arch.name + suff + ".go")) if err != nil { log.Fatalf("can't write output: %v", err) } diff --git a/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/LICENSE b/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/LICENSE new file mode 100644 index 00000000000000..2a7cf70da6e498 --- /dev/null +++ b/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/LICENSE @@ -0,0 +1,27 @@ +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/PATENTS b/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/PATENTS new file mode 100644 index 00000000000000..733099041f84fa --- /dev/null +++ b/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go b/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go new file mode 100644 index 00000000000000..6e34df46130b75 --- /dev/null +++ b/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go @@ -0,0 +1,654 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package astutil + +// This file defines utilities for working with source positions. 
+ +import ( + "fmt" + "go/ast" + "go/token" + "sort" +) + +// PathEnclosingInterval returns the node that encloses the source +// interval [start, end), and all its ancestors up to the AST root. +// +// The definition of "enclosing" used by this function considers +// additional whitespace abutting a node to be enclosed by it. +// In this example: +// +// z := x + y // add them +// <-A-> +// <----B-----> +// +// the ast.BinaryExpr(+) node is considered to enclose interval B +// even though its [Pos()..End()) is actually only interval A. +// This behaviour makes user interfaces more tolerant of imperfect +// input. +// +// This function treats tokens as nodes, though they are not included +// in the result. e.g. PathEnclosingInterval("+") returns the +// enclosing ast.BinaryExpr("x + y"). +// +// If start==end, the 1-char interval following start is used instead. +// +// The 'exact' result is true if the interval contains only path[0] +// and perhaps some adjacent whitespace. It is false if the interval +// overlaps multiple children of path[0], or if it contains only +// interior whitespace of path[0]. +// In this example: +// +// z := x + y // add them +// <--C--> <---E--> +// ^ +// D +// +// intervals C, D and E are inexact. C is contained by the +// z-assignment statement, because it spans three of its children (:=, +// x, +). So too is the 1-char interval D, because it contains only +// interior whitespace of the assignment. E is considered interior +// whitespace of the BlockStmt containing the assignment. +// +// The resulting path is never empty; it always contains at least the +// 'root' *ast.File. Ideally PathEnclosingInterval would reject +// intervals that lie wholly or partially outside the range of the +// file, but unfortunately ast.File records only the token.Pos of +// the 'package' keyword, but not of the start of the file itself. +func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Node, exact bool) { + // fmt.Printf("EnclosingInterval %d %d\n", start, end) // debugging + + // Precondition: node.[Pos..End) and adjoining whitespace contain [start, end). + var visit func(node ast.Node) bool + visit = func(node ast.Node) bool { + path = append(path, node) + + nodePos := node.Pos() + nodeEnd := node.End() + + // fmt.Printf("visit(%T, %d, %d)\n", node, nodePos, nodeEnd) // debugging + + // Intersect [start, end) with interval of node. + if start < nodePos { + start = nodePos + } + if end > nodeEnd { + end = nodeEnd + } + + // Find sole child that contains [start, end). + children := childrenOf(node) + l := len(children) + for i, child := range children { + // [childPos, childEnd) is unaugmented interval of child. + childPos := child.Pos() + childEnd := child.End() + + // [augPos, augEnd) is whitespace-augmented interval of child. + augPos := childPos + augEnd := childEnd + if i > 0 { + augPos = children[i-1].End() // start of preceding whitespace + } + if i < l-1 { + nextChildPos := children[i+1].Pos() + // Does [start, end) lie between child and next child? + if start >= augEnd && end <= nextChildPos { + return false // inexact match + } + augEnd = nextChildPos // end of following whitespace + } + + // fmt.Printf("\tchild %d: [%d..%d)\tcontains interval [%d..%d)?\n", + // i, augPos, augEnd, start, end) // debugging + + // Does augmented child strictly contain [start, end)? + if augPos <= start && end <= augEnd { + if is[tokenNode](child) { + return true + } + + // childrenOf elides the FuncType node beneath FuncDecl. 
+ // Add it back here for TypeParams, Params, Results, + // all FieldLists). But we don't add it back for the "func" token + // even though it is is the tree at FuncDecl.Type.Func. + if decl, ok := node.(*ast.FuncDecl); ok { + if fields, ok := child.(*ast.FieldList); ok && fields != decl.Recv { + path = append(path, decl.Type) + } + } + + return visit(child) + } + + // Does [start, end) overlap multiple children? + // i.e. left-augmented child contains start + // but LR-augmented child does not contain end. + if start < childEnd && end > augEnd { + break + } + } + + // No single child contained [start, end), + // so node is the result. Is it exact? + + // (It's tempting to put this condition before the + // child loop, but it gives the wrong result in the + // case where a node (e.g. ExprStmt) and its sole + // child have equal intervals.) + if start == nodePos && end == nodeEnd { + return true // exact match + } + + return false // inexact: overlaps multiple children + } + + // Ensure [start,end) is nondecreasing. + if start > end { + start, end = end, start + } + + if start < root.End() && end > root.Pos() { + if start == end { + end = start + 1 // empty interval => interval of size 1 + } + exact = visit(root) + + // Reverse the path: + for i, l := 0, len(path); i < l/2; i++ { + path[i], path[l-1-i] = path[l-1-i], path[i] + } + } else { + // Selection lies within whitespace preceding the + // first (or following the last) declaration in the file. + // The result nonetheless always includes the ast.File. + path = append(path, root) + } + + return +} + +// tokenNode is a dummy implementation of ast.Node for a single token. +// They are used transiently by PathEnclosingInterval but never escape +// this package. +type tokenNode struct { + pos token.Pos + end token.Pos +} + +func (n tokenNode) Pos() token.Pos { + return n.pos +} + +func (n tokenNode) End() token.Pos { + return n.end +} + +func tok(pos token.Pos, len int) ast.Node { + return tokenNode{pos, pos + token.Pos(len)} +} + +// childrenOf returns the direct non-nil children of ast.Node n. +// It may include fake ast.Node implementations for bare tokens. +// it is not safe to call (e.g.) ast.Walk on such nodes. +func childrenOf(n ast.Node) []ast.Node { + var children []ast.Node + + // First add nodes for all true subtrees. + ast.Inspect(n, func(node ast.Node) bool { + if node == n { // push n + return true // recur + } + if node != nil { // push child + children = append(children, node) + } + return false // no recursion + }) + + // Then add fake Nodes for bare tokens. 
+ switch n := n.(type) { + case *ast.ArrayType: + children = append(children, + tok(n.Lbrack, len("[")), + tok(n.Elt.End(), len("]"))) + + case *ast.AssignStmt: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.BasicLit: + children = append(children, + tok(n.ValuePos, len(n.Value))) + + case *ast.BinaryExpr: + children = append(children, tok(n.OpPos, len(n.Op.String()))) + + case *ast.BlockStmt: + children = append(children, + tok(n.Lbrace, len("{")), + tok(n.Rbrace, len("}"))) + + case *ast.BranchStmt: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.CallExpr: + children = append(children, + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + if n.Ellipsis != 0 { + children = append(children, tok(n.Ellipsis, len("..."))) + } + + case *ast.CaseClause: + if n.List == nil { + children = append(children, + tok(n.Case, len("default"))) + } else { + children = append(children, + tok(n.Case, len("case"))) + } + children = append(children, tok(n.Colon, len(":"))) + + case *ast.ChanType: + switch n.Dir { + case ast.RECV: + children = append(children, tok(n.Begin, len("<-chan"))) + case ast.SEND: + children = append(children, tok(n.Begin, len("chan<-"))) + case ast.RECV | ast.SEND: + children = append(children, tok(n.Begin, len("chan"))) + } + + case *ast.CommClause: + if n.Comm == nil { + children = append(children, + tok(n.Case, len("default"))) + } else { + children = append(children, + tok(n.Case, len("case"))) + } + children = append(children, tok(n.Colon, len(":"))) + + case *ast.Comment: + // nop + + case *ast.CommentGroup: + // nop + + case *ast.CompositeLit: + children = append(children, + tok(n.Lbrace, len("{")), + tok(n.Rbrace, len("{"))) + + case *ast.DeclStmt: + // nop + + case *ast.DeferStmt: + children = append(children, + tok(n.Defer, len("defer"))) + + case *ast.Ellipsis: + children = append(children, + tok(n.Ellipsis, len("..."))) + + case *ast.EmptyStmt: + // nop + + case *ast.ExprStmt: + // nop + + case *ast.Field: + // TODO(adonovan): Field.{Doc,Comment,Tag}? + + case *ast.FieldList: + children = append(children, + tok(n.Opening, len("(")), // or len("[") + tok(n.Closing, len(")"))) // or len("]") + + case *ast.File: + // TODO test: Doc + children = append(children, + tok(n.Package, len("package"))) + + case *ast.ForStmt: + children = append(children, + tok(n.For, len("for"))) + + case *ast.FuncDecl: + // TODO(adonovan): FuncDecl.Comment? + + // Uniquely, FuncDecl breaks the invariant that + // preorder traversal yields tokens in lexical order: + // in fact, FuncDecl.Recv precedes FuncDecl.Type.Func. + // + // As a workaround, we inline the case for FuncType + // here and order things correctly. + // We also need to insert the elided FuncType just + // before the 'visit' recursion. 
+ // + children = nil // discard ast.Walk(FuncDecl) info subtrees + children = append(children, tok(n.Type.Func, len("func"))) + if n.Recv != nil { + children = append(children, n.Recv) + } + children = append(children, n.Name) + if tparams := n.Type.TypeParams; tparams != nil { + children = append(children, tparams) + } + if n.Type.Params != nil { + children = append(children, n.Type.Params) + } + if n.Type.Results != nil { + children = append(children, n.Type.Results) + } + if n.Body != nil { + children = append(children, n.Body) + } + + case *ast.FuncLit: + // nop + + case *ast.FuncType: + if n.Func != 0 { + children = append(children, + tok(n.Func, len("func"))) + } + + case *ast.GenDecl: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + if n.Lparen != 0 { + children = append(children, + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + } + + case *ast.GoStmt: + children = append(children, + tok(n.Go, len("go"))) + + case *ast.Ident: + children = append(children, + tok(n.NamePos, len(n.Name))) + + case *ast.IfStmt: + children = append(children, + tok(n.If, len("if"))) + + case *ast.ImportSpec: + // TODO(adonovan): ImportSpec.{Doc,EndPos}? + + case *ast.IncDecStmt: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.IndexExpr: + children = append(children, + tok(n.Lbrack, len("[")), + tok(n.Rbrack, len("]"))) + + case *ast.IndexListExpr: + children = append(children, + tok(n.Lbrack, len("[")), + tok(n.Rbrack, len("]"))) + + case *ast.InterfaceType: + children = append(children, + tok(n.Interface, len("interface"))) + + case *ast.KeyValueExpr: + children = append(children, + tok(n.Colon, len(":"))) + + case *ast.LabeledStmt: + children = append(children, + tok(n.Colon, len(":"))) + + case *ast.MapType: + children = append(children, + tok(n.Map, len("map"))) + + case *ast.ParenExpr: + children = append(children, + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + + case *ast.RangeStmt: + children = append(children, + tok(n.For, len("for")), + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.ReturnStmt: + children = append(children, + tok(n.Return, len("return"))) + + case *ast.SelectStmt: + children = append(children, + tok(n.Select, len("select"))) + + case *ast.SelectorExpr: + // nop + + case *ast.SendStmt: + children = append(children, + tok(n.Arrow, len("<-"))) + + case *ast.SliceExpr: + children = append(children, + tok(n.Lbrack, len("[")), + tok(n.Rbrack, len("]"))) + + case *ast.StarExpr: + children = append(children, tok(n.Star, len("*"))) + + case *ast.StructType: + children = append(children, tok(n.Struct, len("struct"))) + + case *ast.SwitchStmt: + children = append(children, tok(n.Switch, len("switch"))) + + case *ast.TypeAssertExpr: + children = append(children, + tok(n.Lparen-1, len(".")), + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + + case *ast.TypeSpec: + // TODO(adonovan): TypeSpec.{Doc,Comment}? + + case *ast.TypeSwitchStmt: + children = append(children, tok(n.Switch, len("switch"))) + + case *ast.UnaryExpr: + children = append(children, tok(n.OpPos, len(n.Op.String()))) + + case *ast.ValueSpec: + // TODO(adonovan): ValueSpec.{Doc,Comment}? + + case *ast.BadDecl, *ast.BadExpr, *ast.BadStmt: + // nop + } + + // TODO(adonovan): opt: merge the logic of ast.Inspect() into + // the switch above so we can make interleaved callbacks for + // both Nodes and Tokens in the right order and avoid the need + // to sort. 
+ sort.Sort(byPos(children)) + + return children +} + +type byPos []ast.Node + +func (sl byPos) Len() int { + return len(sl) +} +func (sl byPos) Less(i, j int) bool { + return sl[i].Pos() < sl[j].Pos() +} +func (sl byPos) Swap(i, j int) { + sl[i], sl[j] = sl[j], sl[i] +} + +// NodeDescription returns a description of the concrete type of n suitable +// for a user interface. +// +// TODO(adonovan): in some cases (e.g. Field, FieldList, Ident, +// StarExpr) we could be much more specific given the path to the AST +// root. Perhaps we should do that. +func NodeDescription(n ast.Node) string { + switch n := n.(type) { + case *ast.ArrayType: + return "array type" + case *ast.AssignStmt: + return "assignment" + case *ast.BadDecl: + return "bad declaration" + case *ast.BadExpr: + return "bad expression" + case *ast.BadStmt: + return "bad statement" + case *ast.BasicLit: + return "basic literal" + case *ast.BinaryExpr: + return fmt.Sprintf("binary %s operation", n.Op) + case *ast.BlockStmt: + return "block" + case *ast.BranchStmt: + switch n.Tok { + case token.BREAK: + return "break statement" + case token.CONTINUE: + return "continue statement" + case token.GOTO: + return "goto statement" + case token.FALLTHROUGH: + return "fall-through statement" + } + case *ast.CallExpr: + if len(n.Args) == 1 && !n.Ellipsis.IsValid() { + return "function call (or conversion)" + } + return "function call" + case *ast.CaseClause: + return "case clause" + case *ast.ChanType: + return "channel type" + case *ast.CommClause: + return "communication clause" + case *ast.Comment: + return "comment" + case *ast.CommentGroup: + return "comment group" + case *ast.CompositeLit: + return "composite literal" + case *ast.DeclStmt: + return NodeDescription(n.Decl) + " statement" + case *ast.DeferStmt: + return "defer statement" + case *ast.Ellipsis: + return "ellipsis" + case *ast.EmptyStmt: + return "empty statement" + case *ast.ExprStmt: + return "expression statement" + case *ast.Field: + // Can be any of these: + // struct {x, y int} -- struct field(s) + // struct {T} -- anon struct field + // interface {I} -- interface embedding + // interface {f()} -- interface method + // func (A) func(B) C -- receiver, param(s), result(s) + return "field/method/parameter" + case *ast.FieldList: + return "field/method/parameter list" + case *ast.File: + return "source file" + case *ast.ForStmt: + return "for loop" + case *ast.FuncDecl: + return "function declaration" + case *ast.FuncLit: + return "function literal" + case *ast.FuncType: + return "function type" + case *ast.GenDecl: + switch n.Tok { + case token.IMPORT: + return "import declaration" + case token.CONST: + return "constant declaration" + case token.TYPE: + return "type declaration" + case token.VAR: + return "variable declaration" + } + case *ast.GoStmt: + return "go statement" + case *ast.Ident: + return "identifier" + case *ast.IfStmt: + return "if statement" + case *ast.ImportSpec: + return "import specification" + case *ast.IncDecStmt: + if n.Tok == token.INC { + return "increment statement" + } + return "decrement statement" + case *ast.IndexExpr: + return "index expression" + case *ast.IndexListExpr: + return "index list expression" + case *ast.InterfaceType: + return "interface type" + case *ast.KeyValueExpr: + return "key/value association" + case *ast.LabeledStmt: + return "statement label" + case *ast.MapType: + return "map type" + case *ast.Package: + return "package" + case *ast.ParenExpr: + return "parenthesized " + NodeDescription(n.X) + case *ast.RangeStmt: + 
return "range loop" + case *ast.ReturnStmt: + return "return statement" + case *ast.SelectStmt: + return "select statement" + case *ast.SelectorExpr: + return "selector" + case *ast.SendStmt: + return "channel send" + case *ast.SliceExpr: + return "slice expression" + case *ast.StarExpr: + return "*-operation" // load/store expr or pointer type + case *ast.StructType: + return "struct type" + case *ast.SwitchStmt: + return "switch statement" + case *ast.TypeAssertExpr: + return "type assertion" + case *ast.TypeSpec: + return "type specification" + case *ast.TypeSwitchStmt: + return "type switch" + case *ast.UnaryExpr: + return fmt.Sprintf("unary %s operation", n.Op) + case *ast.ValueSpec: + return "value specification" + + } + panic(fmt.Sprintf("unexpected node type: %T", n)) +} + +func is[T any](x any) bool { + _, ok := x.(T) + return ok +} diff --git a/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/go/ast/astutil/imports.go b/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/go/ast/astutil/imports.go new file mode 100644 index 00000000000000..a6b5ed0a8933eb --- /dev/null +++ b/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/go/ast/astutil/imports.go @@ -0,0 +1,490 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package astutil contains common utilities for working with the Go AST. +package astutil // import "golang.org/x/tools/go/ast/astutil" + +import ( + "fmt" + "go/ast" + "go/token" + "strconv" + "strings" +) + +// AddImport adds the import path to the file f, if absent. +func AddImport(fset *token.FileSet, f *ast.File, path string) (added bool) { + return AddNamedImport(fset, f, "", path) +} + +// AddNamedImport adds the import with the given name and path to the file f, if absent. +// If name is not empty, it is used to rename the import. +// +// For example, calling +// +// AddNamedImport(fset, f, "pathpkg", "path") +// +// adds +// +// import pathpkg "path" +func AddNamedImport(fset *token.FileSet, f *ast.File, name, path string) (added bool) { + if imports(f, name, path) { + return false + } + + newImport := &ast.ImportSpec{ + Path: &ast.BasicLit{ + Kind: token.STRING, + Value: strconv.Quote(path), + }, + } + if name != "" { + newImport.Name = &ast.Ident{Name: name} + } + + // Find an import decl to add to. + // The goal is to find an existing import + // whose import path has the longest shared + // prefix with path. + var ( + bestMatch = -1 // length of longest shared prefix + lastImport = -1 // index in f.Decls of the file's final import decl + impDecl *ast.GenDecl // import decl containing the best match + impIndex = -1 // spec index in impDecl containing the best match + + isThirdPartyPath = isThirdParty(path) + ) + for i, decl := range f.Decls { + gen, ok := decl.(*ast.GenDecl) + if ok && gen.Tok == token.IMPORT { + lastImport = i + // Do not add to import "C", to avoid disrupting the + // association with its doc comment, breaking cgo. + if declImports(gen, "C") { + continue + } + + // Match an empty import decl if that's all that is available. + if len(gen.Specs) == 0 && bestMatch == -1 { + impDecl = gen + } + + // Compute longest shared prefix with imports in this group and find best + // matched import spec. + // 1. Always prefer import spec with longest shared prefix. + // 2. While match length is 0, + // - for stdlib package: prefer first import spec. 
+ // - for third party package: prefer first third party import spec. + // We cannot use last import spec as best match for third party package + // because grouped imports are usually placed last by goimports -local + // flag. + // See issue #19190. + seenAnyThirdParty := false + for j, spec := range gen.Specs { + impspec := spec.(*ast.ImportSpec) + p := importPath(impspec) + n := matchLen(p, path) + if n > bestMatch || (bestMatch == 0 && !seenAnyThirdParty && isThirdPartyPath) { + bestMatch = n + impDecl = gen + impIndex = j + } + seenAnyThirdParty = seenAnyThirdParty || isThirdParty(p) + } + } + } + + // If no import decl found, add one after the last import. + if impDecl == nil { + impDecl = &ast.GenDecl{ + Tok: token.IMPORT, + } + if lastImport >= 0 { + impDecl.TokPos = f.Decls[lastImport].End() + } else { + // There are no existing imports. + // Our new import, preceded by a blank line, goes after the package declaration + // and after the comment, if any, that starts on the same line as the + // package declaration. + impDecl.TokPos = f.Package + + file := fset.File(f.Package) + pkgLine := file.Line(f.Package) + for _, c := range f.Comments { + if file.Line(c.Pos()) > pkgLine { + break + } + // +2 for a blank line + impDecl.TokPos = c.End() + 2 + } + } + f.Decls = append(f.Decls, nil) + copy(f.Decls[lastImport+2:], f.Decls[lastImport+1:]) + f.Decls[lastImport+1] = impDecl + } + + // Insert new import at insertAt. + insertAt := 0 + if impIndex >= 0 { + // insert after the found import + insertAt = impIndex + 1 + } + impDecl.Specs = append(impDecl.Specs, nil) + copy(impDecl.Specs[insertAt+1:], impDecl.Specs[insertAt:]) + impDecl.Specs[insertAt] = newImport + pos := impDecl.Pos() + if insertAt > 0 { + // If there is a comment after an existing import, preserve the comment + // position by adding the new import after the comment. + if spec, ok := impDecl.Specs[insertAt-1].(*ast.ImportSpec); ok && spec.Comment != nil { + pos = spec.Comment.End() + } else { + // Assign same position as the previous import, + // so that the sorter sees it as being in the same block. + pos = impDecl.Specs[insertAt-1].Pos() + } + } + if newImport.Name != nil { + newImport.Name.NamePos = pos + } + newImport.Path.ValuePos = pos + newImport.EndPos = pos + + // Clean up parens. impDecl contains at least one spec. + if len(impDecl.Specs) == 1 { + // Remove unneeded parens. + impDecl.Lparen = token.NoPos + } else if !impDecl.Lparen.IsValid() { + // impDecl needs parens added. + impDecl.Lparen = impDecl.Specs[0].Pos() + } + + f.Imports = append(f.Imports, newImport) + + if len(f.Decls) <= 1 { + return true + } + + // Merge all the import declarations into the first one. + var first *ast.GenDecl + for i := 0; i < len(f.Decls); i++ { + decl := f.Decls[i] + gen, ok := decl.(*ast.GenDecl) + if !ok || gen.Tok != token.IMPORT || declImports(gen, "C") { + continue + } + if first == nil { + first = gen + continue // Don't touch the first one. + } + // We now know there is more than one package in this import + // declaration. Ensure that it ends up parenthesized. + first.Lparen = first.Pos() + // Move the imports of the other import declaration to the first one. + for _, spec := range gen.Specs { + spec.(*ast.ImportSpec).Path.ValuePos = first.Pos() + first.Specs = append(first.Specs, spec) + } + f.Decls = append(f.Decls[:i], f.Decls[i+1:]...) + i-- + } + + return true +} + +func isThirdParty(importPath string) bool { + // Third party package import path usually contains "." (".com", ".org", ...) 
+ // This logic is taken from golang.org/x/tools/imports package. + return strings.Contains(importPath, ".") +} + +// DeleteImport deletes the import path from the file f, if present. +// If there are duplicate import declarations, all matching ones are deleted. +func DeleteImport(fset *token.FileSet, f *ast.File, path string) (deleted bool) { + return DeleteNamedImport(fset, f, "", path) +} + +// DeleteNamedImport deletes the import with the given name and path from the file f, if present. +// If there are duplicate import declarations, all matching ones are deleted. +func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (deleted bool) { + var delspecs []*ast.ImportSpec + var delcomments []*ast.CommentGroup + + // Find the import nodes that import path, if any. + for i := 0; i < len(f.Decls); i++ { + decl := f.Decls[i] + gen, ok := decl.(*ast.GenDecl) + if !ok || gen.Tok != token.IMPORT { + continue + } + for j := 0; j < len(gen.Specs); j++ { + spec := gen.Specs[j] + impspec := spec.(*ast.ImportSpec) + if importName(impspec) != name || importPath(impspec) != path { + continue + } + + // We found an import spec that imports path. + // Delete it. + delspecs = append(delspecs, impspec) + deleted = true + copy(gen.Specs[j:], gen.Specs[j+1:]) + gen.Specs = gen.Specs[:len(gen.Specs)-1] + + // If this was the last import spec in this decl, + // delete the decl, too. + if len(gen.Specs) == 0 { + copy(f.Decls[i:], f.Decls[i+1:]) + f.Decls = f.Decls[:len(f.Decls)-1] + i-- + break + } else if len(gen.Specs) == 1 { + if impspec.Doc != nil { + delcomments = append(delcomments, impspec.Doc) + } + if impspec.Comment != nil { + delcomments = append(delcomments, impspec.Comment) + } + for _, cg := range f.Comments { + // Found comment on the same line as the import spec. + if cg.End() < impspec.Pos() && fset.Position(cg.End()).Line == fset.Position(impspec.Pos()).Line { + delcomments = append(delcomments, cg) + break + } + } + + spec := gen.Specs[0].(*ast.ImportSpec) + + // Move the documentation right after the import decl. + if spec.Doc != nil { + for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Doc.Pos()).Line { + fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line) + } + } + for _, cg := range f.Comments { + if cg.End() < spec.Pos() && fset.Position(cg.End()).Line == fset.Position(spec.Pos()).Line { + for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Pos()).Line { + fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line) + } + break + } + } + } + if j > 0 { + lastImpspec := gen.Specs[j-1].(*ast.ImportSpec) + lastLine := fset.PositionFor(lastImpspec.Path.ValuePos, false).Line + line := fset.PositionFor(impspec.Path.ValuePos, false).Line + + // We deleted an entry but now there may be + // a blank line-sized hole where the import was. + if line-lastLine > 1 || !gen.Rparen.IsValid() { + // There was a blank line immediately preceding the deleted import, + // so there's no need to close the hole. The right parenthesis is + // invalid after AddImport to an import statement without parenthesis. + // Do nothing. + } else if line != fset.File(gen.Rparen).LineCount() { + // There was no blank line. Close the hole. + fset.File(gen.Rparen).MergeLine(line) + } + } + j-- + } + } + + // Delete imports from f.Imports. 
+ for i := 0; i < len(f.Imports); i++ { + imp := f.Imports[i] + for j, del := range delspecs { + if imp == del { + copy(f.Imports[i:], f.Imports[i+1:]) + f.Imports = f.Imports[:len(f.Imports)-1] + copy(delspecs[j:], delspecs[j+1:]) + delspecs = delspecs[:len(delspecs)-1] + i-- + break + } + } + } + + // Delete comments from f.Comments. + for i := 0; i < len(f.Comments); i++ { + cg := f.Comments[i] + for j, del := range delcomments { + if cg == del { + copy(f.Comments[i:], f.Comments[i+1:]) + f.Comments = f.Comments[:len(f.Comments)-1] + copy(delcomments[j:], delcomments[j+1:]) + delcomments = delcomments[:len(delcomments)-1] + i-- + break + } + } + } + + if len(delspecs) > 0 { + panic(fmt.Sprintf("deleted specs from Decls but not Imports: %v", delspecs)) + } + + return +} + +// RewriteImport rewrites any import of path oldPath to path newPath. +func RewriteImport(fset *token.FileSet, f *ast.File, oldPath, newPath string) (rewrote bool) { + for _, imp := range f.Imports { + if importPath(imp) == oldPath { + rewrote = true + // record old End, because the default is to compute + // it using the length of imp.Path.Value. + imp.EndPos = imp.End() + imp.Path.Value = strconv.Quote(newPath) + } + } + return +} + +// UsesImport reports whether a given import is used. +// The provided File must have been parsed with syntactic object resolution +// (not using go/parser.SkipObjectResolution). +func UsesImport(f *ast.File, path string) (used bool) { + if f.Scope == nil { + panic("file f was not parsed with syntactic object resolution") + } + spec := importSpec(f, path) + if spec == nil { + return + } + + name := spec.Name.String() + switch name { + case "": + // If the package name is not explicitly specified, + // make an educated guess. This is not guaranteed to be correct. + lastSlash := strings.LastIndex(path, "/") + if lastSlash == -1 { + name = path + } else { + name = path[lastSlash+1:] + } + case "_", ".": + // Not sure if this import is used - err on the side of caution. + return true + } + + ast.Walk(visitFn(func(n ast.Node) { + sel, ok := n.(*ast.SelectorExpr) + if ok && isTopName(sel.X, name) { + used = true + } + }), f) + + return +} + +type visitFn func(node ast.Node) + +func (fn visitFn) Visit(node ast.Node) ast.Visitor { + fn(node) + return fn +} + +// imports reports whether f has an import with the specified name and path. +func imports(f *ast.File, name, path string) bool { + for _, s := range f.Imports { + if importName(s) == name && importPath(s) == path { + return true + } + } + return false +} + +// importSpec returns the import spec if f imports path, +// or nil otherwise. +func importSpec(f *ast.File, path string) *ast.ImportSpec { + for _, s := range f.Imports { + if importPath(s) == path { + return s + } + } + return nil +} + +// importName returns the name of s, +// or "" if the import is not named. +func importName(s *ast.ImportSpec) string { + if s.Name == nil { + return "" + } + return s.Name.Name +} + +// importPath returns the unquoted import path of s, +// or "" if the path is not properly quoted. +func importPath(s *ast.ImportSpec) string { + t, err := strconv.Unquote(s.Path.Value) + if err != nil { + return "" + } + return t +} + +// declImports reports whether gen contains an import of path. 
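(Editorial aside, not part of the vendored file or of this patch: a small end-to-end sketch of the import helpers defined above — AddNamedImport, UsesImport, DeleteImport — driven on a freshly parsed file. The package name, file name, source text, and import paths are invented for illustration.)

package importsexample // hypothetical

import (
	"go/parser"
	"go/printer"
	"go/token"
	"os"

	"golang.org/x/tools/go/ast/astutil"
)

func demo() error {
	src := "package p\n\nimport \"fmt\"\n\nfunc f() { fmt.Println() }\n"
	fset := token.NewFileSet()
	// Parse without SkipObjectResolution so UsesImport can inspect f.Scope.
	f, err := parser.ParseFile(fset, "p.go", src, parser.ParseComments)
	if err != nil {
		return err
	}
	// Adds: import pathpkg "path"
	astutil.AddNamedImport(fset, f, "pathpkg", "path")
	// "fmt" is referenced via fmt.Println, so this delete is skipped.
	if !astutil.UsesImport(f, "fmt") {
		astutil.DeleteImport(fset, f, "fmt")
	}
	return printer.Fprint(os.Stdout, fset, f)
}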
+func declImports(gen *ast.GenDecl, path string) bool { + if gen.Tok != token.IMPORT { + return false + } + for _, spec := range gen.Specs { + impspec := spec.(*ast.ImportSpec) + if importPath(impspec) == path { + return true + } + } + return false +} + +// matchLen returns the length of the longest path segment prefix shared by x and y. +func matchLen(x, y string) int { + n := 0 + for i := 0; i < len(x) && i < len(y) && x[i] == y[i]; i++ { + if x[i] == '/' { + n++ + } + } + return n +} + +// isTopName returns true if n is a top-level unresolved identifier with the given name. +func isTopName(n ast.Expr, name string) bool { + id, ok := n.(*ast.Ident) + return ok && id.Name == name && id.Obj == nil +} + +// Imports returns the file imports grouped by paragraph. +func Imports(fset *token.FileSet, f *ast.File) [][]*ast.ImportSpec { + var groups [][]*ast.ImportSpec + + for _, decl := range f.Decls { + genDecl, ok := decl.(*ast.GenDecl) + if !ok || genDecl.Tok != token.IMPORT { + break + } + + group := []*ast.ImportSpec{} + + var lastLine int + for _, spec := range genDecl.Specs { + importSpec := spec.(*ast.ImportSpec) + pos := importSpec.Path.ValuePos + line := fset.Position(pos).Line + if lastLine > 0 && pos > 0 && line-lastLine > 1 { + groups = append(groups, group) + group = []*ast.ImportSpec{} + } + group = append(group, importSpec) + lastLine = line + } + groups = append(groups, group) + } + + return groups +} diff --git a/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go b/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go new file mode 100644 index 00000000000000..58934f76633d50 --- /dev/null +++ b/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go @@ -0,0 +1,486 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package astutil + +import ( + "fmt" + "go/ast" + "reflect" + "sort" +) + +// An ApplyFunc is invoked by Apply for each node n, even if n is nil, +// before and/or after the node's children, using a Cursor describing +// the current node and providing operations on it. +// +// The return value of ApplyFunc controls the syntax tree traversal. +// See Apply for details. +type ApplyFunc func(*Cursor) bool + +// Apply traverses a syntax tree recursively, starting with root, +// and calling pre and post for each node as described below. +// Apply returns the syntax tree, possibly modified. +// +// If pre is not nil, it is called for each node before the node's +// children are traversed (pre-order). If pre returns false, no +// children are traversed, and post is not called for that node. +// +// If post is not nil, and a prior call of pre didn't return false, +// post is called for each node after its children are traversed +// (post-order). If post returns false, traversal is terminated and +// Apply returns immediately. +// +// Only fields that refer to AST nodes are considered children; +// i.e., token.Pos, Scopes, Objects, and fields of basic types +// (strings, etc.) are ignored. +// +// Children are traversed in the order in which they appear in the +// respective node's struct definition. A package's files are +// traversed in the filenames' alphabetical order. 
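(Editorial aside, not part of the vendored file: a minimal usage sketch of the pre/post contract documented above, using Apply together with Cursor.Replace. It assumes the package is imported as astutil; the identifier names are made up for illustration.)

package rewriteexample // hypothetical

import (
	"go/ast"

	"golang.org/x/tools/go/ast/astutil"
)

// renameFooToBar replaces every identifier "foo" under root with "bar".
// The pre hook returns true so traversal continues into children;
// no post hook is needed for this rewrite.
func renameFooToBar(root ast.Node) ast.Node {
	return astutil.Apply(root, func(c *astutil.Cursor) bool {
		if id, ok := c.Node().(*ast.Ident); ok && id.Name == "foo" {
			c.Replace(&ast.Ident{Name: "bar", NamePos: id.NamePos})
		}
		return true
	}, nil)
}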
+func Apply(root ast.Node, pre, post ApplyFunc) (result ast.Node) { + parent := &struct{ ast.Node }{root} + defer func() { + if r := recover(); r != nil && r != abort { + panic(r) + } + result = parent.Node + }() + a := &application{pre: pre, post: post} + a.apply(parent, "Node", nil, root) + return +} + +var abort = new(int) // singleton, to signal termination of Apply + +// A Cursor describes a node encountered during Apply. +// Information about the node and its parent is available +// from the Node, Parent, Name, and Index methods. +// +// If p is a variable of type and value of the current parent node +// c.Parent(), and f is the field identifier with name c.Name(), +// the following invariants hold: +// +// p.f == c.Node() if c.Index() < 0 +// p.f[c.Index()] == c.Node() if c.Index() >= 0 +// +// The methods Replace, Delete, InsertBefore, and InsertAfter +// can be used to change the AST without disrupting Apply. +type Cursor struct { + parent ast.Node + name string + iter *iterator // valid if non-nil + node ast.Node +} + +// Node returns the current Node. +func (c *Cursor) Node() ast.Node { return c.node } + +// Parent returns the parent of the current Node. +func (c *Cursor) Parent() ast.Node { return c.parent } + +// Name returns the name of the parent Node field that contains the current Node. +// If the parent is a *ast.Package and the current Node is a *ast.File, Name returns +// the filename for the current Node. +func (c *Cursor) Name() string { return c.name } + +// Index reports the index >= 0 of the current Node in the slice of Nodes that +// contains it, or a value < 0 if the current Node is not part of a slice. +// The index of the current node changes if InsertBefore is called while +// processing the current node. +func (c *Cursor) Index() int { + if c.iter != nil { + return c.iter.index + } + return -1 +} + +// field returns the current node's parent field value. +func (c *Cursor) field() reflect.Value { + return reflect.Indirect(reflect.ValueOf(c.parent)).FieldByName(c.name) +} + +// Replace replaces the current Node with n. +// The replacement node is not walked by Apply. +func (c *Cursor) Replace(n ast.Node) { + if _, ok := c.node.(*ast.File); ok { + file, ok := n.(*ast.File) + if !ok { + panic("attempt to replace *ast.File with non-*ast.File") + } + c.parent.(*ast.Package).Files[c.name] = file + return + } + + v := c.field() + if i := c.Index(); i >= 0 { + v = v.Index(i) + } + v.Set(reflect.ValueOf(n)) +} + +// Delete deletes the current Node from its containing slice. +// If the current Node is not part of a slice, Delete panics. +// As a special case, if the current node is a package file, +// Delete removes it from the package's Files map. +func (c *Cursor) Delete() { + if _, ok := c.node.(*ast.File); ok { + delete(c.parent.(*ast.Package).Files, c.name) + return + } + + i := c.Index() + if i < 0 { + panic("Delete node not contained in slice") + } + v := c.field() + l := v.Len() + reflect.Copy(v.Slice(i, l), v.Slice(i+1, l)) + v.Index(l - 1).Set(reflect.Zero(v.Type().Elem())) + v.SetLen(l - 1) + c.iter.step-- +} + +// InsertAfter inserts n after the current Node in its containing slice. +// If the current Node is not part of a slice, InsertAfter panics. +// Apply does not walk n. 
+func (c *Cursor) InsertAfter(n ast.Node) { + i := c.Index() + if i < 0 { + panic("InsertAfter node not contained in slice") + } + v := c.field() + v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem()))) + l := v.Len() + reflect.Copy(v.Slice(i+2, l), v.Slice(i+1, l)) + v.Index(i + 1).Set(reflect.ValueOf(n)) + c.iter.step++ +} + +// InsertBefore inserts n before the current Node in its containing slice. +// If the current Node is not part of a slice, InsertBefore panics. +// Apply will not walk n. +func (c *Cursor) InsertBefore(n ast.Node) { + i := c.Index() + if i < 0 { + panic("InsertBefore node not contained in slice") + } + v := c.field() + v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem()))) + l := v.Len() + reflect.Copy(v.Slice(i+1, l), v.Slice(i, l)) + v.Index(i).Set(reflect.ValueOf(n)) + c.iter.index++ +} + +// application carries all the shared data so we can pass it around cheaply. +type application struct { + pre, post ApplyFunc + cursor Cursor + iter iterator +} + +func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast.Node) { + // convert typed nil into untyped nil + if v := reflect.ValueOf(n); v.Kind() == reflect.Ptr && v.IsNil() { + n = nil + } + + // avoid heap-allocating a new cursor for each apply call; reuse a.cursor instead + saved := a.cursor + a.cursor.parent = parent + a.cursor.name = name + a.cursor.iter = iter + a.cursor.node = n + + if a.pre != nil && !a.pre(&a.cursor) { + a.cursor = saved + return + } + + // walk children + // (the order of the cases matches the order of the corresponding node types in go/ast) + switch n := n.(type) { + case nil: + // nothing to do + + // Comments and fields + case *ast.Comment: + // nothing to do + + case *ast.CommentGroup: + if n != nil { + a.applyList(n, "List") + } + + case *ast.Field: + a.apply(n, "Doc", nil, n.Doc) + a.applyList(n, "Names") + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Tag", nil, n.Tag) + a.apply(n, "Comment", nil, n.Comment) + + case *ast.FieldList: + a.applyList(n, "List") + + // Expressions + case *ast.BadExpr, *ast.Ident, *ast.BasicLit: + // nothing to do + + case *ast.Ellipsis: + a.apply(n, "Elt", nil, n.Elt) + + case *ast.FuncLit: + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Body", nil, n.Body) + + case *ast.CompositeLit: + a.apply(n, "Type", nil, n.Type) + a.applyList(n, "Elts") + + case *ast.ParenExpr: + a.apply(n, "X", nil, n.X) + + case *ast.SelectorExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Sel", nil, n.Sel) + + case *ast.IndexExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Index", nil, n.Index) + + case *ast.IndexListExpr: + a.apply(n, "X", nil, n.X) + a.applyList(n, "Indices") + + case *ast.SliceExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Low", nil, n.Low) + a.apply(n, "High", nil, n.High) + a.apply(n, "Max", nil, n.Max) + + case *ast.TypeAssertExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Type", nil, n.Type) + + case *ast.CallExpr: + a.apply(n, "Fun", nil, n.Fun) + a.applyList(n, "Args") + + case *ast.StarExpr: + a.apply(n, "X", nil, n.X) + + case *ast.UnaryExpr: + a.apply(n, "X", nil, n.X) + + case *ast.BinaryExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Y", nil, n.Y) + + case *ast.KeyValueExpr: + a.apply(n, "Key", nil, n.Key) + a.apply(n, "Value", nil, n.Value) + + // Types + case *ast.ArrayType: + a.apply(n, "Len", nil, n.Len) + a.apply(n, "Elt", nil, n.Elt) + + case *ast.StructType: + a.apply(n, "Fields", nil, n.Fields) + + case *ast.FuncType: + if tparams := n.TypeParams; tparams != nil { + a.apply(n, "TypeParams", nil, tparams) + } + 
a.apply(n, "Params", nil, n.Params) + a.apply(n, "Results", nil, n.Results) + + case *ast.InterfaceType: + a.apply(n, "Methods", nil, n.Methods) + + case *ast.MapType: + a.apply(n, "Key", nil, n.Key) + a.apply(n, "Value", nil, n.Value) + + case *ast.ChanType: + a.apply(n, "Value", nil, n.Value) + + // Statements + case *ast.BadStmt: + // nothing to do + + case *ast.DeclStmt: + a.apply(n, "Decl", nil, n.Decl) + + case *ast.EmptyStmt: + // nothing to do + + case *ast.LabeledStmt: + a.apply(n, "Label", nil, n.Label) + a.apply(n, "Stmt", nil, n.Stmt) + + case *ast.ExprStmt: + a.apply(n, "X", nil, n.X) + + case *ast.SendStmt: + a.apply(n, "Chan", nil, n.Chan) + a.apply(n, "Value", nil, n.Value) + + case *ast.IncDecStmt: + a.apply(n, "X", nil, n.X) + + case *ast.AssignStmt: + a.applyList(n, "Lhs") + a.applyList(n, "Rhs") + + case *ast.GoStmt: + a.apply(n, "Call", nil, n.Call) + + case *ast.DeferStmt: + a.apply(n, "Call", nil, n.Call) + + case *ast.ReturnStmt: + a.applyList(n, "Results") + + case *ast.BranchStmt: + a.apply(n, "Label", nil, n.Label) + + case *ast.BlockStmt: + a.applyList(n, "List") + + case *ast.IfStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Cond", nil, n.Cond) + a.apply(n, "Body", nil, n.Body) + a.apply(n, "Else", nil, n.Else) + + case *ast.CaseClause: + a.applyList(n, "List") + a.applyList(n, "Body") + + case *ast.SwitchStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Tag", nil, n.Tag) + a.apply(n, "Body", nil, n.Body) + + case *ast.TypeSwitchStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Assign", nil, n.Assign) + a.apply(n, "Body", nil, n.Body) + + case *ast.CommClause: + a.apply(n, "Comm", nil, n.Comm) + a.applyList(n, "Body") + + case *ast.SelectStmt: + a.apply(n, "Body", nil, n.Body) + + case *ast.ForStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Cond", nil, n.Cond) + a.apply(n, "Post", nil, n.Post) + a.apply(n, "Body", nil, n.Body) + + case *ast.RangeStmt: + a.apply(n, "Key", nil, n.Key) + a.apply(n, "Value", nil, n.Value) + a.apply(n, "X", nil, n.X) + a.apply(n, "Body", nil, n.Body) + + // Declarations + case *ast.ImportSpec: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Name", nil, n.Name) + a.apply(n, "Path", nil, n.Path) + a.apply(n, "Comment", nil, n.Comment) + + case *ast.ValueSpec: + a.apply(n, "Doc", nil, n.Doc) + a.applyList(n, "Names") + a.apply(n, "Type", nil, n.Type) + a.applyList(n, "Values") + a.apply(n, "Comment", nil, n.Comment) + + case *ast.TypeSpec: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Name", nil, n.Name) + if tparams := n.TypeParams; tparams != nil { + a.apply(n, "TypeParams", nil, tparams) + } + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Comment", nil, n.Comment) + + case *ast.BadDecl: + // nothing to do + + case *ast.GenDecl: + a.apply(n, "Doc", nil, n.Doc) + a.applyList(n, "Specs") + + case *ast.FuncDecl: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Recv", nil, n.Recv) + a.apply(n, "Name", nil, n.Name) + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Body", nil, n.Body) + + // Files and packages + case *ast.File: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Name", nil, n.Name) + a.applyList(n, "Decls") + // Don't walk n.Comments; they have either been walked already if + // they are Doc comments, or they can be easily walked explicitly. 
+ + case *ast.Package: + // collect and sort names for reproducible behavior + var names []string + for name := range n.Files { + names = append(names, name) + } + sort.Strings(names) + for _, name := range names { + a.apply(n, name, nil, n.Files[name]) + } + + default: + panic(fmt.Sprintf("Apply: unexpected node type %T", n)) + } + + if a.post != nil && !a.post(&a.cursor) { + panic(abort) + } + + a.cursor = saved +} + +// An iterator controls iteration over a slice of nodes. +type iterator struct { + index, step int +} + +func (a *application) applyList(parent ast.Node, name string) { + // avoid heap-allocating a new iterator for each applyList call; reuse a.iter instead + saved := a.iter + a.iter.index = 0 + for { + // must reload parent.name each time, since cursor modifications might change it + v := reflect.Indirect(reflect.ValueOf(parent)).FieldByName(name) + if a.iter.index >= v.Len() { + break + } + + // element x may be nil in a bad AST - be cautious + var x ast.Node + if e := v.Index(a.iter.index); e.IsValid() { + x = e.Interface().(ast.Node) + } + + a.iter.step = 1 + a.apply(parent, name, &a.iter, x) + a.iter.index += a.iter.step + } + a.iter = saved +} diff --git a/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/go/ast/astutil/util.go b/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/go/ast/astutil/util.go new file mode 100644 index 00000000000000..ca71e3e1055387 --- /dev/null +++ b/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/go/ast/astutil/util.go @@ -0,0 +1,11 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package astutil + +import "go/ast" + +// Unparen returns e with any enclosing parentheses stripped. +// Deprecated: use [ast.Unparen]. +func Unparen(e ast.Expr) ast.Expr { return ast.Unparen(e) } diff --git a/src/cmd/compile/internal/ssa/_gen/vendor/modules.txt b/src/cmd/compile/internal/ssa/_gen/vendor/modules.txt new file mode 100644 index 00000000000000..2efa97223356a2 --- /dev/null +++ b/src/cmd/compile/internal/ssa/_gen/vendor/modules.txt @@ -0,0 +1,3 @@ +# golang.org/x/tools v0.27.0 +## explicit; go 1.22.0 +golang.org/x/tools/go/ast/astutil diff --git a/src/cmd/compile/internal/ssa/generate_test.go b/src/cmd/compile/internal/ssa/generate_test.go new file mode 100644 index 00000000000000..d65288c399996f --- /dev/null +++ b/src/cmd/compile/internal/ssa/generate_test.go @@ -0,0 +1,135 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import ( + "bytes" + "fmt" + "internal/testenv" + "os" + "path/filepath" + "testing" +) + +const expectedHeader = "// Code generated from _gen/" // this is the common part + +// TestGeneratedFilesUpToDate regenerates all the rewrite and rewrite-related +// files defined in _gen into a temporary directory, +// checks that they match what appears in the source tree, +// verifies that they start with the prefix of a generated header, +// and checks that the only source files with that header were actually generated. 
+func TestGeneratedFilesUpToDate(t *testing.T) { + testenv.MustHaveGoRun(t) + wd, err := os.Getwd() + if err != nil { + t.Fatalf("Failed to get current working directory: %v", err) + } + genDir := filepath.Join(wd, "_gen") + if _, err := os.Stat(genDir); os.IsNotExist(err) { + t.Fatalf("_gen directory not found") + } + + tmpdir := t.TempDir() + + // Accumulate a list of all existing files that look generated. + // It's an error if this set does not match the set that are + // generated into tmpdir. + genFiles := make(map[string]bool) + genPrefix := []byte(expectedHeader) + ssaFiles, err := filepath.Glob(filepath.Join(wd, "*.go")) + if err != nil { + t.Fatalf("could not glob for .go files in ssa directory: %v", err) + } + for _, f := range ssaFiles { + contents, err := os.ReadFile(f) + if err != nil { + t.Fatalf("could not read source file from ssa directory: %v", err) + } + // verify that the generated file has the expected header + // (this should cause other failures later, but if this is + // the problem, diagnose it here to shorten the treasure hunt.) + if bytes.HasPrefix(contents, genPrefix) { + genFiles[filepath.Base(f)] = true + } + } + + goFiles, err := filepath.Glob(filepath.Join(genDir, "*.go")) + if err != nil { + t.Fatalf("could not glob for .go files in _gen: %v", err) + } + if len(goFiles) == 0 { + t.Fatal("no .go files found in _gen") + } + + // Construct the command line for "go run". + // Explicitly list the files, just to make it + // clear what is included (if the test is logging). + args := []string{"run", "-C", genDir} + for _, f := range goFiles { + args = append(args, filepath.Base(f)) + } + args = append(args, "-outdir", tmpdir) + + logArgs := fmt.Sprintf("%v", args) + logArgs = logArgs[1 : len(logArgs)-2] // strip '[' and ']' + t.Logf("%s %v", testenv.GoToolPath(t), logArgs) + output, err := testenv.Command(t, testenv.GoToolPath(t), args...).CombinedOutput() + + if err != nil { + t.Fatalf("go run in _gen failed: %v\n%s", err, output) + } + + // Compare generated files with existing files in the parent directory. + files, err := os.ReadDir(tmpdir) + if err != nil { + t.Fatalf("could not read tmpdir %s: %v", tmpdir, err) + } + + for _, file := range files { + if file.IsDir() { + continue + } + filename := file.Name() + + // filename must be in the generated set, + if !genFiles[filename] { + t.Errorf("%s does not start with the expected header '%s' (if the header was changed the test needs to be updated)", + filename, expectedHeader) + } + genFiles[filename] = false // remove from set + + generatedPath := filepath.Join(tmpdir, filename) + originalPath := filepath.Join(wd, filename) + + generatedData, err := os.ReadFile(generatedPath) + if err != nil { + t.Errorf("could not read generated file %s: %v", generatedPath, err) + continue + } + + // there should be a corresponding file in the ssa directory, + originalData, err := os.ReadFile(originalPath) + if err != nil { + if os.IsNotExist(err) { + t.Errorf("generated file %s was created, but does not exist in the ssa directory. It may need to be added to the repository.", filename) + } else { + t.Errorf("could not read original file %s: %v", originalPath, err) + } + continue + } + + // and the contents of that file should match. + if !bytes.Equal(originalData, generatedData) { + t.Errorf("%s is out of date. Please run 'go generate'.", filename) + } + } + + // the generated set should be empty now. 
+ for file, notGenerated := range genFiles { + if notGenerated { + t.Errorf("%s has the header of a generated file but was not generated", file) + } + } +} From 00a8dacbe4dc87e4db636495ca9b39fa52808ff5 Mon Sep 17 00:00:00 2001 From: David Chase Date: Fri, 13 Jun 2025 15:55:58 -0400 Subject: [PATCH 027/139] [dev.simd] cmd/compile: remove unused simd intrinsics "helpers" turns out they weren't helpful enough. Change-Id: I4fa99dc0e7513f25acaddd7fb06451b0134172b9 Reviewed-on: https://go-review.googlesource.com/c/go/+/681498 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao --- src/cmd/compile/internal/ssagen/intrinsics.go | 93 ------------------- 1 file changed, 93 deletions(-) diff --git a/src/cmd/compile/internal/ssagen/intrinsics.go b/src/cmd/compile/internal/ssagen/intrinsics.go index 40b3c41a79e189..d3a16a0f2431eb 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics.go +++ b/src/cmd/compile/internal/ssagen/intrinsics.go @@ -1609,99 +1609,6 @@ func initIntrinsics(cfg *intrinsicBuildConfig) { } } -// simdLoadSliceMethod does intrinsic for method form of Load-from-slice -func simdLoadSliceMethod(nElts int64) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { - return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { - // args[0] is unused except for its type. - t := args[0].Type - slice := args[1] - arrlen := s.constInt(types.Types[types.TINT], nElts) - cap := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice) - s.boundsCheck(arrlen, cap, ssa.BoundsConvert, false) - ptr := s.newValue1(ssa.OpSlicePtr, t.PtrTo(), slice) // is this the right type? Does it need a convert? - return s.newValue2(ssa.OpLoad, t, ptr, s.mem()) - } -} - -// simdLoadSlice does intrinsic for function form of Load-from-slice -func simdLoadSlice(nElts int64) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { - return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { - // args[0] is unused except for its type. - t := n.Type() - slice := args[0] - arrlen := s.constInt(types.Types[types.TINT], nElts) - cap := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice) - s.boundsCheck(arrlen, cap, ssa.BoundsConvert, false) - ptr := s.newValue1(ssa.OpSlicePtr, t.PtrTo(), slice) // is this the right type? Does it need a convert? - return s.newValue2(ssa.OpLoad, t, ptr, s.mem()) - } -} - -func simdStoreSlice(nElts int64) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { - return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { - x := args[0] - t := x.Type - slice := args[1] - arrlen := s.constInt(types.Types[types.TINT], nElts) - cap := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice) - s.boundsCheck(arrlen, cap, ssa.BoundsConvert, false) - ptr := s.newValue1(ssa.OpSlicePtr, t.PtrTo(), slice) // is this the right type? Does it need a convert? - s.store(t, ptr, x) - return nil - } -} - -func simdLoadSliceMethodPart(nElts int64) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { - return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { - // args[0] is unused except for its type. 
- t := args[0].Type - slice := args[1] - arrLen := s.constInt(types.Types[types.TINT], nElts) - cap := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice) - - /* - if off := vec.Len() - len(slice) ; off <= 0 { - plain load - } else { - load mask[off] into a scratch vector - masked load/store - } - */ - - // TODO SIMD support on a 32-bit processor - - off := s.newValue2(ssa.OpSub64, types.Types[types.TINT], arrLen, cap) - cond := s.newValue2(ssa.OpLeq64, types.Types[types.TBOOL], off, s.zeroVal(types.Types[types.TINT])) - b := s.endBlock() - b.Kind = ssa.BlockIf - b.SetControl(cond) - bTrue := s.f.NewBlock(ssa.BlockPlain) - bFalse := s.f.NewBlock(ssa.BlockPlain) - bEnd := s.f.NewBlock(ssa.BlockPlain) - b.AddEdgeTo(bTrue) - b.AddEdgeTo(bFalse) - - simdRes := ssaMarker("simdload") - - // We have atomic instructions - use it directly. - s.startBlock(bTrue) - ptr := s.newValue1(ssa.OpSlicePtr, t.PtrTo(), slice) - s.vars[simdRes] = s.newValue2(ssa.OpLoad, t, ptr, s.mem()) - s.endBlock().AddEdgeTo(bEnd) - - // Use original instruction sequence. - s.startBlock(bFalse) - // NOT IMPLEMENTED, NEED TO ADD GENERIC PARTIAL LOAD/STORE - // MASK REGISTER DEPENDS ON ARCH AND ITS SIMD VERSION. - s.endBlock().AddEdgeTo(bEnd) - - // Merge results. - s.startBlock(bEnd) - return s.variable(simdRes, t) - - } -} - // findIntrinsic returns a function which builds the SSA equivalent of the // function identified by the symbol sym. If sym is not an intrinsic call, returns nil. func findIntrinsic(sym *types.Sym) intrinsicBuilder { From 7392dfd43e155b8b66d89eb8a3670cf7ff9c9a2f Mon Sep 17 00:00:00 2001 From: David Chase Date: Fri, 13 Jun 2025 16:12:16 -0400 Subject: [PATCH 028/139] [dev.simd] cmd/compile: generated simd*ops files weren't up to date I re-ran the generator in arch/internal/simd to verify a clean move of the intrinsics helpers, and these changes (which look correct) appeared. 
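(Editorial aside, not part of the patch: the substantive change in the regenerated tables below is that the AND-NOT operations — VANDNPS/VANDNPD, VPANDN/VPANDND/VPANDNQ, and the generic AndNot ops — are now marked non-commutative, which looks correct: an AND-NOT complements one operand before ANDing it with the other (for the VEX/EVEX encodings this is, as I understand it, dst = ^src1 & src2), so swapping the operands changes the result. A scalar Go sketch of that asymmetry:)

package main

import "fmt"

// andNot mirrors AND-NOT semantics: complement x, then AND with y.
func andNot(x, y uint8) uint8 { return ^x & y }

func main() {
	x, y := uint8(0b1100), uint8(0b1010)
	fmt.Printf("^x&y = %04b\n", andNot(x, y)) // 0010
	fmt.Printf("^y&x = %04b\n", andNot(y, x)) // 0100: different result, so the op is not commutative
}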
Change-Id: I28a0e8bd144d47aec216f557f238362f238d0428 Reviewed-on: https://go-review.googlesource.com/c/go/+/681499 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao --- .../compile/internal/ssa/_gen/simdAMD64ops.go | 44 +- .../internal/ssa/_gen/simdgenericOps.go | 88 ++-- src/cmd/compile/internal/ssa/opGen.go | 462 ++++++++---------- 3 files changed, 264 insertions(+), 330 deletions(-) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index b9a7bc59a56152..651a4365c7c09d 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -5,7 +5,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 return []opData{ {name: "VADDPS512", argLength: 2, reg: fp21, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VANDPS512", argLength: 2, reg: fp21, asm: "VANDPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VANDNPS512", argLength: 2, reg: fp21, asm: "VANDNPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VANDNPS512", argLength: 2, reg: fp21, asm: "VANDNPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRCP14PS512", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRSQRT14PS512", argLength: 1, reg: fp11, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VDIVPS512", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -29,7 +29,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VFNMSUB231PS512", argLength: 3, reg: fp31, asm: "VFNMSUB231PS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VADDPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VANDPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VANDNPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDNPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VANDNPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDNPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRCP14PSMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRSQRT14PSMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VDIVPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -68,7 +68,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VADDPS128", argLength: 2, reg: fp21, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VADDSUBPS128", argLength: 2, reg: fp21, asm: "VADDSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VANDPS128", argLength: 2, reg: fp21, asm: "VANDPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VANDNPS128", argLength: 2, reg: fp21, asm: "VANDNPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VANDNPS128", argLength: 2, reg: fp21, asm: "VANDNPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRCP14PS128", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: 
false}, {name: "VRSQRTPS128", argLength: 1, reg: fp11, asm: "VRSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPS128", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -92,7 +92,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VFNMSUB231PS128", argLength: 3, reg: fp31, asm: "VFNMSUB231PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VADDPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VANDPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VANDNPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDNPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VANDNPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDNPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRCP14PSMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRSQRT14PSMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -133,7 +133,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VADDPS256", argLength: 2, reg: fp21, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VADDSUBPS256", argLength: 2, reg: fp21, asm: "VADDSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VANDPS256", argLength: 2, reg: fp21, asm: "VANDPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VANDNPS256", argLength: 2, reg: fp21, asm: "VANDNPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VANDNPS256", argLength: 2, reg: fp21, asm: "VANDNPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRCP14PS256", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRSQRTPS256", argLength: 1, reg: fp11, asm: "VRSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPS256", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -157,7 +157,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VFNMSUB231PS256", argLength: 3, reg: fp31, asm: "VFNMSUB231PS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VADDPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VANDPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VANDNPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDNPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VANDNPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDNPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRCP14PSMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRSQRT14PSMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VDIVPS", commutative: false, typ: "Vec256", 
resultInArg0: false}, @@ -198,7 +198,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VADDPD128", argLength: 2, reg: fp21, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VADDSUBPD128", argLength: 2, reg: fp21, asm: "VADDSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VANDPD128", argLength: 2, reg: fp21, asm: "VANDPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VANDNPD128", argLength: 2, reg: fp21, asm: "VANDNPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VANDNPD128", argLength: 2, reg: fp21, asm: "VANDNPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRCP14PD128", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRSQRT14PD128", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPD128", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -222,7 +222,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VFNMSUB231PD128", argLength: 3, reg: fp31, asm: "VFNMSUB231PD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VADDPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VANDPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VANDNPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDNPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VANDNPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDNPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRCP14PDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRSQRT14PDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -263,7 +263,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VADDPD256", argLength: 2, reg: fp21, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VADDSUBPD256", argLength: 2, reg: fp21, asm: "VADDSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VANDPD256", argLength: 2, reg: fp21, asm: "VANDPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VANDNPD256", argLength: 2, reg: fp21, asm: "VANDNPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VANDNPD256", argLength: 2, reg: fp21, asm: "VANDNPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRCP14PD256", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRSQRT14PD256", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPD256", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -287,7 +287,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VFNMSUB231PD256", argLength: 3, reg: fp31, asm: "VFNMSUB231PD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VADDPDMasked256", argLength: 3, reg: 
fp2k1fp1, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VANDPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VANDNPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDNPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VANDNPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDNPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRCP14PDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRSQRT14PDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -327,7 +327,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VXORPD256", argLength: 2, reg: fp21, asm: "VXORPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VADDPD512", argLength: 2, reg: fp21, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VANDPD512", argLength: 2, reg: fp21, asm: "VANDPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VANDNPD512", argLength: 2, reg: fp21, asm: "VANDNPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VANDNPD512", argLength: 2, reg: fp21, asm: "VANDNPD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRCP14PD512", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRSQRT14PD512", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VDIVPD512", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -351,7 +351,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VFNMSUB231PD512", argLength: 3, reg: fp31, asm: "VFNMSUB231PD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VADDPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VANDPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VANDNPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDNPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VANDNPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDNPD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRCP14PDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRSQRT14PDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VDIVPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -390,7 +390,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPABSW256", argLength: 1, reg: fp11, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPADDW256", argLength: 2, reg: fp21, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPAND256", argLength: 2, reg: fp21, asm: "VPAND", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPANDN256", argLength: 2, reg: fp21, asm: 
"VPANDN", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPANDN256", argLength: 2, reg: fp21, asm: "VPANDN", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPEQW256", argLength: 2, reg: fp21, asm: "VPCMPEQW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPCMPGTW256", argLength: 2, reg: fp21, asm: "VPCMPGTW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPABSWMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -451,7 +451,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPABSW128", argLength: 1, reg: fp11, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDW128", argLength: 2, reg: fp21, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPAND128", argLength: 2, reg: fp21, asm: "VPAND", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPANDN128", argLength: 2, reg: fp21, asm: "VPANDN", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPANDN128", argLength: 2, reg: fp21, asm: "VPANDN", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPEQW128", argLength: 2, reg: fp21, asm: "VPCMPEQW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPCMPGTW128", argLength: 2, reg: fp21, asm: "VPCMPGTW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPABSWMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -486,13 +486,13 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPABSD512", argLength: 1, reg: fp11, asm: "VPABSD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPADDD512", argLength: 2, reg: fp21, asm: "VPADDD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPANDD512", argLength: 2, reg: fp21, asm: "VPANDD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPANDND512", argLength: 2, reg: fp21, asm: "VPANDND", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDND512", argLength: 2, reg: fp21, asm: "VPANDND", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPCMPEQD512", argLength: 2, reg: fp2k1, asm: "VPCMPEQD", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPGTD512", argLength: 2, reg: fp2k1, asm: "VPCMPGTD", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPABSDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPABSD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPADDDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPADDD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPANDDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPANDD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPANDNDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPANDND", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDNDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPANDND", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPCMPEQDMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQD", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPGTDMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTD", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPMAXSDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: 
false}, @@ -524,7 +524,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPABSDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPABSD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPADDD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPANDDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPANDD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPANDNDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPANDND", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPANDNDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPANDND", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPEQDMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQD", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPGTDMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTD", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPMAXSDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSD", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -558,7 +558,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPABSDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPABSD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPADDDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPADDD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPANDDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPANDD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPANDNDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPANDND", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPANDNDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPANDND", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPEQDMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQD", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPGTDMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTD", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPMAXSDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSD", commutative: true, typ: "Vec256", resultInArg0: false}, @@ -592,7 +592,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPABSQMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPABSQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPANDQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPANDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPANDNQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPANDNQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPANDNQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPANDNQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPEQQMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQQ", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPGTQMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTQ", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPMAXSQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -615,7 +615,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPABSQMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPABSQ", 
commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPADDQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPADDQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPANDQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPANDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPANDNQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPANDNQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPANDNQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPANDNQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPEQQMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQQ", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPGTQMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTQ", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPMAXSQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec256", resultInArg0: false}, @@ -634,13 +634,13 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPABSQ512", argLength: 1, reg: fp11, asm: "VPABSQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPADDQ512", argLength: 2, reg: fp21, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPANDQ512", argLength: 2, reg: fp21, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPANDNQ512", argLength: 2, reg: fp21, asm: "VPANDNQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDNQ512", argLength: 2, reg: fp21, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPCMPEQQ512", argLength: 2, reg: fp2k1, asm: "VPCMPEQQ", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPGTQ512", argLength: 2, reg: fp2k1, asm: "VPCMPGTQ", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPABSQMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPABSQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPADDQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPANDQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPANDNQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPANDNQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDNQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPCMPEQQMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQQ", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPGTQMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTQ", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPMAXSQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 5c86f280913c7b..a29decdf008110 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -5,7 +5,7 @@ func simdGenericOps() []opData { return []opData{ {name: "AddFloat32x16", argLength: 2, commutative: true}, {name: "AndFloat32x16", argLength: 2, commutative: true}, - {name: "AndNotFloat32x16", argLength: 2, commutative: true}, + {name: "AndNotFloat32x16", argLength: 2, commutative: false}, {name: "ApproximateReciprocalFloat32x16", argLength: 1, 
commutative: false}, {name: "ApproximateReciprocalOfSqrtFloat32x16", argLength: 1, commutative: false}, {name: "DivFloat32x16", argLength: 2, commutative: false}, @@ -35,7 +35,7 @@ func simdGenericOps() []opData { {name: "LessEqualFloat32x16", argLength: 2, commutative: false}, {name: "MaskedAddFloat32x16", argLength: 3, commutative: true}, {name: "MaskedAndFloat32x16", argLength: 3, commutative: true}, - {name: "MaskedAndNotFloat32x16", argLength: 3, commutative: true}, + {name: "MaskedAndNotFloat32x16", argLength: 3, commutative: false}, {name: "MaskedApproximateReciprocalFloat32x16", argLength: 2, commutative: false}, {name: "MaskedApproximateReciprocalOfSqrtFloat32x16", argLength: 2, commutative: false}, {name: "MaskedDivFloat32x16", argLength: 3, commutative: false}, @@ -84,7 +84,7 @@ func simdGenericOps() []opData { {name: "AddFloat32x4", argLength: 2, commutative: true}, {name: "AddSubFloat32x4", argLength: 2, commutative: false}, {name: "AndFloat32x4", argLength: 2, commutative: true}, - {name: "AndNotFloat32x4", argLength: 2, commutative: true}, + {name: "AndNotFloat32x4", argLength: 2, commutative: false}, {name: "ApproximateReciprocalFloat32x4", argLength: 1, commutative: false}, {name: "ApproximateReciprocalOfSqrtFloat32x4", argLength: 1, commutative: false}, {name: "CeilFloat32x4", argLength: 1, commutative: false}, @@ -116,7 +116,7 @@ func simdGenericOps() []opData { {name: "LessEqualFloat32x4", argLength: 2, commutative: false}, {name: "MaskedAddFloat32x4", argLength: 3, commutative: true}, {name: "MaskedAndFloat32x4", argLength: 3, commutative: true}, - {name: "MaskedAndNotFloat32x4", argLength: 3, commutative: true}, + {name: "MaskedAndNotFloat32x4", argLength: 3, commutative: false}, {name: "MaskedApproximateReciprocalFloat32x4", argLength: 2, commutative: false}, {name: "MaskedApproximateReciprocalOfSqrtFloat32x4", argLength: 2, commutative: false}, {name: "MaskedDivFloat32x4", argLength: 3, commutative: false}, @@ -169,7 +169,7 @@ func simdGenericOps() []opData { {name: "AddFloat32x8", argLength: 2, commutative: true}, {name: "AddSubFloat32x8", argLength: 2, commutative: false}, {name: "AndFloat32x8", argLength: 2, commutative: true}, - {name: "AndNotFloat32x8", argLength: 2, commutative: true}, + {name: "AndNotFloat32x8", argLength: 2, commutative: false}, {name: "ApproximateReciprocalFloat32x8", argLength: 1, commutative: false}, {name: "ApproximateReciprocalOfSqrtFloat32x8", argLength: 1, commutative: false}, {name: "CeilFloat32x8", argLength: 1, commutative: false}, @@ -201,7 +201,7 @@ func simdGenericOps() []opData { {name: "LessEqualFloat32x8", argLength: 2, commutative: false}, {name: "MaskedAddFloat32x8", argLength: 3, commutative: true}, {name: "MaskedAndFloat32x8", argLength: 3, commutative: true}, - {name: "MaskedAndNotFloat32x8", argLength: 3, commutative: true}, + {name: "MaskedAndNotFloat32x8", argLength: 3, commutative: false}, {name: "MaskedApproximateReciprocalFloat32x8", argLength: 2, commutative: false}, {name: "MaskedApproximateReciprocalOfSqrtFloat32x8", argLength: 2, commutative: false}, {name: "MaskedDivFloat32x8", argLength: 3, commutative: false}, @@ -254,7 +254,7 @@ func simdGenericOps() []opData { {name: "AddFloat64x2", argLength: 2, commutative: true}, {name: "AddSubFloat64x2", argLength: 2, commutative: false}, {name: "AndFloat64x2", argLength: 2, commutative: true}, - {name: "AndNotFloat64x2", argLength: 2, commutative: true}, + {name: "AndNotFloat64x2", argLength: 2, commutative: false}, {name: "ApproximateReciprocalFloat64x2", argLength: 
1, commutative: false}, {name: "ApproximateReciprocalOfSqrtFloat64x2", argLength: 1, commutative: false}, {name: "CeilFloat64x2", argLength: 1, commutative: false}, @@ -287,7 +287,7 @@ func simdGenericOps() []opData { {name: "LessEqualFloat64x2", argLength: 2, commutative: false}, {name: "MaskedAddFloat64x2", argLength: 3, commutative: true}, {name: "MaskedAndFloat64x2", argLength: 3, commutative: true}, - {name: "MaskedAndNotFloat64x2", argLength: 3, commutative: true}, + {name: "MaskedAndNotFloat64x2", argLength: 3, commutative: false}, {name: "MaskedApproximateReciprocalFloat64x2", argLength: 2, commutative: false}, {name: "MaskedApproximateReciprocalOfSqrtFloat64x2", argLength: 2, commutative: false}, {name: "MaskedDivFloat64x2", argLength: 3, commutative: false}, @@ -340,7 +340,7 @@ func simdGenericOps() []opData { {name: "AddFloat64x4", argLength: 2, commutative: true}, {name: "AddSubFloat64x4", argLength: 2, commutative: false}, {name: "AndFloat64x4", argLength: 2, commutative: true}, - {name: "AndNotFloat64x4", argLength: 2, commutative: true}, + {name: "AndNotFloat64x4", argLength: 2, commutative: false}, {name: "ApproximateReciprocalFloat64x4", argLength: 1, commutative: false}, {name: "ApproximateReciprocalOfSqrtFloat64x4", argLength: 1, commutative: false}, {name: "CeilFloat64x4", argLength: 1, commutative: false}, @@ -372,7 +372,7 @@ func simdGenericOps() []opData { {name: "LessEqualFloat64x4", argLength: 2, commutative: false}, {name: "MaskedAddFloat64x4", argLength: 3, commutative: true}, {name: "MaskedAndFloat64x4", argLength: 3, commutative: true}, - {name: "MaskedAndNotFloat64x4", argLength: 3, commutative: true}, + {name: "MaskedAndNotFloat64x4", argLength: 3, commutative: false}, {name: "MaskedApproximateReciprocalFloat64x4", argLength: 2, commutative: false}, {name: "MaskedApproximateReciprocalOfSqrtFloat64x4", argLength: 2, commutative: false}, {name: "MaskedDivFloat64x4", argLength: 3, commutative: false}, @@ -424,7 +424,7 @@ func simdGenericOps() []opData { {name: "XorFloat64x4", argLength: 2, commutative: true}, {name: "AddFloat64x8", argLength: 2, commutative: true}, {name: "AndFloat64x8", argLength: 2, commutative: true}, - {name: "AndNotFloat64x8", argLength: 2, commutative: true}, + {name: "AndNotFloat64x8", argLength: 2, commutative: false}, {name: "ApproximateReciprocalFloat64x8", argLength: 1, commutative: false}, {name: "ApproximateReciprocalOfSqrtFloat64x8", argLength: 1, commutative: false}, {name: "DivFloat64x8", argLength: 2, commutative: false}, @@ -454,7 +454,7 @@ func simdGenericOps() []opData { {name: "LessEqualFloat64x8", argLength: 2, commutative: false}, {name: "MaskedAddFloat64x8", argLength: 3, commutative: true}, {name: "MaskedAndFloat64x8", argLength: 3, commutative: true}, - {name: "MaskedAndNotFloat64x8", argLength: 3, commutative: true}, + {name: "MaskedAndNotFloat64x8", argLength: 3, commutative: false}, {name: "MaskedApproximateReciprocalFloat64x8", argLength: 2, commutative: false}, {name: "MaskedApproximateReciprocalOfSqrtFloat64x8", argLength: 2, commutative: false}, {name: "MaskedDivFloat64x8", argLength: 3, commutative: false}, @@ -503,7 +503,7 @@ func simdGenericOps() []opData { {name: "AbsoluteInt16x16", argLength: 1, commutative: false}, {name: "AddInt16x16", argLength: 2, commutative: true}, {name: "AndInt16x16", argLength: 2, commutative: true}, - {name: "AndNotInt16x16", argLength: 2, commutative: true}, + {name: "AndNotInt16x16", argLength: 2, commutative: false}, {name: "EqualInt16x16", argLength: 2, commutative: true}, 
{name: "GreaterInt16x16", argLength: 2, commutative: false}, {name: "GreaterEqualInt16x16", argLength: 2, commutative: false}, @@ -580,7 +580,7 @@ func simdGenericOps() []opData { {name: "AbsoluteInt16x8", argLength: 1, commutative: false}, {name: "AddInt16x8", argLength: 2, commutative: true}, {name: "AndInt16x8", argLength: 2, commutative: true}, - {name: "AndNotInt16x8", argLength: 2, commutative: true}, + {name: "AndNotInt16x8", argLength: 2, commutative: false}, {name: "EqualInt16x8", argLength: 2, commutative: true}, {name: "GreaterInt16x8", argLength: 2, commutative: false}, {name: "GreaterEqualInt16x8", argLength: 2, commutative: false}, @@ -623,7 +623,7 @@ func simdGenericOps() []opData { {name: "AbsoluteInt32x16", argLength: 1, commutative: false}, {name: "AddInt32x16", argLength: 2, commutative: true}, {name: "AndInt32x16", argLength: 2, commutative: true}, - {name: "AndNotInt32x16", argLength: 2, commutative: true}, + {name: "AndNotInt32x16", argLength: 2, commutative: false}, {name: "EqualInt32x16", argLength: 2, commutative: true}, {name: "GreaterInt32x16", argLength: 2, commutative: false}, {name: "GreaterEqualInt32x16", argLength: 2, commutative: false}, @@ -632,7 +632,7 @@ func simdGenericOps() []opData { {name: "MaskedAbsoluteInt32x16", argLength: 2, commutative: false}, {name: "MaskedAddInt32x16", argLength: 3, commutative: true}, {name: "MaskedAndInt32x16", argLength: 3, commutative: true}, - {name: "MaskedAndNotInt32x16", argLength: 3, commutative: true}, + {name: "MaskedAndNotInt32x16", argLength: 3, commutative: false}, {name: "MaskedEqualInt32x16", argLength: 3, commutative: true}, {name: "MaskedGreaterInt32x16", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualInt32x16", argLength: 3, commutative: false}, @@ -665,7 +665,7 @@ func simdGenericOps() []opData { {name: "AbsoluteInt32x4", argLength: 1, commutative: false}, {name: "AddInt32x4", argLength: 2, commutative: true}, {name: "AndInt32x4", argLength: 2, commutative: true}, - {name: "AndNotInt32x4", argLength: 2, commutative: true}, + {name: "AndNotInt32x4", argLength: 2, commutative: false}, {name: "EqualInt32x4", argLength: 2, commutative: true}, {name: "GreaterInt32x4", argLength: 2, commutative: false}, {name: "GreaterEqualInt32x4", argLength: 2, commutative: false}, @@ -674,7 +674,7 @@ func simdGenericOps() []opData { {name: "MaskedAbsoluteInt32x4", argLength: 2, commutative: false}, {name: "MaskedAddInt32x4", argLength: 3, commutative: true}, {name: "MaskedAndInt32x4", argLength: 3, commutative: true}, - {name: "MaskedAndNotInt32x4", argLength: 3, commutative: true}, + {name: "MaskedAndNotInt32x4", argLength: 3, commutative: false}, {name: "MaskedEqualInt32x4", argLength: 3, commutative: true}, {name: "MaskedGreaterInt32x4", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualInt32x4", argLength: 3, commutative: false}, @@ -711,7 +711,7 @@ func simdGenericOps() []opData { {name: "AbsoluteInt32x8", argLength: 1, commutative: false}, {name: "AddInt32x8", argLength: 2, commutative: true}, {name: "AndInt32x8", argLength: 2, commutative: true}, - {name: "AndNotInt32x8", argLength: 2, commutative: true}, + {name: "AndNotInt32x8", argLength: 2, commutative: false}, {name: "EqualInt32x8", argLength: 2, commutative: true}, {name: "GreaterInt32x8", argLength: 2, commutative: false}, {name: "GreaterEqualInt32x8", argLength: 2, commutative: false}, @@ -720,7 +720,7 @@ func simdGenericOps() []opData { {name: "MaskedAbsoluteInt32x8", argLength: 2, commutative: false}, {name: "MaskedAddInt32x8", 
argLength: 3, commutative: true}, {name: "MaskedAndInt32x8", argLength: 3, commutative: true}, - {name: "MaskedAndNotInt32x8", argLength: 3, commutative: true}, + {name: "MaskedAndNotInt32x8", argLength: 3, commutative: false}, {name: "MaskedEqualInt32x8", argLength: 3, commutative: true}, {name: "MaskedGreaterInt32x8", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualInt32x8", argLength: 3, commutative: false}, @@ -757,7 +757,7 @@ func simdGenericOps() []opData { {name: "AbsoluteInt64x2", argLength: 1, commutative: false}, {name: "AddInt64x2", argLength: 2, commutative: true}, {name: "AndInt64x2", argLength: 2, commutative: true}, - {name: "AndNotInt64x2", argLength: 2, commutative: true}, + {name: "AndNotInt64x2", argLength: 2, commutative: false}, {name: "EqualInt64x2", argLength: 2, commutative: true}, {name: "GreaterInt64x2", argLength: 2, commutative: false}, {name: "GreaterEqualInt64x2", argLength: 2, commutative: false}, @@ -766,7 +766,7 @@ func simdGenericOps() []opData { {name: "MaskedAbsoluteInt64x2", argLength: 2, commutative: false}, {name: "MaskedAddInt64x2", argLength: 3, commutative: true}, {name: "MaskedAndInt64x2", argLength: 3, commutative: true}, - {name: "MaskedAndNotInt64x2", argLength: 3, commutative: true}, + {name: "MaskedAndNotInt64x2", argLength: 3, commutative: false}, {name: "MaskedEqualInt64x2", argLength: 3, commutative: true}, {name: "MaskedGreaterInt64x2", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualInt64x2", argLength: 3, commutative: false}, @@ -793,7 +793,7 @@ func simdGenericOps() []opData { {name: "AbsoluteInt64x4", argLength: 1, commutative: false}, {name: "AddInt64x4", argLength: 2, commutative: true}, {name: "AndInt64x4", argLength: 2, commutative: true}, - {name: "AndNotInt64x4", argLength: 2, commutative: true}, + {name: "AndNotInt64x4", argLength: 2, commutative: false}, {name: "EqualInt64x4", argLength: 2, commutative: true}, {name: "GreaterInt64x4", argLength: 2, commutative: false}, {name: "GreaterEqualInt64x4", argLength: 2, commutative: false}, @@ -802,7 +802,7 @@ func simdGenericOps() []opData { {name: "MaskedAbsoluteInt64x4", argLength: 2, commutative: false}, {name: "MaskedAddInt64x4", argLength: 3, commutative: true}, {name: "MaskedAndInt64x4", argLength: 3, commutative: true}, - {name: "MaskedAndNotInt64x4", argLength: 3, commutative: true}, + {name: "MaskedAndNotInt64x4", argLength: 3, commutative: false}, {name: "MaskedEqualInt64x4", argLength: 3, commutative: true}, {name: "MaskedGreaterInt64x4", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualInt64x4", argLength: 3, commutative: false}, @@ -829,7 +829,7 @@ func simdGenericOps() []opData { {name: "AbsoluteInt64x8", argLength: 1, commutative: false}, {name: "AddInt64x8", argLength: 2, commutative: true}, {name: "AndInt64x8", argLength: 2, commutative: true}, - {name: "AndNotInt64x8", argLength: 2, commutative: true}, + {name: "AndNotInt64x8", argLength: 2, commutative: false}, {name: "EqualInt64x8", argLength: 2, commutative: true}, {name: "GreaterInt64x8", argLength: 2, commutative: false}, {name: "GreaterEqualInt64x8", argLength: 2, commutative: false}, @@ -838,7 +838,7 @@ func simdGenericOps() []opData { {name: "MaskedAbsoluteInt64x8", argLength: 2, commutative: false}, {name: "MaskedAddInt64x8", argLength: 3, commutative: true}, {name: "MaskedAndInt64x8", argLength: 3, commutative: true}, - {name: "MaskedAndNotInt64x8", argLength: 3, commutative: true}, + {name: "MaskedAndNotInt64x8", argLength: 3, commutative: false}, {name: 
"MaskedEqualInt64x8", argLength: 3, commutative: true}, {name: "MaskedGreaterInt64x8", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualInt64x8", argLength: 3, commutative: false}, @@ -865,7 +865,7 @@ func simdGenericOps() []opData { {name: "AbsoluteInt8x16", argLength: 1, commutative: false}, {name: "AddInt8x16", argLength: 2, commutative: true}, {name: "AndInt8x16", argLength: 2, commutative: true}, - {name: "AndNotInt8x16", argLength: 2, commutative: true}, + {name: "AndNotInt8x16", argLength: 2, commutative: false}, {name: "EqualInt8x16", argLength: 2, commutative: true}, {name: "GreaterInt8x16", argLength: 2, commutative: false}, {name: "GreaterEqualInt8x16", argLength: 2, commutative: false}, @@ -898,7 +898,7 @@ func simdGenericOps() []opData { {name: "AbsoluteInt8x32", argLength: 1, commutative: false}, {name: "AddInt8x32", argLength: 2, commutative: true}, {name: "AndInt8x32", argLength: 2, commutative: true}, - {name: "AndNotInt8x32", argLength: 2, commutative: true}, + {name: "AndNotInt8x32", argLength: 2, commutative: false}, {name: "EqualInt8x32", argLength: 2, commutative: true}, {name: "GreaterInt8x32", argLength: 2, commutative: false}, {name: "GreaterEqualInt8x32", argLength: 2, commutative: false}, @@ -958,7 +958,7 @@ func simdGenericOps() []opData { {name: "SubInt8x64", argLength: 2, commutative: false}, {name: "AddUint16x16", argLength: 2, commutative: true}, {name: "AndUint16x16", argLength: 2, commutative: true}, - {name: "AndNotUint16x16", argLength: 2, commutative: true}, + {name: "AndNotUint16x16", argLength: 2, commutative: false}, {name: "AverageUint16x16", argLength: 2, commutative: true}, {name: "EqualUint16x16", argLength: 2, commutative: true}, {name: "GreaterUint16x16", argLength: 2, commutative: false}, @@ -1028,7 +1028,7 @@ func simdGenericOps() []opData { {name: "SubUint16x32", argLength: 2, commutative: false}, {name: "AddUint16x8", argLength: 2, commutative: true}, {name: "AndUint16x8", argLength: 2, commutative: true}, - {name: "AndNotUint16x8", argLength: 2, commutative: true}, + {name: "AndNotUint16x8", argLength: 2, commutative: false}, {name: "AverageUint16x8", argLength: 2, commutative: true}, {name: "EqualUint16x8", argLength: 2, commutative: true}, {name: "GreaterUint16x8", argLength: 2, commutative: false}, @@ -1066,7 +1066,7 @@ func simdGenericOps() []opData { {name: "XorUint16x8", argLength: 2, commutative: true}, {name: "AddUint32x16", argLength: 2, commutative: true}, {name: "AndUint32x16", argLength: 2, commutative: true}, - {name: "AndNotUint32x16", argLength: 2, commutative: true}, + {name: "AndNotUint32x16", argLength: 2, commutative: false}, {name: "EqualUint32x16", argLength: 2, commutative: true}, {name: "GreaterUint32x16", argLength: 2, commutative: false}, {name: "GreaterEqualUint32x16", argLength: 2, commutative: false}, @@ -1074,7 +1074,7 @@ func simdGenericOps() []opData { {name: "LessEqualUint32x16", argLength: 2, commutative: false}, {name: "MaskedAddUint32x16", argLength: 3, commutative: true}, {name: "MaskedAndUint32x16", argLength: 3, commutative: true}, - {name: "MaskedAndNotUint32x16", argLength: 3, commutative: true}, + {name: "MaskedAndNotUint32x16", argLength: 3, commutative: false}, {name: "MaskedEqualUint32x16", argLength: 3, commutative: true}, {name: "MaskedGreaterUint32x16", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualUint32x16", argLength: 3, commutative: false}, @@ -1100,7 +1100,7 @@ func simdGenericOps() []opData { {name: "XorUint32x16", argLength: 2, commutative: true}, {name: 
"AddUint32x4", argLength: 2, commutative: true}, {name: "AndUint32x4", argLength: 2, commutative: true}, - {name: "AndNotUint32x4", argLength: 2, commutative: true}, + {name: "AndNotUint32x4", argLength: 2, commutative: false}, {name: "EqualUint32x4", argLength: 2, commutative: true}, {name: "GreaterUint32x4", argLength: 2, commutative: false}, {name: "GreaterEqualUint32x4", argLength: 2, commutative: false}, @@ -1108,7 +1108,7 @@ func simdGenericOps() []opData { {name: "LessEqualUint32x4", argLength: 2, commutative: false}, {name: "MaskedAddUint32x4", argLength: 3, commutative: true}, {name: "MaskedAndUint32x4", argLength: 3, commutative: true}, - {name: "MaskedAndNotUint32x4", argLength: 3, commutative: true}, + {name: "MaskedAndNotUint32x4", argLength: 3, commutative: false}, {name: "MaskedEqualUint32x4", argLength: 3, commutative: true}, {name: "MaskedGreaterUint32x4", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualUint32x4", argLength: 3, commutative: false}, @@ -1137,7 +1137,7 @@ func simdGenericOps() []opData { {name: "XorUint32x4", argLength: 2, commutative: true}, {name: "AddUint32x8", argLength: 2, commutative: true}, {name: "AndUint32x8", argLength: 2, commutative: true}, - {name: "AndNotUint32x8", argLength: 2, commutative: true}, + {name: "AndNotUint32x8", argLength: 2, commutative: false}, {name: "EqualUint32x8", argLength: 2, commutative: true}, {name: "GreaterUint32x8", argLength: 2, commutative: false}, {name: "GreaterEqualUint32x8", argLength: 2, commutative: false}, @@ -1145,7 +1145,7 @@ func simdGenericOps() []opData { {name: "LessEqualUint32x8", argLength: 2, commutative: false}, {name: "MaskedAddUint32x8", argLength: 3, commutative: true}, {name: "MaskedAndUint32x8", argLength: 3, commutative: true}, - {name: "MaskedAndNotUint32x8", argLength: 3, commutative: true}, + {name: "MaskedAndNotUint32x8", argLength: 3, commutative: false}, {name: "MaskedEqualUint32x8", argLength: 3, commutative: true}, {name: "MaskedGreaterUint32x8", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualUint32x8", argLength: 3, commutative: false}, @@ -1174,7 +1174,7 @@ func simdGenericOps() []opData { {name: "XorUint32x8", argLength: 2, commutative: true}, {name: "AddUint64x2", argLength: 2, commutative: true}, {name: "AndUint64x2", argLength: 2, commutative: true}, - {name: "AndNotUint64x2", argLength: 2, commutative: true}, + {name: "AndNotUint64x2", argLength: 2, commutative: false}, {name: "EqualUint64x2", argLength: 2, commutative: true}, {name: "GreaterUint64x2", argLength: 2, commutative: false}, {name: "GreaterEqualUint64x2", argLength: 2, commutative: false}, @@ -1182,7 +1182,7 @@ func simdGenericOps() []opData { {name: "LessEqualUint64x2", argLength: 2, commutative: false}, {name: "MaskedAddUint64x2", argLength: 3, commutative: true}, {name: "MaskedAndUint64x2", argLength: 3, commutative: true}, - {name: "MaskedAndNotUint64x2", argLength: 3, commutative: true}, + {name: "MaskedAndNotUint64x2", argLength: 3, commutative: false}, {name: "MaskedEqualUint64x2", argLength: 3, commutative: true}, {name: "MaskedGreaterUint64x2", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualUint64x2", argLength: 3, commutative: false}, @@ -1206,7 +1206,7 @@ func simdGenericOps() []opData { {name: "XorUint64x2", argLength: 2, commutative: true}, {name: "AddUint64x4", argLength: 2, commutative: true}, {name: "AndUint64x4", argLength: 2, commutative: true}, - {name: "AndNotUint64x4", argLength: 2, commutative: true}, + {name: "AndNotUint64x4", argLength: 2, commutative: 
false}, {name: "EqualUint64x4", argLength: 2, commutative: true}, {name: "GreaterUint64x4", argLength: 2, commutative: false}, {name: "GreaterEqualUint64x4", argLength: 2, commutative: false}, @@ -1214,7 +1214,7 @@ func simdGenericOps() []opData { {name: "LessEqualUint64x4", argLength: 2, commutative: false}, {name: "MaskedAddUint64x4", argLength: 3, commutative: true}, {name: "MaskedAndUint64x4", argLength: 3, commutative: true}, - {name: "MaskedAndNotUint64x4", argLength: 3, commutative: true}, + {name: "MaskedAndNotUint64x4", argLength: 3, commutative: false}, {name: "MaskedEqualUint64x4", argLength: 3, commutative: true}, {name: "MaskedGreaterUint64x4", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualUint64x4", argLength: 3, commutative: false}, @@ -1238,7 +1238,7 @@ func simdGenericOps() []opData { {name: "XorUint64x4", argLength: 2, commutative: true}, {name: "AddUint64x8", argLength: 2, commutative: true}, {name: "AndUint64x8", argLength: 2, commutative: true}, - {name: "AndNotUint64x8", argLength: 2, commutative: true}, + {name: "AndNotUint64x8", argLength: 2, commutative: false}, {name: "EqualUint64x8", argLength: 2, commutative: true}, {name: "GreaterUint64x8", argLength: 2, commutative: false}, {name: "GreaterEqualUint64x8", argLength: 2, commutative: false}, @@ -1246,7 +1246,7 @@ func simdGenericOps() []opData { {name: "LessEqualUint64x8", argLength: 2, commutative: false}, {name: "MaskedAddUint64x8", argLength: 3, commutative: true}, {name: "MaskedAndUint64x8", argLength: 3, commutative: true}, - {name: "MaskedAndNotUint64x8", argLength: 3, commutative: true}, + {name: "MaskedAndNotUint64x8", argLength: 3, commutative: false}, {name: "MaskedEqualUint64x8", argLength: 3, commutative: true}, {name: "MaskedGreaterUint64x8", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualUint64x8", argLength: 3, commutative: false}, @@ -1270,7 +1270,7 @@ func simdGenericOps() []opData { {name: "XorUint64x8", argLength: 2, commutative: true}, {name: "AddUint8x16", argLength: 2, commutative: true}, {name: "AndUint8x16", argLength: 2, commutative: true}, - {name: "AndNotUint8x16", argLength: 2, commutative: true}, + {name: "AndNotUint8x16", argLength: 2, commutative: false}, {name: "AverageUint8x16", argLength: 2, commutative: true}, {name: "EqualUint8x16", argLength: 2, commutative: true}, {name: "GreaterUint8x16", argLength: 2, commutative: false}, @@ -1303,7 +1303,7 @@ func simdGenericOps() []opData { {name: "XorUint8x16", argLength: 2, commutative: true}, {name: "AddUint8x32", argLength: 2, commutative: true}, {name: "AndUint8x32", argLength: 2, commutative: true}, - {name: "AndNotUint8x32", argLength: 2, commutative: true}, + {name: "AndNotUint8x32", argLength: 2, commutative: false}, {name: "AverageUint8x32", argLength: 2, commutative: true}, {name: "EqualUint8x32", argLength: 2, commutative: true}, {name: "GreaterUint8x32", argLength: 2, commutative: false}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 106f3e16574733..d2e86702d838f0 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -18484,10 +18484,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPS512", - argLen: 2, - commutative: true, - asm: x86.AVANDNPS, + name: "VANDNPS512", + argLen: 2, + asm: x86.AVANDNPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18859,10 +18858,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPSMasked512", - 
argLen: 3, - commutative: true, - asm: x86.AVANDNPS, + name: "VANDNPSMasked512", + argLen: 3, + asm: x86.AVANDNPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -19479,10 +19477,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPS128", - argLen: 2, - commutative: true, - asm: x86.AVANDNPS, + name: "VANDNPS128", + argLen: 2, + asm: x86.AVANDNPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19854,10 +19851,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPSMasked128", - argLen: 3, - commutative: true, - asm: x86.AVANDNPS, + name: "VANDNPSMasked128", + argLen: 3, + asm: x86.AVANDNPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -20502,10 +20498,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPS256", - argLen: 2, - commutative: true, - asm: x86.AVANDNPS, + name: "VANDNPS256", + argLen: 2, + asm: x86.AVANDNPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20877,10 +20872,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPSMasked256", - argLen: 3, - commutative: true, - asm: x86.AVANDNPS, + name: "VANDNPSMasked256", + argLen: 3, + asm: x86.AVANDNPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -21525,10 +21519,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPD128", - argLen: 2, - commutative: true, - asm: x86.AVANDNPD, + name: "VANDNPD128", + argLen: 2, + asm: x86.AVANDNPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21900,10 +21893,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVANDNPD, + name: "VANDNPDMasked128", + argLen: 3, + asm: x86.AVANDNPD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22548,10 +22540,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPD256", - argLen: 2, - commutative: true, - asm: x86.AVANDNPD, + name: "VANDNPD256", + argLen: 2, + asm: x86.AVANDNPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22923,10 +22914,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVANDNPD, + name: "VANDNPDMasked256", + argLen: 3, + asm: x86.AVANDNPD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23557,10 +23547,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPD512", - argLen: 2, - commutative: true, - asm: x86.AVANDNPD, + name: "VANDNPD512", + argLen: 2, + asm: x86.AVANDNPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23932,10 +23921,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVANDNPD, + name: "VANDNPDMasked512", + argLen: 3, + asm: x86.AVANDNPD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24551,10 +24539,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDN256", - argLen: 2, - commutative: true, - asm: x86.AVPANDN, + name: "VPANDN256", + argLen: 2, + asm: x86.AVPANDN, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25455,10 +25442,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDN128", - argLen: 2, - commutative: 
true, - asm: x86.AVPANDN, + name: "VPANDN128", + argLen: 2, + asm: x86.AVPANDN, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25972,10 +25958,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDND512", - argLen: 2, - commutative: true, - asm: x86.AVPANDND, + name: "VPANDND512", + argLen: 2, + asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26062,10 +26047,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPANDND, + name: "VPANDNDMasked512", + argLen: 3, + asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26555,10 +26539,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPANDND, + name: "VPANDNDMasked128", + argLen: 3, + asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -27075,10 +27058,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPANDND, + name: "VPANDNDMasked256", + argLen: 3, + asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -27595,10 +27577,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNQMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPANDNQ, + name: "VPANDNQMasked128", + argLen: 3, + asm: x86.AVPANDNQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -27942,10 +27923,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNQMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPANDNQ, + name: "VPANDNQMasked256", + argLen: 3, + asm: x86.AVPANDNQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -28229,10 +28209,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNQ512", - argLen: 2, - commutative: true, - asm: x86.AVPANDNQ, + name: "VPANDNQ512", + argLen: 2, + asm: x86.AVPANDNQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28319,10 +28298,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNQMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPANDNQ, + name: "VPANDNQMasked512", + argLen: 3, + asm: x86.AVPANDNQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -59277,10 +59255,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotFloat32x16", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotFloat32x16", + argLen: 2, + generic: true, }, { name: "ApproximateReciprocalFloat32x16", @@ -59432,10 +59409,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MaskedAndNotFloat32x16", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedAndNotFloat32x16", + argLen: 3, + generic: true, }, { name: "MaskedApproximateReciprocalFloat32x16", @@ -59694,10 +59670,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotFloat32x4", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotFloat32x4", + argLen: 2, + generic: true, }, { name: "ApproximateReciprocalFloat32x4", @@ -59859,10 +59834,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MaskedAndNotFloat32x4", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedAndNotFloat32x4", + argLen: 3, + generic: true, }, { name: 
"MaskedApproximateReciprocalFloat32x4", @@ -60141,10 +60115,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotFloat32x8", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotFloat32x8", + argLen: 2, + generic: true, }, { name: "ApproximateReciprocalFloat32x8", @@ -60306,10 +60279,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MaskedAndNotFloat32x8", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedAndNotFloat32x8", + argLen: 3, + generic: true, }, { name: "MaskedApproximateReciprocalFloat32x8", @@ -60588,10 +60560,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotFloat64x2", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotFloat64x2", + argLen: 2, + generic: true, }, { name: "ApproximateReciprocalFloat64x2", @@ -60759,10 +60730,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MaskedAndNotFloat64x2", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedAndNotFloat64x2", + argLen: 3, + generic: true, }, { name: "MaskedApproximateReciprocalFloat64x2", @@ -61041,10 +61011,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotFloat64x4", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotFloat64x4", + argLen: 2, + generic: true, }, { name: "ApproximateReciprocalFloat64x4", @@ -61206,10 +61175,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MaskedAndNotFloat64x4", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedAndNotFloat64x4", + argLen: 3, + generic: true, }, { name: "MaskedApproximateReciprocalFloat64x4", @@ -61483,10 +61451,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotFloat64x8", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotFloat64x8", + argLen: 2, + generic: true, }, { name: "ApproximateReciprocalFloat64x8", @@ -61638,10 +61605,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MaskedAndNotFloat64x8", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedAndNotFloat64x8", + argLen: 3, + generic: true, }, { name: "MaskedApproximateReciprocalFloat64x8", @@ -61900,10 +61866,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotInt16x16", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotInt16x16", + argLen: 2, + generic: true, }, { name: "EqualInt16x16", @@ -62321,10 +62286,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotInt16x8", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotInt16x8", + argLen: 2, + generic: true, }, { name: "EqualInt16x8", @@ -62556,10 +62520,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotInt32x16", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotInt32x16", + argLen: 2, + generic: true, }, { name: "EqualInt32x16", @@ -62605,10 +62568,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MaskedAndNotInt32x16", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedAndNotInt32x16", + argLen: 3, + generic: true, }, { name: "MaskedEqualInt32x16", @@ -62786,10 +62748,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotInt32x4", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotInt32x4", + argLen: 2, + generic: true, }, { name: "EqualInt32x4", @@ -62835,10 +62796,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MaskedAndNotInt32x4", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedAndNotInt32x4", + 
argLen: 3, + generic: true, }, { name: "MaskedEqualInt32x4", @@ -63037,10 +62997,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotInt32x8", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotInt32x8", + argLen: 2, + generic: true, }, { name: "EqualInt32x8", @@ -63086,10 +63045,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MaskedAndNotInt32x8", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedAndNotInt32x8", + argLen: 3, + generic: true, }, { name: "MaskedEqualInt32x8", @@ -63288,10 +63246,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotInt64x2", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotInt64x2", + argLen: 2, + generic: true, }, { name: "EqualInt64x2", @@ -63337,10 +63294,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MaskedAndNotInt64x2", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedAndNotInt64x2", + argLen: 3, + generic: true, }, { name: "MaskedEqualInt64x2", @@ -63490,10 +63446,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotInt64x4", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotInt64x4", + argLen: 2, + generic: true, }, { name: "EqualInt64x4", @@ -63539,10 +63494,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MaskedAndNotInt64x4", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedAndNotInt64x4", + argLen: 3, + generic: true, }, { name: "MaskedEqualInt64x4", @@ -63692,10 +63646,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotInt64x8", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotInt64x8", + argLen: 2, + generic: true, }, { name: "EqualInt64x8", @@ -63741,10 +63694,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MaskedAndNotInt64x8", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedAndNotInt64x8", + argLen: 3, + generic: true, }, { name: "MaskedEqualInt64x8", @@ -63894,10 +63846,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotInt8x16", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotInt8x16", + argLen: 2, + generic: true, }, { name: "EqualInt8x16", @@ -64075,10 +64026,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotInt8x32", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotInt8x32", + argLen: 2, + generic: true, }, { name: "EqualInt8x32", @@ -64403,10 +64353,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotUint16x16", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotUint16x16", + argLen: 2, + generic: true, }, { name: "AverageUint16x16", @@ -64789,10 +64738,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotUint16x8", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotUint16x8", + argLen: 2, + generic: true, }, { name: "AverageUint16x8", @@ -64999,10 +64947,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotUint32x16", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotUint32x16", + argLen: 2, + generic: true, }, { name: "EqualUint32x16", @@ -65043,10 +64990,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MaskedAndNotUint32x16", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedAndNotUint32x16", + argLen: 3, + generic: true, }, { name: "MaskedEqualUint32x16", @@ -65187,10 +65133,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotUint32x4", 
- argLen: 2, - commutative: true, - generic: true, + name: "AndNotUint32x4", + argLen: 2, + generic: true, }, { name: "EqualUint32x4", @@ -65231,10 +65176,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MaskedAndNotUint32x4", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedAndNotUint32x4", + argLen: 3, + generic: true, }, { name: "MaskedEqualUint32x4", @@ -65391,10 +65335,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotUint32x8", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotUint32x8", + argLen: 2, + generic: true, }, { name: "EqualUint32x8", @@ -65435,10 +65378,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MaskedAndNotUint32x8", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedAndNotUint32x8", + argLen: 3, + generic: true, }, { name: "MaskedEqualUint32x8", @@ -65595,10 +65537,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotUint64x2", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotUint64x2", + argLen: 2, + generic: true, }, { name: "EqualUint64x2", @@ -65639,10 +65580,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MaskedAndNotUint64x2", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedAndNotUint64x2", + argLen: 3, + generic: true, }, { name: "MaskedEqualUint64x2", @@ -65775,10 +65715,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotUint64x4", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotUint64x4", + argLen: 2, + generic: true, }, { name: "EqualUint64x4", @@ -65819,10 +65758,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MaskedAndNotUint64x4", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedAndNotUint64x4", + argLen: 3, + generic: true, }, { name: "MaskedEqualUint64x4", @@ -65955,10 +65893,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotUint64x8", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotUint64x8", + argLen: 2, + generic: true, }, { name: "EqualUint64x8", @@ -65999,10 +65936,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MaskedAndNotUint64x8", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedAndNotUint64x8", + argLen: 3, + generic: true, }, { name: "MaskedEqualUint64x8", @@ -66135,10 +66071,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotUint8x16", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotUint8x16", + argLen: 2, + generic: true, }, { name: "AverageUint8x16", @@ -66318,10 +66253,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotUint8x32", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotUint8x32", + argLen: 2, + generic: true, }, { name: "AverageUint8x32", From 6c50c8b892bc032960ac8ab23c78765be52f904f Mon Sep 17 00:00:00 2001 From: David Chase Date: Fri, 13 Jun 2025 16:10:22 -0400 Subject: [PATCH 029/139] [dev.simd] cmd/compile: move simd helpers into compiler, out of generated code PAIRED w/ arch/internal/simdgen CL 681615 This moves the helpers out of the generated code. 
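As a rough usage sketch (not part of this CL): these helpers return intrinsicBuilder closures that the generated simdintrinsics.go registration is expected to consume, one per simd method. The addF-style call, the "Int32x4.Add" method name, and the op/type constants below are illustrative assumptions, not lines from this change:

    // Hypothetical registration: opLen2 wraps a two-argument SSA op in an
    // intrinsicBuilder, so the simd method call lowers to a single newValue2.
    // Names of the op and vector type are placeholders for the generated code.
    addF("simd", "Int32x4.Add",
        opLen2(ssa.OpAddInt32x4, types.TypeVec128),
        sys.AMD64)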
Change-Id: I6150afd45dbdf8d1499e0b8ee80c1bd8be5d558e Reviewed-on: https://go-review.googlesource.com/c/go/+/681500 Reviewed-by: Cherry Mui LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/ssagen/intrinsics.go | 101 ++++++++++++++++++ .../compile/internal/ssagen/simdintrinsics.go | 101 ------------------ 2 files changed, 101 insertions(+), 101 deletions(-) diff --git a/src/cmd/compile/internal/ssagen/intrinsics.go b/src/cmd/compile/internal/ssagen/intrinsics.go index d3a16a0f2431eb..186cfc4865ed18 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics.go +++ b/src/cmd/compile/internal/ssagen/intrinsics.go @@ -1609,6 +1609,107 @@ func initIntrinsics(cfg *intrinsicBuildConfig) { } } +func opLen1(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue1(op, t, args[0]) + } +} + +func opLen2(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue2(op, t, args[0], args[1]) + } +} + +func opLen3(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue3(op, t, args[0], args[1], args[2]) + } +} + +func opLen4(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue4(op, t, args[0], args[1], args[2], args[3]) + } +} + +func plainPanicSimdImm(s *state) { + cmp := s.newValue0(ssa.OpConstBool, types.Types[types.TBOOL]) + cmp.AuxInt = 1 + // TODO: make this a standalone panic instead of reusing the overflow panic. + // Or maybe after we implement the switch table this will be obsolete anyway. + s.check(cmp, ir.Syms.Panicoverflow) +} + +func opLen1Imm8(op ssa.Op, t *types.Type, offset int) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + if args[1].Op == ssa.OpConst8 { + return s.newValue1I(op, t, args[1].AuxInt< Date: Mon, 16 Jun 2025 20:11:27 +0000 Subject: [PATCH 030/139] [dev.simd] cmd/compile: reorder stubs This CL is generated by CL 682035. 
Change-Id: I0a8b7382470afb5a6571ab7d4abe038de0ff239e Reviewed-on: https://go-review.googlesource.com/c/go/+/682055 Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase Auto-Submit: Junyang Shao --- src/cmd/compile/internal/amd64/simdssa.go | 755 +- .../compile/internal/ssa/_gen/simdAMD64.rules | 90 +- .../compile/internal/ssa/_gen/simdAMD64ops.go | 93 +- src/cmd/compile/internal/ssa/opGen.go | 1093 +- src/cmd/compile/internal/ssa/rewriteAMD64.go | 201 +- .../compile/internal/ssagen/simdintrinsics.go | 100 +- src/simd/stubs_amd64.go | 10854 ++++++++-------- src/simd/types_amd64.go | 264 +- 8 files changed, 6704 insertions(+), 6746 deletions(-) diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 5fc068c895c6b7..484c389cef254f 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -12,21 +12,21 @@ import ( func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { var p *obj.Prog switch v.Op { - case ssa.OpAMD64VPABSW256, + case ssa.OpAMD64VPABSB128, + ssa.OpAMD64VPABSB256, + ssa.OpAMD64VPABSB512, ssa.OpAMD64VPABSW128, + ssa.OpAMD64VPABSW256, + ssa.OpAMD64VPABSW512, ssa.OpAMD64VPABSD128, ssa.OpAMD64VPABSD256, - ssa.OpAMD64VPABSB128, - ssa.OpAMD64VPABSB256, - ssa.OpAMD64VPABSW512, ssa.OpAMD64VPABSD512, ssa.OpAMD64VPABSQ128, ssa.OpAMD64VPABSQ256, ssa.OpAMD64VPABSQ512, - ssa.OpAMD64VPABSB512, - ssa.OpAMD64VRCP14PS512, ssa.OpAMD64VRCP14PS128, ssa.OpAMD64VRCP14PS256, + ssa.OpAMD64VRCP14PS512, ssa.OpAMD64VRCP14PD128, ssa.OpAMD64VRCP14PD256, ssa.OpAMD64VRCP14PD512, @@ -36,400 +36,395 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VRSQRT14PD128, ssa.OpAMD64VRSQRT14PD256, ssa.OpAMD64VRSQRT14PD512, + ssa.OpAMD64VPOPCNTB128, + ssa.OpAMD64VPOPCNTB256, + ssa.OpAMD64VPOPCNTB512, + ssa.OpAMD64VPOPCNTW128, ssa.OpAMD64VPOPCNTW256, ssa.OpAMD64VPOPCNTW512, - ssa.OpAMD64VPOPCNTW128, - ssa.OpAMD64VPOPCNTD512, ssa.OpAMD64VPOPCNTD128, ssa.OpAMD64VPOPCNTD256, + ssa.OpAMD64VPOPCNTD512, ssa.OpAMD64VPOPCNTQ128, ssa.OpAMD64VPOPCNTQ256, ssa.OpAMD64VPOPCNTQ512, - ssa.OpAMD64VPOPCNTB128, - ssa.OpAMD64VPOPCNTB256, - ssa.OpAMD64VPOPCNTB512, ssa.OpAMD64VSQRTPS128, ssa.OpAMD64VSQRTPS256, + ssa.OpAMD64VSQRTPS512, ssa.OpAMD64VSQRTPD128, ssa.OpAMD64VSQRTPD256, - ssa.OpAMD64VSQRTPS512, ssa.OpAMD64VSQRTPD512: p = simdFp11(s, v) case ssa.OpAMD64VADDPS128, ssa.OpAMD64VADDPS256, + ssa.OpAMD64VADDPS512, ssa.OpAMD64VADDPD128, ssa.OpAMD64VADDPD256, - ssa.OpAMD64VPADDW256, + ssa.OpAMD64VADDPD512, + ssa.OpAMD64VPADDB128, + ssa.OpAMD64VPADDB256, + ssa.OpAMD64VPADDB512, ssa.OpAMD64VPADDW128, + ssa.OpAMD64VPADDW256, + ssa.OpAMD64VPADDW512, ssa.OpAMD64VPADDD128, ssa.OpAMD64VPADDD256, + ssa.OpAMD64VPADDD512, ssa.OpAMD64VPADDQ128, ssa.OpAMD64VPADDQ256, - ssa.OpAMD64VPADDB128, - ssa.OpAMD64VPADDB256, - ssa.OpAMD64VADDPS512, - ssa.OpAMD64VADDPD512, - ssa.OpAMD64VPADDW512, - ssa.OpAMD64VPADDD512, ssa.OpAMD64VPADDQ512, - ssa.OpAMD64VPADDB512, ssa.OpAMD64VADDSUBPS128, ssa.OpAMD64VADDSUBPS256, ssa.OpAMD64VADDSUBPD128, ssa.OpAMD64VADDSUBPD256, ssa.OpAMD64VANDPS128, ssa.OpAMD64VANDPS256, + ssa.OpAMD64VANDPS512, ssa.OpAMD64VANDPD128, ssa.OpAMD64VANDPD256, - ssa.OpAMD64VPAND256, - ssa.OpAMD64VPAND128, - ssa.OpAMD64VANDPS512, ssa.OpAMD64VANDPD512, + ssa.OpAMD64VPAND128, + ssa.OpAMD64VPAND256, ssa.OpAMD64VPANDD512, ssa.OpAMD64VPANDQ512, ssa.OpAMD64VANDNPS128, ssa.OpAMD64VANDNPS256, + ssa.OpAMD64VANDNPS512, ssa.OpAMD64VANDNPD128, ssa.OpAMD64VANDNPD256, - ssa.OpAMD64VPANDN256, - ssa.OpAMD64VPANDN128, - 
ssa.OpAMD64VANDNPS512, ssa.OpAMD64VANDNPD512, + ssa.OpAMD64VPANDN128, + ssa.OpAMD64VPANDN256, ssa.OpAMD64VPANDND512, ssa.OpAMD64VPANDNQ512, - ssa.OpAMD64VPAVGW256, - ssa.OpAMD64VPAVGW128, ssa.OpAMD64VPAVGB128, ssa.OpAMD64VPAVGB256, - ssa.OpAMD64VPAVGW512, ssa.OpAMD64VPAVGB512, + ssa.OpAMD64VPAVGW128, + ssa.OpAMD64VPAVGW256, + ssa.OpAMD64VPAVGW512, ssa.OpAMD64VDIVPS128, ssa.OpAMD64VDIVPS256, + ssa.OpAMD64VDIVPS512, ssa.OpAMD64VDIVPD128, ssa.OpAMD64VDIVPD256, - ssa.OpAMD64VDIVPS512, ssa.OpAMD64VDIVPD512, - ssa.OpAMD64VPCMPEQW256, + ssa.OpAMD64VPCMPEQB128, + ssa.OpAMD64VPCMPEQB256, ssa.OpAMD64VPCMPEQW128, + ssa.OpAMD64VPCMPEQW256, ssa.OpAMD64VPCMPEQD128, ssa.OpAMD64VPCMPEQD256, ssa.OpAMD64VPCMPEQQ128, ssa.OpAMD64VPCMPEQQ256, - ssa.OpAMD64VPCMPEQB128, - ssa.OpAMD64VPCMPEQB256, - ssa.OpAMD64VPCMPGTW256, + ssa.OpAMD64VPCMPGTB128, + ssa.OpAMD64VPCMPGTB256, ssa.OpAMD64VPCMPGTW128, + ssa.OpAMD64VPCMPGTW256, ssa.OpAMD64VPCMPGTD128, ssa.OpAMD64VPCMPGTD256, ssa.OpAMD64VPCMPGTQ256, - ssa.OpAMD64VPCMPGTB128, - ssa.OpAMD64VPCMPGTB256, ssa.OpAMD64VMAXPS128, ssa.OpAMD64VMAXPS256, + ssa.OpAMD64VMAXPS512, ssa.OpAMD64VMAXPD128, ssa.OpAMD64VMAXPD256, - ssa.OpAMD64VPMAXSW256, - ssa.OpAMD64VPMAXSW128, - ssa.OpAMD64VPMAXSD128, - ssa.OpAMD64VPMAXSD256, + ssa.OpAMD64VMAXPD512, ssa.OpAMD64VPMAXSB128, ssa.OpAMD64VPMAXSB256, - ssa.OpAMD64VPMAXUW256, - ssa.OpAMD64VPMAXUW128, - ssa.OpAMD64VPMAXUD128, - ssa.OpAMD64VPMAXUD256, - ssa.OpAMD64VPMAXUB128, - ssa.OpAMD64VPMAXUB256, - ssa.OpAMD64VMAXPS512, - ssa.OpAMD64VMAXPD512, + ssa.OpAMD64VPMAXSB512, + ssa.OpAMD64VPMAXSW128, + ssa.OpAMD64VPMAXSW256, ssa.OpAMD64VPMAXSW512, + ssa.OpAMD64VPMAXSD128, + ssa.OpAMD64VPMAXSD256, ssa.OpAMD64VPMAXSD512, ssa.OpAMD64VPMAXSQ128, ssa.OpAMD64VPMAXSQ256, ssa.OpAMD64VPMAXSQ512, - ssa.OpAMD64VPMAXSB512, + ssa.OpAMD64VPMAXUB128, + ssa.OpAMD64VPMAXUB256, + ssa.OpAMD64VPMAXUB512, + ssa.OpAMD64VPMAXUW128, + ssa.OpAMD64VPMAXUW256, ssa.OpAMD64VPMAXUW512, + ssa.OpAMD64VPMAXUD128, + ssa.OpAMD64VPMAXUD256, ssa.OpAMD64VPMAXUD512, ssa.OpAMD64VPMAXUQ128, ssa.OpAMD64VPMAXUQ256, ssa.OpAMD64VPMAXUQ512, - ssa.OpAMD64VPMAXUB512, ssa.OpAMD64VMINPS128, ssa.OpAMD64VMINPS256, + ssa.OpAMD64VMINPS512, ssa.OpAMD64VMINPD128, ssa.OpAMD64VMINPD256, - ssa.OpAMD64VPMINSW256, - ssa.OpAMD64VPMINSW128, - ssa.OpAMD64VPMINSD128, - ssa.OpAMD64VPMINSD256, + ssa.OpAMD64VMINPD512, ssa.OpAMD64VPMINSB128, ssa.OpAMD64VPMINSB256, - ssa.OpAMD64VPMINUW256, - ssa.OpAMD64VPMINUW128, - ssa.OpAMD64VPMINUD128, - ssa.OpAMD64VPMINUD256, - ssa.OpAMD64VPMINUB128, - ssa.OpAMD64VPMINUB256, - ssa.OpAMD64VMINPS512, - ssa.OpAMD64VMINPD512, + ssa.OpAMD64VPMINSB512, + ssa.OpAMD64VPMINSW128, + ssa.OpAMD64VPMINSW256, ssa.OpAMD64VPMINSW512, + ssa.OpAMD64VPMINSD128, + ssa.OpAMD64VPMINSD256, ssa.OpAMD64VPMINSD512, ssa.OpAMD64VPMINSQ128, ssa.OpAMD64VPMINSQ256, ssa.OpAMD64VPMINSQ512, - ssa.OpAMD64VPMINSB512, + ssa.OpAMD64VPMINUB128, + ssa.OpAMD64VPMINUB256, + ssa.OpAMD64VPMINUB512, + ssa.OpAMD64VPMINUW128, + ssa.OpAMD64VPMINUW256, ssa.OpAMD64VPMINUW512, + ssa.OpAMD64VPMINUD128, + ssa.OpAMD64VPMINUD256, ssa.OpAMD64VPMINUD512, ssa.OpAMD64VPMINUQ128, ssa.OpAMD64VPMINUQ256, ssa.OpAMD64VPMINUQ512, - ssa.OpAMD64VPMINUB512, ssa.OpAMD64VMULPS128, ssa.OpAMD64VMULPS256, + ssa.OpAMD64VMULPS512, ssa.OpAMD64VMULPD128, ssa.OpAMD64VMULPD256, - ssa.OpAMD64VMULPS512, ssa.OpAMD64VMULPD512, - ssa.OpAMD64VSCALEFPS512, ssa.OpAMD64VSCALEFPS128, ssa.OpAMD64VSCALEFPS256, + ssa.OpAMD64VSCALEFPS512, ssa.OpAMD64VSCALEFPD128, ssa.OpAMD64VSCALEFPD256, ssa.OpAMD64VSCALEFPD512, ssa.OpAMD64VPMULDQ128, ssa.OpAMD64VPMULDQ256, + 
ssa.OpAMD64VPMULDQ512, ssa.OpAMD64VPMULUDQ128, ssa.OpAMD64VPMULUDQ256, - ssa.OpAMD64VPMULDQ512, ssa.OpAMD64VPMULUDQ512, - ssa.OpAMD64VPMULHW256, ssa.OpAMD64VPMULHW128, - ssa.OpAMD64VPMULHUW256, - ssa.OpAMD64VPMULHUW128, + ssa.OpAMD64VPMULHW256, ssa.OpAMD64VPMULHW512, + ssa.OpAMD64VPMULHUW128, + ssa.OpAMD64VPMULHUW256, ssa.OpAMD64VPMULHUW512, - ssa.OpAMD64VPMULLW256, ssa.OpAMD64VPMULLW128, + ssa.OpAMD64VPMULLW256, + ssa.OpAMD64VPMULLW512, ssa.OpAMD64VPMULLD128, ssa.OpAMD64VPMULLD256, - ssa.OpAMD64VPMULLW512, ssa.OpAMD64VPMULLD512, ssa.OpAMD64VPMULLQ128, ssa.OpAMD64VPMULLQ256, ssa.OpAMD64VPMULLQ512, ssa.OpAMD64VORPS128, ssa.OpAMD64VORPS256, + ssa.OpAMD64VORPS512, ssa.OpAMD64VORPD128, ssa.OpAMD64VORPD256, - ssa.OpAMD64VPOR256, - ssa.OpAMD64VPOR128, - ssa.OpAMD64VORPS512, ssa.OpAMD64VORPD512, + ssa.OpAMD64VPOR128, + ssa.OpAMD64VPOR256, ssa.OpAMD64VPORD512, ssa.OpAMD64VPORQ512, - ssa.OpAMD64VPMADDWD256, ssa.OpAMD64VPMADDWD128, + ssa.OpAMD64VPMADDWD256, ssa.OpAMD64VPMADDWD512, ssa.OpAMD64VHADDPS128, ssa.OpAMD64VHADDPS256, ssa.OpAMD64VHADDPD128, ssa.OpAMD64VHADDPD256, - ssa.OpAMD64VPHADDW256, ssa.OpAMD64VPHADDW128, + ssa.OpAMD64VPHADDW256, ssa.OpAMD64VPHADDD128, ssa.OpAMD64VPHADDD256, ssa.OpAMD64VHSUBPS128, ssa.OpAMD64VHSUBPS256, ssa.OpAMD64VHSUBPD128, ssa.OpAMD64VHSUBPD256, - ssa.OpAMD64VPHSUBW256, ssa.OpAMD64VPHSUBW128, + ssa.OpAMD64VPHSUBW256, ssa.OpAMD64VPHSUBD128, ssa.OpAMD64VPHSUBD256, - ssa.OpAMD64VPADDSW256, - ssa.OpAMD64VPADDSW128, ssa.OpAMD64VPADDSB128, ssa.OpAMD64VPADDSB256, - ssa.OpAMD64VPADDSW512, ssa.OpAMD64VPADDSB512, - ssa.OpAMD64VPHADDSW256, + ssa.OpAMD64VPADDSW128, + ssa.OpAMD64VPADDSW256, + ssa.OpAMD64VPADDSW512, ssa.OpAMD64VPHADDSW128, - ssa.OpAMD64VPHSUBSW256, + ssa.OpAMD64VPHADDSW256, ssa.OpAMD64VPHSUBSW128, - ssa.OpAMD64VPSUBSW256, - ssa.OpAMD64VPSUBSW128, + ssa.OpAMD64VPHSUBSW256, ssa.OpAMD64VPSUBSB128, ssa.OpAMD64VPSUBSB256, - ssa.OpAMD64VPSUBSW512, ssa.OpAMD64VPSUBSB512, + ssa.OpAMD64VPSUBSW128, + ssa.OpAMD64VPSUBSW256, + ssa.OpAMD64VPSUBSW512, ssa.OpAMD64VPMADDUBSW128, ssa.OpAMD64VPMADDUBSW256, ssa.OpAMD64VPMADDUBSW512, - ssa.OpAMD64VPSIGNW256, + ssa.OpAMD64VPSIGNB128, + ssa.OpAMD64VPSIGNB256, ssa.OpAMD64VPSIGNW128, + ssa.OpAMD64VPSIGNW256, ssa.OpAMD64VPSIGND128, ssa.OpAMD64VPSIGND256, - ssa.OpAMD64VPSIGNB128, - ssa.OpAMD64VPSIGNB256, - ssa.OpAMD64VPSUBW256, + ssa.OpAMD64VSUBPS128, + ssa.OpAMD64VSUBPS256, + ssa.OpAMD64VSUBPS512, + ssa.OpAMD64VSUBPD128, + ssa.OpAMD64VSUBPD256, + ssa.OpAMD64VSUBPD512, + ssa.OpAMD64VPSUBB128, + ssa.OpAMD64VPSUBB256, + ssa.OpAMD64VPSUBB512, ssa.OpAMD64VPSUBW128, + ssa.OpAMD64VPSUBW256, + ssa.OpAMD64VPSUBW512, ssa.OpAMD64VPSUBD128, ssa.OpAMD64VPSUBD256, + ssa.OpAMD64VPSUBD512, ssa.OpAMD64VPSUBQ128, ssa.OpAMD64VPSUBQ256, - ssa.OpAMD64VPSUBB128, - ssa.OpAMD64VPSUBB256, - ssa.OpAMD64VPSUBW512, - ssa.OpAMD64VPSUBD512, ssa.OpAMD64VPSUBQ512, - ssa.OpAMD64VPSUBB512, ssa.OpAMD64VXORPS128, ssa.OpAMD64VXORPS256, + ssa.OpAMD64VXORPS512, ssa.OpAMD64VXORPD128, ssa.OpAMD64VXORPD256, - ssa.OpAMD64VPXOR256, - ssa.OpAMD64VPXOR128, - ssa.OpAMD64VXORPS512, ssa.OpAMD64VXORPD512, + ssa.OpAMD64VPXOR128, + ssa.OpAMD64VPXOR256, ssa.OpAMD64VPXORD512, ssa.OpAMD64VPXORQ512: p = simdFp21(s, v) - case ssa.OpAMD64VPCMPEQW512, - ssa.OpAMD64VPCMPEQD512, - ssa.OpAMD64VPCMPEQQ512, - ssa.OpAMD64VPCMPEQB512, - ssa.OpAMD64VPCMPGTW512, - ssa.OpAMD64VPCMPGTD512, - ssa.OpAMD64VPCMPGTQ128, - ssa.OpAMD64VPCMPGTQ512, - ssa.OpAMD64VPCMPGTB512: - p = simdFp2k1(s, v) - - case ssa.OpAMD64VADDPSMasked512, - ssa.OpAMD64VADDPSMasked128, + case ssa.OpAMD64VADDPSMasked128, 
ssa.OpAMD64VADDPSMasked256, + ssa.OpAMD64VADDPSMasked512, ssa.OpAMD64VADDPDMasked128, ssa.OpAMD64VADDPDMasked256, ssa.OpAMD64VADDPDMasked512, + ssa.OpAMD64VPADDBMasked128, + ssa.OpAMD64VPADDBMasked256, + ssa.OpAMD64VPADDBMasked512, + ssa.OpAMD64VPADDWMasked128, ssa.OpAMD64VPADDWMasked256, ssa.OpAMD64VPADDWMasked512, - ssa.OpAMD64VPADDWMasked128, - ssa.OpAMD64VPADDDMasked512, ssa.OpAMD64VPADDDMasked128, ssa.OpAMD64VPADDDMasked256, + ssa.OpAMD64VPADDDMasked512, ssa.OpAMD64VPADDQMasked128, ssa.OpAMD64VPADDQMasked256, ssa.OpAMD64VPADDQMasked512, - ssa.OpAMD64VPADDBMasked128, - ssa.OpAMD64VPADDBMasked256, - ssa.OpAMD64VPADDBMasked512, - ssa.OpAMD64VANDPSMasked512, ssa.OpAMD64VANDPSMasked128, ssa.OpAMD64VANDPSMasked256, + ssa.OpAMD64VANDPSMasked512, ssa.OpAMD64VANDPDMasked128, ssa.OpAMD64VANDPDMasked256, ssa.OpAMD64VANDPDMasked512, - ssa.OpAMD64VPANDDMasked512, ssa.OpAMD64VPANDDMasked128, ssa.OpAMD64VPANDDMasked256, + ssa.OpAMD64VPANDDMasked512, ssa.OpAMD64VPANDQMasked128, ssa.OpAMD64VPANDQMasked256, ssa.OpAMD64VPANDQMasked512, - ssa.OpAMD64VANDNPSMasked512, ssa.OpAMD64VANDNPSMasked128, ssa.OpAMD64VANDNPSMasked256, + ssa.OpAMD64VANDNPSMasked512, ssa.OpAMD64VANDNPDMasked128, ssa.OpAMD64VANDNPDMasked256, ssa.OpAMD64VANDNPDMasked512, - ssa.OpAMD64VPANDNDMasked512, ssa.OpAMD64VPANDNDMasked128, ssa.OpAMD64VPANDNDMasked256, + ssa.OpAMD64VPANDNDMasked512, ssa.OpAMD64VPANDNQMasked128, ssa.OpAMD64VPANDNQMasked256, ssa.OpAMD64VPANDNQMasked512, - ssa.OpAMD64VPAVGWMasked256, - ssa.OpAMD64VPAVGWMasked512, - ssa.OpAMD64VPAVGWMasked128, ssa.OpAMD64VPAVGBMasked128, ssa.OpAMD64VPAVGBMasked256, ssa.OpAMD64VPAVGBMasked512, - ssa.OpAMD64VDIVPSMasked512, + ssa.OpAMD64VPAVGWMasked128, + ssa.OpAMD64VPAVGWMasked256, + ssa.OpAMD64VPAVGWMasked512, ssa.OpAMD64VDIVPSMasked128, ssa.OpAMD64VDIVPSMasked256, + ssa.OpAMD64VDIVPSMasked512, ssa.OpAMD64VDIVPDMasked128, ssa.OpAMD64VDIVPDMasked256, ssa.OpAMD64VDIVPDMasked512, - ssa.OpAMD64VMAXPSMasked512, ssa.OpAMD64VMAXPSMasked128, ssa.OpAMD64VMAXPSMasked256, + ssa.OpAMD64VMAXPSMasked512, ssa.OpAMD64VMAXPDMasked128, ssa.OpAMD64VMAXPDMasked256, ssa.OpAMD64VMAXPDMasked512, + ssa.OpAMD64VPMAXSBMasked128, + ssa.OpAMD64VPMAXSBMasked256, + ssa.OpAMD64VPMAXSBMasked512, + ssa.OpAMD64VPMAXSWMasked128, ssa.OpAMD64VPMAXSWMasked256, ssa.OpAMD64VPMAXSWMasked512, - ssa.OpAMD64VPMAXSWMasked128, - ssa.OpAMD64VPMAXSDMasked512, ssa.OpAMD64VPMAXSDMasked128, ssa.OpAMD64VPMAXSDMasked256, + ssa.OpAMD64VPMAXSDMasked512, ssa.OpAMD64VPMAXSQMasked128, ssa.OpAMD64VPMAXSQMasked256, ssa.OpAMD64VPMAXSQMasked512, - ssa.OpAMD64VPMAXSBMasked128, - ssa.OpAMD64VPMAXSBMasked256, - ssa.OpAMD64VPMAXSBMasked512, + ssa.OpAMD64VPMAXUBMasked128, + ssa.OpAMD64VPMAXUBMasked256, + ssa.OpAMD64VPMAXUBMasked512, + ssa.OpAMD64VPMAXUWMasked128, ssa.OpAMD64VPMAXUWMasked256, ssa.OpAMD64VPMAXUWMasked512, - ssa.OpAMD64VPMAXUWMasked128, - ssa.OpAMD64VPMAXUDMasked512, ssa.OpAMD64VPMAXUDMasked128, ssa.OpAMD64VPMAXUDMasked256, + ssa.OpAMD64VPMAXUDMasked512, ssa.OpAMD64VPMAXUQMasked128, ssa.OpAMD64VPMAXUQMasked256, ssa.OpAMD64VPMAXUQMasked512, - ssa.OpAMD64VPMAXUBMasked128, - ssa.OpAMD64VPMAXUBMasked256, - ssa.OpAMD64VPMAXUBMasked512, - ssa.OpAMD64VMINPSMasked512, ssa.OpAMD64VMINPSMasked128, ssa.OpAMD64VMINPSMasked256, + ssa.OpAMD64VMINPSMasked512, ssa.OpAMD64VMINPDMasked128, ssa.OpAMD64VMINPDMasked256, ssa.OpAMD64VMINPDMasked512, + ssa.OpAMD64VPMINSBMasked128, + ssa.OpAMD64VPMINSBMasked256, + ssa.OpAMD64VPMINSBMasked512, + ssa.OpAMD64VPMINSWMasked128, ssa.OpAMD64VPMINSWMasked256, ssa.OpAMD64VPMINSWMasked512, - ssa.OpAMD64VPMINSWMasked128, 
- ssa.OpAMD64VPMINSDMasked512, ssa.OpAMD64VPMINSDMasked128, ssa.OpAMD64VPMINSDMasked256, + ssa.OpAMD64VPMINSDMasked512, ssa.OpAMD64VPMINSQMasked128, ssa.OpAMD64VPMINSQMasked256, ssa.OpAMD64VPMINSQMasked512, - ssa.OpAMD64VPMINSBMasked128, - ssa.OpAMD64VPMINSBMasked256, - ssa.OpAMD64VPMINSBMasked512, + ssa.OpAMD64VPMINUBMasked128, + ssa.OpAMD64VPMINUBMasked256, + ssa.OpAMD64VPMINUBMasked512, + ssa.OpAMD64VPMINUWMasked128, ssa.OpAMD64VPMINUWMasked256, ssa.OpAMD64VPMINUWMasked512, - ssa.OpAMD64VPMINUWMasked128, - ssa.OpAMD64VPMINUDMasked512, ssa.OpAMD64VPMINUDMasked128, ssa.OpAMD64VPMINUDMasked256, + ssa.OpAMD64VPMINUDMasked512, ssa.OpAMD64VPMINUQMasked128, ssa.OpAMD64VPMINUQMasked256, ssa.OpAMD64VPMINUQMasked512, - ssa.OpAMD64VPMINUBMasked128, - ssa.OpAMD64VPMINUBMasked256, - ssa.OpAMD64VPMINUBMasked512, - ssa.OpAMD64VMULPSMasked512, ssa.OpAMD64VMULPSMasked128, ssa.OpAMD64VMULPSMasked256, + ssa.OpAMD64VMULPSMasked512, ssa.OpAMD64VMULPDMasked128, ssa.OpAMD64VMULPDMasked256, ssa.OpAMD64VMULPDMasked512, - ssa.OpAMD64VSCALEFPSMasked512, ssa.OpAMD64VSCALEFPSMasked128, ssa.OpAMD64VSCALEFPSMasked256, + ssa.OpAMD64VSCALEFPSMasked512, ssa.OpAMD64VSCALEFPDMasked128, ssa.OpAMD64VSCALEFPDMasked256, ssa.OpAMD64VSCALEFPDMasked512, @@ -439,142 +434,122 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMULUDQMasked128, ssa.OpAMD64VPMULUDQMasked256, ssa.OpAMD64VPMULUDQMasked512, + ssa.OpAMD64VPMULHWMasked128, ssa.OpAMD64VPMULHWMasked256, ssa.OpAMD64VPMULHWMasked512, - ssa.OpAMD64VPMULHWMasked128, + ssa.OpAMD64VPMULHUWMasked128, ssa.OpAMD64VPMULHUWMasked256, ssa.OpAMD64VPMULHUWMasked512, - ssa.OpAMD64VPMULHUWMasked128, + ssa.OpAMD64VPMULLWMasked128, ssa.OpAMD64VPMULLWMasked256, ssa.OpAMD64VPMULLWMasked512, - ssa.OpAMD64VPMULLWMasked128, - ssa.OpAMD64VPMULLDMasked512, ssa.OpAMD64VPMULLDMasked128, ssa.OpAMD64VPMULLDMasked256, + ssa.OpAMD64VPMULLDMasked512, ssa.OpAMD64VPMULLQMasked128, ssa.OpAMD64VPMULLQMasked256, ssa.OpAMD64VPMULLQMasked512, - ssa.OpAMD64VORPSMasked512, ssa.OpAMD64VORPSMasked128, ssa.OpAMD64VORPSMasked256, + ssa.OpAMD64VORPSMasked512, ssa.OpAMD64VORPDMasked128, ssa.OpAMD64VORPDMasked256, ssa.OpAMD64VORPDMasked512, - ssa.OpAMD64VPORDMasked512, ssa.OpAMD64VPORDMasked128, ssa.OpAMD64VPORDMasked256, + ssa.OpAMD64VPORDMasked512, ssa.OpAMD64VPORQMasked128, ssa.OpAMD64VPORQMasked256, ssa.OpAMD64VPORQMasked512, + ssa.OpAMD64VPMADDWDMasked128, ssa.OpAMD64VPMADDWDMasked256, ssa.OpAMD64VPMADDWDMasked512, - ssa.OpAMD64VPMADDWDMasked128, - ssa.OpAMD64VPADDSWMasked256, - ssa.OpAMD64VPADDSWMasked512, - ssa.OpAMD64VPADDSWMasked128, ssa.OpAMD64VPADDSBMasked128, ssa.OpAMD64VPADDSBMasked256, ssa.OpAMD64VPADDSBMasked512, - ssa.OpAMD64VPSUBSWMasked256, - ssa.OpAMD64VPSUBSWMasked512, - ssa.OpAMD64VPSUBSWMasked128, + ssa.OpAMD64VPADDSWMasked128, + ssa.OpAMD64VPADDSWMasked256, + ssa.OpAMD64VPADDSWMasked512, ssa.OpAMD64VPSUBSBMasked128, ssa.OpAMD64VPSUBSBMasked256, ssa.OpAMD64VPSUBSBMasked512, + ssa.OpAMD64VPSUBSWMasked128, + ssa.OpAMD64VPSUBSWMasked256, + ssa.OpAMD64VPSUBSWMasked512, + ssa.OpAMD64VPMADDUBSWMasked128, ssa.OpAMD64VPMADDUBSWMasked256, ssa.OpAMD64VPMADDUBSWMasked512, - ssa.OpAMD64VPMADDUBSWMasked128, + ssa.OpAMD64VSUBPSMasked128, + ssa.OpAMD64VSUBPSMasked256, + ssa.OpAMD64VSUBPSMasked512, + ssa.OpAMD64VSUBPDMasked128, + ssa.OpAMD64VSUBPDMasked256, + ssa.OpAMD64VSUBPDMasked512, + ssa.OpAMD64VPSUBBMasked128, + ssa.OpAMD64VPSUBBMasked256, + ssa.OpAMD64VPSUBBMasked512, + ssa.OpAMD64VPSUBWMasked128, ssa.OpAMD64VPSUBWMasked256, ssa.OpAMD64VPSUBWMasked512, - ssa.OpAMD64VPSUBWMasked128, - 
ssa.OpAMD64VPSUBDMasked512, ssa.OpAMD64VPSUBDMasked128, ssa.OpAMD64VPSUBDMasked256, + ssa.OpAMD64VPSUBDMasked512, ssa.OpAMD64VPSUBQMasked128, ssa.OpAMD64VPSUBQMasked256, ssa.OpAMD64VPSUBQMasked512, - ssa.OpAMD64VPSUBBMasked128, - ssa.OpAMD64VPSUBBMasked256, - ssa.OpAMD64VPSUBBMasked512, - ssa.OpAMD64VXORPSMasked512, ssa.OpAMD64VXORPSMasked128, ssa.OpAMD64VXORPSMasked256, + ssa.OpAMD64VXORPSMasked512, ssa.OpAMD64VXORPDMasked128, ssa.OpAMD64VXORPDMasked256, ssa.OpAMD64VXORPDMasked512, - ssa.OpAMD64VPXORDMasked512, ssa.OpAMD64VPXORDMasked128, ssa.OpAMD64VPXORDMasked256, + ssa.OpAMD64VPXORDMasked512, ssa.OpAMD64VPXORQMasked128, ssa.OpAMD64VPXORQMasked256, ssa.OpAMD64VPXORQMasked512: p = simdFp2k1fp1(s, v) - case ssa.OpAMD64VPCMPEQWMasked256, - ssa.OpAMD64VPCMPEQWMasked512, - ssa.OpAMD64VPCMPEQWMasked128, - ssa.OpAMD64VPCMPEQDMasked512, - ssa.OpAMD64VPCMPEQDMasked128, - ssa.OpAMD64VPCMPEQDMasked256, - ssa.OpAMD64VPCMPEQQMasked128, - ssa.OpAMD64VPCMPEQQMasked256, - ssa.OpAMD64VPCMPEQQMasked512, - ssa.OpAMD64VPCMPEQBMasked128, - ssa.OpAMD64VPCMPEQBMasked256, - ssa.OpAMD64VPCMPEQBMasked512, - ssa.OpAMD64VPCMPGTWMasked256, - ssa.OpAMD64VPCMPGTWMasked512, - ssa.OpAMD64VPCMPGTWMasked128, - ssa.OpAMD64VPCMPGTDMasked512, - ssa.OpAMD64VPCMPGTDMasked128, - ssa.OpAMD64VPCMPGTDMasked256, - ssa.OpAMD64VPCMPGTQMasked128, - ssa.OpAMD64VPCMPGTQMasked256, - ssa.OpAMD64VPCMPGTQMasked512, - ssa.OpAMD64VPCMPGTBMasked128, - ssa.OpAMD64VPCMPGTBMasked256, - ssa.OpAMD64VPCMPGTBMasked512: - p = simdFp2k1k1(s, v) - - case ssa.OpAMD64VPABSWMasked256, - ssa.OpAMD64VPABSWMasked512, + case ssa.OpAMD64VPABSBMasked128, + ssa.OpAMD64VPABSBMasked256, + ssa.OpAMD64VPABSBMasked512, ssa.OpAMD64VPABSWMasked128, - ssa.OpAMD64VPABSDMasked512, + ssa.OpAMD64VPABSWMasked256, + ssa.OpAMD64VPABSWMasked512, ssa.OpAMD64VPABSDMasked128, ssa.OpAMD64VPABSDMasked256, + ssa.OpAMD64VPABSDMasked512, ssa.OpAMD64VPABSQMasked128, ssa.OpAMD64VPABSQMasked256, ssa.OpAMD64VPABSQMasked512, - ssa.OpAMD64VPABSBMasked128, - ssa.OpAMD64VPABSBMasked256, - ssa.OpAMD64VPABSBMasked512, - ssa.OpAMD64VRCP14PSMasked512, ssa.OpAMD64VRCP14PSMasked128, ssa.OpAMD64VRCP14PSMasked256, + ssa.OpAMD64VRCP14PSMasked512, ssa.OpAMD64VRCP14PDMasked128, ssa.OpAMD64VRCP14PDMasked256, ssa.OpAMD64VRCP14PDMasked512, - ssa.OpAMD64VRSQRT14PSMasked512, ssa.OpAMD64VRSQRT14PSMasked128, ssa.OpAMD64VRSQRT14PSMasked256, + ssa.OpAMD64VRSQRT14PSMasked512, ssa.OpAMD64VRSQRT14PDMasked128, ssa.OpAMD64VRSQRT14PDMasked256, ssa.OpAMD64VRSQRT14PDMasked512, + ssa.OpAMD64VPOPCNTBMasked128, + ssa.OpAMD64VPOPCNTBMasked256, + ssa.OpAMD64VPOPCNTBMasked512, + ssa.OpAMD64VPOPCNTWMasked128, ssa.OpAMD64VPOPCNTWMasked256, ssa.OpAMD64VPOPCNTWMasked512, - ssa.OpAMD64VPOPCNTWMasked128, - ssa.OpAMD64VPOPCNTDMasked512, ssa.OpAMD64VPOPCNTDMasked128, ssa.OpAMD64VPOPCNTDMasked256, + ssa.OpAMD64VPOPCNTDMasked512, ssa.OpAMD64VPOPCNTQMasked128, ssa.OpAMD64VPOPCNTQMasked256, ssa.OpAMD64VPOPCNTQMasked512, - ssa.OpAMD64VPOPCNTBMasked128, - ssa.OpAMD64VPOPCNTBMasked256, - ssa.OpAMD64VPOPCNTBMasked512, - ssa.OpAMD64VSQRTPSMasked512, ssa.OpAMD64VSQRTPSMasked128, ssa.OpAMD64VSQRTPSMasked256, + ssa.OpAMD64VSQRTPSMasked512, ssa.OpAMD64VSQRTPDMasked128, ssa.OpAMD64VSQRTPDMasked256, ssa.OpAMD64VSQRTPDMasked512: @@ -584,29 +559,29 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VROUNDPS256, ssa.OpAMD64VROUNDPD128, ssa.OpAMD64VROUNDPD256, - ssa.OpAMD64VRNDSCALEPS512, ssa.OpAMD64VRNDSCALEPS128, ssa.OpAMD64VRNDSCALEPS256, + ssa.OpAMD64VRNDSCALEPS512, ssa.OpAMD64VRNDSCALEPD128, ssa.OpAMD64VRNDSCALEPD256, 
ssa.OpAMD64VRNDSCALEPD512, - ssa.OpAMD64VREDUCEPS512, ssa.OpAMD64VREDUCEPS128, ssa.OpAMD64VREDUCEPS256, + ssa.OpAMD64VREDUCEPS512, ssa.OpAMD64VREDUCEPD128, ssa.OpAMD64VREDUCEPD256, ssa.OpAMD64VREDUCEPD512: p = simdFp11Imm8(s, v) - case ssa.OpAMD64VRNDSCALEPSMasked512, - ssa.OpAMD64VRNDSCALEPSMasked128, + case ssa.OpAMD64VRNDSCALEPSMasked128, ssa.OpAMD64VRNDSCALEPSMasked256, + ssa.OpAMD64VRNDSCALEPSMasked512, ssa.OpAMD64VRNDSCALEPDMasked128, ssa.OpAMD64VRNDSCALEPDMasked256, ssa.OpAMD64VRNDSCALEPDMasked512, - ssa.OpAMD64VREDUCEPSMasked512, ssa.OpAMD64VREDUCEPSMasked128, ssa.OpAMD64VREDUCEPSMasked256, + ssa.OpAMD64VREDUCEPSMasked512, ssa.OpAMD64VREDUCEPDMasked128, ssa.OpAMD64VREDUCEPDMasked256, ssa.OpAMD64VREDUCEPDMasked512: @@ -621,169 +596,169 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { case ssa.OpAMD64VCMPPS512, ssa.OpAMD64VCMPPD512, + ssa.OpAMD64VPCMPB512, + ssa.OpAMD64VPCMPW512, + ssa.OpAMD64VPCMPD512, + ssa.OpAMD64VPCMPQ512, + ssa.OpAMD64VPCMPUB128, + ssa.OpAMD64VPCMPUB256, + ssa.OpAMD64VPCMPUB512, + ssa.OpAMD64VPCMPUW128, ssa.OpAMD64VPCMPUW256, ssa.OpAMD64VPCMPUW512, - ssa.OpAMD64VPCMPUW128, - ssa.OpAMD64VPCMPUD512, ssa.OpAMD64VPCMPUD128, ssa.OpAMD64VPCMPUD256, + ssa.OpAMD64VPCMPUD512, ssa.OpAMD64VPCMPUQ128, ssa.OpAMD64VPCMPUQ256, ssa.OpAMD64VPCMPUQ512, - ssa.OpAMD64VPCMPUB128, - ssa.OpAMD64VPCMPUB256, - ssa.OpAMD64VPCMPUB512, - ssa.OpAMD64VPCMPW256, - ssa.OpAMD64VPCMPW512, - ssa.OpAMD64VPCMPW128, - ssa.OpAMD64VPCMPD512, - ssa.OpAMD64VPCMPD128, - ssa.OpAMD64VPCMPD256, ssa.OpAMD64VPCMPQ128, - ssa.OpAMD64VPCMPQ256, - ssa.OpAMD64VPCMPQ512, ssa.OpAMD64VPCMPB128, ssa.OpAMD64VPCMPB256, - ssa.OpAMD64VPCMPB512: + ssa.OpAMD64VPCMPW128, + ssa.OpAMD64VPCMPW256, + ssa.OpAMD64VPCMPD128, + ssa.OpAMD64VPCMPD256, + ssa.OpAMD64VPCMPQ256: p = simdFp2k1Imm8(s, v) - case ssa.OpAMD64VCMPPSMasked512, - ssa.OpAMD64VCMPPSMasked128, + case ssa.OpAMD64VCMPPSMasked128, ssa.OpAMD64VCMPPSMasked256, + ssa.OpAMD64VCMPPSMasked512, ssa.OpAMD64VCMPPDMasked128, ssa.OpAMD64VCMPPDMasked256, ssa.OpAMD64VCMPPDMasked512, - ssa.OpAMD64VPCMPUWMasked256, - ssa.OpAMD64VPCMPUWMasked512, - ssa.OpAMD64VPCMPUWMasked128, - ssa.OpAMD64VPCMPUDMasked512, - ssa.OpAMD64VPCMPUDMasked128, - ssa.OpAMD64VPCMPUDMasked256, - ssa.OpAMD64VPCMPUQMasked128, - ssa.OpAMD64VPCMPUQMasked256, - ssa.OpAMD64VPCMPUQMasked512, - ssa.OpAMD64VPCMPUBMasked128, - ssa.OpAMD64VPCMPUBMasked256, - ssa.OpAMD64VPCMPUBMasked512, + ssa.OpAMD64VPCMPBMasked128, + ssa.OpAMD64VPCMPBMasked256, + ssa.OpAMD64VPCMPBMasked512, + ssa.OpAMD64VPCMPWMasked128, ssa.OpAMD64VPCMPWMasked256, ssa.OpAMD64VPCMPWMasked512, - ssa.OpAMD64VPCMPWMasked128, - ssa.OpAMD64VPCMPDMasked512, ssa.OpAMD64VPCMPDMasked128, ssa.OpAMD64VPCMPDMasked256, + ssa.OpAMD64VPCMPDMasked512, ssa.OpAMD64VPCMPQMasked128, ssa.OpAMD64VPCMPQMasked256, ssa.OpAMD64VPCMPQMasked512, - ssa.OpAMD64VPCMPBMasked128, - ssa.OpAMD64VPCMPBMasked256, - ssa.OpAMD64VPCMPBMasked512: + ssa.OpAMD64VPCMPUBMasked128, + ssa.OpAMD64VPCMPUBMasked256, + ssa.OpAMD64VPCMPUBMasked512, + ssa.OpAMD64VPCMPUWMasked128, + ssa.OpAMD64VPCMPUWMasked256, + ssa.OpAMD64VPCMPUWMasked512, + ssa.OpAMD64VPCMPUDMasked128, + ssa.OpAMD64VPCMPUDMasked256, + ssa.OpAMD64VPCMPUDMasked512, + ssa.OpAMD64VPCMPUQMasked128, + ssa.OpAMD64VPCMPUQMasked256, + ssa.OpAMD64VPCMPUQMasked512: p = simdFp2k1k1Imm8(s, v) - case ssa.OpAMD64VFMADD132PS512, - ssa.OpAMD64VFMADD132PS128, + case ssa.OpAMD64VFMADD132PS128, ssa.OpAMD64VFMADD132PS256, + ssa.OpAMD64VFMADD132PS512, ssa.OpAMD64VFMADD132PD128, ssa.OpAMD64VFMADD132PD256, ssa.OpAMD64VFMADD132PD512, - 
ssa.OpAMD64VFMADD213PS512, ssa.OpAMD64VFMADD213PS128, ssa.OpAMD64VFMADD213PS256, + ssa.OpAMD64VFMADD213PS512, ssa.OpAMD64VFMADD213PD128, ssa.OpAMD64VFMADD213PD256, ssa.OpAMD64VFMADD213PD512, - ssa.OpAMD64VFMADD231PS512, ssa.OpAMD64VFMADD231PS128, ssa.OpAMD64VFMADD231PS256, + ssa.OpAMD64VFMADD231PS512, ssa.OpAMD64VFMADD231PD128, ssa.OpAMD64VFMADD231PD256, ssa.OpAMD64VFMADD231PD512, - ssa.OpAMD64VFMADDSUB132PS512, ssa.OpAMD64VFMADDSUB132PS128, ssa.OpAMD64VFMADDSUB132PS256, + ssa.OpAMD64VFMADDSUB132PS512, ssa.OpAMD64VFMADDSUB132PD128, ssa.OpAMD64VFMADDSUB132PD256, ssa.OpAMD64VFMADDSUB132PD512, - ssa.OpAMD64VFMADDSUB213PS512, ssa.OpAMD64VFMADDSUB213PS128, ssa.OpAMD64VFMADDSUB213PS256, + ssa.OpAMD64VFMADDSUB213PS512, ssa.OpAMD64VFMADDSUB213PD128, ssa.OpAMD64VFMADDSUB213PD256, ssa.OpAMD64VFMADDSUB213PD512, - ssa.OpAMD64VFMADDSUB231PS512, ssa.OpAMD64VFMADDSUB231PS128, ssa.OpAMD64VFMADDSUB231PS256, + ssa.OpAMD64VFMADDSUB231PS512, ssa.OpAMD64VFMADDSUB231PD128, ssa.OpAMD64VFMADDSUB231PD256, ssa.OpAMD64VFMADDSUB231PD512, - ssa.OpAMD64VFMSUB132PS512, ssa.OpAMD64VFMSUB132PS128, ssa.OpAMD64VFMSUB132PS256, + ssa.OpAMD64VFMSUB132PS512, ssa.OpAMD64VFMSUB132PD128, ssa.OpAMD64VFMSUB132PD256, ssa.OpAMD64VFMSUB132PD512, - ssa.OpAMD64VFMSUB213PS512, ssa.OpAMD64VFMSUB213PS128, ssa.OpAMD64VFMSUB213PS256, + ssa.OpAMD64VFMSUB213PS512, ssa.OpAMD64VFMSUB213PD128, ssa.OpAMD64VFMSUB213PD256, ssa.OpAMD64VFMSUB213PD512, - ssa.OpAMD64VFMSUB231PS512, ssa.OpAMD64VFMSUB231PS128, ssa.OpAMD64VFMSUB231PS256, + ssa.OpAMD64VFMSUB231PS512, ssa.OpAMD64VFMSUB231PD128, ssa.OpAMD64VFMSUB231PD256, ssa.OpAMD64VFMSUB231PD512, - ssa.OpAMD64VFMSUBADD132PS512, ssa.OpAMD64VFMSUBADD132PS128, ssa.OpAMD64VFMSUBADD132PS256, + ssa.OpAMD64VFMSUBADD132PS512, ssa.OpAMD64VFMSUBADD132PD128, ssa.OpAMD64VFMSUBADD132PD256, ssa.OpAMD64VFMSUBADD132PD512, - ssa.OpAMD64VFMSUBADD213PS512, ssa.OpAMD64VFMSUBADD213PS128, ssa.OpAMD64VFMSUBADD213PS256, + ssa.OpAMD64VFMSUBADD213PS512, ssa.OpAMD64VFMSUBADD213PD128, ssa.OpAMD64VFMSUBADD213PD256, ssa.OpAMD64VFMSUBADD213PD512, - ssa.OpAMD64VFMSUBADD231PS512, ssa.OpAMD64VFMSUBADD231PS128, ssa.OpAMD64VFMSUBADD231PS256, + ssa.OpAMD64VFMSUBADD231PS512, ssa.OpAMD64VFMSUBADD231PD128, ssa.OpAMD64VFMSUBADD231PD256, ssa.OpAMD64VFMSUBADD231PD512, - ssa.OpAMD64VFNMADD132PS512, ssa.OpAMD64VFNMADD132PS128, ssa.OpAMD64VFNMADD132PS256, + ssa.OpAMD64VFNMADD132PS512, ssa.OpAMD64VFNMADD132PD128, ssa.OpAMD64VFNMADD132PD256, ssa.OpAMD64VFNMADD132PD512, - ssa.OpAMD64VFNMADD213PS512, ssa.OpAMD64VFNMADD213PS128, ssa.OpAMD64VFNMADD213PS256, + ssa.OpAMD64VFNMADD213PS512, ssa.OpAMD64VFNMADD213PD128, ssa.OpAMD64VFNMADD213PD256, ssa.OpAMD64VFNMADD213PD512, - ssa.OpAMD64VFNMADD231PS512, ssa.OpAMD64VFNMADD231PS128, ssa.OpAMD64VFNMADD231PS256, + ssa.OpAMD64VFNMADD231PS512, ssa.OpAMD64VFNMADD231PD128, ssa.OpAMD64VFNMADD231PD256, ssa.OpAMD64VFNMADD231PD512, - ssa.OpAMD64VFNMSUB132PS512, ssa.OpAMD64VFNMSUB132PS128, ssa.OpAMD64VFNMSUB132PS256, + ssa.OpAMD64VFNMSUB132PS512, ssa.OpAMD64VFNMSUB132PD128, ssa.OpAMD64VFNMSUB132PD256, ssa.OpAMD64VFNMSUB132PD512, - ssa.OpAMD64VFNMSUB213PS512, ssa.OpAMD64VFNMSUB213PS128, ssa.OpAMD64VFNMSUB213PS256, + ssa.OpAMD64VFNMSUB213PS512, ssa.OpAMD64VFNMSUB213PD128, ssa.OpAMD64VFNMSUB213PD256, ssa.OpAMD64VFNMSUB213PD512, - ssa.OpAMD64VFNMSUB231PS512, ssa.OpAMD64VFNMSUB231PS128, ssa.OpAMD64VFNMSUB231PS256, + ssa.OpAMD64VFNMSUB231PS512, ssa.OpAMD64VFNMSUB231PD128, ssa.OpAMD64VFNMSUB231PD256, ssa.OpAMD64VFNMSUB231PD512, @@ -801,126 +776,126 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPDPBUSD512: 
p = simdFp31ResultInArg0(s, v) - case ssa.OpAMD64VFMADD132PSMasked512, - ssa.OpAMD64VFMADD132PSMasked128, + case ssa.OpAMD64VFMADD132PSMasked128, ssa.OpAMD64VFMADD132PSMasked256, + ssa.OpAMD64VFMADD132PSMasked512, ssa.OpAMD64VFMADD132PDMasked128, ssa.OpAMD64VFMADD132PDMasked256, ssa.OpAMD64VFMADD132PDMasked512, - ssa.OpAMD64VFMADD213PSMasked512, ssa.OpAMD64VFMADD213PSMasked128, ssa.OpAMD64VFMADD213PSMasked256, + ssa.OpAMD64VFMADD213PSMasked512, ssa.OpAMD64VFMADD213PDMasked128, ssa.OpAMD64VFMADD213PDMasked256, ssa.OpAMD64VFMADD213PDMasked512, - ssa.OpAMD64VFMADD231PSMasked512, ssa.OpAMD64VFMADD231PSMasked128, ssa.OpAMD64VFMADD231PSMasked256, + ssa.OpAMD64VFMADD231PSMasked512, ssa.OpAMD64VFMADD231PDMasked128, ssa.OpAMD64VFMADD231PDMasked256, ssa.OpAMD64VFMADD231PDMasked512, - ssa.OpAMD64VFMADDSUB132PSMasked512, ssa.OpAMD64VFMADDSUB132PSMasked128, ssa.OpAMD64VFMADDSUB132PSMasked256, + ssa.OpAMD64VFMADDSUB132PSMasked512, ssa.OpAMD64VFMADDSUB132PDMasked128, ssa.OpAMD64VFMADDSUB132PDMasked256, ssa.OpAMD64VFMADDSUB132PDMasked512, - ssa.OpAMD64VFMADDSUB213PSMasked512, ssa.OpAMD64VFMADDSUB213PSMasked128, ssa.OpAMD64VFMADDSUB213PSMasked256, + ssa.OpAMD64VFMADDSUB213PSMasked512, ssa.OpAMD64VFMADDSUB213PDMasked128, ssa.OpAMD64VFMADDSUB213PDMasked256, ssa.OpAMD64VFMADDSUB213PDMasked512, - ssa.OpAMD64VFMADDSUB231PSMasked512, ssa.OpAMD64VFMADDSUB231PSMasked128, ssa.OpAMD64VFMADDSUB231PSMasked256, + ssa.OpAMD64VFMADDSUB231PSMasked512, ssa.OpAMD64VFMADDSUB231PDMasked128, ssa.OpAMD64VFMADDSUB231PDMasked256, ssa.OpAMD64VFMADDSUB231PDMasked512, - ssa.OpAMD64VFMSUB132PSMasked512, ssa.OpAMD64VFMSUB132PSMasked128, ssa.OpAMD64VFMSUB132PSMasked256, + ssa.OpAMD64VFMSUB132PSMasked512, ssa.OpAMD64VFMSUB132PDMasked128, ssa.OpAMD64VFMSUB132PDMasked256, ssa.OpAMD64VFMSUB132PDMasked512, - ssa.OpAMD64VFMSUB213PSMasked512, ssa.OpAMD64VFMSUB213PSMasked128, ssa.OpAMD64VFMSUB213PSMasked256, + ssa.OpAMD64VFMSUB213PSMasked512, ssa.OpAMD64VFMSUB213PDMasked128, ssa.OpAMD64VFMSUB213PDMasked256, ssa.OpAMD64VFMSUB213PDMasked512, - ssa.OpAMD64VFMSUB231PSMasked512, ssa.OpAMD64VFMSUB231PSMasked128, ssa.OpAMD64VFMSUB231PSMasked256, + ssa.OpAMD64VFMSUB231PSMasked512, ssa.OpAMD64VFMSUB231PDMasked128, ssa.OpAMD64VFMSUB231PDMasked256, ssa.OpAMD64VFMSUB231PDMasked512, - ssa.OpAMD64VFMSUBADD132PSMasked512, ssa.OpAMD64VFMSUBADD132PSMasked128, ssa.OpAMD64VFMSUBADD132PSMasked256, + ssa.OpAMD64VFMSUBADD132PSMasked512, ssa.OpAMD64VFMSUBADD132PDMasked128, ssa.OpAMD64VFMSUBADD132PDMasked256, ssa.OpAMD64VFMSUBADD132PDMasked512, - ssa.OpAMD64VFMSUBADD213PSMasked512, ssa.OpAMD64VFMSUBADD213PSMasked128, ssa.OpAMD64VFMSUBADD213PSMasked256, + ssa.OpAMD64VFMSUBADD213PSMasked512, ssa.OpAMD64VFMSUBADD213PDMasked128, ssa.OpAMD64VFMSUBADD213PDMasked256, ssa.OpAMD64VFMSUBADD213PDMasked512, - ssa.OpAMD64VFMSUBADD231PSMasked512, ssa.OpAMD64VFMSUBADD231PSMasked128, ssa.OpAMD64VFMSUBADD231PSMasked256, + ssa.OpAMD64VFMSUBADD231PSMasked512, ssa.OpAMD64VFMSUBADD231PDMasked128, ssa.OpAMD64VFMSUBADD231PDMasked256, ssa.OpAMD64VFMSUBADD231PDMasked512, - ssa.OpAMD64VFNMADD132PSMasked512, ssa.OpAMD64VFNMADD132PSMasked128, ssa.OpAMD64VFNMADD132PSMasked256, + ssa.OpAMD64VFNMADD132PSMasked512, ssa.OpAMD64VFNMADD132PDMasked128, ssa.OpAMD64VFNMADD132PDMasked256, ssa.OpAMD64VFNMADD132PDMasked512, - ssa.OpAMD64VFNMADD213PSMasked512, ssa.OpAMD64VFNMADD213PSMasked128, ssa.OpAMD64VFNMADD213PSMasked256, + ssa.OpAMD64VFNMADD213PSMasked512, ssa.OpAMD64VFNMADD213PDMasked128, ssa.OpAMD64VFNMADD213PDMasked256, ssa.OpAMD64VFNMADD213PDMasked512, - ssa.OpAMD64VFNMADD231PSMasked512, 
ssa.OpAMD64VFNMADD231PSMasked128, ssa.OpAMD64VFNMADD231PSMasked256, + ssa.OpAMD64VFNMADD231PSMasked512, ssa.OpAMD64VFNMADD231PDMasked128, ssa.OpAMD64VFNMADD231PDMasked256, ssa.OpAMD64VFNMADD231PDMasked512, - ssa.OpAMD64VFNMSUB132PSMasked512, ssa.OpAMD64VFNMSUB132PSMasked128, ssa.OpAMD64VFNMSUB132PSMasked256, + ssa.OpAMD64VFNMSUB132PSMasked512, ssa.OpAMD64VFNMSUB132PDMasked128, ssa.OpAMD64VFNMSUB132PDMasked256, ssa.OpAMD64VFNMSUB132PDMasked512, - ssa.OpAMD64VFNMSUB213PSMasked512, ssa.OpAMD64VFNMSUB213PSMasked128, ssa.OpAMD64VFNMSUB213PSMasked256, + ssa.OpAMD64VFNMSUB213PSMasked512, ssa.OpAMD64VFNMSUB213PDMasked128, ssa.OpAMD64VFNMSUB213PDMasked256, ssa.OpAMD64VFNMSUB213PDMasked512, - ssa.OpAMD64VFNMSUB231PSMasked512, ssa.OpAMD64VFNMSUB231PSMasked128, ssa.OpAMD64VFNMSUB231PSMasked256, + ssa.OpAMD64VFNMSUB231PSMasked512, ssa.OpAMD64VFNMSUB231PDMasked128, ssa.OpAMD64VFNMSUB231PDMasked256, ssa.OpAMD64VFNMSUB231PDMasked512, - ssa.OpAMD64VPDPWSSDMasked512, ssa.OpAMD64VPDPWSSDMasked128, ssa.OpAMD64VPDPWSSDMasked256, - ssa.OpAMD64VPDPWSSDSMasked512, + ssa.OpAMD64VPDPWSSDMasked512, ssa.OpAMD64VPDPWSSDSMasked128, ssa.OpAMD64VPDPWSSDSMasked256, - ssa.OpAMD64VPDPBUSDSMasked512, + ssa.OpAMD64VPDPWSSDSMasked512, ssa.OpAMD64VPDPBUSDSMasked128, ssa.OpAMD64VPDPBUSDSMasked256, - ssa.OpAMD64VPDPBUSDMasked512, + ssa.OpAMD64VPDPBUSDSMasked512, ssa.OpAMD64VPDPBUSDMasked128, - ssa.OpAMD64VPDPBUSDMasked256: + ssa.OpAMD64VPDPBUSDMasked256, + ssa.OpAMD64VPDPBUSDMasked512: p = simdFp3k1fp1ResultInArg0(s, v) default: @@ -930,273 +905,273 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { // Masked operation are always compiled with zeroing. switch v.Op { - case ssa.OpAMD64VPABSWMasked256, - ssa.OpAMD64VPABSWMasked512, + case ssa.OpAMD64VPABSBMasked128, + ssa.OpAMD64VPABSBMasked256, + ssa.OpAMD64VPABSBMasked512, ssa.OpAMD64VPABSWMasked128, - ssa.OpAMD64VPABSDMasked512, + ssa.OpAMD64VPABSWMasked256, + ssa.OpAMD64VPABSWMasked512, ssa.OpAMD64VPABSDMasked128, ssa.OpAMD64VPABSDMasked256, + ssa.OpAMD64VPABSDMasked512, ssa.OpAMD64VPABSQMasked128, ssa.OpAMD64VPABSQMasked256, ssa.OpAMD64VPABSQMasked512, - ssa.OpAMD64VPABSBMasked128, - ssa.OpAMD64VPABSBMasked256, - ssa.OpAMD64VPABSBMasked512, - ssa.OpAMD64VADDPSMasked512, ssa.OpAMD64VADDPSMasked128, ssa.OpAMD64VADDPSMasked256, + ssa.OpAMD64VADDPSMasked512, ssa.OpAMD64VADDPDMasked128, ssa.OpAMD64VADDPDMasked256, ssa.OpAMD64VADDPDMasked512, + ssa.OpAMD64VPADDBMasked128, + ssa.OpAMD64VPADDBMasked256, + ssa.OpAMD64VPADDBMasked512, + ssa.OpAMD64VPADDWMasked128, ssa.OpAMD64VPADDWMasked256, ssa.OpAMD64VPADDWMasked512, - ssa.OpAMD64VPADDWMasked128, - ssa.OpAMD64VPADDDMasked512, ssa.OpAMD64VPADDDMasked128, ssa.OpAMD64VPADDDMasked256, + ssa.OpAMD64VPADDDMasked512, ssa.OpAMD64VPADDQMasked128, ssa.OpAMD64VPADDQMasked256, ssa.OpAMD64VPADDQMasked512, - ssa.OpAMD64VPADDBMasked128, - ssa.OpAMD64VPADDBMasked256, - ssa.OpAMD64VPADDBMasked512, - ssa.OpAMD64VANDPSMasked512, ssa.OpAMD64VANDPSMasked128, ssa.OpAMD64VANDPSMasked256, + ssa.OpAMD64VANDPSMasked512, ssa.OpAMD64VANDPDMasked128, ssa.OpAMD64VANDPDMasked256, ssa.OpAMD64VANDPDMasked512, - ssa.OpAMD64VPANDDMasked512, ssa.OpAMD64VPANDDMasked128, ssa.OpAMD64VPANDDMasked256, + ssa.OpAMD64VPANDDMasked512, ssa.OpAMD64VPANDQMasked128, ssa.OpAMD64VPANDQMasked256, ssa.OpAMD64VPANDQMasked512, - ssa.OpAMD64VANDNPSMasked512, ssa.OpAMD64VANDNPSMasked128, ssa.OpAMD64VANDNPSMasked256, + ssa.OpAMD64VANDNPSMasked512, ssa.OpAMD64VANDNPDMasked128, ssa.OpAMD64VANDNPDMasked256, ssa.OpAMD64VANDNPDMasked512, - ssa.OpAMD64VPANDNDMasked512, 
ssa.OpAMD64VPANDNDMasked128, ssa.OpAMD64VPANDNDMasked256, + ssa.OpAMD64VPANDNDMasked512, ssa.OpAMD64VPANDNQMasked128, ssa.OpAMD64VPANDNQMasked256, ssa.OpAMD64VPANDNQMasked512, - ssa.OpAMD64VRCP14PSMasked512, ssa.OpAMD64VRCP14PSMasked128, ssa.OpAMD64VRCP14PSMasked256, + ssa.OpAMD64VRCP14PSMasked512, ssa.OpAMD64VRCP14PDMasked128, ssa.OpAMD64VRCP14PDMasked256, ssa.OpAMD64VRCP14PDMasked512, - ssa.OpAMD64VRSQRT14PSMasked512, ssa.OpAMD64VRSQRT14PSMasked128, ssa.OpAMD64VRSQRT14PSMasked256, + ssa.OpAMD64VRSQRT14PSMasked512, ssa.OpAMD64VRSQRT14PDMasked128, ssa.OpAMD64VRSQRT14PDMasked256, ssa.OpAMD64VRSQRT14PDMasked512, - ssa.OpAMD64VPAVGWMasked256, - ssa.OpAMD64VPAVGWMasked512, - ssa.OpAMD64VPAVGWMasked128, ssa.OpAMD64VPAVGBMasked128, ssa.OpAMD64VPAVGBMasked256, ssa.OpAMD64VPAVGBMasked512, - ssa.OpAMD64VRNDSCALEPSMasked512, + ssa.OpAMD64VPAVGWMasked128, + ssa.OpAMD64VPAVGWMasked256, + ssa.OpAMD64VPAVGWMasked512, ssa.OpAMD64VRNDSCALEPSMasked128, ssa.OpAMD64VRNDSCALEPSMasked256, + ssa.OpAMD64VRNDSCALEPSMasked512, ssa.OpAMD64VRNDSCALEPDMasked128, ssa.OpAMD64VRNDSCALEPDMasked256, ssa.OpAMD64VRNDSCALEPDMasked512, - ssa.OpAMD64VREDUCEPSMasked512, ssa.OpAMD64VREDUCEPSMasked128, ssa.OpAMD64VREDUCEPSMasked256, + ssa.OpAMD64VREDUCEPSMasked512, ssa.OpAMD64VREDUCEPDMasked128, ssa.OpAMD64VREDUCEPDMasked256, ssa.OpAMD64VREDUCEPDMasked512, - ssa.OpAMD64VDIVPSMasked512, ssa.OpAMD64VDIVPSMasked128, ssa.OpAMD64VDIVPSMasked256, + ssa.OpAMD64VDIVPSMasked512, ssa.OpAMD64VDIVPDMasked128, ssa.OpAMD64VDIVPDMasked256, ssa.OpAMD64VDIVPDMasked512, - ssa.OpAMD64VFMADD132PSMasked512, ssa.OpAMD64VFMADD132PSMasked128, ssa.OpAMD64VFMADD132PSMasked256, + ssa.OpAMD64VFMADD132PSMasked512, ssa.OpAMD64VFMADD132PDMasked128, ssa.OpAMD64VFMADD132PDMasked256, ssa.OpAMD64VFMADD132PDMasked512, - ssa.OpAMD64VFMADD213PSMasked512, ssa.OpAMD64VFMADD213PSMasked128, ssa.OpAMD64VFMADD213PSMasked256, + ssa.OpAMD64VFMADD213PSMasked512, ssa.OpAMD64VFMADD213PDMasked128, ssa.OpAMD64VFMADD213PDMasked256, ssa.OpAMD64VFMADD213PDMasked512, - ssa.OpAMD64VFMADD231PSMasked512, ssa.OpAMD64VFMADD231PSMasked128, ssa.OpAMD64VFMADD231PSMasked256, + ssa.OpAMD64VFMADD231PSMasked512, ssa.OpAMD64VFMADD231PDMasked128, ssa.OpAMD64VFMADD231PDMasked256, ssa.OpAMD64VFMADD231PDMasked512, - ssa.OpAMD64VFMADDSUB132PSMasked512, ssa.OpAMD64VFMADDSUB132PSMasked128, ssa.OpAMD64VFMADDSUB132PSMasked256, + ssa.OpAMD64VFMADDSUB132PSMasked512, ssa.OpAMD64VFMADDSUB132PDMasked128, ssa.OpAMD64VFMADDSUB132PDMasked256, ssa.OpAMD64VFMADDSUB132PDMasked512, - ssa.OpAMD64VFMADDSUB213PSMasked512, ssa.OpAMD64VFMADDSUB213PSMasked128, ssa.OpAMD64VFMADDSUB213PSMasked256, + ssa.OpAMD64VFMADDSUB213PSMasked512, ssa.OpAMD64VFMADDSUB213PDMasked128, ssa.OpAMD64VFMADDSUB213PDMasked256, ssa.OpAMD64VFMADDSUB213PDMasked512, - ssa.OpAMD64VFMADDSUB231PSMasked512, ssa.OpAMD64VFMADDSUB231PSMasked128, ssa.OpAMD64VFMADDSUB231PSMasked256, + ssa.OpAMD64VFMADDSUB231PSMasked512, ssa.OpAMD64VFMADDSUB231PDMasked128, ssa.OpAMD64VFMADDSUB231PDMasked256, ssa.OpAMD64VFMADDSUB231PDMasked512, - ssa.OpAMD64VFMSUB132PSMasked512, ssa.OpAMD64VFMSUB132PSMasked128, ssa.OpAMD64VFMSUB132PSMasked256, + ssa.OpAMD64VFMSUB132PSMasked512, ssa.OpAMD64VFMSUB132PDMasked128, ssa.OpAMD64VFMSUB132PDMasked256, ssa.OpAMD64VFMSUB132PDMasked512, - ssa.OpAMD64VFMSUB213PSMasked512, ssa.OpAMD64VFMSUB213PSMasked128, ssa.OpAMD64VFMSUB213PSMasked256, + ssa.OpAMD64VFMSUB213PSMasked512, ssa.OpAMD64VFMSUB213PDMasked128, ssa.OpAMD64VFMSUB213PDMasked256, ssa.OpAMD64VFMSUB213PDMasked512, - ssa.OpAMD64VFMSUB231PSMasked512, ssa.OpAMD64VFMSUB231PSMasked128, 
ssa.OpAMD64VFMSUB231PSMasked256, + ssa.OpAMD64VFMSUB231PSMasked512, ssa.OpAMD64VFMSUB231PDMasked128, ssa.OpAMD64VFMSUB231PDMasked256, ssa.OpAMD64VFMSUB231PDMasked512, - ssa.OpAMD64VFMSUBADD132PSMasked512, ssa.OpAMD64VFMSUBADD132PSMasked128, ssa.OpAMD64VFMSUBADD132PSMasked256, + ssa.OpAMD64VFMSUBADD132PSMasked512, ssa.OpAMD64VFMSUBADD132PDMasked128, ssa.OpAMD64VFMSUBADD132PDMasked256, ssa.OpAMD64VFMSUBADD132PDMasked512, - ssa.OpAMD64VFMSUBADD213PSMasked512, ssa.OpAMD64VFMSUBADD213PSMasked128, ssa.OpAMD64VFMSUBADD213PSMasked256, + ssa.OpAMD64VFMSUBADD213PSMasked512, ssa.OpAMD64VFMSUBADD213PDMasked128, ssa.OpAMD64VFMSUBADD213PDMasked256, ssa.OpAMD64VFMSUBADD213PDMasked512, - ssa.OpAMD64VFMSUBADD231PSMasked512, ssa.OpAMD64VFMSUBADD231PSMasked128, ssa.OpAMD64VFMSUBADD231PSMasked256, + ssa.OpAMD64VFMSUBADD231PSMasked512, ssa.OpAMD64VFMSUBADD231PDMasked128, ssa.OpAMD64VFMSUBADD231PDMasked256, ssa.OpAMD64VFMSUBADD231PDMasked512, - ssa.OpAMD64VFNMADD132PSMasked512, ssa.OpAMD64VFNMADD132PSMasked128, ssa.OpAMD64VFNMADD132PSMasked256, + ssa.OpAMD64VFNMADD132PSMasked512, ssa.OpAMD64VFNMADD132PDMasked128, ssa.OpAMD64VFNMADD132PDMasked256, ssa.OpAMD64VFNMADD132PDMasked512, - ssa.OpAMD64VFNMADD213PSMasked512, ssa.OpAMD64VFNMADD213PSMasked128, ssa.OpAMD64VFNMADD213PSMasked256, + ssa.OpAMD64VFNMADD213PSMasked512, ssa.OpAMD64VFNMADD213PDMasked128, ssa.OpAMD64VFNMADD213PDMasked256, ssa.OpAMD64VFNMADD213PDMasked512, - ssa.OpAMD64VFNMADD231PSMasked512, ssa.OpAMD64VFNMADD231PSMasked128, ssa.OpAMD64VFNMADD231PSMasked256, + ssa.OpAMD64VFNMADD231PSMasked512, ssa.OpAMD64VFNMADD231PDMasked128, ssa.OpAMD64VFNMADD231PDMasked256, ssa.OpAMD64VFNMADD231PDMasked512, - ssa.OpAMD64VFNMSUB132PSMasked512, ssa.OpAMD64VFNMSUB132PSMasked128, ssa.OpAMD64VFNMSUB132PSMasked256, + ssa.OpAMD64VFNMSUB132PSMasked512, ssa.OpAMD64VFNMSUB132PDMasked128, ssa.OpAMD64VFNMSUB132PDMasked256, ssa.OpAMD64VFNMSUB132PDMasked512, - ssa.OpAMD64VFNMSUB213PSMasked512, ssa.OpAMD64VFNMSUB213PSMasked128, ssa.OpAMD64VFNMSUB213PSMasked256, + ssa.OpAMD64VFNMSUB213PSMasked512, ssa.OpAMD64VFNMSUB213PDMasked128, ssa.OpAMD64VFNMSUB213PDMasked256, ssa.OpAMD64VFNMSUB213PDMasked512, - ssa.OpAMD64VFNMSUB231PSMasked512, ssa.OpAMD64VFNMSUB231PSMasked128, ssa.OpAMD64VFNMSUB231PSMasked256, + ssa.OpAMD64VFNMSUB231PSMasked512, ssa.OpAMD64VFNMSUB231PDMasked128, ssa.OpAMD64VFNMSUB231PDMasked256, ssa.OpAMD64VFNMSUB231PDMasked512, - ssa.OpAMD64VMAXPSMasked512, ssa.OpAMD64VMAXPSMasked128, ssa.OpAMD64VMAXPSMasked256, + ssa.OpAMD64VMAXPSMasked512, ssa.OpAMD64VMAXPDMasked128, ssa.OpAMD64VMAXPDMasked256, ssa.OpAMD64VMAXPDMasked512, + ssa.OpAMD64VPMAXSBMasked128, + ssa.OpAMD64VPMAXSBMasked256, + ssa.OpAMD64VPMAXSBMasked512, + ssa.OpAMD64VPMAXSWMasked128, ssa.OpAMD64VPMAXSWMasked256, ssa.OpAMD64VPMAXSWMasked512, - ssa.OpAMD64VPMAXSWMasked128, - ssa.OpAMD64VPMAXSDMasked512, ssa.OpAMD64VPMAXSDMasked128, ssa.OpAMD64VPMAXSDMasked256, + ssa.OpAMD64VPMAXSDMasked512, ssa.OpAMD64VPMAXSQMasked128, ssa.OpAMD64VPMAXSQMasked256, ssa.OpAMD64VPMAXSQMasked512, - ssa.OpAMD64VPMAXSBMasked128, - ssa.OpAMD64VPMAXSBMasked256, - ssa.OpAMD64VPMAXSBMasked512, + ssa.OpAMD64VPMAXUBMasked128, + ssa.OpAMD64VPMAXUBMasked256, + ssa.OpAMD64VPMAXUBMasked512, + ssa.OpAMD64VPMAXUWMasked128, ssa.OpAMD64VPMAXUWMasked256, ssa.OpAMD64VPMAXUWMasked512, - ssa.OpAMD64VPMAXUWMasked128, - ssa.OpAMD64VPMAXUDMasked512, ssa.OpAMD64VPMAXUDMasked128, ssa.OpAMD64VPMAXUDMasked256, + ssa.OpAMD64VPMAXUDMasked512, ssa.OpAMD64VPMAXUQMasked128, ssa.OpAMD64VPMAXUQMasked256, ssa.OpAMD64VPMAXUQMasked512, - ssa.OpAMD64VPMAXUBMasked128, - 
ssa.OpAMD64VPMAXUBMasked256, - ssa.OpAMD64VPMAXUBMasked512, - ssa.OpAMD64VMINPSMasked512, ssa.OpAMD64VMINPSMasked128, ssa.OpAMD64VMINPSMasked256, + ssa.OpAMD64VMINPSMasked512, ssa.OpAMD64VMINPDMasked128, ssa.OpAMD64VMINPDMasked256, ssa.OpAMD64VMINPDMasked512, + ssa.OpAMD64VPMINSBMasked128, + ssa.OpAMD64VPMINSBMasked256, + ssa.OpAMD64VPMINSBMasked512, + ssa.OpAMD64VPMINSWMasked128, ssa.OpAMD64VPMINSWMasked256, ssa.OpAMD64VPMINSWMasked512, - ssa.OpAMD64VPMINSWMasked128, - ssa.OpAMD64VPMINSDMasked512, ssa.OpAMD64VPMINSDMasked128, ssa.OpAMD64VPMINSDMasked256, + ssa.OpAMD64VPMINSDMasked512, ssa.OpAMD64VPMINSQMasked128, ssa.OpAMD64VPMINSQMasked256, ssa.OpAMD64VPMINSQMasked512, - ssa.OpAMD64VPMINSBMasked128, - ssa.OpAMD64VPMINSBMasked256, - ssa.OpAMD64VPMINSBMasked512, + ssa.OpAMD64VPMINUBMasked128, + ssa.OpAMD64VPMINUBMasked256, + ssa.OpAMD64VPMINUBMasked512, + ssa.OpAMD64VPMINUWMasked128, ssa.OpAMD64VPMINUWMasked256, ssa.OpAMD64VPMINUWMasked512, - ssa.OpAMD64VPMINUWMasked128, - ssa.OpAMD64VPMINUDMasked512, ssa.OpAMD64VPMINUDMasked128, ssa.OpAMD64VPMINUDMasked256, + ssa.OpAMD64VPMINUDMasked512, ssa.OpAMD64VPMINUQMasked128, ssa.OpAMD64VPMINUQMasked256, ssa.OpAMD64VPMINUQMasked512, - ssa.OpAMD64VPMINUBMasked128, - ssa.OpAMD64VPMINUBMasked256, - ssa.OpAMD64VPMINUBMasked512, - ssa.OpAMD64VMULPSMasked512, ssa.OpAMD64VMULPSMasked128, ssa.OpAMD64VMULPSMasked256, + ssa.OpAMD64VMULPSMasked512, ssa.OpAMD64VMULPDMasked128, ssa.OpAMD64VMULPDMasked256, ssa.OpAMD64VMULPDMasked512, - ssa.OpAMD64VSCALEFPSMasked512, ssa.OpAMD64VSCALEFPSMasked128, ssa.OpAMD64VSCALEFPSMasked256, + ssa.OpAMD64VSCALEFPSMasked512, ssa.OpAMD64VSCALEFPDMasked128, ssa.OpAMD64VSCALEFPDMasked256, ssa.OpAMD64VSCALEFPDMasked512, @@ -1206,102 +1181,108 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMULUDQMasked128, ssa.OpAMD64VPMULUDQMasked256, ssa.OpAMD64VPMULUDQMasked512, + ssa.OpAMD64VPMULHWMasked128, ssa.OpAMD64VPMULHWMasked256, ssa.OpAMD64VPMULHWMasked512, - ssa.OpAMD64VPMULHWMasked128, + ssa.OpAMD64VPMULHUWMasked128, ssa.OpAMD64VPMULHUWMasked256, ssa.OpAMD64VPMULHUWMasked512, - ssa.OpAMD64VPMULHUWMasked128, + ssa.OpAMD64VPMULLWMasked128, ssa.OpAMD64VPMULLWMasked256, ssa.OpAMD64VPMULLWMasked512, - ssa.OpAMD64VPMULLWMasked128, - ssa.OpAMD64VPMULLDMasked512, ssa.OpAMD64VPMULLDMasked128, ssa.OpAMD64VPMULLDMasked256, + ssa.OpAMD64VPMULLDMasked512, ssa.OpAMD64VPMULLQMasked128, ssa.OpAMD64VPMULLQMasked256, ssa.OpAMD64VPMULLQMasked512, - ssa.OpAMD64VORPSMasked512, ssa.OpAMD64VORPSMasked128, ssa.OpAMD64VORPSMasked256, + ssa.OpAMD64VORPSMasked512, ssa.OpAMD64VORPDMasked128, ssa.OpAMD64VORPDMasked256, ssa.OpAMD64VORPDMasked512, - ssa.OpAMD64VPORDMasked512, ssa.OpAMD64VPORDMasked128, ssa.OpAMD64VPORDMasked256, + ssa.OpAMD64VPORDMasked512, ssa.OpAMD64VPORQMasked128, ssa.OpAMD64VPORQMasked256, ssa.OpAMD64VPORQMasked512, + ssa.OpAMD64VPMADDWDMasked128, ssa.OpAMD64VPMADDWDMasked256, ssa.OpAMD64VPMADDWDMasked512, - ssa.OpAMD64VPMADDWDMasked128, - ssa.OpAMD64VPDPWSSDMasked512, ssa.OpAMD64VPDPWSSDMasked128, ssa.OpAMD64VPDPWSSDMasked256, + ssa.OpAMD64VPDPWSSDMasked512, + ssa.OpAMD64VPOPCNTBMasked128, + ssa.OpAMD64VPOPCNTBMasked256, + ssa.OpAMD64VPOPCNTBMasked512, + ssa.OpAMD64VPOPCNTWMasked128, ssa.OpAMD64VPOPCNTWMasked256, ssa.OpAMD64VPOPCNTWMasked512, - ssa.OpAMD64VPOPCNTWMasked128, - ssa.OpAMD64VPOPCNTDMasked512, ssa.OpAMD64VPOPCNTDMasked128, ssa.OpAMD64VPOPCNTDMasked256, + ssa.OpAMD64VPOPCNTDMasked512, ssa.OpAMD64VPOPCNTQMasked128, ssa.OpAMD64VPOPCNTQMasked256, ssa.OpAMD64VPOPCNTQMasked512, - ssa.OpAMD64VPOPCNTBMasked128, - 
ssa.OpAMD64VPOPCNTBMasked256, - ssa.OpAMD64VPOPCNTBMasked512, - ssa.OpAMD64VPADDSWMasked256, - ssa.OpAMD64VPADDSWMasked512, - ssa.OpAMD64VPADDSWMasked128, ssa.OpAMD64VPADDSBMasked128, ssa.OpAMD64VPADDSBMasked256, ssa.OpAMD64VPADDSBMasked512, - ssa.OpAMD64VPDPWSSDSMasked512, + ssa.OpAMD64VPADDSWMasked128, + ssa.OpAMD64VPADDSWMasked256, + ssa.OpAMD64VPADDSWMasked512, ssa.OpAMD64VPDPWSSDSMasked128, ssa.OpAMD64VPDPWSSDSMasked256, - ssa.OpAMD64VPSUBSWMasked256, - ssa.OpAMD64VPSUBSWMasked512, - ssa.OpAMD64VPSUBSWMasked128, + ssa.OpAMD64VPDPWSSDSMasked512, ssa.OpAMD64VPSUBSBMasked128, ssa.OpAMD64VPSUBSBMasked256, ssa.OpAMD64VPSUBSBMasked512, + ssa.OpAMD64VPSUBSWMasked128, + ssa.OpAMD64VPSUBSWMasked256, + ssa.OpAMD64VPSUBSWMasked512, + ssa.OpAMD64VPMADDUBSWMasked128, ssa.OpAMD64VPMADDUBSWMasked256, ssa.OpAMD64VPMADDUBSWMasked512, - ssa.OpAMD64VPMADDUBSWMasked128, - ssa.OpAMD64VPDPBUSDSMasked512, ssa.OpAMD64VPDPBUSDSMasked128, ssa.OpAMD64VPDPBUSDSMasked256, - ssa.OpAMD64VSQRTPSMasked512, + ssa.OpAMD64VPDPBUSDSMasked512, ssa.OpAMD64VSQRTPSMasked128, ssa.OpAMD64VSQRTPSMasked256, + ssa.OpAMD64VSQRTPSMasked512, ssa.OpAMD64VSQRTPDMasked128, ssa.OpAMD64VSQRTPDMasked256, ssa.OpAMD64VSQRTPDMasked512, + ssa.OpAMD64VSUBPSMasked128, + ssa.OpAMD64VSUBPSMasked256, + ssa.OpAMD64VSUBPSMasked512, + ssa.OpAMD64VSUBPDMasked128, + ssa.OpAMD64VSUBPDMasked256, + ssa.OpAMD64VSUBPDMasked512, + ssa.OpAMD64VPSUBBMasked128, + ssa.OpAMD64VPSUBBMasked256, + ssa.OpAMD64VPSUBBMasked512, + ssa.OpAMD64VPSUBWMasked128, ssa.OpAMD64VPSUBWMasked256, ssa.OpAMD64VPSUBWMasked512, - ssa.OpAMD64VPSUBWMasked128, - ssa.OpAMD64VPSUBDMasked512, ssa.OpAMD64VPSUBDMasked128, ssa.OpAMD64VPSUBDMasked256, + ssa.OpAMD64VPSUBDMasked512, ssa.OpAMD64VPSUBQMasked128, ssa.OpAMD64VPSUBQMasked256, ssa.OpAMD64VPSUBQMasked512, - ssa.OpAMD64VPSUBBMasked128, - ssa.OpAMD64VPSUBBMasked256, - ssa.OpAMD64VPSUBBMasked512, - ssa.OpAMD64VPDPBUSDMasked512, ssa.OpAMD64VPDPBUSDMasked128, ssa.OpAMD64VPDPBUSDMasked256, - ssa.OpAMD64VXORPSMasked512, + ssa.OpAMD64VPDPBUSDMasked512, ssa.OpAMD64VXORPSMasked128, ssa.OpAMD64VXORPSMasked256, + ssa.OpAMD64VXORPSMasked512, ssa.OpAMD64VXORPDMasked128, ssa.OpAMD64VXORPDMasked256, ssa.OpAMD64VXORPDMasked512, - ssa.OpAMD64VPXORDMasked512, ssa.OpAMD64VPXORDMasked128, ssa.OpAMD64VPXORDMasked256, + ssa.OpAMD64VPXORDMasked512, ssa.OpAMD64VPXORQMasked128, ssa.OpAMD64VPXORQMasked256, ssa.OpAMD64VPXORQMasked512: diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index add066a3b6dcff..d6d8246980a8bf 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -194,17 +194,17 @@ (EqualFloat64x4 x y) => (VCMPPD256 [0] x y) (EqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [0] x y)) (EqualInt16x16 ...) => (VPCMPEQW256 ...) -(EqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPEQW512 x y)) +(EqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [0] x y)) (EqualInt16x8 ...) => (VPCMPEQW128 ...) -(EqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPEQD512 x y)) +(EqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [0] x y)) (EqualInt32x4 ...) => (VPCMPEQD128 ...) (EqualInt32x8 ...) => (VPCMPEQD256 ...) (EqualInt64x2 ...) => (VPCMPEQQ128 ...) (EqualInt64x4 ...) => (VPCMPEQQ256 ...) -(EqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPEQQ512 x y)) +(EqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [0] x y)) (EqualInt8x16 ...) => (VPCMPEQB128 ...) (EqualInt8x32 ...) => (VPCMPEQB256 ...) 
-(EqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPEQB512 x y))
+(EqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [0] x y))
 (EqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [0] x y))
 (EqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [0] x y))
 (EqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [0] x y))
@@ -348,17 +348,17 @@
 (GreaterFloat64x4 x y) => (VCMPPD256 [6] x y)
 (GreaterFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [6] x y))
 (GreaterInt16x16 ...) => (VPCMPGTW256 ...)
-(GreaterInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPGTW512 x y))
+(GreaterInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [6] x y))
 (GreaterInt16x8 ...) => (VPCMPGTW128 ...)
-(GreaterInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPGTD512 x y))
+(GreaterInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [6] x y))
 (GreaterInt32x4 ...) => (VPCMPGTD128 ...)
 (GreaterInt32x8 ...) => (VPCMPGTD256 ...)
-(GreaterInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPGTQ128 x y))
+(GreaterInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [6] x y))
 (GreaterInt64x4 ...) => (VPCMPGTQ256 ...)
-(GreaterInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPGTQ512 x y))
+(GreaterInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [6] x y))
 (GreaterInt8x16 ...) => (VPCMPGTB128 ...)
 (GreaterInt8x32 ...) => (VPCMPGTB256 ...)
-(GreaterInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPGTB512 x y))
+(GreaterInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [6] x y))
 (GreaterUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [6] x y))
 (GreaterUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [6] x y))
 (GreaterUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [6] x y))
@@ -635,18 +635,18 @@
 (MaskedEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [0] x y (VPMOVVec64x2ToM mask)))
 (MaskedEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [0] x y (VPMOVVec64x4ToM mask)))
 (MaskedEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [0] x y (VPMOVVec64x8ToM mask)))
-(MaskedEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPEQWMasked256 x y (VPMOVVec16x16ToM mask)))
-(MaskedEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPEQWMasked512 x y (VPMOVVec16x32ToM mask)))
-(MaskedEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPEQWMasked128 x y (VPMOVVec16x8ToM mask)))
-(MaskedEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPEQDMasked512 x y (VPMOVVec32x16ToM mask)))
-(MaskedEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPEQDMasked128 x y (VPMOVVec32x4ToM mask)))
-(MaskedEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPEQDMasked256 x y (VPMOVVec32x8ToM mask)))
-(MaskedEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPEQQMasked128 x y (VPMOVVec64x2ToM mask)))
-(MaskedEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPEQQMasked256 x y (VPMOVVec64x4ToM mask)))
-(MaskedEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPEQQMasked512 x y (VPMOVVec64x8ToM mask)))
-(MaskedEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPEQBMasked128 x y (VPMOVVec8x16ToM mask)))
-(MaskedEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPEQBMasked256 x y (VPMOVVec8x32ToM mask)))
-(MaskedEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPEQBMasked512 x y (VPMOVVec8x64ToM mask)))
+(MaskedEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [0] x y (VPMOVVec16x16ToM mask)))
+(MaskedEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [0] x y (VPMOVVec16x32ToM mask)))
+(MaskedEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [0] x y (VPMOVVec16x8ToM mask)))
+(MaskedEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [0] x y (VPMOVVec32x16ToM mask)))
+(MaskedEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [0] x y (VPMOVVec32x4ToM mask)))
+(MaskedEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [0] x y (VPMOVVec32x8ToM mask)))
+(MaskedEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [0] x y (VPMOVVec64x2ToM mask)))
+(MaskedEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [0] x y (VPMOVVec64x4ToM mask)))
+(MaskedEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [0] x y (VPMOVVec64x8ToM mask)))
+(MaskedEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [0] x y (VPMOVVec8x16ToM mask)))
+(MaskedEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [0] x y (VPMOVVec8x32ToM mask)))
+(MaskedEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [0] x y (VPMOVVec8x64ToM mask)))
 (MaskedEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [0] x y (VPMOVVec16x16ToM mask)))
 (MaskedEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [0] x y (VPMOVVec16x32ToM mask)))
 (MaskedEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [0] x y (VPMOVVec16x8ToM mask)))
@@ -785,18 +785,18 @@
 (MaskedGreaterFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [6] x y (VPMOVVec64x2ToM mask)))
 (MaskedGreaterFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [6] x y (VPMOVVec64x4ToM mask)))
 (MaskedGreaterFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [6] x y (VPMOVVec64x8ToM mask)))
-(MaskedGreaterInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPGTWMasked256 x y (VPMOVVec16x16ToM mask)))
-(MaskedGreaterInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPGTWMasked512 x y (VPMOVVec16x32ToM mask)))
-(MaskedGreaterInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPGTWMasked128 x y (VPMOVVec16x8ToM mask)))
-(MaskedGreaterInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPGTDMasked512 x y (VPMOVVec32x16ToM mask)))
-(MaskedGreaterInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPGTDMasked128 x y (VPMOVVec32x4ToM mask)))
-(MaskedGreaterInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPGTDMasked256 x y (VPMOVVec32x8ToM mask)))
-(MaskedGreaterInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPGTQMasked128 x y (VPMOVVec64x2ToM mask)))
-(MaskedGreaterInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPGTQMasked256 x y (VPMOVVec64x4ToM mask)))
-(MaskedGreaterInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPGTQMasked512 x y (VPMOVVec64x8ToM mask)))
-(MaskedGreaterInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPGTBMasked128 x y (VPMOVVec8x16ToM mask)))
-(MaskedGreaterInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPGTBMasked256 x y (VPMOVVec8x32ToM mask)))
-(MaskedGreaterInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPGTBMasked512 x y (VPMOVVec8x64ToM mask)))
+(MaskedGreaterInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [6] x y (VPMOVVec16x16ToM mask)))
+(MaskedGreaterInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [6] x y (VPMOVVec16x32ToM mask)))
+(MaskedGreaterInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [6] x y (VPMOVVec16x8ToM mask)))
+(MaskedGreaterInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [6] x y (VPMOVVec32x16ToM mask)))
+(MaskedGreaterInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [6] x y (VPMOVVec32x4ToM mask)))
+(MaskedGreaterInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [6] x y (VPMOVVec32x8ToM mask)))
+(MaskedGreaterInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [6] x y (VPMOVVec64x2ToM mask)))
+(MaskedGreaterInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [6] x y (VPMOVVec64x4ToM mask)))
+(MaskedGreaterInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [6] x y (VPMOVVec64x8ToM mask)))
+(MaskedGreaterInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [6] x y (VPMOVVec8x16ToM mask)))
+(MaskedGreaterInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [6] x y (VPMOVVec8x32ToM mask)))
+(MaskedGreaterInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [6] x y (VPMOVVec8x64ToM mask)))
 (MaskedGreaterUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [6] x y (VPMOVVec16x16ToM mask)))
 (MaskedGreaterUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [6] x y (VPMOVVec16x32ToM mask)))
 (MaskedGreaterUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [6] x y (VPMOVVec16x8ToM mask)))
@@ -1130,12 +1130,12 @@
 (MaskedSqrtFloat64x2 x mask) => (VSQRTPDMasked128 x (VPMOVVec64x2ToM mask))
 (MaskedSqrtFloat64x4 x mask) => (VSQRTPDMasked256 x (VPMOVVec64x4ToM mask))
 (MaskedSqrtFloat64x8 x mask) => (VSQRTPDMasked512 x (VPMOVVec64x8ToM mask))
-(MaskedSubFloat32x16 x y mask) => (VADDPSMasked512 x y (VPMOVVec32x16ToM mask))
-(MaskedSubFloat32x4 x y mask) => (VADDPSMasked128 x y (VPMOVVec32x4ToM mask))
-(MaskedSubFloat32x8 x y mask) => (VADDPSMasked256 x y (VPMOVVec32x8ToM mask))
-(MaskedSubFloat64x2 x y mask) => (VADDPDMasked128 x y (VPMOVVec64x2ToM mask))
-(MaskedSubFloat64x4 x y mask) => (VADDPDMasked256 x y (VPMOVVec64x4ToM mask))
-(MaskedSubFloat64x8 x y mask) => (VADDPDMasked512 x y (VPMOVVec64x8ToM mask))
+(MaskedSubFloat32x16 x y mask) => (VSUBPSMasked512 x y (VPMOVVec32x16ToM mask))
+(MaskedSubFloat32x4 x y mask) => (VSUBPSMasked128 x y (VPMOVVec32x4ToM mask))
+(MaskedSubFloat32x8 x y mask) => (VSUBPSMasked256 x y (VPMOVVec32x8ToM mask))
+(MaskedSubFloat64x2 x y mask) => (VSUBPDMasked128 x y (VPMOVVec64x2ToM mask))
+(MaskedSubFloat64x4 x y mask) => (VSUBPDMasked256 x y (VPMOVVec64x4ToM mask))
+(MaskedSubFloat64x8 x y mask) => (VSUBPDMasked512 x y (VPMOVVec64x8ToM mask))
 (MaskedSubInt16x16 x y mask) => (VPSUBWMasked256 x y (VPMOVVec16x16ToM mask))
 (MaskedSubInt16x32 x y mask) => (VPSUBWMasked512 x y (VPMOVVec16x32ToM mask))
 (MaskedSubInt16x8 x y mask) => (VPSUBWMasked128 x y (VPMOVVec16x8ToM mask))
@@ -1473,12 +1473,12 @@
 (SqrtFloat64x2 ...) => (VSQRTPD128 ...)
 (SqrtFloat64x4 ...) => (VSQRTPD256 ...)
 (SqrtFloat64x8 ...) => (VSQRTPD512 ...)
-(SubFloat32x16 ...) => (VADDPS512 ...)
-(SubFloat32x4 ...) => (VADDPS128 ...)
-(SubFloat32x8 ...) => (VADDPS256 ...)
-(SubFloat64x2 ...) => (VADDPD128 ...)
-(SubFloat64x4 ...) => (VADDPD256 ...)
-(SubFloat64x8 ...) => (VADDPD512 ...)
+(SubFloat32x16 ...) => (VSUBPS512 ...)
+(SubFloat32x4 ...) => (VSUBPS128 ...)
+(SubFloat32x8 ...) => (VSUBPS256 ...)
+(SubFloat64x2 ...) => (VSUBPD128 ...)
+(SubFloat64x4 ...) => (VSUBPD256 ...)
+(SubFloat64x8 ...) => (VSUBPD512 ...)
 (SubInt16x16 ...) => (VPSUBW256 ...)
 (SubInt16x32 ...) => (VPSUBW512 ...)
 (SubInt16x8 ...) => (VPSUBW128 ...)
diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 651a4365c7c09d..17d250421f38d4 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -57,6 +57,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VSCALEFPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VORPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VORPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VSQRTPSMasked512", argLength: 2, reg: fp1k1fp1, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSUBPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VSUBPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VXORPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VXORPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMAXPS512", argLength: 2, reg: fp21, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMINPS512", argLength: 2, reg: fp21, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -64,6 +65,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VSCALEFPS512", argLength: 2, reg: fp21, asm: "VSCALEFPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VORPS512", argLength: 2, reg: fp21, asm: "VORPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VSQRTPS512", argLength: 1, reg: fp11, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSUBPS512", argLength: 2, reg: fp21, asm: "VSUBPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VXORPS512", argLength: 2, reg: fp21, asm: "VXORPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VADDPS128", argLength: 2, reg: fp21, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VADDSUBPS128", argLength: 2, reg: fp21, asm: "VADDSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -120,6 +122,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VSCALEFPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VORPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VORPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VSQRTPSMasked128", argLength: 2, reg: fp1k1fp1, asm: "VSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSUBPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VXORPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VXORPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMAXPS128", argLength: 2, reg: fp21, asm: "VMAXPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMINPS128", argLength: 2, reg: fp21, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -129,6 +132,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VHADDPS128", argLength: 2, reg: fp21, asm: "VHADDPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VHSUBPS128", argLength: 2, reg: fp21, asm: "VHSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VSQRTPS128", argLength: 1, reg: fp11, asm: "VSQRTPS", 
commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSUBPS128", argLength: 2, reg: fp21, asm: "VSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VXORPS128", argLength: 2, reg: fp21, asm: "VXORPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VADDPS256", argLength: 2, reg: fp21, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VADDSUBPS256", argLength: 2, reg: fp21, asm: "VADDSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -185,6 +189,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VSCALEFPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VORPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VORPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VSQRTPSMasked256", argLength: 2, reg: fp1k1fp1, asm: "VSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSUBPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VXORPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VXORPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMAXPS256", argLength: 2, reg: fp21, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMINPS256", argLength: 2, reg: fp21, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false}, @@ -194,6 +199,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VHADDPS256", argLength: 2, reg: fp21, asm: "VHADDPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VHSUBPS256", argLength: 2, reg: fp21, asm: "VHSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VSQRTPS256", argLength: 1, reg: fp11, asm: "VSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSUBPS256", argLength: 2, reg: fp21, asm: "VSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VXORPS256", argLength: 2, reg: fp21, asm: "VXORPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VADDPD128", argLength: 2, reg: fp21, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VADDSUBPD128", argLength: 2, reg: fp21, asm: "VADDSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -250,6 +256,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VSCALEFPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VSCALEFPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VORPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VORPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VSQRTPDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSUBPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VXORPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VXORPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMAXPD128", argLength: 2, reg: fp21, asm: "VMAXPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMINPD128", argLength: 2, reg: fp21, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -259,6 +266,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VHADDPD128", argLength: 2, reg: 
fp21, asm: "VHADDPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VHSUBPD128", argLength: 2, reg: fp21, asm: "VHSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VSQRTPD128", argLength: 1, reg: fp11, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSUBPD128", argLength: 2, reg: fp21, asm: "VSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VXORPD128", argLength: 2, reg: fp21, asm: "VXORPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VADDPD256", argLength: 2, reg: fp21, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VADDSUBPD256", argLength: 2, reg: fp21, asm: "VADDSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -315,6 +323,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VSCALEFPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VSCALEFPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VORPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VORPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VSQRTPDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSUBPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VXORPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VXORPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMAXPD256", argLength: 2, reg: fp21, asm: "VMAXPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMINPD256", argLength: 2, reg: fp21, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false}, @@ -324,6 +333,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VHADDPD256", argLength: 2, reg: fp21, asm: "VHADDPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VHSUBPD256", argLength: 2, reg: fp21, asm: "VHSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VSQRTPD256", argLength: 1, reg: fp11, asm: "VSQRTPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSUBPD256", argLength: 2, reg: fp21, asm: "VSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VXORPD256", argLength: 2, reg: fp21, asm: "VXORPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VADDPD512", argLength: 2, reg: fp21, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VANDPD512", argLength: 2, reg: fp21, asm: "VANDPD", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -379,6 +389,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VSCALEFPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VSCALEFPD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VORPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VORPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VSQRTPDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSUBPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VSUBPD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VXORPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VXORPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMAXPD512", argLength: 2, reg: fp21, asm: "VMAXPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: 
"VMINPD512", argLength: 2, reg: fp21, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -386,17 +397,14 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VSCALEFPD512", argLength: 2, reg: fp21, asm: "VSCALEFPD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VORPD512", argLength: 2, reg: fp21, asm: "VORPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VSQRTPD512", argLength: 1, reg: fp11, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSUBPD512", argLength: 2, reg: fp21, asm: "VSUBPD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VXORPD512", argLength: 2, reg: fp21, asm: "VXORPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPABSW256", argLength: 1, reg: fp11, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPADDW256", argLength: 2, reg: fp21, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPAND256", argLength: 2, reg: fp21, asm: "VPAND", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPANDN256", argLength: 2, reg: fp21, asm: "VPANDN", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPEQW256", argLength: 2, reg: fp21, asm: "VPCMPEQW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPCMPGTW256", argLength: 2, reg: fp21, asm: "VPCMPGTW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPABSWMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPADDWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPEQWMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQW", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPGTWMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTW", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPMAXSWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINSWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINSW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULHWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, @@ -410,7 +418,6 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPMINSW256", argLength: 2, reg: fp21, asm: "VPMINSW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULHW256", argLength: 2, reg: fp21, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULLW256", argLength: 2, reg: fp21, asm: "VPMULLW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPOR256", argLength: 2, reg: fp21, asm: "VPOR", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMADDWD256", argLength: 2, reg: fp21, asm: "VPMADDWD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPHADDW256", argLength: 2, reg: fp21, asm: "VPHADDW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPHSUBW256", argLength: 2, reg: fp21, asm: "VPHSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -421,15 +428,10 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPSUBSW256", argLength: 2, reg: fp21, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: 
false}, {name: "VPSIGNW256", argLength: 2, reg: fp21, asm: "VPSIGNW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBW256", argLength: 2, reg: fp21, asm: "VPSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPXOR256", argLength: 2, reg: fp21, asm: "VPXOR", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPABSW512", argLength: 1, reg: fp11, asm: "VPABSW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPADDW512", argLength: 2, reg: fp21, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPCMPEQW512", argLength: 2, reg: fp2k1, asm: "VPCMPEQW", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPGTW512", argLength: 2, reg: fp2k1, asm: "VPCMPGTW", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPABSWMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPABSW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPADDWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPCMPEQWMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQW", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPGTWMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTW", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPMAXSWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINSWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULHWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -450,14 +452,10 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPSUBW512", argLength: 2, reg: fp21, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPABSW128", argLength: 1, reg: fp11, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDW128", argLength: 2, reg: fp21, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPAND128", argLength: 2, reg: fp21, asm: "VPAND", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPANDN128", argLength: 2, reg: fp21, asm: "VPANDN", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPEQW128", argLength: 2, reg: fp21, asm: "VPCMPEQW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPCMPGTW128", argLength: 2, reg: fp21, asm: "VPCMPGTW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPABSWMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPEQWMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQW", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPGTWMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTW", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPMAXSWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINSWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINSW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULHWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: 
false}, @@ -471,7 +469,6 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPMINSW128", argLength: 2, reg: fp21, asm: "VPMINSW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULHW128", argLength: 2, reg: fp21, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULLW128", argLength: 2, reg: fp21, asm: "VPMULLW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPOR128", argLength: 2, reg: fp21, asm: "VPOR", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMADDWD128", argLength: 2, reg: fp21, asm: "VPMADDWD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPHADDW128", argLength: 2, reg: fp21, asm: "VPHADDW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPHSUBW128", argLength: 2, reg: fp21, asm: "VPHSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -482,19 +479,14 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPSUBSW128", argLength: 2, reg: fp21, asm: "VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSIGNW128", argLength: 2, reg: fp21, asm: "VPSIGNW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBW128", argLength: 2, reg: fp21, asm: "VPSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPXOR128", argLength: 2, reg: fp21, asm: "VPXOR", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPABSD512", argLength: 1, reg: fp11, asm: "VPABSD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPADDD512", argLength: 2, reg: fp21, asm: "VPADDD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPANDD512", argLength: 2, reg: fp21, asm: "VPANDD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPANDND512", argLength: 2, reg: fp21, asm: "VPANDND", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPCMPEQD512", argLength: 2, reg: fp2k1, asm: "VPCMPEQD", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPGTD512", argLength: 2, reg: fp2k1, asm: "VPCMPGTD", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPABSDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPABSD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPADDDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPADDD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPANDDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPANDD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPANDNDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPANDND", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPCMPEQDMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQD", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPGTDMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTD", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPMAXSDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINSDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULLDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -525,8 +517,6 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPADDDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPADDD", 
commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPANDDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPANDD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPANDNDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPANDND", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPEQDMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQD", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPGTDMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTD", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPMAXSDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINSDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINSD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULLDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULLD", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -559,8 +549,6 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPADDDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPADDD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPANDDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPANDD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPANDNDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPANDND", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPEQDMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQD", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPGTDMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTD", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPMAXSDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINSDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINSD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULLDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMULLD", commutative: true, typ: "Vec256", resultInArg0: false}, @@ -588,13 +576,10 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPABSQ128", argLength: 1, reg: fp11, asm: "VPABSQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDQ128", argLength: 2, reg: fp21, asm: "VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPCMPEQQ128", argLength: 2, reg: fp21, asm: "VPCMPEQQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPGTQ128", argLength: 2, reg: fp2k1, asm: "VPCMPGTQ", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPABSQMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPABSQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPANDQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPANDQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPANDNQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPANDNQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPEQQMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQQ", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPGTQMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTQ", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPMAXSQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec128", resultInArg0: false}, 
{name: "VPMINSQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINSQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULDQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULDQ", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -616,8 +601,6 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPADDQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPADDQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPANDQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPANDQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPANDNQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPANDNQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPEQQMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQQ", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPGTQMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTQ", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPMAXSQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINSQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINSQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULDQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMULDQ", commutative: true, typ: "Vec256", resultInArg0: false}, @@ -635,14 +618,10 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPADDQ512", argLength: 2, reg: fp21, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPANDQ512", argLength: 2, reg: fp21, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPANDNQ512", argLength: 2, reg: fp21, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPCMPEQQ512", argLength: 2, reg: fp2k1, asm: "VPCMPEQQ", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPGTQ512", argLength: 2, reg: fp2k1, asm: "VPCMPGTQ", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPABSQMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPABSQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPADDQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPANDQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPANDNQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPCMPEQQMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQQ", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPGTQMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTQ", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPMAXSQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINSQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINSQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULDQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMULDQ", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -661,12 +640,12 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPXORQ512", argLength: 2, reg: fp21, asm: "VPXORQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPABSB128", argLength: 1, reg: fp11, asm: "VPABSB", commutative: false, typ: "Vec128", 
resultInArg0: false}, {name: "VPADDB128", argLength: 2, reg: fp21, asm: "VPADDB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPAND128", argLength: 2, reg: fp21, asm: "VPAND", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPANDN128", argLength: 2, reg: fp21, asm: "VPANDN", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPEQB128", argLength: 2, reg: fp21, asm: "VPCMPEQB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPCMPGTB128", argLength: 2, reg: fp21, asm: "VPCMPGTB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPABSBMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPABSB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPADDB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPEQBMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQB", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPGTBMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTB", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPMAXSBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINSBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINSB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPOPCNTBMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTB", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -675,19 +654,21 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPSUBBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPSUBB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMAXSB128", argLength: 2, reg: fp21, asm: "VPMAXSB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINSB128", argLength: 2, reg: fp21, asm: "VPMINSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPOR128", argLength: 2, reg: fp21, asm: "VPOR", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPOPCNTB128", argLength: 1, reg: fp11, asm: "VPOPCNTB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDSB128", argLength: 2, reg: fp21, asm: "VPADDSB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPSUBSB128", argLength: 2, reg: fp21, asm: "VPSUBSB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSIGNB128", argLength: 2, reg: fp21, asm: "VPSIGNB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBB128", argLength: 2, reg: fp21, asm: "VPSUBB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPXOR128", argLength: 2, reg: fp21, asm: "VPXOR", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPABSB256", argLength: 1, reg: fp11, asm: "VPABSB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPADDB256", argLength: 2, reg: fp21, asm: "VPADDB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPAND256", argLength: 2, reg: fp21, asm: "VPAND", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPANDN256", argLength: 2, reg: fp21, asm: "VPANDN", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPEQB256", argLength: 2, reg: fp21, asm: "VPCMPEQB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPCMPGTB256", argLength: 2, reg: fp21, asm: "VPCMPGTB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPABSBMasked256", argLength: 2, 
reg: fp1k1fp1, asm: "VPABSB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPADDBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPADDB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPEQBMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQB", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPGTBMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTB", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPMAXSBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINSBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINSB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPOPCNTBMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTB", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -696,19 +677,17 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPSUBBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPSUBB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMAXSB256", argLength: 2, reg: fp21, asm: "VPMAXSB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINSB256", argLength: 2, reg: fp21, asm: "VPMINSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPOR256", argLength: 2, reg: fp21, asm: "VPOR", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPOPCNTB256", argLength: 1, reg: fp11, asm: "VPOPCNTB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPADDSB256", argLength: 2, reg: fp21, asm: "VPADDSB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPSUBSB256", argLength: 2, reg: fp21, asm: "VPSUBSB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSIGNB256", argLength: 2, reg: fp21, asm: "VPSIGNB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBB256", argLength: 2, reg: fp21, asm: "VPSUBB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPXOR256", argLength: 2, reg: fp21, asm: "VPXOR", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPABSB512", argLength: 1, reg: fp11, asm: "VPABSB", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPADDB512", argLength: 2, reg: fp21, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPCMPEQB512", argLength: 2, reg: fp2k1, asm: "VPCMPEQB", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPGTB512", argLength: 2, reg: fp2k1, asm: "VPCMPGTB", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPABSBMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPABSB", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPADDBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPCMPEQBMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQB", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPGTBMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTB", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPMAXSBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINSBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINSB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPOPCNTBMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTB", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -841,29 +820,29 @@ func 
simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VREDUCEPDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VCMPPDMasked512", argLength: 3, reg: fp2k1k1, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPW256", argLength: 2, reg: fp2k1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPWMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPW512", argLength: 2, reg: fp2k1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPWMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPWMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPW512", argLength: 2, reg: fp2k1, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPWMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPW128", argLength: 2, reg: fp2k1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPWMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPD512", argLength: 2, reg: fp2k1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPDMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPWMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPD512", argLength: 2, reg: fp2k1, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPDMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPD128", argLength: 2, reg: fp2k1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPDMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPDMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPD256", argLength: 2, reg: fp2k1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPDMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPDMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPQ128", argLength: 2, reg: fp2k1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPQMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPQ256", argLength: 2, reg: fp2k1, asm: "VPCMPQ", aux: "Int8", commutative: 
false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPQMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPQ512", argLength: 2, reg: fp2k1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPQMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQ512", argLength: 2, reg: fp2k1, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPB128", argLength: 2, reg: fp2k1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPBMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPBMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPB256", argLength: 2, reg: fp2k1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPBMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPB512", argLength: 2, reg: fp2k1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPBMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPBMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPB512", argLength: 2, reg: fp2k1, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPBMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUW256", argLength: 2, reg: fp2k1, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUWMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUW512", argLength: 2, reg: fp2k1, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index d2e86702d838f0..ac47bad525e80a 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1250,6 +1250,7 @@ const ( OpAMD64VSCALEFPSMasked512 OpAMD64VORPSMasked512 OpAMD64VSQRTPSMasked512 + OpAMD64VSUBPSMasked512 OpAMD64VXORPSMasked512 OpAMD64VMAXPS512 OpAMD64VMINPS512 @@ -1257,6 +1258,7 @@ const ( OpAMD64VSCALEFPS512 OpAMD64VORPS512 OpAMD64VSQRTPS512 + OpAMD64VSUBPS512 OpAMD64VXORPS512 OpAMD64VADDPS128 OpAMD64VADDSUBPS128 @@ -1313,6 +1315,7 @@ const ( OpAMD64VSCALEFPSMasked128 OpAMD64VORPSMasked128 OpAMD64VSQRTPSMasked128 + OpAMD64VSUBPSMasked128 OpAMD64VXORPSMasked128 OpAMD64VMAXPS128 OpAMD64VMINPS128 @@ -1322,6 +1325,7 @@ const ( OpAMD64VHADDPS128 OpAMD64VHSUBPS128 OpAMD64VSQRTPS128 + OpAMD64VSUBPS128 OpAMD64VXORPS128 OpAMD64VADDPS256 OpAMD64VADDSUBPS256 @@ -1378,6 +1382,7 @@ const ( 
OpAMD64VSCALEFPSMasked256 OpAMD64VORPSMasked256 OpAMD64VSQRTPSMasked256 + OpAMD64VSUBPSMasked256 OpAMD64VXORPSMasked256 OpAMD64VMAXPS256 OpAMD64VMINPS256 @@ -1387,6 +1392,7 @@ const ( OpAMD64VHADDPS256 OpAMD64VHSUBPS256 OpAMD64VSQRTPS256 + OpAMD64VSUBPS256 OpAMD64VXORPS256 OpAMD64VADDPD128 OpAMD64VADDSUBPD128 @@ -1443,6 +1449,7 @@ const ( OpAMD64VSCALEFPDMasked128 OpAMD64VORPDMasked128 OpAMD64VSQRTPDMasked128 + OpAMD64VSUBPDMasked128 OpAMD64VXORPDMasked128 OpAMD64VMAXPD128 OpAMD64VMINPD128 @@ -1452,6 +1459,7 @@ const ( OpAMD64VHADDPD128 OpAMD64VHSUBPD128 OpAMD64VSQRTPD128 + OpAMD64VSUBPD128 OpAMD64VXORPD128 OpAMD64VADDPD256 OpAMD64VADDSUBPD256 @@ -1508,6 +1516,7 @@ const ( OpAMD64VSCALEFPDMasked256 OpAMD64VORPDMasked256 OpAMD64VSQRTPDMasked256 + OpAMD64VSUBPDMasked256 OpAMD64VXORPDMasked256 OpAMD64VMAXPD256 OpAMD64VMINPD256 @@ -1517,6 +1526,7 @@ const ( OpAMD64VHADDPD256 OpAMD64VHSUBPD256 OpAMD64VSQRTPD256 + OpAMD64VSUBPD256 OpAMD64VXORPD256 OpAMD64VADDPD512 OpAMD64VANDPD512 @@ -1572,6 +1582,7 @@ const ( OpAMD64VSCALEFPDMasked512 OpAMD64VORPDMasked512 OpAMD64VSQRTPDMasked512 + OpAMD64VSUBPDMasked512 OpAMD64VXORPDMasked512 OpAMD64VMAXPD512 OpAMD64VMINPD512 @@ -1579,17 +1590,14 @@ const ( OpAMD64VSCALEFPD512 OpAMD64VORPD512 OpAMD64VSQRTPD512 + OpAMD64VSUBPD512 OpAMD64VXORPD512 OpAMD64VPABSW256 OpAMD64VPADDW256 - OpAMD64VPAND256 - OpAMD64VPANDN256 OpAMD64VPCMPEQW256 OpAMD64VPCMPGTW256 OpAMD64VPABSWMasked256 OpAMD64VPADDWMasked256 - OpAMD64VPCMPEQWMasked256 - OpAMD64VPCMPGTWMasked256 OpAMD64VPMAXSWMasked256 OpAMD64VPMINSWMasked256 OpAMD64VPMULHWMasked256 @@ -1603,7 +1611,6 @@ const ( OpAMD64VPMINSW256 OpAMD64VPMULHW256 OpAMD64VPMULLW256 - OpAMD64VPOR256 OpAMD64VPMADDWD256 OpAMD64VPHADDW256 OpAMD64VPHSUBW256 @@ -1614,15 +1621,10 @@ const ( OpAMD64VPSUBSW256 OpAMD64VPSIGNW256 OpAMD64VPSUBW256 - OpAMD64VPXOR256 OpAMD64VPABSW512 OpAMD64VPADDW512 - OpAMD64VPCMPEQW512 - OpAMD64VPCMPGTW512 OpAMD64VPABSWMasked512 OpAMD64VPADDWMasked512 - OpAMD64VPCMPEQWMasked512 - OpAMD64VPCMPGTWMasked512 OpAMD64VPMAXSWMasked512 OpAMD64VPMINSWMasked512 OpAMD64VPMULHWMasked512 @@ -1643,14 +1645,10 @@ const ( OpAMD64VPSUBW512 OpAMD64VPABSW128 OpAMD64VPADDW128 - OpAMD64VPAND128 - OpAMD64VPANDN128 OpAMD64VPCMPEQW128 OpAMD64VPCMPGTW128 OpAMD64VPABSWMasked128 OpAMD64VPADDWMasked128 - OpAMD64VPCMPEQWMasked128 - OpAMD64VPCMPGTWMasked128 OpAMD64VPMAXSWMasked128 OpAMD64VPMINSWMasked128 OpAMD64VPMULHWMasked128 @@ -1664,7 +1662,6 @@ const ( OpAMD64VPMINSW128 OpAMD64VPMULHW128 OpAMD64VPMULLW128 - OpAMD64VPOR128 OpAMD64VPMADDWD128 OpAMD64VPHADDW128 OpAMD64VPHSUBW128 @@ -1675,19 +1672,14 @@ const ( OpAMD64VPSUBSW128 OpAMD64VPSIGNW128 OpAMD64VPSUBW128 - OpAMD64VPXOR128 OpAMD64VPABSD512 OpAMD64VPADDD512 OpAMD64VPANDD512 OpAMD64VPANDND512 - OpAMD64VPCMPEQD512 - OpAMD64VPCMPGTD512 OpAMD64VPABSDMasked512 OpAMD64VPADDDMasked512 OpAMD64VPANDDMasked512 OpAMD64VPANDNDMasked512 - OpAMD64VPCMPEQDMasked512 - OpAMD64VPCMPGTDMasked512 OpAMD64VPMAXSDMasked512 OpAMD64VPMINSDMasked512 OpAMD64VPMULLDMasked512 @@ -1718,8 +1710,6 @@ const ( OpAMD64VPADDDMasked128 OpAMD64VPANDDMasked128 OpAMD64VPANDNDMasked128 - OpAMD64VPCMPEQDMasked128 - OpAMD64VPCMPGTDMasked128 OpAMD64VPMAXSDMasked128 OpAMD64VPMINSDMasked128 OpAMD64VPMULLDMasked128 @@ -1752,8 +1742,6 @@ const ( OpAMD64VPADDDMasked256 OpAMD64VPANDDMasked256 OpAMD64VPANDNDMasked256 - OpAMD64VPCMPEQDMasked256 - OpAMD64VPCMPGTDMasked256 OpAMD64VPMAXSDMasked256 OpAMD64VPMINSDMasked256 OpAMD64VPMULLDMasked256 @@ -1781,13 +1769,10 @@ const ( OpAMD64VPABSQ128 OpAMD64VPADDQ128 OpAMD64VPCMPEQQ128 - 
OpAMD64VPCMPGTQ128 OpAMD64VPABSQMasked128 OpAMD64VPADDQMasked128 OpAMD64VPANDQMasked128 OpAMD64VPANDNQMasked128 - OpAMD64VPCMPEQQMasked128 - OpAMD64VPCMPGTQMasked128 OpAMD64VPMAXSQMasked128 OpAMD64VPMINSQMasked128 OpAMD64VPMULDQMasked128 @@ -1809,8 +1794,6 @@ const ( OpAMD64VPADDQMasked256 OpAMD64VPANDQMasked256 OpAMD64VPANDNQMasked256 - OpAMD64VPCMPEQQMasked256 - OpAMD64VPCMPGTQMasked256 OpAMD64VPMAXSQMasked256 OpAMD64VPMINSQMasked256 OpAMD64VPMULDQMasked256 @@ -1828,14 +1811,10 @@ const ( OpAMD64VPADDQ512 OpAMD64VPANDQ512 OpAMD64VPANDNQ512 - OpAMD64VPCMPEQQ512 - OpAMD64VPCMPGTQ512 OpAMD64VPABSQMasked512 OpAMD64VPADDQMasked512 OpAMD64VPANDQMasked512 OpAMD64VPANDNQMasked512 - OpAMD64VPCMPEQQMasked512 - OpAMD64VPCMPGTQMasked512 OpAMD64VPMAXSQMasked512 OpAMD64VPMINSQMasked512 OpAMD64VPMULDQMasked512 @@ -1854,12 +1833,12 @@ const ( OpAMD64VPXORQ512 OpAMD64VPABSB128 OpAMD64VPADDB128 + OpAMD64VPAND128 + OpAMD64VPANDN128 OpAMD64VPCMPEQB128 OpAMD64VPCMPGTB128 OpAMD64VPABSBMasked128 OpAMD64VPADDBMasked128 - OpAMD64VPCMPEQBMasked128 - OpAMD64VPCMPGTBMasked128 OpAMD64VPMAXSBMasked128 OpAMD64VPMINSBMasked128 OpAMD64VPOPCNTBMasked128 @@ -1868,19 +1847,21 @@ const ( OpAMD64VPSUBBMasked128 OpAMD64VPMAXSB128 OpAMD64VPMINSB128 + OpAMD64VPOR128 OpAMD64VPOPCNTB128 OpAMD64VPADDSB128 OpAMD64VPSUBSB128 OpAMD64VPSIGNB128 OpAMD64VPSUBB128 + OpAMD64VPXOR128 OpAMD64VPABSB256 OpAMD64VPADDB256 + OpAMD64VPAND256 + OpAMD64VPANDN256 OpAMD64VPCMPEQB256 OpAMD64VPCMPGTB256 OpAMD64VPABSBMasked256 OpAMD64VPADDBMasked256 - OpAMD64VPCMPEQBMasked256 - OpAMD64VPCMPGTBMasked256 OpAMD64VPMAXSBMasked256 OpAMD64VPMINSBMasked256 OpAMD64VPOPCNTBMasked256 @@ -1889,19 +1870,17 @@ const ( OpAMD64VPSUBBMasked256 OpAMD64VPMAXSB256 OpAMD64VPMINSB256 + OpAMD64VPOR256 OpAMD64VPOPCNTB256 OpAMD64VPADDSB256 OpAMD64VPSUBSB256 OpAMD64VPSIGNB256 OpAMD64VPSUBB256 + OpAMD64VPXOR256 OpAMD64VPABSB512 OpAMD64VPADDB512 - OpAMD64VPCMPEQB512 - OpAMD64VPCMPGTB512 OpAMD64VPABSBMasked512 OpAMD64VPADDBMasked512 - OpAMD64VPCMPEQBMasked512 - OpAMD64VPCMPGTBMasked512 OpAMD64VPMAXSBMasked512 OpAMD64VPMINSBMasked512 OpAMD64VPOPCNTBMasked512 @@ -19314,6 +19293,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VSUBPSMasked512", + argLen: 3, + asm: x86.AVSUBPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VXORPSMasked512", argLen: 3, @@ -19417,6 +19411,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VSUBPS512", + argLen: 2, + asm: x86.AVSUBPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VXORPS512", argLen: 2, @@ -20307,6 +20315,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VSUBPSMasked128", + argLen: 3, + asm: x86.AVSUBPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: 
"VXORPSMasked128", argLen: 3, @@ -20438,6 +20461,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VSUBPS128", + argLen: 2, + asm: x86.AVSUBPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VXORPS128", argLen: 2, @@ -21328,6 +21365,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VSUBPSMasked256", + argLen: 3, + asm: x86.AVSUBPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VXORPSMasked256", argLen: 3, @@ -21459,6 +21511,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VSUBPS256", + argLen: 2, + asm: x86.AVSUBPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VXORPS256", argLen: 2, @@ -22349,6 +22415,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VSUBPDMasked128", + argLen: 3, + asm: x86.AVSUBPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VXORPDMasked128", argLen: 3, @@ -22480,6 +22561,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VSUBPD128", + argLen: 2, + asm: x86.AVSUBPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VXORPD128", argLen: 2, @@ -23370,6 +23465,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VSUBPDMasked256", + argLen: 3, + asm: x86.AVSUBPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VXORPDMasked256", argLen: 3, @@ -23501,6 +23611,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VSUBPD256", + argLen: 2, + asm: x86.AVSUBPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VXORPD256", argLen: 2, @@ -24377,6 +24501,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VSUBPDMasked512", + argLen: 3, + asm: x86.AVSUBPD, + reg: regInfo{ + inputs: 
[]inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VXORPDMasked512", argLen: 3, @@ -24481,10 +24620,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VXORPD512", - argLen: 2, - commutative: true, - asm: x86.AVXORPD, + name: "VSUBPD512", + argLen: 2, + asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24496,23 +24634,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSW256", - argLen: 1, - asm: x86.AVPABSW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPADDW256", + name: "VXORPD512", argLen: 2, commutative: true, - asm: x86.AVPADDW, + asm: x86.AVXORPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24524,14 +24649,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAND256", - argLen: 2, - commutative: true, - asm: x86.AVPAND, + name: "VPABSW256", + argLen: 1, + asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24539,9 +24662,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDN256", - argLen: 2, - asm: x86.AVPANDN, + name: "VPADDW256", + argLen: 2, + commutative: true, + asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24611,37 +24735,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPEQWMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPCMPEQW, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPCMPGTWMasked256", - argLen: 3, - asm: x86.AVPCMPGTW, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VPMAXSWMasked256", argLen: 3, @@ -24841,21 +24934,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPOR256", - argLen: 2, - commutative: true, - asm: x86.AVPOR, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPMADDWD256", argLen: 2, @@ -24996,21 +25074,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPXOR256", - argLen: 2, - commutative: true, - asm: x86.AVPXOR, - reg: regInfo{ - inputs: []inputInfo{ - {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPABSW512", argLen: 1, @@ -25040,38 +25103,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQW512", - argLen: 2, - commutative: true, - asm: x86.AVPCMPEQW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPCMPGTW512", - argLen: 2, - asm: x86.AVPCMPGTW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPABSWMasked512", - argLen: 2, - asm: x86.AVPABSW, + name: "VPABSWMasked512", + argLen: 2, + asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25098,37 +25132,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPEQWMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPCMPEQW, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPCMPGTWMasked512", - argLen: 3, - asm: x86.AVPCMPGTW, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VPMAXSWMasked512", argLen: 3, @@ -25426,35 +25429,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPAND128", - argLen: 2, - commutative: true, - asm: x86.AVPAND, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPANDN128", - argLen: 2, - asm: x86.AVPANDN, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPCMPEQW128", argLen: 2, @@ -25514,37 +25488,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPEQWMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPCMPEQW, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPCMPGTWMasked128", - argLen: 3, - asm: x86.AVPCMPGTW, - reg: regInfo{ - inputs: 
[]inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VPMAXSWMasked128", argLen: 3, @@ -25744,21 +25687,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPOR128", - argLen: 2, - commutative: true, - asm: x86.AVPOR, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPMADDWD128", argLen: 2, @@ -25899,21 +25827,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPXOR128", - argLen: 2, - commutative: true, - asm: x86.AVPXOR, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPABSD512", argLen: 1, @@ -25971,35 +25884,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPEQD512", - argLen: 2, - commutative: true, - asm: x86.AVPCMPEQD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPCMPGTD512", - argLen: 2, - asm: x86.AVPCMPGTD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VPABSDMasked512", argLen: 2, @@ -26061,37 +25945,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPEQDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPCMPEQD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPCMPGTDMasked512", - argLen: 3, - asm: x86.AVPCMPGTD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VPMAXSDMasked512", argLen: 3, @@ -26553,37 +26406,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPEQDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPCMPEQD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPCMPGTDMasked128", - argLen: 3, - asm: x86.AVPCMPGTD, - reg: regInfo{ - inputs: 
[]inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VPMAXSDMasked128", argLen: 3, @@ -27072,37 +26894,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPEQDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPCMPEQD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPCMPGTDMasked256", - argLen: 3, - asm: x86.AVPCMPGTD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VPMAXSDMasked256", argLen: 3, @@ -27516,20 +27307,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPGTQ128", - argLen: 2, - asm: x86.AVPCMPGTQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VPABSQMasked128", argLen: 2, @@ -27591,37 +27368,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPEQQMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPCMPEQQ, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPCMPGTQMasked128", - argLen: 3, - asm: x86.AVPCMPGTQ, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VPMAXSQMasked128", argLen: 3, @@ -27937,37 +27683,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPEQQMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPCMPEQQ, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPCMPGTQMasked256", - argLen: 3, - asm: x86.AVPCMPGTQ, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VPMAXSQMasked256", argLen: 3, @@ -28222,35 +27937,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: 
"VPCMPEQQ512", - argLen: 2, - commutative: true, - asm: x86.AVPCMPEQQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPCMPGTQ512", - argLen: 2, - asm: x86.AVPCMPGTQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VPABSQMasked512", argLen: 2, @@ -28312,37 +27998,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPEQQMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPCMPEQQ, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPCMPGTQMasked512", - argLen: 3, - asm: x86.AVPCMPGTQ, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VPMAXSQMasked512", argLen: 3, @@ -28614,10 +28269,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQB128", + name: "VPAND128", argLen: 2, commutative: true, - asm: x86.AVPCMPEQB, + asm: x86.AVPAND, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28629,9 +28284,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPGTB128", + name: "VPANDN128", argLen: 2, - asm: x86.AVPCMPGTB, + asm: x86.AVPANDN, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28643,13 +28298,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSBMasked128", - argLen: 2, - asm: x86.AVPABSB, + name: "VPCMPEQB128", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQB, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28657,15 +28313,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDBMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPADDB, + name: "VPCMPGTB128", + argLen: 2, + asm: x86.AVPCMPGTB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28673,25 +28327,24 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQBMasked128", - argLen: 3, - 
commutative: true, - asm: x86.AVPCMPEQB, + name: "VPABSBMasked128", + argLen: 2, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPGTBMasked128", - argLen: 3, - asm: x86.AVPCMPGTB, + name: "VPADDBMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -28699,7 +28352,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -28825,6 +28478,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPOR128", + argLen: 2, + commutative: true, + asm: x86.AVPOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPOPCNTB128", argLen: 1, @@ -28895,6 +28563,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPXOR128", + argLen: 2, + commutative: true, + asm: x86.AVPXOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPABSB256", argLen: 1, @@ -28924,10 +28607,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQB256", + name: "VPAND256", argLen: 2, commutative: true, - asm: x86.AVPCMPEQB, + asm: x86.AVPAND, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28939,9 +28622,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPGTB256", + name: "VPANDN256", argLen: 2, - asm: x86.AVPCMPGTB, + asm: x86.AVPANDN, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28953,13 +28636,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSBMasked256", - argLen: 2, - asm: x86.AVPABSB, + name: "VPCMPEQB256", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQB, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28967,15 +28651,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDBMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPADDB, + name: "VPCMPGTB256", + argLen: 2, + asm: x86.AVPCMPGTB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + 
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28983,25 +28665,24 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQBMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPCMPEQB, + name: "VPABSBMasked256", + argLen: 2, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPGTBMasked256", - argLen: 3, - asm: x86.AVPCMPGTB, + name: "VPADDBMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -29009,7 +28690,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -29135,6 +28816,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPOR256", + argLen: 2, + commutative: true, + asm: x86.AVPOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPOPCNTB256", argLen: 1, @@ -29206,12 +28902,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSB512", - argLen: 1, - asm: x86.AVPABSB, + name: "VPXOR256", + argLen: 2, + commutative: true, + asm: x86.AVPXOR, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29219,14 +28917,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDB512", - argLen: 2, - commutative: true, - asm: x86.AVPADDB, + name: "VPABSB512", + argLen: 1, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29234,31 +28930,17 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQB512", + name: "VPADDB512", argLen: 2, commutative: true, - asm: x86.AVPCMPEQB, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPCMPGTB512", - argLen: 2, - asm: x86.AVPCMPGTB, - reg: regInfo{ - inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ 
-29292,37 +28974,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPEQBMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPCMPEQB, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPCMPGTBMasked512", - argLen: 3, - asm: x86.AVPCMPGTB, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VPMAXSBMasked512", argLen: 3, @@ -31338,10 +30989,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPWMasked256", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPW, + name: "VPCMPWMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -31354,10 +31006,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPW512", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPW, + name: "VPCMPW512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31369,10 +31022,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPWMasked512", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPW, + name: "VPCMPWMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -31400,10 +31054,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPWMasked128", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPW, + name: "VPCMPWMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -31416,10 +31071,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPD512", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPD, + name: "VPCMPD512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31431,10 +31087,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPDMasked512", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPD, + name: "VPCMPDMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -31462,10 +31119,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPDMasked128", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPD, + name: "VPCMPDMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -31493,10 +31151,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPDMasked256", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPD, + name: "VPCMPDMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -31524,10 +31183,11 @@ 
var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPQMasked128", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPQ, + name: "VPCMPQMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -31555,10 +31215,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPQMasked256", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPQ, + name: "VPCMPQMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -31571,10 +31232,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPQ512", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPQ, + name: "VPCMPQ512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31586,10 +31248,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPQMasked512", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPQ, + name: "VPCMPQMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -31617,10 +31280,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPBMasked128", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPB, + name: "VPCMPBMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -31648,10 +31312,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPBMasked256", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPB, + name: "VPCMPBMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -31664,10 +31329,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPB512", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPB, + name: "VPCMPB512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31679,10 +31345,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPBMasked512", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPB, + name: "VPCMPBMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index e9bafe2a1b400f..80d8eef8733ecf 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -4584,22 +4584,22 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64SUBL return true case OpSubFloat32x16: - v.Op = OpAMD64VADDPS512 + v.Op = OpAMD64VSUBPS512 return true case OpSubFloat32x4: - v.Op = OpAMD64VADDPS128 + v.Op = OpAMD64VSUBPS128 return true case OpSubFloat32x8: - v.Op = OpAMD64VADDPS256 + v.Op = OpAMD64VSUBPS256 return true case OpSubFloat64x2: - v.Op = OpAMD64VADDPD128 + v.Op = OpAMD64VSUBPD128 return true case OpSubFloat64x4: - v.Op = OpAMD64VADDPD256 + v.Op = OpAMD64VSUBPD256 return true case OpSubFloat64x8: - v.Op = OpAMD64VADDPD512 + v.Op = OpAMD64VSUBPD512 return true case OpSubInt16x16: v.Op = OpAMD64VPSUBW256 @@ -30476,12 +30476,13 @@ func 
rewriteValueAMD64_OpEqualInt16x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (EqualInt16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPEQW512 x y)) + // result: (VPMOVMToVec16x32 (VPCMPW512 [0] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQW512, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -30493,12 +30494,13 @@ func rewriteValueAMD64_OpEqualInt32x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (EqualInt32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPEQD512 x y)) + // result: (VPMOVMToVec32x16 (VPCMPD512 [0] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQD512, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -30510,12 +30512,13 @@ func rewriteValueAMD64_OpEqualInt64x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (EqualInt64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPEQQ512 x y)) + // result: (VPMOVMToVec64x8 (VPCMPQ512 [0] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQQ512, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -30527,12 +30530,13 @@ func rewriteValueAMD64_OpEqualInt8x64(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (EqualInt8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPEQB512 x y)) + // result: (VPMOVMToVec8x64 (VPCMPB512 [0] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQB512, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31623,12 +31627,13 @@ func rewriteValueAMD64_OpGreaterInt16x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterInt16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPGTW512 x y)) + // result: (VPMOVMToVec16x32 (VPCMPW512 [6] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTW512, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31640,12 +31645,13 @@ func rewriteValueAMD64_OpGreaterInt32x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterInt32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPGTD512 x y)) + // result: (VPMOVMToVec32x16 (VPCMPD512 [6] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTD512, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31657,12 +31663,13 @@ func rewriteValueAMD64_OpGreaterInt64x2(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterInt64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPGTQ128 x y)) + // result: (VPMOVMToVec64x2 (VPCMPQ128 [6] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQ128, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31674,12 +31681,13 @@ func rewriteValueAMD64_OpGreaterInt64x8(v *Value) bool { b := v.Block typ := 
&b.Func.Config.Types // match: (GreaterInt64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPGTQ512 x y)) + // result: (VPMOVMToVec64x8 (VPCMPQ512 [6] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQ512, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31691,12 +31699,13 @@ func rewriteValueAMD64_OpGreaterInt8x64(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterInt8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPGTB512 x y)) + // result: (VPMOVMToVec8x64 (VPCMPB512 [6] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTB512, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -37259,13 +37268,14 @@ func rewriteValueAMD64_OpMaskedEqualInt16x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedEqualInt16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPEQWMasked256 x y (VPMOVVec16x16ToM mask))) + // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [0] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQWMasked256, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37280,13 +37290,14 @@ func rewriteValueAMD64_OpMaskedEqualInt16x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedEqualInt16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPEQWMasked512 x y (VPMOVVec16x32ToM mask))) + // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [0] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQWMasked512, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37301,13 +37312,14 @@ func rewriteValueAMD64_OpMaskedEqualInt16x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedEqualInt16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPEQWMasked128 x y (VPMOVVec16x8ToM mask))) + // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [0] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQWMasked128, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37322,13 +37334,14 @@ func rewriteValueAMD64_OpMaskedEqualInt32x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedEqualInt32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPEQDMasked512 x y (VPMOVVec32x16ToM mask))) + // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [0] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQDMasked512, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37343,13 +37356,14 @@ func 
rewriteValueAMD64_OpMaskedEqualInt32x4(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedEqualInt32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPEQDMasked128 x y (VPMOVVec32x4ToM mask))) + // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [0] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQDMasked128, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37364,13 +37378,14 @@ func rewriteValueAMD64_OpMaskedEqualInt32x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedEqualInt32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPEQDMasked256 x y (VPMOVVec32x8ToM mask))) + // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [0] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQDMasked256, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37385,13 +37400,14 @@ func rewriteValueAMD64_OpMaskedEqualInt64x2(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedEqualInt64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPEQQMasked128 x y (VPMOVVec64x2ToM mask))) + // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [0] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQQMasked128, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37406,13 +37422,14 @@ func rewriteValueAMD64_OpMaskedEqualInt64x4(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedEqualInt64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPEQQMasked256 x y (VPMOVVec64x4ToM mask))) + // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [0] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQQMasked256, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37427,13 +37444,14 @@ func rewriteValueAMD64_OpMaskedEqualInt64x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedEqualInt64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPEQQMasked512 x y (VPMOVVec64x8ToM mask))) + // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [0] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQQMasked512, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37448,13 +37466,14 @@ func rewriteValueAMD64_OpMaskedEqualInt8x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedEqualInt8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPEQBMasked128 x y (VPMOVVec8x16ToM mask))) + // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [0] x y (VPMOVVec8x16ToM mask))) for { x 
:= v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQBMasked128, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37469,13 +37488,14 @@ func rewriteValueAMD64_OpMaskedEqualInt8x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedEqualInt8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPEQBMasked256 x y (VPMOVVec8x32ToM mask))) + // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [0] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQBMasked256, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37490,13 +37510,14 @@ func rewriteValueAMD64_OpMaskedEqualInt8x64(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedEqualInt8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPEQBMasked512 x y (VPMOVVec8x64ToM mask))) + // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [0] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQBMasked512, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -40943,13 +40964,14 @@ func rewriteValueAMD64_OpMaskedGreaterInt16x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterInt16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPGTWMasked256 x y (VPMOVVec16x16ToM mask))) + // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [6] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTWMasked256, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -40964,13 +40986,14 @@ func rewriteValueAMD64_OpMaskedGreaterInt16x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterInt16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPGTWMasked512 x y (VPMOVVec16x32ToM mask))) + // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [6] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTWMasked512, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -40985,13 +41008,14 @@ func rewriteValueAMD64_OpMaskedGreaterInt16x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterInt16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPGTWMasked128 x y (VPMOVVec16x8ToM mask))) + // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [6] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTWMasked128, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, 
types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -41006,13 +41030,14 @@ func rewriteValueAMD64_OpMaskedGreaterInt32x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterInt32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPGTDMasked512 x y (VPMOVVec32x16ToM mask))) + // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [6] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTDMasked512, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -41027,13 +41052,14 @@ func rewriteValueAMD64_OpMaskedGreaterInt32x4(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterInt32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPGTDMasked128 x y (VPMOVVec32x4ToM mask))) + // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [6] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTDMasked128, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -41048,13 +41074,14 @@ func rewriteValueAMD64_OpMaskedGreaterInt32x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterInt32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPGTDMasked256 x y (VPMOVVec32x8ToM mask))) + // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [6] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTDMasked256, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -41069,13 +41096,14 @@ func rewriteValueAMD64_OpMaskedGreaterInt64x2(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterInt64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPGTQMasked128 x y (VPMOVVec64x2ToM mask))) + // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [6] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQMasked128, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -41090,13 +41118,14 @@ func rewriteValueAMD64_OpMaskedGreaterInt64x4(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterInt64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPGTQMasked256 x y (VPMOVVec64x4ToM mask))) + // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [6] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQMasked256, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -41111,13 +41140,14 @@ func rewriteValueAMD64_OpMaskedGreaterInt64x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterInt64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPGTQMasked512 x 
y (VPMOVVec64x8ToM mask))) + // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [6] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQMasked512, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -41132,13 +41162,14 @@ func rewriteValueAMD64_OpMaskedGreaterInt8x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterInt8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPGTBMasked128 x y (VPMOVVec8x16ToM mask))) + // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [6] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTBMasked128, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -41153,13 +41184,14 @@ func rewriteValueAMD64_OpMaskedGreaterInt8x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterInt8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPGTBMasked256 x y (VPMOVVec8x32ToM mask))) + // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [6] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTBMasked256, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -41174,13 +41206,14 @@ func rewriteValueAMD64_OpMaskedGreaterInt8x64(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterInt8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPGTBMasked512 x y (VPMOVVec8x64ToM mask))) + // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [6] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTBMasked512, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -47044,12 +47077,12 @@ func rewriteValueAMD64_OpMaskedSubFloat32x16(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (MaskedSubFloat32x16 x y mask) - // result: (VADDPSMasked512 x y (VPMOVVec32x16ToM mask)) + // result: (VSUBPSMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VADDPSMasked512) + v.reset(OpAMD64VSUBPSMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -47062,12 +47095,12 @@ func rewriteValueAMD64_OpMaskedSubFloat32x4(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (MaskedSubFloat32x4 x y mask) - // result: (VADDPSMasked128 x y (VPMOVVec32x4ToM mask)) + // result: (VSUBPSMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VADDPSMasked128) + v.reset(OpAMD64VSUBPSMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -47080,12 +47113,12 @@ func rewriteValueAMD64_OpMaskedSubFloat32x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (MaskedSubFloat32x8 x y mask) - // result: (VADDPSMasked256 x y 
(VPMOVVec32x8ToM mask)) + // result: (VSUBPSMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VADDPSMasked256) + v.reset(OpAMD64VSUBPSMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -47098,12 +47131,12 @@ func rewriteValueAMD64_OpMaskedSubFloat64x2(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (MaskedSubFloat64x2 x y mask) - // result: (VADDPDMasked128 x y (VPMOVVec64x2ToM mask)) + // result: (VSUBPDMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VADDPDMasked128) + v.reset(OpAMD64VSUBPDMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -47116,12 +47149,12 @@ func rewriteValueAMD64_OpMaskedSubFloat64x4(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (MaskedSubFloat64x4 x y mask) - // result: (VADDPDMasked256 x y (VPMOVVec64x4ToM mask)) + // result: (VSUBPDMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VADDPDMasked256) + v.reset(OpAMD64VSUBPDMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -47134,12 +47167,12 @@ func rewriteValueAMD64_OpMaskedSubFloat64x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (MaskedSubFloat64x8 x y mask) - // result: (VADDPDMasked512 x y (VPMOVVec64x8ToM mask)) + // result: (VSUBPDMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VADDPDMasked512) + v.reset(OpAMD64VSUBPDMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index f5492ac6e8e6b4..b86c81516601bc 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -1370,195 +1370,195 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint32x4.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x8.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x8.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.DiffWithCeilSuppressExceptionWithPrecision", 
opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat64x8, 
types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x8, 
types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.RoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.RoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpRoundSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.RoundSuppressExceptionWithPrecision", 
opLen1Imm8(ssa.OpRoundSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.RoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.RoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpRoundSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.RoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpRoundSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.RoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpRoundSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.TruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpTruncSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.TruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpTruncSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.TruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpTruncSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.TruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpTruncSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.TruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpTruncSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.TruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpTruncSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.TruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpTruncSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.TruncWithPrecision", 
opLen1Imm8(ssa.OpTruncWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) 
addF(simdPackage, "Float64x8.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat64x4, 
types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.MaskedDiffWithTruncSuppressExceptionWithPrecision", 
opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) 
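The Masked* registrations here go through opLen2Imm8 because they take a mask operand in addition to the immediate. The masks themselves come from the comparison methods (Equal, Greater, Less, ...) declared in the stubs. A small sketch of the masked pattern, using Float32x4.Greater and Float32x4.MaskedSqrt, whose signatures appear in stubs_amd64.go below; LoadFloat32x4 and the Store parameter type are assumed by analogy with the load/store intrinsics registered in this function:

    package main

    import (
        "fmt"
        "simd"
    )

    func main() {
        vals := [4]float32{4, -1, 9, -16}
        var zeros, out [4]float32

        v := simd.LoadFloat32x4(&vals) // assumed: LoadFloat32x4(*[4]float32) Float32x4
        z := simd.LoadFloat32x4(&zeros)

        m := v.Greater(z)    // Mask32x4 set in lanes where v > 0
        s := v.MaskedSqrt(m) // sqrt only in selected lanes; the stubs mark this AVX512-only

        s.Store(&out) // assumed: Store(*[4]float32)
        fmt.Println(out) // positive lanes hold square roots; masked-off lanes follow the op's masking rules
    }

The same shape applies to the MaskedFloorWithPrecision/MaskedRoundWithPrecision entries above, with the precision immediate added; the argument order for those methods is not visible in this hunk, so it is left out of the sketch.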
addF(simdPackage, "Float64x4.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 
4), sys.AMD64) addF(simdPackage, "Float64x8.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) @@ -1832,12 +1832,12 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint8x64.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint8x64.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint8x64.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "LoadInt8x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Int8x16.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadInt16x8", simdLoad(), sys.AMD64) addF(simdPackage, "Int16x8.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadInt32x4", simdLoad(), sys.AMD64) addF(simdPackage, "Int32x4.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadInt8x16", simdLoad(), sys.AMD64) - addF(simdPackage, "Int8x16.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadInt64x2", simdLoad(), sys.AMD64) addF(simdPackage, "Int64x2.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadMask64x2", simdLoad(), sys.AMD64) @@ -1846,26 +1846,26 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float32x4.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadFloat64x2", simdLoad(), sys.AMD64) addF(simdPackage, "Float64x2.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint8x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint8x16.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadUint16x8", simdLoad(), sys.AMD64) addF(simdPackage, "Uint16x8.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadUint32x4", simdLoad(), sys.AMD64) addF(simdPackage, "Uint32x4.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadUint64x2", simdLoad(), sys.AMD64) addF(simdPackage, "Uint64x2.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadUint8x16", simdLoad(), sys.AMD64) - addF(simdPackage, "Uint8x16.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadMask32x4", simdLoad(), sys.AMD64) addF(simdPackage, "Mask32x4.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask16x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask16x8.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadMask8x16", simdLoad(), sys.AMD64) addF(simdPackage, "Mask8x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask16x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask16x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt8x32", simdLoad(), sys.AMD64) + addF(simdPackage, "Int8x32.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadInt16x16", simdLoad(), sys.AMD64) addF(simdPackage, "Int16x16.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadInt32x8", simdLoad(), sys.AMD64) addF(simdPackage, "Int32x8.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadInt8x32", simdLoad(), sys.AMD64) - addF(simdPackage, "Int8x32.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadInt64x4", simdLoad(), sys.AMD64) addF(simdPackage, "Int64x4.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadMask64x4", simdLoad(), sys.AMD64) @@ -1874,20 +1874,24 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float32x8.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadFloat64x4", simdLoad(), sys.AMD64) addF(simdPackage, "Float64x4.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint8x32", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint8x32.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadUint16x16", simdLoad(), sys.AMD64) addF(simdPackage, "Uint16x16.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadUint32x8", simdLoad(), sys.AMD64) addF(simdPackage, "Uint32x8.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadUint64x4", simdLoad(), sys.AMD64) addF(simdPackage, "Uint64x4.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadUint8x32", simdLoad(), sys.AMD64) - addF(simdPackage, "Uint8x32.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadMask32x8", simdLoad(), sys.AMD64) addF(simdPackage, "Mask32x8.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask16x16", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask16x16.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadMask8x32", simdLoad(), sys.AMD64) addF(simdPackage, "Mask8x32.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask16x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask16x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt8x64", simdLoad(), sys.AMD64) + addF(simdPackage, "Int8x64.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask8x64", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask8x64.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadInt16x32", simdLoad(), sys.AMD64) addF(simdPackage, "Int16x32.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadMask16x32", simdLoad(), sys.AMD64) @@ -1900,22 +1904,18 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Int64x8.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadMask64x8", simdLoad(), sys.AMD64) addF(simdPackage, "Mask64x8.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadInt8x64", simdLoad(), sys.AMD64) - addF(simdPackage, "Int8x64.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask8x64", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask8x64.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadFloat32x16", simdLoad(), sys.AMD64) addF(simdPackage, "Float32x16.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadFloat64x8", simdLoad(), sys.AMD64) addF(simdPackage, "Float64x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint8x64", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint8x64.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadUint16x32", simdLoad(), sys.AMD64) addF(simdPackage, "Uint16x32.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadUint32x16", simdLoad(), sys.AMD64) addF(simdPackage, "Uint32x16.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadUint64x8", simdLoad(), sys.AMD64) addF(simdPackage, "Uint64x8.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadUint8x64", simdLoad(), sys.AMD64) - addF(simdPackage, "Uint8x64.Store", simdStore(), sys.AMD64) addF(simdPackage, "Mask16x16.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x16.AsMask16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask16x16.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) diff --git a/src/simd/stubs_amd64.go b/src/simd/stubs_amd64.go index cf37b5efcedd17..65332bf3fa7078 100644 --- a/src/simd/stubs_amd64.go +++ b/src/simd/stubs_amd64.go 
@@ -4,1132 +4,1067 @@ package simd -// ApproximateReciprocal computes an approximate reciprocal of each element. -// -// Asm: VRCP14PS, CPU Feature: AVX512EVEX -func (x Float32x16) ApproximateReciprocal() Float32x16 +/* Absolute */ -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// Absolute computes the absolute value of each element. // -// Asm: VRSQRT14PS, CPU Feature: AVX512EVEX -func (x Float32x16) ApproximateReciprocalOfSqrt() Float32x16 +// Asm: VPABSB, CPU Feature: AVX +func (x Int8x16) Absolute() Int8x16 -// Sqrt computes the square root of each element. +// Absolute computes the absolute value of each element. // -// Asm: VSQRTPS, CPU Feature: AVX512EVEX -func (x Float32x16) Sqrt() Float32x16 +// Asm: VPABSB, CPU Feature: AVX2 +func (x Int8x32) Absolute() Int8x32 -// ApproximateReciprocal computes an approximate reciprocal of each element. +// Absolute computes the absolute value of each element. // -// Asm: VRCP14PS, CPU Feature: AVX512EVEX -func (x Float32x4) ApproximateReciprocal() Float32x4 +// Asm: VPABSB, CPU Feature: AVX512EVEX +func (x Int8x64) Absolute() Int8x64 -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// Absolute computes the absolute value of each element. // -// Asm: VRSQRTPS, CPU Feature: AVX -func (x Float32x4) ApproximateReciprocalOfSqrt() Float32x4 +// Asm: VPABSW, CPU Feature: AVX +func (x Int16x8) Absolute() Int16x8 -// Ceil rounds elements up to the nearest integer. -// Const Immediate = 2. +// Absolute computes the absolute value of each element. // -// Asm: VROUNDPS, CPU Feature: AVX -func (x Float32x4) Ceil() Float32x4 +// Asm: VPABSW, CPU Feature: AVX2 +func (x Int16x16) Absolute() Int16x16 -// Floor rounds elements down to the nearest integer. -// Const Immediate = 1. +// Absolute computes the absolute value of each element. // -// Asm: VROUNDPS, CPU Feature: AVX -func (x Float32x4) Floor() Float32x4 +// Asm: VPABSW, CPU Feature: AVX512EVEX +func (x Int16x32) Absolute() Int16x32 -// Round rounds elements to the nearest integer. -// Const Immediate = 0. +// Absolute computes the absolute value of each element. // -// Asm: VROUNDPS, CPU Feature: AVX -func (x Float32x4) Round() Float32x4 +// Asm: VPABSD, CPU Feature: AVX +func (x Int32x4) Absolute() Int32x4 -// Sqrt computes the square root of each element. +// Absolute computes the absolute value of each element. // -// Asm: VSQRTPS, CPU Feature: AVX -func (x Float32x4) Sqrt() Float32x4 +// Asm: VPABSD, CPU Feature: AVX2 +func (x Int32x8) Absolute() Int32x8 -// Trunc truncates elements towards zero. -// Const Immediate = 3. +// Absolute computes the absolute value of each element. // -// Asm: VROUNDPS, CPU Feature: AVX -func (x Float32x4) Trunc() Float32x4 +// Asm: VPABSD, CPU Feature: AVX512EVEX +func (x Int32x16) Absolute() Int32x16 -// ApproximateReciprocal computes an approximate reciprocal of each element. +// Absolute computes the absolute value of each element. // -// Asm: VRCP14PS, CPU Feature: AVX512EVEX -func (x Float32x8) ApproximateReciprocal() Float32x8 +// Asm: VPABSQ, CPU Feature: AVX512EVEX +func (x Int64x2) Absolute() Int64x2 -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// Absolute computes the absolute value of each element. 
// -// Asm: VRSQRTPS, CPU Feature: AVX -func (x Float32x8) ApproximateReciprocalOfSqrt() Float32x8 +// Asm: VPABSQ, CPU Feature: AVX512EVEX +func (x Int64x4) Absolute() Int64x4 -// Ceil rounds elements up to the nearest integer. -// Const Immediate = 2. +// Absolute computes the absolute value of each element. // -// Asm: VROUNDPS, CPU Feature: AVX -func (x Float32x8) Ceil() Float32x8 +// Asm: VPABSQ, CPU Feature: AVX512EVEX +func (x Int64x8) Absolute() Int64x8 -// Floor rounds elements down to the nearest integer. -// Const Immediate = 1. -// -// Asm: VROUNDPS, CPU Feature: AVX -func (x Float32x8) Floor() Float32x8 +/* Add */ -// Round rounds elements to the nearest integer. -// Const Immediate = 0. +// Add adds corresponding elements of two vectors. // -// Asm: VROUNDPS, CPU Feature: AVX -func (x Float32x8) Round() Float32x8 +// Asm: VADDPS, CPU Feature: AVX +func (x Float32x4) Add(y Float32x4) Float32x4 -// Sqrt computes the square root of each element. +// Add adds corresponding elements of two vectors. // -// Asm: VSQRTPS, CPU Feature: AVX -func (x Float32x8) Sqrt() Float32x8 +// Asm: VADDPS, CPU Feature: AVX +func (x Float32x8) Add(y Float32x8) Float32x8 -// Trunc truncates elements towards zero. -// Const Immediate = 3. +// Add adds corresponding elements of two vectors. // -// Asm: VROUNDPS, CPU Feature: AVX -func (x Float32x8) Trunc() Float32x8 +// Asm: VADDPS, CPU Feature: AVX512EVEX +func (x Float32x16) Add(y Float32x16) Float32x16 -// ApproximateReciprocal computes an approximate reciprocal of each element. +// Add adds corresponding elements of two vectors. // -// Asm: VRCP14PD, CPU Feature: AVX512EVEX -func (x Float64x2) ApproximateReciprocal() Float64x2 +// Asm: VADDPD, CPU Feature: AVX +func (x Float64x2) Add(y Float64x2) Float64x2 -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// Add adds corresponding elements of two vectors. // -// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX -func (x Float64x2) ApproximateReciprocalOfSqrt() Float64x2 +// Asm: VADDPD, CPU Feature: AVX +func (x Float64x4) Add(y Float64x4) Float64x4 -// Ceil rounds elements up to the nearest integer. -// Const Immediate = 2. +// Add adds corresponding elements of two vectors. // -// Asm: VROUNDPD, CPU Feature: AVX -func (x Float64x2) Ceil() Float64x2 +// Asm: VADDPD, CPU Feature: AVX512EVEX +func (x Float64x8) Add(y Float64x8) Float64x8 -// Floor rounds elements down to the nearest integer. -// Const Immediate = 1. +// Add adds corresponding elements of two vectors. // -// Asm: VROUNDPD, CPU Feature: AVX -func (x Float64x2) Floor() Float64x2 +// Asm: VPADDB, CPU Feature: AVX +func (x Int8x16) Add(y Int8x16) Int8x16 -// Round rounds elements to the nearest integer. -// Const Immediate = 0. +// Add adds corresponding elements of two vectors. // -// Asm: VROUNDPD, CPU Feature: AVX -func (x Float64x2) Round() Float64x2 +// Asm: VPADDB, CPU Feature: AVX2 +func (x Int8x32) Add(y Int8x32) Int8x32 -// Sqrt computes the square root of each element. +// Add adds corresponding elements of two vectors. // -// Asm: VSQRTPD, CPU Feature: AVX -func (x Float64x2) Sqrt() Float64x2 +// Asm: VPADDB, CPU Feature: AVX512EVEX +func (x Int8x64) Add(y Int8x64) Int8x64 -// Trunc truncates elements towards zero. -// Const Immediate = 3. +// Add adds corresponding elements of two vectors. 
// -// Asm: VROUNDPD, CPU Feature: AVX -func (x Float64x2) Trunc() Float64x2 +// Asm: VPADDW, CPU Feature: AVX +func (x Int16x8) Add(y Int16x8) Int16x8 -// ApproximateReciprocal computes an approximate reciprocal of each element. +// Add adds corresponding elements of two vectors. // -// Asm: VRCP14PD, CPU Feature: AVX512EVEX -func (x Float64x4) ApproximateReciprocal() Float64x4 +// Asm: VPADDW, CPU Feature: AVX2 +func (x Int16x16) Add(y Int16x16) Int16x16 -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// Add adds corresponding elements of two vectors. // -// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX -func (x Float64x4) ApproximateReciprocalOfSqrt() Float64x4 +// Asm: VPADDW, CPU Feature: AVX512EVEX +func (x Int16x32) Add(y Int16x32) Int16x32 -// Ceil rounds elements up to the nearest integer. -// Const Immediate = 2. +// Add adds corresponding elements of two vectors. // -// Asm: VROUNDPD, CPU Feature: AVX -func (x Float64x4) Ceil() Float64x4 +// Asm: VPADDD, CPU Feature: AVX +func (x Int32x4) Add(y Int32x4) Int32x4 -// Floor rounds elements down to the nearest integer. -// Const Immediate = 1. +// Add adds corresponding elements of two vectors. // -// Asm: VROUNDPD, CPU Feature: AVX -func (x Float64x4) Floor() Float64x4 +// Asm: VPADDD, CPU Feature: AVX2 +func (x Int32x8) Add(y Int32x8) Int32x8 -// Round rounds elements to the nearest integer. -// Const Immediate = 0. +// Add adds corresponding elements of two vectors. // -// Asm: VROUNDPD, CPU Feature: AVX -func (x Float64x4) Round() Float64x4 +// Asm: VPADDD, CPU Feature: AVX512EVEX +func (x Int32x16) Add(y Int32x16) Int32x16 -// Sqrt computes the square root of each element. +// Add adds corresponding elements of two vectors. // -// Asm: VSQRTPD, CPU Feature: AVX -func (x Float64x4) Sqrt() Float64x4 +// Asm: VPADDQ, CPU Feature: AVX +func (x Int64x2) Add(y Int64x2) Int64x2 -// Trunc truncates elements towards zero. -// Const Immediate = 3. +// Add adds corresponding elements of two vectors. // -// Asm: VROUNDPD, CPU Feature: AVX -func (x Float64x4) Trunc() Float64x4 +// Asm: VPADDQ, CPU Feature: AVX2 +func (x Int64x4) Add(y Int64x4) Int64x4 -// ApproximateReciprocal computes an approximate reciprocal of each element. +// Add adds corresponding elements of two vectors. // -// Asm: VRCP14PD, CPU Feature: AVX512EVEX -func (x Float64x8) ApproximateReciprocal() Float64x8 +// Asm: VPADDQ, CPU Feature: AVX512EVEX +func (x Int64x8) Add(y Int64x8) Int64x8 -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// Add adds corresponding elements of two vectors. // -// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX -func (x Float64x8) ApproximateReciprocalOfSqrt() Float64x8 +// Asm: VPADDB, CPU Feature: AVX +func (x Uint8x16) Add(y Uint8x16) Uint8x16 -// Sqrt computes the square root of each element. +// Add adds corresponding elements of two vectors. // -// Asm: VSQRTPD, CPU Feature: AVX512EVEX -func (x Float64x8) Sqrt() Float64x8 +// Asm: VPADDB, CPU Feature: AVX2 +func (x Uint8x32) Add(y Uint8x32) Uint8x32 -// Absolute computes the absolute value of each element. +// Add adds corresponding elements of two vectors. // -// Asm: VPABSW, CPU Feature: AVX2 -func (x Int16x16) Absolute() Int16x16 +// Asm: VPADDB, CPU Feature: AVX512EVEX +func (x Uint8x64) Add(y Uint8x64) Uint8x64 -// PopCount counts the number of set bits in each element. +// Add adds corresponding elements of two vectors. 
// -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Int16x16) PopCount() Int16x16 +// Asm: VPADDW, CPU Feature: AVX +func (x Uint16x8) Add(y Uint16x8) Uint16x8 -// Absolute computes the absolute value of each element. +// Add adds corresponding elements of two vectors. // -// Asm: VPABSW, CPU Feature: AVX512EVEX -func (x Int16x32) Absolute() Int16x32 +// Asm: VPADDW, CPU Feature: AVX2 +func (x Uint16x16) Add(y Uint16x16) Uint16x16 -// PopCount counts the number of set bits in each element. +// Add adds corresponding elements of two vectors. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Int16x32) PopCount() Int16x32 +// Asm: VPADDW, CPU Feature: AVX512EVEX +func (x Uint16x32) Add(y Uint16x32) Uint16x32 -// Absolute computes the absolute value of each element. +// Add adds corresponding elements of two vectors. // -// Asm: VPABSW, CPU Feature: AVX -func (x Int16x8) Absolute() Int16x8 +// Asm: VPADDD, CPU Feature: AVX +func (x Uint32x4) Add(y Uint32x4) Uint32x4 -// PopCount counts the number of set bits in each element. +// Add adds corresponding elements of two vectors. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Int16x8) PopCount() Int16x8 +// Asm: VPADDD, CPU Feature: AVX2 +func (x Uint32x8) Add(y Uint32x8) Uint32x8 -// Absolute computes the absolute value of each element. +// Add adds corresponding elements of two vectors. // -// Asm: VPABSD, CPU Feature: AVX512EVEX -func (x Int32x16) Absolute() Int32x16 +// Asm: VPADDD, CPU Feature: AVX512EVEX +func (x Uint32x16) Add(y Uint32x16) Uint32x16 -// PopCount counts the number of set bits in each element. +// Add adds corresponding elements of two vectors. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Int32x16) PopCount() Int32x16 +// Asm: VPADDQ, CPU Feature: AVX +func (x Uint64x2) Add(y Uint64x2) Uint64x2 -// Absolute computes the absolute value of each element. +// Add adds corresponding elements of two vectors. // -// Asm: VPABSD, CPU Feature: AVX -func (x Int32x4) Absolute() Int32x4 +// Asm: VPADDQ, CPU Feature: AVX2 +func (x Uint64x4) Add(y Uint64x4) Uint64x4 -// PopCount counts the number of set bits in each element. +// Add adds corresponding elements of two vectors. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Int32x4) PopCount() Int32x4 +// Asm: VPADDQ, CPU Feature: AVX512EVEX +func (x Uint64x8) Add(y Uint64x8) Uint64x8 -// Absolute computes the absolute value of each element. -// -// Asm: VPABSD, CPU Feature: AVX2 -func (x Int32x8) Absolute() Int32x8 +/* AddSub */ -// PopCount counts the number of set bits in each element. +// AddSub subtracts even elements and adds odd elements of two vectors. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Int32x8) PopCount() Int32x8 +// Asm: VADDSUBPS, CPU Feature: AVX +func (x Float32x4) AddSub(y Float32x4) Float32x4 -// Absolute computes the absolute value of each element. +// AddSub subtracts even elements and adds odd elements of two vectors. // -// Asm: VPABSQ, CPU Feature: AVX512EVEX -func (x Int64x2) Absolute() Int64x2 +// Asm: VADDSUBPS, CPU Feature: AVX +func (x Float32x8) AddSub(y Float32x8) Float32x8 -// PopCount counts the number of set bits in each element. +// AddSub subtracts even elements and adds odd elements of two vectors. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Int64x2) PopCount() Int64x2 +// Asm: VADDSUBPD, CPU Feature: AVX +func (x Float64x2) AddSub(y Float64x2) Float64x2 -// Absolute computes the absolute value of each element. +// AddSub subtracts even elements and adds odd elements of two vectors. 
// -// Asm: VPABSQ, CPU Feature: AVX512EVEX -func (x Int64x4) Absolute() Int64x4 +// Asm: VADDSUBPD, CPU Feature: AVX +func (x Float64x4) AddSub(y Float64x4) Float64x4 -// PopCount counts the number of set bits in each element. +/* And */ + +// And performs a bitwise AND operation between two vectors. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Int64x4) PopCount() Int64x4 +// Asm: VANDPS, CPU Feature: AVX +func (x Float32x4) And(y Float32x4) Float32x4 -// Absolute computes the absolute value of each element. +// And performs a bitwise AND operation between two vectors. // -// Asm: VPABSQ, CPU Feature: AVX512EVEX -func (x Int64x8) Absolute() Int64x8 +// Asm: VANDPS, CPU Feature: AVX +func (x Float32x8) And(y Float32x8) Float32x8 -// PopCount counts the number of set bits in each element. +// And performs a masked bitwise AND operation between two vectors. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Int64x8) PopCount() Int64x8 +// Asm: VANDPS, CPU Feature: AVX512EVEX +func (x Float32x16) And(y Float32x16) Float32x16 -// Absolute computes the absolute value of each element. +// And performs a bitwise AND operation between two vectors. // -// Asm: VPABSB, CPU Feature: AVX -func (x Int8x16) Absolute() Int8x16 +// Asm: VANDPD, CPU Feature: AVX +func (x Float64x2) And(y Float64x2) Float64x2 -// PopCount counts the number of set bits in each element. +// And performs a bitwise AND operation between two vectors. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Int8x16) PopCount() Int8x16 +// Asm: VANDPD, CPU Feature: AVX +func (x Float64x4) And(y Float64x4) Float64x4 -// Absolute computes the absolute value of each element. +// And performs a masked bitwise AND operation between two vectors. // -// Asm: VPABSB, CPU Feature: AVX2 -func (x Int8x32) Absolute() Int8x32 +// Asm: VANDPD, CPU Feature: AVX512EVEX +func (x Float64x8) And(y Float64x8) Float64x8 -// PopCount counts the number of set bits in each element. +// And performs a bitwise AND operation between two vectors. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Int8x32) PopCount() Int8x32 +// Asm: VPAND, CPU Feature: AVX +func (x Int8x16) And(y Int8x16) Int8x16 -// Absolute computes the absolute value of each element. +// And performs a bitwise AND operation between two vectors. // -// Asm: VPABSB, CPU Feature: AVX512EVEX -func (x Int8x64) Absolute() Int8x64 +// Asm: VPAND, CPU Feature: AVX2 +func (x Int8x32) And(y Int8x32) Int8x32 -// PopCount counts the number of set bits in each element. +// And performs a bitwise AND operation between two vectors. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Int8x64) PopCount() Int8x64 +// Asm: VPAND, CPU Feature: AVX +func (x Int16x8) And(y Int16x8) Int16x8 -// PopCount counts the number of set bits in each element. +// And performs a bitwise AND operation between two vectors. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Uint16x16) PopCount() Uint16x16 +// Asm: VPAND, CPU Feature: AVX2 +func (x Int16x16) And(y Int16x16) Int16x16 -// PopCount counts the number of set bits in each element. +// And performs a bitwise AND operation between two vectors. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Uint16x32) PopCount() Uint16x32 +// Asm: VPAND, CPU Feature: AVX +func (x Int32x4) And(y Int32x4) Int32x4 -// PopCount counts the number of set bits in each element. +// And performs a bitwise AND operation between two vectors. 
// -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Uint16x8) PopCount() Uint16x8 +// Asm: VPAND, CPU Feature: AVX2 +func (x Int32x8) And(y Int32x8) Int32x8 -// PopCount counts the number of set bits in each element. +// And performs a masked bitwise AND operation between two vectors. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Uint32x16) PopCount() Uint32x16 +// Asm: VPANDD, CPU Feature: AVX512EVEX +func (x Int32x16) And(y Int32x16) Int32x16 -// PopCount counts the number of set bits in each element. +// And performs a bitwise AND operation between two vectors. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Uint32x4) PopCount() Uint32x4 +// Asm: VPAND, CPU Feature: AVX +func (x Int64x2) And(y Int64x2) Int64x2 -// PopCount counts the number of set bits in each element. +// And performs a bitwise AND operation between two vectors. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Uint32x8) PopCount() Uint32x8 +// Asm: VPAND, CPU Feature: AVX2 +func (x Int64x4) And(y Int64x4) Int64x4 -// PopCount counts the number of set bits in each element. +// And performs a masked bitwise AND operation between two vectors. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Uint64x2) PopCount() Uint64x2 +// Asm: VPANDQ, CPU Feature: AVX512EVEX +func (x Int64x8) And(y Int64x8) Int64x8 -// PopCount counts the number of set bits in each element. +// And performs a bitwise AND operation between two vectors. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Uint64x4) PopCount() Uint64x4 +// Asm: VPAND, CPU Feature: AVX +func (x Uint8x16) And(y Uint8x16) Uint8x16 -// PopCount counts the number of set bits in each element. +// And performs a bitwise AND operation between two vectors. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Uint64x8) PopCount() Uint64x8 +// Asm: VPAND, CPU Feature: AVX2 +func (x Uint8x32) And(y Uint8x32) Uint8x32 -// PopCount counts the number of set bits in each element. +// And performs a bitwise AND operation between two vectors. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Uint8x16) PopCount() Uint8x16 +// Asm: VPAND, CPU Feature: AVX +func (x Uint16x8) And(y Uint16x8) Uint16x8 -// PopCount counts the number of set bits in each element. +// And performs a bitwise AND operation between two vectors. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Uint8x32) PopCount() Uint8x32 +// Asm: VPAND, CPU Feature: AVX2 +func (x Uint16x16) And(y Uint16x16) Uint16x16 -// PopCount counts the number of set bits in each element. +// And performs a bitwise AND operation between two vectors. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Uint8x64) PopCount() Uint8x64 +// Asm: VPAND, CPU Feature: AVX +func (x Uint32x4) And(y Uint32x4) Uint32x4 -// Add adds corresponding elements of two vectors. +// And performs a bitwise AND operation between two vectors. // -// Asm: VADDPS, CPU Feature: AVX512EVEX -func (x Float32x16) Add(y Float32x16) Float32x16 +// Asm: VPAND, CPU Feature: AVX2 +func (x Uint32x8) And(y Uint32x8) Uint32x8 // And performs a masked bitwise AND operation between two vectors. // -// Asm: VANDPS, CPU Feature: AVX512EVEX -func (x Float32x16) And(y Float32x16) Float32x16 +// Asm: VPANDD, CPU Feature: AVX512EVEX +func (x Uint32x16) And(y Uint32x16) Uint32x16 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// And performs a bitwise AND operation between two vectors. 
// -// Asm: VANDNPS, CPU Feature: AVX512EVEX -func (x Float32x16) AndNot(y Float32x16) Float32x16 +// Asm: VPAND, CPU Feature: AVX +func (x Uint64x2) And(y Uint64x2) Uint64x2 -// Div divides elements of two vectors. +// And performs a bitwise AND operation between two vectors. // -// Asm: VDIVPS, CPU Feature: AVX512EVEX -func (x Float32x16) Div(y Float32x16) Float32x16 +// Asm: VPAND, CPU Feature: AVX2 +func (x Uint64x4) And(y Uint64x4) Uint64x4 -// Equal compares for equality, masked. -// Const Immediate = 0. +// And performs a masked bitwise AND operation between two vectors. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) Equal(y Float32x16) Mask32x16 +// Asm: VPANDQ, CPU Feature: AVX512EVEX +func (x Uint64x8) And(y Uint64x8) Uint64x8 -// Greater compares for greater than. -// Const Immediate = 6. -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) Greater(y Float32x16) Mask32x16 +/* AndNot */ -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// AndNot performs a bitwise AND NOT operation between two vectors. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) GreaterEqual(y Float32x16) Mask32x16 +// Asm: VANDNPS, CPU Feature: AVX +func (x Float32x4) AndNot(y Float32x4) Float32x4 -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// Const Immediate = 3. +// AndNot performs a bitwise AND NOT operation between two vectors. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) IsNan(y Float32x16) Mask32x16 +// Asm: VANDNPS, CPU Feature: AVX +func (x Float32x8) AndNot(y Float32x8) Float32x8 -// Less compares for less than. -// Const Immediate = 1. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) Less(y Float32x16) Mask32x16 +// Asm: VANDNPS, CPU Feature: AVX512EVEX +func (x Float32x16) AndNot(y Float32x16) Float32x16 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// AndNot performs a bitwise AND NOT operation between two vectors. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) LessEqual(y Float32x16) Mask32x16 +// Asm: VANDNPD, CPU Feature: AVX +func (x Float64x2) AndNot(y Float64x2) Float64x2 -// ApproximateReciprocal computes an approximate reciprocal of each element. +// AndNot performs a bitwise AND NOT operation between two vectors. // -// Asm: VRCP14PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedApproximateReciprocal(y Mask32x16) Float32x16 +// Asm: VANDNPD, CPU Feature: AVX +func (x Float64x4) AndNot(y Float64x4) Float64x4 -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VRSQRT14PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedApproximateReciprocalOfSqrt(y Mask32x16) Float32x16 +// Asm: VANDNPD, CPU Feature: AVX512EVEX +func (x Float64x8) AndNot(y Float64x8) Float64x8 -// Sqrt computes the square root of each element. +// AndNot performs a bitwise AND NOT operation between two vectors. // -// Asm: VSQRTPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedSqrt(y Mask32x16) Float32x16 +// Asm: VPANDN, CPU Feature: AVX +func (x Int8x16) AndNot(y Int8x16) Int8x16 -// Max computes the maximum of corresponding elements. +// AndNot performs a bitwise AND NOT operation between two vectors. 
// -// Asm: VMAXPS, CPU Feature: AVX512EVEX -func (x Float32x16) Max(y Float32x16) Float32x16 +// Asm: VPANDN, CPU Feature: AVX2 +func (x Int8x32) AndNot(y Int8x32) Int8x32 -// Min computes the minimum of corresponding elements. +// AndNot performs a bitwise AND NOT operation between two vectors. // -// Asm: VMINPS, CPU Feature: AVX512EVEX -func (x Float32x16) Min(y Float32x16) Float32x16 +// Asm: VPANDN, CPU Feature: AVX +func (x Int16x8) AndNot(y Int16x8) Int16x8 -// Mul multiplies corresponding elements of two vectors, masked. +// AndNot performs a bitwise AND NOT operation between two vectors. // -// Asm: VMULPS, CPU Feature: AVX512EVEX -func (x Float32x16) Mul(y Float32x16) Float32x16 +// Asm: VPANDN, CPU Feature: AVX2 +func (x Int16x16) AndNot(y Int16x16) Int16x16 -// MulByPowOf2 multiplies elements by a power of 2. +// AndNot performs a bitwise AND NOT operation between two vectors. // -// Asm: VSCALEFPS, CPU Feature: AVX512EVEX -func (x Float32x16) MulByPowOf2(y Float32x16) Float32x16 +// Asm: VPANDN, CPU Feature: AVX +func (x Int32x4) AndNot(y Int32x4) Int32x4 -// NotEqual compares for inequality. -// Const Immediate = 4. +// AndNot performs a bitwise AND NOT operation between two vectors. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) NotEqual(y Float32x16) Mask32x16 +// Asm: VPANDN, CPU Feature: AVX2 +func (x Int32x8) AndNot(y Int32x8) Int32x8 -// Or performs a masked bitwise OR operation between two vectors. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VORPS, CPU Feature: AVX512EVEX -func (x Float32x16) Or(y Float32x16) Float32x16 +// Asm: VPANDND, CPU Feature: AVX512EVEX +func (x Int32x16) AndNot(y Int32x16) Int32x16 -// Sub subtracts corresponding elements of two vectors. +// AndNot performs a bitwise AND NOT operation between two vectors. // -// Asm: VADDPS, CPU Feature: AVX512EVEX -func (x Float32x16) Sub(y Float32x16) Float32x16 +// Asm: VPANDN, CPU Feature: AVX +func (x Int64x2) AndNot(y Int64x2) Int64x2 -// Xor performs a masked bitwise XOR operation between two vectors. +// AndNot performs a bitwise AND NOT operation between two vectors. // -// Asm: VXORPS, CPU Feature: AVX512EVEX -func (x Float32x16) Xor(y Float32x16) Float32x16 +// Asm: VPANDN, CPU Feature: AVX2 +func (x Int64x4) AndNot(y Int64x4) Int64x4 -// Add adds corresponding elements of two vectors. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VADDPS, CPU Feature: AVX -func (x Float32x4) Add(y Float32x4) Float32x4 +// Asm: VPANDNQ, CPU Feature: AVX512EVEX +func (x Int64x8) AndNot(y Int64x8) Int64x8 -// AddSub subtracts even elements and adds odd elements of two vectors. +// AndNot performs a bitwise AND NOT operation between two vectors. // -// Asm: VADDSUBPS, CPU Feature: AVX -func (x Float32x4) AddSub(y Float32x4) Float32x4 +// Asm: VPANDN, CPU Feature: AVX +func (x Uint8x16) AndNot(y Uint8x16) Uint8x16 -// And performs a bitwise AND operation between two vectors. +// AndNot performs a bitwise AND NOT operation between two vectors. // -// Asm: VANDPS, CPU Feature: AVX -func (x Float32x4) And(y Float32x4) Float32x4 +// Asm: VPANDN, CPU Feature: AVX2 +func (x Uint8x32) AndNot(y Uint8x32) Uint8x32 // AndNot performs a bitwise AND NOT operation between two vectors. // -// Asm: VANDNPS, CPU Feature: AVX -func (x Float32x4) AndNot(y Float32x4) Float32x4 +// Asm: VPANDN, CPU Feature: AVX +func (x Uint16x8) AndNot(y Uint16x8) Uint16x8 -// Div divides elements of two vectors. 
+// AndNot performs a bitwise AND NOT operation between two vectors. // -// Asm: VDIVPS, CPU Feature: AVX -func (x Float32x4) Div(y Float32x4) Float32x4 +// Asm: VPANDN, CPU Feature: AVX2 +func (x Uint16x16) AndNot(y Uint16x16) Uint16x16 -// Equal compares for equality. -// Const Immediate = 0. +// AndNot performs a bitwise AND NOT operation between two vectors. // -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x4) Equal(y Float32x4) Mask32x4 +// Asm: VPANDN, CPU Feature: AVX +func (x Uint32x4) AndNot(y Uint32x4) Uint32x4 -// Greater compares for greater than. -// Const Immediate = 6. +// AndNot performs a bitwise AND NOT operation between two vectors. // -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x4) Greater(y Float32x4) Mask32x4 +// Asm: VPANDN, CPU Feature: AVX2 +func (x Uint32x8) AndNot(y Uint32x8) Uint32x8 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x4) GreaterEqual(y Float32x4) Mask32x4 +// Asm: VPANDND, CPU Feature: AVX512EVEX +func (x Uint32x16) AndNot(y Uint32x16) Uint32x16 -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// Const Immediate = 3. +// AndNot performs a bitwise AND NOT operation between two vectors. // -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x4) IsNan(y Float32x4) Mask32x4 +// Asm: VPANDN, CPU Feature: AVX +func (x Uint64x2) AndNot(y Uint64x2) Uint64x2 -// Less compares for less than. -// Const Immediate = 1. +// AndNot performs a bitwise AND NOT operation between two vectors. // -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x4) Less(y Float32x4) Mask32x4 +// Asm: VPANDN, CPU Feature: AVX2 +func (x Uint64x4) AndNot(y Uint64x4) Uint64x4 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x4) LessEqual(y Float32x4) Mask32x4 +// Asm: VPANDNQ, CPU Feature: AVX512EVEX +func (x Uint64x8) AndNot(y Uint64x8) Uint64x8 + +/* ApproximateReciprocal */ // ApproximateReciprocal computes an approximate reciprocal of each element. // // Asm: VRCP14PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedApproximateReciprocal(y Mask32x4) Float32x4 +func (x Float32x4) ApproximateReciprocal() Float32x4 -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// ApproximateReciprocal computes an approximate reciprocal of each element. // -// Asm: VRSQRT14PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedApproximateReciprocalOfSqrt(y Mask32x4) Float32x4 +// Asm: VRCP14PS, CPU Feature: AVX512EVEX +func (x Float32x8) ApproximateReciprocal() Float32x8 -// Sqrt computes the square root of each element. +// ApproximateReciprocal computes an approximate reciprocal of each element. // -// Asm: VSQRTPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedSqrt(y Mask32x4) Float32x4 +// Asm: VRCP14PS, CPU Feature: AVX512EVEX +func (x Float32x16) ApproximateReciprocal() Float32x16 -// Max computes the maximum of corresponding elements. +// ApproximateReciprocal computes an approximate reciprocal of each element. // -// Asm: VMAXPS, CPU Feature: AVX -func (x Float32x4) Max(y Float32x4) Float32x4 +// Asm: VRCP14PD, CPU Feature: AVX512EVEX +func (x Float64x2) ApproximateReciprocal() Float64x2 -// Min computes the minimum of corresponding elements. 
+// ApproximateReciprocal computes an approximate reciprocal of each element. // -// Asm: VMINPS, CPU Feature: AVX -func (x Float32x4) Min(y Float32x4) Float32x4 +// Asm: VRCP14PD, CPU Feature: AVX512EVEX +func (x Float64x4) ApproximateReciprocal() Float64x4 -// Mul multiplies corresponding elements of two vectors. +// ApproximateReciprocal computes an approximate reciprocal of each element. // -// Asm: VMULPS, CPU Feature: AVX -func (x Float32x4) Mul(y Float32x4) Float32x4 +// Asm: VRCP14PD, CPU Feature: AVX512EVEX +func (x Float64x8) ApproximateReciprocal() Float64x8 -// MulByPowOf2 multiplies elements by a power of 2. -// -// Asm: VSCALEFPS, CPU Feature: AVX512EVEX -func (x Float32x4) MulByPowOf2(y Float32x4) Float32x4 +/* ApproximateReciprocalOfSqrt */ -// NotEqual compares for inequality. -// Const Immediate = 4. +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. // -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x4) NotEqual(y Float32x4) Mask32x4 +// Asm: VRSQRTPS, CPU Feature: AVX +func (x Float32x4) ApproximateReciprocalOfSqrt() Float32x4 -// Or performs a bitwise OR operation between two vectors. +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. // -// Asm: VORPS, CPU Feature: AVX -func (x Float32x4) Or(y Float32x4) Float32x4 +// Asm: VRSQRTPS, CPU Feature: AVX +func (x Float32x8) ApproximateReciprocalOfSqrt() Float32x8 -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. // -// Asm: VHADDPS, CPU Feature: AVX -func (x Float32x4) PairwiseAdd(y Float32x4) Float32x4 +// Asm: VRSQRT14PS, CPU Feature: AVX512EVEX +func (x Float32x16) ApproximateReciprocalOfSqrt() Float32x16 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. // -// Asm: VHSUBPS, CPU Feature: AVX -func (x Float32x4) PairwiseSub(y Float32x4) Float32x4 +// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX +func (x Float64x2) ApproximateReciprocalOfSqrt() Float64x2 -// Sub subtracts corresponding elements of two vectors. +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. // -// Asm: VADDPS, CPU Feature: AVX -func (x Float32x4) Sub(y Float32x4) Float32x4 +// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX +func (x Float64x4) ApproximateReciprocalOfSqrt() Float64x4 -// Xor performs a bitwise XOR operation between two vectors. +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. // -// Asm: VXORPS, CPU Feature: AVX -func (x Float32x4) Xor(y Float32x4) Float32x4 +// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX +func (x Float64x8) ApproximateReciprocalOfSqrt() Float64x8 -// Add adds corresponding elements of two vectors. -// -// Asm: VADDPS, CPU Feature: AVX -func (x Float32x8) Add(y Float32x8) Float32x8 +/* Average */ -// AddSub subtracts even elements and adds odd elements of two vectors. +// Average computes the rounded average of corresponding elements. 
// -// Asm: VADDSUBPS, CPU Feature: AVX -func (x Float32x8) AddSub(y Float32x8) Float32x8 +// Asm: VPAVGB, CPU Feature: AVX +func (x Uint8x16) Average(y Uint8x16) Uint8x16 -// And performs a bitwise AND operation between two vectors. +// Average computes the rounded average of corresponding elements. // -// Asm: VANDPS, CPU Feature: AVX -func (x Float32x8) And(y Float32x8) Float32x8 +// Asm: VPAVGB, CPU Feature: AVX2 +func (x Uint8x32) Average(y Uint8x32) Uint8x32 -// AndNot performs a bitwise AND NOT operation between two vectors. +// Average computes the rounded average of corresponding elements. // -// Asm: VANDNPS, CPU Feature: AVX -func (x Float32x8) AndNot(y Float32x8) Float32x8 +// Asm: VPAVGB, CPU Feature: AVX512EVEX +func (x Uint8x64) Average(y Uint8x64) Uint8x64 -// Div divides elements of two vectors. +// Average computes the rounded average of corresponding elements. // -// Asm: VDIVPS, CPU Feature: AVX -func (x Float32x8) Div(y Float32x8) Float32x8 +// Asm: VPAVGW, CPU Feature: AVX +func (x Uint16x8) Average(y Uint16x8) Uint16x8 -// Equal compares for equality. -// Const Immediate = 0. +// Average computes the rounded average of corresponding elements. // -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x8) Equal(y Float32x8) Mask32x8 +// Asm: VPAVGW, CPU Feature: AVX2 +func (x Uint16x16) Average(y Uint16x16) Uint16x16 -// Greater compares for greater than. -// Const Immediate = 6. +// Average computes the rounded average of corresponding elements. // -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x8) Greater(y Float32x8) Mask32x8 +// Asm: VPAVGW, CPU Feature: AVX512EVEX +func (x Uint16x32) Average(y Uint16x32) Uint16x32 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. -// -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x8) GreaterEqual(y Float32x8) Mask32x8 +/* Ceil */ -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// Const Immediate = 3. +// Ceil rounds elements up to the nearest integer. +// Const Immediate = 2. // -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x8) IsNan(y Float32x8) Mask32x8 +// Asm: VROUNDPS, CPU Feature: AVX +func (x Float32x4) Ceil() Float32x4 -// Less compares for less than. -// Const Immediate = 1. +// Ceil rounds elements up to the nearest integer. +// Const Immediate = 2. // -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x8) Less(y Float32x8) Mask32x8 - -// LessEqual compares for less than or equal. +// Asm: VROUNDPS, CPU Feature: AVX +func (x Float32x8) Ceil() Float32x8 + +// Ceil rounds elements up to the nearest integer. // Const Immediate = 2. // -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x8) LessEqual(y Float32x8) Mask32x8 +// Asm: VROUNDPD, CPU Feature: AVX +func (x Float64x2) Ceil() Float64x2 -// ApproximateReciprocal computes an approximate reciprocal of each element. +// Ceil rounds elements up to the nearest integer. +// Const Immediate = 2. // -// Asm: VRCP14PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedApproximateReciprocal(y Mask32x8) Float32x8 +// Asm: VROUNDPD, CPU Feature: AVX +func (x Float64x4) Ceil() Float64x4 -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. -// -// Asm: VRSQRT14PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedApproximateReciprocalOfSqrt(y Mask32x8) Float32x8 +/* CeilSuppressExceptionWithPrecision */ -// Sqrt computes the square root of each element. +// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. +// Const Immediate = 10. 
// -// Asm: VSQRTPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedSqrt(y Mask32x8) Float32x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) CeilSuppressExceptionWithPrecision(imm8 uint8) Float32x4 -// Max computes the maximum of corresponding elements. +// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. +// Const Immediate = 10. // -// Asm: VMAXPS, CPU Feature: AVX -func (x Float32x8) Max(y Float32x8) Float32x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) CeilSuppressExceptionWithPrecision(imm8 uint8) Float32x8 -// Min computes the minimum of corresponding elements. +// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. +// Const Immediate = 10. // -// Asm: VMINPS, CPU Feature: AVX -func (x Float32x8) Min(y Float32x8) Float32x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) CeilSuppressExceptionWithPrecision(imm8 uint8) Float32x16 -// Mul multiplies corresponding elements of two vectors. +// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. +// Const Immediate = 10. // -// Asm: VMULPS, CPU Feature: AVX -func (x Float32x8) Mul(y Float32x8) Float32x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) CeilSuppressExceptionWithPrecision(imm8 uint8) Float64x2 -// MulByPowOf2 multiplies elements by a power of 2. +// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. +// Const Immediate = 10. // -// Asm: VSCALEFPS, CPU Feature: AVX512EVEX -func (x Float32x8) MulByPowOf2(y Float32x8) Float32x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) CeilSuppressExceptionWithPrecision(imm8 uint8) Float64x4 -// NotEqual compares for inequality. -// Const Immediate = 4. +// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. +// Const Immediate = 10. // -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x8) NotEqual(y Float32x8) Mask32x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) CeilSuppressExceptionWithPrecision(imm8 uint8) Float64x8 -// Or performs a bitwise OR operation between two vectors. -// -// Asm: VORPS, CPU Feature: AVX -func (x Float32x8) Or(y Float32x8) Float32x8 +/* CeilWithPrecision */ -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// CeilWithPrecision rounds elements up with specified precision, masked. +// Const Immediate = 2. // -// Asm: VHADDPS, CPU Feature: AVX -func (x Float32x8) PairwiseAdd(y Float32x8) Float32x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) CeilWithPrecision(imm8 uint8) Float32x4 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// CeilWithPrecision rounds elements up with specified precision, masked. +// Const Immediate = 2. // -// Asm: VHSUBPS, CPU Feature: AVX -func (x Float32x8) PairwiseSub(y Float32x8) Float32x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) CeilWithPrecision(imm8 uint8) Float32x8 -// Sub subtracts corresponding elements of two vectors. +// CeilWithPrecision rounds elements up with specified precision, masked. +// Const Immediate = 2. 
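// A scalar sketch of the "with precision" rounding (an assumption based on
// VRNDSCALE: imm8 selects the number of fraction bits kept, so values are
// rounded up to a multiple of 2^-imm8; import "math"):
//
//	func ceilWithPrecision(x float64, imm8 uint8) float64 {
//		s := math.Ldexp(1, int(imm8)) // 2^imm8
//		return math.Ceil(x*s) / s     // round up at granularity 2^-imm8
//	}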
// -// Asm: VADDPS, CPU Feature: AVX -func (x Float32x8) Sub(y Float32x8) Float32x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) CeilWithPrecision(imm8 uint8) Float32x16 -// Xor performs a bitwise XOR operation between two vectors. +// CeilWithPrecision rounds elements up with specified precision, masked. +// Const Immediate = 2. // -// Asm: VXORPS, CPU Feature: AVX -func (x Float32x8) Xor(y Float32x8) Float32x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) CeilWithPrecision(imm8 uint8) Float64x2 -// Add adds corresponding elements of two vectors. +// CeilWithPrecision rounds elements up with specified precision, masked. +// Const Immediate = 2. // -// Asm: VADDPD, CPU Feature: AVX -func (x Float64x2) Add(y Float64x2) Float64x2 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) CeilWithPrecision(imm8 uint8) Float64x4 -// AddSub subtracts even elements and adds odd elements of two vectors. +// CeilWithPrecision rounds elements up with specified precision, masked. +// Const Immediate = 2. // -// Asm: VADDSUBPD, CPU Feature: AVX -func (x Float64x2) AddSub(y Float64x2) Float64x2 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) CeilWithPrecision(imm8 uint8) Float64x8 -// And performs a bitwise AND operation between two vectors. -// -// Asm: VANDPD, CPU Feature: AVX -func (x Float64x2) And(y Float64x2) Float64x2 +/* DiffWithCeilSuppressExceptionWithPrecision */ -// AndNot performs a bitwise AND NOT operation between two vectors. +// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. +// Const Immediate = 10. // -// Asm: VANDNPD, CPU Feature: AVX -func (x Float64x2) AndNot(y Float64x2) Float64x2 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float32x4 -// Div divides elements of two vectors. +// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. +// Const Immediate = 10. // -// Asm: VDIVPD, CPU Feature: AVX -func (x Float64x2) Div(y Float64x2) Float64x2 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float32x8 -// DotProdBroadcast multiplies all elements and broadcasts the sum. -// Const Immediate = 127. +// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. +// Const Immediate = 10. // -// Asm: VDPPD, CPU Feature: AVX -func (x Float64x2) DotProdBroadcast(y Float64x2) Float64x2 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float32x16 -// Equal compares for equality. -// Const Immediate = 0. +// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. +// Const Immediate = 10. // -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x2) Equal(y Float64x2) Mask64x2 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float64x2 -// Greater compares for greater than. -// Const Immediate = 6. +// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. +// Const Immediate = 10. 
// -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x2) Greater(y Float64x2) Mask64x2 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float64x4 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. +// Const Immediate = 10. // -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x2) GreaterEqual(y Float64x2) Mask64x2 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float64x8 -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// Const Immediate = 3. -// -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x2) IsNan(y Float64x2) Mask64x2 +/* DiffWithCeilWithPrecision */ -// Less compares for less than. -// Const Immediate = 1. +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// Const Immediate = 2. // -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x2) Less(y Float64x2) Mask64x2 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) DiffWithCeilWithPrecision(imm8 uint8) Float32x4 -// LessEqual compares for less than or equal. +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. // Const Immediate = 2. // -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x2) LessEqual(y Float64x2) Mask64x2 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) DiffWithCeilWithPrecision(imm8 uint8) Float32x8 -// ApproximateReciprocal computes an approximate reciprocal of each element. +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// Const Immediate = 2. // -// Asm: VRCP14PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedApproximateReciprocal(y Mask64x2) Float64x2 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) DiffWithCeilWithPrecision(imm8 uint8) Float32x16 -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// Const Immediate = 2. // -// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedApproximateReciprocalOfSqrt(y Mask64x2) Float64x2 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) DiffWithCeilWithPrecision(imm8 uint8) Float64x2 -// Sqrt computes the square root of each element. +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// Const Immediate = 2. // -// Asm: VSQRTPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedSqrt(y Mask64x2) Float64x2 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) DiffWithCeilWithPrecision(imm8 uint8) Float64x4 -// Max computes the maximum of corresponding elements. +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// Const Immediate = 2. // -// Asm: VMAXPD, CPU Feature: AVX -func (x Float64x2) Max(y Float64x2) Float64x2 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) DiffWithCeilWithPrecision(imm8 uint8) Float64x8 -// Min computes the minimum of corresponding elements. -// -// Asm: VMINPD, CPU Feature: AVX -func (x Float64x2) Min(y Float64x2) Float64x2 +/* DiffWithFloorSuppressExceptionWithPrecision */ -// Mul multiplies corresponding elements of two vectors. 
+// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. +// Const Immediate = 9. // -// Asm: VMULPD, CPU Feature: AVX -func (x Float64x2) Mul(y Float64x2) Float64x2 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float32x4 -// MulByPowOf2 multiplies elements by a power of 2. +// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. +// Const Immediate = 9. // -// Asm: VSCALEFPD, CPU Feature: AVX512EVEX -func (x Float64x2) MulByPowOf2(y Float64x2) Float64x2 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float32x8 -// NotEqual compares for inequality. -// Const Immediate = 4. +// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. +// Const Immediate = 9. // -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x2) NotEqual(y Float64x2) Mask64x2 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float32x16 -// Or performs a bitwise OR operation between two vectors. +// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. +// Const Immediate = 9. // -// Asm: VORPD, CPU Feature: AVX -func (x Float64x2) Or(y Float64x2) Float64x2 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float64x2 -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. +// Const Immediate = 9. // -// Asm: VHADDPD, CPU Feature: AVX -func (x Float64x2) PairwiseAdd(y Float64x2) Float64x2 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float64x4 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. +// Const Immediate = 9. // -// Asm: VHSUBPD, CPU Feature: AVX -func (x Float64x2) PairwiseSub(y Float64x2) Float64x2 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VADDPD, CPU Feature: AVX -func (x Float64x2) Sub(y Float64x2) Float64x2 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float64x8 -// Xor performs a bitwise XOR operation between two vectors. -// -// Asm: VXORPD, CPU Feature: AVX -func (x Float64x2) Xor(y Float64x2) Float64x2 +/* DiffWithFloorWithPrecision */ -// Add adds corresponding elements of two vectors. +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// Const Immediate = 1. 
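// A scalar sketch of the "difference after flooring" semantics (an assumption
// based on VREDUCE: the result is x minus x floored at granularity 2^-imm8,
// i.e. the fractional part that flooring discards; import "math"):
//
//	func diffWithFloor(x float64, imm8 uint8) float64 {
//		s := math.Ldexp(1, int(imm8)) // 2^imm8
//		return x - math.Floor(x*s)/s  // remainder left after flooring at 2^-imm8
//	}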
// -// Asm: VADDPD, CPU Feature: AVX -func (x Float64x4) Add(y Float64x4) Float64x4 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) DiffWithFloorWithPrecision(imm8 uint8) Float32x4 -// AddSub subtracts even elements and adds odd elements of two vectors. +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// Const Immediate = 1. // -// Asm: VADDSUBPD, CPU Feature: AVX -func (x Float64x4) AddSub(y Float64x4) Float64x4 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) DiffWithFloorWithPrecision(imm8 uint8) Float32x8 -// And performs a bitwise AND operation between two vectors. +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// Const Immediate = 1. // -// Asm: VANDPD, CPU Feature: AVX -func (x Float64x4) And(y Float64x4) Float64x4 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) DiffWithFloorWithPrecision(imm8 uint8) Float32x16 -// AndNot performs a bitwise AND NOT operation between two vectors. +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// Const Immediate = 1. // -// Asm: VANDNPD, CPU Feature: AVX -func (x Float64x4) AndNot(y Float64x4) Float64x4 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) DiffWithFloorWithPrecision(imm8 uint8) Float64x2 -// Div divides elements of two vectors. +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// Const Immediate = 1. // -// Asm: VDIVPD, CPU Feature: AVX -func (x Float64x4) Div(y Float64x4) Float64x4 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) DiffWithFloorWithPrecision(imm8 uint8) Float64x4 -// Equal compares for equality. -// Const Immediate = 0. +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// Const Immediate = 1. // -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x4) Equal(y Float64x4) Mask64x4 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) DiffWithFloorWithPrecision(imm8 uint8) Float64x8 -// Greater compares for greater than. -// Const Immediate = 6. -// -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x4) Greater(y Float64x4) Mask64x4 +/* DiffWithRoundSuppressExceptionWithPrecision */ -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. +// Const Immediate = 8. // -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x4) GreaterEqual(y Float64x4) Mask64x4 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float32x4 -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// Const Immediate = 3. +// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. +// Const Immediate = 8. // -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x4) IsNan(y Float64x4) Mask64x4 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float32x8 -// Less compares for less than. -// Const Immediate = 1. +// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. +// Const Immediate = 8. 
// -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x4) Less(y Float64x4) Mask64x4 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float32x16 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. +// Const Immediate = 8. // -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x4) LessEqual(y Float64x4) Mask64x4 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float64x2 -// ApproximateReciprocal computes an approximate reciprocal of each element. +// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. +// Const Immediate = 8. // -// Asm: VRCP14PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedApproximateReciprocal(y Mask64x4) Float64x4 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float64x4 -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. +// Const Immediate = 8. // -// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedApproximateReciprocalOfSqrt(y Mask64x4) Float64x4 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float64x8 -// Sqrt computes the square root of each element. -// -// Asm: VSQRTPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedSqrt(y Mask64x4) Float64x4 +/* DiffWithRoundWithPrecision */ -// Max computes the maximum of corresponding elements. +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// Const Immediate = 0. // -// Asm: VMAXPD, CPU Feature: AVX -func (x Float64x4) Max(y Float64x4) Float64x4 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) DiffWithRoundWithPrecision(imm8 uint8) Float32x4 -// Min computes the minimum of corresponding elements. +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// Const Immediate = 0. // -// Asm: VMINPD, CPU Feature: AVX -func (x Float64x4) Min(y Float64x4) Float64x4 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) DiffWithRoundWithPrecision(imm8 uint8) Float32x8 -// Mul multiplies corresponding elements of two vectors. +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// Const Immediate = 0. // -// Asm: VMULPD, CPU Feature: AVX -func (x Float64x4) Mul(y Float64x4) Float64x4 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) DiffWithRoundWithPrecision(imm8 uint8) Float32x16 -// MulByPowOf2 multiplies elements by a power of 2. +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// Const Immediate = 0. // -// Asm: VSCALEFPD, CPU Feature: AVX512EVEX -func (x Float64x4) MulByPowOf2(y Float64x4) Float64x4 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) DiffWithRoundWithPrecision(imm8 uint8) Float64x2 -// NotEqual compares for inequality. -// Const Immediate = 4. +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. 
+// Const Immediate = 0. // -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x4) NotEqual(y Float64x4) Mask64x4 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) DiffWithRoundWithPrecision(imm8 uint8) Float64x4 -// Or performs a bitwise OR operation between two vectors. +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// Const Immediate = 0. // -// Asm: VORPD, CPU Feature: AVX -func (x Float64x4) Or(y Float64x4) Float64x4 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) DiffWithRoundWithPrecision(imm8 uint8) Float64x8 -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. -// -// Asm: VHADDPD, CPU Feature: AVX -func (x Float64x4) PairwiseAdd(y Float64x4) Float64x4 +/* DiffWithTruncSuppressExceptionWithPrecision */ -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. +// Const Immediate = 11. // -// Asm: VHSUBPD, CPU Feature: AVX -func (x Float64x4) PairwiseSub(y Float64x4) Float64x4 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float32x4 -// Sub subtracts corresponding elements of two vectors. +// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. +// Const Immediate = 11. // -// Asm: VADDPD, CPU Feature: AVX -func (x Float64x4) Sub(y Float64x4) Float64x4 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float32x8 -// Xor performs a bitwise XOR operation between two vectors. +// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. +// Const Immediate = 11. // -// Asm: VXORPD, CPU Feature: AVX -func (x Float64x4) Xor(y Float64x4) Float64x4 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float32x16 -// Add adds corresponding elements of two vectors. +// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. +// Const Immediate = 11. // -// Asm: VADDPD, CPU Feature: AVX512EVEX -func (x Float64x8) Add(y Float64x8) Float64x8 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float64x2 -// And performs a masked bitwise AND operation between two vectors. +// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. +// Const Immediate = 11. // -// Asm: VANDPD, CPU Feature: AVX512EVEX -func (x Float64x8) And(y Float64x8) Float64x8 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float64x4 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. +// Const Immediate = 11. 
// -// Asm: VANDNPD, CPU Feature: AVX512EVEX -func (x Float64x8) AndNot(y Float64x8) Float64x8 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float64x8 -// Div divides elements of two vectors. -// -// Asm: VDIVPD, CPU Feature: AVX512EVEX -func (x Float64x8) Div(y Float64x8) Float64x8 +/* DiffWithTruncWithPrecision */ -// Equal compares for equality, masked. -// Const Immediate = 0. +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// Const Immediate = 3. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) Equal(y Float64x8) Mask64x8 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) DiffWithTruncWithPrecision(imm8 uint8) Float32x4 -// Greater compares for greater than. -// Const Immediate = 6. +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// Const Immediate = 3. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) Greater(y Float64x8) Mask64x8 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) DiffWithTruncWithPrecision(imm8 uint8) Float32x8 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// Const Immediate = 3. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) GreaterEqual(y Float64x8) Mask64x8 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) DiffWithTruncWithPrecision(imm8 uint8) Float32x16 -// IsNan checks if elements are NaN. Use as x.IsNan(x). +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. // Const Immediate = 3. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) IsNan(y Float64x8) Mask64x8 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) DiffWithTruncWithPrecision(imm8 uint8) Float64x2 -// Less compares for less than. -// Const Immediate = 1. +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// Const Immediate = 3. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) Less(y Float64x8) Mask64x8 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) DiffWithTruncWithPrecision(imm8 uint8) Float64x4 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// Const Immediate = 3. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) LessEqual(y Float64x8) Mask64x8 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) DiffWithTruncWithPrecision(imm8 uint8) Float64x8 -// ApproximateReciprocal computes an approximate reciprocal of each element. -// -// Asm: VRCP14PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedApproximateReciprocal(y Mask64x8) Float64x8 +/* Div */ -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// Div divides elements of two vectors. // -// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedApproximateReciprocalOfSqrt(y Mask64x8) Float64x8 - -// Sqrt computes the square root of each element. -// -// Asm: VSQRTPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedSqrt(y Mask64x8) Float64x8 +// Asm: VDIVPS, CPU Feature: AVX +func (x Float32x4) Div(y Float32x4) Float32x4 -// Max computes the maximum of corresponding elements. 
+// Div divides elements of two vectors. // -// Asm: VMAXPD, CPU Feature: AVX512EVEX -func (x Float64x8) Max(y Float64x8) Float64x8 +// Asm: VDIVPS, CPU Feature: AVX +func (x Float32x8) Div(y Float32x8) Float32x8 -// Min computes the minimum of corresponding elements. +// Div divides elements of two vectors. // -// Asm: VMINPD, CPU Feature: AVX512EVEX -func (x Float64x8) Min(y Float64x8) Float64x8 +// Asm: VDIVPS, CPU Feature: AVX512EVEX +func (x Float32x16) Div(y Float32x16) Float32x16 -// Mul multiplies corresponding elements of two vectors, masked. +// Div divides elements of two vectors. // -// Asm: VMULPD, CPU Feature: AVX512EVEX -func (x Float64x8) Mul(y Float64x8) Float64x8 +// Asm: VDIVPD, CPU Feature: AVX +func (x Float64x2) Div(y Float64x2) Float64x2 -// MulByPowOf2 multiplies elements by a power of 2. +// Div divides elements of two vectors. // -// Asm: VSCALEFPD, CPU Feature: AVX512EVEX -func (x Float64x8) MulByPowOf2(y Float64x8) Float64x8 +// Asm: VDIVPD, CPU Feature: AVX +func (x Float64x4) Div(y Float64x4) Float64x4 -// NotEqual compares for inequality. -// Const Immediate = 4. +// Div divides elements of two vectors. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) NotEqual(y Float64x8) Mask64x8 +// Asm: VDIVPD, CPU Feature: AVX512EVEX +func (x Float64x8) Div(y Float64x8) Float64x8 -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VORPD, CPU Feature: AVX512EVEX -func (x Float64x8) Or(y Float64x8) Float64x8 +/* DotProdBroadcast */ -// Sub subtracts corresponding elements of two vectors. +// DotProdBroadcast multiplies all elements and broadcasts the sum. +// Const Immediate = 127. // -// Asm: VADDPD, CPU Feature: AVX512EVEX -func (x Float64x8) Sub(y Float64x8) Float64x8 +// Asm: VDPPD, CPU Feature: AVX +func (x Float64x2) DotProdBroadcast(y Float64x2) Float64x2 -// Xor performs a masked bitwise XOR operation between two vectors. -// -// Asm: VXORPD, CPU Feature: AVX512EVEX -func (x Float64x8) Xor(y Float64x8) Float64x8 +/* Equal */ -// Add adds corresponding elements of two vectors. +// Equal compares for equality. +// Const Immediate = 0. // -// Asm: VPADDW, CPU Feature: AVX2 -func (x Int16x16) Add(y Int16x16) Int16x16 +// Asm: VPCMPEQB, CPU Feature: AVX +func (x Int8x16) Equal(y Int8x16) Mask8x16 -// And performs a bitwise AND operation between two vectors. +// Equal compares for equality. +// Const Immediate = 0. // -// Asm: VPAND, CPU Feature: AVX2 -func (x Int16x16) And(y Int16x16) Int16x16 +// Asm: VPCMPEQB, CPU Feature: AVX2 +func (x Int8x32) Equal(y Int8x32) Mask8x32 -// AndNot performs a bitwise AND NOT operation between two vectors. +// Equal compares for equality. +// Const Immediate = 0. // -// Asm: VPANDN, CPU Feature: AVX2 -func (x Int16x16) AndNot(y Int16x16) Int16x16 +// Asm: VPCMPEQW, CPU Feature: AVX +func (x Int16x8) Equal(y Int16x8) Mask16x8 // Equal compares for equality. // Const Immediate = 0. @@ -1137,1101 +1072,1065 @@ func (x Int16x16) AndNot(y Int16x16) Int16x16 // Asm: VPCMPEQW, CPU Feature: AVX2 func (x Int16x16) Equal(y Int16x16) Mask16x16 -// Greater compares for greater than. -// Const Immediate = 6. +// Equal compares for equality. +// Const Immediate = 0. // -// Asm: VPCMPGTW, CPU Feature: AVX2 -func (x Int16x16) Greater(y Int16x16) Mask16x16 +// Asm: VPCMPEQD, CPU Feature: AVX +func (x Int32x4) Equal(y Int32x4) Mask32x4 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// Equal compares for equality. +// Const Immediate = 0. 
// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x16) GreaterEqual(y Int16x16) Mask16x16 +// Asm: VPCMPEQD, CPU Feature: AVX2 +func (x Int32x8) Equal(y Int32x8) Mask32x8 -// Less compares for less than. -// Const Immediate = 1. +// Equal compares for equality. +// Const Immediate = 0. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x16) Less(y Int16x16) Mask16x16 +// Asm: VPCMPEQQ, CPU Feature: AVX +func (x Int64x2) Equal(y Int64x2) Mask64x2 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// Equal compares for equality. +// Const Immediate = 0. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x16) LessEqual(y Int16x16) Mask16x16 +// Asm: VPCMPEQQ, CPU Feature: AVX2 +func (x Int64x4) Equal(y Int64x4) Mask64x4 -// Absolute computes the absolute value of each element. +// Equal compares for equality. +// Const Immediate = 0. // -// Asm: VPABSW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedAbsolute(y Mask16x16) Int16x16 +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x4) Equal(y Float32x4) Mask32x4 -// PopCount counts the number of set bits in each element. +// Equal compares for equality. +// Const Immediate = 0. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedPopCount(y Mask16x16) Int16x16 +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x8) Equal(y Float32x8) Mask32x8 -// Max computes the maximum of corresponding elements. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VPMAXSW, CPU Feature: AVX2 -func (x Int16x16) Max(y Int16x16) Int16x16 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) Equal(y Float32x16) Mask32x16 -// Min computes the minimum of corresponding elements. +// Equal compares for equality. +// Const Immediate = 0. // -// Asm: VPMINSW, CPU Feature: AVX2 -func (x Int16x16) Min(y Int16x16) Int16x16 +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x2) Equal(y Float64x2) Mask64x2 -// MulHigh multiplies elements and stores the high part of the result. +// Equal compares for equality. +// Const Immediate = 0. // -// Asm: VPMULHW, CPU Feature: AVX2 -func (x Int16x16) MulHigh(y Int16x16) Int16x16 +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x4) Equal(y Float64x4) Mask64x4 -// MulLow multiplies elements and stores the low part of the result. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VPMULLW, CPU Feature: AVX2 -func (x Int16x16) MulLow(y Int16x16) Int16x16 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) Equal(y Float64x8) Mask64x8 -// NotEqual compares for inequality. -// Const Immediate = 4. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x16) NotEqual(y Int16x16) Mask16x16 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x64) Equal(y Int8x64) Mask8x64 -// Or performs a bitwise OR operation between two vectors. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VPOR, CPU Feature: AVX2 -func (x Int16x16) Or(y Int16x16) Int16x16 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x32) Equal(y Int16x32) Mask16x32 -// PairDotProd multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. +// Equal compares for equality, masked. +// Const Immediate = 0. 
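// The "Const Immediate" values on these comparison methods follow the VCMP/VPCMP
// predicate encoding used throughout this file; collected here as constants for
// reference (the names are illustrative, not part of the package):
//
//	const (
//		cmpEqual        = 0
//		cmpLess         = 1
//		cmpLessEqual    = 2
//		cmpUnordered    = 3 // IsNan
//		cmpNotEqual     = 4
//		cmpGreaterEqual = 5
//		cmpGreater      = 6
//	)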
// -// Asm: VPMADDWD, CPU Feature: AVX2 -func (x Int16x16) PairDotProd(y Int16x16) Int32x8 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x16) Equal(y Int32x16) Mask32x16 -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VPHADDW, CPU Feature: AVX2 -func (x Int16x16) PairwiseAdd(y Int16x16) Int16x16 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x8) Equal(y Int64x8) Mask64x8 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VPHSUBW, CPU Feature: AVX2 -func (x Int16x16) PairwiseSub(y Int16x16) Int16x16 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x16) Equal(y Uint8x16) Mask8x16 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VPADDSW, CPU Feature: AVX2 -func (x Int16x16) SaturatedAdd(y Int16x16) Int16x16 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x32) Equal(y Uint8x32) Mask8x32 -// SaturatedPairwiseAdd horizontally adds adjacent pairs of elements with saturation. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VPHADDSW, CPU Feature: AVX2 -func (x Int16x16) SaturatedPairwiseAdd(y Int16x16) Int16x16 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x64) Equal(y Uint8x64) Mask8x64 -// SaturatedPairwiseSub horizontally subtracts adjacent pairs of elements with saturation. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VPHSUBSW, CPU Feature: AVX2 -func (x Int16x16) SaturatedPairwiseSub(y Int16x16) Int16x16 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x8) Equal(y Uint16x8) Mask16x8 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VPSUBSW, CPU Feature: AVX2 -func (x Int16x16) SaturatedSub(y Int16x16) Int16x16 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x16) Equal(y Uint16x16) Mask16x16 -// Sign returns the product of the first operand with -1, 0, or 1, -// whichever constant is nearest to the value of the second operand. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VPSIGNW, CPU Feature: AVX2 -func (x Int16x16) Sign(y Int16x16) Int16x16 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x32) Equal(y Uint16x32) Mask16x32 -// Sub subtracts corresponding elements of two vectors. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VPSUBW, CPU Feature: AVX2 -func (x Int16x16) Sub(y Int16x16) Int16x16 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x4) Equal(y Uint32x4) Mask32x4 -// Xor performs a bitwise XOR operation between two vectors. +// Equal compares for equality, masked. +// Const Immediate = 0. 
// -// Asm: VPXOR, CPU Feature: AVX2 -func (x Int16x16) Xor(y Int16x16) Int16x16 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x8) Equal(y Uint32x8) Mask32x8 -// Add adds corresponding elements of two vectors. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VPADDW, CPU Feature: AVX512EVEX -func (x Int16x32) Add(y Int16x32) Int16x32 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x16) Equal(y Uint32x16) Mask32x16 // Equal compares for equality, masked. // Const Immediate = 0. // -// Asm: VPCMPEQW, CPU Feature: AVX512EVEX -func (x Int16x32) Equal(y Int16x32) Mask16x32 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) Equal(y Uint64x2) Mask64x2 -// Greater compares for greater than. -// Const Immediate = 6. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VPCMPGTW, CPU Feature: AVX512EVEX -func (x Int16x32) Greater(y Int16x32) Mask16x32 - -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x32) GreaterEqual(y Int16x32) Mask16x32 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) Equal(y Uint64x4) Mask64x4 -// Less compares for less than. -// Const Immediate = 1. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x32) Less(y Int16x32) Mask16x32 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) Equal(y Uint64x8) Mask64x8 -// LessEqual compares for less than or equal. -// Const Immediate = 2. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x32) LessEqual(y Int16x32) Mask16x32 +/* Floor */ -// Absolute computes the absolute value of each element. +// Floor rounds elements down to the nearest integer. +// Const Immediate = 1. // -// Asm: VPABSW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedAbsolute(y Mask16x32) Int16x32 +// Asm: VROUNDPS, CPU Feature: AVX +func (x Float32x4) Floor() Float32x4 -// PopCount counts the number of set bits in each element. +// Floor rounds elements down to the nearest integer. +// Const Immediate = 1. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedPopCount(y Mask16x32) Int16x32 +// Asm: VROUNDPS, CPU Feature: AVX +func (x Float32x8) Floor() Float32x8 -// Max computes the maximum of corresponding elements. +// Floor rounds elements down to the nearest integer. +// Const Immediate = 1. // -// Asm: VPMAXSW, CPU Feature: AVX512EVEX -func (x Int16x32) Max(y Int16x32) Int16x32 +// Asm: VROUNDPD, CPU Feature: AVX +func (x Float64x2) Floor() Float64x2 -// Min computes the minimum of corresponding elements. +// Floor rounds elements down to the nearest integer. +// Const Immediate = 1. // -// Asm: VPMINSW, CPU Feature: AVX512EVEX -func (x Int16x32) Min(y Int16x32) Int16x32 +// Asm: VROUNDPD, CPU Feature: AVX +func (x Float64x4) Floor() Float64x4 -// MulHigh multiplies elements and stores the high part of the result, masked. -// -// Asm: VPMULHW, CPU Feature: AVX512EVEX -func (x Int16x32) MulHigh(y Int16x32) Int16x32 +/* FloorSuppressExceptionWithPrecision */ -// MulLow multiplies elements and stores the low part of the result, masked. +// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. +// Const Immediate = 9. 
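// Note the pattern in the immediates: each SuppressException variant is its base
// operation plus 8 (Round 0->8, Floor 1->9, Ceil 2->10, Trunc 3->11), which appears
// to correspond to the immediate bit that suppresses floating-point precision
// exceptions. Sketched as constants (illustrative names only):
//
//	const (
//		roundNearest = 0
//		roundDown    = 1 // Floor
//		roundUp      = 2 // Ceil
//		roundToZero  = 3 // Trunc
//		suppressExc  = 8 // bit 3 of the immediate
//	)
//	// e.g. FloorSuppressExceptionWithPrecision uses roundDown|suppressExc == 9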
// -// Asm: VPMULLW, CPU Feature: AVX512EVEX -func (x Int16x32) MulLow(y Int16x32) Int16x32 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) FloorSuppressExceptionWithPrecision(imm8 uint8) Float32x4 -// NotEqual compares for inequality. -// Const Immediate = 4. +// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. +// Const Immediate = 9. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x32) NotEqual(y Int16x32) Mask16x32 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) FloorSuppressExceptionWithPrecision(imm8 uint8) Float32x8 -// PairDotProd multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. +// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. +// Const Immediate = 9. // -// Asm: VPMADDWD, CPU Feature: AVX512EVEX -func (x Int16x32) PairDotProd(y Int16x32) Int32x16 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) FloorSuppressExceptionWithPrecision(imm8 uint8) Float32x16 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. +// Const Immediate = 9. // -// Asm: VPADDSW, CPU Feature: AVX512EVEX -func (x Int16x32) SaturatedAdd(y Int16x32) Int16x32 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) FloorSuppressExceptionWithPrecision(imm8 uint8) Float64x2 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. +// Const Immediate = 9. // -// Asm: VPSUBSW, CPU Feature: AVX512EVEX -func (x Int16x32) SaturatedSub(y Int16x32) Int16x32 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) FloorSuppressExceptionWithPrecision(imm8 uint8) Float64x4 -// Sub subtracts corresponding elements of two vectors. +// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. +// Const Immediate = 9. // -// Asm: VPSUBW, CPU Feature: AVX512EVEX -func (x Int16x32) Sub(y Int16x32) Int16x32 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) FloorSuppressExceptionWithPrecision(imm8 uint8) Float64x8 -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDW, CPU Feature: AVX -func (x Int16x8) Add(y Int16x8) Int16x8 +/* FloorWithPrecision */ -// And performs a bitwise AND operation between two vectors. +// FloorWithPrecision rounds elements down with specified precision, masked. +// Const Immediate = 1. // -// Asm: VPAND, CPU Feature: AVX -func (x Int16x8) And(y Int16x8) Int16x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) FloorWithPrecision(imm8 uint8) Float32x4 -// AndNot performs a bitwise AND NOT operation between two vectors. +// FloorWithPrecision rounds elements down with specified precision, masked. +// Const Immediate = 1. // -// Asm: VPANDN, CPU Feature: AVX -func (x Int16x8) AndNot(y Int16x8) Int16x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) FloorWithPrecision(imm8 uint8) Float32x8 -// Equal compares for equality. -// Const Immediate = 0. +// FloorWithPrecision rounds elements down with specified precision, masked. +// Const Immediate = 1. 
// -// Asm: VPCMPEQW, CPU Feature: AVX -func (x Int16x8) Equal(y Int16x8) Mask16x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) FloorWithPrecision(imm8 uint8) Float32x16 -// Greater compares for greater than. -// Const Immediate = 6. +// FloorWithPrecision rounds elements down with specified precision, masked. +// Const Immediate = 1. // -// Asm: VPCMPGTW, CPU Feature: AVX -func (x Int16x8) Greater(y Int16x8) Mask16x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) FloorWithPrecision(imm8 uint8) Float64x2 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// FloorWithPrecision rounds elements down with specified precision, masked. +// Const Immediate = 1. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x8) GreaterEqual(y Int16x8) Mask16x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) FloorWithPrecision(imm8 uint8) Float64x4 -// Less compares for less than. +// FloorWithPrecision rounds elements down with specified precision, masked. // Const Immediate = 1. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x8) Less(y Int16x8) Mask16x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) FloorWithPrecision(imm8 uint8) Float64x8 -// LessEqual compares for less than or equal. -// Const Immediate = 2. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x8) LessEqual(y Int16x8) Mask16x8 +/* FusedMultiplyAdd132 */ -// Absolute computes the absolute value of each element. +// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. // -// Asm: VPABSW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedAbsolute(y Mask16x8) Int16x8 +// Asm: VFMADD132PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplyAdd132(y Float32x4, z Float32x4) Float32x4 -// PopCount counts the number of set bits in each element. +// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedPopCount(y Mask16x8) Int16x8 +// Asm: VFMADD132PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplyAdd132(y Float32x8, z Float32x8) Float32x8 -// Max computes the maximum of corresponding elements. +// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. // -// Asm: VPMAXSW, CPU Feature: AVX -func (x Int16x8) Max(y Int16x8) Int16x8 +// Asm: VFMADD132PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplyAdd132(y Float32x16, z Float32x16) Float32x16 -// Min computes the minimum of corresponding elements. +// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. // -// Asm: VPMINSW, CPU Feature: AVX -func (x Int16x8) Min(y Int16x8) Int16x8 +// Asm: VFMADD132PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplyAdd132(y Float64x2, z Float64x2) Float64x2 -// MulHigh multiplies elements and stores the high part of the result. +// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. // -// Asm: VPMULHW, CPU Feature: AVX -func (x Int16x8) MulHigh(y Int16x8) Int16x8 +// Asm: VFMADD132PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplyAdd132(y Float64x4, z Float64x4) Float64x4 -// MulLow multiplies elements and stores the low part of the result. +// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. // -// Asm: VPMULLW, CPU Feature: AVX -func (x Int16x8) MulLow(y Int16x8) Int16x8 +// Asm: VFMADD132PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplyAdd132(y Float64x8, z Float64x8) Float64x8 -// NotEqual compares for inequality. -// Const Immediate = 4. 
-// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x8) NotEqual(y Int16x8) Mask16x8 +/* FusedMultiplyAdd213 */ -// Or performs a bitwise OR operation between two vectors. +// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. // -// Asm: VPOR, CPU Feature: AVX -func (x Int16x8) Or(y Int16x8) Int16x8 +// Asm: VFMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplyAdd213(y Float32x4, z Float32x4) Float32x4 -// PairDotProd multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. +// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. // -// Asm: VPMADDWD, CPU Feature: AVX -func (x Int16x8) PairDotProd(y Int16x8) Int32x4 +// Asm: VFMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplyAdd213(y Float32x8, z Float32x8) Float32x8 -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. // -// Asm: VPHADDW, CPU Feature: AVX -func (x Int16x8) PairwiseAdd(y Int16x8) Int16x8 +// Asm: VFMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplyAdd213(y Float32x16, z Float32x16) Float32x16 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. // -// Asm: VPHSUBW, CPU Feature: AVX -func (x Int16x8) PairwiseSub(y Int16x8) Int16x8 +// Asm: VFMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplyAdd213(y Float64x2, z Float64x2) Float64x2 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. // -// Asm: VPADDSW, CPU Feature: AVX -func (x Int16x8) SaturatedAdd(y Int16x8) Int16x8 +// Asm: VFMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplyAdd213(y Float64x4, z Float64x4) Float64x4 -// SaturatedPairwiseAdd horizontally adds adjacent pairs of elements with saturation. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. // -// Asm: VPHADDSW, CPU Feature: AVX -func (x Int16x8) SaturatedPairwiseAdd(y Int16x8) Int16x8 +// Asm: VFMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplyAdd213(y Float64x8, z Float64x8) Float64x8 -// SaturatedPairwiseSub horizontally subtracts adjacent pairs of elements with saturation. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. -// -// Asm: VPHSUBSW, CPU Feature: AVX -func (x Int16x8) SaturatedPairwiseSub(y Int16x8) Int16x8 +/* FusedMultiplyAdd231 */ -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. // -// Asm: VPSUBSW, CPU Feature: AVX -func (x Int16x8) SaturatedSub(y Int16x8) Int16x8 +// Asm: VFMADD231PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplyAdd231(y Float32x4, z Float32x4) Float32x4 -// Sign returns the product of the first operand with -1, 0, or 1, -// whichever constant is nearest to the value of the second operand. +// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. 
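// Assuming v1, v2 and v3 name the receiver and the two arguments in order, a
// per-lane scalar sketch using the standard library's fused multiply-add
// (a single rounding, as in VFMADD231PD; import "math"):
//
//	func fma231(x, y, z float64) float64 {
//		return math.FMA(y, z, x) // y*z + x
//	}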
// -// Asm: VPSIGNW, CPU Feature: AVX -func (x Int16x8) Sign(y Int16x8) Int16x8 +// Asm: VFMADD231PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplyAdd231(y Float32x8, z Float32x8) Float32x8 -// Sub subtracts corresponding elements of two vectors. +// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. // -// Asm: VPSUBW, CPU Feature: AVX -func (x Int16x8) Sub(y Int16x8) Int16x8 +// Asm: VFMADD231PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplyAdd231(y Float32x16, z Float32x16) Float32x16 -// Xor performs a bitwise XOR operation between two vectors. +// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. // -// Asm: VPXOR, CPU Feature: AVX -func (x Int16x8) Xor(y Int16x8) Int16x8 +// Asm: VFMADD231PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplyAdd231(y Float64x2, z Float64x2) Float64x2 -// Add adds corresponding elements of two vectors. +// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. // -// Asm: VPADDD, CPU Feature: AVX512EVEX -func (x Int32x16) Add(y Int32x16) Int32x16 +// Asm: VFMADD231PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplyAdd231(y Float64x4, z Float64x4) Float64x4 -// And performs a masked bitwise AND operation between two vectors. +// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. // -// Asm: VPANDD, CPU Feature: AVX512EVEX -func (x Int32x16) And(y Int32x16) Int32x16 +// Asm: VFMADD231PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplyAdd231(y Float64x8, z Float64x8) Float64x8 -// AndNot performs a masked bitwise AND NOT operation between two vectors. -// -// Asm: VPANDND, CPU Feature: AVX512EVEX -func (x Int32x16) AndNot(y Int32x16) Int32x16 +/* FusedMultiplyAddSub132 */ -// Equal compares for equality, masked. -// Const Immediate = 0. +// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. // -// Asm: VPCMPEQD, CPU Feature: AVX512EVEX -func (x Int32x16) Equal(y Int32x16) Mask32x16 +// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplyAddSub132(y Float32x4, z Float32x4) Float32x4 -// Greater compares for greater than. -// Const Immediate = 6. +// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. // -// Asm: VPCMPGTD, CPU Feature: AVX512EVEX -func (x Int32x16) Greater(y Int32x16) Mask32x16 +// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplyAddSub132(y Float32x8, z Float32x8) Float32x8 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x16) GreaterEqual(y Int32x16) Mask32x16 +// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplyAddSub132(y Float32x16, z Float32x16) Float32x16 -// Less compares for less than. -// Const Immediate = 1. +// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x16) Less(y Int32x16) Mask32x16 +// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplyAddSub132(y Float64x2, z Float64x2) Float64x2 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. 
// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x16) LessEqual(y Int32x16) Mask32x16 +// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplyAddSub132(y Float64x4, z Float64x4) Float64x4 -// Absolute computes the absolute value of each element. +// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. // -// Asm: VPABSD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedAbsolute(y Mask32x16) Int32x16 +// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplyAddSub132(y Float64x8, z Float64x8) Float64x8 -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedPopCount(y Mask32x16) Int32x16 +/* FusedMultiplyAddSub213 */ -// Max computes the maximum of corresponding elements. +// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. // -// Asm: VPMAXSD, CPU Feature: AVX512EVEX -func (x Int32x16) Max(y Int32x16) Int32x16 +// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplyAddSub213(y Float32x4, z Float32x4) Float32x4 -// Min computes the minimum of corresponding elements. +// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. // -// Asm: VPMINSD, CPU Feature: AVX512EVEX -func (x Int32x16) Min(y Int32x16) Int32x16 +// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplyAddSub213(y Float32x8, z Float32x8) Float32x8 -// MulLow multiplies elements and stores the low part of the result, masked. +// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. // -// Asm: VPMULLD, CPU Feature: AVX512EVEX -func (x Int32x16) MulLow(y Int32x16) Int32x16 +// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplyAddSub213(y Float32x16, z Float32x16) Float32x16 -// NotEqual compares for inequality. -// Const Immediate = 4. +// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x16) NotEqual(y Int32x16) Mask32x16 +// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplyAddSub213(y Float64x2, z Float64x2) Float64x2 -// Or performs a masked bitwise OR operation between two vectors. +// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. // -// Asm: VPORD, CPU Feature: AVX512EVEX -func (x Int32x16) Or(y Int32x16) Int32x16 +// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplyAddSub213(y Float64x4, z Float64x4) Float64x4 -// Sub subtracts corresponding elements of two vectors. +// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX -func (x Int32x16) Sub(y Int32x16) Int32x16 +// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplyAddSub213(y Float64x8, z Float64x8) Float64x8 -// Xor performs a masked bitwise XOR operation between two vectors. -// -// Asm: VPXORD, CPU Feature: AVX512EVEX -func (x Int32x16) Xor(y Int32x16) Int32x16 +/* FusedMultiplyAddSub231 */ -// Add adds corresponding elements of two vectors. 
+// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. // -// Asm: VPADDD, CPU Feature: AVX -func (x Int32x4) Add(y Int32x4) Int32x4 +// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplyAddSub231(y Float32x4, z Float32x4) Float32x4 -// And performs a bitwise AND operation between two vectors. +// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. // -// Asm: VPAND, CPU Feature: AVX -func (x Int32x4) And(y Int32x4) Int32x4 +// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplyAddSub231(y Float32x8, z Float32x8) Float32x8 -// AndNot performs a bitwise AND NOT operation between two vectors. +// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. // -// Asm: VPANDN, CPU Feature: AVX -func (x Int32x4) AndNot(y Int32x4) Int32x4 +// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplyAddSub231(y Float32x16, z Float32x16) Float32x16 -// Equal compares for equality. -// Const Immediate = 0. +// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. // -// Asm: VPCMPEQD, CPU Feature: AVX -func (x Int32x4) Equal(y Int32x4) Mask32x4 +// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplyAddSub231(y Float64x2, z Float64x2) Float64x2 -// Greater compares for greater than. -// Const Immediate = 6. +// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. // -// Asm: VPCMPGTD, CPU Feature: AVX -func (x Int32x4) Greater(y Int32x4) Mask32x4 +// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplyAddSub231(y Float64x4, z Float64x4) Float64x4 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x4) GreaterEqual(y Int32x4) Mask32x4 +// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplyAddSub231(y Float64x8, z Float64x8) Float64x8 -// Less compares for less than. -// Const Immediate = 1. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x4) Less(y Int32x4) Mask32x4 +/* FusedMultiplySub132 */ -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// FusedMultiplySub132 performs `(v1 * v3) - v2`. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x4) LessEqual(y Int32x4) Mask32x4 +// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplySub132(y Float32x4, z Float32x4) Float32x4 -// Absolute computes the absolute value of each element. +// FusedMultiplySub132 performs `(v1 * v3) - v2`. // -// Asm: VPABSD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedAbsolute(y Mask32x4) Int32x4 +// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplySub132(y Float32x8, z Float32x8) Float32x8 -// PopCount counts the number of set bits in each element. +// FusedMultiplySub132 performs `(v1 * v3) - v2`. 
// -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedPopCount(y Mask32x4) Int32x4 +// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplySub132(y Float32x16, z Float32x16) Float32x16 -// Max computes the maximum of corresponding elements. +// FusedMultiplySub132 performs `(v1 * v3) - v2`. // -// Asm: VPMAXSD, CPU Feature: AVX -func (x Int32x4) Max(y Int32x4) Int32x4 +// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplySub132(y Float64x2, z Float64x2) Float64x2 -// Min computes the minimum of corresponding elements. +// FusedMultiplySub132 performs `(v1 * v3) - v2`. // -// Asm: VPMINSD, CPU Feature: AVX -func (x Int32x4) Min(y Int32x4) Int32x4 +// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplySub132(y Float64x4, z Float64x4) Float64x4 -// MulEvenWiden multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. +// FusedMultiplySub132 performs `(v1 * v3) - v2`. // -// Asm: VPMULDQ, CPU Feature: AVX -func (x Int32x4) MulEvenWiden(y Int32x4) Int64x2 +// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplySub132(y Float64x8, z Float64x8) Float64x8 -// MulLow multiplies elements and stores the low part of the result. -// -// Asm: VPMULLD, CPU Feature: AVX -func (x Int32x4) MulLow(y Int32x4) Int32x4 +/* FusedMultiplySub213 */ -// NotEqual compares for inequality. -// Const Immediate = 4. +// FusedMultiplySub213 performs `(v2 * v1) - v3`. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x4) NotEqual(y Int32x4) Mask32x4 +// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplySub213(y Float32x4, z Float32x4) Float32x4 -// Or performs a bitwise OR operation between two vectors. +// FusedMultiplySub213 performs `(v2 * v1) - v3`. // -// Asm: VPOR, CPU Feature: AVX -func (x Int32x4) Or(y Int32x4) Int32x4 +// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplySub213(y Float32x8, z Float32x8) Float32x8 -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// FusedMultiplySub213 performs `(v2 * v1) - v3`. // -// Asm: VPHADDD, CPU Feature: AVX -func (x Int32x4) PairwiseAdd(y Int32x4) Int32x4 +// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplySub213(y Float32x16, z Float32x16) Float32x16 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// FusedMultiplySub213 performs `(v2 * v1) - v3`. // -// Asm: VPHSUBD, CPU Feature: AVX -func (x Int32x4) PairwiseSub(y Int32x4) Int32x4 +// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplySub213(y Float64x2, z Float64x2) Float64x2 -// Sign returns the product of the first operand with -1, 0, or 1, -// whichever constant is nearest to the value of the second operand. +// FusedMultiplySub213 performs `(v2 * v1) - v3`. // -// Asm: VPSIGND, CPU Feature: AVX -func (x Int32x4) Sign(y Int32x4) Int32x4 +// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplySub213(y Float64x4, z Float64x4) Float64x4 -// Sub subtracts corresponding elements of two vectors. +// FusedMultiplySub213 performs `(v2 * v1) - v3`. 
// -// Asm: VPSUBD, CPU Feature: AVX -func (x Int32x4) Sub(y Int32x4) Int32x4 +// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplySub213(y Float64x8, z Float64x8) Float64x8 -// Xor performs a bitwise XOR operation between two vectors. -// -// Asm: VPXOR, CPU Feature: AVX -func (x Int32x4) Xor(y Int32x4) Int32x4 +/* FusedMultiplySub231 */ -// Add adds corresponding elements of two vectors. +// FusedMultiplySub231 performs `(v2 * v3) - v1`. // -// Asm: VPADDD, CPU Feature: AVX2 -func (x Int32x8) Add(y Int32x8) Int32x8 +// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplySub231(y Float32x4, z Float32x4) Float32x4 -// And performs a bitwise AND operation between two vectors. +// FusedMultiplySub231 performs `(v2 * v3) - v1`. // -// Asm: VPAND, CPU Feature: AVX2 -func (x Int32x8) And(y Int32x8) Int32x8 +// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplySub231(y Float32x8, z Float32x8) Float32x8 -// AndNot performs a bitwise AND NOT operation between two vectors. +// FusedMultiplySub231 performs `(v2 * v3) - v1`. // -// Asm: VPANDN, CPU Feature: AVX2 -func (x Int32x8) AndNot(y Int32x8) Int32x8 +// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplySub231(y Float32x16, z Float32x16) Float32x16 -// Equal compares for equality. -// Const Immediate = 0. +// FusedMultiplySub231 performs `(v2 * v3) - v1`. // -// Asm: VPCMPEQD, CPU Feature: AVX2 -func (x Int32x8) Equal(y Int32x8) Mask32x8 +// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplySub231(y Float64x2, z Float64x2) Float64x2 -// Greater compares for greater than. -// Const Immediate = 6. +// FusedMultiplySub231 performs `(v2 * v3) - v1`. // -// Asm: VPCMPGTD, CPU Feature: AVX2 -func (x Int32x8) Greater(y Int32x8) Mask32x8 +// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplySub231(y Float64x4, z Float64x4) Float64x4 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// FusedMultiplySub231 performs `(v2 * v3) - v1`. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x8) GreaterEqual(y Int32x8) Mask32x8 +// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplySub231(y Float64x8, z Float64x8) Float64x8 -// Less compares for less than. -// Const Immediate = 1. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x8) Less(y Int32x8) Mask32x8 +/* FusedMultiplySubAdd132 */ -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x8) LessEqual(y Int32x8) Mask32x8 +// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplySubAdd132(y Float32x4, z Float32x4) Float32x4 -// Absolute computes the absolute value of each element. +// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. // -// Asm: VPABSD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedAbsolute(y Mask32x8) Int32x8 +// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplySubAdd132(y Float32x8, z Float32x8) Float32x8 -// PopCount counts the number of set bits in each element. +// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. 
// -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedPopCount(y Mask32x8) Int32x8 +// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplySubAdd132(y Float32x16, z Float32x16) Float32x16 -// Max computes the maximum of corresponding elements. +// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. // -// Asm: VPMAXSD, CPU Feature: AVX2 -func (x Int32x8) Max(y Int32x8) Int32x8 +// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplySubAdd132(y Float64x2, z Float64x2) Float64x2 -// Min computes the minimum of corresponding elements. +// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. // -// Asm: VPMINSD, CPU Feature: AVX2 -func (x Int32x8) Min(y Int32x8) Int32x8 +// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplySubAdd132(y Float64x4, z Float64x4) Float64x4 -// MulEvenWiden multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. +// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. // -// Asm: VPMULDQ, CPU Feature: AVX2 -func (x Int32x8) MulEvenWiden(y Int32x8) Int64x4 +// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplySubAdd132(y Float64x8, z Float64x8) Float64x8 -// MulLow multiplies elements and stores the low part of the result. -// -// Asm: VPMULLD, CPU Feature: AVX2 -func (x Int32x8) MulLow(y Int32x8) Int32x8 +/* FusedMultiplySubAdd213 */ -// NotEqual compares for inequality. -// Const Immediate = 4. +// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x8) NotEqual(y Int32x8) Mask32x8 +// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplySubAdd213(y Float32x4, z Float32x4) Float32x4 -// Or performs a bitwise OR operation between two vectors. +// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. // -// Asm: VPOR, CPU Feature: AVX2 -func (x Int32x8) Or(y Int32x8) Int32x8 +// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplySubAdd213(y Float32x8, z Float32x8) Float32x8 -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. // -// Asm: VPHADDD, CPU Feature: AVX2 -func (x Int32x8) PairwiseAdd(y Int32x8) Int32x8 +// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplySubAdd213(y Float32x16, z Float32x16) Float32x16 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. 
// -// Asm: VPHSUBD, CPU Feature: AVX2 -func (x Int32x8) PairwiseSub(y Int32x8) Int32x8 +// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplySubAdd213(y Float64x2, z Float64x2) Float64x2 -// Sign returns the product of the first operand with -1, 0, or 1, -// whichever constant is nearest to the value of the second operand. +// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. // -// Asm: VPSIGND, CPU Feature: AVX2 -func (x Int32x8) Sign(y Int32x8) Int32x8 +// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplySubAdd213(y Float64x4, z Float64x4) Float64x4 -// Sub subtracts corresponding elements of two vectors. +// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. // -// Asm: VPSUBD, CPU Feature: AVX2 -func (x Int32x8) Sub(y Int32x8) Int32x8 +// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplySubAdd213(y Float64x8, z Float64x8) Float64x8 -// Xor performs a bitwise XOR operation between two vectors. +/* FusedMultiplySubAdd231 */ + +// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. // -// Asm: VPXOR, CPU Feature: AVX2 -func (x Int32x8) Xor(y Int32x8) Int32x8 +// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplySubAdd231(y Float32x4, z Float32x4) Float32x4 -// Add adds corresponding elements of two vectors. +// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. // -// Asm: VPADDQ, CPU Feature: AVX -func (x Int64x2) Add(y Int64x2) Int64x2 +// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplySubAdd231(y Float32x8, z Float32x8) Float32x8 -// And performs a bitwise AND operation between two vectors. +// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. // -// Asm: VPAND, CPU Feature: AVX -func (x Int64x2) And(y Int64x2) Int64x2 +// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplySubAdd231(y Float32x16, z Float32x16) Float32x16 -// AndNot performs a bitwise AND NOT operation between two vectors. +// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. // -// Asm: VPANDN, CPU Feature: AVX -func (x Int64x2) AndNot(y Int64x2) Int64x2 +// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplySubAdd231(y Float64x2, z Float64x2) Float64x2 -// Equal compares for equality. -// Const Immediate = 0. +// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. // -// Asm: VPCMPEQQ, CPU Feature: AVX -func (x Int64x2) Equal(y Int64x2) Mask64x2 +// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplySubAdd231(y Float64x4, z Float64x4) Float64x4 -// Greater compares for greater than. -// Const Immediate = 6. +// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. 
// -// Asm: VPCMPGTQ, CPU Feature: AVX512EVEX -func (x Int64x2) Greater(y Int64x2) Mask64x2 +// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplySubAdd231(y Float64x8, z Float64x8) Float64x8 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +/* FusedNegativeMultiplyAdd132 */ + +// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x2) GreaterEqual(y Int64x2) Mask64x2 +// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedNegativeMultiplyAdd132(y Float32x4, z Float32x4) Float32x4 -// Less compares for less than. -// Const Immediate = 1. +// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x2) Less(y Int64x2) Mask64x2 +// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedNegativeMultiplyAdd132(y Float32x8, z Float32x8) Float32x8 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x2) LessEqual(y Int64x2) Mask64x2 +// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedNegativeMultiplyAdd132(y Float32x16, z Float32x16) Float32x16 -// Absolute computes the absolute value of each element. +// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. // -// Asm: VPABSQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedAbsolute(y Mask64x2) Int64x2 +// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedNegativeMultiplyAdd132(y Float64x2, z Float64x2) Float64x2 -// PopCount counts the number of set bits in each element. +// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedPopCount(y Mask64x2) Int64x2 +// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedNegativeMultiplyAdd132(y Float64x4, z Float64x4) Float64x4 -// Max computes the maximum of corresponding elements. +// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. // -// Asm: VPMAXSQ, CPU Feature: AVX512EVEX -func (x Int64x2) Max(y Int64x2) Int64x2 +// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedNegativeMultiplyAdd132(y Float64x8, z Float64x8) Float64x8 -// Min computes the minimum of corresponding elements. +/* FusedNegativeMultiplyAdd213 */ + +// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. // -// Asm: VPMINSQ, CPU Feature: AVX512EVEX -func (x Int64x2) Min(y Int64x2) Int64x2 +// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedNegativeMultiplyAdd213(y Float32x4, z Float32x4) Float32x4 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. +// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. // -// Asm: VPMULDQ, CPU Feature: AVX512EVEX -func (x Int64x2) MulEvenWiden(y Int64x2) Int64x2 +// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedNegativeMultiplyAdd213(y Float32x8, z Float32x8) Float32x8 -// MulLow multiplies elements and stores the low part of the result, masked. +// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. // -// Asm: VPMULLQ, CPU Feature: AVX512EVEX -func (x Int64x2) MulLow(y Int64x2) Int64x2 +// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedNegativeMultiplyAdd213(y Float32x16, z Float32x16) Float32x16 -// NotEqual compares for inequality. -// Const Immediate = 4. 
+// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x2) NotEqual(y Int64x2) Mask64x2 +// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedNegativeMultiplyAdd213(y Float64x2, z Float64x2) Float64x2 -// Or performs a bitwise OR operation between two vectors. +// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. // -// Asm: VPOR, CPU Feature: AVX -func (x Int64x2) Or(y Int64x2) Int64x2 +// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedNegativeMultiplyAdd213(y Float64x4, z Float64x4) Float64x4 -// Sub subtracts corresponding elements of two vectors. +// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. // -// Asm: VPSUBQ, CPU Feature: AVX -func (x Int64x2) Sub(y Int64x2) Int64x2 +// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedNegativeMultiplyAdd213(y Float64x8, z Float64x8) Float64x8 -// Xor performs a bitwise XOR operation between two vectors. +/* FusedNegativeMultiplyAdd231 */ + +// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. // -// Asm: VPXOR, CPU Feature: AVX -func (x Int64x2) Xor(y Int64x2) Int64x2 +// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedNegativeMultiplyAdd231(y Float32x4, z Float32x4) Float32x4 -// Add adds corresponding elements of two vectors. +// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. // -// Asm: VPADDQ, CPU Feature: AVX2 -func (x Int64x4) Add(y Int64x4) Int64x4 +// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedNegativeMultiplyAdd231(y Float32x8, z Float32x8) Float32x8 -// And performs a bitwise AND operation between two vectors. +// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. // -// Asm: VPAND, CPU Feature: AVX2 -func (x Int64x4) And(y Int64x4) Int64x4 +// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedNegativeMultiplyAdd231(y Float32x16, z Float32x16) Float32x16 -// AndNot performs a bitwise AND NOT operation between two vectors. +// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. // -// Asm: VPANDN, CPU Feature: AVX2 -func (x Int64x4) AndNot(y Int64x4) Int64x4 +// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedNegativeMultiplyAdd231(y Float64x2, z Float64x2) Float64x2 -// Equal compares for equality. -// Const Immediate = 0. +// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. // -// Asm: VPCMPEQQ, CPU Feature: AVX2 -func (x Int64x4) Equal(y Int64x4) Mask64x4 +// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedNegativeMultiplyAdd231(y Float64x4, z Float64x4) Float64x4 -// Greater compares for greater than. -// Const Immediate = 6. +// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. // -// Asm: VPCMPGTQ, CPU Feature: AVX2 -func (x Int64x4) Greater(y Int64x4) Mask64x4 +// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedNegativeMultiplyAdd231(y Float64x8, z Float64x8) Float64x8 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +/* FusedNegativeMultiplySub132 */ + +// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x4) GreaterEqual(y Int64x4) Mask64x4 +// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedNegativeMultiplySub132(y Float32x4, z Float32x4) Float32x4 -// Less compares for less than. -// Const Immediate = 1. +// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. 
// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x4) Less(y Int64x4) Mask64x4 +// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedNegativeMultiplySub132(y Float32x8, z Float32x8) Float32x8 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x4) LessEqual(y Int64x4) Mask64x4 +// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedNegativeMultiplySub132(y Float32x16, z Float32x16) Float32x16 -// Absolute computes the absolute value of each element. +// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. // -// Asm: VPABSQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedAbsolute(y Mask64x4) Int64x4 +// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedNegativeMultiplySub132(y Float64x2, z Float64x2) Float64x2 -// PopCount counts the number of set bits in each element. +// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedPopCount(y Mask64x4) Int64x4 +// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedNegativeMultiplySub132(y Float64x4, z Float64x4) Float64x4 -// Max computes the maximum of corresponding elements. +// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. // -// Asm: VPMAXSQ, CPU Feature: AVX512EVEX -func (x Int64x4) Max(y Int64x4) Int64x4 +// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedNegativeMultiplySub132(y Float64x8, z Float64x8) Float64x8 -// Min computes the minimum of corresponding elements. +/* FusedNegativeMultiplySub213 */ + +// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. // -// Asm: VPMINSQ, CPU Feature: AVX512EVEX -func (x Int64x4) Min(y Int64x4) Int64x4 +// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedNegativeMultiplySub213(y Float32x4, z Float32x4) Float32x4 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. +// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. // -// Asm: VPMULDQ, CPU Feature: AVX512EVEX -func (x Int64x4) MulEvenWiden(y Int64x4) Int64x4 +// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedNegativeMultiplySub213(y Float32x8, z Float32x8) Float32x8 -// MulLow multiplies elements and stores the low part of the result, masked. +// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. // -// Asm: VPMULLQ, CPU Feature: AVX512EVEX -func (x Int64x4) MulLow(y Int64x4) Int64x4 +// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedNegativeMultiplySub213(y Float32x16, z Float32x16) Float32x16 -// NotEqual compares for inequality. -// Const Immediate = 4. +// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x4) NotEqual(y Int64x4) Mask64x4 +// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedNegativeMultiplySub213(y Float64x2, z Float64x2) Float64x2 -// Or performs a bitwise OR operation between two vectors. +// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. // -// Asm: VPOR, CPU Feature: AVX2 -func (x Int64x4) Or(y Int64x4) Int64x4 +// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedNegativeMultiplySub213(y Float64x4, z Float64x4) Float64x4 -// Sub subtracts corresponding elements of two vectors. +// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. 
// -// Asm: VPSUBQ, CPU Feature: AVX2 -func (x Int64x4) Sub(y Int64x4) Int64x4 +// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedNegativeMultiplySub213(y Float64x8, z Float64x8) Float64x8 -// Xor performs a bitwise XOR operation between two vectors. +/* FusedNegativeMultiplySub231 */ + +// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. // -// Asm: VPXOR, CPU Feature: AVX2 -func (x Int64x4) Xor(y Int64x4) Int64x4 +// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedNegativeMultiplySub231(y Float32x4, z Float32x4) Float32x4 -// Add adds corresponding elements of two vectors. +// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. // -// Asm: VPADDQ, CPU Feature: AVX512EVEX -func (x Int64x8) Add(y Int64x8) Int64x8 +// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedNegativeMultiplySub231(y Float32x8, z Float32x8) Float32x8 -// And performs a masked bitwise AND operation between two vectors. +// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. // -// Asm: VPANDQ, CPU Feature: AVX512EVEX -func (x Int64x8) And(y Int64x8) Int64x8 +// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedNegativeMultiplySub231(y Float32x16, z Float32x16) Float32x16 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. // -// Asm: VPANDNQ, CPU Feature: AVX512EVEX -func (x Int64x8) AndNot(y Int64x8) Int64x8 +// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedNegativeMultiplySub231(y Float64x2, z Float64x2) Float64x2 -// Equal compares for equality, masked. -// Const Immediate = 0. +// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. // -// Asm: VPCMPEQQ, CPU Feature: AVX512EVEX -func (x Int64x8) Equal(y Int64x8) Mask64x8 +// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedNegativeMultiplySub231(y Float64x4, z Float64x4) Float64x4 + +// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. +// +// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedNegativeMultiplySub231(y Float64x8, z Float64x8) Float64x8 + +/* Greater */ // Greater compares for greater than. // Const Immediate = 6. // -// Asm: VPCMPGTQ, CPU Feature: AVX512EVEX -func (x Int64x8) Greater(y Int64x8) Mask64x8 +// Asm: VPCMPGTB, CPU Feature: AVX +func (x Int8x16) Greater(y Int8x16) Mask8x16 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x8) GreaterEqual(y Int64x8) Mask64x8 +// Asm: VPCMPGTB, CPU Feature: AVX2 +func (x Int8x32) Greater(y Int8x32) Mask8x32 -// Less compares for less than. -// Const Immediate = 1. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x8) Less(y Int64x8) Mask64x8 +// Asm: VPCMPGTW, CPU Feature: AVX +func (x Int16x8) Greater(y Int16x8) Mask16x8 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x8) LessEqual(y Int64x8) Mask64x8 +// Asm: VPCMPGTW, CPU Feature: AVX2 +func (x Int16x16) Greater(y Int16x16) Mask16x16 -// Absolute computes the absolute value of each element. +// Greater compares for greater than. +// Const Immediate = 6. 
// -// Asm: VPABSQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedAbsolute(y Mask64x8) Int64x8 +// Asm: VPCMPGTD, CPU Feature: AVX +func (x Int32x4) Greater(y Int32x4) Mask32x4 -// PopCount counts the number of set bits in each element. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedPopCount(y Mask64x8) Int64x8 +// Asm: VPCMPGTD, CPU Feature: AVX2 +func (x Int32x8) Greater(y Int32x8) Mask32x8 -// Max computes the maximum of corresponding elements. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPMAXSQ, CPU Feature: AVX512EVEX -func (x Int64x8) Max(y Int64x8) Int64x8 +// Asm: VPCMPGTQ, CPU Feature: AVX2 +func (x Int64x4) Greater(y Int64x4) Mask64x4 -// Min computes the minimum of corresponding elements. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPMINSQ, CPU Feature: AVX512EVEX -func (x Int64x8) Min(y Int64x8) Int64x8 +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x4) Greater(y Float32x4) Mask32x4 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPMULDQ, CPU Feature: AVX512EVEX -func (x Int64x8) MulEvenWiden(y Int64x8) Int64x8 +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x8) Greater(y Float32x8) Mask32x8 -// MulLow multiplies elements and stores the low part of the result, masked. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPMULLQ, CPU Feature: AVX512EVEX -func (x Int64x8) MulLow(y Int64x8) Int64x8 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) Greater(y Float32x16) Mask32x16 -// NotEqual compares for inequality. -// Const Immediate = 4. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x8) NotEqual(y Int64x8) Mask64x8 +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x2) Greater(y Float64x2) Mask64x2 -// Or performs a masked bitwise OR operation between two vectors. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPORQ, CPU Feature: AVX512EVEX -func (x Int64x8) Or(y Int64x8) Int64x8 +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x4) Greater(y Float64x4) Mask64x4 -// Sub subtracts corresponding elements of two vectors. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX -func (x Int64x8) Sub(y Int64x8) Int64x8 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) Greater(y Float64x8) Mask64x8 -// Xor performs a masked bitwise XOR operation between two vectors. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPXORQ, CPU Feature: AVX512EVEX -func (x Int64x8) Xor(y Int64x8) Int64x8 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x64) Greater(y Int8x64) Mask8x64 -// Add adds corresponding elements of two vectors. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPADDB, CPU Feature: AVX -func (x Int8x16) Add(y Int8x16) Int8x16 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x32) Greater(y Int16x32) Mask16x32 -// And performs a bitwise AND operation between two vectors. +// Greater compares for greater than. +// Const Immediate = 6. 
// -// Asm: VPAND, CPU Feature: AVX -func (x Int8x16) And(y Int8x16) Int8x16 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x16) Greater(y Int32x16) Mask32x16 -// AndNot performs a bitwise AND NOT operation between two vectors. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPANDN, CPU Feature: AVX -func (x Int8x16) AndNot(y Int8x16) Int8x16 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x2) Greater(y Int64x2) Mask64x2 -// Equal compares for equality. -// Const Immediate = 0. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPCMPEQB, CPU Feature: AVX -func (x Int8x16) Equal(y Int8x16) Mask8x16 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x8) Greater(y Int64x8) Mask64x8 // Greater compares for greater than. // Const Immediate = 6. // -// Asm: VPCMPGTB, CPU Feature: AVX -func (x Int8x16) Greater(y Int8x16) Mask8x16 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x16) Greater(y Uint8x16) Mask8x16 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x16) GreaterEqual(y Int8x16) Mask8x16 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x32) Greater(y Uint8x32) Mask8x32 -// Less compares for less than. -// Const Immediate = 1. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x16) Less(y Int8x16) Mask8x16 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x64) Greater(y Uint8x64) Mask8x64 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x16) LessEqual(y Int8x16) Mask8x16 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x8) Greater(y Uint16x8) Mask16x8 -// Absolute computes the absolute value of each element. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPABSB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedAbsolute(y Mask8x16) Int8x16 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x16) Greater(y Uint16x16) Mask16x16 -// PopCount counts the number of set bits in each element. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedPopCount(y Mask8x16) Int8x16 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x32) Greater(y Uint16x32) Mask16x32 -// Max computes the maximum of corresponding elements. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPMAXSB, CPU Feature: AVX -func (x Int8x16) Max(y Int8x16) Int8x16 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x4) Greater(y Uint32x4) Mask32x4 -// Min computes the minimum of corresponding elements. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPMINSB, CPU Feature: AVX -func (x Int8x16) Min(y Int8x16) Int8x16 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x8) Greater(y Uint32x8) Mask32x8 -// NotEqual compares for inequality. -// Const Immediate = 4. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x16) NotEqual(y Int8x16) Mask8x16 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x16) Greater(y Uint32x16) Mask32x16 -// Or performs a bitwise OR operation between two vectors. 
+// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPOR, CPU Feature: AVX -func (x Int8x16) Or(y Int8x16) Int8x16 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) Greater(y Uint64x2) Mask64x2 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPADDSB, CPU Feature: AVX -func (x Int8x16) SaturatedAdd(y Int8x16) Int8x16 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) Greater(y Uint64x4) Mask64x4 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPSUBSB, CPU Feature: AVX -func (x Int8x16) SaturatedSub(y Int8x16) Int8x16 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) Greater(y Uint64x8) Mask64x8 -// Sign returns the product of the first operand with -1, 0, or 1, -// whichever constant is nearest to the value of the second operand. -// -// Asm: VPSIGNB, CPU Feature: AVX -func (x Int8x16) Sign(y Int8x16) Int8x16 +/* GreaterEqual */ -// Sub subtracts corresponding elements of two vectors. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPSUBB, CPU Feature: AVX -func (x Int8x16) Sub(y Int8x16) Int8x16 +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x4) GreaterEqual(y Float32x4) Mask32x4 -// Xor performs a bitwise XOR operation between two vectors. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPXOR, CPU Feature: AVX -func (x Int8x16) Xor(y Int8x16) Int8x16 +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x8) GreaterEqual(y Float32x8) Mask32x8 -// Add adds corresponding elements of two vectors. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPADDB, CPU Feature: AVX2 -func (x Int8x32) Add(y Int8x32) Int8x32 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) GreaterEqual(y Float32x16) Mask32x16 -// And performs a bitwise AND operation between two vectors. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPAND, CPU Feature: AVX2 -func (x Int8x32) And(y Int8x32) Int8x32 +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x2) GreaterEqual(y Float64x2) Mask64x2 -// AndNot performs a bitwise AND NOT operation between two vectors. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPANDN, CPU Feature: AVX2 -func (x Int8x32) AndNot(y Int8x32) Int8x32 +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x4) GreaterEqual(y Float64x4) Mask64x4 -// Equal compares for equality. -// Const Immediate = 0. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPCMPEQB, CPU Feature: AVX2 -func (x Int8x32) Equal(y Int8x32) Mask8x32 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) GreaterEqual(y Float64x8) Mask64x8 -// Greater compares for greater than. -// Const Immediate = 6. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPCMPGTB, CPU Feature: AVX2 -func (x Int8x32) Greater(y Int8x32) Mask8x32 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x16) GreaterEqual(y Int8x16) Mask8x16 // GreaterEqual compares for greater than or equal. // Const Immediate = 5. 
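
The declarations in this hunk are mechanical: each exported method records the VEX/EVEX instruction it lowers to ("Asm:") and the CPU feature it requires, takes its receiver as the first operand, and returns a fresh vector or mask value rather than mutating in place. As a rough, illustrative sketch of how these pieces compose (assuming the internal/simd import is reachable from the calling code and the SIMD experiment is enabled in the toolchain; the method names and signatures are taken from the generated declarations above, everything else here is hypothetical):

package simdsample

import "internal/simd"

// fmaGreater returns a lane mask marking where (x*y)+z strictly exceeds z.
func fmaGreater(x, y, z simd.Float32x4) simd.Mask32x4 {
	// Per the doc comments above, z.FusedMultiplyAdd231(x, y) computes (x * y) + z.
	sum := z.FusedMultiplyAdd231(x, y)
	// Greater lowers to VCMPPS with immediate 6 and yields a Mask32x4.
	return sum.Greater(z)
}

Because every method is value-returning, an intermediate such as sum stays an ordinary SSA value and can be kept in a vector register once these methods are intrinsified; nothing in the Go-level API implies a memory round-trip.
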
@@ -2239,297 +2138,315 @@ func (x Int8x32) Greater(y Int8x32) Mask8x32 // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x32) GreaterEqual(y Int8x32) Mask8x32 -// Less compares for less than. -// Const Immediate = 1. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x32) Less(y Int8x32) Mask8x32 +func (x Int8x64) GreaterEqual(y Int8x64) Mask8x64 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x32) LessEqual(y Int8x32) Mask8x32 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x8) GreaterEqual(y Int16x8) Mask16x8 -// Absolute computes the absolute value of each element. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPABSB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedAbsolute(y Mask8x32) Int8x32 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x16) GreaterEqual(y Int16x16) Mask16x16 -// PopCount counts the number of set bits in each element. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedPopCount(y Mask8x32) Int8x32 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x32) GreaterEqual(y Int16x32) Mask16x32 -// Max computes the maximum of corresponding elements. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPMAXSB, CPU Feature: AVX2 -func (x Int8x32) Max(y Int8x32) Int8x32 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x4) GreaterEqual(y Int32x4) Mask32x4 -// Min computes the minimum of corresponding elements. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPMINSB, CPU Feature: AVX2 -func (x Int8x32) Min(y Int8x32) Int8x32 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x8) GreaterEqual(y Int32x8) Mask32x8 -// NotEqual compares for inequality. -// Const Immediate = 4. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x32) NotEqual(y Int8x32) Mask8x32 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x16) GreaterEqual(y Int32x16) Mask32x16 -// Or performs a bitwise OR operation between two vectors. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPOR, CPU Feature: AVX2 -func (x Int8x32) Or(y Int8x32) Int8x32 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x2) GreaterEqual(y Int64x2) Mask64x2 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPADDSB, CPU Feature: AVX2 -func (x Int8x32) SaturatedAdd(y Int8x32) Int8x32 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x4) GreaterEqual(y Int64x4) Mask64x4 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPSUBSB, CPU Feature: AVX2 -func (x Int8x32) SaturatedSub(y Int8x32) Int8x32 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x8) GreaterEqual(y Int64x8) Mask64x8 -// Sign returns the product of the first operand with -1, 0, or 1, -// whichever constant is nearest to the value of the second operand. +// GreaterEqual compares for greater than or equal. 
+// Const Immediate = 5. // -// Asm: VPSIGNB, CPU Feature: AVX2 -func (x Int8x32) Sign(y Int8x32) Int8x32 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x16) GreaterEqual(y Uint8x16) Mask8x16 -// Sub subtracts corresponding elements of two vectors. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPSUBB, CPU Feature: AVX2 -func (x Int8x32) Sub(y Int8x32) Int8x32 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x32) GreaterEqual(y Uint8x32) Mask8x32 -// Xor performs a bitwise XOR operation between two vectors. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPXOR, CPU Feature: AVX2 -func (x Int8x32) Xor(y Int8x32) Int8x32 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x64) GreaterEqual(y Uint8x64) Mask8x64 -// Add adds corresponding elements of two vectors. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPADDB, CPU Feature: AVX512EVEX -func (x Int8x64) Add(y Int8x64) Int8x64 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x8) GreaterEqual(y Uint16x8) Mask16x8 -// Equal compares for equality, masked. -// Const Immediate = 0. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPCMPEQB, CPU Feature: AVX512EVEX -func (x Int8x64) Equal(y Int8x64) Mask8x64 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x16) GreaterEqual(y Uint16x16) Mask16x16 -// Greater compares for greater than. -// Const Immediate = 6. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPCMPGTB, CPU Feature: AVX512EVEX -func (x Int8x64) Greater(y Int8x64) Mask8x64 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x32) GreaterEqual(y Uint16x32) Mask16x32 // GreaterEqual compares for greater than or equal. // Const Immediate = 5. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x64) GreaterEqual(y Int8x64) Mask8x64 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x4) GreaterEqual(y Uint32x4) Mask32x4 -// Less compares for less than. -// Const Immediate = 1. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x64) Less(y Int8x64) Mask8x64 - -// LessEqual compares for less than or equal. -// Const Immediate = 2. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x64) LessEqual(y Int8x64) Mask8x64 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x8) GreaterEqual(y Uint32x8) Mask32x8 -// Absolute computes the absolute value of each element. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPABSB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedAbsolute(y Mask8x64) Int8x64 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x16) GreaterEqual(y Uint32x16) Mask32x16 -// PopCount counts the number of set bits in each element. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedPopCount(y Mask8x64) Int8x64 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) GreaterEqual(y Uint64x2) Mask64x2 -// Max computes the maximum of corresponding elements. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. 
// -// Asm: VPMAXSB, CPU Feature: AVX512EVEX -func (x Int8x64) Max(y Int8x64) Int8x64 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) GreaterEqual(y Uint64x4) Mask64x4 -// Min computes the minimum of corresponding elements. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPMINSB, CPU Feature: AVX512EVEX -func (x Int8x64) Min(y Int8x64) Int8x64 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) GreaterEqual(y Uint64x8) Mask64x8 -// NotEqual compares for inequality. -// Const Immediate = 4. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x64) NotEqual(y Int8x64) Mask8x64 +/* IsNan */ -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Const Immediate = 3. // -// Asm: VPADDSB, CPU Feature: AVX512EVEX -func (x Int8x64) SaturatedAdd(y Int8x64) Int8x64 +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x4) IsNan(y Float32x4) Mask32x4 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Const Immediate = 3. // -// Asm: VPSUBSB, CPU Feature: AVX512EVEX -func (x Int8x64) SaturatedSub(y Int8x64) Int8x64 +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x8) IsNan(y Float32x8) Mask32x8 -// Sub subtracts corresponding elements of two vectors. +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Const Immediate = 3. // -// Asm: VPSUBB, CPU Feature: AVX512EVEX -func (x Int8x64) Sub(y Int8x64) Int8x64 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) IsNan(y Float32x16) Mask32x16 -// Add adds corresponding elements of two vectors. +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Const Immediate = 3. // -// Asm: VPADDW, CPU Feature: AVX2 -func (x Uint16x16) Add(y Uint16x16) Uint16x16 +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x2) IsNan(y Float64x2) Mask64x2 -// And performs a bitwise AND operation between two vectors. +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Const Immediate = 3. // -// Asm: VPAND, CPU Feature: AVX2 -func (x Uint16x16) And(y Uint16x16) Uint16x16 +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x4) IsNan(y Float64x4) Mask64x4 -// AndNot performs a bitwise AND NOT operation between two vectors. +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Const Immediate = 3. // -// Asm: VPANDN, CPU Feature: AVX2 -func (x Uint16x16) AndNot(y Uint16x16) Uint16x16 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) IsNan(y Float64x8) Mask64x8 -// Average computes the rounded average of corresponding elements. -// -// Asm: VPAVGW, CPU Feature: AVX2 -func (x Uint16x16) Average(y Uint16x16) Uint16x16 +/* Less */ -// Equal compares for equality, masked. -// Const Immediate = 0. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) Equal(y Uint16x16) Mask16x16 +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x4) Less(y Float32x4) Mask32x4 -// Greater compares for greater than. -// Const Immediate = 6. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) Greater(y Uint16x16) Mask16x16 +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x8) Less(y Float32x8) Mask32x8 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// Less compares for less than. +// Const Immediate = 1. 
// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) GreaterEqual(y Uint16x16) Mask16x16 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) Less(y Float32x16) Mask32x16 // Less compares for less than. // Const Immediate = 1. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) Less(y Uint16x16) Mask16x16 +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x2) Less(y Float64x2) Mask64x2 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) LessEqual(y Uint16x16) Mask16x16 +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x4) Less(y Float64x4) Mask64x4 -// PopCount counts the number of set bits in each element. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedPopCount(y Mask16x16) Uint16x16 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) Less(y Float64x8) Mask64x8 -// Max computes the maximum of corresponding elements. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPMAXUW, CPU Feature: AVX2 -func (x Uint16x16) Max(y Uint16x16) Uint16x16 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x16) Less(y Int8x16) Mask8x16 -// Min computes the minimum of corresponding elements. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPMINUW, CPU Feature: AVX2 -func (x Uint16x16) Min(y Uint16x16) Uint16x16 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x32) Less(y Int8x32) Mask8x32 -// MulHigh multiplies elements and stores the high part of the result. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPMULHUW, CPU Feature: AVX2 -func (x Uint16x16) MulHigh(y Uint16x16) Uint16x16 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x64) Less(y Int8x64) Mask8x64 -// NotEqual compares for inequality. -// Const Immediate = 4. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) NotEqual(y Uint16x16) Mask16x16 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x8) Less(y Int16x8) Mask16x8 -// Or performs a bitwise OR operation between two vectors. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPOR, CPU Feature: AVX2 -func (x Uint16x16) Or(y Uint16x16) Uint16x16 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x16) Less(y Int16x16) Mask16x16 -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPHADDW, CPU Feature: AVX2 -func (x Uint16x16) PairwiseAdd(y Uint16x16) Uint16x16 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x32) Less(y Int16x32) Mask16x32 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPHSUBW, CPU Feature: AVX2 -func (x Uint16x16) PairwiseSub(y Uint16x16) Uint16x16 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x4) Less(y Int32x4) Mask32x4 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// Less compares for less than. +// Const Immediate = 1. 
// -// Asm: VPADDSW, CPU Feature: AVX2 -func (x Uint16x16) SaturatedAdd(y Uint16x16) Uint16x16 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x8) Less(y Int32x8) Mask32x8 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPSUBSW, CPU Feature: AVX2 -func (x Uint16x16) SaturatedSub(y Uint16x16) Uint16x16 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x16) Less(y Int32x16) Mask32x16 -// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, -// yielding a vector of half as many elements with twice the input element size. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX -func (x Uint16x16) SaturatedUnsignedSignedPairDotProd(y Int16x16) Int16x16 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x2) Less(y Int64x2) Mask64x2 -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPSUBW, CPU Feature: AVX2 -func (x Uint16x16) Sub(y Uint16x16) Uint16x16 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x4) Less(y Int64x4) Mask64x4 -// Xor performs a bitwise XOR operation between two vectors. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPXOR, CPU Feature: AVX2 -func (x Uint16x16) Xor(y Uint16x16) Uint16x16 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x8) Less(y Int64x8) Mask64x8 -// Add adds corresponding elements of two vectors. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPADDW, CPU Feature: AVX512EVEX -func (x Uint16x32) Add(y Uint16x32) Uint16x32 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x16) Less(y Uint8x16) Mask8x16 -// Average computes the rounded average of corresponding elements. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPAVGW, CPU Feature: AVX512EVEX -func (x Uint16x32) Average(y Uint16x32) Uint16x32 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x32) Less(y Uint8x32) Mask8x32 -// Equal compares for equality, masked. -// Const Immediate = 0. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) Equal(y Uint16x32) Mask16x32 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x64) Less(y Uint8x64) Mask8x64 -// Greater compares for greater than. -// Const Immediate = 6. +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) Greater(y Uint16x32) Mask16x32 +func (x Uint16x8) Less(y Uint16x8) Mask16x8 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) GreaterEqual(y Uint16x32) Mask16x32 +func (x Uint16x16) Less(y Uint16x16) Mask16x16 // Less compares for less than. // Const Immediate = 1. @@ -2537,2499 +2454,2439 @@ func (x Uint16x32) GreaterEqual(y Uint16x32) Mask16x32 // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) Less(y Uint16x32) Mask16x32 -// LessEqual compares for less than or equal. -// Const Immediate = 2. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) LessEqual(y Uint16x32) Mask16x32 - -// PopCount counts the number of set bits in each element. 
-// -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedPopCount(y Mask16x32) Uint16x32 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUW, CPU Feature: AVX512EVEX -func (x Uint16x32) Max(y Uint16x32) Uint16x32 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUW, CPU Feature: AVX512EVEX -func (x Uint16x32) Min(y Uint16x32) Uint16x32 - -// MulHigh multiplies elements and stores the high part of the result, masked. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPMULHUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MulHigh(y Uint16x32) Uint16x32 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x4) Less(y Uint32x4) Mask32x4 -// NotEqual compares for inequality. -// Const Immediate = 4. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) NotEqual(y Uint16x32) Mask16x32 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x8) Less(y Uint32x8) Mask32x8 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPADDSW, CPU Feature: AVX512EVEX -func (x Uint16x32) SaturatedAdd(y Uint16x32) Uint16x32 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x16) Less(y Uint32x16) Mask32x16 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPSUBSW, CPU Feature: AVX512EVEX -func (x Uint16x32) SaturatedSub(y Uint16x32) Uint16x32 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) Less(y Uint64x2) Mask64x2 -// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, -// yielding a vector of half as many elements with twice the input element size. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX -func (x Uint16x32) SaturatedUnsignedSignedPairDotProd(y Int16x32) Int16x32 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) Less(y Uint64x4) Mask64x4 -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPSUBW, CPU Feature: AVX512EVEX -func (x Uint16x32) Sub(y Uint16x32) Uint16x32 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) Less(y Uint64x8) Mask64x8 -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDW, CPU Feature: AVX -func (x Uint16x8) Add(y Uint16x8) Uint16x8 +/* LessEqual */ -// And performs a bitwise AND operation between two vectors. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPAND, CPU Feature: AVX -func (x Uint16x8) And(y Uint16x8) Uint16x8 +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x4) LessEqual(y Float32x4) Mask32x4 -// AndNot performs a bitwise AND NOT operation between two vectors. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPANDN, CPU Feature: AVX -func (x Uint16x8) AndNot(y Uint16x8) Uint16x8 +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x8) LessEqual(y Float32x8) Mask32x8 -// Average computes the rounded average of corresponding elements. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPAVGW, CPU Feature: AVX -func (x Uint16x8) Average(y Uint16x8) Uint16x8 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) LessEqual(y Float32x16) Mask32x16 -// Equal compares for equality, masked. 
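// Example (illustrative sketch, not part of the generated file): the comparison
// methods above return mask values that the Masked* methods declared further
// down in this file consume. Assuming code in the same package as these
// declarations:
//
//	func addWhereLess(x, y, z Float64x2) Float64x2 {
//		m := x.Less(y)           // Mask64x2: lane i is set when x[i] < y[i]
//		return x.MaskedAdd(z, m) // add z to x only in the lanes selected by m
//	}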
-// Const Immediate = 0. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) Equal(y Uint16x8) Mask16x8 +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x2) LessEqual(y Float64x2) Mask64x2 -// Greater compares for greater than. -// Const Immediate = 6. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) Greater(y Uint16x8) Mask16x8 +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x4) LessEqual(y Float64x4) Mask64x4 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) GreaterEqual(y Uint16x8) Mask16x8 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) LessEqual(y Float64x8) Mask64x8 -// Less compares for less than. -// Const Immediate = 1. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) Less(y Uint16x8) Mask16x8 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x16) LessEqual(y Int8x16) Mask8x16 // LessEqual compares for less than or equal. // Const Immediate = 2. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) LessEqual(y Uint16x8) Mask16x8 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x32) LessEqual(y Int8x32) Mask8x32 -// PopCount counts the number of set bits in each element. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedPopCount(y Mask16x8) Uint16x8 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x64) LessEqual(y Int8x64) Mask8x64 -// Max computes the maximum of corresponding elements. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPMAXUW, CPU Feature: AVX -func (x Uint16x8) Max(y Uint16x8) Uint16x8 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x8) LessEqual(y Int16x8) Mask16x8 -// Min computes the minimum of corresponding elements. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPMINUW, CPU Feature: AVX -func (x Uint16x8) Min(y Uint16x8) Uint16x8 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x16) LessEqual(y Int16x16) Mask16x16 -// MulHigh multiplies elements and stores the high part of the result. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPMULHUW, CPU Feature: AVX -func (x Uint16x8) MulHigh(y Uint16x8) Uint16x8 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x32) LessEqual(y Int16x32) Mask16x32 -// NotEqual compares for inequality. -// Const Immediate = 4. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) NotEqual(y Uint16x8) Mask16x8 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x4) LessEqual(y Int32x4) Mask32x4 -// Or performs a bitwise OR operation between two vectors. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPOR, CPU Feature: AVX -func (x Uint16x8) Or(y Uint16x8) Uint16x8 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x8) LessEqual(y Int32x8) Mask32x8 -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. 
+// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPHADDW, CPU Feature: AVX -func (x Uint16x8) PairwiseAdd(y Uint16x8) Uint16x8 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x16) LessEqual(y Int32x16) Mask32x16 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPHSUBW, CPU Feature: AVX -func (x Uint16x8) PairwiseSub(y Uint16x8) Uint16x8 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x2) LessEqual(y Int64x2) Mask64x2 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPADDSW, CPU Feature: AVX -func (x Uint16x8) SaturatedAdd(y Uint16x8) Uint16x8 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x4) LessEqual(y Int64x4) Mask64x4 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPSUBSW, CPU Feature: AVX -func (x Uint16x8) SaturatedSub(y Uint16x8) Uint16x8 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x8) LessEqual(y Int64x8) Mask64x8 -// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, -// yielding a vector of half as many elements with twice the input element size. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX -func (x Uint16x8) SaturatedUnsignedSignedPairDotProd(y Int16x8) Int16x8 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x16) LessEqual(y Uint8x16) Mask8x16 -// Sub subtracts corresponding elements of two vectors. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPSUBW, CPU Feature: AVX -func (x Uint16x8) Sub(y Uint16x8) Uint16x8 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x32) LessEqual(y Uint8x32) Mask8x32 -// Xor performs a bitwise XOR operation between two vectors. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPXOR, CPU Feature: AVX -func (x Uint16x8) Xor(y Uint16x8) Uint16x8 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x64) LessEqual(y Uint8x64) Mask8x64 -// Add adds corresponding elements of two vectors. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPADDD, CPU Feature: AVX512EVEX -func (x Uint32x16) Add(y Uint32x16) Uint32x16 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x8) LessEqual(y Uint16x8) Mask16x8 -// And performs a masked bitwise AND operation between two vectors. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPANDD, CPU Feature: AVX512EVEX -func (x Uint32x16) And(y Uint32x16) Uint32x16 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x16) LessEqual(y Uint16x16) Mask16x16 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPANDND, CPU Feature: AVX512EVEX -func (x Uint32x16) AndNot(y Uint32x16) Uint32x16 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x32) LessEqual(y Uint16x32) Mask16x32 -// Equal compares for equality, masked. -// Const Immediate = 0. +// LessEqual compares for less than or equal. +// Const Immediate = 2. 
// // Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) Equal(y Uint32x16) Mask32x16 +func (x Uint32x4) LessEqual(y Uint32x4) Mask32x4 -// Greater compares for greater than. -// Const Immediate = 6. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) Greater(y Uint32x16) Mask32x16 +func (x Uint32x8) LessEqual(y Uint32x8) Mask32x8 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) GreaterEqual(y Uint32x16) Mask32x16 +func (x Uint32x16) LessEqual(y Uint32x16) Mask32x16 -// Less compares for less than. -// Const Immediate = 1. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) Less(y Uint32x16) Mask32x16 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) LessEqual(y Uint64x2) Mask64x2 // LessEqual compares for less than or equal. // Const Immediate = 2. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) LessEqual(y Uint32x16) Mask32x16 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) LessEqual(y Uint64x4) Mask64x4 -// PopCount counts the number of set bits in each element. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedPopCount(y Mask32x16) Uint32x16 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) LessEqual(y Uint64x8) Mask64x8 -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUD, CPU Feature: AVX512EVEX -func (x Uint32x16) Max(y Uint32x16) Uint32x16 +/* MaskedAbsolute */ -// Min computes the minimum of corresponding elements. +// Absolute computes the absolute value of each element. // -// Asm: VPMINUD, CPU Feature: AVX512EVEX -func (x Uint32x16) Min(y Uint32x16) Uint32x16 +// Asm: VPABSB, CPU Feature: AVX512EVEX +func (x Int8x16) MaskedAbsolute(y Mask8x16) Int8x16 -// NotEqual compares for inequality. -// Const Immediate = 4. +// Absolute computes the absolute value of each element. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) NotEqual(y Uint32x16) Mask32x16 +// Asm: VPABSB, CPU Feature: AVX512EVEX +func (x Int8x32) MaskedAbsolute(y Mask8x32) Int8x32 -// Or performs a masked bitwise OR operation between two vectors. +// Absolute computes the absolute value of each element. // -// Asm: VPORD, CPU Feature: AVX512EVEX -func (x Uint32x16) Or(y Uint32x16) Uint32x16 +// Asm: VPABSB, CPU Feature: AVX512EVEX +func (x Int8x64) MaskedAbsolute(y Mask8x64) Int8x64 -// Sub subtracts corresponding elements of two vectors. +// Absolute computes the absolute value of each element. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX -func (x Uint32x16) Sub(y Uint32x16) Uint32x16 +// Asm: VPABSW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedAbsolute(y Mask16x8) Int16x8 -// Xor performs a masked bitwise XOR operation between two vectors. +// Absolute computes the absolute value of each element. // -// Asm: VPXORD, CPU Feature: AVX512EVEX -func (x Uint32x16) Xor(y Uint32x16) Uint32x16 +// Asm: VPABSW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedAbsolute(y Mask16x16) Int16x16 -// Add adds corresponding elements of two vectors. +// Absolute computes the absolute value of each element. 
// -// Asm: VPADDD, CPU Feature: AVX -func (x Uint32x4) Add(y Uint32x4) Uint32x4 +// Asm: VPABSW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedAbsolute(y Mask16x32) Int16x32 -// And performs a bitwise AND operation between two vectors. +// Absolute computes the absolute value of each element. // -// Asm: VPAND, CPU Feature: AVX -func (x Uint32x4) And(y Uint32x4) Uint32x4 +// Asm: VPABSD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedAbsolute(y Mask32x4) Int32x4 -// AndNot performs a bitwise AND NOT operation between two vectors. +// Absolute computes the absolute value of each element. // -// Asm: VPANDN, CPU Feature: AVX -func (x Uint32x4) AndNot(y Uint32x4) Uint32x4 +// Asm: VPABSD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedAbsolute(y Mask32x8) Int32x8 -// Equal compares for equality, masked. -// Const Immediate = 0. +// Absolute computes the absolute value of each element. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) Equal(y Uint32x4) Mask32x4 +// Asm: VPABSD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedAbsolute(y Mask32x16) Int32x16 -// Greater compares for greater than. -// Const Immediate = 6. +// Absolute computes the absolute value of each element. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) Greater(y Uint32x4) Mask32x4 +// Asm: VPABSQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedAbsolute(y Mask64x2) Int64x2 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// Absolute computes the absolute value of each element. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) GreaterEqual(y Uint32x4) Mask32x4 +// Asm: VPABSQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedAbsolute(y Mask64x4) Int64x4 -// Less compares for less than. -// Const Immediate = 1. +// Absolute computes the absolute value of each element. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) Less(y Uint32x4) Mask32x4 +// Asm: VPABSQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedAbsolute(y Mask64x8) Int64x8 -// LessEqual compares for less than or equal. -// Const Immediate = 2. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) LessEqual(y Uint32x4) Mask32x4 +/* MaskedAdd */ -// PopCount counts the number of set bits in each element. +// Add adds corresponding elements of two vectors. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedPopCount(y Mask32x4) Uint32x4 +// Asm: VADDPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedAdd(y Float32x4, z Mask32x4) Float32x4 -// Max computes the maximum of corresponding elements. +// Add adds corresponding elements of two vectors. // -// Asm: VPMAXUD, CPU Feature: AVX -func (x Uint32x4) Max(y Uint32x4) Uint32x4 +// Asm: VADDPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedAdd(y Float32x8, z Mask32x8) Float32x8 -// Min computes the minimum of corresponding elements. +// Add adds corresponding elements of two vectors. // -// Asm: VPMINUD, CPU Feature: AVX -func (x Uint32x4) Min(y Uint32x4) Uint32x4 +// Asm: VADDPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedAdd(y Float32x16, z Mask32x16) Float32x16 -// MulEvenWiden multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. +// Add adds corresponding elements of two vectors. // -// Asm: VPMULUDQ, CPU Feature: AVX -func (x Uint32x4) MulEvenWiden(y Uint32x4) Uint64x2 +// Asm: VADDPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedAdd(y Float64x2, z Mask64x2) Float64x2 -// NotEqual compares for inequality. -// Const Immediate = 4. 
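// Example (sketch under the same same-package assumption as above): a
// comparison mask gating MaskedAbsolute.
//
//	func absWhereBelow(x, threshold Int32x4) Int32x4 {
//		m := x.Less(threshold)     // Mask32x4: lanes where x[i] < threshold[i]
//		return x.MaskedAbsolute(m) // absolute value in the lanes selected by m
//	}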
+// Add adds corresponding elements of two vectors. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) NotEqual(y Uint32x4) Mask32x4 +// Asm: VADDPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedAdd(y Float64x4, z Mask64x4) Float64x4 -// Or performs a bitwise OR operation between two vectors. +// Add adds corresponding elements of two vectors. // -// Asm: VPOR, CPU Feature: AVX -func (x Uint32x4) Or(y Uint32x4) Uint32x4 +// Asm: VADDPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedAdd(y Float64x8, z Mask64x8) Float64x8 -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// Add adds corresponding elements of two vectors. // -// Asm: VPHADDD, CPU Feature: AVX -func (x Uint32x4) PairwiseAdd(y Uint32x4) Uint32x4 +// Asm: VPADDB, CPU Feature: AVX512EVEX +func (x Int8x16) MaskedAdd(y Int8x16, z Mask8x16) Int8x16 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// Add adds corresponding elements of two vectors. // -// Asm: VPHSUBD, CPU Feature: AVX -func (x Uint32x4) PairwiseSub(y Uint32x4) Uint32x4 +// Asm: VPADDB, CPU Feature: AVX512EVEX +func (x Int8x32) MaskedAdd(y Int8x32, z Mask8x32) Int8x32 -// Sub subtracts corresponding elements of two vectors. +// Add adds corresponding elements of two vectors. // -// Asm: VPSUBD, CPU Feature: AVX -func (x Uint32x4) Sub(y Uint32x4) Uint32x4 +// Asm: VPADDB, CPU Feature: AVX512EVEX +func (x Int8x64) MaskedAdd(y Int8x64, z Mask8x64) Int8x64 -// Xor performs a bitwise XOR operation between two vectors. +// Add adds corresponding elements of two vectors. // -// Asm: VPXOR, CPU Feature: AVX -func (x Uint32x4) Xor(y Uint32x4) Uint32x4 +// Asm: VPADDW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedAdd(y Int16x8, z Mask16x8) Int16x8 // Add adds corresponding elements of two vectors. // -// Asm: VPADDD, CPU Feature: AVX2 -func (x Uint32x8) Add(y Uint32x8) Uint32x8 +// Asm: VPADDW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedAdd(y Int16x16, z Mask16x16) Int16x16 -// And performs a bitwise AND operation between two vectors. +// Add adds corresponding elements of two vectors. // -// Asm: VPAND, CPU Feature: AVX2 -func (x Uint32x8) And(y Uint32x8) Uint32x8 +// Asm: VPADDW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedAdd(y Int16x32, z Mask16x32) Int16x32 -// AndNot performs a bitwise AND NOT operation between two vectors. +// Add adds corresponding elements of two vectors. // -// Asm: VPANDN, CPU Feature: AVX2 -func (x Uint32x8) AndNot(y Uint32x8) Uint32x8 +// Asm: VPADDD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedAdd(y Int32x4, z Mask32x4) Int32x4 -// Equal compares for equality, masked. -// Const Immediate = 0. +// Add adds corresponding elements of two vectors. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) Equal(y Uint32x8) Mask32x8 +// Asm: VPADDD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedAdd(y Int32x8, z Mask32x8) Int32x8 -// Greater compares for greater than. -// Const Immediate = 6. +// Add adds corresponding elements of two vectors. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) Greater(y Uint32x8) Mask32x8 +// Asm: VPADDD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedAdd(y Int32x16, z Mask32x16) Int32x16 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. 
+// Add adds corresponding elements of two vectors. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) GreaterEqual(y Uint32x8) Mask32x8 +// Asm: VPADDQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedAdd(y Int64x2, z Mask64x2) Int64x2 -// Less compares for less than. -// Const Immediate = 1. +// Add adds corresponding elements of two vectors. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) Less(y Uint32x8) Mask32x8 +// Asm: VPADDQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedAdd(y Int64x4, z Mask64x4) Int64x4 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// Add adds corresponding elements of two vectors. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) LessEqual(y Uint32x8) Mask32x8 +// Asm: VPADDQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedAdd(y Int64x8, z Mask64x8) Int64x8 -// PopCount counts the number of set bits in each element. +// Add adds corresponding elements of two vectors. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedPopCount(y Mask32x8) Uint32x8 +// Asm: VPADDB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedAdd(y Uint8x16, z Mask8x16) Uint8x16 -// Max computes the maximum of corresponding elements. +// Add adds corresponding elements of two vectors. // -// Asm: VPMAXUD, CPU Feature: AVX2 -func (x Uint32x8) Max(y Uint32x8) Uint32x8 +// Asm: VPADDB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaskedAdd(y Uint8x32, z Mask8x32) Uint8x32 -// Min computes the minimum of corresponding elements. +// Add adds corresponding elements of two vectors. // -// Asm: VPMINUD, CPU Feature: AVX2 -func (x Uint32x8) Min(y Uint32x8) Uint32x8 +// Asm: VPADDB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedAdd(y Uint8x64, z Mask8x64) Uint8x64 -// MulEvenWiden multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. +// Add adds corresponding elements of two vectors. // -// Asm: VPMULUDQ, CPU Feature: AVX2 -func (x Uint32x8) MulEvenWiden(y Uint32x8) Uint64x4 +// Asm: VPADDW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedAdd(y Uint16x8, z Mask16x8) Uint16x8 -// NotEqual compares for inequality. -// Const Immediate = 4. +// Add adds corresponding elements of two vectors. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) NotEqual(y Uint32x8) Mask32x8 +// Asm: VPADDW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedAdd(y Uint16x16, z Mask16x16) Uint16x16 -// Or performs a bitwise OR operation between two vectors. +// Add adds corresponding elements of two vectors. // -// Asm: VPOR, CPU Feature: AVX2 -func (x Uint32x8) Or(y Uint32x8) Uint32x8 - -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. -// -// Asm: VPHADDD, CPU Feature: AVX2 -func (x Uint32x8) PairwiseAdd(y Uint32x8) Uint32x8 +// Asm: VPADDW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedAdd(y Uint16x32, z Mask16x32) Uint16x32 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// Add adds corresponding elements of two vectors. // -// Asm: VPHSUBD, CPU Feature: AVX2 -func (x Uint32x8) PairwiseSub(y Uint32x8) Uint32x8 +// Asm: VPADDD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedAdd(y Uint32x4, z Mask32x4) Uint32x4 -// Sub subtracts corresponding elements of two vectors. 
+// Add adds corresponding elements of two vectors. // -// Asm: VPSUBD, CPU Feature: AVX2 -func (x Uint32x8) Sub(y Uint32x8) Uint32x8 +// Asm: VPADDD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedAdd(y Uint32x8, z Mask32x8) Uint32x8 -// Xor performs a bitwise XOR operation between two vectors. +// Add adds corresponding elements of two vectors. // -// Asm: VPXOR, CPU Feature: AVX2 -func (x Uint32x8) Xor(y Uint32x8) Uint32x8 +// Asm: VPADDD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedAdd(y Uint32x16, z Mask32x16) Uint32x16 // Add adds corresponding elements of two vectors. // -// Asm: VPADDQ, CPU Feature: AVX -func (x Uint64x2) Add(y Uint64x2) Uint64x2 +// Asm: VPADDQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedAdd(y Uint64x2, z Mask64x2) Uint64x2 -// And performs a bitwise AND operation between two vectors. +// Add adds corresponding elements of two vectors. // -// Asm: VPAND, CPU Feature: AVX -func (x Uint64x2) And(y Uint64x2) Uint64x2 +// Asm: VPADDQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedAdd(y Uint64x4, z Mask64x4) Uint64x4 -// AndNot performs a bitwise AND NOT operation between two vectors. +// Add adds corresponding elements of two vectors. // -// Asm: VPANDN, CPU Feature: AVX -func (x Uint64x2) AndNot(y Uint64x2) Uint64x2 +// Asm: VPADDQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedAdd(y Uint64x8, z Mask64x8) Uint64x8 -// Equal compares for equality, masked. -// Const Immediate = 0. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) Equal(y Uint64x2) Mask64x2 +/* MaskedAnd */ -// Greater compares for greater than. -// Const Immediate = 6. +// And performs a masked bitwise AND operation between two vectors. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) Greater(y Uint64x2) Mask64x2 +// Asm: VANDPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedAnd(y Float32x4, z Mask32x4) Float32x4 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// And performs a masked bitwise AND operation between two vectors. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) GreaterEqual(y Uint64x2) Mask64x2 +// Asm: VANDPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedAnd(y Float32x8, z Mask32x8) Float32x8 -// Less compares for less than. -// Const Immediate = 1. +// And performs a masked bitwise AND operation between two vectors. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) Less(y Uint64x2) Mask64x2 +// Asm: VANDPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedAnd(y Float32x16, z Mask32x16) Float32x16 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// And performs a masked bitwise AND operation between two vectors. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) LessEqual(y Uint64x2) Mask64x2 +// Asm: VANDPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedAnd(y Float64x2, z Mask64x2) Float64x2 -// PopCount counts the number of set bits in each element. +// And performs a masked bitwise AND operation between two vectors. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedPopCount(y Mask64x2) Uint64x2 +// Asm: VANDPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedAnd(y Float64x4, z Mask64x4) Float64x4 -// Max computes the maximum of corresponding elements. +// And performs a masked bitwise AND operation between two vectors. 
// -// Asm: VPMAXUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) Max(y Uint64x2) Uint64x2 +// Asm: VANDPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedAnd(y Float64x8, z Mask64x8) Float64x8 -// Min computes the minimum of corresponding elements. +// And performs a masked bitwise AND operation between two vectors. // -// Asm: VPMINUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) Min(y Uint64x2) Uint64x2 +// Asm: VPANDD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedAnd(y Int32x4, z Mask32x4) Int32x4 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. +// And performs a masked bitwise AND operation between two vectors. // -// Asm: VPMULUDQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MulEvenWiden(y Uint64x2) Uint64x2 +// Asm: VPANDD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedAnd(y Int32x8, z Mask32x8) Int32x8 -// NotEqual compares for inequality. -// Const Immediate = 4. +// And performs a masked bitwise AND operation between two vectors. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) NotEqual(y Uint64x2) Mask64x2 +// Asm: VPANDD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedAnd(y Int32x16, z Mask32x16) Int32x16 -// Or performs a bitwise OR operation between two vectors. +// And performs a masked bitwise AND operation between two vectors. // -// Asm: VPOR, CPU Feature: AVX -func (x Uint64x2) Or(y Uint64x2) Uint64x2 +// Asm: VPANDQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedAnd(y Int64x2, z Mask64x2) Int64x2 -// Sub subtracts corresponding elements of two vectors. +// And performs a masked bitwise AND operation between two vectors. // -// Asm: VPSUBQ, CPU Feature: AVX -func (x Uint64x2) Sub(y Uint64x2) Uint64x2 +// Asm: VPANDQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedAnd(y Int64x4, z Mask64x4) Int64x4 -// Xor performs a bitwise XOR operation between two vectors. +// And performs a masked bitwise AND operation between two vectors. // -// Asm: VPXOR, CPU Feature: AVX -func (x Uint64x2) Xor(y Uint64x2) Uint64x2 +// Asm: VPANDQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedAnd(y Int64x8, z Mask64x8) Int64x8 -// Add adds corresponding elements of two vectors. +// And performs a masked bitwise AND operation between two vectors. // -// Asm: VPADDQ, CPU Feature: AVX2 -func (x Uint64x4) Add(y Uint64x4) Uint64x4 +// Asm: VPANDD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedAnd(y Uint32x4, z Mask32x4) Uint32x4 -// And performs a bitwise AND operation between two vectors. +// And performs a masked bitwise AND operation between two vectors. // -// Asm: VPAND, CPU Feature: AVX2 -func (x Uint64x4) And(y Uint64x4) Uint64x4 +// Asm: VPANDD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedAnd(y Uint32x8, z Mask32x8) Uint32x8 -// AndNot performs a bitwise AND NOT operation between two vectors. +// And performs a masked bitwise AND operation between two vectors. // -// Asm: VPANDN, CPU Feature: AVX2 -func (x Uint64x4) AndNot(y Uint64x4) Uint64x4 +// Asm: VPANDD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedAnd(y Uint32x16, z Mask32x16) Uint32x16 -// Equal compares for equality, masked. -// Const Immediate = 0. +// And performs a masked bitwise AND operation between two vectors. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) Equal(y Uint64x4) Mask64x4 +// Asm: VPANDQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedAnd(y Uint64x2, z Mask64x2) Uint64x2 -// Greater compares for greater than. -// Const Immediate = 6. 
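// Example (same-package sketch): the masked bitwise operations take the mask
// as their final argument; here it comes from an unsigned comparison.
//
//	func andWhereSmall(x, bits, limit Uint32x4) Uint32x4 {
//		m := x.LessEqual(limit)     // Mask32x4: lanes where x[i] <= limit[i]
//		return x.MaskedAnd(bits, m) // x AND bits in the lanes selected by m
//	}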
+// And performs a masked bitwise AND operation between two vectors. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) Greater(y Uint64x4) Mask64x4 +// Asm: VPANDQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedAnd(y Uint64x4, z Mask64x4) Uint64x4 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// And performs a masked bitwise AND operation between two vectors. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) GreaterEqual(y Uint64x4) Mask64x4 +// Asm: VPANDQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedAnd(y Uint64x8, z Mask64x8) Uint64x8 -// Less compares for less than. -// Const Immediate = 1. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) Less(y Uint64x4) Mask64x4 +/* MaskedAndNot */ -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) LessEqual(y Uint64x4) Mask64x4 +// Asm: VANDNPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedAndNot(y Float32x4, z Mask32x4) Float32x4 -// PopCount counts the number of set bits in each element. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedPopCount(y Mask64x4) Uint64x4 +// Asm: VANDNPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedAndNot(y Float32x8, z Mask32x8) Float32x8 -// Max computes the maximum of corresponding elements. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPMAXUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) Max(y Uint64x4) Uint64x4 +// Asm: VANDNPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedAndNot(y Float32x16, z Mask32x16) Float32x16 -// Min computes the minimum of corresponding elements. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPMINUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) Min(y Uint64x4) Uint64x4 +// Asm: VANDNPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedAndNot(y Float64x2, z Mask64x2) Float64x2 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPMULUDQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MulEvenWiden(y Uint64x4) Uint64x4 +// Asm: VANDNPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedAndNot(y Float64x4, z Mask64x4) Float64x4 -// NotEqual compares for inequality. -// Const Immediate = 4. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) NotEqual(y Uint64x4) Mask64x4 +// Asm: VANDNPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedAndNot(y Float64x8, z Mask64x8) Float64x8 -// Or performs a bitwise OR operation between two vectors. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPOR, CPU Feature: AVX2 -func (x Uint64x4) Or(y Uint64x4) Uint64x4 +// Asm: VPANDND, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedAndNot(y Int32x4, z Mask32x4) Int32x4 -// Sub subtracts corresponding elements of two vectors. +// AndNot performs a masked bitwise AND NOT operation between two vectors. 
// -// Asm: VPSUBQ, CPU Feature: AVX2 -func (x Uint64x4) Sub(y Uint64x4) Uint64x4 +// Asm: VPANDND, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedAndNot(y Int32x8, z Mask32x8) Int32x8 -// Xor performs a bitwise XOR operation between two vectors. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPXOR, CPU Feature: AVX2 -func (x Uint64x4) Xor(y Uint64x4) Uint64x4 +// Asm: VPANDND, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedAndNot(y Int32x16, z Mask32x16) Int32x16 -// Add adds corresponding elements of two vectors. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPADDQ, CPU Feature: AVX512EVEX -func (x Uint64x8) Add(y Uint64x8) Uint64x8 +// Asm: VPANDNQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedAndNot(y Int64x2, z Mask64x2) Int64x2 -// And performs a masked bitwise AND operation between two vectors. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPANDQ, CPU Feature: AVX512EVEX -func (x Uint64x8) And(y Uint64x8) Uint64x8 +// Asm: VPANDNQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedAndNot(y Int64x4, z Mask64x4) Int64x4 // AndNot performs a masked bitwise AND NOT operation between two vectors. // // Asm: VPANDNQ, CPU Feature: AVX512EVEX -func (x Uint64x8) AndNot(y Uint64x8) Uint64x8 +func (x Int64x8) MaskedAndNot(y Int64x8, z Mask64x8) Int64x8 -// Equal compares for equality, masked. -// Const Immediate = 0. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) Equal(y Uint64x8) Mask64x8 +// Asm: VPANDND, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedAndNot(y Uint32x4, z Mask32x4) Uint32x4 -// Greater compares for greater than. -// Const Immediate = 6. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) Greater(y Uint64x8) Mask64x8 +// Asm: VPANDND, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedAndNot(y Uint32x8, z Mask32x8) Uint32x8 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) GreaterEqual(y Uint64x8) Mask64x8 +// Asm: VPANDND, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedAndNot(y Uint32x16, z Mask32x16) Uint32x16 -// Less compares for less than. -// Const Immediate = 1. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) Less(y Uint64x8) Mask64x8 +// Asm: VPANDNQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedAndNot(y Uint64x2, z Mask64x2) Uint64x2 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) LessEqual(y Uint64x8) Mask64x8 +// Asm: VPANDNQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedAndNot(y Uint64x4, z Mask64x4) Uint64x4 -// PopCount counts the number of set bits in each element. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedPopCount(y Mask64x8) Uint64x8 +// Asm: VPANDNQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedAndNot(y Uint64x8, z Mask64x8) Uint64x8 -// Max computes the maximum of corresponding elements. 
-// -// Asm: VPMAXUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) Max(y Uint64x8) Uint64x8 +/* MaskedApproximateReciprocal */ -// Min computes the minimum of corresponding elements. +// ApproximateReciprocal computes an approximate reciprocal of each element. // -// Asm: VPMINUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) Min(y Uint64x8) Uint64x8 +// Asm: VRCP14PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedApproximateReciprocal(y Mask32x4) Float32x4 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. +// ApproximateReciprocal computes an approximate reciprocal of each element. // -// Asm: VPMULUDQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MulEvenWiden(y Uint64x8) Uint64x8 +// Asm: VRCP14PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedApproximateReciprocal(y Mask32x8) Float32x8 -// NotEqual compares for inequality. -// Const Immediate = 4. +// ApproximateReciprocal computes an approximate reciprocal of each element. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) NotEqual(y Uint64x8) Mask64x8 +// Asm: VRCP14PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedApproximateReciprocal(y Mask32x16) Float32x16 -// Or performs a masked bitwise OR operation between two vectors. +// ApproximateReciprocal computes an approximate reciprocal of each element. // -// Asm: VPORQ, CPU Feature: AVX512EVEX -func (x Uint64x8) Or(y Uint64x8) Uint64x8 +// Asm: VRCP14PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedApproximateReciprocal(y Mask64x2) Float64x2 -// Sub subtracts corresponding elements of two vectors. +// ApproximateReciprocal computes an approximate reciprocal of each element. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX -func (x Uint64x8) Sub(y Uint64x8) Uint64x8 +// Asm: VRCP14PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedApproximateReciprocal(y Mask64x4) Float64x4 -// Xor performs a masked bitwise XOR operation between two vectors. +// ApproximateReciprocal computes an approximate reciprocal of each element. // -// Asm: VPXORQ, CPU Feature: AVX512EVEX -func (x Uint64x8) Xor(y Uint64x8) Uint64x8 +// Asm: VRCP14PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedApproximateReciprocal(y Mask64x8) Float64x8 -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDB, CPU Feature: AVX -func (x Uint8x16) Add(y Uint8x16) Uint8x16 +/* MaskedApproximateReciprocalOfSqrt */ -// And performs a bitwise AND operation between two vectors. +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. // -// Asm: VPAND, CPU Feature: AVX -func (x Uint8x16) And(y Uint8x16) Uint8x16 +// Asm: VRSQRT14PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedApproximateReciprocalOfSqrt(y Mask32x4) Float32x4 -// AndNot performs a bitwise AND NOT operation between two vectors. +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. // -// Asm: VPANDN, CPU Feature: AVX -func (x Uint8x16) AndNot(y Uint8x16) Uint8x16 +// Asm: VRSQRT14PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedApproximateReciprocalOfSqrt(y Mask32x8) Float32x8 -// Average computes the rounded average of corresponding elements. +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. 
// -// Asm: VPAVGB, CPU Feature: AVX -func (x Uint8x16) Average(y Uint8x16) Uint8x16 +// Asm: VRSQRT14PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedApproximateReciprocalOfSqrt(y Mask32x16) Float32x16 -// Equal compares for equality, masked. -// Const Immediate = 0. +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) Equal(y Uint8x16) Mask8x16 +// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedApproximateReciprocalOfSqrt(y Mask64x2) Float64x2 -// Greater compares for greater than. -// Const Immediate = 6. +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) Greater(y Uint8x16) Mask8x16 +// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedApproximateReciprocalOfSqrt(y Mask64x4) Float64x4 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) GreaterEqual(y Uint8x16) Mask8x16 +// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedApproximateReciprocalOfSqrt(y Mask64x8) Float64x8 -// Less compares for less than. -// Const Immediate = 1. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) Less(y Uint8x16) Mask8x16 +/* MaskedAverage */ -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// Average computes the rounded average of corresponding elements. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) LessEqual(y Uint8x16) Mask8x16 +// Asm: VPAVGB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedAverage(y Uint8x16, z Mask8x16) Uint8x16 -// PopCount counts the number of set bits in each element. +// Average computes the rounded average of corresponding elements. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedPopCount(y Mask8x16) Uint8x16 +// Asm: VPAVGB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaskedAverage(y Uint8x32, z Mask8x32) Uint8x32 -// Max computes the maximum of corresponding elements. +// Average computes the rounded average of corresponding elements. // -// Asm: VPMAXUB, CPU Feature: AVX -func (x Uint8x16) Max(y Uint8x16) Uint8x16 +// Asm: VPAVGB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedAverage(y Uint8x64, z Mask8x64) Uint8x64 -// Min computes the minimum of corresponding elements. +// Average computes the rounded average of corresponding elements. // -// Asm: VPMINUB, CPU Feature: AVX -func (x Uint8x16) Min(y Uint8x16) Uint8x16 +// Asm: VPAVGW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedAverage(y Uint16x8, z Mask16x8) Uint16x8 -// NotEqual compares for inequality. -// Const Immediate = 4. +// Average computes the rounded average of corresponding elements. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) NotEqual(y Uint8x16) Mask8x16 +// Asm: VPAVGW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedAverage(y Uint16x16, z Mask16x16) Uint16x16 -// Or performs a bitwise OR operation between two vectors. +// Average computes the rounded average of corresponding elements. 
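// Example (same-package sketch): restricting the approximate reciprocal to
// lanes that are safely away from zero.
//
//	func guardedReciprocal(x, eps Float32x4) Float32x4 {
//		m := eps.LessEqual(x)                   // Mask32x4: lanes where x[i] >= eps[i]
//		return x.MaskedApproximateReciprocal(m) // approximate 1/x in the lanes selected by m
//	}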
// -// Asm: VPOR, CPU Feature: AVX -func (x Uint8x16) Or(y Uint8x16) Uint8x16 +// Asm: VPAVGW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedAverage(y Uint16x32, z Mask16x32) Uint16x32 -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSB, CPU Feature: AVX -func (x Uint8x16) SaturatedAdd(y Uint8x16) Uint8x16 +/* MaskedCeilSuppressExceptionWithPrecision */ -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. +// Const Immediate = 10. // -// Asm: VPSUBSB, CPU Feature: AVX -func (x Uint8x16) SaturatedSub(y Uint8x16) Uint8x16 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 -// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, -// yielding a vector of half as many elements with twice the input element size. +// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. +// Const Immediate = 10. // -// Asm: VPMADDUBSW, CPU Feature: AVX -func (x Uint8x16) SaturatedUnsignedSignedPairDotProd(y Int8x16) Int16x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 -// Sub subtracts corresponding elements of two vectors. +// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. +// Const Immediate = 10. // -// Asm: VPSUBB, CPU Feature: AVX -func (x Uint8x16) Sub(y Uint8x16) Uint8x16 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 -// Xor performs a bitwise XOR operation between two vectors. +// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. +// Const Immediate = 10. // -// Asm: VPXOR, CPU Feature: AVX -func (x Uint8x16) Xor(y Uint8x16) Uint8x16 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 -// Add adds corresponding elements of two vectors. +// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. +// Const Immediate = 10. // -// Asm: VPADDB, CPU Feature: AVX2 -func (x Uint8x32) Add(y Uint8x32) Uint8x32 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 -// And performs a bitwise AND operation between two vectors. +// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. +// Const Immediate = 10. // -// Asm: VPAND, CPU Feature: AVX2 -func (x Uint8x32) And(y Uint8x32) Uint8x32 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 -// AndNot performs a bitwise AND NOT operation between two vectors. -// -// Asm: VPANDN, CPU Feature: AVX2 -func (x Uint8x32) AndNot(y Uint8x32) Uint8x32 +/* MaskedCeilWithPrecision */ -// Average computes the rounded average of corresponding elements. +// CeilWithPrecision rounds elements up with specified precision, masked. +// Const Immediate = 2. 
// -// Asm: VPAVGB, CPU Feature: AVX2 -func (x Uint8x32) Average(y Uint8x32) Uint8x32 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedCeilWithPrecision(imm uint8, y Mask32x4) Float32x4 -// Equal compares for equality, masked. -// Const Immediate = 0. +// CeilWithPrecision rounds elements up with specified precision, masked. +// Const Immediate = 2. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) Equal(y Uint8x32) Mask8x32 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedCeilWithPrecision(imm uint8, y Mask32x8) Float32x8 -// Greater compares for greater than. -// Const Immediate = 6. +// CeilWithPrecision rounds elements up with specified precision, masked. +// Const Immediate = 2. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) Greater(y Uint8x32) Mask8x32 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedCeilWithPrecision(imm uint8, y Mask32x16) Float32x16 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// CeilWithPrecision rounds elements up with specified precision, masked. +// Const Immediate = 2. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) GreaterEqual(y Uint8x32) Mask8x32 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedCeilWithPrecision(imm uint8, y Mask64x2) Float64x2 -// Less compares for less than. -// Const Immediate = 1. +// CeilWithPrecision rounds elements up with specified precision, masked. +// Const Immediate = 2. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) Less(y Uint8x32) Mask8x32 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedCeilWithPrecision(imm uint8, y Mask64x4) Float64x4 -// LessEqual compares for less than or equal. +// CeilWithPrecision rounds elements up with specified precision, masked. // Const Immediate = 2. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) LessEqual(y Uint8x32) Mask8x32 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedCeilWithPrecision(imm uint8, y Mask64x8) Float64x8 -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedPopCount(y Mask8x32) Uint8x32 +/* MaskedDiffWithCeilSuppressExceptionWithPrecision */ -// Max computes the maximum of corresponding elements. +// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. +// Const Immediate = 10. // -// Asm: VPMAXUB, CPU Feature: AVX2 -func (x Uint8x32) Max(y Uint8x32) Uint8x32 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 -// Min computes the minimum of corresponding elements. +// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. +// Const Immediate = 10. // -// Asm: VPMINUB, CPU Feature: AVX2 -func (x Uint8x32) Min(y Uint8x32) Uint8x32 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 -// NotEqual compares for inequality. -// Const Immediate = 4. +// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. +// Const Immediate = 10. 
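// Example (same-package sketch; reading imm as the number of binary fraction
// bits to keep follows the VRNDSCALEPD encoding and is an assumption, not
// something stated in this file):
//
//	func ceilToHalves(x Float64x2, m Mask64x2) Float64x2 {
//		// imm = 1 is assumed to round up to multiples of 0.5 in the selected lanes.
//		return x.MaskedCeilWithPrecision(1, m)
//	}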
// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) NotEqual(y Uint8x32) Mask8x32 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 -// Or performs a bitwise OR operation between two vectors. +// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. +// Const Immediate = 10. // -// Asm: VPOR, CPU Feature: AVX2 -func (x Uint8x32) Or(y Uint8x32) Uint8x32 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. +// Const Immediate = 10. // -// Asm: VPADDSB, CPU Feature: AVX2 -func (x Uint8x32) SaturatedAdd(y Uint8x32) Uint8x32 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. +// Const Immediate = 10. // -// Asm: VPSUBSB, CPU Feature: AVX2 -func (x Uint8x32) SaturatedSub(y Uint8x32) Uint8x32 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 -// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, -// yielding a vector of half as many elements with twice the input element size. -// -// Asm: VPMADDUBSW, CPU Feature: AVX2 -func (x Uint8x32) SaturatedUnsignedSignedPairDotProd(y Int8x32) Int16x16 +/* MaskedDiffWithCeilWithPrecision */ -// Sub subtracts corresponding elements of two vectors. +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// Const Immediate = 2. // -// Asm: VPSUBB, CPU Feature: AVX2 -func (x Uint8x32) Sub(y Uint8x32) Uint8x32 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask32x4) Float32x4 -// Xor performs a bitwise XOR operation between two vectors. +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// Const Immediate = 2. // -// Asm: VPXOR, CPU Feature: AVX2 -func (x Uint8x32) Xor(y Uint8x32) Uint8x32 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask32x8) Float32x8 -// Add adds corresponding elements of two vectors. +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// Const Immediate = 2. // -// Asm: VPADDB, CPU Feature: AVX512EVEX -func (x Uint8x64) Add(y Uint8x64) Uint8x64 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask32x16) Float32x16 -// Average computes the rounded average of corresponding elements. +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// Const Immediate = 2. 
// -// Asm: VPAVGB, CPU Feature: AVX512EVEX -func (x Uint8x64) Average(y Uint8x64) Uint8x64 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask64x2) Float64x2 -// Equal compares for equality, masked. -// Const Immediate = 0. +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// Const Immediate = 2. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) Equal(y Uint8x64) Mask8x64 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask64x4) Float64x4 -// Greater compares for greater than. -// Const Immediate = 6. +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// Const Immediate = 2. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) Greater(y Uint8x64) Mask8x64 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask64x8) Float64x8 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) GreaterEqual(y Uint8x64) Mask8x64 +/* MaskedDiffWithFloorSuppressExceptionWithPrecision */ -// Less compares for less than. -// Const Immediate = 1. +// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. +// Const Immediate = 9. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) Less(y Uint8x64) Mask8x64 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. +// Const Immediate = 9. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) LessEqual(y Uint8x64) Mask8x64 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 -// PopCount counts the number of set bits in each element. +// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. +// Const Immediate = 9. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedPopCount(y Mask8x64) Uint8x64 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 -// Max computes the maximum of corresponding elements. +// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. +// Const Immediate = 9. // -// Asm: VPMAXUB, CPU Feature: AVX512EVEX -func (x Uint8x64) Max(y Uint8x64) Uint8x64 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 -// Min computes the minimum of corresponding elements. +// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. +// Const Immediate = 9. 
// -// Asm: VPMINUB, CPU Feature: AVX512EVEX -func (x Uint8x64) Min(y Uint8x64) Uint8x64 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 -// NotEqual compares for inequality. -// Const Immediate = 4. +// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. +// Const Immediate = 9. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) NotEqual(y Uint8x64) Mask8x64 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSB, CPU Feature: AVX512EVEX -func (x Uint8x64) SaturatedAdd(y Uint8x64) Uint8x64 +/* MaskedDiffWithFloorWithPrecision */ -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// Const Immediate = 1. // -// Asm: VPSUBSB, CPU Feature: AVX512EVEX -func (x Uint8x64) SaturatedSub(y Uint8x64) Uint8x64 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask32x4) Float32x4 -// Sub subtracts corresponding elements of two vectors. +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// Const Immediate = 1. // -// Asm: VPSUBB, CPU Feature: AVX512EVEX -func (x Uint8x64) Sub(y Uint8x64) Uint8x64 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask32x8) Float32x8 -// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// Const Immediate = 1. // -// Asm: VFMADD132PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplyAdd132(y Float32x16, z Float32x16) Float32x16 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask32x16) Float32x16 -// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// Const Immediate = 1. // -// Asm: VFMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplyAdd213(y Float32x16, z Float32x16) Float32x16 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask64x2) Float64x2 -// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// Const Immediate = 1. // -// Asm: VFMADD231PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplyAdd231(y Float32x16, z Float32x16) Float32x16 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask64x4) Float64x4 -// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// Const Immediate = 1. 
// -// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplyAddSub132(y Float32x16, z Float32x16) Float32x16 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask64x8) Float64x8 -// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. -// -// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplyAddSub213(y Float32x16, z Float32x16) Float32x16 +/* MaskedDiffWithRoundSuppressExceptionWithPrecision */ -// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. +// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. +// Const Immediate = 8. // -// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplyAddSub231(y Float32x16, z Float32x16) Float32x16 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 -// FusedMultiplySub132 performs `(v1 * v3) - v2`. +// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. +// Const Immediate = 8. // -// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplySub132(y Float32x16, z Float32x16) Float32x16 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 -// FusedMultiplySub213 performs `(v2 * v1) - v3`. +// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. +// Const Immediate = 8. // -// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplySub213(y Float32x16, z Float32x16) Float32x16 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 -// FusedMultiplySub231 performs `(v2 * v3) - v1`. +// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. +// Const Immediate = 8. // -// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplySub231(y Float32x16, z Float32x16) Float32x16 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 -// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. +// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. +// Const Immediate = 8. // -// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplySubAdd132(y Float32x16, z Float32x16) Float32x16 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 -// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. +// Const Immediate = 8. 
// -// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplySubAdd213(y Float32x16, z Float32x16) Float32x16 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 -// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. -// -// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplySubAdd231(y Float32x16, z Float32x16) Float32x16 +/* MaskedDiffWithRoundWithPrecision */ -// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// Const Immediate = 0. // -// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedNegativeMultiplyAdd132(y Float32x16, z Float32x16) Float32x16 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask32x4) Float32x4 -// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// Const Immediate = 0. // -// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedNegativeMultiplyAdd213(y Float32x16, z Float32x16) Float32x16 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask32x8) Float32x8 -// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// Const Immediate = 0. // -// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedNegativeMultiplyAdd231(y Float32x16, z Float32x16) Float32x16 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask32x16) Float32x16 -// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// Const Immediate = 0. // -// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedNegativeMultiplySub132(y Float32x16, z Float32x16) Float32x16 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask64x2) Float64x2 -// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// Const Immediate = 0. // -// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedNegativeMultiplySub213(y Float32x16, z Float32x16) Float32x16 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask64x4) Float64x4 -// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// Const Immediate = 0. // -// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedNegativeMultiplySub231(y Float32x16, z Float32x16) Float32x16 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask64x8) Float64x8 -// Add adds corresponding elements of two vectors. 
-// -// Asm: VADDPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedAdd(y Float32x16, z Mask32x16) Float32x16 +/* MaskedDiffWithTruncSuppressExceptionWithPrecision */ -// And performs a masked bitwise AND operation between two vectors. +// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. +// Const Immediate = 11. // -// Asm: VANDPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedAnd(y Float32x16, z Mask32x16) Float32x16 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. +// Const Immediate = 11. // -// Asm: VANDNPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedAndNot(y Float32x16, z Mask32x16) Float32x16 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 -// Div divides elements of two vectors. +// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. +// Const Immediate = 11. // -// Asm: VDIVPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedDiv(y Float32x16, z Mask32x16) Float32x16 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 -// Equal compares for equality, masked. -// Const Immediate = 0. +// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. +// Const Immediate = 11. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedEqual(y Float32x16, z Mask32x16) Mask32x16 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 -// Greater compares for greater than. -// Const Immediate = 6. +// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. +// Const Immediate = 11. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedGreater(y Float32x16, z Mask32x16) Mask32x16 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. +// Const Immediate = 11. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedGreaterEqual(y Float32x16, z Mask32x16) Mask32x16 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 -// IsNan checks if elements are NaN. Use as x.IsNan(x). +/* MaskedDiffWithTruncWithPrecision */ + +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. // Const Immediate = 3. 
// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedIsNan(y Float32x16, z Mask32x16) Mask32x16 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask32x4) Float32x4 -// Less compares for less than. -// Const Immediate = 1. +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// Const Immediate = 3. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedLess(y Float32x16, z Mask32x16) Mask32x16 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask32x8) Float32x8 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// Const Immediate = 3. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedLessEqual(y Float32x16, z Mask32x16) Mask32x16 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask32x16) Float32x16 -// Max computes the maximum of corresponding elements. +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// Const Immediate = 3. // -// Asm: VMAXPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedMax(y Float32x16, z Mask32x16) Float32x16 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask64x2) Float64x2 -// Min computes the minimum of corresponding elements. +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// Const Immediate = 3. // -// Asm: VMINPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedMin(y Float32x16, z Mask32x16) Float32x16 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask64x4) Float64x4 -// Mul multiplies corresponding elements of two vectors, masked. +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// Const Immediate = 3. // -// Asm: VMULPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedMul(y Float32x16, z Mask32x16) Float32x16 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask64x8) Float64x8 -// MulByPowOf2 multiplies elements by a power of 2. -// -// Asm: VSCALEFPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedMulByPowOf2(y Float32x16, z Mask32x16) Float32x16 +/* MaskedDiv */ -// NotEqual compares for inequality. -// Const Immediate = 4. +// Div divides elements of two vectors. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedNotEqual(y Float32x16, z Mask32x16) Mask32x16 +// Asm: VDIVPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedDiv(y Float32x4, z Mask32x4) Float32x4 -// Or performs a masked bitwise OR operation between two vectors. +// Div divides elements of two vectors. // -// Asm: VORPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedOr(y Float32x16, z Mask32x16) Float32x16 +// Asm: VDIVPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedDiv(y Float32x8, z Mask32x8) Float32x8 -// Sub subtracts corresponding elements of two vectors. +// Div divides elements of two vectors. 
// -// Asm: VADDPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedSub(y Float32x16, z Mask32x16) Float32x16 +// Asm: VDIVPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedDiv(y Float32x16, z Mask32x16) Float32x16 -// Xor performs a masked bitwise XOR operation between two vectors. +// Div divides elements of two vectors. // -// Asm: VXORPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedXor(y Float32x16, z Mask32x16) Float32x16 +// Asm: VDIVPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedDiv(y Float64x2, z Mask64x2) Float64x2 -// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. +// Div divides elements of two vectors. // -// Asm: VFMADD132PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplyAdd132(y Float32x4, z Float32x4) Float32x4 +// Asm: VDIVPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedDiv(y Float64x4, z Mask64x4) Float64x4 -// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// Div divides elements of two vectors. // -// Asm: VFMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplyAdd213(y Float32x4, z Float32x4) Float32x4 +// Asm: VDIVPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedDiv(y Float64x8, z Mask64x8) Float64x8 -// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. -// -// Asm: VFMADD231PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplyAdd231(y Float32x4, z Float32x4) Float32x4 +/* MaskedEqual */ -// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplyAddSub132(y Float32x4, z Float32x4) Float32x4 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedEqual(y Float32x4, z Mask32x4) Mask32x4 -// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplyAddSub213(y Float32x4, z Float32x4) Float32x4 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedEqual(y Float32x8, z Mask32x8) Mask32x8 -// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplyAddSub231(y Float32x4, z Float32x4) Float32x4 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedEqual(y Float32x16, z Mask32x16) Mask32x16 -// FusedMultiplySub132 performs `(v1 * v3) - v2`. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplySub132(y Float32x4, z Float32x4) Float32x4 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedEqual(y Float64x2, z Mask64x2) Mask64x2 -// FusedMultiplySub213 performs `(v2 * v1) - v3`. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplySub213(y Float32x4, z Float32x4) Float32x4 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedEqual(y Float64x4, z Mask64x4) Mask64x4 -// FusedMultiplySub231 performs `(v2 * v3) - v1`. +// Equal compares for equality, masked. +// Const Immediate = 0. 
// -// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplySub231(y Float32x4, z Float32x4) Float32x4 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedEqual(y Float64x8, z Mask64x8) Mask64x8 -// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplySubAdd132(y Float32x4, z Float32x4) Float32x4 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x16) MaskedEqual(y Int8x16, z Mask8x16) Mask8x16 -// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplySubAdd213(y Float32x4, z Float32x4) Float32x4 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x32) MaskedEqual(y Int8x32, z Mask8x32) Mask8x32 -// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplySubAdd231(y Float32x4, z Float32x4) Float32x4 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x64) MaskedEqual(y Int8x64, z Mask8x64) Mask8x64 -// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedNegativeMultiplyAdd132(y Float32x4, z Float32x4) Float32x4 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedEqual(y Int16x8, z Mask16x8) Mask16x8 -// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedNegativeMultiplyAdd213(y Float32x4, z Float32x4) Float32x4 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedEqual(y Int16x16, z Mask16x16) Mask16x16 -// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedNegativeMultiplyAdd231(y Float32x4, z Float32x4) Float32x4 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedEqual(y Int16x32, z Mask16x32) Mask16x32 -// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedNegativeMultiplySub132(y Float32x4, z Float32x4) Float32x4 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedEqual(y Int32x4, z Mask32x4) Mask32x4 -// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedNegativeMultiplySub213(y Float32x4, z Float32x4) Float32x4 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedEqual(y Int32x8, z Mask32x8) Mask32x8 -// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. +// Equal compares for equality, masked. +// Const Immediate = 0. 
// -// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedNegativeMultiplySub231(y Float32x4, z Float32x4) Float32x4 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedEqual(y Int32x16, z Mask32x16) Mask32x16 -// Add adds corresponding elements of two vectors. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VADDPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedAdd(y Float32x4, z Mask32x4) Float32x4 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedEqual(y Int64x2, z Mask64x2) Mask64x2 -// And performs a masked bitwise AND operation between two vectors. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VANDPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedAnd(y Float32x4, z Mask32x4) Float32x4 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedEqual(y Int64x4, z Mask64x4) Mask64x4 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VANDNPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedAndNot(y Float32x4, z Mask32x4) Float32x4 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedEqual(y Int64x8, z Mask64x8) Mask64x8 -// Div divides elements of two vectors. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VDIVPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedDiv(y Float32x4, z Mask32x4) Float32x4 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedEqual(y Uint8x16, z Mask8x16) Mask8x16 // Equal compares for equality, masked. // Const Immediate = 0. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedEqual(y Float32x4, z Mask32x4) Mask32x4 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaskedEqual(y Uint8x32, z Mask8x32) Mask8x32 -// Greater compares for greater than. -// Const Immediate = 6. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedGreater(y Float32x4, z Mask32x4) Mask32x4 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedEqual(y Uint8x64, z Mask8x64) Mask8x64 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedGreaterEqual(y Float32x4, z Mask32x4) Mask32x4 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedEqual(y Uint16x8, z Mask16x8) Mask16x8 -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// Const Immediate = 3. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedIsNan(y Float32x4, z Mask32x4) Mask32x4 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedEqual(y Uint16x16, z Mask16x16) Mask16x16 -// Less compares for less than. -// Const Immediate = 1. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedLess(y Float32x4, z Mask32x4) Mask32x4 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedEqual(y Uint16x32, z Mask16x32) Mask16x32 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// Equal compares for equality, masked. +// Const Immediate = 0. 
// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedLessEqual(y Float32x4, z Mask32x4) Mask32x4 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedEqual(y Uint32x4, z Mask32x4) Mask32x4 -// Max computes the maximum of corresponding elements. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VMAXPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedMax(y Float32x4, z Mask32x4) Float32x4 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedEqual(y Uint32x8, z Mask32x8) Mask32x8 -// Min computes the minimum of corresponding elements. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VMINPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedMin(y Float32x4, z Mask32x4) Float32x4 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedEqual(y Uint32x16, z Mask32x16) Mask32x16 -// Mul multiplies corresponding elements of two vectors, masked. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VMULPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedMul(y Float32x4, z Mask32x4) Float32x4 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedEqual(y Uint64x2, z Mask64x2) Mask64x2 -// MulByPowOf2 multiplies elements by a power of 2. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VSCALEFPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedMulByPowOf2(y Float32x4, z Mask32x4) Float32x4 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedEqual(y Uint64x4, z Mask64x4) Mask64x4 -// NotEqual compares for inequality. -// Const Immediate = 4. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedNotEqual(y Float32x4, z Mask32x4) Mask32x4 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedEqual(y Uint64x8, z Mask64x8) Mask64x8 -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VORPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedOr(y Float32x4, z Mask32x4) Float32x4 +/* MaskedFloorSuppressExceptionWithPrecision */ -// Sub subtracts corresponding elements of two vectors. +// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. +// Const Immediate = 9. // -// Asm: VADDPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedSub(y Float32x4, z Mask32x4) Float32x4 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 -// Xor performs a masked bitwise XOR operation between two vectors. +// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. +// Const Immediate = 9. // -// Asm: VXORPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedXor(y Float32x4, z Mask32x4) Float32x4 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 -// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. +// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. +// Const Immediate = 9. 
// -// Asm: VFMADD132PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplyAdd132(y Float32x8, z Float32x8) Float32x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 -// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. +// Const Immediate = 9. // -// Asm: VFMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplyAdd213(y Float32x8, z Float32x8) Float32x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 -// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. +// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. +// Const Immediate = 9. // -// Asm: VFMADD231PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplyAdd231(y Float32x8, z Float32x8) Float32x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 -// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. +// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. +// Const Immediate = 9. // -// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplyAddSub132(y Float32x8, z Float32x8) Float32x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 -// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. -// -// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplyAddSub213(y Float32x8, z Float32x8) Float32x8 +/* MaskedFloorWithPrecision */ -// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. +// FloorWithPrecision rounds elements down with specified precision, masked. +// Const Immediate = 1. // -// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplyAddSub231(y Float32x8, z Float32x8) Float32x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFloorWithPrecision(imm uint8, y Mask32x4) Float32x4 -// FusedMultiplySub132 performs `(v1 * v3) - v2`. +// FloorWithPrecision rounds elements down with specified precision, masked. +// Const Immediate = 1. // -// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplySub132(y Float32x8, z Float32x8) Float32x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFloorWithPrecision(imm uint8, y Mask32x8) Float32x8 -// FusedMultiplySub213 performs `(v2 * v1) - v3`. +// FloorWithPrecision rounds elements down with specified precision, masked. +// Const Immediate = 1. // -// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplySub213(y Float32x8, z Float32x8) Float32x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFloorWithPrecision(imm uint8, y Mask32x16) Float32x16 -// FusedMultiplySub231 performs `(v2 * v3) - v1`. +// FloorWithPrecision rounds elements down with specified precision, masked. +// Const Immediate = 1. 
// -// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplySub231(y Float32x8, z Float32x8) Float32x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFloorWithPrecision(imm uint8, y Mask64x2) Float64x2 -// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. +// FloorWithPrecision rounds elements down with specified precision, masked. +// Const Immediate = 1. // -// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplySubAdd132(y Float32x8, z Float32x8) Float32x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFloorWithPrecision(imm uint8, y Mask64x4) Float64x4 -// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// FloorWithPrecision rounds elements down with specified precision, masked. +// Const Immediate = 1. // -// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplySubAdd213(y Float32x8, z Float32x8) Float32x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFloorWithPrecision(imm uint8, y Mask64x8) Float64x8 -// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. -// -// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplySubAdd231(y Float32x8, z Float32x8) Float32x8 +/* MaskedFusedMultiplyAdd132 */ -// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. +// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. // -// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedNegativeMultiplyAdd132(y Float32x8, z Float32x8) Float32x8 +// Asm: VFMADD132PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedMultiplyAdd132(y Float32x4, z Float32x4, u Mask32x4) Float32x4 -// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. +// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. // -// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedNegativeMultiplyAdd213(y Float32x8, z Float32x8) Float32x8 +// Asm: VFMADD132PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedMultiplyAdd132(y Float32x8, z Float32x8, u Mask32x8) Float32x8 -// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. +// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. // -// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedNegativeMultiplyAdd231(y Float32x8, z Float32x8) Float32x8 +// Asm: VFMADD132PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedMultiplyAdd132(y Float32x16, z Float32x16, u Mask32x16) Float32x16 -// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. +// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. // -// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedNegativeMultiplySub132(y Float32x8, z Float32x8) Float32x8 +// Asm: VFMADD132PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedMultiplyAdd132(y Float64x2, z Float64x2, u Mask64x2) Float64x2 -// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. +// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. // -// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedNegativeMultiplySub213(y Float32x8, z Float32x8) Float32x8 +// Asm: VFMADD132PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedMultiplyAdd132(y Float64x4, z Float64x4, u Mask64x4) Float64x4 -// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. +// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. 
// -// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedNegativeMultiplySub231(y Float32x8, z Float32x8) Float32x8 +// Asm: VFMADD132PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedMultiplyAdd132(y Float64x8, z Float64x8, u Mask64x8) Float64x8 -// Add adds corresponding elements of two vectors. -// -// Asm: VADDPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedAdd(y Float32x8, z Mask32x8) Float32x8 +/* MaskedFusedMultiplyAdd213 */ -// And performs a masked bitwise AND operation between two vectors. +// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. // -// Asm: VANDPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedAnd(y Float32x8, z Mask32x8) Float32x8 - -// AndNot performs a masked bitwise AND NOT operation between two vectors. -// -// Asm: VANDNPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedAndNot(y Float32x8, z Mask32x8) Float32x8 +// Asm: VFMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedMultiplyAdd213(y Float32x4, z Float32x4, u Mask32x4) Float32x4 -// Div divides elements of two vectors. +// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. // -// Asm: VDIVPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedDiv(y Float32x8, z Mask32x8) Float32x8 +// Asm: VFMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedMultiplyAdd213(y Float32x8, z Float32x8, u Mask32x8) Float32x8 -// Equal compares for equality, masked. -// Const Immediate = 0. +// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedEqual(y Float32x8, z Mask32x8) Mask32x8 +// Asm: VFMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedMultiplyAdd213(y Float32x16, z Float32x16, u Mask32x16) Float32x16 -// Greater compares for greater than. -// Const Immediate = 6. +// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedGreater(y Float32x8, z Mask32x8) Mask32x8 +// Asm: VFMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedMultiplyAdd213(y Float64x2, z Float64x2, u Mask64x2) Float64x2 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedGreaterEqual(y Float32x8, z Mask32x8) Mask32x8 +// Asm: VFMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedMultiplyAdd213(y Float64x4, z Float64x4, u Mask64x4) Float64x4 -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// Const Immediate = 3. +// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedIsNan(y Float32x8, z Mask32x8) Mask32x8 +// Asm: VFMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedMultiplyAdd213(y Float64x8, z Float64x8, u Mask64x8) Float64x8 -// Less compares for less than. -// Const Immediate = 1. -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedLess(y Float32x8, z Mask32x8) Mask32x8 +/* MaskedFusedMultiplyAdd231 */ -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedLessEqual(y Float32x8, z Mask32x8) Mask32x8 +// Asm: VFMADD231PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedMultiplyAdd231(y Float32x4, z Float32x4, u Mask32x4) Float32x4 -// Max computes the maximum of corresponding elements. 
+// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. // -// Asm: VMAXPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedMax(y Float32x8, z Mask32x8) Float32x8 +// Asm: VFMADD231PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedMultiplyAdd231(y Float32x8, z Float32x8, u Mask32x8) Float32x8 -// Min computes the minimum of corresponding elements. +// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. // -// Asm: VMINPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedMin(y Float32x8, z Mask32x8) Float32x8 +// Asm: VFMADD231PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedMultiplyAdd231(y Float32x16, z Float32x16, u Mask32x16) Float32x16 -// Mul multiplies corresponding elements of two vectors, masked. +// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. // -// Asm: VMULPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedMul(y Float32x8, z Mask32x8) Float32x8 +// Asm: VFMADD231PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedMultiplyAdd231(y Float64x2, z Float64x2, u Mask64x2) Float64x2 -// MulByPowOf2 multiplies elements by a power of 2. +// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. // -// Asm: VSCALEFPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedMulByPowOf2(y Float32x8, z Mask32x8) Float32x8 +// Asm: VFMADD231PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedMultiplyAdd231(y Float64x4, z Float64x4, u Mask64x4) Float64x4 -// NotEqual compares for inequality. -// Const Immediate = 4. +// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedNotEqual(y Float32x8, z Mask32x8) Mask32x8 +// Asm: VFMADD231PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedMultiplyAdd231(y Float64x8, z Float64x8, u Mask64x8) Float64x8 -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VORPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedOr(y Float32x8, z Mask32x8) Float32x8 +/* MaskedFusedMultiplyAddSub132 */ -// Sub subtracts corresponding elements of two vectors. +// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. // -// Asm: VADDPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedSub(y Float32x8, z Mask32x8) Float32x8 +// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedMultiplyAddSub132(y Float32x4, z Float32x4, u Mask32x4) Float32x4 -// Xor performs a masked bitwise XOR operation between two vectors. +// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. // -// Asm: VXORPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedXor(y Float32x8, z Mask32x8) Float32x8 +// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedMultiplyAddSub132(y Float32x8, z Float32x8, u Mask32x8) Float32x8 -// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. +// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. // -// Asm: VFMADD132PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplyAdd132(y Float64x2, z Float64x2) Float64x2 +// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedMultiplyAddSub132(y Float32x16, z Float32x16, u Mask32x16) Float32x16 -// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. 
// -// Asm: VFMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplyAdd213(y Float64x2, z Float64x2) Float64x2 +// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedMultiplyAddSub132(y Float64x2, z Float64x2, u Mask64x2) Float64x2 -// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. +// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. // -// Asm: VFMADD231PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplyAdd231(y Float64x2, z Float64x2) Float64x2 +// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedMultiplyAddSub132(y Float64x4, z Float64x4, u Mask64x4) Float64x4 // FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. // // Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplyAddSub132(y Float64x2, z Float64x2) Float64x2 +func (x Float64x8) MaskedFusedMultiplyAddSub132(y Float64x8, z Float64x8, u Mask64x8) Float64x8 + +/* MaskedFusedMultiplyAddSub213 */ // FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. // -// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplyAddSub213(y Float64x2, z Float64x2) Float64x2 +// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedMultiplyAddSub213(y Float32x4, z Float32x4, u Mask32x4) Float32x4 -// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. +// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. // -// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplyAddSub231(y Float64x2, z Float64x2) Float64x2 +// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedMultiplyAddSub213(y Float32x8, z Float32x8, u Mask32x8) Float32x8 -// FusedMultiplySub132 performs `(v1 * v3) - v2`. +// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. // -// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplySub132(y Float64x2, z Float64x2) Float64x2 +// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedMultiplyAddSub213(y Float32x16, z Float32x16, u Mask32x16) Float32x16 -// FusedMultiplySub213 performs `(v2 * v1) - v3`. +// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. // -// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplySub213(y Float64x2, z Float64x2) Float64x2 +// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedMultiplyAddSub213(y Float64x2, z Float64x2, u Mask64x2) Float64x2 -// FusedMultiplySub231 performs `(v2 * v3) - v1`. +// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. // -// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplySub231(y Float64x2, z Float64x2) Float64x2 +// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedMultiplyAddSub213(y Float64x4, z Float64x4, u Mask64x4) Float64x4 -// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. 
+// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. // -// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplySubAdd132(y Float64x2, z Float64x2) Float64x2 +// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedMultiplyAddSub213(y Float64x8, z Float64x8, u Mask64x8) Float64x8 -// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +/* MaskedFusedMultiplyAddSub231 */ + +// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. // -// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplySubAdd213(y Float64x2, z Float64x2) Float64x2 +// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedMultiplyAddSub231(y Float32x4, z Float32x4, u Mask32x4) Float32x4 -// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. +// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. // -// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplySubAdd231(y Float64x2, z Float64x2) Float64x2 +// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedMultiplyAddSub231(y Float32x8, z Float32x8, u Mask32x8) Float32x8 -// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. +// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. // -// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedNegativeMultiplyAdd132(y Float64x2, z Float64x2) Float64x2 +// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedMultiplyAddSub231(y Float32x16, z Float32x16, u Mask32x16) Float32x16 -// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. +// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. // -// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedNegativeMultiplyAdd213(y Float64x2, z Float64x2) Float64x2 +// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedMultiplyAddSub231(y Float64x2, z Float64x2, u Mask64x2) Float64x2 -// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. +// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. // -// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedNegativeMultiplyAdd231(y Float64x2, z Float64x2) Float64x2 +// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedMultiplyAddSub231(y Float64x4, z Float64x4, u Mask64x4) Float64x4 -// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. +// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. // -// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedNegativeMultiplySub132(y Float64x2, z Float64x2) Float64x2 +// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedMultiplyAddSub231(y Float64x8, z Float64x8, u Mask64x8) Float64x8 -// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. +/* MaskedFusedMultiplySub132 */ + +// FusedMultiplySub132 performs `(v1 * v3) - v2`. 
// -// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedNegativeMultiplySub213(y Float64x2, z Float64x2) Float64x2 +// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedMultiplySub132(y Float32x4, z Float32x4, u Mask32x4) Float32x4 -// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. +// FusedMultiplySub132 performs `(v1 * v3) - v2`. // -// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedNegativeMultiplySub231(y Float64x2, z Float64x2) Float64x2 +// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedMultiplySub132(y Float32x8, z Float32x8, u Mask32x8) Float32x8 -// Add adds corresponding elements of two vectors. +// FusedMultiplySub132 performs `(v1 * v3) - v2`. // -// Asm: VADDPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedAdd(y Float64x2, z Mask64x2) Float64x2 +// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedMultiplySub132(y Float32x16, z Float32x16, u Mask32x16) Float32x16 -// And performs a masked bitwise AND operation between two vectors. +// FusedMultiplySub132 performs `(v1 * v3) - v2`. // -// Asm: VANDPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedAnd(y Float64x2, z Mask64x2) Float64x2 +// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedMultiplySub132(y Float64x2, z Float64x2, u Mask64x2) Float64x2 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// FusedMultiplySub132 performs `(v1 * v3) - v2`. // -// Asm: VANDNPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedAndNot(y Float64x2, z Mask64x2) Float64x2 +// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedMultiplySub132(y Float64x4, z Float64x4, u Mask64x4) Float64x4 -// Div divides elements of two vectors. +// FusedMultiplySub132 performs `(v1 * v3) - v2`. // -// Asm: VDIVPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedDiv(y Float64x2, z Mask64x2) Float64x2 +// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedMultiplySub132(y Float64x8, z Float64x8, u Mask64x8) Float64x8 -// Equal compares for equality, masked. -// Const Immediate = 0. +/* MaskedFusedMultiplySub213 */ + +// FusedMultiplySub213 performs `(v2 * v1) - v3`. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedEqual(y Float64x2, z Mask64x2) Mask64x2 +// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedMultiplySub213(y Float32x4, z Float32x4, u Mask32x4) Float32x4 -// Greater compares for greater than. -// Const Immediate = 6. +// FusedMultiplySub213 performs `(v2 * v1) - v3`. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedGreater(y Float64x2, z Mask64x2) Mask64x2 +// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedMultiplySub213(y Float32x8, z Float32x8, u Mask32x8) Float32x8 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// FusedMultiplySub213 performs `(v2 * v1) - v3`. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedGreaterEqual(y Float64x2, z Mask64x2) Mask64x2 +// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedMultiplySub213(y Float32x16, z Float32x16, u Mask32x16) Float32x16 -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// Const Immediate = 3. +// FusedMultiplySub213 performs `(v2 * v1) - v3`. 
// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedIsNan(y Float64x2, z Mask64x2) Mask64x2 +// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedMultiplySub213(y Float64x2, z Float64x2, u Mask64x2) Float64x2 -// Less compares for less than. -// Const Immediate = 1. +// FusedMultiplySub213 performs `(v2 * v1) - v3`. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedLess(y Float64x2, z Mask64x2) Mask64x2 +// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedMultiplySub213(y Float64x4, z Float64x4, u Mask64x4) Float64x4 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// FusedMultiplySub213 performs `(v2 * v1) - v3`. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedLessEqual(y Float64x2, z Mask64x2) Mask64x2 +// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedMultiplySub213(y Float64x8, z Float64x8, u Mask64x8) Float64x8 -// Max computes the maximum of corresponding elements. -// -// Asm: VMAXPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedMax(y Float64x2, z Mask64x2) Float64x2 +/* MaskedFusedMultiplySub231 */ -// Min computes the minimum of corresponding elements. +// FusedMultiplySub231 performs `(v2 * v3) - v1`. // -// Asm: VMINPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedMin(y Float64x2, z Mask64x2) Float64x2 +// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedMultiplySub231(y Float32x4, z Float32x4, u Mask32x4) Float32x4 -// Mul multiplies corresponding elements of two vectors, masked. +// FusedMultiplySub231 performs `(v2 * v3) - v1`. // -// Asm: VMULPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedMul(y Float64x2, z Mask64x2) Float64x2 +// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedMultiplySub231(y Float32x8, z Float32x8, u Mask32x8) Float32x8 -// MulByPowOf2 multiplies elements by a power of 2. +// FusedMultiplySub231 performs `(v2 * v3) - v1`. // -// Asm: VSCALEFPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedMulByPowOf2(y Float64x2, z Mask64x2) Float64x2 +// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedMultiplySub231(y Float32x16, z Float32x16, u Mask32x16) Float32x16 -// NotEqual compares for inequality. -// Const Immediate = 4. +// FusedMultiplySub231 performs `(v2 * v3) - v1`. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedNotEqual(y Float64x2, z Mask64x2) Mask64x2 +// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedMultiplySub231(y Float64x2, z Float64x2, u Mask64x2) Float64x2 -// Or performs a masked bitwise OR operation between two vectors. +// FusedMultiplySub231 performs `(v2 * v3) - v1`. // -// Asm: VORPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedOr(y Float64x2, z Mask64x2) Float64x2 +// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedMultiplySub231(y Float64x4, z Float64x4, u Mask64x4) Float64x4 -// Sub subtracts corresponding elements of two vectors. +// FusedMultiplySub231 performs `(v2 * v3) - v1`. // -// Asm: VADDPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedSub(y Float64x2, z Mask64x2) Float64x2 +// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedMultiplySub231(y Float64x8, z Float64x8, u Mask64x8) Float64x8 -// Xor performs a masked bitwise XOR operation between two vectors. 
+/* MaskedFusedMultiplySubAdd132 */ + +// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. // -// Asm: VXORPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedXor(y Float64x2, z Mask64x2) Float64x2 +// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedMultiplySubAdd132(y Float32x4, z Float32x4, u Mask32x4) Float32x4 -// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. +// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. // -// Asm: VFMADD132PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplyAdd132(y Float64x4, z Float64x4) Float64x4 +// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedMultiplySubAdd132(y Float32x8, z Float32x8, u Mask32x8) Float32x8 -// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. // -// Asm: VFMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplyAdd213(y Float64x4, z Float64x4) Float64x4 +// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedMultiplySubAdd132(y Float32x16, z Float32x16, u Mask32x16) Float32x16 -// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. +// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. // -// Asm: VFMADD231PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplyAdd231(y Float64x4, z Float64x4) Float64x4 +// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedMultiplySubAdd132(y Float64x2, z Float64x2, u Mask64x2) Float64x2 -// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. +// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. // -// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplyAddSub132(y Float64x4, z Float64x4) Float64x4 +// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedMultiplySubAdd132(y Float64x4, z Float64x4, u Mask64x4) Float64x4 -// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. // -// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplyAddSub213(y Float64x4, z Float64x4) Float64x4 +// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedMultiplySubAdd132(y Float64x8, z Float64x8, u Mask64x8) Float64x8 -// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. +/* MaskedFusedMultiplySubAdd213 */ + +// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. // -// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplyAddSub231(y Float64x4, z Float64x4) Float64x4 +// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedMultiplySubAdd213(y Float32x4, z Float32x4, u Mask32x4) Float32x4 -// FusedMultiplySub132 performs `(v1 * v3) - v2`. 
+// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. // -// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplySub132(y Float64x4, z Float64x4) Float64x4 +// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedMultiplySubAdd213(y Float32x8, z Float32x8, u Mask32x8) Float32x8 -// FusedMultiplySub213 performs `(v2 * v1) - v3`. +// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. // -// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplySub213(y Float64x4, z Float64x4) Float64x4 +// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedMultiplySubAdd213(y Float32x16, z Float32x16, u Mask32x16) Float32x16 -// FusedMultiplySub231 performs `(v2 * v3) - v1`. +// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. // -// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplySub231(y Float64x4, z Float64x4) Float64x4 +// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedMultiplySubAdd213(y Float64x2, z Float64x2, u Mask64x2) Float64x2 -// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. +// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. // -// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplySubAdd132(y Float64x4, z Float64x4) Float64x4 +// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedMultiplySubAdd213(y Float64x4, z Float64x4, u Mask64x4) Float64x4 // FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. // // Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplySubAdd213(y Float64x4, z Float64x4) Float64x4 +func (x Float64x8) MaskedFusedMultiplySubAdd213(y Float64x8, z Float64x8, u Mask64x8) Float64x8 + +/* MaskedFusedMultiplySubAdd231 */ // FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. // -// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplySubAdd231(y Float64x4, z Float64x4) Float64x4 +// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedMultiplySubAdd231(y Float32x4, z Float32x4, u Mask32x4) Float32x4 -// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. +// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. // -// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedNegativeMultiplyAdd132(y Float64x4, z Float64x4) Float64x4 +// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedMultiplySubAdd231(y Float32x8, z Float32x8, u Mask32x8) Float32x8 -// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. +// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. 
// -// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedNegativeMultiplyAdd213(y Float64x4, z Float64x4) Float64x4 +// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedMultiplySubAdd231(y Float32x16, z Float32x16, u Mask32x16) Float32x16 -// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. +// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. // -// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedNegativeMultiplyAdd231(y Float64x4, z Float64x4) Float64x4 +// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedMultiplySubAdd231(y Float64x2, z Float64x2, u Mask64x2) Float64x2 -// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. +// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. // -// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedNegativeMultiplySub132(y Float64x4, z Float64x4) Float64x4 +// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedMultiplySubAdd231(y Float64x4, z Float64x4, u Mask64x4) Float64x4 -// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. +// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. // -// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedNegativeMultiplySub213(y Float64x4, z Float64x4) Float64x4 +// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedMultiplySubAdd231(y Float64x8, z Float64x8, u Mask64x8) Float64x8 -// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. -// -// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedNegativeMultiplySub231(y Float64x4, z Float64x4) Float64x4 +/* MaskedFusedNegativeMultiplyAdd132 */ -// Add adds corresponding elements of two vectors. +// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. // -// Asm: VADDPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedAdd(y Float64x4, z Mask64x4) Float64x4 +// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedNegativeMultiplyAdd132(y Float32x4, z Float32x4, u Mask32x4) Float32x4 -// And performs a masked bitwise AND operation between two vectors. +// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. // -// Asm: VANDPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedAnd(y Float64x4, z Mask64x4) Float64x4 +// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedNegativeMultiplyAdd132(y Float32x8, z Float32x8, u Mask32x8) Float32x8 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. // -// Asm: VANDNPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedAndNot(y Float64x4, z Mask64x4) Float64x4 +// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedNegativeMultiplyAdd132(y Float32x16, z Float32x16, u Mask32x16) Float32x16 -// Div divides elements of two vectors. +// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. // -// Asm: VDIVPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedDiv(y Float64x4, z Mask64x4) Float64x4 +// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedNegativeMultiplyAdd132(y Float64x2, z Float64x2, u Mask64x2) Float64x2 -// Equal compares for equality, masked. -// Const Immediate = 0. 
+// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedEqual(y Float64x4, z Mask64x4) Mask64x4 +// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedNegativeMultiplyAdd132(y Float64x4, z Float64x4, u Mask64x4) Float64x4 -// Greater compares for greater than. -// Const Immediate = 6. +// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedGreater(y Float64x4, z Mask64x4) Mask64x4 +// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedNegativeMultiplyAdd132(y Float64x8, z Float64x8, u Mask64x8) Float64x8 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +/* MaskedFusedNegativeMultiplyAdd213 */ + +// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedGreaterEqual(y Float64x4, z Mask64x4) Mask64x4 +// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedNegativeMultiplyAdd213(y Float32x4, z Float32x4, u Mask32x4) Float32x4 -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// Const Immediate = 3. +// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedIsNan(y Float64x4, z Mask64x4) Mask64x4 +// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedNegativeMultiplyAdd213(y Float32x8, z Float32x8, u Mask32x8) Float32x8 -// Less compares for less than. -// Const Immediate = 1. +// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedLess(y Float64x4, z Mask64x4) Mask64x4 +// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedNegativeMultiplyAdd213(y Float32x16, z Float32x16, u Mask32x16) Float32x16 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedLessEqual(y Float64x4, z Mask64x4) Mask64x4 +// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedNegativeMultiplyAdd213(y Float64x2, z Float64x2, u Mask64x2) Float64x2 -// Max computes the maximum of corresponding elements. +// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. // -// Asm: VMAXPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedMax(y Float64x4, z Mask64x4) Float64x4 +// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedNegativeMultiplyAdd213(y Float64x4, z Float64x4, u Mask64x4) Float64x4 -// Min computes the minimum of corresponding elements. +// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. // -// Asm: VMINPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedMin(y Float64x4, z Mask64x4) Float64x4 +// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedNegativeMultiplyAdd213(y Float64x8, z Float64x8, u Mask64x8) Float64x8 -// Mul multiplies corresponding elements of two vectors, masked. +/* MaskedFusedNegativeMultiplyAdd231 */ + +// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. 
// -// Asm: VMULPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedMul(y Float64x4, z Mask64x4) Float64x4 +// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedNegativeMultiplyAdd231(y Float32x4, z Float32x4, u Mask32x4) Float32x4 -// MulByPowOf2 multiplies elements by a power of 2. +// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. // -// Asm: VSCALEFPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedMulByPowOf2(y Float64x4, z Mask64x4) Float64x4 +// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedNegativeMultiplyAdd231(y Float32x8, z Float32x8, u Mask32x8) Float32x8 -// NotEqual compares for inequality. -// Const Immediate = 4. +// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedNotEqual(y Float64x4, z Mask64x4) Mask64x4 +// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedNegativeMultiplyAdd231(y Float32x16, z Float32x16, u Mask32x16) Float32x16 -// Or performs a masked bitwise OR operation between two vectors. +// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. // -// Asm: VORPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedOr(y Float64x4, z Mask64x4) Float64x4 +// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedNegativeMultiplyAdd231(y Float64x2, z Float64x2, u Mask64x2) Float64x2 -// Sub subtracts corresponding elements of two vectors. +// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. // -// Asm: VADDPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedSub(y Float64x4, z Mask64x4) Float64x4 +// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedNegativeMultiplyAdd231(y Float64x4, z Float64x4, u Mask64x4) Float64x4 -// Xor performs a masked bitwise XOR operation between two vectors. +// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. // -// Asm: VXORPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedXor(y Float64x4, z Mask64x4) Float64x4 +// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedNegativeMultiplyAdd231(y Float64x8, z Float64x8, u Mask64x8) Float64x8 -// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. -// -// Asm: VFMADD132PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplyAdd132(y Float64x8, z Float64x8) Float64x8 +/* MaskedFusedNegativeMultiplySub132 */ -// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. // -// Asm: VFMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplyAdd213(y Float64x8, z Float64x8) Float64x8 +// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedNegativeMultiplySub132(y Float32x4, z Float32x4, u Mask32x4) Float32x4 -// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. +// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. // -// Asm: VFMADD231PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplyAdd231(y Float64x8, z Float64x8) Float64x8 +// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedNegativeMultiplySub132(y Float32x8, z Float32x8, u Mask32x8) Float32x8 -// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. +// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. 
// -// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplyAddSub132(y Float64x8, z Float64x8) Float64x8 +// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedNegativeMultiplySub132(y Float32x16, z Float32x16, u Mask32x16) Float32x16 -// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. // -// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplyAddSub213(y Float64x8, z Float64x8) Float64x8 +// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedNegativeMultiplySub132(y Float64x2, z Float64x2, u Mask64x2) Float64x2 -// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. +// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. // -// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplyAddSub231(y Float64x8, z Float64x8) Float64x8 +// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedNegativeMultiplySub132(y Float64x4, z Float64x4, u Mask64x4) Float64x4 -// FusedMultiplySub132 performs `(v1 * v3) - v2`. +// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. // -// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplySub132(y Float64x8, z Float64x8) Float64x8 +// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedNegativeMultiplySub132(y Float64x8, z Float64x8, u Mask64x8) Float64x8 -// FusedMultiplySub213 performs `(v2 * v1) - v3`. -// -// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplySub213(y Float64x8, z Float64x8) Float64x8 +/* MaskedFusedNegativeMultiplySub213 */ -// FusedMultiplySub231 performs `(v2 * v3) - v1`. +// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. // -// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplySub231(y Float64x8, z Float64x8) Float64x8 +// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedNegativeMultiplySub213(y Float32x4, z Float32x4, u Mask32x4) Float32x4 -// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. +// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. // -// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplySubAdd132(y Float64x8, z Float64x8) Float64x8 +// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedNegativeMultiplySub213(y Float32x8, z Float32x8, u Mask32x8) Float32x8 -// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. // -// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplySubAdd213(y Float64x8, z Float64x8) Float64x8 +// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedNegativeMultiplySub213(y Float32x16, z Float32x16, u Mask32x16) Float32x16 -// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. +// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. 
// -// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplySubAdd231(y Float64x8, z Float64x8) Float64x8 +// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedNegativeMultiplySub213(y Float64x2, z Float64x2, u Mask64x2) Float64x2 -// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. +// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. // -// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedNegativeMultiplyAdd132(y Float64x8, z Float64x8) Float64x8 +// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedNegativeMultiplySub213(y Float64x4, z Float64x4, u Mask64x4) Float64x4 -// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. +// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. // -// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedNegativeMultiplyAdd213(y Float64x8, z Float64x8) Float64x8 +// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedNegativeMultiplySub213(y Float64x8, z Float64x8, u Mask64x8) Float64x8 -// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. +/* MaskedFusedNegativeMultiplySub231 */ + +// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. // -// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedNegativeMultiplyAdd231(y Float64x8, z Float64x8) Float64x8 +// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedNegativeMultiplySub231(y Float32x4, z Float32x4, u Mask32x4) Float32x4 -// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. +// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. // -// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedNegativeMultiplySub132(y Float64x8, z Float64x8) Float64x8 +// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedNegativeMultiplySub231(y Float32x8, z Float32x8, u Mask32x8) Float32x8 -// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. +// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. // -// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedNegativeMultiplySub213(y Float64x8, z Float64x8) Float64x8 +// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedNegativeMultiplySub231(y Float32x16, z Float32x16, u Mask32x16) Float32x16 // FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. // // Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedNegativeMultiplySub231(y Float64x8, z Float64x8) Float64x8 +func (x Float64x2) MaskedFusedNegativeMultiplySub231(y Float64x2, z Float64x2, u Mask64x2) Float64x2 -// Add adds corresponding elements of two vectors. +// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. // -// Asm: VADDPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedAdd(y Float64x8, z Mask64x8) Float64x8 +// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedNegativeMultiplySub231(y Float64x4, z Float64x4, u Mask64x4) Float64x4 -// And performs a masked bitwise AND operation between two vectors. +// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. // -// Asm: VANDPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedAnd(y Float64x8, z Mask64x8) Float64x8 +// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedNegativeMultiplySub231(y Float64x8, z Float64x8, u Mask64x8) Float64x8 -// AndNot performs a masked bitwise AND NOT operation between two vectors. 
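The negative variants negate the product before the final add or subtract. A short sketch contrasting the two masked 213 forms on Float32x16, under the same assumed v1, v2, v3 reading as above:

func maskedNegatedFMA(x, y, z Float32x16, m Mask32x16) (nmadd, nmsub Float32x16) {
	nmadd = x.MaskedFusedNegativeMultiplyAdd213(y, z, m) // -(v2 * v1) + v3, VFNMADD213PS
	nmsub = x.MaskedFusedNegativeMultiplySub213(y, z, m) // -(v2 * v1) - v3, VFNMSUB213PS
	return
}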
-// -// Asm: VANDNPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedAndNot(y Float64x8, z Mask64x8) Float64x8 +/* MaskedGreater */ -// Div divides elements of two vectors. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VDIVPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedDiv(y Float64x8, z Mask64x8) Float64x8 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedGreater(y Float32x4, z Mask32x4) Mask32x4 -// Equal compares for equality, masked. -// Const Immediate = 0. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedEqual(y Float64x8, z Mask64x8) Mask64x8 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedGreater(y Float32x8, z Mask32x8) Mask32x8 // Greater compares for greater than. // Const Immediate = 6. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedGreater(y Float64x8, z Mask64x8) Mask64x8 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedGreater(y Float32x16, z Mask32x16) Mask32x16 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedGreaterEqual(y Float64x8, z Mask64x8) Mask64x8 +func (x Float64x2) MaskedGreater(y Float64x2, z Mask64x2) Mask64x2 -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// Const Immediate = 3. +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedIsNan(y Float64x8, z Mask64x8) Mask64x8 +func (x Float64x4) MaskedGreater(y Float64x4, z Mask64x4) Mask64x4 -// Less compares for less than. -// Const Immediate = 1. +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedLess(y Float64x8, z Mask64x8) Mask64x8 +func (x Float64x8) MaskedGreater(y Float64x8, z Mask64x8) Mask64x8 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedLessEqual(y Float64x8, z Mask64x8) Mask64x8 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x16) MaskedGreater(y Int8x16, z Mask8x16) Mask8x16 -// Max computes the maximum of corresponding elements. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VMAXPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedMax(y Float64x8, z Mask64x8) Float64x8 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x32) MaskedGreater(y Int8x32, z Mask8x32) Mask8x32 -// Min computes the minimum of corresponding elements. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VMINPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedMin(y Float64x8, z Mask64x8) Float64x8 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x64) MaskedGreater(y Int8x64, z Mask8x64) Mask8x64 -// Mul multiplies corresponding elements of two vectors, masked. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VMULPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedMul(y Float64x8, z Mask64x8) Float64x8 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedGreater(y Int16x8, z Mask16x8) Mask16x8 -// MulByPowOf2 multiplies elements by a power of 2. +// Greater compares for greater than. +// Const Immediate = 6. 
// -// Asm: VSCALEFPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedMulByPowOf2(y Float64x8, z Mask64x8) Float64x8 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedGreater(y Int16x16, z Mask16x16) Mask16x16 -// NotEqual compares for inequality. -// Const Immediate = 4. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedNotEqual(y Float64x8, z Mask64x8) Mask64x8 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedGreater(y Int16x32, z Mask16x32) Mask16x32 -// Or performs a masked bitwise OR operation between two vectors. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VORPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedOr(y Float64x8, z Mask64x8) Float64x8 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedGreater(y Int32x4, z Mask32x4) Mask32x4 -// Sub subtracts corresponding elements of two vectors. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VADDPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedSub(y Float64x8, z Mask64x8) Float64x8 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedGreater(y Int32x8, z Mask32x8) Mask32x8 -// Xor performs a masked bitwise XOR operation between two vectors. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VXORPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedXor(y Float64x8, z Mask64x8) Float64x8 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedGreater(y Int32x16, z Mask32x16) Mask32x16 -// Add adds corresponding elements of two vectors. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPADDW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedAdd(y Int16x16, z Mask16x16) Int16x16 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedGreater(y Int64x2, z Mask64x2) Mask64x2 -// Equal compares for equality, masked. -// Const Immediate = 0. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPCMPEQW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedEqual(y Int16x16, z Mask16x16) Mask16x16 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedGreater(y Int64x4, z Mask64x4) Mask64x4 // Greater compares for greater than. // Const Immediate = 6. // -// Asm: VPCMPGTW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedGreater(y Int16x16, z Mask16x16) Mask16x16 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedGreater(y Int64x8, z Mask64x8) Mask64x8 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedGreaterEqual(y Int16x16, z Mask16x16) Mask16x16 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedGreater(y Uint8x16, z Mask8x16) Mask8x16 -// Less compares for less than. -// Const Immediate = 1. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedLess(y Int16x16, z Mask16x16) Mask16x16 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaskedGreater(y Uint8x32, z Mask8x32) Mask8x32 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// Greater compares for greater than. +// Const Immediate = 6. 
// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedLessEqual(y Int16x16, z Mask16x16) Mask16x16 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedGreater(y Uint8x64, z Mask8x64) Mask8x64 -// Max computes the maximum of corresponding elements. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPMAXSW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedMax(y Int16x16, z Mask16x16) Int16x16 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedGreater(y Uint16x8, z Mask16x8) Mask16x8 -// Min computes the minimum of corresponding elements. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPMINSW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedMin(y Int16x16, z Mask16x16) Int16x16 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedGreater(y Uint16x16, z Mask16x16) Mask16x16 -// MulHigh multiplies elements and stores the high part of the result, masked. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPMULHW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedMulHigh(y Int16x16, z Mask16x16) Int16x16 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedGreater(y Uint16x32, z Mask16x32) Mask16x32 -// MulLow multiplies elements and stores the low part of the result, masked. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPMULLW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedMulLow(y Int16x16, z Mask16x16) Int16x16 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedGreater(y Uint32x4, z Mask32x4) Mask32x4 -// NotEqual compares for inequality. -// Const Immediate = 4. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedNotEqual(y Int16x16, z Mask16x16) Mask16x16 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedGreater(y Uint32x8, z Mask32x8) Mask32x8 -// PairDotProd multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPMADDWD, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedPairDotProd(y Int16x16, z Mask16x16) Int32x8 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedGreater(y Uint32x16, z Mask32x16) Mask32x16 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPADDSW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedSaturatedAdd(y Int16x16, z Mask16x16) Int16x16 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedGreater(y Uint64x2, z Mask64x2) Mask64x2 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPSUBSW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedSaturatedSub(y Int16x16, z Mask16x16) Int16x16 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedGreater(y Uint64x4, z Mask64x4) Mask64x4 -// Sub subtracts corresponding elements of two vectors. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPSUBW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedSub(y Int16x16, z Mask16x16) Int16x16 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedGreater(y Uint64x8, z Mask64x8) Mask64x8 -// Add adds corresponding elements of two vectors. 
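Every method in this group compiles to a single compare with predicate immediate 6: VCMPPS/VCMPPD for floats, VPCMPB/W/D/Q for signed integers, and VPCMPUB/UW/UD/UQ for unsigned integers. A minimal in-package sketch producing a lane mask from a masked compare:

func greaterUnderMask(x, y Int32x16, m Mask32x16) Mask32x16 {
	// Signed 32-bit greater-than (VPCMPD with immediate 6); the result is a
	// Mask32x16 that can feed the mask parameter of other masked operations.
	return x.MaskedGreater(y, m)
}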
-// -// Asm: VPADDW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedAdd(y Int16x32, z Mask16x32) Int16x32 +/* MaskedGreaterEqual */ -// Equal compares for equality, masked. -// Const Immediate = 0. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPCMPEQW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedEqual(y Int16x32, z Mask16x32) Mask16x32 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedGreaterEqual(y Float32x4, z Mask32x4) Mask32x4 -// Greater compares for greater than. -// Const Immediate = 6. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPCMPGTW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedGreater(y Int16x32, z Mask16x32) Mask16x32 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedGreaterEqual(y Float32x8, z Mask32x8) Mask32x8 // GreaterEqual compares for greater than or equal. // Const Immediate = 5. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedGreaterEqual(y Int16x32, z Mask16x32) Mask16x32 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedGreaterEqual(y Float32x16, z Mask32x16) Mask32x16 -// Less compares for less than. -// Const Immediate = 1. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedLess(y Int16x32, z Mask16x32) Mask16x32 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedGreaterEqual(y Float64x2, z Mask64x2) Mask64x2 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedLessEqual(y Int16x32, z Mask16x32) Mask16x32 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedGreaterEqual(y Float64x4, z Mask64x4) Mask64x4 -// Max computes the maximum of corresponding elements. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPMAXSW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedMax(y Int16x32, z Mask16x32) Int16x32 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedGreaterEqual(y Float64x8, z Mask64x8) Mask64x8 -// Min computes the minimum of corresponding elements. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPMINSW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedMin(y Int16x32, z Mask16x32) Int16x32 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x16) MaskedGreaterEqual(y Int8x16, z Mask8x16) Mask8x16 -// MulHigh multiplies elements and stores the high part of the result, masked. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPMULHW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedMulHigh(y Int16x32, z Mask16x32) Int16x32 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x32) MaskedGreaterEqual(y Int8x32, z Mask8x32) Mask8x32 -// MulLow multiplies elements and stores the low part of the result, masked. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPMULLW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedMulLow(y Int16x32, z Mask16x32) Int16x32 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x64) MaskedGreaterEqual(y Int8x64, z Mask8x64) Mask8x64 -// NotEqual compares for inequality. -// Const Immediate = 4. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. 
// // Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedNotEqual(y Int16x32, z Mask16x32) Mask16x32 +func (x Int16x8) MaskedGreaterEqual(y Int16x8, z Mask16x8) Mask16x8 -// PairDotProd multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPMADDWD, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedPairDotProd(y Int16x32, z Mask16x32) Int32x16 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedGreaterEqual(y Int16x16, z Mask16x16) Mask16x16 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPADDSW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedSaturatedAdd(y Int16x32, z Mask16x32) Int16x32 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedGreaterEqual(y Int16x32, z Mask16x32) Mask16x32 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPSUBSW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedSaturatedSub(y Int16x32, z Mask16x32) Int16x32 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedGreaterEqual(y Int32x4, z Mask32x4) Mask32x4 -// Sub subtracts corresponding elements of two vectors. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPSUBW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedSub(y Int16x32, z Mask16x32) Int16x32 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedGreaterEqual(y Int32x8, z Mask32x8) Mask32x8 -// Add adds corresponding elements of two vectors. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPADDW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedAdd(y Int16x8, z Mask16x8) Int16x8 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedGreaterEqual(y Int32x16, z Mask32x16) Mask32x16 -// Equal compares for equality, masked. -// Const Immediate = 0. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPCMPEQW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedEqual(y Int16x8, z Mask16x8) Mask16x8 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedGreaterEqual(y Int64x2, z Mask64x2) Mask64x2 -// Greater compares for greater than. -// Const Immediate = 6. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPCMPGTW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedGreater(y Int16x8, z Mask16x8) Mask16x8 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedGreaterEqual(y Int64x4, z Mask64x4) Mask64x4 // GreaterEqual compares for greater than or equal. // Const Immediate = 5. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedGreaterEqual(y Int16x8, z Mask16x8) Mask16x8 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedGreaterEqual(y Int64x8, z Mask64x8) Mask64x8 -// Less compares for less than. -// Const Immediate = 1. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedLess(y Int16x8, z Mask16x8) Mask16x8 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedGreaterEqual(y Uint8x16, z Mask8x16) Mask8x16 -// LessEqual compares for less than or equal. 
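Because the comparisons return mask types, their results can feed the mask parameter of the masked arithmetic methods declared elsewhere in this file. A hypothetical sketch, assuming the Float64x8 MaskedSub method shown earlier, that subtracts y only in lanes where x is at least y:

func subWhereGreaterEqual(x, y Float64x8, m Mask64x8) Float64x8 {
	ge := x.MaskedGreaterEqual(y, m) // VCMPPD with immediate 5, under mask m
	return x.MaskedSub(y, ge)        // subtract y only in lanes where ge is set
}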
-// Const Immediate = 2. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedLessEqual(y Int16x8, z Mask16x8) Mask16x8 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaskedGreaterEqual(y Uint8x32, z Mask8x32) Mask8x32 -// Max computes the maximum of corresponding elements. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPMAXSW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedMax(y Int16x8, z Mask16x8) Int16x8 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedGreaterEqual(y Uint8x64, z Mask8x64) Mask8x64 -// Min computes the minimum of corresponding elements. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPMINSW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedMin(y Int16x8, z Mask16x8) Int16x8 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedGreaterEqual(y Uint16x8, z Mask16x8) Mask16x8 -// MulHigh multiplies elements and stores the high part of the result, masked. -// -// Asm: VPMULHW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedMulHigh(y Int16x8, z Mask16x8) Int16x8 - -// MulLow multiplies elements and stores the low part of the result, masked. -// -// Asm: VPMULLW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedMulLow(y Int16x8, z Mask16x8) Int16x8 - -// NotEqual compares for inequality. -// Const Immediate = 4. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedNotEqual(y Int16x8, z Mask16x8) Mask16x8 - -// PairDotProd multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. -// -// Asm: VPMADDWD, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedPairDotProd(y Int16x8, z Mask16x8) Int32x4 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedSaturatedAdd(y Int16x8, z Mask16x8) Int16x8 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPSUBSW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedSaturatedSub(y Int16x8, z Mask16x8) Int16x8 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedGreaterEqual(y Uint16x16, z Mask16x16) Mask16x16 -// Sub subtracts corresponding elements of two vectors. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPSUBW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedSub(y Int16x8, z Mask16x8) Int16x8 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedGreaterEqual(y Uint16x32, z Mask16x32) Mask16x32 -// Add adds corresponding elements of two vectors. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPADDD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedAdd(y Int32x16, z Mask32x16) Int32x16 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedGreaterEqual(y Uint32x4, z Mask32x4) Mask32x4 -// And performs a masked bitwise AND operation between two vectors. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. 
// -// Asm: VPANDD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedAnd(y Int32x16, z Mask32x16) Int32x16 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedGreaterEqual(y Uint32x8, z Mask32x8) Mask32x8 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPANDND, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedAndNot(y Int32x16, z Mask32x16) Int32x16 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedGreaterEqual(y Uint32x16, z Mask32x16) Mask32x16 -// Equal compares for equality, masked. -// Const Immediate = 0. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPCMPEQD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedEqual(y Int32x16, z Mask32x16) Mask32x16 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedGreaterEqual(y Uint64x2, z Mask64x2) Mask64x2 -// Greater compares for greater than. -// Const Immediate = 6. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPCMPGTD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedGreater(y Int32x16, z Mask32x16) Mask32x16 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedGreaterEqual(y Uint64x4, z Mask64x4) Mask64x4 // GreaterEqual compares for greater than or equal. // Const Immediate = 5. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedGreaterEqual(y Int32x16, z Mask32x16) Mask32x16 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedGreaterEqual(y Uint64x8, z Mask64x8) Mask64x8 -// Less compares for less than. -// Const Immediate = 1. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedLess(y Int32x16, z Mask32x16) Mask32x16 +/* MaskedIsNan */ -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Const Immediate = 3. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedLessEqual(y Int32x16, z Mask32x16) Mask32x16 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedIsNan(y Float32x4, z Mask32x4) Mask32x4 -// Max computes the maximum of corresponding elements. +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Const Immediate = 3. // -// Asm: VPMAXSD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedMax(y Int32x16, z Mask32x16) Int32x16 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedIsNan(y Float32x8, z Mask32x8) Mask32x8 -// Min computes the minimum of corresponding elements. +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Const Immediate = 3. // -// Asm: VPMINSD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedMin(y Int32x16, z Mask32x16) Int32x16 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedIsNan(y Float32x16, z Mask32x16) Mask32x16 -// MulLow multiplies elements and stores the low part of the result, masked. +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Const Immediate = 3. // -// Asm: VPMULLD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedMulLow(y Int32x16, z Mask32x16) Int32x16 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedIsNan(y Float64x2, z Mask64x2) Mask64x2 -// NotEqual compares for inequality. -// Const Immediate = 4. +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Const Immediate = 3. 
// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedNotEqual(y Int32x16, z Mask32x16) Mask32x16 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedIsNan(y Float64x4, z Mask64x4) Mask64x4 -// Or performs a masked bitwise OR operation between two vectors. +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Const Immediate = 3. // -// Asm: VPORD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedOr(y Int32x16, z Mask32x16) Int32x16 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedIsNan(y Float64x8, z Mask64x8) Mask64x8 -// Sub subtracts corresponding elements of two vectors. +/* MaskedLess */ + +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedSub(y Int32x16, z Mask32x16) Int32x16 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedLess(y Float32x4, z Mask32x4) Mask32x4 -// Xor performs a masked bitwise XOR operation between two vectors. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPXORD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedXor(y Int32x16, z Mask32x16) Int32x16 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedLess(y Float32x8, z Mask32x8) Mask32x8 -// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPDPWSSD, CPU Feature: AVX512EVEX -func (x Int32x16) PairDotProdAccumulate(y Int16x32, z Int32x16) Int32x16 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedLess(y Float32x16, z Mask32x16) Mask32x16 -// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX -func (x Int32x16) SaturatedPairDotProdAccumulate(y Int16x32, z Int32x16) Int32x16 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedLess(y Float64x2, z Mask64x2) Mask64x2 -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Int32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16) Int32x16 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedLess(y Float64x4, z Mask64x4) Mask64x4 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Int32x16) UnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16) Int32x16 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedLess(y Float64x8, z Mask64x8) Mask64x8 -// Add adds corresponding elements of two vectors. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPADDD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedAdd(y Int32x4, z Mask32x4) Int32x4 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x16) MaskedLess(y Int8x16, z Mask8x16) Mask8x16 -// And performs a masked bitwise AND operation between two vectors. +// Less compares for less than. +// Const Immediate = 1. 
// -// Asm: VPANDD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedAnd(y Int32x4, z Mask32x4) Int32x4 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x32) MaskedLess(y Int8x32, z Mask8x32) Mask8x32 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPANDND, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedAndNot(y Int32x4, z Mask32x4) Int32x4 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x64) MaskedLess(y Int8x64, z Mask8x64) Mask8x64 -// Equal compares for equality, masked. -// Const Immediate = 0. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPCMPEQD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedEqual(y Int32x4, z Mask32x4) Mask32x4 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedLess(y Int16x8, z Mask16x8) Mask16x8 -// Greater compares for greater than. -// Const Immediate = 6. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPCMPGTD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedGreater(y Int32x4, z Mask32x4) Mask32x4 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedLess(y Int16x16, z Mask16x16) Mask16x16 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedGreaterEqual(y Int32x4, z Mask32x4) Mask32x4 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedLess(y Int16x32, z Mask16x32) Mask16x32 // Less compares for less than. // Const Immediate = 1. @@ -5037,2670 +4894,3036 @@ func (x Int32x4) MaskedGreaterEqual(y Int32x4, z Mask32x4) Mask32x4 // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x4) MaskedLess(y Int32x4, z Mask32x4) Mask32x4 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedLessEqual(y Int32x4, z Mask32x4) Mask32x4 +func (x Int32x8) MaskedLess(y Int32x8, z Mask32x8) Mask32x8 -// Max computes the maximum of corresponding elements. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPMAXSD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedMax(y Int32x4, z Mask32x4) Int32x4 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedLess(y Int32x16, z Mask32x16) Mask32x16 -// Min computes the minimum of corresponding elements. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPMINSD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedMin(y Int32x4, z Mask32x4) Int32x4 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedLess(y Int64x2, z Mask64x2) Mask64x2 -// MulLow multiplies elements and stores the low part of the result, masked. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPMULLD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedMulLow(y Int32x4, z Mask32x4) Int32x4 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedLess(y Int64x4, z Mask64x4) Mask64x4 -// NotEqual compares for inequality. -// Const Immediate = 4. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedNotEqual(y Int32x4, z Mask32x4) Mask32x4 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedLess(y Int64x8, z Mask64x8) Mask64x8 -// Or performs a masked bitwise OR operation between two vectors. +// Less compares for less than. 
+// Const Immediate = 1. // -// Asm: VPORD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedOr(y Int32x4, z Mask32x4) Int32x4 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedLess(y Uint8x16, z Mask8x16) Mask8x16 -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedSub(y Int32x4, z Mask32x4) Int32x4 - -// Xor performs a masked bitwise XOR operation between two vectors. -// -// Asm: VPXORD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedXor(y Int32x4, z Mask32x4) Int32x4 - -// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPDPWSSD, CPU Feature: AVX_VNNI -func (x Int32x4) PairDotProdAccumulate(y Int32x4, z Int32x4) Int32x4 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaskedLess(y Uint8x32, z Mask8x32) Mask8x32 -// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPDPWSSDS, CPU Feature: AVX_VNNI -func (x Int32x4) SaturatedPairDotProdAccumulate(y Int32x4, z Int32x4) Int32x4 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedLess(y Uint8x64, z Mask8x64) Mask8x64 -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI -func (x Int32x4) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint32x4, z Int32x4) Int32x4 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedLess(y Uint16x8, z Mask16x8) Mask16x8 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPDPBUSD, CPU Feature: AVX_VNNI -func (x Int32x4) UnsignedSignedQuadDotProdAccumulate(y Uint32x4, z Int32x4) Int32x4 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedLess(y Uint16x16, z Mask16x16) Mask16x16 -// Add adds corresponding elements of two vectors. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPADDD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedAdd(y Int32x8, z Mask32x8) Int32x8 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedLess(y Uint16x32, z Mask16x32) Mask16x32 -// And performs a masked bitwise AND operation between two vectors. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPANDD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedAnd(y Int32x8, z Mask32x8) Int32x8 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedLess(y Uint32x4, z Mask32x4) Mask32x4 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPANDND, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedAndNot(y Int32x8, z Mask32x8) Int32x8 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedLess(y Uint32x8, z Mask32x8) Mask32x8 -// Equal compares for equality, masked. -// Const Immediate = 0. +// Less compares for less than. +// Const Immediate = 1. 
// -// Asm: VPCMPEQD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedEqual(y Int32x8, z Mask32x8) Mask32x8 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedLess(y Uint32x16, z Mask32x16) Mask32x16 -// Greater compares for greater than. -// Const Immediate = 6. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPCMPGTD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedGreater(y Int32x8, z Mask32x8) Mask32x8 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedLess(y Uint64x2, z Mask64x2) Mask64x2 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedGreaterEqual(y Int32x8, z Mask32x8) Mask32x8 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedLess(y Uint64x4, z Mask64x4) Mask64x4 // Less compares for less than. // Const Immediate = 1. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedLess(y Int32x8, z Mask32x8) Mask32x8 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedLess(y Uint64x8, z Mask64x8) Mask64x8 + +/* MaskedLessEqual */ // LessEqual compares for less than or equal. // Const Immediate = 2. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedLessEqual(y Int32x8, z Mask32x8) Mask32x8 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedMax(y Int32x8, z Mask32x8) Int32x8 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedMin(y Int32x8, z Mask32x8) Int32x8 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedLessEqual(y Float32x4, z Mask32x4) Mask32x4 -// MulLow multiplies elements and stores the low part of the result, masked. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPMULLD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedMulLow(y Int32x8, z Mask32x8) Int32x8 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedLessEqual(y Float32x8, z Mask32x8) Mask32x8 -// NotEqual compares for inequality. -// Const Immediate = 4. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedNotEqual(y Int32x8, z Mask32x8) Mask32x8 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedLessEqual(y Float32x16, z Mask32x16) Mask32x16 -// Or performs a masked bitwise OR operation between two vectors. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPORD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedOr(y Int32x8, z Mask32x8) Int32x8 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedLessEqual(y Float64x2, z Mask64x2) Mask64x2 -// Sub subtracts corresponding elements of two vectors. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedSub(y Int32x8, z Mask32x8) Int32x8 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedLessEqual(y Float64x4, z Mask64x4) Mask64x4 -// Xor performs a masked bitwise XOR operation between two vectors. +// LessEqual compares for less than or equal. +// Const Immediate = 2. 
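// Illustrative usage of the MaskedLess methods above; this sketch is not part
// of the change, and it assumes the vector and mask types are used through a
// "simd" package and that lanes excluded by the mask follow whatever zeroing
// or merging rule the package defines:
//
//	// lessUnderMask reports, lane by lane, whether a < b, restricted to the
//	// lanes already selected by m.
//	func lessUnderMask(a, b simd.Int32x8, m simd.Mask32x8) simd.Mask32x8 {
//		return a.MaskedLess(b, m)
//	}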
// -// Asm: VPXORD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedXor(y Int32x8, z Mask32x8) Int32x8 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedLessEqual(y Float64x8, z Mask64x8) Mask64x8 -// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPDPWSSD, CPU Feature: AVX_VNNI -func (x Int32x8) PairDotProdAccumulate(y Int32x8, z Int32x8) Int32x8 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x16) MaskedLessEqual(y Int8x16, z Mask8x16) Mask8x16 -// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPDPWSSDS, CPU Feature: AVX_VNNI -func (x Int32x8) SaturatedPairDotProdAccumulate(y Int32x8, z Int32x8) Int32x8 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x32) MaskedLessEqual(y Int8x32, z Mask8x32) Mask8x32 -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI -func (x Int32x8) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint32x8, z Int32x8) Int32x8 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x64) MaskedLessEqual(y Int8x64, z Mask8x64) Mask8x64 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPDPBUSD, CPU Feature: AVX_VNNI -func (x Int32x8) UnsignedSignedQuadDotProdAccumulate(y Uint32x8, z Int32x8) Int32x8 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedLessEqual(y Int16x8, z Mask16x8) Mask16x8 -// Add adds corresponding elements of two vectors. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPADDQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedAdd(y Int64x2, z Mask64x2) Int64x2 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedLessEqual(y Int16x16, z Mask16x16) Mask16x16 -// And performs a masked bitwise AND operation between two vectors. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPANDQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedAnd(y Int64x2, z Mask64x2) Int64x2 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedLessEqual(y Int16x32, z Mask16x32) Mask16x32 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPANDNQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedAndNot(y Int64x2, z Mask64x2) Int64x2 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedLessEqual(y Int32x4, z Mask32x4) Mask32x4 -// Equal compares for equality, masked. -// Const Immediate = 0. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPCMPEQQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedEqual(y Int64x2, z Mask64x2) Mask64x2 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedLessEqual(y Int32x8, z Mask32x8) Mask32x8 -// Greater compares for greater than. -// Const Immediate = 6. +// LessEqual compares for less than or equal. +// Const Immediate = 2. 
// -// Asm: VPCMPGTQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedGreater(y Int64x2, z Mask64x2) Mask64x2 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedLessEqual(y Int32x16, z Mask32x16) Mask32x16 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedGreaterEqual(y Int64x2, z Mask64x2) Mask64x2 +func (x Int64x2) MaskedLessEqual(y Int64x2, z Mask64x2) Mask64x2 -// Less compares for less than. -// Const Immediate = 1. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedLess(y Int64x2, z Mask64x2) Mask64x2 +func (x Int64x4) MaskedLessEqual(y Int64x4, z Mask64x4) Mask64x4 // LessEqual compares for less than or equal. // Const Immediate = 2. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedLessEqual(y Int64x2, z Mask64x2) Mask64x2 +func (x Int64x8) MaskedLessEqual(y Int64x8, z Mask64x8) Mask64x8 -// Max computes the maximum of corresponding elements. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPMAXSQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedMax(y Int64x2, z Mask64x2) Int64x2 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedLessEqual(y Uint8x16, z Mask8x16) Mask8x16 -// Min computes the minimum of corresponding elements. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPMINSQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedMin(y Int64x2, z Mask64x2) Int64x2 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaskedLessEqual(y Uint8x32, z Mask8x32) Mask8x32 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPMULDQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedMulEvenWiden(y Int64x2, z Mask64x2) Int64x2 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedLessEqual(y Uint8x64, z Mask8x64) Mask8x64 -// MulLow multiplies elements and stores the low part of the result, masked. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPMULLQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedMulLow(y Int64x2, z Mask64x2) Int64x2 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedLessEqual(y Uint16x8, z Mask16x8) Mask16x8 -// NotEqual compares for inequality. -// Const Immediate = 4. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedNotEqual(y Int64x2, z Mask64x2) Mask64x2 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedLessEqual(y Uint16x16, z Mask16x16) Mask16x16 -// Or performs a masked bitwise OR operation between two vectors. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPORQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedOr(y Int64x2, z Mask64x2) Int64x2 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedLessEqual(y Uint16x32, z Mask16x32) Mask16x32 -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedSub(y Int64x2, z Mask64x2) Int64x2 - -// Xor performs a masked bitwise XOR operation between two vectors. 
-// -// Asm: VPXORQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedXor(y Int64x2, z Mask64x2) Int64x2 - -// Add adds corresponding elements of two vectors. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPADDQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedAdd(y Int64x4, z Mask64x4) Int64x4 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedLessEqual(y Uint32x4, z Mask32x4) Mask32x4 -// And performs a masked bitwise AND operation between two vectors. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPANDQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedAnd(y Int64x4, z Mask64x4) Int64x4 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedLessEqual(y Uint32x8, z Mask32x8) Mask32x8 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPANDNQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedAndNot(y Int64x4, z Mask64x4) Int64x4 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedLessEqual(y Uint32x16, z Mask32x16) Mask32x16 -// Equal compares for equality, masked. -// Const Immediate = 0. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPCMPEQQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedEqual(y Int64x4, z Mask64x4) Mask64x4 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedLessEqual(y Uint64x2, z Mask64x2) Mask64x2 -// Greater compares for greater than. -// Const Immediate = 6. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPCMPGTQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedGreater(y Int64x4, z Mask64x4) Mask64x4 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedLessEqual(y Uint64x4, z Mask64x4) Mask64x4 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedGreaterEqual(y Int64x4, z Mask64x4) Mask64x4 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedLessEqual(y Uint64x8, z Mask64x8) Mask64x8 -// Less compares for less than. -// Const Immediate = 1. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedLess(y Int64x4, z Mask64x4) Mask64x4 +/* MaskedMax */ -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// Max computes the maximum of corresponding elements. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedLessEqual(y Int64x4, z Mask64x4) Mask64x4 +// Asm: VMAXPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedMax(y Float32x4, z Mask32x4) Float32x4 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXSQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedMax(y Int64x4, z Mask64x4) Int64x4 +// Asm: VMAXPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedMax(y Float32x8, z Mask32x8) Float32x8 -// Min computes the minimum of corresponding elements. +// Max computes the maximum of corresponding elements. // -// Asm: VPMINSQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedMin(y Int64x4, z Mask64x4) Int64x4 +// Asm: VMAXPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedMax(y Float32x16, z Mask32x16) Float32x16 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. +// Max computes the maximum of corresponding elements. 
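// A sketch (not part of this change) of a range test built from the
// MaskedLessEqual methods above, assuming lanes outside the mask compare as
// false, as the EVEX-masked compare encoding suggests:
//
//	// inClosedRange marks lanes of v with lo <= v && v <= hi, limited to m.
//	func inClosedRange(v, lo, hi simd.Float32x8, m simd.Mask32x8) simd.Mask32x8 {
//		geLo := lo.MaskedLessEqual(v, m)   // lo <= v on lanes of m
//		return v.MaskedLessEqual(hi, geLo) // ... and v <= hi
//	}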
// -// Asm: VPMULDQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedMulEvenWiden(y Int64x4, z Mask64x4) Int64x4 +// Asm: VMAXPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedMax(y Float64x2, z Mask64x2) Float64x2 -// MulLow multiplies elements and stores the low part of the result, masked. +// Max computes the maximum of corresponding elements. // -// Asm: VPMULLQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedMulLow(y Int64x4, z Mask64x4) Int64x4 +// Asm: VMAXPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedMax(y Float64x4, z Mask64x4) Float64x4 -// NotEqual compares for inequality. -// Const Immediate = 4. +// Max computes the maximum of corresponding elements. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedNotEqual(y Int64x4, z Mask64x4) Mask64x4 +// Asm: VMAXPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedMax(y Float64x8, z Mask64x8) Float64x8 -// Or performs a masked bitwise OR operation between two vectors. +// Max computes the maximum of corresponding elements. // -// Asm: VPORQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedOr(y Int64x4, z Mask64x4) Int64x4 +// Asm: VPMAXSB, CPU Feature: AVX512EVEX +func (x Int8x16) MaskedMax(y Int8x16, z Mask8x16) Int8x16 -// Sub subtracts corresponding elements of two vectors. +// Max computes the maximum of corresponding elements. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedSub(y Int64x4, z Mask64x4) Int64x4 +// Asm: VPMAXSB, CPU Feature: AVX512EVEX +func (x Int8x32) MaskedMax(y Int8x32, z Mask8x32) Int8x32 -// Xor performs a masked bitwise XOR operation between two vectors. +// Max computes the maximum of corresponding elements. // -// Asm: VPXORQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedXor(y Int64x4, z Mask64x4) Int64x4 +// Asm: VPMAXSB, CPU Feature: AVX512EVEX +func (x Int8x64) MaskedMax(y Int8x64, z Mask8x64) Int8x64 -// Add adds corresponding elements of two vectors. +// Max computes the maximum of corresponding elements. // -// Asm: VPADDQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedAdd(y Int64x8, z Mask64x8) Int64x8 +// Asm: VPMAXSW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedMax(y Int16x8, z Mask16x8) Int16x8 -// And performs a masked bitwise AND operation between two vectors. +// Max computes the maximum of corresponding elements. // -// Asm: VPANDQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedAnd(y Int64x8, z Mask64x8) Int64x8 +// Asm: VPMAXSW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedMax(y Int16x16, z Mask16x16) Int16x16 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// Max computes the maximum of corresponding elements. // -// Asm: VPANDNQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedAndNot(y Int64x8, z Mask64x8) Int64x8 +// Asm: VPMAXSW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedMax(y Int16x32, z Mask16x32) Int16x32 -// Equal compares for equality, masked. -// Const Immediate = 0. +// Max computes the maximum of corresponding elements. // -// Asm: VPCMPEQQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedEqual(y Int64x8, z Mask64x8) Mask64x8 +// Asm: VPMAXSD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedMax(y Int32x4, z Mask32x4) Int32x4 -// Greater compares for greater than. -// Const Immediate = 6. +// Max computes the maximum of corresponding elements. 
// -// Asm: VPCMPGTQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedGreater(y Int64x8, z Mask64x8) Mask64x8 +// Asm: VPMAXSD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedMax(y Int32x8, z Mask32x8) Int32x8 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// Max computes the maximum of corresponding elements. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedGreaterEqual(y Int64x8, z Mask64x8) Mask64x8 +// Asm: VPMAXSD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedMax(y Int32x16, z Mask32x16) Int32x16 -// Less compares for less than. -// Const Immediate = 1. +// Max computes the maximum of corresponding elements. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedLess(y Int64x8, z Mask64x8) Mask64x8 +// Asm: VPMAXSQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedMax(y Int64x2, z Mask64x2) Int64x2 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// Max computes the maximum of corresponding elements. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedLessEqual(y Int64x8, z Mask64x8) Mask64x8 +// Asm: VPMAXSQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedMax(y Int64x4, z Mask64x4) Int64x4 // Max computes the maximum of corresponding elements. // // Asm: VPMAXSQ, CPU Feature: AVX512EVEX func (x Int64x8) MaskedMax(y Int64x8, z Mask64x8) Int64x8 -// Min computes the minimum of corresponding elements. +// Max computes the maximum of corresponding elements. // -// Asm: VPMINSQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedMin(y Int64x8, z Mask64x8) Int64x8 +// Asm: VPMAXUB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedMax(y Uint8x16, z Mask8x16) Uint8x16 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. +// Max computes the maximum of corresponding elements. // -// Asm: VPMULDQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedMulEvenWiden(y Int64x8, z Mask64x8) Int64x8 +// Asm: VPMAXUB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaskedMax(y Uint8x32, z Mask8x32) Uint8x32 -// MulLow multiplies elements and stores the low part of the result, masked. +// Max computes the maximum of corresponding elements. // -// Asm: VPMULLQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedMulLow(y Int64x8, z Mask64x8) Int64x8 +// Asm: VPMAXUB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedMax(y Uint8x64, z Mask8x64) Uint8x64 -// NotEqual compares for inequality. -// Const Immediate = 4. +// Max computes the maximum of corresponding elements. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedNotEqual(y Int64x8, z Mask64x8) Mask64x8 +// Asm: VPMAXUW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedMax(y Uint16x8, z Mask16x8) Uint16x8 -// Or performs a masked bitwise OR operation between two vectors. +// Max computes the maximum of corresponding elements. // -// Asm: VPORQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedOr(y Int64x8, z Mask64x8) Int64x8 +// Asm: VPMAXUW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedMax(y Uint16x16, z Mask16x16) Uint16x16 -// Sub subtracts corresponding elements of two vectors. +// Max computes the maximum of corresponding elements. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedSub(y Int64x8, z Mask64x8) Int64x8 +// Asm: VPMAXUW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedMax(y Uint16x32, z Mask16x32) Uint16x32 -// Xor performs a masked bitwise XOR operation between two vectors. +// Max computes the maximum of corresponding elements. 
// -// Asm: VPXORQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedXor(y Int64x8, z Mask64x8) Int64x8 +// Asm: VPMAXUD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedMax(y Uint32x4, z Mask32x4) Uint32x4 -// Add adds corresponding elements of two vectors. +// Max computes the maximum of corresponding elements. // -// Asm: VPADDB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedAdd(y Int8x16, z Mask8x16) Int8x16 +// Asm: VPMAXUD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedMax(y Uint32x8, z Mask32x8) Uint32x8 -// Equal compares for equality, masked. -// Const Immediate = 0. +// Max computes the maximum of corresponding elements. // -// Asm: VPCMPEQB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedEqual(y Int8x16, z Mask8x16) Mask8x16 +// Asm: VPMAXUD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedMax(y Uint32x16, z Mask32x16) Uint32x16 -// Greater compares for greater than. -// Const Immediate = 6. +// Max computes the maximum of corresponding elements. // -// Asm: VPCMPGTB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedGreater(y Int8x16, z Mask8x16) Mask8x16 +// Asm: VPMAXUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedMax(y Uint64x2, z Mask64x2) Uint64x2 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// Max computes the maximum of corresponding elements. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedGreaterEqual(y Int8x16, z Mask8x16) Mask8x16 +// Asm: VPMAXUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedMax(y Uint64x4, z Mask64x4) Uint64x4 -// Less compares for less than. -// Const Immediate = 1. +// Max computes the maximum of corresponding elements. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedLess(y Int8x16, z Mask8x16) Mask8x16 +// Asm: VPMAXUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedMax(y Uint64x8, z Mask64x8) Uint64x8 -// LessEqual compares for less than or equal. -// Const Immediate = 2. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedLessEqual(y Int8x16, z Mask8x16) Mask8x16 +/* MaskedMin */ -// Max computes the maximum of corresponding elements. +// Min computes the minimum of corresponding elements. // -// Asm: VPMAXSB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedMax(y Int8x16, z Mask8x16) Int8x16 +// Asm: VMINPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedMin(y Float32x4, z Mask32x4) Float32x4 // Min computes the minimum of corresponding elements. // -// Asm: VPMINSB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedMin(y Int8x16, z Mask8x16) Int8x16 +// Asm: VMINPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedMin(y Float32x8, z Mask32x8) Float32x8 -// NotEqual compares for inequality. -// Const Immediate = 4. +// Min computes the minimum of corresponding elements. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedNotEqual(y Int8x16, z Mask8x16) Mask8x16 +// Asm: VMINPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedMin(y Float32x16, z Mask32x16) Float32x16 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// Min computes the minimum of corresponding elements. // -// Asm: VPADDSB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedSaturatedAdd(y Int8x16, z Mask8x16) Int8x16 +// Asm: VMINPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedMin(y Float64x2, z Mask64x2) Float64x2 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// Min computes the minimum of corresponding elements. 
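// Illustrative only, not part of this change: the MaskedMax family above can
// act as a per-lane lower bound, again assuming the package's masking rule
// for unselected lanes:
//
//	// raiseTo lifts lanes of v up to at least floor on the lanes chosen by m.
//	func raiseTo(v, floor simd.Uint8x16, m simd.Mask8x16) simd.Uint8x16 {
//		return v.MaskedMax(floor, m) // per-lane max(v, floor) under m
//	}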
// -// Asm: VPSUBSB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedSaturatedSub(y Int8x16, z Mask8x16) Int8x16 +// Asm: VMINPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedMin(y Float64x4, z Mask64x4) Float64x4 -// Sub subtracts corresponding elements of two vectors. +// Min computes the minimum of corresponding elements. // -// Asm: VPSUBB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedSub(y Int8x16, z Mask8x16) Int8x16 +// Asm: VMINPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedMin(y Float64x8, z Mask64x8) Float64x8 -// Add adds corresponding elements of two vectors. +// Min computes the minimum of corresponding elements. // -// Asm: VPADDB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedAdd(y Int8x32, z Mask8x32) Int8x32 +// Asm: VPMINSB, CPU Feature: AVX512EVEX +func (x Int8x16) MaskedMin(y Int8x16, z Mask8x16) Int8x16 -// Equal compares for equality, masked. -// Const Immediate = 0. +// Min computes the minimum of corresponding elements. // -// Asm: VPCMPEQB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedEqual(y Int8x32, z Mask8x32) Mask8x32 +// Asm: VPMINSB, CPU Feature: AVX512EVEX +func (x Int8x32) MaskedMin(y Int8x32, z Mask8x32) Int8x32 -// Greater compares for greater than. -// Const Immediate = 6. +// Min computes the minimum of corresponding elements. // -// Asm: VPCMPGTB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedGreater(y Int8x32, z Mask8x32) Mask8x32 +// Asm: VPMINSB, CPU Feature: AVX512EVEX +func (x Int8x64) MaskedMin(y Int8x64, z Mask8x64) Int8x64 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// Min computes the minimum of corresponding elements. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedGreaterEqual(y Int8x32, z Mask8x32) Mask8x32 +// Asm: VPMINSW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedMin(y Int16x8, z Mask16x8) Int16x8 -// Less compares for less than. -// Const Immediate = 1. +// Min computes the minimum of corresponding elements. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedLess(y Int8x32, z Mask8x32) Mask8x32 +// Asm: VPMINSW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedMin(y Int16x16, z Mask16x16) Int16x16 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// Min computes the minimum of corresponding elements. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedLessEqual(y Int8x32, z Mask8x32) Mask8x32 +// Asm: VPMINSW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedMin(y Int16x32, z Mask16x32) Int16x32 -// Max computes the maximum of corresponding elements. +// Min computes the minimum of corresponding elements. // -// Asm: VPMAXSB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedMax(y Int8x32, z Mask8x32) Int8x32 +// Asm: VPMINSD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedMin(y Int32x4, z Mask32x4) Int32x4 // Min computes the minimum of corresponding elements. // -// Asm: VPMINSB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedMin(y Int8x32, z Mask8x32) Int8x32 +// Asm: VPMINSD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedMin(y Int32x8, z Mask32x8) Int32x8 -// NotEqual compares for inequality. -// Const Immediate = 4. +// Min computes the minimum of corresponding elements. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedNotEqual(y Int8x32, z Mask8x32) Mask8x32 +// Asm: VPMINSD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedMin(y Int32x16, z Mask32x16) Int32x16 -// SaturatedAdd adds corresponding elements of two vectors with saturation. 
+// Min computes the minimum of corresponding elements. // -// Asm: VPADDSB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedSaturatedAdd(y Int8x32, z Mask8x32) Int8x32 +// Asm: VPMINSQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedMin(y Int64x2, z Mask64x2) Int64x2 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// Min computes the minimum of corresponding elements. // -// Asm: VPSUBSB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedSaturatedSub(y Int8x32, z Mask8x32) Int8x32 +// Asm: VPMINSQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedMin(y Int64x4, z Mask64x4) Int64x4 -// Sub subtracts corresponding elements of two vectors. +// Min computes the minimum of corresponding elements. // -// Asm: VPSUBB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedSub(y Int8x32, z Mask8x32) Int8x32 +// Asm: VPMINSQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedMin(y Int64x8, z Mask64x8) Int64x8 -// Add adds corresponding elements of two vectors. +// Min computes the minimum of corresponding elements. // -// Asm: VPADDB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedAdd(y Int8x64, z Mask8x64) Int8x64 +// Asm: VPMINUB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedMin(y Uint8x16, z Mask8x16) Uint8x16 -// Equal compares for equality, masked. -// Const Immediate = 0. +// Min computes the minimum of corresponding elements. // -// Asm: VPCMPEQB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedEqual(y Int8x64, z Mask8x64) Mask8x64 +// Asm: VPMINUB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaskedMin(y Uint8x32, z Mask8x32) Uint8x32 -// Greater compares for greater than. -// Const Immediate = 6. +// Min computes the minimum of corresponding elements. // -// Asm: VPCMPGTB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedGreater(y Int8x64, z Mask8x64) Mask8x64 +// Asm: VPMINUB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedMin(y Uint8x64, z Mask8x64) Uint8x64 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// Min computes the minimum of corresponding elements. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedGreaterEqual(y Int8x64, z Mask8x64) Mask8x64 +// Asm: VPMINUW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedMin(y Uint16x8, z Mask16x8) Uint16x8 -// Less compares for less than. -// Const Immediate = 1. +// Min computes the minimum of corresponding elements. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedLess(y Int8x64, z Mask8x64) Mask8x64 +// Asm: VPMINUW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedMin(y Uint16x16, z Mask16x16) Uint16x16 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// Min computes the minimum of corresponding elements. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedLessEqual(y Int8x64, z Mask8x64) Mask8x64 +// Asm: VPMINUW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedMin(y Uint16x32, z Mask16x32) Uint16x32 -// Max computes the maximum of corresponding elements. +// Min computes the minimum of corresponding elements. // -// Asm: VPMAXSB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedMax(y Int8x64, z Mask8x64) Int8x64 +// Asm: VPMINUD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedMin(y Uint32x4, z Mask32x4) Uint32x4 // Min computes the minimum of corresponding elements. 
// -// Asm: VPMINSB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedMin(y Int8x64, z Mask8x64) Int8x64 +// Asm: VPMINUD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedMin(y Uint32x8, z Mask32x8) Uint32x8 -// NotEqual compares for inequality. -// Const Immediate = 4. +// Min computes the minimum of corresponding elements. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedNotEqual(y Int8x64, z Mask8x64) Mask8x64 +// Asm: VPMINUD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedMin(y Uint32x16, z Mask32x16) Uint32x16 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// Min computes the minimum of corresponding elements. // -// Asm: VPADDSB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedSaturatedAdd(y Int8x64, z Mask8x64) Int8x64 +// Asm: VPMINUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedMin(y Uint64x2, z Mask64x2) Uint64x2 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// Min computes the minimum of corresponding elements. // -// Asm: VPSUBSB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedSaturatedSub(y Int8x64, z Mask8x64) Int8x64 +// Asm: VPMINUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedMin(y Uint64x4, z Mask64x4) Uint64x4 -// Sub subtracts corresponding elements of two vectors. +// Min computes the minimum of corresponding elements. // -// Asm: VPSUBB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedSub(y Int8x64, z Mask8x64) Int8x64 +// Asm: VPMINUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedMin(y Uint64x8, z Mask64x8) Uint64x8 -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedAdd(y Uint16x16, z Mask16x16) Uint16x16 +/* MaskedMul */ -// Average computes the rounded average of corresponding elements. +// Mul multiplies corresponding elements of two vectors, masked. // -// Asm: VPAVGW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedAverage(y Uint16x16, z Mask16x16) Uint16x16 +// Asm: VMULPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedMul(y Float32x4, z Mask32x4) Float32x4 -// Equal compares for equality, masked. -// Const Immediate = 0. +// Mul multiplies corresponding elements of two vectors, masked. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedEqual(y Uint16x16, z Mask16x16) Mask16x16 +// Asm: VMULPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedMul(y Float32x8, z Mask32x8) Float32x8 -// Greater compares for greater than. -// Const Immediate = 6. +// Mul multiplies corresponding elements of two vectors, masked. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedGreater(y Uint16x16, z Mask16x16) Mask16x16 +// Asm: VMULPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedMul(y Float32x16, z Mask32x16) Float32x16 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// Mul multiplies corresponding elements of two vectors, masked. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedGreaterEqual(y Uint16x16, z Mask16x16) Mask16x16 +// Asm: VMULPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedMul(y Float64x2, z Mask64x2) Float64x2 -// Less compares for less than. -// Const Immediate = 1. +// Mul multiplies corresponding elements of two vectors, masked. 
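// Illustrative counterpart for the MaskedMin family above (not part of this
// change); paired with MaskedMax it gives a per-lane clamp. What the
// unselected lanes hold depends on the zeroing/merging rule, which this
// patch does not spell out:
//
//	// clamp limits lanes of v to [lo, hi] wherever m selects them.
//	func clamp(v, lo, hi simd.Int32x8, m simd.Mask32x8) simd.Int32x8 {
//		return v.MaskedMax(lo, m).MaskedMin(hi, m)
//	}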
// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedLess(y Uint16x16, z Mask16x16) Mask16x16 +// Asm: VMULPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedMul(y Float64x4, z Mask64x4) Float64x4 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// Mul multiplies corresponding elements of two vectors, masked. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedLessEqual(y Uint16x16, z Mask16x16) Mask16x16 +// Asm: VMULPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedMul(y Float64x8, z Mask64x8) Float64x8 -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedMax(y Uint16x16, z Mask16x16) Uint16x16 +/* MaskedMulByPowOf2 */ -// Min computes the minimum of corresponding elements. +// MulByPowOf2 multiplies elements by a power of 2. // -// Asm: VPMINUW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedMin(y Uint16x16, z Mask16x16) Uint16x16 +// Asm: VSCALEFPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedMulByPowOf2(y Float32x4, z Mask32x4) Float32x4 -// MulHigh multiplies elements and stores the high part of the result, masked. +// MulByPowOf2 multiplies elements by a power of 2. // -// Asm: VPMULHUW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedMulHigh(y Uint16x16, z Mask16x16) Uint16x16 +// Asm: VSCALEFPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedMulByPowOf2(y Float32x8, z Mask32x8) Float32x8 -// NotEqual compares for inequality. -// Const Immediate = 4. +// MulByPowOf2 multiplies elements by a power of 2. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedNotEqual(y Uint16x16, z Mask16x16) Mask16x16 +// Asm: VSCALEFPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedMulByPowOf2(y Float32x16, z Mask32x16) Float32x16 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// MulByPowOf2 multiplies elements by a power of 2. // -// Asm: VPADDSW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedSaturatedAdd(y Uint16x16, z Mask16x16) Uint16x16 +// Asm: VSCALEFPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedMulByPowOf2(y Float64x2, z Mask64x2) Float64x2 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// MulByPowOf2 multiplies elements by a power of 2. // -// Asm: VPSUBSW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedSaturatedSub(y Uint16x16, z Mask16x16) Uint16x16 +// Asm: VSCALEFPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedMulByPowOf2(y Float64x4, z Mask64x4) Float64x4 -// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, -// yielding a vector of half as many elements with twice the input element size. +// MulByPowOf2 multiplies elements by a power of 2. // -// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedSaturatedUnsignedSignedPairDotProd(y Int16x16, z Mask16x16) Int16x16 +// Asm: VSCALEFPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedMulByPowOf2(y Float64x8, z Mask64x8) Float64x8 -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedSub(y Uint16x16, z Mask16x16) Uint16x16 +/* MaskedMulEvenWiden */ -// Add adds corresponding elements of two vectors. +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. 
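// Rough usage sketch for the masked multiply forms above, illustrative only:
//
//	// scaleSelected multiplies the lanes of v chosen by m by s.
//	func scaleSelected(v, s simd.Float64x2, m simd.Mask64x2) simd.Float64x2 {
//		return v.MaskedMul(s, m)
//	}
//
// MaskedMulByPowOf2 (VSCALEFPS/VSCALEFPD) instead multiplies each selected
// lane of v by a power of two taken from the corresponding lane of its
// argument, e.g. v.MaskedMulByPowOf2(exp, m).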
// -// Asm: VPADDW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedAdd(y Uint16x32, z Mask16x32) Uint16x32 +// Asm: VPMULDQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedMulEvenWiden(y Int64x2, z Mask64x2) Int64x2 -// Average computes the rounded average of corresponding elements. +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. // -// Asm: VPAVGW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedAverage(y Uint16x32, z Mask16x32) Uint16x32 +// Asm: VPMULDQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedMulEvenWiden(y Int64x4, z Mask64x4) Int64x4 -// Equal compares for equality, masked. -// Const Immediate = 0. +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedEqual(y Uint16x32, z Mask16x32) Mask16x32 +// Asm: VPMULDQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedMulEvenWiden(y Int64x8, z Mask64x8) Int64x8 -// Greater compares for greater than. -// Const Immediate = 6. +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedGreater(y Uint16x32, z Mask16x32) Mask16x32 +// Asm: VPMULUDQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedMulEvenWiden(y Uint64x2, z Mask64x2) Uint64x2 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedGreaterEqual(y Uint16x32, z Mask16x32) Mask16x32 +// Asm: VPMULUDQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedMulEvenWiden(y Uint64x4, z Mask64x4) Uint64x4 -// Less compares for less than. -// Const Immediate = 1. +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedLess(y Uint16x32, z Mask16x32) Mask16x32 +// Asm: VPMULUDQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedMulEvenWiden(y Uint64x8, z Mask64x8) Uint64x8 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +/* MaskedMulHigh */ + +// MulHigh multiplies elements and stores the high part of the result, masked. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedLessEqual(y Uint16x32, z Mask16x32) Mask16x32 +// Asm: VPMULHW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedMulHigh(y Int16x8, z Mask16x8) Int16x8 -// Max computes the maximum of corresponding elements. +// MulHigh multiplies elements and stores the high part of the result, masked. // -// Asm: VPMAXUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedMax(y Uint16x32, z Mask16x32) Uint16x32 +// Asm: VPMULHW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedMulHigh(y Int16x16, z Mask16x16) Int16x16 -// Min computes the minimum of corresponding elements. +// MulHigh multiplies elements and stores the high part of the result, masked. // -// Asm: VPMINUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedMin(y Uint16x32, z Mask16x32) Uint16x32 +// Asm: VPMULHW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedMulHigh(y Int16x32, z Mask16x32) Int16x32 // MulHigh multiplies elements and stores the high part of the result, masked. 
// // Asm: VPMULHUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedMulHigh(y Uint16x32, z Mask16x32) Uint16x32 +func (x Uint16x8) MaskedMulHigh(y Uint16x8, z Mask16x8) Uint16x8 -// NotEqual compares for inequality. -// Const Immediate = 4. +// MulHigh multiplies elements and stores the high part of the result, masked. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedNotEqual(y Uint16x32, z Mask16x32) Mask16x32 +// Asm: VPMULHUW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedMulHigh(y Uint16x16, z Mask16x16) Uint16x16 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// MulHigh multiplies elements and stores the high part of the result, masked. // -// Asm: VPADDSW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedSaturatedAdd(y Uint16x32, z Mask16x32) Uint16x32 +// Asm: VPMULHUW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedMulHigh(y Uint16x32, z Mask16x32) Uint16x32 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. -// -// Asm: VPSUBSW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedSaturatedSub(y Uint16x32, z Mask16x32) Uint16x32 +/* MaskedMulLow */ -// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, -// yielding a vector of half as many elements with twice the input element size. +// MulLow multiplies elements and stores the low part of the result, masked. // -// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedSaturatedUnsignedSignedPairDotProd(y Int16x32, z Mask16x32) Int16x32 +// Asm: VPMULLW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedMulLow(y Int16x8, z Mask16x8) Int16x8 -// Sub subtracts corresponding elements of two vectors. +// MulLow multiplies elements and stores the low part of the result, masked. // -// Asm: VPSUBW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedSub(y Uint16x32, z Mask16x32) Uint16x32 +// Asm: VPMULLW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedMulLow(y Int16x16, z Mask16x16) Int16x16 -// Add adds corresponding elements of two vectors. +// MulLow multiplies elements and stores the low part of the result, masked. // -// Asm: VPADDW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedAdd(y Uint16x8, z Mask16x8) Uint16x8 +// Asm: VPMULLW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedMulLow(y Int16x32, z Mask16x32) Int16x32 -// Average computes the rounded average of corresponding elements. +// MulLow multiplies elements and stores the low part of the result, masked. // -// Asm: VPAVGW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedAverage(y Uint16x8, z Mask16x8) Uint16x8 +// Asm: VPMULLD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedMulLow(y Int32x4, z Mask32x4) Int32x4 -// Equal compares for equality, masked. -// Const Immediate = 0. +// MulLow multiplies elements and stores the low part of the result, masked. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedEqual(y Uint16x8, z Mask16x8) Mask16x8 +// Asm: VPMULLD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedMulLow(y Int32x8, z Mask32x8) Int32x8 -// Greater compares for greater than. -// Const Immediate = 6. +// MulLow multiplies elements and stores the low part of the result, masked. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedGreater(y Uint16x8, z Mask16x8) Mask16x8 +// Asm: VPMULLD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedMulLow(y Int32x16, z Mask32x16) Int32x16 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. 
+// MulLow multiplies elements and stores the low part of the result, masked. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedGreaterEqual(y Uint16x8, z Mask16x8) Mask16x8 +// Asm: VPMULLQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedMulLow(y Int64x2, z Mask64x2) Int64x2 -// Less compares for less than. -// Const Immediate = 1. +// MulLow multiplies elements and stores the low part of the result, masked. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedLess(y Uint16x8, z Mask16x8) Mask16x8 +// Asm: VPMULLQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedMulLow(y Int64x4, z Mask64x4) Int64x4 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// MulLow multiplies elements and stores the low part of the result, masked. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedLessEqual(y Uint16x8, z Mask16x8) Mask16x8 +// Asm: VPMULLQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedMulLow(y Int64x8, z Mask64x8) Int64x8 -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedMax(y Uint16x8, z Mask16x8) Uint16x8 +/* MaskedNotEqual */ -// Min computes the minimum of corresponding elements. +// NotEqual compares for inequality. +// Const Immediate = 4. // -// Asm: VPMINUW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedMin(y Uint16x8, z Mask16x8) Uint16x8 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedNotEqual(y Float32x4, z Mask32x4) Mask32x4 -// MulHigh multiplies elements and stores the high part of the result, masked. +// NotEqual compares for inequality. +// Const Immediate = 4. // -// Asm: VPMULHUW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedMulHigh(y Uint16x8, z Mask16x8) Uint16x8 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedNotEqual(y Float32x8, z Mask32x8) Mask32x8 // NotEqual compares for inequality. // Const Immediate = 4. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedNotEqual(y Uint16x8, z Mask16x8) Mask16x8 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedNotEqual(y Float32x16, z Mask32x16) Mask32x16 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// NotEqual compares for inequality. +// Const Immediate = 4. // -// Asm: VPADDSW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedSaturatedAdd(y Uint16x8, z Mask16x8) Uint16x8 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedNotEqual(y Float64x2, z Mask64x2) Mask64x2 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// NotEqual compares for inequality. +// Const Immediate = 4. // -// Asm: VPSUBSW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedSaturatedSub(y Uint16x8, z Mask16x8) Uint16x8 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedNotEqual(y Float64x4, z Mask64x4) Mask64x4 -// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, -// yielding a vector of half as many elements with twice the input element size. +// NotEqual compares for inequality. +// Const Immediate = 4. // -// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedSaturatedUnsignedSignedPairDotProd(y Int16x8, z Mask16x8) Int16x8 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedNotEqual(y Float64x8, z Mask64x8) Mask64x8 -// Sub subtracts corresponding elements of two vectors. +// NotEqual compares for inequality. +// Const Immediate = 4. 
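// Sketch, not part of this change, of how the high/low multiply variants
// above pair up for 16-bit lanes; MaskedMulEvenWiden, further up, instead
// widens the products of the even-indexed lanes:
//
//	// mulParts returns the low and high 16 bits of a*b for lanes under m.
//	func mulParts(a, b simd.Int16x8, m simd.Mask16x8) (lo, hi simd.Int16x8) {
//		return a.MaskedMulLow(b, m), a.MaskedMulHigh(b, m)
//	}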
// -// Asm: VPSUBW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedSub(y Uint16x8, z Mask16x8) Uint16x8 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x16) MaskedNotEqual(y Int8x16, z Mask8x16) Mask8x16 -// Add adds corresponding elements of two vectors. +// NotEqual compares for inequality. +// Const Immediate = 4. // -// Asm: VPADDD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedAdd(y Uint32x16, z Mask32x16) Uint32x16 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x32) MaskedNotEqual(y Int8x32, z Mask8x32) Mask8x32 -// And performs a masked bitwise AND operation between two vectors. +// NotEqual compares for inequality. +// Const Immediate = 4. // -// Asm: VPANDD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedAnd(y Uint32x16, z Mask32x16) Uint32x16 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x64) MaskedNotEqual(y Int8x64, z Mask8x64) Mask8x64 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// NotEqual compares for inequality. +// Const Immediate = 4. // -// Asm: VPANDND, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedAndNot(y Uint32x16, z Mask32x16) Uint32x16 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedNotEqual(y Int16x8, z Mask16x8) Mask16x8 -// Equal compares for equality, masked. -// Const Immediate = 0. +// NotEqual compares for inequality. +// Const Immediate = 4. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedEqual(y Uint32x16, z Mask32x16) Mask32x16 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedNotEqual(y Int16x16, z Mask16x16) Mask16x16 -// Greater compares for greater than. -// Const Immediate = 6. +// NotEqual compares for inequality. +// Const Immediate = 4. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedGreater(y Uint32x16, z Mask32x16) Mask32x16 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedNotEqual(y Int16x32, z Mask16x32) Mask16x32 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// NotEqual compares for inequality. +// Const Immediate = 4. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedGreaterEqual(y Uint32x16, z Mask32x16) Mask32x16 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedNotEqual(y Int32x4, z Mask32x4) Mask32x4 -// Less compares for less than. -// Const Immediate = 1. +// NotEqual compares for inequality. +// Const Immediate = 4. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedLess(y Uint32x16, z Mask32x16) Mask32x16 - -// LessEqual compares for less than or equal. -// Const Immediate = 2. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedLessEqual(y Uint32x16, z Mask32x16) Mask32x16 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedMax(y Uint32x16, z Mask32x16) Uint32x16 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedMin(y Uint32x16, z Mask32x16) Uint32x16 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedNotEqual(y Int32x8, z Mask32x8) Mask32x8 // NotEqual compares for inequality. // Const Immediate = 4. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedNotEqual(y Uint32x16, z Mask32x16) Mask32x16 - -// Or performs a masked bitwise OR operation between two vectors. 
-// -// Asm: VPORD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedOr(y Uint32x16, z Mask32x16) Uint32x16 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedNotEqual(y Int32x16, z Mask32x16) Mask32x16 -// Sub subtracts corresponding elements of two vectors. +// NotEqual compares for inequality. +// Const Immediate = 4. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedSub(y Uint32x16, z Mask32x16) Uint32x16 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedNotEqual(y Int64x2, z Mask64x2) Mask64x2 -// Xor performs a masked bitwise XOR operation between two vectors. +// NotEqual compares for inequality. +// Const Immediate = 4. // -// Asm: VPXORD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedXor(y Uint32x16, z Mask32x16) Uint32x16 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedNotEqual(y Int64x4, z Mask64x4) Mask64x4 -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// NotEqual compares for inequality. +// Const Immediate = 4. // -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Uint32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16) Uint32x16 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedNotEqual(y Int64x8, z Mask64x8) Mask64x8 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// NotEqual compares for inequality. +// Const Immediate = 4. // -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Uint32x16) UnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16) Uint32x16 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedNotEqual(y Uint8x16, z Mask8x16) Mask8x16 -// Add adds corresponding elements of two vectors. +// NotEqual compares for inequality. +// Const Immediate = 4. // -// Asm: VPADDD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedAdd(y Uint32x4, z Mask32x4) Uint32x4 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaskedNotEqual(y Uint8x32, z Mask8x32) Mask8x32 -// And performs a masked bitwise AND operation between two vectors. +// NotEqual compares for inequality. +// Const Immediate = 4. // -// Asm: VPANDD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedAnd(y Uint32x4, z Mask32x4) Uint32x4 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedNotEqual(y Uint8x64, z Mask8x64) Mask8x64 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// NotEqual compares for inequality. +// Const Immediate = 4. // -// Asm: VPANDND, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedAndNot(y Uint32x4, z Mask32x4) Uint32x4 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedNotEqual(y Uint16x8, z Mask16x8) Mask16x8 -// Equal compares for equality, masked. -// Const Immediate = 0. +// NotEqual compares for inequality. +// Const Immediate = 4. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedEqual(y Uint32x4, z Mask32x4) Mask32x4 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedNotEqual(y Uint16x16, z Mask16x16) Mask16x16 -// Greater compares for greater than. -// Const Immediate = 6. +// NotEqual compares for inequality. +// Const Immediate = 4. 
// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedGreater(y Uint32x4, z Mask32x4) Mask32x4 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedNotEqual(y Uint16x32, z Mask16x32) Mask16x32 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedGreaterEqual(y Uint32x4, z Mask32x4) Mask32x4 +func (x Uint32x4) MaskedNotEqual(y Uint32x4, z Mask32x4) Mask32x4 -// Less compares for less than. -// Const Immediate = 1. +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedLess(y Uint32x4, z Mask32x4) Mask32x4 +func (x Uint32x8) MaskedNotEqual(y Uint32x8, z Mask32x8) Mask32x8 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedLessEqual(y Uint32x4, z Mask32x4) Mask32x4 +func (x Uint32x16) MaskedNotEqual(y Uint32x16, z Mask32x16) Mask32x16 -// Max computes the maximum of corresponding elements. +// NotEqual compares for inequality. +// Const Immediate = 4. // -// Asm: VPMAXUD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedMax(y Uint32x4, z Mask32x4) Uint32x4 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedNotEqual(y Uint64x2, z Mask64x2) Mask64x2 -// Min computes the minimum of corresponding elements. +// NotEqual compares for inequality. +// Const Immediate = 4. // -// Asm: VPMINUD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedMin(y Uint32x4, z Mask32x4) Uint32x4 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedNotEqual(y Uint64x4, z Mask64x4) Mask64x4 // NotEqual compares for inequality. // Const Immediate = 4. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedNotEqual(y Uint32x4, z Mask32x4) Mask32x4 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedNotEqual(y Uint64x8, z Mask64x8) Mask64x8 + +/* MaskedOr */ // Or performs a masked bitwise OR operation between two vectors. // -// Asm: VPORD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedOr(y Uint32x4, z Mask32x4) Uint32x4 +// Asm: VORPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedOr(y Float32x4, z Mask32x4) Float32x4 -// Sub subtracts corresponding elements of two vectors. +// Or performs a masked bitwise OR operation between two vectors. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedSub(y Uint32x4, z Mask32x4) Uint32x4 +// Asm: VORPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedOr(y Float32x8, z Mask32x8) Float32x8 -// Xor performs a masked bitwise XOR operation between two vectors. +// Or performs a masked bitwise OR operation between two vectors. // -// Asm: VPXORD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedXor(y Uint32x4, z Mask32x4) Uint32x4 +// Asm: VORPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedOr(y Float32x16, z Mask32x16) Float32x16 -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// Or performs a masked bitwise OR operation between two vectors. 
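// Illustrative only: a MaskedNotEqual compare from the group above can drive
// change detection, again assuming lanes outside the mask report false:
//
//	// changed marks lanes where cur differs from prev, limited to m.
//	func changed(cur, prev simd.Int32x8, m simd.Mask32x8) simd.Mask32x8 {
//		return cur.MaskedNotEqual(prev, m)
//	}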
// -// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI -func (x Uint32x4) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint32x4, z Int32x4) Uint32x4 +// Asm: VORPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedOr(y Float64x2, z Mask64x2) Float64x2 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// Or performs a masked bitwise OR operation between two vectors. // -// Asm: VPDPBUSD, CPU Feature: AVX_VNNI -func (x Uint32x4) UnsignedSignedQuadDotProdAccumulate(y Uint32x4, z Int32x4) Uint32x4 +// Asm: VORPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedOr(y Float64x4, z Mask64x4) Float64x4 -// Add adds corresponding elements of two vectors. +// Or performs a masked bitwise OR operation between two vectors. // -// Asm: VPADDD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedAdd(y Uint32x8, z Mask32x8) Uint32x8 +// Asm: VORPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedOr(y Float64x8, z Mask64x8) Float64x8 -// And performs a masked bitwise AND operation between two vectors. +// Or performs a masked bitwise OR operation between two vectors. // -// Asm: VPANDD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedAnd(y Uint32x8, z Mask32x8) Uint32x8 +// Asm: VPORD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedOr(y Int32x4, z Mask32x4) Int32x4 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// Or performs a masked bitwise OR operation between two vectors. // -// Asm: VPANDND, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedAndNot(y Uint32x8, z Mask32x8) Uint32x8 +// Asm: VPORD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedOr(y Int32x8, z Mask32x8) Int32x8 -// Equal compares for equality, masked. -// Const Immediate = 0. +// Or performs a masked bitwise OR operation between two vectors. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedEqual(y Uint32x8, z Mask32x8) Mask32x8 +// Asm: VPORD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedOr(y Int32x16, z Mask32x16) Int32x16 -// Greater compares for greater than. -// Const Immediate = 6. +// Or performs a masked bitwise OR operation between two vectors. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedGreater(y Uint32x8, z Mask32x8) Mask32x8 +// Asm: VPORQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedOr(y Int64x2, z Mask64x2) Int64x2 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// Or performs a masked bitwise OR operation between two vectors. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedGreaterEqual(y Uint32x8, z Mask32x8) Mask32x8 +// Asm: VPORQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedOr(y Int64x4, z Mask64x4) Int64x4 -// Less compares for less than. -// Const Immediate = 1. +// Or performs a masked bitwise OR operation between two vectors. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedLess(y Uint32x8, z Mask32x8) Mask32x8 +// Asm: VPORQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedOr(y Int64x8, z Mask64x8) Int64x8 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// Or performs a masked bitwise OR operation between two vectors. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedLessEqual(y Uint32x8, z Mask32x8) Mask32x8 +// Asm: VPORD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedOr(y Uint32x4, z Mask32x4) Uint32x4 -// Max computes the maximum of corresponding elements. +// Or performs a masked bitwise OR operation between two vectors. 
// -// Asm: VPMAXUD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedMax(y Uint32x8, z Mask32x8) Uint32x8 +// Asm: VPORD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedOr(y Uint32x8, z Mask32x8) Uint32x8 -// Min computes the minimum of corresponding elements. +// Or performs a masked bitwise OR operation between two vectors. // -// Asm: VPMINUD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedMin(y Uint32x8, z Mask32x8) Uint32x8 +// Asm: VPORD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedOr(y Uint32x16, z Mask32x16) Uint32x16 -// NotEqual compares for inequality. -// Const Immediate = 4. +// Or performs a masked bitwise OR operation between two vectors. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedNotEqual(y Uint32x8, z Mask32x8) Mask32x8 +// Asm: VPORQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedOr(y Uint64x2, z Mask64x2) Uint64x2 // Or performs a masked bitwise OR operation between two vectors. // -// Asm: VPORD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedOr(y Uint32x8, z Mask32x8) Uint32x8 +// Asm: VPORQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedOr(y Uint64x4, z Mask64x4) Uint64x4 -// Sub subtracts corresponding elements of two vectors. +// Or performs a masked bitwise OR operation between two vectors. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedSub(y Uint32x8, z Mask32x8) Uint32x8 +// Asm: VPORQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedOr(y Uint64x8, z Mask64x8) Uint64x8 -// Xor performs a masked bitwise XOR operation between two vectors. -// -// Asm: VPXORD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedXor(y Uint32x8, z Mask32x8) Uint32x8 +/* MaskedPairDotProd */ -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// PairDotProd multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. // -// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI -func (x Uint32x8) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint32x8, z Int32x8) Uint32x8 +// Asm: VPMADDWD, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedPairDotProd(y Int16x8, z Mask16x8) Int32x4 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// PairDotProd multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. // -// Asm: VPDPBUSD, CPU Feature: AVX_VNNI -func (x Uint32x8) UnsignedSignedQuadDotProdAccumulate(y Uint32x8, z Int32x8) Uint32x8 +// Asm: VPMADDWD, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedPairDotProd(y Int16x16, z Mask16x16) Int32x8 -// Add adds corresponding elements of two vectors. +// PairDotProd multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. // -// Asm: VPADDQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedAdd(y Uint64x2, z Mask64x2) Uint64x2 +// Asm: VPMADDWD, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedPairDotProd(y Int16x32, z Mask16x32) Int32x16 -// And performs a masked bitwise AND operation between two vectors. -// -// Asm: VPANDQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedAnd(y Uint64x2, z Mask64x2) Uint64x2 +/* MaskedPairDotProdAccumulate */ -// AndNot performs a masked bitwise AND NOT operation between two vectors. 
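// A sketch of the masked bitwise OR forms above, under the same assumptions as
// the earlier sketch (package at "internal/simd", GOEXPERIMENT=simd, amd64).
// OR is declared for the float shapes as well as the integer ones; per the
// doc comments the float variants lower to VORPS/VORPD and the integer ones
// to VPORD/VPORQ.
package main

import "internal/simd"

func main() {
	var x, y simd.Int32x4
	var m simd.Mask32x4
	_ = x.MaskedOr(y, m) // VPORD under a 32-bit-lane mask

	var fx, fy simd.Float32x4
	_ = fx.MaskedOr(fy, m) // VORPS: OR of the float lanes' bit patterns
}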
+// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // -// Asm: VPANDNQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedAndNot(y Uint64x2, z Mask64x2) Uint64x2 +// Asm: VPDPWSSD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedPairDotProdAccumulate(y Int16x8, z Int32x4, u Mask32x4) Int32x4 -// Equal compares for equality, masked. -// Const Immediate = 0. +// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedEqual(y Uint64x2, z Mask64x2) Mask64x2 +// Asm: VPDPWSSD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedPairDotProdAccumulate(y Int16x16, z Int32x8, u Mask32x8) Int32x8 -// Greater compares for greater than. -// Const Immediate = 6. +// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedGreater(y Uint64x2, z Mask64x2) Mask64x2 +// Asm: VPDPWSSD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedPairDotProdAccumulate(y Int16x32, z Int32x16, u Mask32x16) Int32x16 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedGreaterEqual(y Uint64x2, z Mask64x2) Mask64x2 +/* MaskedPopCount */ -// Less compares for less than. -// Const Immediate = 1. +// PopCount counts the number of set bits in each element. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedLess(y Uint64x2, z Mask64x2) Mask64x2 +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Int8x16) MaskedPopCount(y Mask8x16) Int8x16 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// PopCount counts the number of set bits in each element. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedLessEqual(y Uint64x2, z Mask64x2) Mask64x2 +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Int8x32) MaskedPopCount(y Mask8x32) Int8x32 -// Max computes the maximum of corresponding elements. +// PopCount counts the number of set bits in each element. // -// Asm: VPMAXUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedMax(y Uint64x2, z Mask64x2) Uint64x2 +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Int8x64) MaskedPopCount(y Mask8x64) Int8x64 -// Min computes the minimum of corresponding elements. +// PopCount counts the number of set bits in each element. // -// Asm: VPMINUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedMin(y Uint64x2, z Mask64x2) Uint64x2 +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedPopCount(y Mask16x8) Int16x8 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. +// PopCount counts the number of set bits in each element. // -// Asm: VPMULUDQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedMulEvenWiden(y Uint64x2, z Mask64x2) Uint64x2 +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedPopCount(y Mask16x16) Int16x16 -// NotEqual compares for inequality. -// Const Immediate = 4. +// PopCount counts the number of set bits in each element. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedNotEqual(y Uint64x2, z Mask64x2) Mask64x2 +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedPopCount(y Mask16x32) Int16x32 -// Or performs a masked bitwise OR operation between two vectors. 
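// A sketch of the widening pair dot product declared above, same assumptions
// as the earlier sketches (package at "internal/simd", GOEXPERIMENT=simd,
// amd64); "simdexamples" is just a hypothetical example package name.
// MaskedPairDotProd takes two Int16 vectors and yields half as many Int32
// lanes, each the sum of a product pair (VPMADDWD); the Accumulate form adds
// dot products into an existing Int32 vector (VPDPWSSD).
package simdexamples

import "internal/simd"

func pairDot(a, b simd.Int16x8, m simd.Mask16x8) simd.Int32x4 {
	return a.MaskedPairDotProd(b, m) // 8 int16 lanes pair up into 4 int32 sums
}

func pairDotAcc(x simd.Int32x4, y simd.Int16x8, z simd.Int32x4, m simd.Mask32x4) simd.Int32x4 {
	// Accumulating form: per the doc comment above, dot products over y and z
	// are added into x. Argument types follow the declaration as written.
	return x.MaskedPairDotProdAccumulate(y, z, m)
}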
+// PopCount counts the number of set bits in each element. // -// Asm: VPORQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedOr(y Uint64x2, z Mask64x2) Uint64x2 +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedPopCount(y Mask32x4) Int32x4 -// Sub subtracts corresponding elements of two vectors. +// PopCount counts the number of set bits in each element. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedSub(y Uint64x2, z Mask64x2) Uint64x2 +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedPopCount(y Mask32x8) Int32x8 -// Xor performs a masked bitwise XOR operation between two vectors. +// PopCount counts the number of set bits in each element. // -// Asm: VPXORQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedXor(y Uint64x2, z Mask64x2) Uint64x2 +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedPopCount(y Mask32x16) Int32x16 -// Add adds corresponding elements of two vectors. +// PopCount counts the number of set bits in each element. // -// Asm: VPADDQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedAdd(y Uint64x4, z Mask64x4) Uint64x4 +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedPopCount(y Mask64x2) Int64x2 -// And performs a masked bitwise AND operation between two vectors. +// PopCount counts the number of set bits in each element. // -// Asm: VPANDQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedAnd(y Uint64x4, z Mask64x4) Uint64x4 +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedPopCount(y Mask64x4) Int64x4 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// PopCount counts the number of set bits in each element. // -// Asm: VPANDNQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedAndNot(y Uint64x4, z Mask64x4) Uint64x4 +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedPopCount(y Mask64x8) Int64x8 -// Equal compares for equality, masked. -// Const Immediate = 0. +// PopCount counts the number of set bits in each element. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedEqual(y Uint64x4, z Mask64x4) Mask64x4 +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedPopCount(y Mask8x16) Uint8x16 -// Greater compares for greater than. -// Const Immediate = 6. +// PopCount counts the number of set bits in each element. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedGreater(y Uint64x4, z Mask64x4) Mask64x4 +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaskedPopCount(y Mask8x32) Uint8x32 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// PopCount counts the number of set bits in each element. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedGreaterEqual(y Uint64x4, z Mask64x4) Mask64x4 +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedPopCount(y Mask8x64) Uint8x64 -// Less compares for less than. -// Const Immediate = 1. +// PopCount counts the number of set bits in each element. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedLess(y Uint64x4, z Mask64x4) Mask64x4 +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedPopCount(y Mask16x8) Uint16x8 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// PopCount counts the number of set bits in each element. 
// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedLessEqual(y Uint64x4, z Mask64x4) Mask64x4 +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedPopCount(y Mask16x16) Uint16x16 -// Max computes the maximum of corresponding elements. +// PopCount counts the number of set bits in each element. // -// Asm: VPMAXUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedMax(y Uint64x4, z Mask64x4) Uint64x4 +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedPopCount(y Mask16x32) Uint16x32 -// Min computes the minimum of corresponding elements. +// PopCount counts the number of set bits in each element. // -// Asm: VPMINUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedMin(y Uint64x4, z Mask64x4) Uint64x4 +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedPopCount(y Mask32x4) Uint32x4 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. +// PopCount counts the number of set bits in each element. // -// Asm: VPMULUDQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedMulEvenWiden(y Uint64x4, z Mask64x4) Uint64x4 +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedPopCount(y Mask32x8) Uint32x8 -// NotEqual compares for inequality. -// Const Immediate = 4. +// PopCount counts the number of set bits in each element. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedNotEqual(y Uint64x4, z Mask64x4) Mask64x4 +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedPopCount(y Mask32x16) Uint32x16 -// Or performs a masked bitwise OR operation between two vectors. +// PopCount counts the number of set bits in each element. // -// Asm: VPORQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedOr(y Uint64x4, z Mask64x4) Uint64x4 +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedPopCount(y Mask64x2) Uint64x2 -// Sub subtracts corresponding elements of two vectors. +// PopCount counts the number of set bits in each element. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedSub(y Uint64x4, z Mask64x4) Uint64x4 +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedPopCount(y Mask64x4) Uint64x4 -// Xor performs a masked bitwise XOR operation between two vectors. +// PopCount counts the number of set bits in each element. // -// Asm: VPXORQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedXor(y Uint64x4, z Mask64x4) Uint64x4 +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedPopCount(y Mask64x8) Uint64x8 -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedAdd(y Uint64x8, z Mask64x8) Uint64x8 +/* MaskedRoundSuppressExceptionWithPrecision */ -// And performs a masked bitwise AND operation between two vectors. +// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. +// Const Immediate = 8. // -// Asm: VPANDQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedAnd(y Uint64x8, z Mask64x8) Uint64x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. +// Const Immediate = 8. 
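// A sketch of the per-lane population count declared above, same assumptions
// as the earlier sketches. MaskedPopCount takes only a mask and returns a
// vector whose lanes hold the number of set bits in the corresponding input
// lane (VPOPCNTB/W/D/Q, depending on lane width).
package simdexamples

import "internal/simd"

func popCounts(v simd.Uint8x16, m simd.Mask8x16) simd.Uint8x16 {
	return v.MaskedPopCount(m) // bit count per byte lane
}

func popCounts64(v simd.Uint64x2, m simd.Mask64x2) simd.Uint64x2 {
	return v.MaskedPopCount(m) // bit count per 64-bit lane
}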
// -// Asm: VPANDNQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedAndNot(y Uint64x8, z Mask64x8) Uint64x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 -// Equal compares for equality, masked. -// Const Immediate = 0. +// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. +// Const Immediate = 8. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedEqual(y Uint64x8, z Mask64x8) Mask64x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 -// Greater compares for greater than. -// Const Immediate = 6. +// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. +// Const Immediate = 8. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedGreater(y Uint64x8, z Mask64x8) Mask64x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. +// Const Immediate = 8. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedGreaterEqual(y Uint64x8, z Mask64x8) Mask64x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 -// Less compares for less than. -// Const Immediate = 1. +// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. +// Const Immediate = 8. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedLess(y Uint64x8, z Mask64x8) Mask64x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 -// LessEqual compares for less than or equal. -// Const Immediate = 2. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedLessEqual(y Uint64x8, z Mask64x8) Mask64x8 +/* MaskedRoundWithPrecision */ -// Max computes the maximum of corresponding elements. +// RoundWithPrecision rounds elements with specified precision. +// Const Immediate = 0. // -// Asm: VPMAXUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedMax(y Uint64x8, z Mask64x8) Uint64x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedRoundWithPrecision(imm uint8, y Mask32x4) Float32x4 -// Min computes the minimum of corresponding elements. +// RoundWithPrecision rounds elements with specified precision. +// Const Immediate = 0. // -// Asm: VPMINUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedMin(y Uint64x8, z Mask64x8) Uint64x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedRoundWithPrecision(imm uint8, y Mask32x8) Float32x8 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. +// RoundWithPrecision rounds elements with specified precision. +// Const Immediate = 0. // -// Asm: VPMULUDQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedMulEvenWiden(y Uint64x8, z Mask64x8) Uint64x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedRoundWithPrecision(imm uint8, y Mask32x16) Float32x16 -// NotEqual compares for inequality. -// Const Immediate = 4. 
+// RoundWithPrecision rounds elements with specified precision. +// Const Immediate = 0. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedNotEqual(y Uint64x8, z Mask64x8) Mask64x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedRoundWithPrecision(imm uint8, y Mask64x2) Float64x2 -// Or performs a masked bitwise OR operation between two vectors. +// RoundWithPrecision rounds elements with specified precision. +// Const Immediate = 0. // -// Asm: VPORQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedOr(y Uint64x8, z Mask64x8) Uint64x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedRoundWithPrecision(imm uint8, y Mask64x4) Float64x4 -// Sub subtracts corresponding elements of two vectors. +// RoundWithPrecision rounds elements with specified precision. +// Const Immediate = 0. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedSub(y Uint64x8, z Mask64x8) Uint64x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedRoundWithPrecision(imm uint8, y Mask64x8) Float64x8 -// Xor performs a masked bitwise XOR operation between two vectors. -// -// Asm: VPXORQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedXor(y Uint64x8, z Mask64x8) Uint64x8 +/* MaskedSaturatedAdd */ -// Add adds corresponding elements of two vectors. +// SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VPADDB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedAdd(y Uint8x16, z Mask8x16) Uint8x16 +// Asm: VPADDSB, CPU Feature: AVX512EVEX +func (x Int8x16) MaskedSaturatedAdd(y Int8x16, z Mask8x16) Int8x16 -// Average computes the rounded average of corresponding elements. +// SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VPAVGB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedAverage(y Uint8x16, z Mask8x16) Uint8x16 +// Asm: VPADDSB, CPU Feature: AVX512EVEX +func (x Int8x32) MaskedSaturatedAdd(y Int8x32, z Mask8x32) Int8x32 -// Equal compares for equality, masked. -// Const Immediate = 0. +// SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedEqual(y Uint8x16, z Mask8x16) Mask8x16 +// Asm: VPADDSB, CPU Feature: AVX512EVEX +func (x Int8x64) MaskedSaturatedAdd(y Int8x64, z Mask8x64) Int8x64 -// Greater compares for greater than. -// Const Immediate = 6. +// SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedGreater(y Uint8x16, z Mask8x16) Mask8x16 +// Asm: VPADDSW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedSaturatedAdd(y Int16x8, z Mask16x8) Int16x8 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedGreaterEqual(y Uint8x16, z Mask8x16) Mask8x16 +// Asm: VPADDSW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedSaturatedAdd(y Int16x16, z Mask16x16) Int16x16 -// Less compares for less than. -// Const Immediate = 1. +// SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedLess(y Uint8x16, z Mask8x16) Mask8x16 +// Asm: VPADDSW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedSaturatedAdd(y Int16x32, z Mask16x32) Int16x32 -// LessEqual compares for less than or equal. -// Const Immediate = 2. 
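// A sketch of the precision-controlled rounding declared above, same
// assumptions as the earlier sketches. Both methods take a uint8 immediate
// first; judging by VRNDSCALEPS/PD it presumably selects how many fraction
// bits survive the rounding, but that is an assumption, not something these
// declarations state. The "SuppressException" variants differ only in the
// base immediate (8 rather than 0), i.e. they round without raising
// floating-point exceptions.
package simdexamples

import "internal/simd"

func roundTo(v simd.Float32x4, m simd.Mask32x4) simd.Float32x4 {
	return v.MaskedRoundWithPrecision(2, m) // "2" is only an illustrative immediate
}

func roundQuiet(v simd.Float64x2, m simd.Mask64x2) simd.Float64x2 {
	return v.MaskedRoundSuppressExceptionWithPrecision(2, m)
}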
+// SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedLessEqual(y Uint8x16, z Mask8x16) Mask8x16 +// Asm: VPADDSB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedSaturatedAdd(y Uint8x16, z Mask8x16) Uint8x16 -// Max computes the maximum of corresponding elements. +// SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VPMAXUB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedMax(y Uint8x16, z Mask8x16) Uint8x16 +// Asm: VPADDSB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaskedSaturatedAdd(y Uint8x32, z Mask8x32) Uint8x32 -// Min computes the minimum of corresponding elements. +// SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VPMINUB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedMin(y Uint8x16, z Mask8x16) Uint8x16 +// Asm: VPADDSB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedSaturatedAdd(y Uint8x64, z Mask8x64) Uint8x64 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedNotEqual(y Uint8x16, z Mask8x16) Mask8x16 +// Asm: VPADDSW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedSaturatedAdd(y Uint16x8, z Mask16x8) Uint16x8 // SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VPADDSB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedSaturatedAdd(y Uint8x16, z Mask8x16) Uint8x16 +// Asm: VPADDSW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedSaturatedAdd(y Uint16x16, z Mask16x16) Uint16x16 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VPSUBSB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedSaturatedSub(y Uint8x16, z Mask8x16) Uint8x16 +// Asm: VPADDSW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedSaturatedAdd(y Uint16x32, z Mask16x32) Uint16x32 -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedSub(y Uint8x16, z Mask8x16) Uint8x16 +/* MaskedSaturatedPairDotProdAccumulate */ -// Add adds corresponding elements of two vectors. +// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // -// Asm: VPADDB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedAdd(y Uint8x32, z Mask8x32) Uint8x32 +// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedSaturatedPairDotProdAccumulate(y Int16x8, z Int32x4, u Mask32x4) Int32x4 -// Average computes the rounded average of corresponding elements. +// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // -// Asm: VPAVGB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedAverage(y Uint8x32, z Mask8x32) Uint8x32 +// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedSaturatedPairDotProdAccumulate(y Int16x16, z Int32x8, u Mask32x8) Int32x8 -// Equal compares for equality, masked. -// Const Immediate = 0. +// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. 
// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedEqual(y Uint8x32, z Mask8x32) Mask8x32 +// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedSaturatedPairDotProdAccumulate(y Int16x32, z Int32x16, u Mask32x16) Int32x16 -// Greater compares for greater than. -// Const Immediate = 6. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedGreater(y Uint8x32, z Mask8x32) Mask8x32 +/* MaskedSaturatedSub */ -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedGreaterEqual(y Uint8x32, z Mask8x32) Mask8x32 +// Asm: VPSUBSB, CPU Feature: AVX512EVEX +func (x Int8x16) MaskedSaturatedSub(y Int8x16, z Mask8x16) Int8x16 -// Less compares for less than. -// Const Immediate = 1. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedLess(y Uint8x32, z Mask8x32) Mask8x32 +// Asm: VPSUBSB, CPU Feature: AVX512EVEX +func (x Int8x32) MaskedSaturatedSub(y Int8x32, z Mask8x32) Int8x32 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedLessEqual(y Uint8x32, z Mask8x32) Mask8x32 +// Asm: VPSUBSB, CPU Feature: AVX512EVEX +func (x Int8x64) MaskedSaturatedSub(y Int8x64, z Mask8x64) Int8x64 -// Max computes the maximum of corresponding elements. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VPMAXUB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedMax(y Uint8x32, z Mask8x32) Uint8x32 +// Asm: VPSUBSW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedSaturatedSub(y Int16x8, z Mask16x8) Int16x8 -// Min computes the minimum of corresponding elements. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VPMINUB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedMin(y Uint8x32, z Mask8x32) Uint8x32 +// Asm: VPSUBSW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedSaturatedSub(y Int16x16, z Mask16x16) Int16x16 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedNotEqual(y Uint8x32, z Mask8x32) Mask8x32 +// Asm: VPSUBSW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedSaturatedSub(y Int16x32, z Mask16x32) Int16x32 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VPADDSB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedSaturatedAdd(y Uint8x32, z Mask8x32) Uint8x32 +// Asm: VPSUBSB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedSaturatedSub(y Uint8x16, z Mask8x16) Uint8x16 // SaturatedSub subtracts corresponding elements of two vectors with saturation. // // Asm: VPSUBSB, CPU Feature: AVX512EVEX func (x Uint8x32) MaskedSaturatedSub(y Uint8x32, z Mask8x32) Uint8x32 -// Sub subtracts corresponding elements of two vectors. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. 
// -// Asm: VPSUBB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedSub(y Uint8x32, z Mask8x32) Uint8x32 +// Asm: VPSUBSB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedSaturatedSub(y Uint8x64, z Mask8x64) Uint8x64 -// Add adds corresponding elements of two vectors. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VPADDB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedAdd(y Uint8x64, z Mask8x64) Uint8x64 +// Asm: VPSUBSW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedSaturatedSub(y Uint16x8, z Mask16x8) Uint16x8 -// Average computes the rounded average of corresponding elements. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VPAVGB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedAverage(y Uint8x64, z Mask8x64) Uint8x64 +// Asm: VPSUBSW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedSaturatedSub(y Uint16x16, z Mask16x16) Uint16x16 -// Equal compares for equality, masked. -// Const Immediate = 0. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedEqual(y Uint8x64, z Mask8x64) Mask8x64 +// Asm: VPSUBSW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedSaturatedSub(y Uint16x32, z Mask16x32) Uint16x32 -// Greater compares for greater than. -// Const Immediate = 6. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedGreater(y Uint8x64, z Mask8x64) Mask8x64 +/* MaskedSaturatedUnsignedSignedPairDotProd */ -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedGreaterEqual(y Uint8x64, z Mask8x64) Mask8x64 +// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedSaturatedUnsignedSignedPairDotProd(y Int16x8, z Mask16x8) Int16x8 -// Less compares for less than. -// Const Immediate = 1. +// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedLess(y Uint8x64, z Mask8x64) Mask8x64 +// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedSaturatedUnsignedSignedPairDotProd(y Int16x16, z Mask16x16) Int16x16 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedLessEqual(y Uint8x64, z Mask8x64) Mask8x64 +// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedSaturatedUnsignedSignedPairDotProd(y Int16x32, z Mask16x32) Int16x32 -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedMax(y Uint8x64, z Mask8x64) Uint8x64 +/* MaskedSaturatedUnsignedSignedQuadDotProdAccumulate */ -// Min computes the minimum of corresponding elements. +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
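// A sketch of the saturating arithmetic declared above, same assumptions as
// the earlier sketches. Saturating add/sub clamp at the lane's representable
// range instead of wrapping: conventionally, int8 127+1 stays 127 and an
// unsigned subtraction that would go below zero stays 0.
package simdexamples

import "internal/simd"

func satAdd(a, b simd.Int8x16, m simd.Mask8x16) simd.Int8x16 {
	return a.MaskedSaturatedAdd(b, m) // VPADDSB; clamps to the int8 range
}

func satSub(a, b simd.Uint16x8, m simd.Mask16x8) simd.Uint16x8 {
	return a.MaskedSaturatedSub(b, m) // unsigned saturating subtract clamps at 0
}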
// -// Asm: VPMINUB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedMin(y Uint8x64, z Mask8x64) Uint8x64 +// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int32x4, u Mask32x4) Int32x4 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedNotEqual(y Uint8x64, z Mask8x64) Mask8x64 +// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int32x8, u Mask32x8) Int32x8 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPADDSB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedSaturatedAdd(y Uint8x64, z Mask8x64) Uint8x64 +// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16, u Mask32x16) Int32x16 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPSUBSB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedSaturatedSub(y Uint8x64, z Mask8x64) Uint8x64 +// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int32x4, u Mask32x4) Uint32x4 -// Sub subtracts corresponding elements of two vectors. +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPSUBB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedSub(y Uint8x64, z Mask8x64) Uint8x64 +// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int32x8, u Mask32x8) Uint32x8 -// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VFMADD132PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplyAdd132(y Float32x16, z Float32x16, u Mask32x16) Float32x16 +// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16, u Mask32x16) Uint32x16 -// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. -// -// Asm: VFMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplyAdd213(y Float32x16, z Float32x16, u Mask32x16) Float32x16 +/* MaskedSqrt */ -// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. +// Sqrt computes the square root of each element. // -// Asm: VFMADD231PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplyAdd231(y Float32x16, z Float32x16, u Mask32x16) Float32x16 +// Asm: VSQRTPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedSqrt(y Mask32x4) Float32x4 -// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. +// Sqrt computes the square root of each element. 
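// A sketch of the quad dot-product-accumulate declared above, same assumptions
// as the earlier sketches. Per the doc comments, groups of four elements of y
// and z are multiplied, the four products summed, and the sums accumulated
// into the 32-bit lanes of x; the "Saturated" form clamps that accumulation,
// and "UnsignedSigned" refers to the operand signedness of VPDPBUSDS.
package simdexamples

import "internal/simd"

func quadDotAcc(x simd.Int32x4, y simd.Uint8x16, z simd.Int32x4, m simd.Mask32x4) simd.Int32x4 {
	// Argument types follow the declaration as written above.
	return x.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y, z, m)
}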
// -// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplyAddSub132(y Float32x16, z Float32x16, u Mask32x16) Float32x16 +// Asm: VSQRTPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedSqrt(y Mask32x8) Float32x8 -// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// Sqrt computes the square root of each element. // -// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplyAddSub213(y Float32x16, z Float32x16, u Mask32x16) Float32x16 +// Asm: VSQRTPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedSqrt(y Mask32x16) Float32x16 -// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. +// Sqrt computes the square root of each element. // -// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplyAddSub231(y Float32x16, z Float32x16, u Mask32x16) Float32x16 +// Asm: VSQRTPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedSqrt(y Mask64x2) Float64x2 -// FusedMultiplySub132 performs `(v1 * v3) - v2`. +// Sqrt computes the square root of each element. // -// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplySub132(y Float32x16, z Float32x16, u Mask32x16) Float32x16 +// Asm: VSQRTPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedSqrt(y Mask64x4) Float64x4 -// FusedMultiplySub213 performs `(v2 * v1) - v3`. +// Sqrt computes the square root of each element. // -// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplySub213(y Float32x16, z Float32x16, u Mask32x16) Float32x16 +// Asm: VSQRTPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedSqrt(y Mask64x8) Float64x8 -// FusedMultiplySub231 performs `(v2 * v3) - v1`. -// -// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplySub231(y Float32x16, z Float32x16, u Mask32x16) Float32x16 +/* MaskedSub */ -// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplySubAdd132(y Float32x16, z Float32x16, u Mask32x16) Float32x16 +// Asm: VSUBPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedSub(y Float32x4, z Mask32x4) Float32x4 -// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplySubAdd213(y Float32x16, z Float32x16, u Mask32x16) Float32x16 +// Asm: VSUBPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedSub(y Float32x8, z Mask32x8) Float32x8 -// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplySubAdd231(y Float32x16, z Float32x16, u Mask32x16) Float32x16 +// Asm: VSUBPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedSub(y Float32x16, z Mask32x16) Float32x16 -// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. +// Sub subtracts corresponding elements of two vectors. 
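// A sketch combining the unary and binary float forms declared above, same
// assumptions as the earlier sketches: MaskedSqrt takes only the mask, while
// MaskedSub takes the second operand and then the mask, so the two compose
// naturally.
package simdexamples

import "internal/simd"

func sqrtOfDiff(a, b simd.Float32x4, m simd.Mask32x4) simd.Float32x4 {
	d := a.MaskedSub(b, m) // VSUBPS under a 32-bit-lane mask
	return d.MaskedSqrt(m) // VSQRTPS of the masked difference
}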
// -// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedNegativeMultiplyAdd132(y Float32x16, z Float32x16, u Mask32x16) Float32x16 +// Asm: VSUBPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedSub(y Float64x2, z Mask64x2) Float64x2 -// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedNegativeMultiplyAdd213(y Float32x16, z Float32x16, u Mask32x16) Float32x16 +// Asm: VSUBPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedSub(y Float64x4, z Mask64x4) Float64x4 -// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedNegativeMultiplyAdd231(y Float32x16, z Float32x16, u Mask32x16) Float32x16 +// Asm: VSUBPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedSub(y Float64x8, z Mask64x8) Float64x8 -// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedNegativeMultiplySub132(y Float32x16, z Float32x16, u Mask32x16) Float32x16 +// Asm: VPSUBB, CPU Feature: AVX512EVEX +func (x Int8x16) MaskedSub(y Int8x16, z Mask8x16) Int8x16 -// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedNegativeMultiplySub213(y Float32x16, z Float32x16, u Mask32x16) Float32x16 +// Asm: VPSUBB, CPU Feature: AVX512EVEX +func (x Int8x32) MaskedSub(y Int8x32, z Mask8x32) Int8x32 -// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedNegativeMultiplySub231(y Float32x16, z Float32x16, u Mask32x16) Float32x16 +// Asm: VPSUBB, CPU Feature: AVX512EVEX +func (x Int8x64) MaskedSub(y Int8x64, z Mask8x64) Int8x64 -// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFMADD132PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplyAdd132(y Float32x4, z Float32x4, u Mask32x4) Float32x4 +// Asm: VPSUBW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedSub(y Int16x8, z Mask16x8) Int16x8 -// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplyAdd213(y Float32x4, z Float32x4, u Mask32x4) Float32x4 +// Asm: VPSUBW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedSub(y Int16x16, z Mask16x16) Int16x16 -// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFMADD231PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplyAdd231(y Float32x4, z Float32x4, u Mask32x4) Float32x4 +// Asm: VPSUBW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedSub(y Int16x32, z Mask16x32) Int16x32 -// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. +// Sub subtracts corresponding elements of two vectors. 
// -// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplyAddSub132(y Float32x4, z Float32x4, u Mask32x4) Float32x4 +// Asm: VPSUBD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedSub(y Int32x4, z Mask32x4) Int32x4 -// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplyAddSub213(y Float32x4, z Float32x4, u Mask32x4) Float32x4 +// Asm: VPSUBD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedSub(y Int32x8, z Mask32x8) Int32x8 -// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplyAddSub231(y Float32x4, z Float32x4, u Mask32x4) Float32x4 +// Asm: VPSUBD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedSub(y Int32x16, z Mask32x16) Int32x16 -// FusedMultiplySub132 performs `(v1 * v3) - v2`. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplySub132(y Float32x4, z Float32x4, u Mask32x4) Float32x4 +// Asm: VPSUBQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedSub(y Int64x2, z Mask64x2) Int64x2 -// FusedMultiplySub213 performs `(v2 * v1) - v3`. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplySub213(y Float32x4, z Float32x4, u Mask32x4) Float32x4 +// Asm: VPSUBQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedSub(y Int64x4, z Mask64x4) Int64x4 -// FusedMultiplySub231 performs `(v2 * v3) - v1`. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplySub231(y Float32x4, z Float32x4, u Mask32x4) Float32x4 +// Asm: VPSUBQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedSub(y Int64x8, z Mask64x8) Int64x8 -// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplySubAdd132(y Float32x4, z Float32x4, u Mask32x4) Float32x4 +// Asm: VPSUBB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedSub(y Uint8x16, z Mask8x16) Uint8x16 -// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplySubAdd213(y Float32x4, z Float32x4, u Mask32x4) Float32x4 +// Asm: VPSUBB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaskedSub(y Uint8x32, z Mask8x32) Uint8x32 -// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplySubAdd231(y Float32x4, z Float32x4, u Mask32x4) Float32x4 +// Asm: VPSUBB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedSub(y Uint8x64, z Mask8x64) Uint8x64 -// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. 
+// Sub subtracts corresponding elements of two vectors. // -// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedNegativeMultiplyAdd132(y Float32x4, z Float32x4, u Mask32x4) Float32x4 +// Asm: VPSUBW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedSub(y Uint16x8, z Mask16x8) Uint16x8 -// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedNegativeMultiplyAdd213(y Float32x4, z Float32x4, u Mask32x4) Float32x4 +// Asm: VPSUBW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedSub(y Uint16x16, z Mask16x16) Uint16x16 -// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedNegativeMultiplyAdd231(y Float32x4, z Float32x4, u Mask32x4) Float32x4 +// Asm: VPSUBW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedSub(y Uint16x32, z Mask16x32) Uint16x32 -// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedNegativeMultiplySub132(y Float32x4, z Float32x4, u Mask32x4) Float32x4 +// Asm: VPSUBD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedSub(y Uint32x4, z Mask32x4) Uint32x4 -// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedNegativeMultiplySub213(y Float32x4, z Float32x4, u Mask32x4) Float32x4 +// Asm: VPSUBD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedSub(y Uint32x8, z Mask32x8) Uint32x8 -// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedNegativeMultiplySub231(y Float32x4, z Float32x4, u Mask32x4) Float32x4 +// Asm: VPSUBD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedSub(y Uint32x16, z Mask32x16) Uint32x16 -// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFMADD132PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplyAdd132(y Float32x8, z Float32x8, u Mask32x8) Float32x8 +// Asm: VPSUBQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedSub(y Uint64x2, z Mask64x2) Uint64x2 -// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplyAdd213(y Float32x8, z Float32x8, u Mask32x8) Float32x8 +// Asm: VPSUBQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedSub(y Uint64x4, z Mask64x4) Uint64x4 -// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFMADD231PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplyAdd231(y Float32x8, z Float32x8, u Mask32x8) Float32x8 +// Asm: VPSUBQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedSub(y Uint64x8, z Mask64x8) Uint64x8 -// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. 
-// -// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplyAddSub132(y Float32x8, z Float32x8, u Mask32x8) Float32x8 +/* MaskedTruncSuppressExceptionWithPrecision */ -// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. +// Const Immediate = 11. // -// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplyAddSub213(y Float32x8, z Float32x8, u Mask32x8) Float32x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 -// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. +// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. +// Const Immediate = 11. // -// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplyAddSub231(y Float32x8, z Float32x8, u Mask32x8) Float32x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 -// FusedMultiplySub132 performs `(v1 * v3) - v2`. +// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. +// Const Immediate = 11. // -// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplySub132(y Float32x8, z Float32x8, u Mask32x8) Float32x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 -// FusedMultiplySub213 performs `(v2 * v1) - v3`. +// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. +// Const Immediate = 11. // -// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplySub213(y Float32x8, z Float32x8, u Mask32x8) Float32x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 -// FusedMultiplySub231 performs `(v2 * v3) - v1`. +// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. +// Const Immediate = 11. // -// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplySub231(y Float32x8, z Float32x8, u Mask32x8) Float32x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 -// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. +// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. +// Const Immediate = 11. // -// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplySubAdd132(y Float32x8, z Float32x8, u Mask32x8) Float32x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 -// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. 
-// -// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplySubAdd213(y Float32x8, z Float32x8, u Mask32x8) Float32x8 +/* MaskedTruncWithPrecision */ -// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. +// TruncWithPrecision truncates elements with specified precision. +// Const Immediate = 3. // -// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplySubAdd231(y Float32x8, z Float32x8, u Mask32x8) Float32x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedTruncWithPrecision(imm uint8, y Mask32x4) Float32x4 -// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. +// TruncWithPrecision truncates elements with specified precision. +// Const Immediate = 3. // -// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedNegativeMultiplyAdd132(y Float32x8, z Float32x8, u Mask32x8) Float32x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedTruncWithPrecision(imm uint8, y Mask32x8) Float32x8 -// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. +// TruncWithPrecision truncates elements with specified precision. +// Const Immediate = 3. // -// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedNegativeMultiplyAdd213(y Float32x8, z Float32x8, u Mask32x8) Float32x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedTruncWithPrecision(imm uint8, y Mask32x16) Float32x16 -// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. +// TruncWithPrecision truncates elements with specified precision. +// Const Immediate = 3. // -// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedNegativeMultiplyAdd231(y Float32x8, z Float32x8, u Mask32x8) Float32x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedTruncWithPrecision(imm uint8, y Mask64x2) Float64x2 -// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. +// TruncWithPrecision truncates elements with specified precision. +// Const Immediate = 3. // -// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedNegativeMultiplySub132(y Float32x8, z Float32x8, u Mask32x8) Float32x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedTruncWithPrecision(imm uint8, y Mask64x4) Float64x4 -// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. +// TruncWithPrecision truncates elements with specified precision. +// Const Immediate = 3. // -// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedNegativeMultiplySub213(y Float32x8, z Float32x8, u Mask32x8) Float32x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedTruncWithPrecision(imm uint8, y Mask64x8) Float64x8 -// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. -// -// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedNegativeMultiplySub231(y Float32x8, z Float32x8, u Mask32x8) Float32x8 +/* MaskedUnsignedSignedQuadDotProdAccumulate */ -// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
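// A sketch of the precision-controlled truncation declared above, same
// assumptions as the earlier sketches. These mirror the rounding variants
// (same VRNDSCALE instructions, base immediates 3 and 11) but truncate toward
// zero; the uint8 immediate is presumably the same precision selector as in
// the rounding forms, which again is an assumption rather than something the
// declarations state.
package simdexamples

import "internal/simd"

func truncTo(v simd.Float64x2, m simd.Mask64x2) simd.Float64x2 {
	return v.MaskedTruncWithPrecision(2, m) // "2" is only an illustrative immediate
}

func truncQuiet(v simd.Float32x8, m simd.Mask32x8) simd.Float32x8 {
	return v.MaskedTruncSuppressExceptionWithPrecision(2, m)
}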
// -// Asm: VFMADD132PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplyAdd132(y Float64x2, z Float64x2, u Mask64x2) Float64x2 +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int32x4, u Mask32x4) Int32x4 -// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VFMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplyAdd213(y Float64x2, z Float64x2, u Mask64x2) Float64x2 +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int32x8, u Mask32x8) Int32x8 -// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VFMADD231PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplyAdd231(y Float64x2, z Float64x2, u Mask64x2) Float64x2 +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16, u Mask32x16) Int32x16 -// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplyAddSub132(y Float64x2, z Float64x2, u Mask64x2) Float64x2 +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int32x4, u Mask32x4) Uint32x4 -// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplyAddSub213(y Float64x2, z Float64x2, u Mask64x2) Float64x2 +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int32x8, u Mask32x8) Uint32x8 -// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplyAddSub231(y Float64x2, z Float64x2, u Mask64x2) Float64x2 +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16, u Mask32x16) Uint32x16 -// FusedMultiplySub132 performs `(v1 * v3) - v2`. -// -// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplySub132(y Float64x2, z Float64x2, u Mask64x2) Float64x2 +/* MaskedXor */ -// FusedMultiplySub213 performs `(v2 * v1) - v3`. +// Xor performs a masked bitwise XOR operation between two vectors. // -// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplySub213(y Float64x2, z Float64x2, u Mask64x2) Float64x2 +// Asm: VXORPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedXor(y Float32x4, z Mask32x4) Float32x4 -// FusedMultiplySub231 performs `(v2 * v3) - v1`. 
+// Xor performs a masked bitwise XOR operation between two vectors. // -// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplySub231(y Float64x2, z Float64x2, u Mask64x2) Float64x2 +// Asm: VXORPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedXor(y Float32x8, z Mask32x8) Float32x8 -// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. +// Xor performs a masked bitwise XOR operation between two vectors. // -// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplySubAdd132(y Float64x2, z Float64x2, u Mask64x2) Float64x2 +// Asm: VXORPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedXor(y Float32x16, z Mask32x16) Float32x16 -// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// Xor performs a masked bitwise XOR operation between two vectors. // -// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplySubAdd213(y Float64x2, z Float64x2, u Mask64x2) Float64x2 +// Asm: VXORPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedXor(y Float64x2, z Mask64x2) Float64x2 -// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. +// Xor performs a masked bitwise XOR operation between two vectors. // -// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplySubAdd231(y Float64x2, z Float64x2, u Mask64x2) Float64x2 +// Asm: VXORPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedXor(y Float64x4, z Mask64x4) Float64x4 -// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. +// Xor performs a masked bitwise XOR operation between two vectors. // -// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedNegativeMultiplyAdd132(y Float64x2, z Float64x2, u Mask64x2) Float64x2 +// Asm: VXORPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedXor(y Float64x8, z Mask64x8) Float64x8 -// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. +// Xor performs a masked bitwise XOR operation between two vectors. // -// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedNegativeMultiplyAdd213(y Float64x2, z Float64x2, u Mask64x2) Float64x2 +// Asm: VPXORD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedXor(y Int32x4, z Mask32x4) Int32x4 -// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. +// Xor performs a masked bitwise XOR operation between two vectors. // -// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedNegativeMultiplyAdd231(y Float64x2, z Float64x2, u Mask64x2) Float64x2 +// Asm: VPXORD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedXor(y Int32x8, z Mask32x8) Int32x8 -// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. +// Xor performs a masked bitwise XOR operation between two vectors. // -// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedNegativeMultiplySub132(y Float64x2, z Float64x2, u Mask64x2) Float64x2 +// Asm: VPXORD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedXor(y Int32x16, z Mask32x16) Int32x16 -// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. +// Xor performs a masked bitwise XOR operation between two vectors. 
// -// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedNegativeMultiplySub213(y Float64x2, z Float64x2, u Mask64x2) Float64x2 +// Asm: VPXORQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedXor(y Int64x2, z Mask64x2) Int64x2 -// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. +// Xor performs a masked bitwise XOR operation between two vectors. // -// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedNegativeMultiplySub231(y Float64x2, z Float64x2, u Mask64x2) Float64x2 +// Asm: VPXORQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedXor(y Int64x4, z Mask64x4) Int64x4 -// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. +// Xor performs a masked bitwise XOR operation between two vectors. // -// Asm: VFMADD132PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplyAdd132(y Float64x4, z Float64x4, u Mask64x4) Float64x4 +// Asm: VPXORQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedXor(y Int64x8, z Mask64x8) Int64x8 -// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// Xor performs a masked bitwise XOR operation between two vectors. // -// Asm: VFMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplyAdd213(y Float64x4, z Float64x4, u Mask64x4) Float64x4 +// Asm: VPXORD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedXor(y Uint32x4, z Mask32x4) Uint32x4 -// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. +// Xor performs a masked bitwise XOR operation between two vectors. // -// Asm: VFMADD231PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplyAdd231(y Float64x4, z Float64x4, u Mask64x4) Float64x4 +// Asm: VPXORD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedXor(y Uint32x8, z Mask32x8) Uint32x8 -// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. +// Xor performs a masked bitwise XOR operation between two vectors. // -// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplyAddSub132(y Float64x4, z Float64x4, u Mask64x4) Float64x4 +// Asm: VPXORD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedXor(y Uint32x16, z Mask32x16) Uint32x16 -// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// Xor performs a masked bitwise XOR operation between two vectors. // -// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplyAddSub213(y Float64x4, z Float64x4, u Mask64x4) Float64x4 +// Asm: VPXORQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedXor(y Uint64x2, z Mask64x2) Uint64x2 -// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. +// Xor performs a masked bitwise XOR operation between two vectors. // -// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplyAddSub231(y Float64x4, z Float64x4, u Mask64x4) Float64x4 +// Asm: VPXORQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedXor(y Uint64x4, z Mask64x4) Uint64x4 -// FusedMultiplySub132 performs `(v1 * v3) - v2`. +// Xor performs a masked bitwise XOR operation between two vectors. // -// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplySub132(y Float64x4, z Float64x4, u Mask64x4) Float64x4 +// Asm: VPXORQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedXor(y Uint64x8, z Mask64x8) Uint64x8 -// FusedMultiplySub213 performs `(v2 * v1) - v3`. 
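
An aside on how the masked forms above are meant to be used: every Masked* method takes a mask vector (Mask32x4, Mask64x8, and so on) as its final argument and applies the operation only to the selected lanes. The sketch below is not part of the patch; the helper name and the example package are mine, it assumes GOEXPERIMENT=simd on amd64 and the internal/simd import path set up earlier in this series (so it only builds inside the Go tree), and these doc comments do not specify what happens in lanes whose mask bit is clear.

package simdexample // hypothetical example package, not part of this patch

import "internal/simd"

// clearLanes zeroes the lanes of x selected by m by XORing x with itself
// under the mask (MaskedXor, VPXORD). Lanes not selected by m behave per
// the masked-op semantics, which this hunk does not document.
func clearLanes(x simd.Int32x4, m simd.Mask32x4) simd.Int32x4 {
	return x.MaskedXor(x, m)
}
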
-// -// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplySub213(y Float64x4, z Float64x4, u Mask64x4) Float64x4 +/* Max */ -// FusedMultiplySub231 performs `(v2 * v3) - v1`. +// Max computes the maximum of corresponding elements. // -// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplySub231(y Float64x4, z Float64x4, u Mask64x4) Float64x4 +// Asm: VMAXPS, CPU Feature: AVX +func (x Float32x4) Max(y Float32x4) Float32x4 -// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. +// Max computes the maximum of corresponding elements. // -// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplySubAdd132(y Float64x4, z Float64x4, u Mask64x4) Float64x4 +// Asm: VMAXPS, CPU Feature: AVX +func (x Float32x8) Max(y Float32x8) Float32x8 -// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// Max computes the maximum of corresponding elements. // -// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplySubAdd213(y Float64x4, z Float64x4, u Mask64x4) Float64x4 +// Asm: VMAXPS, CPU Feature: AVX512EVEX +func (x Float32x16) Max(y Float32x16) Float32x16 -// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. +// Max computes the maximum of corresponding elements. // -// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplySubAdd231(y Float64x4, z Float64x4, u Mask64x4) Float64x4 +// Asm: VMAXPD, CPU Feature: AVX +func (x Float64x2) Max(y Float64x2) Float64x2 -// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. +// Max computes the maximum of corresponding elements. // -// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedNegativeMultiplyAdd132(y Float64x4, z Float64x4, u Mask64x4) Float64x4 +// Asm: VMAXPD, CPU Feature: AVX +func (x Float64x4) Max(y Float64x4) Float64x4 -// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. +// Max computes the maximum of corresponding elements. // -// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedNegativeMultiplyAdd213(y Float64x4, z Float64x4, u Mask64x4) Float64x4 +// Asm: VMAXPD, CPU Feature: AVX512EVEX +func (x Float64x8) Max(y Float64x8) Float64x8 -// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. +// Max computes the maximum of corresponding elements. // -// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedNegativeMultiplyAdd231(y Float64x4, z Float64x4, u Mask64x4) Float64x4 +// Asm: VPMAXSB, CPU Feature: AVX +func (x Int8x16) Max(y Int8x16) Int8x16 -// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. +// Max computes the maximum of corresponding elements. // -// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedNegativeMultiplySub132(y Float64x4, z Float64x4, u Mask64x4) Float64x4 +// Asm: VPMAXSB, CPU Feature: AVX2 +func (x Int8x32) Max(y Int8x32) Int8x32 -// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. +// Max computes the maximum of corresponding elements. // -// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedNegativeMultiplySub213(y Float64x4, z Float64x4, u Mask64x4) Float64x4 +// Asm: VPMAXSB, CPU Feature: AVX512EVEX +func (x Int8x64) Max(y Int8x64) Int8x64 -// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. 
+// Max computes the maximum of corresponding elements. // -// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedNegativeMultiplySub231(y Float64x4, z Float64x4, u Mask64x4) Float64x4 +// Asm: VPMAXSW, CPU Feature: AVX +func (x Int16x8) Max(y Int16x8) Int16x8 -// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. +// Max computes the maximum of corresponding elements. // -// Asm: VFMADD132PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplyAdd132(y Float64x8, z Float64x8, u Mask64x8) Float64x8 +// Asm: VPMAXSW, CPU Feature: AVX2 +func (x Int16x16) Max(y Int16x16) Int16x16 -// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// Max computes the maximum of corresponding elements. // -// Asm: VFMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplyAdd213(y Float64x8, z Float64x8, u Mask64x8) Float64x8 +// Asm: VPMAXSW, CPU Feature: AVX512EVEX +func (x Int16x32) Max(y Int16x32) Int16x32 -// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. +// Max computes the maximum of corresponding elements. // -// Asm: VFMADD231PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplyAdd231(y Float64x8, z Float64x8, u Mask64x8) Float64x8 +// Asm: VPMAXSD, CPU Feature: AVX +func (x Int32x4) Max(y Int32x4) Int32x4 -// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. +// Max computes the maximum of corresponding elements. // -// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplyAddSub132(y Float64x8, z Float64x8, u Mask64x8) Float64x8 +// Asm: VPMAXSD, CPU Feature: AVX2 +func (x Int32x8) Max(y Int32x8) Int32x8 -// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// Max computes the maximum of corresponding elements. // -// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplyAddSub213(y Float64x8, z Float64x8, u Mask64x8) Float64x8 +// Asm: VPMAXSD, CPU Feature: AVX512EVEX +func (x Int32x16) Max(y Int32x16) Int32x16 -// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. +// Max computes the maximum of corresponding elements. // -// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplyAddSub231(y Float64x8, z Float64x8, u Mask64x8) Float64x8 +// Asm: VPMAXSQ, CPU Feature: AVX512EVEX +func (x Int64x2) Max(y Int64x2) Int64x2 -// FusedMultiplySub132 performs `(v1 * v3) - v2`. +// Max computes the maximum of corresponding elements. // -// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplySub132(y Float64x8, z Float64x8, u Mask64x8) Float64x8 +// Asm: VPMAXSQ, CPU Feature: AVX512EVEX +func (x Int64x4) Max(y Int64x4) Int64x4 -// FusedMultiplySub213 performs `(v2 * v1) - v3`. +// Max computes the maximum of corresponding elements. // -// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplySub213(y Float64x8, z Float64x8, u Mask64x8) Float64x8 +// Asm: VPMAXSQ, CPU Feature: AVX512EVEX +func (x Int64x8) Max(y Int64x8) Int64x8 -// FusedMultiplySub231 performs `(v2 * v3) - v1`. +// Max computes the maximum of corresponding elements. 
// -// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplySub231(y Float64x8, z Float64x8, u Mask64x8) Float64x8 +// Asm: VPMAXUB, CPU Feature: AVX +func (x Uint8x16) Max(y Uint8x16) Uint8x16 -// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. +// Max computes the maximum of corresponding elements. // -// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplySubAdd132(y Float64x8, z Float64x8, u Mask64x8) Float64x8 +// Asm: VPMAXUB, CPU Feature: AVX2 +func (x Uint8x32) Max(y Uint8x32) Uint8x32 -// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// Max computes the maximum of corresponding elements. // -// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplySubAdd213(y Float64x8, z Float64x8, u Mask64x8) Float64x8 +// Asm: VPMAXUB, CPU Feature: AVX512EVEX +func (x Uint8x64) Max(y Uint8x64) Uint8x64 -// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. +// Max computes the maximum of corresponding elements. // -// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplySubAdd231(y Float64x8, z Float64x8, u Mask64x8) Float64x8 +// Asm: VPMAXUW, CPU Feature: AVX +func (x Uint16x8) Max(y Uint16x8) Uint16x8 -// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. +// Max computes the maximum of corresponding elements. // -// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedNegativeMultiplyAdd132(y Float64x8, z Float64x8, u Mask64x8) Float64x8 +// Asm: VPMAXUW, CPU Feature: AVX2 +func (x Uint16x16) Max(y Uint16x16) Uint16x16 -// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. +// Max computes the maximum of corresponding elements. // -// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedNegativeMultiplyAdd213(y Float64x8, z Float64x8, u Mask64x8) Float64x8 +// Asm: VPMAXUW, CPU Feature: AVX512EVEX +func (x Uint16x32) Max(y Uint16x32) Uint16x32 -// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. +// Max computes the maximum of corresponding elements. // -// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedNegativeMultiplyAdd231(y Float64x8, z Float64x8, u Mask64x8) Float64x8 +// Asm: VPMAXUD, CPU Feature: AVX +func (x Uint32x4) Max(y Uint32x4) Uint32x4 -// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. +// Max computes the maximum of corresponding elements. // -// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedNegativeMultiplySub132(y Float64x8, z Float64x8, u Mask64x8) Float64x8 +// Asm: VPMAXUD, CPU Feature: AVX2 +func (x Uint32x8) Max(y Uint32x8) Uint32x8 -// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. +// Max computes the maximum of corresponding elements. // -// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedNegativeMultiplySub213(y Float64x8, z Float64x8, u Mask64x8) Float64x8 +// Asm: VPMAXUD, CPU Feature: AVX512EVEX +func (x Uint32x16) Max(y Uint32x16) Uint32x16 -// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. +// Max computes the maximum of corresponding elements. 
// -// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedNegativeMultiplySub231(y Float64x8, z Float64x8, u Mask64x8) Float64x8 +// Asm: VPMAXUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) Max(y Uint64x2) Uint64x2 -// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// Max computes the maximum of corresponding elements. // -// Asm: VPDPWSSD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedPairDotProdAccumulate(y Int16x32, z Int32x16, u Mask32x16) Int32x16 +// Asm: VPMAXUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) Max(y Uint64x4) Uint64x4 -// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// Max computes the maximum of corresponding elements. // -// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedSaturatedPairDotProdAccumulate(y Int16x32, z Int32x16, u Mask32x16) Int32x16 +// Asm: VPMAXUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) Max(y Uint64x8) Uint64x8 -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. -// -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16, u Mask32x16) Int32x16 +/* Min */ -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// Min computes the minimum of corresponding elements. // -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16, u Mask32x16) Int32x16 +// Asm: VMINPS, CPU Feature: AVX +func (x Float32x4) Min(y Float32x4) Float32x4 -// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// Min computes the minimum of corresponding elements. // -// Asm: VPDPWSSD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedPairDotProdAccumulate(y Int16x8, z Int32x4, u Mask32x4) Int32x4 +// Asm: VMINPS, CPU Feature: AVX +func (x Float32x8) Min(y Float32x8) Float32x8 -// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// Min computes the minimum of corresponding elements. // -// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedSaturatedPairDotProdAccumulate(y Int16x8, z Int32x4, u Mask32x4) Int32x4 +// Asm: VMINPS, CPU Feature: AVX512EVEX +func (x Float32x16) Min(y Float32x16) Float32x16 -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// Min computes the minimum of corresponding elements. // -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int32x4, u Mask32x4) Int32x4 +// Asm: VMINPD, CPU Feature: AVX +func (x Float64x2) Min(y Float64x2) Float64x2 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// Min computes the minimum of corresponding elements. 
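
The unmasked Max and Min methods being added here are plain element-wise operations, which makes lane-wise clamping a one-liner. A hedged sketch, under the same import and GOEXPERIMENT assumptions as the earlier example:

package simdexample

import "internal/simd"

// clamp limits every lane of x to the range [lo, hi] using the element-wise
// Max and Min methods from this patch (VPMAXSD/VPMINSD on AVX for Int32x4).
func clamp(x, lo, hi simd.Int32x4) simd.Int32x4 {
	return x.Max(lo).Min(hi)
}
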
// -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int32x4, u Mask32x4) Int32x4 +// Asm: VMINPD, CPU Feature: AVX +func (x Float64x4) Min(y Float64x4) Float64x4 -// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// Min computes the minimum of corresponding elements. // -// Asm: VPDPWSSD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedPairDotProdAccumulate(y Int16x16, z Int32x8, u Mask32x8) Int32x8 +// Asm: VMINPD, CPU Feature: AVX512EVEX +func (x Float64x8) Min(y Float64x8) Float64x8 -// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// Min computes the minimum of corresponding elements. // -// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedSaturatedPairDotProdAccumulate(y Int16x16, z Int32x8, u Mask32x8) Int32x8 +// Asm: VPMINSB, CPU Feature: AVX +func (x Int8x16) Min(y Int8x16) Int8x16 -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// Min computes the minimum of corresponding elements. // -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int32x8, u Mask32x8) Int32x8 +// Asm: VPMINSB, CPU Feature: AVX2 +func (x Int8x32) Min(y Int8x32) Int8x32 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// Min computes the minimum of corresponding elements. // -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int32x8, u Mask32x8) Int32x8 +// Asm: VPMINSB, CPU Feature: AVX512EVEX +func (x Int8x64) Min(y Int8x64) Int8x64 -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// Min computes the minimum of corresponding elements. // -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16, u Mask32x16) Uint32x16 +// Asm: VPMINSW, CPU Feature: AVX +func (x Int16x8) Min(y Int16x8) Int16x8 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// Min computes the minimum of corresponding elements. // -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16, u Mask32x16) Uint32x16 +// Asm: VPMINSW, CPU Feature: AVX2 +func (x Int16x16) Min(y Int16x16) Int16x16 -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// Min computes the minimum of corresponding elements. // -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int32x4, u Mask32x4) Uint32x4 +// Asm: VPMINSW, CPU Feature: AVX512EVEX +func (x Int16x32) Min(y Int16x32) Int16x32 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// Min computes the minimum of corresponding elements. 
// -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int32x4, u Mask32x4) Uint32x4 +// Asm: VPMINSD, CPU Feature: AVX +func (x Int32x4) Min(y Int32x4) Int32x4 -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// Min computes the minimum of corresponding elements. // -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int32x8, u Mask32x8) Uint32x8 +// Asm: VPMINSD, CPU Feature: AVX2 +func (x Int32x8) Min(y Int32x8) Int32x8 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// Min computes the minimum of corresponding elements. // -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int32x8, u Mask32x8) Uint32x8 +// Asm: VPMINSD, CPU Feature: AVX512EVEX +func (x Int32x16) Min(y Int32x16) Int32x16 -// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. -// Const Immediate = 10. +// Min computes the minimum of corresponding elements. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) CeilSuppressExceptionWithPrecision(imm8 uint8) Float32x16 +// Asm: VPMINSQ, CPU Feature: AVX512EVEX +func (x Int64x2) Min(y Int64x2) Int64x2 -// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. -// Const Immediate = 10. +// Min computes the minimum of corresponding elements. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) CeilSuppressExceptionWithPrecision(imm8 uint8) Float32x4 +// Asm: VPMINSQ, CPU Feature: AVX512EVEX +func (x Int64x4) Min(y Int64x4) Int64x4 -// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. -// Const Immediate = 10. +// Min computes the minimum of corresponding elements. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) CeilSuppressExceptionWithPrecision(imm8 uint8) Float32x8 +// Asm: VPMINSQ, CPU Feature: AVX512EVEX +func (x Int64x8) Min(y Int64x8) Int64x8 -// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. -// Const Immediate = 10. +// Min computes the minimum of corresponding elements. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) CeilSuppressExceptionWithPrecision(imm8 uint8) Float64x2 +// Asm: VPMINUB, CPU Feature: AVX +func (x Uint8x16) Min(y Uint8x16) Uint8x16 -// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. -// Const Immediate = 10. +// Min computes the minimum of corresponding elements. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) CeilSuppressExceptionWithPrecision(imm8 uint8) Float64x4 +// Asm: VPMINUB, CPU Feature: AVX2 +func (x Uint8x32) Min(y Uint8x32) Uint8x32 -// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. -// Const Immediate = 10. +// Min computes the minimum of corresponding elements. 
// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) CeilSuppressExceptionWithPrecision(imm8 uint8) Float64x8 +// Asm: VPMINUB, CPU Feature: AVX512EVEX +func (x Uint8x64) Min(y Uint8x64) Uint8x64 -// CeilWithPrecision rounds elements up with specified precision, masked. -// Const Immediate = 2. +// Min computes the minimum of corresponding elements. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) CeilWithPrecision(imm8 uint8) Float32x16 +// Asm: VPMINUW, CPU Feature: AVX +func (x Uint16x8) Min(y Uint16x8) Uint16x8 -// CeilWithPrecision rounds elements up with specified precision, masked. -// Const Immediate = 2. +// Min computes the minimum of corresponding elements. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) CeilWithPrecision(imm8 uint8) Float32x4 +// Asm: VPMINUW, CPU Feature: AVX2 +func (x Uint16x16) Min(y Uint16x16) Uint16x16 -// CeilWithPrecision rounds elements up with specified precision, masked. -// Const Immediate = 2. +// Min computes the minimum of corresponding elements. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) CeilWithPrecision(imm8 uint8) Float32x8 +// Asm: VPMINUW, CPU Feature: AVX512EVEX +func (x Uint16x32) Min(y Uint16x32) Uint16x32 -// CeilWithPrecision rounds elements up with specified precision, masked. -// Const Immediate = 2. +// Min computes the minimum of corresponding elements. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) CeilWithPrecision(imm8 uint8) Float64x2 +// Asm: VPMINUD, CPU Feature: AVX +func (x Uint32x4) Min(y Uint32x4) Uint32x4 -// CeilWithPrecision rounds elements up with specified precision, masked. -// Const Immediate = 2. +// Min computes the minimum of corresponding elements. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) CeilWithPrecision(imm8 uint8) Float64x4 +// Asm: VPMINUD, CPU Feature: AVX2 +func (x Uint32x8) Min(y Uint32x8) Uint32x8 -// CeilWithPrecision rounds elements up with specified precision, masked. -// Const Immediate = 2. +// Min computes the minimum of corresponding elements. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) CeilWithPrecision(imm8 uint8) Float64x8 +// Asm: VPMINUD, CPU Feature: AVX512EVEX +func (x Uint32x16) Min(y Uint32x16) Uint32x16 -// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. -// Const Immediate = 10. +// Min computes the minimum of corresponding elements. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float32x16 +// Asm: VPMINUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) Min(y Uint64x2) Uint64x2 -// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. -// Const Immediate = 10. +// Min computes the minimum of corresponding elements. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float32x4 +// Asm: VPMINUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) Min(y Uint64x4) Uint64x4 -// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. -// Const Immediate = 10. +// Min computes the minimum of corresponding elements. 
// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float32x8 +// Asm: VPMINUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) Min(y Uint64x8) Uint64x8 -// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. -// Const Immediate = 10. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float64x2 +/* Mul */ -// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. -// Const Immediate = 10. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float64x4 +// Asm: VMULPS, CPU Feature: AVX +func (x Float32x4) Mul(y Float32x4) Float32x4 -// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. -// Const Immediate = 10. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float64x8 +// Asm: VMULPS, CPU Feature: AVX +func (x Float32x8) Mul(y Float32x8) Float32x8 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// Const Immediate = 2. +// Mul multiplies corresponding elements of two vectors, masked. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithCeilWithPrecision(imm8 uint8) Float32x16 +// Asm: VMULPS, CPU Feature: AVX512EVEX +func (x Float32x16) Mul(y Float32x16) Float32x16 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// Const Immediate = 2. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithCeilWithPrecision(imm8 uint8) Float32x4 +// Asm: VMULPD, CPU Feature: AVX +func (x Float64x2) Mul(y Float64x2) Float64x2 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// Const Immediate = 2. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithCeilWithPrecision(imm8 uint8) Float32x8 +// Asm: VMULPD, CPU Feature: AVX +func (x Float64x4) Mul(y Float64x4) Float64x4 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// Const Immediate = 2. +// Mul multiplies corresponding elements of two vectors, masked. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithCeilWithPrecision(imm8 uint8) Float64x2 +// Asm: VMULPD, CPU Feature: AVX512EVEX +func (x Float64x8) Mul(y Float64x8) Float64x8 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// Const Immediate = 2. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithCeilWithPrecision(imm8 uint8) Float64x4 +/* MulByPowOf2 */ -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// Const Immediate = 2. +// MulByPowOf2 multiplies elements by a power of 2. 
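
Mul is an ordinary element-wise product, while MulByPowOf2 (VSCALEFPS/VSCALEFPD) scales each lane by two raised to the corresponding lane of its argument. A sketch under the same assumptions as above; note that combining the two as written needs an AVX-512 machine, since MulByPowOf2 is only listed with the AVX512EVEX feature:

package simdexample

import "internal/simd"

// squareAndScale computes x*x per lane (VMULPS) and then multiplies each
// lane by 2 raised to the corresponding lane of exp (VSCALEFPS).
func squareAndScale(x, exp simd.Float32x8) simd.Float32x8 {
	return x.Mul(x).MulByPowOf2(exp)
}
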
// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithCeilWithPrecision(imm8 uint8) Float64x8 +// Asm: VSCALEFPS, CPU Feature: AVX512EVEX +func (x Float32x4) MulByPowOf2(y Float32x4) Float32x4 -// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. -// Const Immediate = 9. +// MulByPowOf2 multiplies elements by a power of 2. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float32x16 +// Asm: VSCALEFPS, CPU Feature: AVX512EVEX +func (x Float32x8) MulByPowOf2(y Float32x8) Float32x8 -// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. -// Const Immediate = 9. +// MulByPowOf2 multiplies elements by a power of 2. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float32x4 +// Asm: VSCALEFPS, CPU Feature: AVX512EVEX +func (x Float32x16) MulByPowOf2(y Float32x16) Float32x16 -// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. -// Const Immediate = 9. +// MulByPowOf2 multiplies elements by a power of 2. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float32x8 +// Asm: VSCALEFPD, CPU Feature: AVX512EVEX +func (x Float64x2) MulByPowOf2(y Float64x2) Float64x2 -// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. -// Const Immediate = 9. +// MulByPowOf2 multiplies elements by a power of 2. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float64x2 +// Asm: VSCALEFPD, CPU Feature: AVX512EVEX +func (x Float64x4) MulByPowOf2(y Float64x4) Float64x4 -// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. -// Const Immediate = 9. +// MulByPowOf2 multiplies elements by a power of 2. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float64x4 +// Asm: VSCALEFPD, CPU Feature: AVX512EVEX +func (x Float64x8) MulByPowOf2(y Float64x8) Float64x8 -// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. -// Const Immediate = 9. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float64x8 +/* MulEvenWiden */ -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// Const Immediate = 1. +// MulEvenWiden multiplies even-indexed elements, widening the result. +// Result[i] = v1.Even[i] * v2.Even[i]. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithFloorWithPrecision(imm8 uint8) Float32x16 +// Asm: VPMULDQ, CPU Feature: AVX +func (x Int32x4) MulEvenWiden(y Int32x4) Int64x2 -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// Const Immediate = 1. +// MulEvenWiden multiplies even-indexed elements, widening the result. +// Result[i] = v1.Even[i] * v2.Even[i]. 
// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithFloorWithPrecision(imm8 uint8) Float32x4 +// Asm: VPMULDQ, CPU Feature: AVX2 +func (x Int32x8) MulEvenWiden(y Int32x8) Int64x4 -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// Const Immediate = 1. +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithFloorWithPrecision(imm8 uint8) Float32x8 +// Asm: VPMULDQ, CPU Feature: AVX512EVEX +func (x Int64x2) MulEvenWiden(y Int64x2) Int64x2 -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// Const Immediate = 1. +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithFloorWithPrecision(imm8 uint8) Float64x2 +// Asm: VPMULDQ, CPU Feature: AVX512EVEX +func (x Int64x4) MulEvenWiden(y Int64x4) Int64x4 -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// Const Immediate = 1. +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithFloorWithPrecision(imm8 uint8) Float64x4 +// Asm: VPMULDQ, CPU Feature: AVX512EVEX +func (x Int64x8) MulEvenWiden(y Int64x8) Int64x8 -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// Const Immediate = 1. +// MulEvenWiden multiplies even-indexed elements, widening the result. +// Result[i] = v1.Even[i] * v2.Even[i]. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithFloorWithPrecision(imm8 uint8) Float64x8 +// Asm: VPMULUDQ, CPU Feature: AVX +func (x Uint32x4) MulEvenWiden(y Uint32x4) Uint64x2 -// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. -// Const Immediate = 8. +// MulEvenWiden multiplies even-indexed elements, widening the result. +// Result[i] = v1.Even[i] * v2.Even[i]. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float32x16 +// Asm: VPMULUDQ, CPU Feature: AVX2 +func (x Uint32x8) MulEvenWiden(y Uint32x8) Uint64x4 + +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. +// +// Asm: VPMULUDQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MulEvenWiden(y Uint64x2) Uint64x2 + +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. +// +// Asm: VPMULUDQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MulEvenWiden(y Uint64x4) Uint64x4 + +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. +// +// Asm: VPMULUDQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MulEvenWiden(y Uint64x8) Uint64x8 + +/* MulHigh */ + +// MulHigh multiplies elements and stores the high part of the result. +// +// Asm: VPMULHW, CPU Feature: AVX +func (x Int16x8) MulHigh(y Int16x8) Int16x8 + +// MulHigh multiplies elements and stores the high part of the result. 
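
MulEvenWiden and MulHigh cover the two usual ways of keeping the bits an element-wise multiply would otherwise drop: widen the even lanes to a double-width result, or keep only the high half of each product. A sketch, with the same caveats as the earlier examples:

package simdexample

import "internal/simd"

// evenProducts returns the full 64-bit products of the even-indexed lanes
// of x and y (VPMULDQ): lanes 0 and 2 produce the two results.
func evenProducts(x, y simd.Int32x4) simd.Int64x2 {
	return x.MulEvenWiden(y)
}

// highHalves keeps the upper 16 bits of each 16x16->32 product (VPMULHW).
func highHalves(x, y simd.Int16x8) simd.Int16x8 {
	return x.MulHigh(y)
}
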
+// +// Asm: VPMULHW, CPU Feature: AVX2 +func (x Int16x16) MulHigh(y Int16x16) Int16x16 + +// MulHigh multiplies elements and stores the high part of the result, masked. +// +// Asm: VPMULHW, CPU Feature: AVX512EVEX +func (x Int16x32) MulHigh(y Int16x32) Int16x32 + +// MulHigh multiplies elements and stores the high part of the result. +// +// Asm: VPMULHUW, CPU Feature: AVX +func (x Uint16x8) MulHigh(y Uint16x8) Uint16x8 + +// MulHigh multiplies elements and stores the high part of the result. +// +// Asm: VPMULHUW, CPU Feature: AVX2 +func (x Uint16x16) MulHigh(y Uint16x16) Uint16x16 + +// MulHigh multiplies elements and stores the high part of the result, masked. +// +// Asm: VPMULHUW, CPU Feature: AVX512EVEX +func (x Uint16x32) MulHigh(y Uint16x32) Uint16x32 + +/* MulLow */ + +// MulLow multiplies elements and stores the low part of the result. +// +// Asm: VPMULLW, CPU Feature: AVX +func (x Int16x8) MulLow(y Int16x8) Int16x8 + +// MulLow multiplies elements and stores the low part of the result. +// +// Asm: VPMULLW, CPU Feature: AVX2 +func (x Int16x16) MulLow(y Int16x16) Int16x16 + +// MulLow multiplies elements and stores the low part of the result, masked. +// +// Asm: VPMULLW, CPU Feature: AVX512EVEX +func (x Int16x32) MulLow(y Int16x32) Int16x32 + +// MulLow multiplies elements and stores the low part of the result. +// +// Asm: VPMULLD, CPU Feature: AVX +func (x Int32x4) MulLow(y Int32x4) Int32x4 + +// MulLow multiplies elements and stores the low part of the result. +// +// Asm: VPMULLD, CPU Feature: AVX2 +func (x Int32x8) MulLow(y Int32x8) Int32x8 + +// MulLow multiplies elements and stores the low part of the result, masked. +// +// Asm: VPMULLD, CPU Feature: AVX512EVEX +func (x Int32x16) MulLow(y Int32x16) Int32x16 + +// MulLow multiplies elements and stores the low part of the result, masked. +// +// Asm: VPMULLQ, CPU Feature: AVX512EVEX +func (x Int64x2) MulLow(y Int64x2) Int64x2 + +// MulLow multiplies elements and stores the low part of the result, masked. +// +// Asm: VPMULLQ, CPU Feature: AVX512EVEX +func (x Int64x4) MulLow(y Int64x4) Int64x4 + +// MulLow multiplies elements and stores the low part of the result, masked. +// +// Asm: VPMULLQ, CPU Feature: AVX512EVEX +func (x Int64x8) MulLow(y Int64x8) Int64x8 + +/* NotEqual */ + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x4) NotEqual(y Float32x4) Mask32x4 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x8) NotEqual(y Float32x8) Mask32x8 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) NotEqual(y Float32x16) Mask32x16 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x2) NotEqual(y Float64x2) Mask64x2 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x4) NotEqual(y Float64x4) Mask64x4 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) NotEqual(y Float64x8) Mask64x8 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x16) NotEqual(y Int8x16) Mask8x16 + +// NotEqual compares for inequality. +// Const Immediate = 4. 
+// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x32) NotEqual(y Int8x32) Mask8x32 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x64) NotEqual(y Int8x64) Mask8x64 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x8) NotEqual(y Int16x8) Mask16x8 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x16) NotEqual(y Int16x16) Mask16x16 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x32) NotEqual(y Int16x32) Mask16x32 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x4) NotEqual(y Int32x4) Mask32x4 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x8) NotEqual(y Int32x8) Mask32x8 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x16) NotEqual(y Int32x16) Mask32x16 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x2) NotEqual(y Int64x2) Mask64x2 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x4) NotEqual(y Int64x4) Mask64x4 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x8) NotEqual(y Int64x8) Mask64x8 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x16) NotEqual(y Uint8x16) Mask8x16 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x32) NotEqual(y Uint8x32) Mask8x32 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x64) NotEqual(y Uint8x64) Mask8x64 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x8) NotEqual(y Uint16x8) Mask16x8 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x16) NotEqual(y Uint16x16) Mask16x16 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x32) NotEqual(y Uint16x32) Mask16x32 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x4) NotEqual(y Uint32x4) Mask32x4 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x8) NotEqual(y Uint32x8) Mask32x8 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x16) NotEqual(y Uint32x16) Mask32x16 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) NotEqual(y Uint64x2) Mask64x2 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) NotEqual(y Uint64x4) Mask64x4 + +// NotEqual compares for inequality. +// Const Immediate = 4. 
+// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) NotEqual(y Uint64x8) Mask64x8 + +/* Or */ + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VORPS, CPU Feature: AVX +func (x Float32x4) Or(y Float32x4) Float32x4 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VORPS, CPU Feature: AVX +func (x Float32x8) Or(y Float32x8) Float32x8 + +// Or performs a masked bitwise OR operation between two vectors. +// +// Asm: VORPS, CPU Feature: AVX512EVEX +func (x Float32x16) Or(y Float32x16) Float32x16 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VORPD, CPU Feature: AVX +func (x Float64x2) Or(y Float64x2) Float64x2 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VORPD, CPU Feature: AVX +func (x Float64x4) Or(y Float64x4) Float64x4 + +// Or performs a masked bitwise OR operation between two vectors. +// +// Asm: VORPD, CPU Feature: AVX512EVEX +func (x Float64x8) Or(y Float64x8) Float64x8 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX +func (x Int8x16) Or(y Int8x16) Int8x16 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX2 +func (x Int8x32) Or(y Int8x32) Int8x32 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX +func (x Int16x8) Or(y Int16x8) Int16x8 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX2 +func (x Int16x16) Or(y Int16x16) Int16x16 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX +func (x Int32x4) Or(y Int32x4) Int32x4 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX2 +func (x Int32x8) Or(y Int32x8) Int32x8 + +// Or performs a masked bitwise OR operation between two vectors. +// +// Asm: VPORD, CPU Feature: AVX512EVEX +func (x Int32x16) Or(y Int32x16) Int32x16 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX +func (x Int64x2) Or(y Int64x2) Int64x2 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX2 +func (x Int64x4) Or(y Int64x4) Int64x4 + +// Or performs a masked bitwise OR operation between two vectors. +// +// Asm: VPORQ, CPU Feature: AVX512EVEX +func (x Int64x8) Or(y Int64x8) Int64x8 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX +func (x Uint8x16) Or(y Uint8x16) Uint8x16 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX2 +func (x Uint8x32) Or(y Uint8x32) Uint8x32 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX +func (x Uint16x8) Or(y Uint16x8) Uint16x8 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX2 +func (x Uint16x16) Or(y Uint16x16) Uint16x16 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX +func (x Uint32x4) Or(y Uint32x4) Uint32x4 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX2 +func (x Uint32x8) Or(y Uint32x8) Uint32x8 + +// Or performs a masked bitwise OR operation between two vectors. +// +// Asm: VPORD, CPU Feature: AVX512EVEX +func (x Uint32x16) Or(y Uint32x16) Uint32x16 + +// Or performs a bitwise OR operation between two vectors. 
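
NotEqual returns a mask type rather than a vector, which is what feeds the Masked* forms shown earlier, while Or is the plain bitwise combine. A sketch under the same assumptions; NotEqual on the integer types is listed only for AVX512EVEX, so it requires an AVX-512 machine:

package simdexample

import "internal/simd"

// diffMask reports, as a Mask32x4, which lanes of x and y differ
// (VPCMPD with the "not equal" immediate 4).
func diffMask(x, y simd.Int32x4) simd.Mask32x4 {
	return x.NotEqual(y)
}

// combineFlags ORs two vectors of bit flags lane by lane (VPOR on AVX2).
func combineFlags(a, b simd.Uint32x8) simd.Uint32x8 {
	return a.Or(b)
}
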
+// +// Asm: VPOR, CPU Feature: AVX +func (x Uint64x2) Or(y Uint64x2) Uint64x2 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX2 +func (x Uint64x4) Or(y Uint64x4) Uint64x4 + +// Or performs a masked bitwise OR operation between two vectors. +// +// Asm: VPORQ, CPU Feature: AVX512EVEX +func (x Uint64x8) Or(y Uint64x8) Uint64x8 + +/* PairDotProd */ + +// PairDotProd multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. +// +// Asm: VPMADDWD, CPU Feature: AVX +func (x Int16x8) PairDotProd(y Int16x8) Int32x4 + +// PairDotProd multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. +// +// Asm: VPMADDWD, CPU Feature: AVX2 +func (x Int16x16) PairDotProd(y Int16x16) Int32x8 -// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. -// Const Immediate = 8. +// PairDotProd multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float32x4 +// Asm: VPMADDWD, CPU Feature: AVX512EVEX +func (x Int16x32) PairDotProd(y Int16x32) Int32x16 -// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. -// Const Immediate = 8. +/* PairDotProdAccumulate */ + +// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float32x8 +// Asm: VPDPWSSD, CPU Feature: AVX_VNNI +func (x Int32x4) PairDotProdAccumulate(y Int32x4, z Int32x4) Int32x4 -// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. -// Const Immediate = 8. +// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float64x2 +// Asm: VPDPWSSD, CPU Feature: AVX_VNNI +func (x Int32x8) PairDotProdAccumulate(y Int32x8, z Int32x8) Int32x8 -// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. -// Const Immediate = 8. +// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float64x4 +// Asm: VPDPWSSD, CPU Feature: AVX512EVEX +func (x Int32x16) PairDotProdAccumulate(y Int16x32, z Int32x16) Int32x16 -// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. -// Const Immediate = 8. +/* PairwiseAdd */ + +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. 
// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float64x8 +// Asm: VHADDPS, CPU Feature: AVX +func (x Float32x4) PairwiseAdd(y Float32x4) Float32x4 -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// Const Immediate = 0. +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithRoundWithPrecision(imm8 uint8) Float32x16 +// Asm: VHADDPS, CPU Feature: AVX +func (x Float32x8) PairwiseAdd(y Float32x8) Float32x8 -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// Const Immediate = 0. +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithRoundWithPrecision(imm8 uint8) Float32x4 +// Asm: VHADDPD, CPU Feature: AVX +func (x Float64x2) PairwiseAdd(y Float64x2) Float64x2 -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// Const Immediate = 0. +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithRoundWithPrecision(imm8 uint8) Float32x8 +// Asm: VHADDPD, CPU Feature: AVX +func (x Float64x4) PairwiseAdd(y Float64x4) Float64x4 -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// Const Immediate = 0. +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithRoundWithPrecision(imm8 uint8) Float64x2 +// Asm: VPHADDW, CPU Feature: AVX +func (x Int16x8) PairwiseAdd(y Int16x8) Int16x8 -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// Const Immediate = 0. +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithRoundWithPrecision(imm8 uint8) Float64x4 +// Asm: VPHADDW, CPU Feature: AVX2 +func (x Int16x16) PairwiseAdd(y Int16x16) Int16x16 -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// Const Immediate = 0. +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithRoundWithPrecision(imm8 uint8) Float64x8 +// Asm: VPHADDD, CPU Feature: AVX +func (x Int32x4) PairwiseAdd(y Int32x4) Int32x4 -// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. -// Const Immediate = 11. +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] 
and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float32x16 +// Asm: VPHADDD, CPU Feature: AVX2 +func (x Int32x8) PairwiseAdd(y Int32x8) Int32x8 -// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. -// Const Immediate = 11. +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float32x4 +// Asm: VPHADDW, CPU Feature: AVX +func (x Uint16x8) PairwiseAdd(y Uint16x8) Uint16x8 -// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. -// Const Immediate = 11. +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float32x8 +// Asm: VPHADDW, CPU Feature: AVX2 +func (x Uint16x16) PairwiseAdd(y Uint16x16) Uint16x16 -// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. -// Const Immediate = 11. +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float64x2 +// Asm: VPHADDD, CPU Feature: AVX +func (x Uint32x4) PairwiseAdd(y Uint32x4) Uint32x4 -// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. -// Const Immediate = 11. +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float64x4 +// Asm: VPHADDD, CPU Feature: AVX2 +func (x Uint32x8) PairwiseAdd(y Uint32x8) Uint32x8 -// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. -// Const Immediate = 11. +/* PairwiseSub */ + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float64x8 +// Asm: VHSUBPS, CPU Feature: AVX +func (x Float32x4) PairwiseSub(y Float32x4) Float32x4 -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// Const Immediate = 3. +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. 
+// +// Asm: VHSUBPS, CPU Feature: AVX +func (x Float32x8) PairwiseSub(y Float32x8) Float32x8 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VHSUBPD, CPU Feature: AVX +func (x Float64x2) PairwiseSub(y Float64x2) Float64x2 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VHSUBPD, CPU Feature: AVX +func (x Float64x4) PairwiseSub(y Float64x4) Float64x4 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBW, CPU Feature: AVX +func (x Int16x8) PairwiseSub(y Int16x8) Int16x8 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBW, CPU Feature: AVX2 +func (x Int16x16) PairwiseSub(y Int16x16) Int16x16 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBD, CPU Feature: AVX +func (x Int32x4) PairwiseSub(y Int32x4) Int32x4 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBD, CPU Feature: AVX2 +func (x Int32x8) PairwiseSub(y Int32x8) Int32x8 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBW, CPU Feature: AVX +func (x Uint16x8) PairwiseSub(y Uint16x8) Uint16x8 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBW, CPU Feature: AVX2 +func (x Uint16x16) PairwiseSub(y Uint16x16) Uint16x16 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBD, CPU Feature: AVX +func (x Uint32x4) PairwiseSub(y Uint32x4) Uint32x4 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBD, CPU Feature: AVX2 +func (x Uint32x8) PairwiseSub(y Uint32x8) Uint32x8 + +/* PopCount */ + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Int8x16) PopCount() Int8x16 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Int8x32) PopCount() Int8x32 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Int8x64) PopCount() Int8x64 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Int16x8) PopCount() Int16x8 + +// PopCount counts the number of set bits in each element. 
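+//
+// A minimal usage sketch (illustrative only, not generated API text; it assumes
+// GOEXPERIMENT=simd and the internal/simd package added by this patch):
+//
+//    var v simd.Int16x16    // in real code this would be loaded from data
+//    counts := v.PopCount() // counts[i] is the number of set bits in v[i]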
+// +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Int16x16) PopCount() Int16x16 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Int16x32) PopCount() Int16x32 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Int32x4) PopCount() Int32x4 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Int32x8) PopCount() Int32x8 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Int32x16) PopCount() Int32x16 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Int64x2) PopCount() Int64x2 + +// PopCount counts the number of set bits in each element. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithTruncWithPrecision(imm8 uint8) Float32x16 +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Int64x4) PopCount() Int64x4 -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// Const Immediate = 3. +// PopCount counts the number of set bits in each element. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithTruncWithPrecision(imm8 uint8) Float32x4 +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Int64x8) PopCount() Int64x8 -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// Const Immediate = 3. +// PopCount counts the number of set bits in each element. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithTruncWithPrecision(imm8 uint8) Float32x8 +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Uint8x16) PopCount() Uint8x16 -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// Const Immediate = 3. +// PopCount counts the number of set bits in each element. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithTruncWithPrecision(imm8 uint8) Float64x2 +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Uint8x32) PopCount() Uint8x32 -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// Const Immediate = 3. +// PopCount counts the number of set bits in each element. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithTruncWithPrecision(imm8 uint8) Float64x4 +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Uint8x64) PopCount() Uint8x64 -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// Const Immediate = 3. +// PopCount counts the number of set bits in each element. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithTruncWithPrecision(imm8 uint8) Float64x8 +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Uint16x8) PopCount() Uint16x8 -// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. -// Const Immediate = 9. +// PopCount counts the number of set bits in each element. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) FloorSuppressExceptionWithPrecision(imm8 uint8) Float32x16 +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Uint16x16) PopCount() Uint16x16 -// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. -// Const Immediate = 9. 
+// PopCount counts the number of set bits in each element. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) FloorSuppressExceptionWithPrecision(imm8 uint8) Float32x4 +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Uint16x32) PopCount() Uint16x32 -// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. -// Const Immediate = 9. +// PopCount counts the number of set bits in each element. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) FloorSuppressExceptionWithPrecision(imm8 uint8) Float32x8 +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Uint32x4) PopCount() Uint32x4 -// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. -// Const Immediate = 9. +// PopCount counts the number of set bits in each element. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) FloorSuppressExceptionWithPrecision(imm8 uint8) Float64x2 +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Uint32x8) PopCount() Uint32x8 -// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. -// Const Immediate = 9. +// PopCount counts the number of set bits in each element. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) FloorSuppressExceptionWithPrecision(imm8 uint8) Float64x4 +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Uint32x16) PopCount() Uint32x16 -// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. -// Const Immediate = 9. +// PopCount counts the number of set bits in each element. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) FloorSuppressExceptionWithPrecision(imm8 uint8) Float64x8 +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Uint64x2) PopCount() Uint64x2 -// FloorWithPrecision rounds elements down with specified precision, masked. -// Const Immediate = 1. +// PopCount counts the number of set bits in each element. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) FloorWithPrecision(imm8 uint8) Float32x16 +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Uint64x4) PopCount() Uint64x4 -// FloorWithPrecision rounds elements down with specified precision, masked. -// Const Immediate = 1. +// PopCount counts the number of set bits in each element. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) FloorWithPrecision(imm8 uint8) Float32x4 +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Uint64x8) PopCount() Uint64x8 -// FloorWithPrecision rounds elements down with specified precision, masked. -// Const Immediate = 1. +/* Round */ + +// Round rounds elements to the nearest integer. +// Const Immediate = 0. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) FloorWithPrecision(imm8 uint8) Float32x8 +// Asm: VROUNDPS, CPU Feature: AVX +func (x Float32x4) Round() Float32x4 -// FloorWithPrecision rounds elements down with specified precision, masked. -// Const Immediate = 1. +// Round rounds elements to the nearest integer. +// Const Immediate = 0. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) FloorWithPrecision(imm8 uint8) Float64x2 +// Asm: VROUNDPS, CPU Feature: AVX +func (x Float32x8) Round() Float32x8 -// FloorWithPrecision rounds elements down with specified precision, masked. -// Const Immediate = 1. +// Round rounds elements to the nearest integer. +// Const Immediate = 0. 
// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) FloorWithPrecision(imm8 uint8) Float64x4 +// Asm: VROUNDPD, CPU Feature: AVX +func (x Float64x2) Round() Float64x2 -// FloorWithPrecision rounds elements down with specified precision, masked. -// Const Immediate = 1. +// Round rounds elements to the nearest integer. +// Const Immediate = 0. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) FloorWithPrecision(imm8 uint8) Float64x8 +// Asm: VROUNDPD, CPU Feature: AVX +func (x Float64x4) Round() Float64x4 + +/* RoundSuppressExceptionWithPrecision */ // RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. // Const Immediate = 8. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) RoundSuppressExceptionWithPrecision(imm8 uint8) Float32x16 +func (x Float32x4) RoundSuppressExceptionWithPrecision(imm8 uint8) Float32x4 // RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. // Const Immediate = 8. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) RoundSuppressExceptionWithPrecision(imm8 uint8) Float32x4 +func (x Float32x8) RoundSuppressExceptionWithPrecision(imm8 uint8) Float32x8 // RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. // Const Immediate = 8. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) RoundSuppressExceptionWithPrecision(imm8 uint8) Float32x8 +func (x Float32x16) RoundSuppressExceptionWithPrecision(imm8 uint8) Float32x16 // RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. // Const Immediate = 8. @@ -7720,23 +7943,25 @@ func (x Float64x4) RoundSuppressExceptionWithPrecision(imm8 uint8) Float64x4 // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) RoundSuppressExceptionWithPrecision(imm8 uint8) Float64x8 +/* RoundWithPrecision */ + // RoundWithPrecision rounds elements with specified precision. // Const Immediate = 0. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) RoundWithPrecision(imm8 uint8) Float32x16 +func (x Float32x4) RoundWithPrecision(imm8 uint8) Float32x4 // RoundWithPrecision rounds elements with specified precision. // Const Immediate = 0. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) RoundWithPrecision(imm8 uint8) Float32x4 +func (x Float32x8) RoundWithPrecision(imm8 uint8) Float32x8 // RoundWithPrecision rounds elements with specified precision. // Const Immediate = 0. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) RoundWithPrecision(imm8 uint8) Float32x8 +func (x Float32x16) RoundWithPrecision(imm8 uint8) Float32x16 // RoundWithPrecision rounds elements with specified precision. // Const Immediate = 0. @@ -7756,653 +7981,726 @@ func (x Float64x4) RoundWithPrecision(imm8 uint8) Float64x4 // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) RoundWithPrecision(imm8 uint8) Float64x8 -// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) TruncSuppressExceptionWithPrecision(imm8 uint8) Float32x16 - -// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. -// Const Immediate = 11. 
-// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) TruncSuppressExceptionWithPrecision(imm8 uint8) Float32x4 - -// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) TruncSuppressExceptionWithPrecision(imm8 uint8) Float32x8 - -// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) TruncSuppressExceptionWithPrecision(imm8 uint8) Float64x2 - -// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) TruncSuppressExceptionWithPrecision(imm8 uint8) Float64x4 - -// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) TruncSuppressExceptionWithPrecision(imm8 uint8) Float64x8 - -// TruncWithPrecision truncates elements with specified precision. -// Const Immediate = 3. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) TruncWithPrecision(imm8 uint8) Float32x16 - -// TruncWithPrecision truncates elements with specified precision. -// Const Immediate = 3. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) TruncWithPrecision(imm8 uint8) Float32x4 - -// TruncWithPrecision truncates elements with specified precision. -// Const Immediate = 3. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) TruncWithPrecision(imm8 uint8) Float32x8 +/* SaturatedAdd */ -// TruncWithPrecision truncates elements with specified precision. -// Const Immediate = 3. +// SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) TruncWithPrecision(imm8 uint8) Float64x2 +// Asm: VPADDSB, CPU Feature: AVX +func (x Int8x16) SaturatedAdd(y Int8x16) Int8x16 -// TruncWithPrecision truncates elements with specified precision. -// Const Immediate = 3. +// SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) TruncWithPrecision(imm8 uint8) Float64x4 +// Asm: VPADDSB, CPU Feature: AVX2 +func (x Int8x32) SaturatedAdd(y Int8x32) Int8x32 -// TruncWithPrecision truncates elements with specified precision. -// Const Immediate = 3. +// SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) TruncWithPrecision(imm8 uint8) Float64x8 +// Asm: VPADDSB, CPU Feature: AVX512EVEX +func (x Int8x64) SaturatedAdd(y Int8x64) Int8x64 -// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. -// Const Immediate = 10. +// SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 +// Asm: VPADDSW, CPU Feature: AVX +func (x Int16x8) SaturatedAdd(y Int16x8) Int16x8 -// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. -// Const Immediate = 10. 
+// SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 +// Asm: VPADDSW, CPU Feature: AVX2 +func (x Int16x16) SaturatedAdd(y Int16x16) Int16x16 -// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. -// Const Immediate = 10. +// SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 +// Asm: VPADDSW, CPU Feature: AVX512EVEX +func (x Int16x32) SaturatedAdd(y Int16x32) Int16x32 -// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. -// Const Immediate = 10. +// SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 +// Asm: VPADDSB, CPU Feature: AVX +func (x Uint8x16) SaturatedAdd(y Uint8x16) Uint8x16 -// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. -// Const Immediate = 10. +// SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 - -// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. -// Const Immediate = 10. +// Asm: VPADDSB, CPU Feature: AVX2 +func (x Uint8x32) SaturatedAdd(y Uint8x32) Uint8x32 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 +// Asm: VPADDSB, CPU Feature: AVX512EVEX +func (x Uint8x64) SaturatedAdd(y Uint8x64) Uint8x64 -// CeilWithPrecision rounds elements up with specified precision, masked. -// Const Immediate = 2. +// SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedCeilWithPrecision(imm uint8, y Mask32x16) Float32x16 +// Asm: VPADDSW, CPU Feature: AVX +func (x Uint16x8) SaturatedAdd(y Uint16x8) Uint16x8 -// CeilWithPrecision rounds elements up with specified precision, masked. -// Const Immediate = 2. +// SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedCeilWithPrecision(imm uint8, y Mask32x4) Float32x4 +// Asm: VPADDSW, CPU Feature: AVX2 +func (x Uint16x16) SaturatedAdd(y Uint16x16) Uint16x16 -// CeilWithPrecision rounds elements up with specified precision, masked. -// Const Immediate = 2. +// SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedCeilWithPrecision(imm uint8, y Mask32x8) Float32x8 +// Asm: VPADDSW, CPU Feature: AVX512EVEX +func (x Uint16x32) SaturatedAdd(y Uint16x32) Uint16x32 -// CeilWithPrecision rounds elements up with specified precision, masked. -// Const Immediate = 2. +/* SaturatedPairDotProdAccumulate */ + +// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. 
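+//
+// A minimal usage sketch (illustrative only, not generated API text; it assumes
+// GOEXPERIMENT=simd and the internal/simd package added by this patch):
+//
+//    var acc, y, z simd.Int32x4 // in real code these would be loaded from data
+//    acc = acc.SaturatedPairDotProdAccumulate(y, z) // dot products of y and z, accumulated into acc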
// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedCeilWithPrecision(imm uint8, y Mask64x2) Float64x2 +// Asm: VPDPWSSDS, CPU Feature: AVX_VNNI +func (x Int32x4) SaturatedPairDotProdAccumulate(y Int32x4, z Int32x4) Int32x4 -// CeilWithPrecision rounds elements up with specified precision, masked. -// Const Immediate = 2. +// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedCeilWithPrecision(imm uint8, y Mask64x4) Float64x4 +// Asm: VPDPWSSDS, CPU Feature: AVX_VNNI +func (x Int32x8) SaturatedPairDotProdAccumulate(y Int32x8, z Int32x8) Int32x8 -// CeilWithPrecision rounds elements up with specified precision, masked. -// Const Immediate = 2. +// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedCeilWithPrecision(imm uint8, y Mask64x8) Float64x8 +// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX +func (x Int32x16) SaturatedPairDotProdAccumulate(y Int16x32, z Int32x16) Int32x16 -// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. -// Const Immediate = 10. +/* SaturatedPairwiseAdd */ + +// SaturatedPairwiseAdd horizontally adds adjacent pairs of elements with saturation. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 +// Asm: VPHADDSW, CPU Feature: AVX +func (x Int16x8) SaturatedPairwiseAdd(y Int16x8) Int16x8 -// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. -// Const Immediate = 10. +// SaturatedPairwiseAdd horizontally adds adjacent pairs of elements with saturation. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 +// Asm: VPHADDSW, CPU Feature: AVX2 +func (x Int16x16) SaturatedPairwiseAdd(y Int16x16) Int16x16 -// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. -// Const Immediate = 10. +/* SaturatedPairwiseSub */ + +// SaturatedPairwiseSub horizontally subtracts adjacent pairs of elements with saturation. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 +// Asm: VPHSUBSW, CPU Feature: AVX +func (x Int16x8) SaturatedPairwiseSub(y Int16x8) Int16x8 -// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. -// Const Immediate = 10. +// SaturatedPairwiseSub horizontally subtracts adjacent pairs of elements with saturation. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. 
// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 +// Asm: VPHSUBSW, CPU Feature: AVX2 +func (x Int16x16) SaturatedPairwiseSub(y Int16x16) Int16x16 -// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. -// Const Immediate = 10. +/* SaturatedSub */ + +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 +// Asm: VPSUBSB, CPU Feature: AVX +func (x Int8x16) SaturatedSub(y Int8x16) Int8x16 -// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. -// Const Immediate = 10. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 +// Asm: VPSUBSB, CPU Feature: AVX2 +func (x Int8x32) SaturatedSub(y Int8x32) Int8x32 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// Const Immediate = 2. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask32x16) Float32x16 +// Asm: VPSUBSB, CPU Feature: AVX512EVEX +func (x Int8x64) SaturatedSub(y Int8x64) Int8x64 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// Const Immediate = 2. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask32x4) Float32x4 +// Asm: VPSUBSW, CPU Feature: AVX +func (x Int16x8) SaturatedSub(y Int16x8) Int16x8 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// Const Immediate = 2. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask32x8) Float32x8 +// Asm: VPSUBSW, CPU Feature: AVX2 +func (x Int16x16) SaturatedSub(y Int16x16) Int16x16 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// Const Immediate = 2. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask64x2) Float64x2 +// Asm: VPSUBSW, CPU Feature: AVX512EVEX +func (x Int16x32) SaturatedSub(y Int16x32) Int16x32 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// Const Immediate = 2. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask64x4) Float64x4 +// Asm: VPSUBSB, CPU Feature: AVX +func (x Uint8x16) SaturatedSub(y Uint8x16) Uint8x16 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// Const Immediate = 2. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. 
 //
-// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
-func (x Float64x8) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask64x8) Float64x8
+// Asm: VPSUBSB, CPU Feature: AVX2
+func (x Uint8x32) SaturatedSub(y Uint8x32) Uint8x32
 
-// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions.
-// Const Immediate = 9.
+// SaturatedSub subtracts corresponding elements of two vectors with saturation.
 //
-// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
-func (x Float32x16) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16
+// Asm: VPSUBSB, CPU Feature: AVX512EVEX
+func (x Uint8x64) SaturatedSub(y Uint8x64) Uint8x64
 
-// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions.
-// Const Immediate = 9.
+// SaturatedSub subtracts corresponding elements of two vectors with saturation.
 //
-// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
-func (x Float32x4) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4
+// Asm: VPSUBSW, CPU Feature: AVX
+func (x Uint16x8) SaturatedSub(y Uint16x8) Uint16x8
 
-// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions.
-// Const Immediate = 9.
+// SaturatedSub subtracts corresponding elements of two vectors with saturation.
 //
-// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
-func (x Float32x8) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8
+// Asm: VPSUBSW, CPU Feature: AVX2
+func (x Uint16x16) SaturatedSub(y Uint16x16) Uint16x16
 
-// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions.
-// Const Immediate = 9.
+// SaturatedSub subtracts corresponding elements of two vectors with saturation.
 //
-// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
-func (x Float64x2) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2
+// Asm: VPSUBSW, CPU Feature: AVX512EVEX
+func (x Uint16x32) SaturatedSub(y Uint16x32) Uint16x32
 
-// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions.
-// Const Immediate = 9.
+/* SaturatedUnsignedSignedPairDotProd */
+
+// SaturatedUnsignedSignedPairDotProd multiplies the elements and adds the pairs together with saturation,
+// yielding a vector of half as many elements with twice the input element size.
 //
-// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
-func (x Float64x4) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4
+// Asm: VPMADDUBSW, CPU Feature: AVX
+func (x Uint8x16) SaturatedUnsignedSignedPairDotProd(y Int8x16) Int16x8
 
-// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions.
-// Const Immediate = 9.
+// SaturatedUnsignedSignedPairDotProd multiplies the elements and adds the pairs together with saturation,
+// yielding a vector of half as many elements with twice the input element size.
 //
-// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
-func (x Float64x8) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8
+// Asm: VPMADDUBSW, CPU Feature: AVX2
+func (x Uint8x32) SaturatedUnsignedSignedPairDotProd(y Int8x32) Int16x16
 
-// DiffWithFloorWithPrecision computes the difference after flooring with specified precision.
-// Const Immediate = 1.
+// SaturatedUnsignedSignedPairDotProd multiplies the elements and adds the pairs together with saturation,
+// yielding a vector of half as many elements with twice the input element size.
 //
-// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
-func (x Float32x16) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask32x16) Float32x16
+// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX
+func (x Uint16x8) SaturatedUnsignedSignedPairDotProd(y Int16x8) Int16x8
 
-// DiffWithFloorWithPrecision computes the difference after flooring with specified precision.
-// Const Immediate = 1.
+// SaturatedUnsignedSignedPairDotProd multiplies the elements and adds the pairs together with saturation,
+// yielding a vector of half as many elements with twice the input element size.
 //
-// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
-func (x Float32x4) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask32x4) Float32x4
+// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX
+func (x Uint16x16) SaturatedUnsignedSignedPairDotProd(y Int16x16) Int16x16
 
-// DiffWithFloorWithPrecision computes the difference after flooring with specified precision.
-// Const Immediate = 1.
+// SaturatedUnsignedSignedPairDotProd multiplies the elements and adds the pairs together with saturation,
+// yielding a vector of half as many elements with twice the input element size.
 //
-// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
-func (x Float32x8) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask32x8) Float32x8
+// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX
+func (x Uint16x32) SaturatedUnsignedSignedPairDotProd(y Int16x32) Int16x32
 
-// DiffWithFloorWithPrecision computes the difference after flooring with specified precision.
-// Const Immediate = 1.
+/* SaturatedUnsignedSignedQuadDotProdAccumulate */
+
+// SaturatedUnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x.
 //
-// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
-func (x Float64x2) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask64x2) Float64x2
+// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI
+func (x Int32x4) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint32x4, z Int32x4) Int32x4
 
-// DiffWithFloorWithPrecision computes the difference after flooring with specified precision.
-// Const Immediate = 1.
+// SaturatedUnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x.
 //
-// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
-func (x Float64x4) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask64x4) Float64x4
+// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI
+func (x Int32x8) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint32x8, z Int32x8) Int32x8
 
-// DiffWithFloorWithPrecision computes the difference after flooring with specified precision.
-// Const Immediate = 1.
+// SaturatedUnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x.
 //
-// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
-func (x Float64x8) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask64x8) Float64x8
+// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX
+func (x Int32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16) Int32x16
 
-// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions.
-// Const Immediate = 8.
+// SaturatedUnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x.
 //
-// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
-func (x Float32x16) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16
+// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI
+func (x Uint32x4) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint32x4, z Int32x4) Uint32x4
 
-// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions.
-// Const Immediate = 8.
+// SaturatedUnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x.
 //
-// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
-func (x Float32x4) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4
+// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI
+func (x Uint32x8) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint32x8, z Int32x8) Uint32x8
 
-// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions.
-// Const Immediate = 8.
+// SaturatedUnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x.
 //
-// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
-func (x Float32x8) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8
+// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX
+func (x Uint32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16) Uint32x16
+
+/* Sign */
+
+// Sign returns the product of the first operand with -1, 0, or 1,
+// whichever constant is nearest to the value of the second operand.
+//
+// Asm: VPSIGNB, CPU Feature: AVX
+func (x Int8x16) Sign(y Int8x16) Int8x16
 
-// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions.
-// Const Immediate = 8.
+// Sign returns the product of the first operand with -1, 0, or 1,
+// whichever constant is nearest to the value of the second operand.
 //
-// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
-func (x Float64x2) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2
+// Asm: VPSIGNB, CPU Feature: AVX2
+func (x Int8x32) Sign(y Int8x32) Int8x32
 
-// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions.
-// Const Immediate = 8.
+// Sign returns the product of the first operand with -1, 0, or 1,
+// whichever constant is nearest to the value of the second operand.
 //
-// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
-func (x Float64x4) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4
+// Asm: VPSIGNW, CPU Feature: AVX
+func (x Int16x8) Sign(y Int16x8) Int16x8
 
-// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions.
-// Const Immediate = 8.
+// Sign returns the product of the first operand with -1, 0, or 1,
+// whichever constant is nearest to the value of the second operand.
 //
-// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
-func (x Float64x8) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8
+// Asm: VPSIGNW, CPU Feature: AVX2
+func (x Int16x16) Sign(y Int16x16) Int16x16
 
-// DiffWithRoundWithPrecision computes the difference after rounding with specified precision.
-// Const Immediate = 0.
+// Sign returns the product of the first operand with -1, 0, or 1, +// whichever constant is nearest to the value of the second operand. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask32x16) Float32x16 +// Asm: VPSIGND, CPU Feature: AVX +func (x Int32x4) Sign(y Int32x4) Int32x4 -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// Const Immediate = 0. +// Sign returns the product of the first operand with -1, 0, or 1, +// whichever constant is nearest to the value of the second operand. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask32x4) Float32x4 +// Asm: VPSIGND, CPU Feature: AVX2 +func (x Int32x8) Sign(y Int32x8) Int32x8 -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// Const Immediate = 0. +/* Sqrt */ + +// Sqrt computes the square root of each element. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask32x8) Float32x8 +// Asm: VSQRTPS, CPU Feature: AVX +func (x Float32x4) Sqrt() Float32x4 -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// Const Immediate = 0. +// Sqrt computes the square root of each element. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask64x2) Float64x2 +// Asm: VSQRTPS, CPU Feature: AVX +func (x Float32x8) Sqrt() Float32x8 -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// Const Immediate = 0. +// Sqrt computes the square root of each element. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask64x4) Float64x4 +// Asm: VSQRTPS, CPU Feature: AVX512EVEX +func (x Float32x16) Sqrt() Float32x16 -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// Const Immediate = 0. +// Sqrt computes the square root of each element. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask64x8) Float64x8 +// Asm: VSQRTPD, CPU Feature: AVX +func (x Float64x2) Sqrt() Float64x2 -// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. -// Const Immediate = 11. +// Sqrt computes the square root of each element. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 +// Asm: VSQRTPD, CPU Feature: AVX +func (x Float64x4) Sqrt() Float64x4 -// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. -// Const Immediate = 11. +// Sqrt computes the square root of each element. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 +// Asm: VSQRTPD, CPU Feature: AVX512EVEX +func (x Float64x8) Sqrt() Float64x8 -// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. -// Const Immediate = 11. +/* Sub */ + +// Sub subtracts corresponding elements of two vectors. 
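+//
+// A minimal usage sketch (illustrative only, not generated API text; it assumes
+// GOEXPERIMENT=simd and the internal/simd package added by this patch):
+//
+//    var a, b simd.Float32x4 // in real code these would be loaded from data
+//    d := a.Sub(b)           // element-wise a[i] - b[i]
+//    r := d.Sqrt()           // Sqrt (declared above) can be chained on the result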
// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 +// Asm: VSUBPS, CPU Feature: AVX +func (x Float32x4) Sub(y Float32x4) Float32x4 -// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. -// Const Immediate = 11. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 +// Asm: VSUBPS, CPU Feature: AVX +func (x Float32x8) Sub(y Float32x8) Float32x8 -// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. -// Const Immediate = 11. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 +// Asm: VSUBPS, CPU Feature: AVX512EVEX +func (x Float32x16) Sub(y Float32x16) Float32x16 -// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. -// Const Immediate = 11. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 +// Asm: VSUBPD, CPU Feature: AVX +func (x Float64x2) Sub(y Float64x2) Float64x2 -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// Const Immediate = 3. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask32x16) Float32x16 +// Asm: VSUBPD, CPU Feature: AVX +func (x Float64x4) Sub(y Float64x4) Float64x4 -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// Const Immediate = 3. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask32x4) Float32x4 +// Asm: VSUBPD, CPU Feature: AVX512EVEX +func (x Float64x8) Sub(y Float64x8) Float64x8 -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// Const Immediate = 3. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask32x8) Float32x8 +// Asm: VPSUBB, CPU Feature: AVX +func (x Int8x16) Sub(y Int8x16) Int8x16 -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// Const Immediate = 3. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask64x2) Float64x2 +// Asm: VPSUBB, CPU Feature: AVX2 +func (x Int8x32) Sub(y Int8x32) Int8x32 -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// Const Immediate = 3. +// Sub subtracts corresponding elements of two vectors. 
// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask64x4) Float64x4 +// Asm: VPSUBB, CPU Feature: AVX512EVEX +func (x Int8x64) Sub(y Int8x64) Int8x64 -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// Const Immediate = 3. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask64x8) Float64x8 +// Asm: VPSUBW, CPU Feature: AVX +func (x Int16x8) Sub(y Int16x8) Int16x8 -// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. -// Const Immediate = 9. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 +// Asm: VPSUBW, CPU Feature: AVX2 +func (x Int16x16) Sub(y Int16x16) Int16x16 -// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. -// Const Immediate = 9. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 +// Asm: VPSUBW, CPU Feature: AVX512EVEX +func (x Int16x32) Sub(y Int16x32) Int16x32 -// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. -// Const Immediate = 9. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 +// Asm: VPSUBD, CPU Feature: AVX +func (x Int32x4) Sub(y Int32x4) Int32x4 -// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. -// Const Immediate = 9. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 +// Asm: VPSUBD, CPU Feature: AVX2 +func (x Int32x8) Sub(y Int32x8) Int32x8 -// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. -// Const Immediate = 9. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 +// Asm: VPSUBD, CPU Feature: AVX512EVEX +func (x Int32x16) Sub(y Int32x16) Int32x16 -// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. -// Const Immediate = 9. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 +// Asm: VPSUBQ, CPU Feature: AVX +func (x Int64x2) Sub(y Int64x2) Int64x2 -// FloorWithPrecision rounds elements down with specified precision, masked. -// Const Immediate = 1. +// Sub subtracts corresponding elements of two vectors. 
// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFloorWithPrecision(imm uint8, y Mask32x16) Float32x16 +// Asm: VPSUBQ, CPU Feature: AVX2 +func (x Int64x4) Sub(y Int64x4) Int64x4 -// FloorWithPrecision rounds elements down with specified precision, masked. -// Const Immediate = 1. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFloorWithPrecision(imm uint8, y Mask32x4) Float32x4 +// Asm: VPSUBQ, CPU Feature: AVX512EVEX +func (x Int64x8) Sub(y Int64x8) Int64x8 -// FloorWithPrecision rounds elements down with specified precision, masked. -// Const Immediate = 1. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFloorWithPrecision(imm uint8, y Mask32x8) Float32x8 +// Asm: VPSUBB, CPU Feature: AVX +func (x Uint8x16) Sub(y Uint8x16) Uint8x16 -// FloorWithPrecision rounds elements down with specified precision, masked. -// Const Immediate = 1. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFloorWithPrecision(imm uint8, y Mask64x2) Float64x2 +// Asm: VPSUBB, CPU Feature: AVX2 +func (x Uint8x32) Sub(y Uint8x32) Uint8x32 -// FloorWithPrecision rounds elements down with specified precision, masked. -// Const Immediate = 1. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFloorWithPrecision(imm uint8, y Mask64x4) Float64x4 +// Asm: VPSUBB, CPU Feature: AVX512EVEX +func (x Uint8x64) Sub(y Uint8x64) Uint8x64 -// FloorWithPrecision rounds elements down with specified precision, masked. -// Const Immediate = 1. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFloorWithPrecision(imm uint8, y Mask64x8) Float64x8 +// Asm: VPSUBW, CPU Feature: AVX +func (x Uint16x8) Sub(y Uint16x8) Uint16x8 -// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. -// Const Immediate = 8. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 +// Asm: VPSUBW, CPU Feature: AVX2 +func (x Uint16x16) Sub(y Uint16x16) Uint16x16 -// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. -// Const Immediate = 8. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 +// Asm: VPSUBW, CPU Feature: AVX512EVEX +func (x Uint16x32) Sub(y Uint16x32) Uint16x32 -// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. -// Const Immediate = 8. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 +// Asm: VPSUBD, CPU Feature: AVX +func (x Uint32x4) Sub(y Uint32x4) Uint32x4 -// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. -// Const Immediate = 8. +// Sub subtracts corresponding elements of two vectors. 
// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 +// Asm: VPSUBD, CPU Feature: AVX2 +func (x Uint32x8) Sub(y Uint32x8) Uint32x8 -// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. -// Const Immediate = 8. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 +// Asm: VPSUBD, CPU Feature: AVX512EVEX +func (x Uint32x16) Sub(y Uint32x16) Uint32x16 -// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. -// Const Immediate = 8. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 +// Asm: VPSUBQ, CPU Feature: AVX +func (x Uint64x2) Sub(y Uint64x2) Uint64x2 -// RoundWithPrecision rounds elements with specified precision. -// Const Immediate = 0. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedRoundWithPrecision(imm uint8, y Mask32x16) Float32x16 +// Asm: VPSUBQ, CPU Feature: AVX2 +func (x Uint64x4) Sub(y Uint64x4) Uint64x4 -// RoundWithPrecision rounds elements with specified precision. -// Const Immediate = 0. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedRoundWithPrecision(imm uint8, y Mask32x4) Float32x4 +// Asm: VPSUBQ, CPU Feature: AVX512EVEX +func (x Uint64x8) Sub(y Uint64x8) Uint64x8 -// RoundWithPrecision rounds elements with specified precision. -// Const Immediate = 0. +/* Trunc */ + +// Trunc truncates elements towards zero. +// Const Immediate = 3. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedRoundWithPrecision(imm uint8, y Mask32x8) Float32x8 +// Asm: VROUNDPS, CPU Feature: AVX +func (x Float32x4) Trunc() Float32x4 -// RoundWithPrecision rounds elements with specified precision. -// Const Immediate = 0. +// Trunc truncates elements towards zero. +// Const Immediate = 3. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedRoundWithPrecision(imm uint8, y Mask64x2) Float64x2 +// Asm: VROUNDPS, CPU Feature: AVX +func (x Float32x8) Trunc() Float32x8 -// RoundWithPrecision rounds elements with specified precision. -// Const Immediate = 0. +// Trunc truncates elements towards zero. +// Const Immediate = 3. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedRoundWithPrecision(imm uint8, y Mask64x4) Float64x4 +// Asm: VROUNDPD, CPU Feature: AVX +func (x Float64x2) Trunc() Float64x2 -// RoundWithPrecision rounds elements with specified precision. -// Const Immediate = 0. +// Trunc truncates elements towards zero. +// Const Immediate = 3. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedRoundWithPrecision(imm uint8, y Mask64x8) Float64x8 +// Asm: VROUNDPD, CPU Feature: AVX +func (x Float64x4) Trunc() Float64x4 + +/* TruncSuppressExceptionWithPrecision */ // TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. // Const Immediate = 11. 
// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 +func (x Float32x4) TruncSuppressExceptionWithPrecision(imm8 uint8) Float32x4 // TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. // Const Immediate = 11. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 +func (x Float32x8) TruncSuppressExceptionWithPrecision(imm8 uint8) Float32x8 // TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. // Const Immediate = 11. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 +func (x Float32x16) TruncSuppressExceptionWithPrecision(imm8 uint8) Float32x16 // TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. // Const Immediate = 11. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 +func (x Float64x2) TruncSuppressExceptionWithPrecision(imm8 uint8) Float64x2 // TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. // Const Immediate = 11. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 +func (x Float64x4) TruncSuppressExceptionWithPrecision(imm8 uint8) Float64x4 // TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. // Const Immediate = 11. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 +func (x Float64x8) TruncSuppressExceptionWithPrecision(imm8 uint8) Float64x8 + +/* TruncWithPrecision */ // TruncWithPrecision truncates elements with specified precision. // Const Immediate = 3. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedTruncWithPrecision(imm uint8, y Mask32x16) Float32x16 +func (x Float32x4) TruncWithPrecision(imm8 uint8) Float32x4 // TruncWithPrecision truncates elements with specified precision. // Const Immediate = 3. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedTruncWithPrecision(imm uint8, y Mask32x4) Float32x4 +func (x Float32x8) TruncWithPrecision(imm8 uint8) Float32x8 // TruncWithPrecision truncates elements with specified precision. // Const Immediate = 3. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedTruncWithPrecision(imm uint8, y Mask32x8) Float32x8 +func (x Float32x16) TruncWithPrecision(imm8 uint8) Float32x16 // TruncWithPrecision truncates elements with specified precision. // Const Immediate = 3. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedTruncWithPrecision(imm uint8, y Mask64x2) Float64x2 +func (x Float64x2) TruncWithPrecision(imm8 uint8) Float64x2 // TruncWithPrecision truncates elements with specified precision. // Const Immediate = 3. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedTruncWithPrecision(imm uint8, y Mask64x4) Float64x4 +func (x Float64x4) TruncWithPrecision(imm8 uint8) Float64x4 // TruncWithPrecision truncates elements with specified precision. // Const Immediate = 3. 
// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedTruncWithPrecision(imm uint8, y Mask64x8) Float64x8 +func (x Float64x8) TruncWithPrecision(imm8 uint8) Float64x8 + +/* UnsignedSignedQuadDotProdAccumulate */ + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX_VNNI +func (x Int32x4) UnsignedSignedQuadDotProdAccumulate(y Uint32x4, z Int32x4) Int32x4 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX_VNNI +func (x Int32x8) UnsignedSignedQuadDotProdAccumulate(y Uint32x8, z Int32x8) Int32x8 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Int32x16) UnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16) Int32x16 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX_VNNI +func (x Uint32x4) UnsignedSignedQuadDotProdAccumulate(y Uint32x4, z Int32x4) Uint32x4 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX_VNNI +func (x Uint32x8) UnsignedSignedQuadDotProdAccumulate(y Uint32x8, z Int32x8) Uint32x8 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Uint32x16) UnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16) Uint32x16 + +/* Xor */ + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VXORPS, CPU Feature: AVX +func (x Float32x4) Xor(y Float32x4) Float32x4 + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VXORPS, CPU Feature: AVX +func (x Float32x8) Xor(y Float32x8) Float32x8 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VXORPS, CPU Feature: AVX512EVEX +func (x Float32x16) Xor(y Float32x16) Float32x16 + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VXORPD, CPU Feature: AVX +func (x Float64x2) Xor(y Float64x2) Float64x2 + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VXORPD, CPU Feature: AVX +func (x Float64x4) Xor(y Float64x4) Float64x4 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VXORPD, CPU Feature: AVX512EVEX +func (x Float64x8) Xor(y Float64x8) Float64x8 + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXOR, CPU Feature: AVX +func (x Int8x16) Xor(y Int8x16) Int8x16 + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXOR, CPU Feature: AVX2 +func (x Int8x32) Xor(y Int8x32) Int8x32 + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXOR, CPU Feature: AVX +func (x Int16x8) Xor(y Int16x8) Int16x8 + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXOR, CPU Feature: AVX2 +func (x Int16x16) Xor(y Int16x16) Int16x16 + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXOR, CPU Feature: AVX +func (x Int32x4) Xor(y Int32x4) Int32x4 + +// Xor performs a bitwise XOR operation between two vectors. 
+// +// Asm: VPXOR, CPU Feature: AVX2 +func (x Int32x8) Xor(y Int32x8) Int32x8 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORD, CPU Feature: AVX512EVEX +func (x Int32x16) Xor(y Int32x16) Int32x16 + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXOR, CPU Feature: AVX +func (x Int64x2) Xor(y Int64x2) Int64x2 + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXOR, CPU Feature: AVX2 +func (x Int64x4) Xor(y Int64x4) Int64x4 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORQ, CPU Feature: AVX512EVEX +func (x Int64x8) Xor(y Int64x8) Int64x8 + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXOR, CPU Feature: AVX +func (x Uint8x16) Xor(y Uint8x16) Uint8x16 + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXOR, CPU Feature: AVX2 +func (x Uint8x32) Xor(y Uint8x32) Uint8x32 + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXOR, CPU Feature: AVX +func (x Uint16x8) Xor(y Uint16x8) Uint16x8 + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXOR, CPU Feature: AVX2 +func (x Uint16x16) Xor(y Uint16x16) Uint16x16 + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXOR, CPU Feature: AVX +func (x Uint32x4) Xor(y Uint32x4) Uint32x4 + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXOR, CPU Feature: AVX2 +func (x Uint32x8) Xor(y Uint32x8) Uint32x8 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORD, CPU Feature: AVX512EVEX +func (x Uint32x16) Xor(y Uint32x16) Uint32x16 + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXOR, CPU Feature: AVX +func (x Uint64x2) Xor(y Uint64x2) Uint64x2 + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXOR, CPU Feature: AVX2 +func (x Uint64x4) Xor(y Uint64x4) Uint64x4 + +// Xor performs a masked bitwise XOR operation between two vectors. 
+// +// Asm: VPXORQ, CPU Feature: AVX512EVEX +func (x Uint64x8) Xor(y Uint64x8) Uint64x8 // Float64x8 converts from Float32x16 to Float64x8 func (from Float32x16) AsFloat64x8() (to Float64x8) diff --git a/src/simd/types_amd64.go b/src/simd/types_amd64.go index ab0f15a89e2a31..67f4d297024b01 100644 --- a/src/simd/types_amd64.go +++ b/src/simd/types_amd64.go @@ -9,6 +9,25 @@ type v128 struct { _128 struct{} } +// Int8x16 is a 128-bit SIMD vector of 16 int8 +type Int8x16 struct { + int8x16 v128 + vals [16]int8 +} + +// Len returns the number of elements in a Int8x16 +func (x Int8x16) Len() int { return 16 } + +// LoadInt8x16 loads a Int8x16 from an array +// +//go:noescape +func LoadInt8x16(y *[16]int8) Int8x16 + +// Store stores a Int8x16 to an array +// +//go:noescape +func (x Int8x16) Store(y *[16]int8) + // Int16x8 is a 128-bit SIMD vector of 8 int16 type Int16x8 struct { int16x8 v128 @@ -47,25 +66,6 @@ func LoadInt32x4(y *[4]int32) Int32x4 //go:noescape func (x Int32x4) Store(y *[4]int32) -// Int8x16 is a 128-bit SIMD vector of 16 int8 -type Int8x16 struct { - int8x16 v128 - vals [16]int8 -} - -// Len returns the number of elements in a Int8x16 -func (x Int8x16) Len() int { return 16 } - -// LoadInt8x16 loads a Int8x16 from an array -// -//go:noescape -func LoadInt8x16(y *[16]int8) Int8x16 - -// Store stores a Int8x16 to an array -// -//go:noescape -func (x Int8x16) Store(y *[16]int8) - // Int64x2 is a 128-bit SIMD vector of 2 int64 type Int64x2 struct { int64x2 v128 @@ -129,6 +129,25 @@ func LoadFloat64x2(y *[2]float64) Float64x2 //go:noescape func (x Float64x2) Store(y *[2]float64) +// Uint8x16 is a 128-bit SIMD vector of 16 uint8 +type Uint8x16 struct { + uint8x16 v128 + vals [16]uint8 +} + +// Len returns the number of elements in a Uint8x16 +func (x Uint8x16) Len() int { return 16 } + +// LoadUint8x16 loads a Uint8x16 from an array +// +//go:noescape +func LoadUint8x16(y *[16]uint8) Uint8x16 + +// Store stores a Uint8x16 to an array +// +//go:noescape +func (x Uint8x16) Store(y *[16]uint8) + // Uint16x8 is a 128-bit SIMD vector of 8 uint16 type Uint16x8 struct { uint16x8 v128 @@ -186,48 +205,48 @@ func LoadUint64x2(y *[2]uint64) Uint64x2 //go:noescape func (x Uint64x2) Store(y *[2]uint64) -// Uint8x16 is a 128-bit SIMD vector of 16 uint8 -type Uint8x16 struct { - uint8x16 v128 - vals [16]uint8 -} - -// Len returns the number of elements in a Uint8x16 -func (x Uint8x16) Len() int { return 16 } - -// LoadUint8x16 loads a Uint8x16 from an array -// -//go:noescape -func LoadUint8x16(y *[16]uint8) Uint8x16 - -// Store stores a Uint8x16 to an array -// -//go:noescape -func (x Uint8x16) Store(y *[16]uint8) - // Mask32x4 is a 128-bit SIMD vector of 4 int32 type Mask32x4 struct { int32x4 v128 vals [4]int32 } -// Mask16x8 is a 128-bit SIMD vector of 8 int16 -type Mask16x8 struct { - int16x8 v128 - vals [8]int16 -} - // Mask8x16 is a 128-bit SIMD vector of 16 int8 type Mask8x16 struct { int8x16 v128 vals [16]int8 } +// Mask16x8 is a 128-bit SIMD vector of 8 int16 +type Mask16x8 struct { + int16x8 v128 + vals [8]int16 +} + // v256 is a tag type that tells the compiler that this is really 256-bit SIMD type v256 struct { _256 struct{} } +// Int8x32 is a 256-bit SIMD vector of 32 int8 +type Int8x32 struct { + int8x32 v256 + vals [32]int8 +} + +// Len returns the number of elements in a Int8x32 +func (x Int8x32) Len() int { return 32 } + +// LoadInt8x32 loads a Int8x32 from an array +// +//go:noescape +func LoadInt8x32(y *[32]int8) Int8x32 + +// Store stores a Int8x32 to an array +// +//go:noescape 
+func (x Int8x32) Store(y *[32]int8) + // Int16x16 is a 256-bit SIMD vector of 16 int16 type Int16x16 struct { int16x16 v256 @@ -266,25 +285,6 @@ func LoadInt32x8(y *[8]int32) Int32x8 //go:noescape func (x Int32x8) Store(y *[8]int32) -// Int8x32 is a 256-bit SIMD vector of 32 int8 -type Int8x32 struct { - int8x32 v256 - vals [32]int8 -} - -// Len returns the number of elements in a Int8x32 -func (x Int8x32) Len() int { return 32 } - -// LoadInt8x32 loads a Int8x32 from an array -// -//go:noescape -func LoadInt8x32(y *[32]int8) Int8x32 - -// Store stores a Int8x32 to an array -// -//go:noescape -func (x Int8x32) Store(y *[32]int8) - // Int64x4 is a 256-bit SIMD vector of 4 int64 type Int64x4 struct { int64x4 v256 @@ -348,6 +348,25 @@ func LoadFloat64x4(y *[4]float64) Float64x4 //go:noescape func (x Float64x4) Store(y *[4]float64) +// Uint8x32 is a 256-bit SIMD vector of 32 uint8 +type Uint8x32 struct { + uint8x32 v256 + vals [32]uint8 +} + +// Len returns the number of elements in a Uint8x32 +func (x Uint8x32) Len() int { return 32 } + +// LoadUint8x32 loads a Uint8x32 from an array +// +//go:noescape +func LoadUint8x32(y *[32]uint8) Uint8x32 + +// Store stores a Uint8x32 to an array +// +//go:noescape +func (x Uint8x32) Store(y *[32]uint8) + // Uint16x16 is a 256-bit SIMD vector of 16 uint16 type Uint16x16 struct { uint16x16 v256 @@ -405,48 +424,54 @@ func LoadUint64x4(y *[4]uint64) Uint64x4 //go:noescape func (x Uint64x4) Store(y *[4]uint64) -// Uint8x32 is a 256-bit SIMD vector of 32 uint8 -type Uint8x32 struct { - uint8x32 v256 - vals [32]uint8 -} - -// Len returns the number of elements in a Uint8x32 -func (x Uint8x32) Len() int { return 32 } - -// LoadUint8x32 loads a Uint8x32 from an array -// -//go:noescape -func LoadUint8x32(y *[32]uint8) Uint8x32 - -// Store stores a Uint8x32 to an array -// -//go:noescape -func (x Uint8x32) Store(y *[32]uint8) - // Mask32x8 is a 256-bit SIMD vector of 8 int32 type Mask32x8 struct { int32x8 v256 vals [8]int32 } -// Mask16x16 is a 256-bit SIMD vector of 16 int16 -type Mask16x16 struct { - int16x16 v256 - vals [16]int16 -} - // Mask8x32 is a 256-bit SIMD vector of 32 int8 type Mask8x32 struct { int8x32 v256 vals [32]int8 } +// Mask16x16 is a 256-bit SIMD vector of 16 int16 +type Mask16x16 struct { + int16x16 v256 + vals [16]int16 +} + // v512 is a tag type that tells the compiler that this is really 512-bit SIMD type v512 struct { _512 struct{} } +// Int8x64 is a 512-bit SIMD vector of 64 int8 +type Int8x64 struct { + int8x64 v512 + vals [64]int8 +} + +// Len returns the number of elements in a Int8x64 +func (x Int8x64) Len() int { return 64 } + +// LoadInt8x64 loads a Int8x64 from an array +// +//go:noescape +func LoadInt8x64(y *[64]int8) Int8x64 + +// Store stores a Int8x64 to an array +// +//go:noescape +func (x Int8x64) Store(y *[64]int8) + +// Mask8x64 is a 512-bit SIMD vector of 64 int8 +type Mask8x64 struct { + int8x64 v512 + vals [64]int8 +} + // Int16x32 is a 512-bit SIMD vector of 32 int16 type Int16x32 struct { int16x32 v512 @@ -522,31 +547,6 @@ type Mask64x8 struct { vals [8]int64 } -// Int8x64 is a 512-bit SIMD vector of 64 int8 -type Int8x64 struct { - int8x64 v512 - vals [64]int8 -} - -// Len returns the number of elements in a Int8x64 -func (x Int8x64) Len() int { return 64 } - -// LoadInt8x64 loads a Int8x64 from an array -// -//go:noescape -func LoadInt8x64(y *[64]int8) Int8x64 - -// Store stores a Int8x64 to an array -// -//go:noescape -func (x Int8x64) Store(y *[64]int8) - -// Mask8x64 is a 512-bit SIMD vector of 64 int8 -type 
Mask8x64 struct { - int8x64 v512 - vals [64]int8 -} - // Float32x16 is a 512-bit SIMD vector of 16 float32 type Float32x16 struct { float32x16 v512 @@ -585,6 +585,25 @@ func LoadFloat64x8(y *[8]float64) Float64x8 //go:noescape func (x Float64x8) Store(y *[8]float64) +// Uint8x64 is a 512-bit SIMD vector of 64 uint8 +type Uint8x64 struct { + uint8x64 v512 + vals [64]uint8 +} + +// Len returns the number of elements in a Uint8x64 +func (x Uint8x64) Len() int { return 64 } + +// LoadUint8x64 loads a Uint8x64 from an array +// +//go:noescape +func LoadUint8x64(y *[64]uint8) Uint8x64 + +// Store stores a Uint8x64 to an array +// +//go:noescape +func (x Uint8x64) Store(y *[64]uint8) + // Uint16x32 is a 512-bit SIMD vector of 32 uint16 type Uint16x32 struct { uint16x32 v512 @@ -641,22 +660,3 @@ func LoadUint64x8(y *[8]uint64) Uint64x8 // //go:noescape func (x Uint64x8) Store(y *[8]uint64) - -// Uint8x64 is a 512-bit SIMD vector of 64 uint8 -type Uint8x64 struct { - uint8x64 v512 - vals [64]uint8 -} - -// Len returns the number of elements in a Uint8x64 -func (x Uint8x64) Len() int { return 64 } - -// LoadUint8x64 loads a Uint8x64 from an array -// -//go:noescape -func LoadUint8x64(y *[64]uint8) Uint8x64 - -// Store stores a Uint8x64 to an array -// -//go:noescape -func (x Uint8x64) Store(y *[64]uint8) From 21d657315440f61f2fb107a53e3b6fc2b4881a31 Mon Sep 17 00:00:00 2001 From: David Chase Date: Tue, 17 Jun 2025 10:43:59 -0400 Subject: [PATCH 031/139] [dev.simd] cmd/compile: alphabetize SIMD intrinsics This is the output of CL 682036 Change-Id: I432c6e059dff7019a6bba6b777ea7fe48990278f Reviewed-on: https://go-review.googlesource.com/c/go/+/682295 LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui --- .../compile/internal/ssagen/simdintrinsics.go | 2924 ++++++++--------- 1 file changed, 1462 insertions(+), 1462 deletions(-) diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index b86c81516601bc..4b1f8a212ae2f8 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -11,1437 +11,1413 @@ import ( const simdPackage = "simd" func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies ...sys.ArchFamily)) { - addF(simdPackage, "Float32x16.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.Sqrt", opLen1(ssa.OpSqrtFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.Ceil", opLen1(ssa.OpCeilFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.Floor", opLen1(ssa.OpFloorFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.Round", opLen1(ssa.OpRoundFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.Sqrt", opLen1(ssa.OpSqrtFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.Trunc", opLen1(ssa.OpTruncFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat32x8, types.TypeVec256), sys.AMD64) - 
addF(simdPackage, "Float32x8.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.Ceil", opLen1(ssa.OpCeilFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.Floor", opLen1(ssa.OpFloorFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.Round", opLen1(ssa.OpRoundFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.Sqrt", opLen1(ssa.OpSqrtFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.Trunc", opLen1(ssa.OpTruncFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x2.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.Ceil", opLen1(ssa.OpCeilFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.Floor", opLen1(ssa.OpFloorFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.Round", opLen1(ssa.OpRoundFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.Sqrt", opLen1(ssa.OpSqrtFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.Trunc", opLen1(ssa.OpTruncFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.Ceil", opLen1(ssa.OpCeilFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.Floor", opLen1(ssa.OpFloorFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.Round", opLen1(ssa.OpRoundFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.Sqrt", opLen1(ssa.OpSqrtFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.Trunc", opLen1(ssa.OpTruncFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.Sqrt", opLen1(ssa.OpSqrtFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.Absolute", opLen1(ssa.OpAbsoluteInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.Absolute", opLen1(ssa.OpAbsoluteInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.Absolute", opLen1(ssa.OpAbsoluteInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.Absolute", opLen1(ssa.OpAbsoluteInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.Absolute", opLen1(ssa.OpAbsoluteInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.PopCount", opLen1(ssa.OpPopCountInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.Absolute", opLen1(ssa.OpAbsoluteInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.PopCount", opLen1(ssa.OpPopCountInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.Absolute", opLen1(ssa.OpAbsoluteInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.PopCount", opLen1(ssa.OpPopCountInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x16.Absolute", 
opLen1(ssa.OpAbsoluteInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.PopCount", opLen1(ssa.OpPopCountInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.Absolute", opLen1(ssa.OpAbsoluteInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.PopCount", opLen1(ssa.OpPopCountInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.Absolute", opLen1(ssa.OpAbsoluteInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.PopCount", opLen1(ssa.OpPopCountInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.Absolute", opLen1(ssa.OpAbsoluteInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int64x2.Absolute", opLen1(ssa.OpAbsoluteInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.PopCount", opLen1(ssa.OpPopCountInt64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int64x4.Absolute", opLen1(ssa.OpAbsoluteInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.PopCount", opLen1(ssa.OpPopCountInt64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int64x8.Absolute", opLen1(ssa.OpAbsoluteInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.PopCount", opLen1(ssa.OpPopCountInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.Absolute", opLen1(ssa.OpAbsoluteInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.PopCount", opLen1(ssa.OpPopCountInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.Absolute", opLen1(ssa.OpAbsoluteInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.PopCount", opLen1(ssa.OpPopCountInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.Absolute", opLen1(ssa.OpAbsoluteInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.PopCount", opLen1(ssa.OpPopCountInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x16.PopCount", opLen1(ssa.OpPopCountUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.PopCount", opLen1(ssa.OpPopCountUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.PopCount", opLen1(ssa.OpPopCountUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x16.PopCount", opLen1(ssa.OpPopCountUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.PopCount", opLen1(ssa.OpPopCountUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.PopCount", opLen1(ssa.OpPopCountUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x2.PopCount", opLen1(ssa.OpPopCountUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.PopCount", opLen1(ssa.OpPopCountUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.PopCount", opLen1(ssa.OpPopCountUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.PopCount", opLen1(ssa.OpPopCountUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.PopCount", opLen1(ssa.OpPopCountUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.PopCount", opLen1(ssa.OpPopCountUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.Add", opLen2(ssa.OpAddFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.And", opLen2(ssa.OpAndFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.AndNot", opLen2(ssa.OpAndNotFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.Div", opLen2(ssa.OpDivFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.Equal", opLen2(ssa.OpEqualFloat32x16, 
types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.Greater", opLen2(ssa.OpGreaterFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.GreaterEqual", opLen2(ssa.OpGreaterEqualFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.IsNan", opLen2(ssa.OpIsNanFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.Less", opLen2(ssa.OpLessFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.LessEqual", opLen2(ssa.OpLessEqualFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedApproximateReciprocal", opLen2(ssa.OpMaskedApproximateReciprocalFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedApproximateReciprocalOfSqrt", opLen2(ssa.OpMaskedApproximateReciprocalOfSqrtFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.Max", opLen2(ssa.OpMaxFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.Min", opLen2(ssa.OpMinFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.Mul", opLen2(ssa.OpMulFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.NotEqual", opLen2(ssa.OpNotEqualFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.Or", opLen2(ssa.OpOrFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.Sub", opLen2(ssa.OpSubFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.Xor", opLen2(ssa.OpXorFloat32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Add", opLen2(ssa.OpAddFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.AddSub", opLen2(ssa.OpAddSubFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.And", opLen2(ssa.OpAndFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.AndNot", opLen2(ssa.OpAndNotFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.Div", opLen2(ssa.OpDivFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.Equal", opLen2(ssa.OpEqualFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.Greater", opLen2(ssa.OpGreaterFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.GreaterEqual", opLen2(ssa.OpGreaterEqualFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.IsNan", opLen2(ssa.OpIsNanFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.Less", opLen2(ssa.OpLessFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.LessEqual", opLen2(ssa.OpLessEqualFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedApproximateReciprocal", opLen2(ssa.OpMaskedApproximateReciprocalFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedApproximateReciprocalOfSqrt", opLen2(ssa.OpMaskedApproximateReciprocalOfSqrtFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.Max", opLen2(ssa.OpMaxFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.Min", opLen2(ssa.OpMinFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.Mul", opLen2(ssa.OpMulFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, 
"Float32x4.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.NotEqual", opLen2(ssa.OpNotEqualFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.Or", opLen2(ssa.OpOrFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.PairwiseAdd", opLen2(ssa.OpPairwiseAddFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.PairwiseSub", opLen2(ssa.OpPairwiseSubFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.Sub", opLen2(ssa.OpSubFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.Xor", opLen2(ssa.OpXorFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Add", opLen2(ssa.OpAddFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.AddSub", opLen2(ssa.OpAddSubFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.And", opLen2(ssa.OpAndFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.AndNot", opLen2(ssa.OpAndNotFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.Div", opLen2(ssa.OpDivFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.Equal", opLen2(ssa.OpEqualFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.Greater", opLen2(ssa.OpGreaterFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.GreaterEqual", opLen2(ssa.OpGreaterEqualFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.IsNan", opLen2(ssa.OpIsNanFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.Less", opLen2(ssa.OpLessFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.LessEqual", opLen2(ssa.OpLessEqualFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedApproximateReciprocal", opLen2(ssa.OpMaskedApproximateReciprocalFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedApproximateReciprocalOfSqrt", opLen2(ssa.OpMaskedApproximateReciprocalOfSqrtFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.Max", opLen2(ssa.OpMaxFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.Min", opLen2(ssa.OpMinFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.Mul", opLen2(ssa.OpMulFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.NotEqual", opLen2(ssa.OpNotEqualFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.Or", opLen2(ssa.OpOrFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.PairwiseAdd", opLen2(ssa.OpPairwiseAddFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.PairwiseSub", opLen2(ssa.OpPairwiseSubFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.Sub", opLen2(ssa.OpSubFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.Xor", opLen2(ssa.OpXorFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.Add", opLen2(ssa.OpAddFloat32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.Add", opLen2(ssa.OpAddFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.AddSub", opLen2(ssa.OpAddSubFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.And", opLen2(ssa.OpAndFloat64x2, 
types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.AndNot", opLen2(ssa.OpAndNotFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.Div", opLen2(ssa.OpDivFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.DotProdBroadcast", opLen2(ssa.OpDotProdBroadcastFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.Equal", opLen2(ssa.OpEqualFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.Greater", opLen2(ssa.OpGreaterFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.GreaterEqual", opLen2(ssa.OpGreaterEqualFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.IsNan", opLen2(ssa.OpIsNanFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.Less", opLen2(ssa.OpLessFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.LessEqual", opLen2(ssa.OpLessEqualFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedApproximateReciprocal", opLen2(ssa.OpMaskedApproximateReciprocalFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedApproximateReciprocalOfSqrt", opLen2(ssa.OpMaskedApproximateReciprocalOfSqrtFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.Max", opLen2(ssa.OpMaxFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.Min", opLen2(ssa.OpMinFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.Mul", opLen2(ssa.OpMulFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.NotEqual", opLen2(ssa.OpNotEqualFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.Or", opLen2(ssa.OpOrFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.PairwiseAdd", opLen2(ssa.OpPairwiseAddFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.PairwiseSub", opLen2(ssa.OpPairwiseSubFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.Sub", opLen2(ssa.OpSubFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.Xor", opLen2(ssa.OpXorFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.Add", opLen2(ssa.OpAddFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.Add", opLen2(ssa.OpAddFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.Add", opLen2(ssa.OpAddInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.Add", opLen2(ssa.OpAddInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.Add", opLen2(ssa.OpAddInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.Add", opLen2(ssa.OpAddInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.Add", opLen2(ssa.OpAddInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.Add", opLen2(ssa.OpAddInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.Add", opLen2(ssa.OpAddInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.Add", opLen2(ssa.OpAddInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.Add", opLen2(ssa.OpAddInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.Add", opLen2(ssa.OpAddInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.Add", opLen2(ssa.OpAddInt64x4, types.TypeVec256), sys.AMD64) + 
addF(simdPackage, "Int64x8.Add", opLen2(ssa.OpAddInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.Add", opLen2(ssa.OpAddUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.Add", opLen2(ssa.OpAddUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.Add", opLen2(ssa.OpAddUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.Add", opLen2(ssa.OpAddUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.Add", opLen2(ssa.OpAddUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.Add", opLen2(ssa.OpAddUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.Add", opLen2(ssa.OpAddUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.Add", opLen2(ssa.OpAddUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.Add", opLen2(ssa.OpAddUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.Add", opLen2(ssa.OpAddUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.Add", opLen2(ssa.OpAddUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.Add", opLen2(ssa.OpAddUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.AddSub", opLen2(ssa.OpAddSubFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.AddSub", opLen2(ssa.OpAddSubFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x2.AddSub", opLen2(ssa.OpAddSubFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.AddSub", opLen2(ssa.OpAddSubFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x4.And", opLen2(ssa.OpAndFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.And", opLen2(ssa.OpAndFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.And", opLen2(ssa.OpAndFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.And", opLen2(ssa.OpAndFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.And", opLen2(ssa.OpAndFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.AndNot", opLen2(ssa.OpAndNotFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.Div", opLen2(ssa.OpDivFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.Equal", opLen2(ssa.OpEqualFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.Greater", opLen2(ssa.OpGreaterFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.GreaterEqual", opLen2(ssa.OpGreaterEqualFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.IsNan", opLen2(ssa.OpIsNanFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.Less", opLen2(ssa.OpLessFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.LessEqual", opLen2(ssa.OpLessEqualFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedApproximateReciprocal", opLen2(ssa.OpMaskedApproximateReciprocalFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedApproximateReciprocalOfSqrt", opLen2(ssa.OpMaskedApproximateReciprocalOfSqrtFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.Max", opLen2(ssa.OpMaxFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.Min", opLen2(ssa.OpMinFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.Mul", opLen2(ssa.OpMulFloat64x4, types.TypeVec256), 
sys.AMD64) - addF(simdPackage, "Float64x4.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.NotEqual", opLen2(ssa.OpNotEqualFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.Or", opLen2(ssa.OpOrFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.PairwiseAdd", opLen2(ssa.OpPairwiseAddFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.PairwiseSub", opLen2(ssa.OpPairwiseSubFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.Sub", opLen2(ssa.OpSubFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.Xor", opLen2(ssa.OpXorFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.Add", opLen2(ssa.OpAddFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x8.And", opLen2(ssa.OpAndFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.AndNot", opLen2(ssa.OpAndNotFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.Div", opLen2(ssa.OpDivFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.Equal", opLen2(ssa.OpEqualFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.Greater", opLen2(ssa.OpGreaterFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.GreaterEqual", opLen2(ssa.OpGreaterEqualFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.IsNan", opLen2(ssa.OpIsNanFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.Less", opLen2(ssa.OpLessFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.LessEqual", opLen2(ssa.OpLessEqualFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedApproximateReciprocal", opLen2(ssa.OpMaskedApproximateReciprocalFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedApproximateReciprocalOfSqrt", opLen2(ssa.OpMaskedApproximateReciprocalOfSqrtFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.Max", opLen2(ssa.OpMaxFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.Min", opLen2(ssa.OpMinFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.Mul", opLen2(ssa.OpMulFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.NotEqual", opLen2(ssa.OpNotEqualFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.Or", opLen2(ssa.OpOrFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.Sub", opLen2(ssa.OpSubFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.Xor", opLen2(ssa.OpXorFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x16.Add", opLen2(ssa.OpAddInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x16.And", opLen2(ssa.OpAndInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.And", opLen2(ssa.OpAndInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x8.And", opLen2(ssa.OpAndInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.And", opLen2(ssa.OpAndInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.AndNot", opLen2(ssa.OpAndNotInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.Equal", opLen2(ssa.OpEqualInt16x16, types.TypeVec256), sys.AMD64) - 
addF(simdPackage, "Int16x16.Greater", opLen2(ssa.OpGreaterInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.GreaterEqual", opLen2(ssa.OpGreaterEqualInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.Less", opLen2(ssa.OpLessInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.LessEqual", opLen2(ssa.OpLessEqualInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.Max", opLen2(ssa.OpMaxInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.Min", opLen2(ssa.OpMinInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.MulHigh", opLen2(ssa.OpMulHighInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.MulLow", opLen2(ssa.OpMulLowInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.NotEqual", opLen2(ssa.OpNotEqualInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.Or", opLen2(ssa.OpOrInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.PairDotProd", opLen2(ssa.OpPairDotProdInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.PairwiseAdd", opLen2(ssa.OpPairwiseAddInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.PairwiseSub", opLen2(ssa.OpPairwiseSubInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.SaturatedPairwiseAdd", opLen2(ssa.OpSaturatedPairwiseAddInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.SaturatedPairwiseSub", opLen2(ssa.OpSaturatedPairwiseSubInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.SaturatedSub", opLen2(ssa.OpSaturatedSubInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.Sign", opLen2(ssa.OpSignInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.Sub", opLen2(ssa.OpSubInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.Xor", opLen2(ssa.OpXorInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.Add", opLen2(ssa.OpAddInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.Equal", opLen2(ssa.OpEqualInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.Greater", opLen2(ssa.OpGreaterInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.GreaterEqual", opLen2(ssa.OpGreaterEqualInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.Less", opLen2(ssa.OpLessInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.LessEqual", opLen2(ssa.OpLessEqualInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.Max", opLen2(ssa.OpMaxInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.Min", opLen2(ssa.OpMinInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.MulHigh", opLen2(ssa.OpMulHighInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.MulLow", opLen2(ssa.OpMulLowInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.NotEqual", 
opLen2(ssa.OpNotEqualInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.PairDotProd", opLen2(ssa.OpPairDotProdInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.SaturatedSub", opLen2(ssa.OpSaturatedSubInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.Sub", opLen2(ssa.OpSubInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.Add", opLen2(ssa.OpAddInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.And", opLen2(ssa.OpAndInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.AndNot", opLen2(ssa.OpAndNotInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.Equal", opLen2(ssa.OpEqualInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.Greater", opLen2(ssa.OpGreaterInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.GreaterEqual", opLen2(ssa.OpGreaterEqualInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.Less", opLen2(ssa.OpLessInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.LessEqual", opLen2(ssa.OpLessEqualInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.Max", opLen2(ssa.OpMaxInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.Min", opLen2(ssa.OpMinInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.MulHigh", opLen2(ssa.OpMulHighInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.MulLow", opLen2(ssa.OpMulLowInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.NotEqual", opLen2(ssa.OpNotEqualInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.Or", opLen2(ssa.OpOrInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.PairDotProd", opLen2(ssa.OpPairDotProdInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.PairwiseAdd", opLen2(ssa.OpPairwiseAddInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.PairwiseSub", opLen2(ssa.OpPairwiseSubInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.SaturatedPairwiseAdd", opLen2(ssa.OpSaturatedPairwiseAddInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.SaturatedPairwiseSub", opLen2(ssa.OpSaturatedPairwiseSubInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.SaturatedSub", opLen2(ssa.OpSaturatedSubInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.Sign", opLen2(ssa.OpSignInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.Sub", opLen2(ssa.OpSubInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.Xor", opLen2(ssa.OpXorInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x16.Add", opLen2(ssa.OpAddInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.AndNot", opLen2(ssa.OpAndNotInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.Equal", opLen2(ssa.OpEqualInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.Greater", opLen2(ssa.OpGreaterInt32x16, 
types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.GreaterEqual", opLen2(ssa.OpGreaterEqualInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.Less", opLen2(ssa.OpLessInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.LessEqual", opLen2(ssa.OpLessEqualInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.Max", opLen2(ssa.OpMaxInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.Min", opLen2(ssa.OpMinInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.MulLow", opLen2(ssa.OpMulLowInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.NotEqual", opLen2(ssa.OpNotEqualInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.Sub", opLen2(ssa.OpSubInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.Xor", opLen2(ssa.OpXorInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.Add", opLen2(ssa.OpAddInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x4.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.AndNot", opLen2(ssa.OpAndNotInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.Equal", opLen2(ssa.OpEqualInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.Greater", opLen2(ssa.OpGreaterInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.GreaterEqual", opLen2(ssa.OpGreaterEqualInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.Less", opLen2(ssa.OpLessInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.LessEqual", opLen2(ssa.OpLessEqualInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.Max", opLen2(ssa.OpMaxInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.Min", opLen2(ssa.OpMinInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.MulLow", opLen2(ssa.OpMulLowInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.NotEqual", opLen2(ssa.OpNotEqualInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.PairwiseAdd", opLen2(ssa.OpPairwiseAddInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.PairwiseSub", opLen2(ssa.OpPairwiseSubInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.Sign", opLen2(ssa.OpSignInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.Sub", opLen2(ssa.OpSubInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.Xor", opLen2(ssa.OpXorInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.Add", opLen2(ssa.OpAddInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x8.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.AndNot", opLen2(ssa.OpAndNotInt32x8, types.TypeVec256), sys.AMD64) - 
addF(simdPackage, "Int32x8.Equal", opLen2(ssa.OpEqualInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.Greater", opLen2(ssa.OpGreaterInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.GreaterEqual", opLen2(ssa.OpGreaterEqualInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.Less", opLen2(ssa.OpLessInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.LessEqual", opLen2(ssa.OpLessEqualInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.Max", opLen2(ssa.OpMaxInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.Min", opLen2(ssa.OpMinInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.MulLow", opLen2(ssa.OpMulLowInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.NotEqual", opLen2(ssa.OpNotEqualInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.PairwiseAdd", opLen2(ssa.OpPairwiseAddInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.PairwiseSub", opLen2(ssa.OpPairwiseSubInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.Sign", opLen2(ssa.OpSignInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.Sub", opLen2(ssa.OpSubInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.Xor", opLen2(ssa.OpXorInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x2.Add", opLen2(ssa.OpAddInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x16.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int64x2.And", opLen2(ssa.OpAndInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.AndNot", opLen2(ssa.OpAndNotInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.Equal", opLen2(ssa.OpEqualInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.Greater", opLen2(ssa.OpGreaterInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.GreaterEqual", opLen2(ssa.OpGreaterEqualInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.Less", opLen2(ssa.OpLessInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.LessEqual", opLen2(ssa.OpLessEqualInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.Max", opLen2(ssa.OpMaxInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.Min", opLen2(ssa.OpMinInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.MulLow", opLen2(ssa.OpMulLowInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.NotEqual", opLen2(ssa.OpNotEqualInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.Or", opLen2(ssa.OpOrInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.Sub", opLen2(ssa.OpSubInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.Xor", 
opLen2(ssa.OpXorInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.Add", opLen2(ssa.OpAddInt64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int64x4.And", opLen2(ssa.OpAndInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.AndNot", opLen2(ssa.OpAndNotInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.Equal", opLen2(ssa.OpEqualInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.Greater", opLen2(ssa.OpGreaterInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.GreaterEqual", opLen2(ssa.OpGreaterEqualInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.Less", opLen2(ssa.OpLessInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.LessEqual", opLen2(ssa.OpLessEqualInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.Max", opLen2(ssa.OpMaxInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.Min", opLen2(ssa.OpMinInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.MulLow", opLen2(ssa.OpMulLowInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.NotEqual", opLen2(ssa.OpNotEqualInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.Or", opLen2(ssa.OpOrInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.Sub", opLen2(ssa.OpSubInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.Xor", opLen2(ssa.OpXorInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.Add", opLen2(ssa.OpAddInt64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int64x8.And", opLen2(ssa.OpAndInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.AndNot", opLen2(ssa.OpAndNotInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.Equal", opLen2(ssa.OpEqualInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.Greater", opLen2(ssa.OpGreaterInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.GreaterEqual", opLen2(ssa.OpGreaterEqualInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.Less", opLen2(ssa.OpLessInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.LessEqual", opLen2(ssa.OpLessEqualInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.Max", opLen2(ssa.OpMaxInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.Min", opLen2(ssa.OpMinInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.MulLow", opLen2(ssa.OpMulLowInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.NotEqual", opLen2(ssa.OpNotEqualInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.Or", opLen2(ssa.OpOrInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.Sub", opLen2(ssa.OpSubInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.Xor", opLen2(ssa.OpXorInt64x8, types.TypeVec512), sys.AMD64) - 
addF(simdPackage, "Int8x16.Add", opLen2(ssa.OpAddInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.And", opLen2(ssa.OpAndInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.And", opLen2(ssa.OpAndUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.And", opLen2(ssa.OpAndUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x8.And", opLen2(ssa.OpAndUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.And", opLen2(ssa.OpAndUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x4.And", opLen2(ssa.OpAndUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.And", opLen2(ssa.OpAndUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.And", opLen2(ssa.OpAndUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.And", opLen2(ssa.OpAndUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.And", opLen2(ssa.OpAndUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.And", opLen2(ssa.OpAndUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.AndNot", opLen2(ssa.OpAndNotFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.AndNot", opLen2(ssa.OpAndNotFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.AndNot", opLen2(ssa.OpAndNotFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.AndNot", opLen2(ssa.OpAndNotFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.AndNot", opLen2(ssa.OpAndNotFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.AndNot", opLen2(ssa.OpAndNotFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.AndNot", opLen2(ssa.OpAndNotInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.Equal", opLen2(ssa.OpEqualInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.Greater", opLen2(ssa.OpGreaterInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.GreaterEqual", opLen2(ssa.OpGreaterEqualInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.Less", opLen2(ssa.OpLessInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.LessEqual", opLen2(ssa.OpLessEqualInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.Max", opLen2(ssa.OpMaxInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.Min", opLen2(ssa.OpMinInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.NotEqual", opLen2(ssa.OpNotEqualInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.Or", opLen2(ssa.OpOrInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.SaturatedSub", opLen2(ssa.OpSaturatedSubInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.Sign", opLen2(ssa.OpSignInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.Sub", opLen2(ssa.OpSubInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.Xor", opLen2(ssa.OpXorInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.Add", opLen2(ssa.OpAddInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.And", opLen2(ssa.OpAndInt8x32, types.TypeVec256), sys.AMD64) 
addF(simdPackage, "Int8x32.AndNot", opLen2(ssa.OpAndNotInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.Equal", opLen2(ssa.OpEqualInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.Greater", opLen2(ssa.OpGreaterInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.GreaterEqual", opLen2(ssa.OpGreaterEqualInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.Less", opLen2(ssa.OpLessInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.LessEqual", opLen2(ssa.OpLessEqualInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.Max", opLen2(ssa.OpMaxInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.Min", opLen2(ssa.OpMinInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.NotEqual", opLen2(ssa.OpNotEqualInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.Or", opLen2(ssa.OpOrInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.SaturatedSub", opLen2(ssa.OpSaturatedSubInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.Sign", opLen2(ssa.OpSignInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.Sub", opLen2(ssa.OpSubInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.Xor", opLen2(ssa.OpXorInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.Add", opLen2(ssa.OpAddInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.Equal", opLen2(ssa.OpEqualInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.Greater", opLen2(ssa.OpGreaterInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.GreaterEqual", opLen2(ssa.OpGreaterEqualInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.Less", opLen2(ssa.OpLessInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.LessEqual", opLen2(ssa.OpLessEqualInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.Max", opLen2(ssa.OpMaxInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.Min", opLen2(ssa.OpMinInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.NotEqual", opLen2(ssa.OpNotEqualInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.SaturatedSub", opLen2(ssa.OpSaturatedSubInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.Sub", opLen2(ssa.OpSubInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x16.Add", opLen2(ssa.OpAddUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.And", opLen2(ssa.OpAndUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x8.AndNot", opLen2(ssa.OpAndNotInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.AndNot", opLen2(ssa.OpAndNotInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x4.AndNot", opLen2(ssa.OpAndNotInt32x4, types.TypeVec128), sys.AMD64) + 
addF(simdPackage, "Int32x8.AndNot", opLen2(ssa.OpAndNotInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.AndNot", opLen2(ssa.OpAndNotInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.AndNot", opLen2(ssa.OpAndNotInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.AndNot", opLen2(ssa.OpAndNotInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.AndNot", opLen2(ssa.OpAndNotInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.AndNot", opLen2(ssa.OpAndNotUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.AndNot", opLen2(ssa.OpAndNotUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x8.AndNot", opLen2(ssa.OpAndNotUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x16.AndNot", opLen2(ssa.OpAndNotUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.Average", opLen2(ssa.OpAverageUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.Equal", opLen2(ssa.OpEqualUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.Greater", opLen2(ssa.OpGreaterUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.GreaterEqual", opLen2(ssa.OpGreaterEqualUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.Less", opLen2(ssa.OpLessUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.LessEqual", opLen2(ssa.OpLessEqualUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.Max", opLen2(ssa.OpMaxUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.Min", opLen2(ssa.OpMinUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.MulHigh", opLen2(ssa.OpMulHighUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.NotEqual", opLen2(ssa.OpNotEqualUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.Or", opLen2(ssa.OpOrUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.PairwiseAdd", opLen2(ssa.OpPairwiseAddUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.PairwiseSub", opLen2(ssa.OpPairwiseSubUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.SaturatedSub", opLen2(ssa.OpSaturatedSubUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.Sub", opLen2(ssa.OpSubUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.Xor", opLen2(ssa.OpXorUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.Add", opLen2(ssa.OpAddUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.Average", opLen2(ssa.OpAverageUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.Equal", opLen2(ssa.OpEqualUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.Greater", opLen2(ssa.OpGreaterUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.GreaterEqual", opLen2(ssa.OpGreaterEqualUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.Less", opLen2(ssa.OpLessUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.LessEqual", 
opLen2(ssa.OpLessEqualUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.Max", opLen2(ssa.OpMaxUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.Min", opLen2(ssa.OpMinUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.MulHigh", opLen2(ssa.OpMulHighUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.NotEqual", opLen2(ssa.OpNotEqualUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.SaturatedSub", opLen2(ssa.OpSaturatedSubUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.Sub", opLen2(ssa.OpSubUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.Add", opLen2(ssa.OpAddUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.And", opLen2(ssa.OpAndUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.AndNot", opLen2(ssa.OpAndNotUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.Average", opLen2(ssa.OpAverageUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.Equal", opLen2(ssa.OpEqualUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.Greater", opLen2(ssa.OpGreaterUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.GreaterEqual", opLen2(ssa.OpGreaterEqualUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.Less", opLen2(ssa.OpLessUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.LessEqual", opLen2(ssa.OpLessEqualUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.Max", opLen2(ssa.OpMaxUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.Min", opLen2(ssa.OpMinUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.MulHigh", opLen2(ssa.OpMulHighUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.NotEqual", opLen2(ssa.OpNotEqualUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.Or", opLen2(ssa.OpOrUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.PairwiseAdd", opLen2(ssa.OpPairwiseAddUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.PairwiseSub", opLen2(ssa.OpPairwiseSubUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.SaturatedSub", opLen2(ssa.OpSaturatedSubUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.Sub", opLen2(ssa.OpSubUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.Xor", opLen2(ssa.OpXorUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x16.Add", opLen2(ssa.OpAddUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.And", opLen2(ssa.OpAndUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.AndNot", opLen2(ssa.OpAndNotUint32x16, 
types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.Equal", opLen2(ssa.OpEqualUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.Greater", opLen2(ssa.OpGreaterUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.GreaterEqual", opLen2(ssa.OpGreaterEqualUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.Less", opLen2(ssa.OpLessUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.LessEqual", opLen2(ssa.OpLessEqualUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.Max", opLen2(ssa.OpMaxUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.Min", opLen2(ssa.OpMinUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.NotEqual", opLen2(ssa.OpNotEqualUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.Or", opLen2(ssa.OpOrUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.Sub", opLen2(ssa.OpSubUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.Xor", opLen2(ssa.OpXorUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.Add", opLen2(ssa.OpAddUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.And", opLen2(ssa.OpAndUint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x4.AndNot", opLen2(ssa.OpAndNotUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.Equal", opLen2(ssa.OpEqualUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.Greater", opLen2(ssa.OpGreaterUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.GreaterEqual", opLen2(ssa.OpGreaterEqualUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.Less", opLen2(ssa.OpLessUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.LessEqual", opLen2(ssa.OpLessEqualUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.Max", opLen2(ssa.OpMaxUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.Min", opLen2(ssa.OpMinUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.MulEvenWiden", opLen2(ssa.OpMulEvenWidenUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.NotEqual", opLen2(ssa.OpNotEqualUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.Or", opLen2(ssa.OpOrUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.PairwiseAdd", opLen2(ssa.OpPairwiseAddUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.PairwiseSub", opLen2(ssa.OpPairwiseSubUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.Sub", opLen2(ssa.OpSubUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.Xor", opLen2(ssa.OpXorUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.Add", opLen2(ssa.OpAddUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.And", opLen2(ssa.OpAndUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x8.AndNot", opLen2(ssa.OpAndNotUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.Equal", opLen2(ssa.OpEqualUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.Greater", opLen2(ssa.OpGreaterUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.GreaterEqual", 
opLen2(ssa.OpGreaterEqualUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.Less", opLen2(ssa.OpLessUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.LessEqual", opLen2(ssa.OpLessEqualUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.Max", opLen2(ssa.OpMaxUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.Min", opLen2(ssa.OpMinUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.MulEvenWiden", opLen2(ssa.OpMulEvenWidenUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.NotEqual", opLen2(ssa.OpNotEqualUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.Or", opLen2(ssa.OpOrUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.PairwiseAdd", opLen2(ssa.OpPairwiseAddUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.PairwiseSub", opLen2(ssa.OpPairwiseSubUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.Sub", opLen2(ssa.OpSubUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.Xor", opLen2(ssa.OpXorUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x2.Add", opLen2(ssa.OpAddUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.And", opLen2(ssa.OpAndUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x16.AndNot", opLen2(ssa.OpAndNotUint32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint64x2.AndNot", opLen2(ssa.OpAndNotUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.Equal", opLen2(ssa.OpEqualUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.Greater", opLen2(ssa.OpGreaterUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.GreaterEqual", opLen2(ssa.OpGreaterEqualUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.Less", opLen2(ssa.OpLessUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.LessEqual", opLen2(ssa.OpLessEqualUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.Max", opLen2(ssa.OpMaxUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.Min", opLen2(ssa.OpMinUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.MulEvenWiden", opLen2(ssa.OpMulEvenWidenUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.NotEqual", opLen2(ssa.OpNotEqualUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.Or", opLen2(ssa.OpOrUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.Sub", opLen2(ssa.OpSubUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.Xor", opLen2(ssa.OpXorUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.Add", opLen2(ssa.OpAddUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.And", opLen2(ssa.OpAndUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x4.AndNot", opLen2(ssa.OpAndNotUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.Equal", opLen2(ssa.OpEqualUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.Greater", opLen2(ssa.OpGreaterUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.GreaterEqual", opLen2(ssa.OpGreaterEqualUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.Less", 
opLen2(ssa.OpLessUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.LessEqual", opLen2(ssa.OpLessEqualUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.Max", opLen2(ssa.OpMaxUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.Min", opLen2(ssa.OpMinUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.MulEvenWiden", opLen2(ssa.OpMulEvenWidenUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.NotEqual", opLen2(ssa.OpNotEqualUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.Or", opLen2(ssa.OpOrUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.Sub", opLen2(ssa.OpSubUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.Xor", opLen2(ssa.OpXorUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.Add", opLen2(ssa.OpAddUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.And", opLen2(ssa.OpAndUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint64x8.AndNot", opLen2(ssa.OpAndNotUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.Equal", opLen2(ssa.OpEqualUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.Greater", opLen2(ssa.OpGreaterUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.GreaterEqual", opLen2(ssa.OpGreaterEqualUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.Less", opLen2(ssa.OpLessUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.LessEqual", opLen2(ssa.OpLessEqualUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.Max", opLen2(ssa.OpMaxUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.Min", opLen2(ssa.OpMinUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.MulEvenWiden", opLen2(ssa.OpMulEvenWidenUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.NotEqual", opLen2(ssa.OpNotEqualUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.Or", opLen2(ssa.OpOrUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.Sub", opLen2(ssa.OpSubUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.Xor", opLen2(ssa.OpXorUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.Add", opLen2(ssa.OpAddUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.And", opLen2(ssa.OpAndUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.AndNot", opLen2(ssa.OpAndNotUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat64x8, 
types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x16.Average", opLen2(ssa.OpAverageUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.Equal", opLen2(ssa.OpEqualUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.Greater", opLen2(ssa.OpGreaterUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.GreaterEqual", opLen2(ssa.OpGreaterEqualUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.Less", opLen2(ssa.OpLessUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.LessEqual", opLen2(ssa.OpLessEqualUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.Max", opLen2(ssa.OpMaxUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.Min", opLen2(ssa.OpMinUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.NotEqual", opLen2(ssa.OpNotEqualUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.Or", opLen2(ssa.OpOrUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.SaturatedSub", opLen2(ssa.OpSaturatedSubUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.Sub", opLen2(ssa.OpSubUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.Xor", opLen2(ssa.OpXorUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.Add", opLen2(ssa.OpAddUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.And", opLen2(ssa.OpAndUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.AndNot", opLen2(ssa.OpAndNotUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x32.Average", opLen2(ssa.OpAverageUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.Equal", opLen2(ssa.OpEqualUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.Greater", opLen2(ssa.OpGreaterUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.GreaterEqual", opLen2(ssa.OpGreaterEqualUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.Less", opLen2(ssa.OpLessUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.LessEqual", opLen2(ssa.OpLessEqualUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, 
"Uint8x32.Max", opLen2(ssa.OpMaxUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.Min", opLen2(ssa.OpMinUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.NotEqual", opLen2(ssa.OpNotEqualUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.Or", opLen2(ssa.OpOrUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.SaturatedSub", opLen2(ssa.OpSaturatedSubUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.Sub", opLen2(ssa.OpSubUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.Xor", opLen2(ssa.OpXorUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.Add", opLen2(ssa.OpAddUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x64.Average", opLen2(ssa.OpAverageUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.Equal", opLen2(ssa.OpEqualUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.Greater", opLen2(ssa.OpGreaterUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.GreaterEqual", opLen2(ssa.OpGreaterEqualUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.Less", opLen2(ssa.OpLessUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.LessEqual", opLen2(ssa.OpLessEqualUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.Max", opLen2(ssa.OpMaxUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.Min", opLen2(ssa.OpMinUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.NotEqual", opLen2(ssa.OpNotEqualUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.SaturatedSub", opLen2(ssa.OpSaturatedSubUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.Sub", opLen2(ssa.OpSubUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplySubAdd132", 
opLen3(ssa.OpFusedMultiplySubAdd132Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedAdd", opLen3(ssa.OpMaskedAddFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedAnd", opLen3(ssa.OpMaskedAndFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedDiv", opLen3(ssa.OpMaskedDivFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedEqual", opLen3(ssa.OpMaskedEqualFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedIsNan", opLen3(ssa.OpMaskedIsNanFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedLess", opLen3(ssa.OpMaskedLessFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedMax", opLen3(ssa.OpMaskedMaxFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedMin", opLen3(ssa.OpMaskedMinFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedMul", opLen3(ssa.OpMaskedMulFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedMulByPowOf2", opLen3(ssa.OpMaskedMulByPowOf2Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedOr", opLen3(ssa.OpMaskedOrFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedSub", opLen3(ssa.OpMaskedSubFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedXor", opLen3(ssa.OpMaskedXorFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplyAdd231", 
opLen3(ssa.OpFusedMultiplyAdd231Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedAdd", opLen3(ssa.OpMaskedAddFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedAnd", opLen3(ssa.OpMaskedAndFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedDiv", opLen3(ssa.OpMaskedDivFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedEqual", opLen3(ssa.OpMaskedEqualFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedIsNan", opLen3(ssa.OpMaskedIsNanFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedLess", opLen3(ssa.OpMaskedLessFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedMax", opLen3(ssa.OpMaskedMaxFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedMin", opLen3(ssa.OpMaskedMinFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedMul", opLen3(ssa.OpMaskedMulFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, 
"Float32x4.MaskedMulByPowOf2", opLen3(ssa.OpMaskedMulByPowOf2Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedOr", opLen3(ssa.OpMaskedOrFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedSub", opLen3(ssa.OpMaskedSubFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedXor", opLen3(ssa.OpMaskedXorFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.Average", opLen2(ssa.OpAverageUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.Average", opLen2(ssa.OpAverageUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.Average", opLen2(ssa.OpAverageUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.Ceil", opLen1(ssa.OpCeilFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.Ceil", opLen1(ssa.OpCeilFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x2.Ceil", opLen1(ssa.OpCeilFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.Ceil", opLen1(ssa.OpCeilFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x4.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, 
"Float64x2.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + 
addF(simdPackage, "Float32x8.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.DiffWithTruncWithPrecision", 
opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.Div", opLen2(ssa.OpDivFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.Div", opLen2(ssa.OpDivFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.Div", opLen2(ssa.OpDivFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.Div", opLen2(ssa.OpDivFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.Div", opLen2(ssa.OpDivFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.Div", opLen2(ssa.OpDivFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.DotProdBroadcast", opLen2(ssa.OpDotProdBroadcastFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.Equal", opLen2(ssa.OpEqualInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.Equal", opLen2(ssa.OpEqualInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x8.Equal", opLen2(ssa.OpEqualInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.Equal", opLen2(ssa.OpEqualInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x4.Equal", opLen2(ssa.OpEqualInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.Equal", opLen2(ssa.OpEqualInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x2.Equal", opLen2(ssa.OpEqualInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.Equal", opLen2(ssa.OpEqualInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x4.Equal", opLen2(ssa.OpEqualFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.Equal", opLen2(ssa.OpEqualFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.Equal", opLen2(ssa.OpEqualFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.Equal", opLen2(ssa.OpEqualFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.Equal", opLen2(ssa.OpEqualFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.Equal", opLen2(ssa.OpEqualFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.Equal", opLen2(ssa.OpEqualInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.Equal", opLen2(ssa.OpEqualInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.Equal", opLen2(ssa.OpEqualInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.Equal", opLen2(ssa.OpEqualInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.Equal", opLen2(ssa.OpEqualUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.Equal", opLen2(ssa.OpEqualUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.Equal", opLen2(ssa.OpEqualUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.Equal", opLen2(ssa.OpEqualUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.Equal", opLen2(ssa.OpEqualUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.Equal", opLen2(ssa.OpEqualUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.Equal", opLen2(ssa.OpEqualUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.Equal", opLen2(ssa.OpEqualUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.Equal", opLen2(ssa.OpEqualUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.Equal", 
opLen2(ssa.OpEqualUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.Equal", opLen2(ssa.OpEqualUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.Equal", opLen2(ssa.OpEqualUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.Floor", opLen1(ssa.OpFloorFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.Floor", opLen1(ssa.OpFloorFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x2.Floor", opLen1(ssa.OpFloorFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.Floor", opLen1(ssa.OpFloorFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x4.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float32x8, 
types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedAdd", opLen3(ssa.OpMaskedAddFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedAnd", opLen3(ssa.OpMaskedAndFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedDiv", opLen3(ssa.OpMaskedDivFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedEqual", opLen3(ssa.OpMaskedEqualFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedIsNan", opLen3(ssa.OpMaskedIsNanFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedLess", opLen3(ssa.OpMaskedLessFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedMax", opLen3(ssa.OpMaskedMaxFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedMin", opLen3(ssa.OpMaskedMinFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedMul", opLen3(ssa.OpMaskedMulFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedMulByPowOf2", opLen3(ssa.OpMaskedMulByPowOf2Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedOr", opLen3(ssa.OpMaskedOrFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedSub", opLen3(ssa.OpMaskedSubFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedXor", opLen3(ssa.OpMaskedXorFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, 
"Float64x2.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplyAddSub231", 
opLen3(ssa.OpFusedMultiplyAddSub231Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, 
"Float64x4.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, 
"Float32x4.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedAdd", opLen3(ssa.OpMaskedAddFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedAnd", opLen3(ssa.OpMaskedAndFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedDiv", opLen3(ssa.OpMaskedDivFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, 
"Float64x2.MaskedEqual", opLen3(ssa.OpMaskedEqualFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedIsNan", opLen3(ssa.OpMaskedIsNanFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedLess", opLen3(ssa.OpMaskedLessFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedMax", opLen3(ssa.OpMaskedMaxFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedMin", opLen3(ssa.OpMaskedMinFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedMul", opLen3(ssa.OpMaskedMulFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedMulByPowOf2", opLen3(ssa.OpMaskedMulByPowOf2Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedOr", opLen3(ssa.OpMaskedOrFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedSub", opLen3(ssa.OpMaskedSubFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedXor", opLen3(ssa.OpMaskedXorFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, 
"Float64x4.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x4.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedAdd", opLen3(ssa.OpMaskedAddFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedAnd", opLen3(ssa.OpMaskedAndFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedDiv", opLen3(ssa.OpMaskedDivFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedEqual", opLen3(ssa.OpMaskedEqualFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedIsNan", opLen3(ssa.OpMaskedIsNanFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedLess", opLen3(ssa.OpMaskedLessFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedMax", opLen3(ssa.OpMaskedMaxFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedMin", opLen3(ssa.OpMaskedMinFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedMul", opLen3(ssa.OpMaskedMulFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedMulByPowOf2", opLen3(ssa.OpMaskedMulByPowOf2Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedOr", opLen3(ssa.OpMaskedOrFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedSub", opLen3(ssa.OpMaskedSubFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedXor", opLen3(ssa.OpMaskedXorFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float64x8, types.TypeVec512), sys.AMD64) - 
addF(simdPackage, "Float64x8.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x8.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedAdd", opLen3(ssa.OpMaskedAddFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedAnd", opLen3(ssa.OpMaskedAndFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedDiv", opLen3(ssa.OpMaskedDivFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedEqual", opLen3(ssa.OpMaskedEqualFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedIsNan", opLen3(ssa.OpMaskedIsNanFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedLess", opLen3(ssa.OpMaskedLessFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedMax", opLen3(ssa.OpMaskedMaxFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedMin", opLen3(ssa.OpMaskedMinFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedMul", opLen3(ssa.OpMaskedMulFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedMulByPowOf2", opLen3(ssa.OpMaskedMulByPowOf2Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedOr", opLen3(ssa.OpMaskedOrFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedSub", opLen3(ssa.OpMaskedSubFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedXor", opLen3(ssa.OpMaskedXorFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedAdd", opLen3(ssa.OpMaskedAddInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedEqual", opLen3(ssa.OpMaskedEqualInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt16x16, types.TypeVec256), sys.AMD64) 
- addF(simdPackage, "Int16x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedLess", opLen3(ssa.OpMaskedLessInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedMax", opLen3(ssa.OpMaskedMaxInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedMin", opLen3(ssa.OpMaskedMinInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedPairDotProd", opLen3(ssa.OpMaskedPairDotProdInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedSub", opLen3(ssa.OpMaskedSubInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedAdd", opLen3(ssa.OpMaskedAddInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedEqual", opLen3(ssa.OpMaskedEqualInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedLess", opLen3(ssa.OpMaskedLessInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedMax", opLen3(ssa.OpMaskedMaxInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedMin", opLen3(ssa.OpMaskedMinInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedPairDotProd", opLen3(ssa.OpMaskedPairDotProdInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedSub", opLen3(ssa.OpMaskedSubInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedAdd", opLen3(ssa.OpMaskedAddInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedEqual", opLen3(ssa.OpMaskedEqualInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedLess", opLen3(ssa.OpMaskedLessInt16x8, 
types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedMax", opLen3(ssa.OpMaskedMaxInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedMin", opLen3(ssa.OpMaskedMinInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedPairDotProd", opLen3(ssa.OpMaskedPairDotProdInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedSub", opLen3(ssa.OpMaskedSubInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedAdd", opLen3(ssa.OpMaskedAddInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedAnd", opLen3(ssa.OpMaskedAndInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedEqual", opLen3(ssa.OpMaskedEqualInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedLess", opLen3(ssa.OpMaskedLessInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedMax", opLen3(ssa.OpMaskedMaxInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedMin", opLen3(ssa.OpMaskedMinInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedOr", opLen3(ssa.OpMaskedOrInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedSub", opLen3(ssa.OpMaskedSubInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedXor", opLen3(ssa.OpMaskedXorInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.PairDotProdAccumulate", opLen3(ssa.OpPairDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.SaturatedPairDotProdAccumulate", opLen3(ssa.OpSaturatedPairDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedAdd", opLen3(ssa.OpMaskedAddInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedAnd", opLen3(ssa.OpMaskedAndInt32x4, 
types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedEqual", opLen3(ssa.OpMaskedEqualInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedLess", opLen3(ssa.OpMaskedLessInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedMax", opLen3(ssa.OpMaskedMaxInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedMin", opLen3(ssa.OpMaskedMinInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedOr", opLen3(ssa.OpMaskedOrInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedSub", opLen3(ssa.OpMaskedSubInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedXor", opLen3(ssa.OpMaskedXorInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.PairDotProdAccumulate", opLen3(ssa.OpPairDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.SaturatedPairDotProdAccumulate", opLen3(ssa.OpSaturatedPairDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedAdd", opLen3(ssa.OpMaskedAddInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedAnd", opLen3(ssa.OpMaskedAndInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedEqual", opLen3(ssa.OpMaskedEqualInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedLess", opLen3(ssa.OpMaskedLessInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedMax", opLen3(ssa.OpMaskedMaxInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedMin", opLen3(ssa.OpMaskedMinInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedOr", opLen3(ssa.OpMaskedOrInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedSub", opLen3(ssa.OpMaskedSubInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedXor", 
opLen3(ssa.OpMaskedXorInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.PairDotProdAccumulate", opLen3(ssa.OpPairDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.SaturatedPairDotProdAccumulate", opLen3(ssa.OpSaturatedPairDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedAdd", opLen3(ssa.OpMaskedAddInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedAnd", opLen3(ssa.OpMaskedAndInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedEqual", opLen3(ssa.OpMaskedEqualInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedLess", opLen3(ssa.OpMaskedLessInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedMax", opLen3(ssa.OpMaskedMaxInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedMin", opLen3(ssa.OpMaskedMinInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedMulEvenWiden", opLen3(ssa.OpMaskedMulEvenWidenInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedOr", opLen3(ssa.OpMaskedOrInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedSub", opLen3(ssa.OpMaskedSubInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedXor", opLen3(ssa.OpMaskedXorInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedAdd", opLen3(ssa.OpMaskedAddInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedAnd", opLen3(ssa.OpMaskedAndInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedEqual", opLen3(ssa.OpMaskedEqualInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedLess", opLen3(ssa.OpMaskedLessInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedMax", opLen3(ssa.OpMaskedMaxInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedMin", opLen3(ssa.OpMaskedMinInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedMulEvenWiden", opLen3(ssa.OpMaskedMulEvenWidenInt64x4, types.TypeVec256), 
sys.AMD64) - addF(simdPackage, "Int64x4.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedOr", opLen3(ssa.OpMaskedOrInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedSub", opLen3(ssa.OpMaskedSubInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedXor", opLen3(ssa.OpMaskedXorInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedAdd", opLen3(ssa.OpMaskedAddInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedAnd", opLen3(ssa.OpMaskedAndInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedEqual", opLen3(ssa.OpMaskedEqualInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedLess", opLen3(ssa.OpMaskedLessInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedMax", opLen3(ssa.OpMaskedMaxInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedMin", opLen3(ssa.OpMaskedMinInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedMulEvenWiden", opLen3(ssa.OpMaskedMulEvenWidenInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedOr", opLen3(ssa.OpMaskedOrInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedSub", opLen3(ssa.OpMaskedSubInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedXor", opLen3(ssa.OpMaskedXorInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.Greater", opLen2(ssa.OpGreaterInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.Greater", opLen2(ssa.OpGreaterInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x8.Greater", opLen2(ssa.OpGreaterInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.Greater", opLen2(ssa.OpGreaterInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x4.Greater", opLen2(ssa.OpGreaterInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.Greater", opLen2(ssa.OpGreaterInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.Greater", opLen2(ssa.OpGreaterInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x4.Greater", opLen2(ssa.OpGreaterFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.Greater", opLen2(ssa.OpGreaterFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.Greater", opLen2(ssa.OpGreaterFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.Greater", opLen2(ssa.OpGreaterFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.Greater", opLen2(ssa.OpGreaterFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.Greater", opLen2(ssa.OpGreaterFloat64x8, types.TypeVec512), sys.AMD64) + 
addF(simdPackage, "Int8x64.Greater", opLen2(ssa.OpGreaterInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.Greater", opLen2(ssa.OpGreaterInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.Greater", opLen2(ssa.OpGreaterInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.Greater", opLen2(ssa.OpGreaterInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x8.Greater", opLen2(ssa.OpGreaterInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.Greater", opLen2(ssa.OpGreaterUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.Greater", opLen2(ssa.OpGreaterUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.Greater", opLen2(ssa.OpGreaterUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.Greater", opLen2(ssa.OpGreaterUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.Greater", opLen2(ssa.OpGreaterUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.Greater", opLen2(ssa.OpGreaterUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.Greater", opLen2(ssa.OpGreaterUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.Greater", opLen2(ssa.OpGreaterUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.Greater", opLen2(ssa.OpGreaterUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.Greater", opLen2(ssa.OpGreaterUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.Greater", opLen2(ssa.OpGreaterUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.Greater", opLen2(ssa.OpGreaterUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.GreaterEqual", opLen2(ssa.OpGreaterEqualFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.GreaterEqual", opLen2(ssa.OpGreaterEqualFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.GreaterEqual", opLen2(ssa.OpGreaterEqualFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.GreaterEqual", opLen2(ssa.OpGreaterEqualFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.GreaterEqual", opLen2(ssa.OpGreaterEqualFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.GreaterEqual", opLen2(ssa.OpGreaterEqualFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.GreaterEqual", opLen2(ssa.OpGreaterEqualInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.GreaterEqual", opLen2(ssa.OpGreaterEqualInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.GreaterEqual", opLen2(ssa.OpGreaterEqualInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.GreaterEqual", opLen2(ssa.OpGreaterEqualInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.GreaterEqual", opLen2(ssa.OpGreaterEqualInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.GreaterEqual", opLen2(ssa.OpGreaterEqualInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.GreaterEqual", opLen2(ssa.OpGreaterEqualInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.GreaterEqual", opLen2(ssa.OpGreaterEqualInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.GreaterEqual", opLen2(ssa.OpGreaterEqualInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.GreaterEqual", opLen2(ssa.OpGreaterEqualInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.GreaterEqual", opLen2(ssa.OpGreaterEqualInt64x4, types.TypeVec256), 
sys.AMD64) + addF(simdPackage, "Int64x8.GreaterEqual", opLen2(ssa.OpGreaterEqualInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.GreaterEqual", opLen2(ssa.OpGreaterEqualUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.GreaterEqual", opLen2(ssa.OpGreaterEqualUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.GreaterEqual", opLen2(ssa.OpGreaterEqualUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.GreaterEqual", opLen2(ssa.OpGreaterEqualUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.GreaterEqual", opLen2(ssa.OpGreaterEqualUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.GreaterEqual", opLen2(ssa.OpGreaterEqualUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.GreaterEqual", opLen2(ssa.OpGreaterEqualUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.GreaterEqual", opLen2(ssa.OpGreaterEqualUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.GreaterEqual", opLen2(ssa.OpGreaterEqualUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.GreaterEqual", opLen2(ssa.OpGreaterEqualUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.GreaterEqual", opLen2(ssa.OpGreaterEqualUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.GreaterEqual", opLen2(ssa.OpGreaterEqualUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.IsNan", opLen2(ssa.OpIsNanFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.IsNan", opLen2(ssa.OpIsNanFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.IsNan", opLen2(ssa.OpIsNanFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.IsNan", opLen2(ssa.OpIsNanFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.IsNan", opLen2(ssa.OpIsNanFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.IsNan", opLen2(ssa.OpIsNanFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.Less", opLen2(ssa.OpLessFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.Less", opLen2(ssa.OpLessFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.Less", opLen2(ssa.OpLessFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.Less", opLen2(ssa.OpLessFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.Less", opLen2(ssa.OpLessFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.Less", opLen2(ssa.OpLessFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.Less", opLen2(ssa.OpLessInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.Less", opLen2(ssa.OpLessInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.Less", opLen2(ssa.OpLessInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.Less", opLen2(ssa.OpLessInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.Less", opLen2(ssa.OpLessInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.Less", opLen2(ssa.OpLessInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.Less", opLen2(ssa.OpLessInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.Less", opLen2(ssa.OpLessInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.Less", opLen2(ssa.OpLessInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.Less", opLen2(ssa.OpLessInt64x2, types.TypeVec128), sys.AMD64) + 
addF(simdPackage, "Int64x4.Less", opLen2(ssa.OpLessInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.Less", opLen2(ssa.OpLessInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.Less", opLen2(ssa.OpLessUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.Less", opLen2(ssa.OpLessUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.Less", opLen2(ssa.OpLessUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.Less", opLen2(ssa.OpLessUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.Less", opLen2(ssa.OpLessUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.Less", opLen2(ssa.OpLessUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.Less", opLen2(ssa.OpLessUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.Less", opLen2(ssa.OpLessUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.Less", opLen2(ssa.OpLessUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.Less", opLen2(ssa.OpLessUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.Less", opLen2(ssa.OpLessUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.Less", opLen2(ssa.OpLessUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.LessEqual", opLen2(ssa.OpLessEqualFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.LessEqual", opLen2(ssa.OpLessEqualFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.LessEqual", opLen2(ssa.OpLessEqualFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.LessEqual", opLen2(ssa.OpLessEqualFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.LessEqual", opLen2(ssa.OpLessEqualFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.LessEqual", opLen2(ssa.OpLessEqualFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.LessEqual", opLen2(ssa.OpLessEqualInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.LessEqual", opLen2(ssa.OpLessEqualInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.LessEqual", opLen2(ssa.OpLessEqualInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.LessEqual", opLen2(ssa.OpLessEqualInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.LessEqual", opLen2(ssa.OpLessEqualInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.LessEqual", opLen2(ssa.OpLessEqualInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.LessEqual", opLen2(ssa.OpLessEqualInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.LessEqual", opLen2(ssa.OpLessEqualInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.LessEqual", opLen2(ssa.OpLessEqualInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.LessEqual", opLen2(ssa.OpLessEqualInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.LessEqual", opLen2(ssa.OpLessEqualInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.LessEqual", opLen2(ssa.OpLessEqualInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.LessEqual", opLen2(ssa.OpLessEqualUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.LessEqual", opLen2(ssa.OpLessEqualUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.LessEqual", opLen2(ssa.OpLessEqualUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.LessEqual", 
opLen2(ssa.OpLessEqualUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.LessEqual", opLen2(ssa.OpLessEqualUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.LessEqual", opLen2(ssa.OpLessEqualUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.LessEqual", opLen2(ssa.OpLessEqualUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.LessEqual", opLen2(ssa.OpLessEqualUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.LessEqual", opLen2(ssa.OpLessEqualUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.LessEqual", opLen2(ssa.OpLessEqualUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.LessEqual", opLen2(ssa.OpLessEqualUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.LessEqual", opLen2(ssa.OpLessEqualUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedAdd", opLen3(ssa.OpMaskedAddFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedAdd", opLen3(ssa.OpMaskedAddFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedAdd", opLen3(ssa.OpMaskedAddFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedAdd", opLen3(ssa.OpMaskedAddFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedAdd", opLen3(ssa.OpMaskedAddFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedAdd", opLen3(ssa.OpMaskedAddFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.MaskedAdd", opLen3(ssa.OpMaskedAddInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.MaskedEqual", opLen3(ssa.OpMaskedEqualInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.MaskedLess", opLen3(ssa.OpMaskedLessInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt8x16, types.TypeVec128), sys.AMD64) - 
addF(simdPackage, "Int8x16.MaskedMax", opLen3(ssa.OpMaskedMaxInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.MaskedMin", opLen3(ssa.OpMaskedMinInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.MaskedSub", opLen3(ssa.OpMaskedSubInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.MaskedAdd", opLen3(ssa.OpMaskedAddInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.MaskedEqual", opLen3(ssa.OpMaskedEqualInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.MaskedLess", opLen3(ssa.OpMaskedLessInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.MaskedMax", opLen3(ssa.OpMaskedMaxInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.MaskedMin", opLen3(ssa.OpMaskedMinInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.MaskedSub", opLen3(ssa.OpMaskedSubInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x64.MaskedAdd", opLen3(ssa.OpMaskedAddInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.MaskedEqual", opLen3(ssa.OpMaskedEqualInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.MaskedLess", opLen3(ssa.OpMaskedLessInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.MaskedMax", opLen3(ssa.OpMaskedMaxInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.MaskedMin", opLen3(ssa.OpMaskedMinInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.MaskedSub", opLen3(ssa.OpMaskedSubInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedAdd", opLen3(ssa.OpMaskedAddInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedAdd", opLen3(ssa.OpMaskedAddInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedAdd", 
opLen3(ssa.OpMaskedAddInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedAdd", opLen3(ssa.OpMaskedAddInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedAdd", opLen3(ssa.OpMaskedAddInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedAdd", opLen3(ssa.OpMaskedAddInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedAdd", opLen3(ssa.OpMaskedAddInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedAdd", opLen3(ssa.OpMaskedAddInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedAdd", opLen3(ssa.OpMaskedAddInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedAdd", opLen3(ssa.OpMaskedAddUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedAdd", opLen3(ssa.OpMaskedAddUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedAdd", opLen3(ssa.OpMaskedAddUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedAdd", opLen3(ssa.OpMaskedAddUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x16.MaskedAdd", opLen3(ssa.OpMaskedAddUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedAverage", opLen3(ssa.OpMaskedAverageUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedEqual", opLen3(ssa.OpMaskedEqualUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedLess", opLen3(ssa.OpMaskedLessUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedMax", opLen3(ssa.OpMaskedMaxUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedMin", opLen3(ssa.OpMaskedMinUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedSaturatedUnsignedSignedPairDotProd", opLen3(ssa.OpMaskedSaturatedUnsignedSignedPairDotProdUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedSub", opLen3(ssa.OpMaskedSubUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x32.MaskedAdd", opLen3(ssa.OpMaskedAddUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedAverage", opLen3(ssa.OpMaskedAverageUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedEqual", opLen3(ssa.OpMaskedEqualUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedLess", opLen3(ssa.OpMaskedLessUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, 
"Uint16x32.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedMax", opLen3(ssa.OpMaskedMaxUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedMin", opLen3(ssa.OpMaskedMinUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedSaturatedUnsignedSignedPairDotProd", opLen3(ssa.OpMaskedSaturatedUnsignedSignedPairDotProdUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedSub", opLen3(ssa.OpMaskedSubUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedAdd", opLen3(ssa.OpMaskedAddUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedAverage", opLen3(ssa.OpMaskedAverageUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedEqual", opLen3(ssa.OpMaskedEqualUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedLess", opLen3(ssa.OpMaskedLessUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedMax", opLen3(ssa.OpMaskedMaxUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedMin", opLen3(ssa.OpMaskedMinUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedSaturatedUnsignedSignedPairDotProd", opLen3(ssa.OpMaskedSaturatedUnsignedSignedPairDotProdUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedSub", opLen3(ssa.OpMaskedSubUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedAdd", opLen3(ssa.OpMaskedAddUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedAnd", opLen3(ssa.OpMaskedAndUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedEqual", opLen3(ssa.OpMaskedEqualUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedLess", opLen3(ssa.OpMaskedLessUint32x16, 
types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedMax", opLen3(ssa.OpMaskedMaxUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedMin", opLen3(ssa.OpMaskedMinUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedOr", opLen3(ssa.OpMaskedOrUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedSub", opLen3(ssa.OpMaskedSubUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedXor", opLen3(ssa.OpMaskedXorUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateUint32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint32x4.MaskedAdd", opLen3(ssa.OpMaskedAddUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedAdd", opLen3(ssa.OpMaskedAddUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedAdd", opLen3(ssa.OpMaskedAddUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedAdd", opLen3(ssa.OpMaskedAddUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedAdd", opLen3(ssa.OpMaskedAddUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedAdd", opLen3(ssa.OpMaskedAddUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedAnd", opLen3(ssa.OpMaskedAndFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedAnd", opLen3(ssa.OpMaskedAndFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedAnd", opLen3(ssa.OpMaskedAndFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedAnd", opLen3(ssa.OpMaskedAndFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedAnd", opLen3(ssa.OpMaskedAndFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedAnd", opLen3(ssa.OpMaskedAndFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedAnd", opLen3(ssa.OpMaskedAndInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedAnd", opLen3(ssa.OpMaskedAndInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedAnd", opLen3(ssa.OpMaskedAndInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedAnd", opLen3(ssa.OpMaskedAndInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedAnd", opLen3(ssa.OpMaskedAndInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedAnd", opLen3(ssa.OpMaskedAndInt64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint32x4.MaskedAnd", opLen3(ssa.OpMaskedAndUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedAnd", opLen3(ssa.OpMaskedAndUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedAnd", opLen3(ssa.OpMaskedAndUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedAnd", opLen3(ssa.OpMaskedAndUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedAnd", opLen3(ssa.OpMaskedAndUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, 
"Uint64x8.MaskedAnd", opLen3(ssa.OpMaskedAndUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint32x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedApproximateReciprocal", opLen2(ssa.OpMaskedApproximateReciprocalFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedApproximateReciprocal", opLen2(ssa.OpMaskedApproximateReciprocalFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedApproximateReciprocal", opLen2(ssa.OpMaskedApproximateReciprocalFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedApproximateReciprocal", opLen2(ssa.OpMaskedApproximateReciprocalFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedApproximateReciprocal", opLen2(ssa.OpMaskedApproximateReciprocalFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedApproximateReciprocal", opLen2(ssa.OpMaskedApproximateReciprocalFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedApproximateReciprocalOfSqrt", opLen2(ssa.OpMaskedApproximateReciprocalOfSqrtFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedApproximateReciprocalOfSqrt", opLen2(ssa.OpMaskedApproximateReciprocalOfSqrtFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedApproximateReciprocalOfSqrt", opLen2(ssa.OpMaskedApproximateReciprocalOfSqrtFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedApproximateReciprocalOfSqrt", opLen2(ssa.OpMaskedApproximateReciprocalOfSqrtFloat64x2, types.TypeVec128), sys.AMD64) + 
addF(simdPackage, "Float64x4.MaskedApproximateReciprocalOfSqrt", opLen2(ssa.OpMaskedApproximateReciprocalOfSqrtFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedApproximateReciprocalOfSqrt", opLen2(ssa.OpMaskedApproximateReciprocalOfSqrtFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedAverage", opLen3(ssa.OpMaskedAverageUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedAverage", opLen3(ssa.OpMaskedAverageUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedAverage", opLen3(ssa.OpMaskedAverageUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedAverage", opLen3(ssa.OpMaskedAverageUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedAverage", opLen3(ssa.OpMaskedAverageUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedAverage", opLen3(ssa.OpMaskedAverageUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedDiffWithCeilSuppressExceptionWithPrecision", 
opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + 
addF(simdPackage, "Float32x4.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedDiffWithTruncWithPrecision", 
opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedDiv", opLen3(ssa.OpMaskedDivFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedDiv", opLen3(ssa.OpMaskedDivFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiv", opLen3(ssa.OpMaskedDivFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedDiv", opLen3(ssa.OpMaskedDivFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedDiv", opLen3(ssa.OpMaskedDivFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedDiv", opLen3(ssa.OpMaskedDivFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedEqual", opLen3(ssa.OpMaskedEqualFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedEqual", opLen3(ssa.OpMaskedEqualFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedEqual", opLen3(ssa.OpMaskedEqualFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedEqual", opLen3(ssa.OpMaskedEqualFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedEqual", opLen3(ssa.OpMaskedEqualFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedEqual", opLen3(ssa.OpMaskedEqualFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.MaskedEqual", opLen3(ssa.OpMaskedEqualInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.MaskedEqual", opLen3(ssa.OpMaskedEqualInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.MaskedEqual", opLen3(ssa.OpMaskedEqualInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedEqual", opLen3(ssa.OpMaskedEqualInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedEqual", opLen3(ssa.OpMaskedEqualInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedEqual", opLen3(ssa.OpMaskedEqualInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedEqual", opLen3(ssa.OpMaskedEqualInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedEqual", opLen3(ssa.OpMaskedEqualInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedEqual", opLen3(ssa.OpMaskedEqualInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedEqual", opLen3(ssa.OpMaskedEqualInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedEqual", opLen3(ssa.OpMaskedEqualInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedEqual", opLen3(ssa.OpMaskedEqualInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedEqual", opLen3(ssa.OpMaskedEqualUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedEqual", opLen3(ssa.OpMaskedEqualUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, 
"Uint8x64.MaskedEqual", opLen3(ssa.OpMaskedEqualUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedEqual", opLen3(ssa.OpMaskedEqualUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedEqual", opLen3(ssa.OpMaskedEqualUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedEqual", opLen3(ssa.OpMaskedEqualUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint32x4.MaskedEqual", opLen3(ssa.OpMaskedEqualUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedLess", opLen3(ssa.OpMaskedLessUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedMax", opLen3(ssa.OpMaskedMaxUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedMin", opLen3(ssa.OpMaskedMinUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedOr", opLen3(ssa.OpMaskedOrUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedSub", opLen3(ssa.OpMaskedSubUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedXor", opLen3(ssa.OpMaskedXorUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedAdd", opLen3(ssa.OpMaskedAddUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedAnd", opLen3(ssa.OpMaskedAndUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x8.MaskedEqual", opLen3(ssa.OpMaskedEqualUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedLess", opLen3(ssa.OpMaskedLessUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedMax", opLen3(ssa.OpMaskedMaxUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedMin", opLen3(ssa.OpMaskedMinUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedOr", opLen3(ssa.OpMaskedOrUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedSub", opLen3(ssa.OpMaskedSubUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedXor", opLen3(ssa.OpMaskedXorUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.SaturatedUnsignedSignedQuadDotProdAccumulate", 
opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedAdd", opLen3(ssa.OpMaskedAddUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedAnd", opLen3(ssa.OpMaskedAndUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedEqual", opLen3(ssa.OpMaskedEqualUint32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint64x2.MaskedEqual", opLen3(ssa.OpMaskedEqualUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedLess", opLen3(ssa.OpMaskedLessUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedMax", opLen3(ssa.OpMaskedMaxUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedMin", opLen3(ssa.OpMaskedMinUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedMulEvenWiden", opLen3(ssa.OpMaskedMulEvenWidenUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedOr", opLen3(ssa.OpMaskedOrUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedSub", opLen3(ssa.OpMaskedSubUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedXor", opLen3(ssa.OpMaskedXorUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedAdd", opLen3(ssa.OpMaskedAddUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedAnd", opLen3(ssa.OpMaskedAndUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x4.MaskedEqual", opLen3(ssa.OpMaskedEqualUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedLess", opLen3(ssa.OpMaskedLessUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedMax", opLen3(ssa.OpMaskedMaxUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedMin", opLen3(ssa.OpMaskedMinUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedMulEvenWiden", opLen3(ssa.OpMaskedMulEvenWidenUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedOr", opLen3(ssa.OpMaskedOrUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedSub", opLen3(ssa.OpMaskedSubUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, 
"Uint64x4.MaskedXor", opLen3(ssa.OpMaskedXorUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedAdd", opLen3(ssa.OpMaskedAddUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedAnd", opLen3(ssa.OpMaskedAndUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint64x8.MaskedEqual", opLen3(ssa.OpMaskedEqualUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedLess", opLen3(ssa.OpMaskedLessUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedMax", opLen3(ssa.OpMaskedMaxUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedMin", opLen3(ssa.OpMaskedMinUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedMulEvenWiden", opLen3(ssa.OpMaskedMulEvenWidenUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedOr", opLen3(ssa.OpMaskedOrUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedSub", opLen3(ssa.OpMaskedSubUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedXor", opLen3(ssa.OpMaskedXorUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedAdd", opLen3(ssa.OpMaskedAddUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedAverage", opLen3(ssa.OpMaskedAverageUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedEqual", opLen3(ssa.OpMaskedEqualUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedLess", opLen3(ssa.OpMaskedLessUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedMax", opLen3(ssa.OpMaskedMaxUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedMin", opLen3(ssa.OpMaskedMinUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedSub", opLen3(ssa.OpMaskedSubUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedAdd", opLen3(ssa.OpMaskedAddUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedAverage", opLen3(ssa.OpMaskedAverageUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedEqual", opLen3(ssa.OpMaskedEqualUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedGreater", 
opLen3(ssa.OpMaskedGreaterUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedLess", opLen3(ssa.OpMaskedLessUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedMax", opLen3(ssa.OpMaskedMaxUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedMin", opLen3(ssa.OpMaskedMinUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedSub", opLen3(ssa.OpMaskedSubUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.MaskedAdd", opLen3(ssa.OpMaskedAddUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.MaskedAverage", opLen3(ssa.OpMaskedAverageUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.MaskedEqual", opLen3(ssa.OpMaskedEqualUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.MaskedLess", opLen3(ssa.OpMaskedLessUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.MaskedMax", opLen3(ssa.OpMaskedMaxUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.MaskedMin", opLen3(ssa.OpMaskedMinUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.MaskedSub", opLen3(ssa.OpMaskedSubUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedMultiplyAdd132", opLen4(ssa.OpMaskedFusedMultiplyAdd132Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, 
"Float32x16.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat64x8, types.TypeVec512, 4), 
sys.AMD64) addF(simdPackage, "Float32x4.MaskedFusedMultiplyAdd132", opLen4(ssa.OpMaskedFusedMultiplyAdd132Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.MaskedFusedMultiplyAdd132", opLen4(ssa.OpMaskedFusedMultiplyAdd132Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedMultiplySub132", 
opLen4(ssa.OpMaskedFusedMultiplySub132Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedMultiplyAdd132", opLen4(ssa.OpMaskedFusedMultiplyAdd132Float32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.MaskedFusedMultiplyAdd132", opLen4(ssa.OpMaskedFusedMultiplyAdd132Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float64x2, types.TypeVec128), sys.AMD64) - 
-	addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float64x2, types.TypeVec128), sys.AMD64)
 	addF(simdPackage, "Float64x4.MaskedFusedMultiplyAdd132", opLen4(ssa.OpMaskedFusedMultiplyAdd132Float64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float64x4.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float64x4.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float64x4.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float64x4.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float64x4.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float64x4.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float64x4.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float64x4.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float64x4.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float64x4.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float64x4.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float64x4, types.TypeVec256), sys.AMD64)
 	addF(simdPackage, "Float64x8.MaskedFusedMultiplyAdd132", opLen4(ssa.OpMaskedFusedMultiplyAdd132Float64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float32x4.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float32x4, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float32x8.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float32x8, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Float32x16.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float64x2.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float64x2, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float64x4.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float64x4, types.TypeVec256), sys.AMD64)
 	addF(simdPackage, "Float64x8.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float32x4.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float32x4, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float32x8.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float32x8, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Float32x16.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float64x2.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float64x2, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float64x4.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float64x4, types.TypeVec256), sys.AMD64)
 	addF(simdPackage, "Float64x8.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float32x4.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float32x4, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float32x8.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float32x8, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Float32x16.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float64x2.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float64x2, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float64x4.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float64x4, types.TypeVec256), sys.AMD64)
 	addF(simdPackage, "Float64x8.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float32x4.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float32x4, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float32x8.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float32x8, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Float32x16.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float64x2.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float64x2, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float64x4.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float64x4, types.TypeVec256), sys.AMD64)
 	addF(simdPackage, "Float64x8.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float32x4.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float32x4, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float32x8.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float32x8, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Float32x16.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float64x2.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float64x2, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float64x4.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float64x4, types.TypeVec256), sys.AMD64)
 	addF(simdPackage, "Float64x8.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float32x4.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float32x4, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float32x8.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float32x8, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Float32x16.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float64x2.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float64x2, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float64x4.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float64x4, types.TypeVec256), sys.AMD64)
 	addF(simdPackage, "Float64x8.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float32x4.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float32x4, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float32x8.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float32x8, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Float32x16.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float64x2.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float64x2, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float64x4.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float64x4, types.TypeVec256), sys.AMD64)
 	addF(simdPackage, "Float64x8.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float32x4.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float32x4, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float32x8.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float32x8, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Float32x16.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float64x2.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float64x2, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float64x4.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float64x4, types.TypeVec256), sys.AMD64)
 	addF(simdPackage, "Float64x8.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float32x4.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float32x4, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float32x8.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float32x8, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Float32x16.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float64x2.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float64x2, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float64x4.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float64x4, types.TypeVec256), sys.AMD64)
 	addF(simdPackage, "Float64x8.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float32x4.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float32x4, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float32x8.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float32x8, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Float32x16.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float64x2.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float64x2, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float64x4.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float64x4, types.TypeVec256), sys.AMD64)
 	addF(simdPackage, "Float64x8.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float32x4.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float32x4, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float32x8.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float32x8, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Float32x16.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float64x2.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float64x2, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float64x4.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float64x4, types.TypeVec256), sys.AMD64)
 	addF(simdPackage, "Float64x8.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float32x4, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float32x8, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float64x2, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float64x4, types.TypeVec256), sys.AMD64)
 	addF(simdPackage, "Float64x8.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float32x4, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float32x8, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float64x2, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float64x4, types.TypeVec256), sys.AMD64)
 	addF(simdPackage, "Float64x8.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float32x4, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float32x8, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float64x2, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float64x4, types.TypeVec256), sys.AMD64)
 	addF(simdPackage, "Float64x8.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float32x4, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float32x8, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float64x2, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float64x4, types.TypeVec256), sys.AMD64)
 	addF(simdPackage, "Float64x8.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float32x4, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float32x8, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float64x2, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float64x4, types.TypeVec256), sys.AMD64)
 	addF(simdPackage, "Float64x8.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float32x4, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float32x8, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float64x2, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float64x4, types.TypeVec256), sys.AMD64)
 	addF(simdPackage, "Float64x8.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int32x16.MaskedPairDotProdAccumulate", opLen4(ssa.OpMaskedPairDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int32x16.MaskedSaturatedPairDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedPairDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int32x16.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int32x16.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float32x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat32x4, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float32x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat32x8, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Float32x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float64x2.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat64x2, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float64x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat64x4, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Float64x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Int8x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt8x16, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Int8x32.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt8x32, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Int8x64.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt8x64, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Int16x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt16x8, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Int16x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt16x16, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Int16x32.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt16x32, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Int32x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt32x4, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Int32x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt32x8, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Int32x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Int64x2.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt64x2, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Int64x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt64x4, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Int64x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Uint8x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint8x16, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Uint8x32.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint8x32, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Uint8x64.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint8x64, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Uint16x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint16x8, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Uint16x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint16x16, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Uint16x32.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint16x32, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Uint32x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint32x4, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Uint32x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint32x8, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Uint32x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Uint64x2.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint64x2, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Uint64x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint64x4, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Uint64x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float32x4.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualFloat32x4, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float32x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualFloat32x8, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Float32x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualFloat32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float64x2.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualFloat64x2, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float64x4.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualFloat64x4, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Float64x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualFloat64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Int8x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt8x16, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Int8x32.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt8x32, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Int8x64.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt8x64, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Int16x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt16x8, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Int16x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt16x16, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Int16x32.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt16x32, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Int32x4.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt32x4, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Int32x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt32x8, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Int32x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Int64x2.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt64x2, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Int64x4.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt64x4, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Int64x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Uint8x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint8x16, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Uint8x32.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint8x32, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Uint8x64.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint8x64, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Uint16x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint16x8, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Uint16x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint16x16, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Uint16x32.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint16x32, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Uint32x4.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint32x4, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Uint32x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint32x8, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Uint32x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Uint64x2.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint64x2, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Uint64x4.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint64x4, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Uint64x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float32x4.MaskedIsNan", opLen3(ssa.OpMaskedIsNanFloat32x4, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float32x8.MaskedIsNan", opLen3(ssa.OpMaskedIsNanFloat32x8, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Float32x16.MaskedIsNan", opLen3(ssa.OpMaskedIsNanFloat32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float64x2.MaskedIsNan", opLen3(ssa.OpMaskedIsNanFloat64x2, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float64x4.MaskedIsNan", opLen3(ssa.OpMaskedIsNanFloat64x4, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Float64x8.MaskedIsNan", opLen3(ssa.OpMaskedIsNanFloat64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float32x4.MaskedLess", opLen3(ssa.OpMaskedLessFloat32x4, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float32x8.MaskedLess", opLen3(ssa.OpMaskedLessFloat32x8, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Float32x16.MaskedLess", opLen3(ssa.OpMaskedLessFloat32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float64x2.MaskedLess", opLen3(ssa.OpMaskedLessFloat64x2, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float64x4.MaskedLess", opLen3(ssa.OpMaskedLessFloat64x4, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Float64x8.MaskedLess", opLen3(ssa.OpMaskedLessFloat64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Int8x16.MaskedLess", opLen3(ssa.OpMaskedLessInt8x16, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Int8x32.MaskedLess", opLen3(ssa.OpMaskedLessInt8x32, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Int8x64.MaskedLess", opLen3(ssa.OpMaskedLessInt8x64, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Int16x8.MaskedLess", opLen3(ssa.OpMaskedLessInt16x8, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Int16x16.MaskedLess", opLen3(ssa.OpMaskedLessInt16x16, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Int16x32.MaskedLess", opLen3(ssa.OpMaskedLessInt16x32, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Int32x4.MaskedLess", opLen3(ssa.OpMaskedLessInt32x4, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Int32x8.MaskedLess", opLen3(ssa.OpMaskedLessInt32x8, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Int32x16.MaskedLess", opLen3(ssa.OpMaskedLessInt32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Int64x2.MaskedLess", opLen3(ssa.OpMaskedLessInt64x2, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Int64x4.MaskedLess", opLen3(ssa.OpMaskedLessInt64x4, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Int64x8.MaskedLess", opLen3(ssa.OpMaskedLessInt64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Uint8x16.MaskedLess", opLen3(ssa.OpMaskedLessUint8x16, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Uint8x32.MaskedLess", opLen3(ssa.OpMaskedLessUint8x32, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Uint8x64.MaskedLess", opLen3(ssa.OpMaskedLessUint8x64, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Uint16x8.MaskedLess", opLen3(ssa.OpMaskedLessUint16x8, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Uint16x16.MaskedLess", opLen3(ssa.OpMaskedLessUint16x16, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Uint16x32.MaskedLess", opLen3(ssa.OpMaskedLessUint16x32, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Uint32x4.MaskedLess", opLen3(ssa.OpMaskedLessUint32x4, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Uint32x8.MaskedLess", opLen3(ssa.OpMaskedLessUint32x8, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Uint32x16.MaskedLess", opLen3(ssa.OpMaskedLessUint32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Uint64x2.MaskedLess", opLen3(ssa.OpMaskedLessUint64x2, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Uint64x4.MaskedLess", opLen3(ssa.OpMaskedLessUint64x4, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Uint64x8.MaskedLess", opLen3(ssa.OpMaskedLessUint64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float32x4.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualFloat32x4, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float32x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualFloat32x8, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Float32x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualFloat32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float64x2.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualFloat64x2, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float64x4.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualFloat64x4, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Float64x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualFloat64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Int8x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt8x16, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Int8x32.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt8x32, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Int8x64.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt8x64, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Int16x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt16x8, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Int16x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt16x16, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Int16x32.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt16x32, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Int32x4.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt32x4, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Int32x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt32x8, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Int32x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Int64x2.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt64x2, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Int64x4.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt64x4, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Int64x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Uint8x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint8x16, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Uint8x32.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint8x32, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Uint8x64.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint8x64, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Uint16x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint16x8, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Uint16x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint16x16, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Uint16x32.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint16x32, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Uint32x4.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint32x4, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Uint32x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint32x8, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Uint32x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Uint64x2.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint64x2, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Uint64x4.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint64x4, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Uint64x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float32x4.MaskedMax", opLen3(ssa.OpMaskedMaxFloat32x4, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float32x8.MaskedMax", opLen3(ssa.OpMaskedMaxFloat32x8, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Float32x16.MaskedMax", opLen3(ssa.OpMaskedMaxFloat32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float64x2.MaskedMax", opLen3(ssa.OpMaskedMaxFloat64x2, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float64x4.MaskedMax", opLen3(ssa.OpMaskedMaxFloat64x4, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Float64x8.MaskedMax", opLen3(ssa.OpMaskedMaxFloat64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Int8x16.MaskedMax", opLen3(ssa.OpMaskedMaxInt8x16, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Int8x32.MaskedMax", opLen3(ssa.OpMaskedMaxInt8x32, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Int8x64.MaskedMax", opLen3(ssa.OpMaskedMaxInt8x64, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Int16x8.MaskedMax", opLen3(ssa.OpMaskedMaxInt16x8, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Int16x16.MaskedMax", opLen3(ssa.OpMaskedMaxInt16x16, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Int16x32.MaskedMax", opLen3(ssa.OpMaskedMaxInt16x32, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Int32x4.MaskedMax", opLen3(ssa.OpMaskedMaxInt32x4, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Int32x8.MaskedMax", opLen3(ssa.OpMaskedMaxInt32x8, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Int32x16.MaskedMax", opLen3(ssa.OpMaskedMaxInt32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Int64x2.MaskedMax", opLen3(ssa.OpMaskedMaxInt64x2, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Int64x4.MaskedMax", opLen3(ssa.OpMaskedMaxInt64x4, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Int64x8.MaskedMax", opLen3(ssa.OpMaskedMaxInt64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Uint8x16.MaskedMax", opLen3(ssa.OpMaskedMaxUint8x16, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Uint8x32.MaskedMax", opLen3(ssa.OpMaskedMaxUint8x32, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Uint8x64.MaskedMax", opLen3(ssa.OpMaskedMaxUint8x64, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Uint16x8.MaskedMax", opLen3(ssa.OpMaskedMaxUint16x8, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Uint16x16.MaskedMax", opLen3(ssa.OpMaskedMaxUint16x16, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Uint16x32.MaskedMax", opLen3(ssa.OpMaskedMaxUint16x32, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Uint32x4.MaskedMax", opLen3(ssa.OpMaskedMaxUint32x4, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Uint32x8.MaskedMax", opLen3(ssa.OpMaskedMaxUint32x8, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Uint32x16.MaskedMax", opLen3(ssa.OpMaskedMaxUint32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Uint64x2.MaskedMax", opLen3(ssa.OpMaskedMaxUint64x2, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Uint64x4.MaskedMax", opLen3(ssa.OpMaskedMaxUint64x4, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Uint64x8.MaskedMax", opLen3(ssa.OpMaskedMaxUint64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float32x4.MaskedMin", opLen3(ssa.OpMaskedMinFloat32x4, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float32x8.MaskedMin", opLen3(ssa.OpMaskedMinFloat32x8, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Float32x16.MaskedMin", opLen3(ssa.OpMaskedMinFloat32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float64x2.MaskedMin", opLen3(ssa.OpMaskedMinFloat64x2, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float64x4.MaskedMin", opLen3(ssa.OpMaskedMinFloat64x4, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Float64x8.MaskedMin", opLen3(ssa.OpMaskedMinFloat64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Int8x16.MaskedMin", opLen3(ssa.OpMaskedMinInt8x16, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Int8x32.MaskedMin", opLen3(ssa.OpMaskedMinInt8x32, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Int8x64.MaskedMin", opLen3(ssa.OpMaskedMinInt8x64, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Int16x8.MaskedMin", opLen3(ssa.OpMaskedMinInt16x8, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Int16x16.MaskedMin", opLen3(ssa.OpMaskedMinInt16x16, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Int16x32.MaskedMin", opLen3(ssa.OpMaskedMinInt16x32, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Int32x4.MaskedMin", opLen3(ssa.OpMaskedMinInt32x4, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Int32x8.MaskedMin", opLen3(ssa.OpMaskedMinInt32x8, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Int32x16.MaskedMin", opLen3(ssa.OpMaskedMinInt32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Int64x2.MaskedMin", opLen3(ssa.OpMaskedMinInt64x2, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Int64x4.MaskedMin", opLen3(ssa.OpMaskedMinInt64x4, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Int64x8.MaskedMin", opLen3(ssa.OpMaskedMinInt64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Uint8x16.MaskedMin", opLen3(ssa.OpMaskedMinUint8x16, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Uint8x32.MaskedMin", opLen3(ssa.OpMaskedMinUint8x32, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Uint8x64.MaskedMin", opLen3(ssa.OpMaskedMinUint8x64, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Uint16x8.MaskedMin", opLen3(ssa.OpMaskedMinUint16x8, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Uint16x16.MaskedMin", opLen3(ssa.OpMaskedMinUint16x16, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Uint16x32.MaskedMin", opLen3(ssa.OpMaskedMinUint16x32, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Uint32x4.MaskedMin", opLen3(ssa.OpMaskedMinUint32x4, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Uint32x8.MaskedMin", opLen3(ssa.OpMaskedMinUint32x8, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Uint32x16.MaskedMin", opLen3(ssa.OpMaskedMinUint32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Uint64x2.MaskedMin", opLen3(ssa.OpMaskedMinUint64x2, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Uint64x4.MaskedMin", opLen3(ssa.OpMaskedMinUint64x4, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Uint64x8.MaskedMin", opLen3(ssa.OpMaskedMinUint64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float32x4.MaskedMul", opLen3(ssa.OpMaskedMulFloat32x4, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float32x8.MaskedMul", opLen3(ssa.OpMaskedMulFloat32x8, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Float32x16.MaskedMul", opLen3(ssa.OpMaskedMulFloat32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float64x2.MaskedMul", opLen3(ssa.OpMaskedMulFloat64x2, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float64x4.MaskedMul", opLen3(ssa.OpMaskedMulFloat64x4, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Float64x8.MaskedMul", opLen3(ssa.OpMaskedMulFloat64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float32x4.MaskedMulByPowOf2", opLen3(ssa.OpMaskedMulByPowOf2Float32x4, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float32x8.MaskedMulByPowOf2", opLen3(ssa.OpMaskedMulByPowOf2Float32x8, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Float32x16.MaskedMulByPowOf2", opLen3(ssa.OpMaskedMulByPowOf2Float32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float64x2.MaskedMulByPowOf2", opLen3(ssa.OpMaskedMulByPowOf2Float64x2, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float64x4.MaskedMulByPowOf2", opLen3(ssa.OpMaskedMulByPowOf2Float64x4, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Float64x8.MaskedMulByPowOf2", opLen3(ssa.OpMaskedMulByPowOf2Float64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Int64x2.MaskedMulEvenWiden", opLen3(ssa.OpMaskedMulEvenWidenInt64x2, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Int64x4.MaskedMulEvenWiden", opLen3(ssa.OpMaskedMulEvenWidenInt64x4, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Int64x8.MaskedMulEvenWiden", opLen3(ssa.OpMaskedMulEvenWidenInt64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Uint64x2.MaskedMulEvenWiden", opLen3(ssa.OpMaskedMulEvenWidenUint64x2, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Uint64x4.MaskedMulEvenWiden", opLen3(ssa.OpMaskedMulEvenWidenUint64x4, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Uint64x8.MaskedMulEvenWiden", opLen3(ssa.OpMaskedMulEvenWidenUint64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Int16x8.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighInt16x8, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Int16x16.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighInt16x16, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Int16x32.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighInt16x32, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Uint16x8.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighUint16x8, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Uint16x16.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighUint16x16, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Uint16x32.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighUint16x32, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Int16x8.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt16x8, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Int16x16.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt16x16, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Int16x32.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt16x32, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Int32x4.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt32x4, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Int32x8.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt32x8, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Int32x16.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Int64x2.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt64x2, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Int64x4.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt64x4, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Int64x8.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float32x4.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualFloat32x4, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float32x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualFloat32x8, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Float32x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualFloat32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float64x2.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualFloat64x2, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float64x4.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualFloat64x4, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Float64x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualFloat64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Int8x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt8x16, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Int8x32.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt8x32, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Int8x64.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt8x64, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Int16x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt16x8, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Int16x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt16x16, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Int16x32.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt16x32, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Int32x4.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt32x4, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Int32x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt32x8, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Int32x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Int64x2.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt64x2, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Int64x4.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt64x4, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Int64x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Uint8x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint8x16, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Uint8x32.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint8x32, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Uint8x64.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint8x64, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Uint16x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint16x8, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Uint16x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint16x16, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Uint16x32.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint16x32, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Uint32x4.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint32x4, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Uint32x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint32x8, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Uint32x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Uint64x2.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint64x2, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Uint64x4.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint64x4, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Uint64x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float32x4.MaskedOr", opLen3(ssa.OpMaskedOrFloat32x4, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float32x8.MaskedOr", opLen3(ssa.OpMaskedOrFloat32x8, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Float32x16.MaskedOr", opLen3(ssa.OpMaskedOrFloat32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float64x2.MaskedOr", opLen3(ssa.OpMaskedOrFloat64x2, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Float64x4.MaskedOr", opLen3(ssa.OpMaskedOrFloat64x4, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Float64x8.MaskedOr", opLen3(ssa.OpMaskedOrFloat64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Int32x4.MaskedOr", opLen3(ssa.OpMaskedOrInt32x4, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Int32x8.MaskedOr", opLen3(ssa.OpMaskedOrInt32x8, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Int32x16.MaskedOr", opLen3(ssa.OpMaskedOrInt32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Int64x2.MaskedOr", opLen3(ssa.OpMaskedOrInt64x2, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Int64x4.MaskedOr", opLen3(ssa.OpMaskedOrInt64x4, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Int64x8.MaskedOr", opLen3(ssa.OpMaskedOrInt64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Uint32x4.MaskedOr", opLen3(ssa.OpMaskedOrUint32x4, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Uint32x8.MaskedOr", opLen3(ssa.OpMaskedOrUint32x8, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Uint32x16.MaskedOr", opLen3(ssa.OpMaskedOrUint32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Uint64x2.MaskedOr", opLen3(ssa.OpMaskedOrUint64x2, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Uint64x4.MaskedOr", opLen3(ssa.OpMaskedOrUint64x4, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Uint64x8.MaskedOr", opLen3(ssa.OpMaskedOrUint64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Int16x8.MaskedPairDotProd", opLen3(ssa.OpMaskedPairDotProdInt16x8, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Int16x16.MaskedPairDotProd", opLen3(ssa.OpMaskedPairDotProdInt16x16, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Int16x32.MaskedPairDotProd", opLen3(ssa.OpMaskedPairDotProdInt16x32, types.TypeVec512), sys.AMD64)
 	addF(simdPackage, "Int32x4.MaskedPairDotProdAccumulate", opLen4(ssa.OpMaskedPairDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int32x4.MaskedSaturatedPairDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedPairDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int32x4.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int32x4.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64)
 	addF(simdPackage, "Int32x8.MaskedPairDotProdAccumulate", opLen4(ssa.OpMaskedPairDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Int32x16.MaskedPairDotProdAccumulate", opLen4(ssa.OpMaskedPairDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Int8x16.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt8x16, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Int8x32.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt8x32, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Int8x64.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt8x64, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Int16x8.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt16x8, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Int16x16.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt16x16, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Int16x32.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt16x32, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Int32x4.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt32x4, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Int32x8.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt32x8, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Int32x16.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Int64x2.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt64x2, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Int64x4.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt64x4, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Int64x8.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Uint8x16.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint8x16, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Uint8x32.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint8x32, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Uint8x64.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint8x64, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Uint16x8.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint16x8, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Uint16x16.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint16x16, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Uint16x32.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint16x32, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Uint32x4.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint32x4, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Uint32x8.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint32x8, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Uint32x16.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Uint64x2.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint64x2, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Uint64x4.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint64x4, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Uint64x8.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float32x4.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64)
+	addF(simdPackage, "Float32x8.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64)
+	addF(simdPackage, "Float32x16.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64)
+	addF(simdPackage, "Float64x2.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64)
+	addF(simdPackage, "Float64x4.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64)
+	addF(simdPackage, "Float64x8.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64)
+	addF(simdPackage, "Float32x4.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64)
+	addF(simdPackage, "Float32x8.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64)
+	addF(simdPackage, "Float32x16.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64)
+	addF(simdPackage, "Float64x2.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64)
+	addF(simdPackage, "Float64x4.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64)
+	addF(simdPackage, "Float64x8.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64)
+	addF(simdPackage, "Int8x16.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt8x16, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Int8x32.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt8x32, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Int8x64.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt8x64, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Int16x8.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt16x8, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Int16x16.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt16x16, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Int16x32.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt16x32, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Uint8x16.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint8x16, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Uint8x32.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint8x32, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Uint8x64.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint8x64, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Uint16x8.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint16x8, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Uint16x16.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint16x16, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Uint16x32.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint16x32, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Int32x4.MaskedSaturatedPairDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedPairDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64)
 	addF(simdPackage, "Int32x8.MaskedSaturatedPairDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedPairDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Int32x16.MaskedSaturatedPairDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedPairDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Int8x16.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt8x16, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Int8x32.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt8x32, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Int8x64.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt8x64, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Int16x8.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt16x8, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Int16x16.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt16x16, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Int16x32.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt16x32, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Uint8x16.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint8x16, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Uint8x32.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint8x32, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Uint8x64.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint8x64, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Uint16x8.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint16x8, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Uint16x16.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint16x16, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Uint16x32.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint16x32, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Uint16x8.MaskedSaturatedUnsignedSignedPairDotProd", opLen3(ssa.OpMaskedSaturatedUnsignedSignedPairDotProdUint16x8, types.TypeVec128), sys.AMD64)
+	addF(simdPackage, "Uint16x16.MaskedSaturatedUnsignedSignedPairDotProd", opLen3(ssa.OpMaskedSaturatedUnsignedSignedPairDotProdUint16x16, types.TypeVec256), sys.AMD64)
+	addF(simdPackage, "Uint16x32.MaskedSaturatedUnsignedSignedPairDotProd", opLen3(ssa.OpMaskedSaturatedUnsignedSignedPairDotProdUint16x32, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Int32x4.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64)
 	addF(simdPackage, "Int32x8.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int32x8.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint32x16.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint32x16.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Int32x16.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64)
 	addF(simdPackage, "Uint32x4.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint32x4.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x4, types.TypeVec128), sys.AMD64)
 	addF(simdPackage, "Uint32x8.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint32x8.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float32x4.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64)
-	addF(simdPackage, "Float32x8.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64)
-	addF(simdPackage, "Float32x16.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64)
-	addF(simdPackage, "Float64x2.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64)
-	addF(simdPackage, "Float64x4.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64)
-	addF(simdPackage, "Float64x8.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64)
-	addF(simdPackage, "Float32x4.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64)
-	addF(simdPackage, "Float32x8.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64)
-	addF(simdPackage, "Float32x16.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64)
-	addF(simdPackage, "Float64x2.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64)
-	addF(simdPackage, "Float64x4.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64)
-	addF(simdPackage, "Float64x8.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64)
-	addF(simdPackage, "Float32x4.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64)
-	addF(simdPackage, "Float32x8.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64)
-	addF(simdPackage, "Float32x16.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64)
-	addF(simdPackage, "Float64x2.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64)
-	addF(simdPackage, "Float64x4.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64)
-	addF(simdPackage, "Float64x8.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64)
-	addF(simdPackage, "Float32x4.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64)
-	addF(simdPackage, "Float32x8.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64)
-	addF(simdPackage, "Float32x16.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64)
opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x8, 
types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - 
addF(simdPackage, "Float64x4.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedSub", opLen3(ssa.OpMaskedSubFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedSub", opLen3(ssa.OpMaskedSubFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedSub", opLen3(ssa.OpMaskedSubFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedSub", opLen3(ssa.OpMaskedSubFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedSub", opLen3(ssa.OpMaskedSubFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedSub", opLen3(ssa.OpMaskedSubFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.MaskedSub", opLen3(ssa.OpMaskedSubInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.MaskedSub", opLen3(ssa.OpMaskedSubInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.MaskedSub", opLen3(ssa.OpMaskedSubInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedSub", opLen3(ssa.OpMaskedSubInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedSub", opLen3(ssa.OpMaskedSubInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedSub", opLen3(ssa.OpMaskedSubInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedSub", opLen3(ssa.OpMaskedSubInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedSub", opLen3(ssa.OpMaskedSubInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedSub", opLen3(ssa.OpMaskedSubInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, 
"Int64x2.MaskedSub", opLen3(ssa.OpMaskedSubInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedSub", opLen3(ssa.OpMaskedSubInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedSub", opLen3(ssa.OpMaskedSubInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedSub", opLen3(ssa.OpMaskedSubUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedSub", opLen3(ssa.OpMaskedSubUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedSub", opLen3(ssa.OpMaskedSubUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedSub", opLen3(ssa.OpMaskedSubUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedSub", opLen3(ssa.OpMaskedSubUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedSub", opLen3(ssa.OpMaskedSubUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedSub", opLen3(ssa.OpMaskedSubUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedSub", opLen3(ssa.OpMaskedSubUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedSub", opLen3(ssa.OpMaskedSubUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedSub", opLen3(ssa.OpMaskedSubUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedSub", opLen3(ssa.OpMaskedSubUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedSub", opLen3(ssa.OpMaskedSubUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedUnsignedSignedQuadDotProdAccumulate", 
opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedXor", opLen3(ssa.OpMaskedXorFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedXor", opLen3(ssa.OpMaskedXorFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedXor", opLen3(ssa.OpMaskedXorFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedXor", opLen3(ssa.OpMaskedXorFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedXor", opLen3(ssa.OpMaskedXorFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedXor", opLen3(ssa.OpMaskedXorFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedXor", opLen3(ssa.OpMaskedXorInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedXor", opLen3(ssa.OpMaskedXorInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedXor", opLen3(ssa.OpMaskedXorInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedXor", opLen3(ssa.OpMaskedXorInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedXor", opLen3(ssa.OpMaskedXorInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedXor", opLen3(ssa.OpMaskedXorInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedXor", opLen3(ssa.OpMaskedXorUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedXor", opLen3(ssa.OpMaskedXorUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedXor", opLen3(ssa.OpMaskedXorUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedXor", opLen3(ssa.OpMaskedXorUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedXor", opLen3(ssa.OpMaskedXorUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedXor", opLen3(ssa.OpMaskedXorUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.Max", opLen2(ssa.OpMaxFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.Max", opLen2(ssa.OpMaxFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.Max", opLen2(ssa.OpMaxFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.Max", opLen2(ssa.OpMaxFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.Max", opLen2(ssa.OpMaxFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.Max", opLen2(ssa.OpMaxFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.Max", opLen2(ssa.OpMaxInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.Max", opLen2(ssa.OpMaxInt8x32, types.TypeVec256), 
sys.AMD64) + addF(simdPackage, "Int8x64.Max", opLen2(ssa.OpMaxInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.Max", opLen2(ssa.OpMaxInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.Max", opLen2(ssa.OpMaxInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.Max", opLen2(ssa.OpMaxInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.Max", opLen2(ssa.OpMaxInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.Max", opLen2(ssa.OpMaxInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.Max", opLen2(ssa.OpMaxInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.Max", opLen2(ssa.OpMaxInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.Max", opLen2(ssa.OpMaxInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.Max", opLen2(ssa.OpMaxInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.Max", opLen2(ssa.OpMaxUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.Max", opLen2(ssa.OpMaxUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.Max", opLen2(ssa.OpMaxUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.Max", opLen2(ssa.OpMaxUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.Max", opLen2(ssa.OpMaxUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.Max", opLen2(ssa.OpMaxUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.Max", opLen2(ssa.OpMaxUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.Max", opLen2(ssa.OpMaxUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.Max", opLen2(ssa.OpMaxUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.Max", opLen2(ssa.OpMaxUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.Max", opLen2(ssa.OpMaxUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.Max", opLen2(ssa.OpMaxUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.Min", opLen2(ssa.OpMinFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.Min", opLen2(ssa.OpMinFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.Min", opLen2(ssa.OpMinFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.Min", opLen2(ssa.OpMinFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.Min", opLen2(ssa.OpMinFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.Min", opLen2(ssa.OpMinFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.Min", opLen2(ssa.OpMinInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.Min", opLen2(ssa.OpMinInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.Min", opLen2(ssa.OpMinInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.Min", opLen2(ssa.OpMinInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.Min", opLen2(ssa.OpMinInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.Min", opLen2(ssa.OpMinInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.Min", opLen2(ssa.OpMinInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.Min", opLen2(ssa.OpMinInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.Min", opLen2(ssa.OpMinInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.Min", opLen2(ssa.OpMinInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.Min", 
opLen2(ssa.OpMinInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.Min", opLen2(ssa.OpMinInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.Min", opLen2(ssa.OpMinUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.Min", opLen2(ssa.OpMinUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.Min", opLen2(ssa.OpMinUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.Min", opLen2(ssa.OpMinUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.Min", opLen2(ssa.OpMinUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.Min", opLen2(ssa.OpMinUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.Min", opLen2(ssa.OpMinUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.Min", opLen2(ssa.OpMinUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.Min", opLen2(ssa.OpMinUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.Min", opLen2(ssa.OpMinUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.Min", opLen2(ssa.OpMinUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.Min", opLen2(ssa.OpMinUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.Mul", opLen2(ssa.OpMulFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.Mul", opLen2(ssa.OpMulFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.Mul", opLen2(ssa.OpMulFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.Mul", opLen2(ssa.OpMulFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.Mul", opLen2(ssa.OpMulFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.Mul", opLen2(ssa.OpMulFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x2.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.MulEvenWiden", opLen2(ssa.OpMulEvenWidenUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.MulEvenWiden", opLen2(ssa.OpMulEvenWidenUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x2.MulEvenWiden", opLen2(ssa.OpMulEvenWidenUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.MulEvenWiden", opLen2(ssa.OpMulEvenWidenUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.MulEvenWiden", opLen2(ssa.OpMulEvenWidenUint64x8, 
types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.MulHigh", opLen2(ssa.OpMulHighInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.MulHigh", opLen2(ssa.OpMulHighInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.MulHigh", opLen2(ssa.OpMulHighInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.MulHigh", opLen2(ssa.OpMulHighUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.MulHigh", opLen2(ssa.OpMulHighUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.MulHigh", opLen2(ssa.OpMulHighUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.MulLow", opLen2(ssa.OpMulLowInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.MulLow", opLen2(ssa.OpMulLowInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.MulLow", opLen2(ssa.OpMulLowInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MulLow", opLen2(ssa.OpMulLowInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MulLow", opLen2(ssa.OpMulLowInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MulLow", opLen2(ssa.OpMulLowInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MulLow", opLen2(ssa.OpMulLowInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MulLow", opLen2(ssa.OpMulLowInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MulLow", opLen2(ssa.OpMulLowInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.NotEqual", opLen2(ssa.OpNotEqualFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.NotEqual", opLen2(ssa.OpNotEqualFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.NotEqual", opLen2(ssa.OpNotEqualFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.NotEqual", opLen2(ssa.OpNotEqualFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.NotEqual", opLen2(ssa.OpNotEqualFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.NotEqual", opLen2(ssa.OpNotEqualFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.NotEqual", opLen2(ssa.OpNotEqualInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.NotEqual", opLen2(ssa.OpNotEqualInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.NotEqual", opLen2(ssa.OpNotEqualInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.NotEqual", opLen2(ssa.OpNotEqualInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.NotEqual", opLen2(ssa.OpNotEqualInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.NotEqual", opLen2(ssa.OpNotEqualInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.NotEqual", opLen2(ssa.OpNotEqualInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.NotEqual", opLen2(ssa.OpNotEqualInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.NotEqual", opLen2(ssa.OpNotEqualInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.NotEqual", opLen2(ssa.OpNotEqualInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.NotEqual", opLen2(ssa.OpNotEqualInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.NotEqual", opLen2(ssa.OpNotEqualInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.NotEqual", opLen2(ssa.OpNotEqualUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.NotEqual", opLen2(ssa.OpNotEqualUint8x32, types.TypeVec256), sys.AMD64) + 
addF(simdPackage, "Uint8x64.NotEqual", opLen2(ssa.OpNotEqualUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.NotEqual", opLen2(ssa.OpNotEqualUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.NotEqual", opLen2(ssa.OpNotEqualUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.NotEqual", opLen2(ssa.OpNotEqualUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.NotEqual", opLen2(ssa.OpNotEqualUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.NotEqual", opLen2(ssa.OpNotEqualUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.NotEqual", opLen2(ssa.OpNotEqualUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.NotEqual", opLen2(ssa.OpNotEqualUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.NotEqual", opLen2(ssa.OpNotEqualUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.NotEqual", opLen2(ssa.OpNotEqualUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.Or", opLen2(ssa.OpOrFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.Or", opLen2(ssa.OpOrFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.Or", opLen2(ssa.OpOrFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.Or", opLen2(ssa.OpOrFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.Or", opLen2(ssa.OpOrFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.Or", opLen2(ssa.OpOrFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.Or", opLen2(ssa.OpOrInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.Or", opLen2(ssa.OpOrInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x8.Or", opLen2(ssa.OpOrInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.Or", opLen2(ssa.OpOrInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x4.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.Or", opLen2(ssa.OpOrInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.Or", opLen2(ssa.OpOrInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.Or", opLen2(ssa.OpOrInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.Or", opLen2(ssa.OpOrUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.Or", opLen2(ssa.OpOrUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x8.Or", opLen2(ssa.OpOrUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.Or", opLen2(ssa.OpOrUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x4.Or", opLen2(ssa.OpOrUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.Or", opLen2(ssa.OpOrUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.Or", opLen2(ssa.OpOrUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.Or", opLen2(ssa.OpOrUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.Or", opLen2(ssa.OpOrUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.Or", opLen2(ssa.OpOrUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.PairDotProd", opLen2(ssa.OpPairDotProdInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.PairDotProd", opLen2(ssa.OpPairDotProdInt16x16, 
types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.PairDotProd", opLen2(ssa.OpPairDotProdInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.PairDotProdAccumulate", opLen3(ssa.OpPairDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.PairDotProdAccumulate", opLen3(ssa.OpPairDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.PairDotProdAccumulate", opLen3(ssa.OpPairDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.PairwiseAdd", opLen2(ssa.OpPairwiseAddFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.PairwiseAdd", opLen2(ssa.OpPairwiseAddFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x2.PairwiseAdd", opLen2(ssa.OpPairwiseAddFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.PairwiseAdd", opLen2(ssa.OpPairwiseAddFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x8.PairwiseAdd", opLen2(ssa.OpPairwiseAddInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.PairwiseAdd", opLen2(ssa.OpPairwiseAddInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x4.PairwiseAdd", opLen2(ssa.OpPairwiseAddInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.PairwiseAdd", opLen2(ssa.OpPairwiseAddInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x8.PairwiseAdd", opLen2(ssa.OpPairwiseAddUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.PairwiseAdd", opLen2(ssa.OpPairwiseAddUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x4.PairwiseAdd", opLen2(ssa.OpPairwiseAddUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.PairwiseAdd", opLen2(ssa.OpPairwiseAddUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x4.PairwiseSub", opLen2(ssa.OpPairwiseSubFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.PairwiseSub", opLen2(ssa.OpPairwiseSubFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x2.PairwiseSub", opLen2(ssa.OpPairwiseSubFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.PairwiseSub", opLen2(ssa.OpPairwiseSubFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x8.PairwiseSub", opLen2(ssa.OpPairwiseSubInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.PairwiseSub", opLen2(ssa.OpPairwiseSubInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x4.PairwiseSub", opLen2(ssa.OpPairwiseSubInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.PairwiseSub", opLen2(ssa.OpPairwiseSubInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x8.PairwiseSub", opLen2(ssa.OpPairwiseSubUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.PairwiseSub", opLen2(ssa.OpPairwiseSubUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x4.PairwiseSub", opLen2(ssa.OpPairwiseSubUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.PairwiseSub", opLen2(ssa.OpPairwiseSubUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x16.PopCount", opLen1(ssa.OpPopCountInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.PopCount", opLen1(ssa.OpPopCountInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.PopCount", opLen1(ssa.OpPopCountInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.PopCount", opLen1(ssa.OpPopCountInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, 
"Int16x16.PopCount", opLen1(ssa.OpPopCountInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.PopCount", opLen1(ssa.OpPopCountInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.PopCount", opLen1(ssa.OpPopCountInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.PopCount", opLen1(ssa.OpPopCountInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.PopCount", opLen1(ssa.OpPopCountInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.PopCount", opLen1(ssa.OpPopCountInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.PopCount", opLen1(ssa.OpPopCountInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.PopCount", opLen1(ssa.OpPopCountInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.PopCount", opLen1(ssa.OpPopCountUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.PopCount", opLen1(ssa.OpPopCountUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.PopCount", opLen1(ssa.OpPopCountUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.PopCount", opLen1(ssa.OpPopCountUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.PopCount", opLen1(ssa.OpPopCountUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.PopCount", opLen1(ssa.OpPopCountUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.PopCount", opLen1(ssa.OpPopCountUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.PopCount", opLen1(ssa.OpPopCountUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.PopCount", opLen1(ssa.OpPopCountUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.PopCount", opLen1(ssa.OpPopCountUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.PopCount", opLen1(ssa.OpPopCountUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.PopCount", opLen1(ssa.OpPopCountUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.Round", opLen1(ssa.OpRoundFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.Round", opLen1(ssa.OpRoundFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x2.Round", opLen1(ssa.OpRoundFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.Round", opLen1(ssa.OpRoundFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x4.RoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpRoundSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.RoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpRoundSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.RoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) @@ -1454,6 +1430,94 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float64x2.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Int8x16.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.SaturatedPairDotProdAccumulate", opLen3(ssa.OpSaturatedPairDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.SaturatedPairDotProdAccumulate", opLen3(ssa.OpSaturatedPairDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.SaturatedPairDotProdAccumulate", opLen3(ssa.OpSaturatedPairDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.SaturatedPairwiseAdd", opLen2(ssa.OpSaturatedPairwiseAddInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.SaturatedPairwiseAdd", opLen2(ssa.OpSaturatedPairwiseAddInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x8.SaturatedPairwiseSub", opLen2(ssa.OpSaturatedPairwiseSubInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.SaturatedPairwiseSub", opLen2(ssa.OpSaturatedPairwiseSubInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x16.SaturatedSub", opLen2(ssa.OpSaturatedSubInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.SaturatedSub", opLen2(ssa.OpSaturatedSubInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.SaturatedSub", opLen2(ssa.OpSaturatedSubInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.SaturatedSub", opLen2(ssa.OpSaturatedSubInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.SaturatedSub", opLen2(ssa.OpSaturatedSubInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.SaturatedSub", opLen2(ssa.OpSaturatedSubInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.SaturatedSub", opLen2(ssa.OpSaturatedSubUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.SaturatedSub", opLen2(ssa.OpSaturatedSubUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.SaturatedSub", 
opLen2(ssa.OpSaturatedSubUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.SaturatedSub", opLen2(ssa.OpSaturatedSubUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.SaturatedSub", opLen2(ssa.OpSaturatedSubUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.SaturatedSub", opLen2(ssa.OpSaturatedSubUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x8.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.Sign", opLen2(ssa.OpSignInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.Sign", opLen2(ssa.OpSignInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x8.Sign", opLen2(ssa.OpSignInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.Sign", opLen2(ssa.OpSignInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x4.Sign", opLen2(ssa.OpSignInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.Sign", opLen2(ssa.OpSignInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x4.Sqrt", opLen1(ssa.OpSqrtFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.Sqrt", opLen1(ssa.OpSqrtFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.Sqrt", opLen1(ssa.OpSqrtFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.Sqrt", opLen1(ssa.OpSqrtFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.Sqrt", opLen1(ssa.OpSqrtFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.Sqrt", opLen1(ssa.OpSqrtFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.Sub", opLen2(ssa.OpSubFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.Sub", opLen2(ssa.OpSubFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, 
"Float32x16.Sub", opLen2(ssa.OpSubFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.Sub", opLen2(ssa.OpSubFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.Sub", opLen2(ssa.OpSubFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.Sub", opLen2(ssa.OpSubFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.Sub", opLen2(ssa.OpSubInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.Sub", opLen2(ssa.OpSubInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.Sub", opLen2(ssa.OpSubInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.Sub", opLen2(ssa.OpSubInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.Sub", opLen2(ssa.OpSubInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.Sub", opLen2(ssa.OpSubInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.Sub", opLen2(ssa.OpSubInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.Sub", opLen2(ssa.OpSubInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.Sub", opLen2(ssa.OpSubInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.Sub", opLen2(ssa.OpSubInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.Sub", opLen2(ssa.OpSubInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.Sub", opLen2(ssa.OpSubInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.Sub", opLen2(ssa.OpSubUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.Sub", opLen2(ssa.OpSubUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.Sub", opLen2(ssa.OpSubUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.Sub", opLen2(ssa.OpSubUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.Sub", opLen2(ssa.OpSubUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.Sub", opLen2(ssa.OpSubUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.Sub", opLen2(ssa.OpSubUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.Sub", opLen2(ssa.OpSubUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.Sub", opLen2(ssa.OpSubUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.Sub", opLen2(ssa.OpSubUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.Sub", opLen2(ssa.OpSubUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.Sub", opLen2(ssa.OpSubUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.Trunc", opLen1(ssa.OpTruncFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.Trunc", opLen1(ssa.OpTruncFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x2.Trunc", opLen1(ssa.OpTruncFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.Trunc", opLen1(ssa.OpTruncFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x4.TruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpTruncSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.TruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpTruncSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.TruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpTruncSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) @@ -1466,102 +1530,38 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float64x2.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedDiffWithCeilWithPrecision", 
opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16, 
types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, 
"Float64x8.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat32x16, types.TypeVec512, 4), 
sys.AMD64) - addF(simdPackage, "Float64x2.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Int32x4.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.Xor", opLen2(ssa.OpXorFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.Xor", opLen2(ssa.OpXorFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.Xor", opLen2(ssa.OpXorFloat32x16, 
types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.Xor", opLen2(ssa.OpXorFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.Xor", opLen2(ssa.OpXorFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.Xor", opLen2(ssa.OpXorFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.Xor", opLen2(ssa.OpXorInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.Xor", opLen2(ssa.OpXorInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x8.Xor", opLen2(ssa.OpXorInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.Xor", opLen2(ssa.OpXorInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x4.Xor", opLen2(ssa.OpXorInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.Xor", opLen2(ssa.OpXorInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.Xor", opLen2(ssa.OpXorInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.Xor", opLen2(ssa.OpXorInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.Xor", opLen2(ssa.OpXorInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.Xor", opLen2(ssa.OpXorInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.Xor", opLen2(ssa.OpXorUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.Xor", opLen2(ssa.OpXorUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x8.Xor", opLen2(ssa.OpXorUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.Xor", opLen2(ssa.OpXorUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x4.Xor", opLen2(ssa.OpXorUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.Xor", opLen2(ssa.OpXorUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.Xor", opLen2(ssa.OpXorUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.Xor", opLen2(ssa.OpXorUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.Xor", opLen2(ssa.OpXorUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.Xor", opLen2(ssa.OpXorUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x16.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float32x16.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float32x16.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) @@ -1832,6 +1832,34 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint8x64.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint8x64.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint8x64.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "LoadInt8x64", simdLoad(), sys.AMD64) + addF(simdPackage, "Int8x64.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask8x64", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask8x64.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt16x32", simdLoad(), sys.AMD64) + addF(simdPackage, "Int16x32.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask16x32", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask16x32.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt32x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Int32x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask32x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask32x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt64x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Int64x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask64x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask64x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadFloat32x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Float32x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadFloat64x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Float64x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint8x64", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint8x64.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint16x32", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint16x32.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint32x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint32x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint64x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint64x8.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadInt8x16", simdLoad(), sys.AMD64) addF(simdPackage, "Int8x16.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadInt16x8", simdLoad(), sys.AMD64) @@ -1888,34 +1916,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Mask8x32.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadMask16x16", simdLoad(), sys.AMD64) addF(simdPackage, "Mask16x16.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadInt8x64", simdLoad(), sys.AMD64) - addF(simdPackage, "Int8x64.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask8x64", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask8x64.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadInt16x32", simdLoad(), sys.AMD64) - addF(simdPackage, "Int16x32.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask16x32", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask16x32.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadInt32x16", simdLoad(), sys.AMD64) - addF(simdPackage, "Int32x16.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask32x16", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask32x16.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadInt64x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Int64x8.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask64x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask64x8.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadFloat32x16", simdLoad(), sys.AMD64) - addF(simdPackage, "Float32x16.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadFloat64x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Float64x8.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadUint8x64", simdLoad(), sys.AMD64) - addF(simdPackage, "Uint8x64.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadUint16x32", simdLoad(), sys.AMD64) - addF(simdPackage, "Uint16x32.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadUint32x16", simdLoad(), sys.AMD64) - addF(simdPackage, "Uint32x16.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadUint64x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Uint64x8.Store", simdStore(), sys.AMD64) addF(simdPackage, "Mask16x16.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x16.AsMask16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask16x16.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) From 3a4d10bfca5cca54b69c50123d1245604c334e0f Mon Sep 17 00:00:00 2001 From: David Chase Date: Tue, 17 Jun 2025 11:57:19 -0400 Subject: [PATCH 032/139] [dev.simd] cmd/compile: removed a map iteration from generator; tweaked type order Output of CL 682316 Change-Id: I566486085fbd8a5437a5904ed02f718da7fed2c9 Reviewed-on: https://go-review.googlesource.com/c/go/+/682355 Reviewed-by: Cherry Mui LUCI-TryBot-Result: Go LUCI --- .../compile/internal/ssagen/simdintrinsics.go | 422 ++++----- src/simd/stubs_amd64.go | 810 +++++++++--------- 2 files changed, 616 insertions(+), 616 deletions(-) diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 4b1f8a212ae2f8..58e2e79eec12a6 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -1562,360 +1562,376 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.Xor", opLen2(ssa.OpXorUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.Xor", opLen2(ssa.OpXorUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.Xor", opLen2(ssa.OpXorUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x16.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x16.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x16.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x16.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x16.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x16.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x16.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x16.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float32x4.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x4.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float32x4.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float32x4.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float32x4.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x4.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x4.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float32x4.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float32x4.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float32x4.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x4.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float32x8.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x8.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float32x8.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float32x8.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, 
"Float32x8.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x8.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x8.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float32x8.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float32x8.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float32x8.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x8.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float64x2.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x2.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float64x2.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float64x2.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float64x2.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x2.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x2.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float64x2.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float64x2.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float64x2.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x2.AsUint8x16", func(s 
*state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float64x4.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x4.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float64x4.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float64x4.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float64x4.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x4.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x4.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float64x4.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float64x4.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float64x4.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x4.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float64x8.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x8.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float64x8.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float64x8.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float64x8.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x8.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x8.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float64x8.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float64x8.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float64x8.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x8.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { 
return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, 
"Int16x8.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x16.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x16.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x16.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x16.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x16.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x16.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x16.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x16.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x16.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x16.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x16.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x32.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x32.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x32.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x32.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x32.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x32.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x32.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x32.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x32.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x32.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) 
*ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x32.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x8.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x8.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x8.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x8.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x8.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x8.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x8.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x8.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x8.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x16.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x16.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x16.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x16.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x16.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x16.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x16.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x16.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x16.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x4.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x4.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x4.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x4.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x4.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, 
"Int32x4.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x4.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x4.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x4.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x8.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x8.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x8.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x8.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x8.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x8.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x8.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x8.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x8.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x8.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x8.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x2.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x2.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) 
*ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x2.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x2.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x2.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x2.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x2.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x2.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x2.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x4.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x4.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x4.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x4.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x4.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x4.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x4.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x4.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x4.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x4.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x4.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x8.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x8.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x8.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x8.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x8.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x8.AsUint16x32", func(s *state, n 
*ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x8.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x8.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x8.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x16.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x16.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x16.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x16.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x16.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x16.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x16.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x16.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x16.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x32.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x32.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x32.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x32.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x32.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x32.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x32.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x32.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x32.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x64.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x64.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x64.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x64.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x64.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, 
sys.AMD64) - addF(simdPackage, "Int8x64.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x64.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x64.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x64.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, 
"Uint8x64.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint16x16.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint16x16.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x16.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint16x16.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint16x16.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint16x16.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x16.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x16.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint16x16.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint16x16.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x16.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint16x32.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint16x32.AsFloat64x8", func(s *state, n 
*ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x32.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint16x32.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint16x32.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint16x32.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x32.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x32.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint16x32.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint16x32.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x32.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x8.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x8.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x8.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x8.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x8.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x8.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x8.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x8.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x8.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x16.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x16.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x16.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x16.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x16.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x16.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x16.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x16.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) 
*ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x16.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x4.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x4.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x4.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x4.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x4.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x4.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x4.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x4.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x4.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x4.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x4.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x8.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x8.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x8.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x8.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x8.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x8.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x8.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x8.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x8.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x8.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x8.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x16.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x16.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x16.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, 
"Uint32x16.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x16.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x16.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x16.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x16.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x16.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint64x2.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint64x2.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x2.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint64x2.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint64x2.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint64x2.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x2.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x2.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint64x2.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint64x2.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x2.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint64x4.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint64x4.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x4.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint64x4.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint64x4.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint64x4.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x4.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x4.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint64x4.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint64x4.AsUint32x8", func(s *state, n *ir.CallExpr, args 
[]*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x4.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint64x8.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint64x8.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x8.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint64x8.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint64x8.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint64x8.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x8.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x8.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint64x8.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint64x8.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x8.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x16.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x16.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x16.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x16.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x16.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x16.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x16.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x16.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x16.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x32.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x32.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x32.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x32.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x32.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - 
addF(simdPackage, "Uint8x32.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x32.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x32.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x32.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x64.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x64.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x64.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x64.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x64.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x64.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x64.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x64.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x64.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "LoadInt8x64", simdLoad(), sys.AMD64) - addF(simdPackage, "Int8x64.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask8x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask8x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask8x32", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask8x32.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadMask8x64", simdLoad(), sys.AMD64) addF(simdPackage, "Mask8x64.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadInt16x32", simdLoad(), sys.AMD64) - addF(simdPackage, "Int16x32.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask16x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask16x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask16x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask16x16.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadMask16x32", simdLoad(), sys.AMD64) addF(simdPackage, "Mask16x32.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadInt32x16", simdLoad(), sys.AMD64) - addF(simdPackage, "Int32x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask32x4", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask32x4.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask32x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask32x8.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadMask32x16", simdLoad(), sys.AMD64) addF(simdPackage, "Mask32x16.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadInt64x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Int64x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask64x2", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask64x2.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask64x4", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask64x4.Store", simdStore(), 
sys.AMD64) addF(simdPackage, "LoadMask64x8", simdLoad(), sys.AMD64) addF(simdPackage, "Mask64x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadFloat32x4", simdLoad(), sys.AMD64) + addF(simdPackage, "Float32x4.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadFloat32x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Float32x8.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadFloat32x16", simdLoad(), sys.AMD64) addF(simdPackage, "Float32x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadFloat64x2", simdLoad(), sys.AMD64) + addF(simdPackage, "Float64x2.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadFloat64x4", simdLoad(), sys.AMD64) + addF(simdPackage, "Float64x4.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadFloat64x8", simdLoad(), sys.AMD64) addF(simdPackage, "Float64x8.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadUint8x64", simdLoad(), sys.AMD64) - addF(simdPackage, "Uint8x64.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadUint16x32", simdLoad(), sys.AMD64) - addF(simdPackage, "Uint16x32.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadUint32x16", simdLoad(), sys.AMD64) - addF(simdPackage, "Uint32x16.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadUint64x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Uint64x8.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadInt8x16", simdLoad(), sys.AMD64) addF(simdPackage, "Int8x16.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadInt16x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Int16x8.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadInt32x4", simdLoad(), sys.AMD64) - addF(simdPackage, "Int32x4.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadInt64x2", simdLoad(), sys.AMD64) - addF(simdPackage, "Int64x2.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask64x2", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask64x2.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadFloat32x4", simdLoad(), sys.AMD64) - addF(simdPackage, "Float32x4.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadFloat64x2", simdLoad(), sys.AMD64) - addF(simdPackage, "Float64x2.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadUint8x16", simdLoad(), sys.AMD64) - addF(simdPackage, "Uint8x16.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadUint16x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Uint16x8.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadUint32x4", simdLoad(), sys.AMD64) - addF(simdPackage, "Uint32x4.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadUint64x2", simdLoad(), sys.AMD64) - addF(simdPackage, "Uint64x2.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask32x4", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask32x4.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask8x16", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask8x16.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask16x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask16x8.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadInt8x32", simdLoad(), sys.AMD64) addF(simdPackage, "Int8x32.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt8x64", simdLoad(), sys.AMD64) + addF(simdPackage, "Int8x64.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt16x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Int16x8.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadInt16x16", simdLoad(), sys.AMD64) addF(simdPackage, "Int16x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt16x32", simdLoad(), 
sys.AMD64) + addF(simdPackage, "Int16x32.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt32x4", simdLoad(), sys.AMD64) + addF(simdPackage, "Int32x4.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadInt32x8", simdLoad(), sys.AMD64) addF(simdPackage, "Int32x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt32x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Int32x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt64x2", simdLoad(), sys.AMD64) + addF(simdPackage, "Int64x2.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadInt64x4", simdLoad(), sys.AMD64) addF(simdPackage, "Int64x4.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask64x4", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask64x4.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadFloat32x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Float32x8.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadFloat64x4", simdLoad(), sys.AMD64) - addF(simdPackage, "Float64x4.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt64x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Int64x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint8x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint8x16.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadUint8x32", simdLoad(), sys.AMD64) addF(simdPackage, "Uint8x32.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint8x64", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint8x64.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint16x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint16x8.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadUint16x16", simdLoad(), sys.AMD64) addF(simdPackage, "Uint16x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint16x32", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint16x32.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint32x4", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint32x4.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadUint32x8", simdLoad(), sys.AMD64) addF(simdPackage, "Uint32x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint32x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint32x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint64x2", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint64x2.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadUint64x4", simdLoad(), sys.AMD64) addF(simdPackage, "Uint64x4.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask32x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask32x8.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask8x32", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask8x32.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask16x16", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask16x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint64x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint64x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "Mask8x16.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsMask8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask8x16.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Mask8x16.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Mask8x32.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + 
addF(simdPackage, "Int8x32.AsMask8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask8x32.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Mask8x32.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Mask8x64.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsMask8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask8x64.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Mask8x64.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Mask16x8.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsMask16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask16x8.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Mask16x8.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Mask16x16.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x16.AsMask16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask16x16.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) @@ -1924,14 +1940,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Int16x32.AsMask16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask16x32.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask16x32.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Mask16x8.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x8.AsMask16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Mask16x8.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Mask16x8.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Mask32x16.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x16.AsMask32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Mask32x16.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Mask32x16.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask32x4.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x4.AsMask32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask32x4.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) @@ -1940,6 +1948,10 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Int32x8.AsMask32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask32x8.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Mask32x8.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Mask32x16.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsMask32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask32x16.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Mask32x16.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask64x2.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x2.AsMask64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask64x2.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) @@ -1952,16 +1964,4 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Int64x8.AsMask64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask64x8.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask64x8.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Mask8x16.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x16.AsMask8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Mask8x16.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Mask8x16.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Mask8x32.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x32.AsMask8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Mask8x32.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Mask8x32.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Mask8x64.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x64.AsMask8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Mask8x64.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Mask8x64.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) } diff --git a/src/simd/stubs_amd64.go b/src/simd/stubs_amd64.go index 65332bf3fa7078..c409d9663ff2f5 100644 --- a/src/simd/stubs_amd64.go +++ b/src/simd/stubs_amd64.go @@ -8702,36 +8702,12 @@ func (x Uint64x4) Xor(y Uint64x4) Uint64x4 // Asm: VPXORQ, CPU Feature: AVX512EVEX func (x Uint64x8) Xor(y Uint64x8) Uint64x8 -// Float64x8 converts from Float32x16 to Float64x8 -func (from Float32x16) AsFloat64x8() (to Float64x8) - -// Int16x32 converts from Float32x16 to Int16x32 -func (from Float32x16) AsInt16x32() (to Int16x32) - -// Int32x16 converts from Float32x16 to Int32x16 -func (from Float32x16) AsInt32x16() (to Int32x16) - -// Int64x8 converts from Float32x16 to 
Int64x8 -func (from Float32x16) AsInt64x8() (to Int64x8) - -// Int8x64 converts from Float32x16 to Int8x64 -func (from Float32x16) AsInt8x64() (to Int8x64) - -// Uint16x32 converts from Float32x16 to Uint16x32 -func (from Float32x16) AsUint16x32() (to Uint16x32) - -// Uint32x16 converts from Float32x16 to Uint32x16 -func (from Float32x16) AsUint32x16() (to Uint32x16) - -// Uint64x8 converts from Float32x16 to Uint64x8 -func (from Float32x16) AsUint64x8() (to Uint64x8) - -// Uint8x64 converts from Float32x16 to Uint8x64 -func (from Float32x16) AsUint8x64() (to Uint8x64) - // Float64x2 converts from Float32x4 to Float64x2 func (from Float32x4) AsFloat64x2() (to Float64x2) +// Int8x16 converts from Float32x4 to Int8x16 +func (from Float32x4) AsInt8x16() (to Int8x16) + // Int16x8 converts from Float32x4 to Int16x8 func (from Float32x4) AsInt16x8() (to Int16x8) @@ -8741,8 +8717,8 @@ func (from Float32x4) AsInt32x4() (to Int32x4) // Int64x2 converts from Float32x4 to Int64x2 func (from Float32x4) AsInt64x2() (to Int64x2) -// Int8x16 converts from Float32x4 to Int8x16 -func (from Float32x4) AsInt8x16() (to Int8x16) +// Uint8x16 converts from Float32x4 to Uint8x16 +func (from Float32x4) AsUint8x16() (to Uint8x16) // Uint16x8 converts from Float32x4 to Uint16x8 func (from Float32x4) AsUint16x8() (to Uint16x8) @@ -8753,12 +8729,12 @@ func (from Float32x4) AsUint32x4() (to Uint32x4) // Uint64x2 converts from Float32x4 to Uint64x2 func (from Float32x4) AsUint64x2() (to Uint64x2) -// Uint8x16 converts from Float32x4 to Uint8x16 -func (from Float32x4) AsUint8x16() (to Uint8x16) - // Float64x4 converts from Float32x8 to Float64x4 func (from Float32x8) AsFloat64x4() (to Float64x4) +// Int8x32 converts from Float32x8 to Int8x32 +func (from Float32x8) AsInt8x32() (to Int8x32) + // Int16x16 converts from Float32x8 to Int16x16 func (from Float32x8) AsInt16x16() (to Int16x16) @@ -8768,8 +8744,8 @@ func (from Float32x8) AsInt32x8() (to Int32x8) // Int64x4 converts from Float32x8 to Int64x4 func (from Float32x8) AsInt64x4() (to Int64x4) -// Int8x32 converts from Float32x8 to Int8x32 -func (from Float32x8) AsInt8x32() (to Int8x32) +// Uint8x32 converts from Float32x8 to Uint8x32 +func (from Float32x8) AsUint8x32() (to Uint8x32) // Uint16x16 converts from Float32x8 to Uint16x16 func (from Float32x8) AsUint16x16() (to Uint16x16) @@ -8780,12 +8756,39 @@ func (from Float32x8) AsUint32x8() (to Uint32x8) // Uint64x4 converts from Float32x8 to Uint64x4 func (from Float32x8) AsUint64x4() (to Uint64x4) -// Uint8x32 converts from Float32x8 to Uint8x32 -func (from Float32x8) AsUint8x32() (to Uint8x32) +// Float64x8 converts from Float32x16 to Float64x8 +func (from Float32x16) AsFloat64x8() (to Float64x8) + +// Int8x64 converts from Float32x16 to Int8x64 +func (from Float32x16) AsInt8x64() (to Int8x64) + +// Int16x32 converts from Float32x16 to Int16x32 +func (from Float32x16) AsInt16x32() (to Int16x32) + +// Int32x16 converts from Float32x16 to Int32x16 +func (from Float32x16) AsInt32x16() (to Int32x16) + +// Int64x8 converts from Float32x16 to Int64x8 +func (from Float32x16) AsInt64x8() (to Int64x8) + +// Uint8x64 converts from Float32x16 to Uint8x64 +func (from Float32x16) AsUint8x64() (to Uint8x64) + +// Uint16x32 converts from Float32x16 to Uint16x32 +func (from Float32x16) AsUint16x32() (to Uint16x32) + +// Uint32x16 converts from Float32x16 to Uint32x16 +func (from Float32x16) AsUint32x16() (to Uint32x16) + +// Uint64x8 converts from Float32x16 to Uint64x8 +func (from Float32x16) AsUint64x8() (to Uint64x8) // 
Float32x4 converts from Float64x2 to Float32x4 func (from Float64x2) AsFloat32x4() (to Float32x4) +// Int8x16 converts from Float64x2 to Int8x16 +func (from Float64x2) AsInt8x16() (to Int8x16) + // Int16x8 converts from Float64x2 to Int16x8 func (from Float64x2) AsInt16x8() (to Int16x8) @@ -8795,8 +8798,8 @@ func (from Float64x2) AsInt32x4() (to Int32x4) // Int64x2 converts from Float64x2 to Int64x2 func (from Float64x2) AsInt64x2() (to Int64x2) -// Int8x16 converts from Float64x2 to Int8x16 -func (from Float64x2) AsInt8x16() (to Int8x16) +// Uint8x16 converts from Float64x2 to Uint8x16 +func (from Float64x2) AsUint8x16() (to Uint8x16) // Uint16x8 converts from Float64x2 to Uint16x8 func (from Float64x2) AsUint16x8() (to Uint16x8) @@ -8807,12 +8810,12 @@ func (from Float64x2) AsUint32x4() (to Uint32x4) // Uint64x2 converts from Float64x2 to Uint64x2 func (from Float64x2) AsUint64x2() (to Uint64x2) -// Uint8x16 converts from Float64x2 to Uint8x16 -func (from Float64x2) AsUint8x16() (to Uint8x16) - // Float32x8 converts from Float64x4 to Float32x8 func (from Float64x4) AsFloat32x8() (to Float32x8) +// Int8x32 converts from Float64x4 to Int8x32 +func (from Float64x4) AsInt8x32() (to Int8x32) + // Int16x16 converts from Float64x4 to Int16x16 func (from Float64x4) AsInt16x16() (to Int16x16) @@ -8822,8 +8825,8 @@ func (from Float64x4) AsInt32x8() (to Int32x8) // Int64x4 converts from Float64x4 to Int64x4 func (from Float64x4) AsInt64x4() (to Int64x4) -// Int8x32 converts from Float64x4 to Int8x32 -func (from Float64x4) AsInt8x32() (to Int8x32) +// Uint8x32 converts from Float64x4 to Uint8x32 +func (from Float64x4) AsUint8x32() (to Uint8x32) // Uint16x16 converts from Float64x4 to Uint16x16 func (from Float64x4) AsUint16x16() (to Uint16x16) @@ -8834,12 +8837,12 @@ func (from Float64x4) AsUint32x8() (to Uint32x8) // Uint64x4 converts from Float64x4 to Uint64x4 func (from Float64x4) AsUint64x4() (to Uint64x4) -// Uint8x32 converts from Float64x4 to Uint8x32 -func (from Float64x4) AsUint8x32() (to Uint8x32) - // Float32x16 converts from Float64x8 to Float32x16 func (from Float64x8) AsFloat32x16() (to Float32x16) +// Int8x64 converts from Float64x8 to Int8x64 +func (from Float64x8) AsInt8x64() (to Int8x64) + // Int16x32 converts from Float64x8 to Int16x32 func (from Float64x8) AsInt16x32() (to Int16x32) @@ -8849,8 +8852,8 @@ func (from Float64x8) AsInt32x16() (to Int32x16) // Int64x8 converts from Float64x8 to Int64x8 func (from Float64x8) AsInt64x8() (to Int64x8) -// Int8x64 converts from Float64x8 to Int8x64 -func (from Float64x8) AsInt8x64() (to Int8x64) +// Uint8x64 converts from Float64x8 to Uint8x64 +func (from Float64x8) AsUint8x64() (to Uint8x64) // Uint16x32 converts from Float64x8 to Uint16x32 func (from Float64x8) AsUint16x32() (to Uint16x32) @@ -8861,62 +8864,86 @@ func (from Float64x8) AsUint32x16() (to Uint32x16) // Uint64x8 converts from Float64x8 to Uint64x8 func (from Float64x8) AsUint64x8() (to Uint64x8) -// Uint8x64 converts from Float64x8 to Uint8x64 -func (from Float64x8) AsUint8x64() (to Uint8x64) +// Float32x4 converts from Int8x16 to Float32x4 +func (from Int8x16) AsFloat32x4() (to Float32x4) -// Float32x8 converts from Int16x16 to Float32x8 -func (from Int16x16) AsFloat32x8() (to Float32x8) +// Float64x2 converts from Int8x16 to Float64x2 +func (from Int8x16) AsFloat64x2() (to Float64x2) -// Float64x4 converts from Int16x16 to Float64x4 -func (from Int16x16) AsFloat64x4() (to Float64x4) +// Int16x8 converts from Int8x16 to Int16x8 +func (from Int8x16) AsInt16x8() (to Int16x8) 
-// Int32x8 converts from Int16x16 to Int32x8 -func (from Int16x16) AsInt32x8() (to Int32x8) +// Int32x4 converts from Int8x16 to Int32x4 +func (from Int8x16) AsInt32x4() (to Int32x4) -// Int64x4 converts from Int16x16 to Int64x4 -func (from Int16x16) AsInt64x4() (to Int64x4) +// Int64x2 converts from Int8x16 to Int64x2 +func (from Int8x16) AsInt64x2() (to Int64x2) -// Int8x32 converts from Int16x16 to Int8x32 -func (from Int16x16) AsInt8x32() (to Int8x32) +// Uint8x16 converts from Int8x16 to Uint8x16 +func (from Int8x16) AsUint8x16() (to Uint8x16) -// Uint16x16 converts from Int16x16 to Uint16x16 -func (from Int16x16) AsUint16x16() (to Uint16x16) +// Uint16x8 converts from Int8x16 to Uint16x8 +func (from Int8x16) AsUint16x8() (to Uint16x8) -// Uint32x8 converts from Int16x16 to Uint32x8 -func (from Int16x16) AsUint32x8() (to Uint32x8) +// Uint32x4 converts from Int8x16 to Uint32x4 +func (from Int8x16) AsUint32x4() (to Uint32x4) -// Uint64x4 converts from Int16x16 to Uint64x4 -func (from Int16x16) AsUint64x4() (to Uint64x4) +// Uint64x2 converts from Int8x16 to Uint64x2 +func (from Int8x16) AsUint64x2() (to Uint64x2) -// Uint8x32 converts from Int16x16 to Uint8x32 -func (from Int16x16) AsUint8x32() (to Uint8x32) +// Float32x8 converts from Int8x32 to Float32x8 +func (from Int8x32) AsFloat32x8() (to Float32x8) -// Float32x16 converts from Int16x32 to Float32x16 -func (from Int16x32) AsFloat32x16() (to Float32x16) +// Float64x4 converts from Int8x32 to Float64x4 +func (from Int8x32) AsFloat64x4() (to Float64x4) -// Float64x8 converts from Int16x32 to Float64x8 -func (from Int16x32) AsFloat64x8() (to Float64x8) +// Int16x16 converts from Int8x32 to Int16x16 +func (from Int8x32) AsInt16x16() (to Int16x16) -// Int32x16 converts from Int16x32 to Int32x16 -func (from Int16x32) AsInt32x16() (to Int32x16) +// Int32x8 converts from Int8x32 to Int32x8 +func (from Int8x32) AsInt32x8() (to Int32x8) -// Int64x8 converts from Int16x32 to Int64x8 -func (from Int16x32) AsInt64x8() (to Int64x8) +// Int64x4 converts from Int8x32 to Int64x4 +func (from Int8x32) AsInt64x4() (to Int64x4) -// Int8x64 converts from Int16x32 to Int8x64 -func (from Int16x32) AsInt8x64() (to Int8x64) +// Uint8x32 converts from Int8x32 to Uint8x32 +func (from Int8x32) AsUint8x32() (to Uint8x32) -// Uint16x32 converts from Int16x32 to Uint16x32 -func (from Int16x32) AsUint16x32() (to Uint16x32) +// Uint16x16 converts from Int8x32 to Uint16x16 +func (from Int8x32) AsUint16x16() (to Uint16x16) -// Uint32x16 converts from Int16x32 to Uint32x16 -func (from Int16x32) AsUint32x16() (to Uint32x16) +// Uint32x8 converts from Int8x32 to Uint32x8 +func (from Int8x32) AsUint32x8() (to Uint32x8) -// Uint64x8 converts from Int16x32 to Uint64x8 -func (from Int16x32) AsUint64x8() (to Uint64x8) +// Uint64x4 converts from Int8x32 to Uint64x4 +func (from Int8x32) AsUint64x4() (to Uint64x4) -// Uint8x64 converts from Int16x32 to Uint8x64 -func (from Int16x32) AsUint8x64() (to Uint8x64) +// Float32x16 converts from Int8x64 to Float32x16 +func (from Int8x64) AsFloat32x16() (to Float32x16) + +// Float64x8 converts from Int8x64 to Float64x8 +func (from Int8x64) AsFloat64x8() (to Float64x8) + +// Int16x32 converts from Int8x64 to Int16x32 +func (from Int8x64) AsInt16x32() (to Int16x32) + +// Int32x16 converts from Int8x64 to Int32x16 +func (from Int8x64) AsInt32x16() (to Int32x16) + +// Int64x8 converts from Int8x64 to Int64x8 +func (from Int8x64) AsInt64x8() (to Int64x8) + +// Uint8x64 converts from Int8x64 to Uint8x64 +func (from Int8x64) AsUint8x64() (to 
Uint8x64) + +// Uint16x32 converts from Int8x64 to Uint16x32 +func (from Int8x64) AsUint16x32() (to Uint16x32) + +// Uint32x16 converts from Int8x64 to Uint32x16 +func (from Int8x64) AsUint32x16() (to Uint32x16) + +// Uint64x8 converts from Int8x64 to Uint64x8 +func (from Int8x64) AsUint64x8() (to Uint64x8) // Float32x4 converts from Int16x8 to Float32x4 func (from Int16x8) AsFloat32x4() (to Float32x4) @@ -8924,14 +8951,17 @@ func (from Int16x8) AsFloat32x4() (to Float32x4) // Float64x2 converts from Int16x8 to Float64x2 func (from Int16x8) AsFloat64x2() (to Float64x2) +// Int8x16 converts from Int16x8 to Int8x16 +func (from Int16x8) AsInt8x16() (to Int8x16) + // Int32x4 converts from Int16x8 to Int32x4 func (from Int16x8) AsInt32x4() (to Int32x4) // Int64x2 converts from Int16x8 to Int64x2 func (from Int16x8) AsInt64x2() (to Int64x2) -// Int8x16 converts from Int16x8 to Int8x16 -func (from Int16x8) AsInt8x16() (to Int8x16) +// Uint8x16 converts from Int16x8 to Uint8x16 +func (from Int16x8) AsUint8x16() (to Uint8x16) // Uint16x8 converts from Int16x8 to Uint16x8 func (from Int16x8) AsUint16x8() (to Uint16x8) @@ -8942,51 +8972,78 @@ func (from Int16x8) AsUint32x4() (to Uint32x4) // Uint64x2 converts from Int16x8 to Uint64x2 func (from Int16x8) AsUint64x2() (to Uint64x2) -// Uint8x16 converts from Int16x8 to Uint8x16 -func (from Int16x8) AsUint8x16() (to Uint8x16) +// Float32x8 converts from Int16x16 to Float32x8 +func (from Int16x16) AsFloat32x8() (to Float32x8) -// Float32x16 converts from Int32x16 to Float32x16 -func (from Int32x16) AsFloat32x16() (to Float32x16) +// Float64x4 converts from Int16x16 to Float64x4 +func (from Int16x16) AsFloat64x4() (to Float64x4) -// Float64x8 converts from Int32x16 to Float64x8 -func (from Int32x16) AsFloat64x8() (to Float64x8) +// Int8x32 converts from Int16x16 to Int8x32 +func (from Int16x16) AsInt8x32() (to Int8x32) -// Int16x32 converts from Int32x16 to Int16x32 -func (from Int32x16) AsInt16x32() (to Int16x32) +// Int32x8 converts from Int16x16 to Int32x8 +func (from Int16x16) AsInt32x8() (to Int32x8) -// Int64x8 converts from Int32x16 to Int64x8 -func (from Int32x16) AsInt64x8() (to Int64x8) +// Int64x4 converts from Int16x16 to Int64x4 +func (from Int16x16) AsInt64x4() (to Int64x4) -// Int8x64 converts from Int32x16 to Int8x64 -func (from Int32x16) AsInt8x64() (to Int8x64) +// Uint8x32 converts from Int16x16 to Uint8x32 +func (from Int16x16) AsUint8x32() (to Uint8x32) -// Uint16x32 converts from Int32x16 to Uint16x32 -func (from Int32x16) AsUint16x32() (to Uint16x32) +// Uint16x16 converts from Int16x16 to Uint16x16 +func (from Int16x16) AsUint16x16() (to Uint16x16) -// Uint32x16 converts from Int32x16 to Uint32x16 -func (from Int32x16) AsUint32x16() (to Uint32x16) +// Uint32x8 converts from Int16x16 to Uint32x8 +func (from Int16x16) AsUint32x8() (to Uint32x8) -// Uint64x8 converts from Int32x16 to Uint64x8 -func (from Int32x16) AsUint64x8() (to Uint64x8) +// Uint64x4 converts from Int16x16 to Uint64x4 +func (from Int16x16) AsUint64x4() (to Uint64x4) -// Uint8x64 converts from Int32x16 to Uint8x64 -func (from Int32x16) AsUint8x64() (to Uint8x64) +// Float32x16 converts from Int16x32 to Float32x16 +func (from Int16x32) AsFloat32x16() (to Float32x16) -// Float32x4 converts from Int32x4 to Float32x4 -func (from Int32x4) AsFloat32x4() (to Float32x4) +// Float64x8 converts from Int16x32 to Float64x8 +func (from Int16x32) AsFloat64x8() (to Float64x8) -// Float64x2 converts from Int32x4 to Float64x2 -func (from Int32x4) AsFloat64x2() (to Float64x2) +// 
Int8x64 converts from Int16x32 to Int8x64 +func (from Int16x32) AsInt8x64() (to Int8x64) -// Int16x8 converts from Int32x4 to Int16x8 -func (from Int32x4) AsInt16x8() (to Int16x8) +// Int32x16 converts from Int16x32 to Int32x16 +func (from Int16x32) AsInt32x16() (to Int32x16) -// Int64x2 converts from Int32x4 to Int64x2 -func (from Int32x4) AsInt64x2() (to Int64x2) +// Int64x8 converts from Int16x32 to Int64x8 +func (from Int16x32) AsInt64x8() (to Int64x8) + +// Uint8x64 converts from Int16x32 to Uint8x64 +func (from Int16x32) AsUint8x64() (to Uint8x64) + +// Uint16x32 converts from Int16x32 to Uint16x32 +func (from Int16x32) AsUint16x32() (to Uint16x32) + +// Uint32x16 converts from Int16x32 to Uint32x16 +func (from Int16x32) AsUint32x16() (to Uint32x16) + +// Uint64x8 converts from Int16x32 to Uint64x8 +func (from Int16x32) AsUint64x8() (to Uint64x8) + +// Float32x4 converts from Int32x4 to Float32x4 +func (from Int32x4) AsFloat32x4() (to Float32x4) + +// Float64x2 converts from Int32x4 to Float64x2 +func (from Int32x4) AsFloat64x2() (to Float64x2) // Int8x16 converts from Int32x4 to Int8x16 func (from Int32x4) AsInt8x16() (to Int8x16) +// Int16x8 converts from Int32x4 to Int16x8 +func (from Int32x4) AsInt16x8() (to Int16x8) + +// Int64x2 converts from Int32x4 to Int64x2 +func (from Int32x4) AsInt64x2() (to Int64x2) + +// Uint8x16 converts from Int32x4 to Uint8x16 +func (from Int32x4) AsUint8x16() (to Uint8x16) + // Uint16x8 converts from Int32x4 to Uint16x8 func (from Int32x4) AsUint16x8() (to Uint16x8) @@ -8996,23 +9053,23 @@ func (from Int32x4) AsUint32x4() (to Uint32x4) // Uint64x2 converts from Int32x4 to Uint64x2 func (from Int32x4) AsUint64x2() (to Uint64x2) -// Uint8x16 converts from Int32x4 to Uint8x16 -func (from Int32x4) AsUint8x16() (to Uint8x16) - // Float32x8 converts from Int32x8 to Float32x8 func (from Int32x8) AsFloat32x8() (to Float32x8) // Float64x4 converts from Int32x8 to Float64x4 func (from Int32x8) AsFloat64x4() (to Float64x4) +// Int8x32 converts from Int32x8 to Int8x32 +func (from Int32x8) AsInt8x32() (to Int8x32) + // Int16x16 converts from Int32x8 to Int16x16 func (from Int32x8) AsInt16x16() (to Int16x16) // Int64x4 converts from Int32x8 to Int64x4 func (from Int32x8) AsInt64x4() (to Int64x4) -// Int8x32 converts from Int32x8 to Int8x32 -func (from Int32x8) AsInt8x32() (to Int8x32) +// Uint8x32 converts from Int32x8 to Uint8x32 +func (from Int32x8) AsUint8x32() (to Uint8x32) // Uint16x16 converts from Int32x8 to Uint16x16 func (from Int32x8) AsUint16x16() (to Uint16x16) @@ -9023,8 +9080,32 @@ func (from Int32x8) AsUint32x8() (to Uint32x8) // Uint64x4 converts from Int32x8 to Uint64x4 func (from Int32x8) AsUint64x4() (to Uint64x4) -// Uint8x32 converts from Int32x8 to Uint8x32 -func (from Int32x8) AsUint8x32() (to Uint8x32) +// Float32x16 converts from Int32x16 to Float32x16 +func (from Int32x16) AsFloat32x16() (to Float32x16) + +// Float64x8 converts from Int32x16 to Float64x8 +func (from Int32x16) AsFloat64x8() (to Float64x8) + +// Int8x64 converts from Int32x16 to Int8x64 +func (from Int32x16) AsInt8x64() (to Int8x64) + +// Int16x32 converts from Int32x16 to Int16x32 +func (from Int32x16) AsInt16x32() (to Int16x32) + +// Int64x8 converts from Int32x16 to Int64x8 +func (from Int32x16) AsInt64x8() (to Int64x8) + +// Uint8x64 converts from Int32x16 to Uint8x64 +func (from Int32x16) AsUint8x64() (to Uint8x64) + +// Uint16x32 converts from Int32x16 to Uint16x32 +func (from Int32x16) AsUint16x32() (to Uint16x32) + +// Uint32x16 converts from Int32x16 to Uint32x16 
+func (from Int32x16) AsUint32x16() (to Uint32x16) + +// Uint64x8 converts from Int32x16 to Uint64x8 +func (from Int32x16) AsUint64x8() (to Uint64x8) // Float32x4 converts from Int64x2 to Float32x4 func (from Int64x2) AsFloat32x4() (to Float32x4) @@ -9032,14 +9113,17 @@ func (from Int64x2) AsFloat32x4() (to Float32x4) // Float64x2 converts from Int64x2 to Float64x2 func (from Int64x2) AsFloat64x2() (to Float64x2) +// Int8x16 converts from Int64x2 to Int8x16 +func (from Int64x2) AsInt8x16() (to Int8x16) + // Int16x8 converts from Int64x2 to Int16x8 func (from Int64x2) AsInt16x8() (to Int16x8) // Int32x4 converts from Int64x2 to Int32x4 func (from Int64x2) AsInt32x4() (to Int32x4) -// Int8x16 converts from Int64x2 to Int8x16 -func (from Int64x2) AsInt8x16() (to Int8x16) +// Uint8x16 converts from Int64x2 to Uint8x16 +func (from Int64x2) AsUint8x16() (to Uint8x16) // Uint16x8 converts from Int64x2 to Uint16x8 func (from Int64x2) AsUint16x8() (to Uint16x8) @@ -9050,23 +9134,23 @@ func (from Int64x2) AsUint32x4() (to Uint32x4) // Uint64x2 converts from Int64x2 to Uint64x2 func (from Int64x2) AsUint64x2() (to Uint64x2) -// Uint8x16 converts from Int64x2 to Uint8x16 -func (from Int64x2) AsUint8x16() (to Uint8x16) - // Float32x8 converts from Int64x4 to Float32x8 func (from Int64x4) AsFloat32x8() (to Float32x8) // Float64x4 converts from Int64x4 to Float64x4 func (from Int64x4) AsFloat64x4() (to Float64x4) +// Int8x32 converts from Int64x4 to Int8x32 +func (from Int64x4) AsInt8x32() (to Int8x32) + // Int16x16 converts from Int64x4 to Int16x16 func (from Int64x4) AsInt16x16() (to Int16x16) // Int32x8 converts from Int64x4 to Int32x8 func (from Int64x4) AsInt32x8() (to Int32x8) -// Int8x32 converts from Int64x4 to Int8x32 -func (from Int64x4) AsInt8x32() (to Int8x32) +// Uint8x32 converts from Int64x4 to Uint8x32 +func (from Int64x4) AsUint8x32() (to Uint8x32) // Uint16x16 converts from Int64x4 to Uint16x16 func (from Int64x4) AsUint16x16() (to Uint16x16) @@ -9077,23 +9161,23 @@ func (from Int64x4) AsUint32x8() (to Uint32x8) // Uint64x4 converts from Int64x4 to Uint64x4 func (from Int64x4) AsUint64x4() (to Uint64x4) -// Uint8x32 converts from Int64x4 to Uint8x32 -func (from Int64x4) AsUint8x32() (to Uint8x32) - // Float32x16 converts from Int64x8 to Float32x16 func (from Int64x8) AsFloat32x16() (to Float32x16) // Float64x8 converts from Int64x8 to Float64x8 func (from Int64x8) AsFloat64x8() (to Float64x8) +// Int8x64 converts from Int64x8 to Int8x64 +func (from Int64x8) AsInt8x64() (to Int8x64) + // Int16x32 converts from Int64x8 to Int16x32 func (from Int64x8) AsInt16x32() (to Int16x32) // Int32x16 converts from Int64x8 to Int32x16 func (from Int64x8) AsInt32x16() (to Int32x16) -// Int8x64 converts from Int64x8 to Int8x64 -func (from Int64x8) AsInt8x64() (to Int8x64) +// Uint8x64 converts from Int64x8 to Uint8x64 +func (from Int64x8) AsUint8x64() (to Uint8x64) // Uint16x32 converts from Int64x8 to Uint16x32 func (from Int64x8) AsUint16x32() (to Uint16x32) @@ -9104,89 +9188,113 @@ func (from Int64x8) AsUint32x16() (to Uint32x16) // Uint64x8 converts from Int64x8 to Uint64x8 func (from Int64x8) AsUint64x8() (to Uint64x8) -// Uint8x64 converts from Int64x8 to Uint8x64 -func (from Int64x8) AsUint8x64() (to Uint8x64) +// Float32x4 converts from Uint8x16 to Float32x4 +func (from Uint8x16) AsFloat32x4() (to Float32x4) -// Float32x4 converts from Int8x16 to Float32x4 -func (from Int8x16) AsFloat32x4() (to Float32x4) +// Float64x2 converts from Uint8x16 to Float64x2 +func (from Uint8x16) AsFloat64x2() (to 
Float64x2) -// Float64x2 converts from Int8x16 to Float64x2 -func (from Int8x16) AsFloat64x2() (to Float64x2) +// Int8x16 converts from Uint8x16 to Int8x16 +func (from Uint8x16) AsInt8x16() (to Int8x16) -// Int16x8 converts from Int8x16 to Int16x8 -func (from Int8x16) AsInt16x8() (to Int16x8) +// Int16x8 converts from Uint8x16 to Int16x8 +func (from Uint8x16) AsInt16x8() (to Int16x8) -// Int32x4 converts from Int8x16 to Int32x4 -func (from Int8x16) AsInt32x4() (to Int32x4) +// Int32x4 converts from Uint8x16 to Int32x4 +func (from Uint8x16) AsInt32x4() (to Int32x4) -// Int64x2 converts from Int8x16 to Int64x2 -func (from Int8x16) AsInt64x2() (to Int64x2) +// Int64x2 converts from Uint8x16 to Int64x2 +func (from Uint8x16) AsInt64x2() (to Int64x2) -// Uint16x8 converts from Int8x16 to Uint16x8 -func (from Int8x16) AsUint16x8() (to Uint16x8) +// Uint16x8 converts from Uint8x16 to Uint16x8 +func (from Uint8x16) AsUint16x8() (to Uint16x8) -// Uint32x4 converts from Int8x16 to Uint32x4 -func (from Int8x16) AsUint32x4() (to Uint32x4) +// Uint32x4 converts from Uint8x16 to Uint32x4 +func (from Uint8x16) AsUint32x4() (to Uint32x4) -// Uint64x2 converts from Int8x16 to Uint64x2 -func (from Int8x16) AsUint64x2() (to Uint64x2) +// Uint64x2 converts from Uint8x16 to Uint64x2 +func (from Uint8x16) AsUint64x2() (to Uint64x2) -// Uint8x16 converts from Int8x16 to Uint8x16 -func (from Int8x16) AsUint8x16() (to Uint8x16) +// Float32x8 converts from Uint8x32 to Float32x8 +func (from Uint8x32) AsFloat32x8() (to Float32x8) -// Float32x8 converts from Int8x32 to Float32x8 -func (from Int8x32) AsFloat32x8() (to Float32x8) +// Float64x4 converts from Uint8x32 to Float64x4 +func (from Uint8x32) AsFloat64x4() (to Float64x4) -// Float64x4 converts from Int8x32 to Float64x4 -func (from Int8x32) AsFloat64x4() (to Float64x4) +// Int8x32 converts from Uint8x32 to Int8x32 +func (from Uint8x32) AsInt8x32() (to Int8x32) -// Int16x16 converts from Int8x32 to Int16x16 -func (from Int8x32) AsInt16x16() (to Int16x16) +// Int16x16 converts from Uint8x32 to Int16x16 +func (from Uint8x32) AsInt16x16() (to Int16x16) -// Int32x8 converts from Int8x32 to Int32x8 -func (from Int8x32) AsInt32x8() (to Int32x8) +// Int32x8 converts from Uint8x32 to Int32x8 +func (from Uint8x32) AsInt32x8() (to Int32x8) -// Int64x4 converts from Int8x32 to Int64x4 -func (from Int8x32) AsInt64x4() (to Int64x4) +// Int64x4 converts from Uint8x32 to Int64x4 +func (from Uint8x32) AsInt64x4() (to Int64x4) -// Uint16x16 converts from Int8x32 to Uint16x16 -func (from Int8x32) AsUint16x16() (to Uint16x16) +// Uint16x16 converts from Uint8x32 to Uint16x16 +func (from Uint8x32) AsUint16x16() (to Uint16x16) -// Uint32x8 converts from Int8x32 to Uint32x8 -func (from Int8x32) AsUint32x8() (to Uint32x8) +// Uint32x8 converts from Uint8x32 to Uint32x8 +func (from Uint8x32) AsUint32x8() (to Uint32x8) -// Uint64x4 converts from Int8x32 to Uint64x4 -func (from Int8x32) AsUint64x4() (to Uint64x4) +// Uint64x4 converts from Uint8x32 to Uint64x4 +func (from Uint8x32) AsUint64x4() (to Uint64x4) -// Uint8x32 converts from Int8x32 to Uint8x32 -func (from Int8x32) AsUint8x32() (to Uint8x32) +// Float32x16 converts from Uint8x64 to Float32x16 +func (from Uint8x64) AsFloat32x16() (to Float32x16) -// Float32x16 converts from Int8x64 to Float32x16 -func (from Int8x64) AsFloat32x16() (to Float32x16) +// Float64x8 converts from Uint8x64 to Float64x8 +func (from Uint8x64) AsFloat64x8() (to Float64x8) -// Float64x8 converts from Int8x64 to Float64x8 -func (from Int8x64) AsFloat64x8() (to 
Float64x8) +// Int8x64 converts from Uint8x64 to Int8x64 +func (from Uint8x64) AsInt8x64() (to Int8x64) -// Int16x32 converts from Int8x64 to Int16x32 -func (from Int8x64) AsInt16x32() (to Int16x32) +// Int16x32 converts from Uint8x64 to Int16x32 +func (from Uint8x64) AsInt16x32() (to Int16x32) -// Int32x16 converts from Int8x64 to Int32x16 -func (from Int8x64) AsInt32x16() (to Int32x16) +// Int32x16 converts from Uint8x64 to Int32x16 +func (from Uint8x64) AsInt32x16() (to Int32x16) -// Int64x8 converts from Int8x64 to Int64x8 -func (from Int8x64) AsInt64x8() (to Int64x8) +// Int64x8 converts from Uint8x64 to Int64x8 +func (from Uint8x64) AsInt64x8() (to Int64x8) -// Uint16x32 converts from Int8x64 to Uint16x32 -func (from Int8x64) AsUint16x32() (to Uint16x32) +// Uint16x32 converts from Uint8x64 to Uint16x32 +func (from Uint8x64) AsUint16x32() (to Uint16x32) -// Uint32x16 converts from Int8x64 to Uint32x16 -func (from Int8x64) AsUint32x16() (to Uint32x16) +// Uint32x16 converts from Uint8x64 to Uint32x16 +func (from Uint8x64) AsUint32x16() (to Uint32x16) -// Uint64x8 converts from Int8x64 to Uint64x8 -func (from Int8x64) AsUint64x8() (to Uint64x8) +// Uint64x8 converts from Uint8x64 to Uint64x8 +func (from Uint8x64) AsUint64x8() (to Uint64x8) -// Uint8x64 converts from Int8x64 to Uint8x64 -func (from Int8x64) AsUint8x64() (to Uint8x64) +// Float32x4 converts from Uint16x8 to Float32x4 +func (from Uint16x8) AsFloat32x4() (to Float32x4) + +// Float64x2 converts from Uint16x8 to Float64x2 +func (from Uint16x8) AsFloat64x2() (to Float64x2) + +// Int8x16 converts from Uint16x8 to Int8x16 +func (from Uint16x8) AsInt8x16() (to Int8x16) + +// Int16x8 converts from Uint16x8 to Int16x8 +func (from Uint16x8) AsInt16x8() (to Int16x8) + +// Int32x4 converts from Uint16x8 to Int32x4 +func (from Uint16x8) AsInt32x4() (to Int32x4) + +// Int64x2 converts from Uint16x8 to Int64x2 +func (from Uint16x8) AsInt64x2() (to Int64x2) + +// Uint8x16 converts from Uint16x8 to Uint8x16 +func (from Uint16x8) AsUint8x16() (to Uint8x16) + +// Uint32x4 converts from Uint16x8 to Uint32x4 +func (from Uint16x8) AsUint32x4() (to Uint32x4) + +// Uint64x2 converts from Uint16x8 to Uint64x2 +func (from Uint16x8) AsUint64x2() (to Uint64x2) // Float32x8 converts from Uint16x16 to Float32x8 func (from Uint16x16) AsFloat32x8() (to Float32x8) @@ -9194,6 +9302,9 @@ func (from Uint16x16) AsFloat32x8() (to Float32x8) // Float64x4 converts from Uint16x16 to Float64x4 func (from Uint16x16) AsFloat64x4() (to Float64x4) +// Int8x32 converts from Uint16x16 to Int8x32 +func (from Uint16x16) AsInt8x32() (to Int8x32) + // Int16x16 converts from Uint16x16 to Int16x16 func (from Uint16x16) AsInt16x16() (to Int16x16) @@ -9203,8 +9314,8 @@ func (from Uint16x16) AsInt32x8() (to Int32x8) // Int64x4 converts from Uint16x16 to Int64x4 func (from Uint16x16) AsInt64x4() (to Int64x4) -// Int8x32 converts from Uint16x16 to Int8x32 -func (from Uint16x16) AsInt8x32() (to Int8x32) +// Uint8x32 converts from Uint16x16 to Uint8x32 +func (from Uint16x16) AsUint8x32() (to Uint8x32) // Uint32x8 converts from Uint16x16 to Uint32x8 func (from Uint16x16) AsUint32x8() (to Uint32x8) @@ -9212,89 +9323,32 @@ func (from Uint16x16) AsUint32x8() (to Uint32x8) // Uint64x4 converts from Uint16x16 to Uint64x4 func (from Uint16x16) AsUint64x4() (to Uint64x4) -// Uint8x32 converts from Uint16x16 to Uint8x32 -func (from Uint16x16) AsUint8x32() (to Uint8x32) - // Float32x16 converts from Uint16x32 to Float32x16 func (from Uint16x32) AsFloat32x16() (to Float32x16) -// Float64x8 
converts from Uint16x32 to Float64x8 -func (from Uint16x32) AsFloat64x8() (to Float64x8) - -// Int16x32 converts from Uint16x32 to Int16x32 -func (from Uint16x32) AsInt16x32() (to Int16x32) - -// Int32x16 converts from Uint16x32 to Int32x16 -func (from Uint16x32) AsInt32x16() (to Int32x16) - -// Int64x8 converts from Uint16x32 to Int64x8 -func (from Uint16x32) AsInt64x8() (to Int64x8) - -// Int8x64 converts from Uint16x32 to Int8x64 -func (from Uint16x32) AsInt8x64() (to Int8x64) - -// Uint32x16 converts from Uint16x32 to Uint32x16 -func (from Uint16x32) AsUint32x16() (to Uint32x16) - -// Uint64x8 converts from Uint16x32 to Uint64x8 -func (from Uint16x32) AsUint64x8() (to Uint64x8) - -// Uint8x64 converts from Uint16x32 to Uint8x64 -func (from Uint16x32) AsUint8x64() (to Uint8x64) - -// Float32x4 converts from Uint16x8 to Float32x4 -func (from Uint16x8) AsFloat32x4() (to Float32x4) - -// Float64x2 converts from Uint16x8 to Float64x2 -func (from Uint16x8) AsFloat64x2() (to Float64x2) - -// Int16x8 converts from Uint16x8 to Int16x8 -func (from Uint16x8) AsInt16x8() (to Int16x8) - -// Int32x4 converts from Uint16x8 to Int32x4 -func (from Uint16x8) AsInt32x4() (to Int32x4) - -// Int64x2 converts from Uint16x8 to Int64x2 -func (from Uint16x8) AsInt64x2() (to Int64x2) - -// Int8x16 converts from Uint16x8 to Int8x16 -func (from Uint16x8) AsInt8x16() (to Int8x16) - -// Uint32x4 converts from Uint16x8 to Uint32x4 -func (from Uint16x8) AsUint32x4() (to Uint32x4) - -// Uint64x2 converts from Uint16x8 to Uint64x2 -func (from Uint16x8) AsUint64x2() (to Uint64x2) - -// Uint8x16 converts from Uint16x8 to Uint8x16 -func (from Uint16x8) AsUint8x16() (to Uint8x16) - -// Float32x16 converts from Uint32x16 to Float32x16 -func (from Uint32x16) AsFloat32x16() (to Float32x16) - -// Float64x8 converts from Uint32x16 to Float64x8 -func (from Uint32x16) AsFloat64x8() (to Float64x8) +// Float64x8 converts from Uint16x32 to Float64x8 +func (from Uint16x32) AsFloat64x8() (to Float64x8) -// Int16x32 converts from Uint32x16 to Int16x32 -func (from Uint32x16) AsInt16x32() (to Int16x32) +// Int8x64 converts from Uint16x32 to Int8x64 +func (from Uint16x32) AsInt8x64() (to Int8x64) -// Int32x16 converts from Uint32x16 to Int32x16 -func (from Uint32x16) AsInt32x16() (to Int32x16) +// Int16x32 converts from Uint16x32 to Int16x32 +func (from Uint16x32) AsInt16x32() (to Int16x32) -// Int64x8 converts from Uint32x16 to Int64x8 -func (from Uint32x16) AsInt64x8() (to Int64x8) +// Int32x16 converts from Uint16x32 to Int32x16 +func (from Uint16x32) AsInt32x16() (to Int32x16) -// Int8x64 converts from Uint32x16 to Int8x64 -func (from Uint32x16) AsInt8x64() (to Int8x64) +// Int64x8 converts from Uint16x32 to Int64x8 +func (from Uint16x32) AsInt64x8() (to Int64x8) -// Uint16x32 converts from Uint32x16 to Uint16x32 -func (from Uint32x16) AsUint16x32() (to Uint16x32) +// Uint8x64 converts from Uint16x32 to Uint8x64 +func (from Uint16x32) AsUint8x64() (to Uint8x64) -// Uint64x8 converts from Uint32x16 to Uint64x8 -func (from Uint32x16) AsUint64x8() (to Uint64x8) +// Uint32x16 converts from Uint16x32 to Uint32x16 +func (from Uint16x32) AsUint32x16() (to Uint32x16) -// Uint8x64 converts from Uint32x16 to Uint8x64 -func (from Uint32x16) AsUint8x64() (to Uint8x64) +// Uint64x8 converts from Uint16x32 to Uint64x8 +func (from Uint16x32) AsUint64x8() (to Uint64x8) // Float32x4 converts from Uint32x4 to Float32x4 func (from Uint32x4) AsFloat32x4() (to Float32x4) @@ -9302,6 +9356,9 @@ func (from Uint32x4) AsFloat32x4() (to Float32x4) // Float64x2 
converts from Uint32x4 to Float64x2 func (from Uint32x4) AsFloat64x2() (to Float64x2) +// Int8x16 converts from Uint32x4 to Int8x16 +func (from Uint32x4) AsInt8x16() (to Int8x16) + // Int16x8 converts from Uint32x4 to Int16x8 func (from Uint32x4) AsInt16x8() (to Int16x8) @@ -9311,8 +9368,8 @@ func (from Uint32x4) AsInt32x4() (to Int32x4) // Int64x2 converts from Uint32x4 to Int64x2 func (from Uint32x4) AsInt64x2() (to Int64x2) -// Int8x16 converts from Uint32x4 to Int8x16 -func (from Uint32x4) AsInt8x16() (to Int8x16) +// Uint8x16 converts from Uint32x4 to Uint8x16 +func (from Uint32x4) AsUint8x16() (to Uint8x16) // Uint16x8 converts from Uint32x4 to Uint16x8 func (from Uint32x4) AsUint16x8() (to Uint16x8) @@ -9320,15 +9377,15 @@ func (from Uint32x4) AsUint16x8() (to Uint16x8) // Uint64x2 converts from Uint32x4 to Uint64x2 func (from Uint32x4) AsUint64x2() (to Uint64x2) -// Uint8x16 converts from Uint32x4 to Uint8x16 -func (from Uint32x4) AsUint8x16() (to Uint8x16) - // Float32x8 converts from Uint32x8 to Float32x8 func (from Uint32x8) AsFloat32x8() (to Float32x8) // Float64x4 converts from Uint32x8 to Float64x4 func (from Uint32x8) AsFloat64x4() (to Float64x4) +// Int8x32 converts from Uint32x8 to Int8x32 +func (from Uint32x8) AsInt8x32() (to Int8x32) + // Int16x16 converts from Uint32x8 to Int16x16 func (from Uint32x8) AsInt16x16() (to Int16x16) @@ -9338,8 +9395,8 @@ func (from Uint32x8) AsInt32x8() (to Int32x8) // Int64x4 converts from Uint32x8 to Int64x4 func (from Uint32x8) AsInt64x4() (to Int64x4) -// Int8x32 converts from Uint32x8 to Int8x32 -func (from Uint32x8) AsInt8x32() (to Int8x32) +// Uint8x32 converts from Uint32x8 to Uint8x32 +func (from Uint32x8) AsUint8x32() (to Uint8x32) // Uint16x16 converts from Uint32x8 to Uint16x16 func (from Uint32x8) AsUint16x16() (to Uint16x16) @@ -9347,8 +9404,32 @@ func (from Uint32x8) AsUint16x16() (to Uint16x16) // Uint64x4 converts from Uint32x8 to Uint64x4 func (from Uint32x8) AsUint64x4() (to Uint64x4) -// Uint8x32 converts from Uint32x8 to Uint8x32 -func (from Uint32x8) AsUint8x32() (to Uint8x32) +// Float32x16 converts from Uint32x16 to Float32x16 +func (from Uint32x16) AsFloat32x16() (to Float32x16) + +// Float64x8 converts from Uint32x16 to Float64x8 +func (from Uint32x16) AsFloat64x8() (to Float64x8) + +// Int8x64 converts from Uint32x16 to Int8x64 +func (from Uint32x16) AsInt8x64() (to Int8x64) + +// Int16x32 converts from Uint32x16 to Int16x32 +func (from Uint32x16) AsInt16x32() (to Int16x32) + +// Int32x16 converts from Uint32x16 to Int32x16 +func (from Uint32x16) AsInt32x16() (to Int32x16) + +// Int64x8 converts from Uint32x16 to Int64x8 +func (from Uint32x16) AsInt64x8() (to Int64x8) + +// Uint8x64 converts from Uint32x16 to Uint8x64 +func (from Uint32x16) AsUint8x64() (to Uint8x64) + +// Uint16x32 converts from Uint32x16 to Uint16x32 +func (from Uint32x16) AsUint16x32() (to Uint16x32) + +// Uint64x8 converts from Uint32x16 to Uint64x8 +func (from Uint32x16) AsUint64x8() (to Uint64x8) // Float32x4 converts from Uint64x2 to Float32x4 func (from Uint64x2) AsFloat32x4() (to Float32x4) @@ -9356,6 +9437,9 @@ func (from Uint64x2) AsFloat32x4() (to Float32x4) // Float64x2 converts from Uint64x2 to Float64x2 func (from Uint64x2) AsFloat64x2() (to Float64x2) +// Int8x16 converts from Uint64x2 to Int8x16 +func (from Uint64x2) AsInt8x16() (to Int8x16) + // Int16x8 converts from Uint64x2 to Int16x8 func (from Uint64x2) AsInt16x8() (to Int16x8) @@ -9365,8 +9449,8 @@ func (from Uint64x2) AsInt32x4() (to Int32x4) // Int64x2 converts from 
Uint64x2 to Int64x2 func (from Uint64x2) AsInt64x2() (to Int64x2) -// Int8x16 converts from Uint64x2 to Int8x16 -func (from Uint64x2) AsInt8x16() (to Int8x16) +// Uint8x16 converts from Uint64x2 to Uint8x16 +func (from Uint64x2) AsUint8x16() (to Uint8x16) // Uint16x8 converts from Uint64x2 to Uint16x8 func (from Uint64x2) AsUint16x8() (to Uint16x8) @@ -9374,15 +9458,15 @@ func (from Uint64x2) AsUint16x8() (to Uint16x8) // Uint32x4 converts from Uint64x2 to Uint32x4 func (from Uint64x2) AsUint32x4() (to Uint32x4) -// Uint8x16 converts from Uint64x2 to Uint8x16 -func (from Uint64x2) AsUint8x16() (to Uint8x16) - // Float32x8 converts from Uint64x4 to Float32x8 func (from Uint64x4) AsFloat32x8() (to Float32x8) // Float64x4 converts from Uint64x4 to Float64x4 func (from Uint64x4) AsFloat64x4() (to Float64x4) +// Int8x32 converts from Uint64x4 to Int8x32 +func (from Uint64x4) AsInt8x32() (to Int8x32) + // Int16x16 converts from Uint64x4 to Int16x16 func (from Uint64x4) AsInt16x16() (to Int16x16) @@ -9392,8 +9476,8 @@ func (from Uint64x4) AsInt32x8() (to Int32x8) // Int64x4 converts from Uint64x4 to Int64x4 func (from Uint64x4) AsInt64x4() (to Int64x4) -// Int8x32 converts from Uint64x4 to Int8x32 -func (from Uint64x4) AsInt8x32() (to Int8x32) +// Uint8x32 converts from Uint64x4 to Uint8x32 +func (from Uint64x4) AsUint8x32() (to Uint8x32) // Uint16x16 converts from Uint64x4 to Uint16x16 func (from Uint64x4) AsUint16x16() (to Uint16x16) @@ -9401,15 +9485,15 @@ func (from Uint64x4) AsUint16x16() (to Uint16x16) // Uint32x8 converts from Uint64x4 to Uint32x8 func (from Uint64x4) AsUint32x8() (to Uint32x8) -// Uint8x32 converts from Uint64x4 to Uint8x32 -func (from Uint64x4) AsUint8x32() (to Uint8x32) - // Float32x16 converts from Uint64x8 to Float32x16 func (from Uint64x8) AsFloat32x16() (to Float32x16) // Float64x8 converts from Uint64x8 to Float64x8 func (from Uint64x8) AsFloat64x8() (to Float64x8) +// Int8x64 converts from Uint64x8 to Int8x64 +func (from Uint64x8) AsInt8x64() (to Int8x64) + // Int16x32 converts from Uint64x8 to Int16x32 func (from Uint64x8) AsInt16x32() (to Int16x32) @@ -9419,8 +9503,8 @@ func (from Uint64x8) AsInt32x16() (to Int32x16) // Int64x8 converts from Uint64x8 to Int64x8 func (from Uint64x8) AsInt64x8() (to Int64x8) -// Int8x64 converts from Uint64x8 to Int8x64 -func (from Uint64x8) AsInt8x64() (to Int8x64) +// Uint8x64 converts from Uint64x8 to Uint8x64 +func (from Uint64x8) AsUint8x64() (to Uint8x64) // Uint16x32 converts from Uint64x8 to Uint16x32 func (from Uint64x8) AsUint16x32() (to Uint16x32) @@ -9428,89 +9512,45 @@ func (from Uint64x8) AsUint16x32() (to Uint16x32) // Uint32x16 converts from Uint64x8 to Uint32x16 func (from Uint64x8) AsUint32x16() (to Uint32x16) -// Uint8x64 converts from Uint64x8 to Uint8x64 -func (from Uint64x8) AsUint8x64() (to Uint8x64) - -// Float32x4 converts from Uint8x16 to Float32x4 -func (from Uint8x16) AsFloat32x4() (to Float32x4) - -// Float64x2 converts from Uint8x16 to Float64x2 -func (from Uint8x16) AsFloat64x2() (to Float64x2) - -// Int16x8 converts from Uint8x16 to Int16x8 -func (from Uint8x16) AsInt16x8() (to Int16x8) - -// Int32x4 converts from Uint8x16 to Int32x4 -func (from Uint8x16) AsInt32x4() (to Int32x4) - -// Int64x2 converts from Uint8x16 to Int64x2 -func (from Uint8x16) AsInt64x2() (to Int64x2) - -// Int8x16 converts from Uint8x16 to Int8x16 -func (from Uint8x16) AsInt8x16() (to Int8x16) - -// Uint16x8 converts from Uint8x16 to Uint16x8 -func (from Uint8x16) AsUint16x8() (to Uint16x8) - -// Uint32x4 converts from 
Uint8x16 to Uint32x4 -func (from Uint8x16) AsUint32x4() (to Uint32x4) - -// Uint64x2 converts from Uint8x16 to Uint64x2 -func (from Uint8x16) AsUint64x2() (to Uint64x2) - -// Float32x8 converts from Uint8x32 to Float32x8 -func (from Uint8x32) AsFloat32x8() (to Float32x8) - -// Float64x4 converts from Uint8x32 to Float64x4 -func (from Uint8x32) AsFloat64x4() (to Float64x4) - -// Int16x16 converts from Uint8x32 to Int16x16 -func (from Uint8x32) AsInt16x16() (to Int16x16) +// converts from Mask8x16 to Int8x16 +func (from Mask8x16) AsInt8x16() (to Int8x16) -// Int32x8 converts from Uint8x32 to Int32x8 -func (from Uint8x32) AsInt32x8() (to Int32x8) +// converts from Int8x16 to Mask8x16 +func (from Int8x16) AsMask8x16() (to Mask8x16) -// Int64x4 converts from Uint8x32 to Int64x4 -func (from Uint8x32) AsInt64x4() (to Int64x4) +func (x Mask8x16) And(y Mask8x16) Mask8x16 -// Int8x32 converts from Uint8x32 to Int8x32 -func (from Uint8x32) AsInt8x32() (to Int8x32) +func (x Mask8x16) Or(y Mask8x16) Mask8x16 -// Uint16x16 converts from Uint8x32 to Uint16x16 -func (from Uint8x32) AsUint16x16() (to Uint16x16) +// converts from Mask8x32 to Int8x32 +func (from Mask8x32) AsInt8x32() (to Int8x32) -// Uint32x8 converts from Uint8x32 to Uint32x8 -func (from Uint8x32) AsUint32x8() (to Uint32x8) +// converts from Int8x32 to Mask8x32 +func (from Int8x32) AsMask8x32() (to Mask8x32) -// Uint64x4 converts from Uint8x32 to Uint64x4 -func (from Uint8x32) AsUint64x4() (to Uint64x4) +func (x Mask8x32) And(y Mask8x32) Mask8x32 -// Float32x16 converts from Uint8x64 to Float32x16 -func (from Uint8x64) AsFloat32x16() (to Float32x16) +func (x Mask8x32) Or(y Mask8x32) Mask8x32 -// Float64x8 converts from Uint8x64 to Float64x8 -func (from Uint8x64) AsFloat64x8() (to Float64x8) +// converts from Mask8x64 to Int8x64 +func (from Mask8x64) AsInt8x64() (to Int8x64) -// Int16x32 converts from Uint8x64 to Int16x32 -func (from Uint8x64) AsInt16x32() (to Int16x32) +// converts from Int8x64 to Mask8x64 +func (from Int8x64) AsMask8x64() (to Mask8x64) -// Int32x16 converts from Uint8x64 to Int32x16 -func (from Uint8x64) AsInt32x16() (to Int32x16) +func (x Mask8x64) And(y Mask8x64) Mask8x64 -// Int64x8 converts from Uint8x64 to Int64x8 -func (from Uint8x64) AsInt64x8() (to Int64x8) +func (x Mask8x64) Or(y Mask8x64) Mask8x64 -// Int8x64 converts from Uint8x64 to Int8x64 -func (from Uint8x64) AsInt8x64() (to Int8x64) +// converts from Mask16x8 to Int16x8 +func (from Mask16x8) AsInt16x8() (to Int16x8) -// Uint16x32 converts from Uint8x64 to Uint16x32 -func (from Uint8x64) AsUint16x32() (to Uint16x32) +// converts from Int16x8 to Mask16x8 +func (from Int16x8) AsMask16x8() (to Mask16x8) -// Uint32x16 converts from Uint8x64 to Uint32x16 -func (from Uint8x64) AsUint32x16() (to Uint32x16) +func (x Mask16x8) And(y Mask16x8) Mask16x8 -// Uint64x8 converts from Uint8x64 to Uint64x8 -func (from Uint8x64) AsUint64x8() (to Uint64x8) +func (x Mask16x8) Or(y Mask16x8) Mask16x8 // converts from Mask16x16 to Int16x16 func (from Mask16x16) AsInt16x16() (to Int16x16) @@ -9532,26 +9572,6 @@ func (x Mask16x32) And(y Mask16x32) Mask16x32 func (x Mask16x32) Or(y Mask16x32) Mask16x32 -// converts from Mask16x8 to Int16x8 -func (from Mask16x8) AsInt16x8() (to Int16x8) - -// converts from Int16x8 to Mask16x8 -func (from Int16x8) AsMask16x8() (to Mask16x8) - -func (x Mask16x8) And(y Mask16x8) Mask16x8 - -func (x Mask16x8) Or(y Mask16x8) Mask16x8 - -// converts from Mask32x16 to Int32x16 -func (from Mask32x16) AsInt32x16() (to Int32x16) - -// converts from Int32x16 to 
Mask32x16 -func (from Int32x16) AsMask32x16() (to Mask32x16) - -func (x Mask32x16) And(y Mask32x16) Mask32x16 - -func (x Mask32x16) Or(y Mask32x16) Mask32x16 - // converts from Mask32x4 to Int32x4 func (from Mask32x4) AsInt32x4() (to Int32x4) @@ -9572,6 +9592,16 @@ func (x Mask32x8) And(y Mask32x8) Mask32x8 func (x Mask32x8) Or(y Mask32x8) Mask32x8 +// converts from Mask32x16 to Int32x16 +func (from Mask32x16) AsInt32x16() (to Int32x16) + +// converts from Int32x16 to Mask32x16 +func (from Int32x16) AsMask32x16() (to Mask32x16) + +func (x Mask32x16) And(y Mask32x16) Mask32x16 + +func (x Mask32x16) Or(y Mask32x16) Mask32x16 + // converts from Mask64x2 to Int64x2 func (from Mask64x2) AsInt64x2() (to Int64x2) @@ -9601,33 +9631,3 @@ func (from Int64x8) AsMask64x8() (to Mask64x8) func (x Mask64x8) And(y Mask64x8) Mask64x8 func (x Mask64x8) Or(y Mask64x8) Mask64x8 - -// converts from Mask8x16 to Int8x16 -func (from Mask8x16) AsInt8x16() (to Int8x16) - -// converts from Int8x16 to Mask8x16 -func (from Int8x16) AsMask8x16() (to Mask8x16) - -func (x Mask8x16) And(y Mask8x16) Mask8x16 - -func (x Mask8x16) Or(y Mask8x16) Mask8x16 - -// converts from Mask8x32 to Int8x32 -func (from Mask8x32) AsInt8x32() (to Int8x32) - -// converts from Int8x32 to Mask8x32 -func (from Int8x32) AsMask8x32() (to Mask8x32) - -func (x Mask8x32) And(y Mask8x32) Mask8x32 - -func (x Mask8x32) Or(y Mask8x32) Mask8x32 - -// converts from Mask8x64 to Int8x64 -func (from Mask8x64) AsInt8x64() (to Int8x64) - -// converts from Int8x64 to Mask8x64 -func (from Int8x64) AsMask8x64() (to Mask8x64) - -func (x Mask8x64) And(y Mask8x64) Mask8x64 - -func (x Mask8x64) Or(y Mask8x64) Mask8x64 From 1be5eb2686d8050c7067897b1ed98446ff8566c5 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Mon, 16 Jun 2025 22:53:36 +0000 Subject: [PATCH 033/139] [dev.simd] cmd/compile: fix signature error of PairDotProdAccumulate. This CL is generated by CL 682135. 
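Note on the fix: per the diff below, the stubs previously declared the multiplicand operands of PairDotProdAccumulate (and the related masked/saturated variants) as Int32 vectors, even though the underlying VPDPWSSD instruction multiplies pairs of signed 16-bit elements and accumulates into 32-bit lanes; this CL changes those parameters to the matching Int16 vector types. A minimal usage sketch of the corrected signature, assuming the dev.simd toolchain with GOEXPERIMENT=simd; the types, method, and "simd" import path are taken from the diff below, while the helper name dot4 is illustrative and not part of the patch:

    package main

    import "simd"

    // dot4 accumulates pairwise dot products of a and b into acc.
    // With the corrected signature, both multiplicands are Int16x8,
    // matching VPDPWSSD's 16-bit inputs and 32-bit accumulator lanes.
    func dot4(acc simd.Int32x4, a, b simd.Int16x8) simd.Int32x4 {
            return acc.PairDotProdAccumulate(a, b)
    }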
Change-Id: I6f004b2eca6323f1ff22555c85db993386f24c6c Reviewed-on: https://go-review.googlesource.com/c/go/+/682155 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- .../compile/internal/ssa/_gen/simdAMD64.rules | 10 +- .../compile/internal/ssa/_gen/simdAMD64ops.go | 8 +- .../internal/ssa/_gen/simdgenericOps.go | 10 +- src/cmd/compile/internal/ssa/opGen.go | 186 ++++++++---------- src/cmd/compile/internal/ssa/rewriteAMD64.go | 54 +++-- .../compile/internal/ssagen/simdintrinsics.go | 10 +- src/simd/stubs_amd64.go | 92 ++++----- 7 files changed, 167 insertions(+), 203 deletions(-) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index d6d8246980a8bf..e8c59985005e6a 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -1115,9 +1115,9 @@ (MaskedSaturatedSubUint8x16 x y mask) => (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) (MaskedSaturatedSubUint8x32 x y mask) => (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) (MaskedSaturatedSubUint8x64 x y mask) => (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) -(MaskedSaturatedUnsignedSignedPairDotProdUint16x16 x y mask) => (VPMADDUBSWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedSaturatedUnsignedSignedPairDotProdUint16x32 x y mask) => (VPMADDUBSWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedSaturatedUnsignedSignedPairDotProdUint16x8 x y mask) => (VPMADDUBSWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedSaturatedUnsignedSignedPairDotProdUint8x16 x y mask) => (VPMADDUBSWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedSaturatedUnsignedSignedPairDotProdUint8x32 x y mask) => (VPMADDUBSWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedSaturatedUnsignedSignedPairDotProdUint8x64 x y mask) => (VPMADDUBSWMasked512 x y (VPMOVVec16x32ToM mask)) (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16 x y z mask) => (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4 x y z mask) => (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8 x y z mask) => (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) @@ -1450,11 +1450,9 @@ (SaturatedSubUint8x16 ...) => (VPSUBSB128 ...) (SaturatedSubUint8x32 ...) => (VPSUBSB256 ...) (SaturatedSubUint8x64 ...) => (VPSUBSB512 ...) -(SaturatedUnsignedSignedPairDotProdUint16x16 ...) => (VPMADDUBSW256 ...) -(SaturatedUnsignedSignedPairDotProdUint16x32 ...) => (VPMADDUBSW512 ...) -(SaturatedUnsignedSignedPairDotProdUint16x8 ...) => (VPMADDUBSW128 ...) (SaturatedUnsignedSignedPairDotProdUint8x16 ...) => (VPMADDUBSW128 ...) (SaturatedUnsignedSignedPairDotProdUint8x32 ...) => (VPMADDUBSW256 ...) +(SaturatedUnsignedSignedPairDotProdUint8x64 ...) => (VPMADDUBSW512 ...) (SaturatedUnsignedSignedQuadDotProdAccumulateInt32x16 ...) => (VPDPBUSDS512 ...) (SaturatedUnsignedSignedQuadDotProdAccumulateInt32x4 ...) => (VPDPBUSDS128 ...) (SaturatedUnsignedSignedQuadDotProdAccumulateInt32x8 ...) => (VPDPBUSDS256 ...) 
diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 17d250421f38d4..fbbebfc2094f0b 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -705,7 +705,6 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPMAXUWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULHUWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMADDUBSWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMAXUW256", argLength: 2, reg: fp21, asm: "VPMAXUW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUW256", argLength: 2, reg: fp21, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULHUW256", argLength: 2, reg: fp21, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, @@ -714,17 +713,14 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPMAXUWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINUWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULHUWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMADDUBSWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPMAXUW512", argLength: 2, reg: fp21, asm: "VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINUW512", argLength: 2, reg: fp21, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULHUW512", argLength: 2, reg: fp21, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMADDUBSW512", argLength: 2, reg: fp21, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPAVGW128", argLength: 2, reg: fp21, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPAVGWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMAXUWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULHUWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMADDUBSWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMAXUW128", argLength: 2, reg: fp21, asm: "VPMAXUW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUW128", argLength: 2, reg: fp21, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULHUW128", argLength: 2, reg: fp21, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -762,6 +758,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, 
fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPAVGBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPAVGB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMAXUBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMADDUBSWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMAXUB128", argLength: 2, reg: fp21, asm: "VPMAXUB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUB128", argLength: 2, reg: fp21, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMADDUBSW128", argLength: 2, reg: fp21, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -769,6 +766,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPAVGBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMAXUBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMADDUBSWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMAXUB256", argLength: 2, reg: fp21, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUB256", argLength: 2, reg: fp21, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMADDUBSW256", argLength: 2, reg: fp21, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -776,8 +774,10 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPAVGBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXUBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINUBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMADDUBSWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPMAXUB512", argLength: 2, reg: fp21, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINUB512", argLength: 2, reg: fp21, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMADDUBSW512", argLength: 2, reg: fp21, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRNDSCALEPS512", argLength: 1, reg: fp11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VREDUCEPS512", argLength: 1, reg: fp11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VCMPPS512", argLength: 2, reg: fp2k1, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index a29decdf008110..ee2eb15fe62dfe 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ 
b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -979,7 +979,6 @@ func simdGenericOps() []opData { {name: "MaskedPopCountUint16x16", argLength: 2, commutative: false}, {name: "MaskedSaturatedAddUint16x16", argLength: 3, commutative: true}, {name: "MaskedSaturatedSubUint16x16", argLength: 3, commutative: false}, - {name: "MaskedSaturatedUnsignedSignedPairDotProdUint16x16", argLength: 3, commutative: false}, {name: "MaskedSubUint16x16", argLength: 3, commutative: false}, {name: "MaxUint16x16", argLength: 2, commutative: true}, {name: "MinUint16x16", argLength: 2, commutative: true}, @@ -991,7 +990,6 @@ func simdGenericOps() []opData { {name: "PopCountUint16x16", argLength: 1, commutative: false}, {name: "SaturatedAddUint16x16", argLength: 2, commutative: true}, {name: "SaturatedSubUint16x16", argLength: 2, commutative: false}, - {name: "SaturatedUnsignedSignedPairDotProdUint16x16", argLength: 2, commutative: false}, {name: "SubUint16x16", argLength: 2, commutative: false}, {name: "XorUint16x16", argLength: 2, commutative: true}, {name: "AddUint16x32", argLength: 2, commutative: true}, @@ -1015,7 +1013,6 @@ func simdGenericOps() []opData { {name: "MaskedPopCountUint16x32", argLength: 2, commutative: false}, {name: "MaskedSaturatedAddUint16x32", argLength: 3, commutative: true}, {name: "MaskedSaturatedSubUint16x32", argLength: 3, commutative: false}, - {name: "MaskedSaturatedUnsignedSignedPairDotProdUint16x32", argLength: 3, commutative: false}, {name: "MaskedSubUint16x32", argLength: 3, commutative: false}, {name: "MaxUint16x32", argLength: 2, commutative: true}, {name: "MinUint16x32", argLength: 2, commutative: true}, @@ -1024,7 +1021,6 @@ func simdGenericOps() []opData { {name: "PopCountUint16x32", argLength: 1, commutative: false}, {name: "SaturatedAddUint16x32", argLength: 2, commutative: true}, {name: "SaturatedSubUint16x32", argLength: 2, commutative: false}, - {name: "SaturatedUnsignedSignedPairDotProdUint16x32", argLength: 2, commutative: false}, {name: "SubUint16x32", argLength: 2, commutative: false}, {name: "AddUint16x8", argLength: 2, commutative: true}, {name: "AndUint16x8", argLength: 2, commutative: true}, @@ -1049,7 +1045,6 @@ func simdGenericOps() []opData { {name: "MaskedPopCountUint16x8", argLength: 2, commutative: false}, {name: "MaskedSaturatedAddUint16x8", argLength: 3, commutative: true}, {name: "MaskedSaturatedSubUint16x8", argLength: 3, commutative: false}, - {name: "MaskedSaturatedUnsignedSignedPairDotProdUint16x8", argLength: 3, commutative: false}, {name: "MaskedSubUint16x8", argLength: 3, commutative: false}, {name: "MaxUint16x8", argLength: 2, commutative: true}, {name: "MinUint16x8", argLength: 2, commutative: true}, @@ -1061,7 +1056,6 @@ func simdGenericOps() []opData { {name: "PopCountUint16x8", argLength: 1, commutative: false}, {name: "SaturatedAddUint16x8", argLength: 2, commutative: true}, {name: "SaturatedSubUint16x8", argLength: 2, commutative: false}, - {name: "SaturatedUnsignedSignedPairDotProdUint16x8", argLength: 2, commutative: false}, {name: "SubUint16x8", argLength: 2, commutative: false}, {name: "XorUint16x8", argLength: 2, commutative: true}, {name: "AddUint32x16", argLength: 2, commutative: true}, @@ -1290,6 +1284,7 @@ func simdGenericOps() []opData { {name: "MaskedPopCountUint8x16", argLength: 2, commutative: false}, {name: "MaskedSaturatedAddUint8x16", argLength: 3, commutative: true}, {name: "MaskedSaturatedSubUint8x16", argLength: 3, commutative: false}, + {name: "MaskedSaturatedUnsignedSignedPairDotProdUint8x16", argLength: 3, 
commutative: false}, {name: "MaskedSubUint8x16", argLength: 3, commutative: false}, {name: "MaxUint8x16", argLength: 2, commutative: true}, {name: "MinUint8x16", argLength: 2, commutative: true}, @@ -1323,6 +1318,7 @@ func simdGenericOps() []opData { {name: "MaskedPopCountUint8x32", argLength: 2, commutative: false}, {name: "MaskedSaturatedAddUint8x32", argLength: 3, commutative: true}, {name: "MaskedSaturatedSubUint8x32", argLength: 3, commutative: false}, + {name: "MaskedSaturatedUnsignedSignedPairDotProdUint8x32", argLength: 3, commutative: false}, {name: "MaskedSubUint8x32", argLength: 3, commutative: false}, {name: "MaxUint8x32", argLength: 2, commutative: true}, {name: "MinUint8x32", argLength: 2, commutative: true}, @@ -1354,6 +1350,7 @@ func simdGenericOps() []opData { {name: "MaskedPopCountUint8x64", argLength: 2, commutative: false}, {name: "MaskedSaturatedAddUint8x64", argLength: 3, commutative: true}, {name: "MaskedSaturatedSubUint8x64", argLength: 3, commutative: false}, + {name: "MaskedSaturatedUnsignedSignedPairDotProdUint8x64", argLength: 3, commutative: false}, {name: "MaskedSubUint8x64", argLength: 3, commutative: false}, {name: "MaxUint8x64", argLength: 2, commutative: true}, {name: "MinUint8x64", argLength: 2, commutative: true}, @@ -1361,6 +1358,7 @@ func simdGenericOps() []opData { {name: "PopCountUint8x64", argLength: 1, commutative: false}, {name: "SaturatedAddUint8x64", argLength: 2, commutative: true}, {name: "SaturatedSubUint8x64", argLength: 2, commutative: false}, + {name: "SaturatedUnsignedSignedPairDotProdUint8x64", argLength: 2, commutative: false}, {name: "SubUint8x64", argLength: 2, commutative: false}, {name: "CeilSuppressExceptionWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, {name: "CeilWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index ac47bad525e80a..45f35548383212 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1898,7 +1898,6 @@ const ( OpAMD64VPMAXUWMasked256 OpAMD64VPMINUWMasked256 OpAMD64VPMULHUWMasked256 - OpAMD64VPMADDUBSWMasked256 OpAMD64VPMAXUW256 OpAMD64VPMINUW256 OpAMD64VPMULHUW256 @@ -1907,17 +1906,14 @@ const ( OpAMD64VPMAXUWMasked512 OpAMD64VPMINUWMasked512 OpAMD64VPMULHUWMasked512 - OpAMD64VPMADDUBSWMasked512 OpAMD64VPMAXUW512 OpAMD64VPMINUW512 OpAMD64VPMULHUW512 - OpAMD64VPMADDUBSW512 OpAMD64VPAVGW128 OpAMD64VPAVGWMasked128 OpAMD64VPMAXUWMasked128 OpAMD64VPMINUWMasked128 OpAMD64VPMULHUWMasked128 - OpAMD64VPMADDUBSWMasked128 OpAMD64VPMAXUW128 OpAMD64VPMINUW128 OpAMD64VPMULHUW128 @@ -1955,6 +1951,7 @@ const ( OpAMD64VPAVGBMasked128 OpAMD64VPMAXUBMasked128 OpAMD64VPMINUBMasked128 + OpAMD64VPMADDUBSWMasked128 OpAMD64VPMAXUB128 OpAMD64VPMINUB128 OpAMD64VPMADDUBSW128 @@ -1962,6 +1959,7 @@ const ( OpAMD64VPAVGBMasked256 OpAMD64VPMAXUBMasked256 OpAMD64VPMINUBMasked256 + OpAMD64VPMADDUBSWMasked256 OpAMD64VPMAXUB256 OpAMD64VPMINUB256 OpAMD64VPMADDUBSW256 @@ -1969,8 +1967,10 @@ const ( OpAMD64VPAVGBMasked512 OpAMD64VPMAXUBMasked512 OpAMD64VPMINUBMasked512 + OpAMD64VPMADDUBSWMasked512 OpAMD64VPMAXUB512 OpAMD64VPMINUB512 + OpAMD64VPMADDUBSW512 OpAMD64VRNDSCALEPS512 OpAMD64VREDUCEPS512 OpAMD64VCMPPS512 @@ -5262,7 +5262,6 @@ const ( OpMaskedPopCountUint16x16 OpMaskedSaturatedAddUint16x16 OpMaskedSaturatedSubUint16x16 - OpMaskedSaturatedUnsignedSignedPairDotProdUint16x16 OpMaskedSubUint16x16 OpMaxUint16x16 OpMinUint16x16 @@ -5274,7 +5273,6 @@ const ( 
OpPopCountUint16x16 OpSaturatedAddUint16x16 OpSaturatedSubUint16x16 - OpSaturatedUnsignedSignedPairDotProdUint16x16 OpSubUint16x16 OpXorUint16x16 OpAddUint16x32 @@ -5298,7 +5296,6 @@ const ( OpMaskedPopCountUint16x32 OpMaskedSaturatedAddUint16x32 OpMaskedSaturatedSubUint16x32 - OpMaskedSaturatedUnsignedSignedPairDotProdUint16x32 OpMaskedSubUint16x32 OpMaxUint16x32 OpMinUint16x32 @@ -5307,7 +5304,6 @@ const ( OpPopCountUint16x32 OpSaturatedAddUint16x32 OpSaturatedSubUint16x32 - OpSaturatedUnsignedSignedPairDotProdUint16x32 OpSubUint16x32 OpAddUint16x8 OpAndUint16x8 @@ -5332,7 +5328,6 @@ const ( OpMaskedPopCountUint16x8 OpMaskedSaturatedAddUint16x8 OpMaskedSaturatedSubUint16x8 - OpMaskedSaturatedUnsignedSignedPairDotProdUint16x8 OpMaskedSubUint16x8 OpMaxUint16x8 OpMinUint16x8 @@ -5344,7 +5339,6 @@ const ( OpPopCountUint16x8 OpSaturatedAddUint16x8 OpSaturatedSubUint16x8 - OpSaturatedUnsignedSignedPairDotProdUint16x8 OpSubUint16x8 OpXorUint16x8 OpAddUint32x16 @@ -5573,6 +5567,7 @@ const ( OpMaskedPopCountUint8x16 OpMaskedSaturatedAddUint8x16 OpMaskedSaturatedSubUint8x16 + OpMaskedSaturatedUnsignedSignedPairDotProdUint8x16 OpMaskedSubUint8x16 OpMaxUint8x16 OpMinUint8x16 @@ -5606,6 +5601,7 @@ const ( OpMaskedPopCountUint8x32 OpMaskedSaturatedAddUint8x32 OpMaskedSaturatedSubUint8x32 + OpMaskedSaturatedUnsignedSignedPairDotProdUint8x32 OpMaskedSubUint8x32 OpMaxUint8x32 OpMinUint8x32 @@ -5637,6 +5633,7 @@ const ( OpMaskedPopCountUint8x64 OpMaskedSaturatedAddUint8x64 OpMaskedSaturatedSubUint8x64 + OpMaskedSaturatedUnsignedSignedPairDotProdUint8x64 OpMaskedSubUint8x64 OpMaxUint8x64 OpMinUint8x64 @@ -5644,6 +5641,7 @@ const ( OpPopCountUint8x64 OpSaturatedAddUint8x64 OpSaturatedSubUint8x64 + OpSaturatedUnsignedSignedPairDotProdUint8x64 OpSubUint8x64 OpCeilSuppressExceptionWithPrecisionFloat32x16 OpCeilWithPrecisionFloat32x16 @@ -29231,21 +29229,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPMADDUBSWMasked256", - argLen: 3, - asm: x86.AVPMADDUBSW, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPMAXUW256", argLen: 2, @@ -29370,21 +29353,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPMADDUBSWMasked512", - argLen: 3, - asm: x86.AVPMADDUBSW, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPMAXUW512", argLen: 2, @@ -29430,20 +29398,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPMADDUBSW512", - argLen: 2, - asm: x86.AVPMADDUBSW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPAVGW128", argLen: 2, @@ -29523,21 +29477,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPMADDUBSWMasked128", - argLen: 3, - asm: x86.AVPMADDUBSW, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 
K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPMAXUW128", argLen: 2, @@ -30111,6 +30050,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMADDUBSWMasked128", + argLen: 3, + asm: x86.AVPMADDUBSW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMAXUB128", argLen: 2, @@ -30218,6 +30172,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMADDUBSWMasked256", + argLen: 3, + asm: x86.AVPMADDUBSW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMAXUB256", argLen: 2, @@ -30325,6 +30294,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMADDUBSWMasked512", + argLen: 3, + asm: x86.AVPMADDUBSW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMAXUB512", argLen: 2, @@ -30355,6 +30339,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMADDUBSW512", + argLen: 2, + asm: x86.AVPMADDUBSW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VRNDSCALEPS512", auxType: auxInt8, @@ -64134,11 +64132,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "MaskedSaturatedUnsignedSignedPairDotProdUint16x16", - argLen: 3, - generic: true, - }, { name: "MaskedSubUint16x16", argLen: 3, @@ -64200,11 +64193,6 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, - { - name: "SaturatedUnsignedSignedPairDotProdUint16x16", - argLen: 2, - generic: true, - }, { name: "SubUint16x16", argLen: 2, @@ -64332,11 +64320,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "MaskedSaturatedUnsignedSignedPairDotProdUint16x32", - argLen: 3, - generic: true, - }, { name: "MaskedSubUint16x32", argLen: 3, @@ -64382,11 +64365,6 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, - { - name: "SaturatedUnsignedSignedPairDotProdUint16x32", - argLen: 2, - generic: true, - }, { name: "SubUint16x32", argLen: 2, @@ -64519,11 +64497,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "MaskedSaturatedUnsignedSignedPairDotProdUint16x8", - argLen: 3, - generic: true, - }, { name: "MaskedSubUint16x8", argLen: 3, @@ -64585,11 +64558,6 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, - { - name: 
"SaturatedUnsignedSignedPairDotProdUint16x8", - argLen: 2, - generic: true, - }, { name: "SubUint16x8", argLen: 2, @@ -65846,6 +65814,11 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "MaskedSaturatedUnsignedSignedPairDotProdUint8x16", + argLen: 3, + generic: true, + }, { name: "MaskedSubUint8x16", argLen: 3, @@ -66028,6 +66001,11 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "MaskedSaturatedUnsignedSignedPairDotProdUint8x32", + argLen: 3, + generic: true, + }, { name: "MaskedSubUint8x32", argLen: 3, @@ -66199,6 +66177,11 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "MaskedSaturatedUnsignedSignedPairDotProdUint8x64", + argLen: 3, + generic: true, + }, { name: "MaskedSubUint8x64", argLen: 3, @@ -66238,6 +66221,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "SaturatedUnsignedSignedPairDotProdUint8x64", + argLen: 2, + generic: true, + }, { name: "SubUint8x64", argLen: 2, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 80d8eef8733ecf..73b873be93be0b 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -3374,12 +3374,12 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedSaturatedSubUint8x32(v) case OpMaskedSaturatedSubUint8x64: return rewriteValueAMD64_OpMaskedSaturatedSubUint8x64(v) - case OpMaskedSaturatedUnsignedSignedPairDotProdUint16x16: - return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint16x16(v) - case OpMaskedSaturatedUnsignedSignedPairDotProdUint16x32: - return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint16x32(v) - case OpMaskedSaturatedUnsignedSignedPairDotProdUint16x8: - return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint16x8(v) + case OpMaskedSaturatedUnsignedSignedPairDotProdUint8x16: + return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint8x16(v) + case OpMaskedSaturatedUnsignedSignedPairDotProdUint8x32: + return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint8x32(v) + case OpMaskedSaturatedUnsignedSignedPairDotProdUint8x64: + return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint8x64(v) case OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16: return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16(v) case OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4: @@ -4455,21 +4455,15 @@ func rewriteValueAMD64(v *Value) bool { case OpSaturatedSubUint8x64: v.Op = OpAMD64VPSUBSB512 return true - case OpSaturatedUnsignedSignedPairDotProdUint16x16: - v.Op = OpAMD64VPMADDUBSW256 - return true - case OpSaturatedUnsignedSignedPairDotProdUint16x32: - v.Op = OpAMD64VPMADDUBSW512 - return true - case OpSaturatedUnsignedSignedPairDotProdUint16x8: - v.Op = OpAMD64VPMADDUBSW128 - return true case OpSaturatedUnsignedSignedPairDotProdUint8x16: v.Op = OpAMD64VPMADDUBSW128 return true case OpSaturatedUnsignedSignedPairDotProdUint8x32: v.Op = OpAMD64VPMADDUBSW256 return true + case OpSaturatedUnsignedSignedPairDotProdUint8x64: + v.Op = OpAMD64VPMADDUBSW512 + return true case OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16: v.Op = OpAMD64VPDPBUSDS512 return true @@ -46801,55 +46795,55 @@ func rewriteValueAMD64_OpMaskedSaturatedSubUint8x64(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint16x16(v *Value) bool { +func 
rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedUnsignedSignedPairDotProdUint16x16 x y mask) - // result: (VPMADDUBSWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (MaskedSaturatedUnsignedSignedPairDotProdUint8x16 x y mask) + // result: (VPMADDUBSWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMADDUBSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v.reset(OpAMD64VPMADDUBSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedUnsignedSignedPairDotProdUint16x32 x y mask) - // result: (VPMADDUBSWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (MaskedSaturatedUnsignedSignedPairDotProdUint8x32 x y mask) + // result: (VPMADDUBSWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMADDUBSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v.reset(OpAMD64VPMADDUBSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedUnsignedSignedPairDotProdUint16x8 x y mask) - // result: (VPMADDUBSWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (MaskedSaturatedUnsignedSignedPairDotProdUint8x64 x y mask) + // result: (VPMADDUBSWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMADDUBSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v.reset(OpAMD64VPMADDUBSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 58e2e79eec12a6..2fb26dd01efe23 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -1126,9 +1126,9 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint16x8.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x16.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x32.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedSaturatedUnsignedSignedPairDotProd", opLen3(ssa.OpMaskedSaturatedUnsignedSignedPairDotProdUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedSaturatedUnsignedSignedPairDotProd", opLen3(ssa.OpMaskedSaturatedUnsignedSignedPairDotProdUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedSaturatedUnsignedSignedPairDotProd", opLen3(ssa.OpMaskedSaturatedUnsignedSignedPairDotProdUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedSaturatedUnsignedSignedPairDotProd", opLen3(ssa.OpMaskedSaturatedUnsignedSignedPairDotProdUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedSaturatedUnsignedSignedPairDotProd", opLen3(ssa.OpMaskedSaturatedUnsignedSignedPairDotProdUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedSaturatedUnsignedSignedPairDotProd", opLen3(ssa.OpMaskedSaturatedUnsignedSignedPairDotProdUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x16.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) @@ -1463,9 +1463,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint16x32.SaturatedSub", opLen2(ssa.OpSaturatedSubUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x16.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x32.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x8.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x16.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) diff --git a/src/simd/stubs_amd64.go b/src/simd/stubs_amd64.go index c409d9663ff2f5..6a271154e10882 100644 --- a/src/simd/stubs_amd64.go +++ b/src/simd/stubs_amd64.go @@ -5962,17 +5962,17 @@ func (x Int16x32) MaskedPairDotProd(y Int16x32, z Mask16x32) Int32x16 // PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // // Asm: VPDPWSSD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedPairDotProdAccumulate(y Int16x8, z Int32x4, u Mask32x4) Int32x4 +func (x Int32x4) MaskedPairDotProdAccumulate(y Int16x8, z Int16x8, u Mask32x4) Int32x4 // PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // // Asm: VPDPWSSD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedPairDotProdAccumulate(y Int16x16, z Int32x8, u Mask32x8) Int32x8 +func (x Int32x8) MaskedPairDotProdAccumulate(y Int16x16, z Int16x16, u Mask32x8) Int32x8 // PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // // Asm: VPDPWSSD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedPairDotProdAccumulate(y Int16x32, z Int32x16, u Mask32x16) Int32x16 +func (x Int32x16) MaskedPairDotProdAccumulate(y Int16x32, z Int16x32, u Mask32x16) Int32x16 /* MaskedPopCount */ @@ -6239,17 +6239,17 @@ func (x Uint16x32) MaskedSaturatedAdd(y Uint16x32, z Mask16x32) Uint16x32 // SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // // Asm: VPDPWSSDS, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedSaturatedPairDotProdAccumulate(y Int16x8, z Int32x4, u Mask32x4) Int32x4 +func (x Int32x4) MaskedSaturatedPairDotProdAccumulate(y Int16x8, z Int16x8, u Mask32x4) Int32x4 // SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. 
// // Asm: VPDPWSSDS, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedSaturatedPairDotProdAccumulate(y Int16x16, z Int32x8, u Mask32x8) Int32x8 +func (x Int32x8) MaskedSaturatedPairDotProdAccumulate(y Int16x16, z Int16x16, u Mask32x8) Int32x8 // SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // // Asm: VPDPWSSDS, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedSaturatedPairDotProdAccumulate(y Int16x32, z Int32x16, u Mask32x16) Int32x16 +func (x Int32x16) MaskedSaturatedPairDotProdAccumulate(y Int16x32, z Int16x32, u Mask32x16) Int32x16 /* MaskedSaturatedSub */ @@ -6319,51 +6319,51 @@ func (x Uint16x32) MaskedSaturatedSub(y Uint16x32, z Mask16x32) Uint16x32 // yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDUBSW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedSaturatedUnsignedSignedPairDotProd(y Int16x8, z Mask16x8) Int16x8 +func (x Uint8x16) MaskedSaturatedUnsignedSignedPairDotProd(y Int8x16, z Mask16x8) Int16x8 // SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, // yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDUBSW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedSaturatedUnsignedSignedPairDotProd(y Int16x16, z Mask16x16) Int16x16 +func (x Uint8x32) MaskedSaturatedUnsignedSignedPairDotProd(y Int8x32, z Mask16x16) Int16x16 // SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, // yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDUBSW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedSaturatedUnsignedSignedPairDotProd(y Int16x32, z Mask16x32) Int16x32 +func (x Uint8x64) MaskedSaturatedUnsignedSignedPairDotProd(y Int8x64, z Mask16x32) Int16x32 /* MaskedSaturatedUnsignedSignedQuadDotProdAccumulate */ // SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int32x4, u Mask32x4) Int32x4 +func (x Int32x4) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16, u Mask32x4) Int32x4 // SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int32x8, u Mask32x8) Int32x8 +func (x Int32x8) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32, u Mask32x8) Int32x8 // SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16, u Mask32x16) Int32x16 +func (x Int32x16) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64, u Mask32x16) Int32x16 // SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
// // Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int32x4, u Mask32x4) Uint32x4 +func (x Uint32x4) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16, u Mask32x4) Uint32x4 // SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int32x8, u Mask32x8) Uint32x8 +func (x Uint32x8) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32, u Mask32x8) Uint32x8 // SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16, u Mask32x16) Uint32x16 +func (x Uint32x16) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64, u Mask32x16) Uint32x16 /* MaskedSqrt */ @@ -6630,32 +6630,32 @@ func (x Float64x8) MaskedTruncWithPrecision(imm uint8, y Mask64x8) Float64x8 // UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int32x4, u Mask32x4) Int32x4 +func (x Int32x4) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16, u Mask32x4) Int32x4 // UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int32x8, u Mask32x8) Int32x8 +func (x Int32x8) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32, u Mask32x8) Int32x8 // UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16, u Mask32x16) Int32x16 +func (x Int32x16) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64, u Mask32x16) Int32x16 // UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int32x4, u Mask32x4) Uint32x4 +func (x Uint32x4) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16, u Mask32x4) Uint32x4 // UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int32x8, u Mask32x8) Uint32x8 +func (x Uint32x8) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32, u Mask32x8) Uint32x8 // UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
// // Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16, u Mask32x16) Uint32x16 +func (x Uint32x16) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64, u Mask32x16) Uint32x16 /* MaskedXor */ @@ -7597,17 +7597,17 @@ func (x Int16x32) PairDotProd(y Int16x32) Int32x16 // PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // // Asm: VPDPWSSD, CPU Feature: AVX_VNNI -func (x Int32x4) PairDotProdAccumulate(y Int32x4, z Int32x4) Int32x4 +func (x Int32x4) PairDotProdAccumulate(y Int16x8, z Int16x8) Int32x4 // PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // // Asm: VPDPWSSD, CPU Feature: AVX_VNNI -func (x Int32x8) PairDotProdAccumulate(y Int32x8, z Int32x8) Int32x8 +func (x Int32x8) PairDotProdAccumulate(y Int16x16, z Int16x16) Int32x8 // PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // // Asm: VPDPWSSD, CPU Feature: AVX512EVEX -func (x Int32x16) PairDotProdAccumulate(y Int16x32, z Int32x16) Int32x16 +func (x Int32x16) PairDotProdAccumulate(y Int16x32, z Int16x32) Int32x16 /* PairwiseAdd */ @@ -8048,17 +8048,17 @@ func (x Uint16x32) SaturatedAdd(y Uint16x32) Uint16x32 // SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // // Asm: VPDPWSSDS, CPU Feature: AVX_VNNI -func (x Int32x4) SaturatedPairDotProdAccumulate(y Int32x4, z Int32x4) Int32x4 +func (x Int32x4) SaturatedPairDotProdAccumulate(y Int16x8, z Int16x8) Int32x4 // SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // // Asm: VPDPWSSDS, CPU Feature: AVX_VNNI -func (x Int32x8) SaturatedPairDotProdAccumulate(y Int32x8, z Int32x8) Int32x8 +func (x Int32x8) SaturatedPairDotProdAccumulate(y Int16x16, z Int16x16) Int32x8 // SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // // Asm: VPDPWSSDS, CPU Feature: AVX512EVEX -func (x Int32x16) SaturatedPairDotProdAccumulate(y Int16x32, z Int32x16) Int32x16 +func (x Int32x16) SaturatedPairDotProdAccumulate(y Int16x32, z Int16x32) Int32x16 /* SaturatedPairwiseAdd */ @@ -8168,51 +8168,39 @@ func (x Uint8x32) SaturatedUnsignedSignedPairDotProd(y Int8x32) Int16x16 // yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDUBSW, CPU Feature: AVX512EVEX -func (x Uint16x8) SaturatedUnsignedSignedPairDotProd(y Int16x8) Int16x8 - -// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, -// yielding a vector of half as many elements with twice the input element size. -// -// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX -func (x Uint16x16) SaturatedUnsignedSignedPairDotProd(y Int16x16) Int16x16 - -// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, -// yielding a vector of half as many elements with twice the input element size. -// -// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX -func (x Uint16x32) SaturatedUnsignedSignedPairDotProd(y Int16x32) Int16x32 +func (x Uint8x64) SaturatedUnsignedSignedPairDotProd(y Int8x64) Int16x32 /* SaturatedUnsignedSignedQuadDotProdAccumulate */ // SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
// // Asm: VPDPBUSDS, CPU Feature: AVX_VNNI -func (x Int32x4) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint32x4, z Int32x4) Int32x4 +func (x Int32x4) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16) Int32x4 // SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSDS, CPU Feature: AVX_VNNI -func (x Int32x8) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint32x8, z Int32x8) Int32x8 +func (x Int32x8) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32) Int32x8 // SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Int32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16) Int32x16 +func (x Int32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Int32x16 // SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSDS, CPU Feature: AVX_VNNI -func (x Uint32x4) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint32x4, z Int32x4) Uint32x4 +func (x Uint32x4) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16) Uint32x4 // SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSDS, CPU Feature: AVX_VNNI -func (x Uint32x8) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint32x8, z Int32x8) Uint32x8 +func (x Uint32x8) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32) Uint32x8 // SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Uint32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16) Uint32x16 +func (x Uint32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Uint32x16 /* Sign */ @@ -8543,32 +8531,32 @@ func (x Float64x8) TruncWithPrecision(imm8 uint8) Float64x8 // UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSD, CPU Feature: AVX_VNNI -func (x Int32x4) UnsignedSignedQuadDotProdAccumulate(y Uint32x4, z Int32x4) Int32x4 +func (x Int32x4) UnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16) Int32x4 // UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSD, CPU Feature: AVX_VNNI -func (x Int32x8) UnsignedSignedQuadDotProdAccumulate(y Uint32x8, z Int32x8) Int32x8 +func (x Int32x8) UnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32) Int32x8 // UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Int32x16) UnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16) Int32x16 +func (x Int32x16) UnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Int32x16 // UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
// // Asm: VPDPBUSD, CPU Feature: AVX_VNNI -func (x Uint32x4) UnsignedSignedQuadDotProdAccumulate(y Uint32x4, z Int32x4) Uint32x4 +func (x Uint32x4) UnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16) Uint32x4 // UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSD, CPU Feature: AVX_VNNI -func (x Uint32x8) UnsignedSignedQuadDotProdAccumulate(y Uint32x8, z Int32x8) Uint32x8 +func (x Uint32x8) UnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32) Uint32x8 // UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Uint32x16) UnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16) Uint32x16 +func (x Uint32x16) UnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Uint32x16 /* Xor */ From 1313521f75e947a91e712ccdfccbd51fe9f3fc11 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Tue, 17 Jun 2025 19:31:11 +0000 Subject: [PATCH 034/139] [dev.simd] cmd/compile: remove fused mul/add/sub shapes. This CL only keeps one shape of those fused mul/add/sub operations. The rest of the instructions will be generated during lowering as an optimization. This CL is generated by CL 682436. Change-Id: Iadee1786185289838e04f3aa8f333844cfacc02e Reviewed-on: https://go-review.googlesource.com/c/go/+/682435 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/amd64/simdssa.go | 274 +- .../compile/internal/ssa/_gen/simdAMD64.rules | 252 +- .../compile/internal/ssa/_gen/simdAMD64ops.go | 180 - .../internal/ssa/_gen/simdgenericOps.go | 252 +- src/cmd/compile/internal/ssa/opGen.go | 5388 ++--------------- src/cmd/compile/internal/ssa/rewriteAMD64.go | 2430 +------- .../compile/internal/ssagen/simdintrinsics.go | 252 +- src/simd/stubs_amd64.go | 1116 +--- 8 files changed, 857 insertions(+), 9287 deletions(-) diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 484c389cef254f..7b47a8dddbfadb 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -654,114 +654,24 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPCMPUQMasked512: p = simdFp2k1k1Imm8(s, v) - case ssa.OpAMD64VFMADD132PS128, - ssa.OpAMD64VFMADD132PS256, - ssa.OpAMD64VFMADD132PS512, - ssa.OpAMD64VFMADD132PD128, - ssa.OpAMD64VFMADD132PD256, - ssa.OpAMD64VFMADD132PD512, - ssa.OpAMD64VFMADD213PS128, + case ssa.OpAMD64VFMADD213PS128, ssa.OpAMD64VFMADD213PS256, ssa.OpAMD64VFMADD213PS512, ssa.OpAMD64VFMADD213PD128, ssa.OpAMD64VFMADD213PD256, ssa.OpAMD64VFMADD213PD512, - ssa.OpAMD64VFMADD231PS128, - ssa.OpAMD64VFMADD231PS256, - ssa.OpAMD64VFMADD231PS512, - ssa.OpAMD64VFMADD231PD128, - ssa.OpAMD64VFMADD231PD256, - ssa.OpAMD64VFMADD231PD512, - ssa.OpAMD64VFMADDSUB132PS128, - ssa.OpAMD64VFMADDSUB132PS256, - ssa.OpAMD64VFMADDSUB132PS512, - ssa.OpAMD64VFMADDSUB132PD128, - ssa.OpAMD64VFMADDSUB132PD256, - ssa.OpAMD64VFMADDSUB132PD512, ssa.OpAMD64VFMADDSUB213PS128, ssa.OpAMD64VFMADDSUB213PS256, ssa.OpAMD64VFMADDSUB213PS512, ssa.OpAMD64VFMADDSUB213PD128, ssa.OpAMD64VFMADDSUB213PD256, ssa.OpAMD64VFMADDSUB213PD512, - ssa.OpAMD64VFMADDSUB231PS128, - ssa.OpAMD64VFMADDSUB231PS256, - ssa.OpAMD64VFMADDSUB231PS512, - ssa.OpAMD64VFMADDSUB231PD128, - ssa.OpAMD64VFMADDSUB231PD256, - ssa.OpAMD64VFMADDSUB231PD512, - ssa.OpAMD64VFMSUB132PS128, - ssa.OpAMD64VFMSUB132PS256, - ssa.OpAMD64VFMSUB132PS512, - 
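// [Editor's note: the sketch below is illustrative only and is not part of the
// patch text. It restates, in scalar Go, the semantics implied by the two
// changes in this region: (1) the corrected operand types for the quad
// dot-product-accumulate stubs above, and (2) why only the 213 shape of the
// fused multiply-add family keeps a dedicated op. Names and operand orders
// here are the editor's assumptions, not verbatim from the Go sources.]
//
// (1) VPDPBUSD-style quad dot product: the multiplicands are narrow (unsigned
// 8-bit times signed 8-bit) and only the accumulator is 32-bit, which is why
// the z parameter changes from Int32xN to Int8xN:
//
//	func quadDotProdAccumulate(acc int32, y [4]uint8, z [4]int8) int32 {
//		for i := range y {
//			acc += int32(y[i]) * int32(z[i]) // widen, multiply, accumulate
//		}
//		return acc
//	}
//
// (2) The 132/213/231 FMA suffixes only permute which sources are multiplied
// and which is added; all three compute a fused a*b + c:
//
//	fma132 := func(x, y, z float64) float64 { return x*z + y } // VFMADD132: dst = dst*src3 + src2
//	fma213 := func(x, y, z float64) float64 { return y*x + z } // VFMADD213: dst = src2*dst + src3
//	fma231 := func(x, y, z float64) float64 { return y*z + x } // VFMADD231: dst = src2*src3 + dst
//	// fma132(x, y, z) == fma213(x, z, y) and fma231(x, y, z) == fma213(y, z, x),
//	// so keeping only the 213 ops loses no expressiveness; the other encodings
//	// can be reintroduced during lowering when they save a register move.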
ssa.OpAMD64VFMSUB132PD128, - ssa.OpAMD64VFMSUB132PD256, - ssa.OpAMD64VFMSUB132PD512, - ssa.OpAMD64VFMSUB213PS128, - ssa.OpAMD64VFMSUB213PS256, - ssa.OpAMD64VFMSUB213PS512, - ssa.OpAMD64VFMSUB213PD128, - ssa.OpAMD64VFMSUB213PD256, - ssa.OpAMD64VFMSUB213PD512, - ssa.OpAMD64VFMSUB231PS128, - ssa.OpAMD64VFMSUB231PS256, - ssa.OpAMD64VFMSUB231PS512, - ssa.OpAMD64VFMSUB231PD128, - ssa.OpAMD64VFMSUB231PD256, - ssa.OpAMD64VFMSUB231PD512, - ssa.OpAMD64VFMSUBADD132PS128, - ssa.OpAMD64VFMSUBADD132PS256, - ssa.OpAMD64VFMSUBADD132PS512, - ssa.OpAMD64VFMSUBADD132PD128, - ssa.OpAMD64VFMSUBADD132PD256, - ssa.OpAMD64VFMSUBADD132PD512, ssa.OpAMD64VFMSUBADD213PS128, ssa.OpAMD64VFMSUBADD213PS256, ssa.OpAMD64VFMSUBADD213PS512, ssa.OpAMD64VFMSUBADD213PD128, ssa.OpAMD64VFMSUBADD213PD256, ssa.OpAMD64VFMSUBADD213PD512, - ssa.OpAMD64VFMSUBADD231PS128, - ssa.OpAMD64VFMSUBADD231PS256, - ssa.OpAMD64VFMSUBADD231PS512, - ssa.OpAMD64VFMSUBADD231PD128, - ssa.OpAMD64VFMSUBADD231PD256, - ssa.OpAMD64VFMSUBADD231PD512, - ssa.OpAMD64VFNMADD132PS128, - ssa.OpAMD64VFNMADD132PS256, - ssa.OpAMD64VFNMADD132PS512, - ssa.OpAMD64VFNMADD132PD128, - ssa.OpAMD64VFNMADD132PD256, - ssa.OpAMD64VFNMADD132PD512, - ssa.OpAMD64VFNMADD213PS128, - ssa.OpAMD64VFNMADD213PS256, - ssa.OpAMD64VFNMADD213PS512, - ssa.OpAMD64VFNMADD213PD128, - ssa.OpAMD64VFNMADD213PD256, - ssa.OpAMD64VFNMADD213PD512, - ssa.OpAMD64VFNMADD231PS128, - ssa.OpAMD64VFNMADD231PS256, - ssa.OpAMD64VFNMADD231PS512, - ssa.OpAMD64VFNMADD231PD128, - ssa.OpAMD64VFNMADD231PD256, - ssa.OpAMD64VFNMADD231PD512, - ssa.OpAMD64VFNMSUB132PS128, - ssa.OpAMD64VFNMSUB132PS256, - ssa.OpAMD64VFNMSUB132PS512, - ssa.OpAMD64VFNMSUB132PD128, - ssa.OpAMD64VFNMSUB132PD256, - ssa.OpAMD64VFNMSUB132PD512, - ssa.OpAMD64VFNMSUB213PS128, - ssa.OpAMD64VFNMSUB213PS256, - ssa.OpAMD64VFNMSUB213PS512, - ssa.OpAMD64VFNMSUB213PD128, - ssa.OpAMD64VFNMSUB213PD256, - ssa.OpAMD64VFNMSUB213PD512, - ssa.OpAMD64VFNMSUB231PS128, - ssa.OpAMD64VFNMSUB231PS256, - ssa.OpAMD64VFNMSUB231PS512, - ssa.OpAMD64VFNMSUB231PD128, - ssa.OpAMD64VFNMSUB231PD256, - ssa.OpAMD64VFNMSUB231PD512, ssa.OpAMD64VPDPWSSD128, ssa.OpAMD64VPDPWSSD256, ssa.OpAMD64VPDPWSSD512, @@ -776,114 +686,24 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPDPBUSD512: p = simdFp31ResultInArg0(s, v) - case ssa.OpAMD64VFMADD132PSMasked128, - ssa.OpAMD64VFMADD132PSMasked256, - ssa.OpAMD64VFMADD132PSMasked512, - ssa.OpAMD64VFMADD132PDMasked128, - ssa.OpAMD64VFMADD132PDMasked256, - ssa.OpAMD64VFMADD132PDMasked512, - ssa.OpAMD64VFMADD213PSMasked128, + case ssa.OpAMD64VFMADD213PSMasked128, ssa.OpAMD64VFMADD213PSMasked256, ssa.OpAMD64VFMADD213PSMasked512, ssa.OpAMD64VFMADD213PDMasked128, ssa.OpAMD64VFMADD213PDMasked256, ssa.OpAMD64VFMADD213PDMasked512, - ssa.OpAMD64VFMADD231PSMasked128, - ssa.OpAMD64VFMADD231PSMasked256, - ssa.OpAMD64VFMADD231PSMasked512, - ssa.OpAMD64VFMADD231PDMasked128, - ssa.OpAMD64VFMADD231PDMasked256, - ssa.OpAMD64VFMADD231PDMasked512, - ssa.OpAMD64VFMADDSUB132PSMasked128, - ssa.OpAMD64VFMADDSUB132PSMasked256, - ssa.OpAMD64VFMADDSUB132PSMasked512, - ssa.OpAMD64VFMADDSUB132PDMasked128, - ssa.OpAMD64VFMADDSUB132PDMasked256, - ssa.OpAMD64VFMADDSUB132PDMasked512, ssa.OpAMD64VFMADDSUB213PSMasked128, ssa.OpAMD64VFMADDSUB213PSMasked256, ssa.OpAMD64VFMADDSUB213PSMasked512, ssa.OpAMD64VFMADDSUB213PDMasked128, ssa.OpAMD64VFMADDSUB213PDMasked256, ssa.OpAMD64VFMADDSUB213PDMasked512, - ssa.OpAMD64VFMADDSUB231PSMasked128, - ssa.OpAMD64VFMADDSUB231PSMasked256, - ssa.OpAMD64VFMADDSUB231PSMasked512, - ssa.OpAMD64VFMADDSUB231PDMasked128, - 
ssa.OpAMD64VFMADDSUB231PDMasked256, - ssa.OpAMD64VFMADDSUB231PDMasked512, - ssa.OpAMD64VFMSUB132PSMasked128, - ssa.OpAMD64VFMSUB132PSMasked256, - ssa.OpAMD64VFMSUB132PSMasked512, - ssa.OpAMD64VFMSUB132PDMasked128, - ssa.OpAMD64VFMSUB132PDMasked256, - ssa.OpAMD64VFMSUB132PDMasked512, - ssa.OpAMD64VFMSUB213PSMasked128, - ssa.OpAMD64VFMSUB213PSMasked256, - ssa.OpAMD64VFMSUB213PSMasked512, - ssa.OpAMD64VFMSUB213PDMasked128, - ssa.OpAMD64VFMSUB213PDMasked256, - ssa.OpAMD64VFMSUB213PDMasked512, - ssa.OpAMD64VFMSUB231PSMasked128, - ssa.OpAMD64VFMSUB231PSMasked256, - ssa.OpAMD64VFMSUB231PSMasked512, - ssa.OpAMD64VFMSUB231PDMasked128, - ssa.OpAMD64VFMSUB231PDMasked256, - ssa.OpAMD64VFMSUB231PDMasked512, - ssa.OpAMD64VFMSUBADD132PSMasked128, - ssa.OpAMD64VFMSUBADD132PSMasked256, - ssa.OpAMD64VFMSUBADD132PSMasked512, - ssa.OpAMD64VFMSUBADD132PDMasked128, - ssa.OpAMD64VFMSUBADD132PDMasked256, - ssa.OpAMD64VFMSUBADD132PDMasked512, ssa.OpAMD64VFMSUBADD213PSMasked128, ssa.OpAMD64VFMSUBADD213PSMasked256, ssa.OpAMD64VFMSUBADD213PSMasked512, ssa.OpAMD64VFMSUBADD213PDMasked128, ssa.OpAMD64VFMSUBADD213PDMasked256, ssa.OpAMD64VFMSUBADD213PDMasked512, - ssa.OpAMD64VFMSUBADD231PSMasked128, - ssa.OpAMD64VFMSUBADD231PSMasked256, - ssa.OpAMD64VFMSUBADD231PSMasked512, - ssa.OpAMD64VFMSUBADD231PDMasked128, - ssa.OpAMD64VFMSUBADD231PDMasked256, - ssa.OpAMD64VFMSUBADD231PDMasked512, - ssa.OpAMD64VFNMADD132PSMasked128, - ssa.OpAMD64VFNMADD132PSMasked256, - ssa.OpAMD64VFNMADD132PSMasked512, - ssa.OpAMD64VFNMADD132PDMasked128, - ssa.OpAMD64VFNMADD132PDMasked256, - ssa.OpAMD64VFNMADD132PDMasked512, - ssa.OpAMD64VFNMADD213PSMasked128, - ssa.OpAMD64VFNMADD213PSMasked256, - ssa.OpAMD64VFNMADD213PSMasked512, - ssa.OpAMD64VFNMADD213PDMasked128, - ssa.OpAMD64VFNMADD213PDMasked256, - ssa.OpAMD64VFNMADD213PDMasked512, - ssa.OpAMD64VFNMADD231PSMasked128, - ssa.OpAMD64VFNMADD231PSMasked256, - ssa.OpAMD64VFNMADD231PSMasked512, - ssa.OpAMD64VFNMADD231PDMasked128, - ssa.OpAMD64VFNMADD231PDMasked256, - ssa.OpAMD64VFNMADD231PDMasked512, - ssa.OpAMD64VFNMSUB132PSMasked128, - ssa.OpAMD64VFNMSUB132PSMasked256, - ssa.OpAMD64VFNMSUB132PSMasked512, - ssa.OpAMD64VFNMSUB132PDMasked128, - ssa.OpAMD64VFNMSUB132PDMasked256, - ssa.OpAMD64VFNMSUB132PDMasked512, - ssa.OpAMD64VFNMSUB213PSMasked128, - ssa.OpAMD64VFNMSUB213PSMasked256, - ssa.OpAMD64VFNMSUB213PSMasked512, - ssa.OpAMD64VFNMSUB213PDMasked128, - ssa.OpAMD64VFNMSUB213PDMasked256, - ssa.OpAMD64VFNMSUB213PDMasked512, - ssa.OpAMD64VFNMSUB231PSMasked128, - ssa.OpAMD64VFNMSUB231PSMasked256, - ssa.OpAMD64VFNMSUB231PSMasked512, - ssa.OpAMD64VFNMSUB231PDMasked128, - ssa.OpAMD64VFNMSUB231PDMasked256, - ssa.OpAMD64VFNMSUB231PDMasked512, ssa.OpAMD64VPDPWSSDMasked128, ssa.OpAMD64VPDPWSSDMasked256, ssa.OpAMD64VPDPWSSDMasked512, @@ -995,114 +815,24 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VDIVPDMasked128, ssa.OpAMD64VDIVPDMasked256, ssa.OpAMD64VDIVPDMasked512, - ssa.OpAMD64VFMADD132PSMasked128, - ssa.OpAMD64VFMADD132PSMasked256, - ssa.OpAMD64VFMADD132PSMasked512, - ssa.OpAMD64VFMADD132PDMasked128, - ssa.OpAMD64VFMADD132PDMasked256, - ssa.OpAMD64VFMADD132PDMasked512, ssa.OpAMD64VFMADD213PSMasked128, ssa.OpAMD64VFMADD213PSMasked256, ssa.OpAMD64VFMADD213PSMasked512, ssa.OpAMD64VFMADD213PDMasked128, ssa.OpAMD64VFMADD213PDMasked256, ssa.OpAMD64VFMADD213PDMasked512, - ssa.OpAMD64VFMADD231PSMasked128, - ssa.OpAMD64VFMADD231PSMasked256, - ssa.OpAMD64VFMADD231PSMasked512, - ssa.OpAMD64VFMADD231PDMasked128, - ssa.OpAMD64VFMADD231PDMasked256, - ssa.OpAMD64VFMADD231PDMasked512, - 
ssa.OpAMD64VFMADDSUB132PSMasked128, - ssa.OpAMD64VFMADDSUB132PSMasked256, - ssa.OpAMD64VFMADDSUB132PSMasked512, - ssa.OpAMD64VFMADDSUB132PDMasked128, - ssa.OpAMD64VFMADDSUB132PDMasked256, - ssa.OpAMD64VFMADDSUB132PDMasked512, ssa.OpAMD64VFMADDSUB213PSMasked128, ssa.OpAMD64VFMADDSUB213PSMasked256, ssa.OpAMD64VFMADDSUB213PSMasked512, ssa.OpAMD64VFMADDSUB213PDMasked128, ssa.OpAMD64VFMADDSUB213PDMasked256, ssa.OpAMD64VFMADDSUB213PDMasked512, - ssa.OpAMD64VFMADDSUB231PSMasked128, - ssa.OpAMD64VFMADDSUB231PSMasked256, - ssa.OpAMD64VFMADDSUB231PSMasked512, - ssa.OpAMD64VFMADDSUB231PDMasked128, - ssa.OpAMD64VFMADDSUB231PDMasked256, - ssa.OpAMD64VFMADDSUB231PDMasked512, - ssa.OpAMD64VFMSUB132PSMasked128, - ssa.OpAMD64VFMSUB132PSMasked256, - ssa.OpAMD64VFMSUB132PSMasked512, - ssa.OpAMD64VFMSUB132PDMasked128, - ssa.OpAMD64VFMSUB132PDMasked256, - ssa.OpAMD64VFMSUB132PDMasked512, - ssa.OpAMD64VFMSUB213PSMasked128, - ssa.OpAMD64VFMSUB213PSMasked256, - ssa.OpAMD64VFMSUB213PSMasked512, - ssa.OpAMD64VFMSUB213PDMasked128, - ssa.OpAMD64VFMSUB213PDMasked256, - ssa.OpAMD64VFMSUB213PDMasked512, - ssa.OpAMD64VFMSUB231PSMasked128, - ssa.OpAMD64VFMSUB231PSMasked256, - ssa.OpAMD64VFMSUB231PSMasked512, - ssa.OpAMD64VFMSUB231PDMasked128, - ssa.OpAMD64VFMSUB231PDMasked256, - ssa.OpAMD64VFMSUB231PDMasked512, - ssa.OpAMD64VFMSUBADD132PSMasked128, - ssa.OpAMD64VFMSUBADD132PSMasked256, - ssa.OpAMD64VFMSUBADD132PSMasked512, - ssa.OpAMD64VFMSUBADD132PDMasked128, - ssa.OpAMD64VFMSUBADD132PDMasked256, - ssa.OpAMD64VFMSUBADD132PDMasked512, ssa.OpAMD64VFMSUBADD213PSMasked128, ssa.OpAMD64VFMSUBADD213PSMasked256, ssa.OpAMD64VFMSUBADD213PSMasked512, ssa.OpAMD64VFMSUBADD213PDMasked128, ssa.OpAMD64VFMSUBADD213PDMasked256, ssa.OpAMD64VFMSUBADD213PDMasked512, - ssa.OpAMD64VFMSUBADD231PSMasked128, - ssa.OpAMD64VFMSUBADD231PSMasked256, - ssa.OpAMD64VFMSUBADD231PSMasked512, - ssa.OpAMD64VFMSUBADD231PDMasked128, - ssa.OpAMD64VFMSUBADD231PDMasked256, - ssa.OpAMD64VFMSUBADD231PDMasked512, - ssa.OpAMD64VFNMADD132PSMasked128, - ssa.OpAMD64VFNMADD132PSMasked256, - ssa.OpAMD64VFNMADD132PSMasked512, - ssa.OpAMD64VFNMADD132PDMasked128, - ssa.OpAMD64VFNMADD132PDMasked256, - ssa.OpAMD64VFNMADD132PDMasked512, - ssa.OpAMD64VFNMADD213PSMasked128, - ssa.OpAMD64VFNMADD213PSMasked256, - ssa.OpAMD64VFNMADD213PSMasked512, - ssa.OpAMD64VFNMADD213PDMasked128, - ssa.OpAMD64VFNMADD213PDMasked256, - ssa.OpAMD64VFNMADD213PDMasked512, - ssa.OpAMD64VFNMADD231PSMasked128, - ssa.OpAMD64VFNMADD231PSMasked256, - ssa.OpAMD64VFNMADD231PSMasked512, - ssa.OpAMD64VFNMADD231PDMasked128, - ssa.OpAMD64VFNMADD231PDMasked256, - ssa.OpAMD64VFNMADD231PDMasked512, - ssa.OpAMD64VFNMSUB132PSMasked128, - ssa.OpAMD64VFNMSUB132PSMasked256, - ssa.OpAMD64VFNMSUB132PSMasked512, - ssa.OpAMD64VFNMSUB132PDMasked128, - ssa.OpAMD64VFNMSUB132PDMasked256, - ssa.OpAMD64VFNMSUB132PDMasked512, - ssa.OpAMD64VFNMSUB213PSMasked128, - ssa.OpAMD64VFNMSUB213PSMasked256, - ssa.OpAMD64VFNMSUB213PSMasked512, - ssa.OpAMD64VFNMSUB213PDMasked128, - ssa.OpAMD64VFNMSUB213PDMasked256, - ssa.OpAMD64VFNMSUB213PDMasked512, - ssa.OpAMD64VFNMSUB231PSMasked128, - ssa.OpAMD64VFNMSUB231PSMasked256, - ssa.OpAMD64VFNMSUB231PSMasked512, - ssa.OpAMD64VFNMSUB231PDMasked128, - ssa.OpAMD64VFNMSUB231PDMasked256, - ssa.OpAMD64VFNMSUB231PDMasked512, ssa.OpAMD64VMAXPSMasked128, ssa.OpAMD64VMAXPSMasked256, ssa.OpAMD64VMAXPSMasked512, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index e8c59985005e6a..cb57ae31b62c42 100644 --- 
a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -233,114 +233,24 @@ (FloorWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+1] x) (FloorWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+1] x) (FloorWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+1] x) -(FusedMultiplyAdd132Float32x16 ...) => (VFMADD132PS512 ...) -(FusedMultiplyAdd132Float32x4 ...) => (VFMADD132PS128 ...) -(FusedMultiplyAdd132Float32x8 ...) => (VFMADD132PS256 ...) -(FusedMultiplyAdd132Float64x2 ...) => (VFMADD132PD128 ...) -(FusedMultiplyAdd132Float64x4 ...) => (VFMADD132PD256 ...) -(FusedMultiplyAdd132Float64x8 ...) => (VFMADD132PD512 ...) -(FusedMultiplyAdd213Float32x16 ...) => (VFMADD213PS512 ...) -(FusedMultiplyAdd213Float32x4 ...) => (VFMADD213PS128 ...) -(FusedMultiplyAdd213Float32x8 ...) => (VFMADD213PS256 ...) -(FusedMultiplyAdd213Float64x2 ...) => (VFMADD213PD128 ...) -(FusedMultiplyAdd213Float64x4 ...) => (VFMADD213PD256 ...) -(FusedMultiplyAdd213Float64x8 ...) => (VFMADD213PD512 ...) -(FusedMultiplyAdd231Float32x16 ...) => (VFMADD231PS512 ...) -(FusedMultiplyAdd231Float32x4 ...) => (VFMADD231PS128 ...) -(FusedMultiplyAdd231Float32x8 ...) => (VFMADD231PS256 ...) -(FusedMultiplyAdd231Float64x2 ...) => (VFMADD231PD128 ...) -(FusedMultiplyAdd231Float64x4 ...) => (VFMADD231PD256 ...) -(FusedMultiplyAdd231Float64x8 ...) => (VFMADD231PD512 ...) -(FusedMultiplyAddSub132Float32x16 ...) => (VFMADDSUB132PS512 ...) -(FusedMultiplyAddSub132Float32x4 ...) => (VFMADDSUB132PS128 ...) -(FusedMultiplyAddSub132Float32x8 ...) => (VFMADDSUB132PS256 ...) -(FusedMultiplyAddSub132Float64x2 ...) => (VFMADDSUB132PD128 ...) -(FusedMultiplyAddSub132Float64x4 ...) => (VFMADDSUB132PD256 ...) -(FusedMultiplyAddSub132Float64x8 ...) => (VFMADDSUB132PD512 ...) -(FusedMultiplyAddSub213Float32x16 ...) => (VFMADDSUB213PS512 ...) -(FusedMultiplyAddSub213Float32x4 ...) => (VFMADDSUB213PS128 ...) -(FusedMultiplyAddSub213Float32x8 ...) => (VFMADDSUB213PS256 ...) -(FusedMultiplyAddSub213Float64x2 ...) => (VFMADDSUB213PD128 ...) -(FusedMultiplyAddSub213Float64x4 ...) => (VFMADDSUB213PD256 ...) -(FusedMultiplyAddSub213Float64x8 ...) => (VFMADDSUB213PD512 ...) -(FusedMultiplyAddSub231Float32x16 ...) => (VFMADDSUB231PS512 ...) -(FusedMultiplyAddSub231Float32x4 ...) => (VFMADDSUB231PS128 ...) -(FusedMultiplyAddSub231Float32x8 ...) => (VFMADDSUB231PS256 ...) -(FusedMultiplyAddSub231Float64x2 ...) => (VFMADDSUB231PD128 ...) -(FusedMultiplyAddSub231Float64x4 ...) => (VFMADDSUB231PD256 ...) -(FusedMultiplyAddSub231Float64x8 ...) => (VFMADDSUB231PD512 ...) -(FusedMultiplySub132Float32x16 ...) => (VFMSUB132PS512 ...) -(FusedMultiplySub132Float32x4 ...) => (VFMSUB132PS128 ...) -(FusedMultiplySub132Float32x8 ...) => (VFMSUB132PS256 ...) -(FusedMultiplySub132Float64x2 ...) => (VFMSUB132PD128 ...) -(FusedMultiplySub132Float64x4 ...) => (VFMSUB132PD256 ...) -(FusedMultiplySub132Float64x8 ...) => (VFMSUB132PD512 ...) -(FusedMultiplySub213Float32x16 ...) => (VFMSUB213PS512 ...) -(FusedMultiplySub213Float32x4 ...) => (VFMSUB213PS128 ...) -(FusedMultiplySub213Float32x8 ...) => (VFMSUB213PS256 ...) -(FusedMultiplySub213Float64x2 ...) => (VFMSUB213PD128 ...) -(FusedMultiplySub213Float64x4 ...) => (VFMSUB213PD256 ...) -(FusedMultiplySub213Float64x8 ...) => (VFMSUB213PD512 ...) -(FusedMultiplySub231Float32x16 ...) => (VFMSUB231PS512 ...) -(FusedMultiplySub231Float32x4 ...) => (VFMSUB231PS128 ...) -(FusedMultiplySub231Float32x8 ...) => (VFMSUB231PS256 ...) -(FusedMultiplySub231Float64x2 ...) 
=> (VFMSUB231PD128 ...) -(FusedMultiplySub231Float64x4 ...) => (VFMSUB231PD256 ...) -(FusedMultiplySub231Float64x8 ...) => (VFMSUB231PD512 ...) -(FusedMultiplySubAdd132Float32x16 ...) => (VFMSUBADD132PS512 ...) -(FusedMultiplySubAdd132Float32x4 ...) => (VFMSUBADD132PS128 ...) -(FusedMultiplySubAdd132Float32x8 ...) => (VFMSUBADD132PS256 ...) -(FusedMultiplySubAdd132Float64x2 ...) => (VFMSUBADD132PD128 ...) -(FusedMultiplySubAdd132Float64x4 ...) => (VFMSUBADD132PD256 ...) -(FusedMultiplySubAdd132Float64x8 ...) => (VFMSUBADD132PD512 ...) -(FusedMultiplySubAdd213Float32x16 ...) => (VFMSUBADD213PS512 ...) -(FusedMultiplySubAdd213Float32x4 ...) => (VFMSUBADD213PS128 ...) -(FusedMultiplySubAdd213Float32x8 ...) => (VFMSUBADD213PS256 ...) -(FusedMultiplySubAdd213Float64x2 ...) => (VFMSUBADD213PD128 ...) -(FusedMultiplySubAdd213Float64x4 ...) => (VFMSUBADD213PD256 ...) -(FusedMultiplySubAdd213Float64x8 ...) => (VFMSUBADD213PD512 ...) -(FusedMultiplySubAdd231Float32x16 ...) => (VFMSUBADD231PS512 ...) -(FusedMultiplySubAdd231Float32x4 ...) => (VFMSUBADD231PS128 ...) -(FusedMultiplySubAdd231Float32x8 ...) => (VFMSUBADD231PS256 ...) -(FusedMultiplySubAdd231Float64x2 ...) => (VFMSUBADD231PD128 ...) -(FusedMultiplySubAdd231Float64x4 ...) => (VFMSUBADD231PD256 ...) -(FusedMultiplySubAdd231Float64x8 ...) => (VFMSUBADD231PD512 ...) -(FusedNegativeMultiplyAdd132Float32x16 ...) => (VFNMADD132PS512 ...) -(FusedNegativeMultiplyAdd132Float32x4 ...) => (VFNMADD132PS128 ...) -(FusedNegativeMultiplyAdd132Float32x8 ...) => (VFNMADD132PS256 ...) -(FusedNegativeMultiplyAdd132Float64x2 ...) => (VFNMADD132PD128 ...) -(FusedNegativeMultiplyAdd132Float64x4 ...) => (VFNMADD132PD256 ...) -(FusedNegativeMultiplyAdd132Float64x8 ...) => (VFNMADD132PD512 ...) -(FusedNegativeMultiplyAdd213Float32x16 ...) => (VFNMADD213PS512 ...) -(FusedNegativeMultiplyAdd213Float32x4 ...) => (VFNMADD213PS128 ...) -(FusedNegativeMultiplyAdd213Float32x8 ...) => (VFNMADD213PS256 ...) -(FusedNegativeMultiplyAdd213Float64x2 ...) => (VFNMADD213PD128 ...) -(FusedNegativeMultiplyAdd213Float64x4 ...) => (VFNMADD213PD256 ...) -(FusedNegativeMultiplyAdd213Float64x8 ...) => (VFNMADD213PD512 ...) -(FusedNegativeMultiplyAdd231Float32x16 ...) => (VFNMADD231PS512 ...) -(FusedNegativeMultiplyAdd231Float32x4 ...) => (VFNMADD231PS128 ...) -(FusedNegativeMultiplyAdd231Float32x8 ...) => (VFNMADD231PS256 ...) -(FusedNegativeMultiplyAdd231Float64x2 ...) => (VFNMADD231PD128 ...) -(FusedNegativeMultiplyAdd231Float64x4 ...) => (VFNMADD231PD256 ...) -(FusedNegativeMultiplyAdd231Float64x8 ...) => (VFNMADD231PD512 ...) -(FusedNegativeMultiplySub132Float32x16 ...) => (VFNMSUB132PS512 ...) -(FusedNegativeMultiplySub132Float32x4 ...) => (VFNMSUB132PS128 ...) -(FusedNegativeMultiplySub132Float32x8 ...) => (VFNMSUB132PS256 ...) -(FusedNegativeMultiplySub132Float64x2 ...) => (VFNMSUB132PD128 ...) -(FusedNegativeMultiplySub132Float64x4 ...) => (VFNMSUB132PD256 ...) -(FusedNegativeMultiplySub132Float64x8 ...) => (VFNMSUB132PD512 ...) -(FusedNegativeMultiplySub213Float32x16 ...) => (VFNMSUB213PS512 ...) -(FusedNegativeMultiplySub213Float32x4 ...) => (VFNMSUB213PS128 ...) -(FusedNegativeMultiplySub213Float32x8 ...) => (VFNMSUB213PS256 ...) -(FusedNegativeMultiplySub213Float64x2 ...) => (VFNMSUB213PD128 ...) -(FusedNegativeMultiplySub213Float64x4 ...) => (VFNMSUB213PD256 ...) -(FusedNegativeMultiplySub213Float64x8 ...) => (VFNMSUB213PD512 ...) -(FusedNegativeMultiplySub231Float32x16 ...) => (VFNMSUB231PS512 ...) -(FusedNegativeMultiplySub231Float32x4 ...) => (VFNMSUB231PS128 ...) 
-(FusedNegativeMultiplySub231Float32x8 ...) => (VFNMSUB231PS256 ...) -(FusedNegativeMultiplySub231Float64x2 ...) => (VFNMSUB231PD128 ...) -(FusedNegativeMultiplySub231Float64x4 ...) => (VFNMSUB231PD256 ...) -(FusedNegativeMultiplySub231Float64x8 ...) => (VFNMSUB231PD512 ...) +(FusedMultiplyAddFloat32x16 ...) => (VFMADD213PS512 ...) +(FusedMultiplyAddFloat32x4 ...) => (VFMADD213PS128 ...) +(FusedMultiplyAddFloat32x8 ...) => (VFMADD213PS256 ...) +(FusedMultiplyAddFloat64x2 ...) => (VFMADD213PD128 ...) +(FusedMultiplyAddFloat64x4 ...) => (VFMADD213PD256 ...) +(FusedMultiplyAddFloat64x8 ...) => (VFMADD213PD512 ...) +(FusedMultiplyAddSubFloat32x16 ...) => (VFMADDSUB213PS512 ...) +(FusedMultiplyAddSubFloat32x4 ...) => (VFMADDSUB213PS128 ...) +(FusedMultiplyAddSubFloat32x8 ...) => (VFMADDSUB213PS256 ...) +(FusedMultiplyAddSubFloat64x2 ...) => (VFMADDSUB213PD128 ...) +(FusedMultiplyAddSubFloat64x4 ...) => (VFMADDSUB213PD256 ...) +(FusedMultiplyAddSubFloat64x8 ...) => (VFMADDSUB213PD512 ...) +(FusedMultiplySubAddFloat32x16 ...) => (VFMSUBADD213PS512 ...) +(FusedMultiplySubAddFloat32x4 ...) => (VFMSUBADD213PS128 ...) +(FusedMultiplySubAddFloat32x8 ...) => (VFMSUBADD213PS256 ...) +(FusedMultiplySubAddFloat64x2 ...) => (VFMSUBADD213PD128 ...) +(FusedMultiplySubAddFloat64x4 ...) => (VFMSUBADD213PD256 ...) +(FusedMultiplySubAddFloat64x8 ...) => (VFMSUBADD213PD512 ...) (GreaterFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [6] x y)) (GreaterFloat32x4 x y) => (VCMPPS128 [6] x y) (GreaterFloat32x8 x y) => (VCMPPS256 [6] x y) @@ -671,114 +581,24 @@ (MaskedFloorWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) (MaskedFloorWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) (MaskedFloorWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) -(MaskedFusedMultiplyAdd132Float32x16 x y z mask) => (VFMADD132PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedFusedMultiplyAdd132Float32x4 x y z mask) => (VFMADD132PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedFusedMultiplyAdd132Float32x8 x y z mask) => (VFMADD132PSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedFusedMultiplyAdd132Float64x2 x y z mask) => (VFMADD132PDMasked128 x y z (VPMOVVec64x2ToM mask)) -(MaskedFusedMultiplyAdd132Float64x4 x y z mask) => (VFMADD132PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(MaskedFusedMultiplyAdd132Float64x8 x y z mask) => (VFMADD132PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedFusedMultiplyAdd213Float32x16 x y z mask) => (VFMADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedFusedMultiplyAdd213Float32x4 x y z mask) => (VFMADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedFusedMultiplyAdd213Float32x8 x y z mask) => (VFMADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedFusedMultiplyAdd213Float64x2 x y z mask) => (VFMADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) -(MaskedFusedMultiplyAdd213Float64x4 x y z mask) => (VFMADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(MaskedFusedMultiplyAdd213Float64x8 x y z mask) => (VFMADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedFusedMultiplyAdd231Float32x16 x y z mask) => (VFMADD231PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedFusedMultiplyAdd231Float32x4 x y z mask) => (VFMADD231PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedFusedMultiplyAdd231Float32x8 x y z mask) => (VFMADD231PSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedFusedMultiplyAdd231Float64x2 x y z mask) => (VFMADD231PDMasked128 x y z (VPMOVVec64x2ToM mask)) 
-(MaskedFusedMultiplyAdd231Float64x4 x y z mask) => (VFMADD231PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(MaskedFusedMultiplyAdd231Float64x8 x y z mask) => (VFMADD231PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedFusedMultiplyAddSub132Float32x16 x y z mask) => (VFMADDSUB132PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedFusedMultiplyAddSub132Float32x4 x y z mask) => (VFMADDSUB132PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedFusedMultiplyAddSub132Float32x8 x y z mask) => (VFMADDSUB132PSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedFusedMultiplyAddSub132Float64x2 x y z mask) => (VFMADDSUB132PDMasked128 x y z (VPMOVVec64x2ToM mask)) -(MaskedFusedMultiplyAddSub132Float64x4 x y z mask) => (VFMADDSUB132PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(MaskedFusedMultiplyAddSub132Float64x8 x y z mask) => (VFMADDSUB132PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedFusedMultiplyAddSub213Float32x16 x y z mask) => (VFMADDSUB213PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedFusedMultiplyAddSub213Float32x4 x y z mask) => (VFMADDSUB213PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedFusedMultiplyAddSub213Float32x8 x y z mask) => (VFMADDSUB213PSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedFusedMultiplyAddSub213Float64x2 x y z mask) => (VFMADDSUB213PDMasked128 x y z (VPMOVVec64x2ToM mask)) -(MaskedFusedMultiplyAddSub213Float64x4 x y z mask) => (VFMADDSUB213PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(MaskedFusedMultiplyAddSub213Float64x8 x y z mask) => (VFMADDSUB213PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedFusedMultiplyAddSub231Float32x16 x y z mask) => (VFMADDSUB231PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedFusedMultiplyAddSub231Float32x4 x y z mask) => (VFMADDSUB231PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedFusedMultiplyAddSub231Float32x8 x y z mask) => (VFMADDSUB231PSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedFusedMultiplyAddSub231Float64x2 x y z mask) => (VFMADDSUB231PDMasked128 x y z (VPMOVVec64x2ToM mask)) -(MaskedFusedMultiplyAddSub231Float64x4 x y z mask) => (VFMADDSUB231PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(MaskedFusedMultiplyAddSub231Float64x8 x y z mask) => (VFMADDSUB231PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedFusedMultiplySub132Float32x16 x y z mask) => (VFMSUB132PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedFusedMultiplySub132Float32x4 x y z mask) => (VFMSUB132PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedFusedMultiplySub132Float32x8 x y z mask) => (VFMSUB132PSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedFusedMultiplySub132Float64x2 x y z mask) => (VFMSUB132PDMasked128 x y z (VPMOVVec64x2ToM mask)) -(MaskedFusedMultiplySub132Float64x4 x y z mask) => (VFMSUB132PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(MaskedFusedMultiplySub132Float64x8 x y z mask) => (VFMSUB132PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedFusedMultiplySub213Float32x16 x y z mask) => (VFMSUB213PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedFusedMultiplySub213Float32x4 x y z mask) => (VFMSUB213PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedFusedMultiplySub213Float32x8 x y z mask) => (VFMSUB213PSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedFusedMultiplySub213Float64x2 x y z mask) => (VFMSUB213PDMasked128 x y z (VPMOVVec64x2ToM mask)) -(MaskedFusedMultiplySub213Float64x4 x y z mask) => (VFMSUB213PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(MaskedFusedMultiplySub213Float64x8 x y z mask) => (VFMSUB213PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedFusedMultiplySub231Float32x16 x y z mask) => (VFMSUB231PSMasked512 x y z (VPMOVVec32x16ToM mask)) 
-(MaskedFusedMultiplySub231Float32x4 x y z mask) => (VFMSUB231PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedFusedMultiplySub231Float32x8 x y z mask) => (VFMSUB231PSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedFusedMultiplySub231Float64x2 x y z mask) => (VFMSUB231PDMasked128 x y z (VPMOVVec64x2ToM mask)) -(MaskedFusedMultiplySub231Float64x4 x y z mask) => (VFMSUB231PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(MaskedFusedMultiplySub231Float64x8 x y z mask) => (VFMSUB231PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedFusedMultiplySubAdd132Float32x16 x y z mask) => (VFMSUBADD132PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedFusedMultiplySubAdd132Float32x4 x y z mask) => (VFMSUBADD132PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedFusedMultiplySubAdd132Float32x8 x y z mask) => (VFMSUBADD132PSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedFusedMultiplySubAdd132Float64x2 x y z mask) => (VFMSUBADD132PDMasked128 x y z (VPMOVVec64x2ToM mask)) -(MaskedFusedMultiplySubAdd132Float64x4 x y z mask) => (VFMSUBADD132PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(MaskedFusedMultiplySubAdd132Float64x8 x y z mask) => (VFMSUBADD132PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedFusedMultiplySubAdd213Float32x16 x y z mask) => (VFMSUBADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedFusedMultiplySubAdd213Float32x4 x y z mask) => (VFMSUBADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedFusedMultiplySubAdd213Float32x8 x y z mask) => (VFMSUBADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedFusedMultiplySubAdd213Float64x2 x y z mask) => (VFMSUBADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) -(MaskedFusedMultiplySubAdd213Float64x4 x y z mask) => (VFMSUBADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(MaskedFusedMultiplySubAdd213Float64x8 x y z mask) => (VFMSUBADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedFusedMultiplySubAdd231Float32x16 x y z mask) => (VFMSUBADD231PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedFusedMultiplySubAdd231Float32x4 x y z mask) => (VFMSUBADD231PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedFusedMultiplySubAdd231Float32x8 x y z mask) => (VFMSUBADD231PSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedFusedMultiplySubAdd231Float64x2 x y z mask) => (VFMSUBADD231PDMasked128 x y z (VPMOVVec64x2ToM mask)) -(MaskedFusedMultiplySubAdd231Float64x4 x y z mask) => (VFMSUBADD231PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(MaskedFusedMultiplySubAdd231Float64x8 x y z mask) => (VFMSUBADD231PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedFusedNegativeMultiplyAdd132Float32x16 x y z mask) => (VFNMADD132PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedFusedNegativeMultiplyAdd132Float32x4 x y z mask) => (VFNMADD132PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedFusedNegativeMultiplyAdd132Float32x8 x y z mask) => (VFNMADD132PSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedFusedNegativeMultiplyAdd132Float64x2 x y z mask) => (VFNMADD132PDMasked128 x y z (VPMOVVec64x2ToM mask)) -(MaskedFusedNegativeMultiplyAdd132Float64x4 x y z mask) => (VFNMADD132PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(MaskedFusedNegativeMultiplyAdd132Float64x8 x y z mask) => (VFNMADD132PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedFusedNegativeMultiplyAdd213Float32x16 x y z mask) => (VFNMADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedFusedNegativeMultiplyAdd213Float32x4 x y z mask) => (VFNMADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedFusedNegativeMultiplyAdd213Float32x8 x y z mask) => (VFNMADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) 
-(MaskedFusedNegativeMultiplyAdd213Float64x2 x y z mask) => (VFNMADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) -(MaskedFusedNegativeMultiplyAdd213Float64x4 x y z mask) => (VFNMADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(MaskedFusedNegativeMultiplyAdd213Float64x8 x y z mask) => (VFNMADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedFusedNegativeMultiplyAdd231Float32x16 x y z mask) => (VFNMADD231PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedFusedNegativeMultiplyAdd231Float32x4 x y z mask) => (VFNMADD231PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedFusedNegativeMultiplyAdd231Float32x8 x y z mask) => (VFNMADD231PSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedFusedNegativeMultiplyAdd231Float64x2 x y z mask) => (VFNMADD231PDMasked128 x y z (VPMOVVec64x2ToM mask)) -(MaskedFusedNegativeMultiplyAdd231Float64x4 x y z mask) => (VFNMADD231PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(MaskedFusedNegativeMultiplyAdd231Float64x8 x y z mask) => (VFNMADD231PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedFusedNegativeMultiplySub132Float32x16 x y z mask) => (VFNMSUB132PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedFusedNegativeMultiplySub132Float32x4 x y z mask) => (VFNMSUB132PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedFusedNegativeMultiplySub132Float32x8 x y z mask) => (VFNMSUB132PSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedFusedNegativeMultiplySub132Float64x2 x y z mask) => (VFNMSUB132PDMasked128 x y z (VPMOVVec64x2ToM mask)) -(MaskedFusedNegativeMultiplySub132Float64x4 x y z mask) => (VFNMSUB132PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(MaskedFusedNegativeMultiplySub132Float64x8 x y z mask) => (VFNMSUB132PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedFusedNegativeMultiplySub213Float32x16 x y z mask) => (VFNMSUB213PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedFusedNegativeMultiplySub213Float32x4 x y z mask) => (VFNMSUB213PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedFusedNegativeMultiplySub213Float32x8 x y z mask) => (VFNMSUB213PSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedFusedNegativeMultiplySub213Float64x2 x y z mask) => (VFNMSUB213PDMasked128 x y z (VPMOVVec64x2ToM mask)) -(MaskedFusedNegativeMultiplySub213Float64x4 x y z mask) => (VFNMSUB213PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(MaskedFusedNegativeMultiplySub213Float64x8 x y z mask) => (VFNMSUB213PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedFusedNegativeMultiplySub231Float32x16 x y z mask) => (VFNMSUB231PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedFusedNegativeMultiplySub231Float32x4 x y z mask) => (VFNMSUB231PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedFusedNegativeMultiplySub231Float32x8 x y z mask) => (VFNMSUB231PSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedFusedNegativeMultiplySub231Float64x2 x y z mask) => (VFNMSUB231PDMasked128 x y z (VPMOVVec64x2ToM mask)) -(MaskedFusedNegativeMultiplySub231Float64x4 x y z mask) => (VFNMSUB231PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(MaskedFusedNegativeMultiplySub231Float64x8 x y z mask) => (VFNMSUB231PDMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedFusedMultiplyAddFloat32x16 x y z mask) => (VFMADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedFusedMultiplyAddFloat32x4 x y z mask) => (VFMADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedFusedMultiplyAddFloat32x8 x y z mask) => (VFMADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedFusedMultiplyAddFloat64x2 x y z mask) => (VFMADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(MaskedFusedMultiplyAddFloat64x4 x y z mask) => (VFMADD213PDMasked256 x y z 
(VPMOVVec64x4ToM mask)) +(MaskedFusedMultiplyAddFloat64x8 x y z mask) => (VFMADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedFusedMultiplyAddSubFloat32x16 x y z mask) => (VFMADDSUB213PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedFusedMultiplyAddSubFloat32x4 x y z mask) => (VFMADDSUB213PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedFusedMultiplyAddSubFloat32x8 x y z mask) => (VFMADDSUB213PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedFusedMultiplyAddSubFloat64x2 x y z mask) => (VFMADDSUB213PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(MaskedFusedMultiplyAddSubFloat64x4 x y z mask) => (VFMADDSUB213PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(MaskedFusedMultiplyAddSubFloat64x8 x y z mask) => (VFMADDSUB213PDMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedFusedMultiplySubAddFloat32x16 x y z mask) => (VFMSUBADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedFusedMultiplySubAddFloat32x4 x y z mask) => (VFMSUBADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedFusedMultiplySubAddFloat32x8 x y z mask) => (VFMSUBADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedFusedMultiplySubAddFloat64x2 x y z mask) => (VFMSUBADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(MaskedFusedMultiplySubAddFloat64x4 x y z mask) => (VFMSUBADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(MaskedFusedMultiplySubAddFloat64x8 x y z mask) => (VFMSUBADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) (MaskedGreaterFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [6] x y (VPMOVVec32x16ToM mask))) (MaskedGreaterFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [6] x y (VPMOVVec32x4ToM mask))) (MaskedGreaterFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [6] x y (VPMOVVec32x8ToM mask))) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index fbbebfc2094f0b..c46bc40443ff67 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -9,48 +9,18 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VRCP14PS512", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRSQRT14PS512", argLength: 1, reg: fp11, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VDIVPS512", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VFMADD132PS512", argLength: 3, reg: fp31, asm: "VFMADD132PS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VFMADD213PS512", argLength: 3, reg: fp31, asm: "VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADD231PS512", argLength: 3, reg: fp31, asm: "VFMADD231PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADDSUB132PS512", argLength: 3, reg: fp31, asm: "VFMADDSUB132PS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VFMADDSUB213PS512", argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADDSUB231PS512", argLength: 3, reg: fp31, asm: "VFMADDSUB231PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUB132PS512", argLength: 3, reg: fp31, asm: "VFMSUB132PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUB213PS512", argLength: 3, reg: fp31, asm: "VFMSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUB231PS512", argLength: 3, reg: fp31, 
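// [Editor's note: illustrative only, not part of the patch. The masked rules
// above route the mask operand through VPMOVVec*ToM so the masked machine op
// receives a K-register argument. A rough scalar model of one lane of the
// masked fused multiply-add, assuming merge semantics into the accumulator x
// (consistent with resultInArg0 on these ops, but an assumption nonetheless):
//
//	func maskedFMALane(x, y, z float64, m bool) float64 {
//		if m {
//			return x*y + z // lane selected by the mask: fused multiply-add
//		}
//		return x // lane not selected: accumulator value passes through
//	}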
asm: "VFMSUB231PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUBADD132PS512", argLength: 3, reg: fp31, asm: "VFMSUBADD132PS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VFMSUBADD213PS512", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUBADD231PS512", argLength: 3, reg: fp31, asm: "VFMSUBADD231PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFNMADD132PS512", argLength: 3, reg: fp31, asm: "VFNMADD132PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFNMADD213PS512", argLength: 3, reg: fp31, asm: "VFNMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFNMADD231PS512", argLength: 3, reg: fp31, asm: "VFNMADD231PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFNMSUB132PS512", argLength: 3, reg: fp31, asm: "VFNMSUB132PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFNMSUB213PS512", argLength: 3, reg: fp31, asm: "VFNMSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFNMSUB231PS512", argLength: 3, reg: fp31, asm: "VFNMSUB231PS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VADDPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VANDPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VANDNPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDNPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRCP14PSMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRSQRT14PSMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VDIVPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VFMADD132PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADD132PS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VFMADD213PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADD231PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADD231PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADDSUB132PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB132PS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VFMADDSUB213PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADDSUB231PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB231PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUB132PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB132PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUB213PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUB231PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB231PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUBADD132PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD132PS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VFMSUBADD213PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", 
resultInArg0: true}, - {name: "VFMSUBADD231PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD231PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFNMADD132PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD132PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFNMADD213PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFNMADD231PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD231PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFNMSUB132PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB132PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFNMSUB213PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFNMSUB231PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB231PS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VMAXPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMINPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMULPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -74,48 +44,18 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VRCP14PS128", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRSQRTPS128", argLength: 1, reg: fp11, asm: "VRSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPS128", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VFMADD132PS128", argLength: 3, reg: fp31, asm: "VFMADD132PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMADD213PS128", argLength: 3, reg: fp31, asm: "VFMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADD231PS128", argLength: 3, reg: fp31, asm: "VFMADD231PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADDSUB132PS128", argLength: 3, reg: fp31, asm: "VFMADDSUB132PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMADDSUB213PS128", argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADDSUB231PS128", argLength: 3, reg: fp31, asm: "VFMADDSUB231PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUB132PS128", argLength: 3, reg: fp31, asm: "VFMSUB132PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUB213PS128", argLength: 3, reg: fp31, asm: "VFMSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUB231PS128", argLength: 3, reg: fp31, asm: "VFMSUB231PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUBADD132PS128", argLength: 3, reg: fp31, asm: "VFMSUBADD132PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMSUBADD213PS128", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUBADD231PS128", argLength: 3, reg: fp31, asm: "VFMSUBADD231PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFNMADD132PS128", argLength: 3, reg: fp31, asm: "VFNMADD132PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: 
"VFNMADD213PS128", argLength: 3, reg: fp31, asm: "VFNMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFNMADD231PS128", argLength: 3, reg: fp31, asm: "VFNMADD231PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFNMSUB132PS128", argLength: 3, reg: fp31, asm: "VFNMSUB132PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFNMSUB213PS128", argLength: 3, reg: fp31, asm: "VFNMSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFNMSUB231PS128", argLength: 3, reg: fp31, asm: "VFNMSUB231PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VADDPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VANDPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VANDNPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDNPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRCP14PSMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRSQRT14PSMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VFMADD132PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADD132PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMADD213PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADD231PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADD231PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADDSUB132PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB132PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMADDSUB213PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADDSUB231PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB231PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUB132PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB132PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUB213PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUB231PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB231PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUBADD132PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD132PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMSUBADD213PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUBADD231PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD231PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFNMADD132PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD132PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFNMADD213PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFNMADD231PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD231PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFNMSUB132PSMasked128", 
argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB132PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFNMSUB213PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFNMSUB231PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB231PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VMAXPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMAXPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMINPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMULPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -141,48 +81,18 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VRCP14PS256", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRSQRTPS256", argLength: 1, reg: fp11, asm: "VRSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPS256", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VFMADD132PS256", argLength: 3, reg: fp31, asm: "VFMADD132PS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VFMADD213PS256", argLength: 3, reg: fp31, asm: "VFMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADD231PS256", argLength: 3, reg: fp31, asm: "VFMADD231PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADDSUB132PS256", argLength: 3, reg: fp31, asm: "VFMADDSUB132PS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VFMADDSUB213PS256", argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADDSUB231PS256", argLength: 3, reg: fp31, asm: "VFMADDSUB231PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUB132PS256", argLength: 3, reg: fp31, asm: "VFMSUB132PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUB213PS256", argLength: 3, reg: fp31, asm: "VFMSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUB231PS256", argLength: 3, reg: fp31, asm: "VFMSUB231PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUBADD132PS256", argLength: 3, reg: fp31, asm: "VFMSUBADD132PS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VFMSUBADD213PS256", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUBADD231PS256", argLength: 3, reg: fp31, asm: "VFMSUBADD231PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFNMADD132PS256", argLength: 3, reg: fp31, asm: "VFNMADD132PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFNMADD213PS256", argLength: 3, reg: fp31, asm: "VFNMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFNMADD231PS256", argLength: 3, reg: fp31, asm: "VFNMADD231PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFNMSUB132PS256", argLength: 3, reg: fp31, asm: "VFNMSUB132PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFNMSUB213PS256", argLength: 3, reg: fp31, asm: "VFNMSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFNMSUB231PS256", argLength: 3, reg: fp31, asm: "VFNMSUB231PS", commutative: false, typ: "Vec256", 
resultInArg0: true}, {name: "VADDPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VANDPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VANDNPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDNPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRCP14PSMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRSQRT14PSMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VFMADD132PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADD132PS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VFMADD213PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADD231PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADD231PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADDSUB132PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB132PS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VFMADDSUB213PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADDSUB231PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB231PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUB132PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB132PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUB213PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUB231PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB231PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUBADD132PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD132PS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VFMSUBADD213PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUBADD231PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD231PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFNMADD132PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD132PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFNMADD213PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFNMADD231PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD231PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFNMSUB132PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB132PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFNMSUB213PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFNMSUB231PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB231PS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VMAXPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMINPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMINPS", commutative: true, typ: "Vec256", 
resultInArg0: false}, {name: "VMULPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false}, @@ -208,48 +118,18 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VRCP14PD128", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRSQRT14PD128", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPD128", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VFMADD132PD128", argLength: 3, reg: fp31, asm: "VFMADD132PD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMADD213PD128", argLength: 3, reg: fp31, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADD231PD128", argLength: 3, reg: fp31, asm: "VFMADD231PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADDSUB132PD128", argLength: 3, reg: fp31, asm: "VFMADDSUB132PD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMADDSUB213PD128", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADDSUB231PD128", argLength: 3, reg: fp31, asm: "VFMADDSUB231PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUB132PD128", argLength: 3, reg: fp31, asm: "VFMSUB132PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUB213PD128", argLength: 3, reg: fp31, asm: "VFMSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUB231PD128", argLength: 3, reg: fp31, asm: "VFMSUB231PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUBADD132PD128", argLength: 3, reg: fp31, asm: "VFMSUBADD132PD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMSUBADD213PD128", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUBADD231PD128", argLength: 3, reg: fp31, asm: "VFMSUBADD231PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFNMADD132PD128", argLength: 3, reg: fp31, asm: "VFNMADD132PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFNMADD213PD128", argLength: 3, reg: fp31, asm: "VFNMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFNMADD231PD128", argLength: 3, reg: fp31, asm: "VFNMADD231PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFNMSUB132PD128", argLength: 3, reg: fp31, asm: "VFNMSUB132PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFNMSUB213PD128", argLength: 3, reg: fp31, asm: "VFNMSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFNMSUB231PD128", argLength: 3, reg: fp31, asm: "VFNMSUB231PD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VADDPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VANDPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VANDNPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDNPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRCP14PDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRSQRT14PDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PD", 
commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VFMADD132PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADD132PD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMADD213PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADD231PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADD231PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADDSUB132PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB132PD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMADDSUB213PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADDSUB231PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB231PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUB132PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB132PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUB213PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUB231PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB231PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUBADD132PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD132PD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMSUBADD213PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUBADD231PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD231PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFNMADD132PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD132PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFNMADD213PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFNMADD231PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD231PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFNMSUB132PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB132PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFNMSUB213PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFNMSUB231PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB231PD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VMAXPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMAXPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMINPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMULPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -275,48 +155,18 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VRCP14PD256", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRSQRT14PD256", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPD256", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, 
- {name: "VFMADD132PD256", argLength: 3, reg: fp31, asm: "VFMADD132PD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VFMADD213PD256", argLength: 3, reg: fp31, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADD231PD256", argLength: 3, reg: fp31, asm: "VFMADD231PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADDSUB132PD256", argLength: 3, reg: fp31, asm: "VFMADDSUB132PD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VFMADDSUB213PD256", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADDSUB231PD256", argLength: 3, reg: fp31, asm: "VFMADDSUB231PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUB132PD256", argLength: 3, reg: fp31, asm: "VFMSUB132PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUB213PD256", argLength: 3, reg: fp31, asm: "VFMSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUB231PD256", argLength: 3, reg: fp31, asm: "VFMSUB231PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUBADD132PD256", argLength: 3, reg: fp31, asm: "VFMSUBADD132PD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VFMSUBADD213PD256", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUBADD231PD256", argLength: 3, reg: fp31, asm: "VFMSUBADD231PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFNMADD132PD256", argLength: 3, reg: fp31, asm: "VFNMADD132PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFNMADD213PD256", argLength: 3, reg: fp31, asm: "VFNMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFNMADD231PD256", argLength: 3, reg: fp31, asm: "VFNMADD231PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFNMSUB132PD256", argLength: 3, reg: fp31, asm: "VFNMSUB132PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFNMSUB213PD256", argLength: 3, reg: fp31, asm: "VFNMSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFNMSUB231PD256", argLength: 3, reg: fp31, asm: "VFNMSUB231PD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VADDPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VANDPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VANDNPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDNPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRCP14PDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRSQRT14PDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VFMADD132PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADD132PD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VFMADD213PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADD231PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADD231PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: 
"VFMADDSUB132PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB132PD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VFMADDSUB213PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADDSUB231PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB231PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUB132PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB132PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUB213PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUB231PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB231PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUBADD132PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD132PD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VFMSUBADD213PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUBADD231PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD231PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFNMADD132PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD132PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFNMADD213PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFNMADD231PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD231PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFNMSUB132PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB132PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFNMSUB213PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFNMSUB231PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB231PD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VMAXPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMAXPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMINPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMULPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false}, @@ -341,48 +191,18 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VRCP14PD512", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRSQRT14PD512", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VDIVPD512", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VFMADD132PD512", argLength: 3, reg: fp31, asm: "VFMADD132PD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VFMADD213PD512", argLength: 3, reg: fp31, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADD231PD512", argLength: 3, reg: fp31, asm: "VFMADD231PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADDSUB132PD512", argLength: 3, reg: fp31, asm: "VFMADDSUB132PD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VFMADDSUB213PD512", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: 
false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADDSUB231PD512", argLength: 3, reg: fp31, asm: "VFMADDSUB231PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUB132PD512", argLength: 3, reg: fp31, asm: "VFMSUB132PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUB213PD512", argLength: 3, reg: fp31, asm: "VFMSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUB231PD512", argLength: 3, reg: fp31, asm: "VFMSUB231PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUBADD132PD512", argLength: 3, reg: fp31, asm: "VFMSUBADD132PD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VFMSUBADD213PD512", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUBADD231PD512", argLength: 3, reg: fp31, asm: "VFMSUBADD231PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFNMADD132PD512", argLength: 3, reg: fp31, asm: "VFNMADD132PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFNMADD213PD512", argLength: 3, reg: fp31, asm: "VFNMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFNMADD231PD512", argLength: 3, reg: fp31, asm: "VFNMADD231PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFNMSUB132PD512", argLength: 3, reg: fp31, asm: "VFNMSUB132PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFNMSUB213PD512", argLength: 3, reg: fp31, asm: "VFNMSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFNMSUB231PD512", argLength: 3, reg: fp31, asm: "VFNMSUB231PD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VADDPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VANDPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VANDNPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDNPD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRCP14PDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRSQRT14PDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VDIVPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VFMADD132PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADD132PD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VFMADD213PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADD231PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADD231PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADDSUB132PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB132PD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VFMADDSUB213PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADDSUB231PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB231PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUB132PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB132PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUB213PDMasked512", argLength: 4, reg: 
fp3k1fp1, asm: "VFMSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUB231PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB231PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUBADD132PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD132PD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VFMSUBADD213PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUBADD231PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD231PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFNMADD132PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD132PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFNMADD213PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFNMADD231PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD231PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFNMSUB132PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB132PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFNMSUB213PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFNMSUB231PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB231PD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VMAXPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMAXPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMINPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMULPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index ee2eb15fe62dfe..ab9b4ffd98c484 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -10,24 +10,9 @@ func simdGenericOps() []opData { {name: "ApproximateReciprocalOfSqrtFloat32x16", argLength: 1, commutative: false}, {name: "DivFloat32x16", argLength: 2, commutative: false}, {name: "EqualFloat32x16", argLength: 2, commutative: true}, - {name: "FusedMultiplyAdd132Float32x16", argLength: 3, commutative: false}, - {name: "FusedMultiplyAdd213Float32x16", argLength: 3, commutative: false}, - {name: "FusedMultiplyAdd231Float32x16", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSub132Float32x16", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSub213Float32x16", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSub231Float32x16", argLength: 3, commutative: false}, - {name: "FusedMultiplySub132Float32x16", argLength: 3, commutative: false}, - {name: "FusedMultiplySub213Float32x16", argLength: 3, commutative: false}, - {name: "FusedMultiplySub231Float32x16", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAdd132Float32x16", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAdd213Float32x16", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAdd231Float32x16", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplyAdd132Float32x16", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplyAdd213Float32x16", argLength: 3, commutative: false}, - {name: 
"FusedNegativeMultiplyAdd231Float32x16", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplySub132Float32x16", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplySub213Float32x16", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplySub231Float32x16", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddFloat32x16", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSubFloat32x16", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAddFloat32x16", argLength: 3, commutative: false}, {name: "GreaterFloat32x16", argLength: 2, commutative: false}, {name: "GreaterEqualFloat32x16", argLength: 2, commutative: false}, {name: "IsNanFloat32x16", argLength: 2, commutative: true}, @@ -40,24 +25,9 @@ func simdGenericOps() []opData { {name: "MaskedApproximateReciprocalOfSqrtFloat32x16", argLength: 2, commutative: false}, {name: "MaskedDivFloat32x16", argLength: 3, commutative: false}, {name: "MaskedEqualFloat32x16", argLength: 3, commutative: true}, - {name: "MaskedFusedMultiplyAdd132Float32x16", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAdd213Float32x16", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAdd231Float32x16", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAddSub132Float32x16", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAddSub213Float32x16", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAddSub231Float32x16", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySub132Float32x16", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySub213Float32x16", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySub231Float32x16", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySubAdd132Float32x16", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySubAdd213Float32x16", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySubAdd231Float32x16", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplyAdd132Float32x16", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplyAdd213Float32x16", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplyAdd231Float32x16", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplySub132Float32x16", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplySub213Float32x16", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplySub231Float32x16", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddFloat32x16", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddSubFloat32x16", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySubAddFloat32x16", argLength: 4, commutative: false}, {name: "MaskedGreaterFloat32x16", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualFloat32x16", argLength: 3, commutative: false}, {name: "MaskedIsNanFloat32x16", argLength: 3, commutative: true}, @@ -91,24 +61,9 @@ func simdGenericOps() []opData { {name: "DivFloat32x4", argLength: 2, commutative: false}, {name: "EqualFloat32x4", argLength: 2, commutative: true}, {name: "FloorFloat32x4", argLength: 1, commutative: false}, - {name: "FusedMultiplyAdd132Float32x4", argLength: 3, commutative: false}, - {name: "FusedMultiplyAdd213Float32x4", argLength: 3, commutative: false}, - {name: "FusedMultiplyAdd231Float32x4", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSub132Float32x4", argLength: 3, 
commutative: false}, - {name: "FusedMultiplyAddSub213Float32x4", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSub231Float32x4", argLength: 3, commutative: false}, - {name: "FusedMultiplySub132Float32x4", argLength: 3, commutative: false}, - {name: "FusedMultiplySub213Float32x4", argLength: 3, commutative: false}, - {name: "FusedMultiplySub231Float32x4", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAdd132Float32x4", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAdd213Float32x4", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAdd231Float32x4", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplyAdd132Float32x4", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplyAdd213Float32x4", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplyAdd231Float32x4", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplySub132Float32x4", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplySub213Float32x4", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplySub231Float32x4", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddFloat32x4", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSubFloat32x4", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAddFloat32x4", argLength: 3, commutative: false}, {name: "GreaterFloat32x4", argLength: 2, commutative: false}, {name: "GreaterEqualFloat32x4", argLength: 2, commutative: false}, {name: "IsNanFloat32x4", argLength: 2, commutative: true}, @@ -121,24 +76,9 @@ func simdGenericOps() []opData { {name: "MaskedApproximateReciprocalOfSqrtFloat32x4", argLength: 2, commutative: false}, {name: "MaskedDivFloat32x4", argLength: 3, commutative: false}, {name: "MaskedEqualFloat32x4", argLength: 3, commutative: true}, - {name: "MaskedFusedMultiplyAdd132Float32x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAdd213Float32x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAdd231Float32x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAddSub132Float32x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAddSub213Float32x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAddSub231Float32x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySub132Float32x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySub213Float32x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySub231Float32x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySubAdd132Float32x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySubAdd213Float32x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySubAdd231Float32x4", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplyAdd132Float32x4", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplyAdd213Float32x4", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplyAdd231Float32x4", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplySub132Float32x4", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplySub213Float32x4", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplySub231Float32x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddFloat32x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddSubFloat32x4", argLength: 4, commutative: false}, + {name: 
"MaskedFusedMultiplySubAddFloat32x4", argLength: 4, commutative: false}, {name: "MaskedGreaterFloat32x4", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualFloat32x4", argLength: 3, commutative: false}, {name: "MaskedIsNanFloat32x4", argLength: 3, commutative: true}, @@ -176,24 +116,9 @@ func simdGenericOps() []opData { {name: "DivFloat32x8", argLength: 2, commutative: false}, {name: "EqualFloat32x8", argLength: 2, commutative: true}, {name: "FloorFloat32x8", argLength: 1, commutative: false}, - {name: "FusedMultiplyAdd132Float32x8", argLength: 3, commutative: false}, - {name: "FusedMultiplyAdd213Float32x8", argLength: 3, commutative: false}, - {name: "FusedMultiplyAdd231Float32x8", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSub132Float32x8", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSub213Float32x8", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSub231Float32x8", argLength: 3, commutative: false}, - {name: "FusedMultiplySub132Float32x8", argLength: 3, commutative: false}, - {name: "FusedMultiplySub213Float32x8", argLength: 3, commutative: false}, - {name: "FusedMultiplySub231Float32x8", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAdd132Float32x8", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAdd213Float32x8", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAdd231Float32x8", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplyAdd132Float32x8", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplyAdd213Float32x8", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplyAdd231Float32x8", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplySub132Float32x8", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplySub213Float32x8", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplySub231Float32x8", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddFloat32x8", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSubFloat32x8", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAddFloat32x8", argLength: 3, commutative: false}, {name: "GreaterFloat32x8", argLength: 2, commutative: false}, {name: "GreaterEqualFloat32x8", argLength: 2, commutative: false}, {name: "IsNanFloat32x8", argLength: 2, commutative: true}, @@ -206,24 +131,9 @@ func simdGenericOps() []opData { {name: "MaskedApproximateReciprocalOfSqrtFloat32x8", argLength: 2, commutative: false}, {name: "MaskedDivFloat32x8", argLength: 3, commutative: false}, {name: "MaskedEqualFloat32x8", argLength: 3, commutative: true}, - {name: "MaskedFusedMultiplyAdd132Float32x8", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAdd213Float32x8", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAdd231Float32x8", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAddSub132Float32x8", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAddSub213Float32x8", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAddSub231Float32x8", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySub132Float32x8", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySub213Float32x8", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySub231Float32x8", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySubAdd132Float32x8", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySubAdd213Float32x8", argLength: 4, commutative: false}, - {name: 
"MaskedFusedMultiplySubAdd231Float32x8", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplyAdd132Float32x8", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplyAdd213Float32x8", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplyAdd231Float32x8", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplySub132Float32x8", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplySub213Float32x8", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplySub231Float32x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddFloat32x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddSubFloat32x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySubAddFloat32x8", argLength: 4, commutative: false}, {name: "MaskedGreaterFloat32x8", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualFloat32x8", argLength: 3, commutative: false}, {name: "MaskedIsNanFloat32x8", argLength: 3, commutative: true}, @@ -262,24 +172,9 @@ func simdGenericOps() []opData { {name: "DotProdBroadcastFloat64x2", argLength: 2, commutative: true}, {name: "EqualFloat64x2", argLength: 2, commutative: true}, {name: "FloorFloat64x2", argLength: 1, commutative: false}, - {name: "FusedMultiplyAdd132Float64x2", argLength: 3, commutative: false}, - {name: "FusedMultiplyAdd213Float64x2", argLength: 3, commutative: false}, - {name: "FusedMultiplyAdd231Float64x2", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSub132Float64x2", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSub213Float64x2", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSub231Float64x2", argLength: 3, commutative: false}, - {name: "FusedMultiplySub132Float64x2", argLength: 3, commutative: false}, - {name: "FusedMultiplySub213Float64x2", argLength: 3, commutative: false}, - {name: "FusedMultiplySub231Float64x2", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAdd132Float64x2", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAdd213Float64x2", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAdd231Float64x2", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplyAdd132Float64x2", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplyAdd213Float64x2", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplyAdd231Float64x2", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplySub132Float64x2", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplySub213Float64x2", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplySub231Float64x2", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddFloat64x2", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSubFloat64x2", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAddFloat64x2", argLength: 3, commutative: false}, {name: "GreaterFloat64x2", argLength: 2, commutative: false}, {name: "GreaterEqualFloat64x2", argLength: 2, commutative: false}, {name: "IsNanFloat64x2", argLength: 2, commutative: true}, @@ -292,24 +187,9 @@ func simdGenericOps() []opData { {name: "MaskedApproximateReciprocalOfSqrtFloat64x2", argLength: 2, commutative: false}, {name: "MaskedDivFloat64x2", argLength: 3, commutative: false}, {name: "MaskedEqualFloat64x2", argLength: 3, commutative: true}, - {name: "MaskedFusedMultiplyAdd132Float64x2", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAdd213Float64x2", 
argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAdd231Float64x2", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAddSub132Float64x2", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAddSub213Float64x2", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAddSub231Float64x2", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySub132Float64x2", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySub213Float64x2", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySub231Float64x2", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySubAdd132Float64x2", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySubAdd213Float64x2", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySubAdd231Float64x2", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplyAdd132Float64x2", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplyAdd213Float64x2", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplyAdd231Float64x2", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplySub132Float64x2", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplySub213Float64x2", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplySub231Float64x2", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddFloat64x2", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddSubFloat64x2", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySubAddFloat64x2", argLength: 4, commutative: false}, {name: "MaskedGreaterFloat64x2", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualFloat64x2", argLength: 3, commutative: false}, {name: "MaskedIsNanFloat64x2", argLength: 3, commutative: true}, @@ -347,24 +227,9 @@ func simdGenericOps() []opData { {name: "DivFloat64x4", argLength: 2, commutative: false}, {name: "EqualFloat64x4", argLength: 2, commutative: true}, {name: "FloorFloat64x4", argLength: 1, commutative: false}, - {name: "FusedMultiplyAdd132Float64x4", argLength: 3, commutative: false}, - {name: "FusedMultiplyAdd213Float64x4", argLength: 3, commutative: false}, - {name: "FusedMultiplyAdd231Float64x4", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSub132Float64x4", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSub213Float64x4", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSub231Float64x4", argLength: 3, commutative: false}, - {name: "FusedMultiplySub132Float64x4", argLength: 3, commutative: false}, - {name: "FusedMultiplySub213Float64x4", argLength: 3, commutative: false}, - {name: "FusedMultiplySub231Float64x4", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAdd132Float64x4", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAdd213Float64x4", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAdd231Float64x4", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplyAdd132Float64x4", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplyAdd213Float64x4", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplyAdd231Float64x4", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplySub132Float64x4", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplySub213Float64x4", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplySub231Float64x4", argLength: 3, commutative: false}, + {name: 
"FusedMultiplyAddFloat64x4", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSubFloat64x4", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAddFloat64x4", argLength: 3, commutative: false}, {name: "GreaterFloat64x4", argLength: 2, commutative: false}, {name: "GreaterEqualFloat64x4", argLength: 2, commutative: false}, {name: "IsNanFloat64x4", argLength: 2, commutative: true}, @@ -377,24 +242,9 @@ func simdGenericOps() []opData { {name: "MaskedApproximateReciprocalOfSqrtFloat64x4", argLength: 2, commutative: false}, {name: "MaskedDivFloat64x4", argLength: 3, commutative: false}, {name: "MaskedEqualFloat64x4", argLength: 3, commutative: true}, - {name: "MaskedFusedMultiplyAdd132Float64x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAdd213Float64x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAdd231Float64x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAddSub132Float64x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAddSub213Float64x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAddSub231Float64x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySub132Float64x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySub213Float64x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySub231Float64x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySubAdd132Float64x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySubAdd213Float64x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySubAdd231Float64x4", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplyAdd132Float64x4", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplyAdd213Float64x4", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplyAdd231Float64x4", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplySub132Float64x4", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplySub213Float64x4", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplySub231Float64x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddFloat64x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddSubFloat64x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySubAddFloat64x4", argLength: 4, commutative: false}, {name: "MaskedGreaterFloat64x4", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualFloat64x4", argLength: 3, commutative: false}, {name: "MaskedIsNanFloat64x4", argLength: 3, commutative: true}, @@ -429,24 +279,9 @@ func simdGenericOps() []opData { {name: "ApproximateReciprocalOfSqrtFloat64x8", argLength: 1, commutative: false}, {name: "DivFloat64x8", argLength: 2, commutative: false}, {name: "EqualFloat64x8", argLength: 2, commutative: true}, - {name: "FusedMultiplyAdd132Float64x8", argLength: 3, commutative: false}, - {name: "FusedMultiplyAdd213Float64x8", argLength: 3, commutative: false}, - {name: "FusedMultiplyAdd231Float64x8", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSub132Float64x8", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSub213Float64x8", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSub231Float64x8", argLength: 3, commutative: false}, - {name: "FusedMultiplySub132Float64x8", argLength: 3, commutative: false}, - {name: "FusedMultiplySub213Float64x8", argLength: 3, commutative: false}, - {name: 
"FusedMultiplySub231Float64x8", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAdd132Float64x8", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAdd213Float64x8", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAdd231Float64x8", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplyAdd132Float64x8", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplyAdd213Float64x8", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplyAdd231Float64x8", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplySub132Float64x8", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplySub213Float64x8", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplySub231Float64x8", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddFloat64x8", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSubFloat64x8", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAddFloat64x8", argLength: 3, commutative: false}, {name: "GreaterFloat64x8", argLength: 2, commutative: false}, {name: "GreaterEqualFloat64x8", argLength: 2, commutative: false}, {name: "IsNanFloat64x8", argLength: 2, commutative: true}, @@ -459,24 +294,9 @@ func simdGenericOps() []opData { {name: "MaskedApproximateReciprocalOfSqrtFloat64x8", argLength: 2, commutative: false}, {name: "MaskedDivFloat64x8", argLength: 3, commutative: false}, {name: "MaskedEqualFloat64x8", argLength: 3, commutative: true}, - {name: "MaskedFusedMultiplyAdd132Float64x8", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAdd213Float64x8", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAdd231Float64x8", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAddSub132Float64x8", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAddSub213Float64x8", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAddSub231Float64x8", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySub132Float64x8", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySub213Float64x8", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySub231Float64x8", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySubAdd132Float64x8", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySubAdd213Float64x8", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySubAdd231Float64x8", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplyAdd132Float64x8", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplyAdd213Float64x8", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplyAdd231Float64x8", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplySub132Float64x8", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplySub213Float64x8", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplySub231Float64x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddFloat64x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddSubFloat64x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySubAddFloat64x8", argLength: 4, commutative: false}, {name: "MaskedGreaterFloat64x8", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualFloat64x8", argLength: 3, commutative: false}, {name: "MaskedIsNanFloat64x8", argLength: 3, commutative: true}, diff --git a/src/cmd/compile/internal/ssa/opGen.go 
b/src/cmd/compile/internal/ssa/opGen.go index 45f35548383212..4b25da4e506ab0 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1202,48 +1202,18 @@ const ( OpAMD64VRCP14PS512 OpAMD64VRSQRT14PS512 OpAMD64VDIVPS512 - OpAMD64VFMADD132PS512 OpAMD64VFMADD213PS512 - OpAMD64VFMADD231PS512 - OpAMD64VFMADDSUB132PS512 OpAMD64VFMADDSUB213PS512 - OpAMD64VFMADDSUB231PS512 - OpAMD64VFMSUB132PS512 - OpAMD64VFMSUB213PS512 - OpAMD64VFMSUB231PS512 - OpAMD64VFMSUBADD132PS512 OpAMD64VFMSUBADD213PS512 - OpAMD64VFMSUBADD231PS512 - OpAMD64VFNMADD132PS512 - OpAMD64VFNMADD213PS512 - OpAMD64VFNMADD231PS512 - OpAMD64VFNMSUB132PS512 - OpAMD64VFNMSUB213PS512 - OpAMD64VFNMSUB231PS512 OpAMD64VADDPSMasked512 OpAMD64VANDPSMasked512 OpAMD64VANDNPSMasked512 OpAMD64VRCP14PSMasked512 OpAMD64VRSQRT14PSMasked512 OpAMD64VDIVPSMasked512 - OpAMD64VFMADD132PSMasked512 OpAMD64VFMADD213PSMasked512 - OpAMD64VFMADD231PSMasked512 - OpAMD64VFMADDSUB132PSMasked512 OpAMD64VFMADDSUB213PSMasked512 - OpAMD64VFMADDSUB231PSMasked512 - OpAMD64VFMSUB132PSMasked512 - OpAMD64VFMSUB213PSMasked512 - OpAMD64VFMSUB231PSMasked512 - OpAMD64VFMSUBADD132PSMasked512 OpAMD64VFMSUBADD213PSMasked512 - OpAMD64VFMSUBADD231PSMasked512 - OpAMD64VFNMADD132PSMasked512 - OpAMD64VFNMADD213PSMasked512 - OpAMD64VFNMADD231PSMasked512 - OpAMD64VFNMSUB132PSMasked512 - OpAMD64VFNMSUB213PSMasked512 - OpAMD64VFNMSUB231PSMasked512 OpAMD64VMAXPSMasked512 OpAMD64VMINPSMasked512 OpAMD64VMULPSMasked512 @@ -1267,48 +1237,18 @@ const ( OpAMD64VRCP14PS128 OpAMD64VRSQRTPS128 OpAMD64VDIVPS128 - OpAMD64VFMADD132PS128 OpAMD64VFMADD213PS128 - OpAMD64VFMADD231PS128 - OpAMD64VFMADDSUB132PS128 OpAMD64VFMADDSUB213PS128 - OpAMD64VFMADDSUB231PS128 - OpAMD64VFMSUB132PS128 - OpAMD64VFMSUB213PS128 - OpAMD64VFMSUB231PS128 - OpAMD64VFMSUBADD132PS128 OpAMD64VFMSUBADD213PS128 - OpAMD64VFMSUBADD231PS128 - OpAMD64VFNMADD132PS128 - OpAMD64VFNMADD213PS128 - OpAMD64VFNMADD231PS128 - OpAMD64VFNMSUB132PS128 - OpAMD64VFNMSUB213PS128 - OpAMD64VFNMSUB231PS128 OpAMD64VADDPSMasked128 OpAMD64VANDPSMasked128 OpAMD64VANDNPSMasked128 OpAMD64VRCP14PSMasked128 OpAMD64VRSQRT14PSMasked128 OpAMD64VDIVPSMasked128 - OpAMD64VFMADD132PSMasked128 OpAMD64VFMADD213PSMasked128 - OpAMD64VFMADD231PSMasked128 - OpAMD64VFMADDSUB132PSMasked128 OpAMD64VFMADDSUB213PSMasked128 - OpAMD64VFMADDSUB231PSMasked128 - OpAMD64VFMSUB132PSMasked128 - OpAMD64VFMSUB213PSMasked128 - OpAMD64VFMSUB231PSMasked128 - OpAMD64VFMSUBADD132PSMasked128 OpAMD64VFMSUBADD213PSMasked128 - OpAMD64VFMSUBADD231PSMasked128 - OpAMD64VFNMADD132PSMasked128 - OpAMD64VFNMADD213PSMasked128 - OpAMD64VFNMADD231PSMasked128 - OpAMD64VFNMSUB132PSMasked128 - OpAMD64VFNMSUB213PSMasked128 - OpAMD64VFNMSUB231PSMasked128 OpAMD64VMAXPSMasked128 OpAMD64VMINPSMasked128 OpAMD64VMULPSMasked128 @@ -1334,48 +1274,18 @@ const ( OpAMD64VRCP14PS256 OpAMD64VRSQRTPS256 OpAMD64VDIVPS256 - OpAMD64VFMADD132PS256 OpAMD64VFMADD213PS256 - OpAMD64VFMADD231PS256 - OpAMD64VFMADDSUB132PS256 OpAMD64VFMADDSUB213PS256 - OpAMD64VFMADDSUB231PS256 - OpAMD64VFMSUB132PS256 - OpAMD64VFMSUB213PS256 - OpAMD64VFMSUB231PS256 - OpAMD64VFMSUBADD132PS256 OpAMD64VFMSUBADD213PS256 - OpAMD64VFMSUBADD231PS256 - OpAMD64VFNMADD132PS256 - OpAMD64VFNMADD213PS256 - OpAMD64VFNMADD231PS256 - OpAMD64VFNMSUB132PS256 - OpAMD64VFNMSUB213PS256 - OpAMD64VFNMSUB231PS256 OpAMD64VADDPSMasked256 OpAMD64VANDPSMasked256 OpAMD64VANDNPSMasked256 OpAMD64VRCP14PSMasked256 OpAMD64VRSQRT14PSMasked256 OpAMD64VDIVPSMasked256 - OpAMD64VFMADD132PSMasked256 OpAMD64VFMADD213PSMasked256 - 
OpAMD64VFMADD231PSMasked256 - OpAMD64VFMADDSUB132PSMasked256 OpAMD64VFMADDSUB213PSMasked256 - OpAMD64VFMADDSUB231PSMasked256 - OpAMD64VFMSUB132PSMasked256 - OpAMD64VFMSUB213PSMasked256 - OpAMD64VFMSUB231PSMasked256 - OpAMD64VFMSUBADD132PSMasked256 OpAMD64VFMSUBADD213PSMasked256 - OpAMD64VFMSUBADD231PSMasked256 - OpAMD64VFNMADD132PSMasked256 - OpAMD64VFNMADD213PSMasked256 - OpAMD64VFNMADD231PSMasked256 - OpAMD64VFNMSUB132PSMasked256 - OpAMD64VFNMSUB213PSMasked256 - OpAMD64VFNMSUB231PSMasked256 OpAMD64VMAXPSMasked256 OpAMD64VMINPSMasked256 OpAMD64VMULPSMasked256 @@ -1401,48 +1311,18 @@ const ( OpAMD64VRCP14PD128 OpAMD64VRSQRT14PD128 OpAMD64VDIVPD128 - OpAMD64VFMADD132PD128 OpAMD64VFMADD213PD128 - OpAMD64VFMADD231PD128 - OpAMD64VFMADDSUB132PD128 OpAMD64VFMADDSUB213PD128 - OpAMD64VFMADDSUB231PD128 - OpAMD64VFMSUB132PD128 - OpAMD64VFMSUB213PD128 - OpAMD64VFMSUB231PD128 - OpAMD64VFMSUBADD132PD128 OpAMD64VFMSUBADD213PD128 - OpAMD64VFMSUBADD231PD128 - OpAMD64VFNMADD132PD128 - OpAMD64VFNMADD213PD128 - OpAMD64VFNMADD231PD128 - OpAMD64VFNMSUB132PD128 - OpAMD64VFNMSUB213PD128 - OpAMD64VFNMSUB231PD128 OpAMD64VADDPDMasked128 OpAMD64VANDPDMasked128 OpAMD64VANDNPDMasked128 OpAMD64VRCP14PDMasked128 OpAMD64VRSQRT14PDMasked128 OpAMD64VDIVPDMasked128 - OpAMD64VFMADD132PDMasked128 OpAMD64VFMADD213PDMasked128 - OpAMD64VFMADD231PDMasked128 - OpAMD64VFMADDSUB132PDMasked128 OpAMD64VFMADDSUB213PDMasked128 - OpAMD64VFMADDSUB231PDMasked128 - OpAMD64VFMSUB132PDMasked128 - OpAMD64VFMSUB213PDMasked128 - OpAMD64VFMSUB231PDMasked128 - OpAMD64VFMSUBADD132PDMasked128 OpAMD64VFMSUBADD213PDMasked128 - OpAMD64VFMSUBADD231PDMasked128 - OpAMD64VFNMADD132PDMasked128 - OpAMD64VFNMADD213PDMasked128 - OpAMD64VFNMADD231PDMasked128 - OpAMD64VFNMSUB132PDMasked128 - OpAMD64VFNMSUB213PDMasked128 - OpAMD64VFNMSUB231PDMasked128 OpAMD64VMAXPDMasked128 OpAMD64VMINPDMasked128 OpAMD64VMULPDMasked128 @@ -1468,48 +1348,18 @@ const ( OpAMD64VRCP14PD256 OpAMD64VRSQRT14PD256 OpAMD64VDIVPD256 - OpAMD64VFMADD132PD256 OpAMD64VFMADD213PD256 - OpAMD64VFMADD231PD256 - OpAMD64VFMADDSUB132PD256 OpAMD64VFMADDSUB213PD256 - OpAMD64VFMADDSUB231PD256 - OpAMD64VFMSUB132PD256 - OpAMD64VFMSUB213PD256 - OpAMD64VFMSUB231PD256 - OpAMD64VFMSUBADD132PD256 OpAMD64VFMSUBADD213PD256 - OpAMD64VFMSUBADD231PD256 - OpAMD64VFNMADD132PD256 - OpAMD64VFNMADD213PD256 - OpAMD64VFNMADD231PD256 - OpAMD64VFNMSUB132PD256 - OpAMD64VFNMSUB213PD256 - OpAMD64VFNMSUB231PD256 OpAMD64VADDPDMasked256 OpAMD64VANDPDMasked256 OpAMD64VANDNPDMasked256 OpAMD64VRCP14PDMasked256 OpAMD64VRSQRT14PDMasked256 OpAMD64VDIVPDMasked256 - OpAMD64VFMADD132PDMasked256 OpAMD64VFMADD213PDMasked256 - OpAMD64VFMADD231PDMasked256 - OpAMD64VFMADDSUB132PDMasked256 OpAMD64VFMADDSUB213PDMasked256 - OpAMD64VFMADDSUB231PDMasked256 - OpAMD64VFMSUB132PDMasked256 - OpAMD64VFMSUB213PDMasked256 - OpAMD64VFMSUB231PDMasked256 - OpAMD64VFMSUBADD132PDMasked256 OpAMD64VFMSUBADD213PDMasked256 - OpAMD64VFMSUBADD231PDMasked256 - OpAMD64VFNMADD132PDMasked256 - OpAMD64VFNMADD213PDMasked256 - OpAMD64VFNMADD231PDMasked256 - OpAMD64VFNMSUB132PDMasked256 - OpAMD64VFNMSUB213PDMasked256 - OpAMD64VFNMSUB231PDMasked256 OpAMD64VMAXPDMasked256 OpAMD64VMINPDMasked256 OpAMD64VMULPDMasked256 @@ -1534,48 +1384,18 @@ const ( OpAMD64VRCP14PD512 OpAMD64VRSQRT14PD512 OpAMD64VDIVPD512 - OpAMD64VFMADD132PD512 OpAMD64VFMADD213PD512 - OpAMD64VFMADD231PD512 - OpAMD64VFMADDSUB132PD512 OpAMD64VFMADDSUB213PD512 - OpAMD64VFMADDSUB231PD512 - OpAMD64VFMSUB132PD512 - OpAMD64VFMSUB213PD512 - OpAMD64VFMSUB231PD512 - OpAMD64VFMSUBADD132PD512 OpAMD64VFMSUBADD213PD512 - 
OpAMD64VFMSUBADD231PD512 - OpAMD64VFNMADD132PD512 - OpAMD64VFNMADD213PD512 - OpAMD64VFNMADD231PD512 - OpAMD64VFNMSUB132PD512 - OpAMD64VFNMSUB213PD512 - OpAMD64VFNMSUB231PD512 OpAMD64VADDPDMasked512 OpAMD64VANDPDMasked512 OpAMD64VANDNPDMasked512 OpAMD64VRCP14PDMasked512 OpAMD64VRSQRT14PDMasked512 OpAMD64VDIVPDMasked512 - OpAMD64VFMADD132PDMasked512 OpAMD64VFMADD213PDMasked512 - OpAMD64VFMADD231PDMasked512 - OpAMD64VFMADDSUB132PDMasked512 OpAMD64VFMADDSUB213PDMasked512 - OpAMD64VFMADDSUB231PDMasked512 - OpAMD64VFMSUB132PDMasked512 - OpAMD64VFMSUB213PDMasked512 - OpAMD64VFMSUB231PDMasked512 - OpAMD64VFMSUBADD132PDMasked512 OpAMD64VFMSUBADD213PDMasked512 - OpAMD64VFMSUBADD231PDMasked512 - OpAMD64VFNMADD132PDMasked512 - OpAMD64VFNMADD213PDMasked512 - OpAMD64VFNMADD231PDMasked512 - OpAMD64VFNMSUB132PDMasked512 - OpAMD64VFNMSUB213PDMasked512 - OpAMD64VFNMSUB231PDMasked512 OpAMD64VMAXPDMasked512 OpAMD64VMINPDMasked512 OpAMD64VMULPDMasked512 @@ -4293,24 +4113,9 @@ const ( OpApproximateReciprocalOfSqrtFloat32x16 OpDivFloat32x16 OpEqualFloat32x16 - OpFusedMultiplyAdd132Float32x16 - OpFusedMultiplyAdd213Float32x16 - OpFusedMultiplyAdd231Float32x16 - OpFusedMultiplyAddSub132Float32x16 - OpFusedMultiplyAddSub213Float32x16 - OpFusedMultiplyAddSub231Float32x16 - OpFusedMultiplySub132Float32x16 - OpFusedMultiplySub213Float32x16 - OpFusedMultiplySub231Float32x16 - OpFusedMultiplySubAdd132Float32x16 - OpFusedMultiplySubAdd213Float32x16 - OpFusedMultiplySubAdd231Float32x16 - OpFusedNegativeMultiplyAdd132Float32x16 - OpFusedNegativeMultiplyAdd213Float32x16 - OpFusedNegativeMultiplyAdd231Float32x16 - OpFusedNegativeMultiplySub132Float32x16 - OpFusedNegativeMultiplySub213Float32x16 - OpFusedNegativeMultiplySub231Float32x16 + OpFusedMultiplyAddFloat32x16 + OpFusedMultiplyAddSubFloat32x16 + OpFusedMultiplySubAddFloat32x16 OpGreaterFloat32x16 OpGreaterEqualFloat32x16 OpIsNanFloat32x16 @@ -4323,24 +4128,9 @@ const ( OpMaskedApproximateReciprocalOfSqrtFloat32x16 OpMaskedDivFloat32x16 OpMaskedEqualFloat32x16 - OpMaskedFusedMultiplyAdd132Float32x16 - OpMaskedFusedMultiplyAdd213Float32x16 - OpMaskedFusedMultiplyAdd231Float32x16 - OpMaskedFusedMultiplyAddSub132Float32x16 - OpMaskedFusedMultiplyAddSub213Float32x16 - OpMaskedFusedMultiplyAddSub231Float32x16 - OpMaskedFusedMultiplySub132Float32x16 - OpMaskedFusedMultiplySub213Float32x16 - OpMaskedFusedMultiplySub231Float32x16 - OpMaskedFusedMultiplySubAdd132Float32x16 - OpMaskedFusedMultiplySubAdd213Float32x16 - OpMaskedFusedMultiplySubAdd231Float32x16 - OpMaskedFusedNegativeMultiplyAdd132Float32x16 - OpMaskedFusedNegativeMultiplyAdd213Float32x16 - OpMaskedFusedNegativeMultiplyAdd231Float32x16 - OpMaskedFusedNegativeMultiplySub132Float32x16 - OpMaskedFusedNegativeMultiplySub213Float32x16 - OpMaskedFusedNegativeMultiplySub231Float32x16 + OpMaskedFusedMultiplyAddFloat32x16 + OpMaskedFusedMultiplyAddSubFloat32x16 + OpMaskedFusedMultiplySubAddFloat32x16 OpMaskedGreaterFloat32x16 OpMaskedGreaterEqualFloat32x16 OpMaskedIsNanFloat32x16 @@ -4374,24 +4164,9 @@ const ( OpDivFloat32x4 OpEqualFloat32x4 OpFloorFloat32x4 - OpFusedMultiplyAdd132Float32x4 - OpFusedMultiplyAdd213Float32x4 - OpFusedMultiplyAdd231Float32x4 - OpFusedMultiplyAddSub132Float32x4 - OpFusedMultiplyAddSub213Float32x4 - OpFusedMultiplyAddSub231Float32x4 - OpFusedMultiplySub132Float32x4 - OpFusedMultiplySub213Float32x4 - OpFusedMultiplySub231Float32x4 - OpFusedMultiplySubAdd132Float32x4 - OpFusedMultiplySubAdd213Float32x4 - OpFusedMultiplySubAdd231Float32x4 - OpFusedNegativeMultiplyAdd132Float32x4 - 
OpFusedNegativeMultiplyAdd213Float32x4 - OpFusedNegativeMultiplyAdd231Float32x4 - OpFusedNegativeMultiplySub132Float32x4 - OpFusedNegativeMultiplySub213Float32x4 - OpFusedNegativeMultiplySub231Float32x4 + OpFusedMultiplyAddFloat32x4 + OpFusedMultiplyAddSubFloat32x4 + OpFusedMultiplySubAddFloat32x4 OpGreaterFloat32x4 OpGreaterEqualFloat32x4 OpIsNanFloat32x4 @@ -4404,24 +4179,9 @@ const ( OpMaskedApproximateReciprocalOfSqrtFloat32x4 OpMaskedDivFloat32x4 OpMaskedEqualFloat32x4 - OpMaskedFusedMultiplyAdd132Float32x4 - OpMaskedFusedMultiplyAdd213Float32x4 - OpMaskedFusedMultiplyAdd231Float32x4 - OpMaskedFusedMultiplyAddSub132Float32x4 - OpMaskedFusedMultiplyAddSub213Float32x4 - OpMaskedFusedMultiplyAddSub231Float32x4 - OpMaskedFusedMultiplySub132Float32x4 - OpMaskedFusedMultiplySub213Float32x4 - OpMaskedFusedMultiplySub231Float32x4 - OpMaskedFusedMultiplySubAdd132Float32x4 - OpMaskedFusedMultiplySubAdd213Float32x4 - OpMaskedFusedMultiplySubAdd231Float32x4 - OpMaskedFusedNegativeMultiplyAdd132Float32x4 - OpMaskedFusedNegativeMultiplyAdd213Float32x4 - OpMaskedFusedNegativeMultiplyAdd231Float32x4 - OpMaskedFusedNegativeMultiplySub132Float32x4 - OpMaskedFusedNegativeMultiplySub213Float32x4 - OpMaskedFusedNegativeMultiplySub231Float32x4 + OpMaskedFusedMultiplyAddFloat32x4 + OpMaskedFusedMultiplyAddSubFloat32x4 + OpMaskedFusedMultiplySubAddFloat32x4 OpMaskedGreaterFloat32x4 OpMaskedGreaterEqualFloat32x4 OpMaskedIsNanFloat32x4 @@ -4459,24 +4219,9 @@ const ( OpDivFloat32x8 OpEqualFloat32x8 OpFloorFloat32x8 - OpFusedMultiplyAdd132Float32x8 - OpFusedMultiplyAdd213Float32x8 - OpFusedMultiplyAdd231Float32x8 - OpFusedMultiplyAddSub132Float32x8 - OpFusedMultiplyAddSub213Float32x8 - OpFusedMultiplyAddSub231Float32x8 - OpFusedMultiplySub132Float32x8 - OpFusedMultiplySub213Float32x8 - OpFusedMultiplySub231Float32x8 - OpFusedMultiplySubAdd132Float32x8 - OpFusedMultiplySubAdd213Float32x8 - OpFusedMultiplySubAdd231Float32x8 - OpFusedNegativeMultiplyAdd132Float32x8 - OpFusedNegativeMultiplyAdd213Float32x8 - OpFusedNegativeMultiplyAdd231Float32x8 - OpFusedNegativeMultiplySub132Float32x8 - OpFusedNegativeMultiplySub213Float32x8 - OpFusedNegativeMultiplySub231Float32x8 + OpFusedMultiplyAddFloat32x8 + OpFusedMultiplyAddSubFloat32x8 + OpFusedMultiplySubAddFloat32x8 OpGreaterFloat32x8 OpGreaterEqualFloat32x8 OpIsNanFloat32x8 @@ -4489,24 +4234,9 @@ const ( OpMaskedApproximateReciprocalOfSqrtFloat32x8 OpMaskedDivFloat32x8 OpMaskedEqualFloat32x8 - OpMaskedFusedMultiplyAdd132Float32x8 - OpMaskedFusedMultiplyAdd213Float32x8 - OpMaskedFusedMultiplyAdd231Float32x8 - OpMaskedFusedMultiplyAddSub132Float32x8 - OpMaskedFusedMultiplyAddSub213Float32x8 - OpMaskedFusedMultiplyAddSub231Float32x8 - OpMaskedFusedMultiplySub132Float32x8 - OpMaskedFusedMultiplySub213Float32x8 - OpMaskedFusedMultiplySub231Float32x8 - OpMaskedFusedMultiplySubAdd132Float32x8 - OpMaskedFusedMultiplySubAdd213Float32x8 - OpMaskedFusedMultiplySubAdd231Float32x8 - OpMaskedFusedNegativeMultiplyAdd132Float32x8 - OpMaskedFusedNegativeMultiplyAdd213Float32x8 - OpMaskedFusedNegativeMultiplyAdd231Float32x8 - OpMaskedFusedNegativeMultiplySub132Float32x8 - OpMaskedFusedNegativeMultiplySub213Float32x8 - OpMaskedFusedNegativeMultiplySub231Float32x8 + OpMaskedFusedMultiplyAddFloat32x8 + OpMaskedFusedMultiplyAddSubFloat32x8 + OpMaskedFusedMultiplySubAddFloat32x8 OpMaskedGreaterFloat32x8 OpMaskedGreaterEqualFloat32x8 OpMaskedIsNanFloat32x8 @@ -4545,24 +4275,9 @@ const ( OpDotProdBroadcastFloat64x2 OpEqualFloat64x2 OpFloorFloat64x2 - OpFusedMultiplyAdd132Float64x2 - 
OpFusedMultiplyAdd213Float64x2 - OpFusedMultiplyAdd231Float64x2 - OpFusedMultiplyAddSub132Float64x2 - OpFusedMultiplyAddSub213Float64x2 - OpFusedMultiplyAddSub231Float64x2 - OpFusedMultiplySub132Float64x2 - OpFusedMultiplySub213Float64x2 - OpFusedMultiplySub231Float64x2 - OpFusedMultiplySubAdd132Float64x2 - OpFusedMultiplySubAdd213Float64x2 - OpFusedMultiplySubAdd231Float64x2 - OpFusedNegativeMultiplyAdd132Float64x2 - OpFusedNegativeMultiplyAdd213Float64x2 - OpFusedNegativeMultiplyAdd231Float64x2 - OpFusedNegativeMultiplySub132Float64x2 - OpFusedNegativeMultiplySub213Float64x2 - OpFusedNegativeMultiplySub231Float64x2 + OpFusedMultiplyAddFloat64x2 + OpFusedMultiplyAddSubFloat64x2 + OpFusedMultiplySubAddFloat64x2 OpGreaterFloat64x2 OpGreaterEqualFloat64x2 OpIsNanFloat64x2 @@ -4575,24 +4290,9 @@ const ( OpMaskedApproximateReciprocalOfSqrtFloat64x2 OpMaskedDivFloat64x2 OpMaskedEqualFloat64x2 - OpMaskedFusedMultiplyAdd132Float64x2 - OpMaskedFusedMultiplyAdd213Float64x2 - OpMaskedFusedMultiplyAdd231Float64x2 - OpMaskedFusedMultiplyAddSub132Float64x2 - OpMaskedFusedMultiplyAddSub213Float64x2 - OpMaskedFusedMultiplyAddSub231Float64x2 - OpMaskedFusedMultiplySub132Float64x2 - OpMaskedFusedMultiplySub213Float64x2 - OpMaskedFusedMultiplySub231Float64x2 - OpMaskedFusedMultiplySubAdd132Float64x2 - OpMaskedFusedMultiplySubAdd213Float64x2 - OpMaskedFusedMultiplySubAdd231Float64x2 - OpMaskedFusedNegativeMultiplyAdd132Float64x2 - OpMaskedFusedNegativeMultiplyAdd213Float64x2 - OpMaskedFusedNegativeMultiplyAdd231Float64x2 - OpMaskedFusedNegativeMultiplySub132Float64x2 - OpMaskedFusedNegativeMultiplySub213Float64x2 - OpMaskedFusedNegativeMultiplySub231Float64x2 + OpMaskedFusedMultiplyAddFloat64x2 + OpMaskedFusedMultiplyAddSubFloat64x2 + OpMaskedFusedMultiplySubAddFloat64x2 OpMaskedGreaterFloat64x2 OpMaskedGreaterEqualFloat64x2 OpMaskedIsNanFloat64x2 @@ -4630,24 +4330,9 @@ const ( OpDivFloat64x4 OpEqualFloat64x4 OpFloorFloat64x4 - OpFusedMultiplyAdd132Float64x4 - OpFusedMultiplyAdd213Float64x4 - OpFusedMultiplyAdd231Float64x4 - OpFusedMultiplyAddSub132Float64x4 - OpFusedMultiplyAddSub213Float64x4 - OpFusedMultiplyAddSub231Float64x4 - OpFusedMultiplySub132Float64x4 - OpFusedMultiplySub213Float64x4 - OpFusedMultiplySub231Float64x4 - OpFusedMultiplySubAdd132Float64x4 - OpFusedMultiplySubAdd213Float64x4 - OpFusedMultiplySubAdd231Float64x4 - OpFusedNegativeMultiplyAdd132Float64x4 - OpFusedNegativeMultiplyAdd213Float64x4 - OpFusedNegativeMultiplyAdd231Float64x4 - OpFusedNegativeMultiplySub132Float64x4 - OpFusedNegativeMultiplySub213Float64x4 - OpFusedNegativeMultiplySub231Float64x4 + OpFusedMultiplyAddFloat64x4 + OpFusedMultiplyAddSubFloat64x4 + OpFusedMultiplySubAddFloat64x4 OpGreaterFloat64x4 OpGreaterEqualFloat64x4 OpIsNanFloat64x4 @@ -4660,24 +4345,9 @@ const ( OpMaskedApproximateReciprocalOfSqrtFloat64x4 OpMaskedDivFloat64x4 OpMaskedEqualFloat64x4 - OpMaskedFusedMultiplyAdd132Float64x4 - OpMaskedFusedMultiplyAdd213Float64x4 - OpMaskedFusedMultiplyAdd231Float64x4 - OpMaskedFusedMultiplyAddSub132Float64x4 - OpMaskedFusedMultiplyAddSub213Float64x4 - OpMaskedFusedMultiplyAddSub231Float64x4 - OpMaskedFusedMultiplySub132Float64x4 - OpMaskedFusedMultiplySub213Float64x4 - OpMaskedFusedMultiplySub231Float64x4 - OpMaskedFusedMultiplySubAdd132Float64x4 - OpMaskedFusedMultiplySubAdd213Float64x4 - OpMaskedFusedMultiplySubAdd231Float64x4 - OpMaskedFusedNegativeMultiplyAdd132Float64x4 - OpMaskedFusedNegativeMultiplyAdd213Float64x4 - OpMaskedFusedNegativeMultiplyAdd231Float64x4 - OpMaskedFusedNegativeMultiplySub132Float64x4 - 
OpMaskedFusedNegativeMultiplySub213Float64x4 - OpMaskedFusedNegativeMultiplySub231Float64x4 + OpMaskedFusedMultiplyAddFloat64x4 + OpMaskedFusedMultiplyAddSubFloat64x4 + OpMaskedFusedMultiplySubAddFloat64x4 OpMaskedGreaterFloat64x4 OpMaskedGreaterEqualFloat64x4 OpMaskedIsNanFloat64x4 @@ -4712,24 +4382,9 @@ const ( OpApproximateReciprocalOfSqrtFloat64x8 OpDivFloat64x8 OpEqualFloat64x8 - OpFusedMultiplyAdd132Float64x8 - OpFusedMultiplyAdd213Float64x8 - OpFusedMultiplyAdd231Float64x8 - OpFusedMultiplyAddSub132Float64x8 - OpFusedMultiplyAddSub213Float64x8 - OpFusedMultiplyAddSub231Float64x8 - OpFusedMultiplySub132Float64x8 - OpFusedMultiplySub213Float64x8 - OpFusedMultiplySub231Float64x8 - OpFusedMultiplySubAdd132Float64x8 - OpFusedMultiplySubAdd213Float64x8 - OpFusedMultiplySubAdd231Float64x8 - OpFusedNegativeMultiplyAdd132Float64x8 - OpFusedNegativeMultiplyAdd213Float64x8 - OpFusedNegativeMultiplyAdd231Float64x8 - OpFusedNegativeMultiplySub132Float64x8 - OpFusedNegativeMultiplySub213Float64x8 - OpFusedNegativeMultiplySub231Float64x8 + OpFusedMultiplyAddFloat64x8 + OpFusedMultiplyAddSubFloat64x8 + OpFusedMultiplySubAddFloat64x8 OpGreaterFloat64x8 OpGreaterEqualFloat64x8 OpIsNanFloat64x8 @@ -4742,24 +4397,9 @@ const ( OpMaskedApproximateReciprocalOfSqrtFloat64x8 OpMaskedDivFloat64x8 OpMaskedEqualFloat64x8 - OpMaskedFusedMultiplyAdd132Float64x8 - OpMaskedFusedMultiplyAdd213Float64x8 - OpMaskedFusedMultiplyAdd231Float64x8 - OpMaskedFusedMultiplyAddSub132Float64x8 - OpMaskedFusedMultiplyAddSub213Float64x8 - OpMaskedFusedMultiplyAddSub231Float64x8 - OpMaskedFusedMultiplySub132Float64x8 - OpMaskedFusedMultiplySub213Float64x8 - OpMaskedFusedMultiplySub231Float64x8 - OpMaskedFusedMultiplySubAdd132Float64x8 - OpMaskedFusedMultiplySubAdd213Float64x8 - OpMaskedFusedMultiplySubAdd231Float64x8 - OpMaskedFusedNegativeMultiplyAdd132Float64x8 - OpMaskedFusedNegativeMultiplyAdd213Float64x8 - OpMaskedFusedNegativeMultiplyAdd231Float64x8 - OpMaskedFusedNegativeMultiplySub132Float64x8 - OpMaskedFusedNegativeMultiplySub213Float64x8 - OpMaskedFusedNegativeMultiplySub231Float64x8 + OpMaskedFusedMultiplyAddFloat64x8 + OpMaskedFusedMultiplyAddSubFloat64x8 + OpMaskedFusedMultiplySubAddFloat64x8 OpMaskedGreaterFloat64x8 OpMaskedGreaterEqualFloat64x8 OpMaskedIsNanFloat64x8 @@ -18514,22 +18154,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VFMADD132PS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADD132PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VFMADD213PS512", argLen: 3, @@ -18546,38 +18170,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VFMADD231PS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADD231PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADDSUB132PS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB132PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VFMADDSUB213PS512", argLen: 3, @@ -18594,86 +18186,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VFMADDSUB231PS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB231PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUB132PS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUB132PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUB213PS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUB213PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUB231PS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUB231PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUBADD132PS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD132PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VFMSUBADD213PS512", argLen: 3, @@ -18690,118 +18202,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VFMSUBADD231PS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD231PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMADD132PS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMADD132PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMADD213PS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMADD213PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMADD231PS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMADD231PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMSUB132PS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMSUB132PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMSUB213PS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMSUB213PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMSUB231PS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMSUB231PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VADDPSMasked512", argLen: 3, @@ -18892,23 +18292,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VFMADD132PSMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADD132PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VFMADD213PSMasked512", argLen: 4, @@ -18926,40 +18309,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VFMADD231PSMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADD231PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADDSUB132PSMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADDSUB132PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VFMADDSUB213PSMasked512", argLen: 4, @@ -18977,91 +18326,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VFMADDSUB231PSMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADDSUB231PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUB132PSMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUB132PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUB213PSMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUB213PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUB231PSMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUB231PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUBADD132PSMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUBADD132PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VFMSUBADD213PSMasked512", argLen: 4, @@ -19079,125 +18343,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: 
"VFMSUBADD231PSMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUBADD231PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMADD132PSMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMADD132PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMADD213PSMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMADD213PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMADD231PSMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMADD231PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMSUB132PSMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMSUB132PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMSUB213PSMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMSUB213PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMSUB231PSMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMSUB231PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VMAXPSMasked512", argLen: 3, @@ -19537,10 +18682,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD132PS128", + name: "VFMADD213PS128", argLen: 3, resultInArg0: true, - asm: x86.AVFMADD132PS, + asm: x86.AVFMADD213PS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19553,10 +18698,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PS128", + name: "VFMADDSUB213PS128", argLen: 3, resultInArg0: true, - asm: x86.AVFMADD213PS, + asm: x86.AVFMADDSUB213PS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19569,10 +18714,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD231PS128", + name: "VFMSUBADD213PS128", argLen: 3, resultInArg0: true, - asm: x86.AVFMADD231PS, + asm: x86.AVFMSUBADD213PS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19585,15 +18730,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB132PS128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB132PS, + name: "VADDPSMasked128", + argLen: 3, + commutative: true, + asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19601,15 +18746,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PS128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB213PS, + name: "VANDPSMasked128", + argLen: 3, + commutative: true, + asm: x86.AVANDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19617,15 +18762,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB231PS128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB231PS, + name: "VANDNPSMasked128", + argLen: 3, + asm: x86.AVANDNPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19633,15 +18777,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUB132PS128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUB132PS, + name: "VRCP14PSMasked128", + argLen: 2, + asm: x86.AVRCP14PS, reg: regInfo{ inputs: 
[]inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19649,15 +18791,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUB213PS128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUB213PS, + name: "VRSQRT14PSMasked128", + argLen: 2, + asm: x86.AVRSQRT14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19665,266 +18805,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUB231PS128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUB231PS, + name: "VDIVPSMasked128", + argLen: 3, + asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUBADD132PS128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD132PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUBADD213PS128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD213PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUBADD231PS128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD231PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMADD132PS128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMADD132PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 - }, - }, - }, - { - name: "VFNMADD213PS128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMADD213PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMADD231PS128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMADD231PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMSUB132PS128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMSUB132PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMSUB213PS128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMSUB213PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMSUB231PS128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMSUB231PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VADDPSMasked128", - argLen: 3, - commutative: true, - asm: x86.AVADDPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VANDPSMasked128", - argLen: 3, - commutative: true, - asm: x86.AVANDPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VANDNPSMasked128", - argLen: 3, - asm: x86.AVANDNPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VRCP14PSMasked128", - argLen: 2, - asm: x86.AVRCP14PS, - reg: regInfo{ - inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VRSQRT14PSMasked128", - argLen: 2, - asm: x86.AVRSQRT14PS, - reg: regInfo{ - inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VDIVPSMasked128", - argLen: 3, - asm: x86.AVDIVPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADD132PSMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADD132PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19948,40 +18836,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VFMADD231PSMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADD231PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADDSUB132PSMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADDSUB132PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VFMADDSUB213PSMasked128", argLen: 4, @@ -19999,91 +18853,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VFMADDSUB231PSMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADDSUB231PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 - }, - }, - }, - { - name: "VFMSUB132PSMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUB132PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUB213PSMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUB213PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUB231PSMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUB231PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUBADD132PSMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUBADD132PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VFMSUBADD213PSMasked128", argLen: 4, @@ -20101,125 +18870,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VFMSUBADD231PSMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUBADD231PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMADD132PSMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMADD132PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMADD213PSMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMADD213PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMADD231PSMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMADD231PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMSUB132PSMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMSUB132PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMSUB213PSMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMSUB213PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMSUB231PSMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMSUB231PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VMAXPSMasked128", argLen: 3, @@ -20374,1518 +19024,15 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VMULPS128", - argLen: 2, - commutative: true, - asm: x86.AVMULPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VSCALEFPS128", - argLen: 2, - asm: x86.AVSCALEFPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VORPS128", - argLen: 2, - commutative: true, - asm: x86.AVORPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VHADDPS128", - argLen: 2, - asm: x86.AVHADDPS, - reg: 
regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VHSUBPS128", - argLen: 2, - asm: x86.AVHSUBPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VSQRTPS128", - argLen: 1, - asm: x86.AVSQRTPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VSUBPS128", - argLen: 2, - asm: x86.AVSUBPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VXORPS128", - argLen: 2, - commutative: true, - asm: x86.AVXORPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VADDPS256", - argLen: 2, - commutative: true, - asm: x86.AVADDPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VADDSUBPS256", - argLen: 2, - asm: x86.AVADDSUBPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VANDPS256", - argLen: 2, - commutative: true, - asm: x86.AVANDPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VANDNPS256", - argLen: 2, - asm: x86.AVANDNPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VRCP14PS256", - argLen: 1, - asm: x86.AVRCP14PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VRSQRTPS256", - argLen: 1, - asm: x86.AVRSQRTPS, - reg: regInfo{ - inputs: []inputInfo{ - 
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VDIVPS256", - argLen: 2, - asm: x86.AVDIVPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADD132PS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADD132PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADD213PS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADD213PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADD231PS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADD231PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADDSUB132PS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB132PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADDSUB213PS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB213PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADDSUB231PS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB231PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUB132PS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUB132PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUB213PS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUB213PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUB231PS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUB231PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUBADD132PS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD132PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUBADD213PS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD213PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUBADD231PS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD231PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMADD132PS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMADD132PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMADD213PS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMADD213PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMADD231PS256", - argLen: 3, - 
resultInArg0: true, - asm: x86.AVFNMADD231PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMSUB132PS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMSUB132PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMSUB213PS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMSUB213PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMSUB231PS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMSUB231PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VADDPSMasked256", - argLen: 3, - commutative: true, - asm: x86.AVADDPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VANDPSMasked256", - argLen: 3, - commutative: true, - asm: x86.AVANDPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VANDNPSMasked256", - argLen: 3, - asm: x86.AVANDNPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VRCP14PSMasked256", - argLen: 2, - asm: x86.AVRCP14PS, - reg: regInfo{ - inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VRSQRT14PSMasked256", - argLen: 2, - asm: x86.AVRSQRT14PS, - reg: regInfo{ - inputs: []inputInfo{ - {1, 
1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VDIVPSMasked256", - argLen: 3, - asm: x86.AVDIVPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADD132PSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADD132PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADD213PSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADD213PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADD231PSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADD231PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADDSUB132PSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADDSUB132PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADDSUB213PSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADDSUB213PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADDSUB231PSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADDSUB231PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - 
{2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUB132PSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUB132PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUB213PSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUB213PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUB231PSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUB231PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUBADD132PSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUBADD132PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUBADD213PSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUBADD213PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUBADD231PSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUBADD231PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMADD132PSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMADD132PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMADD213PSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMADD213PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMADD231PSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMADD231PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMSUB132PSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMSUB132PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMSUB213PSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMSUB213PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMSUB231PSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMSUB231PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VMAXPSMasked256", - argLen: 3, - commutative: true, - asm: x86.AVMAXPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VMINPSMasked256", - argLen: 3, - commutative: true, - asm: x86.AVMINPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VMULPSMasked256", - argLen: 3, - commutative: true, - asm: x86.AVMULPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VSCALEFPSMasked256", - argLen: 3, - asm: x86.AVSCALEFPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VORPSMasked256", - argLen: 3, - commutative: true, - asm: x86.AVORPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VSQRTPSMasked256", - argLen: 2, - asm: x86.AVSQRTPS, - reg: regInfo{ - inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VSUBPSMasked256", - argLen: 3, - asm: x86.AVSUBPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VXORPSMasked256", - argLen: 3, - commutative: true, - asm: x86.AVXORPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VMAXPS256", - argLen: 2, - commutative: true, - asm: x86.AVMAXPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VMINPS256", - argLen: 2, - commutative: true, - asm: x86.AVMINPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VMULPS256", - argLen: 2, - commutative: true, - asm: x86.AVMULPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VSCALEFPS256", - argLen: 2, - asm: x86.AVSCALEFPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VORPS256", - argLen: 2, - commutative: true, - asm: x86.AVORPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VHADDPS256", - argLen: 2, - asm: x86.AVHADDPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VHSUBPS256", - argLen: 2, - asm: x86.AVHSUBPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VSQRTPS256", - argLen: 1, - asm: x86.AVSQRTPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VSUBPS256", - argLen: 2, - asm: x86.AVSUBPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VXORPS256", - argLen: 2, - commutative: true, - asm: x86.AVXORPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VADDPD128", - argLen: 2, - commutative: true, - asm: x86.AVADDPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VADDSUBPD128", - argLen: 2, - asm: x86.AVADDSUBPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VANDPD128", - argLen: 2, - commutative: true, - asm: x86.AVANDPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VANDNPD128", - argLen: 2, - asm: x86.AVANDNPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VRCP14PD128", - argLen: 1, - asm: x86.AVRCP14PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VRSQRT14PD128", - argLen: 1, - asm: x86.AVRSQRT14PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VDIVPD128", - argLen: 2, - asm: x86.AVDIVPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADD132PD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADD132PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADD213PD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADD213PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADD231PD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADD231PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADDSUB132PD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB132PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADDSUB213PD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB213PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADDSUB231PD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB231PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUB132PD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUB132PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUB213PD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUB213PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUB231PD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUB231PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUBADD132PD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD132PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUBADD213PD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD213PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUBADD231PD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD231PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - 
}, - }, - { - name: "VFNMADD132PD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMADD132PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMADD213PD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMADD213PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMADD231PD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMADD231PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMSUB132PD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMSUB132PD, + { + name: "VMULPS128", + argLen: 2, + commutative: true, + asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21893,15 +19040,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFNMSUB213PD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMSUB213PD, + name: "VSCALEFPS128", + argLen: 2, + asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21909,15 +19054,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFNMSUB231PD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMSUB231PD, + name: "VORPS128", + argLen: 2, + commutative: true, + asm: x86.AVORPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21925,15 +19069,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVADDPD, + name: "VHADDPS128", + argLen: 2, + asm: x86.AVHADDPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21941,15 +19083,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDPDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVANDPD, + name: "VHSUBPS128", + argLen: 2, + asm: x86.AVHSUBPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21957,14 +19097,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPDMasked128", - argLen: 3, - asm: x86.AVANDNPD, + name: "VSQRTPS128", + argLen: 1, + asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21972,13 +19110,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PDMasked128", + name: "VSUBPS128", argLen: 2, - asm: x86.AVRCP14PD, + asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21986,13 +19124,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PDMasked128", - argLen: 2, - asm: x86.AVRSQRT14PD, + name: "VXORPS128", + argLen: 2, + commutative: true, + asm: x86.AVXORPS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22000,14 +19139,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPDMasked128", - argLen: 3, - asm: x86.AVDIVPD, + name: "VADDPS256", + argLen: 2, + commutative: true, + asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22015,16 +19154,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD132PDMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADD132PD, + name: "VADDSUBPS256", + argLen: 2, + asm: x86.AVADDSUBPS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22032,16 +19168,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PDMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADD213PD, + name: "VANDPS256", + argLen: 2, + commutative: true, + asm: x86.AVANDPS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22049,16 +19183,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD231PDMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADD231PD, + name: "VANDNPS256", + argLen: 2, + asm: x86.AVANDNPS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22066,16 +19197,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB132PDMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADDSUB132PD, + name: "VRCP14PS256", + argLen: 1, + asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22083,16 +19210,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PDMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADDSUB213PD, + name: "VRSQRTPS256", + argLen: 1, + asm: x86.AVRSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22100,16 +19223,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB231PDMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADDSUB231PD, + name: "VDIVPS256", + argLen: 2, + asm: x86.AVDIVPS, reg: regInfo{ 
inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22117,16 +19237,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUB132PDMasked128", - argLen: 4, + name: "VFMADD213PS256", + argLen: 3, resultInArg0: true, - asm: x86.AVFMSUB132PD, + asm: x86.AVFMADD213PS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22134,16 +19253,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUB213PDMasked128", - argLen: 4, + name: "VFMADDSUB213PS256", + argLen: 3, resultInArg0: true, - asm: x86.AVFMSUB213PD, + asm: x86.AVFMADDSUB213PS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22151,16 +19269,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUB231PDMasked128", - argLen: 4, + name: "VFMSUBADD213PS256", + argLen: 3, resultInArg0: true, - asm: x86.AVFMSUB231PD, + asm: x86.AVFMSUBADD213PS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22168,16 +19285,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD132PDMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUBADD132PD, + name: "VADDPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22185,16 +19301,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PDMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUBADD213PD, + name: "VANDPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVANDPS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22202,16 +19317,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD231PDMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUBADD231PD, + name: "VANDNPSMasked256", + argLen: 3, + asm: x86.AVANDNPS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22219,16 +19332,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFNMADD132PDMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMADD132PD, + name: "VRCP14PSMasked256", + argLen: 2, + asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22236,16 +19346,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFNMADD213PDMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMADD213PD, + name: "VRSQRT14PSMasked256", + argLen: 2, + asm: x86.AVRSQRT14PS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22253,16 +19360,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFNMADD231PDMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMADD231PD, + name: "VDIVPSMasked256", + argLen: 3, + asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22270,10 +19375,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: 
"VFNMSUB132PDMasked128", + name: "VFMADD213PSMasked256", argLen: 4, resultInArg0: true, - asm: x86.AVFNMSUB132PD, + asm: x86.AVFMADD213PS, reg: regInfo{ inputs: []inputInfo{ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22287,10 +19392,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFNMSUB213PDMasked128", + name: "VFMADDSUB213PSMasked256", argLen: 4, resultInArg0: true, - asm: x86.AVFNMSUB213PD, + asm: x86.AVFMADDSUB213PS, reg: regInfo{ inputs: []inputInfo{ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22304,10 +19409,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFNMSUB231PDMasked128", + name: "VFMSUBADD213PSMasked256", argLen: 4, resultInArg0: true, - asm: x86.AVFNMSUB231PD, + asm: x86.AVFMSUBADD213PS, reg: regInfo{ inputs: []inputInfo{ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22321,10 +19426,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPDMasked128", + name: "VMAXPSMasked256", argLen: 3, commutative: true, - asm: x86.AVMAXPD, + asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22337,10 +19442,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPDMasked128", + name: "VMINPSMasked256", argLen: 3, commutative: true, - asm: x86.AVMINPD, + asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22353,10 +19458,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPDMasked128", + name: "VMULPSMasked256", argLen: 3, commutative: true, - asm: x86.AVMULPD, + asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22369,9 +19474,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPDMasked128", + name: "VSCALEFPSMasked256", argLen: 3, - asm: x86.AVSCALEFPD, + asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22384,10 +19489,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VORPDMasked128", + name: "VORPSMasked256", argLen: 3, commutative: true, - asm: x86.AVORPD, + asm: x86.AVORPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22400,9 +19505,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPDMasked128", + name: "VSQRTPSMasked256", argLen: 2, - asm: x86.AVSQRTPD, + asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22414,9 +19519,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPDMasked128", + name: "VSUBPSMasked256", argLen: 3, - asm: x86.AVSUBPD, + asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22429,10 +19534,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VXORPDMasked128", + name: "VXORPSMasked256", argLen: 3, commutative: true, - asm: x86.AVXORPD, + asm: x86.AVXORPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22445,10 +19550,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPD128", + name: "VMAXPS256", argLen: 2, commutative: true, - asm: x86.AVMAXPD, + asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22460,10 +19565,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPD128", + name: "VMINPS256", argLen: 2, commutative: true, - asm: x86.AVMINPD, + asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22475,10 +19580,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPD128", 
+ name: "VMULPS256", argLen: 2, commutative: true, - asm: x86.AVMULPD, + asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22490,9 +19595,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPD128", + name: "VSCALEFPS256", argLen: 2, - asm: x86.AVSCALEFPD, + asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22504,10 +19609,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VORPD128", + name: "VORPS256", argLen: 2, commutative: true, - asm: x86.AVORPD, + asm: x86.AVORPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22519,9 +19624,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VHADDPD128", + name: "VHADDPS256", argLen: 2, - asm: x86.AVHADDPD, + asm: x86.AVHADDPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22533,9 +19638,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VHSUBPD128", + name: "VHSUBPS256", argLen: 2, - asm: x86.AVHSUBPD, + asm: x86.AVHSUBPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22547,9 +19652,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPD128", + name: "VSQRTPS256", argLen: 1, - asm: x86.AVSQRTPD, + asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22560,9 +19665,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPD128", + name: "VSUBPS256", argLen: 2, - asm: x86.AVSUBPD, + asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22574,10 +19679,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VXORPD128", + name: "VXORPS256", argLen: 2, commutative: true, - asm: x86.AVXORPD, + asm: x86.AVXORPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22589,7 +19694,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPD256", + name: "VADDPD128", argLen: 2, commutative: true, asm: x86.AVADDPD, @@ -22604,7 +19709,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDSUBPD256", + name: "VADDSUBPD128", argLen: 2, asm: x86.AVADDSUBPD, reg: regInfo{ @@ -22618,7 +19723,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDPD256", + name: "VANDPD128", argLen: 2, commutative: true, asm: x86.AVANDPD, @@ -22633,7 +19738,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPD256", + name: "VANDNPD128", argLen: 2, asm: x86.AVANDNPD, reg: regInfo{ @@ -22647,7 +19752,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PD256", + name: "VRCP14PD128", argLen: 1, asm: x86.AVRCP14PD, reg: regInfo{ @@ -22660,7 +19765,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PD256", + name: "VRSQRT14PD128", argLen: 1, asm: x86.AVRSQRT14PD, reg: regInfo{ @@ -22673,7 +19778,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPD256", + name: "VDIVPD128", argLen: 2, asm: x86.AVDIVPD, reg: regInfo{ @@ -22687,10 +19792,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD132PD256", + name: "VFMADD213PD128", argLen: 3, resultInArg0: true, - asm: x86.AVFMADD132PD, + asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22703,10 +19808,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PD256", 
+ name: "VFMADDSUB213PD128", argLen: 3, resultInArg0: true, - asm: x86.AVFMADD213PD, + asm: x86.AVFMADDSUB213PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22719,10 +19824,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD231PD256", + name: "VFMSUBADD213PD128", argLen: 3, resultInArg0: true, - asm: x86.AVFMADD231PD, + asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22735,15 +19840,31 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB132PD256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB132PD, + name: "VADDPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VANDPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVANDPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22751,15 +19872,28 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PD256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB213PD, + name: "VANDNPDMasked128", + argLen: 3, + asm: x86.AVANDNPD, reg: regInfo{ inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VRCP14PDMasked128", + argLen: 2, + asm: x86.AVRCP14PD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22767,15 +19901,28 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB231PD256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB231PD, + name: "VRSQRT14PDMasked128", + argLen: 2, + asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VDIVPDMasked128", + argLen: 3, + asm: x86.AVDIVPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22783,15 +19930,33 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUB132PD256", - argLen: 3, + name: "VFMADD213PDMasked128", + argLen: 4, resultInArg0: true, - asm: x86.AVFMSUB132PD, + asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADDSUB213PDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADDSUB213PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22799,15 +19964,48 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUB213PD256", - argLen: 3, + name: "VFMSUBADD213PDMasked128", + argLen: 4, resultInArg0: true, - asm: x86.AVFMSUB213PD, + asm: x86.AVFMSUBADD213PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMAXPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMINPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVMINPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22815,15 +20013,30 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUB231PD256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUB231PD, + name: "VMULPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VSCALEFPDMasked128", + argLen: 3, + asm: x86.AVSCALEFPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22831,15 +20044,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD132PD256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD132PD, + name: "VORPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVORPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22847,15 +20060,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PD256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD213PD, + name: "VSQRTPDMasked128", + argLen: 2, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22863,15 +20074,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD231PD256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD231PD, + name: "VSUBPDMasked128", + argLen: 3, + asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22879,15 +20089,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFNMADD132PD256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMADD132PD, + name: "VXORPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVXORPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22895,15 +20105,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFNMADD213PD256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMADD213PD, + name: "VMAXPD128", + argLen: 2, + commutative: true, + asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22911,15 +20120,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFNMADD231PD256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMADD231PD, + name: "VMINPD128", + argLen: 2, + commutative: true, + asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22927,15 +20135,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFNMSUB132PD256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMSUB132PD, + name: "VMULPD128", + argLen: 2, + commutative: true, + asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22943,15 +20150,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFNMSUB213PD256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMSUB213PD, + name: "VSCALEFPD128", + argLen: 2, + asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22959,15 +20164,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFNMSUB231PD256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMSUB231PD, + name: "VORPD128", + argLen: 2, + commutative: true, + asm: x86.AVORPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22975,15 +20179,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVADDPD, + name: "VHADDPD128", + argLen: 2, + asm: x86.AVHADDPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ 
-22991,15 +20193,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDPDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVANDPD, + name: "VHSUBPD128", + argLen: 2, + asm: x86.AVHSUBPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23007,14 +20207,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPDMasked256", - argLen: 3, - asm: x86.AVANDNPD, + name: "VSQRTPD128", + argLen: 1, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23022,13 +20220,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PDMasked256", + name: "VSUBPD128", argLen: 2, - asm: x86.AVRCP14PD, + asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23036,13 +20234,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PDMasked256", - argLen: 2, - asm: x86.AVRSQRT14PD, + name: "VXORPD128", + argLen: 2, + commutative: true, + asm: x86.AVXORPD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23050,14 +20249,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPDMasked256", - argLen: 3, - asm: x86.AVDIVPD, + name: "VADDPD256", + argLen: 2, + commutative: true, + asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23065,16 +20264,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD132PDMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADD132PD, + name: "VADDSUBPD256", + argLen: 2, + asm: x86.AVADDSUBPD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23082,16 +20278,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PDMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADD213PD, + name: "VANDPD256", + argLen: 2, + commutative: true, + asm: x86.AVANDPD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23099,16 +20293,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD231PDMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADD231PD, + name: "VANDNPD256", + argLen: 2, + asm: x86.AVANDNPD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23116,16 +20307,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB132PDMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADDSUB132PD, + name: "VRCP14PD256", + argLen: 1, + asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23133,16 +20320,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PDMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADDSUB213PD, + name: "VRSQRT14PD256", + argLen: 1, + asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23150,16 +20333,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB231PDMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADDSUB231PD, + name: "VDIVPD256", + argLen: 2, + asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23167,16 +20347,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUB132PDMasked256", - argLen: 4, + name: "VFMADD213PD256", + argLen: 3, resultInArg0: true, - asm: x86.AVFMSUB132PD, + asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23184,16 +20363,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUB213PDMasked256", - argLen: 4, + name: "VFMADDSUB213PD256", + argLen: 3, resultInArg0: true, - asm: x86.AVFMSUB213PD, + asm: x86.AVFMADDSUB213PD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23201,16 +20379,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUB231PDMasked256", - argLen: 4, + name: "VFMSUBADD213PD256", + argLen: 3, resultInArg0: true, - asm: x86.AVFMSUB231PD, + asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23218,16 +20395,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD132PDMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUBADD132PD, + name: "VADDPDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23235,16 
+20411,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PDMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUBADD213PD, + name: "VANDPDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVANDPD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23252,16 +20427,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD231PDMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUBADD231PD, + name: "VANDNPDMasked256", + argLen: 3, + asm: x86.AVANDNPD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23269,16 +20442,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFNMADD132PDMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMADD132PD, + name: "VRCP14PDMasked256", + argLen: 2, + asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23286,16 +20456,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFNMADD213PDMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMADD213PD, + name: "VRSQRT14PDMasked256", + argLen: 2, + asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23303,16 +20470,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFNMADD231PDMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMADD231PD, + name: "VDIVPDMasked256", + argLen: 3, + asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23320,10 +20485,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFNMSUB132PDMasked256", + name: "VFMADD213PDMasked256", argLen: 4, resultInArg0: true, - asm: x86.AVFNMSUB132PD, + asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ {3, 
1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23337,10 +20502,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFNMSUB213PDMasked256", + name: "VFMADDSUB213PDMasked256", argLen: 4, resultInArg0: true, - asm: x86.AVFNMSUB213PD, + asm: x86.AVFMADDSUB213PD, reg: regInfo{ inputs: []inputInfo{ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23354,10 +20519,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFNMSUB231PDMasked256", + name: "VFMSUBADD213PDMasked256", argLen: 4, resultInArg0: true, - asm: x86.AVFNMSUB231PD, + asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23722,22 +20887,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VFMADD132PD512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADD132PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VFMADD213PD512", argLen: 3, @@ -23754,38 +20903,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VFMADD231PD512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADD231PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADDSUB132PD512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB132PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VFMADDSUB213PD512", argLen: 3, @@ -23802,86 +20919,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VFMADDSUB231PD512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB231PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUB132PD512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUB132PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUB213PD512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUB213PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUB231PD512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUB231PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUBADD132PD512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD132PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VFMSUBADD213PD512", argLen: 3, @@ -23898,118 +20935,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VFMSUBADD231PD512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD231PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMADD132PD512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMADD132PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMADD213PD512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMADD213PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMADD231PD512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMADD231PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMSUB132PD512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMSUB132PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMSUB213PD512", - argLen: 3, - 
resultInArg0: true, - asm: x86.AVFNMSUB213PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMSUB231PD512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMSUB231PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VADDPDMasked512", argLen: 3, @@ -24100,23 +21025,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VFMADD132PDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADD132PD, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VFMADD213PDMasked512", argLen: 4, @@ -24134,40 +21042,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VFMADD231PDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADD231PD, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADDSUB132PDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADDSUB132PD, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VFMADDSUB213PDMasked512", argLen: 4, @@ -24185,91 +21059,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VFMADDSUB231PDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADDSUB231PD, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUB132PDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUB132PD, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUB213PDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUB213PD, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUB231PDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUB231PD, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUBADD132PDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUBADD132PD, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VFMSUBADD213PDMasked512", argLen: 4, @@ -24287,125 +21076,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VFMSUBADD231PDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUBADD231PD, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMADD132PDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMADD132PD, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMADD213PDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMADD213PD, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMADD231PDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMADD231PD, - reg: 
regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMSUB132PDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMSUB132PD, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMSUB213PDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMSUB213PD, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMSUB231PDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMSUB231PD, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VMAXPDMasked512", argLen: 3, @@ -58946,92 +55616,17 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "FusedMultiplyAdd132Float32x16", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAdd213Float32x16", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAdd231Float32x16", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddSub132Float32x16", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddSub213Float32x16", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddSub231Float32x16", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySub132Float32x16", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySub213Float32x16", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySub231Float32x16", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySubAdd132Float32x16", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySubAdd213Float32x16", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySubAdd231Float32x16", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplyAdd132Float32x16", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplyAdd213Float32x16", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplyAdd231Float32x16", + name: "FusedMultiplyAddFloat32x16", argLen: 3, generic: true, }, { - name: "FusedNegativeMultiplySub132Float32x16", + name: "FusedMultiplyAddSubFloat32x16", argLen: 3, generic: true, }, { - name: "FusedNegativeMultiplySub213Float32x16", - argLen: 3, - generic: true, - }, - { 
- name: "FusedNegativeMultiplySub231Float32x16", + name: "FusedMultiplySubAddFloat32x16", argLen: 3, generic: true, }, @@ -59100,92 +55695,17 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MaskedFusedMultiplyAdd132Float32x16", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAdd213Float32x16", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAdd231Float32x16", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAddSub132Float32x16", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAddSub213Float32x16", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAddSub231Float32x16", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySub132Float32x16", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySub213Float32x16", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySub231Float32x16", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySubAdd132Float32x16", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySubAdd213Float32x16", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySubAdd231Float32x16", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplyAdd132Float32x16", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplyAdd213Float32x16", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplyAdd231Float32x16", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplySub132Float32x16", + name: "MaskedFusedMultiplyAddFloat32x16", argLen: 4, generic: true, }, { - name: "MaskedFusedNegativeMultiplySub213Float32x16", + name: "MaskedFusedMultiplyAddSubFloat32x16", argLen: 4, generic: true, }, { - name: "MaskedFusedNegativeMultiplySub231Float32x16", + name: "MaskedFusedMultiplySubAddFloat32x16", argLen: 4, generic: true, }, @@ -59371,92 +55891,17 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "FusedMultiplyAdd132Float32x4", + name: "FusedMultiplyAddFloat32x4", argLen: 3, generic: true, }, { - name: "FusedMultiplyAdd213Float32x4", + name: "FusedMultiplyAddSubFloat32x4", argLen: 3, generic: true, }, { - name: "FusedMultiplyAdd231Float32x4", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddSub132Float32x4", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddSub213Float32x4", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddSub231Float32x4", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySub132Float32x4", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySub213Float32x4", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySub231Float32x4", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySubAdd132Float32x4", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySubAdd213Float32x4", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySubAdd231Float32x4", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplyAdd132Float32x4", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplyAdd213Float32x4", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplyAdd231Float32x4", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplySub132Float32x4", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplySub213Float32x4", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplySub231Float32x4", + name: "FusedMultiplySubAddFloat32x4", 
argLen: 3, generic: true, }, @@ -59525,92 +55970,17 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MaskedFusedMultiplyAdd132Float32x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAdd213Float32x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAdd231Float32x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAddSub132Float32x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAddSub213Float32x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAddSub231Float32x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySub132Float32x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySub213Float32x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySub231Float32x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySubAdd132Float32x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySubAdd213Float32x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySubAdd231Float32x4", + name: "MaskedFusedMultiplyAddFloat32x4", argLen: 4, generic: true, }, { - name: "MaskedFusedNegativeMultiplyAdd132Float32x4", + name: "MaskedFusedMultiplyAddSubFloat32x4", argLen: 4, generic: true, }, { - name: "MaskedFusedNegativeMultiplyAdd213Float32x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplyAdd231Float32x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplySub132Float32x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplySub213Float32x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplySub231Float32x4", + name: "MaskedFusedMultiplySubAddFloat32x4", argLen: 4, generic: true, }, @@ -59816,92 +56186,17 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "FusedMultiplyAdd132Float32x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAdd213Float32x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAdd231Float32x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddSub132Float32x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddSub213Float32x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddSub231Float32x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySub132Float32x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySub213Float32x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySub231Float32x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySubAdd132Float32x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySubAdd213Float32x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySubAdd231Float32x8", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplyAdd132Float32x8", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplyAdd213Float32x8", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplyAdd231Float32x8", + name: "FusedMultiplyAddFloat32x8", argLen: 3, generic: true, }, { - name: "FusedNegativeMultiplySub132Float32x8", + name: "FusedMultiplyAddSubFloat32x8", argLen: 3, generic: true, }, { - name: "FusedNegativeMultiplySub213Float32x8", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplySub231Float32x8", + name: "FusedMultiplySubAddFloat32x8", argLen: 3, generic: true, }, @@ -59970,92 +56265,17 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: 
"MaskedFusedMultiplyAdd132Float32x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAdd213Float32x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAdd231Float32x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAddSub132Float32x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAddSub213Float32x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAddSub231Float32x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySub132Float32x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySub213Float32x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySub231Float32x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySubAdd132Float32x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySubAdd213Float32x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySubAdd231Float32x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplyAdd132Float32x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplyAdd213Float32x8", + name: "MaskedFusedMultiplyAddFloat32x8", argLen: 4, generic: true, }, { - name: "MaskedFusedNegativeMultiplyAdd231Float32x8", + name: "MaskedFusedMultiplyAddSubFloat32x8", argLen: 4, generic: true, }, { - name: "MaskedFusedNegativeMultiplySub132Float32x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplySub213Float32x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplySub231Float32x8", + name: "MaskedFusedMultiplySubAddFloat32x8", argLen: 4, generic: true, }, @@ -60267,92 +56487,17 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "FusedMultiplyAdd132Float64x2", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAdd213Float64x2", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAdd231Float64x2", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddSub132Float64x2", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddSub213Float64x2", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddSub231Float64x2", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySub132Float64x2", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySub213Float64x2", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySub231Float64x2", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySubAdd132Float64x2", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySubAdd213Float64x2", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySubAdd231Float64x2", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplyAdd132Float64x2", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplyAdd213Float64x2", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplyAdd231Float64x2", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplySub132Float64x2", + name: "FusedMultiplyAddFloat64x2", argLen: 3, generic: true, }, { - name: "FusedNegativeMultiplySub213Float64x2", + name: "FusedMultiplyAddSubFloat64x2", argLen: 3, generic: true, }, { - name: "FusedNegativeMultiplySub231Float64x2", + name: "FusedMultiplySubAddFloat64x2", argLen: 3, generic: true, }, @@ -60421,92 +56566,17 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MaskedFusedMultiplyAdd132Float64x2", + name: "MaskedFusedMultiplyAddFloat64x2", argLen: 4, generic: true, }, { - 
name: "MaskedFusedMultiplyAdd213Float64x2", + name: "MaskedFusedMultiplyAddSubFloat64x2", argLen: 4, generic: true, }, { - name: "MaskedFusedMultiplyAdd231Float64x2", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAddSub132Float64x2", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAddSub213Float64x2", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAddSub231Float64x2", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySub132Float64x2", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySub213Float64x2", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySub231Float64x2", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySubAdd132Float64x2", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySubAdd213Float64x2", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySubAdd231Float64x2", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplyAdd132Float64x2", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplyAdd213Float64x2", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplyAdd231Float64x2", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplySub132Float64x2", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplySub213Float64x2", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplySub231Float64x2", + name: "MaskedFusedMultiplySubAddFloat64x2", argLen: 4, generic: true, }, @@ -60712,92 +56782,17 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "FusedMultiplyAdd132Float64x4", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAdd213Float64x4", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAdd231Float64x4", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddSub132Float64x4", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddSub213Float64x4", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddSub231Float64x4", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySub132Float64x4", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySub213Float64x4", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySub231Float64x4", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySubAdd132Float64x4", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySubAdd213Float64x4", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySubAdd231Float64x4", + name: "FusedMultiplyAddFloat64x4", argLen: 3, generic: true, }, { - name: "FusedNegativeMultiplyAdd132Float64x4", + name: "FusedMultiplyAddSubFloat64x4", argLen: 3, generic: true, }, { - name: "FusedNegativeMultiplyAdd213Float64x4", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplyAdd231Float64x4", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplySub132Float64x4", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplySub213Float64x4", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplySub231Float64x4", + name: "FusedMultiplySubAddFloat64x4", argLen: 3, generic: true, }, @@ -60866,92 +56861,17 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MaskedFusedMultiplyAdd132Float64x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAdd213Float64x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAdd231Float64x4", - argLen: 4, - 
generic: true, - }, - { - name: "MaskedFusedMultiplyAddSub132Float64x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAddSub213Float64x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAddSub231Float64x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySub132Float64x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySub213Float64x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySub231Float64x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySubAdd132Float64x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySubAdd213Float64x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySubAdd231Float64x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplyAdd132Float64x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplyAdd213Float64x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplyAdd231Float64x4", + name: "MaskedFusedMultiplyAddFloat64x4", argLen: 4, generic: true, }, { - name: "MaskedFusedNegativeMultiplySub132Float64x4", + name: "MaskedFusedMultiplyAddSubFloat64x4", argLen: 4, generic: true, }, { - name: "MaskedFusedNegativeMultiplySub213Float64x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplySub231Float64x4", + name: "MaskedFusedMultiplySubAddFloat64x4", argLen: 4, generic: true, }, @@ -61142,92 +57062,17 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "FusedMultiplyAdd132Float64x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAdd213Float64x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAdd231Float64x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddSub132Float64x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddSub213Float64x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddSub231Float64x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySub132Float64x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySub213Float64x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySub231Float64x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySubAdd132Float64x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySubAdd213Float64x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySubAdd231Float64x8", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplyAdd132Float64x8", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplyAdd213Float64x8", + name: "FusedMultiplyAddFloat64x8", argLen: 3, generic: true, }, { - name: "FusedNegativeMultiplyAdd231Float64x8", + name: "FusedMultiplyAddSubFloat64x8", argLen: 3, generic: true, }, { - name: "FusedNegativeMultiplySub132Float64x8", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplySub213Float64x8", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplySub231Float64x8", + name: "FusedMultiplySubAddFloat64x8", argLen: 3, generic: true, }, @@ -61296,92 +57141,17 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MaskedFusedMultiplyAdd132Float64x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAdd213Float64x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAdd231Float64x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAddSub132Float64x8", - argLen: 4, - generic: true, - }, - { - 
name: "MaskedFusedMultiplyAddSub213Float64x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAddSub231Float64x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySub132Float64x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySub213Float64x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySub231Float64x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySubAdd132Float64x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySubAdd213Float64x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySubAdd231Float64x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplyAdd132Float64x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplyAdd213Float64x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplyAdd231Float64x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplySub132Float64x8", + name: "MaskedFusedMultiplyAddFloat64x8", argLen: 4, generic: true, }, { - name: "MaskedFusedNegativeMultiplySub213Float64x8", + name: "MaskedFusedMultiplyAddSubFloat64x8", argLen: 4, generic: true, }, { - name: "MaskedFusedNegativeMultiplySub231Float64x8", + name: "MaskedFusedMultiplySubAddFloat64x8", argLen: 4, generic: true, }, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 73b873be93be0b..c532b2caa3084b 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1385,330 +1385,60 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpFloorWithPrecisionFloat64x4(v) case OpFloorWithPrecisionFloat64x8: return rewriteValueAMD64_OpFloorWithPrecisionFloat64x8(v) - case OpFusedMultiplyAdd132Float32x16: - v.Op = OpAMD64VFMADD132PS512 - return true - case OpFusedMultiplyAdd132Float32x4: - v.Op = OpAMD64VFMADD132PS128 - return true - case OpFusedMultiplyAdd132Float32x8: - v.Op = OpAMD64VFMADD132PS256 - return true - case OpFusedMultiplyAdd132Float64x2: - v.Op = OpAMD64VFMADD132PD128 - return true - case OpFusedMultiplyAdd132Float64x4: - v.Op = OpAMD64VFMADD132PD256 - return true - case OpFusedMultiplyAdd132Float64x8: - v.Op = OpAMD64VFMADD132PD512 - return true - case OpFusedMultiplyAdd213Float32x16: + case OpFusedMultiplyAddFloat32x16: v.Op = OpAMD64VFMADD213PS512 return true - case OpFusedMultiplyAdd213Float32x4: + case OpFusedMultiplyAddFloat32x4: v.Op = OpAMD64VFMADD213PS128 return true - case OpFusedMultiplyAdd213Float32x8: + case OpFusedMultiplyAddFloat32x8: v.Op = OpAMD64VFMADD213PS256 return true - case OpFusedMultiplyAdd213Float64x2: + case OpFusedMultiplyAddFloat64x2: v.Op = OpAMD64VFMADD213PD128 return true - case OpFusedMultiplyAdd213Float64x4: + case OpFusedMultiplyAddFloat64x4: v.Op = OpAMD64VFMADD213PD256 return true - case OpFusedMultiplyAdd213Float64x8: + case OpFusedMultiplyAddFloat64x8: v.Op = OpAMD64VFMADD213PD512 return true - case OpFusedMultiplyAdd231Float32x16: - v.Op = OpAMD64VFMADD231PS512 - return true - case OpFusedMultiplyAdd231Float32x4: - v.Op = OpAMD64VFMADD231PS128 - return true - case OpFusedMultiplyAdd231Float32x8: - v.Op = OpAMD64VFMADD231PS256 - return true - case OpFusedMultiplyAdd231Float64x2: - v.Op = OpAMD64VFMADD231PD128 - return true - case OpFusedMultiplyAdd231Float64x4: - v.Op = OpAMD64VFMADD231PD256 - return true - case OpFusedMultiplyAdd231Float64x8: - v.Op = OpAMD64VFMADD231PD512 - return true - case 
OpFusedMultiplyAddSub132Float32x16: - v.Op = OpAMD64VFMADDSUB132PS512 - return true - case OpFusedMultiplyAddSub132Float32x4: - v.Op = OpAMD64VFMADDSUB132PS128 - return true - case OpFusedMultiplyAddSub132Float32x8: - v.Op = OpAMD64VFMADDSUB132PS256 - return true - case OpFusedMultiplyAddSub132Float64x2: - v.Op = OpAMD64VFMADDSUB132PD128 - return true - case OpFusedMultiplyAddSub132Float64x4: - v.Op = OpAMD64VFMADDSUB132PD256 - return true - case OpFusedMultiplyAddSub132Float64x8: - v.Op = OpAMD64VFMADDSUB132PD512 - return true - case OpFusedMultiplyAddSub213Float32x16: + case OpFusedMultiplyAddSubFloat32x16: v.Op = OpAMD64VFMADDSUB213PS512 return true - case OpFusedMultiplyAddSub213Float32x4: + case OpFusedMultiplyAddSubFloat32x4: v.Op = OpAMD64VFMADDSUB213PS128 return true - case OpFusedMultiplyAddSub213Float32x8: + case OpFusedMultiplyAddSubFloat32x8: v.Op = OpAMD64VFMADDSUB213PS256 return true - case OpFusedMultiplyAddSub213Float64x2: + case OpFusedMultiplyAddSubFloat64x2: v.Op = OpAMD64VFMADDSUB213PD128 return true - case OpFusedMultiplyAddSub213Float64x4: + case OpFusedMultiplyAddSubFloat64x4: v.Op = OpAMD64VFMADDSUB213PD256 return true - case OpFusedMultiplyAddSub213Float64x8: + case OpFusedMultiplyAddSubFloat64x8: v.Op = OpAMD64VFMADDSUB213PD512 return true - case OpFusedMultiplyAddSub231Float32x16: - v.Op = OpAMD64VFMADDSUB231PS512 - return true - case OpFusedMultiplyAddSub231Float32x4: - v.Op = OpAMD64VFMADDSUB231PS128 - return true - case OpFusedMultiplyAddSub231Float32x8: - v.Op = OpAMD64VFMADDSUB231PS256 - return true - case OpFusedMultiplyAddSub231Float64x2: - v.Op = OpAMD64VFMADDSUB231PD128 - return true - case OpFusedMultiplyAddSub231Float64x4: - v.Op = OpAMD64VFMADDSUB231PD256 - return true - case OpFusedMultiplyAddSub231Float64x8: - v.Op = OpAMD64VFMADDSUB231PD512 - return true - case OpFusedMultiplySub132Float32x16: - v.Op = OpAMD64VFMSUB132PS512 - return true - case OpFusedMultiplySub132Float32x4: - v.Op = OpAMD64VFMSUB132PS128 - return true - case OpFusedMultiplySub132Float32x8: - v.Op = OpAMD64VFMSUB132PS256 - return true - case OpFusedMultiplySub132Float64x2: - v.Op = OpAMD64VFMSUB132PD128 - return true - case OpFusedMultiplySub132Float64x4: - v.Op = OpAMD64VFMSUB132PD256 - return true - case OpFusedMultiplySub132Float64x8: - v.Op = OpAMD64VFMSUB132PD512 - return true - case OpFusedMultiplySub213Float32x16: - v.Op = OpAMD64VFMSUB213PS512 - return true - case OpFusedMultiplySub213Float32x4: - v.Op = OpAMD64VFMSUB213PS128 - return true - case OpFusedMultiplySub213Float32x8: - v.Op = OpAMD64VFMSUB213PS256 - return true - case OpFusedMultiplySub213Float64x2: - v.Op = OpAMD64VFMSUB213PD128 - return true - case OpFusedMultiplySub213Float64x4: - v.Op = OpAMD64VFMSUB213PD256 - return true - case OpFusedMultiplySub213Float64x8: - v.Op = OpAMD64VFMSUB213PD512 - return true - case OpFusedMultiplySub231Float32x16: - v.Op = OpAMD64VFMSUB231PS512 - return true - case OpFusedMultiplySub231Float32x4: - v.Op = OpAMD64VFMSUB231PS128 - return true - case OpFusedMultiplySub231Float32x8: - v.Op = OpAMD64VFMSUB231PS256 - return true - case OpFusedMultiplySub231Float64x2: - v.Op = OpAMD64VFMSUB231PD128 - return true - case OpFusedMultiplySub231Float64x4: - v.Op = OpAMD64VFMSUB231PD256 - return true - case OpFusedMultiplySub231Float64x8: - v.Op = OpAMD64VFMSUB231PD512 - return true - case OpFusedMultiplySubAdd132Float32x16: - v.Op = OpAMD64VFMSUBADD132PS512 - return true - case OpFusedMultiplySubAdd132Float32x4: - v.Op = OpAMD64VFMSUBADD132PS128 - return true - case 
OpFusedMultiplySubAdd132Float32x8: - v.Op = OpAMD64VFMSUBADD132PS256 - return true - case OpFusedMultiplySubAdd132Float64x2: - v.Op = OpAMD64VFMSUBADD132PD128 - return true - case OpFusedMultiplySubAdd132Float64x4: - v.Op = OpAMD64VFMSUBADD132PD256 - return true - case OpFusedMultiplySubAdd132Float64x8: - v.Op = OpAMD64VFMSUBADD132PD512 - return true - case OpFusedMultiplySubAdd213Float32x16: + case OpFusedMultiplySubAddFloat32x16: v.Op = OpAMD64VFMSUBADD213PS512 return true - case OpFusedMultiplySubAdd213Float32x4: + case OpFusedMultiplySubAddFloat32x4: v.Op = OpAMD64VFMSUBADD213PS128 return true - case OpFusedMultiplySubAdd213Float32x8: + case OpFusedMultiplySubAddFloat32x8: v.Op = OpAMD64VFMSUBADD213PS256 return true - case OpFusedMultiplySubAdd213Float64x2: + case OpFusedMultiplySubAddFloat64x2: v.Op = OpAMD64VFMSUBADD213PD128 return true - case OpFusedMultiplySubAdd213Float64x4: + case OpFusedMultiplySubAddFloat64x4: v.Op = OpAMD64VFMSUBADD213PD256 return true - case OpFusedMultiplySubAdd213Float64x8: + case OpFusedMultiplySubAddFloat64x8: v.Op = OpAMD64VFMSUBADD213PD512 return true - case OpFusedMultiplySubAdd231Float32x16: - v.Op = OpAMD64VFMSUBADD231PS512 - return true - case OpFusedMultiplySubAdd231Float32x4: - v.Op = OpAMD64VFMSUBADD231PS128 - return true - case OpFusedMultiplySubAdd231Float32x8: - v.Op = OpAMD64VFMSUBADD231PS256 - return true - case OpFusedMultiplySubAdd231Float64x2: - v.Op = OpAMD64VFMSUBADD231PD128 - return true - case OpFusedMultiplySubAdd231Float64x4: - v.Op = OpAMD64VFMSUBADD231PD256 - return true - case OpFusedMultiplySubAdd231Float64x8: - v.Op = OpAMD64VFMSUBADD231PD512 - return true - case OpFusedNegativeMultiplyAdd132Float32x16: - v.Op = OpAMD64VFNMADD132PS512 - return true - case OpFusedNegativeMultiplyAdd132Float32x4: - v.Op = OpAMD64VFNMADD132PS128 - return true - case OpFusedNegativeMultiplyAdd132Float32x8: - v.Op = OpAMD64VFNMADD132PS256 - return true - case OpFusedNegativeMultiplyAdd132Float64x2: - v.Op = OpAMD64VFNMADD132PD128 - return true - case OpFusedNegativeMultiplyAdd132Float64x4: - v.Op = OpAMD64VFNMADD132PD256 - return true - case OpFusedNegativeMultiplyAdd132Float64x8: - v.Op = OpAMD64VFNMADD132PD512 - return true - case OpFusedNegativeMultiplyAdd213Float32x16: - v.Op = OpAMD64VFNMADD213PS512 - return true - case OpFusedNegativeMultiplyAdd213Float32x4: - v.Op = OpAMD64VFNMADD213PS128 - return true - case OpFusedNegativeMultiplyAdd213Float32x8: - v.Op = OpAMD64VFNMADD213PS256 - return true - case OpFusedNegativeMultiplyAdd213Float64x2: - v.Op = OpAMD64VFNMADD213PD128 - return true - case OpFusedNegativeMultiplyAdd213Float64x4: - v.Op = OpAMD64VFNMADD213PD256 - return true - case OpFusedNegativeMultiplyAdd213Float64x8: - v.Op = OpAMD64VFNMADD213PD512 - return true - case OpFusedNegativeMultiplyAdd231Float32x16: - v.Op = OpAMD64VFNMADD231PS512 - return true - case OpFusedNegativeMultiplyAdd231Float32x4: - v.Op = OpAMD64VFNMADD231PS128 - return true - case OpFusedNegativeMultiplyAdd231Float32x8: - v.Op = OpAMD64VFNMADD231PS256 - return true - case OpFusedNegativeMultiplyAdd231Float64x2: - v.Op = OpAMD64VFNMADD231PD128 - return true - case OpFusedNegativeMultiplyAdd231Float64x4: - v.Op = OpAMD64VFNMADD231PD256 - return true - case OpFusedNegativeMultiplyAdd231Float64x8: - v.Op = OpAMD64VFNMADD231PD512 - return true - case OpFusedNegativeMultiplySub132Float32x16: - v.Op = OpAMD64VFNMSUB132PS512 - return true - case OpFusedNegativeMultiplySub132Float32x4: - v.Op = OpAMD64VFNMSUB132PS128 - return true - case 
OpFusedNegativeMultiplySub132Float32x8: - v.Op = OpAMD64VFNMSUB132PS256 - return true - case OpFusedNegativeMultiplySub132Float64x2: - v.Op = OpAMD64VFNMSUB132PD128 - return true - case OpFusedNegativeMultiplySub132Float64x4: - v.Op = OpAMD64VFNMSUB132PD256 - return true - case OpFusedNegativeMultiplySub132Float64x8: - v.Op = OpAMD64VFNMSUB132PD512 - return true - case OpFusedNegativeMultiplySub213Float32x16: - v.Op = OpAMD64VFNMSUB213PS512 - return true - case OpFusedNegativeMultiplySub213Float32x4: - v.Op = OpAMD64VFNMSUB213PS128 - return true - case OpFusedNegativeMultiplySub213Float32x8: - v.Op = OpAMD64VFNMSUB213PS256 - return true - case OpFusedNegativeMultiplySub213Float64x2: - v.Op = OpAMD64VFNMSUB213PD128 - return true - case OpFusedNegativeMultiplySub213Float64x4: - v.Op = OpAMD64VFNMSUB213PD256 - return true - case OpFusedNegativeMultiplySub213Float64x8: - v.Op = OpAMD64VFNMSUB213PD512 - return true - case OpFusedNegativeMultiplySub231Float32x16: - v.Op = OpAMD64VFNMSUB231PS512 - return true - case OpFusedNegativeMultiplySub231Float32x4: - v.Op = OpAMD64VFNMSUB231PS128 - return true - case OpFusedNegativeMultiplySub231Float32x8: - v.Op = OpAMD64VFNMSUB231PS256 - return true - case OpFusedNegativeMultiplySub231Float64x2: - v.Op = OpAMD64VFNMSUB231PD128 - return true - case OpFusedNegativeMultiplySub231Float64x4: - v.Op = OpAMD64VFNMSUB231PD256 - return true - case OpFusedNegativeMultiplySub231Float64x8: - v.Op = OpAMD64VFNMSUB231PD512 - return true case OpGetCallerPC: v.Op = OpAMD64LoweredGetCallerPC return true @@ -2486,222 +2216,42 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat64x4(v) case OpMaskedFloorWithPrecisionFloat64x8: return rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat64x8(v) - case OpMaskedFusedMultiplyAdd132Float32x16: - return rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float32x16(v) - case OpMaskedFusedMultiplyAdd132Float32x4: - return rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float32x4(v) - case OpMaskedFusedMultiplyAdd132Float32x8: - return rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float32x8(v) - case OpMaskedFusedMultiplyAdd132Float64x2: - return rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float64x2(v) - case OpMaskedFusedMultiplyAdd132Float64x4: - return rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float64x4(v) - case OpMaskedFusedMultiplyAdd132Float64x8: - return rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float64x8(v) - case OpMaskedFusedMultiplyAdd213Float32x16: - return rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float32x16(v) - case OpMaskedFusedMultiplyAdd213Float32x4: - return rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float32x4(v) - case OpMaskedFusedMultiplyAdd213Float32x8: - return rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float32x8(v) - case OpMaskedFusedMultiplyAdd213Float64x2: - return rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float64x2(v) - case OpMaskedFusedMultiplyAdd213Float64x4: - return rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float64x4(v) - case OpMaskedFusedMultiplyAdd213Float64x8: - return rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float64x8(v) - case OpMaskedFusedMultiplyAdd231Float32x16: - return rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float32x16(v) - case OpMaskedFusedMultiplyAdd231Float32x4: - return rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float32x4(v) - case OpMaskedFusedMultiplyAdd231Float32x8: - return rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float32x8(v) - case OpMaskedFusedMultiplyAdd231Float64x2: - return 
rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float64x2(v) - case OpMaskedFusedMultiplyAdd231Float64x4: - return rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float64x4(v) - case OpMaskedFusedMultiplyAdd231Float64x8: - return rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float64x8(v) - case OpMaskedFusedMultiplyAddSub132Float32x16: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float32x16(v) - case OpMaskedFusedMultiplyAddSub132Float32x4: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float32x4(v) - case OpMaskedFusedMultiplyAddSub132Float32x8: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float32x8(v) - case OpMaskedFusedMultiplyAddSub132Float64x2: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float64x2(v) - case OpMaskedFusedMultiplyAddSub132Float64x4: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float64x4(v) - case OpMaskedFusedMultiplyAddSub132Float64x8: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float64x8(v) - case OpMaskedFusedMultiplyAddSub213Float32x16: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float32x16(v) - case OpMaskedFusedMultiplyAddSub213Float32x4: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float32x4(v) - case OpMaskedFusedMultiplyAddSub213Float32x8: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float32x8(v) - case OpMaskedFusedMultiplyAddSub213Float64x2: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float64x2(v) - case OpMaskedFusedMultiplyAddSub213Float64x4: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float64x4(v) - case OpMaskedFusedMultiplyAddSub213Float64x8: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float64x8(v) - case OpMaskedFusedMultiplyAddSub231Float32x16: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float32x16(v) - case OpMaskedFusedMultiplyAddSub231Float32x4: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float32x4(v) - case OpMaskedFusedMultiplyAddSub231Float32x8: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float32x8(v) - case OpMaskedFusedMultiplyAddSub231Float64x2: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float64x2(v) - case OpMaskedFusedMultiplyAddSub231Float64x4: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float64x4(v) - case OpMaskedFusedMultiplyAddSub231Float64x8: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float64x8(v) - case OpMaskedFusedMultiplySub132Float32x16: - return rewriteValueAMD64_OpMaskedFusedMultiplySub132Float32x16(v) - case OpMaskedFusedMultiplySub132Float32x4: - return rewriteValueAMD64_OpMaskedFusedMultiplySub132Float32x4(v) - case OpMaskedFusedMultiplySub132Float32x8: - return rewriteValueAMD64_OpMaskedFusedMultiplySub132Float32x8(v) - case OpMaskedFusedMultiplySub132Float64x2: - return rewriteValueAMD64_OpMaskedFusedMultiplySub132Float64x2(v) - case OpMaskedFusedMultiplySub132Float64x4: - return rewriteValueAMD64_OpMaskedFusedMultiplySub132Float64x4(v) - case OpMaskedFusedMultiplySub132Float64x8: - return rewriteValueAMD64_OpMaskedFusedMultiplySub132Float64x8(v) - case OpMaskedFusedMultiplySub213Float32x16: - return rewriteValueAMD64_OpMaskedFusedMultiplySub213Float32x16(v) - case OpMaskedFusedMultiplySub213Float32x4: - return rewriteValueAMD64_OpMaskedFusedMultiplySub213Float32x4(v) - case OpMaskedFusedMultiplySub213Float32x8: - return rewriteValueAMD64_OpMaskedFusedMultiplySub213Float32x8(v) - case OpMaskedFusedMultiplySub213Float64x2: - return rewriteValueAMD64_OpMaskedFusedMultiplySub213Float64x2(v) - case 
OpMaskedFusedMultiplySub213Float64x4: - return rewriteValueAMD64_OpMaskedFusedMultiplySub213Float64x4(v) - case OpMaskedFusedMultiplySub213Float64x8: - return rewriteValueAMD64_OpMaskedFusedMultiplySub213Float64x8(v) - case OpMaskedFusedMultiplySub231Float32x16: - return rewriteValueAMD64_OpMaskedFusedMultiplySub231Float32x16(v) - case OpMaskedFusedMultiplySub231Float32x4: - return rewriteValueAMD64_OpMaskedFusedMultiplySub231Float32x4(v) - case OpMaskedFusedMultiplySub231Float32x8: - return rewriteValueAMD64_OpMaskedFusedMultiplySub231Float32x8(v) - case OpMaskedFusedMultiplySub231Float64x2: - return rewriteValueAMD64_OpMaskedFusedMultiplySub231Float64x2(v) - case OpMaskedFusedMultiplySub231Float64x4: - return rewriteValueAMD64_OpMaskedFusedMultiplySub231Float64x4(v) - case OpMaskedFusedMultiplySub231Float64x8: - return rewriteValueAMD64_OpMaskedFusedMultiplySub231Float64x8(v) - case OpMaskedFusedMultiplySubAdd132Float32x16: - return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float32x16(v) - case OpMaskedFusedMultiplySubAdd132Float32x4: - return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float32x4(v) - case OpMaskedFusedMultiplySubAdd132Float32x8: - return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float32x8(v) - case OpMaskedFusedMultiplySubAdd132Float64x2: - return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float64x2(v) - case OpMaskedFusedMultiplySubAdd132Float64x4: - return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float64x4(v) - case OpMaskedFusedMultiplySubAdd132Float64x8: - return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float64x8(v) - case OpMaskedFusedMultiplySubAdd213Float32x16: - return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float32x16(v) - case OpMaskedFusedMultiplySubAdd213Float32x4: - return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float32x4(v) - case OpMaskedFusedMultiplySubAdd213Float32x8: - return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float32x8(v) - case OpMaskedFusedMultiplySubAdd213Float64x2: - return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float64x2(v) - case OpMaskedFusedMultiplySubAdd213Float64x4: - return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float64x4(v) - case OpMaskedFusedMultiplySubAdd213Float64x8: - return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float64x8(v) - case OpMaskedFusedMultiplySubAdd231Float32x16: - return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float32x16(v) - case OpMaskedFusedMultiplySubAdd231Float32x4: - return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float32x4(v) - case OpMaskedFusedMultiplySubAdd231Float32x8: - return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float32x8(v) - case OpMaskedFusedMultiplySubAdd231Float64x2: - return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float64x2(v) - case OpMaskedFusedMultiplySubAdd231Float64x4: - return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float64x4(v) - case OpMaskedFusedMultiplySubAdd231Float64x8: - return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float64x8(v) - case OpMaskedFusedNegativeMultiplyAdd132Float32x16: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float32x16(v) - case OpMaskedFusedNegativeMultiplyAdd132Float32x4: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float32x4(v) - case OpMaskedFusedNegativeMultiplyAdd132Float32x8: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float32x8(v) - case OpMaskedFusedNegativeMultiplyAdd132Float64x2: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float64x2(v) - case 
OpMaskedFusedNegativeMultiplyAdd132Float64x4: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float64x4(v) - case OpMaskedFusedNegativeMultiplyAdd132Float64x8: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float64x8(v) - case OpMaskedFusedNegativeMultiplyAdd213Float32x16: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float32x16(v) - case OpMaskedFusedNegativeMultiplyAdd213Float32x4: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float32x4(v) - case OpMaskedFusedNegativeMultiplyAdd213Float32x8: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float32x8(v) - case OpMaskedFusedNegativeMultiplyAdd213Float64x2: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float64x2(v) - case OpMaskedFusedNegativeMultiplyAdd213Float64x4: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float64x4(v) - case OpMaskedFusedNegativeMultiplyAdd213Float64x8: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float64x8(v) - case OpMaskedFusedNegativeMultiplyAdd231Float32x16: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float32x16(v) - case OpMaskedFusedNegativeMultiplyAdd231Float32x4: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float32x4(v) - case OpMaskedFusedNegativeMultiplyAdd231Float32x8: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float32x8(v) - case OpMaskedFusedNegativeMultiplyAdd231Float64x2: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float64x2(v) - case OpMaskedFusedNegativeMultiplyAdd231Float64x4: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float64x4(v) - case OpMaskedFusedNegativeMultiplyAdd231Float64x8: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float64x8(v) - case OpMaskedFusedNegativeMultiplySub132Float32x16: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float32x16(v) - case OpMaskedFusedNegativeMultiplySub132Float32x4: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float32x4(v) - case OpMaskedFusedNegativeMultiplySub132Float32x8: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float32x8(v) - case OpMaskedFusedNegativeMultiplySub132Float64x2: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float64x2(v) - case OpMaskedFusedNegativeMultiplySub132Float64x4: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float64x4(v) - case OpMaskedFusedNegativeMultiplySub132Float64x8: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float64x8(v) - case OpMaskedFusedNegativeMultiplySub213Float32x16: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float32x16(v) - case OpMaskedFusedNegativeMultiplySub213Float32x4: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float32x4(v) - case OpMaskedFusedNegativeMultiplySub213Float32x8: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float32x8(v) - case OpMaskedFusedNegativeMultiplySub213Float64x2: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float64x2(v) - case OpMaskedFusedNegativeMultiplySub213Float64x4: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float64x4(v) - case OpMaskedFusedNegativeMultiplySub213Float64x8: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float64x8(v) - case OpMaskedFusedNegativeMultiplySub231Float32x16: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float32x16(v) - case OpMaskedFusedNegativeMultiplySub231Float32x4: - return 
rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float32x4(v) - case OpMaskedFusedNegativeMultiplySub231Float32x8: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float32x8(v) - case OpMaskedFusedNegativeMultiplySub231Float64x2: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float64x2(v) - case OpMaskedFusedNegativeMultiplySub231Float64x4: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float64x4(v) - case OpMaskedFusedNegativeMultiplySub231Float64x8: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float64x8(v) + case OpMaskedFusedMultiplyAddFloat32x16: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat32x16(v) + case OpMaskedFusedMultiplyAddFloat32x4: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat32x4(v) + case OpMaskedFusedMultiplyAddFloat32x8: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat32x8(v) + case OpMaskedFusedMultiplyAddFloat64x2: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat64x2(v) + case OpMaskedFusedMultiplyAddFloat64x4: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat64x4(v) + case OpMaskedFusedMultiplyAddFloat64x8: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat64x8(v) + case OpMaskedFusedMultiplyAddSubFloat32x16: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat32x16(v) + case OpMaskedFusedMultiplyAddSubFloat32x4: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat32x4(v) + case OpMaskedFusedMultiplyAddSubFloat32x8: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat32x8(v) + case OpMaskedFusedMultiplyAddSubFloat64x2: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat64x2(v) + case OpMaskedFusedMultiplyAddSubFloat64x4: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat64x4(v) + case OpMaskedFusedMultiplyAddSubFloat64x8: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat64x8(v) + case OpMaskedFusedMultiplySubAddFloat32x16: + return rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat32x16(v) + case OpMaskedFusedMultiplySubAddFloat32x4: + return rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat32x4(v) + case OpMaskedFusedMultiplySubAddFloat32x8: + return rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat32x8(v) + case OpMaskedFusedMultiplySubAddFloat64x2: + return rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat64x2(v) + case OpMaskedFusedMultiplySubAddFloat64x4: + return rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat64x4(v) + case OpMaskedFusedMultiplySubAddFloat64x8: + return rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat64x8(v) case OpMaskedGreaterEqualFloat32x16: return rewriteValueAMD64_OpMaskedGreaterEqualFloat32x16(v) case OpMaskedGreaterEqualFloat32x4: @@ -37999,133 +37549,13 @@ func rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat32x16(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplyAdd132Float32x16 x y z mask) - // result: (VFMADD132PSMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD132PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := 
v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplyAdd132Float32x4 x y z mask) - // result: (VFMADD132PSMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD132PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplyAdd132Float32x8 x y z mask) - // result: (VFMADD132PSMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD132PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplyAdd132Float64x2 x y z mask) - // result: (VFMADD132PDMasked128 x y z (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD132PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplyAdd132Float64x4 x y z mask) - // result: (VFMADD132PDMasked256 x y z (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD132PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplyAdd132Float64x8 x y z mask) - // result: (VFMADD132PDMasked512 x y z (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD132PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplyAdd213Float32x16 x y z mask) + // match: (MaskedFusedMultiplyAddFloat32x16 x y z mask) // result: (VFMADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) for { x := v_0 @@ -38139,13 +37569,13 @@ func rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat32x4(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplyAdd213Float32x4 x y z mask) + // match: (MaskedFusedMultiplyAddFloat32x4 x y z mask) // result: (VFMADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) for { x := v_0 @@ -38159,13 +37589,13 @@ func rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float32x8(v 
*Value) bool { +func rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat32x8(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplyAdd213Float32x8 x y z mask) + // match: (MaskedFusedMultiplyAddFloat32x8 x y z mask) // result: (VFMADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) for { x := v_0 @@ -38179,13 +37609,13 @@ func rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat64x2(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplyAdd213Float64x2 x y z mask) + // match: (MaskedFusedMultiplyAddFloat64x2 x y z mask) // result: (VFMADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) for { x := v_0 @@ -38199,13 +37629,13 @@ func rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat64x4(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplyAdd213Float64x4 x y z mask) + // match: (MaskedFusedMultiplyAddFloat64x4 x y z mask) // result: (VFMADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) for { x := v_0 @@ -38219,13 +37649,13 @@ func rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat64x8(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplyAdd213Float64x8 x y z mask) + // match: (MaskedFusedMultiplyAddFloat64x8 x y z mask) // result: (VFMADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) for { x := v_0 @@ -38239,253 +37669,13 @@ func rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplyAdd231Float32x16 x y z mask) - // result: (VFMADD231PSMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD231PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplyAdd231Float32x4 x y z mask) - // result: (VFMADD231PSMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD231PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplyAdd231Float32x8 x y z mask) - // result: (VFMADD231PSMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - 
v.reset(OpAMD64VFMADD231PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplyAdd231Float64x2 x y z mask) - // result: (VFMADD231PDMasked128 x y z (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD231PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplyAdd231Float64x4 x y z mask) - // result: (VFMADD231PDMasked256 x y z (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD231PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplyAdd231Float64x8 x y z mask) - // result: (VFMADD231PDMasked512 x y z (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD231PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplyAddSub132Float32x16 x y z mask) - // result: (VFMADDSUB132PSMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB132PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplyAddSub132Float32x4 x y z mask) - // result: (VFMADDSUB132PSMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB132PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplyAddSub132Float32x8 x y z mask) - // result: (VFMADDSUB132PSMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB132PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplyAddSub132Float64x2 x y z 
mask) - // result: (VFMADDSUB132PDMasked128 x y z (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB132PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplyAddSub132Float64x4 x y z mask) - // result: (VFMADDSUB132PDMasked256 x y z (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB132PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplyAddSub132Float64x8 x y z mask) - // result: (VFMADDSUB132PDMasked512 x y z (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB132PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat32x16(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplyAddSub213Float32x16 x y z mask) + // match: (MaskedFusedMultiplyAddSubFloat32x16 x y z mask) // result: (VFMADDSUB213PSMasked512 x y z (VPMOVVec32x16ToM mask)) for { x := v_0 @@ -38499,13 +37689,13 @@ func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat32x4(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplyAddSub213Float32x4 x y z mask) + // match: (MaskedFusedMultiplyAddSubFloat32x4 x y z mask) // result: (VFMADDSUB213PSMasked128 x y z (VPMOVVec32x4ToM mask)) for { x := v_0 @@ -38519,13 +37709,13 @@ func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat32x8(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplyAddSub213Float32x8 x y z mask) + // match: (MaskedFusedMultiplyAddSubFloat32x8 x y z mask) // result: (VFMADDSUB213PSMasked256 x y z (VPMOVVec32x8ToM mask)) for { x := v_0 @@ -38539,13 +37729,13 @@ func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat64x2(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplyAddSub213Float64x2 x y z mask) + // match: (MaskedFusedMultiplyAddSubFloat64x2 x y z mask) // result: (VFMADDSUB213PDMasked128 x y z (VPMOVVec64x2ToM mask)) for { x := v_0 @@ -38559,13 +37749,13 @@ func 
rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat64x4(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplyAddSub213Float64x4 x y z mask) + // match: (MaskedFusedMultiplyAddSubFloat64x4 x y z mask) // result: (VFMADDSUB213PDMasked256 x y z (VPMOVVec64x4ToM mask)) for { x := v_0 @@ -38579,13 +37769,13 @@ func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat64x8(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplyAddSub213Float64x8 x y z mask) + // match: (MaskedFusedMultiplyAddSubFloat64x8 x y z mask) // result: (VFMADDSUB213PDMasked512 x y z (VPMOVVec64x8ToM mask)) for { x := v_0 @@ -38599,613 +37789,13 @@ func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat32x16(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplyAddSub231Float32x16 x y z mask) - // result: (VFMADDSUB231PSMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB231PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplyAddSub231Float32x4 x y z mask) - // result: (VFMADDSUB231PSMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB231PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplyAddSub231Float32x8 x y z mask) - // result: (VFMADDSUB231PSMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB231PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplyAddSub231Float64x2 x y z mask) - // result: (VFMADDSUB231PDMasked128 x y z (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB231PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := 
v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplyAddSub231Float64x4 x y z mask) - // result: (VFMADDSUB231PDMasked256 x y z (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB231PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplyAddSub231Float64x8 x y z mask) - // result: (VFMADDSUB231PDMasked512 x y z (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB231PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySub132Float32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySub132Float32x16 x y z mask) - // result: (VFMSUB132PSMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUB132PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySub132Float32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySub132Float32x4 x y z mask) - // result: (VFMSUB132PSMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUB132PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySub132Float32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySub132Float32x8 x y z mask) - // result: (VFMSUB132PSMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUB132PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySub132Float64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySub132Float64x2 x y z mask) - // result: (VFMSUB132PDMasked128 x y z (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUB132PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySub132Float64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySub132Float64x4 x y z mask) - // result: (VFMSUB132PDMasked256 x y z (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUB132PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func 
rewriteValueAMD64_OpMaskedFusedMultiplySub132Float64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySub132Float64x8 x y z mask) - // result: (VFMSUB132PDMasked512 x y z (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUB132PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySub213Float32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySub213Float32x16 x y z mask) - // result: (VFMSUB213PSMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUB213PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySub213Float32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySub213Float32x4 x y z mask) - // result: (VFMSUB213PSMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUB213PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySub213Float32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySub213Float32x8 x y z mask) - // result: (VFMSUB213PSMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUB213PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySub213Float64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySub213Float64x2 x y z mask) - // result: (VFMSUB213PDMasked128 x y z (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUB213PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySub213Float64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySub213Float64x4 x y z mask) - // result: (VFMSUB213PDMasked256 x y z (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUB213PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySub213Float64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySub213Float64x8 x y z mask) - // result: (VFMSUB213PDMasked512 x y z (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUB213PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, 
types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySub231Float32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySub231Float32x16 x y z mask) - // result: (VFMSUB231PSMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUB231PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySub231Float32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySub231Float32x4 x y z mask) - // result: (VFMSUB231PSMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUB231PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySub231Float32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySub231Float32x8 x y z mask) - // result: (VFMSUB231PSMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUB231PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySub231Float64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySub231Float64x2 x y z mask) - // result: (VFMSUB231PDMasked128 x y z (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUB231PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySub231Float64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySub231Float64x4 x y z mask) - // result: (VFMSUB231PDMasked256 x y z (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUB231PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySub231Float64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySub231Float64x8 x y z mask) - // result: (VFMSUB231PDMasked512 x y z (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUB231PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySubAdd132Float32x16 x y z mask) - // result: (VFMSUBADD132PSMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := 
v_3 - v.reset(OpAMD64VFMSUBADD132PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySubAdd132Float32x4 x y z mask) - // result: (VFMSUBADD132PSMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD132PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySubAdd132Float32x8 x y z mask) - // result: (VFMSUBADD132PSMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD132PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySubAdd132Float64x2 x y z mask) - // result: (VFMSUBADD132PDMasked128 x y z (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD132PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySubAdd132Float64x4 x y z mask) - // result: (VFMSUBADD132PDMasked256 x y z (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD132PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySubAdd132Float64x8 x y z mask) - // result: (VFMSUBADD132PDMasked512 x y z (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD132PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySubAdd213Float32x16 x y z mask) + // match: (MaskedFusedMultiplySubAddFloat32x16 x y z mask) // result: (VFMSUBADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) for { x := v_0 @@ -39219,13 +37809,13 @@ func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat32x4(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := 
v.Args[0] b := v.Block - // match: (MaskedFusedMultiplySubAdd213Float32x4 x y z mask) + // match: (MaskedFusedMultiplySubAddFloat32x4 x y z mask) // result: (VFMSUBADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) for { x := v_0 @@ -39239,13 +37829,13 @@ func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat32x8(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplySubAdd213Float32x8 x y z mask) + // match: (MaskedFusedMultiplySubAddFloat32x8 x y z mask) // result: (VFMSUBADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) for { x := v_0 @@ -39259,13 +37849,13 @@ func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat64x2(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplySubAdd213Float64x2 x y z mask) + // match: (MaskedFusedMultiplySubAddFloat64x2 x y z mask) // result: (VFMSUBADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) for { x := v_0 @@ -39279,13 +37869,13 @@ func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat64x4(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplySubAdd213Float64x4 x y z mask) + // match: (MaskedFusedMultiplySubAddFloat64x4 x y z mask) // result: (VFMSUBADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) for { x := v_0 @@ -39299,13 +37889,13 @@ func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat64x8(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplySubAdd213Float64x8 x y z mask) + // match: (MaskedFusedMultiplySubAddFloat64x8 x y z mask) // result: (VFMSUBADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) for { x := v_0 @@ -39319,846 +37909,6 @@ func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySubAdd231Float32x16 x y z mask) - // result: (VFMSUBADD231PSMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD231PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySubAdd231Float32x4 x y z mask) - // result: (VFMSUBADD231PSMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD231PSMasked128) - v0 
:= b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySubAdd231Float32x8 x y z mask) - // result: (VFMSUBADD231PSMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD231PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySubAdd231Float64x2 x y z mask) - // result: (VFMSUBADD231PDMasked128 x y z (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD231PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySubAdd231Float64x4 x y z mask) - // result: (VFMSUBADD231PDMasked256 x y z (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD231PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySubAdd231Float64x8 x y z mask) - // result: (VFMSUBADD231PDMasked512 x y z (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD231PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplyAdd132Float32x16 x y z mask) - // result: (VFNMADD132PSMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMADD132PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplyAdd132Float32x4 x y z mask) - // result: (VFNMADD132PSMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMADD132PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: 
(MaskedFusedNegativeMultiplyAdd132Float32x8 x y z mask) - // result: (VFNMADD132PSMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMADD132PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplyAdd132Float64x2 x y z mask) - // result: (VFNMADD132PDMasked128 x y z (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMADD132PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplyAdd132Float64x4 x y z mask) - // result: (VFNMADD132PDMasked256 x y z (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMADD132PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplyAdd132Float64x8 x y z mask) - // result: (VFNMADD132PDMasked512 x y z (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMADD132PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplyAdd213Float32x16 x y z mask) - // result: (VFNMADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMADD213PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplyAdd213Float32x4 x y z mask) - // result: (VFNMADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMADD213PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplyAdd213Float32x8 x y z mask) - // result: (VFNMADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMADD213PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - 
return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplyAdd213Float64x2 x y z mask) - // result: (VFNMADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMADD213PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplyAdd213Float64x4 x y z mask) - // result: (VFNMADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMADD213PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplyAdd213Float64x8 x y z mask) - // result: (VFNMADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMADD213PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplyAdd231Float32x16 x y z mask) - // result: (VFNMADD231PSMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMADD231PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplyAdd231Float32x4 x y z mask) - // result: (VFNMADD231PSMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMADD231PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplyAdd231Float32x8 x y z mask) - // result: (VFNMADD231PSMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMADD231PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplyAdd231Float64x2 x y z mask) - // result: (VFNMADD231PDMasked128 x y z (VPMOVVec64x2ToM 
mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMADD231PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplyAdd231Float64x4 x y z mask) - // result: (VFNMADD231PDMasked256 x y z (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMADD231PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplyAdd231Float64x8 x y z mask) - // result: (VFNMADD231PDMasked512 x y z (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMADD231PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplySub132Float32x16 x y z mask) - // result: (VFNMSUB132PSMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMSUB132PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplySub132Float32x4 x y z mask) - // result: (VFNMSUB132PSMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMSUB132PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplySub132Float32x8 x y z mask) - // result: (VFNMSUB132PSMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMSUB132PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplySub132Float64x2 x y z mask) - // result: (VFNMSUB132PDMasked128 x y z (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMSUB132PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float64x4(v *Value) bool { - v_3 := 
v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplySub132Float64x4 x y z mask) - // result: (VFNMSUB132PDMasked256 x y z (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMSUB132PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplySub132Float64x8 x y z mask) - // result: (VFNMSUB132PDMasked512 x y z (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMSUB132PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplySub213Float32x16 x y z mask) - // result: (VFNMSUB213PSMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMSUB213PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplySub213Float32x4 x y z mask) - // result: (VFNMSUB213PSMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMSUB213PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplySub213Float32x8 x y z mask) - // result: (VFNMSUB213PSMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMSUB213PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplySub213Float64x2 x y z mask) - // result: (VFNMSUB213PDMasked128 x y z (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMSUB213PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplySub213Float64x4 x y z mask) - // result: (VFNMSUB213PDMasked256 x y z (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMSUB213PDMasked256) - v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplySub213Float64x8 x y z mask) - // result: (VFNMSUB213PDMasked512 x y z (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMSUB213PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplySub231Float32x16 x y z mask) - // result: (VFNMSUB231PSMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMSUB231PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplySub231Float32x4 x y z mask) - // result: (VFNMSUB231PSMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMSUB231PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplySub231Float32x8 x y z mask) - // result: (VFNMSUB231PSMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMSUB231PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplySub231Float64x2 x y z mask) - // result: (VFNMSUB231PDMasked128 x y z (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMSUB231PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplySub231Float64x4 x y z mask) - // result: (VFNMSUB231PDMasked256 x y z (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMSUB231PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: 
(MaskedFusedNegativeMultiplySub231Float64x8 x y z mask) - // result: (VFNMSUB231PDMasked512 x y z (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMSUB231PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} func rewriteValueAMD64_OpMaskedGreaterEqualFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 2fb26dd01efe23..dea1f649490b24 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -244,114 +244,24 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float64x2.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplyAddSub132", 
opLen3(ssa.OpFusedMultiplyAddSub132Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.FusedMultiplySub213", 
opLen3(ssa.OpFusedMultiplySub213Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.FusedNegativeMultiplyAdd132", 
opLen3(ssa.OpFusedNegativeMultiplyAdd132Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, 
"Float32x8.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplyAdd", opLen3(ssa.OpFusedMultiplyAddFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplyAdd", opLen3(ssa.OpFusedMultiplyAddFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplyAdd", opLen3(ssa.OpFusedMultiplyAddFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.FusedMultiplyAdd", opLen3(ssa.OpFusedMultiplyAddFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplyAdd", opLen3(ssa.OpFusedMultiplyAddFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplyAdd", opLen3(ssa.OpFusedMultiplyAddFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplyAddSub", opLen3(ssa.OpFusedMultiplyAddSubFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplyAddSub", opLen3(ssa.OpFusedMultiplyAddSubFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplyAddSub", opLen3(ssa.OpFusedMultiplyAddSubFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.FusedMultiplyAddSub", opLen3(ssa.OpFusedMultiplyAddSubFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplyAddSub", opLen3(ssa.OpFusedMultiplyAddSubFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplyAddSub", opLen3(ssa.OpFusedMultiplyAddSubFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, 
"Float64x4.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.Greater", opLen2(ssa.OpGreaterInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Greater", opLen2(ssa.OpGreaterInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x8.Greater", opLen2(ssa.OpGreaterInt16x8, types.TypeVec128), sys.AMD64) @@ -682,114 +592,24 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float64x2.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedMultiplyAdd132", opLen4(ssa.OpMaskedFusedMultiplyAdd132Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedMultiplyAdd132", opLen4(ssa.OpMaskedFusedMultiplyAdd132Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedMultiplyAdd132", opLen4(ssa.OpMaskedFusedMultiplyAdd132Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedMultiplyAdd132", opLen4(ssa.OpMaskedFusedMultiplyAdd132Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedMultiplyAdd132", opLen4(ssa.OpMaskedFusedMultiplyAdd132Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedFusedMultiplyAdd132", opLen4(ssa.OpMaskedFusedMultiplyAdd132Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float64x8, types.TypeVec512), sys.AMD64) - 
addF(simdPackage, "Float32x4.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float32x4, types.TypeVec128), 
sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float32x8, types.TypeVec256), sys.AMD64) - 
addF(simdPackage, "Float32x16.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, 
"Float32x8.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedMultiplyAdd", opLen4(ssa.OpMaskedFusedMultiplyAddFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedMultiplyAdd", opLen4(ssa.OpMaskedFusedMultiplyAddFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedMultiplyAdd", opLen4(ssa.OpMaskedFusedMultiplyAddFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedMultiplyAdd", opLen4(ssa.OpMaskedFusedMultiplyAddFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedMultiplyAdd", opLen4(ssa.OpMaskedFusedMultiplyAddFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedFusedMultiplyAdd", opLen4(ssa.OpMaskedFusedMultiplyAddFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedMultiplyAddSub", opLen4(ssa.OpMaskedFusedMultiplyAddSubFloat32x4, types.TypeVec128), sys.AMD64) + 
addF(simdPackage, "Float32x8.MaskedFusedMultiplyAddSub", opLen4(ssa.OpMaskedFusedMultiplyAddSubFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedMultiplyAddSub", opLen4(ssa.OpMaskedFusedMultiplyAddSubFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedMultiplyAddSub", opLen4(ssa.OpMaskedFusedMultiplyAddSubFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedMultiplyAddSub", opLen4(ssa.OpMaskedFusedMultiplyAddSubFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedFusedMultiplyAddSub", opLen4(ssa.OpMaskedFusedMultiplyAddSubFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedMultiplySubAdd", opLen4(ssa.OpMaskedFusedMultiplySubAddFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedMultiplySubAdd", opLen4(ssa.OpMaskedFusedMultiplySubAddFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedMultiplySubAdd", opLen4(ssa.OpMaskedFusedMultiplySubAddFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedMultiplySubAdd", opLen4(ssa.OpMaskedFusedMultiplySubAddFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedMultiplySubAdd", opLen4(ssa.OpMaskedFusedMultiplySubAddFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedFusedMultiplySubAdd", opLen4(ssa.OpMaskedFusedMultiplySubAddFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat32x16, types.TypeVec512), sys.AMD64) diff --git a/src/simd/stubs_amd64.go b/src/simd/stubs_amd64.go index 6a271154e10882..95d8b99c8404e9 100644 --- a/src/simd/stubs_amd64.go +++ b/src/simd/stubs_amd64.go @@ -1330,581 +1330,101 @@ func (x Float64x4) FloorWithPrecision(imm8 uint8) Float64x4 // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) FloorWithPrecision(imm8 uint8) Float64x8 -/* FusedMultiplyAdd132 */ +/* FusedMultiplyAdd */ -// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. -// -// Asm: VFMADD132PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplyAdd132(y Float32x4, z Float32x4) Float32x4 - -// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. -// -// Asm: VFMADD132PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplyAdd132(y Float32x8, z Float32x8) Float32x8 - -// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. -// -// Asm: VFMADD132PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplyAdd132(y Float32x16, z Float32x16) Float32x16 - -// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. -// -// Asm: VFMADD132PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplyAdd132(y Float64x2, z Float64x2) Float64x2 - -// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. -// -// Asm: VFMADD132PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplyAdd132(y Float64x4, z Float64x4) Float64x4 - -// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. -// -// Asm: VFMADD132PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplyAdd132(y Float64x8, z Float64x8) Float64x8 - -/* FusedMultiplyAdd213 */ - -// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// FusedMultiplyAdd performs `(v1 * v2) + v3`. 
// // Asm: VFMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplyAdd213(y Float32x4, z Float32x4) Float32x4 +func (x Float32x4) FusedMultiplyAdd(y Float32x4, z Float32x4) Float32x4 -// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// FusedMultiplyAdd performs `(v1 * v2) + v3`. // // Asm: VFMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplyAdd213(y Float32x8, z Float32x8) Float32x8 +func (x Float32x8) FusedMultiplyAdd(y Float32x8, z Float32x8) Float32x8 -// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// FusedMultiplyAdd performs `(v1 * v2) + v3`. // // Asm: VFMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplyAdd213(y Float32x16, z Float32x16) Float32x16 +func (x Float32x16) FusedMultiplyAdd(y Float32x16, z Float32x16) Float32x16 -// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// FusedMultiplyAdd performs `(v1 * v2) + v3`. // // Asm: VFMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplyAdd213(y Float64x2, z Float64x2) Float64x2 +func (x Float64x2) FusedMultiplyAdd(y Float64x2, z Float64x2) Float64x2 -// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// FusedMultiplyAdd performs `(v1 * v2) + v3`. // // Asm: VFMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplyAdd213(y Float64x4, z Float64x4) Float64x4 +func (x Float64x4) FusedMultiplyAdd(y Float64x4, z Float64x4) Float64x4 -// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// FusedMultiplyAdd performs `(v1 * v2) + v3`. // // Asm: VFMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplyAdd213(y Float64x8, z Float64x8) Float64x8 +func (x Float64x8) FusedMultiplyAdd(y Float64x8, z Float64x8) Float64x8 -/* FusedMultiplyAdd231 */ +/* FusedMultiplyAddSub */ -// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. -// -// Asm: VFMADD231PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplyAdd231(y Float32x4, z Float32x4) Float32x4 - -// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. -// -// Asm: VFMADD231PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplyAdd231(y Float32x8, z Float32x8) Float32x8 - -// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. -// -// Asm: VFMADD231PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplyAdd231(y Float32x16, z Float32x16) Float32x16 - -// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. -// -// Asm: VFMADD231PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplyAdd231(y Float64x2, z Float64x2) Float64x2 - -// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. -// -// Asm: VFMADD231PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplyAdd231(y Float64x4, z Float64x4) Float64x4 - -// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. -// -// Asm: VFMADD231PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplyAdd231(y Float64x8, z Float64x8) Float64x8 - -/* FusedMultiplyAddSub132 */ - -// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. -// -// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplyAddSub132(y Float32x4, z Float32x4) Float32x4 - -// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. -// -// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplyAddSub132(y Float32x8, z Float32x8) Float32x8 - -// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. 
-// -// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplyAddSub132(y Float32x16, z Float32x16) Float32x16 - -// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. -// -// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplyAddSub132(y Float64x2, z Float64x2) Float64x2 - -// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. -// -// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplyAddSub132(y Float64x4, z Float64x4) Float64x4 - -// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. -// -// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplyAddSub132(y Float64x8, z Float64x8) Float64x8 - -/* FusedMultiplyAddSub213 */ - -// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // // Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplyAddSub213(y Float32x4, z Float32x4) Float32x4 +func (x Float32x4) FusedMultiplyAddSub(y Float32x4, z Float32x4) Float32x4 -// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // // Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplyAddSub213(y Float32x8, z Float32x8) Float32x8 +func (x Float32x8) FusedMultiplyAddSub(y Float32x8, z Float32x8) Float32x8 -// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // // Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplyAddSub213(y Float32x16, z Float32x16) Float32x16 +func (x Float32x16) FusedMultiplyAddSub(y Float32x16, z Float32x16) Float32x16 -// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // // Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplyAddSub213(y Float64x2, z Float64x2) Float64x2 +func (x Float64x2) FusedMultiplyAddSub(y Float64x2, z Float64x2) Float64x2 -// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // // Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplyAddSub213(y Float64x4, z Float64x4) Float64x4 +func (x Float64x4) FusedMultiplyAddSub(y Float64x4, z Float64x4) Float64x4 -// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. 
// // Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplyAddSub213(y Float64x8, z Float64x8) Float64x8 - -/* FusedMultiplyAddSub231 */ - -// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. -// -// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplyAddSub231(y Float32x4, z Float32x4) Float32x4 - -// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. -// -// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplyAddSub231(y Float32x8, z Float32x8) Float32x8 - -// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. -// -// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplyAddSub231(y Float32x16, z Float32x16) Float32x16 - -// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. -// -// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplyAddSub231(y Float64x2, z Float64x2) Float64x2 - -// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. -// -// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplyAddSub231(y Float64x4, z Float64x4) Float64x4 - -// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. -// -// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplyAddSub231(y Float64x8, z Float64x8) Float64x8 - -/* FusedMultiplySub132 */ +func (x Float64x8) FusedMultiplyAddSub(y Float64x8, z Float64x8) Float64x8 -// FusedMultiplySub132 performs `(v1 * v3) - v2`. -// -// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplySub132(y Float32x4, z Float32x4) Float32x4 - -// FusedMultiplySub132 performs `(v1 * v3) - v2`. -// -// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplySub132(y Float32x8, z Float32x8) Float32x8 - -// FusedMultiplySub132 performs `(v1 * v3) - v2`. -// -// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplySub132(y Float32x16, z Float32x16) Float32x16 - -// FusedMultiplySub132 performs `(v1 * v3) - v2`. -// -// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplySub132(y Float64x2, z Float64x2) Float64x2 - -// FusedMultiplySub132 performs `(v1 * v3) - v2`. -// -// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplySub132(y Float64x4, z Float64x4) Float64x4 +/* FusedMultiplySubAdd */ -// FusedMultiplySub132 performs `(v1 * v3) - v2`. -// -// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplySub132(y Float64x8, z Float64x8) Float64x8 - -/* FusedMultiplySub213 */ - -// FusedMultiplySub213 performs `(v2 * v1) - v3`. -// -// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplySub213(y Float32x4, z Float32x4) Float32x4 - -// FusedMultiplySub213 performs `(v2 * v1) - v3`. -// -// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplySub213(y Float32x8, z Float32x8) Float32x8 - -// FusedMultiplySub213 performs `(v2 * v1) - v3`. 
-// -// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplySub213(y Float32x16, z Float32x16) Float32x16 - -// FusedMultiplySub213 performs `(v2 * v1) - v3`. -// -// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplySub213(y Float64x2, z Float64x2) Float64x2 - -// FusedMultiplySub213 performs `(v2 * v1) - v3`. -// -// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplySub213(y Float64x4, z Float64x4) Float64x4 - -// FusedMultiplySub213 performs `(v2 * v1) - v3`. -// -// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplySub213(y Float64x8, z Float64x8) Float64x8 - -/* FusedMultiplySub231 */ - -// FusedMultiplySub231 performs `(v2 * v3) - v1`. -// -// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplySub231(y Float32x4, z Float32x4) Float32x4 - -// FusedMultiplySub231 performs `(v2 * v3) - v1`. -// -// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplySub231(y Float32x8, z Float32x8) Float32x8 - -// FusedMultiplySub231 performs `(v2 * v3) - v1`. -// -// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplySub231(y Float32x16, z Float32x16) Float32x16 - -// FusedMultiplySub231 performs `(v2 * v3) - v1`. -// -// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplySub231(y Float64x2, z Float64x2) Float64x2 - -// FusedMultiplySub231 performs `(v2 * v3) - v1`. -// -// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplySub231(y Float64x4, z Float64x4) Float64x4 - -// FusedMultiplySub231 performs `(v2 * v3) - v1`. -// -// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplySub231(y Float64x8, z Float64x8) Float64x8 - -/* FusedMultiplySubAdd132 */ - -// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. -// -// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplySubAdd132(y Float32x4, z Float32x4) Float32x4 - -// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. -// -// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplySubAdd132(y Float32x8, z Float32x8) Float32x8 - -// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. -// -// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplySubAdd132(y Float32x16, z Float32x16) Float32x16 - -// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. -// -// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplySubAdd132(y Float64x2, z Float64x2) Float64x2 - -// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. -// -// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplySubAdd132(y Float64x4, z Float64x4) Float64x4 - -// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. -// -// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplySubAdd132(y Float64x8, z Float64x8) Float64x8 - -/* FusedMultiplySubAdd213 */ - -// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. 
+// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // // Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplySubAdd213(y Float32x4, z Float32x4) Float32x4 +func (x Float32x4) FusedMultiplySubAdd(y Float32x4, z Float32x4) Float32x4 -// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // // Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplySubAdd213(y Float32x8, z Float32x8) Float32x8 +func (x Float32x8) FusedMultiplySubAdd(y Float32x8, z Float32x8) Float32x8 -// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // // Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplySubAdd213(y Float32x16, z Float32x16) Float32x16 +func (x Float32x16) FusedMultiplySubAdd(y Float32x16, z Float32x16) Float32x16 -// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // // Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplySubAdd213(y Float64x2, z Float64x2) Float64x2 +func (x Float64x2) FusedMultiplySubAdd(y Float64x2, z Float64x2) Float64x2 -// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // // Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplySubAdd213(y Float64x4, z Float64x4) Float64x4 +func (x Float64x4) FusedMultiplySubAdd(y Float64x4, z Float64x4) Float64x4 -// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // // Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplySubAdd213(y Float64x8, z Float64x8) Float64x8 - -/* FusedMultiplySubAdd231 */ - -// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. -// -// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplySubAdd231(y Float32x4, z Float32x4) Float32x4 - -// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. -// -// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplySubAdd231(y Float32x8, z Float32x8) Float32x8 - -// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. -// -// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplySubAdd231(y Float32x16, z Float32x16) Float32x16 - -// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. 
-// -// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplySubAdd231(y Float64x2, z Float64x2) Float64x2 - -// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. -// -// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplySubAdd231(y Float64x4, z Float64x4) Float64x4 - -// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. -// -// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplySubAdd231(y Float64x8, z Float64x8) Float64x8 - -/* FusedNegativeMultiplyAdd132 */ - -// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. -// -// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedNegativeMultiplyAdd132(y Float32x4, z Float32x4) Float32x4 - -// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. -// -// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedNegativeMultiplyAdd132(y Float32x8, z Float32x8) Float32x8 - -// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. -// -// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedNegativeMultiplyAdd132(y Float32x16, z Float32x16) Float32x16 - -// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. -// -// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedNegativeMultiplyAdd132(y Float64x2, z Float64x2) Float64x2 - -// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. -// -// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedNegativeMultiplyAdd132(y Float64x4, z Float64x4) Float64x4 - -// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. -// -// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedNegativeMultiplyAdd132(y Float64x8, z Float64x8) Float64x8 - -/* FusedNegativeMultiplyAdd213 */ - -// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. -// -// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedNegativeMultiplyAdd213(y Float32x4, z Float32x4) Float32x4 - -// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. -// -// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedNegativeMultiplyAdd213(y Float32x8, z Float32x8) Float32x8 - -// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. -// -// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedNegativeMultiplyAdd213(y Float32x16, z Float32x16) Float32x16 - -// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. -// -// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedNegativeMultiplyAdd213(y Float64x2, z Float64x2) Float64x2 - -// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. -// -// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedNegativeMultiplyAdd213(y Float64x4, z Float64x4) Float64x4 - -// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. -// -// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedNegativeMultiplyAdd213(y Float64x8, z Float64x8) Float64x8 - -/* FusedNegativeMultiplyAdd231 */ - -// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. -// -// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedNegativeMultiplyAdd231(y Float32x4, z Float32x4) Float32x4 - -// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. 
-// -// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedNegativeMultiplyAdd231(y Float32x8, z Float32x8) Float32x8 - -// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. -// -// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedNegativeMultiplyAdd231(y Float32x16, z Float32x16) Float32x16 - -// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. -// -// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedNegativeMultiplyAdd231(y Float64x2, z Float64x2) Float64x2 - -// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. -// -// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedNegativeMultiplyAdd231(y Float64x4, z Float64x4) Float64x4 - -// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. -// -// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedNegativeMultiplyAdd231(y Float64x8, z Float64x8) Float64x8 - -/* FusedNegativeMultiplySub132 */ - -// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. -// -// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedNegativeMultiplySub132(y Float32x4, z Float32x4) Float32x4 - -// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. -// -// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedNegativeMultiplySub132(y Float32x8, z Float32x8) Float32x8 - -// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. -// -// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedNegativeMultiplySub132(y Float32x16, z Float32x16) Float32x16 - -// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. -// -// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedNegativeMultiplySub132(y Float64x2, z Float64x2) Float64x2 - -// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. -// -// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedNegativeMultiplySub132(y Float64x4, z Float64x4) Float64x4 - -// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. -// -// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedNegativeMultiplySub132(y Float64x8, z Float64x8) Float64x8 - -/* FusedNegativeMultiplySub213 */ - -// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. -// -// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedNegativeMultiplySub213(y Float32x4, z Float32x4) Float32x4 - -// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. -// -// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedNegativeMultiplySub213(y Float32x8, z Float32x8) Float32x8 - -// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. -// -// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedNegativeMultiplySub213(y Float32x16, z Float32x16) Float32x16 - -// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. -// -// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedNegativeMultiplySub213(y Float64x2, z Float64x2) Float64x2 - -// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. -// -// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedNegativeMultiplySub213(y Float64x4, z Float64x4) Float64x4 - -// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. -// -// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedNegativeMultiplySub213(y Float64x8, z Float64x8) Float64x8 - -/* FusedNegativeMultiplySub231 */ - -// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. 
-// -// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedNegativeMultiplySub231(y Float32x4, z Float32x4) Float32x4 - -// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. -// -// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedNegativeMultiplySub231(y Float32x8, z Float32x8) Float32x8 - -// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. -// -// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedNegativeMultiplySub231(y Float32x16, z Float32x16) Float32x16 - -// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. -// -// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedNegativeMultiplySub231(y Float64x2, z Float64x2) Float64x2 - -// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. -// -// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedNegativeMultiplySub231(y Float64x4, z Float64x4) Float64x4 - -// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. -// -// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedNegativeMultiplySub231(y Float64x8, z Float64x8) Float64x8 +func (x Float64x8) FusedMultiplySubAdd(y Float64x8, z Float64x8) Float64x8 /* Greater */ @@ -3836,581 +3356,101 @@ func (x Float64x4) MaskedFloorWithPrecision(imm uint8, y Mask64x4) Float64x4 // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedFloorWithPrecision(imm uint8, y Mask64x8) Float64x8 -/* MaskedFusedMultiplyAdd132 */ +/* MaskedFusedMultiplyAdd */ -// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. -// -// Asm: VFMADD132PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplyAdd132(y Float32x4, z Float32x4, u Mask32x4) Float32x4 - -// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. -// -// Asm: VFMADD132PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplyAdd132(y Float32x8, z Float32x8, u Mask32x8) Float32x8 - -// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. -// -// Asm: VFMADD132PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplyAdd132(y Float32x16, z Float32x16, u Mask32x16) Float32x16 - -// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. -// -// Asm: VFMADD132PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplyAdd132(y Float64x2, z Float64x2, u Mask64x2) Float64x2 - -// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. -// -// Asm: VFMADD132PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplyAdd132(y Float64x4, z Float64x4, u Mask64x4) Float64x4 - -// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. -// -// Asm: VFMADD132PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplyAdd132(y Float64x8, z Float64x8, u Mask64x8) Float64x8 - -/* MaskedFusedMultiplyAdd213 */ - -// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// FusedMultiplyAdd performs `(v1 * v2) + v3`. // // Asm: VFMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplyAdd213(y Float32x4, z Float32x4, u Mask32x4) Float32x4 +func (x Float32x4) MaskedFusedMultiplyAdd(y Float32x4, z Float32x4, u Mask32x4) Float32x4 -// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// FusedMultiplyAdd performs `(v1 * v2) + v3`. // // Asm: VFMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplyAdd213(y Float32x8, z Float32x8, u Mask32x8) Float32x8 +func (x Float32x8) MaskedFusedMultiplyAdd(y Float32x8, z Float32x8, u Mask32x8) Float32x8 -// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// FusedMultiplyAdd performs `(v1 * v2) + v3`. 
// // Asm: VFMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplyAdd213(y Float32x16, z Float32x16, u Mask32x16) Float32x16 +func (x Float32x16) MaskedFusedMultiplyAdd(y Float32x16, z Float32x16, u Mask32x16) Float32x16 -// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// FusedMultiplyAdd performs `(v1 * v2) + v3`. // // Asm: VFMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplyAdd213(y Float64x2, z Float64x2, u Mask64x2) Float64x2 +func (x Float64x2) MaskedFusedMultiplyAdd(y Float64x2, z Float64x2, u Mask64x2) Float64x2 -// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// FusedMultiplyAdd performs `(v1 * v2) + v3`. // // Asm: VFMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplyAdd213(y Float64x4, z Float64x4, u Mask64x4) Float64x4 +func (x Float64x4) MaskedFusedMultiplyAdd(y Float64x4, z Float64x4, u Mask64x4) Float64x4 -// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// FusedMultiplyAdd performs `(v1 * v2) + v3`. // // Asm: VFMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplyAdd213(y Float64x8, z Float64x8, u Mask64x8) Float64x8 +func (x Float64x8) MaskedFusedMultiplyAdd(y Float64x8, z Float64x8, u Mask64x8) Float64x8 -/* MaskedFusedMultiplyAdd231 */ +/* MaskedFusedMultiplyAddSub */ -// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. -// -// Asm: VFMADD231PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplyAdd231(y Float32x4, z Float32x4, u Mask32x4) Float32x4 - -// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. -// -// Asm: VFMADD231PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplyAdd231(y Float32x8, z Float32x8, u Mask32x8) Float32x8 - -// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. -// -// Asm: VFMADD231PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplyAdd231(y Float32x16, z Float32x16, u Mask32x16) Float32x16 - -// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. -// -// Asm: VFMADD231PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplyAdd231(y Float64x2, z Float64x2, u Mask64x2) Float64x2 - -// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. -// -// Asm: VFMADD231PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplyAdd231(y Float64x4, z Float64x4, u Mask64x4) Float64x4 - -// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. -// -// Asm: VFMADD231PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplyAdd231(y Float64x8, z Float64x8, u Mask64x8) Float64x8 - -/* MaskedFusedMultiplyAddSub132 */ - -// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. -// -// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplyAddSub132(y Float32x4, z Float32x4, u Mask32x4) Float32x4 - -// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. -// -// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplyAddSub132(y Float32x8, z Float32x8, u Mask32x8) Float32x8 - -// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. -// -// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplyAddSub132(y Float32x16, z Float32x16, u Mask32x16) Float32x16 - -// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. 
-// -// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplyAddSub132(y Float64x2, z Float64x2, u Mask64x2) Float64x2 - -// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. -// -// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplyAddSub132(y Float64x4, z Float64x4, u Mask64x4) Float64x4 - -// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. -// -// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplyAddSub132(y Float64x8, z Float64x8, u Mask64x8) Float64x8 - -/* MaskedFusedMultiplyAddSub213 */ - -// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // // Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplyAddSub213(y Float32x4, z Float32x4, u Mask32x4) Float32x4 +func (x Float32x4) MaskedFusedMultiplyAddSub(y Float32x4, z Float32x4, u Mask32x4) Float32x4 -// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // // Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplyAddSub213(y Float32x8, z Float32x8, u Mask32x8) Float32x8 +func (x Float32x8) MaskedFusedMultiplyAddSub(y Float32x8, z Float32x8, u Mask32x8) Float32x8 -// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // // Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplyAddSub213(y Float32x16, z Float32x16, u Mask32x16) Float32x16 +func (x Float32x16) MaskedFusedMultiplyAddSub(y Float32x16, z Float32x16, u Mask32x16) Float32x16 -// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // // Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplyAddSub213(y Float64x2, z Float64x2, u Mask64x2) Float64x2 +func (x Float64x2) MaskedFusedMultiplyAddSub(y Float64x2, z Float64x2, u Mask64x2) Float64x2 -// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // // Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplyAddSub213(y Float64x4, z Float64x4, u Mask64x4) Float64x4 +func (x Float64x4) MaskedFusedMultiplyAddSub(y Float64x4, z Float64x4, u Mask64x4) Float64x4 -// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. 
// // Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplyAddSub213(y Float64x8, z Float64x8, u Mask64x8) Float64x8 - -/* MaskedFusedMultiplyAddSub231 */ - -// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. -// -// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplyAddSub231(y Float32x4, z Float32x4, u Mask32x4) Float32x4 - -// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. -// -// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplyAddSub231(y Float32x8, z Float32x8, u Mask32x8) Float32x8 - -// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. -// -// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplyAddSub231(y Float32x16, z Float32x16, u Mask32x16) Float32x16 - -// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. -// -// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplyAddSub231(y Float64x2, z Float64x2, u Mask64x2) Float64x2 - -// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. -// -// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplyAddSub231(y Float64x4, z Float64x4, u Mask64x4) Float64x4 - -// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. -// -// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplyAddSub231(y Float64x8, z Float64x8, u Mask64x8) Float64x8 - -/* MaskedFusedMultiplySub132 */ +func (x Float64x8) MaskedFusedMultiplyAddSub(y Float64x8, z Float64x8, u Mask64x8) Float64x8 -// FusedMultiplySub132 performs `(v1 * v3) - v2`. -// -// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplySub132(y Float32x4, z Float32x4, u Mask32x4) Float32x4 - -// FusedMultiplySub132 performs `(v1 * v3) - v2`. -// -// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplySub132(y Float32x8, z Float32x8, u Mask32x8) Float32x8 - -// FusedMultiplySub132 performs `(v1 * v3) - v2`. -// -// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplySub132(y Float32x16, z Float32x16, u Mask32x16) Float32x16 - -// FusedMultiplySub132 performs `(v1 * v3) - v2`. -// -// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplySub132(y Float64x2, z Float64x2, u Mask64x2) Float64x2 - -// FusedMultiplySub132 performs `(v1 * v3) - v2`. -// -// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplySub132(y Float64x4, z Float64x4, u Mask64x4) Float64x4 +/* MaskedFusedMultiplySubAdd */ -// FusedMultiplySub132 performs `(v1 * v3) - v2`. -// -// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplySub132(y Float64x8, z Float64x8, u Mask64x8) Float64x8 - -/* MaskedFusedMultiplySub213 */ - -// FusedMultiplySub213 performs `(v2 * v1) - v3`. -// -// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplySub213(y Float32x4, z Float32x4, u Mask32x4) Float32x4 - -// FusedMultiplySub213 performs `(v2 * v1) - v3`. 
-// -// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplySub213(y Float32x8, z Float32x8, u Mask32x8) Float32x8 - -// FusedMultiplySub213 performs `(v2 * v1) - v3`. -// -// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplySub213(y Float32x16, z Float32x16, u Mask32x16) Float32x16 - -// FusedMultiplySub213 performs `(v2 * v1) - v3`. -// -// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplySub213(y Float64x2, z Float64x2, u Mask64x2) Float64x2 - -// FusedMultiplySub213 performs `(v2 * v1) - v3`. -// -// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplySub213(y Float64x4, z Float64x4, u Mask64x4) Float64x4 - -// FusedMultiplySub213 performs `(v2 * v1) - v3`. -// -// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplySub213(y Float64x8, z Float64x8, u Mask64x8) Float64x8 - -/* MaskedFusedMultiplySub231 */ - -// FusedMultiplySub231 performs `(v2 * v3) - v1`. -// -// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplySub231(y Float32x4, z Float32x4, u Mask32x4) Float32x4 - -// FusedMultiplySub231 performs `(v2 * v3) - v1`. -// -// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplySub231(y Float32x8, z Float32x8, u Mask32x8) Float32x8 - -// FusedMultiplySub231 performs `(v2 * v3) - v1`. -// -// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplySub231(y Float32x16, z Float32x16, u Mask32x16) Float32x16 - -// FusedMultiplySub231 performs `(v2 * v3) - v1`. -// -// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplySub231(y Float64x2, z Float64x2, u Mask64x2) Float64x2 - -// FusedMultiplySub231 performs `(v2 * v3) - v1`. -// -// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplySub231(y Float64x4, z Float64x4, u Mask64x4) Float64x4 - -// FusedMultiplySub231 performs `(v2 * v3) - v1`. -// -// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplySub231(y Float64x8, z Float64x8, u Mask64x8) Float64x8 - -/* MaskedFusedMultiplySubAdd132 */ - -// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. -// -// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplySubAdd132(y Float32x4, z Float32x4, u Mask32x4) Float32x4 - -// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. -// -// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplySubAdd132(y Float32x8, z Float32x8, u Mask32x8) Float32x8 - -// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. -// -// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplySubAdd132(y Float32x16, z Float32x16, u Mask32x16) Float32x16 - -// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. -// -// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplySubAdd132(y Float64x2, z Float64x2, u Mask64x2) Float64x2 - -// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. 
-// -// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplySubAdd132(y Float64x4, z Float64x4, u Mask64x4) Float64x4 - -// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. -// -// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplySubAdd132(y Float64x8, z Float64x8, u Mask64x8) Float64x8 - -/* MaskedFusedMultiplySubAdd213 */ - -// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // // Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplySubAdd213(y Float32x4, z Float32x4, u Mask32x4) Float32x4 +func (x Float32x4) MaskedFusedMultiplySubAdd(y Float32x4, z Float32x4, u Mask32x4) Float32x4 -// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // // Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplySubAdd213(y Float32x8, z Float32x8, u Mask32x8) Float32x8 +func (x Float32x8) MaskedFusedMultiplySubAdd(y Float32x8, z Float32x8, u Mask32x8) Float32x8 -// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // // Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplySubAdd213(y Float32x16, z Float32x16, u Mask32x16) Float32x16 +func (x Float32x16) MaskedFusedMultiplySubAdd(y Float32x16, z Float32x16, u Mask32x16) Float32x16 -// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // // Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplySubAdd213(y Float64x2, z Float64x2, u Mask64x2) Float64x2 +func (x Float64x2) MaskedFusedMultiplySubAdd(y Float64x2, z Float64x2, u Mask64x2) Float64x2 -// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // // Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplySubAdd213(y Float64x4, z Float64x4, u Mask64x4) Float64x4 +func (x Float64x4) MaskedFusedMultiplySubAdd(y Float64x4, z Float64x4, u Mask64x4) Float64x4 -// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // // Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplySubAdd213(y Float64x8, z Float64x8, u Mask64x8) Float64x8 - -/* MaskedFusedMultiplySubAdd231 */ - -// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. 
-// -// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplySubAdd231(y Float32x4, z Float32x4, u Mask32x4) Float32x4 - -// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. -// -// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplySubAdd231(y Float32x8, z Float32x8, u Mask32x8) Float32x8 - -// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. -// -// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplySubAdd231(y Float32x16, z Float32x16, u Mask32x16) Float32x16 - -// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. -// -// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplySubAdd231(y Float64x2, z Float64x2, u Mask64x2) Float64x2 - -// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. -// -// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplySubAdd231(y Float64x4, z Float64x4, u Mask64x4) Float64x4 - -// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. -// -// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplySubAdd231(y Float64x8, z Float64x8, u Mask64x8) Float64x8 - -/* MaskedFusedNegativeMultiplyAdd132 */ - -// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. -// -// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedNegativeMultiplyAdd132(y Float32x4, z Float32x4, u Mask32x4) Float32x4 - -// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. -// -// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedNegativeMultiplyAdd132(y Float32x8, z Float32x8, u Mask32x8) Float32x8 - -// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. -// -// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedNegativeMultiplyAdd132(y Float32x16, z Float32x16, u Mask32x16) Float32x16 - -// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. -// -// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedNegativeMultiplyAdd132(y Float64x2, z Float64x2, u Mask64x2) Float64x2 - -// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. -// -// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedNegativeMultiplyAdd132(y Float64x4, z Float64x4, u Mask64x4) Float64x4 - -// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. -// -// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedNegativeMultiplyAdd132(y Float64x8, z Float64x8, u Mask64x8) Float64x8 - -/* MaskedFusedNegativeMultiplyAdd213 */ - -// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. -// -// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedNegativeMultiplyAdd213(y Float32x4, z Float32x4, u Mask32x4) Float32x4 - -// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. -// -// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedNegativeMultiplyAdd213(y Float32x8, z Float32x8, u Mask32x8) Float32x8 - -// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. 
-// -// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedNegativeMultiplyAdd213(y Float32x16, z Float32x16, u Mask32x16) Float32x16 - -// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. -// -// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedNegativeMultiplyAdd213(y Float64x2, z Float64x2, u Mask64x2) Float64x2 - -// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. -// -// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedNegativeMultiplyAdd213(y Float64x4, z Float64x4, u Mask64x4) Float64x4 - -// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. -// -// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedNegativeMultiplyAdd213(y Float64x8, z Float64x8, u Mask64x8) Float64x8 - -/* MaskedFusedNegativeMultiplyAdd231 */ - -// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. -// -// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedNegativeMultiplyAdd231(y Float32x4, z Float32x4, u Mask32x4) Float32x4 - -// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. -// -// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedNegativeMultiplyAdd231(y Float32x8, z Float32x8, u Mask32x8) Float32x8 - -// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. -// -// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedNegativeMultiplyAdd231(y Float32x16, z Float32x16, u Mask32x16) Float32x16 - -// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. -// -// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedNegativeMultiplyAdd231(y Float64x2, z Float64x2, u Mask64x2) Float64x2 - -// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. -// -// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedNegativeMultiplyAdd231(y Float64x4, z Float64x4, u Mask64x4) Float64x4 - -// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. -// -// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedNegativeMultiplyAdd231(y Float64x8, z Float64x8, u Mask64x8) Float64x8 - -/* MaskedFusedNegativeMultiplySub132 */ - -// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. -// -// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedNegativeMultiplySub132(y Float32x4, z Float32x4, u Mask32x4) Float32x4 - -// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. -// -// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedNegativeMultiplySub132(y Float32x8, z Float32x8, u Mask32x8) Float32x8 - -// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. -// -// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedNegativeMultiplySub132(y Float32x16, z Float32x16, u Mask32x16) Float32x16 - -// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. -// -// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedNegativeMultiplySub132(y Float64x2, z Float64x2, u Mask64x2) Float64x2 - -// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. -// -// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedNegativeMultiplySub132(y Float64x4, z Float64x4, u Mask64x4) Float64x4 - -// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. 
-// -// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedNegativeMultiplySub132(y Float64x8, z Float64x8, u Mask64x8) Float64x8 - -/* MaskedFusedNegativeMultiplySub213 */ - -// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. -// -// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedNegativeMultiplySub213(y Float32x4, z Float32x4, u Mask32x4) Float32x4 - -// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. -// -// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedNegativeMultiplySub213(y Float32x8, z Float32x8, u Mask32x8) Float32x8 - -// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. -// -// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedNegativeMultiplySub213(y Float32x16, z Float32x16, u Mask32x16) Float32x16 - -// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. -// -// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedNegativeMultiplySub213(y Float64x2, z Float64x2, u Mask64x2) Float64x2 - -// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. -// -// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedNegativeMultiplySub213(y Float64x4, z Float64x4, u Mask64x4) Float64x4 - -// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. -// -// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedNegativeMultiplySub213(y Float64x8, z Float64x8, u Mask64x8) Float64x8 - -/* MaskedFusedNegativeMultiplySub231 */ - -// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. -// -// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedNegativeMultiplySub231(y Float32x4, z Float32x4, u Mask32x4) Float32x4 - -// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. -// -// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedNegativeMultiplySub231(y Float32x8, z Float32x8, u Mask32x8) Float32x8 - -// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. -// -// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedNegativeMultiplySub231(y Float32x16, z Float32x16, u Mask32x16) Float32x16 - -// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. -// -// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedNegativeMultiplySub231(y Float64x2, z Float64x2, u Mask64x2) Float64x2 - -// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. -// -// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedNegativeMultiplySub231(y Float64x4, z Float64x4, u Mask64x4) Float64x4 - -// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. 
-// -// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedNegativeMultiplySub231(y Float64x8, z Float64x8, u Mask64x8) Float64x8 +func (x Float64x8) MaskedFusedMultiplySubAdd(y Float64x8, z Float64x8, u Mask64x8) Float64x8 /* MaskedGreater */ From 1b87d52549677a1ab3dfc05bb00eb568d81f6a5c Mon Sep 17 00:00:00 2001 From: David Chase Date: Wed, 18 Jun 2025 14:11:38 -0400 Subject: [PATCH 035/139] [dev.simd] cmd/compile: add fp1gp1fp1 register mask for AMD64 This is paired with a matching simdgen CL 682679 Change-Id: Id494d40b5e64b723a47c1682b71e523a77b0eb87 Reviewed-on: https://go-review.googlesource.com/c/go/+/682656 LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui --- src/cmd/compile/internal/ssa/_gen/AMD64Ops.go | 19 ++++++++++--------- .../compile/internal/ssa/_gen/simdAMD64ops.go | 2 +- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go index 99d0d0ec740063..e2cbc65957c75f 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go @@ -182,14 +182,15 @@ func init() { fpstore = regInfo{inputs: []regMask{gpspsb, fp, 0}} fpstoreidx = regInfo{inputs: []regMask{gpspsb, gpsp, fp, 0}} - fp1k1 = regInfo{inputs: fponly, outputs: maskonly} - k1fp1 = regInfo{inputs: maskonly, outputs: fponly} - fp2k1 = regInfo{inputs: []regMask{fp, fp}, outputs: maskonly} - fp1k1fp1 = regInfo{inputs: []regMask{fp, mask}, outputs: fponly} - fp2k1fp1 = regInfo{inputs: []regMask{fp, fp, mask}, outputs: fponly} - fp2k1k1 = regInfo{inputs: []regMask{fp, fp, mask}, outputs: maskonly} - fp3fp1 = regInfo{inputs: []regMask{fp, fp, fp}, outputs: fponly} - fp3k1fp1 = regInfo{inputs: []regMask{fp, fp, fp, mask}, outputs: fponly} + fp1k1 = regInfo{inputs: fponly, outputs: maskonly} + k1fp1 = regInfo{inputs: maskonly, outputs: fponly} + fp2k1 = regInfo{inputs: []regMask{fp, fp}, outputs: maskonly} + fp1k1fp1 = regInfo{inputs: []regMask{fp, mask}, outputs: fponly} + fp2k1fp1 = regInfo{inputs: []regMask{fp, fp, mask}, outputs: fponly} + fp2k1k1 = regInfo{inputs: []regMask{fp, fp, mask}, outputs: maskonly} + fp3fp1 = regInfo{inputs: []regMask{fp, fp, fp}, outputs: fponly} + fp3k1fp1 = regInfo{inputs: []regMask{fp, fp, fp, mask}, outputs: fponly} + fp1gp1fp1 = regInfo{inputs: []regMask{fp, gp}, outputs: fponly} prefreg = regInfo{inputs: []regMask{gpspsbg}} ) @@ -1300,7 +1301,7 @@ func init() { pkg: "cmd/internal/obj/x86", genfile: "../../amd64/ssa.go", genSIMDfile: "../../amd64/simdssa.go", - ops: append(AMD64ops, simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp3fp1, fp3k1fp1)...), // AMD64ops, + ops: append(AMD64ops, simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp3fp1, fp3k1fp1, fp1gp1fp1)...), // AMD64ops, blocks: AMD64blocks, regnames: regNamesAMD64, ParamIntRegNames: "AX BX CX DI SI R8 R9 R10 R11", diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index c46bc40443ff67..259f1eff2332b9 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -1,7 +1,7 @@ // Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. 
package main
 
-func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 regInfo) []opData {
+func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1, fp1gp1fp1 regInfo) []opData {
 	return []opData{
 		{name: "VADDPS512", argLength: 2, reg: fp21, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false},
 		{name: "VANDPS512", argLength: 2, reg: fp21, asm: "VANDPS", commutative: true, typ: "Vec512", resultInArg0: false},

From 4150372a5d2c3b70591efe1ce208f0a92747f1dc Mon Sep 17 00:00:00 2001
From: Cherry Mui 
Date: Fri, 20 Jun 2025 12:02:18 -0400
Subject: [PATCH 036/139] [dev.simd] cmd/compile: don't treat devel compiler as a released compiler

The compiler has logic to print different messages on an internal
compiler error depending on whether this is a released version of
Go. It hides the panic stack trace if it is a released version. It
does this by checking whether the version has a "go" prefix, which
covers all released versions. However, for a non-released build, if
there is no explicit version set, cmd/dist now sets the toolchain
version to go1.X-devel_XXX, which causes it to be treated as a
released compiler and hides the stack trace. Change the logic so
that a devel compiler is not matched as a released compiler.

Change-Id: I5d3b2101527212f825b6e4000b36030c4f83870b
Reviewed-on: https://go-review.googlesource.com/c/go/+/682975
Reviewed-by: David Chase
LUCI-TryBot-Result: Go LUCI
---
 src/cmd/compile/internal/base/print.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/cmd/compile/internal/base/print.go b/src/cmd/compile/internal/base/print.go
index 119f06fbc03351..9e3348c1ecca89 100644
--- a/src/cmd/compile/internal/base/print.go
+++ b/src/cmd/compile/internal/base/print.go
@@ -220,7 +220,7 @@ func FatalfAt(pos src.XPos, format string, args ...interface{}) {
 	fmt.Printf("\n")
 
 	// If this is a released compiler version, ask for a bug report.
-	if Debug.Panic == 0 && strings.HasPrefix(buildcfg.Version, "go") {
+	if Debug.Panic == 0 && strings.HasPrefix(buildcfg.Version, "go") && !strings.Contains(buildcfg.Version, "devel") {
 		fmt.Printf("\n")
 		fmt.Printf("Please file a bug report including a short program that triggers the error.\n")
 		fmt.Printf("https://go.dev/issue/new\n")

From 7c6ac3527571319e6dde958c64137f1cbda0ecca Mon Sep 17 00:00:00 2001
From: David Chase 
Date: Fri, 20 Jun 2025 15:18:03 -0400
Subject: [PATCH 037/139] [dev.simd] cmd/compile: add simdFp1gp1fp1Imm8 helper to amd64 code generation

This is for VPINSR[BWDQ], coming in a later CL.
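
(Illustrative sketch, not from this CL; the vector-level method name
below is assumed.) The VPINSR[BWDQ] instructions insert a scalar held
in a general-purpose register into one lane of a vector, selected by
an immediate, which is why they need the fp+gp input, fp output
register shape added in the previous CL, e.g.

	VPINSRB $3, DX, X0, X0  // byte 3 of X0 = low byte of DX, other bytes copied from X0

so a hypothetical method such as

	func (x Uint8x16) SetElem(imm uint8, v uint8) Uint8x16

could lower to a single such instruction emitted through this helper.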
Change-Id: I6b4b99be43512623d4d6e5542221c18f0c5c2eb4
Reviewed-on: https://go-review.googlesource.com/c/go/+/682956
LUCI-TryBot-Result: Go LUCI
Reviewed-by: Junyang Shao
Reviewed-by: Cherry Mui
---
 src/cmd/compile/internal/amd64/ssa.go | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go
index 2962fe1698e164..b446f47dd4c0d2 100644
--- a/src/cmd/compile/internal/amd64/ssa.go
+++ b/src/cmd/compile/internal/amd64/ssa.go
@@ -1626,6 +1626,22 @@ func simdFp21Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog {
 	return p
 }
 
+// Example instruction: VPINSRB $3, DX, X0, X0
+func simdFp1gp1fp1Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog {
+	p := s.Prog(v.Op.Asm())
+	imm := v.AuxInt
+	if imm < 0 || imm > 255 {
+		v.Fatalf("Invalid source selection immediate")
+	}
+	p.From.Offset = imm
+	p.From.Type = obj.TYPE_CONST
+	p.AddRestSourceReg(v.Args[1].Reg())
+	p.AddRestSourceReg(simdReg(v.Args[0]))
+	p.To.Type = obj.TYPE_REG
+	p.To.Reg = simdReg(v)
+	return p
+}
+
 // Example instruction: VPCMPD $1, Z1, Z2, K1
 func simdFp2k1Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog {
 	return simdFp21Imm8(s, v)

From a8669c78f5547904f1771e5d1d2a515c0c97dc18 Mon Sep 17 00:00:00 2001
From: Cherry Mui
Date: Fri, 20 Jun 2025 16:03:01 -0400
Subject: [PATCH 038/139] [dev.simd] sync: correct the type of
 runtime_StoreReluintptr

runtime_StoreReluintptr linknames to internal/runtime/atomic.StoreReluintptr,
which does not have a result.

Change-Id: I468cce82985f391c221768188a5eaff43cbcd037
Reviewed-on: https://go-review.googlesource.com/c/go/+/683095
TryBot-Bypass: Cherry Mui
Reviewed-by: David Chase
---
 src/sync/pool.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/sync/pool.go b/src/sync/pool.go
index 0fa8f8cdaa028d..f9a8405b791736 100644
--- a/src/sync/pool.go
+++ b/src/sync/pool.go
@@ -315,4 +315,4 @@ func runtime_procUnpin()
 func runtime_LoadAcquintptr(ptr *uintptr) uintptr
 
 //go:linkname runtime_StoreReluintptr internal/runtime/atomic.StoreReluintptr
-func runtime_StoreReluintptr(ptr *uintptr, val uintptr) uintptr
+func runtime_StoreReluintptr(ptr *uintptr, val uintptr)

From 88c013d6ff6740451e7d294f99206c98c7f23f70 Mon Sep 17 00:00:00 2001
From: Cherry Mui
Date: Fri, 20 Jun 2025 16:28:14 -0400
Subject: [PATCH 039/139] [dev.simd] cmd/compile: generate function body for
 bodyless intrinsics

For a compiler intrinsic, if it is used in a non-call context, e.g.
as a function pointer, it currently requires a fallback implementation
(e.g. assembly code for atomic operations); otherwise the build fails.
The fallback implementation needs to be maintained and tested, even
though it is rarely used in practice.

Also, for SIMD, we're currently adding a large number of compiler
intrinsics without providing fallback implementations (we might in
the future). Since they are methods, it is not unlikely that they
will be used in a non-call context, e.g. referenced from the type
descriptor.

This CL lets the compiler generate the function body for bodyless
intrinsics. The compiler already recognizes a call to the function
as an intrinsic and can directly generate code for it. So we just
fill in the body with a call to the same function.
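
In source terms, the generated body is morally equivalent to the
following sketch (the real implementation constructs the post-Walk IR
directly; Int32x4.Add is just one example of a bodyless intrinsic
method):

	// Declared without a body in package simd; the compiler fills in
	// a body equivalent to:
	func (x Int32x4) Add(y Int32x4) Int32x4 { return x.Add(y) }

The call inside the generated body is itself recognized as an
intrinsic, so the backend lowers it to the vector instruction and no
self-recursion remains in the compiled code.
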
Change-Id: I2636e3128f28301c9abaf2b48bc962ab56e7d1a9 Reviewed-on: https://go-review.googlesource.com/c/go/+/683096 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/gc/compile.go | 40 +++++++----- src/cmd/compile/internal/gc/main.go | 3 +- src/cmd/compile/internal/ir/expr.go | 11 ++++ src/cmd/compile/internal/ssagen/abi.go | 12 ++++ src/cmd/compile/internal/ssagen/intrinsics.go | 63 ++++++++++++++++++- 5 files changed, 111 insertions(+), 18 deletions(-) diff --git a/src/cmd/compile/internal/gc/compile.go b/src/cmd/compile/internal/gc/compile.go index 1a40df9a84ff04..1eb4b8cc37c30c 100644 --- a/src/cmd/compile/internal/gc/compile.go +++ b/src/cmd/compile/internal/gc/compile.go @@ -29,7 +29,7 @@ var ( compilequeue []*ir.Func // functions waiting to be compiled ) -func enqueueFunc(fn *ir.Func) { +func enqueueFunc(fn *ir.Func, symABIs *ssagen.SymABIs) { if ir.CurFunc != nil { base.FatalfAt(fn.Pos(), "enqueueFunc %v inside %v", fn, ir.CurFunc) } @@ -49,22 +49,30 @@ func enqueueFunc(fn *ir.Func) { } if len(fn.Body) == 0 { - // Initialize ABI wrappers if necessary. - ir.InitLSym(fn, false) - types.CalcSize(fn.Type()) - a := ssagen.AbiForBodylessFuncStackMap(fn) - abiInfo := a.ABIAnalyzeFuncType(fn.Type()) // abiInfo has spill/home locations for wrapper - if fn.ABI == obj.ABI0 { - // The current args_stackmap generation assumes the function - // is ABI0, and only ABI0 assembly function can have a FUNCDATA - // reference to args_stackmap (see cmd/internal/obj/plist.go:Flushplist). - // So avoid introducing an args_stackmap if the func is not ABI0. - liveness.WriteFuncMap(fn, abiInfo) - - x := ssagen.EmitArgInfo(fn, abiInfo) - objw.Global(x, int32(len(x.P)), obj.RODATA|obj.LOCAL) + if ir.IsIntrinsicSym(fn.Sym()) && fn.Sym().Linkname == "" && !symABIs.HasDef(fn.Sym()) { + // Generate the function body for a bodyless intrinsic, in case it + // is used in a non-call context (e.g. as a function pointer). + // We skip functions defined in assembly, or has a linkname (which + // could be defined in another package). + ssagen.GenIntrinsicBody(fn) + } else { + // Initialize ABI wrappers if necessary. + ir.InitLSym(fn, false) + types.CalcSize(fn.Type()) + a := ssagen.AbiForBodylessFuncStackMap(fn) + abiInfo := a.ABIAnalyzeFuncType(fn.Type()) // abiInfo has spill/home locations for wrapper + if fn.ABI == obj.ABI0 { + // The current args_stackmap generation assumes the function + // is ABI0, and only ABI0 assembly function can have a FUNCDATA + // reference to args_stackmap (see cmd/internal/obj/plist.go:Flushplist). + // So avoid introducing an args_stackmap if the func is not ABI0. 
+ liveness.WriteFuncMap(fn, abiInfo) + + x := ssagen.EmitArgInfo(fn, abiInfo) + objw.Global(x, int32(len(x.P)), obj.RODATA|obj.LOCAL) + } + return } - return } errorsBefore := base.Errors() diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 253ec3257a1a3b..c486920f5b2b72 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -188,6 +188,7 @@ func Main(archInit func(*ssagen.ArchInfo)) { ir.EscFmt = escape.Fmt ir.IsIntrinsicCall = ssagen.IsIntrinsicCall + ir.IsIntrinsicSym = ssagen.IsIntrinsicSym inline.SSADumpInline = ssagen.DumpInline ssagen.InitEnv() ssagen.InitTables() @@ -304,7 +305,7 @@ func Main(archInit func(*ssagen.ArchInfo)) { } if nextFunc < len(typecheck.Target.Funcs) { - enqueueFunc(typecheck.Target.Funcs[nextFunc]) + enqueueFunc(typecheck.Target.Funcs[nextFunc], symABIs) nextFunc++ continue } diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index 702adfdd84ef5e..e27e4336c973ff 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -1022,6 +1022,9 @@ func StaticCalleeName(n Node) *Name { // IsIntrinsicCall reports whether the compiler back end will treat the call as an intrinsic operation. var IsIntrinsicCall = func(*CallExpr) bool { return false } +// IsIntrinsicSym reports whether the compiler back end will treat a call to this symbol as an intrinsic operation. +var IsIntrinsicSym = func(*types.Sym) bool { return false } + // SameSafeExpr checks whether it is safe to reuse one of l and r // instead of computing both. SameSafeExpr assumes that l and r are // used in the same statement or expression. In order for it to be @@ -1140,6 +1143,14 @@ func ParamNames(ft *types.Type) []Node { return args } +func RecvParamNames(ft *types.Type) []Node { + args := make([]Node, ft.NumRecvs()+ft.NumParams()) + for i, f := range ft.RecvParams() { + args[i] = f.Nname.(*Name) + } + return args +} + // MethodSym returns the method symbol representing a method name // associated with a specific receiver type. // diff --git a/src/cmd/compile/internal/ssagen/abi.go b/src/cmd/compile/internal/ssagen/abi.go index 3d50155cf36d10..0e8dbd944540e0 100644 --- a/src/cmd/compile/internal/ssagen/abi.go +++ b/src/cmd/compile/internal/ssagen/abi.go @@ -99,6 +99,18 @@ func (s *SymABIs) ReadSymABIs(file string) { } } +// HasDef returns whether the given symbol has an assembly definition. +func (s *SymABIs) HasDef(sym *types.Sym) bool { + symName := sym.Linkname + if symName == "" { + symName = sym.Pkg.Prefix + "." + sym.Name + } + symName = s.canonicalize(symName) + + _, hasDefABI := s.defs[symName] + return hasDefABI +} + // GenABIWrappers applies ABI information to Funcs and generates ABI // wrapper functions where necessary. 
func (s *SymABIs) GenABIWrappers() { diff --git a/src/cmd/compile/internal/ssagen/intrinsics.go b/src/cmd/compile/internal/ssagen/intrinsics.go index 186cfc4865ed18..660047df1f2299 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics.go +++ b/src/cmd/compile/internal/ssagen/intrinsics.go @@ -12,6 +12,7 @@ import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" "cmd/compile/internal/ssa" + "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/sys" ) @@ -1751,5 +1752,65 @@ func IsIntrinsicCall(n *ir.CallExpr) bool { if !ok { return false } - return findIntrinsic(name.Sym()) != nil + return IsIntrinsicSym(name.Sym()) +} + +func IsIntrinsicSym(sym *types.Sym) bool { + return findIntrinsic(sym) != nil +} + +// GenIntrinsicBody generates the function body for a bodyless intrinsic. +// This is used when the intrinsic is used in a non-call context, e.g. +// as a function pointer, or (for a method) being referenced from the type +// descriptor. +// +// The compiler already recognizes a call to fn as an intrinsic and can +// directly generate code for it. So we just fill in the body with a call +// to fn. +func GenIntrinsicBody(fn *ir.Func) { + if ir.CurFunc != nil { + base.FatalfAt(fn.Pos(), "enqueueFunc %v inside %v", fn, ir.CurFunc) + } + + if base.Flag.LowerR != 0 { + fmt.Println("generate intrinsic for", ir.FuncName(fn)) + } + + pos := fn.Pos() + ft := fn.Type() + var ret ir.Node + + // For a method, it usually starts with an ODOTMETH (pre-typecheck) or + // OMETHEXPR (post-typecheck) referencing the method symbol without the + // receiver type, and Walk rewrites it to a call directly to the + // type-qualified method symbol, moving the receiver to an argument. + // Here fn has already the type-qualified method symbol, and it is hard + // to get the unqualified symbol. So we just generate the post-Walk form + // and mark it typechecked and Walked. + call := ir.NewCallExpr(pos, ir.OCALLFUNC, fn.Nname, nil) + call.Args = ir.RecvParamNames(ft) + call.IsDDD = ft.IsVariadic() + typecheck.Exprs(call.Args) + call.SetTypecheck(1) + call.SetWalked(true) + ret = call + if ft.NumResults() > 0 { + if ft.NumResults() == 1 { + call.SetType(ft.Result(0).Type) + } else { + call.SetType(ft.ResultsTuple()) + } + n := ir.NewReturnStmt(base.Pos, nil) + n.Results = []ir.Node{call} + ret = n + } + fn.Body.Append(ret) + + if base.Flag.LowerR != 0 { + ir.DumpList("generate intrinsic body", fn.Body) + } + + ir.CurFunc = fn + typecheck.Stmts(fn.Body) + ir.CurFunc = nil // we know CurFunc is nil at entry } From 0cdb2697d1fcfcb68669b5ca9f5e17b35f6b51bf Mon Sep 17 00:00:00 2001 From: Cherry Mui Date: Fri, 20 Jun 2025 17:16:55 -0400 Subject: [PATCH 040/139] [dev.simd] simd: add tests for intrinsic used as a func value and via reflection Change-Id: I9d2be86be90c1ce1bfc031202e534df437af7a0f Reviewed-on: https://go-review.googlesource.com/c/go/+/683036 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/simd/simd_test.go | 57 +++++++++++++++++++++++++++++++++++++++---- 1 file changed, 52 insertions(+), 5 deletions(-) diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index 37e07c96d78618..c92463bb3f1e5e 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -7,17 +7,21 @@ package simd_test import ( + "reflect" "simd" "testing" ) +var sink any + func TestType(t *testing.T) { // Testing: - // - Defined as another struct's field is safe - // - Pointer is safe. 
- // - typedef is safe - // - type alias is safe - // - type conversion is safe + // - Defined as another struct's field is ok + // - Pointer is ok + // - Type defition is ok + // - Type alias is ok + // - Type conversion is ok + // - Conversion to interface is ok type alias = simd.Int32x4 type maskT simd.Mask32x4 type myStruct struct { @@ -32,6 +36,7 @@ func TestType(t *testing.T) { want := []int32{2, 4, 0, 0} y := simd.LoadInt32x4(&vals) v.y = &y + sink = y if !simd.HasAVX512BW() || !simd.HasAVX512VL() { t.Skip("Test requires HasAVX512BW+VL, not available on this hardware") @@ -49,6 +54,48 @@ func TestType(t *testing.T) { } } +func TestFuncValue(t *testing.T) { + // Test that simd intrinsic can be used as a function value. + xv := [4]int32{1, 2, 3, 4} + yv := [4]int32{5, 6, 7, 8} + want := []int32{6, 8, 10, 12} + x := simd.LoadInt32x4(&xv) + y := simd.LoadInt32x4(&yv) + fn := simd.Int32x4.Add + sink = fn + x = fn(x, y) + got := [4]int32{} + x.Store(&got) + for i := range 4 { + if want[i] != got[i] { + t.Errorf("Result at %d incorrect: want %d, got %d", i, want[i], got[i]) + } + } +} + +func TestReflectMethod(t *testing.T) { + // Test that simd intrinsic can be accessed via reflection. + // NOTE: we don't yet support reflect method.Call. + xv := [4]int32{1, 2, 3, 4} + yv := [4]int32{5, 6, 7, 8} + want := []int32{6, 8, 10, 12} + x := simd.LoadInt32x4(&xv) + y := simd.LoadInt32x4(&yv) + m, ok := reflect.TypeOf(x).MethodByName("Add") + if !ok { + t.Fatal("Add method not found") + } + fn := m.Func.Interface().(func(x, y simd.Int32x4) simd.Int32x4) + x = fn(x, y) + got := [4]int32{} + x.Store(&got) + for i := range 4 { + if want[i] != got[i] { + t.Errorf("Result at %d incorrect: want %d, got %d", i, want[i], got[i]) + } + } +} + func TestAdd(t *testing.T) { xv := [4]int32{1, 2, 3, 4} yv := [4]int32{5, 6, 7, 8} From dd63b7aa0e47da12c8db937e486e977690d2e19b Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Fri, 20 Jun 2025 19:35:35 +0000 Subject: [PATCH 041/139] [dev.simd] simd: add AVX512 aggregated check This added check could make AI test code generation's life easier. Change-Id: I725f567100159acd1ee537e8b1e6cb9c9e2bc690 Reviewed-on: https://go-review.googlesource.com/c/go/+/683016 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/simd/cpu.go | 9 +++------ src/simd/simd_test.go | 20 ++++++++++---------- 2 files changed, 13 insertions(+), 16 deletions(-) diff --git a/src/simd/cpu.go b/src/simd/cpu.go index 52a5614e68eac4..b07b5288f20e25 100644 --- a/src/simd/cpu.go +++ b/src/simd/cpu.go @@ -11,10 +11,7 @@ package simd import "internal/cpu" -func HasAVX512BW() bool { - return cpu.X86.HasAVX512BW -} - -func HasAVX512VL() bool { - return cpu.X86.HasAVX512VL +// HasAVX512 checks AVX512 CPU feature F+CD+BW+DQ+VL. 
+func HasAVX512() bool { + return cpu.X86.HasAVX512 } diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index c92463bb3f1e5e..28e25132e63789 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -38,8 +38,8 @@ func TestType(t *testing.T) { v.y = &y sink = y - if !simd.HasAVX512BW() || !simd.HasAVX512VL() { - t.Skip("Test requires HasAVX512BW+VL, not available on this hardware") + if !simd.HasAVX512() { + t.Skip("Test requires HasAVX512, not available on this hardware") return } v.z = maskT(simd.LoadInt32x4(&maskv).AsMask32x4()) @@ -113,8 +113,8 @@ func TestAdd(t *testing.T) { } func TestVectorConversion(t *testing.T) { - if !simd.HasAVX512BW() || !simd.HasAVX512VL() { - t.Skip("Test requires HasAVX512BW+VL, not available on this hardware") + if !simd.HasAVX512() { + t.Skip("Test requires HasAVX512, not available on this hardware") return } xv := [4]int32{1, 2, 3, 4} @@ -131,8 +131,8 @@ func TestVectorConversion(t *testing.T) { } func TestMaskConversion(t *testing.T) { - if !simd.HasAVX512BW() || !simd.HasAVX512VL() { - t.Skip("Test requires HasAVX512BW+VL, not available on this hardware") + if !simd.HasAVX512() { + t.Skip("Test requires HasAVX512, not available on this hardware") return } v := [4]int32{1, 0, 1, 0} @@ -152,8 +152,8 @@ func TestMaskConversion(t *testing.T) { } func TestMaskedAdd(t *testing.T) { - if !simd.HasAVX512BW() || !simd.HasAVX512VL() { - t.Skip("Test requires HasAVX512BW+VL, not available on this hardware") + if !simd.HasAVX512() { + t.Skip("Test requires HasAVX512, not available on this hardware") return } xv := [4]int32{1, 2, 3, 4} @@ -180,8 +180,8 @@ func TestCompare(t *testing.T) { want := []int32{8, 0, 8, 0} x := simd.LoadInt32x4(&xv) y := simd.LoadInt32x4(&yv) - if !simd.HasAVX512BW() { - t.Skip("Test requires HasAVX512BW, not available on this hardware") + if !simd.HasAVX512() { + t.Skip("Test requires HasAVX512, not available on this hardware") return } mask := x.Greater(y) From 1fa4bcfcdac00d186409a8d2a469cca1768824ca Mon Sep 17 00:00:00 2001 From: David Chase Date: Fri, 20 Jun 2025 15:30:55 -0400 Subject: [PATCH 042/139] [dev.simd] simd, cmd/compile: generated code for VPINSR[BWDQ], and test This is paired with simdgen CL 683055 Change-Id: I91d2c08a97ddd7cf06dd24478d552b962846131c Reviewed-on: https://go-review.googlesource.com/c/go/+/683035 Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui --- src/cmd/compile/internal/amd64/simdssa.go | 6 + .../compile/internal/ssa/_gen/simdAMD64.rules | 8 ++ .../compile/internal/ssa/_gen/simdAMD64ops.go | 4 + .../internal/ssa/_gen/simdgenericOps.go | 8 ++ src/cmd/compile/internal/ssa/opGen.go | 120 ++++++++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 136 ++++++++++++++++++ .../compile/internal/ssagen/simdintrinsics.go | 8 ++ src/simd/simd_test.go | 13 ++ src/simd/stubs_amd64.go | 42 ++++++ 9 files changed, 345 insertions(+) diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 7b47a8dddbfadb..005a2601653322 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -718,6 +718,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPDPBUSDMasked512: p = simdFp3k1fp1ResultInArg0(s, v) + case ssa.OpAMD64VPINSRB128, + ssa.OpAMD64VPINSRW128, + ssa.OpAMD64VPINSRD128, + ssa.OpAMD64VPINSRQ128: + p = simdFp1gp1fp1Imm8(s, v) + default: // Unknown reg shape return false diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules 
b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index cb57ae31b62c42..615686166d1b4d 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -1279,6 +1279,14 @@ (SaturatedUnsignedSignedQuadDotProdAccumulateUint32x16 ...) => (VPDPBUSDS512 ...) (SaturatedUnsignedSignedQuadDotProdAccumulateUint32x4 ...) => (VPDPBUSDS128 ...) (SaturatedUnsignedSignedQuadDotProdAccumulateUint32x8 ...) => (VPDPBUSDS256 ...) +(SetElemInt16x8 [a] x y) => (VPINSRW128 [a] x y) +(SetElemInt32x4 [a] x y) => (VPINSRD128 [a] x y) +(SetElemInt64x2 [a] x y) => (VPINSRQ128 [a] x y) +(SetElemInt8x16 [a] x y) => (VPINSRB128 [a] x y) +(SetElemUint16x8 [a] x y) => (VPINSRW128 [a] x y) +(SetElemUint32x4 [a] x y) => (VPINSRD128 [a] x y) +(SetElemUint64x2 [a] x y) => (VPINSRQ128 [a] x y) +(SetElemUint8x16 [a] x y) => (VPINSRB128 [a] x y) (SignInt16x16 ...) => (VPSIGNW256 ...) (SignInt16x8 ...) => (VPSIGNW128 ...) (SignInt32x4 ...) => (VPSIGND128 ...) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 259f1eff2332b9..f4627d068cd960 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -645,20 +645,24 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPCMPWMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPW128", argLength: 2, reg: fp2k1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPWMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPINSRW128", argLength: 2, reg: fp1gp1fp1, asm: "VPINSRW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPD512", argLength: 2, reg: fp2k1, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPDMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPD128", argLength: 2, reg: fp2k1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPDMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPINSRD128", argLength: 2, reg: fp1gp1fp1, asm: "VPINSRD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPD256", argLength: 2, reg: fp2k1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPDMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPQ128", argLength: 2, reg: fp2k1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPQMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPINSRQ128", argLength: 2, reg: fp1gp1fp1, asm: "VPINSRQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPQ256", argLength: 2, reg: fp2k1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPQMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPQ512", argLength: 2, reg: 
fp2k1, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPQMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPB128", argLength: 2, reg: fp2k1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPBMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPINSRB128", argLength: 2, reg: fp1gp1fp1, asm: "VPINSRB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPB256", argLength: 2, reg: fp2k1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPBMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPB512", argLength: 2, reg: fp2k1, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index ab9b4ffd98c484..ca196cd9e19079 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -1372,5 +1372,13 @@ func simdGenericOps() []opData { {name: "RoundWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "TruncSuppressExceptionWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "TruncWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "SetElemInt16x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "SetElemInt32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "SetElemInt64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "SetElemInt8x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "SetElemUint16x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "SetElemUint32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "SetElemUint64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "SetElemUint8x16", argLength: 2, commutative: false, aux: "Int8"}, } } diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 4b25da4e506ab0..121727e1f6b003 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1838,20 +1838,24 @@ const ( OpAMD64VPCMPWMasked512 OpAMD64VPCMPW128 OpAMD64VPCMPWMasked128 + OpAMD64VPINSRW128 OpAMD64VPCMPD512 OpAMD64VPCMPDMasked512 OpAMD64VPCMPD128 OpAMD64VPCMPDMasked128 + OpAMD64VPINSRD128 OpAMD64VPCMPD256 OpAMD64VPCMPDMasked256 OpAMD64VPCMPQ128 OpAMD64VPCMPQMasked128 + OpAMD64VPINSRQ128 OpAMD64VPCMPQ256 OpAMD64VPCMPQMasked256 OpAMD64VPCMPQ512 OpAMD64VPCMPQMasked512 OpAMD64VPCMPB128 OpAMD64VPCMPBMasked128 + OpAMD64VPINSRB128 OpAMD64VPCMPB256 OpAMD64VPCMPBMasked256 OpAMD64VPCMPB512 @@ -5475,6 +5479,14 @@ const ( OpRoundWithPrecisionFloat64x8 OpTruncSuppressExceptionWithPrecisionFloat64x8 OpTruncWithPrecisionFloat64x8 + OpSetElemInt16x8 + OpSetElemInt32x4 + OpSetElemInt64x2 + OpSetElemInt8x16 + OpSetElemUint16x8 + OpSetElemUint32x4 + OpSetElemUint64x2 + OpSetElemUint8x16 ) var opcodeTable = [...]opInfo{ @@ -27738,6 +27750,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPINSRW128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPINSRW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + 
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPD512", auxType: auxInt8, @@ -27803,6 +27830,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPINSRD128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPINSRD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPD256", auxType: auxInt8, @@ -27867,6 +27909,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPINSRQ128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPINSRQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPQ256", auxType: auxInt8, @@ -27964,6 +28021,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPINSRB128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPINSRB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPB256", auxType: auxInt8, @@ -63153,6 +63225,54 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "SetElemInt16x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "SetElemInt32x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "SetElemInt64x2", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "SetElemInt8x16", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "SetElemUint16x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "SetElemUint32x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "SetElemUint64x2", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "SetElemUint8x16", + auxType: auxInt8, + argLen: 2, + generic: true, + }, } func (o Op) Asm() obj.As { return opcodeTable[o].asm } diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index c532b2caa3084b..7ac8c22e879359 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -4038,6 +4038,22 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpSelect1(v) case OpSelectN: return rewriteValueAMD64_OpSelectN(v) + case OpSetElemInt16x8: + return rewriteValueAMD64_OpSetElemInt16x8(v) + case OpSetElemInt32x4: + return rewriteValueAMD64_OpSetElemInt32x4(v) + case OpSetElemInt64x2: + return rewriteValueAMD64_OpSetElemInt64x2(v) + case OpSetElemInt8x16: + return rewriteValueAMD64_OpSetElemInt8x16(v) + case OpSetElemUint16x8: + return rewriteValueAMD64_OpSetElemUint16x8(v) + case OpSetElemUint32x4: + return rewriteValueAMD64_OpSetElemUint32x4(v) + case OpSetElemUint64x2: + return rewriteValueAMD64_OpSetElemUint64x2(v) + case OpSetElemUint8x16: + return rewriteValueAMD64_OpSetElemUint8x16(v) case OpSignExt16to32: v.Op = OpAMD64MOVWQSX return true @@ -49462,6 +49478,126 @@ 
func rewriteValueAMD64_OpSelectN(v *Value) bool { } return false } +func rewriteValueAMD64_OpSetElemInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetElemInt16x8 [a] x y) + // result: (VPINSRW128 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPINSRW128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetElemInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetElemInt32x4 [a] x y) + // result: (VPINSRD128 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPINSRD128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetElemInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetElemInt64x2 [a] x y) + // result: (VPINSRQ128 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPINSRQ128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetElemInt8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetElemInt8x16 [a] x y) + // result: (VPINSRB128 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPINSRB128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetElemUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetElemUint16x8 [a] x y) + // result: (VPINSRW128 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPINSRW128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetElemUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetElemUint32x4 [a] x y) + // result: (VPINSRD128 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPINSRD128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetElemUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetElemUint64x2 [a] x y) + // result: (VPINSRQ128 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPINSRQ128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetElemUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetElemUint8x16 [a] x y) + // result: (VPINSRB128 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPINSRB128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} func rewriteValueAMD64_OpSlicemask(v *Value) bool { v_0 := v.Args[0] b := v.Block diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index dea1f649490b24..db4d2499791863 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -1290,6 +1290,14 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint32x4.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x8.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x16.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.SetElem", opLen2Imm8(ssa.OpSetElemInt8x16, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int16x8.SetElem", opLen2Imm8(ssa.OpSetElemInt16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int32x4.SetElem", opLen2Imm8(ssa.OpSetElemInt32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int64x2.SetElem", opLen2Imm8(ssa.OpSetElemInt64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint8x16.SetElem", opLen2Imm8(ssa.OpSetElemUint8x16, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint16x8.SetElem", opLen2Imm8(ssa.OpSetElemUint16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint32x4.SetElem", opLen2Imm8(ssa.OpSetElemUint32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint64x2.SetElem", opLen2Imm8(ssa.OpSetElemUint64x2, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Int8x16.Sign", opLen2(ssa.OpSignInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Sign", opLen2(ssa.OpSignInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x8.Sign", opLen2(ssa.OpSignInt16x8, types.TypeVec128), sys.AMD64) diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index 28e25132e63789..8658631e45200c 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -230,6 +230,19 @@ func TestSlicesInt8(t *testing.T) { checkInt8Slices(t, a, b) } +func TestSlicesInt8SetElem(t *testing.T) { + a := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} + v := simd.LoadInt8x16Slice(a) + + v = v.SetElem(3, 13) + a[3] = 13 + + b := make([]int8, 16, 16) + v.StoreSlice(b) + checkInt8Slices(t, a, b) +} + func TestSlicesInt8TooShortLoad(t *testing.T) { defer func() { if r := recover(); r != nil { diff --git a/src/simd/stubs_amd64.go b/src/simd/stubs_amd64.go index 95d8b99c8404e9..aeb8c6bda7bde1 100644 --- a/src/simd/stubs_amd64.go +++ b/src/simd/stubs_amd64.go @@ -7242,6 +7242,48 @@ func (x Uint32x8) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int // Asm: VPDPBUSDS, CPU Feature: AVX512EVEX func (x Uint32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Uint32x16 +/* SetElem */ + +// SetElem sets a single constant-indexed element's value +// +// Asm: VPINSRB, CPU Feature: AVX +func (x Int8x16) SetElem(imm uint8, y int8) Int8x16 + +// SetElem sets a single constant-indexed element's value +// +// Asm: VPINSRW, CPU Feature: AVX +func (x Int16x8) SetElem(imm uint8, y int16) Int16x8 + +// SetElem sets a single constant-indexed element's value +// +// Asm: VPINSRD, CPU Feature: AVX +func (x Int32x4) SetElem(imm uint8, y int8) Int32x4 + +// SetElem sets a single constant-indexed element's value +// +// Asm: VPINSRQ, CPU Feature: AVX +func (x Int64x2) SetElem(imm uint8, y int64) Int64x2 + +// SetElem sets a single constant-indexed element's value +// +// Asm: VPINSRB, CPU Feature: AVX +func (x Uint8x16) SetElem(imm uint8, y uint8) Uint8x16 + +// SetElem sets a single constant-indexed element's 
value +// +// Asm: VPINSRW, CPU Feature: AVX +func (x Uint16x8) SetElem(imm uint8, y uint16) Uint16x8 + +// SetElem sets a single constant-indexed element's value +// +// Asm: VPINSRD, CPU Feature: AVX +func (x Uint32x4) SetElem(imm uint8, y uint8) Uint32x4 + +// SetElem sets a single constant-indexed element's value +// +// Asm: VPINSRQ, CPU Feature: AVX +func (x Uint64x2) SetElem(imm uint8, y uint64) Uint64x2 + /* Sign */ // Sign returns the product of the first operand with -1, 0, or 1, From e32488003d32c17c87f89a0fcc14662422df1341 Mon Sep 17 00:00:00 2001 From: David Chase Date: Fri, 20 Jun 2025 17:09:32 -0400 Subject: [PATCH 043/139] [dev.simd] cmd/compile: make simd regmask naming more like existing conventions Paired with simdgen CL 682937 Change-Id: Ia826f643ece23bf4c7903dffe2fc15e39fbd5577 Reviewed-on: https://go-review.googlesource.com/c/go/+/683115 Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui --- src/cmd/compile/internal/amd64/simdssa.go | 14 +- src/cmd/compile/internal/amd64/ssa.go | 22 +- .../compile/internal/ssa/_gen/simdAMD64ops.go | 704 +++++++++--------- 3 files changed, 370 insertions(+), 370 deletions(-) diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 005a2601653322..9364722c3a788d 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -509,7 +509,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPXORQMasked128, ssa.OpAMD64VPXORQMasked256, ssa.OpAMD64VPXORQMasked512: - p = simdFp2k1fp1(s, v) + p = simdFp2kfp(s, v) case ssa.OpAMD64VPABSBMasked128, ssa.OpAMD64VPABSBMasked256, @@ -553,7 +553,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VSQRTPDMasked128, ssa.OpAMD64VSQRTPDMasked256, ssa.OpAMD64VSQRTPDMasked512: - p = simdFp1k1fp1(s, v) + p = simdFpkfp(s, v) case ssa.OpAMD64VROUNDPS128, ssa.OpAMD64VROUNDPS256, @@ -585,7 +585,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VREDUCEPDMasked128, ssa.OpAMD64VREDUCEPDMasked256, ssa.OpAMD64VREDUCEPDMasked512: - p = simdFp1k1fp1Imm8(s, v) + p = simdFpkfpImm8(s, v) case ssa.OpAMD64VDPPD128, ssa.OpAMD64VCMPPS128, @@ -620,7 +620,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPCMPD128, ssa.OpAMD64VPCMPD256, ssa.OpAMD64VPCMPQ256: - p = simdFp2k1Imm8(s, v) + p = simdFp2kImm8(s, v) case ssa.OpAMD64VCMPPSMasked128, ssa.OpAMD64VCMPPSMasked256, @@ -652,7 +652,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPCMPUQMasked128, ssa.OpAMD64VPCMPUQMasked256, ssa.OpAMD64VPCMPUQMasked512: - p = simdFp2k1k1Imm8(s, v) + p = simdFp2kkImm8(s, v) case ssa.OpAMD64VFMADD213PS128, ssa.OpAMD64VFMADD213PS256, @@ -716,13 +716,13 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPDPBUSDMasked128, ssa.OpAMD64VPDPBUSDMasked256, ssa.OpAMD64VPDPBUSDMasked512: - p = simdFp3k1fp1ResultInArg0(s, v) + p = simdFp3kfpResultInArg0(s, v) case ssa.OpAMD64VPINSRB128, ssa.OpAMD64VPINSRW128, ssa.OpAMD64VPINSRD128, ssa.OpAMD64VPINSRQ128: - p = simdFp1gp1fp1Imm8(s, v) + p = simdFpgpfpImm8(s, v) default: // Unknown reg shape diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index b446f47dd4c0d2..82226ec1cdeace 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -1541,13 +1541,13 @@ func simdFp21(s *ssagen.State, v *ssa.Value) *obj.Prog { } // Example instruction: VPCMPEQW Z26, Z30, K4 -func 
simdFp2k1(s *ssagen.State, v *ssa.Value) *obj.Prog { +func simdFp2k(s *ssagen.State, v *ssa.Value) *obj.Prog { // simdReg handles mask and vector registers altogether return simdFp21(s, v) } // Example instruction: VPMINUQ X21, X3, K3, X31 -func simdFp2k1fp1(s *ssagen.State, v *ssa.Value) *obj.Prog { +func simdFp2kfp(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = simdReg(v.Args[1]) @@ -1564,12 +1564,12 @@ func simdFp2k1fp1(s *ssagen.State, v *ssa.Value) *obj.Prog { } // Example instruction: VPCMPEQW Z26, Z30, K1, K4 -func simdFp2k1k1(s *ssagen.State, v *ssa.Value) *obj.Prog { - return simdFp2k1fp1(s, v) +func simdFp2kk(s *ssagen.State, v *ssa.Value) *obj.Prog { + return simdFp2kfp(s, v) } // Example instruction: VPOPCNTB X14, K4, X16 -func simdFp1k1fp1(s *ssagen.State, v *ssa.Value) *obj.Prog { +func simdFpkfp(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = simdReg(v.Args[0]) @@ -1595,7 +1595,7 @@ func simdFp11Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog { } // Example instruction: VREDUCEPD $126, X1, K3, X31 -func simdFp1k1fp1Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog { +func simdFpkfpImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) imm := v.AuxInt if imm < 0 || imm > 255 { @@ -1627,7 +1627,7 @@ func simdFp21Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog { } // Example instruction: VPINSRB $3, DX, X0, X0 -func simdFp1gp1fp1Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog { +func simdFpgpfpImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) imm := v.AuxInt if imm < 0 || imm > 255 { @@ -1643,12 +1643,12 @@ func simdFp1gp1fp1Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog { } // Example instruction: VPCMPD $1, Z1, Z2, K1 -func simdFp2k1Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog { +func simdFp2kImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { return simdFp21Imm8(s, v) } // Example instruction: VPCMPD $1, Z1, Z2, K2, K1 -func simdFp2k1k1Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog { +func simdFp2kkImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) imm := v.AuxInt if imm < 0 || imm > 255 { @@ -1676,7 +1676,7 @@ func simdFp31ResultInArg0(s *ssagen.State, v *ssa.Value) *obj.Prog { } // Example instruction: VFMADD213PD Z2, Z1, K1, Z0 -func simdFp3k1fp1ResultInArg0(s *ssagen.State, v *ssa.Value) *obj.Prog { +func simdFp3kfpResultInArg0(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = simdReg(v.Args[2]) @@ -1700,7 +1700,7 @@ func simdFp31(s *ssagen.State, v *ssa.Value) *obj.Prog { } // Currently unused -func simdFp3k1fp1(s *ssagen.State, v *ssa.Value) *obj.Prog { +func simdFp3kfp(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = simdReg(v.Args[2]) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index f4627d068cd960..9f8230946325af 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -1,7 +1,7 @@ // Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. 
package main -func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1, fp1gp1fp1 regInfo) []opData { +func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp regInfo) []opData { return []opData{ {name: "VADDPS512", argLength: 2, reg: fp21, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VANDPS512", argLength: 2, reg: fp21, asm: "VANDPS", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -12,23 +12,23 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VFMADD213PS512", argLength: 3, reg: fp31, asm: "VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VFMADDSUB213PS512", argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VFMSUBADD213PS512", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VADDPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VANDPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VANDNPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDNPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRCP14PSMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRSQRT14PSMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VDIVPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VFMADD213PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADDSUB213PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUBADD213PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VMAXPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMINPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMULPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VSCALEFPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VORPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VORPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VSQRTPSMasked512", argLength: 2, reg: fp1k1fp1, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSUBPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VSUBPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VXORPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VXORPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VADDPSMasked512", argLength: 3, reg: fp2kfp, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VANDPSMasked512", argLength: 3, reg: fp2kfp, asm: "VANDPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VANDNPSMasked512", argLength: 3, reg: fp2kfp, asm: "VANDNPS", commutative: false, 
typ: "Vec512", resultInArg0: false}, + {name: "VRCP14PSMasked512", argLength: 2, reg: fpkfp, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRSQRT14PSMasked512", argLength: 2, reg: fpkfp, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VDIVPSMasked512", argLength: 3, reg: fp2kfp, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VFMADD213PSMasked512", argLength: 4, reg: fp3kfp, asm: "VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB213PSMasked512", argLength: 4, reg: fp3kfp, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD213PSMasked512", argLength: 4, reg: fp3kfp, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VMAXPSMasked512", argLength: 3, reg: fp2kfp, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMINPSMasked512", argLength: 3, reg: fp2kfp, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMULPSMasked512", argLength: 3, reg: fp2kfp, asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VSCALEFPSMasked512", argLength: 3, reg: fp2kfp, asm: "VSCALEFPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VORPSMasked512", argLength: 3, reg: fp2kfp, asm: "VORPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VSQRTPSMasked512", argLength: 2, reg: fpkfp, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSUBPSMasked512", argLength: 3, reg: fp2kfp, asm: "VSUBPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VXORPSMasked512", argLength: 3, reg: fp2kfp, asm: "VXORPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMAXPS512", argLength: 2, reg: fp21, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMINPS512", argLength: 2, reg: fp21, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMULPS512", argLength: 2, reg: fp21, asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -47,23 +47,23 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VFMADD213PS128", argLength: 3, reg: fp31, asm: "VFMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMADDSUB213PS128", argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMSUBADD213PS128", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VADDPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VANDPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VANDNPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDNPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRCP14PSMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRSQRT14PSMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VDIVPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VFMADD213PSMasked128", argLength: 4, reg: fp3k1fp1, 
asm: "VFMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADDSUB213PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUBADD213PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VMAXPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMAXPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMINPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMULPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VSCALEFPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VORPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VORPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VSQRTPSMasked128", argLength: 2, reg: fp1k1fp1, asm: "VSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSUBPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VXORPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VXORPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VADDPSMasked128", argLength: 3, reg: fp2kfp, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VANDPSMasked128", argLength: 3, reg: fp2kfp, asm: "VANDPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VANDNPSMasked128", argLength: 3, reg: fp2kfp, asm: "VANDNPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRCP14PSMasked128", argLength: 2, reg: fpkfp, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRSQRT14PSMasked128", argLength: 2, reg: fpkfp, asm: "VRSQRT14PS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VDIVPSMasked128", argLength: 3, reg: fp2kfp, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VFMADD213PSMasked128", argLength: 4, reg: fp3kfp, asm: "VFMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADDSUB213PSMasked128", argLength: 4, reg: fp3kfp, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUBADD213PSMasked128", argLength: 4, reg: fp3kfp, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VMAXPSMasked128", argLength: 3, reg: fp2kfp, asm: "VMAXPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMINPSMasked128", argLength: 3, reg: fp2kfp, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMULPSMasked128", argLength: 3, reg: fp2kfp, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VSCALEFPSMasked128", argLength: 3, reg: fp2kfp, asm: "VSCALEFPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VORPSMasked128", argLength: 3, reg: fp2kfp, asm: "VORPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VSQRTPSMasked128", argLength: 2, reg: fpkfp, asm: "VSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSUBPSMasked128", argLength: 3, reg: fp2kfp, asm: "VSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VXORPSMasked128", argLength: 3, reg: fp2kfp, asm: "VXORPS", 
commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMAXPS128", argLength: 2, reg: fp21, asm: "VMAXPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMINPS128", argLength: 2, reg: fp21, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMULPS128", argLength: 2, reg: fp21, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -84,23 +84,23 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VFMADD213PS256", argLength: 3, reg: fp31, asm: "VFMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VFMADDSUB213PS256", argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VFMSUBADD213PS256", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VADDPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VANDPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VANDNPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDNPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRCP14PSMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRSQRT14PSMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VDIVPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VFMADD213PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADDSUB213PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUBADD213PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VMAXPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMINPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMULPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VSCALEFPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VORPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VORPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VSQRTPSMasked256", argLength: 2, reg: fp1k1fp1, asm: "VSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSUBPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VXORPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VXORPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VADDPSMasked256", argLength: 3, reg: fp2kfp, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VANDPSMasked256", argLength: 3, reg: fp2kfp, asm: "VANDPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VANDNPSMasked256", argLength: 3, reg: fp2kfp, asm: "VANDNPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRCP14PSMasked256", 
argLength: 2, reg: fpkfp, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false},
+	{name: "VRSQRT14PSMasked256", argLength: 2, reg: fpkfp, asm: "VRSQRT14PS", commutative: false, typ: "Vec256", resultInArg0: false},
+	{name: "VDIVPSMasked256", argLength: 3, reg: fp2kfp, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false},
+	{name: "VFMADD213PSMasked256", argLength: 4, reg: fp3kfp, asm: "VFMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true},
+	{name: "VFMADDSUB213PSMasked256", argLength: 4, reg: fp3kfp, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true},
+	{name: "VFMSUBADD213PSMasked256", argLength: 4, reg: fp3kfp, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec256", resultInArg0: true},
+	{name: "VMAXPSMasked256", argLength: 3, reg: fp2kfp, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false},
+	{name: "VMINPSMasked256", argLength: 3, reg: fp2kfp, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false},
+	{name: "VMULPSMasked256", argLength: 3, reg: fp2kfp, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false},
+	{name: "VSCALEFPSMasked256", argLength: 3, reg: fp2kfp, asm: "VSCALEFPS", commutative: false, typ: "Vec256", resultInArg0: false},
+	{name: "VORPSMasked256", argLength: 3, reg: fp2kfp, asm: "VORPS", commutative: true, typ: "Vec256", resultInArg0: false},
+	{name: "VSQRTPSMasked256", argLength: 2, reg: fpkfp, asm: "VSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false},
+	{name: "VSUBPSMasked256", argLength: 3, reg: fp2kfp, asm: "VSUBPS", commutative: false, typ: "Vec256", resultInArg0: false},
+	{name: "VXORPSMasked256", argLength: 3, reg: fp2kfp, asm: "VXORPS", commutative: true, typ: "Vec256", resultInArg0: false},
 	{name: "VMAXPS256", argLength: 2, reg: fp21, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false},
 	{name: "VMINPS256", argLength: 2, reg: fp21, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false},
 	{name: "VMULPS256", argLength: 2, reg: fp21, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false},
@@ -121,23 +121,23 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1
 	{name: "VFMADD213PD128", argLength: 3, reg: fp31, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true},
 	{name: "VFMADDSUB213PD128", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true},
 	{name: "VFMSUBADD213PD128", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true},
-	{name: "VADDPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false},
-	{name: "VANDPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDPD", commutative: true, typ: "Vec128", resultInArg0: false},
-	{name: "VANDNPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDNPD", commutative: false, typ: "Vec128", resultInArg0: false},
-	{name: "VRCP14PDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false},
-	{name: "VRSQRT14PDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false},
-	{name: "VDIVPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false},
-	{name: "VFMADD213PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true},
-	{name: "VFMADDSUB213PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true},
-	{name: "VFMSUBADD213PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true},
-	{name: "VMAXPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMAXPD", commutative: true, typ: "Vec128", resultInArg0: false},
-	{name: "VMINPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false},
-	{name: "VMULPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false},
-	{name: "VSCALEFPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VSCALEFPD", commutative: false, typ: "Vec128", resultInArg0: false},
-	{name: "VORPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VORPD", commutative: true, typ: "Vec128", resultInArg0: false},
-	{name: "VSQRTPDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: false},
-	{name: "VSUBPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VSUBPD", commutative: false, typ: "Vec128", resultInArg0: false},
-	{name: "VXORPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VXORPD", commutative: true, typ: "Vec128", resultInArg0: false},
+	{name: "VADDPDMasked128", argLength: 3, reg: fp2kfp, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false},
+	{name: "VANDPDMasked128", argLength: 3, reg: fp2kfp, asm: "VANDPD", commutative: true, typ: "Vec128", resultInArg0: false},
+	{name: "VANDNPDMasked128", argLength: 3, reg: fp2kfp, asm: "VANDNPD", commutative: false, typ: "Vec128", resultInArg0: false},
+	{name: "VRCP14PDMasked128", argLength: 2, reg: fpkfp, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false},
+	{name: "VRSQRT14PDMasked128", argLength: 2, reg: fpkfp, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false},
+	{name: "VDIVPDMasked128", argLength: 3, reg: fp2kfp, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false},
+	{name: "VFMADD213PDMasked128", argLength: 4, reg: fp3kfp, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true},
+	{name: "VFMADDSUB213PDMasked128", argLength: 4, reg: fp3kfp, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true},
+	{name: "VFMSUBADD213PDMasked128", argLength: 4, reg: fp3kfp, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true},
+	{name: "VMAXPDMasked128", argLength: 3, reg: fp2kfp, asm: "VMAXPD", commutative: true, typ: "Vec128", resultInArg0: false},
+	{name: "VMINPDMasked128", argLength: 3, reg: fp2kfp, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false},
+	{name: "VMULPDMasked128", argLength: 3, reg: fp2kfp, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false},
+	{name: "VSCALEFPDMasked128", argLength: 3, reg: fp2kfp, asm: "VSCALEFPD", commutative: false, typ: "Vec128", resultInArg0: false},
+	{name: "VORPDMasked128", argLength: 3, reg: fp2kfp, asm: "VORPD", commutative: true, typ: "Vec128", resultInArg0: false},
+	{name: "VSQRTPDMasked128", argLength: 2, reg: fpkfp, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: false},
+	{name: "VSUBPDMasked128", argLength: 3, reg: fp2kfp, asm: "VSUBPD", commutative: false, typ: "Vec128", resultInArg0: false},
+	{name: "VXORPDMasked128", argLength: 3, reg: fp2kfp, asm: "VXORPD", commutative: true, typ: "Vec128", resultInArg0: false},
 	{name: "VMAXPD128", argLength: 2, reg: fp21, asm: "VMAXPD", commutative: true, typ: "Vec128", resultInArg0: false},
 	{name: "VMINPD128", argLength: 2, reg: fp21, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false},
 	{name: "VMULPD128", argLength: 2, reg: fp21, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false},
@@ -158,23 +158,23 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1
 	{name: "VFMADD213PD256", argLength: 3, reg: fp31, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true},
 	{name: "VFMADDSUB213PD256", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true},
 	{name: "VFMSUBADD213PD256", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: true},
-	{name: "VADDPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false},
-	{name: "VANDPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDPD", commutative: true, typ: "Vec256", resultInArg0: false},
-	{name: "VANDNPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDNPD", commutative: false, typ: "Vec256", resultInArg0: false},
-	{name: "VRCP14PDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false},
-	{name: "VRSQRT14PDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false},
-	{name: "VDIVPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false},
-	{name: "VFMADD213PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true},
-	{name: "VFMADDSUB213PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true},
-	{name: "VFMSUBADD213PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: true},
-	{name: "VMAXPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMAXPD", commutative: true, typ: "Vec256", resultInArg0: false},
-	{name: "VMINPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false},
-	{name: "VMULPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false},
-	{name: "VSCALEFPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VSCALEFPD", commutative: false, typ: "Vec256", resultInArg0: false},
-	{name: "VORPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VORPD", commutative: true, typ: "Vec256", resultInArg0: false},
-	{name: "VSQRTPDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec256", resultInArg0: false},
-	{name: "VSUBPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VSUBPD", commutative: false, typ: "Vec256", resultInArg0: false},
-	{name: "VXORPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VXORPD", commutative: true, typ: "Vec256", resultInArg0: false},
+	{name: "VADDPDMasked256", argLength: 3, reg: fp2kfp, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false},
+	{name: "VANDPDMasked256", argLength: 3, reg: fp2kfp, asm: "VANDPD", commutative: true, typ: "Vec256", resultInArg0: false},
+	{name: "VANDNPDMasked256", argLength: 3, reg: fp2kfp, asm: "VANDNPD", commutative: false, typ: "Vec256", resultInArg0: false},
+	{name: "VRCP14PDMasked256", argLength: 2, reg: fpkfp, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false},
+	{name: "VRSQRT14PDMasked256", argLength: 2, reg: fpkfp, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false},
+	{name: "VDIVPDMasked256", argLength: 3, reg: fp2kfp, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false},
+	{name: "VFMADD213PDMasked256", argLength: 4, reg: fp3kfp, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true},
+	{name: "VFMADDSUB213PDMasked256", argLength: 4, reg: fp3kfp, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true},
+	{name: "VFMSUBADD213PDMasked256", argLength: 4, reg: fp3kfp, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: true},
+	{name: "VMAXPDMasked256", argLength: 3, reg: fp2kfp, asm: "VMAXPD", commutative: true, typ: "Vec256", resultInArg0: false},
+	{name: "VMINPDMasked256", argLength: 3, reg: fp2kfp, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false},
+	{name: "VMULPDMasked256", argLength: 3, reg: fp2kfp, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false},
+	{name: "VSCALEFPDMasked256", argLength: 3, reg: fp2kfp, asm: "VSCALEFPD", commutative: false, typ: "Vec256", resultInArg0: false},
+	{name: "VORPDMasked256", argLength: 3, reg: fp2kfp, asm: "VORPD", commutative: true, typ: "Vec256", resultInArg0: false},
+	{name: "VSQRTPDMasked256", argLength: 2, reg: fpkfp, asm: "VSQRTPD", commutative: false, typ: "Vec256", resultInArg0: false},
+	{name: "VSUBPDMasked256", argLength: 3, reg: fp2kfp, asm: "VSUBPD", commutative: false, typ: "Vec256", resultInArg0: false},
+	{name: "VXORPDMasked256", argLength: 3, reg: fp2kfp, asm: "VXORPD", commutative: true, typ: "Vec256", resultInArg0: false},
 	{name: "VMAXPD256", argLength: 2, reg: fp21, asm: "VMAXPD", commutative: true, typ: "Vec256", resultInArg0: false},
 	{name: "VMINPD256", argLength: 2, reg: fp21, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false},
 	{name: "VMULPD256", argLength: 2, reg: fp21, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false},
@@ -194,23 +194,23 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1
 	{name: "VFMADD213PD512", argLength: 3, reg: fp31, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true},
 	{name: "VFMADDSUB213PD512", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true},
 	{name: "VFMSUBADD213PD512", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", resultInArg0: true},
-	{name: "VADDPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false},
-	{name: "VANDPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDPD", commutative: true, typ: "Vec512", resultInArg0: false},
-	{name: "VANDNPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDNPD", commutative: false, typ: "Vec512", resultInArg0: false},
-	{name: "VRCP14PDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false},
-	{name: "VRSQRT14PDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false},
-	{name: "VDIVPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false},
-	{name: "VFMADD213PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true},
-	{name: "VFMADDSUB213PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true},
-	{name: "VFMSUBADD213PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", resultInArg0: true},
-	{name: "VMAXPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMAXPD", commutative: true, typ: "Vec512", resultInArg0: false},
-	{name: "VMINPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false},
-	{name: "VMULPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false},
-	{name: "VSCALEFPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VSCALEFPD", commutative: false, typ: "Vec512", resultInArg0: false},
-	{name: "VORPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VORPD", commutative: true, typ: "Vec512", resultInArg0: false},
-	{name: "VSQRTPDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: false},
-	{name: "VSUBPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VSUBPD", commutative: false, typ: "Vec512", resultInArg0: false},
-	{name: "VXORPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VXORPD", commutative: true, typ: "Vec512", resultInArg0: false},
+	{name: "VADDPDMasked512", argLength: 3, reg: fp2kfp, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false},
+	{name: "VANDPDMasked512", argLength: 3, reg: fp2kfp, asm: "VANDPD", commutative: true, typ: "Vec512", resultInArg0: false},
+	{name: "VANDNPDMasked512", argLength: 3, reg: fp2kfp, asm: "VANDNPD", commutative: false, typ: "Vec512", resultInArg0: false},
+	{name: "VRCP14PDMasked512", argLength: 2, reg: fpkfp, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false},
+	{name: "VRSQRT14PDMasked512", argLength: 2, reg: fpkfp, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false},
+	{name: "VDIVPDMasked512", argLength: 3, reg: fp2kfp, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false},
+	{name: "VFMADD213PDMasked512", argLength: 4, reg: fp3kfp, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true},
+	{name: "VFMADDSUB213PDMasked512", argLength: 4, reg: fp3kfp, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true},
+	{name: "VFMSUBADD213PDMasked512", argLength: 4, reg: fp3kfp, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", resultInArg0: true},
+	{name: "VMAXPDMasked512", argLength: 3, reg: fp2kfp, asm: "VMAXPD", commutative: true, typ: "Vec512", resultInArg0: false},
+	{name: "VMINPDMasked512", argLength: 3, reg: fp2kfp, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false},
+	{name: "VMULPDMasked512", argLength: 3, reg: fp2kfp, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false},
+	{name: "VSCALEFPDMasked512", argLength: 3, reg: fp2kfp, asm: "VSCALEFPD", commutative: false, typ: "Vec512", resultInArg0: false},
+	{name: "VORPDMasked512", argLength: 3, reg: fp2kfp, asm: "VORPD", commutative: true, typ: "Vec512", resultInArg0: false},
+	{name: "VSQRTPDMasked512", argLength: 2, reg: fpkfp, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: false},
+	{name: "VSUBPDMasked512", argLength: 3, reg: fp2kfp, asm: "VSUBPD", commutative: false, typ: "Vec512", resultInArg0: false},
+	{name: "VXORPDMasked512", argLength: 3, reg: fp2kfp, asm: "VXORPD", commutative: true, typ: "Vec512", resultInArg0: false},
 	{name: "VMAXPD512", argLength: 2, reg: fp21, asm: "VMAXPD", commutative: true, typ: "Vec512", resultInArg0: false},
 	{name: "VMINPD512", argLength: 2, reg: fp21, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false},
 	{name: "VMULPD512", argLength: 2, reg: fp21, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false},
@@ -223,17 +223,17 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1
 	{name: "VPADDW256", argLength: 2, reg: fp21, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false},
 	{name: "VPCMPEQW256", argLength: 2, reg: fp21, asm: "VPCMPEQW", commutative: true, typ: "Vec256", resultInArg0: false},
 	{name: "VPCMPGTW256", argLength: 2, reg: fp21, asm: "VPCMPGTW", commutative: false, typ: "Vec256", resultInArg0: false},
-	{name: "VPABSWMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false},
-	{name: "VPADDWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false},
-	{name: "VPMAXSWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSW", commutative: true, typ: "Vec256", resultInArg0: false},
-	{name: "VPMINSWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINSW", commutative: true, typ: "Vec256", resultInArg0: false},
-	{name: "VPMULHWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false},
-	{name: "VPMULLWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMULLW", commutative: true, typ: "Vec256", resultInArg0: false},
-	{name: "VPMADDWDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMADDWD", commutative: false, typ: "Vec256", resultInArg0: false},
-	{name: "VPOPCNTWMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTW", commutative: false, typ: "Vec256", resultInArg0: false},
-	{name: "VPADDSWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPADDSW", commutative: true, typ: "Vec256", resultInArg0: false},
-	{name: "VPSUBSWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false},
-	{name: "VPSUBWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPSUBW", commutative: false, typ: "Vec256", resultInArg0: false},
+	{name: "VPABSWMasked256", argLength: 2, reg: fpkfp, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false},
+	{name: "VPADDWMasked256", argLength: 3, reg: fp2kfp, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false},
+	{name: "VPMAXSWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXSW", commutative: true, typ: "Vec256", resultInArg0: false},
+	{name: "VPMINSWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINSW", commutative: true, typ: "Vec256", resultInArg0: false},
+	{name: "VPMULHWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false},
+	{name: "VPMULLWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULLW", commutative: true, typ: "Vec256", resultInArg0: false},
+	{name: "VPMADDWDMasked256", argLength: 3, reg: fp2kfp, asm: "VPMADDWD", commutative: false, typ: "Vec256", resultInArg0: false},
+	{name: "VPOPCNTWMasked256", argLength: 2, reg: fpkfp, asm: "VPOPCNTW", commutative: false, typ: "Vec256", resultInArg0: false},
+	{name: "VPADDSWMasked256", argLength: 3, reg: fp2kfp, asm: "VPADDSW", commutative: true, typ: "Vec256", resultInArg0: false},
+	{name: "VPSUBSWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false},
+	{name: "VPSUBWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBW", commutative: false, typ: "Vec256", resultInArg0: false},
 	{name: "VPMAXSW256", argLength: 2, reg: fp21, asm: "VPMAXSW", commutative: true, typ: "Vec256", resultInArg0: false},
 	{name: "VPMINSW256", argLength: 2, reg: fp21, asm: "VPMINSW", commutative: true, typ: "Vec256", resultInArg0: false},
 	{name: "VPMULHW256", argLength: 2, reg: fp21, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false},
@@ -250,17 +250,17 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1
 	{name: "VPSUBW256", argLength: 2, reg: fp21, asm: "VPSUBW", commutative: false, typ: "Vec256", resultInArg0: false},
 	{name: "VPABSW512", argLength: 1, reg: fp11, asm: "VPABSW", commutative: false, typ: "Vec512", resultInArg0: false},
 	{name: "VPADDW512", argLength: 2, reg: fp21, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false},
-	{name: "VPABSWMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPABSW", commutative: false, typ: "Vec512", resultInArg0: false},
-	{name: "VPADDWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false},
-	{name: "VPMAXSWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false},
-	{name: "VPMINSWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false},
-	{name: "VPMULHWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false},
-	{name: "VPMULLWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMULLW", commutative: true, typ: "Vec512", resultInArg0: false},
-	{name: "VPMADDWDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMADDWD", commutative: false, typ: "Vec512", resultInArg0: false},
-	{name: "VPOPCNTWMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTW", commutative: false, typ: "Vec512", resultInArg0: false},
-	{name: "VPADDSWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPADDSW", commutative: true, typ: "Vec512", resultInArg0: false},
-	{name: "VPSUBSWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false},
-	{name: "VPSUBWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false},
+	{name: "VPABSWMasked512", argLength: 2, reg: fpkfp, asm: "VPABSW", commutative: false, typ: "Vec512", resultInArg0: false},
+	{name: "VPADDWMasked512", argLength: 3, reg: fp2kfp, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false},
+	{name: "VPMAXSWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false},
+	{name: "VPMINSWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false},
+	{name: "VPMULHWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false},
+	{name: "VPMULLWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULLW", commutative: true, typ: "Vec512", resultInArg0: false},
+	{name: "VPMADDWDMasked512", argLength: 3, reg: fp2kfp, asm: "VPMADDWD", commutative: false, typ: "Vec512", resultInArg0: false},
+	{name: "VPOPCNTWMasked512", argLength: 2, reg: fpkfp, asm: "VPOPCNTW", commutative: false, typ: "Vec512", resultInArg0: false},
+	{name: "VPADDSWMasked512", argLength: 3, reg: fp2kfp, asm: "VPADDSW", commutative: true, typ: "Vec512", resultInArg0: false},
+	{name: "VPSUBSWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false},
+	{name: "VPSUBWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false},
 	{name: "VPMAXSW512", argLength: 2, reg: fp21, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false},
 	{name: "VPMINSW512", argLength: 2, reg: fp21, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false},
 	{name: "VPMULHW512", argLength: 2, reg: fp21, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false},
@@ -274,17 +274,17 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1
 	{name: "VPADDW128", argLength: 2, reg: fp21, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false},
 	{name: "VPCMPEQW128", argLength: 2, reg: fp21, asm: "VPCMPEQW", commutative: true, typ: "Vec128", resultInArg0: false},
 	{name: "VPCMPGTW128", argLength: 2, reg: fp21, asm: "VPCMPGTW", commutative: false, typ: "Vec128", resultInArg0: false},
-	{name: "VPABSWMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false},
-	{name: "VPADDWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false},
-	{name: "VPMAXSWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSW", commutative: true, typ: "Vec128", resultInArg0: false},
-	{name: "VPMINSWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINSW", commutative: true, typ: "Vec128", resultInArg0: false},
-	{name: "VPMULHWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false},
-	{name: "VPMULLWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULLW", commutative: true, typ: "Vec128", resultInArg0: false},
-	{name: "VPMADDWDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMADDWD", commutative: false, typ: "Vec128", resultInArg0: false},
-	{name: "VPOPCNTWMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTW", commutative: false, typ: "Vec128", resultInArg0: false},
-	{name: "VPADDSWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPADDSW", commutative: true, typ: "Vec128", resultInArg0: false},
-	{name: "VPSUBSWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: false},
-	{name: "VPSUBWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPSUBW", commutative: false, typ: "Vec128", resultInArg0: false},
+	{name: "VPABSWMasked128", argLength: 2, reg: fpkfp, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false},
+	{name: "VPADDWMasked128", argLength: 3, reg: fp2kfp, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false},
+	{name: "VPMAXSWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXSW", commutative: true, typ: "Vec128", resultInArg0: false},
+	{name: "VPMINSWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINSW", commutative: true, typ: "Vec128", resultInArg0: false},
+	{name: "VPMULHWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false},
+	{name: "VPMULLWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULLW", commutative: true, typ: "Vec128", resultInArg0: false},
+	{name: "VPMADDWDMasked128", argLength: 3, reg: fp2kfp, asm: "VPMADDWD", commutative: false, typ: "Vec128", resultInArg0: false},
+	{name: "VPOPCNTWMasked128", argLength: 2, reg: fpkfp, asm: "VPOPCNTW", commutative: false, typ: "Vec128", resultInArg0: false},
+	{name: "VPADDSWMasked128", argLength: 3, reg: fp2kfp, asm: "VPADDSW", commutative: true, typ: "Vec128", resultInArg0: false},
+	{name: "VPSUBSWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: false},
+	{name: "VPSUBWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBW", commutative: false, typ: "Vec128", resultInArg0: false},
 	{name: "VPMAXSW128", argLength: 2, reg: fp21, asm: "VPMAXSW", commutative: true, typ: "Vec128", resultInArg0: false},
 	{name: "VPMINSW128", argLength: 2, reg: fp21, asm: "VPMINSW", commutative: true, typ: "Vec128", resultInArg0: false},
 	{name: "VPMULHW128", argLength: 2, reg: fp21, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false},
@@ -303,21 +303,21 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1
 	{name: "VPADDD512", argLength: 2, reg: fp21, asm: "VPADDD", commutative: true, typ: "Vec512", resultInArg0: false},
 	{name: "VPANDD512", argLength: 2, reg: fp21, asm: "VPANDD", commutative: true, typ: "Vec512", resultInArg0: false},
 	{name: "VPANDND512", argLength: 2, reg: fp21, asm: "VPANDND", commutative: false, typ: "Vec512", resultInArg0: false},
-	{name: "VPABSDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPABSD", commutative: false, typ: "Vec512", resultInArg0: false},
-	{name: "VPADDDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPADDD", commutative: true, typ: "Vec512", resultInArg0: false},
-	{name: "VPANDDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPANDD", commutative: true, typ: "Vec512", resultInArg0: false},
-	{name: "VPANDNDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPANDND", commutative: false, typ: "Vec512", resultInArg0: false},
-	{name: "VPMAXSDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false},
-	{name: "VPMINSDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false},
-	{name: "VPMULLDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false},
-	{name: "VPORDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPORD", commutative: true, typ: "Vec512", resultInArg0: false},
-	{name: "VPDPWSSDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VPDPWSSD", commutative: false, typ: "Vec512", resultInArg0: true},
-	{name: "VPOPCNTDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTD", commutative: false, typ: "Vec512", resultInArg0: false},
-	{name: "VPDPWSSDSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", resultInArg0: true},
-	{name: "VPDPBUSDSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VPDPBUSDS", commutative: false, typ: "Vec512", resultInArg0: true},
-	{name: "VPSUBDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false},
-	{name: "VPDPBUSDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VPDPBUSD", commutative: false, typ: "Vec512", resultInArg0: true},
-	{name: "VPXORDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPXORD", commutative: true, typ: "Vec512", resultInArg0: false},
+	{name: "VPABSDMasked512", argLength: 2, reg: fpkfp, asm: "VPABSD", commutative: false, typ: "Vec512", resultInArg0: false},
+	{name: "VPADDDMasked512", argLength: 3, reg: fp2kfp, asm: "VPADDD", commutative: true, typ: "Vec512", resultInArg0: false},
+	{name: "VPANDDMasked512", argLength: 3, reg: fp2kfp, asm: "VPANDD", commutative: true, typ: "Vec512", resultInArg0: false},
+	{name: "VPANDNDMasked512", argLength: 3, reg: fp2kfp, asm: "VPANDND", commutative: false, typ: "Vec512", resultInArg0: false},
+	{name: "VPMAXSDMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false},
+	{name: "VPMINSDMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false},
+	{name: "VPMULLDMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false},
+	{name: "VPORDMasked512", argLength: 3, reg: fp2kfp, asm: "VPORD", commutative: true, typ: "Vec512", resultInArg0: false},
+	{name: "VPDPWSSDMasked512", argLength: 4, reg: fp3kfp, asm: "VPDPWSSD", commutative: false, typ: "Vec512", resultInArg0: true},
+	{name: "VPOPCNTDMasked512", argLength: 2, reg: fpkfp, asm: "VPOPCNTD", commutative: false, typ: "Vec512", resultInArg0: false},
+	{name: "VPDPWSSDSMasked512", argLength: 4, reg: fp3kfp, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", resultInArg0: true},
+	{name: "VPDPBUSDSMasked512", argLength: 4, reg: fp3kfp, asm: "VPDPBUSDS", commutative: false, typ: "Vec512", resultInArg0: true},
+	{name: "VPSUBDMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false},
+	{name: "VPDPBUSDMasked512", argLength: 4, reg: fp3kfp, asm: "VPDPBUSD", commutative: false, typ: "Vec512", resultInArg0: true},
+	{name: "VPXORDMasked512", argLength: 3, reg: fp2kfp, asm: "VPXORD", commutative: true, typ: "Vec512", resultInArg0: false},
 	{name: "VPMAXSD512", argLength: 2, reg: fp21, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false},
 	{name: "VPMINSD512", argLength: 2, reg: fp21, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false},
 	{name: "VPMULLD512", argLength: 2, reg: fp21, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false},
@@ -333,21 +333,21 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1
 	{name: "VPADDD128", argLength: 2, reg: fp21, asm: "VPADDD", commutative: true, typ: "Vec128", resultInArg0: false},
 	{name: "VPCMPEQD128", argLength: 2, reg: fp21, asm: "VPCMPEQD", commutative: true, typ: "Vec128", resultInArg0: false},
 	{name: "VPCMPGTD128", argLength: 2, reg: fp21, asm: "VPCMPGTD", commutative: false, typ: "Vec128", resultInArg0: false},
-	{name: "VPABSDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPABSD", commutative: false, typ: "Vec128", resultInArg0: false},
-	{name: "VPADDDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPADDD", commutative: true, typ: "Vec128", resultInArg0: false},
-	{name: "VPANDDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPANDD", commutative: true, typ: "Vec128", resultInArg0: false},
-	{name: "VPANDNDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPANDND", commutative: false, typ: "Vec128", resultInArg0: false},
-	{name: "VPMAXSDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSD", commutative: true, typ: "Vec128", resultInArg0: false},
-	{name: "VPMINSDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINSD", commutative: true, typ: "Vec128", resultInArg0: false},
-	{name: "VPMULLDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULLD", commutative: true, typ: "Vec128", resultInArg0: false},
-	{name: "VPORDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPORD", commutative: true, typ: "Vec128", resultInArg0: false},
-	{name: "VPDPWSSDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VPDPWSSD", commutative: false, typ: "Vec128", resultInArg0: true},
-	{name: "VPOPCNTDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTD", commutative: false, typ: "Vec128", resultInArg0: false},
-	{name: "VPDPWSSDSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VPDPWSSDS", commutative: false, typ: "Vec128", resultInArg0: true},
-	{name: "VPDPBUSDSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VPDPBUSDS", commutative: false, typ: "Vec128", resultInArg0: true},
-	{name: "VPSUBDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPSUBD", commutative: false, typ: "Vec128", resultInArg0: false},
-	{name: "VPDPBUSDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VPDPBUSD", commutative: false, typ: "Vec128", resultInArg0: true},
-	{name: "VPXORDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPXORD", commutative: true, typ: "Vec128", resultInArg0: false},
+	{name: "VPABSDMasked128", argLength: 2, reg: fpkfp, asm: "VPABSD", commutative: false, typ: "Vec128", resultInArg0: false},
+	{name: "VPADDDMasked128", argLength: 3, reg: fp2kfp, asm: "VPADDD", commutative: true, typ: "Vec128", resultInArg0: false},
+	{name: "VPANDDMasked128", argLength: 3, reg: fp2kfp, asm: "VPANDD", commutative: true, typ: "Vec128", resultInArg0: false},
+	{name: "VPANDNDMasked128", argLength: 3, reg: fp2kfp, asm: "VPANDND", commutative: false, typ: "Vec128", resultInArg0: false},
+	{name: "VPMAXSDMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXSD", commutative: true, typ: "Vec128", resultInArg0: false},
+	{name: "VPMINSDMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINSD", commutative: true, typ: "Vec128", resultInArg0: false},
+	{name: "VPMULLDMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULLD", commutative: true, typ: "Vec128", resultInArg0: false},
+	{name: "VPORDMasked128", argLength: 3, reg: fp2kfp, asm: "VPORD", commutative: true, typ: "Vec128", resultInArg0: false},
+	{name: "VPDPWSSDMasked128", argLength: 4, reg: fp3kfp, asm: "VPDPWSSD", commutative: false, typ: "Vec128", resultInArg0: true},
+	{name: "VPOPCNTDMasked128", argLength: 2, reg: fpkfp, asm: "VPOPCNTD", commutative: false, typ: "Vec128", resultInArg0: false},
+	{name: "VPDPWSSDSMasked128", argLength: 4, reg: fp3kfp, asm: "VPDPWSSDS", commutative: false, typ: "Vec128", resultInArg0: true},
+	{name: "VPDPBUSDSMasked128", argLength: 4, reg: fp3kfp, asm: "VPDPBUSDS", commutative: false, typ: "Vec128", resultInArg0: true},
+	{name: "VPSUBDMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBD", commutative: false, typ: "Vec128", resultInArg0: false},
+	{name: "VPDPBUSDMasked128", argLength: 4, reg: fp3kfp, asm: "VPDPBUSD", commutative: false, typ: "Vec128", resultInArg0: true},
+	{name: "VPXORDMasked128", argLength: 3, reg: fp2kfp, asm: "VPXORD", commutative: true, typ: "Vec128", resultInArg0: false},
 	{name: "VPMAXSD128", argLength: 2, reg: fp21, asm: "VPMAXSD", commutative: true, typ: "Vec128", resultInArg0: false},
 	{name: "VPMINSD128", argLength: 2, reg: fp21, asm: "VPMINSD", commutative: true, typ: "Vec128", resultInArg0: false},
 	{name: "VPMULDQ128", argLength: 2, reg: fp21, asm: "VPMULDQ", commutative: true, typ: "Vec128", resultInArg0: false},
@@ -365,21 +365,21 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1
 	{name: "VPADDD256", argLength: 2, reg: fp21, asm: "VPADDD", commutative: true, typ: "Vec256", resultInArg0: false},
 	{name: "VPCMPEQD256", argLength: 2, reg: fp21, asm: "VPCMPEQD", commutative: true, typ: "Vec256", resultInArg0: false},
 	{name: "VPCMPGTD256", argLength: 2, reg: fp21, asm: "VPCMPGTD", commutative: false, typ: "Vec256", resultInArg0: false},
-	{name: "VPABSDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPABSD", commutative: false, typ: "Vec256", resultInArg0: false},
-	{name: "VPADDDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPADDD", commutative: true, typ: "Vec256", resultInArg0: false},
-	{name: "VPANDDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPANDD", commutative: true, typ: "Vec256", resultInArg0: false},
-	{name: "VPANDNDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPANDND", commutative: false, typ: "Vec256", resultInArg0: false},
-	{name: "VPMAXSDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSD", commutative: true, typ: "Vec256", resultInArg0: false},
-	{name: "VPMINSDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINSD", commutative: true, typ: "Vec256", resultInArg0: false},
-	{name: "VPMULLDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMULLD", commutative: true, typ: "Vec256", resultInArg0: false},
-	{name: "VPORDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPORD", commutative: true, typ: "Vec256", resultInArg0: false},
-	{name: "VPDPWSSDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VPDPWSSD", commutative: false, typ: "Vec256", resultInArg0: true},
-	{name: "VPOPCNTDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTD", commutative: false, typ: "Vec256", resultInArg0: false},
-	{name: "VPDPWSSDSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VPDPWSSDS", commutative: false, typ: "Vec256", resultInArg0: true},
-	{name: "VPDPBUSDSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VPDPBUSDS", commutative: false, typ: "Vec256", resultInArg0: true},
-	{name: "VPSUBDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPSUBD", commutative: false, typ: "Vec256", resultInArg0: false},
-	{name: "VPDPBUSDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VPDPBUSD", commutative: false, typ: "Vec256", resultInArg0: true},
-	{name: "VPXORDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPXORD", commutative: true, typ: "Vec256", resultInArg0: false},
+	{name: "VPABSDMasked256", argLength: 2, reg: fpkfp, asm: "VPABSD", commutative: false, typ: "Vec256", resultInArg0: false},
+	{name: "VPADDDMasked256", argLength: 3, reg: fp2kfp, asm: "VPADDD", commutative: true, typ: "Vec256", resultInArg0: false},
+	{name: "VPANDDMasked256", argLength: 3, reg: fp2kfp, asm: "VPANDD", commutative: true, typ: "Vec256", resultInArg0: false},
+	{name: "VPANDNDMasked256", argLength: 3, reg: fp2kfp, asm: "VPANDND", commutative: false, typ: "Vec256", resultInArg0: false},
+	{name: "VPMAXSDMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXSD", commutative: true, typ: "Vec256", resultInArg0: false},
+	{name: "VPMINSDMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINSD", commutative: true, typ: "Vec256", resultInArg0: false},
+	{name: "VPMULLDMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULLD", commutative: true, typ: "Vec256", resultInArg0: false},
+	{name: "VPORDMasked256", argLength: 3, reg: fp2kfp, asm: "VPORD", commutative: true, typ: "Vec256", resultInArg0: false},
+	{name: "VPDPWSSDMasked256", argLength: 4, reg: fp3kfp, asm: "VPDPWSSD", commutative: false, typ: "Vec256", resultInArg0: true},
+	{name: "VPOPCNTDMasked256", argLength: 2, reg: fpkfp, asm: "VPOPCNTD", commutative: false, typ: "Vec256", resultInArg0: false},
+	{name: "VPDPWSSDSMasked256", argLength: 4, reg: fp3kfp, asm: "VPDPWSSDS", commutative: false, typ: "Vec256", resultInArg0: true},
+	{name: "VPDPBUSDSMasked256", argLength: 4, reg: fp3kfp, asm: "VPDPBUSDS", commutative: false, typ: "Vec256", resultInArg0: true},
+	{name: "VPSUBDMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBD", commutative: false, typ: "Vec256", resultInArg0: false},
+	{name: "VPDPBUSDMasked256", argLength: 4, reg: fp3kfp, asm: "VPDPBUSD", commutative: false, typ: "Vec256", resultInArg0: true},
+	{name: "VPXORDMasked256", argLength: 3, reg: fp2kfp, asm: "VPXORD", commutative: true, typ: "Vec256", resultInArg0: false},
 	{name: "VPMAXSD256", argLength: 2, reg: fp21, asm: "VPMAXSD", commutative: true, typ: "Vec256", resultInArg0: false},
 	{name: "VPMINSD256", argLength: 2, reg: fp21, asm: "VPMINSD", commutative: true, typ: "Vec256", resultInArg0: false},
 	{name: "VPMULDQ256", argLength: 2, reg: fp21, asm: "VPMULDQ", commutative: true, typ: "Vec256", resultInArg0: false},
@@ -396,18 +396,18 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1
 	{name: "VPABSQ128", argLength: 1, reg: fp11, asm: "VPABSQ", commutative: false, typ: "Vec128", resultInArg0: false},
 	{name: "VPADDQ128", argLength: 2, reg: fp21, asm: "VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false},
 	{name: "VPCMPEQQ128", argLength: 2, reg: fp21, asm: "VPCMPEQQ", commutative: true, typ: "Vec128", resultInArg0: false},
-	{name: "VPABSQMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPABSQ", commutative: false, typ: "Vec128", resultInArg0: false},
-	{name: "VPADDQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false},
-	{name: "VPANDQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPANDQ", commutative: true, typ: "Vec128", resultInArg0: false},
-	{name: "VPANDNQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPANDNQ", commutative: false, typ: "Vec128", resultInArg0: false},
-	{name: "VPMAXSQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec128", resultInArg0: false},
-	{name: "VPMINSQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINSQ", commutative: true, typ: "Vec128", resultInArg0: false},
-	{name: "VPMULDQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULDQ", commutative: true, typ: "Vec128", resultInArg0: false},
-	{name: "VPMULLQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULLQ", commutative: true, typ: "Vec128", resultInArg0: false},
-	{name: "VPORQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPORQ", commutative: true, typ: "Vec128", resultInArg0: false},
-	{name: "VPOPCNTQMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTQ", commutative: false, typ: "Vec128", resultInArg0: false},
-	{name: "VPSUBQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPSUBQ", commutative: false, typ: "Vec128", resultInArg0: false},
-	{name: "VPXORQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPXORQ", commutative: true, typ: "Vec128", resultInArg0: false},
+	{name: "VPABSQMasked128", argLength: 2, reg: fpkfp, asm: "VPABSQ", commutative: false, typ: "Vec128", resultInArg0: false},
+	{name: "VPADDQMasked128", argLength: 3, reg: fp2kfp, asm: "VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false},
+	{name: "VPANDQMasked128", argLength: 3, reg: fp2kfp, asm: "VPANDQ", commutative: true, typ: "Vec128", resultInArg0: false},
+	{name: "VPANDNQMasked128", argLength: 3, reg: fp2kfp, asm: "VPANDNQ", commutative: false, typ: "Vec128", resultInArg0: false},
+	{name: "VPMAXSQMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXSQ", commutative: true, typ: "Vec128", resultInArg0: false},
+	{name: "VPMINSQMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINSQ", commutative: true, typ: "Vec128", resultInArg0: false},
+	{name: "VPMULDQMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULDQ", commutative: true, typ: "Vec128", resultInArg0: false},
+	{name: "VPMULLQMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULLQ", commutative: true, typ: "Vec128", resultInArg0: false},
+	{name: "VPORQMasked128", argLength: 3, reg: fp2kfp, asm: "VPORQ", commutative: true, typ: "Vec128", resultInArg0: false},
+	{name: "VPOPCNTQMasked128", argLength: 2, reg: fpkfp, asm: "VPOPCNTQ", commutative: false, typ: "Vec128", resultInArg0: false},
+	{name: "VPSUBQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBQ", commutative: false, typ: "Vec128", resultInArg0: false},
+	{name: "VPXORQMasked128", argLength: 3, reg: fp2kfp, asm: "VPXORQ", commutative: true, typ: "Vec128", resultInArg0: false},
 	{name: "VPMAXSQ128", argLength: 2, reg: fp21, asm: "VPMAXSQ", commutative: true, typ: "Vec128", resultInArg0: false},
 	{name: "VPMINSQ128", argLength: 2, reg: fp21, asm: "VPMINSQ", commutative: true, typ: "Vec128", resultInArg0: false},
 	{name: "VPMULLQ128", argLength: 2, reg: fp21, asm: "VPMULLQ", commutative: true, typ: "Vec128", resultInArg0: false},
@@ -417,18 +417,18 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1
 	{name: "VPADDQ256", argLength: 2, reg: fp21, asm: "VPADDQ", commutative: true, typ: "Vec256", resultInArg0: false},
 	{name: "VPCMPEQQ256", argLength: 2, reg: fp21, asm: "VPCMPEQQ", commutative: true, typ: "Vec256", resultInArg0: false},
 	{name: "VPCMPGTQ256", argLength: 2, reg: fp21, asm: "VPCMPGTQ", commutative: false, typ: "Vec256", resultInArg0: false},
-	{name: "VPABSQMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPABSQ", commutative: false, typ: "Vec256", resultInArg0: false},
-	{name: "VPADDQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPADDQ", commutative: true, typ: "Vec256", resultInArg0: false},
-	{name: "VPANDQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPANDQ", commutative: true, typ: "Vec256", resultInArg0: false},
-	{name: "VPANDNQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPANDNQ", commutative: false, typ: "Vec256", resultInArg0: false},
-	{name: "VPMAXSQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec256", resultInArg0: false},
-	{name: "VPMINSQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINSQ", commutative: true, typ: "Vec256", resultInArg0: false},
-	{name: "VPMULDQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMULDQ", commutative: true, typ: "Vec256", resultInArg0: false},
-	{name: "VPMULLQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMULLQ", commutative: true, typ: "Vec256", resultInArg0: false},
-	{name: "VPORQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPORQ", commutative: true, typ: "Vec256", resultInArg0: false},
-	{name: "VPOPCNTQMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTQ", commutative: false, typ: "Vec256", resultInArg0: false},
-	{name: "VPSUBQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPSUBQ", commutative: false, typ: "Vec256", resultInArg0: false},
-	{name: "VPXORQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPXORQ", commutative: true, typ: "Vec256", resultInArg0: false},
+	{name: "VPABSQMasked256", argLength: 2, reg: fpkfp, asm: "VPABSQ", commutative: false, typ: "Vec256", resultInArg0: false},
+	{name: "VPADDQMasked256", argLength: 3, reg: fp2kfp, asm: "VPADDQ", commutative: true, typ: "Vec256", resultInArg0: false},
+	{name: "VPANDQMasked256", argLength: 3, reg: fp2kfp, asm: "VPANDQ", commutative: true, typ: "Vec256", resultInArg0: false},
+	{name: "VPANDNQMasked256", argLength: 3, reg: fp2kfp, asm: "VPANDNQ", commutative: false, typ: "Vec256", resultInArg0: false},
+	{name: "VPMAXSQMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXSQ", commutative: true, typ: "Vec256", resultInArg0: false},
+	{name: "VPMINSQMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINSQ", commutative: true, typ: "Vec256", resultInArg0: false},
+	{name: "VPMULDQMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULDQ", commutative: true, typ: "Vec256", resultInArg0: false},
+	{name: "VPMULLQMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULLQ", commutative: true, typ: "Vec256", resultInArg0: false},
+	{name: "VPORQMasked256", argLength: 3, reg: fp2kfp, asm: "VPORQ", commutative: true, typ: "Vec256", resultInArg0: false},
+	{name: "VPOPCNTQMasked256", argLength: 2, reg: fpkfp, asm: "VPOPCNTQ", commutative: false, typ: "Vec256", resultInArg0: false},
+	{name: "VPSUBQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBQ", commutative: false, typ: "Vec256", resultInArg0: false},
+	{name: "VPXORQMasked256", argLength: 3, reg: fp2kfp, asm: "VPXORQ", commutative: true, typ: "Vec256", resultInArg0: false},
 	{name: "VPMAXSQ256", argLength: 2, reg: fp21, asm: "VPMAXSQ", commutative: true, typ: "Vec256", resultInArg0: false},
 	{name: "VPMINSQ256", argLength: 2, reg: fp21, asm: "VPMINSQ", commutative: true, typ: "Vec256", resultInArg0: false},
 	{name: "VPMULLQ256", argLength: 2, reg: fp21, asm: "VPMULLQ", commutative: true, typ: "Vec256", resultInArg0: false},
@@ -438,18 +438,18 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1
 	{name: "VPADDQ512", argLength: 2, reg: fp21, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false},
 	{name: "VPANDQ512", argLength: 2, reg: fp21, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false},
 	{name: "VPANDNQ512", argLength: 2, reg: fp21, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false},
-	{name: "VPABSQMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPABSQ", commutative: false, typ: "Vec512", resultInArg0: false},
-	{name: "VPADDQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false},
-	{name: "VPANDQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false},
-	{name: "VPANDNQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false},
-	{name: "VPMAXSQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false},
-	{name: "VPMINSQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINSQ", commutative: true, typ: "Vec512", resultInArg0: false},
-	{name: "VPMULDQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMULDQ", commutative: true, typ: "Vec512", resultInArg0: false},
-	{name: "VPMULLQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMULLQ", commutative: true, typ: "Vec512", resultInArg0: false},
-	{name: "VPORQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPORQ", commutative: true, typ: "Vec512", resultInArg0: false},
-	{name: "VPOPCNTQMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTQ", commutative: false, typ: "Vec512", resultInArg0: false},
-	{name: "VPSUBQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPSUBQ", commutative: false, typ: "Vec512", resultInArg0: false},
-	{name: "VPXORQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPXORQ", commutative: true, typ: "Vec512", resultInArg0: false},
+	{name: "VPABSQMasked512", argLength: 2, reg: fpkfp, asm: "VPABSQ", commutative: false, typ: "Vec512", resultInArg0: false},
+	{name: "VPADDQMasked512", argLength: 3, reg: fp2kfp, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false},
+	{name: "VPANDQMasked512", argLength: 3, reg: fp2kfp, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false},
+	{name: "VPANDNQMasked512", argLength: 3, reg: fp2kfp, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false},
+	{name: "VPMAXSQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false},
+	{name: "VPMINSQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINSQ", commutative: true, typ: "Vec512", resultInArg0: false},
+	{name: "VPMULDQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULDQ", commutative: true, typ: "Vec512", resultInArg0: false},
+	{name: "VPMULLQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULLQ", commutative: true, typ: "Vec512", resultInArg0: false},
+	{name: "VPORQMasked512", argLength: 3, reg: fp2kfp, asm: "VPORQ", commutative: true, typ: "Vec512", resultInArg0: false},
+	{name: "VPOPCNTQMasked512", argLength: 2, reg: fpkfp, asm: "VPOPCNTQ", commutative: false, typ: "Vec512", resultInArg0: false},
+	{name: "VPSUBQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBQ", commutative: false, typ: "Vec512", resultInArg0: false},
+	{name: "VPXORQMasked512", argLength: 3, reg: fp2kfp, asm: "VPXORQ", commutative: true, typ: "Vec512", resultInArg0: false},
 	{name: "VPMAXSQ512", argLength: 2, reg: fp21, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false},
 	{name: "VPMINSQ512", argLength: 2, reg: fp21, asm: "VPMINSQ", commutative: true, typ: "Vec512", resultInArg0: false},
 	{name: "VPMULDQ512", argLength: 2, reg: fp21, asm: "VPMULDQ", commutative: true, typ: "Vec512", resultInArg0: false},
@@ -464,14 +464,14 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1
 	{name: "VPANDN128", argLength: 2, reg: fp21, asm: "VPANDN", commutative: false, typ: "Vec128", resultInArg0: false},
 	{name: "VPCMPEQB128", argLength: 2, reg: fp21, asm: "VPCMPEQB", commutative: true, typ: "Vec128", resultInArg0: false},
 	{name: "VPCMPGTB128", argLength: 2, reg: fp21, asm: "VPCMPGTB", commutative: false, typ: "Vec128", resultInArg0: false},
-	{name: "VPABSBMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPABSB", commutative: false, typ: "Vec128", resultInArg0: false},
-	{name: "VPADDBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPADDB", commutative: true, typ: "Vec128", resultInArg0: false},
-	{name: "VPMAXSBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSB", commutative: true, typ: "Vec128", resultInArg0: false},
-	{name: "VPMINSBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINSB", commutative: true, typ: "Vec128", resultInArg0: false},
-	{name: "VPOPCNTBMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTB", commutative: false, typ: "Vec128", resultInArg0: false},
-	{name: "VPADDSBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPADDSB", commutative: true, typ: "Vec128", resultInArg0: false},
-	{name: "VPSUBSBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPSUBSB", commutative: false, typ: "Vec128", resultInArg0: false},
-	{name: "VPSUBBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPSUBB", commutative: false, typ: "Vec128", resultInArg0: false},
+	{name: "VPABSBMasked128", argLength: 2, reg: fpkfp, asm: "VPABSB", commutative: false, typ: "Vec128", resultInArg0: false},
+	{name: "VPADDBMasked128", argLength: 3, reg: fp2kfp, asm: "VPADDB", commutative: true, typ: "Vec128", resultInArg0: false},
+	{name: "VPMAXSBMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXSB", commutative: true, typ: "Vec128", resultInArg0: false},
+	{name: "VPMINSBMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINSB", commutative: true, typ: "Vec128", resultInArg0: false},
+	{name: "VPOPCNTBMasked128", argLength: 2, reg: fpkfp, asm: "VPOPCNTB", commutative: false, typ: "Vec128", resultInArg0: false},
+	{name: "VPADDSBMasked128", argLength: 3, reg: fp2kfp, asm: "VPADDSB", commutative: true, typ: "Vec128", resultInArg0: false},
+	{name: "VPSUBSBMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBSB", commutative: false, typ: "Vec128", resultInArg0: false},
+	{name: "VPSUBBMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBB", commutative: false, typ: "Vec128", resultInArg0: false},
 	{name: "VPMAXSB128", argLength: 2, reg: fp21, asm: "VPMAXSB", commutative: true, typ: "Vec128", resultInArg0: false},
 	{name: "VPMINSB128", argLength: 2, reg: fp21, asm: "VPMINSB", commutative: true, typ: "Vec128", resultInArg0: false},
 	{name: "VPOR128", argLength: 2, reg: fp21, asm: "VPOR", commutative: true, typ: "Vec128", resultInArg0: false},
@@ -487,14 +487,14 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1
 	{name: "VPANDN256", argLength: 2, reg: fp21, asm: "VPANDN", commutative: false, typ: "Vec256", resultInArg0: false},
 	{name: "VPCMPEQB256", argLength: 2, reg: fp21, asm: "VPCMPEQB", commutative: true, typ: "Vec256", resultInArg0: false},
 	{name: "VPCMPGTB256", argLength: 2, reg: fp21, asm: "VPCMPGTB", commutative: false, typ: "Vec256", resultInArg0: false},
-	{name: "VPABSBMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPABSB", commutative: false, typ: "Vec256", resultInArg0: false},
-	{name: "VPADDBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPADDB", commutative: true, typ: "Vec256", resultInArg0: false},
-	{name: "VPMAXSBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSB", commutative: true, typ: "Vec256", resultInArg0: false},
-	{name: "VPMINSBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINSB", commutative: true, typ: "Vec256", resultInArg0: false},
-	{name: "VPOPCNTBMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTB", commutative: false, typ: "Vec256", resultInArg0: false},
-	{name: "VPADDSBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPADDSB", commutative: true, typ: "Vec256", resultInArg0: false},
-	{name: "VPSUBSBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPSUBSB", commutative: false, typ: "Vec256", resultInArg0: false},
-	{name: "VPSUBBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPSUBB", commutative: false, typ: "Vec256", resultInArg0: false},
+	{name: "VPABSBMasked256", argLength: 2, reg: fpkfp, asm: "VPABSB", commutative: false, typ: "Vec256", resultInArg0: false},
+	{name: "VPADDBMasked256", argLength: 3, reg: fp2kfp, asm: "VPADDB", commutative: true, typ: "Vec256", resultInArg0: false},
+	{name: "VPMAXSBMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXSB", commutative: true, typ: "Vec256", resultInArg0: false},
+	{name: "VPMINSBMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINSB", commutative: true, typ: "Vec256", resultInArg0: false},
+	{name: "VPOPCNTBMasked256", argLength: 2, reg: fpkfp, asm: "VPOPCNTB", commutative: false, typ: "Vec256", resultInArg0: false},
+	{name: "VPADDSBMasked256", argLength: 3, reg: fp2kfp, asm: "VPADDSB", commutative: true, typ: "Vec256", resultInArg0: false},
+	{name: "VPSUBSBMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBSB", commutative: false, typ: "Vec256", resultInArg0: false},
+	{name: "VPSUBBMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBB", commutative: false, typ: "Vec256", resultInArg0: false},
 	{name: "VPMAXSB256", argLength: 2, reg: fp21, asm: "VPMAXSB", commutative: true, typ: "Vec256", resultInArg0: false},
 	{name: "VPMINSB256", argLength: 2, reg: fp21, asm: "VPMINSB", commutative: true, typ: "Vec256", resultInArg0: false},
 	{name: "VPOR256", argLength: 2, reg: fp21, asm: "VPOR", commutative: true, typ: "Vec256", resultInArg0: false},
@@ -506,14 +506,14 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1
 	{name: "VPXOR256", argLength: 2, reg: fp21, asm: "VPXOR", commutative: true, typ: "Vec256", resultInArg0: false},
 	{name: "VPABSB512", argLength: 1, reg: fp11, asm: "VPABSB", commutative: false, typ: "Vec512", resultInArg0: false},
 	{name: "VPADDB512", argLength: 2, reg: fp21, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false},
-	{name: "VPABSBMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPABSB", commutative: false, typ: "Vec512", resultInArg0: false},
-	{name: "VPADDBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false},
-	{name: "VPMAXSBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false},
-	{name: "VPMINSBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINSB", commutative: true, typ: "Vec512", resultInArg0: false},
-	{name: "VPOPCNTBMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTB", commutative: false, typ: "Vec512", resultInArg0: false},
-	{name: "VPADDSBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPADDSB", commutative: true, typ: "Vec512", resultInArg0: false},
-	{name: "VPSUBSBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPSUBSB", commutative: false, typ: "Vec512", resultInArg0: false},
-	{name: "VPSUBBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPSUBB", commutative: false, typ: "Vec512", resultInArg0: false},
+	{name: "VPABSBMasked512", argLength: 2, reg: fpkfp, asm: "VPABSB", commutative: false, typ: "Vec512", resultInArg0: false},
+	{name: "VPADDBMasked512", argLength: 3, reg: fp2kfp, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false},
+	{name: "VPMAXSBMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false},
+	{name: "VPMINSBMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINSB", commutative: true, typ: "Vec512", resultInArg0: false},
+	{name: "VPOPCNTBMasked512", argLength: 2, reg: fpkfp, asm: "VPOPCNTB", commutative: false, typ: "Vec512", resultInArg0: false},
+	{name: "VPADDSBMasked512", argLength: 3, reg: fp2kfp, asm: "VPADDSB", commutative: true, typ: "Vec512", resultInArg0: false},
+	{name: "VPSUBSBMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBSB", commutative: false, typ: "Vec512", resultInArg0: false},
+	{name: "VPSUBBMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBB", commutative: false, typ: "Vec512", resultInArg0: false},
 	{name: "VPMAXSB512", argLength: 2, reg: fp21, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false},
 	{name: "VPMINSB512", argLength: 2, reg: fp21, asm: "VPMINSB", commutative: true, typ: "Vec512", resultInArg0: false},
 	{name: "VPOPCNTB512", argLength: 1, reg: fp11, asm: "VPOPCNTB", commutative: false, typ: "Vec512", resultInArg0: false},
@@ -521,175 +521,175 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1
 	{name: "VPSUBSB512", argLength: 2, reg: fp21, asm: "VPSUBSB", commutative: false, typ: "Vec512", resultInArg0: false},
 	{name: "VPSUBB512", argLength: 2, reg: fp21, asm: "VPSUBB", commutative: false, typ: "Vec512", resultInArg0: false},
 	{name: "VPAVGW256", argLength: 2, reg: fp21, asm: "VPAVGW", commutative: true, typ: "Vec256", resultInArg0: false},
-	{name: "VPAVGWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPAVGW", commutative: true, typ: "Vec256", resultInArg0: false},
-	{name: "VPMAXUWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUW", commutative: true, typ: "Vec256", resultInArg0: false},
-	{name: "VPMINUWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false},
-	{name: "VPMULHUWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false},
+	{name: "VPAVGWMasked256", argLength: 3, reg: fp2kfp, asm: "VPAVGW", commutative: true, typ: "Vec256", resultInArg0: false},
+	{name: "VPMAXUWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXUW", commutative: true, typ: "Vec256", resultInArg0: false},
+	{name: "VPMINUWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false},
+	{name: "VPMULHUWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false},
 	{name: "VPMAXUW256", argLength: 2, reg: fp21, asm: "VPMAXUW", commutative: true, typ: "Vec256", resultInArg0: false},
 	{name: "VPMINUW256", argLength: 2, reg: fp21, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false},
 	{name: "VPMULHUW256", argLength: 2, reg: fp21, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false},
 	{name: "VPAVGW512", argLength: 2, reg: fp21, asm: "VPAVGW", commutative: true, typ: "Vec512", resultInArg0: false},
-	{name: "VPAVGWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPAVGW", commutative: true, typ: "Vec512", resultInArg0: false},
-	{name: "VPMAXUWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false},
-	{name: "VPMINUWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false},
-	{name: "VPMULHUWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false},
+	{name: "VPAVGWMasked512", argLength: 3, reg: fp2kfp, asm: "VPAVGW", commutative: true, typ: "Vec512", resultInArg0: false},
+	{name: "VPMAXUWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false},
+	{name: "VPMINUWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false},
+	{name: "VPMULHUWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false},
 	{name: "VPMAXUW512", argLength: 2, reg: fp21, asm: "VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false},
 	{name: "VPMINUW512", argLength: 2, reg: fp21, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false},
 	{name: "VPMULHUW512", argLength: 2, reg: fp21, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false},
 	{name: "VPAVGW128", argLength: 2, reg: fp21, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false},
-	{name: "VPAVGWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false},
-	{name: "VPMAXUWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUW", commutative: true, typ: "Vec128", resultInArg0: false},
-	{name: "VPMINUWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false},
-	{name: "VPMULHUWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false},
+	{name: "VPAVGWMasked128", argLength: 3, reg: fp2kfp, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false},
+	{name: "VPMAXUWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXUW", commutative: true, typ: "Vec128", resultInArg0: false},
+	{name: "VPMINUWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false},
+	{name: "VPMULHUWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false},
 	{name: "VPMAXUW128", argLength: 2, reg: fp21, asm: "VPMAXUW", commutative: true, typ: "Vec128", resultInArg0: false},
 	{name: "VPMINUW128", argLength: 2, reg: fp21, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false},
 	{name: "VPMULHUW128", argLength: 2, reg: fp21, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false},
-	{name: "VPMAXUDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUD", commutative: true, typ: "Vec512", resultInArg0: false},
-	{name: "VPMINUDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false},
+	{name: "VPMAXUDMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXUD", commutative: true, typ: "Vec512", resultInArg0: false},
+	{name: "VPMINUDMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false},
 	{name: "VPMAXUD512", argLength: 2, reg: fp21, asm: "VPMAXUD", commutative: true, typ: "Vec512", resultInArg0: false},
 	{name: "VPMINUD512", argLength: 2, reg: fp21, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false},
-	{name: "VPMAXUDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUD", commutative: true, typ: "Vec128", resultInArg0: false},
-	{name: "VPMINUDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false},
+	{name: "VPMAXUDMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXUD", commutative: true, typ: "Vec128", resultInArg0: false},
+	{name: "VPMINUDMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false},
 	{name: "VPMAXUD128", argLength: 2, reg: fp21, asm: "VPMAXUD", commutative: true, typ: "Vec128", resultInArg0: false},
 	{name: "VPMINUD128", argLength: 2, reg: fp21, asm: "VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false},
 	{name: "VPMULUDQ128", argLength: 2, reg: fp21, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false},
-	{name: "VPMAXUDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUD", commutative: true, typ: "Vec256", resultInArg0: false},
-	{name: "VPMINUDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false},
+	{name: "VPMAXUDMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXUD", commutative: true, typ: "Vec256", resultInArg0: false},
+	{name: "VPMINUDMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false},
 	{name: "VPMAXUD256", argLength: 2, reg: fp21, asm: "VPMAXUD", commutative: true, typ: "Vec256", resultInArg0: false},
 	{name: "VPMINUD256", argLength: 2, reg: fp21, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false},
 	{name: "VPMULUDQ256", argLength: 2, reg: fp21, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false},
-	{name: "VPMAXUQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUQ", commutative: true, typ: "Vec128", resultInArg0: false},
-	{name: "VPMINUQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINUQ", commutative: true, typ: "Vec128", resultInArg0: false},
-	{name: "VPMULUDQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULUDQ",
commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUQMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXUQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUQMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINUQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULUDQMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMAXUQ128", argLength: 2, reg: fp21, asm: "VPMAXUQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUQ128", argLength: 2, reg: fp21, asm: "VPMINUQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULUDQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUQMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXUQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUQMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULUDQMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMAXUQ256", argLength: 2, reg: fp21, asm: "VPMAXUQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUQ256", argLength: 2, reg: fp21, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULUDQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXUQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULUDQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXUQ512", argLength: 2, reg: fp21, asm: "VPMAXUQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINUQ512", argLength: 2, reg: fp21, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULUDQ512", argLength: 2, reg: fp21, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPAVGB128", argLength: 2, reg: fp21, asm: "VPAVGB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPAVGBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPAVGB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMADDUBSWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPAVGBMasked128", argLength: 3, reg: fp2kfp, asm: "VPAVGB", commutative: 
true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUBMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXUB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUBMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMADDUBSWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMAXUB128", argLength: 2, reg: fp21, asm: "VPMAXUB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUB128", argLength: 2, reg: fp21, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMADDUBSW128", argLength: 2, reg: fp21, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPAVGB256", argLength: 2, reg: fp21, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPAVGBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMADDUBSWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPAVGBMasked256", argLength: 3, reg: fp2kfp, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUBMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUBMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMADDUBSWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMAXUB256", argLength: 2, reg: fp21, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUB256", argLength: 2, reg: fp21, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMADDUBSW256", argLength: 2, reg: fp21, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPAVGB512", argLength: 2, reg: fp21, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPAVGBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXUBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMADDUBSWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPAVGBMasked512", argLength: 3, reg: fp2kfp, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUBMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUBMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMADDUBSWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPMAXUB512", argLength: 2, reg: fp21, asm: "VPMAXUB", commutative: true, 
typ: "Vec512", resultInArg0: false}, {name: "VPMINUB512", argLength: 2, reg: fp21, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMADDUBSW512", argLength: 2, reg: fp21, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRNDSCALEPS512", argLength: 1, reg: fp11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VREDUCEPS512", argLength: 1, reg: fp11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VCMPPS512", argLength: 2, reg: fp2k1, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VRNDSCALEPSMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VREDUCEPSMasked512", argLength: 2, reg: fp1k1fp1, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VCMPPSMasked512", argLength: 3, reg: fp2k1k1, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPS512", argLength: 2, reg: fp2k, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VRNDSCALEPSMasked512", argLength: 2, reg: fpkfp, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VREDUCEPSMasked512", argLength: 2, reg: fpkfp, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VCMPPSMasked512", argLength: 3, reg: fp2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VROUNDPS128", argLength: 1, reg: fp11, asm: "VROUNDPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRNDSCALEPS128", argLength: 1, reg: fp11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VREDUCEPS128", argLength: 1, reg: fp11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VCMPPS128", argLength: 2, reg: fp21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VRNDSCALEPSMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VREDUCEPSMasked128", argLength: 2, reg: fp1k1fp1, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VCMPPSMasked128", argLength: 3, reg: fp2k1k1, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VRNDSCALEPSMasked128", argLength: 2, reg: fpkfp, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VREDUCEPSMasked128", argLength: 2, reg: fpkfp, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VCMPPSMasked128", argLength: 3, reg: fp2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VROUNDPS256", argLength: 1, reg: fp11, asm: "VROUNDPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRNDSCALEPS256", argLength: 1, reg: fp11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VREDUCEPS256", argLength: 1, reg: fp11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VCMPPS256", argLength: 2, reg: fp21, asm: 
"VCMPPS", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VRNDSCALEPSMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VREDUCEPSMasked256", argLength: 2, reg: fp1k1fp1, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VCMPPSMasked256", argLength: 3, reg: fp2k1k1, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VRNDSCALEPSMasked256", argLength: 2, reg: fpkfp, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPSMasked256", argLength: 2, reg: fpkfp, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VCMPPSMasked256", argLength: 3, reg: fp2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VROUNDPD128", argLength: 1, reg: fp11, asm: "VROUNDPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRNDSCALEPD128", argLength: 1, reg: fp11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VREDUCEPD128", argLength: 1, reg: fp11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDPPD128", argLength: 2, reg: fp21, asm: "VDPPD", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VCMPPD128", argLength: 2, reg: fp21, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VRNDSCALEPDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VREDUCEPDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VCMPPDMasked128", argLength: 3, reg: fp2k1k1, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VRNDSCALEPDMasked128", argLength: 2, reg: fpkfp, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VREDUCEPDMasked128", argLength: 2, reg: fpkfp, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VCMPPDMasked128", argLength: 3, reg: fp2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VROUNDPD256", argLength: 1, reg: fp11, asm: "VROUNDPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRNDSCALEPD256", argLength: 1, reg: fp11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VREDUCEPD256", argLength: 1, reg: fp11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VCMPPD256", argLength: 2, reg: fp21, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VRNDSCALEPDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VREDUCEPDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VCMPPDMasked256", argLength: 3, reg: fp2k1k1, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VRNDSCALEPDMasked256", argLength: 2, reg: fpkfp, asm: "VRNDSCALEPD", aux: "Int8", 
commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPDMasked256", argLength: 2, reg: fpkfp, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VCMPPDMasked256", argLength: 3, reg: fp2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VRNDSCALEPD512", argLength: 1, reg: fp11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VREDUCEPD512", argLength: 1, reg: fp11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VCMPPD512", argLength: 2, reg: fp2k1, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VRNDSCALEPDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VREDUCEPDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VCMPPDMasked512", argLength: 3, reg: fp2k1k1, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPW256", argLength: 2, reg: fp2k1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPWMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPW512", argLength: 2, reg: fp2k1, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPWMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPW128", argLength: 2, reg: fp2k1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPWMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPINSRW128", argLength: 2, reg: fp1gp1fp1, asm: "VPINSRW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPD512", argLength: 2, reg: fp2k1, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPDMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPD128", argLength: 2, reg: fp2k1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPDMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPINSRD128", argLength: 2, reg: fp1gp1fp1, asm: "VPINSRD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPD256", argLength: 2, reg: fp2k1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPDMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPQ128", argLength: 2, reg: fp2k1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPQMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPINSRQ128", argLength: 2, reg: fp1gp1fp1, asm: "VPINSRQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPQ256", argLength: 2, reg: fp2k1, asm: "VPCMPQ", aux: 
"Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPQMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPQ512", argLength: 2, reg: fp2k1, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPQMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPB128", argLength: 2, reg: fp2k1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPBMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPINSRB128", argLength: 2, reg: fp1gp1fp1, asm: "VPINSRB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPB256", argLength: 2, reg: fp2k1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPBMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPB512", argLength: 2, reg: fp2k1, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPBMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUW256", argLength: 2, reg: fp2k1, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUWMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUW512", argLength: 2, reg: fp2k1, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUWMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUW128", argLength: 2, reg: fp2k1, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUWMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUD512", argLength: 2, reg: fp2k1, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUDMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUD128", argLength: 2, reg: fp2k1, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUDMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUD256", argLength: 2, reg: fp2k1, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUDMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQ128", argLength: 2, reg: fp2k1, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQ256", argLength: 2, reg: fp2k1, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPUQ", aux: "Int8", commutative: true, 
typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQ512", argLength: 2, reg: fp2k1, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUB128", argLength: 2, reg: fp2k1, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUBMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUB256", argLength: 2, reg: fp2k1, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUBMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUB512", argLength: 2, reg: fp2k1, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUBMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPD512", argLength: 2, reg: fp2k, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VRNDSCALEPDMasked512", argLength: 2, reg: fpkfp, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VREDUCEPDMasked512", argLength: 2, reg: fpkfp, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VCMPPDMasked512", argLength: 3, reg: fp2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPW256", argLength: 2, reg: fp2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPWMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPW512", argLength: 2, reg: fp2k, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPWMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPW128", argLength: 2, reg: fp2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPWMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPINSRW128", argLength: 2, reg: fpgpfp, asm: "VPINSRW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPD512", argLength: 2, reg: fp2k, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPDMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPD128", argLength: 2, reg: fp2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPDMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPINSRD128", argLength: 2, reg: fpgpfp, asm: "VPINSRD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPD256", argLength: 2, reg: fp2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPDMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + 
{name: "VPCMPQ128", argLength: 2, reg: fp2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPINSRQ128", argLength: 2, reg: fpgpfp, asm: "VPINSRQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPQ256", argLength: 2, reg: fp2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQ512", argLength: 2, reg: fp2k, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPB128", argLength: 2, reg: fp2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPBMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPINSRB128", argLength: 2, reg: fpgpfp, asm: "VPINSRB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPB256", argLength: 2, reg: fp2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPBMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPB512", argLength: 2, reg: fp2k, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPBMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUW256", argLength: 2, reg: fp2k, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUWMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUW512", argLength: 2, reg: fp2k, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUWMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUW128", argLength: 2, reg: fp2k, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUWMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUD512", argLength: 2, reg: fp2k, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUDMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUD128", argLength: 2, reg: fp2k, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUDMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUD256", argLength: 2, reg: fp2k, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUDMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQ128", argLength: 2, reg: fp2k, asm: "VPCMPUQ", aux: "Int8", 
commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQ256", argLength: 2, reg: fp2k, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQ512", argLength: 2, reg: fp2k, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUB128", argLength: 2, reg: fp2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUBMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUB256", argLength: 2, reg: fp2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUBMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUB512", argLength: 2, reg: fp2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUBMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, } } From 61c1183342897ed5544c0d37ad58d9038d50e3ea Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Fri, 20 Jun 2025 18:57:51 +0000 Subject: [PATCH 044/139] [dev.simd] simd: add test wrappers This CL adds test wrappers for unit tests, and changes the existing Add/Sub tests to use the wrappers. This CL is generated by CL 683455. Change-Id: Ibd388d82632ce56aad7a1ab5fff62db232819bb5 Reviewed-on: https://go-review.googlesource.com/c/go/+/683015 Auto-Submit: Junyang Shao Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/simd/simd_test.go | 76 +- src/simd/simd_wrapped_test.go | 6739 +++++++++++++++++++++++++++++++++ src/simd/stubs_amd64.go | 16 +- 3 files changed, 6755 insertions(+), 76 deletions(-) create mode 100644 src/simd/simd_wrapped_test.go diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index 8658631e45200c..6df634b428f5d8 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -96,22 +96,6 @@ func TestReflectMethod(t *testing.T) { } } -func TestAdd(t *testing.T) { - xv := [4]int32{1, 2, 3, 4} - yv := [4]int32{5, 6, 7, 8} - want := []int32{6, 8, 10, 12} - x := simd.LoadInt32x4(&xv) - y := simd.LoadInt32x4(&yv) - x = x.Add(y) - got := [4]int32{} - x.Store(&got) - for i := range 4 { - if want[i] != got[i] { - t.Errorf("Result at %d incorrect: want %d, got %d", i, want[i], got[i]) - } - } -} - func TestVectorConversion(t *testing.T) { if !simd.HasAVX512() { t.Skip("Test requires HasAVX512, not available on this hardware") return } @@ -151,64 +135,20 @@ func TestMaskConversion(t *testing.T) { } } -func TestMaskedAdd(t *testing.T) { - if !simd.HasAVX512() { - t.Skip("Test requires HasAVX512, not available on this hardware") - return - } - xv := [4]int32{1, 2, 3, 4} - yv := [4]int32{5, 6, 7, 8} - // masking elements 1 and 2.
- maskv := [4]int32{-1, -1, 0, 0} - want := []int32{6, 8, 0, 0} - x := simd.LoadInt32x4(&xv) - y := simd.LoadInt32x4(&yv) - mask := simd.LoadInt32x4(&maskv).AsMask32x4() - x = x.MaskedAdd(y, mask) - got := [4]int32{} - x.Store(&got) - for i := range 4 { - if want[i] != got[i] { - t.Errorf("Result at %d incorrect: want %d, got %d", i, want[i], got[i]) - } - } +func TestAdd(t *testing.T) { + testInt32x4Binary(t, []int32{1, 2, 3, 4}, []int32{5, 6, 7, 8}, []int32{6, 8, 10, 12}, "Add") } -func TestCompare(t *testing.T) { - xv := [4]int32{5, 1, 5, 3} - yv := [4]int32{3, 3, 3, 3} - want := []int32{8, 0, 8, 0} - x := simd.LoadInt32x4(&xv) - y := simd.LoadInt32x4(&yv) +func TestSub(t *testing.T) { + testInt32x4Binary(t, []int32{5, 5, 5, 3}, []int32{3, 3, 3, 3}, []int32{2, 2, 2, 0}, "Sub") +} + +func TestMaskedAdd(t *testing.T) { if !simd.HasAVX512() { t.Skip("Test requires HasAVX512, not available on this hardware") return } - mask := x.Greater(y) - x = x.MaskedAdd(y, mask) - got := [4]int32{} - x.Store(&got) - for i := range 4 { - if want[i] != got[i] { - t.Errorf("Result at %d incorrect: want %d, got %d", i, want[i], got[i]) - } - } -} - -func TestSub(t *testing.T) { - xv := [4]int32{5, 5, 5, 3} - yv := [4]int32{3, 3, 3, 3} - want := []int32{2, 2, 2, 0} - x := simd.LoadInt32x4(&xv) - y := simd.LoadInt32x4(&yv) - x = x.Sub(y) - got := [4]int32{} - x.Store(&got) - for i := range 4 { - if want[i] != got[i] { - t.Errorf("Result at %d incorrect: want %d, got %d", i, want[i], got[i]) - } - } + testInt32x4BinaryMasked(t, []int32{1, 2, 3, 4}, []int32{5, 6, 7, 8}, []int32{-1, -1, 0, 0}, []int32{6, 8, 0, 0}, "MaskedAdd") } // checkInt8Slices ensures that b and a are equal, to the end of b. diff --git a/src/simd/simd_wrapped_test.go b/src/simd/simd_wrapped_test.go new file mode 100644 index 00000000000000..8761097c44e1ec --- /dev/null +++ b/src/simd/simd_wrapped_test.go @@ -0,0 +1,6739 @@ +// Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. 
+ +//go:build goexperiment.simd + +package simd_test + +import ( + "simd" + "testing" +) + +func testFloat32x16Binary(t *testing.T, v0 []float32, v1 []float32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x16 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x16Slice(v0) + vec1 := simd.LoadFloat32x16Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Div": + gotv = vec0.Div(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "Mul": + gotv = vec0.Mul(vec1) + case "MulByPowOf2": + gotv = vec0.MulByPowOf2(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Float32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x16BinaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []int32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x16 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x16Slice(v0) + vec1 := simd.LoadFloat32x16Slice(v1) + vec2 := simd.LoadInt32x16Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x16()) + case "MaskedAnd": + gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x16()) + case "MaskedAndNot": + gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x16()) + case "MaskedDiv": + gotv = vec0.MaskedDiv(vec1, vec2.AsMask32x16()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask32x16()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask32x16()) + case "MaskedMul": + gotv = vec0.MaskedMul(vec1, vec2.AsMask32x16()) + case "MaskedMulByPowOf2": + gotv = vec0.MaskedMulByPowOf2(vec1, vec2.AsMask32x16()) + case "MaskedOr": + gotv = vec0.MaskedOr(vec1, vec2.AsMask32x16()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask32x16()) + case "MaskedXor": + gotv = vec0.MaskedXor(vec1, vec2.AsMask32x16()) + + default: + t.Errorf("Unknown method: Float32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x16Compare(t *testing.T, v0 []float32, v1 []float32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadFloat32x16Slice(v0) + vec1 := simd.LoadFloat32x16Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt32x16() + case "Greater": + gotv = vec0.Greater(vec1).AsInt32x16() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt32x16() + case "IsNan": + gotv = vec0.IsNan(vec1).AsInt32x16() + case "Less": + gotv = vec0.Less(vec1).AsInt32x16() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt32x16() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt32x16() + + default: + t.Errorf("Unknown method: Float32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x16MaskedCompare(t *testing.T, v0 []float32, v1 []float32, v2 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadFloat32x16Slice(v0) + vec1 := 
simd.LoadFloat32x16Slice(v1) + vec2 := simd.LoadInt32x16Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedIsNan": + gotv = vec0.MaskedIsNan(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + + default: + t.Errorf("Unknown method: Float32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x16Ternary(t *testing.T, v0 []float32, v1 []float32, v2 []float32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x16 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x16Slice(v0) + vec1 := simd.LoadFloat32x16Slice(v1) + vec2 := simd.LoadFloat32x16Slice(v2) + switch which { + case "FusedMultiplyAdd": + gotv = vec0.FusedMultiplyAdd(vec1, vec2) + case "FusedMultiplyAddSub": + gotv = vec0.FusedMultiplyAddSub(vec1, vec2) + case "FusedMultiplySubAdd": + gotv = vec0.FusedMultiplySubAdd(vec1, vec2) + + default: + t.Errorf("Unknown method: Float32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x16TernaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []float32, v3 []int32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x16 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x16Slice(v0) + vec1 := simd.LoadFloat32x16Slice(v1) + vec2 := simd.LoadFloat32x16Slice(v2) + vec3 := simd.LoadInt32x16Slice(v3) + switch which { + case "MaskedFusedMultiplyAdd": + gotv = vec0.MaskedFusedMultiplyAdd(vec1, vec2, vec3.AsMask32x16()) + case "MaskedFusedMultiplyAddSub": + gotv = vec0.MaskedFusedMultiplyAddSub(vec1, vec2, vec3.AsMask32x16()) + case "MaskedFusedMultiplySubAdd": + gotv = vec0.MaskedFusedMultiplySubAdd(vec1, vec2, vec3.AsMask32x16()) + + default: + t.Errorf("Unknown method: Float32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x16Unary(t *testing.T, v0 []float32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x16 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x16Slice(v0) + switch which { + case "ApproximateReciprocal": + gotv = vec0.ApproximateReciprocal() + case "ApproximateReciprocalOfSqrt": + gotv = vec0.ApproximateReciprocalOfSqrt() + case "Sqrt": + gotv = vec0.Sqrt() + + default: + t.Errorf("Unknown method: Float32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x16UnaryMasked(t *testing.T, v0 []float32, v1 []int32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x16 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x16Slice(v0) + vec1 := 
simd.LoadInt32x16Slice(v1) + switch which { + case "MaskedApproximateReciprocal": + gotv = vec0.MaskedApproximateReciprocal(vec1.AsMask32x16()) + case "MaskedApproximateReciprocalOfSqrt": + gotv = vec0.MaskedApproximateReciprocalOfSqrt(vec1.AsMask32x16()) + case "MaskedSqrt": + gotv = vec0.MaskedSqrt(vec1.AsMask32x16()) + + default: + t.Errorf("Unknown method: Float32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x4Binary(t *testing.T, v0 []float32, v1 []float32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x4 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x4Slice(v0) + vec1 := simd.LoadFloat32x4Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "AddSub": + gotv = vec0.AddSub(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Div": + gotv = vec0.Div(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "Mul": + gotv = vec0.Mul(vec1) + case "MulByPowOf2": + gotv = vec0.MulByPowOf2(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "PairwiseAdd": + gotv = vec0.PairwiseAdd(vec1) + case "PairwiseSub": + gotv = vec0.PairwiseSub(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Float32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x4BinaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []int32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x4 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x4Slice(v0) + vec1 := simd.LoadFloat32x4Slice(v1) + vec2 := simd.LoadInt32x4Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x4()) + case "MaskedAnd": + gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x4()) + case "MaskedAndNot": + gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x4()) + case "MaskedDiv": + gotv = vec0.MaskedDiv(vec1, vec2.AsMask32x4()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask32x4()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask32x4()) + case "MaskedMul": + gotv = vec0.MaskedMul(vec1, vec2.AsMask32x4()) + case "MaskedMulByPowOf2": + gotv = vec0.MaskedMulByPowOf2(vec1, vec2.AsMask32x4()) + case "MaskedOr": + gotv = vec0.MaskedOr(vec1, vec2.AsMask32x4()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask32x4()) + case "MaskedXor": + gotv = vec0.MaskedXor(vec1, vec2.AsMask32x4()) + + default: + t.Errorf("Unknown method: Float32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x4Compare(t *testing.T, v0 []float32, v1 []float32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x4 + got := make([]int32, len(want)) + vec0 := simd.LoadFloat32x4Slice(v0) + vec1 := simd.LoadFloat32x4Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt32x4() + case "Greater": + gotv = vec0.Greater(vec1).AsInt32x4() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt32x4() + case "IsNan": + gotv = vec0.IsNan(vec1).AsInt32x4() + case "Less": + gotv = vec0.Less(vec1).AsInt32x4() + case "LessEqual": + gotv = 
vec0.LessEqual(vec1).AsInt32x4() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt32x4() + + default: + t.Errorf("Unknown method: Float32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x4MaskedCompare(t *testing.T, v0 []float32, v1 []float32, v2 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x4 + got := make([]int32, len(want)) + vec0 := simd.LoadFloat32x4Slice(v0) + vec1 := simd.LoadFloat32x4Slice(v1) + vec2 := simd.LoadInt32x4Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask32x4()).AsInt32x4() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask32x4()).AsInt32x4() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask32x4()).AsInt32x4() + case "MaskedIsNan": + gotv = vec0.MaskedIsNan(vec1, vec2.AsMask32x4()).AsInt32x4() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask32x4()).AsInt32x4() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask32x4()).AsInt32x4() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask32x4()).AsInt32x4() + + default: + t.Errorf("Unknown method: Float32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x4Ternary(t *testing.T, v0 []float32, v1 []float32, v2 []float32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x4 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x4Slice(v0) + vec1 := simd.LoadFloat32x4Slice(v1) + vec2 := simd.LoadFloat32x4Slice(v2) + switch which { + case "FusedMultiplyAdd": + gotv = vec0.FusedMultiplyAdd(vec1, vec2) + case "FusedMultiplyAddSub": + gotv = vec0.FusedMultiplyAddSub(vec1, vec2) + case "FusedMultiplySubAdd": + gotv = vec0.FusedMultiplySubAdd(vec1, vec2) + + default: + t.Errorf("Unknown method: Float32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x4TernaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []float32, v3 []int32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x4 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x4Slice(v0) + vec1 := simd.LoadFloat32x4Slice(v1) + vec2 := simd.LoadFloat32x4Slice(v2) + vec3 := simd.LoadInt32x4Slice(v3) + switch which { + case "MaskedFusedMultiplyAdd": + gotv = vec0.MaskedFusedMultiplyAdd(vec1, vec2, vec3.AsMask32x4()) + case "MaskedFusedMultiplyAddSub": + gotv = vec0.MaskedFusedMultiplyAddSub(vec1, vec2, vec3.AsMask32x4()) + case "MaskedFusedMultiplySubAdd": + gotv = vec0.MaskedFusedMultiplySubAdd(vec1, vec2, vec3.AsMask32x4()) + + default: + t.Errorf("Unknown method: Float32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x4Unary(t *testing.T, v0 []float32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x4 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x4Slice(v0) + switch which { + case "ApproximateReciprocal": + gotv = vec0.ApproximateReciprocal() + case "ApproximateReciprocalOfSqrt": + gotv = 
vec0.ApproximateReciprocalOfSqrt() + case "Ceil": + gotv = vec0.Ceil() + case "Floor": + gotv = vec0.Floor() + case "Round": + gotv = vec0.Round() + case "Sqrt": + gotv = vec0.Sqrt() + case "Trunc": + gotv = vec0.Trunc() + + default: + t.Errorf("Unknown method: Float32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x4UnaryMasked(t *testing.T, v0 []float32, v1 []int32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x4 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x4Slice(v0) + vec1 := simd.LoadInt32x4Slice(v1) + switch which { + case "MaskedApproximateReciprocal": + gotv = vec0.MaskedApproximateReciprocal(vec1.AsMask32x4()) + case "MaskedApproximateReciprocalOfSqrt": + gotv = vec0.MaskedApproximateReciprocalOfSqrt(vec1.AsMask32x4()) + case "MaskedSqrt": + gotv = vec0.MaskedSqrt(vec1.AsMask32x4()) + + default: + t.Errorf("Unknown method: Float32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x8Binary(t *testing.T, v0 []float32, v1 []float32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x8 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x8Slice(v0) + vec1 := simd.LoadFloat32x8Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "AddSub": + gotv = vec0.AddSub(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Div": + gotv = vec0.Div(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "Mul": + gotv = vec0.Mul(vec1) + case "MulByPowOf2": + gotv = vec0.MulByPowOf2(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "PairwiseAdd": + gotv = vec0.PairwiseAdd(vec1) + case "PairwiseSub": + gotv = vec0.PairwiseSub(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Float32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x8BinaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []int32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x8 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x8Slice(v0) + vec1 := simd.LoadFloat32x8Slice(v1) + vec2 := simd.LoadInt32x8Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x8()) + case "MaskedAnd": + gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x8()) + case "MaskedAndNot": + gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x8()) + case "MaskedDiv": + gotv = vec0.MaskedDiv(vec1, vec2.AsMask32x8()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask32x8()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask32x8()) + case "MaskedMul": + gotv = vec0.MaskedMul(vec1, vec2.AsMask32x8()) + case "MaskedMulByPowOf2": + gotv = vec0.MaskedMulByPowOf2(vec1, vec2.AsMask32x8()) + case "MaskedOr": + gotv = vec0.MaskedOr(vec1, vec2.AsMask32x8()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask32x8()) + case "MaskedXor": + gotv = vec0.MaskedXor(vec1, vec2.AsMask32x8()) + + default: + t.Errorf("Unknown method: Float32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != 
want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x8Compare(t *testing.T, v0 []float32, v1 []float32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x8 + got := make([]int32, len(want)) + vec0 := simd.LoadFloat32x8Slice(v0) + vec1 := simd.LoadFloat32x8Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt32x8() + case "Greater": + gotv = vec0.Greater(vec1).AsInt32x8() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt32x8() + case "IsNan": + gotv = vec0.IsNan(vec1).AsInt32x8() + case "Less": + gotv = vec0.Less(vec1).AsInt32x8() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt32x8() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt32x8() + + default: + t.Errorf("Unknown method: Float32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x8MaskedCompare(t *testing.T, v0 []float32, v1 []float32, v2 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x8 + got := make([]int32, len(want)) + vec0 := simd.LoadFloat32x8Slice(v0) + vec1 := simd.LoadFloat32x8Slice(v1) + vec2 := simd.LoadInt32x8Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask32x8()).AsInt32x8() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask32x8()).AsInt32x8() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask32x8()).AsInt32x8() + case "MaskedIsNan": + gotv = vec0.MaskedIsNan(vec1, vec2.AsMask32x8()).AsInt32x8() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask32x8()).AsInt32x8() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask32x8()).AsInt32x8() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask32x8()).AsInt32x8() + + default: + t.Errorf("Unknown method: Float32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x8Ternary(t *testing.T, v0 []float32, v1 []float32, v2 []float32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x8 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x8Slice(v0) + vec1 := simd.LoadFloat32x8Slice(v1) + vec2 := simd.LoadFloat32x8Slice(v2) + switch which { + case "FusedMultiplyAdd": + gotv = vec0.FusedMultiplyAdd(vec1, vec2) + case "FusedMultiplyAddSub": + gotv = vec0.FusedMultiplyAddSub(vec1, vec2) + case "FusedMultiplySubAdd": + gotv = vec0.FusedMultiplySubAdd(vec1, vec2) + + default: + t.Errorf("Unknown method: Float32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x8TernaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []float32, v3 []int32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x8 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x8Slice(v0) + vec1 := simd.LoadFloat32x8Slice(v1) + vec2 := simd.LoadFloat32x8Slice(v2) + vec3 := simd.LoadInt32x8Slice(v3) + switch which { + case "MaskedFusedMultiplyAdd": + gotv = vec0.MaskedFusedMultiplyAdd(vec1, vec2, vec3.AsMask32x8()) + case "MaskedFusedMultiplyAddSub": + gotv = vec0.MaskedFusedMultiplyAddSub(vec1, vec2, vec3.AsMask32x8()) + case 
"MaskedFusedMultiplySubAdd": + gotv = vec0.MaskedFusedMultiplySubAdd(vec1, vec2, vec3.AsMask32x8()) + + default: + t.Errorf("Unknown method: Float32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x8Unary(t *testing.T, v0 []float32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x8 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x8Slice(v0) + switch which { + case "ApproximateReciprocal": + gotv = vec0.ApproximateReciprocal() + case "ApproximateReciprocalOfSqrt": + gotv = vec0.ApproximateReciprocalOfSqrt() + case "Ceil": + gotv = vec0.Ceil() + case "Floor": + gotv = vec0.Floor() + case "Round": + gotv = vec0.Round() + case "Sqrt": + gotv = vec0.Sqrt() + case "Trunc": + gotv = vec0.Trunc() + + default: + t.Errorf("Unknown method: Float32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x8UnaryMasked(t *testing.T, v0 []float32, v1 []int32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x8 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x8Slice(v0) + vec1 := simd.LoadInt32x8Slice(v1) + switch which { + case "MaskedApproximateReciprocal": + gotv = vec0.MaskedApproximateReciprocal(vec1.AsMask32x8()) + case "MaskedApproximateReciprocalOfSqrt": + gotv = vec0.MaskedApproximateReciprocalOfSqrt(vec1.AsMask32x8()) + case "MaskedSqrt": + gotv = vec0.MaskedSqrt(vec1.AsMask32x8()) + + default: + t.Errorf("Unknown method: Float32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat64x2Binary(t *testing.T, v0 []float64, v1 []float64, want []float64, which string) { + t.Helper() + var gotv simd.Float64x2 + got := make([]float64, len(want)) + vec0 := simd.LoadFloat64x2Slice(v0) + vec1 := simd.LoadFloat64x2Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "AddSub": + gotv = vec0.AddSub(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Div": + gotv = vec0.Div(vec1) + case "DotProdBroadcast": + gotv = vec0.DotProdBroadcast(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "Mul": + gotv = vec0.Mul(vec1) + case "MulByPowOf2": + gotv = vec0.MulByPowOf2(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "PairwiseAdd": + gotv = vec0.PairwiseAdd(vec1) + case "PairwiseSub": + gotv = vec0.PairwiseSub(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Float64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat64x2BinaryMasked(t *testing.T, v0 []float64, v1 []float64, v2 []int64, want []float64, which string) { + t.Helper() + var gotv simd.Float64x2 + got := make([]float64, len(want)) + vec0 := simd.LoadFloat64x2Slice(v0) + vec1 := simd.LoadFloat64x2Slice(v1) + vec2 := simd.LoadInt64x2Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x2()) + case "MaskedAnd": + gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x2()) + case "MaskedAndNot": + gotv = 
vec0.MaskedAndNot(vec1, vec2.AsMask64x2()) + case "MaskedDiv": + gotv = vec0.MaskedDiv(vec1, vec2.AsMask64x2()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask64x2()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask64x2()) + case "MaskedMul": + gotv = vec0.MaskedMul(vec1, vec2.AsMask64x2()) + case "MaskedMulByPowOf2": + gotv = vec0.MaskedMulByPowOf2(vec1, vec2.AsMask64x2()) + case "MaskedOr": + gotv = vec0.MaskedOr(vec1, vec2.AsMask64x2()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask64x2()) + case "MaskedXor": + gotv = vec0.MaskedXor(vec1, vec2.AsMask64x2()) + + default: + t.Errorf("Unknown method: Float64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat64x2Compare(t *testing.T, v0 []float64, v1 []float64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x2 + got := make([]int64, len(want)) + vec0 := simd.LoadFloat64x2Slice(v0) + vec1 := simd.LoadFloat64x2Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt64x2() + case "Greater": + gotv = vec0.Greater(vec1).AsInt64x2() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt64x2() + case "IsNan": + gotv = vec0.IsNan(vec1).AsInt64x2() + case "Less": + gotv = vec0.Less(vec1).AsInt64x2() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt64x2() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt64x2() + + default: + t.Errorf("Unknown method: Float64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat64x2MaskedCompare(t *testing.T, v0 []float64, v1 []float64, v2 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x2 + got := make([]int64, len(want)) + vec0 := simd.LoadFloat64x2Slice(v0) + vec1 := simd.LoadFloat64x2Slice(v1) + vec2 := simd.LoadInt64x2Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask64x2()).AsInt64x2() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask64x2()).AsInt64x2() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask64x2()).AsInt64x2() + case "MaskedIsNan": + gotv = vec0.MaskedIsNan(vec1, vec2.AsMask64x2()).AsInt64x2() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask64x2()).AsInt64x2() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask64x2()).AsInt64x2() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask64x2()).AsInt64x2() + + default: + t.Errorf("Unknown method: Float64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat64x2Ternary(t *testing.T, v0 []float64, v1 []float64, v2 []float64, want []float64, which string) { + t.Helper() + var gotv simd.Float64x2 + got := make([]float64, len(want)) + vec0 := simd.LoadFloat64x2Slice(v0) + vec1 := simd.LoadFloat64x2Slice(v1) + vec2 := simd.LoadFloat64x2Slice(v2) + switch which { + case "FusedMultiplyAdd": + gotv = vec0.FusedMultiplyAdd(vec1, vec2) + case "FusedMultiplyAddSub": + gotv = vec0.FusedMultiplyAddSub(vec1, vec2) + case "FusedMultiplySubAdd": + gotv = vec0.FusedMultiplySubAdd(vec1, vec2) + + default: + t.Errorf("Unknown method: Float64x2.%s", which) + } + gotv.StoreSlice(got) + for 
i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat64x2TernaryMasked(t *testing.T, v0 []float64, v1 []float64, v2 []float64, v3 []int64, want []float64, which string) { + t.Helper() + var gotv simd.Float64x2 + got := make([]float64, len(want)) + vec0 := simd.LoadFloat64x2Slice(v0) + vec1 := simd.LoadFloat64x2Slice(v1) + vec2 := simd.LoadFloat64x2Slice(v2) + vec3 := simd.LoadInt64x2Slice(v3) + switch which { + case "MaskedFusedMultiplyAdd": + gotv = vec0.MaskedFusedMultiplyAdd(vec1, vec2, vec3.AsMask64x2()) + case "MaskedFusedMultiplyAddSub": + gotv = vec0.MaskedFusedMultiplyAddSub(vec1, vec2, vec3.AsMask64x2()) + case "MaskedFusedMultiplySubAdd": + gotv = vec0.MaskedFusedMultiplySubAdd(vec1, vec2, vec3.AsMask64x2()) + + default: + t.Errorf("Unknown method: Float64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat64x2Unary(t *testing.T, v0 []float64, want []float64, which string) { + t.Helper() + var gotv simd.Float64x2 + got := make([]float64, len(want)) + vec0 := simd.LoadFloat64x2Slice(v0) + switch which { + case "ApproximateReciprocal": + gotv = vec0.ApproximateReciprocal() + case "ApproximateReciprocalOfSqrt": + gotv = vec0.ApproximateReciprocalOfSqrt() + case "Ceil": + gotv = vec0.Ceil() + case "Floor": + gotv = vec0.Floor() + case "Round": + gotv = vec0.Round() + case "Sqrt": + gotv = vec0.Sqrt() + case "Trunc": + gotv = vec0.Trunc() + + default: + t.Errorf("Unknown method: Float64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat64x2UnaryMasked(t *testing.T, v0 []float64, v1 []int64, want []float64, which string) { + t.Helper() + var gotv simd.Float64x2 + got := make([]float64, len(want)) + vec0 := simd.LoadFloat64x2Slice(v0) + vec1 := simd.LoadInt64x2Slice(v1) + switch which { + case "MaskedApproximateReciprocal": + gotv = vec0.MaskedApproximateReciprocal(vec1.AsMask64x2()) + case "MaskedApproximateReciprocalOfSqrt": + gotv = vec0.MaskedApproximateReciprocalOfSqrt(vec1.AsMask64x2()) + case "MaskedSqrt": + gotv = vec0.MaskedSqrt(vec1.AsMask64x2()) + + default: + t.Errorf("Unknown method: Float64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat64x4Binary(t *testing.T, v0 []float64, v1 []float64, want []float64, which string) { + t.Helper() + var gotv simd.Float64x4 + got := make([]float64, len(want)) + vec0 := simd.LoadFloat64x4Slice(v0) + vec1 := simd.LoadFloat64x4Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "AddSub": + gotv = vec0.AddSub(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Div": + gotv = vec0.Div(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "Mul": + gotv = vec0.Mul(vec1) + case "MulByPowOf2": + gotv = vec0.MulByPowOf2(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "PairwiseAdd": + gotv = vec0.PairwiseAdd(vec1) + case "PairwiseSub": + gotv = vec0.PairwiseSub(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Float64x4.%s", which) + } + 
gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat64x4BinaryMasked(t *testing.T, v0 []float64, v1 []float64, v2 []int64, want []float64, which string) { + t.Helper() + var gotv simd.Float64x4 + got := make([]float64, len(want)) + vec0 := simd.LoadFloat64x4Slice(v0) + vec1 := simd.LoadFloat64x4Slice(v1) + vec2 := simd.LoadInt64x4Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x4()) + case "MaskedAnd": + gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x4()) + case "MaskedAndNot": + gotv = vec0.MaskedAndNot(vec1, vec2.AsMask64x4()) + case "MaskedDiv": + gotv = vec0.MaskedDiv(vec1, vec2.AsMask64x4()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask64x4()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask64x4()) + case "MaskedMul": + gotv = vec0.MaskedMul(vec1, vec2.AsMask64x4()) + case "MaskedMulByPowOf2": + gotv = vec0.MaskedMulByPowOf2(vec1, vec2.AsMask64x4()) + case "MaskedOr": + gotv = vec0.MaskedOr(vec1, vec2.AsMask64x4()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask64x4()) + case "MaskedXor": + gotv = vec0.MaskedXor(vec1, vec2.AsMask64x4()) + + default: + t.Errorf("Unknown method: Float64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat64x4Compare(t *testing.T, v0 []float64, v1 []float64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x4 + got := make([]int64, len(want)) + vec0 := simd.LoadFloat64x4Slice(v0) + vec1 := simd.LoadFloat64x4Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt64x4() + case "Greater": + gotv = vec0.Greater(vec1).AsInt64x4() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt64x4() + case "IsNan": + gotv = vec0.IsNan(vec1).AsInt64x4() + case "Less": + gotv = vec0.Less(vec1).AsInt64x4() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt64x4() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt64x4() + + default: + t.Errorf("Unknown method: Float64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat64x4MaskedCompare(t *testing.T, v0 []float64, v1 []float64, v2 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x4 + got := make([]int64, len(want)) + vec0 := simd.LoadFloat64x4Slice(v0) + vec1 := simd.LoadFloat64x4Slice(v1) + vec2 := simd.LoadInt64x4Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask64x4()).AsInt64x4() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask64x4()).AsInt64x4() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask64x4()).AsInt64x4() + case "MaskedIsNan": + gotv = vec0.MaskedIsNan(vec1, vec2.AsMask64x4()).AsInt64x4() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask64x4()).AsInt64x4() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask64x4()).AsInt64x4() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask64x4()).AsInt64x4() + + default: + t.Errorf("Unknown method: Float64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], 
got[i]) + } + } +} + +func testFloat64x4Ternary(t *testing.T, v0 []float64, v1 []float64, v2 []float64, want []float64, which string) { + t.Helper() + var gotv simd.Float64x4 + got := make([]float64, len(want)) + vec0 := simd.LoadFloat64x4Slice(v0) + vec1 := simd.LoadFloat64x4Slice(v1) + vec2 := simd.LoadFloat64x4Slice(v2) + switch which { + case "FusedMultiplyAdd": + gotv = vec0.FusedMultiplyAdd(vec1, vec2) + case "FusedMultiplyAddSub": + gotv = vec0.FusedMultiplyAddSub(vec1, vec2) + case "FusedMultiplySubAdd": + gotv = vec0.FusedMultiplySubAdd(vec1, vec2) + + default: + t.Errorf("Unknown method: Float64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat64x4TernaryMasked(t *testing.T, v0 []float64, v1 []float64, v2 []float64, v3 []int64, want []float64, which string) { + t.Helper() + var gotv simd.Float64x4 + got := make([]float64, len(want)) + vec0 := simd.LoadFloat64x4Slice(v0) + vec1 := simd.LoadFloat64x4Slice(v1) + vec2 := simd.LoadFloat64x4Slice(v2) + vec3 := simd.LoadInt64x4Slice(v3) + switch which { + case "MaskedFusedMultiplyAdd": + gotv = vec0.MaskedFusedMultiplyAdd(vec1, vec2, vec3.AsMask64x4()) + case "MaskedFusedMultiplyAddSub": + gotv = vec0.MaskedFusedMultiplyAddSub(vec1, vec2, vec3.AsMask64x4()) + case "MaskedFusedMultiplySubAdd": + gotv = vec0.MaskedFusedMultiplySubAdd(vec1, vec2, vec3.AsMask64x4()) + + default: + t.Errorf("Unknown method: Float64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat64x4Unary(t *testing.T, v0 []float64, want []float64, which string) { + t.Helper() + var gotv simd.Float64x4 + got := make([]float64, len(want)) + vec0 := simd.LoadFloat64x4Slice(v0) + switch which { + case "ApproximateReciprocal": + gotv = vec0.ApproximateReciprocal() + case "ApproximateReciprocalOfSqrt": + gotv = vec0.ApproximateReciprocalOfSqrt() + case "Ceil": + gotv = vec0.Ceil() + case "Floor": + gotv = vec0.Floor() + case "Round": + gotv = vec0.Round() + case "Sqrt": + gotv = vec0.Sqrt() + case "Trunc": + gotv = vec0.Trunc() + + default: + t.Errorf("Unknown method: Float64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat64x4UnaryMasked(t *testing.T, v0 []float64, v1 []int64, want []float64, which string) { + t.Helper() + var gotv simd.Float64x4 + got := make([]float64, len(want)) + vec0 := simd.LoadFloat64x4Slice(v0) + vec1 := simd.LoadInt64x4Slice(v1) + switch which { + case "MaskedApproximateReciprocal": + gotv = vec0.MaskedApproximateReciprocal(vec1.AsMask64x4()) + case "MaskedApproximateReciprocalOfSqrt": + gotv = vec0.MaskedApproximateReciprocalOfSqrt(vec1.AsMask64x4()) + case "MaskedSqrt": + gotv = vec0.MaskedSqrt(vec1.AsMask64x4()) + + default: + t.Errorf("Unknown method: Float64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat64x8Binary(t *testing.T, v0 []float64, v1 []float64, want []float64, which string) { + t.Helper() + var gotv simd.Float64x8 + got := make([]float64, len(want)) + vec0 := simd.LoadFloat64x8Slice(v0) + vec1 := simd.LoadFloat64x8Slice(v1) + switch which { + case 
"Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Div": + gotv = vec0.Div(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "Mul": + gotv = vec0.Mul(vec1) + case "MulByPowOf2": + gotv = vec0.MulByPowOf2(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Float64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat64x8BinaryMasked(t *testing.T, v0 []float64, v1 []float64, v2 []int64, want []float64, which string) { + t.Helper() + var gotv simd.Float64x8 + got := make([]float64, len(want)) + vec0 := simd.LoadFloat64x8Slice(v0) + vec1 := simd.LoadFloat64x8Slice(v1) + vec2 := simd.LoadInt64x8Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x8()) + case "MaskedAnd": + gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x8()) + case "MaskedAndNot": + gotv = vec0.MaskedAndNot(vec1, vec2.AsMask64x8()) + case "MaskedDiv": + gotv = vec0.MaskedDiv(vec1, vec2.AsMask64x8()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask64x8()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask64x8()) + case "MaskedMul": + gotv = vec0.MaskedMul(vec1, vec2.AsMask64x8()) + case "MaskedMulByPowOf2": + gotv = vec0.MaskedMulByPowOf2(vec1, vec2.AsMask64x8()) + case "MaskedOr": + gotv = vec0.MaskedOr(vec1, vec2.AsMask64x8()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask64x8()) + case "MaskedXor": + gotv = vec0.MaskedXor(vec1, vec2.AsMask64x8()) + + default: + t.Errorf("Unknown method: Float64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat64x8Compare(t *testing.T, v0 []float64, v1 []float64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x8 + got := make([]int64, len(want)) + vec0 := simd.LoadFloat64x8Slice(v0) + vec1 := simd.LoadFloat64x8Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt64x8() + case "Greater": + gotv = vec0.Greater(vec1).AsInt64x8() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt64x8() + case "IsNan": + gotv = vec0.IsNan(vec1).AsInt64x8() + case "Less": + gotv = vec0.Less(vec1).AsInt64x8() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt64x8() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt64x8() + + default: + t.Errorf("Unknown method: Float64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat64x8MaskedCompare(t *testing.T, v0 []float64, v1 []float64, v2 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x8 + got := make([]int64, len(want)) + vec0 := simd.LoadFloat64x8Slice(v0) + vec1 := simd.LoadFloat64x8Slice(v1) + vec2 := simd.LoadInt64x8Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask64x8()).AsInt64x8() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask64x8()).AsInt64x8() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask64x8()).AsInt64x8() + case "MaskedIsNan": + gotv = vec0.MaskedIsNan(vec1, 
vec2.AsMask64x8()).AsInt64x8() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask64x8()).AsInt64x8() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask64x8()).AsInt64x8() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask64x8()).AsInt64x8() + + default: + t.Errorf("Unknown method: Float64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat64x8Ternary(t *testing.T, v0 []float64, v1 []float64, v2 []float64, want []float64, which string) { + t.Helper() + var gotv simd.Float64x8 + got := make([]float64, len(want)) + vec0 := simd.LoadFloat64x8Slice(v0) + vec1 := simd.LoadFloat64x8Slice(v1) + vec2 := simd.LoadFloat64x8Slice(v2) + switch which { + case "FusedMultiplyAdd": + gotv = vec0.FusedMultiplyAdd(vec1, vec2) + case "FusedMultiplyAddSub": + gotv = vec0.FusedMultiplyAddSub(vec1, vec2) + case "FusedMultiplySubAdd": + gotv = vec0.FusedMultiplySubAdd(vec1, vec2) + + default: + t.Errorf("Unknown method: Float64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat64x8TernaryMasked(t *testing.T, v0 []float64, v1 []float64, v2 []float64, v3 []int64, want []float64, which string) { + t.Helper() + var gotv simd.Float64x8 + got := make([]float64, len(want)) + vec0 := simd.LoadFloat64x8Slice(v0) + vec1 := simd.LoadFloat64x8Slice(v1) + vec2 := simd.LoadFloat64x8Slice(v2) + vec3 := simd.LoadInt64x8Slice(v3) + switch which { + case "MaskedFusedMultiplyAdd": + gotv = vec0.MaskedFusedMultiplyAdd(vec1, vec2, vec3.AsMask64x8()) + case "MaskedFusedMultiplyAddSub": + gotv = vec0.MaskedFusedMultiplyAddSub(vec1, vec2, vec3.AsMask64x8()) + case "MaskedFusedMultiplySubAdd": + gotv = vec0.MaskedFusedMultiplySubAdd(vec1, vec2, vec3.AsMask64x8()) + + default: + t.Errorf("Unknown method: Float64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat64x8Unary(t *testing.T, v0 []float64, want []float64, which string) { + t.Helper() + var gotv simd.Float64x8 + got := make([]float64, len(want)) + vec0 := simd.LoadFloat64x8Slice(v0) + switch which { + case "ApproximateReciprocal": + gotv = vec0.ApproximateReciprocal() + case "ApproximateReciprocalOfSqrt": + gotv = vec0.ApproximateReciprocalOfSqrt() + case "Sqrt": + gotv = vec0.Sqrt() + + default: + t.Errorf("Unknown method: Float64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat64x8UnaryMasked(t *testing.T, v0 []float64, v1 []int64, want []float64, which string) { + t.Helper() + var gotv simd.Float64x8 + got := make([]float64, len(want)) + vec0 := simd.LoadFloat64x8Slice(v0) + vec1 := simd.LoadInt64x8Slice(v1) + switch which { + case "MaskedApproximateReciprocal": + gotv = vec0.MaskedApproximateReciprocal(vec1.AsMask64x8()) + case "MaskedApproximateReciprocalOfSqrt": + gotv = vec0.MaskedApproximateReciprocalOfSqrt(vec1.AsMask64x8()) + case "MaskedSqrt": + gotv = vec0.MaskedSqrt(vec1.AsMask64x8()) + + default: + t.Errorf("Unknown method: Float64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + 
t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x16Binary(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x16 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x16Slice(v0) + vec1 := simd.LoadInt16x16Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "MulHigh": + gotv = vec0.MulHigh(vec1) + case "MulLow": + gotv = vec0.MulLow(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "PairwiseAdd": + gotv = vec0.PairwiseAdd(vec1) + case "PairwiseSub": + gotv = vec0.PairwiseSub(vec1) + case "SaturatedAdd": + gotv = vec0.SaturatedAdd(vec1) + case "SaturatedPairwiseAdd": + gotv = vec0.SaturatedPairwiseAdd(vec1) + case "SaturatedPairwiseSub": + gotv = vec0.SaturatedPairwiseSub(vec1) + case "SaturatedSub": + gotv = vec0.SaturatedSub(vec1) + case "Sign": + gotv = vec0.Sign(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Int16x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x16BinaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x16 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x16Slice(v0) + vec1 := simd.LoadInt16x16Slice(v1) + vec2 := simd.LoadInt16x16Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask16x16()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask16x16()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask16x16()) + case "MaskedMulHigh": + gotv = vec0.MaskedMulHigh(vec1, vec2.AsMask16x16()) + case "MaskedMulLow": + gotv = vec0.MaskedMulLow(vec1, vec2.AsMask16x16()) + case "MaskedSaturatedAdd": + gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x16()) + case "MaskedSaturatedSub": + gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x16()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask16x16()) + + default: + t.Errorf("Unknown method: Int16x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x16BinaryMaskedWiden(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int32, which string) { + t.Helper() + var gotv simd.Int32x8 + got := make([]int32, len(want)) + vec0 := simd.LoadInt16x16Slice(v0) + vec1 := simd.LoadInt16x16Slice(v1) + vec2 := simd.LoadInt16x16Slice(v2) + switch which { + case "MaskedPairDotProd": + gotv = vec0.MaskedPairDotProd(vec1, vec2.AsMask16x16()) + + default: + t.Errorf("Unknown method: Int16x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x16BinaryWiden(t *testing.T, v0 []int16, v1 []int16, want []int32, which string) { + t.Helper() + var gotv simd.Int32x8 + got := make([]int32, len(want)) + vec0 := simd.LoadInt16x16Slice(v0) + vec1 := simd.LoadInt16x16Slice(v1) + switch which { + case "PairDotProd": + gotv = vec0.PairDotProd(vec1) + + default: + t.Errorf("Unknown method: Int16x16.%s", which) + } + 
gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x16Compare(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x16 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x16Slice(v0) + vec1 := simd.LoadInt16x16Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt16x16() + case "Greater": + gotv = vec0.Greater(vec1).AsInt16x16() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt16x16() + case "Less": + gotv = vec0.Less(vec1).AsInt16x16() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt16x16() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt16x16() + + default: + t.Errorf("Unknown method: Int16x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x16MaskedCompare(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x16 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x16Slice(v0) + vec1 := simd.LoadInt16x16Slice(v1) + vec2 := simd.LoadInt16x16Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask16x16()).AsInt16x16() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask16x16()).AsInt16x16() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask16x16()).AsInt16x16() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask16x16()).AsInt16x16() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask16x16()).AsInt16x16() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask16x16()).AsInt16x16() + + default: + t.Errorf("Unknown method: Int16x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x16Unary(t *testing.T, v0 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x16 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x16Slice(v0) + switch which { + case "Absolute": + gotv = vec0.Absolute() + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Int16x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x16UnaryMasked(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x16 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x16Slice(v0) + vec1 := simd.LoadInt16x16Slice(v1) + switch which { + case "MaskedAbsolute": + gotv = vec0.MaskedAbsolute(vec1.AsMask16x16()) + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask16x16()) + + default: + t.Errorf("Unknown method: Int16x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x32Binary(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x32 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x32Slice(v0) + vec1 := simd.LoadInt16x32Slice(v1) + switch which { + case "Add": + gotv = 
vec0.Add(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "MulHigh": + gotv = vec0.MulHigh(vec1) + case "MulLow": + gotv = vec0.MulLow(vec1) + case "SaturatedAdd": + gotv = vec0.SaturatedAdd(vec1) + case "SaturatedSub": + gotv = vec0.SaturatedSub(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + + default: + t.Errorf("Unknown method: Int16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x32BinaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x32 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x32Slice(v0) + vec1 := simd.LoadInt16x32Slice(v1) + vec2 := simd.LoadInt16x32Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask16x32()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask16x32()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask16x32()) + case "MaskedMulHigh": + gotv = vec0.MaskedMulHigh(vec1, vec2.AsMask16x32()) + case "MaskedMulLow": + gotv = vec0.MaskedMulLow(vec1, vec2.AsMask16x32()) + case "MaskedSaturatedAdd": + gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x32()) + case "MaskedSaturatedSub": + gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x32()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask16x32()) + + default: + t.Errorf("Unknown method: Int16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x32BinaryMaskedWiden(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt16x32Slice(v0) + vec1 := simd.LoadInt16x32Slice(v1) + vec2 := simd.LoadInt16x32Slice(v2) + switch which { + case "MaskedPairDotProd": + gotv = vec0.MaskedPairDotProd(vec1, vec2.AsMask16x32()) + + default: + t.Errorf("Unknown method: Int16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x32BinaryWiden(t *testing.T, v0 []int16, v1 []int16, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt16x32Slice(v0) + vec1 := simd.LoadInt16x32Slice(v1) + switch which { + case "PairDotProd": + gotv = vec0.PairDotProd(vec1) + + default: + t.Errorf("Unknown method: Int16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x32Compare(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x32 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x32Slice(v0) + vec1 := simd.LoadInt16x32Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt16x32() + case "Greater": + gotv = vec0.Greater(vec1).AsInt16x32() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt16x32() + case "Less": + gotv = vec0.Less(vec1).AsInt16x32() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt16x32() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt16x32() + + default: + t.Errorf("Unknown method: 
Int16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x32MaskedCompare(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x32 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x32Slice(v0) + vec1 := simd.LoadInt16x32Slice(v1) + vec2 := simd.LoadInt16x32Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask16x32()).AsInt16x32() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask16x32()).AsInt16x32() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask16x32()).AsInt16x32() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask16x32()).AsInt16x32() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask16x32()).AsInt16x32() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask16x32()).AsInt16x32() + + default: + t.Errorf("Unknown method: Int16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x32Unary(t *testing.T, v0 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x32 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x32Slice(v0) + switch which { + case "Absolute": + gotv = vec0.Absolute() + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Int16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x32UnaryMasked(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x32 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x32Slice(v0) + vec1 := simd.LoadInt16x32Slice(v1) + switch which { + case "MaskedAbsolute": + gotv = vec0.MaskedAbsolute(vec1.AsMask16x32()) + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask16x32()) + + default: + t.Errorf("Unknown method: Int16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x8Binary(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x8 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x8Slice(v0) + vec1 := simd.LoadInt16x8Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "MulHigh": + gotv = vec0.MulHigh(vec1) + case "MulLow": + gotv = vec0.MulLow(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "PairwiseAdd": + gotv = vec0.PairwiseAdd(vec1) + case "PairwiseSub": + gotv = vec0.PairwiseSub(vec1) + case "SaturatedAdd": + gotv = vec0.SaturatedAdd(vec1) + case "SaturatedPairwiseAdd": + gotv = vec0.SaturatedPairwiseAdd(vec1) + case "SaturatedPairwiseSub": + gotv = vec0.SaturatedPairwiseSub(vec1) + case "SaturatedSub": + gotv = vec0.SaturatedSub(vec1) + case "Sign": + gotv = vec0.Sign(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Int16x8.%s", 
which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x8BinaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x8 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x8Slice(v0) + vec1 := simd.LoadInt16x8Slice(v1) + vec2 := simd.LoadInt16x8Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask16x8()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask16x8()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask16x8()) + case "MaskedMulHigh": + gotv = vec0.MaskedMulHigh(vec1, vec2.AsMask16x8()) + case "MaskedMulLow": + gotv = vec0.MaskedMulLow(vec1, vec2.AsMask16x8()) + case "MaskedSaturatedAdd": + gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x8()) + case "MaskedSaturatedSub": + gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x8()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask16x8()) + + default: + t.Errorf("Unknown method: Int16x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x8BinaryMaskedWiden(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int32, which string) { + t.Helper() + var gotv simd.Int32x4 + got := make([]int32, len(want)) + vec0 := simd.LoadInt16x8Slice(v0) + vec1 := simd.LoadInt16x8Slice(v1) + vec2 := simd.LoadInt16x8Slice(v2) + switch which { + case "MaskedPairDotProd": + gotv = vec0.MaskedPairDotProd(vec1, vec2.AsMask16x8()) + + default: + t.Errorf("Unknown method: Int16x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x8BinaryWiden(t *testing.T, v0 []int16, v1 []int16, want []int32, which string) { + t.Helper() + var gotv simd.Int32x4 + got := make([]int32, len(want)) + vec0 := simd.LoadInt16x8Slice(v0) + vec1 := simd.LoadInt16x8Slice(v1) + switch which { + case "PairDotProd": + gotv = vec0.PairDotProd(vec1) + + default: + t.Errorf("Unknown method: Int16x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x8Compare(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x8 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x8Slice(v0) + vec1 := simd.LoadInt16x8Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt16x8() + case "Greater": + gotv = vec0.Greater(vec1).AsInt16x8() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt16x8() + case "Less": + gotv = vec0.Less(vec1).AsInt16x8() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt16x8() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt16x8() + + default: + t.Errorf("Unknown method: Int16x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x8MaskedCompare(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x8 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x8Slice(v0) + vec1 := 
simd.LoadInt16x8Slice(v1) + vec2 := simd.LoadInt16x8Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask16x8()).AsInt16x8() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask16x8()).AsInt16x8() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask16x8()).AsInt16x8() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask16x8()).AsInt16x8() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask16x8()).AsInt16x8() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask16x8()).AsInt16x8() + + default: + t.Errorf("Unknown method: Int16x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x8Unary(t *testing.T, v0 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x8 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x8Slice(v0) + switch which { + case "Absolute": + gotv = vec0.Absolute() + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Int16x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x8UnaryMasked(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x8 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x8Slice(v0) + vec1 := simd.LoadInt16x8Slice(v1) + switch which { + case "MaskedAbsolute": + gotv = vec0.MaskedAbsolute(vec1.AsMask16x8()) + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask16x8()) + + default: + t.Errorf("Unknown method: Int16x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x16Binary(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x16Slice(v0) + vec1 := simd.LoadInt32x16Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "MulLow": + gotv = vec0.MulLow(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Int32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x16BinaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x16Slice(v0) + vec1 := simd.LoadInt32x16Slice(v1) + vec2 := simd.LoadInt32x16Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x16()) + case "MaskedAnd": + gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x16()) + case "MaskedAndNot": + gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x16()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask32x16()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask32x16()) + case "MaskedMulLow": + gotv = 
vec0.MaskedMulLow(vec1, vec2.AsMask32x16()) + case "MaskedOr": + gotv = vec0.MaskedOr(vec1, vec2.AsMask32x16()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask32x16()) + case "MaskedXor": + gotv = vec0.MaskedXor(vec1, vec2.AsMask32x16()) + + default: + t.Errorf("Unknown method: Int32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x16Compare(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x16Slice(v0) + vec1 := simd.LoadInt32x16Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt32x16() + case "Greater": + gotv = vec0.Greater(vec1).AsInt32x16() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt32x16() + case "Less": + gotv = vec0.Less(vec1).AsInt32x16() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt32x16() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt32x16() + + default: + t.Errorf("Unknown method: Int32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x16Int16x32Int16x32Int32x16(t *testing.T, v0 []int32, v1 []int16, v2 []int16, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x16Slice(v0) + vec1 := simd.LoadInt16x32Slice(v1) + vec2 := simd.LoadInt16x32Slice(v2) + switch which { + case "PairDotProdAccumulate": + gotv = vec0.PairDotProdAccumulate(vec1, vec2) + case "SaturatedPairDotProdAccumulate": + gotv = vec0.SaturatedPairDotProdAccumulate(vec1, vec2) + + default: + t.Errorf("Unknown method: Int32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x16Int16x32Int16x32Mask32x16Int32x16(t *testing.T, v0 []int32, v1 []int16, v2 []int16, v3 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x16Slice(v0) + vec1 := simd.LoadInt16x32Slice(v1) + vec2 := simd.LoadInt16x32Slice(v2) + vec3 := simd.LoadInt32x16Slice(v3) + switch which { + case "MaskedPairDotProdAccumulate": + gotv = vec0.MaskedPairDotProdAccumulate(vec1, vec2, vec3.AsMask32x16()) + case "MaskedSaturatedPairDotProdAccumulate": + gotv = vec0.MaskedSaturatedPairDotProdAccumulate(vec1, vec2, vec3.AsMask32x16()) + + default: + t.Errorf("Unknown method: Int32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x16MaskedCompare(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x16Slice(v0) + vec1 := simd.LoadInt32x16Slice(v1) + vec2 := simd.LoadInt32x16Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedLess": + gotv = 
vec0.MaskedLess(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + + default: + t.Errorf("Unknown method: Int32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x16Uint8x64Int8x64Int32x16(t *testing.T, v0 []int32, v1 []uint8, v2 []int8, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x16Slice(v0) + vec1 := simd.LoadUint8x64Slice(v1) + vec2 := simd.LoadInt8x64Slice(v2) + switch which { + case "SaturatedUnsignedSignedQuadDotProdAccumulate": + gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2) + case "UnsignedSignedQuadDotProdAccumulate": + gotv = vec0.UnsignedSignedQuadDotProdAccumulate(vec1, vec2) + + default: + t.Errorf("Unknown method: Int32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x16Uint8x64Int8x64Mask32x16Int32x16(t *testing.T, v0 []int32, v1 []uint8, v2 []int8, v3 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x16Slice(v0) + vec1 := simd.LoadUint8x64Slice(v1) + vec2 := simd.LoadInt8x64Slice(v2) + vec3 := simd.LoadInt32x16Slice(v3) + switch which { + case "MaskedSaturatedUnsignedSignedQuadDotProdAccumulate": + gotv = vec0.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x16()) + case "MaskedUnsignedSignedQuadDotProdAccumulate": + gotv = vec0.MaskedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x16()) + + default: + t.Errorf("Unknown method: Int32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x16Unary(t *testing.T, v0 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x16Slice(v0) + switch which { + case "Absolute": + gotv = vec0.Absolute() + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Int32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x16UnaryMasked(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x16Slice(v0) + vec1 := simd.LoadInt32x16Slice(v1) + switch which { + case "MaskedAbsolute": + gotv = vec0.MaskedAbsolute(vec1.AsMask32x16()) + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask32x16()) + + default: + t.Errorf("Unknown method: Int32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x4Binary(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x4 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x4Slice(v0) + vec1 := 
simd.LoadInt32x4Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "MulLow": + gotv = vec0.MulLow(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "PairwiseAdd": + gotv = vec0.PairwiseAdd(vec1) + case "PairwiseSub": + gotv = vec0.PairwiseSub(vec1) + case "Sign": + gotv = vec0.Sign(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Int32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x4BinaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x4 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x4Slice(v0) + vec1 := simd.LoadInt32x4Slice(v1) + vec2 := simd.LoadInt32x4Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x4()) + case "MaskedAnd": + gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x4()) + case "MaskedAndNot": + gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x4()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask32x4()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask32x4()) + case "MaskedMulLow": + gotv = vec0.MaskedMulLow(vec1, vec2.AsMask32x4()) + case "MaskedOr": + gotv = vec0.MaskedOr(vec1, vec2.AsMask32x4()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask32x4()) + case "MaskedXor": + gotv = vec0.MaskedXor(vec1, vec2.AsMask32x4()) + + default: + t.Errorf("Unknown method: Int32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x4BinaryWiden(t *testing.T, v0 []int32, v1 []int32, want []int64, which string) { + t.Helper() + var gotv simd.Int64x2 + got := make([]int64, len(want)) + vec0 := simd.LoadInt32x4Slice(v0) + vec1 := simd.LoadInt32x4Slice(v1) + switch which { + case "MulEvenWiden": + gotv = vec0.MulEvenWiden(vec1) + + default: + t.Errorf("Unknown method: Int32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x4Compare(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x4 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x4Slice(v0) + vec1 := simd.LoadInt32x4Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt32x4() + case "Greater": + gotv = vec0.Greater(vec1).AsInt32x4() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt32x4() + case "Less": + gotv = vec0.Less(vec1).AsInt32x4() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt32x4() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt32x4() + + default: + t.Errorf("Unknown method: Int32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x4Int16x8Int16x8Int32x4(t *testing.T, v0 []int32, v1 []int16, v2 []int16, want []int32, which string) { + t.Helper() + var gotv simd.Int32x4 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x4Slice(v0) + vec1 := 
simd.LoadInt16x8Slice(v1) + vec2 := simd.LoadInt16x8Slice(v2) + switch which { + case "PairDotProdAccumulate": + gotv = vec0.PairDotProdAccumulate(vec1, vec2) + case "SaturatedPairDotProdAccumulate": + gotv = vec0.SaturatedPairDotProdAccumulate(vec1, vec2) + + default: + t.Errorf("Unknown method: Int32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x4Int16x8Int16x8Mask32x4Int32x4(t *testing.T, v0 []int32, v1 []int16, v2 []int16, v3 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x4 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x4Slice(v0) + vec1 := simd.LoadInt16x8Slice(v1) + vec2 := simd.LoadInt16x8Slice(v2) + vec3 := simd.LoadInt32x4Slice(v3) + switch which { + case "MaskedPairDotProdAccumulate": + gotv = vec0.MaskedPairDotProdAccumulate(vec1, vec2, vec3.AsMask32x4()) + case "MaskedSaturatedPairDotProdAccumulate": + gotv = vec0.MaskedSaturatedPairDotProdAccumulate(vec1, vec2, vec3.AsMask32x4()) + + default: + t.Errorf("Unknown method: Int32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x4MaskedCompare(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x4 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x4Slice(v0) + vec1 := simd.LoadInt32x4Slice(v1) + vec2 := simd.LoadInt32x4Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask32x4()).AsInt32x4() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask32x4()).AsInt32x4() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask32x4()).AsInt32x4() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask32x4()).AsInt32x4() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask32x4()).AsInt32x4() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask32x4()).AsInt32x4() + + default: + t.Errorf("Unknown method: Int32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x4Uint8x16Int8x16Int32x4(t *testing.T, v0 []int32, v1 []uint8, v2 []int8, want []int32, which string) { + t.Helper() + var gotv simd.Int32x4 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x4Slice(v0) + vec1 := simd.LoadUint8x16Slice(v1) + vec2 := simd.LoadInt8x16Slice(v2) + switch which { + case "SaturatedUnsignedSignedQuadDotProdAccumulate": + gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2) + case "UnsignedSignedQuadDotProdAccumulate": + gotv = vec0.UnsignedSignedQuadDotProdAccumulate(vec1, vec2) + + default: + t.Errorf("Unknown method: Int32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x4Uint8x16Int8x16Mask32x4Int32x4(t *testing.T, v0 []int32, v1 []uint8, v2 []int8, v3 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x4 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x4Slice(v0) + vec1 := simd.LoadUint8x16Slice(v1) + vec2 := simd.LoadInt8x16Slice(v2) + vec3 := simd.LoadInt32x4Slice(v3) + switch which { + 
case "MaskedSaturatedUnsignedSignedQuadDotProdAccumulate": + gotv = vec0.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x4()) + case "MaskedUnsignedSignedQuadDotProdAccumulate": + gotv = vec0.MaskedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x4()) + + default: + t.Errorf("Unknown method: Int32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x4Unary(t *testing.T, v0 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x4 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x4Slice(v0) + switch which { + case "Absolute": + gotv = vec0.Absolute() + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Int32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x4UnaryMasked(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x4 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x4Slice(v0) + vec1 := simd.LoadInt32x4Slice(v1) + switch which { + case "MaskedAbsolute": + gotv = vec0.MaskedAbsolute(vec1.AsMask32x4()) + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask32x4()) + + default: + t.Errorf("Unknown method: Int32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x8Binary(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x8 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x8Slice(v0) + vec1 := simd.LoadInt32x8Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "MulLow": + gotv = vec0.MulLow(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "PairwiseAdd": + gotv = vec0.PairwiseAdd(vec1) + case "PairwiseSub": + gotv = vec0.PairwiseSub(vec1) + case "Sign": + gotv = vec0.Sign(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Int32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x8BinaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x8 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x8Slice(v0) + vec1 := simd.LoadInt32x8Slice(v1) + vec2 := simd.LoadInt32x8Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x8()) + case "MaskedAnd": + gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x8()) + case "MaskedAndNot": + gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x8()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask32x8()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask32x8()) + case "MaskedMulLow": + gotv = vec0.MaskedMulLow(vec1, vec2.AsMask32x8()) + case "MaskedOr": + gotv = vec0.MaskedOr(vec1, vec2.AsMask32x8()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask32x8()) + case "MaskedXor": + gotv = 
vec0.MaskedXor(vec1, vec2.AsMask32x8()) + + default: + t.Errorf("Unknown method: Int32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x8BinaryWiden(t *testing.T, v0 []int32, v1 []int32, want []int64, which string) { + t.Helper() + var gotv simd.Int64x4 + got := make([]int64, len(want)) + vec0 := simd.LoadInt32x8Slice(v0) + vec1 := simd.LoadInt32x8Slice(v1) + switch which { + case "MulEvenWiden": + gotv = vec0.MulEvenWiden(vec1) + + default: + t.Errorf("Unknown method: Int32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x8Compare(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x8 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x8Slice(v0) + vec1 := simd.LoadInt32x8Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt32x8() + case "Greater": + gotv = vec0.Greater(vec1).AsInt32x8() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt32x8() + case "Less": + gotv = vec0.Less(vec1).AsInt32x8() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt32x8() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt32x8() + + default: + t.Errorf("Unknown method: Int32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x8Int16x16Int16x16Int32x8(t *testing.T, v0 []int32, v1 []int16, v2 []int16, want []int32, which string) { + t.Helper() + var gotv simd.Int32x8 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x8Slice(v0) + vec1 := simd.LoadInt16x16Slice(v1) + vec2 := simd.LoadInt16x16Slice(v2) + switch which { + case "PairDotProdAccumulate": + gotv = vec0.PairDotProdAccumulate(vec1, vec2) + case "SaturatedPairDotProdAccumulate": + gotv = vec0.SaturatedPairDotProdAccumulate(vec1, vec2) + + default: + t.Errorf("Unknown method: Int32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x8Int16x16Int16x16Mask32x8Int32x8(t *testing.T, v0 []int32, v1 []int16, v2 []int16, v3 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x8 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x8Slice(v0) + vec1 := simd.LoadInt16x16Slice(v1) + vec2 := simd.LoadInt16x16Slice(v2) + vec3 := simd.LoadInt32x8Slice(v3) + switch which { + case "MaskedPairDotProdAccumulate": + gotv = vec0.MaskedPairDotProdAccumulate(vec1, vec2, vec3.AsMask32x8()) + case "MaskedSaturatedPairDotProdAccumulate": + gotv = vec0.MaskedSaturatedPairDotProdAccumulate(vec1, vec2, vec3.AsMask32x8()) + + default: + t.Errorf("Unknown method: Int32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x8MaskedCompare(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x8 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x8Slice(v0) + vec1 := simd.LoadInt32x8Slice(v1) + vec2 := simd.LoadInt32x8Slice(v2) + switch which { + case "MaskedEqual": + gotv = 
vec0.MaskedEqual(vec1, vec2.AsMask32x8()).AsInt32x8() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask32x8()).AsInt32x8() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask32x8()).AsInt32x8() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask32x8()).AsInt32x8() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask32x8()).AsInt32x8() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask32x8()).AsInt32x8() + + default: + t.Errorf("Unknown method: Int32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x8Uint8x32Int8x32Int32x8(t *testing.T, v0 []int32, v1 []uint8, v2 []int8, want []int32, which string) { + t.Helper() + var gotv simd.Int32x8 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x8Slice(v0) + vec1 := simd.LoadUint8x32Slice(v1) + vec2 := simd.LoadInt8x32Slice(v2) + switch which { + case "SaturatedUnsignedSignedQuadDotProdAccumulate": + gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2) + case "UnsignedSignedQuadDotProdAccumulate": + gotv = vec0.UnsignedSignedQuadDotProdAccumulate(vec1, vec2) + + default: + t.Errorf("Unknown method: Int32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x8Uint8x32Int8x32Mask32x8Int32x8(t *testing.T, v0 []int32, v1 []uint8, v2 []int8, v3 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x8 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x8Slice(v0) + vec1 := simd.LoadUint8x32Slice(v1) + vec2 := simd.LoadInt8x32Slice(v2) + vec3 := simd.LoadInt32x8Slice(v3) + switch which { + case "MaskedSaturatedUnsignedSignedQuadDotProdAccumulate": + gotv = vec0.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x8()) + case "MaskedUnsignedSignedQuadDotProdAccumulate": + gotv = vec0.MaskedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x8()) + + default: + t.Errorf("Unknown method: Int32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x8Unary(t *testing.T, v0 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x8 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x8Slice(v0) + switch which { + case "Absolute": + gotv = vec0.Absolute() + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Int32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x8UnaryMasked(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x8 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x8Slice(v0) + vec1 := simd.LoadInt32x8Slice(v1) + switch which { + case "MaskedAbsolute": + gotv = vec0.MaskedAbsolute(vec1.AsMask32x8()) + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask32x8()) + + default: + t.Errorf("Unknown method: Int32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + 
} + } +} + +func testInt64x2Binary(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x2 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x2Slice(v0) + vec1 := simd.LoadInt64x2Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "MulEvenWiden": + gotv = vec0.MulEvenWiden(vec1) + case "MulLow": + gotv = vec0.MulLow(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Int64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x2BinaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x2 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x2Slice(v0) + vec1 := simd.LoadInt64x2Slice(v1) + vec2 := simd.LoadInt64x2Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x2()) + case "MaskedAnd": + gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x2()) + case "MaskedAndNot": + gotv = vec0.MaskedAndNot(vec1, vec2.AsMask64x2()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask64x2()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask64x2()) + case "MaskedMulEvenWiden": + gotv = vec0.MaskedMulEvenWiden(vec1, vec2.AsMask64x2()) + case "MaskedMulLow": + gotv = vec0.MaskedMulLow(vec1, vec2.AsMask64x2()) + case "MaskedOr": + gotv = vec0.MaskedOr(vec1, vec2.AsMask64x2()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask64x2()) + case "MaskedXor": + gotv = vec0.MaskedXor(vec1, vec2.AsMask64x2()) + + default: + t.Errorf("Unknown method: Int64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x2Compare(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x2 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x2Slice(v0) + vec1 := simd.LoadInt64x2Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt64x2() + case "Greater": + gotv = vec0.Greater(vec1).AsInt64x2() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt64x2() + case "Less": + gotv = vec0.Less(vec1).AsInt64x2() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt64x2() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt64x2() + + default: + t.Errorf("Unknown method: Int64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x2MaskedCompare(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x2 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x2Slice(v0) + vec1 := simd.LoadInt64x2Slice(v1) + vec2 := simd.LoadInt64x2Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask64x2()).AsInt64x2() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask64x2()).AsInt64x2() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, 
vec2.AsMask64x2()).AsInt64x2() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask64x2()).AsInt64x2() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask64x2()).AsInt64x2() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask64x2()).AsInt64x2() + + default: + t.Errorf("Unknown method: Int64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x2Unary(t *testing.T, v0 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x2 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x2Slice(v0) + switch which { + case "Absolute": + gotv = vec0.Absolute() + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Int64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x2UnaryMasked(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x2 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x2Slice(v0) + vec1 := simd.LoadInt64x2Slice(v1) + switch which { + case "MaskedAbsolute": + gotv = vec0.MaskedAbsolute(vec1.AsMask64x2()) + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask64x2()) + + default: + t.Errorf("Unknown method: Int64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x4Binary(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x4 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x4Slice(v0) + vec1 := simd.LoadInt64x4Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "MulEvenWiden": + gotv = vec0.MulEvenWiden(vec1) + case "MulLow": + gotv = vec0.MulLow(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Int64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x4BinaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x4 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x4Slice(v0) + vec1 := simd.LoadInt64x4Slice(v1) + vec2 := simd.LoadInt64x4Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x4()) + case "MaskedAnd": + gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x4()) + case "MaskedAndNot": + gotv = vec0.MaskedAndNot(vec1, vec2.AsMask64x4()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask64x4()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask64x4()) + case "MaskedMulEvenWiden": + gotv = vec0.MaskedMulEvenWiden(vec1, vec2.AsMask64x4()) + case "MaskedMulLow": + gotv = vec0.MaskedMulLow(vec1, vec2.AsMask64x4()) + case "MaskedOr": + gotv = vec0.MaskedOr(vec1, vec2.AsMask64x4()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask64x4()) + case "MaskedXor": + gotv = 
vec0.MaskedXor(vec1, vec2.AsMask64x4()) + + default: + t.Errorf("Unknown method: Int64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x4Compare(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x4 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x4Slice(v0) + vec1 := simd.LoadInt64x4Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt64x4() + case "Greater": + gotv = vec0.Greater(vec1).AsInt64x4() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt64x4() + case "Less": + gotv = vec0.Less(vec1).AsInt64x4() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt64x4() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt64x4() + + default: + t.Errorf("Unknown method: Int64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x4MaskedCompare(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x4 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x4Slice(v0) + vec1 := simd.LoadInt64x4Slice(v1) + vec2 := simd.LoadInt64x4Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask64x4()).AsInt64x4() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask64x4()).AsInt64x4() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask64x4()).AsInt64x4() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask64x4()).AsInt64x4() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask64x4()).AsInt64x4() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask64x4()).AsInt64x4() + + default: + t.Errorf("Unknown method: Int64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x4Unary(t *testing.T, v0 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x4 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x4Slice(v0) + switch which { + case "Absolute": + gotv = vec0.Absolute() + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Int64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x4UnaryMasked(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x4 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x4Slice(v0) + vec1 := simd.LoadInt64x4Slice(v1) + switch which { + case "MaskedAbsolute": + gotv = vec0.MaskedAbsolute(vec1.AsMask64x4()) + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask64x4()) + + default: + t.Errorf("Unknown method: Int64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x8Binary(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x8 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x8Slice(v0) + vec1 := 
simd.LoadInt64x8Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "MulEvenWiden": + gotv = vec0.MulEvenWiden(vec1) + case "MulLow": + gotv = vec0.MulLow(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Int64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x8BinaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x8 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x8Slice(v0) + vec1 := simd.LoadInt64x8Slice(v1) + vec2 := simd.LoadInt64x8Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x8()) + case "MaskedAnd": + gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x8()) + case "MaskedAndNot": + gotv = vec0.MaskedAndNot(vec1, vec2.AsMask64x8()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask64x8()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask64x8()) + case "MaskedMulEvenWiden": + gotv = vec0.MaskedMulEvenWiden(vec1, vec2.AsMask64x8()) + case "MaskedMulLow": + gotv = vec0.MaskedMulLow(vec1, vec2.AsMask64x8()) + case "MaskedOr": + gotv = vec0.MaskedOr(vec1, vec2.AsMask64x8()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask64x8()) + case "MaskedXor": + gotv = vec0.MaskedXor(vec1, vec2.AsMask64x8()) + + default: + t.Errorf("Unknown method: Int64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x8Compare(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x8 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x8Slice(v0) + vec1 := simd.LoadInt64x8Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt64x8() + case "Greater": + gotv = vec0.Greater(vec1).AsInt64x8() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt64x8() + case "Less": + gotv = vec0.Less(vec1).AsInt64x8() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt64x8() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt64x8() + + default: + t.Errorf("Unknown method: Int64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x8MaskedCompare(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x8 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x8Slice(v0) + vec1 := simd.LoadInt64x8Slice(v1) + vec2 := simd.LoadInt64x8Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask64x8()).AsInt64x8() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask64x8()).AsInt64x8() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask64x8()).AsInt64x8() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask64x8()).AsInt64x8() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask64x8()).AsInt64x8() + case "MaskedNotEqual": + 
gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask64x8()).AsInt64x8() + + default: + t.Errorf("Unknown method: Int64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x8Unary(t *testing.T, v0 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x8 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x8Slice(v0) + switch which { + case "Absolute": + gotv = vec0.Absolute() + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Int64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x8UnaryMasked(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x8 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x8Slice(v0) + vec1 := simd.LoadInt64x8Slice(v1) + switch which { + case "MaskedAbsolute": + gotv = vec0.MaskedAbsolute(vec1.AsMask64x8()) + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask64x8()) + + default: + t.Errorf("Unknown method: Int64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt8x16Binary(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x16 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x16Slice(v0) + vec1 := simd.LoadInt8x16Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "SaturatedAdd": + gotv = vec0.SaturatedAdd(vec1) + case "SaturatedSub": + gotv = vec0.SaturatedSub(vec1) + case "Sign": + gotv = vec0.Sign(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Int8x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt8x16BinaryMasked(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x16 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x16Slice(v0) + vec1 := simd.LoadInt8x16Slice(v1) + vec2 := simd.LoadInt8x16Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask8x16()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask8x16()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask8x16()) + case "MaskedSaturatedAdd": + gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask8x16()) + case "MaskedSaturatedSub": + gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask8x16()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask8x16()) + + default: + t.Errorf("Unknown method: Int8x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt8x16Compare(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x16 + got := make([]int8, len(want)) + vec0 := 
simd.LoadInt8x16Slice(v0) + vec1 := simd.LoadInt8x16Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt8x16() + case "Greater": + gotv = vec0.Greater(vec1).AsInt8x16() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt8x16() + case "Less": + gotv = vec0.Less(vec1).AsInt8x16() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt8x16() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt8x16() + + default: + t.Errorf("Unknown method: Int8x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt8x16MaskedCompare(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x16 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x16Slice(v0) + vec1 := simd.LoadInt8x16Slice(v1) + vec2 := simd.LoadInt8x16Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask8x16()).AsInt8x16() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask8x16()).AsInt8x16() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask8x16()).AsInt8x16() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask8x16()).AsInt8x16() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask8x16()).AsInt8x16() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask8x16()).AsInt8x16() + + default: + t.Errorf("Unknown method: Int8x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt8x16Unary(t *testing.T, v0 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x16 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x16Slice(v0) + switch which { + case "Absolute": + gotv = vec0.Absolute() + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Int8x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt8x16UnaryMasked(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x16 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x16Slice(v0) + vec1 := simd.LoadInt8x16Slice(v1) + switch which { + case "MaskedAbsolute": + gotv = vec0.MaskedAbsolute(vec1.AsMask8x16()) + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask8x16()) + + default: + t.Errorf("Unknown method: Int8x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt8x32Binary(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x32 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x32Slice(v0) + vec1 := simd.LoadInt8x32Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "SaturatedAdd": + gotv = vec0.SaturatedAdd(vec1) + case "SaturatedSub": + gotv = vec0.SaturatedSub(vec1) + case "Sign": + gotv = vec0.Sign(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case 
"Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Int8x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt8x32BinaryMasked(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x32 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x32Slice(v0) + vec1 := simd.LoadInt8x32Slice(v1) + vec2 := simd.LoadInt8x32Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask8x32()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask8x32()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask8x32()) + case "MaskedSaturatedAdd": + gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask8x32()) + case "MaskedSaturatedSub": + gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask8x32()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask8x32()) + + default: + t.Errorf("Unknown method: Int8x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt8x32Compare(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x32 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x32Slice(v0) + vec1 := simd.LoadInt8x32Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt8x32() + case "Greater": + gotv = vec0.Greater(vec1).AsInt8x32() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt8x32() + case "Less": + gotv = vec0.Less(vec1).AsInt8x32() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt8x32() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt8x32() + + default: + t.Errorf("Unknown method: Int8x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt8x32MaskedCompare(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x32 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x32Slice(v0) + vec1 := simd.LoadInt8x32Slice(v1) + vec2 := simd.LoadInt8x32Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask8x32()).AsInt8x32() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask8x32()).AsInt8x32() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask8x32()).AsInt8x32() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask8x32()).AsInt8x32() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask8x32()).AsInt8x32() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask8x32()).AsInt8x32() + + default: + t.Errorf("Unknown method: Int8x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt8x32Unary(t *testing.T, v0 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x32 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x32Slice(v0) + switch which { + case "Absolute": + gotv = vec0.Absolute() + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Int8x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if 
got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt8x32UnaryMasked(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x32 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x32Slice(v0) + vec1 := simd.LoadInt8x32Slice(v1) + switch which { + case "MaskedAbsolute": + gotv = vec0.MaskedAbsolute(vec1.AsMask8x32()) + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask8x32()) + + default: + t.Errorf("Unknown method: Int8x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt8x64Binary(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x64 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x64Slice(v0) + vec1 := simd.LoadInt8x64Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "SaturatedAdd": + gotv = vec0.SaturatedAdd(vec1) + case "SaturatedSub": + gotv = vec0.SaturatedSub(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + + default: + t.Errorf("Unknown method: Int8x64.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt8x64BinaryMasked(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x64 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x64Slice(v0) + vec1 := simd.LoadInt8x64Slice(v1) + vec2 := simd.LoadInt8x64Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask8x64()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask8x64()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask8x64()) + case "MaskedSaturatedAdd": + gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask8x64()) + case "MaskedSaturatedSub": + gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask8x64()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask8x64()) + + default: + t.Errorf("Unknown method: Int8x64.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt8x64Compare(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x64 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x64Slice(v0) + vec1 := simd.LoadInt8x64Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt8x64() + case "Greater": + gotv = vec0.Greater(vec1).AsInt8x64() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt8x64() + case "Less": + gotv = vec0.Less(vec1).AsInt8x64() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt8x64() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt8x64() + + default: + t.Errorf("Unknown method: Int8x64.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt8x64MaskedCompare(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x64 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x64Slice(v0) + vec1 := simd.LoadInt8x64Slice(v1) + vec2 := 
simd.LoadInt8x64Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask8x64()).AsInt8x64() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask8x64()).AsInt8x64() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask8x64()).AsInt8x64() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask8x64()).AsInt8x64() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask8x64()).AsInt8x64() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask8x64()).AsInt8x64() + + default: + t.Errorf("Unknown method: Int8x64.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt8x64Unary(t *testing.T, v0 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x64 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x64Slice(v0) + switch which { + case "Absolute": + gotv = vec0.Absolute() + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Int8x64.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt8x64UnaryMasked(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x64 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x64Slice(v0) + vec1 := simd.LoadInt8x64Slice(v1) + switch which { + case "MaskedAbsolute": + gotv = vec0.MaskedAbsolute(vec1.AsMask8x64()) + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask8x64()) + + default: + t.Errorf("Unknown method: Int8x64.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint16x16Binary(t *testing.T, v0 []uint16, v1 []uint16, want []uint16, which string) { + t.Helper() + var gotv simd.Uint16x16 + got := make([]uint16, len(want)) + vec0 := simd.LoadUint16x16Slice(v0) + vec1 := simd.LoadUint16x16Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Average": + gotv = vec0.Average(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "MulHigh": + gotv = vec0.MulHigh(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "PairwiseAdd": + gotv = vec0.PairwiseAdd(vec1) + case "PairwiseSub": + gotv = vec0.PairwiseSub(vec1) + case "SaturatedAdd": + gotv = vec0.SaturatedAdd(vec1) + case "SaturatedSub": + gotv = vec0.SaturatedSub(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Uint16x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint16x16BinaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []uint16, which string) { + t.Helper() + var gotv simd.Uint16x16 + got := make([]uint16, len(want)) + vec0 := simd.LoadUint16x16Slice(v0) + vec1 := simd.LoadUint16x16Slice(v1) + vec2 := simd.LoadInt16x16Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask16x16()) + case "MaskedAverage": + gotv = vec0.MaskedAverage(vec1, vec2.AsMask16x16()) + case 
"MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask16x16()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask16x16()) + case "MaskedMulHigh": + gotv = vec0.MaskedMulHigh(vec1, vec2.AsMask16x16()) + case "MaskedSaturatedAdd": + gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x16()) + case "MaskedSaturatedSub": + gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x16()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask16x16()) + + default: + t.Errorf("Unknown method: Uint16x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint16x16Compare(t *testing.T, v0 []uint16, v1 []uint16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x16 + got := make([]int16, len(want)) + vec0 := simd.LoadUint16x16Slice(v0) + vec1 := simd.LoadUint16x16Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt16x16() + case "Greater": + gotv = vec0.Greater(vec1).AsInt16x16() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt16x16() + case "Less": + gotv = vec0.Less(vec1).AsInt16x16() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt16x16() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt16x16() + + default: + t.Errorf("Unknown method: Uint16x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint16x16MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x16 + got := make([]int16, len(want)) + vec0 := simd.LoadUint16x16Slice(v0) + vec1 := simd.LoadUint16x16Slice(v1) + vec2 := simd.LoadInt16x16Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask16x16()).AsInt16x16() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask16x16()).AsInt16x16() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask16x16()).AsInt16x16() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask16x16()).AsInt16x16() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask16x16()).AsInt16x16() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask16x16()).AsInt16x16() + + default: + t.Errorf("Unknown method: Uint16x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint16x16Unary(t *testing.T, v0 []uint16, want []uint16, which string) { + t.Helper() + var gotv simd.Uint16x16 + got := make([]uint16, len(want)) + vec0 := simd.LoadUint16x16Slice(v0) + switch which { + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Uint16x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint16x16UnaryMasked(t *testing.T, v0 []uint16, v1 []int16, want []uint16, which string) { + t.Helper() + var gotv simd.Uint16x16 + got := make([]uint16, len(want)) + vec0 := simd.LoadUint16x16Slice(v0) + vec1 := simd.LoadInt16x16Slice(v1) + switch which { + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask16x16()) + + default: + t.Errorf("Unknown method: Uint16x16.%s", which) + } + 
gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint16x32Binary(t *testing.T, v0 []uint16, v1 []uint16, want []uint16, which string) { + t.Helper() + var gotv simd.Uint16x32 + got := make([]uint16, len(want)) + vec0 := simd.LoadUint16x32Slice(v0) + vec1 := simd.LoadUint16x32Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "Average": + gotv = vec0.Average(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "MulHigh": + gotv = vec0.MulHigh(vec1) + case "SaturatedAdd": + gotv = vec0.SaturatedAdd(vec1) + case "SaturatedSub": + gotv = vec0.SaturatedSub(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + + default: + t.Errorf("Unknown method: Uint16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint16x32BinaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []uint16, which string) { + t.Helper() + var gotv simd.Uint16x32 + got := make([]uint16, len(want)) + vec0 := simd.LoadUint16x32Slice(v0) + vec1 := simd.LoadUint16x32Slice(v1) + vec2 := simd.LoadInt16x32Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask16x32()) + case "MaskedAverage": + gotv = vec0.MaskedAverage(vec1, vec2.AsMask16x32()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask16x32()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask16x32()) + case "MaskedMulHigh": + gotv = vec0.MaskedMulHigh(vec1, vec2.AsMask16x32()) + case "MaskedSaturatedAdd": + gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x32()) + case "MaskedSaturatedSub": + gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x32()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask16x32()) + + default: + t.Errorf("Unknown method: Uint16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint16x32Compare(t *testing.T, v0 []uint16, v1 []uint16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x32 + got := make([]int16, len(want)) + vec0 := simd.LoadUint16x32Slice(v0) + vec1 := simd.LoadUint16x32Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt16x32() + case "Greater": + gotv = vec0.Greater(vec1).AsInt16x32() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt16x32() + case "Less": + gotv = vec0.Less(vec1).AsInt16x32() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt16x32() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt16x32() + + default: + t.Errorf("Unknown method: Uint16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint16x32MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x32 + got := make([]int16, len(want)) + vec0 := simd.LoadUint16x32Slice(v0) + vec1 := simd.LoadUint16x32Slice(v1) + vec2 := simd.LoadInt16x32Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask16x32()).AsInt16x32() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask16x32()).AsInt16x32() + case "MaskedGreaterEqual": + 
gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask16x32()).AsInt16x32() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask16x32()).AsInt16x32() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask16x32()).AsInt16x32() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask16x32()).AsInt16x32() + + default: + t.Errorf("Unknown method: Uint16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint16x32Unary(t *testing.T, v0 []uint16, want []uint16, which string) { + t.Helper() + var gotv simd.Uint16x32 + got := make([]uint16, len(want)) + vec0 := simd.LoadUint16x32Slice(v0) + switch which { + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Uint16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint16x32UnaryMasked(t *testing.T, v0 []uint16, v1 []int16, want []uint16, which string) { + t.Helper() + var gotv simd.Uint16x32 + got := make([]uint16, len(want)) + vec0 := simd.LoadUint16x32Slice(v0) + vec1 := simd.LoadInt16x32Slice(v1) + switch which { + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask16x32()) + + default: + t.Errorf("Unknown method: Uint16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint16x8Binary(t *testing.T, v0 []uint16, v1 []uint16, want []uint16, which string) { + t.Helper() + var gotv simd.Uint16x8 + got := make([]uint16, len(want)) + vec0 := simd.LoadUint16x8Slice(v0) + vec1 := simd.LoadUint16x8Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Average": + gotv = vec0.Average(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "MulHigh": + gotv = vec0.MulHigh(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "PairwiseAdd": + gotv = vec0.PairwiseAdd(vec1) + case "PairwiseSub": + gotv = vec0.PairwiseSub(vec1) + case "SaturatedAdd": + gotv = vec0.SaturatedAdd(vec1) + case "SaturatedSub": + gotv = vec0.SaturatedSub(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Uint16x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint16x8BinaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []uint16, which string) { + t.Helper() + var gotv simd.Uint16x8 + got := make([]uint16, len(want)) + vec0 := simd.LoadUint16x8Slice(v0) + vec1 := simd.LoadUint16x8Slice(v1) + vec2 := simd.LoadInt16x8Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask16x8()) + case "MaskedAverage": + gotv = vec0.MaskedAverage(vec1, vec2.AsMask16x8()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask16x8()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask16x8()) + case "MaskedMulHigh": + gotv = vec0.MaskedMulHigh(vec1, vec2.AsMask16x8()) + case "MaskedSaturatedAdd": + gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x8()) + case "MaskedSaturatedSub": + gotv = 
vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x8()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask16x8()) + + default: + t.Errorf("Unknown method: Uint16x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint16x8Compare(t *testing.T, v0 []uint16, v1 []uint16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x8 + got := make([]int16, len(want)) + vec0 := simd.LoadUint16x8Slice(v0) + vec1 := simd.LoadUint16x8Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt16x8() + case "Greater": + gotv = vec0.Greater(vec1).AsInt16x8() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt16x8() + case "Less": + gotv = vec0.Less(vec1).AsInt16x8() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt16x8() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt16x8() + + default: + t.Errorf("Unknown method: Uint16x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint16x8MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x8 + got := make([]int16, len(want)) + vec0 := simd.LoadUint16x8Slice(v0) + vec1 := simd.LoadUint16x8Slice(v1) + vec2 := simd.LoadInt16x8Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask16x8()).AsInt16x8() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask16x8()).AsInt16x8() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask16x8()).AsInt16x8() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask16x8()).AsInt16x8() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask16x8()).AsInt16x8() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask16x8()).AsInt16x8() + + default: + t.Errorf("Unknown method: Uint16x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint16x8Unary(t *testing.T, v0 []uint16, want []uint16, which string) { + t.Helper() + var gotv simd.Uint16x8 + got := make([]uint16, len(want)) + vec0 := simd.LoadUint16x8Slice(v0) + switch which { + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Uint16x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint16x8UnaryMasked(t *testing.T, v0 []uint16, v1 []int16, want []uint16, which string) { + t.Helper() + var gotv simd.Uint16x8 + got := make([]uint16, len(want)) + vec0 := simd.LoadUint16x8Slice(v0) + vec1 := simd.LoadInt16x8Slice(v1) + switch which { + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask16x8()) + + default: + t.Errorf("Unknown method: Uint16x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x16Binary(t *testing.T, v0 []uint32, v1 []uint32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x16 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x16Slice(v0) + vec1 := 
simd.LoadUint32x16Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Uint32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x16BinaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x16 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x16Slice(v0) + vec1 := simd.LoadUint32x16Slice(v1) + vec2 := simd.LoadInt32x16Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x16()) + case "MaskedAnd": + gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x16()) + case "MaskedAndNot": + gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x16()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask32x16()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask32x16()) + case "MaskedOr": + gotv = vec0.MaskedOr(vec1, vec2.AsMask32x16()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask32x16()) + case "MaskedXor": + gotv = vec0.MaskedXor(vec1, vec2.AsMask32x16()) + + default: + t.Errorf("Unknown method: Uint32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x16Compare(t *testing.T, v0 []uint32, v1 []uint32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadUint32x16Slice(v0) + vec1 := simd.LoadUint32x16Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt32x16() + case "Greater": + gotv = vec0.Greater(vec1).AsInt32x16() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt32x16() + case "Less": + gotv = vec0.Less(vec1).AsInt32x16() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt32x16() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt32x16() + + default: + t.Errorf("Unknown method: Uint32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x16MaskedCompare(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadUint32x16Slice(v0) + vec1 := simd.LoadUint32x16Slice(v1) + vec2 := simd.LoadInt32x16Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + + default: + t.Errorf("Unknown method: Uint32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != 
want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x16Uint8x64Int8x64Mask32x16Uint32x16(t *testing.T, v0 []uint32, v1 []uint8, v2 []int8, v3 []int32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x16 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x16Slice(v0) + vec1 := simd.LoadUint8x64Slice(v1) + vec2 := simd.LoadInt8x64Slice(v2) + vec3 := simd.LoadInt32x16Slice(v3) + switch which { + case "MaskedSaturatedUnsignedSignedQuadDotProdAccumulate": + gotv = vec0.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x16()) + case "MaskedUnsignedSignedQuadDotProdAccumulate": + gotv = vec0.MaskedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x16()) + + default: + t.Errorf("Unknown method: Uint32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x16Uint8x64Int8x64Uint32x16(t *testing.T, v0 []uint32, v1 []uint8, v2 []int8, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x16 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x16Slice(v0) + vec1 := simd.LoadUint8x64Slice(v1) + vec2 := simd.LoadInt8x64Slice(v2) + switch which { + case "SaturatedUnsignedSignedQuadDotProdAccumulate": + gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2) + case "UnsignedSignedQuadDotProdAccumulate": + gotv = vec0.UnsignedSignedQuadDotProdAccumulate(vec1, vec2) + + default: + t.Errorf("Unknown method: Uint32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x16Unary(t *testing.T, v0 []uint32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x16 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x16Slice(v0) + switch which { + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Uint32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x16UnaryMasked(t *testing.T, v0 []uint32, v1 []int32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x16 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x16Slice(v0) + vec1 := simd.LoadInt32x16Slice(v1) + switch which { + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask32x16()) + + default: + t.Errorf("Unknown method: Uint32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x4Binary(t *testing.T, v0 []uint32, v1 []uint32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x4 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x4Slice(v0) + vec1 := simd.LoadUint32x4Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "PairwiseAdd": + gotv = vec0.PairwiseAdd(vec1) + case "PairwiseSub": + gotv = vec0.PairwiseSub(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + 
t.Errorf("Unknown method: Uint32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x4BinaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x4 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x4Slice(v0) + vec1 := simd.LoadUint32x4Slice(v1) + vec2 := simd.LoadInt32x4Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x4()) + case "MaskedAnd": + gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x4()) + case "MaskedAndNot": + gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x4()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask32x4()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask32x4()) + case "MaskedOr": + gotv = vec0.MaskedOr(vec1, vec2.AsMask32x4()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask32x4()) + case "MaskedXor": + gotv = vec0.MaskedXor(vec1, vec2.AsMask32x4()) + + default: + t.Errorf("Unknown method: Uint32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x4BinaryWiden(t *testing.T, v0 []uint32, v1 []uint32, want []uint64, which string) { + t.Helper() + var gotv simd.Uint64x2 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint32x4Slice(v0) + vec1 := simd.LoadUint32x4Slice(v1) + switch which { + case "MulEvenWiden": + gotv = vec0.MulEvenWiden(vec1) + + default: + t.Errorf("Unknown method: Uint32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x4Compare(t *testing.T, v0 []uint32, v1 []uint32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x4 + got := make([]int32, len(want)) + vec0 := simd.LoadUint32x4Slice(v0) + vec1 := simd.LoadUint32x4Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt32x4() + case "Greater": + gotv = vec0.Greater(vec1).AsInt32x4() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt32x4() + case "Less": + gotv = vec0.Less(vec1).AsInt32x4() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt32x4() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt32x4() + + default: + t.Errorf("Unknown method: Uint32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x4MaskedCompare(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x4 + got := make([]int32, len(want)) + vec0 := simd.LoadUint32x4Slice(v0) + vec1 := simd.LoadUint32x4Slice(v1) + vec2 := simd.LoadInt32x4Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask32x4()).AsInt32x4() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask32x4()).AsInt32x4() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask32x4()).AsInt32x4() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask32x4()).AsInt32x4() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask32x4()).AsInt32x4() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, 
vec2.AsMask32x4()).AsInt32x4() + + default: + t.Errorf("Unknown method: Uint32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x4Uint8x16Int8x16Mask32x4Uint32x4(t *testing.T, v0 []uint32, v1 []uint8, v2 []int8, v3 []int32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x4 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x4Slice(v0) + vec1 := simd.LoadUint8x16Slice(v1) + vec2 := simd.LoadInt8x16Slice(v2) + vec3 := simd.LoadInt32x4Slice(v3) + switch which { + case "MaskedSaturatedUnsignedSignedQuadDotProdAccumulate": + gotv = vec0.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x4()) + case "MaskedUnsignedSignedQuadDotProdAccumulate": + gotv = vec0.MaskedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x4()) + + default: + t.Errorf("Unknown method: Uint32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x4Uint8x16Int8x16Uint32x4(t *testing.T, v0 []uint32, v1 []uint8, v2 []int8, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x4 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x4Slice(v0) + vec1 := simd.LoadUint8x16Slice(v1) + vec2 := simd.LoadInt8x16Slice(v2) + switch which { + case "SaturatedUnsignedSignedQuadDotProdAccumulate": + gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2) + case "UnsignedSignedQuadDotProdAccumulate": + gotv = vec0.UnsignedSignedQuadDotProdAccumulate(vec1, vec2) + + default: + t.Errorf("Unknown method: Uint32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x4Unary(t *testing.T, v0 []uint32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x4 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x4Slice(v0) + switch which { + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Uint32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x4UnaryMasked(t *testing.T, v0 []uint32, v1 []int32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x4 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x4Slice(v0) + vec1 := simd.LoadInt32x4Slice(v1) + switch which { + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask32x4()) + + default: + t.Errorf("Unknown method: Uint32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x8Binary(t *testing.T, v0 []uint32, v1 []uint32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x8 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x8Slice(v0) + vec1 := simd.LoadUint32x8Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "PairwiseAdd": + gotv = vec0.PairwiseAdd(vec1) + case 
"PairwiseSub": + gotv = vec0.PairwiseSub(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Uint32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x8BinaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x8 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x8Slice(v0) + vec1 := simd.LoadUint32x8Slice(v1) + vec2 := simd.LoadInt32x8Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x8()) + case "MaskedAnd": + gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x8()) + case "MaskedAndNot": + gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x8()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask32x8()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask32x8()) + case "MaskedOr": + gotv = vec0.MaskedOr(vec1, vec2.AsMask32x8()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask32x8()) + case "MaskedXor": + gotv = vec0.MaskedXor(vec1, vec2.AsMask32x8()) + + default: + t.Errorf("Unknown method: Uint32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x8BinaryWiden(t *testing.T, v0 []uint32, v1 []uint32, want []uint64, which string) { + t.Helper() + var gotv simd.Uint64x4 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint32x8Slice(v0) + vec1 := simd.LoadUint32x8Slice(v1) + switch which { + case "MulEvenWiden": + gotv = vec0.MulEvenWiden(vec1) + + default: + t.Errorf("Unknown method: Uint32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x8Compare(t *testing.T, v0 []uint32, v1 []uint32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x8 + got := make([]int32, len(want)) + vec0 := simd.LoadUint32x8Slice(v0) + vec1 := simd.LoadUint32x8Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt32x8() + case "Greater": + gotv = vec0.Greater(vec1).AsInt32x8() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt32x8() + case "Less": + gotv = vec0.Less(vec1).AsInt32x8() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt32x8() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt32x8() + + default: + t.Errorf("Unknown method: Uint32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x8MaskedCompare(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x8 + got := make([]int32, len(want)) + vec0 := simd.LoadUint32x8Slice(v0) + vec1 := simd.LoadUint32x8Slice(v1) + vec2 := simd.LoadInt32x8Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask32x8()).AsInt32x8() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask32x8()).AsInt32x8() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask32x8()).AsInt32x8() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask32x8()).AsInt32x8() + case "MaskedLessEqual": + gotv = 
vec0.MaskedLessEqual(vec1, vec2.AsMask32x8()).AsInt32x8() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask32x8()).AsInt32x8() + + default: + t.Errorf("Unknown method: Uint32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x8Uint8x32Int8x32Mask32x8Uint32x8(t *testing.T, v0 []uint32, v1 []uint8, v2 []int8, v3 []int32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x8 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x8Slice(v0) + vec1 := simd.LoadUint8x32Slice(v1) + vec2 := simd.LoadInt8x32Slice(v2) + vec3 := simd.LoadInt32x8Slice(v3) + switch which { + case "MaskedSaturatedUnsignedSignedQuadDotProdAccumulate": + gotv = vec0.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x8()) + case "MaskedUnsignedSignedQuadDotProdAccumulate": + gotv = vec0.MaskedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x8()) + + default: + t.Errorf("Unknown method: Uint32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x8Uint8x32Int8x32Uint32x8(t *testing.T, v0 []uint32, v1 []uint8, v2 []int8, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x8 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x8Slice(v0) + vec1 := simd.LoadUint8x32Slice(v1) + vec2 := simd.LoadInt8x32Slice(v2) + switch which { + case "SaturatedUnsignedSignedQuadDotProdAccumulate": + gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2) + case "UnsignedSignedQuadDotProdAccumulate": + gotv = vec0.UnsignedSignedQuadDotProdAccumulate(vec1, vec2) + + default: + t.Errorf("Unknown method: Uint32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x8Unary(t *testing.T, v0 []uint32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x8 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x8Slice(v0) + switch which { + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Uint32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x8UnaryMasked(t *testing.T, v0 []uint32, v1 []int32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x8 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x8Slice(v0) + vec1 := simd.LoadInt32x8Slice(v1) + switch which { + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask32x8()) + + default: + t.Errorf("Unknown method: Uint32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint64x2Binary(t *testing.T, v0 []uint64, v1 []uint64, want []uint64, which string) { + t.Helper() + var gotv simd.Uint64x2 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x2Slice(v0) + vec1 := simd.LoadUint64x2Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + 
gotv = vec0.Min(vec1) + case "MulEvenWiden": + gotv = vec0.MulEvenWiden(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Uint64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint64x2BinaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64, want []uint64, which string) { + t.Helper() + var gotv simd.Uint64x2 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x2Slice(v0) + vec1 := simd.LoadUint64x2Slice(v1) + vec2 := simd.LoadInt64x2Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x2()) + case "MaskedAnd": + gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x2()) + case "MaskedAndNot": + gotv = vec0.MaskedAndNot(vec1, vec2.AsMask64x2()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask64x2()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask64x2()) + case "MaskedMulEvenWiden": + gotv = vec0.MaskedMulEvenWiden(vec1, vec2.AsMask64x2()) + case "MaskedOr": + gotv = vec0.MaskedOr(vec1, vec2.AsMask64x2()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask64x2()) + case "MaskedXor": + gotv = vec0.MaskedXor(vec1, vec2.AsMask64x2()) + + default: + t.Errorf("Unknown method: Uint64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint64x2Compare(t *testing.T, v0 []uint64, v1 []uint64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x2 + got := make([]int64, len(want)) + vec0 := simd.LoadUint64x2Slice(v0) + vec1 := simd.LoadUint64x2Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt64x2() + case "Greater": + gotv = vec0.Greater(vec1).AsInt64x2() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt64x2() + case "Less": + gotv = vec0.Less(vec1).AsInt64x2() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt64x2() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt64x2() + + default: + t.Errorf("Unknown method: Uint64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint64x2MaskedCompare(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x2 + got := make([]int64, len(want)) + vec0 := simd.LoadUint64x2Slice(v0) + vec1 := simd.LoadUint64x2Slice(v1) + vec2 := simd.LoadInt64x2Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask64x2()).AsInt64x2() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask64x2()).AsInt64x2() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask64x2()).AsInt64x2() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask64x2()).AsInt64x2() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask64x2()).AsInt64x2() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask64x2()).AsInt64x2() + + default: + t.Errorf("Unknown method: Uint64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func 
testUint64x2Unary(t *testing.T, v0 []uint64, want []uint64, which string) { + t.Helper() + var gotv simd.Uint64x2 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x2Slice(v0) + switch which { + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Uint64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint64x2UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint64, which string) { + t.Helper() + var gotv simd.Uint64x2 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x2Slice(v0) + vec1 := simd.LoadInt64x2Slice(v1) + switch which { + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask64x2()) + + default: + t.Errorf("Unknown method: Uint64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint64x4Binary(t *testing.T, v0 []uint64, v1 []uint64, want []uint64, which string) { + t.Helper() + var gotv simd.Uint64x4 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x4Slice(v0) + vec1 := simd.LoadUint64x4Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "MulEvenWiden": + gotv = vec0.MulEvenWiden(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Uint64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint64x4BinaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64, want []uint64, which string) { + t.Helper() + var gotv simd.Uint64x4 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x4Slice(v0) + vec1 := simd.LoadUint64x4Slice(v1) + vec2 := simd.LoadInt64x4Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x4()) + case "MaskedAnd": + gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x4()) + case "MaskedAndNot": + gotv = vec0.MaskedAndNot(vec1, vec2.AsMask64x4()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask64x4()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask64x4()) + case "MaskedMulEvenWiden": + gotv = vec0.MaskedMulEvenWiden(vec1, vec2.AsMask64x4()) + case "MaskedOr": + gotv = vec0.MaskedOr(vec1, vec2.AsMask64x4()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask64x4()) + case "MaskedXor": + gotv = vec0.MaskedXor(vec1, vec2.AsMask64x4()) + + default: + t.Errorf("Unknown method: Uint64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint64x4Compare(t *testing.T, v0 []uint64, v1 []uint64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x4 + got := make([]int64, len(want)) + vec0 := simd.LoadUint64x4Slice(v0) + vec1 := simd.LoadUint64x4Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt64x4() + case "Greater": + gotv = vec0.Greater(vec1).AsInt64x4() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt64x4() + case "Less": + gotv = 
vec0.Less(vec1).AsInt64x4() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt64x4() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt64x4() + + default: + t.Errorf("Unknown method: Uint64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint64x4MaskedCompare(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x4 + got := make([]int64, len(want)) + vec0 := simd.LoadUint64x4Slice(v0) + vec1 := simd.LoadUint64x4Slice(v1) + vec2 := simd.LoadInt64x4Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask64x4()).AsInt64x4() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask64x4()).AsInt64x4() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask64x4()).AsInt64x4() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask64x4()).AsInt64x4() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask64x4()).AsInt64x4() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask64x4()).AsInt64x4() + + default: + t.Errorf("Unknown method: Uint64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint64x4Unary(t *testing.T, v0 []uint64, want []uint64, which string) { + t.Helper() + var gotv simd.Uint64x4 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x4Slice(v0) + switch which { + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Uint64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint64x4UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint64, which string) { + t.Helper() + var gotv simd.Uint64x4 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x4Slice(v0) + vec1 := simd.LoadInt64x4Slice(v1) + switch which { + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask64x4()) + + default: + t.Errorf("Unknown method: Uint64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint64x8Binary(t *testing.T, v0 []uint64, v1 []uint64, want []uint64, which string) { + t.Helper() + var gotv simd.Uint64x8 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x8Slice(v0) + vec1 := simd.LoadUint64x8Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "MulEvenWiden": + gotv = vec0.MulEvenWiden(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Uint64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint64x8BinaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64, want []uint64, which string) { + t.Helper() + var gotv simd.Uint64x8 + got := make([]uint64, len(want)) + 
vec0 := simd.LoadUint64x8Slice(v0) + vec1 := simd.LoadUint64x8Slice(v1) + vec2 := simd.LoadInt64x8Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x8()) + case "MaskedAnd": + gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x8()) + case "MaskedAndNot": + gotv = vec0.MaskedAndNot(vec1, vec2.AsMask64x8()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask64x8()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask64x8()) + case "MaskedMulEvenWiden": + gotv = vec0.MaskedMulEvenWiden(vec1, vec2.AsMask64x8()) + case "MaskedOr": + gotv = vec0.MaskedOr(vec1, vec2.AsMask64x8()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask64x8()) + case "MaskedXor": + gotv = vec0.MaskedXor(vec1, vec2.AsMask64x8()) + + default: + t.Errorf("Unknown method: Uint64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint64x8Compare(t *testing.T, v0 []uint64, v1 []uint64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x8 + got := make([]int64, len(want)) + vec0 := simd.LoadUint64x8Slice(v0) + vec1 := simd.LoadUint64x8Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt64x8() + case "Greater": + gotv = vec0.Greater(vec1).AsInt64x8() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt64x8() + case "Less": + gotv = vec0.Less(vec1).AsInt64x8() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt64x8() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt64x8() + + default: + t.Errorf("Unknown method: Uint64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint64x8MaskedCompare(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x8 + got := make([]int64, len(want)) + vec0 := simd.LoadUint64x8Slice(v0) + vec1 := simd.LoadUint64x8Slice(v1) + vec2 := simd.LoadInt64x8Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask64x8()).AsInt64x8() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask64x8()).AsInt64x8() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask64x8()).AsInt64x8() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask64x8()).AsInt64x8() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask64x8()).AsInt64x8() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask64x8()).AsInt64x8() + + default: + t.Errorf("Unknown method: Uint64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint64x8Unary(t *testing.T, v0 []uint64, want []uint64, which string) { + t.Helper() + var gotv simd.Uint64x8 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x8Slice(v0) + switch which { + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Uint64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint64x8UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint64, which string) { + t.Helper() + var gotv simd.Uint64x8 + got := 
make([]uint64, len(want)) + vec0 := simd.LoadUint64x8Slice(v0) + vec1 := simd.LoadInt64x8Slice(v1) + switch which { + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask64x8()) + + default: + t.Errorf("Unknown method: Uint64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x16Binary(t *testing.T, v0 []uint8, v1 []uint8, want []uint8, which string) { + t.Helper() + var gotv simd.Uint8x16 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x16Slice(v0) + vec1 := simd.LoadUint8x16Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Average": + gotv = vec0.Average(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "SaturatedAdd": + gotv = vec0.SaturatedAdd(vec1) + case "SaturatedSub": + gotv = vec0.SaturatedSub(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Uint8x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x16BinaryMasked(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []uint8, which string) { + t.Helper() + var gotv simd.Uint8x16 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x16Slice(v0) + vec1 := simd.LoadUint8x16Slice(v1) + vec2 := simd.LoadInt8x16Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask8x16()) + case "MaskedAverage": + gotv = vec0.MaskedAverage(vec1, vec2.AsMask8x16()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask8x16()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask8x16()) + case "MaskedSaturatedAdd": + gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask8x16()) + case "MaskedSaturatedSub": + gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask8x16()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask8x16()) + + default: + t.Errorf("Unknown method: Uint8x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x16Compare(t *testing.T, v0 []uint8, v1 []uint8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x16 + got := make([]int8, len(want)) + vec0 := simd.LoadUint8x16Slice(v0) + vec1 := simd.LoadUint8x16Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt8x16() + case "Greater": + gotv = vec0.Greater(vec1).AsInt8x16() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt8x16() + case "Less": + gotv = vec0.Less(vec1).AsInt8x16() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt8x16() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt8x16() + + default: + t.Errorf("Unknown method: Uint8x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x16Int8x16Int16x8(t *testing.T, v0 []uint8, v1 []int8, want []int16, which string) { + t.Helper() + var gotv simd.Int16x8 + got := make([]int16, len(want)) + vec0 := simd.LoadUint8x16Slice(v0) + vec1 := simd.LoadInt8x16Slice(v1) + switch which { + case 
"SaturatedUnsignedSignedPairDotProd": + gotv = vec0.SaturatedUnsignedSignedPairDotProd(vec1) + + default: + t.Errorf("Unknown method: Uint8x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x16Int8x16Mask16x8Int16x8(t *testing.T, v0 []uint8, v1 []int8, v2 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x8 + got := make([]int16, len(want)) + vec0 := simd.LoadUint8x16Slice(v0) + vec1 := simd.LoadInt8x16Slice(v1) + vec2 := simd.LoadInt16x8Slice(v2) + switch which { + case "MaskedSaturatedUnsignedSignedPairDotProd": + gotv = vec0.MaskedSaturatedUnsignedSignedPairDotProd(vec1, vec2.AsMask16x8()) + + default: + t.Errorf("Unknown method: Uint8x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x16MaskedCompare(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x16 + got := make([]int8, len(want)) + vec0 := simd.LoadUint8x16Slice(v0) + vec1 := simd.LoadUint8x16Slice(v1) + vec2 := simd.LoadInt8x16Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask8x16()).AsInt8x16() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask8x16()).AsInt8x16() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask8x16()).AsInt8x16() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask8x16()).AsInt8x16() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask8x16()).AsInt8x16() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask8x16()).AsInt8x16() + + default: + t.Errorf("Unknown method: Uint8x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x16Unary(t *testing.T, v0 []uint8, want []uint8, which string) { + t.Helper() + var gotv simd.Uint8x16 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x16Slice(v0) + switch which { + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Uint8x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x16UnaryMasked(t *testing.T, v0 []uint8, v1 []int8, want []uint8, which string) { + t.Helper() + var gotv simd.Uint8x16 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x16Slice(v0) + vec1 := simd.LoadInt8x16Slice(v1) + switch which { + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask8x16()) + + default: + t.Errorf("Unknown method: Uint8x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x32Binary(t *testing.T, v0 []uint8, v1 []uint8, want []uint8, which string) { + t.Helper() + var gotv simd.Uint8x32 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x32Slice(v0) + vec1 := simd.LoadUint8x32Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Average": + gotv = vec0.Average(vec1) + case 
"Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "SaturatedAdd": + gotv = vec0.SaturatedAdd(vec1) + case "SaturatedSub": + gotv = vec0.SaturatedSub(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Uint8x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x32BinaryMasked(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []uint8, which string) { + t.Helper() + var gotv simd.Uint8x32 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x32Slice(v0) + vec1 := simd.LoadUint8x32Slice(v1) + vec2 := simd.LoadInt8x32Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask8x32()) + case "MaskedAverage": + gotv = vec0.MaskedAverage(vec1, vec2.AsMask8x32()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask8x32()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask8x32()) + case "MaskedSaturatedAdd": + gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask8x32()) + case "MaskedSaturatedSub": + gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask8x32()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask8x32()) + + default: + t.Errorf("Unknown method: Uint8x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x32Compare(t *testing.T, v0 []uint8, v1 []uint8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x32 + got := make([]int8, len(want)) + vec0 := simd.LoadUint8x32Slice(v0) + vec1 := simd.LoadUint8x32Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt8x32() + case "Greater": + gotv = vec0.Greater(vec1).AsInt8x32() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt8x32() + case "Less": + gotv = vec0.Less(vec1).AsInt8x32() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt8x32() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt8x32() + + default: + t.Errorf("Unknown method: Uint8x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x32Int8x32Int16x16(t *testing.T, v0 []uint8, v1 []int8, want []int16, which string) { + t.Helper() + var gotv simd.Int16x16 + got := make([]int16, len(want)) + vec0 := simd.LoadUint8x32Slice(v0) + vec1 := simd.LoadInt8x32Slice(v1) + switch which { + case "SaturatedUnsignedSignedPairDotProd": + gotv = vec0.SaturatedUnsignedSignedPairDotProd(vec1) + + default: + t.Errorf("Unknown method: Uint8x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x32Int8x32Mask16x16Int16x16(t *testing.T, v0 []uint8, v1 []int8, v2 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x16 + got := make([]int16, len(want)) + vec0 := simd.LoadUint8x32Slice(v0) + vec1 := simd.LoadInt8x32Slice(v1) + vec2 := simd.LoadInt16x16Slice(v2) + switch which { + case "MaskedSaturatedUnsignedSignedPairDotProd": + gotv = vec0.MaskedSaturatedUnsignedSignedPairDotProd(vec1, vec2.AsMask16x16()) + + default: + t.Errorf("Unknown method: Uint8x32.%s", which) + } + 
gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x32MaskedCompare(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x32 + got := make([]int8, len(want)) + vec0 := simd.LoadUint8x32Slice(v0) + vec1 := simd.LoadUint8x32Slice(v1) + vec2 := simd.LoadInt8x32Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask8x32()).AsInt8x32() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask8x32()).AsInt8x32() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask8x32()).AsInt8x32() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask8x32()).AsInt8x32() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask8x32()).AsInt8x32() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask8x32()).AsInt8x32() + + default: + t.Errorf("Unknown method: Uint8x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x32Unary(t *testing.T, v0 []uint8, want []uint8, which string) { + t.Helper() + var gotv simd.Uint8x32 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x32Slice(v0) + switch which { + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Uint8x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x32UnaryMasked(t *testing.T, v0 []uint8, v1 []int8, want []uint8, which string) { + t.Helper() + var gotv simd.Uint8x32 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x32Slice(v0) + vec1 := simd.LoadInt8x32Slice(v1) + switch which { + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask8x32()) + + default: + t.Errorf("Unknown method: Uint8x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x64Binary(t *testing.T, v0 []uint8, v1 []uint8, want []uint8, which string) { + t.Helper() + var gotv simd.Uint8x64 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x64Slice(v0) + vec1 := simd.LoadUint8x64Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "Average": + gotv = vec0.Average(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "SaturatedAdd": + gotv = vec0.SaturatedAdd(vec1) + case "SaturatedSub": + gotv = vec0.SaturatedSub(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + + default: + t.Errorf("Unknown method: Uint8x64.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x64BinaryMasked(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []uint8, which string) { + t.Helper() + var gotv simd.Uint8x64 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x64Slice(v0) + vec1 := simd.LoadUint8x64Slice(v1) + vec2 := simd.LoadInt8x64Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask8x64()) + case "MaskedAverage": + gotv = vec0.MaskedAverage(vec1, vec2.AsMask8x64()) + case "MaskedMax": + 
gotv = vec0.MaskedMax(vec1, vec2.AsMask8x64()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask8x64()) + case "MaskedSaturatedAdd": + gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask8x64()) + case "MaskedSaturatedSub": + gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask8x64()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask8x64()) + + default: + t.Errorf("Unknown method: Uint8x64.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x64Compare(t *testing.T, v0 []uint8, v1 []uint8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x64 + got := make([]int8, len(want)) + vec0 := simd.LoadUint8x64Slice(v0) + vec1 := simd.LoadUint8x64Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt8x64() + case "Greater": + gotv = vec0.Greater(vec1).AsInt8x64() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt8x64() + case "Less": + gotv = vec0.Less(vec1).AsInt8x64() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt8x64() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt8x64() + + default: + t.Errorf("Unknown method: Uint8x64.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x64Int8x64Int16x32(t *testing.T, v0 []uint8, v1 []int8, want []int16, which string) { + t.Helper() + var gotv simd.Int16x32 + got := make([]int16, len(want)) + vec0 := simd.LoadUint8x64Slice(v0) + vec1 := simd.LoadInt8x64Slice(v1) + switch which { + case "SaturatedUnsignedSignedPairDotProd": + gotv = vec0.SaturatedUnsignedSignedPairDotProd(vec1) + + default: + t.Errorf("Unknown method: Uint8x64.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x64Int8x64Mask16x32Int16x32(t *testing.T, v0 []uint8, v1 []int8, v2 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x32 + got := make([]int16, len(want)) + vec0 := simd.LoadUint8x64Slice(v0) + vec1 := simd.LoadInt8x64Slice(v1) + vec2 := simd.LoadInt16x32Slice(v2) + switch which { + case "MaskedSaturatedUnsignedSignedPairDotProd": + gotv = vec0.MaskedSaturatedUnsignedSignedPairDotProd(vec1, vec2.AsMask16x32()) + + default: + t.Errorf("Unknown method: Uint8x64.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x64MaskedCompare(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x64 + got := make([]int8, len(want)) + vec0 := simd.LoadUint8x64Slice(v0) + vec1 := simd.LoadUint8x64Slice(v1) + vec2 := simd.LoadInt8x64Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask8x64()).AsInt8x64() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask8x64()).AsInt8x64() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask8x64()).AsInt8x64() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask8x64()).AsInt8x64() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask8x64()).AsInt8x64() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask8x64()).AsInt8x64() + + 
default: + t.Errorf("Unknown method: Uint8x64.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x64Unary(t *testing.T, v0 []uint8, want []uint8, which string) { + t.Helper() + var gotv simd.Uint8x64 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x64Slice(v0) + switch which { + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Uint8x64.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x64UnaryMasked(t *testing.T, v0 []uint8, v1 []int8, want []uint8, which string) { + t.Helper() + var gotv simd.Uint8x64 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x64Slice(v0) + vec1 := simd.LoadInt8x64Slice(v1) + switch which { + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask8x64()) + + default: + t.Errorf("Unknown method: Uint8x64.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} diff --git a/src/simd/stubs_amd64.go b/src/simd/stubs_amd64.go index aeb8c6bda7bde1..ceccf1cf61c12e 100644 --- a/src/simd/stubs_amd64.go +++ b/src/simd/stubs_amd64.go @@ -7244,42 +7244,42 @@ func (x Uint32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z In /* SetElem */ -// SetElem sets a single constant-indexed element's value +// SetElem sets a single constant-indexed element's value. // // Asm: VPINSRB, CPU Feature: AVX func (x Int8x16) SetElem(imm uint8, y int8) Int8x16 -// SetElem sets a single constant-indexed element's value +// SetElem sets a single constant-indexed element's value. // // Asm: VPINSRW, CPU Feature: AVX func (x Int16x8) SetElem(imm uint8, y int16) Int16x8 -// SetElem sets a single constant-indexed element's value +// SetElem sets a single constant-indexed element's value. // // Asm: VPINSRD, CPU Feature: AVX func (x Int32x4) SetElem(imm uint8, y int8) Int32x4 -// SetElem sets a single constant-indexed element's value +// SetElem sets a single constant-indexed element's value. // // Asm: VPINSRQ, CPU Feature: AVX func (x Int64x2) SetElem(imm uint8, y int64) Int64x2 -// SetElem sets a single constant-indexed element's value +// SetElem sets a single constant-indexed element's value. // // Asm: VPINSRB, CPU Feature: AVX func (x Uint8x16) SetElem(imm uint8, y uint8) Uint8x16 -// SetElem sets a single constant-indexed element's value +// SetElem sets a single constant-indexed element's value. // // Asm: VPINSRW, CPU Feature: AVX func (x Uint16x8) SetElem(imm uint8, y uint16) Uint16x8 -// SetElem sets a single constant-indexed element's value +// SetElem sets a single constant-indexed element's value. // // Asm: VPINSRD, CPU Feature: AVX func (x Uint32x4) SetElem(imm uint8, y uint8) Uint32x4 -// SetElem sets a single constant-indexed element's value +// SetElem sets a single constant-indexed element's value. // // Asm: VPINSRQ, CPU Feature: AVX func (x Uint64x2) SetElem(imm uint8, y uint64) Uint64x2 From 4fda27c0cc5566f945adc6de88de294a3387830a Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Tue, 24 Jun 2025 03:59:30 +0000 Subject: [PATCH 045/139] [dev.simd] cmd/compile: glue codes for Shift and Rotate This CL adds two more intrinsic lowering functions. 
They can issue an OpCopy to move a scalar value to a vector value.
This is needed by the Shift and Rotate APIs.

Change-Id: I8a83197d33207072c4a9221a931e67dddd5cd0bf
Reviewed-on: https://go-review.googlesource.com/c/go/+/683476
Auto-Submit: Junyang Shao
LUCI-TryBot-Result: Go LUCI
Reviewed-by: Cherry Mui
---
 src/cmd/compile/internal/amd64/ssa.go | 44 +++++++++++++++++++++++++++
 1 file changed, 44 insertions(+)

diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go
index 82226ec1cdeace..1d90da2375d1a0 100644
--- a/src/cmd/compile/internal/amd64/ssa.go
+++ b/src/cmd/compile/internal/amd64/ssa.go
@@ -1540,6 +1540,21 @@ func simdFp21(s *ssagen.State, v *ssa.Value) *obj.Prog {
 	return p
 }
 
+// This function accommodates the shifts.
+// The 2nd arg is an XMM register, and this function merely checks that.
+// Example instruction: VPSLLQ Z1, X1, Z2
+func simdFpXfp(s *ssagen.State, v *ssa.Value) *obj.Prog {
+	p := s.Prog(v.Op.Asm())
+	p.From.Type = obj.TYPE_REG
+	// Vector register operands follow a right-to-left order.
+	// e.g. VPSUBD X1, X2, X3 means X3 = X2 - X1.
+	p.From.Reg = v.Args[1].Reg()
+	p.AddRestSourceReg(simdReg(v.Args[0]))
+	p.To.Type = obj.TYPE_REG
+	p.To.Reg = simdReg(v)
+	return p
+}
+
 // Example instruction: VPCMPEQW Z26, Z30, K4
 func simdFp2k(s *ssagen.State, v *ssa.Value) *obj.Prog {
 	// simdReg handles mask and vector registers altogether
@@ -1563,6 +1578,20 @@ func simdFp2kfp(s *ssagen.State, v *ssa.Value) *obj.Prog {
 	return p
 }
 
+// This function accommodates the shifts.
+// The 2nd arg is an XMM register, and this function merely checks that.
+// Example instruction: VPSLLQ Z1, X1, K1, Z2
+func simdFpXkfp(s *ssagen.State, v *ssa.Value) *obj.Prog {
+	p := s.Prog(v.Op.Asm())
+	p.From.Type = obj.TYPE_REG
+	p.From.Reg = v.Args[1].Reg()
+	p.AddRestSourceReg(simdReg(v.Args[0]))
+	p.AddRestSourceReg(simdReg(v.Args[2]))
+	p.To.Type = obj.TYPE_REG
+	p.To.Reg = simdReg(v)
+	return p
+}
+
 // Example instruction: VPCMPEQW Z26, Z30, K1, K4
 func simdFp2kk(s *ssagen.State, v *ssa.Value) *obj.Prog {
 	return simdFp2kfp(s, v)
@@ -1664,6 +1693,10 @@ func simdFp2kkImm8(s *ssagen.State, v *ssa.Value) *obj.Prog {
 	return p
 }
 
+func simdFp2kfpImm8(s *ssagen.State, v *ssa.Value) *obj.Prog {
+	return simdFp2kkImm8(s, v)
+}
+
 // Example instruction: VFMADD213PD Z2, Z1, Z0
 func simdFp31ResultInArg0(s *ssagen.State, v *ssa.Value) *obj.Prog {
 	p := s.Prog(v.Op.Asm())
@@ -1834,6 +1867,17 @@ func simdReg(v *ssa.Value) int16 {
 	panic("unreachable")
 }
 
+// XXX this is used for shift operations only.
+// regalloc will issue OpCopy with incorrect type, but the assigned
+// register should be correct, and this function is merely checking
+// the sanity of this part.
+func simdCheckRegOnly(v *ssa.Value, regStart, regEnd int16) int16 {
+	if v.Reg() > regEnd || v.Reg() < regStart {
+		panic("simdCheckRegOnly: not the desired register")
+	}
+	return v.Reg()
+}
+
 func simdMov(width int64) obj.As {
 	if width >= 64 {
 		return x86.AVMOVDQU64

From 0d8cb89f5c5acd69c6c9fc600c251cf880010e2d Mon Sep 17 00:00:00 2001
From: David Chase
Date: Tue, 24 Jun 2025 16:26:47 -0400
Subject: [PATCH 046/139] [dev.simd] cmd/compile: support simd(imm,fp) returns gp

These changes are required to make gp-returning simd ops work.
amd64/ssa.go includes a new code generator helper, gc/main.go
initializes intrinsics AFTER the types, and ssa/_gen/*AMD64.go adds
another register shape to the simd ops function.
This CL should be submitted after simdgen CL 683858 which
generated some of the changes.

Change-Id: I0af752ba8882fa131b875ff9c741ef70afbc60d1 Reviewed-on: https://go-review.googlesource.com/c/go/+/683816 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao --- src/cmd/compile/internal/amd64/ssa.go | 14 ++++++++++++++ src/cmd/compile/internal/gc/main.go | 6 +++++- src/cmd/compile/internal/ssa/_gen/AMD64Ops.go | 2 +- src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 2 +- src/simd/stubs_amd64.go | 4 ++-- 5 files changed, 23 insertions(+), 5 deletions(-) diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 1d90da2375d1a0..0c9d12620afdb4 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -1720,6 +1720,20 @@ func simdFp3kfpResultInArg0(s *ssagen.State, v *ssa.Value) *obj.Prog { return p } +func simdFpgpImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { + p := s.Prog(v.Op.Asm()) + imm := v.AuxInt + if imm < 0 || imm > 255 { + v.Fatalf("Invalid source selection immediate") + } + p.From.Offset = imm + p.From.Type = obj.TYPE_CONST + p.AddRestSourceReg(simdReg(v.Args[0])) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + return p +} + // Currently unused func simdFp31(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index c486920f5b2b72..20899df04ddd3d 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -191,7 +191,6 @@ func Main(archInit func(*ssagen.ArchInfo)) { ir.IsIntrinsicSym = ssagen.IsIntrinsicSym inline.SSADumpInline = ssagen.DumpInline ssagen.InitEnv() - ssagen.InitTables() types.PtrSize = ssagen.Arch.LinkArch.PtrSize types.RegSize = ssagen.Arch.LinkArch.RegSize @@ -205,6 +204,11 @@ func Main(archInit func(*ssagen.ArchInfo)) { typecheck.InitRuntime() rttype.Init() + // Some intrinsics (notably, the simd intrinsics) mention + // types "eagerly", thus ssagen must be initialized AFTER + // the type system is ready. + ssagen.InitTables() + // Parse and typecheck input. noder.LoadPackage(flag.Args()) diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go index e2cbc65957c75f..9ff77736f03cf7 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go @@ -1301,7 +1301,7 @@ func init() { pkg: "cmd/internal/obj/x86", genfile: "../../amd64/ssa.go", genSIMDfile: "../../amd64/simdssa.go", - ops: append(AMD64ops, simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp3fp1, fp3k1fp1, fp1gp1fp1)...), // AMD64ops, + ops: append(AMD64ops, simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp3fp1, fp3k1fp1, fp1gp1fp1, fpgp)...), // AMD64ops, blocks: AMD64blocks, regnames: regNamesAMD64, ParamIntRegNames: "AX BX CX DI SI R8 R9 R10 R11", diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 9f8230946325af..88d90c2f85aadb 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -1,7 +1,7 @@ // Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. 
package main -func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp regInfo) []opData { +func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, fpgp regInfo) []opData { return []opData{ {name: "VADDPS512", argLength: 2, reg: fp21, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VANDPS512", argLength: 2, reg: fp21, asm: "VANDPS", commutative: true, typ: "Vec512", resultInArg0: false}, diff --git a/src/simd/stubs_amd64.go b/src/simd/stubs_amd64.go index ceccf1cf61c12e..66ff8c545e6978 100644 --- a/src/simd/stubs_amd64.go +++ b/src/simd/stubs_amd64.go @@ -7257,7 +7257,7 @@ func (x Int16x8) SetElem(imm uint8, y int16) Int16x8 // SetElem sets a single constant-indexed element's value. // // Asm: VPINSRD, CPU Feature: AVX -func (x Int32x4) SetElem(imm uint8, y int8) Int32x4 +func (x Int32x4) SetElem(imm uint8, y int32) Int32x4 // SetElem sets a single constant-indexed element's value. // @@ -7277,7 +7277,7 @@ func (x Uint16x8) SetElem(imm uint8, y uint16) Uint16x8 // SetElem sets a single constant-indexed element's value. // // Asm: VPINSRD, CPU Feature: AVX -func (x Uint32x4) SetElem(imm uint8, y uint8) Uint32x4 +func (x Uint32x4) SetElem(imm uint8, y uint32) Uint32x4 // SetElem sets a single constant-indexed element's value. // From 7fadfa9638b8b2d7566677456dbd31acbc7c42cc Mon Sep 17 00:00:00 2001 From: David Chase Date: Tue, 24 Jun 2025 18:29:38 -0400 Subject: [PATCH 047/139] [dev.simd] cmd/compile: add simd VPEXTRA* This CL is generated by simdgen CL 683836 and this CL should be submitted after its generator. Change-Id: I1aa893b185826ad1f9fb60b85c75eda31f70623b Reviewed-on: https://go-review.googlesource.com/c/go/+/683797 LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui --- src/cmd/compile/internal/amd64/simdssa.go | 6 + .../compile/internal/ssa/_gen/simdAMD64.rules | 8 ++ .../compile/internal/ssa/_gen/simdAMD64ops.go | 4 + .../internal/ssa/_gen/simdgenericOps.go | 8 ++ src/cmd/compile/internal/ssa/opGen.go | 116 +++++++++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 120 ++++++++++++++++++ .../compile/internal/ssagen/simdintrinsics.go | 8 ++ src/simd/simd_test.go | 10 ++ src/simd/stubs_amd64.go | 42 ++++++ 9 files changed, 322 insertions(+) diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 9364722c3a788d..5297680357844b 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -724,6 +724,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPINSRQ128: p = simdFpgpfpImm8(s, v) + case ssa.OpAMD64VPEXTRB128, + ssa.OpAMD64VPEXTRW128, + ssa.OpAMD64VPEXTRD128, + ssa.OpAMD64VPEXTRQ128: + p = simdFpgpImm8(s, v) + default: // Unknown reg shape return false diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 615686166d1b4d..bb0476fc20c3e4 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -251,6 +251,14 @@ (FusedMultiplySubAddFloat64x2 ...) => (VFMSUBADD213PD128 ...) (FusedMultiplySubAddFloat64x4 ...) => (VFMSUBADD213PD256 ...) (FusedMultiplySubAddFloat64x8 ...) => (VFMSUBADD213PD512 ...) 
+(GetElemInt16x8 [a] x) => (VPEXTRW128 [a] x) +(GetElemInt32x4 [a] x) => (VPEXTRD128 [a] x) +(GetElemInt64x2 [a] x) => (VPEXTRQ128 [a] x) +(GetElemInt8x16 [a] x) => (VPEXTRB128 [a] x) +(GetElemUint16x8 [a] x) => (VPEXTRW128 [a] x) +(GetElemUint32x4 [a] x) => (VPEXTRD128 [a] x) +(GetElemUint64x2 [a] x) => (VPEXTRQ128 [a] x) +(GetElemUint8x16 [a] x) => (VPEXTRB128 [a] x) (GreaterFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [6] x y)) (GreaterFloat32x4 x y) => (VCMPPS128 [6] x y) (GreaterFloat32x8 x y) => (VCMPPS256 [6] x y) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 88d90c2f85aadb..93b136230d0778 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -643,16 +643,19 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPCMPWMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPW512", argLength: 2, reg: fp2k, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPWMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPEXTRW128", argLength: 1, reg: fpgp, asm: "VPEXTRW", aux: "Int8", commutative: false, typ: "int16", resultInArg0: false}, {name: "VPCMPW128", argLength: 2, reg: fp2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPWMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPINSRW128", argLength: 2, reg: fpgpfp, asm: "VPINSRW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPD512", argLength: 2, reg: fp2k, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPDMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPEXTRD128", argLength: 1, reg: fpgp, asm: "VPEXTRD", aux: "Int8", commutative: false, typ: "int32", resultInArg0: false}, {name: "VPCMPD128", argLength: 2, reg: fp2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPDMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPINSRD128", argLength: 2, reg: fpgpfp, asm: "VPINSRD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPD256", argLength: 2, reg: fp2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPDMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPEXTRQ128", argLength: 1, reg: fpgp, asm: "VPEXTRQ", aux: "Int8", commutative: false, typ: "int64", resultInArg0: false}, {name: "VPCMPQ128", argLength: 2, reg: fp2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPQMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPINSRQ128", argLength: 2, reg: fpgpfp, asm: "VPINSRQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -660,6 +663,7 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPCMPQMasked256", argLength: 3, reg: 
fp2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPQ512", argLength: 2, reg: fp2k, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPQMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPEXTRB128", argLength: 1, reg: fpgp, asm: "VPEXTRB", aux: "Int8", commutative: false, typ: "int8", resultInArg0: false}, {name: "VPCMPB128", argLength: 2, reg: fp2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPBMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPINSRB128", argLength: 2, reg: fpgpfp, asm: "VPINSRB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index ca196cd9e19079..1c33483f4242f0 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -1372,13 +1372,21 @@ func simdGenericOps() []opData { {name: "RoundWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "TruncSuppressExceptionWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "TruncWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "GetElemInt16x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "SetElemInt16x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "GetElemInt32x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "SetElemInt32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "GetElemInt64x2", argLength: 1, commutative: false, aux: "Int8"}, {name: "SetElemInt64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "GetElemInt8x16", argLength: 1, commutative: false, aux: "Int8"}, {name: "SetElemInt8x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "GetElemUint16x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "SetElemUint16x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "GetElemUint32x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "SetElemUint32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "GetElemUint64x2", argLength: 1, commutative: false, aux: "Int8"}, {name: "SetElemUint64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "GetElemUint8x16", argLength: 1, commutative: false, aux: "Int8"}, {name: "SetElemUint8x16", argLength: 2, commutative: false, aux: "Int8"}, } } diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 121727e1f6b003..7a1126d433f93c 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1836,16 +1836,19 @@ const ( OpAMD64VPCMPWMasked256 OpAMD64VPCMPW512 OpAMD64VPCMPWMasked512 + OpAMD64VPEXTRW128 OpAMD64VPCMPW128 OpAMD64VPCMPWMasked128 OpAMD64VPINSRW128 OpAMD64VPCMPD512 OpAMD64VPCMPDMasked512 + OpAMD64VPEXTRD128 OpAMD64VPCMPD128 OpAMD64VPCMPDMasked128 OpAMD64VPINSRD128 OpAMD64VPCMPD256 OpAMD64VPCMPDMasked256 + OpAMD64VPEXTRQ128 OpAMD64VPCMPQ128 OpAMD64VPCMPQMasked128 OpAMD64VPINSRQ128 @@ -1853,6 +1856,7 @@ const ( OpAMD64VPCMPQMasked256 OpAMD64VPCMPQ512 OpAMD64VPCMPQMasked512 + OpAMD64VPEXTRB128 OpAMD64VPCMPB128 OpAMD64VPCMPBMasked128 OpAMD64VPINSRB128 @@ -5479,13 +5483,21 @@ const ( OpRoundWithPrecisionFloat64x8 
OpTruncSuppressExceptionWithPrecisionFloat64x8 OpTruncWithPrecisionFloat64x8 + OpGetElemInt16x8 OpSetElemInt16x8 + OpGetElemInt32x4 OpSetElemInt32x4 + OpGetElemInt64x2 OpSetElemInt64x2 + OpGetElemInt8x16 OpSetElemInt8x16 + OpGetElemUint16x8 OpSetElemUint16x8 + OpGetElemUint32x4 OpSetElemUint32x4 + OpGetElemUint64x2 OpSetElemUint64x2 + OpGetElemUint8x16 OpSetElemUint8x16 ) @@ -27718,6 +27730,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPEXTRW128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPEXTRW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, { name: "VPCMPW128", auxType: auxInt8, @@ -27798,6 +27824,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPEXTRD128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPEXTRD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, { name: "VPCMPD128", auxType: auxInt8, @@ -27877,6 +27917,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPEXTRQ128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPEXTRQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, { name: "VPCMPQ128", auxType: auxInt8, @@ -27989,6 +28043,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPEXTRB128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPEXTRB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, { name: "VPCMPB128", auxType: auxInt8, @@ -63225,48 +63293,96 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "GetElemInt16x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, { name: "SetElemInt16x8", auxType: auxInt8, argLen: 2, generic: true, }, + { + name: "GetElemInt32x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, { name: "SetElemInt32x4", auxType: auxInt8, argLen: 2, generic: true, }, + { + name: "GetElemInt64x2", + auxType: auxInt8, + argLen: 1, + generic: true, + }, { name: "SetElemInt64x2", auxType: auxInt8, argLen: 2, generic: true, }, + { + name: "GetElemInt8x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, { name: "SetElemInt8x16", auxType: auxInt8, argLen: 2, generic: true, }, + { + name: "GetElemUint16x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, { name: "SetElemUint16x8", auxType: auxInt8, argLen: 2, generic: true, }, + { + name: "GetElemUint32x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, { name: "SetElemUint32x4", auxType: auxInt8, argLen: 2, generic: true, }, + { + name: "GetElemUint64x2", + auxType: auxInt8, + argLen: 1, + generic: true, + }, { name: "SetElemUint64x2", auxType: auxInt8, argLen: 2, generic: true, }, + { + name: "GetElemUint8x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, { name: "SetElemUint8x16", auxType: auxInt8, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 7ac8c22e879359..668024a00fb52b 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ 
b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1448,6 +1448,22 @@ func rewriteValueAMD64(v *Value) bool { case OpGetClosurePtr: v.Op = OpAMD64LoweredGetClosurePtr return true + case OpGetElemInt16x8: + return rewriteValueAMD64_OpGetElemInt16x8(v) + case OpGetElemInt32x4: + return rewriteValueAMD64_OpGetElemInt32x4(v) + case OpGetElemInt64x2: + return rewriteValueAMD64_OpGetElemInt64x2(v) + case OpGetElemInt8x16: + return rewriteValueAMD64_OpGetElemInt8x16(v) + case OpGetElemUint16x8: + return rewriteValueAMD64_OpGetElemUint16x8(v) + case OpGetElemUint32x4: + return rewriteValueAMD64_OpGetElemUint32x4(v) + case OpGetElemUint64x2: + return rewriteValueAMD64_OpGetElemUint64x2(v) + case OpGetElemUint8x16: + return rewriteValueAMD64_OpGetElemUint8x16(v) case OpGetG: return rewriteValueAMD64_OpGetG(v) case OpGreaterEqualFloat32x16: @@ -30549,6 +30565,110 @@ func rewriteValueAMD64_OpFloorWithPrecisionFloat64x8(v *Value) bool { return true } } +func rewriteValueAMD64_OpGetElemInt16x8(v *Value) bool { + v_0 := v.Args[0] + // match: (GetElemInt16x8 [a] x) + // result: (VPEXTRW128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPEXTRW128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetElemInt32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (GetElemInt32x4 [a] x) + // result: (VPEXTRD128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPEXTRD128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetElemInt64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (GetElemInt64x2 [a] x) + // result: (VPEXTRQ128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPEXTRQ128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetElemInt8x16(v *Value) bool { + v_0 := v.Args[0] + // match: (GetElemInt8x16 [a] x) + // result: (VPEXTRB128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPEXTRB128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetElemUint16x8(v *Value) bool { + v_0 := v.Args[0] + // match: (GetElemUint16x8 [a] x) + // result: (VPEXTRW128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPEXTRW128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetElemUint32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (GetElemUint32x4 [a] x) + // result: (VPEXTRD128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPEXTRD128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetElemUint64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (GetElemUint64x2 [a] x) + // result: (VPEXTRQ128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPEXTRQ128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetElemUint8x16(v *Value) bool { + v_0 := v.Args[0] + // match: (GetElemUint8x16 [a] x) + // result: (VPEXTRB128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPEXTRB128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} func rewriteValueAMD64_OpGetG(v *Value) bool { v_0 := v.Args[0] // match: (GetG mem) diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index db4d2499791863..5d6ae7e3c06b7a 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ 
b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -262,6 +262,14 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float64x2.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.GetElem", opLen1Imm8(ssa.OpGetElemInt8x16, types.Types[types.TINT8], 0), sys.AMD64) + addF(simdPackage, "Int16x8.GetElem", opLen1Imm8(ssa.OpGetElemInt16x8, types.Types[types.TINT16], 0), sys.AMD64) + addF(simdPackage, "Int32x4.GetElem", opLen1Imm8(ssa.OpGetElemInt32x4, types.Types[types.TINT32], 0), sys.AMD64) + addF(simdPackage, "Int64x2.GetElem", opLen1Imm8(ssa.OpGetElemInt64x2, types.Types[types.TINT64], 0), sys.AMD64) + addF(simdPackage, "Uint8x16.GetElem", opLen1Imm8(ssa.OpGetElemUint8x16, types.Types[types.TUINT8], 0), sys.AMD64) + addF(simdPackage, "Uint16x8.GetElem", opLen1Imm8(ssa.OpGetElemUint16x8, types.Types[types.TUINT16], 0), sys.AMD64) + addF(simdPackage, "Uint32x4.GetElem", opLen1Imm8(ssa.OpGetElemUint32x4, types.Types[types.TUINT32], 0), sys.AMD64) + addF(simdPackage, "Uint64x2.GetElem", opLen1Imm8(ssa.OpGetElemUint64x2, types.Types[types.TUINT64], 0), sys.AMD64) addF(simdPackage, "Int8x16.Greater", opLen2(ssa.OpGreaterInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Greater", opLen2(ssa.OpGreaterInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x8.Greater", opLen2(ssa.OpGreaterInt16x8, types.TypeVec128), sys.AMD64) diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index 6df634b428f5d8..084b0af53937e6 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -183,6 +183,16 @@ func TestSlicesInt8SetElem(t *testing.T) { checkInt8Slices(t, a, b) } +func TestSlicesInt8GetElem(t *testing.T) { + a := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} + v := simd.LoadInt8x16Slice(a) + e := v.GetElem(2) + if e != a[2] { + t.Errorf("GetElem(2) = %d != a[2] = %d", e, a[2]) + } + +} func TestSlicesInt8TooShortLoad(t *testing.T) { defer func() { if r := recover(); r != nil { diff --git a/src/simd/stubs_amd64.go b/src/simd/stubs_amd64.go index 66ff8c545e6978..5037e4e024e1c9 100644 --- a/src/simd/stubs_amd64.go +++ b/src/simd/stubs_amd64.go @@ -1426,6 +1426,48 @@ func (x Float64x4) FusedMultiplySubAdd(y Float64x4, z Float64x4) Float64x4 // Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX func (x Float64x8) FusedMultiplySubAdd(y Float64x8, z Float64x8) Float64x8 +/* GetElem */ + +// GetElem retrieves a single constant-indexed element's value. +// +// Asm: VPEXTRB, CPU Feature: AVX512EVEX +func (x Int8x16) GetElem(imm8 uint8) int8 + +// GetElem retrieves a single constant-indexed element's value. +// +// Asm: VPEXTRW, CPU Feature: AVX512EVEX +func (x Int16x8) GetElem(imm8 uint8) int16 + +// GetElem retrieves a single constant-indexed element's value. +// +// Asm: VPEXTRD, CPU Feature: AVX +func (x Int32x4) GetElem(imm8 uint8) int32 + +// GetElem retrieves a single constant-indexed element's value. +// +// Asm: VPEXTRQ, CPU Feature: AVX +func (x Int64x2) GetElem(imm8 uint8) int64 + +// GetElem retrieves a single constant-indexed element's value. 
+// +// Asm: VPEXTRB, CPU Feature: AVX512EVEX +func (x Uint8x16) GetElem(imm8 uint8) uint8 + +// GetElem retrieves a single constant-indexed element's value. +// +// Asm: VPEXTRW, CPU Feature: AVX512EVEX +func (x Uint16x8) GetElem(imm8 uint8) uint16 + +// GetElem retrieves a single constant-indexed element's value. +// +// Asm: VPEXTRD, CPU Feature: AVX +func (x Uint32x4) GetElem(imm8 uint8) uint32 + +// GetElem retrieves a single constant-indexed element's value. +// +// Asm: VPEXTRQ, CPU Feature: AVX +func (x Uint64x2) GetElem(imm8 uint8) uint64 + /* Greater */ // Greater compares for greater than. From 35b8cf7fed49ca61a2e202b98a27fb83e93f63ab Mon Sep 17 00:00:00 2001 From: David Chase Date: Wed, 25 Jun 2025 15:58:17 -0400 Subject: [PATCH 048/139] [dev.simd] cmd/compile: tweak sort order in generator This CL is created by simdgen CL 684056 Change-Id: Ie4240098bbe701531ab82d5200e92857726f1ba7 Reviewed-on: https://go-review.googlesource.com/c/go/+/684076 LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui --- .../compile/internal/ssa/_gen/simdAMD64.rules | 832 ++-- src/simd/simd_wrapped_test.go | 4198 ++++++++--------- 2 files changed, 2515 insertions(+), 2515 deletions(-) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index bb0476fc20c3e4..b21d58b4a44a21 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -1,807 +1,807 @@ // Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. +(AbsoluteInt8x16 ...) => (VPABSB128 ...) +(AbsoluteInt8x32 ...) => (VPABSB256 ...) +(AbsoluteInt8x64 ...) => (VPABSB512 ...) +(AbsoluteInt16x8 ...) => (VPABSW128 ...) (AbsoluteInt16x16 ...) => (VPABSW256 ...) (AbsoluteInt16x32 ...) => (VPABSW512 ...) -(AbsoluteInt16x8 ...) => (VPABSW128 ...) -(AbsoluteInt32x16 ...) => (VPABSD512 ...) (AbsoluteInt32x4 ...) => (VPABSD128 ...) (AbsoluteInt32x8 ...) => (VPABSD256 ...) +(AbsoluteInt32x16 ...) => (VPABSD512 ...) (AbsoluteInt64x2 ...) => (VPABSQ128 ...) (AbsoluteInt64x4 ...) => (VPABSQ256 ...) (AbsoluteInt64x8 ...) => (VPABSQ512 ...) -(AbsoluteInt8x16 ...) => (VPABSB128 ...) -(AbsoluteInt8x32 ...) => (VPABSB256 ...) -(AbsoluteInt8x64 ...) => (VPABSB512 ...) -(AddFloat32x16 ...) => (VADDPS512 ...) (AddFloat32x4 ...) => (VADDPS128 ...) (AddFloat32x8 ...) => (VADDPS256 ...) +(AddFloat32x16 ...) => (VADDPS512 ...) (AddFloat64x2 ...) => (VADDPD128 ...) (AddFloat64x4 ...) => (VADDPD256 ...) (AddFloat64x8 ...) => (VADDPD512 ...) +(AddInt8x16 ...) => (VPADDB128 ...) +(AddInt8x32 ...) => (VPADDB256 ...) +(AddInt8x64 ...) => (VPADDB512 ...) +(AddInt16x8 ...) => (VPADDW128 ...) (AddInt16x16 ...) => (VPADDW256 ...) (AddInt16x32 ...) => (VPADDW512 ...) -(AddInt16x8 ...) => (VPADDW128 ...) -(AddInt32x16 ...) => (VPADDD512 ...) (AddInt32x4 ...) => (VPADDD128 ...) (AddInt32x8 ...) => (VPADDD256 ...) +(AddInt32x16 ...) => (VPADDD512 ...) (AddInt64x2 ...) => (VPADDQ128 ...) (AddInt64x4 ...) => (VPADDQ256 ...) (AddInt64x8 ...) => (VPADDQ512 ...) -(AddInt8x16 ...) => (VPADDB128 ...) -(AddInt8x32 ...) => (VPADDB256 ...) -(AddInt8x64 ...) => (VPADDB512 ...) +(AddUint8x16 ...) => (VPADDB128 ...) +(AddUint8x32 ...) => (VPADDB256 ...) +(AddUint8x64 ...) => (VPADDB512 ...) +(AddUint16x8 ...) => (VPADDW128 ...) (AddUint16x16 ...) => (VPADDW256 ...) (AddUint16x32 ...) => (VPADDW512 ...) -(AddUint16x8 ...) => (VPADDW128 ...) -(AddUint32x16 ...) => (VPADDD512 ...) 
(AddUint32x4 ...) => (VPADDD128 ...) (AddUint32x8 ...) => (VPADDD256 ...) +(AddUint32x16 ...) => (VPADDD512 ...) (AddUint64x2 ...) => (VPADDQ128 ...) (AddUint64x4 ...) => (VPADDQ256 ...) (AddUint64x8 ...) => (VPADDQ512 ...) -(AddUint8x16 ...) => (VPADDB128 ...) -(AddUint8x32 ...) => (VPADDB256 ...) -(AddUint8x64 ...) => (VPADDB512 ...) (AddSubFloat32x4 ...) => (VADDSUBPS128 ...) (AddSubFloat32x8 ...) => (VADDSUBPS256 ...) (AddSubFloat64x2 ...) => (VADDSUBPD128 ...) (AddSubFloat64x4 ...) => (VADDSUBPD256 ...) -(AndFloat32x16 ...) => (VANDPS512 ...) (AndFloat32x4 ...) => (VANDPS128 ...) (AndFloat32x8 ...) => (VANDPS256 ...) +(AndFloat32x16 ...) => (VANDPS512 ...) (AndFloat64x2 ...) => (VANDPD128 ...) (AndFloat64x4 ...) => (VANDPD256 ...) (AndFloat64x8 ...) => (VANDPD512 ...) -(AndInt16x16 ...) => (VPAND256 ...) +(AndInt8x16 ...) => (VPAND128 ...) +(AndInt8x32 ...) => (VPAND256 ...) (AndInt16x8 ...) => (VPAND128 ...) -(AndInt32x16 ...) => (VPANDD512 ...) +(AndInt16x16 ...) => (VPAND256 ...) (AndInt32x4 ...) => (VPAND128 ...) (AndInt32x8 ...) => (VPAND256 ...) +(AndInt32x16 ...) => (VPANDD512 ...) (AndInt64x2 ...) => (VPAND128 ...) (AndInt64x4 ...) => (VPAND256 ...) (AndInt64x8 ...) => (VPANDQ512 ...) -(AndInt8x16 ...) => (VPAND128 ...) -(AndInt8x32 ...) => (VPAND256 ...) -(AndUint16x16 ...) => (VPAND256 ...) +(AndUint8x16 ...) => (VPAND128 ...) +(AndUint8x32 ...) => (VPAND256 ...) (AndUint16x8 ...) => (VPAND128 ...) -(AndUint32x16 ...) => (VPANDD512 ...) +(AndUint16x16 ...) => (VPAND256 ...) (AndUint32x4 ...) => (VPAND128 ...) (AndUint32x8 ...) => (VPAND256 ...) +(AndUint32x16 ...) => (VPANDD512 ...) (AndUint64x2 ...) => (VPAND128 ...) (AndUint64x4 ...) => (VPAND256 ...) (AndUint64x8 ...) => (VPANDQ512 ...) -(AndUint8x16 ...) => (VPAND128 ...) -(AndUint8x32 ...) => (VPAND256 ...) -(AndNotFloat32x16 ...) => (VANDNPS512 ...) (AndNotFloat32x4 ...) => (VANDNPS128 ...) (AndNotFloat32x8 ...) => (VANDNPS256 ...) +(AndNotFloat32x16 ...) => (VANDNPS512 ...) (AndNotFloat64x2 ...) => (VANDNPD128 ...) (AndNotFloat64x4 ...) => (VANDNPD256 ...) (AndNotFloat64x8 ...) => (VANDNPD512 ...) -(AndNotInt16x16 ...) => (VPANDN256 ...) +(AndNotInt8x16 ...) => (VPANDN128 ...) +(AndNotInt8x32 ...) => (VPANDN256 ...) (AndNotInt16x8 ...) => (VPANDN128 ...) -(AndNotInt32x16 ...) => (VPANDND512 ...) +(AndNotInt16x16 ...) => (VPANDN256 ...) (AndNotInt32x4 ...) => (VPANDN128 ...) (AndNotInt32x8 ...) => (VPANDN256 ...) +(AndNotInt32x16 ...) => (VPANDND512 ...) (AndNotInt64x2 ...) => (VPANDN128 ...) (AndNotInt64x4 ...) => (VPANDN256 ...) (AndNotInt64x8 ...) => (VPANDNQ512 ...) -(AndNotInt8x16 ...) => (VPANDN128 ...) -(AndNotInt8x32 ...) => (VPANDN256 ...) -(AndNotUint16x16 ...) => (VPANDN256 ...) +(AndNotUint8x16 ...) => (VPANDN128 ...) +(AndNotUint8x32 ...) => (VPANDN256 ...) (AndNotUint16x8 ...) => (VPANDN128 ...) -(AndNotUint32x16 ...) => (VPANDND512 ...) +(AndNotUint16x16 ...) => (VPANDN256 ...) (AndNotUint32x4 ...) => (VPANDN128 ...) (AndNotUint32x8 ...) => (VPANDN256 ...) +(AndNotUint32x16 ...) => (VPANDND512 ...) (AndNotUint64x2 ...) => (VPANDN128 ...) (AndNotUint64x4 ...) => (VPANDN256 ...) (AndNotUint64x8 ...) => (VPANDNQ512 ...) -(AndNotUint8x16 ...) => (VPANDN128 ...) -(AndNotUint8x32 ...) => (VPANDN256 ...) -(ApproximateReciprocalFloat32x16 ...) => (VRCP14PS512 ...) (ApproximateReciprocalFloat32x4 ...) => (VRCP14PS128 ...) (ApproximateReciprocalFloat32x8 ...) => (VRCP14PS256 ...) +(ApproximateReciprocalFloat32x16 ...) => (VRCP14PS512 ...) (ApproximateReciprocalFloat64x2 ...) => (VRCP14PD128 ...) 
(ApproximateReciprocalFloat64x4 ...) => (VRCP14PD256 ...) (ApproximateReciprocalFloat64x8 ...) => (VRCP14PD512 ...) -(ApproximateReciprocalOfSqrtFloat32x16 ...) => (VRSQRT14PS512 ...) (ApproximateReciprocalOfSqrtFloat32x4 ...) => (VRSQRTPS128 ...) (ApproximateReciprocalOfSqrtFloat32x8 ...) => (VRSQRTPS256 ...) +(ApproximateReciprocalOfSqrtFloat32x16 ...) => (VRSQRT14PS512 ...) (ApproximateReciprocalOfSqrtFloat64x2 ...) => (VRSQRT14PD128 ...) (ApproximateReciprocalOfSqrtFloat64x4 ...) => (VRSQRT14PD256 ...) (ApproximateReciprocalOfSqrtFloat64x8 ...) => (VRSQRT14PD512 ...) -(AverageUint16x16 ...) => (VPAVGW256 ...) -(AverageUint16x32 ...) => (VPAVGW512 ...) -(AverageUint16x8 ...) => (VPAVGW128 ...) (AverageUint8x16 ...) => (VPAVGB128 ...) (AverageUint8x32 ...) => (VPAVGB256 ...) (AverageUint8x64 ...) => (VPAVGB512 ...) +(AverageUint16x8 ...) => (VPAVGW128 ...) +(AverageUint16x16 ...) => (VPAVGW256 ...) +(AverageUint16x32 ...) => (VPAVGW512 ...) (CeilFloat32x4 x) => (VROUNDPS128 [2] x) (CeilFloat32x8 x) => (VROUNDPS256 [2] x) (CeilFloat64x2 x) => (VROUNDPD128 [2] x) (CeilFloat64x4 x) => (VROUNDPD256 [2] x) -(CeilSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+10] x) (CeilSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+10] x) (CeilSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+10] x) +(CeilSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+10] x) (CeilSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+10] x) (CeilSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+10] x) (CeilSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+10] x) -(CeilWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+2] x) (CeilWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+2] x) (CeilWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+2] x) +(CeilWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+2] x) (CeilWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+2] x) (CeilWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+2] x) (CeilWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+2] x) -(DiffWithCeilSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+10] x) (DiffWithCeilSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+10] x) (DiffWithCeilSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+10] x) +(DiffWithCeilSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+10] x) (DiffWithCeilSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+10] x) (DiffWithCeilSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+10] x) (DiffWithCeilSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+10] x) -(DiffWithCeilWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+2] x) (DiffWithCeilWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+2] x) (DiffWithCeilWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+2] x) +(DiffWithCeilWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+2] x) (DiffWithCeilWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+2] x) (DiffWithCeilWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+2] x) (DiffWithCeilWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+2] x) -(DiffWithFloorSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+9] x) (DiffWithFloorSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+9] x) (DiffWithFloorSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+9] x) 
+(DiffWithFloorSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+9] x) (DiffWithFloorSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+9] x) (DiffWithFloorSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+9] x) (DiffWithFloorSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+9] x) -(DiffWithFloorWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+1] x) (DiffWithFloorWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+1] x) (DiffWithFloorWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+1] x) +(DiffWithFloorWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+1] x) (DiffWithFloorWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+1] x) (DiffWithFloorWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+1] x) (DiffWithFloorWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+1] x) -(DiffWithRoundSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+8] x) (DiffWithRoundSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+8] x) (DiffWithRoundSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+8] x) +(DiffWithRoundSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+8] x) (DiffWithRoundSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+8] x) (DiffWithRoundSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+8] x) (DiffWithRoundSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+8] x) -(DiffWithRoundWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+0] x) (DiffWithRoundWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+0] x) (DiffWithRoundWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+0] x) +(DiffWithRoundWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+0] x) (DiffWithRoundWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+0] x) (DiffWithRoundWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+0] x) (DiffWithRoundWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+0] x) -(DiffWithTruncSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+11] x) (DiffWithTruncSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+11] x) (DiffWithTruncSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+11] x) +(DiffWithTruncSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+11] x) (DiffWithTruncSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+11] x) (DiffWithTruncSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+11] x) (DiffWithTruncSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+11] x) -(DiffWithTruncWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+3] x) (DiffWithTruncWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+3] x) (DiffWithTruncWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+3] x) +(DiffWithTruncWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+3] x) (DiffWithTruncWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+3] x) (DiffWithTruncWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+3] x) (DiffWithTruncWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+3] x) -(DivFloat32x16 ...) => (VDIVPS512 ...) (DivFloat32x4 ...) => (VDIVPS128 ...) (DivFloat32x8 ...) => (VDIVPS256 ...) +(DivFloat32x16 ...) => (VDIVPS512 ...) (DivFloat64x2 ...) => (VDIVPD128 ...) (DivFloat64x4 ...) => (VDIVPD256 ...) (DivFloat64x8 ...) => (VDIVPD512 ...) 
(DotProdBroadcastFloat64x2 x y) => (VDPPD128 [127] x y) -(EqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [0] x y)) (EqualFloat32x4 x y) => (VCMPPS128 [0] x y) (EqualFloat32x8 x y) => (VCMPPS256 [0] x y) +(EqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [0] x y)) (EqualFloat64x2 x y) => (VCMPPD128 [0] x y) (EqualFloat64x4 x y) => (VCMPPD256 [0] x y) (EqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [0] x y)) +(EqualInt8x16 ...) => (VPCMPEQB128 ...) +(EqualInt8x32 ...) => (VPCMPEQB256 ...) +(EqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [0] x y)) +(EqualInt16x8 ...) => (VPCMPEQW128 ...) (EqualInt16x16 ...) => (VPCMPEQW256 ...) (EqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [0] x y)) -(EqualInt16x8 ...) => (VPCMPEQW128 ...) -(EqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [0] x y)) (EqualInt32x4 ...) => (VPCMPEQD128 ...) (EqualInt32x8 ...) => (VPCMPEQD256 ...) +(EqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [0] x y)) (EqualInt64x2 ...) => (VPCMPEQQ128 ...) (EqualInt64x4 ...) => (VPCMPEQQ256 ...) (EqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [0] x y)) -(EqualInt8x16 ...) => (VPCMPEQB128 ...) -(EqualInt8x32 ...) => (VPCMPEQB256 ...) -(EqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [0] x y)) +(EqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [0] x y)) +(EqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [0] x y)) +(EqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [0] x y)) +(EqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [0] x y)) (EqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [0] x y)) (EqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [0] x y)) -(EqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [0] x y)) -(EqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [0] x y)) (EqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [0] x y)) (EqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [0] x y)) +(EqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [0] x y)) (EqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [0] x y)) (EqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [0] x y)) (EqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [0] x y)) -(EqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [0] x y)) -(EqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [0] x y)) -(EqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [0] x y)) (FloorFloat32x4 x) => (VROUNDPS128 [1] x) (FloorFloat32x8 x) => (VROUNDPS256 [1] x) (FloorFloat64x2 x) => (VROUNDPD128 [1] x) (FloorFloat64x4 x) => (VROUNDPD256 [1] x) -(FloorSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+9] x) (FloorSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+9] x) (FloorSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+9] x) +(FloorSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+9] x) (FloorSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+9] x) (FloorSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+9] x) (FloorSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+9] x) -(FloorWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+1] x) (FloorWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+1] x) (FloorWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+1] x) +(FloorWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+1] x) (FloorWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+1] x) (FloorWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+1] x) (FloorWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+1] x) -(FusedMultiplyAddFloat32x16 ...) 
=> (VFMADD213PS512 ...) (FusedMultiplyAddFloat32x4 ...) => (VFMADD213PS128 ...) (FusedMultiplyAddFloat32x8 ...) => (VFMADD213PS256 ...) +(FusedMultiplyAddFloat32x16 ...) => (VFMADD213PS512 ...) (FusedMultiplyAddFloat64x2 ...) => (VFMADD213PD128 ...) (FusedMultiplyAddFloat64x4 ...) => (VFMADD213PD256 ...) (FusedMultiplyAddFloat64x8 ...) => (VFMADD213PD512 ...) -(FusedMultiplyAddSubFloat32x16 ...) => (VFMADDSUB213PS512 ...) (FusedMultiplyAddSubFloat32x4 ...) => (VFMADDSUB213PS128 ...) (FusedMultiplyAddSubFloat32x8 ...) => (VFMADDSUB213PS256 ...) +(FusedMultiplyAddSubFloat32x16 ...) => (VFMADDSUB213PS512 ...) (FusedMultiplyAddSubFloat64x2 ...) => (VFMADDSUB213PD128 ...) (FusedMultiplyAddSubFloat64x4 ...) => (VFMADDSUB213PD256 ...) (FusedMultiplyAddSubFloat64x8 ...) => (VFMADDSUB213PD512 ...) -(FusedMultiplySubAddFloat32x16 ...) => (VFMSUBADD213PS512 ...) (FusedMultiplySubAddFloat32x4 ...) => (VFMSUBADD213PS128 ...) (FusedMultiplySubAddFloat32x8 ...) => (VFMSUBADD213PS256 ...) +(FusedMultiplySubAddFloat32x16 ...) => (VFMSUBADD213PS512 ...) (FusedMultiplySubAddFloat64x2 ...) => (VFMSUBADD213PD128 ...) (FusedMultiplySubAddFloat64x4 ...) => (VFMSUBADD213PD256 ...) (FusedMultiplySubAddFloat64x8 ...) => (VFMSUBADD213PD512 ...) +(GetElemInt8x16 [a] x) => (VPEXTRB128 [a] x) (GetElemInt16x8 [a] x) => (VPEXTRW128 [a] x) (GetElemInt32x4 [a] x) => (VPEXTRD128 [a] x) (GetElemInt64x2 [a] x) => (VPEXTRQ128 [a] x) -(GetElemInt8x16 [a] x) => (VPEXTRB128 [a] x) +(GetElemUint8x16 [a] x) => (VPEXTRB128 [a] x) (GetElemUint16x8 [a] x) => (VPEXTRW128 [a] x) (GetElemUint32x4 [a] x) => (VPEXTRD128 [a] x) (GetElemUint64x2 [a] x) => (VPEXTRQ128 [a] x) -(GetElemUint8x16 [a] x) => (VPEXTRB128 [a] x) -(GreaterFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [6] x y)) (GreaterFloat32x4 x y) => (VCMPPS128 [6] x y) (GreaterFloat32x8 x y) => (VCMPPS256 [6] x y) +(GreaterFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [6] x y)) (GreaterFloat64x2 x y) => (VCMPPD128 [6] x y) (GreaterFloat64x4 x y) => (VCMPPD256 [6] x y) (GreaterFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [6] x y)) +(GreaterInt8x16 ...) => (VPCMPGTB128 ...) +(GreaterInt8x32 ...) => (VPCMPGTB256 ...) +(GreaterInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [6] x y)) +(GreaterInt16x8 ...) => (VPCMPGTW128 ...) (GreaterInt16x16 ...) => (VPCMPGTW256 ...) (GreaterInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [6] x y)) -(GreaterInt16x8 ...) => (VPCMPGTW128 ...) -(GreaterInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [6] x y)) (GreaterInt32x4 ...) => (VPCMPGTD128 ...) (GreaterInt32x8 ...) => (VPCMPGTD256 ...) +(GreaterInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [6] x y)) (GreaterInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [6] x y)) (GreaterInt64x4 ...) => (VPCMPGTQ256 ...) (GreaterInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [6] x y)) -(GreaterInt8x16 ...) => (VPCMPGTB128 ...) -(GreaterInt8x32 ...) => (VPCMPGTB256 ...) 
-(GreaterInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [6] x y)) +(GreaterUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [6] x y)) +(GreaterUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [6] x y)) +(GreaterUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [6] x y)) +(GreaterUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [6] x y)) (GreaterUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [6] x y)) (GreaterUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [6] x y)) -(GreaterUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [6] x y)) -(GreaterUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [6] x y)) (GreaterUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [6] x y)) (GreaterUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [6] x y)) +(GreaterUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [6] x y)) (GreaterUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [6] x y)) (GreaterUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [6] x y)) (GreaterUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [6] x y)) -(GreaterUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [6] x y)) -(GreaterUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [6] x y)) -(GreaterUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [6] x y)) -(GreaterEqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [5] x y)) (GreaterEqualFloat32x4 x y) => (VCMPPS128 [5] x y) (GreaterEqualFloat32x8 x y) => (VCMPPS256 [5] x y) +(GreaterEqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [5] x y)) (GreaterEqualFloat64x2 x y) => (VCMPPD128 [5] x y) (GreaterEqualFloat64x4 x y) => (VCMPPD256 [5] x y) (GreaterEqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [5] x y)) +(GreaterEqualInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [5] x y)) +(GreaterEqualInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [5] x y)) +(GreaterEqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [5] x y)) +(GreaterEqualInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [5] x y)) (GreaterEqualInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [5] x y)) (GreaterEqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [5] x y)) -(GreaterEqualInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [5] x y)) -(GreaterEqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [5] x y)) (GreaterEqualInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [5] x y)) (GreaterEqualInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [5] x y)) +(GreaterEqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [5] x y)) (GreaterEqualInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [5] x y)) (GreaterEqualInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [5] x y)) (GreaterEqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [5] x y)) -(GreaterEqualInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [5] x y)) -(GreaterEqualInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [5] x y)) -(GreaterEqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [5] x y)) +(GreaterEqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [5] x y)) +(GreaterEqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [5] x y)) +(GreaterEqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [5] x y)) +(GreaterEqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [5] x y)) (GreaterEqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [5] x y)) (GreaterEqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [5] x y)) -(GreaterEqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [5] x y)) -(GreaterEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [5] x y)) (GreaterEqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [5] x y)) (GreaterEqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [5] x y)) +(GreaterEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [5] x y)) (GreaterEqualUint64x2 x y) => 
(VPMOVMToVec64x2 (VPCMPUQ128 [5] x y)) (GreaterEqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [5] x y)) (GreaterEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [5] x y)) -(GreaterEqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [5] x y)) -(GreaterEqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [5] x y)) -(GreaterEqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [5] x y)) -(IsNanFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [3] x y)) (IsNanFloat32x4 x y) => (VCMPPS128 [3] x y) (IsNanFloat32x8 x y) => (VCMPPS256 [3] x y) +(IsNanFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [3] x y)) (IsNanFloat64x2 x y) => (VCMPPD128 [3] x y) (IsNanFloat64x4 x y) => (VCMPPD256 [3] x y) (IsNanFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [3] x y)) -(LessFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [1] x y)) (LessFloat32x4 x y) => (VCMPPS128 [1] x y) (LessFloat32x8 x y) => (VCMPPS256 [1] x y) +(LessFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [1] x y)) (LessFloat64x2 x y) => (VCMPPD128 [1] x y) (LessFloat64x4 x y) => (VCMPPD256 [1] x y) (LessFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [1] x y)) +(LessInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [1] x y)) +(LessInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [1] x y)) +(LessInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [1] x y)) +(LessInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [1] x y)) (LessInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [1] x y)) (LessInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [1] x y)) -(LessInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [1] x y)) -(LessInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [1] x y)) (LessInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [1] x y)) (LessInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [1] x y)) +(LessInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [1] x y)) (LessInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [1] x y)) (LessInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [1] x y)) (LessInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [1] x y)) -(LessInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [1] x y)) -(LessInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [1] x y)) -(LessInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [1] x y)) +(LessUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [1] x y)) +(LessUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [1] x y)) +(LessUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [1] x y)) +(LessUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [1] x y)) (LessUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [1] x y)) (LessUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [1] x y)) -(LessUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [1] x y)) -(LessUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [1] x y)) (LessUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [1] x y)) (LessUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [1] x y)) +(LessUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [1] x y)) (LessUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [1] x y)) (LessUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [1] x y)) (LessUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [1] x y)) -(LessUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [1] x y)) -(LessUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [1] x y)) -(LessUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [1] x y)) -(LessEqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [2] x y)) (LessEqualFloat32x4 x y) => (VCMPPS128 [2] x y) (LessEqualFloat32x8 x y) => (VCMPPS256 [2] x y) +(LessEqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [2] x y)) (LessEqualFloat64x2 x y) => (VCMPPD128 [2] x y) (LessEqualFloat64x4 x y) => (VCMPPD256 [2] x y) (LessEqualFloat64x8 x y) => 
(VPMOVMToVec64x8 (VCMPPD512 [2] x y)) +(LessEqualInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [2] x y)) +(LessEqualInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [2] x y)) +(LessEqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [2] x y)) +(LessEqualInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [2] x y)) (LessEqualInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [2] x y)) (LessEqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [2] x y)) -(LessEqualInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [2] x y)) -(LessEqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [2] x y)) (LessEqualInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [2] x y)) (LessEqualInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [2] x y)) +(LessEqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [2] x y)) (LessEqualInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [2] x y)) (LessEqualInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [2] x y)) (LessEqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [2] x y)) -(LessEqualInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [2] x y)) -(LessEqualInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [2] x y)) -(LessEqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [2] x y)) +(LessEqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [2] x y)) +(LessEqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [2] x y)) +(LessEqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [2] x y)) +(LessEqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [2] x y)) (LessEqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [2] x y)) (LessEqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [2] x y)) -(LessEqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [2] x y)) -(LessEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [2] x y)) (LessEqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [2] x y)) (LessEqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [2] x y)) +(LessEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [2] x y)) (LessEqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [2] x y)) (LessEqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [2] x y)) (LessEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [2] x y)) -(LessEqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [2] x y)) -(LessEqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [2] x y)) -(LessEqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [2] x y)) +(MaskedAbsoluteInt8x16 x mask) => (VPABSBMasked128 x (VPMOVVec8x16ToM mask)) +(MaskedAbsoluteInt8x32 x mask) => (VPABSBMasked256 x (VPMOVVec8x32ToM mask)) +(MaskedAbsoluteInt8x64 x mask) => (VPABSBMasked512 x (VPMOVVec8x64ToM mask)) +(MaskedAbsoluteInt16x8 x mask) => (VPABSWMasked128 x (VPMOVVec16x8ToM mask)) (MaskedAbsoluteInt16x16 x mask) => (VPABSWMasked256 x (VPMOVVec16x16ToM mask)) (MaskedAbsoluteInt16x32 x mask) => (VPABSWMasked512 x (VPMOVVec16x32ToM mask)) -(MaskedAbsoluteInt16x8 x mask) => (VPABSWMasked128 x (VPMOVVec16x8ToM mask)) -(MaskedAbsoluteInt32x16 x mask) => (VPABSDMasked512 x (VPMOVVec32x16ToM mask)) (MaskedAbsoluteInt32x4 x mask) => (VPABSDMasked128 x (VPMOVVec32x4ToM mask)) (MaskedAbsoluteInt32x8 x mask) => (VPABSDMasked256 x (VPMOVVec32x8ToM mask)) +(MaskedAbsoluteInt32x16 x mask) => (VPABSDMasked512 x (VPMOVVec32x16ToM mask)) (MaskedAbsoluteInt64x2 x mask) => (VPABSQMasked128 x (VPMOVVec64x2ToM mask)) (MaskedAbsoluteInt64x4 x mask) => (VPABSQMasked256 x (VPMOVVec64x4ToM mask)) (MaskedAbsoluteInt64x8 x mask) => (VPABSQMasked512 x (VPMOVVec64x8ToM mask)) -(MaskedAbsoluteInt8x16 x mask) => (VPABSBMasked128 x (VPMOVVec8x16ToM mask)) -(MaskedAbsoluteInt8x32 x mask) => (VPABSBMasked256 x (VPMOVVec8x32ToM mask)) 
-(MaskedAbsoluteInt8x64 x mask) => (VPABSBMasked512 x (VPMOVVec8x64ToM mask)) -(MaskedAddFloat32x16 x y mask) => (VADDPSMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedAddFloat32x4 x y mask) => (VADDPSMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedAddFloat32x8 x y mask) => (VADDPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedAddFloat32x16 x y mask) => (VADDPSMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedAddFloat64x2 x y mask) => (VADDPDMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedAddFloat64x4 x y mask) => (VADDPDMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedAddFloat64x8 x y mask) => (VADDPDMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedAddInt8x16 x y mask) => (VPADDBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaskedAddInt8x32 x y mask) => (VPADDBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaskedAddInt8x64 x y mask) => (VPADDBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedAddInt16x8 x y mask) => (VPADDWMasked128 x y (VPMOVVec16x8ToM mask)) (MaskedAddInt16x16 x y mask) => (VPADDWMasked256 x y (VPMOVVec16x16ToM mask)) (MaskedAddInt16x32 x y mask) => (VPADDWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedAddInt16x8 x y mask) => (VPADDWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedAddInt32x16 x y mask) => (VPADDDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedAddInt32x4 x y mask) => (VPADDDMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedAddInt32x8 x y mask) => (VPADDDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedAddInt32x16 x y mask) => (VPADDDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedAddInt64x2 x y mask) => (VPADDQMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedAddInt64x4 x y mask) => (VPADDQMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedAddInt64x8 x y mask) => (VPADDQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedAddInt8x16 x y mask) => (VPADDBMasked128 x y (VPMOVVec8x16ToM mask)) -(MaskedAddInt8x32 x y mask) => (VPADDBMasked256 x y (VPMOVVec8x32ToM mask)) -(MaskedAddInt8x64 x y mask) => (VPADDBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedAddUint8x16 x y mask) => (VPADDBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaskedAddUint8x32 x y mask) => (VPADDBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaskedAddUint8x64 x y mask) => (VPADDBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedAddUint16x8 x y mask) => (VPADDWMasked128 x y (VPMOVVec16x8ToM mask)) (MaskedAddUint16x16 x y mask) => (VPADDWMasked256 x y (VPMOVVec16x16ToM mask)) (MaskedAddUint16x32 x y mask) => (VPADDWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedAddUint16x8 x y mask) => (VPADDWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedAddUint32x16 x y mask) => (VPADDDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedAddUint32x4 x y mask) => (VPADDDMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedAddUint32x8 x y mask) => (VPADDDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedAddUint32x16 x y mask) => (VPADDDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedAddUint64x2 x y mask) => (VPADDQMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedAddUint64x4 x y mask) => (VPADDQMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedAddUint64x8 x y mask) => (VPADDQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedAddUint8x16 x y mask) => (VPADDBMasked128 x y (VPMOVVec8x16ToM mask)) -(MaskedAddUint8x32 x y mask) => (VPADDBMasked256 x y (VPMOVVec8x32ToM mask)) -(MaskedAddUint8x64 x y mask) => (VPADDBMasked512 x y (VPMOVVec8x64ToM mask)) -(MaskedAndFloat32x16 x y mask) => (VANDPSMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedAndFloat32x4 x y mask) => (VANDPSMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedAndFloat32x8 x y mask) => (VANDPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedAndFloat32x16 x y mask) => 
(VANDPSMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedAndFloat64x2 x y mask) => (VANDPDMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedAndFloat64x4 x y mask) => (VANDPDMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedAndFloat64x8 x y mask) => (VANDPDMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedAndInt32x16 x y mask) => (VPANDDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedAndInt32x4 x y mask) => (VPANDDMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedAndInt32x8 x y mask) => (VPANDDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedAndInt32x16 x y mask) => (VPANDDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedAndInt64x2 x y mask) => (VPANDQMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedAndInt64x4 x y mask) => (VPANDQMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedAndInt64x8 x y mask) => (VPANDQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedAndUint32x16 x y mask) => (VPANDDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedAndUint32x4 x y mask) => (VPANDDMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedAndUint32x8 x y mask) => (VPANDDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedAndUint32x16 x y mask) => (VPANDDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedAndUint64x2 x y mask) => (VPANDQMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedAndUint64x4 x y mask) => (VPANDQMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedAndUint64x8 x y mask) => (VPANDQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedAndNotFloat32x16 x y mask) => (VANDNPSMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedAndNotFloat32x4 x y mask) => (VANDNPSMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedAndNotFloat32x8 x y mask) => (VANDNPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedAndNotFloat32x16 x y mask) => (VANDNPSMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedAndNotFloat64x2 x y mask) => (VANDNPDMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedAndNotFloat64x4 x y mask) => (VANDNPDMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedAndNotFloat64x8 x y mask) => (VANDNPDMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedAndNotInt32x16 x y mask) => (VPANDNDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedAndNotInt32x4 x y mask) => (VPANDNDMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedAndNotInt32x8 x y mask) => (VPANDNDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedAndNotInt32x16 x y mask) => (VPANDNDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedAndNotInt64x2 x y mask) => (VPANDNQMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedAndNotInt64x4 x y mask) => (VPANDNQMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedAndNotInt64x8 x y mask) => (VPANDNQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedAndNotUint32x16 x y mask) => (VPANDNDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedAndNotUint32x4 x y mask) => (VPANDNDMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedAndNotUint32x8 x y mask) => (VPANDNDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedAndNotUint32x16 x y mask) => (VPANDNDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedAndNotUint64x2 x y mask) => (VPANDNQMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedAndNotUint64x4 x y mask) => (VPANDNQMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedAndNotUint64x8 x y mask) => (VPANDNQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedApproximateReciprocalFloat32x16 x mask) => (VRCP14PSMasked512 x (VPMOVVec32x16ToM mask)) (MaskedApproximateReciprocalFloat32x4 x mask) => (VRCP14PSMasked128 x (VPMOVVec32x4ToM mask)) (MaskedApproximateReciprocalFloat32x8 x mask) => (VRCP14PSMasked256 x (VPMOVVec32x8ToM mask)) +(MaskedApproximateReciprocalFloat32x16 x mask) => (VRCP14PSMasked512 x (VPMOVVec32x16ToM mask)) (MaskedApproximateReciprocalFloat64x2 x mask) => (VRCP14PDMasked128 x (VPMOVVec64x2ToM 
mask)) (MaskedApproximateReciprocalFloat64x4 x mask) => (VRCP14PDMasked256 x (VPMOVVec64x4ToM mask)) (MaskedApproximateReciprocalFloat64x8 x mask) => (VRCP14PDMasked512 x (VPMOVVec64x8ToM mask)) -(MaskedApproximateReciprocalOfSqrtFloat32x16 x mask) => (VRSQRT14PSMasked512 x (VPMOVVec32x16ToM mask)) (MaskedApproximateReciprocalOfSqrtFloat32x4 x mask) => (VRSQRT14PSMasked128 x (VPMOVVec32x4ToM mask)) (MaskedApproximateReciprocalOfSqrtFloat32x8 x mask) => (VRSQRT14PSMasked256 x (VPMOVVec32x8ToM mask)) +(MaskedApproximateReciprocalOfSqrtFloat32x16 x mask) => (VRSQRT14PSMasked512 x (VPMOVVec32x16ToM mask)) (MaskedApproximateReciprocalOfSqrtFloat64x2 x mask) => (VRSQRT14PDMasked128 x (VPMOVVec64x2ToM mask)) (MaskedApproximateReciprocalOfSqrtFloat64x4 x mask) => (VRSQRT14PDMasked256 x (VPMOVVec64x4ToM mask)) (MaskedApproximateReciprocalOfSqrtFloat64x8 x mask) => (VRSQRT14PDMasked512 x (VPMOVVec64x8ToM mask)) -(MaskedAverageUint16x16 x y mask) => (VPAVGWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedAverageUint16x32 x y mask) => (VPAVGWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedAverageUint16x8 x y mask) => (VPAVGWMasked128 x y (VPMOVVec16x8ToM mask)) (MaskedAverageUint8x16 x y mask) => (VPAVGBMasked128 x y (VPMOVVec8x16ToM mask)) (MaskedAverageUint8x32 x y mask) => (VPAVGBMasked256 x y (VPMOVVec8x32ToM mask)) (MaskedAverageUint8x64 x y mask) => (VPAVGBMasked512 x y (VPMOVVec8x64ToM mask)) -(MaskedCeilSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+10] x (VPMOVVec32x16ToM mask)) +(MaskedAverageUint16x8 x y mask) => (VPAVGWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedAverageUint16x16 x y mask) => (VPAVGWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedAverageUint16x32 x y mask) => (VPAVGWMasked512 x y (VPMOVVec16x32ToM mask)) (MaskedCeilSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+10] x (VPMOVVec32x4ToM mask)) (MaskedCeilSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+10] x (VPMOVVec32x8ToM mask)) +(MaskedCeilSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+10] x (VPMOVVec32x16ToM mask)) (MaskedCeilSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+10] x (VPMOVVec64x2ToM mask)) (MaskedCeilSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+10] x (VPMOVVec64x4ToM mask)) (MaskedCeilSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+10] x (VPMOVVec64x8ToM mask)) -(MaskedCeilWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) (MaskedCeilWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) (MaskedCeilWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) +(MaskedCeilWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) (MaskedCeilWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) (MaskedCeilWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) (MaskedCeilWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) -(MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+10] x (VPMOVVec32x16ToM mask)) (MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+10] x (VPMOVVec32x4ToM mask)) (MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8 
[a] x mask) => (VREDUCEPSMasked256 [a+10] x (VPMOVVec32x8ToM mask)) +(MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+10] x (VPMOVVec32x16ToM mask)) (MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+10] x (VPMOVVec64x2ToM mask)) (MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+10] x (VPMOVVec64x4ToM mask)) (MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+10] x (VPMOVVec64x8ToM mask)) -(MaskedDiffWithCeilWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) (MaskedDiffWithCeilWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) (MaskedDiffWithCeilWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) +(MaskedDiffWithCeilWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) (MaskedDiffWithCeilWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) (MaskedDiffWithCeilWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) (MaskedDiffWithCeilWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) -(MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+9] x (VPMOVVec32x16ToM mask)) (MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+9] x (VPMOVVec32x4ToM mask)) (MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+9] x (VPMOVVec32x8ToM mask)) +(MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+9] x (VPMOVVec32x16ToM mask)) (MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+9] x (VPMOVVec64x2ToM mask)) (MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+9] x (VPMOVVec64x4ToM mask)) (MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+9] x (VPMOVVec64x8ToM mask)) -(MaskedDiffWithFloorWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) (MaskedDiffWithFloorWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) (MaskedDiffWithFloorWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) +(MaskedDiffWithFloorWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) (MaskedDiffWithFloorWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) (MaskedDiffWithFloorWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) (MaskedDiffWithFloorWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) -(MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+8] x (VPMOVVec32x16ToM mask)) (MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+8] x (VPMOVVec32x4ToM mask)) (MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+8] x (VPMOVVec32x8ToM mask)) +(MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+8] x (VPMOVVec32x16ToM mask)) 
(MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+8] x (VPMOVVec64x2ToM mask)) (MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+8] x (VPMOVVec64x4ToM mask)) (MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+8] x (VPMOVVec64x8ToM mask)) -(MaskedDiffWithRoundWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) (MaskedDiffWithRoundWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) (MaskedDiffWithRoundWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) +(MaskedDiffWithRoundWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) (MaskedDiffWithRoundWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) (MaskedDiffWithRoundWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) (MaskedDiffWithRoundWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) -(MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+11] x (VPMOVVec32x16ToM mask)) (MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+11] x (VPMOVVec32x4ToM mask)) (MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+11] x (VPMOVVec32x8ToM mask)) +(MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+11] x (VPMOVVec32x16ToM mask)) (MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+11] x (VPMOVVec64x2ToM mask)) (MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+11] x (VPMOVVec64x4ToM mask)) (MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+11] x (VPMOVVec64x8ToM mask)) -(MaskedDiffWithTruncWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) (MaskedDiffWithTruncWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) (MaskedDiffWithTruncWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) +(MaskedDiffWithTruncWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) (MaskedDiffWithTruncWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) (MaskedDiffWithTruncWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) (MaskedDiffWithTruncWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) -(MaskedDivFloat32x16 x y mask) => (VDIVPSMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedDivFloat32x4 x y mask) => (VDIVPSMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedDivFloat32x8 x y mask) => (VDIVPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedDivFloat32x16 x y mask) => (VDIVPSMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedDivFloat64x2 x y mask) => (VDIVPDMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedDivFloat64x4 x y mask) => (VDIVPDMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedDivFloat64x8 x y mask) => (VDIVPDMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [0] x y (VPMOVVec32x16ToM mask))) (MaskedEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 
(VCMPPSMasked128 [0] x y (VPMOVVec32x4ToM mask))) (MaskedEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [0] x y (VPMOVVec32x8ToM mask))) +(MaskedEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [0] x y (VPMOVVec32x16ToM mask))) (MaskedEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [0] x y (VPMOVVec64x2ToM mask))) (MaskedEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [0] x y (VPMOVVec64x4ToM mask))) (MaskedEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [0] x y (VPMOVVec64x8ToM mask))) +(MaskedEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [0] x y (VPMOVVec8x16ToM mask))) +(MaskedEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [0] x y (VPMOVVec8x32ToM mask))) +(MaskedEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [0] x y (VPMOVVec8x64ToM mask))) +(MaskedEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [0] x y (VPMOVVec16x8ToM mask))) (MaskedEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [0] x y (VPMOVVec16x16ToM mask))) (MaskedEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [0] x y (VPMOVVec16x32ToM mask))) -(MaskedEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [0] x y (VPMOVVec16x8ToM mask))) -(MaskedEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [0] x y (VPMOVVec32x16ToM mask))) (MaskedEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [0] x y (VPMOVVec32x4ToM mask))) (MaskedEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [0] x y (VPMOVVec32x8ToM mask))) +(MaskedEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [0] x y (VPMOVVec32x16ToM mask))) (MaskedEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [0] x y (VPMOVVec64x2ToM mask))) (MaskedEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [0] x y (VPMOVVec64x4ToM mask))) (MaskedEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [0] x y (VPMOVVec64x8ToM mask))) -(MaskedEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [0] x y (VPMOVVec8x16ToM mask))) -(MaskedEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [0] x y (VPMOVVec8x32ToM mask))) -(MaskedEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [0] x y (VPMOVVec8x64ToM mask))) +(MaskedEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [0] x y (VPMOVVec8x16ToM mask))) +(MaskedEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [0] x y (VPMOVVec8x32ToM mask))) +(MaskedEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [0] x y (VPMOVVec8x64ToM mask))) +(MaskedEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [0] x y (VPMOVVec16x8ToM mask))) (MaskedEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [0] x y (VPMOVVec16x16ToM mask))) (MaskedEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [0] x y (VPMOVVec16x32ToM mask))) -(MaskedEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [0] x y (VPMOVVec16x8ToM mask))) -(MaskedEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [0] x y (VPMOVVec32x16ToM mask))) (MaskedEqualUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [0] x y (VPMOVVec32x4ToM mask))) (MaskedEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [0] x y (VPMOVVec32x8ToM mask))) +(MaskedEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [0] x y (VPMOVVec32x16ToM mask))) (MaskedEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [0] x y (VPMOVVec64x2ToM mask))) (MaskedEqualUint64x4 x y 
mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [0] x y (VPMOVVec64x4ToM mask))) (MaskedEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [0] x y (VPMOVVec64x8ToM mask))) -(MaskedEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [0] x y (VPMOVVec8x16ToM mask))) -(MaskedEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [0] x y (VPMOVVec8x32ToM mask))) -(MaskedEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [0] x y (VPMOVVec8x64ToM mask))) -(MaskedFloorSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+9] x (VPMOVVec32x16ToM mask)) (MaskedFloorSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+9] x (VPMOVVec32x4ToM mask)) (MaskedFloorSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+9] x (VPMOVVec32x8ToM mask)) +(MaskedFloorSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+9] x (VPMOVVec32x16ToM mask)) (MaskedFloorSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+9] x (VPMOVVec64x2ToM mask)) (MaskedFloorSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+9] x (VPMOVVec64x4ToM mask)) (MaskedFloorSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+9] x (VPMOVVec64x8ToM mask)) -(MaskedFloorWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) (MaskedFloorWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) (MaskedFloorWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) +(MaskedFloorWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) (MaskedFloorWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) (MaskedFloorWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) (MaskedFloorWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) -(MaskedFusedMultiplyAddFloat32x16 x y z mask) => (VFMADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) (MaskedFusedMultiplyAddFloat32x4 x y z mask) => (VFMADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) (MaskedFusedMultiplyAddFloat32x8 x y z mask) => (VFMADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedFusedMultiplyAddFloat32x16 x y z mask) => (VFMADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) (MaskedFusedMultiplyAddFloat64x2 x y z mask) => (VFMADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) (MaskedFusedMultiplyAddFloat64x4 x y z mask) => (VFMADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) (MaskedFusedMultiplyAddFloat64x8 x y z mask) => (VFMADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedFusedMultiplyAddSubFloat32x16 x y z mask) => (VFMADDSUB213PSMasked512 x y z (VPMOVVec32x16ToM mask)) (MaskedFusedMultiplyAddSubFloat32x4 x y z mask) => (VFMADDSUB213PSMasked128 x y z (VPMOVVec32x4ToM mask)) (MaskedFusedMultiplyAddSubFloat32x8 x y z mask) => (VFMADDSUB213PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedFusedMultiplyAddSubFloat32x16 x y z mask) => (VFMADDSUB213PSMasked512 x y z (VPMOVVec32x16ToM mask)) (MaskedFusedMultiplyAddSubFloat64x2 x y z mask) => (VFMADDSUB213PDMasked128 x y z (VPMOVVec64x2ToM mask)) (MaskedFusedMultiplyAddSubFloat64x4 x y z mask) => (VFMADDSUB213PDMasked256 x y z (VPMOVVec64x4ToM mask)) (MaskedFusedMultiplyAddSubFloat64x8 x y z mask) => (VFMADDSUB213PDMasked512 x y z (VPMOVVec64x8ToM mask)) 
-(MaskedFusedMultiplySubAddFloat32x16 x y z mask) => (VFMSUBADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) (MaskedFusedMultiplySubAddFloat32x4 x y z mask) => (VFMSUBADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) (MaskedFusedMultiplySubAddFloat32x8 x y z mask) => (VFMSUBADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedFusedMultiplySubAddFloat32x16 x y z mask) => (VFMSUBADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) (MaskedFusedMultiplySubAddFloat64x2 x y z mask) => (VFMSUBADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) (MaskedFusedMultiplySubAddFloat64x4 x y z mask) => (VFMSUBADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) (MaskedFusedMultiplySubAddFloat64x8 x y z mask) => (VFMSUBADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedGreaterFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [6] x y (VPMOVVec32x16ToM mask))) (MaskedGreaterFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [6] x y (VPMOVVec32x4ToM mask))) (MaskedGreaterFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [6] x y (VPMOVVec32x8ToM mask))) +(MaskedGreaterFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [6] x y (VPMOVVec32x16ToM mask))) (MaskedGreaterFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [6] x y (VPMOVVec64x2ToM mask))) (MaskedGreaterFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [6] x y (VPMOVVec64x4ToM mask))) (MaskedGreaterFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [6] x y (VPMOVVec64x8ToM mask))) +(MaskedGreaterInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [6] x y (VPMOVVec8x16ToM mask))) +(MaskedGreaterInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [6] x y (VPMOVVec8x32ToM mask))) +(MaskedGreaterInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [6] x y (VPMOVVec8x64ToM mask))) +(MaskedGreaterInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [6] x y (VPMOVVec16x8ToM mask))) (MaskedGreaterInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [6] x y (VPMOVVec16x16ToM mask))) (MaskedGreaterInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [6] x y (VPMOVVec16x32ToM mask))) -(MaskedGreaterInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [6] x y (VPMOVVec16x8ToM mask))) -(MaskedGreaterInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [6] x y (VPMOVVec32x16ToM mask))) (MaskedGreaterInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [6] x y (VPMOVVec32x4ToM mask))) (MaskedGreaterInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [6] x y (VPMOVVec32x8ToM mask))) +(MaskedGreaterInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [6] x y (VPMOVVec32x16ToM mask))) (MaskedGreaterInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [6] x y (VPMOVVec64x2ToM mask))) (MaskedGreaterInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [6] x y (VPMOVVec64x4ToM mask))) (MaskedGreaterInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [6] x y (VPMOVVec64x8ToM mask))) -(MaskedGreaterInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [6] x y (VPMOVVec8x16ToM mask))) -(MaskedGreaterInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [6] x y (VPMOVVec8x32ToM mask))) -(MaskedGreaterInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [6] x y (VPMOVVec8x64ToM mask))) +(MaskedGreaterUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [6] x y (VPMOVVec8x16ToM mask))) +(MaskedGreaterUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [6] x y (VPMOVVec8x32ToM mask))) +(MaskedGreaterUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [6] x y (VPMOVVec8x64ToM mask))) 
+(MaskedGreaterUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [6] x y (VPMOVVec16x8ToM mask))) (MaskedGreaterUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [6] x y (VPMOVVec16x16ToM mask))) (MaskedGreaterUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [6] x y (VPMOVVec16x32ToM mask))) -(MaskedGreaterUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [6] x y (VPMOVVec16x8ToM mask))) -(MaskedGreaterUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [6] x y (VPMOVVec32x16ToM mask))) (MaskedGreaterUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [6] x y (VPMOVVec32x4ToM mask))) (MaskedGreaterUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [6] x y (VPMOVVec32x8ToM mask))) +(MaskedGreaterUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [6] x y (VPMOVVec32x16ToM mask))) (MaskedGreaterUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [6] x y (VPMOVVec64x2ToM mask))) (MaskedGreaterUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [6] x y (VPMOVVec64x4ToM mask))) (MaskedGreaterUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [6] x y (VPMOVVec64x8ToM mask))) -(MaskedGreaterUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [6] x y (VPMOVVec8x16ToM mask))) -(MaskedGreaterUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [6] x y (VPMOVVec8x32ToM mask))) -(MaskedGreaterUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [6] x y (VPMOVVec8x64ToM mask))) -(MaskedGreaterEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [5] x y (VPMOVVec32x16ToM mask))) (MaskedGreaterEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [5] x y (VPMOVVec32x4ToM mask))) (MaskedGreaterEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [5] x y (VPMOVVec32x8ToM mask))) +(MaskedGreaterEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [5] x y (VPMOVVec32x16ToM mask))) (MaskedGreaterEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [5] x y (VPMOVVec64x2ToM mask))) (MaskedGreaterEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [5] x y (VPMOVVec64x4ToM mask))) (MaskedGreaterEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [5] x y (VPMOVVec64x8ToM mask))) +(MaskedGreaterEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [5] x y (VPMOVVec8x16ToM mask))) +(MaskedGreaterEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [5] x y (VPMOVVec8x32ToM mask))) +(MaskedGreaterEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [5] x y (VPMOVVec8x64ToM mask))) +(MaskedGreaterEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [5] x y (VPMOVVec16x8ToM mask))) (MaskedGreaterEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [5] x y (VPMOVVec16x16ToM mask))) (MaskedGreaterEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [5] x y (VPMOVVec16x32ToM mask))) -(MaskedGreaterEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [5] x y (VPMOVVec16x8ToM mask))) -(MaskedGreaterEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [5] x y (VPMOVVec32x16ToM mask))) (MaskedGreaterEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [5] x y (VPMOVVec32x4ToM mask))) (MaskedGreaterEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [5] x y (VPMOVVec32x8ToM mask))) +(MaskedGreaterEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [5] x y (VPMOVVec32x16ToM mask))) (MaskedGreaterEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [5] x y (VPMOVVec64x2ToM mask))) 
(MaskedGreaterEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [5] x y (VPMOVVec64x4ToM mask))) (MaskedGreaterEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [5] x y (VPMOVVec64x8ToM mask))) -(MaskedGreaterEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [5] x y (VPMOVVec8x16ToM mask))) -(MaskedGreaterEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [5] x y (VPMOVVec8x32ToM mask))) -(MaskedGreaterEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [5] x y (VPMOVVec8x64ToM mask))) +(MaskedGreaterEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [5] x y (VPMOVVec8x16ToM mask))) +(MaskedGreaterEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [5] x y (VPMOVVec8x32ToM mask))) +(MaskedGreaterEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [5] x y (VPMOVVec8x64ToM mask))) +(MaskedGreaterEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [5] x y (VPMOVVec16x8ToM mask))) (MaskedGreaterEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [5] x y (VPMOVVec16x16ToM mask))) (MaskedGreaterEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [5] x y (VPMOVVec16x32ToM mask))) -(MaskedGreaterEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [5] x y (VPMOVVec16x8ToM mask))) -(MaskedGreaterEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [5] x y (VPMOVVec32x16ToM mask))) (MaskedGreaterEqualUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [5] x y (VPMOVVec32x4ToM mask))) (MaskedGreaterEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [5] x y (VPMOVVec32x8ToM mask))) +(MaskedGreaterEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [5] x y (VPMOVVec32x16ToM mask))) (MaskedGreaterEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [5] x y (VPMOVVec64x2ToM mask))) (MaskedGreaterEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [5] x y (VPMOVVec64x4ToM mask))) (MaskedGreaterEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [5] x y (VPMOVVec64x8ToM mask))) -(MaskedGreaterEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [5] x y (VPMOVVec8x16ToM mask))) -(MaskedGreaterEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [5] x y (VPMOVVec8x32ToM mask))) -(MaskedGreaterEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [5] x y (VPMOVVec8x64ToM mask))) -(MaskedIsNanFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [3] x y (VPMOVVec32x16ToM mask))) (MaskedIsNanFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [3] x y (VPMOVVec32x4ToM mask))) (MaskedIsNanFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [3] x y (VPMOVVec32x8ToM mask))) +(MaskedIsNanFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [3] x y (VPMOVVec32x16ToM mask))) (MaskedIsNanFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [3] x y (VPMOVVec64x2ToM mask))) (MaskedIsNanFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [3] x y (VPMOVVec64x4ToM mask))) (MaskedIsNanFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [3] x y (VPMOVVec64x8ToM mask))) -(MaskedLessFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [1] x y (VPMOVVec32x16ToM mask))) (MaskedLessFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [1] x y (VPMOVVec32x4ToM mask))) (MaskedLessFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [1] x y (VPMOVVec32x8ToM mask))) +(MaskedLessFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [1] x y (VPMOVVec32x16ToM mask))) 
(MaskedLessFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [1] x y (VPMOVVec64x2ToM mask))) (MaskedLessFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [1] x y (VPMOVVec64x4ToM mask))) (MaskedLessFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [1] x y (VPMOVVec64x8ToM mask))) +(MaskedLessInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [1] x y (VPMOVVec8x16ToM mask))) +(MaskedLessInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [1] x y (VPMOVVec8x32ToM mask))) +(MaskedLessInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [1] x y (VPMOVVec8x64ToM mask))) +(MaskedLessInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [1] x y (VPMOVVec16x8ToM mask))) (MaskedLessInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [1] x y (VPMOVVec16x16ToM mask))) (MaskedLessInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [1] x y (VPMOVVec16x32ToM mask))) -(MaskedLessInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [1] x y (VPMOVVec16x8ToM mask))) -(MaskedLessInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [1] x y (VPMOVVec32x16ToM mask))) (MaskedLessInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [1] x y (VPMOVVec32x4ToM mask))) (MaskedLessInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [1] x y (VPMOVVec32x8ToM mask))) +(MaskedLessInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [1] x y (VPMOVVec32x16ToM mask))) (MaskedLessInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [1] x y (VPMOVVec64x2ToM mask))) (MaskedLessInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [1] x y (VPMOVVec64x4ToM mask))) (MaskedLessInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [1] x y (VPMOVVec64x8ToM mask))) -(MaskedLessInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [1] x y (VPMOVVec8x16ToM mask))) -(MaskedLessInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [1] x y (VPMOVVec8x32ToM mask))) -(MaskedLessInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [1] x y (VPMOVVec8x64ToM mask))) +(MaskedLessUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [1] x y (VPMOVVec8x16ToM mask))) +(MaskedLessUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [1] x y (VPMOVVec8x32ToM mask))) +(MaskedLessUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [1] x y (VPMOVVec8x64ToM mask))) +(MaskedLessUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [1] x y (VPMOVVec16x8ToM mask))) (MaskedLessUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [1] x y (VPMOVVec16x16ToM mask))) (MaskedLessUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [1] x y (VPMOVVec16x32ToM mask))) -(MaskedLessUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [1] x y (VPMOVVec16x8ToM mask))) -(MaskedLessUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [1] x y (VPMOVVec32x16ToM mask))) (MaskedLessUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [1] x y (VPMOVVec32x4ToM mask))) (MaskedLessUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [1] x y (VPMOVVec32x8ToM mask))) +(MaskedLessUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [1] x y (VPMOVVec32x16ToM mask))) (MaskedLessUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [1] x y (VPMOVVec64x2ToM mask))) (MaskedLessUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [1] x y (VPMOVVec64x4ToM mask))) (MaskedLessUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [1] x y (VPMOVVec64x8ToM mask))) -(MaskedLessUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [1] x y (VPMOVVec8x16ToM mask))) 
-(MaskedLessUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [1] x y (VPMOVVec8x32ToM mask))) -(MaskedLessUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [1] x y (VPMOVVec8x64ToM mask))) -(MaskedLessEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [2] x y (VPMOVVec32x16ToM mask))) (MaskedLessEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [2] x y (VPMOVVec32x4ToM mask))) (MaskedLessEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [2] x y (VPMOVVec32x8ToM mask))) +(MaskedLessEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [2] x y (VPMOVVec32x16ToM mask))) (MaskedLessEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [2] x y (VPMOVVec64x2ToM mask))) (MaskedLessEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [2] x y (VPMOVVec64x4ToM mask))) (MaskedLessEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [2] x y (VPMOVVec64x8ToM mask))) +(MaskedLessEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [2] x y (VPMOVVec8x16ToM mask))) +(MaskedLessEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [2] x y (VPMOVVec8x32ToM mask))) +(MaskedLessEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [2] x y (VPMOVVec8x64ToM mask))) +(MaskedLessEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [2] x y (VPMOVVec16x8ToM mask))) (MaskedLessEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [2] x y (VPMOVVec16x16ToM mask))) (MaskedLessEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [2] x y (VPMOVVec16x32ToM mask))) -(MaskedLessEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [2] x y (VPMOVVec16x8ToM mask))) -(MaskedLessEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [2] x y (VPMOVVec32x16ToM mask))) (MaskedLessEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [2] x y (VPMOVVec32x4ToM mask))) (MaskedLessEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [2] x y (VPMOVVec32x8ToM mask))) +(MaskedLessEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [2] x y (VPMOVVec32x16ToM mask))) (MaskedLessEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [2] x y (VPMOVVec64x2ToM mask))) (MaskedLessEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [2] x y (VPMOVVec64x4ToM mask))) (MaskedLessEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [2] x y (VPMOVVec64x8ToM mask))) -(MaskedLessEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [2] x y (VPMOVVec8x16ToM mask))) -(MaskedLessEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [2] x y (VPMOVVec8x32ToM mask))) -(MaskedLessEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [2] x y (VPMOVVec8x64ToM mask))) +(MaskedLessEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [2] x y (VPMOVVec8x16ToM mask))) +(MaskedLessEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [2] x y (VPMOVVec8x32ToM mask))) +(MaskedLessEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [2] x y (VPMOVVec8x64ToM mask))) +(MaskedLessEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [2] x y (VPMOVVec16x8ToM mask))) (MaskedLessEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [2] x y (VPMOVVec16x16ToM mask))) (MaskedLessEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [2] x y (VPMOVVec16x32ToM mask))) -(MaskedLessEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [2] x y (VPMOVVec16x8ToM mask))) -(MaskedLessEqualUint32x16 x y mask) => (VPMOVMToVec32x16 
(VPCMPUDMasked512 [2] x y (VPMOVVec32x16ToM mask))) (MaskedLessEqualUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [2] x y (VPMOVVec32x4ToM mask))) (MaskedLessEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [2] x y (VPMOVVec32x8ToM mask))) +(MaskedLessEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [2] x y (VPMOVVec32x16ToM mask))) (MaskedLessEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [2] x y (VPMOVVec64x2ToM mask))) (MaskedLessEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [2] x y (VPMOVVec64x4ToM mask))) (MaskedLessEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [2] x y (VPMOVVec64x8ToM mask))) -(MaskedLessEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [2] x y (VPMOVVec8x16ToM mask))) -(MaskedLessEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [2] x y (VPMOVVec8x32ToM mask))) -(MaskedLessEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [2] x y (VPMOVVec8x64ToM mask))) -(MaskedMaxFloat32x16 x y mask) => (VMAXPSMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedMaxFloat32x4 x y mask) => (VMAXPSMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedMaxFloat32x8 x y mask) => (VMAXPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedMaxFloat32x16 x y mask) => (VMAXPSMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedMaxFloat64x2 x y mask) => (VMAXPDMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedMaxFloat64x4 x y mask) => (VMAXPDMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedMaxFloat64x8 x y mask) => (VMAXPDMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedMaxInt8x16 x y mask) => (VPMAXSBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaskedMaxInt8x32 x y mask) => (VPMAXSBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaskedMaxInt8x64 x y mask) => (VPMAXSBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedMaxInt16x8 x y mask) => (VPMAXSWMasked128 x y (VPMOVVec16x8ToM mask)) (MaskedMaxInt16x16 x y mask) => (VPMAXSWMasked256 x y (VPMOVVec16x16ToM mask)) (MaskedMaxInt16x32 x y mask) => (VPMAXSWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedMaxInt16x8 x y mask) => (VPMAXSWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedMaxInt32x16 x y mask) => (VPMAXSDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedMaxInt32x4 x y mask) => (VPMAXSDMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedMaxInt32x8 x y mask) => (VPMAXSDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedMaxInt32x16 x y mask) => (VPMAXSDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedMaxInt64x2 x y mask) => (VPMAXSQMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedMaxInt64x4 x y mask) => (VPMAXSQMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedMaxInt64x8 x y mask) => (VPMAXSQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedMaxInt8x16 x y mask) => (VPMAXSBMasked128 x y (VPMOVVec8x16ToM mask)) -(MaskedMaxInt8x32 x y mask) => (VPMAXSBMasked256 x y (VPMOVVec8x32ToM mask)) -(MaskedMaxInt8x64 x y mask) => (VPMAXSBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedMaxUint8x16 x y mask) => (VPMAXUBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaskedMaxUint8x32 x y mask) => (VPMAXUBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaskedMaxUint8x64 x y mask) => (VPMAXUBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedMaxUint16x8 x y mask) => (VPMAXUWMasked128 x y (VPMOVVec16x8ToM mask)) (MaskedMaxUint16x16 x y mask) => (VPMAXUWMasked256 x y (VPMOVVec16x16ToM mask)) (MaskedMaxUint16x32 x y mask) => (VPMAXUWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedMaxUint16x8 x y mask) => (VPMAXUWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedMaxUint32x16 x y mask) => (VPMAXUDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedMaxUint32x4 x y 
mask) => (VPMAXUDMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedMaxUint32x8 x y mask) => (VPMAXUDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedMaxUint32x16 x y mask) => (VPMAXUDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedMaxUint64x2 x y mask) => (VPMAXUQMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedMaxUint64x4 x y mask) => (VPMAXUQMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedMaxUint64x8 x y mask) => (VPMAXUQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedMaxUint8x16 x y mask) => (VPMAXUBMasked128 x y (VPMOVVec8x16ToM mask)) -(MaskedMaxUint8x32 x y mask) => (VPMAXUBMasked256 x y (VPMOVVec8x32ToM mask)) -(MaskedMaxUint8x64 x y mask) => (VPMAXUBMasked512 x y (VPMOVVec8x64ToM mask)) -(MaskedMinFloat32x16 x y mask) => (VMINPSMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedMinFloat32x4 x y mask) => (VMINPSMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedMinFloat32x8 x y mask) => (VMINPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedMinFloat32x16 x y mask) => (VMINPSMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedMinFloat64x2 x y mask) => (VMINPDMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedMinFloat64x4 x y mask) => (VMINPDMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedMinFloat64x8 x y mask) => (VMINPDMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedMinInt8x16 x y mask) => (VPMINSBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaskedMinInt8x32 x y mask) => (VPMINSBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaskedMinInt8x64 x y mask) => (VPMINSBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedMinInt16x8 x y mask) => (VPMINSWMasked128 x y (VPMOVVec16x8ToM mask)) (MaskedMinInt16x16 x y mask) => (VPMINSWMasked256 x y (VPMOVVec16x16ToM mask)) (MaskedMinInt16x32 x y mask) => (VPMINSWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedMinInt16x8 x y mask) => (VPMINSWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedMinInt32x16 x y mask) => (VPMINSDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedMinInt32x4 x y mask) => (VPMINSDMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedMinInt32x8 x y mask) => (VPMINSDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedMinInt32x16 x y mask) => (VPMINSDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedMinInt64x2 x y mask) => (VPMINSQMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedMinInt64x4 x y mask) => (VPMINSQMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedMinInt64x8 x y mask) => (VPMINSQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedMinInt8x16 x y mask) => (VPMINSBMasked128 x y (VPMOVVec8x16ToM mask)) -(MaskedMinInt8x32 x y mask) => (VPMINSBMasked256 x y (VPMOVVec8x32ToM mask)) -(MaskedMinInt8x64 x y mask) => (VPMINSBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedMinUint8x16 x y mask) => (VPMINUBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaskedMinUint8x32 x y mask) => (VPMINUBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaskedMinUint8x64 x y mask) => (VPMINUBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedMinUint16x8 x y mask) => (VPMINUWMasked128 x y (VPMOVVec16x8ToM mask)) (MaskedMinUint16x16 x y mask) => (VPMINUWMasked256 x y (VPMOVVec16x16ToM mask)) (MaskedMinUint16x32 x y mask) => (VPMINUWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedMinUint16x8 x y mask) => (VPMINUWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedMinUint32x16 x y mask) => (VPMINUDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedMinUint32x4 x y mask) => (VPMINUDMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedMinUint32x8 x y mask) => (VPMINUDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedMinUint32x16 x y mask) => (VPMINUDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedMinUint64x2 x y mask) => (VPMINUQMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedMinUint64x4 x y mask) => 
(VPMINUQMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedMinUint64x8 x y mask) => (VPMINUQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedMinUint8x16 x y mask) => (VPMINUBMasked128 x y (VPMOVVec8x16ToM mask)) -(MaskedMinUint8x32 x y mask) => (VPMINUBMasked256 x y (VPMOVVec8x32ToM mask)) -(MaskedMinUint8x64 x y mask) => (VPMINUBMasked512 x y (VPMOVVec8x64ToM mask)) -(MaskedMulFloat32x16 x y mask) => (VMULPSMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedMulFloat32x4 x y mask) => (VMULPSMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedMulFloat32x8 x y mask) => (VMULPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedMulFloat32x16 x y mask) => (VMULPSMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedMulFloat64x2 x y mask) => (VMULPDMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedMulFloat64x4 x y mask) => (VMULPDMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedMulFloat64x8 x y mask) => (VMULPDMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedMulByPowOf2Float32x16 x y mask) => (VSCALEFPSMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedMulByPowOf2Float32x4 x y mask) => (VSCALEFPSMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedMulByPowOf2Float32x8 x y mask) => (VSCALEFPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedMulByPowOf2Float32x16 x y mask) => (VSCALEFPSMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedMulByPowOf2Float64x2 x y mask) => (VSCALEFPDMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedMulByPowOf2Float64x4 x y mask) => (VSCALEFPDMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedMulByPowOf2Float64x8 x y mask) => (VSCALEFPDMasked512 x y (VPMOVVec64x8ToM mask)) @@ -811,288 +811,288 @@ (MaskedMulEvenWidenUint64x2 x y mask) => (VPMULUDQMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedMulEvenWidenUint64x4 x y mask) => (VPMULUDQMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedMulEvenWidenUint64x8 x y mask) => (VPMULUDQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedMulHighInt16x8 x y mask) => (VPMULHWMasked128 x y (VPMOVVec16x8ToM mask)) (MaskedMulHighInt16x16 x y mask) => (VPMULHWMasked256 x y (VPMOVVec16x16ToM mask)) (MaskedMulHighInt16x32 x y mask) => (VPMULHWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedMulHighInt16x8 x y mask) => (VPMULHWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedMulHighUint16x8 x y mask) => (VPMULHUWMasked128 x y (VPMOVVec16x8ToM mask)) (MaskedMulHighUint16x16 x y mask) => (VPMULHUWMasked256 x y (VPMOVVec16x16ToM mask)) (MaskedMulHighUint16x32 x y mask) => (VPMULHUWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedMulHighUint16x8 x y mask) => (VPMULHUWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedMulLowInt16x8 x y mask) => (VPMULLWMasked128 x y (VPMOVVec16x8ToM mask)) (MaskedMulLowInt16x16 x y mask) => (VPMULLWMasked256 x y (VPMOVVec16x16ToM mask)) (MaskedMulLowInt16x32 x y mask) => (VPMULLWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedMulLowInt16x8 x y mask) => (VPMULLWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedMulLowInt32x16 x y mask) => (VPMULLDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedMulLowInt32x4 x y mask) => (VPMULLDMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedMulLowInt32x8 x y mask) => (VPMULLDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedMulLowInt32x16 x y mask) => (VPMULLDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedMulLowInt64x2 x y mask) => (VPMULLQMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedMulLowInt64x4 x y mask) => (VPMULLQMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedMulLowInt64x8 x y mask) => (VPMULLQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedNotEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [4] x y (VPMOVVec32x16ToM mask))) (MaskedNotEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 
(VCMPPSMasked128 [4] x y (VPMOVVec32x4ToM mask))) (MaskedNotEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [4] x y (VPMOVVec32x8ToM mask))) +(MaskedNotEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [4] x y (VPMOVVec32x16ToM mask))) (MaskedNotEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [4] x y (VPMOVVec64x2ToM mask))) (MaskedNotEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [4] x y (VPMOVVec64x4ToM mask))) (MaskedNotEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [4] x y (VPMOVVec64x8ToM mask))) +(MaskedNotEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [4] x y (VPMOVVec8x16ToM mask))) +(MaskedNotEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [4] x y (VPMOVVec8x32ToM mask))) +(MaskedNotEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [4] x y (VPMOVVec8x64ToM mask))) +(MaskedNotEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [4] x y (VPMOVVec16x8ToM mask))) (MaskedNotEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [4] x y (VPMOVVec16x16ToM mask))) (MaskedNotEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [4] x y (VPMOVVec16x32ToM mask))) -(MaskedNotEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [4] x y (VPMOVVec16x8ToM mask))) -(MaskedNotEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [4] x y (VPMOVVec32x16ToM mask))) (MaskedNotEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [4] x y (VPMOVVec32x4ToM mask))) (MaskedNotEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [4] x y (VPMOVVec32x8ToM mask))) +(MaskedNotEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [4] x y (VPMOVVec32x16ToM mask))) (MaskedNotEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [4] x y (VPMOVVec64x2ToM mask))) (MaskedNotEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [4] x y (VPMOVVec64x4ToM mask))) (MaskedNotEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [4] x y (VPMOVVec64x8ToM mask))) -(MaskedNotEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [4] x y (VPMOVVec8x16ToM mask))) -(MaskedNotEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [4] x y (VPMOVVec8x32ToM mask))) -(MaskedNotEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [4] x y (VPMOVVec8x64ToM mask))) +(MaskedNotEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [4] x y (VPMOVVec8x16ToM mask))) +(MaskedNotEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [4] x y (VPMOVVec8x32ToM mask))) +(MaskedNotEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [4] x y (VPMOVVec8x64ToM mask))) +(MaskedNotEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [4] x y (VPMOVVec16x8ToM mask))) (MaskedNotEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [4] x y (VPMOVVec16x16ToM mask))) (MaskedNotEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [4] x y (VPMOVVec16x32ToM mask))) -(MaskedNotEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [4] x y (VPMOVVec16x8ToM mask))) -(MaskedNotEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [4] x y (VPMOVVec32x16ToM mask))) (MaskedNotEqualUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [4] x y (VPMOVVec32x4ToM mask))) (MaskedNotEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [4] x y (VPMOVVec32x8ToM mask))) +(MaskedNotEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [4] x y (VPMOVVec32x16ToM mask))) (MaskedNotEqualUint64x2 x y 
mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [4] x y (VPMOVVec64x2ToM mask))) (MaskedNotEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [4] x y (VPMOVVec64x4ToM mask))) (MaskedNotEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [4] x y (VPMOVVec64x8ToM mask))) -(MaskedNotEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [4] x y (VPMOVVec8x16ToM mask))) -(MaskedNotEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [4] x y (VPMOVVec8x32ToM mask))) -(MaskedNotEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [4] x y (VPMOVVec8x64ToM mask))) -(MaskedOrFloat32x16 x y mask) => (VORPSMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedOrFloat32x4 x y mask) => (VORPSMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedOrFloat32x8 x y mask) => (VORPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedOrFloat32x16 x y mask) => (VORPSMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedOrFloat64x2 x y mask) => (VORPDMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedOrFloat64x4 x y mask) => (VORPDMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedOrFloat64x8 x y mask) => (VORPDMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedOrInt32x16 x y mask) => (VPORDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedOrInt32x4 x y mask) => (VPORDMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedOrInt32x8 x y mask) => (VPORDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedOrInt32x16 x y mask) => (VPORDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedOrInt64x2 x y mask) => (VPORQMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedOrInt64x4 x y mask) => (VPORQMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedOrInt64x8 x y mask) => (VPORQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedOrUint32x16 x y mask) => (VPORDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedOrUint32x4 x y mask) => (VPORDMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedOrUint32x8 x y mask) => (VPORDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedOrUint32x16 x y mask) => (VPORDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedOrUint64x2 x y mask) => (VPORQMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedOrUint64x4 x y mask) => (VPORQMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedOrUint64x8 x y mask) => (VPORQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedPairDotProdInt16x8 x y mask) => (VPMADDWDMasked128 x y (VPMOVVec16x8ToM mask)) (MaskedPairDotProdInt16x16 x y mask) => (VPMADDWDMasked256 x y (VPMOVVec16x16ToM mask)) (MaskedPairDotProdInt16x32 x y mask) => (VPMADDWDMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedPairDotProdInt16x8 x y mask) => (VPMADDWDMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedPairDotProdAccumulateInt32x16 x y z mask) => (VPDPWSSDMasked512 x y z (VPMOVVec32x16ToM mask)) (MaskedPairDotProdAccumulateInt32x4 x y z mask) => (VPDPWSSDMasked128 x y z (VPMOVVec32x4ToM mask)) (MaskedPairDotProdAccumulateInt32x8 x y z mask) => (VPDPWSSDMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedPopCountInt16x16 x mask) => (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) -(MaskedPopCountInt16x32 x mask) => (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) +(MaskedPairDotProdAccumulateInt32x16 x y z mask) => (VPDPWSSDMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedPopCountInt8x16 x mask) => (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) +(MaskedPopCountInt8x32 x mask) => (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) +(MaskedPopCountInt8x64 x mask) => (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) (MaskedPopCountInt16x8 x mask) => (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) -(MaskedPopCountInt32x16 x mask) => (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) +(MaskedPopCountInt16x16 x mask) => 
(VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) +(MaskedPopCountInt16x32 x mask) => (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) (MaskedPopCountInt32x4 x mask) => (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) (MaskedPopCountInt32x8 x mask) => (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) +(MaskedPopCountInt32x16 x mask) => (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) (MaskedPopCountInt64x2 x mask) => (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) (MaskedPopCountInt64x4 x mask) => (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) (MaskedPopCountInt64x8 x mask) => (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) -(MaskedPopCountInt8x16 x mask) => (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) -(MaskedPopCountInt8x32 x mask) => (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) -(MaskedPopCountInt8x64 x mask) => (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) +(MaskedPopCountUint8x16 x mask) => (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) +(MaskedPopCountUint8x32 x mask) => (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) +(MaskedPopCountUint8x64 x mask) => (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) +(MaskedPopCountUint16x8 x mask) => (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) (MaskedPopCountUint16x16 x mask) => (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) (MaskedPopCountUint16x32 x mask) => (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) -(MaskedPopCountUint16x8 x mask) => (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) -(MaskedPopCountUint32x16 x mask) => (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) (MaskedPopCountUint32x4 x mask) => (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) (MaskedPopCountUint32x8 x mask) => (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) +(MaskedPopCountUint32x16 x mask) => (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) (MaskedPopCountUint64x2 x mask) => (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) (MaskedPopCountUint64x4 x mask) => (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) (MaskedPopCountUint64x8 x mask) => (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) -(MaskedPopCountUint8x16 x mask) => (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) -(MaskedPopCountUint8x32 x mask) => (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) -(MaskedPopCountUint8x64 x mask) => (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) -(MaskedRoundSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+8] x (VPMOVVec32x16ToM mask)) (MaskedRoundSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+8] x (VPMOVVec32x4ToM mask)) (MaskedRoundSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+8] x (VPMOVVec32x8ToM mask)) +(MaskedRoundSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+8] x (VPMOVVec32x16ToM mask)) (MaskedRoundSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+8] x (VPMOVVec64x2ToM mask)) (MaskedRoundSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+8] x (VPMOVVec64x4ToM mask)) (MaskedRoundSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+8] x (VPMOVVec64x8ToM mask)) -(MaskedRoundWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) (MaskedRoundWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) (MaskedRoundWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) +(MaskedRoundWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) (MaskedRoundWithPrecisionFloat64x2 [a] x mask) => 
(VRNDSCALEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) (MaskedRoundWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) (MaskedRoundWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) -(MaskedSaturatedAddInt16x16 x y mask) => (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedSaturatedAddInt16x32 x y mask) => (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedSaturatedAddInt16x8 x y mask) => (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) (MaskedSaturatedAddInt8x16 x y mask) => (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) (MaskedSaturatedAddInt8x32 x y mask) => (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) (MaskedSaturatedAddInt8x64 x y mask) => (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) -(MaskedSaturatedAddUint16x16 x y mask) => (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedSaturatedAddUint16x32 x y mask) => (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedSaturatedAddUint16x8 x y mask) => (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedSaturatedAddInt16x8 x y mask) => (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedSaturatedAddInt16x16 x y mask) => (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedSaturatedAddInt16x32 x y mask) => (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) (MaskedSaturatedAddUint8x16 x y mask) => (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) (MaskedSaturatedAddUint8x32 x y mask) => (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) (MaskedSaturatedAddUint8x64 x y mask) => (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) -(MaskedSaturatedPairDotProdAccumulateInt32x16 x y z mask) => (VPDPWSSDSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedSaturatedAddUint16x8 x y mask) => (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedSaturatedAddUint16x16 x y mask) => (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedSaturatedAddUint16x32 x y mask) => (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) (MaskedSaturatedPairDotProdAccumulateInt32x4 x y z mask) => (VPDPWSSDSMasked128 x y z (VPMOVVec32x4ToM mask)) (MaskedSaturatedPairDotProdAccumulateInt32x8 x y z mask) => (VPDPWSSDSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedSaturatedSubInt16x16 x y mask) => (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedSaturatedSubInt16x32 x y mask) => (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedSaturatedSubInt16x8 x y mask) => (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedSaturatedPairDotProdAccumulateInt32x16 x y z mask) => (VPDPWSSDSMasked512 x y z (VPMOVVec32x16ToM mask)) (MaskedSaturatedSubInt8x16 x y mask) => (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) (MaskedSaturatedSubInt8x32 x y mask) => (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) (MaskedSaturatedSubInt8x64 x y mask) => (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) -(MaskedSaturatedSubUint16x16 x y mask) => (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedSaturatedSubUint16x32 x y mask) => (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedSaturatedSubUint16x8 x y mask) => (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedSaturatedSubInt16x8 x y mask) => (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedSaturatedSubInt16x16 x y mask) => (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedSaturatedSubInt16x32 x y mask) => (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) (MaskedSaturatedSubUint8x16 x y mask) => (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) (MaskedSaturatedSubUint8x32 x y mask) => (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) (MaskedSaturatedSubUint8x64 x y 
mask) => (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedSaturatedSubUint16x8 x y mask) => (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedSaturatedSubUint16x16 x y mask) => (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedSaturatedSubUint16x32 x y mask) => (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) (MaskedSaturatedUnsignedSignedPairDotProdUint8x16 x y mask) => (VPMADDUBSWMasked128 x y (VPMOVVec16x8ToM mask)) (MaskedSaturatedUnsignedSignedPairDotProdUint8x32 x y mask) => (VPMADDUBSWMasked256 x y (VPMOVVec16x16ToM mask)) (MaskedSaturatedUnsignedSignedPairDotProdUint8x64 x y mask) => (VPMADDUBSWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16 x y z mask) => (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4 x y z mask) => (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8 x y z mask) => (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16 x y z mask) => (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16 x y z mask) => (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4 x y z mask) => (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8 x y z mask) => (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedSqrtFloat32x16 x mask) => (VSQRTPSMasked512 x (VPMOVVec32x16ToM mask)) +(MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16 x y z mask) => (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) (MaskedSqrtFloat32x4 x mask) => (VSQRTPSMasked128 x (VPMOVVec32x4ToM mask)) (MaskedSqrtFloat32x8 x mask) => (VSQRTPSMasked256 x (VPMOVVec32x8ToM mask)) +(MaskedSqrtFloat32x16 x mask) => (VSQRTPSMasked512 x (VPMOVVec32x16ToM mask)) (MaskedSqrtFloat64x2 x mask) => (VSQRTPDMasked128 x (VPMOVVec64x2ToM mask)) (MaskedSqrtFloat64x4 x mask) => (VSQRTPDMasked256 x (VPMOVVec64x4ToM mask)) (MaskedSqrtFloat64x8 x mask) => (VSQRTPDMasked512 x (VPMOVVec64x8ToM mask)) -(MaskedSubFloat32x16 x y mask) => (VSUBPSMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedSubFloat32x4 x y mask) => (VSUBPSMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedSubFloat32x8 x y mask) => (VSUBPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedSubFloat32x16 x y mask) => (VSUBPSMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedSubFloat64x2 x y mask) => (VSUBPDMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedSubFloat64x4 x y mask) => (VSUBPDMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedSubFloat64x8 x y mask) => (VSUBPDMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedSubInt8x16 x y mask) => (VPSUBBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaskedSubInt8x32 x y mask) => (VPSUBBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaskedSubInt8x64 x y mask) => (VPSUBBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedSubInt16x8 x y mask) => (VPSUBWMasked128 x y (VPMOVVec16x8ToM mask)) (MaskedSubInt16x16 x y mask) => (VPSUBWMasked256 x y (VPMOVVec16x16ToM mask)) (MaskedSubInt16x32 x y mask) => (VPSUBWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedSubInt16x8 x y mask) => (VPSUBWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedSubInt32x16 x y mask) => (VPSUBDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedSubInt32x4 x y mask) => (VPSUBDMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedSubInt32x8 x y mask) => (VPSUBDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedSubInt32x16 x y 
mask) => (VPSUBDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedSubInt64x2 x y mask) => (VPSUBQMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedSubInt64x4 x y mask) => (VPSUBQMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedSubInt64x8 x y mask) => (VPSUBQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedSubInt8x16 x y mask) => (VPSUBBMasked128 x y (VPMOVVec8x16ToM mask)) -(MaskedSubInt8x32 x y mask) => (VPSUBBMasked256 x y (VPMOVVec8x32ToM mask)) -(MaskedSubInt8x64 x y mask) => (VPSUBBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedSubUint8x16 x y mask) => (VPSUBBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaskedSubUint8x32 x y mask) => (VPSUBBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaskedSubUint8x64 x y mask) => (VPSUBBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedSubUint16x8 x y mask) => (VPSUBWMasked128 x y (VPMOVVec16x8ToM mask)) (MaskedSubUint16x16 x y mask) => (VPSUBWMasked256 x y (VPMOVVec16x16ToM mask)) (MaskedSubUint16x32 x y mask) => (VPSUBWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedSubUint16x8 x y mask) => (VPSUBWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedSubUint32x16 x y mask) => (VPSUBDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedSubUint32x4 x y mask) => (VPSUBDMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedSubUint32x8 x y mask) => (VPSUBDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedSubUint32x16 x y mask) => (VPSUBDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedSubUint64x2 x y mask) => (VPSUBQMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedSubUint64x4 x y mask) => (VPSUBQMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedSubUint64x8 x y mask) => (VPSUBQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedSubUint8x16 x y mask) => (VPSUBBMasked128 x y (VPMOVVec8x16ToM mask)) -(MaskedSubUint8x32 x y mask) => (VPSUBBMasked256 x y (VPMOVVec8x32ToM mask)) -(MaskedSubUint8x64 x y mask) => (VPSUBBMasked512 x y (VPMOVVec8x64ToM mask)) -(MaskedTruncSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+11] x (VPMOVVec32x16ToM mask)) (MaskedTruncSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+11] x (VPMOVVec32x4ToM mask)) (MaskedTruncSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+11] x (VPMOVVec32x8ToM mask)) +(MaskedTruncSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+11] x (VPMOVVec32x16ToM mask)) (MaskedTruncSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+11] x (VPMOVVec64x2ToM mask)) (MaskedTruncSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+11] x (VPMOVVec64x4ToM mask)) (MaskedTruncSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+11] x (VPMOVVec64x8ToM mask)) -(MaskedTruncWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) (MaskedTruncWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) (MaskedTruncWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) +(MaskedTruncWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) (MaskedTruncWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) (MaskedTruncWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) (MaskedTruncWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) -(MaskedUnsignedSignedQuadDotProdAccumulateInt32x16 x y z mask) => (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) 
(MaskedUnsignedSignedQuadDotProdAccumulateInt32x4 x y z mask) => (VPDPBUSDMasked128 x y z (VPMOVVec32x4ToM mask)) (MaskedUnsignedSignedQuadDotProdAccumulateInt32x8 x y z mask) => (VPDPBUSDMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedUnsignedSignedQuadDotProdAccumulateUint32x16 x y z mask) => (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedUnsignedSignedQuadDotProdAccumulateInt32x16 x y z mask) => (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) (MaskedUnsignedSignedQuadDotProdAccumulateUint32x4 x y z mask) => (VPDPBUSDMasked128 x y z (VPMOVVec32x4ToM mask)) (MaskedUnsignedSignedQuadDotProdAccumulateUint32x8 x y z mask) => (VPDPBUSDMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedXorFloat32x16 x y mask) => (VXORPSMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedUnsignedSignedQuadDotProdAccumulateUint32x16 x y z mask) => (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) (MaskedXorFloat32x4 x y mask) => (VXORPSMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedXorFloat32x8 x y mask) => (VXORPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedXorFloat32x16 x y mask) => (VXORPSMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedXorFloat64x2 x y mask) => (VXORPDMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedXorFloat64x4 x y mask) => (VXORPDMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedXorFloat64x8 x y mask) => (VXORPDMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedXorInt32x16 x y mask) => (VPXORDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedXorInt32x4 x y mask) => (VPXORDMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedXorInt32x8 x y mask) => (VPXORDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedXorInt32x16 x y mask) => (VPXORDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedXorInt64x2 x y mask) => (VPXORQMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedXorInt64x4 x y mask) => (VPXORQMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedXorInt64x8 x y mask) => (VPXORQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedXorUint32x16 x y mask) => (VPXORDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedXorUint32x4 x y mask) => (VPXORDMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedXorUint32x8 x y mask) => (VPXORDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedXorUint32x16 x y mask) => (VPXORDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedXorUint64x2 x y mask) => (VPXORQMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedXorUint64x4 x y mask) => (VPXORQMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedXorUint64x8 x y mask) => (VPXORQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaxFloat32x16 ...) => (VMAXPS512 ...) (MaxFloat32x4 ...) => (VMAXPS128 ...) (MaxFloat32x8 ...) => (VMAXPS256 ...) +(MaxFloat32x16 ...) => (VMAXPS512 ...) (MaxFloat64x2 ...) => (VMAXPD128 ...) (MaxFloat64x4 ...) => (VMAXPD256 ...) (MaxFloat64x8 ...) => (VMAXPD512 ...) +(MaxInt8x16 ...) => (VPMAXSB128 ...) +(MaxInt8x32 ...) => (VPMAXSB256 ...) +(MaxInt8x64 ...) => (VPMAXSB512 ...) +(MaxInt16x8 ...) => (VPMAXSW128 ...) (MaxInt16x16 ...) => (VPMAXSW256 ...) (MaxInt16x32 ...) => (VPMAXSW512 ...) -(MaxInt16x8 ...) => (VPMAXSW128 ...) -(MaxInt32x16 ...) => (VPMAXSD512 ...) (MaxInt32x4 ...) => (VPMAXSD128 ...) (MaxInt32x8 ...) => (VPMAXSD256 ...) +(MaxInt32x16 ...) => (VPMAXSD512 ...) (MaxInt64x2 ...) => (VPMAXSQ128 ...) (MaxInt64x4 ...) => (VPMAXSQ256 ...) (MaxInt64x8 ...) => (VPMAXSQ512 ...) -(MaxInt8x16 ...) => (VPMAXSB128 ...) -(MaxInt8x32 ...) => (VPMAXSB256 ...) -(MaxInt8x64 ...) => (VPMAXSB512 ...) +(MaxUint8x16 ...) => (VPMAXUB128 ...) +(MaxUint8x32 ...) => (VPMAXUB256 ...) +(MaxUint8x64 ...) => (VPMAXUB512 ...) +(MaxUint16x8 ...) => (VPMAXUW128 ...) (MaxUint16x16 ...) => (VPMAXUW256 ...) 
(MaxUint16x32 ...) => (VPMAXUW512 ...) -(MaxUint16x8 ...) => (VPMAXUW128 ...) -(MaxUint32x16 ...) => (VPMAXUD512 ...) (MaxUint32x4 ...) => (VPMAXUD128 ...) (MaxUint32x8 ...) => (VPMAXUD256 ...) +(MaxUint32x16 ...) => (VPMAXUD512 ...) (MaxUint64x2 ...) => (VPMAXUQ128 ...) (MaxUint64x4 ...) => (VPMAXUQ256 ...) (MaxUint64x8 ...) => (VPMAXUQ512 ...) -(MaxUint8x16 ...) => (VPMAXUB128 ...) -(MaxUint8x32 ...) => (VPMAXUB256 ...) -(MaxUint8x64 ...) => (VPMAXUB512 ...) -(MinFloat32x16 ...) => (VMINPS512 ...) (MinFloat32x4 ...) => (VMINPS128 ...) (MinFloat32x8 ...) => (VMINPS256 ...) +(MinFloat32x16 ...) => (VMINPS512 ...) (MinFloat64x2 ...) => (VMINPD128 ...) (MinFloat64x4 ...) => (VMINPD256 ...) (MinFloat64x8 ...) => (VMINPD512 ...) +(MinInt8x16 ...) => (VPMINSB128 ...) +(MinInt8x32 ...) => (VPMINSB256 ...) +(MinInt8x64 ...) => (VPMINSB512 ...) +(MinInt16x8 ...) => (VPMINSW128 ...) (MinInt16x16 ...) => (VPMINSW256 ...) (MinInt16x32 ...) => (VPMINSW512 ...) -(MinInt16x8 ...) => (VPMINSW128 ...) -(MinInt32x16 ...) => (VPMINSD512 ...) (MinInt32x4 ...) => (VPMINSD128 ...) (MinInt32x8 ...) => (VPMINSD256 ...) +(MinInt32x16 ...) => (VPMINSD512 ...) (MinInt64x2 ...) => (VPMINSQ128 ...) (MinInt64x4 ...) => (VPMINSQ256 ...) (MinInt64x8 ...) => (VPMINSQ512 ...) -(MinInt8x16 ...) => (VPMINSB128 ...) -(MinInt8x32 ...) => (VPMINSB256 ...) -(MinInt8x64 ...) => (VPMINSB512 ...) +(MinUint8x16 ...) => (VPMINUB128 ...) +(MinUint8x32 ...) => (VPMINUB256 ...) +(MinUint8x64 ...) => (VPMINUB512 ...) +(MinUint16x8 ...) => (VPMINUW128 ...) (MinUint16x16 ...) => (VPMINUW256 ...) (MinUint16x32 ...) => (VPMINUW512 ...) -(MinUint16x8 ...) => (VPMINUW128 ...) -(MinUint32x16 ...) => (VPMINUD512 ...) (MinUint32x4 ...) => (VPMINUD128 ...) (MinUint32x8 ...) => (VPMINUD256 ...) +(MinUint32x16 ...) => (VPMINUD512 ...) (MinUint64x2 ...) => (VPMINUQ128 ...) (MinUint64x4 ...) => (VPMINUQ256 ...) (MinUint64x8 ...) => (VPMINUQ512 ...) -(MinUint8x16 ...) => (VPMINUB128 ...) -(MinUint8x32 ...) => (VPMINUB256 ...) -(MinUint8x64 ...) => (VPMINUB512 ...) -(MulFloat32x16 ...) => (VMULPS512 ...) (MulFloat32x4 ...) => (VMULPS128 ...) (MulFloat32x8 ...) => (VMULPS256 ...) +(MulFloat32x16 ...) => (VMULPS512 ...) (MulFloat64x2 ...) => (VMULPD128 ...) (MulFloat64x4 ...) => (VMULPD256 ...) (MulFloat64x8 ...) => (VMULPD512 ...) -(MulByPowOf2Float32x16 ...) => (VSCALEFPS512 ...) (MulByPowOf2Float32x4 ...) => (VSCALEFPS128 ...) (MulByPowOf2Float32x8 ...) => (VSCALEFPS256 ...) +(MulByPowOf2Float32x16 ...) => (VSCALEFPS512 ...) (MulByPowOf2Float64x2 ...) => (VSCALEFPD128 ...) (MulByPowOf2Float64x4 ...) => (VSCALEFPD256 ...) (MulByPowOf2Float64x8 ...) => (VSCALEFPD512 ...) @@ -1106,282 +1106,282 @@ (MulEvenWidenUint64x2 ...) => (VPMULUDQ128 ...) (MulEvenWidenUint64x4 ...) => (VPMULUDQ256 ...) (MulEvenWidenUint64x8 ...) => (VPMULUDQ512 ...) +(MulHighInt16x8 ...) => (VPMULHW128 ...) (MulHighInt16x16 ...) => (VPMULHW256 ...) (MulHighInt16x32 ...) => (VPMULHW512 ...) -(MulHighInt16x8 ...) => (VPMULHW128 ...) +(MulHighUint16x8 ...) => (VPMULHUW128 ...) (MulHighUint16x16 ...) => (VPMULHUW256 ...) (MulHighUint16x32 ...) => (VPMULHUW512 ...) -(MulHighUint16x8 ...) => (VPMULHUW128 ...) +(MulLowInt16x8 ...) => (VPMULLW128 ...) (MulLowInt16x16 ...) => (VPMULLW256 ...) (MulLowInt16x32 ...) => (VPMULLW512 ...) -(MulLowInt16x8 ...) => (VPMULLW128 ...) -(MulLowInt32x16 ...) => (VPMULLD512 ...) (MulLowInt32x4 ...) => (VPMULLD128 ...) (MulLowInt32x8 ...) => (VPMULLD256 ...) +(MulLowInt32x16 ...) => (VPMULLD512 ...) (MulLowInt64x2 ...) => (VPMULLQ128 ...) 
(MulLowInt64x4 ...) => (VPMULLQ256 ...) (MulLowInt64x8 ...) => (VPMULLQ512 ...) -(NotEqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [4] x y)) (NotEqualFloat32x4 x y) => (VCMPPS128 [4] x y) (NotEqualFloat32x8 x y) => (VCMPPS256 [4] x y) +(NotEqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [4] x y)) (NotEqualFloat64x2 x y) => (VCMPPD128 [4] x y) (NotEqualFloat64x4 x y) => (VCMPPD256 [4] x y) (NotEqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [4] x y)) +(NotEqualInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [4] x y)) +(NotEqualInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [4] x y)) +(NotEqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [4] x y)) +(NotEqualInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [4] x y)) (NotEqualInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [4] x y)) (NotEqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [4] x y)) -(NotEqualInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [4] x y)) -(NotEqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [4] x y)) (NotEqualInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [4] x y)) (NotEqualInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [4] x y)) +(NotEqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [4] x y)) (NotEqualInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [4] x y)) (NotEqualInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [4] x y)) (NotEqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [4] x y)) -(NotEqualInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [4] x y)) -(NotEqualInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [4] x y)) -(NotEqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [4] x y)) +(NotEqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [4] x y)) +(NotEqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [4] x y)) +(NotEqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [4] x y)) +(NotEqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [4] x y)) (NotEqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [4] x y)) (NotEqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [4] x y)) -(NotEqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [4] x y)) -(NotEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [4] x y)) (NotEqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [4] x y)) (NotEqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [4] x y)) +(NotEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [4] x y)) (NotEqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [4] x y)) (NotEqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [4] x y)) (NotEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [4] x y)) -(NotEqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [4] x y)) -(NotEqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [4] x y)) -(NotEqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [4] x y)) -(OrFloat32x16 ...) => (VORPS512 ...) (OrFloat32x4 ...) => (VORPS128 ...) (OrFloat32x8 ...) => (VORPS256 ...) +(OrFloat32x16 ...) => (VORPS512 ...) (OrFloat64x2 ...) => (VORPD128 ...) (OrFloat64x4 ...) => (VORPD256 ...) (OrFloat64x8 ...) => (VORPD512 ...) -(OrInt16x16 ...) => (VPOR256 ...) +(OrInt8x16 ...) => (VPOR128 ...) +(OrInt8x32 ...) => (VPOR256 ...) (OrInt16x8 ...) => (VPOR128 ...) -(OrInt32x16 ...) => (VPORD512 ...) +(OrInt16x16 ...) => (VPOR256 ...) (OrInt32x4 ...) => (VPOR128 ...) (OrInt32x8 ...) => (VPOR256 ...) +(OrInt32x16 ...) => (VPORD512 ...) (OrInt64x2 ...) => (VPOR128 ...) (OrInt64x4 ...) => (VPOR256 ...) (OrInt64x8 ...) => (VPORQ512 ...) -(OrInt8x16 ...) => (VPOR128 ...) -(OrInt8x32 ...) => (VPOR256 ...) -(OrUint16x16 ...) => (VPOR256 ...) +(OrUint8x16 ...) => (VPOR128 ...) +(OrUint8x32 ...) => (VPOR256 ...) (OrUint16x8 ...) => (VPOR128 ...) 
-(OrUint32x16 ...) => (VPORD512 ...) +(OrUint16x16 ...) => (VPOR256 ...) (OrUint32x4 ...) => (VPOR128 ...) (OrUint32x8 ...) => (VPOR256 ...) +(OrUint32x16 ...) => (VPORD512 ...) (OrUint64x2 ...) => (VPOR128 ...) (OrUint64x4 ...) => (VPOR256 ...) (OrUint64x8 ...) => (VPORQ512 ...) -(OrUint8x16 ...) => (VPOR128 ...) -(OrUint8x32 ...) => (VPOR256 ...) +(PairDotProdInt16x8 ...) => (VPMADDWD128 ...) (PairDotProdInt16x16 ...) => (VPMADDWD256 ...) (PairDotProdInt16x32 ...) => (VPMADDWD512 ...) -(PairDotProdInt16x8 ...) => (VPMADDWD128 ...) -(PairDotProdAccumulateInt32x16 ...) => (VPDPWSSD512 ...) (PairDotProdAccumulateInt32x4 ...) => (VPDPWSSD128 ...) (PairDotProdAccumulateInt32x8 ...) => (VPDPWSSD256 ...) +(PairDotProdAccumulateInt32x16 ...) => (VPDPWSSD512 ...) (PairwiseAddFloat32x4 ...) => (VHADDPS128 ...) (PairwiseAddFloat32x8 ...) => (VHADDPS256 ...) (PairwiseAddFloat64x2 ...) => (VHADDPD128 ...) (PairwiseAddFloat64x4 ...) => (VHADDPD256 ...) -(PairwiseAddInt16x16 ...) => (VPHADDW256 ...) (PairwiseAddInt16x8 ...) => (VPHADDW128 ...) +(PairwiseAddInt16x16 ...) => (VPHADDW256 ...) (PairwiseAddInt32x4 ...) => (VPHADDD128 ...) (PairwiseAddInt32x8 ...) => (VPHADDD256 ...) -(PairwiseAddUint16x16 ...) => (VPHADDW256 ...) (PairwiseAddUint16x8 ...) => (VPHADDW128 ...) +(PairwiseAddUint16x16 ...) => (VPHADDW256 ...) (PairwiseAddUint32x4 ...) => (VPHADDD128 ...) (PairwiseAddUint32x8 ...) => (VPHADDD256 ...) (PairwiseSubFloat32x4 ...) => (VHSUBPS128 ...) (PairwiseSubFloat32x8 ...) => (VHSUBPS256 ...) (PairwiseSubFloat64x2 ...) => (VHSUBPD128 ...) (PairwiseSubFloat64x4 ...) => (VHSUBPD256 ...) -(PairwiseSubInt16x16 ...) => (VPHSUBW256 ...) (PairwiseSubInt16x8 ...) => (VPHSUBW128 ...) +(PairwiseSubInt16x16 ...) => (VPHSUBW256 ...) (PairwiseSubInt32x4 ...) => (VPHSUBD128 ...) (PairwiseSubInt32x8 ...) => (VPHSUBD256 ...) -(PairwiseSubUint16x16 ...) => (VPHSUBW256 ...) (PairwiseSubUint16x8 ...) => (VPHSUBW128 ...) +(PairwiseSubUint16x16 ...) => (VPHSUBW256 ...) (PairwiseSubUint32x4 ...) => (VPHSUBD128 ...) (PairwiseSubUint32x8 ...) => (VPHSUBD256 ...) +(PopCountInt8x16 ...) => (VPOPCNTB128 ...) +(PopCountInt8x32 ...) => (VPOPCNTB256 ...) +(PopCountInt8x64 ...) => (VPOPCNTB512 ...) +(PopCountInt16x8 ...) => (VPOPCNTW128 ...) (PopCountInt16x16 ...) => (VPOPCNTW256 ...) (PopCountInt16x32 ...) => (VPOPCNTW512 ...) -(PopCountInt16x8 ...) => (VPOPCNTW128 ...) -(PopCountInt32x16 ...) => (VPOPCNTD512 ...) (PopCountInt32x4 ...) => (VPOPCNTD128 ...) (PopCountInt32x8 ...) => (VPOPCNTD256 ...) +(PopCountInt32x16 ...) => (VPOPCNTD512 ...) (PopCountInt64x2 ...) => (VPOPCNTQ128 ...) (PopCountInt64x4 ...) => (VPOPCNTQ256 ...) (PopCountInt64x8 ...) => (VPOPCNTQ512 ...) -(PopCountInt8x16 ...) => (VPOPCNTB128 ...) -(PopCountInt8x32 ...) => (VPOPCNTB256 ...) -(PopCountInt8x64 ...) => (VPOPCNTB512 ...) +(PopCountUint8x16 ...) => (VPOPCNTB128 ...) +(PopCountUint8x32 ...) => (VPOPCNTB256 ...) +(PopCountUint8x64 ...) => (VPOPCNTB512 ...) +(PopCountUint16x8 ...) => (VPOPCNTW128 ...) (PopCountUint16x16 ...) => (VPOPCNTW256 ...) (PopCountUint16x32 ...) => (VPOPCNTW512 ...) -(PopCountUint16x8 ...) => (VPOPCNTW128 ...) -(PopCountUint32x16 ...) => (VPOPCNTD512 ...) (PopCountUint32x4 ...) => (VPOPCNTD128 ...) (PopCountUint32x8 ...) => (VPOPCNTD256 ...) +(PopCountUint32x16 ...) => (VPOPCNTD512 ...) (PopCountUint64x2 ...) => (VPOPCNTQ128 ...) (PopCountUint64x4 ...) => (VPOPCNTQ256 ...) (PopCountUint64x8 ...) => (VPOPCNTQ512 ...) -(PopCountUint8x16 ...) => (VPOPCNTB128 ...) -(PopCountUint8x32 ...) => (VPOPCNTB256 ...) -(PopCountUint8x64 ...) 
=> (VPOPCNTB512 ...) (RoundFloat32x4 x) => (VROUNDPS128 [0] x) (RoundFloat32x8 x) => (VROUNDPS256 [0] x) (RoundFloat64x2 x) => (VROUNDPD128 [0] x) (RoundFloat64x4 x) => (VROUNDPD256 [0] x) -(RoundSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+8] x) (RoundSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+8] x) (RoundSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+8] x) +(RoundSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+8] x) (RoundSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+8] x) (RoundSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+8] x) (RoundSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+8] x) -(RoundWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+0] x) (RoundWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+0] x) (RoundWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+0] x) +(RoundWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+0] x) (RoundWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+0] x) (RoundWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+0] x) (RoundWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+0] x) -(SaturatedAddInt16x16 ...) => (VPADDSW256 ...) -(SaturatedAddInt16x32 ...) => (VPADDSW512 ...) -(SaturatedAddInt16x8 ...) => (VPADDSW128 ...) (SaturatedAddInt8x16 ...) => (VPADDSB128 ...) (SaturatedAddInt8x32 ...) => (VPADDSB256 ...) (SaturatedAddInt8x64 ...) => (VPADDSB512 ...) -(SaturatedAddUint16x16 ...) => (VPADDSW256 ...) -(SaturatedAddUint16x32 ...) => (VPADDSW512 ...) -(SaturatedAddUint16x8 ...) => (VPADDSW128 ...) +(SaturatedAddInt16x8 ...) => (VPADDSW128 ...) +(SaturatedAddInt16x16 ...) => (VPADDSW256 ...) +(SaturatedAddInt16x32 ...) => (VPADDSW512 ...) (SaturatedAddUint8x16 ...) => (VPADDSB128 ...) (SaturatedAddUint8x32 ...) => (VPADDSB256 ...) (SaturatedAddUint8x64 ...) => (VPADDSB512 ...) -(SaturatedPairDotProdAccumulateInt32x16 ...) => (VPDPWSSDS512 ...) +(SaturatedAddUint16x8 ...) => (VPADDSW128 ...) +(SaturatedAddUint16x16 ...) => (VPADDSW256 ...) +(SaturatedAddUint16x32 ...) => (VPADDSW512 ...) (SaturatedPairDotProdAccumulateInt32x4 ...) => (VPDPWSSDS128 ...) (SaturatedPairDotProdAccumulateInt32x8 ...) => (VPDPWSSDS256 ...) -(SaturatedPairwiseAddInt16x16 ...) => (VPHADDSW256 ...) +(SaturatedPairDotProdAccumulateInt32x16 ...) => (VPDPWSSDS512 ...) (SaturatedPairwiseAddInt16x8 ...) => (VPHADDSW128 ...) -(SaturatedPairwiseSubInt16x16 ...) => (VPHSUBSW256 ...) +(SaturatedPairwiseAddInt16x16 ...) => (VPHADDSW256 ...) (SaturatedPairwiseSubInt16x8 ...) => (VPHSUBSW128 ...) -(SaturatedSubInt16x16 ...) => (VPSUBSW256 ...) -(SaturatedSubInt16x32 ...) => (VPSUBSW512 ...) -(SaturatedSubInt16x8 ...) => (VPSUBSW128 ...) +(SaturatedPairwiseSubInt16x16 ...) => (VPHSUBSW256 ...) (SaturatedSubInt8x16 ...) => (VPSUBSB128 ...) (SaturatedSubInt8x32 ...) => (VPSUBSB256 ...) (SaturatedSubInt8x64 ...) => (VPSUBSB512 ...) -(SaturatedSubUint16x16 ...) => (VPSUBSW256 ...) -(SaturatedSubUint16x32 ...) => (VPSUBSW512 ...) -(SaturatedSubUint16x8 ...) => (VPSUBSW128 ...) +(SaturatedSubInt16x8 ...) => (VPSUBSW128 ...) +(SaturatedSubInt16x16 ...) => (VPSUBSW256 ...) +(SaturatedSubInt16x32 ...) => (VPSUBSW512 ...) (SaturatedSubUint8x16 ...) => (VPSUBSB128 ...) (SaturatedSubUint8x32 ...) => (VPSUBSB256 ...) (SaturatedSubUint8x64 ...) => (VPSUBSB512 ...) +(SaturatedSubUint16x8 ...) => (VPSUBSW128 ...) +(SaturatedSubUint16x16 ...) => (VPSUBSW256 ...) +(SaturatedSubUint16x32 ...) => (VPSUBSW512 ...) 
(SaturatedUnsignedSignedPairDotProdUint8x16 ...) => (VPMADDUBSW128 ...) (SaturatedUnsignedSignedPairDotProdUint8x32 ...) => (VPMADDUBSW256 ...) (SaturatedUnsignedSignedPairDotProdUint8x64 ...) => (VPMADDUBSW512 ...) -(SaturatedUnsignedSignedQuadDotProdAccumulateInt32x16 ...) => (VPDPBUSDS512 ...) (SaturatedUnsignedSignedQuadDotProdAccumulateInt32x4 ...) => (VPDPBUSDS128 ...) (SaturatedUnsignedSignedQuadDotProdAccumulateInt32x8 ...) => (VPDPBUSDS256 ...) -(SaturatedUnsignedSignedQuadDotProdAccumulateUint32x16 ...) => (VPDPBUSDS512 ...) +(SaturatedUnsignedSignedQuadDotProdAccumulateInt32x16 ...) => (VPDPBUSDS512 ...) (SaturatedUnsignedSignedQuadDotProdAccumulateUint32x4 ...) => (VPDPBUSDS128 ...) (SaturatedUnsignedSignedQuadDotProdAccumulateUint32x8 ...) => (VPDPBUSDS256 ...) +(SaturatedUnsignedSignedQuadDotProdAccumulateUint32x16 ...) => (VPDPBUSDS512 ...) +(SetElemInt8x16 [a] x y) => (VPINSRB128 [a] x y) (SetElemInt16x8 [a] x y) => (VPINSRW128 [a] x y) (SetElemInt32x4 [a] x y) => (VPINSRD128 [a] x y) (SetElemInt64x2 [a] x y) => (VPINSRQ128 [a] x y) -(SetElemInt8x16 [a] x y) => (VPINSRB128 [a] x y) +(SetElemUint8x16 [a] x y) => (VPINSRB128 [a] x y) (SetElemUint16x8 [a] x y) => (VPINSRW128 [a] x y) (SetElemUint32x4 [a] x y) => (VPINSRD128 [a] x y) (SetElemUint64x2 [a] x y) => (VPINSRQ128 [a] x y) -(SetElemUint8x16 [a] x y) => (VPINSRB128 [a] x y) -(SignInt16x16 ...) => (VPSIGNW256 ...) +(SignInt8x16 ...) => (VPSIGNB128 ...) +(SignInt8x32 ...) => (VPSIGNB256 ...) (SignInt16x8 ...) => (VPSIGNW128 ...) +(SignInt16x16 ...) => (VPSIGNW256 ...) (SignInt32x4 ...) => (VPSIGND128 ...) (SignInt32x8 ...) => (VPSIGND256 ...) -(SignInt8x16 ...) => (VPSIGNB128 ...) -(SignInt8x32 ...) => (VPSIGNB256 ...) -(SqrtFloat32x16 ...) => (VSQRTPS512 ...) (SqrtFloat32x4 ...) => (VSQRTPS128 ...) (SqrtFloat32x8 ...) => (VSQRTPS256 ...) +(SqrtFloat32x16 ...) => (VSQRTPS512 ...) (SqrtFloat64x2 ...) => (VSQRTPD128 ...) (SqrtFloat64x4 ...) => (VSQRTPD256 ...) (SqrtFloat64x8 ...) => (VSQRTPD512 ...) -(SubFloat32x16 ...) => (VSUBPS512 ...) (SubFloat32x4 ...) => (VSUBPS128 ...) (SubFloat32x8 ...) => (VSUBPS256 ...) +(SubFloat32x16 ...) => (VSUBPS512 ...) (SubFloat64x2 ...) => (VSUBPD128 ...) (SubFloat64x4 ...) => (VSUBPD256 ...) (SubFloat64x8 ...) => (VSUBPD512 ...) +(SubInt8x16 ...) => (VPSUBB128 ...) +(SubInt8x32 ...) => (VPSUBB256 ...) +(SubInt8x64 ...) => (VPSUBB512 ...) +(SubInt16x8 ...) => (VPSUBW128 ...) (SubInt16x16 ...) => (VPSUBW256 ...) (SubInt16x32 ...) => (VPSUBW512 ...) -(SubInt16x8 ...) => (VPSUBW128 ...) -(SubInt32x16 ...) => (VPSUBD512 ...) (SubInt32x4 ...) => (VPSUBD128 ...) (SubInt32x8 ...) => (VPSUBD256 ...) +(SubInt32x16 ...) => (VPSUBD512 ...) (SubInt64x2 ...) => (VPSUBQ128 ...) (SubInt64x4 ...) => (VPSUBQ256 ...) (SubInt64x8 ...) => (VPSUBQ512 ...) -(SubInt8x16 ...) => (VPSUBB128 ...) -(SubInt8x32 ...) => (VPSUBB256 ...) -(SubInt8x64 ...) => (VPSUBB512 ...) +(SubUint8x16 ...) => (VPSUBB128 ...) +(SubUint8x32 ...) => (VPSUBB256 ...) +(SubUint8x64 ...) => (VPSUBB512 ...) +(SubUint16x8 ...) => (VPSUBW128 ...) (SubUint16x16 ...) => (VPSUBW256 ...) (SubUint16x32 ...) => (VPSUBW512 ...) -(SubUint16x8 ...) => (VPSUBW128 ...) -(SubUint32x16 ...) => (VPSUBD512 ...) (SubUint32x4 ...) => (VPSUBD128 ...) (SubUint32x8 ...) => (VPSUBD256 ...) +(SubUint32x16 ...) => (VPSUBD512 ...) (SubUint64x2 ...) => (VPSUBQ128 ...) (SubUint64x4 ...) => (VPSUBQ256 ...) (SubUint64x8 ...) => (VPSUBQ512 ...) -(SubUint8x16 ...) => (VPSUBB128 ...) -(SubUint8x32 ...) => (VPSUBB256 ...) -(SubUint8x64 ...) => (VPSUBB512 ...) 
(TruncFloat32x4 x) => (VROUNDPS128 [3] x) (TruncFloat32x8 x) => (VROUNDPS256 [3] x) (TruncFloat64x2 x) => (VROUNDPD128 [3] x) (TruncFloat64x4 x) => (VROUNDPD256 [3] x) -(TruncSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+11] x) (TruncSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+11] x) (TruncSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+11] x) +(TruncSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+11] x) (TruncSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+11] x) (TruncSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+11] x) (TruncSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+11] x) -(TruncWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+3] x) (TruncWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+3] x) (TruncWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+3] x) +(TruncWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+3] x) (TruncWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+3] x) (TruncWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+3] x) (TruncWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+3] x) -(UnsignedSignedQuadDotProdAccumulateInt32x16 ...) => (VPDPBUSD512 ...) (UnsignedSignedQuadDotProdAccumulateInt32x4 ...) => (VPDPBUSD128 ...) (UnsignedSignedQuadDotProdAccumulateInt32x8 ...) => (VPDPBUSD256 ...) -(UnsignedSignedQuadDotProdAccumulateUint32x16 ...) => (VPDPBUSD512 ...) +(UnsignedSignedQuadDotProdAccumulateInt32x16 ...) => (VPDPBUSD512 ...) (UnsignedSignedQuadDotProdAccumulateUint32x4 ...) => (VPDPBUSD128 ...) (UnsignedSignedQuadDotProdAccumulateUint32x8 ...) => (VPDPBUSD256 ...) -(XorFloat32x16 ...) => (VXORPS512 ...) +(UnsignedSignedQuadDotProdAccumulateUint32x16 ...) => (VPDPBUSD512 ...) (XorFloat32x4 ...) => (VXORPS128 ...) (XorFloat32x8 ...) => (VXORPS256 ...) +(XorFloat32x16 ...) => (VXORPS512 ...) (XorFloat64x2 ...) => (VXORPD128 ...) (XorFloat64x4 ...) => (VXORPD256 ...) (XorFloat64x8 ...) => (VXORPD512 ...) -(XorInt16x16 ...) => (VPXOR256 ...) +(XorInt8x16 ...) => (VPXOR128 ...) +(XorInt8x32 ...) => (VPXOR256 ...) (XorInt16x8 ...) => (VPXOR128 ...) -(XorInt32x16 ...) => (VPXORD512 ...) +(XorInt16x16 ...) => (VPXOR256 ...) (XorInt32x4 ...) => (VPXOR128 ...) (XorInt32x8 ...) => (VPXOR256 ...) +(XorInt32x16 ...) => (VPXORD512 ...) (XorInt64x2 ...) => (VPXOR128 ...) (XorInt64x4 ...) => (VPXOR256 ...) (XorInt64x8 ...) => (VPXORQ512 ...) -(XorInt8x16 ...) => (VPXOR128 ...) -(XorInt8x32 ...) => (VPXOR256 ...) -(XorUint16x16 ...) => (VPXOR256 ...) +(XorUint8x16 ...) => (VPXOR128 ...) +(XorUint8x32 ...) => (VPXOR256 ...) (XorUint16x8 ...) => (VPXOR128 ...) -(XorUint32x16 ...) => (VPXORD512 ...) +(XorUint16x16 ...) => (VPXOR256 ...) (XorUint32x4 ...) => (VPXOR128 ...) (XorUint32x8 ...) => (VPXOR256 ...) +(XorUint32x16 ...) => (VPXORD512 ...) (XorUint64x2 ...) => (VPXOR128 ...) (XorUint64x4 ...) => (VPXOR256 ...) (XorUint64x8 ...) => (VPXORQ512 ...) -(XorUint8x16 ...) => (VPXOR128 ...) -(XorUint8x32 ...) => (VPXOR256 ...) 
diff --git a/src/simd/simd_wrapped_test.go b/src/simd/simd_wrapped_test.go index 8761097c44e1ec..b5f6bb517a318f 100644 --- a/src/simd/simd_wrapped_test.go +++ b/src/simd/simd_wrapped_test.go @@ -9,258 +9,6 @@ import ( "testing" ) -func testFloat32x16Binary(t *testing.T, v0 []float32, v1 []float32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x16 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x16Slice(v0) - vec1 := simd.LoadFloat32x16Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) - case "Div": - gotv = vec0.Div(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "Mul": - gotv = vec0.Mul(vec1) - case "MulByPowOf2": - gotv = vec0.MulByPowOf2(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) - - default: - t.Errorf("Unknown method: Float32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x16BinaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []int32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x16 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x16Slice(v0) - vec1 := simd.LoadFloat32x16Slice(v1) - vec2 := simd.LoadInt32x16Slice(v2) - switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x16()) - case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x16()) - case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x16()) - case "MaskedDiv": - gotv = vec0.MaskedDiv(vec1, vec2.AsMask32x16()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask32x16()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask32x16()) - case "MaskedMul": - gotv = vec0.MaskedMul(vec1, vec2.AsMask32x16()) - case "MaskedMulByPowOf2": - gotv = vec0.MaskedMulByPowOf2(vec1, vec2.AsMask32x16()) - case "MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask32x16()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask32x16()) - case "MaskedXor": - gotv = vec0.MaskedXor(vec1, vec2.AsMask32x16()) - - default: - t.Errorf("Unknown method: Float32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x16Compare(t *testing.T, v0 []float32, v1 []float32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadFloat32x16Slice(v0) - vec1 := simd.LoadFloat32x16Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt32x16() - case "Greater": - gotv = vec0.Greater(vec1).AsInt32x16() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt32x16() - case "IsNan": - gotv = vec0.IsNan(vec1).AsInt32x16() - case "Less": - gotv = vec0.Less(vec1).AsInt32x16() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt32x16() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt32x16() - - default: - t.Errorf("Unknown method: Float32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x16MaskedCompare(t *testing.T, v0 []float32, v1 []float32, v2 []int32, want []int32, which string) { - 
t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadFloat32x16Slice(v0) - vec1 := simd.LoadFloat32x16Slice(v1) - vec2 := simd.LoadInt32x16Slice(v2) - switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask32x16()).AsInt32x16() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask32x16()).AsInt32x16() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask32x16()).AsInt32x16() - case "MaskedIsNan": - gotv = vec0.MaskedIsNan(vec1, vec2.AsMask32x16()).AsInt32x16() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask32x16()).AsInt32x16() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask32x16()).AsInt32x16() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask32x16()).AsInt32x16() - - default: - t.Errorf("Unknown method: Float32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x16Ternary(t *testing.T, v0 []float32, v1 []float32, v2 []float32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x16 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x16Slice(v0) - vec1 := simd.LoadFloat32x16Slice(v1) - vec2 := simd.LoadFloat32x16Slice(v2) - switch which { - case "FusedMultiplyAdd": - gotv = vec0.FusedMultiplyAdd(vec1, vec2) - case "FusedMultiplyAddSub": - gotv = vec0.FusedMultiplyAddSub(vec1, vec2) - case "FusedMultiplySubAdd": - gotv = vec0.FusedMultiplySubAdd(vec1, vec2) - - default: - t.Errorf("Unknown method: Float32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x16TernaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []float32, v3 []int32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x16 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x16Slice(v0) - vec1 := simd.LoadFloat32x16Slice(v1) - vec2 := simd.LoadFloat32x16Slice(v2) - vec3 := simd.LoadInt32x16Slice(v3) - switch which { - case "MaskedFusedMultiplyAdd": - gotv = vec0.MaskedFusedMultiplyAdd(vec1, vec2, vec3.AsMask32x16()) - case "MaskedFusedMultiplyAddSub": - gotv = vec0.MaskedFusedMultiplyAddSub(vec1, vec2, vec3.AsMask32x16()) - case "MaskedFusedMultiplySubAdd": - gotv = vec0.MaskedFusedMultiplySubAdd(vec1, vec2, vec3.AsMask32x16()) - - default: - t.Errorf("Unknown method: Float32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x16Unary(t *testing.T, v0 []float32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x16 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x16Slice(v0) - switch which { - case "ApproximateReciprocal": - gotv = vec0.ApproximateReciprocal() - case "ApproximateReciprocalOfSqrt": - gotv = vec0.ApproximateReciprocalOfSqrt() - case "Sqrt": - gotv = vec0.Sqrt() - - default: - t.Errorf("Unknown method: Float32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x16UnaryMasked(t *testing.T, v0 []float32, v1 []int32, want []float32, which string) { - t.Helper() - var gotv 
simd.Float32x16 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x16Slice(v0) - vec1 := simd.LoadInt32x16Slice(v1) - switch which { - case "MaskedApproximateReciprocal": - gotv = vec0.MaskedApproximateReciprocal(vec1.AsMask32x16()) - case "MaskedApproximateReciprocalOfSqrt": - gotv = vec0.MaskedApproximateReciprocalOfSqrt(vec1.AsMask32x16()) - case "MaskedSqrt": - gotv = vec0.MaskedSqrt(vec1.AsMask32x16()) - - default: - t.Errorf("Unknown method: Float32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - func testFloat32x4Binary(t *testing.T, v0 []float32, v1 []float32, want []float32, which string) { t.Helper() var gotv simd.Float32x4 @@ -793,10 +541,262 @@ func testFloat32x8UnaryMasked(t *testing.T, v0 []float32, v1 []int32, want []flo } } -func testFloat64x2Binary(t *testing.T, v0 []float64, v1 []float64, want []float64, which string) { +func testFloat32x16Binary(t *testing.T, v0 []float32, v1 []float32, want []float32, which string) { t.Helper() - var gotv simd.Float64x2 - got := make([]float64, len(want)) + var gotv simd.Float32x16 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x16Slice(v0) + vec1 := simd.LoadFloat32x16Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Div": + gotv = vec0.Div(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "Mul": + gotv = vec0.Mul(vec1) + case "MulByPowOf2": + gotv = vec0.MulByPowOf2(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Float32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x16BinaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []int32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x16 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x16Slice(v0) + vec1 := simd.LoadFloat32x16Slice(v1) + vec2 := simd.LoadInt32x16Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x16()) + case "MaskedAnd": + gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x16()) + case "MaskedAndNot": + gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x16()) + case "MaskedDiv": + gotv = vec0.MaskedDiv(vec1, vec2.AsMask32x16()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask32x16()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask32x16()) + case "MaskedMul": + gotv = vec0.MaskedMul(vec1, vec2.AsMask32x16()) + case "MaskedMulByPowOf2": + gotv = vec0.MaskedMulByPowOf2(vec1, vec2.AsMask32x16()) + case "MaskedOr": + gotv = vec0.MaskedOr(vec1, vec2.AsMask32x16()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask32x16()) + case "MaskedXor": + gotv = vec0.MaskedXor(vec1, vec2.AsMask32x16()) + + default: + t.Errorf("Unknown method: Float32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x16Compare(t *testing.T, v0 []float32, v1 []float32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := 
simd.LoadFloat32x16Slice(v0) + vec1 := simd.LoadFloat32x16Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt32x16() + case "Greater": + gotv = vec0.Greater(vec1).AsInt32x16() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt32x16() + case "IsNan": + gotv = vec0.IsNan(vec1).AsInt32x16() + case "Less": + gotv = vec0.Less(vec1).AsInt32x16() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt32x16() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt32x16() + + default: + t.Errorf("Unknown method: Float32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x16MaskedCompare(t *testing.T, v0 []float32, v1 []float32, v2 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadFloat32x16Slice(v0) + vec1 := simd.LoadFloat32x16Slice(v1) + vec2 := simd.LoadInt32x16Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedIsNan": + gotv = vec0.MaskedIsNan(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + + default: + t.Errorf("Unknown method: Float32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x16Ternary(t *testing.T, v0 []float32, v1 []float32, v2 []float32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x16 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x16Slice(v0) + vec1 := simd.LoadFloat32x16Slice(v1) + vec2 := simd.LoadFloat32x16Slice(v2) + switch which { + case "FusedMultiplyAdd": + gotv = vec0.FusedMultiplyAdd(vec1, vec2) + case "FusedMultiplyAddSub": + gotv = vec0.FusedMultiplyAddSub(vec1, vec2) + case "FusedMultiplySubAdd": + gotv = vec0.FusedMultiplySubAdd(vec1, vec2) + + default: + t.Errorf("Unknown method: Float32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x16TernaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []float32, v3 []int32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x16 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x16Slice(v0) + vec1 := simd.LoadFloat32x16Slice(v1) + vec2 := simd.LoadFloat32x16Slice(v2) + vec3 := simd.LoadInt32x16Slice(v3) + switch which { + case "MaskedFusedMultiplyAdd": + gotv = vec0.MaskedFusedMultiplyAdd(vec1, vec2, vec3.AsMask32x16()) + case "MaskedFusedMultiplyAddSub": + gotv = vec0.MaskedFusedMultiplyAddSub(vec1, vec2, vec3.AsMask32x16()) + case "MaskedFusedMultiplySubAdd": + gotv = vec0.MaskedFusedMultiplySubAdd(vec1, vec2, vec3.AsMask32x16()) + + default: + t.Errorf("Unknown method: Float32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + 
t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x16Unary(t *testing.T, v0 []float32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x16 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x16Slice(v0) + switch which { + case "ApproximateReciprocal": + gotv = vec0.ApproximateReciprocal() + case "ApproximateReciprocalOfSqrt": + gotv = vec0.ApproximateReciprocalOfSqrt() + case "Sqrt": + gotv = vec0.Sqrt() + + default: + t.Errorf("Unknown method: Float32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x16UnaryMasked(t *testing.T, v0 []float32, v1 []int32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x16 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x16Slice(v0) + vec1 := simd.LoadInt32x16Slice(v1) + switch which { + case "MaskedApproximateReciprocal": + gotv = vec0.MaskedApproximateReciprocal(vec1.AsMask32x16()) + case "MaskedApproximateReciprocalOfSqrt": + gotv = vec0.MaskedApproximateReciprocalOfSqrt(vec1.AsMask32x16()) + case "MaskedSqrt": + gotv = vec0.MaskedSqrt(vec1.AsMask32x16()) + + default: + t.Errorf("Unknown method: Float32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat64x2Binary(t *testing.T, v0 []float64, v1 []float64, want []float64, which string) { + t.Helper() + var gotv simd.Float64x2 + got := make([]float64, len(want)) vec0 := simd.LoadFloat64x2Slice(v0) vec1 := simd.LoadFloat64x2Slice(v1) switch which { @@ -1579,12 +1579,12 @@ func testFloat64x8UnaryMasked(t *testing.T, v0 []float64, v1 []int64, want []flo } } -func testInt16x16Binary(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { +func testInt8x16Binary(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { t.Helper() - var gotv simd.Int16x16 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x16Slice(v0) - vec1 := simd.LoadInt16x16Slice(v1) + var gotv simd.Int8x16 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x16Slice(v0) + vec1 := simd.LoadInt8x16Slice(v1) switch which { case "Add": gotv = vec0.Add(vec1) @@ -1596,22 +1596,10 @@ func testInt16x16Binary(t *testing.T, v0 []int16, v1 []int16, want []int16, whic gotv = vec0.Max(vec1) case "Min": gotv = vec0.Min(vec1) - case "MulHigh": - gotv = vec0.MulHigh(vec1) - case "MulLow": - gotv = vec0.MulLow(vec1) case "Or": gotv = vec0.Or(vec1) - case "PairwiseAdd": - gotv = vec0.PairwiseAdd(vec1) - case "PairwiseSub": - gotv = vec0.PairwiseSub(vec1) case "SaturatedAdd": gotv = vec0.SaturatedAdd(vec1) - case "SaturatedPairwiseAdd": - gotv = vec0.SaturatedPairwiseAdd(vec1) - case "SaturatedPairwiseSub": - gotv = vec0.SaturatedPairwiseSub(vec1) case "SaturatedSub": gotv = vec0.SaturatedSub(vec1) case "Sign": @@ -1622,7 +1610,7 @@ func testInt16x16Binary(t *testing.T, v0 []int16, v1 []int16, want []int16, whic gotv = vec0.Xor(vec1) default: - t.Errorf("Unknown method: Int16x16.%s", which) + t.Errorf("Unknown method: Int8x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -1632,76 +1620,29 @@ func testInt16x16Binary(t *testing.T, v0 []int16, v1 []int16, want []int16, whic } } -func testInt16x16BinaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { +func 
testInt8x16BinaryMasked(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) { t.Helper() - var gotv simd.Int16x16 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x16Slice(v0) - vec1 := simd.LoadInt16x16Slice(v1) - vec2 := simd.LoadInt16x16Slice(v2) + var gotv simd.Int8x16 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x16Slice(v0) + vec1 := simd.LoadInt8x16Slice(v1) + vec2 := simd.LoadInt8x16Slice(v2) switch which { case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask16x16()) + gotv = vec0.MaskedAdd(vec1, vec2.AsMask8x16()) case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask16x16()) + gotv = vec0.MaskedMax(vec1, vec2.AsMask8x16()) case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask16x16()) - case "MaskedMulHigh": - gotv = vec0.MaskedMulHigh(vec1, vec2.AsMask16x16()) - case "MaskedMulLow": - gotv = vec0.MaskedMulLow(vec1, vec2.AsMask16x16()) + gotv = vec0.MaskedMin(vec1, vec2.AsMask8x16()) case "MaskedSaturatedAdd": - gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x16()) + gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask8x16()) case "MaskedSaturatedSub": - gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x16()) + gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask8x16()) case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask16x16()) - - default: - t.Errorf("Unknown method: Int16x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x16BinaryMaskedWiden(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int32, which string) { - t.Helper() - var gotv simd.Int32x8 - got := make([]int32, len(want)) - vec0 := simd.LoadInt16x16Slice(v0) - vec1 := simd.LoadInt16x16Slice(v1) - vec2 := simd.LoadInt16x16Slice(v2) - switch which { - case "MaskedPairDotProd": - gotv = vec0.MaskedPairDotProd(vec1, vec2.AsMask16x16()) - - default: - t.Errorf("Unknown method: Int16x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x16BinaryWiden(t *testing.T, v0 []int16, v1 []int16, want []int32, which string) { - t.Helper() - var gotv simd.Int32x8 - got := make([]int32, len(want)) - vec0 := simd.LoadInt16x16Slice(v0) - vec1 := simd.LoadInt16x16Slice(v1) - switch which { - case "PairDotProd": - gotv = vec0.PairDotProd(vec1) + gotv = vec0.MaskedSub(vec1, vec2.AsMask8x16()) default: - t.Errorf("Unknown method: Int16x16.%s", which) + t.Errorf("Unknown method: Int8x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -1711,28 +1652,28 @@ func testInt16x16BinaryWiden(t *testing.T, v0 []int16, v1 []int16, want []int32, } } -func testInt16x16Compare(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { +func testInt8x16Compare(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { t.Helper() - var gotv simd.Int16x16 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x16Slice(v0) - vec1 := simd.LoadInt16x16Slice(v1) + var gotv simd.Int8x16 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x16Slice(v0) + vec1 := simd.LoadInt8x16Slice(v1) switch which { case "Equal": - gotv = vec0.Equal(vec1).AsInt16x16() + gotv = vec0.Equal(vec1).AsInt8x16() case "Greater": - gotv = vec0.Greater(vec1).AsInt16x16() + gotv = vec0.Greater(vec1).AsInt8x16() case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt16x16() + gotv = 
vec0.GreaterEqual(vec1).AsInt8x16() case "Less": - gotv = vec0.Less(vec1).AsInt16x16() + gotv = vec0.Less(vec1).AsInt8x16() case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt16x16() + gotv = vec0.LessEqual(vec1).AsInt8x16() case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt16x16() + gotv = vec0.NotEqual(vec1).AsInt8x16() default: - t.Errorf("Unknown method: Int16x16.%s", which) + t.Errorf("Unknown method: Int8x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -1742,29 +1683,29 @@ func testInt16x16Compare(t *testing.T, v0 []int16, v1 []int16, want []int16, whi } } -func testInt16x16MaskedCompare(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { +func testInt8x16MaskedCompare(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) { t.Helper() - var gotv simd.Int16x16 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x16Slice(v0) - vec1 := simd.LoadInt16x16Slice(v1) - vec2 := simd.LoadInt16x16Slice(v2) + var gotv simd.Int8x16 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x16Slice(v0) + vec1 := simd.LoadInt8x16Slice(v1) + vec2 := simd.LoadInt8x16Slice(v2) switch which { case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask16x16()).AsInt16x16() + gotv = vec0.MaskedEqual(vec1, vec2.AsMask8x16()).AsInt8x16() case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask16x16()).AsInt16x16() + gotv = vec0.MaskedGreater(vec1, vec2.AsMask8x16()).AsInt8x16() case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask16x16()).AsInt16x16() + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask8x16()).AsInt8x16() case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask16x16()).AsInt16x16() + gotv = vec0.MaskedLess(vec1, vec2.AsMask8x16()).AsInt8x16() case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask16x16()).AsInt16x16() + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask8x16()).AsInt8x16() case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask16x16()).AsInt16x16() + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask8x16()).AsInt8x16() default: - t.Errorf("Unknown method: Int16x16.%s", which) + t.Errorf("Unknown method: Int8x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -1774,11 +1715,11 @@ func testInt16x16MaskedCompare(t *testing.T, v0 []int16, v1 []int16, v2 []int16, } } -func testInt16x16Unary(t *testing.T, v0 []int16, want []int16, which string) { +func testInt8x16Unary(t *testing.T, v0 []int8, want []int8, which string) { t.Helper() - var gotv simd.Int16x16 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x16Slice(v0) + var gotv simd.Int8x16 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x16Slice(v0) switch which { case "Absolute": gotv = vec0.Absolute() @@ -1786,7 +1727,7 @@ func testInt16x16Unary(t *testing.T, v0 []int16, want []int16, which string) { gotv = vec0.PopCount() default: - t.Errorf("Unknown method: Int16x16.%s", which) + t.Errorf("Unknown method: Int8x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -1796,20 +1737,20 @@ func testInt16x16Unary(t *testing.T, v0 []int16, want []int16, which string) { } } -func testInt16x16UnaryMasked(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { +func testInt8x16UnaryMasked(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { t.Helper() - var gotv simd.Int16x16 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x16Slice(v0) - vec1 := simd.LoadInt16x16Slice(v1) + var gotv simd.Int8x16 + got := make([]int8, 
len(want)) + vec0 := simd.LoadInt8x16Slice(v0) + vec1 := simd.LoadInt8x16Slice(v1) switch which { case "MaskedAbsolute": - gotv = vec0.MaskedAbsolute(vec1.AsMask16x16()) + gotv = vec0.MaskedAbsolute(vec1.AsMask8x16()) case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask16x16()) + gotv = vec0.MaskedPopCount(vec1.AsMask8x16()) default: - t.Errorf("Unknown method: Int16x16.%s", which) + t.Errorf("Unknown method: Int8x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -1819,32 +1760,38 @@ func testInt16x16UnaryMasked(t *testing.T, v0 []int16, v1 []int16, want []int16, } } -func testInt16x32Binary(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { +func testInt8x32Binary(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { t.Helper() - var gotv simd.Int16x32 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x32Slice(v0) - vec1 := simd.LoadInt16x32Slice(v1) + var gotv simd.Int8x32 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x32Slice(v0) + vec1 := simd.LoadInt8x32Slice(v1) switch which { case "Add": gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) case "Max": gotv = vec0.Max(vec1) case "Min": gotv = vec0.Min(vec1) - case "MulHigh": - gotv = vec0.MulHigh(vec1) - case "MulLow": - gotv = vec0.MulLow(vec1) + case "Or": + gotv = vec0.Or(vec1) case "SaturatedAdd": gotv = vec0.SaturatedAdd(vec1) case "SaturatedSub": gotv = vec0.SaturatedSub(vec1) + case "Sign": + gotv = vec0.Sign(vec1) case "Sub": gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) default: - t.Errorf("Unknown method: Int16x32.%s", which) + t.Errorf("Unknown method: Int8x32.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -1854,33 +1801,29 @@ func testInt16x32Binary(t *testing.T, v0 []int16, v1 []int16, want []int16, whic } } -func testInt16x32BinaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { +func testInt8x32BinaryMasked(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) { t.Helper() - var gotv simd.Int16x32 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x32Slice(v0) - vec1 := simd.LoadInt16x32Slice(v1) - vec2 := simd.LoadInt16x32Slice(v2) + var gotv simd.Int8x32 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x32Slice(v0) + vec1 := simd.LoadInt8x32Slice(v1) + vec2 := simd.LoadInt8x32Slice(v2) switch which { case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask16x32()) + gotv = vec0.MaskedAdd(vec1, vec2.AsMask8x32()) case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask16x32()) + gotv = vec0.MaskedMax(vec1, vec2.AsMask8x32()) case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask16x32()) - case "MaskedMulHigh": - gotv = vec0.MaskedMulHigh(vec1, vec2.AsMask16x32()) - case "MaskedMulLow": - gotv = vec0.MaskedMulLow(vec1, vec2.AsMask16x32()) + gotv = vec0.MaskedMin(vec1, vec2.AsMask8x32()) case "MaskedSaturatedAdd": - gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x32()) + gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask8x32()) case "MaskedSaturatedSub": - gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x32()) + gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask8x32()) case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask16x32()) + gotv = vec0.MaskedSub(vec1, vec2.AsMask8x32()) default: - t.Errorf("Unknown method: Int16x32.%s", which) + t.Errorf("Unknown method: Int8x32.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -1890,19 +1833,28 @@ func 
testInt16x32BinaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, } } -func testInt16x32BinaryMaskedWiden(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int32, which string) { +func testInt8x32Compare(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadInt16x32Slice(v0) - vec1 := simd.LoadInt16x32Slice(v1) - vec2 := simd.LoadInt16x32Slice(v2) + var gotv simd.Int8x32 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x32Slice(v0) + vec1 := simd.LoadInt8x32Slice(v1) switch which { - case "MaskedPairDotProd": - gotv = vec0.MaskedPairDotProd(vec1, vec2.AsMask16x32()) + case "Equal": + gotv = vec0.Equal(vec1).AsInt8x32() + case "Greater": + gotv = vec0.Greater(vec1).AsInt8x32() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt8x32() + case "Less": + gotv = vec0.Less(vec1).AsInt8x32() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt8x32() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt8x32() default: - t.Errorf("Unknown method: Int16x32.%s", which) + t.Errorf("Unknown method: Int8x32.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -1912,18 +1864,29 @@ func testInt16x32BinaryMaskedWiden(t *testing.T, v0 []int16, v1 []int16, v2 []in } } -func testInt16x32BinaryWiden(t *testing.T, v0 []int16, v1 []int16, want []int32, which string) { +func testInt8x32MaskedCompare(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) { t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadInt16x32Slice(v0) - vec1 := simd.LoadInt16x32Slice(v1) + var gotv simd.Int8x32 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x32Slice(v0) + vec1 := simd.LoadInt8x32Slice(v1) + vec2 := simd.LoadInt8x32Slice(v2) switch which { - case "PairDotProd": - gotv = vec0.PairDotProd(vec1) + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask8x32()).AsInt8x32() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask8x32()).AsInt8x32() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask8x32()).AsInt8x32() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask8x32()).AsInt8x32() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask8x32()).AsInt8x32() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask8x32()).AsInt8x32() default: - t.Errorf("Unknown method: Int16x32.%s", which) + t.Errorf("Unknown method: Int8x32.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -1933,28 +1896,136 @@ func testInt16x32BinaryWiden(t *testing.T, v0 []int16, v1 []int16, want []int32, } } -func testInt16x32Compare(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { +func testInt8x32Unary(t *testing.T, v0 []int8, want []int8, which string) { t.Helper() - var gotv simd.Int16x32 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x32Slice(v0) - vec1 := simd.LoadInt16x32Slice(v1) + var gotv simd.Int8x32 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x32Slice(v0) + switch which { + case "Absolute": + gotv = vec0.Absolute() + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Int8x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt8x32UnaryMasked(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { + t.Helper() + 
var gotv simd.Int8x32 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x32Slice(v0) + vec1 := simd.LoadInt8x32Slice(v1) + switch which { + case "MaskedAbsolute": + gotv = vec0.MaskedAbsolute(vec1.AsMask8x32()) + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask8x32()) + + default: + t.Errorf("Unknown method: Int8x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt8x64Binary(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x64 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x64Slice(v0) + vec1 := simd.LoadInt8x64Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "SaturatedAdd": + gotv = vec0.SaturatedAdd(vec1) + case "SaturatedSub": + gotv = vec0.SaturatedSub(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + + default: + t.Errorf("Unknown method: Int8x64.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt8x64BinaryMasked(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x64 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x64Slice(v0) + vec1 := simd.LoadInt8x64Slice(v1) + vec2 := simd.LoadInt8x64Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask8x64()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask8x64()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask8x64()) + case "MaskedSaturatedAdd": + gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask8x64()) + case "MaskedSaturatedSub": + gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask8x64()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask8x64()) + + default: + t.Errorf("Unknown method: Int8x64.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt8x64Compare(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x64 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x64Slice(v0) + vec1 := simd.LoadInt8x64Slice(v1) switch which { case "Equal": - gotv = vec0.Equal(vec1).AsInt16x32() + gotv = vec0.Equal(vec1).AsInt8x64() case "Greater": - gotv = vec0.Greater(vec1).AsInt16x32() + gotv = vec0.Greater(vec1).AsInt8x64() case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt16x32() + gotv = vec0.GreaterEqual(vec1).AsInt8x64() case "Less": - gotv = vec0.Less(vec1).AsInt16x32() + gotv = vec0.Less(vec1).AsInt8x64() case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt16x32() + gotv = vec0.LessEqual(vec1).AsInt8x64() case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt16x32() + gotv = vec0.NotEqual(vec1).AsInt8x64() default: - t.Errorf("Unknown method: Int16x32.%s", which) + t.Errorf("Unknown method: Int8x64.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -1964,29 +2035,29 @@ func testInt16x32Compare(t *testing.T, v0 []int16, v1 []int16, want []int16, whi } } -func testInt16x32MaskedCompare(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { +func testInt8x64MaskedCompare(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which 
string) { t.Helper() - var gotv simd.Int16x32 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x32Slice(v0) - vec1 := simd.LoadInt16x32Slice(v1) - vec2 := simd.LoadInt16x32Slice(v2) + var gotv simd.Int8x64 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x64Slice(v0) + vec1 := simd.LoadInt8x64Slice(v1) + vec2 := simd.LoadInt8x64Slice(v2) switch which { case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask16x32()).AsInt16x32() + gotv = vec0.MaskedEqual(vec1, vec2.AsMask8x64()).AsInt8x64() case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask16x32()).AsInt16x32() + gotv = vec0.MaskedGreater(vec1, vec2.AsMask8x64()).AsInt8x64() case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask16x32()).AsInt16x32() + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask8x64()).AsInt8x64() case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask16x32()).AsInt16x32() + gotv = vec0.MaskedLess(vec1, vec2.AsMask8x64()).AsInt8x64() case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask16x32()).AsInt16x32() + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask8x64()).AsInt8x64() case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask16x32()).AsInt16x32() + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask8x64()).AsInt8x64() default: - t.Errorf("Unknown method: Int16x32.%s", which) + t.Errorf("Unknown method: Int8x64.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -1996,11 +2067,11 @@ func testInt16x32MaskedCompare(t *testing.T, v0 []int16, v1 []int16, v2 []int16, } } -func testInt16x32Unary(t *testing.T, v0 []int16, want []int16, which string) { +func testInt8x64Unary(t *testing.T, v0 []int8, want []int8, which string) { t.Helper() - var gotv simd.Int16x32 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x32Slice(v0) + var gotv simd.Int8x64 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x64Slice(v0) switch which { case "Absolute": gotv = vec0.Absolute() @@ -2008,7 +2079,7 @@ func testInt16x32Unary(t *testing.T, v0 []int16, want []int16, which string) { gotv = vec0.PopCount() default: - t.Errorf("Unknown method: Int16x32.%s", which) + t.Errorf("Unknown method: Int8x64.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -2018,20 +2089,20 @@ func testInt16x32Unary(t *testing.T, v0 []int16, want []int16, which string) { } } -func testInt16x32UnaryMasked(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { +func testInt8x64UnaryMasked(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { t.Helper() - var gotv simd.Int16x32 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x32Slice(v0) - vec1 := simd.LoadInt16x32Slice(v1) + var gotv simd.Int8x64 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x64Slice(v0) + vec1 := simd.LoadInt8x64Slice(v1) switch which { case "MaskedAbsolute": - gotv = vec0.MaskedAbsolute(vec1.AsMask16x32()) + gotv = vec0.MaskedAbsolute(vec1.AsMask8x64()) case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask16x32()) + gotv = vec0.MaskedPopCount(vec1.AsMask8x64()) default: - t.Errorf("Unknown method: Int16x32.%s", which) + t.Errorf("Unknown method: Int8x64.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -2281,12 +2352,12 @@ func testInt16x8UnaryMasked(t *testing.T, v0 []int16, v1 []int16, want []int16, } } -func testInt32x16Binary(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { +func testInt16x16Binary(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { t.Helper() - var 
gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x16Slice(v0) - vec1 := simd.LoadInt32x16Slice(v1) + var gotv simd.Int16x16 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x16Slice(v0) + vec1 := simd.LoadInt16x16Slice(v1) switch which { case "Add": gotv = vec0.Add(vec1) @@ -2298,17 +2369,33 @@ func testInt32x16Binary(t *testing.T, v0 []int32, v1 []int32, want []int32, whic gotv = vec0.Max(vec1) case "Min": gotv = vec0.Min(vec1) + case "MulHigh": + gotv = vec0.MulHigh(vec1) case "MulLow": gotv = vec0.MulLow(vec1) case "Or": gotv = vec0.Or(vec1) + case "PairwiseAdd": + gotv = vec0.PairwiseAdd(vec1) + case "PairwiseSub": + gotv = vec0.PairwiseSub(vec1) + case "SaturatedAdd": + gotv = vec0.SaturatedAdd(vec1) + case "SaturatedPairwiseAdd": + gotv = vec0.SaturatedPairwiseAdd(vec1) + case "SaturatedPairwiseSub": + gotv = vec0.SaturatedPairwiseSub(vec1) + case "SaturatedSub": + gotv = vec0.SaturatedSub(vec1) + case "Sign": + gotv = vec0.Sign(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": gotv = vec0.Xor(vec1) default: - t.Errorf("Unknown method: Int32x16.%s", which) + t.Errorf("Unknown method: Int16x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -2318,35 +2405,33 @@ func testInt32x16Binary(t *testing.T, v0 []int32, v1 []int32, want []int32, whic } } -func testInt32x16BinaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { +func testInt16x16BinaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x16Slice(v0) - vec1 := simd.LoadInt32x16Slice(v1) - vec2 := simd.LoadInt32x16Slice(v2) + var gotv simd.Int16x16 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x16Slice(v0) + vec1 := simd.LoadInt16x16Slice(v1) + vec2 := simd.LoadInt16x16Slice(v2) switch which { case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x16()) - case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x16()) - case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x16()) + gotv = vec0.MaskedAdd(vec1, vec2.AsMask16x16()) case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask32x16()) + gotv = vec0.MaskedMax(vec1, vec2.AsMask16x16()) case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask32x16()) + gotv = vec0.MaskedMin(vec1, vec2.AsMask16x16()) + case "MaskedMulHigh": + gotv = vec0.MaskedMulHigh(vec1, vec2.AsMask16x16()) case "MaskedMulLow": - gotv = vec0.MaskedMulLow(vec1, vec2.AsMask32x16()) - case "MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask32x16()) + gotv = vec0.MaskedMulLow(vec1, vec2.AsMask16x16()) + case "MaskedSaturatedAdd": + gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x16()) + case "MaskedSaturatedSub": + gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x16()) case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask32x16()) - case "MaskedXor": - gotv = vec0.MaskedXor(vec1, vec2.AsMask32x16()) + gotv = vec0.MaskedSub(vec1, vec2.AsMask16x16()) default: - t.Errorf("Unknown method: Int32x16.%s", which) + t.Errorf("Unknown method: Int16x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -2356,28 +2441,19 @@ func testInt32x16BinaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, } } -func testInt32x16Compare(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { +func testInt16x16BinaryMaskedWiden(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int32, which string) { t.Helper() - var gotv 
simd.Int32x16 + var gotv simd.Int32x8 got := make([]int32, len(want)) - vec0 := simd.LoadInt32x16Slice(v0) - vec1 := simd.LoadInt32x16Slice(v1) + vec0 := simd.LoadInt16x16Slice(v0) + vec1 := simd.LoadInt16x16Slice(v1) + vec2 := simd.LoadInt16x16Slice(v2) switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt32x16() - case "Greater": - gotv = vec0.Greater(vec1).AsInt32x16() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt32x16() - case "Less": - gotv = vec0.Less(vec1).AsInt32x16() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt32x16() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt32x16() + case "MaskedPairDotProd": + gotv = vec0.MaskedPairDotProd(vec1, vec2.AsMask16x16()) default: - t.Errorf("Unknown method: Int32x16.%s", which) + t.Errorf("Unknown method: Int16x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -2387,21 +2463,18 @@ func testInt32x16Compare(t *testing.T, v0 []int32, v1 []int32, want []int32, whi } } -func testInt32x16Int16x32Int16x32Int32x16(t *testing.T, v0 []int32, v1 []int16, v2 []int16, want []int32, which string) { +func testInt16x16BinaryWiden(t *testing.T, v0 []int16, v1 []int16, want []int32, which string) { t.Helper() - var gotv simd.Int32x16 + var gotv simd.Int32x8 got := make([]int32, len(want)) - vec0 := simd.LoadInt32x16Slice(v0) - vec1 := simd.LoadInt16x32Slice(v1) - vec2 := simd.LoadInt16x32Slice(v2) + vec0 := simd.LoadInt16x16Slice(v0) + vec1 := simd.LoadInt16x16Slice(v1) switch which { - case "PairDotProdAccumulate": - gotv = vec0.PairDotProdAccumulate(vec1, vec2) - case "SaturatedPairDotProdAccumulate": - gotv = vec0.SaturatedPairDotProdAccumulate(vec1, vec2) + case "PairDotProd": + gotv = vec0.PairDotProd(vec1) default: - t.Errorf("Unknown method: Int32x16.%s", which) + t.Errorf("Unknown method: Int16x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -2411,22 +2484,28 @@ func testInt32x16Int16x32Int16x32Int32x16(t *testing.T, v0 []int32, v1 []int16, } } -func testInt32x16Int16x32Int16x32Mask32x16Int32x16(t *testing.T, v0 []int32, v1 []int16, v2 []int16, v3 []int32, want []int32, which string) { +func testInt16x16Compare(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x16Slice(v0) - vec1 := simd.LoadInt16x32Slice(v1) - vec2 := simd.LoadInt16x32Slice(v2) - vec3 := simd.LoadInt32x16Slice(v3) + var gotv simd.Int16x16 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x16Slice(v0) + vec1 := simd.LoadInt16x16Slice(v1) switch which { - case "MaskedPairDotProdAccumulate": - gotv = vec0.MaskedPairDotProdAccumulate(vec1, vec2, vec3.AsMask32x16()) - case "MaskedSaturatedPairDotProdAccumulate": - gotv = vec0.MaskedSaturatedPairDotProdAccumulate(vec1, vec2, vec3.AsMask32x16()) + case "Equal": + gotv = vec0.Equal(vec1).AsInt16x16() + case "Greater": + gotv = vec0.Greater(vec1).AsInt16x16() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt16x16() + case "Less": + gotv = vec0.Less(vec1).AsInt16x16() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt16x16() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt16x16() default: - t.Errorf("Unknown method: Int32x16.%s", which) + t.Errorf("Unknown method: Int16x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -2436,78 +2515,29 @@ func testInt32x16Int16x32Int16x32Mask32x16Int32x16(t *testing.T, v0 []int32, v1 } } -func testInt32x16MaskedCompare(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want 
[]int32, which string) { +func testInt16x16MaskedCompare(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x16Slice(v0) - vec1 := simd.LoadInt32x16Slice(v1) - vec2 := simd.LoadInt32x16Slice(v2) + var gotv simd.Int16x16 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x16Slice(v0) + vec1 := simd.LoadInt16x16Slice(v1) + vec2 := simd.LoadInt16x16Slice(v2) switch which { case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + gotv = vec0.MaskedEqual(vec1, vec2.AsMask16x16()).AsInt16x16() case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask32x16()).AsInt32x16() + gotv = vec0.MaskedGreater(vec1, vec2.AsMask16x16()).AsInt16x16() case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask16x16()).AsInt16x16() case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask32x16()).AsInt32x16() + gotv = vec0.MaskedLess(vec1, vec2.AsMask16x16()).AsInt16x16() case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask16x16()).AsInt16x16() case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask32x16()).AsInt32x16() - - default: - t.Errorf("Unknown method: Int32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x16Uint8x64Int8x64Int32x16(t *testing.T, v0 []int32, v1 []uint8, v2 []int8, want []int32, which string) { - t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x16Slice(v0) - vec1 := simd.LoadUint8x64Slice(v1) - vec2 := simd.LoadInt8x64Slice(v2) - switch which { - case "SaturatedUnsignedSignedQuadDotProdAccumulate": - gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2) - case "UnsignedSignedQuadDotProdAccumulate": - gotv = vec0.UnsignedSignedQuadDotProdAccumulate(vec1, vec2) - - default: - t.Errorf("Unknown method: Int32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x16Uint8x64Int8x64Mask32x16Int32x16(t *testing.T, v0 []int32, v1 []uint8, v2 []int8, v3 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x16Slice(v0) - vec1 := simd.LoadUint8x64Slice(v1) - vec2 := simd.LoadInt8x64Slice(v2) - vec3 := simd.LoadInt32x16Slice(v3) - switch which { - case "MaskedSaturatedUnsignedSignedQuadDotProdAccumulate": - gotv = vec0.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x16()) - case "MaskedUnsignedSignedQuadDotProdAccumulate": - gotv = vec0.MaskedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x16()) + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask16x16()).AsInt16x16() default: - t.Errorf("Unknown method: Int32x16.%s", which) + t.Errorf("Unknown method: Int16x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -2517,11 +2547,11 @@ func testInt32x16Uint8x64Int8x64Mask32x16Int32x16(t *testing.T, v0 []int32, v1 [ } } -func testInt32x16Unary(t *testing.T, v0 []int32, want []int32, which string) { +func testInt16x16Unary(t *testing.T, v0 []int16, want []int16, which 
string) { t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x16Slice(v0) + var gotv simd.Int16x16 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x16Slice(v0) switch which { case "Absolute": gotv = vec0.Absolute() @@ -2529,7 +2559,7 @@ func testInt32x16Unary(t *testing.T, v0 []int32, want []int32, which string) { gotv = vec0.PopCount() default: - t.Errorf("Unknown method: Int32x16.%s", which) + t.Errorf("Unknown method: Int16x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -2539,20 +2569,20 @@ func testInt32x16Unary(t *testing.T, v0 []int32, want []int32, which string) { } } -func testInt32x16UnaryMasked(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { +func testInt16x16UnaryMasked(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x16Slice(v0) - vec1 := simd.LoadInt32x16Slice(v1) + var gotv simd.Int16x16 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x16Slice(v0) + vec1 := simd.LoadInt16x16Slice(v1) switch which { case "MaskedAbsolute": - gotv = vec0.MaskedAbsolute(vec1.AsMask32x16()) + gotv = vec0.MaskedAbsolute(vec1.AsMask16x16()) case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask32x16()) + gotv = vec0.MaskedPopCount(vec1.AsMask16x16()) default: - t.Errorf("Unknown method: Int32x16.%s", which) + t.Errorf("Unknown method: Int16x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -2562,40 +2592,32 @@ func testInt32x16UnaryMasked(t *testing.T, v0 []int32, v1 []int32, want []int32, } } -func testInt32x4Binary(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { +func testInt16x32Binary(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { t.Helper() - var gotv simd.Int32x4 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x4Slice(v0) - vec1 := simd.LoadInt32x4Slice(v1) + var gotv simd.Int16x32 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x32Slice(v0) + vec1 := simd.LoadInt16x32Slice(v1) switch which { case "Add": gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) case "Max": gotv = vec0.Max(vec1) case "Min": gotv = vec0.Min(vec1) + case "MulHigh": + gotv = vec0.MulHigh(vec1) case "MulLow": gotv = vec0.MulLow(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "PairwiseAdd": - gotv = vec0.PairwiseAdd(vec1) - case "PairwiseSub": - gotv = vec0.PairwiseSub(vec1) - case "Sign": - gotv = vec0.Sign(vec1) + case "SaturatedAdd": + gotv = vec0.SaturatedAdd(vec1) + case "SaturatedSub": + gotv = vec0.SaturatedSub(vec1) case "Sub": gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) default: - t.Errorf("Unknown method: Int32x4.%s", which) + t.Errorf("Unknown method: Int16x32.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -2605,28 +2627,258 @@ func testInt32x4Binary(t *testing.T, v0 []int32, v1 []int32, want []int32, which } } -func testInt32x4BinaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { +func testInt16x32BinaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { t.Helper() - var gotv simd.Int32x4 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x4Slice(v0) - vec1 := simd.LoadInt32x4Slice(v1) - vec2 := simd.LoadInt32x4Slice(v2) + var gotv simd.Int16x32 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x32Slice(v0) + vec1 := 
simd.LoadInt16x32Slice(v1) + vec2 := simd.LoadInt16x32Slice(v2) switch which { case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x4()) - case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x4()) - case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x4()) + gotv = vec0.MaskedAdd(vec1, vec2.AsMask16x32()) case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask32x4()) + gotv = vec0.MaskedMax(vec1, vec2.AsMask16x32()) case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask32x4()) + gotv = vec0.MaskedMin(vec1, vec2.AsMask16x32()) + case "MaskedMulHigh": + gotv = vec0.MaskedMulHigh(vec1, vec2.AsMask16x32()) case "MaskedMulLow": - gotv = vec0.MaskedMulLow(vec1, vec2.AsMask32x4()) - case "MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask32x4()) + gotv = vec0.MaskedMulLow(vec1, vec2.AsMask16x32()) + case "MaskedSaturatedAdd": + gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x32()) + case "MaskedSaturatedSub": + gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x32()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask16x32()) + + default: + t.Errorf("Unknown method: Int16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x32BinaryMaskedWiden(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt16x32Slice(v0) + vec1 := simd.LoadInt16x32Slice(v1) + vec2 := simd.LoadInt16x32Slice(v2) + switch which { + case "MaskedPairDotProd": + gotv = vec0.MaskedPairDotProd(vec1, vec2.AsMask16x32()) + + default: + t.Errorf("Unknown method: Int16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x32BinaryWiden(t *testing.T, v0 []int16, v1 []int16, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt16x32Slice(v0) + vec1 := simd.LoadInt16x32Slice(v1) + switch which { + case "PairDotProd": + gotv = vec0.PairDotProd(vec1) + + default: + t.Errorf("Unknown method: Int16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x32Compare(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x32 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x32Slice(v0) + vec1 := simd.LoadInt16x32Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt16x32() + case "Greater": + gotv = vec0.Greater(vec1).AsInt16x32() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt16x32() + case "Less": + gotv = vec0.Less(vec1).AsInt16x32() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt16x32() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt16x32() + + default: + t.Errorf("Unknown method: Int16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x32MaskedCompare(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x32 + got := make([]int16, len(want)) + vec0 := 
simd.LoadInt16x32Slice(v0) + vec1 := simd.LoadInt16x32Slice(v1) + vec2 := simd.LoadInt16x32Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask16x32()).AsInt16x32() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask16x32()).AsInt16x32() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask16x32()).AsInt16x32() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask16x32()).AsInt16x32() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask16x32()).AsInt16x32() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask16x32()).AsInt16x32() + + default: + t.Errorf("Unknown method: Int16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x32Unary(t *testing.T, v0 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x32 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x32Slice(v0) + switch which { + case "Absolute": + gotv = vec0.Absolute() + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Int16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x32UnaryMasked(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x32 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x32Slice(v0) + vec1 := simd.LoadInt16x32Slice(v1) + switch which { + case "MaskedAbsolute": + gotv = vec0.MaskedAbsolute(vec1.AsMask16x32()) + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask16x32()) + + default: + t.Errorf("Unknown method: Int16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x4Binary(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x4 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x4Slice(v0) + vec1 := simd.LoadInt32x4Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "MulLow": + gotv = vec0.MulLow(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "PairwiseAdd": + gotv = vec0.PairwiseAdd(vec1) + case "PairwiseSub": + gotv = vec0.PairwiseSub(vec1) + case "Sign": + gotv = vec0.Sign(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Int32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x4BinaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x4 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x4Slice(v0) + vec1 := simd.LoadInt32x4Slice(v1) + vec2 := simd.LoadInt32x4Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x4()) + case "MaskedAnd": + gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x4()) + case "MaskedAndNot": + gotv = vec0.MaskedAndNot(vec1, 
vec2.AsMask32x4()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask32x4()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask32x4()) + case "MaskedMulLow": + gotv = vec0.MaskedMulLow(vec1, vec2.AsMask32x4()) + case "MaskedOr": + gotv = vec0.MaskedOr(vec1, vec2.AsMask32x4()) case "MaskedSub": gotv = vec0.MaskedSub(vec1, vec2.AsMask32x4()) case "MaskedXor": @@ -3178,12 +3430,12 @@ func testInt32x8UnaryMasked(t *testing.T, v0 []int32, v1 []int32, want []int32, } } -func testInt64x2Binary(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { +func testInt32x16Binary(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { t.Helper() - var gotv simd.Int64x2 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x2Slice(v0) - vec1 := simd.LoadInt64x2Slice(v1) + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x16Slice(v0) + vec1 := simd.LoadInt32x16Slice(v1) switch which { case "Add": gotv = vec0.Add(vec1) @@ -3195,8 +3447,6 @@ func testInt64x2Binary(t *testing.T, v0 []int64, v1 []int64, want []int64, which gotv = vec0.Max(vec1) case "Min": gotv = vec0.Min(vec1) - case "MulEvenWiden": - gotv = vec0.MulEvenWiden(vec1) case "MulLow": gotv = vec0.MulLow(vec1) case "Or": @@ -3207,7 +3457,7 @@ func testInt64x2Binary(t *testing.T, v0 []int64, v1 []int64, want []int64, which gotv = vec0.Xor(vec1) default: - t.Errorf("Unknown method: Int64x2.%s", which) + t.Errorf("Unknown method: Int32x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -3217,37 +3467,35 @@ func testInt64x2Binary(t *testing.T, v0 []int64, v1 []int64, want []int64, which } } -func testInt64x2BinaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { +func testInt32x16BinaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { t.Helper() - var gotv simd.Int64x2 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x2Slice(v0) - vec1 := simd.LoadInt64x2Slice(v1) - vec2 := simd.LoadInt64x2Slice(v2) + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x16Slice(v0) + vec1 := simd.LoadInt32x16Slice(v1) + vec2 := simd.LoadInt32x16Slice(v2) switch which { case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x2()) + gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x16()) case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x2()) + gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x16()) case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask64x2()) + gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x16()) case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask64x2()) + gotv = vec0.MaskedMax(vec1, vec2.AsMask32x16()) case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask64x2()) - case "MaskedMulEvenWiden": - gotv = vec0.MaskedMulEvenWiden(vec1, vec2.AsMask64x2()) + gotv = vec0.MaskedMin(vec1, vec2.AsMask32x16()) case "MaskedMulLow": - gotv = vec0.MaskedMulLow(vec1, vec2.AsMask64x2()) + gotv = vec0.MaskedMulLow(vec1, vec2.AsMask32x16()) case "MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask64x2()) + gotv = vec0.MaskedOr(vec1, vec2.AsMask32x16()) case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask64x2()) + gotv = vec0.MaskedSub(vec1, vec2.AsMask32x16()) case "MaskedXor": - gotv = vec0.MaskedXor(vec1, vec2.AsMask64x2()) + gotv = vec0.MaskedXor(vec1, vec2.AsMask32x16()) default: - t.Errorf("Unknown method: Int64x2.%s", which) + t.Errorf("Unknown method: Int32x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ 
-3257,28 +3505,28 @@ func testInt64x2BinaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, w } } -func testInt64x2Compare(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { +func testInt32x16Compare(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { t.Helper() - var gotv simd.Int64x2 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x2Slice(v0) - vec1 := simd.LoadInt64x2Slice(v1) + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x16Slice(v0) + vec1 := simd.LoadInt32x16Slice(v1) switch which { case "Equal": - gotv = vec0.Equal(vec1).AsInt64x2() + gotv = vec0.Equal(vec1).AsInt32x16() case "Greater": - gotv = vec0.Greater(vec1).AsInt64x2() + gotv = vec0.Greater(vec1).AsInt32x16() case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt64x2() + gotv = vec0.GreaterEqual(vec1).AsInt32x16() case "Less": - gotv = vec0.Less(vec1).AsInt64x2() + gotv = vec0.Less(vec1).AsInt32x16() case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt64x2() + gotv = vec0.LessEqual(vec1).AsInt32x16() case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt64x2() + gotv = vec0.NotEqual(vec1).AsInt32x16() default: - t.Errorf("Unknown method: Int64x2.%s", which) + t.Errorf("Unknown method: Int32x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -3288,29 +3536,78 @@ func testInt64x2Compare(t *testing.T, v0 []int64, v1 []int64, want []int64, whic } } -func testInt64x2MaskedCompare(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { +func testInt32x16Int16x32Int16x32Int32x16(t *testing.T, v0 []int32, v1 []int16, v2 []int16, want []int32, which string) { t.Helper() - var gotv simd.Int64x2 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x2Slice(v0) - vec1 := simd.LoadInt64x2Slice(v1) - vec2 := simd.LoadInt64x2Slice(v2) + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x16Slice(v0) + vec1 := simd.LoadInt16x32Slice(v1) + vec2 := simd.LoadInt16x32Slice(v2) + switch which { + case "PairDotProdAccumulate": + gotv = vec0.PairDotProdAccumulate(vec1, vec2) + case "SaturatedPairDotProdAccumulate": + gotv = vec0.SaturatedPairDotProdAccumulate(vec1, vec2) + + default: + t.Errorf("Unknown method: Int32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x16Int16x32Int16x32Mask32x16Int32x16(t *testing.T, v0 []int32, v1 []int16, v2 []int16, v3 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x16Slice(v0) + vec1 := simd.LoadInt16x32Slice(v1) + vec2 := simd.LoadInt16x32Slice(v2) + vec3 := simd.LoadInt32x16Slice(v3) + switch which { + case "MaskedPairDotProdAccumulate": + gotv = vec0.MaskedPairDotProdAccumulate(vec1, vec2, vec3.AsMask32x16()) + case "MaskedSaturatedPairDotProdAccumulate": + gotv = vec0.MaskedSaturatedPairDotProdAccumulate(vec1, vec2, vec3.AsMask32x16()) + + default: + t.Errorf("Unknown method: Int32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x16MaskedCompare(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x16Slice(v0) + vec1 := simd.LoadInt32x16Slice(v1) 
+ vec2 := simd.LoadInt32x16Slice(v2) switch which { case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask64x2()).AsInt64x2() + gotv = vec0.MaskedEqual(vec1, vec2.AsMask32x16()).AsInt32x16() case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask64x2()).AsInt64x2() + gotv = vec0.MaskedGreater(vec1, vec2.AsMask32x16()).AsInt32x16() case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask64x2()).AsInt64x2() + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask32x16()).AsInt32x16() case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask64x2()).AsInt64x2() + gotv = vec0.MaskedLess(vec1, vec2.AsMask32x16()).AsInt32x16() case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask64x2()).AsInt64x2() + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask32x16()).AsInt32x16() case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask64x2()).AsInt64x2() + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask32x16()).AsInt32x16() default: - t.Errorf("Unknown method: Int64x2.%s", which) + t.Errorf("Unknown method: Int32x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -3320,11 +3617,60 @@ func testInt64x2MaskedCompare(t *testing.T, v0 []int64, v1 []int64, v2 []int64, } } -func testInt64x2Unary(t *testing.T, v0 []int64, want []int64, which string) { +func testInt32x16Uint8x64Int8x64Int32x16(t *testing.T, v0 []int32, v1 []uint8, v2 []int8, want []int32, which string) { t.Helper() - var gotv simd.Int64x2 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x2Slice(v0) + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x16Slice(v0) + vec1 := simd.LoadUint8x64Slice(v1) + vec2 := simd.LoadInt8x64Slice(v2) + switch which { + case "SaturatedUnsignedSignedQuadDotProdAccumulate": + gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2) + case "UnsignedSignedQuadDotProdAccumulate": + gotv = vec0.UnsignedSignedQuadDotProdAccumulate(vec1, vec2) + + default: + t.Errorf("Unknown method: Int32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x16Uint8x64Int8x64Mask32x16Int32x16(t *testing.T, v0 []int32, v1 []uint8, v2 []int8, v3 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x16Slice(v0) + vec1 := simd.LoadUint8x64Slice(v1) + vec2 := simd.LoadInt8x64Slice(v2) + vec3 := simd.LoadInt32x16Slice(v3) + switch which { + case "MaskedSaturatedUnsignedSignedQuadDotProdAccumulate": + gotv = vec0.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x16()) + case "MaskedUnsignedSignedQuadDotProdAccumulate": + gotv = vec0.MaskedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x16()) + + default: + t.Errorf("Unknown method: Int32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x16Unary(t *testing.T, v0 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x16Slice(v0) switch which { case "Absolute": gotv = vec0.Absolute() @@ -3332,7 +3678,7 @@ func testInt64x2Unary(t *testing.T, v0 []int64, want []int64, which string) { gotv = vec0.PopCount() default: - t.Errorf("Unknown method: Int64x2.%s", which) + t.Errorf("Unknown 
method: Int32x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -3342,20 +3688,20 @@ func testInt64x2Unary(t *testing.T, v0 []int64, want []int64, which string) { } } -func testInt64x2UnaryMasked(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { +func testInt32x16UnaryMasked(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { t.Helper() - var gotv simd.Int64x2 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x2Slice(v0) - vec1 := simd.LoadInt64x2Slice(v1) + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x16Slice(v0) + vec1 := simd.LoadInt32x16Slice(v1) switch which { case "MaskedAbsolute": - gotv = vec0.MaskedAbsolute(vec1.AsMask64x2()) + gotv = vec0.MaskedAbsolute(vec1.AsMask32x16()) case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask64x2()) + gotv = vec0.MaskedPopCount(vec1.AsMask32x16()) default: - t.Errorf("Unknown method: Int64x2.%s", which) + t.Errorf("Unknown method: Int32x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -3365,12 +3711,12 @@ func testInt64x2UnaryMasked(t *testing.T, v0 []int64, v1 []int64, want []int64, } } -func testInt64x4Binary(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { +func testInt64x2Binary(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { t.Helper() - var gotv simd.Int64x4 + var gotv simd.Int64x2 got := make([]int64, len(want)) - vec0 := simd.LoadInt64x4Slice(v0) - vec1 := simd.LoadInt64x4Slice(v1) + vec0 := simd.LoadInt64x2Slice(v0) + vec1 := simd.LoadInt64x2Slice(v1) switch which { case "Add": gotv = vec0.Add(vec1) @@ -3394,7 +3740,7 @@ func testInt64x4Binary(t *testing.T, v0 []int64, v1 []int64, want []int64, which gotv = vec0.Xor(vec1) default: - t.Errorf("Unknown method: Int64x4.%s", which) + t.Errorf("Unknown method: Int64x2.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -3404,37 +3750,37 @@ func testInt64x4Binary(t *testing.T, v0 []int64, v1 []int64, want []int64, which } } -func testInt64x4BinaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { +func testInt64x2BinaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { t.Helper() - var gotv simd.Int64x4 + var gotv simd.Int64x2 got := make([]int64, len(want)) - vec0 := simd.LoadInt64x4Slice(v0) - vec1 := simd.LoadInt64x4Slice(v1) - vec2 := simd.LoadInt64x4Slice(v2) + vec0 := simd.LoadInt64x2Slice(v0) + vec1 := simd.LoadInt64x2Slice(v1) + vec2 := simd.LoadInt64x2Slice(v2) switch which { case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x4()) + gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x2()) case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x4()) + gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x2()) case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask64x4()) + gotv = vec0.MaskedAndNot(vec1, vec2.AsMask64x2()) case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask64x4()) + gotv = vec0.MaskedMax(vec1, vec2.AsMask64x2()) case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask64x4()) + gotv = vec0.MaskedMin(vec1, vec2.AsMask64x2()) case "MaskedMulEvenWiden": - gotv = vec0.MaskedMulEvenWiden(vec1, vec2.AsMask64x4()) + gotv = vec0.MaskedMulEvenWiden(vec1, vec2.AsMask64x2()) case "MaskedMulLow": - gotv = vec0.MaskedMulLow(vec1, vec2.AsMask64x4()) + gotv = vec0.MaskedMulLow(vec1, vec2.AsMask64x2()) case "MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask64x4()) + gotv = vec0.MaskedOr(vec1, vec2.AsMask64x2()) case 
"MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask64x4()) + gotv = vec0.MaskedSub(vec1, vec2.AsMask64x2()) case "MaskedXor": - gotv = vec0.MaskedXor(vec1, vec2.AsMask64x4()) + gotv = vec0.MaskedXor(vec1, vec2.AsMask64x2()) default: - t.Errorf("Unknown method: Int64x4.%s", which) + t.Errorf("Unknown method: Int64x2.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -3444,23 +3790,210 @@ func testInt64x4BinaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, w } } -func testInt64x4Compare(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { +func testInt64x2Compare(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { t.Helper() - var gotv simd.Int64x4 + var gotv simd.Int64x2 got := make([]int64, len(want)) - vec0 := simd.LoadInt64x4Slice(v0) - vec1 := simd.LoadInt64x4Slice(v1) + vec0 := simd.LoadInt64x2Slice(v0) + vec1 := simd.LoadInt64x2Slice(v1) switch which { case "Equal": - gotv = vec0.Equal(vec1).AsInt64x4() + gotv = vec0.Equal(vec1).AsInt64x2() case "Greater": - gotv = vec0.Greater(vec1).AsInt64x4() + gotv = vec0.Greater(vec1).AsInt64x2() case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt64x4() + gotv = vec0.GreaterEqual(vec1).AsInt64x2() case "Less": - gotv = vec0.Less(vec1).AsInt64x4() + gotv = vec0.Less(vec1).AsInt64x2() case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt64x4() + gotv = vec0.LessEqual(vec1).AsInt64x2() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt64x2() + + default: + t.Errorf("Unknown method: Int64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x2MaskedCompare(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x2 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x2Slice(v0) + vec1 := simd.LoadInt64x2Slice(v1) + vec2 := simd.LoadInt64x2Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask64x2()).AsInt64x2() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask64x2()).AsInt64x2() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask64x2()).AsInt64x2() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask64x2()).AsInt64x2() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask64x2()).AsInt64x2() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask64x2()).AsInt64x2() + + default: + t.Errorf("Unknown method: Int64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x2Unary(t *testing.T, v0 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x2 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x2Slice(v0) + switch which { + case "Absolute": + gotv = vec0.Absolute() + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Int64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x2UnaryMasked(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x2 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x2Slice(v0) + vec1 := simd.LoadInt64x2Slice(v1) + switch which 
{ + case "MaskedAbsolute": + gotv = vec0.MaskedAbsolute(vec1.AsMask64x2()) + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask64x2()) + + default: + t.Errorf("Unknown method: Int64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x4Binary(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x4 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x4Slice(v0) + vec1 := simd.LoadInt64x4Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "MulEvenWiden": + gotv = vec0.MulEvenWiden(vec1) + case "MulLow": + gotv = vec0.MulLow(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Int64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x4BinaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x4 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x4Slice(v0) + vec1 := simd.LoadInt64x4Slice(v1) + vec2 := simd.LoadInt64x4Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x4()) + case "MaskedAnd": + gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x4()) + case "MaskedAndNot": + gotv = vec0.MaskedAndNot(vec1, vec2.AsMask64x4()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask64x4()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask64x4()) + case "MaskedMulEvenWiden": + gotv = vec0.MaskedMulEvenWiden(vec1, vec2.AsMask64x4()) + case "MaskedMulLow": + gotv = vec0.MaskedMulLow(vec1, vec2.AsMask64x4()) + case "MaskedOr": + gotv = vec0.MaskedOr(vec1, vec2.AsMask64x4()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask64x4()) + case "MaskedXor": + gotv = vec0.MaskedXor(vec1, vec2.AsMask64x4()) + + default: + t.Errorf("Unknown method: Int64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x4Compare(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x4 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x4Slice(v0) + vec1 := simd.LoadInt64x4Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt64x4() + case "Greater": + gotv = vec0.Greater(vec1).AsInt64x4() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt64x4() + case "Less": + gotv = vec0.Less(vec1).AsInt64x4() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt64x4() case "NotEqual": gotv = vec0.NotEqual(vec1).AsInt64x4() @@ -3739,12 +4272,12 @@ func testInt64x8UnaryMasked(t *testing.T, v0 []int64, v1 []int64, want []int64, } } -func testInt8x16Binary(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { +func testUint8x16Binary(t *testing.T, v0 []uint8, v1 []uint8, want []uint8, which string) { t.Helper() - var gotv simd.Int8x16 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x16Slice(v0) - vec1 := simd.LoadInt8x16Slice(v1) + 
var gotv simd.Uint8x16 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x16Slice(v0) + vec1 := simd.LoadUint8x16Slice(v1) switch which { case "Add": gotv = vec0.Add(vec1) @@ -3752,6 +4285,8 @@ func testInt8x16Binary(t *testing.T, v0 []int8, v1 []int8, want []int8, which st gotv = vec0.And(vec1) case "AndNot": gotv = vec0.AndNot(vec1) + case "Average": + gotv = vec0.Average(vec1) case "Max": gotv = vec0.Max(vec1) case "Min": @@ -3762,15 +4297,13 @@ func testInt8x16Binary(t *testing.T, v0 []int8, v1 []int8, want []int8, which st gotv = vec0.SaturatedAdd(vec1) case "SaturatedSub": gotv = vec0.SaturatedSub(vec1) - case "Sign": - gotv = vec0.Sign(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": gotv = vec0.Xor(vec1) default: - t.Errorf("Unknown method: Int8x16.%s", which) + t.Errorf("Unknown method: Uint8x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -3780,16 +4313,18 @@ func testInt8x16Binary(t *testing.T, v0 []int8, v1 []int8, want []int8, which st } } -func testInt8x16BinaryMasked(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) { +func testUint8x16BinaryMasked(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []uint8, which string) { t.Helper() - var gotv simd.Int8x16 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x16Slice(v0) - vec1 := simd.LoadInt8x16Slice(v1) + var gotv simd.Uint8x16 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x16Slice(v0) + vec1 := simd.LoadUint8x16Slice(v1) vec2 := simd.LoadInt8x16Slice(v2) switch which { case "MaskedAdd": gotv = vec0.MaskedAdd(vec1, vec2.AsMask8x16()) + case "MaskedAverage": + gotv = vec0.MaskedAverage(vec1, vec2.AsMask8x16()) case "MaskedMax": gotv = vec0.MaskedMax(vec1, vec2.AsMask8x16()) case "MaskedMin": @@ -3802,7 +4337,7 @@ func testInt8x16BinaryMasked(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want gotv = vec0.MaskedSub(vec1, vec2.AsMask8x16()) default: - t.Errorf("Unknown method: Int8x16.%s", which) + t.Errorf("Unknown method: Uint8x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -3812,12 +4347,12 @@ func testInt8x16BinaryMasked(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want } } -func testInt8x16Compare(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { +func testUint8x16Compare(t *testing.T, v0 []uint8, v1 []uint8, want []int8, which string) { t.Helper() var gotv simd.Int8x16 got := make([]int8, len(want)) - vec0 := simd.LoadInt8x16Slice(v0) - vec1 := simd.LoadInt8x16Slice(v1) + vec0 := simd.LoadUint8x16Slice(v0) + vec1 := simd.LoadUint8x16Slice(v1) switch which { case "Equal": gotv = vec0.Equal(vec1).AsInt8x16() @@ -3833,7 +4368,7 @@ func testInt8x16Compare(t *testing.T, v0 []int8, v1 []int8, want []int8, which s gotv = vec0.NotEqual(vec1).AsInt8x16() default: - t.Errorf("Unknown method: Int8x16.%s", which) + t.Errorf("Unknown method: Uint8x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -3843,29 +4378,18 @@ func testInt8x16Compare(t *testing.T, v0 []int8, v1 []int8, want []int8, which s } } -func testInt8x16MaskedCompare(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) { +func testUint8x16Int8x16Int16x8(t *testing.T, v0 []uint8, v1 []int8, want []int16, which string) { t.Helper() - var gotv simd.Int8x16 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x16Slice(v0) + var gotv simd.Int16x8 + got := make([]int16, len(want)) + vec0 := simd.LoadUint8x16Slice(v0) vec1 := simd.LoadInt8x16Slice(v1) - vec2 := simd.LoadInt8x16Slice(v2) switch which { - case "MaskedEqual": 
- gotv = vec0.MaskedEqual(vec1, vec2.AsMask8x16()).AsInt8x16() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask8x16()).AsInt8x16() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask8x16()).AsInt8x16() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask8x16()).AsInt8x16() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask8x16()).AsInt8x16() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask8x16()).AsInt8x16() + case "SaturatedUnsignedSignedPairDotProd": + gotv = vec0.SaturatedUnsignedSignedPairDotProd(vec1) default: - t.Errorf("Unknown method: Int8x16.%s", which) + t.Errorf("Unknown method: Uint8x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -3875,19 +4399,19 @@ func testInt8x16MaskedCompare(t *testing.T, v0 []int8, v1 []int8, v2 []int8, wan } } -func testInt8x16Unary(t *testing.T, v0 []int8, want []int8, which string) { +func testUint8x16Int8x16Mask16x8Int16x8(t *testing.T, v0 []uint8, v1 []int8, v2 []int16, want []int16, which string) { t.Helper() - var gotv simd.Int8x16 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x16Slice(v0) - switch which { - case "Absolute": - gotv = vec0.Absolute() - case "PopCount": - gotv = vec0.PopCount() + var gotv simd.Int16x8 + got := make([]int16, len(want)) + vec0 := simd.LoadUint8x16Slice(v0) + vec1 := simd.LoadInt8x16Slice(v1) + vec2 := simd.LoadInt16x8Slice(v2) + switch which { + case "MaskedSaturatedUnsignedSignedPairDotProd": + gotv = vec0.MaskedSaturatedUnsignedSignedPairDotProd(vec1, vec2.AsMask16x8()) default: - t.Errorf("Unknown method: Int8x16.%s", which) + t.Errorf("Unknown method: Uint8x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -3897,20 +4421,70 @@ func testInt8x16Unary(t *testing.T, v0 []int8, want []int8, which string) { } } -func testInt8x16UnaryMasked(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { +func testUint8x16MaskedCompare(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []int8, which string) { t.Helper() var gotv simd.Int8x16 got := make([]int8, len(want)) - vec0 := simd.LoadInt8x16Slice(v0) + vec0 := simd.LoadUint8x16Slice(v0) + vec1 := simd.LoadUint8x16Slice(v1) + vec2 := simd.LoadInt8x16Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask8x16()).AsInt8x16() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask8x16()).AsInt8x16() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask8x16()).AsInt8x16() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask8x16()).AsInt8x16() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask8x16()).AsInt8x16() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask8x16()).AsInt8x16() + + default: + t.Errorf("Unknown method: Uint8x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x16Unary(t *testing.T, v0 []uint8, want []uint8, which string) { + t.Helper() + var gotv simd.Uint8x16 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x16Slice(v0) + switch which { + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Uint8x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func 
testUint8x16UnaryMasked(t *testing.T, v0 []uint8, v1 []int8, want []uint8, which string) { + t.Helper() + var gotv simd.Uint8x16 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x16Slice(v0) vec1 := simd.LoadInt8x16Slice(v1) switch which { - case "MaskedAbsolute": - gotv = vec0.MaskedAbsolute(vec1.AsMask8x16()) case "MaskedPopCount": gotv = vec0.MaskedPopCount(vec1.AsMask8x16()) default: - t.Errorf("Unknown method: Int8x16.%s", which) + t.Errorf("Unknown method: Uint8x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -3920,12 +4494,12 @@ func testInt8x16UnaryMasked(t *testing.T, v0 []int8, v1 []int8, want []int8, whi } } -func testInt8x32Binary(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { +func testUint8x32Binary(t *testing.T, v0 []uint8, v1 []uint8, want []uint8, which string) { t.Helper() - var gotv simd.Int8x32 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x32Slice(v0) - vec1 := simd.LoadInt8x32Slice(v1) + var gotv simd.Uint8x32 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x32Slice(v0) + vec1 := simd.LoadUint8x32Slice(v1) switch which { case "Add": gotv = vec0.Add(vec1) @@ -3933,6 +4507,8 @@ func testInt8x32Binary(t *testing.T, v0 []int8, v1 []int8, want []int8, which st gotv = vec0.And(vec1) case "AndNot": gotv = vec0.AndNot(vec1) + case "Average": + gotv = vec0.Average(vec1) case "Max": gotv = vec0.Max(vec1) case "Min": @@ -3943,15 +4519,13 @@ func testInt8x32Binary(t *testing.T, v0 []int8, v1 []int8, want []int8, which st gotv = vec0.SaturatedAdd(vec1) case "SaturatedSub": gotv = vec0.SaturatedSub(vec1) - case "Sign": - gotv = vec0.Sign(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": gotv = vec0.Xor(vec1) default: - t.Errorf("Unknown method: Int8x32.%s", which) + t.Errorf("Unknown method: Uint8x32.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -3961,16 +4535,18 @@ func testInt8x32Binary(t *testing.T, v0 []int8, v1 []int8, want []int8, which st } } -func testInt8x32BinaryMasked(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) { +func testUint8x32BinaryMasked(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []uint8, which string) { t.Helper() - var gotv simd.Int8x32 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x32Slice(v0) - vec1 := simd.LoadInt8x32Slice(v1) + var gotv simd.Uint8x32 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x32Slice(v0) + vec1 := simd.LoadUint8x32Slice(v1) vec2 := simd.LoadInt8x32Slice(v2) switch which { case "MaskedAdd": gotv = vec0.MaskedAdd(vec1, vec2.AsMask8x32()) + case "MaskedAverage": + gotv = vec0.MaskedAverage(vec1, vec2.AsMask8x32()) case "MaskedMax": gotv = vec0.MaskedMax(vec1, vec2.AsMask8x32()) case "MaskedMin": @@ -3983,7 +4559,7 @@ func testInt8x32BinaryMasked(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want gotv = vec0.MaskedSub(vec1, vec2.AsMask8x32()) default: - t.Errorf("Unknown method: Int8x32.%s", which) + t.Errorf("Unknown method: Uint8x32.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -3993,12 +4569,12 @@ func testInt8x32BinaryMasked(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want } } -func testInt8x32Compare(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { +func testUint8x32Compare(t *testing.T, v0 []uint8, v1 []uint8, want []int8, which string) { t.Helper() var gotv simd.Int8x32 got := make([]int8, len(want)) - vec0 := simd.LoadInt8x32Slice(v0) - vec1 := simd.LoadInt8x32Slice(v1) + vec0 := simd.LoadUint8x32Slice(v0) + vec1 := simd.LoadUint8x32Slice(v1) 
switch which { case "Equal": gotv = vec0.Equal(vec1).AsInt8x32() @@ -4014,7 +4590,7 @@ func testInt8x32Compare(t *testing.T, v0 []int8, v1 []int8, want []int8, which s gotv = vec0.NotEqual(vec1).AsInt8x32() default: - t.Errorf("Unknown method: Int8x32.%s", which) + t.Errorf("Unknown method: Uint8x32.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4024,12 +4600,55 @@ func testInt8x32Compare(t *testing.T, v0 []int8, v1 []int8, want []int8, which s } } -func testInt8x32MaskedCompare(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) { +func testUint8x32Int8x32Int16x16(t *testing.T, v0 []uint8, v1 []int8, want []int16, which string) { + t.Helper() + var gotv simd.Int16x16 + got := make([]int16, len(want)) + vec0 := simd.LoadUint8x32Slice(v0) + vec1 := simd.LoadInt8x32Slice(v1) + switch which { + case "SaturatedUnsignedSignedPairDotProd": + gotv = vec0.SaturatedUnsignedSignedPairDotProd(vec1) + + default: + t.Errorf("Unknown method: Uint8x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x32Int8x32Mask16x16Int16x16(t *testing.T, v0 []uint8, v1 []int8, v2 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x16 + got := make([]int16, len(want)) + vec0 := simd.LoadUint8x32Slice(v0) + vec1 := simd.LoadInt8x32Slice(v1) + vec2 := simd.LoadInt16x16Slice(v2) + switch which { + case "MaskedSaturatedUnsignedSignedPairDotProd": + gotv = vec0.MaskedSaturatedUnsignedSignedPairDotProd(vec1, vec2.AsMask16x16()) + + default: + t.Errorf("Unknown method: Uint8x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x32MaskedCompare(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []int8, which string) { t.Helper() var gotv simd.Int8x32 got := make([]int8, len(want)) - vec0 := simd.LoadInt8x32Slice(v0) - vec1 := simd.LoadInt8x32Slice(v1) + vec0 := simd.LoadUint8x32Slice(v0) + vec1 := simd.LoadUint8x32Slice(v1) vec2 := simd.LoadInt8x32Slice(v2) switch which { case "MaskedEqual": @@ -4046,7 +4665,7 @@ func testInt8x32MaskedCompare(t *testing.T, v0 []int8, v1 []int8, v2 []int8, wan gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask8x32()).AsInt8x32() default: - t.Errorf("Unknown method: Int8x32.%s", which) + t.Errorf("Unknown method: Uint8x32.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4056,19 +4675,17 @@ func testInt8x32MaskedCompare(t *testing.T, v0 []int8, v1 []int8, v2 []int8, wan } } -func testInt8x32Unary(t *testing.T, v0 []int8, want []int8, which string) { +func testUint8x32Unary(t *testing.T, v0 []uint8, want []uint8, which string) { t.Helper() - var gotv simd.Int8x32 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x32Slice(v0) + var gotv simd.Uint8x32 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x32Slice(v0) switch which { - case "Absolute": - gotv = vec0.Absolute() case "PopCount": gotv = vec0.PopCount() default: - t.Errorf("Unknown method: Int8x32.%s", which) + t.Errorf("Unknown method: Uint8x32.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4078,20 +4695,18 @@ func testInt8x32Unary(t *testing.T, v0 []int8, want []int8, which string) { } } -func testInt8x32UnaryMasked(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { +func testUint8x32UnaryMasked(t *testing.T, v0 []uint8, v1 []int8, 
want []uint8, which string) { t.Helper() - var gotv simd.Int8x32 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x32Slice(v0) + var gotv simd.Uint8x32 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x32Slice(v0) vec1 := simd.LoadInt8x32Slice(v1) switch which { - case "MaskedAbsolute": - gotv = vec0.MaskedAbsolute(vec1.AsMask8x32()) case "MaskedPopCount": gotv = vec0.MaskedPopCount(vec1.AsMask8x32()) default: - t.Errorf("Unknown method: Int8x32.%s", which) + t.Errorf("Unknown method: Uint8x32.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4101,15 +4716,17 @@ func testInt8x32UnaryMasked(t *testing.T, v0 []int8, v1 []int8, want []int8, whi } } -func testInt8x64Binary(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { +func testUint8x64Binary(t *testing.T, v0 []uint8, v1 []uint8, want []uint8, which string) { t.Helper() - var gotv simd.Int8x64 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x64Slice(v0) - vec1 := simd.LoadInt8x64Slice(v1) + var gotv simd.Uint8x64 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x64Slice(v0) + vec1 := simd.LoadUint8x64Slice(v1) switch which { case "Add": gotv = vec0.Add(vec1) + case "Average": + gotv = vec0.Average(vec1) case "Max": gotv = vec0.Max(vec1) case "Min": @@ -4122,7 +4739,7 @@ func testInt8x64Binary(t *testing.T, v0 []int8, v1 []int8, want []int8, which st gotv = vec0.Sub(vec1) default: - t.Errorf("Unknown method: Int8x64.%s", which) + t.Errorf("Unknown method: Uint8x64.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4132,16 +4749,18 @@ func testInt8x64Binary(t *testing.T, v0 []int8, v1 []int8, want []int8, which st } } -func testInt8x64BinaryMasked(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) { +func testUint8x64BinaryMasked(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []uint8, which string) { t.Helper() - var gotv simd.Int8x64 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x64Slice(v0) - vec1 := simd.LoadInt8x64Slice(v1) + var gotv simd.Uint8x64 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x64Slice(v0) + vec1 := simd.LoadUint8x64Slice(v1) vec2 := simd.LoadInt8x64Slice(v2) switch which { case "MaskedAdd": gotv = vec0.MaskedAdd(vec1, vec2.AsMask8x64()) + case "MaskedAverage": + gotv = vec0.MaskedAverage(vec1, vec2.AsMask8x64()) case "MaskedMax": gotv = vec0.MaskedMax(vec1, vec2.AsMask8x64()) case "MaskedMin": @@ -4154,7 +4773,7 @@ func testInt8x64BinaryMasked(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want gotv = vec0.MaskedSub(vec1, vec2.AsMask8x64()) default: - t.Errorf("Unknown method: Int8x64.%s", which) + t.Errorf("Unknown method: Uint8x64.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4164,12 +4783,12 @@ func testInt8x64BinaryMasked(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want } } -func testInt8x64Compare(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { +func testUint8x64Compare(t *testing.T, v0 []uint8, v1 []uint8, want []int8, which string) { t.Helper() var gotv simd.Int8x64 got := make([]int8, len(want)) - vec0 := simd.LoadInt8x64Slice(v0) - vec1 := simd.LoadInt8x64Slice(v1) + vec0 := simd.LoadUint8x64Slice(v0) + vec1 := simd.LoadUint8x64Slice(v1) switch which { case "Equal": gotv = vec0.Equal(vec1).AsInt8x64() @@ -4185,7 +4804,7 @@ func testInt8x64Compare(t *testing.T, v0 []int8, v1 []int8, want []int8, which s gotv = vec0.NotEqual(vec1).AsInt8x64() default: - t.Errorf("Unknown method: Int8x64.%s", which) + t.Errorf("Unknown method: Uint8x64.%s", 
which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4195,29 +4814,18 @@ func testInt8x64Compare(t *testing.T, v0 []int8, v1 []int8, want []int8, which s } } -func testInt8x64MaskedCompare(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) { +func testUint8x64Int8x64Int16x32(t *testing.T, v0 []uint8, v1 []int8, want []int16, which string) { t.Helper() - var gotv simd.Int8x64 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x64Slice(v0) + var gotv simd.Int16x32 + got := make([]int16, len(want)) + vec0 := simd.LoadUint8x64Slice(v0) vec1 := simd.LoadInt8x64Slice(v1) - vec2 := simd.LoadInt8x64Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask8x64()).AsInt8x64() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask8x64()).AsInt8x64() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask8x64()).AsInt8x64() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask8x64()).AsInt8x64() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask8x64()).AsInt8x64() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask8x64()).AsInt8x64() + case "SaturatedUnsignedSignedPairDotProd": + gotv = vec0.SaturatedUnsignedSignedPairDotProd(vec1) default: - t.Errorf("Unknown method: Int8x64.%s", which) + t.Errorf("Unknown method: Uint8x64.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4227,19 +4835,19 @@ func testInt8x64MaskedCompare(t *testing.T, v0 []int8, v1 []int8, v2 []int8, wan } } -func testInt8x64Unary(t *testing.T, v0 []int8, want []int8, which string) { +func testUint8x64Int8x64Mask16x32Int16x32(t *testing.T, v0 []uint8, v1 []int8, v2 []int16, want []int16, which string) { t.Helper() - var gotv simd.Int8x64 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x64Slice(v0) - switch which { - case "Absolute": - gotv = vec0.Absolute() - case "PopCount": - gotv = vec0.PopCount() + var gotv simd.Int16x32 + got := make([]int16, len(want)) + vec0 := simd.LoadUint8x64Slice(v0) + vec1 := simd.LoadInt8x64Slice(v1) + vec2 := simd.LoadInt16x32Slice(v2) + switch which { + case "MaskedSaturatedUnsignedSignedPairDotProd": + gotv = vec0.MaskedSaturatedUnsignedSignedPairDotProd(vec1, vec2.AsMask16x32()) default: - t.Errorf("Unknown method: Int8x64.%s", which) + t.Errorf("Unknown method: Uint8x64.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4249,20 +4857,70 @@ func testInt8x64Unary(t *testing.T, v0 []int8, want []int8, which string) { } } -func testInt8x64UnaryMasked(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { +func testUint8x64MaskedCompare(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []int8, which string) { t.Helper() var gotv simd.Int8x64 got := make([]int8, len(want)) - vec0 := simd.LoadInt8x64Slice(v0) + vec0 := simd.LoadUint8x64Slice(v0) + vec1 := simd.LoadUint8x64Slice(v1) + vec2 := simd.LoadInt8x64Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask8x64()).AsInt8x64() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask8x64()).AsInt8x64() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask8x64()).AsInt8x64() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask8x64()).AsInt8x64() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask8x64()).AsInt8x64() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask8x64()).AsInt8x64() + + default: + t.Errorf("Unknown 
method: Uint8x64.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x64Unary(t *testing.T, v0 []uint8, want []uint8, which string) { + t.Helper() + var gotv simd.Uint8x64 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x64Slice(v0) + switch which { + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Uint8x64.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x64UnaryMasked(t *testing.T, v0 []uint8, v1 []int8, want []uint8, which string) { + t.Helper() + var gotv simd.Uint8x64 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x64Slice(v0) vec1 := simd.LoadInt8x64Slice(v1) switch which { - case "MaskedAbsolute": - gotv = vec0.MaskedAbsolute(vec1.AsMask8x64()) case "MaskedPopCount": gotv = vec0.MaskedPopCount(vec1.AsMask8x64()) default: - t.Errorf("Unknown method: Int8x64.%s", which) + t.Errorf("Unknown method: Uint8x64.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4272,12 +4930,12 @@ func testInt8x64UnaryMasked(t *testing.T, v0 []int8, v1 []int8, want []int8, whi } } -func testUint16x16Binary(t *testing.T, v0 []uint16, v1 []uint16, want []uint16, which string) { +func testUint16x8Binary(t *testing.T, v0 []uint16, v1 []uint16, want []uint16, which string) { t.Helper() - var gotv simd.Uint16x16 + var gotv simd.Uint16x8 got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x16Slice(v0) - vec1 := simd.LoadUint16x16Slice(v1) + vec0 := simd.LoadUint16x8Slice(v0) + vec1 := simd.LoadUint16x8Slice(v1) switch which { case "Add": gotv = vec0.Add(vec1) @@ -4309,7 +4967,7 @@ func testUint16x16Binary(t *testing.T, v0 []uint16, v1 []uint16, want []uint16, gotv = vec0.Xor(vec1) default: - t.Errorf("Unknown method: Uint16x16.%s", which) + t.Errorf("Unknown method: Uint16x8.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4319,33 +4977,33 @@ func testUint16x16Binary(t *testing.T, v0 []uint16, v1 []uint16, want []uint16, } } -func testUint16x16BinaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []uint16, which string) { +func testUint16x8BinaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []uint16, which string) { t.Helper() - var gotv simd.Uint16x16 + var gotv simd.Uint16x8 got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x16Slice(v0) - vec1 := simd.LoadUint16x16Slice(v1) - vec2 := simd.LoadInt16x16Slice(v2) + vec0 := simd.LoadUint16x8Slice(v0) + vec1 := simd.LoadUint16x8Slice(v1) + vec2 := simd.LoadInt16x8Slice(v2) switch which { case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask16x16()) + gotv = vec0.MaskedAdd(vec1, vec2.AsMask16x8()) case "MaskedAverage": - gotv = vec0.MaskedAverage(vec1, vec2.AsMask16x16()) + gotv = vec0.MaskedAverage(vec1, vec2.AsMask16x8()) case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask16x16()) + gotv = vec0.MaskedMax(vec1, vec2.AsMask16x8()) case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask16x16()) + gotv = vec0.MaskedMin(vec1, vec2.AsMask16x8()) case "MaskedMulHigh": - gotv = vec0.MaskedMulHigh(vec1, vec2.AsMask16x16()) + gotv = vec0.MaskedMulHigh(vec1, vec2.AsMask16x8()) case "MaskedSaturatedAdd": - gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x16()) + gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x8()) case 
"MaskedSaturatedSub": - gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x16()) + gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x8()) case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask16x16()) + gotv = vec0.MaskedSub(vec1, vec2.AsMask16x8()) default: - t.Errorf("Unknown method: Uint16x16.%s", which) + t.Errorf("Unknown method: Uint16x8.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4355,28 +5013,28 @@ func testUint16x16BinaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []int1 } } -func testUint16x16Compare(t *testing.T, v0 []uint16, v1 []uint16, want []int16, which string) { +func testUint16x8Compare(t *testing.T, v0 []uint16, v1 []uint16, want []int16, which string) { t.Helper() - var gotv simd.Int16x16 + var gotv simd.Int16x8 got := make([]int16, len(want)) - vec0 := simd.LoadUint16x16Slice(v0) - vec1 := simd.LoadUint16x16Slice(v1) + vec0 := simd.LoadUint16x8Slice(v0) + vec1 := simd.LoadUint16x8Slice(v1) switch which { case "Equal": - gotv = vec0.Equal(vec1).AsInt16x16() + gotv = vec0.Equal(vec1).AsInt16x8() case "Greater": - gotv = vec0.Greater(vec1).AsInt16x16() + gotv = vec0.Greater(vec1).AsInt16x8() case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt16x16() + gotv = vec0.GreaterEqual(vec1).AsInt16x8() case "Less": - gotv = vec0.Less(vec1).AsInt16x16() + gotv = vec0.Less(vec1).AsInt16x8() case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt16x16() + gotv = vec0.LessEqual(vec1).AsInt16x8() case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt16x16() + gotv = vec0.NotEqual(vec1).AsInt16x8() default: - t.Errorf("Unknown method: Uint16x16.%s", which) + t.Errorf("Unknown method: Uint16x8.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4386,29 +5044,29 @@ func testUint16x16Compare(t *testing.T, v0 []uint16, v1 []uint16, want []int16, } } -func testUint16x16MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []int16, which string) { +func testUint16x8MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []int16, which string) { t.Helper() - var gotv simd.Int16x16 + var gotv simd.Int16x8 got := make([]int16, len(want)) - vec0 := simd.LoadUint16x16Slice(v0) - vec1 := simd.LoadUint16x16Slice(v1) - vec2 := simd.LoadInt16x16Slice(v2) + vec0 := simd.LoadUint16x8Slice(v0) + vec1 := simd.LoadUint16x8Slice(v1) + vec2 := simd.LoadInt16x8Slice(v2) switch which { case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask16x16()).AsInt16x16() + gotv = vec0.MaskedEqual(vec1, vec2.AsMask16x8()).AsInt16x8() case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask16x16()).AsInt16x16() + gotv = vec0.MaskedGreater(vec1, vec2.AsMask16x8()).AsInt16x8() case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask16x16()).AsInt16x16() + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask16x8()).AsInt16x8() case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask16x16()).AsInt16x16() + gotv = vec0.MaskedLess(vec1, vec2.AsMask16x8()).AsInt16x8() case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask16x16()).AsInt16x16() + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask16x8()).AsInt16x8() case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask16x16()).AsInt16x16() + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask16x8()).AsInt16x8() default: - t.Errorf("Unknown method: Uint16x16.%s", which) + t.Errorf("Unknown method: Uint16x8.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4418,17 +5076,17 @@ func testUint16x16MaskedCompare(t *testing.T, v0 
[]uint16, v1 []uint16, v2 []int } } -func testUint16x16Unary(t *testing.T, v0 []uint16, want []uint16, which string) { +func testUint16x8Unary(t *testing.T, v0 []uint16, want []uint16, which string) { t.Helper() - var gotv simd.Uint16x16 + var gotv simd.Uint16x8 got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x16Slice(v0) + vec0 := simd.LoadUint16x8Slice(v0) switch which { case "PopCount": gotv = vec0.PopCount() default: - t.Errorf("Unknown method: Uint16x16.%s", which) + t.Errorf("Unknown method: Uint16x8.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4438,18 +5096,18 @@ func testUint16x16Unary(t *testing.T, v0 []uint16, want []uint16, which string) } } -func testUint16x16UnaryMasked(t *testing.T, v0 []uint16, v1 []int16, want []uint16, which string) { +func testUint16x8UnaryMasked(t *testing.T, v0 []uint16, v1 []int16, want []uint16, which string) { t.Helper() - var gotv simd.Uint16x16 + var gotv simd.Uint16x8 got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x16Slice(v0) - vec1 := simd.LoadInt16x16Slice(v1) + vec0 := simd.LoadUint16x8Slice(v0) + vec1 := simd.LoadInt16x8Slice(v1) switch which { case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask16x16()) + gotv = vec0.MaskedPopCount(vec1.AsMask16x8()) default: - t.Errorf("Unknown method: Uint16x16.%s", which) + t.Errorf("Unknown method: Uint16x8.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4459,15 +5117,19 @@ func testUint16x16UnaryMasked(t *testing.T, v0 []uint16, v1 []int16, want []uint } } -func testUint16x32Binary(t *testing.T, v0 []uint16, v1 []uint16, want []uint16, which string) { +func testUint16x16Binary(t *testing.T, v0 []uint16, v1 []uint16, want []uint16, which string) { t.Helper() - var gotv simd.Uint16x32 + var gotv simd.Uint16x16 got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x32Slice(v0) - vec1 := simd.LoadUint16x32Slice(v1) + vec0 := simd.LoadUint16x16Slice(v0) + vec1 := simd.LoadUint16x16Slice(v1) switch which { case "Add": gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) case "Average": gotv = vec0.Average(vec1) case "Max": @@ -4476,15 +5138,23 @@ func testUint16x32Binary(t *testing.T, v0 []uint16, v1 []uint16, want []uint16, gotv = vec0.Min(vec1) case "MulHigh": gotv = vec0.MulHigh(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "PairwiseAdd": + gotv = vec0.PairwiseAdd(vec1) + case "PairwiseSub": + gotv = vec0.PairwiseSub(vec1) case "SaturatedAdd": gotv = vec0.SaturatedAdd(vec1) case "SaturatedSub": gotv = vec0.SaturatedSub(vec1) case "Sub": gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) default: - t.Errorf("Unknown method: Uint16x32.%s", which) + t.Errorf("Unknown method: Uint16x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4494,33 +5164,33 @@ func testUint16x32Binary(t *testing.T, v0 []uint16, v1 []uint16, want []uint16, } } -func testUint16x32BinaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []uint16, which string) { +func testUint16x16BinaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []uint16, which string) { t.Helper() - var gotv simd.Uint16x32 + var gotv simd.Uint16x16 got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x32Slice(v0) - vec1 := simd.LoadUint16x32Slice(v1) - vec2 := simd.LoadInt16x32Slice(v2) + vec0 := simd.LoadUint16x16Slice(v0) + vec1 := simd.LoadUint16x16Slice(v1) + vec2 := simd.LoadInt16x16Slice(v2) switch which { case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask16x32()) 
+ gotv = vec0.MaskedAdd(vec1, vec2.AsMask16x16()) case "MaskedAverage": - gotv = vec0.MaskedAverage(vec1, vec2.AsMask16x32()) + gotv = vec0.MaskedAverage(vec1, vec2.AsMask16x16()) case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask16x32()) + gotv = vec0.MaskedMax(vec1, vec2.AsMask16x16()) case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask16x32()) + gotv = vec0.MaskedMin(vec1, vec2.AsMask16x16()) case "MaskedMulHigh": - gotv = vec0.MaskedMulHigh(vec1, vec2.AsMask16x32()) + gotv = vec0.MaskedMulHigh(vec1, vec2.AsMask16x16()) case "MaskedSaturatedAdd": - gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x32()) + gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x16()) case "MaskedSaturatedSub": - gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x32()) + gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x16()) case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask16x32()) + gotv = vec0.MaskedSub(vec1, vec2.AsMask16x16()) default: - t.Errorf("Unknown method: Uint16x32.%s", which) + t.Errorf("Unknown method: Uint16x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4530,28 +5200,28 @@ func testUint16x32BinaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []int1 } } -func testUint16x32Compare(t *testing.T, v0 []uint16, v1 []uint16, want []int16, which string) { +func testUint16x16Compare(t *testing.T, v0 []uint16, v1 []uint16, want []int16, which string) { t.Helper() - var gotv simd.Int16x32 + var gotv simd.Int16x16 got := make([]int16, len(want)) - vec0 := simd.LoadUint16x32Slice(v0) - vec1 := simd.LoadUint16x32Slice(v1) + vec0 := simd.LoadUint16x16Slice(v0) + vec1 := simd.LoadUint16x16Slice(v1) switch which { case "Equal": - gotv = vec0.Equal(vec1).AsInt16x32() + gotv = vec0.Equal(vec1).AsInt16x16() case "Greater": - gotv = vec0.Greater(vec1).AsInt16x32() + gotv = vec0.Greater(vec1).AsInt16x16() case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt16x32() + gotv = vec0.GreaterEqual(vec1).AsInt16x16() case "Less": - gotv = vec0.Less(vec1).AsInt16x32() + gotv = vec0.Less(vec1).AsInt16x16() case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt16x32() + gotv = vec0.LessEqual(vec1).AsInt16x16() case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt16x32() + gotv = vec0.NotEqual(vec1).AsInt16x16() default: - t.Errorf("Unknown method: Uint16x32.%s", which) + t.Errorf("Unknown method: Uint16x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4561,29 +5231,29 @@ func testUint16x32Compare(t *testing.T, v0 []uint16, v1 []uint16, want []int16, } } -func testUint16x32MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []int16, which string) { +func testUint16x16MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []int16, which string) { t.Helper() - var gotv simd.Int16x32 + var gotv simd.Int16x16 got := make([]int16, len(want)) - vec0 := simd.LoadUint16x32Slice(v0) - vec1 := simd.LoadUint16x32Slice(v1) - vec2 := simd.LoadInt16x32Slice(v2) - switch which { + vec0 := simd.LoadUint16x16Slice(v0) + vec1 := simd.LoadUint16x16Slice(v1) + vec2 := simd.LoadInt16x16Slice(v2) + switch which { case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask16x32()).AsInt16x32() + gotv = vec0.MaskedEqual(vec1, vec2.AsMask16x16()).AsInt16x16() case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask16x32()).AsInt16x32() + gotv = vec0.MaskedGreater(vec1, vec2.AsMask16x16()).AsInt16x16() case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask16x32()).AsInt16x32() + gotv = 
vec0.MaskedGreaterEqual(vec1, vec2.AsMask16x16()).AsInt16x16() case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask16x32()).AsInt16x32() + gotv = vec0.MaskedLess(vec1, vec2.AsMask16x16()).AsInt16x16() case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask16x32()).AsInt16x32() + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask16x16()).AsInt16x16() case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask16x32()).AsInt16x32() + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask16x16()).AsInt16x16() default: - t.Errorf("Unknown method: Uint16x32.%s", which) + t.Errorf("Unknown method: Uint16x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4593,17 +5263,17 @@ func testUint16x32MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int } } -func testUint16x32Unary(t *testing.T, v0 []uint16, want []uint16, which string) { +func testUint16x16Unary(t *testing.T, v0 []uint16, want []uint16, which string) { t.Helper() - var gotv simd.Uint16x32 + var gotv simd.Uint16x16 got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x32Slice(v0) + vec0 := simd.LoadUint16x16Slice(v0) switch which { case "PopCount": gotv = vec0.PopCount() default: - t.Errorf("Unknown method: Uint16x32.%s", which) + t.Errorf("Unknown method: Uint16x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4613,18 +5283,18 @@ func testUint16x32Unary(t *testing.T, v0 []uint16, want []uint16, which string) } } -func testUint16x32UnaryMasked(t *testing.T, v0 []uint16, v1 []int16, want []uint16, which string) { +func testUint16x16UnaryMasked(t *testing.T, v0 []uint16, v1 []int16, want []uint16, which string) { t.Helper() - var gotv simd.Uint16x32 + var gotv simd.Uint16x16 got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x32Slice(v0) - vec1 := simd.LoadInt16x32Slice(v1) + vec0 := simd.LoadUint16x16Slice(v0) + vec1 := simd.LoadInt16x16Slice(v1) switch which { case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask16x32()) + gotv = vec0.MaskedPopCount(vec1.AsMask16x16()) default: - t.Errorf("Unknown method: Uint16x32.%s", which) + t.Errorf("Unknown method: Uint16x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4634,19 +5304,15 @@ func testUint16x32UnaryMasked(t *testing.T, v0 []uint16, v1 []int16, want []uint } } -func testUint16x8Binary(t *testing.T, v0 []uint16, v1 []uint16, want []uint16, which string) { +func testUint16x32Binary(t *testing.T, v0 []uint16, v1 []uint16, want []uint16, which string) { t.Helper() - var gotv simd.Uint16x8 + var gotv simd.Uint16x32 got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x8Slice(v0) - vec1 := simd.LoadUint16x8Slice(v1) + vec0 := simd.LoadUint16x32Slice(v0) + vec1 := simd.LoadUint16x32Slice(v1) switch which { case "Add": gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) case "Average": gotv = vec0.Average(vec1) case "Max": @@ -4655,23 +5321,15 @@ func testUint16x8Binary(t *testing.T, v0 []uint16, v1 []uint16, want []uint16, w gotv = vec0.Min(vec1) case "MulHigh": gotv = vec0.MulHigh(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "PairwiseAdd": - gotv = vec0.PairwiseAdd(vec1) - case "PairwiseSub": - gotv = vec0.PairwiseSub(vec1) case "SaturatedAdd": gotv = vec0.SaturatedAdd(vec1) case "SaturatedSub": gotv = vec0.SaturatedSub(vec1) case "Sub": gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) default: - t.Errorf("Unknown method: Uint16x8.%s", which) + t.Errorf("Unknown method: Uint16x32.%s", which) } gotv.StoreSlice(got) 
for i := range len(want) { @@ -4681,33 +5339,33 @@ func testUint16x8Binary(t *testing.T, v0 []uint16, v1 []uint16, want []uint16, w } } -func testUint16x8BinaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []uint16, which string) { +func testUint16x32BinaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []uint16, which string) { t.Helper() - var gotv simd.Uint16x8 + var gotv simd.Uint16x32 got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x8Slice(v0) - vec1 := simd.LoadUint16x8Slice(v1) - vec2 := simd.LoadInt16x8Slice(v2) + vec0 := simd.LoadUint16x32Slice(v0) + vec1 := simd.LoadUint16x32Slice(v1) + vec2 := simd.LoadInt16x32Slice(v2) switch which { case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask16x8()) + gotv = vec0.MaskedAdd(vec1, vec2.AsMask16x32()) case "MaskedAverage": - gotv = vec0.MaskedAverage(vec1, vec2.AsMask16x8()) + gotv = vec0.MaskedAverage(vec1, vec2.AsMask16x32()) case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask16x8()) + gotv = vec0.MaskedMax(vec1, vec2.AsMask16x32()) case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask16x8()) + gotv = vec0.MaskedMin(vec1, vec2.AsMask16x32()) case "MaskedMulHigh": - gotv = vec0.MaskedMulHigh(vec1, vec2.AsMask16x8()) + gotv = vec0.MaskedMulHigh(vec1, vec2.AsMask16x32()) case "MaskedSaturatedAdd": - gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x8()) + gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x32()) case "MaskedSaturatedSub": - gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x8()) + gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x32()) case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask16x8()) + gotv = vec0.MaskedSub(vec1, vec2.AsMask16x32()) default: - t.Errorf("Unknown method: Uint16x8.%s", which) + t.Errorf("Unknown method: Uint16x32.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4717,28 +5375,28 @@ func testUint16x8BinaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16 } } -func testUint16x8Compare(t *testing.T, v0 []uint16, v1 []uint16, want []int16, which string) { +func testUint16x32Compare(t *testing.T, v0 []uint16, v1 []uint16, want []int16, which string) { t.Helper() - var gotv simd.Int16x8 + var gotv simd.Int16x32 got := make([]int16, len(want)) - vec0 := simd.LoadUint16x8Slice(v0) - vec1 := simd.LoadUint16x8Slice(v1) + vec0 := simd.LoadUint16x32Slice(v0) + vec1 := simd.LoadUint16x32Slice(v1) switch which { case "Equal": - gotv = vec0.Equal(vec1).AsInt16x8() + gotv = vec0.Equal(vec1).AsInt16x32() case "Greater": - gotv = vec0.Greater(vec1).AsInt16x8() + gotv = vec0.Greater(vec1).AsInt16x32() case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt16x8() + gotv = vec0.GreaterEqual(vec1).AsInt16x32() case "Less": - gotv = vec0.Less(vec1).AsInt16x8() + gotv = vec0.Less(vec1).AsInt16x32() case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt16x8() + gotv = vec0.LessEqual(vec1).AsInt16x32() case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt16x8() + gotv = vec0.NotEqual(vec1).AsInt16x32() default: - t.Errorf("Unknown method: Uint16x8.%s", which) + t.Errorf("Unknown method: Uint16x32.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4748,29 +5406,29 @@ func testUint16x8Compare(t *testing.T, v0 []uint16, v1 []uint16, want []int16, w } } -func testUint16x8MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []int16, which string) { +func testUint16x32MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []int16, which string) { t.Helper() - var gotv simd.Int16x8 + 
var gotv simd.Int16x32 got := make([]int16, len(want)) - vec0 := simd.LoadUint16x8Slice(v0) - vec1 := simd.LoadUint16x8Slice(v1) - vec2 := simd.LoadInt16x8Slice(v2) + vec0 := simd.LoadUint16x32Slice(v0) + vec1 := simd.LoadUint16x32Slice(v1) + vec2 := simd.LoadInt16x32Slice(v2) switch which { case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask16x8()).AsInt16x8() + gotv = vec0.MaskedEqual(vec1, vec2.AsMask16x32()).AsInt16x32() case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask16x8()).AsInt16x8() + gotv = vec0.MaskedGreater(vec1, vec2.AsMask16x32()).AsInt16x32() case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask16x8()).AsInt16x8() + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask16x32()).AsInt16x32() case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask16x8()).AsInt16x8() + gotv = vec0.MaskedLess(vec1, vec2.AsMask16x32()).AsInt16x32() case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask16x8()).AsInt16x8() + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask16x32()).AsInt16x32() case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask16x8()).AsInt16x8() + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask16x32()).AsInt16x32() default: - t.Errorf("Unknown method: Uint16x8.%s", which) + t.Errorf("Unknown method: Uint16x32.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4780,17 +5438,17 @@ func testUint16x8MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int1 } } -func testUint16x8Unary(t *testing.T, v0 []uint16, want []uint16, which string) { +func testUint16x32Unary(t *testing.T, v0 []uint16, want []uint16, which string) { t.Helper() - var gotv simd.Uint16x8 + var gotv simd.Uint16x32 got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x8Slice(v0) + vec0 := simd.LoadUint16x32Slice(v0) switch which { case "PopCount": gotv = vec0.PopCount() default: - t.Errorf("Unknown method: Uint16x8.%s", which) + t.Errorf("Unknown method: Uint16x32.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4800,18 +5458,18 @@ func testUint16x8Unary(t *testing.T, v0 []uint16, want []uint16, which string) { } } -func testUint16x8UnaryMasked(t *testing.T, v0 []uint16, v1 []int16, want []uint16, which string) { +func testUint16x32UnaryMasked(t *testing.T, v0 []uint16, v1 []int16, want []uint16, which string) { t.Helper() - var gotv simd.Uint16x8 + var gotv simd.Uint16x32 got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x8Slice(v0) - vec1 := simd.LoadInt16x8Slice(v1) + vec0 := simd.LoadUint16x32Slice(v0) + vec1 := simd.LoadInt16x32Slice(v1) switch which { case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask16x8()) + gotv = vec0.MaskedPopCount(vec1.AsMask16x32()) default: - t.Errorf("Unknown method: Uint16x8.%s", which) + t.Errorf("Unknown method: Uint16x32.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4821,12 +5479,12 @@ func testUint16x8UnaryMasked(t *testing.T, v0 []uint16, v1 []int16, want []uint1 } } -func testUint32x16Binary(t *testing.T, v0 []uint32, v1 []uint32, want []uint32, which string) { +func testUint32x4Binary(t *testing.T, v0 []uint32, v1 []uint32, want []uint32, which string) { t.Helper() - var gotv simd.Uint32x16 + var gotv simd.Uint32x4 got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x16Slice(v0) - vec1 := simd.LoadUint32x16Slice(v1) + vec0 := simd.LoadUint32x4Slice(v0) + vec1 := simd.LoadUint32x4Slice(v1) switch which { case "Add": gotv = vec0.Add(vec1) @@ -4840,13 +5498,17 @@ func testUint32x16Binary(t *testing.T, v0 
[]uint32, v1 []uint32, want []uint32, gotv = vec0.Min(vec1) case "Or": gotv = vec0.Or(vec1) + case "PairwiseAdd": + gotv = vec0.PairwiseAdd(vec1) + case "PairwiseSub": + gotv = vec0.PairwiseSub(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": gotv = vec0.Xor(vec1) default: - t.Errorf("Unknown method: Uint32x16.%s", which) + t.Errorf("Unknown method: Uint32x4.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4856,33 +5518,33 @@ func testUint32x16Binary(t *testing.T, v0 []uint32, v1 []uint32, want []uint32, } } -func testUint32x16BinaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []uint32, which string) { +func testUint32x4BinaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []uint32, which string) { t.Helper() - var gotv simd.Uint32x16 + var gotv simd.Uint32x4 got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x16Slice(v0) - vec1 := simd.LoadUint32x16Slice(v1) - vec2 := simd.LoadInt32x16Slice(v2) + vec0 := simd.LoadUint32x4Slice(v0) + vec1 := simd.LoadUint32x4Slice(v1) + vec2 := simd.LoadInt32x4Slice(v2) switch which { case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x16()) + gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x4()) case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x16()) + gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x4()) case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x16()) + gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x4()) case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask32x16()) + gotv = vec0.MaskedMax(vec1, vec2.AsMask32x4()) case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask32x16()) + gotv = vec0.MaskedMin(vec1, vec2.AsMask32x4()) case "MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask32x16()) + gotv = vec0.MaskedOr(vec1, vec2.AsMask32x4()) case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask32x16()) + gotv = vec0.MaskedSub(vec1, vec2.AsMask32x4()) case "MaskedXor": - gotv = vec0.MaskedXor(vec1, vec2.AsMask32x16()) + gotv = vec0.MaskedXor(vec1, vec2.AsMask32x4()) default: - t.Errorf("Unknown method: Uint32x16.%s", which) + t.Errorf("Unknown method: Uint32x4.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4892,28 +5554,49 @@ func testUint32x16BinaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []int3 } } -func testUint32x16Compare(t *testing.T, v0 []uint32, v1 []uint32, want []int32, which string) { +func testUint32x4BinaryWiden(t *testing.T, v0 []uint32, v1 []uint32, want []uint64, which string) { t.Helper() - var gotv simd.Int32x16 + var gotv simd.Uint64x2 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint32x4Slice(v0) + vec1 := simd.LoadUint32x4Slice(v1) + switch which { + case "MulEvenWiden": + gotv = vec0.MulEvenWiden(vec1) + + default: + t.Errorf("Unknown method: Uint32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x4Compare(t *testing.T, v0 []uint32, v1 []uint32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x4 got := make([]int32, len(want)) - vec0 := simd.LoadUint32x16Slice(v0) - vec1 := simd.LoadUint32x16Slice(v1) + vec0 := simd.LoadUint32x4Slice(v0) + vec1 := simd.LoadUint32x4Slice(v1) switch which { case "Equal": - gotv = vec0.Equal(vec1).AsInt32x16() + gotv = vec0.Equal(vec1).AsInt32x4() case "Greater": - gotv = vec0.Greater(vec1).AsInt32x16() + gotv = vec0.Greater(vec1).AsInt32x4() case "GreaterEqual": - gotv = 
vec0.GreaterEqual(vec1).AsInt32x16() + gotv = vec0.GreaterEqual(vec1).AsInt32x4() case "Less": - gotv = vec0.Less(vec1).AsInt32x16() + gotv = vec0.Less(vec1).AsInt32x4() case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt32x16() + gotv = vec0.LessEqual(vec1).AsInt32x4() case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt32x16() + gotv = vec0.NotEqual(vec1).AsInt32x4() default: - t.Errorf("Unknown method: Uint32x16.%s", which) + t.Errorf("Unknown method: Uint32x4.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4923,269 +5606,20 @@ func testUint32x16Compare(t *testing.T, v0 []uint32, v1 []uint32, want []int32, } } -func testUint32x16MaskedCompare(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []int32, which string) { +func testUint32x4MaskedCompare(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []int32, which string) { t.Helper() - var gotv simd.Int32x16 + var gotv simd.Int32x4 got := make([]int32, len(want)) - vec0 := simd.LoadUint32x16Slice(v0) - vec1 := simd.LoadUint32x16Slice(v1) - vec2 := simd.LoadInt32x16Slice(v2) + vec0 := simd.LoadUint32x4Slice(v0) + vec1 := simd.LoadUint32x4Slice(v1) + vec2 := simd.LoadInt32x4Slice(v2) switch which { case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + gotv = vec0.MaskedEqual(vec1, vec2.AsMask32x4()).AsInt32x4() case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask32x16()).AsInt32x16() + gotv = vec0.MaskedGreater(vec1, vec2.AsMask32x4()).AsInt32x4() case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask32x16()).AsInt32x16() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask32x16()).AsInt32x16() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask32x16()).AsInt32x16() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask32x16()).AsInt32x16() - - default: - t.Errorf("Unknown method: Uint32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x16Uint8x64Int8x64Mask32x16Uint32x16(t *testing.T, v0 []uint32, v1 []uint8, v2 []int8, v3 []int32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x16 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x16Slice(v0) - vec1 := simd.LoadUint8x64Slice(v1) - vec2 := simd.LoadInt8x64Slice(v2) - vec3 := simd.LoadInt32x16Slice(v3) - switch which { - case "MaskedSaturatedUnsignedSignedQuadDotProdAccumulate": - gotv = vec0.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x16()) - case "MaskedUnsignedSignedQuadDotProdAccumulate": - gotv = vec0.MaskedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x16()) - - default: - t.Errorf("Unknown method: Uint32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x16Uint8x64Int8x64Uint32x16(t *testing.T, v0 []uint32, v1 []uint8, v2 []int8, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x16 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x16Slice(v0) - vec1 := simd.LoadUint8x64Slice(v1) - vec2 := simd.LoadInt8x64Slice(v2) - switch which { - case "SaturatedUnsignedSignedQuadDotProdAccumulate": - gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2) - case "UnsignedSignedQuadDotProdAccumulate": - gotv = 
vec0.UnsignedSignedQuadDotProdAccumulate(vec1, vec2) - - default: - t.Errorf("Unknown method: Uint32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x16Unary(t *testing.T, v0 []uint32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x16 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x16Slice(v0) - switch which { - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Uint32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x16UnaryMasked(t *testing.T, v0 []uint32, v1 []int32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x16 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x16Slice(v0) - vec1 := simd.LoadInt32x16Slice(v1) - switch which { - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask32x16()) - - default: - t.Errorf("Unknown method: Uint32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x4Binary(t *testing.T, v0 []uint32, v1 []uint32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x4 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x4Slice(v0) - vec1 := simd.LoadUint32x4Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "PairwiseAdd": - gotv = vec0.PairwiseAdd(vec1) - case "PairwiseSub": - gotv = vec0.PairwiseSub(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) - - default: - t.Errorf("Unknown method: Uint32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x4BinaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x4 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x4Slice(v0) - vec1 := simd.LoadUint32x4Slice(v1) - vec2 := simd.LoadInt32x4Slice(v2) - switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x4()) - case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x4()) - case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x4()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask32x4()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask32x4()) - case "MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask32x4()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask32x4()) - case "MaskedXor": - gotv = vec0.MaskedXor(vec1, vec2.AsMask32x4()) - - default: - t.Errorf("Unknown method: Uint32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x4BinaryWiden(t *testing.T, v0 []uint32, v1 []uint32, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x2 - got := make([]uint64, len(want)) - vec0 := 
simd.LoadUint32x4Slice(v0) - vec1 := simd.LoadUint32x4Slice(v1) - switch which { - case "MulEvenWiden": - gotv = vec0.MulEvenWiden(vec1) - - default: - t.Errorf("Unknown method: Uint32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x4Compare(t *testing.T, v0 []uint32, v1 []uint32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x4 - got := make([]int32, len(want)) - vec0 := simd.LoadUint32x4Slice(v0) - vec1 := simd.LoadUint32x4Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt32x4() - case "Greater": - gotv = vec0.Greater(vec1).AsInt32x4() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt32x4() - case "Less": - gotv = vec0.Less(vec1).AsInt32x4() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt32x4() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt32x4() - - default: - t.Errorf("Unknown method: Uint32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x4MaskedCompare(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x4 - got := make([]int32, len(want)) - vec0 := simd.LoadUint32x4Slice(v0) - vec1 := simd.LoadUint32x4Slice(v1) - vec2 := simd.LoadInt32x4Slice(v2) - switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask32x4()).AsInt32x4() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask32x4()).AsInt32x4() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask32x4()).AsInt32x4() + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask32x4()).AsInt32x4() case "MaskedLess": gotv = vec0.MaskedLess(vec1, vec2.AsMask32x4()).AsInt32x4() case "MaskedLessEqual": @@ -5543,14 +5977,238 @@ func testUint32x8UnaryMasked(t *testing.T, v0 []uint32, v1 []int32, want []uint3 } } -func testUint64x2Binary(t *testing.T, v0 []uint64, v1 []uint64, want []uint64, which string) { +func testUint32x16Binary(t *testing.T, v0 []uint32, v1 []uint32, want []uint32, which string) { t.Helper() - var gotv simd.Uint64x2 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x2Slice(v0) - vec1 := simd.LoadUint64x2Slice(v1) - switch which { - case "Add": + var gotv simd.Uint32x16 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x16Slice(v0) + vec1 := simd.LoadUint32x16Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Uint32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x16BinaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x16 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x16Slice(v0) + vec1 := simd.LoadUint32x16Slice(v1) + vec2 := simd.LoadInt32x16Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x16()) + case "MaskedAnd": + gotv 
= vec0.MaskedAnd(vec1, vec2.AsMask32x16()) + case "MaskedAndNot": + gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x16()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask32x16()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask32x16()) + case "MaskedOr": + gotv = vec0.MaskedOr(vec1, vec2.AsMask32x16()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask32x16()) + case "MaskedXor": + gotv = vec0.MaskedXor(vec1, vec2.AsMask32x16()) + + default: + t.Errorf("Unknown method: Uint32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x16Compare(t *testing.T, v0 []uint32, v1 []uint32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadUint32x16Slice(v0) + vec1 := simd.LoadUint32x16Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt32x16() + case "Greater": + gotv = vec0.Greater(vec1).AsInt32x16() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt32x16() + case "Less": + gotv = vec0.Less(vec1).AsInt32x16() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt32x16() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt32x16() + + default: + t.Errorf("Unknown method: Uint32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x16MaskedCompare(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadUint32x16Slice(v0) + vec1 := simd.LoadUint32x16Slice(v1) + vec2 := simd.LoadInt32x16Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + + default: + t.Errorf("Unknown method: Uint32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x16Uint8x64Int8x64Mask32x16Uint32x16(t *testing.T, v0 []uint32, v1 []uint8, v2 []int8, v3 []int32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x16 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x16Slice(v0) + vec1 := simd.LoadUint8x64Slice(v1) + vec2 := simd.LoadInt8x64Slice(v2) + vec3 := simd.LoadInt32x16Slice(v3) + switch which { + case "MaskedSaturatedUnsignedSignedQuadDotProdAccumulate": + gotv = vec0.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x16()) + case "MaskedUnsignedSignedQuadDotProdAccumulate": + gotv = vec0.MaskedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x16()) + + default: + t.Errorf("Unknown method: Uint32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + 
} + } +} + +func testUint32x16Uint8x64Int8x64Uint32x16(t *testing.T, v0 []uint32, v1 []uint8, v2 []int8, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x16 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x16Slice(v0) + vec1 := simd.LoadUint8x64Slice(v1) + vec2 := simd.LoadInt8x64Slice(v2) + switch which { + case "SaturatedUnsignedSignedQuadDotProdAccumulate": + gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2) + case "UnsignedSignedQuadDotProdAccumulate": + gotv = vec0.UnsignedSignedQuadDotProdAccumulate(vec1, vec2) + + default: + t.Errorf("Unknown method: Uint32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x16Unary(t *testing.T, v0 []uint32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x16 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x16Slice(v0) + switch which { + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Uint32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x16UnaryMasked(t *testing.T, v0 []uint32, v1 []int32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x16 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x16Slice(v0) + vec1 := simd.LoadInt32x16Slice(v1) + switch which { + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask32x16()) + + default: + t.Errorf("Unknown method: Uint32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint64x2Binary(t *testing.T, v0 []uint64, v1 []uint64, want []uint64, which string) { + t.Helper() + var gotv simd.Uint64x2 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x2Slice(v0) + vec1 := simd.LoadUint64x2Slice(v1) + switch which { + case "Add": gotv = vec0.Add(vec1) case "And": gotv = vec0.And(vec1) @@ -5796,652 +6454,29 @@ func testUint64x4BinaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64 } } } - -func testUint64x4Compare(t *testing.T, v0 []uint64, v1 []uint64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x4 - got := make([]int64, len(want)) - vec0 := simd.LoadUint64x4Slice(v0) - vec1 := simd.LoadUint64x4Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt64x4() - case "Greater": - gotv = vec0.Greater(vec1).AsInt64x4() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt64x4() - case "Less": - gotv = vec0.Less(vec1).AsInt64x4() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt64x4() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt64x4() - - default: - t.Errorf("Unknown method: Uint64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x4MaskedCompare(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x4 - got := make([]int64, len(want)) - vec0 := simd.LoadUint64x4Slice(v0) - vec1 := simd.LoadUint64x4Slice(v1) - vec2 := simd.LoadInt64x4Slice(v2) - switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask64x4()).AsInt64x4() - case 
"MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask64x4()).AsInt64x4() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask64x4()).AsInt64x4() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask64x4()).AsInt64x4() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask64x4()).AsInt64x4() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask64x4()).AsInt64x4() - - default: - t.Errorf("Unknown method: Uint64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x4Unary(t *testing.T, v0 []uint64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x4 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x4Slice(v0) - switch which { - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Uint64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x4UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x4 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x4Slice(v0) - vec1 := simd.LoadInt64x4Slice(v1) - switch which { - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask64x4()) - - default: - t.Errorf("Unknown method: Uint64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x8Binary(t *testing.T, v0 []uint64, v1 []uint64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x8 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x8Slice(v0) - vec1 := simd.LoadUint64x8Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "MulEvenWiden": - gotv = vec0.MulEvenWiden(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) - - default: - t.Errorf("Unknown method: Uint64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x8BinaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x8 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x8Slice(v0) - vec1 := simd.LoadUint64x8Slice(v1) - vec2 := simd.LoadInt64x8Slice(v2) - switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x8()) - case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x8()) - case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask64x8()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask64x8()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask64x8()) - case "MaskedMulEvenWiden": - gotv = vec0.MaskedMulEvenWiden(vec1, vec2.AsMask64x8()) - case "MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask64x8()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask64x8()) - case "MaskedXor": - gotv = vec0.MaskedXor(vec1, vec2.AsMask64x8()) - - default: - 
t.Errorf("Unknown method: Uint64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x8Compare(t *testing.T, v0 []uint64, v1 []uint64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x8 - got := make([]int64, len(want)) - vec0 := simd.LoadUint64x8Slice(v0) - vec1 := simd.LoadUint64x8Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt64x8() - case "Greater": - gotv = vec0.Greater(vec1).AsInt64x8() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt64x8() - case "Less": - gotv = vec0.Less(vec1).AsInt64x8() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt64x8() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt64x8() - - default: - t.Errorf("Unknown method: Uint64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x8MaskedCompare(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x8 - got := make([]int64, len(want)) - vec0 := simd.LoadUint64x8Slice(v0) - vec1 := simd.LoadUint64x8Slice(v1) - vec2 := simd.LoadInt64x8Slice(v2) - switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask64x8()).AsInt64x8() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask64x8()).AsInt64x8() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask64x8()).AsInt64x8() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask64x8()).AsInt64x8() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask64x8()).AsInt64x8() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask64x8()).AsInt64x8() - - default: - t.Errorf("Unknown method: Uint64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x8Unary(t *testing.T, v0 []uint64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x8 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x8Slice(v0) - switch which { - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Uint64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x8UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x8 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x8Slice(v0) - vec1 := simd.LoadInt64x8Slice(v1) - switch which { - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask64x8()) - - default: - t.Errorf("Unknown method: Uint64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x16Binary(t *testing.T, v0 []uint8, v1 []uint8, want []uint8, which string) { - t.Helper() - var gotv simd.Uint8x16 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x16Slice(v0) - vec1 := simd.LoadUint8x16Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = 
vec0.AndNot(vec1) - case "Average": - gotv = vec0.Average(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "SaturatedAdd": - gotv = vec0.SaturatedAdd(vec1) - case "SaturatedSub": - gotv = vec0.SaturatedSub(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) - - default: - t.Errorf("Unknown method: Uint8x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x16BinaryMasked(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []uint8, which string) { - t.Helper() - var gotv simd.Uint8x16 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x16Slice(v0) - vec1 := simd.LoadUint8x16Slice(v1) - vec2 := simd.LoadInt8x16Slice(v2) - switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask8x16()) - case "MaskedAverage": - gotv = vec0.MaskedAverage(vec1, vec2.AsMask8x16()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask8x16()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask8x16()) - case "MaskedSaturatedAdd": - gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask8x16()) - case "MaskedSaturatedSub": - gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask8x16()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask8x16()) - - default: - t.Errorf("Unknown method: Uint8x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x16Compare(t *testing.T, v0 []uint8, v1 []uint8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x16 - got := make([]int8, len(want)) - vec0 := simd.LoadUint8x16Slice(v0) - vec1 := simd.LoadUint8x16Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt8x16() - case "Greater": - gotv = vec0.Greater(vec1).AsInt8x16() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt8x16() - case "Less": - gotv = vec0.Less(vec1).AsInt8x16() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt8x16() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt8x16() - - default: - t.Errorf("Unknown method: Uint8x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x16Int8x16Int16x8(t *testing.T, v0 []uint8, v1 []int8, want []int16, which string) { - t.Helper() - var gotv simd.Int16x8 - got := make([]int16, len(want)) - vec0 := simd.LoadUint8x16Slice(v0) - vec1 := simd.LoadInt8x16Slice(v1) - switch which { - case "SaturatedUnsignedSignedPairDotProd": - gotv = vec0.SaturatedUnsignedSignedPairDotProd(vec1) - - default: - t.Errorf("Unknown method: Uint8x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x16Int8x16Mask16x8Int16x8(t *testing.T, v0 []uint8, v1 []int8, v2 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x8 - got := make([]int16, len(want)) - vec0 := simd.LoadUint8x16Slice(v0) - vec1 := simd.LoadInt8x16Slice(v1) - vec2 := simd.LoadInt16x8Slice(v2) - switch which { - case "MaskedSaturatedUnsignedSignedPairDotProd": - gotv = vec0.MaskedSaturatedUnsignedSignedPairDotProd(vec1, vec2.AsMask16x8()) - - default: - 
t.Errorf("Unknown method: Uint8x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x16MaskedCompare(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x16 - got := make([]int8, len(want)) - vec0 := simd.LoadUint8x16Slice(v0) - vec1 := simd.LoadUint8x16Slice(v1) - vec2 := simd.LoadInt8x16Slice(v2) - switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask8x16()).AsInt8x16() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask8x16()).AsInt8x16() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask8x16()).AsInt8x16() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask8x16()).AsInt8x16() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask8x16()).AsInt8x16() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask8x16()).AsInt8x16() - - default: - t.Errorf("Unknown method: Uint8x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x16Unary(t *testing.T, v0 []uint8, want []uint8, which string) { - t.Helper() - var gotv simd.Uint8x16 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x16Slice(v0) - switch which { - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Uint8x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x16UnaryMasked(t *testing.T, v0 []uint8, v1 []int8, want []uint8, which string) { - t.Helper() - var gotv simd.Uint8x16 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x16Slice(v0) - vec1 := simd.LoadInt8x16Slice(v1) - switch which { - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask8x16()) - - default: - t.Errorf("Unknown method: Uint8x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x32Binary(t *testing.T, v0 []uint8, v1 []uint8, want []uint8, which string) { - t.Helper() - var gotv simd.Uint8x32 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x32Slice(v0) - vec1 := simd.LoadUint8x32Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) - case "Average": - gotv = vec0.Average(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "SaturatedAdd": - gotv = vec0.SaturatedAdd(vec1) - case "SaturatedSub": - gotv = vec0.SaturatedSub(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) - - default: - t.Errorf("Unknown method: Uint8x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x32BinaryMasked(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []uint8, which string) { - t.Helper() - var gotv simd.Uint8x32 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x32Slice(v0) - vec1 := simd.LoadUint8x32Slice(v1) - vec2 := 
simd.LoadInt8x32Slice(v2) - switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask8x32()) - case "MaskedAverage": - gotv = vec0.MaskedAverage(vec1, vec2.AsMask8x32()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask8x32()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask8x32()) - case "MaskedSaturatedAdd": - gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask8x32()) - case "MaskedSaturatedSub": - gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask8x32()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask8x32()) - - default: - t.Errorf("Unknown method: Uint8x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x32Compare(t *testing.T, v0 []uint8, v1 []uint8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x32 - got := make([]int8, len(want)) - vec0 := simd.LoadUint8x32Slice(v0) - vec1 := simd.LoadUint8x32Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt8x32() - case "Greater": - gotv = vec0.Greater(vec1).AsInt8x32() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt8x32() - case "Less": - gotv = vec0.Less(vec1).AsInt8x32() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt8x32() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt8x32() - - default: - t.Errorf("Unknown method: Uint8x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x32Int8x32Int16x16(t *testing.T, v0 []uint8, v1 []int8, want []int16, which string) { - t.Helper() - var gotv simd.Int16x16 - got := make([]int16, len(want)) - vec0 := simd.LoadUint8x32Slice(v0) - vec1 := simd.LoadInt8x32Slice(v1) - switch which { - case "SaturatedUnsignedSignedPairDotProd": - gotv = vec0.SaturatedUnsignedSignedPairDotProd(vec1) - - default: - t.Errorf("Unknown method: Uint8x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x32Int8x32Mask16x16Int16x16(t *testing.T, v0 []uint8, v1 []int8, v2 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x16 - got := make([]int16, len(want)) - vec0 := simd.LoadUint8x32Slice(v0) - vec1 := simd.LoadInt8x32Slice(v1) - vec2 := simd.LoadInt16x16Slice(v2) + +func testUint64x4Compare(t *testing.T, v0 []uint64, v1 []uint64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x4 + got := make([]int64, len(want)) + vec0 := simd.LoadUint64x4Slice(v0) + vec1 := simd.LoadUint64x4Slice(v1) switch which { - case "MaskedSaturatedUnsignedSignedPairDotProd": - gotv = vec0.MaskedSaturatedUnsignedSignedPairDotProd(vec1, vec2.AsMask16x16()) + case "Equal": + gotv = vec0.Equal(vec1).AsInt64x4() + case "Greater": + gotv = vec0.Greater(vec1).AsInt64x4() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt64x4() + case "Less": + gotv = vec0.Less(vec1).AsInt64x4() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt64x4() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt64x4() default: - t.Errorf("Unknown method: Uint8x32.%s", which) + t.Errorf("Unknown method: Uint64x4.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -6451,29 +6486,29 @@ func testUint8x32Int8x32Mask16x16Int16x16(t *testing.T, v0 []uint8, v1 []int8, v } } -func 
testUint8x32MaskedCompare(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []int8, which string) { +func testUint64x4MaskedCompare(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64, want []int64, which string) { t.Helper() - var gotv simd.Int8x32 - got := make([]int8, len(want)) - vec0 := simd.LoadUint8x32Slice(v0) - vec1 := simd.LoadUint8x32Slice(v1) - vec2 := simd.LoadInt8x32Slice(v2) + var gotv simd.Int64x4 + got := make([]int64, len(want)) + vec0 := simd.LoadUint64x4Slice(v0) + vec1 := simd.LoadUint64x4Slice(v1) + vec2 := simd.LoadInt64x4Slice(v2) switch which { case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask8x32()).AsInt8x32() + gotv = vec0.MaskedEqual(vec1, vec2.AsMask64x4()).AsInt64x4() case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask8x32()).AsInt8x32() + gotv = vec0.MaskedGreater(vec1, vec2.AsMask64x4()).AsInt64x4() case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask8x32()).AsInt8x32() + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask64x4()).AsInt64x4() case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask8x32()).AsInt8x32() + gotv = vec0.MaskedLess(vec1, vec2.AsMask64x4()).AsInt64x4() case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask8x32()).AsInt8x32() + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask64x4()).AsInt64x4() case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask8x32()).AsInt8x32() + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask64x4()).AsInt64x4() default: - t.Errorf("Unknown method: Uint8x32.%s", which) + t.Errorf("Unknown method: Uint64x4.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -6483,17 +6518,17 @@ func testUint8x32MaskedCompare(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, } } -func testUint8x32Unary(t *testing.T, v0 []uint8, want []uint8, which string) { +func testUint64x4Unary(t *testing.T, v0 []uint64, want []uint64, which string) { t.Helper() - var gotv simd.Uint8x32 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x32Slice(v0) + var gotv simd.Uint64x4 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x4Slice(v0) switch which { case "PopCount": gotv = vec0.PopCount() default: - t.Errorf("Unknown method: Uint8x32.%s", which) + t.Errorf("Unknown method: Uint64x4.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -6503,18 +6538,18 @@ func testUint8x32Unary(t *testing.T, v0 []uint8, want []uint8, which string) { } } -func testUint8x32UnaryMasked(t *testing.T, v0 []uint8, v1 []int8, want []uint8, which string) { +func testUint64x4UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint64, which string) { t.Helper() - var gotv simd.Uint8x32 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x32Slice(v0) - vec1 := simd.LoadInt8x32Slice(v1) + var gotv simd.Uint64x4 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x4Slice(v0) + vec1 := simd.LoadInt64x4Slice(v1) switch which { case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask8x32()) + gotv = vec0.MaskedPopCount(vec1.AsMask64x4()) default: - t.Errorf("Unknown method: Uint8x32.%s", which) + t.Errorf("Unknown method: Uint64x4.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -6524,30 +6559,34 @@ func testUint8x32UnaryMasked(t *testing.T, v0 []uint8, v1 []int8, want []uint8, } } -func testUint8x64Binary(t *testing.T, v0 []uint8, v1 []uint8, want []uint8, which string) { +func testUint64x8Binary(t *testing.T, v0 []uint64, v1 []uint64, want []uint64, which string) { t.Helper() - var gotv simd.Uint8x64 - got 
:= make([]uint8, len(want)) - vec0 := simd.LoadUint8x64Slice(v0) - vec1 := simd.LoadUint8x64Slice(v1) + var gotv simd.Uint64x8 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x8Slice(v0) + vec1 := simd.LoadUint64x8Slice(v1) switch which { case "Add": gotv = vec0.Add(vec1) - case "Average": - gotv = vec0.Average(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) case "Max": gotv = vec0.Max(vec1) case "Min": gotv = vec0.Min(vec1) - case "SaturatedAdd": - gotv = vec0.SaturatedAdd(vec1) - case "SaturatedSub": - gotv = vec0.SaturatedSub(vec1) + case "MulEvenWiden": + gotv = vec0.MulEvenWiden(vec1) + case "Or": + gotv = vec0.Or(vec1) case "Sub": gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) default: - t.Errorf("Unknown method: Uint8x64.%s", which) + t.Errorf("Unknown method: Uint64x8.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -6557,31 +6596,35 @@ func testUint8x64Binary(t *testing.T, v0 []uint8, v1 []uint8, want []uint8, whic } } -func testUint8x64BinaryMasked(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []uint8, which string) { +func testUint64x8BinaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64, want []uint64, which string) { t.Helper() - var gotv simd.Uint8x64 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x64Slice(v0) - vec1 := simd.LoadUint8x64Slice(v1) - vec2 := simd.LoadInt8x64Slice(v2) + var gotv simd.Uint64x8 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x8Slice(v0) + vec1 := simd.LoadUint64x8Slice(v1) + vec2 := simd.LoadInt64x8Slice(v2) switch which { case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask8x64()) - case "MaskedAverage": - gotv = vec0.MaskedAverage(vec1, vec2.AsMask8x64()) + gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x8()) + case "MaskedAnd": + gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x8()) + case "MaskedAndNot": + gotv = vec0.MaskedAndNot(vec1, vec2.AsMask64x8()) case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask8x64()) + gotv = vec0.MaskedMax(vec1, vec2.AsMask64x8()) case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask8x64()) - case "MaskedSaturatedAdd": - gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask8x64()) - case "MaskedSaturatedSub": - gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask8x64()) + gotv = vec0.MaskedMin(vec1, vec2.AsMask64x8()) + case "MaskedMulEvenWiden": + gotv = vec0.MaskedMulEvenWiden(vec1, vec2.AsMask64x8()) + case "MaskedOr": + gotv = vec0.MaskedOr(vec1, vec2.AsMask64x8()) case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask8x64()) + gotv = vec0.MaskedSub(vec1, vec2.AsMask64x8()) + case "MaskedXor": + gotv = vec0.MaskedXor(vec1, vec2.AsMask64x8()) default: - t.Errorf("Unknown method: Uint8x64.%s", which) + t.Errorf("Unknown method: Uint64x8.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -6591,71 +6634,28 @@ func testUint8x64BinaryMasked(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, w } } -func testUint8x64Compare(t *testing.T, v0 []uint8, v1 []uint8, want []int8, which string) { +func testUint64x8Compare(t *testing.T, v0 []uint64, v1 []uint64, want []int64, which string) { t.Helper() - var gotv simd.Int8x64 - got := make([]int8, len(want)) - vec0 := simd.LoadUint8x64Slice(v0) - vec1 := simd.LoadUint8x64Slice(v1) + var gotv simd.Int64x8 + got := make([]int64, len(want)) + vec0 := simd.LoadUint64x8Slice(v0) + vec1 := simd.LoadUint64x8Slice(v1) switch which { case "Equal": - gotv = vec0.Equal(vec1).AsInt8x64() + gotv = vec0.Equal(vec1).AsInt64x8() case "Greater": - gotv = 
vec0.Greater(vec1).AsInt8x64() + gotv = vec0.Greater(vec1).AsInt64x8() case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt8x64() + gotv = vec0.GreaterEqual(vec1).AsInt64x8() case "Less": - gotv = vec0.Less(vec1).AsInt8x64() + gotv = vec0.Less(vec1).AsInt64x8() case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt8x64() + gotv = vec0.LessEqual(vec1).AsInt64x8() case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt8x64() - - default: - t.Errorf("Unknown method: Uint8x64.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x64Int8x64Int16x32(t *testing.T, v0 []uint8, v1 []int8, want []int16, which string) { - t.Helper() - var gotv simd.Int16x32 - got := make([]int16, len(want)) - vec0 := simd.LoadUint8x64Slice(v0) - vec1 := simd.LoadInt8x64Slice(v1) - switch which { - case "SaturatedUnsignedSignedPairDotProd": - gotv = vec0.SaturatedUnsignedSignedPairDotProd(vec1) - - default: - t.Errorf("Unknown method: Uint8x64.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x64Int8x64Mask16x32Int16x32(t *testing.T, v0 []uint8, v1 []int8, v2 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x32 - got := make([]int16, len(want)) - vec0 := simd.LoadUint8x64Slice(v0) - vec1 := simd.LoadInt8x64Slice(v1) - vec2 := simd.LoadInt16x32Slice(v2) - switch which { - case "MaskedSaturatedUnsignedSignedPairDotProd": - gotv = vec0.MaskedSaturatedUnsignedSignedPairDotProd(vec1, vec2.AsMask16x32()) + gotv = vec0.NotEqual(vec1).AsInt64x8() default: - t.Errorf("Unknown method: Uint8x64.%s", which) + t.Errorf("Unknown method: Uint64x8.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -6665,29 +6665,29 @@ func testUint8x64Int8x64Mask16x32Int16x32(t *testing.T, v0 []uint8, v1 []int8, v } } -func testUint8x64MaskedCompare(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []int8, which string) { +func testUint64x8MaskedCompare(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64, want []int64, which string) { t.Helper() - var gotv simd.Int8x64 - got := make([]int8, len(want)) - vec0 := simd.LoadUint8x64Slice(v0) - vec1 := simd.LoadUint8x64Slice(v1) - vec2 := simd.LoadInt8x64Slice(v2) + var gotv simd.Int64x8 + got := make([]int64, len(want)) + vec0 := simd.LoadUint64x8Slice(v0) + vec1 := simd.LoadUint64x8Slice(v1) + vec2 := simd.LoadInt64x8Slice(v2) switch which { case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask8x64()).AsInt8x64() + gotv = vec0.MaskedEqual(vec1, vec2.AsMask64x8()).AsInt64x8() case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask8x64()).AsInt8x64() + gotv = vec0.MaskedGreater(vec1, vec2.AsMask64x8()).AsInt64x8() case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask8x64()).AsInt8x64() + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask64x8()).AsInt64x8() case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask8x64()).AsInt8x64() + gotv = vec0.MaskedLess(vec1, vec2.AsMask64x8()).AsInt64x8() case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask8x64()).AsInt8x64() + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask64x8()).AsInt64x8() case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask8x64()).AsInt8x64() + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask64x8()).AsInt64x8() default: - t.Errorf("Unknown method: 
Uint8x64.%s", which) + t.Errorf("Unknown method: Uint64x8.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -6697,17 +6697,17 @@ func testUint8x64MaskedCompare(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, } } -func testUint8x64Unary(t *testing.T, v0 []uint8, want []uint8, which string) { +func testUint64x8Unary(t *testing.T, v0 []uint64, want []uint64, which string) { t.Helper() - var gotv simd.Uint8x64 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x64Slice(v0) + var gotv simd.Uint64x8 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x8Slice(v0) switch which { case "PopCount": gotv = vec0.PopCount() default: - t.Errorf("Unknown method: Uint8x64.%s", which) + t.Errorf("Unknown method: Uint64x8.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -6717,18 +6717,18 @@ func testUint8x64Unary(t *testing.T, v0 []uint8, want []uint8, which string) { } } -func testUint8x64UnaryMasked(t *testing.T, v0 []uint8, v1 []int8, want []uint8, which string) { +func testUint64x8UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint64, which string) { t.Helper() - var gotv simd.Uint8x64 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x64Slice(v0) - vec1 := simd.LoadInt8x64Slice(v1) + var gotv simd.Uint64x8 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x8Slice(v0) + vec1 := simd.LoadInt64x8Slice(v1) switch which { case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask8x64()) + gotv = vec0.MaskedPopCount(vec1.AsMask64x8()) default: - t.Errorf("Unknown method: Uint8x64.%s", which) + t.Errorf("Unknown method: Uint64x8.%s", which) } gotv.StoreSlice(got) for i := range len(want) { From e61ebfce564086e5e2d634b0d138d96b6e34c19a Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Tue, 24 Jun 2025 15:21:29 +0000 Subject: [PATCH 049/139] [dev.simd] cmd/compile, simd: add shift operations This CL is generated by CL 683475. 
Change-Id: I9e3ac6aff6f711cb26ff85e4c8729d9e2cc38e7d Reviewed-on: https://go-review.googlesource.com/c/go/+/683715 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/amd64/simdssa.go | 312 +- .../compile/internal/ssa/_gen/simdAMD64.rules | 398 + .../compile/internal/ssa/_gen/simdAMD64ops.go | 204 + .../internal/ssa/_gen/simdgenericOps.go | 398 + src/cmd/compile/internal/ssa/opGen.go | 38208 +++++++++------- src/cmd/compile/internal/ssa/rewriteAMD64.go | 5973 ++- .../compile/internal/ssagen/simdintrinsics.go | 398 + src/simd/simd_wrapped_test.go | 1281 +- src/simd/stubs_amd64.go | 4526 +- 9 files changed, 33976 insertions(+), 17722 deletions(-) diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 5297680357844b..6c1d365bfa7f9b 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -247,6 +247,18 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPHSUBW256, ssa.OpAMD64VPHSUBD128, ssa.OpAMD64VPHSUBD256, + ssa.OpAMD64VPROLVD128, + ssa.OpAMD64VPROLVD256, + ssa.OpAMD64VPROLVD512, + ssa.OpAMD64VPROLVQ128, + ssa.OpAMD64VPROLVQ256, + ssa.OpAMD64VPROLVQ512, + ssa.OpAMD64VPRORVD128, + ssa.OpAMD64VPRORVD256, + ssa.OpAMD64VPRORVD512, + ssa.OpAMD64VPRORVQ128, + ssa.OpAMD64VPRORVQ256, + ssa.OpAMD64VPRORVQ512, ssa.OpAMD64VPADDSB128, ssa.OpAMD64VPADDSB256, ssa.OpAMD64VPADDSB512, @@ -266,6 +278,33 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMADDUBSW128, ssa.OpAMD64VPMADDUBSW256, ssa.OpAMD64VPMADDUBSW512, + ssa.OpAMD64VPSLLVW128, + ssa.OpAMD64VPSLLVW256, + ssa.OpAMD64VPSLLVW512, + ssa.OpAMD64VPSLLVD128, + ssa.OpAMD64VPSLLVD256, + ssa.OpAMD64VPSLLVD512, + ssa.OpAMD64VPSLLVQ128, + ssa.OpAMD64VPSLLVQ256, + ssa.OpAMD64VPSLLVQ512, + ssa.OpAMD64VPSRLVW128, + ssa.OpAMD64VPSRLVW256, + ssa.OpAMD64VPSRLVW512, + ssa.OpAMD64VPSRLVD128, + ssa.OpAMD64VPSRLVD256, + ssa.OpAMD64VPSRLVD512, + ssa.OpAMD64VPSRLVQ128, + ssa.OpAMD64VPSRLVQ256, + ssa.OpAMD64VPSRLVQ512, + ssa.OpAMD64VPSRAVW128, + ssa.OpAMD64VPSRAVW256, + ssa.OpAMD64VPSRAVW512, + ssa.OpAMD64VPSRAVD128, + ssa.OpAMD64VPSRAVD256, + ssa.OpAMD64VPSRAVD512, + ssa.OpAMD64VPSRAVQ128, + ssa.OpAMD64VPSRAVQ256, + ssa.OpAMD64VPSRAVQ512, ssa.OpAMD64VPSIGNB128, ssa.OpAMD64VPSIGNB256, ssa.OpAMD64VPSIGNW128, @@ -464,6 +503,18 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMADDWDMasked128, ssa.OpAMD64VPMADDWDMasked256, ssa.OpAMD64VPMADDWDMasked512, + ssa.OpAMD64VPROLVDMasked128, + ssa.OpAMD64VPROLVDMasked256, + ssa.OpAMD64VPROLVDMasked512, + ssa.OpAMD64VPROLVQMasked128, + ssa.OpAMD64VPROLVQMasked256, + ssa.OpAMD64VPROLVQMasked512, + ssa.OpAMD64VPRORVDMasked128, + ssa.OpAMD64VPRORVDMasked256, + ssa.OpAMD64VPRORVDMasked512, + ssa.OpAMD64VPRORVQMasked128, + ssa.OpAMD64VPRORVQMasked256, + ssa.OpAMD64VPRORVQMasked512, ssa.OpAMD64VPADDSBMasked128, ssa.OpAMD64VPADDSBMasked256, ssa.OpAMD64VPADDSBMasked512, @@ -479,6 +530,33 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMADDUBSWMasked128, ssa.OpAMD64VPMADDUBSWMasked256, ssa.OpAMD64VPMADDUBSWMasked512, + ssa.OpAMD64VPSLLVWMasked128, + ssa.OpAMD64VPSLLVWMasked256, + ssa.OpAMD64VPSLLVWMasked512, + ssa.OpAMD64VPSLLVDMasked128, + ssa.OpAMD64VPSLLVDMasked256, + ssa.OpAMD64VPSLLVDMasked512, + ssa.OpAMD64VPSLLVQMasked128, + ssa.OpAMD64VPSLLVQMasked256, + ssa.OpAMD64VPSLLVQMasked512, + ssa.OpAMD64VPSRLVWMasked128, + ssa.OpAMD64VPSRLVWMasked256, + ssa.OpAMD64VPSRLVWMasked512, + ssa.OpAMD64VPSRLVDMasked128, + 
ssa.OpAMD64VPSRLVDMasked256, + ssa.OpAMD64VPSRLVDMasked512, + ssa.OpAMD64VPSRLVQMasked128, + ssa.OpAMD64VPSRLVQMasked256, + ssa.OpAMD64VPSRLVQMasked512, + ssa.OpAMD64VPSRAVWMasked128, + ssa.OpAMD64VPSRAVWMasked256, + ssa.OpAMD64VPSRAVWMasked512, + ssa.OpAMD64VPSRAVDMasked128, + ssa.OpAMD64VPSRAVDMasked256, + ssa.OpAMD64VPSRAVDMasked512, + ssa.OpAMD64VPSRAVQMasked128, + ssa.OpAMD64VPSRAVQMasked256, + ssa.OpAMD64VPSRAVQMasked512, ssa.OpAMD64VSUBPSMasked128, ssa.OpAMD64VSUBPSMasked256, ssa.OpAMD64VSUBPSMasked512, @@ -570,7 +648,19 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VREDUCEPS512, ssa.OpAMD64VREDUCEPD128, ssa.OpAMD64VREDUCEPD256, - ssa.OpAMD64VREDUCEPD512: + ssa.OpAMD64VREDUCEPD512, + ssa.OpAMD64VPROLD128, + ssa.OpAMD64VPROLD256, + ssa.OpAMD64VPROLD512, + ssa.OpAMD64VPROLQ128, + ssa.OpAMD64VPROLQ256, + ssa.OpAMD64VPROLQ512, + ssa.OpAMD64VPRORD128, + ssa.OpAMD64VPRORD256, + ssa.OpAMD64VPRORD512, + ssa.OpAMD64VPRORQ128, + ssa.OpAMD64VPRORQ256, + ssa.OpAMD64VPRORQ512: p = simdFp11Imm8(s, v) case ssa.OpAMD64VRNDSCALEPSMasked128, @@ -584,14 +674,44 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VREDUCEPSMasked512, ssa.OpAMD64VREDUCEPDMasked128, ssa.OpAMD64VREDUCEPDMasked256, - ssa.OpAMD64VREDUCEPDMasked512: + ssa.OpAMD64VREDUCEPDMasked512, + ssa.OpAMD64VPROLDMasked128, + ssa.OpAMD64VPROLDMasked256, + ssa.OpAMD64VPROLDMasked512, + ssa.OpAMD64VPROLQMasked128, + ssa.OpAMD64VPROLQMasked256, + ssa.OpAMD64VPROLQMasked512, + ssa.OpAMD64VPRORDMasked128, + ssa.OpAMD64VPRORDMasked256, + ssa.OpAMD64VPRORDMasked512, + ssa.OpAMD64VPRORQMasked128, + ssa.OpAMD64VPRORQMasked256, + ssa.OpAMD64VPRORQMasked512: p = simdFpkfpImm8(s, v) case ssa.OpAMD64VDPPD128, ssa.OpAMD64VCMPPS128, ssa.OpAMD64VCMPPS256, ssa.OpAMD64VCMPPD128, - ssa.OpAMD64VCMPPD256: + ssa.OpAMD64VCMPPD256, + ssa.OpAMD64VPSHLDW128, + ssa.OpAMD64VPSHLDW256, + ssa.OpAMD64VPSHLDW512, + ssa.OpAMD64VPSHLDD128, + ssa.OpAMD64VPSHLDD256, + ssa.OpAMD64VPSHLDD512, + ssa.OpAMD64VPSHLDQ128, + ssa.OpAMD64VPSHLDQ256, + ssa.OpAMD64VPSHLDQ512, + ssa.OpAMD64VPSHRDW128, + ssa.OpAMD64VPSHRDW256, + ssa.OpAMD64VPSHRDW512, + ssa.OpAMD64VPSHRDD128, + ssa.OpAMD64VPSHRDD256, + ssa.OpAMD64VPSHRDD512, + ssa.OpAMD64VPSHRDQ128, + ssa.OpAMD64VPSHRDQ256, + ssa.OpAMD64VPSHRDQ512: p = simdFp21Imm8(s, v) case ssa.OpAMD64VCMPPS512, @@ -681,6 +801,24 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPDPBUSDS128, ssa.OpAMD64VPDPBUSDS256, ssa.OpAMD64VPDPBUSDS512, + ssa.OpAMD64VPSHLDVW128, + ssa.OpAMD64VPSHLDVW256, + ssa.OpAMD64VPSHLDVW512, + ssa.OpAMD64VPSHLDVD128, + ssa.OpAMD64VPSHLDVD256, + ssa.OpAMD64VPSHLDVD512, + ssa.OpAMD64VPSHLDVQ128, + ssa.OpAMD64VPSHLDVQ256, + ssa.OpAMD64VPSHLDVQ512, + ssa.OpAMD64VPSHRDVW128, + ssa.OpAMD64VPSHRDVW256, + ssa.OpAMD64VPSHRDVW512, + ssa.OpAMD64VPSHRDVD128, + ssa.OpAMD64VPSHRDVD256, + ssa.OpAMD64VPSHRDVD512, + ssa.OpAMD64VPSHRDVQ128, + ssa.OpAMD64VPSHRDVQ256, + ssa.OpAMD64VPSHRDVQ512, ssa.OpAMD64VPDPBUSD128, ssa.OpAMD64VPDPBUSD256, ssa.OpAMD64VPDPBUSD512: @@ -713,11 +851,63 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPDPBUSDSMasked128, ssa.OpAMD64VPDPBUSDSMasked256, ssa.OpAMD64VPDPBUSDSMasked512, + ssa.OpAMD64VPSHLDVWMasked128, + ssa.OpAMD64VPSHLDVWMasked256, + ssa.OpAMD64VPSHLDVWMasked512, + ssa.OpAMD64VPSHLDVDMasked128, + ssa.OpAMD64VPSHLDVDMasked256, + ssa.OpAMD64VPSHLDVDMasked512, + ssa.OpAMD64VPSHLDVQMasked128, + ssa.OpAMD64VPSHLDVQMasked256, + ssa.OpAMD64VPSHLDVQMasked512, + ssa.OpAMD64VPSHRDVWMasked128, + 
ssa.OpAMD64VPSHRDVWMasked256, + ssa.OpAMD64VPSHRDVWMasked512, + ssa.OpAMD64VPSHRDVDMasked128, + ssa.OpAMD64VPSHRDVDMasked256, + ssa.OpAMD64VPSHRDVDMasked512, + ssa.OpAMD64VPSHRDVQMasked128, + ssa.OpAMD64VPSHRDVQMasked256, + ssa.OpAMD64VPSHRDVQMasked512, ssa.OpAMD64VPDPBUSDMasked128, ssa.OpAMD64VPDPBUSDMasked256, ssa.OpAMD64VPDPBUSDMasked512: p = simdFp3kfpResultInArg0(s, v) + case ssa.OpAMD64VPSLLW128, + ssa.OpAMD64VPSLLW256, + ssa.OpAMD64VPSLLD128, + ssa.OpAMD64VPSLLD256, + ssa.OpAMD64VPSLLQ128, + ssa.OpAMD64VPSLLQ256, + ssa.OpAMD64VPSLLQ512, + ssa.OpAMD64VPSRLW128, + ssa.OpAMD64VPSRLW256, + ssa.OpAMD64VPSRLD128, + ssa.OpAMD64VPSRLD256, + ssa.OpAMD64VPSRLQ128, + ssa.OpAMD64VPSRLQ256, + ssa.OpAMD64VPSRLQ512, + ssa.OpAMD64VPSRAW128, + ssa.OpAMD64VPSRAW256, + ssa.OpAMD64VPSRAD128, + ssa.OpAMD64VPSRAD256, + ssa.OpAMD64VPSRAQ128, + ssa.OpAMD64VPSRAQ256, + ssa.OpAMD64VPSRAQ512: + p = simdFpXfp(s, v) + + case ssa.OpAMD64VPSLLQMasked128, + ssa.OpAMD64VPSLLQMasked256, + ssa.OpAMD64VPSLLQMasked512, + ssa.OpAMD64VPSRLQMasked128, + ssa.OpAMD64VPSRLQMasked256, + ssa.OpAMD64VPSRLQMasked512, + ssa.OpAMD64VPSRAQMasked128, + ssa.OpAMD64VPSRAQMasked256, + ssa.OpAMD64VPSRAQMasked512: + p = simdFpXkfp(s, v) + case ssa.OpAMD64VPINSRB128, ssa.OpAMD64VPINSRW128, ssa.OpAMD64VPINSRD128, @@ -730,6 +920,26 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPEXTRQ128: p = simdFpgpImm8(s, v) + case ssa.OpAMD64VPSHLDWMasked128, + ssa.OpAMD64VPSHLDWMasked256, + ssa.OpAMD64VPSHLDWMasked512, + ssa.OpAMD64VPSHLDDMasked128, + ssa.OpAMD64VPSHLDDMasked256, + ssa.OpAMD64VPSHLDDMasked512, + ssa.OpAMD64VPSHLDQMasked128, + ssa.OpAMD64VPSHLDQMasked256, + ssa.OpAMD64VPSHLDQMasked512, + ssa.OpAMD64VPSHRDWMasked128, + ssa.OpAMD64VPSHRDWMasked256, + ssa.OpAMD64VPSHRDWMasked512, + ssa.OpAMD64VPSHRDDMasked128, + ssa.OpAMD64VPSHRDDMasked256, + ssa.OpAMD64VPSHRDDMasked512, + ssa.OpAMD64VPSHRDQMasked128, + ssa.OpAMD64VPSHRDQMasked256, + ssa.OpAMD64VPSHRDQMasked512: + p = simdFp2kfpImm8(s, v) + default: // Unknown reg shape return false @@ -968,6 +1178,30 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPOPCNTQMasked128, ssa.OpAMD64VPOPCNTQMasked256, ssa.OpAMD64VPOPCNTQMasked512, + ssa.OpAMD64VPROLDMasked128, + ssa.OpAMD64VPROLDMasked256, + ssa.OpAMD64VPROLDMasked512, + ssa.OpAMD64VPROLQMasked128, + ssa.OpAMD64VPROLQMasked256, + ssa.OpAMD64VPROLQMasked512, + ssa.OpAMD64VPRORDMasked128, + ssa.OpAMD64VPRORDMasked256, + ssa.OpAMD64VPRORDMasked512, + ssa.OpAMD64VPRORQMasked128, + ssa.OpAMD64VPRORQMasked256, + ssa.OpAMD64VPRORQMasked512, + ssa.OpAMD64VPROLVDMasked128, + ssa.OpAMD64VPROLVDMasked256, + ssa.OpAMD64VPROLVDMasked512, + ssa.OpAMD64VPROLVQMasked128, + ssa.OpAMD64VPROLVQMasked256, + ssa.OpAMD64VPROLVQMasked512, + ssa.OpAMD64VPRORVDMasked128, + ssa.OpAMD64VPRORVDMasked256, + ssa.OpAMD64VPRORVDMasked512, + ssa.OpAMD64VPRORVQMasked128, + ssa.OpAMD64VPRORVQMasked256, + ssa.OpAMD64VPRORVQMasked512, ssa.OpAMD64VPADDSBMasked128, ssa.OpAMD64VPADDSBMasked256, ssa.OpAMD64VPADDSBMasked512, @@ -989,6 +1223,78 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPDPBUSDSMasked128, ssa.OpAMD64VPDPBUSDSMasked256, ssa.OpAMD64VPDPBUSDSMasked512, + ssa.OpAMD64VPSLLQMasked128, + ssa.OpAMD64VPSLLQMasked256, + ssa.OpAMD64VPSLLQMasked512, + ssa.OpAMD64VPSHLDWMasked128, + ssa.OpAMD64VPSHLDWMasked256, + ssa.OpAMD64VPSHLDWMasked512, + ssa.OpAMD64VPSHLDDMasked128, + ssa.OpAMD64VPSHLDDMasked256, + ssa.OpAMD64VPSHLDDMasked512, + ssa.OpAMD64VPSHLDQMasked128, + ssa.OpAMD64VPSHLDQMasked256, + 
ssa.OpAMD64VPSHLDQMasked512, + ssa.OpAMD64VPSRLQMasked128, + ssa.OpAMD64VPSRLQMasked256, + ssa.OpAMD64VPSRLQMasked512, + ssa.OpAMD64VPSHRDWMasked128, + ssa.OpAMD64VPSHRDWMasked256, + ssa.OpAMD64VPSHRDWMasked512, + ssa.OpAMD64VPSHRDDMasked128, + ssa.OpAMD64VPSHRDDMasked256, + ssa.OpAMD64VPSHRDDMasked512, + ssa.OpAMD64VPSHRDQMasked128, + ssa.OpAMD64VPSHRDQMasked256, + ssa.OpAMD64VPSHRDQMasked512, + ssa.OpAMD64VPSRAQMasked128, + ssa.OpAMD64VPSRAQMasked256, + ssa.OpAMD64VPSRAQMasked512, + ssa.OpAMD64VPSLLVWMasked128, + ssa.OpAMD64VPSLLVWMasked256, + ssa.OpAMD64VPSLLVWMasked512, + ssa.OpAMD64VPSLLVDMasked128, + ssa.OpAMD64VPSLLVDMasked256, + ssa.OpAMD64VPSLLVDMasked512, + ssa.OpAMD64VPSLLVQMasked128, + ssa.OpAMD64VPSLLVQMasked256, + ssa.OpAMD64VPSLLVQMasked512, + ssa.OpAMD64VPSHLDVWMasked128, + ssa.OpAMD64VPSHLDVWMasked256, + ssa.OpAMD64VPSHLDVWMasked512, + ssa.OpAMD64VPSHLDVDMasked128, + ssa.OpAMD64VPSHLDVDMasked256, + ssa.OpAMD64VPSHLDVDMasked512, + ssa.OpAMD64VPSHLDVQMasked128, + ssa.OpAMD64VPSHLDVQMasked256, + ssa.OpAMD64VPSHLDVQMasked512, + ssa.OpAMD64VPSRLVWMasked128, + ssa.OpAMD64VPSRLVWMasked256, + ssa.OpAMD64VPSRLVWMasked512, + ssa.OpAMD64VPSRLVDMasked128, + ssa.OpAMD64VPSRLVDMasked256, + ssa.OpAMD64VPSRLVDMasked512, + ssa.OpAMD64VPSRLVQMasked128, + ssa.OpAMD64VPSRLVQMasked256, + ssa.OpAMD64VPSRLVQMasked512, + ssa.OpAMD64VPSHRDVWMasked128, + ssa.OpAMD64VPSHRDVWMasked256, + ssa.OpAMD64VPSHRDVWMasked512, + ssa.OpAMD64VPSHRDVDMasked128, + ssa.OpAMD64VPSHRDVDMasked256, + ssa.OpAMD64VPSHRDVDMasked512, + ssa.OpAMD64VPSHRDVQMasked128, + ssa.OpAMD64VPSHRDVQMasked256, + ssa.OpAMD64VPSHRDVQMasked512, + ssa.OpAMD64VPSRAVWMasked128, + ssa.OpAMD64VPSRAVWMasked256, + ssa.OpAMD64VPSRAVWMasked512, + ssa.OpAMD64VPSRAVDMasked128, + ssa.OpAMD64VPSRAVDMasked256, + ssa.OpAMD64VPSRAVDMasked512, + ssa.OpAMD64VPSRAVQMasked128, + ssa.OpAMD64VPSRAVQMasked256, + ssa.OpAMD64VPSRAVQMasked512, ssa.OpAMD64VSQRTPSMasked128, ssa.OpAMD64VSQRTPSMasked256, ssa.OpAMD64VSQRTPSMasked512, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index b21d58b4a44a21..968ded213133a2 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -904,6 +904,54 @@ (MaskedPopCountUint64x2 x mask) => (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) (MaskedPopCountUint64x4 x mask) => (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) (MaskedPopCountUint64x8 x mask) => (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) +(MaskedRotateAllLeftInt32x4 [a] x mask) => (VPROLDMasked128 [a] x (VPMOVVec32x4ToM mask)) +(MaskedRotateAllLeftInt32x8 [a] x mask) => (VPROLDMasked256 [a] x (VPMOVVec32x8ToM mask)) +(MaskedRotateAllLeftInt32x16 [a] x mask) => (VPROLDMasked512 [a] x (VPMOVVec32x16ToM mask)) +(MaskedRotateAllLeftInt64x2 [a] x mask) => (VPROLQMasked128 [a] x (VPMOVVec64x2ToM mask)) +(MaskedRotateAllLeftInt64x4 [a] x mask) => (VPROLQMasked256 [a] x (VPMOVVec64x4ToM mask)) +(MaskedRotateAllLeftInt64x8 [a] x mask) => (VPROLQMasked512 [a] x (VPMOVVec64x8ToM mask)) +(MaskedRotateAllLeftUint32x4 [a] x mask) => (VPROLDMasked128 [a] x (VPMOVVec32x4ToM mask)) +(MaskedRotateAllLeftUint32x8 [a] x mask) => (VPROLDMasked256 [a] x (VPMOVVec32x8ToM mask)) +(MaskedRotateAllLeftUint32x16 [a] x mask) => (VPROLDMasked512 [a] x (VPMOVVec32x16ToM mask)) +(MaskedRotateAllLeftUint64x2 [a] x mask) => (VPROLQMasked128 [a] x (VPMOVVec64x2ToM mask)) +(MaskedRotateAllLeftUint64x4 [a] x mask) => (VPROLQMasked256 [a] x (VPMOVVec64x4ToM mask)) 
+(MaskedRotateAllLeftUint64x8 [a] x mask) => (VPROLQMasked512 [a] x (VPMOVVec64x8ToM mask)) +(MaskedRotateAllRightInt32x4 [a] x mask) => (VPRORDMasked128 [a] x (VPMOVVec32x4ToM mask)) +(MaskedRotateAllRightInt32x8 [a] x mask) => (VPRORDMasked256 [a] x (VPMOVVec32x8ToM mask)) +(MaskedRotateAllRightInt32x16 [a] x mask) => (VPRORDMasked512 [a] x (VPMOVVec32x16ToM mask)) +(MaskedRotateAllRightInt64x2 [a] x mask) => (VPRORQMasked128 [a] x (VPMOVVec64x2ToM mask)) +(MaskedRotateAllRightInt64x4 [a] x mask) => (VPRORQMasked256 [a] x (VPMOVVec64x4ToM mask)) +(MaskedRotateAllRightInt64x8 [a] x mask) => (VPRORQMasked512 [a] x (VPMOVVec64x8ToM mask)) +(MaskedRotateAllRightUint32x4 [a] x mask) => (VPRORDMasked128 [a] x (VPMOVVec32x4ToM mask)) +(MaskedRotateAllRightUint32x8 [a] x mask) => (VPRORDMasked256 [a] x (VPMOVVec32x8ToM mask)) +(MaskedRotateAllRightUint32x16 [a] x mask) => (VPRORDMasked512 [a] x (VPMOVVec32x16ToM mask)) +(MaskedRotateAllRightUint64x2 [a] x mask) => (VPRORQMasked128 [a] x (VPMOVVec64x2ToM mask)) +(MaskedRotateAllRightUint64x4 [a] x mask) => (VPRORQMasked256 [a] x (VPMOVVec64x4ToM mask)) +(MaskedRotateAllRightUint64x8 [a] x mask) => (VPRORQMasked512 [a] x (VPMOVVec64x8ToM mask)) +(MaskedRotateLeftInt32x4 x y mask) => (VPROLVDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedRotateLeftInt32x8 x y mask) => (VPROLVDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedRotateLeftInt32x16 x y mask) => (VPROLVDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedRotateLeftInt64x2 x y mask) => (VPROLVQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedRotateLeftInt64x4 x y mask) => (VPROLVQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedRotateLeftInt64x8 x y mask) => (VPROLVQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedRotateLeftUint32x4 x y mask) => (VPROLVDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedRotateLeftUint32x8 x y mask) => (VPROLVDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedRotateLeftUint32x16 x y mask) => (VPROLVDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedRotateLeftUint64x2 x y mask) => (VPROLVQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedRotateLeftUint64x4 x y mask) => (VPROLVQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedRotateLeftUint64x8 x y mask) => (VPROLVQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedRotateRightInt32x4 x y mask) => (VPRORVDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedRotateRightInt32x8 x y mask) => (VPRORVDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedRotateRightInt32x16 x y mask) => (VPRORVDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedRotateRightInt64x2 x y mask) => (VPRORVQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedRotateRightInt64x4 x y mask) => (VPRORVQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedRotateRightInt64x8 x y mask) => (VPRORVQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedRotateRightUint32x4 x y mask) => (VPRORVDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedRotateRightUint32x8 x y mask) => (VPRORVDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedRotateRightUint32x16 x y mask) => (VPRORVDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedRotateRightUint64x2 x y mask) => (VPRORVQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedRotateRightUint64x4 x y mask) => (VPRORVQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedRotateRightUint64x8 x y mask) => (VPRORVQMasked512 x y (VPMOVVec64x8ToM mask)) (MaskedRoundSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+8] x (VPMOVVec32x4ToM mask)) (MaskedRoundSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+8] x (VPMOVVec32x8ToM mask)) 
(MaskedRoundSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+8] x (VPMOVVec32x16ToM mask)) @@ -952,6 +1000,147 @@ (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4 x y z mask) => (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8 x y z mask) => (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16 x y z mask) => (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedShiftAllLeftInt64x2 x y mask) => (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedShiftAllLeftInt64x4 x y mask) => (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedShiftAllLeftInt64x8 x y mask) => (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedShiftAllLeftUint64x2 x y mask) => (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedShiftAllLeftUint64x4 x y mask) => (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedShiftAllLeftUint64x8 x y mask) => (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedShiftAllLeftAndFillUpperFromInt16x8 [a] x y mask) => (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) +(MaskedShiftAllLeftAndFillUpperFromInt16x16 [a] x y mask) => (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) +(MaskedShiftAllLeftAndFillUpperFromInt16x32 [a] x y mask) => (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) +(MaskedShiftAllLeftAndFillUpperFromInt32x4 [a] x y mask) => (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) +(MaskedShiftAllLeftAndFillUpperFromInt32x8 [a] x y mask) => (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) +(MaskedShiftAllLeftAndFillUpperFromInt32x16 [a] x y mask) => (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) +(MaskedShiftAllLeftAndFillUpperFromInt64x2 [a] x y mask) => (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) +(MaskedShiftAllLeftAndFillUpperFromInt64x4 [a] x y mask) => (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) +(MaskedShiftAllLeftAndFillUpperFromInt64x8 [a] x y mask) => (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) +(MaskedShiftAllLeftAndFillUpperFromUint16x8 [a] x y mask) => (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) +(MaskedShiftAllLeftAndFillUpperFromUint16x16 [a] x y mask) => (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) +(MaskedShiftAllLeftAndFillUpperFromUint16x32 [a] x y mask) => (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) +(MaskedShiftAllLeftAndFillUpperFromUint32x4 [a] x y mask) => (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) +(MaskedShiftAllLeftAndFillUpperFromUint32x8 [a] x y mask) => (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) +(MaskedShiftAllLeftAndFillUpperFromUint32x16 [a] x y mask) => (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) +(MaskedShiftAllLeftAndFillUpperFromUint64x2 [a] x y mask) => (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) +(MaskedShiftAllLeftAndFillUpperFromUint64x4 [a] x y mask) => (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) +(MaskedShiftAllLeftAndFillUpperFromUint64x8 [a] x y mask) => (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) +(MaskedShiftAllRightInt64x2 x y mask) => (VPSRLQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedShiftAllRightInt64x4 x y mask) => (VPSRLQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedShiftAllRightInt64x8 x y mask) => (VPSRLQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedShiftAllRightUint64x2 x y mask) => (VPSRLQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedShiftAllRightUint64x4 x y mask) => (VPSRLQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedShiftAllRightUint64x8 x y mask) => 
(VPSRLQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedShiftAllRightAndFillUpperFromInt16x8 [a] x y mask) => (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) +(MaskedShiftAllRightAndFillUpperFromInt16x16 [a] x y mask) => (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) +(MaskedShiftAllRightAndFillUpperFromInt16x32 [a] x y mask) => (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) +(MaskedShiftAllRightAndFillUpperFromInt32x4 [a] x y mask) => (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) +(MaskedShiftAllRightAndFillUpperFromInt32x8 [a] x y mask) => (VPSHRDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) +(MaskedShiftAllRightAndFillUpperFromInt32x16 [a] x y mask) => (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) +(MaskedShiftAllRightAndFillUpperFromInt64x2 [a] x y mask) => (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) +(MaskedShiftAllRightAndFillUpperFromInt64x4 [a] x y mask) => (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) +(MaskedShiftAllRightAndFillUpperFromInt64x8 [a] x y mask) => (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) +(MaskedShiftAllRightAndFillUpperFromUint16x8 [a] x y mask) => (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) +(MaskedShiftAllRightAndFillUpperFromUint16x16 [a] x y mask) => (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) +(MaskedShiftAllRightAndFillUpperFromUint16x32 [a] x y mask) => (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) +(MaskedShiftAllRightAndFillUpperFromUint32x4 [a] x y mask) => (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) +(MaskedShiftAllRightAndFillUpperFromUint32x8 [a] x y mask) => (VPSHRDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) +(MaskedShiftAllRightAndFillUpperFromUint32x16 [a] x y mask) => (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) +(MaskedShiftAllRightAndFillUpperFromUint64x2 [a] x y mask) => (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) +(MaskedShiftAllRightAndFillUpperFromUint64x4 [a] x y mask) => (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) +(MaskedShiftAllRightAndFillUpperFromUint64x8 [a] x y mask) => (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) +(MaskedShiftAllRightSignExtendedInt64x2 x y mask) => (VPSRAQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedShiftAllRightSignExtendedInt64x4 x y mask) => (VPSRAQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedShiftAllRightSignExtendedInt64x8 x y mask) => (VPSRAQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedShiftLeftInt16x8 x y mask) => (VPSLLVWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedShiftLeftInt16x16 x y mask) => (VPSLLVWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedShiftLeftInt16x32 x y mask) => (VPSLLVWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedShiftLeftInt32x4 x y mask) => (VPSLLVDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedShiftLeftInt32x8 x y mask) => (VPSLLVDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedShiftLeftInt32x16 x y mask) => (VPSLLVDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedShiftLeftInt64x2 x y mask) => (VPSLLVQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedShiftLeftInt64x4 x y mask) => (VPSLLVQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedShiftLeftInt64x8 x y mask) => (VPSLLVQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedShiftLeftUint16x8 x y mask) => (VPSLLVWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedShiftLeftUint16x16 x y mask) => (VPSLLVWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedShiftLeftUint16x32 x y mask) => (VPSLLVWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedShiftLeftUint32x4 x y mask) => (VPSLLVDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedShiftLeftUint32x8 x y mask) => (VPSLLVDMasked256 x y 
(VPMOVVec32x8ToM mask)) +(MaskedShiftLeftUint32x16 x y mask) => (VPSLLVDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedShiftLeftUint64x2 x y mask) => (VPSLLVQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedShiftLeftUint64x4 x y mask) => (VPSLLVQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedShiftLeftUint64x8 x y mask) => (VPSLLVQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedShiftLeftAndFillUpperFromInt16x8 x y z mask) => (VPSHLDVWMasked128 x y z (VPMOVVec16x8ToM mask)) +(MaskedShiftLeftAndFillUpperFromInt16x16 x y z mask) => (VPSHLDVWMasked256 x y z (VPMOVVec16x16ToM mask)) +(MaskedShiftLeftAndFillUpperFromInt16x32 x y z mask) => (VPSHLDVWMasked512 x y z (VPMOVVec16x32ToM mask)) +(MaskedShiftLeftAndFillUpperFromInt32x4 x y z mask) => (VPSHLDVDMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedShiftLeftAndFillUpperFromInt32x8 x y z mask) => (VPSHLDVDMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedShiftLeftAndFillUpperFromInt32x16 x y z mask) => (VPSHLDVDMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedShiftLeftAndFillUpperFromInt64x2 x y z mask) => (VPSHLDVQMasked128 x y z (VPMOVVec64x2ToM mask)) +(MaskedShiftLeftAndFillUpperFromInt64x4 x y z mask) => (VPSHLDVQMasked256 x y z (VPMOVVec64x4ToM mask)) +(MaskedShiftLeftAndFillUpperFromInt64x8 x y z mask) => (VPSHLDVQMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedShiftLeftAndFillUpperFromUint16x8 x y z mask) => (VPSHLDVWMasked128 x y z (VPMOVVec16x8ToM mask)) +(MaskedShiftLeftAndFillUpperFromUint16x16 x y z mask) => (VPSHLDVWMasked256 x y z (VPMOVVec16x16ToM mask)) +(MaskedShiftLeftAndFillUpperFromUint16x32 x y z mask) => (VPSHLDVWMasked512 x y z (VPMOVVec16x32ToM mask)) +(MaskedShiftLeftAndFillUpperFromUint32x4 x y z mask) => (VPSHLDVDMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedShiftLeftAndFillUpperFromUint32x8 x y z mask) => (VPSHLDVDMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedShiftLeftAndFillUpperFromUint32x16 x y z mask) => (VPSHLDVDMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedShiftLeftAndFillUpperFromUint64x2 x y z mask) => (VPSHLDVQMasked128 x y z (VPMOVVec64x2ToM mask)) +(MaskedShiftLeftAndFillUpperFromUint64x4 x y z mask) => (VPSHLDVQMasked256 x y z (VPMOVVec64x4ToM mask)) +(MaskedShiftLeftAndFillUpperFromUint64x8 x y z mask) => (VPSHLDVQMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedShiftRightInt16x8 x y mask) => (VPSRLVWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedShiftRightInt16x16 x y mask) => (VPSRLVWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedShiftRightInt16x32 x y mask) => (VPSRLVWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedShiftRightInt32x4 x y mask) => (VPSRLVDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedShiftRightInt32x8 x y mask) => (VPSRLVDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedShiftRightInt32x16 x y mask) => (VPSRLVDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedShiftRightInt64x2 x y mask) => (VPSRLVQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedShiftRightInt64x4 x y mask) => (VPSRLVQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedShiftRightInt64x8 x y mask) => (VPSRLVQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedShiftRightUint16x8 x y mask) => (VPSRLVWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedShiftRightUint16x16 x y mask) => (VPSRLVWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedShiftRightUint16x32 x y mask) => (VPSRLVWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedShiftRightUint32x4 x y mask) => (VPSRLVDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedShiftRightUint32x8 x y mask) => (VPSRLVDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedShiftRightUint32x16 x y mask) => (VPSRLVDMasked512 x y 
(VPMOVVec32x16ToM mask)) +(MaskedShiftRightUint64x2 x y mask) => (VPSRLVQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedShiftRightUint64x4 x y mask) => (VPSRLVQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedShiftRightUint64x8 x y mask) => (VPSRLVQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedShiftRightAndFillUpperFromInt16x8 x y z mask) => (VPSHRDVWMasked128 x y z (VPMOVVec16x8ToM mask)) +(MaskedShiftRightAndFillUpperFromInt16x16 x y z mask) => (VPSHRDVWMasked256 x y z (VPMOVVec16x16ToM mask)) +(MaskedShiftRightAndFillUpperFromInt16x32 x y z mask) => (VPSHRDVWMasked512 x y z (VPMOVVec16x32ToM mask)) +(MaskedShiftRightAndFillUpperFromInt32x4 x y z mask) => (VPSHRDVDMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedShiftRightAndFillUpperFromInt32x8 x y z mask) => (VPSHRDVDMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedShiftRightAndFillUpperFromInt32x16 x y z mask) => (VPSHRDVDMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedShiftRightAndFillUpperFromInt64x2 x y z mask) => (VPSHRDVQMasked128 x y z (VPMOVVec64x2ToM mask)) +(MaskedShiftRightAndFillUpperFromInt64x4 x y z mask) => (VPSHRDVQMasked256 x y z (VPMOVVec64x4ToM mask)) +(MaskedShiftRightAndFillUpperFromInt64x8 x y z mask) => (VPSHRDVQMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedShiftRightAndFillUpperFromUint16x8 x y z mask) => (VPSHRDVWMasked128 x y z (VPMOVVec16x8ToM mask)) +(MaskedShiftRightAndFillUpperFromUint16x16 x y z mask) => (VPSHRDVWMasked256 x y z (VPMOVVec16x16ToM mask)) +(MaskedShiftRightAndFillUpperFromUint16x32 x y z mask) => (VPSHRDVWMasked512 x y z (VPMOVVec16x32ToM mask)) +(MaskedShiftRightAndFillUpperFromUint32x4 x y z mask) => (VPSHRDVDMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedShiftRightAndFillUpperFromUint32x8 x y z mask) => (VPSHRDVDMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedShiftRightAndFillUpperFromUint32x16 x y z mask) => (VPSHRDVDMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedShiftRightAndFillUpperFromUint64x2 x y z mask) => (VPSHRDVQMasked128 x y z (VPMOVVec64x2ToM mask)) +(MaskedShiftRightAndFillUpperFromUint64x4 x y z mask) => (VPSHRDVQMasked256 x y z (VPMOVVec64x4ToM mask)) +(MaskedShiftRightAndFillUpperFromUint64x8 x y z mask) => (VPSHRDVQMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedShiftRightSignExtendedInt16x8 x y mask) => (VPSRAVWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedShiftRightSignExtendedInt16x16 x y mask) => (VPSRAVWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedShiftRightSignExtendedInt16x32 x y mask) => (VPSRAVWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedShiftRightSignExtendedInt32x4 x y mask) => (VPSRAVDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedShiftRightSignExtendedInt32x8 x y mask) => (VPSRAVDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedShiftRightSignExtendedInt32x16 x y mask) => (VPSRAVDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedShiftRightSignExtendedInt64x2 x y mask) => (VPSRAVQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedShiftRightSignExtendedInt64x4 x y mask) => (VPSRAVQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedShiftRightSignExtendedInt64x8 x y mask) => (VPSRAVQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedShiftRightSignExtendedUint16x8 x y mask) => (VPSRAVWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedShiftRightSignExtendedUint16x16 x y mask) => (VPSRAVWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedShiftRightSignExtendedUint16x32 x y mask) => (VPSRAVWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedShiftRightSignExtendedUint32x4 x y mask) => (VPSRAVDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedShiftRightSignExtendedUint32x8 x y mask) => 
(VPSRAVDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedShiftRightSignExtendedUint32x16 x y mask) => (VPSRAVDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedShiftRightSignExtendedUint64x2 x y mask) => (VPSRAVQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedShiftRightSignExtendedUint64x4 x y mask) => (VPSRAVQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedShiftRightSignExtendedUint64x8 x y mask) => (VPSRAVQMasked512 x y (VPMOVVec64x8ToM mask)) (MaskedSqrtFloat32x4 x mask) => (VSQRTPSMasked128 x (VPMOVVec32x4ToM mask)) (MaskedSqrtFloat32x8 x mask) => (VSQRTPSMasked256 x (VPMOVVec32x8ToM mask)) (MaskedSqrtFloat32x16 x mask) => (VSQRTPSMasked512 x (VPMOVVec32x16ToM mask)) @@ -1231,6 +1420,54 @@ (PopCountUint64x2 ...) => (VPOPCNTQ128 ...) (PopCountUint64x4 ...) => (VPOPCNTQ256 ...) (PopCountUint64x8 ...) => (VPOPCNTQ512 ...) +(RotateAllLeftInt32x4 [a] x) => (VPROLD128 [a] x) +(RotateAllLeftInt32x8 [a] x) => (VPROLD256 [a] x) +(RotateAllLeftInt32x16 [a] x) => (VPROLD512 [a] x) +(RotateAllLeftInt64x2 [a] x) => (VPROLQ128 [a] x) +(RotateAllLeftInt64x4 [a] x) => (VPROLQ256 [a] x) +(RotateAllLeftInt64x8 [a] x) => (VPROLQ512 [a] x) +(RotateAllLeftUint32x4 [a] x) => (VPROLD128 [a] x) +(RotateAllLeftUint32x8 [a] x) => (VPROLD256 [a] x) +(RotateAllLeftUint32x16 [a] x) => (VPROLD512 [a] x) +(RotateAllLeftUint64x2 [a] x) => (VPROLQ128 [a] x) +(RotateAllLeftUint64x4 [a] x) => (VPROLQ256 [a] x) +(RotateAllLeftUint64x8 [a] x) => (VPROLQ512 [a] x) +(RotateAllRightInt32x4 [a] x) => (VPRORD128 [a] x) +(RotateAllRightInt32x8 [a] x) => (VPRORD256 [a] x) +(RotateAllRightInt32x16 [a] x) => (VPRORD512 [a] x) +(RotateAllRightInt64x2 [a] x) => (VPRORQ128 [a] x) +(RotateAllRightInt64x4 [a] x) => (VPRORQ256 [a] x) +(RotateAllRightInt64x8 [a] x) => (VPRORQ512 [a] x) +(RotateAllRightUint32x4 [a] x) => (VPRORD128 [a] x) +(RotateAllRightUint32x8 [a] x) => (VPRORD256 [a] x) +(RotateAllRightUint32x16 [a] x) => (VPRORD512 [a] x) +(RotateAllRightUint64x2 [a] x) => (VPRORQ128 [a] x) +(RotateAllRightUint64x4 [a] x) => (VPRORQ256 [a] x) +(RotateAllRightUint64x8 [a] x) => (VPRORQ512 [a] x) +(RotateLeftInt32x4 ...) => (VPROLVD128 ...) +(RotateLeftInt32x8 ...) => (VPROLVD256 ...) +(RotateLeftInt32x16 ...) => (VPROLVD512 ...) +(RotateLeftInt64x2 ...) => (VPROLVQ128 ...) +(RotateLeftInt64x4 ...) => (VPROLVQ256 ...) +(RotateLeftInt64x8 ...) => (VPROLVQ512 ...) +(RotateLeftUint32x4 ...) => (VPROLVD128 ...) +(RotateLeftUint32x8 ...) => (VPROLVD256 ...) +(RotateLeftUint32x16 ...) => (VPROLVD512 ...) +(RotateLeftUint64x2 ...) => (VPROLVQ128 ...) +(RotateLeftUint64x4 ...) => (VPROLVQ256 ...) +(RotateLeftUint64x8 ...) => (VPROLVQ512 ...) +(RotateRightInt32x4 ...) => (VPRORVD128 ...) +(RotateRightInt32x8 ...) => (VPRORVD256 ...) +(RotateRightInt32x16 ...) => (VPRORVD512 ...) +(RotateRightInt64x2 ...) => (VPRORVQ128 ...) +(RotateRightInt64x4 ...) => (VPRORVQ256 ...) +(RotateRightInt64x8 ...) => (VPRORVQ512 ...) +(RotateRightUint32x4 ...) => (VPRORVD128 ...) +(RotateRightUint32x8 ...) => (VPRORVD256 ...) +(RotateRightUint32x16 ...) => (VPRORVD512 ...) +(RotateRightUint64x2 ...) => (VPRORVQ128 ...) +(RotateRightUint64x4 ...) => (VPRORVQ256 ...) +(RotateRightUint64x8 ...) => (VPRORVQ512 ...) (RoundFloat32x4 x) => (VROUNDPS128 [0] x) (RoundFloat32x8 x) => (VROUNDPS256 [0] x) (RoundFloat64x2 x) => (VROUNDPD128 [0] x) @@ -1295,6 +1532,167 @@ (SetElemUint16x8 [a] x y) => (VPINSRW128 [a] x y) (SetElemUint32x4 [a] x y) => (VPINSRD128 [a] x y) (SetElemUint64x2 [a] x y) => (VPINSRQ128 [a] x y) +(ShiftAllLeftInt16x8 ...) => (VPSLLW128 ...) 
+(ShiftAllLeftInt16x16 ...) => (VPSLLW256 ...) +(ShiftAllLeftInt32x4 ...) => (VPSLLD128 ...) +(ShiftAllLeftInt32x8 ...) => (VPSLLD256 ...) +(ShiftAllLeftInt64x2 ...) => (VPSLLQ128 ...) +(ShiftAllLeftInt64x4 ...) => (VPSLLQ256 ...) +(ShiftAllLeftInt64x8 ...) => (VPSLLQ512 ...) +(ShiftAllLeftUint16x8 ...) => (VPSLLW128 ...) +(ShiftAllLeftUint16x16 ...) => (VPSLLW256 ...) +(ShiftAllLeftUint32x4 ...) => (VPSLLD128 ...) +(ShiftAllLeftUint32x8 ...) => (VPSLLD256 ...) +(ShiftAllLeftUint64x2 ...) => (VPSLLQ128 ...) +(ShiftAllLeftUint64x4 ...) => (VPSLLQ256 ...) +(ShiftAllLeftUint64x8 ...) => (VPSLLQ512 ...) +(ShiftAllLeftAndFillUpperFromInt16x8 [a] x y) => (VPSHLDW128 [a] x y) +(ShiftAllLeftAndFillUpperFromInt16x16 [a] x y) => (VPSHLDW256 [a] x y) +(ShiftAllLeftAndFillUpperFromInt16x32 [a] x y) => (VPSHLDW512 [a] x y) +(ShiftAllLeftAndFillUpperFromInt32x4 [a] x y) => (VPSHLDD128 [a] x y) +(ShiftAllLeftAndFillUpperFromInt32x8 [a] x y) => (VPSHLDD256 [a] x y) +(ShiftAllLeftAndFillUpperFromInt32x16 [a] x y) => (VPSHLDD512 [a] x y) +(ShiftAllLeftAndFillUpperFromInt64x2 [a] x y) => (VPSHLDQ128 [a] x y) +(ShiftAllLeftAndFillUpperFromInt64x4 [a] x y) => (VPSHLDQ256 [a] x y) +(ShiftAllLeftAndFillUpperFromInt64x8 [a] x y) => (VPSHLDQ512 [a] x y) +(ShiftAllLeftAndFillUpperFromUint16x8 [a] x y) => (VPSHLDW128 [a] x y) +(ShiftAllLeftAndFillUpperFromUint16x16 [a] x y) => (VPSHLDW256 [a] x y) +(ShiftAllLeftAndFillUpperFromUint16x32 [a] x y) => (VPSHLDW512 [a] x y) +(ShiftAllLeftAndFillUpperFromUint32x4 [a] x y) => (VPSHLDD128 [a] x y) +(ShiftAllLeftAndFillUpperFromUint32x8 [a] x y) => (VPSHLDD256 [a] x y) +(ShiftAllLeftAndFillUpperFromUint32x16 [a] x y) => (VPSHLDD512 [a] x y) +(ShiftAllLeftAndFillUpperFromUint64x2 [a] x y) => (VPSHLDQ128 [a] x y) +(ShiftAllLeftAndFillUpperFromUint64x4 [a] x y) => (VPSHLDQ256 [a] x y) +(ShiftAllLeftAndFillUpperFromUint64x8 [a] x y) => (VPSHLDQ512 [a] x y) +(ShiftAllRightInt16x8 ...) => (VPSRLW128 ...) +(ShiftAllRightInt16x16 ...) => (VPSRLW256 ...) +(ShiftAllRightInt32x4 ...) => (VPSRLD128 ...) +(ShiftAllRightInt32x8 ...) => (VPSRLD256 ...) +(ShiftAllRightInt64x2 ...) => (VPSRLQ128 ...) +(ShiftAllRightInt64x4 ...) => (VPSRLQ256 ...) +(ShiftAllRightInt64x8 ...) => (VPSRLQ512 ...) +(ShiftAllRightUint16x8 ...) => (VPSRLW128 ...) +(ShiftAllRightUint16x16 ...) => (VPSRLW256 ...) +(ShiftAllRightUint32x4 ...) => (VPSRLD128 ...) +(ShiftAllRightUint32x8 ...) => (VPSRLD256 ...) +(ShiftAllRightUint64x2 ...) => (VPSRLQ128 ...) +(ShiftAllRightUint64x4 ...) => (VPSRLQ256 ...) +(ShiftAllRightUint64x8 ...) => (VPSRLQ512 ...) 
+(ShiftAllRightAndFillUpperFromInt16x8 [a] x y) => (VPSHRDW128 [a] x y) +(ShiftAllRightAndFillUpperFromInt16x16 [a] x y) => (VPSHRDW256 [a] x y) +(ShiftAllRightAndFillUpperFromInt16x32 [a] x y) => (VPSHRDW512 [a] x y) +(ShiftAllRightAndFillUpperFromInt32x4 [a] x y) => (VPSHRDD128 [a] x y) +(ShiftAllRightAndFillUpperFromInt32x8 [a] x y) => (VPSHRDD256 [a] x y) +(ShiftAllRightAndFillUpperFromInt32x16 [a] x y) => (VPSHRDD512 [a] x y) +(ShiftAllRightAndFillUpperFromInt64x2 [a] x y) => (VPSHRDQ128 [a] x y) +(ShiftAllRightAndFillUpperFromInt64x4 [a] x y) => (VPSHRDQ256 [a] x y) +(ShiftAllRightAndFillUpperFromInt64x8 [a] x y) => (VPSHRDQ512 [a] x y) +(ShiftAllRightAndFillUpperFromUint16x8 [a] x y) => (VPSHRDW128 [a] x y) +(ShiftAllRightAndFillUpperFromUint16x16 [a] x y) => (VPSHRDW256 [a] x y) +(ShiftAllRightAndFillUpperFromUint16x32 [a] x y) => (VPSHRDW512 [a] x y) +(ShiftAllRightAndFillUpperFromUint32x4 [a] x y) => (VPSHRDD128 [a] x y) +(ShiftAllRightAndFillUpperFromUint32x8 [a] x y) => (VPSHRDD256 [a] x y) +(ShiftAllRightAndFillUpperFromUint32x16 [a] x y) => (VPSHRDD512 [a] x y) +(ShiftAllRightAndFillUpperFromUint64x2 [a] x y) => (VPSHRDQ128 [a] x y) +(ShiftAllRightAndFillUpperFromUint64x4 [a] x y) => (VPSHRDQ256 [a] x y) +(ShiftAllRightAndFillUpperFromUint64x8 [a] x y) => (VPSHRDQ512 [a] x y) +(ShiftAllRightSignExtendedInt16x8 ...) => (VPSRAW128 ...) +(ShiftAllRightSignExtendedInt16x16 ...) => (VPSRAW256 ...) +(ShiftAllRightSignExtendedInt32x4 ...) => (VPSRAD128 ...) +(ShiftAllRightSignExtendedInt32x8 ...) => (VPSRAD256 ...) +(ShiftAllRightSignExtendedInt64x2 ...) => (VPSRAQ128 ...) +(ShiftAllRightSignExtendedInt64x4 ...) => (VPSRAQ256 ...) +(ShiftAllRightSignExtendedInt64x8 ...) => (VPSRAQ512 ...) +(ShiftLeftInt16x8 ...) => (VPSLLVW128 ...) +(ShiftLeftInt16x16 ...) => (VPSLLVW256 ...) +(ShiftLeftInt16x32 ...) => (VPSLLVW512 ...) +(ShiftLeftInt32x4 ...) => (VPSLLVD128 ...) +(ShiftLeftInt32x8 ...) => (VPSLLVD256 ...) +(ShiftLeftInt32x16 ...) => (VPSLLVD512 ...) +(ShiftLeftInt64x2 ...) => (VPSLLVQ128 ...) +(ShiftLeftInt64x4 ...) => (VPSLLVQ256 ...) +(ShiftLeftInt64x8 ...) => (VPSLLVQ512 ...) +(ShiftLeftUint16x8 ...) => (VPSLLVW128 ...) +(ShiftLeftUint16x16 ...) => (VPSLLVW256 ...) +(ShiftLeftUint16x32 ...) => (VPSLLVW512 ...) +(ShiftLeftUint32x4 ...) => (VPSLLVD128 ...) +(ShiftLeftUint32x8 ...) => (VPSLLVD256 ...) +(ShiftLeftUint32x16 ...) => (VPSLLVD512 ...) +(ShiftLeftUint64x2 ...) => (VPSLLVQ128 ...) +(ShiftLeftUint64x4 ...) => (VPSLLVQ256 ...) +(ShiftLeftUint64x8 ...) => (VPSLLVQ512 ...) +(ShiftLeftAndFillUpperFromInt16x8 ...) => (VPSHLDVW128 ...) +(ShiftLeftAndFillUpperFromInt16x16 ...) => (VPSHLDVW256 ...) +(ShiftLeftAndFillUpperFromInt16x32 ...) => (VPSHLDVW512 ...) +(ShiftLeftAndFillUpperFromInt32x4 ...) => (VPSHLDVD128 ...) +(ShiftLeftAndFillUpperFromInt32x8 ...) => (VPSHLDVD256 ...) +(ShiftLeftAndFillUpperFromInt32x16 ...) => (VPSHLDVD512 ...) +(ShiftLeftAndFillUpperFromInt64x2 ...) => (VPSHLDVQ128 ...) +(ShiftLeftAndFillUpperFromInt64x4 ...) => (VPSHLDVQ256 ...) +(ShiftLeftAndFillUpperFromInt64x8 ...) => (VPSHLDVQ512 ...) +(ShiftLeftAndFillUpperFromUint16x8 ...) => (VPSHLDVW128 ...) +(ShiftLeftAndFillUpperFromUint16x16 ...) => (VPSHLDVW256 ...) +(ShiftLeftAndFillUpperFromUint16x32 ...) => (VPSHLDVW512 ...) +(ShiftLeftAndFillUpperFromUint32x4 ...) => (VPSHLDVD128 ...) +(ShiftLeftAndFillUpperFromUint32x8 ...) => (VPSHLDVD256 ...) +(ShiftLeftAndFillUpperFromUint32x16 ...) => (VPSHLDVD512 ...) +(ShiftLeftAndFillUpperFromUint64x2 ...) => (VPSHLDVQ128 ...) 
+(ShiftLeftAndFillUpperFromUint64x4 ...) => (VPSHLDVQ256 ...) +(ShiftLeftAndFillUpperFromUint64x8 ...) => (VPSHLDVQ512 ...) +(ShiftRightInt16x8 ...) => (VPSRLVW128 ...) +(ShiftRightInt16x16 ...) => (VPSRLVW256 ...) +(ShiftRightInt16x32 ...) => (VPSRLVW512 ...) +(ShiftRightInt32x4 ...) => (VPSRLVD128 ...) +(ShiftRightInt32x8 ...) => (VPSRLVD256 ...) +(ShiftRightInt32x16 ...) => (VPSRLVD512 ...) +(ShiftRightInt64x2 ...) => (VPSRLVQ128 ...) +(ShiftRightInt64x4 ...) => (VPSRLVQ256 ...) +(ShiftRightInt64x8 ...) => (VPSRLVQ512 ...) +(ShiftRightUint16x8 ...) => (VPSRLVW128 ...) +(ShiftRightUint16x16 ...) => (VPSRLVW256 ...) +(ShiftRightUint16x32 ...) => (VPSRLVW512 ...) +(ShiftRightUint32x4 ...) => (VPSRLVD128 ...) +(ShiftRightUint32x8 ...) => (VPSRLVD256 ...) +(ShiftRightUint32x16 ...) => (VPSRLVD512 ...) +(ShiftRightUint64x2 ...) => (VPSRLVQ128 ...) +(ShiftRightUint64x4 ...) => (VPSRLVQ256 ...) +(ShiftRightUint64x8 ...) => (VPSRLVQ512 ...) +(ShiftRightAndFillUpperFromInt16x8 ...) => (VPSHRDVW128 ...) +(ShiftRightAndFillUpperFromInt16x16 ...) => (VPSHRDVW256 ...) +(ShiftRightAndFillUpperFromInt16x32 ...) => (VPSHRDVW512 ...) +(ShiftRightAndFillUpperFromInt32x4 ...) => (VPSHRDVD128 ...) +(ShiftRightAndFillUpperFromInt32x8 ...) => (VPSHRDVD256 ...) +(ShiftRightAndFillUpperFromInt32x16 ...) => (VPSHRDVD512 ...) +(ShiftRightAndFillUpperFromInt64x2 ...) => (VPSHRDVQ128 ...) +(ShiftRightAndFillUpperFromInt64x4 ...) => (VPSHRDVQ256 ...) +(ShiftRightAndFillUpperFromInt64x8 ...) => (VPSHRDVQ512 ...) +(ShiftRightAndFillUpperFromUint16x8 ...) => (VPSHRDVW128 ...) +(ShiftRightAndFillUpperFromUint16x16 ...) => (VPSHRDVW256 ...) +(ShiftRightAndFillUpperFromUint16x32 ...) => (VPSHRDVW512 ...) +(ShiftRightAndFillUpperFromUint32x4 ...) => (VPSHRDVD128 ...) +(ShiftRightAndFillUpperFromUint32x8 ...) => (VPSHRDVD256 ...) +(ShiftRightAndFillUpperFromUint32x16 ...) => (VPSHRDVD512 ...) +(ShiftRightAndFillUpperFromUint64x2 ...) => (VPSHRDVQ128 ...) +(ShiftRightAndFillUpperFromUint64x4 ...) => (VPSHRDVQ256 ...) +(ShiftRightAndFillUpperFromUint64x8 ...) => (VPSHRDVQ512 ...) +(ShiftRightSignExtendedInt16x8 ...) => (VPSRAVW128 ...) +(ShiftRightSignExtendedInt16x16 ...) => (VPSRAVW256 ...) +(ShiftRightSignExtendedInt16x32 ...) => (VPSRAVW512 ...) +(ShiftRightSignExtendedInt32x4 ...) => (VPSRAVD128 ...) +(ShiftRightSignExtendedInt32x8 ...) => (VPSRAVD256 ...) +(ShiftRightSignExtendedInt32x16 ...) => (VPSRAVD512 ...) +(ShiftRightSignExtendedInt64x2 ...) => (VPSRAVQ128 ...) +(ShiftRightSignExtendedInt64x4 ...) => (VPSRAVQ256 ...) +(ShiftRightSignExtendedInt64x8 ...) => (VPSRAVQ512 ...) +(ShiftRightSignExtendedUint16x8 ...) => (VPSRAVW128 ...) +(ShiftRightSignExtendedUint16x16 ...) => (VPSRAVW256 ...) +(ShiftRightSignExtendedUint16x32 ...) => (VPSRAVW512 ...) +(ShiftRightSignExtendedUint32x4 ...) => (VPSRAVD128 ...) +(ShiftRightSignExtendedUint32x8 ...) => (VPSRAVD256 ...) +(ShiftRightSignExtendedUint32x16 ...) => (VPSRAVD512 ...) +(ShiftRightSignExtendedUint64x2 ...) => (VPSRAVQ128 ...) +(ShiftRightSignExtendedUint64x4 ...) => (VPSRAVQ256 ...) +(ShiftRightSignExtendedUint64x8 ...) => (VPSRAVQ512 ...) (SignInt8x16 ...) => (VPSIGNB128 ...) (SignInt8x32 ...) => (VPSIGNB256 ...) (SignInt16x8 ...) => (VPSIGNW128 ...) 
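Editorial note (not part of the patch): the rule groups above map the generic SIMD ops onto AVX-512 instructions, e.g. ShiftAllLeftAndFillUpperFrom / ShiftAllRightAndFillUpperFrom lower to the VPSHLD* / VPSHRD* concatenate-and-shift forms, and RotateAllLeft / RotateAllRight lower to VPROL* / VPROR*. As a rough per-lane sketch of those semantics in scalar Go (helper names, signatures, and the imm parameter are illustrative only, assume 0 <= imm < lane width, and are not the simd package API):

package main

import "fmt"

// shiftAllLeftConcat16 sketches ShiftAllLeftAndFillUpperFrom on a 16-bit lane
// (VPSHLDW): x is shifted left by imm and the vacated low bits are filled from
// the high bits of y.
func shiftAllLeftConcat16(x, y uint16, imm uint) uint16 {
	return x<<imm | y>>(16-imm)
}

// shiftAllRightConcat16 sketches ShiftAllRightAndFillUpperFrom on a 16-bit lane
// (VPSHRDW): x is shifted right by imm and the vacated high bits are filled
// from the low bits of y.
func shiftAllRightConcat16(x, y uint16, imm uint) uint16 {
	return x>>imm | y<<(16-imm)
}

// rotateAllLeft32 sketches RotateAllLeft on a 32-bit lane (VPROLD).
func rotateAllLeft32(x uint32, imm uint) uint32 {
	return x<<imm | x>>(32-imm)
}

func main() {
	fmt.Printf("%#06x\n", shiftAllLeftConcat16(0x00ff, 0xab00, 4))  // 0x0ffa
	fmt.Printf("%#06x\n", shiftAllRightConcat16(0xff00, 0x00ab, 4)) // 0xbff0
	fmt.Printf("%#010x\n", rotateAllLeft32(0x80000001, 1))          // 0x00000003
}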
diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 93b136230d0778..cbddbe0ff6ef7d 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -233,6 +233,11 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPOPCNTWMasked256", argLength: 2, reg: fpkfp, asm: "VPOPCNTW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPADDSWMasked256", argLength: 3, reg: fp2kfp, asm: "VPADDSW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPSUBSWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLVWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSLLVW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDVWMasked256", argLength: 4, reg: fp3kfp, asm: "VPSHLDVW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRLVWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRLVW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDVWMasked256", argLength: 4, reg: fp3kfp, asm: "VPSHRDVW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRAVWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRAVW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMAXSW256", argLength: 2, reg: fp21, asm: "VPMAXSW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINSW256", argLength: 2, reg: fp21, asm: "VPMINSW", commutative: true, typ: "Vec256", resultInArg0: false}, @@ -246,6 +251,14 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPHADDSW256", argLength: 2, reg: fp21, asm: "VPHADDSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPHSUBSW256", argLength: 2, reg: fp21, asm: "VPHSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBSW256", argLength: 2, reg: fp21, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLW256", argLength: 2, reg: fp21, asm: "VPSLLW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLW256", argLength: 2, reg: fp21, asm: "VPSRLW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAW256", argLength: 2, reg: fp21, asm: "VPSRAW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLVW256", argLength: 2, reg: fp21, asm: "VPSLLVW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDVW256", argLength: 3, reg: fp31, asm: "VPSHLDVW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRLVW256", argLength: 2, reg: fp21, asm: "VPSRLVW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDVW256", argLength: 3, reg: fp31, asm: "VPSHRDVW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRAVW256", argLength: 2, reg: fp21, asm: "VPSRAVW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSIGNW256", argLength: 2, reg: fp21, asm: "VPSIGNW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBW256", argLength: 2, reg: fp21, asm: "VPSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPABSW512", argLength: 1, reg: fp11, asm: "VPABSW", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -260,6 
+273,11 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPOPCNTWMasked512", argLength: 2, reg: fpkfp, asm: "VPOPCNTW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPADDSWMasked512", argLength: 3, reg: fp2kfp, asm: "VPADDSW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPSUBSWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLVWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSLLVW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDVWMasked512", argLength: 4, reg: fp3kfp, asm: "VPSHLDVW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRLVWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRLVW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDVWMasked512", argLength: 4, reg: fp3kfp, asm: "VPSHRDVW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRAVWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRAVW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSUBWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPMAXSW512", argLength: 2, reg: fp21, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINSW512", argLength: 2, reg: fp21, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -269,6 +287,11 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPOPCNTW512", argLength: 1, reg: fp11, asm: "VPOPCNTW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPADDSW512", argLength: 2, reg: fp21, asm: "VPADDSW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPSUBSW512", argLength: 2, reg: fp21, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLVW512", argLength: 2, reg: fp21, asm: "VPSLLVW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDVW512", argLength: 3, reg: fp31, asm: "VPSHLDVW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRLVW512", argLength: 2, reg: fp21, asm: "VPSRLVW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDVW512", argLength: 3, reg: fp31, asm: "VPSHRDVW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRAVW512", argLength: 2, reg: fp21, asm: "VPSRAVW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSUBW512", argLength: 2, reg: fp21, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPABSW128", argLength: 1, reg: fp11, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDW128", argLength: 2, reg: fp21, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -284,6 +307,11 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPOPCNTWMasked128", argLength: 2, reg: fpkfp, asm: "VPOPCNTW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDSWMasked128", argLength: 3, reg: fp2kfp, asm: "VPADDSW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPSUBSWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLVWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSLLVW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDVWMasked128", argLength: 4, 
reg: fp3kfp, asm: "VPSHLDVW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRLVWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRLVW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDVWMasked128", argLength: 4, reg: fp3kfp, asm: "VPSHRDVW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRAVWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRAVW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMAXSW128", argLength: 2, reg: fp21, asm: "VPMAXSW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINSW128", argLength: 2, reg: fp21, asm: "VPMINSW", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -297,6 +325,14 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPHADDSW128", argLength: 2, reg: fp21, asm: "VPHADDSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPHSUBSW128", argLength: 2, reg: fp21, asm: "VPHSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBSW128", argLength: 2, reg: fp21, asm: "VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLW128", argLength: 2, reg: fp21, asm: "VPSLLW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLW128", argLength: 2, reg: fp21, asm: "VPSRLW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAW128", argLength: 2, reg: fp21, asm: "VPSRAW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLVW128", argLength: 2, reg: fp21, asm: "VPSLLVW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDVW128", argLength: 3, reg: fp31, asm: "VPSHLDVW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRLVW128", argLength: 2, reg: fp21, asm: "VPSRLVW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDVW128", argLength: 3, reg: fp31, asm: "VPSHRDVW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRAVW128", argLength: 2, reg: fp21, asm: "VPSRAVW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSIGNW128", argLength: 2, reg: fp21, asm: "VPSIGNW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBW128", argLength: 2, reg: fp21, asm: "VPSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPABSD512", argLength: 1, reg: fp11, asm: "VPABSD", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -313,8 +349,15 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPORDMasked512", argLength: 3, reg: fp2kfp, asm: "VPORD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPDPWSSDMasked512", argLength: 4, reg: fp3kfp, asm: "VPDPWSSD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPOPCNTDMasked512", argLength: 2, reg: fpkfp, asm: "VPOPCNTD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLVDMasked512", argLength: 3, reg: fp2kfp, asm: "VPROLVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORVDMasked512", argLength: 3, reg: fp2kfp, asm: "VPRORVD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPDPWSSDSMasked512", argLength: 4, reg: fp3kfp, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPDPBUSDSMasked512", argLength: 4, reg: fp3kfp, asm: "VPDPBUSDS", 
commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSLLVDMasked512", argLength: 3, reg: fp2kfp, asm: "VPSLLVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDVDMasked512", argLength: 4, reg: fp3kfp, asm: "VPSHLDVD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRLVDMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRLVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDVDMasked512", argLength: 4, reg: fp3kfp, asm: "VPSHRDVD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRAVDMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRAVD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSUBDMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPDPBUSDMasked512", argLength: 4, reg: fp3kfp, asm: "VPDPBUSD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPXORDMasked512", argLength: 3, reg: fp2kfp, asm: "VPXORD", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -324,8 +367,15 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPORD512", argLength: 2, reg: fp21, asm: "VPORD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPDPWSSD512", argLength: 3, reg: fp31, asm: "VPDPWSSD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPOPCNTD512", argLength: 1, reg: fp11, asm: "VPOPCNTD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLVD512", argLength: 2, reg: fp21, asm: "VPROLVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORVD512", argLength: 2, reg: fp21, asm: "VPRORVD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPDPWSSDS512", argLength: 3, reg: fp31, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPDPBUSDS512", argLength: 3, reg: fp31, asm: "VPDPBUSDS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSLLVD512", argLength: 2, reg: fp21, asm: "VPSLLVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDVD512", argLength: 3, reg: fp31, asm: "VPSHLDVD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRLVD512", argLength: 2, reg: fp21, asm: "VPSRLVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDVD512", argLength: 3, reg: fp31, asm: "VPSHRDVD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRAVD512", argLength: 2, reg: fp21, asm: "VPSRAVD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSUBD512", argLength: 2, reg: fp21, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPDPBUSD512", argLength: 3, reg: fp31, asm: "VPDPBUSD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPXORD512", argLength: 2, reg: fp21, asm: "VPXORD", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -343,8 +393,15 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPORDMasked128", argLength: 3, reg: fp2kfp, asm: "VPORD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPDPWSSDMasked128", argLength: 4, reg: fp3kfp, asm: "VPDPWSSD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPOPCNTDMasked128", argLength: 2, reg: fpkfp, asm: "VPOPCNTD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLVDMasked128", argLength: 3, reg: fp2kfp, asm: "VPROLVD", commutative: false, 
typ: "Vec128", resultInArg0: false}, + {name: "VPRORVDMasked128", argLength: 3, reg: fp2kfp, asm: "VPRORVD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPDPWSSDSMasked128", argLength: 4, reg: fp3kfp, asm: "VPDPWSSDS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPDPBUSDSMasked128", argLength: 4, reg: fp3kfp, asm: "VPDPBUSDS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSLLVDMasked128", argLength: 3, reg: fp2kfp, asm: "VPSLLVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDVDMasked128", argLength: 4, reg: fp3kfp, asm: "VPSHLDVD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRLVDMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRLVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDVDMasked128", argLength: 4, reg: fp3kfp, asm: "VPSHRDVD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRAVDMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRAVD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBDMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPDPBUSDMasked128", argLength: 4, reg: fp3kfp, asm: "VPDPBUSD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPXORDMasked128", argLength: 3, reg: fp2kfp, asm: "VPXORD", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -356,8 +413,18 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPHADDD128", argLength: 2, reg: fp21, asm: "VPHADDD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPHSUBD128", argLength: 2, reg: fp21, asm: "VPHSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPOPCNTD128", argLength: 1, reg: fp11, asm: "VPOPCNTD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLVD128", argLength: 2, reg: fp21, asm: "VPROLVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORVD128", argLength: 2, reg: fp21, asm: "VPRORVD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPDPWSSDS128", argLength: 3, reg: fp31, asm: "VPDPWSSDS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPDPBUSDS128", argLength: 3, reg: fp31, asm: "VPDPBUSDS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSLLD128", argLength: 2, reg: fp21, asm: "VPSLLD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLD128", argLength: 2, reg: fp21, asm: "VPSRLD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAD128", argLength: 2, reg: fp21, asm: "VPSRAD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLVD128", argLength: 2, reg: fp21, asm: "VPSLLVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDVD128", argLength: 3, reg: fp31, asm: "VPSHLDVD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRLVD128", argLength: 2, reg: fp21, asm: "VPSRLVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDVD128", argLength: 3, reg: fp31, asm: "VPSHRDVD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRAVD128", argLength: 2, reg: fp21, asm: "VPSRAVD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSIGND128", argLength: 2, reg: fp21, asm: "VPSIGND", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBD128", argLength: 2, reg: fp21, asm: "VPSUBD", commutative: 
false, typ: "Vec128", resultInArg0: false}, {name: "VPDPBUSD128", argLength: 3, reg: fp31, asm: "VPDPBUSD", commutative: false, typ: "Vec128", resultInArg0: true}, @@ -375,8 +442,15 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPORDMasked256", argLength: 3, reg: fp2kfp, asm: "VPORD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPDPWSSDMasked256", argLength: 4, reg: fp3kfp, asm: "VPDPWSSD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPOPCNTDMasked256", argLength: 2, reg: fpkfp, asm: "VPOPCNTD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLVDMasked256", argLength: 3, reg: fp2kfp, asm: "VPROLVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORVDMasked256", argLength: 3, reg: fp2kfp, asm: "VPRORVD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPDPWSSDSMasked256", argLength: 4, reg: fp3kfp, asm: "VPDPWSSDS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPDPBUSDSMasked256", argLength: 4, reg: fp3kfp, asm: "VPDPBUSDS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSLLVDMasked256", argLength: 3, reg: fp2kfp, asm: "VPSLLVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDVDMasked256", argLength: 4, reg: fp3kfp, asm: "VPSHLDVD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRLVDMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRLVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDVDMasked256", argLength: 4, reg: fp3kfp, asm: "VPSHRDVD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRAVDMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRAVD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBDMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPDPBUSDMasked256", argLength: 4, reg: fp3kfp, asm: "VPDPBUSD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPXORDMasked256", argLength: 3, reg: fp2kfp, asm: "VPXORD", commutative: true, typ: "Vec256", resultInArg0: false}, @@ -388,8 +462,18 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPHADDD256", argLength: 2, reg: fp21, asm: "VPHADDD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPHSUBD256", argLength: 2, reg: fp21, asm: "VPHSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPOPCNTD256", argLength: 1, reg: fp11, asm: "VPOPCNTD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLVD256", argLength: 2, reg: fp21, asm: "VPROLVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORVD256", argLength: 2, reg: fp21, asm: "VPRORVD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPDPWSSDS256", argLength: 3, reg: fp31, asm: "VPDPWSSDS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPDPBUSDS256", argLength: 3, reg: fp31, asm: "VPDPBUSDS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSLLD256", argLength: 2, reg: fp21, asm: "VPSLLD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLD256", argLength: 2, reg: fp21, asm: "VPSRLD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAD256", argLength: 2, reg: fp21, asm: "VPSRAD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLVD256", argLength: 2, reg: fp21, asm: "VPSLLVD", 
commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDVD256", argLength: 3, reg: fp31, asm: "VPSHLDVD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRLVD256", argLength: 2, reg: fp21, asm: "VPSRLVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDVD256", argLength: 3, reg: fp31, asm: "VPSHRDVD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRAVD256", argLength: 2, reg: fp21, asm: "VPSRAVD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSIGND256", argLength: 2, reg: fp21, asm: "VPSIGND", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBD256", argLength: 2, reg: fp21, asm: "VPSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPDPBUSD256", argLength: 3, reg: fp31, asm: "VPDPBUSD", commutative: false, typ: "Vec256", resultInArg0: true}, @@ -406,12 +490,32 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPMULLQMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULLQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPORQMasked128", argLength: 3, reg: fp2kfp, asm: "VPORQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPOPCNTQMasked128", argLength: 2, reg: fpkfp, asm: "VPOPCNTQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLVQMasked128", argLength: 3, reg: fp2kfp, asm: "VPROLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORVQMasked128", argLength: 3, reg: fp2kfp, asm: "VPRORVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSLLQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRLQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRAQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLVQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSLLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDVQMasked128", argLength: 4, reg: fp3kfp, asm: "VPSHLDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRLVQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDVQMasked128", argLength: 4, reg: fp3kfp, asm: "VPSHRDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRAVQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRAVQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPXORQMasked128", argLength: 3, reg: fp2kfp, asm: "VPXORQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMAXSQ128", argLength: 2, reg: fp21, asm: "VPMAXSQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINSQ128", argLength: 2, reg: fp21, asm: "VPMINSQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULLQ128", argLength: 2, reg: fp21, asm: "VPMULLQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPOPCNTQ128", argLength: 1, reg: fp11, asm: "VPOPCNTQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLVQ128", argLength: 2, reg: fp21, asm: "VPROLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORVQ128", argLength: 2, reg: 
fp21, asm: "VPRORVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLQ128", argLength: 2, reg: fp21, asm: "VPSLLQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLQ128", argLength: 2, reg: fp21, asm: "VPSRLQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAQ128", argLength: 2, reg: fp21, asm: "VPSRAQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLVQ128", argLength: 2, reg: fp21, asm: "VPSLLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDVQ128", argLength: 3, reg: fp31, asm: "VPSHLDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRLVQ128", argLength: 2, reg: fp21, asm: "VPSRLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDVQ128", argLength: 3, reg: fp31, asm: "VPSHRDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRAVQ128", argLength: 2, reg: fp21, asm: "VPSRAVQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBQ128", argLength: 2, reg: fp21, asm: "VPSUBQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPABSQ256", argLength: 1, reg: fp11, asm: "VPABSQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPADDQ256", argLength: 2, reg: fp21, asm: "VPADDQ", commutative: true, typ: "Vec256", resultInArg0: false}, @@ -427,12 +531,32 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPMULLQMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULLQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPORQMasked256", argLength: 3, reg: fp2kfp, asm: "VPORQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPOPCNTQMasked256", argLength: 2, reg: fpkfp, asm: "VPOPCNTQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLVQMasked256", argLength: 3, reg: fp2kfp, asm: "VPROLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORVQMasked256", argLength: 3, reg: fp2kfp, asm: "VPRORVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSLLQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRLQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRAQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLVQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSLLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDVQMasked256", argLength: 4, reg: fp3kfp, asm: "VPSHLDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRLVQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDVQMasked256", argLength: 4, reg: fp3kfp, asm: "VPSHRDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRAVQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRAVQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPXORQMasked256", argLength: 3, reg: fp2kfp, asm: "VPXORQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMAXSQ256", argLength: 2, reg: fp21, asm: "VPMAXSQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINSQ256", 
argLength: 2, reg: fp21, asm: "VPMINSQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULLQ256", argLength: 2, reg: fp21, asm: "VPMULLQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPOPCNTQ256", argLength: 1, reg: fp11, asm: "VPOPCNTQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLVQ256", argLength: 2, reg: fp21, asm: "VPROLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORVQ256", argLength: 2, reg: fp21, asm: "VPRORVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLQ256", argLength: 2, reg: fp21, asm: "VPSLLQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLQ256", argLength: 2, reg: fp21, asm: "VPSRLQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAQ256", argLength: 2, reg: fp21, asm: "VPSRAQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLVQ256", argLength: 2, reg: fp21, asm: "VPSLLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDVQ256", argLength: 3, reg: fp31, asm: "VPSHLDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRLVQ256", argLength: 2, reg: fp21, asm: "VPSRLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDVQ256", argLength: 3, reg: fp31, asm: "VPSHRDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRAVQ256", argLength: 2, reg: fp21, asm: "VPSRAVQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBQ256", argLength: 2, reg: fp21, asm: "VPSUBQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPABSQ512", argLength: 1, reg: fp11, asm: "VPABSQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPADDQ512", argLength: 2, reg: fp21, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -448,6 +572,16 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPMULLQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULLQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPORQMasked512", argLength: 3, reg: fp2kfp, asm: "VPORQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPOPCNTQMasked512", argLength: 2, reg: fpkfp, asm: "VPOPCNTQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLVQMasked512", argLength: 3, reg: fp2kfp, asm: "VPROLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORVQMasked512", argLength: 3, reg: fp2kfp, asm: "VPRORVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSLLQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRAQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLVQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSLLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDVQMasked512", argLength: 4, reg: fp3kfp, asm: "VPSHLDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRLVQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDVQMasked512", argLength: 4, reg: fp3kfp, asm: "VPSHRDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRAVQMasked512", 
argLength: 3, reg: fp2kfp, asm: "VPSRAVQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSUBQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPXORQMasked512", argLength: 3, reg: fp2kfp, asm: "VPXORQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXSQ512", argLength: 2, reg: fp21, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -456,6 +590,16 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPMULLQ512", argLength: 2, reg: fp21, asm: "VPMULLQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPORQ512", argLength: 2, reg: fp21, asm: "VPORQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPOPCNTQ512", argLength: 1, reg: fp11, asm: "VPOPCNTQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLVQ512", argLength: 2, reg: fp21, asm: "VPROLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORVQ512", argLength: 2, reg: fp21, asm: "VPRORVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLQ512", argLength: 2, reg: fp21, asm: "VPSLLQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLQ512", argLength: 2, reg: fp21, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAQ512", argLength: 2, reg: fp21, asm: "VPSRAQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLVQ512", argLength: 2, reg: fp21, asm: "VPSLLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDVQ512", argLength: 3, reg: fp31, asm: "VPSHLDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRLVQ512", argLength: 2, reg: fp21, asm: "VPSRLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDVQ512", argLength: 3, reg: fp31, asm: "VPSHRDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRAVQ512", argLength: 2, reg: fp21, asm: "VPSRAVQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSUBQ512", argLength: 2, reg: fp21, asm: "VPSUBQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPXORQ512", argLength: 2, reg: fp21, asm: "VPXORQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPABSB128", argLength: 1, reg: fp11, asm: "VPABSB", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -641,28 +785,88 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VCMPPDMasked512", argLength: 3, reg: fp2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPW256", argLength: 2, reg: fp2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPWMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPSHLDWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDW256", argLength: 2, reg: fp21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDW256", argLength: 2, reg: fp21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPW512", 
argLength: 2, reg: fp2k, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPWMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPSHLDWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDW512", argLength: 2, reg: fp21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDW512", argLength: 2, reg: fp21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPEXTRW128", argLength: 1, reg: fpgp, asm: "VPEXTRW", aux: "Int8", commutative: false, typ: "int16", resultInArg0: false}, {name: "VPCMPW128", argLength: 2, reg: fp2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPWMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPSHLDWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPINSRW128", argLength: 2, reg: fpgpfp, asm: "VPINSRW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDW128", argLength: 2, reg: fp21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDW128", argLength: 2, reg: fp21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPD512", argLength: 2, reg: fp2k, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPDMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPROLDMasked512", argLength: 2, reg: fpkfp, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORDMasked512", argLength: 2, reg: fpkfp, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDDMasked512", argLength: 3, reg: fp2kfp, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDDMasked512", argLength: 3, reg: fp2kfp, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLD512", argLength: 1, reg: fp11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORD512", argLength: 1, reg: fp11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDD512", argLength: 2, reg: fp21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDD512", argLength: 2, reg: fp21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPEXTRD128", argLength: 1, reg: fpgp, asm: "VPEXTRD", aux: "Int8", commutative: false, typ: "int32", resultInArg0: false}, {name: "VPCMPD128", argLength: 2, reg: fp2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPDMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPD", 
aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPROLDMasked128", argLength: 2, reg: fpkfp, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORDMasked128", argLength: 2, reg: fpkfp, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDDMasked128", argLength: 3, reg: fp2kfp, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDDMasked128", argLength: 3, reg: fp2kfp, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLD128", argLength: 1, reg: fp11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORD128", argLength: 1, reg: fp11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPINSRD128", argLength: 2, reg: fpgpfp, asm: "VPINSRD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDD128", argLength: 2, reg: fp21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDD128", argLength: 2, reg: fp21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPD256", argLength: 2, reg: fp2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPDMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPROLDMasked256", argLength: 2, reg: fpkfp, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORDMasked256", argLength: 2, reg: fpkfp, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDDMasked256", argLength: 3, reg: fp2kfp, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDDMasked256", argLength: 3, reg: fp2kfp, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLD256", argLength: 1, reg: fp11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORD256", argLength: 1, reg: fp11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDD256", argLength: 2, reg: fp21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDD256", argLength: 2, reg: fp21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPEXTRQ128", argLength: 1, reg: fpgp, asm: "VPEXTRQ", aux: "Int8", commutative: false, typ: "int64", resultInArg0: false}, {name: "VPCMPQ128", argLength: 2, reg: fp2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPQMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPROLQMasked128", argLength: 2, reg: fpkfp, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORQMasked128", argLength: 2, reg: fpkfp, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSHRDQ", aux: "Int8", 
commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLQ128", argLength: 1, reg: fp11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORQ128", argLength: 1, reg: fp11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPINSRQ128", argLength: 2, reg: fpgpfp, asm: "VPINSRQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDQ128", argLength: 2, reg: fp21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDQ128", argLength: 2, reg: fp21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPQ256", argLength: 2, reg: fp2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPQMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPROLQMasked256", argLength: 2, reg: fpkfp, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORQMasked256", argLength: 2, reg: fpkfp, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLQ256", argLength: 1, reg: fp11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORQ256", argLength: 1, reg: fp11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDQ256", argLength: 2, reg: fp21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDQ256", argLength: 2, reg: fp21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPQ512", argLength: 2, reg: fp2k, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPQMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPROLQMasked512", argLength: 2, reg: fpkfp, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORQMasked512", argLength: 2, reg: fpkfp, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLQ512", argLength: 1, reg: fp11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORQ512", argLength: 1, reg: fp11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDQ512", argLength: 2, reg: fp21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDQ512", argLength: 2, reg: fp21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPEXTRB128", argLength: 1, reg: fpgp, asm: "VPEXTRB", aux: "Int8", commutative: false, typ: "int8", 
resultInArg0: false}, {name: "VPCMPB128", argLength: 2, reg: fp2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPBMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 1c33483f4242f0..0f3d3f8214cd22 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -345,6 +345,11 @@ func simdGenericOps() []opData { {name: "MaskedPopCountInt16x16", argLength: 2, commutative: false}, {name: "MaskedSaturatedAddInt16x16", argLength: 3, commutative: true}, {name: "MaskedSaturatedSubInt16x16", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftInt16x16", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftAndFillUpperFromInt16x16", argLength: 4, commutative: false}, + {name: "MaskedShiftRightInt16x16", argLength: 3, commutative: false}, + {name: "MaskedShiftRightAndFillUpperFromInt16x16", argLength: 4, commutative: false}, + {name: "MaskedShiftRightSignExtendedInt16x16", argLength: 3, commutative: false}, {name: "MaskedSubInt16x16", argLength: 3, commutative: false}, {name: "MaxInt16x16", argLength: 2, commutative: true}, {name: "MinInt16x16", argLength: 2, commutative: true}, @@ -360,6 +365,14 @@ func simdGenericOps() []opData { {name: "SaturatedPairwiseAddInt16x16", argLength: 2, commutative: false}, {name: "SaturatedPairwiseSubInt16x16", argLength: 2, commutative: false}, {name: "SaturatedSubInt16x16", argLength: 2, commutative: false}, + {name: "ShiftAllLeftInt16x16", argLength: 2, commutative: false}, + {name: "ShiftAllRightInt16x16", argLength: 2, commutative: false}, + {name: "ShiftAllRightSignExtendedInt16x16", argLength: 2, commutative: false}, + {name: "ShiftLeftInt16x16", argLength: 2, commutative: false}, + {name: "ShiftLeftAndFillUpperFromInt16x16", argLength: 3, commutative: false}, + {name: "ShiftRightInt16x16", argLength: 2, commutative: false}, + {name: "ShiftRightAndFillUpperFromInt16x16", argLength: 3, commutative: false}, + {name: "ShiftRightSignExtendedInt16x16", argLength: 2, commutative: false}, {name: "SignInt16x16", argLength: 2, commutative: false}, {name: "SubInt16x16", argLength: 2, commutative: false}, {name: "XorInt16x16", argLength: 2, commutative: true}, @@ -386,6 +399,11 @@ func simdGenericOps() []opData { {name: "MaskedPopCountInt16x32", argLength: 2, commutative: false}, {name: "MaskedSaturatedAddInt16x32", argLength: 3, commutative: true}, {name: "MaskedSaturatedSubInt16x32", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftInt16x32", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftAndFillUpperFromInt16x32", argLength: 4, commutative: false}, + {name: "MaskedShiftRightInt16x32", argLength: 3, commutative: false}, + {name: "MaskedShiftRightAndFillUpperFromInt16x32", argLength: 4, commutative: false}, + {name: "MaskedShiftRightSignExtendedInt16x32", argLength: 3, commutative: false}, {name: "MaskedSubInt16x32", argLength: 3, commutative: false}, {name: "MaxInt16x32", argLength: 2, commutative: true}, {name: "MinInt16x32", argLength: 2, commutative: true}, @@ -396,6 +414,11 @@ func simdGenericOps() []opData { {name: "PopCountInt16x32", argLength: 1, commutative: false}, {name: "SaturatedAddInt16x32", argLength: 2, commutative: true}, {name: "SaturatedSubInt16x32", argLength: 2, commutative: false}, + {name: "ShiftLeftInt16x32", 
argLength: 2, commutative: false}, + {name: "ShiftLeftAndFillUpperFromInt16x32", argLength: 3, commutative: false}, + {name: "ShiftRightInt16x32", argLength: 2, commutative: false}, + {name: "ShiftRightAndFillUpperFromInt16x32", argLength: 3, commutative: false}, + {name: "ShiftRightSignExtendedInt16x32", argLength: 2, commutative: false}, {name: "SubInt16x32", argLength: 2, commutative: false}, {name: "AbsoluteInt16x8", argLength: 1, commutative: false}, {name: "AddInt16x8", argLength: 2, commutative: true}, @@ -422,6 +445,11 @@ func simdGenericOps() []opData { {name: "MaskedPopCountInt16x8", argLength: 2, commutative: false}, {name: "MaskedSaturatedAddInt16x8", argLength: 3, commutative: true}, {name: "MaskedSaturatedSubInt16x8", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftInt16x8", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftAndFillUpperFromInt16x8", argLength: 4, commutative: false}, + {name: "MaskedShiftRightInt16x8", argLength: 3, commutative: false}, + {name: "MaskedShiftRightAndFillUpperFromInt16x8", argLength: 4, commutative: false}, + {name: "MaskedShiftRightSignExtendedInt16x8", argLength: 3, commutative: false}, {name: "MaskedSubInt16x8", argLength: 3, commutative: false}, {name: "MaxInt16x8", argLength: 2, commutative: true}, {name: "MinInt16x8", argLength: 2, commutative: true}, @@ -437,6 +465,14 @@ func simdGenericOps() []opData { {name: "SaturatedPairwiseAddInt16x8", argLength: 2, commutative: false}, {name: "SaturatedPairwiseSubInt16x8", argLength: 2, commutative: false}, {name: "SaturatedSubInt16x8", argLength: 2, commutative: false}, + {name: "ShiftAllLeftInt16x8", argLength: 2, commutative: false}, + {name: "ShiftAllRightInt16x8", argLength: 2, commutative: false}, + {name: "ShiftAllRightSignExtendedInt16x8", argLength: 2, commutative: false}, + {name: "ShiftLeftInt16x8", argLength: 2, commutative: false}, + {name: "ShiftLeftAndFillUpperFromInt16x8", argLength: 3, commutative: false}, + {name: "ShiftRightInt16x8", argLength: 2, commutative: false}, + {name: "ShiftRightAndFillUpperFromInt16x8", argLength: 3, commutative: false}, + {name: "ShiftRightSignExtendedInt16x8", argLength: 2, commutative: false}, {name: "SignInt16x8", argLength: 2, commutative: false}, {name: "SubInt16x8", argLength: 2, commutative: false}, {name: "XorInt16x8", argLength: 2, commutative: true}, @@ -465,8 +501,15 @@ func simdGenericOps() []opData { {name: "MaskedOrInt32x16", argLength: 3, commutative: true}, {name: "MaskedPairDotProdAccumulateInt32x16", argLength: 4, commutative: false}, {name: "MaskedPopCountInt32x16", argLength: 2, commutative: false}, + {name: "MaskedRotateLeftInt32x16", argLength: 3, commutative: false}, + {name: "MaskedRotateRightInt32x16", argLength: 3, commutative: false}, {name: "MaskedSaturatedPairDotProdAccumulateInt32x16", argLength: 4, commutative: false}, {name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16", argLength: 4, commutative: false}, + {name: "MaskedShiftLeftInt32x16", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftAndFillUpperFromInt32x16", argLength: 4, commutative: false}, + {name: "MaskedShiftRightInt32x16", argLength: 3, commutative: false}, + {name: "MaskedShiftRightAndFillUpperFromInt32x16", argLength: 4, commutative: false}, + {name: "MaskedShiftRightSignExtendedInt32x16", argLength: 3, commutative: false}, {name: "MaskedSubInt32x16", argLength: 3, commutative: false}, {name: "MaskedUnsignedSignedQuadDotProdAccumulateInt32x16", argLength: 4, commutative: false}, {name: "MaskedXorInt32x16", 
argLength: 3, commutative: true}, @@ -477,8 +520,15 @@ func simdGenericOps() []opData { {name: "OrInt32x16", argLength: 2, commutative: true}, {name: "PairDotProdAccumulateInt32x16", argLength: 3, commutative: false}, {name: "PopCountInt32x16", argLength: 1, commutative: false}, + {name: "RotateLeftInt32x16", argLength: 2, commutative: false}, + {name: "RotateRightInt32x16", argLength: 2, commutative: false}, {name: "SaturatedPairDotProdAccumulateInt32x16", argLength: 3, commutative: false}, {name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x16", argLength: 3, commutative: false}, + {name: "ShiftLeftInt32x16", argLength: 2, commutative: false}, + {name: "ShiftLeftAndFillUpperFromInt32x16", argLength: 3, commutative: false}, + {name: "ShiftRightInt32x16", argLength: 2, commutative: false}, + {name: "ShiftRightAndFillUpperFromInt32x16", argLength: 3, commutative: false}, + {name: "ShiftRightSignExtendedInt32x16", argLength: 2, commutative: false}, {name: "SubInt32x16", argLength: 2, commutative: false}, {name: "UnsignedSignedQuadDotProdAccumulateInt32x16", argLength: 3, commutative: false}, {name: "XorInt32x16", argLength: 2, commutative: true}, @@ -507,8 +557,15 @@ func simdGenericOps() []opData { {name: "MaskedOrInt32x4", argLength: 3, commutative: true}, {name: "MaskedPairDotProdAccumulateInt32x4", argLength: 4, commutative: false}, {name: "MaskedPopCountInt32x4", argLength: 2, commutative: false}, + {name: "MaskedRotateLeftInt32x4", argLength: 3, commutative: false}, + {name: "MaskedRotateRightInt32x4", argLength: 3, commutative: false}, {name: "MaskedSaturatedPairDotProdAccumulateInt32x4", argLength: 4, commutative: false}, {name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4", argLength: 4, commutative: false}, + {name: "MaskedShiftLeftInt32x4", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftAndFillUpperFromInt32x4", argLength: 4, commutative: false}, + {name: "MaskedShiftRightInt32x4", argLength: 3, commutative: false}, + {name: "MaskedShiftRightAndFillUpperFromInt32x4", argLength: 4, commutative: false}, + {name: "MaskedShiftRightSignExtendedInt32x4", argLength: 3, commutative: false}, {name: "MaskedSubInt32x4", argLength: 3, commutative: false}, {name: "MaskedUnsignedSignedQuadDotProdAccumulateInt32x4", argLength: 4, commutative: false}, {name: "MaskedXorInt32x4", argLength: 3, commutative: true}, @@ -522,8 +579,18 @@ func simdGenericOps() []opData { {name: "PairwiseAddInt32x4", argLength: 2, commutative: false}, {name: "PairwiseSubInt32x4", argLength: 2, commutative: false}, {name: "PopCountInt32x4", argLength: 1, commutative: false}, + {name: "RotateLeftInt32x4", argLength: 2, commutative: false}, + {name: "RotateRightInt32x4", argLength: 2, commutative: false}, {name: "SaturatedPairDotProdAccumulateInt32x4", argLength: 3, commutative: false}, {name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x4", argLength: 3, commutative: false}, + {name: "ShiftAllLeftInt32x4", argLength: 2, commutative: false}, + {name: "ShiftAllRightInt32x4", argLength: 2, commutative: false}, + {name: "ShiftAllRightSignExtendedInt32x4", argLength: 2, commutative: false}, + {name: "ShiftLeftInt32x4", argLength: 2, commutative: false}, + {name: "ShiftLeftAndFillUpperFromInt32x4", argLength: 3, commutative: false}, + {name: "ShiftRightInt32x4", argLength: 2, commutative: false}, + {name: "ShiftRightAndFillUpperFromInt32x4", argLength: 3, commutative: false}, + {name: "ShiftRightSignExtendedInt32x4", argLength: 2, commutative: false}, {name: "SignInt32x4", argLength: 2, 
commutative: false}, {name: "SubInt32x4", argLength: 2, commutative: false}, {name: "UnsignedSignedQuadDotProdAccumulateInt32x4", argLength: 3, commutative: false}, @@ -553,8 +620,15 @@ func simdGenericOps() []opData { {name: "MaskedOrInt32x8", argLength: 3, commutative: true}, {name: "MaskedPairDotProdAccumulateInt32x8", argLength: 4, commutative: false}, {name: "MaskedPopCountInt32x8", argLength: 2, commutative: false}, + {name: "MaskedRotateLeftInt32x8", argLength: 3, commutative: false}, + {name: "MaskedRotateRightInt32x8", argLength: 3, commutative: false}, {name: "MaskedSaturatedPairDotProdAccumulateInt32x8", argLength: 4, commutative: false}, {name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8", argLength: 4, commutative: false}, + {name: "MaskedShiftLeftInt32x8", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftAndFillUpperFromInt32x8", argLength: 4, commutative: false}, + {name: "MaskedShiftRightInt32x8", argLength: 3, commutative: false}, + {name: "MaskedShiftRightAndFillUpperFromInt32x8", argLength: 4, commutative: false}, + {name: "MaskedShiftRightSignExtendedInt32x8", argLength: 3, commutative: false}, {name: "MaskedSubInt32x8", argLength: 3, commutative: false}, {name: "MaskedUnsignedSignedQuadDotProdAccumulateInt32x8", argLength: 4, commutative: false}, {name: "MaskedXorInt32x8", argLength: 3, commutative: true}, @@ -568,8 +642,18 @@ func simdGenericOps() []opData { {name: "PairwiseAddInt32x8", argLength: 2, commutative: false}, {name: "PairwiseSubInt32x8", argLength: 2, commutative: false}, {name: "PopCountInt32x8", argLength: 1, commutative: false}, + {name: "RotateLeftInt32x8", argLength: 2, commutative: false}, + {name: "RotateRightInt32x8", argLength: 2, commutative: false}, {name: "SaturatedPairDotProdAccumulateInt32x8", argLength: 3, commutative: false}, {name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x8", argLength: 3, commutative: false}, + {name: "ShiftAllLeftInt32x8", argLength: 2, commutative: false}, + {name: "ShiftAllRightInt32x8", argLength: 2, commutative: false}, + {name: "ShiftAllRightSignExtendedInt32x8", argLength: 2, commutative: false}, + {name: "ShiftLeftInt32x8", argLength: 2, commutative: false}, + {name: "ShiftLeftAndFillUpperFromInt32x8", argLength: 3, commutative: false}, + {name: "ShiftRightInt32x8", argLength: 2, commutative: false}, + {name: "ShiftRightAndFillUpperFromInt32x8", argLength: 3, commutative: false}, + {name: "ShiftRightSignExtendedInt32x8", argLength: 2, commutative: false}, {name: "SignInt32x8", argLength: 2, commutative: false}, {name: "SubInt32x8", argLength: 2, commutative: false}, {name: "UnsignedSignedQuadDotProdAccumulateInt32x8", argLength: 3, commutative: false}, @@ -599,6 +683,16 @@ func simdGenericOps() []opData { {name: "MaskedNotEqualInt64x2", argLength: 3, commutative: true}, {name: "MaskedOrInt64x2", argLength: 3, commutative: true}, {name: "MaskedPopCountInt64x2", argLength: 2, commutative: false}, + {name: "MaskedRotateLeftInt64x2", argLength: 3, commutative: false}, + {name: "MaskedRotateRightInt64x2", argLength: 3, commutative: false}, + {name: "MaskedShiftAllLeftInt64x2", argLength: 3, commutative: false}, + {name: "MaskedShiftAllRightInt64x2", argLength: 3, commutative: false}, + {name: "MaskedShiftAllRightSignExtendedInt64x2", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftInt64x2", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftAndFillUpperFromInt64x2", argLength: 4, commutative: false}, + {name: "MaskedShiftRightInt64x2", argLength: 3, commutative: 
false}, + {name: "MaskedShiftRightAndFillUpperFromInt64x2", argLength: 4, commutative: false}, + {name: "MaskedShiftRightSignExtendedInt64x2", argLength: 3, commutative: false}, {name: "MaskedSubInt64x2", argLength: 3, commutative: false}, {name: "MaskedXorInt64x2", argLength: 3, commutative: true}, {name: "MaxInt64x2", argLength: 2, commutative: true}, @@ -608,6 +702,16 @@ func simdGenericOps() []opData { {name: "NotEqualInt64x2", argLength: 2, commutative: true}, {name: "OrInt64x2", argLength: 2, commutative: true}, {name: "PopCountInt64x2", argLength: 1, commutative: false}, + {name: "RotateLeftInt64x2", argLength: 2, commutative: false}, + {name: "RotateRightInt64x2", argLength: 2, commutative: false}, + {name: "ShiftAllLeftInt64x2", argLength: 2, commutative: false}, + {name: "ShiftAllRightInt64x2", argLength: 2, commutative: false}, + {name: "ShiftAllRightSignExtendedInt64x2", argLength: 2, commutative: false}, + {name: "ShiftLeftInt64x2", argLength: 2, commutative: false}, + {name: "ShiftLeftAndFillUpperFromInt64x2", argLength: 3, commutative: false}, + {name: "ShiftRightInt64x2", argLength: 2, commutative: false}, + {name: "ShiftRightAndFillUpperFromInt64x2", argLength: 3, commutative: false}, + {name: "ShiftRightSignExtendedInt64x2", argLength: 2, commutative: false}, {name: "SubInt64x2", argLength: 2, commutative: false}, {name: "XorInt64x2", argLength: 2, commutative: true}, {name: "AbsoluteInt64x4", argLength: 1, commutative: false}, @@ -635,6 +739,16 @@ func simdGenericOps() []opData { {name: "MaskedNotEqualInt64x4", argLength: 3, commutative: true}, {name: "MaskedOrInt64x4", argLength: 3, commutative: true}, {name: "MaskedPopCountInt64x4", argLength: 2, commutative: false}, + {name: "MaskedRotateLeftInt64x4", argLength: 3, commutative: false}, + {name: "MaskedRotateRightInt64x4", argLength: 3, commutative: false}, + {name: "MaskedShiftAllLeftInt64x4", argLength: 3, commutative: false}, + {name: "MaskedShiftAllRightInt64x4", argLength: 3, commutative: false}, + {name: "MaskedShiftAllRightSignExtendedInt64x4", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftInt64x4", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftAndFillUpperFromInt64x4", argLength: 4, commutative: false}, + {name: "MaskedShiftRightInt64x4", argLength: 3, commutative: false}, + {name: "MaskedShiftRightAndFillUpperFromInt64x4", argLength: 4, commutative: false}, + {name: "MaskedShiftRightSignExtendedInt64x4", argLength: 3, commutative: false}, {name: "MaskedSubInt64x4", argLength: 3, commutative: false}, {name: "MaskedXorInt64x4", argLength: 3, commutative: true}, {name: "MaxInt64x4", argLength: 2, commutative: true}, @@ -644,6 +758,16 @@ func simdGenericOps() []opData { {name: "NotEqualInt64x4", argLength: 2, commutative: true}, {name: "OrInt64x4", argLength: 2, commutative: true}, {name: "PopCountInt64x4", argLength: 1, commutative: false}, + {name: "RotateLeftInt64x4", argLength: 2, commutative: false}, + {name: "RotateRightInt64x4", argLength: 2, commutative: false}, + {name: "ShiftAllLeftInt64x4", argLength: 2, commutative: false}, + {name: "ShiftAllRightInt64x4", argLength: 2, commutative: false}, + {name: "ShiftAllRightSignExtendedInt64x4", argLength: 2, commutative: false}, + {name: "ShiftLeftInt64x4", argLength: 2, commutative: false}, + {name: "ShiftLeftAndFillUpperFromInt64x4", argLength: 3, commutative: false}, + {name: "ShiftRightInt64x4", argLength: 2, commutative: false}, + {name: "ShiftRightAndFillUpperFromInt64x4", argLength: 3, commutative: false}, + {name: 
"ShiftRightSignExtendedInt64x4", argLength: 2, commutative: false}, {name: "SubInt64x4", argLength: 2, commutative: false}, {name: "XorInt64x4", argLength: 2, commutative: true}, {name: "AbsoluteInt64x8", argLength: 1, commutative: false}, @@ -671,6 +795,16 @@ func simdGenericOps() []opData { {name: "MaskedNotEqualInt64x8", argLength: 3, commutative: true}, {name: "MaskedOrInt64x8", argLength: 3, commutative: true}, {name: "MaskedPopCountInt64x8", argLength: 2, commutative: false}, + {name: "MaskedRotateLeftInt64x8", argLength: 3, commutative: false}, + {name: "MaskedRotateRightInt64x8", argLength: 3, commutative: false}, + {name: "MaskedShiftAllLeftInt64x8", argLength: 3, commutative: false}, + {name: "MaskedShiftAllRightInt64x8", argLength: 3, commutative: false}, + {name: "MaskedShiftAllRightSignExtendedInt64x8", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftInt64x8", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftAndFillUpperFromInt64x8", argLength: 4, commutative: false}, + {name: "MaskedShiftRightInt64x8", argLength: 3, commutative: false}, + {name: "MaskedShiftRightAndFillUpperFromInt64x8", argLength: 4, commutative: false}, + {name: "MaskedShiftRightSignExtendedInt64x8", argLength: 3, commutative: false}, {name: "MaskedSubInt64x8", argLength: 3, commutative: false}, {name: "MaskedXorInt64x8", argLength: 3, commutative: true}, {name: "MaxInt64x8", argLength: 2, commutative: true}, @@ -680,6 +814,16 @@ func simdGenericOps() []opData { {name: "NotEqualInt64x8", argLength: 2, commutative: true}, {name: "OrInt64x8", argLength: 2, commutative: true}, {name: "PopCountInt64x8", argLength: 1, commutative: false}, + {name: "RotateLeftInt64x8", argLength: 2, commutative: false}, + {name: "RotateRightInt64x8", argLength: 2, commutative: false}, + {name: "ShiftAllLeftInt64x8", argLength: 2, commutative: false}, + {name: "ShiftAllRightInt64x8", argLength: 2, commutative: false}, + {name: "ShiftAllRightSignExtendedInt64x8", argLength: 2, commutative: false}, + {name: "ShiftLeftInt64x8", argLength: 2, commutative: false}, + {name: "ShiftLeftAndFillUpperFromInt64x8", argLength: 3, commutative: false}, + {name: "ShiftRightInt64x8", argLength: 2, commutative: false}, + {name: "ShiftRightAndFillUpperFromInt64x8", argLength: 3, commutative: false}, + {name: "ShiftRightSignExtendedInt64x8", argLength: 2, commutative: false}, {name: "SubInt64x8", argLength: 2, commutative: false}, {name: "XorInt64x8", argLength: 2, commutative: true}, {name: "AbsoluteInt8x16", argLength: 1, commutative: false}, @@ -799,6 +943,11 @@ func simdGenericOps() []opData { {name: "MaskedPopCountUint16x16", argLength: 2, commutative: false}, {name: "MaskedSaturatedAddUint16x16", argLength: 3, commutative: true}, {name: "MaskedSaturatedSubUint16x16", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftUint16x16", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftAndFillUpperFromUint16x16", argLength: 4, commutative: false}, + {name: "MaskedShiftRightUint16x16", argLength: 3, commutative: false}, + {name: "MaskedShiftRightAndFillUpperFromUint16x16", argLength: 4, commutative: false}, + {name: "MaskedShiftRightSignExtendedUint16x16", argLength: 3, commutative: false}, {name: "MaskedSubUint16x16", argLength: 3, commutative: false}, {name: "MaxUint16x16", argLength: 2, commutative: true}, {name: "MinUint16x16", argLength: 2, commutative: true}, @@ -810,6 +959,13 @@ func simdGenericOps() []opData { {name: "PopCountUint16x16", argLength: 1, commutative: false}, {name: "SaturatedAddUint16x16", 
argLength: 2, commutative: true}, {name: "SaturatedSubUint16x16", argLength: 2, commutative: false}, + {name: "ShiftAllLeftUint16x16", argLength: 2, commutative: false}, + {name: "ShiftAllRightUint16x16", argLength: 2, commutative: false}, + {name: "ShiftLeftUint16x16", argLength: 2, commutative: false}, + {name: "ShiftLeftAndFillUpperFromUint16x16", argLength: 3, commutative: false}, + {name: "ShiftRightUint16x16", argLength: 2, commutative: false}, + {name: "ShiftRightAndFillUpperFromUint16x16", argLength: 3, commutative: false}, + {name: "ShiftRightSignExtendedUint16x16", argLength: 2, commutative: false}, {name: "SubUint16x16", argLength: 2, commutative: false}, {name: "XorUint16x16", argLength: 2, commutative: true}, {name: "AddUint16x32", argLength: 2, commutative: true}, @@ -833,6 +989,11 @@ func simdGenericOps() []opData { {name: "MaskedPopCountUint16x32", argLength: 2, commutative: false}, {name: "MaskedSaturatedAddUint16x32", argLength: 3, commutative: true}, {name: "MaskedSaturatedSubUint16x32", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftUint16x32", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftAndFillUpperFromUint16x32", argLength: 4, commutative: false}, + {name: "MaskedShiftRightUint16x32", argLength: 3, commutative: false}, + {name: "MaskedShiftRightAndFillUpperFromUint16x32", argLength: 4, commutative: false}, + {name: "MaskedShiftRightSignExtendedUint16x32", argLength: 3, commutative: false}, {name: "MaskedSubUint16x32", argLength: 3, commutative: false}, {name: "MaxUint16x32", argLength: 2, commutative: true}, {name: "MinUint16x32", argLength: 2, commutative: true}, @@ -841,6 +1002,11 @@ func simdGenericOps() []opData { {name: "PopCountUint16x32", argLength: 1, commutative: false}, {name: "SaturatedAddUint16x32", argLength: 2, commutative: true}, {name: "SaturatedSubUint16x32", argLength: 2, commutative: false}, + {name: "ShiftLeftUint16x32", argLength: 2, commutative: false}, + {name: "ShiftLeftAndFillUpperFromUint16x32", argLength: 3, commutative: false}, + {name: "ShiftRightUint16x32", argLength: 2, commutative: false}, + {name: "ShiftRightAndFillUpperFromUint16x32", argLength: 3, commutative: false}, + {name: "ShiftRightSignExtendedUint16x32", argLength: 2, commutative: false}, {name: "SubUint16x32", argLength: 2, commutative: false}, {name: "AddUint16x8", argLength: 2, commutative: true}, {name: "AndUint16x8", argLength: 2, commutative: true}, @@ -865,6 +1031,11 @@ func simdGenericOps() []opData { {name: "MaskedPopCountUint16x8", argLength: 2, commutative: false}, {name: "MaskedSaturatedAddUint16x8", argLength: 3, commutative: true}, {name: "MaskedSaturatedSubUint16x8", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftUint16x8", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftAndFillUpperFromUint16x8", argLength: 4, commutative: false}, + {name: "MaskedShiftRightUint16x8", argLength: 3, commutative: false}, + {name: "MaskedShiftRightAndFillUpperFromUint16x8", argLength: 4, commutative: false}, + {name: "MaskedShiftRightSignExtendedUint16x8", argLength: 3, commutative: false}, {name: "MaskedSubUint16x8", argLength: 3, commutative: false}, {name: "MaxUint16x8", argLength: 2, commutative: true}, {name: "MinUint16x8", argLength: 2, commutative: true}, @@ -876,6 +1047,13 @@ func simdGenericOps() []opData { {name: "PopCountUint16x8", argLength: 1, commutative: false}, {name: "SaturatedAddUint16x8", argLength: 2, commutative: true}, {name: "SaturatedSubUint16x8", argLength: 2, commutative: false}, + {name: 
"ShiftAllLeftUint16x8", argLength: 2, commutative: false}, + {name: "ShiftAllRightUint16x8", argLength: 2, commutative: false}, + {name: "ShiftLeftUint16x8", argLength: 2, commutative: false}, + {name: "ShiftLeftAndFillUpperFromUint16x8", argLength: 3, commutative: false}, + {name: "ShiftRightUint16x8", argLength: 2, commutative: false}, + {name: "ShiftRightAndFillUpperFromUint16x8", argLength: 3, commutative: false}, + {name: "ShiftRightSignExtendedUint16x8", argLength: 2, commutative: false}, {name: "SubUint16x8", argLength: 2, commutative: false}, {name: "XorUint16x8", argLength: 2, commutative: true}, {name: "AddUint32x16", argLength: 2, commutative: true}, @@ -899,7 +1077,14 @@ func simdGenericOps() []opData { {name: "MaskedNotEqualUint32x16", argLength: 3, commutative: true}, {name: "MaskedOrUint32x16", argLength: 3, commutative: true}, {name: "MaskedPopCountUint32x16", argLength: 2, commutative: false}, + {name: "MaskedRotateLeftUint32x16", argLength: 3, commutative: false}, + {name: "MaskedRotateRightUint32x16", argLength: 3, commutative: false}, {name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16", argLength: 4, commutative: false}, + {name: "MaskedShiftLeftUint32x16", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftAndFillUpperFromUint32x16", argLength: 4, commutative: false}, + {name: "MaskedShiftRightUint32x16", argLength: 3, commutative: false}, + {name: "MaskedShiftRightAndFillUpperFromUint32x16", argLength: 4, commutative: false}, + {name: "MaskedShiftRightSignExtendedUint32x16", argLength: 3, commutative: false}, {name: "MaskedSubUint32x16", argLength: 3, commutative: false}, {name: "MaskedUnsignedSignedQuadDotProdAccumulateUint32x16", argLength: 4, commutative: false}, {name: "MaskedXorUint32x16", argLength: 3, commutative: true}, @@ -908,7 +1093,14 @@ func simdGenericOps() []opData { {name: "NotEqualUint32x16", argLength: 2, commutative: true}, {name: "OrUint32x16", argLength: 2, commutative: true}, {name: "PopCountUint32x16", argLength: 1, commutative: false}, + {name: "RotateLeftUint32x16", argLength: 2, commutative: false}, + {name: "RotateRightUint32x16", argLength: 2, commutative: false}, {name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x16", argLength: 3, commutative: false}, + {name: "ShiftLeftUint32x16", argLength: 2, commutative: false}, + {name: "ShiftLeftAndFillUpperFromUint32x16", argLength: 3, commutative: false}, + {name: "ShiftRightUint32x16", argLength: 2, commutative: false}, + {name: "ShiftRightAndFillUpperFromUint32x16", argLength: 3, commutative: false}, + {name: "ShiftRightSignExtendedUint32x16", argLength: 2, commutative: false}, {name: "SubUint32x16", argLength: 2, commutative: false}, {name: "UnsignedSignedQuadDotProdAccumulateUint32x16", argLength: 3, commutative: false}, {name: "XorUint32x16", argLength: 2, commutative: true}, @@ -933,7 +1125,14 @@ func simdGenericOps() []opData { {name: "MaskedNotEqualUint32x4", argLength: 3, commutative: true}, {name: "MaskedOrUint32x4", argLength: 3, commutative: true}, {name: "MaskedPopCountUint32x4", argLength: 2, commutative: false}, + {name: "MaskedRotateLeftUint32x4", argLength: 3, commutative: false}, + {name: "MaskedRotateRightUint32x4", argLength: 3, commutative: false}, {name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4", argLength: 4, commutative: false}, + {name: "MaskedShiftLeftUint32x4", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftAndFillUpperFromUint32x4", argLength: 4, commutative: false}, + {name: "MaskedShiftRightUint32x4", 
argLength: 3, commutative: false}, + {name: "MaskedShiftRightAndFillUpperFromUint32x4", argLength: 4, commutative: false}, + {name: "MaskedShiftRightSignExtendedUint32x4", argLength: 3, commutative: false}, {name: "MaskedSubUint32x4", argLength: 3, commutative: false}, {name: "MaskedUnsignedSignedQuadDotProdAccumulateUint32x4", argLength: 4, commutative: false}, {name: "MaskedXorUint32x4", argLength: 3, commutative: true}, @@ -945,7 +1144,16 @@ func simdGenericOps() []opData { {name: "PairwiseAddUint32x4", argLength: 2, commutative: false}, {name: "PairwiseSubUint32x4", argLength: 2, commutative: false}, {name: "PopCountUint32x4", argLength: 1, commutative: false}, + {name: "RotateLeftUint32x4", argLength: 2, commutative: false}, + {name: "RotateRightUint32x4", argLength: 2, commutative: false}, {name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x4", argLength: 3, commutative: false}, + {name: "ShiftAllLeftUint32x4", argLength: 2, commutative: false}, + {name: "ShiftAllRightUint32x4", argLength: 2, commutative: false}, + {name: "ShiftLeftUint32x4", argLength: 2, commutative: false}, + {name: "ShiftLeftAndFillUpperFromUint32x4", argLength: 3, commutative: false}, + {name: "ShiftRightUint32x4", argLength: 2, commutative: false}, + {name: "ShiftRightAndFillUpperFromUint32x4", argLength: 3, commutative: false}, + {name: "ShiftRightSignExtendedUint32x4", argLength: 2, commutative: false}, {name: "SubUint32x4", argLength: 2, commutative: false}, {name: "UnsignedSignedQuadDotProdAccumulateUint32x4", argLength: 3, commutative: false}, {name: "XorUint32x4", argLength: 2, commutative: true}, @@ -970,7 +1178,14 @@ func simdGenericOps() []opData { {name: "MaskedNotEqualUint32x8", argLength: 3, commutative: true}, {name: "MaskedOrUint32x8", argLength: 3, commutative: true}, {name: "MaskedPopCountUint32x8", argLength: 2, commutative: false}, + {name: "MaskedRotateLeftUint32x8", argLength: 3, commutative: false}, + {name: "MaskedRotateRightUint32x8", argLength: 3, commutative: false}, {name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8", argLength: 4, commutative: false}, + {name: "MaskedShiftLeftUint32x8", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftAndFillUpperFromUint32x8", argLength: 4, commutative: false}, + {name: "MaskedShiftRightUint32x8", argLength: 3, commutative: false}, + {name: "MaskedShiftRightAndFillUpperFromUint32x8", argLength: 4, commutative: false}, + {name: "MaskedShiftRightSignExtendedUint32x8", argLength: 3, commutative: false}, {name: "MaskedSubUint32x8", argLength: 3, commutative: false}, {name: "MaskedUnsignedSignedQuadDotProdAccumulateUint32x8", argLength: 4, commutative: false}, {name: "MaskedXorUint32x8", argLength: 3, commutative: true}, @@ -982,7 +1197,16 @@ func simdGenericOps() []opData { {name: "PairwiseAddUint32x8", argLength: 2, commutative: false}, {name: "PairwiseSubUint32x8", argLength: 2, commutative: false}, {name: "PopCountUint32x8", argLength: 1, commutative: false}, + {name: "RotateLeftUint32x8", argLength: 2, commutative: false}, + {name: "RotateRightUint32x8", argLength: 2, commutative: false}, {name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x8", argLength: 3, commutative: false}, + {name: "ShiftAllLeftUint32x8", argLength: 2, commutative: false}, + {name: "ShiftAllRightUint32x8", argLength: 2, commutative: false}, + {name: "ShiftLeftUint32x8", argLength: 2, commutative: false}, + {name: "ShiftLeftAndFillUpperFromUint32x8", argLength: 3, commutative: false}, + {name: "ShiftRightUint32x8", argLength: 2, 
commutative: false}, + {name: "ShiftRightAndFillUpperFromUint32x8", argLength: 3, commutative: false}, + {name: "ShiftRightSignExtendedUint32x8", argLength: 2, commutative: false}, {name: "SubUint32x8", argLength: 2, commutative: false}, {name: "UnsignedSignedQuadDotProdAccumulateUint32x8", argLength: 3, commutative: false}, {name: "XorUint32x8", argLength: 2, commutative: true}, @@ -1008,6 +1232,15 @@ func simdGenericOps() []opData { {name: "MaskedNotEqualUint64x2", argLength: 3, commutative: true}, {name: "MaskedOrUint64x2", argLength: 3, commutative: true}, {name: "MaskedPopCountUint64x2", argLength: 2, commutative: false}, + {name: "MaskedRotateLeftUint64x2", argLength: 3, commutative: false}, + {name: "MaskedRotateRightUint64x2", argLength: 3, commutative: false}, + {name: "MaskedShiftAllLeftUint64x2", argLength: 3, commutative: false}, + {name: "MaskedShiftAllRightUint64x2", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftUint64x2", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftAndFillUpperFromUint64x2", argLength: 4, commutative: false}, + {name: "MaskedShiftRightUint64x2", argLength: 3, commutative: false}, + {name: "MaskedShiftRightAndFillUpperFromUint64x2", argLength: 4, commutative: false}, + {name: "MaskedShiftRightSignExtendedUint64x2", argLength: 3, commutative: false}, {name: "MaskedSubUint64x2", argLength: 3, commutative: false}, {name: "MaskedXorUint64x2", argLength: 3, commutative: true}, {name: "MaxUint64x2", argLength: 2, commutative: true}, @@ -1016,6 +1249,15 @@ func simdGenericOps() []opData { {name: "NotEqualUint64x2", argLength: 2, commutative: true}, {name: "OrUint64x2", argLength: 2, commutative: true}, {name: "PopCountUint64x2", argLength: 1, commutative: false}, + {name: "RotateLeftUint64x2", argLength: 2, commutative: false}, + {name: "RotateRightUint64x2", argLength: 2, commutative: false}, + {name: "ShiftAllLeftUint64x2", argLength: 2, commutative: false}, + {name: "ShiftAllRightUint64x2", argLength: 2, commutative: false}, + {name: "ShiftLeftUint64x2", argLength: 2, commutative: false}, + {name: "ShiftLeftAndFillUpperFromUint64x2", argLength: 3, commutative: false}, + {name: "ShiftRightUint64x2", argLength: 2, commutative: false}, + {name: "ShiftRightAndFillUpperFromUint64x2", argLength: 3, commutative: false}, + {name: "ShiftRightSignExtendedUint64x2", argLength: 2, commutative: false}, {name: "SubUint64x2", argLength: 2, commutative: false}, {name: "XorUint64x2", argLength: 2, commutative: true}, {name: "AddUint64x4", argLength: 2, commutative: true}, @@ -1040,6 +1282,15 @@ func simdGenericOps() []opData { {name: "MaskedNotEqualUint64x4", argLength: 3, commutative: true}, {name: "MaskedOrUint64x4", argLength: 3, commutative: true}, {name: "MaskedPopCountUint64x4", argLength: 2, commutative: false}, + {name: "MaskedRotateLeftUint64x4", argLength: 3, commutative: false}, + {name: "MaskedRotateRightUint64x4", argLength: 3, commutative: false}, + {name: "MaskedShiftAllLeftUint64x4", argLength: 3, commutative: false}, + {name: "MaskedShiftAllRightUint64x4", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftUint64x4", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftAndFillUpperFromUint64x4", argLength: 4, commutative: false}, + {name: "MaskedShiftRightUint64x4", argLength: 3, commutative: false}, + {name: "MaskedShiftRightAndFillUpperFromUint64x4", argLength: 4, commutative: false}, + {name: "MaskedShiftRightSignExtendedUint64x4", argLength: 3, commutative: false}, {name: "MaskedSubUint64x4", argLength: 3, 
commutative: false}, {name: "MaskedXorUint64x4", argLength: 3, commutative: true}, {name: "MaxUint64x4", argLength: 2, commutative: true}, @@ -1048,6 +1299,15 @@ func simdGenericOps() []opData { {name: "NotEqualUint64x4", argLength: 2, commutative: true}, {name: "OrUint64x4", argLength: 2, commutative: true}, {name: "PopCountUint64x4", argLength: 1, commutative: false}, + {name: "RotateLeftUint64x4", argLength: 2, commutative: false}, + {name: "RotateRightUint64x4", argLength: 2, commutative: false}, + {name: "ShiftAllLeftUint64x4", argLength: 2, commutative: false}, + {name: "ShiftAllRightUint64x4", argLength: 2, commutative: false}, + {name: "ShiftLeftUint64x4", argLength: 2, commutative: false}, + {name: "ShiftLeftAndFillUpperFromUint64x4", argLength: 3, commutative: false}, + {name: "ShiftRightUint64x4", argLength: 2, commutative: false}, + {name: "ShiftRightAndFillUpperFromUint64x4", argLength: 3, commutative: false}, + {name: "ShiftRightSignExtendedUint64x4", argLength: 2, commutative: false}, {name: "SubUint64x4", argLength: 2, commutative: false}, {name: "XorUint64x4", argLength: 2, commutative: true}, {name: "AddUint64x8", argLength: 2, commutative: true}, @@ -1072,6 +1332,15 @@ func simdGenericOps() []opData { {name: "MaskedNotEqualUint64x8", argLength: 3, commutative: true}, {name: "MaskedOrUint64x8", argLength: 3, commutative: true}, {name: "MaskedPopCountUint64x8", argLength: 2, commutative: false}, + {name: "MaskedRotateLeftUint64x8", argLength: 3, commutative: false}, + {name: "MaskedRotateRightUint64x8", argLength: 3, commutative: false}, + {name: "MaskedShiftAllLeftUint64x8", argLength: 3, commutative: false}, + {name: "MaskedShiftAllRightUint64x8", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftUint64x8", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftAndFillUpperFromUint64x8", argLength: 4, commutative: false}, + {name: "MaskedShiftRightUint64x8", argLength: 3, commutative: false}, + {name: "MaskedShiftRightAndFillUpperFromUint64x8", argLength: 4, commutative: false}, + {name: "MaskedShiftRightSignExtendedUint64x8", argLength: 3, commutative: false}, {name: "MaskedSubUint64x8", argLength: 3, commutative: false}, {name: "MaskedXorUint64x8", argLength: 3, commutative: true}, {name: "MaxUint64x8", argLength: 2, commutative: true}, @@ -1080,6 +1349,15 @@ func simdGenericOps() []opData { {name: "NotEqualUint64x8", argLength: 2, commutative: true}, {name: "OrUint64x8", argLength: 2, commutative: true}, {name: "PopCountUint64x8", argLength: 1, commutative: false}, + {name: "RotateLeftUint64x8", argLength: 2, commutative: false}, + {name: "RotateRightUint64x8", argLength: 2, commutative: false}, + {name: "ShiftAllLeftUint64x8", argLength: 2, commutative: false}, + {name: "ShiftAllRightUint64x8", argLength: 2, commutative: false}, + {name: "ShiftLeftUint64x8", argLength: 2, commutative: false}, + {name: "ShiftLeftAndFillUpperFromUint64x8", argLength: 3, commutative: false}, + {name: "ShiftRightUint64x8", argLength: 2, commutative: false}, + {name: "ShiftRightAndFillUpperFromUint64x8", argLength: 3, commutative: false}, + {name: "ShiftRightSignExtendedUint64x8", argLength: 2, commutative: false}, {name: "SubUint64x8", argLength: 2, commutative: false}, {name: "XorUint64x8", argLength: 2, commutative: true}, {name: "AddUint8x16", argLength: 2, commutative: true}, @@ -1372,20 +1650,140 @@ func simdGenericOps() []opData { {name: "RoundWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, {name: 
"TruncSuppressExceptionWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "TruncWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllLeftAndFillUpperFromInt16x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllRightAndFillUpperFromInt16x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromInt16x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromInt16x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllLeftAndFillUpperFromInt16x32", argLength: 3, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllRightAndFillUpperFromInt16x32", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromInt16x32", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromInt16x32", argLength: 2, commutative: false, aux: "Int8"}, {name: "GetElemInt16x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllLeftAndFillUpperFromInt16x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllRightAndFillUpperFromInt16x8", argLength: 3, commutative: false, aux: "Int8"}, {name: "SetElemInt16x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromInt16x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromInt16x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRotateAllLeftInt32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRotateAllRightInt32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllLeftAndFillUpperFromInt32x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllRightAndFillUpperFromInt32x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftInt32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightInt32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromInt32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromInt32x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "GetElemInt32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "MaskedRotateAllLeftInt32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRotateAllRightInt32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllLeftAndFillUpperFromInt32x4", argLength: 3, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllRightAndFillUpperFromInt32x4", argLength: 3, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftInt32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightInt32x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "SetElemInt32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromInt32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromInt32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRotateAllLeftInt32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRotateAllRightInt32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllLeftAndFillUpperFromInt32x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllRightAndFillUpperFromInt32x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: 
"RotateAllLeftInt32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightInt32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromInt32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromInt32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "GetElemInt64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "MaskedRotateAllLeftInt64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRotateAllRightInt64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllLeftAndFillUpperFromInt64x2", argLength: 3, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllRightAndFillUpperFromInt64x2", argLength: 3, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftInt64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightInt64x2", argLength: 1, commutative: false, aux: "Int8"}, {name: "SetElemInt64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromInt64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromInt64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRotateAllLeftInt64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRotateAllRightInt64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllLeftAndFillUpperFromInt64x4", argLength: 3, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllRightAndFillUpperFromInt64x4", argLength: 3, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftInt64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightInt64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromInt64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromInt64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRotateAllLeftInt64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRotateAllRightInt64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllLeftAndFillUpperFromInt64x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllRightAndFillUpperFromInt64x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftInt64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightInt64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromInt64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromInt64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "GetElemInt8x16", argLength: 1, commutative: false, aux: "Int8"}, {name: "SetElemInt8x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllLeftAndFillUpperFromUint16x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllRightAndFillUpperFromUint16x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromUint16x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromUint16x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllLeftAndFillUpperFromUint16x32", argLength: 3, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllRightAndFillUpperFromUint16x32", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromUint16x32", argLength: 2, commutative: false, aux: 
"Int8"}, + {name: "ShiftAllRightAndFillUpperFromUint16x32", argLength: 2, commutative: false, aux: "Int8"}, {name: "GetElemUint16x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllLeftAndFillUpperFromUint16x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllRightAndFillUpperFromUint16x8", argLength: 3, commutative: false, aux: "Int8"}, {name: "SetElemUint16x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromUint16x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromUint16x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRotateAllLeftUint32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRotateAllRightUint32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllLeftAndFillUpperFromUint32x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllRightAndFillUpperFromUint32x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftUint32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightUint32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromUint32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromUint32x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "GetElemUint32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "MaskedRotateAllLeftUint32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRotateAllRightUint32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllLeftAndFillUpperFromUint32x4", argLength: 3, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllRightAndFillUpperFromUint32x4", argLength: 3, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftUint32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightUint32x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "SetElemUint32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromUint32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromUint32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRotateAllLeftUint32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRotateAllRightUint32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllLeftAndFillUpperFromUint32x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllRightAndFillUpperFromUint32x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftUint32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightUint32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromUint32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromUint32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "GetElemUint64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "MaskedRotateAllLeftUint64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRotateAllRightUint64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllLeftAndFillUpperFromUint64x2", argLength: 3, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllRightAndFillUpperFromUint64x2", argLength: 3, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftUint64x2", argLength: 1, 
commutative: false, aux: "Int8"}, + {name: "RotateAllRightUint64x2", argLength: 1, commutative: false, aux: "Int8"}, {name: "SetElemUint64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromUint64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromUint64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRotateAllLeftUint64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRotateAllRightUint64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllLeftAndFillUpperFromUint64x4", argLength: 3, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllRightAndFillUpperFromUint64x4", argLength: 3, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftUint64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightUint64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromUint64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromUint64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRotateAllLeftUint64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRotateAllRightUint64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllLeftAndFillUpperFromUint64x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllRightAndFillUpperFromUint64x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftUint64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightUint64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromUint64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromUint64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "GetElemUint8x16", argLength: 1, commutative: false, aux: "Int8"}, {name: "SetElemUint8x16", argLength: 2, commutative: false, aux: "Int8"}, } diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 7a1126d433f93c..2bdbd5156e1984 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1426,6 +1426,11 @@ const ( OpAMD64VPOPCNTWMasked256 OpAMD64VPADDSWMasked256 OpAMD64VPSUBSWMasked256 + OpAMD64VPSLLVWMasked256 + OpAMD64VPSHLDVWMasked256 + OpAMD64VPSRLVWMasked256 + OpAMD64VPSHRDVWMasked256 + OpAMD64VPSRAVWMasked256 OpAMD64VPSUBWMasked256 OpAMD64VPMAXSW256 OpAMD64VPMINSW256 @@ -1439,6 +1444,14 @@ const ( OpAMD64VPHADDSW256 OpAMD64VPHSUBSW256 OpAMD64VPSUBSW256 + OpAMD64VPSLLW256 + OpAMD64VPSRLW256 + OpAMD64VPSRAW256 + OpAMD64VPSLLVW256 + OpAMD64VPSHLDVW256 + OpAMD64VPSRLVW256 + OpAMD64VPSHRDVW256 + OpAMD64VPSRAVW256 OpAMD64VPSIGNW256 OpAMD64VPSUBW256 OpAMD64VPABSW512 @@ -1453,6 +1466,11 @@ const ( OpAMD64VPOPCNTWMasked512 OpAMD64VPADDSWMasked512 OpAMD64VPSUBSWMasked512 + OpAMD64VPSLLVWMasked512 + OpAMD64VPSHLDVWMasked512 + OpAMD64VPSRLVWMasked512 + OpAMD64VPSHRDVWMasked512 + OpAMD64VPSRAVWMasked512 OpAMD64VPSUBWMasked512 OpAMD64VPMAXSW512 OpAMD64VPMINSW512 @@ -1462,6 +1480,11 @@ const ( OpAMD64VPOPCNTW512 OpAMD64VPADDSW512 OpAMD64VPSUBSW512 + OpAMD64VPSLLVW512 + OpAMD64VPSHLDVW512 + OpAMD64VPSRLVW512 + OpAMD64VPSHRDVW512 + OpAMD64VPSRAVW512 OpAMD64VPSUBW512 OpAMD64VPABSW128 OpAMD64VPADDW128 @@ -1477,6 +1500,11 @@ const ( OpAMD64VPOPCNTWMasked128 OpAMD64VPADDSWMasked128 OpAMD64VPSUBSWMasked128 + OpAMD64VPSLLVWMasked128 + OpAMD64VPSHLDVWMasked128 + 
OpAMD64VPSRLVWMasked128 + OpAMD64VPSHRDVWMasked128 + OpAMD64VPSRAVWMasked128 OpAMD64VPSUBWMasked128 OpAMD64VPMAXSW128 OpAMD64VPMINSW128 @@ -1490,6 +1518,14 @@ const ( OpAMD64VPHADDSW128 OpAMD64VPHSUBSW128 OpAMD64VPSUBSW128 + OpAMD64VPSLLW128 + OpAMD64VPSRLW128 + OpAMD64VPSRAW128 + OpAMD64VPSLLVW128 + OpAMD64VPSHLDVW128 + OpAMD64VPSRLVW128 + OpAMD64VPSHRDVW128 + OpAMD64VPSRAVW128 OpAMD64VPSIGNW128 OpAMD64VPSUBW128 OpAMD64VPABSD512 @@ -1506,8 +1542,15 @@ const ( OpAMD64VPORDMasked512 OpAMD64VPDPWSSDMasked512 OpAMD64VPOPCNTDMasked512 + OpAMD64VPROLVDMasked512 + OpAMD64VPRORVDMasked512 OpAMD64VPDPWSSDSMasked512 OpAMD64VPDPBUSDSMasked512 + OpAMD64VPSLLVDMasked512 + OpAMD64VPSHLDVDMasked512 + OpAMD64VPSRLVDMasked512 + OpAMD64VPSHRDVDMasked512 + OpAMD64VPSRAVDMasked512 OpAMD64VPSUBDMasked512 OpAMD64VPDPBUSDMasked512 OpAMD64VPXORDMasked512 @@ -1517,8 +1560,15 @@ const ( OpAMD64VPORD512 OpAMD64VPDPWSSD512 OpAMD64VPOPCNTD512 + OpAMD64VPROLVD512 + OpAMD64VPRORVD512 OpAMD64VPDPWSSDS512 OpAMD64VPDPBUSDS512 + OpAMD64VPSLLVD512 + OpAMD64VPSHLDVD512 + OpAMD64VPSRLVD512 + OpAMD64VPSHRDVD512 + OpAMD64VPSRAVD512 OpAMD64VPSUBD512 OpAMD64VPDPBUSD512 OpAMD64VPXORD512 @@ -1536,8 +1586,15 @@ const ( OpAMD64VPORDMasked128 OpAMD64VPDPWSSDMasked128 OpAMD64VPOPCNTDMasked128 + OpAMD64VPROLVDMasked128 + OpAMD64VPRORVDMasked128 OpAMD64VPDPWSSDSMasked128 OpAMD64VPDPBUSDSMasked128 + OpAMD64VPSLLVDMasked128 + OpAMD64VPSHLDVDMasked128 + OpAMD64VPSRLVDMasked128 + OpAMD64VPSHRDVDMasked128 + OpAMD64VPSRAVDMasked128 OpAMD64VPSUBDMasked128 OpAMD64VPDPBUSDMasked128 OpAMD64VPXORDMasked128 @@ -1549,8 +1606,18 @@ const ( OpAMD64VPHADDD128 OpAMD64VPHSUBD128 OpAMD64VPOPCNTD128 + OpAMD64VPROLVD128 + OpAMD64VPRORVD128 OpAMD64VPDPWSSDS128 OpAMD64VPDPBUSDS128 + OpAMD64VPSLLD128 + OpAMD64VPSRLD128 + OpAMD64VPSRAD128 + OpAMD64VPSLLVD128 + OpAMD64VPSHLDVD128 + OpAMD64VPSRLVD128 + OpAMD64VPSHRDVD128 + OpAMD64VPSRAVD128 OpAMD64VPSIGND128 OpAMD64VPSUBD128 OpAMD64VPDPBUSD128 @@ -1568,8 +1635,15 @@ const ( OpAMD64VPORDMasked256 OpAMD64VPDPWSSDMasked256 OpAMD64VPOPCNTDMasked256 + OpAMD64VPROLVDMasked256 + OpAMD64VPRORVDMasked256 OpAMD64VPDPWSSDSMasked256 OpAMD64VPDPBUSDSMasked256 + OpAMD64VPSLLVDMasked256 + OpAMD64VPSHLDVDMasked256 + OpAMD64VPSRLVDMasked256 + OpAMD64VPSHRDVDMasked256 + OpAMD64VPSRAVDMasked256 OpAMD64VPSUBDMasked256 OpAMD64VPDPBUSDMasked256 OpAMD64VPXORDMasked256 @@ -1581,8 +1655,18 @@ const ( OpAMD64VPHADDD256 OpAMD64VPHSUBD256 OpAMD64VPOPCNTD256 + OpAMD64VPROLVD256 + OpAMD64VPRORVD256 OpAMD64VPDPWSSDS256 OpAMD64VPDPBUSDS256 + OpAMD64VPSLLD256 + OpAMD64VPSRLD256 + OpAMD64VPSRAD256 + OpAMD64VPSLLVD256 + OpAMD64VPSHLDVD256 + OpAMD64VPSRLVD256 + OpAMD64VPSHRDVD256 + OpAMD64VPSRAVD256 OpAMD64VPSIGND256 OpAMD64VPSUBD256 OpAMD64VPDPBUSD256 @@ -1599,12 +1683,32 @@ const ( OpAMD64VPMULLQMasked128 OpAMD64VPORQMasked128 OpAMD64VPOPCNTQMasked128 + OpAMD64VPROLVQMasked128 + OpAMD64VPRORVQMasked128 + OpAMD64VPSLLQMasked128 + OpAMD64VPSRLQMasked128 + OpAMD64VPSRAQMasked128 + OpAMD64VPSLLVQMasked128 + OpAMD64VPSHLDVQMasked128 + OpAMD64VPSRLVQMasked128 + OpAMD64VPSHRDVQMasked128 + OpAMD64VPSRAVQMasked128 OpAMD64VPSUBQMasked128 OpAMD64VPXORQMasked128 OpAMD64VPMAXSQ128 OpAMD64VPMINSQ128 OpAMD64VPMULLQ128 OpAMD64VPOPCNTQ128 + OpAMD64VPROLVQ128 + OpAMD64VPRORVQ128 + OpAMD64VPSLLQ128 + OpAMD64VPSRLQ128 + OpAMD64VPSRAQ128 + OpAMD64VPSLLVQ128 + OpAMD64VPSHLDVQ128 + OpAMD64VPSRLVQ128 + OpAMD64VPSHRDVQ128 + OpAMD64VPSRAVQ128 OpAMD64VPSUBQ128 OpAMD64VPABSQ256 OpAMD64VPADDQ256 @@ -1620,12 +1724,32 @@ const ( OpAMD64VPMULLQMasked256 OpAMD64VPORQMasked256 
OpAMD64VPOPCNTQMasked256 + OpAMD64VPROLVQMasked256 + OpAMD64VPRORVQMasked256 + OpAMD64VPSLLQMasked256 + OpAMD64VPSRLQMasked256 + OpAMD64VPSRAQMasked256 + OpAMD64VPSLLVQMasked256 + OpAMD64VPSHLDVQMasked256 + OpAMD64VPSRLVQMasked256 + OpAMD64VPSHRDVQMasked256 + OpAMD64VPSRAVQMasked256 OpAMD64VPSUBQMasked256 OpAMD64VPXORQMasked256 OpAMD64VPMAXSQ256 OpAMD64VPMINSQ256 OpAMD64VPMULLQ256 OpAMD64VPOPCNTQ256 + OpAMD64VPROLVQ256 + OpAMD64VPRORVQ256 + OpAMD64VPSLLQ256 + OpAMD64VPSRLQ256 + OpAMD64VPSRAQ256 + OpAMD64VPSLLVQ256 + OpAMD64VPSHLDVQ256 + OpAMD64VPSRLVQ256 + OpAMD64VPSHRDVQ256 + OpAMD64VPSRAVQ256 OpAMD64VPSUBQ256 OpAMD64VPABSQ512 OpAMD64VPADDQ512 @@ -1641,6 +1765,16 @@ const ( OpAMD64VPMULLQMasked512 OpAMD64VPORQMasked512 OpAMD64VPOPCNTQMasked512 + OpAMD64VPROLVQMasked512 + OpAMD64VPRORVQMasked512 + OpAMD64VPSLLQMasked512 + OpAMD64VPSRLQMasked512 + OpAMD64VPSRAQMasked512 + OpAMD64VPSLLVQMasked512 + OpAMD64VPSHLDVQMasked512 + OpAMD64VPSRLVQMasked512 + OpAMD64VPSHRDVQMasked512 + OpAMD64VPSRAVQMasked512 OpAMD64VPSUBQMasked512 OpAMD64VPXORQMasked512 OpAMD64VPMAXSQ512 @@ -1649,6 +1783,16 @@ const ( OpAMD64VPMULLQ512 OpAMD64VPORQ512 OpAMD64VPOPCNTQ512 + OpAMD64VPROLVQ512 + OpAMD64VPRORVQ512 + OpAMD64VPSLLQ512 + OpAMD64VPSRLQ512 + OpAMD64VPSRAQ512 + OpAMD64VPSLLVQ512 + OpAMD64VPSHLDVQ512 + OpAMD64VPSRLVQ512 + OpAMD64VPSHRDVQ512 + OpAMD64VPSRAVQ512 OpAMD64VPSUBQ512 OpAMD64VPXORQ512 OpAMD64VPABSB128 @@ -1834,28 +1978,88 @@ const ( OpAMD64VCMPPDMasked512 OpAMD64VPCMPW256 OpAMD64VPCMPWMasked256 + OpAMD64VPSHLDWMasked256 + OpAMD64VPSHRDWMasked256 + OpAMD64VPSHLDW256 + OpAMD64VPSHRDW256 OpAMD64VPCMPW512 OpAMD64VPCMPWMasked512 + OpAMD64VPSHLDWMasked512 + OpAMD64VPSHRDWMasked512 + OpAMD64VPSHLDW512 + OpAMD64VPSHRDW512 OpAMD64VPEXTRW128 OpAMD64VPCMPW128 OpAMD64VPCMPWMasked128 + OpAMD64VPSHLDWMasked128 + OpAMD64VPSHRDWMasked128 OpAMD64VPINSRW128 + OpAMD64VPSHLDW128 + OpAMD64VPSHRDW128 OpAMD64VPCMPD512 OpAMD64VPCMPDMasked512 + OpAMD64VPROLDMasked512 + OpAMD64VPRORDMasked512 + OpAMD64VPSHLDDMasked512 + OpAMD64VPSHRDDMasked512 + OpAMD64VPROLD512 + OpAMD64VPRORD512 + OpAMD64VPSHLDD512 + OpAMD64VPSHRDD512 OpAMD64VPEXTRD128 OpAMD64VPCMPD128 OpAMD64VPCMPDMasked128 + OpAMD64VPROLDMasked128 + OpAMD64VPRORDMasked128 + OpAMD64VPSHLDDMasked128 + OpAMD64VPSHRDDMasked128 + OpAMD64VPROLD128 + OpAMD64VPRORD128 OpAMD64VPINSRD128 + OpAMD64VPSHLDD128 + OpAMD64VPSHRDD128 OpAMD64VPCMPD256 OpAMD64VPCMPDMasked256 + OpAMD64VPROLDMasked256 + OpAMD64VPRORDMasked256 + OpAMD64VPSHLDDMasked256 + OpAMD64VPSHRDDMasked256 + OpAMD64VPROLD256 + OpAMD64VPRORD256 + OpAMD64VPSHLDD256 + OpAMD64VPSHRDD256 OpAMD64VPEXTRQ128 OpAMD64VPCMPQ128 OpAMD64VPCMPQMasked128 + OpAMD64VPROLQMasked128 + OpAMD64VPRORQMasked128 + OpAMD64VPSHLDQMasked128 + OpAMD64VPSHRDQMasked128 + OpAMD64VPROLQ128 + OpAMD64VPRORQ128 OpAMD64VPINSRQ128 + OpAMD64VPSHLDQ128 + OpAMD64VPSHRDQ128 OpAMD64VPCMPQ256 OpAMD64VPCMPQMasked256 + OpAMD64VPROLQMasked256 + OpAMD64VPRORQMasked256 + OpAMD64VPSHLDQMasked256 + OpAMD64VPSHRDQMasked256 + OpAMD64VPROLQ256 + OpAMD64VPRORQ256 + OpAMD64VPSHLDQ256 + OpAMD64VPSHRDQ256 OpAMD64VPCMPQ512 OpAMD64VPCMPQMasked512 + OpAMD64VPROLQMasked512 + OpAMD64VPRORQMasked512 + OpAMD64VPSHLDQMasked512 + OpAMD64VPSHRDQMasked512 + OpAMD64VPROLQ512 + OpAMD64VPRORQ512 + OpAMD64VPSHLDQ512 + OpAMD64VPSHRDQ512 OpAMD64VPEXTRB128 OpAMD64VPCMPB128 OpAMD64VPCMPBMasked128 @@ -4456,6 +4660,11 @@ const ( OpMaskedPopCountInt16x16 OpMaskedSaturatedAddInt16x16 OpMaskedSaturatedSubInt16x16 + OpMaskedShiftLeftInt16x16 + OpMaskedShiftLeftAndFillUpperFromInt16x16 + 
OpMaskedShiftRightInt16x16 + OpMaskedShiftRightAndFillUpperFromInt16x16 + OpMaskedShiftRightSignExtendedInt16x16 OpMaskedSubInt16x16 OpMaxInt16x16 OpMinInt16x16 @@ -4471,6 +4680,14 @@ const ( OpSaturatedPairwiseAddInt16x16 OpSaturatedPairwiseSubInt16x16 OpSaturatedSubInt16x16 + OpShiftAllLeftInt16x16 + OpShiftAllRightInt16x16 + OpShiftAllRightSignExtendedInt16x16 + OpShiftLeftInt16x16 + OpShiftLeftAndFillUpperFromInt16x16 + OpShiftRightInt16x16 + OpShiftRightAndFillUpperFromInt16x16 + OpShiftRightSignExtendedInt16x16 OpSignInt16x16 OpSubInt16x16 OpXorInt16x16 @@ -4497,6 +4714,11 @@ const ( OpMaskedPopCountInt16x32 OpMaskedSaturatedAddInt16x32 OpMaskedSaturatedSubInt16x32 + OpMaskedShiftLeftInt16x32 + OpMaskedShiftLeftAndFillUpperFromInt16x32 + OpMaskedShiftRightInt16x32 + OpMaskedShiftRightAndFillUpperFromInt16x32 + OpMaskedShiftRightSignExtendedInt16x32 OpMaskedSubInt16x32 OpMaxInt16x32 OpMinInt16x32 @@ -4507,6 +4729,11 @@ const ( OpPopCountInt16x32 OpSaturatedAddInt16x32 OpSaturatedSubInt16x32 + OpShiftLeftInt16x32 + OpShiftLeftAndFillUpperFromInt16x32 + OpShiftRightInt16x32 + OpShiftRightAndFillUpperFromInt16x32 + OpShiftRightSignExtendedInt16x32 OpSubInt16x32 OpAbsoluteInt16x8 OpAddInt16x8 @@ -4533,6 +4760,11 @@ const ( OpMaskedPopCountInt16x8 OpMaskedSaturatedAddInt16x8 OpMaskedSaturatedSubInt16x8 + OpMaskedShiftLeftInt16x8 + OpMaskedShiftLeftAndFillUpperFromInt16x8 + OpMaskedShiftRightInt16x8 + OpMaskedShiftRightAndFillUpperFromInt16x8 + OpMaskedShiftRightSignExtendedInt16x8 OpMaskedSubInt16x8 OpMaxInt16x8 OpMinInt16x8 @@ -4548,6 +4780,14 @@ const ( OpSaturatedPairwiseAddInt16x8 OpSaturatedPairwiseSubInt16x8 OpSaturatedSubInt16x8 + OpShiftAllLeftInt16x8 + OpShiftAllRightInt16x8 + OpShiftAllRightSignExtendedInt16x8 + OpShiftLeftInt16x8 + OpShiftLeftAndFillUpperFromInt16x8 + OpShiftRightInt16x8 + OpShiftRightAndFillUpperFromInt16x8 + OpShiftRightSignExtendedInt16x8 OpSignInt16x8 OpSubInt16x8 OpXorInt16x8 @@ -4576,8 +4816,15 @@ const ( OpMaskedOrInt32x16 OpMaskedPairDotProdAccumulateInt32x16 OpMaskedPopCountInt32x16 + OpMaskedRotateLeftInt32x16 + OpMaskedRotateRightInt32x16 OpMaskedSaturatedPairDotProdAccumulateInt32x16 OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16 + OpMaskedShiftLeftInt32x16 + OpMaskedShiftLeftAndFillUpperFromInt32x16 + OpMaskedShiftRightInt32x16 + OpMaskedShiftRightAndFillUpperFromInt32x16 + OpMaskedShiftRightSignExtendedInt32x16 OpMaskedSubInt32x16 OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x16 OpMaskedXorInt32x16 @@ -4588,8 +4835,15 @@ const ( OpOrInt32x16 OpPairDotProdAccumulateInt32x16 OpPopCountInt32x16 + OpRotateLeftInt32x16 + OpRotateRightInt32x16 OpSaturatedPairDotProdAccumulateInt32x16 OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16 + OpShiftLeftInt32x16 + OpShiftLeftAndFillUpperFromInt32x16 + OpShiftRightInt32x16 + OpShiftRightAndFillUpperFromInt32x16 + OpShiftRightSignExtendedInt32x16 OpSubInt32x16 OpUnsignedSignedQuadDotProdAccumulateInt32x16 OpXorInt32x16 @@ -4618,8 +4872,15 @@ const ( OpMaskedOrInt32x4 OpMaskedPairDotProdAccumulateInt32x4 OpMaskedPopCountInt32x4 + OpMaskedRotateLeftInt32x4 + OpMaskedRotateRightInt32x4 OpMaskedSaturatedPairDotProdAccumulateInt32x4 OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4 + OpMaskedShiftLeftInt32x4 + OpMaskedShiftLeftAndFillUpperFromInt32x4 + OpMaskedShiftRightInt32x4 + OpMaskedShiftRightAndFillUpperFromInt32x4 + OpMaskedShiftRightSignExtendedInt32x4 OpMaskedSubInt32x4 OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x4 OpMaskedXorInt32x4 @@ -4633,8 +4894,18 @@ const ( 
OpPairwiseAddInt32x4 OpPairwiseSubInt32x4 OpPopCountInt32x4 + OpRotateLeftInt32x4 + OpRotateRightInt32x4 OpSaturatedPairDotProdAccumulateInt32x4 OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4 + OpShiftAllLeftInt32x4 + OpShiftAllRightInt32x4 + OpShiftAllRightSignExtendedInt32x4 + OpShiftLeftInt32x4 + OpShiftLeftAndFillUpperFromInt32x4 + OpShiftRightInt32x4 + OpShiftRightAndFillUpperFromInt32x4 + OpShiftRightSignExtendedInt32x4 OpSignInt32x4 OpSubInt32x4 OpUnsignedSignedQuadDotProdAccumulateInt32x4 @@ -4664,8 +4935,15 @@ const ( OpMaskedOrInt32x8 OpMaskedPairDotProdAccumulateInt32x8 OpMaskedPopCountInt32x8 + OpMaskedRotateLeftInt32x8 + OpMaskedRotateRightInt32x8 OpMaskedSaturatedPairDotProdAccumulateInt32x8 OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8 + OpMaskedShiftLeftInt32x8 + OpMaskedShiftLeftAndFillUpperFromInt32x8 + OpMaskedShiftRightInt32x8 + OpMaskedShiftRightAndFillUpperFromInt32x8 + OpMaskedShiftRightSignExtendedInt32x8 OpMaskedSubInt32x8 OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x8 OpMaskedXorInt32x8 @@ -4679,8 +4957,18 @@ const ( OpPairwiseAddInt32x8 OpPairwiseSubInt32x8 OpPopCountInt32x8 + OpRotateLeftInt32x8 + OpRotateRightInt32x8 OpSaturatedPairDotProdAccumulateInt32x8 OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8 + OpShiftAllLeftInt32x8 + OpShiftAllRightInt32x8 + OpShiftAllRightSignExtendedInt32x8 + OpShiftLeftInt32x8 + OpShiftLeftAndFillUpperFromInt32x8 + OpShiftRightInt32x8 + OpShiftRightAndFillUpperFromInt32x8 + OpShiftRightSignExtendedInt32x8 OpSignInt32x8 OpSubInt32x8 OpUnsignedSignedQuadDotProdAccumulateInt32x8 @@ -4710,6 +4998,16 @@ const ( OpMaskedNotEqualInt64x2 OpMaskedOrInt64x2 OpMaskedPopCountInt64x2 + OpMaskedRotateLeftInt64x2 + OpMaskedRotateRightInt64x2 + OpMaskedShiftAllLeftInt64x2 + OpMaskedShiftAllRightInt64x2 + OpMaskedShiftAllRightSignExtendedInt64x2 + OpMaskedShiftLeftInt64x2 + OpMaskedShiftLeftAndFillUpperFromInt64x2 + OpMaskedShiftRightInt64x2 + OpMaskedShiftRightAndFillUpperFromInt64x2 + OpMaskedShiftRightSignExtendedInt64x2 OpMaskedSubInt64x2 OpMaskedXorInt64x2 OpMaxInt64x2 @@ -4719,6 +5017,16 @@ const ( OpNotEqualInt64x2 OpOrInt64x2 OpPopCountInt64x2 + OpRotateLeftInt64x2 + OpRotateRightInt64x2 + OpShiftAllLeftInt64x2 + OpShiftAllRightInt64x2 + OpShiftAllRightSignExtendedInt64x2 + OpShiftLeftInt64x2 + OpShiftLeftAndFillUpperFromInt64x2 + OpShiftRightInt64x2 + OpShiftRightAndFillUpperFromInt64x2 + OpShiftRightSignExtendedInt64x2 OpSubInt64x2 OpXorInt64x2 OpAbsoluteInt64x4 @@ -4746,6 +5054,16 @@ const ( OpMaskedNotEqualInt64x4 OpMaskedOrInt64x4 OpMaskedPopCountInt64x4 + OpMaskedRotateLeftInt64x4 + OpMaskedRotateRightInt64x4 + OpMaskedShiftAllLeftInt64x4 + OpMaskedShiftAllRightInt64x4 + OpMaskedShiftAllRightSignExtendedInt64x4 + OpMaskedShiftLeftInt64x4 + OpMaskedShiftLeftAndFillUpperFromInt64x4 + OpMaskedShiftRightInt64x4 + OpMaskedShiftRightAndFillUpperFromInt64x4 + OpMaskedShiftRightSignExtendedInt64x4 OpMaskedSubInt64x4 OpMaskedXorInt64x4 OpMaxInt64x4 @@ -4755,6 +5073,16 @@ const ( OpNotEqualInt64x4 OpOrInt64x4 OpPopCountInt64x4 + OpRotateLeftInt64x4 + OpRotateRightInt64x4 + OpShiftAllLeftInt64x4 + OpShiftAllRightInt64x4 + OpShiftAllRightSignExtendedInt64x4 + OpShiftLeftInt64x4 + OpShiftLeftAndFillUpperFromInt64x4 + OpShiftRightInt64x4 + OpShiftRightAndFillUpperFromInt64x4 + OpShiftRightSignExtendedInt64x4 OpSubInt64x4 OpXorInt64x4 OpAbsoluteInt64x8 @@ -4782,6 +5110,16 @@ const ( OpMaskedNotEqualInt64x8 OpMaskedOrInt64x8 OpMaskedPopCountInt64x8 + OpMaskedRotateLeftInt64x8 + OpMaskedRotateRightInt64x8 + 
OpMaskedShiftAllLeftInt64x8 + OpMaskedShiftAllRightInt64x8 + OpMaskedShiftAllRightSignExtendedInt64x8 + OpMaskedShiftLeftInt64x8 + OpMaskedShiftLeftAndFillUpperFromInt64x8 + OpMaskedShiftRightInt64x8 + OpMaskedShiftRightAndFillUpperFromInt64x8 + OpMaskedShiftRightSignExtendedInt64x8 OpMaskedSubInt64x8 OpMaskedXorInt64x8 OpMaxInt64x8 @@ -4791,6 +5129,16 @@ const ( OpNotEqualInt64x8 OpOrInt64x8 OpPopCountInt64x8 + OpRotateLeftInt64x8 + OpRotateRightInt64x8 + OpShiftAllLeftInt64x8 + OpShiftAllRightInt64x8 + OpShiftAllRightSignExtendedInt64x8 + OpShiftLeftInt64x8 + OpShiftLeftAndFillUpperFromInt64x8 + OpShiftRightInt64x8 + OpShiftRightAndFillUpperFromInt64x8 + OpShiftRightSignExtendedInt64x8 OpSubInt64x8 OpXorInt64x8 OpAbsoluteInt8x16 @@ -4910,6 +5258,11 @@ const ( OpMaskedPopCountUint16x16 OpMaskedSaturatedAddUint16x16 OpMaskedSaturatedSubUint16x16 + OpMaskedShiftLeftUint16x16 + OpMaskedShiftLeftAndFillUpperFromUint16x16 + OpMaskedShiftRightUint16x16 + OpMaskedShiftRightAndFillUpperFromUint16x16 + OpMaskedShiftRightSignExtendedUint16x16 OpMaskedSubUint16x16 OpMaxUint16x16 OpMinUint16x16 @@ -4921,6 +5274,13 @@ const ( OpPopCountUint16x16 OpSaturatedAddUint16x16 OpSaturatedSubUint16x16 + OpShiftAllLeftUint16x16 + OpShiftAllRightUint16x16 + OpShiftLeftUint16x16 + OpShiftLeftAndFillUpperFromUint16x16 + OpShiftRightUint16x16 + OpShiftRightAndFillUpperFromUint16x16 + OpShiftRightSignExtendedUint16x16 OpSubUint16x16 OpXorUint16x16 OpAddUint16x32 @@ -4944,6 +5304,11 @@ const ( OpMaskedPopCountUint16x32 OpMaskedSaturatedAddUint16x32 OpMaskedSaturatedSubUint16x32 + OpMaskedShiftLeftUint16x32 + OpMaskedShiftLeftAndFillUpperFromUint16x32 + OpMaskedShiftRightUint16x32 + OpMaskedShiftRightAndFillUpperFromUint16x32 + OpMaskedShiftRightSignExtendedUint16x32 OpMaskedSubUint16x32 OpMaxUint16x32 OpMinUint16x32 @@ -4952,6 +5317,11 @@ const ( OpPopCountUint16x32 OpSaturatedAddUint16x32 OpSaturatedSubUint16x32 + OpShiftLeftUint16x32 + OpShiftLeftAndFillUpperFromUint16x32 + OpShiftRightUint16x32 + OpShiftRightAndFillUpperFromUint16x32 + OpShiftRightSignExtendedUint16x32 OpSubUint16x32 OpAddUint16x8 OpAndUint16x8 @@ -4976,6 +5346,11 @@ const ( OpMaskedPopCountUint16x8 OpMaskedSaturatedAddUint16x8 OpMaskedSaturatedSubUint16x8 + OpMaskedShiftLeftUint16x8 + OpMaskedShiftLeftAndFillUpperFromUint16x8 + OpMaskedShiftRightUint16x8 + OpMaskedShiftRightAndFillUpperFromUint16x8 + OpMaskedShiftRightSignExtendedUint16x8 OpMaskedSubUint16x8 OpMaxUint16x8 OpMinUint16x8 @@ -4987,6 +5362,13 @@ const ( OpPopCountUint16x8 OpSaturatedAddUint16x8 OpSaturatedSubUint16x8 + OpShiftAllLeftUint16x8 + OpShiftAllRightUint16x8 + OpShiftLeftUint16x8 + OpShiftLeftAndFillUpperFromUint16x8 + OpShiftRightUint16x8 + OpShiftRightAndFillUpperFromUint16x8 + OpShiftRightSignExtendedUint16x8 OpSubUint16x8 OpXorUint16x8 OpAddUint32x16 @@ -5010,7 +5392,14 @@ const ( OpMaskedNotEqualUint32x16 OpMaskedOrUint32x16 OpMaskedPopCountUint32x16 + OpMaskedRotateLeftUint32x16 + OpMaskedRotateRightUint32x16 OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16 + OpMaskedShiftLeftUint32x16 + OpMaskedShiftLeftAndFillUpperFromUint32x16 + OpMaskedShiftRightUint32x16 + OpMaskedShiftRightAndFillUpperFromUint32x16 + OpMaskedShiftRightSignExtendedUint32x16 OpMaskedSubUint32x16 OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x16 OpMaskedXorUint32x16 @@ -5019,7 +5408,14 @@ const ( OpNotEqualUint32x16 OpOrUint32x16 OpPopCountUint32x16 + OpRotateLeftUint32x16 + OpRotateRightUint32x16 OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16 + OpShiftLeftUint32x16 + 
OpShiftLeftAndFillUpperFromUint32x16 + OpShiftRightUint32x16 + OpShiftRightAndFillUpperFromUint32x16 + OpShiftRightSignExtendedUint32x16 OpSubUint32x16 OpUnsignedSignedQuadDotProdAccumulateUint32x16 OpXorUint32x16 @@ -5044,7 +5440,14 @@ const ( OpMaskedNotEqualUint32x4 OpMaskedOrUint32x4 OpMaskedPopCountUint32x4 + OpMaskedRotateLeftUint32x4 + OpMaskedRotateRightUint32x4 OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4 + OpMaskedShiftLeftUint32x4 + OpMaskedShiftLeftAndFillUpperFromUint32x4 + OpMaskedShiftRightUint32x4 + OpMaskedShiftRightAndFillUpperFromUint32x4 + OpMaskedShiftRightSignExtendedUint32x4 OpMaskedSubUint32x4 OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x4 OpMaskedXorUint32x4 @@ -5056,7 +5459,16 @@ const ( OpPairwiseAddUint32x4 OpPairwiseSubUint32x4 OpPopCountUint32x4 + OpRotateLeftUint32x4 + OpRotateRightUint32x4 OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4 + OpShiftAllLeftUint32x4 + OpShiftAllRightUint32x4 + OpShiftLeftUint32x4 + OpShiftLeftAndFillUpperFromUint32x4 + OpShiftRightUint32x4 + OpShiftRightAndFillUpperFromUint32x4 + OpShiftRightSignExtendedUint32x4 OpSubUint32x4 OpUnsignedSignedQuadDotProdAccumulateUint32x4 OpXorUint32x4 @@ -5081,7 +5493,14 @@ const ( OpMaskedNotEqualUint32x8 OpMaskedOrUint32x8 OpMaskedPopCountUint32x8 + OpMaskedRotateLeftUint32x8 + OpMaskedRotateRightUint32x8 OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8 + OpMaskedShiftLeftUint32x8 + OpMaskedShiftLeftAndFillUpperFromUint32x8 + OpMaskedShiftRightUint32x8 + OpMaskedShiftRightAndFillUpperFromUint32x8 + OpMaskedShiftRightSignExtendedUint32x8 OpMaskedSubUint32x8 OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x8 OpMaskedXorUint32x8 @@ -5093,7 +5512,16 @@ const ( OpPairwiseAddUint32x8 OpPairwiseSubUint32x8 OpPopCountUint32x8 + OpRotateLeftUint32x8 + OpRotateRightUint32x8 OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8 + OpShiftAllLeftUint32x8 + OpShiftAllRightUint32x8 + OpShiftLeftUint32x8 + OpShiftLeftAndFillUpperFromUint32x8 + OpShiftRightUint32x8 + OpShiftRightAndFillUpperFromUint32x8 + OpShiftRightSignExtendedUint32x8 OpSubUint32x8 OpUnsignedSignedQuadDotProdAccumulateUint32x8 OpXorUint32x8 @@ -5119,6 +5547,15 @@ const ( OpMaskedNotEqualUint64x2 OpMaskedOrUint64x2 OpMaskedPopCountUint64x2 + OpMaskedRotateLeftUint64x2 + OpMaskedRotateRightUint64x2 + OpMaskedShiftAllLeftUint64x2 + OpMaskedShiftAllRightUint64x2 + OpMaskedShiftLeftUint64x2 + OpMaskedShiftLeftAndFillUpperFromUint64x2 + OpMaskedShiftRightUint64x2 + OpMaskedShiftRightAndFillUpperFromUint64x2 + OpMaskedShiftRightSignExtendedUint64x2 OpMaskedSubUint64x2 OpMaskedXorUint64x2 OpMaxUint64x2 @@ -5127,6 +5564,15 @@ const ( OpNotEqualUint64x2 OpOrUint64x2 OpPopCountUint64x2 + OpRotateLeftUint64x2 + OpRotateRightUint64x2 + OpShiftAllLeftUint64x2 + OpShiftAllRightUint64x2 + OpShiftLeftUint64x2 + OpShiftLeftAndFillUpperFromUint64x2 + OpShiftRightUint64x2 + OpShiftRightAndFillUpperFromUint64x2 + OpShiftRightSignExtendedUint64x2 OpSubUint64x2 OpXorUint64x2 OpAddUint64x4 @@ -5151,6 +5597,15 @@ const ( OpMaskedNotEqualUint64x4 OpMaskedOrUint64x4 OpMaskedPopCountUint64x4 + OpMaskedRotateLeftUint64x4 + OpMaskedRotateRightUint64x4 + OpMaskedShiftAllLeftUint64x4 + OpMaskedShiftAllRightUint64x4 + OpMaskedShiftLeftUint64x4 + OpMaskedShiftLeftAndFillUpperFromUint64x4 + OpMaskedShiftRightUint64x4 + OpMaskedShiftRightAndFillUpperFromUint64x4 + OpMaskedShiftRightSignExtendedUint64x4 OpMaskedSubUint64x4 OpMaskedXorUint64x4 OpMaxUint64x4 @@ -5159,6 +5614,15 @@ const ( OpNotEqualUint64x4 OpOrUint64x4 OpPopCountUint64x4 
+ OpRotateLeftUint64x4 + OpRotateRightUint64x4 + OpShiftAllLeftUint64x4 + OpShiftAllRightUint64x4 + OpShiftLeftUint64x4 + OpShiftLeftAndFillUpperFromUint64x4 + OpShiftRightUint64x4 + OpShiftRightAndFillUpperFromUint64x4 + OpShiftRightSignExtendedUint64x4 OpSubUint64x4 OpXorUint64x4 OpAddUint64x8 @@ -5183,6 +5647,15 @@ const ( OpMaskedNotEqualUint64x8 OpMaskedOrUint64x8 OpMaskedPopCountUint64x8 + OpMaskedRotateLeftUint64x8 + OpMaskedRotateRightUint64x8 + OpMaskedShiftAllLeftUint64x8 + OpMaskedShiftAllRightUint64x8 + OpMaskedShiftLeftUint64x8 + OpMaskedShiftLeftAndFillUpperFromUint64x8 + OpMaskedShiftRightUint64x8 + OpMaskedShiftRightAndFillUpperFromUint64x8 + OpMaskedShiftRightSignExtendedUint64x8 OpMaskedSubUint64x8 OpMaskedXorUint64x8 OpMaxUint64x8 @@ -5191,6 +5664,15 @@ const ( OpNotEqualUint64x8 OpOrUint64x8 OpPopCountUint64x8 + OpRotateLeftUint64x8 + OpRotateRightUint64x8 + OpShiftAllLeftUint64x8 + OpShiftAllRightUint64x8 + OpShiftLeftUint64x8 + OpShiftLeftAndFillUpperFromUint64x8 + OpShiftRightUint64x8 + OpShiftRightAndFillUpperFromUint64x8 + OpShiftRightSignExtendedUint64x8 OpSubUint64x8 OpXorUint64x8 OpAddUint8x16 @@ -5483,20 +5965,140 @@ const ( OpRoundWithPrecisionFloat64x8 OpTruncSuppressExceptionWithPrecisionFloat64x8 OpTruncWithPrecisionFloat64x8 + OpMaskedShiftAllLeftAndFillUpperFromInt16x16 + OpMaskedShiftAllRightAndFillUpperFromInt16x16 + OpShiftAllLeftAndFillUpperFromInt16x16 + OpShiftAllRightAndFillUpperFromInt16x16 + OpMaskedShiftAllLeftAndFillUpperFromInt16x32 + OpMaskedShiftAllRightAndFillUpperFromInt16x32 + OpShiftAllLeftAndFillUpperFromInt16x32 + OpShiftAllRightAndFillUpperFromInt16x32 OpGetElemInt16x8 + OpMaskedShiftAllLeftAndFillUpperFromInt16x8 + OpMaskedShiftAllRightAndFillUpperFromInt16x8 OpSetElemInt16x8 + OpShiftAllLeftAndFillUpperFromInt16x8 + OpShiftAllRightAndFillUpperFromInt16x8 + OpMaskedRotateAllLeftInt32x16 + OpMaskedRotateAllRightInt32x16 + OpMaskedShiftAllLeftAndFillUpperFromInt32x16 + OpMaskedShiftAllRightAndFillUpperFromInt32x16 + OpRotateAllLeftInt32x16 + OpRotateAllRightInt32x16 + OpShiftAllLeftAndFillUpperFromInt32x16 + OpShiftAllRightAndFillUpperFromInt32x16 OpGetElemInt32x4 + OpMaskedRotateAllLeftInt32x4 + OpMaskedRotateAllRightInt32x4 + OpMaskedShiftAllLeftAndFillUpperFromInt32x4 + OpMaskedShiftAllRightAndFillUpperFromInt32x4 + OpRotateAllLeftInt32x4 + OpRotateAllRightInt32x4 OpSetElemInt32x4 + OpShiftAllLeftAndFillUpperFromInt32x4 + OpShiftAllRightAndFillUpperFromInt32x4 + OpMaskedRotateAllLeftInt32x8 + OpMaskedRotateAllRightInt32x8 + OpMaskedShiftAllLeftAndFillUpperFromInt32x8 + OpMaskedShiftAllRightAndFillUpperFromInt32x8 + OpRotateAllLeftInt32x8 + OpRotateAllRightInt32x8 + OpShiftAllLeftAndFillUpperFromInt32x8 + OpShiftAllRightAndFillUpperFromInt32x8 OpGetElemInt64x2 + OpMaskedRotateAllLeftInt64x2 + OpMaskedRotateAllRightInt64x2 + OpMaskedShiftAllLeftAndFillUpperFromInt64x2 + OpMaskedShiftAllRightAndFillUpperFromInt64x2 + OpRotateAllLeftInt64x2 + OpRotateAllRightInt64x2 OpSetElemInt64x2 + OpShiftAllLeftAndFillUpperFromInt64x2 + OpShiftAllRightAndFillUpperFromInt64x2 + OpMaskedRotateAllLeftInt64x4 + OpMaskedRotateAllRightInt64x4 + OpMaskedShiftAllLeftAndFillUpperFromInt64x4 + OpMaskedShiftAllRightAndFillUpperFromInt64x4 + OpRotateAllLeftInt64x4 + OpRotateAllRightInt64x4 + OpShiftAllLeftAndFillUpperFromInt64x4 + OpShiftAllRightAndFillUpperFromInt64x4 + OpMaskedRotateAllLeftInt64x8 + OpMaskedRotateAllRightInt64x8 + OpMaskedShiftAllLeftAndFillUpperFromInt64x8 + OpMaskedShiftAllRightAndFillUpperFromInt64x8 + OpRotateAllLeftInt64x8 + 
OpRotateAllRightInt64x8 + OpShiftAllLeftAndFillUpperFromInt64x8 + OpShiftAllRightAndFillUpperFromInt64x8 OpGetElemInt8x16 OpSetElemInt8x16 + OpMaskedShiftAllLeftAndFillUpperFromUint16x16 + OpMaskedShiftAllRightAndFillUpperFromUint16x16 + OpShiftAllLeftAndFillUpperFromUint16x16 + OpShiftAllRightAndFillUpperFromUint16x16 + OpMaskedShiftAllLeftAndFillUpperFromUint16x32 + OpMaskedShiftAllRightAndFillUpperFromUint16x32 + OpShiftAllLeftAndFillUpperFromUint16x32 + OpShiftAllRightAndFillUpperFromUint16x32 OpGetElemUint16x8 + OpMaskedShiftAllLeftAndFillUpperFromUint16x8 + OpMaskedShiftAllRightAndFillUpperFromUint16x8 OpSetElemUint16x8 + OpShiftAllLeftAndFillUpperFromUint16x8 + OpShiftAllRightAndFillUpperFromUint16x8 + OpMaskedRotateAllLeftUint32x16 + OpMaskedRotateAllRightUint32x16 + OpMaskedShiftAllLeftAndFillUpperFromUint32x16 + OpMaskedShiftAllRightAndFillUpperFromUint32x16 + OpRotateAllLeftUint32x16 + OpRotateAllRightUint32x16 + OpShiftAllLeftAndFillUpperFromUint32x16 + OpShiftAllRightAndFillUpperFromUint32x16 OpGetElemUint32x4 + OpMaskedRotateAllLeftUint32x4 + OpMaskedRotateAllRightUint32x4 + OpMaskedShiftAllLeftAndFillUpperFromUint32x4 + OpMaskedShiftAllRightAndFillUpperFromUint32x4 + OpRotateAllLeftUint32x4 + OpRotateAllRightUint32x4 OpSetElemUint32x4 + OpShiftAllLeftAndFillUpperFromUint32x4 + OpShiftAllRightAndFillUpperFromUint32x4 + OpMaskedRotateAllLeftUint32x8 + OpMaskedRotateAllRightUint32x8 + OpMaskedShiftAllLeftAndFillUpperFromUint32x8 + OpMaskedShiftAllRightAndFillUpperFromUint32x8 + OpRotateAllLeftUint32x8 + OpRotateAllRightUint32x8 + OpShiftAllLeftAndFillUpperFromUint32x8 + OpShiftAllRightAndFillUpperFromUint32x8 OpGetElemUint64x2 + OpMaskedRotateAllLeftUint64x2 + OpMaskedRotateAllRightUint64x2 + OpMaskedShiftAllLeftAndFillUpperFromUint64x2 + OpMaskedShiftAllRightAndFillUpperFromUint64x2 + OpRotateAllLeftUint64x2 + OpRotateAllRightUint64x2 OpSetElemUint64x2 + OpShiftAllLeftAndFillUpperFromUint64x2 + OpShiftAllRightAndFillUpperFromUint64x2 + OpMaskedRotateAllLeftUint64x4 + OpMaskedRotateAllRightUint64x4 + OpMaskedShiftAllLeftAndFillUpperFromUint64x4 + OpMaskedShiftAllRightAndFillUpperFromUint64x4 + OpRotateAllLeftUint64x4 + OpRotateAllRightUint64x4 + OpShiftAllLeftAndFillUpperFromUint64x4 + OpShiftAllRightAndFillUpperFromUint64x4 + OpMaskedRotateAllLeftUint64x8 + OpMaskedRotateAllRightUint64x8 + OpMaskedShiftAllLeftAndFillUpperFromUint64x8 + OpMaskedShiftAllRightAndFillUpperFromUint64x8 + OpRotateAllLeftUint64x8 + OpRotateAllRightUint64x8 + OpShiftAllLeftAndFillUpperFromUint64x8 + OpShiftAllRightAndFillUpperFromUint64x8 OpGetElemUint8x16 OpSetElemUint8x16 ) @@ -21551,6 +22153,85 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSLLVWMasked256", + argLen: 3, + asm: x86.AVPSLLVW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHLDVWMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLVWMasked256", + argLen: 3, + asm: x86.AVPSRLVW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHRDVWMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAVWMasked256", + argLen: 3, + asm: x86.AVPSRAVW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSUBWMasked256", argLen: 3, @@ -21738,6 +22419,122 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSLLW256", + argLen: 2, + asm: x86.AVPSLLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLW256", + argLen: 2, + asm: x86.AVPSRLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAW256", + argLen: 2, + asm: x86.AVPSRAW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSLLVW256", + argLen: 2, + asm: x86.AVPSLLVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHLDVW256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHLDVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLVW256", + argLen: 2, + asm: x86.AVPSRLVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHRDVW256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAVW256", + argLen: 2, + asm: x86.AVPSRAVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSIGNW256", argLen: 2, @@ -21948,6 +22745,85 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSLLVWMasked512", + argLen: 3, + asm: x86.AVPSLLVW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHLDVWMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLVWMasked512", + argLen: 3, + asm: x86.AVPSRLVW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHRDVWMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAVWMasked512", + argLen: 3, + asm: x86.AVPSRAVW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSUBWMasked512", argLen: 3, @@ -22079,6 +22955,80 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSLLVW512", + argLen: 2, + asm: x86.AVPSLLVW, + reg: regInfo{ + inputs: []inputInfo{ + 
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHLDVW512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHLDVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLVW512", + argLen: 2, + asm: x86.AVPSRLVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHRDVW512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAVW512", + argLen: 2, + asm: x86.AVPSRAVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSUBW512", argLen: 2, @@ -22304,6 +23254,85 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSLLVWMasked128", + argLen: 3, + asm: x86.AVPSLLVW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHLDVWMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLVWMasked128", + argLen: 3, + asm: x86.AVPSRLVW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHRDVWMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAVWMasked128", + argLen: 3, + asm: x86.AVPSRAVW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSUBWMasked128", argLen: 3, @@ -22491,6 +23520,122 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSLLW128", + argLen: 2, + asm: x86.AVPSLLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLW128", + argLen: 2, + asm: x86.AVPSRLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAW128", + argLen: 2, + asm: x86.AVPSRAW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSLLVW128", + argLen: 2, + asm: x86.AVPSLLVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHLDVW128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHLDVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLVW128", + argLen: 2, + asm: x86.AVPSRLVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHRDVW128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAVW128", + argLen: 2, + asm: x86.AVPSRAVW, 
+ reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSIGNW128", argLen: 2, @@ -22732,6 +23877,36 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPROLVDMasked512", + argLen: 3, + asm: x86.AVPROLVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPRORVDMasked512", + argLen: 3, + asm: x86.AVPRORVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPDPWSSDSMasked512", argLen: 4, @@ -22766,6 +23941,85 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSLLVDMasked512", + argLen: 3, + asm: x86.AVPSLLVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHLDVDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLVDMasked512", + argLen: 3, + asm: x86.AVPSRLVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHRDVDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAVDMasked512", + argLen: 3, + asm: x86.AVPSRAVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 + }, + }, + }, { name: "VPSUBDMasked512", argLen: 3, @@ -22903,6 +24157,34 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPROLVD512", + argLen: 2, + asm: x86.AVPROLVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPRORVD512", + argLen: 2, + asm: x86.AVPRORVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPDPWSSDS512", argLen: 3, @@ -22935,6 +24217,80 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSLLVD512", + argLen: 2, + asm: x86.AVPSLLVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHLDVD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHLDVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLVD512", + argLen: 2, + asm: x86.AVPSRLVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHRDVD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAVD512", + argLen: 2, + asm: x86.AVPSRAVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSUBD512", argLen: 2, @@ -23193,6 +24549,36 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPROLVDMasked128", + argLen: 3, + asm: x86.AVPROLVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPRORVDMasked128", + argLen: 3, + asm: x86.AVPRORVD, + reg: regInfo{ + inputs: 
[]inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPDPWSSDSMasked128", argLen: 4, @@ -23227,6 +24613,85 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSLLVDMasked128", + argLen: 3, + asm: x86.AVPSLLVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHLDVDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLVDMasked128", + argLen: 3, + asm: x86.AVPSRLVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHRDVDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAVDMasked128", + argLen: 3, + asm: x86.AVPSRAVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSUBDMasked128", argLen: 3, @@ -23392,6 +24857,34 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPROLVD128", + argLen: 2, + asm: x86.AVPROLVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPRORVD128", + argLen: 2, + asm: x86.AVPRORVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPDPWSSDS128", argLen: 3, @@ -23424,6 
+24917,122 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSLLD128", + argLen: 2, + asm: x86.AVPSLLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLD128", + argLen: 2, + asm: x86.AVPSRLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAD128", + argLen: 2, + asm: x86.AVPSRAD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSLLVD128", + argLen: 2, + asm: x86.AVPSLLVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHLDVD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHLDVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLVD128", + argLen: 2, + asm: x86.AVPSRLVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHRDVD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAVD128", + argLen: 2, + asm: x86.AVPSRAVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSIGND128", argLen: 2, @@ -23681,6 +25290,36 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPROLVDMasked256", + argLen: 3, + asm: x86.AVPROLVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPRORVDMasked256", + argLen: 3, + asm: x86.AVPRORVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPDPWSSDSMasked256", argLen: 4, @@ -23715,6 +25354,85 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSLLVDMasked256", + argLen: 3, + asm: x86.AVPSLLVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHLDVDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLVDMasked256", + argLen: 3, + asm: x86.AVPSRLVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHRDVDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAVDMasked256", + argLen: 3, + asm: x86.AVPSRAVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSUBDMasked256", argLen: 3, @@ -23880,6 +25598,34 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPROLVD256", + argLen: 2, + asm: x86.AVPROLVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPRORVD256", + argLen: 2, + asm: x86.AVPRORVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPDPWSSDS256", argLen: 3, @@ -23912,6 +25658,122 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSLLD256", + argLen: 2, + asm: x86.AVPSLLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLD256", + argLen: 2, + asm: x86.AVPSRLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAD256", + argLen: 2, + asm: x86.AVPSRAD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSLLVD256", + argLen: 2, + asm: x86.AVPSLLVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHLDVD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHLDVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLVD256", + argLen: 2, + asm: x86.AVPSRLVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHRDVD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAVD256", + argLen: 2, + asm: x86.AVPSRAVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSIGND256", argLen: 2, @@ -24155,9 +26017,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBQMasked128", + name: "VPROLVQMasked128", argLen: 3, - asm: x86.AVPSUBQ, + asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 
K3 K4 K5 K6 K7 @@ -24170,10 +26032,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORQMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPXORQ, + name: "VPRORVQMasked128", + argLen: 3, + asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24186,29 +26047,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSQ128", - argLen: 2, - commutative: true, - asm: x86.AVPMAXSQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPMINSQ128", - argLen: 2, - commutative: true, - asm: x86.AVPMINSQ, + name: "VPSLLQMasked128", + argLen: 3, + asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24216,14 +26062,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQ128", - argLen: 2, - commutative: true, - asm: x86.AVPMULLQ, + name: "VPSRLQMasked128", + argLen: 3, + asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24231,12 +26077,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTQ128", - argLen: 1, - asm: x86.AVPOPCNTQ, + name: "VPSRAQMasked128", + argLen: 3, + asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24244,13 +26092,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBQ128", - argLen: 2, - asm: x86.AVPSUBQ, + name: "VPSLLVQMasked128", + argLen: 3, + asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24258,12 +26107,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSQ256", - argLen: 1, - asm: x86.AVPABSQ, + name: "VPSHLDVQMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + 
{3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24271,14 +26124,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDQ256", - argLen: 2, - commutative: true, - asm: x86.AVPADDQ, + name: "VPSRLVQMasked128", + argLen: 3, + asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24286,14 +26139,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQQ256", - argLen: 2, - commutative: true, - asm: x86.AVPCMPEQQ, + name: "VPSHRDVQMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24301,13 +26156,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPGTQ256", - argLen: 2, - asm: x86.AVPCMPGTQ, + name: "VPSRAVQMasked128", + argLen: 3, + asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24315,13 +26171,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSQMasked256", - argLen: 2, - asm: x86.AVPABSQ, + name: "VPSUBQMasked128", + argLen: 3, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24329,10 +26186,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDQMasked256", + name: "VPXORQMasked128", argLen: 3, commutative: true, - asm: x86.AVPADDQ, + asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24345,15 +26202,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDQMasked256", - argLen: 3, + name: "VPMAXSQ128", + argLen: 2, commutative: true, - asm: x86.AVPANDQ, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24361,14 +26217,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNQMasked256", - argLen: 3, - asm: x86.AVPANDNQ, + name: "VPMINSQ128", + argLen: 2, + commutative: true, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24376,15 +26232,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSQMasked256", - argLen: 3, + name: "VPMULLQ128", + argLen: 2, commutative: true, - asm: x86.AVPMAXSQ, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24392,15 +26247,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSQMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMINSQ, + name: "VPOPCNTQ128", + argLen: 1, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24408,15 +26260,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULDQMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMULDQ, + name: "VPROLVQ128", + argLen: 2, + asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24424,15 +26274,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMULLQ, + name: "VPRORVQ128", + argLen: 2, + asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
}, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24440,15 +26288,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORQMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPORQ, + name: "VPSLLQ128", + argLen: 2, + asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24456,13 +26302,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTQMasked256", + name: "VPSRLQ128", argLen: 2, - asm: x86.AVPOPCNTQ, + asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24470,14 +26316,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBQMasked256", - argLen: 3, - asm: x86.AVPSUBQ, + name: "VPSRAQ128", + argLen: 2, + asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24485,15 +26330,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORQMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPXORQ, + name: "VPSLLVQ128", + argLen: 2, + asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24501,14 +26344,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSQ256", - argLen: 2, - commutative: true, - asm: x86.AVPMAXSQ, + name: "VPSHLDVQ128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHLDVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24516,10 +26360,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSQ256", - argLen: 2, - commutative: true, - asm: x86.AVPMINSQ, + name: "VPSRLVQ128", + argLen: 2, + asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24531,14 +26374,15 @@ var opcodeTable = [...]opInfo{ 
}, }, { - name: "VPMULLQ256", - argLen: 2, - commutative: true, - asm: x86.AVPMULLQ, + name: "VPSHRDVQ128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24546,12 +26390,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTQ256", - argLen: 1, - asm: x86.AVPOPCNTQ, + name: "VPSRAVQ128", + argLen: 2, + asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24559,7 +26404,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBQ256", + name: "VPSUBQ128", argLen: 2, asm: x86.AVPSUBQ, reg: regInfo{ @@ -24573,7 +26418,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSQ512", + name: "VPABSQ256", argLen: 1, asm: x86.AVPABSQ, reg: regInfo{ @@ -24586,7 +26431,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDQ512", + name: "VPADDQ256", argLen: 2, commutative: true, asm: x86.AVPADDQ, @@ -24601,10 +26446,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDQ512", + name: "VPCMPEQQ256", argLen: 2, commutative: true, - asm: x86.AVPANDQ, + asm: x86.AVPCMPEQQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24616,9 +26461,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNQ512", + name: "VPCMPGTQ256", argLen: 2, - asm: x86.AVPANDNQ, + asm: x86.AVPCMPGTQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24630,7 +26475,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSQMasked512", + name: "VPABSQMasked256", argLen: 2, asm: x86.AVPABSQ, reg: regInfo{ @@ -24644,7 +26489,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDQMasked512", + name: "VPADDQMasked256", argLen: 3, commutative: true, asm: x86.AVPADDQ, @@ -24660,7 +26505,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDQMasked512", + name: "VPANDQMasked256", argLen: 3, commutative: true, asm: x86.AVPANDQ, @@ -24676,7 +26521,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNQMasked512", + name: "VPANDNQMasked256", argLen: 3, asm: x86.AVPANDNQ, reg: regInfo{ @@ -24691,7 +26536,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSQMasked512", + name: "VPMAXSQMasked256", argLen: 3, commutative: true, asm: x86.AVPMAXSQ, @@ -24707,7 +26552,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSQMasked512", + name: "VPMINSQMasked256", argLen: 3, commutative: true, asm: x86.AVPMINSQ, @@ -24723,7 +26568,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULDQMasked512", + name: "VPMULDQMasked256", argLen: 3, commutative: true, asm: x86.AVPMULDQ, @@ -24739,7 +26584,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQMasked512", + name: "VPMULLQMasked256", argLen: 3, commutative: true, asm: x86.AVPMULLQ, @@ -24755,7 +26600,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORQMasked512", + name: "VPORQMasked256", argLen: 3, commutative: true, asm: x86.AVPORQ, @@ -24771,7 +26616,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTQMasked512", + name: 
"VPOPCNTQMasked256", argLen: 2, asm: x86.AVPOPCNTQ, reg: regInfo{ @@ -24785,9 +26630,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBQMasked512", + name: "VPROLVQMasked256", argLen: 3, - asm: x86.AVPSUBQ, + asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24800,10 +26645,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORQMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPXORQ, + name: "VPRORVQMasked256", + argLen: 3, + asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24816,14 +26660,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSQ512", - argLen: 2, - commutative: true, - asm: x86.AVPMAXSQ, + name: "VPSLLQMasked256", + argLen: 3, + asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24831,14 +26675,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSQ512", - argLen: 2, - commutative: true, - asm: x86.AVPMINSQ, + name: "VPSRLQMasked256", + argLen: 3, + asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24846,14 +26690,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULDQ512", - argLen: 2, - commutative: true, - asm: x86.AVPMULDQ, + name: "VPSRAQMasked256", + argLen: 3, + asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24861,14 +26705,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQ512", - argLen: 2, - commutative: true, - asm: x86.AVPMULLQ, + name: "VPSLLVQMasked256", + argLen: 3, + asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24876,14 +26720,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORQ512", - argLen: 2, - commutative: true, - asm: x86.AVPORQ, + name: "VPSHLDVQMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24891,12 +26737,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTQ512", - argLen: 1, - asm: x86.AVPOPCNTQ, + name: "VPSRLVQMasked256", + argLen: 3, + asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24904,13 +26752,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBQ512", - argLen: 2, - asm: x86.AVPSUBQ, + name: "VPSHRDVQMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24918,14 +26769,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORQ512", - argLen: 2, - commutative: true, - asm: x86.AVPXORQ, + name: "VPSRAVQMasked256", + argLen: 3, + asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24933,12 +26784,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSB128", - argLen: 1, - asm: x86.AVPABSB, + name: "VPSUBQMasked256", + argLen: 3, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24946,14 +26799,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDB128", - argLen: 2, + name: "VPXORQMasked256", + argLen: 3, commutative: true, - asm: x86.AVPADDB, + asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24961,10 +26815,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAND128", + name: "VPMAXSQ256", argLen: 2, commutative: true, - asm: x86.AVPAND, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24976,9 +26830,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDN128", - argLen: 2, - asm: x86.AVPANDN, + name: "VPMINSQ256", + argLen: 2, + commutative: true, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24990,10 +26845,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQB128", + name: "VPMULLQ256", argLen: 2, commutative: true, - asm: x86.AVPCMPEQB, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25005,13 +26860,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPGTB128", - argLen: 2, - asm: x86.AVPCMPGTB, + name: "VPOPCNTQ256", + argLen: 1, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25019,13 +26873,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSBMasked128", + name: "VPROLVQ256", argLen: 2, - asm: x86.AVPABSB, + asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25033,15 +26887,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDBMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPADDB, + name: "VPRORVQ256", + argLen: 2, + asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25049,15 +26901,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSBMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMAXSB, + name: "VPSLLQ256", + argLen: 2, + asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25065,15 +26915,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSBMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMINSB, + name: "VPSRLQ256", + argLen: 2, + asm: 
x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25081,13 +26929,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTBMasked128", + name: "VPSRAQ256", argLen: 2, - asm: x86.AVPOPCNTB, + asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25095,15 +26943,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSBMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPADDSB, + name: "VPSLLVQ256", + argLen: 2, + asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25111,14 +26957,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSBMasked128", - argLen: 3, - asm: x86.AVPSUBSB, + name: "VPSHLDVQ256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHLDVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25126,14 +26973,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBBMasked128", - argLen: 3, - asm: x86.AVPSUBB, + name: "VPSRLVQ256", + argLen: 2, + asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25141,14 +26987,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSB128", - argLen: 2, - commutative: true, - asm: x86.AVPMAXSB, + name: "VPSHRDVQ256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25156,10 +27003,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSB128", - argLen: 2, - commutative: true, - asm: x86.AVPMINSB, + name: "VPSRAVQ256", + argLen: 2, + asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25171,10 +27017,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOR128", - argLen: 2, - commutative: true, - asm: x86.AVPOR, + name: "VPSUBQ256", + argLen: 2, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25186,9 +27031,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTB128", + name: "VPABSQ512", argLen: 1, - asm: x86.AVPOPCNTB, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25199,10 +27044,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSB128", + name: "VPADDQ512", argLen: 2, commutative: true, - asm: x86.AVPADDSB, + asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25214,9 +27059,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSB128", - argLen: 2, - asm: x86.AVPSUBSB, + name: "VPANDQ512", + argLen: 2, + commutative: true, + asm: x86.AVPANDQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25228,9 +27074,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSIGNB128", + name: "VPANDNQ512", argLen: 2, - asm: x86.AVPSIGNB, + asm: x86.AVPANDNQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25242,13 +27088,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBB128", + name: "VPABSQMasked512", argLen: 2, - asm: x86.AVPSUBB, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25256,14 +27102,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXOR128", - argLen: 2, + name: "VPADDQMasked512", + argLen: 3, commutative: true, - asm: x86.AVPXOR, + asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25271,27 +27118,46 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSB256", - argLen: 1, - asm: x86.AVPABSB, + name: "VPANDQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPANDQ, reg: regInfo{ inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 }, + }, + }, + { + name: "VPANDNQMasked512", + argLen: 3, + asm: x86.AVPANDNQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPADDB256", - argLen: 2, + name: "VPMAXSQMasked512", + argLen: 3, commutative: true, - asm: x86.AVPADDB, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25299,14 +27165,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAND256", - argLen: 2, + name: "VPMINSQMasked512", + argLen: 3, commutative: true, - asm: x86.AVPAND, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25314,13 +27181,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDN256", - argLen: 2, - asm: x86.AVPANDN, + name: "VPMULDQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25328,14 +27197,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQB256", - argLen: 2, + name: "VPMULLQMasked512", + argLen: 3, commutative: true, - asm: x86.AVPCMPEQB, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25343,13 +27213,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPGTB256", - argLen: 2, - asm: x86.AVPCMPGTB, + name: "VPORQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25357,9 +27229,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSBMasked256", + name: "VPOPCNTQMasked512", argLen: 2, - asm: x86.AVPABSB, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25371,10 +27243,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDBMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPADDB, + name: "VPROLVQMasked512", + argLen: 3, + asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25387,10 +27258,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSBMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMAXSB, + name: "VPRORVQMasked512", + argLen: 3, + asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25403,10 +27273,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSBMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMINSB, + name: "VPSLLQMasked512", + argLen: 3, + asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25419,13 +27288,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTBMasked256", - argLen: 2, - asm: x86.AVPOPCNTB, + name: "VPSRLQMasked512", + argLen: 3, + asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25433,10 +27303,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSBMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPADDSB, + name: "VPSRAQMasked512", + argLen: 3, + asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25449,9 +27318,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSBMasked256", + name: "VPSLLVQMasked512", argLen: 3, - asm: x86.AVPSUBSB, + asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25464,9 +27333,26 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBBMasked256", + name: "VPSHLDVQMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLVQMasked512", argLen: 3, - asm: x86.AVPSUBB, + asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25479,14 +27365,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSB256", - argLen: 2, - commutative: true, - asm: x86.AVPMAXSB, + name: "VPSHRDVQMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + 
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25494,14 +27382,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSB256", - argLen: 2, - commutative: true, - asm: x86.AVPMINSB, + name: "VPSRAVQMasked512", + argLen: 3, + asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25509,14 +27397,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOR256", - argLen: 2, - commutative: true, - asm: x86.AVPOR, + name: "VPSUBQMasked512", + argLen: 3, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25524,12 +27412,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTB256", - argLen: 1, - asm: x86.AVPOPCNTB, + name: "VPXORQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25537,10 +27428,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSB256", + name: "VPMAXSQ512", argLen: 2, commutative: true, - asm: x86.AVPADDSB, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25552,9 +27443,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSB256", - argLen: 2, - asm: x86.AVPSUBSB, + name: "VPMINSQ512", + argLen: 2, + commutative: true, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25566,9 +27458,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSIGNB256", - argLen: 2, - asm: x86.AVPSIGNB, + name: "VPMULDQ512", + argLen: 2, + commutative: true, + asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25580,9 +27473,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBB256", - argLen: 2, - asm: x86.AVPSUBB, + name: "VPMULLQ512", + argLen: 2, + commutative: true, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25594,10 +27488,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXOR256", + name: "VPORQ512", argLen: 2, 
commutative: true, - asm: x86.AVPXOR, + asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25609,9 +27503,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSB512", + name: "VPOPCNTQ512", argLen: 1, - asm: x86.AVPABSB, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25622,10 +27516,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDB512", - argLen: 2, - commutative: true, - asm: x86.AVPADDB, + name: "VPROLVQ512", + argLen: 2, + asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25637,13 +27530,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSBMasked512", + name: "VPRORVQ512", argLen: 2, - asm: x86.AVPABSB, + asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25651,15 +27544,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDBMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPADDB, + name: "VPSLLQ512", + argLen: 2, + asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25667,15 +27558,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSBMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMAXSB, + name: "VPSRLQ512", + argLen: 2, + asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25683,15 +27572,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSBMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMINSB, + name: "VPSRAQ512", + argLen: 2, + asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25699,13 +27586,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTBMasked512", + name: "VPSLLVQ512", argLen: 2, - asm: x86.AVPOPCNTB, + asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25713,15 +27600,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSBMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPADDSB, + name: "VPSHLDVQ512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHLDVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25729,14 +27616,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSBMasked512", - argLen: 3, - asm: x86.AVPSUBSB, + name: "VPSRLVQ512", + argLen: 2, + asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25744,14 +27630,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBBMasked512", - argLen: 3, - asm: x86.AVPSUBB, + name: "VPSHRDVQ512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25759,10 +27646,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSB512", - argLen: 2, - commutative: true, - asm: x86.AVPMAXSB, + name: "VPSRAVQ512", + argLen: 2, + asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25774,10 +27660,24 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSB512", + name: "VPSUBQ512", + argLen: 2, + asm: x86.AVPSUBQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPXORQ512", argLen: 2, commutative: true, - asm: x86.AVPMINSB, + asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25789,9 +27689,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTB512", + name: "VPABSB128", argLen: 1, - asm: x86.AVPOPCNTB, + asm: x86.AVPABSB, 
reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25802,10 +27702,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSB512", + name: "VPADDB128", argLen: 2, commutative: true, - asm: x86.AVPADDSB, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25817,9 +27717,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSB512", - argLen: 2, - asm: x86.AVPSUBSB, + name: "VPAND128", + argLen: 2, + commutative: true, + asm: x86.AVPAND, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25831,9 +27732,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBB512", + name: "VPANDN128", argLen: 2, - asm: x86.AVPSUBB, + asm: x86.AVPANDN, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25845,10 +27746,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGW256", + name: "VPCMPEQB128", argLen: 2, commutative: true, - asm: x86.AVPAVGW, + asm: x86.AVPCMPEQB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25860,15 +27761,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGWMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPAVGW, + name: "VPCMPGTB128", + argLen: 2, + asm: x86.AVPCMPGTB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPABSBMasked128", + argLen: 2, + asm: x86.AVPABSB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25876,10 +27789,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUWMasked256", + name: "VPADDBMasked128", argLen: 3, commutative: true, - asm: x86.AVPMAXUW, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25892,10 +27805,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUWMasked256", + name: "VPMAXSBMasked128", argLen: 3, commutative: true, - asm: x86.AVPMINUW, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25908,10 +27821,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHUWMasked256", + name: "VPMINSBMasked128", argLen: 3, commutative: true, - asm: x86.AVPMULHUW, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25924,14 +27837,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUW256", - argLen: 2, - commutative: true, - asm: x86.AVPMAXUW, + name: "VPOPCNTBMasked128", + argLen: 2, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 @@ -25939,14 +27851,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUW256", - argLen: 2, + name: "VPADDSBMasked128", + argLen: 3, commutative: true, - asm: x86.AVPMINUW, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25954,14 +27867,29 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHUW256", - argLen: 2, - commutative: true, - asm: x86.AVPMULHUW, + name: "VPSUBSBMasked128", + argLen: 3, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSUBBMasked128", + argLen: 3, + asm: x86.AVPSUBB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25969,10 +27897,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGW512", + name: "VPMAXSB128", argLen: 2, commutative: true, - asm: x86.AVPAVGW, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25984,15 +27912,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGWMasked512", - argLen: 3, + name: "VPMINSB128", + argLen: 2, commutative: true, - asm: x86.AVPAVGW, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26000,15 +27927,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUWMasked512", - argLen: 3, + name: "VPOR128", + argLen: 2, commutative: true, - asm: x86.AVPMAXUW, + asm: x86.AVPOR, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26016,15 +27942,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUWMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMINUW, + name: "VPOPCNTB128", + argLen: 1, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ 
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26032,15 +27955,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHUWMasked512", - argLen: 3, + name: "VPADDSB128", + argLen: 2, commutative: true, - asm: x86.AVPMULHUW, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26048,10 +27970,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUW512", - argLen: 2, - commutative: true, - asm: x86.AVPMAXUW, + name: "VPSUBSB128", + argLen: 2, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26063,10 +27984,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUW512", - argLen: 2, - commutative: true, - asm: x86.AVPMINUW, + name: "VPSIGNB128", + argLen: 2, + asm: x86.AVPSIGNB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26078,10 +27998,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHUW512", - argLen: 2, - commutative: true, - asm: x86.AVPMULHUW, + name: "VPSUBB128", + argLen: 2, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26093,10 +28012,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGW128", + name: "VPXOR128", argLen: 2, commutative: true, - asm: x86.AVPAVGW, + asm: x86.AVPXOR, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26108,15 +28027,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGWMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPAVGW, + name: "VPABSB256", + argLen: 1, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26124,15 +28040,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUWMasked128", - argLen: 3, + name: "VPADDB256", + argLen: 2, commutative: true, - asm: x86.AVPMAXUW, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26140,15 +28055,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: 
"VPMINUWMasked128", - argLen: 3, + name: "VPAND256", + argLen: 2, commutative: true, - asm: x86.AVPMINUW, + asm: x86.AVPAND, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26156,15 +28070,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHUWMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMULHUW, + name: "VPANDN256", + argLen: 2, + asm: x86.AVPANDN, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26172,10 +28084,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUW128", + name: "VPCMPEQB256", argLen: 2, commutative: true, - asm: x86.AVPMAXUW, + asm: x86.AVPCMPEQB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26187,10 +28099,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUW128", - argLen: 2, - commutative: true, - asm: x86.AVPMINUW, + name: "VPCMPGTB256", + argLen: 2, + asm: x86.AVPCMPGTB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26202,14 +28113,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHUW128", - argLen: 2, - commutative: true, - asm: x86.AVPMULHUW, + name: "VPABSBMasked256", + argLen: 2, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26217,10 +28127,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUDMasked512", + name: "VPADDBMasked256", argLen: 3, commutative: true, - asm: x86.AVPMAXUD, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26233,10 +28143,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUDMasked512", + name: "VPMAXSBMasked256", argLen: 3, commutative: true, - asm: x86.AVPMINUD, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26249,14 +28159,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUD512", - argLen: 2, + name: "VPMINSBMasked256", + argLen: 3, commutative: true, - asm: x86.AVPMAXUD, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26264,14 +28175,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUD512", - argLen: 2, - commutative: true, - asm: x86.AVPMINUD, + name: "VPOPCNTBMasked256", + argLen: 2, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26279,10 +28189,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUDMasked128", + name: "VPADDSBMasked256", argLen: 3, commutative: true, - asm: x86.AVPMAXUD, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26295,10 +28205,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMINUD, + name: "VPSUBSBMasked256", + argLen: 3, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26311,14 +28220,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUD128", - argLen: 2, - commutative: true, - asm: x86.AVPMAXUD, + name: "VPSUBBMasked256", + argLen: 3, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26326,10 +28235,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUD128", + name: "VPMAXSB256", argLen: 2, commutative: true, - asm: x86.AVPMINUD, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26341,10 +28250,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULUDQ128", + name: "VPMINSB256", argLen: 2, commutative: true, - asm: x86.AVPMULUDQ, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26356,15 +28265,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUDMasked256", - argLen: 3, + name: "VPOR256", + argLen: 2, commutative: true, - asm: x86.AVPMAXUD, + asm: x86.AVPOR, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26372,15 +28280,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMINUD, + name: "VPOPCNTB256", + argLen: 1, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26388,10 +28293,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUD256", + name: "VPADDSB256", argLen: 2, commutative: true, - asm: x86.AVPMAXUD, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26403,10 +28308,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUD256", - argLen: 2, - commutative: true, - asm: x86.AVPMINUD, + name: "VPSUBSB256", + argLen: 2, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26418,10 +28322,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULUDQ256", - argLen: 2, - commutative: true, - asm: x86.AVPMULUDQ, + name: "VPSIGNB256", + argLen: 2, + asm: x86.AVPSIGNB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26433,15 +28336,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUQMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMAXUQ, + name: "VPSUBB256", + argLen: 2, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26449,15 +28350,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUQMasked128", - argLen: 3, + name: "VPXOR256", + argLen: 2, commutative: true, - asm: x86.AVPMINUQ, + asm: x86.AVPXOR, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26465,15 +28365,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULUDQMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMULUDQ, + name: "VPABSB512", + argLen: 1, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26481,10 +28378,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUQ128", + name: "VPADDB512", argLen: 2, commutative: true, - asm: x86.AVPMAXUQ, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26496,14 +28393,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUQ128", - argLen: 2, - commutative: true, - asm: x86.AVPMINUQ, + name: "VPABSBMasked512", + argLen: 2, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26511,10 +28407,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUQMasked256", + name: "VPADDBMasked512", argLen: 3, commutative: true, - asm: x86.AVPMAXUQ, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26527,10 +28423,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUQMasked256", + name: "VPMAXSBMasked512", argLen: 3, commutative: true, - asm: x86.AVPMINUQ, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26543,10 +28439,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULUDQMasked256", + name: "VPMINSBMasked512", argLen: 3, commutative: true, - asm: x86.AVPMULUDQ, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26559,29 +28455,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUQ256", - argLen: 2, - commutative: true, - asm: x86.AVPMAXUQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPMINUQ256", - argLen: 2, - commutative: true, - asm: x86.AVPMINUQ, + name: "VPOPCNTBMasked512", + argLen: 2, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26589,10 +28469,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUQMasked512", + name: "VPADDSBMasked512", argLen: 3, commutative: true, - asm: x86.AVPMAXUQ, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26605,10 +28485,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUQMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMINUQ, + name: "VPSUBSBMasked512", + argLen: 3, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26621,10 +28500,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULUDQMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMULUDQ, + name: "VPSUBBMasked512", + argLen: 3, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26637,10 +28515,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUQ512", + name: "VPMAXSB512", argLen: 2, commutative: true, - asm: x86.AVPMAXUQ, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26652,10 +28530,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUQ512", + name: "VPMINSB512", argLen: 2, commutative: true, - asm: x86.AVPMINUQ, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 @@ -26667,14 +28545,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULUDQ512", - argLen: 2, - commutative: true, - asm: x86.AVPMULUDQ, + name: "VPOPCNTB512", + argLen: 1, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26682,10 +28558,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGB128", + name: "VPADDSB512", argLen: 2, commutative: true, - asm: x86.AVPAVGB, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26697,10 +28573,53 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGBMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPAVGB, + name: "VPSUBSB512", + argLen: 2, + asm: x86.AVPSUBSB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSUBB512", + argLen: 2, + asm: x86.AVPSUBB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPAVGW256", + argLen: 2, + commutative: true, + asm: x86.AVPAVGW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPAVGWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26713,10 +28632,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUBMasked128", + name: "VPMAXUWMasked256", argLen: 3, commutative: true, - asm: x86.AVPMAXUB, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26729,10 +28648,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUBMasked128", + name: "VPMINUWMasked256", argLen: 3, commutative: true, - asm: x86.AVPMINUB, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26745,9 +28664,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMADDUBSWMasked128", - argLen: 3, - asm: x86.AVPMADDUBSW, + name: "VPMULHUWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26760,10 +28680,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUB128", + name: "VPMAXUW256", argLen: 2, commutative: true, - asm: x86.AVPMAXUB, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26775,10 +28695,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUB128", + name: "VPMINUW256", argLen: 2, commutative: true, - asm: x86.AVPMINUB, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26790,9 +28710,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMADDUBSW128", - argLen: 2, - asm: x86.AVPMADDUBSW, + name: "VPMULHUW256", + argLen: 2, + commutative: true, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26804,10 +28725,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGB256", + name: "VPAVGW512", argLen: 2, commutative: true, - asm: x86.AVPAVGB, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26819,10 +28740,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGBMasked256", + name: "VPAVGWMasked512", argLen: 3, commutative: true, - asm: x86.AVPAVGB, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26835,10 +28756,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUBMasked256", + name: "VPMAXUWMasked512", argLen: 3, commutative: true, - asm: x86.AVPMAXUB, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26851,10 +28772,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUBMasked256", + name: "VPMINUWMasked512", argLen: 3, commutative: true, - asm: x86.AVPMINUB, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26867,9 +28788,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMADDUBSWMasked256", - argLen: 3, - asm: x86.AVPMADDUBSW, + name: "VPMULHUWMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26882,10 +28804,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUB256", + name: "VPMAXUW512", argLen: 2, commutative: true, - asm: x86.AVPMAXUB, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26897,10 +28819,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUB256", + name: "VPMINUW512", argLen: 2, commutative: true, - asm: x86.AVPMINUB, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26912,9 +28834,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMADDUBSW256", - argLen: 2, - asm: x86.AVPMADDUBSW, + name: "VPMULHUW512", + argLen: 2, + commutative: true, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26926,10 +28849,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGB512", + name: "VPAVGW128", argLen: 2, commutative: true, - asm: x86.AVPAVGB, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26941,10 +28864,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGBMasked512", + name: "VPAVGWMasked128", argLen: 3, commutative: true, - asm: x86.AVPAVGB, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26957,10 +28880,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUBMasked512", + name: "VPMAXUWMasked128", argLen: 3, commutative: true, - asm: x86.AVPMAXUB, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26973,10 +28896,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUBMasked512", + name: 
"VPMINUWMasked128", argLen: 3, commutative: true, - asm: x86.AVPMINUB, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26989,9 +28912,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMADDUBSWMasked512", - argLen: 3, - asm: x86.AVPMADDUBSW, + name: "VPMULHUWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -27004,10 +28928,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUB512", + name: "VPMAXUW128", argLen: 2, commutative: true, - asm: x86.AVPMAXUB, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27019,10 +28943,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUB512", + name: "VPMINUW128", argLen: 2, commutative: true, - asm: x86.AVPMINUB, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27034,9 +28958,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMADDUBSW512", - argLen: 2, - asm: x86.AVPMADDUBSW, + name: "VPMULHUW128", + argLen: 2, + commutative: true, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27048,13 +28973,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRNDSCALEPS512", - auxType: auxInt8, - argLen: 1, - asm: x86.AVRNDSCALEPS, + name: "VPMAXUDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27062,13 +28989,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPS512", - auxType: auxInt8, - argLen: 1, - asm: x86.AVREDUCEPS, + name: "VPMINUDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27076,30 +29005,29 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPS512", - auxType: auxInt8, + name: "VPMAXUD512", argLen: 2, commutative: true, - asm: x86.AVCMPPS, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VRNDSCALEPSMasked512", - auxType: auxInt8, - argLen: 2, - asm: x86.AVRNDSCALEPS, + name: "VPMINUD512", + argLen: 2, + commutative: true, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27107,14 +29035,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPSMasked512", - auxType: auxInt8, - argLen: 2, - asm: x86.AVREDUCEPS, + name: "VPMAXUDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27122,44 +29051,30 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPSMasked512", - auxType: auxInt8, + name: "VPMINUDMasked128", argLen: 3, commutative: true, - asm: x86.AVCMPPS, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VROUNDPS128", - auxType: auxInt8, - argLen: 1, - asm: x86.AVROUNDPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VRNDSCALEPS128", - auxType: auxInt8, - argLen: 1, - asm: x86.AVRNDSCALEPS, + name: "VPMAXUD128", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27167,13 +29082,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPS128", - auxType: auxInt8, - argLen: 1, - asm: x86.AVREDUCEPS, + name: "VPMINUD128", + argLen: 2, + commutative: true, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27181,11 +29097,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPS128", - auxType: auxInt8, + name: "VPMULUDQ128", argLen: 2, commutative: true, - asm: x86.AVCMPPS, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27197,14 +29112,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRNDSCALEPSMasked128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVRNDSCALEPS, + name: "VPMAXUDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27212,14 +29128,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPSMasked128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVREDUCEPS, + name: 
"VPMINUDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27227,30 +29144,29 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPSMasked128", - auxType: auxInt8, - argLen: 3, + name: "VPMAXUD256", + argLen: 2, commutative: true, - asm: x86.AVCMPPS, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VROUNDPS256", - auxType: auxInt8, - argLen: 1, - asm: x86.AVROUNDPS, + name: "VPMINUD256", + argLen: 2, + commutative: true, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27258,13 +29174,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRNDSCALEPS256", - auxType: auxInt8, - argLen: 1, - asm: x86.AVRNDSCALEPS, + name: "VPMULUDQ256", + argLen: 2, + commutative: true, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27272,13 +29189,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPS256", - auxType: auxInt8, - argLen: 1, - asm: x86.AVREDUCEPS, + name: "VPMAXUQMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27286,15 +29205,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPS256", - auxType: auxInt8, - argLen: 2, + name: "VPMINUQMasked128", + argLen: 3, commutative: true, - asm: x86.AVCMPPS, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27302,14 +29221,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRNDSCALEPSMasked256", - auxType: auxInt8, - argLen: 2, - 
asm: x86.AVRNDSCALEPS, + name: "VPMULUDQMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27317,14 +29237,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPSMasked256", - auxType: auxInt8, - argLen: 2, - asm: x86.AVREDUCEPS, + name: "VPMAXUQ128", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27332,30 +29252,30 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPSMasked256", - auxType: auxInt8, - argLen: 3, + name: "VPMINUQ128", + argLen: 2, commutative: true, - asm: x86.AVCMPPS, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VROUNDPD128", - auxType: auxInt8, - argLen: 1, - asm: x86.AVROUNDPD, + name: "VPMAXUQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27363,13 +29283,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRNDSCALEPD128", - auxType: auxInt8, - argLen: 1, - asm: x86.AVRNDSCALEPD, + name: "VPMINUQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27377,13 +29299,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPD128", - auxType: auxInt8, - argLen: 1, - asm: x86.AVREDUCEPD, + name: "VPMULUDQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, 
outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27391,11 +29315,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDPPD128", - auxType: auxInt8, + name: "VPMAXUQ256", argLen: 2, commutative: true, - asm: x86.AVDPPD, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27407,11 +29330,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPD128", - auxType: auxInt8, + name: "VPMINUQ256", argLen: 2, commutative: true, - asm: x86.AVCMPPD, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27423,14 +29345,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRNDSCALEPDMasked128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVRNDSCALEPD, + name: "VPMAXUQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27438,14 +29361,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPDMasked128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVREDUCEPD, + name: "VPMINUQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27453,11 +29377,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPDMasked128", - auxType: auxInt8, + name: "VPMULUDQMasked512", argLen: 3, commutative: true, - asm: x86.AVCMPPD, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -27465,18 +29388,19 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VROUNDPD256", - auxType: auxInt8, - argLen: 1, - asm: x86.AVROUNDPD, + name: "VPMAXUQ512", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27484,13 +29408,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRNDSCALEPD256", - auxType: auxInt8, - argLen: 1, - asm: x86.AVRNDSCALEPD, + name: "VPMINUQ512", + argLen: 2, + commutative: true, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27498,13 +29423,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPD256", - auxType: auxInt8, - argLen: 1, - asm: x86.AVREDUCEPD, + name: 
"VPMULUDQ512", + argLen: 2, + commutative: true, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27512,11 +29438,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPD256", - auxType: auxInt8, + name: "VPAVGB128", argLen: 2, commutative: true, - asm: x86.AVCMPPD, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27528,14 +29453,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRNDSCALEPDMasked256", - auxType: auxInt8, - argLen: 2, - asm: x86.AVRNDSCALEPD, + name: "VPAVGBMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27543,14 +29469,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPDMasked256", - auxType: auxInt8, - argLen: 2, - asm: x86.AVREDUCEPD, + name: "VPMAXUBMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27558,11 +29485,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPDMasked256", - auxType: auxInt8, + name: "VPMINUBMasked128", argLen: 3, commutative: true, - asm: x86.AVCMPPD, + asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -27570,18 +29496,19 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VRNDSCALEPD512", - auxType: auxInt8, - argLen: 1, - asm: x86.AVRNDSCALEPD, + name: "VPMADDUBSWMasked128", + argLen: 3, + asm: x86.AVPMADDUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27589,13 +29516,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPD512", - auxType: auxInt8, - argLen: 1, - asm: x86.AVREDUCEPD, + name: "VPMAXUB128", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27603,30 +29531,28 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPD512", - auxType: 
auxInt8, + name: "VPMINUB128", argLen: 2, commutative: true, - asm: x86.AVCMPPD, + asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VRNDSCALEPDMasked512", - auxType: auxInt8, - argLen: 2, - asm: x86.AVRNDSCALEPD, + name: "VPMADDUBSW128", + argLen: 2, + asm: x86.AVPMADDUBSW, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27634,14 +29560,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPDMasked512", - auxType: auxInt8, - argLen: 2, - asm: x86.AVREDUCEPD, + name: "VPAVGB256", + argLen: 2, + commutative: true, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27649,11 +29575,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPDMasked512", - auxType: auxInt8, + name: "VPAVGBMasked256", argLen: 3, commutative: true, - asm: x86.AVCMPPD, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -27661,31 +29586,31 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPW256", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPW, + name: "VPMAXUBMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPWMasked256", - auxType: auxInt8, + name: "VPMINUBMasked256", argLen: 3, commutative: true, - asm: x86.AVPCMPW, + asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -27693,78 +29618,89 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPW512", - auxType: auxInt8, + name: "VPMADDUBSWMasked256", + argLen: 3, + asm: x86.AVPMADDUBSW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMAXUB256", argLen: 2, commutative: true, - asm: x86.AVPCMPW, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPWMasked512", - auxType: auxInt8, - argLen: 3, + name: "VPMINUB256", + argLen: 2, commutative: true, - asm: x86.AVPCMPW, + asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPEXTRW128", - auxType: auxInt8, - argLen: 1, - asm: x86.AVPEXTRW, + name: "VPMADDUBSW256", + argLen: 2, + asm: x86.AVPMADDUBSW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPW128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPW, + name: "VPAVGB512", + argLen: 2, + commutative: true, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPWMasked128", - auxType: auxInt8, + name: "VPAVGBMasked512", argLen: 3, commutative: true, - asm: x86.AVPCMPW, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -27772,19 +29708,20 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPINSRW128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPINSRW, + name: "VPMAXUBMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27792,27 +29729,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPD512", - auxType: auxInt8, 
- argLen: 2, + name: "VPMINUBMasked512", + argLen: 3, commutative: true, - asm: x86.AVPCMPD, + asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPDMasked512", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPD, + name: "VPMADDUBSWMasked512", + argLen: 3, + asm: x86.AVPMADDUBSW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -27820,64 +29755,61 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPEXTRD128", - auxType: auxInt8, - argLen: 1, - asm: x86.AVPEXTRD, + name: "VPMAXUB512", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPD128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPD, + name: "VPMINUB512", + argLen: 2, + commutative: true, + asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPDMasked128", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPD, + name: "VPMADDUBSW512", + argLen: 2, + asm: x86.AVPMADDUBSW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPINSRD128", + name: "VRNDSCALEPS512", auxType: auxInt8, - argLen: 2, - asm: x86.AVPINSRD, + argLen: 1, + asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ @@ -27886,31 +29818,29 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPD256", + name: "VREDUCEPS512", auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPD, + argLen: 1, + asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, 
outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPDMasked256", + name: "VCMPPS512", auxType: auxInt8, - argLen: 3, + argLen: 2, commutative: true, - asm: x86.AVPCMPD, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -27918,40 +29848,41 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPEXTRQ128", + name: "VRNDSCALEPSMasked512", auxType: auxInt8, - argLen: 1, - asm: x86.AVPEXTRQ, + argLen: 2, + asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPQ128", + name: "VREDUCEPSMasked512", auxType: auxInt8, argLen: 2, - asm: x86.AVPCMPQ, + asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPQMasked128", + name: "VCMPPSMasked512", auxType: auxInt8, argLen: 3, commutative: true, - asm: x86.AVPCMPQ, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -27964,13 +29895,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPINSRQ128", + name: "VROUNDPS128", auxType: auxInt8, - argLen: 2, - asm: x86.AVPINSRQ, + argLen: 1, + asm: x86.AVROUNDPS, reg: regInfo{ inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ @@ -27979,105 +29909,85 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPQ256", + name: "VRNDSCALEPS128", auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPQ, + argLen: 1, + asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPQMasked256", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPQ, + name: "VREDUCEPS128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, 
outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPQ512", + name: "VCMPPS128", auxType: auxInt8, argLen: 2, commutative: true, - asm: x86.AVPCMPQ, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPQMasked512", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPQ, + name: "VRNDSCALEPSMasked128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPEXTRB128", - auxType: auxInt8, - argLen: 1, - asm: x86.AVPEXTRB, - reg: regInfo{ - inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, - outputs: []outputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - }, }, }, { - name: "VPCMPB128", + name: "VREDUCEPSMasked128", auxType: auxInt8, argLen: 2, - asm: x86.AVPCMPB, + asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPBMasked128", + name: "VCMPPSMasked128", auxType: auxInt8, argLen: 3, commutative: true, - asm: x86.AVPCMPB, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -28090,13 +30000,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPINSRB128", + name: "VROUNDPS256", auxType: auxInt8, - argLen: 2, - asm: x86.AVPINSRB, + argLen: 1, + asm: x86.AVROUNDPS, reg: regInfo{ inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ @@ -28105,92 +30014,85 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPB256", + name: "VRNDSCALEPS256", auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPB, + argLen: 1, + asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPBMasked256", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPB, + name: "VREDUCEPS256", + auxType: auxInt8, + argLen: 1, + asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPB512", + name: "VCMPPS256", auxType: auxInt8, argLen: 2, commutative: true, - asm: x86.AVPCMPB, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPBMasked512", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPB, + name: "VRNDSCALEPSMasked256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUW256", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVPCMPUW, + name: "VREDUCEPSMasked256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUWMasked256", + name: "VCMPPSMasked256", auxType: auxInt8, argLen: 3, commutative: true, - asm: x86.AVPCMPUW, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -28203,126 +30105,115 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUW512", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVPCMPUW, + name: "VROUNDPD128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVROUNDPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUWMasked512", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPUW, + name: "VRNDSCALEPD128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUW128", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVPCMPUW, + name: "VREDUCEPD128", + auxType: auxInt8, + argLen: 1, + 
asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUWMasked128", + name: "VDPPD128", auxType: auxInt8, - argLen: 3, + argLen: 2, commutative: true, - asm: x86.AVPCMPUW, + asm: x86.AVDPPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUD512", + name: "VCMPPD128", auxType: auxInt8, argLen: 2, commutative: true, - asm: x86.AVPCMPUD, + asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUDMasked512", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPUD, + name: "VRNDSCALEPDMasked128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUD128", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVPCMPUD, + name: "VREDUCEPDMasked128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUDMasked128", + name: "VCMPPDMasked128", auxType: auxInt8, argLen: 3, commutative: true, - asm: x86.AVPCMPUD, + asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -28335,93 +30226,99 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUD256", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVPCMPUD, + name: "VROUNDPD256", + auxType: auxInt8, + argLen: 1, + asm: x86.AVROUNDPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: 
"VPCMPUDMasked256", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPUD, - reg: regInfo{ + name: "VRNDSCALEPD256", + auxType: auxInt8, + argLen: 1, + asm: x86.AVRNDSCALEPD, + reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUQ128", + name: "VREDUCEPD256", + auxType: auxInt8, + argLen: 1, + asm: x86.AVREDUCEPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VCMPPD256", auxType: auxInt8, argLen: 2, commutative: true, - asm: x86.AVPCMPUQ, + asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUQMasked128", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPUQ, + name: "VRNDSCALEPDMasked256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUQ256", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVPCMPUQ, + name: "VREDUCEPDMasked256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUQMasked256", + name: "VCMPPDMasked256", auxType: auxInt8, argLen: 3, commutative: true, - asm: x86.AVPCMPUQ, + asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -28434,44 +30331,39 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUQ512", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVPCMPUQ, + name: "VRNDSCALEPD512", + auxType: auxInt8, + argLen: 1, + asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUQMasked512", - auxType: auxInt8, - argLen: 3, - commutative: 
true, - asm: x86.AVPCMPUQ, + name: "VREDUCEPD512", + auxType: auxInt8, + argLen: 1, + asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUB128", + name: "VCMPPD512", auxType: auxInt8, argLen: 2, commutative: true, - asm: x86.AVPCMPUB, + asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28483,44 +30375,41 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUBMasked128", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPUB, + name: "VRNDSCALEPDMasked512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUB256", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVPCMPUB, + name: "VREDUCEPDMasked512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUBMasked256", + name: "VCMPPDMasked512", auxType: auxInt8, argLen: 3, commutative: true, - asm: x86.AVPCMPUB, + asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -28533,11 +30422,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUB512", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVPCMPUB, + name: "VPCMPW256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28549,11 +30437,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUBMasked512", + name: "VPCMPWMasked256", auxType: auxInt8, argLen: 3, commutative: true, - asm: x86.AVPCMPUB, + asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -28565,1720 +30453,1790 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "ADD", - argLen: 2, - commutative: true, - asm: arm.AADD, + name: "VPSHLDWMasked256", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - 
{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDconst", - auxType: auxInt32, - argLen: 1, - asm: arm.AADD, + name: "VPSHRDWMasked256", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ - {0, 30719}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUB", - argLen: 2, - asm: arm.ASUB, + name: "VPSHLDW256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBconst", - auxType: auxInt32, - argLen: 1, - asm: arm.ASUB, + name: "VPSHRDW256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RSB", - argLen: 2, - asm: arm.ARSB, + name: "VPCMPW512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "RSBconst", - auxType: auxInt32, - argLen: 1, - asm: arm.ARSB, + name: "VPCMPWMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "MUL", - argLen: 2, - commutative: true, - asm: arm.AMUL, + name: "VPSHLDWMasked512", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, 
// R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "HMUL", - argLen: 2, - commutative: true, - asm: arm.AMULL, + name: "VPSHRDWMasked512", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "HMULU", - argLen: 2, - commutative: true, - asm: arm.AMULLU, + name: "VPSHLDW512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CALLudiv", - argLen: 2, - clobberFlags: true, + name: "VPSHRDW512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ - {0, 2}, // R1 - {1, 1}, // R0 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, - clobbers: 20492, // R2 R3 R12 R14 outputs: []outputInfo{ - {0, 1}, // R0 - {1, 2}, // R1 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDS", - argLen: 2, - commutative: true, - asm: arm.AADD, + name: "VPEXTRW128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPEXTRW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, { - name: "ADDSconst", - auxType: auxInt32, - argLen: 1, - asm: arm.AADD, + name: "VPCMPW128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "ADC", + name: "VPCMPWMasked128", + auxType: auxInt8, argLen: 3, commutative: true, - asm: arm.AADC, + asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, 
}, }, { - name: "ADCconst", - auxType: auxInt32, - argLen: 2, - asm: arm.AADC, + name: "VPSHLDWMasked128", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBS", - argLen: 2, - asm: arm.ASUB, + name: "VPSHRDWMasked128", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBSconst", - auxType: auxInt32, - argLen: 1, - asm: arm.ASUB, + name: "VPINSRW128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPINSRW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RSBSconst", - auxType: auxInt32, - argLen: 1, - asm: arm.ARSB, + name: "VPSHLDW128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SBC", - argLen: 3, - asm: arm.ASBC, + name: "VPSHRDW128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SBCconst", - auxType: auxInt32, - argLen: 2, - asm: arm.ASBC, + name: "VPCMPD512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "RSCconst", - auxType: auxInt32, - argLen: 2, - asm: arm.ARSC, + name: 
"VPCMPDMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "MULLU", - argLen: 2, - commutative: true, - asm: arm.AMULLU, + name: "VPROLDMasked512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MULA", - argLen: 3, - asm: arm.AMULA, + name: "VPRORDMasked512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MULS", - argLen: 3, - asm: arm.AMULS, + name: "VPSHLDDMasked512", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDF", - argLen: 2, - commutative: true, - asm: arm.AADDF, + name: "VPSHRDDMasked512", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDD", - argLen: 2, - commutative: true, - asm: arm.AADDD, + name: "VPROLD512", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, 
outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBF", - argLen: 2, - asm: arm.ASUBF, + name: "VPRORD512", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBD", - argLen: 2, - asm: arm.ASUBD, + name: "VPSHLDD512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MULF", - argLen: 2, - commutative: true, - asm: arm.AMULF, + name: "VPSHRDD512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MULD", - argLen: 2, - commutative: true, - asm: arm.AMULD, + name: "VPEXTRD128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPEXTRD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, { - name: "NMULF", - argLen: 2, - commutative: true, - asm: arm.ANMULF, + name: "VPCMPD128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "NMULD", - argLen: 2, + name: "VPCMPDMasked128", + auxType: auxInt8, + argLen: 3, commutative: true, - asm: arm.ANMULD, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 
F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "DIVF", - argLen: 2, - asm: arm.ADIVF, + name: "VPROLDMasked128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "DIVD", - argLen: 2, - asm: arm.ADIVD, + name: "VPRORDMasked128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MULAF", - argLen: 3, - resultInArg0: true, - asm: arm.AMULAF, + name: "VPSHLDDMasked128", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MULAD", - argLen: 3, - resultInArg0: true, - asm: arm.AMULAD, + name: "VPSHRDDMasked128", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MULSF", - argLen: 3, - resultInArg0: true, - asm: arm.AMULSF, + name: "VPROLD128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {2, 
4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MULSD", - argLen: 3, - resultInArg0: true, - asm: arm.AMULSD, + name: "VPRORD128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMULAD", - argLen: 3, - resultInArg0: true, - asm: arm.AFMULAD, + name: "VPINSRD128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPINSRD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "AND", - argLen: 2, - commutative: true, - asm: arm.AAND, + name: "VPSHLDD128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ANDconst", - auxType: auxInt32, - argLen: 1, - asm: arm.AAND, + name: "VPSHRDD128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "OR", - argLen: 2, - commutative: true, - asm: arm.AORR, + name: "VPCMPD256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "ORconst", - auxType: auxInt32, - argLen: 1, - asm: arm.AORR, + name: "VPCMPDMasked256", + auxType: 
auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "XOR", - argLen: 2, - commutative: true, - asm: arm.AEOR, + name: "VPROLDMasked256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "XORconst", - auxType: auxInt32, - argLen: 1, - asm: arm.AEOR, + name: "VPRORDMasked256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "BIC", - argLen: 2, - asm: arm.ABIC, + name: "VPSHLDDMasked256", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "BICconst", - auxType: auxInt32, - argLen: 1, - asm: arm.ABIC, + name: "VPSHRDDMasked256", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "BFX", - auxType: auxInt32, + name: "VPROLD256", + auxType: auxInt8, argLen: 1, - asm: arm.ABFX, + asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "BFXU", - auxType: auxInt32, + name: "VPRORD256", + auxType: auxInt8, argLen: 1, - asm: arm.ABFXU, + asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MVN", - argLen: 1, - asm: arm.AMVN, + name: "VPSHLDD256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "NEGF", - argLen: 1, - asm: arm.ANEGF, + name: "VPSHRDD256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "NEGD", - argLen: 1, - asm: arm.ANEGD, + name: "VPEXTRQ128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPEXTRQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, { - name: "SQRTD", - argLen: 1, - asm: arm.ASQRTD, + name: "VPCMPQ128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "SQRTF", - argLen: 1, - asm: arm.ASQRTF, + name: "VPCMPQMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "ABSD", - argLen: 1, - asm: arm.AABSD, + name: "VPROLQMasked128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CLZ", - argLen: 1, - asm: arm.ACLZ, + name: "VPRORQMasked128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPRORQ, reg: 
regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "REV", - argLen: 1, - asm: arm.AREV, + name: "VPSHLDQMasked128", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "REV16", - argLen: 1, - asm: arm.AREV16, + name: "VPSHRDQMasked128", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RBIT", - argLen: 1, - asm: arm.ARBIT, + name: "VPROLQ128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SLL", - argLen: 2, - asm: arm.ASLL, + name: "VPRORQ128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SLLconst", - auxType: auxInt32, - argLen: 1, - asm: arm.ASLL, + name: "VPINSRQ128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPINSRQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SRL", - argLen: 2, - asm: arm.ASRL, + name: "VPSHLDQ128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 
X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SRLconst", - auxType: auxInt32, - argLen: 1, - asm: arm.ASRL, + name: "VPSHRDQ128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SRA", - argLen: 2, - asm: arm.ASRA, + name: "VPCMPQ256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "SRAconst", - auxType: auxInt32, - argLen: 1, - asm: arm.ASRA, + name: "VPCMPQMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "SRR", - argLen: 2, + name: "VPROLQMasked256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SRRconst", - auxType: auxInt32, - argLen: 1, + name: "VPRORQMasked256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDshiftLL", - auxType: auxInt32, - argLen: 2, - asm: arm.AADD, + name: "VPSHLDQMasked256", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDshiftRL", - auxType: auxInt32, - argLen: 2, - asm: arm.AADD, + name: "VPSHRDQMasked256", + auxType: auxInt8, + 
argLen: 3, + asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDshiftRA", - auxType: auxInt32, - argLen: 2, - asm: arm.AADD, + name: "VPROLQ256", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBshiftLL", - auxType: auxInt32, - argLen: 2, - asm: arm.ASUB, + name: "VPRORQ256", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBshiftRL", - auxType: auxInt32, + name: "VPSHLDQ256", + auxType: auxInt8, argLen: 2, - asm: arm.ASUB, + asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBshiftRA", - auxType: auxInt32, + name: "VPSHRDQ256", + auxType: auxInt8, argLen: 2, - asm: arm.ASUB, + asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RSBshiftLL", - auxType: auxInt32, - argLen: 2, - asm: arm.ARSB, + name: "VPCMPQ512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "RSBshiftRL", - auxType: auxInt32, - argLen: 2, - asm: arm.ARSB, + name: "VPCMPQMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPQ, 
reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "RSBshiftRA", - auxType: auxInt32, + name: "VPROLQMasked512", + auxType: auxInt8, argLen: 2, - asm: arm.ARSB, + asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ANDshiftLL", - auxType: auxInt32, + name: "VPRORQMasked512", + auxType: auxInt8, argLen: 2, - asm: arm.AAND, + asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ANDshiftRL", - auxType: auxInt32, - argLen: 2, - asm: arm.AAND, + name: "VPSHLDQMasked512", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ANDshiftRA", - auxType: auxInt32, - argLen: 2, - asm: arm.AAND, + name: "VPSHRDQMasked512", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ORshiftLL", - auxType: auxInt32, - argLen: 2, - asm: arm.AORR, + name: "VPROLQ512", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ORshiftRL", - auxType: auxInt32, - argLen: 2, - asm: arm.AORR, + name: "VPRORQ512", 
+ auxType: auxInt8, + argLen: 1, + asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ORshiftRA", - auxType: auxInt32, + name: "VPSHLDQ512", + auxType: auxInt8, argLen: 2, - asm: arm.AORR, + asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "XORshiftLL", - auxType: auxInt32, + name: "VPSHRDQ512", + auxType: auxInt8, argLen: 2, - asm: arm.AEOR, + asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "XORshiftRL", - auxType: auxInt32, - argLen: 2, - asm: arm.AEOR, + name: "VPEXTRB128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPEXTRB, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, { - name: "XORshiftRA", - auxType: auxInt32, + name: "VPCMPB128", + auxType: auxInt8, argLen: 2, - asm: arm.AEOR, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "XORshiftRR", - auxType: auxInt32, - argLen: 2, - asm: arm.AEOR, + name: "VPCMPBMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "BICshiftLL", - auxType: auxInt32, + name: "VPINSRB128", + auxType: auxInt8, argLen: 2, - asm: arm.ABIC, + asm: x86.AVPINSRB, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 
R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "BICshiftRL", - auxType: auxInt32, + name: "VPCMPB256", + auxType: auxInt8, argLen: 2, - asm: arm.ABIC, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "BICshiftRA", - auxType: auxInt32, - argLen: 2, - asm: arm.ABIC, + name: "VPCMPBMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "MVNshiftLL", - auxType: auxInt32, - argLen: 1, - asm: arm.AMVN, + name: "VPCMPB512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "MVNshiftRL", - auxType: auxInt32, - argLen: 1, - asm: arm.AMVN, + name: "VPCMPBMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "MVNshiftRA", - auxType: auxInt32, - argLen: 1, - asm: arm.AMVN, + name: "VPCMPUW256", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "ADCshiftLL", - auxType: auxInt32, - argLen: 3, - asm: arm.AADC, + name: "VPCMPUWMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 
21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "ADCshiftRL", - auxType: auxInt32, - argLen: 3, - asm: arm.AADC, + name: "VPCMPUW512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "ADCshiftRA", - auxType: auxInt32, - argLen: 3, - asm: arm.AADC, + name: "VPCMPUWMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "SBCshiftLL", - auxType: auxInt32, - argLen: 3, - asm: arm.ASBC, + name: "VPCMPUW128", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "SBCshiftRL", - auxType: auxInt32, - argLen: 3, - asm: arm.ASBC, + name: "VPCMPUWMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "SBCshiftRA", - auxType: auxInt32, - argLen: 3, - asm: arm.ASBC, + name: "VPCMPUD512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "RSCshiftLL", - auxType: auxInt32, - argLen: 3, - asm: arm.ARSC, + name: 
"VPCMPUDMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "RSCshiftRL", - auxType: auxInt32, - argLen: 3, - asm: arm.ARSC, + name: "VPCMPUD128", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "RSCshiftRA", - auxType: auxInt32, - argLen: 3, - asm: arm.ARSC, + name: "VPCMPUDMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "ADDSshiftLL", - auxType: auxInt32, - argLen: 2, - asm: arm.AADD, + name: "VPCMPUD256", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "ADDSshiftRL", - auxType: auxInt32, - argLen: 2, - asm: arm.AADD, + name: "VPCMPUDMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "ADDSshiftRA", - auxType: auxInt32, - argLen: 2, - asm: arm.AADD, + name: "VPCMPUQ128", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
}, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "SUBSshiftLL", - auxType: auxInt32, - argLen: 2, - asm: arm.ASUB, + name: "VPCMPUQMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "SUBSshiftRL", - auxType: auxInt32, - argLen: 2, - asm: arm.ASUB, + name: "VPCMPUQ256", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "SUBSshiftRA", - auxType: auxInt32, - argLen: 2, - asm: arm.ASUB, + name: "VPCMPUQMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "RSBSshiftLL", - auxType: auxInt32, - argLen: 2, - asm: arm.ARSB, + name: "VPCMPUQ512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "RSBSshiftRL", - auxType: auxInt32, - argLen: 2, - asm: arm.ARSB, + name: "VPCMPUQMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "RSBSshiftRA", - auxType: auxInt32, - argLen: 2, - asm: arm.ARSB, + name: "VPCMPUB128", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUB, reg: regInfo{ 
inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - }, - outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - }, - }, - }, - { - name: "ADDshiftLLreg", - argLen: 3, - asm: arm.AADD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - }, - outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - }, - }, - }, - { - name: "ADDshiftRLreg", - argLen: 3, - asm: arm.AADD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "ADDshiftRAreg", - argLen: 3, - asm: arm.AADD, + name: "VPCMPUBMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "SUBshiftLLreg", - argLen: 3, - asm: arm.ASUB, + name: "VPCMPUB256", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "SUBshiftRLreg", - argLen: 3, - asm: arm.ASUB, + name: "VPCMPUBMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "SUBshiftRAreg", - argLen: 3, - asm: arm.ASUB, + name: "VPCMPUB512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "RSBshiftLLreg", - argLen: 3, - asm: arm.ARSB, + name: "VPCMPUBMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, + { - name: "RSBshiftRLreg", - argLen: 3, - asm: arm.ARSB, + name: "ADD", + argLen: 2, + commutative: true, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -30286,14 +32244,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "RSBshiftRAreg", - argLen: 3, - asm: arm.ARSB, + name: "ADDconst", + auxType: auxInt32, + argLen: 1, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 30719}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -30301,14 +32258,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "ANDshiftLLreg", - argLen: 3, - asm: arm.AAND, + name: "SUB", + argLen: 2, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -30316,14 +32272,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "ANDshiftRLreg", - argLen: 3, - asm: arm.AAND, + name: "SUBconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -30331,14 +32286,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "ANDshiftRAreg", - argLen: 3, - asm: arm.AAND, + name: "RSB", + argLen: 2, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: 
[]outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -30346,14 +32300,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "ORshiftLLreg", - argLen: 3, - asm: arm.AORR, + name: "RSBconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -30361,14 +32314,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "ORshiftRLreg", - argLen: 3, - asm: arm.AORR, + name: "MUL", + argLen: 2, + commutative: true, + asm: arm.AMUL, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -30376,14 +32329,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "ORshiftRAreg", - argLen: 3, - asm: arm.AORR, + name: "HMUL", + argLen: 2, + commutative: true, + asm: arm.AMULL, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -30391,14 +32344,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "XORshiftLLreg", - argLen: 3, - asm: arm.AEOR, + name: "HMULU", + argLen: 2, + commutative: true, + asm: arm.AMULLU, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -30406,59 +32359,61 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "XORshiftRLreg", - argLen: 3, - asm: arm.AEOR, + name: "CALLudiv", + argLen: 2, + clobberFlags: true, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2}, // R1 + {1, 1}, // R0 }, + clobbers: 20492, // R2 R3 R12 R14 outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1}, // R0 + {1, 2}, // R1 }, }, }, { - name: "XORshiftRAreg", - argLen: 3, - asm: arm.AEOR, + name: "ADDS", + argLen: 2, + commutative: true, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ + {1, 0}, {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "BICshiftLLreg", - argLen: 3, - asm: arm.ABIC, + name: "ADDSconst", + auxType: auxInt32, + argLen: 1, + asm: 
arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ + {1, 0}, {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "BICshiftRLreg", - argLen: 3, - asm: arm.ABIC, + name: "ADC", + argLen: 3, + commutative: true, + asm: arm.AADC, reg: regInfo{ inputs: []inputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -30466,14 +32421,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "BICshiftRAreg", - argLen: 3, - asm: arm.ABIC, + name: "ADCconst", + auxType: auxInt32, + argLen: 2, + asm: arm.AADC, reg: regInfo{ inputs: []inputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -30481,56 +32435,58 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MVNshiftLLreg", + name: "SUBS", argLen: 2, - asm: arm.AMVN, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ + {1, 0}, {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MVNshiftRLreg", - argLen: 2, - asm: arm.AMVN, + name: "SUBSconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ + {1, 0}, {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MVNshiftRAreg", - argLen: 2, - asm: arm.AMVN, + name: "RSBSconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ + {1, 0}, {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "ADCshiftLLreg", - argLen: 4, - asm: arm.AADC, + name: "SBC", + argLen: 3, + asm: arm.ASBC, reg: regInfo{ inputs: []inputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -30538,14 +32494,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "ADCshiftRLreg", - argLen: 4, - asm: arm.AADC, + name: "SBCconst", + auxType: auxInt32, + argLen: 2, + asm: arm.ASBC, reg: regInfo{ inputs: []inputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -30553,14 +32508,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "ADCshiftRAreg", - argLen: 4, - asm: arm.AADC, + name: "RSCconst", + auxType: auxInt32, + argLen: 2, + asm: arm.ARSC, reg: regInfo{ inputs: []inputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - 
{2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -30568,24 +32522,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "SBCshiftLLreg", - argLen: 4, - asm: arm.ASBC, + name: "MULLU", + argLen: 2, + commutative: true, + asm: arm.AMULLU, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SBCshiftRLreg", - argLen: 4, - asm: arm.ASBC, + name: "MULA", + argLen: 3, + asm: arm.AMULA, reg: regInfo{ inputs: []inputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -30598,9 +32553,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "SBCshiftRAreg", - argLen: 4, - asm: arm.ASBC, + name: "MULS", + argLen: 3, + asm: arm.AMULS, reg: regInfo{ inputs: []inputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -30613,661 +32568,713 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "RSCshiftLLreg", - argLen: 4, - asm: arm.ARSC, + name: "ADDF", + argLen: 2, + commutative: true, + asm: arm.AADDF, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "RSCshiftRLreg", - argLen: 4, - asm: arm.ARSC, + name: "ADDD", + argLen: 2, + commutative: true, + asm: arm.AADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "RSCshiftRAreg", - argLen: 4, - asm: arm.ARSC, + name: "SUBF", + argLen: 2, + asm: arm.ASUBF, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "ADDSshiftLLreg", - argLen: 3, - asm: arm.AADD, + name: "SUBD", + argLen: 2, + asm: arm.ASUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 
F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "ADDSshiftRLreg", - argLen: 3, - asm: arm.AADD, + name: "MULF", + argLen: 2, + commutative: true, + asm: arm.AMULF, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "ADDSshiftRAreg", - argLen: 3, - asm: arm.AADD, + name: "MULD", + argLen: 2, + commutative: true, + asm: arm.AMULD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "SUBSshiftLLreg", - argLen: 3, - asm: arm.ASUB, + name: "NMULF", + argLen: 2, + commutative: true, + asm: arm.ANMULF, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "SUBSshiftRLreg", - argLen: 3, - asm: arm.ASUB, + name: "NMULD", + argLen: 2, + commutative: true, + asm: arm.ANMULD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "SUBSshiftRAreg", - argLen: 3, - asm: arm.ASUB, + name: "DIVF", + argLen: 2, + asm: arm.ADIVF, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: 
"RSBSshiftLLreg", - argLen: 3, - asm: arm.ARSB, + name: "DIVD", + argLen: 2, + asm: arm.ADIVD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "RSBSshiftRLreg", - argLen: 3, - asm: arm.ARSB, + name: "MULAF", + argLen: 3, + resultInArg0: true, + asm: arm.AMULAF, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "RSBSshiftRAreg", - argLen: 3, - asm: arm.ARSB, + name: "MULAD", + argLen: 3, + resultInArg0: true, + asm: arm.AMULAD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "CMP", - argLen: 2, - asm: arm.ACMP, + name: "MULSF", + argLen: 3, + resultInArg0: true, + asm: arm.AMULSF, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "CMPconst", - auxType: auxInt32, - argLen: 1, - asm: arm.ACMP, + name: "MULSD", + argLen: 3, + resultInArg0: true, + asm: arm.AMULSD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "CMN", + name: "FMULAD", + argLen: 3, + resultInArg0: true, + asm: arm.AFMULAD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 
F15 + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "AND", argLen: 2, commutative: true, - asm: arm.ACMN, + asm: arm.AAND, reg: regInfo{ inputs: []inputInfo{ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, }, }, { - name: "CMNconst", + name: "ANDconst", auxType: auxInt32, argLen: 1, - asm: arm.ACMN, + asm: arm.AAND, reg: regInfo{ inputs: []inputInfo{ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, }, }, { - name: "TST", + name: "OR", argLen: 2, commutative: true, - asm: arm.ATST, + asm: arm.AORR, reg: regInfo{ inputs: []inputInfo{ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, }, }, { - name: "TSTconst", + name: "ORconst", auxType: auxInt32, argLen: 1, - asm: arm.ATST, + asm: arm.AORR, reg: regInfo{ inputs: []inputInfo{ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, }, }, { - name: "TEQ", + name: "XOR", argLen: 2, commutative: true, - asm: arm.ATEQ, + asm: arm.AEOR, reg: regInfo{ inputs: []inputInfo{ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, }, }, { - name: "TEQconst", + name: "XORconst", auxType: auxInt32, argLen: 1, - asm: arm.ATEQ, + asm: arm.AEOR, reg: regInfo{ inputs: []inputInfo{ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, - }, - }, - { - name: "CMPF", - argLen: 2, - asm: arm.ACMPF, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMPD", + name: "BIC", argLen: 2, - asm: arm.ACMPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - }, - }, - { - name: "CMPshiftLL", - auxType: auxInt32, - argLen: 2, - asm: arm.ACMP, + asm: arm.ABIC, reg: regInfo{ inputs: []inputInfo{ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, }, }, { - name: "CMPshiftRL", + name: "BICconst", auxType: auxInt32, - argLen: 2, - asm: arm.ACMP, + argLen: 1, + asm: arm.ABIC, reg: regInfo{ inputs: []inputInfo{ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMPshiftRA", + name: "BFX", auxType: auxInt32, - argLen: 2, - asm: arm.ACMP, + argLen: 1, + asm: arm.ABFX, reg: regInfo{ inputs: []inputInfo{ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + 
outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMNshiftLL", + name: "BFXU", auxType: auxInt32, - argLen: 2, - asm: arm.ACMN, + argLen: 1, + asm: arm.ABFXU, reg: regInfo{ inputs: []inputInfo{ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMNshiftRL", - auxType: auxInt32, - argLen: 2, - asm: arm.ACMN, + name: "MVN", + argLen: 1, + asm: arm.AMVN, reg: regInfo{ inputs: []inputInfo{ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMNshiftRA", - auxType: auxInt32, - argLen: 2, - asm: arm.ACMN, + name: "NEGF", + argLen: 1, + asm: arm.ANEGF, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "TSTshiftLL", - auxType: auxInt32, - argLen: 2, - asm: arm.ATST, + name: "NEGD", + argLen: 1, + asm: arm.ANEGD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "TSTshiftRL", - auxType: auxInt32, - argLen: 2, - asm: arm.ATST, + name: "SQRTD", + argLen: 1, + asm: arm.ASQRTD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "TSTshiftRA", - auxType: auxInt32, - argLen: 2, - asm: arm.ATST, + name: "SQRTF", + argLen: 1, + asm: arm.ASQRTF, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "TEQshiftLL", - auxType: auxInt32, - argLen: 2, - asm: arm.ATEQ, + name: "ABSD", + argLen: 1, + asm: arm.AABSD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "TEQshiftRL", - auxType: auxInt32, - argLen: 2, - asm: arm.ATEQ, + name: "CLZ", + argLen: 1, + asm: arm.ACLZ, reg: regInfo{ inputs: []inputInfo{ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "TEQshiftRA", - auxType: auxInt32, - argLen: 2, - asm: 
arm.ATEQ, + name: "REV", + argLen: 1, + asm: arm.AREV, reg: regInfo{ inputs: []inputInfo{ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMPshiftLLreg", - argLen: 3, - asm: arm.ACMP, + name: "REV16", + argLen: 1, + asm: arm.AREV16, reg: regInfo{ inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMPshiftRLreg", - argLen: 3, - asm: arm.ACMP, + name: "RBIT", + argLen: 1, + asm: arm.ARBIT, reg: regInfo{ inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMPshiftRAreg", - argLen: 3, - asm: arm.ACMP, + name: "SLL", + argLen: 2, + asm: arm.ASLL, reg: regInfo{ inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMNshiftLLreg", - argLen: 3, - asm: arm.ACMN, + name: "SLLconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ASLL, reg: regInfo{ inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMNshiftRLreg", - argLen: 3, - asm: arm.ACMN, + name: "SRL", + argLen: 2, + asm: arm.ASRL, reg: regInfo{ inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMNshiftRAreg", - argLen: 3, - asm: arm.ACMN, + name: "SRLconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ASRL, reg: regInfo{ inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "TSTshiftLLreg", - argLen: 3, - asm: arm.ATST, + name: "SRA", + argLen: 2, + asm: arm.ASRA, reg: regInfo{ inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "TSTshiftRLreg", - argLen: 3, - asm: arm.ATST, + name: "SRAconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ASRA, reg: regInfo{ inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ {0, 
21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "TSTshiftRAreg", - argLen: 3, - asm: arm.ATST, + name: "SRR", + argLen: 2, reg: regInfo{ inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "TEQshiftLLreg", - argLen: 3, - asm: arm.ATEQ, + name: "SRRconst", + auxType: auxInt32, + argLen: 1, reg: regInfo{ inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "TEQshiftRLreg", - argLen: 3, - asm: arm.ATEQ, + name: "ADDshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "TEQshiftRAreg", - argLen: 3, - asm: arm.ATEQ, + name: "ADDshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMPF0", - argLen: 1, - asm: arm.ACMPF, + name: "ADDshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMPD0", - argLen: 1, - asm: arm.ACMPD, + name: "SUBshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, - }, - }, - { - name: "MOVWconst", - auxType: auxInt32, - argLen: 0, - rematerializeable: true, - asm: arm.AMOVW, - reg: regInfo{ outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVFconst", - auxType: auxFloat64, - argLen: 0, - rematerializeable: true, - asm: arm.AMOVF, + name: "SUBshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.ASUB, reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVDconst", - auxType: auxFloat64, - argLen: 0, - rematerializeable: 
true, - asm: arm.AMOVD, + name: "SUBshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.ASUB, reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWaddr", - auxType: auxSymOff, - argLen: 1, - rematerializeable: true, - symEffect: SymAddr, - asm: arm.AMOVW, + name: "RSBshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294975488}, // SP SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -31275,15 +33282,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVBload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm.AMOVB, + name: "RSBshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -31291,15 +33297,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVBUload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm.AMOVBU, + name: "RSBshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -31307,15 +33312,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVHload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm.AMOVH, + name: "ANDshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -31323,15 +33327,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVHUload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm.AMOVHU, + name: "ANDshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -31339,15 +33342,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm.AMOVW, + name: "ANDshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 
R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -31355,115 +33357,104 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVFload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm.AMOVF, + name: "ORshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.AORR, reg: regInfo{ inputs: []inputInfo{ - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVDload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm.AMOVD, + name: "ORshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.AORR, reg: regInfo{ inputs: []inputInfo{ - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVBstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm.AMOVB, + name: "ORshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.AORR, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, - }, - }, - { - name: "MOVHstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm.AMOVH, - reg: regInfo{ - inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm.AMOVW, + name: "XORshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.AEOR, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVFstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm.AMOVF, + name: "XORshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.AEOR, reg: regInfo{ inputs: []inputInfo{ - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVDstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm.AMOVD, + name: "XORshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.AEOR, reg: regInfo{ inputs: []inputInfo{ - {0, 
4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWloadidx", - argLen: 3, - asm: arm.AMOVW, + name: "XORshiftRR", + auxType: auxInt32, + argLen: 2, + asm: arm.AEOR, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -31471,14 +33462,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWloadshiftLL", + name: "BICshiftLL", auxType: auxInt32, - argLen: 3, - asm: arm.AMOVW, + argLen: 2, + asm: arm.ABIC, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -31486,14 +33477,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWloadshiftRL", + name: "BICshiftRL", auxType: auxInt32, - argLen: 3, - asm: arm.AMOVW, + argLen: 2, + asm: arm.ABIC, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -31501,14 +33492,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWloadshiftRA", + name: "BICshiftRA", auxType: auxInt32, - argLen: 3, - asm: arm.AMOVW, + argLen: 2, + asm: arm.ABIC, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -31516,13 +33507,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVBUloadidx", - argLen: 3, - asm: arm.AMOVBU, + name: "MVNshiftLL", + auxType: auxInt32, + argLen: 1, + asm: arm.AMVN, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -31530,13 +33521,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVBloadidx", - argLen: 3, - asm: arm.AMOVB, + name: "MVNshiftRL", + auxType: auxInt32, + argLen: 1, + asm: arm.AMVN, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -31544,13 +33535,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVHUloadidx", - argLen: 3, - 
asm: arm.AMOVHU, + name: "MVNshiftRA", + auxType: auxInt32, + argLen: 1, + asm: arm.AMVN, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -31558,13 +33549,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVHloadidx", - argLen: 3, - asm: arm.AMOVH, + name: "ADCshiftLL", + auxType: auxInt32, + argLen: 3, + asm: arm.AADC, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -31572,87 +33564,104 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWstoreidx", - argLen: 4, - asm: arm.AMOVW, + name: "ADCshiftRL", + auxType: auxInt32, + argLen: 3, + asm: arm.AADC, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWstoreshiftLL", + name: "ADCshiftRA", auxType: auxInt32, - argLen: 4, - asm: arm.AMOVW, + argLen: 3, + asm: arm.AADC, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWstoreshiftRL", + name: "SBCshiftLL", auxType: auxInt32, - argLen: 4, - asm: arm.AMOVW, + argLen: 3, + asm: arm.ASBC, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWstoreshiftRA", + name: "SBCshiftRL", auxType: auxInt32, - argLen: 4, - asm: arm.AMOVW, + argLen: 3, + asm: arm.ASBC, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVBstoreidx", - argLen: 4, - asm: arm.AMOVB, + name: "SBCshiftRA", + auxType: auxInt32, + argLen: 3, + asm: arm.ASBC, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 
21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVHstoreidx", - argLen: 4, - asm: arm.AMOVH, + name: "RSCshiftLL", + auxType: auxInt32, + argLen: 3, + asm: arm.ARSC, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVBreg", - argLen: 1, - asm: arm.AMOVBS, + name: "RSCshiftRL", + auxType: auxInt32, + argLen: 3, + asm: arm.ARSC, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -31660,12 +33669,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVBUreg", - argLen: 1, - asm: arm.AMOVBU, + name: "RSCshiftRA", + auxType: auxInt32, + argLen: 3, + asm: arm.ARSC, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -31673,204 +33684,233 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVHreg", - argLen: 1, - asm: arm.AMOVHS, + name: "ADDSshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ + {1, 0}, {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVHUreg", - argLen: 1, - asm: arm.AMOVHU, + name: "ADDSshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ + {1, 0}, {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWreg", - argLen: 1, - asm: arm.AMOVW, + name: "ADDSshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ + {1, 0}, {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWnop", - argLen: 1, - resultInArg0: true, + name: "SUBSshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ + {1, 0}, {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWF", - argLen: 1, - asm: arm.AMOVWF, + name: "SUBSshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 
R8 R9 g R12 R14 }, - clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWD", - argLen: 1, - asm: arm.AMOVWD, + name: "SUBSshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, - clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWUF", - argLen: 1, - asm: arm.AMOVWF, + name: "RSBSshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, - clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWUD", - argLen: 1, - asm: arm.AMOVWD, + name: "RSBSshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, - clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVFW", - argLen: 1, - asm: arm.AMOVFW, + name: "RSBSshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, - clobbers: 2147483648, // F15 outputs: []outputInfo{ + {1, 0}, {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVDW", - argLen: 1, - asm: arm.AMOVDW, + name: "ADDshiftLLreg", + argLen: 3, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, - clobbers: 2147483648, // F15 outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVFWU", - argLen: 1, - asm: arm.AMOVFW, + name: "ADDshiftRLreg", + argLen: 3, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, - clobbers: 2147483648, // F15 outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVDWU", - argLen: 1, - asm: arm.AMOVDW, + name: "ADDshiftRAreg", + argLen: 3, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 21503}, // R0 
R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, - clobbers: 2147483648, // F15 outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVFD", - argLen: 1, - asm: arm.AMOVFD, + name: "SUBshiftLLreg", + argLen: 3, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVDF", - argLen: 1, - asm: arm.AMOVDF, + name: "SUBshiftRLreg", + argLen: 3, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMOVWHSconst", - auxType: auxInt32, - argLen: 2, - resultInArg0: true, - asm: arm.AMOVW, + name: "SUBshiftRAreg", + argLen: 3, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -31878,14 +33918,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "CMOVWLSconst", - auxType: auxInt32, - argLen: 2, - resultInArg0: true, - asm: arm.AMOVW, + name: "RSBshiftLLreg", + argLen: 3, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -31893,13 +33933,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "SRAcond", + name: "RSBshiftRLreg", argLen: 3, - asm: arm.ASRA, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -31907,1997 +33948,1993 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "CALLstatic", - auxType: auxCallOff, - argLen: 1, - clobberFlags: true, - call: true, - reg: regInfo{ - clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - }, - { - name: "CALLtail", - auxType: auxCallOff, - argLen: 1, - clobberFlags: true, - call: true, - tailCall: true, + name: "RSBshiftRAreg", + argLen: 3, + asm: arm.ARSB, reg: regInfo{ - clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 
21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, }, }, { - name: "CALLclosure", - auxType: auxCallOff, - argLen: 3, - clobberFlags: true, - call: true, + name: "ANDshiftLLreg", + argLen: 3, + asm: arm.AAND, reg: regInfo{ inputs: []inputInfo{ - {1, 128}, // R7 - {0, 29695}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP R14 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, - clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, { - name: "CALLinter", - auxType: auxCallOff, - argLen: 2, - clobberFlags: true, - call: true, + name: "ANDshiftRLreg", + argLen: 3, + asm: arm.AAND, reg: regInfo{ inputs: []inputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, - clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, { - name: "LoweredNilCheck", - argLen: 2, - nilCheck: true, - faultOnNilArg0: true, + name: "ANDshiftRAreg", + argLen: 3, + asm: arm.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "Equal", - argLen: 1, + name: "ORshiftLLreg", + argLen: 3, + asm: arm.AORR, reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "NotEqual", - argLen: 1, + name: "ORshiftRLreg", + argLen: 3, + asm: arm.AORR, reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LessThan", - argLen: 1, + name: "ORshiftRAreg", + argLen: 3, + asm: arm.AORR, reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LessEqual", - argLen: 1, + name: "XORshiftLLreg", + argLen: 3, + asm: arm.AEOR, reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "GreaterThan", - argLen: 1, + name: "XORshiftRLreg", + argLen: 3, + asm: arm.AEOR, reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 
R6 R7 R8 R9 R12 R14 + }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "GreaterEqual", - argLen: 1, + name: "XORshiftRAreg", + argLen: 3, + asm: arm.AEOR, reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LessThanU", - argLen: 1, + name: "BICshiftLLreg", + argLen: 3, + asm: arm.ABIC, reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LessEqualU", - argLen: 1, + name: "BICshiftRLreg", + argLen: 3, + asm: arm.ABIC, reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "GreaterThanU", - argLen: 1, + name: "BICshiftRAreg", + argLen: 3, + asm: arm.ABIC, reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "GreaterEqualU", - argLen: 1, + name: "MVNshiftLLreg", + argLen: 2, + asm: arm.AMVN, reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "DUFFZERO", - auxType: auxInt64, - argLen: 3, - faultOnNilArg0: true, + name: "MVNshiftRLreg", + argLen: 2, + asm: arm.AMVN, reg: regInfo{ inputs: []inputInfo{ - {0, 2}, // R1 - {1, 1}, // R0 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, - clobbers: 20482, // R1 R12 R14 }, }, { - name: "DUFFCOPY", - auxType: auxInt64, - argLen: 3, - faultOnNilArg0: true, - faultOnNilArg1: true, + name: "MVNshiftRAreg", + argLen: 2, + asm: arm.AMVN, reg: regInfo{ inputs: []inputInfo{ - {0, 4}, // R2 - {1, 2}, // R1 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, - clobbers: 20487, // R0 R1 R2 R12 R14 }, }, { - name: "LoweredZero", - auxType: auxInt64, - argLen: 4, - clobberFlags: true, - faultOnNilArg0: true, + name: "ADCshiftLLreg", + argLen: 4, + asm: arm.AADC, reg: regInfo{ inputs: []inputInfo{ - {0, 2}, // R1 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, - clobbers: 2, // R1 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, }, }, { - name: "LoweredMove", - auxType: auxInt64, - argLen: 4, - clobberFlags: true, - faultOnNilArg0: true, - faultOnNilArg1: true, + name: "ADCshiftRLreg", + argLen: 4, 
+ asm: arm.AADC, reg: regInfo{ inputs: []inputInfo{ - {0, 4}, // R2 - {1, 2}, // R1 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, - clobbers: 6, // R1 R2 - }, - }, - { - name: "LoweredGetClosurePtr", - argLen: 0, - zeroWidth: true, - reg: regInfo{ outputs: []outputInfo{ - {0, 128}, // R7 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LoweredGetCallerSP", - argLen: 1, - rematerializeable: true, + name: "ADCshiftRAreg", + argLen: 4, + asm: arm.AADC, reg: regInfo{ - outputs: []outputInfo{ + inputs: []inputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, - }, - }, - { - name: "LoweredGetCallerPC", - argLen: 0, - rematerializeable: true, - reg: regInfo{ outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LoweredPanicBoundsA", - auxType: auxInt64, - argLen: 3, - call: true, + name: "SBCshiftLLreg", + argLen: 4, + asm: arm.ASBC, reg: regInfo{ inputs: []inputInfo{ - {0, 4}, // R2 - {1, 8}, // R3 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LoweredPanicBoundsB", - auxType: auxInt64, - argLen: 3, - call: true, + name: "SBCshiftRLreg", + argLen: 4, + asm: arm.ASBC, reg: regInfo{ inputs: []inputInfo{ - {0, 2}, // R1 - {1, 4}, // R2 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LoweredPanicBoundsC", - auxType: auxInt64, - argLen: 3, - call: true, + name: "SBCshiftRAreg", + argLen: 4, + asm: arm.ASBC, reg: regInfo{ inputs: []inputInfo{ - {0, 1}, // R0 - {1, 2}, // R1 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LoweredPanicExtendA", - auxType: auxInt64, - argLen: 4, - call: true, + name: "RSCshiftLLreg", + argLen: 4, + asm: arm.ARSC, reg: regInfo{ inputs: []inputInfo{ - {0, 16}, // R4 - {1, 4}, // R2 - {2, 8}, // R3 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LoweredPanicExtendB", - auxType: auxInt64, - argLen: 4, - call: true, + name: "RSCshiftRLreg", + argLen: 4, + asm: arm.ARSC, reg: regInfo{ inputs: []inputInfo{ - {0, 16}, // R4 - {1, 2}, // R1 - {2, 4}, // R2 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LoweredPanicExtendC", - auxType: auxInt64, - argLen: 4, - call: true, + name: "RSCshiftRAreg", + argLen: 4, + asm: arm.ARSC, reg: regInfo{ inputs: []inputInfo{ - {0, 16}, // R4 - {1, 1}, 
// R0 - {2, 2}, // R1 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, - }, - }, - { - name: "FlagConstant", - auxType: auxFlagConstant, - argLen: 0, - reg: regInfo{}, - }, - { - name: "InvertFlags", - argLen: 1, - reg: regInfo{}, - }, - { - name: "LoweredWB", - auxType: auxInt64, - argLen: 1, - clobberFlags: true, - reg: regInfo{ - clobbers: 4294922240, // R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 outputs: []outputInfo{ - {0, 256}, // R8 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, - { - name: "ADCSflags", - argLen: 3, - commutative: true, - asm: arm64.AADCS, + name: "ADDSshiftLLreg", + argLen: 3, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ {1, 0}, - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "ADCzerocarry", - argLen: 1, - asm: arm64.AADC, + name: "ADDSshiftRLreg", + argLen: 3, + asm: arm.AADD, reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "ADD", - argLen: 2, - commutative: true, - asm: arm64.AADD, + name: "ADDSshiftRAreg", + argLen: 3, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "ADDconst", - auxType: auxInt64, - argLen: 1, - asm: arm64.AADD, + name: "SUBSshiftLLreg", + argLen: 3, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 1476395007}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "ADDSconstflags", - auxType: auxInt64, - argLen: 1, - asm: arm64.AADDS, + name: "SUBSshiftRLreg", + argLen: 
3, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ {1, 0}, - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "ADDSflags", - argLen: 2, - commutative: true, - asm: arm64.AADDS, + name: "SUBSshiftRAreg", + argLen: 3, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ {1, 0}, - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SUB", - argLen: 2, - asm: arm64.ASUB, + name: "RSBSshiftLLreg", + argLen: 3, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SUBconst", - auxType: auxInt64, - argLen: 1, - asm: arm64.ASUB, + name: "RSBSshiftRLreg", + argLen: 3, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SBCSflags", + name: "RSBSshiftRAreg", argLen: 3, - asm: arm64.ASBCS, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ {1, 0}, - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: 
"SUBSflags", + name: "CMP", argLen: 2, - asm: arm64.ASUBS, + asm: arm.ACMP, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - }, - outputs: []outputInfo{ - {1, 0}, - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "MUL", - argLen: 2, - commutative: true, - asm: arm64.AMUL, + name: "CMPconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ACMP, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - }, - }, - }, - { - name: "MULW", - argLen: 2, - commutative: true, - asm: arm64.AMULW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "MNEG", + name: "CMN", argLen: 2, commutative: true, - asm: arm64.AMNEG, + asm: arm.ACMN, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "MNEGW", - argLen: 2, - commutative: true, - asm: arm64.AMNEGW, + name: "CMNconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ACMN, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "MULH", + name: "TST", argLen: 2, commutative: true, - asm: arm64.ASMULH, + asm: arm.ATST, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 
R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "UMULH", - argLen: 2, - commutative: true, - asm: arm64.AUMULH, + name: "TSTconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ATST, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "MULL", + name: "TEQ", argLen: 2, commutative: true, - asm: arm64.ASMULL, + asm: arm.ATEQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "UMULL", - argLen: 2, - commutative: true, - asm: arm64.AUMULL, + name: "TEQconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ATEQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "DIV", + name: "CMPF", argLen: 2, - asm: arm64.ASDIV, + asm: arm.ACMPF, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "UDIV", + name: "CMPD", argLen: 2, - asm: arm64.AUDIV, + asm: arm.ACMPD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "DIVW", - argLen: 2, - asm: arm64.ASDIVW, + name: "CMPshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.ACMP, 
reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "UDIVW", - argLen: 2, - asm: arm64.AUDIVW, + name: "CMPshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.ACMP, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "MOD", - argLen: 2, - asm: arm64.AREM, + name: "CMPshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.ACMP, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "UMOD", - argLen: 2, - asm: arm64.AUREM, + name: "CMNshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.ACMN, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "MODW", - argLen: 2, - asm: arm64.AREMW, + name: "CMNshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.ACMN, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "UMODW", - argLen: 2, - asm: arm64.AUREMW, + name: "CMNshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.ACMN, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 
R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "FADDS", - argLen: 2, - commutative: true, - asm: arm64.AFADDS, + name: "TSTshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.ATST, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "FADDD", - argLen: 2, - commutative: true, - asm: arm64.AFADDD, + name: "TSTshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.ATST, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "FSUBS", - argLen: 2, - asm: arm64.AFSUBS, + name: "TSTshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.ATST, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "FSUBD", - argLen: 2, - asm: arm64.AFSUBD, + name: "TEQshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.ATEQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "FMULS", - argLen: 2, - commutative: true, - asm: arm64.AFMULS, + name: "TEQshiftRL", + 
auxType: auxInt32, + argLen: 2, + asm: arm.ATEQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "FMULD", - argLen: 2, - commutative: true, - asm: arm64.AFMULD, + name: "TEQshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.ATEQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "FNMULS", - argLen: 2, - commutative: true, - asm: arm64.AFNMULS, + name: "CMPshiftLLreg", + argLen: 3, + asm: arm.ACMP, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "FNMULD", - argLen: 2, - commutative: true, - asm: arm64.AFNMULD, + name: "CMPshiftRLreg", + argLen: 3, + asm: arm.ACMP, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "FDIVS", - argLen: 2, - asm: arm64.AFDIVS, + name: "CMPshiftRAreg", + argLen: 3, + asm: arm.ACMP, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - 
outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "FDIVD", - argLen: 2, - asm: arm64.AFDIVD, + name: "CMNshiftLLreg", + argLen: 3, + asm: arm.ACMN, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "AND", - argLen: 2, - commutative: true, - asm: arm64.AAND, + name: "CMNshiftRLreg", + argLen: 3, + asm: arm.ACMN, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "ANDconst", - auxType: auxInt64, - argLen: 1, - asm: arm64.AAND, + name: "CMNshiftRAreg", + argLen: 3, + asm: arm.ACMN, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "OR", - argLen: 2, - commutative: true, - asm: arm64.AORR, + name: "TSTshiftLLreg", + argLen: 3, + asm: arm.ATST, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "ORconst", - auxType: auxInt64, - argLen: 1, - asm: arm64.AORR, + name: "TSTshiftRLreg", + argLen: 3, + asm: arm.ATST, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 
R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "XOR", - argLen: 2, - commutative: true, - asm: arm64.AEOR, + name: "TSTshiftRAreg", + argLen: 3, + asm: arm.ATST, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "XORconst", - auxType: auxInt64, - argLen: 1, - asm: arm64.AEOR, + name: "TEQshiftLLreg", + argLen: 3, + asm: arm.ATEQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "BIC", - argLen: 2, - asm: arm64.ABIC, + name: "TEQshiftRLreg", + argLen: 3, + asm: arm.ATEQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "EON", - argLen: 2, - asm: arm64.AEON, + name: "TEQshiftRAreg", + argLen: 3, + asm: arm.ATEQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "ORN", - argLen: 2, - asm: arm64.AORN, + name: "CMPF0", + argLen: 1, + asm: arm.ACMPF, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 4294901760}, // F0 F1 F2 F3 F4 
F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "MVN", + name: "CMPD0", argLen: 1, - asm: arm64.AMVN, + asm: arm.ACMPD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "NEG", - argLen: 1, - asm: arm64.ANEG, + name: "MOVWconst", + auxType: auxInt32, + argLen: 0, + rematerializeable: true, + asm: arm.AMOVW, reg: regInfo{ - inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "NEGSflags", - argLen: 1, - asm: arm64.ANEGS, + name: "MOVFconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: arm.AMOVF, reg: regInfo{ - inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, outputs: []outputInfo{ - {1, 0}, - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "NGCzerocarry", - argLen: 1, - asm: arm64.ANGC, + name: "MOVDconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: arm.AMOVD, reg: regInfo{ outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "FABSD", - argLen: 1, - asm: arm64.AFABSD, + name: "MOVWaddr", + auxType: auxSymOff, + argLen: 1, + rematerializeable: true, + symEffect: SymAddr, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294975488}, // SP SB }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "FNEGS", - argLen: 1, - asm: arm64.AFNEGS, + name: "MOVBload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "FNEGD", - argLen: 1, - asm: arm64.AFNEGD, + name: "MOVBUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 
F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "FSQRTD", - argLen: 1, - asm: arm64.AFSQRTD, + name: "MOVHload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "FSQRTS", - argLen: 1, - asm: arm64.AFSQRTS, + name: "MOVHUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "FMIND", - argLen: 2, - asm: arm64.AFMIND, + name: "MOVWload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "FMINS", - argLen: 2, - asm: arm64.AFMINS, + name: "MOVFload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm.AMOVF, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "FMAXD", - argLen: 2, - asm: arm64.AFMAXD, + name: "MOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm.AMOVD, reg: 
regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "FMAXS", - argLen: 2, - asm: arm64.AFMAXS, + name: "MOVBstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, }, }, { - name: "REV", - argLen: 1, - asm: arm64.AREV, + name: "MOVHstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, }, }, { - name: "REVW", - argLen: 1, - asm: arm64.AREVW, + name: "MOVWstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, }, }, { - name: "REV16", - argLen: 1, - asm: arm64.AREV16, + name: "MOVFstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm.AMOVF, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "REV16W", - argLen: 1, - asm: arm64.AREV16W, + name: "MOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 
402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "RBIT", - argLen: 1, - asm: arm64.ARBIT, + name: "MOVWloadidx", + argLen: 3, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "RBITW", - argLen: 1, - asm: arm64.ARBITW, + name: "MOVWloadshiftLL", + auxType: auxInt32, + argLen: 3, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CLZ", - argLen: 1, - asm: arm64.ACLZ, + name: "MOVWloadshiftRL", + auxType: auxInt32, + argLen: 3, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CLZW", - argLen: 1, - asm: arm64.ACLZW, + name: "MOVWloadshiftRA", + auxType: auxInt32, + argLen: 3, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "VCNT", - argLen: 1, - asm: arm64.AVCNT, + name: "MOVBUloadidx", + argLen: 3, + asm: arm.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "VUADDLV", - 
argLen: 1, - asm: arm64.AVUADDLV, + name: "MOVBloadidx", + argLen: 3, + asm: arm.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LoweredRound32F", - argLen: 1, - resultInArg0: true, - zeroWidth: true, + name: "MOVHUloadidx", + argLen: 3, + asm: arm.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LoweredRound64F", - argLen: 1, - resultInArg0: true, - zeroWidth: true, + name: "MOVHloadidx", + argLen: 3, + asm: arm.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "FMADDS", - argLen: 3, - asm: arm64.AFMADDS, + name: "MOVWstoreidx", + argLen: 4, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, }, }, { - name: "FMADDD", - argLen: 3, - asm: arm64.AFMADDD, + name: "MOVWstoreshiftLL", + auxType: auxInt32, + argLen: 4, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 
F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, }, }, { - name: "FNMADDS", - argLen: 3, - asm: arm64.AFNMADDS, + name: "MOVWstoreshiftRL", + auxType: auxInt32, + argLen: 4, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, }, }, { - name: "FNMADDD", - argLen: 3, - asm: arm64.AFNMADDD, + name: "MOVWstoreshiftRA", + auxType: auxInt32, + argLen: 4, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, }, }, { - name: "FMSUBS", - argLen: 3, - asm: arm64.AFMSUBS, + name: "MOVBstoreidx", + argLen: 4, + asm: arm.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, }, }, { - name: "FMSUBD", - argLen: 3, - asm: arm64.AFMSUBD, + name: "MOVHstoreidx", + argLen: 4, + asm: arm.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 
F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, }, }, { - name: "FNMSUBS", - argLen: 3, - asm: arm64.AFNMSUBS, + name: "MOVBreg", + argLen: 1, + asm: arm.AMOVBS, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "FNMSUBD", - argLen: 3, - asm: arm64.AFNMSUBD, + name: "MOVBUreg", + argLen: 1, + asm: arm.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MADD", - argLen: 3, - asm: arm64.AMADD, + name: "MOVHreg", + argLen: 1, + asm: arm.AMOVHS, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {2, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MADDW", - argLen: 3, - asm: arm64.AMADDW, + name: "MOVHUreg", + argLen: 1, + asm: arm.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - 
{1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {2, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MSUB", - argLen: 3, - asm: arm64.AMSUB, + name: "MOVWreg", + argLen: 1, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {2, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MSUBW", - argLen: 3, - asm: arm64.AMSUBW, + name: "MOVWnop", + argLen: 1, + resultInArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {2, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SLL", - argLen: 2, - asm: arm64.ALSL, + name: "MOVWF", + argLen: 1, + asm: arm.AMOVWF, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, + clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "SLLconst", - auxType: auxInt64, - argLen: 1, - asm: arm64.ALSL, + name: "MOVWD", + argLen: 1, + asm: arm.AMOVWD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, + clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "SRL", - argLen: 2, - asm: arm64.ALSR, + name: "MOVWUF", + argLen: 1, + asm: arm.AMOVWF, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 
R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, + clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "SRLconst", - auxType: auxInt64, - argLen: 1, - asm: arm64.ALSR, + name: "MOVWUD", + argLen: 1, + asm: arm.AMOVWD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, + clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "SRA", - argLen: 2, - asm: arm64.AASR, + name: "MOVFW", + argLen: 1, + asm: arm.AMOVFW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, + clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SRAconst", - auxType: auxInt64, - argLen: 1, - asm: arm64.AASR, + name: "MOVDW", + argLen: 1, + asm: arm.AMOVDW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, + clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "ROR", - argLen: 2, - asm: arm64.AROR, + name: "MOVFWU", + argLen: 1, + asm: arm.AMOVFW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, + clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "RORW", - argLen: 2, - asm: arm64.ARORW, + name: "MOVDWU", + argLen: 1, + asm: arm.AMOVDW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, + clobbers: 2147483648, // F15 outputs: []outputInfo{ 
- {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "RORconst", - auxType: auxInt64, - argLen: 1, - asm: arm64.AROR, + name: "MOVFD", + argLen: 1, + asm: arm.AMOVFD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "RORWconst", - auxType: auxInt64, - argLen: 1, - asm: arm64.ARORW, + name: "MOVDF", + argLen: 1, + asm: arm.AMOVDF, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "EXTRconst", - auxType: auxInt64, - argLen: 2, - asm: arm64.AEXTR, + name: "CMOVWHSconst", + auxType: auxInt32, + argLen: 2, + resultInArg0: true, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "EXTRWconst", - auxType: auxInt64, - argLen: 2, - asm: arm64.AEXTRW, + name: "CMOVWLSconst", + auxType: auxInt32, + argLen: 2, + resultInArg0: true, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMP", - argLen: 2, - asm: arm64.ACMP, + name: "SRAcond", + argLen: 3, + asm: arm.ASRA, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMPconst", - auxType: auxInt64, - argLen: 1, - asm: arm64.ACMP, + name: "CALLstatic", + auxType: auxCallOff, + argLen: 1, + clobberFlags: true, + call: true, + reg: regInfo{ 
+ clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + { + name: "CALLtail", + auxType: auxCallOff, + argLen: 1, + clobberFlags: true, + call: true, + tailCall: true, + reg: regInfo{ + clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + { + name: "CALLclosure", + auxType: auxCallOff, + argLen: 3, + clobberFlags: true, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 128}, // R7 + {0, 29695}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP R14 }, + clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, { - name: "CMPW", - argLen: 2, - asm: arm64.ACMPW, + name: "CALLinter", + auxType: auxCallOff, + argLen: 2, + clobberFlags: true, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, + clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, { - name: "CMPWconst", - auxType: auxInt32, - argLen: 1, - asm: arm64.ACMPW, + name: "LoweredNilCheck", + argLen: 2, + nilCheck: true, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "CMN", - argLen: 2, - commutative: true, - asm: arm64.ACMN, + name: "Equal", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMNconst", - auxType: auxInt64, - argLen: 1, - asm: arm64.ACMN, + name: "NotEqual", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMNW", - argLen: 2, - commutative: true, - asm: arm64.ACMNW, + name: "LessThan", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMNWconst", - auxType: auxInt32, - argLen: 1, - asm: arm64.ACMNW, + name: "LessEqual", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "TST", - argLen: 2, - commutative: true, - asm: 
arm64.ATST, + name: "GreaterThan", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "TSTconst", - auxType: auxInt64, - argLen: 1, - asm: arm64.ATST, + name: "GreaterEqual", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "TSTW", - argLen: 2, - commutative: true, - asm: arm64.ATSTW, + name: "LessThanU", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "TSTWconst", - auxType: auxInt32, - argLen: 1, - asm: arm64.ATSTW, + name: "LessEqualU", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "FCMPS", - argLen: 2, - asm: arm64.AFCMPS, + name: "GreaterThanU", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "FCMPD", - argLen: 2, - asm: arm64.AFCMPD, + name: "GreaterEqualU", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "DUFFZERO", + auxType: auxInt64, + argLen: 3, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2}, // R1 + {1, 1}, // R0 }, + clobbers: 20482, // R1 R12 R14 }, }, { - name: "FCMPS0", - argLen: 1, - asm: arm64.AFCMPS, + name: "DUFFCOPY", + auxType: auxInt64, + argLen: 3, + faultOnNilArg0: true, + faultOnNilArg1: true, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4}, // R2 + {1, 2}, // R1 }, + clobbers: 20487, // R0 R1 R2 R12 R14 }, }, { - name: "FCMPD0", - argLen: 1, - asm: arm64.AFCMPD, + name: "LoweredZero", + auxType: auxInt64, + argLen: 4, + clobberFlags: true, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 
F27 F28 F29 F30 F31 + {0, 2}, // R1 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, + clobbers: 2, // R1 }, }, { - name: "MVNshiftLL", - auxType: auxInt64, - argLen: 1, - asm: arm64.AMVN, + name: "LoweredMove", + auxType: auxInt64, + argLen: 4, + clobberFlags: true, + faultOnNilArg0: true, + faultOnNilArg1: true, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 4}, // R2 + {1, 2}, // R1 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, + clobbers: 6, // R1 R2 + }, + }, + { + name: "LoweredGetClosurePtr", + argLen: 0, + zeroWidth: true, + reg: regInfo{ outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 128}, // R7 }, }, }, { - name: "MVNshiftRL", - auxType: auxInt64, - argLen: 1, - asm: arm64.AMVN, + name: "LoweredGetCallerSP", + argLen: 1, + rematerializeable: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, + }, + }, + { + name: "LoweredGetCallerPC", + argLen: 0, + rematerializeable: true, + reg: regInfo{ outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MVNshiftRA", + name: "LoweredPanicBoundsA", auxType: auxInt64, - argLen: 1, - asm: arm64.AMVN, + argLen: 3, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 4}, // R2 + {1, 8}, // R3 }, }, }, { - name: "MVNshiftRO", + name: "LoweredPanicBoundsB", auxType: auxInt64, - argLen: 1, - asm: arm64.AMVN, + argLen: 3, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2}, // R1 + {1, 4}, // R2 }, }, }, { - name: "NEGshiftLL", + name: "LoweredPanicBoundsC", auxType: auxInt64, - argLen: 1, - asm: arm64.ANEG, + argLen: 3, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1}, // R0 + {1, 2}, // R1 }, }, }, { - name: "NEGshiftRL", + name: "LoweredPanicExtendA", auxType: auxInt64, - argLen: 1, - asm: arm64.ANEG, + argLen: 4, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 16}, // R4 + {1, 4}, // R2 + {2, 8}, // R3 }, }, }, { - name: 
"NEGshiftRA", + name: "LoweredPanicExtendB", auxType: auxInt64, - argLen: 1, - asm: arm64.ANEG, + argLen: 4, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 16}, // R4 + {1, 2}, // R1 + {2, 4}, // R2 }, }, }, { - name: "ADDshiftLL", + name: "LoweredPanicExtendC", auxType: auxInt64, - argLen: 2, - asm: arm64.AADD, + argLen: 4, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 16}, // R4 + {1, 1}, // R0 + {2, 2}, // R1 }, + }, + }, + { + name: "FlagConstant", + auxType: auxFlagConstant, + argLen: 0, + reg: regInfo{}, + }, + { + name: "InvertFlags", + argLen: 1, + reg: regInfo{}, + }, + { + name: "LoweredWB", + auxType: auxInt64, + argLen: 1, + clobberFlags: true, + reg: regInfo{ + clobbers: 4294922240, // R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 256}, // R8 }, }, }, + { - name: "ADDshiftRL", - auxType: auxInt64, - argLen: 2, - asm: arm64.AADD, + name: "ADCSflags", + argLen: 3, + commutative: true, + asm: arm64.AADCS, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ + {1, 0}, {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "ADDshiftRA", - auxType: auxInt64, - argLen: 2, - asm: arm64.AADD, + name: "ADCzerocarry", + argLen: 1, + asm: arm64.AADC, reg: regInfo{ - inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "SUBshiftLL", - auxType: auxInt64, - argLen: 2, - asm: arm64.ASUB, + name: "ADD", + argLen: 2, + commutative: true, + asm: arm64.AADD, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 @@ -33909,14 +35946,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "SUBshiftRL", + name: "ADDconst", auxType: auxInt64, - argLen: 2, - asm: arm64.ASUB, + argLen: 1, + asm: arm64.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 
R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 1476395007}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -33924,40 +35960,40 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "SUBshiftRA", + name: "ADDSconstflags", auxType: auxInt64, - argLen: 2, - asm: arm64.ASUB, + argLen: 1, + asm: arm64.AADDS, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ + {1, 0}, {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "ANDshiftLL", - auxType: auxInt64, - argLen: 2, - asm: arm64.AAND, + name: "ADDSflags", + argLen: 2, + commutative: true, + asm: arm64.AADDS, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ + {1, 0}, {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "ANDshiftRL", - auxType: auxInt64, - argLen: 2, - asm: arm64.AAND, + name: "SUB", + argLen: 2, + asm: arm64.ASUB, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 @@ -33969,14 +36005,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "ANDshiftRA", + name: "SUBconst", auxType: auxInt64, - argLen: 2, - asm: arm64.AAND, + argLen: 1, + asm: arm64.ASUB, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -33984,40 +36019,40 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "ANDshiftRO", - auxType: auxInt64, - argLen: 2, - asm: arm64.AAND, + name: "SBCSflags", + argLen: 3, + asm: arm64.ASBCS, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ + {1, 0}, {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 
}, }, }, { - name: "ORshiftLL", - auxType: auxInt64, - argLen: 2, - asm: arm64.AORR, + name: "SUBSflags", + argLen: 2, + asm: arm64.ASUBS, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ + {1, 0}, {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "ORshiftRL", - auxType: auxInt64, - argLen: 2, - asm: arm64.AORR, + name: "MUL", + argLen: 2, + commutative: true, + asm: arm64.AMUL, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 @@ -34029,10 +36064,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "ORshiftRA", - auxType: auxInt64, - argLen: 2, - asm: arm64.AORR, + name: "MULW", + argLen: 2, + commutative: true, + asm: arm64.AMULW, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 @@ -34044,10 +36079,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "ORshiftRO", - auxType: auxInt64, - argLen: 2, - asm: arm64.AORR, + name: "MNEG", + argLen: 2, + commutative: true, + asm: arm64.AMNEG, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 @@ -34059,10 +36094,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "XORshiftLL", - auxType: auxInt64, - argLen: 2, - asm: arm64.AEOR, + name: "MNEGW", + argLen: 2, + commutative: true, + asm: arm64.AMNEGW, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 @@ -34074,10 +36109,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "XORshiftRL", - auxType: auxInt64, - argLen: 2, - asm: arm64.AEOR, + name: "MULH", + argLen: 2, + commutative: true, + asm: arm64.ASMULH, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 @@ -34089,10 +36124,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "XORshiftRA", - auxType: auxInt64, - argLen: 2, - asm: arm64.AEOR, + name: "UMULH", + argLen: 2, + commutative: true, + asm: arm64.AUMULH, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 @@ -34104,10 +36139,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "XORshiftRO", - auxType: auxInt64, - argLen: 2, - asm: arm64.AEOR, + name: "MULL", + argLen: 2, + commutative: true, + asm: arm64.ASMULL, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 @@ -34119,10 +36154,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "BICshiftLL", - auxType: auxInt64, - argLen: 2, - asm: arm64.ABIC, + name: "UMULL", + argLen: 2, + commutative: true, + asm: arm64.AUMULL, reg: regInfo{ inputs: []inputInfo{ {0, 
402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 @@ -34134,10 +36169,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "BICshiftRL", - auxType: auxInt64, - argLen: 2, - asm: arm64.ABIC, + name: "DIV", + argLen: 2, + asm: arm64.ASDIV, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 @@ -34149,10 +36183,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "BICshiftRA", - auxType: auxInt64, - argLen: 2, - asm: arm64.ABIC, + name: "UDIV", + argLen: 2, + asm: arm64.AUDIV, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 @@ -34164,10 +36197,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "BICshiftRO", - auxType: auxInt64, - argLen: 2, - asm: arm64.ABIC, + name: "DIVW", + argLen: 2, + asm: arm64.ASDIVW, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 @@ -34179,10 +36211,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "EONshiftLL", - auxType: auxInt64, - argLen: 2, - asm: arm64.AEON, + name: "UDIVW", + argLen: 2, + asm: arm64.AUDIVW, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 @@ -34194,10 +36225,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "EONshiftRL", - auxType: auxInt64, - argLen: 2, - asm: arm64.AEON, + name: "MOD", + argLen: 2, + asm: arm64.AREM, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 @@ -34209,10 +36239,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "EONshiftRA", - auxType: auxInt64, - argLen: 2, - asm: arm64.AEON, + name: "UMOD", + argLen: 2, + asm: arm64.AUREM, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 @@ -34224,10 +36253,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "EONshiftRO", - auxType: auxInt64, - argLen: 2, - asm: arm64.AEON, + name: "MODW", + argLen: 2, + asm: arm64.AREMW, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 @@ -34239,10 +36267,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "ORNshiftLL", - auxType: auxInt64, - argLen: 2, - asm: arm64.AORN, + name: "UMODW", + argLen: 2, + asm: arm64.AUREMW, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 @@ -34254,180 +36281,203 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "ORNshiftRL", - auxType: auxInt64, - argLen: 2, - asm: arm64.AORN, + name: "FADDS", + argLen: 2, + commutative: true, + asm: arm64.AFADDS, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 
F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ORNshiftRA", - auxType: auxInt64, - argLen: 2, - asm: arm64.AORN, + name: "FADDD", + argLen: 2, + commutative: true, + asm: arm64.AFADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ORNshiftRO", - auxType: auxInt64, - argLen: 2, - asm: arm64.AORN, + name: "FSUBS", + argLen: 2, + asm: arm64.AFSUBS, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CMPshiftLL", - auxType: auxInt64, - argLen: 2, - asm: arm64.ACMP, + name: "FSUBD", + argLen: 2, + asm: arm64.AFSUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CMPshiftRL", - auxType: auxInt64, - argLen: 2, - asm: arm64.ACMP, + name: "FMULS", + argLen: 2, + commutative: true, + asm: arm64.AFMULS, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 
R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CMPshiftRA", - auxType: auxInt64, - argLen: 2, - asm: arm64.ACMP, + name: "FMULD", + argLen: 2, + commutative: true, + asm: arm64.AFMULD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CMNshiftLL", - auxType: auxInt64, - argLen: 2, - asm: arm64.ACMN, + name: "FNMULS", + argLen: 2, + commutative: true, + asm: arm64.AFNMULS, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CMNshiftRL", - auxType: auxInt64, - argLen: 2, - asm: arm64.ACMN, + name: "FNMULD", + argLen: 2, + commutative: true, + asm: arm64.AFNMULD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CMNshiftRA", - auxType: auxInt64, - argLen: 2, - asm: arm64.ACMN, + name: "FDIVS", + argLen: 2, + asm: arm64.AFDIVS, reg: regInfo{ 
inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "TSTshiftLL", - auxType: auxInt64, - argLen: 2, - asm: arm64.ATST, + name: "FDIVD", + argLen: 2, + asm: arm64.AFDIVD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "TSTshiftRL", - auxType: auxInt64, - argLen: 2, - asm: arm64.ATST, + name: "AND", + argLen: 2, + commutative: true, + asm: arm64.AAND, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, }, }, { - name: "TSTshiftRA", + name: "ANDconst", auxType: auxInt64, - argLen: 2, - asm: arm64.ATST, + argLen: 1, + asm: arm64.AAND, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "TSTshiftRO", - auxType: auxInt64, - argLen: 2, - asm: arm64.ATST, + name: "OR", + argLen: 2, + commutative: true, + asm: arm64.AORR, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, }, }, { - name: "BFI", - auxType: auxARM64BitField, - argLen: 2, - resultInArg0: true, - asm: arm64.ABFI, + name: "ORconst", + auxType: auxInt64, + argLen: 1, + asm: arm64.AORR, reg: regInfo{ inputs: 
[]inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -34435,15 +36485,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "BFXIL", - auxType: auxARM64BitField, - argLen: 2, - resultInArg0: true, - asm: arm64.ABFXIL, + name: "XOR", + argLen: 2, + commutative: true, + asm: arm64.AEOR, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -34451,10 +36500,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "SBFIZ", - auxType: auxARM64BitField, + name: "XORconst", + auxType: auxInt64, argLen: 1, - asm: arm64.ASBFIZ, + asm: arm64.AEOR, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 @@ -34465,13 +36514,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "SBFX", - auxType: auxARM64BitField, - argLen: 1, - asm: arm64.ASBFX, + name: "BIC", + argLen: 2, + asm: arm64.ABIC, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -34479,13 +36528,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "UBFIZ", - auxType: auxARM64BitField, - argLen: 1, - asm: arm64.AUBFIZ, + name: "EON", + argLen: 2, + asm: arm64.AEON, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -34493,13 +36542,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "UBFX", - auxType: auxARM64BitField, - argLen: 1, - asm: arm64.AUBFX, + name: "ORN", + argLen: 2, + asm: arm64.AORN, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 
R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -34507,179 +36556,142 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVDconst", - auxType: auxInt64, - argLen: 0, - rematerializeable: true, - asm: arm64.AMOVD, + name: "MVN", + argLen: 1, + asm: arm64.AMVN, reg: regInfo{ + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FMOVSconst", - auxType: auxFloat64, - argLen: 0, - rematerializeable: true, - asm: arm64.AFMOVS, + name: "NEG", + argLen: 1, + asm: arm64.ANEG, reg: regInfo{ - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, - }, - }, - { - name: "FMOVDconst", - auxType: auxFloat64, - argLen: 0, - rematerializeable: true, - asm: arm64.AFMOVD, - reg: regInfo{ outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVDaddr", - auxType: auxSymOff, - argLen: 1, - rematerializeable: true, - symEffect: SymAddr, - asm: arm64.AMOVD, + name: "NEGSflags", + argLen: 1, + asm: arm64.ANEGS, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037928517632}, // SP SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ + {1, 0}, {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVBload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.AMOVB, + name: "NGCzerocarry", + argLen: 1, + asm: arm64.ANGC, reg: regInfo{ - inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB - }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVBUload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.AMOVBU, + name: "FABSD", + argLen: 1, + asm: arm64.AFABSD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVHload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.AMOVH, + name: "FNEGS", + argLen: 1, + asm: arm64.AFNEGS, reg: regInfo{ inputs: []inputInfo{ - {0, 
9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVHUload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.AMOVHU, + name: "FNEGD", + argLen: 1, + asm: arm64.AFNEGD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVWload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.AMOVW, + name: "FSQRTD", + argLen: 1, + asm: arm64.AFSQRTD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVWUload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.AMOVWU, + name: "FSQRTS", + argLen: 1, + asm: arm64.AFSQRTS, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVDload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.AMOVD, + name: "FMIND", + argLen: 2, + asm: arm64.AFMIND, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 
F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "FMOVSload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.AFMOVS, + name: "FMINS", + argLen: 2, + asm: arm64.AFMINS, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 @@ -34687,15 +36699,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "FMOVDload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.AFMOVD, + name: "FMAXD", + argLen: 2, + asm: arm64.AFMAXD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 @@ -34703,98 +36713,78 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LDP", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.ALDP, + name: "FMAXS", + argLen: 2, + asm: arm64.AFMAXS, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LDPW", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.ALDPW, + name: "REV", + argLen: 1, + asm: arm64.AREV, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 
402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LDPSW", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.ALDPSW, + name: "REVW", + argLen: 1, + asm: arm64.AREVW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FLDPD", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.AFLDPD, + name: "REV16", + argLen: 1, + asm: arm64.AREV16, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FLDPS", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.AFLDPS, + name: "REV16W", + argLen: 1, + asm: arm64.AREV16W, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVDloadidx", - argLen: 3, - asm: arm64.AMOVD, + name: "RBIT", + argLen: 1, + asm: arm64.ARBIT, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 
R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -34802,13 +36792,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWloadidx", - argLen: 3, - asm: arm64.AMOVW, + name: "RBITW", + argLen: 1, + asm: arm64.ARBITW, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -34816,13 +36805,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWUloadidx", - argLen: 3, - asm: arm64.AMOVWU, + name: "CLZ", + argLen: 1, + asm: arm64.ACLZ, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -34830,13 +36818,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVHloadidx", - argLen: 3, - asm: arm64.AMOVH, + name: "CLZW", + argLen: 1, + asm: arm64.ACLZW, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -34844,55 +36831,53 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVHUloadidx", - argLen: 3, - asm: arm64.AMOVHU, + name: "VCNT", + argLen: 1, + asm: arm64.AVCNT, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVBloadidx", - argLen: 3, - asm: arm64.AMOVB, + name: "VUADDLV", + argLen: 1, + asm: arm64.AVUADDLV, reg: regInfo{ inputs: 
[]inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVBUloadidx", - argLen: 3, - asm: arm64.AMOVBU, + name: "LoweredRound32F", + argLen: 1, + resultInArg0: true, + zeroWidth: true, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "FMOVSloadidx", - argLen: 3, - asm: arm64.AFMOVS, + name: "LoweredRound64F", + argLen: 1, + resultInArg0: true, + zeroWidth: true, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 @@ -34900,13 +36885,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "FMOVDloadidx", + name: "FMADDS", argLen: 3, - asm: arm64.AFMOVD, + asm: arm64.AFMADDS, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 @@ -34914,83 +36900,89 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVHloadidx2", + name: "FMADDD", argLen: 
3, - asm: arm64.AMOVH, + asm: arm64.AFMADDD, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVHUloadidx2", + name: "FNMADDS", argLen: 3, - asm: arm64.AMOVHU, + asm: arm64.AFNMADDS, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVWloadidx4", + name: "FNMADDD", argLen: 3, - asm: arm64.AMOVW, + asm: arm64.AFNMADDD, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVWUloadidx4", + name: "FMSUBS", argLen: 3, - asm: arm64.AMOVWU, + asm: arm64.AFMSUBS, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 
R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVDloadidx8", + name: "FMSUBD", argLen: 3, - asm: arm64.AMOVD, + asm: arm64.AFMSUBD, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "FMOVSloadidx4", + name: "FNMSUBS", argLen: 3, - asm: arm64.AFMOVS, + asm: arm64.AFNMSUBS, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 @@ -34998,13 +36990,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "FMOVDloadidx8", + name: "FNMSUBD", argLen: 3, - asm: arm64.AFMOVD, + asm: arm64.AFNMSUBD, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 
R25 R26 g R30 SP SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 @@ -35012,389 +37005,418 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVBstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm64.AMOVB, + name: "MADD", + argLen: 3, + asm: arm64.AMADD, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVHstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm64.AMOVH, + name: "MADDW", + argLen: 3, + asm: arm64.AMADDW, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVWstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm64.AMOVW, + name: "MSUB", + argLen: 3, + asm: arm64.AMSUB, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: 
[]outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVDstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm64.AMOVD, + name: "MSUBW", + argLen: 3, + asm: arm64.AMSUBW, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FMOVSstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm64.AFMOVS, + name: "SLL", + argLen: 2, + asm: arm64.ALSL, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FMOVDstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm64.AFMOVD, + name: "SLLconst", + auxType: auxInt64, + argLen: 1, + asm: arm64.ALSL, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "STP", - auxType: auxSymOff, - argLen: 4, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm64.ASTP, + name: "SRL", + argLen: 2, + asm: arm64.ALSR, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 
402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "STPW", - auxType: auxSymOff, - argLen: 4, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm64.ASTPW, + name: "SRLconst", + auxType: auxInt64, + argLen: 1, + asm: arm64.ALSR, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FSTPD", - auxType: auxSymOff, - argLen: 4, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm64.AFSTPD, + name: "SRA", + argLen: 2, + asm: arm64.AASR, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FSTPS", - auxType: auxSymOff, - argLen: 4, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm64.AFSTPS, + name: "SRAconst", + auxType: auxInt64, + argLen: 1, + asm: arm64.AASR, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVBstoreidx", - argLen: 4, - asm: arm64.AMOVB, + name: "ROR", + argLen: 2, + asm: arm64.AROR, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 
9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVHstoreidx", - argLen: 4, - asm: arm64.AMOVH, + name: "RORW", + argLen: 2, + asm: arm64.ARORW, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVWstoreidx", - argLen: 4, - asm: arm64.AMOVW, + name: "RORconst", + auxType: auxInt64, + argLen: 1, + asm: arm64.AROR, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVDstoreidx", - argLen: 4, - asm: arm64.AMOVD, + name: "RORWconst", + auxType: auxInt64, + argLen: 1, + asm: arm64.ARORW, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FMOVSstoreidx", - argLen: 4, - asm: arm64.AFMOVS, + name: "EXTRconst", + auxType: auxInt64, + argLen: 2, + asm: arm64.AEXTR, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g 
R30 SP SB - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FMOVDstoreidx", - argLen: 4, - asm: arm64.AFMOVD, + name: "EXTRWconst", + auxType: auxInt64, + argLen: 2, + asm: arm64.AEXTRW, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVHstoreidx2", - argLen: 4, - asm: arm64.AMOVH, + name: "CMP", + argLen: 2, + asm: arm64.ACMP, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "MOVWstoreidx4", - argLen: 4, - asm: arm64.AMOVW, + name: "CMPconst", + auxType: auxInt64, + argLen: 1, + asm: arm64.ACMP, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "MOVDstoreidx8", - argLen: 4, - asm: arm64.AMOVD, + name: "CMPW", + argLen: 2, + asm: arm64.ACMPW, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 
402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "FMOVSstoreidx4", - argLen: 4, - asm: arm64.AFMOVS, + name: "CMPWconst", + auxType: auxInt32, + argLen: 1, + asm: arm64.ACMPW, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "FMOVDstoreidx8", - argLen: 4, - asm: arm64.AFMOVD, + name: "CMN", + argLen: 2, + commutative: true, + asm: arm64.ACMN, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "FMOVDgpfp", - argLen: 1, - asm: arm64.AFMOVD, + name: "CMNconst", + auxType: auxInt64, + argLen: 1, + asm: arm64.ACMN, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "FMOVDfpgp", - argLen: 1, - asm: arm64.AFMOVD, + name: "CMNW", + argLen: 2, + commutative: true, + asm: arm64.ACMNW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "FMOVSgpfp", - argLen: 1, - asm: arm64.AFMOVS, + name: "CMNWconst", + auxType: auxInt32, + argLen: 1, + asm: arm64.ACMNW, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 
402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "FMOVSfpgp", - argLen: 1, - asm: arm64.AFMOVS, + name: "TST", + argLen: 2, + commutative: true, + asm: arm64.ATST, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "MOVBreg", - argLen: 1, - asm: arm64.AMOVB, + name: "TSTconst", + auxType: auxInt64, + argLen: 1, + asm: arm64.ATST, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - }, }, }, { - name: "MOVBUreg", - argLen: 1, - asm: arm64.AMOVBU, + name: "TSTW", + argLen: 2, + commutative: true, + asm: arm64.ATSTW, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "MOVHreg", - argLen: 1, - asm: arm64.AMOVH, + name: "TSTWconst", + auxType: auxInt32, + argLen: 1, + asm: arm64.ATSTW, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + { + name: "FCMPS", + argLen: 2, + asm: arm64.AFCMPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVHUreg", - argLen: 1, - asm: arm64.AMOVHU, + name: "FCMPD", + argLen: 2, + asm: arm64.AFCMPD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + { + name: "FCMPS0", + argLen: 1, + asm: arm64.AFCMPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 
F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVWreg", + name: "FCMPD0", argLen: 1, - asm: arm64.AMOVW, + asm: arm64.AFCMPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MVNshiftLL", + auxType: auxInt64, + argLen: 1, + asm: arm64.AMVN, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 @@ -35405,9 +37427,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWUreg", - argLen: 1, - asm: arm64.AMOVWU, + name: "MVNshiftRL", + auxType: auxInt64, + argLen: 1, + asm: arm64.AMVN, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 @@ -35418,9 +37441,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVDreg", - argLen: 1, - asm: arm64.AMOVD, + name: "MVNshiftRA", + auxType: auxInt64, + argLen: 1, + asm: arm64.AMVN, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 @@ -35431,12 +37455,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVDnop", - argLen: 1, - resultInArg0: true, + name: "MVNshiftRO", + auxType: auxInt64, + argLen: 1, + asm: arm64.AMVN, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -35444,116 +37469,131 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "SCVTFWS", - argLen: 1, - asm: arm64.ASCVTFWS, + name: "NEGshiftLL", + auxType: auxInt64, + argLen: 1, + asm: arm64.ANEG, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "SCVTFWD", - argLen: 1, - asm: arm64.ASCVTFWD, + name: "NEGshiftRL", + auxType: auxInt64, + argLen: 1, + asm: arm64.ANEG, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "UCVTFWS", - argLen: 1, - asm: arm64.AUCVTFWS, + name: "NEGshiftRA", + auxType: auxInt64, + argLen: 1, + asm: arm64.ANEG, reg: regInfo{ 
inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "UCVTFWD", - argLen: 1, - asm: arm64.AUCVTFWD, + name: "ADDshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "SCVTFS", - argLen: 1, - asm: arm64.ASCVTFS, + name: "ADDshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "SCVTFD", - argLen: 1, - asm: arm64.ASCVTFD, + name: "ADDshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "UCVTFS", - argLen: 1, - asm: arm64.AUCVTFS, + name: "SUBshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 
R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "UCVTFD", - argLen: 1, - asm: arm64.AUCVTFD, + name: "SUBshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FCVTZSSW", - argLen: 1, - asm: arm64.AFCVTZSSW, + name: "SUBshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -35561,12 +37601,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "FCVTZSDW", - argLen: 1, - asm: arm64.AFCVTZSDW, + name: "ANDshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -35574,12 +37616,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "FCVTZUSW", - argLen: 1, - asm: arm64.AFCVTZUSW, + name: "ANDshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -35587,12 +37631,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "FCVTZUDW", - argLen: 1, - asm: arm64.AFCVTZUDW, + name: "ANDshiftRA", + auxType: 
auxInt64, + argLen: 2, + asm: arm64.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -35600,12 +37646,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "FCVTZSS", - argLen: 1, - asm: arm64.AFCVTZSS, + name: "ANDshiftRO", + auxType: auxInt64, + argLen: 2, + asm: arm64.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -35613,12 +37661,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "FCVTZSD", - argLen: 1, - asm: arm64.AFCVTZSD, + name: "ORshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AORR, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -35626,12 +37676,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "FCVTZUS", - argLen: 1, - asm: arm64.AFCVTZUS, + name: "ORshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AORR, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -35639,12 +37691,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "FCVTZUD", - argLen: 1, - asm: arm64.AFCVTZUD, + name: "ORshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.AORR, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ {0, 
335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -35652,105 +37706,119 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "FCVTSD", - argLen: 1, - asm: arm64.AFCVTSD, + name: "ORshiftRO", + auxType: auxInt64, + argLen: 2, + asm: arm64.AORR, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FCVTDS", - argLen: 1, - asm: arm64.AFCVTDS, + name: "XORshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AEOR, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FRINTAD", - argLen: 1, - asm: arm64.AFRINTAD, + name: "XORshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AEOR, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FRINTMD", - argLen: 1, - asm: arm64.AFRINTMD, + name: "XORshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.AEOR, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 
R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FRINTND", - argLen: 1, - asm: arm64.AFRINTND, + name: "XORshiftRO", + auxType: auxInt64, + argLen: 2, + asm: arm64.AEOR, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FRINTPD", - argLen: 1, - asm: arm64.AFRINTPD, + name: "BICshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.ABIC, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FRINTZD", - argLen: 1, - asm: arm64.AFRINTZD, + name: "BICshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.ABIC, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "CSEL", - auxType: auxCCop, - argLen: 3, - asm: arm64.ACSEL, + name: "BICshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.ABIC, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -35758,13 +37826,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "CSEL0", - auxType: auxCCop, 
+ name: "BICshiftRO", + auxType: auxInt64, argLen: 2, - asm: arm64.ACSEL, + asm: arm64.ABIC, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -35772,14 +37841,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "CSINC", - auxType: auxCCop, - argLen: 3, - asm: arm64.ACSINC, + name: "EONshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AEON, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -35787,14 +37856,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "CSINV", - auxType: auxCCop, - argLen: 3, - asm: arm64.ACSINV, + name: "EONshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AEON, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -35802,14 +37871,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "CSNEG", - auxType: auxCCop, - argLen: 3, - asm: arm64.ACSNEG, + name: "EONshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.AEON, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -35817,231 +37886,294 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "CSETM", - auxType: auxCCop, - argLen: 1, - asm: arm64.ACSETM, + name: "EONshiftRO", + auxType: auxInt64, + argLen: 2, + asm: arm64.AEON, reg: regInfo{ + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 
R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "CALLstatic", - auxType: auxCallOff, - argLen: -1, - clobberFlags: true, - call: true, - reg: regInfo{ - clobbers: 9223372035109945343, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - }, - { - name: "CALLtail", - auxType: auxCallOff, - argLen: -1, - clobberFlags: true, - call: true, - tailCall: true, - reg: regInfo{ - clobbers: 9223372035109945343, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - }, - { - name: "CALLclosure", - auxType: auxCallOff, - argLen: -1, - clobberFlags: true, - call: true, + name: "ORNshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AORN, reg: regInfo{ inputs: []inputInfo{ - {1, 33554432}, // R26 - {0, 1409286143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 SP + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, - clobbers: 9223372035109945343, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { - name: "CALLinter", - auxType: auxCallOff, - argLen: -1, - clobberFlags: true, - call: true, + name: "ORNshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AORN, reg: regInfo{ inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, - clobbers: 9223372035109945343, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { - name: "LoweredNilCheck", - argLen: 2, - nilCheck: true, - faultOnNilArg0: true, + name: "ORNshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.AORN, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "Equal", - argLen: 1, + name: "ORNshiftRO", + auxType: auxInt64, + argLen: 2, + asm: arm64.AORN, 
reg: regInfo{ + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "NotEqual", - argLen: 1, + name: "CMPshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.ACMP, reg: regInfo{ - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "LessThan", - argLen: 1, + name: "CMPshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.ACMP, reg: regInfo{ - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "LessEqual", - argLen: 1, + name: "CMPshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.ACMP, reg: regInfo{ - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "GreaterThan", - argLen: 1, + name: "CMNshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.ACMN, reg: regInfo{ - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "GreaterEqual", - argLen: 1, + name: "CMNshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.ACMN, reg: regInfo{ - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "LessThanU", - argLen: 1, + name: "CMNshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.ACMN, reg: regInfo{ - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 
R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "LessEqualU", - argLen: 1, + name: "TSTshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.ATST, reg: regInfo{ - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "GreaterThanU", - argLen: 1, + name: "TSTshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.ATST, reg: regInfo{ - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "GreaterEqualU", - argLen: 1, + name: "TSTshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.ATST, reg: regInfo{ - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "LessThanF", - argLen: 1, + name: "TSTshiftRO", + auxType: auxInt64, + argLen: 2, + asm: arm64.ATST, reg: regInfo{ - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "LessEqualF", - argLen: 1, + name: "BFI", + auxType: auxARM64BitField, + argLen: 2, + resultInArg0: true, + asm: arm64.ABFI, reg: regInfo{ + inputs: []inputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "GreaterThanF", - argLen: 1, + name: "BFXIL", + auxType: auxARM64BitField, + argLen: 2, + resultInArg0: true, + asm: arm64.ABFXIL, reg: regInfo{ + inputs: []inputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "GreaterEqualF", - argLen: 1, + name: "SBFIZ", + auxType: auxARM64BitField, + argLen: 1, + 
asm: arm64.ASBFIZ, reg: regInfo{ + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "NotLessThanF", - argLen: 1, + name: "SBFX", + auxType: auxARM64BitField, + argLen: 1, + asm: arm64.ASBFX, reg: regInfo{ + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "NotLessEqualF", - argLen: 1, + name: "UBFIZ", + auxType: auxARM64BitField, + argLen: 1, + asm: arm64.AUBFIZ, reg: regInfo{ + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "NotGreaterThanF", - argLen: 1, + name: "UBFX", + auxType: auxARM64BitField, + argLen: 1, + asm: arm64.AUBFX, reg: regInfo{ + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "NotGreaterEqualF", - argLen: 1, + name: "MOVDconst", + auxType: auxInt64, + argLen: 0, + rematerializeable: true, + asm: arm64.AMOVD, reg: regInfo{ outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -36049,122 +38181,132 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LessThanNoov", - argLen: 1, + name: "FMOVSconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: arm64.AFMOVS, reg: regInfo{ outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "GreaterEqualNoov", - argLen: 1, + name: "FMOVDconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: arm64.AFMOVD, reg: regInfo{ outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "DUFFZERO", - auxType: auxInt64, - argLen: 2, - unsafePoint: true, + name: "MOVDaddr", + auxType: auxSymOff, + argLen: 1, + rematerializeable: true, + symEffect: SymAddr, + asm: arm64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 524288}, // R20 + {0, 9223372037928517632}, // SP SB + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, - clobbers: 269156352, // R16 R17 R20 R30 }, }, { - name: "LoweredZero", - argLen: 3, - clobberFlags: true, + name: "MOVBload", + auxType: auxSymOff, + argLen: 2, faultOnNilArg0: true, + symEffect: 
SymRead, + asm: arm64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 65536}, // R16 - {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, - clobbers: 65536, // R16 }, }, { - name: "DUFFCOPY", - auxType: auxInt64, - argLen: 3, - unsafePoint: true, + name: "MOVBUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 1048576}, // R21 - {1, 524288}, // R20 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, - clobbers: 303759360, // R16 R17 R20 R21 R26 R30 }, }, { - name: "LoweredMove", - argLen: 4, - clobberFlags: true, + name: "MOVHload", + auxType: auxSymOff, + argLen: 2, faultOnNilArg0: true, - faultOnNilArg1: true, + symEffect: SymRead, + asm: arm64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 131072}, // R17 - {1, 65536}, // R16 - {2, 318767103}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R26 R30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, - clobbers: 16973824, // R16 R17 R25 - }, - }, - { - name: "LoweredGetClosurePtr", - argLen: 0, - zeroWidth: true, - reg: regInfo{ outputs: []outputInfo{ - {0, 33554432}, // R26 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredGetCallerSP", - argLen: 1, - rematerializeable: true, + name: "MOVHUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.AMOVHU, reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredGetCallerPC", - argLen: 0, - rematerializeable: true, + name: "MOVWload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.AMOVW, reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FlagConstant", - auxType: auxFlagConstant, - argLen: 0, - reg: regInfo{}, - }, - { - name: "InvertFlags", - argLen: 1, - reg: regInfo{}, - }, - { - name: "LDAR", + name: "MOVWUload", + auxType: auxSymOff, argLen: 2, faultOnNilArg0: true, - asm: arm64.ALDAR, + symEffect: SymRead, + asm: arm64.AMOVWU, reg: regInfo{ inputs: []inputInfo{ {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB @@ -36175,10 +38317,12 @@ var 
opcodeTable = [...]opInfo{ }, }, { - name: "LDARB", + name: "MOVDload", + auxType: auxSymOff, argLen: 2, faultOnNilArg0: true, - asm: arm64.ALDARB, + symEffect: SymRead, + asm: arm64.AMOVD, reg: regInfo{ inputs: []inputInfo{ {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB @@ -36189,118 +38333,129 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LDARW", + name: "FMOVSload", + auxType: auxSymOff, argLen: 2, faultOnNilArg0: true, - asm: arm64.ALDARW, + symEffect: SymRead, + asm: arm64.AFMOVS, reg: regInfo{ inputs: []inputInfo{ {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "STLRB", - argLen: 3, + name: "FMOVDload", + auxType: auxSymOff, + argLen: 2, faultOnNilArg0: true, - hasSideEffects: true, - asm: arm64.ASTLRB, + symEffect: SymRead, + asm: arm64.AFMOVD, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, }, }, { - name: "STLR", - argLen: 3, + name: "LDP", + auxType: auxSymOff, + argLen: 2, faultOnNilArg0: true, - hasSideEffects: true, - asm: arm64.ASTLR, + symEffect: SymRead, + asm: arm64.ALDP, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, + outputs: []outputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, }, }, { - name: "STLRW", - argLen: 3, + name: "LDPW", + auxType: auxSymOff, + argLen: 2, faultOnNilArg0: true, - hasSideEffects: true, - asm: arm64.ASTLRW, + symEffect: SymRead, + asm: arm64.ALDPW, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, + outputs: []outputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, }, }, { - name: "LoweredAtomicExchange64", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "LDPSW", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.ALDPSW, reg: regInfo{ 
inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "LoweredAtomicExchange32", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "FLDPD", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.AFLDPD, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicExchange8", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "FLDPS", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.AFLDPS, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicExchange64Variant", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "MOVDloadidx", + argLen: 3, + asm: arm64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ @@ -36309,14 +38464,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicExchange32Variant", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "MOVWloadidx", + argLen: 
3, + asm: arm64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ @@ -36325,15 +38478,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicExchange8Variant", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "MOVWUloadidx", + argLen: 3, + asm: arm64.AMOVWU, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ @@ -36342,15 +38492,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicAdd64", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "MOVHloadidx", + argLen: 3, + asm: arm64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ @@ -36359,15 +38506,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicAdd32", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "MOVHUloadidx", + argLen: 3, + asm: arm64.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ @@ -36376,14 +38520,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicAdd64Variant", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "MOVBloadidx", + argLen: 3, + asm: arm64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ @@ -36392,14 +38534,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicAdd32Variant", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "MOVBUloadidx", + argLen: 3, + asm: arm64.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - 
{1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ @@ -36408,55 +38548,40 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicCas64", - argLen: 4, - resultNotInArgs: true, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "FMOVSloadidx", + argLen: 3, + asm: arm64.AFMOVS, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicCas32", - argLen: 4, - resultNotInArgs: true, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "FMOVDloadidx", + argLen: 3, + asm: arm64.AFMOVD, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicCas64Variant", - argLen: 4, - resultNotInArgs: true, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "MOVHloadidx2", + argLen: 3, + asm: arm64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ @@ -36465,17 +38590,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicCas32Variant", - argLen: 4, - resultNotInArgs: true, - clobberFlags: true, 
- faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "MOVHUloadidx2", + argLen: 3, + asm: arm64.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ @@ -36484,17 +38604,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicAnd8", - argLen: 3, - resultNotInArgs: true, - needIntTemp: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, - asm: arm64.AAND, + name: "MOVWloadidx4", + argLen: 3, + asm: arm64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ @@ -36503,17 +38618,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicOr8", - argLen: 3, - resultNotInArgs: true, - needIntTemp: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, - asm: arm64.AORR, + name: "MOVWUloadidx4", + argLen: 3, + asm: arm64.AMOVWU, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ @@ -36522,17 +38632,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicAnd64", - argLen: 3, - resultNotInArgs: true, - needIntTemp: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, - asm: arm64.AAND, + name: "MOVDloadidx8", + argLen: 3, + asm: arm64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ @@ -36541,2090 +38646,1995 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicOr64", - argLen: 3, - resultNotInArgs: true, - needIntTemp: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, - asm: arm64.AORR, + name: "FMOVSloadidx4", + argLen: 3, + asm: arm64.AFMOVS, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 
R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicAnd32", - argLen: 3, - resultNotInArgs: true, - needIntTemp: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, - asm: arm64.AAND, + name: "FMOVDloadidx8", + argLen: 3, + asm: arm64.AFMOVD, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicOr32", - argLen: 3, - resultNotInArgs: true, - needIntTemp: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, - asm: arm64.AORR, + name: "MOVBstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.AMOVB, reg: regInfo{ inputs: []inputInfo{ {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - }, }, }, { - name: "LoweredAtomicAnd8Variant", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "MOVHstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.AMOVH, reg: regInfo{ inputs: []inputInfo{ {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - }, }, }, { - name: "LoweredAtomicOr8Variant", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "MOVWstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.AMOVW, reg: regInfo{ inputs: []inputInfo{ {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - }, }, }, { - name: 
"LoweredAtomicAnd64Variant", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "MOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.AMOVD, reg: regInfo{ inputs: []inputInfo{ {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - }, }, }, { - name: "LoweredAtomicOr64Variant", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "FMOVSstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.AFMOVS, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicAnd32Variant", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "FMOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.AFMOVD, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicOr32Variant", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "STP", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.ASTP, reg: regInfo{ inputs: []inputInfo{ {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - }, }, }, { - name: "LoweredWB", - auxType: auxInt64, - argLen: 1, - clobberFlags: true, + name: "STPW", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.ASTPW, reg: regInfo{ - clobbers: 9223372034975924224, // R16 R17 R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 
F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - outputs: []outputInfo{ - {0, 16777216}, // R25 + inputs: []inputInfo{ + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "LoweredPanicBoundsA", - auxType: auxInt64, - argLen: 3, - call: true, + name: "FSTPD", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.AFSTPD, reg: regInfo{ inputs: []inputInfo{ - {0, 4}, // R2 - {1, 8}, // R3 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredPanicBoundsB", - auxType: auxInt64, - argLen: 3, - call: true, + name: "FSTPS", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.AFSTPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2}, // R1 - {1, 4}, // R2 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredPanicBoundsC", - auxType: auxInt64, - argLen: 3, - call: true, + name: "MOVBstoreidx", + argLen: 4, + asm: arm64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 1}, // R0 - {1, 2}, // R1 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "PRFM", - auxType: auxInt64, - argLen: 2, - hasSideEffects: true, - asm: arm64.APRFM, + name: "MOVHstoreidx", + argLen: 4, + asm: arm64.AMOVH, reg: regInfo{ inputs: []inputInfo{ + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "DMB", - auxType: auxInt64, - argLen: 1, - hasSideEffects: true, - asm: arm64.ADMB, - reg: regInfo{}, + name: "MOVWstoreidx", + argLen: 4, + asm: arm64.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 
R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + }, }, { - name: "ZERO", - argLen: 0, - zeroWidth: true, - fixedReg: true, - reg: regInfo{}, + name: "MOVDstoreidx", + argLen: 4, + asm: arm64.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + }, }, - { - name: "NEGV", - argLen: 1, + name: "FMOVSstoreidx", + argLen: 4, + asm: arm64.AFMOVS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "NEGF", - argLen: 1, - asm: loong64.ANEGF, + name: "FMOVDstoreidx", + argLen: 4, + asm: arm64.AFMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "NEGD", - argLen: 1, - asm: loong64.ANEGD, + name: "MOVHstoreidx2", + argLen: 4, + asm: arm64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "SQRTD", - argLen: 1, - asm: loong64.ASQRTD, + name: "MOVWstoreidx4", + argLen: 4, + asm: arm64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, 
// F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "SQRTF", - argLen: 1, - asm: loong64.ASQRTF, + name: "MOVDstoreidx8", + argLen: 4, + asm: arm64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "ABSD", - argLen: 1, - asm: loong64.AABSD, + name: "FMOVSstoreidx4", + argLen: 4, + asm: arm64.AFMOVS, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CLZW", - argLen: 1, - asm: loong64.ACLZW, + name: "FMOVDstoreidx8", + argLen: 4, + asm: arm64.AFMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CLZV", + name: "FMOVDgpfp", argLen: 1, - asm: loong64.ACLZV, + asm: arm64.AFMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + 
{0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CTZW", + name: "FMOVDfpgp", argLen: 1, - asm: loong64.ACTZW, + asm: arm64.AFMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "CTZV", + name: "FMOVSgpfp", argLen: 1, - asm: loong64.ACTZV, + asm: arm64.AFMOVS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "REVB2H", + name: "FMOVSfpgp", argLen: 1, - asm: loong64.AREVB2H, + asm: arm64.AFMOVS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "REVB2W", + name: "MOVBreg", argLen: 1, - asm: loong64.AREVB2W, + asm: arm64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "REVBV", + name: "MOVBUreg", argLen: 1, - asm: loong64.AREVBV, + asm: arm64.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 
R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "BITREV4B", + name: "MOVHreg", argLen: 1, - asm: loong64.ABITREV4B, + asm: arm64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "BITREVW", + name: "MOVHUreg", argLen: 1, - asm: loong64.ABITREVW, + asm: arm64.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "BITREVV", + name: "MOVWreg", argLen: 1, - asm: loong64.ABITREVV, + asm: arm64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "VPCNT64", + name: "MOVWUreg", argLen: 1, - asm: loong64.AVPCNTV, + asm: arm64.AMOVWU, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "VPCNT32", + name: "MOVDreg", argLen: 1, - asm: loong64.AVPCNTW, + asm: arm64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: 
"VPCNT16", - argLen: 1, - asm: loong64.AVPCNTH, + name: "MOVDnop", + argLen: 1, + resultInArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "ADDV", - argLen: 2, - commutative: true, - asm: loong64.AADDVU, + name: "SCVTFWS", + argLen: 1, + asm: arm64.ASCVTFWS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ADDVconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.AADDVU, + name: "SCVTFWD", + argLen: 1, + asm: arm64.ASCVTFWD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741820}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SUBV", - argLen: 2, - asm: loong64.ASUBVU, + name: "UCVTFWS", + argLen: 1, + asm: arm64.AUCVTFWS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SUBVconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.ASUBVU, + name: "UCVTFWD", + argLen: 1, + asm: arm64.AUCVTFWD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 
R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MULV", - argLen: 2, - commutative: true, - asm: loong64.AMULV, + name: "SCVTFS", + argLen: 1, + asm: arm64.ASCVTFS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MULHV", - argLen: 2, - commutative: true, - asm: loong64.AMULHV, + name: "SCVTFD", + argLen: 1, + asm: arm64.ASCVTFD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MULHVU", - argLen: 2, - commutative: true, - asm: loong64.AMULHVU, + name: "UCVTFS", + argLen: 1, + asm: arm64.AUCVTFS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "DIVV", - argLen: 2, - asm: loong64.ADIVV, + name: "UCVTFD", + argLen: 1, + asm: arm64.AUCVTFD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "DIVVU", - 
argLen: 2, - asm: loong64.ADIVVU, + name: "FCVTZSSW", + argLen: 1, + asm: arm64.AFCVTZSSW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "REMV", - argLen: 2, - asm: loong64.AREMV, + name: "FCVTZSDW", + argLen: 1, + asm: arm64.AFCVTZSDW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "REMVU", - argLen: 2, - asm: loong64.AREMVU, + name: "FCVTZUSW", + argLen: 1, + asm: arm64.AFCVTZUSW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "ADDF", - argLen: 2, - commutative: true, - asm: loong64.AADDF, + name: "FCVTZUDW", + argLen: 1, + asm: arm64.AFCVTZUDW, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "ADDD", - argLen: 2, - commutative: true, - asm: loong64.AADDD, + name: "FCVTZSS", + argLen: 1, + asm: arm64.AFCVTZSS, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 
F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "SUBF", - argLen: 2, - asm: loong64.ASUBF, + name: "FCVTZSD", + argLen: 1, + asm: arm64.AFCVTZSD, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "SUBD", - argLen: 2, - asm: loong64.ASUBD, + name: "FCVTZUS", + argLen: 1, + asm: arm64.AFCVTZUS, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MULF", - argLen: 2, - commutative: true, - asm: loong64.AMULF, + name: "FCVTZUD", + argLen: 1, + asm: arm64.AFCVTZUD, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MULD", - argLen: 2, - commutative: true, - asm: loong64.AMULD, + name: "FCVTSD", + argLen: 1, + asm: arm64.AFCVTSD, reg: regInfo{ inputs: 
[]inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "DIVF", - argLen: 2, - asm: loong64.ADIVF, + name: "FCVTDS", + argLen: 1, + asm: arm64.AFCVTDS, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "DIVD", - argLen: 2, - asm: loong64.ADIVD, + name: "FRINTAD", + argLen: 1, + asm: arm64.AFRINTAD, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "AND", - argLen: 2, - commutative: true, - asm: loong64.AAND, + name: "FRINTMD", + argLen: 1, + asm: arm64.AFRINTMD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ANDconst", - auxType: auxInt64, - argLen: 1, - 
asm: loong64.AAND, + name: "FRINTND", + argLen: 1, + asm: arm64.AFRINTND, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "OR", - argLen: 2, - commutative: true, - asm: loong64.AOR, + name: "FRINTPD", + argLen: 1, + asm: arm64.AFRINTPD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ORconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.AOR, + name: "FRINTZD", + argLen: 1, + asm: arm64.AFRINTZD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "XOR", - argLen: 2, - commutative: true, - asm: loong64.AXOR, + name: "CSEL", + auxType: auxCCop, + argLen: 3, + asm: arm64.ACSEL, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "XORconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.AXOR, + name: "CSEL0", + auxType: auxCCop, + argLen: 2, + asm: arm64.ACSEL, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 402653183}, // R0 R1 R2 R3 R4 
R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "NOR", - argLen: 2, - commutative: true, - asm: loong64.ANOR, + name: "CSINC", + auxType: auxCCop, + argLen: 3, + asm: arm64.ACSINC, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "NORconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.ANOR, + name: "CSINV", + auxType: auxCCop, + argLen: 3, + asm: arm64.ACSINV, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "ANDN", - argLen: 2, - asm: loong64.AANDN, + name: "CSNEG", + auxType: auxCCop, + argLen: 3, + asm: arm64.ACSNEG, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "ORN", - argLen: 2, - asm: loong64.AORN, + name: "CSETM", + auxType: auxCCop, + argLen: 1, + asm: arm64.ACSETM, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 
R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FMADDF", - argLen: 3, - commutative: true, - asm: loong64.AFMADDF, + name: "CALLstatic", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, + clobbers: 9223372035109945343, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { - name: "FMADDD", - argLen: 3, - commutative: true, - asm: loong64.AFMADDD, + name: "CALLtail", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, + tailCall: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, + clobbers: 9223372035109945343, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { - name: "FMSUBF", - argLen: 3, - commutative: true, - asm: loong64.AFMSUBF, + name: "CALLclosure", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 33554432}, // R26 + {0, 1409286143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 SP }, + clobbers: 9223372035109945343, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 
F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { - name: "FMSUBD", - argLen: 3, - commutative: true, - asm: loong64.AFMSUBD, + name: "CALLinter", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, + clobbers: 9223372035109945343, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { - name: "FNMADDF", - argLen: 3, - commutative: true, - asm: loong64.AFNMADDF, + name: "LoweredNilCheck", + argLen: 2, + nilCheck: true, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, + }, + }, + { + name: "Equal", + argLen: 1, + reg: regInfo{ outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FNMADDD", - argLen: 3, - commutative: true, - asm: loong64.AFNMADDD, + name: "NotEqual", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FNMSUBF", - argLen: 3, - commutative: true, - asm: loong64.AFNMSUBF, + name: "LessThan", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 
F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FNMSUBD", - argLen: 3, - commutative: true, - asm: loong64.AFNMSUBD, + name: "LessEqual", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FMINF", - argLen: 2, - commutative: true, - resultNotInArgs: true, - asm: loong64.AFMINF, + name: "GreaterThan", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FMIND", - argLen: 2, - commutative: true, - resultNotInArgs: true, - asm: loong64.AFMIND, + name: "GreaterEqual", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FMAXF", - argLen: 2, - commutative: true, - resultNotInArgs: true, - asm: loong64.AFMAXF, + name: "LessThanU", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 
F25 F26 F27 F28 F29 F30 F31 - }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FMAXD", - argLen: 2, - commutative: true, - resultNotInArgs: true, - asm: loong64.AFMAXD, + name: "LessEqualU", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MASKEQZ", - argLen: 2, - asm: loong64.AMASKEQZ, + name: "GreaterThanU", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MASKNEZ", - argLen: 2, - asm: loong64.AMASKNEZ, + name: "GreaterEqualU", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FCOPYSGD", - argLen: 2, - asm: loong64.AFCOPYSGD, + name: "LessThanF", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "SLL", - argLen: 2, - asm: loong64.ASLL, + name: "LessEqualF", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - }, outputs: []outputInfo{ - {0, 
1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "SLLV", - argLen: 2, - asm: loong64.ASLLV, + name: "GreaterThanF", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "SLLconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.ASLL, + name: "GreaterEqualF", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "SLLVconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.ASLLV, + name: "NotLessThanF", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "SRL", - argLen: 2, - asm: loong64.ASRL, + name: "NotLessEqualF", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "SRLV", - argLen: 2, - asm: loong64.ASRLV, + name: "NotGreaterThanF", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "SRLconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.ASRL, + name: "NotGreaterEqualF", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 
R29 R31 - }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "SRLVconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.ASRLV, + name: "LessThanNoov", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "SRA", - argLen: 2, - asm: loong64.ASRA, + name: "GreaterEqualNoov", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "SRAV", - argLen: 2, - asm: loong64.ASRAV, + name: "DUFFZERO", + auxType: auxInt64, + argLen: 2, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 524288}, // R20 }, + clobbers: 269156352, // R16 R17 R20 R30 }, }, { - name: "SRAconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.ASRA, + name: "LoweredZero", + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 65536}, // R16 + {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, + clobbers: 65536, // R16 }, }, { - name: "SRAVconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.ASRAV, + name: "DUFFCOPY", + auxType: auxInt64, + argLen: 3, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1048576}, // R21 + {1, 524288}, // R20 }, + clobbers: 303759360, // R16 R17 R20 R21 R26 R30 }, }, { - name: "ROTR", - argLen: 2, - asm: loong64.AROTR, + name: "LoweredMove", + argLen: 4, + clobberFlags: true, + faultOnNilArg0: true, + faultOnNilArg1: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 
R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 131072}, // R17 + {1, 65536}, // R16 + {2, 318767103}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R26 R30 }, + clobbers: 16973824, // R16 R17 R25 }, }, { - name: "ROTRV", - argLen: 2, - asm: loong64.AROTRV, + name: "LoweredGetClosurePtr", + argLen: 0, + zeroWidth: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 33554432}, // R26 }, }, }, { - name: "ROTRconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.AROTR, + name: "LoweredGetCallerSP", + argLen: 1, + rematerializeable: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "ROTRVconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.AROTRV, + name: "LoweredGetCallerPC", + argLen: 0, + rematerializeable: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "SGT", - argLen: 2, - asm: loong64.ASGT, - reg: regInfo{ - inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 - }, - }, + name: "FlagConstant", + auxType: auxFlagConstant, + argLen: 0, + reg: regInfo{}, }, { - name: "SGTconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.ASGT, + name: "InvertFlags", + argLen: 1, + reg: regInfo{}, + }, + { + name: "LDAR", + argLen: 2, + faultOnNilArg0: true, + asm: arm64.ALDAR, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 
335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "SGTU", - argLen: 2, - asm: loong64.ASGTU, + name: "LDARB", + argLen: 2, + faultOnNilArg0: true, + asm: arm64.ALDARB, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "SGTUconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.ASGTU, + name: "LDARW", + argLen: 2, + faultOnNilArg0: true, + asm: arm64.ALDARW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "CMPEQF", - argLen: 2, - asm: loong64.ACMPEQF, + name: "STLRB", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + asm: arm64.ASTLRB, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "CMPEQD", - argLen: 2, - asm: loong64.ACMPEQD, + name: "STLR", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + asm: arm64.ASTLR, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "CMPGEF", - argLen: 2, - asm: loong64.ACMPGEF, + name: "STLRW", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + asm: arm64.ASTLRW, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // 
F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "CMPGED", - argLen: 2, - asm: loong64.ACMPGED, + name: "LoweredAtomicExchange64", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "CMPGTF", - argLen: 2, - asm: loong64.ACMPGTF, + name: "LoweredAtomicExchange32", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "CMPGTD", - argLen: 2, - asm: loong64.ACMPGTD, + name: "LoweredAtomicExchange8", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "BSTRPICKW", - auxType: auxInt64, - argLen: 1, - asm: loong64.ABSTRPICKW, + name: "LoweredAtomicExchange64Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 
R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "BSTRPICKV", - auxType: auxInt64, - argLen: 1, - asm: loong64.ABSTRPICKV, + name: "LoweredAtomicExchange32Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVVconst", - auxType: auxInt64, - argLen: 0, - rematerializeable: true, - asm: loong64.AMOVV, + name: "LoweredAtomicExchange8Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ + inputs: []inputInfo{ + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVFconst", - auxType: auxFloat64, - argLen: 0, - rematerializeable: true, - asm: loong64.AMOVF, + name: "LoweredAtomicAdd64", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ + inputs: []inputInfo{ + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVDconst", - auxType: auxFloat64, - argLen: 0, - rematerializeable: true, - asm: loong64.AMOVD, + name: "LoweredAtomicAdd32", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ + inputs: []inputInfo{ + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 
R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVVaddr", - auxType: auxSymOff, - argLen: 1, - rematerializeable: true, - symEffect: SymAddr, - asm: loong64.AMOVV, + name: "LoweredAtomicAdd64Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018427387908}, // SP SB + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVBload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: loong64.AMOVB, + name: "LoweredAtomicAdd32Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVBUload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: loong64.AMOVBU, + name: "LoweredAtomicCas64", + argLen: 4, + resultNotInArgs: true, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVHload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: loong64.AMOVH, + name: 
"LoweredAtomicCas32", + argLen: 4, + resultNotInArgs: true, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVHUload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: loong64.AMOVHU, + name: "LoweredAtomicCas64Variant", + argLen: 4, + resultNotInArgs: true, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVWload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: loong64.AMOVW, + name: "LoweredAtomicCas32Variant", + argLen: 4, + resultNotInArgs: true, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVWUload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: loong64.AMOVWU, + name: "LoweredAtomicAnd8", + argLen: 3, + resultNotInArgs: true, + needIntTemp: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + asm: arm64.AAND, reg: 
regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVVload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: loong64.AMOVV, + name: "LoweredAtomicOr8", + argLen: 3, + resultNotInArgs: true, + needIntTemp: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + asm: arm64.AORR, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVFload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: loong64.AMOVF, + name: "LoweredAtomicAnd64", + argLen: 3, + resultNotInArgs: true, + needIntTemp: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + asm: arm64.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVDload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: loong64.AMOVD, + name: "LoweredAtomicOr64", + argLen: 3, + resultNotInArgs: true, + needIntTemp: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + asm: arm64.AORR, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 
F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVVloadidx", - argLen: 3, - asm: loong64.AMOVV, + name: "LoweredAtomicAnd32", + argLen: 3, + resultNotInArgs: true, + needIntTemp: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + asm: arm64.AAND, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVWloadidx", - argLen: 3, - asm: loong64.AMOVW, + name: "LoweredAtomicOr32", + argLen: 3, + resultNotInArgs: true, + needIntTemp: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + asm: arm64.AORR, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVWUloadidx", - argLen: 3, - asm: loong64.AMOVWU, + name: "LoweredAtomicAnd8Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVHloadidx", - argLen: 3, - asm: loong64.AMOVH, + name: "LoweredAtomicOr8Variant", + argLen: 3, + resultNotInArgs: true, + 
faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVHUloadidx", - argLen: 3, - asm: loong64.AMOVHU, + name: "LoweredAtomicAnd64Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVBloadidx", - argLen: 3, - asm: loong64.AMOVB, + name: "LoweredAtomicOr64Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVBUloadidx", - argLen: 3, - asm: loong64.AMOVBU, + name: "LoweredAtomicAnd32Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + 
{0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVFloadidx", - argLen: 3, - asm: loong64.AMOVF, + name: "LoweredAtomicOr32Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVDloadidx", - argLen: 3, - asm: loong64.AMOVD, + name: "LoweredWB", + auxType: auxInt64, + argLen: 1, + clobberFlags: true, reg: regInfo{ - inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - }, + clobbers: 9223372034975924224, // R16 R17 R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 16777216}, // R25 }, }, }, { - name: "MOVBstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: loong64.AMOVB, + name: "LoweredPanicBoundsA", + auxType: auxInt64, + argLen: 3, + call: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4}, // R2 + {1, 8}, // R3 }, }, }, { - name: "MOVHstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: loong64.AMOVH, + name: "LoweredPanicBoundsB", + auxType: auxInt64, + argLen: 3, + call: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 2}, // R1 + {1, 4}, // R2 }, }, }, { - name: "MOVWstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: loong64.AMOVW, + name: "LoweredPanicBoundsC", + auxType: auxInt64, + argLen: 3, + call: true, reg: 
regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 1}, // R0 + {1, 2}, // R1 }, }, }, { - name: "MOVVstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: loong64.AMOVV, + name: "PRFM", + auxType: auxInt64, + argLen: 2, + hasSideEffects: true, + asm: arm64.APRFM, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "MOVFstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: loong64.AMOVF, + name: "DMB", + auxType: auxInt64, + argLen: 1, + hasSideEffects: true, + asm: arm64.ADMB, + reg: regInfo{}, + }, + { + name: "ZERO", + argLen: 0, + zeroWidth: true, + fixedReg: true, + reg: regInfo{}, + }, + + { + name: "NEGV", + argLen: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVDstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: loong64.AMOVD, + name: "NEGF", + argLen: 1, + asm: loong64.ANEGF, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVBstoreidx", - argLen: 4, - asm: loong64.AMOVB, + name: "NEGD", + argLen: 1, + asm: loong64.ANEGD, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 
F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVHstoreidx", - argLen: 4, - asm: loong64.AMOVH, + name: "SQRTD", + argLen: 1, + asm: loong64.ASQRTD, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVWstoreidx", - argLen: 4, - asm: loong64.AMOVW, + name: "SQRTF", + argLen: 1, + asm: loong64.ASQRTF, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVVstoreidx", - argLen: 4, - asm: loong64.AMOVV, + name: "ABSD", + argLen: 1, + asm: loong64.AABSD, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVFstoreidx", - argLen: 4, - asm: loong64.AMOVF, + name: "CLZW", + argLen: 1, + asm: loong64.ACLZW, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: 
"MOVDstoreidx", - argLen: 4, - asm: loong64.AMOVD, + name: "CLZV", + argLen: 1, + asm: loong64.ACLZV, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVBstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: loong64.AMOVB, + name: "CTZW", + argLen: 1, + asm: loong64.ACTZW, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, - }, - }, - { - name: "MOVHstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: loong64.AMOVH, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVWstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: loong64.AMOVW, + name: "CTZV", + argLen: 1, + asm: loong64.ACTZV, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVVstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: loong64.AMOVV, + name: "REVB2H", + argLen: 1, + asm: loong64.AREVB2H, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVBstorezeroidx", - argLen: 3, - asm: loong64.AMOVB, + name: "REVB2W", + argLen: 1, + asm: loong64.AREVB2W, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 
g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVHstorezeroidx", - argLen: 3, - asm: loong64.AMOVH, + name: "REVBV", + argLen: 1, + asm: loong64.AREVBV, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVWstorezeroidx", - argLen: 3, - asm: loong64.AMOVW, + name: "BITREV4B", + argLen: 1, + asm: loong64.ABITREV4B, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVVstorezeroidx", - argLen: 3, - asm: loong64.AMOVV, + name: "BITREVW", + argLen: 1, + asm: loong64.ABITREVW, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVWfpgp", + name: "BITREVV", argLen: 1, - asm: loong64.AMOVW, + asm: loong64.ABITREVV, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 @@ -38632,12 +40642,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWgpfp", + name: "VPCNT64", argLen: 1, - asm: loong64.AMOVW, + asm: loong64.AVPCNTV, reg: regInfo{ inputs: []inputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 @@ -38645,25 +40655,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVVfpgp", 
+ name: "VPCNT32", argLen: 1, - asm: loong64.AMOVV, + asm: loong64.AVPCNTW, reg: regInfo{ inputs: []inputInfo{ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVVgpfp", + name: "VPCNT16", argLen: 1, - asm: loong64.AMOVV, + asm: loong64.AVPCNTH, reg: regInfo{ inputs: []inputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 @@ -38671,12 +40681,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVBreg", - argLen: 1, - asm: loong64.AMOVB, + name: "ADDV", + argLen: 2, + commutative: true, + asm: loong64.AADDVU, reg: regInfo{ inputs: []inputInfo{ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 @@ -38684,12 +40696,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVBUreg", - argLen: 1, - asm: loong64.AMOVBU, + name: "ADDVconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.AADDVU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741820}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 @@ -38697,12 +40710,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVHreg", - argLen: 1, - asm: loong64.AMOVH, + name: "SUBV", + argLen: 2, + asm: loong64.ASUBVU, reg: regInfo{ inputs: []inputInfo{ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 @@ -38710,9 +40724,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVHUreg", - argLen: 1, - asm: loong64.AMOVHU, + name: "SUBVconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.ASUBVU, reg: regInfo{ inputs: []inputInfo{ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 @@ -38723,12 +40738,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWreg", - argLen: 1, - asm: loong64.AMOVW, + name: "MULV", + argLen: 2, + commutative: true, + asm: loong64.AMULV, reg: regInfo{ inputs: []inputInfo{ {0, 
1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 @@ -38736,12 +40753,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWUreg", - argLen: 1, - asm: loong64.AMOVWU, + name: "MULHV", + argLen: 2, + commutative: true, + asm: loong64.AMULHV, reg: regInfo{ inputs: []inputInfo{ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 @@ -38749,12 +40768,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVVreg", - argLen: 1, - asm: loong64.AMOVV, + name: "MULHVU", + argLen: 2, + commutative: true, + asm: loong64.AMULHVU, reg: regInfo{ inputs: []inputInfo{ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 @@ -38762,12 +40783,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVVnop", - argLen: 1, - resultInArg0: true, + name: "DIVV", + argLen: 2, + asm: loong64.ADIVV, reg: regInfo{ inputs: []inputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 @@ -38775,51 +40797,56 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWF", - argLen: 1, - asm: loong64.AMOVWF, + name: "DIVVU", + argLen: 2, + asm: loong64.ADIVVU, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVWD", - argLen: 1, - asm: loong64.AMOVWD, + name: "REMV", + argLen: 2, + asm: loong64.AREMV, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1073741816}, // R4 R5 R6 R7 
R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVVF", - argLen: 1, - asm: loong64.AMOVVF, + name: "REMVU", + argLen: 2, + asm: loong64.AREMVU, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVVD", - argLen: 1, - asm: loong64.AMOVVD, + name: "ADDF", + argLen: 2, + commutative: true, + asm: loong64.AADDF, reg: regInfo{ inputs: []inputInfo{ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 @@ -38827,12 +40854,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "TRUNCFW", - argLen: 1, - asm: loong64.ATRUNCFW, + name: "ADDD", + argLen: 2, + commutative: true, + asm: loong64.AADDD, reg: regInfo{ inputs: []inputInfo{ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 @@ -38840,12 +40869,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "TRUNCDW", - argLen: 1, - asm: loong64.ATRUNCDW, + name: "SUBF", + argLen: 2, + asm: loong64.ASUBF, reg: regInfo{ inputs: []inputInfo{ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 @@ -38853,12 +40883,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "TRUNCFV", - argLen: 1, - asm: loong64.ATRUNCFV, + name: "SUBD", + argLen: 2, + asm: loong64.ASUBD, reg: regInfo{ inputs: 
[]inputInfo{ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 @@ -38866,12 +40897,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "TRUNCDV", - argLen: 1, - asm: loong64.ATRUNCDV, + name: "MULF", + argLen: 2, + commutative: true, + asm: loong64.AMULF, reg: regInfo{ inputs: []inputInfo{ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 @@ -38879,12 +40912,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVFD", - argLen: 1, - asm: loong64.AMOVFD, + name: "MULD", + argLen: 2, + commutative: true, + asm: loong64.AMULD, reg: regInfo{ inputs: []inputInfo{ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 @@ -38892,12 +40927,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVDF", - argLen: 1, - asm: loong64.AMOVDF, + name: "DIVF", + argLen: 2, + asm: loong64.ADIVF, reg: regInfo{ inputs: []inputInfo{ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 @@ -38905,12 +40941,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredRound32F", - argLen: 1, - resultInArg0: true, + name: "DIVD", + argLen: 2, + asm: loong64.ADIVD, reg: regInfo{ inputs: []inputInfo{ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 @@ -38918,127 +40955,129 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredRound64F", - argLen: 1, - resultInArg0: true, + name: "AND", + argLen: 2, + commutative: true, + asm: loong64.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 
F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "CALLstatic", - auxType: auxCallOff, - argLen: -1, - clobberFlags: true, - call: true, - reg: regInfo{ - clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - }, - { - name: "CALLtail", - auxType: auxCallOff, - argLen: -1, - clobberFlags: true, - call: true, - tailCall: true, + name: "ANDconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.AAND, reg: regInfo{ - clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, }, }, { - name: "CALLclosure", - auxType: auxCallOff, - argLen: -1, - clobberFlags: true, - call: true, + name: "OR", + argLen: 2, + commutative: true, + asm: loong64.AOR, reg: regInfo{ inputs: []inputInfo{ - {1, 268435456}, // R29 - {0, 1071644668}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, - clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { - name: "CALLinter", - auxType: auxCallOff, - argLen: -1, - clobberFlags: true, - call: true, + name: "ORconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.AOR, reg: regInfo{ inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, - clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { - name: "DUFFZERO", - auxType: auxInt64, - argLen: 2, - 
faultOnNilArg0: true, + name: "XOR", + argLen: 2, + commutative: true, + asm: loong64.AXOR, reg: regInfo{ inputs: []inputInfo{ - {0, 524288}, // R20 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, - clobbers: 524290, // R1 R20 }, }, { - name: "DUFFCOPY", - auxType: auxInt64, - argLen: 3, - faultOnNilArg0: true, - faultOnNilArg1: true, + name: "XORconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.AXOR, reg: regInfo{ inputs: []inputInfo{ - {0, 1048576}, // R21 - {1, 524288}, // R20 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, - clobbers: 1572866, // R1 R20 R21 }, }, { - name: "LoweredZero", - auxType: auxInt64, - argLen: 3, - faultOnNilArg0: true, + name: "NOR", + argLen: 2, + commutative: true, + asm: loong64.ANOR, reg: regInfo{ inputs: []inputInfo{ - {0, 524288}, // R20 - {1, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, - clobbers: 524288, // R20 }, }, { - name: "LoweredMove", - auxType: auxInt64, - argLen: 4, - faultOnNilArg0: true, - faultOnNilArg1: true, + name: "NORconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.ANOR, reg: regInfo{ inputs: []inputInfo{ - {0, 1048576}, // R21 - {1, 524288}, // R20 - {2, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, - clobbers: 1572864, // R20 R21 }, }, { - name: "LoweredAtomicLoad8", - argLen: 2, - faultOnNilArg0: true, + name: "ANDN", + argLen: 2, + asm: loong64.AANDN, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 @@ -39046,12 +41085,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicLoad32", - argLen: 2, - faultOnNilArg0: true, + name: "ORN", + argLen: 2, + asm: loong64.AORN, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 
R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 @@ -39059,182 +41099,205 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicLoad64", - argLen: 2, - faultOnNilArg0: true, + name: "FMADDF", + argLen: 3, + commutative: true, + asm: loong64.AFMADDF, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicStore8", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, + name: "FMADDD", + argLen: 3, + commutative: true, + asm: loong64.AFMADDD, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicStore32", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, + name: "FMSUBF", + argLen: 3, + commutative: true, + asm: loong64.AFMSUBF, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 
F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicStore64", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, + name: "FMSUBD", + argLen: 3, + commutative: true, + asm: loong64.AFMSUBD, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicStore8Variant", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, + name: "FNMADDF", + argLen: 3, + commutative: true, + asm: loong64.AFNMADDF, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicStore32Variant", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, + name: "FNMADDD", + argLen: 3, + commutative: true, + asm: loong64.AFNMADDD, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 
F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicStore64Variant", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, + name: "FNMSUBF", + argLen: 3, + commutative: true, + asm: loong64.AFNMSUBF, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicExchange32", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "FNMSUBD", + argLen: 3, + commutative: true, + asm: loong64.AFNMSUBD, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicExchange64", - argLen: 3, + name: "FMINF", + argLen: 2, + commutative: true, resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, + asm: loong64.AFMINF, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicExchange8Variant", - argLen: 3, + name: "FMIND", + argLen: 
2, + commutative: true, resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, + asm: loong64.AFMIND, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicAdd32", - argLen: 3, + name: "FMAXF", + argLen: 2, + commutative: true, resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, + asm: loong64.AFMAXF, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicAdd64", - argLen: 3, + name: "FMAXD", + argLen: 2, + commutative: true, resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, + asm: loong64.AFMAXD, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicCas32", - argLen: 4, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "MASKEQZ", + argLen: 2, + asm: loong64.AMASKEQZ, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {2, 
1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 @@ -39242,17 +41305,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicCas64", - argLen: 4, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "MASKNEZ", + argLen: 2, + asm: loong64.AMASKNEZ, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 @@ -39260,35 +41319,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicCas64Variant", - argLen: 4, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "FCOPYSGD", + argLen: 2, + asm: loong64.AFCOPYSGD, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicCas32Variant", - argLen: 4, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "SLL", + argLen: 2, + asm: loong64.ASLL, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 
R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 @@ -39296,16 +41347,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicAnd32", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - asm: loong64.AAMANDDBW, + name: "SLLV", + argLen: 2, + asm: loong64.ASLLV, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 @@ -39313,16 +41361,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicOr32", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - asm: loong64.AAMORDBW, + name: "SLLconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.ASLL, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 @@ -39330,16 +41375,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicAnd32value", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - asm: loong64.AAMANDDBW, + name: "SLLVconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.ASLLV, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 @@ -39347,16 +41389,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicAnd64value", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - asm: loong64.AAMANDDBV, + name: "SRL", + argLen: 2, + asm: loong64.ASRL, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 
R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 @@ -39364,16 +41403,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicOr32value", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - asm: loong64.AAMORDBW, + name: "SRLV", + argLen: 2, + asm: loong64.ASRLV, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 @@ -39381,16 +41417,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicOr64value", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - asm: loong64.AAMORDBV, + name: "SRLconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.ASRL, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 @@ -39398,1481 +41431,1480 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredNilCheck", - argLen: 2, - nilCheck: true, - faultOnNilArg0: true, + name: "SRLVconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.ASRLV, reg: regInfo{ inputs: []inputInfo{ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, - }, - }, - { - name: "FPFlagTrue", - argLen: 1, - reg: regInfo{ outputs: []outputInfo{ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "FPFlagFalse", - argLen: 1, + name: "SRA", + argLen: 2, + asm: loong64.ASRA, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, outputs: []outputInfo{ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredGetClosurePtr", - argLen: 0, - zeroWidth: true, + name: "SRAV", + argLen: 2, + asm: loong64.ASRAV, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 
R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, outputs: []outputInfo{ - {0, 268435456}, // R29 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredGetCallerSP", - argLen: 1, - rematerializeable: true, + name: "SRAconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.ASRA, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, outputs: []outputInfo{ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredGetCallerPC", - argLen: 0, - rematerializeable: true, + name: "SRAVconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.ASRAV, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, outputs: []outputInfo{ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredWB", - auxType: auxInt64, - argLen: 1, - clobberFlags: true, + name: "ROTR", + argLen: 2, + asm: loong64.AROTR, reg: regInfo{ - clobbers: 4611686017353646082, // R1 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, outputs: []outputInfo{ - {0, 268435456}, // R29 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredPubBarrier", - argLen: 1, - hasSideEffects: true, - asm: loong64.ADBAR, - reg: regInfo{}, - }, - { - name: "LoweredPanicBoundsA", - auxType: auxInt64, - argLen: 3, - call: true, + name: "ROTRV", + argLen: 2, + asm: loong64.AROTRV, reg: regInfo{ inputs: []inputInfo{ - {0, 4194304}, // R23 - {1, 8388608}, // R24 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredPanicBoundsB", + name: "ROTRconst", auxType: auxInt64, - argLen: 3, - call: true, + argLen: 1, + asm: loong64.AROTR, reg: regInfo{ inputs: []inputInfo{ - {0, 1048576}, // R21 - {1, 4194304}, // R23 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredPanicBoundsC", + name: "ROTRVconst", auxType: auxInt64, - argLen: 3, - call: true, + argLen: 1, + asm: loong64.AROTRV, reg: regInfo{ inputs: []inputInfo{ - {0, 524288}, // R20 - {1, 1048576}, // R21 + {0, 1073741816}, // R4 R5 R6 R7 R8 
R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "PRELD", - auxType: auxInt64, - argLen: 2, - hasSideEffects: true, - asm: loong64.APRELD, + name: "SGT", + argLen: 2, + asm: loong64.ASGT, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741820}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "PRELDX", - auxType: auxInt64, - argLen: 2, - hasSideEffects: true, - asm: loong64.APRELDX, + name: "SGTconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.ASGT, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741820}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, - { - name: "ADD", - argLen: 2, - commutative: true, - asm: mips.AADDU, + name: "SGTU", + argLen: 2, + asm: loong64.ASGTU, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "ADDconst", - auxType: auxInt32, + name: "SGTUconst", + auxType: auxInt64, argLen: 1, - asm: mips.AADDU, + asm: loong64.ASGTU, reg: regInfo{ inputs: []inputInfo{ - {0, 536870910}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "SUB", + name: "CMPEQF", argLen: 2, - asm: mips.ASUBU, + asm: loong64.ACMPEQF, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: 
[]outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SUBconst", - auxType: auxInt32, - argLen: 1, - asm: mips.ASUBU, + name: "CMPEQD", + argLen: 2, + asm: loong64.ACMPEQD, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MUL", - argLen: 2, - commutative: true, - asm: mips.AMUL, + name: "CMPGEF", + argLen: 2, + asm: loong64.ACMPGEF, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - clobbers: 105553116266496, // HI LO - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MULT", - argLen: 2, - commutative: true, - asm: mips.AMUL, + name: "CMPGED", + argLen: 2, + asm: loong64.ACMPGED, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 35184372088832}, // HI - {1, 70368744177664}, // LO + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MULTU", - argLen: 2, - commutative: true, - asm: mips.AMULU, + name: "CMPGTF", + argLen: 2, + asm: loong64.ACMPGTF, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 35184372088832}, // HI - {1, 70368744177664}, // LO + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 
F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "DIV", + name: "CMPGTD", argLen: 2, - asm: mips.ADIV, + asm: loong64.ACMPGTD, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 35184372088832}, // HI - {1, 70368744177664}, // LO + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "DIVU", - argLen: 2, - asm: mips.ADIVU, + name: "BSTRPICKW", + auxType: auxInt64, + argLen: 1, + asm: loong64.ABSTRPICKW, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 35184372088832}, // HI - {1, 70368744177664}, // LO + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "ADDF", - argLen: 2, - commutative: true, - asm: mips.AADDF, + name: "BSTRPICKV", + auxType: auxInt64, + argLen: 1, + asm: loong64.ABSTRPICKV, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "ADDD", - argLen: 2, - commutative: true, - asm: mips.AADDD, + name: "MOVVconst", + auxType: auxInt64, + argLen: 0, + rematerializeable: true, + asm: loong64.AMOVV, reg: regInfo{ - inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "SUBF", - argLen: 2, - asm: mips.ASUBF, + name: "MOVFconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: loong64.AMOVF, reg: regInfo{ - inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, 
}, }, { - name: "SUBD", - argLen: 2, - asm: mips.ASUBD, + name: "MOVDconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: loong64.AMOVD, reg: regInfo{ - inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MULF", - argLen: 2, - commutative: true, - asm: mips.AMULF, + name: "MOVVaddr", + auxType: auxSymOff, + argLen: 1, + rematerializeable: true, + symEffect: SymAddr, + asm: loong64.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4611686018427387908}, // SP SB }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MULD", - argLen: 2, - commutative: true, - asm: mips.AMULD, + name: "MOVBload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: loong64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "DIVF", - argLen: 2, - asm: mips.ADIVF, + name: "MOVBUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: loong64.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "DIVD", - argLen: 2, - asm: mips.ADIVD, + name: "MOVHload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: loong64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 
R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "AND", - argLen: 2, - commutative: true, - asm: mips.AAND, + name: "MOVHUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: loong64.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "ANDconst", - auxType: auxInt32, - argLen: 1, - asm: mips.AAND, + name: "MOVWload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: loong64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "OR", - argLen: 2, - commutative: true, - asm: mips.AOR, + name: "MOVWUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: loong64.AMOVWU, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "ORconst", - auxType: auxInt32, - argLen: 1, - asm: mips.AOR, + name: "MOVVload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: loong64.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "XOR", - argLen: 2, - commutative: true, - asm: mips.AXOR, + name: "MOVFload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: loong64.AMOVF, reg: regInfo{ inputs: []inputInfo{ - {0, 
469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "XORconst", - auxType: auxInt32, - argLen: 1, - asm: mips.AXOR, + name: "MOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: loong64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "NOR", - argLen: 2, - commutative: true, - asm: mips.ANOR, + name: "MOVVloadidx", + argLen: 3, + asm: loong64.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "NORconst", - auxType: auxInt32, - argLen: 1, - asm: mips.ANOR, + name: "MOVWloadidx", + argLen: 3, + asm: loong64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "NEG", - argLen: 1, + name: "MOVWUloadidx", + argLen: 3, + asm: loong64.AMOVWU, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 
R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "NEGF", - argLen: 1, - asm: mips.ANEGF, + name: "MOVHloadidx", + argLen: 3, + asm: loong64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "NEGD", - argLen: 1, - asm: mips.ANEGD, + name: "MOVHUloadidx", + argLen: 3, + asm: loong64.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "ABSD", - argLen: 1, - asm: mips.AABSD, + name: "MOVBloadidx", + argLen: 3, + asm: loong64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "SQRTD", - argLen: 1, - asm: mips.ASQRTD, + name: "MOVBUloadidx", + argLen: 3, + asm: loong64.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "SQRTF", - argLen: 1, - asm: mips.ASQRTF, + name: "MOVFloadidx", + argLen: 3, + asm: loong64.AMOVF, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 
F28 F30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SLL", - argLen: 2, - asm: mips.ASLL, + name: "MOVDloadidx", + argLen: 3, + asm: loong64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SLLconst", - auxType: auxInt32, - argLen: 1, - asm: mips.ASLL, + name: "MOVBstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "SRL", - argLen: 2, - asm: mips.ASRL, + name: "MOVHstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "SRLconst", - auxType: auxInt32, - argLen: 1, - asm: mips.ASRL, + name: "MOVWstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 
R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "SRA", - argLen: 2, - asm: mips.ASRA, + name: "MOVVstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "SRAconst", - auxType: auxInt32, - argLen: 1, - asm: mips.ASRA, + name: "MOVFstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVF, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CLZ", - argLen: 1, - asm: mips.ACLZ, + name: "MOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SGT", - argLen: 2, - asm: mips.ASGT, + name: "MOVBstoreidx", + argLen: 4, + asm: loong64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 
R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "SGTconst", - auxType: auxInt32, - argLen: 1, - asm: mips.ASGT, + name: "MOVHstoreidx", + argLen: 4, + asm: loong64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "SGTzero", - argLen: 1, - asm: mips.ASGT, + name: "MOVWstoreidx", + argLen: 4, + asm: loong64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "SGTU", - argLen: 2, - asm: mips.ASGTU, + name: "MOVVstoreidx", + argLen: 4, + asm: loong64.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "SGTUconst", - auxType: auxInt32, - argLen: 1, - asm: mips.ASGTU, + name: "MOVFstoreidx", + argLen: 4, + asm: loong64.AMOVF, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SGTUzero", - argLen: 1, - asm: mips.ASGTU, + name: "MOVDstoreidx", + 
argLen: 4, + asm: loong64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CMPEQF", - argLen: 2, - asm: mips.ACMPEQF, + name: "MOVBstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "CMPEQD", - argLen: 2, - asm: mips.ACMPEQD, + name: "MOVHstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "CMPGEF", - argLen: 2, - asm: mips.ACMPGEF, + name: "MOVWstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "CMPGED", - argLen: 2, - asm: mips.ACMPGED, + name: "MOVVstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "CMPGTF", - argLen: 2, - asm: mips.ACMPGTF, + name: "MOVBstorezeroidx", + argLen: 3, + asm: loong64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "CMPGTD", - argLen: 2, - asm: mips.ACMPGTD, + name: 
"MOVHstorezeroidx", + argLen: 3, + asm: loong64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - }, - }, - }, - { - name: "MOVWconst", - auxType: auxInt32, - argLen: 0, - rematerializeable: true, - asm: mips.AMOVW, - reg: regInfo{ - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 - }, - }, - }, - { - name: "MOVFconst", - auxType: auxFloat32, - argLen: 0, - rematerializeable: true, - asm: mips.AMOVF, - reg: regInfo{ - outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - }, - }, - }, - { - name: "MOVDconst", - auxType: auxFloat64, - argLen: 0, - rematerializeable: true, - asm: mips.AMOVD, - reg: regInfo{ - outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "MOVWaddr", - auxType: auxSymOff, - argLen: 1, - rematerializeable: true, - symEffect: SymAddr, - asm: mips.AMOVW, + name: "MOVWstorezeroidx", + argLen: 3, + asm: loong64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 140737555464192}, // SP SB - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "MOVBload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: mips.AMOVB, + name: "MOVVstorezeroidx", + argLen: 3, + asm: loong64.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "MOVBUload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: mips.AMOVBU, + name: "MOVWfpgp", + argLen: 1, + asm: loong64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVHload", - auxType: auxSymOff, - argLen: 
2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: mips.AMOVH, + name: "MOVWgpfp", + argLen: 1, + asm: loong64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVHUload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: mips.AMOVHU, + name: "MOVVfpgp", + argLen: 1, + asm: loong64.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVWload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: mips.AMOVW, + name: "MOVVgpfp", + argLen: 1, + asm: loong64.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVFload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: mips.AMOVF, + name: "MOVBreg", + argLen: 1, + asm: loong64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVDload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: mips.AMOVD, + name: "MOVBUreg", + argLen: 1, + asm: loong64.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - }, - }, - }, - { - name: "MOVBstore", - 
auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVB, - reg: regInfo{ - inputs: []inputInfo{ - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB - }, - }, - }, - { - name: "MOVHstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVH, - reg: regInfo{ - inputs: []inputInfo{ - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB - }, - }, - }, - { - name: "MOVWstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVW, - reg: regInfo{ - inputs: []inputInfo{ - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB - }, - }, - }, - { - name: "MOVFstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVF, - reg: regInfo{ - inputs: []inputInfo{ - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVDstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVD, + name: "MOVHreg", + argLen: 1, + asm: loong64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, - }, - }, - { - name: "MOVBstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVB, - reg: regInfo{ - inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVHstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVH, + name: "MOVHUreg", + argLen: 1, + asm: loong64.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, - }, - }, - { - name: "MOVWstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 
SP g R31 SB + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVWfpgp", + name: "MOVWreg", argLen: 1, - asm: mips.AMOVW, + asm: loong64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVWgpfp", + name: "MOVWUreg", argLen: 1, - asm: mips.AMOVW, + asm: loong64.AMOVWU, reg: regInfo{ inputs: []inputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVBreg", + name: "MOVVreg", argLen: 1, - asm: mips.AMOVB, + asm: loong64.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVBUreg", - argLen: 1, - asm: mips.AMOVBU, + name: "MOVVnop", + argLen: 1, + resultInArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVHreg", + name: "MOVWF", argLen: 1, - asm: mips.AMOVH, + asm: loong64.AMOVWF, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVHUreg", + name: "MOVWD", argLen: 1, - asm: mips.AMOVHU, + asm: loong64.AMOVWD, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 
R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVWreg", + name: "MOVVF", argLen: 1, - asm: mips.AMOVW, + asm: loong64.AMOVVF, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVWnop", - argLen: 1, - resultInArg0: true, + name: "MOVVD", + argLen: 1, + asm: loong64.AMOVVD, reg: regInfo{ inputs: []inputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CMOVZ", - argLen: 3, - resultInArg0: true, - asm: mips.ACMOVZ, + name: "TRUNCFW", + argLen: 1, + asm: loong64.ATRUNCFW, reg: regInfo{ inputs: []inputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 - {1, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 - {2, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CMOVZzero", - argLen: 2, - resultInArg0: true, - asm: mips.ACMOVZ, + name: "TRUNCDW", + argLen: 1, + asm: loong64.ATRUNCDW, reg: regInfo{ inputs: []inputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 
R25 R28 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVWF", + name: "TRUNCFV", argLen: 1, - asm: mips.AMOVWF, + asm: loong64.ATRUNCFV, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVWD", + name: "TRUNCDV", argLen: 1, - asm: mips.AMOVWD, + asm: loong64.ATRUNCDV, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "TRUNCFW", + name: "MOVFD", argLen: 1, - asm: mips.ATRUNCFW, + asm: loong64.AMOVFD, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "TRUNCDW", + name: "MOVDF", argLen: 1, - asm: mips.ATRUNCDW, + asm: loong64.AMOVDF, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVFD", - argLen: 1, - asm: mips.AMOVFD, + name: "LoweredRound32F", + argLen: 1, + resultInArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVDF", - argLen: 1, - asm: mips.AMOVDF, + name: "LoweredRound64F", + argLen: 1, + resultInArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 
F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { name: "CALLstatic", auxType: auxCallOff, - argLen: 1, + argLen: -1, clobberFlags: true, call: true, reg: regInfo{ - clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO + clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { name: "CALLtail", auxType: auxCallOff, - argLen: 1, + argLen: -1, clobberFlags: true, call: true, tailCall: true, reg: regInfo{ - clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO + clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { name: "CALLclosure", auxType: auxCallOff, - argLen: 3, + argLen: -1, clobberFlags: true, call: true, reg: regInfo{ inputs: []inputInfo{ - {1, 4194304}, // R22 - {0, 402653182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP R31 + {1, 268435456}, // R29 + {0, 1071644668}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, - clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO + clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { name: "CALLinter", auxType: auxCallOff, - argLen: 2, + argLen: -1, clobberFlags: true, call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, - clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO + clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { - name: "LoweredAtomicLoad8", + name: "DUFFZERO", + auxType: auxInt64, argLen: 2, faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 
R20 R21 R22 R24 R25 R28 SP g R31 SB - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 524288}, // R20 }, + clobbers: 524290, // R1 R20 }, }, { - name: "LoweredAtomicLoad32", - argLen: 2, + name: "DUFFCOPY", + auxType: auxInt64, + argLen: 3, faultOnNilArg0: true, + faultOnNilArg1: true, reg: regInfo{ inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 1048576}, // R21 + {1, 524288}, // R20 }, + clobbers: 1572866, // R1 R20 R21 }, }, { - name: "LoweredAtomicStore8", + name: "LoweredZero", + auxType: auxInt64, argLen: 3, faultOnNilArg0: true, - hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 524288}, // R20 + {1, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, + clobbers: 524288, // R20 }, }, { - name: "LoweredAtomicStore32", - argLen: 3, + name: "LoweredMove", + auxType: auxInt64, + argLen: 4, faultOnNilArg0: true, - hasSideEffects: true, + faultOnNilArg1: true, reg: regInfo{ inputs: []inputInfo{ - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 1048576}, // R21 + {1, 524288}, // R20 + {2, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, + clobbers: 1572864, // R20 R21 }, }, { - name: "LoweredAtomicStorezero", + name: "LoweredAtomicLoad8", argLen: 2, faultOnNilArg0: true, - hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredAtomicExchange", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "LoweredAtomicLoad32", + argLen: 2, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: 
"LoweredAtomicAdd", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "LoweredAtomicLoad64", + argLen: 2, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredAtomicAddconst", - auxType: auxInt32, - argLen: 2, + name: "LoweredAtomicStore8", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + }, + }, + { + name: "LoweredAtomicStore32", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + }, + }, + { + name: "LoweredAtomicStore64", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + }, + }, + { + name: "LoweredAtomicStore8Variant", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + }, + }, + { + name: "LoweredAtomicStore32Variant", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + }, + }, + { + name: "LoweredAtomicStore64Variant", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + }, + }, + { + name: "LoweredAtomicExchange32", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, 
+ reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "LoweredAtomicExchange64", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "LoweredAtomicExchange8Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "LoweredAtomicAdd32", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "LoweredAtomicAdd64", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "LoweredAtomicCas32", + argLen: 4, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 
R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredAtomicCas", + name: "LoweredAtomicCas64", argLen: 4, resultNotInArgs: true, faultOnNilArg0: true, @@ -40880,69 +42912,151 @@ var opcodeTable = [...]opInfo{ unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {2, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredAtomicAnd", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, - asm: mips.AAND, + name: "LoweredAtomicCas64Variant", + argLen: 4, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredAtomicOr", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, - asm: mips.AOR, + name: "LoweredAtomicCas32Variant", + argLen: 4, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 
R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredZero", - auxType: auxInt32, - argLen: 3, - faultOnNilArg0: true, + name: "LoweredAtomicAnd32", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + asm: loong64.AAMANDDBW, reg: regInfo{ inputs: []inputInfo{ - {0, 2}, // R1 - {1, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, - clobbers: 2, // R1 }, }, { - name: "LoweredMove", - auxType: auxInt32, - argLen: 4, - faultOnNilArg0: true, - faultOnNilArg1: true, + name: "LoweredAtomicOr32", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + asm: loong64.AAMORDBW, reg: regInfo{ inputs: []inputInfo{ - {0, 4}, // R2 - {1, 2}, // R1 - {2, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "LoweredAtomicAnd32value", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + asm: loong64.AAMANDDBW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "LoweredAtomicAnd64value", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + asm: loong64.AAMANDDBV, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "LoweredAtomicOr32value", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + asm: loong64.AAMORDBW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 
R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "LoweredAtomicOr64value", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + asm: loong64.AAMORDBV, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, - clobbers: 6, // R1 R2 }, }, { @@ -40952,7 +43066,7 @@ var opcodeTable = [...]opInfo{ faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -40961,7 +43075,7 @@ var opcodeTable = [...]opInfo{ argLen: 1, reg: regInfo{ outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -40970,7 +43084,7 @@ var opcodeTable = [...]opInfo{ argLen: 1, reg: regInfo{ outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -40980,7 +43094,7 @@ var opcodeTable = [...]opInfo{ zeroWidth: true, reg: regInfo{ outputs: []outputInfo{ - {0, 4194304}, // R22 + {0, 268435456}, // R29 }, }, }, @@ -40990,7 +43104,7 @@ var opcodeTable = [...]opInfo{ rematerializeable: true, reg: regInfo{ outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -41000,7 +43114,7 @@ var opcodeTable = [...]opInfo{ rematerializeable: true, reg: regInfo{ outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -41010,9 +43124,9 @@ var opcodeTable = [...]opInfo{ argLen: 1, clobberFlags: true, reg: regInfo{ - clobbers: 140737219919872, // R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO + clobbers: 4611686017353646082, // R1 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 outputs: []outputInfo{ - {0, 16777216}, // R25 + {0, 268435456}, // R29 }, }, }, @@ -41020,7 +43134,7 @@ var opcodeTable = [...]opInfo{ name: "LoweredPubBarrier", argLen: 1, hasSideEffects: true, - asm: mips.ASYNC, + asm: loong64.ADBAR, reg: regInfo{}, }, { @@ -41030,8 +43144,8 @@ var opcodeTable = [...]opInfo{ call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 8}, // R3 - {1, 16}, // R4 + {0, 4194304}, // R23 + {1, 8388608}, // R24 }, }, }, @@ -41042,8 +43156,8 @@ var opcodeTable = [...]opInfo{ call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 
4}, // R2 - {1, 8}, // R3 + {0, 1048576}, // R21 + {1, 4194304}, // R23 }, }, }, @@ -41054,167 +43168,168 @@ var opcodeTable = [...]opInfo{ call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 2}, // R1 - {1, 4}, // R2 - }, - }, - }, - { - name: "LoweredPanicExtendA", - auxType: auxInt64, - argLen: 4, - call: true, - reg: regInfo{ - inputs: []inputInfo{ - {0, 32}, // R5 - {1, 8}, // R3 - {2, 16}, // R4 + {0, 524288}, // R20 + {1, 1048576}, // R21 }, }, }, { - name: "LoweredPanicExtendB", - auxType: auxInt64, - argLen: 4, - call: true, + name: "PRELD", + auxType: auxInt64, + argLen: 2, + hasSideEffects: true, + asm: loong64.APRELD, reg: regInfo{ inputs: []inputInfo{ - {0, 32}, // R5 - {1, 4}, // R2 - {2, 8}, // R3 + {0, 1073741820}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredPanicExtendC", - auxType: auxInt64, - argLen: 4, - call: true, + name: "PRELDX", + auxType: auxInt64, + argLen: 2, + hasSideEffects: true, + asm: loong64.APRELDX, reg: regInfo{ inputs: []inputInfo{ - {0, 32}, // R5 - {1, 2}, // R1 - {2, 4}, // R2 + {0, 1073741820}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "ADDV", + name: "ADD", argLen: 2, commutative: true, - asm: mips.AADDVU, + asm: mips.AADDU, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "ADDVconst", - auxType: auxInt64, + name: "ADDconst", + auxType: auxInt32, argLen: 1, - asm: mips.AADDVU, + asm: mips.AADDU, reg: regInfo{ inputs: []inputInfo{ - {0, 268435454}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 + {0, 536870910}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "SUBV", + name: "SUB", argLen: 2, - asm: mips.ASUBVU, + asm: mips.ASUBU, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 
R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "SUBVconst", - auxType: auxInt64, + name: "SUBconst", + auxType: auxInt32, argLen: 1, - asm: mips.ASUBVU, + asm: mips.ASUBU, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "MULV", + name: "MUL", argLen: 2, commutative: true, - asm: mips.AMULV, + asm: mips.AMUL, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, + clobbers: 105553116266496, // HI LO outputs: []outputInfo{ - {0, 1152921504606846976}, // HI - {1, 2305843009213693952}, // LO + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "MULVU", + name: "MULT", argLen: 2, commutative: true, - asm: mips.AMULVU, + asm: mips.AMUL, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 1152921504606846976}, // HI - {1, 2305843009213693952}, // LO + {0, 35184372088832}, // HI + {1, 70368744177664}, // LO }, }, }, { - name: "DIVV", + name: "MULTU", + argLen: 2, + commutative: true, + asm: mips.AMULU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 35184372088832}, // HI + {1, 70368744177664}, // LO + }, + }, + }, + { + name: "DIV", argLen: 2, - asm: mips.ADIVV, + asm: mips.ADIV, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 1152921504606846976}, // HI - {1, 2305843009213693952}, // LO + {0, 35184372088832}, // HI + {1, 70368744177664}, // LO }, 
}, }, { - name: "DIVVU", + name: "DIVU", argLen: 2, - asm: mips.ADIVVU, + asm: mips.ADIVU, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 1152921504606846976}, // HI - {1, 2305843009213693952}, // LO + {0, 35184372088832}, // HI + {1, 70368744177664}, // LO }, }, }, @@ -41225,11 +43340,11 @@ var opcodeTable = [...]opInfo{ asm: mips.AADDF, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, @@ -41240,11 +43355,11 @@ var opcodeTable = [...]opInfo{ asm: mips.AADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, @@ -41254,11 +43369,11 @@ var opcodeTable = [...]opInfo{ asm: mips.ASUBF, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, @@ -41268,11 +43383,11 @@ var opcodeTable = [...]opInfo{ asm: mips.ASUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 
1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, @@ -41283,11 +43398,11 @@ var opcodeTable = [...]opInfo{ asm: mips.AMULF, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, @@ -41298,11 +43413,11 @@ var opcodeTable = [...]opInfo{ asm: mips.AMULD, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, @@ -41312,11 +43427,11 @@ var opcodeTable = [...]opInfo{ asm: mips.ADIVF, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, @@ -41326,11 +43441,11 @@ var opcodeTable = [...]opInfo{ asm: mips.ADIVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // 
F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, @@ -41341,25 +43456,25 @@ var opcodeTable = [...]opInfo{ asm: mips.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { name: "ANDconst", - auxType: auxInt64, + auxType: auxInt32, argLen: 1, asm: mips.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, @@ -41370,25 +43485,25 @@ var opcodeTable = [...]opInfo{ asm: mips.AOR, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { name: "ORconst", - auxType: auxInt64, + auxType: auxInt32, argLen: 1, asm: mips.AOR, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, @@ -41399,25 +43514,25 @@ var opcodeTable = [...]opInfo{ asm: mips.AXOR, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 
234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { name: "XORconst", - auxType: auxInt64, + auxType: auxInt32, argLen: 1, asm: mips.AXOR, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, @@ -41428,37 +43543,37 @@ var opcodeTable = [...]opInfo{ asm: mips.ANOR, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { name: "NORconst", - auxType: auxInt64, + auxType: auxInt32, argLen: 1, asm: mips.ANOR, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "NEGV", + name: "NEG", argLen: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, @@ -41468,10 +43583,10 @@ var opcodeTable = [...]opInfo{ asm: mips.ANEGF, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 
F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, @@ -41481,10 +43596,10 @@ var opcodeTable = [...]opInfo{ asm: mips.ANEGD, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, @@ -41494,10 +43609,10 @@ var opcodeTable = [...]opInfo{ asm: mips.AABSD, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, @@ -41507,10 +43622,10 @@ var opcodeTable = [...]opInfo{ asm: mips.ASQRTD, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, @@ -41520,94 +43635,107 @@ var opcodeTable = [...]opInfo{ asm: mips.ASQRTF, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "SLLV", + name: "SLL", argLen: 2, - asm: mips.ASLLV, + asm: mips.ASLL, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 
R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "SLLVconst", - auxType: auxInt64, + name: "SLLconst", + auxType: auxInt32, argLen: 1, - asm: mips.ASLLV, + asm: mips.ASLL, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "SRLV", + name: "SRL", argLen: 2, - asm: mips.ASRLV, + asm: mips.ASRL, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "SRLVconst", - auxType: auxInt64, + name: "SRLconst", + auxType: auxInt32, argLen: 1, - asm: mips.ASRLV, + asm: mips.ASRL, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "SRAV", + name: "SRA", argLen: 2, - asm: mips.ASRAV, + asm: mips.ASRA, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "SRAVconst", - auxType: auxInt64, + name: "SRAconst", + auxType: auxInt32, argLen: 1, - asm: mips.ASRAV, + asm: mips.ASRA, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 
R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "CLZ", + argLen: 1, + asm: mips.ACLZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, @@ -41617,25 +43745,38 @@ var opcodeTable = [...]opInfo{ asm: mips.ASGT, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { name: "SGTconst", - auxType: auxInt64, + auxType: auxInt32, argLen: 1, asm: mips.ASGT, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "SGTzero", + argLen: 1, + asm: mips.ASGT, + reg: regInfo{ + inputs: []inputInfo{ + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, @@ -41645,25 +43786,38 @@ var opcodeTable = [...]opInfo{ asm: mips.ASGTU, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { name: "SGTUconst", - auxType: auxInt64, + auxType: auxInt32, argLen: 1, asm: mips.ASGTU, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 
g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "SGTUzero", + argLen: 1, + asm: mips.ASGTU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, @@ -41673,8 +43827,8 @@ var opcodeTable = [...]opInfo{ asm: mips.ACMPEQF, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, @@ -41684,8 +43838,8 @@ var opcodeTable = [...]opInfo{ asm: mips.ACMPEQD, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, @@ -41695,8 +43849,8 @@ var opcodeTable = [...]opInfo{ asm: mips.ACMPGEF, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, @@ -41706,8 +43860,8 @@ var opcodeTable = [...]opInfo{ asm: mips.ACMPGED, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, @@ -41717,8 +43871,8 @@ var opcodeTable = [...]opInfo{ asm: mips.ACMPGTF, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 
F26 F28 F30 }, }, }, @@ -41728,32 +43882,32 @@ var opcodeTable = [...]opInfo{ asm: mips.ACMPGTD, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "MOVVconst", - auxType: auxInt64, + name: "MOVWconst", + auxType: auxInt32, argLen: 0, rematerializeable: true, - asm: mips.AMOVV, + asm: mips.AMOVW, reg: regInfo{ outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { name: "MOVFconst", - auxType: auxFloat64, + auxType: auxFloat32, argLen: 0, rematerializeable: true, asm: mips.AMOVF, reg: regInfo{ outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, @@ -41765,23 +43919,23 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVD, reg: regInfo{ outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "MOVVaddr", + name: "MOVWaddr", auxType: auxSymOff, argLen: 1, rematerializeable: true, symEffect: SymAddr, - asm: mips.AMOVV, + asm: mips.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018460942336}, // SP SB + {0, 140737555464192}, // SP SB }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, @@ -41794,10 +43948,10 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, @@ -41810,10 +43964,10 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 
R21 R22 R24 R25 R28 R31 }, }, }, @@ -41826,10 +43980,10 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, @@ -41842,10 +43996,10 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, @@ -41858,42 +44012,10 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB - }, - outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 - }, - }, - }, - { - name: "MOVWUload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: mips.AMOVWU, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB - }, - outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 - }, - }, - }, - { - name: "MOVVload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: mips.AMOVV, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, @@ -41906,10 +44028,10 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVF, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, @@ -41922,10 +44044,10 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVD, reg: 
regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, @@ -41938,8 +44060,8 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, }, }, @@ -41952,8 +44074,8 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, }, }, @@ -41966,22 +44088,8 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB - }, - }, - }, - { - name: "MOVVstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVV, - reg: regInfo{ - inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, }, }, @@ -41994,8 +44102,8 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVF, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, }, }, @@ -42008,8 +44116,8 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, 
// R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, }, }, @@ -42022,7 +44130,7 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, }, }, @@ -42035,7 +44143,7 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, }, }, @@ -42048,20 +44156,7 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB - }, - }, - }, - { - name: "MOVVstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVV, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, }, }, @@ -42071,10 +44166,10 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, @@ -42084,36 +44179,10 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 - }, - outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - }, - }, - { - name: "MOVVfpgp", - argLen: 1, - asm: mips.AMOVV, - reg: regInfo{ - inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 - }, - }, - }, - { - name: "MOVVgpfp", - argLen: 1, - asm: mips.AMOVV, - reg: regInfo{ - inputs: []inputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 
335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, @@ -42123,10 +44192,10 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, @@ -42136,10 +44205,10 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, @@ -42149,10 +44218,10 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, @@ -42162,10 +44231,10 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, @@ -42175,49 +44244,54 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "MOVWUreg", - argLen: 1, - asm: mips.AMOVWU, + name: "MOVWnop", + argLen: 1, + resultInArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 
234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "MOVVreg", - argLen: 1, - asm: mips.AMOVV, + name: "CMOVZ", + argLen: 3, + resultInArg0: true, + asm: mips.ACMOVZ, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {2, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "MOVVnop", - argLen: 1, + name: "CMOVZzero", + argLen: 2, resultInArg0: true, + asm: mips.ACMOVZ, reg: regInfo{ inputs: []inputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, @@ -42227,10 +44301,10 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVWF, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, @@ -42240,101 +44314,49 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVWD, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "MOVVF", + name: "TRUNCFW", argLen: 1, - asm: mips.AMOVVF, + asm: mips.ATRUNCFW, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 
F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "MOVVD", + name: "TRUNCDW", argLen: 1, - asm: mips.AMOVVD, + asm: mips.ATRUNCDW, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "TRUNCFW", + name: "MOVFD", argLen: 1, - asm: mips.ATRUNCFW, + asm: mips.AMOVFD, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - }, - }, - { - name: "TRUNCDW", - argLen: 1, - asm: mips.ATRUNCDW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - }, - }, - { - name: "TRUNCFV", - argLen: 1, - asm: mips.ATRUNCFV, - reg: regInfo{ - inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - }, - }, - { - name: "TRUNCDV", - argLen: 1, - asm: mips.ATRUNCDV, - reg: regInfo{ - inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - }, - }, - { - name: "MOVFD", - argLen: 1, - asm: mips.AMOVFD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, @@ -42344,10 +44366,10 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVDF, reg: regInfo{ inputs: []inputInfo{ - 
{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, @@ -42358,7 +44380,7 @@ var opcodeTable = [...]opInfo{ clobberFlags: true, call: true, reg: regInfo{ - clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO + clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO }, }, { @@ -42369,7 +44391,7 @@ var opcodeTable = [...]opInfo{ call: true, tailCall: true, reg: regInfo{ - clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO + clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO }, }, { @@ -42381,9 +44403,9 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 4194304}, // R22 - {0, 201326590}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP R31 + {0, 402653182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP R31 }, - clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO + clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO }, }, { @@ -42394,93 +44416,9 @@ var opcodeTable = [...]opInfo{ call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 - }, - clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO - }, - }, - { - name: "DUFFZERO", - auxType: auxInt64, - argLen: 2, - faultOnNilArg0: true, - reg: regInfo{ - inputs: []inputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 - }, - clobbers: 134217730, // R1 R31 - }, - }, - { - name: "DUFFCOPY", - auxType: auxInt64, - argLen: 3, - faultOnNilArg0: true, - faultOnNilArg1: true, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4}, // R2 - {1, 2}, // R1 - }, - clobbers: 134217734, // R1 R2 R31 - }, - }, - { - name: "LoweredZero", - auxType: auxInt64, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, - reg: regInfo{ - inputs: []inputInfo{ - 
{0, 2}, // R1 - {1, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 - }, - clobbers: 2, // R1 - }, - }, - { - name: "LoweredMove", - auxType: auxInt64, - argLen: 4, - clobberFlags: true, - faultOnNilArg0: true, - faultOnNilArg1: true, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4}, // R2 - {1, 2}, // R1 - {2, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 - }, - clobbers: 6, // R1 R2 - }, - }, - { - name: "LoweredAtomicAnd32", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, - asm: mips.AAND, - reg: regInfo{ - inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB - }, - }, - }, - { - name: "LoweredAtomicOr32", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, - asm: mips.AOR, - reg: regInfo{ - inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, + clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO }, }, { @@ -42489,10 +44427,10 @@ var opcodeTable = [...]opInfo{ faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, @@ -42502,23 +44440,10 @@ var opcodeTable = [...]opInfo{ faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB - }, - outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 - }, - }, - }, - { - name: "LoweredAtomicLoad64", - argLen: 2, - faultOnNilArg0: true, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, @@ -42529,8 +44454,8 @@ var opcodeTable = [...]opInfo{ hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 
R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, }, }, @@ -42541,47 +44466,24 @@ var opcodeTable = [...]opInfo{ hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB - }, - }, - }, - { - name: "LoweredAtomicStore64", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, - reg: regInfo{ - inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB - }, - }, - }, - { - name: "LoweredAtomicStorezero32", - argLen: 2, - faultOnNilArg0: true, - hasSideEffects: true, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, }, }, { - name: "LoweredAtomicStorezero64", + name: "LoweredAtomicStorezero", argLen: 2, faultOnNilArg0: true, hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, }, }, { - name: "LoweredAtomicExchange32", + name: "LoweredAtomicExchange", argLen: 3, resultNotInArgs: true, faultOnNilArg0: true, @@ -42589,16 +44491,16 @@ var opcodeTable = [...]opInfo{ unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "LoweredAtomicExchange64", + name: "LoweredAtomicAdd", argLen: 3, resultNotInArgs: true, faultOnNilArg0: true, @@ -42606,116 +44508,103 @@ var opcodeTable = [...]opInfo{ unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 
R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "LoweredAtomicAdd32", - argLen: 3, + name: "LoweredAtomicAddconst", + auxType: auxInt32, + argLen: 2, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "LoweredAtomicAdd64", - argLen: 3, + name: "LoweredAtomicCas", + argLen: 4, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {2, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "LoweredAtomicAddconst32", - auxType: auxInt32, - argLen: 2, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "LoweredAtomicAnd", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + asm: mips.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB - }, - outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, }, }, { - name: "LoweredAtomicAddconst64", - auxType: auxInt64, - argLen: 2, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: 
"LoweredAtomicOr", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + asm: mips.AOR, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB - }, - outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, }, }, { - name: "LoweredAtomicCas32", - argLen: 4, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "LoweredZero", + auxType: auxInt32, + argLen: 3, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {2, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB - }, - outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 2}, // R1 + {1, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, + clobbers: 2, // R1 }, }, { - name: "LoweredAtomicCas64", - argLen: 4, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "LoweredMove", + auxType: auxInt32, + argLen: 4, + faultOnNilArg0: true, + faultOnNilArg1: true, reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {2, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB - }, - outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 4}, // R2 + {1, 2}, // R1 + {2, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, + clobbers: 6, // R1 R2 }, }, { @@ -42725,7 +44614,7 @@ var opcodeTable = [...]opInfo{ faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, }, }, @@ -42734,7 +44623,7 @@ var opcodeTable = [...]opInfo{ argLen: 1, reg: regInfo{ outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, @@ -42743,7 +44632,7 @@ var opcodeTable = [...]opInfo{ argLen: 1, reg: regInfo{ outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 
R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, @@ -42763,7 +44652,7 @@ var opcodeTable = [...]opInfo{ rematerializeable: true, reg: regInfo{ outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, @@ -42773,7 +44662,7 @@ var opcodeTable = [...]opInfo{ rematerializeable: true, reg: regInfo{ outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, @@ -42783,7 +44672,7 @@ var opcodeTable = [...]opInfo{ argLen: 1, clobberFlags: true, reg: regInfo{ - clobbers: 4611686018293170176, // R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO + clobbers: 140737219919872, // R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO outputs: []outputInfo{ {0, 16777216}, // R25 }, @@ -42832,1743 +44721,1789 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "ADD", - argLen: 2, - commutative: true, - asm: ppc64.AADD, + name: "LoweredPanicExtendA", + auxType: auxInt64, + argLen: 4, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 32}, // R5 + {1, 8}, // R3 + {2, 16}, // R4 }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + { + name: "LoweredPanicExtendB", + auxType: auxInt64, + argLen: 4, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 32}, // R5 + {1, 4}, // R2 + {2, 8}, // R3 }, }, }, { - name: "ADDCC", + name: "LoweredPanicExtendC", + auxType: auxInt64, + argLen: 4, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 32}, // R5 + {1, 2}, // R1 + {2, 4}, // R2 + }, + }, + }, + + { + name: "ADDV", argLen: 2, commutative: true, - asm: ppc64.AADDCC, + asm: mips.AADDVU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "ADDconst", + name: "ADDVconst", auxType: auxInt64, argLen: 1, - asm: ppc64.AADD, + asm: mips.AADDVU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 268435454}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 }, outputs: []outputInfo{ - {0, 
1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "ADDCCconst", + name: "SUBV", + argLen: 2, + asm: mips.ASUBVU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "SUBVconst", auxType: auxInt64, argLen: 1, - asm: ppc64.AADDCCC, + asm: mips.ASUBVU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, - clobbers: 9223372036854775808, // XER outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "FADD", + name: "MULV", argLen: 2, commutative: true, - asm: ppc64.AFADD, + asm: mips.AMULV, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504606846976}, // HI + {1, 2305843009213693952}, // LO }, }, }, { - name: "FADDS", + name: "MULVU", argLen: 2, commutative: true, - asm: ppc64.AFADDS, + asm: mips.AMULVU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504606846976}, // HI + {1, 2305843009213693952}, // LO }, }, }, { - name: "SUB", + name: "DIVV", argLen: 2, - asm: ppc64.ASUB, + asm: mips.ADIVV, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 
R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504606846976}, // HI + {1, 2305843009213693952}, // LO }, }, }, { - name: "SUBCC", + name: "DIVVU", argLen: 2, - asm: ppc64.ASUBCC, + asm: mips.ADIVVU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504606846976}, // HI + {1, 2305843009213693952}, // LO }, }, }, { - name: "SUBFCconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ASUBC, + name: "ADDF", + argLen: 2, + commutative: true, + asm: mips.AADDF, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, - clobbers: 9223372036854775808, // XER outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "FSUB", - argLen: 2, - asm: ppc64.AFSUB, + name: "ADDD", + argLen: 2, + commutative: true, + asm: mips.AADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "FSUBS", + name: "SUBF", argLen: 2, - asm: ppc64.AFSUBS, + asm: mips.ASUBF, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 
F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "XSMINJDP", + name: "SUBD", argLen: 2, - asm: ppc64.AXSMINJDP, + asm: mips.ASUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "XSMAXJDP", - argLen: 2, - asm: ppc64.AXSMAXJDP, - reg: regInfo{ + name: "MULF", + argLen: 2, + commutative: true, + asm: mips.AMULF, + reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MULLD", + name: "MULD", argLen: 2, commutative: true, - asm: ppc64.AMULLD, + asm: mips.AMULD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 
F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MULLW", - argLen: 2, - commutative: true, - asm: ppc64.AMULLW, + name: "DIVF", + argLen: 2, + asm: mips.ADIVF, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MULLDconst", - auxType: auxInt32, - argLen: 1, - asm: ppc64.AMULLD, + name: "DIVD", + argLen: 2, + asm: mips.ADIVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MULLWconst", - auxType: auxInt32, - argLen: 1, - asm: ppc64.AMULLW, + name: "AND", + argLen: 2, + commutative: true, + asm: mips.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MADDLD", - argLen: 3, - asm: ppc64.AMADDLD, + name: "ANDconst", + auxType: auxInt64, + argLen: 1, + asm: mips.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 
R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MULHD", + name: "OR", argLen: 2, commutative: true, - asm: ppc64.AMULHD, + asm: mips.AOR, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MULHW", - argLen: 2, - commutative: true, - asm: ppc64.AMULHW, + name: "ORconst", + auxType: auxInt64, + argLen: 1, + asm: mips.AOR, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MULHDU", + name: "XOR", argLen: 2, commutative: true, - asm: ppc64.AMULHDU, + asm: mips.AXOR, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MULHDUCC", - argLen: 2, - commutative: true, - asm: ppc64.AMULHDUCC, + name: "XORconst", + auxType: auxInt64, + argLen: 1, + asm: mips.AXOR, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 
R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MULHWU", + name: "NOR", argLen: 2, commutative: true, - asm: ppc64.AMULHWU, + asm: mips.ANOR, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "FMUL", - argLen: 2, - commutative: true, - asm: ppc64.AFMUL, + name: "NORconst", + auxType: auxInt64, + argLen: 1, + asm: mips.ANOR, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "FMULS", - argLen: 2, - commutative: true, - asm: ppc64.AFMULS, + name: "NEGV", + argLen: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "FMADD", - argLen: 3, - asm: ppc64.AFMADD, + name: "NEGF", + argLen: 1, + asm: mips.ANEGF, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 
F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "FMADDS", - argLen: 3, - asm: ppc64.AFMADDS, + name: "NEGD", + argLen: 1, + asm: mips.ANEGD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "FMSUB", - argLen: 3, - asm: ppc64.AFMSUB, + name: "ABSD", + argLen: 1, + asm: mips.AABSD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "FMSUBS", - argLen: 3, - asm: ppc64.AFMSUBS, + name: "SQRTD", + argLen: 1, + asm: mips.ASQRTD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SRAD", - argLen: 
2, - asm: ppc64.ASRAD, + name: "SQRTF", + argLen: 1, + asm: mips.ASQRTF, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, - clobbers: 9223372036854775808, // XER outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SRAW", + name: "SLLV", argLen: 2, - asm: ppc64.ASRAW, + asm: mips.ASLLV, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, - clobbers: 9223372036854775808, // XER outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "SRD", - argLen: 2, - asm: ppc64.ASRD, + name: "SLLVconst", + auxType: auxInt64, + argLen: 1, + asm: mips.ASLLV, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "SRW", + name: "SRLV", argLen: 2, - asm: ppc64.ASRW, + asm: mips.ASRLV, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "SLD", - argLen: 2, - asm: ppc64.ASLD, + name: "SRLVconst", + auxType: auxInt64, + argLen: 1, + asm: mips.ASRLV, reg: regInfo{ inputs: 
[]inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "SLW", + name: "SRAV", argLen: 2, - asm: ppc64.ASLW, + asm: mips.ASRAV, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "ROTL", - argLen: 2, - asm: ppc64.AROTL, + name: "SRAVconst", + auxType: auxInt64, + argLen: 1, + asm: mips.ASRAV, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "ROTLW", + name: "SGT", argLen: 2, - asm: ppc64.AROTLW, + asm: mips.ASGT, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "CLRLSLWI", - auxType: auxInt32, + name: "SGTconst", + auxType: auxInt64, argLen: 1, - asm: ppc64.ACLRLSLWI, + asm: mips.ASGT, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 
R24 R25 g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "CLRLSLDI", - auxType: auxInt32, - argLen: 1, - asm: ppc64.ACLRLSLDI, + name: "SGTU", + argLen: 2, + asm: mips.ASGTU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "ADDC", - argLen: 2, - commutative: true, - asm: ppc64.AADDC, + name: "SGTUconst", + auxType: auxInt64, + argLen: 1, + asm: mips.ASGTU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, - clobbers: 9223372036854775808, // XER outputs: []outputInfo{ - {1, 9223372036854775808}, // XER - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "SUBC", + name: "CMPEQF", argLen: 2, - asm: ppc64.ASUBC, + asm: mips.ACMPEQF, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - clobbers: 9223372036854775808, // XER - outputs: []outputInfo{ - {1, 9223372036854775808}, // XER - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ADDCconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.AADDC, + name: "CMPEQD", + argLen: 2, + asm: mips.ACMPEQD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {1, 9223372036854775808}, // XER - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 
F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SUBCconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ASUBC, + name: "CMPGEF", + argLen: 2, + asm: mips.ACMPGEF, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {1, 9223372036854775808}, // XER - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ADDE", - argLen: 3, - commutative: true, - asm: ppc64.AADDE, + name: "CMPGED", + argLen: 2, + asm: mips.ACMPGED, reg: regInfo{ inputs: []inputInfo{ - {2, 9223372036854775808}, // XER - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - clobbers: 9223372036854775808, // XER - outputs: []outputInfo{ - {1, 9223372036854775808}, // XER - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ADDZE", + name: "CMPGTF", argLen: 2, - asm: ppc64.AADDZE, + asm: mips.ACMPGTF, reg: regInfo{ inputs: []inputInfo{ - {1, 9223372036854775808}, // XER - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - clobbers: 9223372036854775808, // XER - outputs: []outputInfo{ - {1, 9223372036854775808}, // XER - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SUBE", - argLen: 3, - asm: ppc64.ASUBE, + name: "CMPGTD", + argLen: 2, + asm: mips.ACMPGTD, reg: regInfo{ inputs: []inputInfo{ - {2, 9223372036854775808}, // XER - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - clobbers: 9223372036854775808, // XER - outputs: []outputInfo{ - {1, 9223372036854775808}, // XER - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 
F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ADDZEzero", - argLen: 1, - asm: ppc64.AADDZE, + name: "MOVVconst", + auxType: auxInt64, + argLen: 0, + rematerializeable: true, + asm: mips.AMOVV, reg: regInfo{ - inputs: []inputInfo{ - {0, 9223372036854775808}, // XER - }, - clobbers: 9223372036854775808, // XER outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "SUBZEzero", - argLen: 1, - asm: ppc64.ASUBZE, + name: "MOVFconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: mips.AMOVF, reg: regInfo{ - inputs: []inputInfo{ - {0, 9223372036854775808}, // XER - }, - clobbers: 9223372036854775808, // XER outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SRADconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ASRAD, + name: "MOVDconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: mips.AMOVD, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - clobbers: 9223372036854775808, // XER outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SRAWconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ASRAW, + name: "MOVVaddr", + auxType: auxSymOff, + argLen: 1, + rematerializeable: true, + symEffect: SymAddr, + asm: mips.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686018460942336}, // SP SB }, - clobbers: 9223372036854775808, // XER outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "SRDconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ASRD, + name: "MOVBload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "SRWconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ASRW, + name: "MOVBUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 
1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "SLDconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ASLD, + name: "MOVHload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "SLWconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ASLW, + name: "MOVHUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "ROTLconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.AROTL, + name: "MOVWload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "ROTLWconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.AROTLW, + name: "MOVWUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVWU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "EXTSWSLconst", 
- auxType: auxInt64, - argLen: 1, - asm: ppc64.AEXTSWSLI, + name: "MOVVload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "RLWINM", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ARLWNM, + name: "MOVFload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVF, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "RLWNM", - auxType: auxInt64, - argLen: 2, - asm: ppc64.ARLWNM, + name: "MOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "RLWMI", - auxType: auxInt64, - argLen: 2, - resultInArg0: true, - asm: ppc64.ARLWMI, + name: "MOVBstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "RLDICL", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ARLDICL, + name: "MOVHstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVH, reg: regInfo{ inputs: 
[]inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "RLDICLCC", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ARLDICLCC, + name: "MOVWstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "RLDICR", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ARLDICR, + name: "MOVVstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "CNTLZD", - argLen: 1, - asm: ppc64.ACNTLZD, + name: "MOVFstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVF, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CNTLZDCC", - argLen: 1, - asm: ppc64.ACNTLZDCC, + name: "MOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 
F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CNTLZW", - argLen: 1, - asm: ppc64.ACNTLZW, + name: "MOVBstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "CNTTZD", - argLen: 1, - asm: ppc64.ACNTTZD, + name: "MOVHstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "CNTTZW", - argLen: 1, - asm: ppc64.ACNTTZW, + name: "MOVWstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "POPCNTD", - argLen: 1, - asm: ppc64.APOPCNTD, + name: "MOVVstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "POPCNTW", + name: "MOVWfpgp", argLen: 1, - asm: ppc64.APOPCNTW, + asm: mips.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "POPCNTB", + name: "MOVWgpfp", argLen: 1, - asm: ppc64.APOPCNTB, + asm: mips.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 
R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "FDIV", - argLen: 2, - asm: ppc64.AFDIV, + name: "MOVVfpgp", + argLen: 1, + asm: mips.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "FDIVS", - argLen: 2, - asm: ppc64.AFDIVS, + name: "MOVVgpfp", + argLen: 1, + asm: mips.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "DIVD", - argLen: 2, - asm: ppc64.ADIVD, + name: "MOVBreg", + argLen: 1, + asm: mips.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "DIVW", - argLen: 2, - asm: ppc64.ADIVW, + name: "MOVBUreg", + argLen: 1, + asm: mips.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 
R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "DIVDU", - argLen: 2, - asm: ppc64.ADIVDU, + name: "MOVHreg", + argLen: 1, + asm: mips.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "DIVWU", - argLen: 2, - asm: ppc64.ADIVWU, + name: "MOVHUreg", + argLen: 1, + asm: mips.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MODUD", - argLen: 2, - asm: ppc64.AMODUD, + name: "MOVWreg", + argLen: 1, + asm: mips.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MODSD", - argLen: 2, - asm: ppc64.AMODSD, + name: "MOVWUreg", + argLen: 1, + asm: mips.AMOVWU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MODUW", - argLen: 2, - asm: ppc64.AMODUW, + name: "MOVVreg", + argLen: 1, + asm: mips.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MODSW", - argLen: 2, - asm: ppc64.AMODSW, + name: "MOVVnop", + argLen: 1, + resultInArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "FCTIDZ", + name: "MOVWF", argLen: 1, - asm: ppc64.AFCTIDZ, + asm: mips.AMOVWF, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "FCTIWZ", + name: "MOVWD", argLen: 1, - asm: ppc64.AFCTIWZ, + asm: mips.AMOVWD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "FCFID", + name: "MOVVF", argLen: 1, - asm: ppc64.AFCFID, + asm: mips.AMOVVF, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "FCFIDS", + name: "MOVVD", argLen: 1, - asm: ppc64.AFCFIDS, + asm: mips.AMOVVD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 
F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "FRSP", + name: "TRUNCFW", argLen: 1, - asm: ppc64.AFRSP, + asm: mips.ATRUNCFW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MFVSRD", + name: "TRUNCDW", argLen: 1, - asm: ppc64.AMFVSRD, + asm: mips.ATRUNCDW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MTVSRD", + name: "TRUNCFV", argLen: 1, - asm: ppc64.AMTVSRD, + asm: mips.ATRUNCFV, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "AND", - argLen: 2, - commutative: true, - asm: ppc64.AAND, + name: "TRUNCDV", + argLen: 1, + asm: mips.ATRUNCDV, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 
F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ANDN", - argLen: 2, - asm: ppc64.AANDN, + name: "MOVFD", + argLen: 1, + asm: mips.AMOVFD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ANDNCC", - argLen: 2, - asm: ppc64.AANDNCC, + name: "MOVDF", + argLen: 1, + asm: mips.AMOVDF, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ANDCC", - argLen: 2, - commutative: true, - asm: ppc64.AANDCC, + name: "CALLstatic", + auxType: auxCallOff, + argLen: 1, + clobberFlags: true, + call: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, + clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO }, }, { - name: "OR", - argLen: 2, - commutative: true, - asm: ppc64.AOR, + name: "CALLtail", + auxType: auxCallOff, + argLen: 1, + clobberFlags: true, + call: true, + tailCall: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, + clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO }, }, { - name: "ORN", - argLen: 2, - 
asm: ppc64.AORN, + name: "CALLclosure", + auxType: auxCallOff, + argLen: 3, + clobberFlags: true, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 4194304}, // R22 + {0, 201326590}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP R31 }, + clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO }, }, { - name: "ORCC", - argLen: 2, - commutative: true, - asm: ppc64.AORCC, - reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + name: "CALLinter", + auxType: auxCallOff, + argLen: 2, + clobberFlags: true, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, + clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO }, }, { - name: "NOR", - argLen: 2, - commutative: true, - asm: ppc64.ANOR, + name: "DUFFZERO", + auxType: auxInt64, + argLen: 2, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, + clobbers: 134217730, // R1 R31 }, }, { - name: "NORCC", - argLen: 2, - commutative: true, - asm: ppc64.ANORCC, + name: "DUFFCOPY", + auxType: auxInt64, + argLen: 3, + faultOnNilArg0: true, + faultOnNilArg1: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4}, // R2 + {1, 2}, // R1 }, + clobbers: 134217734, // R1 R2 R31 }, }, { - name: "XOR", - argLen: 2, - commutative: true, - asm: ppc64.AXOR, + name: "LoweredZero", + auxType: auxInt64, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: 
true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 2}, // R1 + {1, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, + clobbers: 2, // R1 }, }, { - name: "XORCC", - argLen: 2, - commutative: true, - asm: ppc64.AXORCC, + name: "LoweredMove", + auxType: auxInt64, + argLen: 4, + clobberFlags: true, + faultOnNilArg0: true, + faultOnNilArg1: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4}, // R2 + {1, 2}, // R1 + {2, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, + clobbers: 6, // R1 R2 }, }, { - name: "EQV", - argLen: 2, - commutative: true, - asm: ppc64.AEQV, + name: "LoweredAtomicAnd32", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + asm: mips.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "NEG", - argLen: 1, - asm: ppc64.ANEG, + name: "LoweredAtomicOr32", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + asm: mips.AOR, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "NEGCC", - argLen: 1, - asm: ppc64.ANEGCC, + name: "LoweredAtomicLoad8", + argLen: 2, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 
R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "BRD", - argLen: 1, - asm: ppc64.ABRD, + name: "LoweredAtomicLoad32", + argLen: 2, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "BRW", - argLen: 1, - asm: ppc64.ABRW, + name: "LoweredAtomicLoad64", + argLen: 2, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "BRH", - argLen: 1, - asm: ppc64.ABRH, + name: "LoweredAtomicStore8", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "FNEG", - argLen: 1, - asm: ppc64.AFNEG, + name: "LoweredAtomicStore32", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, - outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + { + name: "LoweredAtomicStore64", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "FSQRT", - argLen: 1, - asm: ppc64.AFSQRT, + name: "LoweredAtomicStorezero32", + argLen: 2, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 
9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, - outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + { + name: "LoweredAtomicStorezero64", + argLen: 2, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "FSQRTS", - argLen: 1, - asm: ppc64.AFSQRTS, + name: "LoweredAtomicExchange32", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "FFLOOR", - argLen: 1, - asm: ppc64.AFRIM, + name: "LoweredAtomicExchange64", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "FCEIL", - argLen: 1, - asm: ppc64.AFRIP, + name: "LoweredAtomicAdd32", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "FTRUNC", - argLen: 1, - asm: ppc64.AFRIZ, + name: "LoweredAtomicAdd64", + argLen: 
3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "FROUND", - argLen: 1, - asm: ppc64.AFRIN, + name: "LoweredAtomicAddconst32", + auxType: auxInt32, + argLen: 2, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "FABS", - argLen: 1, - asm: ppc64.AFABS, + name: "LoweredAtomicAddconst64", + auxType: auxInt64, + argLen: 2, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "FNABS", - argLen: 1, - asm: ppc64.AFNABS, + name: "LoweredAtomicCas32", + argLen: 4, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {2, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "FCPSGN", - argLen: 2, - asm: ppc64.AFCPSGN, + name: "LoweredAtomicCas64", + 
argLen: 4, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {2, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "ORconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.AOR, + name: "LoweredNilCheck", + argLen: 2, + nilCheck: true, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, + }, + }, + { + name: "FPFlagTrue", + argLen: 1, + reg: regInfo{ outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "XORconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.AXOR, + name: "FPFlagFalse", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, + }, + }, + { + name: "LoweredGetClosurePtr", + argLen: 0, + zeroWidth: true, + reg: regInfo{ outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4194304}, // R22 }, }, }, { - name: "ANDCCconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.AANDCC, + name: "LoweredGetCallerSP", + argLen: 1, + rematerializeable: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, + }, + }, + { + name: "LoweredGetCallerPC", + argLen: 0, + rematerializeable: true, + reg: regInfo{ outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "ANDconst", + name: "LoweredWB", auxType: auxInt64, argLen: 1, clobberFlags: true, - asm: ppc64.AANDCC, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 
R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, + clobbers: 4611686018293170176, // R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 16777216}, // R25 }, }, }, { - name: "MOVBreg", - argLen: 1, - asm: ppc64.AMOVB, + name: "LoweredPubBarrier", + argLen: 1, + hasSideEffects: true, + asm: mips.ASYNC, + reg: regInfo{}, + }, + { + name: "LoweredPanicBoundsA", + auxType: auxInt64, + argLen: 3, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 8}, // R3 + {1, 16}, // R4 }, }, }, { - name: "MOVBZreg", - argLen: 1, - asm: ppc64.AMOVBZ, + name: "LoweredPanicBoundsB", + auxType: auxInt64, + argLen: 3, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4}, // R2 + {1, 8}, // R3 }, }, }, { - name: "MOVHreg", - argLen: 1, - asm: ppc64.AMOVH, + name: "LoweredPanicBoundsC", + auxType: auxInt64, + argLen: 3, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 2}, // R1 + {1, 4}, // R2 }, }, }, + { - name: "MOVHZreg", - argLen: 1, - asm: ppc64.AMOVHZ, + name: "ADD", + argLen: 2, + commutative: true, + asm: ppc64.AADD, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 @@ -44576,12 +46511,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWreg", - argLen: 1, - asm: ppc64.AMOVW, + name: "ADDCC", + argLen: 2, + commutative: true, + asm: ppc64.AADDCC, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 @@ -44589,9 +46526,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWZreg", - argLen: 1, - asm: ppc64.AMOVWZ, + name: "ADDconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.AADD, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 @@ -44602,63 +46540,58 @@ var opcodeTable = [...]opInfo{ }, }, { - name: 
"MOVBZload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: ppc64.AMOVBZ, + name: "ADDCCconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.AADDCCC, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, + clobbers: 9223372036854775808, // XER outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVHload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: ppc64.AMOVH, + name: "FADD", + argLen: 2, + commutative: true, + asm: ppc64.AFADD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "MOVHZload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: ppc64.AMOVHZ, + name: "FADDS", + argLen: 2, + commutative: true, + asm: ppc64.AFADDS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "MOVWload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: ppc64.AMOVW, + name: "SUB", + argLen: 2, + asm: ppc64.ASUB, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 @@ -44666,15 +46599,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWZload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: ppc64.AMOVWZ, + name: "SUBCC", + argLen: 2, + asm: ppc64.ASUBCC, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, 
outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 @@ -44682,85 +46613,85 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVDload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: ppc64.AMOVD, + name: "SUBFCconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.ASUBC, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, + clobbers: 9223372036854775808, // XER outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVDBRload", - argLen: 2, - faultOnNilArg0: true, - asm: ppc64.AMOVDBR, + name: "FSUB", + argLen: 2, + asm: ppc64.AFSUB, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "MOVWBRload", - argLen: 2, - faultOnNilArg0: true, - asm: ppc64.AMOVWBR, + name: "FSUBS", + argLen: 2, + asm: ppc64.AFSUBS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "MOVHBRload", - argLen: 2, - faultOnNilArg0: true, - asm: ppc64.AMOVHBR, + name: "XSMINJDP", + argLen: 2, + asm: ppc64.AXSMINJDP, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "MOVBZloadidx", - argLen: 3, - asm: ppc64.AMOVBZ, + name: "XSMAXJDP", + argLen: 2, + asm: ppc64.AXSMAXJDP, reg: regInfo{ inputs: 
[]inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "MOVHloadidx", - argLen: 3, - asm: ppc64.AMOVH, + name: "MULLD", + argLen: 2, + commutative: true, + asm: ppc64.AMULLD, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 @@ -44768,13 +46699,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVHZloadidx", - argLen: 3, - asm: ppc64.AMOVHZ, + name: "MULLW", + argLen: 2, + commutative: true, + asm: ppc64.AMULLW, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 @@ -44782,12 +46714,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWloadidx", - argLen: 3, - asm: ppc64.AMOVW, + name: "MULLDconst", + auxType: auxInt32, + argLen: 1, + asm: ppc64.AMULLD, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ @@ -44796,12 +46728,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWZloadidx", - argLen: 3, - asm: ppc64.AMOVWZ, + name: "MULLWconst", + auxType: auxInt32, + argLen: 1, + asm: ppc64.AMULLW, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ @@ -44810,13 +46742,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVDloadidx", + name: "MADDLD", argLen: 3, - asm: ppc64.AMOVD, + asm: ppc64.AMADDLD, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 
R24 R25 R26 R27 R28 R29 {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 @@ -44824,13 +46757,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVHBRloadidx", - argLen: 3, - asm: ppc64.AMOVHBR, + name: "MULHD", + argLen: 2, + commutative: true, + asm: ppc64.AMULHD, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 @@ -44838,13 +46772,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWBRloadidx", - argLen: 3, - asm: ppc64.AMOVWBR, + name: "MULHW", + argLen: 2, + commutative: true, + asm: ppc64.AMULHW, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 @@ -44852,13 +46787,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVDBRloadidx", - argLen: 3, - asm: ppc64.AMOVDBR, + name: "MULHDU", + argLen: 2, + commutative: true, + asm: ppc64.AMULHDU, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 @@ -44866,91 +46802,104 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "FMOVDloadidx", - argLen: 3, - asm: ppc64.AFMOVD, + name: "MULHDUCC", + argLen: 2, + commutative: true, + asm: ppc64.AMULHDUCC, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - 
name: "FMOVSloadidx", - argLen: 3, - asm: ppc64.AFMOVS, + name: "MULHWU", + argLen: 2, + commutative: true, + asm: ppc64.AMULHWU, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "DCBT", - auxType: auxInt64, - argLen: 2, - hasSideEffects: true, - asm: ppc64.ADCBT, + name: "FMUL", + argLen: 2, + commutative: true, + asm: ppc64.AFMUL, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "MOVDBRstore", - argLen: 3, - faultOnNilArg0: true, - asm: ppc64.AMOVDBR, + name: "FMULS", + argLen: 2, + commutative: true, + asm: ppc64.AFMULS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "MOVWBRstore", - argLen: 3, - faultOnNilArg0: true, - asm: ppc64.AMOVWBR, + name: "FMADD", + argLen: 3, + asm: ppc64.AFMADD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "MOVHBRstore", - argLen: 3, - faultOnNilArg0: true, - asm: ppc64.AMOVHBR, + 
name: "FMADDS", + argLen: 3, + asm: ppc64.AFMADDS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "FMOVDload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: ppc64.AFMOVD, + name: "FMSUB", + argLen: 3, + asm: ppc64.AFMSUB, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 @@ -44958,15 +46907,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "FMOVSload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: ppc64.AFMOVS, + name: "FMSUBS", + argLen: 3, + asm: ppc64.AFMSUBS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 @@ -44974,409 +46922,440 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVBstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: ppc64.AMOVB, + name: "SRAD", + argLen: 2, + asm: ppc64.ASRAD, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, + clobbers: 9223372036854775808, // XER + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 
R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, }, }, { - name: "MOVHstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: ppc64.AMOVH, + name: "SRAW", + argLen: 2, + asm: ppc64.ASRAW, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, + clobbers: 9223372036854775808, // XER + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, }, }, { - name: "MOVWstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: ppc64.AMOVW, + name: "SRD", + argLen: 2, + asm: ppc64.ASRD, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, }, }, { - name: "MOVDstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: ppc64.AMOVD, + name: "SRW", + argLen: 2, + asm: ppc64.ASRW, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - }, - }, - { - name: "FMOVDstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: ppc64.AFMOVD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - }, - }, - }, - { - name: "FMOVSstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: ppc64.AFMOVS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVBstoreidx", - argLen: 4, - asm: ppc64.AMOVB, + name: "SLD", + argLen: 2, + asm: ppc64.ASLD, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVHstoreidx", - argLen: 4, - asm: 
ppc64.AMOVH, + name: "SLW", + argLen: 2, + asm: ppc64.ASLW, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVWstoreidx", - argLen: 4, - asm: ppc64.AMOVW, + name: "ROTL", + argLen: 2, + asm: ppc64.AROTL, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVDstoreidx", - argLen: 4, - asm: ppc64.AMOVD, + name: "ROTLW", + argLen: 2, + asm: ppc64.AROTLW, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "FMOVDstoreidx", - argLen: 4, - asm: ppc64.AFMOVD, + name: "CLRLSLWI", + auxType: auxInt32, + argLen: 1, + asm: ppc64.ACLRLSLWI, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "FMOVSstoreidx", - argLen: 4, - asm: ppc64.AFMOVS, + name: "CLRLSLDI", + auxType: auxInt32, + argLen: 1, + asm: ppc64.ACLRLSLDI, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 
1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVHBRstoreidx", - argLen: 4, - asm: ppc64.AMOVHBR, + name: "ADDC", + argLen: 2, + commutative: true, + asm: ppc64.AADDC, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + clobbers: 9223372036854775808, // XER + outputs: []outputInfo{ + {1, 9223372036854775808}, // XER + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVWBRstoreidx", - argLen: 4, - asm: ppc64.AMOVWBR, + name: "SUBC", + argLen: 2, + asm: ppc64.ASUBC, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + clobbers: 9223372036854775808, // XER + outputs: []outputInfo{ + {1, 9223372036854775808}, // XER + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVDBRstoreidx", - argLen: 4, - asm: ppc64.AMOVDBR, + name: "ADDCconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.AADDC, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {1, 9223372036854775808}, // XER + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVBstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: ppc64.AMOVB, + name: "SUBCconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.ASUBC, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, + outputs: []outputInfo{ + {1, 9223372036854775808}, // XER + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, }, }, { - name: "MOVHstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: ppc64.AMOVH, + name: "ADDE", + argLen: 3, + commutative: true, + asm: ppc64.AADDE, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 9223372036854775808}, // XER + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 
R23 R24 R25 R26 R27 R28 R29 + }, + clobbers: 9223372036854775808, // XER + outputs: []outputInfo{ + {1, 9223372036854775808}, // XER + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVWstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: ppc64.AMOVW, + name: "ADDZE", + argLen: 2, + asm: ppc64.AADDZE, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 9223372036854775808}, // XER + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + clobbers: 9223372036854775808, // XER + outputs: []outputInfo{ + {1, 9223372036854775808}, // XER + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVDstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: ppc64.AMOVD, + name: "SUBE", + argLen: 3, + asm: ppc64.ASUBE, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 9223372036854775808}, // XER + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + clobbers: 9223372036854775808, // XER + outputs: []outputInfo{ + {1, 9223372036854775808}, // XER + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVDaddr", - auxType: auxSymOff, - argLen: 1, - rematerializeable: true, - symEffect: SymAddr, - asm: ppc64.AMOVD, + name: "ADDZEzero", + argLen: 1, + asm: ppc64.AADDZE, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372036854775808}, // XER }, + clobbers: 9223372036854775808, // XER outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVDconst", - auxType: auxInt64, - argLen: 0, - rematerializeable: true, - asm: ppc64.AMOVD, + name: "SUBZEzero", + argLen: 1, + asm: ppc64.ASUBZE, reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372036854775808}, // XER + }, + clobbers: 9223372036854775808, // XER outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "FMOVDconst", - auxType: auxFloat64, - argLen: 0, - rematerializeable: true, - asm: ppc64.AFMOVD, - reg: regInfo{ - outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - }, - }, - }, - { - name: "FMOVSconst", - auxType: auxFloat32, - argLen: 0, - rematerializeable: true, - asm: ppc64.AFMOVS, + name: "SRADconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.ASRAD, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + clobbers: 9223372036854775808, // XER outputs: 
[]outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "FCMPU", - argLen: 2, - asm: ppc64.AFCMPU, + name: "SRAWconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.ASRAW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + clobbers: 9223372036854775808, // XER + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "CMP", - argLen: 2, - asm: ppc64.ACMP, + name: "SRDconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.ASRD, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "CMPU", - argLen: 2, - asm: ppc64.ACMPU, + name: "SRWconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.ASRW, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "CMPW", - argLen: 2, - asm: ppc64.ACMPW, + name: "SLDconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.ASLD, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "CMPWU", - argLen: 2, - asm: ppc64.ACMPWU, + name: "SLWconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.ASLW, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "CMPconst", + name: "ROTLconst", auxType: auxInt64, argLen: 1, - asm: ppc64.ACMP, + asm: ppc64.AROTL, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 
R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, }, }, { - name: "CMPUconst", + name: "ROTLWconst", auxType: auxInt64, argLen: 1, - asm: ppc64.ACMPU, + asm: ppc64.AROTLW, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, }, }, { - name: "CMPWconst", - auxType: auxInt32, + name: "EXTSWSLconst", + auxType: auxInt64, argLen: 1, - asm: ppc64.ACMPW, + asm: ppc64.AEXTSWSLI, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, }, }, { - name: "CMPWUconst", - auxType: auxInt32, + name: "RLWINM", + auxType: auxInt64, argLen: 1, - asm: ppc64.ACMPWU, + asm: ppc64.ARLWNM, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, }, }, { - name: "ISEL", - auxType: auxInt32, - argLen: 3, - asm: ppc64.AISEL, + name: "RLWNM", + auxType: auxInt64, + argLen: 2, + asm: ppc64.ARLWNM, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 @@ -45384,13 +47363,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "ISELZ", - auxType: auxInt32, - argLen: 2, - asm: ppc64.AISEL, + name: "RLWMI", + auxType: auxInt64, + argLen: 2, + resultInArg0: true, + asm: ppc64.ARLWMI, reg: regInfo{ inputs: []inputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 @@ -45398,396 +47379,434 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "SETBC", - auxType: auxInt32, + name: "RLDICL", + auxType: auxInt64, argLen: 1, - asm: ppc64.ASETBC, + asm: ppc64.ARLDICL, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "SETBCR", - auxType: 
auxInt32, + name: "RLDICLCC", + auxType: auxInt64, argLen: 1, - asm: ppc64.ASETBCR, + asm: ppc64.ARLDICLCC, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "Equal", - argLen: 1, + name: "RLDICR", + auxType: auxInt64, + argLen: 1, + asm: ppc64.ARLDICR, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "NotEqual", + name: "CNTLZD", argLen: 1, + asm: ppc64.ACNTLZD, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LessThan", + name: "CNTLZDCC", argLen: 1, + asm: ppc64.ACNTLZDCC, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "FLessThan", + name: "CNTLZW", argLen: 1, + asm: ppc64.ACNTLZW, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LessEqual", + name: "CNTTZD", argLen: 1, + asm: ppc64.ACNTTZD, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "FLessEqual", + name: "CNTTZW", argLen: 1, + asm: ppc64.ACNTTZW, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "GreaterThan", + name: "POPCNTD", argLen: 1, + asm: ppc64.APOPCNTD, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "FGreaterThan", + name: "POPCNTW", argLen: 1, + asm: ppc64.APOPCNTW, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 
R24 R25 R26 R27 R28 R29 }, }, }, { - name: "GreaterEqual", + name: "POPCNTB", argLen: 1, + asm: ppc64.APOPCNTB, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "FGreaterEqual", - argLen: 1, + name: "FDIV", + argLen: 2, + asm: ppc64.AFDIV, reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "LoweredGetClosurePtr", - argLen: 0, - zeroWidth: true, + name: "FDIVS", + argLen: 2, + asm: ppc64.AFDIVS, reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, outputs: []outputInfo{ - {0, 2048}, // R11 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "LoweredGetCallerSP", - argLen: 1, - rematerializeable: true, + name: "DIVD", + argLen: 2, + asm: ppc64.ADIVD, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredGetCallerPC", - argLen: 0, - rematerializeable: true, + name: "DIVW", + argLen: 2, + asm: ppc64.ADIVW, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredNilCheck", - argLen: 2, - clobberFlags: true, - nilCheck: true, - faultOnNilArg0: true, + name: "DIVDU", + argLen: 2, + asm: ppc64.ADIVDU, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - clobbers: 2147483648, // R31 }, }, { - name: 
"LoweredRound32F", - argLen: 1, - resultInArg0: true, - zeroWidth: true, + name: "DIVWU", + argLen: 2, + asm: ppc64.ADIVWU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredRound64F", - argLen: 1, - resultInArg0: true, - zeroWidth: true, + name: "MODUD", + argLen: 2, + asm: ppc64.AMODUD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "CALLstatic", - auxType: auxCallOff, - argLen: -1, - clobberFlags: true, - call: true, - reg: regInfo{ - clobbers: 18446744071562059768, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 XER - }, - }, - { - name: "CALLtail", - auxType: auxCallOff, - argLen: -1, - clobberFlags: true, - call: true, - tailCall: true, - reg: regInfo{ - clobbers: 18446744071562059768, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 XER - }, - }, - { - name: "CALLclosure", - auxType: auxCallOff, - argLen: -1, - clobberFlags: true, - call: true, + name: "MODSD", + argLen: 2, + asm: ppc64.AMODSD, reg: regInfo{ inputs: []inputInfo{ - {0, 4096}, // R12 - {1, 2048}, // R11 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - clobbers: 18446744071562059768, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 XER }, }, { - name: "CALLinter", - auxType: auxCallOff, - argLen: -1, - clobberFlags: true, - call: true, + name: "MODUW", + argLen: 2, + asm: ppc64.AMODUW, reg: regInfo{ inputs: 
[]inputInfo{ - {0, 4096}, // R12 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - clobbers: 18446744071562059768, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 XER }, }, { - name: "LoweredZero", - auxType: auxInt64, - argLen: 2, - clobberFlags: true, - faultOnNilArg0: true, - unsafePoint: true, + name: "MODSW", + argLen: 2, + asm: ppc64.AMODSW, reg: regInfo{ inputs: []inputInfo{ - {0, 1048576}, // R20 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - clobbers: 1048576, // R20 }, }, { - name: "LoweredZeroShort", - auxType: auxInt64, - argLen: 2, - faultOnNilArg0: true, - unsafePoint: true, + name: "FCTIDZ", + argLen: 1, + asm: ppc64.AFCTIDZ, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "LoweredQuadZeroShort", - auxType: auxInt64, - argLen: 2, - faultOnNilArg0: true, - unsafePoint: true, + name: "FCTIWZ", + argLen: 1, + asm: ppc64.AFCTIWZ, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "LoweredQuadZero", - auxType: auxInt64, - argLen: 2, - clobberFlags: true, - faultOnNilArg0: true, - unsafePoint: true, + name: "FCFID", + argLen: 1, + asm: ppc64.AFCFID, reg: regInfo{ inputs: []inputInfo{ - {0, 1048576}, // R20 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, - clobbers: 1048576, // R20 }, }, { - name: "LoweredMove", - auxType: auxInt64, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, - faultOnNilArg1: true, - unsafePoint: true, + name: "FCFIDS", + argLen: 1, + asm: ppc64.AFCFIDS, reg: regInfo{ inputs: []inputInfo{ - {0, 1048576}, // R20 - {1, 2097152}, // R21 + {0, 
9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, - clobbers: 3145728, // R20 R21 }, }, { - name: "LoweredMoveShort", - auxType: auxInt64, - argLen: 3, - faultOnNilArg0: true, - faultOnNilArg1: true, - unsafePoint: true, + name: "FRSP", + argLen: 1, + asm: ppc64.AFRSP, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "LoweredQuadMove", - auxType: auxInt64, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, - faultOnNilArg1: true, - unsafePoint: true, + name: "MFVSRD", + argLen: 1, + asm: ppc64.AMFVSRD, reg: regInfo{ inputs: []inputInfo{ - {0, 1048576}, // R20 - {1, 2097152}, // R21 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - clobbers: 3145728, // R20 R21 }, }, { - name: "LoweredQuadMoveShort", - auxType: auxInt64, - argLen: 3, - faultOnNilArg0: true, - faultOnNilArg1: true, - unsafePoint: true, + name: "MTVSRD", + argLen: 1, + asm: ppc64.AMTVSRD, reg: regInfo{ inputs: []inputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "LoweredAtomicStore8", - auxType: auxInt64, - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, + name: "AND", + argLen: 2, + commutative: true, + asm: ppc64.AAND, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, }, }, { - name: "LoweredAtomicStore32", - auxType: auxInt64, - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, + name: "ANDN", + argLen: 2, + asm: ppc64.AANDN, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 
R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, }, }, { - name: "LoweredAtomicStore64", - auxType: auxInt64, - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, + name: "ANDNCC", + argLen: 2, + asm: ppc64.AANDNCC, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, }, }, { - name: "LoweredAtomicLoad8", - auxType: auxInt64, - argLen: 2, - clobberFlags: true, - faultOnNilArg0: true, + name: "ANDCC", + argLen: 2, + commutative: true, + asm: ppc64.AANDCC, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 @@ -45795,14 +47814,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicLoad32", - auxType: auxInt64, - argLen: 2, - clobberFlags: true, - faultOnNilArg0: true, + name: "OR", + argLen: 2, + commutative: true, + asm: ppc64.AOR, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 @@ -45810,14 +47829,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicLoad64", - auxType: auxInt64, - argLen: 2, - clobberFlags: true, - faultOnNilArg0: true, + name: "ORN", + argLen: 2, + asm: ppc64.AORN, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 @@ -45825,14 +47843,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicLoadPtr", - auxType: auxInt64, - argLen: 2, - clobberFlags: true, - faultOnNilArg0: true, + name: "ORCC", + argLen: 2, + commutative: true, + asm: ppc64.AORCC, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 @@ -45840,16 +47858,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicAdd32", - argLen: 3, - resultNotInArgs: true, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "NOR", + argLen: 2, + commutative: true, + asm: 
ppc64.ANOR, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 @@ -45857,16 +47873,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicAdd64", - argLen: 3, - resultNotInArgs: true, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "NORCC", + argLen: 2, + commutative: true, + asm: ppc64.ANORCC, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 @@ -45874,16 +47888,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicExchange8", - argLen: 3, - resultNotInArgs: true, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "XOR", + argLen: 2, + commutative: true, + asm: ppc64.AXOR, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 @@ -45891,16 +47903,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicExchange32", - argLen: 3, - resultNotInArgs: true, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "XORCC", + argLen: 2, + commutative: true, + asm: ppc64.AXORCC, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 @@ -45908,16 +47918,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicExchange64", - argLen: 3, - resultNotInArgs: true, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "EQV", + argLen: 2, + commutative: true, + asm: ppc64.AEQV, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 
R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 @@ -45925,17 +47933,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicCas64", - auxType: auxInt64, - argLen: 4, - resultNotInArgs: true, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "NEG", + argLen: 1, + asm: ppc64.ANEG, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {2, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ @@ -45944,17 +47946,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicCas32", - auxType: auxInt64, - argLen: 4, - resultNotInArgs: true, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "NEGCC", + argLen: 1, + asm: ppc64.ANEGCC, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {2, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ @@ -45963,1464 +47959,1272 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicAnd8", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, - asm: ppc64.AAND, + name: "BRD", + argLen: 1, + asm: ppc64.ABRD, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - }, - }, - { - name: "LoweredAtomicAnd32", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, - asm: ppc64.AAND, - reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredAtomicOr8", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, - asm: ppc64.AOR, + name: "BRW", + argLen: 1, + asm: ppc64.ABRW, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredAtomicOr32", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, - asm: ppc64.AOR, + name: "BRH", + argLen: 1, + asm: ppc64.ABRH, reg: regInfo{ inputs: []inputInfo{ {0, 
1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - }, - }, - { - name: "LoweredWB", - auxType: auxInt64, - argLen: 1, - clobberFlags: true, - reg: regInfo{ - clobbers: 18446744072632408064, // R11 R12 R18 R19 R22 R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 XER outputs: []outputInfo{ - {0, 536870912}, // R29 - }, - }, - }, - { - name: "LoweredPubBarrier", - argLen: 1, - hasSideEffects: true, - asm: ppc64.ALWSYNC, - reg: regInfo{}, - }, - { - name: "LoweredPanicBoundsA", - auxType: auxInt64, - argLen: 3, - call: true, - reg: regInfo{ - inputs: []inputInfo{ - {0, 32}, // R5 - {1, 64}, // R6 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredPanicBoundsB", - auxType: auxInt64, - argLen: 3, - call: true, + name: "FNEG", + argLen: 1, + asm: ppc64.AFNEG, reg: regInfo{ inputs: []inputInfo{ - {0, 16}, // R4 - {1, 32}, // R5 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, - }, - }, - { - name: "LoweredPanicBoundsC", - auxType: auxInt64, - argLen: 3, - call: true, - reg: regInfo{ - inputs: []inputInfo{ - {0, 8}, // R3 - {1, 16}, // R4 + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "InvertFlags", + name: "FSQRT", argLen: 1, - reg: regInfo{}, - }, - { - name: "FlagEQ", - argLen: 0, - reg: regInfo{}, - }, - { - name: "FlagLT", - argLen: 0, - reg: regInfo{}, - }, - { - name: "FlagGT", - argLen: 0, - reg: regInfo{}, - }, - - { - name: "ADD", - argLen: 2, - commutative: true, - asm: riscv.AADD, + asm: ppc64.AFSQRT, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "ADDI", - auxType: auxInt64, - argLen: 1, - asm: riscv.AADDI, + name: "FSQRTS", + argLen: 1, + asm: ppc64.AFSQRTS, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "ADDIW", - auxType: auxInt64, - argLen: 
1, - asm: riscv.AADDIW, + name: "FFLOOR", + argLen: 1, + asm: ppc64.AFRIM, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "NEG", + name: "FCEIL", argLen: 1, - asm: riscv.ANEG, + asm: ppc64.AFRIP, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "NEGW", + name: "FTRUNC", argLen: 1, - asm: riscv.ANEGW, + asm: ppc64.AFRIZ, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "SUB", - argLen: 2, - asm: riscv.ASUB, + name: "FROUND", + argLen: 1, + asm: ppc64.AFRIN, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "SUBW", - argLen: 2, - asm: riscv.ASUBW, + name: "FABS", + argLen: 1, + asm: ppc64.AFABS, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 
F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "MUL", - argLen: 2, - commutative: true, - asm: riscv.AMUL, + name: "FNABS", + argLen: 1, + asm: ppc64.AFNABS, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "MULW", - argLen: 2, - commutative: true, - asm: riscv.AMULW, + name: "FCPSGN", + argLen: 2, + asm: ppc64.AFCPSGN, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "MULH", - argLen: 2, - commutative: true, - asm: riscv.AMULH, + name: "ORconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.AOR, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MULHU", - argLen: 2, - commutative: true, - asm: riscv.AMULHU, + name: "XORconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.AXOR, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: 
"LoweredMuluhilo", - argLen: 2, - resultNotInArgs: true, + name: "ANDCCconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.AANDCC, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredMuluover", - argLen: 2, - resultNotInArgs: true, + name: "ANDconst", + auxType: auxInt64, + argLen: 1, + clobberFlags: true, + asm: ppc64.AANDCC, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "DIV", - argLen: 2, - asm: riscv.ADIV, + name: "MOVBreg", + argLen: 1, + asm: ppc64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "DIVU", - argLen: 2, - asm: riscv.ADIVU, + name: "MOVBZreg", + argLen: 1, + asm: ppc64.AMOVBZ, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "DIVW", - argLen: 2, - asm: riscv.ADIVW, + name: "MOVHreg", + argLen: 1, + asm: ppc64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, 
// X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "DIVUW", - argLen: 2, - asm: riscv.ADIVUW, + name: "MOVHZreg", + argLen: 1, + asm: ppc64.AMOVHZ, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "REM", - argLen: 2, - asm: riscv.AREM, + name: "MOVWreg", + argLen: 1, + asm: ppc64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "REMU", - argLen: 2, - asm: riscv.AREMU, + name: "MOVWZreg", + argLen: 1, + asm: ppc64.AMOVWZ, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "REMW", - argLen: 2, - asm: riscv.AREMW, + name: "MOVBZload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: ppc64.AMOVBZ, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "REMUW", - argLen: 2, - asm: riscv.AREMUW, + name: "MOVHload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: ppc64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVaddr", - auxType: auxSymOff, - argLen: 1, - rematerializeable: true, - symEffect: SymAddr, - asm: riscv.AMOV, + name: "MOVHZload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: ppc64.AMOVHZ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - }, - }, - { - name: "MOVDconst", - auxType: auxInt64, - argLen: 0, - rematerializeable: true, - asm: riscv.AMOV, - reg: regInfo{ outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVBload", + name: "MOVWload", auxType: auxSymOff, argLen: 2, faultOnNilArg0: true, symEffect: SymRead, - asm: riscv.AMOVB, + asm: ppc64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVHload", + name: "MOVWZload", auxType: auxSymOff, argLen: 2, faultOnNilArg0: true, symEffect: SymRead, - asm: riscv.AMOVH, + asm: ppc64.AMOVWZ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 
R28 R29 }, }, }, { - name: "MOVWload", + name: "MOVDload", auxType: auxSymOff, argLen: 2, faultOnNilArg0: true, symEffect: SymRead, - asm: riscv.AMOVW, + asm: ppc64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVDload", - auxType: auxSymOff, + name: "MOVDBRload", argLen: 2, faultOnNilArg0: true, - symEffect: SymRead, - asm: riscv.AMOV, + asm: ppc64.AMOVDBR, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVBUload", - auxType: auxSymOff, + name: "MOVWBRload", argLen: 2, faultOnNilArg0: true, - symEffect: SymRead, - asm: riscv.AMOVBU, + asm: ppc64.AMOVWBR, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVHUload", - auxType: auxSymOff, + name: "MOVHBRload", argLen: 2, faultOnNilArg0: true, - symEffect: SymRead, - asm: riscv.AMOVHU, + asm: ppc64.AMOVHBR, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVWUload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: riscv.AMOVWU, + name: "MOVBZloadidx", + argLen: 3, + asm: ppc64.AMOVBZ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, 
// X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVBstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: riscv.AMOVB, + name: "MOVHloadidx", + argLen: 3, + asm: ppc64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - }, - }, - { - name: "MOVHstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: riscv.AMOVH, - reg: regInfo{ - inputs: []inputInfo{ - {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVWstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: riscv.AMOVW, + name: "MOVHZloadidx", + argLen: 3, + asm: ppc64.AMOVHZ, reg: regInfo{ inputs: []inputInfo{ - {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - }, - }, - { - name: "MOVDstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: riscv.AMOV, - reg: regInfo{ - inputs: []inputInfo{ - {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVBstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: riscv.AMOVB, + name: "MOVWloadidx", + argLen: 3, + asm: ppc64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - }, - }, - { - name: "MOVHstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: 
riscv.AMOVH, - reg: regInfo{ - inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVWstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: riscv.AMOVW, + name: "MOVWZloadidx", + argLen: 3, + asm: ppc64.AMOVWZ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - }, - }, - { - name: "MOVDstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: riscv.AMOV, - reg: regInfo{ - inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVBreg", - argLen: 1, - asm: riscv.AMOVB, + name: "MOVDloadidx", + argLen: 3, + asm: ppc64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVHreg", - argLen: 1, - asm: riscv.AMOVH, + name: "MOVHBRloadidx", + argLen: 3, + asm: ppc64.AMOVHBR, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVWreg", - argLen: 1, - asm: riscv.AMOVW, + name: "MOVWBRloadidx", + argLen: 3, + asm: ppc64.AMOVWBR, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVDreg", - argLen: 1, - asm: riscv.AMOV, + name: "MOVDBRloadidx", + argLen: 3, + asm: ppc64.AMOVDBR, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVBUreg", - argLen: 1, - asm: riscv.AMOVBU, + name: "FMOVDloadidx", + argLen: 3, + asm: ppc64.AFMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "MOVHUreg", - argLen: 1, - asm: riscv.AMOVHU, + name: "FMOVSloadidx", + argLen: 3, + asm: ppc64.AFMOVS, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "MOVWUreg", - argLen: 1, - asm: riscv.AMOVWU, + name: "DCBT", + auxType: auxInt64, + argLen: 2, + hasSideEffects: true, + asm: ppc64.ADCBT, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVDnop", - argLen: 1, - resultInArg0: true, + name: "MOVDBRstore", + argLen: 3, + faultOnNilArg0: true, + asm: ppc64.AMOVDBR, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "SLL", - argLen: 2, - asm: riscv.ASLL, + name: "MOVWBRstore", + argLen: 3, + faultOnNilArg0: true, + asm: ppc64.AMOVWBR, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "SLLW", - argLen: 2, - asm: riscv.ASLLW, + name: "MOVHBRstore", + argLen: 3, + faultOnNilArg0: true, + asm: ppc64.AMOVHBR, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "SRA", - argLen: 2, - asm: riscv.ASRA, + name: "FMOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: ppc64.AFMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "SRAW", - argLen: 2, - asm: riscv.ASRAW, + name: "FMOVSload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: ppc64.AFMOVS, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 
F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "SRL", - argLen: 2, - asm: riscv.ASRL, + name: "MOVBstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "SRLW", - argLen: 2, - asm: riscv.ASRLW, + name: "MOVHstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "SLLI", - auxType: auxInt64, - argLen: 1, - asm: riscv.ASLLI, + name: "MOVWstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "SLLIW", - auxType: auxInt64, - argLen: 1, - asm: riscv.ASLLIW, + name: "MOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "SRAI", - auxType: auxInt64, - argLen: 1, - asm: riscv.ASRAI, + name: "FMOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AFMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 
1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "SRAIW", - auxType: auxInt64, - argLen: 1, - asm: riscv.ASRAIW, + name: "FMOVSstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AFMOVS, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "SRLI", - auxType: auxInt64, - argLen: 1, - asm: riscv.ASRLI, + name: "MOVBstoreidx", + argLen: 4, + asm: ppc64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "SRLIW", - auxType: auxInt64, - argLen: 1, - asm: riscv.ASRLIW, + name: "MOVHstoreidx", + argLen: 4, + asm: ppc64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "SH1ADD", - argLen: 2, - asm: riscv.ASH1ADD, + name: "MOVWstoreidx", + argLen: 4, + asm: ppc64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 
R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "SH2ADD", - argLen: 2, - asm: riscv.ASH2ADD, + name: "MOVDstoreidx", + argLen: 4, + asm: ppc64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "SH3ADD", - argLen: 2, - asm: riscv.ASH3ADD, + name: "FMOVDstoreidx", + argLen: 4, + asm: ppc64.AFMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "AND", - argLen: 2, - commutative: true, - asm: riscv.AAND, + name: "FMOVSstoreidx", + argLen: 4, + asm: ppc64.AFMOVS, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "ANDN", - argLen: 2, - asm: riscv.AANDN, + name: "MOVHBRstoreidx", + argLen: 4, + asm: ppc64.AMOVHBR, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "ANDI", - auxType: auxInt64, - argLen: 1, - asm: riscv.AANDI, + name: "MOVWBRstoreidx", + argLen: 4, + asm: ppc64.AMOVWBR, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "CLZ", - argLen: 1, - asm: riscv.ACLZ, + name: "MOVDBRstoreidx", + argLen: 4, + asm: ppc64.AMOVDBR, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "CLZW", - argLen: 1, - asm: riscv.ACLZW, + name: "MOVBstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "CPOP", - argLen: 1, - asm: riscv.ACPOP, + name: "MOVHstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "CPOPW", - argLen: 1, - asm: riscv.ACPOPW, + name: "MOVWstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "CTZ", - argLen: 1, - asm: riscv.ACTZ, + name: "MOVDstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "CTZW", - argLen: 1, - asm: riscv.ACTZW, + name: "MOVDaddr", + auxType: auxSymOff, + argLen: 1, + rematerializeable: true, + symEffect: SymAddr, + asm: ppc64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "NOT", - argLen: 1, - asm: riscv.ANOT, + name: "MOVDconst", + auxType: auxInt64, + argLen: 0, + rematerializeable: true, + asm: ppc64.AMOVD, reg: regInfo{ - inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "OR", - argLen: 2, - commutative: true, - asm: riscv.AOR, + name: "FMOVDconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: ppc64.AFMOVD, reg: regInfo{ - inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "ORN", - argLen: 2, - asm: riscv.AORN, + name: "FMOVSconst", + auxType: auxFloat32, + argLen: 0, + rematerializeable: true, + asm: ppc64.AFMOVS, reg: regInfo{ - inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 
X28 X29 X30 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "ORI", - auxType: auxInt64, - argLen: 1, - asm: riscv.AORI, + name: "FCMPU", + argLen: 2, + asm: ppc64.AFCMPU, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "REV8", - argLen: 1, - asm: riscv.AREV8, + name: "CMP", + argLen: 2, + asm: ppc64.ACMP, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "ROL", + name: "CMPU", argLen: 2, - asm: riscv.AROL, + asm: ppc64.ACMPU, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "ROLW", + name: "CMPW", argLen: 2, - asm: riscv.AROLW, + asm: ppc64.ACMPW, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "ROR", + name: "CMPWU", argLen: 2, - asm: riscv.AROR, + asm: ppc64.ACMPWU, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 
X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "RORI", + name: "CMPconst", auxType: auxInt64, argLen: 1, - asm: riscv.ARORI, + asm: ppc64.ACMP, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "RORIW", + name: "CMPUconst", auxType: auxInt64, argLen: 1, - asm: riscv.ARORIW, + asm: ppc64.ACMPU, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "RORW", - argLen: 2, - asm: riscv.ARORW, + name: "CMPWconst", + auxType: auxInt32, + argLen: 1, + asm: ppc64.ACMPW, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + { + name: "CMPWUconst", + auxType: auxInt32, + argLen: 1, + asm: ppc64.ACMPWU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "XNOR", - argLen: 2, - commutative: true, - asm: riscv.AXNOR, + name: "ISEL", + auxType: auxInt32, + argLen: 3, + asm: ppc64.AISEL, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "XOR", - argLen: 2, - commutative: true, - asm: riscv.AXOR, + name: "ISELZ", + auxType: auxInt32, + argLen: 2, + asm: ppc64.AISEL, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 
X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "XORI", - auxType: auxInt64, + name: "SETBC", + auxType: auxInt32, argLen: 1, - asm: riscv.AXORI, + asm: ppc64.ASETBC, reg: regInfo{ - inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MIN", - argLen: 2, - commutative: true, - asm: riscv.AMIN, + name: "SETBCR", + auxType: auxInt32, + argLen: 1, + asm: ppc64.ASETBCR, reg: regInfo{ - inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MAX", - argLen: 2, - commutative: true, - asm: riscv.AMAX, + name: "Equal", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MINU", - argLen: 2, - commutative: true, - asm: riscv.AMINU, + name: "NotEqual", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MAXU", - argLen: 2, - commutative: true, - asm: riscv.AMAXU, + name: "LessThan", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 
R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "SEQZ", + name: "FLessThan", argLen: 1, - asm: riscv.ASEQZ, reg: regInfo{ - inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, + }, + }, + { + name: "LessEqual", + argLen: 1, + reg: regInfo{ outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "SNEZ", + name: "FLessEqual", argLen: 1, - asm: riscv.ASNEZ, reg: regInfo{ - inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, + }, + }, + { + name: "GreaterThan", + argLen: 1, + reg: regInfo{ outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "SLT", - argLen: 2, - asm: riscv.ASLT, + name: "FGreaterThan", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, + }, + }, + { + name: "GreaterEqual", + argLen: 1, + reg: regInfo{ outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "SLTI", - auxType: auxInt64, - argLen: 1, - asm: riscv.ASLTI, + name: "FGreaterEqual", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, + }, + }, + { + name: "LoweredGetClosurePtr", + argLen: 0, + zeroWidth: true, + reg: regInfo{ outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 2048}, // R11 }, }, }, { - name: "SLTU", - argLen: 2, - asm: riscv.ASLTU, + name: "LoweredGetCallerSP", + argLen: 1, + rematerializeable: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, + }, + }, + { + 
name: "LoweredGetCallerPC", + argLen: 0, + rematerializeable: true, + reg: regInfo{ outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "SLTIU", - auxType: auxInt64, - argLen: 1, - asm: riscv.ASLTIU, + name: "LoweredNilCheck", + argLen: 2, + clobberFlags: true, + nilCheck: true, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, + clobbers: 2147483648, // R31 }, }, { name: "LoweredRound32F", argLen: 1, resultInArg0: true, + zeroWidth: true, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, @@ -47428,353 +49232,447 @@ var opcodeTable = [...]opInfo{ name: "LoweredRound64F", argLen: 1, resultInArg0: true, + zeroWidth: true, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "CALLstatic", - auxType: auxCallOff, - argLen: -1, - call: true, + name: "CALLstatic", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, reg: regInfo{ - clobbers: 9223372035781033968, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + clobbers: 18446744071562059768, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 XER }, }, { - name: "CALLtail", - auxType: auxCallOff, - argLen: -1, - call: true, - tailCall: true, + name: "CALLtail", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, + tailCall: true, reg: regInfo{ - clobbers: 9223372035781033968, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g 
X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + clobbers: 18446744071562059768, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 XER }, }, { - name: "CALLclosure", - auxType: auxCallOff, - argLen: -1, - call: true, + name: "CALLclosure", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, reg: regInfo{ inputs: []inputInfo{ - {1, 33554432}, // X26 - {0, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4096}, // R12 + {1, 2048}, // R11 }, - clobbers: 9223372035781033968, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + clobbers: 18446744071562059768, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 XER }, }, { - name: "CALLinter", - auxType: auxCallOff, - argLen: -1, - call: true, + name: "CALLinter", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4096}, // R12 }, - clobbers: 9223372035781033968, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + clobbers: 18446744071562059768, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 XER }, }, { - name: "DUFFZERO", + name: "LoweredZero", auxType: auxInt64, argLen: 2, + clobberFlags: true, faultOnNilArg0: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 16777216}, // X25 + {0, 1048576}, // R20 }, - clobbers: 16777216, // X25 + clobbers: 1048576, // R20 }, }, { - name: "DUFFCOPY", + name: "LoweredZeroShort", auxType: auxInt64, - argLen: 3, + argLen: 2, faultOnNilArg0: true, - faultOnNilArg1: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 16777216}, // X25 - {1, 8388608}, // X24 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - clobbers: 25165824, // X24 X25 }, }, { - name: "LoweredZero", + name: "LoweredQuadZeroShort", auxType: auxInt64, - argLen: 3, + argLen: 2, faultOnNilArg0: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 16}, // X5 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - clobbers: 16, // X5 }, }, { - name: "LoweredMove", + name: "LoweredQuadZero", auxType: auxInt64, - argLen: 4, + argLen: 2, + clobberFlags: true, faultOnNilArg0: true, - faultOnNilArg1: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 16}, // X5 - {1, 32}, 
// X6 - {2, 1006632880}, // X5 X6 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1048576}, // R20 }, - clobbers: 112, // X5 X6 X7 + clobbers: 1048576, // R20 }, }, { - name: "LoweredAtomicLoad8", - argLen: 2, + name: "LoweredMove", + auxType: auxInt64, + argLen: 3, + clobberFlags: true, faultOnNilArg0: true, + faultOnNilArg1: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1048576}, // R20 + {1, 2097152}, // R21 }, + clobbers: 3145728, // R20 R21 }, }, { - name: "LoweredAtomicLoad32", - argLen: 2, + name: "LoweredMoveShort", + auxType: auxInt64, + argLen: 3, faultOnNilArg0: true, + faultOnNilArg1: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredAtomicLoad64", - argLen: 2, + name: "LoweredQuadMove", + auxType: auxInt64, + argLen: 3, + clobberFlags: true, faultOnNilArg0: true, + faultOnNilArg1: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {0, 1048576}, // R20 + {1, 2097152}, // R21 }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + clobbers: 3145728, // R20 R21 + }, + }, + { + name: "LoweredQuadMoveShort", + auxType: auxInt64, + argLen: 3, + faultOnNilArg0: true, + faultOnNilArg1: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { name: "LoweredAtomicStore8", + auxType: auxInt64, argLen: 3, faultOnNilArg0: true, hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { name: "LoweredAtomicStore32", + auxType: auxInt64, argLen: 3, faultOnNilArg0: true, hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 
X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { name: "LoweredAtomicStore64", + auxType: auxInt64, argLen: 3, faultOnNilArg0: true, hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredAtomicExchange32", + name: "LoweredAtomicLoad8", + auxType: auxInt64, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicLoad32", + auxType: auxInt64, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicLoad64", + auxType: auxInt64, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicLoadPtr", + auxType: auxInt64, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicAdd32", argLen: 3, resultNotInArgs: true, + clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 - {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 
X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredAtomicExchange64", + name: "LoweredAtomicAdd64", argLen: 3, resultNotInArgs: true, + clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 - {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredAtomicAdd32", + name: "LoweredAtomicExchange8", argLen: 3, resultNotInArgs: true, + clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, - unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 - {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredAtomicAdd64", + name: "LoweredAtomicExchange32", argLen: 3, resultNotInArgs: true, + clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, - unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 - {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredAtomicCas32", - argLen: 4, + name: "LoweredAtomicExchange64", + argLen: 3, resultNotInArgs: true, + clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, - unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 - {2, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 
X24 X25 X26 g X28 X29 X30 - {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { name: "LoweredAtomicCas64", + auxType: auxInt64, argLen: 4, resultNotInArgs: true, + clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, - unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 - {2, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 - {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredAtomicAnd32", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, - asm: riscv.AAMOANDW, + name: "LoweredAtomicCas32", + auxType: auxInt64, + argLen: 4, + resultNotInArgs: true, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 - {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredAtomicOr32", + name: "LoweredAtomicAnd8", argLen: 3, faultOnNilArg0: true, hasSideEffects: true, - asm: riscv.AAMOORW, + asm: ppc64.AAND, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 - {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 
1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredNilCheck", - argLen: 2, - nilCheck: true, + name: "LoweredAtomicAnd32", + argLen: 3, faultOnNilArg0: true, + hasSideEffects: true, + asm: ppc64.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - }, - }, - { - name: "LoweredGetClosurePtr", - argLen: 0, - reg: regInfo{ - outputs: []outputInfo{ - {0, 33554432}, // X26 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredGetCallerSP", - argLen: 1, - rematerializeable: true, + name: "LoweredAtomicOr8", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + asm: ppc64.AOR, reg: regInfo{ - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredGetCallerPC", - argLen: 0, - rematerializeable: true, + name: "LoweredAtomicOr32", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + asm: ppc64.AOR, reg: regInfo{ - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, @@ -47784,9 +49682,9 @@ var opcodeTable = [...]opInfo{ argLen: 1, clobberFlags: true, reg: regInfo{ - clobbers: 9223372034707292160, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + clobbers: 18446744072632408064, // R11 R12 R18 R19 R22 R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 XER outputs: []outputInfo{ - {0, 8388608}, // X24 + {0, 536870912}, // R29 }, }, }, @@ -47794,7 +49692,7 @@ var opcodeTable = [...]opInfo{ name: "LoweredPubBarrier", argLen: 1, hasSideEffects: true, - asm: riscv.AFENCE, + asm: ppc64.ALWSYNC, reg: regInfo{}, }, { @@ -47804,8 +49702,8 @@ var opcodeTable = [...]opInfo{ call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 64}, // X7 - {1, 134217728}, // X28 + {0, 32}, // R5 + {1, 64}, // R6 }, }, }, @@ -47816,8 +49714,8 @@ var opcodeTable = [...]opInfo{ call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 32}, // X6 - {1, 64}, // X7 + {0, 16}, // R4 + {1, 32}, // R5 }, }, }, @@ -47828,205 +49726,227 @@ var opcodeTable = [...]opInfo{ call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 16}, // X5 - {1, 32}, // X6 + {0, 8}, // R3 + {1, 16}, // R4 }, }, }, { - name: "FADDS", + name: "InvertFlags", + argLen: 1, + reg: regInfo{}, + }, + { + name: "FlagEQ", + argLen: 0, + reg: regInfo{}, + }, + { + name: "FlagLT", + argLen: 0, + reg: regInfo{}, + }, + { + name: 
"FlagGT", + argLen: 0, + reg: regInfo{}, + }, + + { + name: "ADD", argLen: 2, commutative: true, - asm: riscv.AFADDS, + asm: riscv.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FSUBS", - argLen: 2, - asm: riscv.AFSUBS, + name: "ADDI", + auxType: auxInt64, + argLen: 1, + asm: riscv.AADDI, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FMULS", - argLen: 2, - commutative: true, - asm: riscv.AFMULS, + name: "ADDIW", + auxType: auxInt64, + argLen: 1, + asm: riscv.AADDIW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FDIVS", - argLen: 2, - asm: riscv.AFDIVS, + name: "NEG", + argLen: 1, + asm: riscv.ANEG, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FMADDS", - argLen: 3, - commutative: true, - asm: riscv.AFMADDS, + name: "NEGW", + argLen: 1, + asm: riscv.ANEGW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FMSUBS", - argLen: 3, - commutative: true, - asm: riscv.AFMSUBS, + name: "SUB", + argLen: 2, + asm: riscv.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FNMADDS", - argLen: 3, - commutative: true, - asm: riscv.AFNMADDS, + name: "SUBW", + argLen: 2, + asm: riscv.ASUBW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FNMSUBS", - argLen: 3, + name: "MUL", + argLen: 2, commutative: true, - asm: riscv.AFNMSUBS, + asm: riscv.AMUL, reg: regInfo{ inputs: 
[]inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FSQRTS", - argLen: 1, - asm: riscv.AFSQRTS, + name: "MULW", + argLen: 2, + commutative: true, + asm: riscv.AMULW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FNEGS", - argLen: 1, - asm: riscv.AFNEGS, + name: "MULH", + argLen: 2, + commutative: true, + asm: riscv.AMULH, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FMVSX", - argLen: 1, - asm: riscv.AFMVSX, + name: "MULHU", + argLen: 2, + commutative: true, + asm: riscv.AMULHU, reg: regInfo{ inputs: []inputInfo{ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FCVTSW", - argLen: 1, - asm: riscv.AFCVTSW, + name: "LoweredMuluhilo", + argLen: 2, + resultNotInArgs: true, reg: regInfo{ inputs: []inputInfo{ {0, 1006632944}, // X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FCVTSL", - argLen: 1, - asm: riscv.AFCVTSL, + name: "LoweredMuluover", + argLen: 2, + resultNotInArgs: true, reg: regInfo{ inputs: []inputInfo{ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FCVTWS", - argLen: 1, - asm: riscv.AFCVTWS, + name: "DIV", + argLen: 2, + asm: riscv.ADIV, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 @@ -48034,12 +49954,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "FCVTLS", - argLen: 1, - asm: riscv.AFCVTLS, + name: "DIVU", + argLen: 2, + asm: riscv.ADIVU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 @@ -48047,44 +49968,41 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "FMOVWload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: riscv.AMOVF, + name: "DIVW", + argLen: 2, + asm: riscv.ADIVW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 
F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FMOVWstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: riscv.AMOVF, + name: "DIVUW", + argLen: 2, + asm: riscv.ADIVUW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FEQS", - argLen: 2, - commutative: true, - asm: riscv.AFEQS, + name: "REM", + argLen: 2, + asm: riscv.AREM, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 @@ -48092,14 +50010,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "FNES", - argLen: 2, - commutative: true, - asm: riscv.AFNES, + name: "REMU", + argLen: 2, + asm: riscv.AREMU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 @@ -48107,13 +50024,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "FLTS", + name: "REMW", argLen: 2, - asm: riscv.AFLTS, + asm: riscv.AREMW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ {0, 
1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 @@ -48121,13 +50038,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "FLES", + name: "REMUW", argLen: 2, - asm: riscv.AFLES, + asm: riscv.AREMUW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 @@ -48135,258 +50052,299 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredFMAXS", - argLen: 2, - commutative: true, - resultNotInArgs: true, - asm: riscv.AFMAXS, + name: "MOVaddr", + auxType: auxSymOff, + argLen: 1, + rematerializeable: true, + symEffect: SymAddr, + asm: riscv.AMOV, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "LoweredFMINS", - argLen: 2, - commutative: true, - resultNotInArgs: true, - asm: riscv.AFMINS, + name: "MOVDconst", + auxType: auxInt64, + argLen: 0, + rematerializeable: true, + asm: riscv.AMOV, reg: regInfo{ - inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FADDD", - argLen: 2, - commutative: true, - asm: riscv.AFADDD, + name: "MOVBload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB }, outputs: 
[]outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FSUBD", - argLen: 2, - asm: riscv.AFSUBD, + name: "MOVHload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FMULD", - argLen: 2, - commutative: true, - asm: riscv.AFMULD, + name: "MOVWload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FDIVD", - argLen: 2, - asm: riscv.AFDIVD, + name: "MOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOV, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FMADDD", - argLen: 3, - commutative: true, - asm: riscv.AFMADDD, + name: "MOVBUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 
F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FMSUBD", - argLen: 3, - commutative: true, - asm: riscv.AFMSUBD, + name: "MOVHUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FNMADDD", - argLen: 3, - commutative: true, - asm: riscv.AFNMADDD, + name: "MOVWUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOVWU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FNMSUBD", - argLen: 3, - commutative: true, - asm: riscv.AFNMSUBD, + name: "MOVBstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 
F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB }, }, }, { - name: "FSQRTD", - argLen: 1, - asm: riscv.AFSQRTD, + name: "MOVHstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB }, }, }, { - name: "FNEGD", - argLen: 1, - asm: riscv.AFNEGD, + name: "MOVWstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { + name: "MOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOV, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB }, }, }, { - name: "FABSD", - argLen: 1, - asm: riscv.AFABSD, + name: "MOVBstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { + name: "MOVHstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB }, }, }, { - name: "FSGNJD", - argLen: 2, - asm: 
riscv.AFSGNJD, + name: "MOVWstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { + name: "MOVDstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB }, }, }, { - name: "FMVDX", + name: "MOVBreg", argLen: 1, - asm: riscv.AFMVDX, + asm: riscv.AMOVB, reg: regInfo{ inputs: []inputInfo{ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FCVTDW", + name: "MOVHreg", argLen: 1, - asm: riscv.AFCVTDW, + asm: riscv.AMOVH, reg: regInfo{ inputs: []inputInfo{ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FCVTDL", + name: "MOVWreg", argLen: 1, - asm: riscv.AFCVTDL, + asm: riscv.AMOVW, reg: regInfo{ inputs: []inputInfo{ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FCVTWD", + name: "MOVDreg", argLen: 1, - asm: riscv.AFCVTWD, + asm: riscv.AMOV, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 @@ -48394,12 +50352,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "FCVTLD", + name: "MOVBUreg", argLen: 1, - asm: riscv.AFCVTLD, + asm: riscv.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 
F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 @@ -48407,70 +50365,66 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "FCVTDS", + name: "MOVHUreg", argLen: 1, - asm: riscv.AFCVTDS, + asm: riscv.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FCVTSD", + name: "MOVWUreg", argLen: 1, - asm: riscv.AFCVTSD, + asm: riscv.AMOVWU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FMOVDload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: riscv.AMOVD, + name: "MOVDnop", + argLen: 1, + resultInArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FMOVDstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: riscv.AMOVD, + name: "SLL", + argLen: 2, + asm: riscv.ASLL, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FEQD", - argLen: 2, - commutative: true, - asm: riscv.AFEQD, + name: "SLLW", + argLen: 2, + asm: riscv.ASLLW, reg: regInfo{ inputs: 
[]inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 @@ -48478,14 +50432,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "FNED", - argLen: 2, - commutative: true, - asm: riscv.AFNED, + name: "SRA", + argLen: 2, + asm: riscv.ASRA, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 @@ -48493,13 +50446,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "FLTD", + name: "SRAW", argLen: 2, - asm: riscv.AFLTD, + asm: riscv.ASRAW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 @@ -48507,13 +50460,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "FLED", + name: "SRL", argLen: 2, - asm: riscv.AFLED, + asm: riscv.ASRL, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 @@ -48521,1879 +50474,1819 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredFMIND", - argLen: 2, - commutative: true, - resultNotInArgs: true, - asm: riscv.AFMIND, + name: "SRLW", + argLen: 2, + asm: riscv.ASRLW, reg: regInfo{ inputs: []inputInfo{ - {0, 
9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "LoweredFMAXD", - argLen: 2, - commutative: true, - resultNotInArgs: true, - asm: riscv.AFMAXD, + name: "SLLI", + auxType: auxInt64, + argLen: 1, + asm: riscv.ASLLI, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, - { - name: "FADDS", - argLen: 2, - commutative: true, - resultInArg0: true, - asm: s390x.AFADDS, + name: "SLLIW", + auxType: auxInt64, + argLen: 1, + asm: riscv.ASLLIW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FADD", - argLen: 2, - commutative: true, - resultInArg0: true, - asm: s390x.AFADD, + name: "SRAI", + auxType: auxInt64, + argLen: 1, + asm: riscv.ASRAI, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FSUBS", - argLen: 2, - resultInArg0: true, - asm: s390x.AFSUBS, + name: "SRAIW", + auxType: auxInt64, + argLen: 1, + asm: riscv.ASRAIW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 
X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FSUB", - argLen: 2, - resultInArg0: true, - asm: s390x.AFSUB, + name: "SRLI", + auxType: auxInt64, + argLen: 1, + asm: riscv.ASRLI, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FMULS", - argLen: 2, - commutative: true, - resultInArg0: true, - asm: s390x.AFMULS, + name: "SRLIW", + auxType: auxInt64, + argLen: 1, + asm: riscv.ASRLIW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FMUL", - argLen: 2, - commutative: true, - resultInArg0: true, - asm: s390x.AFMUL, + name: "SH1ADD", + argLen: 2, + asm: riscv.ASH1ADD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FDIVS", - argLen: 2, - resultInArg0: true, - asm: s390x.AFDIVS, - reg: regInfo{ + name: "SH2ADD", + argLen: 2, + asm: riscv.ASH2ADD, + reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FDIV", - argLen: 2, - resultInArg0: true, - asm: s390x.AFDIV, + name: "SH3ADD", + argLen: 2, + asm: riscv.ASH3ADD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FNEGS", - argLen: 1, - clobberFlags: true, - asm: s390x.AFNEGS, + name: "AND", + argLen: 2, + commutative: true, + asm: riscv.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FNEG", - argLen: 1, - clobberFlags: true, - asm: s390x.AFNEG, + name: "ANDN", + argLen: 2, + asm: riscv.AANDN, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FMADDS", - argLen: 3, - resultInArg0: true, - asm: s390x.AFMADDS, + name: "ANDI", + auxType: auxInt64, + argLen: 1, + asm: riscv.AANDI, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FMADD", - argLen: 3, - resultInArg0: true, - asm: s390x.AFMADD, + name: "CLZ", + argLen: 1, + asm: riscv.ACLZ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FMSUBS", - argLen: 3, - resultInArg0: true, - asm: s390x.AFMSUBS, + name: "CLZW", + argLen: 1, + asm: riscv.ACLZW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 
F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FMSUB", - argLen: 3, - resultInArg0: true, - asm: s390x.AFMSUB, + name: "CPOP", + argLen: 1, + asm: riscv.ACPOP, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "LPDFR", + name: "CPOPW", argLen: 1, - asm: s390x.ALPDFR, + asm: riscv.ACPOPW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "LNDFR", + name: "CTZ", argLen: 1, - asm: s390x.ALNDFR, + asm: riscv.ACTZ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "CPSDR", - argLen: 2, - asm: s390x.ACPSDR, + name: "CTZW", + argLen: 1, + asm: riscv.ACTZW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FIDBR", - auxType: auxInt8, - argLen: 1, - asm: s390x.AFIDBR, + name: "NOT", + argLen: 1, + asm: riscv.ANOT, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 
X30 }, }, }, { - name: "FMOVSload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AFMOVS, + name: "OR", + argLen: 2, + commutative: true, + asm: riscv.AOR, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FMOVDload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AFMOVD, + name: "ORN", + argLen: 2, + asm: riscv.AORN, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FMOVSconst", - auxType: auxFloat32, - argLen: 0, - rematerializeable: true, - asm: s390x.AFMOVS, + name: "ORI", + auxType: auxInt64, + argLen: 1, + asm: riscv.AORI, reg: regInfo{ - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, - }, - }, - { - name: "FMOVDconst", - auxType: auxFloat64, - argLen: 0, - rematerializeable: true, - asm: s390x.AFMOVD, - reg: regInfo{ outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FMOVSloadidx", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: s390x.AFMOVS, + name: "REV8", + argLen: 1, + asm: riscv.AREV8, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FMOVDloadidx", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: s390x.AFMOVD, + name: "ROL", + argLen: 2, + asm: riscv.AROL, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 
F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FMOVSstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.AFMOVS, + name: "ROLW", + argLen: 2, + asm: riscv.AROLW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FMOVDstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.AFMOVD, + name: "ROR", + argLen: 2, + asm: riscv.AROR, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FMOVSstoreidx", - auxType: auxSymOff, - argLen: 4, - symEffect: SymWrite, - asm: s390x.AFMOVS, + name: "RORI", + auxType: auxInt64, + argLen: 1, + asm: riscv.ARORI, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FMOVDstoreidx", - auxType: auxSymOff, - argLen: 4, - symEffect: SymWrite, - asm: s390x.AFMOVD, + name: "RORIW", + auxType: auxInt64, + argLen: 1, + asm: riscv.ARORIW, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "ADD", - argLen: 2, - commutative: true, - clobberFlags: true, - asm: s390x.AADD, + name: "RORW", + argLen: 2, + asm: riscv.ARORW, reg: regInfo{ inputs: []inputInfo{ - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: 
[]outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "ADDW", - argLen: 2, - commutative: true, - clobberFlags: true, - asm: s390x.AADDW, + name: "XNOR", + argLen: 2, + commutative: true, + asm: riscv.AXNOR, reg: regInfo{ inputs: []inputInfo{ - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "ADDconst", - auxType: auxInt32, - argLen: 1, - clobberFlags: true, - asm: s390x.AADD, + name: "XOR", + argLen: 2, + commutative: true, + asm: riscv.AXOR, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "ADDWconst", - auxType: auxInt32, - argLen: 1, - clobberFlags: true, - asm: s390x.AADDW, + name: "XORI", + auxType: auxInt64, + argLen: 1, + asm: riscv.AXORI, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "ADDload", - auxType: auxSymOff, - argLen: 3, - resultInArg0: true, - clobberFlags: true, - faultOnNilArg1: true, - symEffect: SymRead, - asm: s390x.AADD, + name: "MIN", + argLen: 2, + commutative: true, + asm: riscv.AMIN, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "ADDWload", - auxType: auxSymOff, - argLen: 3, - resultInArg0: true, - clobberFlags: true, - faultOnNilArg1: true, - symEffect: SymRead, - asm: s390x.AADDW, + name: "MAX", + argLen: 2, + commutative: true, + asm: riscv.AMAX, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 
X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "SUB", - argLen: 2, - clobberFlags: true, - asm: s390x.ASUB, + name: "MINU", + argLen: 2, + commutative: true, + asm: riscv.AMINU, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "SUBW", - argLen: 2, - clobberFlags: true, - asm: s390x.ASUBW, + name: "MAXU", + argLen: 2, + commutative: true, + asm: riscv.AMAXU, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "SUBconst", - auxType: auxInt32, - argLen: 1, - resultInArg0: true, - clobberFlags: true, - asm: s390x.ASUB, + name: "SEQZ", + argLen: 1, + asm: riscv.ASEQZ, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "SUBWconst", - auxType: auxInt32, - argLen: 1, - resultInArg0: true, - clobberFlags: true, - asm: s390x.ASUBW, + name: "SNEZ", + argLen: 1, + asm: riscv.ASNEZ, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "SUBload", - auxType: auxSymOff, - argLen: 3, - resultInArg0: true, - clobberFlags: true, - faultOnNilArg1: true, - symEffect: SymRead, - asm: s390x.ASUB, + name: "SLT", + argLen: 2, + asm: riscv.ASLT, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 
X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "SUBWload", - auxType: auxSymOff, - argLen: 3, - resultInArg0: true, - clobberFlags: true, - faultOnNilArg1: true, - symEffect: SymRead, - asm: s390x.ASUBW, + name: "SLTI", + auxType: auxInt64, + argLen: 1, + asm: riscv.ASLTI, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "MULLD", - argLen: 2, - commutative: true, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AMULLD, + name: "SLTU", + argLen: 2, + asm: riscv.ASLTU, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "MULLW", - argLen: 2, - commutative: true, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AMULLW, + name: "SLTIU", + auxType: auxInt64, + argLen: 1, + asm: riscv.ASLTIU, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "MULLDconst", - auxType: auxInt32, + name: "LoweredRound32F", argLen: 1, resultInArg0: true, - clobberFlags: true, - asm: s390x.AMULLD, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MULLWconst", - auxType: auxInt32, + name: "LoweredRound64F", argLen: 1, resultInArg0: true, - clobberFlags: true, - asm: s390x.AMULLW, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MULLDload", - auxType: auxSymOff, - argLen: 3, - resultInArg0: true, - clobberFlags: true, - faultOnNilArg1: true, - symEffect: SymRead, - asm: s390x.AMULLD, + name: "CALLstatic", + auxType: auxCallOff, + argLen: -1, + call: true, + reg: regInfo{ + clobbers: 9223372035781033968, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { + name: "CALLtail", + auxType: auxCallOff, + argLen: -1, + call: true, + tailCall: true, + reg: regInfo{ + clobbers: 9223372035781033968, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { + name: "CALLclosure", + auxType: auxCallOff, + argLen: -1, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 33554432}, // X26 + {0, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + clobbers: 9223372035781033968, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { + name: "CALLinter", + auxType: auxCallOff, + argLen: -1, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + clobbers: 9223372035781033968, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { + name: "DUFFZERO", + auxType: auxInt64, + argLen: 2, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 16777216}, // X25 }, + clobbers: 16777216, // X25 }, }, { - name: "MULLWload", - auxType: auxSymOff, + name: "DUFFCOPY", + auxType: auxInt64, argLen: 3, - resultInArg0: true, - clobberFlags: true, + faultOnNilArg0: true, faultOnNilArg1: true, - symEffect: SymRead, - asm: s390x.AMULLW, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 16777216}, // X25 + {1, 8388608}, // X24 }, + clobbers: 25165824, // X24 X25 }, }, { - name: "MULHD", - argLen: 2, - commutative: true, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AMULHD, + name: "LoweredZero", + auxType: auxInt64, + argLen: 3, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - }, - clobbers: 2048, // R11 - outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 16}, // X5 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 
X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, + clobbers: 16, // X5 }, }, { - name: "MULHDU", - argLen: 2, - commutative: true, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AMULHDU, + name: "LoweredMove", + auxType: auxInt64, + argLen: 4, + faultOnNilArg0: true, + faultOnNilArg1: true, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - }, - clobbers: 2048, // R11 - outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 16}, // X5 + {1, 32}, // X6 + {2, 1006632880}, // X5 X6 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, + clobbers: 112, // X5 X6 X7 }, }, { - name: "DIVD", - argLen: 2, - resultInArg0: true, - clobberFlags: true, - asm: s390x.ADIVD, + name: "LoweredAtomicLoad8", + argLen: 2, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB }, - clobbers: 2048, // R11 outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "DIVW", - argLen: 2, - resultInArg0: true, - clobberFlags: true, - asm: s390x.ADIVW, + name: "LoweredAtomicLoad32", + argLen: 2, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB }, - clobbers: 2048, // R11 outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "DIVDU", - argLen: 2, - resultInArg0: true, - clobberFlags: true, - asm: s390x.ADIVDU, + name: "LoweredAtomicLoad64", + argLen: 2, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB }, - clobbers: 2048, // R11 outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "DIVWU", - argLen: 2, - resultInArg0: true, - clobberFlags: true, - asm: s390x.ADIVWU, + name: "LoweredAtomicStore8", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - }, - clobbers: 2048, // R11 - outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB }, }, }, { - name: "MODD", - argLen: 2, - resultInArg0: true, - clobberFlags: true, - asm: 
s390x.AMODD, + name: "LoweredAtomicStore32", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - }, - clobbers: 2048, // R11 - outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB }, }, }, { - name: "MODW", - argLen: 2, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AMODW, + name: "LoweredAtomicStore64", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - }, - clobbers: 2048, // R11 - outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB }, }, }, { - name: "MODDU", - argLen: 2, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AMODDU, + name: "LoweredAtomicExchange32", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB }, - clobbers: 2048, // R11 outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "MODWU", - argLen: 2, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AMODWU, + name: "LoweredAtomicExchange64", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB }, - clobbers: 2048, // R11 outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "AND", - argLen: 2, - commutative: true, - clobberFlags: true, - asm: s390x.AAND, + name: "LoweredAtomicAdd32", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {0, 9223372037928517618}, // 
SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "ANDW", - argLen: 2, - commutative: true, - clobberFlags: true, - asm: s390x.AANDW, + name: "LoweredAtomicAdd64", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "ANDconst", - auxType: auxInt64, - argLen: 1, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AAND, + name: "LoweredAtomicCas32", + argLen: 4, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {2, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "ANDWconst", - auxType: auxInt32, - argLen: 1, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AANDW, + name: "LoweredAtomicCas64", + argLen: 4, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {2, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "ANDload", - auxType: auxSymOff, + name: "LoweredAtomicAnd32", argLen: 3, - resultInArg0: true, - clobberFlags: true, - faultOnNilArg1: true, - symEffect: SymRead, - asm: s390x.AAND, + faultOnNilArg0: true, + hasSideEffects: true, + asm: riscv.AAMOANDW, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 1073741808}, 
// X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB }, }, }, { - name: "ANDWload", - auxType: auxSymOff, + name: "LoweredAtomicOr32", argLen: 3, - resultInArg0: true, - clobberFlags: true, - faultOnNilArg1: true, - symEffect: SymRead, - asm: s390x.AANDW, + faultOnNilArg0: true, + hasSideEffects: true, + asm: riscv.AAMOORW, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB }, }, }, { - name: "OR", - argLen: 2, - commutative: true, - clobberFlags: true, - asm: s390x.AOR, + name: "LoweredNilCheck", + argLen: 2, + nilCheck: true, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, + }, + }, + { + name: "LoweredGetClosurePtr", + argLen: 0, + reg: regInfo{ outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 33554432}, // X26 }, }, }, { - name: "ORW", - argLen: 2, - commutative: true, - clobberFlags: true, - asm: s390x.AORW, + name: "LoweredGetCallerSP", + argLen: 1, + rematerializeable: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, + }, + }, + { + name: "LoweredGetCallerPC", + argLen: 0, + rematerializeable: true, + reg: regInfo{ outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "ORconst", + name: "LoweredWB", auxType: auxInt64, argLen: 1, - resultInArg0: true, clobberFlags: true, - asm: s390x.AOR, reg: regInfo{ - inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, + clobbers: 9223372034707292160, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 8388608}, // X24 }, }, }, { - name: "ORWconst", - auxType: auxInt32, - argLen: 1, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AORW, + name: "LoweredPubBarrier", + argLen: 1, + hasSideEffects: true, + asm: riscv.AFENCE, + reg: regInfo{}, + }, + { + name: "LoweredPanicBoundsA", + auxType: auxInt64, + argLen: 3, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 64}, // X7 + {1, 134217728}, // X28 }, }, }, { - name: "ORload", - auxType: auxSymOff, - argLen: 3, - resultInArg0: true, - 
clobberFlags: true, - faultOnNilArg1: true, - symEffect: SymRead, - asm: s390x.AOR, + name: "LoweredPanicBoundsB", + auxType: auxInt64, + argLen: 3, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 32}, // X6 + {1, 64}, // X7 }, }, }, { - name: "ORWload", - auxType: auxSymOff, - argLen: 3, - resultInArg0: true, - clobberFlags: true, - faultOnNilArg1: true, - symEffect: SymRead, - asm: s390x.AORW, + name: "LoweredPanicBoundsC", + auxType: auxInt64, + argLen: 3, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 16}, // X5 + {1, 32}, // X6 }, }, }, { - name: "XOR", - argLen: 2, - commutative: true, - clobberFlags: true, - asm: s390x.AXOR, + name: "FADDS", + argLen: 2, + commutative: true, + asm: riscv.AFADDS, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "XORW", - argLen: 2, - commutative: true, - clobberFlags: true, - asm: s390x.AXORW, + name: "FSUBS", + argLen: 2, + asm: riscv.AFSUBS, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "XORconst", - auxType: auxInt64, - argLen: 1, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AXOR, + name: "FMULS", + argLen: 2, + commutative: true, + asm: riscv.AFMULS, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "XORWconst", - auxType: auxInt32, - argLen: 
1, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AXORW, + name: "FDIVS", + argLen: 2, + asm: riscv.AFDIVS, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "XORload", - auxType: auxSymOff, - argLen: 3, - resultInArg0: true, - clobberFlags: true, - faultOnNilArg1: true, - symEffect: SymRead, - asm: s390x.AXOR, + name: "FMADDS", + argLen: 3, + commutative: true, + asm: riscv.AFMADDS, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "XORWload", - auxType: auxSymOff, - argLen: 3, - resultInArg0: true, - clobberFlags: true, - faultOnNilArg1: true, - symEffect: SymRead, - asm: s390x.AXORW, + name: "FMSUBS", + argLen: 3, + commutative: true, + asm: riscv.AFMSUBS, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ADDC", - argLen: 2, + name: "FNMADDS", + argLen: 3, commutative: true, - asm: s390x.AADDC, + asm: riscv.AFNMADDS, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 
F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ADDCconst", - auxType: auxInt16, - argLen: 1, - asm: s390x.AADDC, + name: "FNMSUBS", + argLen: 3, + commutative: true, + asm: riscv.AFNMSUBS, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ADDE", - argLen: 3, - commutative: true, - resultInArg0: true, - asm: s390x.AADDE, + name: "FSQRTS", + argLen: 1, + asm: riscv.AFSQRTS, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SUBC", - argLen: 2, - asm: s390x.ASUBC, + name: "FNEGS", + argLen: 1, + asm: riscv.AFNEGS, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SUBE", - argLen: 3, - resultInArg0: true, - asm: s390x.ASUBE, + name: "FMVSX", + argLen: 1, + asm: riscv.AFMVSX, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CMP", - argLen: 2, - asm: s390x.ACMP, + name: "FCVTSW", + argLen: 1, + asm: riscv.AFCVTSW, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CMPW", - argLen: 2, - asm: s390x.ACMPW, + name: "FCVTSL", + argLen: 1, + asm: riscv.AFCVTSL, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CMPU", - argLen: 2, - asm: s390x.ACMPU, + name: "FCVTWS", + argLen: 1, + asm: riscv.AFCVTWS, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "CMPWU", - argLen: 2, - asm: s390x.ACMPWU, + name: "FCVTLS", + argLen: 1, + asm: riscv.AFCVTLS, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "CMPconst", - auxType: auxInt32, - argLen: 1, - asm: s390x.ACMP, + name: "FMOVWload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOVF, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CMPWconst", - auxType: auxInt32, - argLen: 1, - asm: s390x.ACMPW, + name: "FMOVWstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOVF, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CMPUconst", - auxType: auxInt32, - argLen: 1, - asm: s390x.ACMPU, + name: "FEQS", + argLen: 2, + commutative: true, + asm: riscv.AFEQS, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 
F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "CMPWUconst", - auxType: auxInt32, - argLen: 1, - asm: s390x.ACMPWU, + name: "FNES", + argLen: 2, + commutative: true, + asm: riscv.AFNES, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FCMPS", + name: "FLTS", argLen: 2, - asm: s390x.ACEBR, + asm: riscv.AFLTS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FCMP", + name: "FLES", argLen: 2, - asm: s390x.AFCMPU, + asm: riscv.AFLES, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "LTDBR", - argLen: 1, - asm: s390x.ALTDBR, + name: "LoweredFMAXS", + argLen: 2, + commutative: true, + resultNotInArgs: true, + asm: riscv.AFMAXS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LTEBR", - argLen: 1, - asm: s390x.ALTEBR, + name: "LoweredFMINS", + argLen: 2, + commutative: true, + resultNotInArgs: true, + asm: riscv.AFMINS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 
F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SLD", - argLen: 2, - asm: s390x.ASLD, + name: "FADDD", + argLen: 2, + commutative: true, + asm: riscv.AFADDD, reg: regInfo{ inputs: []inputInfo{ - {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SLW", + name: "FSUBD", argLen: 2, - asm: s390x.ASLW, + asm: riscv.AFSUBD, reg: regInfo{ inputs: []inputInfo{ - {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SLDconst", - auxType: auxUInt8, - argLen: 1, - asm: s390x.ASLD, + name: "FMULD", + argLen: 2, + commutative: true, + asm: riscv.AFMULD, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SLWconst", - auxType: auxUInt8, - argLen: 1, - asm: s390x.ASLW, + name: "FDIVD", + argLen: 2, + asm: riscv.AFDIVD, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { 
- name: "SRD", - argLen: 2, - asm: s390x.ASRD, + name: "FMADDD", + argLen: 3, + commutative: true, + asm: riscv.AFMADDD, reg: regInfo{ inputs: []inputInfo{ - {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SRW", - argLen: 2, - asm: s390x.ASRW, + name: "FMSUBD", + argLen: 3, + commutative: true, + asm: riscv.AFMSUBD, reg: regInfo{ inputs: []inputInfo{ - {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SRDconst", - auxType: auxUInt8, - argLen: 1, - asm: s390x.ASRD, + name: "FNMADDD", + argLen: 3, + commutative: true, + asm: riscv.AFNMADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SRWconst", - auxType: auxUInt8, - argLen: 1, - asm: s390x.ASRW, + name: "FNMSUBD", + argLen: 3, + commutative: true, + asm: riscv.AFNMSUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, 
outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SRAD", - argLen: 2, - clobberFlags: true, - asm: s390x.ASRAD, + name: "FSQRTD", + argLen: 1, + asm: riscv.AFSQRTD, reg: regInfo{ inputs: []inputInfo{ - {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SRAW", - argLen: 2, - clobberFlags: true, - asm: s390x.ASRAW, + name: "FNEGD", + argLen: 1, + asm: riscv.AFNEGD, reg: regInfo{ inputs: []inputInfo{ - {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SRADconst", - auxType: auxUInt8, - argLen: 1, - clobberFlags: true, - asm: s390x.ASRAD, + name: "FABSD", + argLen: 1, + asm: riscv.AFABSD, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SRAWconst", - auxType: auxUInt8, - argLen: 1, - clobberFlags: true, - asm: s390x.ASRAW, + name: "FSGNJD", + argLen: 2, + asm: riscv.AFSGNJD, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "RLLG", - argLen: 2, - asm: s390x.ARLLG, + name: "FMVDX", + argLen: 1, + asm: riscv.AFMVDX, reg: regInfo{ inputs: []inputInfo{ - {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 
F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "RLL", - argLen: 2, - asm: s390x.ARLL, + name: "FCVTDW", + argLen: 1, + asm: riscv.AFCVTDW, reg: regInfo{ inputs: []inputInfo{ - {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "RLLconst", - auxType: auxUInt8, - argLen: 1, - asm: s390x.ARLL, + name: "FCVTDL", + argLen: 1, + asm: riscv.AFCVTDL, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "RXSBG", - auxType: auxS390XRotateParams, - argLen: 2, - resultInArg0: true, - clobberFlags: true, - asm: s390x.ARXSBG, + name: "FCVTWD", + argLen: 1, + asm: riscv.AFCVTWD, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "RISBGZ", - auxType: auxS390XRotateParams, - argLen: 1, - clobberFlags: true, - asm: s390x.ARISBGZ, + name: "FCVTLD", + argLen: 1, + asm: riscv.AFCVTLD, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "NEG", - argLen: 1, - clobberFlags: true, - asm: s390x.ANEG, + name: "FCVTDS", + argLen: 1, + asm: riscv.AFCVTDS, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "NEGW", - argLen: 1, - clobberFlags: true, - asm: s390x.ANEGW, + name: "FCVTSD", + argLen: 1, + asm: riscv.AFCVTSD, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: 
[]outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "NOT", - argLen: 1, - resultInArg0: true, - clobberFlags: true, + name: "FMOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "NOTW", - argLen: 1, - resultInArg0: true, - clobberFlags: true, + name: "FMOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "FSQRT", - argLen: 1, - asm: s390x.AFSQRT, + name: "FEQD", + argLen: 2, + commutative: true, + asm: riscv.AFEQD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FSQRTS", - argLen: 1, - asm: s390x.AFSQRTS, + name: "FNED", + argLen: 2, + commutative: true, + asm: riscv.AFNED, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "LOCGR", - auxType: auxS390XCCMask, - argLen: 3, - resultInArg0: true, - asm: s390x.ALOCGR, + name: "FLTD", + argLen: 2, + asm: riscv.AFLTD, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 
9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "MOVBreg", - argLen: 1, - asm: s390x.AMOVB, + name: "FLED", + argLen: 2, + asm: riscv.AFLED, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "MOVBZreg", - argLen: 1, - asm: s390x.AMOVBZ, + name: "LoweredFMIND", + argLen: 2, + commutative: true, + resultNotInArgs: true, + asm: riscv.AFMIND, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVHreg", - argLen: 1, - asm: s390x.AMOVH, + name: "LoweredFMAXD", + argLen: 2, + commutative: true, + resultNotInArgs: true, + asm: riscv.AFMAXD, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, + { - name: "MOVHZreg", - argLen: 1, - asm: s390x.AMOVHZ, + name: "FADDS", + argLen: 2, + commutative: true, + resultInArg0: true, + asm: s390x.AFADDS, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "MOVWreg", - argLen: 1, - asm: s390x.AMOVW, + name: "FADD", + argLen: 2, + commutative: true, + resultInArg0: true, + asm: s390x.AFADD, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 
4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "MOVWZreg", - argLen: 1, - asm: s390x.AMOVWZ, + name: "FSUBS", + argLen: 2, + resultInArg0: true, + asm: s390x.AFSUBS, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "MOVDconst", - auxType: auxInt64, - argLen: 0, - rematerializeable: true, - asm: s390x.AMOVD, + name: "FSUB", + argLen: 2, + resultInArg0: true, + asm: s390x.AFSUB, reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "LDGR", - argLen: 1, - asm: s390x.ALDGR, + name: "FMULS", + argLen: 2, + commutative: true, + resultInArg0: true, + asm: s390x.AFMULS, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 @@ -50401,82 +52294,89 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LGDR", - argLen: 1, - asm: s390x.ALGDR, + name: "FMUL", + argLen: 2, + commutative: true, + resultInArg0: true, + asm: s390x.AFMUL, reg: regInfo{ inputs: []inputInfo{ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "CFDBRA", - argLen: 1, - clobberFlags: true, - asm: s390x.ACFDBRA, + name: "FDIVS", + argLen: 2, + resultInArg0: true, + asm: s390x.AFDIVS, reg: regInfo{ inputs: []inputInfo{ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "CGDBRA", - argLen: 1, - clobberFlags: true, - asm: s390x.ACGDBRA, + name: "FDIV", + argLen: 2, + resultInArg0: true, + asm: s390x.AFDIV, reg: regInfo{ inputs: []inputInfo{ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "CFEBRA", + name: "FNEGS", argLen: 1, clobberFlags: true, - asm: s390x.ACFEBRA, + asm: s390x.AFNEGS, reg: regInfo{ inputs: []inputInfo{ {0, 4294901760}, // F0 
F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "CGEBRA", + name: "FNEG", argLen: 1, clobberFlags: true, - asm: s390x.ACGEBRA, + asm: s390x.AFNEG, reg: regInfo{ inputs: []inputInfo{ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "CEFBRA", - argLen: 1, - clobberFlags: true, - asm: s390x.ACEFBRA, + name: "FMADDS", + argLen: 3, + resultInArg0: true, + asm: s390x.AFMADDS, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 @@ -50484,13 +52384,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "CDFBRA", - argLen: 1, - clobberFlags: true, - asm: s390x.ACDFBRA, + name: "FMADD", + argLen: 3, + resultInArg0: true, + asm: s390x.AFMADD, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 @@ -50498,13 +52400,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "CEGBRA", - argLen: 1, - clobberFlags: true, - asm: s390x.ACEGBRA, + name: "FMSUBS", + argLen: 3, + resultInArg0: true, + asm: s390x.AFMSUBS, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 @@ -50512,13 +52416,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "CDGBRA", - argLen: 1, - clobberFlags: true, - asm: s390x.ACDGBRA, + name: "FMSUB", + argLen: 3, + resultInArg0: true, + asm: s390x.AFMSUB, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 @@ -50526,69 +52432,69 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "CLFEBR", - argLen: 1, - clobberFlags: true, - asm: s390x.ACLFEBR, + name: "LPDFR", + argLen: 1, + asm: s390x.ALPDFR, reg: regInfo{ inputs: []inputInfo{ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, 
}, }, { - name: "CLFDBR", - argLen: 1, - clobberFlags: true, - asm: s390x.ACLFDBR, + name: "LNDFR", + argLen: 1, + asm: s390x.ALNDFR, reg: regInfo{ inputs: []inputInfo{ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "CLGEBR", - argLen: 1, - clobberFlags: true, - asm: s390x.ACLGEBR, + name: "CPSDR", + argLen: 2, + asm: s390x.ACPSDR, reg: regInfo{ inputs: []inputInfo{ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "CLGDBR", - argLen: 1, - clobberFlags: true, - asm: s390x.ACLGDBR, + name: "FIDBR", + auxType: auxInt8, + argLen: 1, + asm: s390x.AFIDBR, reg: regInfo{ inputs: []inputInfo{ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "CELFBR", - argLen: 1, - clobberFlags: true, - asm: s390x.ACELFBR, + name: "FMOVSload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AFMOVS, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, outputs: []outputInfo{ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 @@ -50596,13 +52502,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "CDLFBR", - argLen: 1, - clobberFlags: true, - asm: s390x.ACDLFBR, + name: "FMOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AFMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, outputs: []outputInfo{ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 @@ -50610,40 +52518,39 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "CELGBR", - argLen: 1, - clobberFlags: true, - asm: s390x.ACELGBR, + name: "FMOVSconst", + auxType: auxFloat32, + argLen: 0, + rematerializeable: true, + asm: s390x.AFMOVS, reg: regInfo{ - inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, outputs: []outputInfo{ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "CDLGBR", - argLen: 1, - clobberFlags: true, - asm: s390x.ACDLGBR, + name: "FMOVDconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: s390x.AFMOVD, reg: regInfo{ - inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, outputs: []outputInfo{ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "LEDBR", - argLen: 1, - asm: s390x.ALEDBR, + name: "FMOVSloadidx", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: s390x.AFMOVS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, outputs: 
[]outputInfo{ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 @@ -50651,12 +52558,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LDEBR", - argLen: 1, - asm: s390x.ALDEBR, + name: "FMOVDloadidx", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: s390x.AFMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, outputs: []outputInfo{ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 @@ -50664,45 +52574,71 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVDaddr", - auxType: auxSymOff, - argLen: 1, - rematerializeable: true, - symEffect: SymAddr, + name: "FMOVSstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AFMOVS, reg: regInfo{ inputs: []inputInfo{ - {0, 4295000064}, // SP SB + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + { + name: "FMOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AFMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "MOVDaddridx", + name: "FMOVSstoreidx", auxType: auxSymOff, - argLen: 2, - symEffect: SymAddr, + argLen: 4, + symEffect: SymWrite, + asm: s390x.AFMOVS, reg: regInfo{ inputs: []inputInfo{ - {0, 4295000064}, // SP SB + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + { + name: "FMOVDstoreidx", + auxType: auxSymOff, + argLen: 4, + symEffect: SymWrite, + asm: s390x.AFMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "MOVBZload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AMOVBZ, + name: "ADD", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: s390x.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, outputs: []outputInfo{ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 @@ -50710,15 +52646,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVBload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AMOVB, + name: "ADDW", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: s390x.AADDW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, outputs: []outputInfo{ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 @@ -50726,15 +52662,14 @@ var 
opcodeTable = [...]opInfo{ }, }, { - name: "MOVHZload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AMOVHZ, + name: "ADDconst", + auxType: auxInt32, + argLen: 1, + clobberFlags: true, + asm: s390x.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, outputs: []outputInfo{ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 @@ -50742,15 +52677,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVHload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AMOVH, + name: "ADDWconst", + auxType: auxInt32, + argLen: 1, + clobberFlags: true, + asm: s390x.AADDW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, outputs: []outputInfo{ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 @@ -50758,15 +52692,18 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWZload", + name: "ADDload", auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, symEffect: SymRead, - asm: s390x.AMOVWZ, + asm: s390x.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, outputs: []outputInfo{ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 @@ -50774,15 +52711,18 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWload", + name: "ADDWload", auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, symEffect: SymRead, - asm: s390x.AMOVW, + asm: s390x.AADDW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, outputs: []outputInfo{ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 @@ -50790,15 +52730,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVDload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AMOVD, + name: "SUB", + argLen: 2, + clobberFlags: true, + asm: s390x.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, outputs: []outputInfo{ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 @@ -50806,12 +52745,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWBR", - argLen: 1, - asm: s390x.AMOVWBR, + name: "SUBW", + argLen: 2, + clobberFlags: true, + asm: s390x.ASUBW, reg: regInfo{ inputs: []inputInfo{ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, outputs: []outputInfo{ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 @@ -50819,9 +52760,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVDBR", - argLen: 1, - asm: s390x.AMOVDBR, + name: "SUBconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.ASUB, reg: regInfo{ inputs: []inputInfo{ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 @@ 
-50832,15 +52776,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVHBRload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AMOVHBR, + name: "SUBWconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.ASUBW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, outputs: []outputInfo{ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 @@ -50848,15 +52792,18 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWBRload", + name: "SUBload", auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, symEffect: SymRead, - asm: s390x.AMOVWBR, + asm: s390x.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, outputs: []outputInfo{ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 @@ -50864,15 +52811,18 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVDBRload", + name: "SUBWload", auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, symEffect: SymRead, - asm: s390x.AMOVDBR, + asm: s390x.ASUBW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, outputs: []outputInfo{ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 @@ -50880,266 +52830,291 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVBstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.AMOVB, + name: "MULLD", + argLen: 2, + commutative: true, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMULLD, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "MOVHstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.AMOVH, + name: "MULLW", + argLen: 2, + commutative: true, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMULLW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "MOVWstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.AMOVW, + name: "MULLDconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMULLD, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + 
outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "MOVDstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.AMOVD, + name: "MULLWconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMULLW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "MOVHBRstore", + name: "MULLDload", auxType: auxSymOff, argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.AMOVHBR, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.AMULLD, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "MOVWBRstore", + name: "MULLWload", auxType: auxSymOff, argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.AMOVWBR, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.AMULLW, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "MOVDBRstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.AMOVDBR, + name: "MULHD", + argLen: 2, + commutative: true, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMULHD, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + clobbers: 2048, // R11 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MVC", - auxType: auxSymValAndOff, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, - faultOnNilArg1: true, - symEffect: SymNone, - asm: s390x.AMVC, + name: "MULHDU", + argLen: 2, + commutative: true, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMULHDU, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + clobbers: 2048, // R11 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVBZloadidx", - auxType: auxSymOff, - argLen: 3, - commutative: true, - symEffect: SymRead, - asm: s390x.AMOVBZ, + name: "DIVD", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.ADIVD, reg: regInfo{ inputs: []inputInfo{ - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {0, 4295023614}, // R1 R2 
R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, + clobbers: 2048, // R11 outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVBloadidx", - auxType: auxSymOff, - argLen: 3, - commutative: true, - symEffect: SymRead, - asm: s390x.AMOVB, + name: "DIVW", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.ADIVW, reg: regInfo{ inputs: []inputInfo{ - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, + clobbers: 2048, // R11 outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVHZloadidx", - auxType: auxSymOff, - argLen: 3, - commutative: true, - symEffect: SymRead, - asm: s390x.AMOVHZ, + name: "DIVDU", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.ADIVDU, reg: regInfo{ inputs: []inputInfo{ - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, + clobbers: 2048, // R11 outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVHloadidx", - auxType: auxSymOff, - argLen: 3, - commutative: true, - symEffect: SymRead, - asm: s390x.AMOVH, + name: "DIVWU", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.ADIVWU, reg: regInfo{ inputs: []inputInfo{ - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, + clobbers: 2048, // R11 outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWZloadidx", - auxType: auxSymOff, - argLen: 3, - commutative: true, - symEffect: SymRead, - asm: s390x.AMOVWZ, + name: "MODD", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMODD, reg: regInfo{ inputs: []inputInfo{ - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, + clobbers: 2048, // R11 outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWloadidx", - auxType: auxSymOff, - argLen: 3, - commutative: true, - symEffect: SymRead, - asm: s390x.AMOVW, + name: "MODW", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMODW, reg: regInfo{ inputs: []inputInfo{ - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, + clobbers: 2048, // R11 outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 
R8 R9 R11 R12 R14 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVDloadidx", - auxType: auxSymOff, - argLen: 3, - commutative: true, - symEffect: SymRead, - asm: s390x.AMOVD, + name: "MODDU", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMODDU, reg: regInfo{ inputs: []inputInfo{ - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, + clobbers: 2048, // R11 outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVHBRloadidx", - auxType: auxSymOff, - argLen: 3, - commutative: true, - symEffect: SymRead, - asm: s390x.AMOVHBR, + name: "MODWU", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMODWU, reg: regInfo{ inputs: []inputInfo{ - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, + clobbers: 2048, // R11 outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWBRloadidx", - auxType: auxSymOff, - argLen: 3, - commutative: true, - symEffect: SymRead, - asm: s390x.AMOVWBR, + name: "AND", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: s390x.AAND, reg: regInfo{ inputs: []inputInfo{ - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, outputs: []outputInfo{ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 @@ -51147,16 +53122,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVDBRloadidx", - auxType: auxSymOff, - argLen: 3, - commutative: true, - symEffect: SymRead, - asm: s390x.AMOVDBR, + name: "ANDW", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: s390x.AANDW, reg: regInfo{ inputs: []inputInfo{ - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, outputs: []outputInfo{ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 @@ -51164,392 +53138,491 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVBstoreidx", - auxType: auxSymOff, - argLen: 4, - commutative: true, - symEffect: SymWrite, - asm: s390x.AMOVB, + name: "ANDconst", + auxType: auxInt64, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "MOVHstoreidx", - auxType: auxSymOff, - argLen: 4, - commutative: true, - symEffect: SymWrite, - asm: s390x.AMOVH, + name: "ANDWconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: 
s390x.AANDW, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "MOVWstoreidx", - auxType: auxSymOff, - argLen: 4, - commutative: true, - symEffect: SymWrite, - asm: s390x.AMOVW, + name: "ANDload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "MOVDstoreidx", - auxType: auxSymOff, - argLen: 4, - commutative: true, - symEffect: SymWrite, - asm: s390x.AMOVD, + name: "ANDWload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.AANDW, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "MOVHBRstoreidx", - auxType: auxSymOff, - argLen: 4, - commutative: true, - symEffect: SymWrite, - asm: s390x.AMOVHBR, + name: "OR", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: s390x.AOR, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "MOVWBRstoreidx", - auxType: auxSymOff, - argLen: 4, - commutative: true, - symEffect: SymWrite, - asm: s390x.AMOVWBR, + name: "ORW", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: s390x.AORW, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "MOVDBRstoreidx", - auxType: auxSymOff, - argLen: 4, - commutative: true, - symEffect: SymWrite, - asm: s390x.AMOVDBR, + name: "ORconst", + auxType: auxInt64, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AOR, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 
23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "MOVBstoreconst", - auxType: auxSymValAndOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.AMOVB, + name: "ORWconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AORW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "MOVHstoreconst", - auxType: auxSymValAndOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.AMOVH, + name: "ORload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.AOR, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "MOVWstoreconst", - auxType: auxSymValAndOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.AMOVW, + name: "ORWload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.AORW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "MOVDstoreconst", - auxType: auxSymValAndOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.AMOVD, + name: "XOR", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: s390x.AXOR, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "CLEAR", - auxType: auxSymValAndOff, - argLen: 2, - clobberFlags: true, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.ACLEAR, + name: "XORW", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: s390x.AXORW, reg: regInfo{ inputs: []inputInfo{ - {0, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "CALLstatic", - auxType: auxCallOff, + name: "XORconst", + auxType: auxInt64, argLen: 1, + resultInArg0: true, clobberFlags: true, - call: true, + asm: s390x.AXOR, reg: regInfo{ - clobbers: 4294933503, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 g R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, }, }, { - name: "CALLtail", - auxType: auxCallOff, + name: "XORWconst", + auxType: auxInt32, argLen: 1, + resultInArg0: true, clobberFlags: true, - call: true, - 
tailCall: true, + asm: s390x.AXORW, reg: regInfo{ - clobbers: 4294933503, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 g R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, }, }, { - name: "CALLclosure", - auxType: auxCallOff, - argLen: 3, - clobberFlags: true, - call: true, + name: "XORload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.AXOR, reg: regInfo{ inputs: []inputInfo{ - {1, 4096}, // R12 - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, - clobbers: 4294933503, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 g R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, { - name: "CALLinter", - auxType: auxCallOff, - argLen: 2, - clobberFlags: true, - call: true, + name: "XORWload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.AXORW, reg: regInfo{ inputs: []inputInfo{ - {0, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, - clobbers: 4294933503, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 g R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, { - name: "InvertFlags", - argLen: 1, - reg: regInfo{}, + name: "ADDC", + argLen: 2, + commutative: true, + asm: s390x.AADDC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, }, { - name: "LoweredGetG", - argLen: 1, + name: "ADDCconst", + auxType: auxInt16, + argLen: 1, + asm: s390x.AADDC, reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, outputs: []outputInfo{ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "LoweredGetClosurePtr", - argLen: 0, - zeroWidth: true, + name: "ADDE", + argLen: 3, + commutative: true, + resultInArg0: true, + asm: s390x.AADDE, reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, outputs: []outputInfo{ - {0, 4096}, // R12 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "LoweredGetCallerSP", - argLen: 1, - rematerializeable: true, + name: "SUBC", + argLen: 2, + asm: s390x.ASUBC, reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, outputs: []outputInfo{ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "LoweredGetCallerPC", - argLen: 0, - rematerializeable: true, + name: "SUBE", + argLen: 3, + resultInArg0: true, + asm: s390x.ASUBE, reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, 
outputs: []outputInfo{ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "LoweredNilCheck", - argLen: 2, - clobberFlags: true, - nilCheck: true, - faultOnNilArg0: true, + name: "CMP", + argLen: 2, + asm: s390x.ACMP, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "LoweredRound32F", - argLen: 1, - resultInArg0: true, - zeroWidth: true, + name: "CMPW", + argLen: 2, + asm: s390x.ACMPW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "LoweredRound64F", - argLen: 1, - resultInArg0: true, - zeroWidth: true, + name: "CMPU", + argLen: 2, + asm: s390x.ACMPU, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "LoweredWB", - auxType: auxInt64, - argLen: 1, - clobberFlags: true, + name: "CMPWU", + argLen: 2, + asm: s390x.ACMPWU, reg: regInfo{ - clobbers: 4294918146, // R1 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - outputs: []outputInfo{ - {0, 512}, // R9 + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "LoweredPanicBoundsA", - auxType: auxInt64, - argLen: 3, - call: true, + name: "CMPconst", + auxType: auxInt32, + argLen: 1, + asm: s390x.ACMP, reg: regInfo{ inputs: []inputInfo{ - {0, 4}, // R2 - {1, 8}, // R3 + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "LoweredPanicBoundsB", - auxType: auxInt64, - argLen: 3, - call: true, + name: "CMPWconst", + auxType: auxInt32, + argLen: 1, + asm: s390x.ACMPW, reg: regInfo{ inputs: []inputInfo{ - {0, 2}, // R1 - {1, 4}, // R2 + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "LoweredPanicBoundsC", - auxType: auxInt64, - argLen: 3, - call: true, + name: "CMPUconst", + auxType: auxInt32, + argLen: 1, + asm: s390x.ACMPU, reg: regInfo{ inputs: []inputInfo{ - {0, 1}, // R0 - {1, 2}, // R1 + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "FlagEQ", - argLen: 0, - reg: regInfo{}, + name: "CMPWUconst", + auxType: auxInt32, + argLen: 1, + asm: s390x.ACMPWU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, }, { - name: "FlagLT", - argLen: 0, - reg: regInfo{}, + name: "FCMPS", + argLen: 2, + asm: s390x.ACEBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, }, { - name: "FlagGT", - argLen: 0, - reg: regInfo{}, + name: "FCMP", + argLen: 2, + asm: s390x.AFCMPU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 
F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, }, { - name: "FlagOV", - argLen: 0, - reg: regInfo{}, + name: "LTDBR", + argLen: 1, + asm: s390x.ALTDBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, }, { - name: "SYNC", + name: "LTEBR", argLen: 1, - asm: s390x.ASYNC, - reg: regInfo{}, + asm: s390x.ALTEBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, }, { - name: "MOVBZatomicload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AMOVBZ, + name: "SLD", + argLen: 2, + asm: s390x.ASLD, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, outputs: []outputInfo{ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 @@ -51557,15 +53630,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWZatomicload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AMOVWZ, + name: "SLW", + argLen: 2, + asm: s390x.ASLW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, outputs: []outputInfo{ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 @@ -51573,15 +53644,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVDatomicload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AMOVD, + name: "SLDconst", + auxType: auxUInt8, + argLen: 1, + asm: s390x.ASLD, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, outputs: []outputInfo{ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 @@ -51589,66 +53658,55 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVBatomicstore", - auxType: auxSymOff, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - symEffect: SymWrite, - asm: s390x.AMOVB, + name: "SLWconst", + auxType: auxUInt8, + argLen: 1, + asm: s390x.ASLW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "MOVWatomicstore", - auxType: auxSymOff, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - symEffect: SymWrite, - asm: s390x.AMOVW, + name: "SRD", + argLen: 2, + asm: s390x.ASRD, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "MOVDatomicstore", - auxType: auxSymOff, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - symEffect: SymWrite, - asm: s390x.AMOVD, + name: "SRW", + argLen: 2, + asm: s390x.ASRW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 
R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "LAA", - auxType: auxSymOff, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - symEffect: SymRdWr, - asm: s390x.ALAA, + name: "SRDconst", + auxType: auxUInt8, + argLen: 1, + asm: s390x.ASRD, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, outputs: []outputInfo{ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 @@ -51656,18 +53714,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LAAG", - auxType: auxSymOff, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - symEffect: SymRdWr, - asm: s390x.ALAAG, + name: "SRWconst", + auxType: auxUInt8, + argLen: 1, + asm: s390x.ASRW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, outputs: []outputInfo{ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 @@ -51675,173 +53728,130 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "AddTupleFirst32", - argLen: 2, - reg: regInfo{}, - }, - { - name: "AddTupleFirst64", - argLen: 2, - reg: regInfo{}, - }, - { - name: "LAN", - argLen: 3, - clobberFlags: true, - hasSideEffects: true, - asm: s390x.ALAN, + name: "SRAD", + argLen: 2, + clobberFlags: true, + asm: s390x.ASRAD, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "LANfloor", - argLen: 3, - clobberFlags: true, - hasSideEffects: true, - asm: s390x.ALAN, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2}, // R1 - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - }, - clobbers: 2, // R1 - }, - }, - { - name: "LAO", - argLen: 3, - clobberFlags: true, - hasSideEffects: true, - asm: s390x.ALAO, + name: "SRAW", + argLen: 2, + clobberFlags: true, + asm: s390x.ASRAW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "LAOfloor", - argLen: 3, - clobberFlags: true, - hasSideEffects: true, - asm: s390x.ALAO, + name: "SRADconst", + auxType: auxUInt8, + argLen: 1, + clobberFlags: true, + asm: s390x.ASRAD, reg: regInfo{ inputs: []inputInfo{ - {0, 2}, // R1 - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, - clobbers: 2, // R1 }, }, { - name: "LoweredAtomicCas32", - auxType: auxSymOff, - 
argLen: 4, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - symEffect: SymRdWr, - asm: s390x.ACS, + name: "SRAWconst", + auxType: auxUInt8, + argLen: 1, + clobberFlags: true, + asm: s390x.ASRAW, reg: regInfo{ inputs: []inputInfo{ - {1, 1}, // R0 - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, - clobbers: 1, // R0 outputs: []outputInfo{ - {1, 0}, {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "LoweredAtomicCas64", - auxType: auxSymOff, - argLen: 4, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - symEffect: SymRdWr, - asm: s390x.ACSG, + name: "RLLG", + argLen: 2, + asm: s390x.ARLLG, reg: regInfo{ inputs: []inputInfo{ - {1, 1}, // R0 - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, - clobbers: 1, // R0 outputs: []outputInfo{ - {1, 0}, {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "LoweredAtomicExchange32", - auxType: auxSymOff, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - symEffect: SymRdWr, - asm: s390x.ACS, + name: "RLL", + argLen: 2, + asm: s390x.ARLL, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, outputs: []outputInfo{ - {1, 0}, - {0, 1}, // R0 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "LoweredAtomicExchange64", - auxType: auxSymOff, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - symEffect: SymRdWr, - asm: s390x.ACSG, + name: "RLLconst", + auxType: auxUInt8, + argLen: 1, + asm: s390x.ARLL, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, outputs: []outputInfo{ - {1, 0}, - {0, 1}, // R0 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "FLOGR", - argLen: 1, + name: "RXSBG", + auxType: auxS390XRotateParams, + argLen: 2, + resultInArg0: true, clobberFlags: true, - asm: s390x.AFLOGR, + asm: s390x.ARXSBG, reg: regInfo{ inputs: []inputInfo{ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, - clobbers: 2, // R1 outputs: []outputInfo{ - {0, 1}, // R0 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "POPCNT", + name: "RISBGZ", + auxType: auxS390XRotateParams, argLen: 1, clobberFlags: true, - asm: s390x.APOPCNT, + asm: s390x.ARISBGZ, reg: regInfo{ inputs: []inputInfo{ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 @@ -51852,480 +53862,423 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MLGR", - argLen: 2, - asm: s390x.AMLGR, + name: "NEG", + argLen: 1, + clobberFlags: true, + asm: s390x.ANEG, reg: regInfo{ inputs: []inputInfo{ - {1, 8}, // R3 {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, outputs: []outputInfo{ - {0, 4}, // R2 - {1, 8}, // R3 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "SumBytes2", 
- argLen: 1, - reg: regInfo{}, - }, - { - name: "SumBytes4", - argLen: 1, - reg: regInfo{}, - }, - { - name: "SumBytes8", - argLen: 1, - reg: regInfo{}, - }, - { - name: "STMG2", - auxType: auxSymOff, - argLen: 4, - clobberFlags: true, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.ASTMG, + name: "NEGW", + argLen: 1, + clobberFlags: true, + asm: s390x.ANEGW, reg: regInfo{ inputs: []inputInfo{ - {1, 2}, // R1 - {2, 4}, // R2 - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, - }, - }, - { - name: "STMG3", - auxType: auxSymOff, - argLen: 5, - clobberFlags: true, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.ASTMG, - reg: regInfo{ - inputs: []inputInfo{ - {1, 2}, // R1 - {2, 4}, // R2 - {3, 8}, // R3 - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "STMG4", - auxType: auxSymOff, - argLen: 6, - clobberFlags: true, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.ASTMG, + name: "NOT", + argLen: 1, + resultInArg0: true, + clobberFlags: true, reg: regInfo{ inputs: []inputInfo{ - {1, 2}, // R1 - {2, 4}, // R2 - {3, 8}, // R3 - {4, 16}, // R4 - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "STM2", - auxType: auxSymOff, - argLen: 4, - clobberFlags: true, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.ASTMY, + name: "NOTW", + argLen: 1, + resultInArg0: true, + clobberFlags: true, reg: regInfo{ inputs: []inputInfo{ - {1, 2}, // R1 - {2, 4}, // R2 - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "STM3", - auxType: auxSymOff, - argLen: 5, - clobberFlags: true, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.ASTMY, + name: "FSQRT", + argLen: 1, + asm: s390x.AFSQRT, reg: regInfo{ inputs: []inputInfo{ - {1, 2}, // R1 - {2, 4}, // R2 - {3, 8}, // R3 - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "STM4", - auxType: auxSymOff, - argLen: 6, - clobberFlags: true, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.ASTMY, + name: "FSQRTS", + argLen: 1, + asm: s390x.AFSQRTS, reg: regInfo{ inputs: []inputInfo{ - {1, 2}, // R1 - {2, 4}, // R2 - {3, 8}, // R3 - {4, 16}, // R4 - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "LoweredMove", - auxType: auxInt64, - argLen: 4, - clobberFlags: true, - faultOnNilArg0: true, - faultOnNilArg1: true, + name: "LOCGR", + auxType: auxS390XCCMask, + argLen: 3, + resultInArg0: true, + asm: s390x.ALOCGR, reg: regInfo{ inputs: []inputInfo{ - {0, 2}, // R1 - {1, 4}, // R2 - {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: 
[]outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, - clobbers: 6, // R1 R2 }, }, { - name: "LoweredZero", - auxType: auxInt64, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, + name: "MOVBreg", + argLen: 1, + asm: s390x.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 2}, // R1 - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, - clobbers: 2, // R1 - }, - }, - - { - name: "LoweredStaticCall", - auxType: auxCallOff, - argLen: 1, - call: true, - reg: regInfo{ - clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g - }, - }, - { - name: "LoweredTailCall", - auxType: auxCallOff, - argLen: 1, - call: true, - tailCall: true, - reg: regInfo{ - clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g }, }, { - name: "LoweredClosureCall", - auxType: auxCallOff, - argLen: 3, - call: true, + name: "MOVBZreg", + argLen: 1, + asm: s390x.AMOVBZ, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, - clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g }, }, { - name: "LoweredInterCall", - auxType: auxCallOff, - argLen: 2, - call: true, + name: "MOVHreg", + argLen: 1, + asm: s390x.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, - clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g }, }, { - name: "LoweredAddr", - auxType: auxSymOff, - argLen: 1, - rematerializeable: true, - symEffect: SymAddr, + name: "MOVHZreg", + argLen: 1, + asm: s390x.AMOVHZ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "LoweredMove", - auxType: auxInt64, - argLen: 3, + name: "MOVWreg", + argLen: 1, + asm: s390x.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "LoweredZero", - auxType: auxInt64, - argLen: 2, + name: "MOVWZreg", + argLen: 1, + asm: 
s390x.AMOVWZ, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, - }, - }, - { - name: "LoweredGetClosurePtr", - argLen: 0, - reg: regInfo{ outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "LoweredGetCallerPC", + name: "MOVDconst", + auxType: auxInt64, argLen: 0, rematerializeable: true, + asm: s390x.AMOVD, reg: regInfo{ outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "LoweredGetCallerSP", - argLen: 1, - rematerializeable: true, + name: "LDGR", + argLen: 1, + asm: s390x.ALDGR, reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "LoweredNilCheck", - argLen: 2, - nilCheck: true, - faultOnNilArg0: true, + name: "LGDR", + argLen: 1, + asm: s390x.ALGDR, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "LoweredWB", - auxType: auxInt64, - argLen: 1, + name: "CFDBRA", + argLen: 1, + clobberFlags: true, + asm: s390x.ACFDBRA, reg: regInfo{ - clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "LoweredConvert", - argLen: 2, + name: "CGDBRA", + argLen: 1, + clobberFlags: true, + asm: s390x.ACGDBRA, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "Select", - argLen: 3, - asm: wasm.ASelect, + name: "CFEBRA", + argLen: 1, + clobberFlags: true, + asm: s390x.ACFEBRA, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {2, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64Load8U", - auxType: auxInt64, - argLen: 2, - asm: wasm.AI64Load8U, + name: "CGEBRA", + argLen: 1, + clobberFlags: true, + asm: s390x.ACGEBRA, reg: regInfo{ inputs: []inputInfo{ - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 
SP SB + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64Load8S", - auxType: auxInt64, - argLen: 2, - asm: wasm.AI64Load8S, + name: "CEFBRA", + argLen: 1, + clobberFlags: true, + asm: s390x.ACEFBRA, reg: regInfo{ inputs: []inputInfo{ - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "I64Load16U", - auxType: auxInt64, - argLen: 2, - asm: wasm.AI64Load16U, + name: "CDFBRA", + argLen: 1, + clobberFlags: true, + asm: s390x.ACDFBRA, reg: regInfo{ inputs: []inputInfo{ - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "I64Load16S", - auxType: auxInt64, - argLen: 2, - asm: wasm.AI64Load16S, + name: "CEGBRA", + argLen: 1, + clobberFlags: true, + asm: s390x.ACEGBRA, reg: regInfo{ inputs: []inputInfo{ - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "I64Load32U", - auxType: auxInt64, - argLen: 2, - asm: wasm.AI64Load32U, + name: "CDGBRA", + argLen: 1, + clobberFlags: true, + asm: s390x.ACDGBRA, reg: regInfo{ inputs: []inputInfo{ - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "I64Load32S", - auxType: auxInt64, - argLen: 2, - asm: wasm.AI64Load32S, + name: "CLFEBR", + argLen: 1, + clobberFlags: true, + asm: s390x.ACLFEBR, reg: regInfo{ inputs: []inputInfo{ - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64Load", - auxType: auxInt64, - argLen: 2, - asm: wasm.AI64Load, + name: "CLFDBR", + argLen: 1, + clobberFlags: true, + asm: s390x.ACLFDBR, reg: regInfo{ inputs: []inputInfo{ - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64Store8", - auxType: auxInt64, - argLen: 3, - asm: wasm.AI64Store8, + name: "CLGEBR", + argLen: 1, + clobberFlags: true, + asm: s390x.ACLGEBR, reg: regInfo{ inputs: 
[]inputInfo{ - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64Store16", - auxType: auxInt64, - argLen: 3, - asm: wasm.AI64Store16, + name: "CLGDBR", + argLen: 1, + clobberFlags: true, + asm: s390x.ACLGDBR, reg: regInfo{ inputs: []inputInfo{ - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64Store32", - auxType: auxInt64, - argLen: 3, - asm: wasm.AI64Store32, + name: "CELFBR", + argLen: 1, + clobberFlags: true, + asm: s390x.ACELFBR, reg: regInfo{ inputs: []inputInfo{ - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "I64Store", - auxType: auxInt64, - argLen: 3, - asm: wasm.AI64Store, + name: "CDLFBR", + argLen: 1, + clobberFlags: true, + asm: s390x.ACDLFBR, reg: regInfo{ inputs: []inputInfo{ - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "F32Load", - auxType: auxInt64, - argLen: 2, - asm: wasm.AF32Load, + name: "CELGBR", + argLen: 1, + clobberFlags: true, + asm: s390x.ACELGBR, reg: regInfo{ inputs: []inputInfo{ - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, outputs: []outputInfo{ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 @@ -52333,790 +54286,828 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "F64Load", - auxType: auxInt64, - argLen: 2, - asm: wasm.AF64Load, + name: "CDLGBR", + argLen: 1, + clobberFlags: true, + asm: s390x.ACDLGBR, reg: regInfo{ inputs: []inputInfo{ - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "F32Store", - auxType: auxInt64, - argLen: 3, - asm: wasm.AF32Store, + name: "LEDBR", + argLen: 1, + asm: s390x.ALEDBR, reg: regInfo{ inputs: []inputInfo{ - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "F64Store", - 
auxType: auxInt64, - argLen: 3, - asm: wasm.AF64Store, + name: "LDEBR", + argLen: 1, + asm: s390x.ALDEBR, reg: regInfo{ inputs: []inputInfo{ - {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, - }, - }, - { - name: "I64Const", - auxType: auxInt64, - argLen: 0, - rematerializeable: true, - reg: regInfo{ outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "F32Const", - auxType: auxFloat32, - argLen: 0, + name: "MOVDaddr", + auxType: auxSymOff, + argLen: 1, rematerializeable: true, + symEffect: SymAddr, reg: regInfo{ + inputs: []inputInfo{ + {0, 4295000064}, // SP SB + }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "F64Const", - auxType: auxFloat64, - argLen: 0, - rematerializeable: true, + name: "MOVDaddridx", + auxType: auxSymOff, + argLen: 2, + symEffect: SymAddr, reg: regInfo{ + inputs: []inputInfo{ + {0, 4295000064}, // SP SB + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64Eqz", - argLen: 1, - asm: wasm.AI64Eqz, + name: "MOVBZload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVBZ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64Eq", - argLen: 2, - asm: wasm.AI64Eq, + name: "MOVBload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64Ne", - argLen: 2, - asm: wasm.AI64Ne, + name: "MOVHZload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVHZ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64LtS", - argLen: 2, - asm: wasm.AI64LtS, + name: "MOVHload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 
R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64LtU", - argLen: 2, - asm: wasm.AI64LtU, + name: "MOVWZload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVWZ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64GtS", - argLen: 2, - asm: wasm.AI64GtS, + name: "MOVWload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64GtU", - argLen: 2, - asm: wasm.AI64GtU, + name: "MOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64LeS", - argLen: 2, - asm: wasm.AI64LeS, + name: "MOVWBR", + argLen: 1, + asm: s390x.AMOVWBR, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64LeU", - argLen: 2, - asm: wasm.AI64LeU, + name: "MOVDBR", + argLen: 1, + asm: s390x.AMOVDBR, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64GeS", - argLen: 2, - asm: wasm.AI64GeS, + name: "MOVHBRload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVHBR, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 
R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64GeU", - argLen: 2, - asm: wasm.AI64GeU, + name: "MOVWBRload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVWBR, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "F32Eq", - argLen: 2, - asm: wasm.AF32Eq, + name: "MOVDBRload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVDBR, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "F32Ne", - argLen: 2, - asm: wasm.AF32Ne, + name: "MOVBstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "F32Lt", - argLen: 2, - asm: wasm.AF32Lt, + name: "MOVHstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "F32Gt", - argLen: 2, - asm: wasm.AF32Gt, + name: "MOVWstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "F32Le", - argLen: 2, - asm: wasm.AF32Le, + name: "MOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 
4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "F32Ge", - argLen: 2, - asm: wasm.AF32Ge, + name: "MOVHBRstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVHBR, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "F64Eq", - argLen: 2, - asm: wasm.AF64Eq, + name: "MOVWBRstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVWBR, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "F64Ne", - argLen: 2, - asm: wasm.AF64Ne, + name: "MOVDBRstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVDBR, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "F64Lt", - argLen: 2, - asm: wasm.AF64Lt, + name: "MVC", + auxType: auxSymValAndOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + faultOnNilArg1: true, + symEffect: SymNone, + asm: s390x.AMVC, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "F64Gt", - argLen: 2, - asm: wasm.AF64Gt, + name: "MOVBZloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: s390x.AMOVBZ, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 
R7 R8 R9 R11 R12 R14 }, }, }, { - name: "F64Le", - argLen: 2, - asm: wasm.AF64Le, + name: "MOVBloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: s390x.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "F64Ge", - argLen: 2, - asm: wasm.AF64Ge, + name: "MOVHZloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: s390x.AMOVHZ, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64Add", - argLen: 2, - asm: wasm.AI64Add, + name: "MOVHloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: s390x.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64AddConst", - auxType: auxInt64, - argLen: 1, - asm: wasm.AI64Add, + name: "MOVWZloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: s390x.AMOVWZ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64Sub", - argLen: 2, - asm: wasm.AI64Sub, + name: "MOVWloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: s390x.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64Mul", - argLen: 2, - asm: wasm.AI64Mul, + name: "MOVDloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: s390x.AMOVD, reg: regInfo{ inputs: []inputInfo{ 
- {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64DivS", - argLen: 2, - asm: wasm.AI64DivS, + name: "MOVHBRloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: s390x.AMOVHBR, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64DivU", - argLen: 2, - asm: wasm.AI64DivU, + name: "MOVWBRloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: s390x.AMOVWBR, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64RemS", - argLen: 2, - asm: wasm.AI64RemS, + name: "MOVDBRloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: s390x.AMOVDBR, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64RemU", - argLen: 2, - asm: wasm.AI64RemU, + name: "MOVBstoreidx", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: s390x.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - }, - outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "I64And", - argLen: 2, - asm: wasm.AI64And, + name: "MOVHstoreidx", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: s390x.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - }, - outputs: 
[]outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "I64Or", - argLen: 2, - asm: wasm.AI64Or, + name: "MOVWstoreidx", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: s390x.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - }, - outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "I64Xor", - argLen: 2, - asm: wasm.AI64Xor, + name: "MOVDstoreidx", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: s390x.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - }, - outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "I64Shl", - argLen: 2, - asm: wasm.AI64Shl, + name: "MOVHBRstoreidx", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: s390x.AMOVHBR, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - }, - outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "I64ShrS", - argLen: 2, - asm: wasm.AI64ShrS, + name: "MOVWBRstoreidx", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: s390x.AMOVWBR, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - }, - outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "I64ShrU", - argLen: 2, - asm: wasm.AI64ShrU, + name: "MOVDBRstoreidx", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: s390x.AMOVDBR, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - }, - outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 
R11 R12 R14 SP + {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "F32Neg", - argLen: 1, - asm: wasm.AF32Neg, + name: "MOVBstoreconst", + auxType: auxSymValAndOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, }, }, { - name: "F32Add", - argLen: 2, - asm: wasm.AF32Add, + name: "MOVHstoreconst", + auxType: auxSymValAndOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, }, }, { - name: "F32Sub", - argLen: 2, - asm: wasm.AF32Sub, + name: "MOVWstoreconst", + auxType: auxSymValAndOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, }, }, { - name: "F32Mul", - argLen: 2, - asm: wasm.AF32Mul, + name: "MOVDstoreconst", + auxType: auxSymValAndOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, }, }, { - name: "F32Div", - argLen: 2, - asm: wasm.AF32Div, + name: "CLEAR", + auxType: auxSymValAndOff, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.ACLEAR, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "F64Neg", - argLen: 1, - asm: wasm.AF64Neg, + name: "CALLstatic", + auxType: auxCallOff, + argLen: 1, + clobberFlags: true, + call: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, + clobbers: 4294933503, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 g R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, { - name: "F64Add", - argLen: 2, - asm: wasm.AF64Add, + name: "CALLtail", + auxType: auxCallOff, + argLen: 1, + clobberFlags: true, + call: true, + tailCall: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 
281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, + clobbers: 4294933503, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 g R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, { - name: "F64Sub", - argLen: 2, - asm: wasm.AF64Sub, + name: "CALLclosure", + auxType: auxCallOff, + argLen: 3, + clobberFlags: true, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4096}, // R12 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, + clobbers: 4294933503, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 g R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, { - name: "F64Mul", - argLen: 2, - asm: wasm.AF64Mul, + name: "CALLinter", + auxType: auxCallOff, + argLen: 2, + clobberFlags: true, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, + clobbers: 4294933503, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 g R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, { - name: "F64Div", - argLen: 2, - asm: wasm.AF64Div, + name: "InvertFlags", + argLen: 1, + reg: regInfo{}, + }, + { + name: "LoweredGetG", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64TruncSatF64S", - argLen: 1, - asm: wasm.AI64TruncSatF64S, + name: "LoweredGetClosurePtr", + argLen: 0, + zeroWidth: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4096}, // R12 }, }, }, { - name: "I64TruncSatF64U", - argLen: 1, - asm: wasm.AI64TruncSatF64U, + name: "LoweredGetCallerSP", + argLen: 1, + rematerializeable: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64TruncSatF32S", - argLen: 1, - asm: wasm.AI64TruncSatF32S, + name: "LoweredGetCallerPC", + argLen: 0, + rematerializeable: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + 
{0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64TruncSatF32U", - argLen: 1, - asm: wasm.AI64TruncSatF32U, + name: "LoweredNilCheck", + argLen: 2, + clobberFlags: true, + nilCheck: true, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "F32ConvertI64S", - argLen: 1, - asm: wasm.AF32ConvertI64S, + name: "LoweredRound32F", + argLen: 1, + resultInArg0: true, + zeroWidth: true, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 @@ -53124,12 +55115,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "F32ConvertI64U", - argLen: 1, - asm: wasm.AF32ConvertI64U, + name: "LoweredRound64F", + argLen: 1, + resultInArg0: true, + zeroWidth: true, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 @@ -53137,10242 +55129,14022 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "F64ConvertI64S", - argLen: 1, - asm: wasm.AF64ConvertI64S, + name: "LoweredWB", + auxType: auxInt64, + argLen: 1, + clobberFlags: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 - }, + clobbers: 4294918146, // R1 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 512}, // R9 }, }, }, { - name: "F64ConvertI64U", - argLen: 1, - asm: wasm.AF64ConvertI64U, + name: "LoweredPanicBoundsA", + auxType: auxInt64, + argLen: 3, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 - }, - outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4}, // R2 + {1, 8}, // R3 }, }, }, { - name: "F32DemoteF64", - argLen: 1, - asm: wasm.AF32DemoteF64, + name: "LoweredPanicBoundsB", + auxType: auxInt64, + argLen: 3, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2}, // R1 + {1, 4}, // R2 }, }, }, { - name: "F64PromoteF32", - argLen: 1, - asm: wasm.AF64PromoteF32, + name: "LoweredPanicBoundsC", + auxType: auxInt64, + argLen: 3, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1}, // R0 + {1, 2}, // R1 }, }, }, { - name: "I64Extend8S", + name: "FlagEQ", + argLen: 0, + reg: regInfo{}, + }, + { + name: "FlagLT", + argLen: 0, + reg: regInfo{}, + }, + { + name: "FlagGT", + argLen: 0, + reg: regInfo{}, + }, + { + name: "FlagOV", + argLen: 0, + reg: regInfo{}, + }, + { + name: "SYNC", argLen: 1, - asm: 
wasm.AI64Extend8S, + asm: s390x.ASYNC, + reg: regInfo{}, + }, + { + name: "MOVBZatomicload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVBZ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64Extend16S", - argLen: 1, - asm: wasm.AI64Extend16S, + name: "MOVWZatomicload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVWZ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64Extend32S", - argLen: 1, - asm: wasm.AI64Extend32S, + name: "MOVDatomicload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "F32Sqrt", - argLen: 1, - asm: wasm.AF32Sqrt, + name: "MOVBatomicstore", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + symEffect: SymWrite, + asm: s390x.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "F32Trunc", - argLen: 1, - asm: wasm.AF32Trunc, + name: "MOVWatomicstore", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + symEffect: SymWrite, + asm: s390x.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "F32Ceil", - argLen: 1, - asm: wasm.AF32Ceil, + name: "MOVDatomicstore", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + symEffect: SymWrite, + asm: s390x.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "F32Floor", - argLen: 1, - asm: wasm.AF32Floor, + name: "LAA", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + 
hasSideEffects: true, + symEffect: SymRdWr, + asm: s390x.ALAA, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "F32Nearest", - argLen: 1, - asm: wasm.AF32Nearest, + name: "LAAG", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + symEffect: SymRdWr, + asm: s390x.ALAAG, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "F32Abs", - argLen: 1, - asm: wasm.AF32Abs, + name: "AddTupleFirst32", + argLen: 2, + reg: regInfo{}, + }, + { + name: "AddTupleFirst64", + argLen: 2, + reg: regInfo{}, + }, + { + name: "LAN", + argLen: 3, + clobberFlags: true, + hasSideEffects: true, + asm: s390x.ALAN, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "F32Copysign", - argLen: 2, - asm: wasm.AF32Copysign, + name: "LANfloor", + argLen: 3, + clobberFlags: true, + hasSideEffects: true, + asm: s390x.ALAN, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2}, // R1 + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, + clobbers: 2, // R1 }, }, { - name: "F64Sqrt", - argLen: 1, - asm: wasm.AF64Sqrt, + name: "LAO", + argLen: 3, + clobberFlags: true, + hasSideEffects: true, + asm: s390x.ALAO, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, - outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { + name: "LAOfloor", + argLen: 3, + clobberFlags: true, + hasSideEffects: true, + asm: s390x.ALAO, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2}, // R1 + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, + clobbers: 2, // R1 }, }, { - name: "F64Trunc", - argLen: 1, - asm: wasm.AF64Trunc, + name: "LoweredAtomicCas32", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + symEffect: SymRdWr, + asm: s390x.ACS, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1}, // R0 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 56319}, // R0 R1 R2 R3 R4 
R5 R6 R7 R8 R9 R11 R12 R14 SP }, + clobbers: 1, // R0 outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 0}, + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "F64Ceil", - argLen: 1, - asm: wasm.AF64Ceil, + name: "LoweredAtomicCas64", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + symEffect: SymRdWr, + asm: s390x.ACSG, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1}, // R0 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, + clobbers: 1, // R0 outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 0}, + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "F64Floor", - argLen: 1, - asm: wasm.AF64Floor, + name: "LoweredAtomicExchange32", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + symEffect: SymRdWr, + asm: s390x.ACS, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 0}, + {0, 1}, // R0 }, }, }, { - name: "F64Nearest", - argLen: 1, - asm: wasm.AF64Nearest, + name: "LoweredAtomicExchange64", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + symEffect: SymRdWr, + asm: s390x.ACSG, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 0}, + {0, 1}, // R0 }, }, }, { - name: "F64Abs", - argLen: 1, - asm: wasm.AF64Abs, + name: "FLOGR", + argLen: 1, + clobberFlags: true, + asm: s390x.AFLOGR, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, + clobbers: 2, // R1 outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1}, // R0 }, }, }, { - name: "F64Copysign", - argLen: 2, - asm: wasm.AF64Copysign, + name: "POPCNT", + argLen: 1, + clobberFlags: true, + asm: s390x.APOPCNT, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64Ctz", - argLen: 1, - asm: wasm.AI64Ctz, + name: "MLGR", + argLen: 2, + asm: s390x.AMLGR, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 8}, // R3 + {0, 
23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4}, // R2 + {1, 8}, // R3 }, }, }, { - name: "I64Clz", + name: "SumBytes2", argLen: 1, - asm: wasm.AI64Clz, + reg: regInfo{}, + }, + { + name: "SumBytes4", + argLen: 1, + reg: regInfo{}, + }, + { + name: "SumBytes8", + argLen: 1, + reg: regInfo{}, + }, + { + name: "STMG2", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.ASTMG, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - }, - outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {1, 2}, // R1 + {2, 4}, // R2 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "I32Rotl", - argLen: 2, - asm: wasm.AI32Rotl, + name: "STMG3", + auxType: auxSymOff, + argLen: 5, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.ASTMG, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - }, - outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {1, 2}, // R1 + {2, 4}, // R2 + {3, 8}, // R3 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "I64Rotl", - argLen: 2, - asm: wasm.AI64Rotl, + name: "STMG4", + auxType: auxSymOff, + argLen: 6, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.ASTMG, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - }, - outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {1, 2}, // R1 + {2, 4}, // R2 + {3, 8}, // R3 + {4, 16}, // R4 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "I64Popcnt", - argLen: 1, - asm: wasm.AI64Popcnt, + name: "STM2", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.ASTMY, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - }, - outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {1, 2}, // R1 + {2, 4}, // R2 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, - { - name: "Add8", - argLen: 2, - commutative: true, - generic: true, + name: "STM3", + auxType: auxSymOff, + argLen: 5, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.ASTMY, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // R1 + {2, 4}, // R2 + {3, 8}, // R3 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, }, { - name: "Add16", - argLen: 2, - commutative: true, - generic: true, + name: "STM4", + auxType: auxSymOff, + argLen: 6, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.ASTMY, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // R1 + {2, 4}, // R2 + {3, 8}, // R3 + {4, 16}, // R4 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, }, { - name: "Add32", - argLen: 2, - commutative: true, - generic: true, + name: "LoweredMove", + auxType: auxInt64, + argLen: 4, + clobberFlags: true, + 
faultOnNilArg0: true, + faultOnNilArg1: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2}, // R1 + {1, 4}, // R2 + {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + clobbers: 6, // R1 R2 + }, }, { - name: "Add64", - argLen: 2, - commutative: true, - generic: true, + name: "LoweredZero", + auxType: auxInt64, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2}, // R1 + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + clobbers: 2, // R1 + }, }, + { - name: "AddPtr", - argLen: 2, - generic: true, + name: "LoweredStaticCall", + auxType: auxCallOff, + argLen: 1, + call: true, + reg: regInfo{ + clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g + }, }, { - name: "Add32F", - argLen: 2, - commutative: true, - generic: true, + name: "LoweredTailCall", + auxType: auxCallOff, + argLen: 1, + call: true, + tailCall: true, + reg: regInfo{ + clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g + }, }, { - name: "Add64F", - argLen: 2, - commutative: true, - generic: true, + name: "LoweredClosureCall", + auxType: auxCallOff, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {1, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g + }, }, { - name: "Sub8", + name: "LoweredInterCall", + auxType: auxCallOff, argLen: 2, - generic: true, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g + }, }, { - name: "Sub16", - argLen: 2, - generic: true, + name: "LoweredAddr", + auxType: auxSymOff, + argLen: 1, + rematerializeable: true, + symEffect: SymAddr, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, }, { - name: "Sub32", - argLen: 2, + name: "LoweredMove", + auxType: auxInt64, + argLen: 3, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {1, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "LoweredZero", + auxType: auxInt64, + argLen: 2, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "LoweredGetClosurePtr", + argLen: 0, + reg: regInfo{ + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "LoweredGetCallerPC", + argLen: 0, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: 
"LoweredGetCallerSP", + argLen: 1, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "LoweredNilCheck", + argLen: 2, + nilCheck: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "LoweredWB", + auxType: auxInt64, + argLen: 1, + reg: regInfo{ + clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "LoweredConvert", + argLen: 2, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "Select", + argLen: 3, + asm: wasm.ASelect, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {2, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Load8U", + auxType: auxInt64, + argLen: 2, + asm: wasm.AI64Load8U, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Load8S", + auxType: auxInt64, + argLen: 2, + asm: wasm.AI64Load8S, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Load16U", + auxType: auxInt64, + argLen: 2, + asm: wasm.AI64Load16U, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Load16S", + auxType: auxInt64, + argLen: 2, + asm: wasm.AI64Load16S, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Load32U", + auxType: auxInt64, + argLen: 2, + asm: wasm.AI64Load32U, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Load32S", + auxType: auxInt64, + argLen: 2, + asm: wasm.AI64Load32S, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Load", + auxType: auxInt64, + argLen: 2, + asm: 
wasm.AI64Load, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Store8", + auxType: auxInt64, + argLen: 3, + asm: wasm.AI64Store8, + reg: regInfo{ + inputs: []inputInfo{ + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + }, + }, + { + name: "I64Store16", + auxType: auxInt64, + argLen: 3, + asm: wasm.AI64Store16, + reg: regInfo{ + inputs: []inputInfo{ + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + }, + }, + { + name: "I64Store32", + auxType: auxInt64, + argLen: 3, + asm: wasm.AI64Store32, + reg: regInfo{ + inputs: []inputInfo{ + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + }, + }, + { + name: "I64Store", + auxType: auxInt64, + argLen: 3, + asm: wasm.AI64Store, + reg: regInfo{ + inputs: []inputInfo{ + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + }, + }, + { + name: "F32Load", + auxType: auxInt64, + argLen: 2, + asm: wasm.AF32Load, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F64Load", + auxType: auxInt64, + argLen: 2, + asm: wasm.AF64Load, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F32Store", + auxType: auxInt64, + argLen: 3, + asm: wasm.AF32Store, + reg: regInfo{ + inputs: []inputInfo{ + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + }, + }, + { + name: "F64Store", + auxType: auxInt64, + argLen: 3, + asm: wasm.AF64Store, + reg: regInfo{ + inputs: []inputInfo{ + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + }, + }, + { + name: "I64Const", + auxType: auxInt64, + argLen: 0, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F32Const", + auxType: auxFloat32, + argLen: 0, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F64Const", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "I64Eqz", + argLen: 1, + asm: wasm.AI64Eqz, + reg: regInfo{ + inputs: []inputInfo{ + {0, 
281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Eq", + argLen: 2, + asm: wasm.AI64Eq, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Ne", + argLen: 2, + asm: wasm.AI64Ne, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64LtS", + argLen: 2, + asm: wasm.AI64LtS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64LtU", + argLen: 2, + asm: wasm.AI64LtU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64GtS", + argLen: 2, + asm: wasm.AI64GtS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64GtU", + argLen: 2, + asm: wasm.AI64GtU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64LeS", + argLen: 2, + asm: wasm.AI64LeS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64LeU", + argLen: 2, + asm: wasm.AI64LeU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64GeS", + argLen: 2, + asm: wasm.AI64GeS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 
R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64GeU", + argLen: 2, + asm: wasm.AI64GeU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F32Eq", + argLen: 2, + asm: wasm.AF32Eq, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F32Ne", + argLen: 2, + asm: wasm.AF32Ne, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F32Lt", + argLen: 2, + asm: wasm.AF32Lt, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F32Gt", + argLen: 2, + asm: wasm.AF32Gt, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F32Le", + argLen: 2, + asm: wasm.AF32Le, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F32Ge", + argLen: 2, + asm: wasm.AF32Ge, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F64Eq", + argLen: 2, + asm: wasm.AF64Eq, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F64Ne", + argLen: 2, + asm: wasm.AF64Ne, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F64Lt", + argLen: 2, + asm: wasm.AF64Lt, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 
F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F64Gt", + argLen: 2, + asm: wasm.AF64Gt, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F64Le", + argLen: 2, + asm: wasm.AF64Le, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F64Ge", + argLen: 2, + asm: wasm.AF64Ge, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Add", + argLen: 2, + asm: wasm.AI64Add, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64AddConst", + auxType: auxInt64, + argLen: 1, + asm: wasm.AI64Add, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Sub", + argLen: 2, + asm: wasm.AI64Sub, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Mul", + argLen: 2, + asm: wasm.AI64Mul, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64DivS", + argLen: 2, + asm: wasm.AI64DivS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64DivU", + argLen: 2, + asm: wasm.AI64DivU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 
R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64RemS", + argLen: 2, + asm: wasm.AI64RemS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64RemU", + argLen: 2, + asm: wasm.AI64RemU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64And", + argLen: 2, + asm: wasm.AI64And, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Or", + argLen: 2, + asm: wasm.AI64Or, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Xor", + argLen: 2, + asm: wasm.AI64Xor, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Shl", + argLen: 2, + asm: wasm.AI64Shl, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64ShrS", + argLen: 2, + asm: wasm.AI64ShrS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64ShrU", + argLen: 2, + asm: wasm.AI64ShrU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F32Neg", + argLen: 1, + asm: wasm.AF32Neg, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32Add", + argLen: 2, + asm: wasm.AF32Add, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + 
{1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32Sub", + argLen: 2, + asm: wasm.AF32Sub, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32Mul", + argLen: 2, + asm: wasm.AF32Mul, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32Div", + argLen: 2, + asm: wasm.AF32Div, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F64Neg", + argLen: 1, + asm: wasm.AF64Neg, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64Add", + argLen: 2, + asm: wasm.AF64Add, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64Sub", + argLen: 2, + asm: wasm.AF64Sub, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64Mul", + argLen: 2, + asm: wasm.AF64Mul, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64Div", + argLen: 2, + asm: wasm.AF64Div, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "I64TruncSatF64S", + argLen: 1, + asm: wasm.AI64TruncSatF64S, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 
R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64TruncSatF64U", + argLen: 1, + asm: wasm.AI64TruncSatF64U, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64TruncSatF32S", + argLen: 1, + asm: wasm.AI64TruncSatF32S, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64TruncSatF32U", + argLen: 1, + asm: wasm.AI64TruncSatF32U, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F32ConvertI64S", + argLen: 1, + asm: wasm.AF32ConvertI64S, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32ConvertI64U", + argLen: 1, + asm: wasm.AF32ConvertI64U, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F64ConvertI64S", + argLen: 1, + asm: wasm.AF64ConvertI64S, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64ConvertI64U", + argLen: 1, + asm: wasm.AF64ConvertI64U, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F32DemoteF64", + argLen: 1, + asm: wasm.AF32DemoteF64, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F64PromoteF32", + argLen: 1, + asm: wasm.AF64PromoteF32, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "I64Extend8S", + argLen: 1, + asm: wasm.AI64Extend8S, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Extend16S", + argLen: 1, + asm: wasm.AI64Extend16S, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Extend32S", + argLen: 1, + asm: wasm.AI64Extend32S, + reg: regInfo{ + 
inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F32Sqrt", + argLen: 1, + asm: wasm.AF32Sqrt, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32Trunc", + argLen: 1, + asm: wasm.AF32Trunc, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32Ceil", + argLen: 1, + asm: wasm.AF32Ceil, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32Floor", + argLen: 1, + asm: wasm.AF32Floor, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32Nearest", + argLen: 1, + asm: wasm.AF32Nearest, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32Abs", + argLen: 1, + asm: wasm.AF32Abs, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32Copysign", + argLen: 2, + asm: wasm.AF32Copysign, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F64Sqrt", + argLen: 1, + asm: wasm.AF64Sqrt, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64Trunc", + argLen: 1, + asm: wasm.AF64Trunc, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64Ceil", + argLen: 1, + asm: wasm.AF64Ceil, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64Floor", + argLen: 1, + asm: wasm.AF64Floor, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: 
[]outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64Nearest", + argLen: 1, + asm: wasm.AF64Nearest, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64Abs", + argLen: 1, + asm: wasm.AF64Abs, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64Copysign", + argLen: 2, + asm: wasm.AF64Copysign, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "I64Ctz", + argLen: 1, + asm: wasm.AI64Ctz, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Clz", + argLen: 1, + asm: wasm.AI64Clz, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I32Rotl", + argLen: 2, + asm: wasm.AI32Rotl, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Rotl", + argLen: 2, + asm: wasm.AI64Rotl, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Popcnt", + argLen: 1, + asm: wasm.AI64Popcnt, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + + { + name: "Add8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Add16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Add32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Add64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AddPtr", + argLen: 2, + generic: true, + }, + { + name: "Add32F", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Add64F", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Sub8", + argLen: 2, + generic: true, + }, + { + name: "Sub16", + argLen: 2, + generic: true, + }, + { + name: "Sub32", + argLen: 2, + generic: true, + }, + { + name: 
"Sub64", + argLen: 2, + generic: true, + }, + { + name: "SubPtr", + argLen: 2, + generic: true, + }, + { + name: "Sub32F", + argLen: 2, + generic: true, + }, + { + name: "Sub64F", + argLen: 2, + generic: true, + }, + { + name: "Mul8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Mul16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Mul32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Mul64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Mul32F", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Mul64F", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Div32F", + argLen: 2, + generic: true, + }, + { + name: "Div64F", + argLen: 2, + generic: true, + }, + { + name: "Hmul32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Hmul32u", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Hmul64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Hmul64u", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Mul32uhilo", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Mul64uhilo", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Mul32uover", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Mul64uover", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Avg32u", + argLen: 2, + generic: true, + }, + { + name: "Avg64u", + argLen: 2, + generic: true, + }, + { + name: "Div8", + argLen: 2, + generic: true, + }, + { + name: "Div8u", + argLen: 2, + generic: true, + }, + { + name: "Div16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Div16u", + argLen: 2, + generic: true, + }, + { + name: "Div32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Div32u", + argLen: 2, + generic: true, + }, + { + name: "Div64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Div64u", + argLen: 2, + generic: true, + }, + { + name: "Div128u", + argLen: 3, + generic: true, + }, + { + name: "Mod8", + argLen: 2, + generic: true, + }, + { + name: "Mod8u", + argLen: 2, + generic: true, + }, + { + name: "Mod16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Mod16u", + argLen: 2, + generic: true, + }, + { + name: "Mod32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Mod32u", + argLen: 2, + generic: true, + }, + { + name: "Mod64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Mod64u", + argLen: 2, + generic: true, + }, + { + name: "And8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "And16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "And32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "And64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Or8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Or16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Or32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Or64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Xor8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Xor16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Xor32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Xor64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: 
"Lsh8x8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh8x16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh8x32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh8x64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh16x8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh16x16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh16x32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh16x64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh32x8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh32x16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh32x32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh32x64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh64x8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh64x16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh64x32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh64x64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh8x8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh8x16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh8x32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh8x64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh16x8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh16x16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh16x32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh16x64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh32x8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh32x16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh32x32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh32x64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh64x8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh64x16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh64x32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh64x64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh8Ux8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh8Ux16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh8Ux32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh8Ux64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh16Ux8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh16Ux16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh16Ux32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh16Ux64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh32Ux8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh32Ux16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh32Ux32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh32Ux64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh64Ux8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: 
"Rsh64Ux16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh64Ux32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh64Ux64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Eq8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Eq16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Eq32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Eq64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "EqPtr", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "EqInter", + argLen: 2, + generic: true, + }, + { + name: "EqSlice", + argLen: 2, + generic: true, + }, + { + name: "Eq32F", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Eq64F", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Neq8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Neq16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Neq32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Neq64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "NeqPtr", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "NeqInter", + argLen: 2, + generic: true, + }, + { + name: "NeqSlice", + argLen: 2, + generic: true, + }, + { + name: "Neq32F", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Neq64F", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Less8", + argLen: 2, generic: true, }, { - name: "Sub64", + name: "Less8U", argLen: 2, generic: true, }, { - name: "SubPtr", + name: "Less16", argLen: 2, generic: true, }, { - name: "Sub32F", + name: "Less16U", argLen: 2, generic: true, }, { - name: "Sub64F", + name: "Less32", argLen: 2, generic: true, }, { - name: "Mul8", - argLen: 2, - commutative: true, - generic: true, + name: "Less32U", + argLen: 2, + generic: true, }, { - name: "Mul16", - argLen: 2, - commutative: true, - generic: true, + name: "Less64", + argLen: 2, + generic: true, }, { - name: "Mul32", - argLen: 2, - commutative: true, - generic: true, + name: "Less64U", + argLen: 2, + generic: true, }, { - name: "Mul64", - argLen: 2, - commutative: true, - generic: true, + name: "Less32F", + argLen: 2, + generic: true, }, { - name: "Mul32F", - argLen: 2, - commutative: true, - generic: true, + name: "Less64F", + argLen: 2, + generic: true, }, { - name: "Mul64F", - argLen: 2, - commutative: true, - generic: true, + name: "Leq8", + argLen: 2, + generic: true, }, { - name: "Div32F", + name: "Leq8U", argLen: 2, generic: true, }, { - name: "Div64F", + name: "Leq16", argLen: 2, generic: true, }, { - name: "Hmul32", - argLen: 2, - commutative: true, - generic: true, + name: "Leq16U", + argLen: 2, + generic: true, }, { - name: "Hmul32u", + name: "Leq32", + argLen: 2, + generic: true, + }, + { + name: "Leq32U", + argLen: 2, + generic: true, + }, + { + name: "Leq64", + argLen: 2, + generic: true, + }, + { + name: "Leq64U", + argLen: 2, + generic: true, + }, + { + name: "Leq32F", + argLen: 2, + generic: true, + }, + { + name: "Leq64F", + argLen: 2, + generic: true, + }, + { + name: "CondSelect", + argLen: 3, + generic: true, + }, + { + name: "AndB", argLen: 2, commutative: true, generic: true, }, { - name: "Hmul64", + name: "OrB", argLen: 2, commutative: true, generic: true, }, { - name: "Hmul64u", + name: "EqB", argLen: 2, commutative: true, generic: true, }, { - name: "Mul32uhilo", + name: "NeqB", argLen: 2, 
commutative: true, generic: true, }, { - name: "Mul64uhilo", - argLen: 2, - commutative: true, - generic: true, + name: "Not", + argLen: 1, + generic: true, + }, + { + name: "Neg8", + argLen: 1, + generic: true, + }, + { + name: "Neg16", + argLen: 1, + generic: true, + }, + { + name: "Neg32", + argLen: 1, + generic: true, + }, + { + name: "Neg64", + argLen: 1, + generic: true, + }, + { + name: "Neg32F", + argLen: 1, + generic: true, + }, + { + name: "Neg64F", + argLen: 1, + generic: true, + }, + { + name: "Com8", + argLen: 1, + generic: true, + }, + { + name: "Com16", + argLen: 1, + generic: true, + }, + { + name: "Com32", + argLen: 1, + generic: true, + }, + { + name: "Com64", + argLen: 1, + generic: true, + }, + { + name: "Ctz8", + argLen: 1, + generic: true, + }, + { + name: "Ctz16", + argLen: 1, + generic: true, + }, + { + name: "Ctz32", + argLen: 1, + generic: true, + }, + { + name: "Ctz64", + argLen: 1, + generic: true, + }, + { + name: "Ctz64On32", + argLen: 2, + generic: true, + }, + { + name: "Ctz8NonZero", + argLen: 1, + generic: true, + }, + { + name: "Ctz16NonZero", + argLen: 1, + generic: true, + }, + { + name: "Ctz32NonZero", + argLen: 1, + generic: true, + }, + { + name: "Ctz64NonZero", + argLen: 1, + generic: true, + }, + { + name: "BitLen8", + argLen: 1, + generic: true, + }, + { + name: "BitLen16", + argLen: 1, + generic: true, + }, + { + name: "BitLen32", + argLen: 1, + generic: true, + }, + { + name: "BitLen64", + argLen: 1, + generic: true, + }, + { + name: "Bswap16", + argLen: 1, + generic: true, + }, + { + name: "Bswap32", + argLen: 1, + generic: true, + }, + { + name: "Bswap64", + argLen: 1, + generic: true, + }, + { + name: "BitRev8", + argLen: 1, + generic: true, + }, + { + name: "BitRev16", + argLen: 1, + generic: true, + }, + { + name: "BitRev32", + argLen: 1, + generic: true, + }, + { + name: "BitRev64", + argLen: 1, + generic: true, + }, + { + name: "PopCount8", + argLen: 1, + generic: true, + }, + { + name: "PopCount16", + argLen: 1, + generic: true, + }, + { + name: "PopCount32", + argLen: 1, + generic: true, + }, + { + name: "PopCount64", + argLen: 1, + generic: true, + }, + { + name: "RotateLeft64", + argLen: 2, + generic: true, + }, + { + name: "RotateLeft32", + argLen: 2, + generic: true, + }, + { + name: "RotateLeft16", + argLen: 2, + generic: true, + }, + { + name: "RotateLeft8", + argLen: 2, + generic: true, + }, + { + name: "Sqrt", + argLen: 1, + generic: true, + }, + { + name: "Sqrt32", + argLen: 1, + generic: true, + }, + { + name: "Floor", + argLen: 1, + generic: true, + }, + { + name: "Ceil", + argLen: 1, + generic: true, + }, + { + name: "Trunc", + argLen: 1, + generic: true, + }, + { + name: "Round", + argLen: 1, + generic: true, + }, + { + name: "RoundToEven", + argLen: 1, + generic: true, + }, + { + name: "Abs", + argLen: 1, + generic: true, + }, + { + name: "Copysign", + argLen: 2, + generic: true, + }, + { + name: "Min64", + argLen: 2, + generic: true, + }, + { + name: "Max64", + argLen: 2, + generic: true, + }, + { + name: "Min64u", + argLen: 2, + generic: true, + }, + { + name: "Max64u", + argLen: 2, + generic: true, + }, + { + name: "Min64F", + argLen: 2, + generic: true, + }, + { + name: "Min32F", + argLen: 2, + generic: true, + }, + { + name: "Max64F", + argLen: 2, + generic: true, + }, + { + name: "Max32F", + argLen: 2, + generic: true, + }, + { + name: "FMA", + argLen: 3, + generic: true, + }, + { + name: "Phi", + argLen: -1, + zeroWidth: true, + generic: true, + }, + { + name: "Copy", + argLen: 1, + generic: true, + }, + { + name: 
"Convert", + argLen: 2, + resultInArg0: true, + zeroWidth: true, + generic: true, + }, + { + name: "ConstBool", + auxType: auxBool, + argLen: 0, + generic: true, + }, + { + name: "ConstString", + auxType: auxString, + argLen: 0, + generic: true, + }, + { + name: "ConstNil", + argLen: 0, + generic: true, + }, + { + name: "Const8", + auxType: auxInt8, + argLen: 0, + generic: true, + }, + { + name: "Const16", + auxType: auxInt16, + argLen: 0, + generic: true, + }, + { + name: "Const32", + auxType: auxInt32, + argLen: 0, + generic: true, + }, + { + name: "Const64", + auxType: auxInt64, + argLen: 0, + generic: true, + }, + { + name: "Const32F", + auxType: auxFloat32, + argLen: 0, + generic: true, + }, + { + name: "Const64F", + auxType: auxFloat64, + argLen: 0, + generic: true, + }, + { + name: "ConstInterface", + argLen: 0, + generic: true, + }, + { + name: "ConstSlice", + argLen: 0, + generic: true, + }, + { + name: "InitMem", + argLen: 0, + zeroWidth: true, + generic: true, + }, + { + name: "Arg", + auxType: auxSymOff, + argLen: 0, + zeroWidth: true, + symEffect: SymRead, + generic: true, + }, + { + name: "ArgIntReg", + auxType: auxNameOffsetInt8, + argLen: 0, + zeroWidth: true, + generic: true, + }, + { + name: "ArgFloatReg", + auxType: auxNameOffsetInt8, + argLen: 0, + zeroWidth: true, + generic: true, + }, + { + name: "Addr", + auxType: auxSym, + argLen: 1, + symEffect: SymAddr, + generic: true, + }, + { + name: "LocalAddr", + auxType: auxSym, + argLen: 2, + symEffect: SymAddr, + generic: true, }, { - name: "Mul32uover", - argLen: 2, - commutative: true, - generic: true, + name: "SP", + argLen: 0, + zeroWidth: true, + fixedReg: true, + generic: true, }, { - name: "Mul64uover", - argLen: 2, - commutative: true, - generic: true, + name: "SB", + argLen: 0, + zeroWidth: true, + fixedReg: true, + generic: true, }, { - name: "Avg32u", + name: "SPanchored", + argLen: 2, + zeroWidth: true, + generic: true, + }, + { + name: "Load", argLen: 2, generic: true, }, { - name: "Avg64u", + name: "Dereference", argLen: 2, generic: true, }, { - name: "Div8", - argLen: 2, + name: "Store", + auxType: auxTyp, + argLen: 3, generic: true, }, { - name: "Div8u", - argLen: 2, + name: "Move", + auxType: auxTypSize, + argLen: 3, generic: true, }, { - name: "Div16", - auxType: auxBool, + name: "Zero", + auxType: auxTypSize, argLen: 2, generic: true, }, { - name: "Div16u", - argLen: 2, + name: "StoreWB", + auxType: auxTyp, + argLen: 3, generic: true, }, { - name: "Div32", - auxType: auxBool, - argLen: 2, + name: "MoveWB", + auxType: auxTypSize, + argLen: 3, generic: true, }, { - name: "Div32u", + name: "ZeroWB", + auxType: auxTypSize, argLen: 2, generic: true, }, { - name: "Div64", - auxType: auxBool, - argLen: 2, + name: "WBend", + argLen: 1, generic: true, }, { - name: "Div64u", - argLen: 2, + name: "WB", + auxType: auxInt64, + argLen: 1, generic: true, }, { - name: "Div128u", + name: "HasCPUFeature", + auxType: auxSym, + argLen: 0, + symEffect: SymNone, + generic: true, + }, + { + name: "PanicBounds", + auxType: auxInt64, argLen: 3, + call: true, generic: true, }, { - name: "Mod8", - argLen: 2, + name: "PanicExtend", + auxType: auxInt64, + argLen: 4, + call: true, generic: true, }, { - name: "Mod8u", - argLen: 2, + name: "ClosureCall", + auxType: auxCallOff, + argLen: -1, + call: true, generic: true, }, { - name: "Mod16", - auxType: auxBool, - argLen: 2, + name: "StaticCall", + auxType: auxCallOff, + argLen: -1, + call: true, generic: true, }, { - name: "Mod16u", - argLen: 2, + name: "InterCall", + auxType: 
auxCallOff, + argLen: -1, + call: true, generic: true, }, { - name: "Mod32", - auxType: auxBool, - argLen: 2, + name: "TailCall", + auxType: auxCallOff, + argLen: -1, + call: true, generic: true, }, { - name: "Mod32u", - argLen: 2, + name: "ClosureLECall", + auxType: auxCallOff, + argLen: -1, + call: true, generic: true, }, { - name: "Mod64", - auxType: auxBool, - argLen: 2, + name: "StaticLECall", + auxType: auxCallOff, + argLen: -1, + call: true, generic: true, }, { - name: "Mod64u", - argLen: 2, + name: "InterLECall", + auxType: auxCallOff, + argLen: -1, + call: true, generic: true, }, { - name: "And8", - argLen: 2, - commutative: true, - generic: true, + name: "TailLECall", + auxType: auxCallOff, + argLen: -1, + call: true, + generic: true, }, { - name: "And16", - argLen: 2, - commutative: true, - generic: true, + name: "SignExt8to16", + argLen: 1, + generic: true, }, { - name: "And32", - argLen: 2, - commutative: true, - generic: true, + name: "SignExt8to32", + argLen: 1, + generic: true, }, { - name: "And64", - argLen: 2, - commutative: true, - generic: true, + name: "SignExt8to64", + argLen: 1, + generic: true, }, { - name: "Or8", - argLen: 2, - commutative: true, - generic: true, + name: "SignExt16to32", + argLen: 1, + generic: true, }, { - name: "Or16", - argLen: 2, - commutative: true, - generic: true, + name: "SignExt16to64", + argLen: 1, + generic: true, }, { - name: "Or32", - argLen: 2, - commutative: true, - generic: true, + name: "SignExt32to64", + argLen: 1, + generic: true, }, { - name: "Or64", - argLen: 2, - commutative: true, - generic: true, + name: "ZeroExt8to16", + argLen: 1, + generic: true, }, { - name: "Xor8", - argLen: 2, - commutative: true, - generic: true, + name: "ZeroExt8to32", + argLen: 1, + generic: true, }, { - name: "Xor16", - argLen: 2, - commutative: true, - generic: true, + name: "ZeroExt8to64", + argLen: 1, + generic: true, }, { - name: "Xor32", - argLen: 2, - commutative: true, - generic: true, + name: "ZeroExt16to32", + argLen: 1, + generic: true, }, { - name: "Xor64", - argLen: 2, - commutative: true, - generic: true, + name: "ZeroExt16to64", + argLen: 1, + generic: true, }, { - name: "Lsh8x8", - auxType: auxBool, - argLen: 2, + name: "ZeroExt32to64", + argLen: 1, generic: true, }, { - name: "Lsh8x16", - auxType: auxBool, - argLen: 2, + name: "Trunc16to8", + argLen: 1, generic: true, }, { - name: "Lsh8x32", - auxType: auxBool, - argLen: 2, + name: "Trunc32to8", + argLen: 1, generic: true, }, { - name: "Lsh8x64", - auxType: auxBool, - argLen: 2, + name: "Trunc32to16", + argLen: 1, generic: true, }, { - name: "Lsh16x8", - auxType: auxBool, - argLen: 2, + name: "Trunc64to8", + argLen: 1, generic: true, }, { - name: "Lsh16x16", - auxType: auxBool, - argLen: 2, + name: "Trunc64to16", + argLen: 1, generic: true, }, { - name: "Lsh16x32", - auxType: auxBool, - argLen: 2, + name: "Trunc64to32", + argLen: 1, generic: true, }, { - name: "Lsh16x64", - auxType: auxBool, - argLen: 2, + name: "Cvt32to32F", + argLen: 1, generic: true, }, { - name: "Lsh32x8", - auxType: auxBool, - argLen: 2, + name: "Cvt32to64F", + argLen: 1, generic: true, }, { - name: "Lsh32x16", - auxType: auxBool, - argLen: 2, + name: "Cvt64to32F", + argLen: 1, generic: true, }, { - name: "Lsh32x32", - auxType: auxBool, - argLen: 2, + name: "Cvt64to64F", + argLen: 1, generic: true, }, { - name: "Lsh32x64", - auxType: auxBool, - argLen: 2, + name: "Cvt32Fto32", + argLen: 1, generic: true, }, { - name: "Lsh64x8", - auxType: auxBool, - argLen: 2, + name: "Cvt32Fto64", + argLen: 1, generic: true, 
}, { - name: "Lsh64x16", - auxType: auxBool, - argLen: 2, + name: "Cvt64Fto32", + argLen: 1, generic: true, }, { - name: "Lsh64x32", - auxType: auxBool, - argLen: 2, + name: "Cvt64Fto64", + argLen: 1, generic: true, }, { - name: "Lsh64x64", - auxType: auxBool, - argLen: 2, + name: "Cvt32Fto64F", + argLen: 1, generic: true, }, { - name: "Rsh8x8", - auxType: auxBool, - argLen: 2, + name: "Cvt64Fto32F", + argLen: 1, generic: true, }, { - name: "Rsh8x16", - auxType: auxBool, - argLen: 2, + name: "CvtBoolToUint8", + argLen: 1, generic: true, }, { - name: "Rsh8x32", - auxType: auxBool, - argLen: 2, + name: "Round32F", + argLen: 1, generic: true, }, { - name: "Rsh8x64", - auxType: auxBool, - argLen: 2, + name: "Round64F", + argLen: 1, generic: true, }, { - name: "Rsh16x8", - auxType: auxBool, - argLen: 2, + name: "IsNonNil", + argLen: 1, generic: true, }, { - name: "Rsh16x16", - auxType: auxBool, + name: "IsInBounds", argLen: 2, generic: true, }, { - name: "Rsh16x32", - auxType: auxBool, + name: "IsSliceInBounds", argLen: 2, generic: true, }, { - name: "Rsh16x64", - auxType: auxBool, - argLen: 2, + name: "NilCheck", + argLen: 2, + nilCheck: true, + generic: true, + }, + { + name: "GetG", + argLen: 1, + zeroWidth: true, + generic: true, + }, + { + name: "GetClosurePtr", + argLen: 0, generic: true, }, { - name: "Rsh32x8", - auxType: auxBool, - argLen: 2, + name: "GetCallerPC", + argLen: 0, generic: true, }, { - name: "Rsh32x16", - auxType: auxBool, - argLen: 2, + name: "GetCallerSP", + argLen: 1, generic: true, }, { - name: "Rsh32x32", - auxType: auxBool, + name: "PtrIndex", argLen: 2, generic: true, }, { - name: "Rsh32x64", - auxType: auxBool, - argLen: 2, + name: "OffPtr", + auxType: auxInt64, + argLen: 1, generic: true, }, { - name: "Rsh64x8", - auxType: auxBool, - argLen: 2, + name: "SliceMake", + argLen: 3, generic: true, }, { - name: "Rsh64x16", - auxType: auxBool, - argLen: 2, + name: "SlicePtr", + argLen: 1, generic: true, }, { - name: "Rsh64x32", - auxType: auxBool, - argLen: 2, + name: "SliceLen", + argLen: 1, generic: true, }, { - name: "Rsh64x64", - auxType: auxBool, - argLen: 2, + name: "SliceCap", + argLen: 1, generic: true, }, { - name: "Rsh8Ux8", - auxType: auxBool, - argLen: 2, + name: "SlicePtrUnchecked", + argLen: 1, generic: true, }, { - name: "Rsh8Ux16", - auxType: auxBool, + name: "ComplexMake", argLen: 2, generic: true, }, { - name: "Rsh8Ux32", - auxType: auxBool, - argLen: 2, + name: "ComplexReal", + argLen: 1, generic: true, }, { - name: "Rsh8Ux64", - auxType: auxBool, - argLen: 2, + name: "ComplexImag", + argLen: 1, generic: true, }, { - name: "Rsh16Ux8", - auxType: auxBool, + name: "StringMake", argLen: 2, generic: true, }, { - name: "Rsh16Ux16", - auxType: auxBool, - argLen: 2, + name: "StringPtr", + argLen: 1, generic: true, }, { - name: "Rsh16Ux32", - auxType: auxBool, - argLen: 2, + name: "StringLen", + argLen: 1, generic: true, }, { - name: "Rsh16Ux64", - auxType: auxBool, + name: "IMake", argLen: 2, generic: true, }, { - name: "Rsh32Ux8", - auxType: auxBool, - argLen: 2, + name: "ITab", + argLen: 1, generic: true, }, { - name: "Rsh32Ux16", - auxType: auxBool, - argLen: 2, + name: "IData", + argLen: 1, generic: true, }, { - name: "Rsh32Ux32", - auxType: auxBool, - argLen: 2, + name: "StructMake", + argLen: -1, generic: true, }, { - name: "Rsh32Ux64", - auxType: auxBool, - argLen: 2, + name: "StructSelect", + auxType: auxInt64, + argLen: 1, generic: true, }, { - name: "Rsh64Ux8", - auxType: auxBool, - argLen: 2, + name: "ArrayMake0", + argLen: 0, generic: true, }, { - 
name: "Rsh64Ux16", - auxType: auxBool, - argLen: 2, + name: "ArrayMake1", + argLen: 1, generic: true, }, { - name: "Rsh64Ux32", - auxType: auxBool, - argLen: 2, + name: "ArraySelect", + auxType: auxInt64, + argLen: 1, generic: true, }, { - name: "Rsh64Ux64", - auxType: auxBool, - argLen: 2, + name: "StoreReg", + argLen: 1, generic: true, }, { - name: "Eq8", - argLen: 2, - commutative: true, - generic: true, + name: "LoadReg", + argLen: 1, + generic: true, }, { - name: "Eq16", - argLen: 2, - commutative: true, - generic: true, + name: "FwdRef", + auxType: auxSym, + argLen: 0, + symEffect: SymNone, + generic: true, }, { - name: "Eq32", - argLen: 2, - commutative: true, - generic: true, + name: "Unknown", + argLen: 0, + generic: true, }, { - name: "Eq64", - argLen: 2, - commutative: true, - generic: true, + name: "VarDef", + auxType: auxSym, + argLen: 1, + zeroWidth: true, + symEffect: SymNone, + generic: true, }, { - name: "EqPtr", - argLen: 2, - commutative: true, - generic: true, + name: "VarLive", + auxType: auxSym, + argLen: 1, + zeroWidth: true, + symEffect: SymRead, + generic: true, + }, + { + name: "KeepAlive", + argLen: 2, + zeroWidth: true, + generic: true, }, { - name: "EqInter", - argLen: 2, + name: "InlMark", + auxType: auxInt32, + argLen: 1, generic: true, }, { - name: "EqSlice", + name: "Int64Make", argLen: 2, generic: true, }, { - name: "Eq32F", - argLen: 2, - commutative: true, - generic: true, + name: "Int64Hi", + argLen: 1, + generic: true, }, { - name: "Eq64F", - argLen: 2, - commutative: true, - generic: true, + name: "Int64Lo", + argLen: 1, + generic: true, }, { - name: "Neq8", + name: "Add32carry", argLen: 2, commutative: true, generic: true, }, { - name: "Neq16", - argLen: 2, + name: "Add32withcarry", + argLen: 3, commutative: true, generic: true, }, { - name: "Neq32", - argLen: 2, - commutative: true, - generic: true, + name: "Sub32carry", + argLen: 2, + generic: true, }, { - name: "Neq64", - argLen: 2, - commutative: true, - generic: true, + name: "Sub32withcarry", + argLen: 3, + generic: true, }, { - name: "NeqPtr", - argLen: 2, + name: "Add64carry", + argLen: 3, commutative: true, generic: true, }, { - name: "NeqInter", - argLen: 2, + name: "Sub64borrow", + argLen: 3, generic: true, }, { - name: "NeqSlice", - argLen: 2, + name: "Signmask", + argLen: 1, generic: true, }, { - name: "Neq32F", - argLen: 2, - commutative: true, - generic: true, + name: "Zeromask", + argLen: 1, + generic: true, }, { - name: "Neq64F", - argLen: 2, - commutative: true, - generic: true, + name: "Slicemask", + argLen: 1, + generic: true, }, { - name: "Less8", + name: "SpectreIndex", argLen: 2, generic: true, }, { - name: "Less8U", + name: "SpectreSliceIndex", argLen: 2, generic: true, }, { - name: "Less16", - argLen: 2, + name: "Cvt32Uto32F", + argLen: 1, generic: true, }, { - name: "Less16U", - argLen: 2, + name: "Cvt32Uto64F", + argLen: 1, generic: true, }, { - name: "Less32", - argLen: 2, + name: "Cvt32Fto32U", + argLen: 1, generic: true, }, { - name: "Less32U", - argLen: 2, + name: "Cvt64Fto32U", + argLen: 1, generic: true, }, { - name: "Less64", - argLen: 2, + name: "Cvt64Uto32F", + argLen: 1, generic: true, }, { - name: "Less64U", - argLen: 2, + name: "Cvt64Uto64F", + argLen: 1, generic: true, }, { - name: "Less32F", - argLen: 2, + name: "Cvt32Fto64U", + argLen: 1, generic: true, }, { - name: "Less64F", - argLen: 2, + name: "Cvt64Fto64U", + argLen: 1, generic: true, }, { - name: "Leq8", + name: "Select0", + argLen: 1, + zeroWidth: true, + generic: true, + }, + { + name: "Select1", + 
argLen: 1, + zeroWidth: true, + generic: true, + }, + { + name: "MakeTuple", argLen: 2, generic: true, }, { - name: "Leq8U", - argLen: 2, + name: "SelectN", + auxType: auxInt64, + argLen: 1, generic: true, }, { - name: "Leq16", - argLen: 2, + name: "SelectNAddr", + auxType: auxInt64, + argLen: 1, generic: true, }, { - name: "Leq16U", - argLen: 2, + name: "MakeResult", + argLen: -1, generic: true, }, { - name: "Leq32", + name: "AtomicLoad8", argLen: 2, generic: true, }, { - name: "Leq32U", + name: "AtomicLoad32", argLen: 2, generic: true, }, { - name: "Leq64", + name: "AtomicLoad64", argLen: 2, generic: true, }, { - name: "Leq64U", + name: "AtomicLoadPtr", argLen: 2, generic: true, }, { - name: "Leq32F", + name: "AtomicLoadAcq32", argLen: 2, generic: true, }, { - name: "Leq64F", + name: "AtomicLoadAcq64", argLen: 2, generic: true, }, { - name: "CondSelect", - argLen: 3, - generic: true, + name: "AtomicStore8", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "AndB", - argLen: 2, - commutative: true, - generic: true, + name: "AtomicStore32", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "OrB", - argLen: 2, - commutative: true, - generic: true, + name: "AtomicStore64", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "EqB", - argLen: 2, - commutative: true, - generic: true, + name: "AtomicStorePtrNoWB", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "NeqB", - argLen: 2, - commutative: true, - generic: true, + name: "AtomicStoreRel32", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "Not", - argLen: 1, - generic: true, + name: "AtomicStoreRel64", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "Neg8", - argLen: 1, - generic: true, + name: "AtomicExchange8", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "Neg16", - argLen: 1, - generic: true, + name: "AtomicExchange32", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "Neg32", - argLen: 1, - generic: true, + name: "AtomicExchange64", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "Neg64", - argLen: 1, - generic: true, + name: "AtomicAdd32", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "Neg32F", - argLen: 1, - generic: true, + name: "AtomicAdd64", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "Neg64F", - argLen: 1, - generic: true, + name: "AtomicCompareAndSwap32", + argLen: 4, + hasSideEffects: true, + generic: true, }, { - name: "Com8", - argLen: 1, - generic: true, + name: "AtomicCompareAndSwap64", + argLen: 4, + hasSideEffects: true, + generic: true, }, { - name: "Com16", - argLen: 1, - generic: true, + name: "AtomicCompareAndSwapRel32", + argLen: 4, + hasSideEffects: true, + generic: true, }, { - name: "Com32", - argLen: 1, - generic: true, + name: "AtomicAnd8", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "Com64", - argLen: 1, - generic: true, + name: "AtomicOr8", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "Ctz8", - argLen: 1, - generic: true, + name: "AtomicAnd32", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "Ctz16", - argLen: 1, - generic: true, + name: "AtomicOr32", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "Ctz32", - argLen: 1, - generic: true, + name: "AtomicAnd64value", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "Ctz64", - argLen: 1, - generic: true, + name: "AtomicAnd32value", + argLen: 3, + 
hasSideEffects: true, + generic: true, }, { - name: "Ctz64On32", - argLen: 2, - generic: true, + name: "AtomicAnd8value", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "Ctz8NonZero", - argLen: 1, - generic: true, + name: "AtomicOr64value", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "Ctz16NonZero", - argLen: 1, - generic: true, + name: "AtomicOr32value", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "Ctz32NonZero", - argLen: 1, - generic: true, + name: "AtomicOr8value", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "Ctz64NonZero", - argLen: 1, - generic: true, + name: "AtomicStore8Variant", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "BitLen8", - argLen: 1, - generic: true, + name: "AtomicStore32Variant", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "BitLen16", - argLen: 1, - generic: true, + name: "AtomicStore64Variant", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "BitLen32", - argLen: 1, - generic: true, + name: "AtomicAdd32Variant", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "BitLen64", - argLen: 1, - generic: true, + name: "AtomicAdd64Variant", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "Bswap16", - argLen: 1, - generic: true, + name: "AtomicExchange8Variant", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "Bswap32", - argLen: 1, - generic: true, + name: "AtomicExchange32Variant", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "Bswap64", - argLen: 1, - generic: true, + name: "AtomicExchange64Variant", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "BitRev8", - argLen: 1, - generic: true, + name: "AtomicCompareAndSwap32Variant", + argLen: 4, + hasSideEffects: true, + generic: true, }, { - name: "BitRev16", - argLen: 1, - generic: true, + name: "AtomicCompareAndSwap64Variant", + argLen: 4, + hasSideEffects: true, + generic: true, }, { - name: "BitRev32", - argLen: 1, - generic: true, + name: "AtomicAnd64valueVariant", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "BitRev64", - argLen: 1, - generic: true, + name: "AtomicOr64valueVariant", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "PopCount8", - argLen: 1, - generic: true, + name: "AtomicAnd32valueVariant", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "PopCount16", - argLen: 1, - generic: true, + name: "AtomicOr32valueVariant", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "PopCount32", - argLen: 1, - generic: true, + name: "AtomicAnd8valueVariant", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "PopCount64", - argLen: 1, - generic: true, + name: "AtomicOr8valueVariant", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "RotateLeft64", - argLen: 2, - generic: true, + name: "PubBarrier", + argLen: 1, + hasSideEffects: true, + generic: true, }, { - name: "RotateLeft32", - argLen: 2, - generic: true, + name: "Clobber", + auxType: auxSymOff, + argLen: 0, + symEffect: SymNone, + generic: true, }, { - name: "RotateLeft16", - argLen: 2, + name: "ClobberReg", + argLen: 0, generic: true, }, { - name: "RotateLeft8", - argLen: 2, - generic: true, + name: "PrefetchCache", + argLen: 2, + hasSideEffects: true, + generic: true, }, { - name: "Sqrt", - argLen: 1, - generic: true, + name: "PrefetchCacheStreamed", + argLen: 2, + hasSideEffects: true, + generic: true, }, { - 
name: "Sqrt32", - argLen: 1, + name: "Add32x4", + argLen: 2, generic: true, }, { - name: "Floor", - argLen: 1, + name: "ZeroSIMD", + argLen: 0, generic: true, }, { - name: "Ceil", - argLen: 1, - generic: true, + name: "AddFloat32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Trunc", - argLen: 1, - generic: true, + name: "AndFloat32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Round", - argLen: 1, + name: "AndNotFloat32x16", + argLen: 2, generic: true, }, { - name: "RoundToEven", + name: "ApproximateReciprocalFloat32x16", argLen: 1, generic: true, }, { - name: "Abs", + name: "ApproximateReciprocalOfSqrtFloat32x16", argLen: 1, generic: true, }, { - name: "Copysign", + name: "DivFloat32x16", argLen: 2, generic: true, }, { - name: "Min64", - argLen: 2, + name: "EqualFloat32x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "FusedMultiplyAddFloat32x16", + argLen: 3, generic: true, }, { - name: "Max64", - argLen: 2, + name: "FusedMultiplyAddSubFloat32x16", + argLen: 3, generic: true, }, { - name: "Min64u", - argLen: 2, + name: "FusedMultiplySubAddFloat32x16", + argLen: 3, generic: true, }, { - name: "Max64u", + name: "GreaterFloat32x16", argLen: 2, generic: true, }, { - name: "Min64F", + name: "GreaterEqualFloat32x16", argLen: 2, generic: true, }, { - name: "Min32F", - argLen: 2, - generic: true, + name: "IsNanFloat32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Max64F", + name: "LessFloat32x16", argLen: 2, generic: true, }, { - name: "Max32F", + name: "LessEqualFloat32x16", argLen: 2, generic: true, }, { - name: "FMA", - argLen: 3, - generic: true, + name: "MaskedAddFloat32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Phi", - argLen: -1, - zeroWidth: true, - generic: true, + name: "MaskedAndFloat32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Copy", - argLen: 1, + name: "MaskedAndNotFloat32x16", + argLen: 3, generic: true, }, { - name: "Convert", - argLen: 2, - resultInArg0: true, - zeroWidth: true, - generic: true, + name: "MaskedApproximateReciprocalFloat32x16", + argLen: 2, + generic: true, }, { - name: "ConstBool", - auxType: auxBool, - argLen: 0, + name: "MaskedApproximateReciprocalOfSqrtFloat32x16", + argLen: 2, generic: true, }, { - name: "ConstString", - auxType: auxString, - argLen: 0, + name: "MaskedDivFloat32x16", + argLen: 3, generic: true, }, { - name: "ConstNil", - argLen: 0, - generic: true, + name: "MaskedEqualFloat32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Const8", - auxType: auxInt8, - argLen: 0, + name: "MaskedFusedMultiplyAddFloat32x16", + argLen: 4, generic: true, }, { - name: "Const16", - auxType: auxInt16, - argLen: 0, + name: "MaskedFusedMultiplyAddSubFloat32x16", + argLen: 4, generic: true, }, { - name: "Const32", - auxType: auxInt32, - argLen: 0, + name: "MaskedFusedMultiplySubAddFloat32x16", + argLen: 4, generic: true, }, { - name: "Const64", - auxType: auxInt64, - argLen: 0, + name: "MaskedGreaterFloat32x16", + argLen: 3, generic: true, }, { - name: "Const32F", - auxType: auxFloat32, - argLen: 0, + name: "MaskedGreaterEqualFloat32x16", + argLen: 3, generic: true, }, { - name: "Const64F", - auxType: auxFloat64, - argLen: 0, - generic: true, + name: "MaskedIsNanFloat32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ConstInterface", - argLen: 0, + name: "MaskedLessFloat32x16", + argLen: 3, generic: true, }, { - name: "ConstSlice", - argLen: 0, + name: 
"MaskedLessEqualFloat32x16", + argLen: 3, generic: true, }, { - name: "InitMem", - argLen: 0, - zeroWidth: true, - generic: true, + name: "MaskedMaxFloat32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Arg", - auxType: auxSymOff, - argLen: 0, - zeroWidth: true, - symEffect: SymRead, - generic: true, + name: "MaskedMinFloat32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ArgIntReg", - auxType: auxNameOffsetInt8, - argLen: 0, - zeroWidth: true, - generic: true, + name: "MaskedMulFloat32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ArgFloatReg", - auxType: auxNameOffsetInt8, - argLen: 0, - zeroWidth: true, - generic: true, + name: "MaskedMulByPowOf2Float32x16", + argLen: 3, + generic: true, }, { - name: "Addr", - auxType: auxSym, - argLen: 1, - symEffect: SymAddr, - generic: true, + name: "MaskedNotEqualFloat32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LocalAddr", - auxType: auxSym, - argLen: 2, - symEffect: SymAddr, - generic: true, + name: "MaskedOrFloat32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "SP", - argLen: 0, - zeroWidth: true, - fixedReg: true, - generic: true, + name: "MaskedSqrtFloat32x16", + argLen: 2, + generic: true, }, { - name: "SB", - argLen: 0, - zeroWidth: true, - fixedReg: true, - generic: true, + name: "MaskedSubFloat32x16", + argLen: 3, + generic: true, }, { - name: "SPanchored", - argLen: 2, - zeroWidth: true, - generic: true, + name: "MaskedXorFloat32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Load", - argLen: 2, - generic: true, + name: "MaxFloat32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Dereference", - argLen: 2, - generic: true, + name: "MinFloat32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Store", - auxType: auxTyp, - argLen: 3, - generic: true, + name: "MulFloat32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Move", - auxType: auxTypSize, - argLen: 3, + name: "MulByPowOf2Float32x16", + argLen: 2, generic: true, }, { - name: "Zero", - auxType: auxTypSize, - argLen: 2, - generic: true, + name: "NotEqualFloat32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "StoreWB", - auxType: auxTyp, - argLen: 3, - generic: true, + name: "OrFloat32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MoveWB", - auxType: auxTypSize, - argLen: 3, + name: "SqrtFloat32x16", + argLen: 1, generic: true, }, { - name: "ZeroWB", - auxType: auxTypSize, + name: "SubFloat32x16", argLen: 2, generic: true, }, { - name: "WBend", - argLen: 1, - generic: true, + name: "XorFloat32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "WB", - auxType: auxInt64, - argLen: 1, - generic: true, + name: "AddFloat32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "HasCPUFeature", - auxType: auxSym, - argLen: 0, - symEffect: SymNone, - generic: true, + name: "AddSubFloat32x4", + argLen: 2, + generic: true, }, { - name: "PanicBounds", - auxType: auxInt64, - argLen: 3, - call: true, - generic: true, + name: "AndFloat32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "PanicExtend", - auxType: auxInt64, - argLen: 4, - call: true, + name: "AndNotFloat32x4", + argLen: 2, generic: true, }, { - name: "ClosureCall", - auxType: auxCallOff, - argLen: -1, - call: true, + name: "ApproximateReciprocalFloat32x4", + argLen: 1, generic: true, }, { - name: "StaticCall", - auxType: auxCallOff, - 
argLen: -1, - call: true, + name: "ApproximateReciprocalOfSqrtFloat32x4", + argLen: 1, generic: true, }, { - name: "InterCall", - auxType: auxCallOff, - argLen: -1, - call: true, + name: "CeilFloat32x4", + argLen: 1, generic: true, }, { - name: "TailCall", - auxType: auxCallOff, - argLen: -1, - call: true, + name: "DivFloat32x4", + argLen: 2, generic: true, }, { - name: "ClosureLECall", - auxType: auxCallOff, - argLen: -1, - call: true, - generic: true, + name: "EqualFloat32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "StaticLECall", - auxType: auxCallOff, - argLen: -1, - call: true, + name: "FloorFloat32x4", + argLen: 1, generic: true, }, { - name: "InterLECall", - auxType: auxCallOff, - argLen: -1, - call: true, + name: "FusedMultiplyAddFloat32x4", + argLen: 3, generic: true, }, { - name: "TailLECall", - auxType: auxCallOff, - argLen: -1, - call: true, + name: "FusedMultiplyAddSubFloat32x4", + argLen: 3, generic: true, }, { - name: "SignExt8to16", - argLen: 1, + name: "FusedMultiplySubAddFloat32x4", + argLen: 3, generic: true, }, { - name: "SignExt8to32", - argLen: 1, + name: "GreaterFloat32x4", + argLen: 2, generic: true, }, { - name: "SignExt8to64", - argLen: 1, + name: "GreaterEqualFloat32x4", + argLen: 2, generic: true, }, { - name: "SignExt16to32", - argLen: 1, - generic: true, + name: "IsNanFloat32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SignExt16to64", - argLen: 1, + name: "LessFloat32x4", + argLen: 2, generic: true, }, { - name: "SignExt32to64", - argLen: 1, + name: "LessEqualFloat32x4", + argLen: 2, generic: true, }, { - name: "ZeroExt8to16", - argLen: 1, - generic: true, + name: "MaskedAddFloat32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ZeroExt8to32", - argLen: 1, - generic: true, + name: "MaskedAndFloat32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ZeroExt8to64", - argLen: 1, + name: "MaskedAndNotFloat32x4", + argLen: 3, generic: true, }, { - name: "ZeroExt16to32", - argLen: 1, + name: "MaskedApproximateReciprocalFloat32x4", + argLen: 2, generic: true, }, { - name: "ZeroExt16to64", - argLen: 1, + name: "MaskedApproximateReciprocalOfSqrtFloat32x4", + argLen: 2, generic: true, }, { - name: "ZeroExt32to64", - argLen: 1, + name: "MaskedDivFloat32x4", + argLen: 3, generic: true, }, { - name: "Trunc16to8", - argLen: 1, - generic: true, + name: "MaskedEqualFloat32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Trunc32to8", - argLen: 1, + name: "MaskedFusedMultiplyAddFloat32x4", + argLen: 4, generic: true, }, { - name: "Trunc32to16", - argLen: 1, + name: "MaskedFusedMultiplyAddSubFloat32x4", + argLen: 4, generic: true, }, { - name: "Trunc64to8", - argLen: 1, + name: "MaskedFusedMultiplySubAddFloat32x4", + argLen: 4, generic: true, }, { - name: "Trunc64to16", - argLen: 1, + name: "MaskedGreaterFloat32x4", + argLen: 3, generic: true, }, { - name: "Trunc64to32", - argLen: 1, + name: "MaskedGreaterEqualFloat32x4", + argLen: 3, generic: true, }, { - name: "Cvt32to32F", - argLen: 1, - generic: true, + name: "MaskedIsNanFloat32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Cvt32to64F", - argLen: 1, + name: "MaskedLessFloat32x4", + argLen: 3, generic: true, }, { - name: "Cvt64to32F", - argLen: 1, + name: "MaskedLessEqualFloat32x4", + argLen: 3, generic: true, }, { - name: "Cvt64to64F", - argLen: 1, - generic: true, + name: "MaskedMaxFloat32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Cvt32Fto32", - argLen: 1, - generic: 
true, + name: "MaskedMinFloat32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Cvt32Fto64", - argLen: 1, - generic: true, + name: "MaskedMulFloat32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Cvt64Fto32", - argLen: 1, + name: "MaskedMulByPowOf2Float32x4", + argLen: 3, generic: true, }, { - name: "Cvt64Fto64", - argLen: 1, - generic: true, + name: "MaskedNotEqualFloat32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Cvt32Fto64F", - argLen: 1, - generic: true, + name: "MaskedOrFloat32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Cvt64Fto32F", - argLen: 1, + name: "MaskedSqrtFloat32x4", + argLen: 2, generic: true, }, { - name: "CvtBoolToUint8", - argLen: 1, + name: "MaskedSubFloat32x4", + argLen: 3, generic: true, }, { - name: "Round32F", - argLen: 1, - generic: true, + name: "MaskedXorFloat32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Round64F", - argLen: 1, - generic: true, + name: "MaxFloat32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "IsNonNil", - argLen: 1, - generic: true, + name: "MinFloat32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "IsInBounds", - argLen: 2, - generic: true, + name: "MulFloat32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "IsSliceInBounds", + name: "MulByPowOf2Float32x4", argLen: 2, generic: true, }, { - name: "NilCheck", - argLen: 2, - nilCheck: true, - generic: true, + name: "NotEqualFloat32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "GetG", - argLen: 1, - zeroWidth: true, - generic: true, + name: "OrFloat32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "PairwiseAddFloat32x4", + argLen: 2, + generic: true, }, { - name: "GetClosurePtr", - argLen: 0, + name: "PairwiseSubFloat32x4", + argLen: 2, generic: true, }, { - name: "GetCallerPC", - argLen: 0, + name: "RoundFloat32x4", + argLen: 1, generic: true, }, { - name: "GetCallerSP", + name: "SqrtFloat32x4", argLen: 1, generic: true, }, { - name: "PtrIndex", + name: "SubFloat32x4", argLen: 2, generic: true, }, { - name: "OffPtr", - auxType: auxInt64, + name: "TruncFloat32x4", argLen: 1, generic: true, }, { - name: "SliceMake", - argLen: 3, + name: "XorFloat32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AddFloat32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AddSubFloat32x8", + argLen: 2, generic: true, }, { - name: "SlicePtr", - argLen: 1, + name: "AndFloat32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndNotFloat32x8", + argLen: 2, generic: true, }, { - name: "SliceLen", + name: "ApproximateReciprocalFloat32x8", argLen: 1, generic: true, }, { - name: "SliceCap", + name: "ApproximateReciprocalOfSqrtFloat32x8", argLen: 1, generic: true, }, { - name: "SlicePtrUnchecked", + name: "CeilFloat32x8", argLen: 1, generic: true, }, { - name: "ComplexMake", + name: "DivFloat32x8", argLen: 2, generic: true, }, { - name: "ComplexReal", - argLen: 1, - generic: true, + name: "EqualFloat32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "ComplexImag", + name: "FloorFloat32x8", argLen: 1, generic: true, }, { - name: "StringMake", - argLen: 2, + name: "FusedMultiplyAddFloat32x8", + argLen: 3, generic: true, }, { - name: "StringPtr", - argLen: 1, + name: "FusedMultiplyAddSubFloat32x8", + argLen: 3, generic: true, }, { - name: "StringLen", - argLen: 1, + name: "FusedMultiplySubAddFloat32x8", + 
argLen: 3, generic: true, }, { - name: "IMake", + name: "GreaterFloat32x8", argLen: 2, generic: true, }, { - name: "ITab", - argLen: 1, + name: "GreaterEqualFloat32x8", + argLen: 2, generic: true, }, { - name: "IData", - argLen: 1, - generic: true, + name: "IsNanFloat32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "StructMake", - argLen: -1, + name: "LessFloat32x8", + argLen: 2, generic: true, }, { - name: "StructSelect", - auxType: auxInt64, - argLen: 1, + name: "LessEqualFloat32x8", + argLen: 2, generic: true, }, { - name: "ArrayMake0", - argLen: 0, - generic: true, + name: "MaskedAddFloat32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ArrayMake1", - argLen: 1, + name: "MaskedAndFloat32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndNotFloat32x8", + argLen: 3, generic: true, }, { - name: "ArraySelect", - auxType: auxInt64, - argLen: 1, + name: "MaskedApproximateReciprocalFloat32x8", + argLen: 2, generic: true, }, { - name: "StoreReg", - argLen: 1, + name: "MaskedApproximateReciprocalOfSqrtFloat32x8", + argLen: 2, generic: true, }, { - name: "LoadReg", - argLen: 1, + name: "MaskedDivFloat32x8", + argLen: 3, generic: true, }, { - name: "FwdRef", - auxType: auxSym, - argLen: 0, - symEffect: SymNone, - generic: true, + name: "MaskedEqualFloat32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Unknown", - argLen: 0, + name: "MaskedFusedMultiplyAddFloat32x8", + argLen: 4, generic: true, }, { - name: "VarDef", - auxType: auxSym, - argLen: 1, - zeroWidth: true, - symEffect: SymNone, - generic: true, + name: "MaskedFusedMultiplyAddSubFloat32x8", + argLen: 4, + generic: true, }, { - name: "VarLive", - auxType: auxSym, - argLen: 1, - zeroWidth: true, - symEffect: SymRead, - generic: true, + name: "MaskedFusedMultiplySubAddFloat32x8", + argLen: 4, + generic: true, }, { - name: "KeepAlive", - argLen: 2, - zeroWidth: true, - generic: true, + name: "MaskedGreaterFloat32x8", + argLen: 3, + generic: true, }, { - name: "InlMark", - auxType: auxInt32, - argLen: 1, + name: "MaskedGreaterEqualFloat32x8", + argLen: 3, generic: true, }, { - name: "Int64Make", - argLen: 2, - generic: true, + name: "MaskedIsNanFloat32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Int64Hi", - argLen: 1, + name: "MaskedLessFloat32x8", + argLen: 3, generic: true, }, { - name: "Int64Lo", - argLen: 1, + name: "MaskedLessEqualFloat32x8", + argLen: 3, generic: true, }, { - name: "Add32carry", - argLen: 2, + name: "MaskedMaxFloat32x8", + argLen: 3, commutative: true, generic: true, }, { - name: "Add32withcarry", + name: "MaskedMinFloat32x8", argLen: 3, commutative: true, generic: true, }, { - name: "Sub32carry", - argLen: 2, - generic: true, + name: "MaskedMulFloat32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Sub32withcarry", + name: "MaskedMulByPowOf2Float32x8", argLen: 3, generic: true, }, { - name: "Add64carry", + name: "MaskedNotEqualFloat32x8", argLen: 3, commutative: true, generic: true, }, { - name: "Sub64borrow", - argLen: 3, - generic: true, + name: "MaskedOrFloat32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Signmask", - argLen: 1, + name: "MaskedSqrtFloat32x8", + argLen: 2, generic: true, }, { - name: "Zeromask", - argLen: 1, + name: "MaskedSubFloat32x8", + argLen: 3, generic: true, }, { - name: "Slicemask", - argLen: 1, - generic: true, + name: "MaskedXorFloat32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "SpectreIndex", - 
argLen: 2, - generic: true, + name: "MaxFloat32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SpectreSliceIndex", - argLen: 2, - generic: true, + name: "MinFloat32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Cvt32Uto32F", - argLen: 1, - generic: true, + name: "MulFloat32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Cvt32Uto64F", - argLen: 1, + name: "MulByPowOf2Float32x8", + argLen: 2, generic: true, }, { - name: "Cvt32Fto32U", - argLen: 1, - generic: true, + name: "NotEqualFloat32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Cvt64Fto32U", - argLen: 1, - generic: true, + name: "OrFloat32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Cvt64Uto32F", - argLen: 1, + name: "PairwiseAddFloat32x8", + argLen: 2, generic: true, }, { - name: "Cvt64Uto64F", - argLen: 1, + name: "PairwiseSubFloat32x8", + argLen: 2, generic: true, }, { - name: "Cvt32Fto64U", + name: "RoundFloat32x8", argLen: 1, generic: true, }, { - name: "Cvt64Fto64U", + name: "SqrtFloat32x8", argLen: 1, generic: true, }, { - name: "Select0", - argLen: 1, - zeroWidth: true, - generic: true, - }, - { - name: "Select1", - argLen: 1, - zeroWidth: true, - generic: true, - }, - { - name: "MakeTuple", + name: "SubFloat32x8", argLen: 2, generic: true, }, { - name: "SelectN", - auxType: auxInt64, + name: "TruncFloat32x8", argLen: 1, generic: true, }, { - name: "SelectNAddr", - auxType: auxInt64, - argLen: 1, - generic: true, + name: "XorFloat32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MakeResult", - argLen: -1, - generic: true, + name: "AddFloat64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicLoad8", + name: "AddSubFloat64x2", argLen: 2, generic: true, }, { - name: "AtomicLoad32", + name: "AndFloat64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndNotFloat64x2", argLen: 2, generic: true, }, { - name: "AtomicLoad64", - argLen: 2, + name: "ApproximateReciprocalFloat64x2", + argLen: 1, generic: true, }, { - name: "AtomicLoadPtr", - argLen: 2, + name: "ApproximateReciprocalOfSqrtFloat64x2", + argLen: 1, generic: true, }, { - name: "AtomicLoadAcq32", - argLen: 2, + name: "CeilFloat64x2", + argLen: 1, generic: true, }, { - name: "AtomicLoadAcq64", + name: "DivFloat64x2", argLen: 2, generic: true, }, { - name: "AtomicStore8", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "DotProdBroadcastFloat64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicStore32", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "EqualFloat64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicStore64", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "FloorFloat64x2", + argLen: 1, + generic: true, }, { - name: "AtomicStorePtrNoWB", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "FusedMultiplyAddFloat64x2", + argLen: 3, + generic: true, }, { - name: "AtomicStoreRel32", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "FusedMultiplyAddSubFloat64x2", + argLen: 3, + generic: true, }, { - name: "AtomicStoreRel64", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "FusedMultiplySubAddFloat64x2", + argLen: 3, + generic: true, }, { - name: "AtomicExchange8", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "GreaterFloat64x2", + argLen: 2, + generic: true, }, { - name: "AtomicExchange32", - argLen: 3, - hasSideEffects: true, - generic: true, + 
name: "GreaterEqualFloat64x2", + argLen: 2, + generic: true, }, { - name: "AtomicExchange64", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "IsNanFloat64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicAdd32", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "LessFloat64x2", + argLen: 2, + generic: true, }, { - name: "AtomicAdd64", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "LessEqualFloat64x2", + argLen: 2, + generic: true, }, { - name: "AtomicCompareAndSwap32", - argLen: 4, - hasSideEffects: true, - generic: true, + name: "MaskedAddFloat64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AtomicCompareAndSwap64", - argLen: 4, - hasSideEffects: true, - generic: true, + name: "MaskedAndFloat64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AtomicCompareAndSwapRel32", - argLen: 4, - hasSideEffects: true, - generic: true, + name: "MaskedAndNotFloat64x2", + argLen: 3, + generic: true, }, { - name: "AtomicAnd8", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedApproximateReciprocalFloat64x2", + argLen: 2, + generic: true, }, { - name: "AtomicOr8", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedApproximateReciprocalOfSqrtFloat64x2", + argLen: 2, + generic: true, }, { - name: "AtomicAnd32", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedDivFloat64x2", + argLen: 3, + generic: true, }, { - name: "AtomicOr32", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedEqualFloat64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AtomicAnd64value", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedFusedMultiplyAddFloat64x2", + argLen: 4, + generic: true, }, { - name: "AtomicAnd32value", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedFusedMultiplyAddSubFloat64x2", + argLen: 4, + generic: true, }, { - name: "AtomicAnd8value", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedFusedMultiplySubAddFloat64x2", + argLen: 4, + generic: true, }, { - name: "AtomicOr64value", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedGreaterFloat64x2", + argLen: 3, + generic: true, }, { - name: "AtomicOr32value", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedGreaterEqualFloat64x2", + argLen: 3, + generic: true, }, { - name: "AtomicOr8value", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedIsNanFloat64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AtomicStore8Variant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedLessFloat64x2", + argLen: 3, + generic: true, }, { - name: "AtomicStore32Variant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedLessEqualFloat64x2", + argLen: 3, + generic: true, }, { - name: "AtomicStore64Variant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedMaxFloat64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AtomicAdd32Variant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedMinFloat64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AtomicAdd64Variant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedMulFloat64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AtomicExchange8Variant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedMulByPowOf2Float64x2", + 
argLen: 3, + generic: true, }, { - name: "AtomicExchange32Variant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedNotEqualFloat64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AtomicExchange64Variant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedOrFloat64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AtomicCompareAndSwap32Variant", - argLen: 4, - hasSideEffects: true, - generic: true, + name: "MaskedSqrtFloat64x2", + argLen: 2, + generic: true, }, { - name: "AtomicCompareAndSwap64Variant", - argLen: 4, - hasSideEffects: true, - generic: true, + name: "MaskedSubFloat64x2", + argLen: 3, + generic: true, }, { - name: "AtomicAnd64valueVariant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedXorFloat64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AtomicOr64valueVariant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaxFloat64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicAnd32valueVariant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MinFloat64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicOr32valueVariant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MulFloat64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicAnd8valueVariant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MulByPowOf2Float64x2", + argLen: 2, + generic: true, }, { - name: "AtomicOr8valueVariant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "NotEqualFloat64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "PubBarrier", - argLen: 1, - hasSideEffects: true, - generic: true, + name: "OrFloat64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Clobber", - auxType: auxSymOff, - argLen: 0, - symEffect: SymNone, - generic: true, + name: "PairwiseAddFloat64x2", + argLen: 2, + generic: true, }, { - name: "ClobberReg", - argLen: 0, + name: "PairwiseSubFloat64x2", + argLen: 2, generic: true, }, { - name: "PrefetchCache", - argLen: 2, - hasSideEffects: true, - generic: true, + name: "RoundFloat64x2", + argLen: 1, + generic: true, }, { - name: "PrefetchCacheStreamed", - argLen: 2, - hasSideEffects: true, - generic: true, + name: "SqrtFloat64x2", + argLen: 1, + generic: true, }, { - name: "Add32x4", + name: "SubFloat64x2", argLen: 2, generic: true, }, { - name: "ZeroSIMD", - argLen: 0, + name: "TruncFloat64x2", + argLen: 1, generic: true, }, { - name: "AddFloat32x16", + name: "XorFloat64x2", argLen: 2, commutative: true, generic: true, }, { - name: "AndFloat32x16", + name: "AddFloat64x4", argLen: 2, commutative: true, generic: true, }, { - name: "AndNotFloat32x16", + name: "AddSubFloat64x4", argLen: 2, generic: true, }, { - name: "ApproximateReciprocalFloat32x16", + name: "AndFloat64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndNotFloat64x4", + argLen: 2, + generic: true, + }, + { + name: "ApproximateReciprocalFloat64x4", argLen: 1, generic: true, }, { - name: "ApproximateReciprocalOfSqrtFloat32x16", + name: "ApproximateReciprocalOfSqrtFloat64x4", argLen: 1, generic: true, }, { - name: "DivFloat32x16", + name: "CeilFloat64x4", + argLen: 1, + generic: true, + }, + { + name: "DivFloat64x4", argLen: 2, generic: true, }, { - name: "EqualFloat32x16", + name: "EqualFloat64x4", argLen: 2, commutative: true, generic: true, }, { - name: "FusedMultiplyAddFloat32x16", + name: 
"FloorFloat64x4", + argLen: 1, + generic: true, + }, + { + name: "FusedMultiplyAddFloat64x4", argLen: 3, generic: true, }, { - name: "FusedMultiplyAddSubFloat32x16", + name: "FusedMultiplyAddSubFloat64x4", argLen: 3, generic: true, }, { - name: "FusedMultiplySubAddFloat32x16", + name: "FusedMultiplySubAddFloat64x4", argLen: 3, generic: true, }, { - name: "GreaterFloat32x16", + name: "GreaterFloat64x4", argLen: 2, generic: true, }, { - name: "GreaterEqualFloat32x16", + name: "GreaterEqualFloat64x4", argLen: 2, generic: true, }, { - name: "IsNanFloat32x16", + name: "IsNanFloat64x4", argLen: 2, commutative: true, generic: true, }, { - name: "LessFloat32x16", + name: "LessFloat64x4", argLen: 2, generic: true, }, { - name: "LessEqualFloat32x16", + name: "LessEqualFloat64x4", argLen: 2, generic: true, }, { - name: "MaskedAddFloat32x16", + name: "MaskedAddFloat64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndFloat32x16", + name: "MaskedAndFloat64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndNotFloat32x16", + name: "MaskedAndNotFloat64x4", argLen: 3, generic: true, }, { - name: "MaskedApproximateReciprocalFloat32x16", + name: "MaskedApproximateReciprocalFloat64x4", argLen: 2, generic: true, }, { - name: "MaskedApproximateReciprocalOfSqrtFloat32x16", + name: "MaskedApproximateReciprocalOfSqrtFloat64x4", argLen: 2, generic: true, }, { - name: "MaskedDivFloat32x16", + name: "MaskedDivFloat64x4", argLen: 3, generic: true, }, { - name: "MaskedEqualFloat32x16", + name: "MaskedEqualFloat64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedFusedMultiplyAddFloat32x16", + name: "MaskedFusedMultiplyAddFloat64x4", argLen: 4, generic: true, }, { - name: "MaskedFusedMultiplyAddSubFloat32x16", + name: "MaskedFusedMultiplyAddSubFloat64x4", argLen: 4, generic: true, }, { - name: "MaskedFusedMultiplySubAddFloat32x16", + name: "MaskedFusedMultiplySubAddFloat64x4", argLen: 4, generic: true, }, { - name: "MaskedGreaterFloat32x16", + name: "MaskedGreaterFloat64x4", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualFloat32x16", + name: "MaskedGreaterEqualFloat64x4", argLen: 3, generic: true, }, { - name: "MaskedIsNanFloat32x16", + name: "MaskedIsNanFloat64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedLessFloat32x16", + name: "MaskedLessFloat64x4", argLen: 3, generic: true, }, { - name: "MaskedLessEqualFloat32x16", + name: "MaskedLessEqualFloat64x4", argLen: 3, generic: true, }, { - name: "MaskedMaxFloat32x16", + name: "MaskedMaxFloat64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinFloat32x16", + name: "MaskedMinFloat64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulFloat32x16", + name: "MaskedMulFloat64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulByPowOf2Float32x16", + name: "MaskedMulByPowOf2Float64x4", argLen: 3, generic: true, }, { - name: "MaskedNotEqualFloat32x16", + name: "MaskedNotEqualFloat64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedOrFloat32x16", + name: "MaskedOrFloat64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedSqrtFloat32x16", + name: "MaskedSqrtFloat64x4", argLen: 2, generic: true, }, { - name: "MaskedSubFloat32x16", + name: "MaskedSubFloat64x4", argLen: 3, generic: true, }, { - name: "MaskedXorFloat32x16", + name: "MaskedXorFloat64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaxFloat32x16", + name: "MaxFloat64x4", argLen: 2, commutative: true, generic: 
true, }, { - name: "MinFloat32x16", + name: "MinFloat64x4", argLen: 2, commutative: true, generic: true, }, { - name: "MulFloat32x16", + name: "MulFloat64x4", argLen: 2, commutative: true, generic: true, }, { - name: "MulByPowOf2Float32x16", + name: "MulByPowOf2Float64x4", argLen: 2, generic: true, }, { - name: "NotEqualFloat32x16", + name: "NotEqualFloat64x4", argLen: 2, commutative: true, generic: true, }, { - name: "OrFloat32x16", + name: "OrFloat64x4", argLen: 2, commutative: true, generic: true, }, { - name: "SqrtFloat32x16", + name: "PairwiseAddFloat64x4", + argLen: 2, + generic: true, + }, + { + name: "PairwiseSubFloat64x4", + argLen: 2, + generic: true, + }, + { + name: "RoundFloat64x4", argLen: 1, generic: true, }, { - name: "SubFloat32x16", + name: "SqrtFloat64x4", + argLen: 1, + generic: true, + }, + { + name: "SubFloat64x4", argLen: 2, generic: true, }, { - name: "XorFloat32x16", + name: "TruncFloat64x4", + argLen: 1, + generic: true, + }, + { + name: "XorFloat64x4", argLen: 2, commutative: true, generic: true, }, { - name: "AddFloat32x4", + name: "AddFloat64x8", argLen: 2, commutative: true, generic: true, }, { - name: "AddSubFloat32x4", + name: "AndFloat64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndNotFloat64x8", argLen: 2, generic: true, }, { - name: "AndFloat32x4", + name: "ApproximateReciprocalFloat64x8", + argLen: 1, + generic: true, + }, + { + name: "ApproximateReciprocalOfSqrtFloat64x8", + argLen: 1, + generic: true, + }, + { + name: "DivFloat64x8", + argLen: 2, + generic: true, + }, + { + name: "EqualFloat64x8", argLen: 2, commutative: true, generic: true, }, { - name: "AndNotFloat32x4", - argLen: 2, + name: "FusedMultiplyAddFloat64x8", + argLen: 3, generic: true, }, { - name: "ApproximateReciprocalFloat32x4", - argLen: 1, + name: "FusedMultiplyAddSubFloat64x8", + argLen: 3, generic: true, }, { - name: "ApproximateReciprocalOfSqrtFloat32x4", - argLen: 1, + name: "FusedMultiplySubAddFloat64x8", + argLen: 3, generic: true, }, { - name: "CeilFloat32x4", - argLen: 1, + name: "GreaterFloat64x8", + argLen: 2, generic: true, }, { - name: "DivFloat32x4", + name: "GreaterEqualFloat64x8", argLen: 2, generic: true, }, { - name: "EqualFloat32x4", + name: "IsNanFloat64x8", argLen: 2, commutative: true, generic: true, }, { - name: "FloorFloat32x4", - argLen: 1, + name: "LessFloat64x8", + argLen: 2, generic: true, }, { - name: "FusedMultiplyAddFloat32x4", + name: "LessEqualFloat64x8", + argLen: 2, + generic: true, + }, + { + name: "MaskedAddFloat64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndFloat64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndNotFloat64x8", argLen: 3, generic: true, }, { - name: "FusedMultiplyAddSubFloat32x4", + name: "MaskedApproximateReciprocalFloat64x8", + argLen: 2, + generic: true, + }, + { + name: "MaskedApproximateReciprocalOfSqrtFloat64x8", + argLen: 2, + generic: true, + }, + { + name: "MaskedDivFloat64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedEqualFloat64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedFusedMultiplyAddFloat64x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAddSubFloat64x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySubAddFloat64x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedGreaterFloat64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedGreaterEqualFloat64x8", + argLen: 3, + generic: true, + }, + { + name: 
"MaskedIsNanFloat64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedLessFloat64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessEqualFloat64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedMaxFloat64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMinFloat64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulFloat64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulByPowOf2Float64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedNotEqualFloat64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedOrFloat64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedSqrtFloat64x8", + argLen: 2, + generic: true, + }, + { + name: "MaskedSubFloat64x8", argLen: 3, generic: true, }, { - name: "FusedMultiplySubAddFloat32x4", - argLen: 3, + name: "MaskedXorFloat64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaxFloat64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MinFloat64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulFloat64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulByPowOf2Float64x8", + argLen: 2, + generic: true, + }, + { + name: "NotEqualFloat64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "OrFloat64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "SqrtFloat64x8", + argLen: 1, + generic: true, + }, + { + name: "SubFloat64x8", + argLen: 2, generic: true, }, { - name: "GreaterFloat32x4", - argLen: 2, - generic: true, + name: "XorFloat64x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "GreaterEqualFloat32x4", - argLen: 2, + name: "AbsoluteInt16x16", + argLen: 1, generic: true, }, { - name: "IsNanFloat32x4", + name: "AddInt16x16", argLen: 2, commutative: true, generic: true, }, { - name: "LessFloat32x4", - argLen: 2, - generic: true, + name: "AndInt16x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "LessEqualFloat32x4", + name: "AndNotInt16x16", argLen: 2, generic: true, }, { - name: "MaskedAddFloat32x4", - argLen: 3, + name: "EqualInt16x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedAndFloat32x4", - argLen: 3, - commutative: true, - generic: true, + name: "GreaterInt16x16", + argLen: 2, + generic: true, }, { - name: "MaskedAndNotFloat32x4", - argLen: 3, + name: "GreaterEqualInt16x16", + argLen: 2, generic: true, }, { - name: "MaskedApproximateReciprocalFloat32x4", + name: "LessInt16x16", argLen: 2, generic: true, }, { - name: "MaskedApproximateReciprocalOfSqrtFloat32x4", + name: "LessEqualInt16x16", argLen: 2, generic: true, }, { - name: "MaskedDivFloat32x4", - argLen: 3, + name: "MaskedAbsoluteInt16x16", + argLen: 2, generic: true, }, { - name: "MaskedEqualFloat32x4", + name: "MaskedAddInt16x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedFusedMultiplyAddFloat32x4", - argLen: 4, - generic: true, + name: "MaskedEqualInt16x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaskedFusedMultiplyAddSubFloat32x4", - argLen: 4, + name: "MaskedGreaterInt16x16", + argLen: 3, generic: true, }, { - name: "MaskedFusedMultiplySubAddFloat32x4", - argLen: 4, + name: "MaskedGreaterEqualInt16x16", + argLen: 3, generic: true, }, { - name: "MaskedGreaterFloat32x4", + name: "MaskedLessInt16x16", argLen: 3, generic: true, }, { - name: 
"MaskedGreaterEqualFloat32x4", + name: "MaskedLessEqualInt16x16", argLen: 3, generic: true, }, { - name: "MaskedIsNanFloat32x4", + name: "MaskedMaxInt16x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedLessFloat32x4", - argLen: 3, - generic: true, - }, - { - name: "MaskedLessEqualFloat32x4", - argLen: 3, - generic: true, + name: "MaskedMinInt16x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaskedMaxFloat32x4", + name: "MaskedMulHighInt16x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinFloat32x4", + name: "MaskedMulLowInt16x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulFloat32x4", + name: "MaskedNotEqualInt16x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulByPowOf2Float32x4", + name: "MaskedPairDotProdInt16x16", argLen: 3, generic: true, }, { - name: "MaskedNotEqualFloat32x4", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedPopCountInt16x16", + argLen: 2, + generic: true, }, { - name: "MaskedOrFloat32x4", + name: "MaskedSaturatedAddInt16x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedSqrtFloat32x4", - argLen: 2, + name: "MaskedSaturatedSubInt16x16", + argLen: 3, generic: true, }, { - name: "MaskedSubFloat32x4", + name: "MaskedShiftLeftInt16x16", argLen: 3, generic: true, }, { - name: "MaskedXorFloat32x4", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedShiftLeftAndFillUpperFromInt16x16", + argLen: 4, + generic: true, }, { - name: "MaxFloat32x4", + name: "MaskedShiftRightInt16x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedShiftRightAndFillUpperFromInt16x16", + argLen: 4, + generic: true, + }, + { + name: "MaskedShiftRightSignExtendedInt16x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedSubInt16x16", + argLen: 3, + generic: true, + }, + { + name: "MaxInt16x16", argLen: 2, commutative: true, generic: true, }, { - name: "MinFloat32x4", + name: "MinInt16x16", argLen: 2, commutative: true, generic: true, }, { - name: "MulFloat32x4", + name: "MulHighInt16x16", argLen: 2, commutative: true, generic: true, }, { - name: "MulByPowOf2Float32x4", - argLen: 2, - generic: true, + name: "MulLowInt16x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "NotEqualFloat32x4", + name: "NotEqualInt16x16", argLen: 2, commutative: true, generic: true, }, { - name: "OrFloat32x4", + name: "OrInt16x16", argLen: 2, commutative: true, generic: true, }, { - name: "PairwiseAddFloat32x4", + name: "PairDotProdInt16x16", argLen: 2, generic: true, }, { - name: "PairwiseSubFloat32x4", + name: "PairwiseAddInt16x16", argLen: 2, generic: true, }, { - name: "RoundFloat32x4", - argLen: 1, - generic: true, - }, - { - name: "SqrtFloat32x4", - argLen: 1, - generic: true, - }, - { - name: "SubFloat32x4", + name: "PairwiseSubInt16x16", argLen: 2, generic: true, }, { - name: "TruncFloat32x4", + name: "PopCountInt16x16", argLen: 1, generic: true, }, { - name: "XorFloat32x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AddFloat32x8", + name: "SaturatedAddInt16x16", argLen: 2, commutative: true, generic: true, }, { - name: "AddSubFloat32x8", + name: "SaturatedPairwiseAddInt16x16", argLen: 2, generic: true, }, { - name: "AndFloat32x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndNotFloat32x8", + name: "SaturatedPairwiseSubInt16x16", argLen: 2, generic: true, }, { - name: "ApproximateReciprocalFloat32x8", - argLen: 1, + name: "SaturatedSubInt16x16", + argLen: 2, generic: 
true, }, { - name: "ApproximateReciprocalOfSqrtFloat32x8", - argLen: 1, + name: "ShiftAllLeftInt16x16", + argLen: 2, generic: true, }, { - name: "CeilFloat32x8", - argLen: 1, + name: "ShiftAllRightInt16x16", + argLen: 2, generic: true, }, { - name: "DivFloat32x8", + name: "ShiftAllRightSignExtendedInt16x16", argLen: 2, generic: true, }, { - name: "EqualFloat32x8", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftLeftInt16x16", + argLen: 2, + generic: true, }, { - name: "FloorFloat32x8", - argLen: 1, + name: "ShiftLeftAndFillUpperFromInt16x16", + argLen: 3, generic: true, }, { - name: "FusedMultiplyAddFloat32x8", - argLen: 3, + name: "ShiftRightInt16x16", + argLen: 2, generic: true, }, { - name: "FusedMultiplyAddSubFloat32x8", + name: "ShiftRightAndFillUpperFromInt16x16", argLen: 3, generic: true, }, { - name: "FusedMultiplySubAddFloat32x8", - argLen: 3, + name: "ShiftRightSignExtendedInt16x16", + argLen: 2, generic: true, }, { - name: "GreaterFloat32x8", + name: "SignInt16x16", argLen: 2, generic: true, }, { - name: "GreaterEqualFloat32x8", + name: "SubInt16x16", argLen: 2, generic: true, }, { - name: "IsNanFloat32x8", + name: "XorInt16x16", argLen: 2, commutative: true, generic: true, }, { - name: "LessFloat32x8", - argLen: 2, - generic: true, - }, - { - name: "LessEqualFloat32x8", - argLen: 2, + name: "AbsoluteInt16x32", + argLen: 1, generic: true, }, { - name: "MaskedAddFloat32x8", - argLen: 3, + name: "AddInt16x32", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedAndFloat32x8", - argLen: 3, + name: "EqualInt16x32", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedAndNotFloat32x8", - argLen: 3, + name: "GreaterInt16x32", + argLen: 2, generic: true, }, { - name: "MaskedApproximateReciprocalFloat32x8", + name: "GreaterEqualInt16x32", argLen: 2, generic: true, }, { - name: "MaskedApproximateReciprocalOfSqrtFloat32x8", + name: "LessInt16x32", argLen: 2, generic: true, }, { - name: "MaskedDivFloat32x8", - argLen: 3, + name: "LessEqualInt16x32", + argLen: 2, generic: true, }, { - name: "MaskedEqualFloat32x8", + name: "MaskedAbsoluteInt16x32", + argLen: 2, + generic: true, + }, + { + name: "MaskedAddInt16x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedFusedMultiplyAddFloat32x8", - argLen: 4, - generic: true, + name: "MaskedEqualInt16x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaskedFusedMultiplyAddSubFloat32x8", - argLen: 4, + name: "MaskedGreaterInt16x32", + argLen: 3, generic: true, }, { - name: "MaskedFusedMultiplySubAddFloat32x8", - argLen: 4, + name: "MaskedGreaterEqualInt16x32", + argLen: 3, generic: true, }, { - name: "MaskedGreaterFloat32x8", + name: "MaskedLessInt16x32", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualFloat32x8", + name: "MaskedLessEqualInt16x32", argLen: 3, generic: true, }, { - name: "MaskedIsNanFloat32x8", + name: "MaskedMaxInt16x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedLessFloat32x8", - argLen: 3, - generic: true, - }, - { - name: "MaskedLessEqualFloat32x8", - argLen: 3, - generic: true, + name: "MaskedMinInt16x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaskedMaxFloat32x8", + name: "MaskedMulHighInt16x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinFloat32x8", + name: "MaskedMulLowInt16x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulFloat32x8", + name: "MaskedNotEqualInt16x32", argLen: 3, commutative: true, generic: true, }, { - name: 
"MaskedMulByPowOf2Float32x8", + name: "MaskedPairDotProdInt16x32", argLen: 3, generic: true, }, { - name: "MaskedNotEqualFloat32x8", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedPopCountInt16x32", + argLen: 2, + generic: true, }, { - name: "MaskedOrFloat32x8", + name: "MaskedSaturatedAddInt16x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedSqrtFloat32x8", - argLen: 2, + name: "MaskedSaturatedSubInt16x32", + argLen: 3, generic: true, }, { - name: "MaskedSubFloat32x8", + name: "MaskedShiftLeftInt16x32", argLen: 3, generic: true, }, { - name: "MaskedXorFloat32x8", - argLen: 3, + name: "MaskedShiftLeftAndFillUpperFromInt16x32", + argLen: 4, + generic: true, + }, + { + name: "MaskedShiftRightInt16x32", + argLen: 3, + generic: true, + }, + { + name: "MaskedShiftRightAndFillUpperFromInt16x32", + argLen: 4, + generic: true, + }, + { + name: "MaskedShiftRightSignExtendedInt16x32", + argLen: 3, + generic: true, + }, + { + name: "MaskedSubInt16x32", + argLen: 3, + generic: true, + }, + { + name: "MaxInt16x32", + argLen: 2, commutative: true, generic: true, }, { - name: "MaxFloat32x8", + name: "MinInt16x32", argLen: 2, commutative: true, generic: true, }, { - name: "MinFloat32x8", + name: "MulHighInt16x32", argLen: 2, commutative: true, generic: true, }, { - name: "MulFloat32x8", + name: "MulLowInt16x32", argLen: 2, commutative: true, generic: true, }, { - name: "MulByPowOf2Float32x8", + name: "NotEqualInt16x32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "PairDotProdInt16x32", argLen: 2, generic: true, }, { - name: "NotEqualFloat32x8", - argLen: 2, - commutative: true, - generic: true, + name: "PopCountInt16x32", + argLen: 1, + generic: true, }, { - name: "OrFloat32x8", + name: "SaturatedAddInt16x32", argLen: 2, commutative: true, generic: true, }, { - name: "PairwiseAddFloat32x8", + name: "SaturatedSubInt16x32", argLen: 2, generic: true, }, { - name: "PairwiseSubFloat32x8", + name: "ShiftLeftInt16x32", argLen: 2, generic: true, }, { - name: "RoundFloat32x8", - argLen: 1, + name: "ShiftLeftAndFillUpperFromInt16x32", + argLen: 3, generic: true, }, { - name: "SqrtFloat32x8", - argLen: 1, + name: "ShiftRightInt16x32", + argLen: 2, generic: true, }, { - name: "SubFloat32x8", + name: "ShiftRightAndFillUpperFromInt16x32", + argLen: 3, + generic: true, + }, + { + name: "ShiftRightSignExtendedInt16x32", argLen: 2, generic: true, }, { - name: "TruncFloat32x8", + name: "SubInt16x32", + argLen: 2, + generic: true, + }, + { + name: "AbsoluteInt16x8", argLen: 1, generic: true, }, { - name: "XorFloat32x8", + name: "AddInt16x8", argLen: 2, commutative: true, generic: true, }, { - name: "AddFloat64x2", + name: "AndInt16x8", argLen: 2, commutative: true, generic: true, }, { - name: "AddSubFloat64x2", + name: "AndNotInt16x8", argLen: 2, generic: true, }, { - name: "AndFloat64x2", + name: "EqualInt16x8", argLen: 2, commutative: true, generic: true, }, { - name: "AndNotFloat64x2", + name: "GreaterInt16x8", argLen: 2, generic: true, }, { - name: "ApproximateReciprocalFloat64x2", - argLen: 1, + name: "GreaterEqualInt16x8", + argLen: 2, generic: true, }, { - name: "ApproximateReciprocalOfSqrtFloat64x2", - argLen: 1, + name: "LessInt16x8", + argLen: 2, generic: true, }, { - name: "CeilFloat64x2", - argLen: 1, + name: "LessEqualInt16x8", + argLen: 2, generic: true, }, { - name: "DivFloat64x2", + name: "MaskedAbsoluteInt16x8", argLen: 2, generic: true, }, { - name: "DotProdBroadcastFloat64x2", - argLen: 2, + name: "MaskedAddInt16x8", + argLen: 3, commutative: 
true, generic: true, }, { - name: "EqualFloat64x2", - argLen: 2, + name: "MaskedEqualInt16x8", + argLen: 3, commutative: true, generic: true, }, { - name: "FloorFloat64x2", - argLen: 1, - generic: true, - }, - { - name: "FusedMultiplyAddFloat64x2", + name: "MaskedGreaterInt16x8", argLen: 3, generic: true, }, { - name: "FusedMultiplyAddSubFloat64x2", + name: "MaskedGreaterEqualInt16x8", argLen: 3, generic: true, }, { - name: "FusedMultiplySubAddFloat64x2", + name: "MaskedLessInt16x8", argLen: 3, generic: true, }, { - name: "GreaterFloat64x2", - argLen: 2, - generic: true, - }, - { - name: "GreaterEqualFloat64x2", - argLen: 2, + name: "MaskedLessEqualInt16x8", + argLen: 3, generic: true, }, { - name: "IsNanFloat64x2", - argLen: 2, + name: "MaskedMaxInt16x8", + argLen: 3, commutative: true, generic: true, }, { - name: "LessFloat64x2", - argLen: 2, - generic: true, + name: "MaskedMinInt16x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LessEqualFloat64x2", - argLen: 2, - generic: true, + name: "MaskedMulHighInt16x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaskedAddFloat64x2", + name: "MaskedMulLowInt16x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndFloat64x2", + name: "MaskedNotEqualInt16x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndNotFloat64x2", + name: "MaskedPairDotProdInt16x8", argLen: 3, generic: true, }, { - name: "MaskedApproximateReciprocalFloat64x2", + name: "MaskedPopCountInt16x8", argLen: 2, generic: true, }, { - name: "MaskedApproximateReciprocalOfSqrtFloat64x2", - argLen: 2, - generic: true, + name: "MaskedSaturatedAddInt16x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaskedDivFloat64x2", + name: "MaskedSaturatedSubInt16x8", argLen: 3, generic: true, }, { - name: "MaskedEqualFloat64x2", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedShiftLeftInt16x8", + argLen: 3, + generic: true, }, { - name: "MaskedFusedMultiplyAddFloat64x2", + name: "MaskedShiftLeftAndFillUpperFromInt16x8", argLen: 4, generic: true, }, { - name: "MaskedFusedMultiplyAddSubFloat64x2", - argLen: 4, + name: "MaskedShiftRightInt16x8", + argLen: 3, generic: true, }, { - name: "MaskedFusedMultiplySubAddFloat64x2", + name: "MaskedShiftRightAndFillUpperFromInt16x8", argLen: 4, generic: true, }, { - name: "MaskedGreaterFloat64x2", + name: "MaskedShiftRightSignExtendedInt16x8", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualFloat64x2", + name: "MaskedSubInt16x8", argLen: 3, generic: true, }, { - name: "MaskedIsNanFloat64x2", - argLen: 3, + name: "MaxInt16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedLessFloat64x2", - argLen: 3, - generic: true, - }, - { - name: "MaskedLessEqualFloat64x2", - argLen: 3, - generic: true, - }, - { - name: "MaskedMaxFloat64x2", - argLen: 3, + name: "MinInt16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMinFloat64x2", - argLen: 3, + name: "MulHighInt16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMulFloat64x2", - argLen: 3, + name: "MulLowInt16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMulByPowOf2Float64x2", - argLen: 3, - generic: true, + name: "NotEqualInt16x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedNotEqualFloat64x2", - argLen: 3, + name: "OrInt16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedOrFloat64x2", - argLen: 3, - commutative: true, - generic: true, + name: 
"PairDotProdInt16x8", + argLen: 2, + generic: true, }, { - name: "MaskedSqrtFloat64x2", + name: "PairwiseAddInt16x8", argLen: 2, generic: true, }, { - name: "MaskedSubFloat64x2", - argLen: 3, + name: "PairwiseSubInt16x8", + argLen: 2, generic: true, }, { - name: "MaskedXorFloat64x2", - argLen: 3, - commutative: true, - generic: true, + name: "PopCountInt16x8", + argLen: 1, + generic: true, }, { - name: "MaxFloat64x2", + name: "SaturatedAddInt16x8", argLen: 2, commutative: true, generic: true, }, { - name: "MinFloat64x2", - argLen: 2, - commutative: true, - generic: true, + name: "SaturatedPairwiseAddInt16x8", + argLen: 2, + generic: true, }, { - name: "MulFloat64x2", - argLen: 2, - commutative: true, - generic: true, + name: "SaturatedPairwiseSubInt16x8", + argLen: 2, + generic: true, }, { - name: "MulByPowOf2Float64x2", + name: "SaturatedSubInt16x8", argLen: 2, generic: true, }, { - name: "NotEqualFloat64x2", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftAllLeftInt16x8", + argLen: 2, + generic: true, }, { - name: "OrFloat64x2", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftAllRightInt16x8", + argLen: 2, + generic: true, }, { - name: "PairwiseAddFloat64x2", + name: "ShiftAllRightSignExtendedInt16x8", argLen: 2, generic: true, }, { - name: "PairwiseSubFloat64x2", + name: "ShiftLeftInt16x8", argLen: 2, generic: true, }, { - name: "RoundFloat64x2", - argLen: 1, + name: "ShiftLeftAndFillUpperFromInt16x8", + argLen: 3, generic: true, }, { - name: "SqrtFloat64x2", - argLen: 1, + name: "ShiftRightInt16x8", + argLen: 2, generic: true, }, { - name: "SubFloat64x2", + name: "ShiftRightAndFillUpperFromInt16x8", + argLen: 3, + generic: true, + }, + { + name: "ShiftRightSignExtendedInt16x8", argLen: 2, generic: true, }, { - name: "TruncFloat64x2", + name: "SignInt16x8", + argLen: 2, + generic: true, + }, + { + name: "SubInt16x8", + argLen: 2, + generic: true, + }, + { + name: "XorInt16x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AbsoluteInt32x16", argLen: 1, generic: true, }, { - name: "XorFloat64x2", + name: "AddInt32x16", argLen: 2, commutative: true, generic: true, }, { - name: "AddFloat64x4", + name: "AndInt32x16", argLen: 2, commutative: true, generic: true, }, { - name: "AddSubFloat64x4", + name: "AndNotInt32x16", argLen: 2, generic: true, }, { - name: "AndFloat64x4", + name: "EqualInt32x16", argLen: 2, commutative: true, generic: true, }, { - name: "AndNotFloat64x4", + name: "GreaterInt32x16", argLen: 2, generic: true, }, { - name: "ApproximateReciprocalFloat64x4", - argLen: 1, + name: "GreaterEqualInt32x16", + argLen: 2, generic: true, }, { - name: "ApproximateReciprocalOfSqrtFloat64x4", - argLen: 1, + name: "LessInt32x16", + argLen: 2, generic: true, }, { - name: "CeilFloat64x4", - argLen: 1, + name: "LessEqualInt32x16", + argLen: 2, generic: true, }, { - name: "DivFloat64x4", + name: "MaskedAbsoluteInt32x16", argLen: 2, generic: true, }, { - name: "EqualFloat64x4", - argLen: 2, + name: "MaskedAddInt32x16", + argLen: 3, commutative: true, generic: true, }, { - name: "FloorFloat64x4", - argLen: 1, - generic: true, + name: "MaskedAndInt32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "FusedMultiplyAddFloat64x4", + name: "MaskedAndNotInt32x16", argLen: 3, generic: true, }, { - name: "FusedMultiplyAddSubFloat64x4", + name: "MaskedEqualInt32x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterInt32x16", argLen: 3, generic: true, }, { - name: "FusedMultiplySubAddFloat64x4", + 
name: "MaskedGreaterEqualInt32x16", argLen: 3, generic: true, }, { - name: "GreaterFloat64x4", - argLen: 2, + name: "MaskedLessInt32x16", + argLen: 3, generic: true, }, { - name: "GreaterEqualFloat64x4", - argLen: 2, + name: "MaskedLessEqualInt32x16", + argLen: 3, generic: true, }, { - name: "IsNanFloat64x4", - argLen: 2, + name: "MaskedMaxInt32x16", + argLen: 3, commutative: true, generic: true, }, { - name: "LessFloat64x4", - argLen: 2, - generic: true, + name: "MaskedMinInt32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LessEqualFloat64x4", - argLen: 2, - generic: true, + name: "MaskedMulLowInt32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaskedAddFloat64x4", + name: "MaskedNotEqualInt32x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndFloat64x4", + name: "MaskedOrInt32x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndNotFloat64x4", - argLen: 3, + name: "MaskedPairDotProdAccumulateInt32x16", + argLen: 4, generic: true, }, { - name: "MaskedApproximateReciprocalFloat64x4", + name: "MaskedPopCountInt32x16", argLen: 2, generic: true, }, { - name: "MaskedApproximateReciprocalOfSqrtFloat64x4", - argLen: 2, + name: "MaskedRotateLeftInt32x16", + argLen: 3, generic: true, }, { - name: "MaskedDivFloat64x4", + name: "MaskedRotateRightInt32x16", argLen: 3, generic: true, }, { - name: "MaskedEqualFloat64x4", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedSaturatedPairDotProdAccumulateInt32x16", + argLen: 4, + generic: true, }, { - name: "MaskedFusedMultiplyAddFloat64x4", + name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16", argLen: 4, generic: true, }, { - name: "MaskedFusedMultiplyAddSubFloat64x4", - argLen: 4, + name: "MaskedShiftLeftInt32x16", + argLen: 3, generic: true, }, { - name: "MaskedFusedMultiplySubAddFloat64x4", + name: "MaskedShiftLeftAndFillUpperFromInt32x16", argLen: 4, generic: true, }, { - name: "MaskedGreaterFloat64x4", + name: "MaskedShiftRightInt32x16", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualFloat64x4", - argLen: 3, + name: "MaskedShiftRightAndFillUpperFromInt32x16", + argLen: 4, generic: true, }, { - name: "MaskedIsNanFloat64x4", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedShiftRightSignExtendedInt32x16", + argLen: 3, + generic: true, }, { - name: "MaskedLessFloat64x4", + name: "MaskedSubInt32x16", argLen: 3, generic: true, }, { - name: "MaskedLessEqualFloat64x4", - argLen: 3, + name: "MaskedUnsignedSignedQuadDotProdAccumulateInt32x16", + argLen: 4, generic: true, }, { - name: "MaskedMaxFloat64x4", + name: "MaskedXorInt32x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinFloat64x4", - argLen: 3, + name: "MaxInt32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMulFloat64x4", - argLen: 3, + name: "MinInt32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMulByPowOf2Float64x4", - argLen: 3, - generic: true, + name: "MulLowInt32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedNotEqualFloat64x4", - argLen: 3, + name: "NotEqualInt32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedOrFloat64x4", - argLen: 3, + name: "OrInt32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedSqrtFloat64x4", - argLen: 2, + name: "PairDotProdAccumulateInt32x16", + argLen: 3, generic: true, }, { - name: "MaskedSubFloat64x4", - argLen: 3, + name: "PopCountInt32x16", + argLen: 1, generic: true, 
}, { - name: "MaskedXorFloat64x4", - argLen: 3, - commutative: true, - generic: true, + name: "RotateLeftInt32x16", + argLen: 2, + generic: true, }, { - name: "MaxFloat64x4", - argLen: 2, - commutative: true, - generic: true, + name: "RotateRightInt32x16", + argLen: 2, + generic: true, }, { - name: "MinFloat64x4", - argLen: 2, - commutative: true, - generic: true, + name: "SaturatedPairDotProdAccumulateInt32x16", + argLen: 3, + generic: true, }, { - name: "MulFloat64x4", - argLen: 2, - commutative: true, - generic: true, + name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x16", + argLen: 3, + generic: true, }, { - name: "MulByPowOf2Float64x4", + name: "ShiftLeftInt32x16", argLen: 2, generic: true, }, { - name: "NotEqualFloat64x4", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftLeftAndFillUpperFromInt32x16", + argLen: 3, + generic: true, }, { - name: "OrFloat64x4", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightInt32x16", + argLen: 2, + generic: true, }, { - name: "PairwiseAddFloat64x4", - argLen: 2, + name: "ShiftRightAndFillUpperFromInt32x16", + argLen: 3, generic: true, }, { - name: "PairwiseSubFloat64x4", + name: "ShiftRightSignExtendedInt32x16", argLen: 2, generic: true, }, { - name: "RoundFloat64x4", - argLen: 1, + name: "SubInt32x16", + argLen: 2, generic: true, }, { - name: "SqrtFloat64x4", - argLen: 1, + name: "UnsignedSignedQuadDotProdAccumulateInt32x16", + argLen: 3, generic: true, }, { - name: "SubFloat64x4", - argLen: 2, - generic: true, + name: "XorInt32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "TruncFloat64x4", + name: "AbsoluteInt32x4", argLen: 1, generic: true, }, { - name: "XorFloat64x4", + name: "AddInt32x4", argLen: 2, commutative: true, generic: true, }, { - name: "AddFloat64x8", + name: "AndInt32x4", argLen: 2, commutative: true, generic: true, }, { - name: "AndFloat64x8", + name: "AndNotInt32x4", + argLen: 2, + generic: true, + }, + { + name: "EqualInt32x4", argLen: 2, commutative: true, generic: true, }, { - name: "AndNotFloat64x8", + name: "GreaterInt32x4", argLen: 2, generic: true, }, { - name: "ApproximateReciprocalFloat64x8", - argLen: 1, + name: "GreaterEqualInt32x4", + argLen: 2, generic: true, }, { - name: "ApproximateReciprocalOfSqrtFloat64x8", - argLen: 1, + name: "LessInt32x4", + argLen: 2, generic: true, }, { - name: "DivFloat64x8", + name: "LessEqualInt32x4", argLen: 2, generic: true, }, { - name: "EqualFloat64x8", - argLen: 2, + name: "MaskedAbsoluteInt32x4", + argLen: 2, + generic: true, + }, + { + name: "MaskedAddInt32x4", + argLen: 3, commutative: true, generic: true, }, { - name: "FusedMultiplyAddFloat64x8", + name: "MaskedAndInt32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndNotInt32x4", argLen: 3, generic: true, }, { - name: "FusedMultiplyAddSubFloat64x8", + name: "MaskedEqualInt32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterInt32x4", argLen: 3, generic: true, }, { - name: "FusedMultiplySubAddFloat64x8", + name: "MaskedGreaterEqualInt32x4", argLen: 3, generic: true, }, { - name: "GreaterFloat64x8", - argLen: 2, + name: "MaskedLessInt32x4", + argLen: 3, generic: true, }, { - name: "GreaterEqualFloat64x8", - argLen: 2, + name: "MaskedLessEqualInt32x4", + argLen: 3, generic: true, }, { - name: "IsNanFloat64x8", - argLen: 2, + name: "MaskedMaxInt32x4", + argLen: 3, commutative: true, generic: true, }, { - name: "LessFloat64x8", - argLen: 2, - generic: true, + name: "MaskedMinInt32x4", + argLen: 3, + 
commutative: true, + generic: true, }, { - name: "LessEqualFloat64x8", - argLen: 2, - generic: true, + name: "MaskedMulLowInt32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaskedAddFloat64x8", + name: "MaskedNotEqualInt32x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndFloat64x8", + name: "MaskedOrInt32x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndNotFloat64x8", - argLen: 3, + name: "MaskedPairDotProdAccumulateInt32x4", + argLen: 4, generic: true, }, { - name: "MaskedApproximateReciprocalFloat64x8", + name: "MaskedPopCountInt32x4", argLen: 2, generic: true, }, { - name: "MaskedApproximateReciprocalOfSqrtFloat64x8", - argLen: 2, + name: "MaskedRotateLeftInt32x4", + argLen: 3, generic: true, }, { - name: "MaskedDivFloat64x8", + name: "MaskedRotateRightInt32x4", argLen: 3, generic: true, }, { - name: "MaskedEqualFloat64x8", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedSaturatedPairDotProdAccumulateInt32x4", + argLen: 4, + generic: true, }, { - name: "MaskedFusedMultiplyAddFloat64x8", + name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4", argLen: 4, generic: true, }, { - name: "MaskedFusedMultiplyAddSubFloat64x8", - argLen: 4, + name: "MaskedShiftLeftInt32x4", + argLen: 3, generic: true, }, { - name: "MaskedFusedMultiplySubAddFloat64x8", + name: "MaskedShiftLeftAndFillUpperFromInt32x4", argLen: 4, generic: true, }, { - name: "MaskedGreaterFloat64x8", + name: "MaskedShiftRightInt32x4", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualFloat64x8", - argLen: 3, + name: "MaskedShiftRightAndFillUpperFromInt32x4", + argLen: 4, generic: true, }, { - name: "MaskedIsNanFloat64x8", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedShiftRightSignExtendedInt32x4", + argLen: 3, + generic: true, }, { - name: "MaskedLessFloat64x8", + name: "MaskedSubInt32x4", argLen: 3, generic: true, }, { - name: "MaskedLessEqualFloat64x8", - argLen: 3, + name: "MaskedUnsignedSignedQuadDotProdAccumulateInt32x4", + argLen: 4, generic: true, }, { - name: "MaskedMaxFloat64x8", + name: "MaskedXorInt32x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinFloat64x8", - argLen: 3, + name: "MaxInt32x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMulFloat64x8", - argLen: 3, + name: "MinInt32x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMulByPowOf2Float64x8", - argLen: 3, - generic: true, + name: "MulEvenWidenInt32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedNotEqualFloat64x8", - argLen: 3, + name: "MulLowInt32x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedOrFloat64x8", - argLen: 3, + name: "NotEqualInt32x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedSqrtFloat64x8", + name: "OrInt32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "PairDotProdAccumulateInt32x4", + argLen: 3, + generic: true, + }, + { + name: "PairwiseAddInt32x4", argLen: 2, generic: true, }, { - name: "MaskedSubFloat64x8", + name: "PairwiseSubInt32x4", + argLen: 2, + generic: true, + }, + { + name: "PopCountInt32x4", + argLen: 1, + generic: true, + }, + { + name: "RotateLeftInt32x4", + argLen: 2, + generic: true, + }, + { + name: "RotateRightInt32x4", + argLen: 2, + generic: true, + }, + { + name: "SaturatedPairDotProdAccumulateInt32x4", argLen: 3, generic: true, }, { - name: "MaskedXorFloat64x8", - argLen: 3, - commutative: true, - generic: true, + name: 
"SaturatedUnsignedSignedQuadDotProdAccumulateInt32x4", + argLen: 3, + generic: true, }, { - name: "MaxFloat64x8", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftAllLeftInt32x4", + argLen: 2, + generic: true, }, { - name: "MinFloat64x8", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftAllRightInt32x4", + argLen: 2, + generic: true, }, { - name: "MulFloat64x8", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftAllRightSignExtendedInt32x4", + argLen: 2, + generic: true, }, { - name: "MulByPowOf2Float64x8", + name: "ShiftLeftInt32x4", argLen: 2, generic: true, }, { - name: "NotEqualFloat64x8", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftLeftAndFillUpperFromInt32x4", + argLen: 3, + generic: true, }, { - name: "OrFloat64x8", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightInt32x4", + argLen: 2, + generic: true, + }, + { + name: "ShiftRightAndFillUpperFromInt32x4", + argLen: 3, + generic: true, + }, + { + name: "ShiftRightSignExtendedInt32x4", + argLen: 2, + generic: true, }, { - name: "SqrtFloat64x8", - argLen: 1, + name: "SignInt32x4", + argLen: 2, generic: true, }, { - name: "SubFloat64x8", + name: "SubInt32x4", argLen: 2, generic: true, }, { - name: "XorFloat64x8", + name: "UnsignedSignedQuadDotProdAccumulateInt32x4", + argLen: 3, + generic: true, + }, + { + name: "XorInt32x4", argLen: 2, commutative: true, generic: true, }, { - name: "AbsoluteInt16x16", + name: "AbsoluteInt32x8", argLen: 1, generic: true, }, { - name: "AddInt16x16", + name: "AddInt32x8", argLen: 2, commutative: true, generic: true, }, { - name: "AndInt16x16", + name: "AndInt32x8", argLen: 2, commutative: true, generic: true, }, { - name: "AndNotInt16x16", + name: "AndNotInt32x8", argLen: 2, generic: true, }, { - name: "EqualInt16x16", + name: "EqualInt32x8", argLen: 2, commutative: true, generic: true, }, { - name: "GreaterInt16x16", + name: "GreaterInt32x8", argLen: 2, generic: true, }, { - name: "GreaterEqualInt16x16", + name: "GreaterEqualInt32x8", argLen: 2, generic: true, }, { - name: "LessInt16x16", + name: "LessInt32x8", argLen: 2, generic: true, }, { - name: "LessEqualInt16x16", + name: "LessEqualInt32x8", argLen: 2, generic: true, }, { - name: "MaskedAbsoluteInt16x16", + name: "MaskedAbsoluteInt32x8", argLen: 2, generic: true, }, { - name: "MaskedAddInt16x16", + name: "MaskedAddInt32x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedEqualInt16x16", + name: "MaskedAndInt32x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterInt16x16", + name: "MaskedAndNotInt32x8", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualInt16x16", + name: "MaskedEqualInt32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterInt32x8", argLen: 3, generic: true, }, { - name: "MaskedLessInt16x16", + name: "MaskedGreaterEqualInt32x8", argLen: 3, generic: true, }, { - name: "MaskedLessEqualInt16x16", + name: "MaskedLessInt32x8", argLen: 3, generic: true, }, { - name: "MaskedMaxInt16x16", + name: "MaskedLessEqualInt32x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedMaxInt32x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinInt16x16", + name: "MaskedMinInt32x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulHighInt16x16", + name: "MaskedMulLowInt32x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulLowInt16x16", + name: "MaskedNotEqualInt32x8", argLen: 3, commutative: true, generic: true, }, { - 
name: "MaskedNotEqualInt16x16", + name: "MaskedOrInt32x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPairDotProdInt16x16", - argLen: 3, + name: "MaskedPairDotProdAccumulateInt32x8", + argLen: 4, generic: true, }, { - name: "MaskedPopCountInt16x16", + name: "MaskedPopCountInt32x8", argLen: 2, generic: true, }, { - name: "MaskedSaturatedAddInt16x16", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedRotateLeftInt32x8", + argLen: 3, + generic: true, }, { - name: "MaskedSaturatedSubInt16x16", + name: "MaskedRotateRightInt32x8", argLen: 3, generic: true, }, { - name: "MaskedSubInt16x16", + name: "MaskedSaturatedPairDotProdAccumulateInt32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedShiftLeftInt32x8", argLen: 3, generic: true, }, { - name: "MaxInt16x16", + name: "MaskedShiftLeftAndFillUpperFromInt32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedShiftRightInt32x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedShiftRightAndFillUpperFromInt32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedShiftRightSignExtendedInt32x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedSubInt32x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedUnsignedSignedQuadDotProdAccumulateInt32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedXorInt32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaxInt32x8", argLen: 2, commutative: true, generic: true, }, { - name: "MinInt16x16", + name: "MinInt32x8", argLen: 2, commutative: true, generic: true, }, { - name: "MulHighInt16x16", + name: "MulEvenWidenInt32x8", argLen: 2, commutative: true, generic: true, }, { - name: "MulLowInt16x16", + name: "MulLowInt32x8", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualInt16x16", + name: "NotEqualInt32x8", argLen: 2, commutative: true, generic: true, }, { - name: "OrInt16x16", + name: "OrInt32x8", argLen: 2, commutative: true, generic: true, }, { - name: "PairDotProdInt16x16", - argLen: 2, + name: "PairDotProdAccumulateInt32x8", + argLen: 3, generic: true, }, { - name: "PairwiseAddInt16x16", + name: "PairwiseAddInt32x8", argLen: 2, generic: true, }, { - name: "PairwiseSubInt16x16", + name: "PairwiseSubInt32x8", argLen: 2, generic: true, }, { - name: "PopCountInt16x16", + name: "PopCountInt32x8", argLen: 1, generic: true, }, { - name: "SaturatedAddInt16x16", - argLen: 2, - commutative: true, - generic: true, + name: "RotateLeftInt32x8", + argLen: 2, + generic: true, }, { - name: "SaturatedPairwiseAddInt16x16", + name: "RotateRightInt32x8", argLen: 2, generic: true, }, { - name: "SaturatedPairwiseSubInt16x16", + name: "SaturatedPairDotProdAccumulateInt32x8", + argLen: 3, + generic: true, + }, + { + name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x8", + argLen: 3, + generic: true, + }, + { + name: "ShiftAllLeftInt32x8", argLen: 2, generic: true, }, { - name: "SaturatedSubInt16x16", + name: "ShiftAllRightInt32x8", argLen: 2, generic: true, }, { - name: "SignInt16x16", + name: "ShiftAllRightSignExtendedInt32x8", argLen: 2, generic: true, }, { - name: "SubInt16x16", + name: "ShiftLeftInt32x8", argLen: 2, generic: true, }, { - name: "XorInt16x16", + name: "ShiftLeftAndFillUpperFromInt32x8", + argLen: 3, + generic: true, + }, + { + name: "ShiftRightInt32x8", + argLen: 2, + generic: true, + }, + { + name: "ShiftRightAndFillUpperFromInt32x8", + argLen: 3, + generic: true, + }, + { + name: 
"ShiftRightSignExtendedInt32x8", + argLen: 2, + generic: true, + }, + { + name: "SignInt32x8", + argLen: 2, + generic: true, + }, + { + name: "SubInt32x8", + argLen: 2, + generic: true, + }, + { + name: "UnsignedSignedQuadDotProdAccumulateInt32x8", + argLen: 3, + generic: true, + }, + { + name: "XorInt32x8", argLen: 2, commutative: true, generic: true, }, { - name: "AbsoluteInt16x32", + name: "AbsoluteInt64x2", argLen: 1, generic: true, }, { - name: "AddInt16x32", + name: "AddInt64x2", argLen: 2, commutative: true, generic: true, }, { - name: "EqualInt16x32", + name: "AndInt64x2", argLen: 2, commutative: true, generic: true, }, { - name: "GreaterInt16x32", + name: "AndNotInt64x2", argLen: 2, generic: true, }, { - name: "GreaterEqualInt16x32", + name: "EqualInt64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "GreaterInt64x2", argLen: 2, generic: true, }, { - name: "LessInt16x32", + name: "GreaterEqualInt64x2", argLen: 2, generic: true, }, { - name: "LessEqualInt16x32", + name: "LessInt64x2", argLen: 2, generic: true, }, { - name: "MaskedAbsoluteInt16x32", + name: "LessEqualInt64x2", argLen: 2, generic: true, }, { - name: "MaskedAddInt16x32", + name: "MaskedAbsoluteInt64x2", + argLen: 2, + generic: true, + }, + { + name: "MaskedAddInt64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedEqualInt16x32", + name: "MaskedAndInt64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterInt16x32", + name: "MaskedAndNotInt64x2", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualInt16x32", + name: "MaskedEqualInt64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterInt64x2", argLen: 3, generic: true, }, { - name: "MaskedLessInt16x32", + name: "MaskedGreaterEqualInt64x2", argLen: 3, generic: true, }, { - name: "MaskedLessEqualInt16x32", + name: "MaskedLessInt64x2", argLen: 3, generic: true, }, { - name: "MaskedMaxInt16x32", + name: "MaskedLessEqualInt64x2", + argLen: 3, + generic: true, + }, + { + name: "MaskedMaxInt64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinInt16x32", + name: "MaskedMinInt64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulHighInt16x32", + name: "MaskedMulEvenWidenInt64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulLowInt16x32", + name: "MaskedMulLowInt64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualInt16x32", + name: "MaskedNotEqualInt64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPairDotProdInt16x32", + name: "MaskedOrInt64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedPopCountInt64x2", + argLen: 2, + generic: true, + }, + { + name: "MaskedRotateLeftInt64x2", argLen: 3, generic: true, }, { - name: "MaskedPopCountInt16x32", - argLen: 2, + name: "MaskedRotateRightInt64x2", + argLen: 3, generic: true, }, { - name: "MaskedSaturatedAddInt16x32", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedShiftAllLeftInt64x2", + argLen: 3, + generic: true, }, { - name: "MaskedSaturatedSubInt16x32", + name: "MaskedShiftAllRightInt64x2", argLen: 3, generic: true, }, { - name: "MaskedSubInt16x32", + name: "MaskedShiftAllRightSignExtendedInt64x2", argLen: 3, generic: true, }, { - name: "MaxInt16x32", + name: "MaskedShiftLeftInt64x2", + argLen: 3, + generic: true, + }, + { + name: "MaskedShiftLeftAndFillUpperFromInt64x2", + argLen: 4, + generic: true, + }, + { + name: "MaskedShiftRightInt64x2", + argLen: 3, + 
generic: true, + }, + { + name: "MaskedShiftRightAndFillUpperFromInt64x2", + argLen: 4, + generic: true, + }, + { + name: "MaskedShiftRightSignExtendedInt64x2", + argLen: 3, + generic: true, + }, + { + name: "MaskedSubInt64x2", + argLen: 3, + generic: true, + }, + { + name: "MaskedXorInt64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaxInt64x2", argLen: 2, commutative: true, generic: true, }, { - name: "MinInt16x32", + name: "MinInt64x2", argLen: 2, commutative: true, generic: true, }, { - name: "MulHighInt16x32", + name: "MulEvenWidenInt64x2", argLen: 2, commutative: true, generic: true, }, { - name: "MulLowInt16x32", + name: "MulLowInt64x2", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualInt16x32", + name: "NotEqualInt64x2", argLen: 2, commutative: true, generic: true, }, { - name: "PairDotProdInt16x32", + name: "OrInt64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "PopCountInt64x2", + argLen: 1, + generic: true, + }, + { + name: "RotateLeftInt64x2", argLen: 2, generic: true, }, { - name: "PopCountInt16x32", - argLen: 1, + name: "RotateRightInt64x2", + argLen: 2, generic: true, }, { - name: "SaturatedAddInt16x32", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftAllLeftInt64x2", + argLen: 2, + generic: true, }, { - name: "SaturatedSubInt16x32", + name: "ShiftAllRightInt64x2", argLen: 2, generic: true, }, { - name: "SubInt16x32", + name: "ShiftAllRightSignExtendedInt64x2", argLen: 2, generic: true, }, { - name: "AbsoluteInt16x8", + name: "ShiftLeftInt64x2", + argLen: 2, + generic: true, + }, + { + name: "ShiftLeftAndFillUpperFromInt64x2", + argLen: 3, + generic: true, + }, + { + name: "ShiftRightInt64x2", + argLen: 2, + generic: true, + }, + { + name: "ShiftRightAndFillUpperFromInt64x2", + argLen: 3, + generic: true, + }, + { + name: "ShiftRightSignExtendedInt64x2", + argLen: 2, + generic: true, + }, + { + name: "SubInt64x2", + argLen: 2, + generic: true, + }, + { + name: "XorInt64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AbsoluteInt64x4", argLen: 1, generic: true, }, { - name: "AddInt16x8", + name: "AddInt64x4", argLen: 2, commutative: true, generic: true, }, { - name: "AndInt16x8", + name: "AndInt64x4", argLen: 2, commutative: true, generic: true, }, { - name: "AndNotInt16x8", + name: "AndNotInt64x4", argLen: 2, generic: true, }, { - name: "EqualInt16x8", + name: "EqualInt64x4", argLen: 2, commutative: true, generic: true, }, { - name: "GreaterInt16x8", + name: "GreaterInt64x4", argLen: 2, generic: true, }, { - name: "GreaterEqualInt16x8", + name: "GreaterEqualInt64x4", argLen: 2, generic: true, }, { - name: "LessInt16x8", + name: "LessInt64x4", argLen: 2, generic: true, }, { - name: "LessEqualInt16x8", + name: "LessEqualInt64x4", argLen: 2, generic: true, }, { - name: "MaskedAbsoluteInt16x8", + name: "MaskedAbsoluteInt64x4", argLen: 2, generic: true, }, { - name: "MaskedAddInt16x8", + name: "MaskedAddInt64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedEqualInt16x8", + name: "MaskedAndInt64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterInt16x8", + name: "MaskedAndNotInt64x4", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualInt16x8", + name: "MaskedEqualInt64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterInt64x4", argLen: 3, generic: true, }, { - name: "MaskedLessInt16x8", + name: "MaskedGreaterEqualInt64x4", argLen: 3, generic: true, }, { - name: 
"MaskedLessEqualInt16x8", + name: "MaskedLessInt64x4", argLen: 3, generic: true, }, { - name: "MaskedMaxInt16x8", + name: "MaskedLessEqualInt64x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedMaxInt64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinInt16x8", + name: "MaskedMinInt64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulHighInt16x8", + name: "MaskedMulEvenWidenInt64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulLowInt16x8", + name: "MaskedMulLowInt64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualInt16x8", + name: "MaskedNotEqualInt64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPairDotProdInt16x8", + name: "MaskedOrInt64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedPopCountInt64x4", + argLen: 2, + generic: true, + }, + { + name: "MaskedRotateLeftInt64x4", argLen: 3, generic: true, }, { - name: "MaskedPopCountInt16x8", - argLen: 2, + name: "MaskedRotateRightInt64x4", + argLen: 3, generic: true, }, { - name: "MaskedSaturatedAddInt16x8", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedShiftAllLeftInt64x4", + argLen: 3, + generic: true, }, { - name: "MaskedSaturatedSubInt16x8", + name: "MaskedShiftAllRightInt64x4", argLen: 3, generic: true, }, { - name: "MaskedSubInt16x8", + name: "MaskedShiftAllRightSignExtendedInt64x4", argLen: 3, generic: true, }, { - name: "MaxInt16x8", + name: "MaskedShiftLeftInt64x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedShiftLeftAndFillUpperFromInt64x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedShiftRightInt64x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedShiftRightAndFillUpperFromInt64x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedShiftRightSignExtendedInt64x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedSubInt64x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedXorInt64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaxInt64x4", argLen: 2, commutative: true, generic: true, }, { - name: "MinInt16x8", + name: "MinInt64x4", argLen: 2, commutative: true, generic: true, }, { - name: "MulHighInt16x8", + name: "MulEvenWidenInt64x4", argLen: 2, commutative: true, generic: true, }, { - name: "MulLowInt16x8", + name: "MulLowInt64x4", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualInt16x8", + name: "NotEqualInt64x4", argLen: 2, commutative: true, generic: true, }, { - name: "OrInt16x8", + name: "OrInt64x4", argLen: 2, commutative: true, generic: true, }, { - name: "PairDotProdInt16x8", - argLen: 2, + name: "PopCountInt64x4", + argLen: 1, generic: true, }, { - name: "PairwiseAddInt16x8", + name: "RotateLeftInt64x4", argLen: 2, generic: true, }, { - name: "PairwiseSubInt16x8", + name: "RotateRightInt64x4", argLen: 2, generic: true, }, { - name: "PopCountInt16x8", - argLen: 1, + name: "ShiftAllLeftInt64x4", + argLen: 2, generic: true, }, { - name: "SaturatedAddInt16x8", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftAllRightInt64x4", + argLen: 2, + generic: true, }, { - name: "SaturatedPairwiseAddInt16x8", + name: "ShiftAllRightSignExtendedInt64x4", argLen: 2, generic: true, }, { - name: "SaturatedPairwiseSubInt16x8", + name: "ShiftLeftInt64x4", argLen: 2, generic: true, }, { - name: "SaturatedSubInt16x8", + name: "ShiftLeftAndFillUpperFromInt64x4", + argLen: 3, + generic: true, + }, + { + name: "ShiftRightInt64x4", argLen: 2, generic: true, }, { - 
name: "SignInt16x8", + name: "ShiftRightAndFillUpperFromInt64x4", + argLen: 3, + generic: true, + }, + { + name: "ShiftRightSignExtendedInt64x4", argLen: 2, generic: true, }, { - name: "SubInt16x8", + name: "SubInt64x4", argLen: 2, generic: true, }, { - name: "XorInt16x8", + name: "XorInt64x4", argLen: 2, commutative: true, generic: true, }, { - name: "AbsoluteInt32x16", + name: "AbsoluteInt64x8", argLen: 1, generic: true, }, { - name: "AddInt32x16", + name: "AddInt64x8", argLen: 2, commutative: true, generic: true, }, { - name: "AndInt32x16", + name: "AndInt64x8", argLen: 2, commutative: true, generic: true, }, { - name: "AndNotInt32x16", + name: "AndNotInt64x8", argLen: 2, generic: true, }, { - name: "EqualInt32x16", + name: "EqualInt64x8", argLen: 2, commutative: true, generic: true, }, { - name: "GreaterInt32x16", + name: "GreaterInt64x8", argLen: 2, generic: true, }, { - name: "GreaterEqualInt32x16", + name: "GreaterEqualInt64x8", argLen: 2, generic: true, }, { - name: "LessInt32x16", + name: "LessInt64x8", argLen: 2, generic: true, }, { - name: "LessEqualInt32x16", + name: "LessEqualInt64x8", argLen: 2, generic: true, }, { - name: "MaskedAbsoluteInt32x16", + name: "MaskedAbsoluteInt64x8", argLen: 2, generic: true, }, { - name: "MaskedAddInt32x16", + name: "MaskedAddInt64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndInt32x16", + name: "MaskedAndInt64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndNotInt32x16", + name: "MaskedAndNotInt64x8", argLen: 3, generic: true, }, { - name: "MaskedEqualInt32x16", + name: "MaskedEqualInt64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterInt32x16", + name: "MaskedGreaterInt64x8", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualInt32x16", + name: "MaskedGreaterEqualInt64x8", argLen: 3, generic: true, }, { - name: "MaskedLessInt32x16", + name: "MaskedLessInt64x8", argLen: 3, generic: true, }, { - name: "MaskedLessEqualInt32x16", + name: "MaskedLessEqualInt64x8", argLen: 3, generic: true, }, { - name: "MaskedMaxInt32x16", + name: "MaskedMaxInt64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinInt32x16", + name: "MaskedMinInt64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulLowInt32x16", + name: "MaskedMulEvenWidenInt64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualInt32x16", + name: "MaskedMulLowInt64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedOrInt32x16", + name: "MaskedNotEqualInt64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPairDotProdAccumulateInt32x16", - argLen: 4, - generic: true, + name: "MaskedOrInt64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaskedPopCountInt32x16", + name: "MaskedPopCountInt64x8", argLen: 2, generic: true, }, { - name: "MaskedSaturatedPairDotProdAccumulateInt32x16", - argLen: 4, + name: "MaskedRotateLeftInt64x8", + argLen: 3, generic: true, }, { - name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16", + name: "MaskedRotateRightInt64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedShiftAllLeftInt64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedShiftAllRightInt64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedShiftAllRightSignExtendedInt64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedShiftLeftInt64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedShiftLeftAndFillUpperFromInt64x8", argLen: 4, generic: true, }, { - 
name: "MaskedSubInt32x16", + name: "MaskedShiftRightInt64x8", argLen: 3, generic: true, }, { - name: "MaskedUnsignedSignedQuadDotProdAccumulateInt32x16", + name: "MaskedShiftRightAndFillUpperFromInt64x8", argLen: 4, generic: true, }, { - name: "MaskedXorInt32x16", + name: "MaskedShiftRightSignExtendedInt64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedSubInt64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedXorInt64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaxInt32x16", + name: "MaxInt64x8", argLen: 2, commutative: true, generic: true, }, { - name: "MinInt32x16", + name: "MinInt64x8", argLen: 2, commutative: true, generic: true, }, { - name: "MulLowInt32x16", + name: "MulEvenWidenInt64x8", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualInt32x16", + name: "MulLowInt64x8", argLen: 2, commutative: true, generic: true, }, { - name: "OrInt32x16", + name: "NotEqualInt64x8", argLen: 2, commutative: true, generic: true, }, { - name: "PairDotProdAccumulateInt32x16", - argLen: 3, - generic: true, + name: "OrInt64x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "PopCountInt32x16", + name: "PopCountInt64x8", argLen: 1, generic: true, }, { - name: "SaturatedPairDotProdAccumulateInt32x16", - argLen: 3, + name: "RotateLeftInt64x8", + argLen: 2, generic: true, }, { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x16", + name: "RotateRightInt64x8", + argLen: 2, + generic: true, + }, + { + name: "ShiftAllLeftInt64x8", + argLen: 2, + generic: true, + }, + { + name: "ShiftAllRightInt64x8", + argLen: 2, + generic: true, + }, + { + name: "ShiftAllRightSignExtendedInt64x8", + argLen: 2, + generic: true, + }, + { + name: "ShiftLeftInt64x8", + argLen: 2, + generic: true, + }, + { + name: "ShiftLeftAndFillUpperFromInt64x8", argLen: 3, generic: true, }, { - name: "SubInt32x16", + name: "ShiftRightInt64x8", argLen: 2, generic: true, }, { - name: "UnsignedSignedQuadDotProdAccumulateInt32x16", + name: "ShiftRightAndFillUpperFromInt64x8", argLen: 3, generic: true, }, { - name: "XorInt32x16", + name: "ShiftRightSignExtendedInt64x8", + argLen: 2, + generic: true, + }, + { + name: "SubInt64x8", + argLen: 2, + generic: true, + }, + { + name: "XorInt64x8", argLen: 2, commutative: true, generic: true, }, { - name: "AbsoluteInt32x4", + name: "AbsoluteInt8x16", argLen: 1, generic: true, }, { - name: "AddInt32x4", + name: "AddInt8x16", argLen: 2, commutative: true, generic: true, }, { - name: "AndInt32x4", + name: "AndInt8x16", argLen: 2, commutative: true, generic: true, }, { - name: "AndNotInt32x4", + name: "AndNotInt8x16", argLen: 2, generic: true, }, { - name: "EqualInt32x4", + name: "EqualInt8x16", argLen: 2, commutative: true, generic: true, }, { - name: "GreaterInt32x4", + name: "GreaterInt8x16", argLen: 2, generic: true, }, { - name: "GreaterEqualInt32x4", + name: "GreaterEqualInt8x16", argLen: 2, generic: true, }, { - name: "LessInt32x4", + name: "LessInt8x16", argLen: 2, generic: true, }, { - name: "LessEqualInt32x4", + name: "LessEqualInt8x16", argLen: 2, generic: true, }, { - name: "MaskedAbsoluteInt32x4", + name: "MaskedAbsoluteInt8x16", argLen: 2, generic: true, }, { - name: "MaskedAddInt32x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedAndInt32x4", + name: "MaskedAddInt8x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndNotInt32x4", - argLen: 3, - generic: true, - }, - { - name: "MaskedEqualInt32x4", + name: "MaskedEqualInt8x16", argLen: 3, commutative: true, generic: 
true, }, { - name: "MaskedGreaterInt32x4", + name: "MaskedGreaterInt8x16", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualInt32x4", + name: "MaskedGreaterEqualInt8x16", argLen: 3, generic: true, }, { - name: "MaskedLessInt32x4", + name: "MaskedLessInt8x16", argLen: 3, generic: true, }, { - name: "MaskedLessEqualInt32x4", + name: "MaskedLessEqualInt8x16", argLen: 3, generic: true, }, { - name: "MaskedMaxInt32x4", + name: "MaskedMaxInt8x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinInt32x4", + name: "MaskedMinInt8x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulLowInt32x4", + name: "MaskedNotEqualInt8x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualInt32x4", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedPopCountInt8x16", + argLen: 2, + generic: true, }, { - name: "MaskedOrInt32x4", + name: "MaskedSaturatedAddInt8x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPairDotProdAccumulateInt32x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedPopCountInt32x4", - argLen: 2, - generic: true, - }, - { - name: "MaskedSaturatedPairDotProdAccumulateInt32x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedSubInt32x4", + name: "MaskedSaturatedSubInt8x16", argLen: 3, generic: true, }, { - name: "MaskedUnsignedSignedQuadDotProdAccumulateInt32x4", - argLen: 4, + name: "MaskedSubInt8x16", + argLen: 3, generic: true, }, { - name: "MaskedXorInt32x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaxInt32x4", + name: "MaxInt8x16", argLen: 2, commutative: true, generic: true, }, { - name: "MinInt32x4", + name: "MinInt8x16", argLen: 2, commutative: true, generic: true, }, { - name: "MulEvenWidenInt32x4", + name: "NotEqualInt8x16", argLen: 2, commutative: true, generic: true, }, { - name: "MulLowInt32x4", + name: "OrInt8x16", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualInt32x4", - argLen: 2, - commutative: true, - generic: true, + name: "PopCountInt8x16", + argLen: 1, + generic: true, }, { - name: "OrInt32x4", + name: "SaturatedAddInt8x16", argLen: 2, commutative: true, generic: true, }, { - name: "PairDotProdAccumulateInt32x4", - argLen: 3, - generic: true, - }, - { - name: "PairwiseAddInt32x4", - argLen: 2, - generic: true, - }, - { - name: "PairwiseSubInt32x4", + name: "SaturatedSubInt8x16", argLen: 2, generic: true, }, { - name: "PopCountInt32x4", - argLen: 1, - generic: true, - }, - { - name: "SaturatedPairDotProdAccumulateInt32x4", - argLen: 3, - generic: true, - }, - { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x4", - argLen: 3, - generic: true, - }, - { - name: "SignInt32x4", + name: "SignInt8x16", argLen: 2, generic: true, }, { - name: "SubInt32x4", + name: "SubInt8x16", argLen: 2, generic: true, }, { - name: "UnsignedSignedQuadDotProdAccumulateInt32x4", - argLen: 3, - generic: true, - }, - { - name: "XorInt32x4", + name: "XorInt8x16", argLen: 2, commutative: true, generic: true, }, { - name: "AbsoluteInt32x8", + name: "AbsoluteInt8x32", argLen: 1, generic: true, }, { - name: "AddInt32x8", + name: "AddInt8x32", argLen: 2, commutative: true, generic: true, }, { - name: "AndInt32x8", + name: "AndInt8x32", argLen: 2, commutative: true, generic: true, }, { - name: "AndNotInt32x8", + name: "AndNotInt8x32", argLen: 2, generic: true, }, { - name: "EqualInt32x8", + name: "EqualInt8x32", argLen: 2, 
commutative: true, generic: true, }, { - name: "GreaterInt32x8", + name: "GreaterInt8x32", argLen: 2, generic: true, }, { - name: "GreaterEqualInt32x8", + name: "GreaterEqualInt8x32", argLen: 2, generic: true, }, { - name: "LessInt32x8", + name: "LessInt8x32", argLen: 2, generic: true, }, { - name: "LessEqualInt32x8", + name: "LessEqualInt8x32", argLen: 2, generic: true, }, { - name: "MaskedAbsoluteInt32x8", + name: "MaskedAbsoluteInt8x32", argLen: 2, generic: true, }, { - name: "MaskedAddInt32x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedAndInt32x8", + name: "MaskedAddInt8x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndNotInt32x8", - argLen: 3, - generic: true, - }, - { - name: "MaskedEqualInt32x8", + name: "MaskedEqualInt8x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterInt32x8", + name: "MaskedGreaterInt8x32", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualInt32x8", + name: "MaskedGreaterEqualInt8x32", argLen: 3, generic: true, }, { - name: "MaskedLessInt32x8", + name: "MaskedLessInt8x32", argLen: 3, generic: true, }, { - name: "MaskedLessEqualInt32x8", + name: "MaskedLessEqualInt8x32", argLen: 3, generic: true, }, { - name: "MaskedMaxInt32x8", + name: "MaskedMaxInt8x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinInt32x8", + name: "MaskedMinInt8x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulLowInt32x8", + name: "MaskedNotEqualInt8x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualInt32x8", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedPopCountInt8x32", + argLen: 2, + generic: true, }, { - name: "MaskedOrInt32x8", + name: "MaskedSaturatedAddInt8x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPairDotProdAccumulateInt32x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedPopCountInt32x8", - argLen: 2, - generic: true, - }, - { - name: "MaskedSaturatedPairDotProdAccumulateInt32x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedSubInt32x8", + name: "MaskedSaturatedSubInt8x32", argLen: 3, generic: true, }, { - name: "MaskedUnsignedSignedQuadDotProdAccumulateInt32x8", - argLen: 4, + name: "MaskedSubInt8x32", + argLen: 3, generic: true, }, { - name: "MaskedXorInt32x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaxInt32x8", + name: "MaxInt8x32", argLen: 2, commutative: true, generic: true, }, { - name: "MinInt32x8", + name: "MinInt8x32", argLen: 2, commutative: true, generic: true, }, { - name: "MulEvenWidenInt32x8", + name: "NotEqualInt8x32", argLen: 2, commutative: true, generic: true, }, { - name: "MulLowInt32x8", + name: "OrInt8x32", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualInt32x8", - argLen: 2, - commutative: true, - generic: true, + name: "PopCountInt8x32", + argLen: 1, + generic: true, }, { - name: "OrInt32x8", + name: "SaturatedAddInt8x32", argLen: 2, commutative: true, generic: true, }, { - name: "PairDotProdAccumulateInt32x8", - argLen: 3, - generic: true, - }, - { - name: "PairwiseAddInt32x8", - argLen: 2, - generic: true, - }, - { - name: "PairwiseSubInt32x8", + name: "SaturatedSubInt8x32", argLen: 2, generic: true, }, { - name: "PopCountInt32x8", - argLen: 1, - generic: true, - }, - { - name: "SaturatedPairDotProdAccumulateInt32x8", - argLen: 3, - generic: true, - }, - { - name: 
"SaturatedUnsignedSignedQuadDotProdAccumulateInt32x8", - argLen: 3, - generic: true, - }, - { - name: "SignInt32x8", + name: "SignInt8x32", argLen: 2, generic: true, }, { - name: "SubInt32x8", + name: "SubInt8x32", argLen: 2, generic: true, }, { - name: "UnsignedSignedQuadDotProdAccumulateInt32x8", - argLen: 3, - generic: true, - }, - { - name: "XorInt32x8", + name: "XorInt8x32", argLen: 2, commutative: true, generic: true, }, { - name: "AbsoluteInt64x2", + name: "AbsoluteInt8x64", argLen: 1, generic: true, }, { - name: "AddInt64x2", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndInt64x2", + name: "AddInt8x64", argLen: 2, commutative: true, generic: true, }, { - name: "AndNotInt64x2", - argLen: 2, - generic: true, - }, - { - name: "EqualInt64x2", + name: "EqualInt8x64", argLen: 2, commutative: true, generic: true, }, { - name: "GreaterInt64x2", + name: "GreaterInt8x64", argLen: 2, generic: true, }, { - name: "GreaterEqualInt64x2", + name: "GreaterEqualInt8x64", argLen: 2, generic: true, }, { - name: "LessInt64x2", + name: "LessInt8x64", argLen: 2, generic: true, }, { - name: "LessEqualInt64x2", + name: "LessEqualInt8x64", argLen: 2, generic: true, }, { - name: "MaskedAbsoluteInt64x2", + name: "MaskedAbsoluteInt8x64", argLen: 2, generic: true, }, { - name: "MaskedAddInt64x2", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedAndInt64x2", + name: "MaskedAddInt8x64", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndNotInt64x2", - argLen: 3, - generic: true, - }, - { - name: "MaskedEqualInt64x2", + name: "MaskedEqualInt8x64", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterInt64x2", - argLen: 3, - generic: true, - }, - { - name: "MaskedGreaterEqualInt64x2", + name: "MaskedGreaterInt8x64", argLen: 3, generic: true, }, { - name: "MaskedLessInt64x2", + name: "MaskedGreaterEqualInt8x64", argLen: 3, generic: true, }, { - name: "MaskedLessEqualInt64x2", + name: "MaskedLessInt8x64", argLen: 3, generic: true, }, - { - name: "MaskedMaxInt64x2", - argLen: 3, - commutative: true, - generic: true, + { + name: "MaskedLessEqualInt8x64", + argLen: 3, + generic: true, }, { - name: "MaskedMinInt64x2", + name: "MaskedMaxInt8x64", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulEvenWidenInt64x2", + name: "MaskedMinInt8x64", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulLowInt64x2", + name: "MaskedNotEqualInt8x64", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualInt64x2", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedPopCountInt8x64", + argLen: 2, + generic: true, }, { - name: "MaskedOrInt64x2", + name: "MaskedSaturatedAddInt8x64", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPopCountInt64x2", - argLen: 2, + name: "MaskedSaturatedSubInt8x64", + argLen: 3, generic: true, }, { - name: "MaskedSubInt64x2", + name: "MaskedSubInt8x64", argLen: 3, generic: true, }, { - name: "MaskedXorInt64x2", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaxInt64x2", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "MinInt64x2", + name: "MaxInt8x64", argLen: 2, commutative: true, generic: true, }, { - name: "MulEvenWidenInt64x2", + name: "MinInt8x64", argLen: 2, commutative: true, generic: true, }, { - name: "MulLowInt64x2", + name: "NotEqualInt8x64", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualInt64x2", - argLen: 2, - commutative: true, - generic: true, + name: 
"PopCountInt8x64", + argLen: 1, + generic: true, }, { - name: "OrInt64x2", + name: "SaturatedAddInt8x64", argLen: 2, commutative: true, generic: true, }, { - name: "PopCountInt64x2", - argLen: 1, - generic: true, - }, - { - name: "SubInt64x2", + name: "SaturatedSubInt8x64", argLen: 2, generic: true, }, { - name: "XorInt64x2", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AbsoluteInt64x4", - argLen: 1, + name: "SubInt8x64", + argLen: 2, generic: true, }, { - name: "AddInt64x4", + name: "AddUint16x16", argLen: 2, commutative: true, generic: true, }, { - name: "AndInt64x4", + name: "AndUint16x16", argLen: 2, commutative: true, generic: true, }, { - name: "AndNotInt64x4", + name: "AndNotUint16x16", argLen: 2, generic: true, }, { - name: "EqualInt64x4", + name: "AverageUint16x16", argLen: 2, commutative: true, generic: true, }, { - name: "GreaterInt64x4", - argLen: 2, - generic: true, + name: "EqualUint16x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "GreaterEqualInt64x4", + name: "GreaterUint16x16", argLen: 2, generic: true, }, { - name: "LessInt64x4", + name: "GreaterEqualUint16x16", argLen: 2, generic: true, }, { - name: "LessEqualInt64x4", + name: "LessUint16x16", argLen: 2, generic: true, }, { - name: "MaskedAbsoluteInt64x4", + name: "LessEqualUint16x16", argLen: 2, generic: true, }, { - name: "MaskedAddInt64x4", + name: "MaskedAddUint16x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndInt64x4", + name: "MaskedAverageUint16x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndNotInt64x4", - argLen: 3, - generic: true, - }, - { - name: "MaskedEqualInt64x4", + name: "MaskedEqualUint16x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterInt64x4", + name: "MaskedGreaterUint16x16", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualInt64x4", + name: "MaskedGreaterEqualUint16x16", argLen: 3, generic: true, }, { - name: "MaskedLessInt64x4", + name: "MaskedLessUint16x16", argLen: 3, generic: true, }, { - name: "MaskedLessEqualInt64x4", + name: "MaskedLessEqualUint16x16", argLen: 3, generic: true, }, { - name: "MaskedMaxInt64x4", + name: "MaskedMaxUint16x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinInt64x4", + name: "MaskedMinUint16x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulEvenWidenInt64x4", + name: "MaskedMulHighUint16x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulLowInt64x4", + name: "MaskedNotEqualUint16x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualInt64x4", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedPopCountUint16x16", + argLen: 2, + generic: true, }, { - name: "MaskedOrInt64x4", + name: "MaskedSaturatedAddUint16x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPopCountInt64x4", - argLen: 2, + name: "MaskedSaturatedSubUint16x16", + argLen: 3, generic: true, }, { - name: "MaskedSubInt64x4", + name: "MaskedShiftLeftUint16x16", argLen: 3, generic: true, }, { - name: "MaskedXorInt64x4", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedShiftLeftAndFillUpperFromUint16x16", + argLen: 4, + generic: true, }, { - name: "MaxInt64x4", - argLen: 2, - commutative: true, - generic: true, + name: "MaskedShiftRightUint16x16", + argLen: 3, + generic: true, }, { - name: "MinInt64x4", + name: "MaskedShiftRightAndFillUpperFromUint16x16", + argLen: 4, + generic: true, + }, + { + name: 
"MaskedShiftRightSignExtendedUint16x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedSubUint16x16", + argLen: 3, + generic: true, + }, + { + name: "MaxUint16x16", argLen: 2, commutative: true, generic: true, }, { - name: "MulEvenWidenInt64x4", + name: "MinUint16x16", argLen: 2, commutative: true, generic: true, }, { - name: "MulLowInt64x4", + name: "MulHighUint16x16", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualInt64x4", + name: "NotEqualUint16x16", argLen: 2, commutative: true, generic: true, }, { - name: "OrInt64x4", + name: "OrUint16x16", argLen: 2, commutative: true, generic: true, }, { - name: "PopCountInt64x4", - argLen: 1, + name: "PairwiseAddUint16x16", + argLen: 2, generic: true, }, { - name: "SubInt64x4", + name: "PairwiseSubUint16x16", argLen: 2, generic: true, }, { - name: "XorInt64x4", + name: "PopCountUint16x16", + argLen: 1, + generic: true, + }, + { + name: "SaturatedAddUint16x16", argLen: 2, commutative: true, generic: true, }, { - name: "AbsoluteInt64x8", - argLen: 1, + name: "SaturatedSubUint16x16", + argLen: 2, generic: true, }, { - name: "AddInt64x8", + name: "ShiftAllLeftUint16x16", + argLen: 2, + generic: true, + }, + { + name: "ShiftAllRightUint16x16", + argLen: 2, + generic: true, + }, + { + name: "ShiftLeftUint16x16", + argLen: 2, + generic: true, + }, + { + name: "ShiftLeftAndFillUpperFromUint16x16", + argLen: 3, + generic: true, + }, + { + name: "ShiftRightUint16x16", + argLen: 2, + generic: true, + }, + { + name: "ShiftRightAndFillUpperFromUint16x16", + argLen: 3, + generic: true, + }, + { + name: "ShiftRightSignExtendedUint16x16", + argLen: 2, + generic: true, + }, + { + name: "SubUint16x16", + argLen: 2, + generic: true, + }, + { + name: "XorUint16x16", argLen: 2, commutative: true, generic: true, }, { - name: "AndInt64x8", + name: "AddUint16x32", argLen: 2, commutative: true, generic: true, }, { - name: "AndNotInt64x8", - argLen: 2, - generic: true, - }, - { - name: "EqualInt64x8", + name: "AverageUint16x32", argLen: 2, commutative: true, generic: true, }, { - name: "GreaterInt64x8", - argLen: 2, - generic: true, + name: "EqualUint16x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "GreaterEqualInt64x8", + name: "GreaterUint16x32", argLen: 2, generic: true, }, { - name: "LessInt64x8", + name: "GreaterEqualUint16x32", argLen: 2, generic: true, }, { - name: "LessEqualInt64x8", + name: "LessUint16x32", argLen: 2, generic: true, }, { - name: "MaskedAbsoluteInt64x8", + name: "LessEqualUint16x32", argLen: 2, generic: true, }, { - name: "MaskedAddInt64x8", + name: "MaskedAddUint16x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndInt64x8", + name: "MaskedAverageUint16x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndNotInt64x8", - argLen: 3, - generic: true, - }, - { - name: "MaskedEqualInt64x8", + name: "MaskedEqualUint16x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterInt64x8", + name: "MaskedGreaterUint16x32", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualInt64x8", + name: "MaskedGreaterEqualUint16x32", argLen: 3, generic: true, }, { - name: "MaskedLessInt64x8", + name: "MaskedLessUint16x32", argLen: 3, generic: true, }, { - name: "MaskedLessEqualInt64x8", + name: "MaskedLessEqualUint16x32", argLen: 3, generic: true, }, { - name: "MaskedMaxInt64x8", + name: "MaskedMaxUint16x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinInt64x8", + name: "MaskedMinUint16x32", argLen: 3, commutative: true, generic: 
true, }, { - name: "MaskedMulEvenWidenInt64x8", + name: "MaskedMulHighUint16x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulLowInt64x8", + name: "MaskedNotEqualUint16x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualInt64x8", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedPopCountUint16x32", + argLen: 2, + generic: true, }, { - name: "MaskedOrInt64x8", + name: "MaskedSaturatedAddUint16x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPopCountInt64x8", - argLen: 2, + name: "MaskedSaturatedSubUint16x32", + argLen: 3, generic: true, }, { - name: "MaskedSubInt64x8", + name: "MaskedShiftLeftUint16x32", argLen: 3, generic: true, }, { - name: "MaskedXorInt64x8", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedShiftLeftAndFillUpperFromUint16x32", + argLen: 4, + generic: true, }, { - name: "MaxInt64x8", - argLen: 2, - commutative: true, - generic: true, + name: "MaskedShiftRightUint16x32", + argLen: 3, + generic: true, }, { - name: "MinInt64x8", + name: "MaskedShiftRightAndFillUpperFromUint16x32", + argLen: 4, + generic: true, + }, + { + name: "MaskedShiftRightSignExtendedUint16x32", + argLen: 3, + generic: true, + }, + { + name: "MaskedSubUint16x32", + argLen: 3, + generic: true, + }, + { + name: "MaxUint16x32", argLen: 2, commutative: true, generic: true, }, { - name: "MulEvenWidenInt64x8", + name: "MinUint16x32", argLen: 2, commutative: true, generic: true, }, { - name: "MulLowInt64x8", + name: "MulHighUint16x32", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualInt64x8", + name: "NotEqualUint16x32", argLen: 2, commutative: true, generic: true, }, { - name: "OrInt64x8", + name: "PopCountUint16x32", + argLen: 1, + generic: true, + }, + { + name: "SaturatedAddUint16x32", argLen: 2, commutative: true, generic: true, }, { - name: "PopCountInt64x8", - argLen: 1, + name: "SaturatedSubUint16x32", + argLen: 2, generic: true, }, { - name: "SubInt64x8", + name: "ShiftLeftUint16x32", argLen: 2, generic: true, }, { - name: "XorInt64x8", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftLeftAndFillUpperFromUint16x32", + argLen: 3, + generic: true, }, { - name: "AbsoluteInt8x16", - argLen: 1, + name: "ShiftRightUint16x32", + argLen: 2, generic: true, }, { - name: "AddInt8x16", + name: "ShiftRightAndFillUpperFromUint16x32", + argLen: 3, + generic: true, + }, + { + name: "ShiftRightSignExtendedUint16x32", + argLen: 2, + generic: true, + }, + { + name: "SubUint16x32", + argLen: 2, + generic: true, + }, + { + name: "AddUint16x8", argLen: 2, commutative: true, generic: true, }, { - name: "AndInt8x16", + name: "AndUint16x8", argLen: 2, commutative: true, generic: true, }, { - name: "AndNotInt8x16", + name: "AndNotUint16x8", argLen: 2, generic: true, }, { - name: "EqualInt8x16", + name: "AverageUint16x8", argLen: 2, commutative: true, generic: true, }, { - name: "GreaterInt8x16", - argLen: 2, - generic: true, + name: "EqualUint16x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "GreaterEqualInt8x16", + name: "GreaterUint16x8", argLen: 2, generic: true, }, { - name: "LessInt8x16", + name: "GreaterEqualUint16x8", argLen: 2, generic: true, }, { - name: "LessEqualInt8x16", + name: "LessUint16x8", argLen: 2, generic: true, }, { - name: "MaskedAbsoluteInt8x16", + name: "LessEqualUint16x8", argLen: 2, generic: true, }, { - name: "MaskedAddInt8x16", + name: "MaskedAddUint16x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedEqualInt8x16", + 
name: "MaskedAverageUint16x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterInt8x16", + name: "MaskedEqualUint16x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterUint16x8", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualInt8x16", + name: "MaskedGreaterEqualUint16x8", argLen: 3, generic: true, }, { - name: "MaskedLessInt8x16", + name: "MaskedLessUint16x8", argLen: 3, generic: true, }, { - name: "MaskedLessEqualInt8x16", + name: "MaskedLessEqualUint16x8", argLen: 3, generic: true, }, { - name: "MaskedMaxInt8x16", + name: "MaskedMaxUint16x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinInt8x16", + name: "MaskedMinUint16x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualInt8x16", + name: "MaskedMulHighUint16x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPopCountInt8x16", + name: "MaskedNotEqualUint16x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedPopCountUint16x8", argLen: 2, generic: true, }, { - name: "MaskedSaturatedAddInt8x16", + name: "MaskedSaturatedAddUint16x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedSaturatedSubInt8x16", + name: "MaskedSaturatedSubUint16x8", argLen: 3, generic: true, }, { - name: "MaskedSubInt8x16", + name: "MaskedShiftLeftUint16x8", argLen: 3, generic: true, }, { - name: "MaxInt8x16", + name: "MaskedShiftLeftAndFillUpperFromUint16x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedShiftRightUint16x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedShiftRightAndFillUpperFromUint16x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedShiftRightSignExtendedUint16x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedSubUint16x8", + argLen: 3, + generic: true, + }, + { + name: "MaxUint16x8", argLen: 2, commutative: true, generic: true, }, { - name: "MinInt8x16", + name: "MinUint16x8", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualInt8x16", + name: "MulHighUint16x8", argLen: 2, commutative: true, generic: true, }, { - name: "OrInt8x16", + name: "NotEqualUint16x8", argLen: 2, commutative: true, generic: true, }, { - name: "PopCountInt8x16", - argLen: 1, - generic: true, - }, - { - name: "SaturatedAddInt8x16", + name: "OrUint16x8", argLen: 2, commutative: true, generic: true, }, { - name: "SaturatedSubInt8x16", - argLen: 2, - generic: true, - }, - { - name: "SignInt8x16", + name: "PairwiseAddUint16x8", argLen: 2, generic: true, }, { - name: "SubInt8x16", + name: "PairwiseSubUint16x8", argLen: 2, generic: true, }, { - name: "XorInt8x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AbsoluteInt8x32", + name: "PopCountUint16x8", argLen: 1, generic: true, }, { - name: "AddInt8x32", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndInt8x32", + name: "SaturatedAddUint16x8", argLen: 2, commutative: true, generic: true, }, { - name: "AndNotInt8x32", + name: "SaturatedSubUint16x8", argLen: 2, generic: true, }, { - name: "EqualInt8x32", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "GreaterInt8x32", + name: "ShiftAllLeftUint16x8", argLen: 2, generic: true, }, { - name: "GreaterEqualInt8x32", + name: "ShiftAllRightUint16x8", argLen: 2, generic: true, }, { - name: "LessInt8x32", + name: "ShiftLeftUint16x8", argLen: 2, generic: true, }, { - name: "LessEqualInt8x32", - argLen: 2, + name: "ShiftLeftAndFillUpperFromUint16x8", + argLen: 3, generic: true, }, { - name: 
"MaskedAbsoluteInt8x32", + name: "ShiftRightUint16x8", argLen: 2, generic: true, }, { - name: "MaskedAddInt8x32", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedEqualInt8x32", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedGreaterInt8x32", - argLen: 3, - generic: true, - }, - { - name: "MaskedGreaterEqualInt8x32", + name: "ShiftRightAndFillUpperFromUint16x8", argLen: 3, generic: true, }, { - name: "MaskedLessInt8x32", - argLen: 3, + name: "ShiftRightSignExtendedUint16x8", + argLen: 2, generic: true, }, { - name: "MaskedLessEqualInt8x32", - argLen: 3, + name: "SubUint16x8", + argLen: 2, generic: true, }, { - name: "MaskedMaxInt8x32", - argLen: 3, + name: "XorUint16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMinInt8x32", - argLen: 3, + name: "AddUint32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedNotEqualInt8x32", - argLen: 3, + name: "AndUint32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedPopCountInt8x32", + name: "AndNotUint32x16", argLen: 2, generic: true, }, { - name: "MaskedSaturatedAddInt8x32", - argLen: 3, + name: "EqualUint32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedSaturatedSubInt8x32", - argLen: 3, + name: "GreaterUint32x16", + argLen: 2, generic: true, }, { - name: "MaskedSubInt8x32", - argLen: 3, + name: "GreaterEqualUint32x16", + argLen: 2, generic: true, }, { - name: "MaxInt8x32", - argLen: 2, - commutative: true, - generic: true, + name: "LessUint32x16", + argLen: 2, + generic: true, }, { - name: "MinInt8x32", - argLen: 2, - commutative: true, - generic: true, + name: "LessEqualUint32x16", + argLen: 2, + generic: true, }, { - name: "NotEqualInt8x32", - argLen: 2, + name: "MaskedAddUint32x16", + argLen: 3, commutative: true, generic: true, }, { - name: "OrInt8x32", - argLen: 2, + name: "MaskedAndUint32x16", + argLen: 3, commutative: true, generic: true, }, { - name: "PopCountInt8x32", - argLen: 1, + name: "MaskedAndNotUint32x16", + argLen: 3, generic: true, }, { - name: "SaturatedAddInt8x32", - argLen: 2, + name: "MaskedEqualUint32x16", + argLen: 3, commutative: true, generic: true, }, { - name: "SaturatedSubInt8x32", - argLen: 2, + name: "MaskedGreaterUint32x16", + argLen: 3, generic: true, }, { - name: "SignInt8x32", - argLen: 2, + name: "MaskedGreaterEqualUint32x16", + argLen: 3, generic: true, }, { - name: "SubInt8x32", - argLen: 2, + name: "MaskedLessUint32x16", + argLen: 3, generic: true, }, { - name: "XorInt8x32", - argLen: 2, + name: "MaskedLessEqualUint32x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedMaxUint32x16", + argLen: 3, commutative: true, generic: true, }, { - name: "AbsoluteInt8x64", - argLen: 1, - generic: true, + name: "MaskedMinUint32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AddInt8x64", - argLen: 2, + name: "MaskedNotEqualUint32x16", + argLen: 3, commutative: true, generic: true, }, { - name: "EqualInt8x64", - argLen: 2, + name: "MaskedOrUint32x16", + argLen: 3, commutative: true, generic: true, }, { - name: "GreaterInt8x64", + name: "MaskedPopCountUint32x16", argLen: 2, generic: true, }, { - name: "GreaterEqualInt8x64", - argLen: 2, + name: "MaskedRotateLeftUint32x16", + argLen: 3, generic: true, }, { - name: "LessInt8x64", - argLen: 2, + name: "MaskedRotateRightUint32x16", + argLen: 3, generic: true, }, { - name: "LessEqualInt8x64", - argLen: 2, + name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16", + argLen: 4, generic: true, }, { 
- name: "MaskedAbsoluteInt8x64", - argLen: 2, + name: "MaskedShiftLeftUint32x16", + argLen: 3, generic: true, }, { - name: "MaskedAddInt8x64", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedShiftLeftAndFillUpperFromUint32x16", + argLen: 4, + generic: true, }, { - name: "MaskedEqualInt8x64", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedShiftRightUint32x16", + argLen: 3, + generic: true, }, { - name: "MaskedGreaterInt8x64", - argLen: 3, + name: "MaskedShiftRightAndFillUpperFromUint32x16", + argLen: 4, generic: true, }, { - name: "MaskedGreaterEqualInt8x64", + name: "MaskedShiftRightSignExtendedUint32x16", argLen: 3, generic: true, }, { - name: "MaskedLessInt8x64", + name: "MaskedSubUint32x16", argLen: 3, generic: true, }, { - name: "MaskedLessEqualInt8x64", - argLen: 3, + name: "MaskedUnsignedSignedQuadDotProdAccumulateUint32x16", + argLen: 4, generic: true, }, { - name: "MaskedMaxInt8x64", + name: "MaskedXorUint32x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinInt8x64", - argLen: 3, + name: "MaxUint32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedNotEqualInt8x64", - argLen: 3, + name: "MinUint32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedPopCountInt8x64", - argLen: 2, - generic: true, + name: "NotEqualUint32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedSaturatedAddInt8x64", - argLen: 3, + name: "OrUint32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedSaturatedSubInt8x64", - argLen: 3, + name: "PopCountUint32x16", + argLen: 1, generic: true, }, { - name: "MaskedSubInt8x64", - argLen: 3, + name: "RotateLeftUint32x16", + argLen: 2, generic: true, }, { - name: "MaxInt8x64", - argLen: 2, - commutative: true, - generic: true, + name: "RotateRightUint32x16", + argLen: 2, + generic: true, }, { - name: "MinInt8x64", - argLen: 2, - commutative: true, - generic: true, + name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x16", + argLen: 3, + generic: true, }, { - name: "NotEqualInt8x64", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftLeftUint32x16", + argLen: 2, + generic: true, }, { - name: "PopCountInt8x64", - argLen: 1, + name: "ShiftLeftAndFillUpperFromUint32x16", + argLen: 3, generic: true, }, { - name: "SaturatedAddInt8x64", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightUint32x16", + argLen: 2, + generic: true, }, { - name: "SaturatedSubInt8x64", + name: "ShiftRightAndFillUpperFromUint32x16", + argLen: 3, + generic: true, + }, + { + name: "ShiftRightSignExtendedUint32x16", argLen: 2, generic: true, }, { - name: "SubInt8x64", + name: "SubUint32x16", argLen: 2, generic: true, }, { - name: "AddUint16x16", + name: "UnsignedSignedQuadDotProdAccumulateUint32x16", + argLen: 3, + generic: true, + }, + { + name: "XorUint32x16", argLen: 2, commutative: true, generic: true, }, { - name: "AndUint16x16", + name: "AddUint32x4", argLen: 2, commutative: true, generic: true, }, { - name: "AndNotUint16x16", - argLen: 2, - generic: true, - }, - { - name: "AverageUint16x16", + name: "AndUint32x4", argLen: 2, commutative: true, generic: true, }, { - name: "EqualUint16x16", + name: "AndNotUint32x4", + argLen: 2, + generic: true, + }, + { + name: "EqualUint32x4", argLen: 2, commutative: true, generic: true, }, { - name: "GreaterUint16x16", + name: "GreaterUint32x4", argLen: 2, generic: true, }, { - name: "GreaterEqualUint16x16", + name: "GreaterEqualUint32x4", argLen: 2, generic: true, }, { - name: 
"LessUint16x16", + name: "LessUint32x4", argLen: 2, generic: true, }, { - name: "LessEqualUint16x16", + name: "LessEqualUint32x4", argLen: 2, generic: true, }, { - name: "MaskedAddUint16x16", + name: "MaskedAddUint32x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAverageUint16x16", + name: "MaskedAndUint32x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedEqualUint16x16", + name: "MaskedAndNotUint32x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedEqualUint32x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterUint16x16", + name: "MaskedGreaterUint32x4", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualUint16x16", + name: "MaskedGreaterEqualUint32x4", argLen: 3, generic: true, }, { - name: "MaskedLessUint16x16", + name: "MaskedLessUint32x4", argLen: 3, generic: true, }, { - name: "MaskedLessEqualUint16x16", + name: "MaskedLessEqualUint32x4", argLen: 3, generic: true, }, { - name: "MaskedMaxUint16x16", + name: "MaskedMaxUint32x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinUint16x16", + name: "MaskedMinUint32x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulHighUint16x16", + name: "MaskedNotEqualUint32x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualUint16x16", + name: "MaskedOrUint32x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPopCountUint16x16", + name: "MaskedPopCountUint32x4", argLen: 2, generic: true, }, { - name: "MaskedSaturatedAddUint16x16", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedRotateLeftUint32x4", + argLen: 3, + generic: true, }, { - name: "MaskedSaturatedSubUint16x16", + name: "MaskedRotateRightUint32x4", argLen: 3, generic: true, }, { - name: "MaskedSubUint16x16", + name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedShiftLeftUint32x4", argLen: 3, generic: true, }, { - name: "MaxUint16x16", + name: "MaskedShiftLeftAndFillUpperFromUint32x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedShiftRightUint32x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedShiftRightAndFillUpperFromUint32x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedShiftRightSignExtendedUint32x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedSubUint32x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedUnsignedSignedQuadDotProdAccumulateUint32x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedXorUint32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaxUint32x4", argLen: 2, commutative: true, generic: true, }, { - name: "MinUint16x16", + name: "MinUint32x4", argLen: 2, commutative: true, generic: true, }, { - name: "MulHighUint16x16", + name: "MulEvenWidenUint32x4", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualUint16x16", + name: "NotEqualUint32x4", argLen: 2, commutative: true, generic: true, }, { - name: "OrUint16x16", + name: "OrUint32x4", argLen: 2, commutative: true, generic: true, }, { - name: "PairwiseAddUint16x16", + name: "PairwiseAddUint32x4", argLen: 2, generic: true, }, { - name: "PairwiseSubUint16x16", + name: "PairwiseSubUint32x4", argLen: 2, generic: true, }, { - name: "PopCountUint16x16", + name: "PopCountUint32x4", argLen: 1, generic: true, }, { - name: "SaturatedAddUint16x16", - argLen: 2, - commutative: true, - generic: true, + name: "RotateLeftUint32x4", + argLen: 2, + generic: true, }, { - name: "SaturatedSubUint16x16", 
+ name: "RotateRightUint32x4", argLen: 2, generic: true, }, { - name: "SubUint16x16", + name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x4", + argLen: 3, + generic: true, + }, + { + name: "ShiftAllLeftUint32x4", argLen: 2, generic: true, }, { - name: "XorUint16x16", + name: "ShiftAllRightUint32x4", + argLen: 2, + generic: true, + }, + { + name: "ShiftLeftUint32x4", + argLen: 2, + generic: true, + }, + { + name: "ShiftLeftAndFillUpperFromUint32x4", + argLen: 3, + generic: true, + }, + { + name: "ShiftRightUint32x4", + argLen: 2, + generic: true, + }, + { + name: "ShiftRightAndFillUpperFromUint32x4", + argLen: 3, + generic: true, + }, + { + name: "ShiftRightSignExtendedUint32x4", + argLen: 2, + generic: true, + }, + { + name: "SubUint32x4", + argLen: 2, + generic: true, + }, + { + name: "UnsignedSignedQuadDotProdAccumulateUint32x4", + argLen: 3, + generic: true, + }, + { + name: "XorUint32x4", argLen: 2, commutative: true, generic: true, }, { - name: "AddUint16x32", + name: "AddUint32x8", argLen: 2, commutative: true, generic: true, }, { - name: "AverageUint16x32", + name: "AndUint32x8", argLen: 2, commutative: true, generic: true, }, { - name: "EqualUint16x32", + name: "AndNotUint32x8", + argLen: 2, + generic: true, + }, + { + name: "EqualUint32x8", argLen: 2, commutative: true, generic: true, }, { - name: "GreaterUint16x32", + name: "GreaterUint32x8", argLen: 2, generic: true, }, { - name: "GreaterEqualUint16x32", + name: "GreaterEqualUint32x8", argLen: 2, generic: true, }, { - name: "LessUint16x32", + name: "LessUint32x8", argLen: 2, generic: true, }, { - name: "LessEqualUint16x32", + name: "LessEqualUint32x8", argLen: 2, generic: true, }, { - name: "MaskedAddUint16x32", + name: "MaskedAddUint32x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAverageUint16x32", + name: "MaskedAndUint32x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedEqualUint16x32", + name: "MaskedAndNotUint32x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedEqualUint32x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterUint16x32", + name: "MaskedGreaterUint32x8", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualUint16x32", + name: "MaskedGreaterEqualUint32x8", argLen: 3, generic: true, }, { - name: "MaskedLessUint16x32", + name: "MaskedLessUint32x8", argLen: 3, generic: true, }, { - name: "MaskedLessEqualUint16x32", + name: "MaskedLessEqualUint32x8", argLen: 3, generic: true, }, { - name: "MaskedMaxUint16x32", + name: "MaskedMaxUint32x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinUint16x32", + name: "MaskedMinUint32x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulHighUint16x32", + name: "MaskedNotEqualUint32x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualUint16x32", + name: "MaskedOrUint32x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPopCountUint16x32", + name: "MaskedPopCountUint32x8", argLen: 2, generic: true, }, { - name: "MaskedSaturatedAddUint16x32", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedRotateLeftUint32x8", + argLen: 3, + generic: true, }, { - name: "MaskedSaturatedSubUint16x32", + name: "MaskedRotateRightUint32x8", argLen: 3, generic: true, }, { - name: "MaskedSubUint16x32", + name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedShiftLeftUint32x8", argLen: 3, generic: true, }, { - name: "MaxUint16x32", + name: 
"MaskedShiftLeftAndFillUpperFromUint32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedShiftRightUint32x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedShiftRightAndFillUpperFromUint32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedShiftRightSignExtendedUint32x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedSubUint32x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedUnsignedSignedQuadDotProdAccumulateUint32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedXorUint32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaxUint32x8", argLen: 2, commutative: true, generic: true, }, { - name: "MinUint16x32", + name: "MinUint32x8", argLen: 2, commutative: true, generic: true, }, { - name: "MulHighUint16x32", + name: "MulEvenWidenUint32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "NotEqualUint32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "OrUint32x8", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualUint16x32", - argLen: 2, - commutative: true, - generic: true, + name: "PairwiseAddUint32x8", + argLen: 2, + generic: true, + }, + { + name: "PairwiseSubUint32x8", + argLen: 2, + generic: true, + }, + { + name: "PopCountUint32x8", + argLen: 1, + generic: true, + }, + { + name: "RotateLeftUint32x8", + argLen: 2, + generic: true, + }, + { + name: "RotateRightUint32x8", + argLen: 2, + generic: true, + }, + { + name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x8", + argLen: 3, + generic: true, + }, + { + name: "ShiftAllLeftUint32x8", + argLen: 2, + generic: true, + }, + { + name: "ShiftAllRightUint32x8", + argLen: 2, + generic: true, + }, + { + name: "ShiftLeftUint32x8", + argLen: 2, + generic: true, + }, + { + name: "ShiftLeftAndFillUpperFromUint32x8", + argLen: 3, + generic: true, }, { - name: "PopCountUint16x32", - argLen: 1, + name: "ShiftRightUint32x8", + argLen: 2, generic: true, }, { - name: "SaturatedAddUint16x32", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightAndFillUpperFromUint32x8", + argLen: 3, + generic: true, }, { - name: "SaturatedSubUint16x32", + name: "ShiftRightSignExtendedUint32x8", argLen: 2, generic: true, }, { - name: "SubUint16x32", + name: "SubUint32x8", argLen: 2, generic: true, }, { - name: "AddUint16x8", + name: "UnsignedSignedQuadDotProdAccumulateUint32x8", + argLen: 3, + generic: true, + }, + { + name: "XorUint32x8", argLen: 2, commutative: true, generic: true, }, { - name: "AndUint16x8", + name: "AddUint64x2", argLen: 2, commutative: true, generic: true, }, { - name: "AndNotUint16x8", - argLen: 2, - generic: true, - }, - { - name: "AverageUint16x8", + name: "AndUint64x2", argLen: 2, commutative: true, generic: true, }, { - name: "EqualUint16x8", + name: "AndNotUint64x2", + argLen: 2, + generic: true, + }, + { + name: "EqualUint64x2", argLen: 2, commutative: true, generic: true, }, { - name: "GreaterUint16x8", + name: "GreaterUint64x2", argLen: 2, generic: true, }, { - name: "GreaterEqualUint16x8", + name: "GreaterEqualUint64x2", argLen: 2, generic: true, }, { - name: "LessUint16x8", + name: "LessUint64x2", argLen: 2, generic: true, }, { - name: "LessEqualUint16x8", + name: "LessEqualUint64x2", argLen: 2, generic: true, }, { - name: "MaskedAddUint16x8", + name: "MaskedAddUint64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAverageUint16x8", + name: "MaskedAndUint64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedEqualUint16x8", + name: 
"MaskedAndNotUint64x2", + argLen: 3, + generic: true, + }, + { + name: "MaskedEqualUint64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterUint16x8", + name: "MaskedGreaterUint64x2", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualUint16x8", + name: "MaskedGreaterEqualUint64x2", argLen: 3, generic: true, }, { - name: "MaskedLessUint16x8", + name: "MaskedLessUint64x2", argLen: 3, generic: true, }, { - name: "MaskedLessEqualUint16x8", + name: "MaskedLessEqualUint64x2", argLen: 3, generic: true, }, { - name: "MaskedMaxUint16x8", + name: "MaskedMaxUint64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinUint16x8", + name: "MaskedMinUint64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulHighUint16x8", + name: "MaskedMulEvenWidenUint64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualUint16x8", + name: "MaskedNotEqualUint64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPopCountUint16x8", + name: "MaskedOrUint64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedPopCountUint64x2", argLen: 2, generic: true, }, { - name: "MaskedSaturatedAddUint16x8", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedRotateLeftUint64x2", + argLen: 3, + generic: true, }, { - name: "MaskedSaturatedSubUint16x8", + name: "MaskedRotateRightUint64x2", argLen: 3, generic: true, }, { - name: "MaskedSubUint16x8", + name: "MaskedShiftAllLeftUint64x2", argLen: 3, generic: true, }, { - name: "MaxUint16x8", + name: "MaskedShiftAllRightUint64x2", + argLen: 3, + generic: true, + }, + { + name: "MaskedShiftLeftUint64x2", + argLen: 3, + generic: true, + }, + { + name: "MaskedShiftLeftAndFillUpperFromUint64x2", + argLen: 4, + generic: true, + }, + { + name: "MaskedShiftRightUint64x2", + argLen: 3, + generic: true, + }, + { + name: "MaskedShiftRightAndFillUpperFromUint64x2", + argLen: 4, + generic: true, + }, + { + name: "MaskedShiftRightSignExtendedUint64x2", + argLen: 3, + generic: true, + }, + { + name: "MaskedSubUint64x2", + argLen: 3, + generic: true, + }, + { + name: "MaskedXorUint64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaxUint64x2", argLen: 2, commutative: true, generic: true, }, { - name: "MinUint16x8", + name: "MinUint64x2", argLen: 2, commutative: true, generic: true, }, { - name: "MulHighUint16x8", + name: "MulEvenWidenUint64x2", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualUint16x8", + name: "NotEqualUint64x2", argLen: 2, commutative: true, generic: true, }, { - name: "OrUint16x8", + name: "OrUint64x2", argLen: 2, commutative: true, generic: true, }, { - name: "PairwiseAddUint16x8", + name: "PopCountUint64x2", + argLen: 1, + generic: true, + }, + { + name: "RotateLeftUint64x2", argLen: 2, generic: true, }, { - name: "PairwiseSubUint16x8", + name: "RotateRightUint64x2", argLen: 2, generic: true, }, { - name: "PopCountUint16x8", - argLen: 1, + name: "ShiftAllLeftUint64x2", + argLen: 2, generic: true, }, { - name: "SaturatedAddUint16x8", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftAllRightUint64x2", + argLen: 2, + generic: true, }, { - name: "SaturatedSubUint16x8", + name: "ShiftLeftUint64x2", argLen: 2, generic: true, }, { - name: "SubUint16x8", + name: "ShiftLeftAndFillUpperFromUint64x2", + argLen: 3, + generic: true, + }, + { + name: "ShiftRightUint64x2", argLen: 2, generic: true, }, { - name: "XorUint16x8", + name: "ShiftRightAndFillUpperFromUint64x2", + argLen: 3, + 
generic: true, + }, + { + name: "ShiftRightSignExtendedUint64x2", + argLen: 2, + generic: true, + }, + { + name: "SubUint64x2", + argLen: 2, + generic: true, + }, + { + name: "XorUint64x2", argLen: 2, commutative: true, generic: true, }, { - name: "AddUint32x16", + name: "AddUint64x4", argLen: 2, commutative: true, generic: true, }, { - name: "AndUint32x16", + name: "AndUint64x4", argLen: 2, commutative: true, generic: true, }, { - name: "AndNotUint32x16", + name: "AndNotUint64x4", argLen: 2, generic: true, }, { - name: "EqualUint32x16", + name: "EqualUint64x4", argLen: 2, commutative: true, generic: true, }, { - name: "GreaterUint32x16", + name: "GreaterUint64x4", argLen: 2, generic: true, }, { - name: "GreaterEqualUint32x16", + name: "GreaterEqualUint64x4", argLen: 2, generic: true, }, { - name: "LessUint32x16", + name: "LessUint64x4", argLen: 2, generic: true, }, { - name: "LessEqualUint32x16", + name: "LessEqualUint64x4", argLen: 2, generic: true, }, { - name: "MaskedAddUint32x16", + name: "MaskedAddUint64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndUint32x16", + name: "MaskedAndUint64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndNotUint32x16", + name: "MaskedAndNotUint64x4", argLen: 3, generic: true, }, { - name: "MaskedEqualUint32x16", + name: "MaskedEqualUint64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterUint32x16", + name: "MaskedGreaterUint64x4", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualUint32x16", + name: "MaskedGreaterEqualUint64x4", argLen: 3, generic: true, }, { - name: "MaskedLessUint32x16", + name: "MaskedLessUint64x4", argLen: 3, generic: true, }, { - name: "MaskedLessEqualUint32x16", + name: "MaskedLessEqualUint64x4", argLen: 3, generic: true, }, { - name: "MaskedMaxUint32x16", + name: "MaskedMaxUint64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinUint32x16", + name: "MaskedMinUint64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualUint32x16", + name: "MaskedMulEvenWidenUint64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedOrUint32x16", + name: "MaskedNotEqualUint64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPopCountUint32x16", + name: "MaskedOrUint64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedPopCountUint64x4", argLen: 2, generic: true, }, { - name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16", + name: "MaskedRotateLeftUint64x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedRotateRightUint64x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedShiftAllLeftUint64x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedShiftAllRightUint64x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedShiftLeftUint64x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedShiftLeftAndFillUpperFromUint64x4", argLen: 4, generic: true, }, { - name: "MaskedSubUint32x16", + name: "MaskedShiftRightUint64x4", argLen: 3, generic: true, }, { - name: "MaskedUnsignedSignedQuadDotProdAccumulateUint32x16", + name: "MaskedShiftRightAndFillUpperFromUint64x4", argLen: 4, generic: true, }, { - name: "MaskedXorUint32x16", + name: "MaskedShiftRightSignExtendedUint64x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedSubUint64x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedXorUint64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaxUint32x16", + name: "MaxUint64x4", argLen: 2, commutative: 
true, generic: true, }, { - name: "MinUint32x16", + name: "MinUint64x4", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualUint32x16", + name: "MulEvenWidenUint64x4", argLen: 2, commutative: true, generic: true, }, { - name: "OrUint32x16", + name: "NotEqualUint64x4", argLen: 2, commutative: true, generic: true, }, { - name: "PopCountUint32x16", + name: "OrUint64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "PopCountUint64x4", argLen: 1, generic: true, }, { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x16", + name: "RotateLeftUint64x4", + argLen: 2, + generic: true, + }, + { + name: "RotateRightUint64x4", + argLen: 2, + generic: true, + }, + { + name: "ShiftAllLeftUint64x4", + argLen: 2, + generic: true, + }, + { + name: "ShiftAllRightUint64x4", + argLen: 2, + generic: true, + }, + { + name: "ShiftLeftUint64x4", + argLen: 2, + generic: true, + }, + { + name: "ShiftLeftAndFillUpperFromUint64x4", argLen: 3, generic: true, }, { - name: "SubUint32x16", + name: "ShiftRightUint64x4", argLen: 2, generic: true, }, { - name: "UnsignedSignedQuadDotProdAccumulateUint32x16", + name: "ShiftRightAndFillUpperFromUint64x4", argLen: 3, generic: true, }, { - name: "XorUint32x16", + name: "ShiftRightSignExtendedUint64x4", + argLen: 2, + generic: true, + }, + { + name: "SubUint64x4", + argLen: 2, + generic: true, + }, + { + name: "XorUint64x4", argLen: 2, commutative: true, generic: true, }, { - name: "AddUint32x4", + name: "AddUint64x8", argLen: 2, commutative: true, generic: true, }, { - name: "AndUint32x4", + name: "AndUint64x8", argLen: 2, commutative: true, generic: true, }, { - name: "AndNotUint32x4", + name: "AndNotUint64x8", argLen: 2, generic: true, }, { - name: "EqualUint32x4", + name: "EqualUint64x8", argLen: 2, commutative: true, generic: true, }, { - name: "GreaterUint32x4", + name: "GreaterUint64x8", argLen: 2, generic: true, }, { - name: "GreaterEqualUint32x4", + name: "GreaterEqualUint64x8", argLen: 2, generic: true, }, { - name: "LessUint32x4", + name: "LessUint64x8", argLen: 2, generic: true, }, { - name: "LessEqualUint32x4", + name: "LessEqualUint64x8", argLen: 2, generic: true, }, { - name: "MaskedAddUint32x4", + name: "MaskedAddUint64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndUint32x4", + name: "MaskedAndUint64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndNotUint32x4", + name: "MaskedAndNotUint64x8", argLen: 3, generic: true, }, { - name: "MaskedEqualUint32x4", + name: "MaskedEqualUint64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterUint32x4", + name: "MaskedGreaterUint64x8", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualUint32x4", + name: "MaskedGreaterEqualUint64x8", argLen: 3, generic: true, }, { - name: "MaskedLessUint32x4", + name: "MaskedLessUint64x8", argLen: 3, generic: true, }, { - name: "MaskedLessEqualUint32x4", + name: "MaskedLessEqualUint64x8", argLen: 3, generic: true, }, { - name: "MaskedMaxUint32x4", + name: "MaskedMaxUint64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinUint32x4", + name: "MaskedMinUint64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualUint32x4", + name: "MaskedMulEvenWidenUint64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedOrUint32x4", + name: "MaskedNotEqualUint64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPopCountUint32x4", + name: "MaskedOrUint64x8", + argLen: 3, + commutative: true, + generic: 
true, + }, + { + name: "MaskedPopCountUint64x8", argLen: 2, generic: true, }, { - name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4", + name: "MaskedRotateLeftUint64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedRotateRightUint64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedShiftAllLeftUint64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedShiftAllRightUint64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedShiftLeftUint64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedShiftLeftAndFillUpperFromUint64x8", argLen: 4, generic: true, }, { - name: "MaskedSubUint32x4", + name: "MaskedShiftRightUint64x8", argLen: 3, generic: true, }, { - name: "MaskedUnsignedSignedQuadDotProdAccumulateUint32x4", + name: "MaskedShiftRightAndFillUpperFromUint64x8", argLen: 4, generic: true, }, { - name: "MaskedXorUint32x4", + name: "MaskedShiftRightSignExtendedUint64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedSubUint64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedXorUint64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaxUint32x4", + name: "MaxUint64x8", argLen: 2, commutative: true, generic: true, }, { - name: "MinUint32x4", + name: "MinUint64x8", argLen: 2, commutative: true, generic: true, }, { - name: "MulEvenWidenUint32x4", + name: "MulEvenWidenUint64x8", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualUint32x4", + name: "NotEqualUint64x8", argLen: 2, commutative: true, generic: true, }, { - name: "OrUint32x4", + name: "OrUint64x8", argLen: 2, commutative: true, generic: true, }, { - name: "PairwiseAddUint32x4", + name: "PopCountUint64x8", + argLen: 1, + generic: true, + }, + { + name: "RotateLeftUint64x8", argLen: 2, generic: true, }, { - name: "PairwiseSubUint32x4", + name: "RotateRightUint64x8", argLen: 2, generic: true, }, { - name: "PopCountUint32x4", - argLen: 1, + name: "ShiftAllLeftUint64x8", + argLen: 2, generic: true, }, { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x4", + name: "ShiftAllRightUint64x8", + argLen: 2, + generic: true, + }, + { + name: "ShiftLeftUint64x8", + argLen: 2, + generic: true, + }, + { + name: "ShiftLeftAndFillUpperFromUint64x8", argLen: 3, generic: true, }, { - name: "SubUint32x4", + name: "ShiftRightUint64x8", argLen: 2, generic: true, }, { - name: "UnsignedSignedQuadDotProdAccumulateUint32x4", + name: "ShiftRightAndFillUpperFromUint64x8", argLen: 3, generic: true, }, { - name: "XorUint32x4", + name: "ShiftRightSignExtendedUint64x8", + argLen: 2, + generic: true, + }, + { + name: "SubUint64x8", + argLen: 2, + generic: true, + }, + { + name: "XorUint64x8", argLen: 2, commutative: true, generic: true, }, { - name: "AddUint32x8", + name: "AddUint8x16", argLen: 2, commutative: true, generic: true, }, { - name: "AndUint32x8", + name: "AndUint8x16", argLen: 2, commutative: true, generic: true, }, { - name: "AndNotUint32x8", + name: "AndNotUint8x16", argLen: 2, generic: true, }, { - name: "EqualUint32x8", + name: "AverageUint8x16", argLen: 2, commutative: true, generic: true, }, { - name: "GreaterUint32x8", + name: "EqualUint8x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "GreaterUint8x16", argLen: 2, generic: true, }, { - name: "GreaterEqualUint32x8", + name: "GreaterEqualUint8x16", argLen: 2, generic: true, }, { - name: "LessUint32x8", + name: "LessUint8x16", argLen: 2, generic: true, }, { - name: "LessEqualUint32x8", + name: "LessEqualUint8x16", argLen: 2, generic: true, }, { - name: "MaskedAddUint32x8", + 
name: "MaskedAddUint8x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndUint32x8", + name: "MaskedAverageUint8x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndNotUint32x8", - argLen: 3, - generic: true, - }, - { - name: "MaskedEqualUint32x8", + name: "MaskedEqualUint8x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterUint32x8", + name: "MaskedGreaterUint8x16", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualUint32x8", + name: "MaskedGreaterEqualUint8x16", argLen: 3, generic: true, }, { - name: "MaskedLessUint32x8", + name: "MaskedLessUint8x16", argLen: 3, generic: true, }, { - name: "MaskedLessEqualUint32x8", + name: "MaskedLessEqualUint8x16", argLen: 3, generic: true, }, { - name: "MaskedMaxUint32x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedMinUint32x8", + name: "MaskedMaxUint8x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualUint32x8", + name: "MaskedMinUint8x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedOrUint32x8", + name: "MaskedNotEqualUint8x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPopCountUint32x8", + name: "MaskedPopCountUint8x16", argLen: 2, generic: true, }, { - name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8", - argLen: 4, - generic: true, + name: "MaskedSaturatedAddUint8x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaskedSubUint32x8", + name: "MaskedSaturatedSubUint8x16", argLen: 3, generic: true, }, { - name: "MaskedUnsignedSignedQuadDotProdAccumulateUint32x8", - argLen: 4, + name: "MaskedSaturatedUnsignedSignedPairDotProdUint8x16", + argLen: 3, generic: true, }, { - name: "MaskedXorUint32x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaxUint32x8", - argLen: 2, - commutative: true, - generic: true, + name: "MaskedSubUint8x16", + argLen: 3, + generic: true, }, { - name: "MinUint32x8", + name: "MaxUint8x16", argLen: 2, commutative: true, generic: true, }, { - name: "MulEvenWidenUint32x8", + name: "MinUint8x16", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualUint32x8", + name: "NotEqualUint8x16", argLen: 2, commutative: true, generic: true, }, { - name: "OrUint32x8", + name: "OrUint8x16", argLen: 2, commutative: true, generic: true, }, { - name: "PairwiseAddUint32x8", - argLen: 2, - generic: true, - }, - { - name: "PairwiseSubUint32x8", - argLen: 2, + name: "PopCountUint8x16", + argLen: 1, generic: true, }, { - name: "PopCountUint32x8", - argLen: 1, - generic: true, + name: "SaturatedAddUint8x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x8", - argLen: 3, + name: "SaturatedSubUint8x16", + argLen: 2, generic: true, }, { - name: "SubUint32x8", + name: "SaturatedUnsignedSignedPairDotProdUint8x16", argLen: 2, generic: true, }, { - name: "UnsignedSignedQuadDotProdAccumulateUint32x8", - argLen: 3, + name: "SubUint8x16", + argLen: 2, generic: true, }, { - name: "XorUint32x8", + name: "XorUint8x16", argLen: 2, commutative: true, generic: true, }, { - name: "AddUint64x2", + name: "AddUint8x32", argLen: 2, commutative: true, generic: true, }, { - name: "AndUint64x2", + name: "AndUint8x32", argLen: 2, commutative: true, generic: true, }, { - name: "AndNotUint64x2", + name: "AndNotUint8x32", argLen: 2, generic: true, }, { - name: "EqualUint64x2", + name: "AverageUint8x32", argLen: 2, commutative: true, generic: true, }, { - name: 
"GreaterUint64x2", + name: "EqualUint8x32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "GreaterUint8x32", argLen: 2, generic: true, }, { - name: "GreaterEqualUint64x2", + name: "GreaterEqualUint8x32", argLen: 2, generic: true, }, { - name: "LessUint64x2", + name: "LessUint8x32", argLen: 2, generic: true, }, { - name: "LessEqualUint64x2", + name: "LessEqualUint8x32", argLen: 2, generic: true, }, { - name: "MaskedAddUint64x2", + name: "MaskedAddUint8x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndUint64x2", + name: "MaskedAverageUint8x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndNotUint64x2", - argLen: 3, - generic: true, - }, - { - name: "MaskedEqualUint64x2", + name: "MaskedEqualUint8x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterUint64x2", + name: "MaskedGreaterUint8x32", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualUint64x2", + name: "MaskedGreaterEqualUint8x32", argLen: 3, generic: true, }, { - name: "MaskedLessUint64x2", + name: "MaskedLessUint8x32", argLen: 3, generic: true, }, { - name: "MaskedLessEqualUint64x2", + name: "MaskedLessEqualUint8x32", argLen: 3, generic: true, }, { - name: "MaskedMaxUint64x2", + name: "MaskedMaxUint8x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinUint64x2", + name: "MaskedMinUint8x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulEvenWidenUint64x2", + name: "MaskedNotEqualUint8x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualUint64x2", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedPopCountUint8x32", + argLen: 2, + generic: true, }, { - name: "MaskedOrUint64x2", + name: "MaskedSaturatedAddUint8x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPopCountUint64x2", - argLen: 2, + name: "MaskedSaturatedSubUint8x32", + argLen: 3, generic: true, }, { - name: "MaskedSubUint64x2", + name: "MaskedSaturatedUnsignedSignedPairDotProdUint8x32", argLen: 3, generic: true, }, { - name: "MaskedXorUint64x2", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedSubUint8x32", + argLen: 3, + generic: true, }, { - name: "MaxUint64x2", + name: "MaxUint8x32", argLen: 2, commutative: true, generic: true, }, { - name: "MinUint64x2", + name: "MinUint8x32", argLen: 2, commutative: true, generic: true, }, { - name: "MulEvenWidenUint64x2", + name: "NotEqualUint8x32", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualUint64x2", + name: "OrUint8x32", argLen: 2, commutative: true, generic: true, }, { - name: "OrUint64x2", + name: "PopCountUint8x32", + argLen: 1, + generic: true, + }, + { + name: "SaturatedAddUint8x32", argLen: 2, commutative: true, generic: true, }, { - name: "PopCountUint64x2", - argLen: 1, + name: "SaturatedSubUint8x32", + argLen: 2, generic: true, }, { - name: "SubUint64x2", + name: "SaturatedUnsignedSignedPairDotProdUint8x32", argLen: 2, generic: true, }, { - name: "XorUint64x2", + name: "SubUint8x32", + argLen: 2, + generic: true, + }, + { + name: "XorUint8x32", argLen: 2, commutative: true, generic: true, }, { - name: "AddUint64x4", + name: "AddUint8x64", argLen: 2, commutative: true, generic: true, }, { - name: "AndUint64x4", + name: "AverageUint8x64", argLen: 2, commutative: true, generic: true, }, { - name: "AndNotUint64x4", - argLen: 2, - generic: true, - }, - { - name: "EqualUint64x4", + name: "EqualUint8x64", argLen: 2, commutative: true, generic: true, }, { - name: "GreaterUint64x4", + name: 
"GreaterUint8x64", argLen: 2, generic: true, }, { - name: "GreaterEqualUint64x4", + name: "GreaterEqualUint8x64", argLen: 2, generic: true, }, { - name: "LessUint64x4", + name: "LessUint8x64", argLen: 2, generic: true, }, { - name: "LessEqualUint64x4", + name: "LessEqualUint8x64", argLen: 2, generic: true, }, { - name: "MaskedAddUint64x4", + name: "MaskedAddUint8x64", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndUint64x4", + name: "MaskedAverageUint8x64", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndNotUint64x4", - argLen: 3, - generic: true, - }, - { - name: "MaskedEqualUint64x4", + name: "MaskedEqualUint8x64", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterUint64x4", + name: "MaskedGreaterUint8x64", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualUint64x4", + name: "MaskedGreaterEqualUint8x64", argLen: 3, generic: true, }, { - name: "MaskedLessUint64x4", + name: "MaskedLessUint8x64", argLen: 3, generic: true, }, { - name: "MaskedLessEqualUint64x4", + name: "MaskedLessEqualUint8x64", argLen: 3, generic: true, }, { - name: "MaskedMaxUint64x4", + name: "MaskedMaxUint8x64", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinUint64x4", + name: "MaskedMinUint8x64", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulEvenWidenUint64x4", + name: "MaskedNotEqualUint8x64", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualUint64x4", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedPopCountUint8x64", + argLen: 2, + generic: true, }, { - name: "MaskedOrUint64x4", + name: "MaskedSaturatedAddUint8x64", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPopCountUint64x4", - argLen: 2, + name: "MaskedSaturatedSubUint8x64", + argLen: 3, generic: true, }, { - name: "MaskedSubUint64x4", + name: "MaskedSaturatedUnsignedSignedPairDotProdUint8x64", argLen: 3, generic: true, }, { - name: "MaskedXorUint64x4", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedSubUint8x64", + argLen: 3, + generic: true, }, { - name: "MaxUint64x4", + name: "MaxUint8x64", argLen: 2, commutative: true, generic: true, }, { - name: "MinUint64x4", + name: "MinUint8x64", argLen: 2, commutative: true, generic: true, }, { - name: "MulEvenWidenUint64x4", + name: "NotEqualUint8x64", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualUint64x4", - argLen: 2, - commutative: true, - generic: true, + name: "PopCountUint8x64", + argLen: 1, + generic: true, }, { - name: "OrUint64x4", + name: "SaturatedAddUint8x64", argLen: 2, commutative: true, generic: true, }, { - name: "PopCountUint64x4", - argLen: 1, + name: "SaturatedSubUint8x64", + argLen: 2, generic: true, }, { - name: "SubUint64x4", + name: "SaturatedUnsignedSignedPairDotProdUint8x64", argLen: 2, generic: true, }, { - name: "XorUint64x4", - argLen: 2, - commutative: true, - generic: true, + name: "SubUint8x64", + argLen: 2, + generic: true, }, { - name: "AddUint64x8", - argLen: 2, - commutative: true, - generic: true, + name: "CeilSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "AndUint64x8", - argLen: 2, - commutative: true, - generic: true, + name: "CeilWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "AndNotUint64x8", - argLen: 2, + name: "DiffWithCeilSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, generic: true, }, { - name: "EqualUint64x8", - argLen: 2, - 
commutative: true, - generic: true, + name: "DiffWithCeilWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "GreaterUint64x8", - argLen: 2, + name: "DiffWithFloorSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, generic: true, }, { - name: "GreaterEqualUint64x8", - argLen: 2, + name: "DiffWithFloorWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, generic: true, }, { - name: "LessUint64x8", + name: "DiffWithRoundSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithRoundWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithTruncSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithTruncWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "FloorSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "FloorWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "MaskedCeilSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, argLen: 2, generic: true, }, { - name: "LessEqualUint64x8", + name: "MaskedCeilWithPrecisionFloat32x16", + auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedAddUint64x8", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, }, { - name: "MaskedAndUint64x8", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedDiffWithCeilWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, }, { - name: "MaskedAndNotUint64x8", - argLen: 3, + name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, generic: true, }, { - name: "MaskedEqualUint64x8", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedDiffWithFloorWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, }, { - name: "MaskedGreaterUint64x8", - argLen: 3, + name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, generic: true, }, { - name: "MaskedGreaterEqualUint64x8", - argLen: 3, + name: "MaskedDiffWithRoundWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, generic: true, }, { - name: "MaskedLessUint64x8", - argLen: 3, + name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, generic: true, }, { - name: "MaskedLessEqualUint64x8", - argLen: 3, + name: "MaskedDiffWithTruncWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, generic: true, }, { - name: "MaskedMaxUint64x8", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedFloorSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, }, { - name: "MaskedMinUint64x8", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedFloorWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, }, { - name: "MaskedMulEvenWidenUint64x8", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedRoundSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, }, { - name: "MaskedNotEqualUint64x8", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedRoundWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, }, 
{ - name: "MaskedOrUint64x8", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedTruncSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, }, { - name: "MaskedPopCountUint64x8", + name: "MaskedTruncWithPrecisionFloat32x16", + auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedSubUint64x8", - argLen: 3, + name: "RoundSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, generic: true, }, { - name: "MaskedXorUint64x8", - argLen: 3, - commutative: true, - generic: true, + name: "RoundWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "MaxUint64x8", - argLen: 2, - commutative: true, - generic: true, + name: "TruncSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "MinUint64x8", - argLen: 2, - commutative: true, - generic: true, + name: "TruncWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "MulEvenWidenUint64x8", - argLen: 2, - commutative: true, - generic: true, + name: "CeilSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "NotEqualUint64x8", - argLen: 2, - commutative: true, - generic: true, + name: "CeilWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "OrUint64x8", - argLen: 2, - commutative: true, - generic: true, + name: "DiffWithCeilSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "PopCountUint64x8", + name: "DiffWithCeilWithPrecisionFloat32x4", + auxType: auxInt8, argLen: 1, generic: true, }, { - name: "SubUint64x8", - argLen: 2, + name: "DiffWithFloorSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, generic: true, }, { - name: "XorUint64x8", - argLen: 2, - commutative: true, - generic: true, + name: "DiffWithFloorWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "AddUint8x16", - argLen: 2, - commutative: true, - generic: true, + name: "DiffWithRoundSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "AndUint8x16", - argLen: 2, - commutative: true, - generic: true, + name: "DiffWithRoundWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "AndNotUint8x16", - argLen: 2, + name: "DiffWithTruncSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, generic: true, }, { - name: "AverageUint8x16", - argLen: 2, - commutative: true, - generic: true, + name: "DiffWithTruncWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "EqualUint8x16", - argLen: 2, - commutative: true, - generic: true, + name: "FloorSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "FloorWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "GreaterUint8x16", + name: "MaskedCeilSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, argLen: 2, generic: true, }, { - name: "GreaterEqualUint8x16", + name: "MaskedCeilWithPrecisionFloat32x4", + auxType: auxInt8, argLen: 2, generic: true, }, { - name: "LessUint8x16", + name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, argLen: 2, generic: true, }, { - name: "LessEqualUint8x16", + name: "MaskedDiffWithCeilWithPrecisionFloat32x4", + auxType: auxInt8, argLen: 2, generic: true, }, { 
- name: "MaskedAddUint8x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedAverageUint8x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedEqualUint8x16", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, + generic: true, }, { - name: "MaskedGreaterUint8x16", - argLen: 3, + name: "MaskedDiffWithFloorWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, generic: true, }, { - name: "MaskedGreaterEqualUint8x16", - argLen: 3, + name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, generic: true, }, { - name: "MaskedLessUint8x16", - argLen: 3, + name: "MaskedDiffWithRoundWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, generic: true, }, { - name: "MaskedLessEqualUint8x16", - argLen: 3, + name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, generic: true, }, { - name: "MaskedMaxUint8x16", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedDiffWithTruncWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, + generic: true, }, { - name: "MaskedMinUint8x16", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedFloorSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, + generic: true, }, { - name: "MaskedNotEqualUint8x16", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedFloorWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, + generic: true, }, { - name: "MaskedPopCountUint8x16", + name: "MaskedRoundSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedSaturatedAddUint8x16", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedRoundWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, + generic: true, }, { - name: "MaskedSaturatedSubUint8x16", - argLen: 3, + name: "MaskedTruncSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, generic: true, }, { - name: "MaskedSaturatedUnsignedSignedPairDotProdUint8x16", - argLen: 3, + name: "MaskedTruncWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, generic: true, }, { - name: "MaskedSubUint8x16", - argLen: 3, + name: "RoundSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, generic: true, }, { - name: "MaxUint8x16", - argLen: 2, - commutative: true, - generic: true, + name: "RoundWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "MinUint8x16", - argLen: 2, - commutative: true, - generic: true, + name: "TruncSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "NotEqualUint8x16", - argLen: 2, - commutative: true, - generic: true, + name: "TruncWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "OrUint8x16", - argLen: 2, - commutative: true, - generic: true, + name: "CeilSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "PopCountUint8x16", + name: "CeilWithPrecisionFloat32x8", + auxType: auxInt8, argLen: 1, generic: true, }, { - name: "SaturatedAddUint8x16", - argLen: 2, - commutative: true, - generic: true, + name: "DiffWithCeilSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "SaturatedSubUint8x16", - argLen: 2, + name: "DiffWithCeilWithPrecisionFloat32x8", + 
auxType: auxInt8, + argLen: 1, generic: true, }, { - name: "SaturatedUnsignedSignedPairDotProdUint8x16", - argLen: 2, + name: "DiffWithFloorSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, generic: true, }, { - name: "SubUint8x16", - argLen: 2, + name: "DiffWithFloorWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, generic: true, }, { - name: "XorUint8x16", - argLen: 2, - commutative: true, - generic: true, + name: "DiffWithRoundSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "AddUint8x32", - argLen: 2, - commutative: true, - generic: true, + name: "DiffWithRoundWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "AndUint8x32", - argLen: 2, - commutative: true, - generic: true, + name: "DiffWithTruncSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "AndNotUint8x32", - argLen: 2, + name: "DiffWithTruncWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, generic: true, }, { - name: "AverageUint8x32", - argLen: 2, - commutative: true, - generic: true, + name: "FloorSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "EqualUint8x32", - argLen: 2, - commutative: true, - generic: true, + name: "FloorWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "GreaterUint8x32", + name: "MaskedCeilSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, argLen: 2, generic: true, }, { - name: "GreaterEqualUint8x32", + name: "MaskedCeilWithPrecisionFloat32x8", + auxType: auxInt8, argLen: 2, generic: true, }, { - name: "LessUint8x32", + name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, argLen: 2, generic: true, }, { - name: "LessEqualUint8x32", + name: "MaskedDiffWithCeilWithPrecisionFloat32x8", + auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedAddUint8x32", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, + generic: true, }, { - name: "MaskedAverageUint8x32", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedDiffWithFloorWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, + generic: true, }, { - name: "MaskedEqualUint8x32", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, + generic: true, }, { - name: "MaskedGreaterUint8x32", - argLen: 3, + name: "MaskedDiffWithRoundWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, generic: true, }, { - name: "MaskedGreaterEqualUint8x32", - argLen: 3, + name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, generic: true, }, { - name: "MaskedLessUint8x32", - argLen: 3, + name: "MaskedDiffWithTruncWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, generic: true, }, { - name: "MaskedLessEqualUint8x32", - argLen: 3, + name: "MaskedFloorSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, generic: true, }, { - name: "MaskedMaxUint8x32", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedFloorWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, + generic: true, }, { - name: "MaskedMinUint8x32", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedRoundSuppressExceptionWithPrecisionFloat32x8", + auxType: 
auxInt8, + argLen: 2, + generic: true, }, { - name: "MaskedNotEqualUint8x32", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedRoundWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, + generic: true, }, { - name: "MaskedPopCountUint8x32", + name: "MaskedTruncSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedSaturatedAddUint8x32", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedTruncWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, + generic: true, }, { - name: "MaskedSaturatedSubUint8x32", - argLen: 3, + name: "RoundSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, generic: true, }, { - name: "MaskedSaturatedUnsignedSignedPairDotProdUint8x32", - argLen: 3, + name: "RoundWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, generic: true, }, { - name: "MaskedSubUint8x32", - argLen: 3, + name: "TruncSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, generic: true, }, { - name: "MaxUint8x32", - argLen: 2, - commutative: true, - generic: true, + name: "TruncWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "MinUint8x32", - argLen: 2, - commutative: true, - generic: true, + name: "CeilSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "NotEqualUint8x32", - argLen: 2, - commutative: true, - generic: true, + name: "CeilWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "OrUint8x32", - argLen: 2, - commutative: true, - generic: true, + name: "DiffWithCeilSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "PopCountUint8x32", + name: "DiffWithCeilWithPrecisionFloat64x2", + auxType: auxInt8, argLen: 1, generic: true, }, { - name: "SaturatedAddUint8x32", - argLen: 2, - commutative: true, - generic: true, + name: "DiffWithFloorSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "SaturatedSubUint8x32", - argLen: 2, + name: "DiffWithFloorWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, generic: true, }, { - name: "SaturatedUnsignedSignedPairDotProdUint8x32", - argLen: 2, + name: "DiffWithRoundSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, generic: true, }, { - name: "SubUint8x32", - argLen: 2, + name: "DiffWithRoundWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, generic: true, }, { - name: "XorUint8x32", - argLen: 2, - commutative: true, - generic: true, + name: "DiffWithTruncSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "AddUint8x64", - argLen: 2, - commutative: true, - generic: true, + name: "DiffWithTruncWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "AverageUint8x64", - argLen: 2, - commutative: true, - generic: true, + name: "FloorSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "EqualUint8x64", - argLen: 2, - commutative: true, - generic: true, + name: "FloorWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "GreaterUint8x64", + name: "MaskedCeilSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, argLen: 2, generic: true, }, { - name: "GreaterEqualUint8x64", + name: "MaskedCeilWithPrecisionFloat64x2", + auxType: auxInt8, argLen: 2, generic: true, }, { - name: 
"LessUint8x64", + name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, argLen: 2, generic: true, }, { - name: "LessEqualUint8x64", + name: "MaskedDiffWithCeilWithPrecisionFloat64x2", + auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedAddUint8x64", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedAverageUint8x64", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedEqualUint8x64", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedGreaterUint8x64", - argLen: 3, + name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, generic: true, }, { - name: "MaskedGreaterEqualUint8x64", - argLen: 3, + name: "MaskedDiffWithFloorWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, generic: true, }, { - name: "MaskedLessUint8x64", - argLen: 3, + name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, generic: true, }, { - name: "MaskedLessEqualUint8x64", - argLen: 3, + name: "MaskedDiffWithRoundWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, generic: true, }, { - name: "MaskedMaxUint8x64", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedMinUint8x64", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedNotEqualUint8x64", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedPopCountUint8x64", + name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedSaturatedAddUint8x64", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedSaturatedSubUint8x64", - argLen: 3, + name: "MaskedDiffWithTruncWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, generic: true, }, { - name: "MaskedSaturatedUnsignedSignedPairDotProdUint8x64", - argLen: 3, + name: "MaskedFloorSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, generic: true, }, { - name: "MaskedSubUint8x64", - argLen: 3, + name: "MaskedFloorWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, generic: true, }, { - name: "MaxUint8x64", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "MinUint8x64", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "NotEqualUint8x64", - argLen: 2, - commutative: true, - generic: true, + name: "MaskedRoundSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, + generic: true, }, { - name: "PopCountUint8x64", - argLen: 1, + name: "MaskedRoundWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, generic: true, }, { - name: "SaturatedAddUint8x64", - argLen: 2, - commutative: true, - generic: true, + name: "MaskedTruncSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, + generic: true, }, { - name: "SaturatedSubUint8x64", + name: "MaskedTruncWithPrecisionFloat64x2", + auxType: auxInt8, argLen: 2, generic: true, }, { - name: "SaturatedUnsignedSignedPairDotProdUint8x64", - argLen: 2, + name: "RoundSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, generic: true, }, { - name: "SubUint8x64", - argLen: 2, + name: "RoundWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, generic: true, }, { - name: "CeilSuppressExceptionWithPrecisionFloat32x16", + name: "TruncSuppressExceptionWithPrecisionFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "CeilWithPrecisionFloat32x16", + name: 
"TruncWithPrecisionFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithCeilSuppressExceptionWithPrecisionFloat32x16", + name: "CeilSuppressExceptionWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithCeilWithPrecisionFloat32x16", + name: "CeilWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithFloorSuppressExceptionWithPrecisionFloat32x16", + name: "DiffWithCeilSuppressExceptionWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithFloorWithPrecisionFloat32x16", + name: "DiffWithCeilWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithRoundSuppressExceptionWithPrecisionFloat32x16", + name: "DiffWithFloorSuppressExceptionWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithRoundWithPrecisionFloat32x16", + name: "DiffWithFloorWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithTruncSuppressExceptionWithPrecisionFloat32x16", + name: "DiffWithRoundSuppressExceptionWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithTruncWithPrecisionFloat32x16", + name: "DiffWithRoundWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "FloorSuppressExceptionWithPrecisionFloat32x16", + name: "DiffWithTruncSuppressExceptionWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "FloorWithPrecisionFloat32x16", + name: "DiffWithTruncWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "MaskedCeilSuppressExceptionWithPrecisionFloat32x16", + name: "FloorSuppressExceptionWithPrecisionFloat64x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedCeilWithPrecisionFloat32x16", + name: "FloorWithPrecisionFloat64x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16", + name: "MaskedCeilSuppressExceptionWithPrecisionFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithCeilWithPrecisionFloat32x16", + name: "MaskedCeilWithPrecisionFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16", + name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithFloorWithPrecisionFloat32x16", + name: "MaskedDiffWithCeilWithPrecisionFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16", + name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithRoundWithPrecisionFloat32x16", + name: "MaskedDiffWithFloorWithPrecisionFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16", + name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithTruncWithPrecisionFloat32x16", + name: "MaskedDiffWithRoundWithPrecisionFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedFloorSuppressExceptionWithPrecisionFloat32x16", + name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: 
"MaskedFloorWithPrecisionFloat32x16", + name: "MaskedDiffWithTruncWithPrecisionFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedRoundSuppressExceptionWithPrecisionFloat32x16", + name: "MaskedFloorSuppressExceptionWithPrecisionFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedRoundWithPrecisionFloat32x16", + name: "MaskedFloorWithPrecisionFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedTruncSuppressExceptionWithPrecisionFloat32x16", + name: "MaskedRoundSuppressExceptionWithPrecisionFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedTruncWithPrecisionFloat32x16", + name: "MaskedRoundWithPrecisionFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RoundSuppressExceptionWithPrecisionFloat32x16", + name: "MaskedTruncSuppressExceptionWithPrecisionFloat64x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "RoundWithPrecisionFloat32x16", + name: "MaskedTruncWithPrecisionFloat64x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "TruncSuppressExceptionWithPrecisionFloat32x16", + name: "RoundSuppressExceptionWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "TruncWithPrecisionFloat32x16", + name: "RoundWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "CeilSuppressExceptionWithPrecisionFloat32x4", + name: "TruncSuppressExceptionWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "CeilWithPrecisionFloat32x4", + name: "TruncWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithCeilSuppressExceptionWithPrecisionFloat32x4", + name: "CeilSuppressExceptionWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithCeilWithPrecisionFloat32x4", + name: "CeilWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithFloorSuppressExceptionWithPrecisionFloat32x4", + name: "DiffWithCeilSuppressExceptionWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithFloorWithPrecisionFloat32x4", + name: "DiffWithCeilWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithRoundSuppressExceptionWithPrecisionFloat32x4", + name: "DiffWithFloorSuppressExceptionWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithRoundWithPrecisionFloat32x4", + name: "DiffWithFloorWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithTruncSuppressExceptionWithPrecisionFloat32x4", + name: "DiffWithRoundSuppressExceptionWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithTruncWithPrecisionFloat32x4", + name: "DiffWithRoundWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "FloorSuppressExceptionWithPrecisionFloat32x4", + name: "DiffWithTruncSuppressExceptionWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "FloorWithPrecisionFloat32x4", + name: "DiffWithTruncWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "MaskedCeilSuppressExceptionWithPrecisionFloat32x4", + name: "FloorSuppressExceptionWithPrecisionFloat64x8", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedCeilWithPrecisionFloat32x4", + name: "FloorWithPrecisionFloat64x8", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: 
true, }, { - name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4", + name: "MaskedCeilSuppressExceptionWithPrecisionFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithCeilWithPrecisionFloat32x4", + name: "MaskedCeilWithPrecisionFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4", + name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithFloorWithPrecisionFloat32x4", + name: "MaskedDiffWithCeilWithPrecisionFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4", + name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithRoundWithPrecisionFloat32x4", + name: "MaskedDiffWithFloorWithPrecisionFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4", + name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithTruncWithPrecisionFloat32x4", + name: "MaskedDiffWithRoundWithPrecisionFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedFloorSuppressExceptionWithPrecisionFloat32x4", + name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedFloorWithPrecisionFloat32x4", + name: "MaskedDiffWithTruncWithPrecisionFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedRoundSuppressExceptionWithPrecisionFloat32x4", + name: "MaskedFloorSuppressExceptionWithPrecisionFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedRoundWithPrecisionFloat32x4", + name: "MaskedFloorWithPrecisionFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedTruncSuppressExceptionWithPrecisionFloat32x4", + name: "MaskedRoundSuppressExceptionWithPrecisionFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedTruncWithPrecisionFloat32x4", + name: "MaskedRoundWithPrecisionFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RoundSuppressExceptionWithPrecisionFloat32x4", + name: "MaskedTruncSuppressExceptionWithPrecisionFloat64x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "RoundWithPrecisionFloat32x4", + name: "MaskedTruncWithPrecisionFloat64x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "TruncSuppressExceptionWithPrecisionFloat32x4", + name: "RoundSuppressExceptionWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "TruncWithPrecisionFloat32x4", + name: "RoundWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "CeilSuppressExceptionWithPrecisionFloat32x8", + name: "TruncSuppressExceptionWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "CeilWithPrecisionFloat32x8", + name: "TruncWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithCeilSuppressExceptionWithPrecisionFloat32x8", + name: "MaskedShiftAllLeftAndFillUpperFromInt16x16", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "DiffWithCeilWithPrecisionFloat32x8", + name: "MaskedShiftAllRightAndFillUpperFromInt16x16", auxType: auxInt8, - argLen: 1, + argLen: 3, 
generic: true, }, { - name: "DiffWithFloorSuppressExceptionWithPrecisionFloat32x8", + name: "ShiftAllLeftAndFillUpperFromInt16x16", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithFloorWithPrecisionFloat32x8", + name: "ShiftAllRightAndFillUpperFromInt16x16", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithRoundSuppressExceptionWithPrecisionFloat32x8", + name: "MaskedShiftAllLeftAndFillUpperFromInt16x32", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "DiffWithRoundWithPrecisionFloat32x8", + name: "MaskedShiftAllRightAndFillUpperFromInt16x32", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "DiffWithTruncSuppressExceptionWithPrecisionFloat32x8", + name: "ShiftAllLeftAndFillUpperFromInt16x32", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithTruncWithPrecisionFloat32x8", + name: "ShiftAllRightAndFillUpperFromInt16x32", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "FloorSuppressExceptionWithPrecisionFloat32x8", + name: "GetElemInt16x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "FloorWithPrecisionFloat32x8", + name: "MaskedShiftAllLeftAndFillUpperFromInt16x8", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "MaskedCeilSuppressExceptionWithPrecisionFloat32x8", + name: "MaskedShiftAllRightAndFillUpperFromInt16x8", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedCeilWithPrecisionFloat32x8", + name: "SetElemInt16x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8", + name: "ShiftAllLeftAndFillUpperFromInt16x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithCeilWithPrecisionFloat32x8", + name: "ShiftAllRightAndFillUpperFromInt16x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8", + name: "MaskedRotateAllLeftInt32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithFloorWithPrecisionFloat32x8", + name: "MaskedRotateAllRightInt32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8", + name: "MaskedShiftAllLeftAndFillUpperFromInt32x16", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedDiffWithRoundWithPrecisionFloat32x8", + name: "MaskedShiftAllRightAndFillUpperFromInt32x16", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8", + name: "RotateAllLeftInt32x16", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedDiffWithTruncWithPrecisionFloat32x8", + name: "RotateAllRightInt32x16", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedFloorSuppressExceptionWithPrecisionFloat32x8", + name: "ShiftAllLeftAndFillUpperFromInt32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedFloorWithPrecisionFloat32x8", + name: "ShiftAllRightAndFillUpperFromInt32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedRoundSuppressExceptionWithPrecisionFloat32x8", + name: "GetElemInt32x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedRoundWithPrecisionFloat32x8", + name: "MaskedRotateAllLeftInt32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: 
"MaskedTruncSuppressExceptionWithPrecisionFloat32x8", + name: "MaskedRotateAllRightInt32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedTruncWithPrecisionFloat32x8", + name: "MaskedShiftAllLeftAndFillUpperFromInt32x4", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "RoundSuppressExceptionWithPrecisionFloat32x8", + name: "MaskedShiftAllRightAndFillUpperFromInt32x4", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "RoundWithPrecisionFloat32x8", + name: "RotateAllLeftInt32x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "TruncSuppressExceptionWithPrecisionFloat32x8", + name: "RotateAllRightInt32x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "TruncWithPrecisionFloat32x8", + name: "SetElemInt32x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "CeilSuppressExceptionWithPrecisionFloat64x2", + name: "ShiftAllLeftAndFillUpperFromInt32x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "CeilWithPrecisionFloat64x2", + name: "ShiftAllRightAndFillUpperFromInt32x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithCeilSuppressExceptionWithPrecisionFloat64x2", + name: "MaskedRotateAllLeftInt32x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithCeilWithPrecisionFloat64x2", + name: "MaskedRotateAllRightInt32x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithFloorSuppressExceptionWithPrecisionFloat64x2", + name: "MaskedShiftAllLeftAndFillUpperFromInt32x8", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "DiffWithFloorWithPrecisionFloat64x2", + name: "MaskedShiftAllRightAndFillUpperFromInt32x8", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "DiffWithRoundSuppressExceptionWithPrecisionFloat64x2", + name: "RotateAllLeftInt32x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithRoundWithPrecisionFloat64x2", + name: "RotateAllRightInt32x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithTruncSuppressExceptionWithPrecisionFloat64x2", + name: "ShiftAllLeftAndFillUpperFromInt32x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithTruncWithPrecisionFloat64x2", + name: "ShiftAllRightAndFillUpperFromInt32x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "FloorSuppressExceptionWithPrecisionFloat64x2", + name: "GetElemInt64x2", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "FloorWithPrecisionFloat64x2", + name: "MaskedRotateAllLeftInt64x2", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "MaskedCeilSuppressExceptionWithPrecisionFloat64x2", + name: "MaskedRotateAllRightInt64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedCeilWithPrecisionFloat64x2", + name: "MaskedShiftAllLeftAndFillUpperFromInt64x2", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2", + name: "MaskedShiftAllRightAndFillUpperFromInt64x2", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedDiffWithCeilWithPrecisionFloat64x2", + name: "RotateAllLeftInt64x2", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2", + name: "RotateAllRightInt64x2", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: 
"MaskedDiffWithFloorWithPrecisionFloat64x2", + name: "SetElemInt64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2", + name: "ShiftAllLeftAndFillUpperFromInt64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithRoundWithPrecisionFloat64x2", + name: "ShiftAllRightAndFillUpperFromInt64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2", + name: "MaskedRotateAllLeftInt64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithTruncWithPrecisionFloat64x2", + name: "MaskedRotateAllRightInt64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedFloorSuppressExceptionWithPrecisionFloat64x2", + name: "MaskedShiftAllLeftAndFillUpperFromInt64x4", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedFloorWithPrecisionFloat64x2", + name: "MaskedShiftAllRightAndFillUpperFromInt64x4", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedRoundSuppressExceptionWithPrecisionFloat64x2", + name: "RotateAllLeftInt64x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedRoundWithPrecisionFloat64x2", + name: "RotateAllRightInt64x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedTruncSuppressExceptionWithPrecisionFloat64x2", + name: "ShiftAllLeftAndFillUpperFromInt64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedTruncWithPrecisionFloat64x2", + name: "ShiftAllRightAndFillUpperFromInt64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RoundSuppressExceptionWithPrecisionFloat64x2", + name: "MaskedRotateAllLeftInt64x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "RoundWithPrecisionFloat64x2", + name: "MaskedRotateAllRightInt64x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "TruncSuppressExceptionWithPrecisionFloat64x2", + name: "MaskedShiftAllLeftAndFillUpperFromInt64x8", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "TruncWithPrecisionFloat64x2", + name: "MaskedShiftAllRightAndFillUpperFromInt64x8", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "CeilSuppressExceptionWithPrecisionFloat64x4", + name: "RotateAllLeftInt64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "CeilWithPrecisionFloat64x4", + name: "RotateAllRightInt64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithCeilSuppressExceptionWithPrecisionFloat64x4", + name: "ShiftAllLeftAndFillUpperFromInt64x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithCeilWithPrecisionFloat64x4", + name: "ShiftAllRightAndFillUpperFromInt64x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithFloorSuppressExceptionWithPrecisionFloat64x4", + name: "GetElemInt8x16", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithFloorWithPrecisionFloat64x4", + name: "SetElemInt8x16", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithRoundSuppressExceptionWithPrecisionFloat64x4", + name: "MaskedShiftAllLeftAndFillUpperFromUint16x16", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "DiffWithRoundWithPrecisionFloat64x4", + name: "MaskedShiftAllRightAndFillUpperFromUint16x16", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: 
"DiffWithTruncSuppressExceptionWithPrecisionFloat64x4", + name: "ShiftAllLeftAndFillUpperFromUint16x16", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithTruncWithPrecisionFloat64x4", + name: "ShiftAllRightAndFillUpperFromUint16x16", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "FloorSuppressExceptionWithPrecisionFloat64x4", + name: "MaskedShiftAllLeftAndFillUpperFromUint16x32", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "FloorWithPrecisionFloat64x4", + name: "MaskedShiftAllRightAndFillUpperFromUint16x32", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "MaskedCeilSuppressExceptionWithPrecisionFloat64x4", + name: "ShiftAllLeftAndFillUpperFromUint16x32", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedCeilWithPrecisionFloat64x4", + name: "ShiftAllRightAndFillUpperFromUint16x32", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4", + name: "GetElemUint16x8", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedDiffWithCeilWithPrecisionFloat64x4", + name: "MaskedShiftAllLeftAndFillUpperFromUint16x8", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4", + name: "MaskedShiftAllRightAndFillUpperFromUint16x8", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedDiffWithFloorWithPrecisionFloat64x4", + name: "SetElemUint16x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4", + name: "ShiftAllLeftAndFillUpperFromUint16x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithRoundWithPrecisionFloat64x4", + name: "ShiftAllRightAndFillUpperFromUint16x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4", + name: "MaskedRotateAllLeftUint32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithTruncWithPrecisionFloat64x4", + name: "MaskedRotateAllRightUint32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedFloorSuppressExceptionWithPrecisionFloat64x4", + name: "MaskedShiftAllLeftAndFillUpperFromUint32x16", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedFloorWithPrecisionFloat64x4", + name: "MaskedShiftAllRightAndFillUpperFromUint32x16", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedRoundSuppressExceptionWithPrecisionFloat64x4", + name: "RotateAllLeftUint32x16", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedRoundWithPrecisionFloat64x4", + name: "RotateAllRightUint32x16", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedTruncSuppressExceptionWithPrecisionFloat64x4", + name: "ShiftAllLeftAndFillUpperFromUint32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedTruncWithPrecisionFloat64x4", + name: "ShiftAllRightAndFillUpperFromUint32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RoundSuppressExceptionWithPrecisionFloat64x4", + name: "GetElemUint32x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RoundWithPrecisionFloat64x4", + name: "MaskedRotateAllLeftUint32x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "TruncSuppressExceptionWithPrecisionFloat64x4", + name: 
"MaskedRotateAllRightUint32x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "TruncWithPrecisionFloat64x4", + name: "MaskedShiftAllLeftAndFillUpperFromUint32x4", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "CeilSuppressExceptionWithPrecisionFloat64x8", + name: "MaskedShiftAllRightAndFillUpperFromUint32x4", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "CeilWithPrecisionFloat64x8", + name: "RotateAllLeftUint32x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithCeilSuppressExceptionWithPrecisionFloat64x8", + name: "RotateAllRightUint32x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithCeilWithPrecisionFloat64x8", + name: "SetElemUint32x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithFloorSuppressExceptionWithPrecisionFloat64x8", + name: "ShiftAllLeftAndFillUpperFromUint32x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithFloorWithPrecisionFloat64x8", + name: "ShiftAllRightAndFillUpperFromUint32x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithRoundSuppressExceptionWithPrecisionFloat64x8", + name: "MaskedRotateAllLeftUint32x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithRoundWithPrecisionFloat64x8", + name: "MaskedRotateAllRightUint32x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithTruncSuppressExceptionWithPrecisionFloat64x8", + name: "MaskedShiftAllLeftAndFillUpperFromUint32x8", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "DiffWithTruncWithPrecisionFloat64x8", + name: "MaskedShiftAllRightAndFillUpperFromUint32x8", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "FloorSuppressExceptionWithPrecisionFloat64x8", + name: "RotateAllLeftUint32x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "FloorWithPrecisionFloat64x8", + name: "RotateAllRightUint32x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "MaskedCeilSuppressExceptionWithPrecisionFloat64x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - name: "MaskedCeilWithPrecisionFloat64x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8", + name: "ShiftAllLeftAndFillUpperFromUint32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithCeilWithPrecisionFloat64x8", + name: "ShiftAllRightAndFillUpperFromUint32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8", + name: "GetElemUint64x2", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedDiffWithFloorWithPrecisionFloat64x8", + name: "MaskedRotateAllLeftUint64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8", + name: "MaskedRotateAllRightUint64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithRoundWithPrecisionFloat64x8", + name: "MaskedShiftAllLeftAndFillUpperFromUint64x2", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8", + name: "MaskedShiftAllRightAndFillUpperFromUint64x2", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedDiffWithTruncWithPrecisionFloat64x8", + name: "RotateAllLeftUint64x2", auxType: 
auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedFloorSuppressExceptionWithPrecisionFloat64x8", + name: "RotateAllRightUint64x2", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedFloorWithPrecisionFloat64x8", + name: "SetElemUint64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedRoundSuppressExceptionWithPrecisionFloat64x8", + name: "ShiftAllLeftAndFillUpperFromUint64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedRoundWithPrecisionFloat64x8", + name: "ShiftAllRightAndFillUpperFromUint64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedTruncSuppressExceptionWithPrecisionFloat64x8", + name: "MaskedRotateAllLeftUint64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedTruncWithPrecisionFloat64x8", + name: "MaskedRotateAllRightUint64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RoundSuppressExceptionWithPrecisionFloat64x8", - auxType: auxInt8, - argLen: 1, - generic: true, - }, - { - name: "RoundWithPrecisionFloat64x8", + name: "MaskedShiftAllLeftAndFillUpperFromUint64x4", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "TruncSuppressExceptionWithPrecisionFloat64x8", + name: "MaskedShiftAllRightAndFillUpperFromUint64x4", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "TruncWithPrecisionFloat64x8", + name: "RotateAllLeftUint64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "GetElemInt16x8", + name: "RotateAllRightUint64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "SetElemInt16x8", + name: "ShiftAllLeftAndFillUpperFromUint64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "GetElemInt32x4", - auxType: auxInt8, - argLen: 1, - generic: true, - }, - { - name: "SetElemInt32x4", + name: "ShiftAllRightAndFillUpperFromUint64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "GetElemInt64x2", + name: "MaskedRotateAllLeftUint64x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "SetElemInt64x2", + name: "MaskedRotateAllRightUint64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "GetElemInt8x16", + name: "MaskedShiftAllLeftAndFillUpperFromUint64x8", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "SetElemInt8x16", + name: "MaskedShiftAllRightAndFillUpperFromUint64x8", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "GetElemUint16x8", + name: "RotateAllLeftUint64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "SetElemUint16x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - name: "GetElemUint32x4", + name: "RotateAllRightUint64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "SetElemUint32x4", + name: "ShiftAllLeftAndFillUpperFromUint64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "GetElemUint64x2", - auxType: auxInt8, - argLen: 1, - generic: true, - }, - { - name: "SetElemUint64x2", + name: "ShiftAllRightAndFillUpperFromUint64x8", auxType: auxInt8, argLen: 2, generic: true, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 668024a00fb52b..d7aa0339e7c8c0 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -2862,6 +2862,102 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedPopCountUint8x32(v) case OpMaskedPopCountUint8x64: return rewriteValueAMD64_OpMaskedPopCountUint8x64(v) + 
case OpMaskedRotateAllLeftInt32x16: + return rewriteValueAMD64_OpMaskedRotateAllLeftInt32x16(v) + case OpMaskedRotateAllLeftInt32x4: + return rewriteValueAMD64_OpMaskedRotateAllLeftInt32x4(v) + case OpMaskedRotateAllLeftInt32x8: + return rewriteValueAMD64_OpMaskedRotateAllLeftInt32x8(v) + case OpMaskedRotateAllLeftInt64x2: + return rewriteValueAMD64_OpMaskedRotateAllLeftInt64x2(v) + case OpMaskedRotateAllLeftInt64x4: + return rewriteValueAMD64_OpMaskedRotateAllLeftInt64x4(v) + case OpMaskedRotateAllLeftInt64x8: + return rewriteValueAMD64_OpMaskedRotateAllLeftInt64x8(v) + case OpMaskedRotateAllLeftUint32x16: + return rewriteValueAMD64_OpMaskedRotateAllLeftUint32x16(v) + case OpMaskedRotateAllLeftUint32x4: + return rewriteValueAMD64_OpMaskedRotateAllLeftUint32x4(v) + case OpMaskedRotateAllLeftUint32x8: + return rewriteValueAMD64_OpMaskedRotateAllLeftUint32x8(v) + case OpMaskedRotateAllLeftUint64x2: + return rewriteValueAMD64_OpMaskedRotateAllLeftUint64x2(v) + case OpMaskedRotateAllLeftUint64x4: + return rewriteValueAMD64_OpMaskedRotateAllLeftUint64x4(v) + case OpMaskedRotateAllLeftUint64x8: + return rewriteValueAMD64_OpMaskedRotateAllLeftUint64x8(v) + case OpMaskedRotateAllRightInt32x16: + return rewriteValueAMD64_OpMaskedRotateAllRightInt32x16(v) + case OpMaskedRotateAllRightInt32x4: + return rewriteValueAMD64_OpMaskedRotateAllRightInt32x4(v) + case OpMaskedRotateAllRightInt32x8: + return rewriteValueAMD64_OpMaskedRotateAllRightInt32x8(v) + case OpMaskedRotateAllRightInt64x2: + return rewriteValueAMD64_OpMaskedRotateAllRightInt64x2(v) + case OpMaskedRotateAllRightInt64x4: + return rewriteValueAMD64_OpMaskedRotateAllRightInt64x4(v) + case OpMaskedRotateAllRightInt64x8: + return rewriteValueAMD64_OpMaskedRotateAllRightInt64x8(v) + case OpMaskedRotateAllRightUint32x16: + return rewriteValueAMD64_OpMaskedRotateAllRightUint32x16(v) + case OpMaskedRotateAllRightUint32x4: + return rewriteValueAMD64_OpMaskedRotateAllRightUint32x4(v) + case OpMaskedRotateAllRightUint32x8: + return rewriteValueAMD64_OpMaskedRotateAllRightUint32x8(v) + case OpMaskedRotateAllRightUint64x2: + return rewriteValueAMD64_OpMaskedRotateAllRightUint64x2(v) + case OpMaskedRotateAllRightUint64x4: + return rewriteValueAMD64_OpMaskedRotateAllRightUint64x4(v) + case OpMaskedRotateAllRightUint64x8: + return rewriteValueAMD64_OpMaskedRotateAllRightUint64x8(v) + case OpMaskedRotateLeftInt32x16: + return rewriteValueAMD64_OpMaskedRotateLeftInt32x16(v) + case OpMaskedRotateLeftInt32x4: + return rewriteValueAMD64_OpMaskedRotateLeftInt32x4(v) + case OpMaskedRotateLeftInt32x8: + return rewriteValueAMD64_OpMaskedRotateLeftInt32x8(v) + case OpMaskedRotateLeftInt64x2: + return rewriteValueAMD64_OpMaskedRotateLeftInt64x2(v) + case OpMaskedRotateLeftInt64x4: + return rewriteValueAMD64_OpMaskedRotateLeftInt64x4(v) + case OpMaskedRotateLeftInt64x8: + return rewriteValueAMD64_OpMaskedRotateLeftInt64x8(v) + case OpMaskedRotateLeftUint32x16: + return rewriteValueAMD64_OpMaskedRotateLeftUint32x16(v) + case OpMaskedRotateLeftUint32x4: + return rewriteValueAMD64_OpMaskedRotateLeftUint32x4(v) + case OpMaskedRotateLeftUint32x8: + return rewriteValueAMD64_OpMaskedRotateLeftUint32x8(v) + case OpMaskedRotateLeftUint64x2: + return rewriteValueAMD64_OpMaskedRotateLeftUint64x2(v) + case OpMaskedRotateLeftUint64x4: + return rewriteValueAMD64_OpMaskedRotateLeftUint64x4(v) + case OpMaskedRotateLeftUint64x8: + return rewriteValueAMD64_OpMaskedRotateLeftUint64x8(v) + case OpMaskedRotateRightInt32x16: + return rewriteValueAMD64_OpMaskedRotateRightInt32x16(v) + case 
OpMaskedRotateRightInt32x4: + return rewriteValueAMD64_OpMaskedRotateRightInt32x4(v) + case OpMaskedRotateRightInt32x8: + return rewriteValueAMD64_OpMaskedRotateRightInt32x8(v) + case OpMaskedRotateRightInt64x2: + return rewriteValueAMD64_OpMaskedRotateRightInt64x2(v) + case OpMaskedRotateRightInt64x4: + return rewriteValueAMD64_OpMaskedRotateRightInt64x4(v) + case OpMaskedRotateRightInt64x8: + return rewriteValueAMD64_OpMaskedRotateRightInt64x8(v) + case OpMaskedRotateRightUint32x16: + return rewriteValueAMD64_OpMaskedRotateRightUint32x16(v) + case OpMaskedRotateRightUint32x4: + return rewriteValueAMD64_OpMaskedRotateRightUint32x4(v) + case OpMaskedRotateRightUint32x8: + return rewriteValueAMD64_OpMaskedRotateRightUint32x8(v) + case OpMaskedRotateRightUint64x2: + return rewriteValueAMD64_OpMaskedRotateRightUint64x2(v) + case OpMaskedRotateRightUint64x4: + return rewriteValueAMD64_OpMaskedRotateRightUint64x4(v) + case OpMaskedRotateRightUint64x8: + return rewriteValueAMD64_OpMaskedRotateRightUint64x8(v) case OpMaskedRoundSuppressExceptionWithPrecisionFloat32x16: return rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat32x16(v) case OpMaskedRoundSuppressExceptionWithPrecisionFloat32x4: @@ -2958,6 +3054,288 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4(v) case OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8: return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8(v) + case OpMaskedShiftAllLeftAndFillUpperFromInt16x16: + return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt16x16(v) + case OpMaskedShiftAllLeftAndFillUpperFromInt16x32: + return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt16x32(v) + case OpMaskedShiftAllLeftAndFillUpperFromInt16x8: + return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt16x8(v) + case OpMaskedShiftAllLeftAndFillUpperFromInt32x16: + return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt32x16(v) + case OpMaskedShiftAllLeftAndFillUpperFromInt32x4: + return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt32x4(v) + case OpMaskedShiftAllLeftAndFillUpperFromInt32x8: + return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt32x8(v) + case OpMaskedShiftAllLeftAndFillUpperFromInt64x2: + return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt64x2(v) + case OpMaskedShiftAllLeftAndFillUpperFromInt64x4: + return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt64x4(v) + case OpMaskedShiftAllLeftAndFillUpperFromInt64x8: + return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt64x8(v) + case OpMaskedShiftAllLeftAndFillUpperFromUint16x16: + return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint16x16(v) + case OpMaskedShiftAllLeftAndFillUpperFromUint16x32: + return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint16x32(v) + case OpMaskedShiftAllLeftAndFillUpperFromUint16x8: + return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint16x8(v) + case OpMaskedShiftAllLeftAndFillUpperFromUint32x16: + return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint32x16(v) + case OpMaskedShiftAllLeftAndFillUpperFromUint32x4: + return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint32x4(v) + case OpMaskedShiftAllLeftAndFillUpperFromUint32x8: + return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint32x8(v) + case OpMaskedShiftAllLeftAndFillUpperFromUint64x2: + return 
rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint64x2(v) + case OpMaskedShiftAllLeftAndFillUpperFromUint64x4: + return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint64x4(v) + case OpMaskedShiftAllLeftAndFillUpperFromUint64x8: + return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint64x8(v) + case OpMaskedShiftAllLeftInt64x2: + return rewriteValueAMD64_OpMaskedShiftAllLeftInt64x2(v) + case OpMaskedShiftAllLeftInt64x4: + return rewriteValueAMD64_OpMaskedShiftAllLeftInt64x4(v) + case OpMaskedShiftAllLeftInt64x8: + return rewriteValueAMD64_OpMaskedShiftAllLeftInt64x8(v) + case OpMaskedShiftAllLeftUint64x2: + return rewriteValueAMD64_OpMaskedShiftAllLeftUint64x2(v) + case OpMaskedShiftAllLeftUint64x4: + return rewriteValueAMD64_OpMaskedShiftAllLeftUint64x4(v) + case OpMaskedShiftAllLeftUint64x8: + return rewriteValueAMD64_OpMaskedShiftAllLeftUint64x8(v) + case OpMaskedShiftAllRightAndFillUpperFromInt16x16: + return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt16x16(v) + case OpMaskedShiftAllRightAndFillUpperFromInt16x32: + return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt16x32(v) + case OpMaskedShiftAllRightAndFillUpperFromInt16x8: + return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt16x8(v) + case OpMaskedShiftAllRightAndFillUpperFromInt32x16: + return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt32x16(v) + case OpMaskedShiftAllRightAndFillUpperFromInt32x4: + return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt32x4(v) + case OpMaskedShiftAllRightAndFillUpperFromInt32x8: + return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt32x8(v) + case OpMaskedShiftAllRightAndFillUpperFromInt64x2: + return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt64x2(v) + case OpMaskedShiftAllRightAndFillUpperFromInt64x4: + return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt64x4(v) + case OpMaskedShiftAllRightAndFillUpperFromInt64x8: + return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt64x8(v) + case OpMaskedShiftAllRightAndFillUpperFromUint16x16: + return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint16x16(v) + case OpMaskedShiftAllRightAndFillUpperFromUint16x32: + return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint16x32(v) + case OpMaskedShiftAllRightAndFillUpperFromUint16x8: + return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint16x8(v) + case OpMaskedShiftAllRightAndFillUpperFromUint32x16: + return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint32x16(v) + case OpMaskedShiftAllRightAndFillUpperFromUint32x4: + return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint32x4(v) + case OpMaskedShiftAllRightAndFillUpperFromUint32x8: + return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint32x8(v) + case OpMaskedShiftAllRightAndFillUpperFromUint64x2: + return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint64x2(v) + case OpMaskedShiftAllRightAndFillUpperFromUint64x4: + return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint64x4(v) + case OpMaskedShiftAllRightAndFillUpperFromUint64x8: + return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint64x8(v) + case OpMaskedShiftAllRightInt64x2: + return rewriteValueAMD64_OpMaskedShiftAllRightInt64x2(v) + case OpMaskedShiftAllRightInt64x4: + return rewriteValueAMD64_OpMaskedShiftAllRightInt64x4(v) + case OpMaskedShiftAllRightInt64x8: + return rewriteValueAMD64_OpMaskedShiftAllRightInt64x8(v) + case 
OpMaskedShiftAllRightSignExtendedInt64x2: + return rewriteValueAMD64_OpMaskedShiftAllRightSignExtendedInt64x2(v) + case OpMaskedShiftAllRightSignExtendedInt64x4: + return rewriteValueAMD64_OpMaskedShiftAllRightSignExtendedInt64x4(v) + case OpMaskedShiftAllRightSignExtendedInt64x8: + return rewriteValueAMD64_OpMaskedShiftAllRightSignExtendedInt64x8(v) + case OpMaskedShiftAllRightUint64x2: + return rewriteValueAMD64_OpMaskedShiftAllRightUint64x2(v) + case OpMaskedShiftAllRightUint64x4: + return rewriteValueAMD64_OpMaskedShiftAllRightUint64x4(v) + case OpMaskedShiftAllRightUint64x8: + return rewriteValueAMD64_OpMaskedShiftAllRightUint64x8(v) + case OpMaskedShiftLeftAndFillUpperFromInt16x16: + return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt16x16(v) + case OpMaskedShiftLeftAndFillUpperFromInt16x32: + return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt16x32(v) + case OpMaskedShiftLeftAndFillUpperFromInt16x8: + return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt16x8(v) + case OpMaskedShiftLeftAndFillUpperFromInt32x16: + return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt32x16(v) + case OpMaskedShiftLeftAndFillUpperFromInt32x4: + return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt32x4(v) + case OpMaskedShiftLeftAndFillUpperFromInt32x8: + return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt32x8(v) + case OpMaskedShiftLeftAndFillUpperFromInt64x2: + return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt64x2(v) + case OpMaskedShiftLeftAndFillUpperFromInt64x4: + return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt64x4(v) + case OpMaskedShiftLeftAndFillUpperFromInt64x8: + return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt64x8(v) + case OpMaskedShiftLeftAndFillUpperFromUint16x16: + return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint16x16(v) + case OpMaskedShiftLeftAndFillUpperFromUint16x32: + return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint16x32(v) + case OpMaskedShiftLeftAndFillUpperFromUint16x8: + return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint16x8(v) + case OpMaskedShiftLeftAndFillUpperFromUint32x16: + return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint32x16(v) + case OpMaskedShiftLeftAndFillUpperFromUint32x4: + return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint32x4(v) + case OpMaskedShiftLeftAndFillUpperFromUint32x8: + return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint32x8(v) + case OpMaskedShiftLeftAndFillUpperFromUint64x2: + return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint64x2(v) + case OpMaskedShiftLeftAndFillUpperFromUint64x4: + return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint64x4(v) + case OpMaskedShiftLeftAndFillUpperFromUint64x8: + return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint64x8(v) + case OpMaskedShiftLeftInt16x16: + return rewriteValueAMD64_OpMaskedShiftLeftInt16x16(v) + case OpMaskedShiftLeftInt16x32: + return rewriteValueAMD64_OpMaskedShiftLeftInt16x32(v) + case OpMaskedShiftLeftInt16x8: + return rewriteValueAMD64_OpMaskedShiftLeftInt16x8(v) + case OpMaskedShiftLeftInt32x16: + return rewriteValueAMD64_OpMaskedShiftLeftInt32x16(v) + case OpMaskedShiftLeftInt32x4: + return rewriteValueAMD64_OpMaskedShiftLeftInt32x4(v) + case OpMaskedShiftLeftInt32x8: + return rewriteValueAMD64_OpMaskedShiftLeftInt32x8(v) + case OpMaskedShiftLeftInt64x2: + return rewriteValueAMD64_OpMaskedShiftLeftInt64x2(v) + case OpMaskedShiftLeftInt64x4: + return rewriteValueAMD64_OpMaskedShiftLeftInt64x4(v) + case 
OpMaskedShiftLeftInt64x8: + return rewriteValueAMD64_OpMaskedShiftLeftInt64x8(v) + case OpMaskedShiftLeftUint16x16: + return rewriteValueAMD64_OpMaskedShiftLeftUint16x16(v) + case OpMaskedShiftLeftUint16x32: + return rewriteValueAMD64_OpMaskedShiftLeftUint16x32(v) + case OpMaskedShiftLeftUint16x8: + return rewriteValueAMD64_OpMaskedShiftLeftUint16x8(v) + case OpMaskedShiftLeftUint32x16: + return rewriteValueAMD64_OpMaskedShiftLeftUint32x16(v) + case OpMaskedShiftLeftUint32x4: + return rewriteValueAMD64_OpMaskedShiftLeftUint32x4(v) + case OpMaskedShiftLeftUint32x8: + return rewriteValueAMD64_OpMaskedShiftLeftUint32x8(v) + case OpMaskedShiftLeftUint64x2: + return rewriteValueAMD64_OpMaskedShiftLeftUint64x2(v) + case OpMaskedShiftLeftUint64x4: + return rewriteValueAMD64_OpMaskedShiftLeftUint64x4(v) + case OpMaskedShiftLeftUint64x8: + return rewriteValueAMD64_OpMaskedShiftLeftUint64x8(v) + case OpMaskedShiftRightAndFillUpperFromInt16x16: + return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt16x16(v) + case OpMaskedShiftRightAndFillUpperFromInt16x32: + return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt16x32(v) + case OpMaskedShiftRightAndFillUpperFromInt16x8: + return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt16x8(v) + case OpMaskedShiftRightAndFillUpperFromInt32x16: + return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt32x16(v) + case OpMaskedShiftRightAndFillUpperFromInt32x4: + return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt32x4(v) + case OpMaskedShiftRightAndFillUpperFromInt32x8: + return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt32x8(v) + case OpMaskedShiftRightAndFillUpperFromInt64x2: + return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt64x2(v) + case OpMaskedShiftRightAndFillUpperFromInt64x4: + return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt64x4(v) + case OpMaskedShiftRightAndFillUpperFromInt64x8: + return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt64x8(v) + case OpMaskedShiftRightAndFillUpperFromUint16x16: + return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint16x16(v) + case OpMaskedShiftRightAndFillUpperFromUint16x32: + return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint16x32(v) + case OpMaskedShiftRightAndFillUpperFromUint16x8: + return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint16x8(v) + case OpMaskedShiftRightAndFillUpperFromUint32x16: + return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint32x16(v) + case OpMaskedShiftRightAndFillUpperFromUint32x4: + return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint32x4(v) + case OpMaskedShiftRightAndFillUpperFromUint32x8: + return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint32x8(v) + case OpMaskedShiftRightAndFillUpperFromUint64x2: + return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint64x2(v) + case OpMaskedShiftRightAndFillUpperFromUint64x4: + return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint64x4(v) + case OpMaskedShiftRightAndFillUpperFromUint64x8: + return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint64x8(v) + case OpMaskedShiftRightInt16x16: + return rewriteValueAMD64_OpMaskedShiftRightInt16x16(v) + case OpMaskedShiftRightInt16x32: + return rewriteValueAMD64_OpMaskedShiftRightInt16x32(v) + case OpMaskedShiftRightInt16x8: + return rewriteValueAMD64_OpMaskedShiftRightInt16x8(v) + case OpMaskedShiftRightInt32x16: + return rewriteValueAMD64_OpMaskedShiftRightInt32x16(v) + case OpMaskedShiftRightInt32x4: + return 
rewriteValueAMD64_OpMaskedShiftRightInt32x4(v) + case OpMaskedShiftRightInt32x8: + return rewriteValueAMD64_OpMaskedShiftRightInt32x8(v) + case OpMaskedShiftRightInt64x2: + return rewriteValueAMD64_OpMaskedShiftRightInt64x2(v) + case OpMaskedShiftRightInt64x4: + return rewriteValueAMD64_OpMaskedShiftRightInt64x4(v) + case OpMaskedShiftRightInt64x8: + return rewriteValueAMD64_OpMaskedShiftRightInt64x8(v) + case OpMaskedShiftRightSignExtendedInt16x16: + return rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt16x16(v) + case OpMaskedShiftRightSignExtendedInt16x32: + return rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt16x32(v) + case OpMaskedShiftRightSignExtendedInt16x8: + return rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt16x8(v) + case OpMaskedShiftRightSignExtendedInt32x16: + return rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt32x16(v) + case OpMaskedShiftRightSignExtendedInt32x4: + return rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt32x4(v) + case OpMaskedShiftRightSignExtendedInt32x8: + return rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt32x8(v) + case OpMaskedShiftRightSignExtendedInt64x2: + return rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt64x2(v) + case OpMaskedShiftRightSignExtendedInt64x4: + return rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt64x4(v) + case OpMaskedShiftRightSignExtendedInt64x8: + return rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt64x8(v) + case OpMaskedShiftRightSignExtendedUint16x16: + return rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint16x16(v) + case OpMaskedShiftRightSignExtendedUint16x32: + return rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint16x32(v) + case OpMaskedShiftRightSignExtendedUint16x8: + return rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint16x8(v) + case OpMaskedShiftRightSignExtendedUint32x16: + return rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint32x16(v) + case OpMaskedShiftRightSignExtendedUint32x4: + return rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint32x4(v) + case OpMaskedShiftRightSignExtendedUint32x8: + return rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint32x8(v) + case OpMaskedShiftRightSignExtendedUint64x2: + return rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint64x2(v) + case OpMaskedShiftRightSignExtendedUint64x4: + return rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint64x4(v) + case OpMaskedShiftRightSignExtendedUint64x8: + return rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint64x8(v) + case OpMaskedShiftRightUint16x16: + return rewriteValueAMD64_OpMaskedShiftRightUint16x16(v) + case OpMaskedShiftRightUint16x32: + return rewriteValueAMD64_OpMaskedShiftRightUint16x32(v) + case OpMaskedShiftRightUint16x8: + return rewriteValueAMD64_OpMaskedShiftRightUint16x8(v) + case OpMaskedShiftRightUint32x16: + return rewriteValueAMD64_OpMaskedShiftRightUint32x16(v) + case OpMaskedShiftRightUint32x4: + return rewriteValueAMD64_OpMaskedShiftRightUint32x4(v) + case OpMaskedShiftRightUint32x8: + return rewriteValueAMD64_OpMaskedShiftRightUint32x8(v) + case OpMaskedShiftRightUint64x2: + return rewriteValueAMD64_OpMaskedShiftRightUint64x2(v) + case OpMaskedShiftRightUint64x4: + return rewriteValueAMD64_OpMaskedShiftRightUint64x4(v) + case OpMaskedShiftRightUint64x8: + return rewriteValueAMD64_OpMaskedShiftRightUint64x8(v) case OpMaskedSqrtFloat32x16: return rewriteValueAMD64_OpMaskedSqrtFloat32x16(v) case OpMaskedSqrtFloat32x4: @@ -3812,6 +4190,54 @@ func rewriteValueAMD64(v *Value) bool { case OpPrefetchCacheStreamed: v.Op = OpAMD64PrefetchNTA 
return true + case OpRotateAllLeftInt32x16: + return rewriteValueAMD64_OpRotateAllLeftInt32x16(v) + case OpRotateAllLeftInt32x4: + return rewriteValueAMD64_OpRotateAllLeftInt32x4(v) + case OpRotateAllLeftInt32x8: + return rewriteValueAMD64_OpRotateAllLeftInt32x8(v) + case OpRotateAllLeftInt64x2: + return rewriteValueAMD64_OpRotateAllLeftInt64x2(v) + case OpRotateAllLeftInt64x4: + return rewriteValueAMD64_OpRotateAllLeftInt64x4(v) + case OpRotateAllLeftInt64x8: + return rewriteValueAMD64_OpRotateAllLeftInt64x8(v) + case OpRotateAllLeftUint32x16: + return rewriteValueAMD64_OpRotateAllLeftUint32x16(v) + case OpRotateAllLeftUint32x4: + return rewriteValueAMD64_OpRotateAllLeftUint32x4(v) + case OpRotateAllLeftUint32x8: + return rewriteValueAMD64_OpRotateAllLeftUint32x8(v) + case OpRotateAllLeftUint64x2: + return rewriteValueAMD64_OpRotateAllLeftUint64x2(v) + case OpRotateAllLeftUint64x4: + return rewriteValueAMD64_OpRotateAllLeftUint64x4(v) + case OpRotateAllLeftUint64x8: + return rewriteValueAMD64_OpRotateAllLeftUint64x8(v) + case OpRotateAllRightInt32x16: + return rewriteValueAMD64_OpRotateAllRightInt32x16(v) + case OpRotateAllRightInt32x4: + return rewriteValueAMD64_OpRotateAllRightInt32x4(v) + case OpRotateAllRightInt32x8: + return rewriteValueAMD64_OpRotateAllRightInt32x8(v) + case OpRotateAllRightInt64x2: + return rewriteValueAMD64_OpRotateAllRightInt64x2(v) + case OpRotateAllRightInt64x4: + return rewriteValueAMD64_OpRotateAllRightInt64x4(v) + case OpRotateAllRightInt64x8: + return rewriteValueAMD64_OpRotateAllRightInt64x8(v) + case OpRotateAllRightUint32x16: + return rewriteValueAMD64_OpRotateAllRightUint32x16(v) + case OpRotateAllRightUint32x4: + return rewriteValueAMD64_OpRotateAllRightUint32x4(v) + case OpRotateAllRightUint32x8: + return rewriteValueAMD64_OpRotateAllRightUint32x8(v) + case OpRotateAllRightUint64x2: + return rewriteValueAMD64_OpRotateAllRightUint64x2(v) + case OpRotateAllRightUint64x4: + return rewriteValueAMD64_OpRotateAllRightUint64x4(v) + case OpRotateAllRightUint64x8: + return rewriteValueAMD64_OpRotateAllRightUint64x8(v) case OpRotateLeft16: v.Op = OpAMD64ROLW return true @@ -3824,6 +4250,78 @@ func rewriteValueAMD64(v *Value) bool { case OpRotateLeft8: v.Op = OpAMD64ROLB return true + case OpRotateLeftInt32x16: + v.Op = OpAMD64VPROLVD512 + return true + case OpRotateLeftInt32x4: + v.Op = OpAMD64VPROLVD128 + return true + case OpRotateLeftInt32x8: + v.Op = OpAMD64VPROLVD256 + return true + case OpRotateLeftInt64x2: + v.Op = OpAMD64VPROLVQ128 + return true + case OpRotateLeftInt64x4: + v.Op = OpAMD64VPROLVQ256 + return true + case OpRotateLeftInt64x8: + v.Op = OpAMD64VPROLVQ512 + return true + case OpRotateLeftUint32x16: + v.Op = OpAMD64VPROLVD512 + return true + case OpRotateLeftUint32x4: + v.Op = OpAMD64VPROLVD128 + return true + case OpRotateLeftUint32x8: + v.Op = OpAMD64VPROLVD256 + return true + case OpRotateLeftUint64x2: + v.Op = OpAMD64VPROLVQ128 + return true + case OpRotateLeftUint64x4: + v.Op = OpAMD64VPROLVQ256 + return true + case OpRotateLeftUint64x8: + v.Op = OpAMD64VPROLVQ512 + return true + case OpRotateRightInt32x16: + v.Op = OpAMD64VPRORVD512 + return true + case OpRotateRightInt32x4: + v.Op = OpAMD64VPRORVD128 + return true + case OpRotateRightInt32x8: + v.Op = OpAMD64VPRORVD256 + return true + case OpRotateRightInt64x2: + v.Op = OpAMD64VPRORVQ128 + return true + case OpRotateRightInt64x4: + v.Op = OpAMD64VPRORVQ256 + return true + case OpRotateRightInt64x8: + v.Op = OpAMD64VPRORVQ512 + return true + case OpRotateRightUint32x16: + v.Op = 
OpAMD64VPRORVD512 + return true + case OpRotateRightUint32x4: + v.Op = OpAMD64VPRORVD128 + return true + case OpRotateRightUint32x8: + v.Op = OpAMD64VPRORVD256 + return true + case OpRotateRightUint64x2: + v.Op = OpAMD64VPRORVQ128 + return true + case OpRotateRightUint64x4: + v.Op = OpAMD64VPRORVQ256 + return true + case OpRotateRightUint64x8: + v.Op = OpAMD64VPRORVQ512 + return true case OpRound32F: v.Op = OpAMD64LoweredRound32F return true @@ -4070,6 +4568,453 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpSetElemUint64x2(v) case OpSetElemUint8x16: return rewriteValueAMD64_OpSetElemUint8x16(v) + case OpShiftAllLeftAndFillUpperFromInt16x16: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt16x16(v) + case OpShiftAllLeftAndFillUpperFromInt16x32: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt16x32(v) + case OpShiftAllLeftAndFillUpperFromInt16x8: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt16x8(v) + case OpShiftAllLeftAndFillUpperFromInt32x16: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt32x16(v) + case OpShiftAllLeftAndFillUpperFromInt32x4: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt32x4(v) + case OpShiftAllLeftAndFillUpperFromInt32x8: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt32x8(v) + case OpShiftAllLeftAndFillUpperFromInt64x2: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt64x2(v) + case OpShiftAllLeftAndFillUpperFromInt64x4: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt64x4(v) + case OpShiftAllLeftAndFillUpperFromInt64x8: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt64x8(v) + case OpShiftAllLeftAndFillUpperFromUint16x16: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint16x16(v) + case OpShiftAllLeftAndFillUpperFromUint16x32: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint16x32(v) + case OpShiftAllLeftAndFillUpperFromUint16x8: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint16x8(v) + case OpShiftAllLeftAndFillUpperFromUint32x16: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint32x16(v) + case OpShiftAllLeftAndFillUpperFromUint32x4: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint32x4(v) + case OpShiftAllLeftAndFillUpperFromUint32x8: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint32x8(v) + case OpShiftAllLeftAndFillUpperFromUint64x2: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint64x2(v) + case OpShiftAllLeftAndFillUpperFromUint64x4: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint64x4(v) + case OpShiftAllLeftAndFillUpperFromUint64x8: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint64x8(v) + case OpShiftAllLeftInt16x16: + v.Op = OpAMD64VPSLLW256 + return true + case OpShiftAllLeftInt16x8: + v.Op = OpAMD64VPSLLW128 + return true + case OpShiftAllLeftInt32x4: + v.Op = OpAMD64VPSLLD128 + return true + case OpShiftAllLeftInt32x8: + v.Op = OpAMD64VPSLLD256 + return true + case OpShiftAllLeftInt64x2: + v.Op = OpAMD64VPSLLQ128 + return true + case OpShiftAllLeftInt64x4: + v.Op = OpAMD64VPSLLQ256 + return true + case OpShiftAllLeftInt64x8: + v.Op = OpAMD64VPSLLQ512 + return true + case OpShiftAllLeftUint16x16: + v.Op = OpAMD64VPSLLW256 + return true + case OpShiftAllLeftUint16x8: + v.Op = OpAMD64VPSLLW128 + return true + case OpShiftAllLeftUint32x4: + v.Op = OpAMD64VPSLLD128 + return true + case OpShiftAllLeftUint32x8: + v.Op = OpAMD64VPSLLD256 + return true + case OpShiftAllLeftUint64x2: + v.Op = OpAMD64VPSLLQ128 + return true 
+ case OpShiftAllLeftUint64x4: + v.Op = OpAMD64VPSLLQ256 + return true + case OpShiftAllLeftUint64x8: + v.Op = OpAMD64VPSLLQ512 + return true + case OpShiftAllRightAndFillUpperFromInt16x16: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt16x16(v) + case OpShiftAllRightAndFillUpperFromInt16x32: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt16x32(v) + case OpShiftAllRightAndFillUpperFromInt16x8: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt16x8(v) + case OpShiftAllRightAndFillUpperFromInt32x16: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt32x16(v) + case OpShiftAllRightAndFillUpperFromInt32x4: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt32x4(v) + case OpShiftAllRightAndFillUpperFromInt32x8: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt32x8(v) + case OpShiftAllRightAndFillUpperFromInt64x2: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt64x2(v) + case OpShiftAllRightAndFillUpperFromInt64x4: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt64x4(v) + case OpShiftAllRightAndFillUpperFromInt64x8: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt64x8(v) + case OpShiftAllRightAndFillUpperFromUint16x16: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint16x16(v) + case OpShiftAllRightAndFillUpperFromUint16x32: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint16x32(v) + case OpShiftAllRightAndFillUpperFromUint16x8: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint16x8(v) + case OpShiftAllRightAndFillUpperFromUint32x16: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint32x16(v) + case OpShiftAllRightAndFillUpperFromUint32x4: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint32x4(v) + case OpShiftAllRightAndFillUpperFromUint32x8: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint32x8(v) + case OpShiftAllRightAndFillUpperFromUint64x2: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint64x2(v) + case OpShiftAllRightAndFillUpperFromUint64x4: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint64x4(v) + case OpShiftAllRightAndFillUpperFromUint64x8: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint64x8(v) + case OpShiftAllRightInt16x16: + v.Op = OpAMD64VPSRLW256 + return true + case OpShiftAllRightInt16x8: + v.Op = OpAMD64VPSRLW128 + return true + case OpShiftAllRightInt32x4: + v.Op = OpAMD64VPSRLD128 + return true + case OpShiftAllRightInt32x8: + v.Op = OpAMD64VPSRLD256 + return true + case OpShiftAllRightInt64x2: + v.Op = OpAMD64VPSRLQ128 + return true + case OpShiftAllRightInt64x4: + v.Op = OpAMD64VPSRLQ256 + return true + case OpShiftAllRightInt64x8: + v.Op = OpAMD64VPSRLQ512 + return true + case OpShiftAllRightSignExtendedInt16x16: + v.Op = OpAMD64VPSRAW256 + return true + case OpShiftAllRightSignExtendedInt16x8: + v.Op = OpAMD64VPSRAW128 + return true + case OpShiftAllRightSignExtendedInt32x4: + v.Op = OpAMD64VPSRAD128 + return true + case OpShiftAllRightSignExtendedInt32x8: + v.Op = OpAMD64VPSRAD256 + return true + case OpShiftAllRightSignExtendedInt64x2: + v.Op = OpAMD64VPSRAQ128 + return true + case OpShiftAllRightSignExtendedInt64x4: + v.Op = OpAMD64VPSRAQ256 + return true + case OpShiftAllRightSignExtendedInt64x8: + v.Op = OpAMD64VPSRAQ512 + return true + case OpShiftAllRightUint16x16: + v.Op = OpAMD64VPSRLW256 + return true + case OpShiftAllRightUint16x8: + v.Op = OpAMD64VPSRLW128 + return true + case OpShiftAllRightUint32x4: + v.Op = OpAMD64VPSRLD128 + return 
true + case OpShiftAllRightUint32x8: + v.Op = OpAMD64VPSRLD256 + return true + case OpShiftAllRightUint64x2: + v.Op = OpAMD64VPSRLQ128 + return true + case OpShiftAllRightUint64x4: + v.Op = OpAMD64VPSRLQ256 + return true + case OpShiftAllRightUint64x8: + v.Op = OpAMD64VPSRLQ512 + return true + case OpShiftLeftAndFillUpperFromInt16x16: + v.Op = OpAMD64VPSHLDVW256 + return true + case OpShiftLeftAndFillUpperFromInt16x32: + v.Op = OpAMD64VPSHLDVW512 + return true + case OpShiftLeftAndFillUpperFromInt16x8: + v.Op = OpAMD64VPSHLDVW128 + return true + case OpShiftLeftAndFillUpperFromInt32x16: + v.Op = OpAMD64VPSHLDVD512 + return true + case OpShiftLeftAndFillUpperFromInt32x4: + v.Op = OpAMD64VPSHLDVD128 + return true + case OpShiftLeftAndFillUpperFromInt32x8: + v.Op = OpAMD64VPSHLDVD256 + return true + case OpShiftLeftAndFillUpperFromInt64x2: + v.Op = OpAMD64VPSHLDVQ128 + return true + case OpShiftLeftAndFillUpperFromInt64x4: + v.Op = OpAMD64VPSHLDVQ256 + return true + case OpShiftLeftAndFillUpperFromInt64x8: + v.Op = OpAMD64VPSHLDVQ512 + return true + case OpShiftLeftAndFillUpperFromUint16x16: + v.Op = OpAMD64VPSHLDVW256 + return true + case OpShiftLeftAndFillUpperFromUint16x32: + v.Op = OpAMD64VPSHLDVW512 + return true + case OpShiftLeftAndFillUpperFromUint16x8: + v.Op = OpAMD64VPSHLDVW128 + return true + case OpShiftLeftAndFillUpperFromUint32x16: + v.Op = OpAMD64VPSHLDVD512 + return true + case OpShiftLeftAndFillUpperFromUint32x4: + v.Op = OpAMD64VPSHLDVD128 + return true + case OpShiftLeftAndFillUpperFromUint32x8: + v.Op = OpAMD64VPSHLDVD256 + return true + case OpShiftLeftAndFillUpperFromUint64x2: + v.Op = OpAMD64VPSHLDVQ128 + return true + case OpShiftLeftAndFillUpperFromUint64x4: + v.Op = OpAMD64VPSHLDVQ256 + return true + case OpShiftLeftAndFillUpperFromUint64x8: + v.Op = OpAMD64VPSHLDVQ512 + return true + case OpShiftLeftInt16x16: + v.Op = OpAMD64VPSLLVW256 + return true + case OpShiftLeftInt16x32: + v.Op = OpAMD64VPSLLVW512 + return true + case OpShiftLeftInt16x8: + v.Op = OpAMD64VPSLLVW128 + return true + case OpShiftLeftInt32x16: + v.Op = OpAMD64VPSLLVD512 + return true + case OpShiftLeftInt32x4: + v.Op = OpAMD64VPSLLVD128 + return true + case OpShiftLeftInt32x8: + v.Op = OpAMD64VPSLLVD256 + return true + case OpShiftLeftInt64x2: + v.Op = OpAMD64VPSLLVQ128 + return true + case OpShiftLeftInt64x4: + v.Op = OpAMD64VPSLLVQ256 + return true + case OpShiftLeftInt64x8: + v.Op = OpAMD64VPSLLVQ512 + return true + case OpShiftLeftUint16x16: + v.Op = OpAMD64VPSLLVW256 + return true + case OpShiftLeftUint16x32: + v.Op = OpAMD64VPSLLVW512 + return true + case OpShiftLeftUint16x8: + v.Op = OpAMD64VPSLLVW128 + return true + case OpShiftLeftUint32x16: + v.Op = OpAMD64VPSLLVD512 + return true + case OpShiftLeftUint32x4: + v.Op = OpAMD64VPSLLVD128 + return true + case OpShiftLeftUint32x8: + v.Op = OpAMD64VPSLLVD256 + return true + case OpShiftLeftUint64x2: + v.Op = OpAMD64VPSLLVQ128 + return true + case OpShiftLeftUint64x4: + v.Op = OpAMD64VPSLLVQ256 + return true + case OpShiftLeftUint64x8: + v.Op = OpAMD64VPSLLVQ512 + return true + case OpShiftRightAndFillUpperFromInt16x16: + v.Op = OpAMD64VPSHRDVW256 + return true + case OpShiftRightAndFillUpperFromInt16x32: + v.Op = OpAMD64VPSHRDVW512 + return true + case OpShiftRightAndFillUpperFromInt16x8: + v.Op = OpAMD64VPSHRDVW128 + return true + case OpShiftRightAndFillUpperFromInt32x16: + v.Op = OpAMD64VPSHRDVD512 + return true + case OpShiftRightAndFillUpperFromInt32x4: + v.Op = OpAMD64VPSHRDVD128 + return true + case OpShiftRightAndFillUpperFromInt32x8: 
+ v.Op = OpAMD64VPSHRDVD256 + return true + case OpShiftRightAndFillUpperFromInt64x2: + v.Op = OpAMD64VPSHRDVQ128 + return true + case OpShiftRightAndFillUpperFromInt64x4: + v.Op = OpAMD64VPSHRDVQ256 + return true + case OpShiftRightAndFillUpperFromInt64x8: + v.Op = OpAMD64VPSHRDVQ512 + return true + case OpShiftRightAndFillUpperFromUint16x16: + v.Op = OpAMD64VPSHRDVW256 + return true + case OpShiftRightAndFillUpperFromUint16x32: + v.Op = OpAMD64VPSHRDVW512 + return true + case OpShiftRightAndFillUpperFromUint16x8: + v.Op = OpAMD64VPSHRDVW128 + return true + case OpShiftRightAndFillUpperFromUint32x16: + v.Op = OpAMD64VPSHRDVD512 + return true + case OpShiftRightAndFillUpperFromUint32x4: + v.Op = OpAMD64VPSHRDVD128 + return true + case OpShiftRightAndFillUpperFromUint32x8: + v.Op = OpAMD64VPSHRDVD256 + return true + case OpShiftRightAndFillUpperFromUint64x2: + v.Op = OpAMD64VPSHRDVQ128 + return true + case OpShiftRightAndFillUpperFromUint64x4: + v.Op = OpAMD64VPSHRDVQ256 + return true + case OpShiftRightAndFillUpperFromUint64x8: + v.Op = OpAMD64VPSHRDVQ512 + return true + case OpShiftRightInt16x16: + v.Op = OpAMD64VPSRLVW256 + return true + case OpShiftRightInt16x32: + v.Op = OpAMD64VPSRLVW512 + return true + case OpShiftRightInt16x8: + v.Op = OpAMD64VPSRLVW128 + return true + case OpShiftRightInt32x16: + v.Op = OpAMD64VPSRLVD512 + return true + case OpShiftRightInt32x4: + v.Op = OpAMD64VPSRLVD128 + return true + case OpShiftRightInt32x8: + v.Op = OpAMD64VPSRLVD256 + return true + case OpShiftRightInt64x2: + v.Op = OpAMD64VPSRLVQ128 + return true + case OpShiftRightInt64x4: + v.Op = OpAMD64VPSRLVQ256 + return true + case OpShiftRightInt64x8: + v.Op = OpAMD64VPSRLVQ512 + return true + case OpShiftRightSignExtendedInt16x16: + v.Op = OpAMD64VPSRAVW256 + return true + case OpShiftRightSignExtendedInt16x32: + v.Op = OpAMD64VPSRAVW512 + return true + case OpShiftRightSignExtendedInt16x8: + v.Op = OpAMD64VPSRAVW128 + return true + case OpShiftRightSignExtendedInt32x16: + v.Op = OpAMD64VPSRAVD512 + return true + case OpShiftRightSignExtendedInt32x4: + v.Op = OpAMD64VPSRAVD128 + return true + case OpShiftRightSignExtendedInt32x8: + v.Op = OpAMD64VPSRAVD256 + return true + case OpShiftRightSignExtendedInt64x2: + v.Op = OpAMD64VPSRAVQ128 + return true + case OpShiftRightSignExtendedInt64x4: + v.Op = OpAMD64VPSRAVQ256 + return true + case OpShiftRightSignExtendedInt64x8: + v.Op = OpAMD64VPSRAVQ512 + return true + case OpShiftRightSignExtendedUint16x16: + v.Op = OpAMD64VPSRAVW256 + return true + case OpShiftRightSignExtendedUint16x32: + v.Op = OpAMD64VPSRAVW512 + return true + case OpShiftRightSignExtendedUint16x8: + v.Op = OpAMD64VPSRAVW128 + return true + case OpShiftRightSignExtendedUint32x16: + v.Op = OpAMD64VPSRAVD512 + return true + case OpShiftRightSignExtendedUint32x4: + v.Op = OpAMD64VPSRAVD128 + return true + case OpShiftRightSignExtendedUint32x8: + v.Op = OpAMD64VPSRAVD256 + return true + case OpShiftRightSignExtendedUint64x2: + v.Op = OpAMD64VPSRAVQ128 + return true + case OpShiftRightSignExtendedUint64x4: + v.Op = OpAMD64VPSRAVQ256 + return true + case OpShiftRightSignExtendedUint64x8: + v.Op = OpAMD64VPSRAVQ512 + return true + case OpShiftRightUint16x16: + v.Op = OpAMD64VPSRLVW256 + return true + case OpShiftRightUint16x32: + v.Op = OpAMD64VPSRLVW512 + return true + case OpShiftRightUint16x8: + v.Op = OpAMD64VPSRLVW128 + return true + case OpShiftRightUint32x16: + v.Op = OpAMD64VPSRLVD512 + return true + case OpShiftRightUint32x4: + v.Op = OpAMD64VPSRLVD128 + return true + case 
OpShiftRightUint32x8: + v.Op = OpAMD64VPSRLVD256 + return true + case OpShiftRightUint64x2: + v.Op = OpAMD64VPSRLVQ128 + return true + case OpShiftRightUint64x4: + v.Op = OpAMD64VPSRLVQ256 + return true + case OpShiftRightUint64x8: + v.Op = OpAMD64VPSRLVQ512 + return true case OpSignExt16to32: v.Op = OpAMD64MOVWQSX return true @@ -43973,885 +44918,4431 @@ func rewriteValueAMD64_OpMaskedPopCountUint8x64(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateAllLeftInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRoundSuppressExceptionWithPrecisionFloat32x16 [a] x mask) - // result: (VRNDSCALEPSMasked512 [a+8] x (VPMOVVec32x16ToM mask)) + // match: (MaskedRotateAllLeftInt32x16 [a] x mask) + // result: (VPROLDMasked512 [a] x (VPMOVVec32x16ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 8) + v.reset(OpAMD64VPROLDMasked512) + v.AuxInt = int8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateAllLeftInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRoundSuppressExceptionWithPrecisionFloat32x4 [a] x mask) - // result: (VRNDSCALEPSMasked128 [a+8] x (VPMOVVec32x4ToM mask)) + // match: (MaskedRotateAllLeftInt32x4 [a] x mask) + // result: (VPROLDMasked128 [a] x (VPMOVVec32x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 8) + v.reset(OpAMD64VPROLDMasked128) + v.AuxInt = int8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateAllLeftInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRoundSuppressExceptionWithPrecisionFloat32x8 [a] x mask) - // result: (VRNDSCALEPSMasked256 [a+8] x (VPMOVVec32x8ToM mask)) + // match: (MaskedRotateAllLeftInt32x8 [a] x mask) + // result: (VPROLDMasked256 [a] x (VPMOVVec32x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 8) + v.reset(OpAMD64VPROLDMasked256) + v.AuxInt = int8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateAllLeftInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRoundSuppressExceptionWithPrecisionFloat64x2 [a] x mask) - // result: (VRNDSCALEPDMasked128 [a+8] x (VPMOVVec64x2ToM mask)) + // match: (MaskedRotateAllLeftInt64x2 [a] x mask) + // result: (VPROLQMasked128 [a] x (VPMOVVec64x2ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 8) + v.reset(OpAMD64VPROLQMasked128) + v.AuxInt = int8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func 
rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateAllLeftInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRoundSuppressExceptionWithPrecisionFloat64x4 [a] x mask) - // result: (VRNDSCALEPDMasked256 [a+8] x (VPMOVVec64x4ToM mask)) + // match: (MaskedRotateAllLeftInt64x4 [a] x mask) + // result: (VPROLQMasked256 [a] x (VPMOVVec64x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 8) + v.reset(OpAMD64VPROLQMasked256) + v.AuxInt = int8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateAllLeftInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRoundSuppressExceptionWithPrecisionFloat64x8 [a] x mask) - // result: (VRNDSCALEPDMasked512 [a+8] x (VPMOVVec64x8ToM mask)) + // match: (MaskedRotateAllLeftInt64x8 [a] x mask) + // result: (VPROLQMasked512 [a] x (VPMOVVec64x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 8) + v.reset(OpAMD64VPROLQMasked512) + v.AuxInt = int8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateAllLeftUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRoundWithPrecisionFloat32x16 [a] x mask) - // result: (VRNDSCALEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) + // match: (MaskedRotateAllLeftUint32x16 [a] x mask) + // result: (VPROLDMasked512 [a] x (VPMOVVec32x16ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 0) + v.reset(OpAMD64VPROLDMasked512) + v.AuxInt = int8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateAllLeftUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRoundWithPrecisionFloat32x4 [a] x mask) - // result: (VRNDSCALEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) + // match: (MaskedRotateAllLeftUint32x4 [a] x mask) + // result: (VPROLDMasked128 [a] x (VPMOVVec32x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 0) + v.reset(OpAMD64VPROLDMasked128) + v.AuxInt = int8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateAllLeftUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRoundWithPrecisionFloat32x8 [a] x mask) - // result: (VRNDSCALEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) + // match: (MaskedRotateAllLeftUint32x8 [a] x mask) + // result: (VPROLDMasked256 [a] x (VPMOVVec32x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked256) - 
v.AuxInt = int8ToAuxInt(a + 0) + v.reset(OpAMD64VPROLDMasked256) + v.AuxInt = int8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateAllLeftUint64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRoundWithPrecisionFloat64x2 [a] x mask) - // result: (VRNDSCALEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) + // match: (MaskedRotateAllLeftUint64x2 [a] x mask) + // result: (VPROLQMasked128 [a] x (VPMOVVec64x2ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 0) + v.reset(OpAMD64VPROLQMasked128) + v.AuxInt = int8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateAllLeftUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRoundWithPrecisionFloat64x4 [a] x mask) - // result: (VRNDSCALEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) + // match: (MaskedRotateAllLeftUint64x4 [a] x mask) + // result: (VPROLQMasked256 [a] x (VPMOVVec64x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 0) + v.reset(OpAMD64VPROLQMasked256) + v.AuxInt = int8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateAllLeftUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRoundWithPrecisionFloat64x8 [a] x mask) - // result: (VRNDSCALEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) + // match: (MaskedRotateAllLeftUint64x8 [a] x mask) + // result: (VPROLQMasked512 [a] x (VPMOVVec64x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 0) + v.reset(OpAMD64VPROLQMasked512) + v.AuxInt = int8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedAddInt16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedRotateAllRightInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedAddInt16x16 x y mask) - // result: (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (MaskedRotateAllRightInt32x16 [a] x mask) + // result: (VPRORDMasked512 [a] x (VPMOVVec32x16ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPRORDMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedAddInt16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedRotateAllRightInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedAddInt16x32 x y mask) - // result: (VPADDSWMasked512 x y 
(VPMOVVec16x32ToM mask)) + // match: (MaskedRotateAllRightInt32x4 [a] x mask) + // result: (VPRORDMasked128 [a] x (VPMOVVec32x4ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPRORDMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedAddInt16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedRotateAllRightInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedAddInt16x8 x y mask) - // result: (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (MaskedRotateAllRightInt32x8 [a] x mask) + // result: (VPRORDMasked256 [a] x (VPMOVVec32x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPRORDMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedAddInt8x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedRotateAllRightInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedAddInt8x16 x y mask) - // result: (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) + // match: (MaskedRotateAllRightInt64x2 [a] x mask) + // result: (VPRORQMasked128 [a] x (VPMOVVec64x2ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPRORQMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedAddInt8x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedRotateAllRightInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedAddInt8x32 x y mask) - // result: (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) + // match: (MaskedRotateAllRightInt64x4 [a] x mask) + // result: (VPRORQMasked256 [a] x (VPMOVVec64x4ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPRORQMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedAddInt8x64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedRotateAllRightInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedAddInt8x64 x y mask) - // result: (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) + // match: (MaskedRotateAllRightInt64x8 [a] x mask) + // result: (VPRORQMasked512 [a] x (VPMOVVec64x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked512) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec8x64ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPRORQMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedAddUint16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedRotateAllRightUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedAddUint16x16 x y mask) - // result: (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (MaskedRotateAllRightUint32x16 [a] x mask) + // result: (VPRORDMasked512 [a] x (VPMOVVec32x16ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPRORDMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedAddUint16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedRotateAllRightUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedAddUint16x32 x y mask) - // result: (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (MaskedRotateAllRightUint32x4 [a] x mask) + // result: (VPRORDMasked128 [a] x (VPMOVVec32x4ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPRORDMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedAddUint16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedRotateAllRightUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedAddUint16x8 x y mask) - // result: (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (MaskedRotateAllRightUint32x8 [a] x mask) + // result: (VPRORDMasked256 [a] x (VPMOVVec32x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPRORDMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedAddUint8x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedRotateAllRightUint64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedAddUint8x16 x y mask) - // result: (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) + // match: (MaskedRotateAllRightUint64x2 [a] x mask) + // result: (VPRORQMasked128 [a] x (VPMOVVec64x2ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPRORQMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func 
rewriteValueAMD64_OpMaskedSaturatedAddUint8x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedRotateAllRightUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedAddUint8x32 x y mask) - // result: (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) + // match: (MaskedRotateAllRightUint64x4 [a] x mask) + // result: (VPRORQMasked256 [a] x (VPMOVVec64x4ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPRORQMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedAddUint8x64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedRotateAllRightUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedAddUint8x64 x y mask) - // result: (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) + // match: (MaskedRotateAllRightUint64x8 [a] x mask) + // result: (VPRORQMasked512 [a] x (VPMOVVec64x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPRORQMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedPairDotProdAccumulateInt32x16(v *Value) bool { - v_3 := v.Args[3] +func rewriteValueAMD64_OpMaskedRotateLeftInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedPairDotProdAccumulateInt32x16 x y z mask) - // result: (VPDPWSSDSMasked512 x y z (VPMOVVec32x16ToM mask)) + // match: (MaskedRotateLeftInt32x16 x y mask) + // result: (VPROLVDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPWSSDSMasked512) + mask := v_2 + v.reset(OpAMD64VPROLVDMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedPairDotProdAccumulateInt32x4(v *Value) bool { - v_3 := v.Args[3] +func rewriteValueAMD64_OpMaskedRotateLeftInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedPairDotProdAccumulateInt32x4 x y z mask) - // result: (VPDPWSSDSMasked128 x y z (VPMOVVec32x4ToM mask)) + // match: (MaskedRotateLeftInt32x4 x y mask) + // result: (VPROLVDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPWSSDSMasked128) + mask := v_2 + v.reset(OpAMD64VPROLVDMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedPairDotProdAccumulateInt32x8(v *Value) bool { - v_3 := v.Args[3] +func rewriteValueAMD64_OpMaskedRotateLeftInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedPairDotProdAccumulateInt32x8 x y z mask) - // result: (VPDPWSSDSMasked256 x y z (VPMOVVec32x8ToM mask)) + // match: (MaskedRotateLeftInt32x8 
x y mask) + // result: (VPROLVDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPWSSDSMasked256) + mask := v_2 + v.reset(OpAMD64VPROLVDMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedSubInt16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateLeftInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedSubInt16x16 x y mask) - // result: (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (MaskedRotateLeftInt64x2 x y mask) + // result: (VPROLVQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v.reset(OpAMD64VPROLVQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedSubInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateLeftInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedSubInt16x32 x y mask) - // result: (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (MaskedRotateLeftInt64x4 x y mask) + // result: (VPROLVQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v.reset(OpAMD64VPROLVQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedSubInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateLeftInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedSubInt16x8 x y mask) - // result: (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (MaskedRotateLeftInt64x8 x y mask) + // result: (VPROLVQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v.reset(OpAMD64VPROLVQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedSubInt8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateLeftUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedSubInt8x16 x y mask) - // result: (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) + // match: (MaskedRotateLeftUint32x16 x y mask) + // result: (VPROLVDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBSBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v.reset(OpAMD64VPROLVDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedSubInt8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateLeftUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedSubInt8x32 x y mask) - // result: (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) + // match: (MaskedRotateLeftUint32x4 x y mask) + // 
result: (VPROLVDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBSBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v.reset(OpAMD64VPROLVDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedSubInt8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateLeftUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedSubInt8x64 x y mask) - // result: (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) + // match: (MaskedRotateLeftUint32x8 x y mask) + // result: (VPROLVDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBSBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v.reset(OpAMD64VPROLVDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedSubUint16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateLeftUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedSubUint16x16 x y mask) - // result: (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (MaskedRotateLeftUint64x2 x y mask) + // result: (VPROLVQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v.reset(OpAMD64VPROLVQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedSubUint16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateLeftUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedSubUint16x32 x y mask) - // result: (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (MaskedRotateLeftUint64x4 x y mask) + // result: (VPROLVQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v.reset(OpAMD64VPROLVQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedSubUint16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateLeftUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedSubUint16x8 x y mask) - // result: (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (MaskedRotateLeftUint64x8 x y mask) + // result: (VPROLVQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v.reset(OpAMD64VPROLVQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedSubUint8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateRightInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedSubUint8x16 x y mask) - // result: (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) + // match: (MaskedRotateRightInt32x16 x y mask) + 
// result: (VPRORVDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBSBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v.reset(OpAMD64VPRORVDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedSubUint8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateRightInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedSubUint8x32 x y mask) - // result: (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) + // match: (MaskedRotateRightInt32x4 x y mask) + // result: (VPRORVDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBSBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v.reset(OpAMD64VPRORVDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedSubUint8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateRightInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedSubUint8x64 x y mask) - // result: (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) + // match: (MaskedRotateRightInt32x8 x y mask) + // result: (VPRORVDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBSBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v.reset(OpAMD64VPRORVDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateRightInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedUnsignedSignedPairDotProdUint8x16 x y mask) - // result: (VPMADDUBSWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (MaskedRotateRightInt64x2 x y mask) + // result: (VPRORVQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMADDUBSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v.reset(OpAMD64VPRORVQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateRightInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedUnsignedSignedPairDotProdUint8x32 x y mask) - // result: (VPMADDUBSWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (MaskedRotateRightInt64x4 x y mask) + // result: (VPRORVQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMADDUBSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v.reset(OpAMD64VPRORVQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateRightInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: 
(MaskedSaturatedUnsignedSignedPairDotProdUint8x64 x y mask) - // result: (VPMADDUBSWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (MaskedRotateRightInt64x8 x y mask) + // result: (VPRORVQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMADDUBSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v.reset(OpAMD64VPRORVQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16(v *Value) bool { - v_3 := v.Args[3] +func rewriteValueAMD64_OpMaskedRotateRightUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16 x y z mask) - // result: (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) + // match: (MaskedRotateRightUint32x16 x y mask) + // result: (VPRORVDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDSMasked512) + mask := v_2 + v.reset(OpAMD64VPRORVDMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4(v *Value) bool { - v_3 := v.Args[3] +func rewriteValueAMD64_OpMaskedRotateRightUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4 x y z mask) - // result: (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) + // match: (MaskedRotateRightUint32x4 x y mask) + // result: (VPRORVDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDSMasked128) + mask := v_2 + v.reset(OpAMD64VPRORVDMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRotateRightUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRotateRightUint32x8 x y mask) + // result: (VPRORVDMasked256 x y (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPRORVDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRotateRightUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRotateRightUint64x2 x y mask) + // result: (VPRORVQMasked128 x y (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPRORVQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRotateRightUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRotateRightUint64x4 x y mask) + // result: (VPRORVQMasked256 x y (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPRORVQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRotateRightUint64x8(v *Value) bool { + v_2 
:= v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRotateRightUint64x8 x y mask) + // result: (VPRORVQMasked512 x y (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPRORVQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRoundSuppressExceptionWithPrecisionFloat32x16 [a] x mask) + // result: (VRNDSCALEPSMasked512 [a+8] x (VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 8) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRoundSuppressExceptionWithPrecisionFloat32x4 [a] x mask) + // result: (VRNDSCALEPSMasked128 [a+8] x (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 8) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRoundSuppressExceptionWithPrecisionFloat32x8 [a] x mask) + // result: (VRNDSCALEPSMasked256 [a+8] x (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 8) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRoundSuppressExceptionWithPrecisionFloat64x2 [a] x mask) + // result: (VRNDSCALEPDMasked128 [a+8] x (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 8) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRoundSuppressExceptionWithPrecisionFloat64x4 [a] x mask) + // result: (VRNDSCALEPDMasked256 [a+8] x (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 8) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRoundSuppressExceptionWithPrecisionFloat64x8 [a] x mask) + // result: (VRNDSCALEPDMasked512 [a+8] x (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked512) + v.AuxInt = int8ToAuxInt(a 
+ 8) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRoundWithPrecisionFloat32x16 [a] x mask) + // result: (VRNDSCALEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRoundWithPrecisionFloat32x4 [a] x mask) + // result: (VRNDSCALEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRoundWithPrecisionFloat32x8 [a] x mask) + // result: (VRNDSCALEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRoundWithPrecisionFloat64x2 [a] x mask) + // result: (VRNDSCALEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRoundWithPrecisionFloat64x4 [a] x mask) + // result: (VRNDSCALEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRoundWithPrecisionFloat64x8 [a] x mask) + // result: (VRNDSCALEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedAddInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedAddInt16x16 x y mask) + // result: (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + 
v.reset(OpAMD64VPADDSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedAddInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedAddInt16x32 x y mask) + // result: (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedAddInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedAddInt16x8 x y mask) + // result: (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedAddInt8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedAddInt8x16 x y mask) + // result: (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedAddInt8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedAddInt8x32 x y mask) + // result: (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedAddInt8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedAddInt8x64 x y mask) + // result: (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedAddUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedAddUint16x16 x y mask) + // result: (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedAddUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedAddUint16x32 x y mask) + // result: (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedAddUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedAddUint16x8 x 
y mask) + // result: (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedAddUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedAddUint8x16 x y mask) + // result: (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedAddUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedAddUint8x32 x y mask) + // result: (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedAddUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedAddUint8x64 x y mask) + // result: (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedPairDotProdAccumulateInt32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedPairDotProdAccumulateInt32x16 x y z mask) + // result: (VPDPWSSDSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPWSSDSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedPairDotProdAccumulateInt32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedPairDotProdAccumulateInt32x4 x y z mask) + // result: (VPDPWSSDSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPWSSDSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedPairDotProdAccumulateInt32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedPairDotProdAccumulateInt32x8 x y z mask) + // result: (VPDPWSSDSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPWSSDSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedSubInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedSubInt16x16 x y mask) + // result: (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + 
v.reset(OpAMD64VPSUBSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedSubInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedSubInt16x32 x y mask) + // result: (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedSubInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedSubInt16x8 x y mask) + // result: (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedSubInt8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedSubInt8x16 x y mask) + // result: (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedSubInt8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedSubInt8x32 x y mask) + // result: (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedSubInt8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedSubInt8x64 x y mask) + // result: (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedSubUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedSubUint16x16 x y mask) + // result: (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedSubUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedSubUint16x32 x y mask) + // result: (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedSubUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedSubUint16x8 x 
y mask) + // result: (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedSubUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedSubUint8x16 x y mask) + // result: (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedSubUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedSubUint8x32 x y mask) + // result: (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedSubUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedSubUint8x64 x y mask) + // result: (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedUnsignedSignedPairDotProdUint8x16 x y mask) + // result: (VPMADDUBSWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMADDUBSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedUnsignedSignedPairDotProdUint8x32 x y mask) + // result: (VPMADDUBSWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMADDUBSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedUnsignedSignedPairDotProdUint8x64 x y mask) + // result: (VPMADDUBSWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMADDUBSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16 x y z mask) + // result: (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := 
v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4 x y z mask) + // result: (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8 x y z mask) + // result: (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16 x y z mask) + // result: (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4 x y z mask) + // result: (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8 x y z mask) + // result: (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllLeftAndFillUpperFromInt16x16 [a] x y mask) + // result: (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDWMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true 
+ } +} +func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllLeftAndFillUpperFromInt16x32 [a] x y mask) + // result: (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDWMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllLeftAndFillUpperFromInt16x8 [a] x y mask) + // result: (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDWMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllLeftAndFillUpperFromInt32x16 [a] x y mask) + // result: (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDDMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllLeftAndFillUpperFromInt32x4 [a] x y mask) + // result: (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDDMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllLeftAndFillUpperFromInt32x8 [a] x y mask) + // result: (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDDMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllLeftAndFillUpperFromInt64x2 [a] x y mask) + // result: (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDQMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: 
(MaskedShiftAllLeftAndFillUpperFromInt64x4 [a] x y mask) + // result: (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDQMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllLeftAndFillUpperFromInt64x8 [a] x y mask) + // result: (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDQMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllLeftAndFillUpperFromUint16x16 [a] x y mask) + // result: (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDWMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllLeftAndFillUpperFromUint16x32 [a] x y mask) + // result: (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDWMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllLeftAndFillUpperFromUint16x8 [a] x y mask) + // result: (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDWMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllLeftAndFillUpperFromUint32x16 [a] x y mask) + // result: (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDDMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllLeftAndFillUpperFromUint32x4 [a] x y mask) + // result: (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + 
v.reset(OpAMD64VPSHLDDMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllLeftAndFillUpperFromUint32x8 [a] x y mask) + // result: (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDDMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllLeftAndFillUpperFromUint64x2 [a] x y mask) + // result: (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDQMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllLeftAndFillUpperFromUint64x4 [a] x y mask) + // result: (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDQMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllLeftAndFillUpperFromUint64x8 [a] x y mask) + // result: (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDQMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllLeftInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllLeftInt64x2 x y mask) + // result: (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllLeftInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllLeftInt64x4 x y mask) + // result: (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllLeftInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllLeftInt64x8 x y mask) + // result: (VPSLLQMasked512 x y 
(VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllLeftUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllLeftUint64x2 x y mask) + // result: (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllLeftUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllLeftUint64x4 x y mask) + // result: (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllLeftUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllLeftUint64x8 x y mask) + // result: (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightAndFillUpperFromInt16x16 [a] x y mask) + // result: (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHRDWMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightAndFillUpperFromInt16x32 [a] x y mask) + // result: (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHRDWMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightAndFillUpperFromInt16x8 [a] x y mask) + // result: (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHRDWMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightAndFillUpperFromInt32x16 [a] x y mask) + // result: (VPSHRDDMasked512 [a] x y 
(VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHRDDMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightAndFillUpperFromInt32x4 [a] x y mask) + // result: (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHRDDMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightAndFillUpperFromInt32x8 [a] x y mask) + // result: (VPSHRDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHRDDMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightAndFillUpperFromInt64x2 [a] x y mask) + // result: (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHRDQMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightAndFillUpperFromInt64x4 [a] x y mask) + // result: (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHRDQMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightAndFillUpperFromInt64x8 [a] x y mask) + // result: (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHRDQMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightAndFillUpperFromUint16x16 [a] x y mask) + // result: (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHRDWMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightAndFillUpperFromUint16x32 [a] x y mask) + // result: (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHRDWMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightAndFillUpperFromUint16x8 [a] x y mask) + // result: (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHRDWMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightAndFillUpperFromUint32x16 [a] x y mask) + // result: (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHRDDMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightAndFillUpperFromUint32x4 [a] x y mask) + // result: (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHRDDMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightAndFillUpperFromUint32x8 [a] x y mask) + // result: (VPSHRDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHRDDMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightAndFillUpperFromUint64x2 [a] x y mask) + // result: (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHRDQMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint64x4(v *Value) bool { + 
v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightAndFillUpperFromUint64x4 [a] x y mask) + // result: (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHRDQMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightAndFillUpperFromUint64x8 [a] x y mask) + // result: (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHRDQMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightInt64x2 x y mask) + // result: (VPSRLQMasked128 x y (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightInt64x4 x y mask) + // result: (VPSRLQMasked256 x y (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightInt64x8 x y mask) + // result: (VPSRLQMasked512 x y (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightSignExtendedInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightSignExtendedInt64x2 x y mask) + // result: (VPSRAQMasked128 x y (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRAQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightSignExtendedInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightSignExtendedInt64x4 x y mask) + // result: (VPSRAQMasked256 x y (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRAQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightSignExtendedInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: 
(MaskedShiftAllRightSignExtendedInt64x8 x y mask) + // result: (VPSRAQMasked512 x y (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRAQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightUint64x2 x y mask) + // result: (VPSRLQMasked128 x y (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightUint64x4 x y mask) + // result: (VPSRLQMasked256 x y (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightUint64x8 x y mask) + // result: (VPSRLQMasked512 x y (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt16x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftAndFillUpperFromInt16x16 x y z mask) + // result: (VPSHLDVWMasked256 x y z (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt16x32(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftAndFillUpperFromInt16x32 x y z mask) + // result: (VPSHLDVWMasked512 x y z (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt16x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftAndFillUpperFromInt16x8 x y z mask) + // result: (VPSHLDVWMasked128 x y z (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftAndFillUpperFromInt32x16 x y z mask) + // result: (VPSHLDVDMasked512 x y z (VPMOVVec32x16ToM 
mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftAndFillUpperFromInt32x4 x y z mask) + // result: (VPSHLDVDMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftAndFillUpperFromInt32x8 x y z mask) + // result: (VPSHLDVDMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftAndFillUpperFromInt64x2 x y z mask) + // result: (VPSHLDVQMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftAndFillUpperFromInt64x4 x y z mask) + // result: (VPSHLDVQMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftAndFillUpperFromInt64x8 x y z mask) + // result: (VPSHLDVQMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint16x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftAndFillUpperFromUint16x16 x y z mask) + // result: (VPSHLDVWMasked256 x y z (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint16x32(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: 
(MaskedShiftLeftAndFillUpperFromUint16x32 x y z mask) + // result: (VPSHLDVWMasked512 x y z (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint16x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftAndFillUpperFromUint16x8 x y z mask) + // result: (VPSHLDVWMasked128 x y z (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftAndFillUpperFromUint32x16 x y z mask) + // result: (VPSHLDVDMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftAndFillUpperFromUint32x4 x y z mask) + // result: (VPSHLDVDMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftAndFillUpperFromUint32x8 x y z mask) + // result: (VPSHLDVDMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftAndFillUpperFromUint64x2 x y z mask) + // result: (VPSHLDVQMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftAndFillUpperFromUint64x4 x y z mask) + // result: (VPSHLDVQMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint64x8(v 
*Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftAndFillUpperFromUint64x8 x y z mask) + // result: (VPSHLDVQMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftInt16x16 x y mask) + // result: (VPSLLVWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLVWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftInt16x32 x y mask) + // result: (VPSLLVWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLVWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftInt16x8 x y mask) + // result: (VPSLLVWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLVWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftInt32x16 x y mask) + // result: (VPSLLVDMasked512 x y (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLVDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftInt32x4 x y mask) + // result: (VPSLLVDMasked128 x y (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLVDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftInt32x8 x y mask) + // result: (VPSLLVDMasked256 x y (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLVDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftInt64x2 x y mask) + // result: (VPSLLVQMasked128 x y (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLVQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, 
types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftInt64x4 x y mask) + // result: (VPSLLVQMasked256 x y (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLVQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftInt64x8 x y mask) + // result: (VPSLLVQMasked512 x y (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLVQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftUint16x16 x y mask) + // result: (VPSLLVWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLVWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftUint16x32 x y mask) + // result: (VPSLLVWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLVWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftUint16x8 x y mask) + // result: (VPSLLVWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLVWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftUint32x16 x y mask) + // result: (VPSLLVDMasked512 x y (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLVDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftUint32x4 x y mask) + // result: (VPSLLVDMasked128 x y (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLVDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftUint32x8 x y mask) + // result: (VPSLLVDMasked256 x y (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + 
v.reset(OpAMD64VPSLLVDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftUint64x2 x y mask) + // result: (VPSLLVQMasked128 x y (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLVQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftUint64x4 x y mask) + // result: (VPSLLVQMasked256 x y (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLVQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftUint64x8 x y mask) + // result: (VPSLLVQMasked512 x y (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLVQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt16x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightAndFillUpperFromInt16x16 x y z mask) + // result: (VPSHRDVWMasked256 x y z (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt16x32(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightAndFillUpperFromInt16x32 x y z mask) + // result: (VPSHRDVWMasked512 x y z (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt16x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightAndFillUpperFromInt16x8 x y z mask) + // result: (VPSHRDVWMasked128 x y z (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightAndFillUpperFromInt32x16 x y z mask) + // result: (VPSHRDVDMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, 
types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightAndFillUpperFromInt32x4 x y z mask) + // result: (VPSHRDVDMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightAndFillUpperFromInt32x8 x y z mask) + // result: (VPSHRDVDMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightAndFillUpperFromInt64x2 x y z mask) + // result: (VPSHRDVQMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightAndFillUpperFromInt64x4 x y z mask) + // result: (VPSHRDVQMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightAndFillUpperFromInt64x8 x y z mask) + // result: (VPSHRDVQMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint16x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightAndFillUpperFromUint16x16 x y z mask) + // result: (VPSHRDVWMasked256 x y z (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint16x32(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightAndFillUpperFromUint16x32 x y z mask) + // result: (VPSHRDVWMasked512 x y z (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y 
:= v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint16x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightAndFillUpperFromUint16x8 x y z mask) + // result: (VPSHRDVWMasked128 x y z (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightAndFillUpperFromUint32x16 x y z mask) + // result: (VPSHRDVDMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightAndFillUpperFromUint32x4 x y z mask) + // result: (VPSHRDVDMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightAndFillUpperFromUint32x8 x y z mask) + // result: (VPSHRDVDMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightAndFillUpperFromUint64x2 x y z mask) + // result: (VPSHRDVQMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightAndFillUpperFromUint64x4 x y z mask) + // result: (VPSHRDVQMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: 
(MaskedShiftRightAndFillUpperFromUint64x8 x y z mask) + // result: (VPSHRDVQMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightInt16x16 x y mask) + // result: (VPSRLVWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLVWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightInt16x32 x y mask) + // result: (VPSRLVWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLVWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightInt16x8 x y mask) + // result: (VPSRLVWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLVWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightInt32x16 x y mask) + // result: (VPSRLVDMasked512 x y (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLVDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightInt32x4 x y mask) + // result: (VPSRLVDMasked128 x y (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLVDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightInt32x8 x y mask) + // result: (VPSRLVDMasked256 x y (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLVDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightInt64x2 x y mask) + // result: (VPSRLVQMasked128 x y (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLVQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func 
rewriteValueAMD64_OpMaskedShiftRightInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightInt64x4 x y mask) + // result: (VPSRLVQMasked256 x y (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLVQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightInt64x8 x y mask) + // result: (VPSRLVQMasked512 x y (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLVQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightSignExtendedInt16x16 x y mask) + // result: (VPSRAVWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRAVWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightSignExtendedInt16x32 x y mask) + // result: (VPSRAVWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRAVWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightSignExtendedInt16x8 x y mask) + // result: (VPSRAVWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRAVWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightSignExtendedInt32x16 x y mask) + // result: (VPSRAVDMasked512 x y (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRAVDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightSignExtendedInt32x4 x y mask) + // result: (VPSRAVDMasked128 x y (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRAVDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8(v *Value) bool { - v_3 := v.Args[3] +func rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // 
match: (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8 x y z mask) - // result: (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) + // match: (MaskedShiftRightSignExtendedInt32x8 x y mask) + // result: (VPSRAVDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDSMasked256) + mask := v_2 + v.reset(OpAMD64VPSRAVDMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16(v *Value) bool { - v_3 := v.Args[3] +func rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16 x y z mask) - // result: (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) + // match: (MaskedShiftRightSignExtendedInt64x2 x y mask) + // result: (VPSRAVQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDSMasked512) + mask := v_2 + v.reset(OpAMD64VPSRAVQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightSignExtendedInt64x4 x y mask) + // result: (VPSRAVQMasked256 x y (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRAVQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightSignExtendedInt64x8 x y mask) + // result: (VPSRAVQMasked512 x y (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRAVQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightSignExtendedUint16x16 x y mask) + // result: (VPSRAVWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRAVWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightSignExtendedUint16x32 x y mask) + // result: (VPSRAVWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRAVWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightSignExtendedUint16x8 x y mask) + // result: (VPSRAVWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + 
mask := v_2 + v.reset(OpAMD64VPSRAVWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightSignExtendedUint32x16 x y mask) + // result: (VPSRAVDMasked512 x y (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRAVDMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4(v *Value) bool { - v_3 := v.Args[3] +func rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4 x y z mask) - // result: (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) + // match: (MaskedShiftRightSignExtendedUint32x4 x y mask) + // result: (VPSRAVDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDSMasked128) + mask := v_2 + v.reset(OpAMD64VPSRAVDMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8(v *Value) bool { - v_3 := v.Args[3] +func rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8 x y z mask) - // result: (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) + // match: (MaskedShiftRightSignExtendedUint32x8 x y mask) + // result: (VPSRAVDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDSMasked256) + mask := v_2 + v.reset(OpAMD64VPSRAVDMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightSignExtendedUint64x2 x y mask) + // result: (VPSRAVQMasked128 x y (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRAVQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightSignExtendedUint64x4 x y mask) + // result: (VPSRAVQMasked256 x y (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRAVQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightSignExtendedUint64x8 x y mask) + // result: (VPSRAVQMasked512 x y (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 
+ mask := v_2 + v.reset(OpAMD64VPSRAVQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightUint16x16 x y mask) + // result: (VPSRLVWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLVWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightUint16x32 x y mask) + // result: (VPSRLVWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLVWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightUint16x8 x y mask) + // result: (VPSRLVWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLVWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightUint32x16 x y mask) + // result: (VPSRLVDMasked512 x y (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLVDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightUint32x4 x y mask) + // result: (VPSRLVDMasked128 x y (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLVDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightUint32x8 x y mask) + // result: (VPSRLVDMasked256 x y (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLVDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightUint64x2 x y mask) + // result: (VPSRLVQMasked128 x y (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLVQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightUint64x4 x y mask) 
+ // result: (VPSRLVQMasked256 x y (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLVQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightUint64x8 x y mask) + // result: (VPSRLVQMasked512 x y (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLVQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } @@ -47629,6 +52120,318 @@ func rewriteValueAMD64_OpPopCount8(v *Value) bool { return true } } +func rewriteValueAMD64_OpRotateAllLeftInt32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllLeftInt32x16 [a] x) + // result: (VPROLD512 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPROLD512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRotateAllLeftInt32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllLeftInt32x4 [a] x) + // result: (VPROLD128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPROLD128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRotateAllLeftInt32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllLeftInt32x8 [a] x) + // result: (VPROLD256 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPROLD256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRotateAllLeftInt64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllLeftInt64x2 [a] x) + // result: (VPROLQ128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPROLQ128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRotateAllLeftInt64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllLeftInt64x4 [a] x) + // result: (VPROLQ256 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPROLQ256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRotateAllLeftInt64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllLeftInt64x8 [a] x) + // result: (VPROLQ512 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPROLQ512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRotateAllLeftUint32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllLeftUint32x16 [a] x) + // result: (VPROLD512 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPROLD512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRotateAllLeftUint32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllLeftUint32x4 [a] x) + // result: (VPROLD128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPROLD128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRotateAllLeftUint32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllLeftUint32x8 [a] x) + // result: (VPROLD256 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPROLD256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRotateAllLeftUint64x2(v *Value) bool { + v_0 
:= v.Args[0] + // match: (RotateAllLeftUint64x2 [a] x) + // result: (VPROLQ128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPROLQ128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRotateAllLeftUint64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllLeftUint64x4 [a] x) + // result: (VPROLQ256 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPROLQ256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRotateAllLeftUint64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllLeftUint64x8 [a] x) + // result: (VPROLQ512 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPROLQ512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRotateAllRightInt32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllRightInt32x16 [a] x) + // result: (VPRORD512 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPRORD512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRotateAllRightInt32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllRightInt32x4 [a] x) + // result: (VPRORD128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPRORD128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRotateAllRightInt32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllRightInt32x8 [a] x) + // result: (VPRORD256 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPRORD256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRotateAllRightInt64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllRightInt64x2 [a] x) + // result: (VPRORQ128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPRORQ128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRotateAllRightInt64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllRightInt64x4 [a] x) + // result: (VPRORQ256 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPRORQ256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRotateAllRightInt64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllRightInt64x8 [a] x) + // result: (VPRORQ512 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPRORQ512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRotateAllRightUint32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllRightUint32x16 [a] x) + // result: (VPRORD512 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPRORD512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRotateAllRightUint32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllRightUint32x4 [a] x) + // result: (VPRORD128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPRORD128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRotateAllRightUint32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllRightUint32x8 [a] x) + // result: (VPRORD256 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPRORD256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func 
rewriteValueAMD64_OpRotateAllRightUint64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllRightUint64x2 [a] x) + // result: (VPRORQ128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPRORQ128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRotateAllRightUint64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllRightUint64x4 [a] x) + // result: (VPRORQ256 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPRORQ256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRotateAllRightUint64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllRightUint64x8 [a] x) + // result: (VPRORQ512 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPRORQ512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} func rewriteValueAMD64_OpRoundFloat32x4(v *Value) bool { v_0 := v.Args[0] // match: (RoundFloat32x4 x) @@ -49718,6 +54521,546 @@ func rewriteValueAMD64_OpSetElemUint8x16(v *Value) bool { return true } } +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftAndFillUpperFromInt16x16 [a] x y) + // result: (VPSHLDW256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHLDW256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftAndFillUpperFromInt16x32 [a] x y) + // result: (VPSHLDW512 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHLDW512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftAndFillUpperFromInt16x8 [a] x y) + // result: (VPSHLDW128 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHLDW128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftAndFillUpperFromInt32x16 [a] x y) + // result: (VPSHLDD512 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHLDD512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftAndFillUpperFromInt32x4 [a] x y) + // result: (VPSHLDD128 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHLDD128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftAndFillUpperFromInt32x8 [a] x y) + // result: (VPSHLDD256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHLDD256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftAndFillUpperFromInt64x2 [a] x y) + // result: (VPSHLDQ128 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := 
v_0 + y := v_1 + v.reset(OpAMD64VPSHLDQ128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftAndFillUpperFromInt64x4 [a] x y) + // result: (VPSHLDQ256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHLDQ256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftAndFillUpperFromInt64x8 [a] x y) + // result: (VPSHLDQ512 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHLDQ512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftAndFillUpperFromUint16x16 [a] x y) + // result: (VPSHLDW256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHLDW256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftAndFillUpperFromUint16x32 [a] x y) + // result: (VPSHLDW512 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHLDW512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftAndFillUpperFromUint16x8 [a] x y) + // result: (VPSHLDW128 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHLDW128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftAndFillUpperFromUint32x16 [a] x y) + // result: (VPSHLDD512 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHLDD512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftAndFillUpperFromUint32x4 [a] x y) + // result: (VPSHLDD128 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHLDD128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftAndFillUpperFromUint32x8 [a] x y) + // result: (VPSHLDD256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHLDD256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftAndFillUpperFromUint64x2 [a] x y) + // result: (VPSHLDQ128 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHLDQ128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // 
match: (ShiftAllLeftAndFillUpperFromUint64x4 [a] x y) + // result: (VPSHLDQ256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHLDQ256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftAndFillUpperFromUint64x8 [a] x y) + // result: (VPSHLDQ512 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHLDQ512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightAndFillUpperFromInt16x16 [a] x y) + // result: (VPSHRDW256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHRDW256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightAndFillUpperFromInt16x32 [a] x y) + // result: (VPSHRDW512 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHRDW512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightAndFillUpperFromInt16x8 [a] x y) + // result: (VPSHRDW128 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHRDW128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightAndFillUpperFromInt32x16 [a] x y) + // result: (VPSHRDD512 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHRDD512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightAndFillUpperFromInt32x4 [a] x y) + // result: (VPSHRDD128 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHRDD128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightAndFillUpperFromInt32x8 [a] x y) + // result: (VPSHRDD256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHRDD256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightAndFillUpperFromInt64x2 [a] x y) + // result: (VPSHRDQ128 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHRDQ128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightAndFillUpperFromInt64x4 [a] x y) + // result: (VPSHRDQ256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHRDQ256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return 
true + } +} +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightAndFillUpperFromInt64x8 [a] x y) + // result: (VPSHRDQ512 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHRDQ512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightAndFillUpperFromUint16x16 [a] x y) + // result: (VPSHRDW256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHRDW256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightAndFillUpperFromUint16x32 [a] x y) + // result: (VPSHRDW512 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHRDW512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightAndFillUpperFromUint16x8 [a] x y) + // result: (VPSHRDW128 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHRDW128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightAndFillUpperFromUint32x16 [a] x y) + // result: (VPSHRDD512 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHRDD512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightAndFillUpperFromUint32x4 [a] x y) + // result: (VPSHRDD128 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHRDD128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightAndFillUpperFromUint32x8 [a] x y) + // result: (VPSHRDD256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHRDD256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightAndFillUpperFromUint64x2 [a] x y) + // result: (VPSHRDQ128 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHRDQ128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightAndFillUpperFromUint64x4 [a] x y) + // result: (VPSHRDQ256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHRDQ256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightAndFillUpperFromUint64x8 [a] x y) + // result: (VPSHRDQ512 [a] 
x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHRDQ512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} func rewriteValueAMD64_OpSlicemask(v *Value) bool { v_0 := v.Args[0] b := v.Block diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 5d6ae7e3c06b7a..d20c9392936be5 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -915,6 +915,54 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedRotateAllLeft", opLen2Imm8(ssa.OpMaskedRotateAllLeftInt32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedRotateAllLeft", opLen2Imm8(ssa.OpMaskedRotateAllLeftInt32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedRotateAllLeft", opLen2Imm8(ssa.OpMaskedRotateAllLeftInt32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedRotateAllLeft", opLen2Imm8(ssa.OpMaskedRotateAllLeftInt64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedRotateAllLeft", opLen2Imm8(ssa.OpMaskedRotateAllLeftInt64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedRotateAllLeft", opLen2Imm8(ssa.OpMaskedRotateAllLeftInt64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedRotateAllLeft", opLen2Imm8(ssa.OpMaskedRotateAllLeftUint32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedRotateAllLeft", opLen2Imm8(ssa.OpMaskedRotateAllLeftUint32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedRotateAllLeft", opLen2Imm8(ssa.OpMaskedRotateAllLeftUint32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedRotateAllLeft", opLen2Imm8(ssa.OpMaskedRotateAllLeftUint64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedRotateAllLeft", opLen2Imm8(ssa.OpMaskedRotateAllLeftUint64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedRotateAllLeft", opLen2Imm8(ssa.OpMaskedRotateAllLeftUint64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedRotateAllRight", opLen2Imm8(ssa.OpMaskedRotateAllRightInt32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedRotateAllRight", opLen2Imm8(ssa.OpMaskedRotateAllRightInt32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedRotateAllRight", opLen2Imm8(ssa.OpMaskedRotateAllRightInt32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedRotateAllRight", opLen2Imm8(ssa.OpMaskedRotateAllRightInt64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedRotateAllRight", opLen2Imm8(ssa.OpMaskedRotateAllRightInt64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedRotateAllRight", opLen2Imm8(ssa.OpMaskedRotateAllRightInt64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedRotateAllRight", opLen2Imm8(ssa.OpMaskedRotateAllRightUint32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedRotateAllRight", opLen2Imm8(ssa.OpMaskedRotateAllRightUint32x8, 
types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedRotateAllRight", opLen2Imm8(ssa.OpMaskedRotateAllRightUint32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedRotateAllRight", opLen2Imm8(ssa.OpMaskedRotateAllRightUint64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedRotateAllRight", opLen2Imm8(ssa.OpMaskedRotateAllRightUint64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedRotateAllRight", opLen2Imm8(ssa.OpMaskedRotateAllRightUint64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedRotateLeft", opLen3(ssa.OpMaskedRotateLeftInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedRotateLeft", opLen3(ssa.OpMaskedRotateLeftInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedRotateLeft", opLen3(ssa.OpMaskedRotateLeftInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedRotateLeft", opLen3(ssa.OpMaskedRotateLeftInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedRotateLeft", opLen3(ssa.OpMaskedRotateLeftInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedRotateLeft", opLen3(ssa.OpMaskedRotateLeftInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedRotateLeft", opLen3(ssa.OpMaskedRotateLeftUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedRotateLeft", opLen3(ssa.OpMaskedRotateLeftUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedRotateLeft", opLen3(ssa.OpMaskedRotateLeftUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedRotateLeft", opLen3(ssa.OpMaskedRotateLeftUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedRotateLeft", opLen3(ssa.OpMaskedRotateLeftUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedRotateLeft", opLen3(ssa.OpMaskedRotateLeftUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat32x4, 
types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) @@ -963,6 +1011,147 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint32x4.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x8.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x16.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedShiftAllLeft", opLen3(ssa.OpMaskedShiftAllLeftInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedShiftAllLeft", opLen3(ssa.OpMaskedShiftAllLeftInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedShiftAllLeft", opLen3(ssa.OpMaskedShiftAllLeftInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedShiftAllLeft", opLen3(ssa.OpMaskedShiftAllLeftUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedShiftAllLeft", opLen3(ssa.OpMaskedShiftAllLeftUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedShiftAllLeft", opLen3(ssa.OpMaskedShiftAllLeftUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromInt16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromInt16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromInt16x32, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromInt32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromInt32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromInt32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromInt64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromInt64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromInt64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromUint16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromUint16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, 
"Uint16x32.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromUint16x32, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromUint32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromUint32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromUint32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromUint64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromUint64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromUint64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedShiftAllRight", opLen3(ssa.OpMaskedShiftAllRightInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedShiftAllRight", opLen3(ssa.OpMaskedShiftAllRightInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedShiftAllRight", opLen3(ssa.OpMaskedShiftAllRightInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedShiftAllRight", opLen3(ssa.OpMaskedShiftAllRightUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedShiftAllRight", opLen3(ssa.OpMaskedShiftAllRightUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedShiftAllRight", opLen3(ssa.OpMaskedShiftAllRightUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromInt16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromInt16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromInt16x32, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromInt32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromInt32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromInt32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromInt64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromInt64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromInt64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromUint16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedShiftAllRightAndFillUpperFrom", 
opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromUint16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromUint16x32, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromUint32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromUint32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromUint32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromUint64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromUint64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromUint64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedShiftAllRightSignExtended", opLen3(ssa.OpMaskedShiftAllRightSignExtendedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedShiftAllRightSignExtended", opLen3(ssa.OpMaskedShiftAllRightSignExtendedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedShiftAllRightSignExtended", opLen3(ssa.OpMaskedShiftAllRightSignExtendedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, 
"Uint64x4.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightInt32x16, 
types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromUint32x8, types.TypeVec256), sys.AMD64) + 
addF(simdPackage, "Uint32x16.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat32x16, types.TypeVec512), sys.AMD64) @@ -1242,6 +1431,54 @@ func simdIntrinsics(addF func(pkg, fn string, b 
intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.PopCount", opLen1(ssa.OpPopCountUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.PopCount", opLen1(ssa.OpPopCountUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.PopCount", opLen1(ssa.OpPopCountUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.RotateAllLeft", opLen1Imm8(ssa.OpRotateAllLeftInt32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int32x8.RotateAllLeft", opLen1Imm8(ssa.OpRotateAllLeftInt32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int32x16.RotateAllLeft", opLen1Imm8(ssa.OpRotateAllLeftInt32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int64x2.RotateAllLeft", opLen1Imm8(ssa.OpRotateAllLeftInt64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int64x4.RotateAllLeft", opLen1Imm8(ssa.OpRotateAllLeftInt64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int64x8.RotateAllLeft", opLen1Imm8(ssa.OpRotateAllLeftInt64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint32x4.RotateAllLeft", opLen1Imm8(ssa.OpRotateAllLeftUint32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint32x8.RotateAllLeft", opLen1Imm8(ssa.OpRotateAllLeftUint32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint32x16.RotateAllLeft", opLen1Imm8(ssa.OpRotateAllLeftUint32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint64x2.RotateAllLeft", opLen1Imm8(ssa.OpRotateAllLeftUint64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint64x4.RotateAllLeft", opLen1Imm8(ssa.OpRotateAllLeftUint64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint64x8.RotateAllLeft", opLen1Imm8(ssa.OpRotateAllLeftUint64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int32x4.RotateAllRight", opLen1Imm8(ssa.OpRotateAllRightInt32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int32x8.RotateAllRight", opLen1Imm8(ssa.OpRotateAllRightInt32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int32x16.RotateAllRight", opLen1Imm8(ssa.OpRotateAllRightInt32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int64x2.RotateAllRight", opLen1Imm8(ssa.OpRotateAllRightInt64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int64x4.RotateAllRight", opLen1Imm8(ssa.OpRotateAllRightInt64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int64x8.RotateAllRight", opLen1Imm8(ssa.OpRotateAllRightInt64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint32x4.RotateAllRight", opLen1Imm8(ssa.OpRotateAllRightUint32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint32x8.RotateAllRight", opLen1Imm8(ssa.OpRotateAllRightUint32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint32x16.RotateAllRight", opLen1Imm8(ssa.OpRotateAllRightUint32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint64x2.RotateAllRight", opLen1Imm8(ssa.OpRotateAllRightUint64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint64x4.RotateAllRight", opLen1Imm8(ssa.OpRotateAllRightUint64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint64x8.RotateAllRight", opLen1Imm8(ssa.OpRotateAllRightUint64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int32x4.RotateLeft", opLen2(ssa.OpRotateLeftInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.RotateLeft", opLen2(ssa.OpRotateLeftInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.RotateLeft", opLen2(ssa.OpRotateLeftInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.RotateLeft", 
opLen2(ssa.OpRotateLeftInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.RotateLeft", opLen2(ssa.OpRotateLeftInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.RotateLeft", opLen2(ssa.OpRotateLeftInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.RotateLeft", opLen2(ssa.OpRotateLeftUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.RotateLeft", opLen2(ssa.OpRotateLeftUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.RotateLeft", opLen2(ssa.OpRotateLeftUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.RotateLeft", opLen2(ssa.OpRotateLeftUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.RotateLeft", opLen2(ssa.OpRotateLeftUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.RotateLeft", opLen2(ssa.OpRotateLeftUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.RotateRight", opLen2(ssa.OpRotateRightInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.RotateRight", opLen2(ssa.OpRotateRightInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.RotateRight", opLen2(ssa.OpRotateRightInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.RotateRight", opLen2(ssa.OpRotateRightInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.RotateRight", opLen2(ssa.OpRotateRightInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.RotateRight", opLen2(ssa.OpRotateRightInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.RotateRight", opLen2(ssa.OpRotateRightUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.RotateRight", opLen2(ssa.OpRotateRightUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.RotateRight", opLen2(ssa.OpRotateRightUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.RotateRight", opLen2(ssa.OpRotateRightUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.RotateRight", opLen2(ssa.OpRotateRightUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.RotateRight", opLen2(ssa.OpRotateRightUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Round", opLen1(ssa.OpRoundFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Round", opLen1(ssa.OpRoundFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.Round", opLen1(ssa.OpRoundFloat64x2, types.TypeVec128), sys.AMD64) @@ -1306,6 +1543,167 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
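Note on the rotate registrations above: the immediate forms RotateAllLeft/RotateAllRight are wired through opLen1Imm8 (one vector plus a constant 8-bit count), while RotateLeft/RotateRight go through opLen2 (two vectors, per-lane variable counts); both are registered only for 32- and 64-bit element types. A minimal usage sketch of the variable form, assuming the simd import path used by the tests later in this patch, a GOEXPERIMENT=simd toolchain on amd64, and hardware with the underlying rotate instructions:

	package main

	import (
		"fmt"
		"simd" // assumed import path, as in the wrapped tests; guarded by GOEXPERIMENT=simd
	)

	func main() {
		// Per-lane variable rotate: lane i of x is rotated left by the count in lane i of n.
		x := simd.LoadUint32x4Slice([]uint32{0x80000001, 2, 3, 4})
		n := simd.LoadUint32x4Slice([]uint32{1, 4, 8, 16})
		out := make([]uint32, 4)
		x.RotateLeft(n).StoreSlice(out)
		fmt.Println(out) // lane 0 should be 0x3 if rotate-left-by-1 behaves as assumed
	}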
addF(simdPackage, "Uint16x8.SetElem", opLen2Imm8(ssa.OpSetElemUint16x8, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Uint32x4.SetElem", opLen2Imm8(ssa.OpSetElemUint32x4, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Uint64x2.SetElem", opLen2Imm8(ssa.OpSetElemUint64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x4.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromInt16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromInt16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromInt16x32, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromInt32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromInt32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromInt32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromInt64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromInt64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromInt64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromUint16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromUint16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint16x32.ShiftAllLeftAndFillUpperFrom", 
opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromUint16x32, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint32x4.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromUint32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromUint32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromUint32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromUint64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromUint64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint64x8.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromUint64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftAllRight", opLen2(ssa.OpShiftAllRightInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftAllRight", opLen2(ssa.OpShiftAllRightInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftAllRight", opLen2(ssa.OpShiftAllRightInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftAllRight", opLen2(ssa.OpShiftAllRightInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftAllRight", opLen2(ssa.OpShiftAllRightInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftAllRight", opLen2(ssa.OpShiftAllRightInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftAllRight", opLen2(ssa.OpShiftAllRightInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftAllRight", opLen2(ssa.OpShiftAllRightUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftAllRight", opLen2(ssa.OpShiftAllRightUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x4.ShiftAllRight", opLen2(ssa.OpShiftAllRightUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftAllRight", opLen2(ssa.OpShiftAllRightUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftAllRight", opLen2(ssa.OpShiftAllRightUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftAllRight", opLen2(ssa.OpShiftAllRightUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.ShiftAllRight", opLen2(ssa.OpShiftAllRightUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromInt16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromInt16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromInt16x32, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromInt32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromInt32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromInt32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftAllRightAndFillUpperFrom", 
opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromInt64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromInt64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromInt64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromUint16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromUint16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint16x32.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromUint16x32, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint32x4.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromUint32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromUint32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromUint32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromUint64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromUint64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint64x8.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromUint64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftAllRightSignExtended", opLen2(ssa.OpShiftAllRightSignExtendedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftAllRightSignExtended", opLen2(ssa.OpShiftAllRightSignExtendedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftAllRightSignExtended", opLen2(ssa.OpShiftAllRightSignExtendedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftAllRightSignExtended", opLen2(ssa.OpShiftAllRightSignExtendedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftAllRightSignExtended", opLen2(ssa.OpShiftAllRightSignExtendedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftAllRightSignExtended", opLen2(ssa.OpShiftAllRightSignExtendedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftAllRightSignExtended", opLen2(ssa.OpShiftAllRightSignExtendedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftLeft", opLen2(ssa.OpShiftLeftInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftLeft", opLen2(ssa.OpShiftLeftInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftLeft", opLen2(ssa.OpShiftLeftInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftLeft", opLen2(ssa.OpShiftLeftInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftLeft", opLen2(ssa.OpShiftLeftInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftLeft", opLen2(ssa.OpShiftLeftInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftLeft", opLen2(ssa.OpShiftLeftInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftLeft", opLen2(ssa.OpShiftLeftInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftLeft", 
opLen2(ssa.OpShiftLeftInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftLeft", opLen2(ssa.OpShiftLeftUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftLeft", opLen2(ssa.OpShiftLeftUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.ShiftLeft", opLen2(ssa.OpShiftLeftUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.ShiftLeft", opLen2(ssa.OpShiftLeftUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftLeft", opLen2(ssa.OpShiftLeftUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftLeft", opLen2(ssa.OpShiftLeftUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftLeft", opLen2(ssa.OpShiftLeftUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftLeft", opLen2(ssa.OpShiftLeftUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.ShiftLeft", opLen2(ssa.OpShiftLeftUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftRight", opLen2(ssa.OpShiftRightInt16x8, types.TypeVec128), sys.AMD64) + 
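The *AndFillUpperFrom families registered above are concatenate-and-shift ("funnel") operations: the ShiftAll* forms take a single immediate count (opLen2Imm8), and the Shift* forms registered here take a per-lane count vector (opLen3). A scalar model of one 16-bit lane, assuming VPSHLD/VPSHLDV-style semantics in which the bits vacated at the low end of x are filled from the high bits of y and counts are reduced modulo the lane width; this is a sketch of the assumed semantics, not the package's implementation:

	// shiftLeftFillUpperFrom16 models one 16-bit lane of ShiftLeftAndFillUpperFrom.
	func shiftLeftFillUpperFrom16(x, y uint16, s uint) uint16 {
		s %= 16 // assumed: counts wrap at the lane width
		if s == 0 {
			return x
		}
		return x<<s | y>>(16-s)
	}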
addF(simdPackage, "Int16x16.ShiftRight", opLen2(ssa.OpShiftRightInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftRight", opLen2(ssa.OpShiftRightInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftRight", opLen2(ssa.OpShiftRightInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftRight", opLen2(ssa.OpShiftRightInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftRight", opLen2(ssa.OpShiftRightInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftRight", opLen2(ssa.OpShiftRightInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftRight", opLen2(ssa.OpShiftRightInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftRight", opLen2(ssa.OpShiftRightInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftRight", opLen2(ssa.OpShiftRightUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftRight", opLen2(ssa.OpShiftRightUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.ShiftRight", opLen2(ssa.OpShiftRightUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.ShiftRight", opLen2(ssa.OpShiftRightUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftRight", opLen2(ssa.OpShiftRightUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftRight", opLen2(ssa.OpShiftRightUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftRight", opLen2(ssa.OpShiftRightUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftRight", opLen2(ssa.OpShiftRightUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.ShiftRight", opLen2(ssa.OpShiftRightUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromUint32x4, 
types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.Sign", opLen2(ssa.OpSignInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Sign", opLen2(ssa.OpSignInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x8.Sign", opLen2(ssa.OpSignInt16x8, types.TypeVec128), sys.AMD64) diff --git a/src/simd/simd_wrapped_test.go b/src/simd/simd_wrapped_test.go index b5f6bb517a318f..ad828e9d3f81c3 100644 --- a/src/simd/simd_wrapped_test.go +++ b/src/simd/simd_wrapped_test.go @@ -2147,6 +2147,12 @@ func 
testInt16x8Binary(t *testing.T, v0 []int16, v1 []int16, want []int16, which gotv = vec0.SaturatedPairwiseSub(vec1) case "SaturatedSub": gotv = vec0.SaturatedSub(vec1) + case "ShiftLeft": + gotv = vec0.ShiftLeft(vec1) + case "ShiftRight": + gotv = vec0.ShiftRight(vec1) + case "ShiftRightSignExtended": + gotv = vec0.ShiftRightSignExtended(vec1) case "Sign": gotv = vec0.Sign(vec1) case "Sub": @@ -2187,6 +2193,12 @@ func testInt16x8BinaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, w gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x8()) case "MaskedSaturatedSub": gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x8()) + case "MaskedShiftLeft": + gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask16x8()) + case "MaskedShiftRight": + gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask16x8()) + case "MaskedShiftRightSignExtended": + gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask16x8()) case "MaskedSub": gotv = vec0.MaskedSub(vec1, vec2.AsMask16x8()) @@ -2307,6 +2319,55 @@ func testInt16x8MaskedCompare(t *testing.T, v0 []int16, v1 []int16, v2 []int16, } } +func testInt16x8Ternary(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x8 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x8Slice(v0) + vec1 := simd.LoadInt16x8Slice(v1) + vec2 := simd.LoadInt16x8Slice(v2) + switch which { + case "ShiftLeftAndFillUpperFrom": + gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) + case "ShiftRightAndFillUpperFrom": + gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) + + default: + t.Errorf("Unknown method: Int16x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x8TernaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, v3 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x8 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x8Slice(v0) + vec1 := simd.LoadInt16x8Slice(v1) + vec2 := simd.LoadInt16x8Slice(v2) + vec3 := simd.LoadInt16x8Slice(v3) + switch which { + case "MaskedShiftLeftAndFillUpperFrom": + gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask16x8()) + case "MaskedShiftRightAndFillUpperFrom": + gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask16x8()) + + default: + t.Errorf("Unknown method: Int16x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testInt16x8Unary(t *testing.T, v0 []int16, want []int16, which string) { t.Helper() var gotv simd.Int16x8 @@ -2387,6 +2448,12 @@ func testInt16x16Binary(t *testing.T, v0 []int16, v1 []int16, want []int16, whic gotv = vec0.SaturatedPairwiseSub(vec1) case "SaturatedSub": gotv = vec0.SaturatedSub(vec1) + case "ShiftLeft": + gotv = vec0.ShiftLeft(vec1) + case "ShiftRight": + gotv = vec0.ShiftRight(vec1) + case "ShiftRightSignExtended": + gotv = vec0.ShiftRightSignExtended(vec1) case "Sign": gotv = vec0.Sign(vec1) case "Sub": @@ -2427,6 +2494,12 @@ func testInt16x16BinaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x16()) case "MaskedSaturatedSub": gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x16()) + case "MaskedShiftLeft": + gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask16x16()) + case "MaskedShiftRight": + gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask16x16()) + 
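The wrapped helpers added above all follow one pattern per element type and arity: load the inputs with Load<Type>Slice, dispatch on the method name, store the result, and compare lane by lane against want. The drivers that feed them live elsewhere in the test suite; a hypothetical standalone caller for the new ternary helper, placed in the same test package, might look like the sketch below. It only smoke-tests the zero-count case, on the assumption that a zero shift count with a zero fill vector leaves the receiver unchanged whichever of the two arguments carries the counts:

	func TestInt16x8ShiftLeftAndFillUpperFromZeroCount(t *testing.T) {
		// Hypothetical driver, not part of the generated suite.
		x := []int16{1, 2, 3, 4, 5, 6, 7, 8}
		zero := make([]int16, 8)
		testInt16x8Ternary(t, x, zero, zero, x, "ShiftLeftAndFillUpperFrom")
	}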
case "MaskedShiftRightSignExtended": + gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask16x16()) case "MaskedSub": gotv = vec0.MaskedSub(vec1, vec2.AsMask16x16()) @@ -2547,6 +2620,55 @@ func testInt16x16MaskedCompare(t *testing.T, v0 []int16, v1 []int16, v2 []int16, } } +func testInt16x16Ternary(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x16 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x16Slice(v0) + vec1 := simd.LoadInt16x16Slice(v1) + vec2 := simd.LoadInt16x16Slice(v2) + switch which { + case "ShiftLeftAndFillUpperFrom": + gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) + case "ShiftRightAndFillUpperFrom": + gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) + + default: + t.Errorf("Unknown method: Int16x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x16TernaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, v3 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x16 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x16Slice(v0) + vec1 := simd.LoadInt16x16Slice(v1) + vec2 := simd.LoadInt16x16Slice(v2) + vec3 := simd.LoadInt16x16Slice(v3) + switch which { + case "MaskedShiftLeftAndFillUpperFrom": + gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask16x16()) + case "MaskedShiftRightAndFillUpperFrom": + gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask16x16()) + + default: + t.Errorf("Unknown method: Int16x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testInt16x16Unary(t *testing.T, v0 []int16, want []int16, which string) { t.Helper() var gotv simd.Int16x16 @@ -2613,6 +2735,12 @@ func testInt16x32Binary(t *testing.T, v0 []int16, v1 []int16, want []int16, whic gotv = vec0.SaturatedAdd(vec1) case "SaturatedSub": gotv = vec0.SaturatedSub(vec1) + case "ShiftLeft": + gotv = vec0.ShiftLeft(vec1) + case "ShiftRight": + gotv = vec0.ShiftRight(vec1) + case "ShiftRightSignExtended": + gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) @@ -2649,6 +2777,12 @@ func testInt16x32BinaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x32()) case "MaskedSaturatedSub": gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x32()) + case "MaskedShiftLeft": + gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask16x32()) + case "MaskedShiftRight": + gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask16x32()) + case "MaskedShiftRightSignExtended": + gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask16x32()) case "MaskedSub": gotv = vec0.MaskedSub(vec1, vec2.AsMask16x32()) @@ -2769,6 +2903,55 @@ func testInt16x32MaskedCompare(t *testing.T, v0 []int16, v1 []int16, v2 []int16, } } +func testInt16x32Ternary(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x32 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x32Slice(v0) + vec1 := simd.LoadInt16x32Slice(v1) + vec2 := simd.LoadInt16x32Slice(v2) + switch which { + case "ShiftLeftAndFillUpperFrom": + gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) + case "ShiftRightAndFillUpperFrom": + gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) + + default: + t.Errorf("Unknown 
method: Int16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x32TernaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, v3 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x32 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x32Slice(v0) + vec1 := simd.LoadInt16x32Slice(v1) + vec2 := simd.LoadInt16x32Slice(v2) + vec3 := simd.LoadInt16x32Slice(v3) + switch which { + case "MaskedShiftLeftAndFillUpperFrom": + gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask16x32()) + case "MaskedShiftRightAndFillUpperFrom": + gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask16x32()) + + default: + t.Errorf("Unknown method: Int16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testInt16x32Unary(t *testing.T, v0 []int16, want []int16, which string) { t.Helper() var gotv simd.Int16x32 @@ -2839,6 +3022,16 @@ func testInt32x4Binary(t *testing.T, v0 []int32, v1 []int32, want []int32, which gotv = vec0.PairwiseAdd(vec1) case "PairwiseSub": gotv = vec0.PairwiseSub(vec1) + case "RotateLeft": + gotv = vec0.RotateLeft(vec1) + case "RotateRight": + gotv = vec0.RotateRight(vec1) + case "ShiftLeft": + gotv = vec0.ShiftLeft(vec1) + case "ShiftRight": + gotv = vec0.ShiftRight(vec1) + case "ShiftRightSignExtended": + gotv = vec0.ShiftRightSignExtended(vec1) case "Sign": gotv = vec0.Sign(vec1) case "Sub": @@ -2879,6 +3072,16 @@ func testInt32x4BinaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, w gotv = vec0.MaskedMulLow(vec1, vec2.AsMask32x4()) case "MaskedOr": gotv = vec0.MaskedOr(vec1, vec2.AsMask32x4()) + case "MaskedRotateLeft": + gotv = vec0.MaskedRotateLeft(vec1, vec2.AsMask32x4()) + case "MaskedRotateRight": + gotv = vec0.MaskedRotateRight(vec1, vec2.AsMask32x4()) + case "MaskedShiftLeft": + gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask32x4()) + case "MaskedShiftRight": + gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask32x4()) + case "MaskedShiftRightSignExtended": + gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask32x4()) case "MaskedSub": gotv = vec0.MaskedSub(vec1, vec2.AsMask32x4()) case "MaskedXor": @@ -3028,6 +3231,55 @@ func testInt32x4MaskedCompare(t *testing.T, v0 []int32, v1 []int32, v2 []int32, } } +func testInt32x4Ternary(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x4 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x4Slice(v0) + vec1 := simd.LoadInt32x4Slice(v1) + vec2 := simd.LoadInt32x4Slice(v2) + switch which { + case "ShiftLeftAndFillUpperFrom": + gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) + case "ShiftRightAndFillUpperFrom": + gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) + + default: + t.Errorf("Unknown method: Int32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x4TernaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, v3 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x4 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x4Slice(v0) + vec1 := simd.LoadInt32x4Slice(v1) + vec2 := simd.LoadInt32x4Slice(v2) + vec3 := simd.LoadInt32x4Slice(v3) + 
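The Masked* cases added for Int32x4 above thread a per-lane mask through the same operations: the mask is built from an ordinary integer vector with AsMask32x4, and the generated tests simply compare against externally supplied want values rather than asserting what masked-out lanes contain. A hedged sketch of calling one masked shift directly; how AsMask32x4 maps integer lanes to mask bits, and what unselected lanes hold in the result, are assumptions left unchecked here:

	// maskedShiftLeftSketch is hypothetical and not part of the generated suite.
	func maskedShiftLeftSketch() []int32 {
		x := simd.LoadInt32x4Slice([]int32{1, 2, 3, 4})
		n := simd.LoadInt32x4Slice([]int32{1, 1, 1, 1})
		m := simd.LoadInt32x4Slice([]int32{-1, -1, 0, 0}).AsMask32x4()
		out := make([]int32, 4)
		x.MaskedShiftLeft(n, m).StoreSlice(out)
		return out
	}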
switch which { + case "MaskedShiftLeftAndFillUpperFrom": + gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask32x4()) + case "MaskedShiftRightAndFillUpperFrom": + gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask32x4()) + + default: + t.Errorf("Unknown method: Int32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testInt32x4Uint8x16Int8x16Int32x4(t *testing.T, v0 []int32, v1 []uint8, v2 []int8, want []int32, which string) { t.Helper() var gotv simd.Int32x4 @@ -3147,6 +3399,16 @@ func testInt32x8Binary(t *testing.T, v0 []int32, v1 []int32, want []int32, which gotv = vec0.PairwiseAdd(vec1) case "PairwiseSub": gotv = vec0.PairwiseSub(vec1) + case "RotateLeft": + gotv = vec0.RotateLeft(vec1) + case "RotateRight": + gotv = vec0.RotateRight(vec1) + case "ShiftLeft": + gotv = vec0.ShiftLeft(vec1) + case "ShiftRight": + gotv = vec0.ShiftRight(vec1) + case "ShiftRightSignExtended": + gotv = vec0.ShiftRightSignExtended(vec1) case "Sign": gotv = vec0.Sign(vec1) case "Sub": @@ -3187,6 +3449,16 @@ func testInt32x8BinaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, w gotv = vec0.MaskedMulLow(vec1, vec2.AsMask32x8()) case "MaskedOr": gotv = vec0.MaskedOr(vec1, vec2.AsMask32x8()) + case "MaskedRotateLeft": + gotv = vec0.MaskedRotateLeft(vec1, vec2.AsMask32x8()) + case "MaskedRotateRight": + gotv = vec0.MaskedRotateRight(vec1, vec2.AsMask32x8()) + case "MaskedShiftLeft": + gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask32x8()) + case "MaskedShiftRight": + gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask32x8()) + case "MaskedShiftRightSignExtended": + gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask32x8()) case "MaskedSub": gotv = vec0.MaskedSub(vec1, vec2.AsMask32x8()) case "MaskedXor": @@ -3336,6 +3608,55 @@ func testInt32x8MaskedCompare(t *testing.T, v0 []int32, v1 []int32, v2 []int32, } } +func testInt32x8Ternary(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x8 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x8Slice(v0) + vec1 := simd.LoadInt32x8Slice(v1) + vec2 := simd.LoadInt32x8Slice(v2) + switch which { + case "ShiftLeftAndFillUpperFrom": + gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) + case "ShiftRightAndFillUpperFrom": + gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) + + default: + t.Errorf("Unknown method: Int32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x8TernaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, v3 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x8 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x8Slice(v0) + vec1 := simd.LoadInt32x8Slice(v1) + vec2 := simd.LoadInt32x8Slice(v2) + vec3 := simd.LoadInt32x8Slice(v3) + switch which { + case "MaskedShiftLeftAndFillUpperFrom": + gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask32x8()) + case "MaskedShiftRightAndFillUpperFrom": + gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask32x8()) + + default: + t.Errorf("Unknown method: Int32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func 
testInt32x8Uint8x32Int8x32Int32x8(t *testing.T, v0 []int32, v1 []uint8, v2 []int8, want []int32, which string) { t.Helper() var gotv simd.Int32x8 @@ -3451,6 +3772,16 @@ func testInt32x16Binary(t *testing.T, v0 []int32, v1 []int32, want []int32, whic gotv = vec0.MulLow(vec1) case "Or": gotv = vec0.Or(vec1) + case "RotateLeft": + gotv = vec0.RotateLeft(vec1) + case "RotateRight": + gotv = vec0.RotateRight(vec1) + case "ShiftLeft": + gotv = vec0.ShiftLeft(vec1) + case "ShiftRight": + gotv = vec0.ShiftRight(vec1) + case "ShiftRightSignExtended": + gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": @@ -3489,6 +3820,16 @@ func testInt32x16BinaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, gotv = vec0.MaskedMulLow(vec1, vec2.AsMask32x16()) case "MaskedOr": gotv = vec0.MaskedOr(vec1, vec2.AsMask32x16()) + case "MaskedRotateLeft": + gotv = vec0.MaskedRotateLeft(vec1, vec2.AsMask32x16()) + case "MaskedRotateRight": + gotv = vec0.MaskedRotateRight(vec1, vec2.AsMask32x16()) + case "MaskedShiftLeft": + gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask32x16()) + case "MaskedShiftRight": + gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask32x16()) + case "MaskedShiftRightSignExtended": + gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask32x16()) case "MaskedSub": gotv = vec0.MaskedSub(vec1, vec2.AsMask32x16()) case "MaskedXor": @@ -3617,6 +3958,55 @@ func testInt32x16MaskedCompare(t *testing.T, v0 []int32, v1 []int32, v2 []int32, } } +func testInt32x16Ternary(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x16Slice(v0) + vec1 := simd.LoadInt32x16Slice(v1) + vec2 := simd.LoadInt32x16Slice(v2) + switch which { + case "ShiftLeftAndFillUpperFrom": + gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) + case "ShiftRightAndFillUpperFrom": + gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) + + default: + t.Errorf("Unknown method: Int32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x16TernaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, v3 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x16Slice(v0) + vec1 := simd.LoadInt32x16Slice(v1) + vec2 := simd.LoadInt32x16Slice(v2) + vec3 := simd.LoadInt32x16Slice(v3) + switch which { + case "MaskedShiftLeftAndFillUpperFrom": + gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask32x16()) + case "MaskedShiftRightAndFillUpperFrom": + gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask32x16()) + + default: + t.Errorf("Unknown method: Int32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testInt32x16Uint8x64Int8x64Int32x16(t *testing.T, v0 []int32, v1 []uint8, v2 []int8, want []int32, which string) { t.Helper() var gotv simd.Int32x16 @@ -3734,6 +4124,16 @@ func testInt64x2Binary(t *testing.T, v0 []int64, v1 []int64, want []int64, which gotv = vec0.MulLow(vec1) case "Or": gotv = vec0.Or(vec1) + case "RotateLeft": + gotv = vec0.RotateLeft(vec1) + case "RotateRight": + gotv = vec0.RotateRight(vec1) + case "ShiftLeft": + gotv = vec0.ShiftLeft(vec1) + case "ShiftRight": + gotv = 
vec0.ShiftRight(vec1) + case "ShiftRightSignExtended": + gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": @@ -3774,6 +4174,16 @@ func testInt64x2BinaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, w gotv = vec0.MaskedMulLow(vec1, vec2.AsMask64x2()) case "MaskedOr": gotv = vec0.MaskedOr(vec1, vec2.AsMask64x2()) + case "MaskedRotateLeft": + gotv = vec0.MaskedRotateLeft(vec1, vec2.AsMask64x2()) + case "MaskedRotateRight": + gotv = vec0.MaskedRotateRight(vec1, vec2.AsMask64x2()) + case "MaskedShiftLeft": + gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask64x2()) + case "MaskedShiftRight": + gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask64x2()) + case "MaskedShiftRightSignExtended": + gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask64x2()) case "MaskedSub": gotv = vec0.MaskedSub(vec1, vec2.AsMask64x2()) case "MaskedXor": @@ -3853,16 +4263,18 @@ func testInt64x2MaskedCompare(t *testing.T, v0 []int64, v1 []int64, v2 []int64, } } -func testInt64x2Unary(t *testing.T, v0 []int64, want []int64, which string) { +func testInt64x2Ternary(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { t.Helper() var gotv simd.Int64x2 got := make([]int64, len(want)) vec0 := simd.LoadInt64x2Slice(v0) + vec1 := simd.LoadInt64x2Slice(v1) + vec2 := simd.LoadInt64x2Slice(v2) switch which { - case "Absolute": - gotv = vec0.Absolute() - case "PopCount": - gotv = vec0.PopCount() + case "ShiftLeftAndFillUpperFrom": + gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) + case "ShiftRightAndFillUpperFrom": + gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) default: t.Errorf("Unknown method: Int64x2.%s", which) @@ -3875,17 +4287,19 @@ func testInt64x2Unary(t *testing.T, v0 []int64, want []int64, which string) { } } -func testInt64x2UnaryMasked(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { +func testInt64x2TernaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, v3 []int64, want []int64, which string) { t.Helper() var gotv simd.Int64x2 got := make([]int64, len(want)) vec0 := simd.LoadInt64x2Slice(v0) vec1 := simd.LoadInt64x2Slice(v1) + vec2 := simd.LoadInt64x2Slice(v2) + vec3 := simd.LoadInt64x2Slice(v3) switch which { - case "MaskedAbsolute": - gotv = vec0.MaskedAbsolute(vec1.AsMask64x2()) - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask64x2()) + case "MaskedShiftLeftAndFillUpperFrom": + gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask64x2()) + case "MaskedShiftRightAndFillUpperFrom": + gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask64x2()) default: t.Errorf("Unknown method: Int64x2.%s", which) @@ -3898,17 +4312,62 @@ func testInt64x2UnaryMasked(t *testing.T, v0 []int64, v1 []int64, want []int64, } } -func testInt64x4Binary(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { +func testInt64x2Unary(t *testing.T, v0 []int64, want []int64, which string) { t.Helper() - var gotv simd.Int64x4 + var gotv simd.Int64x2 got := make([]int64, len(want)) - vec0 := simd.LoadInt64x4Slice(v0) - vec1 := simd.LoadInt64x4Slice(v1) + vec0 := simd.LoadInt64x2Slice(v0) switch which { - case "Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) + case "Absolute": + gotv = vec0.Absolute() + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Int64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + 
+func testInt64x2UnaryMasked(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x2 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x2Slice(v0) + vec1 := simd.LoadInt64x2Slice(v1) + switch which { + case "MaskedAbsolute": + gotv = vec0.MaskedAbsolute(vec1.AsMask64x2()) + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask64x2()) + + default: + t.Errorf("Unknown method: Int64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x4Binary(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x4 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x4Slice(v0) + vec1 := simd.LoadInt64x4Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) case "AndNot": gotv = vec0.AndNot(vec1) case "Max": @@ -3921,6 +4380,16 @@ func testInt64x4Binary(t *testing.T, v0 []int64, v1 []int64, want []int64, which gotv = vec0.MulLow(vec1) case "Or": gotv = vec0.Or(vec1) + case "RotateLeft": + gotv = vec0.RotateLeft(vec1) + case "RotateRight": + gotv = vec0.RotateRight(vec1) + case "ShiftLeft": + gotv = vec0.ShiftLeft(vec1) + case "ShiftRight": + gotv = vec0.ShiftRight(vec1) + case "ShiftRightSignExtended": + gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": @@ -3961,6 +4430,16 @@ func testInt64x4BinaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, w gotv = vec0.MaskedMulLow(vec1, vec2.AsMask64x4()) case "MaskedOr": gotv = vec0.MaskedOr(vec1, vec2.AsMask64x4()) + case "MaskedRotateLeft": + gotv = vec0.MaskedRotateLeft(vec1, vec2.AsMask64x4()) + case "MaskedRotateRight": + gotv = vec0.MaskedRotateRight(vec1, vec2.AsMask64x4()) + case "MaskedShiftLeft": + gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask64x4()) + case "MaskedShiftRight": + gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask64x4()) + case "MaskedShiftRightSignExtended": + gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask64x4()) case "MaskedSub": gotv = vec0.MaskedSub(vec1, vec2.AsMask64x4()) case "MaskedXor": @@ -4040,6 +4519,55 @@ func testInt64x4MaskedCompare(t *testing.T, v0 []int64, v1 []int64, v2 []int64, } } +func testInt64x4Ternary(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x4 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x4Slice(v0) + vec1 := simd.LoadInt64x4Slice(v1) + vec2 := simd.LoadInt64x4Slice(v2) + switch which { + case "ShiftLeftAndFillUpperFrom": + gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) + case "ShiftRightAndFillUpperFrom": + gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) + + default: + t.Errorf("Unknown method: Int64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x4TernaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, v3 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x4 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x4Slice(v0) + vec1 := simd.LoadInt64x4Slice(v1) + vec2 := simd.LoadInt64x4Slice(v2) + vec3 := simd.LoadInt64x4Slice(v3) + switch which { + case "MaskedShiftLeftAndFillUpperFrom": + gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask64x4()) + case 
"MaskedShiftRightAndFillUpperFrom": + gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask64x4()) + + default: + t.Errorf("Unknown method: Int64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testInt64x4Unary(t *testing.T, v0 []int64, want []int64, which string) { t.Helper() var gotv simd.Int64x4 @@ -4108,6 +4636,16 @@ func testInt64x8Binary(t *testing.T, v0 []int64, v1 []int64, want []int64, which gotv = vec0.MulLow(vec1) case "Or": gotv = vec0.Or(vec1) + case "RotateLeft": + gotv = vec0.RotateLeft(vec1) + case "RotateRight": + gotv = vec0.RotateRight(vec1) + case "ShiftLeft": + gotv = vec0.ShiftLeft(vec1) + case "ShiftRight": + gotv = vec0.ShiftRight(vec1) + case "ShiftRightSignExtended": + gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": @@ -4148,6 +4686,16 @@ func testInt64x8BinaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, w gotv = vec0.MaskedMulLow(vec1, vec2.AsMask64x8()) case "MaskedOr": gotv = vec0.MaskedOr(vec1, vec2.AsMask64x8()) + case "MaskedRotateLeft": + gotv = vec0.MaskedRotateLeft(vec1, vec2.AsMask64x8()) + case "MaskedRotateRight": + gotv = vec0.MaskedRotateRight(vec1, vec2.AsMask64x8()) + case "MaskedShiftLeft": + gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask64x8()) + case "MaskedShiftRight": + gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask64x8()) + case "MaskedShiftRightSignExtended": + gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask64x8()) case "MaskedSub": gotv = vec0.MaskedSub(vec1, vec2.AsMask64x8()) case "MaskedXor": @@ -4227,6 +4775,55 @@ func testInt64x8MaskedCompare(t *testing.T, v0 []int64, v1 []int64, v2 []int64, } } +func testInt64x8Ternary(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x8 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x8Slice(v0) + vec1 := simd.LoadInt64x8Slice(v1) + vec2 := simd.LoadInt64x8Slice(v2) + switch which { + case "ShiftLeftAndFillUpperFrom": + gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) + case "ShiftRightAndFillUpperFrom": + gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) + + default: + t.Errorf("Unknown method: Int64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x8TernaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, v3 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x8 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x8Slice(v0) + vec1 := simd.LoadInt64x8Slice(v1) + vec2 := simd.LoadInt64x8Slice(v2) + vec3 := simd.LoadInt64x8Slice(v3) + switch which { + case "MaskedShiftLeftAndFillUpperFrom": + gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask64x8()) + case "MaskedShiftRightAndFillUpperFrom": + gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask64x8()) + + default: + t.Errorf("Unknown method: Int64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testInt64x8Unary(t *testing.T, v0 []int64, want []int64, which string) { t.Helper() var gotv simd.Int64x8 @@ -4961,6 +5558,12 @@ func testUint16x8Binary(t *testing.T, v0 []uint16, v1 []uint16, want []uint16, w gotv = 
vec0.SaturatedAdd(vec1) case "SaturatedSub": gotv = vec0.SaturatedSub(vec1) + case "ShiftLeft": + gotv = vec0.ShiftLeft(vec1) + case "ShiftRight": + gotv = vec0.ShiftRight(vec1) + case "ShiftRightSignExtended": + gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": @@ -4999,6 +5602,12 @@ func testUint16x8BinaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16 gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x8()) case "MaskedSaturatedSub": gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x8()) + case "MaskedShiftLeft": + gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask16x8()) + case "MaskedShiftRight": + gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask16x8()) + case "MaskedShiftRightSignExtended": + gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask16x8()) case "MaskedSub": gotv = vec0.MaskedSub(vec1, vec2.AsMask16x8()) @@ -5076,6 +5685,55 @@ func testUint16x8MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int1 } } +func testUint16x8Ternary(t *testing.T, v0 []uint16, v1 []uint16, v2 []uint16, want []uint16, which string) { + t.Helper() + var gotv simd.Uint16x8 + got := make([]uint16, len(want)) + vec0 := simd.LoadUint16x8Slice(v0) + vec1 := simd.LoadUint16x8Slice(v1) + vec2 := simd.LoadUint16x8Slice(v2) + switch which { + case "ShiftLeftAndFillUpperFrom": + gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) + case "ShiftRightAndFillUpperFrom": + gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) + + default: + t.Errorf("Unknown method: Uint16x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint16x8TernaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []uint16, v3 []int16, want []uint16, which string) { + t.Helper() + var gotv simd.Uint16x8 + got := make([]uint16, len(want)) + vec0 := simd.LoadUint16x8Slice(v0) + vec1 := simd.LoadUint16x8Slice(v1) + vec2 := simd.LoadUint16x8Slice(v2) + vec3 := simd.LoadInt16x8Slice(v3) + switch which { + case "MaskedShiftLeftAndFillUpperFrom": + gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask16x8()) + case "MaskedShiftRightAndFillUpperFrom": + gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask16x8()) + + default: + t.Errorf("Unknown method: Uint16x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testUint16x8Unary(t *testing.T, v0 []uint16, want []uint16, which string) { t.Helper() var gotv simd.Uint16x8 @@ -5148,6 +5806,12 @@ func testUint16x16Binary(t *testing.T, v0 []uint16, v1 []uint16, want []uint16, gotv = vec0.SaturatedAdd(vec1) case "SaturatedSub": gotv = vec0.SaturatedSub(vec1) + case "ShiftLeft": + gotv = vec0.ShiftLeft(vec1) + case "ShiftRight": + gotv = vec0.ShiftRight(vec1) + case "ShiftRightSignExtended": + gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": @@ -5186,6 +5850,12 @@ func testUint16x16BinaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []int1 gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x16()) case "MaskedSaturatedSub": gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x16()) + case "MaskedShiftLeft": + gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask16x16()) + case "MaskedShiftRight": + gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask16x16()) + case "MaskedShiftRightSignExtended": + gotv = 
vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask16x16()) case "MaskedSub": gotv = vec0.MaskedSub(vec1, vec2.AsMask16x16()) @@ -5263,6 +5933,55 @@ func testUint16x16MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int } } +func testUint16x16Ternary(t *testing.T, v0 []uint16, v1 []uint16, v2 []uint16, want []uint16, which string) { + t.Helper() + var gotv simd.Uint16x16 + got := make([]uint16, len(want)) + vec0 := simd.LoadUint16x16Slice(v0) + vec1 := simd.LoadUint16x16Slice(v1) + vec2 := simd.LoadUint16x16Slice(v2) + switch which { + case "ShiftLeftAndFillUpperFrom": + gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) + case "ShiftRightAndFillUpperFrom": + gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) + + default: + t.Errorf("Unknown method: Uint16x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint16x16TernaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []uint16, v3 []int16, want []uint16, which string) { + t.Helper() + var gotv simd.Uint16x16 + got := make([]uint16, len(want)) + vec0 := simd.LoadUint16x16Slice(v0) + vec1 := simd.LoadUint16x16Slice(v1) + vec2 := simd.LoadUint16x16Slice(v2) + vec3 := simd.LoadInt16x16Slice(v3) + switch which { + case "MaskedShiftLeftAndFillUpperFrom": + gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask16x16()) + case "MaskedShiftRightAndFillUpperFrom": + gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask16x16()) + + default: + t.Errorf("Unknown method: Uint16x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testUint16x16Unary(t *testing.T, v0 []uint16, want []uint16, which string) { t.Helper() var gotv simd.Uint16x16 @@ -5325,6 +6044,12 @@ func testUint16x32Binary(t *testing.T, v0 []uint16, v1 []uint16, want []uint16, gotv = vec0.SaturatedAdd(vec1) case "SaturatedSub": gotv = vec0.SaturatedSub(vec1) + case "ShiftLeft": + gotv = vec0.ShiftLeft(vec1) + case "ShiftRight": + gotv = vec0.ShiftRight(vec1) + case "ShiftRightSignExtended": + gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) @@ -5361,6 +6086,12 @@ func testUint16x32BinaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []int1 gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x32()) case "MaskedSaturatedSub": gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x32()) + case "MaskedShiftLeft": + gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask16x32()) + case "MaskedShiftRight": + gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask16x32()) + case "MaskedShiftRightSignExtended": + gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask16x32()) case "MaskedSub": gotv = vec0.MaskedSub(vec1, vec2.AsMask16x32()) @@ -5438,6 +6169,55 @@ func testUint16x32MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int } } +func testUint16x32Ternary(t *testing.T, v0 []uint16, v1 []uint16, v2 []uint16, want []uint16, which string) { + t.Helper() + var gotv simd.Uint16x32 + got := make([]uint16, len(want)) + vec0 := simd.LoadUint16x32Slice(v0) + vec1 := simd.LoadUint16x32Slice(v1) + vec2 := simd.LoadUint16x32Slice(v2) + switch which { + case "ShiftLeftAndFillUpperFrom": + gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) + case "ShiftRightAndFillUpperFrom": + gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) + + default: + t.Errorf("Unknown method: 
Uint16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint16x32TernaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []uint16, v3 []int16, want []uint16, which string) { + t.Helper() + var gotv simd.Uint16x32 + got := make([]uint16, len(want)) + vec0 := simd.LoadUint16x32Slice(v0) + vec1 := simd.LoadUint16x32Slice(v1) + vec2 := simd.LoadUint16x32Slice(v2) + vec3 := simd.LoadInt16x32Slice(v3) + switch which { + case "MaskedShiftLeftAndFillUpperFrom": + gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask16x32()) + case "MaskedShiftRightAndFillUpperFrom": + gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask16x32()) + + default: + t.Errorf("Unknown method: Uint16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testUint16x32Unary(t *testing.T, v0 []uint16, want []uint16, which string) { t.Helper() var gotv simd.Uint16x32 @@ -5502,6 +6282,16 @@ func testUint32x4Binary(t *testing.T, v0 []uint32, v1 []uint32, want []uint32, w gotv = vec0.PairwiseAdd(vec1) case "PairwiseSub": gotv = vec0.PairwiseSub(vec1) + case "RotateLeft": + gotv = vec0.RotateLeft(vec1) + case "RotateRight": + gotv = vec0.RotateRight(vec1) + case "ShiftLeft": + gotv = vec0.ShiftLeft(vec1) + case "ShiftRight": + gotv = vec0.ShiftRight(vec1) + case "ShiftRightSignExtended": + gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": @@ -5538,6 +6328,16 @@ func testUint32x4BinaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32 gotv = vec0.MaskedMin(vec1, vec2.AsMask32x4()) case "MaskedOr": gotv = vec0.MaskedOr(vec1, vec2.AsMask32x4()) + case "MaskedRotateLeft": + gotv = vec0.MaskedRotateLeft(vec1, vec2.AsMask32x4()) + case "MaskedRotateRight": + gotv = vec0.MaskedRotateRight(vec1, vec2.AsMask32x4()) + case "MaskedShiftLeft": + gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask32x4()) + case "MaskedShiftRight": + gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask32x4()) + case "MaskedShiftRightSignExtended": + gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask32x4()) case "MaskedSub": gotv = vec0.MaskedSub(vec1, vec2.AsMask32x4()) case "MaskedXor": @@ -5638,6 +6438,55 @@ func testUint32x4MaskedCompare(t *testing.T, v0 []uint32, v1 []uint32, v2 []int3 } } +func testUint32x4Ternary(t *testing.T, v0 []uint32, v1 []uint32, v2 []uint32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x4 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x4Slice(v0) + vec1 := simd.LoadUint32x4Slice(v1) + vec2 := simd.LoadUint32x4Slice(v2) + switch which { + case "ShiftLeftAndFillUpperFrom": + gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) + case "ShiftRightAndFillUpperFrom": + gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) + + default: + t.Errorf("Unknown method: Uint32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x4TernaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []uint32, v3 []int32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x4 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x4Slice(v0) + vec1 := simd.LoadUint32x4Slice(v1) + vec2 := simd.LoadUint32x4Slice(v2) + vec3 := 
simd.LoadInt32x4Slice(v3) + switch which { + case "MaskedShiftLeftAndFillUpperFrom": + gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask32x4()) + case "MaskedShiftRightAndFillUpperFrom": + gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask32x4()) + + default: + t.Errorf("Unknown method: Uint32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testUint32x4Uint8x16Int8x16Mask32x4Uint32x4(t *testing.T, v0 []uint32, v1 []uint8, v2 []int8, v3 []int32, want []uint32, which string) { t.Helper() var gotv simd.Uint32x4 @@ -5751,6 +6600,16 @@ func testUint32x8Binary(t *testing.T, v0 []uint32, v1 []uint32, want []uint32, w gotv = vec0.PairwiseAdd(vec1) case "PairwiseSub": gotv = vec0.PairwiseSub(vec1) + case "RotateLeft": + gotv = vec0.RotateLeft(vec1) + case "RotateRight": + gotv = vec0.RotateRight(vec1) + case "ShiftLeft": + gotv = vec0.ShiftLeft(vec1) + case "ShiftRight": + gotv = vec0.ShiftRight(vec1) + case "ShiftRightSignExtended": + gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": @@ -5787,6 +6646,16 @@ func testUint32x8BinaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32 gotv = vec0.MaskedMin(vec1, vec2.AsMask32x8()) case "MaskedOr": gotv = vec0.MaskedOr(vec1, vec2.AsMask32x8()) + case "MaskedRotateLeft": + gotv = vec0.MaskedRotateLeft(vec1, vec2.AsMask32x8()) + case "MaskedRotateRight": + gotv = vec0.MaskedRotateRight(vec1, vec2.AsMask32x8()) + case "MaskedShiftLeft": + gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask32x8()) + case "MaskedShiftRight": + gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask32x8()) + case "MaskedShiftRightSignExtended": + gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask32x8()) case "MaskedSub": gotv = vec0.MaskedSub(vec1, vec2.AsMask32x8()) case "MaskedXor": @@ -5887,6 +6756,55 @@ func testUint32x8MaskedCompare(t *testing.T, v0 []uint32, v1 []uint32, v2 []int3 } } +func testUint32x8Ternary(t *testing.T, v0 []uint32, v1 []uint32, v2 []uint32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x8 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x8Slice(v0) + vec1 := simd.LoadUint32x8Slice(v1) + vec2 := simd.LoadUint32x8Slice(v2) + switch which { + case "ShiftLeftAndFillUpperFrom": + gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) + case "ShiftRightAndFillUpperFrom": + gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) + + default: + t.Errorf("Unknown method: Uint32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x8TernaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []uint32, v3 []int32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x8 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x8Slice(v0) + vec1 := simd.LoadUint32x8Slice(v1) + vec2 := simd.LoadUint32x8Slice(v2) + vec3 := simd.LoadInt32x8Slice(v3) + switch which { + case "MaskedShiftLeftAndFillUpperFrom": + gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask32x8()) + case "MaskedShiftRightAndFillUpperFrom": + gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask32x8()) + + default: + t.Errorf("Unknown method: Uint32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want 
%v, got %v", i, want[i], got[i]) + } + } +} + func testUint32x8Uint8x32Int8x32Mask32x8Uint32x8(t *testing.T, v0 []uint32, v1 []uint8, v2 []int8, v3 []int32, want []uint32, which string) { t.Helper() var gotv simd.Uint32x8 @@ -5996,6 +6914,16 @@ func testUint32x16Binary(t *testing.T, v0 []uint32, v1 []uint32, want []uint32, gotv = vec0.Min(vec1) case "Or": gotv = vec0.Or(vec1) + case "RotateLeft": + gotv = vec0.RotateLeft(vec1) + case "RotateRight": + gotv = vec0.RotateRight(vec1) + case "ShiftLeft": + gotv = vec0.ShiftLeft(vec1) + case "ShiftRight": + gotv = vec0.ShiftRight(vec1) + case "ShiftRightSignExtended": + gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": @@ -6032,6 +6960,16 @@ func testUint32x16BinaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []int3 gotv = vec0.MaskedMin(vec1, vec2.AsMask32x16()) case "MaskedOr": gotv = vec0.MaskedOr(vec1, vec2.AsMask32x16()) + case "MaskedRotateLeft": + gotv = vec0.MaskedRotateLeft(vec1, vec2.AsMask32x16()) + case "MaskedRotateRight": + gotv = vec0.MaskedRotateRight(vec1, vec2.AsMask32x16()) + case "MaskedShiftLeft": + gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask32x16()) + case "MaskedShiftRight": + gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask32x16()) + case "MaskedShiftRightSignExtended": + gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask32x16()) case "MaskedSub": gotv = vec0.MaskedSub(vec1, vec2.AsMask32x16()) case "MaskedXor": @@ -6111,6 +7049,55 @@ func testUint32x16MaskedCompare(t *testing.T, v0 []uint32, v1 []uint32, v2 []int } } +func testUint32x16Ternary(t *testing.T, v0 []uint32, v1 []uint32, v2 []uint32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x16 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x16Slice(v0) + vec1 := simd.LoadUint32x16Slice(v1) + vec2 := simd.LoadUint32x16Slice(v2) + switch which { + case "ShiftLeftAndFillUpperFrom": + gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) + case "ShiftRightAndFillUpperFrom": + gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) + + default: + t.Errorf("Unknown method: Uint32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x16TernaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []uint32, v3 []int32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x16 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x16Slice(v0) + vec1 := simd.LoadUint32x16Slice(v1) + vec2 := simd.LoadUint32x16Slice(v2) + vec3 := simd.LoadInt32x16Slice(v3) + switch which { + case "MaskedShiftLeftAndFillUpperFrom": + gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask32x16()) + case "MaskedShiftRightAndFillUpperFrom": + gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask32x16()) + + default: + t.Errorf("Unknown method: Uint32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testUint32x16Uint8x64Int8x64Mask32x16Uint32x16(t *testing.T, v0 []uint32, v1 []uint8, v2 []int8, v3 []int32, want []uint32, which string) { t.Helper() var gotv simd.Uint32x16 @@ -6222,6 +7209,16 @@ func testUint64x2Binary(t *testing.T, v0 []uint64, v1 []uint64, want []uint64, w gotv = vec0.MulEvenWiden(vec1) case "Or": gotv = vec0.Or(vec1) + case "RotateLeft": + gotv = vec0.RotateLeft(vec1) + case "RotateRight": + 
gotv = vec0.RotateRight(vec1) + case "ShiftLeft": + gotv = vec0.ShiftLeft(vec1) + case "ShiftRight": + gotv = vec0.ShiftRight(vec1) + case "ShiftRightSignExtended": + gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": @@ -6260,6 +7257,16 @@ func testUint64x2BinaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64 gotv = vec0.MaskedMulEvenWiden(vec1, vec2.AsMask64x2()) case "MaskedOr": gotv = vec0.MaskedOr(vec1, vec2.AsMask64x2()) + case "MaskedRotateLeft": + gotv = vec0.MaskedRotateLeft(vec1, vec2.AsMask64x2()) + case "MaskedRotateRight": + gotv = vec0.MaskedRotateRight(vec1, vec2.AsMask64x2()) + case "MaskedShiftLeft": + gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask64x2()) + case "MaskedShiftRight": + gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask64x2()) + case "MaskedShiftRightSignExtended": + gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask64x2()) case "MaskedSub": gotv = vec0.MaskedSub(vec1, vec2.AsMask64x2()) case "MaskedXor": @@ -6339,6 +7346,55 @@ func testUint64x2MaskedCompare(t *testing.T, v0 []uint64, v1 []uint64, v2 []int6 } } +func testUint64x2Ternary(t *testing.T, v0 []uint64, v1 []uint64, v2 []uint64, want []uint64, which string) { + t.Helper() + var gotv simd.Uint64x2 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x2Slice(v0) + vec1 := simd.LoadUint64x2Slice(v1) + vec2 := simd.LoadUint64x2Slice(v2) + switch which { + case "ShiftLeftAndFillUpperFrom": + gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) + case "ShiftRightAndFillUpperFrom": + gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) + + default: + t.Errorf("Unknown method: Uint64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint64x2TernaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []uint64, v3 []int64, want []uint64, which string) { + t.Helper() + var gotv simd.Uint64x2 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x2Slice(v0) + vec1 := simd.LoadUint64x2Slice(v1) + vec2 := simd.LoadUint64x2Slice(v2) + vec3 := simd.LoadInt64x2Slice(v3) + switch which { + case "MaskedShiftLeftAndFillUpperFrom": + gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask64x2()) + case "MaskedShiftRightAndFillUpperFrom": + gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask64x2()) + + default: + t.Errorf("Unknown method: Uint64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testUint64x2Unary(t *testing.T, v0 []uint64, want []uint64, which string) { t.Helper() var gotv simd.Uint64x2 @@ -6401,6 +7457,16 @@ func testUint64x4Binary(t *testing.T, v0 []uint64, v1 []uint64, want []uint64, w gotv = vec0.MulEvenWiden(vec1) case "Or": gotv = vec0.Or(vec1) + case "RotateLeft": + gotv = vec0.RotateLeft(vec1) + case "RotateRight": + gotv = vec0.RotateRight(vec1) + case "ShiftLeft": + gotv = vec0.ShiftLeft(vec1) + case "ShiftRight": + gotv = vec0.ShiftRight(vec1) + case "ShiftRightSignExtended": + gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": @@ -6439,6 +7505,16 @@ func testUint64x4BinaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64 gotv = vec0.MaskedMulEvenWiden(vec1, vec2.AsMask64x4()) case "MaskedOr": gotv = vec0.MaskedOr(vec1, vec2.AsMask64x4()) + case "MaskedRotateLeft": + gotv = 
vec0.MaskedRotateLeft(vec1, vec2.AsMask64x4()) + case "MaskedRotateRight": + gotv = vec0.MaskedRotateRight(vec1, vec2.AsMask64x4()) + case "MaskedShiftLeft": + gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask64x4()) + case "MaskedShiftRight": + gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask64x4()) + case "MaskedShiftRightSignExtended": + gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask64x4()) case "MaskedSub": gotv = vec0.MaskedSub(vec1, vec2.AsMask64x4()) case "MaskedXor": @@ -6518,6 +7594,55 @@ func testUint64x4MaskedCompare(t *testing.T, v0 []uint64, v1 []uint64, v2 []int6 } } +func testUint64x4Ternary(t *testing.T, v0 []uint64, v1 []uint64, v2 []uint64, want []uint64, which string) { + t.Helper() + var gotv simd.Uint64x4 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x4Slice(v0) + vec1 := simd.LoadUint64x4Slice(v1) + vec2 := simd.LoadUint64x4Slice(v2) + switch which { + case "ShiftLeftAndFillUpperFrom": + gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) + case "ShiftRightAndFillUpperFrom": + gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) + + default: + t.Errorf("Unknown method: Uint64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint64x4TernaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []uint64, v3 []int64, want []uint64, which string) { + t.Helper() + var gotv simd.Uint64x4 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x4Slice(v0) + vec1 := simd.LoadUint64x4Slice(v1) + vec2 := simd.LoadUint64x4Slice(v2) + vec3 := simd.LoadInt64x4Slice(v3) + switch which { + case "MaskedShiftLeftAndFillUpperFrom": + gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask64x4()) + case "MaskedShiftRightAndFillUpperFrom": + gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask64x4()) + + default: + t.Errorf("Unknown method: Uint64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testUint64x4Unary(t *testing.T, v0 []uint64, want []uint64, which string) { t.Helper() var gotv simd.Uint64x4 @@ -6580,6 +7705,16 @@ func testUint64x8Binary(t *testing.T, v0 []uint64, v1 []uint64, want []uint64, w gotv = vec0.MulEvenWiden(vec1) case "Or": gotv = vec0.Or(vec1) + case "RotateLeft": + gotv = vec0.RotateLeft(vec1) + case "RotateRight": + gotv = vec0.RotateRight(vec1) + case "ShiftLeft": + gotv = vec0.ShiftLeft(vec1) + case "ShiftRight": + gotv = vec0.ShiftRight(vec1) + case "ShiftRightSignExtended": + gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": @@ -6618,6 +7753,16 @@ func testUint64x8BinaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64 gotv = vec0.MaskedMulEvenWiden(vec1, vec2.AsMask64x8()) case "MaskedOr": gotv = vec0.MaskedOr(vec1, vec2.AsMask64x8()) + case "MaskedRotateLeft": + gotv = vec0.MaskedRotateLeft(vec1, vec2.AsMask64x8()) + case "MaskedRotateRight": + gotv = vec0.MaskedRotateRight(vec1, vec2.AsMask64x8()) + case "MaskedShiftLeft": + gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask64x8()) + case "MaskedShiftRight": + gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask64x8()) + case "MaskedShiftRightSignExtended": + gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask64x8()) case "MaskedSub": gotv = vec0.MaskedSub(vec1, vec2.AsMask64x8()) case "MaskedXor": @@ -6697,6 +7842,55 @@ func 
testUint64x8MaskedCompare(t *testing.T, v0 []uint64, v1 []uint64, v2 []int6 } } +func testUint64x8Ternary(t *testing.T, v0 []uint64, v1 []uint64, v2 []uint64, want []uint64, which string) { + t.Helper() + var gotv simd.Uint64x8 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x8Slice(v0) + vec1 := simd.LoadUint64x8Slice(v1) + vec2 := simd.LoadUint64x8Slice(v2) + switch which { + case "ShiftLeftAndFillUpperFrom": + gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) + case "ShiftRightAndFillUpperFrom": + gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) + + default: + t.Errorf("Unknown method: Uint64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint64x8TernaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []uint64, v3 []int64, want []uint64, which string) { + t.Helper() + var gotv simd.Uint64x8 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x8Slice(v0) + vec1 := simd.LoadUint64x8Slice(v1) + vec2 := simd.LoadUint64x8Slice(v2) + vec3 := simd.LoadInt64x8Slice(v3) + switch which { + case "MaskedShiftLeftAndFillUpperFrom": + gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask64x8()) + case "MaskedShiftRightAndFillUpperFrom": + gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask64x8()) + + default: + t.Errorf("Unknown method: Uint64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testUint64x8Unary(t *testing.T, v0 []uint64, want []uint64, which string) { t.Helper() var gotv simd.Uint64x8 @@ -6737,3 +7931,54 @@ func testUint64x8UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint6 } } } + +/* The operations below cannot be tested via wrappers, please test them directly */ + +// CeilSuppressExceptionWithPrecision +// CeilWithPrecision +// DiffWithCeilSuppressExceptionWithPrecision +// DiffWithCeilWithPrecision +// DiffWithFloorSuppressExceptionWithPrecision +// DiffWithFloorWithPrecision +// DiffWithRoundSuppressExceptionWithPrecision +// DiffWithRoundWithPrecision +// DiffWithTruncSuppressExceptionWithPrecision +// DiffWithTruncWithPrecision +// FloorSuppressExceptionWithPrecision +// FloorWithPrecision +// GetElem +// MaskedCeilSuppressExceptionWithPrecision +// MaskedCeilWithPrecision +// MaskedDiffWithCeilSuppressExceptionWithPrecision +// MaskedDiffWithCeilWithPrecision +// MaskedDiffWithFloorSuppressExceptionWithPrecision +// MaskedDiffWithFloorWithPrecision +// MaskedDiffWithRoundSuppressExceptionWithPrecision +// MaskedDiffWithRoundWithPrecision +// MaskedDiffWithTruncSuppressExceptionWithPrecision +// MaskedDiffWithTruncWithPrecision +// MaskedFloorSuppressExceptionWithPrecision +// MaskedFloorWithPrecision +// MaskedRotateAllLeft +// MaskedRotateAllRight +// MaskedRoundSuppressExceptionWithPrecision +// MaskedRoundWithPrecision +// MaskedShiftAllLeft +// MaskedShiftAllLeftAndFillUpperFrom +// MaskedShiftAllRight +// MaskedShiftAllRightAndFillUpperFrom +// MaskedShiftAllRightSignExtended +// MaskedTruncSuppressExceptionWithPrecision +// MaskedTruncWithPrecision +// RotateAllLeft +// RotateAllRight +// RoundSuppressExceptionWithPrecision +// RoundWithPrecision +// SetElem +// ShiftAllLeft +// ShiftAllLeftAndFillUpperFrom +// ShiftAllRight +// ShiftAllRightAndFillUpperFrom +// ShiftAllRightSignExtended +// TruncSuppressExceptionWithPrecision 
+// TruncWithPrecision diff --git a/src/simd/stubs_amd64.go b/src/simd/stubs_amd64.go index 5037e4e024e1c9..330ad6aca2a703 100644 --- a/src/simd/stubs_amd64.go +++ b/src/simd/stubs_amd64.go @@ -5178,6 +5178,254 @@ func (x Uint64x4) MaskedPopCount(y Mask64x4) Uint64x4 // Asm: VPOPCNTQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaskedPopCount(y Mask64x8) Uint64x8 +/* MaskedRotateAllLeft */ + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedRotateAllLeft(imm uint8, y Mask32x4) Int32x4 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedRotateAllLeft(imm uint8, y Mask32x8) Int32x8 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedRotateAllLeft(imm uint8, y Mask32x16) Int32x16 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedRotateAllLeft(imm uint8, y Mask64x2) Int64x2 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedRotateAllLeft(imm uint8, y Mask64x4) Int64x4 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedRotateAllLeft(imm uint8, y Mask64x8) Int64x8 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedRotateAllLeft(imm uint8, y Mask32x4) Uint32x4 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedRotateAllLeft(imm uint8, y Mask32x8) Uint32x8 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedRotateAllLeft(imm uint8, y Mask32x16) Uint32x16 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedRotateAllLeft(imm uint8, y Mask64x2) Uint64x2 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedRotateAllLeft(imm uint8, y Mask64x4) Uint64x4 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedRotateAllLeft(imm uint8, y Mask64x8) Uint64x8 + +/* MaskedRotateAllRight */ + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedRotateAllRight(imm uint8, y Mask32x4) Int32x4 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. 
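+// For example (illustration only): rotating the 32-bit lane value 0x80000001
+// right by 1 yields 0xC0000000, because the low bit shifted out re-enters at
+// the top of the lane.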
+// +// Asm: VPRORD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedRotateAllRight(imm uint8, y Mask32x8) Int32x8 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedRotateAllRight(imm uint8, y Mask32x16) Int32x16 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedRotateAllRight(imm uint8, y Mask64x2) Int64x2 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedRotateAllRight(imm uint8, y Mask64x4) Int64x4 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedRotateAllRight(imm uint8, y Mask64x8) Int64x8 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedRotateAllRight(imm uint8, y Mask32x4) Uint32x4 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedRotateAllRight(imm uint8, y Mask32x8) Uint32x8 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedRotateAllRight(imm uint8, y Mask32x16) Uint32x16 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedRotateAllRight(imm uint8, y Mask64x2) Uint64x2 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedRotateAllRight(imm uint8, y Mask64x4) Uint64x4 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedRotateAllRight(imm uint8, y Mask64x8) Uint64x8 + +/* MaskedRotateLeft */ + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedRotateLeft(y Int32x4, z Mask32x4) Int32x4 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedRotateLeft(y Int32x8, z Mask32x8) Int32x8 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedRotateLeft(y Int32x16, z Mask32x16) Int32x16 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedRotateLeft(y Int64x2, z Mask64x2) Int64x2 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. 
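+// For example (illustration only): with per-element counts y = {0, 1, 2, 3}
+// and x = {1, 2, 3, 4}, the rotated result is {1, 4, 12, 32}.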
+// +// Asm: VPROLVQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedRotateLeft(y Int64x4, z Mask64x4) Int64x4 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedRotateLeft(y Int64x8, z Mask64x8) Int64x8 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedRotateLeft(y Uint32x4, z Mask32x4) Uint32x4 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedRotateLeft(y Uint32x8, z Mask32x8) Uint32x8 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedRotateLeft(y Uint32x16, z Mask32x16) Uint32x16 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedRotateLeft(y Uint64x2, z Mask64x2) Uint64x2 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedRotateLeft(y Uint64x4, z Mask64x4) Uint64x4 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedRotateLeft(y Uint64x8, z Mask64x8) Uint64x8 + +/* MaskedRotateRight */ + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedRotateRight(y Int32x4, z Mask32x4) Int32x4 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedRotateRight(y Int32x8, z Mask32x8) Int32x8 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedRotateRight(y Int32x16, z Mask32x16) Int32x16 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedRotateRight(y Int64x2, z Mask64x2) Int64x2 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedRotateRight(y Int64x4, z Mask64x4) Int64x4 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedRotateRight(y Int64x8, z Mask64x8) Int64x8 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedRotateRight(y Uint32x4, z Mask32x4) Uint32x4 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. 
+// +// Asm: VPRORVD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedRotateRight(y Uint32x8, z Mask32x8) Uint32x8 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedRotateRight(y Uint32x16, z Mask32x16) Uint32x16 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedRotateRight(y Uint64x2, z Mask64x2) Uint64x2 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedRotateRight(y Uint64x4, z Mask64x4) Uint64x4 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedRotateRight(y Uint64x8, z Mask64x8) Uint64x8 + /* MaskedRoundSuppressExceptionWithPrecision */ // RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. @@ -5447,1884 +5695,3826 @@ func (x Uint32x8) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, // Asm: VPDPBUSDS, CPU Feature: AVX512EVEX func (x Uint32x16) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64, u Mask32x16) Uint32x16 -/* MaskedSqrt */ +/* MaskedShiftAllLeft */ -// Sqrt computes the square root of each element. +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VSQRTPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedSqrt(y Mask32x4) Float32x4 +// Asm: VPSLLQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedShiftAllLeft(y uint64, z Mask64x2) Int64x2 -// Sqrt computes the square root of each element. +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VSQRTPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedSqrt(y Mask32x8) Float32x8 +// Asm: VPSLLQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedShiftAllLeft(y uint64, z Mask64x4) Int64x4 -// Sqrt computes the square root of each element. +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VSQRTPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedSqrt(y Mask32x16) Float32x16 +// Asm: VPSLLQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedShiftAllLeft(y uint64, z Mask64x8) Int64x8 -// Sqrt computes the square root of each element. +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VSQRTPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedSqrt(y Mask64x2) Float64x2 +// Asm: VPSLLQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedShiftAllLeft(y uint64, z Mask64x2) Uint64x2 -// Sqrt computes the square root of each element. +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VSQRTPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedSqrt(y Mask64x4) Float64x4 +// Asm: VPSLLQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedShiftAllLeft(y uint64, z Mask64x4) Uint64x4 -// Sqrt computes the square root of each element. +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. 
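+// For example (illustration only): lanes holding {1, 2, 3, 4, ...} shifted
+// left by 3 become {8, 16, 24, 32, ...}, with the vacated low bits set to zero.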
// -// Asm: VSQRTPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedSqrt(y Mask64x8) Float64x8 +// Asm: VPSLLQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedShiftAllLeft(y uint64, z Mask64x8) Uint64x8 -/* MaskedSub */ +/* MaskedShiftAllLeftAndFillUpperFrom */ -// Sub subtracts corresponding elements of two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VSUBPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedSub(y Float32x4, z Mask32x4) Float32x4 +// Asm: VPSHLDW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int16x8, z Mask16x8) Int16x8 -// Sub subtracts corresponding elements of two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VSUBPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedSub(y Float32x8, z Mask32x8) Float32x8 +// Asm: VPSHLDW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int16x16, z Mask16x16) Int16x16 -// Sub subtracts corresponding elements of two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VSUBPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedSub(y Float32x16, z Mask32x16) Float32x16 +// Asm: VPSHLDW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int16x32, z Mask16x32) Int16x32 -// Sub subtracts corresponding elements of two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VSUBPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedSub(y Float64x2, z Mask64x2) Float64x2 +// Asm: VPSHLDD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int32x4, z Mask32x4) Int32x4 -// Sub subtracts corresponding elements of two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VSUBPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedSub(y Float64x4, z Mask64x4) Float64x4 +// Asm: VPSHLDD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int32x8, z Mask32x8) Int32x8 -// Sub subtracts corresponding elements of two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. 
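+// For example (illustration only, using 8-bit lanes for brevity): x = 0b00010110
+// shifted left by 3 becomes 0b10110000, and the top 3 bits of y = 0b10100001
+// (that is, 0b101) fill the emptied low bits, giving 0b10110101.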
// -// Asm: VSUBPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedSub(y Float64x8, z Mask64x8) Float64x8 +// Asm: VPSHLDD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int32x16, z Mask32x16) Int32x16 -// Sub subtracts corresponding elements of two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPSUBB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedSub(y Int8x16, z Mask8x16) Int8x16 +// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int64x2, z Mask64x2) Int64x2 -// Sub subtracts corresponding elements of two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPSUBB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedSub(y Int8x32, z Mask8x32) Int8x32 +// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int64x4, z Mask64x4) Int64x4 -// Sub subtracts corresponding elements of two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPSUBB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedSub(y Int8x64, z Mask8x64) Int8x64 +// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int64x8, z Mask64x8) Int64x8 -// Sub subtracts corresponding elements of two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPSUBW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedSub(y Int16x8, z Mask16x8) Int16x8 +// Asm: VPSHLDW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint16x8, z Mask16x8) Uint16x8 -// Sub subtracts corresponding elements of two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPSUBW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedSub(y Int16x16, z Mask16x16) Int16x16 +// Asm: VPSHLDW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint16x16, z Mask16x16) Uint16x16 -// Sub subtracts corresponding elements of two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPSUBW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedSub(y Int16x32, z Mask16x32) Int16x32 +// Asm: VPSHLDW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint16x32, z Mask16x32) Uint16x32 -// Sub subtracts corresponding elements of two vectors. 
+// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedSub(y Int32x4, z Mask32x4) Int32x4 +// Asm: VPSHLDD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint32x4, z Mask32x4) Uint32x4 -// Sub subtracts corresponding elements of two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedSub(y Int32x8, z Mask32x8) Int32x8 +// Asm: VPSHLDD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint32x8, z Mask32x8) Uint32x8 -// Sub subtracts corresponding elements of two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedSub(y Int32x16, z Mask32x16) Int32x16 +// Asm: VPSHLDD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint32x16, z Mask32x16) Uint32x16 -// Sub subtracts corresponding elements of two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedSub(y Int64x2, z Mask64x2) Int64x2 +// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint64x2, z Mask64x2) Uint64x2 -// Sub subtracts corresponding elements of two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedSub(y Int64x4, z Mask64x4) Int64x4 +// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint64x4, z Mask64x4) Uint64x4 -// Sub subtracts corresponding elements of two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedSub(y Int64x8, z Mask64x8) Int64x8 +// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint64x8, z Mask64x8) Uint64x8 -// Sub subtracts corresponding elements of two vectors. +/* MaskedShiftAllRight */ + +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. 
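+// For example (illustration only): lanes holding 16, 9, 4 and 1 shifted right
+// by 2 become 4, 2, 1 and 0, with the vacated upper bits set to zero.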
// -// Asm: VPSUBB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedSub(y Uint8x16, z Mask8x16) Uint8x16 +// Asm: VPSRLQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedShiftAllRight(y uint64, z Mask64x2) Int64x2 -// Sub subtracts corresponding elements of two vectors. +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSUBB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedSub(y Uint8x32, z Mask8x32) Uint8x32 +// Asm: VPSRLQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedShiftAllRight(y uint64, z Mask64x4) Int64x4 -// Sub subtracts corresponding elements of two vectors. +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSUBB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedSub(y Uint8x64, z Mask8x64) Uint8x64 +// Asm: VPSRLQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedShiftAllRight(y uint64, z Mask64x8) Int64x8 -// Sub subtracts corresponding elements of two vectors. +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSUBW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedSub(y Uint16x8, z Mask16x8) Uint16x8 +// Asm: VPSRLQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedShiftAllRight(y uint64, z Mask64x2) Uint64x2 -// Sub subtracts corresponding elements of two vectors. +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSUBW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedSub(y Uint16x16, z Mask16x16) Uint16x16 +// Asm: VPSRLQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedShiftAllRight(y uint64, z Mask64x4) Uint64x4 -// Sub subtracts corresponding elements of two vectors. +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSUBW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedSub(y Uint16x32, z Mask16x32) Uint16x32 +// Asm: VPSRLQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedShiftAllRight(y uint64, z Mask64x8) Uint64x8 -// Sub subtracts corresponding elements of two vectors. +/* MaskedShiftAllRightAndFillUpperFrom */ + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedSub(y Uint32x4, z Mask32x4) Uint32x4 +// Asm: VPSHRDW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int16x8, z Mask16x8) Int16x8 -// Sub subtracts corresponding elements of two vectors. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedSub(y Uint32x8, z Mask32x8) Uint32x8 +// Asm: VPSHRDW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int16x16, z Mask16x16) Int16x16 -// Sub subtracts corresponding elements of two vectors. 
+// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedSub(y Uint32x16, z Mask32x16) Uint32x16 +// Asm: VPSHRDW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int16x32, z Mask16x32) Int16x32 -// Sub subtracts corresponding elements of two vectors. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedSub(y Uint64x2, z Mask64x2) Uint64x2 +// Asm: VPSHRDD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int32x4, z Mask32x4) Int32x4 -// Sub subtracts corresponding elements of two vectors. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedSub(y Uint64x4, z Mask64x4) Uint64x4 +// Asm: VPSHRDD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int32x8, z Mask32x8) Int32x8 -// Sub subtracts corresponding elements of two vectors. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedSub(y Uint64x8, z Mask64x8) Uint64x8 +// Asm: VPSHRDD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int32x16, z Mask32x16) Int32x16 -/* MaskedTruncSuppressExceptionWithPrecision */ +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int64x2, z Mask64x2) Int64x2 -// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. -// Const Immediate = 11. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 +// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int64x4, z Mask64x4) Int64x4 -// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. -// Const Immediate = 11. 
+// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 +// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int64x8, z Mask64x8) Int64x8 -// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. -// Const Immediate = 11. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 +// Asm: VPSHRDW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint16x8, z Mask16x8) Uint16x8 -// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. -// Const Immediate = 11. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 +// Asm: VPSHRDW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint16x16, z Mask16x16) Uint16x16 -// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. -// Const Immediate = 11. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 +// Asm: VPSHRDW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint16x32, z Mask16x32) Uint16x32 -// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. -// Const Immediate = 11. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 +// Asm: VPSHRDD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint32x4, z Mask32x4) Uint32x4 -/* MaskedTruncWithPrecision */ +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
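+// For example (illustration only, using 8-bit lanes for brevity): x = 0b00010110
+// shifted right by 3 becomes 0b00000010, and the low 3 bits of y = 0b00000101
+// (that is, 0b101) fill the emptied high bits, giving 0b10100010.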
+// +// Asm: VPSHRDD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint32x8, z Mask32x8) Uint32x8 -// TruncWithPrecision truncates elements with specified precision. -// Const Immediate = 3. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedTruncWithPrecision(imm uint8, y Mask32x4) Float32x4 +// Asm: VPSHRDD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint32x16, z Mask32x16) Uint32x16 -// TruncWithPrecision truncates elements with specified precision. -// Const Immediate = 3. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedTruncWithPrecision(imm uint8, y Mask32x8) Float32x8 +// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint64x2, z Mask64x2) Uint64x2 -// TruncWithPrecision truncates elements with specified precision. -// Const Immediate = 3. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedTruncWithPrecision(imm uint8, y Mask32x16) Float32x16 +// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint64x4, z Mask64x4) Uint64x4 -// TruncWithPrecision truncates elements with specified precision. -// Const Immediate = 3. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedTruncWithPrecision(imm uint8, y Mask64x2) Float64x2 +// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint64x8, z Mask64x8) Uint64x8 -// TruncWithPrecision truncates elements with specified precision. -// Const Immediate = 3. +/* MaskedShiftAllRightSignExtended */ + +// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedTruncWithPrecision(imm uint8, y Mask64x4) Float64x4 +// Asm: VPSRAQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedShiftAllRightSignExtended(y uint64, z Mask64x2) Int64x2 -// TruncWithPrecision truncates elements with specified precision. -// Const Immediate = 3. +// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. 
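+// For example (illustration only): an arithmetic right shift by 1 maps a lane
+// holding -8 to -4 and a lane holding 8 to 4, because the sign bit is
+// replicated into the vacated upper bits.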
// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedTruncWithPrecision(imm uint8, y Mask64x8) Float64x8 +// Asm: VPSRAQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedShiftAllRightSignExtended(y uint64, z Mask64x4) Int64x4 -/* MaskedUnsignedSignedQuadDotProdAccumulate */ +// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedShiftAllRightSignExtended(y uint64, z Mask64x8) Int64x8 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +/* MaskedShiftLeft */ + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16, u Mask32x4) Int32x4 +// Asm: VPSLLVW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedShiftLeft(y Int16x8, z Mask16x8) Int16x8 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32, u Mask32x8) Int32x8 +// Asm: VPSLLVW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedShiftLeft(y Int16x16, z Mask16x16) Int16x16 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64, u Mask32x16) Int32x16 +// Asm: VPSLLVW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedShiftLeft(y Int16x32, z Mask16x32) Int16x32 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16, u Mask32x4) Uint32x4 +// Asm: VPSLLVD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedShiftLeft(y Int32x4, z Mask32x4) Int32x4 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32, u Mask32x8) Uint32x8 +// Asm: VPSLLVD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedShiftLeft(y Int32x8, z Mask32x8) Int32x8 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. 
// -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64, u Mask32x16) Uint32x16 +// Asm: VPSLLVD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedShiftLeft(y Int32x16, z Mask32x16) Int32x16 -/* MaskedXor */ +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedShiftLeft(y Int64x2, z Mask64x2) Int64x2 -// Xor performs a masked bitwise XOR operation between two vectors. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VXORPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedXor(y Float32x4, z Mask32x4) Float32x4 +// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedShiftLeft(y Int64x4, z Mask64x4) Int64x4 -// Xor performs a masked bitwise XOR operation between two vectors. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VXORPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedXor(y Float32x8, z Mask32x8) Float32x8 +// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedShiftLeft(y Int64x8, z Mask64x8) Int64x8 -// Xor performs a masked bitwise XOR operation between two vectors. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VXORPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedXor(y Float32x16, z Mask32x16) Float32x16 +// Asm: VPSLLVW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedShiftLeft(y Uint16x8, z Mask16x8) Uint16x8 -// Xor performs a masked bitwise XOR operation between two vectors. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VXORPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedXor(y Float64x2, z Mask64x2) Float64x2 +// Asm: VPSLLVW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedShiftLeft(y Uint16x16, z Mask16x16) Uint16x16 -// Xor performs a masked bitwise XOR operation between two vectors. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VXORPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedXor(y Float64x4, z Mask64x4) Float64x4 +// Asm: VPSLLVW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedShiftLeft(y Uint16x32, z Mask16x32) Uint16x32 -// Xor performs a masked bitwise XOR operation between two vectors. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VXORPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedXor(y Float64x8, z Mask64x8) Float64x8 +// Asm: VPSLLVD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedShiftLeft(y Uint32x4, z Mask32x4) Uint32x4 -// Xor performs a masked bitwise XOR operation between two vectors. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. 
// -// Asm: VPXORD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedXor(y Int32x4, z Mask32x4) Int32x4 +// Asm: VPSLLVD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedShiftLeft(y Uint32x8, z Mask32x8) Uint32x8 -// Xor performs a masked bitwise XOR operation between two vectors. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPXORD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedXor(y Int32x8, z Mask32x8) Int32x8 +// Asm: VPSLLVD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedShiftLeft(y Uint32x16, z Mask32x16) Uint32x16 -// Xor performs a masked bitwise XOR operation between two vectors. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPXORD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedXor(y Int32x16, z Mask32x16) Int32x16 +// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedShiftLeft(y Uint64x2, z Mask64x2) Uint64x2 -// Xor performs a masked bitwise XOR operation between two vectors. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPXORQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedXor(y Int64x2, z Mask64x2) Int64x2 +// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedShiftLeft(y Uint64x4, z Mask64x4) Uint64x4 -// Xor performs a masked bitwise XOR operation between two vectors. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPXORQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedXor(y Int64x4, z Mask64x4) Int64x4 +// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedShiftLeft(y Uint64x8, z Mask64x8) Uint64x8 -// Xor performs a masked bitwise XOR operation between two vectors. +/* MaskedShiftLeftAndFillUpperFrom */ + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPXORQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedXor(y Int64x8, z Mask64x8) Int64x8 +// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedShiftLeftAndFillUpperFrom(y Int16x8, z Int16x8, u Mask16x8) Int16x8 -// Xor performs a masked bitwise XOR operation between two vectors. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPXORD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedXor(y Uint32x4, z Mask32x4) Uint32x4 +// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedShiftLeftAndFillUpperFrom(y Int16x16, z Int16x16, u Mask16x16) Int16x16 -// Xor performs a masked bitwise XOR operation between two vectors. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
// -// Asm: VPXORD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedXor(y Uint32x8, z Mask32x8) Uint32x8 +// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedShiftLeftAndFillUpperFrom(y Int16x32, z Int16x32, u Mask16x32) Int16x32 -// Xor performs a masked bitwise XOR operation between two vectors. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPXORD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedXor(y Uint32x16, z Mask32x16) Uint32x16 +// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedShiftLeftAndFillUpperFrom(y Int32x4, z Int32x4, u Mask32x4) Int32x4 -// Xor performs a masked bitwise XOR operation between two vectors. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPXORQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedXor(y Uint64x2, z Mask64x2) Uint64x2 +// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedShiftLeftAndFillUpperFrom(y Int32x8, z Int32x8, u Mask32x8) Int32x8 -// Xor performs a masked bitwise XOR operation between two vectors. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPXORQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedXor(y Uint64x4, z Mask64x4) Uint64x4 +// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedShiftLeftAndFillUpperFrom(y Int32x16, z Int32x16, u Mask32x16) Int32x16 -// Xor performs a masked bitwise XOR operation between two vectors. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPXORQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedXor(y Uint64x8, z Mask64x8) Uint64x8 +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedShiftLeftAndFillUpperFrom(y Int64x2, z Int64x2, u Mask64x2) Int64x2 -/* Max */ +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedShiftLeftAndFillUpperFrom(y Int64x4, z Int64x4, u Mask64x4) Int64x4 -// Max computes the maximum of corresponding elements. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VMAXPS, CPU Feature: AVX -func (x Float32x4) Max(y Float32x4) Float32x4 +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedShiftLeftAndFillUpperFrom(y Int64x8, z Int64x8, u Mask64x8) Int64x8 -// Max computes the maximum of corresponding elements. 
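// Editor's sketch, not part of this CL: one way the masked variable-count shift
// methods declared above (MaskedShiftLeft, MaskedShiftLeftAndFillUpperFrom) might
// be used from client code. Helper and parameter names are made up, the
// "internal/simd" import is assumed from this patch's package layout, and the
// exact treatment of masked-off lanes is not specified in these doc comments.
func shiftLeftUnderMask(x, counts simd.Int32x4, m simd.Mask32x4) simd.Int32x4 {
	// Each lane of x is shifted left by the count in the matching lane of
	// counts; the mask m restricts which lanes the operation applies to.
	return x.MaskedShiftLeft(counts, m)
}

func funnelShiftLeft(x, counts, fill simd.Int32x4, m simd.Mask32x4) simd.Int32x4 {
	// Per the doc comment above: each lane of x is shifted left by counts and
	// the vacated low bits are refilled from the upper bits of fill.
	return x.MaskedShiftLeftAndFillUpperFrom(counts, fill, m)
}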
+// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VMAXPS, CPU Feature: AVX -func (x Float32x8) Max(y Float32x8) Float32x8 +// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedShiftLeftAndFillUpperFrom(y Uint16x8, z Uint16x8, u Mask16x8) Uint16x8 -// Max computes the maximum of corresponding elements. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VMAXPS, CPU Feature: AVX512EVEX -func (x Float32x16) Max(y Float32x16) Float32x16 +// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedShiftLeftAndFillUpperFrom(y Uint16x16, z Uint16x16, u Mask16x16) Uint16x16 -// Max computes the maximum of corresponding elements. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VMAXPD, CPU Feature: AVX -func (x Float64x2) Max(y Float64x2) Float64x2 +// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedShiftLeftAndFillUpperFrom(y Uint16x32, z Uint16x32, u Mask16x32) Uint16x32 -// Max computes the maximum of corresponding elements. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VMAXPD, CPU Feature: AVX -func (x Float64x4) Max(y Float64x4) Float64x4 +// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedShiftLeftAndFillUpperFrom(y Uint32x4, z Uint32x4, u Mask32x4) Uint32x4 -// Max computes the maximum of corresponding elements. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VMAXPD, CPU Feature: AVX512EVEX -func (x Float64x8) Max(y Float64x8) Float64x8 +// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedShiftLeftAndFillUpperFrom(y Uint32x8, z Uint32x8, u Mask32x8) Uint32x8 -// Max computes the maximum of corresponding elements. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPMAXSB, CPU Feature: AVX -func (x Int8x16) Max(y Int8x16) Int8x16 +// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedShiftLeftAndFillUpperFrom(y Uint32x16, z Uint32x16, u Mask32x16) Uint32x16 -// Max computes the maximum of corresponding elements. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
// -// Asm: VPMAXSB, CPU Feature: AVX2 -func (x Int8x32) Max(y Int8x32) Int8x32 +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedShiftLeftAndFillUpperFrom(y Uint64x2, z Uint64x2, u Mask64x2) Uint64x2 -// Max computes the maximum of corresponding elements. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPMAXSB, CPU Feature: AVX512EVEX -func (x Int8x64) Max(y Int8x64) Int8x64 +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedShiftLeftAndFillUpperFrom(y Uint64x4, z Uint64x4, u Mask64x4) Uint64x4 -// Max computes the maximum of corresponding elements. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPMAXSW, CPU Feature: AVX -func (x Int16x8) Max(y Int16x8) Int16x8 +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedShiftLeftAndFillUpperFrom(y Uint64x8, z Uint64x8, u Mask64x8) Uint64x8 -// Max computes the maximum of corresponding elements. +/* MaskedShiftRight */ + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPMAXSW, CPU Feature: AVX2 -func (x Int16x16) Max(y Int16x16) Int16x16 +// Asm: VPSRLVW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedShiftRight(y Int16x8, z Mask16x8) Int16x8 -// Max computes the maximum of corresponding elements. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPMAXSW, CPU Feature: AVX512EVEX -func (x Int16x32) Max(y Int16x32) Int16x32 +// Asm: VPSRLVW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedShiftRight(y Int16x16, z Mask16x16) Int16x16 -// Max computes the maximum of corresponding elements. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPMAXSD, CPU Feature: AVX -func (x Int32x4) Max(y Int32x4) Int32x4 +// Asm: VPSRLVW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedShiftRight(y Int16x32, z Mask16x32) Int16x32 -// Max computes the maximum of corresponding elements. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPMAXSD, CPU Feature: AVX2 -func (x Int32x8) Max(y Int32x8) Int32x8 +// Asm: VPSRLVD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedShiftRight(y Int32x4, z Mask32x4) Int32x4 -// Max computes the maximum of corresponding elements. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPMAXSD, CPU Feature: AVX512EVEX -func (x Int32x16) Max(y Int32x16) Int32x16 +// Asm: VPSRLVD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedShiftRight(y Int32x8, z Mask32x8) Int32x8 -// Max computes the maximum of corresponding elements. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. 
// -// Asm: VPMAXSQ, CPU Feature: AVX512EVEX -func (x Int64x2) Max(y Int64x2) Int64x2 +// Asm: VPSRLVD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedShiftRight(y Int32x16, z Mask32x16) Int32x16 -// Max computes the maximum of corresponding elements. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPMAXSQ, CPU Feature: AVX512EVEX -func (x Int64x4) Max(y Int64x4) Int64x4 +// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedShiftRight(y Int64x2, z Mask64x2) Int64x2 -// Max computes the maximum of corresponding elements. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPMAXSQ, CPU Feature: AVX512EVEX -func (x Int64x8) Max(y Int64x8) Int64x8 +// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedShiftRight(y Int64x4, z Mask64x4) Int64x4 -// Max computes the maximum of corresponding elements. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPMAXUB, CPU Feature: AVX -func (x Uint8x16) Max(y Uint8x16) Uint8x16 +// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedShiftRight(y Int64x8, z Mask64x8) Int64x8 -// Max computes the maximum of corresponding elements. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPMAXUB, CPU Feature: AVX2 -func (x Uint8x32) Max(y Uint8x32) Uint8x32 +// Asm: VPSRLVW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedShiftRight(y Uint16x8, z Mask16x8) Uint16x8 -// Max computes the maximum of corresponding elements. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPMAXUB, CPU Feature: AVX512EVEX -func (x Uint8x64) Max(y Uint8x64) Uint8x64 +// Asm: VPSRLVW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedShiftRight(y Uint16x16, z Mask16x16) Uint16x16 -// Max computes the maximum of corresponding elements. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPMAXUW, CPU Feature: AVX -func (x Uint16x8) Max(y Uint16x8) Uint16x8 +// Asm: VPSRLVW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedShiftRight(y Uint16x32, z Mask16x32) Uint16x32 -// Max computes the maximum of corresponding elements. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPMAXUW, CPU Feature: AVX2 -func (x Uint16x16) Max(y Uint16x16) Uint16x16 +// Asm: VPSRLVD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedShiftRight(y Uint32x4, z Mask32x4) Uint32x4 -// Max computes the maximum of corresponding elements. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPMAXUW, CPU Feature: AVX512EVEX -func (x Uint16x32) Max(y Uint16x32) Uint16x32 +// Asm: VPSRLVD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedShiftRight(y Uint32x8, z Mask32x8) Uint32x8 -// Max computes the maximum of corresponding elements. 
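// Editor's sketch, not part of this CL: a masked logical right shift using the
// MaskedShiftRight methods declared above. Names are hypothetical and assume the
// same "internal/simd" setup as the earlier sketch.
func shiftRightUnderMask(x, counts simd.Uint32x4, m simd.Mask32x4) simd.Uint32x4 {
	// Each lane of x is shifted right by the matching lane of counts with zero
	// fill of the emptied upper bits; mask m selects the lanes operated on.
	return x.MaskedShiftRight(counts, m)
}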
+// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPMAXUD, CPU Feature: AVX -func (x Uint32x4) Max(y Uint32x4) Uint32x4 +// Asm: VPSRLVD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedShiftRight(y Uint32x16, z Mask32x16) Uint32x16 -// Max computes the maximum of corresponding elements. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPMAXUD, CPU Feature: AVX2 -func (x Uint32x8) Max(y Uint32x8) Uint32x8 +// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedShiftRight(y Uint64x2, z Mask64x2) Uint64x2 -// Max computes the maximum of corresponding elements. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPMAXUD, CPU Feature: AVX512EVEX -func (x Uint32x16) Max(y Uint32x16) Uint32x16 +// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedShiftRight(y Uint64x4, z Mask64x4) Uint64x4 -// Max computes the maximum of corresponding elements. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPMAXUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) Max(y Uint64x2) Uint64x2 +// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedShiftRight(y Uint64x8, z Mask64x8) Uint64x8 -// Max computes the maximum of corresponding elements. +/* MaskedShiftRightAndFillUpperFrom */ + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPMAXUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) Max(y Uint64x4) Uint64x4 +// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedShiftRightAndFillUpperFrom(y Int16x8, z Int16x8, u Mask16x8) Int16x8 -// Max computes the maximum of corresponding elements. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPMAXUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) Max(y Uint64x8) Uint64x8 +// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedShiftRightAndFillUpperFrom(y Int16x16, z Int16x16, u Mask16x16) Int16x16 -/* Min */ +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedShiftRightAndFillUpperFrom(y Int16x32, z Int16x32, u Mask16x32) Int16x32 -// Min computes the minimum of corresponding elements. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// -// Asm: VMINPS, CPU Feature: AVX -func (x Float32x4) Min(y Float32x4) Float32x4 +// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedShiftRightAndFillUpperFrom(y Int32x4, z Int32x4, u Mask32x4) Int32x4 -// Min computes the minimum of corresponding elements. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VMINPS, CPU Feature: AVX -func (x Float32x8) Min(y Float32x8) Float32x8 +// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedShiftRightAndFillUpperFrom(y Int32x8, z Int32x8, u Mask32x8) Int32x8 -// Min computes the minimum of corresponding elements. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VMINPS, CPU Feature: AVX512EVEX -func (x Float32x16) Min(y Float32x16) Float32x16 +// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedShiftRightAndFillUpperFrom(y Int32x16, z Int32x16, u Mask32x16) Int32x16 -// Min computes the minimum of corresponding elements. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VMINPD, CPU Feature: AVX -func (x Float64x2) Min(y Float64x2) Float64x2 +// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedShiftRightAndFillUpperFrom(y Int64x2, z Int64x2, u Mask64x2) Int64x2 -// Min computes the minimum of corresponding elements. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VMINPD, CPU Feature: AVX -func (x Float64x4) Min(y Float64x4) Float64x4 +// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedShiftRightAndFillUpperFrom(y Int64x4, z Int64x4, u Mask64x4) Int64x4 -// Min computes the minimum of corresponding elements. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VMINPD, CPU Feature: AVX512EVEX -func (x Float64x8) Min(y Float64x8) Float64x8 +// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedShiftRightAndFillUpperFrom(y Int64x8, z Int64x8, u Mask64x8) Int64x8 -// Min computes the minimum of corresponding elements. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPMINSB, CPU Feature: AVX -func (x Int8x16) Min(y Int8x16) Int8x16 +// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedShiftRightAndFillUpperFrom(y Uint16x8, z Uint16x8, u Mask16x8) Uint16x8 -// Min computes the minimum of corresponding elements. 
+// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPMINSB, CPU Feature: AVX2 -func (x Int8x32) Min(y Int8x32) Int8x32 +// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedShiftRightAndFillUpperFrom(y Uint16x16, z Uint16x16, u Mask16x16) Uint16x16 -// Min computes the minimum of corresponding elements. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPMINSB, CPU Feature: AVX512EVEX -func (x Int8x64) Min(y Int8x64) Int8x64 +// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedShiftRightAndFillUpperFrom(y Uint16x32, z Uint16x32, u Mask16x32) Uint16x32 -// Min computes the minimum of corresponding elements. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPMINSW, CPU Feature: AVX -func (x Int16x8) Min(y Int16x8) Int16x8 +// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedShiftRightAndFillUpperFrom(y Uint32x4, z Uint32x4, u Mask32x4) Uint32x4 -// Min computes the minimum of corresponding elements. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPMINSW, CPU Feature: AVX2 -func (x Int16x16) Min(y Int16x16) Int16x16 +// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedShiftRightAndFillUpperFrom(y Uint32x8, z Uint32x8, u Mask32x8) Uint32x8 -// Min computes the minimum of corresponding elements. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPMINSW, CPU Feature: AVX512EVEX -func (x Int16x32) Min(y Int16x32) Int16x32 +// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedShiftRightAndFillUpperFrom(y Uint32x16, z Uint32x16, u Mask32x16) Uint32x16 -// Min computes the minimum of corresponding elements. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPMINSD, CPU Feature: AVX -func (x Int32x4) Min(y Int32x4) Int32x4 +// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedShiftRightAndFillUpperFrom(y Uint64x2, z Uint64x2, u Mask64x2) Uint64x2 -// Min computes the minimum of corresponding elements. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// -// Asm: VPMINSD, CPU Feature: AVX2 -func (x Int32x8) Min(y Int32x8) Int32x8 +// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedShiftRightAndFillUpperFrom(y Uint64x4, z Uint64x4, u Mask64x4) Uint64x4 -// Min computes the minimum of corresponding elements. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPMINSD, CPU Feature: AVX512EVEX -func (x Int32x16) Min(y Int32x16) Int32x16 +// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedShiftRightAndFillUpperFrom(y Uint64x8, z Uint64x8, u Mask64x8) Uint64x8 -// Min computes the minimum of corresponding elements. +/* MaskedShiftRightSignExtended */ + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPMINSQ, CPU Feature: AVX512EVEX -func (x Int64x2) Min(y Int64x2) Int64x2 +// Asm: VPSRAVW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedShiftRightSignExtended(y Int16x8, z Mask16x8) Int16x8 -// Min computes the minimum of corresponding elements. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPMINSQ, CPU Feature: AVX512EVEX -func (x Int64x4) Min(y Int64x4) Int64x4 +// Asm: VPSRAVW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedShiftRightSignExtended(y Int16x16, z Mask16x16) Int16x16 -// Min computes the minimum of corresponding elements. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPMINSQ, CPU Feature: AVX512EVEX -func (x Int64x8) Min(y Int64x8) Int64x8 +// Asm: VPSRAVW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedShiftRightSignExtended(y Int16x32, z Mask16x32) Int16x32 -// Min computes the minimum of corresponding elements. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPMINUB, CPU Feature: AVX -func (x Uint8x16) Min(y Uint8x16) Uint8x16 +// Asm: VPSRAVD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedShiftRightSignExtended(y Int32x4, z Mask32x4) Int32x4 -// Min computes the minimum of corresponding elements. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPMINUB, CPU Feature: AVX2 -func (x Uint8x32) Min(y Uint8x32) Uint8x32 +// Asm: VPSRAVD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedShiftRightSignExtended(y Int32x8, z Mask32x8) Int32x8 -// Min computes the minimum of corresponding elements. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPMINUB, CPU Feature: AVX512EVEX -func (x Uint8x64) Min(y Uint8x64) Uint8x64 +// Asm: VPSRAVD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedShiftRightSignExtended(y Int32x16, z Mask32x16) Int32x16 -// Min computes the minimum of corresponding elements. 
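// Editor's sketch, not part of this CL: per the ShiftRightAndFillUpperFrom doc
// comments above, refilling the vacated upper bits from the same vector yields a
// per-lane rotate right. Helper and parameter names are made up.
func rotateRightUnderMask(x, counts simd.Uint32x4, m simd.Mask32x4) simd.Uint32x4 {
	// Lane by lane: (x >> n) with the top n bits taken from x's own low bits,
	// applied under mask m.
	return x.MaskedShiftRightAndFillUpperFrom(counts, x, m)
}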
+// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPMINUW, CPU Feature: AVX -func (x Uint16x8) Min(y Uint16x8) Uint16x8 +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedShiftRightSignExtended(y Int64x2, z Mask64x2) Int64x2 -// Min computes the minimum of corresponding elements. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPMINUW, CPU Feature: AVX2 -func (x Uint16x16) Min(y Uint16x16) Uint16x16 +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedShiftRightSignExtended(y Int64x4, z Mask64x4) Int64x4 -// Min computes the minimum of corresponding elements. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPMINUW, CPU Feature: AVX512EVEX -func (x Uint16x32) Min(y Uint16x32) Uint16x32 +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedShiftRightSignExtended(y Int64x8, z Mask64x8) Int64x8 -// Min computes the minimum of corresponding elements. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPMINUD, CPU Feature: AVX -func (x Uint32x4) Min(y Uint32x4) Uint32x4 +// Asm: VPSRAVW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedShiftRightSignExtended(y Uint16x8, z Mask16x8) Uint16x8 -// Min computes the minimum of corresponding elements. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPMINUD, CPU Feature: AVX2 -func (x Uint32x8) Min(y Uint32x8) Uint32x8 +// Asm: VPSRAVW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedShiftRightSignExtended(y Uint16x16, z Mask16x16) Uint16x16 -// Min computes the minimum of corresponding elements. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPMINUD, CPU Feature: AVX512EVEX -func (x Uint32x16) Min(y Uint32x16) Uint32x16 +// Asm: VPSRAVW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedShiftRightSignExtended(y Uint16x32, z Mask16x32) Uint16x32 -// Min computes the minimum of corresponding elements. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPMINUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) Min(y Uint64x2) Uint64x2 +// Asm: VPSRAVD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedShiftRightSignExtended(y Uint32x4, z Mask32x4) Uint32x4 -// Min computes the minimum of corresponding elements. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPMINUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) Min(y Uint64x4) Uint64x4 +// Asm: VPSRAVD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedShiftRightSignExtended(y Uint32x8, z Mask32x8) Uint32x8 -// Min computes the minimum of corresponding elements. 
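// Editor's sketch, not part of this CL: the sign-extending (arithmetic) shift
// declared above, e.g. a masked divide-by-power-of-two that rounds toward
// negative infinity. Names are hypothetical.
func arithmeticShiftUnderMask(x, counts simd.Int32x4, m simd.Mask32x4) simd.Int32x4 {
	// Each lane of x is shifted right by the matching lane of counts; the
	// emptied upper bits are copies of the sign bit. Applied under mask m.
	return x.MaskedShiftRightSignExtended(counts, m)
}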
+// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPMINUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) Min(y Uint64x8) Uint64x8 - -/* Mul */ +// Asm: VPSRAVD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedShiftRightSignExtended(y Uint32x16, z Mask32x16) Uint32x16 -// Mul multiplies corresponding elements of two vectors. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VMULPS, CPU Feature: AVX -func (x Float32x4) Mul(y Float32x4) Float32x4 +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedShiftRightSignExtended(y Uint64x2, z Mask64x2) Uint64x2 -// Mul multiplies corresponding elements of two vectors. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VMULPS, CPU Feature: AVX -func (x Float32x8) Mul(y Float32x8) Float32x8 +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedShiftRightSignExtended(y Uint64x4, z Mask64x4) Uint64x4 -// Mul multiplies corresponding elements of two vectors, masked. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VMULPS, CPU Feature: AVX512EVEX -func (x Float32x16) Mul(y Float32x16) Float32x16 +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedShiftRightSignExtended(y Uint64x8, z Mask64x8) Uint64x8 -// Mul multiplies corresponding elements of two vectors. -// -// Asm: VMULPD, CPU Feature: AVX -func (x Float64x2) Mul(y Float64x2) Float64x2 +/* MaskedSqrt */ -// Mul multiplies corresponding elements of two vectors. +// Sqrt computes the square root of each element. // -// Asm: VMULPD, CPU Feature: AVX -func (x Float64x4) Mul(y Float64x4) Float64x4 +// Asm: VSQRTPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedSqrt(y Mask32x4) Float32x4 -// Mul multiplies corresponding elements of two vectors, masked. +// Sqrt computes the square root of each element. // -// Asm: VMULPD, CPU Feature: AVX512EVEX -func (x Float64x8) Mul(y Float64x8) Float64x8 - -/* MulByPowOf2 */ +// Asm: VSQRTPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedSqrt(y Mask32x8) Float32x8 -// MulByPowOf2 multiplies elements by a power of 2. +// Sqrt computes the square root of each element. // -// Asm: VSCALEFPS, CPU Feature: AVX512EVEX -func (x Float32x4) MulByPowOf2(y Float32x4) Float32x4 +// Asm: VSQRTPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedSqrt(y Mask32x16) Float32x16 -// MulByPowOf2 multiplies elements by a power of 2. +// Sqrt computes the square root of each element. // -// Asm: VSCALEFPS, CPU Feature: AVX512EVEX -func (x Float32x8) MulByPowOf2(y Float32x8) Float32x8 +// Asm: VSQRTPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedSqrt(y Mask64x2) Float64x2 -// MulByPowOf2 multiplies elements by a power of 2. +// Sqrt computes the square root of each element. // -// Asm: VSCALEFPS, CPU Feature: AVX512EVEX -func (x Float32x16) MulByPowOf2(y Float32x16) Float32x16 +// Asm: VSQRTPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedSqrt(y Mask64x4) Float64x4 -// MulByPowOf2 multiplies elements by a power of 2. +// Sqrt computes the square root of each element. 
// -// Asm: VSCALEFPD, CPU Feature: AVX512EVEX -func (x Float64x2) MulByPowOf2(y Float64x2) Float64x2 +// Asm: VSQRTPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedSqrt(y Mask64x8) Float64x8 -// MulByPowOf2 multiplies elements by a power of 2. -// -// Asm: VSCALEFPD, CPU Feature: AVX512EVEX -func (x Float64x4) MulByPowOf2(y Float64x4) Float64x4 +/* MaskedSub */ -// MulByPowOf2 multiplies elements by a power of 2. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VSCALEFPD, CPU Feature: AVX512EVEX -func (x Float64x8) MulByPowOf2(y Float64x8) Float64x8 - -/* MulEvenWiden */ +// Asm: VSUBPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedSub(y Float32x4, z Mask32x4) Float32x4 -// MulEvenWiden multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VPMULDQ, CPU Feature: AVX -func (x Int32x4) MulEvenWiden(y Int32x4) Int64x2 +// Asm: VSUBPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedSub(y Float32x8, z Mask32x8) Float32x8 -// MulEvenWiden multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VPMULDQ, CPU Feature: AVX2 -func (x Int32x8) MulEvenWiden(y Int32x8) Int64x4 +// Asm: VSUBPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedSub(y Float32x16, z Mask32x16) Float32x16 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VPMULDQ, CPU Feature: AVX512EVEX -func (x Int64x2) MulEvenWiden(y Int64x2) Int64x2 +// Asm: VSUBPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedSub(y Float64x2, z Mask64x2) Float64x2 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VPMULDQ, CPU Feature: AVX512EVEX -func (x Int64x4) MulEvenWiden(y Int64x4) Int64x4 +// Asm: VSUBPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedSub(y Float64x4, z Mask64x4) Float64x4 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VPMULDQ, CPU Feature: AVX512EVEX -func (x Int64x8) MulEvenWiden(y Int64x8) Int64x8 +// Asm: VSUBPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedSub(y Float64x8, z Mask64x8) Float64x8 -// MulEvenWiden multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VPMULUDQ, CPU Feature: AVX -func (x Uint32x4) MulEvenWiden(y Uint32x4) Uint64x2 +// Asm: VPSUBB, CPU Feature: AVX512EVEX +func (x Int8x16) MaskedSub(y Int8x16, z Mask8x16) Int8x16 -// MulEvenWiden multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VPMULUDQ, CPU Feature: AVX2 -func (x Uint32x8) MulEvenWiden(y Uint32x8) Uint64x4 +// Asm: VPSUBB, CPU Feature: AVX512EVEX +func (x Int8x32) MaskedSub(y Int8x32, z Mask8x32) Int8x32 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. +// Sub subtracts corresponding elements of two vectors. 
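// Editor's sketch, not part of this CL: chaining the masked float operations
// declared above (MaskedSub, MaskedSqrt). Names are hypothetical; whether
// masked-off lanes are zeroed or merged is not documented in this file.
func subThenSqrtUnderMask(a, b simd.Float64x4, m simd.Mask64x4) simd.Float64x4 {
	// Lane-wise square root of (a - b), with both steps applied under the
	// same mask m.
	return a.MaskedSub(b, m).MaskedSqrt(m)
}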
// -// Asm: VPMULUDQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MulEvenWiden(y Uint64x2) Uint64x2 +// Asm: VPSUBB, CPU Feature: AVX512EVEX +func (x Int8x64) MaskedSub(y Int8x64, z Mask8x64) Int8x64 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VPMULUDQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MulEvenWiden(y Uint64x4) Uint64x4 +// Asm: VPSUBW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedSub(y Int16x8, z Mask16x8) Int16x8 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VPMULUDQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MulEvenWiden(y Uint64x8) Uint64x8 +// Asm: VPSUBW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedSub(y Int16x16, z Mask16x16) Int16x16 -/* MulHigh */ +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedSub(y Int16x32, z Mask16x32) Int16x32 -// MulHigh multiplies elements and stores the high part of the result. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VPMULHW, CPU Feature: AVX -func (x Int16x8) MulHigh(y Int16x8) Int16x8 +// Asm: VPSUBD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedSub(y Int32x4, z Mask32x4) Int32x4 -// MulHigh multiplies elements and stores the high part of the result. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VPMULHW, CPU Feature: AVX2 -func (x Int16x16) MulHigh(y Int16x16) Int16x16 +// Asm: VPSUBD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedSub(y Int32x8, z Mask32x8) Int32x8 -// MulHigh multiplies elements and stores the high part of the result, masked. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VPMULHW, CPU Feature: AVX512EVEX -func (x Int16x32) MulHigh(y Int16x32) Int16x32 +// Asm: VPSUBD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedSub(y Int32x16, z Mask32x16) Int32x16 -// MulHigh multiplies elements and stores the high part of the result. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VPMULHUW, CPU Feature: AVX -func (x Uint16x8) MulHigh(y Uint16x8) Uint16x8 +// Asm: VPSUBQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedSub(y Int64x2, z Mask64x2) Int64x2 -// MulHigh multiplies elements and stores the high part of the result. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VPMULHUW, CPU Feature: AVX2 -func (x Uint16x16) MulHigh(y Uint16x16) Uint16x16 +// Asm: VPSUBQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedSub(y Int64x4, z Mask64x4) Int64x4 -// MulHigh multiplies elements and stores the high part of the result, masked. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VPMULHUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MulHigh(y Uint16x32) Uint16x32 +// Asm: VPSUBQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedSub(y Int64x8, z Mask64x8) Int64x8 -/* MulLow */ +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedSub(y Uint8x16, z Mask8x16) Uint8x16 -// MulLow multiplies elements and stores the low part of the result. +// Sub subtracts corresponding elements of two vectors. 
// -// Asm: VPMULLW, CPU Feature: AVX -func (x Int16x8) MulLow(y Int16x8) Int16x8 +// Asm: VPSUBB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaskedSub(y Uint8x32, z Mask8x32) Uint8x32 -// MulLow multiplies elements and stores the low part of the result. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VPMULLW, CPU Feature: AVX2 -func (x Int16x16) MulLow(y Int16x16) Int16x16 +// Asm: VPSUBB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedSub(y Uint8x64, z Mask8x64) Uint8x64 -// MulLow multiplies elements and stores the low part of the result, masked. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VPMULLW, CPU Feature: AVX512EVEX -func (x Int16x32) MulLow(y Int16x32) Int16x32 +// Asm: VPSUBW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedSub(y Uint16x8, z Mask16x8) Uint16x8 -// MulLow multiplies elements and stores the low part of the result. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VPMULLD, CPU Feature: AVX -func (x Int32x4) MulLow(y Int32x4) Int32x4 +// Asm: VPSUBW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedSub(y Uint16x16, z Mask16x16) Uint16x16 -// MulLow multiplies elements and stores the low part of the result. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VPMULLD, CPU Feature: AVX2 -func (x Int32x8) MulLow(y Int32x8) Int32x8 +// Asm: VPSUBW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedSub(y Uint16x32, z Mask16x32) Uint16x32 -// MulLow multiplies elements and stores the low part of the result, masked. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VPMULLD, CPU Feature: AVX512EVEX -func (x Int32x16) MulLow(y Int32x16) Int32x16 +// Asm: VPSUBD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedSub(y Uint32x4, z Mask32x4) Uint32x4 -// MulLow multiplies elements and stores the low part of the result, masked. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VPMULLQ, CPU Feature: AVX512EVEX -func (x Int64x2) MulLow(y Int64x2) Int64x2 +// Asm: VPSUBD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedSub(y Uint32x8, z Mask32x8) Uint32x8 -// MulLow multiplies elements and stores the low part of the result, masked. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VPMULLQ, CPU Feature: AVX512EVEX -func (x Int64x4) MulLow(y Int64x4) Int64x4 +// Asm: VPSUBD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedSub(y Uint32x16, z Mask32x16) Uint32x16 -// MulLow multiplies elements and stores the low part of the result, masked. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VPMULLQ, CPU Feature: AVX512EVEX -func (x Int64x8) MulLow(y Int64x8) Int64x8 +// Asm: VPSUBQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedSub(y Uint64x2, z Mask64x2) Uint64x2 -/* NotEqual */ +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedSub(y Uint64x4, z Mask64x4) Uint64x4 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedSub(y Uint64x8, z Mask64x8) Uint64x8 + +/* MaskedTruncSuppressExceptionWithPrecision */ + +// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. +// Const Immediate = 11. 
+// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 + +// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. +// Const Immediate = 11. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 + +// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. +// Const Immediate = 11. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 + +// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. +// Const Immediate = 11. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 + +// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. +// Const Immediate = 11. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 + +// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. +// Const Immediate = 11. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 + +/* MaskedTruncWithPrecision */ + +// TruncWithPrecision truncates elements with specified precision. +// Const Immediate = 3. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedTruncWithPrecision(imm uint8, y Mask32x4) Float32x4 + +// TruncWithPrecision truncates elements with specified precision. +// Const Immediate = 3. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedTruncWithPrecision(imm uint8, y Mask32x8) Float32x8 + +// TruncWithPrecision truncates elements with specified precision. +// Const Immediate = 3. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedTruncWithPrecision(imm uint8, y Mask32x16) Float32x16 + +// TruncWithPrecision truncates elements with specified precision. +// Const Immediate = 3. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedTruncWithPrecision(imm uint8, y Mask64x2) Float64x2 + +// TruncWithPrecision truncates elements with specified precision. +// Const Immediate = 3. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedTruncWithPrecision(imm uint8, y Mask64x4) Float64x4 + +// TruncWithPrecision truncates elements with specified precision. +// Const Immediate = 3. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedTruncWithPrecision(imm uint8, y Mask64x8) Float64x8 + +/* MaskedUnsignedSignedQuadDotProdAccumulate */ + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16, u Mask32x4) Int32x4 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
+// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32, u Mask32x8) Int32x8 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64, u Mask32x16) Int32x16 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16, u Mask32x4) Uint32x4 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32, u Mask32x8) Uint32x8 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64, u Mask32x16) Uint32x16 + +/* MaskedXor */ + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VXORPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedXor(y Float32x4, z Mask32x4) Float32x4 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VXORPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedXor(y Float32x8, z Mask32x8) Float32x8 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VXORPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedXor(y Float32x16, z Mask32x16) Float32x16 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VXORPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedXor(y Float64x2, z Mask64x2) Float64x2 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VXORPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedXor(y Float64x4, z Mask64x4) Float64x4 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VXORPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedXor(y Float64x8, z Mask64x8) Float64x8 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedXor(y Int32x4, z Mask32x4) Int32x4 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedXor(y Int32x8, z Mask32x8) Int32x8 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedXor(y Int32x16, z Mask32x16) Int32x16 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedXor(y Int64x2, z Mask64x2) Int64x2 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedXor(y Int64x4, z Mask64x4) Int64x4 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedXor(y Int64x8, z Mask64x8) Int64x8 + +// Xor performs a masked bitwise XOR operation between two vectors. 
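+//
+// A minimal usage sketch (illustrative only; assumes the package is imported
+// as "simd"): the mask produced by a comparison selects which lanes the
+// masked operation writes.
+//
+//    func xorWhereUnequal(a, b simd.Uint32x4) simd.Uint32x4 {
+//        m := a.NotEqual(b)       // Mask32x4: lanes where a and b differ
+//        return a.MaskedXor(b, m) // XOR applied only in those lanes
+//    }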
+// +// Asm: VPXORD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedXor(y Uint32x4, z Mask32x4) Uint32x4 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedXor(y Uint32x8, z Mask32x8) Uint32x8 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedXor(y Uint32x16, z Mask32x16) Uint32x16 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedXor(y Uint64x2, z Mask64x2) Uint64x2 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedXor(y Uint64x4, z Mask64x4) Uint64x4 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedXor(y Uint64x8, z Mask64x8) Uint64x8 + +/* Max */ + +// Max computes the maximum of corresponding elements. +// +// Asm: VMAXPS, CPU Feature: AVX +func (x Float32x4) Max(y Float32x4) Float32x4 + +// Max computes the maximum of corresponding elements. +// +// Asm: VMAXPS, CPU Feature: AVX +func (x Float32x8) Max(y Float32x8) Float32x8 + +// Max computes the maximum of corresponding elements. +// +// Asm: VMAXPS, CPU Feature: AVX512EVEX +func (x Float32x16) Max(y Float32x16) Float32x16 + +// Max computes the maximum of corresponding elements. +// +// Asm: VMAXPD, CPU Feature: AVX +func (x Float64x2) Max(y Float64x2) Float64x2 + +// Max computes the maximum of corresponding elements. +// +// Asm: VMAXPD, CPU Feature: AVX +func (x Float64x4) Max(y Float64x4) Float64x4 + +// Max computes the maximum of corresponding elements. +// +// Asm: VMAXPD, CPU Feature: AVX512EVEX +func (x Float64x8) Max(y Float64x8) Float64x8 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXSB, CPU Feature: AVX +func (x Int8x16) Max(y Int8x16) Int8x16 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXSB, CPU Feature: AVX2 +func (x Int8x32) Max(y Int8x32) Int8x32 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXSB, CPU Feature: AVX512EVEX +func (x Int8x64) Max(y Int8x64) Int8x64 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXSW, CPU Feature: AVX +func (x Int16x8) Max(y Int16x8) Int16x8 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXSW, CPU Feature: AVX2 +func (x Int16x16) Max(y Int16x16) Int16x16 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXSW, CPU Feature: AVX512EVEX +func (x Int16x32) Max(y Int16x32) Int16x32 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXSD, CPU Feature: AVX +func (x Int32x4) Max(y Int32x4) Int32x4 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXSD, CPU Feature: AVX2 +func (x Int32x8) Max(y Int32x8) Int32x8 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXSD, CPU Feature: AVX512EVEX +func (x Int32x16) Max(y Int32x16) Int32x16 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXSQ, CPU Feature: AVX512EVEX +func (x Int64x2) Max(y Int64x2) Int64x2 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXSQ, CPU Feature: AVX512EVEX +func (x Int64x4) Max(y Int64x4) Int64x4 + +// Max computes the maximum of corresponding elements. 
+// +// Asm: VPMAXSQ, CPU Feature: AVX512EVEX +func (x Int64x8) Max(y Int64x8) Int64x8 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXUB, CPU Feature: AVX +func (x Uint8x16) Max(y Uint8x16) Uint8x16 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXUB, CPU Feature: AVX2 +func (x Uint8x32) Max(y Uint8x32) Uint8x32 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXUB, CPU Feature: AVX512EVEX +func (x Uint8x64) Max(y Uint8x64) Uint8x64 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXUW, CPU Feature: AVX +func (x Uint16x8) Max(y Uint16x8) Uint16x8 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXUW, CPU Feature: AVX2 +func (x Uint16x16) Max(y Uint16x16) Uint16x16 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXUW, CPU Feature: AVX512EVEX +func (x Uint16x32) Max(y Uint16x32) Uint16x32 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXUD, CPU Feature: AVX +func (x Uint32x4) Max(y Uint32x4) Uint32x4 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXUD, CPU Feature: AVX2 +func (x Uint32x8) Max(y Uint32x8) Uint32x8 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXUD, CPU Feature: AVX512EVEX +func (x Uint32x16) Max(y Uint32x16) Uint32x16 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) Max(y Uint64x2) Uint64x2 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) Max(y Uint64x4) Uint64x4 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) Max(y Uint64x8) Uint64x8 + +/* Min */ + +// Min computes the minimum of corresponding elements. +// +// Asm: VMINPS, CPU Feature: AVX +func (x Float32x4) Min(y Float32x4) Float32x4 + +// Min computes the minimum of corresponding elements. +// +// Asm: VMINPS, CPU Feature: AVX +func (x Float32x8) Min(y Float32x8) Float32x8 + +// Min computes the minimum of corresponding elements. +// +// Asm: VMINPS, CPU Feature: AVX512EVEX +func (x Float32x16) Min(y Float32x16) Float32x16 + +// Min computes the minimum of corresponding elements. +// +// Asm: VMINPD, CPU Feature: AVX +func (x Float64x2) Min(y Float64x2) Float64x2 + +// Min computes the minimum of corresponding elements. +// +// Asm: VMINPD, CPU Feature: AVX +func (x Float64x4) Min(y Float64x4) Float64x4 + +// Min computes the minimum of corresponding elements. +// +// Asm: VMINPD, CPU Feature: AVX512EVEX +func (x Float64x8) Min(y Float64x8) Float64x8 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSB, CPU Feature: AVX +func (x Int8x16) Min(y Int8x16) Int8x16 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSB, CPU Feature: AVX2 +func (x Int8x32) Min(y Int8x32) Int8x32 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSB, CPU Feature: AVX512EVEX +func (x Int8x64) Min(y Int8x64) Int8x64 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSW, CPU Feature: AVX +func (x Int16x8) Min(y Int16x8) Int16x8 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSW, CPU Feature: AVX2 +func (x Int16x16) Min(y Int16x16) Int16x16 + +// Min computes the minimum of corresponding elements. 
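+//
+// A small sketch of the usual clamp idiom built from Max and Min
+// (illustrative only; assumes the package is imported as "simd"):
+//
+//    func clamp(v, lo, hi simd.Int16x32) simd.Int16x32 {
+//        return v.Max(lo).Min(hi) // per lane: min(max(v, lo), hi)
+//    }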
+// +// Asm: VPMINSW, CPU Feature: AVX512EVEX +func (x Int16x32) Min(y Int16x32) Int16x32 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSD, CPU Feature: AVX +func (x Int32x4) Min(y Int32x4) Int32x4 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSD, CPU Feature: AVX2 +func (x Int32x8) Min(y Int32x8) Int32x8 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSD, CPU Feature: AVX512EVEX +func (x Int32x16) Min(y Int32x16) Int32x16 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSQ, CPU Feature: AVX512EVEX +func (x Int64x2) Min(y Int64x2) Int64x2 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSQ, CPU Feature: AVX512EVEX +func (x Int64x4) Min(y Int64x4) Int64x4 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSQ, CPU Feature: AVX512EVEX +func (x Int64x8) Min(y Int64x8) Int64x8 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUB, CPU Feature: AVX +func (x Uint8x16) Min(y Uint8x16) Uint8x16 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUB, CPU Feature: AVX2 +func (x Uint8x32) Min(y Uint8x32) Uint8x32 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUB, CPU Feature: AVX512EVEX +func (x Uint8x64) Min(y Uint8x64) Uint8x64 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUW, CPU Feature: AVX +func (x Uint16x8) Min(y Uint16x8) Uint16x8 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUW, CPU Feature: AVX2 +func (x Uint16x16) Min(y Uint16x16) Uint16x16 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUW, CPU Feature: AVX512EVEX +func (x Uint16x32) Min(y Uint16x32) Uint16x32 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUD, CPU Feature: AVX +func (x Uint32x4) Min(y Uint32x4) Uint32x4 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUD, CPU Feature: AVX2 +func (x Uint32x8) Min(y Uint32x8) Uint32x8 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUD, CPU Feature: AVX512EVEX +func (x Uint32x16) Min(y Uint32x16) Uint32x16 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) Min(y Uint64x2) Uint64x2 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) Min(y Uint64x4) Uint64x4 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) Min(y Uint64x8) Uint64x8 + +/* Mul */ + +// Mul multiplies corresponding elements of two vectors. +// +// Asm: VMULPS, CPU Feature: AVX +func (x Float32x4) Mul(y Float32x4) Float32x4 + +// Mul multiplies corresponding elements of two vectors. +// +// Asm: VMULPS, CPU Feature: AVX +func (x Float32x8) Mul(y Float32x8) Float32x8 + +// Mul multiplies corresponding elements of two vectors, masked. +// +// Asm: VMULPS, CPU Feature: AVX512EVEX +func (x Float32x16) Mul(y Float32x16) Float32x16 + +// Mul multiplies corresponding elements of two vectors. +// +// Asm: VMULPD, CPU Feature: AVX +func (x Float64x2) Mul(y Float64x2) Float64x2 + +// Mul multiplies corresponding elements of two vectors. +// +// Asm: VMULPD, CPU Feature: AVX +func (x Float64x4) Mul(y Float64x4) Float64x4 + +// Mul multiplies corresponding elements of two vectors, masked. 
+// +// Asm: VMULPD, CPU Feature: AVX512EVEX +func (x Float64x8) Mul(y Float64x8) Float64x8 + +/* MulByPowOf2 */ + +// MulByPowOf2 multiplies elements by a power of 2. +// +// Asm: VSCALEFPS, CPU Feature: AVX512EVEX +func (x Float32x4) MulByPowOf2(y Float32x4) Float32x4 + +// MulByPowOf2 multiplies elements by a power of 2. +// +// Asm: VSCALEFPS, CPU Feature: AVX512EVEX +func (x Float32x8) MulByPowOf2(y Float32x8) Float32x8 + +// MulByPowOf2 multiplies elements by a power of 2. +// +// Asm: VSCALEFPS, CPU Feature: AVX512EVEX +func (x Float32x16) MulByPowOf2(y Float32x16) Float32x16 + +// MulByPowOf2 multiplies elements by a power of 2. +// +// Asm: VSCALEFPD, CPU Feature: AVX512EVEX +func (x Float64x2) MulByPowOf2(y Float64x2) Float64x2 + +// MulByPowOf2 multiplies elements by a power of 2. +// +// Asm: VSCALEFPD, CPU Feature: AVX512EVEX +func (x Float64x4) MulByPowOf2(y Float64x4) Float64x4 + +// MulByPowOf2 multiplies elements by a power of 2. +// +// Asm: VSCALEFPD, CPU Feature: AVX512EVEX +func (x Float64x8) MulByPowOf2(y Float64x8) Float64x8 + +/* MulEvenWiden */ + +// MulEvenWiden multiplies even-indexed elements, widening the result. +// Result[i] = v1.Even[i] * v2.Even[i]. +// +// Asm: VPMULDQ, CPU Feature: AVX +func (x Int32x4) MulEvenWiden(y Int32x4) Int64x2 + +// MulEvenWiden multiplies even-indexed elements, widening the result. +// Result[i] = v1.Even[i] * v2.Even[i]. +// +// Asm: VPMULDQ, CPU Feature: AVX2 +func (x Int32x8) MulEvenWiden(y Int32x8) Int64x4 + +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. +// +// Asm: VPMULDQ, CPU Feature: AVX512EVEX +func (x Int64x2) MulEvenWiden(y Int64x2) Int64x2 + +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. +// +// Asm: VPMULDQ, CPU Feature: AVX512EVEX +func (x Int64x4) MulEvenWiden(y Int64x4) Int64x4 + +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. +// +// Asm: VPMULDQ, CPU Feature: AVX512EVEX +func (x Int64x8) MulEvenWiden(y Int64x8) Int64x8 + +// MulEvenWiden multiplies even-indexed elements, widening the result. +// Result[i] = v1.Even[i] * v2.Even[i]. +// +// Asm: VPMULUDQ, CPU Feature: AVX +func (x Uint32x4) MulEvenWiden(y Uint32x4) Uint64x2 + +// MulEvenWiden multiplies even-indexed elements, widening the result. +// Result[i] = v1.Even[i] * v2.Even[i]. +// +// Asm: VPMULUDQ, CPU Feature: AVX2 +func (x Uint32x8) MulEvenWiden(y Uint32x8) Uint64x4 + +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. +// +// Asm: VPMULUDQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MulEvenWiden(y Uint64x2) Uint64x2 + +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. +// +// Asm: VPMULUDQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MulEvenWiden(y Uint64x4) Uint64x4 + +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. +// +// Asm: VPMULUDQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MulEvenWiden(y Uint64x8) Uint64x8 + +/* MulHigh */ + +// MulHigh multiplies elements and stores the high part of the result. +// +// Asm: VPMULHW, CPU Feature: AVX +func (x Int16x8) MulHigh(y Int16x8) Int16x8 + +// MulHigh multiplies elements and stores the high part of the result. 
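+//
+// A sketch of splitting the full 32-bit product of two 16-bit lanes into its
+// halves (illustrative only; assumes the package is imported as "simd"):
+//
+//    func productHalves(a, b simd.Int16x16) (hi, lo simd.Int16x16) {
+//        return a.MulHigh(b), a.MulLow(b) // high and low 16 bits of each product
+//    }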
+// +// Asm: VPMULHW, CPU Feature: AVX2 +func (x Int16x16) MulHigh(y Int16x16) Int16x16 + +// MulHigh multiplies elements and stores the high part of the result, masked. +// +// Asm: VPMULHW, CPU Feature: AVX512EVEX +func (x Int16x32) MulHigh(y Int16x32) Int16x32 + +// MulHigh multiplies elements and stores the high part of the result. +// +// Asm: VPMULHUW, CPU Feature: AVX +func (x Uint16x8) MulHigh(y Uint16x8) Uint16x8 + +// MulHigh multiplies elements and stores the high part of the result. +// +// Asm: VPMULHUW, CPU Feature: AVX2 +func (x Uint16x16) MulHigh(y Uint16x16) Uint16x16 + +// MulHigh multiplies elements and stores the high part of the result, masked. +// +// Asm: VPMULHUW, CPU Feature: AVX512EVEX +func (x Uint16x32) MulHigh(y Uint16x32) Uint16x32 + +/* MulLow */ + +// MulLow multiplies elements and stores the low part of the result. +// +// Asm: VPMULLW, CPU Feature: AVX +func (x Int16x8) MulLow(y Int16x8) Int16x8 + +// MulLow multiplies elements and stores the low part of the result. +// +// Asm: VPMULLW, CPU Feature: AVX2 +func (x Int16x16) MulLow(y Int16x16) Int16x16 + +// MulLow multiplies elements and stores the low part of the result, masked. +// +// Asm: VPMULLW, CPU Feature: AVX512EVEX +func (x Int16x32) MulLow(y Int16x32) Int16x32 + +// MulLow multiplies elements and stores the low part of the result. +// +// Asm: VPMULLD, CPU Feature: AVX +func (x Int32x4) MulLow(y Int32x4) Int32x4 + +// MulLow multiplies elements and stores the low part of the result. +// +// Asm: VPMULLD, CPU Feature: AVX2 +func (x Int32x8) MulLow(y Int32x8) Int32x8 + +// MulLow multiplies elements and stores the low part of the result, masked. +// +// Asm: VPMULLD, CPU Feature: AVX512EVEX +func (x Int32x16) MulLow(y Int32x16) Int32x16 + +// MulLow multiplies elements and stores the low part of the result, masked. +// +// Asm: VPMULLQ, CPU Feature: AVX512EVEX +func (x Int64x2) MulLow(y Int64x2) Int64x2 + +// MulLow multiplies elements and stores the low part of the result, masked. +// +// Asm: VPMULLQ, CPU Feature: AVX512EVEX +func (x Int64x4) MulLow(y Int64x4) Int64x4 + +// MulLow multiplies elements and stores the low part of the result, masked. +// +// Asm: VPMULLQ, CPU Feature: AVX512EVEX +func (x Int64x8) MulLow(y Int64x8) Int64x8 + +/* NotEqual */ + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x4) NotEqual(y Float32x4) Mask32x4 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x8) NotEqual(y Float32x8) Mask32x8 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) NotEqual(y Float32x16) Mask32x16 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x2) NotEqual(y Float64x2) Mask64x2 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x4) NotEqual(y Float64x4) Mask64x4 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) NotEqual(y Float64x8) Mask64x8 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x16) NotEqual(y Int8x16) Mask8x16 + +// NotEqual compares for inequality. +// Const Immediate = 4. 
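+//
+// NotEqual yields a Mask value rather than a vector; a sketch of feeding it
+// into a masked operation (illustrative only; it uses the unsigned byte shape
+// because its masked form is declared in this file, and assumes the package
+// is imported as "simd"):
+//
+//    func subWhereUnequal(a, b simd.Uint8x32) simd.Uint8x32 {
+//        m := a.NotEqual(b)       // Mask8x32: lanes where a and b differ
+//        return a.MaskedSub(b, m) // subtract only in those lanes
+//    }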
+// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x32) NotEqual(y Int8x32) Mask8x32 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x64) NotEqual(y Int8x64) Mask8x64 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x8) NotEqual(y Int16x8) Mask16x8 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x16) NotEqual(y Int16x16) Mask16x16 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x32) NotEqual(y Int16x32) Mask16x32 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x4) NotEqual(y Int32x4) Mask32x4 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x8) NotEqual(y Int32x8) Mask32x8 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x16) NotEqual(y Int32x16) Mask32x16 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x2) NotEqual(y Int64x2) Mask64x2 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x4) NotEqual(y Int64x4) Mask64x4 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x8) NotEqual(y Int64x8) Mask64x8 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x16) NotEqual(y Uint8x16) Mask8x16 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x32) NotEqual(y Uint8x32) Mask8x32 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x64) NotEqual(y Uint8x64) Mask8x64 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x8) NotEqual(y Uint16x8) Mask16x8 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x16) NotEqual(y Uint16x16) Mask16x16 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x32) NotEqual(y Uint16x32) Mask16x32 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x4) NotEqual(y Uint32x4) Mask32x4 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x8) NotEqual(y Uint32x8) Mask32x8 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x16) NotEqual(y Uint32x16) Mask32x16 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) NotEqual(y Uint64x2) Mask64x2 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) NotEqual(y Uint64x4) Mask64x4 + +// NotEqual compares for inequality. +// Const Immediate = 4. 
+// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) NotEqual(y Uint64x8) Mask64x8 + +/* Or */ + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VORPS, CPU Feature: AVX +func (x Float32x4) Or(y Float32x4) Float32x4 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VORPS, CPU Feature: AVX +func (x Float32x8) Or(y Float32x8) Float32x8 + +// Or performs a masked bitwise OR operation between two vectors. +// +// Asm: VORPS, CPU Feature: AVX512EVEX +func (x Float32x16) Or(y Float32x16) Float32x16 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VORPD, CPU Feature: AVX +func (x Float64x2) Or(y Float64x2) Float64x2 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VORPD, CPU Feature: AVX +func (x Float64x4) Or(y Float64x4) Float64x4 + +// Or performs a masked bitwise OR operation between two vectors. +// +// Asm: VORPD, CPU Feature: AVX512EVEX +func (x Float64x8) Or(y Float64x8) Float64x8 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX +func (x Int8x16) Or(y Int8x16) Int8x16 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX2 +func (x Int8x32) Or(y Int8x32) Int8x32 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX +func (x Int16x8) Or(y Int16x8) Int16x8 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX2 +func (x Int16x16) Or(y Int16x16) Int16x16 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX +func (x Int32x4) Or(y Int32x4) Int32x4 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX2 +func (x Int32x8) Or(y Int32x8) Int32x8 + +// Or performs a masked bitwise OR operation between two vectors. +// +// Asm: VPORD, CPU Feature: AVX512EVEX +func (x Int32x16) Or(y Int32x16) Int32x16 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX +func (x Int64x2) Or(y Int64x2) Int64x2 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX2 +func (x Int64x4) Or(y Int64x4) Int64x4 + +// Or performs a masked bitwise OR operation between two vectors. +// +// Asm: VPORQ, CPU Feature: AVX512EVEX +func (x Int64x8) Or(y Int64x8) Int64x8 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX +func (x Uint8x16) Or(y Uint8x16) Uint8x16 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX2 +func (x Uint8x32) Or(y Uint8x32) Uint8x32 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX +func (x Uint16x8) Or(y Uint16x8) Uint16x8 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX2 +func (x Uint16x16) Or(y Uint16x16) Uint16x16 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX +func (x Uint32x4) Or(y Uint32x4) Uint32x4 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX2 +func (x Uint32x8) Or(y Uint32x8) Uint32x8 + +// Or performs a masked bitwise OR operation between two vectors. +// +// Asm: VPORD, CPU Feature: AVX512EVEX +func (x Uint32x16) Or(y Uint32x16) Uint32x16 + +// Or performs a bitwise OR operation between two vectors. 
+// +// Asm: VPOR, CPU Feature: AVX +func (x Uint64x2) Or(y Uint64x2) Uint64x2 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX2 +func (x Uint64x4) Or(y Uint64x4) Uint64x4 + +// Or performs a masked bitwise OR operation between two vectors. +// +// Asm: VPORQ, CPU Feature: AVX512EVEX +func (x Uint64x8) Or(y Uint64x8) Uint64x8 + +/* PairDotProd */ + +// PairDotProd multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. +// +// Asm: VPMADDWD, CPU Feature: AVX +func (x Int16x8) PairDotProd(y Int16x8) Int32x4 + +// PairDotProd multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. +// +// Asm: VPMADDWD, CPU Feature: AVX2 +func (x Int16x16) PairDotProd(y Int16x16) Int32x8 + +// PairDotProd multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. +// +// Asm: VPMADDWD, CPU Feature: AVX512EVEX +func (x Int16x32) PairDotProd(y Int16x32) Int32x16 + +/* PairDotProdAccumulate */ + +// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSD, CPU Feature: AVX_VNNI +func (x Int32x4) PairDotProdAccumulate(y Int16x8, z Int16x8) Int32x4 + +// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSD, CPU Feature: AVX_VNNI +func (x Int32x8) PairDotProdAccumulate(y Int16x16, z Int16x16) Int32x8 + +// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSD, CPU Feature: AVX512EVEX +func (x Int32x16) PairDotProdAccumulate(y Int16x32, z Int16x32) Int32x16 + +/* PairwiseAdd */ + +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VHADDPS, CPU Feature: AVX +func (x Float32x4) PairwiseAdd(y Float32x4) Float32x4 + +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VHADDPS, CPU Feature: AVX +func (x Float32x8) PairwiseAdd(y Float32x8) Float32x8 + +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VHADDPD, CPU Feature: AVX +func (x Float64x2) PairwiseAdd(y Float64x2) Float64x2 + +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VHADDPD, CPU Feature: AVX +func (x Float64x4) PairwiseAdd(y Float64x4) Float64x4 + +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VPHADDW, CPU Feature: AVX +func (x Int16x8) PairwiseAdd(y Int16x8) Int16x8 + +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. 
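+//
+// Repeated pairwise addition is the usual way to sum across a vector; a
+// sketch for the four-lane float shape (illustrative only; assumes the
+// package is imported as "simd"):
+//
+//    func sumAcross(v simd.Float32x4) simd.Float32x4 {
+//        s := v.PairwiseAdd(v)   // [v0+v1, v2+v3, v0+v1, v2+v3]
+//        return s.PairwiseAdd(s) // every lane holds v0+v1+v2+v3
+//    }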
+// +// Asm: VPHADDW, CPU Feature: AVX2 +func (x Int16x16) PairwiseAdd(y Int16x16) Int16x16 + +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VPHADDD, CPU Feature: AVX +func (x Int32x4) PairwiseAdd(y Int32x4) Int32x4 + +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VPHADDD, CPU Feature: AVX2 +func (x Int32x8) PairwiseAdd(y Int32x8) Int32x8 + +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VPHADDW, CPU Feature: AVX +func (x Uint16x8) PairwiseAdd(y Uint16x8) Uint16x8 + +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VPHADDW, CPU Feature: AVX2 +func (x Uint16x16) PairwiseAdd(y Uint16x16) Uint16x16 + +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VPHADDD, CPU Feature: AVX +func (x Uint32x4) PairwiseAdd(y Uint32x4) Uint32x4 + +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VPHADDD, CPU Feature: AVX2 +func (x Uint32x8) PairwiseAdd(y Uint32x8) Uint32x8 + +/* PairwiseSub */ + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VHSUBPS, CPU Feature: AVX +func (x Float32x4) PairwiseSub(y Float32x4) Float32x4 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VHSUBPS, CPU Feature: AVX +func (x Float32x8) PairwiseSub(y Float32x8) Float32x8 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VHSUBPD, CPU Feature: AVX +func (x Float64x2) PairwiseSub(y Float64x2) Float64x2 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VHSUBPD, CPU Feature: AVX +func (x Float64x4) PairwiseSub(y Float64x4) Float64x4 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBW, CPU Feature: AVX +func (x Int16x8) PairwiseSub(y Int16x8) Int16x8 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBW, CPU Feature: AVX2 +func (x Int16x16) PairwiseSub(y Int16x16) Int16x16 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] 
and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBD, CPU Feature: AVX +func (x Int32x4) PairwiseSub(y Int32x4) Int32x4 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBD, CPU Feature: AVX2 +func (x Int32x8) PairwiseSub(y Int32x8) Int32x8 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBW, CPU Feature: AVX +func (x Uint16x8) PairwiseSub(y Uint16x8) Uint16x8 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBW, CPU Feature: AVX2 +func (x Uint16x16) PairwiseSub(y Uint16x16) Uint16x16 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBD, CPU Feature: AVX +func (x Uint32x4) PairwiseSub(y Uint32x4) Uint32x4 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBD, CPU Feature: AVX2 +func (x Uint32x8) PairwiseSub(y Uint32x8) Uint32x8 + +/* PopCount */ + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Int8x16) PopCount() Int8x16 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Int8x32) PopCount() Int8x32 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Int8x64) PopCount() Int8x64 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Int16x8) PopCount() Int16x8 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Int16x16) PopCount() Int16x16 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Int16x32) PopCount() Int16x32 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Int32x4) PopCount() Int32x4 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Int32x8) PopCount() Int32x8 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Int32x16) PopCount() Int32x16 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Int64x2) PopCount() Int64x2 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Int64x4) PopCount() Int64x4 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Int64x8) PopCount() Int64x8 + +// PopCount counts the number of set bits in each element. 
+// +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Uint8x16) PopCount() Uint8x16 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Uint8x32) PopCount() Uint8x32 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Uint8x64) PopCount() Uint8x64 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Uint16x8) PopCount() Uint16x8 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Uint16x16) PopCount() Uint16x16 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Uint16x32) PopCount() Uint16x32 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Uint32x4) PopCount() Uint32x4 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Uint32x8) PopCount() Uint32x8 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Uint32x16) PopCount() Uint32x16 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Uint64x2) PopCount() Uint64x2 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Uint64x4) PopCount() Uint64x4 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Uint64x8) PopCount() Uint64x8 + +/* RotateAllLeft */ + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLD, CPU Feature: AVX512EVEX +func (x Int32x4) RotateAllLeft(imm8 uint8) Int32x4 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLD, CPU Feature: AVX512EVEX +func (x Int32x8) RotateAllLeft(imm8 uint8) Int32x8 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLD, CPU Feature: AVX512EVEX +func (x Int32x16) RotateAllLeft(imm8 uint8) Int32x16 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLQ, CPU Feature: AVX512EVEX +func (x Int64x2) RotateAllLeft(imm8 uint8) Int64x2 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLQ, CPU Feature: AVX512EVEX +func (x Int64x4) RotateAllLeft(imm8 uint8) Int64x4 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLQ, CPU Feature: AVX512EVEX +func (x Int64x8) RotateAllLeft(imm8 uint8) Int64x8 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLD, CPU Feature: AVX512EVEX +func (x Uint32x4) RotateAllLeft(imm8 uint8) Uint32x4 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLD, CPU Feature: AVX512EVEX +func (x Uint32x8) RotateAllLeft(imm8 uint8) Uint32x8 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. 
+// +// Asm: VPROLD, CPU Feature: AVX512EVEX +func (x Uint32x16) RotateAllLeft(imm8 uint8) Uint32x16 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLQ, CPU Feature: AVX512EVEX +func (x Uint64x2) RotateAllLeft(imm8 uint8) Uint64x2 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLQ, CPU Feature: AVX512EVEX +func (x Uint64x4) RotateAllLeft(imm8 uint8) Uint64x4 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLQ, CPU Feature: AVX512EVEX +func (x Uint64x8) RotateAllLeft(imm8 uint8) Uint64x8 + +/* RotateAllRight */ + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORD, CPU Feature: AVX512EVEX +func (x Int32x4) RotateAllRight(imm8 uint8) Int32x4 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORD, CPU Feature: AVX512EVEX +func (x Int32x8) RotateAllRight(imm8 uint8) Int32x8 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORD, CPU Feature: AVX512EVEX +func (x Int32x16) RotateAllRight(imm8 uint8) Int32x16 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORQ, CPU Feature: AVX512EVEX +func (x Int64x2) RotateAllRight(imm8 uint8) Int64x2 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORQ, CPU Feature: AVX512EVEX +func (x Int64x4) RotateAllRight(imm8 uint8) Int64x4 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORQ, CPU Feature: AVX512EVEX +func (x Int64x8) RotateAllRight(imm8 uint8) Int64x8 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORD, CPU Feature: AVX512EVEX +func (x Uint32x4) RotateAllRight(imm8 uint8) Uint32x4 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORD, CPU Feature: AVX512EVEX +func (x Uint32x8) RotateAllRight(imm8 uint8) Uint32x8 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORD, CPU Feature: AVX512EVEX +func (x Uint32x16) RotateAllRight(imm8 uint8) Uint32x16 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORQ, CPU Feature: AVX512EVEX +func (x Uint64x2) RotateAllRight(imm8 uint8) Uint64x2 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORQ, CPU Feature: AVX512EVEX +func (x Uint64x4) RotateAllRight(imm8 uint8) Uint64x4 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORQ, CPU Feature: AVX512EVEX +func (x Uint64x8) RotateAllRight(imm8 uint8) Uint64x8 + +/* RotateLeft */ + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVD, CPU Feature: AVX512EVEX +func (x Int32x4) RotateLeft(y Int32x4) Int32x4 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. 
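+//
+// RotateAllLeft takes a single immediate count, while RotateLeft takes the
+// count for each lane from another vector; a sketch of the contrast
+// (illustrative only; assumes the package is imported as "simd"):
+//
+//    func rotations(v, counts simd.Int32x8) (simd.Int32x8, simd.Int32x8) {
+//        fixed := v.RotateAllLeft(7)     // every lane rotated left by 7 bits
+//        varying := v.RotateLeft(counts) // lane i rotated left by counts[i] bits
+//        return fixed, varying
+//    }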
+// +// Asm: VPROLVD, CPU Feature: AVX512EVEX +func (x Int32x8) RotateLeft(y Int32x8) Int32x8 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVD, CPU Feature: AVX512EVEX +func (x Int32x16) RotateLeft(y Int32x16) Int32x16 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVQ, CPU Feature: AVX512EVEX +func (x Int64x2) RotateLeft(y Int64x2) Int64x2 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVQ, CPU Feature: AVX512EVEX +func (x Int64x4) RotateLeft(y Int64x4) Int64x4 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVQ, CPU Feature: AVX512EVEX +func (x Int64x8) RotateLeft(y Int64x8) Int64x8 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVD, CPU Feature: AVX512EVEX +func (x Uint32x4) RotateLeft(y Uint32x4) Uint32x4 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVD, CPU Feature: AVX512EVEX +func (x Uint32x8) RotateLeft(y Uint32x8) Uint32x8 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVD, CPU Feature: AVX512EVEX +func (x Uint32x16) RotateLeft(y Uint32x16) Uint32x16 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVQ, CPU Feature: AVX512EVEX +func (x Uint64x2) RotateLeft(y Uint64x2) Uint64x2 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) RotateLeft(y Uint64x4) Uint64x4 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) RotateLeft(y Uint64x8) Uint64x8 + +/* RotateRight */ + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVD, CPU Feature: AVX512EVEX +func (x Int32x4) RotateRight(y Int32x4) Int32x4 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVD, CPU Feature: AVX512EVEX +func (x Int32x8) RotateRight(y Int32x8) Int32x8 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVD, CPU Feature: AVX512EVEX +func (x Int32x16) RotateRight(y Int32x16) Int32x16 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVQ, CPU Feature: AVX512EVEX +func (x Int64x2) RotateRight(y Int64x2) Int64x2 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVQ, CPU Feature: AVX512EVEX +func (x Int64x4) RotateRight(y Int64x4) Int64x4 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. 
+// +// Asm: VPRORVQ, CPU Feature: AVX512EVEX +func (x Int64x8) RotateRight(y Int64x8) Int64x8 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVD, CPU Feature: AVX512EVEX +func (x Uint32x4) RotateRight(y Uint32x4) Uint32x4 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVD, CPU Feature: AVX512EVEX +func (x Uint32x8) RotateRight(y Uint32x8) Uint32x8 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVD, CPU Feature: AVX512EVEX +func (x Uint32x16) RotateRight(y Uint32x16) Uint32x16 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVQ, CPU Feature: AVX512EVEX +func (x Uint64x2) RotateRight(y Uint64x2) Uint64x2 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) RotateRight(y Uint64x4) Uint64x4 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) RotateRight(y Uint64x8) Uint64x8 + +/* Round */ + +// Round rounds elements to the nearest integer. +// Const Immediate = 0. +// +// Asm: VROUNDPS, CPU Feature: AVX +func (x Float32x4) Round() Float32x4 + +// Round rounds elements to the nearest integer. +// Const Immediate = 0. +// +// Asm: VROUNDPS, CPU Feature: AVX +func (x Float32x8) Round() Float32x8 + +// Round rounds elements to the nearest integer. +// Const Immediate = 0. +// +// Asm: VROUNDPD, CPU Feature: AVX +func (x Float64x2) Round() Float64x2 + +// Round rounds elements to the nearest integer. +// Const Immediate = 0. +// +// Asm: VROUNDPD, CPU Feature: AVX +func (x Float64x4) Round() Float64x4 + +/* RoundSuppressExceptionWithPrecision */ + +// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. +// Const Immediate = 8. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) RoundSuppressExceptionWithPrecision(imm8 uint8) Float32x4 + +// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. +// Const Immediate = 8. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) RoundSuppressExceptionWithPrecision(imm8 uint8) Float32x8 + +// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. +// Const Immediate = 8. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) RoundSuppressExceptionWithPrecision(imm8 uint8) Float32x16 + +// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. +// Const Immediate = 8. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) RoundSuppressExceptionWithPrecision(imm8 uint8) Float64x2 + +// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. +// Const Immediate = 8. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) RoundSuppressExceptionWithPrecision(imm8 uint8) Float64x4 + +// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. +// Const Immediate = 8. 
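+//
+// A sketch of precision-controlled rounding (illustrative only; assumes the
+// package is imported as "simd" and that, as with VRNDSCALE, the immediate
+// gives the number of fraction bits to keep):
+//
+//    func roundToQuarters(v simd.Float64x8) simd.Float64x8 {
+//        return v.RoundWithPrecision(2) // nearest multiple of 1/4 per lane, under that reading
+//    }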
+// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) RoundSuppressExceptionWithPrecision(imm8 uint8) Float64x8 + +/* RoundWithPrecision */ + +// RoundWithPrecision rounds elements with specified precision. +// Const Immediate = 0. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) RoundWithPrecision(imm8 uint8) Float32x4 + +// RoundWithPrecision rounds elements with specified precision. +// Const Immediate = 0. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) RoundWithPrecision(imm8 uint8) Float32x8 + +// RoundWithPrecision rounds elements with specified precision. +// Const Immediate = 0. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) RoundWithPrecision(imm8 uint8) Float32x16 + +// RoundWithPrecision rounds elements with specified precision. +// Const Immediate = 0. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) RoundWithPrecision(imm8 uint8) Float64x2 + +// RoundWithPrecision rounds elements with specified precision. +// Const Immediate = 0. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) RoundWithPrecision(imm8 uint8) Float64x4 + +// RoundWithPrecision rounds elements with specified precision. +// Const Immediate = 0. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) RoundWithPrecision(imm8 uint8) Float64x8 + +/* SaturatedAdd */ + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSB, CPU Feature: AVX +func (x Int8x16) SaturatedAdd(y Int8x16) Int8x16 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSB, CPU Feature: AVX2 +func (x Int8x32) SaturatedAdd(y Int8x32) Int8x32 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSB, CPU Feature: AVX512EVEX +func (x Int8x64) SaturatedAdd(y Int8x64) Int8x64 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSW, CPU Feature: AVX +func (x Int16x8) SaturatedAdd(y Int16x8) Int16x8 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSW, CPU Feature: AVX2 +func (x Int16x16) SaturatedAdd(y Int16x16) Int16x16 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSW, CPU Feature: AVX512EVEX +func (x Int16x32) SaturatedAdd(y Int16x32) Int16x32 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSB, CPU Feature: AVX +func (x Uint8x16) SaturatedAdd(y Uint8x16) Uint8x16 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSB, CPU Feature: AVX2 +func (x Uint8x32) SaturatedAdd(y Uint8x32) Uint8x32 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSB, CPU Feature: AVX512EVEX +func (x Uint8x64) SaturatedAdd(y Uint8x64) Uint8x64 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSW, CPU Feature: AVX +func (x Uint16x8) SaturatedAdd(y Uint16x8) Uint16x8 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSW, CPU Feature: AVX2 +func (x Uint16x16) SaturatedAdd(y Uint16x16) Uint16x16 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. 
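+//
+// A sketch of saturating addition on unsigned bytes, e.g. brightening pixel
+// data without wraparound (illustrative only; assumes the package is
+// imported as "simd"):
+//
+//    func brighten(pixels, delta simd.Uint8x16) simd.Uint8x16 {
+//        return pixels.SaturatedAdd(delta) // lanes saturate instead of wrapping around
+//    }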
+// +// Asm: VPADDSW, CPU Feature: AVX512EVEX +func (x Uint16x32) SaturatedAdd(y Uint16x32) Uint16x32 + +/* SaturatedPairDotProdAccumulate */ + +// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSDS, CPU Feature: AVX_VNNI +func (x Int32x4) SaturatedPairDotProdAccumulate(y Int16x8, z Int16x8) Int32x4 + +// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSDS, CPU Feature: AVX_VNNI +func (x Int32x8) SaturatedPairDotProdAccumulate(y Int16x16, z Int16x16) Int32x8 + +// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX +func (x Int32x16) SaturatedPairDotProdAccumulate(y Int16x32, z Int16x32) Int32x16 + +/* SaturatedPairwiseAdd */ + +// SaturatedPairwiseAdd horizontally adds adjacent pairs of elements with saturation. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VPHADDSW, CPU Feature: AVX +func (x Int16x8) SaturatedPairwiseAdd(y Int16x8) Int16x8 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SaturatedPairwiseAdd horizontally adds adjacent pairs of elements with saturation. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x4) NotEqual(y Float32x4) Mask32x4 +// Asm: VPHADDSW, CPU Feature: AVX2 +func (x Int16x16) SaturatedPairwiseAdd(y Int16x16) Int16x16 -// NotEqual compares for inequality. -// Const Immediate = 4. +/* SaturatedPairwiseSub */ + +// SaturatedPairwiseSub horizontally subtracts adjacent pairs of elements with saturation. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. // -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x8) NotEqual(y Float32x8) Mask32x8 +// Asm: VPHSUBSW, CPU Feature: AVX +func (x Int16x8) SaturatedPairwiseSub(y Int16x8) Int16x8 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SaturatedPairwiseSub horizontally subtracts adjacent pairs of elements with saturation. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) NotEqual(y Float32x16) Mask32x16 +// Asm: VPHSUBSW, CPU Feature: AVX2 +func (x Int16x16) SaturatedPairwiseSub(y Int16x16) Int16x16 -// NotEqual compares for inequality. -// Const Immediate = 4. +/* SaturatedSub */ + +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x2) NotEqual(y Float64x2) Mask64x2 +// Asm: VPSUBSB, CPU Feature: AVX +func (x Int8x16) SaturatedSub(y Int8x16) Int8x16 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x4) NotEqual(y Float64x4) Mask64x4 +// Asm: VPSUBSB, CPU Feature: AVX2 +func (x Int8x32) SaturatedSub(y Int8x32) Int8x32 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. 
// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) NotEqual(y Float64x8) Mask64x8 +// Asm: VPSUBSB, CPU Feature: AVX512EVEX +func (x Int8x64) SaturatedSub(y Int8x64) Int8x64 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x16) NotEqual(y Int8x16) Mask8x16 +// Asm: VPSUBSW, CPU Feature: AVX +func (x Int16x8) SaturatedSub(y Int16x8) Int16x8 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x32) NotEqual(y Int8x32) Mask8x32 +// Asm: VPSUBSW, CPU Feature: AVX2 +func (x Int16x16) SaturatedSub(y Int16x16) Int16x16 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x64) NotEqual(y Int8x64) Mask8x64 +// Asm: VPSUBSW, CPU Feature: AVX512EVEX +func (x Int16x32) SaturatedSub(y Int16x32) Int16x32 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x8) NotEqual(y Int16x8) Mask16x8 +// Asm: VPSUBSB, CPU Feature: AVX +func (x Uint8x16) SaturatedSub(y Uint8x16) Uint8x16 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x16) NotEqual(y Int16x16) Mask16x16 +// Asm: VPSUBSB, CPU Feature: AVX2 +func (x Uint8x32) SaturatedSub(y Uint8x32) Uint8x32 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x32) NotEqual(y Int16x32) Mask16x32 +// Asm: VPSUBSB, CPU Feature: AVX512EVEX +func (x Uint8x64) SaturatedSub(y Uint8x64) Uint8x64 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x4) NotEqual(y Int32x4) Mask32x4 +// Asm: VPSUBSW, CPU Feature: AVX +func (x Uint16x8) SaturatedSub(y Uint16x8) Uint16x8 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x8) NotEqual(y Int32x8) Mask32x8 +// Asm: VPSUBSW, CPU Feature: AVX2 +func (x Uint16x16) SaturatedSub(y Uint16x16) Uint16x16 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x16) NotEqual(y Int32x16) Mask32x16 +// Asm: VPSUBSW, CPU Feature: AVX512EVEX +func (x Uint16x32) SaturatedSub(y Uint16x32) Uint16x32 -// NotEqual compares for inequality. -// Const Immediate = 4. +/* SaturatedUnsignedSignedPairDotProd */ + +// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. 
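The comment above (VPMADDUBSW, continued below for the wider vectors) pairs an unsigned byte vector with a signed byte vector. A scalar sketch of one 16-bit output lane, using an illustrative helper name, is:

// pairDotProdLane multiplies two adjacent unsigned bytes of x by the
// corresponding signed bytes of y and adds the two products with
// signed 16-bit saturation.
func pairDotProdLane(x0, x1 uint8, y0, y1 int8) int16 {
	sum := int32(x0)*int32(y0) + int32(x1)*int32(y1)
	if sum > 32767 {
		return 32767
	}
	if sum < -32768 {
		return -32768
	}
	return int16(sum)
}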
// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x2) NotEqual(y Int64x2) Mask64x2 +// Asm: VPMADDUBSW, CPU Feature: AVX +func (x Uint8x16) SaturatedUnsignedSignedPairDotProd(y Int8x16) Int16x8 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x4) NotEqual(y Int64x4) Mask64x4 +// Asm: VPMADDUBSW, CPU Feature: AVX2 +func (x Uint8x32) SaturatedUnsignedSignedPairDotProd(y Int8x32) Int16x16 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x8) NotEqual(y Int64x8) Mask64x8 +// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX +func (x Uint8x64) SaturatedUnsignedSignedPairDotProd(y Int8x64) Int16x32 -// NotEqual compares for inequality. -// Const Immediate = 4. +/* SaturatedUnsignedSignedQuadDotProdAccumulate */ + +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) NotEqual(y Uint8x16) Mask8x16 +// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI +func (x Int32x4) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16) Int32x4 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) NotEqual(y Uint8x32) Mask8x32 +// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI +func (x Int32x8) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32) Int32x8 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) NotEqual(y Uint8x64) Mask8x64 +// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +func (x Int32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Int32x16 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) NotEqual(y Uint16x8) Mask16x8 +// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI +func (x Uint32x4) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16) Uint32x4 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) NotEqual(y Uint16x16) Mask16x16 +// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI +func (x Uint32x8) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32) Uint32x8 -// NotEqual compares for inequality. -// Const Immediate = 4. 
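For the SaturatedUnsignedSignedQuadDotProdAccumulate methods in this group (the Uint32x16 variant below completes the set), a scalar model of one 32-bit accumulator lane follows; assuming that only the final add into the accumulator saturates is ours, not something the comments state.

// quadDotProdAccumulateLane sums four unsigned*signed byte products and
// adds the total to the accumulator lane with signed 32-bit saturation.
func quadDotProdAccumulateLane(acc int32, y [4]uint8, z [4]int8) int32 {
	sum := int64(acc)
	for i := 0; i < 4; i++ {
		sum += int64(y[i]) * int64(z[i])
	}
	if sum > 2147483647 {
		return 2147483647
	}
	if sum < -2147483648 {
		return -2147483648
	}
	return int32(sum)
}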
+// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) NotEqual(y Uint16x32) Mask16x32 +// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +func (x Uint32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Uint32x16 -// NotEqual compares for inequality. -// Const Immediate = 4. +/* SetElem */ + +// SetElem sets a single constant-indexed element's value. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) NotEqual(y Uint32x4) Mask32x4 +// Asm: VPINSRB, CPU Feature: AVX +func (x Int8x16) SetElem(imm uint8, y int8) Int8x16 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SetElem sets a single constant-indexed element's value. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) NotEqual(y Uint32x8) Mask32x8 +// Asm: VPINSRW, CPU Feature: AVX +func (x Int16x8) SetElem(imm uint8, y int16) Int16x8 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SetElem sets a single constant-indexed element's value. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) NotEqual(y Uint32x16) Mask32x16 +// Asm: VPINSRD, CPU Feature: AVX +func (x Int32x4) SetElem(imm uint8, y int32) Int32x4 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SetElem sets a single constant-indexed element's value. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) NotEqual(y Uint64x2) Mask64x2 +// Asm: VPINSRQ, CPU Feature: AVX +func (x Int64x2) SetElem(imm uint8, y int64) Int64x2 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SetElem sets a single constant-indexed element's value. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) NotEqual(y Uint64x4) Mask64x4 +// Asm: VPINSRB, CPU Feature: AVX +func (x Uint8x16) SetElem(imm uint8, y uint8) Uint8x16 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SetElem sets a single constant-indexed element's value. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) NotEqual(y Uint64x8) Mask64x8 +// Asm: VPINSRW, CPU Feature: AVX +func (x Uint16x8) SetElem(imm uint8, y uint16) Uint16x8 -/* Or */ +// SetElem sets a single constant-indexed element's value. +// +// Asm: VPINSRD, CPU Feature: AVX +func (x Uint32x4) SetElem(imm uint8, y uint32) Uint32x4 -// Or performs a bitwise OR operation between two vectors. +// SetElem sets a single constant-indexed element's value. // -// Asm: VORPS, CPU Feature: AVX -func (x Float32x4) Or(y Float32x4) Float32x4 +// Asm: VPINSRQ, CPU Feature: AVX +func (x Uint64x2) SetElem(imm uint8, y uint64) Uint64x2 -// Or performs a bitwise OR operation between two vectors. +/* ShiftAllLeft */ + +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VORPS, CPU Feature: AVX -func (x Float32x8) Or(y Float32x8) Float32x8 +// Asm: VPSLLW, CPU Feature: AVX +func (x Int16x8) ShiftAllLeft(y uint64) Int16x8 -// Or performs a masked bitwise OR operation between two vectors. +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLW, CPU Feature: AVX2 +func (x Int16x16) ShiftAllLeft(y uint64) Int16x16 + +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. 
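The SetElem methods above replace a single lane selected by a constant index and leave the rest of the vector untouched; a scalar sketch over a plain array (the helper name is ours) is:

// setElem returns a copy of x with lane idx replaced by v; because Go
// arrays are passed by value, the caller's vector is not modified.
func setElem(x [16]int8, idx int, v int8) [16]int8 {
	x[idx] = v
	return x
}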
+// +// Asm: VPSLLD, CPU Feature: AVX +func (x Int32x4) ShiftAllLeft(y uint64) Int32x4 + +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLD, CPU Feature: AVX2 +func (x Int32x8) ShiftAllLeft(y uint64) Int32x8 + +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLQ, CPU Feature: AVX +func (x Int64x2) ShiftAllLeft(y uint64) Int64x2 + +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLQ, CPU Feature: AVX2 +func (x Int64x4) ShiftAllLeft(y uint64) Int64x4 + +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VORPS, CPU Feature: AVX512EVEX -func (x Float32x16) Or(y Float32x16) Float32x16 +// Asm: VPSLLQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftAllLeft(y uint64) Int64x8 -// Or performs a bitwise OR operation between two vectors. +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VORPD, CPU Feature: AVX -func (x Float64x2) Or(y Float64x2) Float64x2 +// Asm: VPSLLW, CPU Feature: AVX +func (x Uint16x8) ShiftAllLeft(y uint64) Uint16x8 -// Or performs a bitwise OR operation between two vectors. +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VORPD, CPU Feature: AVX -func (x Float64x4) Or(y Float64x4) Float64x4 +// Asm: VPSLLW, CPU Feature: AVX2 +func (x Uint16x16) ShiftAllLeft(y uint64) Uint16x16 -// Or performs a masked bitwise OR operation between two vectors. +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VORPD, CPU Feature: AVX512EVEX -func (x Float64x8) Or(y Float64x8) Float64x8 +// Asm: VPSLLD, CPU Feature: AVX +func (x Uint32x4) ShiftAllLeft(y uint64) Uint32x4 -// Or performs a bitwise OR operation between two vectors. +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPOR, CPU Feature: AVX -func (x Int8x16) Or(y Int8x16) Int8x16 +// Asm: VPSLLD, CPU Feature: AVX2 +func (x Uint32x8) ShiftAllLeft(y uint64) Uint32x8 -// Or performs a bitwise OR operation between two vectors. +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPOR, CPU Feature: AVX2 -func (x Int8x32) Or(y Int8x32) Int8x32 +// Asm: VPSLLQ, CPU Feature: AVX +func (x Uint64x2) ShiftAllLeft(y uint64) Uint64x2 -// Or performs a bitwise OR operation between two vectors. +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPOR, CPU Feature: AVX -func (x Int16x8) Or(y Int16x8) Int16x8 +// Asm: VPSLLQ, CPU Feature: AVX2 +func (x Uint64x4) ShiftAllLeft(y uint64) Uint64x4 -// Or performs a bitwise OR operation between two vectors. +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPOR, CPU Feature: AVX2 -func (x Int16x16) Or(y Int16x16) Int16x16 +// Asm: VPSLLQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftAllLeft(y uint64) Uint64x8 -// Or performs a bitwise OR operation between two vectors. 
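ShiftAllLeft applies one shift count to every lane. A scalar sketch for 32-bit lanes follows; treating counts of 32 or more as clearing the lane is an assumption about the underlying instructions, not something the comments above state.

// shiftAllLeft shifts every lane of x left by the same count, filling
// the emptied low bits with zeros.
func shiftAllLeft(x [4]uint32, count uint64) [4]uint32 {
	var out [4]uint32
	if count >= 32 {
		return out // assumed: oversized counts clear the lanes
	}
	for i, v := range x {
		out[i] = v << count
	}
	return out
}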
+/* ShiftAllLeftAndFillUpperFrom */ + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPOR, CPU Feature: AVX -func (x Int32x4) Or(y Int32x4) Int32x4 +// Asm: VPSHLDW, CPU Feature: AVX512EVEX +func (x Int16x8) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int16x8) Int16x8 -// Or performs a bitwise OR operation between two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPOR, CPU Feature: AVX2 -func (x Int32x8) Or(y Int32x8) Int32x8 +// Asm: VPSHLDW, CPU Feature: AVX512EVEX +func (x Int16x16) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int16x16) Int16x16 -// Or performs a masked bitwise OR operation between two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPORD, CPU Feature: AVX512EVEX -func (x Int32x16) Or(y Int32x16) Int32x16 +// Asm: VPSHLDW, CPU Feature: AVX512EVEX +func (x Int16x32) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int16x32) Int16x32 -// Or performs a bitwise OR operation between two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPOR, CPU Feature: AVX -func (x Int64x2) Or(y Int64x2) Int64x2 +// Asm: VPSHLDD, CPU Feature: AVX512EVEX +func (x Int32x4) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int32x4) Int32x4 -// Or performs a bitwise OR operation between two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPOR, CPU Feature: AVX2 -func (x Int64x4) Or(y Int64x4) Int64x4 +// Asm: VPSHLDD, CPU Feature: AVX512EVEX +func (x Int32x8) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int32x8) Int32x8 -// Or performs a masked bitwise OR operation between two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPORQ, CPU Feature: AVX512EVEX -func (x Int64x8) Or(y Int64x8) Int64x8 +// Asm: VPSHLDD, CPU Feature: AVX512EVEX +func (x Int32x16) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int32x16) Int32x16 -// Or performs a bitwise OR operation between two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPOR, CPU Feature: AVX -func (x Uint8x16) Or(y Uint8x16) Uint8x16 +// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +func (x Int64x2) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int64x2) Int64x2 -// Or performs a bitwise OR operation between two vectors. 
+// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPOR, CPU Feature: AVX2 -func (x Uint8x32) Or(y Uint8x32) Uint8x32 +// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +func (x Int64x4) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int64x4) Int64x4 -// Or performs a bitwise OR operation between two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPOR, CPU Feature: AVX -func (x Uint16x8) Or(y Uint16x8) Uint16x8 +// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int64x8) Int64x8 -// Or performs a bitwise OR operation between two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPOR, CPU Feature: AVX2 -func (x Uint16x16) Or(y Uint16x16) Uint16x16 +// Asm: VPSHLDW, CPU Feature: AVX512EVEX +func (x Uint16x8) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint16x8) Uint16x8 -// Or performs a bitwise OR operation between two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPOR, CPU Feature: AVX -func (x Uint32x4) Or(y Uint32x4) Uint32x4 +// Asm: VPSHLDW, CPU Feature: AVX512EVEX +func (x Uint16x16) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint16x16) Uint16x16 -// Or performs a bitwise OR operation between two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPOR, CPU Feature: AVX2 -func (x Uint32x8) Or(y Uint32x8) Uint32x8 +// Asm: VPSHLDW, CPU Feature: AVX512EVEX +func (x Uint16x32) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint16x32) Uint16x32 -// Or performs a masked bitwise OR operation between two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPORD, CPU Feature: AVX512EVEX -func (x Uint32x16) Or(y Uint32x16) Uint32x16 +// Asm: VPSHLDD, CPU Feature: AVX512EVEX +func (x Uint32x4) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint32x4) Uint32x4 -// Or performs a bitwise OR operation between two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPOR, CPU Feature: AVX -func (x Uint64x2) Or(y Uint64x2) Uint64x2 +// Asm: VPSHLDD, CPU Feature: AVX512EVEX +func (x Uint32x8) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint32x8) Uint32x8 -// Or performs a bitwise OR operation between two vectors. 
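ShiftAllLeftAndFillUpperFrom (this listing continues through the remaining widths below) is a funnel shift: x supplies the bits that stay, y supplies the bits that slide in. A scalar sketch of one 32-bit lane, with an illustrative helper name:

// shiftAllLeftAndFill shifts x left by n and fills the emptied low bits
// with the high bits of y, as the comments above describe.
func shiftAllLeftAndFill(x, y uint32, n uint) uint32 {
	n &= 31 // the comments note that only the low 5 bits of the count are used
	return x<<n | y>>(32-n)
}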
+// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPOR, CPU Feature: AVX2 -func (x Uint64x4) Or(y Uint64x4) Uint64x4 +// Asm: VPSHLDD, CPU Feature: AVX512EVEX +func (x Uint32x16) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint32x16) Uint32x16 -// Or performs a masked bitwise OR operation between two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPORQ, CPU Feature: AVX512EVEX -func (x Uint64x8) Or(y Uint64x8) Uint64x8 +// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +func (x Uint64x2) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint64x2) Uint64x2 -/* PairDotProd */ +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +func (x Uint64x4) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint64x4) Uint64x4 -// PairDotProd multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPMADDWD, CPU Feature: AVX -func (x Int16x8) PairDotProd(y Int16x8) Int32x4 +// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint64x8) Uint64x8 -// PairDotProd multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. +/* ShiftAllRight */ + +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPMADDWD, CPU Feature: AVX2 -func (x Int16x16) PairDotProd(y Int16x16) Int32x8 +// Asm: VPSRLW, CPU Feature: AVX +func (x Int16x8) ShiftAllRight(y uint64) Int16x8 -// PairDotProd multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPMADDWD, CPU Feature: AVX512EVEX -func (x Int16x32) PairDotProd(y Int16x32) Int32x16 +// Asm: VPSRLW, CPU Feature: AVX2 +func (x Int16x16) ShiftAllRight(y uint64) Int16x16 -/* PairDotProdAccumulate */ +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// +// Asm: VPSRLD, CPU Feature: AVX +func (x Int32x4) ShiftAllRight(y uint64) Int32x4 -// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. 
// -// Asm: VPDPWSSD, CPU Feature: AVX_VNNI -func (x Int32x4) PairDotProdAccumulate(y Int16x8, z Int16x8) Int32x4 +// Asm: VPSRLD, CPU Feature: AVX2 +func (x Int32x8) ShiftAllRight(y uint64) Int32x8 -// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPDPWSSD, CPU Feature: AVX_VNNI -func (x Int32x8) PairDotProdAccumulate(y Int16x16, z Int16x16) Int32x8 +// Asm: VPSRLQ, CPU Feature: AVX +func (x Int64x2) ShiftAllRight(y uint64) Int64x2 -// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPDPWSSD, CPU Feature: AVX512EVEX -func (x Int32x16) PairDotProdAccumulate(y Int16x32, z Int16x32) Int32x16 +// Asm: VPSRLQ, CPU Feature: AVX2 +func (x Int64x4) ShiftAllRight(y uint64) Int64x4 -/* PairwiseAdd */ +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// +// Asm: VPSRLQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftAllRight(y uint64) Int64x8 -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VHADDPS, CPU Feature: AVX -func (x Float32x4) PairwiseAdd(y Float32x4) Float32x4 +// Asm: VPSRLW, CPU Feature: AVX +func (x Uint16x8) ShiftAllRight(y uint64) Uint16x8 -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VHADDPS, CPU Feature: AVX -func (x Float32x8) PairwiseAdd(y Float32x8) Float32x8 +// Asm: VPSRLW, CPU Feature: AVX2 +func (x Uint16x16) ShiftAllRight(y uint64) Uint16x16 -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VHADDPD, CPU Feature: AVX -func (x Float64x2) PairwiseAdd(y Float64x2) Float64x2 +// Asm: VPSRLD, CPU Feature: AVX +func (x Uint32x4) ShiftAllRight(y uint64) Uint32x4 -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VHADDPD, CPU Feature: AVX -func (x Float64x4) PairwiseAdd(y Float64x4) Float64x4 +// Asm: VPSRLD, CPU Feature: AVX2 +func (x Uint32x8) ShiftAllRight(y uint64) Uint32x8 -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. 
// -// Asm: VPHADDW, CPU Feature: AVX -func (x Int16x8) PairwiseAdd(y Int16x8) Int16x8 +// Asm: VPSRLQ, CPU Feature: AVX +func (x Uint64x2) ShiftAllRight(y uint64) Uint64x2 -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPHADDW, CPU Feature: AVX2 -func (x Int16x16) PairwiseAdd(y Int16x16) Int16x16 +// Asm: VPSRLQ, CPU Feature: AVX2 +func (x Uint64x4) ShiftAllRight(y uint64) Uint64x4 -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPHADDD, CPU Feature: AVX -func (x Int32x4) PairwiseAdd(y Int32x4) Int32x4 +// Asm: VPSRLQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftAllRight(y uint64) Uint64x8 -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +/* ShiftAllRightAndFillUpperFrom */ + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VPHADDD, CPU Feature: AVX2 -func (x Int32x8) PairwiseAdd(y Int32x8) Int32x8 +// Asm: VPSHRDW, CPU Feature: AVX512EVEX +func (x Int16x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Int16x8) Int16x8 -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VPHADDW, CPU Feature: AVX -func (x Uint16x8) PairwiseAdd(y Uint16x8) Uint16x8 +// Asm: VPSHRDW, CPU Feature: AVX512EVEX +func (x Int16x16) ShiftAllRightAndFillUpperFrom(imm uint8, y Int16x16) Int16x16 -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VPHADDW, CPU Feature: AVX2 -func (x Uint16x16) PairwiseAdd(y Uint16x16) Uint16x16 +// Asm: VPSHRDW, CPU Feature: AVX512EVEX +func (x Int16x32) ShiftAllRightAndFillUpperFrom(imm uint8, y Int16x32) Int16x32 -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
// -// Asm: VPHADDD, CPU Feature: AVX -func (x Uint32x4) PairwiseAdd(y Uint32x4) Uint32x4 +// Asm: VPSHRDD, CPU Feature: AVX512EVEX +func (x Int32x4) ShiftAllRightAndFillUpperFrom(imm uint8, y Int32x4) Int32x4 -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VPHADDD, CPU Feature: AVX2 -func (x Uint32x8) PairwiseAdd(y Uint32x8) Uint32x8 +// Asm: VPSHRDD, CPU Feature: AVX512EVEX +func (x Int32x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Int32x8) Int32x8 -/* PairwiseSub */ +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDD, CPU Feature: AVX512EVEX +func (x Int32x16) ShiftAllRightAndFillUpperFrom(imm uint8, y Int32x16) Int32x16 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VHSUBPS, CPU Feature: AVX -func (x Float32x4) PairwiseSub(y Float32x4) Float32x4 +// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +func (x Int64x2) ShiftAllRightAndFillUpperFrom(imm uint8, y Int64x2) Int64x2 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VHSUBPS, CPU Feature: AVX -func (x Float32x8) PairwiseSub(y Float32x8) Float32x8 +// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +func (x Int64x4) ShiftAllRightAndFillUpperFrom(imm uint8, y Int64x4) Int64x4 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VHSUBPD, CPU Feature: AVX -func (x Float64x2) PairwiseSub(y Float64x2) Float64x2 +// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Int64x8) Int64x8 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. 
+// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VHSUBPD, CPU Feature: AVX -func (x Float64x4) PairwiseSub(y Float64x4) Float64x4 +// Asm: VPSHRDW, CPU Feature: AVX512EVEX +func (x Uint16x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint16x8) Uint16x8 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VPHSUBW, CPU Feature: AVX -func (x Int16x8) PairwiseSub(y Int16x8) Int16x8 +// Asm: VPSHRDW, CPU Feature: AVX512EVEX +func (x Uint16x16) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint16x16) Uint16x16 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VPHSUBW, CPU Feature: AVX2 -func (x Int16x16) PairwiseSub(y Int16x16) Int16x16 +// Asm: VPSHRDW, CPU Feature: AVX512EVEX +func (x Uint16x32) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint16x32) Uint16x32 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VPHSUBD, CPU Feature: AVX -func (x Int32x4) PairwiseSub(y Int32x4) Int32x4 +// Asm: VPSHRDD, CPU Feature: AVX512EVEX +func (x Uint32x4) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint32x4) Uint32x4 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VPHSUBD, CPU Feature: AVX2 -func (x Int32x8) PairwiseSub(y Int32x8) Int32x8 +// Asm: VPSHRDD, CPU Feature: AVX512EVEX +func (x Uint32x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint32x8) Uint32x8 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
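ShiftAllRightAndFillUpperFrom is the mirror image of the left-shifting variant: the emptied high bits of the shifted x are filled from the low bits of y. A scalar sketch of one 32-bit lane, with an illustrative helper name:

// shiftAllRightAndFill shifts x right by n and fills the emptied high
// bits with the low bits of y.
func shiftAllRightAndFill(x, y uint32, n uint) uint32 {
	n &= 31 // only the low 5 bits of the count are used, per the comments above
	return x>>n | y<<(32-n)
}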
// -// Asm: VPHSUBW, CPU Feature: AVX -func (x Uint16x8) PairwiseSub(y Uint16x8) Uint16x8 +// Asm: VPSHRDD, CPU Feature: AVX512EVEX +func (x Uint32x16) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint32x16) Uint32x16 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VPHSUBW, CPU Feature: AVX2 -func (x Uint16x16) PairwiseSub(y Uint16x16) Uint16x16 +// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +func (x Uint64x2) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint64x2) Uint64x2 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VPHSUBD, CPU Feature: AVX -func (x Uint32x4) PairwiseSub(y Uint32x4) Uint32x4 +// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +func (x Uint64x4) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint64x4) Uint64x4 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VPHSUBD, CPU Feature: AVX2 -func (x Uint32x8) PairwiseSub(y Uint32x8) Uint32x8 +// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint64x8) Uint64x8 -/* PopCount */ +/* ShiftAllRightSignExtended */ -// PopCount counts the number of set bits in each element. +// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Int8x16) PopCount() Int8x16 +// Asm: VPSRAW, CPU Feature: AVX +func (x Int16x8) ShiftAllRightSignExtended(y uint64) Int16x8 -// PopCount counts the number of set bits in each element. +// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Int8x32) PopCount() Int8x32 +// Asm: VPSRAW, CPU Feature: AVX2 +func (x Int16x16) ShiftAllRightSignExtended(y uint64) Int16x16 -// PopCount counts the number of set bits in each element. +// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Int8x64) PopCount() Int8x64 +// Asm: VPSRAD, CPU Feature: AVX +func (x Int32x4) ShiftAllRightSignExtended(y uint64) Int32x4 -// PopCount counts the number of set bits in each element. +// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. 
// -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Int16x8) PopCount() Int16x8 +// Asm: VPSRAD, CPU Feature: AVX2 +func (x Int32x8) ShiftAllRightSignExtended(y uint64) Int32x8 -// PopCount counts the number of set bits in each element. +// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Int16x16) PopCount() Int16x16 +// Asm: VPSRAQ, CPU Feature: AVX512EVEX +func (x Int64x2) ShiftAllRightSignExtended(y uint64) Int64x2 -// PopCount counts the number of set bits in each element. +// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Int16x32) PopCount() Int16x32 +// Asm: VPSRAQ, CPU Feature: AVX512EVEX +func (x Int64x4) ShiftAllRightSignExtended(y uint64) Int64x4 -// PopCount counts the number of set bits in each element. +// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Int32x4) PopCount() Int32x4 +// Asm: VPSRAQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftAllRightSignExtended(y uint64) Int64x8 -// PopCount counts the number of set bits in each element. +/* ShiftLeft */ + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Int32x8) PopCount() Int32x8 +// Asm: VPSLLVW, CPU Feature: AVX512EVEX +func (x Int16x8) ShiftLeft(y Int16x8) Int16x8 -// PopCount counts the number of set bits in each element. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Int32x16) PopCount() Int32x16 +// Asm: VPSLLVW, CPU Feature: AVX512EVEX +func (x Int16x16) ShiftLeft(y Int16x16) Int16x16 -// PopCount counts the number of set bits in each element. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Int64x2) PopCount() Int64x2 +// Asm: VPSLLVW, CPU Feature: AVX512EVEX +func (x Int16x32) ShiftLeft(y Int16x32) Int16x32 -// PopCount counts the number of set bits in each element. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Int64x4) PopCount() Int64x4 +// Asm: VPSLLVD, CPU Feature: AVX2 +func (x Int32x4) ShiftLeft(y Int32x4) Int32x4 -// PopCount counts the number of set bits in each element. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Int64x8) PopCount() Int64x8 +// Asm: VPSLLVD, CPU Feature: AVX2 +func (x Int32x8) ShiftLeft(y Int32x8) Int32x8 -// PopCount counts the number of set bits in each element. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. 
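ShiftAllRightSignExtended differs from ShiftAllRight only in how the vacated high bits are filled. Go's own shift operators already make the same distinction on scalars, which is all the sketch below relies on:

// shiftRightScalars contrasts the two behaviors on one lane each: a
// signed shift replicates the sign bit, an unsigned shift fills with
// zeros.
func shiftRightScalars(s int32, u uint32, n uint) (int32, uint32) {
	return s >> n, u >> n
}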
// -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Uint8x16) PopCount() Uint8x16 +// Asm: VPSLLVD, CPU Feature: AVX512EVEX +func (x Int32x16) ShiftLeft(y Int32x16) Int32x16 -// PopCount counts the number of set bits in each element. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Uint8x32) PopCount() Uint8x32 +// Asm: VPSLLVQ, CPU Feature: AVX2 +func (x Int64x2) ShiftLeft(y Int64x2) Int64x2 -// PopCount counts the number of set bits in each element. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Uint8x64) PopCount() Uint8x64 +// Asm: VPSLLVQ, CPU Feature: AVX2 +func (x Int64x4) ShiftLeft(y Int64x4) Int64x4 -// PopCount counts the number of set bits in each element. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Uint16x8) PopCount() Uint16x8 +// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftLeft(y Int64x8) Int64x8 -// PopCount counts the number of set bits in each element. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Uint16x16) PopCount() Uint16x16 +// Asm: VPSLLVW, CPU Feature: AVX512EVEX +func (x Uint16x8) ShiftLeft(y Uint16x8) Uint16x8 -// PopCount counts the number of set bits in each element. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Uint16x32) PopCount() Uint16x32 +// Asm: VPSLLVW, CPU Feature: AVX512EVEX +func (x Uint16x16) ShiftLeft(y Uint16x16) Uint16x16 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVW, CPU Feature: AVX512EVEX +func (x Uint16x32) ShiftLeft(y Uint16x32) Uint16x32 -// PopCount counts the number of set bits in each element. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Uint32x4) PopCount() Uint32x4 +// Asm: VPSLLVD, CPU Feature: AVX2 +func (x Uint32x4) ShiftLeft(y Uint32x4) Uint32x4 -// PopCount counts the number of set bits in each element. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Uint32x8) PopCount() Uint32x8 +// Asm: VPSLLVD, CPU Feature: AVX2 +func (x Uint32x8) ShiftLeft(y Uint32x8) Uint32x8 -// PopCount counts the number of set bits in each element. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Uint32x16) PopCount() Uint32x16 +// Asm: VPSLLVD, CPU Feature: AVX512EVEX +func (x Uint32x16) ShiftLeft(y Uint32x16) Uint32x16 -// PopCount counts the number of set bits in each element. 
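Unlike ShiftAllLeft, the ShiftLeft methods take a whole vector of counts, one per lane. A scalar sketch for four 32-bit lanes follows; treating per-lane counts of 32 or more as clearing the lane is an assumption about the variable-shift instructions, and the helper name is ours.

// shiftLeftPerLane shifts each lane of x left by the count in the
// matching lane of y, zeroing the emptied low bits.
func shiftLeftPerLane(x, y [4]uint32) [4]uint32 {
	var out [4]uint32
	for i := range x {
		if y[i] < 32 {
			out[i] = x[i] << y[i]
		}
	}
	return out
}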
+// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Uint64x2) PopCount() Uint64x2 +// Asm: VPSLLVQ, CPU Feature: AVX2 +func (x Uint64x2) ShiftLeft(y Uint64x2) Uint64x2 -// PopCount counts the number of set bits in each element. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Uint64x4) PopCount() Uint64x4 +// Asm: VPSLLVQ, CPU Feature: AVX2 +func (x Uint64x4) ShiftLeft(y Uint64x4) Uint64x4 -// PopCount counts the number of set bits in each element. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Uint64x8) PopCount() Uint64x8 +// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftLeft(y Uint64x8) Uint64x8 -/* Round */ +/* ShiftLeftAndFillUpperFrom */ -// Round rounds elements to the nearest integer. -// Const Immediate = 0. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VROUNDPS, CPU Feature: AVX -func (x Float32x4) Round() Float32x4 +// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +func (x Int16x8) ShiftLeftAndFillUpperFrom(y Int16x8, z Int16x8) Int16x8 -// Round rounds elements to the nearest integer. -// Const Immediate = 0. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VROUNDPS, CPU Feature: AVX -func (x Float32x8) Round() Float32x8 +// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +func (x Int16x16) ShiftLeftAndFillUpperFrom(y Int16x16, z Int16x16) Int16x16 -// Round rounds elements to the nearest integer. -// Const Immediate = 0. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VROUNDPD, CPU Feature: AVX -func (x Float64x2) Round() Float64x2 +// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +func (x Int16x32) ShiftLeftAndFillUpperFrom(y Int16x32, z Int16x32) Int16x32 -// Round rounds elements to the nearest integer. -// Const Immediate = 0. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VROUNDPD, CPU Feature: AVX -func (x Float64x4) Round() Float64x4 +// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +func (x Int32x4) ShiftLeftAndFillUpperFrom(y Int32x4, z Int32x4) Int32x4 -/* RoundSuppressExceptionWithPrecision */ +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
+// +// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +func (x Int32x8) ShiftLeftAndFillUpperFrom(y Int32x8, z Int32x8) Int32x8 -// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. -// Const Immediate = 8. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) RoundSuppressExceptionWithPrecision(imm8 uint8) Float32x4 +// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +func (x Int32x16) ShiftLeftAndFillUpperFrom(y Int32x16, z Int32x16) Int32x16 -// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. -// Const Immediate = 8. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) RoundSuppressExceptionWithPrecision(imm8 uint8) Float32x8 +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Int64x2) ShiftLeftAndFillUpperFrom(y Int64x2, z Int64x2) Int64x2 -// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. -// Const Immediate = 8. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) RoundSuppressExceptionWithPrecision(imm8 uint8) Float32x16 +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Int64x4) ShiftLeftAndFillUpperFrom(y Int64x4, z Int64x4) Int64x4 -// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. -// Const Immediate = 8. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) RoundSuppressExceptionWithPrecision(imm8 uint8) Float64x2 +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftLeftAndFillUpperFrom(y Int64x8, z Int64x8) Int64x8 -// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. -// Const Immediate = 8. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) RoundSuppressExceptionWithPrecision(imm8 uint8) Float64x4 +// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +func (x Uint16x8) ShiftLeftAndFillUpperFrom(y Uint16x8, z Uint16x8) Uint16x8 -// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. -// Const Immediate = 8. 
+// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) RoundSuppressExceptionWithPrecision(imm8 uint8) Float64x8 +// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +func (x Uint16x16) ShiftLeftAndFillUpperFrom(y Uint16x16, z Uint16x16) Uint16x16 -/* RoundWithPrecision */ +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +func (x Uint16x32) ShiftLeftAndFillUpperFrom(y Uint16x32, z Uint16x32) Uint16x32 -// RoundWithPrecision rounds elements with specified precision. -// Const Immediate = 0. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) RoundWithPrecision(imm8 uint8) Float32x4 +// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +func (x Uint32x4) ShiftLeftAndFillUpperFrom(y Uint32x4, z Uint32x4) Uint32x4 -// RoundWithPrecision rounds elements with specified precision. -// Const Immediate = 0. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) RoundWithPrecision(imm8 uint8) Float32x8 +// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +func (x Uint32x8) ShiftLeftAndFillUpperFrom(y Uint32x8, z Uint32x8) Uint32x8 -// RoundWithPrecision rounds elements with specified precision. -// Const Immediate = 0. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) RoundWithPrecision(imm8 uint8) Float32x16 +// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +func (x Uint32x16) ShiftLeftAndFillUpperFrom(y Uint32x16, z Uint32x16) Uint32x16 -// RoundWithPrecision rounds elements with specified precision. -// Const Immediate = 0. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) RoundWithPrecision(imm8 uint8) Float64x2 +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Uint64x2) ShiftLeftAndFillUpperFrom(y Uint64x2, z Uint64x2) Uint64x2 -// RoundWithPrecision rounds elements with specified precision. -// Const Immediate = 0. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
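ShiftLeftAndFillUpperFrom combines the two ideas above: per-lane counts from y and fill bits from z. A scalar sketch of the 32-bit case, with an illustrative helper name:

// shiftLeftAndFillPerLane shifts each lane of x left by the count in y
// (low 5 bits, per the comments) and fills the emptied low bits from
// the high bits of the matching lane of z.
func shiftLeftAndFillPerLane(x, y, z [4]uint32) [4]uint32 {
	var out [4]uint32
	for i := range x {
		n := y[i] & 31
		out[i] = x[i]<<n | z[i]>>(32-n)
	}
	return out
}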
// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) RoundWithPrecision(imm8 uint8) Float64x4 +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) ShiftLeftAndFillUpperFrom(y Uint64x4, z Uint64x4) Uint64x4 -// RoundWithPrecision rounds elements with specified precision. -// Const Immediate = 0. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) RoundWithPrecision(imm8 uint8) Float64x8 +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftLeftAndFillUpperFrom(y Uint64x8, z Uint64x8) Uint64x8 -/* SaturatedAdd */ +/* ShiftRight */ -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPADDSB, CPU Feature: AVX -func (x Int8x16) SaturatedAdd(y Int8x16) Int8x16 +// Asm: VPSRLVW, CPU Feature: AVX512EVEX +func (x Int16x8) ShiftRight(y Int16x8) Int16x8 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPADDSB, CPU Feature: AVX2 -func (x Int8x32) SaturatedAdd(y Int8x32) Int8x32 +// Asm: VPSRLVW, CPU Feature: AVX512EVEX +func (x Int16x16) ShiftRight(y Int16x16) Int16x16 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPADDSB, CPU Feature: AVX512EVEX -func (x Int8x64) SaturatedAdd(y Int8x64) Int8x64 +// Asm: VPSRLVW, CPU Feature: AVX512EVEX +func (x Int16x32) ShiftRight(y Int16x32) Int16x32 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPADDSW, CPU Feature: AVX -func (x Int16x8) SaturatedAdd(y Int16x8) Int16x8 +// Asm: VPSRLVD, CPU Feature: AVX2 +func (x Int32x4) ShiftRight(y Int32x4) Int32x4 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPADDSW, CPU Feature: AVX2 -func (x Int16x16) SaturatedAdd(y Int16x16) Int16x16 +// Asm: VPSRLVD, CPU Feature: AVX2 +func (x Int32x8) ShiftRight(y Int32x8) Int32x8 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPADDSW, CPU Feature: AVX512EVEX -func (x Int16x32) SaturatedAdd(y Int16x32) Int16x32 +// Asm: VPSRLVD, CPU Feature: AVX512EVEX +func (x Int32x16) ShiftRight(y Int32x16) Int32x16 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. 
// -// Asm: VPADDSB, CPU Feature: AVX -func (x Uint8x16) SaturatedAdd(y Uint8x16) Uint8x16 +// Asm: VPSRLVQ, CPU Feature: AVX2 +func (x Int64x2) ShiftRight(y Int64x2) Int64x2 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPADDSB, CPU Feature: AVX2 -func (x Uint8x32) SaturatedAdd(y Uint8x32) Uint8x32 +// Asm: VPSRLVQ, CPU Feature: AVX2 +func (x Int64x4) ShiftRight(y Int64x4) Int64x4 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPADDSB, CPU Feature: AVX512EVEX -func (x Uint8x64) SaturatedAdd(y Uint8x64) Uint8x64 +// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftRight(y Int64x8) Int64x8 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPADDSW, CPU Feature: AVX -func (x Uint16x8) SaturatedAdd(y Uint16x8) Uint16x8 +// Asm: VPSRLVW, CPU Feature: AVX512EVEX +func (x Uint16x8) ShiftRight(y Uint16x8) Uint16x8 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPADDSW, CPU Feature: AVX2 -func (x Uint16x16) SaturatedAdd(y Uint16x16) Uint16x16 +// Asm: VPSRLVW, CPU Feature: AVX512EVEX +func (x Uint16x16) ShiftRight(y Uint16x16) Uint16x16 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPADDSW, CPU Feature: AVX512EVEX -func (x Uint16x32) SaturatedAdd(y Uint16x32) Uint16x32 - -/* SaturatedPairDotProdAccumulate */ +// Asm: VPSRLVW, CPU Feature: AVX512EVEX +func (x Uint16x32) ShiftRight(y Uint16x32) Uint16x32 -// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPDPWSSDS, CPU Feature: AVX_VNNI -func (x Int32x4) SaturatedPairDotProdAccumulate(y Int16x8, z Int16x8) Int32x4 +// Asm: VPSRLVD, CPU Feature: AVX2 +func (x Uint32x4) ShiftRight(y Uint32x4) Uint32x4 -// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPDPWSSDS, CPU Feature: AVX_VNNI -func (x Int32x8) SaturatedPairDotProdAccumulate(y Int16x16, z Int16x16) Int32x8 +// Asm: VPSRLVD, CPU Feature: AVX2 +func (x Uint32x8) ShiftRight(y Uint32x8) Uint32x8 -// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. 
// -// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX -func (x Int32x16) SaturatedPairDotProdAccumulate(y Int16x32, z Int16x32) Int32x16 +// Asm: VPSRLVD, CPU Feature: AVX512EVEX +func (x Uint32x16) ShiftRight(y Uint32x16) Uint32x16 -/* SaturatedPairwiseAdd */ +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVQ, CPU Feature: AVX2 +func (x Uint64x2) ShiftRight(y Uint64x2) Uint64x2 -// SaturatedPairwiseAdd horizontally adds adjacent pairs of elements with saturation. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPHADDSW, CPU Feature: AVX -func (x Int16x8) SaturatedPairwiseAdd(y Int16x8) Int16x8 +// Asm: VPSRLVQ, CPU Feature: AVX2 +func (x Uint64x4) ShiftRight(y Uint64x4) Uint64x4 -// SaturatedPairwiseAdd horizontally adds adjacent pairs of elements with saturation. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPHADDSW, CPU Feature: AVX2 -func (x Int16x16) SaturatedPairwiseAdd(y Int16x16) Int16x16 +// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftRight(y Uint64x8) Uint64x8 -/* SaturatedPairwiseSub */ +/* ShiftRightAndFillUpperFrom */ -// SaturatedPairwiseSub horizontally subtracts adjacent pairs of elements with saturation. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPHSUBSW, CPU Feature: AVX -func (x Int16x8) SaturatedPairwiseSub(y Int16x8) Int16x8 +// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +func (x Int16x8) ShiftRightAndFillUpperFrom(y Int16x8, z Int16x8) Int16x8 -// SaturatedPairwiseSub horizontally subtracts adjacent pairs of elements with saturation. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPHSUBSW, CPU Feature: AVX2 -func (x Int16x16) SaturatedPairwiseSub(y Int16x16) Int16x16 +// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +func (x Int16x16) ShiftRightAndFillUpperFrom(y Int16x16, z Int16x16) Int16x16 -/* SaturatedSub */ +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +func (x Int16x32) ShiftRightAndFillUpperFrom(y Int16x32, z Int16x32) Int16x32 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. 
+// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSUBSB, CPU Feature: AVX -func (x Int8x16) SaturatedSub(y Int8x16) Int8x16 +// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +func (x Int32x4) ShiftRightAndFillUpperFrom(y Int32x4, z Int32x4) Int32x4 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSUBSB, CPU Feature: AVX2 -func (x Int8x32) SaturatedSub(y Int8x32) Int8x32 +// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +func (x Int32x8) ShiftRightAndFillUpperFrom(y Int32x8, z Int32x8) Int32x8 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSUBSB, CPU Feature: AVX512EVEX -func (x Int8x64) SaturatedSub(y Int8x64) Int8x64 +// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +func (x Int32x16) ShiftRightAndFillUpperFrom(y Int32x16, z Int32x16) Int32x16 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSUBSW, CPU Feature: AVX -func (x Int16x8) SaturatedSub(y Int16x8) Int16x8 +// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +func (x Int64x2) ShiftRightAndFillUpperFrom(y Int64x2, z Int64x2) Int64x2 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSUBSW, CPU Feature: AVX2 -func (x Int16x16) SaturatedSub(y Int16x16) Int16x16 +// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +func (x Int64x4) ShiftRightAndFillUpperFrom(y Int64x4, z Int64x4) Int64x4 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSUBSW, CPU Feature: AVX512EVEX -func (x Int16x32) SaturatedSub(y Int16x32) Int16x32 +// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftRightAndFillUpperFrom(y Int64x8, z Int64x8) Int64x8 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// -// Asm: VPSUBSB, CPU Feature: AVX -func (x Uint8x16) SaturatedSub(y Uint8x16) Uint8x16 +// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +func (x Uint16x8) ShiftRightAndFillUpperFrom(y Uint16x8, z Uint16x8) Uint16x8 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSUBSB, CPU Feature: AVX2 -func (x Uint8x32) SaturatedSub(y Uint8x32) Uint8x32 +// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +func (x Uint16x16) ShiftRightAndFillUpperFrom(y Uint16x16, z Uint16x16) Uint16x16 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSUBSB, CPU Feature: AVX512EVEX -func (x Uint8x64) SaturatedSub(y Uint8x64) Uint8x64 +// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +func (x Uint16x32) ShiftRightAndFillUpperFrom(y Uint16x32, z Uint16x32) Uint16x32 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSUBSW, CPU Feature: AVX -func (x Uint16x8) SaturatedSub(y Uint16x8) Uint16x8 +// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +func (x Uint32x4) ShiftRightAndFillUpperFrom(y Uint32x4, z Uint32x4) Uint32x4 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSUBSW, CPU Feature: AVX2 -func (x Uint16x16) SaturatedSub(y Uint16x16) Uint16x16 +// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +func (x Uint32x8) ShiftRightAndFillUpperFrom(y Uint32x8, z Uint32x8) Uint32x8 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSUBSW, CPU Feature: AVX512EVEX -func (x Uint16x32) SaturatedSub(y Uint16x32) Uint16x32 +// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +func (x Uint32x16) ShiftRightAndFillUpperFrom(y Uint32x16, z Uint32x16) Uint32x16 -/* SaturatedUnsignedSignedPairDotProd */ +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
+// +// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +func (x Uint64x2) ShiftRightAndFillUpperFrom(y Uint64x2, z Uint64x2) Uint64x2 -// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, -// yielding a vector of half as many elements with twice the input element size. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPMADDUBSW, CPU Feature: AVX -func (x Uint8x16) SaturatedUnsignedSignedPairDotProd(y Int8x16) Int16x8 +// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) ShiftRightAndFillUpperFrom(y Uint64x4, z Uint64x4) Uint64x4 -// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, -// yielding a vector of half as many elements with twice the input element size. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPMADDUBSW, CPU Feature: AVX2 -func (x Uint8x32) SaturatedUnsignedSignedPairDotProd(y Int8x32) Int16x16 +// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftRightAndFillUpperFrom(y Uint64x8, z Uint64x8) Uint64x8 -// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, -// yielding a vector of half as many elements with twice the input element size. +/* ShiftRightSignExtended */ + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX -func (x Uint8x64) SaturatedUnsignedSignedPairDotProd(y Int8x64) Int16x32 +// Asm: VPSRAVW, CPU Feature: AVX512EVEX +func (x Int16x8) ShiftRightSignExtended(y Int16x8) Int16x8 -/* SaturatedUnsignedSignedQuadDotProdAccumulate */ +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVW, CPU Feature: AVX512EVEX +func (x Int16x16) ShiftRightSignExtended(y Int16x16) Int16x16 -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI -func (x Int32x4) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16) Int32x4 +// Asm: VPSRAVW, CPU Feature: AVX512EVEX +func (x Int16x32) ShiftRightSignExtended(y Int16x32) Int16x32 -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
// -// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI -func (x Int32x8) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32) Int32x8 +// Asm: VPSRAVD, CPU Feature: AVX2 +func (x Int32x4) ShiftRightSignExtended(y Int32x4) Int32x4 -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Int32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Int32x16 +// Asm: VPSRAVD, CPU Feature: AVX2 +func (x Int32x8) ShiftRightSignExtended(y Int32x8) Int32x8 -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI -func (x Uint32x4) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16) Uint32x4 +// Asm: VPSRAVD, CPU Feature: AVX512EVEX +func (x Int32x16) ShiftRightSignExtended(y Int32x16) Int32x16 -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI -func (x Uint32x8) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32) Uint32x8 +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +func (x Int64x2) ShiftRightSignExtended(y Int64x2) Int64x2 -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Uint32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Uint32x16 +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +func (x Int64x4) ShiftRightSignExtended(y Int64x4) Int64x4 -/* SetElem */ +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftRightSignExtended(y Int64x8) Int64x8 -// SetElem sets a single constant-indexed element's value. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPINSRB, CPU Feature: AVX -func (x Int8x16) SetElem(imm uint8, y int8) Int8x16 +// Asm: VPSRAVW, CPU Feature: AVX512EVEX +func (x Uint16x8) ShiftRightSignExtended(y Uint16x8) Uint16x8 -// SetElem sets a single constant-indexed element's value. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
// -// Asm: VPINSRW, CPU Feature: AVX -func (x Int16x8) SetElem(imm uint8, y int16) Int16x8 +// Asm: VPSRAVW, CPU Feature: AVX512EVEX +func (x Uint16x16) ShiftRightSignExtended(y Uint16x16) Uint16x16 -// SetElem sets a single constant-indexed element's value. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPINSRD, CPU Feature: AVX -func (x Int32x4) SetElem(imm uint8, y int32) Int32x4 +// Asm: VPSRAVW, CPU Feature: AVX512EVEX +func (x Uint16x32) ShiftRightSignExtended(y Uint16x32) Uint16x32 -// SetElem sets a single constant-indexed element's value. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPINSRQ, CPU Feature: AVX -func (x Int64x2) SetElem(imm uint8, y int64) Int64x2 +// Asm: VPSRAVD, CPU Feature: AVX2 +func (x Uint32x4) ShiftRightSignExtended(y Uint32x4) Uint32x4 -// SetElem sets a single constant-indexed element's value. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPINSRB, CPU Feature: AVX -func (x Uint8x16) SetElem(imm uint8, y uint8) Uint8x16 +// Asm: VPSRAVD, CPU Feature: AVX2 +func (x Uint32x8) ShiftRightSignExtended(y Uint32x8) Uint32x8 -// SetElem sets a single constant-indexed element's value. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPINSRW, CPU Feature: AVX -func (x Uint16x8) SetElem(imm uint8, y uint16) Uint16x8 +// Asm: VPSRAVD, CPU Feature: AVX512EVEX +func (x Uint32x16) ShiftRightSignExtended(y Uint32x16) Uint32x16 -// SetElem sets a single constant-indexed element's value. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPINSRD, CPU Feature: AVX -func (x Uint32x4) SetElem(imm uint8, y uint32) Uint32x4 +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +func (x Uint64x2) ShiftRightSignExtended(y Uint64x2) Uint64x2 -// SetElem sets a single constant-indexed element's value. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPINSRQ, CPU Feature: AVX -func (x Uint64x2) SetElem(imm uint8, y uint64) Uint64x2 +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) ShiftRightSignExtended(y Uint64x4) Uint64x4 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftRightSignExtended(y Uint64x8) Uint64x8 /* Sign */ From 10c96219363778fb421c5c974aac9c06c0c7a181 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Thu, 26 Jun 2025 04:07:48 +0000 Subject: [PATCH 050/139] [dev.simd] cmd/compile, simd: add galois field operations This CL is generated by CL 684275. 
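For context: these operations work in GF(2^8) with the reduction polynomial
x^8 + x^4 + x^3 + x + 1 (the field used by AES and many erasure codes).
GaloisFieldMul is a byte-wise carry-less multiply followed by reduction
(VGF2P8MULB); the affine-transform variants apply an 8x8 bit matrix, packed
into each 64-bit lane of one operand, to every byte of the other operand and
then XOR in the 8-bit immediate (VGF2P8AFFINEQB, and VGF2P8AFFINEINVQB, which
first replaces each byte with its GF(2^8) multiplicative inverse). A minimal
usage sketch follows; it is illustrative only, assuming the generated stubs
expose GaloisFieldMul on Uint8x16 with the shape implied by the generic ops in
this CL, and assuming LoadUint8x16/Store array-pointer helpers exist with the
shapes shown:

	package main

	import "simd"

	// gfmul16 multiplies a and b byte-wise in GF(2^8) and stores the
	// result in dst. With GOEXPERIMENT=simd this is expected to lower to
	// a single VGF2P8MULB via the rewrite rules added in this CL.
	func gfmul16(dst, a, b *[16]uint8) {
		x := simd.LoadUint8x16(a) // assumed load helper
		y := simd.LoadUint8x16(b)
		x.GaloisFieldMul(y).Store(dst) // assumed Store method shape
	}

The masked variants take an additional mask argument and follow the same
lowering pattern as the other masked ops in this file.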
Change-Id: Ie1efd0979af0ef0a56781bf9013071bf4d2c52c5 Reviewed-on: https://go-review.googlesource.com/c/go/+/684175 LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui --- src/cmd/compile/internal/amd64/simdssa.go | 29 +- .../compile/internal/ssa/_gen/simdAMD64.rules | 18 + .../compile/internal/ssa/_gen/simdAMD64ops.go | 18 + .../internal/ssa/_gen/simdgenericOps.go | 18 + src/cmd/compile/internal/ssa/opGen.go | 411 ++++++++++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 303 +++++++++++++ .../compile/internal/ssagen/simdintrinsics.go | 38 ++ src/simd/simd_wrapped_test.go | 16 + src/simd/stubs_amd64.go | 150 +++++++ 9 files changed, 1000 insertions(+), 1 deletion(-) diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 6c1d365bfa7f9b..999f3c200ce798 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -118,6 +118,9 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPCMPEQD256, ssa.OpAMD64VPCMPEQQ128, ssa.OpAMD64VPCMPEQQ256, + ssa.OpAMD64VGF2P8MULB128, + ssa.OpAMD64VGF2P8MULB256, + ssa.OpAMD64VGF2P8MULB512, ssa.OpAMD64VPCMPGTB128, ssa.OpAMD64VPCMPGTB256, ssa.OpAMD64VPCMPGTW128, @@ -395,6 +398,9 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VDIVPDMasked128, ssa.OpAMD64VDIVPDMasked256, ssa.OpAMD64VDIVPDMasked512, + ssa.OpAMD64VGF2P8MULBMasked128, + ssa.OpAMD64VGF2P8MULBMasked256, + ssa.OpAMD64VGF2P8MULBMasked512, ssa.OpAMD64VMAXPSMasked128, ssa.OpAMD64VMAXPSMasked256, ssa.OpAMD64VMAXPSMasked512, @@ -694,6 +700,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VCMPPS256, ssa.OpAMD64VCMPPD128, ssa.OpAMD64VCMPPD256, + ssa.OpAMD64VGF2P8AFFINEQB128, + ssa.OpAMD64VGF2P8AFFINEQB256, + ssa.OpAMD64VGF2P8AFFINEQB512, + ssa.OpAMD64VGF2P8AFFINEINVQB128, + ssa.OpAMD64VGF2P8AFFINEINVQB256, + ssa.OpAMD64VGF2P8AFFINEINVQB512, ssa.OpAMD64VPSHLDW128, ssa.OpAMD64VPSHLDW256, ssa.OpAMD64VPSHLDW512, @@ -920,7 +932,13 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPEXTRQ128: p = simdFpgpImm8(s, v) - case ssa.OpAMD64VPSHLDWMasked128, + case ssa.OpAMD64VGF2P8AFFINEQBMasked128, + ssa.OpAMD64VGF2P8AFFINEQBMasked256, + ssa.OpAMD64VGF2P8AFFINEQBMasked512, + ssa.OpAMD64VGF2P8AFFINEINVQBMasked128, + ssa.OpAMD64VGF2P8AFFINEINVQBMasked256, + ssa.OpAMD64VGF2P8AFFINEINVQBMasked512, + ssa.OpAMD64VPSHLDWMasked128, ssa.OpAMD64VPSHLDWMasked256, ssa.OpAMD64VPSHLDWMasked512, ssa.OpAMD64VPSHLDDMasked128, @@ -1055,6 +1073,15 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VFMSUBADD213PDMasked128, ssa.OpAMD64VFMSUBADD213PDMasked256, ssa.OpAMD64VFMSUBADD213PDMasked512, + ssa.OpAMD64VGF2P8AFFINEQBMasked128, + ssa.OpAMD64VGF2P8AFFINEQBMasked256, + ssa.OpAMD64VGF2P8AFFINEQBMasked512, + ssa.OpAMD64VGF2P8AFFINEINVQBMasked128, + ssa.OpAMD64VGF2P8AFFINEINVQBMasked256, + ssa.OpAMD64VGF2P8AFFINEINVQBMasked512, + ssa.OpAMD64VGF2P8MULBMasked128, + ssa.OpAMD64VGF2P8MULBMasked256, + ssa.OpAMD64VGF2P8MULBMasked512, ssa.OpAMD64VMAXPSMasked128, ssa.OpAMD64VMAXPSMasked256, ssa.OpAMD64VMAXPSMasked512, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 968ded213133a2..6a4ded0ec496b9 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -251,6 +251,15 @@ (FusedMultiplySubAddFloat64x2 ...) => (VFMSUBADD213PD128 ...) (FusedMultiplySubAddFloat64x4 ...) => (VFMSUBADD213PD256 ...) 
(FusedMultiplySubAddFloat64x8 ...) => (VFMSUBADD213PD512 ...) +(GaloisFieldAffineTransformUint8x16 [a] x y) => (VGF2P8AFFINEQB128 [a] x y) +(GaloisFieldAffineTransformUint8x32 [a] x y) => (VGF2P8AFFINEQB256 [a] x y) +(GaloisFieldAffineTransformUint8x64 [a] x y) => (VGF2P8AFFINEQB512 [a] x y) +(GaloisFieldAffineTransformInversedUint8x16 [a] x y) => (VGF2P8AFFINEINVQB128 [a] x y) +(GaloisFieldAffineTransformInversedUint8x32 [a] x y) => (VGF2P8AFFINEINVQB256 [a] x y) +(GaloisFieldAffineTransformInversedUint8x64 [a] x y) => (VGF2P8AFFINEINVQB512 [a] x y) +(GaloisFieldMulUint8x16 ...) => (VGF2P8MULB128 ...) +(GaloisFieldMulUint8x32 ...) => (VGF2P8MULB256 ...) +(GaloisFieldMulUint8x64 ...) => (VGF2P8MULB512 ...) (GetElemInt8x16 [a] x) => (VPEXTRB128 [a] x) (GetElemInt16x8 [a] x) => (VPEXTRW128 [a] x) (GetElemInt32x4 [a] x) => (VPEXTRD128 [a] x) @@ -607,6 +616,15 @@ (MaskedFusedMultiplySubAddFloat64x2 x y z mask) => (VFMSUBADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) (MaskedFusedMultiplySubAddFloat64x4 x y z mask) => (VFMSUBADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) (MaskedFusedMultiplySubAddFloat64x8 x y z mask) => (VFMSUBADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedGaloisFieldAffineTransformUint8x16 [a] x y mask) => (VGF2P8AFFINEQBMasked128 [a] x y (VPMOVVec8x16ToM mask)) +(MaskedGaloisFieldAffineTransformUint8x32 [a] x y mask) => (VGF2P8AFFINEQBMasked256 [a] x y (VPMOVVec8x32ToM mask)) +(MaskedGaloisFieldAffineTransformUint8x64 [a] x y mask) => (VGF2P8AFFINEQBMasked512 [a] x y (VPMOVVec8x64ToM mask)) +(MaskedGaloisFieldAffineTransformInversedUint8x16 [a] x y mask) => (VGF2P8AFFINEINVQBMasked128 [a] x y (VPMOVVec8x16ToM mask)) +(MaskedGaloisFieldAffineTransformInversedUint8x32 [a] x y mask) => (VGF2P8AFFINEINVQBMasked256 [a] x y (VPMOVVec8x32ToM mask)) +(MaskedGaloisFieldAffineTransformInversedUint8x64 [a] x y mask) => (VGF2P8AFFINEINVQBMasked512 [a] x y (VPMOVVec8x64ToM mask)) +(MaskedGaloisFieldMulUint8x16 x y mask) => (VGF2P8MULBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaskedGaloisFieldMulUint8x32 x y mask) => (VGF2P8MULBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaskedGaloisFieldMulUint8x64 x y mask) => (VGF2P8MULBMasked512 x y (VPMOVVec8x64ToM mask)) (MaskedGreaterFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [6] x y (VPMOVVec32x4ToM mask))) (MaskedGreaterFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [6] x y (VPMOVVec32x8ToM mask))) (MaskedGreaterFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [6] x y (VPMOVVec32x16ToM mask))) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index cbddbe0ff6ef7d..5e627e696e96f0 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -719,7 +719,9 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPMINUQ512", argLength: 2, reg: fp21, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULUDQ512", argLength: 2, reg: fp21, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPAVGB128", argLength: 2, reg: fp21, asm: "VPAVGB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8MULB128", argLength: 2, reg: fp21, asm: "VGF2P8MULB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPAVGBMasked128", argLength: 3, reg: fp2kfp, asm: "VPAVGB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8MULBMasked128", argLength: 3, reg: fp2kfp, 
asm: "VGF2P8MULB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMAXUBMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXUB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUBMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMADDUBSWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -727,7 +729,9 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPMINUB128", argLength: 2, reg: fp21, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMADDUBSW128", argLength: 2, reg: fp21, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPAVGB256", argLength: 2, reg: fp21, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8MULB256", argLength: 2, reg: fp21, asm: "VGF2P8MULB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPAVGBMasked256", argLength: 3, reg: fp2kfp, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8MULBMasked256", argLength: 3, reg: fp2kfp, asm: "VGF2P8MULB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMAXUBMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUBMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMADDUBSWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -735,7 +739,9 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPMINUB256", argLength: 2, reg: fp21, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMADDUBSW256", argLength: 2, reg: fp21, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPAVGB512", argLength: 2, reg: fp21, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VGF2P8MULB512", argLength: 2, reg: fp21, asm: "VGF2P8MULB", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPAVGBMasked512", argLength: 3, reg: fp2kfp, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VGF2P8MULBMasked512", argLength: 3, reg: fp2kfp, asm: "VGF2P8MULB", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPMAXUBMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINUBMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMADDUBSWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -894,10 +900,22 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPCMPUQ512", argLength: 2, reg: fp2k, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUQMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUB128", argLength: 2, reg: fp2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VGF2P8AFFINEQB128", argLength: 2, reg: fp21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec128", 
resultInArg0: false}, + {name: "VGF2P8AFFINEINVQB128", argLength: 2, reg: fp21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPUBMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VGF2P8AFFINEQBMasked128", argLength: 3, reg: fp2kfp, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQBMasked128", argLength: 3, reg: fp2kfp, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPUB256", argLength: 2, reg: fp2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VGF2P8AFFINEQB256", argLength: 2, reg: fp21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQB256", argLength: 2, reg: fp21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPUBMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VGF2P8AFFINEQBMasked256", argLength: 3, reg: fp2kfp, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQBMasked256", argLength: 3, reg: fp2kfp, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPUB512", argLength: 2, reg: fp2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VGF2P8AFFINEQB512", argLength: 2, reg: fp21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQB512", argLength: 2, reg: fp21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPCMPUBMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VGF2P8AFFINEQBMasked512", argLength: 3, reg: fp2kfp, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQBMasked512", argLength: 3, reg: fp2kfp, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, } } diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 0f3d3f8214cd22..4907b78d12e6d2 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -1365,6 +1365,7 @@ func simdGenericOps() []opData { {name: "AndNotUint8x16", argLength: 2, commutative: false}, {name: "AverageUint8x16", argLength: 2, commutative: true}, {name: "EqualUint8x16", argLength: 2, commutative: true}, + {name: "GaloisFieldMulUint8x16", argLength: 2, commutative: false}, {name: "GreaterUint8x16", argLength: 2, commutative: false}, {name: "GreaterEqualUint8x16", argLength: 2, commutative: false}, {name: "LessUint8x16", argLength: 2, commutative: false}, @@ -1372,6 +1373,7 @@ func simdGenericOps() []opData { {name: "MaskedAddUint8x16", argLength: 3, commutative: true}, {name: "MaskedAverageUint8x16", argLength: 3, commutative: true}, {name: "MaskedEqualUint8x16", argLength: 3, commutative: true}, + {name: "MaskedGaloisFieldMulUint8x16", argLength: 3, commutative: false}, {name: "MaskedGreaterUint8x16", argLength: 3, commutative: 
false}, {name: "MaskedGreaterEqualUint8x16", argLength: 3, commutative: false}, {name: "MaskedLessUint8x16", argLength: 3, commutative: false}, @@ -1399,6 +1401,7 @@ func simdGenericOps() []opData { {name: "AndNotUint8x32", argLength: 2, commutative: false}, {name: "AverageUint8x32", argLength: 2, commutative: true}, {name: "EqualUint8x32", argLength: 2, commutative: true}, + {name: "GaloisFieldMulUint8x32", argLength: 2, commutative: false}, {name: "GreaterUint8x32", argLength: 2, commutative: false}, {name: "GreaterEqualUint8x32", argLength: 2, commutative: false}, {name: "LessUint8x32", argLength: 2, commutative: false}, @@ -1406,6 +1409,7 @@ func simdGenericOps() []opData { {name: "MaskedAddUint8x32", argLength: 3, commutative: true}, {name: "MaskedAverageUint8x32", argLength: 3, commutative: true}, {name: "MaskedEqualUint8x32", argLength: 3, commutative: true}, + {name: "MaskedGaloisFieldMulUint8x32", argLength: 3, commutative: false}, {name: "MaskedGreaterUint8x32", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualUint8x32", argLength: 3, commutative: false}, {name: "MaskedLessUint8x32", argLength: 3, commutative: false}, @@ -1431,6 +1435,7 @@ func simdGenericOps() []opData { {name: "AddUint8x64", argLength: 2, commutative: true}, {name: "AverageUint8x64", argLength: 2, commutative: true}, {name: "EqualUint8x64", argLength: 2, commutative: true}, + {name: "GaloisFieldMulUint8x64", argLength: 2, commutative: false}, {name: "GreaterUint8x64", argLength: 2, commutative: false}, {name: "GreaterEqualUint8x64", argLength: 2, commutative: false}, {name: "LessUint8x64", argLength: 2, commutative: false}, @@ -1438,6 +1443,7 @@ func simdGenericOps() []opData { {name: "MaskedAddUint8x64", argLength: 3, commutative: true}, {name: "MaskedAverageUint8x64", argLength: 3, commutative: true}, {name: "MaskedEqualUint8x64", argLength: 3, commutative: true}, + {name: "MaskedGaloisFieldMulUint8x64", argLength: 3, commutative: false}, {name: "MaskedGreaterUint8x64", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualUint8x64", argLength: 3, commutative: false}, {name: "MaskedLessUint8x64", argLength: 3, commutative: false}, @@ -1784,7 +1790,19 @@ func simdGenericOps() []opData { {name: "RotateAllRightUint64x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromUint64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromUint64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformUint8x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformInversedUint8x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "GetElemUint8x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "MaskedGaloisFieldAffineTransformUint8x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "MaskedGaloisFieldAffineTransformInversedUint8x16", argLength: 3, commutative: false, aux: "Int8"}, {name: "SetElemUint8x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformUint8x32", argLength: 2, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformInversedUint8x32", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedGaloisFieldAffineTransformUint8x32", argLength: 3, commutative: false, aux: "Int8"}, + {name: "MaskedGaloisFieldAffineTransformInversedUint8x32", argLength: 3, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformUint8x64", argLength: 2, commutative: false, aux: "Int8"}, 
+ {name: "GaloisFieldAffineTransformInversedUint8x64", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedGaloisFieldAffineTransformUint8x64", argLength: 3, commutative: false, aux: "Int8"}, + {name: "MaskedGaloisFieldAffineTransformInversedUint8x64", argLength: 3, commutative: false, aux: "Int8"}, } } diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 2bdbd5156e1984..906bd74cdcbd11 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1912,7 +1912,9 @@ const ( OpAMD64VPMINUQ512 OpAMD64VPMULUDQ512 OpAMD64VPAVGB128 + OpAMD64VGF2P8MULB128 OpAMD64VPAVGBMasked128 + OpAMD64VGF2P8MULBMasked128 OpAMD64VPMAXUBMasked128 OpAMD64VPMINUBMasked128 OpAMD64VPMADDUBSWMasked128 @@ -1920,7 +1922,9 @@ const ( OpAMD64VPMINUB128 OpAMD64VPMADDUBSW128 OpAMD64VPAVGB256 + OpAMD64VGF2P8MULB256 OpAMD64VPAVGBMasked256 + OpAMD64VGF2P8MULBMasked256 OpAMD64VPMAXUBMasked256 OpAMD64VPMINUBMasked256 OpAMD64VPMADDUBSWMasked256 @@ -1928,7 +1932,9 @@ const ( OpAMD64VPMINUB256 OpAMD64VPMADDUBSW256 OpAMD64VPAVGB512 + OpAMD64VGF2P8MULB512 OpAMD64VPAVGBMasked512 + OpAMD64VGF2P8MULBMasked512 OpAMD64VPMAXUBMasked512 OpAMD64VPMINUBMasked512 OpAMD64VPMADDUBSWMasked512 @@ -2087,11 +2093,23 @@ const ( OpAMD64VPCMPUQ512 OpAMD64VPCMPUQMasked512 OpAMD64VPCMPUB128 + OpAMD64VGF2P8AFFINEQB128 + OpAMD64VGF2P8AFFINEINVQB128 OpAMD64VPCMPUBMasked128 + OpAMD64VGF2P8AFFINEQBMasked128 + OpAMD64VGF2P8AFFINEINVQBMasked128 OpAMD64VPCMPUB256 + OpAMD64VGF2P8AFFINEQB256 + OpAMD64VGF2P8AFFINEINVQB256 OpAMD64VPCMPUBMasked256 + OpAMD64VGF2P8AFFINEQBMasked256 + OpAMD64VGF2P8AFFINEINVQBMasked256 OpAMD64VPCMPUB512 + OpAMD64VGF2P8AFFINEQB512 + OpAMD64VGF2P8AFFINEINVQB512 OpAMD64VPCMPUBMasked512 + OpAMD64VGF2P8AFFINEQBMasked512 + OpAMD64VGF2P8AFFINEINVQBMasked512 OpARMADD OpARMADDconst @@ -5680,6 +5698,7 @@ const ( OpAndNotUint8x16 OpAverageUint8x16 OpEqualUint8x16 + OpGaloisFieldMulUint8x16 OpGreaterUint8x16 OpGreaterEqualUint8x16 OpLessUint8x16 @@ -5687,6 +5706,7 @@ const ( OpMaskedAddUint8x16 OpMaskedAverageUint8x16 OpMaskedEqualUint8x16 + OpMaskedGaloisFieldMulUint8x16 OpMaskedGreaterUint8x16 OpMaskedGreaterEqualUint8x16 OpMaskedLessUint8x16 @@ -5714,6 +5734,7 @@ const ( OpAndNotUint8x32 OpAverageUint8x32 OpEqualUint8x32 + OpGaloisFieldMulUint8x32 OpGreaterUint8x32 OpGreaterEqualUint8x32 OpLessUint8x32 @@ -5721,6 +5742,7 @@ const ( OpMaskedAddUint8x32 OpMaskedAverageUint8x32 OpMaskedEqualUint8x32 + OpMaskedGaloisFieldMulUint8x32 OpMaskedGreaterUint8x32 OpMaskedGreaterEqualUint8x32 OpMaskedLessUint8x32 @@ -5746,6 +5768,7 @@ const ( OpAddUint8x64 OpAverageUint8x64 OpEqualUint8x64 + OpGaloisFieldMulUint8x64 OpGreaterUint8x64 OpGreaterEqualUint8x64 OpLessUint8x64 @@ -5753,6 +5776,7 @@ const ( OpMaskedAddUint8x64 OpMaskedAverageUint8x64 OpMaskedEqualUint8x64 + OpMaskedGaloisFieldMulUint8x64 OpMaskedGreaterUint8x64 OpMaskedGreaterEqualUint8x64 OpMaskedLessUint8x64 @@ -6099,8 +6123,20 @@ const ( OpRotateAllRightUint64x8 OpShiftAllLeftAndFillUpperFromUint64x8 OpShiftAllRightAndFillUpperFromUint64x8 + OpGaloisFieldAffineTransformUint8x16 + OpGaloisFieldAffineTransformInversedUint8x16 OpGetElemUint8x16 + OpMaskedGaloisFieldAffineTransformUint8x16 + OpMaskedGaloisFieldAffineTransformInversedUint8x16 OpSetElemUint8x16 + OpGaloisFieldAffineTransformUint8x32 + OpGaloisFieldAffineTransformInversedUint8x32 + OpMaskedGaloisFieldAffineTransformUint8x32 + OpMaskedGaloisFieldAffineTransformInversedUint8x32 + OpGaloisFieldAffineTransformUint8x64 + 
OpGaloisFieldAffineTransformInversedUint8x64 + OpMaskedGaloisFieldAffineTransformUint8x64 + OpMaskedGaloisFieldAffineTransformInversedUint8x64 ) var opcodeTable = [...]opInfo{ @@ -29452,6 +29488,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VGF2P8MULB128", + argLen: 2, + asm: x86.AVGF2P8MULB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPAVGBMasked128", argLen: 3, @@ -29468,6 +29518,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VGF2P8MULBMasked128", + argLen: 3, + asm: x86.AVGF2P8MULB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMAXUBMasked128", argLen: 3, @@ -29574,6 +29639,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VGF2P8MULB256", + argLen: 2, + asm: x86.AVGF2P8MULB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPAVGBMasked256", argLen: 3, @@ -29590,6 +29669,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VGF2P8MULBMasked256", + argLen: 3, + asm: x86.AVGF2P8MULB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMAXUBMasked256", argLen: 3, @@ -29696,6 +29790,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VGF2P8MULB512", + argLen: 2, + asm: x86.AVGF2P8MULB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPAVGBMasked512", argLen: 3, @@ -29712,6 +29820,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VGF2P8MULBMasked512", + argLen: 3, + asm: x86.AVGF2P8MULB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMAXUBMasked512", argLen: 3, @@ -32144,6 +32267,36 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VGF2P8AFFINEQB128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVGF2P8AFFINEQB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VGF2P8AFFINEINVQB128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVGF2P8AFFINEINVQB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPUBMasked128", auxType: auxInt8, @@ -32161,6 +32314,38 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VGF2P8AFFINEQBMasked128", + auxType: auxInt8, + argLen: 3, + asm: x86.AVGF2P8AFFINEQB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VGF2P8AFFINEINVQBMasked128", + auxType: auxInt8, + argLen: 3, + asm: x86.AVGF2P8AFFINEINVQB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPUB256", auxType: auxInt8, @@ -32177,6 +32362,36 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VGF2P8AFFINEQB256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVGF2P8AFFINEQB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VGF2P8AFFINEINVQB256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVGF2P8AFFINEINVQB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPUBMasked256", auxType: auxInt8, @@ -32194,6 +32409,38 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VGF2P8AFFINEQBMasked256", + auxType: auxInt8, + argLen: 3, + asm: x86.AVGF2P8AFFINEQB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VGF2P8AFFINEINVQBMasked256", + auxType: auxInt8, + argLen: 3, + asm: x86.AVGF2P8AFFINEINVQB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPUB512", auxType: auxInt8, @@ -32210,6 +32457,36 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VGF2P8AFFINEQB512", + auxType: auxInt8, + argLen: 2, + asm: 
x86.AVGF2P8AFFINEQB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VGF2P8AFFINEINVQB512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVGF2P8AFFINEINVQB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPUBMasked512", auxType: auxInt8, @@ -32227,6 +32504,38 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VGF2P8AFFINEQBMasked512", + auxType: auxInt8, + argLen: 3, + asm: x86.AVGF2P8AFFINEQB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VGF2P8AFFINEINVQBMasked512", + auxType: auxInt8, + argLen: 3, + asm: x86.AVGF2P8AFFINEINVQB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "ADD", @@ -66684,6 +66993,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "GaloisFieldMulUint8x16", + argLen: 2, + generic: true, + }, { name: "GreaterUint8x16", argLen: 2, @@ -66722,6 +67036,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MaskedGaloisFieldMulUint8x16", + argLen: 3, + generic: true, + }, { name: "MaskedGreaterUint8x16", argLen: 3, @@ -66871,6 +67190,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "GaloisFieldMulUint8x32", + argLen: 2, + generic: true, + }, { name: "GreaterUint8x32", argLen: 2, @@ -66909,6 +67233,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MaskedGaloisFieldMulUint8x32", + argLen: 3, + generic: true, + }, { name: "MaskedGreaterUint8x32", argLen: 3, @@ -67047,6 +67376,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "GaloisFieldMulUint8x64", + argLen: 2, + generic: true, + }, { name: "GreaterUint8x64", argLen: 2, @@ -67085,6 +67419,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MaskedGaloisFieldMulUint8x64", + argLen: 3, + generic: true, + }, { name: "MaskedGreaterUint8x64", argLen: 3, @@ -69149,18 +69488,90 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "GaloisFieldAffineTransformUint8x16", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "GaloisFieldAffineTransformInversedUint8x16", + auxType: auxInt8, + argLen: 2, + generic: true, + }, { name: "GetElemUint8x16", auxType: auxInt8, argLen: 1, generic: true, }, + { + name: "MaskedGaloisFieldAffineTransformUint8x16", + auxType: auxInt8, + argLen: 3, + generic: true, + }, + { + name: 
"MaskedGaloisFieldAffineTransformInversedUint8x16", + auxType: auxInt8, + argLen: 3, + generic: true, + }, { name: "SetElemUint8x16", auxType: auxInt8, argLen: 2, generic: true, }, + { + name: "GaloisFieldAffineTransformUint8x32", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "GaloisFieldAffineTransformInversedUint8x32", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedGaloisFieldAffineTransformUint8x32", + auxType: auxInt8, + argLen: 3, + generic: true, + }, + { + name: "MaskedGaloisFieldAffineTransformInversedUint8x32", + auxType: auxInt8, + argLen: 3, + generic: true, + }, + { + name: "GaloisFieldAffineTransformUint8x64", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "GaloisFieldAffineTransformInversedUint8x64", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedGaloisFieldAffineTransformUint8x64", + auxType: auxInt8, + argLen: 3, + generic: true, + }, + { + name: "MaskedGaloisFieldAffineTransformInversedUint8x64", + auxType: auxInt8, + argLen: 3, + generic: true, + }, } func (o Op) Asm() obj.As { return opcodeTable[o].asm } diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index d7aa0339e7c8c0..22085dc80eadeb 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1439,6 +1439,27 @@ func rewriteValueAMD64(v *Value) bool { case OpFusedMultiplySubAddFloat64x8: v.Op = OpAMD64VFMSUBADD213PD512 return true + case OpGaloisFieldAffineTransformInversedUint8x16: + return rewriteValueAMD64_OpGaloisFieldAffineTransformInversedUint8x16(v) + case OpGaloisFieldAffineTransformInversedUint8x32: + return rewriteValueAMD64_OpGaloisFieldAffineTransformInversedUint8x32(v) + case OpGaloisFieldAffineTransformInversedUint8x64: + return rewriteValueAMD64_OpGaloisFieldAffineTransformInversedUint8x64(v) + case OpGaloisFieldAffineTransformUint8x16: + return rewriteValueAMD64_OpGaloisFieldAffineTransformUint8x16(v) + case OpGaloisFieldAffineTransformUint8x32: + return rewriteValueAMD64_OpGaloisFieldAffineTransformUint8x32(v) + case OpGaloisFieldAffineTransformUint8x64: + return rewriteValueAMD64_OpGaloisFieldAffineTransformUint8x64(v) + case OpGaloisFieldMulUint8x16: + v.Op = OpAMD64VGF2P8MULB128 + return true + case OpGaloisFieldMulUint8x32: + v.Op = OpAMD64VGF2P8MULB256 + return true + case OpGaloisFieldMulUint8x64: + v.Op = OpAMD64VGF2P8MULB512 + return true case OpGetCallerPC: v.Op = OpAMD64LoweredGetCallerPC return true @@ -2268,6 +2289,24 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat64x4(v) case OpMaskedFusedMultiplySubAddFloat64x8: return rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat64x8(v) + case OpMaskedGaloisFieldAffineTransformInversedUint8x16: + return rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformInversedUint8x16(v) + case OpMaskedGaloisFieldAffineTransformInversedUint8x32: + return rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformInversedUint8x32(v) + case OpMaskedGaloisFieldAffineTransformInversedUint8x64: + return rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformInversedUint8x64(v) + case OpMaskedGaloisFieldAffineTransformUint8x16: + return rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformUint8x16(v) + case OpMaskedGaloisFieldAffineTransformUint8x32: + return rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformUint8x32(v) + case OpMaskedGaloisFieldAffineTransformUint8x64: + return 
rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformUint8x64(v) + case OpMaskedGaloisFieldMulUint8x16: + return rewriteValueAMD64_OpMaskedGaloisFieldMulUint8x16(v) + case OpMaskedGaloisFieldMulUint8x32: + return rewriteValueAMD64_OpMaskedGaloisFieldMulUint8x32(v) + case OpMaskedGaloisFieldMulUint8x64: + return rewriteValueAMD64_OpMaskedGaloisFieldMulUint8x64(v) case OpMaskedGreaterEqualFloat32x16: return rewriteValueAMD64_OpMaskedGreaterEqualFloat32x16(v) case OpMaskedGreaterEqualFloat32x4: @@ -31510,6 +31549,96 @@ func rewriteValueAMD64_OpFloorWithPrecisionFloat64x8(v *Value) bool { return true } } +func rewriteValueAMD64_OpGaloisFieldAffineTransformInversedUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GaloisFieldAffineTransformInversedUint8x16 [a] x y) + // result: (VGF2P8AFFINEINVQB128 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VGF2P8AFFINEINVQB128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpGaloisFieldAffineTransformInversedUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GaloisFieldAffineTransformInversedUint8x32 [a] x y) + // result: (VGF2P8AFFINEINVQB256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VGF2P8AFFINEINVQB256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpGaloisFieldAffineTransformInversedUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GaloisFieldAffineTransformInversedUint8x64 [a] x y) + // result: (VGF2P8AFFINEINVQB512 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VGF2P8AFFINEINVQB512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpGaloisFieldAffineTransformUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GaloisFieldAffineTransformUint8x16 [a] x y) + // result: (VGF2P8AFFINEQB128 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VGF2P8AFFINEQB128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpGaloisFieldAffineTransformUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GaloisFieldAffineTransformUint8x32 [a] x y) + // result: (VGF2P8AFFINEQB256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VGF2P8AFFINEQB256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpGaloisFieldAffineTransformUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GaloisFieldAffineTransformUint8x64 [a] x y) + // result: (VGF2P8AFFINEQB512 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VGF2P8AFFINEQB512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} func rewriteValueAMD64_OpGetElemInt16x8(v *Value) bool { v_0 := v.Args[0] // match: (GetElemInt16x8 [a] x) @@ -38990,6 +39119,180 @@ func rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat64x8(v *Value) bool { return true } } +func rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformInversedUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedGaloisFieldAffineTransformInversedUint8x16 [a] x y mask) + // result: (VGF2P8AFFINEINVQBMasked128 [a] x y (VPMOVVec8x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 
+ v.reset(OpAMD64VGF2P8AFFINEINVQBMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformInversedUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedGaloisFieldAffineTransformInversedUint8x32 [a] x y mask) + // result: (VGF2P8AFFINEINVQBMasked256 [a] x y (VPMOVVec8x32ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VGF2P8AFFINEINVQBMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformInversedUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedGaloisFieldAffineTransformInversedUint8x64 [a] x y mask) + // result: (VGF2P8AFFINEINVQBMasked512 [a] x y (VPMOVVec8x64ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VGF2P8AFFINEINVQBMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedGaloisFieldAffineTransformUint8x16 [a] x y mask) + // result: (VGF2P8AFFINEQBMasked128 [a] x y (VPMOVVec8x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VGF2P8AFFINEQBMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedGaloisFieldAffineTransformUint8x32 [a] x y mask) + // result: (VGF2P8AFFINEQBMasked256 [a] x y (VPMOVVec8x32ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VGF2P8AFFINEQBMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedGaloisFieldAffineTransformUint8x64 [a] x y mask) + // result: (VGF2P8AFFINEQBMasked512 [a] x y (VPMOVVec8x64ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VGF2P8AFFINEQBMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGaloisFieldMulUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedGaloisFieldMulUint8x16 x y mask) + // result: (VGF2P8MULBMasked128 x y (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VGF2P8MULBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func 
rewriteValueAMD64_OpMaskedGaloisFieldMulUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedGaloisFieldMulUint8x32 x y mask) + // result: (VGF2P8MULBMasked256 x y (VPMOVVec8x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VGF2P8MULBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGaloisFieldMulUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedGaloisFieldMulUint8x64 x y mask) + // result: (VGF2P8MULBMasked512 x y (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VGF2P8MULBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} func rewriteValueAMD64_OpMaskedGreaterEqualFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index d20c9392936be5..d14b6be4255b0c 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -262,6 +262,15 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float64x2.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.GaloisFieldAffineTransform", opGaloisFieldAffineTransform(ssa.OpGaloisFieldAffineTransformUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.GaloisFieldAffineTransform", opGaloisFieldAffineTransform(ssa.OpGaloisFieldAffineTransformUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.GaloisFieldAffineTransform", opGaloisFieldAffineTransform(ssa.OpGaloisFieldAffineTransformUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.GaloisFieldAffineTransformInversed", opGaloisFieldAffineTransform(ssa.OpGaloisFieldAffineTransformInversedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.GaloisFieldAffineTransformInversed", opGaloisFieldAffineTransform(ssa.OpGaloisFieldAffineTransformInversedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.GaloisFieldAffineTransformInversed", opGaloisFieldAffineTransform(ssa.OpGaloisFieldAffineTransformInversedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.GaloisFieldMul", opLen2(ssa.OpGaloisFieldMulUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.GaloisFieldMul", opLen2(ssa.OpGaloisFieldMulUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.GaloisFieldMul", opLen2(ssa.OpGaloisFieldMulUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.GetElem", opLen1Imm8(ssa.OpGetElemInt8x16, types.Types[types.TINT8], 0), sys.AMD64) addF(simdPackage, "Int16x8.GetElem", opLen1Imm8(ssa.OpGetElemInt16x8, types.Types[types.TINT16], 0), sys.AMD64) addF(simdPackage, "Int32x4.GetElem", opLen1Imm8(ssa.OpGetElemInt32x4, types.Types[types.TINT32], 0), sys.AMD64) @@ -618,6 +627,15 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, 
archFamilies . addF(simdPackage, "Float64x2.MaskedFusedMultiplySubAdd", opLen4(ssa.OpMaskedFusedMultiplySubAddFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.MaskedFusedMultiplySubAdd", opLen4(ssa.OpMaskedFusedMultiplySubAddFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.MaskedFusedMultiplySubAdd", opLen4(ssa.OpMaskedFusedMultiplySubAddFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedGaloisFieldAffineTransform", opGaloisFieldAffineTransformMasked(ssa.OpMaskedGaloisFieldAffineTransformUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedGaloisFieldAffineTransform", opGaloisFieldAffineTransformMasked(ssa.OpMaskedGaloisFieldAffineTransformUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedGaloisFieldAffineTransform", opGaloisFieldAffineTransformMasked(ssa.OpMaskedGaloisFieldAffineTransformUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedGaloisFieldAffineTransformInversed", opGaloisFieldAffineTransformMasked(ssa.OpMaskedGaloisFieldAffineTransformInversedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedGaloisFieldAffineTransformInversed", opGaloisFieldAffineTransformMasked(ssa.OpMaskedGaloisFieldAffineTransformInversedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedGaloisFieldAffineTransformInversed", opGaloisFieldAffineTransformMasked(ssa.OpMaskedGaloisFieldAffineTransformInversedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedGaloisFieldMul", opLen3(ssa.OpMaskedGaloisFieldMulUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedGaloisFieldMul", opLen3(ssa.OpMaskedGaloisFieldMulUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedGaloisFieldMul", opLen3(ssa.OpMaskedGaloisFieldMulUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat32x16, types.TypeVec512), sys.AMD64) @@ -2197,3 +2215,23 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Mask64x8.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask64x8.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) } + +func opGaloisFieldAffineTransform(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + if args[0].Op == ssa.OpConst8 { + return s.newValue2I(op, t, args[0].AuxInt, args[0], args[1]) + } + plainPanicSimdImm(s) + return s.newValue2I(op, t, 0, args[0], args[1]) + } +} + +func opGaloisFieldAffineTransformMasked(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + if args[0].Op == ssa.OpConst8 { + return s.newValue3I(op, t, args[0].AuxInt, args[0], args[1], args[3]) + } + plainPanicSimdImm(s) + return s.newValue3I(op, t, 0, args[0], args[1], args[3]) + } +} diff --git a/src/simd/simd_wrapped_test.go b/src/simd/simd_wrapped_test.go index ad828e9d3f81c3..6399136fb152a9 100644 --- a/src/simd/simd_wrapped_test.go +++ b/src/simd/simd_wrapped_test.go @@ -4884,6 +4884,8 @@ func testUint8x16Binary(t *testing.T, v0 []uint8, v1 []uint8, want []uint8, whic gotv = vec0.AndNot(vec1) case "Average": gotv = vec0.Average(vec1) + case "GaloisFieldMul": + gotv = vec0.GaloisFieldMul(vec1) case "Max": gotv = vec0.Max(vec1) case "Min": @@ -4922,6 +4924,8 @@ func testUint8x16BinaryMasked(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, w gotv = vec0.MaskedAdd(vec1, vec2.AsMask8x16()) case "MaskedAverage": gotv = vec0.MaskedAverage(vec1, vec2.AsMask8x16()) + case "MaskedGaloisFieldMul": + gotv = vec0.MaskedGaloisFieldMul(vec1, vec2.AsMask8x16()) case "MaskedMax": gotv = vec0.MaskedMax(vec1, vec2.AsMask8x16()) case "MaskedMin": @@ -5106,6 +5110,8 @@ func testUint8x32Binary(t *testing.T, v0 []uint8, v1 []uint8, want []uint8, whic gotv = vec0.AndNot(vec1) case "Average": gotv = vec0.Average(vec1) + case "GaloisFieldMul": + gotv = vec0.GaloisFieldMul(vec1) case "Max": gotv = vec0.Max(vec1) case "Min": @@ -5144,6 +5150,8 @@ func testUint8x32BinaryMasked(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, w gotv = vec0.MaskedAdd(vec1, vec2.AsMask8x32()) case "MaskedAverage": gotv = vec0.MaskedAverage(vec1, vec2.AsMask8x32()) + case "MaskedGaloisFieldMul": + gotv = vec0.MaskedGaloisFieldMul(vec1, vec2.AsMask8x32()) case "MaskedMax": gotv = vec0.MaskedMax(vec1, vec2.AsMask8x32()) case "MaskedMin": @@ -5324,6 +5332,8 @@ func testUint8x64Binary(t *testing.T, v0 []uint8, v1 []uint8, want []uint8, whic gotv = vec0.Add(vec1) case "Average": gotv = vec0.Average(vec1) + case "GaloisFieldMul": + gotv = vec0.GaloisFieldMul(vec1) case "Max": gotv = vec0.Max(vec1) case "Min": @@ -5358,6 +5368,8 @@ func testUint8x64BinaryMasked(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, w gotv = vec0.MaskedAdd(vec1, vec2.AsMask8x64()) case "MaskedAverage": gotv = vec0.MaskedAverage(vec1, vec2.AsMask8x64()) + case "MaskedGaloisFieldMul": + gotv = vec0.MaskedGaloisFieldMul(vec1, vec2.AsMask8x64()) case "MaskedMax": gotv = vec0.MaskedMax(vec1, vec2.AsMask8x64()) case "MaskedMin": @@ -7946,6 +7958,8 @@ func testUint64x8UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint6 // DiffWithTruncWithPrecision // FloorSuppressExceptionWithPrecision // FloorWithPrecision +// GaloisFieldAffineTransform +// GaloisFieldAffineTransformInversed // GetElem // MaskedCeilSuppressExceptionWithPrecision // MaskedCeilWithPrecision @@ -7959,6 +7973,8 @@ func 
testUint64x8UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint6 // MaskedDiffWithTruncWithPrecision // MaskedFloorSuppressExceptionWithPrecision // MaskedFloorWithPrecision +// MaskedGaloisFieldAffineTransform +// MaskedGaloisFieldAffineTransformInversed // MaskedRotateAllLeft // MaskedRotateAllRight // MaskedRoundSuppressExceptionWithPrecision diff --git a/src/simd/stubs_amd64.go b/src/simd/stubs_amd64.go index 330ad6aca2a703..f20a9b17aeffac 100644 --- a/src/simd/stubs_amd64.go +++ b/src/simd/stubs_amd64.go @@ -1426,6 +1426,81 @@ func (x Float64x4) FusedMultiplySubAdd(y Float64x4, z Float64x4) Float64x4 // Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX func (x Float64x8) FusedMultiplySubAdd(y Float64x8, z Float64x8) Float64x8 +/* GaloisFieldAffineTransform */ + +// GaloisFieldAffineTransform computes an affine transformation in GF(2^8): +// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; +// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// corresponding to a group of 8 elements in x. +// +// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX +func (x Uint8x16) GaloisFieldAffineTransform(y Uint64x2, b uint8) Uint8x16 + +// GaloisFieldAffineTransform computes an affine transformation in GF(2^8): +// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; +// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// corresponding to a group of 8 elements in x. +// +// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX +func (x Uint8x32) GaloisFieldAffineTransform(y Uint64x4, b uint8) Uint8x32 + +// GaloisFieldAffineTransform computes an affine transformation in GF(2^8): +// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; +// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// corresponding to a group of 8 elements in x. +// +// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX +func (x Uint8x64) GaloisFieldAffineTransform(y Uint64x8, b uint8) Uint8x64 + +/* GaloisFieldAffineTransformInversed */ + +// GaloisFieldAffineTransform computes an affine transformation in GF(2^8), +// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: +// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; +// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// corresponding to a group of 8 elements in x. +// +// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX +func (x Uint8x16) GaloisFieldAffineTransformInversed(y Uint64x2, b uint8) Uint8x16 + +// GaloisFieldAffineTransform computes an affine transformation in GF(2^8), +// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: +// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; +// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// corresponding to a group of 8 elements in x. +// +// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX +func (x Uint8x32) GaloisFieldAffineTransformInversed(y Uint64x4, b uint8) Uint8x32 + +// GaloisFieldAffineTransform computes an affine transformation in GF(2^8), +// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: +// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; +// b is an 8-bit vector. 
The affine transformation is y * x + b, with each element of y +// corresponding to a group of 8 elements in x. +// +// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX +func (x Uint8x64) GaloisFieldAffineTransformInversed(y Uint64x8, b uint8) Uint8x64 + +/* GaloisFieldMul */ + +// GaloisFieldMul computes element-wise GF(2^8) multiplication with +// reduction polynomial x^8 + x^4 + x^3 + x + 1. +// +// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX +func (x Uint8x16) GaloisFieldMul(y Uint8x16) Uint8x16 + +// GaloisFieldMul computes element-wise GF(2^8) multiplication with +// reduction polynomial x^8 + x^4 + x^3 + x + 1. +// +// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX +func (x Uint8x32) GaloisFieldMul(y Uint8x32) Uint8x32 + +// GaloisFieldMul computes element-wise GF(2^8) multiplication with +// reduction polynomial x^8 + x^4 + x^3 + x + 1. +// +// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX +func (x Uint8x64) GaloisFieldMul(y Uint8x64) Uint8x64 + /* GetElem */ // GetElem retrieves a single constant-indexed element's value. @@ -3494,6 +3569,81 @@ func (x Float64x4) MaskedFusedMultiplySubAdd(y Float64x4, z Float64x4, u Mask64x // Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedFusedMultiplySubAdd(y Float64x8, z Float64x8, u Mask64x8) Float64x8 +/* MaskedGaloisFieldAffineTransform */ + +// GaloisFieldAffineTransform computes an affine transformation in GF(2^8): +// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; +// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// corresponding to a group of 8 elements in x. +// +// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedGaloisFieldAffineTransform(y Uint64x2, b uint8, m Mask8x16) Uint8x16 + +// GaloisFieldAffineTransform computes an affine transformation in GF(2^8): +// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; +// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// corresponding to a group of 8 elements in x. +// +// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaskedGaloisFieldAffineTransform(y Uint64x4, b uint8, m Mask8x32) Uint8x32 + +// GaloisFieldAffineTransform computes an affine transformation in GF(2^8): +// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; +// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// corresponding to a group of 8 elements in x. +// +// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedGaloisFieldAffineTransform(y Uint64x8, b uint8, m Mask8x64) Uint8x64 + +/* MaskedGaloisFieldAffineTransformInversed */ + +// GaloisFieldAffineTransform computes an affine transformation in GF(2^8), +// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: +// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; +// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// corresponding to a group of 8 elements in x. 
+// +// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedGaloisFieldAffineTransformInversed(y Uint64x2, b uint8, m Mask8x16) Uint8x16 + +// GaloisFieldAffineTransform computes an affine transformation in GF(2^8), +// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: +// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; +// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// corresponding to a group of 8 elements in x. +// +// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaskedGaloisFieldAffineTransformInversed(y Uint64x4, b uint8, m Mask8x32) Uint8x32 + +// GaloisFieldAffineTransform computes an affine transformation in GF(2^8), +// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: +// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; +// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// corresponding to a group of 8 elements in x. +// +// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedGaloisFieldAffineTransformInversed(y Uint64x8, b uint8, m Mask8x64) Uint8x64 + +/* MaskedGaloisFieldMul */ + +// GaloisFieldMul computes element-wise GF(2^8) multiplication with +// reduction polynomial x^8 + x^4 + x^3 + x + 1. +// +// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedGaloisFieldMul(y Uint8x16, z Mask8x16) Uint8x16 + +// GaloisFieldMul computes element-wise GF(2^8) multiplication with +// reduction polynomial x^8 + x^4 + x^3 + x + 1. +// +// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaskedGaloisFieldMul(y Uint8x32, z Mask8x32) Uint8x32 + +// GaloisFieldMul computes element-wise GF(2^8) multiplication with +// reduction polynomial x^8 + x^4 + x^3 + x + 1. +// +// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedGaloisFieldMul(y Uint8x64, z Mask8x64) Uint8x64 + /* MaskedGreater */ // Greater compares for greater than. From 55665e1e3756c0181f7572c8766749695ed1516a Mon Sep 17 00:00:00 2001 From: David Chase Date: Sat, 28 Jun 2025 10:20:53 -0400 Subject: [PATCH 051/139] [dev.simd] cmd/compile: undoes reorder transform in prior commit, changes names paired with simdgen CL 684655 Change-Id: I819eb601c07b21747d8a1442eb1efbf9fa5aac1d Reviewed-on: https://go-review.googlesource.com/c/go/+/684775 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao Reviewed-by: Cherry Mui --- .../compile/internal/ssagen/simdintrinsics.go | 44 +-- src/simd/stubs_amd64.go | 304 +++++++++--------- 2 files changed, 164 insertions(+), 184 deletions(-) diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index d14b6be4255b0c..87c1327f162866 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -262,12 +262,12 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float64x2.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.GaloisFieldAffineTransform", opGaloisFieldAffineTransform(ssa.OpGaloisFieldAffineTransformUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.GaloisFieldAffineTransform", opGaloisFieldAffineTransform(ssa.OpGaloisFieldAffineTransformUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.GaloisFieldAffineTransform", opGaloisFieldAffineTransform(ssa.OpGaloisFieldAffineTransformUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.GaloisFieldAffineTransformInversed", opGaloisFieldAffineTransform(ssa.OpGaloisFieldAffineTransformInversedUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.GaloisFieldAffineTransformInversed", opGaloisFieldAffineTransform(ssa.OpGaloisFieldAffineTransformInversedUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.GaloisFieldAffineTransformInversed", opGaloisFieldAffineTransform(ssa.OpGaloisFieldAffineTransformInversedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.GaloisFieldAffineTransform", opLen2Imm8(ssa.OpGaloisFieldAffineTransformUint8x16, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint8x32.GaloisFieldAffineTransform", opLen2Imm8(ssa.OpGaloisFieldAffineTransformUint8x32, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint8x64.GaloisFieldAffineTransform", opLen2Imm8(ssa.OpGaloisFieldAffineTransformUint8x64, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint8x16.GaloisFieldAffineTransformInversed", opLen2Imm8(ssa.OpGaloisFieldAffineTransformInversedUint8x16, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint8x32.GaloisFieldAffineTransformInversed", opLen2Imm8(ssa.OpGaloisFieldAffineTransformInversedUint8x32, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint8x64.GaloisFieldAffineTransformInversed", opLen2Imm8(ssa.OpGaloisFieldAffineTransformInversedUint8x64, types.TypeVec512, 0), sys.AMD64) addF(simdPackage, "Uint8x16.GaloisFieldMul", opLen2(ssa.OpGaloisFieldMulUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x32.GaloisFieldMul", opLen2(ssa.OpGaloisFieldMulUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x64.GaloisFieldMul", opLen2(ssa.OpGaloisFieldMulUint8x64, types.TypeVec512), sys.AMD64) @@ -627,12 +627,12 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float64x2.MaskedFusedMultiplySubAdd", opLen4(ssa.OpMaskedFusedMultiplySubAddFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.MaskedFusedMultiplySubAdd", opLen4(ssa.OpMaskedFusedMultiplySubAddFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.MaskedFusedMultiplySubAdd", opLen4(ssa.OpMaskedFusedMultiplySubAddFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedGaloisFieldAffineTransform", opGaloisFieldAffineTransformMasked(ssa.OpMaskedGaloisFieldAffineTransformUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedGaloisFieldAffineTransform", opGaloisFieldAffineTransformMasked(ssa.OpMaskedGaloisFieldAffineTransformUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.MaskedGaloisFieldAffineTransform", opGaloisFieldAffineTransformMasked(ssa.OpMaskedGaloisFieldAffineTransformUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedGaloisFieldAffineTransformInversed", opGaloisFieldAffineTransformMasked(ssa.OpMaskedGaloisFieldAffineTransformInversedUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedGaloisFieldAffineTransformInversed", opGaloisFieldAffineTransformMasked(ssa.OpMaskedGaloisFieldAffineTransformInversedUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.MaskedGaloisFieldAffineTransformInversed", opGaloisFieldAffineTransformMasked(ssa.OpMaskedGaloisFieldAffineTransformInversedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedGaloisFieldAffineTransform", opLen3Imm8(ssa.OpMaskedGaloisFieldAffineTransformUint8x16, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedGaloisFieldAffineTransform", opLen3Imm8(ssa.OpMaskedGaloisFieldAffineTransformUint8x32, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedGaloisFieldAffineTransform", opLen3Imm8(ssa.OpMaskedGaloisFieldAffineTransformUint8x64, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedGaloisFieldAffineTransformInversed", opLen3Imm8(ssa.OpMaskedGaloisFieldAffineTransformInversedUint8x16, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedGaloisFieldAffineTransformInversed", opLen3Imm8(ssa.OpMaskedGaloisFieldAffineTransformInversedUint8x32, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedGaloisFieldAffineTransformInversed", opLen3Imm8(ssa.OpMaskedGaloisFieldAffineTransformInversedUint8x64, types.TypeVec512, 0), sys.AMD64) addF(simdPackage, "Uint8x16.MaskedGaloisFieldMul", opLen3(ssa.OpMaskedGaloisFieldMulUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x32.MaskedGaloisFieldMul", opLen3(ssa.OpMaskedGaloisFieldMulUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x64.MaskedGaloisFieldMul", opLen3(ssa.OpMaskedGaloisFieldMulUint8x64, types.TypeVec512), sys.AMD64) @@ -2215,23 +2215,3 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Mask64x8.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask64x8.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) } - -func opGaloisFieldAffineTransform(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { - return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { - if args[0].Op == ssa.OpConst8 { - return s.newValue2I(op, t, args[0].AuxInt, args[0], args[1]) - } - plainPanicSimdImm(s) - return s.newValue2I(op, t, 0, args[0], args[1]) - } -} - -func opGaloisFieldAffineTransformMasked(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { - return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { - if args[0].Op == ssa.OpConst8 { - return s.newValue3I(op, t, args[0].AuxInt, args[0], args[1], args[3]) - } - plainPanicSimdImm(s) - return s.newValue3I(op, t, 0, args[0], args[1], args[3]) - } -} diff --git a/src/simd/stubs_amd64.go b/src/simd/stubs_amd64.go index f20a9b17aeffac..e589378c72f438 100644 --- a/src/simd/stubs_amd64.go +++ b/src/simd/stubs_amd64.go @@ -632,37 +632,37 @@ func (x Float64x4) Ceil() Float64x4 // Const Immediate = 10. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) CeilSuppressExceptionWithPrecision(imm8 uint8) Float32x4 +func (x Float32x4) CeilSuppressExceptionWithPrecision(imm uint8) Float32x4 // CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. // Const Immediate = 10. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) CeilSuppressExceptionWithPrecision(imm8 uint8) Float32x8 +func (x Float32x8) CeilSuppressExceptionWithPrecision(imm uint8) Float32x8 // CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. // Const Immediate = 10. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) CeilSuppressExceptionWithPrecision(imm8 uint8) Float32x16 +func (x Float32x16) CeilSuppressExceptionWithPrecision(imm uint8) Float32x16 // CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. // Const Immediate = 10. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) CeilSuppressExceptionWithPrecision(imm8 uint8) Float64x2 +func (x Float64x2) CeilSuppressExceptionWithPrecision(imm uint8) Float64x2 // CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. // Const Immediate = 10. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) CeilSuppressExceptionWithPrecision(imm8 uint8) Float64x4 +func (x Float64x4) CeilSuppressExceptionWithPrecision(imm uint8) Float64x4 // CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. // Const Immediate = 10. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) CeilSuppressExceptionWithPrecision(imm8 uint8) Float64x8 +func (x Float64x8) CeilSuppressExceptionWithPrecision(imm uint8) Float64x8 /* CeilWithPrecision */ @@ -670,37 +670,37 @@ func (x Float64x8) CeilSuppressExceptionWithPrecision(imm8 uint8) Float64x8 // Const Immediate = 2. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) CeilWithPrecision(imm8 uint8) Float32x4 +func (x Float32x4) CeilWithPrecision(imm uint8) Float32x4 // CeilWithPrecision rounds elements up with specified precision, masked. // Const Immediate = 2. 
// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) CeilWithPrecision(imm8 uint8) Float32x8 +func (x Float32x8) CeilWithPrecision(imm uint8) Float32x8 // CeilWithPrecision rounds elements up with specified precision, masked. // Const Immediate = 2. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) CeilWithPrecision(imm8 uint8) Float32x16 +func (x Float32x16) CeilWithPrecision(imm uint8) Float32x16 // CeilWithPrecision rounds elements up with specified precision, masked. // Const Immediate = 2. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) CeilWithPrecision(imm8 uint8) Float64x2 +func (x Float64x2) CeilWithPrecision(imm uint8) Float64x2 // CeilWithPrecision rounds elements up with specified precision, masked. // Const Immediate = 2. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) CeilWithPrecision(imm8 uint8) Float64x4 +func (x Float64x4) CeilWithPrecision(imm uint8) Float64x4 // CeilWithPrecision rounds elements up with specified precision, masked. // Const Immediate = 2. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) CeilWithPrecision(imm8 uint8) Float64x8 +func (x Float64x8) CeilWithPrecision(imm uint8) Float64x8 /* DiffWithCeilSuppressExceptionWithPrecision */ @@ -708,37 +708,37 @@ func (x Float64x8) CeilWithPrecision(imm8 uint8) Float64x8 // Const Immediate = 10. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float32x4 +func (x Float32x4) DiffWithCeilSuppressExceptionWithPrecision(imm uint8) Float32x4 // DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. // Const Immediate = 10. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float32x8 +func (x Float32x8) DiffWithCeilSuppressExceptionWithPrecision(imm uint8) Float32x8 // DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. // Const Immediate = 10. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float32x16 +func (x Float32x16) DiffWithCeilSuppressExceptionWithPrecision(imm uint8) Float32x16 // DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. // Const Immediate = 10. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float64x2 +func (x Float64x2) DiffWithCeilSuppressExceptionWithPrecision(imm uint8) Float64x2 // DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. // Const Immediate = 10. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float64x4 +func (x Float64x4) DiffWithCeilSuppressExceptionWithPrecision(imm uint8) Float64x4 // DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. // Const Immediate = 10. 
// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float64x8 +func (x Float64x8) DiffWithCeilSuppressExceptionWithPrecision(imm uint8) Float64x8 /* DiffWithCeilWithPrecision */ @@ -746,37 +746,37 @@ func (x Float64x8) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float6 // Const Immediate = 2. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithCeilWithPrecision(imm8 uint8) Float32x4 +func (x Float32x4) DiffWithCeilWithPrecision(imm uint8) Float32x4 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. // Const Immediate = 2. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithCeilWithPrecision(imm8 uint8) Float32x8 +func (x Float32x8) DiffWithCeilWithPrecision(imm uint8) Float32x8 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. // Const Immediate = 2. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithCeilWithPrecision(imm8 uint8) Float32x16 +func (x Float32x16) DiffWithCeilWithPrecision(imm uint8) Float32x16 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. // Const Immediate = 2. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithCeilWithPrecision(imm8 uint8) Float64x2 +func (x Float64x2) DiffWithCeilWithPrecision(imm uint8) Float64x2 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. // Const Immediate = 2. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithCeilWithPrecision(imm8 uint8) Float64x4 +func (x Float64x4) DiffWithCeilWithPrecision(imm uint8) Float64x4 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. // Const Immediate = 2. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithCeilWithPrecision(imm8 uint8) Float64x8 +func (x Float64x8) DiffWithCeilWithPrecision(imm uint8) Float64x8 /* DiffWithFloorSuppressExceptionWithPrecision */ @@ -784,37 +784,37 @@ func (x Float64x8) DiffWithCeilWithPrecision(imm8 uint8) Float64x8 // Const Immediate = 9. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float32x4 +func (x Float32x4) DiffWithFloorSuppressExceptionWithPrecision(imm uint8) Float32x4 // DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. // Const Immediate = 9. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float32x8 +func (x Float32x8) DiffWithFloorSuppressExceptionWithPrecision(imm uint8) Float32x8 // DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. // Const Immediate = 9. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float32x16 +func (x Float32x16) DiffWithFloorSuppressExceptionWithPrecision(imm uint8) Float32x16 // DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. // Const Immediate = 9. 
// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float64x2 +func (x Float64x2) DiffWithFloorSuppressExceptionWithPrecision(imm uint8) Float64x2 // DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. // Const Immediate = 9. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float64x4 +func (x Float64x4) DiffWithFloorSuppressExceptionWithPrecision(imm uint8) Float64x4 // DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. // Const Immediate = 9. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float64x8 +func (x Float64x8) DiffWithFloorSuppressExceptionWithPrecision(imm uint8) Float64x8 /* DiffWithFloorWithPrecision */ @@ -822,37 +822,37 @@ func (x Float64x8) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float // Const Immediate = 1. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithFloorWithPrecision(imm8 uint8) Float32x4 +func (x Float32x4) DiffWithFloorWithPrecision(imm uint8) Float32x4 // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. // Const Immediate = 1. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithFloorWithPrecision(imm8 uint8) Float32x8 +func (x Float32x8) DiffWithFloorWithPrecision(imm uint8) Float32x8 // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. // Const Immediate = 1. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithFloorWithPrecision(imm8 uint8) Float32x16 +func (x Float32x16) DiffWithFloorWithPrecision(imm uint8) Float32x16 // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. // Const Immediate = 1. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithFloorWithPrecision(imm8 uint8) Float64x2 +func (x Float64x2) DiffWithFloorWithPrecision(imm uint8) Float64x2 // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. // Const Immediate = 1. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithFloorWithPrecision(imm8 uint8) Float64x4 +func (x Float64x4) DiffWithFloorWithPrecision(imm uint8) Float64x4 // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. // Const Immediate = 1. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithFloorWithPrecision(imm8 uint8) Float64x8 +func (x Float64x8) DiffWithFloorWithPrecision(imm uint8) Float64x8 /* DiffWithRoundSuppressExceptionWithPrecision */ @@ -860,37 +860,37 @@ func (x Float64x8) DiffWithFloorWithPrecision(imm8 uint8) Float64x8 // Const Immediate = 8. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float32x4 +func (x Float32x4) DiffWithRoundSuppressExceptionWithPrecision(imm uint8) Float32x4 // DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. // Const Immediate = 8. 
// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float32x8 +func (x Float32x8) DiffWithRoundSuppressExceptionWithPrecision(imm uint8) Float32x8 // DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. // Const Immediate = 8. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float32x16 +func (x Float32x16) DiffWithRoundSuppressExceptionWithPrecision(imm uint8) Float32x16 // DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. // Const Immediate = 8. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float64x2 +func (x Float64x2) DiffWithRoundSuppressExceptionWithPrecision(imm uint8) Float64x2 // DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. // Const Immediate = 8. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float64x4 +func (x Float64x4) DiffWithRoundSuppressExceptionWithPrecision(imm uint8) Float64x4 // DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. // Const Immediate = 8. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float64x8 +func (x Float64x8) DiffWithRoundSuppressExceptionWithPrecision(imm uint8) Float64x8 /* DiffWithRoundWithPrecision */ @@ -898,37 +898,37 @@ func (x Float64x8) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float // Const Immediate = 0. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithRoundWithPrecision(imm8 uint8) Float32x4 +func (x Float32x4) DiffWithRoundWithPrecision(imm uint8) Float32x4 // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. // Const Immediate = 0. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithRoundWithPrecision(imm8 uint8) Float32x8 +func (x Float32x8) DiffWithRoundWithPrecision(imm uint8) Float32x8 // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. // Const Immediate = 0. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithRoundWithPrecision(imm8 uint8) Float32x16 +func (x Float32x16) DiffWithRoundWithPrecision(imm uint8) Float32x16 // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. // Const Immediate = 0. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithRoundWithPrecision(imm8 uint8) Float64x2 +func (x Float64x2) DiffWithRoundWithPrecision(imm uint8) Float64x2 // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. // Const Immediate = 0. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithRoundWithPrecision(imm8 uint8) Float64x4 +func (x Float64x4) DiffWithRoundWithPrecision(imm uint8) Float64x4 // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. // Const Immediate = 0. 
// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithRoundWithPrecision(imm8 uint8) Float64x8 +func (x Float64x8) DiffWithRoundWithPrecision(imm uint8) Float64x8 /* DiffWithTruncSuppressExceptionWithPrecision */ @@ -936,37 +936,37 @@ func (x Float64x8) DiffWithRoundWithPrecision(imm8 uint8) Float64x8 // Const Immediate = 11. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float32x4 +func (x Float32x4) DiffWithTruncSuppressExceptionWithPrecision(imm uint8) Float32x4 // DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. // Const Immediate = 11. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float32x8 +func (x Float32x8) DiffWithTruncSuppressExceptionWithPrecision(imm uint8) Float32x8 // DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. // Const Immediate = 11. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float32x16 +func (x Float32x16) DiffWithTruncSuppressExceptionWithPrecision(imm uint8) Float32x16 // DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. // Const Immediate = 11. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float64x2 +func (x Float64x2) DiffWithTruncSuppressExceptionWithPrecision(imm uint8) Float64x2 // DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. // Const Immediate = 11. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float64x4 +func (x Float64x4) DiffWithTruncSuppressExceptionWithPrecision(imm uint8) Float64x4 // DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. // Const Immediate = 11. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float64x8 +func (x Float64x8) DiffWithTruncSuppressExceptionWithPrecision(imm uint8) Float64x8 /* DiffWithTruncWithPrecision */ @@ -974,37 +974,37 @@ func (x Float64x8) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float // Const Immediate = 3. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithTruncWithPrecision(imm8 uint8) Float32x4 +func (x Float32x4) DiffWithTruncWithPrecision(imm uint8) Float32x4 // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. // Const Immediate = 3. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithTruncWithPrecision(imm8 uint8) Float32x8 +func (x Float32x8) DiffWithTruncWithPrecision(imm uint8) Float32x8 // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. // Const Immediate = 3. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithTruncWithPrecision(imm8 uint8) Float32x16 +func (x Float32x16) DiffWithTruncWithPrecision(imm uint8) Float32x16 // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. // Const Immediate = 3. 
// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithTruncWithPrecision(imm8 uint8) Float64x2 +func (x Float64x2) DiffWithTruncWithPrecision(imm uint8) Float64x2 // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. // Const Immediate = 3. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithTruncWithPrecision(imm8 uint8) Float64x4 +func (x Float64x4) DiffWithTruncWithPrecision(imm uint8) Float64x4 // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. // Const Immediate = 3. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithTruncWithPrecision(imm8 uint8) Float64x8 +func (x Float64x8) DiffWithTruncWithPrecision(imm uint8) Float64x8 /* Div */ @@ -1260,37 +1260,37 @@ func (x Float64x4) Floor() Float64x4 // Const Immediate = 9. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) FloorSuppressExceptionWithPrecision(imm8 uint8) Float32x4 +func (x Float32x4) FloorSuppressExceptionWithPrecision(imm uint8) Float32x4 // FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. // Const Immediate = 9. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) FloorSuppressExceptionWithPrecision(imm8 uint8) Float32x8 +func (x Float32x8) FloorSuppressExceptionWithPrecision(imm uint8) Float32x8 // FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. // Const Immediate = 9. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) FloorSuppressExceptionWithPrecision(imm8 uint8) Float32x16 +func (x Float32x16) FloorSuppressExceptionWithPrecision(imm uint8) Float32x16 // FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. // Const Immediate = 9. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) FloorSuppressExceptionWithPrecision(imm8 uint8) Float64x2 +func (x Float64x2) FloorSuppressExceptionWithPrecision(imm uint8) Float64x2 // FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. // Const Immediate = 9. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) FloorSuppressExceptionWithPrecision(imm8 uint8) Float64x4 +func (x Float64x4) FloorSuppressExceptionWithPrecision(imm uint8) Float64x4 // FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. // Const Immediate = 9. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) FloorSuppressExceptionWithPrecision(imm8 uint8) Float64x8 +func (x Float64x8) FloorSuppressExceptionWithPrecision(imm uint8) Float64x8 /* FloorWithPrecision */ @@ -1298,37 +1298,37 @@ func (x Float64x8) FloorSuppressExceptionWithPrecision(imm8 uint8) Float64x8 // Const Immediate = 1. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) FloorWithPrecision(imm8 uint8) Float32x4 +func (x Float32x4) FloorWithPrecision(imm uint8) Float32x4 // FloorWithPrecision rounds elements down with specified precision, masked. // Const Immediate = 1. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) FloorWithPrecision(imm8 uint8) Float32x8 +func (x Float32x8) FloorWithPrecision(imm uint8) Float32x8 // FloorWithPrecision rounds elements down with specified precision, masked. // Const Immediate = 1. 
// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) FloorWithPrecision(imm8 uint8) Float32x16 +func (x Float32x16) FloorWithPrecision(imm uint8) Float32x16 // FloorWithPrecision rounds elements down with specified precision, masked. // Const Immediate = 1. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) FloorWithPrecision(imm8 uint8) Float64x2 +func (x Float64x2) FloorWithPrecision(imm uint8) Float64x2 // FloorWithPrecision rounds elements down with specified precision, masked. // Const Immediate = 1. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) FloorWithPrecision(imm8 uint8) Float64x4 +func (x Float64x4) FloorWithPrecision(imm uint8) Float64x4 // FloorWithPrecision rounds elements down with specified precision, masked. // Const Immediate = 1. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) FloorWithPrecision(imm8 uint8) Float64x8 +func (x Float64x8) FloorWithPrecision(imm uint8) Float64x8 /* FusedMultiplyAdd */ @@ -1430,56 +1430,56 @@ func (x Float64x8) FusedMultiplySubAdd(y Float64x8, z Float64x8) Float64x8 // GaloisFieldAffineTransform computes an affine transformation in GF(2^8): // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// imm is an 8-bit vector. The affine transformation is y * x + imm, with each element of y // corresponding to a group of 8 elements in x. // // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX -func (x Uint8x16) GaloisFieldAffineTransform(y Uint64x2, b uint8) Uint8x16 +func (x Uint8x16) GaloisFieldAffineTransform(b uint8, y Uint64x2) Uint8x16 // GaloisFieldAffineTransform computes an affine transformation in GF(2^8): // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// imm is an 8-bit vector. The affine transformation is y * x + imm, with each element of y // corresponding to a group of 8 elements in x. // // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX -func (x Uint8x32) GaloisFieldAffineTransform(y Uint64x4, b uint8) Uint8x32 +func (x Uint8x32) GaloisFieldAffineTransform(b uint8, y Uint64x4) Uint8x32 // GaloisFieldAffineTransform computes an affine transformation in GF(2^8): // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// imm is an 8-bit vector. The affine transformation is y * x + imm, with each element of y // corresponding to a group of 8 elements in x. // // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX -func (x Uint8x64) GaloisFieldAffineTransform(y Uint64x8, b uint8) Uint8x64 +func (x Uint8x64) GaloisFieldAffineTransform(b uint8, y Uint64x8) Uint8x64 /* GaloisFieldAffineTransformInversed */ // GaloisFieldAffineTransform computes an affine transformation in GF(2^8), // with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// imm is an 8-bit vector. The affine transformation is y * x + imm, with each element of y // corresponding to a group of 8 elements in x. 
// // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX -func (x Uint8x16) GaloisFieldAffineTransformInversed(y Uint64x2, b uint8) Uint8x16 +func (x Uint8x16) GaloisFieldAffineTransformInversed(b uint8, y Uint64x2) Uint8x16 // GaloisFieldAffineTransform computes an affine transformation in GF(2^8), // with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// imm is an 8-bit vector. The affine transformation is y * x + imm, with each element of y // corresponding to a group of 8 elements in x. // // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX -func (x Uint8x32) GaloisFieldAffineTransformInversed(y Uint64x4, b uint8) Uint8x32 +func (x Uint8x32) GaloisFieldAffineTransformInversed(b uint8, y Uint64x4) Uint8x32 // GaloisFieldAffineTransform computes an affine transformation in GF(2^8), // with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// imm is an 8-bit vector. The affine transformation is y * x + imm, with each element of y // corresponding to a group of 8 elements in x. // // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX -func (x Uint8x64) GaloisFieldAffineTransformInversed(y Uint64x8, b uint8) Uint8x64 +func (x Uint8x64) GaloisFieldAffineTransformInversed(b uint8, y Uint64x8) Uint8x64 /* GaloisFieldMul */ @@ -1506,42 +1506,42 @@ func (x Uint8x64) GaloisFieldMul(y Uint8x64) Uint8x64 // GetElem retrieves a single constant-indexed element's value. // // Asm: VPEXTRB, CPU Feature: AVX512EVEX -func (x Int8x16) GetElem(imm8 uint8) int8 +func (x Int8x16) GetElem(imm uint8) int8 // GetElem retrieves a single constant-indexed element's value. // // Asm: VPEXTRW, CPU Feature: AVX512EVEX -func (x Int16x8) GetElem(imm8 uint8) int16 +func (x Int16x8) GetElem(imm uint8) int16 // GetElem retrieves a single constant-indexed element's value. // // Asm: VPEXTRD, CPU Feature: AVX -func (x Int32x4) GetElem(imm8 uint8) int32 +func (x Int32x4) GetElem(imm uint8) int32 // GetElem retrieves a single constant-indexed element's value. // // Asm: VPEXTRQ, CPU Feature: AVX -func (x Int64x2) GetElem(imm8 uint8) int64 +func (x Int64x2) GetElem(imm uint8) int64 // GetElem retrieves a single constant-indexed element's value. // // Asm: VPEXTRB, CPU Feature: AVX512EVEX -func (x Uint8x16) GetElem(imm8 uint8) uint8 +func (x Uint8x16) GetElem(imm uint8) uint8 // GetElem retrieves a single constant-indexed element's value. // // Asm: VPEXTRW, CPU Feature: AVX512EVEX -func (x Uint16x8) GetElem(imm8 uint8) uint16 +func (x Uint16x8) GetElem(imm uint8) uint16 // GetElem retrieves a single constant-indexed element's value. // // Asm: VPEXTRD, CPU Feature: AVX -func (x Uint32x4) GetElem(imm8 uint8) uint32 +func (x Uint32x4) GetElem(imm uint8) uint32 // GetElem retrieves a single constant-indexed element's value. 
// // Asm: VPEXTRQ, CPU Feature: AVX -func (x Uint64x2) GetElem(imm8 uint8) uint64 +func (x Uint64x2) GetElem(imm uint8) uint64 /* Greater */ @@ -3573,56 +3573,56 @@ func (x Float64x8) MaskedFusedMultiplySubAdd(y Float64x8, z Float64x8, u Mask64x // GaloisFieldAffineTransform computes an affine transformation in GF(2^8): // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// imm is an 8-bit vector. The affine transformation is y * x + imm, with each element of y // corresponding to a group of 8 elements in x. // // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedGaloisFieldAffineTransform(y Uint64x2, b uint8, m Mask8x16) Uint8x16 +func (x Uint8x16) MaskedGaloisFieldAffineTransform(b uint8, y Uint64x2, m Mask8x16) Uint8x16 // GaloisFieldAffineTransform computes an affine transformation in GF(2^8): // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// imm is an 8-bit vector. The affine transformation is y * x + imm, with each element of y // corresponding to a group of 8 elements in x. // // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedGaloisFieldAffineTransform(y Uint64x4, b uint8, m Mask8x32) Uint8x32 +func (x Uint8x32) MaskedGaloisFieldAffineTransform(b uint8, y Uint64x4, m Mask8x32) Uint8x32 // GaloisFieldAffineTransform computes an affine transformation in GF(2^8): // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// imm is an 8-bit vector. The affine transformation is y * x + imm, with each element of y // corresponding to a group of 8 elements in x. // // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedGaloisFieldAffineTransform(y Uint64x8, b uint8, m Mask8x64) Uint8x64 +func (x Uint8x64) MaskedGaloisFieldAffineTransform(b uint8, y Uint64x8, m Mask8x64) Uint8x64 /* MaskedGaloisFieldAffineTransformInversed */ // GaloisFieldAffineTransform computes an affine transformation in GF(2^8), // with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// imm is an 8-bit vector. The affine transformation is y * x + imm, with each element of y // corresponding to a group of 8 elements in x. // // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedGaloisFieldAffineTransformInversed(y Uint64x2, b uint8, m Mask8x16) Uint8x16 +func (x Uint8x16) MaskedGaloisFieldAffineTransformInversed(b uint8, y Uint64x2, m Mask8x16) Uint8x16 // GaloisFieldAffineTransform computes an affine transformation in GF(2^8), // with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// imm is an 8-bit vector. The affine transformation is y * x + imm, with each element of y // corresponding to a group of 8 elements in x. 
// // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedGaloisFieldAffineTransformInversed(y Uint64x4, b uint8, m Mask8x32) Uint8x32 +func (x Uint8x32) MaskedGaloisFieldAffineTransformInversed(b uint8, y Uint64x4, m Mask8x32) Uint8x32 // GaloisFieldAffineTransform computes an affine transformation in GF(2^8), // with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// imm is an 8-bit vector. The affine transformation is y * x + imm, with each element of y // corresponding to a group of 8 elements in x. // // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedGaloisFieldAffineTransformInversed(y Uint64x8, b uint8, m Mask8x64) Uint8x64 +func (x Uint8x64) MaskedGaloisFieldAffineTransformInversed(b uint8, y Uint64x8, m Mask8x64) Uint8x64 /* MaskedGaloisFieldMul */ @@ -8161,124 +8161,124 @@ func (x Uint64x8) PopCount() Uint64x8 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Int32x4) RotateAllLeft(imm8 uint8) Int32x4 +func (x Int32x4) RotateAllLeft(imm uint8) Int32x4 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Int32x8) RotateAllLeft(imm8 uint8) Int32x8 +func (x Int32x8) RotateAllLeft(imm uint8) Int32x8 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Int32x16) RotateAllLeft(imm8 uint8) Int32x16 +func (x Int32x16) RotateAllLeft(imm uint8) Int32x16 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Int64x2) RotateAllLeft(imm8 uint8) Int64x2 +func (x Int64x2) RotateAllLeft(imm uint8) Int64x2 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Int64x4) RotateAllLeft(imm8 uint8) Int64x4 +func (x Int64x4) RotateAllLeft(imm uint8) Int64x4 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Int64x8) RotateAllLeft(imm8 uint8) Int64x8 +func (x Int64x8) RotateAllLeft(imm uint8) Int64x8 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Uint32x4) RotateAllLeft(imm8 uint8) Uint32x4 +func (x Uint32x4) RotateAllLeft(imm uint8) Uint32x4 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Uint32x8) RotateAllLeft(imm8 uint8) Uint32x8 +func (x Uint32x8) RotateAllLeft(imm uint8) Uint32x8 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Uint32x16) RotateAllLeft(imm8 uint8) Uint32x16 +func (x Uint32x16) RotateAllLeft(imm uint8) Uint32x16 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. 
// // Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Uint64x2) RotateAllLeft(imm8 uint8) Uint64x2 +func (x Uint64x2) RotateAllLeft(imm uint8) Uint64x2 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Uint64x4) RotateAllLeft(imm8 uint8) Uint64x4 +func (x Uint64x4) RotateAllLeft(imm uint8) Uint64x4 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Uint64x8) RotateAllLeft(imm8 uint8) Uint64x8 +func (x Uint64x8) RotateAllLeft(imm uint8) Uint64x8 /* RotateAllRight */ // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Int32x4) RotateAllRight(imm8 uint8) Int32x4 +func (x Int32x4) RotateAllRight(imm uint8) Int32x4 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Int32x8) RotateAllRight(imm8 uint8) Int32x8 +func (x Int32x8) RotateAllRight(imm uint8) Int32x8 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Int32x16) RotateAllRight(imm8 uint8) Int32x16 +func (x Int32x16) RotateAllRight(imm uint8) Int32x16 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Int64x2) RotateAllRight(imm8 uint8) Int64x2 +func (x Int64x2) RotateAllRight(imm uint8) Int64x2 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Int64x4) RotateAllRight(imm8 uint8) Int64x4 +func (x Int64x4) RotateAllRight(imm uint8) Int64x4 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Int64x8) RotateAllRight(imm8 uint8) Int64x8 +func (x Int64x8) RotateAllRight(imm uint8) Int64x8 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Uint32x4) RotateAllRight(imm8 uint8) Uint32x4 +func (x Uint32x4) RotateAllRight(imm uint8) Uint32x4 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Uint32x8) RotateAllRight(imm8 uint8) Uint32x8 +func (x Uint32x8) RotateAllRight(imm uint8) Uint32x8 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Uint32x16) RotateAllRight(imm8 uint8) Uint32x16 +func (x Uint32x16) RotateAllRight(imm uint8) Uint32x16 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Uint64x2) RotateAllRight(imm8 uint8) Uint64x2 +func (x Uint64x2) RotateAllRight(imm uint8) Uint64x2 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Uint64x4) RotateAllRight(imm8 uint8) Uint64x4 +func (x Uint64x4) RotateAllRight(imm uint8) Uint64x4 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. 
// // Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Uint64x8) RotateAllRight(imm8 uint8) Uint64x8 +func (x Uint64x8) RotateAllRight(imm uint8) Uint64x8 /* RotateLeft */ @@ -8436,37 +8436,37 @@ func (x Float64x4) Round() Float64x4 // Const Immediate = 8. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) RoundSuppressExceptionWithPrecision(imm8 uint8) Float32x4 +func (x Float32x4) RoundSuppressExceptionWithPrecision(imm uint8) Float32x4 // RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. // Const Immediate = 8. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) RoundSuppressExceptionWithPrecision(imm8 uint8) Float32x8 +func (x Float32x8) RoundSuppressExceptionWithPrecision(imm uint8) Float32x8 // RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. // Const Immediate = 8. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) RoundSuppressExceptionWithPrecision(imm8 uint8) Float32x16 +func (x Float32x16) RoundSuppressExceptionWithPrecision(imm uint8) Float32x16 // RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. // Const Immediate = 8. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) RoundSuppressExceptionWithPrecision(imm8 uint8) Float64x2 +func (x Float64x2) RoundSuppressExceptionWithPrecision(imm uint8) Float64x2 // RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. // Const Immediate = 8. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) RoundSuppressExceptionWithPrecision(imm8 uint8) Float64x4 +func (x Float64x4) RoundSuppressExceptionWithPrecision(imm uint8) Float64x4 // RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. // Const Immediate = 8. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) RoundSuppressExceptionWithPrecision(imm8 uint8) Float64x8 +func (x Float64x8) RoundSuppressExceptionWithPrecision(imm uint8) Float64x8 /* RoundWithPrecision */ @@ -8474,37 +8474,37 @@ func (x Float64x8) RoundSuppressExceptionWithPrecision(imm8 uint8) Float64x8 // Const Immediate = 0. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) RoundWithPrecision(imm8 uint8) Float32x4 +func (x Float32x4) RoundWithPrecision(imm uint8) Float32x4 // RoundWithPrecision rounds elements with specified precision. // Const Immediate = 0. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) RoundWithPrecision(imm8 uint8) Float32x8 +func (x Float32x8) RoundWithPrecision(imm uint8) Float32x8 // RoundWithPrecision rounds elements with specified precision. // Const Immediate = 0. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) RoundWithPrecision(imm8 uint8) Float32x16 +func (x Float32x16) RoundWithPrecision(imm uint8) Float32x16 // RoundWithPrecision rounds elements with specified precision. // Const Immediate = 0. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) RoundWithPrecision(imm8 uint8) Float64x2 +func (x Float64x2) RoundWithPrecision(imm uint8) Float64x2 // RoundWithPrecision rounds elements with specified precision. // Const Immediate = 0. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) RoundWithPrecision(imm8 uint8) Float64x4 +func (x Float64x4) RoundWithPrecision(imm uint8) Float64x4 // RoundWithPrecision rounds elements with specified precision. 
// Const Immediate = 0. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) RoundWithPrecision(imm8 uint8) Float64x8 +func (x Float64x8) RoundWithPrecision(imm uint8) Float64x8 /* SaturatedAdd */ @@ -9920,37 +9920,37 @@ func (x Float64x4) Trunc() Float64x4 // Const Immediate = 11. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) TruncSuppressExceptionWithPrecision(imm8 uint8) Float32x4 +func (x Float32x4) TruncSuppressExceptionWithPrecision(imm uint8) Float32x4 // TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. // Const Immediate = 11. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) TruncSuppressExceptionWithPrecision(imm8 uint8) Float32x8 +func (x Float32x8) TruncSuppressExceptionWithPrecision(imm uint8) Float32x8 // TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. // Const Immediate = 11. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) TruncSuppressExceptionWithPrecision(imm8 uint8) Float32x16 +func (x Float32x16) TruncSuppressExceptionWithPrecision(imm uint8) Float32x16 // TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. // Const Immediate = 11. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) TruncSuppressExceptionWithPrecision(imm8 uint8) Float64x2 +func (x Float64x2) TruncSuppressExceptionWithPrecision(imm uint8) Float64x2 // TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. // Const Immediate = 11. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) TruncSuppressExceptionWithPrecision(imm8 uint8) Float64x4 +func (x Float64x4) TruncSuppressExceptionWithPrecision(imm uint8) Float64x4 // TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. // Const Immediate = 11. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) TruncSuppressExceptionWithPrecision(imm8 uint8) Float64x8 +func (x Float64x8) TruncSuppressExceptionWithPrecision(imm uint8) Float64x8 /* TruncWithPrecision */ @@ -9958,37 +9958,37 @@ func (x Float64x8) TruncSuppressExceptionWithPrecision(imm8 uint8) Float64x8 // Const Immediate = 3. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) TruncWithPrecision(imm8 uint8) Float32x4 +func (x Float32x4) TruncWithPrecision(imm uint8) Float32x4 // TruncWithPrecision truncates elements with specified precision. // Const Immediate = 3. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) TruncWithPrecision(imm8 uint8) Float32x8 +func (x Float32x8) TruncWithPrecision(imm uint8) Float32x8 // TruncWithPrecision truncates elements with specified precision. // Const Immediate = 3. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) TruncWithPrecision(imm8 uint8) Float32x16 +func (x Float32x16) TruncWithPrecision(imm uint8) Float32x16 // TruncWithPrecision truncates elements with specified precision. // Const Immediate = 3. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) TruncWithPrecision(imm8 uint8) Float64x2 +func (x Float64x2) TruncWithPrecision(imm uint8) Float64x2 // TruncWithPrecision truncates elements with specified precision. // Const Immediate = 3. 
// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) TruncWithPrecision(imm8 uint8) Float64x4 +func (x Float64x4) TruncWithPrecision(imm uint8) Float64x4 // TruncWithPrecision truncates elements with specified precision. // Const Immediate = 3. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) TruncWithPrecision(imm8 uint8) Float64x8 +func (x Float64x8) TruncWithPrecision(imm uint8) Float64x8 /* UnsignedSignedQuadDotProdAccumulate */ From ead249a2e2989c6775235058d38f0e33afdf752a Mon Sep 17 00:00:00 2001 From: David Chase Date: Sat, 28 Jun 2025 11:05:44 -0400 Subject: [PATCH 052/139] [dev.simd] cmd/compile: reorder operands for some simd operations This adds support for one ad hoc reordering, which requires a new intrinsic-to-ssa helper matching the name that is used in the generator (and thus in the generated code). In this case, it is opLen{2,3}Imm8_2I which expects the immediate after the self (0) and first (1) parameters to the method, and before the mask if there is one. I.e., the immediate is arg 2 in the call. The changes to simdintrinsics and stubs are generated by simdgen CL 684019. Change-Id: Ia54aab9825d469a2f3efa6d1fb079242181c0ca6 Reviewed-on: https://go-review.googlesource.com/c/go/+/684776 Reviewed-by: Cherry Mui Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/ssa.go | 2 +- src/cmd/compile/internal/ssagen/intrinsics.go | 28 +++++++++++ .../compile/internal/ssagen/simdintrinsics.go | 24 +++++----- src/simd/stubs_amd64.go | 48 +++++++++---------- 4 files changed, 65 insertions(+), 37 deletions(-) diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 0c9d12620afdb4..fadac162820554 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -1866,7 +1866,7 @@ func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg in func simdReg(v *ssa.Value) int16 { t := v.Type if !t.IsSIMD() { - panic("simdReg: not a simd type") + base.Fatalf("simdReg: not a simd type; v=%s, b=b%d, f=%s", v.LongString(), v.Block.ID, v.Block.Func.Name) } switch t.Size() { case 8: diff --git a/src/cmd/compile/internal/ssagen/intrinsics.go b/src/cmd/compile/internal/ssagen/intrinsics.go index 660047df1f2299..73e84077fd2b9d 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics.go +++ b/src/cmd/compile/internal/ssagen/intrinsics.go @@ -1684,6 +1684,34 @@ func opLen3Imm8(op ssa.Op, t *types.Type, offset int) func(s *state, n *ir.CallE } } +func opLen2Imm8_2I(op ssa.Op, t *types.Type, offset int) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + if args[1].Op == ssa.OpConst8 { + return s.newValue2I(op, t, args[2].AuxInt< Date: Tue, 29 Apr 2025 22:55:40 -0400 Subject: [PATCH 053/139] [dev.simd] runtime: save scalar registers off stack in amd64 async preemption Asynchronous preemption must save all registers that could be in use by Go code. Currently, it saves all of these to the goroutine stack. As a result, the stack frame requirements of asynchronous preemption can be rather high. On amd64, this requires 368 bytes of stack space, most of which is the XMM registers. Several RISC architectures are around 0.5 KiB. As we add support for SIMD instructions, this is going to become a problem. The AVX-512 register state is 2.5 KiB.
This well exceeds the nosplit limit, and even if it didn't, could constrain when we can asynchronously preempt goroutines on small stacks. This CL fixes this by moving pure scalar state stored in non-GP registers off the stack and into an allocated "extended register state" object. To reduce space overhead, we only allocate these objects as needed. While in the theoretical limit, every G could need this register state, in practice very few do at a time. However, we can't allocate when we're in the middle of saving the register state during an asynchronous preemption, so we reserve scratch space on every P to temporarily store the register state, which can then be copied out to an allocated state object later by Go code. This commit only implements this for amd64, since that's where we're about to add much more vector state, but it lays the groundwork for doing this on any architecture that could benefit. Change-Id: I123a95e21c11d5c10942d70e27f84d2d99bbf735 Reviewed-on: https://go-review.googlesource.com/c/go/+/680898 Reviewed-by: Cherry Mui LUCI-TryBot-Result: Go LUCI Auto-Submit: Austin Clements --- src/runtime/export_test.go | 2 + src/runtime/lockrank.go | 5 +- src/runtime/mheap.go | 2 + src/runtime/mklockrank.go | 6 +- src/runtime/mkpreempt.go | 92 +++++++++++++++++++++--- src/runtime/preempt.go | 50 ++++++++----- src/runtime/preempt_amd64.go | 22 ++++++ src/runtime/preempt_amd64.s | 82 +++++++++++++--------- src/runtime/preempt_noxreg.go | 27 ++++++++ src/runtime/preempt_xreg.go | 127 ++++++++++++++++++++++++++++++++++ src/runtime/proc.go | 1 + src/runtime/runtime2.go | 9 +++ src/runtime/sizeof_test.go | 9 ++- 13 files changed, 368 insertions(+), 66 deletions(-) create mode 100644 src/runtime/preempt_amd64.go create mode 100644 src/runtime/preempt_noxreg.go create mode 100644 src/runtime/preempt_xreg.go diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go index 83cf301be49cc8..b3bb5d2c581090 100644 --- a/src/runtime/export_test.go +++ b/src/runtime/export_test.go @@ -555,6 +555,8 @@ type G = g type Sudog = sudog +type XRegPerG = xRegPerG + func Getg() *G { return getg() } diff --git a/src/runtime/lockrank.go b/src/runtime/lockrank.go index 44015ce862d077..9821e499989951 100644 --- a/src/runtime/lockrank.go +++ b/src/runtime/lockrank.go @@ -70,6 +70,7 @@ const ( lockRankHchanLeaf // WB lockRankWbufSpans + lockRankXRegAlloc lockRankMheap lockRankMheapSpecial lockRankGlobalAlloc @@ -143,6 +144,7 @@ var lockNames = []string{ lockRankStackLarge: "stackLarge", lockRankHchanLeaf: "hchanLeaf", lockRankWbufSpans: "wbufSpans", + lockRankXRegAlloc: "xRegAlloc", lockRankMheap: "mheap", lockRankMheapSpecial: "mheapSpecial", lockRankGlobalAlloc: "globalAlloc", @@ -228,9 +230,10 @@ var lockPartialOrder [][]lockRank = [][]lockRank{ lockRankStackLarge: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, 
lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, lockRankHchanLeaf: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankHchanLeaf}, lockRankWbufSpans: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, + lockRankXRegAlloc: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched}, lockRankMheap: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans}, lockRankMheapSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, 
lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap}, - lockRankGlobalAlloc: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankMheapSpecial}, + lockRankGlobalAlloc: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankXRegAlloc, lockRankMheap, lockRankMheapSpecial}, lockRankTrace: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap}, lockRankTraceStackTab: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, 
lockRankUpdateMaxProcsG, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankTrace}, lockRankPanic: {}, diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go index f25dbb429d7f78..358de2f376dd7f 100644 --- a/src/runtime/mheap.go +++ b/src/runtime/mheap.go @@ -821,6 +821,8 @@ func (h *mheap) init() { } h.pages.init(&h.lock, &memstats.gcMiscSys, false) + + xRegInitAlloc() } // reclaim sweeps and reclaims at least npage pages into the heap. diff --git a/src/runtime/mklockrank.go b/src/runtime/mklockrank.go index 46a063fdce569c..9c503369a35841 100644 --- a/src/runtime/mklockrank.go +++ b/src/runtime/mklockrank.go @@ -193,6 +193,9 @@ defer, # Below WB is the write barrier implementation. < wbufSpans; +# xRegState allocator +sched < xRegAlloc; + # Span allocator stackLarge, stackpool, @@ -205,7 +208,8 @@ stackLarge, # an mspanSpecial lock, and they're part of the malloc implementation. # Pinner bits might be freed by the span allocator. mheap, mspanSpecial < mheapSpecial; -mheap, mheapSpecial < globalAlloc; +# Fixallocs +mheap, mheapSpecial, xRegAlloc < globalAlloc; # Execution tracer events (with a P) hchan, diff --git a/src/runtime/mkpreempt.go b/src/runtime/mkpreempt.go index ec900a23d257e9..e3dd5046f3f3a0 100644 --- a/src/runtime/mkpreempt.go +++ b/src/runtime/mkpreempt.go @@ -9,8 +9,10 @@ package main import ( + "bytes" "flag" "fmt" + "go/format" "io" "log" "os" @@ -122,14 +124,19 @@ type gen struct { goarch string } -func (g *gen) asmHeader() { +func (g *gen) commonHeader() { fmt.Fprintf(g.w, "// Code generated by mkpreempt.go; DO NOT EDIT.\n\n") if beLe[g.goarch] { base := g.goarch[:len(g.goarch)-1] fmt.Fprintf(g.w, "//go:build %s || %sle\n\n", base, base) } +} + +func (g *gen) asmHeader() { + g.commonHeader() fmt.Fprintf(g.w, "#include \"go_asm.h\"\n") if g.goarch == "amd64" { + fmt.Fprintf(g.w, "#include \"go_tls.h\"\n") fmt.Fprintf(g.w, "#include \"asm_amd64.h\"\n") } fmt.Fprintf(g.w, "#include \"textflag.h\"\n\n") @@ -145,6 +152,43 @@ func (g *gen) label(l string) { fmt.Fprintf(g.w, "%s\n", l) } +// writeXRegs writes an architecture xregs file. 
+func writeXRegs(arch string, l *layout) { + var code bytes.Buffer + g := gen{&code, arch} + g.commonHeader() + fmt.Fprintf(g.w, ` +package runtime + +type xRegState struct { +`) + pos := 0 + for _, reg := range l.regs { + if reg.pos != pos { + log.Fatalf("padding not implemented") + } + typ := fmt.Sprintf("[%d]byte", reg.size) + switch { + case reg.size == 4 && reg.pos%4 == 0: + typ = "uint32" + case reg.size == 8 && reg.pos%8 == 0: + typ = "uint64" + } + fmt.Fprintf(g.w, "\t%s %s\n", reg.reg, typ) + pos += reg.size + } + fmt.Fprintf(g.w, "}\n") + + path := fmt.Sprintf("preempt_%s.go", arch) + b, err := format.Source(code.Bytes()) + if err != nil { + log.Fatalf("formatting %s: %s", path, err) + } + if err := os.WriteFile(path, b, 0666); err != nil { + log.Fatal(err) + } +} + type layout struct { stack int regs []regPos @@ -152,7 +196,7 @@ type layout struct { } type regPos struct { - pos int + pos, size int saveOp string restoreOp string @@ -165,17 +209,17 @@ type regPos struct { } func (l *layout) add(op, reg string, size int) { - l.regs = append(l.regs, regPos{saveOp: op, restoreOp: op, reg: reg, pos: l.stack}) + l.regs = append(l.regs, regPos{saveOp: op, restoreOp: op, reg: reg, pos: l.stack, size: size}) l.stack += size } func (l *layout) add2(sop, rop, reg string, size int) { - l.regs = append(l.regs, regPos{saveOp: sop, restoreOp: rop, reg: reg, pos: l.stack}) + l.regs = append(l.regs, regPos{saveOp: sop, restoreOp: rop, reg: reg, pos: l.stack, size: size}) l.stack += size } func (l *layout) addSpecial(save, restore string, size int) { - l.regs = append(l.regs, regPos{save: save, restore: restore, pos: l.stack}) + l.regs = append(l.regs, regPos{save: save, restore: restore, pos: l.stack, size: size}) l.stack += size } @@ -239,6 +283,8 @@ func gen386(g *gen) { } func genAMD64(g *gen) { + const xReg = "AX" // *xRegState + p := g.p // Assign stack offsets. @@ -251,12 +297,13 @@ func genAMD64(g *gen) { l.add("MOVQ", reg, 8) } } - lSSE := layout{stack: l.stack, sp: "SP"} + lXRegs := layout{sp: xReg} // Non-GP registers for _, reg := range regNamesAMD64 { if strings.HasPrefix(reg, "X") { - lSSE.add("MOVUPS", reg, 16) + lXRegs.add("MOVUPS", reg, 16) } } + writeXRegs(g.goarch, &lXRegs) // TODO: MXCSR register? @@ -265,17 +312,40 @@ func genAMD64(g *gen) { p("// Save flags before clobbering them") p("PUSHFQ") p("// obj doesn't understand ADD/SUB on SP, but does understand ADJSP") - p("ADJSP $%d", lSSE.stack) + p("ADJSP $%d", l.stack) p("// But vet doesn't know ADJSP, so suppress vet stack checking") p("NOP SP") + p("// Save GPs") l.save(g) - lSSE.save(g) + // In general, the limitations on asynchronous preemption mean we only + // preempt in ABIInternal code. However, there's at least one exception to + // this: when we're in an open-coded transition between an ABIInternal + // function and an ABI0 call. We could more carefully arrange unsafe points + // to avoid ever landing in ABI0, but it's easy to just make this code not + // sensitive to the ABI we're preempting. The CALL to asyncPreempt2 will + // ensure we're in ABIInternal register state. + p("// Save extended register state to p.xRegs.scratch") + p("// Don't make assumptions about ABI register state. 
See mkpreempt.go") + p("get_tls(CX)") + p("MOVQ g(CX), R14") + p("MOVQ g_m(R14), %s", xReg) + p("MOVQ m_p(%s), %s", xReg, xReg) + p("LEAQ (p_xRegs+xRegPerP_scratch)(%s), %s", xReg, xReg) + lXRegs.save(g) + p("CALL ·asyncPreempt2(SB)") - lSSE.restore(g) + + p("// Restore non-GPs from *p.xRegs.cache") + p("MOVQ g_m(R14), %s", xReg) + p("MOVQ m_p(%s), %s", xReg, xReg) + p("MOVQ (p_xRegs+xRegPerP_cache)(%s), %s", xReg, xReg) + lXRegs.restore(g) + + p("// Restore GPs") l.restore(g) - p("ADJSP $%d", -lSSE.stack) + p("ADJSP $%d", -l.stack) p("POPFQ") p("POPQ BP") p("RET") diff --git a/src/runtime/preempt.go b/src/runtime/preempt.go index c41c3558359c0c..d053747d3a4db6 100644 --- a/src/runtime/preempt.go +++ b/src/runtime/preempt.go @@ -292,21 +292,43 @@ func canPreemptM(mp *m) bool { // asyncPreempt saves all user registers and calls asyncPreempt2. // -// When stack scanning encounters an asyncPreempt frame, it scans that +// It saves GP registers (anything that might contain a pointer) to the G stack. +// Hence, when stack scanning encounters an asyncPreempt frame, it scans that // frame and its parent frame conservatively. // +// On some platforms, it saves large additional scalar-only register state such +// as vector registers to an "extended register state" on the P. +// // asyncPreempt is implemented in assembly. func asyncPreempt() //go:nosplit func asyncPreempt2() { + // We can't grow the stack with untyped data from asyncPreempt, so switch to + // the system stack right away. + mcall(func(gp *g) { + gp.asyncSafePoint = true + + // Move the extended register state from the P to the G. We do this now that + // we're on the system stack to avoid stack splits. + xRegSave(gp) + + if gp.preemptStop { + preemptPark(gp) + } else { + gopreempt_m(gp) + } + // The above functions never return. + }) + + // Do not grow the stack below here! + gp := getg() - gp.asyncSafePoint = true - if gp.preemptStop { - mcall(preemptPark) - } else { - mcall(gopreempt_m) - } + + // Put the extended register state back on the M so resumption can find it. + // We can't do this in asyncPreemptM because the park calls never return. + xRegRestore(gp) + gp.asyncSafePoint = false } @@ -319,19 +341,13 @@ func init() { total := funcMaxSPDelta(f) f = findfunc(abi.FuncPCABIInternal(asyncPreempt2)) total += funcMaxSPDelta(f) + f = findfunc(abi.FuncPCABIInternal(xRegRestore)) + total += funcMaxSPDelta(f) // Add some overhead for return PCs, etc. asyncPreemptStack = uintptr(total) + 8*goarch.PtrSize if asyncPreemptStack > stackNosplit { - // We need more than the nosplit limit. This isn't - // unsafe, but it may limit asynchronous preemption. - // - // This may be a problem if we start using more - // registers. In that case, we should store registers - // in a context object. If we pre-allocate one per P, - // asyncPreempt can spill just a few registers to the - // stack, then grab its context object and spill into - // it. When it enters the runtime, it would allocate a - // new context for the P. + // We need more than the nosplit limit. This isn't unsafe, but it may + // limit asynchronous preemption. Consider moving state into xRegState. print("runtime: asyncPreemptStack=", asyncPreemptStack, "\n") throw("async stack too large") } diff --git a/src/runtime/preempt_amd64.go b/src/runtime/preempt_amd64.go new file mode 100644 index 00000000000000..904defac331622 --- /dev/null +++ b/src/runtime/preempt_amd64.go @@ -0,0 +1,22 @@ +// Code generated by mkpreempt.go; DO NOT EDIT. 
+ +package runtime + +type xRegState struct { + X0 [16]byte + X1 [16]byte + X2 [16]byte + X3 [16]byte + X4 [16]byte + X5 [16]byte + X6 [16]byte + X7 [16]byte + X8 [16]byte + X9 [16]byte + X10 [16]byte + X11 [16]byte + X12 [16]byte + X13 [16]byte + X14 [16]byte + X15 [16]byte +} diff --git a/src/runtime/preempt_amd64.s b/src/runtime/preempt_amd64.s index 8e3ed0d7c59dce..0a33ce7f3e72f5 100644 --- a/src/runtime/preempt_amd64.s +++ b/src/runtime/preempt_amd64.s @@ -1,6 +1,7 @@ // Code generated by mkpreempt.go; DO NOT EDIT. #include "go_asm.h" +#include "go_tls.h" #include "asm_amd64.h" #include "textflag.h" @@ -10,9 +11,10 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0 // Save flags before clobbering them PUSHFQ // obj doesn't understand ADD/SUB on SP, but does understand ADJSP - ADJSP $368 + ADJSP $112 // But vet doesn't know ADJSP, so suppress vet stack checking NOP SP + // Save GPs MOVQ AX, 0(SP) MOVQ CX, 8(SP) MOVQ DX, 16(SP) @@ -27,39 +29,51 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0 MOVQ R13, 88(SP) MOVQ R14, 96(SP) MOVQ R15, 104(SP) - MOVUPS X0, 112(SP) - MOVUPS X1, 128(SP) - MOVUPS X2, 144(SP) - MOVUPS X3, 160(SP) - MOVUPS X4, 176(SP) - MOVUPS X5, 192(SP) - MOVUPS X6, 208(SP) - MOVUPS X7, 224(SP) - MOVUPS X8, 240(SP) - MOVUPS X9, 256(SP) - MOVUPS X10, 272(SP) - MOVUPS X11, 288(SP) - MOVUPS X12, 304(SP) - MOVUPS X13, 320(SP) - MOVUPS X14, 336(SP) - MOVUPS X15, 352(SP) + // Save extended register state to p.xRegs.scratch + // Don't make assumptions about ABI register state. See mkpreempt.go + get_tls(CX) + MOVQ g(CX), R14 + MOVQ g_m(R14), AX + MOVQ m_p(AX), AX + LEAQ (p_xRegs+xRegPerP_scratch)(AX), AX + MOVUPS X0, 0(AX) + MOVUPS X1, 16(AX) + MOVUPS X2, 32(AX) + MOVUPS X3, 48(AX) + MOVUPS X4, 64(AX) + MOVUPS X5, 80(AX) + MOVUPS X6, 96(AX) + MOVUPS X7, 112(AX) + MOVUPS X8, 128(AX) + MOVUPS X9, 144(AX) + MOVUPS X10, 160(AX) + MOVUPS X11, 176(AX) + MOVUPS X12, 192(AX) + MOVUPS X13, 208(AX) + MOVUPS X14, 224(AX) + MOVUPS X15, 240(AX) CALL ·asyncPreempt2(SB) - MOVUPS 352(SP), X15 - MOVUPS 336(SP), X14 - MOVUPS 320(SP), X13 - MOVUPS 304(SP), X12 - MOVUPS 288(SP), X11 - MOVUPS 272(SP), X10 - MOVUPS 256(SP), X9 - MOVUPS 240(SP), X8 - MOVUPS 224(SP), X7 - MOVUPS 208(SP), X6 - MOVUPS 192(SP), X5 - MOVUPS 176(SP), X4 - MOVUPS 160(SP), X3 - MOVUPS 144(SP), X2 - MOVUPS 128(SP), X1 - MOVUPS 112(SP), X0 + // Restore non-GPs from *p.xRegs.cache + MOVQ g_m(R14), AX + MOVQ m_p(AX), AX + MOVQ (p_xRegs+xRegPerP_cache)(AX), AX + MOVUPS 240(AX), X15 + MOVUPS 224(AX), X14 + MOVUPS 208(AX), X13 + MOVUPS 192(AX), X12 + MOVUPS 176(AX), X11 + MOVUPS 160(AX), X10 + MOVUPS 144(AX), X9 + MOVUPS 128(AX), X8 + MOVUPS 112(AX), X7 + MOVUPS 96(AX), X6 + MOVUPS 80(AX), X5 + MOVUPS 64(AX), X4 + MOVUPS 48(AX), X3 + MOVUPS 32(AX), X2 + MOVUPS 16(AX), X1 + MOVUPS 0(AX), X0 + // Restore GPs MOVQ 104(SP), R15 MOVQ 96(SP), R14 MOVQ 88(SP), R13 @@ -74,7 +88,7 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0 MOVQ 16(SP), DX MOVQ 8(SP), CX MOVQ 0(SP), AX - ADJSP $-368 + ADJSP $-112 POPFQ POPQ BP RET diff --git a/src/runtime/preempt_noxreg.go b/src/runtime/preempt_noxreg.go new file mode 100644 index 00000000000000..dfe46559b5b723 --- /dev/null +++ b/src/runtime/preempt_noxreg.go @@ -0,0 +1,27 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !amd64 + +// This provides common support for architectures that DO NOT use extended +// register state in asynchronous preemption. 
+ +package runtime + +type xRegPerG struct{} + +type xRegPerP struct{} + +// xRegState is defined only so the build fails if we try to define a real +// xRegState on a noxreg architecture. +type xRegState struct{} + +func xRegInitAlloc() {} + +func xRegSave(gp *g) {} + +//go:nosplit +func xRegRestore(gp *g) {} + +func (*xRegPerP) free() {} diff --git a/src/runtime/preempt_xreg.go b/src/runtime/preempt_xreg.go new file mode 100644 index 00000000000000..f0a47c15d97053 --- /dev/null +++ b/src/runtime/preempt_xreg.go @@ -0,0 +1,127 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build amd64 + +// This provides common support for architectures that use extended register +// state in asynchronous preemption. +// +// While asynchronous preemption stores general-purpose (GP) registers on the +// preempted goroutine's own stack, extended register state can be used to save +// non-GP state off the stack. In particular, this is meant for large vector +// register files. Currently, we assume this contains only scalar data, though +// we could change this constraint by conservatively scanning this memory. +// +// For an architecture to support extended register state, it must provide a Go +// definition of an xRegState type for storing the state, and its asyncPreempt +// implementation must write this register state to p.xRegs.scratch. + +package runtime + +import "unsafe" + +// xRegPerG stores extended register state while a goroutine is asynchronously +// preempted. This is nil otherwise, so we can reuse a (likely small) pool of +// xRegState objects. +type xRegPerG struct { + state *xRegState +} + +type xRegPerP struct { + // scratch temporary per-P space where [asyncPreempt] saves the register + // state before entering Go. It's quickly copied to per-G state. + scratch xRegState + + // cache is a 1-element allocation cache of extended register state used by + // asynchronous preemption. On entry to preemption, this is used as a simple + // allocation cache. On exit from preemption, the G's xRegState is always + // stored here where it can be restored, and later either freed or reused + // for another preemption. On exit, this serves the dual purpose of + // delay-freeing the allocated xRegState until after we've definitely + // restored it. + cache *xRegState +} + +// xRegAlloc allocates xRegState objects. +var xRegAlloc struct { + lock mutex + alloc fixalloc +} + +func xRegInitAlloc() { + lockInit(&xRegAlloc.lock, lockRankXRegAlloc) + xRegAlloc.alloc.init(unsafe.Sizeof(xRegState{}), nil, nil, &memstats.other_sys) +} + +// xRegSave saves the extended register state on this P to gp. +// +// This must run on the system stack because it assumes the P won't change. +// +//go:systemstack +func xRegSave(gp *g) { + if gp.xRegs.state != nil { + // Double preempt? + throw("gp.xRegState.p != nil on async preempt") + } + + // Get the place to save the register state. + var dest *xRegState + pp := gp.m.p.ptr() + if pp.xRegs.cache != nil { + // Use the cached allocation. + dest = pp.xRegs.cache + pp.xRegs.cache = nil + } else { + // Allocate a new save block. + lock(&xRegAlloc.lock) + dest = (*xRegState)(xRegAlloc.alloc.alloc()) + unlock(&xRegAlloc.lock) + } + + // Copy state saved in the scratchpad to dest. 
+ // + // If we ever need to save less state (e.g., avoid saving vector registers + // that aren't in use), we could have multiple allocation pools for + // different size states and copy only the registers we need. + *dest = pp.xRegs.scratch + + // Save on the G. + gp.xRegs.state = dest +} + +// xRegRestore prepares the extended register state on gp to be restored. +// +// It moves the state to gp.m.p.xRegs.cache where [asyncPreempt] expects to find +// it. This means nothing else may use the cache between this call and the +// return to asyncPreempt. This is not quite symmetric with [xRegSave], which +// uses gp.m.p.xRegs.scratch. By using cache instead, we save a block copy. +// +// This is called with asyncPreempt on the stack and thus must not grow the +// stack. +// +//go:nosplit +func xRegRestore(gp *g) { + if gp.xRegs.state == nil { + throw("gp.xRegState.p == nil on return from async preempt") + } + // If the P has a block cached on it, free that so we can replace it. + pp := gp.m.p.ptr() + if pp.xRegs.cache != nil { + // Don't grow the G stack. + systemstack(func() { + pp.xRegs.free() + }) + } + pp.xRegs.cache = gp.xRegs.state + gp.xRegs.state = nil +} + +func (xRegs *xRegPerP) free() { + if xRegs.cache != nil { + lock(&xRegAlloc.lock) + xRegAlloc.alloc.free(unsafe.Pointer(xRegs.cache)) + xRegs.cache = nil + unlock(&xRegAlloc.lock) + } +} diff --git a/src/runtime/proc.go b/src/runtime/proc.go index 98173084302a33..b2ae46e0e4ac8e 100644 --- a/src/runtime/proc.go +++ b/src/runtime/proc.go @@ -5799,6 +5799,7 @@ func (pp *p) destroy() { pp.gcAssistTime = 0 gcCleanups.queued += pp.cleanupsQueued pp.cleanupsQueued = 0 + pp.xRegs.free() pp.status = _Pdead } diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go index 96720846b24559..789b68e54e66da 100644 --- a/src/runtime/runtime2.go +++ b/src/runtime/runtime2.go @@ -491,6 +491,10 @@ type g struct { coroarg *coro // argument during coroutine transfers bubble *synctestBubble + // xRegs stores the extended register state if this G has been + // asynchronously preempted. + xRegs xRegPerG + // Per-G tracer state. trace gTraceState @@ -760,6 +764,11 @@ type p struct { // gcStopTime is the nanotime timestamp that this P last entered _Pgcstop. gcStopTime int64 + // xRegs is the per-P extended register state used by asynchronous + // preemption. This is an empty struct on platforms that don't use extended + // register state. + xRegs xRegPerP + // Padding is no longer needed. False sharing is now not a worry because p is large enough // that its size class is an integer multiple of the cache line size (for any of our architectures). 
} diff --git a/src/runtime/sizeof_test.go b/src/runtime/sizeof_test.go index a5dc8aed3443bc..de859866a5adb2 100644 --- a/src/runtime/sizeof_test.go +++ b/src/runtime/sizeof_test.go @@ -15,13 +15,18 @@ import ( func TestSizeof(t *testing.T) { const _64bit = unsafe.Sizeof(uintptr(0)) == 8 + const xreg = unsafe.Sizeof(runtime.XRegPerG{}) // Varies per architecture var tests = []struct { val any // type as a value _32bit uintptr // size on 32bit platforms _64bit uintptr // size on 64bit platforms }{ - {runtime.G{}, 280, 440}, // g, but exported for testing - {runtime.Sudog{}, 56, 88}, // sudog, but exported for testing + {runtime.G{}, 280 + xreg, 440 + xreg}, // g, but exported for testing + {runtime.Sudog{}, 56, 88}, // sudog, but exported for testing + } + + if xreg > runtime.PtrSize { + t.Errorf("unsafe.Sizeof(xRegPerG) = %d, want <= %d", xreg, runtime.PtrSize) } for _, tt := range tests { From 9eeb1e7a9afb992e899d3917fce92c01b3fa50c1 Mon Sep 17 00:00:00 2001 From: Austin Clements Date: Thu, 12 Jun 2025 15:33:41 -0400 Subject: [PATCH 054/139] [dev.simd] runtime: save AVX2 and AVX-512 state on asynchronous preemption Based on CL 669415 by shaojunyang@google.com. Change-Id: I574f15c3b18a7179a1573aaf567caf18d8602ef1 Reviewed-on: https://go-review.googlesource.com/c/go/+/680900 LUCI-TryBot-Result: Go LUCI Auto-Submit: Austin Clements Reviewed-by: Cherry Mui --- src/runtime/cpuflags.go | 1 + src/runtime/mkpreempt.go | 74 ++++++++++++++-- src/runtime/preempt_amd64.go | 40 +++++---- src/runtime/preempt_amd64.s | 166 ++++++++++++++++++++++++++++------- 4 files changed, 227 insertions(+), 54 deletions(-) diff --git a/src/runtime/cpuflags.go b/src/runtime/cpuflags.go index bd1cb328d37b87..6452364b68ec32 100644 --- a/src/runtime/cpuflags.go +++ b/src/runtime/cpuflags.go @@ -13,6 +13,7 @@ import ( const ( offsetX86HasAVX = unsafe.Offsetof(cpu.X86.HasAVX) offsetX86HasAVX2 = unsafe.Offsetof(cpu.X86.HasAVX2) + offsetX86HasAVX512 = unsafe.Offsetof(cpu.X86.HasAVX512) // F+CD+BW+DQ+VL offsetX86HasERMS = unsafe.Offsetof(cpu.X86.HasERMS) offsetX86HasRDTSCP = unsafe.Offsetof(cpu.X86.HasRDTSCP) diff --git a/src/runtime/mkpreempt.go b/src/runtime/mkpreempt.go index e3dd5046f3f3a0..29e8288129f686 100644 --- a/src/runtime/mkpreempt.go +++ b/src/runtime/mkpreempt.go @@ -285,7 +285,7 @@ func gen386(g *gen) { func genAMD64(g *gen) { const xReg = "AX" // *xRegState - p := g.p + p, label := g.p, g.label // Assign stack offsets. var l = layout{sp: "SP"} @@ -297,15 +297,33 @@ func genAMD64(g *gen) { l.add("MOVQ", reg, 8) } } - lXRegs := layout{sp: xReg} // Non-GP registers - for _, reg := range regNamesAMD64 { - if strings.HasPrefix(reg, "X") { - lXRegs.add("MOVUPS", reg, 16) + // Create layouts for X, Y, and Z registers. + const ( + numXRegs = 16 + numZRegs = 16 // TODO: If we start using upper registers, change to 32 + numKRegs = 8 + ) + lZRegs := layout{sp: xReg} // Non-GP registers + lXRegs, lYRegs := lZRegs, lZRegs + for i := range numZRegs { + lZRegs.add("VMOVDQU64", fmt.Sprintf("Z%d", i), 512/8) + if i < numXRegs { + // Use SSE-only instructions for X registers. + lXRegs.add("MOVUPS", fmt.Sprintf("X%d", i), 128/8) + lYRegs.add("VMOVDQU", fmt.Sprintf("Y%d", i), 256/8) } } - writeXRegs(g.goarch, &lXRegs) - - // TODO: MXCSR register? + for i := range numKRegs { + lZRegs.add("KMOVQ", fmt.Sprintf("K%d", i), 8) + } + // The Z layout is the most general, so we line up the others with that one. + // We don't have to do this, but it results in a nice Go type. 
If we split + // this into multiple types, we probably should stop doing this. + for i := range lXRegs.regs { + lXRegs.regs[i].pos = lZRegs.regs[i].pos + lYRegs.regs[i].pos = lZRegs.regs[i].pos + } + writeXRegs(g.goarch, &lZRegs) p("PUSHQ BP") p("MOVQ SP, BP") @@ -333,16 +351,56 @@ func genAMD64(g *gen) { p("MOVQ g_m(R14), %s", xReg) p("MOVQ m_p(%s), %s", xReg, xReg) p("LEAQ (p_xRegs+xRegPerP_scratch)(%s), %s", xReg, xReg) + + // Which registers do we need to save? + p("#ifdef GOEXPERIMENT_simd") + p("CMPB internal∕cpu·X86+const_offsetX86HasAVX512(SB), $1") + p("JE saveAVX512") + p("CMPB internal∕cpu·X86+const_offsetX86HasAVX2(SB), $1") + p("JE saveAVX2") + p("#endif") + + // No features. Assume only SSE. + label("saveSSE:") lXRegs.save(g) + p("JMP preempt") + label("saveAVX2:") + lYRegs.save(g) + p("JMP preempt") + + label("saveAVX512:") + lZRegs.save(g) + p("JMP preempt") + + label("preempt:") p("CALL ·asyncPreempt2(SB)") p("// Restore non-GPs from *p.xRegs.cache") p("MOVQ g_m(R14), %s", xReg) p("MOVQ m_p(%s), %s", xReg, xReg) p("MOVQ (p_xRegs+xRegPerP_cache)(%s), %s", xReg, xReg) + + p("#ifdef GOEXPERIMENT_simd") + p("CMPB internal∕cpu·X86+const_offsetX86HasAVX512(SB), $1") + p("JE restoreAVX512") + p("CMPB internal∕cpu·X86+const_offsetX86HasAVX2(SB), $1") + p("JE restoreAVX2") + p("#endif") + + label("restoreSSE:") lXRegs.restore(g) + p("JMP restoreGPs") + + label("restoreAVX2:") + lYRegs.restore(g) + p("JMP restoreGPs") + + label("restoreAVX512:") + lZRegs.restore(g) + p("JMP restoreGPs") + label("restoreGPs:") p("// Restore GPs") l.restore(g) p("ADJSP $%d", -l.stack) diff --git a/src/runtime/preempt_amd64.go b/src/runtime/preempt_amd64.go index 904defac331622..44838a1df21dc2 100644 --- a/src/runtime/preempt_amd64.go +++ b/src/runtime/preempt_amd64.go @@ -3,20 +3,28 @@ package runtime type xRegState struct { - X0 [16]byte - X1 [16]byte - X2 [16]byte - X3 [16]byte - X4 [16]byte - X5 [16]byte - X6 [16]byte - X7 [16]byte - X8 [16]byte - X9 [16]byte - X10 [16]byte - X11 [16]byte - X12 [16]byte - X13 [16]byte - X14 [16]byte - X15 [16]byte + Z0 [64]byte + Z1 [64]byte + Z2 [64]byte + Z3 [64]byte + Z4 [64]byte + Z5 [64]byte + Z6 [64]byte + Z7 [64]byte + Z8 [64]byte + Z9 [64]byte + Z10 [64]byte + Z11 [64]byte + Z12 [64]byte + Z13 [64]byte + Z14 [64]byte + Z15 [64]byte + K0 uint64 + K1 uint64 + K2 uint64 + K3 uint64 + K4 uint64 + K5 uint64 + K6 uint64 + K7 uint64 } diff --git a/src/runtime/preempt_amd64.s b/src/runtime/preempt_amd64.s index 0a33ce7f3e72f5..c35de7f3b75726 100644 --- a/src/runtime/preempt_amd64.s +++ b/src/runtime/preempt_amd64.s @@ -36,43 +36,149 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0 MOVQ g_m(R14), AX MOVQ m_p(AX), AX LEAQ (p_xRegs+xRegPerP_scratch)(AX), AX + #ifdef GOEXPERIMENT_simd + CMPB internal∕cpu·X86+const_offsetX86HasAVX512(SB), $1 + JE saveAVX512 + CMPB internal∕cpu·X86+const_offsetX86HasAVX2(SB), $1 + JE saveAVX2 + #endif +saveSSE: MOVUPS X0, 0(AX) - MOVUPS X1, 16(AX) - MOVUPS X2, 32(AX) - MOVUPS X3, 48(AX) - MOVUPS X4, 64(AX) - MOVUPS X5, 80(AX) - MOVUPS X6, 96(AX) - MOVUPS X7, 112(AX) - MOVUPS X8, 128(AX) - MOVUPS X9, 144(AX) - MOVUPS X10, 160(AX) - MOVUPS X11, 176(AX) - MOVUPS X12, 192(AX) - MOVUPS X13, 208(AX) - MOVUPS X14, 224(AX) - MOVUPS X15, 240(AX) + MOVUPS X1, 64(AX) + MOVUPS X2, 128(AX) + MOVUPS X3, 192(AX) + MOVUPS X4, 256(AX) + MOVUPS X5, 320(AX) + MOVUPS X6, 384(AX) + MOVUPS X7, 448(AX) + MOVUPS X8, 512(AX) + MOVUPS X9, 576(AX) + MOVUPS X10, 640(AX) + MOVUPS X11, 704(AX) + MOVUPS X12, 768(AX) + MOVUPS X13, 832(AX) + MOVUPS X14, 896(AX) + MOVUPS 
X15, 960(AX) + JMP preempt +saveAVX2: + VMOVDQU Y0, 0(AX) + VMOVDQU Y1, 64(AX) + VMOVDQU Y2, 128(AX) + VMOVDQU Y3, 192(AX) + VMOVDQU Y4, 256(AX) + VMOVDQU Y5, 320(AX) + VMOVDQU Y6, 384(AX) + VMOVDQU Y7, 448(AX) + VMOVDQU Y8, 512(AX) + VMOVDQU Y9, 576(AX) + VMOVDQU Y10, 640(AX) + VMOVDQU Y11, 704(AX) + VMOVDQU Y12, 768(AX) + VMOVDQU Y13, 832(AX) + VMOVDQU Y14, 896(AX) + VMOVDQU Y15, 960(AX) + JMP preempt +saveAVX512: + VMOVDQU64 Z0, 0(AX) + VMOVDQU64 Z1, 64(AX) + VMOVDQU64 Z2, 128(AX) + VMOVDQU64 Z3, 192(AX) + VMOVDQU64 Z4, 256(AX) + VMOVDQU64 Z5, 320(AX) + VMOVDQU64 Z6, 384(AX) + VMOVDQU64 Z7, 448(AX) + VMOVDQU64 Z8, 512(AX) + VMOVDQU64 Z9, 576(AX) + VMOVDQU64 Z10, 640(AX) + VMOVDQU64 Z11, 704(AX) + VMOVDQU64 Z12, 768(AX) + VMOVDQU64 Z13, 832(AX) + VMOVDQU64 Z14, 896(AX) + VMOVDQU64 Z15, 960(AX) + KMOVQ K0, 1024(AX) + KMOVQ K1, 1032(AX) + KMOVQ K2, 1040(AX) + KMOVQ K3, 1048(AX) + KMOVQ K4, 1056(AX) + KMOVQ K5, 1064(AX) + KMOVQ K6, 1072(AX) + KMOVQ K7, 1080(AX) + JMP preempt +preempt: CALL ·asyncPreempt2(SB) // Restore non-GPs from *p.xRegs.cache MOVQ g_m(R14), AX MOVQ m_p(AX), AX MOVQ (p_xRegs+xRegPerP_cache)(AX), AX - MOVUPS 240(AX), X15 - MOVUPS 224(AX), X14 - MOVUPS 208(AX), X13 - MOVUPS 192(AX), X12 - MOVUPS 176(AX), X11 - MOVUPS 160(AX), X10 - MOVUPS 144(AX), X9 - MOVUPS 128(AX), X8 - MOVUPS 112(AX), X7 - MOVUPS 96(AX), X6 - MOVUPS 80(AX), X5 - MOVUPS 64(AX), X4 - MOVUPS 48(AX), X3 - MOVUPS 32(AX), X2 - MOVUPS 16(AX), X1 + #ifdef GOEXPERIMENT_simd + CMPB internal∕cpu·X86+const_offsetX86HasAVX512(SB), $1 + JE restoreAVX512 + CMPB internal∕cpu·X86+const_offsetX86HasAVX2(SB), $1 + JE restoreAVX2 + #endif +restoreSSE: + MOVUPS 960(AX), X15 + MOVUPS 896(AX), X14 + MOVUPS 832(AX), X13 + MOVUPS 768(AX), X12 + MOVUPS 704(AX), X11 + MOVUPS 640(AX), X10 + MOVUPS 576(AX), X9 + MOVUPS 512(AX), X8 + MOVUPS 448(AX), X7 + MOVUPS 384(AX), X6 + MOVUPS 320(AX), X5 + MOVUPS 256(AX), X4 + MOVUPS 192(AX), X3 + MOVUPS 128(AX), X2 + MOVUPS 64(AX), X1 MOVUPS 0(AX), X0 + JMP restoreGPs +restoreAVX2: + VMOVDQU 960(AX), Y15 + VMOVDQU 896(AX), Y14 + VMOVDQU 832(AX), Y13 + VMOVDQU 768(AX), Y12 + VMOVDQU 704(AX), Y11 + VMOVDQU 640(AX), Y10 + VMOVDQU 576(AX), Y9 + VMOVDQU 512(AX), Y8 + VMOVDQU 448(AX), Y7 + VMOVDQU 384(AX), Y6 + VMOVDQU 320(AX), Y5 + VMOVDQU 256(AX), Y4 + VMOVDQU 192(AX), Y3 + VMOVDQU 128(AX), Y2 + VMOVDQU 64(AX), Y1 + VMOVDQU 0(AX), Y0 + JMP restoreGPs +restoreAVX512: + KMOVQ 1080(AX), K7 + KMOVQ 1072(AX), K6 + KMOVQ 1064(AX), K5 + KMOVQ 1056(AX), K4 + KMOVQ 1048(AX), K3 + KMOVQ 1040(AX), K2 + KMOVQ 1032(AX), K1 + KMOVQ 1024(AX), K0 + VMOVDQU64 960(AX), Z15 + VMOVDQU64 896(AX), Z14 + VMOVDQU64 832(AX), Z13 + VMOVDQU64 768(AX), Z12 + VMOVDQU64 704(AX), Z11 + VMOVDQU64 640(AX), Z10 + VMOVDQU64 576(AX), Z9 + VMOVDQU64 512(AX), Z8 + VMOVDQU64 448(AX), Z7 + VMOVDQU64 384(AX), Z6 + VMOVDQU64 320(AX), Z5 + VMOVDQU64 256(AX), Z4 + VMOVDQU64 192(AX), Z3 + VMOVDQU64 128(AX), Z2 + VMOVDQU64 64(AX), Z1 + VMOVDQU64 0(AX), Z0 + JMP restoreGPs +restoreGPs: // Restore GPs MOVQ 104(SP), R15 MOVQ 96(SP), R14 From 59846af331228b28e69326412011b26b62f0c74d Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Mon, 30 Jun 2025 18:37:48 +0000 Subject: [PATCH 055/139] [dev.simd] cmd/compile, simd: cleanup operations and documentations This CL is generated by CL 685035. 
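The removed ...SuppressExceptionWithPrecision... operations were the variants
that additionally set bit 3 (+8) in the VRNDSCALE/VREDUCE immediate, which
suppresses the precision exception; only the plain WithPrecision forms remain,
with the rounding mode in the low two bits (+0 round, +1 floor, +2 ceil,
+3 trunc), as the rewrite rules below show. As a rough sketch only (not code
in this CL, and assuming the rules' aux value a already carries the precision
shifted into the immediate's high nibble), the encoding the surviving rules
rely on is:

	// rndscaleImm is a hypothetical helper, not part of the compiler.
	// mode is the rounding control in imm8[1:0]; prec is the number of
	// fraction bits to keep, stored in imm8[7:4]. The deleted ops also
	// set imm8[3] (suppress precision exception), i.e. the +8..+11 forms.
	func rndscaleImm(prec, mode uint8) uint8 {
		return prec<<4 | mode&3
	}
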
Change-Id: Ic3a043e83e62d0be77de97ef63a20d34bf1e2dc0 Reviewed-on: https://go-review.googlesource.com/c/go/+/685055 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- .../compile/internal/ssa/_gen/simdAMD64.rules | 96 - .../internal/ssa/_gen/simdgenericOps.go | 96 - src/cmd/compile/internal/ssa/opGen.go | 672 ------- src/cmd/compile/internal/ssa/rewriteAMD64.go | 1680 ----------------- .../compile/internal/ssagen/simdintrinsics.go | 96 - src/simd/simd_wrapped_test.go | 16 - src/simd/stubs_amd64.go | 1093 ----------- 7 files changed, 3749 deletions(-) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 6a4ded0ec496b9..3768c5aaadc338 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -120,60 +120,30 @@ (CeilFloat32x8 x) => (VROUNDPS256 [2] x) (CeilFloat64x2 x) => (VROUNDPD128 [2] x) (CeilFloat64x4 x) => (VROUNDPD256 [2] x) -(CeilSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+10] x) -(CeilSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+10] x) -(CeilSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+10] x) -(CeilSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+10] x) -(CeilSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+10] x) -(CeilSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+10] x) (CeilWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+2] x) (CeilWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+2] x) (CeilWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+2] x) (CeilWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+2] x) (CeilWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+2] x) (CeilWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+2] x) -(DiffWithCeilSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+10] x) -(DiffWithCeilSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+10] x) -(DiffWithCeilSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+10] x) -(DiffWithCeilSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+10] x) -(DiffWithCeilSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+10] x) -(DiffWithCeilSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+10] x) (DiffWithCeilWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+2] x) (DiffWithCeilWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+2] x) (DiffWithCeilWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+2] x) (DiffWithCeilWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+2] x) (DiffWithCeilWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+2] x) (DiffWithCeilWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+2] x) -(DiffWithFloorSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+9] x) -(DiffWithFloorSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+9] x) -(DiffWithFloorSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+9] x) -(DiffWithFloorSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+9] x) -(DiffWithFloorSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+9] x) -(DiffWithFloorSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+9] x) (DiffWithFloorWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+1] x) (DiffWithFloorWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+1] x) (DiffWithFloorWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+1] x) 
(DiffWithFloorWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+1] x) (DiffWithFloorWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+1] x) (DiffWithFloorWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+1] x) -(DiffWithRoundSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+8] x) -(DiffWithRoundSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+8] x) -(DiffWithRoundSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+8] x) -(DiffWithRoundSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+8] x) -(DiffWithRoundSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+8] x) -(DiffWithRoundSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+8] x) (DiffWithRoundWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+0] x) (DiffWithRoundWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+0] x) (DiffWithRoundWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+0] x) (DiffWithRoundWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+0] x) (DiffWithRoundWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+0] x) (DiffWithRoundWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+0] x) -(DiffWithTruncSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+11] x) -(DiffWithTruncSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+11] x) -(DiffWithTruncSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+11] x) -(DiffWithTruncSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+11] x) -(DiffWithTruncSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+11] x) -(DiffWithTruncSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+11] x) (DiffWithTruncWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+3] x) (DiffWithTruncWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+3] x) (DiffWithTruncWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+3] x) @@ -221,12 +191,6 @@ (FloorFloat32x8 x) => (VROUNDPS256 [1] x) (FloorFloat64x2 x) => (VROUNDPD128 [1] x) (FloorFloat64x4 x) => (VROUNDPD256 [1] x) -(FloorSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+9] x) -(FloorSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+9] x) -(FloorSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+9] x) -(FloorSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+9] x) -(FloorSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+9] x) -(FloorSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+9] x) (FloorWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+1] x) (FloorWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+1] x) (FloorWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+1] x) @@ -490,60 +454,30 @@ (MaskedAverageUint16x8 x y mask) => (VPAVGWMasked128 x y (VPMOVVec16x8ToM mask)) (MaskedAverageUint16x16 x y mask) => (VPAVGWMasked256 x y (VPMOVVec16x16ToM mask)) (MaskedAverageUint16x32 x y mask) => (VPAVGWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedCeilSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+10] x (VPMOVVec32x4ToM mask)) -(MaskedCeilSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+10] x (VPMOVVec32x8ToM mask)) -(MaskedCeilSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+10] x (VPMOVVec32x16ToM mask)) -(MaskedCeilSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+10] x (VPMOVVec64x2ToM mask)) -(MaskedCeilSuppressExceptionWithPrecisionFloat64x4 [a] 
x mask) => (VRNDSCALEPDMasked256 [a+10] x (VPMOVVec64x4ToM mask)) -(MaskedCeilSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+10] x (VPMOVVec64x8ToM mask)) (MaskedCeilWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) (MaskedCeilWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) (MaskedCeilWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) (MaskedCeilWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) (MaskedCeilWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) (MaskedCeilWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) -(MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+10] x (VPMOVVec32x4ToM mask)) -(MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+10] x (VPMOVVec32x8ToM mask)) -(MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+10] x (VPMOVVec32x16ToM mask)) -(MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+10] x (VPMOVVec64x2ToM mask)) -(MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+10] x (VPMOVVec64x4ToM mask)) -(MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+10] x (VPMOVVec64x8ToM mask)) (MaskedDiffWithCeilWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) (MaskedDiffWithCeilWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) (MaskedDiffWithCeilWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) (MaskedDiffWithCeilWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) (MaskedDiffWithCeilWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) (MaskedDiffWithCeilWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) -(MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+9] x (VPMOVVec32x4ToM mask)) -(MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+9] x (VPMOVVec32x8ToM mask)) -(MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+9] x (VPMOVVec32x16ToM mask)) -(MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+9] x (VPMOVVec64x2ToM mask)) -(MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+9] x (VPMOVVec64x4ToM mask)) -(MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+9] x (VPMOVVec64x8ToM mask)) (MaskedDiffWithFloorWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) (MaskedDiffWithFloorWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) (MaskedDiffWithFloorWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) (MaskedDiffWithFloorWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) (MaskedDiffWithFloorWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+1] x (VPMOVVec64x4ToM 
mask)) (MaskedDiffWithFloorWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) -(MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+8] x (VPMOVVec32x4ToM mask)) -(MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+8] x (VPMOVVec32x8ToM mask)) -(MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+8] x (VPMOVVec32x16ToM mask)) -(MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+8] x (VPMOVVec64x2ToM mask)) -(MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+8] x (VPMOVVec64x4ToM mask)) -(MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+8] x (VPMOVVec64x8ToM mask)) (MaskedDiffWithRoundWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) (MaskedDiffWithRoundWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) (MaskedDiffWithRoundWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) (MaskedDiffWithRoundWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) (MaskedDiffWithRoundWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) (MaskedDiffWithRoundWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) -(MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+11] x (VPMOVVec32x4ToM mask)) -(MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+11] x (VPMOVVec32x8ToM mask)) -(MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+11] x (VPMOVVec32x16ToM mask)) -(MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+11] x (VPMOVVec64x2ToM mask)) -(MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+11] x (VPMOVVec64x4ToM mask)) -(MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+11] x (VPMOVVec64x8ToM mask)) (MaskedDiffWithTruncWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) (MaskedDiffWithTruncWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) (MaskedDiffWithTruncWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) @@ -586,12 +520,6 @@ (MaskedEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [0] x y (VPMOVVec64x2ToM mask))) (MaskedEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [0] x y (VPMOVVec64x4ToM mask))) (MaskedEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [0] x y (VPMOVVec64x8ToM mask))) -(MaskedFloorSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+9] x (VPMOVVec32x4ToM mask)) -(MaskedFloorSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+9] x (VPMOVVec32x8ToM mask)) -(MaskedFloorSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+9] x (VPMOVVec32x16ToM mask)) -(MaskedFloorSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+9] x (VPMOVVec64x2ToM mask)) -(MaskedFloorSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => 
(VRNDSCALEPDMasked256 [a+9] x (VPMOVVec64x4ToM mask)) -(MaskedFloorSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+9] x (VPMOVVec64x8ToM mask)) (MaskedFloorWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) (MaskedFloorWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) (MaskedFloorWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) @@ -970,12 +898,6 @@ (MaskedRotateRightUint64x2 x y mask) => (VPRORVQMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedRotateRightUint64x4 x y mask) => (VPRORVQMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedRotateRightUint64x8 x y mask) => (VPRORVQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedRoundSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+8] x (VPMOVVec32x4ToM mask)) -(MaskedRoundSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+8] x (VPMOVVec32x8ToM mask)) -(MaskedRoundSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+8] x (VPMOVVec32x16ToM mask)) -(MaskedRoundSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+8] x (VPMOVVec64x2ToM mask)) -(MaskedRoundSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+8] x (VPMOVVec64x4ToM mask)) -(MaskedRoundSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+8] x (VPMOVVec64x8ToM mask)) (MaskedRoundWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) (MaskedRoundWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) (MaskedRoundWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) @@ -1195,12 +1117,6 @@ (MaskedSubUint64x2 x y mask) => (VPSUBQMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedSubUint64x4 x y mask) => (VPSUBQMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedSubUint64x8 x y mask) => (VPSUBQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedTruncSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+11] x (VPMOVVec32x4ToM mask)) -(MaskedTruncSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+11] x (VPMOVVec32x8ToM mask)) -(MaskedTruncSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+11] x (VPMOVVec32x16ToM mask)) -(MaskedTruncSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+11] x (VPMOVVec64x2ToM mask)) -(MaskedTruncSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+11] x (VPMOVVec64x4ToM mask)) -(MaskedTruncSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+11] x (VPMOVVec64x8ToM mask)) (MaskedTruncWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) (MaskedTruncWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) (MaskedTruncWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) @@ -1490,12 +1406,6 @@ (RoundFloat32x8 x) => (VROUNDPS256 [0] x) (RoundFloat64x2 x) => (VROUNDPD128 [0] x) (RoundFloat64x4 x) => (VROUNDPD256 [0] x) -(RoundSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+8] x) -(RoundSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+8] x) -(RoundSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+8] x) 
-(RoundSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+8] x) -(RoundSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+8] x) -(RoundSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+8] x) (RoundWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+0] x) (RoundWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+0] x) (RoundWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+0] x) @@ -1757,12 +1667,6 @@ (TruncFloat32x8 x) => (VROUNDPS256 [3] x) (TruncFloat64x2 x) => (VROUNDPD128 [3] x) (TruncFloat64x4 x) => (VROUNDPD256 [3] x) -(TruncSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+11] x) -(TruncSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+11] x) -(TruncSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+11] x) -(TruncSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+11] x) -(TruncSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+11] x) -(TruncSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+11] x) (TruncWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+3] x) (TruncWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+3] x) (TruncWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+3] x) diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 4907b78d12e6d2..b68b237c312548 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -1464,197 +1464,101 @@ func simdGenericOps() []opData { {name: "SaturatedSubUint8x64", argLength: 2, commutative: false}, {name: "SaturatedUnsignedSignedPairDotProdUint8x64", argLength: 2, commutative: false}, {name: "SubUint8x64", argLength: 2, commutative: false}, - {name: "CeilSuppressExceptionWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, {name: "CeilWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilSuppressExceptionWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithCeilWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorSuppressExceptionWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithFloorWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundSuppressExceptionWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithRoundWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncSuppressExceptionWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithTruncWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorSuppressExceptionWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, {name: "FloorWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "MaskedCeilSuppressExceptionWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedCeilWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithCeilWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16", argLength: 2, 
commutative: false, aux: "Int8"}, {name: "MaskedDiffWithFloorWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithRoundWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithTruncWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedFloorSuppressExceptionWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedFloorWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedRoundSuppressExceptionWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedRoundWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedTruncSuppressExceptionWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedTruncWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundSuppressExceptionWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, {name: "RoundWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncSuppressExceptionWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, {name: "TruncWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilSuppressExceptionWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "CeilWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilSuppressExceptionWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithCeilWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorSuppressExceptionWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithFloorWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundSuppressExceptionWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithRoundWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncSuppressExceptionWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithTruncWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorSuppressExceptionWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "FloorWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "MaskedCeilSuppressExceptionWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedCeilWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithCeilWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithFloorWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: 
"MaskedDiffWithRoundWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithTruncWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedFloorSuppressExceptionWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedFloorWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedRoundSuppressExceptionWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedRoundWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedTruncSuppressExceptionWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedTruncWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundSuppressExceptionWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "RoundWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncSuppressExceptionWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "TruncWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilSuppressExceptionWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "CeilWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilSuppressExceptionWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithCeilWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorSuppressExceptionWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithFloorWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundSuppressExceptionWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithRoundWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncSuppressExceptionWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithTruncWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorSuppressExceptionWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "FloorWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "MaskedCeilSuppressExceptionWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedCeilWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithCeilWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithFloorWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithRoundWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithTruncWithPrecisionFloat32x8", argLength: 2, 
commutative: false, aux: "Int8"}, - {name: "MaskedFloorSuppressExceptionWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedFloorWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedRoundSuppressExceptionWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedRoundWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedTruncSuppressExceptionWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedTruncWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundSuppressExceptionWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "RoundWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncSuppressExceptionWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "TruncWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilSuppressExceptionWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, {name: "CeilWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilSuppressExceptionWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithCeilWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorSuppressExceptionWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithFloorWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundSuppressExceptionWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithRoundWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncSuppressExceptionWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithTruncWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorSuppressExceptionWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, {name: "FloorWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "MaskedCeilSuppressExceptionWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedCeilWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithCeilWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithFloorWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithRoundWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithTruncWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedFloorSuppressExceptionWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedFloorWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: 
"MaskedRoundSuppressExceptionWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedRoundWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedTruncSuppressExceptionWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedTruncWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundSuppressExceptionWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, {name: "RoundWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncSuppressExceptionWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, {name: "TruncWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilSuppressExceptionWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "CeilWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilSuppressExceptionWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithCeilWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorSuppressExceptionWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithFloorWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundSuppressExceptionWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithRoundWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncSuppressExceptionWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithTruncWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorSuppressExceptionWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "FloorWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "MaskedCeilSuppressExceptionWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedCeilWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithCeilWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithFloorWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithRoundWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithTruncWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedFloorSuppressExceptionWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedFloorWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedRoundSuppressExceptionWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedRoundWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedTruncSuppressExceptionWithPrecisionFloat64x4", argLength: 2, 
commutative: false, aux: "Int8"}, {name: "MaskedTruncWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundSuppressExceptionWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "RoundWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncSuppressExceptionWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "TruncWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilSuppressExceptionWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "CeilWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilSuppressExceptionWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithCeilWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorSuppressExceptionWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithFloorWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundSuppressExceptionWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithRoundWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncSuppressExceptionWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithTruncWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorSuppressExceptionWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "FloorWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "MaskedCeilSuppressExceptionWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedCeilWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithCeilWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithFloorWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithRoundWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithTruncWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedFloorSuppressExceptionWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedFloorWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedRoundSuppressExceptionWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedRoundWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedTruncSuppressExceptionWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedTruncWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundSuppressExceptionWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "RoundWithPrecisionFloat64x8", argLength: 1, 
commutative: false, aux: "Int8"}, - {name: "TruncSuppressExceptionWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "TruncWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "MaskedShiftAllLeftAndFillUpperFromInt16x16", argLength: 3, commutative: false, aux: "Int8"}, {name: "MaskedShiftAllRightAndFillUpperFromInt16x16", argLength: 3, commutative: false, aux: "Int8"}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 906bd74cdcbd11..fec727ea12ec78 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -5797,197 +5797,101 @@ const ( OpSaturatedSubUint8x64 OpSaturatedUnsignedSignedPairDotProdUint8x64 OpSubUint8x64 - OpCeilSuppressExceptionWithPrecisionFloat32x16 OpCeilWithPrecisionFloat32x16 - OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x16 OpDiffWithCeilWithPrecisionFloat32x16 - OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x16 OpDiffWithFloorWithPrecisionFloat32x16 - OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x16 OpDiffWithRoundWithPrecisionFloat32x16 - OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x16 OpDiffWithTruncWithPrecisionFloat32x16 - OpFloorSuppressExceptionWithPrecisionFloat32x16 OpFloorWithPrecisionFloat32x16 - OpMaskedCeilSuppressExceptionWithPrecisionFloat32x16 OpMaskedCeilWithPrecisionFloat32x16 - OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16 OpMaskedDiffWithCeilWithPrecisionFloat32x16 - OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16 OpMaskedDiffWithFloorWithPrecisionFloat32x16 - OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16 OpMaskedDiffWithRoundWithPrecisionFloat32x16 - OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16 OpMaskedDiffWithTruncWithPrecisionFloat32x16 - OpMaskedFloorSuppressExceptionWithPrecisionFloat32x16 OpMaskedFloorWithPrecisionFloat32x16 - OpMaskedRoundSuppressExceptionWithPrecisionFloat32x16 OpMaskedRoundWithPrecisionFloat32x16 - OpMaskedTruncSuppressExceptionWithPrecisionFloat32x16 OpMaskedTruncWithPrecisionFloat32x16 - OpRoundSuppressExceptionWithPrecisionFloat32x16 OpRoundWithPrecisionFloat32x16 - OpTruncSuppressExceptionWithPrecisionFloat32x16 OpTruncWithPrecisionFloat32x16 - OpCeilSuppressExceptionWithPrecisionFloat32x4 OpCeilWithPrecisionFloat32x4 - OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x4 OpDiffWithCeilWithPrecisionFloat32x4 - OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x4 OpDiffWithFloorWithPrecisionFloat32x4 - OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x4 OpDiffWithRoundWithPrecisionFloat32x4 - OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x4 OpDiffWithTruncWithPrecisionFloat32x4 - OpFloorSuppressExceptionWithPrecisionFloat32x4 OpFloorWithPrecisionFloat32x4 - OpMaskedCeilSuppressExceptionWithPrecisionFloat32x4 OpMaskedCeilWithPrecisionFloat32x4 - OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4 OpMaskedDiffWithCeilWithPrecisionFloat32x4 - OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4 OpMaskedDiffWithFloorWithPrecisionFloat32x4 - OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4 OpMaskedDiffWithRoundWithPrecisionFloat32x4 - OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4 OpMaskedDiffWithTruncWithPrecisionFloat32x4 - OpMaskedFloorSuppressExceptionWithPrecisionFloat32x4 OpMaskedFloorWithPrecisionFloat32x4 - OpMaskedRoundSuppressExceptionWithPrecisionFloat32x4 OpMaskedRoundWithPrecisionFloat32x4 - 
OpMaskedTruncSuppressExceptionWithPrecisionFloat32x4 OpMaskedTruncWithPrecisionFloat32x4 - OpRoundSuppressExceptionWithPrecisionFloat32x4 OpRoundWithPrecisionFloat32x4 - OpTruncSuppressExceptionWithPrecisionFloat32x4 OpTruncWithPrecisionFloat32x4 - OpCeilSuppressExceptionWithPrecisionFloat32x8 OpCeilWithPrecisionFloat32x8 - OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x8 OpDiffWithCeilWithPrecisionFloat32x8 - OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x8 OpDiffWithFloorWithPrecisionFloat32x8 - OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x8 OpDiffWithRoundWithPrecisionFloat32x8 - OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x8 OpDiffWithTruncWithPrecisionFloat32x8 - OpFloorSuppressExceptionWithPrecisionFloat32x8 OpFloorWithPrecisionFloat32x8 - OpMaskedCeilSuppressExceptionWithPrecisionFloat32x8 OpMaskedCeilWithPrecisionFloat32x8 - OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8 OpMaskedDiffWithCeilWithPrecisionFloat32x8 - OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8 OpMaskedDiffWithFloorWithPrecisionFloat32x8 - OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8 OpMaskedDiffWithRoundWithPrecisionFloat32x8 - OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8 OpMaskedDiffWithTruncWithPrecisionFloat32x8 - OpMaskedFloorSuppressExceptionWithPrecisionFloat32x8 OpMaskedFloorWithPrecisionFloat32x8 - OpMaskedRoundSuppressExceptionWithPrecisionFloat32x8 OpMaskedRoundWithPrecisionFloat32x8 - OpMaskedTruncSuppressExceptionWithPrecisionFloat32x8 OpMaskedTruncWithPrecisionFloat32x8 - OpRoundSuppressExceptionWithPrecisionFloat32x8 OpRoundWithPrecisionFloat32x8 - OpTruncSuppressExceptionWithPrecisionFloat32x8 OpTruncWithPrecisionFloat32x8 - OpCeilSuppressExceptionWithPrecisionFloat64x2 OpCeilWithPrecisionFloat64x2 - OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x2 OpDiffWithCeilWithPrecisionFloat64x2 - OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x2 OpDiffWithFloorWithPrecisionFloat64x2 - OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x2 OpDiffWithRoundWithPrecisionFloat64x2 - OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x2 OpDiffWithTruncWithPrecisionFloat64x2 - OpFloorSuppressExceptionWithPrecisionFloat64x2 OpFloorWithPrecisionFloat64x2 - OpMaskedCeilSuppressExceptionWithPrecisionFloat64x2 OpMaskedCeilWithPrecisionFloat64x2 - OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2 OpMaskedDiffWithCeilWithPrecisionFloat64x2 - OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2 OpMaskedDiffWithFloorWithPrecisionFloat64x2 - OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2 OpMaskedDiffWithRoundWithPrecisionFloat64x2 - OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2 OpMaskedDiffWithTruncWithPrecisionFloat64x2 - OpMaskedFloorSuppressExceptionWithPrecisionFloat64x2 OpMaskedFloorWithPrecisionFloat64x2 - OpMaskedRoundSuppressExceptionWithPrecisionFloat64x2 OpMaskedRoundWithPrecisionFloat64x2 - OpMaskedTruncSuppressExceptionWithPrecisionFloat64x2 OpMaskedTruncWithPrecisionFloat64x2 - OpRoundSuppressExceptionWithPrecisionFloat64x2 OpRoundWithPrecisionFloat64x2 - OpTruncSuppressExceptionWithPrecisionFloat64x2 OpTruncWithPrecisionFloat64x2 - OpCeilSuppressExceptionWithPrecisionFloat64x4 OpCeilWithPrecisionFloat64x4 - OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x4 OpDiffWithCeilWithPrecisionFloat64x4 - OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x4 OpDiffWithFloorWithPrecisionFloat64x4 - OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x4 
OpDiffWithRoundWithPrecisionFloat64x4 - OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x4 OpDiffWithTruncWithPrecisionFloat64x4 - OpFloorSuppressExceptionWithPrecisionFloat64x4 OpFloorWithPrecisionFloat64x4 - OpMaskedCeilSuppressExceptionWithPrecisionFloat64x4 OpMaskedCeilWithPrecisionFloat64x4 - OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4 OpMaskedDiffWithCeilWithPrecisionFloat64x4 - OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4 OpMaskedDiffWithFloorWithPrecisionFloat64x4 - OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4 OpMaskedDiffWithRoundWithPrecisionFloat64x4 - OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4 OpMaskedDiffWithTruncWithPrecisionFloat64x4 - OpMaskedFloorSuppressExceptionWithPrecisionFloat64x4 OpMaskedFloorWithPrecisionFloat64x4 - OpMaskedRoundSuppressExceptionWithPrecisionFloat64x4 OpMaskedRoundWithPrecisionFloat64x4 - OpMaskedTruncSuppressExceptionWithPrecisionFloat64x4 OpMaskedTruncWithPrecisionFloat64x4 - OpRoundSuppressExceptionWithPrecisionFloat64x4 OpRoundWithPrecisionFloat64x4 - OpTruncSuppressExceptionWithPrecisionFloat64x4 OpTruncWithPrecisionFloat64x4 - OpCeilSuppressExceptionWithPrecisionFloat64x8 OpCeilWithPrecisionFloat64x8 - OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x8 OpDiffWithCeilWithPrecisionFloat64x8 - OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x8 OpDiffWithFloorWithPrecisionFloat64x8 - OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x8 OpDiffWithRoundWithPrecisionFloat64x8 - OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x8 OpDiffWithTruncWithPrecisionFloat64x8 - OpFloorSuppressExceptionWithPrecisionFloat64x8 OpFloorWithPrecisionFloat64x8 - OpMaskedCeilSuppressExceptionWithPrecisionFloat64x8 OpMaskedCeilWithPrecisionFloat64x8 - OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8 OpMaskedDiffWithCeilWithPrecisionFloat64x8 - OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8 OpMaskedDiffWithFloorWithPrecisionFloat64x8 - OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8 OpMaskedDiffWithRoundWithPrecisionFloat64x8 - OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8 OpMaskedDiffWithTruncWithPrecisionFloat64x8 - OpMaskedFloorSuppressExceptionWithPrecisionFloat64x8 OpMaskedFloorWithPrecisionFloat64x8 - OpMaskedRoundSuppressExceptionWithPrecisionFloat64x8 OpMaskedRoundWithPrecisionFloat64x8 - OpMaskedTruncSuppressExceptionWithPrecisionFloat64x8 OpMaskedTruncWithPrecisionFloat64x8 - OpRoundSuppressExceptionWithPrecisionFloat64x8 OpRoundWithPrecisionFloat64x8 - OpTruncSuppressExceptionWithPrecisionFloat64x8 OpTruncWithPrecisionFloat64x8 OpMaskedShiftAllLeftAndFillUpperFromInt16x16 OpMaskedShiftAllRightAndFillUpperFromInt16x16 @@ -67532,1152 +67436,576 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, - { - name: "CeilSuppressExceptionWithPrecisionFloat32x16", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "CeilWithPrecisionFloat32x16", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "DiffWithCeilSuppressExceptionWithPrecisionFloat32x16", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "DiffWithCeilWithPrecisionFloat32x16", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "DiffWithFloorSuppressExceptionWithPrecisionFloat32x16", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "DiffWithFloorWithPrecisionFloat32x16", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "DiffWithRoundSuppressExceptionWithPrecisionFloat32x16", - auxType: auxInt8, - argLen: 
1, - generic: true, - }, { name: "DiffWithRoundWithPrecisionFloat32x16", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "DiffWithTruncSuppressExceptionWithPrecisionFloat32x16", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "DiffWithTruncWithPrecisionFloat32x16", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "FloorSuppressExceptionWithPrecisionFloat32x16", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "FloorWithPrecisionFloat32x16", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "MaskedCeilSuppressExceptionWithPrecisionFloat32x16", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedCeilWithPrecisionFloat32x16", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedDiffWithCeilWithPrecisionFloat32x16", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedDiffWithFloorWithPrecisionFloat32x16", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedDiffWithRoundWithPrecisionFloat32x16", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedDiffWithTruncWithPrecisionFloat32x16", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedFloorSuppressExceptionWithPrecisionFloat32x16", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedFloorWithPrecisionFloat32x16", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedRoundSuppressExceptionWithPrecisionFloat32x16", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedRoundWithPrecisionFloat32x16", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedTruncSuppressExceptionWithPrecisionFloat32x16", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedTruncWithPrecisionFloat32x16", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "RoundSuppressExceptionWithPrecisionFloat32x16", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "RoundWithPrecisionFloat32x16", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "TruncSuppressExceptionWithPrecisionFloat32x16", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "TruncWithPrecisionFloat32x16", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "CeilSuppressExceptionWithPrecisionFloat32x4", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "CeilWithPrecisionFloat32x4", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "DiffWithCeilSuppressExceptionWithPrecisionFloat32x4", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "DiffWithCeilWithPrecisionFloat32x4", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "DiffWithFloorSuppressExceptionWithPrecisionFloat32x4", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "DiffWithFloorWithPrecisionFloat32x4", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "DiffWithRoundSuppressExceptionWithPrecisionFloat32x4", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "DiffWithRoundWithPrecisionFloat32x4", auxType: auxInt8, argLen: 
1, generic: true, }, - { - name: "DiffWithTruncSuppressExceptionWithPrecisionFloat32x4", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "DiffWithTruncWithPrecisionFloat32x4", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "FloorSuppressExceptionWithPrecisionFloat32x4", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "FloorWithPrecisionFloat32x4", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "MaskedCeilSuppressExceptionWithPrecisionFloat32x4", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedCeilWithPrecisionFloat32x4", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedDiffWithCeilWithPrecisionFloat32x4", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedDiffWithFloorWithPrecisionFloat32x4", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedDiffWithRoundWithPrecisionFloat32x4", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedDiffWithTruncWithPrecisionFloat32x4", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedFloorSuppressExceptionWithPrecisionFloat32x4", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedFloorWithPrecisionFloat32x4", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedRoundSuppressExceptionWithPrecisionFloat32x4", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedRoundWithPrecisionFloat32x4", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedTruncSuppressExceptionWithPrecisionFloat32x4", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedTruncWithPrecisionFloat32x4", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "RoundSuppressExceptionWithPrecisionFloat32x4", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "RoundWithPrecisionFloat32x4", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "TruncSuppressExceptionWithPrecisionFloat32x4", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "TruncWithPrecisionFloat32x4", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "CeilSuppressExceptionWithPrecisionFloat32x8", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "CeilWithPrecisionFloat32x8", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "DiffWithCeilSuppressExceptionWithPrecisionFloat32x8", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "DiffWithCeilWithPrecisionFloat32x8", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "DiffWithFloorSuppressExceptionWithPrecisionFloat32x8", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "DiffWithFloorWithPrecisionFloat32x8", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "DiffWithRoundSuppressExceptionWithPrecisionFloat32x8", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "DiffWithRoundWithPrecisionFloat32x8", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "DiffWithTruncSuppressExceptionWithPrecisionFloat32x8", - auxType: auxInt8, - argLen: 1, 
- generic: true, - }, { name: "DiffWithTruncWithPrecisionFloat32x8", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "FloorSuppressExceptionWithPrecisionFloat32x8", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "FloorWithPrecisionFloat32x8", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "MaskedCeilSuppressExceptionWithPrecisionFloat32x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedCeilWithPrecisionFloat32x8", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedDiffWithCeilWithPrecisionFloat32x8", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedDiffWithFloorWithPrecisionFloat32x8", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedDiffWithRoundWithPrecisionFloat32x8", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedDiffWithTruncWithPrecisionFloat32x8", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedFloorSuppressExceptionWithPrecisionFloat32x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedFloorWithPrecisionFloat32x8", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedRoundSuppressExceptionWithPrecisionFloat32x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedRoundWithPrecisionFloat32x8", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedTruncSuppressExceptionWithPrecisionFloat32x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedTruncWithPrecisionFloat32x8", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "RoundSuppressExceptionWithPrecisionFloat32x8", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "RoundWithPrecisionFloat32x8", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "TruncSuppressExceptionWithPrecisionFloat32x8", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "TruncWithPrecisionFloat32x8", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "CeilSuppressExceptionWithPrecisionFloat64x2", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "CeilWithPrecisionFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "DiffWithCeilSuppressExceptionWithPrecisionFloat64x2", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "DiffWithCeilWithPrecisionFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "DiffWithFloorSuppressExceptionWithPrecisionFloat64x2", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "DiffWithFloorWithPrecisionFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "DiffWithRoundSuppressExceptionWithPrecisionFloat64x2", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "DiffWithRoundWithPrecisionFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "DiffWithTruncSuppressExceptionWithPrecisionFloat64x2", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "DiffWithTruncWithPrecisionFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, - { - 
name: "FloorSuppressExceptionWithPrecisionFloat64x2", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "FloorWithPrecisionFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "MaskedCeilSuppressExceptionWithPrecisionFloat64x2", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedCeilWithPrecisionFloat64x2", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedDiffWithCeilWithPrecisionFloat64x2", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedDiffWithFloorWithPrecisionFloat64x2", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedDiffWithRoundWithPrecisionFloat64x2", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedDiffWithTruncWithPrecisionFloat64x2", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedFloorSuppressExceptionWithPrecisionFloat64x2", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedFloorWithPrecisionFloat64x2", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedRoundSuppressExceptionWithPrecisionFloat64x2", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedRoundWithPrecisionFloat64x2", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedTruncSuppressExceptionWithPrecisionFloat64x2", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedTruncWithPrecisionFloat64x2", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "RoundSuppressExceptionWithPrecisionFloat64x2", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "RoundWithPrecisionFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "TruncSuppressExceptionWithPrecisionFloat64x2", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "TruncWithPrecisionFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "CeilSuppressExceptionWithPrecisionFloat64x4", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "CeilWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "DiffWithCeilSuppressExceptionWithPrecisionFloat64x4", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "DiffWithCeilWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "DiffWithFloorSuppressExceptionWithPrecisionFloat64x4", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "DiffWithFloorWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "DiffWithRoundSuppressExceptionWithPrecisionFloat64x4", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "DiffWithRoundWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "DiffWithTruncSuppressExceptionWithPrecisionFloat64x4", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "DiffWithTruncWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "FloorSuppressExceptionWithPrecisionFloat64x4", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: 
"FloorWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "MaskedCeilSuppressExceptionWithPrecisionFloat64x4", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedCeilWithPrecisionFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedDiffWithCeilWithPrecisionFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedDiffWithFloorWithPrecisionFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedDiffWithRoundWithPrecisionFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedDiffWithTruncWithPrecisionFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedFloorSuppressExceptionWithPrecisionFloat64x4", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedFloorWithPrecisionFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedRoundSuppressExceptionWithPrecisionFloat64x4", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedRoundWithPrecisionFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedTruncSuppressExceptionWithPrecisionFloat64x4", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedTruncWithPrecisionFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "RoundSuppressExceptionWithPrecisionFloat64x4", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "RoundWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "TruncSuppressExceptionWithPrecisionFloat64x4", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "TruncWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "CeilSuppressExceptionWithPrecisionFloat64x8", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "CeilWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "DiffWithCeilSuppressExceptionWithPrecisionFloat64x8", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "DiffWithCeilWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "DiffWithFloorSuppressExceptionWithPrecisionFloat64x8", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "DiffWithFloorWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "DiffWithRoundSuppressExceptionWithPrecisionFloat64x8", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "DiffWithRoundWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "DiffWithTruncSuppressExceptionWithPrecisionFloat64x8", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "DiffWithTruncWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "FloorSuppressExceptionWithPrecisionFloat64x8", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "FloorWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: 
"MaskedCeilSuppressExceptionWithPrecisionFloat64x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedCeilWithPrecisionFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedDiffWithCeilWithPrecisionFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedDiffWithFloorWithPrecisionFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedDiffWithRoundWithPrecisionFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedDiffWithTruncWithPrecisionFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedFloorSuppressExceptionWithPrecisionFloat64x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedFloorWithPrecisionFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedRoundSuppressExceptionWithPrecisionFloat64x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedRoundWithPrecisionFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedTruncSuppressExceptionWithPrecisionFloat64x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedTruncWithPrecisionFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "RoundSuppressExceptionWithPrecisionFloat64x8", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "RoundWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "TruncSuppressExceptionWithPrecisionFloat64x8", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "TruncWithPrecisionFloat64x8", auxType: auxInt8, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 22085dc80eadeb..15ca2fcc5b4dfe 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1014,18 +1014,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpCeilFloat64x2(v) case OpCeilFloat64x4: return rewriteValueAMD64_OpCeilFloat64x4(v) - case OpCeilSuppressExceptionWithPrecisionFloat32x16: - return rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat32x16(v) - case OpCeilSuppressExceptionWithPrecisionFloat32x4: - return rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat32x4(v) - case OpCeilSuppressExceptionWithPrecisionFloat32x8: - return rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat32x8(v) - case OpCeilSuppressExceptionWithPrecisionFloat64x2: - return rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat64x2(v) - case OpCeilSuppressExceptionWithPrecisionFloat64x4: - return rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat64x4(v) - case OpCeilSuppressExceptionWithPrecisionFloat64x8: - return rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat64x8(v) case OpCeilWithPrecisionFloat32x16: return rewriteValueAMD64_OpCeilWithPrecisionFloat32x16(v) case OpCeilWithPrecisionFloat32x4: @@ -1124,18 +1112,6 @@ func rewriteValueAMD64(v *Value) bool { case OpCvtBoolToUint8: v.Op = 
OpCopy return true - case OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x16: - return rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x16(v) - case OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x4: - return rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x4(v) - case OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x8: - return rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x8(v) - case OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x2: - return rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x2(v) - case OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x4: - return rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x4(v) - case OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x8: - return rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x8(v) case OpDiffWithCeilWithPrecisionFloat32x16: return rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat32x16(v) case OpDiffWithCeilWithPrecisionFloat32x4: @@ -1148,18 +1124,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat64x4(v) case OpDiffWithCeilWithPrecisionFloat64x8: return rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat64x8(v) - case OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x16: - return rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x16(v) - case OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x4: - return rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x4(v) - case OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x8: - return rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x8(v) - case OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x2: - return rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x2(v) - case OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x4: - return rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x4(v) - case OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x8: - return rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x8(v) case OpDiffWithFloorWithPrecisionFloat32x16: return rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat32x16(v) case OpDiffWithFloorWithPrecisionFloat32x4: @@ -1172,18 +1136,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat64x4(v) case OpDiffWithFloorWithPrecisionFloat64x8: return rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat64x8(v) - case OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x16: - return rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x16(v) - case OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x4: - return rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x4(v) - case OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x8: - return rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x8(v) - case OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x2: - return rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x2(v) - case OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x4: - return rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x4(v) - case OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x8: - return rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x8(v) case OpDiffWithRoundWithPrecisionFloat32x16: return 
rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat32x16(v) case OpDiffWithRoundWithPrecisionFloat32x4: @@ -1196,18 +1148,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat64x4(v) case OpDiffWithRoundWithPrecisionFloat64x8: return rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat64x8(v) - case OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x16: - return rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x16(v) - case OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x4: - return rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x4(v) - case OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x8: - return rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x8(v) - case OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x2: - return rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x2(v) - case OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x4: - return rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x4(v) - case OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x8: - return rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x8(v) case OpDiffWithTruncWithPrecisionFloat32x16: return rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat32x16(v) case OpDiffWithTruncWithPrecisionFloat32x4: @@ -1361,18 +1301,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpFloorFloat64x2(v) case OpFloorFloat64x4: return rewriteValueAMD64_OpFloorFloat64x4(v) - case OpFloorSuppressExceptionWithPrecisionFloat32x16: - return rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat32x16(v) - case OpFloorSuppressExceptionWithPrecisionFloat32x4: - return rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat32x4(v) - case OpFloorSuppressExceptionWithPrecisionFloat32x8: - return rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat32x8(v) - case OpFloorSuppressExceptionWithPrecisionFloat64x2: - return rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat64x2(v) - case OpFloorSuppressExceptionWithPrecisionFloat64x4: - return rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat64x4(v) - case OpFloorSuppressExceptionWithPrecisionFloat64x8: - return rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat64x8(v) case OpFloorWithPrecisionFloat32x16: return rewriteValueAMD64_OpFloorWithPrecisionFloat32x16(v) case OpFloorWithPrecisionFloat32x4: @@ -2037,18 +1965,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedAverageUint8x32(v) case OpMaskedAverageUint8x64: return rewriteValueAMD64_OpMaskedAverageUint8x64(v) - case OpMaskedCeilSuppressExceptionWithPrecisionFloat32x16: - return rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat32x16(v) - case OpMaskedCeilSuppressExceptionWithPrecisionFloat32x4: - return rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat32x4(v) - case OpMaskedCeilSuppressExceptionWithPrecisionFloat32x8: - return rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat32x8(v) - case OpMaskedCeilSuppressExceptionWithPrecisionFloat64x2: - return rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat64x2(v) - case OpMaskedCeilSuppressExceptionWithPrecisionFloat64x4: - return rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat64x4(v) - case OpMaskedCeilSuppressExceptionWithPrecisionFloat64x8: - return rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat64x8(v) case 
OpMaskedCeilWithPrecisionFloat32x16: return rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat32x16(v) case OpMaskedCeilWithPrecisionFloat32x4: @@ -2061,18 +1977,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat64x4(v) case OpMaskedCeilWithPrecisionFloat64x8: return rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat64x8(v) - case OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16: - return rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16(v) - case OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4: - return rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4(v) - case OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8: - return rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8(v) - case OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2: - return rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2(v) - case OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4: - return rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4(v) - case OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8: - return rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8(v) case OpMaskedDiffWithCeilWithPrecisionFloat32x16: return rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat32x16(v) case OpMaskedDiffWithCeilWithPrecisionFloat32x4: @@ -2085,18 +1989,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat64x4(v) case OpMaskedDiffWithCeilWithPrecisionFloat64x8: return rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat64x8(v) - case OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16: - return rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16(v) - case OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4: - return rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4(v) - case OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8: - return rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8(v) - case OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2: - return rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2(v) - case OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4: - return rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4(v) - case OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8: - return rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8(v) case OpMaskedDiffWithFloorWithPrecisionFloat32x16: return rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat32x16(v) case OpMaskedDiffWithFloorWithPrecisionFloat32x4: @@ -2109,18 +2001,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat64x4(v) case OpMaskedDiffWithFloorWithPrecisionFloat64x8: return rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat64x8(v) - case OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16: - return rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16(v) - case OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4: - return rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4(v) - case OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8: - return 
rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8(v) - case OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2: - return rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2(v) - case OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4: - return rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4(v) - case OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8: - return rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8(v) case OpMaskedDiffWithRoundWithPrecisionFloat32x16: return rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat32x16(v) case OpMaskedDiffWithRoundWithPrecisionFloat32x4: @@ -2133,18 +2013,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat64x4(v) case OpMaskedDiffWithRoundWithPrecisionFloat64x8: return rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat64x8(v) - case OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16: - return rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16(v) - case OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4: - return rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4(v) - case OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8: - return rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8(v) - case OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2: - return rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2(v) - case OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4: - return rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4(v) - case OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8: - return rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8(v) case OpMaskedDiffWithTruncWithPrecisionFloat32x16: return rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat32x16(v) case OpMaskedDiffWithTruncWithPrecisionFloat32x4: @@ -2229,18 +2097,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedEqualUint8x32(v) case OpMaskedEqualUint8x64: return rewriteValueAMD64_OpMaskedEqualUint8x64(v) - case OpMaskedFloorSuppressExceptionWithPrecisionFloat32x16: - return rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat32x16(v) - case OpMaskedFloorSuppressExceptionWithPrecisionFloat32x4: - return rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat32x4(v) - case OpMaskedFloorSuppressExceptionWithPrecisionFloat32x8: - return rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat32x8(v) - case OpMaskedFloorSuppressExceptionWithPrecisionFloat64x2: - return rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat64x2(v) - case OpMaskedFloorSuppressExceptionWithPrecisionFloat64x4: - return rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat64x4(v) - case OpMaskedFloorSuppressExceptionWithPrecisionFloat64x8: - return rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat64x8(v) case OpMaskedFloorWithPrecisionFloat32x16: return rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat32x16(v) case OpMaskedFloorWithPrecisionFloat32x4: @@ -2997,18 +2853,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedRotateRightUint64x4(v) case OpMaskedRotateRightUint64x8: return rewriteValueAMD64_OpMaskedRotateRightUint64x8(v) - case 
OpMaskedRoundSuppressExceptionWithPrecisionFloat32x16: - return rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat32x16(v) - case OpMaskedRoundSuppressExceptionWithPrecisionFloat32x4: - return rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat32x4(v) - case OpMaskedRoundSuppressExceptionWithPrecisionFloat32x8: - return rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat32x8(v) - case OpMaskedRoundSuppressExceptionWithPrecisionFloat64x2: - return rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat64x2(v) - case OpMaskedRoundSuppressExceptionWithPrecisionFloat64x4: - return rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat64x4(v) - case OpMaskedRoundSuppressExceptionWithPrecisionFloat64x8: - return rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat64x8(v) case OpMaskedRoundWithPrecisionFloat32x16: return rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat32x16(v) case OpMaskedRoundWithPrecisionFloat32x4: @@ -3447,18 +3291,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedSubUint8x32(v) case OpMaskedSubUint8x64: return rewriteValueAMD64_OpMaskedSubUint8x64(v) - case OpMaskedTruncSuppressExceptionWithPrecisionFloat32x16: - return rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat32x16(v) - case OpMaskedTruncSuppressExceptionWithPrecisionFloat32x4: - return rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat32x4(v) - case OpMaskedTruncSuppressExceptionWithPrecisionFloat32x8: - return rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat32x8(v) - case OpMaskedTruncSuppressExceptionWithPrecisionFloat64x2: - return rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat64x2(v) - case OpMaskedTruncSuppressExceptionWithPrecisionFloat64x4: - return rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat64x4(v) - case OpMaskedTruncSuppressExceptionWithPrecisionFloat64x8: - return rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat64x8(v) case OpMaskedTruncWithPrecisionFloat32x16: return rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat32x16(v) case OpMaskedTruncWithPrecisionFloat32x4: @@ -4375,18 +4207,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpRoundFloat64x2(v) case OpRoundFloat64x4: return rewriteValueAMD64_OpRoundFloat64x4(v) - case OpRoundSuppressExceptionWithPrecisionFloat32x16: - return rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat32x16(v) - case OpRoundSuppressExceptionWithPrecisionFloat32x4: - return rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat32x4(v) - case OpRoundSuppressExceptionWithPrecisionFloat32x8: - return rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat32x8(v) - case OpRoundSuppressExceptionWithPrecisionFloat64x2: - return rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat64x2(v) - case OpRoundSuppressExceptionWithPrecisionFloat64x4: - return rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat64x4(v) - case OpRoundSuppressExceptionWithPrecisionFloat64x8: - return rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat64x8(v) case OpRoundToEven: return rewriteValueAMD64_OpRoundToEven(v) case OpRoundWithPrecisionFloat32x16: @@ -5267,18 +5087,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpTruncFloat64x2(v) case OpTruncFloat64x4: return rewriteValueAMD64_OpTruncFloat64x4(v) - case OpTruncSuppressExceptionWithPrecisionFloat32x16: - return 
rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat32x16(v) - case OpTruncSuppressExceptionWithPrecisionFloat32x4: - return rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat32x4(v) - case OpTruncSuppressExceptionWithPrecisionFloat32x8: - return rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat32x8(v) - case OpTruncSuppressExceptionWithPrecisionFloat64x2: - return rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat64x2(v) - case OpTruncSuppressExceptionWithPrecisionFloat64x4: - return rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat64x4(v) - case OpTruncSuppressExceptionWithPrecisionFloat64x8: - return rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat64x8(v) case OpTruncWithPrecisionFloat32x16: return rewriteValueAMD64_OpTruncWithPrecisionFloat32x16(v) case OpTruncWithPrecisionFloat32x4: @@ -28733,84 +28541,6 @@ func rewriteValueAMD64_OpCeilFloat64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (CeilSuppressExceptionWithPrecisionFloat32x16 [a] x) - // result: (VRNDSCALEPS512 [a+10] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPS512) - v.AuxInt = int8ToAuxInt(a + 10) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (CeilSuppressExceptionWithPrecisionFloat32x4 [a] x) - // result: (VRNDSCALEPS128 [a+10] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPS128) - v.AuxInt = int8ToAuxInt(a + 10) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (CeilSuppressExceptionWithPrecisionFloat32x8 [a] x) - // result: (VRNDSCALEPS256 [a+10] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPS256) - v.AuxInt = int8ToAuxInt(a + 10) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (CeilSuppressExceptionWithPrecisionFloat64x2 [a] x) - // result: (VRNDSCALEPD128 [a+10] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPD128) - v.AuxInt = int8ToAuxInt(a + 10) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (CeilSuppressExceptionWithPrecisionFloat64x4 [a] x) - // result: (VRNDSCALEPD256 [a+10] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPD256) - v.AuxInt = int8ToAuxInt(a + 10) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (CeilSuppressExceptionWithPrecisionFloat64x8 [a] x) - // result: (VRNDSCALEPD512 [a+10] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPD512) - v.AuxInt = int8ToAuxInt(a + 10) - v.AddArg(x) - return true - } -} func rewriteValueAMD64_OpCeilWithPrecisionFloat32x16(v *Value) bool { v_0 := v.Args[0] // match: (CeilWithPrecisionFloat32x16 [a] x) @@ -30022,84 +29752,6 @@ func rewriteValueAMD64_OpCtz8NonZero(v *Value) bool { } return false } -func rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithCeilSuppressExceptionWithPrecisionFloat32x16 [a] x) - // result: 
(VREDUCEPS512 [a+10] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS512) - v.AuxInt = int8ToAuxInt(a + 10) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithCeilSuppressExceptionWithPrecisionFloat32x4 [a] x) - // result: (VREDUCEPS128 [a+10] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS128) - v.AuxInt = int8ToAuxInt(a + 10) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithCeilSuppressExceptionWithPrecisionFloat32x8 [a] x) - // result: (VREDUCEPS256 [a+10] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS256) - v.AuxInt = int8ToAuxInt(a + 10) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithCeilSuppressExceptionWithPrecisionFloat64x2 [a] x) - // result: (VREDUCEPD128 [a+10] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD128) - v.AuxInt = int8ToAuxInt(a + 10) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithCeilSuppressExceptionWithPrecisionFloat64x4 [a] x) - // result: (VREDUCEPD256 [a+10] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD256) - v.AuxInt = int8ToAuxInt(a + 10) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithCeilSuppressExceptionWithPrecisionFloat64x8 [a] x) - // result: (VREDUCEPD512 [a+10] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD512) - v.AuxInt = int8ToAuxInt(a + 10) - v.AddArg(x) - return true - } -} func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat32x16(v *Value) bool { v_0 := v.Args[0] // match: (DiffWithCeilWithPrecisionFloat32x16 [a] x) @@ -30178,84 +29830,6 @@ func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithFloorSuppressExceptionWithPrecisionFloat32x16 [a] x) - // result: (VREDUCEPS512 [a+9] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS512) - v.AuxInt = int8ToAuxInt(a + 9) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithFloorSuppressExceptionWithPrecisionFloat32x4 [a] x) - // result: (VREDUCEPS128 [a+9] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS128) - v.AuxInt = int8ToAuxInt(a + 9) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithFloorSuppressExceptionWithPrecisionFloat32x8 [a] x) - // result: (VREDUCEPS256 [a+9] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS256) - v.AuxInt = int8ToAuxInt(a + 9) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { - v_0 := v.Args[0] - // match: 
(DiffWithFloorSuppressExceptionWithPrecisionFloat64x2 [a] x) - // result: (VREDUCEPD128 [a+9] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD128) - v.AuxInt = int8ToAuxInt(a + 9) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithFloorSuppressExceptionWithPrecisionFloat64x4 [a] x) - // result: (VREDUCEPD256 [a+9] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD256) - v.AuxInt = int8ToAuxInt(a + 9) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithFloorSuppressExceptionWithPrecisionFloat64x8 [a] x) - // result: (VREDUCEPD512 [a+9] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD512) - v.AuxInt = int8ToAuxInt(a + 9) - v.AddArg(x) - return true - } -} func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat32x16(v *Value) bool { v_0 := v.Args[0] // match: (DiffWithFloorWithPrecisionFloat32x16 [a] x) @@ -30334,84 +29908,6 @@ func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithRoundSuppressExceptionWithPrecisionFloat32x16 [a] x) - // result: (VREDUCEPS512 [a+8] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS512) - v.AuxInt = int8ToAuxInt(a + 8) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithRoundSuppressExceptionWithPrecisionFloat32x4 [a] x) - // result: (VREDUCEPS128 [a+8] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS128) - v.AuxInt = int8ToAuxInt(a + 8) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithRoundSuppressExceptionWithPrecisionFloat32x8 [a] x) - // result: (VREDUCEPS256 [a+8] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS256) - v.AuxInt = int8ToAuxInt(a + 8) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithRoundSuppressExceptionWithPrecisionFloat64x2 [a] x) - // result: (VREDUCEPD128 [a+8] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD128) - v.AuxInt = int8ToAuxInt(a + 8) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithRoundSuppressExceptionWithPrecisionFloat64x4 [a] x) - // result: (VREDUCEPD256 [a+8] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD256) - v.AuxInt = int8ToAuxInt(a + 8) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithRoundSuppressExceptionWithPrecisionFloat64x8 [a] x) - // result: (VREDUCEPD512 [a+8] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD512) - v.AuxInt = int8ToAuxInt(a + 8) - v.AddArg(x) - return true - } -} func 
rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat32x16(v *Value) bool { v_0 := v.Args[0] // match: (DiffWithRoundWithPrecisionFloat32x16 [a] x) @@ -30490,84 +29986,6 @@ func rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithTruncSuppressExceptionWithPrecisionFloat32x16 [a] x) - // result: (VREDUCEPS512 [a+11] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS512) - v.AuxInt = int8ToAuxInt(a + 11) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithTruncSuppressExceptionWithPrecisionFloat32x4 [a] x) - // result: (VREDUCEPS128 [a+11] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS128) - v.AuxInt = int8ToAuxInt(a + 11) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithTruncSuppressExceptionWithPrecisionFloat32x8 [a] x) - // result: (VREDUCEPS256 [a+11] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS256) - v.AuxInt = int8ToAuxInt(a + 11) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithTruncSuppressExceptionWithPrecisionFloat64x2 [a] x) - // result: (VREDUCEPD128 [a+11] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD128) - v.AuxInt = int8ToAuxInt(a + 11) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithTruncSuppressExceptionWithPrecisionFloat64x4 [a] x) - // result: (VREDUCEPD256 [a+11] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD256) - v.AuxInt = int8ToAuxInt(a + 11) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithTruncSuppressExceptionWithPrecisionFloat64x8 [a] x) - // result: (VREDUCEPD512 [a+11] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD512) - v.AuxInt = int8ToAuxInt(a + 11) - v.AddArg(x) - return true - } -} func rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat32x16(v *Value) bool { v_0 := v.Args[0] // match: (DiffWithTruncWithPrecisionFloat32x16 [a] x) @@ -31393,84 +30811,6 @@ func rewriteValueAMD64_OpFloorFloat64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (FloorSuppressExceptionWithPrecisionFloat32x16 [a] x) - // result: (VRNDSCALEPS512 [a+9] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPS512) - v.AuxInt = int8ToAuxInt(a + 9) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (FloorSuppressExceptionWithPrecisionFloat32x4 [a] x) - // result: (VRNDSCALEPS128 [a+9] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPS128) - v.AuxInt = int8ToAuxInt(a + 9) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat32x8(v 
*Value) bool { - v_0 := v.Args[0] - // match: (FloorSuppressExceptionWithPrecisionFloat32x8 [a] x) - // result: (VRNDSCALEPS256 [a+9] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPS256) - v.AuxInt = int8ToAuxInt(a + 9) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (FloorSuppressExceptionWithPrecisionFloat64x2 [a] x) - // result: (VRNDSCALEPD128 [a+9] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPD128) - v.AuxInt = int8ToAuxInt(a + 9) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (FloorSuppressExceptionWithPrecisionFloat64x4 [a] x) - // result: (VRNDSCALEPD256 [a+9] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPD256) - v.AuxInt = int8ToAuxInt(a + 9) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (FloorSuppressExceptionWithPrecisionFloat64x8 [a] x) - // result: (VRNDSCALEPD512 [a+9] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPD512) - v.AuxInt = int8ToAuxInt(a + 9) - v.AddArg(x) - return true - } -} func rewriteValueAMD64_OpFloorWithPrecisionFloat32x16(v *Value) bool { v_0 := v.Args[0] // match: (FloorWithPrecisionFloat32x16 [a] x) @@ -36695,114 +36035,6 @@ func rewriteValueAMD64_OpMaskedAverageUint8x64(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedCeilSuppressExceptionWithPrecisionFloat32x16 [a] x mask) - // result: (VRNDSCALEPSMasked512 [a+10] x (VPMOVVec32x16ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 10) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedCeilSuppressExceptionWithPrecisionFloat32x4 [a] x mask) - // result: (VRNDSCALEPSMasked128 [a+10] x (VPMOVVec32x4ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 10) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedCeilSuppressExceptionWithPrecisionFloat32x8 [a] x mask) - // result: (VRNDSCALEPSMasked256 [a+10] x (VPMOVVec32x8ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 10) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedCeilSuppressExceptionWithPrecisionFloat64x2 [a] x mask) - // result: (VRNDSCALEPDMasked128 [a+10] x (VPMOVVec64x2ToM mask)) - 
for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 10) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedCeilSuppressExceptionWithPrecisionFloat64x4 [a] x mask) - // result: (VRNDSCALEPDMasked256 [a+10] x (VPMOVVec64x4ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 10) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedCeilSuppressExceptionWithPrecisionFloat64x8 [a] x mask) - // result: (VRNDSCALEPDMasked512 [a+10] x (VPMOVVec64x8ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 10) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} func rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -36911,114 +36143,6 @@ func rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16 [a] x mask) - // result: (VREDUCEPSMasked512 [a+10] x (VPMOVVec32x16ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 10) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4 [a] x mask) - // result: (VREDUCEPSMasked128 [a+10] x (VPMOVVec32x4ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 10) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8 [a] x mask) - // result: (VREDUCEPSMasked256 [a+10] x (VPMOVVec32x8ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 10) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2 [a] x mask) - // result: (VREDUCEPDMasked128 [a+10] x 
(VPMOVVec64x2ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 10) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4 [a] x mask) - // result: (VREDUCEPDMasked256 [a+10] x (VPMOVVec64x4ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 10) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8 [a] x mask) - // result: (VREDUCEPDMasked512 [a+10] x (VPMOVVec64x8ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 10) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} func rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -37127,114 +36251,6 @@ func rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat64x8(v *Value) bool return true } } -func rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16 [a] x mask) - // result: (VREDUCEPSMasked512 [a+9] x (VPMOVVec32x16ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 9) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4 [a] x mask) - // result: (VREDUCEPSMasked128 [a+9] x (VPMOVVec32x4ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 9) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8 [a] x mask) - // result: (VREDUCEPSMasked256 [a+9] x (VPMOVVec32x8ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 9) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: 
(MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2 [a] x mask) - // result: (VREDUCEPDMasked128 [a+9] x (VPMOVVec64x2ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 9) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4 [a] x mask) - // result: (VREDUCEPDMasked256 [a+9] x (VPMOVVec64x4ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 9) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8 [a] x mask) - // result: (VREDUCEPDMasked512 [a+9] x (VPMOVVec64x8ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 9) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} func rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -37343,114 +36359,6 @@ func rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat64x8(v *Value) boo return true } } -func rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16 [a] x mask) - // result: (VREDUCEPSMasked512 [a+8] x (VPMOVVec32x16ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 8) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4 [a] x mask) - // result: (VREDUCEPSMasked128 [a+8] x (VPMOVVec32x4ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 8) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8 [a] x mask) - // result: (VREDUCEPSMasked256 [a+8] x (VPMOVVec32x8ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 8) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { - 
v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2 [a] x mask) - // result: (VREDUCEPDMasked128 [a+8] x (VPMOVVec64x2ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 8) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4 [a] x mask) - // result: (VREDUCEPDMasked256 [a+8] x (VPMOVVec64x4ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 8) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8 [a] x mask) - // result: (VREDUCEPDMasked512 [a+8] x (VPMOVVec64x8ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 8) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} func rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -37559,114 +36467,6 @@ func rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat64x8(v *Value) boo return true } } -func rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16 [a] x mask) - // result: (VREDUCEPSMasked512 [a+11] x (VPMOVVec32x16ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 11) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4 [a] x mask) - // result: (VREDUCEPSMasked128 [a+11] x (VPMOVVec32x4ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 11) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8 [a] x mask) - // result: (VREDUCEPSMasked256 [a+11] x (VPMOVVec32x8ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 11) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func 
rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2 [a] x mask) - // result: (VREDUCEPDMasked128 [a+11] x (VPMOVVec64x2ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 11) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4 [a] x mask) - // result: (VREDUCEPDMasked256 [a+11] x (VPMOVVec64x4ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 11) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8 [a] x mask) - // result: (VREDUCEPDMasked512 [a+11] x (VPMOVVec64x8ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 11) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} func rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -38543,114 +37343,6 @@ func rewriteValueAMD64_OpMaskedEqualUint8x64(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFloorSuppressExceptionWithPrecisionFloat32x16 [a] x mask) - // result: (VRNDSCALEPSMasked512 [a+9] x (VPMOVVec32x16ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 9) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFloorSuppressExceptionWithPrecisionFloat32x4 [a] x mask) - // result: (VRNDSCALEPSMasked128 [a+9] x (VPMOVVec32x4ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 9) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFloorSuppressExceptionWithPrecisionFloat32x8 [a] x mask) - // result: (VRNDSCALEPSMasked256 [a+9] x (VPMOVVec32x8ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 9) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return 
true - } -} -func rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFloorSuppressExceptionWithPrecisionFloat64x2 [a] x mask) - // result: (VRNDSCALEPDMasked128 [a+9] x (VPMOVVec64x2ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 9) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFloorSuppressExceptionWithPrecisionFloat64x4 [a] x mask) - // result: (VRNDSCALEPDMasked256 [a+9] x (VPMOVVec64x4ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 9) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFloorSuppressExceptionWithPrecisionFloat64x8 [a] x mask) - // result: (VRNDSCALEPDMasked512 [a+9] x (VPMOVVec64x8ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 9) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} func rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -46085,114 +44777,6 @@ func rewriteValueAMD64_OpMaskedRotateRightUint64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedRoundSuppressExceptionWithPrecisionFloat32x16 [a] x mask) - // result: (VRNDSCALEPSMasked512 [a+8] x (VPMOVVec32x16ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 8) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedRoundSuppressExceptionWithPrecisionFloat32x4 [a] x mask) - // result: (VRNDSCALEPSMasked128 [a+8] x (VPMOVVec32x4ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 8) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedRoundSuppressExceptionWithPrecisionFloat32x8 [a] x mask) - // result: (VRNDSCALEPSMasked256 [a+8] x (VPMOVVec32x8ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 8) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func 
rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedRoundSuppressExceptionWithPrecisionFloat64x2 [a] x mask) - // result: (VRNDSCALEPDMasked128 [a+8] x (VPMOVVec64x2ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 8) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedRoundSuppressExceptionWithPrecisionFloat64x4 [a] x mask) - // result: (VRNDSCALEPDMasked256 [a+8] x (VPMOVVec64x4ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 8) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedRoundSuppressExceptionWithPrecisionFloat64x8 [a] x mask) - // result: (VRNDSCALEPDMasked512 [a+8] x (VPMOVVec64x8ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 8) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} func rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -50285,114 +48869,6 @@ func rewriteValueAMD64_OpMaskedSubUint8x64(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedTruncSuppressExceptionWithPrecisionFloat32x16 [a] x mask) - // result: (VRNDSCALEPSMasked512 [a+11] x (VPMOVVec32x16ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 11) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedTruncSuppressExceptionWithPrecisionFloat32x4 [a] x mask) - // result: (VRNDSCALEPSMasked128 [a+11] x (VPMOVVec32x4ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 11) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedTruncSuppressExceptionWithPrecisionFloat32x8 [a] x mask) - // result: (VRNDSCALEPSMasked256 [a+11] x (VPMOVVec32x8ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 11) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func 
rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedTruncSuppressExceptionWithPrecisionFloat64x2 [a] x mask) - // result: (VRNDSCALEPDMasked128 [a+11] x (VPMOVVec64x2ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 11) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedTruncSuppressExceptionWithPrecisionFloat64x4 [a] x mask) - // result: (VRNDSCALEPDMasked256 [a+11] x (VPMOVVec64x4ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 11) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedTruncSuppressExceptionWithPrecisionFloat64x8 [a] x mask) - // result: (VRNDSCALEPDMasked512 [a+11] x (VPMOVVec64x8ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 11) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} func rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -52783,84 +51259,6 @@ func rewriteValueAMD64_OpRoundFloat64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (RoundSuppressExceptionWithPrecisionFloat32x16 [a] x) - // result: (VRNDSCALEPS512 [a+8] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPS512) - v.AuxInt = int8ToAuxInt(a + 8) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (RoundSuppressExceptionWithPrecisionFloat32x4 [a] x) - // result: (VRNDSCALEPS128 [a+8] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPS128) - v.AuxInt = int8ToAuxInt(a + 8) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (RoundSuppressExceptionWithPrecisionFloat32x8 [a] x) - // result: (VRNDSCALEPS256 [a+8] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPS256) - v.AuxInt = int8ToAuxInt(a + 8) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (RoundSuppressExceptionWithPrecisionFloat64x2 [a] x) - // result: (VRNDSCALEPD128 [a+8] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPD128) - v.AuxInt = int8ToAuxInt(a + 8) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (RoundSuppressExceptionWithPrecisionFloat64x4 [a] x) - // result: (VRNDSCALEPD256 [a+8] x) - for { - a := auxIntToInt8(v.AuxInt) - x 
:= v_0 - v.reset(OpAMD64VRNDSCALEPD256) - v.AuxInt = int8ToAuxInt(a + 8) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (RoundSuppressExceptionWithPrecisionFloat64x8 [a] x) - // result: (VRNDSCALEPD512 [a+8] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPD512) - v.AuxInt = int8ToAuxInt(a + 8) - v.AddArg(x) - return true - } -} func rewriteValueAMD64_OpRoundToEven(v *Value) bool { v_0 := v.Args[0] // match: (RoundToEven x) @@ -55619,84 +54017,6 @@ func rewriteValueAMD64_OpTruncFloat64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (TruncSuppressExceptionWithPrecisionFloat32x16 [a] x) - // result: (VRNDSCALEPS512 [a+11] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPS512) - v.AuxInt = int8ToAuxInt(a + 11) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (TruncSuppressExceptionWithPrecisionFloat32x4 [a] x) - // result: (VRNDSCALEPS128 [a+11] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPS128) - v.AuxInt = int8ToAuxInt(a + 11) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (TruncSuppressExceptionWithPrecisionFloat32x8 [a] x) - // result: (VRNDSCALEPS256 [a+11] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPS256) - v.AuxInt = int8ToAuxInt(a + 11) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (TruncSuppressExceptionWithPrecisionFloat64x2 [a] x) - // result: (VRNDSCALEPD128 [a+11] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPD128) - v.AuxInt = int8ToAuxInt(a + 11) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (TruncSuppressExceptionWithPrecisionFloat64x4 [a] x) - // result: (VRNDSCALEPD256 [a+11] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPD256) - v.AuxInt = int8ToAuxInt(a + 11) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (TruncSuppressExceptionWithPrecisionFloat64x8 [a] x) - // result: (VRNDSCALEPD512 [a+11] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPD512) - v.AuxInt = int8ToAuxInt(a + 11) - v.AddArg(x) - return true - } -} func rewriteValueAMD64_OpTruncWithPrecisionFloat32x16(v *Value) bool { v_0 := v.Args[0] // match: (TruncWithPrecisionFloat32x16 [a] x) diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index a7f9b9d8a3468a..903febac371eeb 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -131,60 +131,30 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float32x8.Ceil", opLen1(ssa.OpCeilFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.Ceil", opLen1(ssa.OpCeilFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.Ceil", opLen1(ssa.OpCeilFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x4.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, 
"Float64x2.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.DiffWithRoundWithPrecision", 
opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) @@ -232,12 +202,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float32x8.Floor", opLen1(ssa.OpFloorFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.Floor", opLen1(ssa.OpFloorFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.Floor", opLen1(ssa.OpFloorFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x4.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) @@ -501,60 +465,30 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint16x8.MaskedAverage", opLen3(ssa.OpMaskedAverageUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x16.MaskedAverage", opLen3(ssa.OpMaskedAverageUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x32.MaskedAverage", opLen3(ssa.OpMaskedAverageUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat32x16, 
types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, 
"Float64x8.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedDiffWithTruncSuppressExceptionWithPrecision", 
opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) @@ -597,12 +531,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.MaskedEqual", opLen3(ssa.OpMaskedEqualUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.MaskedEqual", opLen3(ssa.OpMaskedEqualUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.MaskedEqual", opLen3(ssa.OpMaskedEqualUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) @@ -981,12 +909,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) @@ -1206,12 +1128,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.MaskedSub", opLen3(ssa.OpMaskedSubUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.MaskedSub", opLen3(ssa.OpMaskedSubUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.MaskedSub", opLen3(ssa.OpMaskedSubUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) @@ -1501,12 +1417,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float32x8.Round", opLen1(ssa.OpRoundFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.Round", opLen1(ssa.OpRoundFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.Round", opLen1(ssa.OpRoundFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x4.RoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpRoundSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.RoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpRoundSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.RoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.RoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpRoundSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.RoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpRoundSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.RoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpRoundSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) @@ -1768,12 +1678,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float32x8.Trunc", opLen1(ssa.OpTruncFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.Trunc", opLen1(ssa.OpTruncFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.Trunc", opLen1(ssa.OpTruncFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x4.TruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpTruncSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.TruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpTruncSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.TruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpTruncSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.TruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpTruncSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.TruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpTruncSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.TruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpTruncSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) diff --git a/src/simd/simd_wrapped_test.go b/src/simd/simd_wrapped_test.go index 6399136fb152a9..321d3bb80a4fe1 
100644 --- a/src/simd/simd_wrapped_test.go +++ b/src/simd/simd_wrapped_test.go @@ -7946,49 +7946,34 @@ func testUint64x8UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint6 /* The operations below cannot be tested via wrappers, please test them directly */ -// CeilSuppressExceptionWithPrecision // CeilWithPrecision -// DiffWithCeilSuppressExceptionWithPrecision // DiffWithCeilWithPrecision -// DiffWithFloorSuppressExceptionWithPrecision // DiffWithFloorWithPrecision -// DiffWithRoundSuppressExceptionWithPrecision // DiffWithRoundWithPrecision -// DiffWithTruncSuppressExceptionWithPrecision // DiffWithTruncWithPrecision -// FloorSuppressExceptionWithPrecision // FloorWithPrecision // GaloisFieldAffineTransform // GaloisFieldAffineTransformInversed // GetElem -// MaskedCeilSuppressExceptionWithPrecision // MaskedCeilWithPrecision -// MaskedDiffWithCeilSuppressExceptionWithPrecision // MaskedDiffWithCeilWithPrecision -// MaskedDiffWithFloorSuppressExceptionWithPrecision // MaskedDiffWithFloorWithPrecision -// MaskedDiffWithRoundSuppressExceptionWithPrecision // MaskedDiffWithRoundWithPrecision -// MaskedDiffWithTruncSuppressExceptionWithPrecision // MaskedDiffWithTruncWithPrecision -// MaskedFloorSuppressExceptionWithPrecision // MaskedFloorWithPrecision // MaskedGaloisFieldAffineTransform // MaskedGaloisFieldAffineTransformInversed // MaskedRotateAllLeft // MaskedRotateAllRight -// MaskedRoundSuppressExceptionWithPrecision // MaskedRoundWithPrecision // MaskedShiftAllLeft // MaskedShiftAllLeftAndFillUpperFrom // MaskedShiftAllRight // MaskedShiftAllRightAndFillUpperFrom // MaskedShiftAllRightSignExtended -// MaskedTruncSuppressExceptionWithPrecision // MaskedTruncWithPrecision // RotateAllLeft // RotateAllRight -// RoundSuppressExceptionWithPrecision // RoundWithPrecision // SetElem // ShiftAllLeft @@ -7996,5 +7981,4 @@ func testUint64x8UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint6 // ShiftAllRight // ShiftAllRightAndFillUpperFrom // ShiftAllRightSignExtended -// TruncSuppressExceptionWithPrecision // TruncWithPrecision diff --git a/src/simd/stubs_amd64.go b/src/simd/stubs_amd64.go index f0db32a07d154c..f53242cd738daa 100644 --- a/src/simd/stubs_amd64.go +++ b/src/simd/stubs_amd64.go @@ -603,405 +603,181 @@ func (x Uint16x32) Average(y Uint16x32) Uint16x32 /* Ceil */ // Ceil rounds elements up to the nearest integer. -// Const Immediate = 2. // // Asm: VROUNDPS, CPU Feature: AVX func (x Float32x4) Ceil() Float32x4 // Ceil rounds elements up to the nearest integer. -// Const Immediate = 2. // // Asm: VROUNDPS, CPU Feature: AVX func (x Float32x8) Ceil() Float32x8 // Ceil rounds elements up to the nearest integer. -// Const Immediate = 2. // // Asm: VROUNDPD, CPU Feature: AVX func (x Float64x2) Ceil() Float64x2 // Ceil rounds elements up to the nearest integer. -// Const Immediate = 2. // // Asm: VROUNDPD, CPU Feature: AVX func (x Float64x4) Ceil() Float64x4 -/* CeilSuppressExceptionWithPrecision */ - -// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. -// Const Immediate = 10. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) CeilSuppressExceptionWithPrecision(imm uint8) Float32x4 - -// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. -// Const Immediate = 10. 
-// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) CeilSuppressExceptionWithPrecision(imm uint8) Float32x8 - -// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. -// Const Immediate = 10. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) CeilSuppressExceptionWithPrecision(imm uint8) Float32x16 - -// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. -// Const Immediate = 10. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) CeilSuppressExceptionWithPrecision(imm uint8) Float64x2 - -// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. -// Const Immediate = 10. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) CeilSuppressExceptionWithPrecision(imm uint8) Float64x4 - -// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. -// Const Immediate = 10. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) CeilSuppressExceptionWithPrecision(imm uint8) Float64x8 - /* CeilWithPrecision */ // CeilWithPrecision rounds elements up with specified precision, masked. -// Const Immediate = 2. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) CeilWithPrecision(imm uint8) Float32x4 // CeilWithPrecision rounds elements up with specified precision, masked. -// Const Immediate = 2. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) CeilWithPrecision(imm uint8) Float32x8 // CeilWithPrecision rounds elements up with specified precision, masked. -// Const Immediate = 2. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) CeilWithPrecision(imm uint8) Float32x16 // CeilWithPrecision rounds elements up with specified precision, masked. -// Const Immediate = 2. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) CeilWithPrecision(imm uint8) Float64x2 // CeilWithPrecision rounds elements up with specified precision, masked. -// Const Immediate = 2. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) CeilWithPrecision(imm uint8) Float64x4 // CeilWithPrecision rounds elements up with specified precision, masked. -// Const Immediate = 2. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) CeilWithPrecision(imm uint8) Float64x8 -/* DiffWithCeilSuppressExceptionWithPrecision */ - -// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. -// Const Immediate = 10. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithCeilSuppressExceptionWithPrecision(imm uint8) Float32x4 - -// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. -// Const Immediate = 10. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithCeilSuppressExceptionWithPrecision(imm uint8) Float32x8 - -// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. -// Const Immediate = 10. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithCeilSuppressExceptionWithPrecision(imm uint8) Float32x16 - -// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. -// Const Immediate = 10. 
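[Illustrative sketch, not part of the patch: how the Ceil and CeilWithPrecision stubs declared above would be called. It assumes the package imports as plain "simd" (matching its location under src/simd) and a toolchain built with GOEXPERIMENT=simd; vectors are taken as parameters so no constructor API is assumed.]

package simdsketch

import "simd"

// ceilings rounds x up two ways, using only methods declared above:
// Ceil rounds to the nearest integer (VROUNDPS, AVX), while
// CeilWithPrecision rounds at the precision selected by its immediate
// (VRNDSCALEPS, AVX-512). The value 2 here is only an example immediate.
func ceilings(x simd.Float32x4) (whole, withPrec simd.Float32x4) {
	whole = x.Ceil()
	withPrec = x.CeilWithPrecision(2)
	return
}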
-// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithCeilSuppressExceptionWithPrecision(imm uint8) Float64x2 - -// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. -// Const Immediate = 10. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithCeilSuppressExceptionWithPrecision(imm uint8) Float64x4 - -// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. -// Const Immediate = 10. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithCeilSuppressExceptionWithPrecision(imm uint8) Float64x8 - /* DiffWithCeilWithPrecision */ // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// Const Immediate = 2. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) DiffWithCeilWithPrecision(imm uint8) Float32x4 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// Const Immediate = 2. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) DiffWithCeilWithPrecision(imm uint8) Float32x8 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// Const Immediate = 2. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) DiffWithCeilWithPrecision(imm uint8) Float32x16 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// Const Immediate = 2. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) DiffWithCeilWithPrecision(imm uint8) Float64x2 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// Const Immediate = 2. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) DiffWithCeilWithPrecision(imm uint8) Float64x4 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// Const Immediate = 2. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) DiffWithCeilWithPrecision(imm uint8) Float64x8 -/* DiffWithFloorSuppressExceptionWithPrecision */ - -// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. -// Const Immediate = 9. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithFloorSuppressExceptionWithPrecision(imm uint8) Float32x4 - -// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. -// Const Immediate = 9. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithFloorSuppressExceptionWithPrecision(imm uint8) Float32x8 - -// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. -// Const Immediate = 9. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithFloorSuppressExceptionWithPrecision(imm uint8) Float32x16 - -// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. -// Const Immediate = 9. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithFloorSuppressExceptionWithPrecision(imm uint8) Float64x2 - -// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. -// Const Immediate = 9. 
-// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithFloorSuppressExceptionWithPrecision(imm uint8) Float64x4 - -// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. -// Const Immediate = 9. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithFloorSuppressExceptionWithPrecision(imm uint8) Float64x8 - /* DiffWithFloorWithPrecision */ // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// Const Immediate = 1. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) DiffWithFloorWithPrecision(imm uint8) Float32x4 // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// Const Immediate = 1. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) DiffWithFloorWithPrecision(imm uint8) Float32x8 // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// Const Immediate = 1. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) DiffWithFloorWithPrecision(imm uint8) Float32x16 // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// Const Immediate = 1. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) DiffWithFloorWithPrecision(imm uint8) Float64x2 // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// Const Immediate = 1. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) DiffWithFloorWithPrecision(imm uint8) Float64x4 // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// Const Immediate = 1. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) DiffWithFloorWithPrecision(imm uint8) Float64x8 -/* DiffWithRoundSuppressExceptionWithPrecision */ - -// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. -// Const Immediate = 8. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithRoundSuppressExceptionWithPrecision(imm uint8) Float32x4 - -// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. -// Const Immediate = 8. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithRoundSuppressExceptionWithPrecision(imm uint8) Float32x8 - -// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. -// Const Immediate = 8. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithRoundSuppressExceptionWithPrecision(imm uint8) Float32x16 - -// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. -// Const Immediate = 8. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithRoundSuppressExceptionWithPrecision(imm uint8) Float64x2 - -// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. -// Const Immediate = 8. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithRoundSuppressExceptionWithPrecision(imm uint8) Float64x4 - -// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. 
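[Illustrative sketch under the same assumptions as the earlier one. Reading the doc comment above as "x minus its value floored at the given precision" (the VREDUCEPD behavior) is an assumption of this sketch, not something the patch states explicitly.]

package simdsketch

import "simd"

// fracParts uses the Float64x2.DiffWithFloorWithPrecision stub declared
// above. If the method returns x minus its floored value at the given
// precision, then precision 0 leaves only the fractional part of each lane.
func fracParts(x simd.Float64x2) simd.Float64x2 {
	return x.DiffWithFloorWithPrecision(0)
}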
-// Const Immediate = 8. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithRoundSuppressExceptionWithPrecision(imm uint8) Float64x8 - /* DiffWithRoundWithPrecision */ // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// Const Immediate = 0. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) DiffWithRoundWithPrecision(imm uint8) Float32x4 // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// Const Immediate = 0. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) DiffWithRoundWithPrecision(imm uint8) Float32x8 // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// Const Immediate = 0. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) DiffWithRoundWithPrecision(imm uint8) Float32x16 // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// Const Immediate = 0. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) DiffWithRoundWithPrecision(imm uint8) Float64x2 // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// Const Immediate = 0. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) DiffWithRoundWithPrecision(imm uint8) Float64x4 // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// Const Immediate = 0. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) DiffWithRoundWithPrecision(imm uint8) Float64x8 -/* DiffWithTruncSuppressExceptionWithPrecision */ - -// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithTruncSuppressExceptionWithPrecision(imm uint8) Float32x4 - -// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithTruncSuppressExceptionWithPrecision(imm uint8) Float32x8 - -// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithTruncSuppressExceptionWithPrecision(imm uint8) Float32x16 - -// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithTruncSuppressExceptionWithPrecision(imm uint8) Float64x2 - -// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithTruncSuppressExceptionWithPrecision(imm uint8) Float64x4 - -// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. -// Const Immediate = 11. 
-// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithTruncSuppressExceptionWithPrecision(imm uint8) Float64x8 - /* DiffWithTruncWithPrecision */ // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// Const Immediate = 3. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) DiffWithTruncWithPrecision(imm uint8) Float32x4 // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// Const Immediate = 3. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) DiffWithTruncWithPrecision(imm uint8) Float32x8 // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// Const Immediate = 3. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) DiffWithTruncWithPrecision(imm uint8) Float32x16 // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// Const Immediate = 3. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) DiffWithTruncWithPrecision(imm uint8) Float64x2 // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// Const Immediate = 3. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) DiffWithTruncWithPrecision(imm uint8) Float64x4 // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// Const Immediate = 3. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) DiffWithTruncWithPrecision(imm uint8) Float64x8 @@ -1041,7 +817,6 @@ func (x Float64x8) Div(y Float64x8) Float64x8 /* DotProdBroadcast */ // DotProdBroadcast multiplies all elements and broadcasts the sum. -// Const Immediate = 127. // // Asm: VDPPD, CPU Feature: AVX func (x Float64x2) DotProdBroadcast(y Float64x2) Float64x2 @@ -1049,181 +824,151 @@ func (x Float64x2) DotProdBroadcast(y Float64x2) Float64x2 /* Equal */ // Equal compares for equality. -// Const Immediate = 0. // // Asm: VPCMPEQB, CPU Feature: AVX func (x Int8x16) Equal(y Int8x16) Mask8x16 // Equal compares for equality. -// Const Immediate = 0. // // Asm: VPCMPEQB, CPU Feature: AVX2 func (x Int8x32) Equal(y Int8x32) Mask8x32 // Equal compares for equality. -// Const Immediate = 0. // // Asm: VPCMPEQW, CPU Feature: AVX func (x Int16x8) Equal(y Int16x8) Mask16x8 // Equal compares for equality. -// Const Immediate = 0. // // Asm: VPCMPEQW, CPU Feature: AVX2 func (x Int16x16) Equal(y Int16x16) Mask16x16 // Equal compares for equality. -// Const Immediate = 0. // // Asm: VPCMPEQD, CPU Feature: AVX func (x Int32x4) Equal(y Int32x4) Mask32x4 // Equal compares for equality. -// Const Immediate = 0. // // Asm: VPCMPEQD, CPU Feature: AVX2 func (x Int32x8) Equal(y Int32x8) Mask32x8 // Equal compares for equality. -// Const Immediate = 0. // // Asm: VPCMPEQQ, CPU Feature: AVX func (x Int64x2) Equal(y Int64x2) Mask64x2 // Equal compares for equality. -// Const Immediate = 0. // // Asm: VPCMPEQQ, CPU Feature: AVX2 func (x Int64x4) Equal(y Int64x4) Mask64x4 // Equal compares for equality. -// Const Immediate = 0. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x4) Equal(y Float32x4) Mask32x4 // Equal compares for equality. -// Const Immediate = 0. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x8) Equal(y Float32x8) Mask32x8 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) Equal(y Float32x16) Mask32x16 // Equal compares for equality. 
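[Illustrative sketch, same assumptions as above, for the DotProdBroadcast stub just declared.]

package simdsketch

import "simd"

// dot2 calls Float64x2.DotProdBroadcast, which per its doc comment
// multiplies the lanes of x and y pairwise and broadcasts the sum
// (VDPPD), so both lanes of the result hold the two-element dot product.
func dot2(x, y simd.Float64x2) simd.Float64x2 {
	return x.DotProdBroadcast(y)
}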
-// Const Immediate = 0. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x2) Equal(y Float64x2) Mask64x2 // Equal compares for equality. -// Const Immediate = 0. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x4) Equal(y Float64x4) Mask64x4 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) Equal(y Float64x8) Mask64x8 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) Equal(y Int8x64) Mask8x64 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) Equal(y Int16x32) Mask16x32 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) Equal(y Int32x16) Mask32x16 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) Equal(y Int64x8) Mask64x8 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) Equal(y Uint8x16) Mask8x16 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) Equal(y Uint8x32) Mask8x32 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) Equal(y Uint8x64) Mask8x64 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) Equal(y Uint16x8) Mask16x8 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) Equal(y Uint16x16) Mask16x16 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) Equal(y Uint16x32) Mask16x32 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) Equal(y Uint32x4) Mask32x4 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) Equal(y Uint32x8) Mask32x8 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) Equal(y Uint32x16) Mask32x16 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) Equal(y Uint64x2) Mask64x2 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) Equal(y Uint64x4) Mask64x4 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) Equal(y Uint64x8) Mask64x8 @@ -1231,101 +976,53 @@ func (x Uint64x8) Equal(y Uint64x8) Mask64x8 /* Floor */ // Floor rounds elements down to the nearest integer. -// Const Immediate = 1. // // Asm: VROUNDPS, CPU Feature: AVX func (x Float32x4) Floor() Float32x4 // Floor rounds elements down to the nearest integer. -// Const Immediate = 1. // // Asm: VROUNDPS, CPU Feature: AVX func (x Float32x8) Floor() Float32x8 // Floor rounds elements down to the nearest integer. -// Const Immediate = 1. // // Asm: VROUNDPD, CPU Feature: AVX func (x Float64x2) Floor() Float64x2 // Floor rounds elements down to the nearest integer. -// Const Immediate = 1. 
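[Illustrative sketch, same assumptions as above, showing the shape of the comparison API declared in this hunk.]

package simdsketch

import "simd"

// equalLanes shows that Equal does not return a vector of the same element
// type but a mask type (here Mask32x4), which the Masked* variants later in
// this patch accept as their final argument. Per the doc comment above,
// Int32x4.Equal lowers to VPCMPEQD on AVX.
func equalLanes(a, b simd.Int32x4) simd.Mask32x4 {
	return a.Equal(b)
}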
// // Asm: VROUNDPD, CPU Feature: AVX func (x Float64x4) Floor() Float64x4 -/* FloorSuppressExceptionWithPrecision */ - -// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. -// Const Immediate = 9. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) FloorSuppressExceptionWithPrecision(imm uint8) Float32x4 - -// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. -// Const Immediate = 9. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) FloorSuppressExceptionWithPrecision(imm uint8) Float32x8 - -// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. -// Const Immediate = 9. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) FloorSuppressExceptionWithPrecision(imm uint8) Float32x16 - -// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. -// Const Immediate = 9. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) FloorSuppressExceptionWithPrecision(imm uint8) Float64x2 - -// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. -// Const Immediate = 9. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) FloorSuppressExceptionWithPrecision(imm uint8) Float64x4 - -// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. -// Const Immediate = 9. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) FloorSuppressExceptionWithPrecision(imm uint8) Float64x8 - /* FloorWithPrecision */ // FloorWithPrecision rounds elements down with specified precision, masked. -// Const Immediate = 1. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) FloorWithPrecision(imm uint8) Float32x4 // FloorWithPrecision rounds elements down with specified precision, masked. -// Const Immediate = 1. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) FloorWithPrecision(imm uint8) Float32x8 // FloorWithPrecision rounds elements down with specified precision, masked. -// Const Immediate = 1. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) FloorWithPrecision(imm uint8) Float32x16 // FloorWithPrecision rounds elements down with specified precision, masked. -// Const Immediate = 1. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) FloorWithPrecision(imm uint8) Float64x2 // FloorWithPrecision rounds elements down with specified precision, masked. -// Const Immediate = 1. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) FloorWithPrecision(imm uint8) Float64x4 // FloorWithPrecision rounds elements down with specified precision, masked. -// Const Immediate = 1. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) FloorWithPrecision(imm uint8) Float64x8 @@ -1546,181 +1243,151 @@ func (x Uint64x2) GetElem(imm uint8) uint64 /* Greater */ // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPGTB, CPU Feature: AVX func (x Int8x16) Greater(y Int8x16) Mask8x16 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPGTB, CPU Feature: AVX2 func (x Int8x32) Greater(y Int8x32) Mask8x32 // Greater compares for greater than. -// Const Immediate = 6. 
// // Asm: VPCMPGTW, CPU Feature: AVX func (x Int16x8) Greater(y Int16x8) Mask16x8 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPGTW, CPU Feature: AVX2 func (x Int16x16) Greater(y Int16x16) Mask16x16 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPGTD, CPU Feature: AVX func (x Int32x4) Greater(y Int32x4) Mask32x4 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPGTD, CPU Feature: AVX2 func (x Int32x8) Greater(y Int32x8) Mask32x8 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPGTQ, CPU Feature: AVX2 func (x Int64x4) Greater(y Int64x4) Mask64x4 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x4) Greater(y Float32x4) Mask32x4 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x8) Greater(y Float32x8) Mask32x8 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) Greater(y Float32x16) Mask32x16 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x2) Greater(y Float64x2) Mask64x2 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x4) Greater(y Float64x4) Mask64x4 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) Greater(y Float64x8) Mask64x8 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) Greater(y Int8x64) Mask8x64 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) Greater(y Int16x32) Mask16x32 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) Greater(y Int32x16) Mask32x16 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x2) Greater(y Int64x2) Mask64x2 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) Greater(y Int64x8) Mask64x8 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) Greater(y Uint8x16) Mask8x16 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) Greater(y Uint8x32) Mask8x32 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) Greater(y Uint8x64) Mask8x64 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) Greater(y Uint16x8) Mask16x8 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) Greater(y Uint16x16) Mask16x16 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) Greater(y Uint16x32) Mask16x32 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) Greater(y Uint32x4) Mask32x4 // Greater compares for greater than. -// Const Immediate = 6. 
// // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) Greater(y Uint32x8) Mask32x8 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) Greater(y Uint32x16) Mask32x16 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) Greater(y Uint64x2) Mask64x2 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) Greater(y Uint64x4) Mask64x4 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) Greater(y Uint64x8) Mask64x8 @@ -1728,181 +1395,151 @@ func (x Uint64x8) Greater(y Uint64x8) Mask64x8 /* GreaterEqual */ // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x4) GreaterEqual(y Float32x4) Mask32x4 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x8) GreaterEqual(y Float32x8) Mask32x8 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) GreaterEqual(y Float32x16) Mask32x16 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x2) GreaterEqual(y Float64x2) Mask64x2 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x4) GreaterEqual(y Float64x4) Mask64x4 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) GreaterEqual(y Float64x8) Mask64x8 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x16) GreaterEqual(y Int8x16) Mask8x16 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x32) GreaterEqual(y Int8x32) Mask8x32 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) GreaterEqual(y Int8x64) Mask8x64 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x8) GreaterEqual(y Int16x8) Mask16x8 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x16) GreaterEqual(y Int16x16) Mask16x16 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) GreaterEqual(y Int16x32) Mask16x32 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x4) GreaterEqual(y Int32x4) Mask32x4 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x8) GreaterEqual(y Int32x8) Mask32x8 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) GreaterEqual(y Int32x16) Mask32x16 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. 
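[Illustrative sketch, same assumptions as above, for the Greater comparisons declared in this hunk.]

package simdsketch

import "simd"

// greaterMasks compares signed and unsigned 32x4 vectors. The doc comments
// above note the asymmetry: the signed form lowers to VPCMPGTD (AVX), while
// the unsigned form needs the AVX-512 VPCMPUD encoding.
func greaterMasks(a, b simd.Int32x4, c, d simd.Uint32x4) (simd.Mask32x4, simd.Mask32x4) {
	return a.Greater(b), c.Greater(d)
}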
// // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x2) GreaterEqual(y Int64x2) Mask64x2 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x4) GreaterEqual(y Int64x4) Mask64x4 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) GreaterEqual(y Int64x8) Mask64x8 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) GreaterEqual(y Uint8x16) Mask8x16 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) GreaterEqual(y Uint8x32) Mask8x32 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) GreaterEqual(y Uint8x64) Mask8x64 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) GreaterEqual(y Uint16x8) Mask16x8 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) GreaterEqual(y Uint16x16) Mask16x16 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) GreaterEqual(y Uint16x32) Mask16x32 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) GreaterEqual(y Uint32x4) Mask32x4 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) GreaterEqual(y Uint32x8) Mask32x8 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) GreaterEqual(y Uint32x16) Mask32x16 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) GreaterEqual(y Uint64x2) Mask64x2 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) GreaterEqual(y Uint64x4) Mask64x4 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) GreaterEqual(y Uint64x8) Mask64x8 @@ -1910,37 +1547,31 @@ func (x Uint64x8) GreaterEqual(y Uint64x8) Mask64x8 /* IsNan */ // IsNan checks if elements are NaN. Use as x.IsNan(x). -// Const Immediate = 3. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x4) IsNan(y Float32x4) Mask32x4 // IsNan checks if elements are NaN. Use as x.IsNan(x). -// Const Immediate = 3. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x8) IsNan(y Float32x8) Mask32x8 // IsNan checks if elements are NaN. Use as x.IsNan(x). -// Const Immediate = 3. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) IsNan(y Float32x16) Mask32x16 // IsNan checks if elements are NaN. Use as x.IsNan(x). -// Const Immediate = 3. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x2) IsNan(y Float64x2) Mask64x2 // IsNan checks if elements are NaN. Use as x.IsNan(x). -// Const Immediate = 3. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x4) IsNan(y Float64x4) Mask64x4 // IsNan checks if elements are NaN. Use as x.IsNan(x). -// Const Immediate = 3. 
// // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) IsNan(y Float64x8) Mask64x8 @@ -1948,181 +1579,151 @@ func (x Float64x8) IsNan(y Float64x8) Mask64x8 /* Less */ // Less compares for less than. -// Const Immediate = 1. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x4) Less(y Float32x4) Mask32x4 // Less compares for less than. -// Const Immediate = 1. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x8) Less(y Float32x8) Mask32x8 // Less compares for less than. -// Const Immediate = 1. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) Less(y Float32x16) Mask32x16 // Less compares for less than. -// Const Immediate = 1. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x2) Less(y Float64x2) Mask64x2 // Less compares for less than. -// Const Immediate = 1. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x4) Less(y Float64x4) Mask64x4 // Less compares for less than. -// Const Immediate = 1. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) Less(y Float64x8) Mask64x8 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x16) Less(y Int8x16) Mask8x16 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x32) Less(y Int8x32) Mask8x32 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) Less(y Int8x64) Mask8x64 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x8) Less(y Int16x8) Mask16x8 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x16) Less(y Int16x16) Mask16x16 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) Less(y Int16x32) Mask16x32 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x4) Less(y Int32x4) Mask32x4 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x8) Less(y Int32x8) Mask32x8 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) Less(y Int32x16) Mask32x16 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x2) Less(y Int64x2) Mask64x2 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x4) Less(y Int64x4) Mask64x4 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) Less(y Int64x8) Mask64x8 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) Less(y Uint8x16) Mask8x16 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) Less(y Uint8x32) Mask8x32 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) Less(y Uint8x64) Mask8x64 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) Less(y Uint16x8) Mask16x8 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) Less(y Uint16x16) Mask16x16 // Less compares for less than. -// Const Immediate = 1. 
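[Illustrative sketch, same assumptions as above, following the calling convention stated in the IsNan doc comment.]

package simdsketch

import "simd"

// nanLanes calls IsNan as x.IsNan(x), exactly as the doc comment above
// suggests; the returned mask has the NaN lanes set (VCMPPD on AVX for
// Float64x4).
func nanLanes(x simd.Float64x4) simd.Mask64x4 {
	return x.IsNan(x)
}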
// // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) Less(y Uint16x32) Mask16x32 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) Less(y Uint32x4) Mask32x4 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) Less(y Uint32x8) Mask32x8 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) Less(y Uint32x16) Mask32x16 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) Less(y Uint64x2) Mask64x2 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) Less(y Uint64x4) Mask64x4 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) Less(y Uint64x8) Mask64x8 @@ -2130,181 +1731,151 @@ func (x Uint64x8) Less(y Uint64x8) Mask64x8 /* LessEqual */ // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x4) LessEqual(y Float32x4) Mask32x4 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x8) LessEqual(y Float32x8) Mask32x8 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) LessEqual(y Float32x16) Mask32x16 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x2) LessEqual(y Float64x2) Mask64x2 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x4) LessEqual(y Float64x4) Mask64x4 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) LessEqual(y Float64x8) Mask64x8 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x16) LessEqual(y Int8x16) Mask8x16 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x32) LessEqual(y Int8x32) Mask8x32 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) LessEqual(y Int8x64) Mask8x64 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x8) LessEqual(y Int16x8) Mask16x8 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x16) LessEqual(y Int16x16) Mask16x16 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) LessEqual(y Int16x32) Mask16x32 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x4) LessEqual(y Int32x4) Mask32x4 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x8) LessEqual(y Int32x8) Mask32x8 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) LessEqual(y Int32x16) Mask32x16 // LessEqual compares for less than or equal. -// Const Immediate = 2. 
// // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x2) LessEqual(y Int64x2) Mask64x2 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x4) LessEqual(y Int64x4) Mask64x4 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) LessEqual(y Int64x8) Mask64x8 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) LessEqual(y Uint8x16) Mask8x16 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) LessEqual(y Uint8x32) Mask8x32 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) LessEqual(y Uint8x64) Mask8x64 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) LessEqual(y Uint16x8) Mask16x8 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) LessEqual(y Uint16x16) Mask16x16 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) LessEqual(y Uint16x32) Mask16x32 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) LessEqual(y Uint32x4) Mask32x4 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) LessEqual(y Uint32x8) Mask32x8 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) LessEqual(y Uint32x16) Mask32x16 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) LessEqual(y Uint64x2) Mask64x2 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) LessEqual(y Uint64x4) Mask64x4 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) LessEqual(y Uint64x8) Mask64x8 @@ -2803,382 +2374,162 @@ func (x Uint16x16) MaskedAverage(y Uint16x16, z Mask16x16) Uint16x16 // Asm: VPAVGW, CPU Feature: AVX512EVEX func (x Uint16x32) MaskedAverage(y Uint16x32, z Mask16x32) Uint16x32 -/* MaskedCeilSuppressExceptionWithPrecision */ - -// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. -// Const Immediate = 10. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 - -// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. -// Const Immediate = 10. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 - -// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. -// Const Immediate = 10. 
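[Illustrative sketch, same assumptions as above, combining the GreaterEqual and LessEqual stubs declared in the preceding hunks. How masks are combined is not shown in this part of the patch, so the two masks are simply returned.]

package simdsketch

import "simd"

// inRange performs a per-lane bounds check: one mask marks lanes at or above
// lo, the other marks lanes at or below hi.
func inRange(x, lo, hi simd.Float32x4) (aboveLo, belowHi simd.Mask32x4) {
	return x.GreaterEqual(lo), x.LessEqual(hi)
}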
-// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 - -// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. -// Const Immediate = 10. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 - -// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. -// Const Immediate = 10. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 - -// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. -// Const Immediate = 10. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 - /* MaskedCeilWithPrecision */ // CeilWithPrecision rounds elements up with specified precision, masked. -// Const Immediate = 2. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedCeilWithPrecision(imm uint8, y Mask32x4) Float32x4 // CeilWithPrecision rounds elements up with specified precision, masked. -// Const Immediate = 2. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedCeilWithPrecision(imm uint8, y Mask32x8) Float32x8 // CeilWithPrecision rounds elements up with specified precision, masked. -// Const Immediate = 2. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedCeilWithPrecision(imm uint8, y Mask32x16) Float32x16 // CeilWithPrecision rounds elements up with specified precision, masked. -// Const Immediate = 2. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedCeilWithPrecision(imm uint8, y Mask64x2) Float64x2 // CeilWithPrecision rounds elements up with specified precision, masked. -// Const Immediate = 2. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedCeilWithPrecision(imm uint8, y Mask64x4) Float64x4 // CeilWithPrecision rounds elements up with specified precision, masked. -// Const Immediate = 2. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedCeilWithPrecision(imm uint8, y Mask64x8) Float64x8 -/* MaskedDiffWithCeilSuppressExceptionWithPrecision */ - -// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. -// Const Immediate = 10. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 - -// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. -// Const Immediate = 10. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 - -// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. -// Const Immediate = 10. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 - -// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. -// Const Immediate = 10. 
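[Illustrative sketch, same assumptions as above, for the masked variant declared in this hunk. Treating x.Equal(x) as an ordered compare that is set only on non-NaN lanes, and the mask as selecting which lanes the operation applies to, are assumptions of this sketch.]

package simdsketch

import "simd"

// maskedCeil feeds a comparison mask into MaskedCeilWithPrecision: the mask
// from x.Equal(x) is intended to restrict the rounding to non-NaN lanes.
func maskedCeil(x simd.Float32x4, prec uint8) simd.Float32x4 {
	notNaN := x.Equal(x)
	return x.MaskedCeilWithPrecision(prec, notNaN)
}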
-// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 - -// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. -// Const Immediate = 10. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 - -// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. -// Const Immediate = 10. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 - /* MaskedDiffWithCeilWithPrecision */ // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// Const Immediate = 2. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask32x4) Float32x4 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// Const Immediate = 2. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask32x8) Float32x8 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// Const Immediate = 2. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask32x16) Float32x16 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// Const Immediate = 2. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask64x2) Float64x2 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// Const Immediate = 2. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask64x4) Float64x4 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// Const Immediate = 2. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask64x8) Float64x8 -/* MaskedDiffWithFloorSuppressExceptionWithPrecision */ - -// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. -// Const Immediate = 9. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 - -// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. -// Const Immediate = 9. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 - -// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. -// Const Immediate = 9. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 - -// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. -// Const Immediate = 9. 
-// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 - -// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. -// Const Immediate = 9. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 - -// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. -// Const Immediate = 9. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 - /* MaskedDiffWithFloorWithPrecision */ // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// Const Immediate = 1. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask32x4) Float32x4 // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// Const Immediate = 1. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask32x8) Float32x8 // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// Const Immediate = 1. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask32x16) Float32x16 // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// Const Immediate = 1. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask64x2) Float64x2 // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// Const Immediate = 1. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask64x4) Float64x4 // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// Const Immediate = 1. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask64x8) Float64x8 -/* MaskedDiffWithRoundSuppressExceptionWithPrecision */ - -// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. -// Const Immediate = 8. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 - -// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. -// Const Immediate = 8. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 - -// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. -// Const Immediate = 8. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 - -// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. -// Const Immediate = 8. 
-// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 - -// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. -// Const Immediate = 8. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 - -// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. -// Const Immediate = 8. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 - /* MaskedDiffWithRoundWithPrecision */ // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// Const Immediate = 0. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask32x4) Float32x4 // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// Const Immediate = 0. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask32x8) Float32x8 // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// Const Immediate = 0. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask32x16) Float32x16 // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// Const Immediate = 0. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask64x2) Float64x2 // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// Const Immediate = 0. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask64x4) Float64x4 // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// Const Immediate = 0. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask64x8) Float64x8 -/* MaskedDiffWithTruncSuppressExceptionWithPrecision */ - -// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 - -// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 - -// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 - -// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. -// Const Immediate = 11. 
-// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 - -// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 - -// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 - /* MaskedDiffWithTruncWithPrecision */ // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// Const Immediate = 3. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask32x4) Float32x4 // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// Const Immediate = 3. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask32x8) Float32x8 // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// Const Immediate = 3. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask32x16) Float32x16 // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// Const Immediate = 3. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask64x2) Float64x2 // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// Const Immediate = 3. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask64x4) Float64x4 // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// Const Immediate = 3. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask64x8) Float64x8 @@ -3218,257 +2569,183 @@ func (x Float64x8) MaskedDiv(y Float64x8, z Mask64x8) Float64x8 /* MaskedEqual */ // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedEqual(y Float32x4, z Mask32x4) Mask32x4 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedEqual(y Float32x8, z Mask32x8) Mask32x8 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedEqual(y Float32x16, z Mask32x16) Mask32x16 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedEqual(y Float64x2, z Mask64x2) Mask64x2 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedEqual(y Float64x4, z Mask64x4) Mask64x4 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedEqual(y Float64x8, z Mask64x8) Mask64x8 // Equal compares for equality, masked. 
-// Const Immediate = 0. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x16) MaskedEqual(y Int8x16, z Mask8x16) Mask8x16 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x32) MaskedEqual(y Int8x32, z Mask8x32) Mask8x32 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) MaskedEqual(y Int8x64, z Mask8x64) Mask8x64 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x8) MaskedEqual(y Int16x8, z Mask16x8) Mask16x8 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x16) MaskedEqual(y Int16x16, z Mask16x16) Mask16x16 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) MaskedEqual(y Int16x32, z Mask16x32) Mask16x32 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x4) MaskedEqual(y Int32x4, z Mask32x4) Mask32x4 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x8) MaskedEqual(y Int32x8, z Mask32x8) Mask32x8 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) MaskedEqual(y Int32x16, z Mask32x16) Mask32x16 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x2) MaskedEqual(y Int64x2, z Mask64x2) Mask64x2 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x4) MaskedEqual(y Int64x4, z Mask64x4) Mask64x4 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) MaskedEqual(y Int64x8, z Mask64x8) Mask64x8 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) MaskedEqual(y Uint8x16, z Mask8x16) Mask8x16 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) MaskedEqual(y Uint8x32, z Mask8x32) Mask8x32 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) MaskedEqual(y Uint8x64, z Mask8x64) Mask8x64 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) MaskedEqual(y Uint16x8, z Mask16x8) Mask16x8 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) MaskedEqual(y Uint16x16, z Mask16x16) Mask16x16 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) MaskedEqual(y Uint16x32, z Mask16x32) Mask16x32 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) MaskedEqual(y Uint32x4, z Mask32x4) Mask32x4 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) MaskedEqual(y Uint32x8, z Mask32x8) Mask32x8 // Equal compares for equality, masked. -// Const Immediate = 0. 
// // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) MaskedEqual(y Uint32x16, z Mask32x16) Mask32x16 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) MaskedEqual(y Uint64x2, z Mask64x2) Mask64x2 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) MaskedEqual(y Uint64x4, z Mask64x4) Mask64x4 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaskedEqual(y Uint64x8, z Mask64x8) Mask64x8 -/* MaskedFloorSuppressExceptionWithPrecision */ - -// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. -// Const Immediate = 9. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 - -// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. -// Const Immediate = 9. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 - -// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. -// Const Immediate = 9. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 - -// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. -// Const Immediate = 9. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 - -// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. -// Const Immediate = 9. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 - -// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. -// Const Immediate = 9. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 - /* MaskedFloorWithPrecision */ // FloorWithPrecision rounds elements down with specified precision, masked. -// Const Immediate = 1. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedFloorWithPrecision(imm uint8, y Mask32x4) Float32x4 // FloorWithPrecision rounds elements down with specified precision, masked. -// Const Immediate = 1. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedFloorWithPrecision(imm uint8, y Mask32x8) Float32x8 // FloorWithPrecision rounds elements down with specified precision, masked. -// Const Immediate = 1. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedFloorWithPrecision(imm uint8, y Mask32x16) Float32x16 // FloorWithPrecision rounds elements down with specified precision, masked. -// Const Immediate = 1. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedFloorWithPrecision(imm uint8, y Mask64x2) Float64x2 // FloorWithPrecision rounds elements down with specified precision, masked. -// Const Immediate = 1. 
// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedFloorWithPrecision(imm uint8, y Mask64x4) Float64x4 // FloorWithPrecision rounds elements down with specified precision, masked. -// Const Immediate = 1. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedFloorWithPrecision(imm uint8, y Mask64x8) Float64x8 @@ -3647,181 +2924,151 @@ func (x Uint8x64) MaskedGaloisFieldMul(y Uint8x64, z Mask8x64) Uint8x64 /* MaskedGreater */ // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedGreater(y Float32x4, z Mask32x4) Mask32x4 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedGreater(y Float32x8, z Mask32x8) Mask32x8 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedGreater(y Float32x16, z Mask32x16) Mask32x16 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedGreater(y Float64x2, z Mask64x2) Mask64x2 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedGreater(y Float64x4, z Mask64x4) Mask64x4 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedGreater(y Float64x8, z Mask64x8) Mask64x8 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x16) MaskedGreater(y Int8x16, z Mask8x16) Mask8x16 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x32) MaskedGreater(y Int8x32, z Mask8x32) Mask8x32 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) MaskedGreater(y Int8x64, z Mask8x64) Mask8x64 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x8) MaskedGreater(y Int16x8, z Mask16x8) Mask16x8 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x16) MaskedGreater(y Int16x16, z Mask16x16) Mask16x16 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) MaskedGreater(y Int16x32, z Mask16x32) Mask16x32 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x4) MaskedGreater(y Int32x4, z Mask32x4) Mask32x4 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x8) MaskedGreater(y Int32x8, z Mask32x8) Mask32x8 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) MaskedGreater(y Int32x16, z Mask32x16) Mask32x16 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x2) MaskedGreater(y Int64x2, z Mask64x2) Mask64x2 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x4) MaskedGreater(y Int64x4, z Mask64x4) Mask64x4 // Greater compares for greater than. -// Const Immediate = 6. 
// // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) MaskedGreater(y Int64x8, z Mask64x8) Mask64x8 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) MaskedGreater(y Uint8x16, z Mask8x16) Mask8x16 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) MaskedGreater(y Uint8x32, z Mask8x32) Mask8x32 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) MaskedGreater(y Uint8x64, z Mask8x64) Mask8x64 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) MaskedGreater(y Uint16x8, z Mask16x8) Mask16x8 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) MaskedGreater(y Uint16x16, z Mask16x16) Mask16x16 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) MaskedGreater(y Uint16x32, z Mask16x32) Mask16x32 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) MaskedGreater(y Uint32x4, z Mask32x4) Mask32x4 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) MaskedGreater(y Uint32x8, z Mask32x8) Mask32x8 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) MaskedGreater(y Uint32x16, z Mask32x16) Mask32x16 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) MaskedGreater(y Uint64x2, z Mask64x2) Mask64x2 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) MaskedGreater(y Uint64x4, z Mask64x4) Mask64x4 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaskedGreater(y Uint64x8, z Mask64x8) Mask64x8 @@ -3829,181 +3076,151 @@ func (x Uint64x8) MaskedGreater(y Uint64x8, z Mask64x8) Mask64x8 /* MaskedGreaterEqual */ // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedGreaterEqual(y Float32x4, z Mask32x4) Mask32x4 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedGreaterEqual(y Float32x8, z Mask32x8) Mask32x8 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedGreaterEqual(y Float32x16, z Mask32x16) Mask32x16 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedGreaterEqual(y Float64x2, z Mask64x2) Mask64x2 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedGreaterEqual(y Float64x4, z Mask64x4) Mask64x4 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedGreaterEqual(y Float64x8, z Mask64x8) Mask64x8 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. 
// // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x16) MaskedGreaterEqual(y Int8x16, z Mask8x16) Mask8x16 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x32) MaskedGreaterEqual(y Int8x32, z Mask8x32) Mask8x32 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) MaskedGreaterEqual(y Int8x64, z Mask8x64) Mask8x64 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x8) MaskedGreaterEqual(y Int16x8, z Mask16x8) Mask16x8 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x16) MaskedGreaterEqual(y Int16x16, z Mask16x16) Mask16x16 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) MaskedGreaterEqual(y Int16x32, z Mask16x32) Mask16x32 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x4) MaskedGreaterEqual(y Int32x4, z Mask32x4) Mask32x4 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x8) MaskedGreaterEqual(y Int32x8, z Mask32x8) Mask32x8 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) MaskedGreaterEqual(y Int32x16, z Mask32x16) Mask32x16 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x2) MaskedGreaterEqual(y Int64x2, z Mask64x2) Mask64x2 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x4) MaskedGreaterEqual(y Int64x4, z Mask64x4) Mask64x4 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) MaskedGreaterEqual(y Int64x8, z Mask64x8) Mask64x8 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) MaskedGreaterEqual(y Uint8x16, z Mask8x16) Mask8x16 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) MaskedGreaterEqual(y Uint8x32, z Mask8x32) Mask8x32 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) MaskedGreaterEqual(y Uint8x64, z Mask8x64) Mask8x64 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) MaskedGreaterEqual(y Uint16x8, z Mask16x8) Mask16x8 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) MaskedGreaterEqual(y Uint16x16, z Mask16x16) Mask16x16 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) MaskedGreaterEqual(y Uint16x32, z Mask16x32) Mask16x32 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. 
// // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) MaskedGreaterEqual(y Uint32x4, z Mask32x4) Mask32x4 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) MaskedGreaterEqual(y Uint32x8, z Mask32x8) Mask32x8 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) MaskedGreaterEqual(y Uint32x16, z Mask32x16) Mask32x16 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) MaskedGreaterEqual(y Uint64x2, z Mask64x2) Mask64x2 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) MaskedGreaterEqual(y Uint64x4, z Mask64x4) Mask64x4 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaskedGreaterEqual(y Uint64x8, z Mask64x8) Mask64x8 @@ -4011,37 +3228,31 @@ func (x Uint64x8) MaskedGreaterEqual(y Uint64x8, z Mask64x8) Mask64x8 /* MaskedIsNan */ // IsNan checks if elements are NaN. Use as x.IsNan(x). -// Const Immediate = 3. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedIsNan(y Float32x4, z Mask32x4) Mask32x4 // IsNan checks if elements are NaN. Use as x.IsNan(x). -// Const Immediate = 3. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedIsNan(y Float32x8, z Mask32x8) Mask32x8 // IsNan checks if elements are NaN. Use as x.IsNan(x). -// Const Immediate = 3. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedIsNan(y Float32x16, z Mask32x16) Mask32x16 // IsNan checks if elements are NaN. Use as x.IsNan(x). -// Const Immediate = 3. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedIsNan(y Float64x2, z Mask64x2) Mask64x2 // IsNan checks if elements are NaN. Use as x.IsNan(x). -// Const Immediate = 3. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedIsNan(y Float64x4, z Mask64x4) Mask64x4 // IsNan checks if elements are NaN. Use as x.IsNan(x). -// Const Immediate = 3. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedIsNan(y Float64x8, z Mask64x8) Mask64x8 @@ -4049,181 +3260,151 @@ func (x Float64x8) MaskedIsNan(y Float64x8, z Mask64x8) Mask64x8 /* MaskedLess */ // Less compares for less than. -// Const Immediate = 1. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedLess(y Float32x4, z Mask32x4) Mask32x4 // Less compares for less than. -// Const Immediate = 1. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedLess(y Float32x8, z Mask32x8) Mask32x8 // Less compares for less than. -// Const Immediate = 1. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedLess(y Float32x16, z Mask32x16) Mask32x16 // Less compares for less than. -// Const Immediate = 1. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedLess(y Float64x2, z Mask64x2) Mask64x2 // Less compares for less than. -// Const Immediate = 1. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedLess(y Float64x4, z Mask64x4) Mask64x4 // Less compares for less than. -// Const Immediate = 1. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedLess(y Float64x8, z Mask64x8) Mask64x8 // Less compares for less than. -// Const Immediate = 1. 
// // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x16) MaskedLess(y Int8x16, z Mask8x16) Mask8x16 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x32) MaskedLess(y Int8x32, z Mask8x32) Mask8x32 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) MaskedLess(y Int8x64, z Mask8x64) Mask8x64 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x8) MaskedLess(y Int16x8, z Mask16x8) Mask16x8 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x16) MaskedLess(y Int16x16, z Mask16x16) Mask16x16 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) MaskedLess(y Int16x32, z Mask16x32) Mask16x32 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x4) MaskedLess(y Int32x4, z Mask32x4) Mask32x4 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x8) MaskedLess(y Int32x8, z Mask32x8) Mask32x8 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) MaskedLess(y Int32x16, z Mask32x16) Mask32x16 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x2) MaskedLess(y Int64x2, z Mask64x2) Mask64x2 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x4) MaskedLess(y Int64x4, z Mask64x4) Mask64x4 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) MaskedLess(y Int64x8, z Mask64x8) Mask64x8 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) MaskedLess(y Uint8x16, z Mask8x16) Mask8x16 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) MaskedLess(y Uint8x32, z Mask8x32) Mask8x32 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) MaskedLess(y Uint8x64, z Mask8x64) Mask8x64 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) MaskedLess(y Uint16x8, z Mask16x8) Mask16x8 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) MaskedLess(y Uint16x16, z Mask16x16) Mask16x16 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) MaskedLess(y Uint16x32, z Mask16x32) Mask16x32 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) MaskedLess(y Uint32x4, z Mask32x4) Mask32x4 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) MaskedLess(y Uint32x8, z Mask32x8) Mask32x8 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) MaskedLess(y Uint32x16, z Mask32x16) Mask32x16 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) MaskedLess(y Uint64x2, z Mask64x2) Mask64x2 // Less compares for less than. 
-// Const Immediate = 1. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) MaskedLess(y Uint64x4, z Mask64x4) Mask64x4 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaskedLess(y Uint64x8, z Mask64x8) Mask64x8 @@ -4231,181 +3412,151 @@ func (x Uint64x8) MaskedLess(y Uint64x8, z Mask64x8) Mask64x8 /* MaskedLessEqual */ // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedLessEqual(y Float32x4, z Mask32x4) Mask32x4 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedLessEqual(y Float32x8, z Mask32x8) Mask32x8 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedLessEqual(y Float32x16, z Mask32x16) Mask32x16 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedLessEqual(y Float64x2, z Mask64x2) Mask64x2 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedLessEqual(y Float64x4, z Mask64x4) Mask64x4 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedLessEqual(y Float64x8, z Mask64x8) Mask64x8 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x16) MaskedLessEqual(y Int8x16, z Mask8x16) Mask8x16 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x32) MaskedLessEqual(y Int8x32, z Mask8x32) Mask8x32 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) MaskedLessEqual(y Int8x64, z Mask8x64) Mask8x64 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x8) MaskedLessEqual(y Int16x8, z Mask16x8) Mask16x8 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x16) MaskedLessEqual(y Int16x16, z Mask16x16) Mask16x16 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) MaskedLessEqual(y Int16x32, z Mask16x32) Mask16x32 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x4) MaskedLessEqual(y Int32x4, z Mask32x4) Mask32x4 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x8) MaskedLessEqual(y Int32x8, z Mask32x8) Mask32x8 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) MaskedLessEqual(y Int32x16, z Mask32x16) Mask32x16 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x2) MaskedLessEqual(y Int64x2, z Mask64x2) Mask64x2 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x4) MaskedLessEqual(y Int64x4, z Mask64x4) Mask64x4 // LessEqual compares for less than or equal. 
-// Const Immediate = 2. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) MaskedLessEqual(y Int64x8, z Mask64x8) Mask64x8 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) MaskedLessEqual(y Uint8x16, z Mask8x16) Mask8x16 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) MaskedLessEqual(y Uint8x32, z Mask8x32) Mask8x32 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) MaskedLessEqual(y Uint8x64, z Mask8x64) Mask8x64 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) MaskedLessEqual(y Uint16x8, z Mask16x8) Mask16x8 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) MaskedLessEqual(y Uint16x16, z Mask16x16) Mask16x16 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) MaskedLessEqual(y Uint16x32, z Mask16x32) Mask16x32 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) MaskedLessEqual(y Uint32x4, z Mask32x4) Mask32x4 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) MaskedLessEqual(y Uint32x8, z Mask32x8) Mask32x8 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) MaskedLessEqual(y Uint32x16, z Mask32x16) Mask32x16 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) MaskedLessEqual(y Uint64x2, z Mask64x2) Mask64x2 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) MaskedLessEqual(y Uint64x4, z Mask64x4) Mask64x4 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaskedLessEqual(y Uint64x8, z Mask64x8) Mask64x8 @@ -4898,181 +4049,151 @@ func (x Int64x8) MaskedMulLow(y Int64x8, z Mask64x8) Int64x8 /* MaskedNotEqual */ // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedNotEqual(y Float32x4, z Mask32x4) Mask32x4 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedNotEqual(y Float32x8, z Mask32x8) Mask32x8 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedNotEqual(y Float32x16, z Mask32x16) Mask32x16 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedNotEqual(y Float64x2, z Mask64x2) Mask64x2 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedNotEqual(y Float64x4, z Mask64x4) Mask64x4 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedNotEqual(y Float64x8, z Mask64x8) Mask64x8 // NotEqual compares for inequality. -// Const Immediate = 4. 
// // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x16) MaskedNotEqual(y Int8x16, z Mask8x16) Mask8x16 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x32) MaskedNotEqual(y Int8x32, z Mask8x32) Mask8x32 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) MaskedNotEqual(y Int8x64, z Mask8x64) Mask8x64 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x8) MaskedNotEqual(y Int16x8, z Mask16x8) Mask16x8 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x16) MaskedNotEqual(y Int16x16, z Mask16x16) Mask16x16 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) MaskedNotEqual(y Int16x32, z Mask16x32) Mask16x32 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x4) MaskedNotEqual(y Int32x4, z Mask32x4) Mask32x4 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x8) MaskedNotEqual(y Int32x8, z Mask32x8) Mask32x8 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) MaskedNotEqual(y Int32x16, z Mask32x16) Mask32x16 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x2) MaskedNotEqual(y Int64x2, z Mask64x2) Mask64x2 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x4) MaskedNotEqual(y Int64x4, z Mask64x4) Mask64x4 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) MaskedNotEqual(y Int64x8, z Mask64x8) Mask64x8 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) MaskedNotEqual(y Uint8x16, z Mask8x16) Mask8x16 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) MaskedNotEqual(y Uint8x32, z Mask8x32) Mask8x32 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) MaskedNotEqual(y Uint8x64, z Mask8x64) Mask8x64 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) MaskedNotEqual(y Uint16x8, z Mask16x8) Mask16x8 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) MaskedNotEqual(y Uint16x16, z Mask16x16) Mask16x16 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) MaskedNotEqual(y Uint16x32, z Mask16x32) Mask16x32 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) MaskedNotEqual(y Uint32x4, z Mask32x4) Mask32x4 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) MaskedNotEqual(y Uint32x8, z Mask32x8) Mask32x8 // NotEqual compares for inequality. -// Const Immediate = 4. 
// // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) MaskedNotEqual(y Uint32x16, z Mask32x16) Mask32x16 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) MaskedNotEqual(y Uint64x2, z Mask64x2) Mask64x2 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) MaskedNotEqual(y Uint64x4, z Mask64x4) Mask64x4 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaskedNotEqual(y Uint64x8, z Mask64x8) Mask64x8 @@ -5576,78 +4697,34 @@ func (x Uint64x4) MaskedRotateRight(y Uint64x4, z Mask64x4) Uint64x4 // Asm: VPRORVQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaskedRotateRight(y Uint64x8, z Mask64x8) Uint64x8 -/* MaskedRoundSuppressExceptionWithPrecision */ - -// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. -// Const Immediate = 8. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 - -// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. -// Const Immediate = 8. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 - -// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. -// Const Immediate = 8. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 - -// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. -// Const Immediate = 8. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 - -// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. -// Const Immediate = 8. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 - -// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. -// Const Immediate = 8. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 - /* MaskedRoundWithPrecision */ // RoundWithPrecision rounds elements with specified precision. -// Const Immediate = 0. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedRoundWithPrecision(imm uint8, y Mask32x4) Float32x4 // RoundWithPrecision rounds elements with specified precision. -// Const Immediate = 0. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedRoundWithPrecision(imm uint8, y Mask32x8) Float32x8 // RoundWithPrecision rounds elements with specified precision. -// Const Immediate = 0. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedRoundWithPrecision(imm uint8, y Mask32x16) Float32x16 // RoundWithPrecision rounds elements with specified precision. -// Const Immediate = 0. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedRoundWithPrecision(imm uint8, y Mask64x2) Float64x2 // RoundWithPrecision rounds elements with specified precision. -// Const Immediate = 0. 
// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedRoundWithPrecision(imm uint8, y Mask64x4) Float64x4 // RoundWithPrecision rounds elements with specified precision. -// Const Immediate = 0. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedRoundWithPrecision(imm uint8, y Mask64x8) Float64x8 @@ -6826,78 +5903,34 @@ func (x Uint64x4) MaskedSub(y Uint64x4, z Mask64x4) Uint64x4 // Asm: VPSUBQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaskedSub(y Uint64x8, z Mask64x8) Uint64x8 -/* MaskedTruncSuppressExceptionWithPrecision */ - -// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 - -// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 - -// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 - -// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 - -// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 - -// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 - /* MaskedTruncWithPrecision */ // TruncWithPrecision truncates elements with specified precision. -// Const Immediate = 3. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedTruncWithPrecision(imm uint8, y Mask32x4) Float32x4 // TruncWithPrecision truncates elements with specified precision. -// Const Immediate = 3. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedTruncWithPrecision(imm uint8, y Mask32x8) Float32x8 // TruncWithPrecision truncates elements with specified precision. -// Const Immediate = 3. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedTruncWithPrecision(imm uint8, y Mask32x16) Float32x16 // TruncWithPrecision truncates elements with specified precision. -// Const Immediate = 3. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedTruncWithPrecision(imm uint8, y Mask64x2) Float64x2 // TruncWithPrecision truncates elements with specified precision. -// Const Immediate = 3. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedTruncWithPrecision(imm uint8, y Mask64x4) Float64x4 // TruncWithPrecision truncates elements with specified precision. -// Const Immediate = 3. 
// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedTruncWithPrecision(imm uint8, y Mask64x8) Float64x8 @@ -7538,181 +6571,151 @@ func (x Int64x8) MulLow(y Int64x8) Int64x8 /* NotEqual */ // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x4) NotEqual(y Float32x4) Mask32x4 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x8) NotEqual(y Float32x8) Mask32x8 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) NotEqual(y Float32x16) Mask32x16 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x2) NotEqual(y Float64x2) Mask64x2 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x4) NotEqual(y Float64x4) Mask64x4 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) NotEqual(y Float64x8) Mask64x8 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x16) NotEqual(y Int8x16) Mask8x16 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x32) NotEqual(y Int8x32) Mask8x32 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) NotEqual(y Int8x64) Mask8x64 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x8) NotEqual(y Int16x8) Mask16x8 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x16) NotEqual(y Int16x16) Mask16x16 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) NotEqual(y Int16x32) Mask16x32 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x4) NotEqual(y Int32x4) Mask32x4 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x8) NotEqual(y Int32x8) Mask32x8 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) NotEqual(y Int32x16) Mask32x16 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x2) NotEqual(y Int64x2) Mask64x2 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x4) NotEqual(y Int64x4) Mask64x4 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) NotEqual(y Int64x8) Mask64x8 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) NotEqual(y Uint8x16) Mask8x16 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) NotEqual(y Uint8x32) Mask8x32 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) NotEqual(y Uint8x64) Mask8x64 // NotEqual compares for inequality. -// Const Immediate = 4. 
// // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) NotEqual(y Uint16x8) Mask16x8 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) NotEqual(y Uint16x16) Mask16x16 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) NotEqual(y Uint16x32) Mask16x32 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) NotEqual(y Uint32x4) Mask32x4 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) NotEqual(y Uint32x8) Mask32x8 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) NotEqual(y Uint32x16) Mask32x16 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) NotEqual(y Uint64x2) Mask64x2 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) NotEqual(y Uint64x4) Mask64x4 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) NotEqual(y Uint64x8) Mask64x8 @@ -8407,101 +7410,53 @@ func (x Uint64x8) RotateRight(y Uint64x8) Uint64x8 /* Round */ // Round rounds elements to the nearest integer. -// Const Immediate = 0. // // Asm: VROUNDPS, CPU Feature: AVX func (x Float32x4) Round() Float32x4 // Round rounds elements to the nearest integer. -// Const Immediate = 0. // // Asm: VROUNDPS, CPU Feature: AVX func (x Float32x8) Round() Float32x8 // Round rounds elements to the nearest integer. -// Const Immediate = 0. // // Asm: VROUNDPD, CPU Feature: AVX func (x Float64x2) Round() Float64x2 // Round rounds elements to the nearest integer. -// Const Immediate = 0. // // Asm: VROUNDPD, CPU Feature: AVX func (x Float64x4) Round() Float64x4 -/* RoundSuppressExceptionWithPrecision */ - -// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. -// Const Immediate = 8. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) RoundSuppressExceptionWithPrecision(imm uint8) Float32x4 - -// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. -// Const Immediate = 8. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) RoundSuppressExceptionWithPrecision(imm uint8) Float32x8 - -// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. -// Const Immediate = 8. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) RoundSuppressExceptionWithPrecision(imm uint8) Float32x16 - -// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. -// Const Immediate = 8. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) RoundSuppressExceptionWithPrecision(imm uint8) Float64x2 - -// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. -// Const Immediate = 8. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) RoundSuppressExceptionWithPrecision(imm uint8) Float64x4 - -// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. -// Const Immediate = 8. 
-// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) RoundSuppressExceptionWithPrecision(imm uint8) Float64x8 - /* RoundWithPrecision */ // RoundWithPrecision rounds elements with specified precision. -// Const Immediate = 0. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) RoundWithPrecision(imm uint8) Float32x4 // RoundWithPrecision rounds elements with specified precision. -// Const Immediate = 0. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) RoundWithPrecision(imm uint8) Float32x8 // RoundWithPrecision rounds elements with specified precision. -// Const Immediate = 0. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) RoundWithPrecision(imm uint8) Float32x16 // RoundWithPrecision rounds elements with specified precision. -// Const Immediate = 0. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) RoundWithPrecision(imm uint8) Float64x2 // RoundWithPrecision rounds elements with specified precision. -// Const Immediate = 0. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) RoundWithPrecision(imm uint8) Float64x4 // RoundWithPrecision rounds elements with specified precision. -// Const Immediate = 0. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) RoundWithPrecision(imm uint8) Float64x8 @@ -9891,101 +8846,53 @@ func (x Uint64x8) Sub(y Uint64x8) Uint64x8 /* Trunc */ // Trunc truncates elements towards zero. -// Const Immediate = 3. // // Asm: VROUNDPS, CPU Feature: AVX func (x Float32x4) Trunc() Float32x4 // Trunc truncates elements towards zero. -// Const Immediate = 3. // // Asm: VROUNDPS, CPU Feature: AVX func (x Float32x8) Trunc() Float32x8 // Trunc truncates elements towards zero. -// Const Immediate = 3. // // Asm: VROUNDPD, CPU Feature: AVX func (x Float64x2) Trunc() Float64x2 // Trunc truncates elements towards zero. -// Const Immediate = 3. // // Asm: VROUNDPD, CPU Feature: AVX func (x Float64x4) Trunc() Float64x4 -/* TruncSuppressExceptionWithPrecision */ - -// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) TruncSuppressExceptionWithPrecision(imm uint8) Float32x4 - -// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) TruncSuppressExceptionWithPrecision(imm uint8) Float32x8 - -// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) TruncSuppressExceptionWithPrecision(imm uint8) Float32x16 - -// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) TruncSuppressExceptionWithPrecision(imm uint8) Float64x2 - -// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) TruncSuppressExceptionWithPrecision(imm uint8) Float64x4 - -// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. -// Const Immediate = 11. 
-// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) TruncSuppressExceptionWithPrecision(imm uint8) Float64x8 - /* TruncWithPrecision */ // TruncWithPrecision truncates elements with specified precision. -// Const Immediate = 3. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) TruncWithPrecision(imm uint8) Float32x4 // TruncWithPrecision truncates elements with specified precision. -// Const Immediate = 3. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) TruncWithPrecision(imm uint8) Float32x8 // TruncWithPrecision truncates elements with specified precision. -// Const Immediate = 3. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) TruncWithPrecision(imm uint8) Float32x16 // TruncWithPrecision truncates elements with specified precision. -// Const Immediate = 3. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) TruncWithPrecision(imm uint8) Float64x2 // TruncWithPrecision truncates elements with specified precision. -// Const Immediate = 3. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) TruncWithPrecision(imm uint8) Float64x4 // TruncWithPrecision truncates elements with specified precision. -// Const Immediate = 3. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) TruncWithPrecision(imm uint8) Float64x8 From 0710cce6eb0d75db1fc6c45807773f40edb14d73 Mon Sep 17 00:00:00 2001 From: Austin Clements Date: Mon, 30 Jun 2025 16:42:19 -0400 Subject: [PATCH 056/139] [dev.simd] runtime: remove write barrier in xRegRestore Currently, there's a write barrier in xRegRestore when it assigns pp.xRegs.cache = gp.xRegs.state. This is bad because that gets called on the asyncPreempt return path, where we have really limited stack space, and we don't currently account for this write barrier. We can't simply mark xRegState as sys.NotInHeap because it's also embedded in runtime.p as register scratch space, and runtime.p is heap allocated. Hence, to fix this, we rename xRegState to just "xRegs" and introduce a wrapper "xRegState" type that embeds xRegs and is itself marked sys.NotInHeap. Then, anywhere we need a manually-managed pointer to register state, we use the new type. To ensure this doesn't happen again in the future, we also mark asyncPreempt2 as go:nowritebarrierrec. Change-Id: I5ff4841e55ff20047ff7d253ab659ab77aeb3391 Reviewed-on: https://go-review.googlesource.com/c/go/+/684836 Auto-Submit: Austin Clements Reviewed-by: Cherry Mui LUCI-TryBot-Result: Go LUCI --- src/runtime/mkpreempt.go | 2 +- src/runtime/preempt.go | 9 +++++++++ src/runtime/preempt_amd64.go | 2 +- src/runtime/preempt_xreg.go | 16 +++++++++++++--- 4 files changed, 24 insertions(+), 5 deletions(-) diff --git a/src/runtime/mkpreempt.go b/src/runtime/mkpreempt.go index 29e8288129f686..2bd2ef07fa8292 100644 --- a/src/runtime/mkpreempt.go +++ b/src/runtime/mkpreempt.go @@ -160,7 +160,7 @@ func writeXRegs(arch string, l *layout) { fmt.Fprintf(g.w, ` package runtime -type xRegState struct { +type xRegs struct { `) pos := 0 for _, reg := range l.regs { diff --git a/src/runtime/preempt.go b/src/runtime/preempt.go index d053747d3a4db6..22727df74eead2 100644 --- a/src/runtime/preempt.go +++ b/src/runtime/preempt.go @@ -302,7 +302,16 @@ func canPreemptM(mp *m) bool { // asyncPreempt is implemented in assembly. func asyncPreempt() +// asyncPreempt2 is the Go continuation of asyncPreempt. +// +// It must be deeply nosplit because there's untyped data on the stack from +// asyncPreempt. 
+// +// It must not have any write barriers because we need to limit the amount of +// stack it uses. +// //go:nosplit +//go:nowritebarrierrec func asyncPreempt2() { // We can't grow the stack with untyped data from asyncPreempt, so switch to // the system stack right away. diff --git a/src/runtime/preempt_amd64.go b/src/runtime/preempt_amd64.go index 44838a1df21dc2..88c0ddd34ade72 100644 --- a/src/runtime/preempt_amd64.go +++ b/src/runtime/preempt_amd64.go @@ -2,7 +2,7 @@ package runtime -type xRegState struct { +type xRegs struct { Z0 [64]byte Z1 [64]byte Z2 [64]byte diff --git a/src/runtime/preempt_xreg.go b/src/runtime/preempt_xreg.go index f0a47c15d97053..9e05455ddbb747 100644 --- a/src/runtime/preempt_xreg.go +++ b/src/runtime/preempt_xreg.go @@ -19,7 +19,17 @@ package runtime -import "unsafe" +import ( + "internal/runtime/sys" + "unsafe" +) + +// xRegState is long-lived extended register state. It is allocated off-heap and +// manually managed. +type xRegState struct { + _ sys.NotInHeap // Allocated from xRegAlloc + regs xRegs +} // xRegPerG stores extended register state while a goroutine is asynchronously // preempted. This is nil otherwise, so we can reuse a (likely small) pool of @@ -31,7 +41,7 @@ type xRegPerG struct { type xRegPerP struct { // scratch temporary per-P space where [asyncPreempt] saves the register // state before entering Go. It's quickly copied to per-G state. - scratch xRegState + scratch xRegs // cache is a 1-element allocation cache of extended register state used by // asynchronous preemption. On entry to preemption, this is used as a simple @@ -84,7 +94,7 @@ func xRegSave(gp *g) { // If we ever need to save less state (e.g., avoid saving vector registers // that aren't in use), we could have multiple allocation pools for // different size states and copy only the registers we need. - *dest = pp.xRegs.scratch + dest.regs = pp.xRegs.scratch // Save on the G. gp.xRegs.state = dest From 1ee72a15a3e893c82cc7108c49f141e824f941c2 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Tue, 1 Jul 2025 18:00:33 +0000 Subject: [PATCH 057/139] [dev.simd] internal/cpu: add GFNI feature check This CL amends HasAVX512 flag with GFNI check. This is needed because our SIMD API supports Galois Field operations. Change-Id: I3e957b7b2215d2b7b6b8a7a0ca3e2e60d453b2e5 Reviewed-on: https://go-review.googlesource.com/c/go/+/685295 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/internal/cpu/cpu.go | 54 +++++++++++++++++++------------------ src/internal/cpu/cpu_x86.go | 5 +++- src/simd/cpu.go | 5 ++++ src/simd/simd_test.go | 8 +++--- 4 files changed, 41 insertions(+), 31 deletions(-) diff --git a/src/internal/cpu/cpu.go b/src/internal/cpu/cpu.go index a93eb54ddf0cd7..1eeb580711439e 100644 --- a/src/internal/cpu/cpu.go +++ b/src/internal/cpu/cpu.go @@ -26,32 +26,34 @@ var CacheLineSize uintptr = CacheLinePadSize // in addition to the cpuid feature bit being set. // The struct is padded to avoid false sharing. 
var X86 struct { - _ CacheLinePad - HasAES bool - HasADX bool - HasAVX bool - HasAVX2 bool - HasAVX512 bool // Virtual feature: F+CD+BW+DQ+VL - HasAVX512F bool - HasAVX512CD bool - HasAVX512BW bool - HasAVX512DQ bool - HasAVX512VL bool - HasBMI1 bool - HasBMI2 bool - HasERMS bool - HasFSRM bool - HasFMA bool - HasOSXSAVE bool - HasPCLMULQDQ bool - HasPOPCNT bool - HasRDTSCP bool - HasSHA bool - HasSSE3 bool - HasSSSE3 bool - HasSSE41 bool - HasSSE42 bool - _ CacheLinePad + _ CacheLinePad + HasAES bool + HasADX bool + HasAVX bool + HasAVX2 bool + HasAVX512GFNI bool // Virtual feature: F+CD+BW+DQ+VL+GFNI + HasAVX512 bool // Virtual feature: F+CD+BW+DQ+VL + HasAVX512F bool + HasAVX512CD bool + HasAVX512BW bool + HasAVX512DQ bool + HasAVX512VL bool + HasBMI1 bool + HasBMI2 bool + HasERMS bool + HasFSRM bool + HasFMA bool + HasGFNI bool + HasOSXSAVE bool + HasPCLMULQDQ bool + HasPOPCNT bool + HasRDTSCP bool + HasSHA bool + HasSSE3 bool + HasSSSE3 bool + HasSSE41 bool + HasSSE42 bool + _ CacheLinePad } // The booleans in ARM contain the correspondingly named cpu feature bit. diff --git a/src/internal/cpu/cpu_x86.go b/src/internal/cpu/cpu_x86.go index 7d6f40c1326759..152a08cdbfd11a 100644 --- a/src/internal/cpu/cpu_x86.go +++ b/src/internal/cpu/cpu_x86.go @@ -22,6 +22,7 @@ const ( cpuid_SSE3 = 1 << 0 cpuid_PCLMULQDQ = 1 << 1 cpuid_SSSE3 = 1 << 9 + cpuid_GFNI = 1 << 8 cpuid_FMA = 1 << 12 cpuid_SSE41 = 1 << 19 cpuid_SSE42 = 1 << 20 @@ -143,7 +144,7 @@ func doinit() { return } - _, ebx7, _, edx7 := cpuid(7, 0) + _, ebx7, ecx7, edx7 := cpuid(7, 0) X86.HasBMI1 = isSet(ebx7, cpuid_BMI1) X86.HasAVX2 = isSet(ebx7, cpuid_AVX2) && osSupportsAVX X86.HasBMI2 = isSet(ebx7, cpuid_BMI2) @@ -160,6 +161,7 @@ func doinit() { } X86.HasFSRM = isSet(edx7, cpuid_FSRM) + X86.HasGFNI = isSet(ecx7, cpuid_GFNI) var maxExtendedInformation uint32 maxExtendedInformation, _, _, _ = cpuid(0x80000000, 0) @@ -180,6 +182,7 @@ func doinit() { // it. GOAMD64=v4 also implies exactly this set, and these are all // included in AVX10.1. X86.HasAVX512 = X86.HasAVX512F && X86.HasAVX512CD && X86.HasAVX512BW && X86.HasAVX512DQ && X86.HasAVX512VL + X86.HasAVX512GFNI = X86.HasAVX512 && X86.HasGFNI } } diff --git a/src/simd/cpu.go b/src/simd/cpu.go index b07b5288f20e25..5ff47b8873488d 100644 --- a/src/simd/cpu.go +++ b/src/simd/cpu.go @@ -11,6 +11,11 @@ package simd import "internal/cpu" +// HasAVX512GFNI checks AVX512 CPU feature F+CD+BW+DQ+VL+GFNI. +func HasAVX512GFNI() bool { + return cpu.X86.HasAVX512GFNI +} + // HasAVX512 checks AVX512 CPU feature F+CD+BW+DQ+VL. 
func HasAVX512() bool { return cpu.X86.HasAVX512 diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index 084b0af53937e6..59908d60c520ae 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -38,7 +38,7 @@ func TestType(t *testing.T) { v.y = &y sink = y - if !simd.HasAVX512() { + if !simd.HasAVX512GFNI() { t.Skip("Test requires HasAVX512, not available on this hardware") return } @@ -97,7 +97,7 @@ func TestReflectMethod(t *testing.T) { } func TestVectorConversion(t *testing.T) { - if !simd.HasAVX512() { + if !simd.HasAVX512GFNI() { t.Skip("Test requires HasAVX512, not available on this hardware") return } @@ -115,7 +115,7 @@ func TestVectorConversion(t *testing.T) { } func TestMaskConversion(t *testing.T) { - if !simd.HasAVX512() { + if !simd.HasAVX512GFNI() { t.Skip("Test requires HasAVX512, not available on this hardware") return } @@ -144,7 +144,7 @@ func TestSub(t *testing.T) { } func TestMaskedAdd(t *testing.T) { - if !simd.HasAVX512() { + if !simd.HasAVX512GFNI() { t.Skip("Test requires HasAVX512, not available on this hardware") return } From 72c39ef83470334b1e592312d30ebef9a1e8ddda Mon Sep 17 00:00:00 2001 From: David Chase Date: Wed, 2 Jul 2025 14:28:10 -0400 Subject: [PATCH 058/139] [dev.simd] cmd/compile: fix the "always panic" code to actually panic without this change, the intrinsics of non-constant immediates just substitute a zero, which is wrong. Change-Id: I2c39ebedcfb0d0d6c072f4434f393027c6f3f033 Reviewed-on: https://go-review.googlesource.com/c/go/+/685575 Reviewed-by: Junyang Shao Reviewed-by: Cherry Mui LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/ssagen/intrinsics.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cmd/compile/internal/ssagen/intrinsics.go b/src/cmd/compile/internal/ssagen/intrinsics.go index 73e84077fd2b9d..c47b0898150b97 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics.go +++ b/src/cmd/compile/internal/ssagen/intrinsics.go @@ -1636,7 +1636,7 @@ func opLen4(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa func plainPanicSimdImm(s *state) { cmp := s.newValue0(ssa.OpConstBool, types.Types[types.TBOOL]) - cmp.AuxInt = 1 + cmp.AuxInt = 0 // TODO: make this a standalone panic instead of reusing the overflow panic. // Or maybe after we implement the switch table this will be obsolete anyway. s.check(cmp, ir.Syms.Panicoverflow) From dfd75f82d4aa21c4fc841f85c175934915590b5e Mon Sep 17 00:00:00 2001 From: David Chase Date: Wed, 2 Jul 2025 15:13:24 -0400 Subject: [PATCH 059/139] [dev.simd] cmd/compile: output of simdgen with invariant type order The old order was somewhat input-dependent, and sometimes produced spurious changes. This is the last spurious change, "once and for all!!!" Generated by simdgen CL 685595 Change-Id: Ic66d0263f3dd9f1ef9502c2deeeb8300ca3bac75 Reviewed-on: https://go-review.googlesource.com/c/go/+/685615 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao --- .../compile/internal/ssagen/simdintrinsics.go | 48 +-- src/simd/types_amd64.go | 324 +++++++++--------- 2 files changed, 186 insertions(+), 186 deletions(-) diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 903febac371eeb..9837f07fc47dd7 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -1986,30 +1986,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x8.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint64x8.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint64x8.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "LoadMask8x16", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask8x16.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask8x32", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask8x32.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask8x64", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask8x64.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask16x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask16x8.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask16x16", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask16x16.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask16x32", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask16x32.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask32x4", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask32x4.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask32x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask32x8.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask32x16", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask32x16.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask64x2", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask64x2.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask64x4", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask64x4.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask64x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask64x8.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadFloat32x4", simdLoad(), sys.AMD64) addF(simdPackage, "Float32x4.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadFloat32x8", simdLoad(), sys.AMD64) @@ -2070,6 +2046,30 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x4.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadUint64x8", simdLoad(), sys.AMD64) addF(simdPackage, "Uint64x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask8x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask8x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask8x32", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask8x32.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask8x64", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask8x64.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask16x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask16x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask16x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask16x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask16x32", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask16x32.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask32x4", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask32x4.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask32x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask32x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask32x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask32x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask64x2", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask64x2.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask64x4", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask64x4.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask64x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask64x8.Store", simdStore(), sys.AMD64) addF(simdPackage, "Mask8x16.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x16.AsMask8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask8x16.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) diff --git a/src/simd/types_amd64.go b/src/simd/types_amd64.go index 67f4d297024b01..6cc79275767c4c 100644 --- a/src/simd/types_amd64.go +++ b/src/simd/types_amd64.go @@ -9,6 +9,44 @@ type v128 struct { _128 struct{} } +// Float32x4 is a 128-bit SIMD vector of 4 float32 +type Float32x4 struct { + float32x4 v128 + vals [4]float32 +} + +// Len returns the number of elements in a Float32x4 +func (x Float32x4) Len() int { return 4 } + +// LoadFloat32x4 loads a Float32x4 from an array +// +//go:noescape +func LoadFloat32x4(y *[4]float32) Float32x4 + +// Store stores a Float32x4 to an array +// +//go:noescape +func (x Float32x4) Store(y *[4]float32) + +// Float64x2 is a 128-bit SIMD vector of 2 float64 +type Float64x2 struct { + float64x2 v128 + vals [2]float64 +} + +// Len returns the number of elements in a Float64x2 +func (x Float64x2) Len() int { return 2 } + +// LoadFloat64x2 loads a Float64x2 from an array +// +//go:noescape +func LoadFloat64x2(y *[2]float64) Float64x2 + +// Store stores a Float64x2 to an array +// +//go:noescape +func (x Float64x2) Store(y *[2]float64) + // Int8x16 is a 128-bit SIMD vector of 16 int8 type Int8x16 struct { int8x16 v128 @@ -85,50 +123,6 @@ func LoadInt64x2(y *[2]int64) Int64x2 //go:noescape func (x Int64x2) Store(y *[2]int64) -// Mask64x2 is a 128-bit SIMD vector of 2 int64 -type Mask64x2 struct { - int64x2 v128 - vals [2]int64 -} - -// Float32x4 is a 128-bit SIMD vector of 4 float32 -type Float32x4 struct { - float32x4 v128 - vals [4]float32 -} - -// Len returns the number of elements in a Float32x4 -func 
(x Float32x4) Len() int { return 4 } - -// LoadFloat32x4 loads a Float32x4 from an array -// -//go:noescape -func LoadFloat32x4(y *[4]float32) Float32x4 - -// Store stores a Float32x4 to an array -// -//go:noescape -func (x Float32x4) Store(y *[4]float32) - -// Float64x2 is a 128-bit SIMD vector of 2 float64 -type Float64x2 struct { - float64x2 v128 - vals [2]float64 -} - -// Len returns the number of elements in a Float64x2 -func (x Float64x2) Len() int { return 2 } - -// LoadFloat64x2 loads a Float64x2 from an array -// -//go:noescape -func LoadFloat64x2(y *[2]float64) Float64x2 - -// Store stores a Float64x2 to an array -// -//go:noescape -func (x Float64x2) Store(y *[2]float64) - // Uint8x16 is a 128-bit SIMD vector of 16 uint8 type Uint8x16 struct { uint8x16 v128 @@ -205,12 +199,6 @@ func LoadUint64x2(y *[2]uint64) Uint64x2 //go:noescape func (x Uint64x2) Store(y *[2]uint64) -// Mask32x4 is a 128-bit SIMD vector of 4 int32 -type Mask32x4 struct { - int32x4 v128 - vals [4]int32 -} - // Mask8x16 is a 128-bit SIMD vector of 16 int8 type Mask8x16 struct { int8x16 v128 @@ -223,11 +211,61 @@ type Mask16x8 struct { vals [8]int16 } +// Mask32x4 is a 128-bit SIMD vector of 4 int32 +type Mask32x4 struct { + int32x4 v128 + vals [4]int32 +} + +// Mask64x2 is a 128-bit SIMD vector of 2 int64 +type Mask64x2 struct { + int64x2 v128 + vals [2]int64 +} + // v256 is a tag type that tells the compiler that this is really 256-bit SIMD type v256 struct { _256 struct{} } +// Float32x8 is a 256-bit SIMD vector of 8 float32 +type Float32x8 struct { + float32x8 v256 + vals [8]float32 +} + +// Len returns the number of elements in a Float32x8 +func (x Float32x8) Len() int { return 8 } + +// LoadFloat32x8 loads a Float32x8 from an array +// +//go:noescape +func LoadFloat32x8(y *[8]float32) Float32x8 + +// Store stores a Float32x8 to an array +// +//go:noescape +func (x Float32x8) Store(y *[8]float32) + +// Float64x4 is a 256-bit SIMD vector of 4 float64 +type Float64x4 struct { + float64x4 v256 + vals [4]float64 +} + +// Len returns the number of elements in a Float64x4 +func (x Float64x4) Len() int { return 4 } + +// LoadFloat64x4 loads a Float64x4 from an array +// +//go:noescape +func LoadFloat64x4(y *[4]float64) Float64x4 + +// Store stores a Float64x4 to an array +// +//go:noescape +func (x Float64x4) Store(y *[4]float64) + // Int8x32 is a 256-bit SIMD vector of 32 int8 type Int8x32 struct { int8x32 v256 @@ -304,50 +342,6 @@ func LoadInt64x4(y *[4]int64) Int64x4 //go:noescape func (x Int64x4) Store(y *[4]int64) -// Mask64x4 is a 256-bit SIMD vector of 4 int64 -type Mask64x4 struct { - int64x4 v256 - vals [4]int64 -} - -// Float32x8 is a 256-bit SIMD vector of 8 float32 -type Float32x8 struct { - float32x8 v256 - vals [8]float32 -} - -// Len returns the number of elements in a Float32x8 -func (x Float32x8) Len() int { return 8 } - -// LoadFloat32x8 loads a Float32x8 from an array -// -//go:noescape -func LoadFloat32x8(y *[8]float32) Float32x8 - -// Store stores a Float32x8 to an array -// -//go:noescape -func (x Float32x8) Store(y *[8]float32) - -// Float64x4 is a 256-bit SIMD vector of 4 float64 -type Float64x4 struct { - float64x4 v256 - vals [4]float64 -} - -// Len returns the number of elements in a Float64x4 -func (x Float64x4) Len() int { return 4 } - -// LoadFloat64x4 loads a Float64x4 from an array -// -//go:noescape -func LoadFloat64x4(y *[4]float64) Float64x4 - -// Store stores a Float64x4 to an array -// -//go:noescape -func (x Float64x4) Store(y *[4]float64) - // Uint8x32 is a 256-bit SIMD 
vector of 32 uint8 type Uint8x32 struct { uint8x32 v256 @@ -424,12 +418,6 @@ func LoadUint64x4(y *[4]uint64) Uint64x4 //go:noescape func (x Uint64x4) Store(y *[4]uint64) -// Mask32x8 is a 256-bit SIMD vector of 8 int32 -type Mask32x8 struct { - int32x8 v256 - vals [8]int32 -} - // Mask8x32 is a 256-bit SIMD vector of 32 int8 type Mask8x32 struct { int8x32 v256 @@ -442,11 +430,61 @@ type Mask16x16 struct { vals [16]int16 } +// Mask32x8 is a 256-bit SIMD vector of 8 int32 +type Mask32x8 struct { + int32x8 v256 + vals [8]int32 +} + +// Mask64x4 is a 256-bit SIMD vector of 4 int64 +type Mask64x4 struct { + int64x4 v256 + vals [4]int64 +} + // v512 is a tag type that tells the compiler that this is really 512-bit SIMD type v512 struct { _512 struct{} } +// Float32x16 is a 512-bit SIMD vector of 16 float32 +type Float32x16 struct { + float32x16 v512 + vals [16]float32 +} + +// Len returns the number of elements in a Float32x16 +func (x Float32x16) Len() int { return 16 } + +// LoadFloat32x16 loads a Float32x16 from an array +// +//go:noescape +func LoadFloat32x16(y *[16]float32) Float32x16 + +// Store stores a Float32x16 to an array +// +//go:noescape +func (x Float32x16) Store(y *[16]float32) + +// Float64x8 is a 512-bit SIMD vector of 8 float64 +type Float64x8 struct { + float64x8 v512 + vals [8]float64 +} + +// Len returns the number of elements in a Float64x8 +func (x Float64x8) Len() int { return 8 } + +// LoadFloat64x8 loads a Float64x8 from an array +// +//go:noescape +func LoadFloat64x8(y *[8]float64) Float64x8 + +// Store stores a Float64x8 to an array +// +//go:noescape +func (x Float64x8) Store(y *[8]float64) + // Int8x64 is a 512-bit SIMD vector of 64 int8 type Int8x64 struct { int8x64 v512 @@ -466,12 +504,6 @@ func LoadInt8x64(y *[64]int8) Int8x64 //go:noescape func (x Int8x64) Store(y *[64]int8) -// Mask8x64 is a 512-bit SIMD vector of 64 int8 -type Mask8x64 struct { - int8x64 v512 - vals [64]int8 -} - // Int16x32 is a 512-bit SIMD vector of 32 int16 type Int16x32 struct { int16x32 v512 @@ -491,12 +523,6 @@ func LoadInt16x32(y *[32]int16) Int16x32 //go:noescape func (x Int16x32) Store(y *[32]int16) -// Mask16x32 is a 512-bit SIMD vector of 32 int16 -type Mask16x32 struct { - int16x32 v512 - vals [32]int16 -} - // Int32x16 is a 512-bit SIMD vector of 16 int32 type Int32x16 struct { int32x16 v512 @@ -516,12 +542,6 @@ func LoadInt32x16(y *[16]int32) Int32x16 //go:noescape func (x Int32x16) Store(y *[16]int32) -// Mask32x16 is a 512-bit SIMD vector of 16 int32 -type Mask32x16 struct { - int32x16 v512 - vals [16]int32 -} - // Int64x8 is a 512-bit SIMD vector of 8 int64 type Int64x8 struct { int64x8 v512 @@ -541,50 +561,6 @@ func LoadInt64x8(y *[8]int64) Int64x8 //go:noescape func (x Int64x8) Store(y *[8]int64) -// Mask64x8 is a 512-bit SIMD vector of 8 int64 -type Mask64x8 struct { - int64x8 v512 - vals [8]int64 -} - -// Float32x16 is a 512-bit SIMD vector of 16 float32 -type Float32x16 struct { - float32x16 v512 - vals [16]float32 -} - -// Len returns the number of elements in a Float32x16 -func (x Float32x16) Len() int { return 16 } - -// LoadFloat32x16 loads a Float32x16 from an array -// -//go:noescape -func LoadFloat32x16(y *[16]float32) Float32x16 - -// Store stores a Float32x16 to an array -// -//go:noescape -func (x Float32x16) Store(y *[16]float32) - -// Float64x8 is a 512-bit SIMD vector of 8 float64 -type Float64x8 struct { - float64x8 v512 - vals [8]float64 -} - -// Len returns the number of elements in a Float64x8 -func (x Float64x8) Len() int { return 8 } - -// 
LoadFloat64x8 loads a Float64x8 from an array -// -//go:noescape -func LoadFloat64x8(y *[8]float64) Float64x8 - -// Store stores a Float64x8 to an array -// -//go:noescape -func (x Float64x8) Store(y *[8]float64) - // Uint8x64 is a 512-bit SIMD vector of 64 uint8 type Uint8x64 struct { uint8x64 v512 @@ -660,3 +636,27 @@ func LoadUint64x8(y *[8]uint64) Uint64x8 // //go:noescape func (x Uint64x8) Store(y *[8]uint64) + +// Mask8x64 is a 512-bit SIMD vector of 64 int8 +type Mask8x64 struct { + int8x64 v512 + vals [64]int8 +} + +// Mask16x32 is a 512-bit SIMD vector of 32 int16 +type Mask16x32 struct { + int16x32 v512 + vals [32]int16 +} + +// Mask32x16 is a 512-bit SIMD vector of 16 int32 +type Mask32x16 struct { + int32x16 v512 + vals [16]int32 +} + +// Mask64x8 is a 512-bit SIMD vector of 8 int64 +type Mask64x8 struct { + int64x8 v512 + vals [8]int64 +} From d8fa853b37e364bb6a2356deda9073b1a1bc761d Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Mon, 7 Jul 2025 03:08:01 +0000 Subject: [PATCH 060/139] [dev.simd] cmd/compile: make regalloc simd aware on copy When making a temporary copy, regalloc should be aware of the SIMD-ness of the type; otherwise it might generate invalid moves. Change-Id: I722c3a0111d0990af32d84c6aaa151f1ac8c1f00 Reviewed-on: https://go-review.googlesource.com/c/go/+/685895 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/ssa/regalloc.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go index f1e210fe9b8b08..d4ce7a815b8b2c 100644 --- a/src/cmd/compile/internal/ssa/regalloc.go +++ b/src/cmd/compile/internal/ssa/regalloc.go @@ -898,6 +898,14 @@ func (s *regAllocState) compatRegs(t *types.Type) regMask { if t.IsTuple() || t.IsFlags() { return 0 } + if t.IsSIMD() { + if t.Size() > 8 { + return s.f.Config.fpRegMask & s.allocatable + } else { + // K mask + return s.f.Config.gpRegMask & s.allocatable + } + } if t.IsFloat() || t == types.TypeInt128 { if t.Kind() == types.TFLOAT32 && s.f.Config.fp32RegMask != 0 { m = s.f.Config.fp32RegMask From 292db9b676d96d9a231bcc743b8e5c835240be44 Mon Sep 17 00:00:00 2001 From: David Chase Date: Wed, 25 Jun 2025 16:06:00 -0400 Subject: [PATCH 061/139] [dev.simd] cmd/compile: add INSERT[IF]128 instructions This CL is created by simdgen CL 684055 and should be submitted after it. Also includes a test. 
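
For orientation, a minimal sketch of the intended use, assuming the slice-based
LoadInt8x32Slice, LoadInt8x16Slice, and StoreSlice helpers already present in
package simd on this branch; the test added below exercises the same pattern,
and the package, function, and variable names here are illustrative only:

    package simdexample // illustrative only

    import "simd"

    // replaceHighHalf overwrites the upper 16 lanes of dst (len >= 32)
    // with the 16 lanes loaded from lo (len >= 16).
    func replaceHighHalf(dst, lo []int8) {
        u := simd.LoadInt8x32Slice(dst) // all 32 lanes of dst
        v := simd.LoadInt8x16Slice(lo)  // the 16 replacement lanes
        // The immediate selects which half of u receives v:
        // 0 = low 128 bits, 1 = high 128 bits. It must be a constant;
        // a non-constant immediate panics (see the intrinsics panic fix).
        w := u.Set128(1, v)
        w.StoreSlice(dst)
    }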
Change-Id: I2ad7ae51d11cfc19745e866150e2eaf010d4ea49 Reviewed-on: https://go-review.googlesource.com/c/go/+/684077 Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/simdssa.go | 2 + .../compile/internal/ssa/_gen/simdAMD64.rules | 10 ++ .../compile/internal/ssa/_gen/simdAMD64ops.go | 2 + .../internal/ssa/_gen/simdgenericOps.go | 10 ++ src/cmd/compile/internal/ssa/opGen.go | 102 +++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 170 ++++++++++++++++++ .../compile/internal/ssagen/simdintrinsics.go | 10 ++ src/simd/simd_test.go | 16 ++ src/simd/simd_wrapped_test.go | 1 + src/simd/stubs_amd64.go | 52 ++++++ 10 files changed, 375 insertions(+) diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 999f3c200ce798..ac2848d1bafa69 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -706,6 +706,8 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VGF2P8AFFINEINVQB128, ssa.OpAMD64VGF2P8AFFINEINVQB256, ssa.OpAMD64VGF2P8AFFINEINVQB512, + ssa.OpAMD64VINSERTF128256, + ssa.OpAMD64VINSERTI128256, ssa.OpAMD64VPSHLDW128, ssa.OpAMD64VPSHLDW256, ssa.OpAMD64VPSHLDW512, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 3768c5aaadc338..6b1078e7412798 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -1452,6 +1452,16 @@ (SaturatedUnsignedSignedQuadDotProdAccumulateUint32x4 ...) => (VPDPBUSDS128 ...) (SaturatedUnsignedSignedQuadDotProdAccumulateUint32x8 ...) => (VPDPBUSDS256 ...) (SaturatedUnsignedSignedQuadDotProdAccumulateUint32x16 ...) => (VPDPBUSDS512 ...) +(Set128Float32x8 [a] x y) => (VINSERTF128256 [a] x y) +(Set128Float64x4 [a] x y) => (VINSERTF128256 [a] x y) +(Set128Int8x32 [a] x y) => (VINSERTI128256 [a] x y) +(Set128Int16x16 [a] x y) => (VINSERTI128256 [a] x y) +(Set128Int32x8 [a] x y) => (VINSERTI128256 [a] x y) +(Set128Int64x4 [a] x y) => (VINSERTI128256 [a] x y) +(Set128Uint8x32 [a] x y) => (VINSERTI128256 [a] x y) +(Set128Uint16x16 [a] x y) => (VINSERTI128256 [a] x y) +(Set128Uint32x8 [a] x y) => (VINSERTI128256 [a] x y) +(Set128Uint64x4 [a] x y) => (VINSERTI128256 [a] x y) (SetElemInt8x16 [a] x y) => (VPINSRB128 [a] x y) (SetElemInt16x8 [a] x y) => (VPINSRW128 [a] x y) (SetElemInt32x4 [a] x y) => (VPINSRD128 [a] x y) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 5e627e696e96f0..787d3c5fcbf50c 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -768,6 +768,7 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VRNDSCALEPSMasked256", argLength: 2, reg: fpkfp, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VREDUCEPSMasked256", argLength: 2, reg: fpkfp, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VCMPPSMasked256", argLength: 3, reg: fp2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VINSERTF128256", argLength: 2, reg: fp21, asm: "VINSERTF128", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VROUNDPD128", argLength: 1, reg: fp11, asm: "VROUNDPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: 
"VRNDSCALEPD128", argLength: 1, reg: fp11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VREDUCEPD128", argLength: 1, reg: fp11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -879,6 +880,7 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPINSRB128", argLength: 2, reg: fpgpfp, asm: "VPINSRB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPB256", argLength: 2, reg: fp2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPBMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VINSERTI128256", argLength: 2, reg: fp21, asm: "VINSERTI128", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPB512", argLength: 2, reg: fp2k, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPBMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUW256", argLength: 2, reg: fp2k, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index b68b237c312548..076a16ebda6110 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -1511,6 +1511,7 @@ func simdGenericOps() []opData { {name: "MaskedRoundWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedTruncWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "RoundWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "Set128Float32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "TruncWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "CeilWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithCeilWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, @@ -1543,6 +1544,7 @@ func simdGenericOps() []opData { {name: "MaskedRoundWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedTruncWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "RoundWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "Set128Float64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "TruncWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "CeilWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithCeilWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, @@ -1562,6 +1564,7 @@ func simdGenericOps() []opData { {name: "TruncWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "MaskedShiftAllLeftAndFillUpperFromInt16x16", argLength: 3, commutative: false, aux: "Int8"}, {name: "MaskedShiftAllRightAndFillUpperFromInt16x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "Set128Int16x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromInt16x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromInt16x16", argLength: 2, commutative: false, aux: "Int8"}, {name: 
"MaskedShiftAllLeftAndFillUpperFromInt16x32", argLength: 3, commutative: false, aux: "Int8"}, @@ -1598,6 +1601,7 @@ func simdGenericOps() []opData { {name: "MaskedShiftAllRightAndFillUpperFromInt32x8", argLength: 3, commutative: false, aux: "Int8"}, {name: "RotateAllLeftInt32x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "RotateAllRightInt32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "Set128Int32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromInt32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromInt32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "GetElemInt64x2", argLength: 1, commutative: false, aux: "Int8"}, @@ -1616,6 +1620,7 @@ func simdGenericOps() []opData { {name: "MaskedShiftAllRightAndFillUpperFromInt64x4", argLength: 3, commutative: false, aux: "Int8"}, {name: "RotateAllLeftInt64x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "RotateAllRightInt64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "Set128Int64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromInt64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromInt64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedRotateAllLeftInt64x8", argLength: 2, commutative: false, aux: "Int8"}, @@ -1628,8 +1633,10 @@ func simdGenericOps() []opData { {name: "ShiftAllRightAndFillUpperFromInt64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "GetElemInt8x16", argLength: 1, commutative: false, aux: "Int8"}, {name: "SetElemInt8x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "Set128Int8x32", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedShiftAllLeftAndFillUpperFromUint16x16", argLength: 3, commutative: false, aux: "Int8"}, {name: "MaskedShiftAllRightAndFillUpperFromUint16x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "Set128Uint16x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromUint16x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromUint16x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedShiftAllLeftAndFillUpperFromUint16x32", argLength: 3, commutative: false, aux: "Int8"}, @@ -1666,6 +1673,7 @@ func simdGenericOps() []opData { {name: "MaskedShiftAllRightAndFillUpperFromUint32x8", argLength: 3, commutative: false, aux: "Int8"}, {name: "RotateAllLeftUint32x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "RotateAllRightUint32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "Set128Uint32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromUint32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromUint32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "GetElemUint64x2", argLength: 1, commutative: false, aux: "Int8"}, @@ -1684,6 +1692,7 @@ func simdGenericOps() []opData { {name: "MaskedShiftAllRightAndFillUpperFromUint64x4", argLength: 3, commutative: false, aux: "Int8"}, {name: "RotateAllLeftUint64x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "RotateAllRightUint64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "Set128Uint64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromUint64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromUint64x4", 
argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedRotateAllLeftUint64x8", argLength: 2, commutative: false, aux: "Int8"}, @@ -1704,6 +1713,7 @@ func simdGenericOps() []opData { {name: "GaloisFieldAffineTransformInversedUint8x32", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedGaloisFieldAffineTransformUint8x32", argLength: 3, commutative: false, aux: "Int8"}, {name: "MaskedGaloisFieldAffineTransformInversedUint8x32", argLength: 3, commutative: false, aux: "Int8"}, + {name: "Set128Uint8x32", argLength: 2, commutative: false, aux: "Int8"}, {name: "GaloisFieldAffineTransformUint8x64", argLength: 2, commutative: false, aux: "Int8"}, {name: "GaloisFieldAffineTransformInversedUint8x64", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedGaloisFieldAffineTransformUint8x64", argLength: 3, commutative: false, aux: "Int8"}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index fec727ea12ec78..ece791ca6cea71 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1961,6 +1961,7 @@ const ( OpAMD64VRNDSCALEPSMasked256 OpAMD64VREDUCEPSMasked256 OpAMD64VCMPPSMasked256 + OpAMD64VINSERTF128256 OpAMD64VROUNDPD128 OpAMD64VRNDSCALEPD128 OpAMD64VREDUCEPD128 @@ -2072,6 +2073,7 @@ const ( OpAMD64VPINSRB128 OpAMD64VPCMPB256 OpAMD64VPCMPBMasked256 + OpAMD64VINSERTI128256 OpAMD64VPCMPB512 OpAMD64VPCMPBMasked512 OpAMD64VPCMPUW256 @@ -5844,6 +5846,7 @@ const ( OpMaskedRoundWithPrecisionFloat32x8 OpMaskedTruncWithPrecisionFloat32x8 OpRoundWithPrecisionFloat32x8 + OpSet128Float32x8 OpTruncWithPrecisionFloat32x8 OpCeilWithPrecisionFloat64x2 OpDiffWithCeilWithPrecisionFloat64x2 @@ -5876,6 +5879,7 @@ const ( OpMaskedRoundWithPrecisionFloat64x4 OpMaskedTruncWithPrecisionFloat64x4 OpRoundWithPrecisionFloat64x4 + OpSet128Float64x4 OpTruncWithPrecisionFloat64x4 OpCeilWithPrecisionFloat64x8 OpDiffWithCeilWithPrecisionFloat64x8 @@ -5895,6 +5899,7 @@ const ( OpTruncWithPrecisionFloat64x8 OpMaskedShiftAllLeftAndFillUpperFromInt16x16 OpMaskedShiftAllRightAndFillUpperFromInt16x16 + OpSet128Int16x16 OpShiftAllLeftAndFillUpperFromInt16x16 OpShiftAllRightAndFillUpperFromInt16x16 OpMaskedShiftAllLeftAndFillUpperFromInt16x32 @@ -5931,6 +5936,7 @@ const ( OpMaskedShiftAllRightAndFillUpperFromInt32x8 OpRotateAllLeftInt32x8 OpRotateAllRightInt32x8 + OpSet128Int32x8 OpShiftAllLeftAndFillUpperFromInt32x8 OpShiftAllRightAndFillUpperFromInt32x8 OpGetElemInt64x2 @@ -5949,6 +5955,7 @@ const ( OpMaskedShiftAllRightAndFillUpperFromInt64x4 OpRotateAllLeftInt64x4 OpRotateAllRightInt64x4 + OpSet128Int64x4 OpShiftAllLeftAndFillUpperFromInt64x4 OpShiftAllRightAndFillUpperFromInt64x4 OpMaskedRotateAllLeftInt64x8 @@ -5961,8 +5968,10 @@ const ( OpShiftAllRightAndFillUpperFromInt64x8 OpGetElemInt8x16 OpSetElemInt8x16 + OpSet128Int8x32 OpMaskedShiftAllLeftAndFillUpperFromUint16x16 OpMaskedShiftAllRightAndFillUpperFromUint16x16 + OpSet128Uint16x16 OpShiftAllLeftAndFillUpperFromUint16x16 OpShiftAllRightAndFillUpperFromUint16x16 OpMaskedShiftAllLeftAndFillUpperFromUint16x32 @@ -5999,6 +6008,7 @@ const ( OpMaskedShiftAllRightAndFillUpperFromUint32x8 OpRotateAllLeftUint32x8 OpRotateAllRightUint32x8 + OpSet128Uint32x8 OpShiftAllLeftAndFillUpperFromUint32x8 OpShiftAllRightAndFillUpperFromUint32x8 OpGetElemUint64x2 @@ -6017,6 +6027,7 @@ const ( OpMaskedShiftAllRightAndFillUpperFromUint64x4 OpRotateAllLeftUint64x4 OpRotateAllRightUint64x4 + OpSet128Uint64x4 OpShiftAllLeftAndFillUpperFromUint64x4 OpShiftAllRightAndFillUpperFromUint64x4 
OpMaskedRotateAllLeftUint64x8 @@ -6037,6 +6048,7 @@ const ( OpGaloisFieldAffineTransformInversedUint8x32 OpMaskedGaloisFieldAffineTransformUint8x32 OpMaskedGaloisFieldAffineTransformInversedUint8x32 + OpSet128Uint8x32 OpGaloisFieldAffineTransformUint8x64 OpGaloisFieldAffineTransformInversedUint8x64 OpMaskedGaloisFieldAffineTransformUint8x64 @@ -30131,6 +30143,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VINSERTF128256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVINSERTF128, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VROUNDPD128", auxType: auxInt8, @@ -31825,6 +31852,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VINSERTI128256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVINSERTI128, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPB512", auxType: auxInt8, @@ -67718,6 +67760,12 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "Set128Float32x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, { name: "TruncWithPrecisionFloat32x8", auxType: auxInt8, @@ -67910,6 +67958,12 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "Set128Float64x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, { name: "TruncWithPrecisionFloat64x4", auxType: auxInt8, @@ -68024,6 +68078,12 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "Set128Int16x16", + auxType: auxInt8, + argLen: 2, + generic: true, + }, { name: "ShiftAllLeftAndFillUpperFromInt16x16", auxType: auxInt8, @@ -68240,6 +68300,12 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "Set128Int32x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, { name: "ShiftAllLeftAndFillUpperFromInt32x8", auxType: auxInt8, @@ -68348,6 +68414,12 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "Set128Int64x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, { name: "ShiftAllLeftAndFillUpperFromInt64x4", auxType: auxInt8, @@ -68420,6 +68492,12 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "Set128Int8x32", + auxType: auxInt8, + argLen: 2, + generic: true, + }, { name: "MaskedShiftAllLeftAndFillUpperFromUint16x16", auxType: auxInt8, @@ -68432,6 +68510,12 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "Set128Uint16x16", + auxType: auxInt8, + argLen: 2, + generic: true, + }, { name: "ShiftAllLeftAndFillUpperFromUint16x16", auxType: auxInt8, @@ -68648,6 +68732,12 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "Set128Uint32x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, { name: "ShiftAllLeftAndFillUpperFromUint32x8", auxType: auxInt8, @@ -68756,6 +68846,12 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "Set128Uint64x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, { name: "ShiftAllLeftAndFillUpperFromUint64x4", auxType: auxInt8, @@ -68876,6 +68972,12 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: 
"Set128Uint8x32", + auxType: auxInt8, + argLen: 2, + generic: true, + }, { name: "GaloisFieldAffineTransformUint8x64", auxType: auxInt8, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 15ca2fcc5b4dfe..5c1872dcdfd313 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -4411,6 +4411,26 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpSelect1(v) case OpSelectN: return rewriteValueAMD64_OpSelectN(v) + case OpSet128Float32x8: + return rewriteValueAMD64_OpSet128Float32x8(v) + case OpSet128Float64x4: + return rewriteValueAMD64_OpSet128Float64x4(v) + case OpSet128Int16x16: + return rewriteValueAMD64_OpSet128Int16x16(v) + case OpSet128Int32x8: + return rewriteValueAMD64_OpSet128Int32x8(v) + case OpSet128Int64x4: + return rewriteValueAMD64_OpSet128Int64x4(v) + case OpSet128Int8x32: + return rewriteValueAMD64_OpSet128Int8x32(v) + case OpSet128Uint16x16: + return rewriteValueAMD64_OpSet128Uint16x16(v) + case OpSet128Uint32x8: + return rewriteValueAMD64_OpSet128Uint32x8(v) + case OpSet128Uint64x4: + return rewriteValueAMD64_OpSet128Uint64x4(v) + case OpSet128Uint8x32: + return rewriteValueAMD64_OpSet128Uint8x32(v) case OpSetElemInt16x8: return rewriteValueAMD64_OpSetElemInt16x8(v) case OpSetElemInt32x4: @@ -53102,6 +53122,156 @@ func rewriteValueAMD64_OpSelectN(v *Value) bool { } return false } +func rewriteValueAMD64_OpSet128Float32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Set128Float32x8 [a] x y) + // result: (VINSERTF128256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTF128256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSet128Float64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Set128Float64x4 [a] x y) + // result: (VINSERTF128256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTF128256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSet128Int16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Set128Int16x16 [a] x y) + // result: (VINSERTI128256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSet128Int32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Set128Int32x8 [a] x y) + // result: (VINSERTI128256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSet128Int64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Set128Int64x4 [a] x y) + // result: (VINSERTI128256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSet128Int8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Set128Int8x32 [a] x y) + // result: (VINSERTI128256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSet128Uint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: 
(Set128Uint16x16 [a] x y) + // result: (VINSERTI128256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSet128Uint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Set128Uint32x8 [a] x y) + // result: (VINSERTI128256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSet128Uint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Set128Uint64x4 [a] x y) + // result: (VINSERTI128256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSet128Uint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Set128Uint8x32 [a] x y) + // result: (VINSERTI128256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} func rewriteValueAMD64_OpSetElemInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 9837f07fc47dd7..3d0e6fbd4aa75f 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -1463,6 +1463,16 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint32x4.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x8.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x16.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x8.Set128", opLen2Imm8(ssa.OpSet128Float32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Float64x4.Set128", opLen2Imm8(ssa.OpSet128Float64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int8x32.Set128", opLen2Imm8(ssa.OpSet128Int8x32, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int16x16.Set128", opLen2Imm8(ssa.OpSet128Int16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int32x8.Set128", opLen2Imm8(ssa.OpSet128Int32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int64x4.Set128", opLen2Imm8(ssa.OpSet128Int64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint8x32.Set128", opLen2Imm8(ssa.OpSet128Uint8x32, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint16x16.Set128", opLen2Imm8(ssa.OpSet128Uint16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint32x8.Set128", opLen2Imm8(ssa.OpSet128Uint32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint64x4.Set128", opLen2Imm8(ssa.OpSet128Uint64x4, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Int8x16.SetElem", opLen2Imm8(ssa.OpSetElemInt8x16, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Int16x8.SetElem", opLen2Imm8(ssa.OpSetElemInt16x8, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Int32x4.SetElem", opLen2Imm8(ssa.OpSetElemInt32x4, types.TypeVec128, 0), 
sys.AMD64) diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index 59908d60c520ae..f99938bb9d29e9 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -193,6 +193,22 @@ func TestSlicesInt8GetElem(t *testing.T) { } } + +func TestSlicesInt8Set128(t *testing.T) { + a := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} + v := simd.LoadInt8x16Slice(a) // 1-16 + u := simd.LoadInt8x32Slice(a) // 1-32 + + w := u.Set128(1, v) // 1-16:1-16 + + b := make([]int8, 32, 32) + w.StoreSlice(b) + + checkInt8Slices(t, a, b[:16]) + checkInt8Slices(t, a, b[16:]) +} + func TestSlicesInt8TooShortLoad(t *testing.T) { defer func() { if r := recover(); r != nil { diff --git a/src/simd/simd_wrapped_test.go b/src/simd/simd_wrapped_test.go index 321d3bb80a4fe1..4a8c0957e5b37e 100644 --- a/src/simd/simd_wrapped_test.go +++ b/src/simd/simd_wrapped_test.go @@ -7975,6 +7975,7 @@ func testUint64x8UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint6 // RotateAllLeft // RotateAllRight // RoundWithPrecision +// Set128 // SetElem // ShiftAllLeft // ShiftAllLeftAndFillUpperFrom diff --git a/src/simd/stubs_amd64.go b/src/simd/stubs_amd64.go index f53242cd738daa..de54a9ada48cb3 100644 --- a/src/simd/stubs_amd64.go +++ b/src/simd/stubs_amd64.go @@ -7682,6 +7682,58 @@ func (x Uint32x8) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int // Asm: VPDPBUSDS, CPU Feature: AVX512EVEX func (x Uint32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Uint32x16 +/* Set128 */ + +// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. +// +// Asm: VINSERTF128, CPU Feature: AVX +func (x Float32x8) Set128(imm uint8, y Float32x4) Float32x8 + +// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. +// +// Asm: VINSERTF128, CPU Feature: AVX +func (x Float64x4) Set128(imm uint8, y Float64x2) Float64x4 + +// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. +// +// Asm: VINSERTI128, CPU Feature: AVX2 +func (x Int8x32) Set128(imm uint8, y Int8x16) Int8x32 + +// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. +// +// Asm: VINSERTI128, CPU Feature: AVX2 +func (x Int16x16) Set128(imm uint8, y Int16x8) Int16x16 + +// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. +// +// Asm: VINSERTI128, CPU Feature: AVX2 +func (x Int32x8) Set128(imm uint8, y Int32x4) Int32x8 + +// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. +// +// Asm: VINSERTI128, CPU Feature: AVX2 +func (x Int64x4) Set128(imm uint8, y Int64x2) Int64x4 + +// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. 
+// +// Asm: VINSERTI128, CPU Feature: AVX2 +func (x Uint8x32) Set128(imm uint8, y Uint8x16) Uint8x32 + +// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. +// +// Asm: VINSERTI128, CPU Feature: AVX2 +func (x Uint16x16) Set128(imm uint8, y Uint16x8) Uint16x16 + +// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. +// +// Asm: VINSERTI128, CPU Feature: AVX2 +func (x Uint32x8) Set128(imm uint8, y Uint32x4) Uint32x8 + +// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. +// +// Asm: VINSERTI128, CPU Feature: AVX2 +func (x Uint64x4) Set128(imm uint8, y Uint64x2) Uint64x4 + /* SetElem */ // SetElem sets a single constant-indexed element's value. From 43a61aef56a7d4aadd1d2af298c51ff31d23c04b Mon Sep 17 00:00:00 2001 From: David Chase Date: Wed, 25 Jun 2025 18:20:50 -0400 Subject: [PATCH 062/139] [dev.simd] cmd/compile: add EXTRACT[IF]128 instructions This is generated by simdgen CL 684080 and should be submitted after it. Also includes tests. Change-Id: I1d680911134d8fb92f4deccae4ec373f3ed9f752 Reviewed-on: https://go-review.googlesource.com/c/go/+/684115 Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/simdssa.go | 2 + .../compile/internal/ssa/_gen/simdAMD64.rules | 10 ++ .../compile/internal/ssa/_gen/simdAMD64ops.go | 2 + .../internal/ssa/_gen/simdgenericOps.go | 10 ++ src/cmd/compile/internal/ssa/opGen.go | 100 ++++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 150 ++++++++++++++++++ .../compile/internal/ssagen/simdintrinsics.go | 10 ++ src/simd/simd_test.go | 88 ++++++++++ src/simd/simd_wrapped_test.go | 1 + src/simd/stubs_amd64.go | 52 ++++++ 10 files changed, 425 insertions(+) diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index ac2848d1bafa69..fbb63ccaa14386 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -655,6 +655,8 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VREDUCEPD128, ssa.OpAMD64VREDUCEPD256, ssa.OpAMD64VREDUCEPD512, + ssa.OpAMD64VEXTRACTF128128, + ssa.OpAMD64VEXTRACTI128128, ssa.OpAMD64VPROLD128, ssa.OpAMD64VPROLD256, ssa.OpAMD64VPROLD512, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 6b1078e7412798..6ba52a9e9c9f93 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -224,6 +224,16 @@ (GaloisFieldMulUint8x16 ...) => (VGF2P8MULB128 ...) (GaloisFieldMulUint8x32 ...) => (VGF2P8MULB256 ...) (GaloisFieldMulUint8x64 ...) => (VGF2P8MULB512 ...) 
+(Get128Float32x8 [a] x) => (VEXTRACTF128128 [a] x) +(Get128Float64x4 [a] x) => (VEXTRACTF128128 [a] x) +(Get128Int8x32 [a] x) => (VEXTRACTI128128 [a] x) +(Get128Int16x16 [a] x) => (VEXTRACTI128128 [a] x) +(Get128Int32x8 [a] x) => (VEXTRACTI128128 [a] x) +(Get128Int64x4 [a] x) => (VEXTRACTI128128 [a] x) +(Get128Uint8x32 [a] x) => (VEXTRACTI128128 [a] x) +(Get128Uint16x16 [a] x) => (VEXTRACTI128128 [a] x) +(Get128Uint32x8 [a] x) => (VEXTRACTI128128 [a] x) +(Get128Uint64x4 [a] x) => (VEXTRACTI128128 [a] x) (GetElemInt8x16 [a] x) => (VPEXTRB128 [a] x) (GetElemInt16x8 [a] x) => (VPEXTRW128 [a] x) (GetElemInt32x4 [a] x) => (VPEXTRD128 [a] x) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 787d3c5fcbf50c..8c895d9f455950 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -765,6 +765,7 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VRNDSCALEPS256", argLength: 1, reg: fp11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VREDUCEPS256", argLength: 1, reg: fp11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VCMPPS256", argLength: 2, reg: fp21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VEXTRACTF128128", argLength: 1, reg: fp11, asm: "VEXTRACTF128", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRNDSCALEPSMasked256", argLength: 2, reg: fpkfp, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VREDUCEPSMasked256", argLength: 2, reg: fpkfp, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VCMPPSMasked256", argLength: 3, reg: fp2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, @@ -878,6 +879,7 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPCMPB128", argLength: 2, reg: fp2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPBMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPINSRB128", argLength: 2, reg: fpgpfp, asm: "VPINSRB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VEXTRACTI128128", argLength: 1, reg: fp11, asm: "VEXTRACTI128", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPB256", argLength: 2, reg: fp2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPBMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VINSERTI128256", argLength: 2, reg: fp21, asm: "VINSERTI128", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 076a16ebda6110..c74893b97a210f 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -1502,6 +1502,7 @@ func simdGenericOps() []opData { {name: "DiffWithRoundWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithTruncWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, 
{name: "FloorWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "Get128Float32x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "MaskedCeilWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithCeilWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithFloorWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, @@ -1535,6 +1536,7 @@ func simdGenericOps() []opData { {name: "DiffWithRoundWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithTruncWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "FloorWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "Get128Float64x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "MaskedCeilWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithCeilWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithFloorWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, @@ -1562,6 +1564,7 @@ func simdGenericOps() []opData { {name: "MaskedTruncWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "RoundWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "TruncWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "Get128Int16x16", argLength: 1, commutative: false, aux: "Int8"}, {name: "MaskedShiftAllLeftAndFillUpperFromInt16x16", argLength: 3, commutative: false, aux: "Int8"}, {name: "MaskedShiftAllRightAndFillUpperFromInt16x16", argLength: 3, commutative: false, aux: "Int8"}, {name: "Set128Int16x16", argLength: 2, commutative: false, aux: "Int8"}, @@ -1595,6 +1598,7 @@ func simdGenericOps() []opData { {name: "SetElemInt32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromInt32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromInt32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "Get128Int32x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "MaskedRotateAllLeftInt32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedRotateAllRightInt32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedShiftAllLeftAndFillUpperFromInt32x8", argLength: 3, commutative: false, aux: "Int8"}, @@ -1614,6 +1618,7 @@ func simdGenericOps() []opData { {name: "SetElemInt64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromInt64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromInt64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "Get128Int64x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "MaskedRotateAllLeftInt64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedRotateAllRightInt64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedShiftAllLeftAndFillUpperFromInt64x4", argLength: 3, commutative: false, aux: "Int8"}, @@ -1633,7 +1638,9 @@ func simdGenericOps() []opData { {name: "ShiftAllRightAndFillUpperFromInt64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "GetElemInt8x16", argLength: 1, commutative: false, aux: "Int8"}, {name: "SetElemInt8x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "Get128Int8x32", argLength: 1, commutative: false, aux: "Int8"}, {name: "Set128Int8x32", argLength: 2, 
commutative: false, aux: "Int8"}, + {name: "Get128Uint16x16", argLength: 1, commutative: false, aux: "Int8"}, {name: "MaskedShiftAllLeftAndFillUpperFromUint16x16", argLength: 3, commutative: false, aux: "Int8"}, {name: "MaskedShiftAllRightAndFillUpperFromUint16x16", argLength: 3, commutative: false, aux: "Int8"}, {name: "Set128Uint16x16", argLength: 2, commutative: false, aux: "Int8"}, @@ -1667,6 +1674,7 @@ func simdGenericOps() []opData { {name: "SetElemUint32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromUint32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromUint32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "Get128Uint32x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "MaskedRotateAllLeftUint32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedRotateAllRightUint32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedShiftAllLeftAndFillUpperFromUint32x8", argLength: 3, commutative: false, aux: "Int8"}, @@ -1686,6 +1694,7 @@ func simdGenericOps() []opData { {name: "SetElemUint64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromUint64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromUint64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "Get128Uint64x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "MaskedRotateAllLeftUint64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedRotateAllRightUint64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedShiftAllLeftAndFillUpperFromUint64x4", argLength: 3, commutative: false, aux: "Int8"}, @@ -1711,6 +1720,7 @@ func simdGenericOps() []opData { {name: "SetElemUint8x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "GaloisFieldAffineTransformUint8x32", argLength: 2, commutative: false, aux: "Int8"}, {name: "GaloisFieldAffineTransformInversedUint8x32", argLength: 2, commutative: false, aux: "Int8"}, + {name: "Get128Uint8x32", argLength: 1, commutative: false, aux: "Int8"}, {name: "MaskedGaloisFieldAffineTransformUint8x32", argLength: 3, commutative: false, aux: "Int8"}, {name: "MaskedGaloisFieldAffineTransformInversedUint8x32", argLength: 3, commutative: false, aux: "Int8"}, {name: "Set128Uint8x32", argLength: 2, commutative: false, aux: "Int8"}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index ece791ca6cea71..91380e5e089e31 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1958,6 +1958,7 @@ const ( OpAMD64VRNDSCALEPS256 OpAMD64VREDUCEPS256 OpAMD64VCMPPS256 + OpAMD64VEXTRACTF128128 OpAMD64VRNDSCALEPSMasked256 OpAMD64VREDUCEPSMasked256 OpAMD64VCMPPSMasked256 @@ -2071,6 +2072,7 @@ const ( OpAMD64VPCMPB128 OpAMD64VPCMPBMasked128 OpAMD64VPINSRB128 + OpAMD64VEXTRACTI128128 OpAMD64VPCMPB256 OpAMD64VPCMPBMasked256 OpAMD64VINSERTI128256 @@ -5837,6 +5839,7 @@ const ( OpDiffWithRoundWithPrecisionFloat32x8 OpDiffWithTruncWithPrecisionFloat32x8 OpFloorWithPrecisionFloat32x8 + OpGet128Float32x8 OpMaskedCeilWithPrecisionFloat32x8 OpMaskedDiffWithCeilWithPrecisionFloat32x8 OpMaskedDiffWithFloorWithPrecisionFloat32x8 @@ -5870,6 +5873,7 @@ const ( OpDiffWithRoundWithPrecisionFloat64x4 OpDiffWithTruncWithPrecisionFloat64x4 OpFloorWithPrecisionFloat64x4 + OpGet128Float64x4 OpMaskedCeilWithPrecisionFloat64x4 OpMaskedDiffWithCeilWithPrecisionFloat64x4 OpMaskedDiffWithFloorWithPrecisionFloat64x4 @@ 
-5897,6 +5901,7 @@ const ( OpMaskedTruncWithPrecisionFloat64x8 OpRoundWithPrecisionFloat64x8 OpTruncWithPrecisionFloat64x8 + OpGet128Int16x16 OpMaskedShiftAllLeftAndFillUpperFromInt16x16 OpMaskedShiftAllRightAndFillUpperFromInt16x16 OpSet128Int16x16 @@ -5930,6 +5935,7 @@ const ( OpSetElemInt32x4 OpShiftAllLeftAndFillUpperFromInt32x4 OpShiftAllRightAndFillUpperFromInt32x4 + OpGet128Int32x8 OpMaskedRotateAllLeftInt32x8 OpMaskedRotateAllRightInt32x8 OpMaskedShiftAllLeftAndFillUpperFromInt32x8 @@ -5949,6 +5955,7 @@ const ( OpSetElemInt64x2 OpShiftAllLeftAndFillUpperFromInt64x2 OpShiftAllRightAndFillUpperFromInt64x2 + OpGet128Int64x4 OpMaskedRotateAllLeftInt64x4 OpMaskedRotateAllRightInt64x4 OpMaskedShiftAllLeftAndFillUpperFromInt64x4 @@ -5968,7 +5975,9 @@ const ( OpShiftAllRightAndFillUpperFromInt64x8 OpGetElemInt8x16 OpSetElemInt8x16 + OpGet128Int8x32 OpSet128Int8x32 + OpGet128Uint16x16 OpMaskedShiftAllLeftAndFillUpperFromUint16x16 OpMaskedShiftAllRightAndFillUpperFromUint16x16 OpSet128Uint16x16 @@ -6002,6 +6011,7 @@ const ( OpSetElemUint32x4 OpShiftAllLeftAndFillUpperFromUint32x4 OpShiftAllRightAndFillUpperFromUint32x4 + OpGet128Uint32x8 OpMaskedRotateAllLeftUint32x8 OpMaskedRotateAllRightUint32x8 OpMaskedShiftAllLeftAndFillUpperFromUint32x8 @@ -6021,6 +6031,7 @@ const ( OpSetElemUint64x2 OpShiftAllLeftAndFillUpperFromUint64x2 OpShiftAllRightAndFillUpperFromUint64x2 + OpGet128Uint64x4 OpMaskedRotateAllLeftUint64x4 OpMaskedRotateAllRightUint64x4 OpMaskedShiftAllLeftAndFillUpperFromUint64x4 @@ -6046,6 +6057,7 @@ const ( OpSetElemUint8x16 OpGaloisFieldAffineTransformUint8x32 OpGaloisFieldAffineTransformInversedUint8x32 + OpGet128Uint8x32 OpMaskedGaloisFieldAffineTransformUint8x32 OpMaskedGaloisFieldAffineTransformInversedUint8x32 OpSet128Uint8x32 @@ -30096,6 +30108,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VEXTRACTF128128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVEXTRACTF128, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VRNDSCALEPSMasked256", auxType: auxInt8, @@ -31820,6 +31846,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VEXTRACTI128128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVEXTRACTI128, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPB256", auxType: auxInt8, @@ -67706,6 +67746,12 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "Get128Float32x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, { name: "MaskedCeilWithPrecisionFloat32x8", auxType: auxInt8, @@ -67904,6 +67950,12 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "Get128Float64x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, { name: "MaskedCeilWithPrecisionFloat64x4", auxType: auxInt8, @@ -68066,6 +68118,12 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "Get128Int16x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, { name: "MaskedShiftAllLeftAndFillUpperFromInt16x16", auxType: auxInt8, @@ -68264,6 +68322,12 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "Get128Int32x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, { name: "MaskedRotateAllLeftInt32x8", auxType: 
auxInt8, @@ -68378,6 +68442,12 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "Get128Int64x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, { name: "MaskedRotateAllLeftInt64x4", auxType: auxInt8, @@ -68492,12 +68562,24 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "Get128Int8x32", + auxType: auxInt8, + argLen: 1, + generic: true, + }, { name: "Set128Int8x32", auxType: auxInt8, argLen: 2, generic: true, }, + { + name: "Get128Uint16x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, { name: "MaskedShiftAllLeftAndFillUpperFromUint16x16", auxType: auxInt8, @@ -68696,6 +68778,12 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "Get128Uint32x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, { name: "MaskedRotateAllLeftUint32x8", auxType: auxInt8, @@ -68810,6 +68898,12 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "Get128Uint64x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, { name: "MaskedRotateAllLeftUint64x4", auxType: auxInt8, @@ -68960,6 +69054,12 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "Get128Uint8x32", + auxType: auxInt8, + argLen: 1, + generic: true, + }, { name: "MaskedGaloisFieldAffineTransformUint8x32", auxType: auxInt8, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 5c1872dcdfd313..1cf23c4ec5b0e4 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1388,6 +1388,26 @@ func rewriteValueAMD64(v *Value) bool { case OpGaloisFieldMulUint8x64: v.Op = OpAMD64VGF2P8MULB512 return true + case OpGet128Float32x8: + return rewriteValueAMD64_OpGet128Float32x8(v) + case OpGet128Float64x4: + return rewriteValueAMD64_OpGet128Float64x4(v) + case OpGet128Int16x16: + return rewriteValueAMD64_OpGet128Int16x16(v) + case OpGet128Int32x8: + return rewriteValueAMD64_OpGet128Int32x8(v) + case OpGet128Int64x4: + return rewriteValueAMD64_OpGet128Int64x4(v) + case OpGet128Int8x32: + return rewriteValueAMD64_OpGet128Int8x32(v) + case OpGet128Uint16x16: + return rewriteValueAMD64_OpGet128Uint16x16(v) + case OpGet128Uint32x8: + return rewriteValueAMD64_OpGet128Uint32x8(v) + case OpGet128Uint64x4: + return rewriteValueAMD64_OpGet128Uint64x4(v) + case OpGet128Uint8x32: + return rewriteValueAMD64_OpGet128Uint8x32(v) case OpGetCallerPC: v.Op = OpAMD64LoweredGetCallerPC return true @@ -30999,6 +31019,136 @@ func rewriteValueAMD64_OpGaloisFieldAffineTransformUint8x64(v *Value) bool { return true } } +func rewriteValueAMD64_OpGet128Float32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (Get128Float32x8 [a] x) + // result: (VEXTRACTF128128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VEXTRACTF128128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGet128Float64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (Get128Float64x4 [a] x) + // result: (VEXTRACTF128128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VEXTRACTF128128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGet128Int16x16(v *Value) bool { + v_0 := v.Args[0] + // match: (Get128Int16x16 [a] x) + // result: (VEXTRACTI128128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGet128Int32x8(v 
*Value) bool { + v_0 := v.Args[0] + // match: (Get128Int32x8 [a] x) + // result: (VEXTRACTI128128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGet128Int64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (Get128Int64x4 [a] x) + // result: (VEXTRACTI128128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGet128Int8x32(v *Value) bool { + v_0 := v.Args[0] + // match: (Get128Int8x32 [a] x) + // result: (VEXTRACTI128128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGet128Uint16x16(v *Value) bool { + v_0 := v.Args[0] + // match: (Get128Uint16x16 [a] x) + // result: (VEXTRACTI128128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGet128Uint32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (Get128Uint32x8 [a] x) + // result: (VEXTRACTI128128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGet128Uint64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (Get128Uint64x4 [a] x) + // result: (VEXTRACTI128128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGet128Uint8x32(v *Value) bool { + v_0 := v.Args[0] + // match: (Get128Uint8x32 [a] x) + // result: (VEXTRACTI128128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} func rewriteValueAMD64_OpGetElemInt16x8(v *Value) bool { v_0 := v.Args[0] // match: (GetElemInt16x8 [a] x) diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 3d0e6fbd4aa75f..27aad1cc0c449d 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -235,6 +235,16 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint8x16.GaloisFieldMul", opLen2(ssa.OpGaloisFieldMulUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x32.GaloisFieldMul", opLen2(ssa.OpGaloisFieldMulUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x64.GaloisFieldMul", opLen2(ssa.OpGaloisFieldMulUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x8.Get128", opLen1Imm8(ssa.OpGet128Float32x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Float64x4.Get128", opLen1Imm8(ssa.OpGet128Float64x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int8x32.Get128", opLen1Imm8(ssa.OpGet128Int8x32, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int16x16.Get128", opLen1Imm8(ssa.OpGet128Int16x16, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int32x8.Get128", opLen1Imm8(ssa.OpGet128Int32x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int64x4.Get128", opLen1Imm8(ssa.OpGet128Int64x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint8x32.Get128", opLen1Imm8(ssa.OpGet128Uint8x32, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint16x16.Get128", opLen1Imm8(ssa.OpGet128Uint16x16, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint32x8.Get128", opLen1Imm8(ssa.OpGet128Uint32x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint64x4.Get128", opLen1Imm8(ssa.OpGet128Uint64x4, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Int8x16.GetElem", opLen1Imm8(ssa.OpGetElemInt8x16, types.Types[types.TINT8], 0), sys.AMD64) addF(simdPackage, "Int16x8.GetElem", opLen1Imm8(ssa.OpGetElemInt16x8, types.Types[types.TINT16], 0), sys.AMD64) addF(simdPackage, "Int32x4.GetElem", opLen1Imm8(ssa.OpGetElemInt32x4, types.Types[types.TINT32], 0), sys.AMD64) diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index f99938bb9d29e9..1b47d2770ccd61 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -161,6 +161,22 @@ func checkInt8Slices(t *testing.T, a, b []int8) { } } +func checkFloat32Slices(t *testing.T, a, b []float32) { + for i := range b { + if a[i] != b[i] { + t.Errorf("a and b differ at index %d, a=%3.0f, b=%3.0f", i, a[i], b[i]) + } + } +} + +func checkFloat64Slices(t *testing.T, a, b []float64) { + for i := range b { + if a[i] != b[i] { + t.Errorf("a and b differ at index %d, a=%3.0f, b=%3.0f", i, a[i], b[i]) + } + } +} + func TestSlicesInt8(t *testing.T) { a := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} @@ -209,6 +225,78 @@ func TestSlicesInt8Set128(t *testing.T) { checkInt8Slices(t, a, b[16:]) } +func TestSlicesInt8Get128(t *testing.T) { + a := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} + u := simd.LoadInt8x32Slice(a) // 1-32 + v := u.Get128(0) // 1-16 + w := u.Get128(1) // 17-32 + + b := make([]int8, 32, 32) + v.StoreSlice(b[:16]) + w.StoreSlice(b[16:]) + + checkInt8Slices(t, a, b) +} + +func TestSlicesFloat32Set128(t *testing.T) { + a := []float32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} + v := simd.LoadFloat32x4Slice(a) // 1-4 + u := simd.LoadFloat32x8Slice(a) // 1-4 + + w := u.Set128(1, v) // 1-4:1-4 + + b := make([]float32, 8, 8) + w.StoreSlice(b) + + checkFloat32Slices(t, a, b[:4]) + checkFloat32Slices(t, a, b[4:]) +} + +func TestSlicesFloat32Get128(t *testing.T) { + a := []float32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 
27, 28, 29, 30, 31, 32} + u := simd.LoadFloat32x8Slice(a) // 1-8 + v := u.Get128(0) // 1-4 + w := u.Get128(1) // 5-8 + + b := make([]float32, 8, 8) + v.StoreSlice(b[:4]) + w.StoreSlice(b[4:]) + + checkFloat32Slices(t, a, b) +} + +func TestSlicesFloat64Set128(t *testing.T) { + a := []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} + v := simd.LoadFloat64x2Slice(a) // 1-2 + u := simd.LoadFloat64x4Slice(a) // 1-2 + + w := u.Set128(1, v) // 1-2:1-2 + + b := make([]float64, 4, 4) + w.StoreSlice(b) + + checkFloat64Slices(t, a, b[:2]) + checkFloat64Slices(t, a, b[2:]) +} + +func TestSlicesFloat64Get128(t *testing.T) { + a := []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} + u := simd.LoadFloat64x4Slice(a) // 1-4 + v := u.Get128(0) // 1-2 + w := u.Get128(1) // 3-4 + + b := make([]float64, 4, 4) + v.StoreSlice(b[:2]) + w.StoreSlice(b[2:]) + + checkFloat64Slices(t, a, b) +} + func TestSlicesInt8TooShortLoad(t *testing.T) { defer func() { if r := recover(); r != nil { diff --git a/src/simd/simd_wrapped_test.go b/src/simd/simd_wrapped_test.go index 4a8c0957e5b37e..b3f18b383772b6 100644 --- a/src/simd/simd_wrapped_test.go +++ b/src/simd/simd_wrapped_test.go @@ -7954,6 +7954,7 @@ func testUint64x8UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint6 // FloorWithPrecision // GaloisFieldAffineTransform // GaloisFieldAffineTransformInversed +// Get128 // GetElem // MaskedCeilWithPrecision // MaskedDiffWithCeilWithPrecision diff --git a/src/simd/stubs_amd64.go b/src/simd/stubs_amd64.go index de54a9ada48cb3..3453843d0f7991 100644 --- a/src/simd/stubs_amd64.go +++ b/src/simd/stubs_amd64.go @@ -1198,6 +1198,58 @@ func (x Uint8x32) GaloisFieldMul(y Uint8x32) Uint8x32 // Asm: VGF2P8MULB, CPU Feature: AVX512EVEX func (x Uint8x64) GaloisFieldMul(y Uint8x64) Uint8x64 +/* Get128 */ + +// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. +// +// Asm: VEXTRACTF128, CPU Feature: AVX +func (x Float32x8) Get128(imm uint8) Float32x4 + +// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. +// +// Asm: VEXTRACTF128, CPU Feature: AVX +func (x Float64x4) Get128(imm uint8) Float64x2 + +// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. +// +// Asm: VEXTRACTI128, CPU Feature: AVX2 +func (x Int8x32) Get128(imm uint8) Int8x16 + +// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. +// +// Asm: VEXTRACTI128, CPU Feature: AVX2 +func (x Int16x16) Get128(imm uint8) Int16x8 + +// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. +// +// Asm: VEXTRACTI128, CPU Feature: AVX2 +func (x Int32x8) Get128(imm uint8) Int32x4 + +// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. +// +// Asm: VEXTRACTI128, CPU Feature: AVX2 +func (x Int64x4) Get128(imm uint8) Int64x2 + +// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. +// +// Asm: VEXTRACTI128, CPU Feature: AVX2 +func (x Uint8x32) Get128(imm uint8) Uint8x16 + +// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. 
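+//
+// Illustrative sketch (not part of this CL): v.Get128(0) and v.Get128(1)
+// split a Uint16x16 into its two Uint16x8 halves, with Get128(1) returning
+// elements 8 through 15.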
+// +// Asm: VEXTRACTI128, CPU Feature: AVX2 +func (x Uint16x16) Get128(imm uint8) Uint16x8 + +// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. +// +// Asm: VEXTRACTI128, CPU Feature: AVX2 +func (x Uint32x8) Get128(imm uint8) Uint32x4 + +// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. +// +// Asm: VEXTRACTI128, CPU Feature: AVX2 +func (x Uint64x4) Get128(imm uint8) Uint64x2 + /* GetElem */ // GetElem retrieves a single constant-indexed element's value. From 2bb45cb8a55f5e2fc9c31c3473899f5dcdff7163 Mon Sep 17 00:00:00 2001 From: David Chase Date: Wed, 2 Jul 2025 18:00:12 -0400 Subject: [PATCH 063/139] [dev.simd] cmd/compile: minor tweak for race detector This makes the front-end a little bit less temp-happy when instrumenting, which repairs the "is it a constant?" test in the simd intrinsic conversion which is otherwise broken by race detection. Also, this will perhaps be better code. Change-Id: I84b7a45b7bff62bb2c9f9662466b50858d288645 Reviewed-on: https://go-review.googlesource.com/c/go/+/685637 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao Reviewed-by: Cherry Mui --- src/cmd/compile/internal/walk/walk.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/cmd/compile/internal/walk/walk.go b/src/cmd/compile/internal/walk/walk.go index 2fa51f12809362..8b4381980d58eb 100644 --- a/src/cmd/compile/internal/walk/walk.go +++ b/src/cmd/compile/internal/walk/walk.go @@ -311,6 +311,15 @@ func backingArrayPtrLen(n ir.Node) (ptr, length ir.Node) { // function calls, which could clobber function call arguments/results // currently on the stack. func mayCall(n ir.Node) bool { + // This is intended to avoid putting constants + // into temporaries with the race detector (or other + // instrumentation) which interferes with simple + // "this is a constant" tests in ssagen. + // Also, it will generally lead to better code. + if n.Op() == ir.OLITERAL { + return false + } + // When instrumenting, any expression might require function calls. if base.Flag.Cfg.Instrumenting { return true From 24f2b8ae2e1ad78464b2f5eacbb6b6cf7bde2a52 Mon Sep 17 00:00:00 2001 From: David Chase Date: Thu, 26 Jun 2025 17:41:40 -0400 Subject: [PATCH 064/139] [dev.simd] simd: {Int,Uint}{8x{16,32},16x{8,16}} subvector loads/stores from slices. Includes tests, which turned out to be necessary. Change-Id: I13437f3c1b6a614481d4bef332666485dbee4c4e Reviewed-on: https://go-review.googlesource.com/c/go/+/684839 Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI --- src/simd/simd_test.go | 26 ++- src/simd/slicepart_amd64.go | 387 ++++++++++++++++++++++++++++++++++++ src/simd/slicepart_test.go | 186 +++++++++++++++++ 3 files changed, 598 insertions(+), 1 deletion(-) create mode 100644 src/simd/slicepart_amd64.go create mode 100644 src/simd/slicepart_test.go diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index 1b47d2770ccd61..e2324e8da5c5ea 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build goexperiment.simd +//go:build goexperiment.simd && amd64 package simd_test @@ -161,6 +161,30 @@ func checkInt8Slices(t *testing.T, a, b []int8) { } } +func checkUint8Slices(t *testing.T, a, b []uint8) { + for i := range b { + if a[i] != b[i] { + t.Errorf("a and b differ at index %d, a=%d, b=%d", i, a[i], b[i]) + } + } +} + +func checkInt16Slices(t *testing.T, a, b []int16) { + for i := range b { + if a[i] != b[i] { + t.Errorf("a and b differ at index %d, a=%d, b=%d", i, a[i], b[i]) + } + } +} + +func checkUint16Slices(t *testing.T, a, b []uint16) { + for i := range b { + if a[i] != b[i] { + t.Errorf("a and b differ at index %d, a=%d, b=%d", i, a[i], b[i]) + } + } +} + func checkFloat32Slices(t *testing.T, a, b []float32) { for i := range b { if a[i] != b[i] { diff --git a/src/simd/slicepart_amd64.go b/src/simd/slicepart_amd64.go new file mode 100644 index 00000000000000..7f5247cd8c25b8 --- /dev/null +++ b/src/simd/slicepart_amd64.go @@ -0,0 +1,387 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build goexperiment.simd + +package simd + +import "unsafe" + +// Implementation of all the {Int,Uint}{8,16} load and store slice part +// functions and methods for 128-bit and 256-bit vectors. + +/* pointer-punning functions. */ + +func int16atP8(p *int8) *int16 { + return (*int16)(unsafe.Pointer(p)) +} + +func int32atP8(p *int8) *int32 { + return (*int32)(unsafe.Pointer(p)) +} + +func int64atP8(p *int8) *int64 { + return (*int64)(unsafe.Pointer(p)) +} + +func int32atP16(p *int16) *int32 { + return (*int32)(unsafe.Pointer(p)) +} + +func int64atP16(p *int16) *int64 { + return (*int64)(unsafe.Pointer(p)) +} + +func int64atP32(p *int32) *int64 { + return (*int64)(unsafe.Pointer(p)) +} + +/* unsigned versions of integer slice part loads */ + +// LoadUint8x16SlicePart loads a Uint8x16 from the slice s. +// If s has fewer than 16 elements, the remaining elements of the vector are filled with zeroes. +// If s has 16 or more elements, the function is equivalent to LoadUint8x16Slice. +func LoadUint8x16SlicePart(s []uint8) Uint8x16 { + if len(s) == 0 { + var zero Uint8x16 + return zero + } + t := unsafe.Slice((*int8)(unsafe.Pointer(&s[0])), len(s)) + return LoadInt8x16SlicePart(t).AsUint8x16() +} + +// LoadUint16x8SlicePart loads a Uint16x8 from the slice s. +// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. +// If s has 8 or more elements, the function is equivalent to LoadUint16x8Slice. +func LoadUint16x8SlicePart(s []uint16) Uint16x8 { + if len(s) == 0 { + var zero Uint16x8 + return zero + } + t := unsafe.Slice((*int16)(unsafe.Pointer(&s[0])), len(s)) + return LoadInt16x8SlicePart(t).AsUint16x8() +} + +// LoadUint8x32SlicePart loads a Uint8x32 from the slice s. +// If s has fewer than 32 elements, the remaining elements of the vector are filled with zeroes. +// If s has 32 or more elements, the function is equivalent to LoadUint8x32Slice. +func LoadUint8x32SlicePart(s []uint8) Uint8x32 { + if len(s) == 0 { + var zero Uint8x32 + return zero + } + t := unsafe.Slice((*int8)(unsafe.Pointer(&s[0])), len(s)) + return LoadInt8x32SlicePart(t).AsUint8x32() +} + +// LoadUint16x16SlicePart loads a Uint16x16 from the slice s. +// If s has fewer than 16 elements, the remaining elements of the vector are filled with zeroes. +// If s has 16 or more elements, the function is equivalent to LoadUint16x16Slice. 
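+//
+// For example (an illustrative sketch, not part of this CL):
+//
+//	v := LoadUint16x16SlicePart([]uint16{1, 2, 3}) // lanes 0-2 hold 1, 2, 3; lanes 3-15 are zero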
+func LoadUint16x16SlicePart(s []uint16) Uint16x16 { + if len(s) == 0 { + var zero Uint16x16 + return zero + } + t := unsafe.Slice((*int16)(unsafe.Pointer(&s[0])), len(s)) + return LoadInt16x16SlicePart(t).AsUint16x16() +} + +/* unsigned versions of integer slice part stores*/ + +// StoreSlicePart stores the elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 16 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint8x16) StoreSlicePart(s []uint8) { + if len(s) == 0 { + return + } + t := unsafe.Slice((*int8)(unsafe.Pointer(&s[0])), len(s)) + x.AsInt8x16().StoreSlicePart(t) +} + +// StoreSlicePart stores the elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 8 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint16x8) StoreSlicePart(s []uint16) { + if len(s) == 0 { + return + } + t := unsafe.Slice((*int16)(unsafe.Pointer(&s[0])), len(s)) + x.AsInt16x8().StoreSlicePart(t) +} + +// StoreSlicePart stores the elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 32 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint8x32) StoreSlicePart(s []uint8) { + if len(s) == 0 { + return + } + t := unsafe.Slice((*int8)(unsafe.Pointer(&s[0])), len(s)) + x.AsInt8x32().StoreSlicePart(t) +} + +// StoreSlicePart stores the elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 16 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint16x16) StoreSlicePart(s []uint16) { + if len(s) == 0 { + return + } + t := unsafe.Slice((*int16)(unsafe.Pointer(&s[0])), len(s)) + x.AsInt16x16().StoreSlicePart(t) +} + +/* 256-bit int vector loads and stores made from 128-bit parts */ + +// LoadInt8x32SlicePart loads a Int8x32 from the slice s. +// If s has fewer than 32 elements, the remaining elements of the vector are filled with zeroes. +// If s has 32 or more elements, the function is equivalent to LoadInt8x32Slice. +func LoadInt8x32SlicePart(s []int8) Int8x32 { + l := len(s) + if l >= 32 { + return LoadInt8x32Slice(s) + } + var x Int8x32 + if l == 0 { + return x + } + if l > 16 { + return x.Set128(0, LoadInt8x16Slice(s)).Set128(1, LoadInt8x16SlicePart(s[16:])) + } else { + return x.Set128(0, LoadInt8x16SlicePart(s)) + } +} + +// LoadInt16x16SlicePart loads a Int16x16 from the slice s. +// If s has fewer than 16 elements, the remaining elements of the vector are filled with zeroes. +// If s has 16 or more elements, the function is equivalent to LoadInt16x16Slice. +func LoadInt16x16SlicePart(s []int16) Int16x16 { + l := len(s) + if l >= 16 { + return LoadInt16x16Slice(s) + } + var x Int16x16 + if l == 0 { + return x + } + if l > 8 { + return x.Set128(0, LoadInt16x8Slice(s)).Set128(1, LoadInt16x8SlicePart(s[8:])) + } else { + return x.Set128(0, LoadInt16x8SlicePart(s)) + } +} + +// StoreSlicePart stores the elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 32 or more elements, the method is equivalent to x.StoreSlice. +func (x Int8x32) StoreSlicePart(s []int8) { + l := len(s) + if l >= 32 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + if l > 16 { + x.Get128(0).StoreSlice(s) + x.Get128(1).StoreSlicePart(s[16:]) + } else { // fits in one + x.Get128(0).StoreSlicePart(s) + } +} + +// StoreSlicePart stores the elements of x into the slice s. +// It stores as many elements as will fit in s. 
+// If s has 16 or more elements, the method is equivalent to x.StoreSlice. +func (x Int16x16) StoreSlicePart(s []int16) { + l := len(s) + if l >= 16 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + if l > 8 { + x.Get128(0).StoreSlice(s) + x.Get128(1).StoreSlicePart(s[8:]) + } else { // fits in one + x.Get128(0).StoreSlicePart(s) + } +} + +/* 128-bit vector load and store slice parts for 8 and 16-bit int elements */ + +// LoadInt8x16SlicePart loads a Int8x16 from the slice s. +// If s has fewer than 16 elements, the remaining elements of the vector are filled with zeroes. +// If s has 16 or more elements, the function is equivalent to LoadInt8x16Slice. +func LoadInt8x16SlicePart(s []int8) Int8x16 { + l := len(s) + if l >= 16 { + return LoadInt8x16Slice(s) + } + var x Int8x16 + if l == 0 { + return x + } + if l >= 8 { // 8-15 + x = x.AsInt64x2().SetElem(0, *int64atP8(&s[0])).AsInt8x16() + if l >= 12 { // 12, 13, 14, 15 + x = x.AsInt32x4().SetElem(8/4, *int32atP8(&s[8])).AsInt8x16() + if l >= 14 { + x = x.AsInt16x8().SetElem(12/2, *int16atP8(&s[12])).AsInt8x16() + if l == 15 { + x = x.SetElem(14, s[14]) + } + } else if l == 13 { + x = x.SetElem(12, s[12]) + } + } else if l >= 10 { // 10, 11 + x = x.AsInt16x8().SetElem(8/2, *int16atP8(&s[8])).AsInt8x16() + if l == 11 { + x = x.SetElem(10, s[10]) + } + } else if l == 9 { + x = x.SetElem(8, s[8]) + } + } else if l >= 4 { // 4-7 + x = x.AsInt32x4().SetElem(0, *int32atP8(&s[0])).AsInt8x16() + if l >= 6 { + x = x.AsInt16x8().SetElem(4/2, *int16atP8(&s[4])).AsInt8x16() + if l == 7 { + x = x.SetElem(6, s[6]) + } + } else if l == 5 { + x = x.SetElem(4, s[4]) + } + } else if l >= 2 { // 2,3 + x = x.AsInt16x8().SetElem(0, *int16atP8(&s[0])).AsInt8x16() + if l == 3 { + x = x.SetElem(2, s[2]) + } + } else { // l == 1 + x = x.SetElem(0, s[0]) + } + return x +} + +// StoreSlicePart stores the elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 16 or more elements, the method is equivalent to x.StoreSlice. +func (x Int8x16) StoreSlicePart(s []int8) { + l := len(s) + if l >= 16 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + if l >= 8 { // 8-15 + *int64atP8(&s[0]) = x.AsInt64x2().GetElem(0) + if l >= 12 { // 12, 13, 14, 15 + *int32atP8(&s[8]) = x.AsInt32x4().GetElem(8 / 4) + if l >= 14 { + *int16atP8(&s[12]) = x.AsInt16x8().GetElem(12 / 2) + if l == 15 { + s[14] = x.GetElem(14) + } + } else if l == 13 { + s[12] = x.GetElem(12) + } + } else if l >= 10 { // 10, 11 + *int16atP8(&s[8]) = x.AsInt16x8().GetElem(8 / 2) + if l == 11 { + s[10] = x.GetElem(10) + } + } else if l == 9 { + s[8] = x.GetElem(8) + } + } else if l >= 4 { // 4-7 + *int32atP8(&s[0]) = x.AsInt32x4().GetElem(0) + if l >= 6 { + *int16atP8(&s[4]) = x.AsInt16x8().GetElem(4 / 2) + if l == 7 { + s[6] = x.GetElem(6) + } + } else if l == 5 { + s[4] = x.GetElem(4) + } + } else if l >= 2 { // 2,3 + *int16atP8(&s[0]) = x.AsInt16x8().GetElem(0) + if l == 3 { + s[2] = x.GetElem(2) + } + } else { // l == 1 + s[0] = x.GetElem(0) + } +} + +// LoadInt16x8SlicePart loads a Int16x8 from the slice s. +// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. +// If s has 8 or more elements, the function is equivalent to LoadInt16x8Slice. 
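+//
+// The implementation below assembles the vector from progressively narrower
+// pieces: for example, a 7-element slice is loaded as one 64-bit chunk
+// (elements 0-3), one 32-bit chunk (elements 4-5), and a final 16-bit element
+// (element 6), using the pointer-punning helpers defined earlier in this file.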
+func LoadInt16x8SlicePart(s []int16) Int16x8 { + l := len(s) + if l >= 8 { + return LoadInt16x8Slice(s) + } + var x Int16x8 + if l == 0 { + return x + } + if l >= 4 { // 4-7 + x = x.AsInt64x2().SetElem(0, *int64atP16(&s[0])).AsInt16x8() + if l >= 6 { + x = x.AsInt32x4().SetElem(4/2, *int32atP16(&s[4])).AsInt16x8() + if l == 7 { + x = x.SetElem(6, s[6]) + } + } else if l == 5 { + x = x.SetElem(4, s[4]) + } + } else if l >= 2 { // 2,3 + x = x.AsInt32x4().SetElem(0, *int32atP16(&s[0])).AsInt16x8() + if l == 3 { + x = x.SetElem(2, s[2]) + } + } else { // l == 1 + x = x.SetElem(0, s[0]) + } + return x +} + +// StoreSlicePart stores the elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 8 or more elements, the method is equivalent to x.StoreSlice. +func (x Int16x8) StoreSlicePart(s []int16) { + l := len(s) + if l >= 8 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + if l >= 4 { // 4-7 + *int64atP16(&s[0]) = x.AsInt64x2().GetElem(0) + if l >= 6 { + *int32atP16(&s[4]) = x.AsInt32x4().GetElem(4 / 2) + if l == 7 { + s[6] = x.GetElem(6) + } + } else if l == 5 { + s[4] = x.GetElem(4) + } + } else if l >= 2 { // 2,3 + *int32atP16(&s[0]) = x.AsInt32x4().GetElem(0) + if l == 3 { + s[2] = x.GetElem(2) + } + } else { // l == 1 + s[0] = x.GetElem(0) + } + return +} diff --git a/src/simd/slicepart_test.go b/src/simd/slicepart_test.go new file mode 100644 index 00000000000000..8f10ea630b726f --- /dev/null +++ b/src/simd/slicepart_test.go @@ -0,0 +1,186 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build goexperiment.simd && amd64 + +package simd_test + +import ( + "simd" + "testing" +) + +func TestSlicePartInt8x16(t *testing.T) { + a := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} + b := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} + for i := 16; i >= 0; i-- { + u := simd.LoadInt8x16SlicePart(a[:i]) + c := make([]int8, 32, 32) + u.StoreSlice(c) + checkInt8Slices(t, c, b) + if i > 0 { + b[i-1] = 0 + } + } +} + +func TestSlicePartInt8x32(t *testing.T) { + a := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} + b := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} + for i := 32; i >= 0; i-- { + u := simd.LoadInt8x32SlicePart(a[:i]) + c := make([]int8, 32, 32) + u.StoreSlice(c) + checkInt8Slices(t, c, b) + if i > 0 { + b[i-1] = 0 + } + } +} + +func TestSlicePartUint8x16(t *testing.T) { + a := []uint8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} + b := []uint8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} + for i := 16; i >= 0; i-- { + u := simd.LoadUint8x16SlicePart(a[:i]) + c := make([]uint8, 32, 32) + u.StoreSlice(c) + checkUint8Slices(t, c, b) + if i > 0 { + b[i-1] = 0 + } + } +} + +func TestSlicePartUint8x32(t *testing.T) { + a := []uint8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} + b := []uint8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} + for i := 32; i >= 0; i-- { + u := simd.LoadUint8x32SlicePart(a[:i]) + c := make([]uint8, 32, 32) + u.StoreSlice(c) + checkUint8Slices(t, c, b) + if i > 0 { + b[i-1] = 0 + } + } +} + +func TestSlicePartInt16x8(t *testing.T) { + a := 
[]int16{1, 2, 3, 4, 5, 6, 7, 8} + b := []int16{1, 2, 3, 4, 5, 6, 7, 8} + for i := 8; i >= 0; i-- { + u := simd.LoadInt16x8SlicePart(a[:i]) + c := make([]int16, 16, 16) + u.StoreSlice(c) + checkInt16Slices(t, c, b) + if i > 0 { + b[i-1] = 0 + } + } +} + +func TestSlicePartInt16x16(t *testing.T) { + a := []int16{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} + b := []int16{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} + for i := 16; i >= 0; i-- { + u := simd.LoadInt16x16SlicePart(a[:i]) + c := make([]int16, 16, 16) + u.StoreSlice(c) + checkInt16Slices(t, c, b) + if i > 0 { + b[i-1] = 0 + } + } +} + +func TestSlicesPartStoreInt8x16(t *testing.T) { + a := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} + b := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} + for i := 16; i >= 0; i-- { + v := simd.LoadInt8x16Slice(a) + c := make([]int8, 32, 32) + v.StoreSlicePart(c[:i]) + checkInt8Slices(t, c, b) + if i > 0 { + b[i-1] = 0 + } + } +} + +func TestSlicesPartStoreInt16x8(t *testing.T) { + a := []int16{1, 2, 3, 4, 5, 6, 7, 8} + b := []int16{1, 2, 3, 4, 5, 6, 7, 8} + for i := 8; i >= 0; i-- { + v := simd.LoadInt16x8Slice(a) + c := make([]int16, 32, 32) + v.StoreSlicePart(c[:i]) + checkInt16Slices(t, c, b) + if i > 0 { + b[i-1] = 0 + } + } +} + +func TestSlicesPartStoreInt16x16(t *testing.T) { + a := []int16{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} + b := []int16{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} + for i := 16; i >= 0; i-- { + v := simd.LoadInt16x16Slice(a) + c := make([]int16, 32, 32) + v.StoreSlicePart(c[:i]) + checkInt16Slices(t, c, b) + if i > 0 { + b[i-1] = 0 + } + } +} + +func TestSlicesPartStoreUint8x16(t *testing.T) { + a := []uint8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} + b := []uint8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} + for i := 16; i >= 0; i-- { + v := simd.LoadUint8x16Slice(a) + c := make([]uint8, 32, 32) + v.StoreSlicePart(c[:i]) + checkUint8Slices(t, c, b) + if i > 0 { + b[i-1] = 0 + } + } +} + +func TestSlicesPartStoreUint16x16(t *testing.T) { + a := []uint16{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} + b := []uint16{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} + for i := 16; i >= 0; i-- { + v := simd.LoadUint16x16Slice(a) + c := make([]uint16, 32, 32) + v.StoreSlicePart(c[:i]) + checkUint16Slices(t, c, b) + if i > 0 { + b[i-1] = 0 + } + } +} + +func TestSlicesPartStoreUint8x32(t *testing.T) { + a := []uint8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} + b := []uint8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} + for i := 32; i >= 0; i-- { + v := simd.LoadUint8x32Slice(a) + c := make([]uint8, 32, 32) + v.StoreSlicePart(c[:i]) + checkUint8Slices(t, c, b) + if i > 0 { + b[i-1] = 0 + } + } +} From 0870ed04a3632b62fdd76fdac0bcf091cc55ac68 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Tue, 8 Jul 2025 02:41:33 +0000 Subject: [PATCH 065/139] [dev.simd] cmd/compile: make compares between NaNs all false. This CL updates the predicate immediate value of Equal, GreaterEqual, Greater. This CL is generated by Cl 686215. 
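For example (an illustrative sketch using the simd wrappers for these
predicates, not a test in this CL), a NaN lane now compares false under
both Greater and GreaterEqual:

    a := simd.LoadFloat64x2Slice([]float64{math.NaN(), 1})
    b := simd.LoadFloat64x2Slice([]float64{2, 2})
    _ = a.Greater(b)      // lane 0: false (NaN), lane 1: false (1 > 2)
    _ = b.GreaterEqual(a) // lane 0: false (NaN), lane 1: true  (2 >= 1)

With the previous unordered predicates (5 and 6), the NaN lanes compared true.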
Change-Id: I77fc411f40f5c790a1be7f3d5ffd11f12df50ec7 Reviewed-on: https://go-review.googlesource.com/c/go/+/686235 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- .../compile/internal/ssa/_gen/simdAMD64.rules | 226 ++++----- src/cmd/compile/internal/ssa/rewriteAMD64.go | 452 +++++++++--------- 2 files changed, 339 insertions(+), 339 deletions(-) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 6ba52a9e9c9f93..757020b6c966b1 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -242,66 +242,66 @@ (GetElemUint16x8 [a] x) => (VPEXTRW128 [a] x) (GetElemUint32x4 [a] x) => (VPEXTRD128 [a] x) (GetElemUint64x2 [a] x) => (VPEXTRQ128 [a] x) -(GreaterFloat32x4 x y) => (VCMPPS128 [6] x y) -(GreaterFloat32x8 x y) => (VCMPPS256 [6] x y) -(GreaterFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [6] x y)) -(GreaterFloat64x2 x y) => (VCMPPD128 [6] x y) -(GreaterFloat64x4 x y) => (VCMPPD256 [6] x y) -(GreaterFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [6] x y)) +(GreaterFloat32x4 x y) => (VCMPPS128 [14] x y) +(GreaterFloat32x8 x y) => (VCMPPS256 [14] x y) +(GreaterFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [14] x y)) +(GreaterFloat64x2 x y) => (VCMPPD128 [14] x y) +(GreaterFloat64x4 x y) => (VCMPPD256 [14] x y) +(GreaterFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [14] x y)) (GreaterInt8x16 ...) => (VPCMPGTB128 ...) (GreaterInt8x32 ...) => (VPCMPGTB256 ...) -(GreaterInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [6] x y)) +(GreaterInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [14] x y)) (GreaterInt16x8 ...) => (VPCMPGTW128 ...) (GreaterInt16x16 ...) => (VPCMPGTW256 ...) -(GreaterInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [6] x y)) +(GreaterInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [14] x y)) (GreaterInt32x4 ...) => (VPCMPGTD128 ...) (GreaterInt32x8 ...) => (VPCMPGTD256 ...) -(GreaterInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [6] x y)) -(GreaterInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [6] x y)) +(GreaterInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [14] x y)) +(GreaterInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [14] x y)) (GreaterInt64x4 ...) => (VPCMPGTQ256 ...) 
-(GreaterInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [6] x y)) -(GreaterUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [6] x y)) -(GreaterUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [6] x y)) -(GreaterUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [6] x y)) -(GreaterUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [6] x y)) -(GreaterUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [6] x y)) -(GreaterUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [6] x y)) -(GreaterUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [6] x y)) -(GreaterUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [6] x y)) -(GreaterUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [6] x y)) -(GreaterUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [6] x y)) -(GreaterUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [6] x y)) -(GreaterUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [6] x y)) -(GreaterEqualFloat32x4 x y) => (VCMPPS128 [5] x y) -(GreaterEqualFloat32x8 x y) => (VCMPPS256 [5] x y) -(GreaterEqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [5] x y)) -(GreaterEqualFloat64x2 x y) => (VCMPPD128 [5] x y) -(GreaterEqualFloat64x4 x y) => (VCMPPD256 [5] x y) -(GreaterEqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [5] x y)) -(GreaterEqualInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [5] x y)) -(GreaterEqualInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [5] x y)) -(GreaterEqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [5] x y)) -(GreaterEqualInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [5] x y)) -(GreaterEqualInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [5] x y)) -(GreaterEqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [5] x y)) -(GreaterEqualInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [5] x y)) -(GreaterEqualInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [5] x y)) -(GreaterEqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [5] x y)) -(GreaterEqualInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [5] x y)) -(GreaterEqualInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [5] x y)) -(GreaterEqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [5] x y)) -(GreaterEqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [5] x y)) -(GreaterEqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [5] x y)) -(GreaterEqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [5] x y)) -(GreaterEqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [5] x y)) -(GreaterEqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [5] x y)) -(GreaterEqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [5] x y)) -(GreaterEqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [5] x y)) -(GreaterEqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [5] x y)) -(GreaterEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [5] x y)) -(GreaterEqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [5] x y)) -(GreaterEqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [5] x y)) -(GreaterEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [5] x y)) +(GreaterInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [14] x y)) +(GreaterUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [14] x y)) +(GreaterUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [14] x y)) +(GreaterUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [14] x y)) +(GreaterUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [14] x y)) +(GreaterUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [14] x y)) +(GreaterUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [14] x y)) +(GreaterUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [14] x y)) +(GreaterUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [14] x y)) +(GreaterUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [14] x y)) +(GreaterUint64x2 x y) 
=> (VPMOVMToVec64x2 (VPCMPUQ128 [14] x y)) +(GreaterUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [14] x y)) +(GreaterUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [14] x y)) +(GreaterEqualFloat32x4 x y) => (VCMPPS128 [13] x y) +(GreaterEqualFloat32x8 x y) => (VCMPPS256 [13] x y) +(GreaterEqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [13] x y)) +(GreaterEqualFloat64x2 x y) => (VCMPPD128 [13] x y) +(GreaterEqualFloat64x4 x y) => (VCMPPD256 [13] x y) +(GreaterEqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [13] x y)) +(GreaterEqualInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [13] x y)) +(GreaterEqualInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [13] x y)) +(GreaterEqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [13] x y)) +(GreaterEqualInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [13] x y)) +(GreaterEqualInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [13] x y)) +(GreaterEqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [13] x y)) +(GreaterEqualInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [13] x y)) +(GreaterEqualInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [13] x y)) +(GreaterEqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [13] x y)) +(GreaterEqualInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [13] x y)) +(GreaterEqualInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [13] x y)) +(GreaterEqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [13] x y)) +(GreaterEqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [13] x y)) +(GreaterEqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [13] x y)) +(GreaterEqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [13] x y)) +(GreaterEqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [13] x y)) +(GreaterEqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [13] x y)) +(GreaterEqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [13] x y)) +(GreaterEqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [13] x y)) +(GreaterEqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [13] x y)) +(GreaterEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [13] x y)) +(GreaterEqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [13] x y)) +(GreaterEqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [13] x y)) +(GreaterEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [13] x y)) (IsNanFloat32x4 x y) => (VCMPPS128 [3] x y) (IsNanFloat32x8 x y) => (VCMPPS256 [3] x y) (IsNanFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [3] x y)) @@ -563,66 +563,66 @@ (MaskedGaloisFieldMulUint8x16 x y mask) => (VGF2P8MULBMasked128 x y (VPMOVVec8x16ToM mask)) (MaskedGaloisFieldMulUint8x32 x y mask) => (VGF2P8MULBMasked256 x y (VPMOVVec8x32ToM mask)) (MaskedGaloisFieldMulUint8x64 x y mask) => (VGF2P8MULBMasked512 x y (VPMOVVec8x64ToM mask)) -(MaskedGreaterFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [6] x y (VPMOVVec32x4ToM mask))) -(MaskedGreaterFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [6] x y (VPMOVVec32x8ToM mask))) -(MaskedGreaterFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [6] x y (VPMOVVec32x16ToM mask))) -(MaskedGreaterFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [6] x y (VPMOVVec64x2ToM mask))) -(MaskedGreaterFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [6] x y (VPMOVVec64x4ToM mask))) -(MaskedGreaterFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [6] x y (VPMOVVec64x8ToM mask))) -(MaskedGreaterInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [6] x y (VPMOVVec8x16ToM mask))) -(MaskedGreaterInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [6] x y (VPMOVVec8x32ToM mask))) -(MaskedGreaterInt8x64 x y mask) => 
(VPMOVMToVec8x64 (VPCMPBMasked512 [6] x y (VPMOVVec8x64ToM mask))) -(MaskedGreaterInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [6] x y (VPMOVVec16x8ToM mask))) -(MaskedGreaterInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [6] x y (VPMOVVec16x16ToM mask))) -(MaskedGreaterInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [6] x y (VPMOVVec16x32ToM mask))) -(MaskedGreaterInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [6] x y (VPMOVVec32x4ToM mask))) -(MaskedGreaterInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [6] x y (VPMOVVec32x8ToM mask))) -(MaskedGreaterInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [6] x y (VPMOVVec32x16ToM mask))) -(MaskedGreaterInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [6] x y (VPMOVVec64x2ToM mask))) -(MaskedGreaterInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [6] x y (VPMOVVec64x4ToM mask))) -(MaskedGreaterInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [6] x y (VPMOVVec64x8ToM mask))) -(MaskedGreaterUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [6] x y (VPMOVVec8x16ToM mask))) -(MaskedGreaterUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [6] x y (VPMOVVec8x32ToM mask))) -(MaskedGreaterUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [6] x y (VPMOVVec8x64ToM mask))) -(MaskedGreaterUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [6] x y (VPMOVVec16x8ToM mask))) -(MaskedGreaterUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [6] x y (VPMOVVec16x16ToM mask))) -(MaskedGreaterUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [6] x y (VPMOVVec16x32ToM mask))) -(MaskedGreaterUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [6] x y (VPMOVVec32x4ToM mask))) -(MaskedGreaterUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [6] x y (VPMOVVec32x8ToM mask))) -(MaskedGreaterUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [6] x y (VPMOVVec32x16ToM mask))) -(MaskedGreaterUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [6] x y (VPMOVVec64x2ToM mask))) -(MaskedGreaterUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [6] x y (VPMOVVec64x4ToM mask))) -(MaskedGreaterUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [6] x y (VPMOVVec64x8ToM mask))) -(MaskedGreaterEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [5] x y (VPMOVVec32x4ToM mask))) -(MaskedGreaterEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [5] x y (VPMOVVec32x8ToM mask))) -(MaskedGreaterEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [5] x y (VPMOVVec32x16ToM mask))) -(MaskedGreaterEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [5] x y (VPMOVVec64x2ToM mask))) -(MaskedGreaterEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [5] x y (VPMOVVec64x4ToM mask))) -(MaskedGreaterEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [5] x y (VPMOVVec64x8ToM mask))) -(MaskedGreaterEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [5] x y (VPMOVVec8x16ToM mask))) -(MaskedGreaterEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [5] x y (VPMOVVec8x32ToM mask))) -(MaskedGreaterEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [5] x y (VPMOVVec8x64ToM mask))) -(MaskedGreaterEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [5] x y (VPMOVVec16x8ToM mask))) -(MaskedGreaterEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [5] x y (VPMOVVec16x16ToM mask))) -(MaskedGreaterEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 
[5] x y (VPMOVVec16x32ToM mask))) -(MaskedGreaterEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [5] x y (VPMOVVec32x4ToM mask))) -(MaskedGreaterEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [5] x y (VPMOVVec32x8ToM mask))) -(MaskedGreaterEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [5] x y (VPMOVVec32x16ToM mask))) -(MaskedGreaterEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [5] x y (VPMOVVec64x2ToM mask))) -(MaskedGreaterEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [5] x y (VPMOVVec64x4ToM mask))) -(MaskedGreaterEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [5] x y (VPMOVVec64x8ToM mask))) -(MaskedGreaterEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [5] x y (VPMOVVec8x16ToM mask))) -(MaskedGreaterEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [5] x y (VPMOVVec8x32ToM mask))) -(MaskedGreaterEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [5] x y (VPMOVVec8x64ToM mask))) -(MaskedGreaterEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [5] x y (VPMOVVec16x8ToM mask))) -(MaskedGreaterEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [5] x y (VPMOVVec16x16ToM mask))) -(MaskedGreaterEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [5] x y (VPMOVVec16x32ToM mask))) -(MaskedGreaterEqualUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [5] x y (VPMOVVec32x4ToM mask))) -(MaskedGreaterEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [5] x y (VPMOVVec32x8ToM mask))) -(MaskedGreaterEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [5] x y (VPMOVVec32x16ToM mask))) -(MaskedGreaterEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [5] x y (VPMOVVec64x2ToM mask))) -(MaskedGreaterEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [5] x y (VPMOVVec64x4ToM mask))) -(MaskedGreaterEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [5] x y (VPMOVVec64x8ToM mask))) +(MaskedGreaterFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [14] x y (VPMOVVec32x4ToM mask))) +(MaskedGreaterFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [14] x y (VPMOVVec32x8ToM mask))) +(MaskedGreaterFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [14] x y (VPMOVVec32x16ToM mask))) +(MaskedGreaterFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [14] x y (VPMOVVec64x2ToM mask))) +(MaskedGreaterFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [14] x y (VPMOVVec64x4ToM mask))) +(MaskedGreaterFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [14] x y (VPMOVVec64x8ToM mask))) +(MaskedGreaterInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [14] x y (VPMOVVec8x16ToM mask))) +(MaskedGreaterInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [14] x y (VPMOVVec8x32ToM mask))) +(MaskedGreaterInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [14] x y (VPMOVVec8x64ToM mask))) +(MaskedGreaterInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [14] x y (VPMOVVec16x8ToM mask))) +(MaskedGreaterInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [14] x y (VPMOVVec16x16ToM mask))) +(MaskedGreaterInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [14] x y (VPMOVVec16x32ToM mask))) +(MaskedGreaterInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [14] x y (VPMOVVec32x4ToM mask))) +(MaskedGreaterInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [14] x y (VPMOVVec32x8ToM mask))) +(MaskedGreaterInt32x16 x y mask) => (VPMOVMToVec32x16 
(VPCMPDMasked512 [14] x y (VPMOVVec32x16ToM mask))) +(MaskedGreaterInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [14] x y (VPMOVVec64x2ToM mask))) +(MaskedGreaterInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [14] x y (VPMOVVec64x4ToM mask))) +(MaskedGreaterInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [14] x y (VPMOVVec64x8ToM mask))) +(MaskedGreaterUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [14] x y (VPMOVVec8x16ToM mask))) +(MaskedGreaterUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [14] x y (VPMOVVec8x32ToM mask))) +(MaskedGreaterUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [14] x y (VPMOVVec8x64ToM mask))) +(MaskedGreaterUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [14] x y (VPMOVVec16x8ToM mask))) +(MaskedGreaterUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [14] x y (VPMOVVec16x16ToM mask))) +(MaskedGreaterUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [14] x y (VPMOVVec16x32ToM mask))) +(MaskedGreaterUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [14] x y (VPMOVVec32x4ToM mask))) +(MaskedGreaterUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [14] x y (VPMOVVec32x8ToM mask))) +(MaskedGreaterUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [14] x y (VPMOVVec32x16ToM mask))) +(MaskedGreaterUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [14] x y (VPMOVVec64x2ToM mask))) +(MaskedGreaterUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [14] x y (VPMOVVec64x4ToM mask))) +(MaskedGreaterUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [14] x y (VPMOVVec64x8ToM mask))) +(MaskedGreaterEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [13] x y (VPMOVVec32x4ToM mask))) +(MaskedGreaterEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [13] x y (VPMOVVec32x8ToM mask))) +(MaskedGreaterEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [13] x y (VPMOVVec32x16ToM mask))) +(MaskedGreaterEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [13] x y (VPMOVVec64x2ToM mask))) +(MaskedGreaterEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [13] x y (VPMOVVec64x4ToM mask))) +(MaskedGreaterEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [13] x y (VPMOVVec64x8ToM mask))) +(MaskedGreaterEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [13] x y (VPMOVVec8x16ToM mask))) +(MaskedGreaterEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [13] x y (VPMOVVec8x32ToM mask))) +(MaskedGreaterEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [13] x y (VPMOVVec8x64ToM mask))) +(MaskedGreaterEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [13] x y (VPMOVVec16x8ToM mask))) +(MaskedGreaterEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [13] x y (VPMOVVec16x16ToM mask))) +(MaskedGreaterEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [13] x y (VPMOVVec16x32ToM mask))) +(MaskedGreaterEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [13] x y (VPMOVVec32x4ToM mask))) +(MaskedGreaterEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [13] x y (VPMOVVec32x8ToM mask))) +(MaskedGreaterEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [13] x y (VPMOVVec32x16ToM mask))) +(MaskedGreaterEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [13] x y (VPMOVVec64x2ToM mask))) +(MaskedGreaterEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [13] x y (VPMOVVec64x4ToM mask))) +(MaskedGreaterEqualInt64x8 x y 
mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [13] x y (VPMOVVec64x8ToM mask))) +(MaskedGreaterEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [13] x y (VPMOVVec8x16ToM mask))) +(MaskedGreaterEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [13] x y (VPMOVVec8x32ToM mask))) +(MaskedGreaterEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [13] x y (VPMOVVec8x64ToM mask))) +(MaskedGreaterEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [13] x y (VPMOVVec16x8ToM mask))) +(MaskedGreaterEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [13] x y (VPMOVVec16x16ToM mask))) +(MaskedGreaterEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [13] x y (VPMOVVec16x32ToM mask))) +(MaskedGreaterEqualUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [13] x y (VPMOVVec32x4ToM mask))) +(MaskedGreaterEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [13] x y (VPMOVVec32x8ToM mask))) +(MaskedGreaterEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [13] x y (VPMOVVec32x16ToM mask))) +(MaskedGreaterEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [13] x y (VPMOVVec64x2ToM mask))) +(MaskedGreaterEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [13] x y (VPMOVVec64x4ToM mask))) +(MaskedGreaterEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [13] x y (VPMOVVec64x8ToM mask))) (MaskedIsNanFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [3] x y (VPMOVVec32x4ToM mask))) (MaskedIsNanFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [3] x y (VPMOVVec32x8ToM mask))) (MaskedIsNanFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [3] x y (VPMOVVec32x16ToM mask))) diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 1cf23c4ec5b0e4..6e0726de9b30d2 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -31275,13 +31275,13 @@ func rewriteValueAMD64_OpGreaterEqualFloat32x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualFloat32x16 x y) - // result: (VPMOVMToVec32x16 (VCMPPS512 [5] x y)) + // result: (VPMOVMToVec32x16 (VCMPPS512 [13] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31291,12 +31291,12 @@ func rewriteValueAMD64_OpGreaterEqualFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (GreaterEqualFloat32x4 x y) - // result: (VCMPPS128 [5] x y) + // result: (VCMPPS128 [13] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(5) + v.AuxInt = int8ToAuxInt(13) v.AddArg2(x, y) return true } @@ -31305,12 +31305,12 @@ func rewriteValueAMD64_OpGreaterEqualFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (GreaterEqualFloat32x8 x y) - // result: (VCMPPS256 [5] x y) + // result: (VCMPPS256 [13] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(5) + v.AuxInt = int8ToAuxInt(13) v.AddArg2(x, y) return true } @@ -31319,12 +31319,12 @@ func rewriteValueAMD64_OpGreaterEqualFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (GreaterEqualFloat64x2 x y) - // result: (VCMPPD128 [5] x y) + // result: (VCMPPD128 [13] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(5) + v.AuxInt = int8ToAuxInt(13) v.AddArg2(x, 
y) return true } @@ -31333,12 +31333,12 @@ func rewriteValueAMD64_OpGreaterEqualFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (GreaterEqualFloat64x4 x y) - // result: (VCMPPD256 [5] x y) + // result: (VCMPPD256 [13] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(5) + v.AuxInt = int8ToAuxInt(13) v.AddArg2(x, y) return true } @@ -31349,13 +31349,13 @@ func rewriteValueAMD64_OpGreaterEqualFloat64x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualFloat64x8 x y) - // result: (VPMOVMToVec64x8 (VCMPPD512 [5] x y)) + // result: (VPMOVMToVec64x8 (VCMPPD512 [13] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31367,13 +31367,13 @@ func rewriteValueAMD64_OpGreaterEqualInt16x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualInt16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPW256 [5] x y)) + // result: (VPMOVMToVec16x16 (VPCMPW256 [13] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31385,13 +31385,13 @@ func rewriteValueAMD64_OpGreaterEqualInt16x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualInt16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPW512 [5] x y)) + // result: (VPMOVMToVec16x32 (VPCMPW512 [13] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31403,13 +31403,13 @@ func rewriteValueAMD64_OpGreaterEqualInt16x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualInt16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPW128 [5] x y)) + // result: (VPMOVMToVec16x8 (VPCMPW128 [13] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31421,13 +31421,13 @@ func rewriteValueAMD64_OpGreaterEqualInt32x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualInt32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPD512 [5] x y)) + // result: (VPMOVMToVec32x16 (VPCMPD512 [13] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31439,13 +31439,13 @@ func rewriteValueAMD64_OpGreaterEqualInt32x4(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualInt32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPD128 [5] x y)) + // result: (VPMOVMToVec32x4 (VPCMPD128 [13] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31457,13 +31457,13 @@ func rewriteValueAMD64_OpGreaterEqualInt32x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualInt32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPD256 [5] x y)) + // result: (VPMOVMToVec32x8 (VPCMPD256 [13] x y)) 
for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31475,13 +31475,13 @@ func rewriteValueAMD64_OpGreaterEqualInt64x2(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualInt64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPQ128 [5] x y)) + // result: (VPMOVMToVec64x2 (VPCMPQ128 [13] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31493,13 +31493,13 @@ func rewriteValueAMD64_OpGreaterEqualInt64x4(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualInt64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPQ256 [5] x y)) + // result: (VPMOVMToVec64x4 (VPCMPQ256 [13] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31511,13 +31511,13 @@ func rewriteValueAMD64_OpGreaterEqualInt64x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualInt64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPQ512 [5] x y)) + // result: (VPMOVMToVec64x8 (VPCMPQ512 [13] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31529,13 +31529,13 @@ func rewriteValueAMD64_OpGreaterEqualInt8x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualInt8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPB128 [5] x y)) + // result: (VPMOVMToVec8x16 (VPCMPB128 [13] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31547,13 +31547,13 @@ func rewriteValueAMD64_OpGreaterEqualInt8x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualInt8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPB256 [5] x y)) + // result: (VPMOVMToVec8x32 (VPCMPB256 [13] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31565,13 +31565,13 @@ func rewriteValueAMD64_OpGreaterEqualInt8x64(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualInt8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPB512 [5] x y)) + // result: (VPMOVMToVec8x64 (VPCMPB512 [13] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31583,13 +31583,13 @@ func rewriteValueAMD64_OpGreaterEqualUint16x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualUint16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPUW256 [5] x y)) + // result: (VPMOVMToVec16x16 (VPCMPUW256 [13] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, 
y) v.AddArg(v0) return true @@ -31601,13 +31601,13 @@ func rewriteValueAMD64_OpGreaterEqualUint16x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualUint16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPUW512 [5] x y)) + // result: (VPMOVMToVec16x32 (VPCMPUW512 [13] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31619,13 +31619,13 @@ func rewriteValueAMD64_OpGreaterEqualUint16x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualUint16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPUW128 [5] x y)) + // result: (VPMOVMToVec16x8 (VPCMPUW128 [13] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31637,13 +31637,13 @@ func rewriteValueAMD64_OpGreaterEqualUint32x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualUint32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPUD512 [5] x y)) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [13] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31655,13 +31655,13 @@ func rewriteValueAMD64_OpGreaterEqualUint32x4(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualUint32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPUD128 [5] x y)) + // result: (VPMOVMToVec32x4 (VPCMPUD128 [13] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31673,13 +31673,13 @@ func rewriteValueAMD64_OpGreaterEqualUint32x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualUint32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPUD256 [5] x y)) + // result: (VPMOVMToVec32x8 (VPCMPUD256 [13] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31691,13 +31691,13 @@ func rewriteValueAMD64_OpGreaterEqualUint64x2(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualUint64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPUQ128 [5] x y)) + // result: (VPMOVMToVec64x2 (VPCMPUQ128 [13] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31709,13 +31709,13 @@ func rewriteValueAMD64_OpGreaterEqualUint64x4(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualUint64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPUQ256 [5] x y)) + // result: (VPMOVMToVec64x4 (VPCMPUQ256 [13] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31727,13 +31727,13 @@ func rewriteValueAMD64_OpGreaterEqualUint64x8(v *Value) bool { b := v.Block typ := 
&b.Func.Config.Types // match: (GreaterEqualUint64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPUQ512 [5] x y)) + // result: (VPMOVMToVec64x8 (VPCMPUQ512 [13] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31745,13 +31745,13 @@ func rewriteValueAMD64_OpGreaterEqualUint8x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualUint8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPUB128 [5] x y)) + // result: (VPMOVMToVec8x16 (VPCMPUB128 [13] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31763,13 +31763,13 @@ func rewriteValueAMD64_OpGreaterEqualUint8x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualUint8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPUB256 [5] x y)) + // result: (VPMOVMToVec8x32 (VPCMPUB256 [13] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31781,13 +31781,13 @@ func rewriteValueAMD64_OpGreaterEqualUint8x64(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualUint8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPUB512 [5] x y)) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [13] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31799,13 +31799,13 @@ func rewriteValueAMD64_OpGreaterFloat32x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterFloat32x16 x y) - // result: (VPMOVMToVec32x16 (VCMPPS512 [6] x y)) + // result: (VPMOVMToVec32x16 (VCMPPS512 [14] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31815,12 +31815,12 @@ func rewriteValueAMD64_OpGreaterFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (GreaterFloat32x4 x y) - // result: (VCMPPS128 [6] x y) + // result: (VCMPPS128 [14] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(6) + v.AuxInt = int8ToAuxInt(14) v.AddArg2(x, y) return true } @@ -31829,12 +31829,12 @@ func rewriteValueAMD64_OpGreaterFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (GreaterFloat32x8 x y) - // result: (VCMPPS256 [6] x y) + // result: (VCMPPS256 [14] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(6) + v.AuxInt = int8ToAuxInt(14) v.AddArg2(x, y) return true } @@ -31843,12 +31843,12 @@ func rewriteValueAMD64_OpGreaterFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (GreaterFloat64x2 x y) - // result: (VCMPPD128 [6] x y) + // result: (VCMPPD128 [14] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(6) + v.AuxInt = int8ToAuxInt(14) v.AddArg2(x, y) return true } @@ -31857,12 +31857,12 @@ func rewriteValueAMD64_OpGreaterFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (GreaterFloat64x4 x y) - // result: 
(VCMPPD256 [6] x y) + // result: (VCMPPD256 [14] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(6) + v.AuxInt = int8ToAuxInt(14) v.AddArg2(x, y) return true } @@ -31873,13 +31873,13 @@ func rewriteValueAMD64_OpGreaterFloat64x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterFloat64x8 x y) - // result: (VPMOVMToVec64x8 (VCMPPD512 [6] x y)) + // result: (VPMOVMToVec64x8 (VCMPPD512 [14] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31891,13 +31891,13 @@ func rewriteValueAMD64_OpGreaterInt16x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterInt16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPW512 [6] x y)) + // result: (VPMOVMToVec16x32 (VPCMPW512 [14] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31909,13 +31909,13 @@ func rewriteValueAMD64_OpGreaterInt32x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterInt32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPD512 [6] x y)) + // result: (VPMOVMToVec32x16 (VPCMPD512 [14] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31927,13 +31927,13 @@ func rewriteValueAMD64_OpGreaterInt64x2(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterInt64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPQ128 [6] x y)) + // result: (VPMOVMToVec64x2 (VPCMPQ128 [14] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31945,13 +31945,13 @@ func rewriteValueAMD64_OpGreaterInt64x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterInt64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPQ512 [6] x y)) + // result: (VPMOVMToVec64x8 (VPCMPQ512 [14] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31963,13 +31963,13 @@ func rewriteValueAMD64_OpGreaterInt8x64(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterInt8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPB512 [6] x y)) + // result: (VPMOVMToVec8x64 (VPCMPB512 [14] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31981,13 +31981,13 @@ func rewriteValueAMD64_OpGreaterUint16x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterUint16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPUW256 [6] x y)) + // result: (VPMOVMToVec16x16 (VPCMPUW256 [14] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31999,13 +31999,13 @@ func 
rewriteValueAMD64_OpGreaterUint16x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterUint16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPUW512 [6] x y)) + // result: (VPMOVMToVec16x32 (VPCMPUW512 [14] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -32017,13 +32017,13 @@ func rewriteValueAMD64_OpGreaterUint16x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterUint16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPUW128 [6] x y)) + // result: (VPMOVMToVec16x8 (VPCMPUW128 [14] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -32035,13 +32035,13 @@ func rewriteValueAMD64_OpGreaterUint32x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterUint32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPUD512 [6] x y)) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [14] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -32053,13 +32053,13 @@ func rewriteValueAMD64_OpGreaterUint32x4(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterUint32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPUD128 [6] x y)) + // result: (VPMOVMToVec32x4 (VPCMPUD128 [14] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -32071,13 +32071,13 @@ func rewriteValueAMD64_OpGreaterUint32x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterUint32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPUD256 [6] x y)) + // result: (VPMOVMToVec32x8 (VPCMPUD256 [14] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -32089,13 +32089,13 @@ func rewriteValueAMD64_OpGreaterUint64x2(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterUint64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPUQ128 [6] x y)) + // result: (VPMOVMToVec64x2 (VPCMPUQ128 [14] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -32107,13 +32107,13 @@ func rewriteValueAMD64_OpGreaterUint64x4(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterUint64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPUQ256 [6] x y)) + // result: (VPMOVMToVec64x4 (VPCMPUQ256 [14] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -32125,13 +32125,13 @@ func rewriteValueAMD64_OpGreaterUint64x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterUint64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPUQ512 [6] x y)) + // result: (VPMOVMToVec64x8 (VPCMPUQ512 
[14] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -32143,13 +32143,13 @@ func rewriteValueAMD64_OpGreaterUint8x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterUint8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPUB128 [6] x y)) + // result: (VPMOVMToVec8x16 (VPCMPUB128 [14] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -32161,13 +32161,13 @@ func rewriteValueAMD64_OpGreaterUint8x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterUint8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPUB256 [6] x y)) + // result: (VPMOVMToVec8x32 (VPCMPUB256 [14] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -32179,13 +32179,13 @@ func rewriteValueAMD64_OpGreaterUint8x64(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterUint8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPUB512 [6] x y)) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [14] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -38162,14 +38162,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualFloat32x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualFloat32x16 x y mask) - // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [5] x y (VPMOVVec32x16ToM mask))) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [13] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38184,14 +38184,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualFloat32x4(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualFloat32x4 x y mask) - // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [5] x y (VPMOVVec32x4ToM mask))) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [13] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38206,14 +38206,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualFloat32x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualFloat32x8 x y mask) - // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [5] x y (VPMOVVec32x8ToM mask))) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [13] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) 
v0.AddArg3(x, y, v1) @@ -38228,14 +38228,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualFloat64x2(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualFloat64x2 x y mask) - // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [5] x y (VPMOVVec64x2ToM mask))) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [13] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38250,14 +38250,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualFloat64x4(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualFloat64x4 x y mask) - // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [5] x y (VPMOVVec64x4ToM mask))) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [13] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38272,14 +38272,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualFloat64x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualFloat64x8 x y mask) - // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [5] x y (VPMOVVec64x8ToM mask))) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [13] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38294,14 +38294,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualInt16x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualInt16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [5] x y (VPMOVVec16x16ToM mask))) + // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [13] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38316,14 +38316,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualInt16x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualInt16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [5] x y (VPMOVVec16x32ToM mask))) + // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [13] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38338,14 +38338,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualInt16x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualInt16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [5] x y (VPMOVVec16x8ToM mask))) + // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [13] x y (VPMOVVec16x8ToM 
mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38360,14 +38360,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualInt32x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualInt32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [5] x y (VPMOVVec32x16ToM mask))) + // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [13] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38382,14 +38382,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualInt32x4(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualInt32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [5] x y (VPMOVVec32x4ToM mask))) + // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [13] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38404,14 +38404,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualInt32x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualInt32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [5] x y (VPMOVVec32x8ToM mask))) + // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [13] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38426,14 +38426,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualInt64x2(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualInt64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [5] x y (VPMOVVec64x2ToM mask))) + // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [13] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38448,14 +38448,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualInt64x4(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualInt64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [5] x y (VPMOVVec64x4ToM mask))) + // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [13] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38470,14 +38470,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualInt64x8(v 
*Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualInt64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [5] x y (VPMOVVec64x8ToM mask))) + // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [13] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38492,14 +38492,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualInt8x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualInt8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [5] x y (VPMOVVec8x16ToM mask))) + // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [13] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38514,14 +38514,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualInt8x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualInt8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [5] x y (VPMOVVec8x32ToM mask))) + // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [13] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38536,14 +38536,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualInt8x64(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualInt8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [5] x y (VPMOVVec8x64ToM mask))) + // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [13] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38558,14 +38558,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualUint16x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualUint16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [5] x y (VPMOVVec16x16ToM mask))) + // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [13] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38580,14 +38580,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualUint16x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualUint16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [5] x y (VPMOVVec16x32ToM mask))) + // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [13] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, 
OpAMD64VPCMPUWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38602,14 +38602,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualUint16x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualUint16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [5] x y (VPMOVVec16x8ToM mask))) + // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [13] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38624,14 +38624,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualUint32x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualUint32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [5] x y (VPMOVVec32x16ToM mask))) + // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [13] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38646,14 +38646,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualUint32x4(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualUint32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [5] x y (VPMOVVec32x4ToM mask))) + // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [13] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38668,14 +38668,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualUint32x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualUint32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [5] x y (VPMOVVec32x8ToM mask))) + // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [13] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38690,14 +38690,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualUint64x2(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualUint64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [5] x y (VPMOVVec64x2ToM mask))) + // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [13] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38712,14 +38712,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualUint64x4(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: 
(MaskedGreaterEqualUint64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [5] x y (VPMOVVec64x4ToM mask))) + // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [13] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38734,14 +38734,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualUint64x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualUint64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [5] x y (VPMOVVec64x8ToM mask))) + // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [13] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38756,14 +38756,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualUint8x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualUint8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [5] x y (VPMOVVec8x16ToM mask))) + // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [13] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38778,14 +38778,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualUint8x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualUint8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [5] x y (VPMOVVec8x32ToM mask))) + // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [13] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38800,14 +38800,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualUint8x64(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualUint8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [5] x y (VPMOVVec8x64ToM mask))) + // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [13] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38822,14 +38822,14 @@ func rewriteValueAMD64_OpMaskedGreaterFloat32x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterFloat32x16 x y mask) - // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [6] x y (VPMOVVec32x16ToM mask))) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [14] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + 
v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38844,14 +38844,14 @@ func rewriteValueAMD64_OpMaskedGreaterFloat32x4(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterFloat32x4 x y mask) - // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [6] x y (VPMOVVec32x4ToM mask))) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [14] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38866,14 +38866,14 @@ func rewriteValueAMD64_OpMaskedGreaterFloat32x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterFloat32x8 x y mask) - // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [6] x y (VPMOVVec32x8ToM mask))) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [14] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38888,14 +38888,14 @@ func rewriteValueAMD64_OpMaskedGreaterFloat64x2(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterFloat64x2 x y mask) - // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [6] x y (VPMOVVec64x2ToM mask))) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [14] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38910,14 +38910,14 @@ func rewriteValueAMD64_OpMaskedGreaterFloat64x4(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterFloat64x4 x y mask) - // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [6] x y (VPMOVVec64x4ToM mask))) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [14] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38932,14 +38932,14 @@ func rewriteValueAMD64_OpMaskedGreaterFloat64x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterFloat64x8 x y mask) - // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [6] x y (VPMOVVec64x8ToM mask))) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [14] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38954,14 +38954,14 @@ func rewriteValueAMD64_OpMaskedGreaterInt16x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterInt16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [6] x y (VPMOVVec16x16ToM mask))) + // result: (VPMOVMToVec16x16 
(VPCMPWMasked256 [14] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38976,14 +38976,14 @@ func rewriteValueAMD64_OpMaskedGreaterInt16x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterInt16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [6] x y (VPMOVVec16x32ToM mask))) + // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [14] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38998,14 +38998,14 @@ func rewriteValueAMD64_OpMaskedGreaterInt16x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterInt16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [6] x y (VPMOVVec16x8ToM mask))) + // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [14] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39020,14 +39020,14 @@ func rewriteValueAMD64_OpMaskedGreaterInt32x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterInt32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [6] x y (VPMOVVec32x16ToM mask))) + // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [14] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39042,14 +39042,14 @@ func rewriteValueAMD64_OpMaskedGreaterInt32x4(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterInt32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [6] x y (VPMOVVec32x4ToM mask))) + // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [14] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39064,14 +39064,14 @@ func rewriteValueAMD64_OpMaskedGreaterInt32x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterInt32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [6] x y (VPMOVVec32x8ToM mask))) + // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [14] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39086,14 +39086,14 @@ func rewriteValueAMD64_OpMaskedGreaterInt64x2(v 
*Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterInt64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [6] x y (VPMOVVec64x2ToM mask))) + // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [14] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39108,14 +39108,14 @@ func rewriteValueAMD64_OpMaskedGreaterInt64x4(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterInt64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [6] x y (VPMOVVec64x4ToM mask))) + // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [14] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39130,14 +39130,14 @@ func rewriteValueAMD64_OpMaskedGreaterInt64x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterInt64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [6] x y (VPMOVVec64x8ToM mask))) + // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [14] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39152,14 +39152,14 @@ func rewriteValueAMD64_OpMaskedGreaterInt8x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterInt8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [6] x y (VPMOVVec8x16ToM mask))) + // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [14] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39174,14 +39174,14 @@ func rewriteValueAMD64_OpMaskedGreaterInt8x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterInt8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [6] x y (VPMOVVec8x32ToM mask))) + // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [14] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39196,14 +39196,14 @@ func rewriteValueAMD64_OpMaskedGreaterInt8x64(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterInt8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [6] x y (VPMOVVec8x64ToM mask))) + // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [14] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = 
int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39218,14 +39218,14 @@ func rewriteValueAMD64_OpMaskedGreaterUint16x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterUint16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [6] x y (VPMOVVec16x16ToM mask))) + // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [14] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39240,14 +39240,14 @@ func rewriteValueAMD64_OpMaskedGreaterUint16x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterUint16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [6] x y (VPMOVVec16x32ToM mask))) + // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [14] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39262,14 +39262,14 @@ func rewriteValueAMD64_OpMaskedGreaterUint16x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterUint16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [6] x y (VPMOVVec16x8ToM mask))) + // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [14] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39284,14 +39284,14 @@ func rewriteValueAMD64_OpMaskedGreaterUint32x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterUint32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [6] x y (VPMOVVec32x16ToM mask))) + // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [14] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39306,14 +39306,14 @@ func rewriteValueAMD64_OpMaskedGreaterUint32x4(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterUint32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [6] x y (VPMOVVec32x4ToM mask))) + // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [14] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39328,14 +39328,14 @@ func rewriteValueAMD64_OpMaskedGreaterUint32x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterUint32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [6] x y (VPMOVVec32x8ToM mask))) + // result: 
(VPMOVMToVec32x8 (VPCMPUDMasked256 [14] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39350,14 +39350,14 @@ func rewriteValueAMD64_OpMaskedGreaterUint64x2(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterUint64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [6] x y (VPMOVVec64x2ToM mask))) + // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [14] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39372,14 +39372,14 @@ func rewriteValueAMD64_OpMaskedGreaterUint64x4(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterUint64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [6] x y (VPMOVVec64x4ToM mask))) + // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [14] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39394,14 +39394,14 @@ func rewriteValueAMD64_OpMaskedGreaterUint64x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterUint64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [6] x y (VPMOVVec64x8ToM mask))) + // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [14] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39416,14 +39416,14 @@ func rewriteValueAMD64_OpMaskedGreaterUint8x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterUint8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [6] x y (VPMOVVec8x16ToM mask))) + // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [14] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39438,14 +39438,14 @@ func rewriteValueAMD64_OpMaskedGreaterUint8x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterUint8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [6] x y (VPMOVVec8x32ToM mask))) + // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [14] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39460,14 +39460,14 @@ func 
rewriteValueAMD64_OpMaskedGreaterUint8x64(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterUint8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [6] x y (VPMOVVec8x64ToM mask))) + // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [14] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) From 56ca67682b4ee3baa4f1ab3b1bd1a0872a874ae8 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Tue, 8 Jul 2025 17:26:59 +0000 Subject: [PATCH 066/139] [dev.simd] cmd/compile, simd: remove FP bitwise logic operations. This CL is generated by CL 686555. Change-Id: I0efb86a919692cd97c1c5b6365d77361a30bf7cf Reviewed-on: https://go-review.googlesource.com/c/go/+/686496 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/amd64/simdssa.go | 72 -- .../compile/internal/ssa/_gen/simdAMD64.rules | 48 - .../compile/internal/ssa/_gen/simdAMD64ops.go | 48 - .../internal/ssa/_gen/simdgenericOps.go | 48 - src/cmd/compile/internal/ssa/opGen.go | 1104 ----------------- src/cmd/compile/internal/ssa/rewriteAMD64.go | 552 --------- .../compile/internal/ssagen/simdintrinsics.go | 48 - src/simd/simd_wrapped_test.go | 96 -- src/simd/stubs_amd64.go | 240 ---- 9 files changed, 2256 deletions(-) diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index fbb63ccaa14386..2266f8d7ef204d 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -78,22 +78,10 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VADDSUBPS256, ssa.OpAMD64VADDSUBPD128, ssa.OpAMD64VADDSUBPD256, - ssa.OpAMD64VANDPS128, - ssa.OpAMD64VANDPS256, - ssa.OpAMD64VANDPS512, - ssa.OpAMD64VANDPD128, - ssa.OpAMD64VANDPD256, - ssa.OpAMD64VANDPD512, ssa.OpAMD64VPAND128, ssa.OpAMD64VPAND256, ssa.OpAMD64VPANDD512, ssa.OpAMD64VPANDQ512, - ssa.OpAMD64VANDNPS128, - ssa.OpAMD64VANDNPS256, - ssa.OpAMD64VANDNPS512, - ssa.OpAMD64VANDNPD128, - ssa.OpAMD64VANDNPD256, - ssa.OpAMD64VANDNPD512, ssa.OpAMD64VPANDN128, ssa.OpAMD64VPANDN256, ssa.OpAMD64VPANDND512, @@ -221,12 +209,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMULLQ128, ssa.OpAMD64VPMULLQ256, ssa.OpAMD64VPMULLQ512, - ssa.OpAMD64VORPS128, - ssa.OpAMD64VORPS256, - ssa.OpAMD64VORPS512, - ssa.OpAMD64VORPD128, - ssa.OpAMD64VORPD256, - ssa.OpAMD64VORPD512, ssa.OpAMD64VPOR128, ssa.OpAMD64VPOR256, ssa.OpAMD64VPORD512, @@ -332,12 +314,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSUBQ128, ssa.OpAMD64VPSUBQ256, ssa.OpAMD64VPSUBQ512, - ssa.OpAMD64VXORPS128, - ssa.OpAMD64VXORPS256, - ssa.OpAMD64VXORPS512, - ssa.OpAMD64VXORPD128, - ssa.OpAMD64VXORPD256, - ssa.OpAMD64VXORPD512, ssa.OpAMD64VPXOR128, ssa.OpAMD64VPXOR256, ssa.OpAMD64VPXORD512, @@ -362,24 +338,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPADDQMasked128, ssa.OpAMD64VPADDQMasked256, ssa.OpAMD64VPADDQMasked512, - ssa.OpAMD64VANDPSMasked128, - ssa.OpAMD64VANDPSMasked256, - ssa.OpAMD64VANDPSMasked512, - ssa.OpAMD64VANDPDMasked128, - ssa.OpAMD64VANDPDMasked256, - ssa.OpAMD64VANDPDMasked512, ssa.OpAMD64VPANDDMasked128, ssa.OpAMD64VPANDDMasked256, ssa.OpAMD64VPANDDMasked512, ssa.OpAMD64VPANDQMasked128, ssa.OpAMD64VPANDQMasked256, 
ssa.OpAMD64VPANDQMasked512, - ssa.OpAMD64VANDNPSMasked128, - ssa.OpAMD64VANDNPSMasked256, - ssa.OpAMD64VANDNPSMasked512, - ssa.OpAMD64VANDNPDMasked128, - ssa.OpAMD64VANDNPDMasked256, - ssa.OpAMD64VANDNPDMasked512, ssa.OpAMD64VPANDNDMasked128, ssa.OpAMD64VPANDNDMasked256, ssa.OpAMD64VPANDNDMasked512, @@ -494,12 +458,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMULLQMasked128, ssa.OpAMD64VPMULLQMasked256, ssa.OpAMD64VPMULLQMasked512, - ssa.OpAMD64VORPSMasked128, - ssa.OpAMD64VORPSMasked256, - ssa.OpAMD64VORPSMasked512, - ssa.OpAMD64VORPDMasked128, - ssa.OpAMD64VORPDMasked256, - ssa.OpAMD64VORPDMasked512, ssa.OpAMD64VPORDMasked128, ssa.OpAMD64VPORDMasked256, ssa.OpAMD64VPORDMasked512, @@ -581,12 +539,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSUBQMasked128, ssa.OpAMD64VPSUBQMasked256, ssa.OpAMD64VPSUBQMasked512, - ssa.OpAMD64VXORPSMasked128, - ssa.OpAMD64VXORPSMasked256, - ssa.OpAMD64VXORPSMasked512, - ssa.OpAMD64VXORPDMasked128, - ssa.OpAMD64VXORPDMasked256, - ssa.OpAMD64VXORPDMasked512, ssa.OpAMD64VPXORDMasked128, ssa.OpAMD64VPXORDMasked256, ssa.OpAMD64VPXORDMasked512, @@ -999,24 +951,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPADDQMasked128, ssa.OpAMD64VPADDQMasked256, ssa.OpAMD64VPADDQMasked512, - ssa.OpAMD64VANDPSMasked128, - ssa.OpAMD64VANDPSMasked256, - ssa.OpAMD64VANDPSMasked512, - ssa.OpAMD64VANDPDMasked128, - ssa.OpAMD64VANDPDMasked256, - ssa.OpAMD64VANDPDMasked512, ssa.OpAMD64VPANDDMasked128, ssa.OpAMD64VPANDDMasked256, ssa.OpAMD64VPANDDMasked512, ssa.OpAMD64VPANDQMasked128, ssa.OpAMD64VPANDQMasked256, ssa.OpAMD64VPANDQMasked512, - ssa.OpAMD64VANDNPSMasked128, - ssa.OpAMD64VANDNPSMasked256, - ssa.OpAMD64VANDNPSMasked512, - ssa.OpAMD64VANDNPDMasked128, - ssa.OpAMD64VANDNPDMasked256, - ssa.OpAMD64VANDNPDMasked512, ssa.OpAMD64VPANDNDMasked128, ssa.OpAMD64VPANDNDMasked256, ssa.OpAMD64VPANDNDMasked512, @@ -1179,12 +1119,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMULLQMasked128, ssa.OpAMD64VPMULLQMasked256, ssa.OpAMD64VPMULLQMasked512, - ssa.OpAMD64VORPSMasked128, - ssa.OpAMD64VORPSMasked256, - ssa.OpAMD64VORPSMasked512, - ssa.OpAMD64VORPDMasked128, - ssa.OpAMD64VORPDMasked256, - ssa.OpAMD64VORPDMasked512, ssa.OpAMD64VPORDMasked128, ssa.OpAMD64VPORDMasked256, ssa.OpAMD64VPORDMasked512, @@ -1353,12 +1287,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPDPBUSDMasked128, ssa.OpAMD64VPDPBUSDMasked256, ssa.OpAMD64VPDPBUSDMasked512, - ssa.OpAMD64VXORPSMasked128, - ssa.OpAMD64VXORPSMasked256, - ssa.OpAMD64VXORPSMasked512, - ssa.OpAMD64VXORPDMasked128, - ssa.OpAMD64VXORPDMasked256, - ssa.OpAMD64VXORPDMasked512, ssa.OpAMD64VPXORDMasked128, ssa.OpAMD64VPXORDMasked256, ssa.OpAMD64VPXORDMasked512, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 757020b6c966b1..bcd227d4b99888 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -46,12 +46,6 @@ (AddSubFloat32x8 ...) => (VADDSUBPS256 ...) (AddSubFloat64x2 ...) => (VADDSUBPD128 ...) (AddSubFloat64x4 ...) => (VADDSUBPD256 ...) -(AndFloat32x4 ...) => (VANDPS128 ...) -(AndFloat32x8 ...) => (VANDPS256 ...) -(AndFloat32x16 ...) => (VANDPS512 ...) -(AndFloat64x2 ...) => (VANDPD128 ...) -(AndFloat64x4 ...) => (VANDPD256 ...) -(AndFloat64x8 ...) => (VANDPD512 ...) (AndInt8x16 ...) => (VPAND128 ...) (AndInt8x32 ...) => (VPAND256 ...) (AndInt16x8 ...) 
=> (VPAND128 ...) @@ -72,12 +66,6 @@ (AndUint64x2 ...) => (VPAND128 ...) (AndUint64x4 ...) => (VPAND256 ...) (AndUint64x8 ...) => (VPANDQ512 ...) -(AndNotFloat32x4 ...) => (VANDNPS128 ...) -(AndNotFloat32x8 ...) => (VANDNPS256 ...) -(AndNotFloat32x16 ...) => (VANDNPS512 ...) -(AndNotFloat64x2 ...) => (VANDNPD128 ...) -(AndNotFloat64x4 ...) => (VANDNPD256 ...) -(AndNotFloat64x8 ...) => (VANDNPD512 ...) (AndNotInt8x16 ...) => (VPANDN128 ...) (AndNotInt8x32 ...) => (VPANDN256 ...) (AndNotInt16x8 ...) => (VPANDN128 ...) @@ -410,12 +398,6 @@ (MaskedAddUint64x2 x y mask) => (VPADDQMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedAddUint64x4 x y mask) => (VPADDQMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedAddUint64x8 x y mask) => (VPADDQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedAndFloat32x4 x y mask) => (VANDPSMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedAndFloat32x8 x y mask) => (VANDPSMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedAndFloat32x16 x y mask) => (VANDPSMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedAndFloat64x2 x y mask) => (VANDPDMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedAndFloat64x4 x y mask) => (VANDPDMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedAndFloat64x8 x y mask) => (VANDPDMasked512 x y (VPMOVVec64x8ToM mask)) (MaskedAndInt32x4 x y mask) => (VPANDDMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedAndInt32x8 x y mask) => (VPANDDMasked256 x y (VPMOVVec32x8ToM mask)) (MaskedAndInt32x16 x y mask) => (VPANDDMasked512 x y (VPMOVVec32x16ToM mask)) @@ -428,12 +410,6 @@ (MaskedAndUint64x2 x y mask) => (VPANDQMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedAndUint64x4 x y mask) => (VPANDQMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedAndUint64x8 x y mask) => (VPANDQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedAndNotFloat32x4 x y mask) => (VANDNPSMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedAndNotFloat32x8 x y mask) => (VANDNPSMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedAndNotFloat32x16 x y mask) => (VANDNPSMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedAndNotFloat64x2 x y mask) => (VANDNPDMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedAndNotFloat64x4 x y mask) => (VANDNPDMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedAndNotFloat64x8 x y mask) => (VANDNPDMasked512 x y (VPMOVVec64x8ToM mask)) (MaskedAndNotInt32x4 x y mask) => (VPANDNDMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedAndNotInt32x8 x y mask) => (VPANDNDMasked256 x y (VPMOVVec32x8ToM mask)) (MaskedAndNotInt32x16 x y mask) => (VPANDNDMasked512 x y (VPMOVVec32x16ToM mask)) @@ -812,12 +788,6 @@ (MaskedNotEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [4] x y (VPMOVVec64x2ToM mask))) (MaskedNotEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [4] x y (VPMOVVec64x4ToM mask))) (MaskedNotEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [4] x y (VPMOVVec64x8ToM mask))) -(MaskedOrFloat32x4 x y mask) => (VORPSMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedOrFloat32x8 x y mask) => (VORPSMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedOrFloat32x16 x y mask) => (VORPSMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedOrFloat64x2 x y mask) => (VORPDMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedOrFloat64x4 x y mask) => (VORPDMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedOrFloat64x8 x y mask) => (VORPDMasked512 x y (VPMOVVec64x8ToM mask)) (MaskedOrInt32x4 x y mask) => (VPORDMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedOrInt32x8 x y mask) => (VPORDMasked256 x y (VPMOVVec32x8ToM mask)) (MaskedOrInt32x16 x y mask) => (VPORDMasked512 x y (VPMOVVec32x16ToM mask)) @@ -1139,12 +1109,6 @@ 
(MaskedUnsignedSignedQuadDotProdAccumulateUint32x4 x y z mask) => (VPDPBUSDMasked128 x y z (VPMOVVec32x4ToM mask)) (MaskedUnsignedSignedQuadDotProdAccumulateUint32x8 x y z mask) => (VPDPBUSDMasked256 x y z (VPMOVVec32x8ToM mask)) (MaskedUnsignedSignedQuadDotProdAccumulateUint32x16 x y z mask) => (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedXorFloat32x4 x y mask) => (VXORPSMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedXorFloat32x8 x y mask) => (VXORPSMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedXorFloat32x16 x y mask) => (VXORPSMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedXorFloat64x2 x y mask) => (VXORPDMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedXorFloat64x4 x y mask) => (VXORPDMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedXorFloat64x8 x y mask) => (VXORPDMasked512 x y (VPMOVVec64x8ToM mask)) (MaskedXorInt32x4 x y mask) => (VPXORDMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedXorInt32x8 x y mask) => (VPXORDMasked256 x y (VPMOVVec32x8ToM mask)) (MaskedXorInt32x16 x y mask) => (VPXORDMasked512 x y (VPMOVVec32x16ToM mask)) @@ -1284,12 +1248,6 @@ (NotEqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [4] x y)) (NotEqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [4] x y)) (NotEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [4] x y)) -(OrFloat32x4 ...) => (VORPS128 ...) -(OrFloat32x8 ...) => (VORPS256 ...) -(OrFloat32x16 ...) => (VORPS512 ...) -(OrFloat64x2 ...) => (VORPD128 ...) -(OrFloat64x4 ...) => (VORPD256 ...) -(OrFloat64x8 ...) => (VORPD512 ...) (OrInt8x16 ...) => (VPOR128 ...) (OrInt8x32 ...) => (VPOR256 ...) (OrInt16x8 ...) => (VPOR128 ...) @@ -1699,12 +1657,6 @@ (UnsignedSignedQuadDotProdAccumulateUint32x4 ...) => (VPDPBUSD128 ...) (UnsignedSignedQuadDotProdAccumulateUint32x8 ...) => (VPDPBUSD256 ...) (UnsignedSignedQuadDotProdAccumulateUint32x16 ...) => (VPDPBUSD512 ...) -(XorFloat32x4 ...) => (VXORPS128 ...) -(XorFloat32x8 ...) => (VXORPS256 ...) -(XorFloat32x16 ...) => (VXORPS512 ...) -(XorFloat64x2 ...) => (VXORPD128 ...) -(XorFloat64x4 ...) => (VXORPD256 ...) -(XorFloat64x8 ...) => (VXORPD512 ...) (XorInt8x16 ...) => (VPXOR128 ...) (XorInt8x32 ...) => (VPXOR256 ...) (XorInt16x8 ...) => (VPXOR128 ...) 
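
Note (editorial, not part of the patch series): the simdAMD64.rules hunks above delete the dedicated floating-point bitwise lowerings such as (AndFloat32x4 ...) => (VANDPS128 ...) and (XorFloat64x8 ...) => (VXORPD512 ...), while the integer forms (VPAND*, VPOR*, VPXOR*) stay. As a minimal sketch of what those removed FP ops computed - a lane-wise operation on the IEEE-754 bit patterns - here is the equivalent in plain Go. It deliberately assumes nothing about any replacement API in the simd package; the function names are illustrative only.

package main

import (
	"fmt"
	"math"
)

// andFloat32Lanes ANDs the IEEE-754 bit patterns of a and b lane by lane,
// which is the effect the removed AndFloat32x4 (VANDPS) lowering produced.
func andFloat32Lanes(a, b [4]float32) [4]float32 {
	var r [4]float32
	for i := range a {
		r[i] = math.Float32frombits(math.Float32bits(a[i]) & math.Float32bits(b[i]))
	}
	return r
}

// absFloat32Lanes shows the classic use of an FP bitwise AND: clearing the
// sign bit of every lane with the mask 0x7FFFFFFF to take absolute values.
func absFloat32Lanes(a [4]float32) [4]float32 {
	var r [4]float32
	for i := range a {
		r[i] = math.Float32frombits(math.Float32bits(a[i]) & 0x7FFFFFFF)
	}
	return r
}

func main() {
	x := [4]float32{1.5, -2.25, 3, -4}
	fmt.Println(absFloat32Lanes(x))    // [1.5 2.25 3 4]
	fmt.Println(andFloat32Lanes(x, x)) // ANDing a value with itself is the identity: [1.5 -2.25 3 -4]
}

The same bit-pattern view explains why the removal is harmless at the instruction-selection level: a bitwise operation on float lanes can always be expressed through the remaining integer vector ops on the same registers, which is presumably why only the VANDPS/VANDPD/VORPS/VORPD/VXORPS/VXORPD (and their masked) opcodes are dropped here.
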
diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 8c895d9f455950..892ecc40431839 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -4,8 +4,6 @@ package main func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, fpgp regInfo) []opData { return []opData{ {name: "VADDPS512", argLength: 2, reg: fp21, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VANDPS512", argLength: 2, reg: fp21, asm: "VANDPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VANDNPS512", argLength: 2, reg: fp21, asm: "VANDNPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRCP14PS512", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRSQRT14PS512", argLength: 1, reg: fp11, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VDIVPS512", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -13,8 +11,6 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VFMADDSUB213PS512", argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VFMSUBADD213PS512", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VADDPSMasked512", argLength: 3, reg: fp2kfp, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VANDPSMasked512", argLength: 3, reg: fp2kfp, asm: "VANDPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VANDNPSMasked512", argLength: 3, reg: fp2kfp, asm: "VANDNPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRCP14PSMasked512", argLength: 2, reg: fpkfp, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRSQRT14PSMasked512", argLength: 2, reg: fpkfp, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VDIVPSMasked512", argLength: 3, reg: fp2kfp, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -25,22 +21,16 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VMINPSMasked512", argLength: 3, reg: fp2kfp, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMULPSMasked512", argLength: 3, reg: fp2kfp, asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VSCALEFPSMasked512", argLength: 3, reg: fp2kfp, asm: "VSCALEFPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VORPSMasked512", argLength: 3, reg: fp2kfp, asm: "VORPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VSQRTPSMasked512", argLength: 2, reg: fpkfp, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VSUBPSMasked512", argLength: 3, reg: fp2kfp, asm: "VSUBPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VXORPSMasked512", argLength: 3, reg: fp2kfp, asm: "VXORPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMAXPS512", argLength: 2, reg: fp21, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMINPS512", argLength: 2, reg: fp21, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMULPS512", argLength: 2, reg: fp21, asm: "VMULPS", commutative: 
true, typ: "Vec512", resultInArg0: false}, {name: "VSCALEFPS512", argLength: 2, reg: fp21, asm: "VSCALEFPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VORPS512", argLength: 2, reg: fp21, asm: "VORPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VSQRTPS512", argLength: 1, reg: fp11, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VSUBPS512", argLength: 2, reg: fp21, asm: "VSUBPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VXORPS512", argLength: 2, reg: fp21, asm: "VXORPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VADDPS128", argLength: 2, reg: fp21, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VADDSUBPS128", argLength: 2, reg: fp21, asm: "VADDSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VANDPS128", argLength: 2, reg: fp21, asm: "VANDPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VANDNPS128", argLength: 2, reg: fp21, asm: "VANDNPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRCP14PS128", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRSQRTPS128", argLength: 1, reg: fp11, asm: "VRSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPS128", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -48,8 +38,6 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VFMADDSUB213PS128", argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMSUBADD213PS128", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VADDPSMasked128", argLength: 3, reg: fp2kfp, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VANDPSMasked128", argLength: 3, reg: fp2kfp, asm: "VANDPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VANDNPSMasked128", argLength: 3, reg: fp2kfp, asm: "VANDNPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRCP14PSMasked128", argLength: 2, reg: fpkfp, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRSQRT14PSMasked128", argLength: 2, reg: fpkfp, asm: "VRSQRT14PS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPSMasked128", argLength: 3, reg: fp2kfp, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -60,24 +48,18 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VMINPSMasked128", argLength: 3, reg: fp2kfp, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMULPSMasked128", argLength: 3, reg: fp2kfp, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VSCALEFPSMasked128", argLength: 3, reg: fp2kfp, asm: "VSCALEFPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VORPSMasked128", argLength: 3, reg: fp2kfp, asm: "VORPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VSQRTPSMasked128", argLength: 2, reg: fpkfp, asm: "VSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VSUBPSMasked128", argLength: 3, reg: fp2kfp, asm: "VSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VXORPSMasked128", argLength: 3, reg: fp2kfp, asm: "VXORPS", commutative: true, typ: 
"Vec128", resultInArg0: false}, {name: "VMAXPS128", argLength: 2, reg: fp21, asm: "VMAXPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMINPS128", argLength: 2, reg: fp21, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMULPS128", argLength: 2, reg: fp21, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VSCALEFPS128", argLength: 2, reg: fp21, asm: "VSCALEFPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VORPS128", argLength: 2, reg: fp21, asm: "VORPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VHADDPS128", argLength: 2, reg: fp21, asm: "VHADDPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VHSUBPS128", argLength: 2, reg: fp21, asm: "VHSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VSQRTPS128", argLength: 1, reg: fp11, asm: "VSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VSUBPS128", argLength: 2, reg: fp21, asm: "VSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VXORPS128", argLength: 2, reg: fp21, asm: "VXORPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VADDPS256", argLength: 2, reg: fp21, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VADDSUBPS256", argLength: 2, reg: fp21, asm: "VADDSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VANDPS256", argLength: 2, reg: fp21, asm: "VANDPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VANDNPS256", argLength: 2, reg: fp21, asm: "VANDNPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRCP14PS256", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRSQRTPS256", argLength: 1, reg: fp11, asm: "VRSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPS256", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -85,8 +67,6 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VFMADDSUB213PS256", argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VFMSUBADD213PS256", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VADDPSMasked256", argLength: 3, reg: fp2kfp, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VANDPSMasked256", argLength: 3, reg: fp2kfp, asm: "VANDPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VANDNPSMasked256", argLength: 3, reg: fp2kfp, asm: "VANDNPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRCP14PSMasked256", argLength: 2, reg: fpkfp, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRSQRT14PSMasked256", argLength: 2, reg: fpkfp, asm: "VRSQRT14PS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPSMasked256", argLength: 3, reg: fp2kfp, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -97,24 +77,18 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VMINPSMasked256", argLength: 3, reg: fp2kfp, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMULPSMasked256", argLength: 3, reg: fp2kfp, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VSCALEFPSMasked256", 
argLength: 3, reg: fp2kfp, asm: "VSCALEFPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VORPSMasked256", argLength: 3, reg: fp2kfp, asm: "VORPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VSQRTPSMasked256", argLength: 2, reg: fpkfp, asm: "VSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VSUBPSMasked256", argLength: 3, reg: fp2kfp, asm: "VSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VXORPSMasked256", argLength: 3, reg: fp2kfp, asm: "VXORPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMAXPS256", argLength: 2, reg: fp21, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMINPS256", argLength: 2, reg: fp21, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMULPS256", argLength: 2, reg: fp21, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VSCALEFPS256", argLength: 2, reg: fp21, asm: "VSCALEFPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VORPS256", argLength: 2, reg: fp21, asm: "VORPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VHADDPS256", argLength: 2, reg: fp21, asm: "VHADDPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VHSUBPS256", argLength: 2, reg: fp21, asm: "VHSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VSQRTPS256", argLength: 1, reg: fp11, asm: "VSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VSUBPS256", argLength: 2, reg: fp21, asm: "VSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VXORPS256", argLength: 2, reg: fp21, asm: "VXORPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VADDPD128", argLength: 2, reg: fp21, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VADDSUBPD128", argLength: 2, reg: fp21, asm: "VADDSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VANDPD128", argLength: 2, reg: fp21, asm: "VANDPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VANDNPD128", argLength: 2, reg: fp21, asm: "VANDNPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRCP14PD128", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRSQRT14PD128", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPD128", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -122,8 +96,6 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VFMADDSUB213PD128", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMSUBADD213PD128", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VADDPDMasked128", argLength: 3, reg: fp2kfp, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VANDPDMasked128", argLength: 3, reg: fp2kfp, asm: "VANDPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VANDNPDMasked128", argLength: 3, reg: fp2kfp, asm: "VANDNPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRCP14PDMasked128", argLength: 2, reg: fpkfp, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRSQRT14PDMasked128", argLength: 2, reg: fpkfp, asm: 
"VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPDMasked128", argLength: 3, reg: fp2kfp, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -134,24 +106,18 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VMINPDMasked128", argLength: 3, reg: fp2kfp, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMULPDMasked128", argLength: 3, reg: fp2kfp, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VSCALEFPDMasked128", argLength: 3, reg: fp2kfp, asm: "VSCALEFPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VORPDMasked128", argLength: 3, reg: fp2kfp, asm: "VORPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VSQRTPDMasked128", argLength: 2, reg: fpkfp, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VSUBPDMasked128", argLength: 3, reg: fp2kfp, asm: "VSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VXORPDMasked128", argLength: 3, reg: fp2kfp, asm: "VXORPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMAXPD128", argLength: 2, reg: fp21, asm: "VMAXPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMINPD128", argLength: 2, reg: fp21, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMULPD128", argLength: 2, reg: fp21, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VSCALEFPD128", argLength: 2, reg: fp21, asm: "VSCALEFPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VORPD128", argLength: 2, reg: fp21, asm: "VORPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VHADDPD128", argLength: 2, reg: fp21, asm: "VHADDPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VHSUBPD128", argLength: 2, reg: fp21, asm: "VHSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VSQRTPD128", argLength: 1, reg: fp11, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VSUBPD128", argLength: 2, reg: fp21, asm: "VSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VXORPD128", argLength: 2, reg: fp21, asm: "VXORPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VADDPD256", argLength: 2, reg: fp21, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VADDSUBPD256", argLength: 2, reg: fp21, asm: "VADDSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VANDPD256", argLength: 2, reg: fp21, asm: "VANDPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VANDNPD256", argLength: 2, reg: fp21, asm: "VANDNPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRCP14PD256", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRSQRT14PD256", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPD256", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -159,8 +125,6 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VFMADDSUB213PD256", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VFMSUBADD213PD256", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: 
true}, {name: "VADDPDMasked256", argLength: 3, reg: fp2kfp, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VANDPDMasked256", argLength: 3, reg: fp2kfp, asm: "VANDPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VANDNPDMasked256", argLength: 3, reg: fp2kfp, asm: "VANDNPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRCP14PDMasked256", argLength: 2, reg: fpkfp, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRSQRT14PDMasked256", argLength: 2, reg: fpkfp, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPDMasked256", argLength: 3, reg: fp2kfp, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -171,23 +135,17 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VMINPDMasked256", argLength: 3, reg: fp2kfp, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMULPDMasked256", argLength: 3, reg: fp2kfp, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VSCALEFPDMasked256", argLength: 3, reg: fp2kfp, asm: "VSCALEFPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VORPDMasked256", argLength: 3, reg: fp2kfp, asm: "VORPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VSQRTPDMasked256", argLength: 2, reg: fpkfp, asm: "VSQRTPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VSUBPDMasked256", argLength: 3, reg: fp2kfp, asm: "VSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VXORPDMasked256", argLength: 3, reg: fp2kfp, asm: "VXORPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMAXPD256", argLength: 2, reg: fp21, asm: "VMAXPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMINPD256", argLength: 2, reg: fp21, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMULPD256", argLength: 2, reg: fp21, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VSCALEFPD256", argLength: 2, reg: fp21, asm: "VSCALEFPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VORPD256", argLength: 2, reg: fp21, asm: "VORPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VHADDPD256", argLength: 2, reg: fp21, asm: "VHADDPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VHSUBPD256", argLength: 2, reg: fp21, asm: "VHSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VSQRTPD256", argLength: 1, reg: fp11, asm: "VSQRTPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VSUBPD256", argLength: 2, reg: fp21, asm: "VSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VXORPD256", argLength: 2, reg: fp21, asm: "VXORPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VADDPD512", argLength: 2, reg: fp21, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VANDPD512", argLength: 2, reg: fp21, asm: "VANDPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VANDNPD512", argLength: 2, reg: fp21, asm: "VANDNPD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRCP14PD512", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRSQRT14PD512", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VDIVPD512", 
argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -195,8 +153,6 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VFMADDSUB213PD512", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VFMSUBADD213PD512", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VADDPDMasked512", argLength: 3, reg: fp2kfp, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VANDPDMasked512", argLength: 3, reg: fp2kfp, asm: "VANDPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VANDNPDMasked512", argLength: 3, reg: fp2kfp, asm: "VANDNPD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRCP14PDMasked512", argLength: 2, reg: fpkfp, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRSQRT14PDMasked512", argLength: 2, reg: fpkfp, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VDIVPDMasked512", argLength: 3, reg: fp2kfp, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -207,18 +163,14 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VMINPDMasked512", argLength: 3, reg: fp2kfp, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMULPDMasked512", argLength: 3, reg: fp2kfp, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VSCALEFPDMasked512", argLength: 3, reg: fp2kfp, asm: "VSCALEFPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VORPDMasked512", argLength: 3, reg: fp2kfp, asm: "VORPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VSQRTPDMasked512", argLength: 2, reg: fpkfp, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VSUBPDMasked512", argLength: 3, reg: fp2kfp, asm: "VSUBPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VXORPDMasked512", argLength: 3, reg: fp2kfp, asm: "VXORPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMAXPD512", argLength: 2, reg: fp21, asm: "VMAXPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMINPD512", argLength: 2, reg: fp21, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMULPD512", argLength: 2, reg: fp21, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VSCALEFPD512", argLength: 2, reg: fp21, asm: "VSCALEFPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VORPD512", argLength: 2, reg: fp21, asm: "VORPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VSQRTPD512", argLength: 1, reg: fp11, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VSUBPD512", argLength: 2, reg: fp21, asm: "VSUBPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VXORPD512", argLength: 2, reg: fp21, asm: "VXORPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPABSW256", argLength: 1, reg: fp11, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPADDW256", argLength: 2, reg: fp21, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPCMPEQW256", argLength: 2, reg: fp21, asm: "VPCMPEQW", commutative: true, typ: "Vec256", resultInArg0: false}, diff --git 
a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index c74893b97a210f..54c247eab19932 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -4,8 +4,6 @@ package main func simdGenericOps() []opData { return []opData{ {name: "AddFloat32x16", argLength: 2, commutative: true}, - {name: "AndFloat32x16", argLength: 2, commutative: true}, - {name: "AndNotFloat32x16", argLength: 2, commutative: false}, {name: "ApproximateReciprocalFloat32x16", argLength: 1, commutative: false}, {name: "ApproximateReciprocalOfSqrtFloat32x16", argLength: 1, commutative: false}, {name: "DivFloat32x16", argLength: 2, commutative: false}, @@ -19,8 +17,6 @@ func simdGenericOps() []opData { {name: "LessFloat32x16", argLength: 2, commutative: false}, {name: "LessEqualFloat32x16", argLength: 2, commutative: false}, {name: "MaskedAddFloat32x16", argLength: 3, commutative: true}, - {name: "MaskedAndFloat32x16", argLength: 3, commutative: true}, - {name: "MaskedAndNotFloat32x16", argLength: 3, commutative: false}, {name: "MaskedApproximateReciprocalFloat32x16", argLength: 2, commutative: false}, {name: "MaskedApproximateReciprocalOfSqrtFloat32x16", argLength: 2, commutative: false}, {name: "MaskedDivFloat32x16", argLength: 3, commutative: false}, @@ -38,23 +34,17 @@ func simdGenericOps() []opData { {name: "MaskedMulFloat32x16", argLength: 3, commutative: true}, {name: "MaskedMulByPowOf2Float32x16", argLength: 3, commutative: false}, {name: "MaskedNotEqualFloat32x16", argLength: 3, commutative: true}, - {name: "MaskedOrFloat32x16", argLength: 3, commutative: true}, {name: "MaskedSqrtFloat32x16", argLength: 2, commutative: false}, {name: "MaskedSubFloat32x16", argLength: 3, commutative: false}, - {name: "MaskedXorFloat32x16", argLength: 3, commutative: true}, {name: "MaxFloat32x16", argLength: 2, commutative: true}, {name: "MinFloat32x16", argLength: 2, commutative: true}, {name: "MulFloat32x16", argLength: 2, commutative: true}, {name: "MulByPowOf2Float32x16", argLength: 2, commutative: false}, {name: "NotEqualFloat32x16", argLength: 2, commutative: true}, - {name: "OrFloat32x16", argLength: 2, commutative: true}, {name: "SqrtFloat32x16", argLength: 1, commutative: false}, {name: "SubFloat32x16", argLength: 2, commutative: false}, - {name: "XorFloat32x16", argLength: 2, commutative: true}, {name: "AddFloat32x4", argLength: 2, commutative: true}, {name: "AddSubFloat32x4", argLength: 2, commutative: false}, - {name: "AndFloat32x4", argLength: 2, commutative: true}, - {name: "AndNotFloat32x4", argLength: 2, commutative: false}, {name: "ApproximateReciprocalFloat32x4", argLength: 1, commutative: false}, {name: "ApproximateReciprocalOfSqrtFloat32x4", argLength: 1, commutative: false}, {name: "CeilFloat32x4", argLength: 1, commutative: false}, @@ -70,8 +60,6 @@ func simdGenericOps() []opData { {name: "LessFloat32x4", argLength: 2, commutative: false}, {name: "LessEqualFloat32x4", argLength: 2, commutative: false}, {name: "MaskedAddFloat32x4", argLength: 3, commutative: true}, - {name: "MaskedAndFloat32x4", argLength: 3, commutative: true}, - {name: "MaskedAndNotFloat32x4", argLength: 3, commutative: false}, {name: "MaskedApproximateReciprocalFloat32x4", argLength: 2, commutative: false}, {name: "MaskedApproximateReciprocalOfSqrtFloat32x4", argLength: 2, commutative: false}, {name: "MaskedDivFloat32x4", argLength: 3, commutative: false}, @@ -89,27 +77,21 @@ func simdGenericOps() []opData { {name: 
"MaskedMulFloat32x4", argLength: 3, commutative: true}, {name: "MaskedMulByPowOf2Float32x4", argLength: 3, commutative: false}, {name: "MaskedNotEqualFloat32x4", argLength: 3, commutative: true}, - {name: "MaskedOrFloat32x4", argLength: 3, commutative: true}, {name: "MaskedSqrtFloat32x4", argLength: 2, commutative: false}, {name: "MaskedSubFloat32x4", argLength: 3, commutative: false}, - {name: "MaskedXorFloat32x4", argLength: 3, commutative: true}, {name: "MaxFloat32x4", argLength: 2, commutative: true}, {name: "MinFloat32x4", argLength: 2, commutative: true}, {name: "MulFloat32x4", argLength: 2, commutative: true}, {name: "MulByPowOf2Float32x4", argLength: 2, commutative: false}, {name: "NotEqualFloat32x4", argLength: 2, commutative: true}, - {name: "OrFloat32x4", argLength: 2, commutative: true}, {name: "PairwiseAddFloat32x4", argLength: 2, commutative: false}, {name: "PairwiseSubFloat32x4", argLength: 2, commutative: false}, {name: "RoundFloat32x4", argLength: 1, commutative: false}, {name: "SqrtFloat32x4", argLength: 1, commutative: false}, {name: "SubFloat32x4", argLength: 2, commutative: false}, {name: "TruncFloat32x4", argLength: 1, commutative: false}, - {name: "XorFloat32x4", argLength: 2, commutative: true}, {name: "AddFloat32x8", argLength: 2, commutative: true}, {name: "AddSubFloat32x8", argLength: 2, commutative: false}, - {name: "AndFloat32x8", argLength: 2, commutative: true}, - {name: "AndNotFloat32x8", argLength: 2, commutative: false}, {name: "ApproximateReciprocalFloat32x8", argLength: 1, commutative: false}, {name: "ApproximateReciprocalOfSqrtFloat32x8", argLength: 1, commutative: false}, {name: "CeilFloat32x8", argLength: 1, commutative: false}, @@ -125,8 +107,6 @@ func simdGenericOps() []opData { {name: "LessFloat32x8", argLength: 2, commutative: false}, {name: "LessEqualFloat32x8", argLength: 2, commutative: false}, {name: "MaskedAddFloat32x8", argLength: 3, commutative: true}, - {name: "MaskedAndFloat32x8", argLength: 3, commutative: true}, - {name: "MaskedAndNotFloat32x8", argLength: 3, commutative: false}, {name: "MaskedApproximateReciprocalFloat32x8", argLength: 2, commutative: false}, {name: "MaskedApproximateReciprocalOfSqrtFloat32x8", argLength: 2, commutative: false}, {name: "MaskedDivFloat32x8", argLength: 3, commutative: false}, @@ -144,27 +124,21 @@ func simdGenericOps() []opData { {name: "MaskedMulFloat32x8", argLength: 3, commutative: true}, {name: "MaskedMulByPowOf2Float32x8", argLength: 3, commutative: false}, {name: "MaskedNotEqualFloat32x8", argLength: 3, commutative: true}, - {name: "MaskedOrFloat32x8", argLength: 3, commutative: true}, {name: "MaskedSqrtFloat32x8", argLength: 2, commutative: false}, {name: "MaskedSubFloat32x8", argLength: 3, commutative: false}, - {name: "MaskedXorFloat32x8", argLength: 3, commutative: true}, {name: "MaxFloat32x8", argLength: 2, commutative: true}, {name: "MinFloat32x8", argLength: 2, commutative: true}, {name: "MulFloat32x8", argLength: 2, commutative: true}, {name: "MulByPowOf2Float32x8", argLength: 2, commutative: false}, {name: "NotEqualFloat32x8", argLength: 2, commutative: true}, - {name: "OrFloat32x8", argLength: 2, commutative: true}, {name: "PairwiseAddFloat32x8", argLength: 2, commutative: false}, {name: "PairwiseSubFloat32x8", argLength: 2, commutative: false}, {name: "RoundFloat32x8", argLength: 1, commutative: false}, {name: "SqrtFloat32x8", argLength: 1, commutative: false}, {name: "SubFloat32x8", argLength: 2, commutative: false}, {name: "TruncFloat32x8", argLength: 1, commutative: false}, - {name: 
"XorFloat32x8", argLength: 2, commutative: true}, {name: "AddFloat64x2", argLength: 2, commutative: true}, {name: "AddSubFloat64x2", argLength: 2, commutative: false}, - {name: "AndFloat64x2", argLength: 2, commutative: true}, - {name: "AndNotFloat64x2", argLength: 2, commutative: false}, {name: "ApproximateReciprocalFloat64x2", argLength: 1, commutative: false}, {name: "ApproximateReciprocalOfSqrtFloat64x2", argLength: 1, commutative: false}, {name: "CeilFloat64x2", argLength: 1, commutative: false}, @@ -181,8 +155,6 @@ func simdGenericOps() []opData { {name: "LessFloat64x2", argLength: 2, commutative: false}, {name: "LessEqualFloat64x2", argLength: 2, commutative: false}, {name: "MaskedAddFloat64x2", argLength: 3, commutative: true}, - {name: "MaskedAndFloat64x2", argLength: 3, commutative: true}, - {name: "MaskedAndNotFloat64x2", argLength: 3, commutative: false}, {name: "MaskedApproximateReciprocalFloat64x2", argLength: 2, commutative: false}, {name: "MaskedApproximateReciprocalOfSqrtFloat64x2", argLength: 2, commutative: false}, {name: "MaskedDivFloat64x2", argLength: 3, commutative: false}, @@ -200,27 +172,21 @@ func simdGenericOps() []opData { {name: "MaskedMulFloat64x2", argLength: 3, commutative: true}, {name: "MaskedMulByPowOf2Float64x2", argLength: 3, commutative: false}, {name: "MaskedNotEqualFloat64x2", argLength: 3, commutative: true}, - {name: "MaskedOrFloat64x2", argLength: 3, commutative: true}, {name: "MaskedSqrtFloat64x2", argLength: 2, commutative: false}, {name: "MaskedSubFloat64x2", argLength: 3, commutative: false}, - {name: "MaskedXorFloat64x2", argLength: 3, commutative: true}, {name: "MaxFloat64x2", argLength: 2, commutative: true}, {name: "MinFloat64x2", argLength: 2, commutative: true}, {name: "MulFloat64x2", argLength: 2, commutative: true}, {name: "MulByPowOf2Float64x2", argLength: 2, commutative: false}, {name: "NotEqualFloat64x2", argLength: 2, commutative: true}, - {name: "OrFloat64x2", argLength: 2, commutative: true}, {name: "PairwiseAddFloat64x2", argLength: 2, commutative: false}, {name: "PairwiseSubFloat64x2", argLength: 2, commutative: false}, {name: "RoundFloat64x2", argLength: 1, commutative: false}, {name: "SqrtFloat64x2", argLength: 1, commutative: false}, {name: "SubFloat64x2", argLength: 2, commutative: false}, {name: "TruncFloat64x2", argLength: 1, commutative: false}, - {name: "XorFloat64x2", argLength: 2, commutative: true}, {name: "AddFloat64x4", argLength: 2, commutative: true}, {name: "AddSubFloat64x4", argLength: 2, commutative: false}, - {name: "AndFloat64x4", argLength: 2, commutative: true}, - {name: "AndNotFloat64x4", argLength: 2, commutative: false}, {name: "ApproximateReciprocalFloat64x4", argLength: 1, commutative: false}, {name: "ApproximateReciprocalOfSqrtFloat64x4", argLength: 1, commutative: false}, {name: "CeilFloat64x4", argLength: 1, commutative: false}, @@ -236,8 +202,6 @@ func simdGenericOps() []opData { {name: "LessFloat64x4", argLength: 2, commutative: false}, {name: "LessEqualFloat64x4", argLength: 2, commutative: false}, {name: "MaskedAddFloat64x4", argLength: 3, commutative: true}, - {name: "MaskedAndFloat64x4", argLength: 3, commutative: true}, - {name: "MaskedAndNotFloat64x4", argLength: 3, commutative: false}, {name: "MaskedApproximateReciprocalFloat64x4", argLength: 2, commutative: false}, {name: "MaskedApproximateReciprocalOfSqrtFloat64x4", argLength: 2, commutative: false}, {name: "MaskedDivFloat64x4", argLength: 3, commutative: false}, @@ -255,26 +219,20 @@ func simdGenericOps() []opData { {name: 
"MaskedMulFloat64x4", argLength: 3, commutative: true}, {name: "MaskedMulByPowOf2Float64x4", argLength: 3, commutative: false}, {name: "MaskedNotEqualFloat64x4", argLength: 3, commutative: true}, - {name: "MaskedOrFloat64x4", argLength: 3, commutative: true}, {name: "MaskedSqrtFloat64x4", argLength: 2, commutative: false}, {name: "MaskedSubFloat64x4", argLength: 3, commutative: false}, - {name: "MaskedXorFloat64x4", argLength: 3, commutative: true}, {name: "MaxFloat64x4", argLength: 2, commutative: true}, {name: "MinFloat64x4", argLength: 2, commutative: true}, {name: "MulFloat64x4", argLength: 2, commutative: true}, {name: "MulByPowOf2Float64x4", argLength: 2, commutative: false}, {name: "NotEqualFloat64x4", argLength: 2, commutative: true}, - {name: "OrFloat64x4", argLength: 2, commutative: true}, {name: "PairwiseAddFloat64x4", argLength: 2, commutative: false}, {name: "PairwiseSubFloat64x4", argLength: 2, commutative: false}, {name: "RoundFloat64x4", argLength: 1, commutative: false}, {name: "SqrtFloat64x4", argLength: 1, commutative: false}, {name: "SubFloat64x4", argLength: 2, commutative: false}, {name: "TruncFloat64x4", argLength: 1, commutative: false}, - {name: "XorFloat64x4", argLength: 2, commutative: true}, {name: "AddFloat64x8", argLength: 2, commutative: true}, - {name: "AndFloat64x8", argLength: 2, commutative: true}, - {name: "AndNotFloat64x8", argLength: 2, commutative: false}, {name: "ApproximateReciprocalFloat64x8", argLength: 1, commutative: false}, {name: "ApproximateReciprocalOfSqrtFloat64x8", argLength: 1, commutative: false}, {name: "DivFloat64x8", argLength: 2, commutative: false}, @@ -288,8 +246,6 @@ func simdGenericOps() []opData { {name: "LessFloat64x8", argLength: 2, commutative: false}, {name: "LessEqualFloat64x8", argLength: 2, commutative: false}, {name: "MaskedAddFloat64x8", argLength: 3, commutative: true}, - {name: "MaskedAndFloat64x8", argLength: 3, commutative: true}, - {name: "MaskedAndNotFloat64x8", argLength: 3, commutative: false}, {name: "MaskedApproximateReciprocalFloat64x8", argLength: 2, commutative: false}, {name: "MaskedApproximateReciprocalOfSqrtFloat64x8", argLength: 2, commutative: false}, {name: "MaskedDivFloat64x8", argLength: 3, commutative: false}, @@ -307,19 +263,15 @@ func simdGenericOps() []opData { {name: "MaskedMulFloat64x8", argLength: 3, commutative: true}, {name: "MaskedMulByPowOf2Float64x8", argLength: 3, commutative: false}, {name: "MaskedNotEqualFloat64x8", argLength: 3, commutative: true}, - {name: "MaskedOrFloat64x8", argLength: 3, commutative: true}, {name: "MaskedSqrtFloat64x8", argLength: 2, commutative: false}, {name: "MaskedSubFloat64x8", argLength: 3, commutative: false}, - {name: "MaskedXorFloat64x8", argLength: 3, commutative: true}, {name: "MaxFloat64x8", argLength: 2, commutative: true}, {name: "MinFloat64x8", argLength: 2, commutative: true}, {name: "MulFloat64x8", argLength: 2, commutative: true}, {name: "MulByPowOf2Float64x8", argLength: 2, commutative: false}, {name: "NotEqualFloat64x8", argLength: 2, commutative: true}, - {name: "OrFloat64x8", argLength: 2, commutative: true}, {name: "SqrtFloat64x8", argLength: 1, commutative: false}, {name: "SubFloat64x8", argLength: 2, commutative: false}, - {name: "XorFloat64x8", argLength: 2, commutative: true}, {name: "AbsoluteInt16x16", argLength: 1, commutative: false}, {name: "AddInt16x16", argLength: 2, commutative: true}, {name: "AndInt16x16", argLength: 2, commutative: true}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go 
index 91380e5e089e31..48428ead1f5735 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1197,8 +1197,6 @@ const ( OpAMD64Zero256 OpAMD64Zero512 OpAMD64VADDPS512 - OpAMD64VANDPS512 - OpAMD64VANDNPS512 OpAMD64VRCP14PS512 OpAMD64VRSQRT14PS512 OpAMD64VDIVPS512 @@ -1206,8 +1204,6 @@ const ( OpAMD64VFMADDSUB213PS512 OpAMD64VFMSUBADD213PS512 OpAMD64VADDPSMasked512 - OpAMD64VANDPSMasked512 - OpAMD64VANDNPSMasked512 OpAMD64VRCP14PSMasked512 OpAMD64VRSQRT14PSMasked512 OpAMD64VDIVPSMasked512 @@ -1218,22 +1214,16 @@ const ( OpAMD64VMINPSMasked512 OpAMD64VMULPSMasked512 OpAMD64VSCALEFPSMasked512 - OpAMD64VORPSMasked512 OpAMD64VSQRTPSMasked512 OpAMD64VSUBPSMasked512 - OpAMD64VXORPSMasked512 OpAMD64VMAXPS512 OpAMD64VMINPS512 OpAMD64VMULPS512 OpAMD64VSCALEFPS512 - OpAMD64VORPS512 OpAMD64VSQRTPS512 OpAMD64VSUBPS512 - OpAMD64VXORPS512 OpAMD64VADDPS128 OpAMD64VADDSUBPS128 - OpAMD64VANDPS128 - OpAMD64VANDNPS128 OpAMD64VRCP14PS128 OpAMD64VRSQRTPS128 OpAMD64VDIVPS128 @@ -1241,8 +1231,6 @@ const ( OpAMD64VFMADDSUB213PS128 OpAMD64VFMSUBADD213PS128 OpAMD64VADDPSMasked128 - OpAMD64VANDPSMasked128 - OpAMD64VANDNPSMasked128 OpAMD64VRCP14PSMasked128 OpAMD64VRSQRT14PSMasked128 OpAMD64VDIVPSMasked128 @@ -1253,24 +1241,18 @@ const ( OpAMD64VMINPSMasked128 OpAMD64VMULPSMasked128 OpAMD64VSCALEFPSMasked128 - OpAMD64VORPSMasked128 OpAMD64VSQRTPSMasked128 OpAMD64VSUBPSMasked128 - OpAMD64VXORPSMasked128 OpAMD64VMAXPS128 OpAMD64VMINPS128 OpAMD64VMULPS128 OpAMD64VSCALEFPS128 - OpAMD64VORPS128 OpAMD64VHADDPS128 OpAMD64VHSUBPS128 OpAMD64VSQRTPS128 OpAMD64VSUBPS128 - OpAMD64VXORPS128 OpAMD64VADDPS256 OpAMD64VADDSUBPS256 - OpAMD64VANDPS256 - OpAMD64VANDNPS256 OpAMD64VRCP14PS256 OpAMD64VRSQRTPS256 OpAMD64VDIVPS256 @@ -1278,8 +1260,6 @@ const ( OpAMD64VFMADDSUB213PS256 OpAMD64VFMSUBADD213PS256 OpAMD64VADDPSMasked256 - OpAMD64VANDPSMasked256 - OpAMD64VANDNPSMasked256 OpAMD64VRCP14PSMasked256 OpAMD64VRSQRT14PSMasked256 OpAMD64VDIVPSMasked256 @@ -1290,24 +1270,18 @@ const ( OpAMD64VMINPSMasked256 OpAMD64VMULPSMasked256 OpAMD64VSCALEFPSMasked256 - OpAMD64VORPSMasked256 OpAMD64VSQRTPSMasked256 OpAMD64VSUBPSMasked256 - OpAMD64VXORPSMasked256 OpAMD64VMAXPS256 OpAMD64VMINPS256 OpAMD64VMULPS256 OpAMD64VSCALEFPS256 - OpAMD64VORPS256 OpAMD64VHADDPS256 OpAMD64VHSUBPS256 OpAMD64VSQRTPS256 OpAMD64VSUBPS256 - OpAMD64VXORPS256 OpAMD64VADDPD128 OpAMD64VADDSUBPD128 - OpAMD64VANDPD128 - OpAMD64VANDNPD128 OpAMD64VRCP14PD128 OpAMD64VRSQRT14PD128 OpAMD64VDIVPD128 @@ -1315,8 +1289,6 @@ const ( OpAMD64VFMADDSUB213PD128 OpAMD64VFMSUBADD213PD128 OpAMD64VADDPDMasked128 - OpAMD64VANDPDMasked128 - OpAMD64VANDNPDMasked128 OpAMD64VRCP14PDMasked128 OpAMD64VRSQRT14PDMasked128 OpAMD64VDIVPDMasked128 @@ -1327,24 +1299,18 @@ const ( OpAMD64VMINPDMasked128 OpAMD64VMULPDMasked128 OpAMD64VSCALEFPDMasked128 - OpAMD64VORPDMasked128 OpAMD64VSQRTPDMasked128 OpAMD64VSUBPDMasked128 - OpAMD64VXORPDMasked128 OpAMD64VMAXPD128 OpAMD64VMINPD128 OpAMD64VMULPD128 OpAMD64VSCALEFPD128 - OpAMD64VORPD128 OpAMD64VHADDPD128 OpAMD64VHSUBPD128 OpAMD64VSQRTPD128 OpAMD64VSUBPD128 - OpAMD64VXORPD128 OpAMD64VADDPD256 OpAMD64VADDSUBPD256 - OpAMD64VANDPD256 - OpAMD64VANDNPD256 OpAMD64VRCP14PD256 OpAMD64VRSQRT14PD256 OpAMD64VDIVPD256 @@ -1352,8 +1318,6 @@ const ( OpAMD64VFMADDSUB213PD256 OpAMD64VFMSUBADD213PD256 OpAMD64VADDPDMasked256 - OpAMD64VANDPDMasked256 - OpAMD64VANDNPDMasked256 OpAMD64VRCP14PDMasked256 OpAMD64VRSQRT14PDMasked256 OpAMD64VDIVPDMasked256 @@ -1364,23 +1328,17 @@ const ( OpAMD64VMINPDMasked256 OpAMD64VMULPDMasked256 
OpAMD64VSCALEFPDMasked256 - OpAMD64VORPDMasked256 OpAMD64VSQRTPDMasked256 OpAMD64VSUBPDMasked256 - OpAMD64VXORPDMasked256 OpAMD64VMAXPD256 OpAMD64VMINPD256 OpAMD64VMULPD256 OpAMD64VSCALEFPD256 - OpAMD64VORPD256 OpAMD64VHADDPD256 OpAMD64VHSUBPD256 OpAMD64VSQRTPD256 OpAMD64VSUBPD256 - OpAMD64VXORPD256 OpAMD64VADDPD512 - OpAMD64VANDPD512 - OpAMD64VANDNPD512 OpAMD64VRCP14PD512 OpAMD64VRSQRT14PD512 OpAMD64VDIVPD512 @@ -1388,8 +1346,6 @@ const ( OpAMD64VFMADDSUB213PD512 OpAMD64VFMSUBADD213PD512 OpAMD64VADDPDMasked512 - OpAMD64VANDPDMasked512 - OpAMD64VANDNPDMasked512 OpAMD64VRCP14PDMasked512 OpAMD64VRSQRT14PDMasked512 OpAMD64VDIVPDMasked512 @@ -1400,18 +1356,14 @@ const ( OpAMD64VMINPDMasked512 OpAMD64VMULPDMasked512 OpAMD64VSCALEFPDMasked512 - OpAMD64VORPDMasked512 OpAMD64VSQRTPDMasked512 OpAMD64VSUBPDMasked512 - OpAMD64VXORPDMasked512 OpAMD64VMAXPD512 OpAMD64VMINPD512 OpAMD64VMULPD512 OpAMD64VSCALEFPD512 - OpAMD64VORPD512 OpAMD64VSQRTPD512 OpAMD64VSUBPD512 - OpAMD64VXORPD512 OpAMD64VPABSW256 OpAMD64VPADDW256 OpAMD64VPCMPEQW256 @@ -4341,8 +4293,6 @@ const ( OpAdd32x4 OpZeroSIMD OpAddFloat32x16 - OpAndFloat32x16 - OpAndNotFloat32x16 OpApproximateReciprocalFloat32x16 OpApproximateReciprocalOfSqrtFloat32x16 OpDivFloat32x16 @@ -4356,8 +4306,6 @@ const ( OpLessFloat32x16 OpLessEqualFloat32x16 OpMaskedAddFloat32x16 - OpMaskedAndFloat32x16 - OpMaskedAndNotFloat32x16 OpMaskedApproximateReciprocalFloat32x16 OpMaskedApproximateReciprocalOfSqrtFloat32x16 OpMaskedDivFloat32x16 @@ -4375,23 +4323,17 @@ const ( OpMaskedMulFloat32x16 OpMaskedMulByPowOf2Float32x16 OpMaskedNotEqualFloat32x16 - OpMaskedOrFloat32x16 OpMaskedSqrtFloat32x16 OpMaskedSubFloat32x16 - OpMaskedXorFloat32x16 OpMaxFloat32x16 OpMinFloat32x16 OpMulFloat32x16 OpMulByPowOf2Float32x16 OpNotEqualFloat32x16 - OpOrFloat32x16 OpSqrtFloat32x16 OpSubFloat32x16 - OpXorFloat32x16 OpAddFloat32x4 OpAddSubFloat32x4 - OpAndFloat32x4 - OpAndNotFloat32x4 OpApproximateReciprocalFloat32x4 OpApproximateReciprocalOfSqrtFloat32x4 OpCeilFloat32x4 @@ -4407,8 +4349,6 @@ const ( OpLessFloat32x4 OpLessEqualFloat32x4 OpMaskedAddFloat32x4 - OpMaskedAndFloat32x4 - OpMaskedAndNotFloat32x4 OpMaskedApproximateReciprocalFloat32x4 OpMaskedApproximateReciprocalOfSqrtFloat32x4 OpMaskedDivFloat32x4 @@ -4426,27 +4366,21 @@ const ( OpMaskedMulFloat32x4 OpMaskedMulByPowOf2Float32x4 OpMaskedNotEqualFloat32x4 - OpMaskedOrFloat32x4 OpMaskedSqrtFloat32x4 OpMaskedSubFloat32x4 - OpMaskedXorFloat32x4 OpMaxFloat32x4 OpMinFloat32x4 OpMulFloat32x4 OpMulByPowOf2Float32x4 OpNotEqualFloat32x4 - OpOrFloat32x4 OpPairwiseAddFloat32x4 OpPairwiseSubFloat32x4 OpRoundFloat32x4 OpSqrtFloat32x4 OpSubFloat32x4 OpTruncFloat32x4 - OpXorFloat32x4 OpAddFloat32x8 OpAddSubFloat32x8 - OpAndFloat32x8 - OpAndNotFloat32x8 OpApproximateReciprocalFloat32x8 OpApproximateReciprocalOfSqrtFloat32x8 OpCeilFloat32x8 @@ -4462,8 +4396,6 @@ const ( OpLessFloat32x8 OpLessEqualFloat32x8 OpMaskedAddFloat32x8 - OpMaskedAndFloat32x8 - OpMaskedAndNotFloat32x8 OpMaskedApproximateReciprocalFloat32x8 OpMaskedApproximateReciprocalOfSqrtFloat32x8 OpMaskedDivFloat32x8 @@ -4481,27 +4413,21 @@ const ( OpMaskedMulFloat32x8 OpMaskedMulByPowOf2Float32x8 OpMaskedNotEqualFloat32x8 - OpMaskedOrFloat32x8 OpMaskedSqrtFloat32x8 OpMaskedSubFloat32x8 - OpMaskedXorFloat32x8 OpMaxFloat32x8 OpMinFloat32x8 OpMulFloat32x8 OpMulByPowOf2Float32x8 OpNotEqualFloat32x8 - OpOrFloat32x8 OpPairwiseAddFloat32x8 OpPairwiseSubFloat32x8 OpRoundFloat32x8 OpSqrtFloat32x8 OpSubFloat32x8 OpTruncFloat32x8 - OpXorFloat32x8 OpAddFloat64x2 OpAddSubFloat64x2 - OpAndFloat64x2 
- OpAndNotFloat64x2 OpApproximateReciprocalFloat64x2 OpApproximateReciprocalOfSqrtFloat64x2 OpCeilFloat64x2 @@ -4518,8 +4444,6 @@ const ( OpLessFloat64x2 OpLessEqualFloat64x2 OpMaskedAddFloat64x2 - OpMaskedAndFloat64x2 - OpMaskedAndNotFloat64x2 OpMaskedApproximateReciprocalFloat64x2 OpMaskedApproximateReciprocalOfSqrtFloat64x2 OpMaskedDivFloat64x2 @@ -4537,27 +4461,21 @@ const ( OpMaskedMulFloat64x2 OpMaskedMulByPowOf2Float64x2 OpMaskedNotEqualFloat64x2 - OpMaskedOrFloat64x2 OpMaskedSqrtFloat64x2 OpMaskedSubFloat64x2 - OpMaskedXorFloat64x2 OpMaxFloat64x2 OpMinFloat64x2 OpMulFloat64x2 OpMulByPowOf2Float64x2 OpNotEqualFloat64x2 - OpOrFloat64x2 OpPairwiseAddFloat64x2 OpPairwiseSubFloat64x2 OpRoundFloat64x2 OpSqrtFloat64x2 OpSubFloat64x2 OpTruncFloat64x2 - OpXorFloat64x2 OpAddFloat64x4 OpAddSubFloat64x4 - OpAndFloat64x4 - OpAndNotFloat64x4 OpApproximateReciprocalFloat64x4 OpApproximateReciprocalOfSqrtFloat64x4 OpCeilFloat64x4 @@ -4573,8 +4491,6 @@ const ( OpLessFloat64x4 OpLessEqualFloat64x4 OpMaskedAddFloat64x4 - OpMaskedAndFloat64x4 - OpMaskedAndNotFloat64x4 OpMaskedApproximateReciprocalFloat64x4 OpMaskedApproximateReciprocalOfSqrtFloat64x4 OpMaskedDivFloat64x4 @@ -4592,26 +4508,20 @@ const ( OpMaskedMulFloat64x4 OpMaskedMulByPowOf2Float64x4 OpMaskedNotEqualFloat64x4 - OpMaskedOrFloat64x4 OpMaskedSqrtFloat64x4 OpMaskedSubFloat64x4 - OpMaskedXorFloat64x4 OpMaxFloat64x4 OpMinFloat64x4 OpMulFloat64x4 OpMulByPowOf2Float64x4 OpNotEqualFloat64x4 - OpOrFloat64x4 OpPairwiseAddFloat64x4 OpPairwiseSubFloat64x4 OpRoundFloat64x4 OpSqrtFloat64x4 OpSubFloat64x4 OpTruncFloat64x4 - OpXorFloat64x4 OpAddFloat64x8 - OpAndFloat64x8 - OpAndNotFloat64x8 OpApproximateReciprocalFloat64x8 OpApproximateReciprocalOfSqrtFloat64x8 OpDivFloat64x8 @@ -4625,8 +4535,6 @@ const ( OpLessFloat64x8 OpLessEqualFloat64x8 OpMaskedAddFloat64x8 - OpMaskedAndFloat64x8 - OpMaskedAndNotFloat64x8 OpMaskedApproximateReciprocalFloat64x8 OpMaskedApproximateReciprocalOfSqrtFloat64x8 OpMaskedDivFloat64x8 @@ -4644,19 +4552,15 @@ const ( OpMaskedMulFloat64x8 OpMaskedMulByPowOf2Float64x8 OpMaskedNotEqualFloat64x8 - OpMaskedOrFloat64x8 OpMaskedSqrtFloat64x8 OpMaskedSubFloat64x8 - OpMaskedXorFloat64x8 OpMaxFloat64x8 OpMinFloat64x8 OpMulFloat64x8 OpMulByPowOf2Float64x8 OpNotEqualFloat64x8 - OpOrFloat64x8 OpSqrtFloat64x8 OpSubFloat64x8 - OpXorFloat64x8 OpAbsoluteInt16x16 OpAddInt16x16 OpAndInt16x16 @@ -18675,35 +18579,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VANDPS512", - argLen: 2, - commutative: true, - asm: x86.AVANDPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VANDNPS512", - argLen: 2, - asm: x86.AVANDNPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VRCP14PS512", argLen: 1, @@ -18808,37 +18683,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VANDPSMasked512", - argLen: 3, - commutative: true, - asm: x86.AVANDPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VANDNPSMasked512", - argLen: 3, - asm: x86.AVANDNPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VRCP14PSMasked512", argLen: 2, @@ -18996,22 +18840,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VORPSMasked512", - argLen: 3, - commutative: true, - asm: x86.AVORPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VSQRTPSMasked512", argLen: 2, @@ -19041,22 +18869,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VXORPSMasked512", - argLen: 3, - commutative: true, - asm: x86.AVXORPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VMAXPS512", argLen: 2, @@ -19116,21 +18928,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VORPS512", - argLen: 2, - commutative: true, - asm: x86.AVORPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VSQRTPS512", argLen: 1, @@ -19158,21 +18955,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VXORPS512", - argLen: 2, - commutative: true, - asm: x86.AVXORPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VADDPS128", argLen: 2, @@ -19202,35 +18984,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VANDPS128", - argLen: 2, - commutative: true, - asm: x86.AVANDPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VANDNPS128", - argLen: 2, - asm: x86.AVANDNPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VRCP14PS128", argLen: 1, @@ -19335,37 +19088,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: 
"VANDPSMasked128", - argLen: 3, - commutative: true, - asm: x86.AVANDPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VANDNPSMasked128", - argLen: 3, - asm: x86.AVANDNPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VRCP14PSMasked128", argLen: 2, @@ -19523,22 +19245,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VORPSMasked128", - argLen: 3, - commutative: true, - asm: x86.AVORPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VSQRTPSMasked128", argLen: 2, @@ -19568,22 +19274,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VXORPSMasked128", - argLen: 3, - commutative: true, - asm: x86.AVXORPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VMAXPS128", argLen: 2, @@ -19643,21 +19333,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VORPS128", - argLen: 2, - commutative: true, - asm: x86.AVORPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VHADDPS128", argLen: 2, @@ -19713,21 +19388,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VXORPS128", - argLen: 2, - commutative: true, - asm: x86.AVXORPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VADDPS256", argLen: 2, @@ -19757,35 +19417,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VANDPS256", - argLen: 2, - commutative: true, - asm: x86.AVANDPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VANDNPS256", - argLen: 2, - asm: x86.AVANDNPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VRCP14PS256", argLen: 1, @@ -19890,37 +19521,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VANDPSMasked256", - argLen: 3, - commutative: true, - asm: x86.AVANDPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VANDNPSMasked256", - argLen: 3, - asm: x86.AVANDNPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VRCP14PSMasked256", argLen: 2, @@ -20078,22 +19678,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VORPSMasked256", - argLen: 3, - commutative: true, - asm: x86.AVORPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VSQRTPSMasked256", argLen: 2, @@ -20123,22 +19707,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VXORPSMasked256", - argLen: 3, - commutative: true, - asm: x86.AVXORPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VMAXPS256", argLen: 2, @@ -20198,21 +19766,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VORPS256", - argLen: 2, - commutative: true, - asm: x86.AVORPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VHADDPS256", argLen: 2, @@ -20268,21 +19821,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VXORPS256", - argLen: 2, - commutative: true, - asm: x86.AVXORPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VADDPD128", argLen: 2, @@ -20312,35 +19850,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VANDPD128", - argLen: 2, - commutative: true, - asm: x86.AVANDPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 - }, - }, - }, - { - name: "VANDNPD128", - argLen: 2, - asm: x86.AVANDNPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VRCP14PD128", argLen: 1, @@ -20445,37 +19954,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VANDPDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVANDPD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VANDNPDMasked128", - argLen: 3, - asm: x86.AVANDNPD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VRCP14PDMasked128", argLen: 2, @@ -20633,22 +20111,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VORPDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVORPD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VSQRTPDMasked128", argLen: 2, @@ -20678,22 +20140,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VXORPDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVXORPD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VMAXPD128", argLen: 2, @@ -20753,21 +20199,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VORPD128", - argLen: 2, - commutative: true, - asm: x86.AVORPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VHADDPD128", argLen: 2, @@ -20823,21 +20254,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VXORPD128", - argLen: 2, - commutative: true, - asm: x86.AVXORPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VADDPD256", argLen: 2, @@ -20867,35 +20283,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VANDPD256", - argLen: 2, - commutative: true, - asm: x86.AVANDPD, - reg: regInfo{ - inputs: 
[]inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VANDNPD256", - argLen: 2, - asm: x86.AVANDNPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VRCP14PD256", argLen: 1, @@ -21000,37 +20387,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VANDPDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVANDPD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VANDNPDMasked256", - argLen: 3, - asm: x86.AVANDNPD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VRCP14PDMasked256", argLen: 2, @@ -21188,22 +20544,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VORPDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVORPD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VSQRTPDMasked256", argLen: 2, @@ -21233,22 +20573,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VXORPDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVXORPD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VMAXPD256", argLen: 2, @@ -21308,21 +20632,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VORPD256", - argLen: 2, - commutative: true, - asm: x86.AVORPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VHADDPD256", argLen: 2, @@ -21378,21 +20687,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VXORPD256", - argLen: 2, - commutative: true, - asm: x86.AVXORPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VADDPD512", argLen: 2, @@ -21408,35 +20702,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VANDPD512", - argLen: 2, - commutative: true, - asm: x86.AVANDPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VANDNPD512", - argLen: 2, - asm: x86.AVANDNPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VRCP14PD512", argLen: 1, @@ -21541,37 +20806,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VANDPDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVANDPD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VANDNPDMasked512", - argLen: 3, - asm: x86.AVANDNPD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VRCP14PDMasked512", argLen: 2, @@ -21729,22 +20963,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VORPDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVORPD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VSQRTPDMasked512", argLen: 2, @@ -21774,22 +20992,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VXORPDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVXORPD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VMAXPD512", argLen: 2, @@ -21849,21 +21051,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VORPD512", - argLen: 2, - commutative: true, - asm: x86.AVORPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VSQRTPD512", argLen: 1, @@ -21891,21 +21078,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VXORPD512", - argLen: 2, - commutative: true, - asm: x86.AVXORPD, - reg: regInfo{ 
- inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPABSW256", argLen: 1, @@ -59680,17 +58852,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "AndFloat32x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndNotFloat32x16", - argLen: 2, - generic: true, - }, { name: "ApproximateReciprocalFloat32x16", argLen: 1, @@ -59759,17 +58920,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "MaskedAndFloat32x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedAndNotFloat32x16", - argLen: 3, - generic: true, - }, { name: "MaskedApproximateReciprocalFloat32x16", argLen: 2, @@ -59861,12 +59011,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "MaskedOrFloat32x16", - argLen: 3, - commutative: true, - generic: true, - }, { name: "MaskedSqrtFloat32x16", argLen: 2, @@ -59877,12 +59021,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "MaskedXorFloat32x16", - argLen: 3, - commutative: true, - generic: true, - }, { name: "MaxFloat32x16", argLen: 2, @@ -59912,12 +59050,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "OrFloat32x16", - argLen: 2, - commutative: true, - generic: true, - }, { name: "SqrtFloat32x16", argLen: 1, @@ -59928,12 +59060,6 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, - { - name: "XorFloat32x16", - argLen: 2, - commutative: true, - generic: true, - }, { name: "AddFloat32x4", argLen: 2, @@ -59945,17 +59071,6 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, - { - name: "AndFloat32x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndNotFloat32x4", - argLen: 2, - generic: true, - }, { name: "ApproximateReciprocalFloat32x4", argLen: 1, @@ -60034,17 +59149,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "MaskedAndFloat32x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedAndNotFloat32x4", - argLen: 3, - generic: true, - }, { name: "MaskedApproximateReciprocalFloat32x4", argLen: 2, @@ -60136,12 +59240,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "MaskedOrFloat32x4", - argLen: 3, - commutative: true, - generic: true, - }, { name: "MaskedSqrtFloat32x4", argLen: 2, @@ -60152,12 +59250,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "MaskedXorFloat32x4", - argLen: 3, - commutative: true, - generic: true, - }, { name: "MaxFloat32x4", argLen: 2, @@ -60187,12 +59279,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "OrFloat32x4", - argLen: 2, - commutative: true, - generic: true, - }, { name: "PairwiseAddFloat32x4", argLen: 2, @@ -60223,12 +59309,6 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, - { - name: "XorFloat32x4", - argLen: 2, - commutative: true, - generic: true, - }, { name: "AddFloat32x8", argLen: 2, @@ -60240,17 +59320,6 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, - { - name: "AndFloat32x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndNotFloat32x8", - argLen: 2, - generic: true, - }, { name: "ApproximateReciprocalFloat32x8", argLen: 1, @@ -60329,17 
+59398,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "MaskedAndFloat32x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedAndNotFloat32x8", - argLen: 3, - generic: true, - }, { name: "MaskedApproximateReciprocalFloat32x8", argLen: 2, @@ -60431,12 +59489,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "MaskedOrFloat32x8", - argLen: 3, - commutative: true, - generic: true, - }, { name: "MaskedSqrtFloat32x8", argLen: 2, @@ -60447,12 +59499,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "MaskedXorFloat32x8", - argLen: 3, - commutative: true, - generic: true, - }, { name: "MaxFloat32x8", argLen: 2, @@ -60482,12 +59528,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "OrFloat32x8", - argLen: 2, - commutative: true, - generic: true, - }, { name: "PairwiseAddFloat32x8", argLen: 2, @@ -60518,12 +59558,6 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, - { - name: "XorFloat32x8", - argLen: 2, - commutative: true, - generic: true, - }, { name: "AddFloat64x2", argLen: 2, @@ -60535,17 +59569,6 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, - { - name: "AndFloat64x2", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndNotFloat64x2", - argLen: 2, - generic: true, - }, { name: "ApproximateReciprocalFloat64x2", argLen: 1, @@ -60630,17 +59653,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "MaskedAndFloat64x2", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedAndNotFloat64x2", - argLen: 3, - generic: true, - }, { name: "MaskedApproximateReciprocalFloat64x2", argLen: 2, @@ -60732,12 +59744,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "MaskedOrFloat64x2", - argLen: 3, - commutative: true, - generic: true, - }, { name: "MaskedSqrtFloat64x2", argLen: 2, @@ -60748,12 +59754,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "MaskedXorFloat64x2", - argLen: 3, - commutative: true, - generic: true, - }, { name: "MaxFloat64x2", argLen: 2, @@ -60783,12 +59783,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "OrFloat64x2", - argLen: 2, - commutative: true, - generic: true, - }, { name: "PairwiseAddFloat64x2", argLen: 2, @@ -60819,12 +59813,6 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, - { - name: "XorFloat64x2", - argLen: 2, - commutative: true, - generic: true, - }, { name: "AddFloat64x4", argLen: 2, @@ -60836,17 +59824,6 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, - { - name: "AndFloat64x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndNotFloat64x4", - argLen: 2, - generic: true, - }, { name: "ApproximateReciprocalFloat64x4", argLen: 1, @@ -60925,17 +59902,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "MaskedAndFloat64x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedAndNotFloat64x4", - argLen: 3, - generic: true, - }, { name: "MaskedApproximateReciprocalFloat64x4", argLen: 2, @@ -61027,12 +59993,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "MaskedOrFloat64x4", - argLen: 3, - commutative: true, - generic: true, - }, { name: "MaskedSqrtFloat64x4", argLen: 2, @@ -61043,12 +60003,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: 
"MaskedXorFloat64x4", - argLen: 3, - commutative: true, - generic: true, - }, { name: "MaxFloat64x4", argLen: 2, @@ -61078,12 +60032,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "OrFloat64x4", - argLen: 2, - commutative: true, - generic: true, - }, { name: "PairwiseAddFloat64x4", argLen: 2, @@ -61114,29 +60062,12 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, - { - name: "XorFloat64x4", - argLen: 2, - commutative: true, - generic: true, - }, { name: "AddFloat64x8", argLen: 2, commutative: true, generic: true, }, - { - name: "AndFloat64x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndNotFloat64x8", - argLen: 2, - generic: true, - }, { name: "ApproximateReciprocalFloat64x8", argLen: 1, @@ -61205,17 +60136,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "MaskedAndFloat64x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedAndNotFloat64x8", - argLen: 3, - generic: true, - }, { name: "MaskedApproximateReciprocalFloat64x8", argLen: 2, @@ -61307,12 +60227,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "MaskedOrFloat64x8", - argLen: 3, - commutative: true, - generic: true, - }, { name: "MaskedSqrtFloat64x8", argLen: 2, @@ -61323,12 +60237,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "MaskedXorFloat64x8", - argLen: 3, - commutative: true, - generic: true, - }, { name: "MaxFloat64x8", argLen: 2, @@ -61358,12 +60266,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "OrFloat64x8", - argLen: 2, - commutative: true, - generic: true, - }, { name: "SqrtFloat64x8", argLen: 1, @@ -61374,12 +60276,6 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, - { - name: "XorFloat64x8", - argLen: 2, - commutative: true, - generic: true, - }, { name: "AbsoluteInt16x16", argLen: 1, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 6e0726de9b30d2..2e6a9dfaec1690 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -729,24 +729,6 @@ func rewriteValueAMD64(v *Value) bool { case OpAndB: v.Op = OpAMD64ANDL return true - case OpAndFloat32x16: - v.Op = OpAMD64VANDPS512 - return true - case OpAndFloat32x4: - v.Op = OpAMD64VANDPS128 - return true - case OpAndFloat32x8: - v.Op = OpAMD64VANDPS256 - return true - case OpAndFloat64x2: - v.Op = OpAMD64VANDPD128 - return true - case OpAndFloat64x4: - v.Op = OpAMD64VANDPD256 - return true - case OpAndFloat64x8: - v.Op = OpAMD64VANDPD512 - return true case OpAndInt16x16: v.Op = OpAMD64VPAND256 return true @@ -777,24 +759,6 @@ func rewriteValueAMD64(v *Value) bool { case OpAndInt8x32: v.Op = OpAMD64VPAND256 return true - case OpAndNotFloat32x16: - v.Op = OpAMD64VANDNPS512 - return true - case OpAndNotFloat32x4: - v.Op = OpAMD64VANDNPS128 - return true - case OpAndNotFloat32x8: - v.Op = OpAMD64VANDNPS256 - return true - case OpAndNotFloat64x2: - v.Op = OpAMD64VANDNPD128 - return true - case OpAndNotFloat64x4: - v.Op = OpAMD64VANDNPD256 - return true - case OpAndNotFloat64x8: - v.Op = OpAMD64VANDNPD512 - return true case OpAndNotInt16x16: v.Op = OpAMD64VPANDN256 return true @@ -1877,18 +1841,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedAddUint8x32(v) case OpMaskedAddUint8x64: return rewriteValueAMD64_OpMaskedAddUint8x64(v) - case OpMaskedAndFloat32x16: - return 
rewriteValueAMD64_OpMaskedAndFloat32x16(v) - case OpMaskedAndFloat32x4: - return rewriteValueAMD64_OpMaskedAndFloat32x4(v) - case OpMaskedAndFloat32x8: - return rewriteValueAMD64_OpMaskedAndFloat32x8(v) - case OpMaskedAndFloat64x2: - return rewriteValueAMD64_OpMaskedAndFloat64x2(v) - case OpMaskedAndFloat64x4: - return rewriteValueAMD64_OpMaskedAndFloat64x4(v) - case OpMaskedAndFloat64x8: - return rewriteValueAMD64_OpMaskedAndFloat64x8(v) case OpMaskedAndInt32x16: return rewriteValueAMD64_OpMaskedAndInt32x16(v) case OpMaskedAndInt32x4: @@ -1901,18 +1853,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedAndInt64x4(v) case OpMaskedAndInt64x8: return rewriteValueAMD64_OpMaskedAndInt64x8(v) - case OpMaskedAndNotFloat32x16: - return rewriteValueAMD64_OpMaskedAndNotFloat32x16(v) - case OpMaskedAndNotFloat32x4: - return rewriteValueAMD64_OpMaskedAndNotFloat32x4(v) - case OpMaskedAndNotFloat32x8: - return rewriteValueAMD64_OpMaskedAndNotFloat32x8(v) - case OpMaskedAndNotFloat64x2: - return rewriteValueAMD64_OpMaskedAndNotFloat64x2(v) - case OpMaskedAndNotFloat64x4: - return rewriteValueAMD64_OpMaskedAndNotFloat64x4(v) - case OpMaskedAndNotFloat64x8: - return rewriteValueAMD64_OpMaskedAndNotFloat64x8(v) case OpMaskedAndNotInt32x16: return rewriteValueAMD64_OpMaskedAndNotInt32x16(v) case OpMaskedAndNotInt32x4: @@ -2681,18 +2621,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedNotEqualUint8x32(v) case OpMaskedNotEqualUint8x64: return rewriteValueAMD64_OpMaskedNotEqualUint8x64(v) - case OpMaskedOrFloat32x16: - return rewriteValueAMD64_OpMaskedOrFloat32x16(v) - case OpMaskedOrFloat32x4: - return rewriteValueAMD64_OpMaskedOrFloat32x4(v) - case OpMaskedOrFloat32x8: - return rewriteValueAMD64_OpMaskedOrFloat32x8(v) - case OpMaskedOrFloat64x2: - return rewriteValueAMD64_OpMaskedOrFloat64x2(v) - case OpMaskedOrFloat64x4: - return rewriteValueAMD64_OpMaskedOrFloat64x4(v) - case OpMaskedOrFloat64x8: - return rewriteValueAMD64_OpMaskedOrFloat64x8(v) case OpMaskedOrInt32x16: return rewriteValueAMD64_OpMaskedOrInt32x16(v) case OpMaskedOrInt32x4: @@ -3335,18 +3263,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x4(v) case OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x8: return rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x8(v) - case OpMaskedXorFloat32x16: - return rewriteValueAMD64_OpMaskedXorFloat32x16(v) - case OpMaskedXorFloat32x4: - return rewriteValueAMD64_OpMaskedXorFloat32x4(v) - case OpMaskedXorFloat32x8: - return rewriteValueAMD64_OpMaskedXorFloat32x8(v) - case OpMaskedXorFloat64x2: - return rewriteValueAMD64_OpMaskedXorFloat64x2(v) - case OpMaskedXorFloat64x4: - return rewriteValueAMD64_OpMaskedXorFloat64x4(v) - case OpMaskedXorFloat64x8: - return rewriteValueAMD64_OpMaskedXorFloat64x8(v) case OpMaskedXorInt32x16: return rewriteValueAMD64_OpMaskedXorInt32x16(v) case OpMaskedXorInt32x4: @@ -3823,24 +3739,6 @@ func rewriteValueAMD64(v *Value) bool { case OpOrB: v.Op = OpAMD64ORL return true - case OpOrFloat32x16: - v.Op = OpAMD64VORPS512 - return true - case OpOrFloat32x4: - v.Op = OpAMD64VORPS128 - return true - case OpOrFloat32x8: - v.Op = OpAMD64VORPS256 - return true - case OpOrFloat64x2: - v.Op = OpAMD64VORPD128 - return true - case OpOrFloat64x4: - v.Op = OpAMD64VORPD256 - return true - case OpOrFloat64x8: - v.Op = OpAMD64VORPD512 - return true case OpOrInt16x16: v.Op = OpAMD64VPOR256 return true @@ -5172,24 +5070,6 @@ func rewriteValueAMD64(v 
*Value) bool { case OpXor8: v.Op = OpAMD64XORL return true - case OpXorFloat32x16: - v.Op = OpAMD64VXORPS512 - return true - case OpXorFloat32x4: - v.Op = OpAMD64VXORPS128 - return true - case OpXorFloat32x8: - v.Op = OpAMD64VXORPS256 - return true - case OpXorFloat64x2: - v.Op = OpAMD64VXORPD128 - return true - case OpXorFloat64x4: - v.Op = OpAMD64VXORPD256 - return true - case OpXorFloat64x8: - v.Op = OpAMD64VXORPD512 - return true case OpXorInt16x16: v.Op = OpAMD64VPXOR256 return true @@ -35257,114 +35137,6 @@ func rewriteValueAMD64_OpMaskedAddUint8x64(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedAndFloat32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedAndFloat32x16 x y mask) - // result: (VANDPSMasked512 x y (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VANDPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedAndFloat32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedAndFloat32x4 x y mask) - // result: (VANDPSMasked128 x y (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VANDPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedAndFloat32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedAndFloat32x8 x y mask) - // result: (VANDPSMasked256 x y (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VANDPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedAndFloat64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedAndFloat64x2 x y mask) - // result: (VANDPDMasked128 x y (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VANDPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedAndFloat64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedAndFloat64x4 x y mask) - // result: (VANDPDMasked256 x y (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VANDPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedAndFloat64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedAndFloat64x8 x y mask) - // result: (VANDPDMasked512 x y (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VANDPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} func rewriteValueAMD64_OpMaskedAndInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -35473,114 +35245,6 @@ func rewriteValueAMD64_OpMaskedAndInt64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedAndNotFloat32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := 
v.Args[0] - b := v.Block - // match: (MaskedAndNotFloat32x16 x y mask) - // result: (VANDNPSMasked512 x y (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VANDNPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedAndNotFloat32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedAndNotFloat32x4 x y mask) - // result: (VANDNPSMasked128 x y (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VANDNPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedAndNotFloat32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedAndNotFloat32x8 x y mask) - // result: (VANDNPSMasked256 x y (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VANDNPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedAndNotFloat64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedAndNotFloat64x2 x y mask) - // result: (VANDNPDMasked128 x y (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VANDNPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedAndNotFloat64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedAndNotFloat64x4 x y mask) - // result: (VANDNPDMasked256 x y (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VANDNPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedAndNotFloat64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedAndNotFloat64x8 x y mask) - // result: (VANDNPDMasked512 x y (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VANDNPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} func rewriteValueAMD64_OpMaskedAndNotInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -43261,114 +42925,6 @@ func rewriteValueAMD64_OpMaskedNotEqualUint8x64(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedOrFloat32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrFloat32x16 x y mask) - // result: (VORPSMasked512 x y (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VORPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrFloat32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrFloat32x4 x y mask) - // result: (VORPSMasked128 x y (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VORPSMasked128) - v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrFloat32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrFloat32x8 x y mask) - // result: (VORPSMasked256 x y (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VORPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrFloat64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrFloat64x2 x y mask) - // result: (VORPDMasked128 x y (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VORPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrFloat64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrFloat64x4 x y mask) - // result: (VORPDMasked256 x y (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VORPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrFloat64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrFloat64x8 x y mask) - // result: (VORPDMasked512 x y (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VORPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} func rewriteValueAMD64_OpMaskedOrInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -49267,114 +48823,6 @@ func rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x8(v *Va return true } } -func rewriteValueAMD64_OpMaskedXorFloat32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorFloat32x16 x y mask) - // result: (VXORPSMasked512 x y (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VXORPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedXorFloat32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorFloat32x4 x y mask) - // result: (VXORPSMasked128 x y (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VXORPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedXorFloat32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorFloat32x8 x y mask) - // result: (VXORPSMasked256 x y (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VXORPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedXorFloat64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: 
(MaskedXorFloat64x2 x y mask) - // result: (VXORPDMasked128 x y (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VXORPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedXorFloat64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorFloat64x4 x y mask) - // result: (VXORPDMasked256 x y (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VXORPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedXorFloat64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorFloat64x8 x y mask) - // result: (VXORPDMasked512 x y (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VXORPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} func rewriteValueAMD64_OpMaskedXorInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 27aad1cc0c449d..a476e66845627c 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -57,12 +57,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float32x8.AddSub", opLen2(ssa.OpAddSubFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.AddSub", opLen2(ssa.OpAddSubFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.AddSub", opLen2(ssa.OpAddSubFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x4.And", opLen2(ssa.OpAndFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.And", opLen2(ssa.OpAndFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.And", opLen2(ssa.OpAndFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.And", opLen2(ssa.OpAndFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.And", opLen2(ssa.OpAndFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.And", opLen2(ssa.OpAndFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.And", opLen2(ssa.OpAndInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.And", opLen2(ssa.OpAndInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x8.And", opLen2(ssa.OpAndInt16x8, types.TypeVec128), sys.AMD64) @@ -83,12 +77,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.And", opLen2(ssa.OpAndUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.And", opLen2(ssa.OpAndUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.And", opLen2(ssa.OpAndUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.AndNot", opLen2(ssa.OpAndNotFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.AndNot", opLen2(ssa.OpAndNotFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.AndNot", opLen2(ssa.OpAndNotFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.AndNot", opLen2(ssa.OpAndNotFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.AndNot", opLen2(ssa.OpAndNotFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.AndNot", opLen2(ssa.OpAndNotFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.AndNot", opLen2(ssa.OpAndNotInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.AndNot", opLen2(ssa.OpAndNotInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x8.AndNot", opLen2(ssa.OpAndNotInt16x8, types.TypeVec128), sys.AMD64) @@ -421,12 +409,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.MaskedAdd", opLen3(ssa.OpMaskedAddUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.MaskedAdd", opLen3(ssa.OpMaskedAddUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.MaskedAdd", opLen3(ssa.OpMaskedAddUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedAnd", opLen3(ssa.OpMaskedAndFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedAnd", opLen3(ssa.OpMaskedAndFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedAnd", opLen3(ssa.OpMaskedAndFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedAnd", opLen3(ssa.OpMaskedAndFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedAnd", opLen3(ssa.OpMaskedAndFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedAnd", opLen3(ssa.OpMaskedAndFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.MaskedAnd", opLen3(ssa.OpMaskedAndInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.MaskedAnd", opLen3(ssa.OpMaskedAndInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x16.MaskedAnd", opLen3(ssa.OpMaskedAndInt32x16, types.TypeVec512), sys.AMD64) @@ -439,12 +421,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.MaskedAnd", opLen3(ssa.OpMaskedAndUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.MaskedAnd", opLen3(ssa.OpMaskedAndUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.MaskedAnd", opLen3(ssa.OpMaskedAndUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x16.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt32x16, types.TypeVec512), sys.AMD64) @@ -823,12 +799,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedOr", opLen3(ssa.OpMaskedOrFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedOr", opLen3(ssa.OpMaskedOrFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedOr", opLen3(ssa.OpMaskedOrFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedOr", opLen3(ssa.OpMaskedOrFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedOr", opLen3(ssa.OpMaskedOrFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedOr", opLen3(ssa.OpMaskedOrFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.MaskedOr", opLen3(ssa.OpMaskedOrInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.MaskedOr", opLen3(ssa.OpMaskedOrInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x16.MaskedOr", opLen3(ssa.OpMaskedOrInt32x16, types.TypeVec512), sys.AMD64) @@ -1150,12 +1120,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint32x4.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x8.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x16.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedXor", opLen3(ssa.OpMaskedXorFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedXor", opLen3(ssa.OpMaskedXorFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedXor", opLen3(ssa.OpMaskedXorFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedXor", opLen3(ssa.OpMaskedXorFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedXor", opLen3(ssa.OpMaskedXorFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedXor", opLen3(ssa.OpMaskedXorFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.MaskedXor", opLen3(ssa.OpMaskedXorInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.MaskedXor", opLen3(ssa.OpMaskedXorInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x16.MaskedXor", opLen3(ssa.OpMaskedXorInt32x16, types.TypeVec512), sys.AMD64) @@ -1295,12 +1259,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.NotEqual", opLen2(ssa.OpNotEqualUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.NotEqual", opLen2(ssa.OpNotEqualUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.NotEqual", opLen2(ssa.OpNotEqualUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.Or", opLen2(ssa.OpOrFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.Or", opLen2(ssa.OpOrFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.Or", opLen2(ssa.OpOrFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.Or", opLen2(ssa.OpOrFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.Or", opLen2(ssa.OpOrFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.Or", opLen2(ssa.OpOrFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.Or", opLen2(ssa.OpOrInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Or", opLen2(ssa.OpOrInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x8.Or", opLen2(ssa.OpOrInt16x8, types.TypeVec128), sys.AMD64) @@ -1710,12 +1668,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint32x4.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateUint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x8.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x16.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.Xor", opLen2(ssa.OpXorFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.Xor", opLen2(ssa.OpXorFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.Xor", opLen2(ssa.OpXorFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.Xor", opLen2(ssa.OpXorFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.Xor", opLen2(ssa.OpXorFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.Xor", opLen2(ssa.OpXorFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.Xor", opLen2(ssa.OpXorInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Xor", opLen2(ssa.OpXorInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x8.Xor", opLen2(ssa.OpXorInt16x8, types.TypeVec128), sys.AMD64) diff --git a/src/simd/simd_wrapped_test.go b/src/simd/simd_wrapped_test.go index b3f18b383772b6..d4cf7f6b7413e2 100644 --- a/src/simd/simd_wrapped_test.go +++ b/src/simd/simd_wrapped_test.go @@ -20,10 +20,6 @@ func testFloat32x4Binary(t *testing.T, v0 []float32, v1 []float32, want []float3 gotv = vec0.Add(vec1) case "AddSub": gotv = vec0.AddSub(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) case "Div": gotv = vec0.Div(vec1) case "Max": @@ -34,16 +30,12 @@ func testFloat32x4Binary(t *testing.T, v0 []float32, v1 []float32, want []float3 gotv = vec0.Mul(vec1) case "MulByPowOf2": gotv = vec0.MulByPowOf2(vec1) - case "Or": - gotv = vec0.Or(vec1) case "PairwiseAdd": gotv = vec0.PairwiseAdd(vec1) case "PairwiseSub": gotv = vec0.PairwiseSub(vec1) case "Sub": gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) default: t.Errorf("Unknown method: Float32x4.%s", which) @@ -66,10 +58,6 @@ func testFloat32x4BinaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []in switch which { case "MaskedAdd": gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x4()) - case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x4()) - case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x4()) case "MaskedDiv": gotv = vec0.MaskedDiv(vec1, vec2.AsMask32x4()) case "MaskedMax": @@ -80,12 +68,8 @@ func testFloat32x4BinaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []in gotv = vec0.MaskedMul(vec1, vec2.AsMask32x4()) case "MaskedMulByPowOf2": gotv = vec0.MaskedMulByPowOf2(vec1, vec2.AsMask32x4()) - case "MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask32x4()) case "MaskedSub": gotv = vec0.MaskedSub(vec1, vec2.AsMask32x4()) - case "MaskedXor": - gotv = vec0.MaskedXor(vec1, vec2.AsMask32x4()) default: t.Errorf("Unknown method: Float32x4.%s", which) @@ -286,10 +270,6 @@ func testFloat32x8Binary(t *testing.T, v0 []float32, v1 []float32, want []float3 gotv = vec0.Add(vec1) case "AddSub": gotv = vec0.AddSub(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) case "Div": gotv = vec0.Div(vec1) case "Max": @@ -300,16 +280,12 @@ func testFloat32x8Binary(t *testing.T, v0 []float32, v1 []float32, want []float3 gotv = vec0.Mul(vec1) case "MulByPowOf2": gotv = 
vec0.MulByPowOf2(vec1) - case "Or": - gotv = vec0.Or(vec1) case "PairwiseAdd": gotv = vec0.PairwiseAdd(vec1) case "PairwiseSub": gotv = vec0.PairwiseSub(vec1) case "Sub": gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) default: t.Errorf("Unknown method: Float32x8.%s", which) @@ -332,10 +308,6 @@ func testFloat32x8BinaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []in switch which { case "MaskedAdd": gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x8()) - case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x8()) - case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x8()) case "MaskedDiv": gotv = vec0.MaskedDiv(vec1, vec2.AsMask32x8()) case "MaskedMax": @@ -346,12 +318,8 @@ func testFloat32x8BinaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []in gotv = vec0.MaskedMul(vec1, vec2.AsMask32x8()) case "MaskedMulByPowOf2": gotv = vec0.MaskedMulByPowOf2(vec1, vec2.AsMask32x8()) - case "MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask32x8()) case "MaskedSub": gotv = vec0.MaskedSub(vec1, vec2.AsMask32x8()) - case "MaskedXor": - gotv = vec0.MaskedXor(vec1, vec2.AsMask32x8()) default: t.Errorf("Unknown method: Float32x8.%s", which) @@ -550,10 +518,6 @@ func testFloat32x16Binary(t *testing.T, v0 []float32, v1 []float32, want []float switch which { case "Add": gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) case "Div": gotv = vec0.Div(vec1) case "Max": @@ -564,12 +528,8 @@ func testFloat32x16Binary(t *testing.T, v0 []float32, v1 []float32, want []float gotv = vec0.Mul(vec1) case "MulByPowOf2": gotv = vec0.MulByPowOf2(vec1) - case "Or": - gotv = vec0.Or(vec1) case "Sub": gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) default: t.Errorf("Unknown method: Float32x16.%s", which) @@ -592,10 +552,6 @@ func testFloat32x16BinaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []i switch which { case "MaskedAdd": gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x16()) - case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x16()) - case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x16()) case "MaskedDiv": gotv = vec0.MaskedDiv(vec1, vec2.AsMask32x16()) case "MaskedMax": @@ -606,12 +562,8 @@ func testFloat32x16BinaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []i gotv = vec0.MaskedMul(vec1, vec2.AsMask32x16()) case "MaskedMulByPowOf2": gotv = vec0.MaskedMulByPowOf2(vec1, vec2.AsMask32x16()) - case "MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask32x16()) case "MaskedSub": gotv = vec0.MaskedSub(vec1, vec2.AsMask32x16()) - case "MaskedXor": - gotv = vec0.MaskedXor(vec1, vec2.AsMask32x16()) default: t.Errorf("Unknown method: Float32x16.%s", which) @@ -804,10 +756,6 @@ func testFloat64x2Binary(t *testing.T, v0 []float64, v1 []float64, want []float6 gotv = vec0.Add(vec1) case "AddSub": gotv = vec0.AddSub(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) case "Div": gotv = vec0.Div(vec1) case "DotProdBroadcast": @@ -820,16 +768,12 @@ func testFloat64x2Binary(t *testing.T, v0 []float64, v1 []float64, want []float6 gotv = vec0.Mul(vec1) case "MulByPowOf2": gotv = vec0.MulByPowOf2(vec1) - case "Or": - gotv = vec0.Or(vec1) case "PairwiseAdd": gotv = vec0.PairwiseAdd(vec1) case "PairwiseSub": gotv = vec0.PairwiseSub(vec1) case "Sub": gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) default: t.Errorf("Unknown method: Float64x2.%s", which) @@ -852,10 +796,6 @@ func testFloat64x2BinaryMasked(t *testing.T, v0 []float64, v1 []float64, v2 
[]in switch which { case "MaskedAdd": gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x2()) - case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x2()) - case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask64x2()) case "MaskedDiv": gotv = vec0.MaskedDiv(vec1, vec2.AsMask64x2()) case "MaskedMax": @@ -866,12 +806,8 @@ func testFloat64x2BinaryMasked(t *testing.T, v0 []float64, v1 []float64, v2 []in gotv = vec0.MaskedMul(vec1, vec2.AsMask64x2()) case "MaskedMulByPowOf2": gotv = vec0.MaskedMulByPowOf2(vec1, vec2.AsMask64x2()) - case "MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask64x2()) case "MaskedSub": gotv = vec0.MaskedSub(vec1, vec2.AsMask64x2()) - case "MaskedXor": - gotv = vec0.MaskedXor(vec1, vec2.AsMask64x2()) default: t.Errorf("Unknown method: Float64x2.%s", which) @@ -1072,10 +1008,6 @@ func testFloat64x4Binary(t *testing.T, v0 []float64, v1 []float64, want []float6 gotv = vec0.Add(vec1) case "AddSub": gotv = vec0.AddSub(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) case "Div": gotv = vec0.Div(vec1) case "Max": @@ -1086,16 +1018,12 @@ func testFloat64x4Binary(t *testing.T, v0 []float64, v1 []float64, want []float6 gotv = vec0.Mul(vec1) case "MulByPowOf2": gotv = vec0.MulByPowOf2(vec1) - case "Or": - gotv = vec0.Or(vec1) case "PairwiseAdd": gotv = vec0.PairwiseAdd(vec1) case "PairwiseSub": gotv = vec0.PairwiseSub(vec1) case "Sub": gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) default: t.Errorf("Unknown method: Float64x4.%s", which) @@ -1118,10 +1046,6 @@ func testFloat64x4BinaryMasked(t *testing.T, v0 []float64, v1 []float64, v2 []in switch which { case "MaskedAdd": gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x4()) - case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x4()) - case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask64x4()) case "MaskedDiv": gotv = vec0.MaskedDiv(vec1, vec2.AsMask64x4()) case "MaskedMax": @@ -1132,12 +1056,8 @@ func testFloat64x4BinaryMasked(t *testing.T, v0 []float64, v1 []float64, v2 []in gotv = vec0.MaskedMul(vec1, vec2.AsMask64x4()) case "MaskedMulByPowOf2": gotv = vec0.MaskedMulByPowOf2(vec1, vec2.AsMask64x4()) - case "MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask64x4()) case "MaskedSub": gotv = vec0.MaskedSub(vec1, vec2.AsMask64x4()) - case "MaskedXor": - gotv = vec0.MaskedXor(vec1, vec2.AsMask64x4()) default: t.Errorf("Unknown method: Float64x4.%s", which) @@ -1336,10 +1256,6 @@ func testFloat64x8Binary(t *testing.T, v0 []float64, v1 []float64, want []float6 switch which { case "Add": gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) case "Div": gotv = vec0.Div(vec1) case "Max": @@ -1350,12 +1266,8 @@ func testFloat64x8Binary(t *testing.T, v0 []float64, v1 []float64, want []float6 gotv = vec0.Mul(vec1) case "MulByPowOf2": gotv = vec0.MulByPowOf2(vec1) - case "Or": - gotv = vec0.Or(vec1) case "Sub": gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) default: t.Errorf("Unknown method: Float64x8.%s", which) @@ -1378,10 +1290,6 @@ func testFloat64x8BinaryMasked(t *testing.T, v0 []float64, v1 []float64, v2 []in switch which { case "MaskedAdd": gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x8()) - case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x8()) - case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask64x8()) case "MaskedDiv": gotv = vec0.MaskedDiv(vec1, vec2.AsMask64x8()) case "MaskedMax": @@ -1392,12 +1300,8 @@ func testFloat64x8BinaryMasked(t *testing.T, v0 []float64, v1 
[]float64, v2 []in gotv = vec0.MaskedMul(vec1, vec2.AsMask64x8()) case "MaskedMulByPowOf2": gotv = vec0.MaskedMulByPowOf2(vec1, vec2.AsMask64x8()) - case "MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask64x8()) case "MaskedSub": gotv = vec0.MaskedSub(vec1, vec2.AsMask64x8()) - case "MaskedXor": - gotv = vec0.MaskedXor(vec1, vec2.AsMask64x8()) default: t.Errorf("Unknown method: Float64x8.%s", which) diff --git a/src/simd/stubs_amd64.go b/src/simd/stubs_amd64.go index 3453843d0f7991..fa99bba7bb7bed 100644 --- a/src/simd/stubs_amd64.go +++ b/src/simd/stubs_amd64.go @@ -242,36 +242,6 @@ func (x Float64x4) AddSub(y Float64x4) Float64x4 /* And */ -// And performs a bitwise AND operation between two vectors. -// -// Asm: VANDPS, CPU Feature: AVX -func (x Float32x4) And(y Float32x4) Float32x4 - -// And performs a bitwise AND operation between two vectors. -// -// Asm: VANDPS, CPU Feature: AVX -func (x Float32x8) And(y Float32x8) Float32x8 - -// And performs a masked bitwise AND operation between two vectors. -// -// Asm: VANDPS, CPU Feature: AVX512EVEX -func (x Float32x16) And(y Float32x16) Float32x16 - -// And performs a bitwise AND operation between two vectors. -// -// Asm: VANDPD, CPU Feature: AVX -func (x Float64x2) And(y Float64x2) Float64x2 - -// And performs a bitwise AND operation between two vectors. -// -// Asm: VANDPD, CPU Feature: AVX -func (x Float64x4) And(y Float64x4) Float64x4 - -// And performs a masked bitwise AND operation between two vectors. -// -// Asm: VANDPD, CPU Feature: AVX512EVEX -func (x Float64x8) And(y Float64x8) Float64x8 - // And performs a bitwise AND operation between two vectors. // // Asm: VPAND, CPU Feature: AVX @@ -374,36 +344,6 @@ func (x Uint64x8) And(y Uint64x8) Uint64x8 /* AndNot */ -// AndNot performs a bitwise AND NOT operation between two vectors. -// -// Asm: VANDNPS, CPU Feature: AVX -func (x Float32x4) AndNot(y Float32x4) Float32x4 - -// AndNot performs a bitwise AND NOT operation between two vectors. -// -// Asm: VANDNPS, CPU Feature: AVX -func (x Float32x8) AndNot(y Float32x8) Float32x8 - -// AndNot performs a masked bitwise AND NOT operation between two vectors. -// -// Asm: VANDNPS, CPU Feature: AVX512EVEX -func (x Float32x16) AndNot(y Float32x16) Float32x16 - -// AndNot performs a bitwise AND NOT operation between two vectors. -// -// Asm: VANDNPD, CPU Feature: AVX -func (x Float64x2) AndNot(y Float64x2) Float64x2 - -// AndNot performs a bitwise AND NOT operation between two vectors. -// -// Asm: VANDNPD, CPU Feature: AVX -func (x Float64x4) AndNot(y Float64x4) Float64x4 - -// AndNot performs a masked bitwise AND NOT operation between two vectors. -// -// Asm: VANDNPD, CPU Feature: AVX512EVEX -func (x Float64x8) AndNot(y Float64x8) Float64x8 - // AndNot performs a bitwise AND NOT operation between two vectors. // // Asm: VPANDN, CPU Feature: AVX @@ -2148,36 +2088,6 @@ func (x Uint64x8) MaskedAdd(y Uint64x8, z Mask64x8) Uint64x8 /* MaskedAnd */ -// And performs a masked bitwise AND operation between two vectors. -// -// Asm: VANDPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedAnd(y Float32x4, z Mask32x4) Float32x4 - -// And performs a masked bitwise AND operation between two vectors. -// -// Asm: VANDPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedAnd(y Float32x8, z Mask32x8) Float32x8 - -// And performs a masked bitwise AND operation between two vectors. 
-// -// Asm: VANDPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedAnd(y Float32x16, z Mask32x16) Float32x16 - -// And performs a masked bitwise AND operation between two vectors. -// -// Asm: VANDPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedAnd(y Float64x2, z Mask64x2) Float64x2 - -// And performs a masked bitwise AND operation between two vectors. -// -// Asm: VANDPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedAnd(y Float64x4, z Mask64x4) Float64x4 - -// And performs a masked bitwise AND operation between two vectors. -// -// Asm: VANDPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedAnd(y Float64x8, z Mask64x8) Float64x8 - // And performs a masked bitwise AND operation between two vectors. // // Asm: VPANDD, CPU Feature: AVX512EVEX @@ -2240,36 +2150,6 @@ func (x Uint64x8) MaskedAnd(y Uint64x8, z Mask64x8) Uint64x8 /* MaskedAndNot */ -// AndNot performs a masked bitwise AND NOT operation between two vectors. -// -// Asm: VANDNPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedAndNot(y Float32x4, z Mask32x4) Float32x4 - -// AndNot performs a masked bitwise AND NOT operation between two vectors. -// -// Asm: VANDNPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedAndNot(y Float32x8, z Mask32x8) Float32x8 - -// AndNot performs a masked bitwise AND NOT operation between two vectors. -// -// Asm: VANDNPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedAndNot(y Float32x16, z Mask32x16) Float32x16 - -// AndNot performs a masked bitwise AND NOT operation between two vectors. -// -// Asm: VANDNPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedAndNot(y Float64x2, z Mask64x2) Float64x2 - -// AndNot performs a masked bitwise AND NOT operation between two vectors. -// -// Asm: VANDNPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedAndNot(y Float64x4, z Mask64x4) Float64x4 - -// AndNot performs a masked bitwise AND NOT operation between two vectors. -// -// Asm: VANDNPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedAndNot(y Float64x8, z Mask64x8) Float64x8 - // AndNot performs a masked bitwise AND NOT operation between two vectors. // // Asm: VPANDND, CPU Feature: AVX512EVEX @@ -4252,36 +4132,6 @@ func (x Uint64x8) MaskedNotEqual(y Uint64x8, z Mask64x8) Mask64x8 /* MaskedOr */ -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VORPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedOr(y Float32x4, z Mask32x4) Float32x4 - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VORPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedOr(y Float32x8, z Mask32x8) Float32x8 - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VORPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedOr(y Float32x16, z Mask32x16) Float32x16 - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VORPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedOr(y Float64x2, z Mask64x2) Float64x2 - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VORPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedOr(y Float64x4, z Mask64x4) Float64x4 - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VORPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedOr(y Float64x8, z Mask64x8) Float64x8 - // Or performs a masked bitwise OR operation between two vectors. 
// // Asm: VPORD, CPU Feature: AVX512EVEX @@ -6021,36 +5871,6 @@ func (x Uint32x16) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x /* MaskedXor */ -// Xor performs a masked bitwise XOR operation between two vectors. -// -// Asm: VXORPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedXor(y Float32x4, z Mask32x4) Float32x4 - -// Xor performs a masked bitwise XOR operation between two vectors. -// -// Asm: VXORPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedXor(y Float32x8, z Mask32x8) Float32x8 - -// Xor performs a masked bitwise XOR operation between two vectors. -// -// Asm: VXORPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedXor(y Float32x16, z Mask32x16) Float32x16 - -// Xor performs a masked bitwise XOR operation between two vectors. -// -// Asm: VXORPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedXor(y Float64x2, z Mask64x2) Float64x2 - -// Xor performs a masked bitwise XOR operation between two vectors. -// -// Asm: VXORPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedXor(y Float64x4, z Mask64x4) Float64x4 - -// Xor performs a masked bitwise XOR operation between two vectors. -// -// Asm: VXORPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedXor(y Float64x8, z Mask64x8) Float64x8 - // Xor performs a masked bitwise XOR operation between two vectors. // // Asm: VPXORD, CPU Feature: AVX512EVEX @@ -6774,36 +6594,6 @@ func (x Uint64x8) NotEqual(y Uint64x8) Mask64x8 /* Or */ -// Or performs a bitwise OR operation between two vectors. -// -// Asm: VORPS, CPU Feature: AVX -func (x Float32x4) Or(y Float32x4) Float32x4 - -// Or performs a bitwise OR operation between two vectors. -// -// Asm: VORPS, CPU Feature: AVX -func (x Float32x8) Or(y Float32x8) Float32x8 - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VORPS, CPU Feature: AVX512EVEX -func (x Float32x16) Or(y Float32x16) Float32x16 - -// Or performs a bitwise OR operation between two vectors. -// -// Asm: VORPD, CPU Feature: AVX -func (x Float64x2) Or(y Float64x2) Float64x2 - -// Or performs a bitwise OR operation between two vectors. -// -// Asm: VORPD, CPU Feature: AVX -func (x Float64x4) Or(y Float64x4) Float64x4 - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VORPD, CPU Feature: AVX512EVEX -func (x Float64x8) Or(y Float64x8) Float64x8 - // Or performs a bitwise OR operation between two vectors. // // Asm: VPOR, CPU Feature: AVX @@ -9035,36 +8825,6 @@ func (x Uint32x16) UnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Ui /* Xor */ -// Xor performs a bitwise XOR operation between two vectors. -// -// Asm: VXORPS, CPU Feature: AVX -func (x Float32x4) Xor(y Float32x4) Float32x4 - -// Xor performs a bitwise XOR operation between two vectors. -// -// Asm: VXORPS, CPU Feature: AVX -func (x Float32x8) Xor(y Float32x8) Float32x8 - -// Xor performs a masked bitwise XOR operation between two vectors. -// -// Asm: VXORPS, CPU Feature: AVX512EVEX -func (x Float32x16) Xor(y Float32x16) Float32x16 - -// Xor performs a bitwise XOR operation between two vectors. -// -// Asm: VXORPD, CPU Feature: AVX -func (x Float64x2) Xor(y Float64x2) Float64x2 - -// Xor performs a bitwise XOR operation between two vectors. -// -// Asm: VXORPD, CPU Feature: AVX -func (x Float64x4) Xor(y Float64x4) Float64x4 - -// Xor performs a masked bitwise XOR operation between two vectors. -// -// Asm: VXORPD, CPU Feature: AVX512EVEX -func (x Float64x8) Xor(y Float64x8) Float64x8 - // Xor performs a bitwise XOR operation between two vectors. 
// // Asm: VPXOR, CPU Feature: AVX From 983e81ce578447dd384c9631dd9e2d9e730db6f6 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Tue, 8 Jul 2025 17:29:32 +0000 Subject: [PATCH 067/139] [dev.simd] simd: rename stubs_amd64.go to ops_amd64.go Change-Id: I42c3c8aed8bb19e251ae2aa0ee0f08e7796f1f4a Reviewed-on: https://go-review.googlesource.com/c/go/+/686497 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/simd/{stubs_amd64.go => ops_amd64.go} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/simd/{stubs_amd64.go => ops_amd64.go} (100%) diff --git a/src/simd/stubs_amd64.go b/src/simd/ops_amd64.go similarity index 100% rename from src/simd/stubs_amd64.go rename to src/simd/ops_amd64.go From 029d7ec3e937fe302d58b393c422195e5a2adc1d Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Tue, 8 Jul 2025 18:18:55 +0000 Subject: [PATCH 068/139] [dev.simd] cmd/compile, simd: rename Masked$OP to $(OP)Masked. This CL is generated by CL 686575. Change-Id: I1483189a1ae9bed51446fd69daab3f7b128549ae Reviewed-on: https://go-review.googlesource.com/c/go/+/686516 Reviewed-by: David Chase TryBot-Bypass: David Chase --- src/cmd/compile/internal/amd64/simdssa.go | 92 +- .../compile/internal/ssa/_gen/simdAMD64.rules | 1530 +- .../compile/internal/ssa/_gen/simdAMD64ops.go | 718 +- .../internal/ssa/_gen/simdgenericOps.go | 1530 +- src/cmd/compile/internal/ssa/opGen.go | 15092 ++++---- src/cmd/compile/internal/ssa/rewriteAMD64.go | 31352 ++++++++-------- .../compile/internal/ssagen/simdintrinsics.go | 1530 +- src/simd/ops_amd64.go | 8108 ++-- src/simd/simd_test.go | 6 +- src/simd/simd_wrapped_test.go | 2578 +- 10 files changed, 31268 insertions(+), 31268 deletions(-) diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 2266f8d7ef204d..50339bf202d09e 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -425,12 +425,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMINUQMasked128, ssa.OpAMD64VPMINUQMasked256, ssa.OpAMD64VPMINUQMasked512, - ssa.OpAMD64VMULPSMasked128, - ssa.OpAMD64VMULPSMasked256, - ssa.OpAMD64VMULPSMasked512, - ssa.OpAMD64VMULPDMasked128, - ssa.OpAMD64VMULPDMasked256, - ssa.OpAMD64VMULPDMasked512, ssa.OpAMD64VSCALEFPSMasked128, ssa.OpAMD64VSCALEFPSMasked256, ssa.OpAMD64VSCALEFPSMasked512, @@ -458,6 +452,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMULLQMasked128, ssa.OpAMD64VPMULLQMasked256, ssa.OpAMD64VPMULLQMasked512, + ssa.OpAMD64VMULPSMasked128, + ssa.OpAMD64VMULPSMasked256, + ssa.OpAMD64VMULPSMasked512, + ssa.OpAMD64VMULPDMasked128, + ssa.OpAMD64VMULPDMasked256, + ssa.OpAMD64VMULPDMasked512, ssa.OpAMD64VPORDMasked128, ssa.OpAMD64VPORDMasked256, ssa.OpAMD64VPORDMasked512, @@ -888,12 +888,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPEXTRQ128: p = simdFpgpImm8(s, v) - case ssa.OpAMD64VGF2P8AFFINEQBMasked128, - ssa.OpAMD64VGF2P8AFFINEQBMasked256, - ssa.OpAMD64VGF2P8AFFINEQBMasked512, - ssa.OpAMD64VGF2P8AFFINEINVQBMasked128, + case ssa.OpAMD64VGF2P8AFFINEINVQBMasked128, ssa.OpAMD64VGF2P8AFFINEINVQBMasked256, ssa.OpAMD64VGF2P8AFFINEINVQBMasked512, + ssa.OpAMD64VGF2P8AFFINEQBMasked128, + ssa.OpAMD64VGF2P8AFFINEQBMasked256, + ssa.OpAMD64VGF2P8AFFINEQBMasked512, ssa.OpAMD64VPSHLDWMasked128, ssa.OpAMD64VPSHLDWMasked256, ssa.OpAMD64VPSHLDWMasked512, @@ -1017,12 +1017,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VFMSUBADD213PDMasked128, 
ssa.OpAMD64VFMSUBADD213PDMasked256, ssa.OpAMD64VFMSUBADD213PDMasked512, - ssa.OpAMD64VGF2P8AFFINEQBMasked128, - ssa.OpAMD64VGF2P8AFFINEQBMasked256, - ssa.OpAMD64VGF2P8AFFINEQBMasked512, ssa.OpAMD64VGF2P8AFFINEINVQBMasked128, ssa.OpAMD64VGF2P8AFFINEINVQBMasked256, ssa.OpAMD64VGF2P8AFFINEINVQBMasked512, + ssa.OpAMD64VGF2P8AFFINEQBMasked128, + ssa.OpAMD64VGF2P8AFFINEQBMasked256, + ssa.OpAMD64VGF2P8AFFINEQBMasked512, ssa.OpAMD64VGF2P8MULBMasked128, ssa.OpAMD64VGF2P8MULBMasked256, ssa.OpAMD64VGF2P8MULBMasked512, @@ -1086,12 +1086,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMINUQMasked128, ssa.OpAMD64VPMINUQMasked256, ssa.OpAMD64VPMINUQMasked512, - ssa.OpAMD64VMULPSMasked128, - ssa.OpAMD64VMULPSMasked256, - ssa.OpAMD64VMULPSMasked512, - ssa.OpAMD64VMULPDMasked128, - ssa.OpAMD64VMULPDMasked256, - ssa.OpAMD64VMULPDMasked512, ssa.OpAMD64VSCALEFPSMasked128, ssa.OpAMD64VSCALEFPSMasked256, ssa.OpAMD64VSCALEFPSMasked512, @@ -1119,18 +1113,24 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMULLQMasked128, ssa.OpAMD64VPMULLQMasked256, ssa.OpAMD64VPMULLQMasked512, + ssa.OpAMD64VMULPSMasked128, + ssa.OpAMD64VMULPSMasked256, + ssa.OpAMD64VMULPSMasked512, + ssa.OpAMD64VMULPDMasked128, + ssa.OpAMD64VMULPDMasked256, + ssa.OpAMD64VMULPDMasked512, ssa.OpAMD64VPORDMasked128, ssa.OpAMD64VPORDMasked256, ssa.OpAMD64VPORDMasked512, ssa.OpAMD64VPORQMasked128, ssa.OpAMD64VPORQMasked256, ssa.OpAMD64VPORQMasked512, - ssa.OpAMD64VPMADDWDMasked128, - ssa.OpAMD64VPMADDWDMasked256, - ssa.OpAMD64VPMADDWDMasked512, ssa.OpAMD64VPDPWSSDMasked128, ssa.OpAMD64VPDPWSSDMasked256, ssa.OpAMD64VPDPWSSDMasked512, + ssa.OpAMD64VPMADDWDMasked128, + ssa.OpAMD64VPMADDWDMasked256, + ssa.OpAMD64VPMADDWDMasked512, ssa.OpAMD64VPOPCNTBMasked128, ssa.OpAMD64VPOPCNTBMasked256, ssa.OpAMD64VPOPCNTBMasked512, @@ -1188,9 +1188,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPDPBUSDSMasked128, ssa.OpAMD64VPDPBUSDSMasked256, ssa.OpAMD64VPDPBUSDSMasked512, - ssa.OpAMD64VPSLLQMasked128, - ssa.OpAMD64VPSLLQMasked256, - ssa.OpAMD64VPSLLQMasked512, ssa.OpAMD64VPSHLDWMasked128, ssa.OpAMD64VPSHLDWMasked256, ssa.OpAMD64VPSHLDWMasked512, @@ -1200,9 +1197,9 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSHLDQMasked128, ssa.OpAMD64VPSHLDQMasked256, ssa.OpAMD64VPSHLDQMasked512, - ssa.OpAMD64VPSRLQMasked128, - ssa.OpAMD64VPSRLQMasked256, - ssa.OpAMD64VPSRLQMasked512, + ssa.OpAMD64VPSLLQMasked128, + ssa.OpAMD64VPSLLQMasked256, + ssa.OpAMD64VPSLLQMasked512, ssa.OpAMD64VPSHRDWMasked128, ssa.OpAMD64VPSHRDWMasked256, ssa.OpAMD64VPSHRDWMasked512, @@ -1212,18 +1209,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSHRDQMasked128, ssa.OpAMD64VPSHRDQMasked256, ssa.OpAMD64VPSHRDQMasked512, + ssa.OpAMD64VPSRLQMasked128, + ssa.OpAMD64VPSRLQMasked256, + ssa.OpAMD64VPSRLQMasked512, ssa.OpAMD64VPSRAQMasked128, ssa.OpAMD64VPSRAQMasked256, ssa.OpAMD64VPSRAQMasked512, - ssa.OpAMD64VPSLLVWMasked128, - ssa.OpAMD64VPSLLVWMasked256, - ssa.OpAMD64VPSLLVWMasked512, - ssa.OpAMD64VPSLLVDMasked128, - ssa.OpAMD64VPSLLVDMasked256, - ssa.OpAMD64VPSLLVDMasked512, - ssa.OpAMD64VPSLLVQMasked128, - ssa.OpAMD64VPSLLVQMasked256, - ssa.OpAMD64VPSLLVQMasked512, ssa.OpAMD64VPSHLDVWMasked128, ssa.OpAMD64VPSHLDVWMasked256, ssa.OpAMD64VPSHLDVWMasked512, @@ -1233,15 +1224,15 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSHLDVQMasked128, ssa.OpAMD64VPSHLDVQMasked256, ssa.OpAMD64VPSHLDVQMasked512, - ssa.OpAMD64VPSRLVWMasked128, - 
ssa.OpAMD64VPSRLVWMasked256, - ssa.OpAMD64VPSRLVWMasked512, - ssa.OpAMD64VPSRLVDMasked128, - ssa.OpAMD64VPSRLVDMasked256, - ssa.OpAMD64VPSRLVDMasked512, - ssa.OpAMD64VPSRLVQMasked128, - ssa.OpAMD64VPSRLVQMasked256, - ssa.OpAMD64VPSRLVQMasked512, + ssa.OpAMD64VPSLLVWMasked128, + ssa.OpAMD64VPSLLVWMasked256, + ssa.OpAMD64VPSLLVWMasked512, + ssa.OpAMD64VPSLLVDMasked128, + ssa.OpAMD64VPSLLVDMasked256, + ssa.OpAMD64VPSLLVDMasked512, + ssa.OpAMD64VPSLLVQMasked128, + ssa.OpAMD64VPSLLVQMasked256, + ssa.OpAMD64VPSLLVQMasked512, ssa.OpAMD64VPSHRDVWMasked128, ssa.OpAMD64VPSHRDVWMasked256, ssa.OpAMD64VPSHRDVWMasked512, @@ -1251,6 +1242,15 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSHRDVQMasked128, ssa.OpAMD64VPSHRDVQMasked256, ssa.OpAMD64VPSHRDVQMasked512, + ssa.OpAMD64VPSRLVWMasked128, + ssa.OpAMD64VPSRLVWMasked256, + ssa.OpAMD64VPSRLVWMasked512, + ssa.OpAMD64VPSRLVDMasked128, + ssa.OpAMD64VPSRLVDMasked256, + ssa.OpAMD64VPSRLVDMasked512, + ssa.OpAMD64VPSRLVQMasked128, + ssa.OpAMD64VPSRLVQMasked256, + ssa.OpAMD64VPSRLVQMasked512, ssa.OpAMD64VPSRAVWMasked128, ssa.OpAMD64VPSRAVWMasked256, ssa.OpAMD64VPSRAVWMasked512, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index bcd227d4b99888..7ea24fe95cea22 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -12,6 +12,18 @@ (AbsoluteInt64x2 ...) => (VPABSQ128 ...) (AbsoluteInt64x4 ...) => (VPABSQ256 ...) (AbsoluteInt64x8 ...) => (VPABSQ512 ...) +(AbsoluteMaskedInt8x16 x mask) => (VPABSBMasked128 x (VPMOVVec8x16ToM mask)) +(AbsoluteMaskedInt8x32 x mask) => (VPABSBMasked256 x (VPMOVVec8x32ToM mask)) +(AbsoluteMaskedInt8x64 x mask) => (VPABSBMasked512 x (VPMOVVec8x64ToM mask)) +(AbsoluteMaskedInt16x8 x mask) => (VPABSWMasked128 x (VPMOVVec16x8ToM mask)) +(AbsoluteMaskedInt16x16 x mask) => (VPABSWMasked256 x (VPMOVVec16x16ToM mask)) +(AbsoluteMaskedInt16x32 x mask) => (VPABSWMasked512 x (VPMOVVec16x32ToM mask)) +(AbsoluteMaskedInt32x4 x mask) => (VPABSDMasked128 x (VPMOVVec32x4ToM mask)) +(AbsoluteMaskedInt32x8 x mask) => (VPABSDMasked256 x (VPMOVVec32x8ToM mask)) +(AbsoluteMaskedInt32x16 x mask) => (VPABSDMasked512 x (VPMOVVec32x16ToM mask)) +(AbsoluteMaskedInt64x2 x mask) => (VPABSQMasked128 x (VPMOVVec64x2ToM mask)) +(AbsoluteMaskedInt64x4 x mask) => (VPABSQMasked256 x (VPMOVVec64x4ToM mask)) +(AbsoluteMaskedInt64x8 x mask) => (VPABSQMasked512 x (VPMOVVec64x8ToM mask)) (AddFloat32x4 ...) => (VADDPS128 ...) (AddFloat32x8 ...) => (VADDPS256 ...) (AddFloat32x16 ...) => (VADDPS512 ...) @@ -42,6 +54,36 @@ (AddUint64x2 ...) => (VPADDQ128 ...) (AddUint64x4 ...) => (VPADDQ256 ...) (AddUint64x8 ...) => (VPADDQ512 ...) 
+(AddMaskedFloat32x4 x y mask) => (VADDPSMasked128 x y (VPMOVVec32x4ToM mask)) +(AddMaskedFloat32x8 x y mask) => (VADDPSMasked256 x y (VPMOVVec32x8ToM mask)) +(AddMaskedFloat32x16 x y mask) => (VADDPSMasked512 x y (VPMOVVec32x16ToM mask)) +(AddMaskedFloat64x2 x y mask) => (VADDPDMasked128 x y (VPMOVVec64x2ToM mask)) +(AddMaskedFloat64x4 x y mask) => (VADDPDMasked256 x y (VPMOVVec64x4ToM mask)) +(AddMaskedFloat64x8 x y mask) => (VADDPDMasked512 x y (VPMOVVec64x8ToM mask)) +(AddMaskedInt8x16 x y mask) => (VPADDBMasked128 x y (VPMOVVec8x16ToM mask)) +(AddMaskedInt8x32 x y mask) => (VPADDBMasked256 x y (VPMOVVec8x32ToM mask)) +(AddMaskedInt8x64 x y mask) => (VPADDBMasked512 x y (VPMOVVec8x64ToM mask)) +(AddMaskedInt16x8 x y mask) => (VPADDWMasked128 x y (VPMOVVec16x8ToM mask)) +(AddMaskedInt16x16 x y mask) => (VPADDWMasked256 x y (VPMOVVec16x16ToM mask)) +(AddMaskedInt16x32 x y mask) => (VPADDWMasked512 x y (VPMOVVec16x32ToM mask)) +(AddMaskedInt32x4 x y mask) => (VPADDDMasked128 x y (VPMOVVec32x4ToM mask)) +(AddMaskedInt32x8 x y mask) => (VPADDDMasked256 x y (VPMOVVec32x8ToM mask)) +(AddMaskedInt32x16 x y mask) => (VPADDDMasked512 x y (VPMOVVec32x16ToM mask)) +(AddMaskedInt64x2 x y mask) => (VPADDQMasked128 x y (VPMOVVec64x2ToM mask)) +(AddMaskedInt64x4 x y mask) => (VPADDQMasked256 x y (VPMOVVec64x4ToM mask)) +(AddMaskedInt64x8 x y mask) => (VPADDQMasked512 x y (VPMOVVec64x8ToM mask)) +(AddMaskedUint8x16 x y mask) => (VPADDBMasked128 x y (VPMOVVec8x16ToM mask)) +(AddMaskedUint8x32 x y mask) => (VPADDBMasked256 x y (VPMOVVec8x32ToM mask)) +(AddMaskedUint8x64 x y mask) => (VPADDBMasked512 x y (VPMOVVec8x64ToM mask)) +(AddMaskedUint16x8 x y mask) => (VPADDWMasked128 x y (VPMOVVec16x8ToM mask)) +(AddMaskedUint16x16 x y mask) => (VPADDWMasked256 x y (VPMOVVec16x16ToM mask)) +(AddMaskedUint16x32 x y mask) => (VPADDWMasked512 x y (VPMOVVec16x32ToM mask)) +(AddMaskedUint32x4 x y mask) => (VPADDDMasked128 x y (VPMOVVec32x4ToM mask)) +(AddMaskedUint32x8 x y mask) => (VPADDDMasked256 x y (VPMOVVec32x8ToM mask)) +(AddMaskedUint32x16 x y mask) => (VPADDDMasked512 x y (VPMOVVec32x16ToM mask)) +(AddMaskedUint64x2 x y mask) => (VPADDQMasked128 x y (VPMOVVec64x2ToM mask)) +(AddMaskedUint64x4 x y mask) => (VPADDQMasked256 x y (VPMOVVec64x4ToM mask)) +(AddMaskedUint64x8 x y mask) => (VPADDQMasked512 x y (VPMOVVec64x8ToM mask)) (AddSubFloat32x4 ...) => (VADDSUBPS128 ...) (AddSubFloat32x8 ...) => (VADDSUBPS256 ...) (AddSubFloat64x2 ...) => (VADDSUBPD128 ...) @@ -66,6 +108,18 @@ (AndUint64x2 ...) => (VPAND128 ...) (AndUint64x4 ...) => (VPAND256 ...) (AndUint64x8 ...) => (VPANDQ512 ...) 
+(AndMaskedInt32x4 x y mask) => (VPANDDMasked128 x y (VPMOVVec32x4ToM mask)) +(AndMaskedInt32x8 x y mask) => (VPANDDMasked256 x y (VPMOVVec32x8ToM mask)) +(AndMaskedInt32x16 x y mask) => (VPANDDMasked512 x y (VPMOVVec32x16ToM mask)) +(AndMaskedInt64x2 x y mask) => (VPANDQMasked128 x y (VPMOVVec64x2ToM mask)) +(AndMaskedInt64x4 x y mask) => (VPANDQMasked256 x y (VPMOVVec64x4ToM mask)) +(AndMaskedInt64x8 x y mask) => (VPANDQMasked512 x y (VPMOVVec64x8ToM mask)) +(AndMaskedUint32x4 x y mask) => (VPANDDMasked128 x y (VPMOVVec32x4ToM mask)) +(AndMaskedUint32x8 x y mask) => (VPANDDMasked256 x y (VPMOVVec32x8ToM mask)) +(AndMaskedUint32x16 x y mask) => (VPANDDMasked512 x y (VPMOVVec32x16ToM mask)) +(AndMaskedUint64x2 x y mask) => (VPANDQMasked128 x y (VPMOVVec64x2ToM mask)) +(AndMaskedUint64x4 x y mask) => (VPANDQMasked256 x y (VPMOVVec64x4ToM mask)) +(AndMaskedUint64x8 x y mask) => (VPANDQMasked512 x y (VPMOVVec64x8ToM mask)) (AndNotInt8x16 ...) => (VPANDN128 ...) (AndNotInt8x32 ...) => (VPANDN256 ...) (AndNotInt16x8 ...) => (VPANDN128 ...) @@ -86,24 +140,54 @@ (AndNotUint64x2 ...) => (VPANDN128 ...) (AndNotUint64x4 ...) => (VPANDN256 ...) (AndNotUint64x8 ...) => (VPANDNQ512 ...) +(AndNotMaskedInt32x4 x y mask) => (VPANDNDMasked128 x y (VPMOVVec32x4ToM mask)) +(AndNotMaskedInt32x8 x y mask) => (VPANDNDMasked256 x y (VPMOVVec32x8ToM mask)) +(AndNotMaskedInt32x16 x y mask) => (VPANDNDMasked512 x y (VPMOVVec32x16ToM mask)) +(AndNotMaskedInt64x2 x y mask) => (VPANDNQMasked128 x y (VPMOVVec64x2ToM mask)) +(AndNotMaskedInt64x4 x y mask) => (VPANDNQMasked256 x y (VPMOVVec64x4ToM mask)) +(AndNotMaskedInt64x8 x y mask) => (VPANDNQMasked512 x y (VPMOVVec64x8ToM mask)) +(AndNotMaskedUint32x4 x y mask) => (VPANDNDMasked128 x y (VPMOVVec32x4ToM mask)) +(AndNotMaskedUint32x8 x y mask) => (VPANDNDMasked256 x y (VPMOVVec32x8ToM mask)) +(AndNotMaskedUint32x16 x y mask) => (VPANDNDMasked512 x y (VPMOVVec32x16ToM mask)) +(AndNotMaskedUint64x2 x y mask) => (VPANDNQMasked128 x y (VPMOVVec64x2ToM mask)) +(AndNotMaskedUint64x4 x y mask) => (VPANDNQMasked256 x y (VPMOVVec64x4ToM mask)) +(AndNotMaskedUint64x8 x y mask) => (VPANDNQMasked512 x y (VPMOVVec64x8ToM mask)) (ApproximateReciprocalFloat32x4 ...) => (VRCP14PS128 ...) (ApproximateReciprocalFloat32x8 ...) => (VRCP14PS256 ...) (ApproximateReciprocalFloat32x16 ...) => (VRCP14PS512 ...) (ApproximateReciprocalFloat64x2 ...) => (VRCP14PD128 ...) (ApproximateReciprocalFloat64x4 ...) => (VRCP14PD256 ...) (ApproximateReciprocalFloat64x8 ...) => (VRCP14PD512 ...) +(ApproximateReciprocalMaskedFloat32x4 x mask) => (VRCP14PSMasked128 x (VPMOVVec32x4ToM mask)) +(ApproximateReciprocalMaskedFloat32x8 x mask) => (VRCP14PSMasked256 x (VPMOVVec32x8ToM mask)) +(ApproximateReciprocalMaskedFloat32x16 x mask) => (VRCP14PSMasked512 x (VPMOVVec32x16ToM mask)) +(ApproximateReciprocalMaskedFloat64x2 x mask) => (VRCP14PDMasked128 x (VPMOVVec64x2ToM mask)) +(ApproximateReciprocalMaskedFloat64x4 x mask) => (VRCP14PDMasked256 x (VPMOVVec64x4ToM mask)) +(ApproximateReciprocalMaskedFloat64x8 x mask) => (VRCP14PDMasked512 x (VPMOVVec64x8ToM mask)) (ApproximateReciprocalOfSqrtFloat32x4 ...) => (VRSQRTPS128 ...) (ApproximateReciprocalOfSqrtFloat32x8 ...) => (VRSQRTPS256 ...) (ApproximateReciprocalOfSqrtFloat32x16 ...) => (VRSQRT14PS512 ...) (ApproximateReciprocalOfSqrtFloat64x2 ...) => (VRSQRT14PD128 ...) (ApproximateReciprocalOfSqrtFloat64x4 ...) => (VRSQRT14PD256 ...) (ApproximateReciprocalOfSqrtFloat64x8 ...) => (VRSQRT14PD512 ...) 
+(ApproximateReciprocalOfSqrtMaskedFloat32x4 x mask) => (VRSQRT14PSMasked128 x (VPMOVVec32x4ToM mask)) +(ApproximateReciprocalOfSqrtMaskedFloat32x8 x mask) => (VRSQRT14PSMasked256 x (VPMOVVec32x8ToM mask)) +(ApproximateReciprocalOfSqrtMaskedFloat32x16 x mask) => (VRSQRT14PSMasked512 x (VPMOVVec32x16ToM mask)) +(ApproximateReciprocalOfSqrtMaskedFloat64x2 x mask) => (VRSQRT14PDMasked128 x (VPMOVVec64x2ToM mask)) +(ApproximateReciprocalOfSqrtMaskedFloat64x4 x mask) => (VRSQRT14PDMasked256 x (VPMOVVec64x4ToM mask)) +(ApproximateReciprocalOfSqrtMaskedFloat64x8 x mask) => (VRSQRT14PDMasked512 x (VPMOVVec64x8ToM mask)) (AverageUint8x16 ...) => (VPAVGB128 ...) (AverageUint8x32 ...) => (VPAVGB256 ...) (AverageUint8x64 ...) => (VPAVGB512 ...) (AverageUint16x8 ...) => (VPAVGW128 ...) (AverageUint16x16 ...) => (VPAVGW256 ...) (AverageUint16x32 ...) => (VPAVGW512 ...) +(AverageMaskedUint8x16 x y mask) => (VPAVGBMasked128 x y (VPMOVVec8x16ToM mask)) +(AverageMaskedUint8x32 x y mask) => (VPAVGBMasked256 x y (VPMOVVec8x32ToM mask)) +(AverageMaskedUint8x64 x y mask) => (VPAVGBMasked512 x y (VPMOVVec8x64ToM mask)) +(AverageMaskedUint16x8 x y mask) => (VPAVGWMasked128 x y (VPMOVVec16x8ToM mask)) +(AverageMaskedUint16x16 x y mask) => (VPAVGWMasked256 x y (VPMOVVec16x16ToM mask)) +(AverageMaskedUint16x32 x y mask) => (VPAVGWMasked512 x y (VPMOVVec16x32ToM mask)) (CeilFloat32x4 x) => (VROUNDPS128 [2] x) (CeilFloat32x8 x) => (VROUNDPS256 [2] x) (CeilFloat64x2 x) => (VROUNDPD128 [2] x) @@ -114,36 +198,72 @@ (CeilWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+2] x) (CeilWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+2] x) (CeilWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+2] x) +(CeilWithPrecisionMaskedFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) +(CeilWithPrecisionMaskedFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) +(CeilWithPrecisionMaskedFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) +(CeilWithPrecisionMaskedFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) +(CeilWithPrecisionMaskedFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) +(CeilWithPrecisionMaskedFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) (DiffWithCeilWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+2] x) (DiffWithCeilWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+2] x) (DiffWithCeilWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+2] x) (DiffWithCeilWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+2] x) (DiffWithCeilWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+2] x) (DiffWithCeilWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+2] x) +(DiffWithCeilWithPrecisionMaskedFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) +(DiffWithCeilWithPrecisionMaskedFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) +(DiffWithCeilWithPrecisionMaskedFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) +(DiffWithCeilWithPrecisionMaskedFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) +(DiffWithCeilWithPrecisionMaskedFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) +(DiffWithCeilWithPrecisionMaskedFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) (DiffWithFloorWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+1] x) (DiffWithFloorWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+1] x) 
(DiffWithFloorWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+1] x) (DiffWithFloorWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+1] x) (DiffWithFloorWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+1] x) (DiffWithFloorWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+1] x) +(DiffWithFloorWithPrecisionMaskedFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) +(DiffWithFloorWithPrecisionMaskedFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) +(DiffWithFloorWithPrecisionMaskedFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) +(DiffWithFloorWithPrecisionMaskedFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) +(DiffWithFloorWithPrecisionMaskedFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) +(DiffWithFloorWithPrecisionMaskedFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) (DiffWithRoundWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+0] x) (DiffWithRoundWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+0] x) (DiffWithRoundWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+0] x) (DiffWithRoundWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+0] x) (DiffWithRoundWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+0] x) (DiffWithRoundWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+0] x) +(DiffWithRoundWithPrecisionMaskedFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) +(DiffWithRoundWithPrecisionMaskedFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) +(DiffWithRoundWithPrecisionMaskedFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) +(DiffWithRoundWithPrecisionMaskedFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) +(DiffWithRoundWithPrecisionMaskedFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) +(DiffWithRoundWithPrecisionMaskedFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) (DiffWithTruncWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+3] x) (DiffWithTruncWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+3] x) (DiffWithTruncWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+3] x) (DiffWithTruncWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+3] x) (DiffWithTruncWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+3] x) (DiffWithTruncWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+3] x) +(DiffWithTruncWithPrecisionMaskedFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) +(DiffWithTruncWithPrecisionMaskedFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) +(DiffWithTruncWithPrecisionMaskedFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) +(DiffWithTruncWithPrecisionMaskedFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) +(DiffWithTruncWithPrecisionMaskedFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) +(DiffWithTruncWithPrecisionMaskedFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) (DivFloat32x4 ...) => (VDIVPS128 ...) (DivFloat32x8 ...) => (VDIVPS256 ...) (DivFloat32x16 ...) => (VDIVPS512 ...) (DivFloat64x2 ...) => (VDIVPD128 ...) (DivFloat64x4 ...) => (VDIVPD256 ...) (DivFloat64x8 ...) => (VDIVPD512 ...) 
+(DivMaskedFloat32x4 x y mask) => (VDIVPSMasked128 x y (VPMOVVec32x4ToM mask)) +(DivMaskedFloat32x8 x y mask) => (VDIVPSMasked256 x y (VPMOVVec32x8ToM mask)) +(DivMaskedFloat32x16 x y mask) => (VDIVPSMasked512 x y (VPMOVVec32x16ToM mask)) +(DivMaskedFloat64x2 x y mask) => (VDIVPDMasked128 x y (VPMOVVec64x2ToM mask)) +(DivMaskedFloat64x4 x y mask) => (VDIVPDMasked256 x y (VPMOVVec64x4ToM mask)) +(DivMaskedFloat64x8 x y mask) => (VDIVPDMasked512 x y (VPMOVVec64x8ToM mask)) (DotProdBroadcastFloat64x2 x y) => (VDPPD128 [127] x y) (EqualFloat32x4 x y) => (VCMPPS128 [0] x y) (EqualFloat32x8 x y) => (VCMPPS256 [0] x y) @@ -175,6 +295,36 @@ (EqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [0] x y)) (EqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [0] x y)) (EqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [0] x y)) +(EqualMaskedFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [0] x y (VPMOVVec32x4ToM mask))) +(EqualMaskedFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [0] x y (VPMOVVec32x8ToM mask))) +(EqualMaskedFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [0] x y (VPMOVVec32x16ToM mask))) +(EqualMaskedFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [0] x y (VPMOVVec64x2ToM mask))) +(EqualMaskedFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [0] x y (VPMOVVec64x4ToM mask))) +(EqualMaskedFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [0] x y (VPMOVVec64x8ToM mask))) +(EqualMaskedInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [0] x y (VPMOVVec8x16ToM mask))) +(EqualMaskedInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [0] x y (VPMOVVec8x32ToM mask))) +(EqualMaskedInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [0] x y (VPMOVVec8x64ToM mask))) +(EqualMaskedInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [0] x y (VPMOVVec16x8ToM mask))) +(EqualMaskedInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [0] x y (VPMOVVec16x16ToM mask))) +(EqualMaskedInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [0] x y (VPMOVVec16x32ToM mask))) +(EqualMaskedInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [0] x y (VPMOVVec32x4ToM mask))) +(EqualMaskedInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [0] x y (VPMOVVec32x8ToM mask))) +(EqualMaskedInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [0] x y (VPMOVVec32x16ToM mask))) +(EqualMaskedInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [0] x y (VPMOVVec64x2ToM mask))) +(EqualMaskedInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [0] x y (VPMOVVec64x4ToM mask))) +(EqualMaskedInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [0] x y (VPMOVVec64x8ToM mask))) +(EqualMaskedUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [0] x y (VPMOVVec8x16ToM mask))) +(EqualMaskedUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [0] x y (VPMOVVec8x32ToM mask))) +(EqualMaskedUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [0] x y (VPMOVVec8x64ToM mask))) +(EqualMaskedUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [0] x y (VPMOVVec16x8ToM mask))) +(EqualMaskedUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [0] x y (VPMOVVec16x16ToM mask))) +(EqualMaskedUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [0] x y (VPMOVVec16x32ToM mask))) +(EqualMaskedUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [0] x y (VPMOVVec32x4ToM mask))) +(EqualMaskedUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [0] x y (VPMOVVec32x8ToM mask))) +(EqualMaskedUint32x16 x y mask) => (VPMOVMToVec32x16 
(VPCMPUDMasked512 [0] x y (VPMOVVec32x16ToM mask))) +(EqualMaskedUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [0] x y (VPMOVVec64x2ToM mask))) +(EqualMaskedUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [0] x y (VPMOVVec64x4ToM mask))) +(EqualMaskedUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [0] x y (VPMOVVec64x8ToM mask))) (FloorFloat32x4 x) => (VROUNDPS128 [1] x) (FloorFloat32x8 x) => (VROUNDPS256 [1] x) (FloorFloat64x2 x) => (VROUNDPD128 [1] x) @@ -185,33 +335,66 @@ (FloorWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+1] x) (FloorWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+1] x) (FloorWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+1] x) +(FloorWithPrecisionMaskedFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) +(FloorWithPrecisionMaskedFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) +(FloorWithPrecisionMaskedFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) +(FloorWithPrecisionMaskedFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) +(FloorWithPrecisionMaskedFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) +(FloorWithPrecisionMaskedFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) (FusedMultiplyAddFloat32x4 ...) => (VFMADD213PS128 ...) (FusedMultiplyAddFloat32x8 ...) => (VFMADD213PS256 ...) (FusedMultiplyAddFloat32x16 ...) => (VFMADD213PS512 ...) (FusedMultiplyAddFloat64x2 ...) => (VFMADD213PD128 ...) (FusedMultiplyAddFloat64x4 ...) => (VFMADD213PD256 ...) (FusedMultiplyAddFloat64x8 ...) => (VFMADD213PD512 ...) +(FusedMultiplyAddMaskedFloat32x4 x y z mask) => (VFMADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(FusedMultiplyAddMaskedFloat32x8 x y z mask) => (VFMADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(FusedMultiplyAddMaskedFloat32x16 x y z mask) => (VFMADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(FusedMultiplyAddMaskedFloat64x2 x y z mask) => (VFMADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(FusedMultiplyAddMaskedFloat64x4 x y z mask) => (VFMADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(FusedMultiplyAddMaskedFloat64x8 x y z mask) => (VFMADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) (FusedMultiplyAddSubFloat32x4 ...) => (VFMADDSUB213PS128 ...) (FusedMultiplyAddSubFloat32x8 ...) => (VFMADDSUB213PS256 ...) (FusedMultiplyAddSubFloat32x16 ...) => (VFMADDSUB213PS512 ...) (FusedMultiplyAddSubFloat64x2 ...) => (VFMADDSUB213PD128 ...) (FusedMultiplyAddSubFloat64x4 ...) => (VFMADDSUB213PD256 ...) (FusedMultiplyAddSubFloat64x8 ...) => (VFMADDSUB213PD512 ...) +(FusedMultiplyAddSubMaskedFloat32x4 x y z mask) => (VFMADDSUB213PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(FusedMultiplyAddSubMaskedFloat32x8 x y z mask) => (VFMADDSUB213PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(FusedMultiplyAddSubMaskedFloat32x16 x y z mask) => (VFMADDSUB213PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(FusedMultiplyAddSubMaskedFloat64x2 x y z mask) => (VFMADDSUB213PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(FusedMultiplyAddSubMaskedFloat64x4 x y z mask) => (VFMADDSUB213PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(FusedMultiplyAddSubMaskedFloat64x8 x y z mask) => (VFMADDSUB213PDMasked512 x y z (VPMOVVec64x8ToM mask)) (FusedMultiplySubAddFloat32x4 ...) => (VFMSUBADD213PS128 ...) (FusedMultiplySubAddFloat32x8 ...) => (VFMSUBADD213PS256 ...) (FusedMultiplySubAddFloat32x16 ...) => (VFMSUBADD213PS512 ...) (FusedMultiplySubAddFloat64x2 ...) => (VFMSUBADD213PD128 ...) 
(FusedMultiplySubAddFloat64x4 ...) => (VFMSUBADD213PD256 ...) (FusedMultiplySubAddFloat64x8 ...) => (VFMSUBADD213PD512 ...) +(FusedMultiplySubAddMaskedFloat32x4 x y z mask) => (VFMSUBADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(FusedMultiplySubAddMaskedFloat32x8 x y z mask) => (VFMSUBADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(FusedMultiplySubAddMaskedFloat32x16 x y z mask) => (VFMSUBADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(FusedMultiplySubAddMaskedFloat64x2 x y z mask) => (VFMSUBADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(FusedMultiplySubAddMaskedFloat64x4 x y z mask) => (VFMSUBADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(FusedMultiplySubAddMaskedFloat64x8 x y z mask) => (VFMSUBADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) (GaloisFieldAffineTransformUint8x16 [a] x y) => (VGF2P8AFFINEQB128 [a] x y) (GaloisFieldAffineTransformUint8x32 [a] x y) => (VGF2P8AFFINEQB256 [a] x y) (GaloisFieldAffineTransformUint8x64 [a] x y) => (VGF2P8AFFINEQB512 [a] x y) (GaloisFieldAffineTransformInversedUint8x16 [a] x y) => (VGF2P8AFFINEINVQB128 [a] x y) (GaloisFieldAffineTransformInversedUint8x32 [a] x y) => (VGF2P8AFFINEINVQB256 [a] x y) (GaloisFieldAffineTransformInversedUint8x64 [a] x y) => (VGF2P8AFFINEINVQB512 [a] x y) +(GaloisFieldAffineTransformInversedMaskedUint8x16 [a] x y mask) => (VGF2P8AFFINEINVQBMasked128 [a] x y (VPMOVVec8x16ToM mask)) +(GaloisFieldAffineTransformInversedMaskedUint8x32 [a] x y mask) => (VGF2P8AFFINEINVQBMasked256 [a] x y (VPMOVVec8x32ToM mask)) +(GaloisFieldAffineTransformInversedMaskedUint8x64 [a] x y mask) => (VGF2P8AFFINEINVQBMasked512 [a] x y (VPMOVVec8x64ToM mask)) +(GaloisFieldAffineTransformMaskedUint8x16 [a] x y mask) => (VGF2P8AFFINEQBMasked128 [a] x y (VPMOVVec8x16ToM mask)) +(GaloisFieldAffineTransformMaskedUint8x32 [a] x y mask) => (VGF2P8AFFINEQBMasked256 [a] x y (VPMOVVec8x32ToM mask)) +(GaloisFieldAffineTransformMaskedUint8x64 [a] x y mask) => (VGF2P8AFFINEQBMasked512 [a] x y (VPMOVVec8x64ToM mask)) (GaloisFieldMulUint8x16 ...) => (VGF2P8MULB128 ...) (GaloisFieldMulUint8x32 ...) => (VGF2P8MULB256 ...) (GaloisFieldMulUint8x64 ...) => (VGF2P8MULB512 ...) 
+(GaloisFieldMulMaskedUint8x16 x y mask) => (VGF2P8MULBMasked128 x y (VPMOVVec8x16ToM mask)) +(GaloisFieldMulMaskedUint8x32 x y mask) => (VGF2P8MULBMasked256 x y (VPMOVVec8x32ToM mask)) +(GaloisFieldMulMaskedUint8x64 x y mask) => (VGF2P8MULBMasked512 x y (VPMOVVec8x64ToM mask)) (Get128Float32x8 [a] x) => (VEXTRACTF128128 [a] x) (Get128Float64x4 [a] x) => (VEXTRACTF128128 [a] x) (Get128Int8x32 [a] x) => (VEXTRACTI128128 [a] x) @@ -290,12 +473,78 @@ (GreaterEqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [13] x y)) (GreaterEqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [13] x y)) (GreaterEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [13] x y)) +(GreaterEqualMaskedFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [13] x y (VPMOVVec32x4ToM mask))) +(GreaterEqualMaskedFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [13] x y (VPMOVVec32x8ToM mask))) +(GreaterEqualMaskedFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [13] x y (VPMOVVec32x16ToM mask))) +(GreaterEqualMaskedFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [13] x y (VPMOVVec64x2ToM mask))) +(GreaterEqualMaskedFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [13] x y (VPMOVVec64x4ToM mask))) +(GreaterEqualMaskedFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [13] x y (VPMOVVec64x8ToM mask))) +(GreaterEqualMaskedInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [13] x y (VPMOVVec8x16ToM mask))) +(GreaterEqualMaskedInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [13] x y (VPMOVVec8x32ToM mask))) +(GreaterEqualMaskedInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [13] x y (VPMOVVec8x64ToM mask))) +(GreaterEqualMaskedInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [13] x y (VPMOVVec16x8ToM mask))) +(GreaterEqualMaskedInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [13] x y (VPMOVVec16x16ToM mask))) +(GreaterEqualMaskedInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [13] x y (VPMOVVec16x32ToM mask))) +(GreaterEqualMaskedInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [13] x y (VPMOVVec32x4ToM mask))) +(GreaterEqualMaskedInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [13] x y (VPMOVVec32x8ToM mask))) +(GreaterEqualMaskedInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [13] x y (VPMOVVec32x16ToM mask))) +(GreaterEqualMaskedInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [13] x y (VPMOVVec64x2ToM mask))) +(GreaterEqualMaskedInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [13] x y (VPMOVVec64x4ToM mask))) +(GreaterEqualMaskedInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [13] x y (VPMOVVec64x8ToM mask))) +(GreaterEqualMaskedUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [13] x y (VPMOVVec8x16ToM mask))) +(GreaterEqualMaskedUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [13] x y (VPMOVVec8x32ToM mask))) +(GreaterEqualMaskedUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [13] x y (VPMOVVec8x64ToM mask))) +(GreaterEqualMaskedUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [13] x y (VPMOVVec16x8ToM mask))) +(GreaterEqualMaskedUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [13] x y (VPMOVVec16x16ToM mask))) +(GreaterEqualMaskedUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [13] x y (VPMOVVec16x32ToM mask))) +(GreaterEqualMaskedUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [13] x y (VPMOVVec32x4ToM mask))) +(GreaterEqualMaskedUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [13] x y (VPMOVVec32x8ToM mask))) 
+(GreaterEqualMaskedUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [13] x y (VPMOVVec32x16ToM mask))) +(GreaterEqualMaskedUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [13] x y (VPMOVVec64x2ToM mask))) +(GreaterEqualMaskedUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [13] x y (VPMOVVec64x4ToM mask))) +(GreaterEqualMaskedUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [13] x y (VPMOVVec64x8ToM mask))) +(GreaterMaskedFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [14] x y (VPMOVVec32x4ToM mask))) +(GreaterMaskedFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [14] x y (VPMOVVec32x8ToM mask))) +(GreaterMaskedFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [14] x y (VPMOVVec32x16ToM mask))) +(GreaterMaskedFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [14] x y (VPMOVVec64x2ToM mask))) +(GreaterMaskedFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [14] x y (VPMOVVec64x4ToM mask))) +(GreaterMaskedFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [14] x y (VPMOVVec64x8ToM mask))) +(GreaterMaskedInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [14] x y (VPMOVVec8x16ToM mask))) +(GreaterMaskedInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [14] x y (VPMOVVec8x32ToM mask))) +(GreaterMaskedInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [14] x y (VPMOVVec8x64ToM mask))) +(GreaterMaskedInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [14] x y (VPMOVVec16x8ToM mask))) +(GreaterMaskedInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [14] x y (VPMOVVec16x16ToM mask))) +(GreaterMaskedInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [14] x y (VPMOVVec16x32ToM mask))) +(GreaterMaskedInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [14] x y (VPMOVVec32x4ToM mask))) +(GreaterMaskedInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [14] x y (VPMOVVec32x8ToM mask))) +(GreaterMaskedInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [14] x y (VPMOVVec32x16ToM mask))) +(GreaterMaskedInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [14] x y (VPMOVVec64x2ToM mask))) +(GreaterMaskedInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [14] x y (VPMOVVec64x4ToM mask))) +(GreaterMaskedInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [14] x y (VPMOVVec64x8ToM mask))) +(GreaterMaskedUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [14] x y (VPMOVVec8x16ToM mask))) +(GreaterMaskedUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [14] x y (VPMOVVec8x32ToM mask))) +(GreaterMaskedUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [14] x y (VPMOVVec8x64ToM mask))) +(GreaterMaskedUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [14] x y (VPMOVVec16x8ToM mask))) +(GreaterMaskedUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [14] x y (VPMOVVec16x16ToM mask))) +(GreaterMaskedUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [14] x y (VPMOVVec16x32ToM mask))) +(GreaterMaskedUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [14] x y (VPMOVVec32x4ToM mask))) +(GreaterMaskedUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [14] x y (VPMOVVec32x8ToM mask))) +(GreaterMaskedUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [14] x y (VPMOVVec32x16ToM mask))) +(GreaterMaskedUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [14] x y (VPMOVVec64x2ToM mask))) +(GreaterMaskedUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [14] x y (VPMOVVec64x4ToM mask))) +(GreaterMaskedUint64x8 x y mask) => 
(VPMOVMToVec64x8 (VPCMPUQMasked512 [14] x y (VPMOVVec64x8ToM mask))) (IsNanFloat32x4 x y) => (VCMPPS128 [3] x y) (IsNanFloat32x8 x y) => (VCMPPS256 [3] x y) (IsNanFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [3] x y)) (IsNanFloat64x2 x y) => (VCMPPD128 [3] x y) (IsNanFloat64x4 x y) => (VCMPPD256 [3] x y) (IsNanFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [3] x y)) +(IsNanMaskedFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [3] x y (VPMOVVec32x4ToM mask))) +(IsNanMaskedFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [3] x y (VPMOVVec32x8ToM mask))) +(IsNanMaskedFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [3] x y (VPMOVVec32x16ToM mask))) +(IsNanMaskedFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [3] x y (VPMOVVec64x2ToM mask))) +(IsNanMaskedFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [3] x y (VPMOVVec64x4ToM mask))) +(IsNanMaskedFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [3] x y (VPMOVVec64x8ToM mask))) (LessFloat32x4 x y) => (VCMPPS128 [1] x y) (LessFloat32x8 x y) => (VCMPPS256 [1] x y) (LessFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [1] x y)) @@ -356,771 +605,66 @@ (LessEqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [2] x y)) (LessEqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [2] x y)) (LessEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [2] x y)) -(MaskedAbsoluteInt8x16 x mask) => (VPABSBMasked128 x (VPMOVVec8x16ToM mask)) -(MaskedAbsoluteInt8x32 x mask) => (VPABSBMasked256 x (VPMOVVec8x32ToM mask)) -(MaskedAbsoluteInt8x64 x mask) => (VPABSBMasked512 x (VPMOVVec8x64ToM mask)) -(MaskedAbsoluteInt16x8 x mask) => (VPABSWMasked128 x (VPMOVVec16x8ToM mask)) -(MaskedAbsoluteInt16x16 x mask) => (VPABSWMasked256 x (VPMOVVec16x16ToM mask)) -(MaskedAbsoluteInt16x32 x mask) => (VPABSWMasked512 x (VPMOVVec16x32ToM mask)) -(MaskedAbsoluteInt32x4 x mask) => (VPABSDMasked128 x (VPMOVVec32x4ToM mask)) -(MaskedAbsoluteInt32x8 x mask) => (VPABSDMasked256 x (VPMOVVec32x8ToM mask)) -(MaskedAbsoluteInt32x16 x mask) => (VPABSDMasked512 x (VPMOVVec32x16ToM mask)) -(MaskedAbsoluteInt64x2 x mask) => (VPABSQMasked128 x (VPMOVVec64x2ToM mask)) -(MaskedAbsoluteInt64x4 x mask) => (VPABSQMasked256 x (VPMOVVec64x4ToM mask)) -(MaskedAbsoluteInt64x8 x mask) => (VPABSQMasked512 x (VPMOVVec64x8ToM mask)) -(MaskedAddFloat32x4 x y mask) => (VADDPSMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedAddFloat32x8 x y mask) => (VADDPSMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedAddFloat32x16 x y mask) => (VADDPSMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedAddFloat64x2 x y mask) => (VADDPDMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedAddFloat64x4 x y mask) => (VADDPDMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedAddFloat64x8 x y mask) => (VADDPDMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedAddInt8x16 x y mask) => (VPADDBMasked128 x y (VPMOVVec8x16ToM mask)) -(MaskedAddInt8x32 x y mask) => (VPADDBMasked256 x y (VPMOVVec8x32ToM mask)) -(MaskedAddInt8x64 x y mask) => (VPADDBMasked512 x y (VPMOVVec8x64ToM mask)) -(MaskedAddInt16x8 x y mask) => (VPADDWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedAddInt16x16 x y mask) => (VPADDWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedAddInt16x32 x y mask) => (VPADDWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedAddInt32x4 x y mask) => (VPADDDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedAddInt32x8 x y mask) => (VPADDDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedAddInt32x16 x y mask) => (VPADDDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedAddInt64x2 x y mask) => (VPADDQMasked128 x y (VPMOVVec64x2ToM 
mask)) -(MaskedAddInt64x4 x y mask) => (VPADDQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedAddInt64x8 x y mask) => (VPADDQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedAddUint8x16 x y mask) => (VPADDBMasked128 x y (VPMOVVec8x16ToM mask)) -(MaskedAddUint8x32 x y mask) => (VPADDBMasked256 x y (VPMOVVec8x32ToM mask)) -(MaskedAddUint8x64 x y mask) => (VPADDBMasked512 x y (VPMOVVec8x64ToM mask)) -(MaskedAddUint16x8 x y mask) => (VPADDWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedAddUint16x16 x y mask) => (VPADDWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedAddUint16x32 x y mask) => (VPADDWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedAddUint32x4 x y mask) => (VPADDDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedAddUint32x8 x y mask) => (VPADDDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedAddUint32x16 x y mask) => (VPADDDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedAddUint64x2 x y mask) => (VPADDQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedAddUint64x4 x y mask) => (VPADDQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedAddUint64x8 x y mask) => (VPADDQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedAndInt32x4 x y mask) => (VPANDDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedAndInt32x8 x y mask) => (VPANDDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedAndInt32x16 x y mask) => (VPANDDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedAndInt64x2 x y mask) => (VPANDQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedAndInt64x4 x y mask) => (VPANDQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedAndInt64x8 x y mask) => (VPANDQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedAndUint32x4 x y mask) => (VPANDDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedAndUint32x8 x y mask) => (VPANDDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedAndUint32x16 x y mask) => (VPANDDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedAndUint64x2 x y mask) => (VPANDQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedAndUint64x4 x y mask) => (VPANDQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedAndUint64x8 x y mask) => (VPANDQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedAndNotInt32x4 x y mask) => (VPANDNDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedAndNotInt32x8 x y mask) => (VPANDNDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedAndNotInt32x16 x y mask) => (VPANDNDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedAndNotInt64x2 x y mask) => (VPANDNQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedAndNotInt64x4 x y mask) => (VPANDNQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedAndNotInt64x8 x y mask) => (VPANDNQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedAndNotUint32x4 x y mask) => (VPANDNDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedAndNotUint32x8 x y mask) => (VPANDNDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedAndNotUint32x16 x y mask) => (VPANDNDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedAndNotUint64x2 x y mask) => (VPANDNQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedAndNotUint64x4 x y mask) => (VPANDNQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedAndNotUint64x8 x y mask) => (VPANDNQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedApproximateReciprocalFloat32x4 x mask) => (VRCP14PSMasked128 x (VPMOVVec32x4ToM mask)) -(MaskedApproximateReciprocalFloat32x8 x mask) => (VRCP14PSMasked256 x (VPMOVVec32x8ToM mask)) -(MaskedApproximateReciprocalFloat32x16 x mask) => (VRCP14PSMasked512 x (VPMOVVec32x16ToM mask)) -(MaskedApproximateReciprocalFloat64x2 x mask) => (VRCP14PDMasked128 x (VPMOVVec64x2ToM mask)) -(MaskedApproximateReciprocalFloat64x4 x mask) => (VRCP14PDMasked256 x (VPMOVVec64x4ToM mask)) -(MaskedApproximateReciprocalFloat64x8 x mask) => 
(VRCP14PDMasked512 x (VPMOVVec64x8ToM mask)) -(MaskedApproximateReciprocalOfSqrtFloat32x4 x mask) => (VRSQRT14PSMasked128 x (VPMOVVec32x4ToM mask)) -(MaskedApproximateReciprocalOfSqrtFloat32x8 x mask) => (VRSQRT14PSMasked256 x (VPMOVVec32x8ToM mask)) -(MaskedApproximateReciprocalOfSqrtFloat32x16 x mask) => (VRSQRT14PSMasked512 x (VPMOVVec32x16ToM mask)) -(MaskedApproximateReciprocalOfSqrtFloat64x2 x mask) => (VRSQRT14PDMasked128 x (VPMOVVec64x2ToM mask)) -(MaskedApproximateReciprocalOfSqrtFloat64x4 x mask) => (VRSQRT14PDMasked256 x (VPMOVVec64x4ToM mask)) -(MaskedApproximateReciprocalOfSqrtFloat64x8 x mask) => (VRSQRT14PDMasked512 x (VPMOVVec64x8ToM mask)) -(MaskedAverageUint8x16 x y mask) => (VPAVGBMasked128 x y (VPMOVVec8x16ToM mask)) -(MaskedAverageUint8x32 x y mask) => (VPAVGBMasked256 x y (VPMOVVec8x32ToM mask)) -(MaskedAverageUint8x64 x y mask) => (VPAVGBMasked512 x y (VPMOVVec8x64ToM mask)) -(MaskedAverageUint16x8 x y mask) => (VPAVGWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedAverageUint16x16 x y mask) => (VPAVGWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedAverageUint16x32 x y mask) => (VPAVGWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedCeilWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) -(MaskedCeilWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) -(MaskedCeilWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) -(MaskedCeilWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) -(MaskedCeilWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) -(MaskedCeilWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) -(MaskedDiffWithCeilWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) -(MaskedDiffWithCeilWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) -(MaskedDiffWithCeilWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) -(MaskedDiffWithCeilWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) -(MaskedDiffWithCeilWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) -(MaskedDiffWithCeilWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) -(MaskedDiffWithFloorWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) -(MaskedDiffWithFloorWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) -(MaskedDiffWithFloorWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) -(MaskedDiffWithFloorWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) -(MaskedDiffWithFloorWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) -(MaskedDiffWithFloorWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) -(MaskedDiffWithRoundWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) -(MaskedDiffWithRoundWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) -(MaskedDiffWithRoundWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) -(MaskedDiffWithRoundWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+0] x 
(VPMOVVec64x2ToM mask)) -(MaskedDiffWithRoundWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) -(MaskedDiffWithRoundWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) -(MaskedDiffWithTruncWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) -(MaskedDiffWithTruncWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) -(MaskedDiffWithTruncWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) -(MaskedDiffWithTruncWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) -(MaskedDiffWithTruncWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) -(MaskedDiffWithTruncWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) -(MaskedDivFloat32x4 x y mask) => (VDIVPSMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedDivFloat32x8 x y mask) => (VDIVPSMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedDivFloat32x16 x y mask) => (VDIVPSMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedDivFloat64x2 x y mask) => (VDIVPDMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedDivFloat64x4 x y mask) => (VDIVPDMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedDivFloat64x8 x y mask) => (VDIVPDMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [0] x y (VPMOVVec32x4ToM mask))) -(MaskedEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [0] x y (VPMOVVec32x8ToM mask))) -(MaskedEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [0] x y (VPMOVVec32x16ToM mask))) -(MaskedEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [0] x y (VPMOVVec64x2ToM mask))) -(MaskedEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [0] x y (VPMOVVec64x4ToM mask))) -(MaskedEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [0] x y (VPMOVVec64x8ToM mask))) -(MaskedEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [0] x y (VPMOVVec8x16ToM mask))) -(MaskedEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [0] x y (VPMOVVec8x32ToM mask))) -(MaskedEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [0] x y (VPMOVVec8x64ToM mask))) -(MaskedEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [0] x y (VPMOVVec16x8ToM mask))) -(MaskedEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [0] x y (VPMOVVec16x16ToM mask))) -(MaskedEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [0] x y (VPMOVVec16x32ToM mask))) -(MaskedEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [0] x y (VPMOVVec32x4ToM mask))) -(MaskedEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [0] x y (VPMOVVec32x8ToM mask))) -(MaskedEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [0] x y (VPMOVVec32x16ToM mask))) -(MaskedEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [0] x y (VPMOVVec64x2ToM mask))) -(MaskedEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [0] x y (VPMOVVec64x4ToM mask))) -(MaskedEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [0] x y (VPMOVVec64x8ToM mask))) -(MaskedEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [0] x y (VPMOVVec8x16ToM mask))) -(MaskedEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [0] x y (VPMOVVec8x32ToM mask))) -(MaskedEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [0] x y (VPMOVVec8x64ToM mask))) 
-(MaskedEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [0] x y (VPMOVVec16x8ToM mask))) -(MaskedEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [0] x y (VPMOVVec16x16ToM mask))) -(MaskedEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [0] x y (VPMOVVec16x32ToM mask))) -(MaskedEqualUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [0] x y (VPMOVVec32x4ToM mask))) -(MaskedEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [0] x y (VPMOVVec32x8ToM mask))) -(MaskedEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [0] x y (VPMOVVec32x16ToM mask))) -(MaskedEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [0] x y (VPMOVVec64x2ToM mask))) -(MaskedEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [0] x y (VPMOVVec64x4ToM mask))) -(MaskedEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [0] x y (VPMOVVec64x8ToM mask))) -(MaskedFloorWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) -(MaskedFloorWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) -(MaskedFloorWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) -(MaskedFloorWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) -(MaskedFloorWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) -(MaskedFloorWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) -(MaskedFusedMultiplyAddFloat32x4 x y z mask) => (VFMADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedFusedMultiplyAddFloat32x8 x y z mask) => (VFMADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedFusedMultiplyAddFloat32x16 x y z mask) => (VFMADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedFusedMultiplyAddFloat64x2 x y z mask) => (VFMADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) -(MaskedFusedMultiplyAddFloat64x4 x y z mask) => (VFMADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(MaskedFusedMultiplyAddFloat64x8 x y z mask) => (VFMADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedFusedMultiplyAddSubFloat32x4 x y z mask) => (VFMADDSUB213PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedFusedMultiplyAddSubFloat32x8 x y z mask) => (VFMADDSUB213PSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedFusedMultiplyAddSubFloat32x16 x y z mask) => (VFMADDSUB213PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedFusedMultiplyAddSubFloat64x2 x y z mask) => (VFMADDSUB213PDMasked128 x y z (VPMOVVec64x2ToM mask)) -(MaskedFusedMultiplyAddSubFloat64x4 x y z mask) => (VFMADDSUB213PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(MaskedFusedMultiplyAddSubFloat64x8 x y z mask) => (VFMADDSUB213PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedFusedMultiplySubAddFloat32x4 x y z mask) => (VFMSUBADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedFusedMultiplySubAddFloat32x8 x y z mask) => (VFMSUBADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedFusedMultiplySubAddFloat32x16 x y z mask) => (VFMSUBADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedFusedMultiplySubAddFloat64x2 x y z mask) => (VFMSUBADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) -(MaskedFusedMultiplySubAddFloat64x4 x y z mask) => (VFMSUBADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(MaskedFusedMultiplySubAddFloat64x8 x y z mask) => (VFMSUBADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedGaloisFieldAffineTransformUint8x16 [a] x y mask) => (VGF2P8AFFINEQBMasked128 [a] x 
y (VPMOVVec8x16ToM mask)) -(MaskedGaloisFieldAffineTransformUint8x32 [a] x y mask) => (VGF2P8AFFINEQBMasked256 [a] x y (VPMOVVec8x32ToM mask)) -(MaskedGaloisFieldAffineTransformUint8x64 [a] x y mask) => (VGF2P8AFFINEQBMasked512 [a] x y (VPMOVVec8x64ToM mask)) -(MaskedGaloisFieldAffineTransformInversedUint8x16 [a] x y mask) => (VGF2P8AFFINEINVQBMasked128 [a] x y (VPMOVVec8x16ToM mask)) -(MaskedGaloisFieldAffineTransformInversedUint8x32 [a] x y mask) => (VGF2P8AFFINEINVQBMasked256 [a] x y (VPMOVVec8x32ToM mask)) -(MaskedGaloisFieldAffineTransformInversedUint8x64 [a] x y mask) => (VGF2P8AFFINEINVQBMasked512 [a] x y (VPMOVVec8x64ToM mask)) -(MaskedGaloisFieldMulUint8x16 x y mask) => (VGF2P8MULBMasked128 x y (VPMOVVec8x16ToM mask)) -(MaskedGaloisFieldMulUint8x32 x y mask) => (VGF2P8MULBMasked256 x y (VPMOVVec8x32ToM mask)) -(MaskedGaloisFieldMulUint8x64 x y mask) => (VGF2P8MULBMasked512 x y (VPMOVVec8x64ToM mask)) -(MaskedGreaterFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [14] x y (VPMOVVec32x4ToM mask))) -(MaskedGreaterFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [14] x y (VPMOVVec32x8ToM mask))) -(MaskedGreaterFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [14] x y (VPMOVVec32x16ToM mask))) -(MaskedGreaterFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [14] x y (VPMOVVec64x2ToM mask))) -(MaskedGreaterFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [14] x y (VPMOVVec64x4ToM mask))) -(MaskedGreaterFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [14] x y (VPMOVVec64x8ToM mask))) -(MaskedGreaterInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [14] x y (VPMOVVec8x16ToM mask))) -(MaskedGreaterInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [14] x y (VPMOVVec8x32ToM mask))) -(MaskedGreaterInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [14] x y (VPMOVVec8x64ToM mask))) -(MaskedGreaterInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [14] x y (VPMOVVec16x8ToM mask))) -(MaskedGreaterInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [14] x y (VPMOVVec16x16ToM mask))) -(MaskedGreaterInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [14] x y (VPMOVVec16x32ToM mask))) -(MaskedGreaterInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [14] x y (VPMOVVec32x4ToM mask))) -(MaskedGreaterInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [14] x y (VPMOVVec32x8ToM mask))) -(MaskedGreaterInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [14] x y (VPMOVVec32x16ToM mask))) -(MaskedGreaterInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [14] x y (VPMOVVec64x2ToM mask))) -(MaskedGreaterInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [14] x y (VPMOVVec64x4ToM mask))) -(MaskedGreaterInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [14] x y (VPMOVVec64x8ToM mask))) -(MaskedGreaterUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [14] x y (VPMOVVec8x16ToM mask))) -(MaskedGreaterUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [14] x y (VPMOVVec8x32ToM mask))) -(MaskedGreaterUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [14] x y (VPMOVVec8x64ToM mask))) -(MaskedGreaterUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [14] x y (VPMOVVec16x8ToM mask))) -(MaskedGreaterUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [14] x y (VPMOVVec16x16ToM mask))) -(MaskedGreaterUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [14] x y (VPMOVVec16x32ToM mask))) -(MaskedGreaterUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [14] x y 
(VPMOVVec32x4ToM mask))) -(MaskedGreaterUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [14] x y (VPMOVVec32x8ToM mask))) -(MaskedGreaterUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [14] x y (VPMOVVec32x16ToM mask))) -(MaskedGreaterUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [14] x y (VPMOVVec64x2ToM mask))) -(MaskedGreaterUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [14] x y (VPMOVVec64x4ToM mask))) -(MaskedGreaterUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [14] x y (VPMOVVec64x8ToM mask))) -(MaskedGreaterEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [13] x y (VPMOVVec32x4ToM mask))) -(MaskedGreaterEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [13] x y (VPMOVVec32x8ToM mask))) -(MaskedGreaterEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [13] x y (VPMOVVec32x16ToM mask))) -(MaskedGreaterEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [13] x y (VPMOVVec64x2ToM mask))) -(MaskedGreaterEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [13] x y (VPMOVVec64x4ToM mask))) -(MaskedGreaterEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [13] x y (VPMOVVec64x8ToM mask))) -(MaskedGreaterEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [13] x y (VPMOVVec8x16ToM mask))) -(MaskedGreaterEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [13] x y (VPMOVVec8x32ToM mask))) -(MaskedGreaterEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [13] x y (VPMOVVec8x64ToM mask))) -(MaskedGreaterEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [13] x y (VPMOVVec16x8ToM mask))) -(MaskedGreaterEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [13] x y (VPMOVVec16x16ToM mask))) -(MaskedGreaterEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [13] x y (VPMOVVec16x32ToM mask))) -(MaskedGreaterEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [13] x y (VPMOVVec32x4ToM mask))) -(MaskedGreaterEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [13] x y (VPMOVVec32x8ToM mask))) -(MaskedGreaterEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [13] x y (VPMOVVec32x16ToM mask))) -(MaskedGreaterEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [13] x y (VPMOVVec64x2ToM mask))) -(MaskedGreaterEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [13] x y (VPMOVVec64x4ToM mask))) -(MaskedGreaterEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [13] x y (VPMOVVec64x8ToM mask))) -(MaskedGreaterEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [13] x y (VPMOVVec8x16ToM mask))) -(MaskedGreaterEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [13] x y (VPMOVVec8x32ToM mask))) -(MaskedGreaterEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [13] x y (VPMOVVec8x64ToM mask))) -(MaskedGreaterEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [13] x y (VPMOVVec16x8ToM mask))) -(MaskedGreaterEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [13] x y (VPMOVVec16x16ToM mask))) -(MaskedGreaterEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [13] x y (VPMOVVec16x32ToM mask))) -(MaskedGreaterEqualUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [13] x y (VPMOVVec32x4ToM mask))) -(MaskedGreaterEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [13] x y (VPMOVVec32x8ToM mask))) -(MaskedGreaterEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [13] x y (VPMOVVec32x16ToM mask))) 
-(MaskedGreaterEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [13] x y (VPMOVVec64x2ToM mask))) -(MaskedGreaterEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [13] x y (VPMOVVec64x4ToM mask))) -(MaskedGreaterEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [13] x y (VPMOVVec64x8ToM mask))) -(MaskedIsNanFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [3] x y (VPMOVVec32x4ToM mask))) -(MaskedIsNanFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [3] x y (VPMOVVec32x8ToM mask))) -(MaskedIsNanFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [3] x y (VPMOVVec32x16ToM mask))) -(MaskedIsNanFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [3] x y (VPMOVVec64x2ToM mask))) -(MaskedIsNanFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [3] x y (VPMOVVec64x4ToM mask))) -(MaskedIsNanFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [3] x y (VPMOVVec64x8ToM mask))) -(MaskedLessFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [1] x y (VPMOVVec32x4ToM mask))) -(MaskedLessFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [1] x y (VPMOVVec32x8ToM mask))) -(MaskedLessFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [1] x y (VPMOVVec32x16ToM mask))) -(MaskedLessFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [1] x y (VPMOVVec64x2ToM mask))) -(MaskedLessFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [1] x y (VPMOVVec64x4ToM mask))) -(MaskedLessFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [1] x y (VPMOVVec64x8ToM mask))) -(MaskedLessInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [1] x y (VPMOVVec8x16ToM mask))) -(MaskedLessInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [1] x y (VPMOVVec8x32ToM mask))) -(MaskedLessInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [1] x y (VPMOVVec8x64ToM mask))) -(MaskedLessInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [1] x y (VPMOVVec16x8ToM mask))) -(MaskedLessInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [1] x y (VPMOVVec16x16ToM mask))) -(MaskedLessInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [1] x y (VPMOVVec16x32ToM mask))) -(MaskedLessInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [1] x y (VPMOVVec32x4ToM mask))) -(MaskedLessInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [1] x y (VPMOVVec32x8ToM mask))) -(MaskedLessInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [1] x y (VPMOVVec32x16ToM mask))) -(MaskedLessInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [1] x y (VPMOVVec64x2ToM mask))) -(MaskedLessInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [1] x y (VPMOVVec64x4ToM mask))) -(MaskedLessInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [1] x y (VPMOVVec64x8ToM mask))) -(MaskedLessUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [1] x y (VPMOVVec8x16ToM mask))) -(MaskedLessUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [1] x y (VPMOVVec8x32ToM mask))) -(MaskedLessUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [1] x y (VPMOVVec8x64ToM mask))) -(MaskedLessUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [1] x y (VPMOVVec16x8ToM mask))) -(MaskedLessUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [1] x y (VPMOVVec16x16ToM mask))) -(MaskedLessUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [1] x y (VPMOVVec16x32ToM mask))) -(MaskedLessUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [1] x y (VPMOVVec32x4ToM mask))) -(MaskedLessUint32x8 x y mask) => (VPMOVMToVec32x8 
(VPCMPUDMasked256 [1] x y (VPMOVVec32x8ToM mask))) -(MaskedLessUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [1] x y (VPMOVVec32x16ToM mask))) -(MaskedLessUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [1] x y (VPMOVVec64x2ToM mask))) -(MaskedLessUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [1] x y (VPMOVVec64x4ToM mask))) -(MaskedLessUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [1] x y (VPMOVVec64x8ToM mask))) -(MaskedLessEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [2] x y (VPMOVVec32x4ToM mask))) -(MaskedLessEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [2] x y (VPMOVVec32x8ToM mask))) -(MaskedLessEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [2] x y (VPMOVVec32x16ToM mask))) -(MaskedLessEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [2] x y (VPMOVVec64x2ToM mask))) -(MaskedLessEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [2] x y (VPMOVVec64x4ToM mask))) -(MaskedLessEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [2] x y (VPMOVVec64x8ToM mask))) -(MaskedLessEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [2] x y (VPMOVVec8x16ToM mask))) -(MaskedLessEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [2] x y (VPMOVVec8x32ToM mask))) -(MaskedLessEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [2] x y (VPMOVVec8x64ToM mask))) -(MaskedLessEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [2] x y (VPMOVVec16x8ToM mask))) -(MaskedLessEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [2] x y (VPMOVVec16x16ToM mask))) -(MaskedLessEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [2] x y (VPMOVVec16x32ToM mask))) -(MaskedLessEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [2] x y (VPMOVVec32x4ToM mask))) -(MaskedLessEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [2] x y (VPMOVVec32x8ToM mask))) -(MaskedLessEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [2] x y (VPMOVVec32x16ToM mask))) -(MaskedLessEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [2] x y (VPMOVVec64x2ToM mask))) -(MaskedLessEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [2] x y (VPMOVVec64x4ToM mask))) -(MaskedLessEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [2] x y (VPMOVVec64x8ToM mask))) -(MaskedLessEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [2] x y (VPMOVVec8x16ToM mask))) -(MaskedLessEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [2] x y (VPMOVVec8x32ToM mask))) -(MaskedLessEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [2] x y (VPMOVVec8x64ToM mask))) -(MaskedLessEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [2] x y (VPMOVVec16x8ToM mask))) -(MaskedLessEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [2] x y (VPMOVVec16x16ToM mask))) -(MaskedLessEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [2] x y (VPMOVVec16x32ToM mask))) -(MaskedLessEqualUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [2] x y (VPMOVVec32x4ToM mask))) -(MaskedLessEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [2] x y (VPMOVVec32x8ToM mask))) -(MaskedLessEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [2] x y (VPMOVVec32x16ToM mask))) -(MaskedLessEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [2] x y (VPMOVVec64x2ToM mask))) -(MaskedLessEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [2] x y (VPMOVVec64x4ToM 
mask))) -(MaskedLessEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [2] x y (VPMOVVec64x8ToM mask))) -(MaskedMaxFloat32x4 x y mask) => (VMAXPSMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedMaxFloat32x8 x y mask) => (VMAXPSMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedMaxFloat32x16 x y mask) => (VMAXPSMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedMaxFloat64x2 x y mask) => (VMAXPDMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedMaxFloat64x4 x y mask) => (VMAXPDMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedMaxFloat64x8 x y mask) => (VMAXPDMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedMaxInt8x16 x y mask) => (VPMAXSBMasked128 x y (VPMOVVec8x16ToM mask)) -(MaskedMaxInt8x32 x y mask) => (VPMAXSBMasked256 x y (VPMOVVec8x32ToM mask)) -(MaskedMaxInt8x64 x y mask) => (VPMAXSBMasked512 x y (VPMOVVec8x64ToM mask)) -(MaskedMaxInt16x8 x y mask) => (VPMAXSWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedMaxInt16x16 x y mask) => (VPMAXSWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedMaxInt16x32 x y mask) => (VPMAXSWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedMaxInt32x4 x y mask) => (VPMAXSDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedMaxInt32x8 x y mask) => (VPMAXSDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedMaxInt32x16 x y mask) => (VPMAXSDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedMaxInt64x2 x y mask) => (VPMAXSQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedMaxInt64x4 x y mask) => (VPMAXSQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedMaxInt64x8 x y mask) => (VPMAXSQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedMaxUint8x16 x y mask) => (VPMAXUBMasked128 x y (VPMOVVec8x16ToM mask)) -(MaskedMaxUint8x32 x y mask) => (VPMAXUBMasked256 x y (VPMOVVec8x32ToM mask)) -(MaskedMaxUint8x64 x y mask) => (VPMAXUBMasked512 x y (VPMOVVec8x64ToM mask)) -(MaskedMaxUint16x8 x y mask) => (VPMAXUWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedMaxUint16x16 x y mask) => (VPMAXUWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedMaxUint16x32 x y mask) => (VPMAXUWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedMaxUint32x4 x y mask) => (VPMAXUDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedMaxUint32x8 x y mask) => (VPMAXUDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedMaxUint32x16 x y mask) => (VPMAXUDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedMaxUint64x2 x y mask) => (VPMAXUQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedMaxUint64x4 x y mask) => (VPMAXUQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedMaxUint64x8 x y mask) => (VPMAXUQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedMinFloat32x4 x y mask) => (VMINPSMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedMinFloat32x8 x y mask) => (VMINPSMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedMinFloat32x16 x y mask) => (VMINPSMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedMinFloat64x2 x y mask) => (VMINPDMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedMinFloat64x4 x y mask) => (VMINPDMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedMinFloat64x8 x y mask) => (VMINPDMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedMinInt8x16 x y mask) => (VPMINSBMasked128 x y (VPMOVVec8x16ToM mask)) -(MaskedMinInt8x32 x y mask) => (VPMINSBMasked256 x y (VPMOVVec8x32ToM mask)) -(MaskedMinInt8x64 x y mask) => (VPMINSBMasked512 x y (VPMOVVec8x64ToM mask)) -(MaskedMinInt16x8 x y mask) => (VPMINSWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedMinInt16x16 x y mask) => (VPMINSWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedMinInt16x32 x y mask) => (VPMINSWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedMinInt32x4 x y mask) => (VPMINSDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedMinInt32x8 x y mask) => 
(VPMINSDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedMinInt32x16 x y mask) => (VPMINSDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedMinInt64x2 x y mask) => (VPMINSQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedMinInt64x4 x y mask) => (VPMINSQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedMinInt64x8 x y mask) => (VPMINSQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedMinUint8x16 x y mask) => (VPMINUBMasked128 x y (VPMOVVec8x16ToM mask)) -(MaskedMinUint8x32 x y mask) => (VPMINUBMasked256 x y (VPMOVVec8x32ToM mask)) -(MaskedMinUint8x64 x y mask) => (VPMINUBMasked512 x y (VPMOVVec8x64ToM mask)) -(MaskedMinUint16x8 x y mask) => (VPMINUWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedMinUint16x16 x y mask) => (VPMINUWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedMinUint16x32 x y mask) => (VPMINUWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedMinUint32x4 x y mask) => (VPMINUDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedMinUint32x8 x y mask) => (VPMINUDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedMinUint32x16 x y mask) => (VPMINUDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedMinUint64x2 x y mask) => (VPMINUQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedMinUint64x4 x y mask) => (VPMINUQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedMinUint64x8 x y mask) => (VPMINUQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedMulFloat32x4 x y mask) => (VMULPSMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedMulFloat32x8 x y mask) => (VMULPSMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedMulFloat32x16 x y mask) => (VMULPSMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedMulFloat64x2 x y mask) => (VMULPDMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedMulFloat64x4 x y mask) => (VMULPDMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedMulFloat64x8 x y mask) => (VMULPDMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedMulByPowOf2Float32x4 x y mask) => (VSCALEFPSMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedMulByPowOf2Float32x8 x y mask) => (VSCALEFPSMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedMulByPowOf2Float32x16 x y mask) => (VSCALEFPSMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedMulByPowOf2Float64x2 x y mask) => (VSCALEFPDMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedMulByPowOf2Float64x4 x y mask) => (VSCALEFPDMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedMulByPowOf2Float64x8 x y mask) => (VSCALEFPDMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedMulEvenWidenInt64x2 x y mask) => (VPMULDQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedMulEvenWidenInt64x4 x y mask) => (VPMULDQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedMulEvenWidenInt64x8 x y mask) => (VPMULDQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedMulEvenWidenUint64x2 x y mask) => (VPMULUDQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedMulEvenWidenUint64x4 x y mask) => (VPMULUDQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedMulEvenWidenUint64x8 x y mask) => (VPMULUDQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedMulHighInt16x8 x y mask) => (VPMULHWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedMulHighInt16x16 x y mask) => (VPMULHWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedMulHighInt16x32 x y mask) => (VPMULHWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedMulHighUint16x8 x y mask) => (VPMULHUWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedMulHighUint16x16 x y mask) => (VPMULHUWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedMulHighUint16x32 x y mask) => (VPMULHUWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedMulLowInt16x8 x y mask) => (VPMULLWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedMulLowInt16x16 x y mask) => (VPMULLWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedMulLowInt16x32 x 
y mask) => (VPMULLWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedMulLowInt32x4 x y mask) => (VPMULLDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedMulLowInt32x8 x y mask) => (VPMULLDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedMulLowInt32x16 x y mask) => (VPMULLDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedMulLowInt64x2 x y mask) => (VPMULLQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedMulLowInt64x4 x y mask) => (VPMULLQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedMulLowInt64x8 x y mask) => (VPMULLQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedNotEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [4] x y (VPMOVVec32x4ToM mask))) -(MaskedNotEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [4] x y (VPMOVVec32x8ToM mask))) -(MaskedNotEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [4] x y (VPMOVVec32x16ToM mask))) -(MaskedNotEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [4] x y (VPMOVVec64x2ToM mask))) -(MaskedNotEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [4] x y (VPMOVVec64x4ToM mask))) -(MaskedNotEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [4] x y (VPMOVVec64x8ToM mask))) -(MaskedNotEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [4] x y (VPMOVVec8x16ToM mask))) -(MaskedNotEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [4] x y (VPMOVVec8x32ToM mask))) -(MaskedNotEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [4] x y (VPMOVVec8x64ToM mask))) -(MaskedNotEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [4] x y (VPMOVVec16x8ToM mask))) -(MaskedNotEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [4] x y (VPMOVVec16x16ToM mask))) -(MaskedNotEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [4] x y (VPMOVVec16x32ToM mask))) -(MaskedNotEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [4] x y (VPMOVVec32x4ToM mask))) -(MaskedNotEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [4] x y (VPMOVVec32x8ToM mask))) -(MaskedNotEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [4] x y (VPMOVVec32x16ToM mask))) -(MaskedNotEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [4] x y (VPMOVVec64x2ToM mask))) -(MaskedNotEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [4] x y (VPMOVVec64x4ToM mask))) -(MaskedNotEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [4] x y (VPMOVVec64x8ToM mask))) -(MaskedNotEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [4] x y (VPMOVVec8x16ToM mask))) -(MaskedNotEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [4] x y (VPMOVVec8x32ToM mask))) -(MaskedNotEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [4] x y (VPMOVVec8x64ToM mask))) -(MaskedNotEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [4] x y (VPMOVVec16x8ToM mask))) -(MaskedNotEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [4] x y (VPMOVVec16x16ToM mask))) -(MaskedNotEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [4] x y (VPMOVVec16x32ToM mask))) -(MaskedNotEqualUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [4] x y (VPMOVVec32x4ToM mask))) -(MaskedNotEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [4] x y (VPMOVVec32x8ToM mask))) -(MaskedNotEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [4] x y (VPMOVVec32x16ToM mask))) -(MaskedNotEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [4] x y (VPMOVVec64x2ToM mask))) -(MaskedNotEqualUint64x4 x y mask) => 
(VPMOVMToVec64x4 (VPCMPUQMasked256 [4] x y (VPMOVVec64x4ToM mask))) -(MaskedNotEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [4] x y (VPMOVVec64x8ToM mask))) -(MaskedOrInt32x4 x y mask) => (VPORDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedOrInt32x8 x y mask) => (VPORDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedOrInt32x16 x y mask) => (VPORDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedOrInt64x2 x y mask) => (VPORQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedOrInt64x4 x y mask) => (VPORQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedOrInt64x8 x y mask) => (VPORQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedOrUint32x4 x y mask) => (VPORDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedOrUint32x8 x y mask) => (VPORDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedOrUint32x16 x y mask) => (VPORDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedOrUint64x2 x y mask) => (VPORQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedOrUint64x4 x y mask) => (VPORQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedOrUint64x8 x y mask) => (VPORQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedPairDotProdInt16x8 x y mask) => (VPMADDWDMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedPairDotProdInt16x16 x y mask) => (VPMADDWDMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedPairDotProdInt16x32 x y mask) => (VPMADDWDMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedPairDotProdAccumulateInt32x4 x y z mask) => (VPDPWSSDMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedPairDotProdAccumulateInt32x8 x y z mask) => (VPDPWSSDMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedPairDotProdAccumulateInt32x16 x y z mask) => (VPDPWSSDMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedPopCountInt8x16 x mask) => (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) -(MaskedPopCountInt8x32 x mask) => (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) -(MaskedPopCountInt8x64 x mask) => (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) -(MaskedPopCountInt16x8 x mask) => (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) -(MaskedPopCountInt16x16 x mask) => (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) -(MaskedPopCountInt16x32 x mask) => (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) -(MaskedPopCountInt32x4 x mask) => (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) -(MaskedPopCountInt32x8 x mask) => (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) -(MaskedPopCountInt32x16 x mask) => (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) -(MaskedPopCountInt64x2 x mask) => (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) -(MaskedPopCountInt64x4 x mask) => (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) -(MaskedPopCountInt64x8 x mask) => (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) -(MaskedPopCountUint8x16 x mask) => (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) -(MaskedPopCountUint8x32 x mask) => (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) -(MaskedPopCountUint8x64 x mask) => (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) -(MaskedPopCountUint16x8 x mask) => (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) -(MaskedPopCountUint16x16 x mask) => (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) -(MaskedPopCountUint16x32 x mask) => (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) -(MaskedPopCountUint32x4 x mask) => (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) -(MaskedPopCountUint32x8 x mask) => (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) -(MaskedPopCountUint32x16 x mask) => (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) -(MaskedPopCountUint64x2 x mask) => (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) -(MaskedPopCountUint64x4 x mask) => (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) -(MaskedPopCountUint64x8 x mask) => 
(VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) -(MaskedRotateAllLeftInt32x4 [a] x mask) => (VPROLDMasked128 [a] x (VPMOVVec32x4ToM mask)) -(MaskedRotateAllLeftInt32x8 [a] x mask) => (VPROLDMasked256 [a] x (VPMOVVec32x8ToM mask)) -(MaskedRotateAllLeftInt32x16 [a] x mask) => (VPROLDMasked512 [a] x (VPMOVVec32x16ToM mask)) -(MaskedRotateAllLeftInt64x2 [a] x mask) => (VPROLQMasked128 [a] x (VPMOVVec64x2ToM mask)) -(MaskedRotateAllLeftInt64x4 [a] x mask) => (VPROLQMasked256 [a] x (VPMOVVec64x4ToM mask)) -(MaskedRotateAllLeftInt64x8 [a] x mask) => (VPROLQMasked512 [a] x (VPMOVVec64x8ToM mask)) -(MaskedRotateAllLeftUint32x4 [a] x mask) => (VPROLDMasked128 [a] x (VPMOVVec32x4ToM mask)) -(MaskedRotateAllLeftUint32x8 [a] x mask) => (VPROLDMasked256 [a] x (VPMOVVec32x8ToM mask)) -(MaskedRotateAllLeftUint32x16 [a] x mask) => (VPROLDMasked512 [a] x (VPMOVVec32x16ToM mask)) -(MaskedRotateAllLeftUint64x2 [a] x mask) => (VPROLQMasked128 [a] x (VPMOVVec64x2ToM mask)) -(MaskedRotateAllLeftUint64x4 [a] x mask) => (VPROLQMasked256 [a] x (VPMOVVec64x4ToM mask)) -(MaskedRotateAllLeftUint64x8 [a] x mask) => (VPROLQMasked512 [a] x (VPMOVVec64x8ToM mask)) -(MaskedRotateAllRightInt32x4 [a] x mask) => (VPRORDMasked128 [a] x (VPMOVVec32x4ToM mask)) -(MaskedRotateAllRightInt32x8 [a] x mask) => (VPRORDMasked256 [a] x (VPMOVVec32x8ToM mask)) -(MaskedRotateAllRightInt32x16 [a] x mask) => (VPRORDMasked512 [a] x (VPMOVVec32x16ToM mask)) -(MaskedRotateAllRightInt64x2 [a] x mask) => (VPRORQMasked128 [a] x (VPMOVVec64x2ToM mask)) -(MaskedRotateAllRightInt64x4 [a] x mask) => (VPRORQMasked256 [a] x (VPMOVVec64x4ToM mask)) -(MaskedRotateAllRightInt64x8 [a] x mask) => (VPRORQMasked512 [a] x (VPMOVVec64x8ToM mask)) -(MaskedRotateAllRightUint32x4 [a] x mask) => (VPRORDMasked128 [a] x (VPMOVVec32x4ToM mask)) -(MaskedRotateAllRightUint32x8 [a] x mask) => (VPRORDMasked256 [a] x (VPMOVVec32x8ToM mask)) -(MaskedRotateAllRightUint32x16 [a] x mask) => (VPRORDMasked512 [a] x (VPMOVVec32x16ToM mask)) -(MaskedRotateAllRightUint64x2 [a] x mask) => (VPRORQMasked128 [a] x (VPMOVVec64x2ToM mask)) -(MaskedRotateAllRightUint64x4 [a] x mask) => (VPRORQMasked256 [a] x (VPMOVVec64x4ToM mask)) -(MaskedRotateAllRightUint64x8 [a] x mask) => (VPRORQMasked512 [a] x (VPMOVVec64x8ToM mask)) -(MaskedRotateLeftInt32x4 x y mask) => (VPROLVDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedRotateLeftInt32x8 x y mask) => (VPROLVDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedRotateLeftInt32x16 x y mask) => (VPROLVDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedRotateLeftInt64x2 x y mask) => (VPROLVQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedRotateLeftInt64x4 x y mask) => (VPROLVQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedRotateLeftInt64x8 x y mask) => (VPROLVQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedRotateLeftUint32x4 x y mask) => (VPROLVDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedRotateLeftUint32x8 x y mask) => (VPROLVDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedRotateLeftUint32x16 x y mask) => (VPROLVDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedRotateLeftUint64x2 x y mask) => (VPROLVQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedRotateLeftUint64x4 x y mask) => (VPROLVQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedRotateLeftUint64x8 x y mask) => (VPROLVQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedRotateRightInt32x4 x y mask) => (VPRORVDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedRotateRightInt32x8 x y mask) => (VPRORVDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedRotateRightInt32x16 x y mask) => (VPRORVDMasked512 x y (VPMOVVec32x16ToM mask)) 
-(MaskedRotateRightInt64x2 x y mask) => (VPRORVQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedRotateRightInt64x4 x y mask) => (VPRORVQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedRotateRightInt64x8 x y mask) => (VPRORVQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedRotateRightUint32x4 x y mask) => (VPRORVDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedRotateRightUint32x8 x y mask) => (VPRORVDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedRotateRightUint32x16 x y mask) => (VPRORVDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedRotateRightUint64x2 x y mask) => (VPRORVQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedRotateRightUint64x4 x y mask) => (VPRORVQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedRotateRightUint64x8 x y mask) => (VPRORVQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedRoundWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) -(MaskedRoundWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) -(MaskedRoundWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) -(MaskedRoundWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) -(MaskedRoundWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) -(MaskedRoundWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) -(MaskedSaturatedAddInt8x16 x y mask) => (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) -(MaskedSaturatedAddInt8x32 x y mask) => (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) -(MaskedSaturatedAddInt8x64 x y mask) => (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) -(MaskedSaturatedAddInt16x8 x y mask) => (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedSaturatedAddInt16x16 x y mask) => (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedSaturatedAddInt16x32 x y mask) => (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedSaturatedAddUint8x16 x y mask) => (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) -(MaskedSaturatedAddUint8x32 x y mask) => (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) -(MaskedSaturatedAddUint8x64 x y mask) => (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) -(MaskedSaturatedAddUint16x8 x y mask) => (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedSaturatedAddUint16x16 x y mask) => (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedSaturatedAddUint16x32 x y mask) => (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedSaturatedPairDotProdAccumulateInt32x4 x y z mask) => (VPDPWSSDSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedSaturatedPairDotProdAccumulateInt32x8 x y z mask) => (VPDPWSSDSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedSaturatedPairDotProdAccumulateInt32x16 x y z mask) => (VPDPWSSDSMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedSaturatedSubInt8x16 x y mask) => (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) -(MaskedSaturatedSubInt8x32 x y mask) => (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) -(MaskedSaturatedSubInt8x64 x y mask) => (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) -(MaskedSaturatedSubInt16x8 x y mask) => (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedSaturatedSubInt16x16 x y mask) => (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedSaturatedSubInt16x32 x y mask) => (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedSaturatedSubUint8x16 x y mask) => (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) -(MaskedSaturatedSubUint8x32 x y mask) => (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) -(MaskedSaturatedSubUint8x64 x y mask) => 
(VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) -(MaskedSaturatedSubUint16x8 x y mask) => (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedSaturatedSubUint16x16 x y mask) => (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedSaturatedSubUint16x32 x y mask) => (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedSaturatedUnsignedSignedPairDotProdUint8x16 x y mask) => (VPMADDUBSWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedSaturatedUnsignedSignedPairDotProdUint8x32 x y mask) => (VPMADDUBSWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedSaturatedUnsignedSignedPairDotProdUint8x64 x y mask) => (VPMADDUBSWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4 x y z mask) => (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8 x y z mask) => (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16 x y z mask) => (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4 x y z mask) => (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8 x y z mask) => (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16 x y z mask) => (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedShiftAllLeftInt64x2 x y mask) => (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedShiftAllLeftInt64x4 x y mask) => (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedShiftAllLeftInt64x8 x y mask) => (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedShiftAllLeftUint64x2 x y mask) => (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedShiftAllLeftUint64x4 x y mask) => (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedShiftAllLeftUint64x8 x y mask) => (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedShiftAllLeftAndFillUpperFromInt16x8 [a] x y mask) => (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) -(MaskedShiftAllLeftAndFillUpperFromInt16x16 [a] x y mask) => (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) -(MaskedShiftAllLeftAndFillUpperFromInt16x32 [a] x y mask) => (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) -(MaskedShiftAllLeftAndFillUpperFromInt32x4 [a] x y mask) => (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) -(MaskedShiftAllLeftAndFillUpperFromInt32x8 [a] x y mask) => (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) -(MaskedShiftAllLeftAndFillUpperFromInt32x16 [a] x y mask) => (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) -(MaskedShiftAllLeftAndFillUpperFromInt64x2 [a] x y mask) => (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) -(MaskedShiftAllLeftAndFillUpperFromInt64x4 [a] x y mask) => (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) -(MaskedShiftAllLeftAndFillUpperFromInt64x8 [a] x y mask) => (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) -(MaskedShiftAllLeftAndFillUpperFromUint16x8 [a] x y mask) => (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) -(MaskedShiftAllLeftAndFillUpperFromUint16x16 [a] x y mask) => (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) -(MaskedShiftAllLeftAndFillUpperFromUint16x32 [a] x y mask) => (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) -(MaskedShiftAllLeftAndFillUpperFromUint32x4 [a] x y mask) => (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) -(MaskedShiftAllLeftAndFillUpperFromUint32x8 [a] x y mask) => (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) -(MaskedShiftAllLeftAndFillUpperFromUint32x16 [a] x y 
mask) => (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) -(MaskedShiftAllLeftAndFillUpperFromUint64x2 [a] x y mask) => (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) -(MaskedShiftAllLeftAndFillUpperFromUint64x4 [a] x y mask) => (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) -(MaskedShiftAllLeftAndFillUpperFromUint64x8 [a] x y mask) => (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) -(MaskedShiftAllRightInt64x2 x y mask) => (VPSRLQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedShiftAllRightInt64x4 x y mask) => (VPSRLQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedShiftAllRightInt64x8 x y mask) => (VPSRLQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedShiftAllRightUint64x2 x y mask) => (VPSRLQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedShiftAllRightUint64x4 x y mask) => (VPSRLQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedShiftAllRightUint64x8 x y mask) => (VPSRLQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedShiftAllRightAndFillUpperFromInt16x8 [a] x y mask) => (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) -(MaskedShiftAllRightAndFillUpperFromInt16x16 [a] x y mask) => (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) -(MaskedShiftAllRightAndFillUpperFromInt16x32 [a] x y mask) => (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) -(MaskedShiftAllRightAndFillUpperFromInt32x4 [a] x y mask) => (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) -(MaskedShiftAllRightAndFillUpperFromInt32x8 [a] x y mask) => (VPSHRDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) -(MaskedShiftAllRightAndFillUpperFromInt32x16 [a] x y mask) => (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) -(MaskedShiftAllRightAndFillUpperFromInt64x2 [a] x y mask) => (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) -(MaskedShiftAllRightAndFillUpperFromInt64x4 [a] x y mask) => (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) -(MaskedShiftAllRightAndFillUpperFromInt64x8 [a] x y mask) => (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) -(MaskedShiftAllRightAndFillUpperFromUint16x8 [a] x y mask) => (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) -(MaskedShiftAllRightAndFillUpperFromUint16x16 [a] x y mask) => (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) -(MaskedShiftAllRightAndFillUpperFromUint16x32 [a] x y mask) => (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) -(MaskedShiftAllRightAndFillUpperFromUint32x4 [a] x y mask) => (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) -(MaskedShiftAllRightAndFillUpperFromUint32x8 [a] x y mask) => (VPSHRDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) -(MaskedShiftAllRightAndFillUpperFromUint32x16 [a] x y mask) => (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) -(MaskedShiftAllRightAndFillUpperFromUint64x2 [a] x y mask) => (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) -(MaskedShiftAllRightAndFillUpperFromUint64x4 [a] x y mask) => (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) -(MaskedShiftAllRightAndFillUpperFromUint64x8 [a] x y mask) => (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) -(MaskedShiftAllRightSignExtendedInt64x2 x y mask) => (VPSRAQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedShiftAllRightSignExtendedInt64x4 x y mask) => (VPSRAQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedShiftAllRightSignExtendedInt64x8 x y mask) => (VPSRAQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedShiftLeftInt16x8 x y mask) => (VPSLLVWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedShiftLeftInt16x16 x y mask) => (VPSLLVWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedShiftLeftInt16x32 x y mask) => (VPSLLVWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedShiftLeftInt32x4 x y mask) => 
(VPSLLVDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedShiftLeftInt32x8 x y mask) => (VPSLLVDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedShiftLeftInt32x16 x y mask) => (VPSLLVDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedShiftLeftInt64x2 x y mask) => (VPSLLVQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedShiftLeftInt64x4 x y mask) => (VPSLLVQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedShiftLeftInt64x8 x y mask) => (VPSLLVQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedShiftLeftUint16x8 x y mask) => (VPSLLVWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedShiftLeftUint16x16 x y mask) => (VPSLLVWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedShiftLeftUint16x32 x y mask) => (VPSLLVWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedShiftLeftUint32x4 x y mask) => (VPSLLVDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedShiftLeftUint32x8 x y mask) => (VPSLLVDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedShiftLeftUint32x16 x y mask) => (VPSLLVDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedShiftLeftUint64x2 x y mask) => (VPSLLVQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedShiftLeftUint64x4 x y mask) => (VPSLLVQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedShiftLeftUint64x8 x y mask) => (VPSLLVQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedShiftLeftAndFillUpperFromInt16x8 x y z mask) => (VPSHLDVWMasked128 x y z (VPMOVVec16x8ToM mask)) -(MaskedShiftLeftAndFillUpperFromInt16x16 x y z mask) => (VPSHLDVWMasked256 x y z (VPMOVVec16x16ToM mask)) -(MaskedShiftLeftAndFillUpperFromInt16x32 x y z mask) => (VPSHLDVWMasked512 x y z (VPMOVVec16x32ToM mask)) -(MaskedShiftLeftAndFillUpperFromInt32x4 x y z mask) => (VPSHLDVDMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedShiftLeftAndFillUpperFromInt32x8 x y z mask) => (VPSHLDVDMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedShiftLeftAndFillUpperFromInt32x16 x y z mask) => (VPSHLDVDMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedShiftLeftAndFillUpperFromInt64x2 x y z mask) => (VPSHLDVQMasked128 x y z (VPMOVVec64x2ToM mask)) -(MaskedShiftLeftAndFillUpperFromInt64x4 x y z mask) => (VPSHLDVQMasked256 x y z (VPMOVVec64x4ToM mask)) -(MaskedShiftLeftAndFillUpperFromInt64x8 x y z mask) => (VPSHLDVQMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedShiftLeftAndFillUpperFromUint16x8 x y z mask) => (VPSHLDVWMasked128 x y z (VPMOVVec16x8ToM mask)) -(MaskedShiftLeftAndFillUpperFromUint16x16 x y z mask) => (VPSHLDVWMasked256 x y z (VPMOVVec16x16ToM mask)) -(MaskedShiftLeftAndFillUpperFromUint16x32 x y z mask) => (VPSHLDVWMasked512 x y z (VPMOVVec16x32ToM mask)) -(MaskedShiftLeftAndFillUpperFromUint32x4 x y z mask) => (VPSHLDVDMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedShiftLeftAndFillUpperFromUint32x8 x y z mask) => (VPSHLDVDMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedShiftLeftAndFillUpperFromUint32x16 x y z mask) => (VPSHLDVDMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedShiftLeftAndFillUpperFromUint64x2 x y z mask) => (VPSHLDVQMasked128 x y z (VPMOVVec64x2ToM mask)) -(MaskedShiftLeftAndFillUpperFromUint64x4 x y z mask) => (VPSHLDVQMasked256 x y z (VPMOVVec64x4ToM mask)) -(MaskedShiftLeftAndFillUpperFromUint64x8 x y z mask) => (VPSHLDVQMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedShiftRightInt16x8 x y mask) => (VPSRLVWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedShiftRightInt16x16 x y mask) => (VPSRLVWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedShiftRightInt16x32 x y mask) => (VPSRLVWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedShiftRightInt32x4 x y mask) => (VPSRLVDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedShiftRightInt32x8 x y mask) => (VPSRLVDMasked256 x y 
(VPMOVVec32x8ToM mask)) -(MaskedShiftRightInt32x16 x y mask) => (VPSRLVDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedShiftRightInt64x2 x y mask) => (VPSRLVQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedShiftRightInt64x4 x y mask) => (VPSRLVQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedShiftRightInt64x8 x y mask) => (VPSRLVQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedShiftRightUint16x8 x y mask) => (VPSRLVWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedShiftRightUint16x16 x y mask) => (VPSRLVWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedShiftRightUint16x32 x y mask) => (VPSRLVWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedShiftRightUint32x4 x y mask) => (VPSRLVDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedShiftRightUint32x8 x y mask) => (VPSRLVDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedShiftRightUint32x16 x y mask) => (VPSRLVDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedShiftRightUint64x2 x y mask) => (VPSRLVQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedShiftRightUint64x4 x y mask) => (VPSRLVQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedShiftRightUint64x8 x y mask) => (VPSRLVQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedShiftRightAndFillUpperFromInt16x8 x y z mask) => (VPSHRDVWMasked128 x y z (VPMOVVec16x8ToM mask)) -(MaskedShiftRightAndFillUpperFromInt16x16 x y z mask) => (VPSHRDVWMasked256 x y z (VPMOVVec16x16ToM mask)) -(MaskedShiftRightAndFillUpperFromInt16x32 x y z mask) => (VPSHRDVWMasked512 x y z (VPMOVVec16x32ToM mask)) -(MaskedShiftRightAndFillUpperFromInt32x4 x y z mask) => (VPSHRDVDMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedShiftRightAndFillUpperFromInt32x8 x y z mask) => (VPSHRDVDMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedShiftRightAndFillUpperFromInt32x16 x y z mask) => (VPSHRDVDMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedShiftRightAndFillUpperFromInt64x2 x y z mask) => (VPSHRDVQMasked128 x y z (VPMOVVec64x2ToM mask)) -(MaskedShiftRightAndFillUpperFromInt64x4 x y z mask) => (VPSHRDVQMasked256 x y z (VPMOVVec64x4ToM mask)) -(MaskedShiftRightAndFillUpperFromInt64x8 x y z mask) => (VPSHRDVQMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedShiftRightAndFillUpperFromUint16x8 x y z mask) => (VPSHRDVWMasked128 x y z (VPMOVVec16x8ToM mask)) -(MaskedShiftRightAndFillUpperFromUint16x16 x y z mask) => (VPSHRDVWMasked256 x y z (VPMOVVec16x16ToM mask)) -(MaskedShiftRightAndFillUpperFromUint16x32 x y z mask) => (VPSHRDVWMasked512 x y z (VPMOVVec16x32ToM mask)) -(MaskedShiftRightAndFillUpperFromUint32x4 x y z mask) => (VPSHRDVDMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedShiftRightAndFillUpperFromUint32x8 x y z mask) => (VPSHRDVDMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedShiftRightAndFillUpperFromUint32x16 x y z mask) => (VPSHRDVDMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedShiftRightAndFillUpperFromUint64x2 x y z mask) => (VPSHRDVQMasked128 x y z (VPMOVVec64x2ToM mask)) -(MaskedShiftRightAndFillUpperFromUint64x4 x y z mask) => (VPSHRDVQMasked256 x y z (VPMOVVec64x4ToM mask)) -(MaskedShiftRightAndFillUpperFromUint64x8 x y z mask) => (VPSHRDVQMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedShiftRightSignExtendedInt16x8 x y mask) => (VPSRAVWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedShiftRightSignExtendedInt16x16 x y mask) => (VPSRAVWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedShiftRightSignExtendedInt16x32 x y mask) => (VPSRAVWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedShiftRightSignExtendedInt32x4 x y mask) => (VPSRAVDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedShiftRightSignExtendedInt32x8 x y mask) => (VPSRAVDMasked256 x y 
(VPMOVVec32x8ToM mask)) -(MaskedShiftRightSignExtendedInt32x16 x y mask) => (VPSRAVDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedShiftRightSignExtendedInt64x2 x y mask) => (VPSRAVQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedShiftRightSignExtendedInt64x4 x y mask) => (VPSRAVQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedShiftRightSignExtendedInt64x8 x y mask) => (VPSRAVQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedShiftRightSignExtendedUint16x8 x y mask) => (VPSRAVWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedShiftRightSignExtendedUint16x16 x y mask) => (VPSRAVWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedShiftRightSignExtendedUint16x32 x y mask) => (VPSRAVWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedShiftRightSignExtendedUint32x4 x y mask) => (VPSRAVDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedShiftRightSignExtendedUint32x8 x y mask) => (VPSRAVDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedShiftRightSignExtendedUint32x16 x y mask) => (VPSRAVDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedShiftRightSignExtendedUint64x2 x y mask) => (VPSRAVQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedShiftRightSignExtendedUint64x4 x y mask) => (VPSRAVQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedShiftRightSignExtendedUint64x8 x y mask) => (VPSRAVQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedSqrtFloat32x4 x mask) => (VSQRTPSMasked128 x (VPMOVVec32x4ToM mask)) -(MaskedSqrtFloat32x8 x mask) => (VSQRTPSMasked256 x (VPMOVVec32x8ToM mask)) -(MaskedSqrtFloat32x16 x mask) => (VSQRTPSMasked512 x (VPMOVVec32x16ToM mask)) -(MaskedSqrtFloat64x2 x mask) => (VSQRTPDMasked128 x (VPMOVVec64x2ToM mask)) -(MaskedSqrtFloat64x4 x mask) => (VSQRTPDMasked256 x (VPMOVVec64x4ToM mask)) -(MaskedSqrtFloat64x8 x mask) => (VSQRTPDMasked512 x (VPMOVVec64x8ToM mask)) -(MaskedSubFloat32x4 x y mask) => (VSUBPSMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedSubFloat32x8 x y mask) => (VSUBPSMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedSubFloat32x16 x y mask) => (VSUBPSMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedSubFloat64x2 x y mask) => (VSUBPDMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedSubFloat64x4 x y mask) => (VSUBPDMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedSubFloat64x8 x y mask) => (VSUBPDMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedSubInt8x16 x y mask) => (VPSUBBMasked128 x y (VPMOVVec8x16ToM mask)) -(MaskedSubInt8x32 x y mask) => (VPSUBBMasked256 x y (VPMOVVec8x32ToM mask)) -(MaskedSubInt8x64 x y mask) => (VPSUBBMasked512 x y (VPMOVVec8x64ToM mask)) -(MaskedSubInt16x8 x y mask) => (VPSUBWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedSubInt16x16 x y mask) => (VPSUBWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedSubInt16x32 x y mask) => (VPSUBWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedSubInt32x4 x y mask) => (VPSUBDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedSubInt32x8 x y mask) => (VPSUBDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedSubInt32x16 x y mask) => (VPSUBDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedSubInt64x2 x y mask) => (VPSUBQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedSubInt64x4 x y mask) => (VPSUBQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedSubInt64x8 x y mask) => (VPSUBQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedSubUint8x16 x y mask) => (VPSUBBMasked128 x y (VPMOVVec8x16ToM mask)) -(MaskedSubUint8x32 x y mask) => (VPSUBBMasked256 x y (VPMOVVec8x32ToM mask)) -(MaskedSubUint8x64 x y mask) => (VPSUBBMasked512 x y (VPMOVVec8x64ToM mask)) -(MaskedSubUint16x8 x y mask) => (VPSUBWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedSubUint16x16 x y mask) => (VPSUBWMasked256 x y 
(VPMOVVec16x16ToM mask)) -(MaskedSubUint16x32 x y mask) => (VPSUBWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedSubUint32x4 x y mask) => (VPSUBDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedSubUint32x8 x y mask) => (VPSUBDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedSubUint32x16 x y mask) => (VPSUBDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedSubUint64x2 x y mask) => (VPSUBQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedSubUint64x4 x y mask) => (VPSUBQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedSubUint64x8 x y mask) => (VPSUBQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedTruncWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) -(MaskedTruncWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) -(MaskedTruncWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) -(MaskedTruncWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) -(MaskedTruncWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) -(MaskedTruncWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) -(MaskedUnsignedSignedQuadDotProdAccumulateInt32x4 x y z mask) => (VPDPBUSDMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedUnsignedSignedQuadDotProdAccumulateInt32x8 x y z mask) => (VPDPBUSDMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedUnsignedSignedQuadDotProdAccumulateInt32x16 x y z mask) => (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedUnsignedSignedQuadDotProdAccumulateUint32x4 x y z mask) => (VPDPBUSDMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedUnsignedSignedQuadDotProdAccumulateUint32x8 x y z mask) => (VPDPBUSDMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedUnsignedSignedQuadDotProdAccumulateUint32x16 x y z mask) => (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedXorInt32x4 x y mask) => (VPXORDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedXorInt32x8 x y mask) => (VPXORDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedXorInt32x16 x y mask) => (VPXORDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedXorInt64x2 x y mask) => (VPXORQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedXorInt64x4 x y mask) => (VPXORQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedXorInt64x8 x y mask) => (VPXORQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedXorUint32x4 x y mask) => (VPXORDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedXorUint32x8 x y mask) => (VPXORDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedXorUint32x16 x y mask) => (VPXORDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedXorUint64x2 x y mask) => (VPXORQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedXorUint64x4 x y mask) => (VPXORQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedXorUint64x8 x y mask) => (VPXORQMasked512 x y (VPMOVVec64x8ToM mask)) +(LessEqualMaskedFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [2] x y (VPMOVVec32x4ToM mask))) +(LessEqualMaskedFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [2] x y (VPMOVVec32x8ToM mask))) +(LessEqualMaskedFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [2] x y (VPMOVVec32x16ToM mask))) +(LessEqualMaskedFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [2] x y (VPMOVVec64x2ToM mask))) +(LessEqualMaskedFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [2] x y (VPMOVVec64x4ToM mask))) +(LessEqualMaskedFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [2] x y (VPMOVVec64x8ToM mask))) +(LessEqualMaskedInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 
[2] x y (VPMOVVec8x16ToM mask))) +(LessEqualMaskedInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [2] x y (VPMOVVec8x32ToM mask))) +(LessEqualMaskedInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [2] x y (VPMOVVec8x64ToM mask))) +(LessEqualMaskedInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [2] x y (VPMOVVec16x8ToM mask))) +(LessEqualMaskedInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [2] x y (VPMOVVec16x16ToM mask))) +(LessEqualMaskedInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [2] x y (VPMOVVec16x32ToM mask))) +(LessEqualMaskedInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [2] x y (VPMOVVec32x4ToM mask))) +(LessEqualMaskedInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [2] x y (VPMOVVec32x8ToM mask))) +(LessEqualMaskedInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [2] x y (VPMOVVec32x16ToM mask))) +(LessEqualMaskedInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [2] x y (VPMOVVec64x2ToM mask))) +(LessEqualMaskedInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [2] x y (VPMOVVec64x4ToM mask))) +(LessEqualMaskedInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [2] x y (VPMOVVec64x8ToM mask))) +(LessEqualMaskedUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [2] x y (VPMOVVec8x16ToM mask))) +(LessEqualMaskedUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [2] x y (VPMOVVec8x32ToM mask))) +(LessEqualMaskedUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [2] x y (VPMOVVec8x64ToM mask))) +(LessEqualMaskedUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [2] x y (VPMOVVec16x8ToM mask))) +(LessEqualMaskedUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [2] x y (VPMOVVec16x16ToM mask))) +(LessEqualMaskedUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [2] x y (VPMOVVec16x32ToM mask))) +(LessEqualMaskedUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [2] x y (VPMOVVec32x4ToM mask))) +(LessEqualMaskedUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [2] x y (VPMOVVec32x8ToM mask))) +(LessEqualMaskedUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [2] x y (VPMOVVec32x16ToM mask))) +(LessEqualMaskedUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [2] x y (VPMOVVec64x2ToM mask))) +(LessEqualMaskedUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [2] x y (VPMOVVec64x4ToM mask))) +(LessEqualMaskedUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [2] x y (VPMOVVec64x8ToM mask))) +(LessMaskedFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [1] x y (VPMOVVec32x4ToM mask))) +(LessMaskedFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [1] x y (VPMOVVec32x8ToM mask))) +(LessMaskedFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [1] x y (VPMOVVec32x16ToM mask))) +(LessMaskedFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [1] x y (VPMOVVec64x2ToM mask))) +(LessMaskedFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [1] x y (VPMOVVec64x4ToM mask))) +(LessMaskedFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [1] x y (VPMOVVec64x8ToM mask))) +(LessMaskedInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [1] x y (VPMOVVec8x16ToM mask))) +(LessMaskedInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [1] x y (VPMOVVec8x32ToM mask))) +(LessMaskedInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [1] x y (VPMOVVec8x64ToM mask))) +(LessMaskedInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [1] x y (VPMOVVec16x8ToM mask))) +(LessMaskedInt16x16 x y mask) => 
(VPMOVMToVec16x16 (VPCMPWMasked256 [1] x y (VPMOVVec16x16ToM mask))) +(LessMaskedInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [1] x y (VPMOVVec16x32ToM mask))) +(LessMaskedInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [1] x y (VPMOVVec32x4ToM mask))) +(LessMaskedInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [1] x y (VPMOVVec32x8ToM mask))) +(LessMaskedInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [1] x y (VPMOVVec32x16ToM mask))) +(LessMaskedInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [1] x y (VPMOVVec64x2ToM mask))) +(LessMaskedInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [1] x y (VPMOVVec64x4ToM mask))) +(LessMaskedInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [1] x y (VPMOVVec64x8ToM mask))) +(LessMaskedUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [1] x y (VPMOVVec8x16ToM mask))) +(LessMaskedUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [1] x y (VPMOVVec8x32ToM mask))) +(LessMaskedUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [1] x y (VPMOVVec8x64ToM mask))) +(LessMaskedUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [1] x y (VPMOVVec16x8ToM mask))) +(LessMaskedUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [1] x y (VPMOVVec16x16ToM mask))) +(LessMaskedUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [1] x y (VPMOVVec16x32ToM mask))) +(LessMaskedUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [1] x y (VPMOVVec32x4ToM mask))) +(LessMaskedUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [1] x y (VPMOVVec32x8ToM mask))) +(LessMaskedUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [1] x y (VPMOVVec32x16ToM mask))) +(LessMaskedUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [1] x y (VPMOVVec64x2ToM mask))) +(LessMaskedUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [1] x y (VPMOVVec64x4ToM mask))) +(LessMaskedUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [1] x y (VPMOVVec64x8ToM mask))) (MaxFloat32x4 ...) => (VMAXPS128 ...) (MaxFloat32x8 ...) => (VMAXPS256 ...) (MaxFloat32x16 ...) => (VMAXPS512 ...) @@ -1151,6 +695,36 @@ (MaxUint64x2 ...) => (VPMAXUQ128 ...) (MaxUint64x4 ...) => (VPMAXUQ256 ...) (MaxUint64x8 ...) => (VPMAXUQ512 ...) 
+(MaxMaskedFloat32x4 x y mask) => (VMAXPSMasked128 x y (VPMOVVec32x4ToM mask)) +(MaxMaskedFloat32x8 x y mask) => (VMAXPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MaxMaskedFloat32x16 x y mask) => (VMAXPSMasked512 x y (VPMOVVec32x16ToM mask)) +(MaxMaskedFloat64x2 x y mask) => (VMAXPDMasked128 x y (VPMOVVec64x2ToM mask)) +(MaxMaskedFloat64x4 x y mask) => (VMAXPDMasked256 x y (VPMOVVec64x4ToM mask)) +(MaxMaskedFloat64x8 x y mask) => (VMAXPDMasked512 x y (VPMOVVec64x8ToM mask)) +(MaxMaskedInt8x16 x y mask) => (VPMAXSBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaxMaskedInt8x32 x y mask) => (VPMAXSBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaxMaskedInt8x64 x y mask) => (VPMAXSBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaxMaskedInt16x8 x y mask) => (VPMAXSWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaxMaskedInt16x16 x y mask) => (VPMAXSWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaxMaskedInt16x32 x y mask) => (VPMAXSWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaxMaskedInt32x4 x y mask) => (VPMAXSDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaxMaskedInt32x8 x y mask) => (VPMAXSDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaxMaskedInt32x16 x y mask) => (VPMAXSDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaxMaskedInt64x2 x y mask) => (VPMAXSQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaxMaskedInt64x4 x y mask) => (VPMAXSQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaxMaskedInt64x8 x y mask) => (VPMAXSQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaxMaskedUint8x16 x y mask) => (VPMAXUBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaxMaskedUint8x32 x y mask) => (VPMAXUBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaxMaskedUint8x64 x y mask) => (VPMAXUBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaxMaskedUint16x8 x y mask) => (VPMAXUWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaxMaskedUint16x16 x y mask) => (VPMAXUWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaxMaskedUint16x32 x y mask) => (VPMAXUWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaxMaskedUint32x4 x y mask) => (VPMAXUDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaxMaskedUint32x8 x y mask) => (VPMAXUDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaxMaskedUint32x16 x y mask) => (VPMAXUDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaxMaskedUint64x2 x y mask) => (VPMAXUQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaxMaskedUint64x4 x y mask) => (VPMAXUQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaxMaskedUint64x8 x y mask) => (VPMAXUQMasked512 x y (VPMOVVec64x8ToM mask)) (MinFloat32x4 ...) => (VMINPS128 ...) (MinFloat32x8 ...) => (VMINPS256 ...) (MinFloat32x16 ...) => (VMINPS512 ...) @@ -1181,6 +755,36 @@ (MinUint64x2 ...) => (VPMINUQ128 ...) (MinUint64x4 ...) => (VPMINUQ256 ...) (MinUint64x8 ...) => (VPMINUQ512 ...) 
+(MinMaskedFloat32x4 x y mask) => (VMINPSMasked128 x y (VPMOVVec32x4ToM mask)) +(MinMaskedFloat32x8 x y mask) => (VMINPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MinMaskedFloat32x16 x y mask) => (VMINPSMasked512 x y (VPMOVVec32x16ToM mask)) +(MinMaskedFloat64x2 x y mask) => (VMINPDMasked128 x y (VPMOVVec64x2ToM mask)) +(MinMaskedFloat64x4 x y mask) => (VMINPDMasked256 x y (VPMOVVec64x4ToM mask)) +(MinMaskedFloat64x8 x y mask) => (VMINPDMasked512 x y (VPMOVVec64x8ToM mask)) +(MinMaskedInt8x16 x y mask) => (VPMINSBMasked128 x y (VPMOVVec8x16ToM mask)) +(MinMaskedInt8x32 x y mask) => (VPMINSBMasked256 x y (VPMOVVec8x32ToM mask)) +(MinMaskedInt8x64 x y mask) => (VPMINSBMasked512 x y (VPMOVVec8x64ToM mask)) +(MinMaskedInt16x8 x y mask) => (VPMINSWMasked128 x y (VPMOVVec16x8ToM mask)) +(MinMaskedInt16x16 x y mask) => (VPMINSWMasked256 x y (VPMOVVec16x16ToM mask)) +(MinMaskedInt16x32 x y mask) => (VPMINSWMasked512 x y (VPMOVVec16x32ToM mask)) +(MinMaskedInt32x4 x y mask) => (VPMINSDMasked128 x y (VPMOVVec32x4ToM mask)) +(MinMaskedInt32x8 x y mask) => (VPMINSDMasked256 x y (VPMOVVec32x8ToM mask)) +(MinMaskedInt32x16 x y mask) => (VPMINSDMasked512 x y (VPMOVVec32x16ToM mask)) +(MinMaskedInt64x2 x y mask) => (VPMINSQMasked128 x y (VPMOVVec64x2ToM mask)) +(MinMaskedInt64x4 x y mask) => (VPMINSQMasked256 x y (VPMOVVec64x4ToM mask)) +(MinMaskedInt64x8 x y mask) => (VPMINSQMasked512 x y (VPMOVVec64x8ToM mask)) +(MinMaskedUint8x16 x y mask) => (VPMINUBMasked128 x y (VPMOVVec8x16ToM mask)) +(MinMaskedUint8x32 x y mask) => (VPMINUBMasked256 x y (VPMOVVec8x32ToM mask)) +(MinMaskedUint8x64 x y mask) => (VPMINUBMasked512 x y (VPMOVVec8x64ToM mask)) +(MinMaskedUint16x8 x y mask) => (VPMINUWMasked128 x y (VPMOVVec16x8ToM mask)) +(MinMaskedUint16x16 x y mask) => (VPMINUWMasked256 x y (VPMOVVec16x16ToM mask)) +(MinMaskedUint16x32 x y mask) => (VPMINUWMasked512 x y (VPMOVVec16x32ToM mask)) +(MinMaskedUint32x4 x y mask) => (VPMINUDMasked128 x y (VPMOVVec32x4ToM mask)) +(MinMaskedUint32x8 x y mask) => (VPMINUDMasked256 x y (VPMOVVec32x8ToM mask)) +(MinMaskedUint32x16 x y mask) => (VPMINUDMasked512 x y (VPMOVVec32x16ToM mask)) +(MinMaskedUint64x2 x y mask) => (VPMINUQMasked128 x y (VPMOVVec64x2ToM mask)) +(MinMaskedUint64x4 x y mask) => (VPMINUQMasked256 x y (VPMOVVec64x4ToM mask)) +(MinMaskedUint64x8 x y mask) => (VPMINUQMasked512 x y (VPMOVVec64x8ToM mask)) (MulFloat32x4 ...) => (VMULPS128 ...) (MulFloat32x8 ...) => (VMULPS256 ...) (MulFloat32x16 ...) => (VMULPS512 ...) @@ -1193,6 +797,12 @@ (MulByPowOf2Float64x2 ...) => (VSCALEFPD128 ...) (MulByPowOf2Float64x4 ...) => (VSCALEFPD256 ...) (MulByPowOf2Float64x8 ...) => (VSCALEFPD512 ...) +(MulByPowOf2MaskedFloat32x4 x y mask) => (VSCALEFPSMasked128 x y (VPMOVVec32x4ToM mask)) +(MulByPowOf2MaskedFloat32x8 x y mask) => (VSCALEFPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MulByPowOf2MaskedFloat32x16 x y mask) => (VSCALEFPSMasked512 x y (VPMOVVec32x16ToM mask)) +(MulByPowOf2MaskedFloat64x2 x y mask) => (VSCALEFPDMasked128 x y (VPMOVVec64x2ToM mask)) +(MulByPowOf2MaskedFloat64x4 x y mask) => (VSCALEFPDMasked256 x y (VPMOVVec64x4ToM mask)) +(MulByPowOf2MaskedFloat64x8 x y mask) => (VSCALEFPDMasked512 x y (VPMOVVec64x8ToM mask)) (MulEvenWidenInt32x4 ...) => (VPMULDQ128 ...) (MulEvenWidenInt32x8 ...) => (VPMULDQ256 ...) (MulEvenWidenInt64x2 ...) => (VPMULDQ128 ...) @@ -1203,12 +813,24 @@ (MulEvenWidenUint64x2 ...) => (VPMULUDQ128 ...) (MulEvenWidenUint64x4 ...) => (VPMULUDQ256 ...) (MulEvenWidenUint64x8 ...) => (VPMULUDQ512 ...) 
+(MulEvenWidenMaskedInt64x2 x y mask) => (VPMULDQMasked128 x y (VPMOVVec64x2ToM mask)) +(MulEvenWidenMaskedInt64x4 x y mask) => (VPMULDQMasked256 x y (VPMOVVec64x4ToM mask)) +(MulEvenWidenMaskedInt64x8 x y mask) => (VPMULDQMasked512 x y (VPMOVVec64x8ToM mask)) +(MulEvenWidenMaskedUint64x2 x y mask) => (VPMULUDQMasked128 x y (VPMOVVec64x2ToM mask)) +(MulEvenWidenMaskedUint64x4 x y mask) => (VPMULUDQMasked256 x y (VPMOVVec64x4ToM mask)) +(MulEvenWidenMaskedUint64x8 x y mask) => (VPMULUDQMasked512 x y (VPMOVVec64x8ToM mask)) (MulHighInt16x8 ...) => (VPMULHW128 ...) (MulHighInt16x16 ...) => (VPMULHW256 ...) (MulHighInt16x32 ...) => (VPMULHW512 ...) (MulHighUint16x8 ...) => (VPMULHUW128 ...) (MulHighUint16x16 ...) => (VPMULHUW256 ...) (MulHighUint16x32 ...) => (VPMULHUW512 ...) +(MulHighMaskedInt16x8 x y mask) => (VPMULHWMasked128 x y (VPMOVVec16x8ToM mask)) +(MulHighMaskedInt16x16 x y mask) => (VPMULHWMasked256 x y (VPMOVVec16x16ToM mask)) +(MulHighMaskedInt16x32 x y mask) => (VPMULHWMasked512 x y (VPMOVVec16x32ToM mask)) +(MulHighMaskedUint16x8 x y mask) => (VPMULHUWMasked128 x y (VPMOVVec16x8ToM mask)) +(MulHighMaskedUint16x16 x y mask) => (VPMULHUWMasked256 x y (VPMOVVec16x16ToM mask)) +(MulHighMaskedUint16x32 x y mask) => (VPMULHUWMasked512 x y (VPMOVVec16x32ToM mask)) (MulLowInt16x8 ...) => (VPMULLW128 ...) (MulLowInt16x16 ...) => (VPMULLW256 ...) (MulLowInt16x32 ...) => (VPMULLW512 ...) @@ -1218,6 +840,21 @@ (MulLowInt64x2 ...) => (VPMULLQ128 ...) (MulLowInt64x4 ...) => (VPMULLQ256 ...) (MulLowInt64x8 ...) => (VPMULLQ512 ...) +(MulLowMaskedInt16x8 x y mask) => (VPMULLWMasked128 x y (VPMOVVec16x8ToM mask)) +(MulLowMaskedInt16x16 x y mask) => (VPMULLWMasked256 x y (VPMOVVec16x16ToM mask)) +(MulLowMaskedInt16x32 x y mask) => (VPMULLWMasked512 x y (VPMOVVec16x32ToM mask)) +(MulLowMaskedInt32x4 x y mask) => (VPMULLDMasked128 x y (VPMOVVec32x4ToM mask)) +(MulLowMaskedInt32x8 x y mask) => (VPMULLDMasked256 x y (VPMOVVec32x8ToM mask)) +(MulLowMaskedInt32x16 x y mask) => (VPMULLDMasked512 x y (VPMOVVec32x16ToM mask)) +(MulLowMaskedInt64x2 x y mask) => (VPMULLQMasked128 x y (VPMOVVec64x2ToM mask)) +(MulLowMaskedInt64x4 x y mask) => (VPMULLQMasked256 x y (VPMOVVec64x4ToM mask)) +(MulLowMaskedInt64x8 x y mask) => (VPMULLQMasked512 x y (VPMOVVec64x8ToM mask)) +(MulMaskedFloat32x4 x y mask) => (VMULPSMasked128 x y (VPMOVVec32x4ToM mask)) +(MulMaskedFloat32x8 x y mask) => (VMULPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MulMaskedFloat32x16 x y mask) => (VMULPSMasked512 x y (VPMOVVec32x16ToM mask)) +(MulMaskedFloat64x2 x y mask) => (VMULPDMasked128 x y (VPMOVVec64x2ToM mask)) +(MulMaskedFloat64x4 x y mask) => (VMULPDMasked256 x y (VPMOVVec64x4ToM mask)) +(MulMaskedFloat64x8 x y mask) => (VMULPDMasked512 x y (VPMOVVec64x8ToM mask)) (NotEqualFloat32x4 x y) => (VCMPPS128 [4] x y) (NotEqualFloat32x8 x y) => (VCMPPS256 [4] x y) (NotEqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [4] x y)) @@ -1248,6 +885,36 @@ (NotEqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [4] x y)) (NotEqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [4] x y)) (NotEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [4] x y)) +(NotEqualMaskedFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [4] x y (VPMOVVec32x4ToM mask))) +(NotEqualMaskedFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [4] x y (VPMOVVec32x8ToM mask))) +(NotEqualMaskedFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [4] x y (VPMOVVec32x16ToM mask))) +(NotEqualMaskedFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [4] x y 
(VPMOVVec64x2ToM mask))) +(NotEqualMaskedFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [4] x y (VPMOVVec64x4ToM mask))) +(NotEqualMaskedFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [4] x y (VPMOVVec64x8ToM mask))) +(NotEqualMaskedInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [4] x y (VPMOVVec8x16ToM mask))) +(NotEqualMaskedInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [4] x y (VPMOVVec8x32ToM mask))) +(NotEqualMaskedInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [4] x y (VPMOVVec8x64ToM mask))) +(NotEqualMaskedInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [4] x y (VPMOVVec16x8ToM mask))) +(NotEqualMaskedInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [4] x y (VPMOVVec16x16ToM mask))) +(NotEqualMaskedInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [4] x y (VPMOVVec16x32ToM mask))) +(NotEqualMaskedInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [4] x y (VPMOVVec32x4ToM mask))) +(NotEqualMaskedInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [4] x y (VPMOVVec32x8ToM mask))) +(NotEqualMaskedInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [4] x y (VPMOVVec32x16ToM mask))) +(NotEqualMaskedInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [4] x y (VPMOVVec64x2ToM mask))) +(NotEqualMaskedInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [4] x y (VPMOVVec64x4ToM mask))) +(NotEqualMaskedInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [4] x y (VPMOVVec64x8ToM mask))) +(NotEqualMaskedUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [4] x y (VPMOVVec8x16ToM mask))) +(NotEqualMaskedUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [4] x y (VPMOVVec8x32ToM mask))) +(NotEqualMaskedUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [4] x y (VPMOVVec8x64ToM mask))) +(NotEqualMaskedUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [4] x y (VPMOVVec16x8ToM mask))) +(NotEqualMaskedUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [4] x y (VPMOVVec16x16ToM mask))) +(NotEqualMaskedUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [4] x y (VPMOVVec16x32ToM mask))) +(NotEqualMaskedUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [4] x y (VPMOVVec32x4ToM mask))) +(NotEqualMaskedUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [4] x y (VPMOVVec32x8ToM mask))) +(NotEqualMaskedUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [4] x y (VPMOVVec32x16ToM mask))) +(NotEqualMaskedUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [4] x y (VPMOVVec64x2ToM mask))) +(NotEqualMaskedUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [4] x y (VPMOVVec64x4ToM mask))) +(NotEqualMaskedUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [4] x y (VPMOVVec64x8ToM mask))) (OrInt8x16 ...) => (VPOR128 ...) (OrInt8x32 ...) => (VPOR256 ...) (OrInt16x8 ...) => (VPOR128 ...) @@ -1268,12 +935,30 @@ (OrUint64x2 ...) => (VPOR128 ...) (OrUint64x4 ...) => (VPOR256 ...) (OrUint64x8 ...) => (VPORQ512 ...) 
+(OrMaskedInt32x4 x y mask) => (VPORDMasked128 x y (VPMOVVec32x4ToM mask)) +(OrMaskedInt32x8 x y mask) => (VPORDMasked256 x y (VPMOVVec32x8ToM mask)) +(OrMaskedInt32x16 x y mask) => (VPORDMasked512 x y (VPMOVVec32x16ToM mask)) +(OrMaskedInt64x2 x y mask) => (VPORQMasked128 x y (VPMOVVec64x2ToM mask)) +(OrMaskedInt64x4 x y mask) => (VPORQMasked256 x y (VPMOVVec64x4ToM mask)) +(OrMaskedInt64x8 x y mask) => (VPORQMasked512 x y (VPMOVVec64x8ToM mask)) +(OrMaskedUint32x4 x y mask) => (VPORDMasked128 x y (VPMOVVec32x4ToM mask)) +(OrMaskedUint32x8 x y mask) => (VPORDMasked256 x y (VPMOVVec32x8ToM mask)) +(OrMaskedUint32x16 x y mask) => (VPORDMasked512 x y (VPMOVVec32x16ToM mask)) +(OrMaskedUint64x2 x y mask) => (VPORQMasked128 x y (VPMOVVec64x2ToM mask)) +(OrMaskedUint64x4 x y mask) => (VPORQMasked256 x y (VPMOVVec64x4ToM mask)) +(OrMaskedUint64x8 x y mask) => (VPORQMasked512 x y (VPMOVVec64x8ToM mask)) (PairDotProdInt16x8 ...) => (VPMADDWD128 ...) (PairDotProdInt16x16 ...) => (VPMADDWD256 ...) (PairDotProdInt16x32 ...) => (VPMADDWD512 ...) (PairDotProdAccumulateInt32x4 ...) => (VPDPWSSD128 ...) (PairDotProdAccumulateInt32x8 ...) => (VPDPWSSD256 ...) (PairDotProdAccumulateInt32x16 ...) => (VPDPWSSD512 ...) +(PairDotProdAccumulateMaskedInt32x4 x y z mask) => (VPDPWSSDMasked128 x y z (VPMOVVec32x4ToM mask)) +(PairDotProdAccumulateMaskedInt32x8 x y z mask) => (VPDPWSSDMasked256 x y z (VPMOVVec32x8ToM mask)) +(PairDotProdAccumulateMaskedInt32x16 x y z mask) => (VPDPWSSDMasked512 x y z (VPMOVVec32x16ToM mask)) +(PairDotProdMaskedInt16x8 x y mask) => (VPMADDWDMasked128 x y (VPMOVVec16x8ToM mask)) +(PairDotProdMaskedInt16x16 x y mask) => (VPMADDWDMasked256 x y (VPMOVVec16x16ToM mask)) +(PairDotProdMaskedInt16x32 x y mask) => (VPMADDWDMasked512 x y (VPMOVVec16x32ToM mask)) (PairwiseAddFloat32x4 ...) => (VHADDPS128 ...) (PairwiseAddFloat32x8 ...) => (VHADDPS256 ...) (PairwiseAddFloat64x2 ...) => (VHADDPD128 ...) @@ -1322,6 +1007,30 @@ (PopCountUint64x2 ...) => (VPOPCNTQ128 ...) (PopCountUint64x4 ...) => (VPOPCNTQ256 ...) (PopCountUint64x8 ...) => (VPOPCNTQ512 ...) 
+(PopCountMaskedInt8x16 x mask) => (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) +(PopCountMaskedInt8x32 x mask) => (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) +(PopCountMaskedInt8x64 x mask) => (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) +(PopCountMaskedInt16x8 x mask) => (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) +(PopCountMaskedInt16x16 x mask) => (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) +(PopCountMaskedInt16x32 x mask) => (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) +(PopCountMaskedInt32x4 x mask) => (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) +(PopCountMaskedInt32x8 x mask) => (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) +(PopCountMaskedInt32x16 x mask) => (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) +(PopCountMaskedInt64x2 x mask) => (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) +(PopCountMaskedInt64x4 x mask) => (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) +(PopCountMaskedInt64x8 x mask) => (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) +(PopCountMaskedUint8x16 x mask) => (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) +(PopCountMaskedUint8x32 x mask) => (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) +(PopCountMaskedUint8x64 x mask) => (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) +(PopCountMaskedUint16x8 x mask) => (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) +(PopCountMaskedUint16x16 x mask) => (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) +(PopCountMaskedUint16x32 x mask) => (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) +(PopCountMaskedUint32x4 x mask) => (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) +(PopCountMaskedUint32x8 x mask) => (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) +(PopCountMaskedUint32x16 x mask) => (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) +(PopCountMaskedUint64x2 x mask) => (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) +(PopCountMaskedUint64x4 x mask) => (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) +(PopCountMaskedUint64x8 x mask) => (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) (RotateAllLeftInt32x4 [a] x) => (VPROLD128 [a] x) (RotateAllLeftInt32x8 [a] x) => (VPROLD256 [a] x) (RotateAllLeftInt32x16 [a] x) => (VPROLD512 [a] x) @@ -1334,6 +1043,18 @@ (RotateAllLeftUint64x2 [a] x) => (VPROLQ128 [a] x) (RotateAllLeftUint64x4 [a] x) => (VPROLQ256 [a] x) (RotateAllLeftUint64x8 [a] x) => (VPROLQ512 [a] x) +(RotateAllLeftMaskedInt32x4 [a] x mask) => (VPROLDMasked128 [a] x (VPMOVVec32x4ToM mask)) +(RotateAllLeftMaskedInt32x8 [a] x mask) => (VPROLDMasked256 [a] x (VPMOVVec32x8ToM mask)) +(RotateAllLeftMaskedInt32x16 [a] x mask) => (VPROLDMasked512 [a] x (VPMOVVec32x16ToM mask)) +(RotateAllLeftMaskedInt64x2 [a] x mask) => (VPROLQMasked128 [a] x (VPMOVVec64x2ToM mask)) +(RotateAllLeftMaskedInt64x4 [a] x mask) => (VPROLQMasked256 [a] x (VPMOVVec64x4ToM mask)) +(RotateAllLeftMaskedInt64x8 [a] x mask) => (VPROLQMasked512 [a] x (VPMOVVec64x8ToM mask)) +(RotateAllLeftMaskedUint32x4 [a] x mask) => (VPROLDMasked128 [a] x (VPMOVVec32x4ToM mask)) +(RotateAllLeftMaskedUint32x8 [a] x mask) => (VPROLDMasked256 [a] x (VPMOVVec32x8ToM mask)) +(RotateAllLeftMaskedUint32x16 [a] x mask) => (VPROLDMasked512 [a] x (VPMOVVec32x16ToM mask)) +(RotateAllLeftMaskedUint64x2 [a] x mask) => (VPROLQMasked128 [a] x (VPMOVVec64x2ToM mask)) +(RotateAllLeftMaskedUint64x4 [a] x mask) => (VPROLQMasked256 [a] x (VPMOVVec64x4ToM mask)) +(RotateAllLeftMaskedUint64x8 [a] x mask) => (VPROLQMasked512 [a] x (VPMOVVec64x8ToM mask)) (RotateAllRightInt32x4 [a] x) => (VPRORD128 [a] x) (RotateAllRightInt32x8 [a] x) => (VPRORD256 [a] x) (RotateAllRightInt32x16 [a] x) => (VPRORD512 [a] x) @@ -1346,6 +1067,18 @@ 
(RotateAllRightUint64x2 [a] x) => (VPRORQ128 [a] x) (RotateAllRightUint64x4 [a] x) => (VPRORQ256 [a] x) (RotateAllRightUint64x8 [a] x) => (VPRORQ512 [a] x) +(RotateAllRightMaskedInt32x4 [a] x mask) => (VPRORDMasked128 [a] x (VPMOVVec32x4ToM mask)) +(RotateAllRightMaskedInt32x8 [a] x mask) => (VPRORDMasked256 [a] x (VPMOVVec32x8ToM mask)) +(RotateAllRightMaskedInt32x16 [a] x mask) => (VPRORDMasked512 [a] x (VPMOVVec32x16ToM mask)) +(RotateAllRightMaskedInt64x2 [a] x mask) => (VPRORQMasked128 [a] x (VPMOVVec64x2ToM mask)) +(RotateAllRightMaskedInt64x4 [a] x mask) => (VPRORQMasked256 [a] x (VPMOVVec64x4ToM mask)) +(RotateAllRightMaskedInt64x8 [a] x mask) => (VPRORQMasked512 [a] x (VPMOVVec64x8ToM mask)) +(RotateAllRightMaskedUint32x4 [a] x mask) => (VPRORDMasked128 [a] x (VPMOVVec32x4ToM mask)) +(RotateAllRightMaskedUint32x8 [a] x mask) => (VPRORDMasked256 [a] x (VPMOVVec32x8ToM mask)) +(RotateAllRightMaskedUint32x16 [a] x mask) => (VPRORDMasked512 [a] x (VPMOVVec32x16ToM mask)) +(RotateAllRightMaskedUint64x2 [a] x mask) => (VPRORQMasked128 [a] x (VPMOVVec64x2ToM mask)) +(RotateAllRightMaskedUint64x4 [a] x mask) => (VPRORQMasked256 [a] x (VPMOVVec64x4ToM mask)) +(RotateAllRightMaskedUint64x8 [a] x mask) => (VPRORQMasked512 [a] x (VPMOVVec64x8ToM mask)) (RotateLeftInt32x4 ...) => (VPROLVD128 ...) (RotateLeftInt32x8 ...) => (VPROLVD256 ...) (RotateLeftInt32x16 ...) => (VPROLVD512 ...) @@ -1358,6 +1091,18 @@ (RotateLeftUint64x2 ...) => (VPROLVQ128 ...) (RotateLeftUint64x4 ...) => (VPROLVQ256 ...) (RotateLeftUint64x8 ...) => (VPROLVQ512 ...) +(RotateLeftMaskedInt32x4 x y mask) => (VPROLVDMasked128 x y (VPMOVVec32x4ToM mask)) +(RotateLeftMaskedInt32x8 x y mask) => (VPROLVDMasked256 x y (VPMOVVec32x8ToM mask)) +(RotateLeftMaskedInt32x16 x y mask) => (VPROLVDMasked512 x y (VPMOVVec32x16ToM mask)) +(RotateLeftMaskedInt64x2 x y mask) => (VPROLVQMasked128 x y (VPMOVVec64x2ToM mask)) +(RotateLeftMaskedInt64x4 x y mask) => (VPROLVQMasked256 x y (VPMOVVec64x4ToM mask)) +(RotateLeftMaskedInt64x8 x y mask) => (VPROLVQMasked512 x y (VPMOVVec64x8ToM mask)) +(RotateLeftMaskedUint32x4 x y mask) => (VPROLVDMasked128 x y (VPMOVVec32x4ToM mask)) +(RotateLeftMaskedUint32x8 x y mask) => (VPROLVDMasked256 x y (VPMOVVec32x8ToM mask)) +(RotateLeftMaskedUint32x16 x y mask) => (VPROLVDMasked512 x y (VPMOVVec32x16ToM mask)) +(RotateLeftMaskedUint64x2 x y mask) => (VPROLVQMasked128 x y (VPMOVVec64x2ToM mask)) +(RotateLeftMaskedUint64x4 x y mask) => (VPROLVQMasked256 x y (VPMOVVec64x4ToM mask)) +(RotateLeftMaskedUint64x8 x y mask) => (VPROLVQMasked512 x y (VPMOVVec64x8ToM mask)) (RotateRightInt32x4 ...) => (VPRORVD128 ...) (RotateRightInt32x8 ...) => (VPRORVD256 ...) (RotateRightInt32x16 ...) => (VPRORVD512 ...) @@ -1370,6 +1115,18 @@ (RotateRightUint64x2 ...) => (VPRORVQ128 ...) (RotateRightUint64x4 ...) => (VPRORVQ256 ...) (RotateRightUint64x8 ...) => (VPRORVQ512 ...) 
+(RotateRightMaskedInt32x4 x y mask) => (VPRORVDMasked128 x y (VPMOVVec32x4ToM mask)) +(RotateRightMaskedInt32x8 x y mask) => (VPRORVDMasked256 x y (VPMOVVec32x8ToM mask)) +(RotateRightMaskedInt32x16 x y mask) => (VPRORVDMasked512 x y (VPMOVVec32x16ToM mask)) +(RotateRightMaskedInt64x2 x y mask) => (VPRORVQMasked128 x y (VPMOVVec64x2ToM mask)) +(RotateRightMaskedInt64x4 x y mask) => (VPRORVQMasked256 x y (VPMOVVec64x4ToM mask)) +(RotateRightMaskedInt64x8 x y mask) => (VPRORVQMasked512 x y (VPMOVVec64x8ToM mask)) +(RotateRightMaskedUint32x4 x y mask) => (VPRORVDMasked128 x y (VPMOVVec32x4ToM mask)) +(RotateRightMaskedUint32x8 x y mask) => (VPRORVDMasked256 x y (VPMOVVec32x8ToM mask)) +(RotateRightMaskedUint32x16 x y mask) => (VPRORVDMasked512 x y (VPMOVVec32x16ToM mask)) +(RotateRightMaskedUint64x2 x y mask) => (VPRORVQMasked128 x y (VPMOVVec64x2ToM mask)) +(RotateRightMaskedUint64x4 x y mask) => (VPRORVQMasked256 x y (VPMOVVec64x4ToM mask)) +(RotateRightMaskedUint64x8 x y mask) => (VPRORVQMasked512 x y (VPMOVVec64x8ToM mask)) (RoundFloat32x4 x) => (VROUNDPS128 [0] x) (RoundFloat32x8 x) => (VROUNDPS256 [0] x) (RoundFloat64x2 x) => (VROUNDPD128 [0] x) @@ -1380,6 +1137,12 @@ (RoundWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+0] x) (RoundWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+0] x) (RoundWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+0] x) +(RoundWithPrecisionMaskedFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) +(RoundWithPrecisionMaskedFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) +(RoundWithPrecisionMaskedFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) +(RoundWithPrecisionMaskedFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) +(RoundWithPrecisionMaskedFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) +(RoundWithPrecisionMaskedFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) (SaturatedAddInt8x16 ...) => (VPADDSB128 ...) (SaturatedAddInt8x32 ...) => (VPADDSB256 ...) (SaturatedAddInt8x64 ...) => (VPADDSB512 ...) @@ -1392,9 +1155,24 @@ (SaturatedAddUint16x8 ...) => (VPADDSW128 ...) (SaturatedAddUint16x16 ...) => (VPADDSW256 ...) (SaturatedAddUint16x32 ...) => (VPADDSW512 ...) +(SaturatedAddMaskedInt8x16 x y mask) => (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) +(SaturatedAddMaskedInt8x32 x y mask) => (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) +(SaturatedAddMaskedInt8x64 x y mask) => (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) +(SaturatedAddMaskedInt16x8 x y mask) => (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) +(SaturatedAddMaskedInt16x16 x y mask) => (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) +(SaturatedAddMaskedInt16x32 x y mask) => (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) +(SaturatedAddMaskedUint8x16 x y mask) => (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) +(SaturatedAddMaskedUint8x32 x y mask) => (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) +(SaturatedAddMaskedUint8x64 x y mask) => (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) +(SaturatedAddMaskedUint16x8 x y mask) => (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) +(SaturatedAddMaskedUint16x16 x y mask) => (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) +(SaturatedAddMaskedUint16x32 x y mask) => (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) (SaturatedPairDotProdAccumulateInt32x4 ...) => (VPDPWSSDS128 ...) (SaturatedPairDotProdAccumulateInt32x8 ...) => (VPDPWSSDS256 ...) (SaturatedPairDotProdAccumulateInt32x16 ...) 
=> (VPDPWSSDS512 ...) +(SaturatedPairDotProdAccumulateMaskedInt32x4 x y z mask) => (VPDPWSSDSMasked128 x y z (VPMOVVec32x4ToM mask)) +(SaturatedPairDotProdAccumulateMaskedInt32x8 x y z mask) => (VPDPWSSDSMasked256 x y z (VPMOVVec32x8ToM mask)) +(SaturatedPairDotProdAccumulateMaskedInt32x16 x y z mask) => (VPDPWSSDSMasked512 x y z (VPMOVVec32x16ToM mask)) (SaturatedPairwiseAddInt16x8 ...) => (VPHADDSW128 ...) (SaturatedPairwiseAddInt16x16 ...) => (VPHADDSW256 ...) (SaturatedPairwiseSubInt16x8 ...) => (VPHSUBSW128 ...) @@ -1411,15 +1189,36 @@ (SaturatedSubUint16x8 ...) => (VPSUBSW128 ...) (SaturatedSubUint16x16 ...) => (VPSUBSW256 ...) (SaturatedSubUint16x32 ...) => (VPSUBSW512 ...) +(SaturatedSubMaskedInt8x16 x y mask) => (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) +(SaturatedSubMaskedInt8x32 x y mask) => (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) +(SaturatedSubMaskedInt8x64 x y mask) => (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) +(SaturatedSubMaskedInt16x8 x y mask) => (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) +(SaturatedSubMaskedInt16x16 x y mask) => (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) +(SaturatedSubMaskedInt16x32 x y mask) => (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) +(SaturatedSubMaskedUint8x16 x y mask) => (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) +(SaturatedSubMaskedUint8x32 x y mask) => (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) +(SaturatedSubMaskedUint8x64 x y mask) => (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) +(SaturatedSubMaskedUint16x8 x y mask) => (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) +(SaturatedSubMaskedUint16x16 x y mask) => (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) +(SaturatedSubMaskedUint16x32 x y mask) => (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) (SaturatedUnsignedSignedPairDotProdUint8x16 ...) => (VPMADDUBSW128 ...) (SaturatedUnsignedSignedPairDotProdUint8x32 ...) => (VPMADDUBSW256 ...) (SaturatedUnsignedSignedPairDotProdUint8x64 ...) => (VPMADDUBSW512 ...) +(SaturatedUnsignedSignedPairDotProdMaskedUint8x16 x y mask) => (VPMADDUBSWMasked128 x y (VPMOVVec16x8ToM mask)) +(SaturatedUnsignedSignedPairDotProdMaskedUint8x32 x y mask) => (VPMADDUBSWMasked256 x y (VPMOVVec16x16ToM mask)) +(SaturatedUnsignedSignedPairDotProdMaskedUint8x64 x y mask) => (VPMADDUBSWMasked512 x y (VPMOVVec16x32ToM mask)) (SaturatedUnsignedSignedQuadDotProdAccumulateInt32x4 ...) => (VPDPBUSDS128 ...) (SaturatedUnsignedSignedQuadDotProdAccumulateInt32x8 ...) => (VPDPBUSDS256 ...) (SaturatedUnsignedSignedQuadDotProdAccumulateInt32x16 ...) => (VPDPBUSDS512 ...) (SaturatedUnsignedSignedQuadDotProdAccumulateUint32x4 ...) => (VPDPBUSDS128 ...) (SaturatedUnsignedSignedQuadDotProdAccumulateUint32x8 ...) => (VPDPBUSDS256 ...) (SaturatedUnsignedSignedQuadDotProdAccumulateUint32x16 ...) => (VPDPBUSDS512 ...) 
+(SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4 x y z mask) => (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) +(SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8 x y z mask) => (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) +(SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16 x y z mask) => (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) +(SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x4 x y z mask) => (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) +(SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x8 x y z mask) => (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) +(SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x16 x y z mask) => (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) (Set128Float32x8 [a] x y) => (VINSERTF128256 [a] x y) (Set128Float64x4 [a] x y) => (VINSERTF128256 [a] x y) (Set128Int8x32 [a] x y) => (VINSERTI128256 [a] x y) @@ -1470,6 +1269,30 @@ (ShiftAllLeftAndFillUpperFromUint64x2 [a] x y) => (VPSHLDQ128 [a] x y) (ShiftAllLeftAndFillUpperFromUint64x4 [a] x y) => (VPSHLDQ256 [a] x y) (ShiftAllLeftAndFillUpperFromUint64x8 [a] x y) => (VPSHLDQ512 [a] x y) +(ShiftAllLeftAndFillUpperFromMaskedInt16x8 [a] x y mask) => (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) +(ShiftAllLeftAndFillUpperFromMaskedInt16x16 [a] x y mask) => (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) +(ShiftAllLeftAndFillUpperFromMaskedInt16x32 [a] x y mask) => (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) +(ShiftAllLeftAndFillUpperFromMaskedInt32x4 [a] x y mask) => (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) +(ShiftAllLeftAndFillUpperFromMaskedInt32x8 [a] x y mask) => (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) +(ShiftAllLeftAndFillUpperFromMaskedInt32x16 [a] x y mask) => (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) +(ShiftAllLeftAndFillUpperFromMaskedInt64x2 [a] x y mask) => (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) +(ShiftAllLeftAndFillUpperFromMaskedInt64x4 [a] x y mask) => (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) +(ShiftAllLeftAndFillUpperFromMaskedInt64x8 [a] x y mask) => (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) +(ShiftAllLeftAndFillUpperFromMaskedUint16x8 [a] x y mask) => (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) +(ShiftAllLeftAndFillUpperFromMaskedUint16x16 [a] x y mask) => (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) +(ShiftAllLeftAndFillUpperFromMaskedUint16x32 [a] x y mask) => (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) +(ShiftAllLeftAndFillUpperFromMaskedUint32x4 [a] x y mask) => (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) +(ShiftAllLeftAndFillUpperFromMaskedUint32x8 [a] x y mask) => (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) +(ShiftAllLeftAndFillUpperFromMaskedUint32x16 [a] x y mask) => (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) +(ShiftAllLeftAndFillUpperFromMaskedUint64x2 [a] x y mask) => (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) +(ShiftAllLeftAndFillUpperFromMaskedUint64x4 [a] x y mask) => (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) +(ShiftAllLeftAndFillUpperFromMaskedUint64x8 [a] x y mask) => (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) +(ShiftAllLeftMaskedInt64x2 x y mask) => (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) +(ShiftAllLeftMaskedInt64x4 x y mask) => (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) +(ShiftAllLeftMaskedInt64x8 x y mask) => (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) +(ShiftAllLeftMaskedUint64x2 x y mask) => (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) +(ShiftAllLeftMaskedUint64x4 x y 
mask) => (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) +(ShiftAllLeftMaskedUint64x8 x y mask) => (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) (ShiftAllRightInt16x8 ...) => (VPSRLW128 ...) (ShiftAllRightInt16x16 ...) => (VPSRLW256 ...) (ShiftAllRightInt32x4 ...) => (VPSRLD128 ...) @@ -1502,6 +1325,30 @@ (ShiftAllRightAndFillUpperFromUint64x2 [a] x y) => (VPSHRDQ128 [a] x y) (ShiftAllRightAndFillUpperFromUint64x4 [a] x y) => (VPSHRDQ256 [a] x y) (ShiftAllRightAndFillUpperFromUint64x8 [a] x y) => (VPSHRDQ512 [a] x y) +(ShiftAllRightAndFillUpperFromMaskedInt16x8 [a] x y mask) => (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) +(ShiftAllRightAndFillUpperFromMaskedInt16x16 [a] x y mask) => (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) +(ShiftAllRightAndFillUpperFromMaskedInt16x32 [a] x y mask) => (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) +(ShiftAllRightAndFillUpperFromMaskedInt32x4 [a] x y mask) => (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) +(ShiftAllRightAndFillUpperFromMaskedInt32x8 [a] x y mask) => (VPSHRDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) +(ShiftAllRightAndFillUpperFromMaskedInt32x16 [a] x y mask) => (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) +(ShiftAllRightAndFillUpperFromMaskedInt64x2 [a] x y mask) => (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) +(ShiftAllRightAndFillUpperFromMaskedInt64x4 [a] x y mask) => (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) +(ShiftAllRightAndFillUpperFromMaskedInt64x8 [a] x y mask) => (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) +(ShiftAllRightAndFillUpperFromMaskedUint16x8 [a] x y mask) => (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) +(ShiftAllRightAndFillUpperFromMaskedUint16x16 [a] x y mask) => (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) +(ShiftAllRightAndFillUpperFromMaskedUint16x32 [a] x y mask) => (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) +(ShiftAllRightAndFillUpperFromMaskedUint32x4 [a] x y mask) => (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) +(ShiftAllRightAndFillUpperFromMaskedUint32x8 [a] x y mask) => (VPSHRDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) +(ShiftAllRightAndFillUpperFromMaskedUint32x16 [a] x y mask) => (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) +(ShiftAllRightAndFillUpperFromMaskedUint64x2 [a] x y mask) => (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) +(ShiftAllRightAndFillUpperFromMaskedUint64x4 [a] x y mask) => (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) +(ShiftAllRightAndFillUpperFromMaskedUint64x8 [a] x y mask) => (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) +(ShiftAllRightMaskedInt64x2 x y mask) => (VPSRLQMasked128 x y (VPMOVVec64x2ToM mask)) +(ShiftAllRightMaskedInt64x4 x y mask) => (VPSRLQMasked256 x y (VPMOVVec64x4ToM mask)) +(ShiftAllRightMaskedInt64x8 x y mask) => (VPSRLQMasked512 x y (VPMOVVec64x8ToM mask)) +(ShiftAllRightMaskedUint64x2 x y mask) => (VPSRLQMasked128 x y (VPMOVVec64x2ToM mask)) +(ShiftAllRightMaskedUint64x4 x y mask) => (VPSRLQMasked256 x y (VPMOVVec64x4ToM mask)) +(ShiftAllRightMaskedUint64x8 x y mask) => (VPSRLQMasked512 x y (VPMOVVec64x8ToM mask)) (ShiftAllRightSignExtendedInt16x8 ...) => (VPSRAW128 ...) (ShiftAllRightSignExtendedInt16x16 ...) => (VPSRAW256 ...) (ShiftAllRightSignExtendedInt32x4 ...) => (VPSRAD128 ...) @@ -1509,6 +1356,9 @@ (ShiftAllRightSignExtendedInt64x2 ...) => (VPSRAQ128 ...) (ShiftAllRightSignExtendedInt64x4 ...) => (VPSRAQ256 ...) (ShiftAllRightSignExtendedInt64x8 ...) => (VPSRAQ512 ...) 
+(ShiftAllRightSignExtendedMaskedInt64x2 x y mask) => (VPSRAQMasked128 x y (VPMOVVec64x2ToM mask)) +(ShiftAllRightSignExtendedMaskedInt64x4 x y mask) => (VPSRAQMasked256 x y (VPMOVVec64x4ToM mask)) +(ShiftAllRightSignExtendedMaskedInt64x8 x y mask) => (VPSRAQMasked512 x y (VPMOVVec64x8ToM mask)) (ShiftLeftInt16x8 ...) => (VPSLLVW128 ...) (ShiftLeftInt16x16 ...) => (VPSLLVW256 ...) (ShiftLeftInt16x32 ...) => (VPSLLVW512 ...) @@ -1545,6 +1395,42 @@ (ShiftLeftAndFillUpperFromUint64x2 ...) => (VPSHLDVQ128 ...) (ShiftLeftAndFillUpperFromUint64x4 ...) => (VPSHLDVQ256 ...) (ShiftLeftAndFillUpperFromUint64x8 ...) => (VPSHLDVQ512 ...) +(ShiftLeftAndFillUpperFromMaskedInt16x8 x y z mask) => (VPSHLDVWMasked128 x y z (VPMOVVec16x8ToM mask)) +(ShiftLeftAndFillUpperFromMaskedInt16x16 x y z mask) => (VPSHLDVWMasked256 x y z (VPMOVVec16x16ToM mask)) +(ShiftLeftAndFillUpperFromMaskedInt16x32 x y z mask) => (VPSHLDVWMasked512 x y z (VPMOVVec16x32ToM mask)) +(ShiftLeftAndFillUpperFromMaskedInt32x4 x y z mask) => (VPSHLDVDMasked128 x y z (VPMOVVec32x4ToM mask)) +(ShiftLeftAndFillUpperFromMaskedInt32x8 x y z mask) => (VPSHLDVDMasked256 x y z (VPMOVVec32x8ToM mask)) +(ShiftLeftAndFillUpperFromMaskedInt32x16 x y z mask) => (VPSHLDVDMasked512 x y z (VPMOVVec32x16ToM mask)) +(ShiftLeftAndFillUpperFromMaskedInt64x2 x y z mask) => (VPSHLDVQMasked128 x y z (VPMOVVec64x2ToM mask)) +(ShiftLeftAndFillUpperFromMaskedInt64x4 x y z mask) => (VPSHLDVQMasked256 x y z (VPMOVVec64x4ToM mask)) +(ShiftLeftAndFillUpperFromMaskedInt64x8 x y z mask) => (VPSHLDVQMasked512 x y z (VPMOVVec64x8ToM mask)) +(ShiftLeftAndFillUpperFromMaskedUint16x8 x y z mask) => (VPSHLDVWMasked128 x y z (VPMOVVec16x8ToM mask)) +(ShiftLeftAndFillUpperFromMaskedUint16x16 x y z mask) => (VPSHLDVWMasked256 x y z (VPMOVVec16x16ToM mask)) +(ShiftLeftAndFillUpperFromMaskedUint16x32 x y z mask) => (VPSHLDVWMasked512 x y z (VPMOVVec16x32ToM mask)) +(ShiftLeftAndFillUpperFromMaskedUint32x4 x y z mask) => (VPSHLDVDMasked128 x y z (VPMOVVec32x4ToM mask)) +(ShiftLeftAndFillUpperFromMaskedUint32x8 x y z mask) => (VPSHLDVDMasked256 x y z (VPMOVVec32x8ToM mask)) +(ShiftLeftAndFillUpperFromMaskedUint32x16 x y z mask) => (VPSHLDVDMasked512 x y z (VPMOVVec32x16ToM mask)) +(ShiftLeftAndFillUpperFromMaskedUint64x2 x y z mask) => (VPSHLDVQMasked128 x y z (VPMOVVec64x2ToM mask)) +(ShiftLeftAndFillUpperFromMaskedUint64x4 x y z mask) => (VPSHLDVQMasked256 x y z (VPMOVVec64x4ToM mask)) +(ShiftLeftAndFillUpperFromMaskedUint64x8 x y z mask) => (VPSHLDVQMasked512 x y z (VPMOVVec64x8ToM mask)) +(ShiftLeftMaskedInt16x8 x y mask) => (VPSLLVWMasked128 x y (VPMOVVec16x8ToM mask)) +(ShiftLeftMaskedInt16x16 x y mask) => (VPSLLVWMasked256 x y (VPMOVVec16x16ToM mask)) +(ShiftLeftMaskedInt16x32 x y mask) => (VPSLLVWMasked512 x y (VPMOVVec16x32ToM mask)) +(ShiftLeftMaskedInt32x4 x y mask) => (VPSLLVDMasked128 x y (VPMOVVec32x4ToM mask)) +(ShiftLeftMaskedInt32x8 x y mask) => (VPSLLVDMasked256 x y (VPMOVVec32x8ToM mask)) +(ShiftLeftMaskedInt32x16 x y mask) => (VPSLLVDMasked512 x y (VPMOVVec32x16ToM mask)) +(ShiftLeftMaskedInt64x2 x y mask) => (VPSLLVQMasked128 x y (VPMOVVec64x2ToM mask)) +(ShiftLeftMaskedInt64x4 x y mask) => (VPSLLVQMasked256 x y (VPMOVVec64x4ToM mask)) +(ShiftLeftMaskedInt64x8 x y mask) => (VPSLLVQMasked512 x y (VPMOVVec64x8ToM mask)) +(ShiftLeftMaskedUint16x8 x y mask) => (VPSLLVWMasked128 x y (VPMOVVec16x8ToM mask)) +(ShiftLeftMaskedUint16x16 x y mask) => (VPSLLVWMasked256 x y (VPMOVVec16x16ToM mask)) +(ShiftLeftMaskedUint16x32 x y mask) => (VPSLLVWMasked512 x y 
(VPMOVVec16x32ToM mask)) +(ShiftLeftMaskedUint32x4 x y mask) => (VPSLLVDMasked128 x y (VPMOVVec32x4ToM mask)) +(ShiftLeftMaskedUint32x8 x y mask) => (VPSLLVDMasked256 x y (VPMOVVec32x8ToM mask)) +(ShiftLeftMaskedUint32x16 x y mask) => (VPSLLVDMasked512 x y (VPMOVVec32x16ToM mask)) +(ShiftLeftMaskedUint64x2 x y mask) => (VPSLLVQMasked128 x y (VPMOVVec64x2ToM mask)) +(ShiftLeftMaskedUint64x4 x y mask) => (VPSLLVQMasked256 x y (VPMOVVec64x4ToM mask)) +(ShiftLeftMaskedUint64x8 x y mask) => (VPSLLVQMasked512 x y (VPMOVVec64x8ToM mask)) (ShiftRightInt16x8 ...) => (VPSRLVW128 ...) (ShiftRightInt16x16 ...) => (VPSRLVW256 ...) (ShiftRightInt16x32 ...) => (VPSRLVW512 ...) @@ -1581,6 +1467,42 @@ (ShiftRightAndFillUpperFromUint64x2 ...) => (VPSHRDVQ128 ...) (ShiftRightAndFillUpperFromUint64x4 ...) => (VPSHRDVQ256 ...) (ShiftRightAndFillUpperFromUint64x8 ...) => (VPSHRDVQ512 ...) +(ShiftRightAndFillUpperFromMaskedInt16x8 x y z mask) => (VPSHRDVWMasked128 x y z (VPMOVVec16x8ToM mask)) +(ShiftRightAndFillUpperFromMaskedInt16x16 x y z mask) => (VPSHRDVWMasked256 x y z (VPMOVVec16x16ToM mask)) +(ShiftRightAndFillUpperFromMaskedInt16x32 x y z mask) => (VPSHRDVWMasked512 x y z (VPMOVVec16x32ToM mask)) +(ShiftRightAndFillUpperFromMaskedInt32x4 x y z mask) => (VPSHRDVDMasked128 x y z (VPMOVVec32x4ToM mask)) +(ShiftRightAndFillUpperFromMaskedInt32x8 x y z mask) => (VPSHRDVDMasked256 x y z (VPMOVVec32x8ToM mask)) +(ShiftRightAndFillUpperFromMaskedInt32x16 x y z mask) => (VPSHRDVDMasked512 x y z (VPMOVVec32x16ToM mask)) +(ShiftRightAndFillUpperFromMaskedInt64x2 x y z mask) => (VPSHRDVQMasked128 x y z (VPMOVVec64x2ToM mask)) +(ShiftRightAndFillUpperFromMaskedInt64x4 x y z mask) => (VPSHRDVQMasked256 x y z (VPMOVVec64x4ToM mask)) +(ShiftRightAndFillUpperFromMaskedInt64x8 x y z mask) => (VPSHRDVQMasked512 x y z (VPMOVVec64x8ToM mask)) +(ShiftRightAndFillUpperFromMaskedUint16x8 x y z mask) => (VPSHRDVWMasked128 x y z (VPMOVVec16x8ToM mask)) +(ShiftRightAndFillUpperFromMaskedUint16x16 x y z mask) => (VPSHRDVWMasked256 x y z (VPMOVVec16x16ToM mask)) +(ShiftRightAndFillUpperFromMaskedUint16x32 x y z mask) => (VPSHRDVWMasked512 x y z (VPMOVVec16x32ToM mask)) +(ShiftRightAndFillUpperFromMaskedUint32x4 x y z mask) => (VPSHRDVDMasked128 x y z (VPMOVVec32x4ToM mask)) +(ShiftRightAndFillUpperFromMaskedUint32x8 x y z mask) => (VPSHRDVDMasked256 x y z (VPMOVVec32x8ToM mask)) +(ShiftRightAndFillUpperFromMaskedUint32x16 x y z mask) => (VPSHRDVDMasked512 x y z (VPMOVVec32x16ToM mask)) +(ShiftRightAndFillUpperFromMaskedUint64x2 x y z mask) => (VPSHRDVQMasked128 x y z (VPMOVVec64x2ToM mask)) +(ShiftRightAndFillUpperFromMaskedUint64x4 x y z mask) => (VPSHRDVQMasked256 x y z (VPMOVVec64x4ToM mask)) +(ShiftRightAndFillUpperFromMaskedUint64x8 x y z mask) => (VPSHRDVQMasked512 x y z (VPMOVVec64x8ToM mask)) +(ShiftRightMaskedInt16x8 x y mask) => (VPSRLVWMasked128 x y (VPMOVVec16x8ToM mask)) +(ShiftRightMaskedInt16x16 x y mask) => (VPSRLVWMasked256 x y (VPMOVVec16x16ToM mask)) +(ShiftRightMaskedInt16x32 x y mask) => (VPSRLVWMasked512 x y (VPMOVVec16x32ToM mask)) +(ShiftRightMaskedInt32x4 x y mask) => (VPSRLVDMasked128 x y (VPMOVVec32x4ToM mask)) +(ShiftRightMaskedInt32x8 x y mask) => (VPSRLVDMasked256 x y (VPMOVVec32x8ToM mask)) +(ShiftRightMaskedInt32x16 x y mask) => (VPSRLVDMasked512 x y (VPMOVVec32x16ToM mask)) +(ShiftRightMaskedInt64x2 x y mask) => (VPSRLVQMasked128 x y (VPMOVVec64x2ToM mask)) +(ShiftRightMaskedInt64x4 x y mask) => (VPSRLVQMasked256 x y (VPMOVVec64x4ToM mask)) +(ShiftRightMaskedInt64x8 x y mask) => (VPSRLVQMasked512 x 
y (VPMOVVec64x8ToM mask)) +(ShiftRightMaskedUint16x8 x y mask) => (VPSRLVWMasked128 x y (VPMOVVec16x8ToM mask)) +(ShiftRightMaskedUint16x16 x y mask) => (VPSRLVWMasked256 x y (VPMOVVec16x16ToM mask)) +(ShiftRightMaskedUint16x32 x y mask) => (VPSRLVWMasked512 x y (VPMOVVec16x32ToM mask)) +(ShiftRightMaskedUint32x4 x y mask) => (VPSRLVDMasked128 x y (VPMOVVec32x4ToM mask)) +(ShiftRightMaskedUint32x8 x y mask) => (VPSRLVDMasked256 x y (VPMOVVec32x8ToM mask)) +(ShiftRightMaskedUint32x16 x y mask) => (VPSRLVDMasked512 x y (VPMOVVec32x16ToM mask)) +(ShiftRightMaskedUint64x2 x y mask) => (VPSRLVQMasked128 x y (VPMOVVec64x2ToM mask)) +(ShiftRightMaskedUint64x4 x y mask) => (VPSRLVQMasked256 x y (VPMOVVec64x4ToM mask)) +(ShiftRightMaskedUint64x8 x y mask) => (VPSRLVQMasked512 x y (VPMOVVec64x8ToM mask)) (ShiftRightSignExtendedInt16x8 ...) => (VPSRAVW128 ...) (ShiftRightSignExtendedInt16x16 ...) => (VPSRAVW256 ...) (ShiftRightSignExtendedInt16x32 ...) => (VPSRAVW512 ...) @@ -1599,6 +1521,24 @@ (ShiftRightSignExtendedUint64x2 ...) => (VPSRAVQ128 ...) (ShiftRightSignExtendedUint64x4 ...) => (VPSRAVQ256 ...) (ShiftRightSignExtendedUint64x8 ...) => (VPSRAVQ512 ...) +(ShiftRightSignExtendedMaskedInt16x8 x y mask) => (VPSRAVWMasked128 x y (VPMOVVec16x8ToM mask)) +(ShiftRightSignExtendedMaskedInt16x16 x y mask) => (VPSRAVWMasked256 x y (VPMOVVec16x16ToM mask)) +(ShiftRightSignExtendedMaskedInt16x32 x y mask) => (VPSRAVWMasked512 x y (VPMOVVec16x32ToM mask)) +(ShiftRightSignExtendedMaskedInt32x4 x y mask) => (VPSRAVDMasked128 x y (VPMOVVec32x4ToM mask)) +(ShiftRightSignExtendedMaskedInt32x8 x y mask) => (VPSRAVDMasked256 x y (VPMOVVec32x8ToM mask)) +(ShiftRightSignExtendedMaskedInt32x16 x y mask) => (VPSRAVDMasked512 x y (VPMOVVec32x16ToM mask)) +(ShiftRightSignExtendedMaskedInt64x2 x y mask) => (VPSRAVQMasked128 x y (VPMOVVec64x2ToM mask)) +(ShiftRightSignExtendedMaskedInt64x4 x y mask) => (VPSRAVQMasked256 x y (VPMOVVec64x4ToM mask)) +(ShiftRightSignExtendedMaskedInt64x8 x y mask) => (VPSRAVQMasked512 x y (VPMOVVec64x8ToM mask)) +(ShiftRightSignExtendedMaskedUint16x8 x y mask) => (VPSRAVWMasked128 x y (VPMOVVec16x8ToM mask)) +(ShiftRightSignExtendedMaskedUint16x16 x y mask) => (VPSRAVWMasked256 x y (VPMOVVec16x16ToM mask)) +(ShiftRightSignExtendedMaskedUint16x32 x y mask) => (VPSRAVWMasked512 x y (VPMOVVec16x32ToM mask)) +(ShiftRightSignExtendedMaskedUint32x4 x y mask) => (VPSRAVDMasked128 x y (VPMOVVec32x4ToM mask)) +(ShiftRightSignExtendedMaskedUint32x8 x y mask) => (VPSRAVDMasked256 x y (VPMOVVec32x8ToM mask)) +(ShiftRightSignExtendedMaskedUint32x16 x y mask) => (VPSRAVDMasked512 x y (VPMOVVec32x16ToM mask)) +(ShiftRightSignExtendedMaskedUint64x2 x y mask) => (VPSRAVQMasked128 x y (VPMOVVec64x2ToM mask)) +(ShiftRightSignExtendedMaskedUint64x4 x y mask) => (VPSRAVQMasked256 x y (VPMOVVec64x4ToM mask)) +(ShiftRightSignExtendedMaskedUint64x8 x y mask) => (VPSRAVQMasked512 x y (VPMOVVec64x8ToM mask)) (SignInt8x16 ...) => (VPSIGNB128 ...) (SignInt8x32 ...) => (VPSIGNB256 ...) (SignInt16x8 ...) => (VPSIGNW128 ...) @@ -1611,6 +1551,12 @@ (SqrtFloat64x2 ...) => (VSQRTPD128 ...) (SqrtFloat64x4 ...) => (VSQRTPD256 ...) (SqrtFloat64x8 ...) => (VSQRTPD512 ...) 
+(SqrtMaskedFloat32x4 x mask) => (VSQRTPSMasked128 x (VPMOVVec32x4ToM mask)) +(SqrtMaskedFloat32x8 x mask) => (VSQRTPSMasked256 x (VPMOVVec32x8ToM mask)) +(SqrtMaskedFloat32x16 x mask) => (VSQRTPSMasked512 x (VPMOVVec32x16ToM mask)) +(SqrtMaskedFloat64x2 x mask) => (VSQRTPDMasked128 x (VPMOVVec64x2ToM mask)) +(SqrtMaskedFloat64x4 x mask) => (VSQRTPDMasked256 x (VPMOVVec64x4ToM mask)) +(SqrtMaskedFloat64x8 x mask) => (VSQRTPDMasked512 x (VPMOVVec64x8ToM mask)) (SubFloat32x4 ...) => (VSUBPS128 ...) (SubFloat32x8 ...) => (VSUBPS256 ...) (SubFloat32x16 ...) => (VSUBPS512 ...) @@ -1641,6 +1587,36 @@ (SubUint64x2 ...) => (VPSUBQ128 ...) (SubUint64x4 ...) => (VPSUBQ256 ...) (SubUint64x8 ...) => (VPSUBQ512 ...) +(SubMaskedFloat32x4 x y mask) => (VSUBPSMasked128 x y (VPMOVVec32x4ToM mask)) +(SubMaskedFloat32x8 x y mask) => (VSUBPSMasked256 x y (VPMOVVec32x8ToM mask)) +(SubMaskedFloat32x16 x y mask) => (VSUBPSMasked512 x y (VPMOVVec32x16ToM mask)) +(SubMaskedFloat64x2 x y mask) => (VSUBPDMasked128 x y (VPMOVVec64x2ToM mask)) +(SubMaskedFloat64x4 x y mask) => (VSUBPDMasked256 x y (VPMOVVec64x4ToM mask)) +(SubMaskedFloat64x8 x y mask) => (VSUBPDMasked512 x y (VPMOVVec64x8ToM mask)) +(SubMaskedInt8x16 x y mask) => (VPSUBBMasked128 x y (VPMOVVec8x16ToM mask)) +(SubMaskedInt8x32 x y mask) => (VPSUBBMasked256 x y (VPMOVVec8x32ToM mask)) +(SubMaskedInt8x64 x y mask) => (VPSUBBMasked512 x y (VPMOVVec8x64ToM mask)) +(SubMaskedInt16x8 x y mask) => (VPSUBWMasked128 x y (VPMOVVec16x8ToM mask)) +(SubMaskedInt16x16 x y mask) => (VPSUBWMasked256 x y (VPMOVVec16x16ToM mask)) +(SubMaskedInt16x32 x y mask) => (VPSUBWMasked512 x y (VPMOVVec16x32ToM mask)) +(SubMaskedInt32x4 x y mask) => (VPSUBDMasked128 x y (VPMOVVec32x4ToM mask)) +(SubMaskedInt32x8 x y mask) => (VPSUBDMasked256 x y (VPMOVVec32x8ToM mask)) +(SubMaskedInt32x16 x y mask) => (VPSUBDMasked512 x y (VPMOVVec32x16ToM mask)) +(SubMaskedInt64x2 x y mask) => (VPSUBQMasked128 x y (VPMOVVec64x2ToM mask)) +(SubMaskedInt64x4 x y mask) => (VPSUBQMasked256 x y (VPMOVVec64x4ToM mask)) +(SubMaskedInt64x8 x y mask) => (VPSUBQMasked512 x y (VPMOVVec64x8ToM mask)) +(SubMaskedUint8x16 x y mask) => (VPSUBBMasked128 x y (VPMOVVec8x16ToM mask)) +(SubMaskedUint8x32 x y mask) => (VPSUBBMasked256 x y (VPMOVVec8x32ToM mask)) +(SubMaskedUint8x64 x y mask) => (VPSUBBMasked512 x y (VPMOVVec8x64ToM mask)) +(SubMaskedUint16x8 x y mask) => (VPSUBWMasked128 x y (VPMOVVec16x8ToM mask)) +(SubMaskedUint16x16 x y mask) => (VPSUBWMasked256 x y (VPMOVVec16x16ToM mask)) +(SubMaskedUint16x32 x y mask) => (VPSUBWMasked512 x y (VPMOVVec16x32ToM mask)) +(SubMaskedUint32x4 x y mask) => (VPSUBDMasked128 x y (VPMOVVec32x4ToM mask)) +(SubMaskedUint32x8 x y mask) => (VPSUBDMasked256 x y (VPMOVVec32x8ToM mask)) +(SubMaskedUint32x16 x y mask) => (VPSUBDMasked512 x y (VPMOVVec32x16ToM mask)) +(SubMaskedUint64x2 x y mask) => (VPSUBQMasked128 x y (VPMOVVec64x2ToM mask)) +(SubMaskedUint64x4 x y mask) => (VPSUBQMasked256 x y (VPMOVVec64x4ToM mask)) +(SubMaskedUint64x8 x y mask) => (VPSUBQMasked512 x y (VPMOVVec64x8ToM mask)) (TruncFloat32x4 x) => (VROUNDPS128 [3] x) (TruncFloat32x8 x) => (VROUNDPS256 [3] x) (TruncFloat64x2 x) => (VROUNDPD128 [3] x) @@ -1651,12 +1627,24 @@ (TruncWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+3] x) (TruncWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+3] x) (TruncWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+3] x) +(TruncWithPrecisionMaskedFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) +(TruncWithPrecisionMaskedFloat32x8 [a] x 
mask) => (VRNDSCALEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) +(TruncWithPrecisionMaskedFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) +(TruncWithPrecisionMaskedFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) +(TruncWithPrecisionMaskedFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) +(TruncWithPrecisionMaskedFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) (UnsignedSignedQuadDotProdAccumulateInt32x4 ...) => (VPDPBUSD128 ...) (UnsignedSignedQuadDotProdAccumulateInt32x8 ...) => (VPDPBUSD256 ...) (UnsignedSignedQuadDotProdAccumulateInt32x16 ...) => (VPDPBUSD512 ...) (UnsignedSignedQuadDotProdAccumulateUint32x4 ...) => (VPDPBUSD128 ...) (UnsignedSignedQuadDotProdAccumulateUint32x8 ...) => (VPDPBUSD256 ...) (UnsignedSignedQuadDotProdAccumulateUint32x16 ...) => (VPDPBUSD512 ...) +(UnsignedSignedQuadDotProdAccumulateMaskedInt32x4 x y z mask) => (VPDPBUSDMasked128 x y z (VPMOVVec32x4ToM mask)) +(UnsignedSignedQuadDotProdAccumulateMaskedInt32x8 x y z mask) => (VPDPBUSDMasked256 x y z (VPMOVVec32x8ToM mask)) +(UnsignedSignedQuadDotProdAccumulateMaskedInt32x16 x y z mask) => (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) +(UnsignedSignedQuadDotProdAccumulateMaskedUint32x4 x y z mask) => (VPDPBUSDMasked128 x y z (VPMOVVec32x4ToM mask)) +(UnsignedSignedQuadDotProdAccumulateMaskedUint32x8 x y z mask) => (VPDPBUSDMasked256 x y z (VPMOVVec32x8ToM mask)) +(UnsignedSignedQuadDotProdAccumulateMaskedUint32x16 x y z mask) => (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) (XorInt8x16 ...) => (VPXOR128 ...) (XorInt8x32 ...) => (VPXOR256 ...) (XorInt16x8 ...) => (VPXOR128 ...) @@ -1677,3 +1665,15 @@ (XorUint64x2 ...) => (VPXOR128 ...) (XorUint64x4 ...) => (VPXOR256 ...) (XorUint64x8 ...) => (VPXORQ512 ...) 
+(XorMaskedInt32x4 x y mask) => (VPXORDMasked128 x y (VPMOVVec32x4ToM mask)) +(XorMaskedInt32x8 x y mask) => (VPXORDMasked256 x y (VPMOVVec32x8ToM mask)) +(XorMaskedInt32x16 x y mask) => (VPXORDMasked512 x y (VPMOVVec32x16ToM mask)) +(XorMaskedInt64x2 x y mask) => (VPXORQMasked128 x y (VPMOVVec64x2ToM mask)) +(XorMaskedInt64x4 x y mask) => (VPXORQMasked256 x y (VPMOVVec64x4ToM mask)) +(XorMaskedInt64x8 x y mask) => (VPXORQMasked512 x y (VPMOVVec64x8ToM mask)) +(XorMaskedUint32x4 x y mask) => (VPXORDMasked128 x y (VPMOVVec32x4ToM mask)) +(XorMaskedUint32x8 x y mask) => (VPXORDMasked256 x y (VPMOVVec32x8ToM mask)) +(XorMaskedUint32x16 x y mask) => (VPXORDMasked512 x y (VPMOVVec32x16ToM mask)) +(XorMaskedUint64x2 x y mask) => (VPXORQMasked128 x y (VPMOVVec64x2ToM mask)) +(XorMaskedUint64x4 x y mask) => (VPXORQMasked256 x y (VPMOVVec64x4ToM mask)) +(XorMaskedUint64x8 x y mask) => (VPXORQMasked512 x y (VPMOVVec64x8ToM mask)) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 892ecc40431839..5abaa4a0bcc0a9 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -4,836 +4,836 @@ package main func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, fpgp regInfo) []opData { return []opData{ {name: "VADDPS512", argLength: 2, reg: fp21, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VRCP14PS512", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRSQRT14PS512", argLength: 1, reg: fp11, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VDIVPS512", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VFMADD213PS512", argLength: 3, reg: fp31, asm: "VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADDSUB213PS512", argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUBADD213PS512", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VADDPSMasked512", argLength: 3, reg: fp2kfp, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VRCP14PS512", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRCP14PSMasked512", argLength: 2, reg: fpkfp, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRSQRT14PS512", argLength: 1, reg: fp11, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRSQRT14PSMasked512", argLength: 2, reg: fpkfp, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VDIVPS512", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VDIVPSMasked512", argLength: 3, reg: fp2kfp, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VFMADD213PS512", argLength: 3, reg: fp31, asm: "VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VFMADD213PSMasked512", argLength: 4, reg: fp3kfp, asm: "VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB213PS512", argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VFMADDSUB213PSMasked512", 
argLength: 4, reg: fp3kfp, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD213PS512", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VFMSUBADD213PSMasked512", argLength: 4, reg: fp3kfp, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VMAXPSMasked512", argLength: 3, reg: fp2kfp, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMINPSMasked512", argLength: 3, reg: fp2kfp, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMULPSMasked512", argLength: 3, reg: fp2kfp, asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VSCALEFPSMasked512", argLength: 3, reg: fp2kfp, asm: "VSCALEFPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSQRTPSMasked512", argLength: 2, reg: fpkfp, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSUBPSMasked512", argLength: 3, reg: fp2kfp, asm: "VSUBPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VMAXPS512", argLength: 2, reg: fp21, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMAXPSMasked512", argLength: 3, reg: fp2kfp, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMINPS512", argLength: 2, reg: fp21, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMINPSMasked512", argLength: 3, reg: fp2kfp, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMULPS512", argLength: 2, reg: fp21, asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VSCALEFPS512", argLength: 2, reg: fp21, asm: "VSCALEFPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSCALEFPSMasked512", argLength: 3, reg: fp2kfp, asm: "VSCALEFPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VMULPSMasked512", argLength: 3, reg: fp2kfp, asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VSQRTPS512", argLength: 1, reg: fp11, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSQRTPSMasked512", argLength: 2, reg: fpkfp, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VSUBPS512", argLength: 2, reg: fp21, asm: "VSUBPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSUBPSMasked512", argLength: 3, reg: fp2kfp, asm: "VSUBPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VADDPS128", argLength: 2, reg: fp21, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VADDPSMasked128", argLength: 3, reg: fp2kfp, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VADDSUBPS128", argLength: 2, reg: fp21, asm: "VADDSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRCP14PS128", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRSQRTPS128", argLength: 1, reg: fp11, asm: "VRSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VDIVPS128", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VFMADD213PS128", argLength: 3, reg: fp31, asm: "VFMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADDSUB213PS128", argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", 
commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUBADD213PS128", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VADDPSMasked128", argLength: 3, reg: fp2kfp, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VRCP14PSMasked128", argLength: 2, reg: fpkfp, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRSQRTPS128", argLength: 1, reg: fp11, asm: "VRSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRSQRT14PSMasked128", argLength: 2, reg: fpkfp, asm: "VRSQRT14PS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VDIVPS128", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPSMasked128", argLength: 3, reg: fp2kfp, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VFMADD213PS128", argLength: 3, reg: fp31, asm: "VFMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMADD213PSMasked128", argLength: 4, reg: fp3kfp, asm: "VFMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADDSUB213PS128", argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMADDSUB213PSMasked128", argLength: 4, reg: fp3kfp, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUBADD213PS128", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMSUBADD213PSMasked128", argLength: 4, reg: fp3kfp, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VMAXPSMasked128", argLength: 3, reg: fp2kfp, asm: "VMAXPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMINPSMasked128", argLength: 3, reg: fp2kfp, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMULPSMasked128", argLength: 3, reg: fp2kfp, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VSCALEFPSMasked128", argLength: 3, reg: fp2kfp, asm: "VSCALEFPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSQRTPSMasked128", argLength: 2, reg: fpkfp, asm: "VSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSUBPSMasked128", argLength: 3, reg: fp2kfp, asm: "VSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VMAXPS128", argLength: 2, reg: fp21, asm: "VMAXPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMAXPSMasked128", argLength: 3, reg: fp2kfp, asm: "VMAXPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMINPS128", argLength: 2, reg: fp21, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMINPSMasked128", argLength: 3, reg: fp2kfp, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMULPS128", argLength: 2, reg: fp21, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VSCALEFPS128", argLength: 2, reg: fp21, asm: "VSCALEFPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSCALEFPSMasked128", argLength: 3, reg: fp2kfp, asm: "VSCALEFPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VMULPSMasked128", argLength: 3, reg: fp2kfp, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VHADDPS128", argLength: 2, reg: fp21, asm: 
"VHADDPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VHSUBPS128", argLength: 2, reg: fp21, asm: "VHSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VSQRTPS128", argLength: 1, reg: fp11, asm: "VSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSQRTPSMasked128", argLength: 2, reg: fpkfp, asm: "VSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VSUBPS128", argLength: 2, reg: fp21, asm: "VSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSUBPSMasked128", argLength: 3, reg: fp2kfp, asm: "VSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VADDPS256", argLength: 2, reg: fp21, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VADDPSMasked256", argLength: 3, reg: fp2kfp, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VADDSUBPS256", argLength: 2, reg: fp21, asm: "VADDSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRCP14PS256", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRSQRTPS256", argLength: 1, reg: fp11, asm: "VRSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VDIVPS256", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VFMADD213PS256", argLength: 3, reg: fp31, asm: "VFMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADDSUB213PS256", argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUBADD213PS256", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VADDPSMasked256", argLength: 3, reg: fp2kfp, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VRCP14PSMasked256", argLength: 2, reg: fpkfp, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRSQRTPS256", argLength: 1, reg: fp11, asm: "VRSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRSQRT14PSMasked256", argLength: 2, reg: fpkfp, asm: "VRSQRT14PS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VDIVPS256", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPSMasked256", argLength: 3, reg: fp2kfp, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VFMADD213PS256", argLength: 3, reg: fp31, asm: "VFMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VFMADD213PSMasked256", argLength: 4, reg: fp3kfp, asm: "VFMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB213PS256", argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VFMADDSUB213PSMasked256", argLength: 4, reg: fp3kfp, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUBADD213PS256", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VFMSUBADD213PSMasked256", argLength: 4, reg: fp3kfp, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VMAXPSMasked256", argLength: 3, reg: fp2kfp, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMINPSMasked256", argLength: 3, reg: fp2kfp, asm: 
"VMINPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMULPSMasked256", argLength: 3, reg: fp2kfp, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VSCALEFPSMasked256", argLength: 3, reg: fp2kfp, asm: "VSCALEFPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSQRTPSMasked256", argLength: 2, reg: fpkfp, asm: "VSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSUBPSMasked256", argLength: 3, reg: fp2kfp, asm: "VSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VMAXPS256", argLength: 2, reg: fp21, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMAXPSMasked256", argLength: 3, reg: fp2kfp, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMINPS256", argLength: 2, reg: fp21, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMINPSMasked256", argLength: 3, reg: fp2kfp, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMULPS256", argLength: 2, reg: fp21, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VSCALEFPS256", argLength: 2, reg: fp21, asm: "VSCALEFPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSCALEFPSMasked256", argLength: 3, reg: fp2kfp, asm: "VSCALEFPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VMULPSMasked256", argLength: 3, reg: fp2kfp, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VHADDPS256", argLength: 2, reg: fp21, asm: "VHADDPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VHSUBPS256", argLength: 2, reg: fp21, asm: "VHSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VSQRTPS256", argLength: 1, reg: fp11, asm: "VSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSQRTPSMasked256", argLength: 2, reg: fpkfp, asm: "VSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VSUBPS256", argLength: 2, reg: fp21, asm: "VSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSUBPSMasked256", argLength: 3, reg: fp2kfp, asm: "VSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VADDPD128", argLength: 2, reg: fp21, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VADDPDMasked128", argLength: 3, reg: fp2kfp, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VADDSUBPD128", argLength: 2, reg: fp21, asm: "VADDSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRCP14PD128", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRSQRT14PD128", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VDIVPD128", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VFMADD213PD128", argLength: 3, reg: fp31, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADDSUB213PD128", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUBADD213PD128", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VADDPDMasked128", argLength: 3, reg: fp2kfp, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: 
"VRCP14PDMasked128", argLength: 2, reg: fpkfp, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRSQRT14PD128", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRSQRT14PDMasked128", argLength: 2, reg: fpkfp, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VDIVPD128", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPDMasked128", argLength: 3, reg: fp2kfp, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VFMADD213PD128", argLength: 3, reg: fp31, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMADD213PDMasked128", argLength: 4, reg: fp3kfp, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADDSUB213PD128", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMADDSUB213PDMasked128", argLength: 4, reg: fp3kfp, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUBADD213PD128", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMSUBADD213PDMasked128", argLength: 4, reg: fp3kfp, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VMAXPDMasked128", argLength: 3, reg: fp2kfp, asm: "VMAXPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMINPDMasked128", argLength: 3, reg: fp2kfp, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMULPDMasked128", argLength: 3, reg: fp2kfp, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VSCALEFPDMasked128", argLength: 3, reg: fp2kfp, asm: "VSCALEFPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSQRTPDMasked128", argLength: 2, reg: fpkfp, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSUBPDMasked128", argLength: 3, reg: fp2kfp, asm: "VSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VMAXPD128", argLength: 2, reg: fp21, asm: "VMAXPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMAXPDMasked128", argLength: 3, reg: fp2kfp, asm: "VMAXPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMINPD128", argLength: 2, reg: fp21, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMINPDMasked128", argLength: 3, reg: fp2kfp, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMULPD128", argLength: 2, reg: fp21, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VSCALEFPD128", argLength: 2, reg: fp21, asm: "VSCALEFPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSCALEFPDMasked128", argLength: 3, reg: fp2kfp, asm: "VSCALEFPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VMULPDMasked128", argLength: 3, reg: fp2kfp, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VHADDPD128", argLength: 2, reg: fp21, asm: "VHADDPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VHSUBPD128", argLength: 2, reg: fp21, asm: "VHSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VSQRTPD128", argLength: 1, reg: fp11, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: 
"VSQRTPDMasked128", argLength: 2, reg: fpkfp, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VSUBPD128", argLength: 2, reg: fp21, asm: "VSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSUBPDMasked128", argLength: 3, reg: fp2kfp, asm: "VSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VADDPD256", argLength: 2, reg: fp21, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VADDPDMasked256", argLength: 3, reg: fp2kfp, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VADDSUBPD256", argLength: 2, reg: fp21, asm: "VADDSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRCP14PD256", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRSQRT14PD256", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VDIVPD256", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VFMADD213PD256", argLength: 3, reg: fp31, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADDSUB213PD256", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUBADD213PD256", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VADDPDMasked256", argLength: 3, reg: fp2kfp, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VRCP14PDMasked256", argLength: 2, reg: fpkfp, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRSQRT14PD256", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRSQRT14PDMasked256", argLength: 2, reg: fpkfp, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VDIVPD256", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPDMasked256", argLength: 3, reg: fp2kfp, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VFMADD213PD256", argLength: 3, reg: fp31, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VFMADD213PDMasked256", argLength: 4, reg: fp3kfp, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB213PD256", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VFMADDSUB213PDMasked256", argLength: 4, reg: fp3kfp, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUBADD213PD256", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VFMSUBADD213PDMasked256", argLength: 4, reg: fp3kfp, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VMAXPDMasked256", argLength: 3, reg: fp2kfp, asm: "VMAXPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMINPDMasked256", argLength: 3, reg: fp2kfp, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMULPDMasked256", argLength: 3, reg: fp2kfp, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VSCALEFPDMasked256", argLength: 3, reg: fp2kfp, asm: "VSCALEFPD", commutative: false, typ: "Vec256", 
resultInArg0: false}, - {name: "VSQRTPDMasked256", argLength: 2, reg: fpkfp, asm: "VSQRTPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSUBPDMasked256", argLength: 3, reg: fp2kfp, asm: "VSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VMAXPD256", argLength: 2, reg: fp21, asm: "VMAXPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMAXPDMasked256", argLength: 3, reg: fp2kfp, asm: "VMAXPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMINPD256", argLength: 2, reg: fp21, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMINPDMasked256", argLength: 3, reg: fp2kfp, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMULPD256", argLength: 2, reg: fp21, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VSCALEFPD256", argLength: 2, reg: fp21, asm: "VSCALEFPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSCALEFPDMasked256", argLength: 3, reg: fp2kfp, asm: "VSCALEFPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VMULPDMasked256", argLength: 3, reg: fp2kfp, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VHADDPD256", argLength: 2, reg: fp21, asm: "VHADDPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VHSUBPD256", argLength: 2, reg: fp21, asm: "VHSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VSQRTPD256", argLength: 1, reg: fp11, asm: "VSQRTPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSQRTPDMasked256", argLength: 2, reg: fpkfp, asm: "VSQRTPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VSUBPD256", argLength: 2, reg: fp21, asm: "VSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSUBPDMasked256", argLength: 3, reg: fp2kfp, asm: "VSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VADDPD512", argLength: 2, reg: fp21, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VRCP14PD512", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRSQRT14PD512", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VDIVPD512", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VFMADD213PD512", argLength: 3, reg: fp31, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADDSUB213PD512", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUBADD213PD512", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VADDPDMasked512", argLength: 3, reg: fp2kfp, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VRCP14PD512", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRCP14PDMasked512", argLength: 2, reg: fpkfp, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRSQRT14PD512", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRSQRT14PDMasked512", argLength: 2, reg: fpkfp, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VDIVPD512", argLength: 2, reg: fp21, asm: 
"VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VDIVPDMasked512", argLength: 3, reg: fp2kfp, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VFMADD213PD512", argLength: 3, reg: fp31, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VFMADD213PDMasked512", argLength: 4, reg: fp3kfp, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB213PD512", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VFMADDSUB213PDMasked512", argLength: 4, reg: fp3kfp, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD213PD512", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VFMSUBADD213PDMasked512", argLength: 4, reg: fp3kfp, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VMAXPDMasked512", argLength: 3, reg: fp2kfp, asm: "VMAXPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMINPDMasked512", argLength: 3, reg: fp2kfp, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMULPDMasked512", argLength: 3, reg: fp2kfp, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VSCALEFPDMasked512", argLength: 3, reg: fp2kfp, asm: "VSCALEFPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSQRTPDMasked512", argLength: 2, reg: fpkfp, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSUBPDMasked512", argLength: 3, reg: fp2kfp, asm: "VSUBPD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VMAXPD512", argLength: 2, reg: fp21, asm: "VMAXPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMAXPDMasked512", argLength: 3, reg: fp2kfp, asm: "VMAXPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMINPD512", argLength: 2, reg: fp21, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMINPDMasked512", argLength: 3, reg: fp2kfp, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMULPD512", argLength: 2, reg: fp21, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VSCALEFPD512", argLength: 2, reg: fp21, asm: "VSCALEFPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSCALEFPDMasked512", argLength: 3, reg: fp2kfp, asm: "VSCALEFPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VMULPDMasked512", argLength: 3, reg: fp2kfp, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VSQRTPD512", argLength: 1, reg: fp11, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSQRTPDMasked512", argLength: 2, reg: fpkfp, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VSUBPD512", argLength: 2, reg: fp21, asm: "VSUBPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSUBPDMasked512", argLength: 3, reg: fp2kfp, asm: "VSUBPD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPABSW256", argLength: 1, reg: fp11, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSWMasked256", argLength: 2, reg: fpkfp, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPADDW256", argLength: 2, reg: fp21, asm: "VPADDW", 
commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDWMasked256", argLength: 3, reg: fp2kfp, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPCMPEQW256", argLength: 2, reg: fp21, asm: "VPCMPEQW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPCMPGTW256", argLength: 2, reg: fp21, asm: "VPCMPGTW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPABSWMasked256", argLength: 2, reg: fpkfp, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPADDWMasked256", argLength: 3, reg: fp2kfp, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXSWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXSW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINSWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINSW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULHWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULLWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULLW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMADDWDMasked256", argLength: 3, reg: fp2kfp, asm: "VPMADDWD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTWMasked256", argLength: 2, reg: fpkfp, asm: "VPOPCNTW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPADDSWMasked256", argLength: 3, reg: fp2kfp, asm: "VPADDSW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBSWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSLLVWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSLLVW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDVWMasked256", argLength: 4, reg: fp3kfp, asm: "VPSHLDVW", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSRLVWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRLVW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDVWMasked256", argLength: 4, reg: fp3kfp, asm: "VPSHRDVW", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSRAVWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRAVW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMAXSW256", argLength: 2, reg: fp21, asm: "VPMAXSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXSW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINSW256", argLength: 2, reg: fp21, asm: "VPMINSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINSW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULHW256", argLength: 2, reg: fp21, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULHWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULLW256", argLength: 2, reg: fp21, asm: "VPMULLW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULLW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMADDWD256", argLength: 2, reg: fp21, asm: "VPMADDWD", commutative: false, typ: 
"Vec256", resultInArg0: false}, + {name: "VPMADDWDMasked256", argLength: 3, reg: fp2kfp, asm: "VPMADDWD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPHADDW256", argLength: 2, reg: fp21, asm: "VPHADDW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPHSUBW256", argLength: 2, reg: fp21, asm: "VPHSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPOPCNTW256", argLength: 1, reg: fp11, asm: "VPOPCNTW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTWMasked256", argLength: 2, reg: fpkfp, asm: "VPOPCNTW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPADDSW256", argLength: 2, reg: fp21, asm: "VPADDSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDSWMasked256", argLength: 3, reg: fp2kfp, asm: "VPADDSW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPHADDSW256", argLength: 2, reg: fp21, asm: "VPHADDSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPHSUBSW256", argLength: 2, reg: fp21, asm: "VPHSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBSW256", argLength: 2, reg: fp21, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBSWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSLLW256", argLength: 2, reg: fp21, asm: "VPSLLW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLW256", argLength: 2, reg: fp21, asm: "VPSRLW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRAW256", argLength: 2, reg: fp21, asm: "VPSRAW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSLLVW256", argLength: 2, reg: fp21, asm: "VPSLLVW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHLDVW256", argLength: 3, reg: fp31, asm: "VPSHLDVW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHLDVWMasked256", argLength: 4, reg: fp3kfp, asm: "VPSHLDVW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSLLVWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSLLVW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLVW256", argLength: 2, reg: fp21, asm: "VPSRLVW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHRDVW256", argLength: 3, reg: fp31, asm: "VPSHRDVW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHRDVWMasked256", argLength: 4, reg: fp3kfp, asm: "VPSHRDVW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRLVWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRLVW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRAVW256", argLength: 2, reg: fp21, asm: "VPSRAVW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAVWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRAVW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSIGNW256", argLength: 2, reg: fp21, asm: "VPSIGNW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBW256", argLength: 2, reg: fp21, asm: "VPSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPABSW512", argLength: 1, reg: fp11, asm: "VPABSW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDW512", argLength: 2, reg: fp21, asm: "VPADDW", commutative: true, typ: 
"Vec512", resultInArg0: false}, {name: "VPABSWMasked512", argLength: 2, reg: fpkfp, asm: "VPABSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDW512", argLength: 2, reg: fp21, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPADDWMasked512", argLength: 3, reg: fp2kfp, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXSWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULHWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULLWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULLW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMADDWDMasked512", argLength: 3, reg: fp2kfp, asm: "VPMADDWD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPOPCNTWMasked512", argLength: 2, reg: fpkfp, asm: "VPOPCNTW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDSWMasked512", argLength: 3, reg: fp2kfp, asm: "VPADDSW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBSWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSLLVWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSLLVW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDVWMasked512", argLength: 4, reg: fp3kfp, asm: "VPSHLDVW", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSRLVWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRLVW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDVWMasked512", argLength: 4, reg: fp3kfp, asm: "VPSHRDVW", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSRAVWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRAVW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPMAXSW512", argLength: 2, reg: fp21, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINSW512", argLength: 2, reg: fp21, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULHW512", argLength: 2, reg: fp21, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULHWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULLW512", argLength: 2, reg: fp21, asm: "VPMULLW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULLW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMADDWD512", argLength: 2, reg: fp21, asm: "VPMADDWD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMADDWDMasked512", argLength: 3, reg: fp2kfp, asm: "VPMADDWD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPOPCNTW512", argLength: 1, reg: fp11, asm: "VPOPCNTW", commutative: false, typ: "Vec512", resultInArg0: 
false}, + {name: "VPOPCNTWMasked512", argLength: 2, reg: fpkfp, asm: "VPOPCNTW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPADDSW512", argLength: 2, reg: fp21, asm: "VPADDSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDSWMasked512", argLength: 3, reg: fp2kfp, asm: "VPADDSW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPSUBSW512", argLength: 2, reg: fp21, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBSWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSLLVW512", argLength: 2, reg: fp21, asm: "VPSLLVW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSHLDVW512", argLength: 3, reg: fp31, asm: "VPSHLDVW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHLDVWMasked512", argLength: 4, reg: fp3kfp, asm: "VPSHLDVW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSLLVWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSLLVW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRLVW512", argLength: 2, reg: fp21, asm: "VPSRLVW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSHRDVW512", argLength: 3, reg: fp31, asm: "VPSHRDVW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHRDVWMasked512", argLength: 4, reg: fp3kfp, asm: "VPSHRDVW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRLVWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRLVW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRAVW512", argLength: 2, reg: fp21, asm: "VPSRAVW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAVWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRAVW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSUBW512", argLength: 2, reg: fp21, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPABSW128", argLength: 1, reg: fp11, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSWMasked128", argLength: 2, reg: fpkfp, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDW128", argLength: 2, reg: fp21, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDWMasked128", argLength: 3, reg: fp2kfp, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPCMPEQW128", argLength: 2, reg: fp21, asm: "VPCMPEQW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPCMPGTW128", argLength: 2, reg: fp21, asm: "VPCMPGTW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPABSWMasked128", argLength: 2, reg: fpkfp, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPADDWMasked128", argLength: 3, reg: fp2kfp, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXSWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXSW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINSWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINSW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULHWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULLWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULLW", 
commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMADDWDMasked128", argLength: 3, reg: fp2kfp, asm: "VPMADDWD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTWMasked128", argLength: 2, reg: fpkfp, asm: "VPOPCNTW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPADDSWMasked128", argLength: 3, reg: fp2kfp, asm: "VPADDSW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBSWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSLLVWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSLLVW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDVWMasked128", argLength: 4, reg: fp3kfp, asm: "VPSHLDVW", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSRLVWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRLVW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDVWMasked128", argLength: 4, reg: fp3kfp, asm: "VPSHRDVW", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSRAVWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRAVW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMAXSW128", argLength: 2, reg: fp21, asm: "VPMAXSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXSW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINSW128", argLength: 2, reg: fp21, asm: "VPMINSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINSW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULHW128", argLength: 2, reg: fp21, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULHWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULLW128", argLength: 2, reg: fp21, asm: "VPMULLW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULLW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMADDWD128", argLength: 2, reg: fp21, asm: "VPMADDWD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMADDWDMasked128", argLength: 3, reg: fp2kfp, asm: "VPMADDWD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPHADDW128", argLength: 2, reg: fp21, asm: "VPHADDW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPHSUBW128", argLength: 2, reg: fp21, asm: "VPHSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPOPCNTW128", argLength: 1, reg: fp11, asm: "VPOPCNTW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTWMasked128", argLength: 2, reg: fpkfp, asm: "VPOPCNTW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDSW128", argLength: 2, reg: fp21, asm: "VPADDSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDSWMasked128", argLength: 3, reg: fp2kfp, asm: "VPADDSW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPHADDSW128", argLength: 2, reg: fp21, asm: "VPHADDSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPHSUBSW128", argLength: 2, reg: fp21, asm: "VPHSUBSW", commutative: false, typ: "Vec128", resultInArg0: 
false}, {name: "VPSUBSW128", argLength: 2, reg: fp21, asm: "VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBSWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSLLW128", argLength: 2, reg: fp21, asm: "VPSLLW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRLW128", argLength: 2, reg: fp21, asm: "VPSRLW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRAW128", argLength: 2, reg: fp21, asm: "VPSRAW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSLLVW128", argLength: 2, reg: fp21, asm: "VPSLLVW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSHLDVW128", argLength: 3, reg: fp31, asm: "VPSHLDVW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHLDVWMasked128", argLength: 4, reg: fp3kfp, asm: "VPSHLDVW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSLLVWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSLLVW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRLVW128", argLength: 2, reg: fp21, asm: "VPSRLVW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSHRDVW128", argLength: 3, reg: fp31, asm: "VPSHRDVW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHRDVWMasked128", argLength: 4, reg: fp3kfp, asm: "VPSHRDVW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRLVWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRLVW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRAVW128", argLength: 2, reg: fp21, asm: "VPSRAVW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAVWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRAVW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSIGNW128", argLength: 2, reg: fp21, asm: "VPSIGNW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBW128", argLength: 2, reg: fp21, asm: "VPSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPABSD512", argLength: 1, reg: fp11, asm: "VPABSD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDD512", argLength: 2, reg: fp21, asm: "VPADDD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPANDD512", argLength: 2, reg: fp21, asm: "VPANDD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPANDND512", argLength: 2, reg: fp21, asm: "VPANDND", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPABSDMasked512", argLength: 2, reg: fpkfp, asm: "VPABSD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDD512", argLength: 2, reg: fp21, asm: "VPADDD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPADDDMasked512", argLength: 3, reg: fp2kfp, asm: "VPADDD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDD512", argLength: 2, reg: fp21, asm: "VPANDD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPANDDMasked512", argLength: 3, reg: fp2kfp, asm: "VPANDD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDND512", argLength: 2, reg: fp21, asm: "VPANDND", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPANDNDMasked512", argLength: 3, reg: fp2kfp, asm: "VPANDND", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: 
"VPMAXSDMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSDMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULLDMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPORDMasked512", argLength: 3, reg: fp2kfp, asm: "VPORD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPDPWSSDMasked512", argLength: 4, reg: fp3kfp, asm: "VPDPWSSD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPOPCNTDMasked512", argLength: 2, reg: fpkfp, asm: "VPOPCNTD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPROLVDMasked512", argLength: 3, reg: fp2kfp, asm: "VPROLVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORVDMasked512", argLength: 3, reg: fp2kfp, asm: "VPRORVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPDPWSSDSMasked512", argLength: 4, reg: fp3kfp, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPDPBUSDSMasked512", argLength: 4, reg: fp3kfp, asm: "VPDPBUSDS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSLLVDMasked512", argLength: 3, reg: fp2kfp, asm: "VPSLLVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDVDMasked512", argLength: 4, reg: fp3kfp, asm: "VPSHLDVD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSRLVDMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRLVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDVDMasked512", argLength: 4, reg: fp3kfp, asm: "VPSHRDVD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSRAVDMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRAVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBDMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPDPBUSDMasked512", argLength: 4, reg: fp3kfp, asm: "VPDPBUSD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPXORDMasked512", argLength: 3, reg: fp2kfp, asm: "VPXORD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXSD512", argLength: 2, reg: fp21, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSDMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINSD512", argLength: 2, reg: fp21, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSDMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULLD512", argLength: 2, reg: fp21, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLDMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPORD512", argLength: 2, reg: fp21, asm: "VPORD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPORDMasked512", argLength: 3, reg: fp2kfp, asm: "VPORD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPDPWSSD512", argLength: 3, reg: fp31, asm: "VPDPWSSD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPDPWSSDMasked512", argLength: 4, reg: fp3kfp, asm: "VPDPWSSD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: 
"VPOPCNTD512", argLength: 1, reg: fp11, asm: "VPOPCNTD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTDMasked512", argLength: 2, reg: fpkfp, asm: "VPOPCNTD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPROLVD512", argLength: 2, reg: fp21, asm: "VPROLVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLVDMasked512", argLength: 3, reg: fp2kfp, asm: "VPROLVD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPRORVD512", argLength: 2, reg: fp21, asm: "VPRORVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORVDMasked512", argLength: 3, reg: fp2kfp, asm: "VPRORVD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPDPWSSDS512", argLength: 3, reg: fp31, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPDPWSSDSMasked512", argLength: 4, reg: fp3kfp, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPDPBUSDS512", argLength: 3, reg: fp31, asm: "VPDPBUSDS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPDPBUSDSMasked512", argLength: 4, reg: fp3kfp, asm: "VPDPBUSDS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPSLLVD512", argLength: 2, reg: fp21, asm: "VPSLLVD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSHLDVD512", argLength: 3, reg: fp31, asm: "VPSHLDVD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHLDVDMasked512", argLength: 4, reg: fp3kfp, asm: "VPSHLDVD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSLLVDMasked512", argLength: 3, reg: fp2kfp, asm: "VPSLLVD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRLVD512", argLength: 2, reg: fp21, asm: "VPSRLVD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSHRDVD512", argLength: 3, reg: fp31, asm: "VPSHRDVD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHRDVDMasked512", argLength: 4, reg: fp3kfp, asm: "VPSHRDVD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRLVDMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRLVD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRAVD512", argLength: 2, reg: fp21, asm: "VPSRAVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAVDMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRAVD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSUBD512", argLength: 2, reg: fp21, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBDMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPDPBUSD512", argLength: 3, reg: fp31, asm: "VPDPBUSD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPDPBUSDMasked512", argLength: 4, reg: fp3kfp, asm: "VPDPBUSD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPXORD512", argLength: 2, reg: fp21, asm: "VPXORD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPXORDMasked512", argLength: 3, reg: fp2kfp, asm: "VPXORD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPABSD128", argLength: 1, reg: fp11, asm: "VPABSD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPADDD128", argLength: 2, reg: fp21, asm: "VPADDD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPEQD128", argLength: 2, reg: fp21, asm: "VPCMPEQD", commutative: true, typ: 
"Vec128", resultInArg0: false}, - {name: "VPCMPGTD128", argLength: 2, reg: fp21, asm: "VPCMPGTD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPABSDMasked128", argLength: 2, reg: fpkfp, asm: "VPABSD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDD128", argLength: 2, reg: fp21, asm: "VPADDD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPADDDMasked128", argLength: 3, reg: fp2kfp, asm: "VPADDD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPANDDMasked128", argLength: 3, reg: fp2kfp, asm: "VPANDD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPANDNDMasked128", argLength: 3, reg: fp2kfp, asm: "VPANDND", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXSDMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXSD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINSDMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINSD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULLDMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULLD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPORDMasked128", argLength: 3, reg: fp2kfp, asm: "VPORD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPDPWSSDMasked128", argLength: 4, reg: fp3kfp, asm: "VPDPWSSD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPOPCNTDMasked128", argLength: 2, reg: fpkfp, asm: "VPOPCNTD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPROLVDMasked128", argLength: 3, reg: fp2kfp, asm: "VPROLVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORVDMasked128", argLength: 3, reg: fp2kfp, asm: "VPRORVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPDPWSSDSMasked128", argLength: 4, reg: fp3kfp, asm: "VPDPWSSDS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPDPBUSDSMasked128", argLength: 4, reg: fp3kfp, asm: "VPDPBUSDS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSLLVDMasked128", argLength: 3, reg: fp2kfp, asm: "VPSLLVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDVDMasked128", argLength: 4, reg: fp3kfp, asm: "VPSHLDVD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSRLVDMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRLVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDVDMasked128", argLength: 4, reg: fp3kfp, asm: "VPSHRDVD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSRAVDMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRAVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBDMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPDPBUSDMasked128", argLength: 4, reg: fp3kfp, asm: "VPDPBUSD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPXORDMasked128", argLength: 3, reg: fp2kfp, asm: "VPXORD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPEQD128", argLength: 2, reg: fp21, asm: "VPCMPEQD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPGTD128", argLength: 2, reg: fp21, asm: "VPCMPGTD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMAXSD128", argLength: 2, reg: fp21, asm: "VPMAXSD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSDMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXSD", commutative: true, typ: 
"Vec128", resultInArg0: false}, {name: "VPMINSD128", argLength: 2, reg: fp21, asm: "VPMINSD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSDMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINSD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULDQ128", argLength: 2, reg: fp21, asm: "VPMULDQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULLD128", argLength: 2, reg: fp21, asm: "VPMULLD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLDMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULLD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPORDMasked128", argLength: 3, reg: fp2kfp, asm: "VPORD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPDPWSSD128", argLength: 3, reg: fp31, asm: "VPDPWSSD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPDPWSSDMasked128", argLength: 4, reg: fp3kfp, asm: "VPDPWSSD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPHADDD128", argLength: 2, reg: fp21, asm: "VPHADDD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPHSUBD128", argLength: 2, reg: fp21, asm: "VPHSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPOPCNTD128", argLength: 1, reg: fp11, asm: "VPOPCNTD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTDMasked128", argLength: 2, reg: fpkfp, asm: "VPOPCNTD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPROLVD128", argLength: 2, reg: fp21, asm: "VPROLVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLVDMasked128", argLength: 3, reg: fp2kfp, asm: "VPROLVD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPRORVD128", argLength: 2, reg: fp21, asm: "VPRORVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORVDMasked128", argLength: 3, reg: fp2kfp, asm: "VPRORVD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPDPWSSDS128", argLength: 3, reg: fp31, asm: "VPDPWSSDS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPDPWSSDSMasked128", argLength: 4, reg: fp3kfp, asm: "VPDPWSSDS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPDPBUSDS128", argLength: 3, reg: fp31, asm: "VPDPBUSDS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPDPBUSDSMasked128", argLength: 4, reg: fp3kfp, asm: "VPDPBUSDS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPSLLD128", argLength: 2, reg: fp21, asm: "VPSLLD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRLD128", argLength: 2, reg: fp21, asm: "VPSRLD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRAD128", argLength: 2, reg: fp21, asm: "VPSRAD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSLLVD128", argLength: 2, reg: fp21, asm: "VPSLLVD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSHLDVD128", argLength: 3, reg: fp31, asm: "VPSHLDVD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHLDVDMasked128", argLength: 4, reg: fp3kfp, asm: "VPSHLDVD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSLLVDMasked128", argLength: 3, reg: fp2kfp, asm: "VPSLLVD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRLVD128", argLength: 2, reg: fp21, asm: "VPSRLVD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSHRDVD128", argLength: 3, reg: fp31, asm: "VPSHRDVD", commutative: 
false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHRDVDMasked128", argLength: 4, reg: fp3kfp, asm: "VPSHRDVD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRLVDMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRLVD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRAVD128", argLength: 2, reg: fp21, asm: "VPSRAVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAVDMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRAVD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSIGND128", argLength: 2, reg: fp21, asm: "VPSIGND", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBD128", argLength: 2, reg: fp21, asm: "VPSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBDMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPDPBUSD128", argLength: 3, reg: fp31, asm: "VPDPBUSD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPDPBUSDMasked128", argLength: 4, reg: fp3kfp, asm: "VPDPBUSD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPXORDMasked128", argLength: 3, reg: fp2kfp, asm: "VPXORD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPABSD256", argLength: 1, reg: fp11, asm: "VPABSD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPADDD256", argLength: 2, reg: fp21, asm: "VPADDD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPEQD256", argLength: 2, reg: fp21, asm: "VPCMPEQD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPGTD256", argLength: 2, reg: fp21, asm: "VPCMPGTD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPABSDMasked256", argLength: 2, reg: fpkfp, asm: "VPABSD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDD256", argLength: 2, reg: fp21, asm: "VPADDD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPADDDMasked256", argLength: 3, reg: fp2kfp, asm: "VPADDD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPANDDMasked256", argLength: 3, reg: fp2kfp, asm: "VPANDD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPANDNDMasked256", argLength: 3, reg: fp2kfp, asm: "VPANDND", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXSDMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXSD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINSDMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINSD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULLDMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULLD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPORDMasked256", argLength: 3, reg: fp2kfp, asm: "VPORD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPDPWSSDMasked256", argLength: 4, reg: fp3kfp, asm: "VPDPWSSD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPOPCNTDMasked256", argLength: 2, reg: fpkfp, asm: "VPOPCNTD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPROLVDMasked256", argLength: 3, reg: fp2kfp, asm: "VPROLVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORVDMasked256", argLength: 3, reg: fp2kfp, asm: "VPRORVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPDPWSSDSMasked256", argLength: 4, reg: fp3kfp, asm: "VPDPWSSDS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: 
"VPDPBUSDSMasked256", argLength: 4, reg: fp3kfp, asm: "VPDPBUSDS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSLLVDMasked256", argLength: 3, reg: fp2kfp, asm: "VPSLLVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDVDMasked256", argLength: 4, reg: fp3kfp, asm: "VPSHLDVD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSRLVDMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRLVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDVDMasked256", argLength: 4, reg: fp3kfp, asm: "VPSHRDVD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSRAVDMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRAVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBDMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPDPBUSDMasked256", argLength: 4, reg: fp3kfp, asm: "VPDPBUSD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPXORDMasked256", argLength: 3, reg: fp2kfp, asm: "VPXORD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPEQD256", argLength: 2, reg: fp21, asm: "VPCMPEQD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPGTD256", argLength: 2, reg: fp21, asm: "VPCMPGTD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMAXSD256", argLength: 2, reg: fp21, asm: "VPMAXSD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSDMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXSD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINSD256", argLength: 2, reg: fp21, asm: "VPMINSD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSDMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINSD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULDQ256", argLength: 2, reg: fp21, asm: "VPMULDQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULLD256", argLength: 2, reg: fp21, asm: "VPMULLD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLDMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULLD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPORDMasked256", argLength: 3, reg: fp2kfp, asm: "VPORD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPDPWSSD256", argLength: 3, reg: fp31, asm: "VPDPWSSD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPDPWSSDMasked256", argLength: 4, reg: fp3kfp, asm: "VPDPWSSD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPHADDD256", argLength: 2, reg: fp21, asm: "VPHADDD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPHSUBD256", argLength: 2, reg: fp21, asm: "VPHSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPOPCNTD256", argLength: 1, reg: fp11, asm: "VPOPCNTD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTDMasked256", argLength: 2, reg: fpkfp, asm: "VPOPCNTD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPROLVD256", argLength: 2, reg: fp21, asm: "VPROLVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLVDMasked256", argLength: 3, reg: fp2kfp, asm: "VPROLVD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPRORVD256", argLength: 2, reg: fp21, asm: "VPRORVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORVDMasked256", argLength: 3, reg: fp2kfp, asm: 
"VPRORVD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPDPWSSDS256", argLength: 3, reg: fp31, asm: "VPDPWSSDS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPDPWSSDSMasked256", argLength: 4, reg: fp3kfp, asm: "VPDPWSSDS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPDPBUSDS256", argLength: 3, reg: fp31, asm: "VPDPBUSDS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPDPBUSDSMasked256", argLength: 4, reg: fp3kfp, asm: "VPDPBUSDS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPSLLD256", argLength: 2, reg: fp21, asm: "VPSLLD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLD256", argLength: 2, reg: fp21, asm: "VPSRLD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRAD256", argLength: 2, reg: fp21, asm: "VPSRAD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSLLVD256", argLength: 2, reg: fp21, asm: "VPSLLVD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHLDVD256", argLength: 3, reg: fp31, asm: "VPSHLDVD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHLDVDMasked256", argLength: 4, reg: fp3kfp, asm: "VPSHLDVD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSLLVDMasked256", argLength: 3, reg: fp2kfp, asm: "VPSLLVD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLVD256", argLength: 2, reg: fp21, asm: "VPSRLVD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHRDVD256", argLength: 3, reg: fp31, asm: "VPSHRDVD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHRDVDMasked256", argLength: 4, reg: fp3kfp, asm: "VPSHRDVD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRLVDMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRLVD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRAVD256", argLength: 2, reg: fp21, asm: "VPSRAVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAVDMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRAVD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSIGND256", argLength: 2, reg: fp21, asm: "VPSIGND", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBD256", argLength: 2, reg: fp21, asm: "VPSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBDMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPDPBUSD256", argLength: 3, reg: fp31, asm: "VPDPBUSD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPDPBUSDMasked256", argLength: 4, reg: fp3kfp, asm: "VPDPBUSD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPXORDMasked256", argLength: 3, reg: fp2kfp, asm: "VPXORD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPABSQ128", argLength: 1, reg: fp11, asm: "VPABSQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPADDQ128", argLength: 2, reg: fp21, asm: "VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPEQQ128", argLength: 2, reg: fp21, asm: "VPCMPEQQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPABSQMasked128", argLength: 2, reg: fpkfp, asm: "VPABSQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDQ128", argLength: 2, reg: fp21, asm: "VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPADDQMasked128", argLength: 3, reg: 
fp2kfp, asm: "VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPANDQMasked128", argLength: 3, reg: fp2kfp, asm: "VPANDQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPANDNQMasked128", argLength: 3, reg: fp2kfp, asm: "VPANDNQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPEQQ128", argLength: 2, reg: fp21, asm: "VPCMPEQQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSQ128", argLength: 2, reg: fp21, asm: "VPMAXSQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMAXSQMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXSQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSQ128", argLength: 2, reg: fp21, asm: "VPMINSQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINSQMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINSQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULDQMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLQ128", argLength: 2, reg: fp21, asm: "VPMULLQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULLQMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULLQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPORQMasked128", argLength: 3, reg: fp2kfp, asm: "VPORQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTQMasked128", argLength: 2, reg: fpkfp, asm: "VPOPCNTQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPROLVQMasked128", argLength: 3, reg: fp2kfp, asm: "VPROLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORVQMasked128", argLength: 3, reg: fp2kfp, asm: "VPRORVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSLLQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSLLQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRLQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRAQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSLLVQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSLLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDVQMasked128", argLength: 4, reg: fp3kfp, asm: "VPSHLDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSRLVQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDVQMasked128", argLength: 4, reg: fp3kfp, asm: "VPSHRDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSRAVQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRAVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPXORQMasked128", argLength: 3, reg: fp2kfp, asm: "VPXORQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXSQ128", argLength: 2, reg: fp21, asm: "VPMAXSQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINSQ128", argLength: 2, reg: fp21, asm: "VPMINSQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULLQ128", argLength: 2, reg: fp21, asm: "VPMULLQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPOPCNTQ128", argLength: 1, reg: fp11, asm: "VPOPCNTQ", commutative: false, 
typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTQMasked128", argLength: 2, reg: fpkfp, asm: "VPOPCNTQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPROLVQ128", argLength: 2, reg: fp21, asm: "VPROLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLVQMasked128", argLength: 3, reg: fp2kfp, asm: "VPROLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPRORVQ128", argLength: 2, reg: fp21, asm: "VPRORVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORVQMasked128", argLength: 3, reg: fp2kfp, asm: "VPRORVQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSLLQ128", argLength: 2, reg: fp21, asm: "VPSLLQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSLLQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRLQ128", argLength: 2, reg: fp21, asm: "VPSRLQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRLQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRAQ128", argLength: 2, reg: fp21, asm: "VPSRAQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRAQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSLLVQ128", argLength: 2, reg: fp21, asm: "VPSLLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSHLDVQ128", argLength: 3, reg: fp31, asm: "VPSHLDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHLDVQMasked128", argLength: 4, reg: fp3kfp, asm: "VPSHLDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSLLVQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSLLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRLVQ128", argLength: 2, reg: fp21, asm: "VPSRLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSHRDVQ128", argLength: 3, reg: fp31, asm: "VPSHRDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHRDVQMasked128", argLength: 4, reg: fp3kfp, asm: "VPSHRDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRLVQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRAVQ128", argLength: 2, reg: fp21, asm: "VPSRAVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAVQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRAVQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBQ128", argLength: 2, reg: fp21, asm: "VPSUBQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPXORQMasked128", argLength: 3, reg: fp2kfp, asm: "VPXORQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPABSQ256", argLength: 1, reg: fp11, asm: "VPABSQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPADDQ256", argLength: 2, reg: fp21, asm: "VPADDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPEQQ256", argLength: 2, reg: fp21, asm: "VPCMPEQQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPGTQ256", argLength: 2, reg: fp21, asm: "VPCMPGTQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPABSQMasked256", argLength: 2, reg: fpkfp, asm: 
"VPABSQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDQ256", argLength: 2, reg: fp21, asm: "VPADDQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPADDQMasked256", argLength: 3, reg: fp2kfp, asm: "VPADDQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPANDQMasked256", argLength: 3, reg: fp2kfp, asm: "VPANDQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPANDNQMasked256", argLength: 3, reg: fp2kfp, asm: "VPANDNQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPEQQ256", argLength: 2, reg: fp21, asm: "VPCMPEQQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPGTQ256", argLength: 2, reg: fp21, asm: "VPCMPGTQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSQ256", argLength: 2, reg: fp21, asm: "VPMAXSQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMAXSQMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXSQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSQ256", argLength: 2, reg: fp21, asm: "VPMINSQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINSQMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINSQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULDQMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLQ256", argLength: 2, reg: fp21, asm: "VPMULLQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULLQMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULLQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPORQMasked256", argLength: 3, reg: fp2kfp, asm: "VPORQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTQMasked256", argLength: 2, reg: fpkfp, asm: "VPOPCNTQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPROLVQMasked256", argLength: 3, reg: fp2kfp, asm: "VPROLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORVQMasked256", argLength: 3, reg: fp2kfp, asm: "VPRORVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSLLQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSLLQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRLQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRAQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSLLVQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSLLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDVQMasked256", argLength: 4, reg: fp3kfp, asm: "VPSHLDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSRLVQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDVQMasked256", argLength: 4, reg: fp3kfp, asm: "VPSHRDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSRAVQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRAVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPXORQMasked256", argLength: 3, reg: fp2kfp, asm: "VPXORQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXSQ256", argLength: 2, reg: fp21, asm: "VPMAXSQ", commutative: true, typ: 
"Vec256", resultInArg0: false}, - {name: "VPMINSQ256", argLength: 2, reg: fp21, asm: "VPMINSQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULLQ256", argLength: 2, reg: fp21, asm: "VPMULLQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPOPCNTQ256", argLength: 1, reg: fp11, asm: "VPOPCNTQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTQMasked256", argLength: 2, reg: fpkfp, asm: "VPOPCNTQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPROLVQ256", argLength: 2, reg: fp21, asm: "VPROLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLVQMasked256", argLength: 3, reg: fp2kfp, asm: "VPROLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPRORVQ256", argLength: 2, reg: fp21, asm: "VPRORVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORVQMasked256", argLength: 3, reg: fp2kfp, asm: "VPRORVQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSLLQ256", argLength: 2, reg: fp21, asm: "VPSLLQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSLLQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLQ256", argLength: 2, reg: fp21, asm: "VPSRLQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRLQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRAQ256", argLength: 2, reg: fp21, asm: "VPSRAQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRAQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSLLVQ256", argLength: 2, reg: fp21, asm: "VPSLLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHLDVQ256", argLength: 3, reg: fp31, asm: "VPSHLDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHLDVQMasked256", argLength: 4, reg: fp3kfp, asm: "VPSHLDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSLLVQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSLLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLVQ256", argLength: 2, reg: fp21, asm: "VPSRLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHRDVQ256", argLength: 3, reg: fp31, asm: "VPSHRDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHRDVQMasked256", argLength: 4, reg: fp3kfp, asm: "VPSHRDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRLVQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRAVQ256", argLength: 2, reg: fp21, asm: "VPSRAVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAVQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRAVQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBQ256", argLength: 2, reg: fp21, asm: "VPSUBQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPABSQ512", argLength: 1, reg: fp11, asm: "VPABSQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDQ512", argLength: 2, reg: fp21, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPANDQ512", argLength: 2, reg: fp21, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPANDNQ512", argLength: 2, reg: fp21, asm: "VPANDNQ", commutative: false, 
typ: "Vec512", resultInArg0: false}, - {name: "VPABSQMasked512", argLength: 2, reg: fpkfp, asm: "VPABSQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDQMasked512", argLength: 3, reg: fp2kfp, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPANDQMasked512", argLength: 3, reg: fp2kfp, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPANDNQMasked512", argLength: 3, reg: fp2kfp, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXSQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINSQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULDQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULLQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULLQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPORQMasked512", argLength: 3, reg: fp2kfp, asm: "VPORQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPOPCNTQMasked512", argLength: 2, reg: fpkfp, asm: "VPOPCNTQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPROLVQMasked512", argLength: 3, reg: fp2kfp, asm: "VPROLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORVQMasked512", argLength: 3, reg: fp2kfp, asm: "VPRORVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSLLQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSLLQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRAQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSLLVQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSLLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDVQMasked512", argLength: 4, reg: fp3kfp, asm: "VPSHLDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSRLVQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDVQMasked512", argLength: 4, reg: fp3kfp, asm: "VPSHRDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSRAVQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRAVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPXORQMasked512", argLength: 3, reg: fp2kfp, asm: "VPXORQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPXORQMasked256", argLength: 3, reg: fp2kfp, asm: "VPXORQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPABSQ512", argLength: 1, reg: fp11, asm: "VPABSQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPABSQMasked512", argLength: 2, reg: fpkfp, asm: "VPABSQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDQ512", argLength: 2, reg: fp21, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDQMasked512", argLength: 3, reg: fp2kfp, asm: "VPADDQ", commutative: true, 
typ: "Vec512", resultInArg0: false}, + {name: "VPANDQ512", argLength: 2, reg: fp21, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDQMasked512", argLength: 3, reg: fp2kfp, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDNQ512", argLength: 2, reg: fp21, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPANDNQMasked512", argLength: 3, reg: fp2kfp, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPMAXSQ512", argLength: 2, reg: fp21, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINSQ512", argLength: 2, reg: fp21, asm: "VPMINSQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINSQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULDQ512", argLength: 2, reg: fp21, asm: "VPMULDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULDQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULDQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULLQ512", argLength: 2, reg: fp21, asm: "VPMULLQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULLQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPORQ512", argLength: 2, reg: fp21, asm: "VPORQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPORQMasked512", argLength: 3, reg: fp2kfp, asm: "VPORQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPOPCNTQ512", argLength: 1, reg: fp11, asm: "VPOPCNTQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTQMasked512", argLength: 2, reg: fpkfp, asm: "VPOPCNTQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPROLVQ512", argLength: 2, reg: fp21, asm: "VPROLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLVQMasked512", argLength: 3, reg: fp2kfp, asm: "VPROLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPRORVQ512", argLength: 2, reg: fp21, asm: "VPRORVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORVQMasked512", argLength: 3, reg: fp2kfp, asm: "VPRORVQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSLLQ512", argLength: 2, reg: fp21, asm: "VPSLLQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSLLQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRLQ512", argLength: 2, reg: fp21, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRAQ512", argLength: 2, reg: fp21, asm: "VPSRAQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRAQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSLLVQ512", argLength: 2, reg: fp21, asm: "VPSLLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSHLDVQ512", argLength: 3, reg: fp31, asm: "VPSHLDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHLDVQMasked512", argLength: 4, reg: fp3kfp, asm: "VPSHLDVQ", 
commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSLLVQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSLLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRLVQ512", argLength: 2, reg: fp21, asm: "VPSRLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSHRDVQ512", argLength: 3, reg: fp31, asm: "VPSHRDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHRDVQMasked512", argLength: 4, reg: fp3kfp, asm: "VPSHRDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRLVQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRAVQ512", argLength: 2, reg: fp21, asm: "VPSRAVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAVQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRAVQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSUBQ512", argLength: 2, reg: fp21, asm: "VPSUBQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPXORQ512", argLength: 2, reg: fp21, asm: "VPXORQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPXORQMasked512", argLength: 3, reg: fp2kfp, asm: "VPXORQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPABSB128", argLength: 1, reg: fp11, asm: "VPABSB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSBMasked128", argLength: 2, reg: fpkfp, asm: "VPABSB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDB128", argLength: 2, reg: fp21, asm: "VPADDB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDBMasked128", argLength: 3, reg: fp2kfp, asm: "VPADDB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPAND128", argLength: 2, reg: fp21, asm: "VPAND", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPANDN128", argLength: 2, reg: fp21, asm: "VPANDN", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPEQB128", argLength: 2, reg: fp21, asm: "VPCMPEQB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPCMPGTB128", argLength: 2, reg: fp21, asm: "VPCMPGTB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPABSBMasked128", argLength: 2, reg: fpkfp, asm: "VPABSB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPADDBMasked128", argLength: 3, reg: fp2kfp, asm: "VPADDB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXSBMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXSB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINSBMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINSB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTBMasked128", argLength: 2, reg: fpkfp, asm: "VPOPCNTB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPADDSBMasked128", argLength: 3, reg: fp2kfp, asm: "VPADDSB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBSBMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBSB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBBMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMAXSB128", argLength: 2, reg: fp21, asm: "VPMAXSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSBMasked128", argLength: 
3, reg: fp2kfp, asm: "VPMAXSB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINSB128", argLength: 2, reg: fp21, asm: "VPMINSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSBMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINSB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPOR128", argLength: 2, reg: fp21, asm: "VPOR", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPOPCNTB128", argLength: 1, reg: fp11, asm: "VPOPCNTB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTBMasked128", argLength: 2, reg: fpkfp, asm: "VPOPCNTB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDSB128", argLength: 2, reg: fp21, asm: "VPADDSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDSBMasked128", argLength: 3, reg: fp2kfp, asm: "VPADDSB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPSUBSB128", argLength: 2, reg: fp21, asm: "VPSUBSB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBSBMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBSB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSIGNB128", argLength: 2, reg: fp21, asm: "VPSIGNB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBB128", argLength: 2, reg: fp21, asm: "VPSUBB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBBMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPXOR128", argLength: 2, reg: fp21, asm: "VPXOR", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPABSB256", argLength: 1, reg: fp11, asm: "VPABSB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSBMasked256", argLength: 2, reg: fpkfp, asm: "VPABSB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPADDB256", argLength: 2, reg: fp21, asm: "VPADDB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDBMasked256", argLength: 3, reg: fp2kfp, asm: "VPADDB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPAND256", argLength: 2, reg: fp21, asm: "VPAND", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPANDN256", argLength: 2, reg: fp21, asm: "VPANDN", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPEQB256", argLength: 2, reg: fp21, asm: "VPCMPEQB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPCMPGTB256", argLength: 2, reg: fp21, asm: "VPCMPGTB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPABSBMasked256", argLength: 2, reg: fpkfp, asm: "VPABSB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPADDBMasked256", argLength: 3, reg: fp2kfp, asm: "VPADDB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXSBMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXSB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINSBMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINSB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTBMasked256", argLength: 2, reg: fpkfp, asm: "VPOPCNTB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPADDSBMasked256", argLength: 3, reg: fp2kfp, asm: "VPADDSB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBSBMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBSB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBBMasked256", 
argLength: 3, reg: fp2kfp, asm: "VPSUBB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMAXSB256", argLength: 2, reg: fp21, asm: "VPMAXSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSBMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXSB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINSB256", argLength: 2, reg: fp21, asm: "VPMINSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSBMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINSB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPOR256", argLength: 2, reg: fp21, asm: "VPOR", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPOPCNTB256", argLength: 1, reg: fp11, asm: "VPOPCNTB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTBMasked256", argLength: 2, reg: fpkfp, asm: "VPOPCNTB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPADDSB256", argLength: 2, reg: fp21, asm: "VPADDSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDSBMasked256", argLength: 3, reg: fp2kfp, asm: "VPADDSB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPSUBSB256", argLength: 2, reg: fp21, asm: "VPSUBSB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBSBMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBSB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSIGNB256", argLength: 2, reg: fp21, asm: "VPSIGNB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBB256", argLength: 2, reg: fp21, asm: "VPSUBB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBBMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPXOR256", argLength: 2, reg: fp21, asm: "VPXOR", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPABSB512", argLength: 1, reg: fp11, asm: "VPABSB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDB512", argLength: 2, reg: fp21, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPABSBMasked512", argLength: 2, reg: fpkfp, asm: "VPABSB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDB512", argLength: 2, reg: fp21, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPADDBMasked512", argLength: 3, reg: fp2kfp, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXSBMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSBMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINSB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPOPCNTBMasked512", argLength: 2, reg: fpkfp, asm: "VPOPCNTB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDSBMasked512", argLength: 3, reg: fp2kfp, asm: "VPADDSB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBSBMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBSB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBBMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBB", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPMAXSB512", argLength: 2, reg: fp21, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSBMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: 
"VPMINSB512", argLength: 2, reg: fp21, asm: "VPMINSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSBMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINSB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPOPCNTB512", argLength: 1, reg: fp11, asm: "VPOPCNTB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTBMasked512", argLength: 2, reg: fpkfp, asm: "VPOPCNTB", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPADDSB512", argLength: 2, reg: fp21, asm: "VPADDSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDSBMasked512", argLength: 3, reg: fp2kfp, asm: "VPADDSB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPSUBSB512", argLength: 2, reg: fp21, asm: "VPSUBSB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBSBMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBSB", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSUBB512", argLength: 2, reg: fp21, asm: "VPSUBB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBBMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBB", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPAVGW256", argLength: 2, reg: fp21, asm: "VPAVGW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPAVGWMasked256", argLength: 3, reg: fp2kfp, asm: "VPAVGW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXUW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULHUWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMAXUW256", argLength: 2, reg: fp21, asm: "VPMAXUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXUW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUW256", argLength: 2, reg: fp21, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULHUW256", argLength: 2, reg: fp21, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULHUWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPAVGW512", argLength: 2, reg: fp21, asm: "VPAVGW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPAVGWMasked512", argLength: 3, reg: fp2kfp, asm: "VPAVGW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXUWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULHUWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXUW512", argLength: 2, reg: fp21, asm: "VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINUW512", argLength: 2, reg: fp21, asm: "VPMINUW", commutative: true, typ: 
"Vec512", resultInArg0: false}, + {name: "VPMINUWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULHUW512", argLength: 2, reg: fp21, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULHUWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPAVGW128", argLength: 2, reg: fp21, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPAVGWMasked128", argLength: 3, reg: fp2kfp, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXUW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULHUWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMAXUW128", argLength: 2, reg: fp21, asm: "VPMAXUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXUW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUW128", argLength: 2, reg: fp21, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULHUW128", argLength: 2, reg: fp21, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUDMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXUD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUDMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULHUWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMAXUD512", argLength: 2, reg: fp21, asm: "VPMAXUD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUDMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXUD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINUD512", argLength: 2, reg: fp21, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXUDMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXUD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUDMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUDMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXUD128", argLength: 2, reg: fp21, asm: "VPMAXUD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUDMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXUD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUD128", argLength: 2, reg: fp21, asm: "VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUDMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULUDQ128", argLength: 2, reg: fp21, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUDMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXUD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUDMasked256", 
argLength: 3, reg: fp2kfp, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMAXUD256", argLength: 2, reg: fp21, asm: "VPMAXUD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUDMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXUD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUD256", argLength: 2, reg: fp21, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUDMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULUDQ256", argLength: 2, reg: fp21, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUQ128", argLength: 2, reg: fp21, asm: "VPMAXUQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMAXUQMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXUQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUQ128", argLength: 2, reg: fp21, asm: "VPMINUQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUQMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINUQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULUDQMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUQ128", argLength: 2, reg: fp21, asm: "VPMAXUQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUQ128", argLength: 2, reg: fp21, asm: "VPMINUQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUQ256", argLength: 2, reg: fp21, asm: "VPMAXUQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMAXUQMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXUQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUQ256", argLength: 2, reg: fp21, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUQMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULUDQMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUQ256", argLength: 2, reg: fp21, asm: "VPMAXUQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUQ256", argLength: 2, reg: fp21, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXUQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULUDQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXUQ512", argLength: 2, reg: fp21, asm: "VPMAXUQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXUQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINUQ512", argLength: 2, reg: fp21, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULUDQ512", argLength: 2, reg: fp21, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULUDQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: 
false}, {name: "VPAVGB128", argLength: 2, reg: fp21, asm: "VPAVGB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VGF2P8MULB128", argLength: 2, reg: fp21, asm: "VGF2P8MULB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPAVGBMasked128", argLength: 3, reg: fp2kfp, asm: "VPAVGB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8MULB128", argLength: 2, reg: fp21, asm: "VGF2P8MULB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VGF2P8MULBMasked128", argLength: 3, reg: fp2kfp, asm: "VGF2P8MULB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUBMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXUB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUBMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMADDUBSWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMAXUB128", argLength: 2, reg: fp21, asm: "VPMAXUB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUBMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXUB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUB128", argLength: 2, reg: fp21, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUBMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMADDUBSW128", argLength: 2, reg: fp21, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMADDUBSWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPAVGB256", argLength: 2, reg: fp21, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VGF2P8MULB256", argLength: 2, reg: fp21, asm: "VGF2P8MULB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPAVGBMasked256", argLength: 3, reg: fp2kfp, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8MULB256", argLength: 2, reg: fp21, asm: "VGF2P8MULB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VGF2P8MULBMasked256", argLength: 3, reg: fp2kfp, asm: "VGF2P8MULB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUBMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUBMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMADDUBSWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMAXUB256", argLength: 2, reg: fp21, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUBMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUB256", argLength: 2, reg: fp21, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUBMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMADDUBSW256", argLength: 2, reg: fp21, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMADDUBSWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: 
"VPAVGB512", argLength: 2, reg: fp21, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VGF2P8MULB512", argLength: 2, reg: fp21, asm: "VGF2P8MULB", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPAVGBMasked512", argLength: 3, reg: fp2kfp, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VGF2P8MULB512", argLength: 2, reg: fp21, asm: "VGF2P8MULB", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VGF2P8MULBMasked512", argLength: 3, reg: fp2kfp, asm: "VGF2P8MULB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXUBMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUBMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMADDUBSWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPMAXUB512", argLength: 2, reg: fp21, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUBMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINUB512", argLength: 2, reg: fp21, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUBMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMADDUBSW512", argLength: 2, reg: fp21, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMADDUBSWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRNDSCALEPS512", argLength: 1, reg: fp11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VREDUCEPS512", argLength: 1, reg: fp11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VCMPPS512", argLength: 2, reg: fp2k, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VRNDSCALEPSMasked512", argLength: 2, reg: fpkfp, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VREDUCEPS512", argLength: 1, reg: fp11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VREDUCEPSMasked512", argLength: 2, reg: fpkfp, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VCMPPS512", argLength: 2, reg: fp2k, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VCMPPSMasked512", argLength: 3, reg: fp2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VROUNDPS128", argLength: 1, reg: fp11, asm: "VROUNDPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRNDSCALEPS128", argLength: 1, reg: fp11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VREDUCEPS128", argLength: 1, reg: fp11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VCMPPS128", argLength: 2, reg: fp21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VRNDSCALEPSMasked128", argLength: 2, reg: fpkfp, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + 
{name: "VREDUCEPS128", argLength: 1, reg: fp11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VREDUCEPSMasked128", argLength: 2, reg: fpkfp, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VCMPPS128", argLength: 2, reg: fp21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VCMPPSMasked128", argLength: 3, reg: fp2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VROUNDPS256", argLength: 1, reg: fp11, asm: "VROUNDPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRNDSCALEPS256", argLength: 1, reg: fp11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VREDUCEPS256", argLength: 1, reg: fp11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VCMPPS256", argLength: 2, reg: fp21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VEXTRACTF128128", argLength: 1, reg: fp11, asm: "VEXTRACTF128", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRNDSCALEPSMasked256", argLength: 2, reg: fpkfp, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPS256", argLength: 1, reg: fp11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VREDUCEPSMasked256", argLength: 2, reg: fpkfp, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VCMPPS256", argLength: 2, reg: fp21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VCMPPSMasked256", argLength: 3, reg: fp2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VEXTRACTF128128", argLength: 1, reg: fp11, asm: "VEXTRACTF128", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VINSERTF128256", argLength: 2, reg: fp21, asm: "VINSERTF128", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VROUNDPD128", argLength: 1, reg: fp11, asm: "VROUNDPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRNDSCALEPD128", argLength: 1, reg: fp11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRNDSCALEPDMasked128", argLength: 2, reg: fpkfp, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VREDUCEPD128", argLength: 1, reg: fp11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VREDUCEPDMasked128", argLength: 2, reg: fpkfp, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDPPD128", argLength: 2, reg: fp21, asm: "VDPPD", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VCMPPD128", argLength: 2, reg: fp21, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VRNDSCALEPDMasked128", argLength: 2, reg: fpkfp, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VREDUCEPDMasked128", argLength: 2, reg: fpkfp, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VCMPPDMasked128", argLength: 3, reg: fp2kk, asm: "VCMPPD", aux: "Int8", commutative: 
true, typ: "Mask", resultInArg0: false}, {name: "VROUNDPD256", argLength: 1, reg: fp11, asm: "VROUNDPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRNDSCALEPD256", argLength: 1, reg: fp11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VREDUCEPD256", argLength: 1, reg: fp11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VCMPPD256", argLength: 2, reg: fp21, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VRNDSCALEPDMasked256", argLength: 2, reg: fpkfp, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPD256", argLength: 1, reg: fp11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VREDUCEPDMasked256", argLength: 2, reg: fpkfp, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VCMPPD256", argLength: 2, reg: fp21, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VCMPPDMasked256", argLength: 3, reg: fp2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VRNDSCALEPD512", argLength: 1, reg: fp11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VREDUCEPD512", argLength: 1, reg: fp11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VCMPPD512", argLength: 2, reg: fp2k, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VRNDSCALEPDMasked512", argLength: 2, reg: fpkfp, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VREDUCEPD512", argLength: 1, reg: fp11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VREDUCEPDMasked512", argLength: 2, reg: fpkfp, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VCMPPD512", argLength: 2, reg: fp2k, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VCMPPDMasked512", argLength: 3, reg: fp2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPW256", argLength: 2, reg: fp2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPWMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPSHLDWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPW256", argLength: 2, reg: fp2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPSHLDW256", argLength: 2, reg: fp21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHRDW256", argLength: 2, reg: fp21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSHRDW", aux: "Int8", 
commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPW512", argLength: 2, reg: fp2k, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPWMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPSHLDWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSHLDW512", argLength: 2, reg: fp21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSHRDW512", argLength: 2, reg: fp21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPWMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPEXTRW128", argLength: 1, reg: fpgp, asm: "VPEXTRW", aux: "Int8", commutative: false, typ: "int16", resultInArg0: false}, {name: "VPCMPW128", argLength: 2, reg: fp2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPWMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPSHLDWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPINSRW128", argLength: 2, reg: fpgpfp, asm: "VPINSRW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSHLDW128", argLength: 2, reg: fp21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSHRDW128", argLength: 2, reg: fp21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPD512", argLength: 2, reg: fp2k, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPDMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPROLDMasked512", argLength: 2, reg: fpkfp, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORDMasked512", argLength: 2, reg: fpkfp, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDDMasked512", argLength: 3, reg: fp2kfp, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDDMasked512", argLength: 3, reg: fp2kfp, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPROLD512", argLength: 1, reg: fp11, asm: "VPROLD", aux: "Int8", 
commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLDMasked512", argLength: 2, reg: fpkfp, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPRORD512", argLength: 1, reg: fp11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORDMasked512", argLength: 2, reg: fpkfp, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSHLDD512", argLength: 2, reg: fp21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDDMasked512", argLength: 3, reg: fp2kfp, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSHRDD512", argLength: 2, reg: fp21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDDMasked512", argLength: 3, reg: fp2kfp, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPDMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPEXTRD128", argLength: 1, reg: fpgp, asm: "VPEXTRD", aux: "Int8", commutative: false, typ: "int32", resultInArg0: false}, {name: "VPCMPD128", argLength: 2, reg: fp2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPDMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPROLDMasked128", argLength: 2, reg: fpkfp, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORDMasked128", argLength: 2, reg: fpkfp, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDDMasked128", argLength: 3, reg: fp2kfp, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDDMasked128", argLength: 3, reg: fp2kfp, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPROLD128", argLength: 1, reg: fp11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLDMasked128", argLength: 2, reg: fpkfp, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPRORD128", argLength: 1, reg: fp11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORDMasked128", argLength: 2, reg: fpkfp, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPINSRD128", argLength: 2, reg: fpgpfp, asm: "VPINSRD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSHLDD128", argLength: 2, reg: fp21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDDMasked128", argLength: 3, reg: fp2kfp, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSHRDD128", argLength: 2, reg: fp21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPD256", argLength: 2, reg: fp2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPSHRDDMasked128", argLength: 3, reg: fp2kfp, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPDMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPD", aux: "Int8", commutative: true, 
typ: "Mask", resultInArg0: false}, - {name: "VPROLDMasked256", argLength: 2, reg: fpkfp, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORDMasked256", argLength: 2, reg: fpkfp, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDDMasked256", argLength: 3, reg: fp2kfp, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDDMasked256", argLength: 3, reg: fp2kfp, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPD256", argLength: 2, reg: fp2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPROLD256", argLength: 1, reg: fp11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLDMasked256", argLength: 2, reg: fpkfp, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPRORD256", argLength: 1, reg: fp11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORDMasked256", argLength: 2, reg: fpkfp, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHLDD256", argLength: 2, reg: fp21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDDMasked256", argLength: 3, reg: fp2kfp, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHRDD256", argLength: 2, reg: fp21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDDMasked256", argLength: 3, reg: fp2kfp, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPQMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPEXTRQ128", argLength: 1, reg: fpgp, asm: "VPEXTRQ", aux: "Int8", commutative: false, typ: "int64", resultInArg0: false}, {name: "VPCMPQ128", argLength: 2, reg: fp2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPQMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPROLQMasked128", argLength: 2, reg: fpkfp, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORQMasked128", argLength: 2, reg: fpkfp, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPROLQ128", argLength: 1, reg: fp11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLQMasked128", argLength: 2, reg: fpkfp, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPRORQ128", argLength: 1, reg: fp11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORQMasked128", argLength: 2, reg: fpkfp, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPINSRQ128", argLength: 2, reg: fpgpfp, asm: "VPINSRQ", aux: "Int8", commutative: false, typ: 
"Vec128", resultInArg0: false}, {name: "VPSHLDQ128", argLength: 2, reg: fp21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSHRDQ128", argLength: 2, reg: fp21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPQ256", argLength: 2, reg: fp2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPSHRDQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPQMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPROLQMasked256", argLength: 2, reg: fpkfp, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORQMasked256", argLength: 2, reg: fpkfp, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPQ256", argLength: 2, reg: fp2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPROLQ256", argLength: 1, reg: fp11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLQMasked256", argLength: 2, reg: fpkfp, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPRORQ256", argLength: 1, reg: fp11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORQMasked256", argLength: 2, reg: fpkfp, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHLDQ256", argLength: 2, reg: fp21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHRDQ256", argLength: 2, reg: fp21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPQ512", argLength: 2, reg: fp2k, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPQMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPROLQMasked512", argLength: 2, reg: fpkfp, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORQMasked512", argLength: 2, reg: fpkfp, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPROLQ512", argLength: 1, reg: fp11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec512", 
resultInArg0: false}, + {name: "VPROLQMasked512", argLength: 2, reg: fpkfp, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPRORQ512", argLength: 1, reg: fp11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORQMasked512", argLength: 2, reg: fpkfp, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSHLDQ512", argLength: 2, reg: fp21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSHRDQ512", argLength: 2, reg: fp21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPBMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPEXTRB128", argLength: 1, reg: fpgp, asm: "VPEXTRB", aux: "Int8", commutative: false, typ: "int8", resultInArg0: false}, {name: "VPCMPB128", argLength: 2, reg: fp2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPBMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPINSRB128", argLength: 2, reg: fpgpfp, asm: "VPINSRB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPBMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VEXTRACTI128128", argLength: 1, reg: fp11, asm: "VEXTRACTI128", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPB256", argLength: 2, reg: fp2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPBMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VINSERTI128256", argLength: 2, reg: fp21, asm: "VINSERTI128", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPB512", argLength: 2, reg: fp2k, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPBMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, @@ -856,22 +856,22 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPCMPUQ512", argLength: 2, reg: fp2k, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUQMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUB128", argLength: 2, reg: fp2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUBMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VGF2P8AFFINEQB128", argLength: 2, reg: fp21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VGF2P8AFFINEINVQB128", argLength: 2, reg: fp21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPUBMasked128", 
argLength: 3, reg: fp2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VGF2P8AFFINEQBMasked128", argLength: 3, reg: fp2kfp, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VGF2P8AFFINEINVQBMasked128", argLength: 3, reg: fp2kfp, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8AFFINEQBMasked128", argLength: 3, reg: fp2kfp, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPUB256", argLength: 2, reg: fp2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUBMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VGF2P8AFFINEQB256", argLength: 2, reg: fp21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VGF2P8AFFINEINVQB256", argLength: 2, reg: fp21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPUBMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VGF2P8AFFINEQBMasked256", argLength: 3, reg: fp2kfp, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VGF2P8AFFINEINVQBMasked256", argLength: 3, reg: fp2kfp, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8AFFINEQBMasked256", argLength: 3, reg: fp2kfp, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPUB512", argLength: 2, reg: fp2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUBMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VGF2P8AFFINEQB512", argLength: 2, reg: fp21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VGF2P8AFFINEINVQB512", argLength: 2, reg: fp21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPCMPUBMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VGF2P8AFFINEQBMasked512", argLength: 3, reg: fp2kfp, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VGF2P8AFFINEINVQBMasked512", argLength: 3, reg: fp2kfp, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VGF2P8AFFINEQBMasked512", argLength: 3, reg: fp2kfp, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, } } diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 54c247eab19932..1079321da71b31 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -4,1681 +4,1681 @@ package main func simdGenericOps() []opData { return []opData{ {name: "AddFloat32x16", argLength: 2, commutative: true}, + {name: "AddMaskedFloat32x16", argLength: 3, commutative: true}, {name: "ApproximateReciprocalFloat32x16", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalMaskedFloat32x16", 
argLength: 2, commutative: false}, {name: "ApproximateReciprocalOfSqrtFloat32x16", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalOfSqrtMaskedFloat32x16", argLength: 2, commutative: false}, {name: "DivFloat32x16", argLength: 2, commutative: false}, + {name: "DivMaskedFloat32x16", argLength: 3, commutative: false}, {name: "EqualFloat32x16", argLength: 2, commutative: true}, + {name: "EqualMaskedFloat32x16", argLength: 3, commutative: true}, {name: "FusedMultiplyAddFloat32x16", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddMaskedFloat32x16", argLength: 4, commutative: false}, {name: "FusedMultiplyAddSubFloat32x16", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSubMaskedFloat32x16", argLength: 4, commutative: false}, {name: "FusedMultiplySubAddFloat32x16", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAddMaskedFloat32x16", argLength: 4, commutative: false}, {name: "GreaterFloat32x16", argLength: 2, commutative: false}, {name: "GreaterEqualFloat32x16", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedFloat32x16", argLength: 3, commutative: false}, + {name: "GreaterMaskedFloat32x16", argLength: 3, commutative: false}, {name: "IsNanFloat32x16", argLength: 2, commutative: true}, + {name: "IsNanMaskedFloat32x16", argLength: 3, commutative: true}, {name: "LessFloat32x16", argLength: 2, commutative: false}, {name: "LessEqualFloat32x16", argLength: 2, commutative: false}, - {name: "MaskedAddFloat32x16", argLength: 3, commutative: true}, - {name: "MaskedApproximateReciprocalFloat32x16", argLength: 2, commutative: false}, - {name: "MaskedApproximateReciprocalOfSqrtFloat32x16", argLength: 2, commutative: false}, - {name: "MaskedDivFloat32x16", argLength: 3, commutative: false}, - {name: "MaskedEqualFloat32x16", argLength: 3, commutative: true}, - {name: "MaskedFusedMultiplyAddFloat32x16", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAddSubFloat32x16", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySubAddFloat32x16", argLength: 4, commutative: false}, - {name: "MaskedGreaterFloat32x16", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualFloat32x16", argLength: 3, commutative: false}, - {name: "MaskedIsNanFloat32x16", argLength: 3, commutative: true}, - {name: "MaskedLessFloat32x16", argLength: 3, commutative: false}, - {name: "MaskedLessEqualFloat32x16", argLength: 3, commutative: false}, - {name: "MaskedMaxFloat32x16", argLength: 3, commutative: true}, - {name: "MaskedMinFloat32x16", argLength: 3, commutative: true}, - {name: "MaskedMulFloat32x16", argLength: 3, commutative: true}, - {name: "MaskedMulByPowOf2Float32x16", argLength: 3, commutative: false}, - {name: "MaskedNotEqualFloat32x16", argLength: 3, commutative: true}, - {name: "MaskedSqrtFloat32x16", argLength: 2, commutative: false}, - {name: "MaskedSubFloat32x16", argLength: 3, commutative: false}, + {name: "LessEqualMaskedFloat32x16", argLength: 3, commutative: false}, + {name: "LessMaskedFloat32x16", argLength: 3, commutative: false}, {name: "MaxFloat32x16", argLength: 2, commutative: true}, + {name: "MaxMaskedFloat32x16", argLength: 3, commutative: true}, {name: "MinFloat32x16", argLength: 2, commutative: true}, + {name: "MinMaskedFloat32x16", argLength: 3, commutative: true}, {name: "MulFloat32x16", argLength: 2, commutative: true}, {name: "MulByPowOf2Float32x16", argLength: 2, commutative: false}, + {name: "MulByPowOf2MaskedFloat32x16", argLength: 3, commutative: false}, + {name: "MulMaskedFloat32x16", argLength: 3, 
commutative: true}, {name: "NotEqualFloat32x16", argLength: 2, commutative: true}, + {name: "NotEqualMaskedFloat32x16", argLength: 3, commutative: true}, {name: "SqrtFloat32x16", argLength: 1, commutative: false}, + {name: "SqrtMaskedFloat32x16", argLength: 2, commutative: false}, {name: "SubFloat32x16", argLength: 2, commutative: false}, + {name: "SubMaskedFloat32x16", argLength: 3, commutative: false}, {name: "AddFloat32x4", argLength: 2, commutative: true}, + {name: "AddMaskedFloat32x4", argLength: 3, commutative: true}, {name: "AddSubFloat32x4", argLength: 2, commutative: false}, {name: "ApproximateReciprocalFloat32x4", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalMaskedFloat32x4", argLength: 2, commutative: false}, {name: "ApproximateReciprocalOfSqrtFloat32x4", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalOfSqrtMaskedFloat32x4", argLength: 2, commutative: false}, {name: "CeilFloat32x4", argLength: 1, commutative: false}, {name: "DivFloat32x4", argLength: 2, commutative: false}, + {name: "DivMaskedFloat32x4", argLength: 3, commutative: false}, {name: "EqualFloat32x4", argLength: 2, commutative: true}, + {name: "EqualMaskedFloat32x4", argLength: 3, commutative: true}, {name: "FloorFloat32x4", argLength: 1, commutative: false}, {name: "FusedMultiplyAddFloat32x4", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddMaskedFloat32x4", argLength: 4, commutative: false}, {name: "FusedMultiplyAddSubFloat32x4", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSubMaskedFloat32x4", argLength: 4, commutative: false}, {name: "FusedMultiplySubAddFloat32x4", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAddMaskedFloat32x4", argLength: 4, commutative: false}, {name: "GreaterFloat32x4", argLength: 2, commutative: false}, {name: "GreaterEqualFloat32x4", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedFloat32x4", argLength: 3, commutative: false}, + {name: "GreaterMaskedFloat32x4", argLength: 3, commutative: false}, {name: "IsNanFloat32x4", argLength: 2, commutative: true}, + {name: "IsNanMaskedFloat32x4", argLength: 3, commutative: true}, {name: "LessFloat32x4", argLength: 2, commutative: false}, {name: "LessEqualFloat32x4", argLength: 2, commutative: false}, - {name: "MaskedAddFloat32x4", argLength: 3, commutative: true}, - {name: "MaskedApproximateReciprocalFloat32x4", argLength: 2, commutative: false}, - {name: "MaskedApproximateReciprocalOfSqrtFloat32x4", argLength: 2, commutative: false}, - {name: "MaskedDivFloat32x4", argLength: 3, commutative: false}, - {name: "MaskedEqualFloat32x4", argLength: 3, commutative: true}, - {name: "MaskedFusedMultiplyAddFloat32x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAddSubFloat32x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySubAddFloat32x4", argLength: 4, commutative: false}, - {name: "MaskedGreaterFloat32x4", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualFloat32x4", argLength: 3, commutative: false}, - {name: "MaskedIsNanFloat32x4", argLength: 3, commutative: true}, - {name: "MaskedLessFloat32x4", argLength: 3, commutative: false}, - {name: "MaskedLessEqualFloat32x4", argLength: 3, commutative: false}, - {name: "MaskedMaxFloat32x4", argLength: 3, commutative: true}, - {name: "MaskedMinFloat32x4", argLength: 3, commutative: true}, - {name: "MaskedMulFloat32x4", argLength: 3, commutative: true}, - {name: "MaskedMulByPowOf2Float32x4", argLength: 3, commutative: false}, - {name: "MaskedNotEqualFloat32x4", 
argLength: 3, commutative: true}, - {name: "MaskedSqrtFloat32x4", argLength: 2, commutative: false}, - {name: "MaskedSubFloat32x4", argLength: 3, commutative: false}, + {name: "LessEqualMaskedFloat32x4", argLength: 3, commutative: false}, + {name: "LessMaskedFloat32x4", argLength: 3, commutative: false}, {name: "MaxFloat32x4", argLength: 2, commutative: true}, + {name: "MaxMaskedFloat32x4", argLength: 3, commutative: true}, {name: "MinFloat32x4", argLength: 2, commutative: true}, + {name: "MinMaskedFloat32x4", argLength: 3, commutative: true}, {name: "MulFloat32x4", argLength: 2, commutative: true}, {name: "MulByPowOf2Float32x4", argLength: 2, commutative: false}, + {name: "MulByPowOf2MaskedFloat32x4", argLength: 3, commutative: false}, + {name: "MulMaskedFloat32x4", argLength: 3, commutative: true}, {name: "NotEqualFloat32x4", argLength: 2, commutative: true}, + {name: "NotEqualMaskedFloat32x4", argLength: 3, commutative: true}, {name: "PairwiseAddFloat32x4", argLength: 2, commutative: false}, {name: "PairwiseSubFloat32x4", argLength: 2, commutative: false}, {name: "RoundFloat32x4", argLength: 1, commutative: false}, {name: "SqrtFloat32x4", argLength: 1, commutative: false}, + {name: "SqrtMaskedFloat32x4", argLength: 2, commutative: false}, {name: "SubFloat32x4", argLength: 2, commutative: false}, + {name: "SubMaskedFloat32x4", argLength: 3, commutative: false}, {name: "TruncFloat32x4", argLength: 1, commutative: false}, {name: "AddFloat32x8", argLength: 2, commutative: true}, + {name: "AddMaskedFloat32x8", argLength: 3, commutative: true}, {name: "AddSubFloat32x8", argLength: 2, commutative: false}, {name: "ApproximateReciprocalFloat32x8", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalMaskedFloat32x8", argLength: 2, commutative: false}, {name: "ApproximateReciprocalOfSqrtFloat32x8", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalOfSqrtMaskedFloat32x8", argLength: 2, commutative: false}, {name: "CeilFloat32x8", argLength: 1, commutative: false}, {name: "DivFloat32x8", argLength: 2, commutative: false}, + {name: "DivMaskedFloat32x8", argLength: 3, commutative: false}, {name: "EqualFloat32x8", argLength: 2, commutative: true}, + {name: "EqualMaskedFloat32x8", argLength: 3, commutative: true}, {name: "FloorFloat32x8", argLength: 1, commutative: false}, {name: "FusedMultiplyAddFloat32x8", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddMaskedFloat32x8", argLength: 4, commutative: false}, {name: "FusedMultiplyAddSubFloat32x8", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSubMaskedFloat32x8", argLength: 4, commutative: false}, {name: "FusedMultiplySubAddFloat32x8", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAddMaskedFloat32x8", argLength: 4, commutative: false}, {name: "GreaterFloat32x8", argLength: 2, commutative: false}, {name: "GreaterEqualFloat32x8", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedFloat32x8", argLength: 3, commutative: false}, + {name: "GreaterMaskedFloat32x8", argLength: 3, commutative: false}, {name: "IsNanFloat32x8", argLength: 2, commutative: true}, + {name: "IsNanMaskedFloat32x8", argLength: 3, commutative: true}, {name: "LessFloat32x8", argLength: 2, commutative: false}, {name: "LessEqualFloat32x8", argLength: 2, commutative: false}, - {name: "MaskedAddFloat32x8", argLength: 3, commutative: true}, - {name: "MaskedApproximateReciprocalFloat32x8", argLength: 2, commutative: false}, - {name: "MaskedApproximateReciprocalOfSqrtFloat32x8", argLength: 2, commutative: false}, 
- {name: "MaskedDivFloat32x8", argLength: 3, commutative: false}, - {name: "MaskedEqualFloat32x8", argLength: 3, commutative: true}, - {name: "MaskedFusedMultiplyAddFloat32x8", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAddSubFloat32x8", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySubAddFloat32x8", argLength: 4, commutative: false}, - {name: "MaskedGreaterFloat32x8", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualFloat32x8", argLength: 3, commutative: false}, - {name: "MaskedIsNanFloat32x8", argLength: 3, commutative: true}, - {name: "MaskedLessFloat32x8", argLength: 3, commutative: false}, - {name: "MaskedLessEqualFloat32x8", argLength: 3, commutative: false}, - {name: "MaskedMaxFloat32x8", argLength: 3, commutative: true}, - {name: "MaskedMinFloat32x8", argLength: 3, commutative: true}, - {name: "MaskedMulFloat32x8", argLength: 3, commutative: true}, - {name: "MaskedMulByPowOf2Float32x8", argLength: 3, commutative: false}, - {name: "MaskedNotEqualFloat32x8", argLength: 3, commutative: true}, - {name: "MaskedSqrtFloat32x8", argLength: 2, commutative: false}, - {name: "MaskedSubFloat32x8", argLength: 3, commutative: false}, + {name: "LessEqualMaskedFloat32x8", argLength: 3, commutative: false}, + {name: "LessMaskedFloat32x8", argLength: 3, commutative: false}, {name: "MaxFloat32x8", argLength: 2, commutative: true}, + {name: "MaxMaskedFloat32x8", argLength: 3, commutative: true}, {name: "MinFloat32x8", argLength: 2, commutative: true}, + {name: "MinMaskedFloat32x8", argLength: 3, commutative: true}, {name: "MulFloat32x8", argLength: 2, commutative: true}, {name: "MulByPowOf2Float32x8", argLength: 2, commutative: false}, + {name: "MulByPowOf2MaskedFloat32x8", argLength: 3, commutative: false}, + {name: "MulMaskedFloat32x8", argLength: 3, commutative: true}, {name: "NotEqualFloat32x8", argLength: 2, commutative: true}, + {name: "NotEqualMaskedFloat32x8", argLength: 3, commutative: true}, {name: "PairwiseAddFloat32x8", argLength: 2, commutative: false}, {name: "PairwiseSubFloat32x8", argLength: 2, commutative: false}, {name: "RoundFloat32x8", argLength: 1, commutative: false}, {name: "SqrtFloat32x8", argLength: 1, commutative: false}, + {name: "SqrtMaskedFloat32x8", argLength: 2, commutative: false}, {name: "SubFloat32x8", argLength: 2, commutative: false}, + {name: "SubMaskedFloat32x8", argLength: 3, commutative: false}, {name: "TruncFloat32x8", argLength: 1, commutative: false}, {name: "AddFloat64x2", argLength: 2, commutative: true}, + {name: "AddMaskedFloat64x2", argLength: 3, commutative: true}, {name: "AddSubFloat64x2", argLength: 2, commutative: false}, {name: "ApproximateReciprocalFloat64x2", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalMaskedFloat64x2", argLength: 2, commutative: false}, {name: "ApproximateReciprocalOfSqrtFloat64x2", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalOfSqrtMaskedFloat64x2", argLength: 2, commutative: false}, {name: "CeilFloat64x2", argLength: 1, commutative: false}, {name: "DivFloat64x2", argLength: 2, commutative: false}, + {name: "DivMaskedFloat64x2", argLength: 3, commutative: false}, {name: "DotProdBroadcastFloat64x2", argLength: 2, commutative: true}, {name: "EqualFloat64x2", argLength: 2, commutative: true}, + {name: "EqualMaskedFloat64x2", argLength: 3, commutative: true}, {name: "FloorFloat64x2", argLength: 1, commutative: false}, {name: "FusedMultiplyAddFloat64x2", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddMaskedFloat64x2", 
argLength: 4, commutative: false}, {name: "FusedMultiplyAddSubFloat64x2", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSubMaskedFloat64x2", argLength: 4, commutative: false}, {name: "FusedMultiplySubAddFloat64x2", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAddMaskedFloat64x2", argLength: 4, commutative: false}, {name: "GreaterFloat64x2", argLength: 2, commutative: false}, {name: "GreaterEqualFloat64x2", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedFloat64x2", argLength: 3, commutative: false}, + {name: "GreaterMaskedFloat64x2", argLength: 3, commutative: false}, {name: "IsNanFloat64x2", argLength: 2, commutative: true}, + {name: "IsNanMaskedFloat64x2", argLength: 3, commutative: true}, {name: "LessFloat64x2", argLength: 2, commutative: false}, {name: "LessEqualFloat64x2", argLength: 2, commutative: false}, - {name: "MaskedAddFloat64x2", argLength: 3, commutative: true}, - {name: "MaskedApproximateReciprocalFloat64x2", argLength: 2, commutative: false}, - {name: "MaskedApproximateReciprocalOfSqrtFloat64x2", argLength: 2, commutative: false}, - {name: "MaskedDivFloat64x2", argLength: 3, commutative: false}, - {name: "MaskedEqualFloat64x2", argLength: 3, commutative: true}, - {name: "MaskedFusedMultiplyAddFloat64x2", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAddSubFloat64x2", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySubAddFloat64x2", argLength: 4, commutative: false}, - {name: "MaskedGreaterFloat64x2", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualFloat64x2", argLength: 3, commutative: false}, - {name: "MaskedIsNanFloat64x2", argLength: 3, commutative: true}, - {name: "MaskedLessFloat64x2", argLength: 3, commutative: false}, - {name: "MaskedLessEqualFloat64x2", argLength: 3, commutative: false}, - {name: "MaskedMaxFloat64x2", argLength: 3, commutative: true}, - {name: "MaskedMinFloat64x2", argLength: 3, commutative: true}, - {name: "MaskedMulFloat64x2", argLength: 3, commutative: true}, - {name: "MaskedMulByPowOf2Float64x2", argLength: 3, commutative: false}, - {name: "MaskedNotEqualFloat64x2", argLength: 3, commutative: true}, - {name: "MaskedSqrtFloat64x2", argLength: 2, commutative: false}, - {name: "MaskedSubFloat64x2", argLength: 3, commutative: false}, + {name: "LessEqualMaskedFloat64x2", argLength: 3, commutative: false}, + {name: "LessMaskedFloat64x2", argLength: 3, commutative: false}, {name: "MaxFloat64x2", argLength: 2, commutative: true}, + {name: "MaxMaskedFloat64x2", argLength: 3, commutative: true}, {name: "MinFloat64x2", argLength: 2, commutative: true}, + {name: "MinMaskedFloat64x2", argLength: 3, commutative: true}, {name: "MulFloat64x2", argLength: 2, commutative: true}, {name: "MulByPowOf2Float64x2", argLength: 2, commutative: false}, + {name: "MulByPowOf2MaskedFloat64x2", argLength: 3, commutative: false}, + {name: "MulMaskedFloat64x2", argLength: 3, commutative: true}, {name: "NotEqualFloat64x2", argLength: 2, commutative: true}, + {name: "NotEqualMaskedFloat64x2", argLength: 3, commutative: true}, {name: "PairwiseAddFloat64x2", argLength: 2, commutative: false}, {name: "PairwiseSubFloat64x2", argLength: 2, commutative: false}, {name: "RoundFloat64x2", argLength: 1, commutative: false}, {name: "SqrtFloat64x2", argLength: 1, commutative: false}, + {name: "SqrtMaskedFloat64x2", argLength: 2, commutative: false}, {name: "SubFloat64x2", argLength: 2, commutative: false}, + {name: "SubMaskedFloat64x2", argLength: 3, commutative: false}, {name: "TruncFloat64x2", 
argLength: 1, commutative: false}, {name: "AddFloat64x4", argLength: 2, commutative: true}, + {name: "AddMaskedFloat64x4", argLength: 3, commutative: true}, {name: "AddSubFloat64x4", argLength: 2, commutative: false}, {name: "ApproximateReciprocalFloat64x4", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalMaskedFloat64x4", argLength: 2, commutative: false}, {name: "ApproximateReciprocalOfSqrtFloat64x4", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalOfSqrtMaskedFloat64x4", argLength: 2, commutative: false}, {name: "CeilFloat64x4", argLength: 1, commutative: false}, {name: "DivFloat64x4", argLength: 2, commutative: false}, + {name: "DivMaskedFloat64x4", argLength: 3, commutative: false}, {name: "EqualFloat64x4", argLength: 2, commutative: true}, + {name: "EqualMaskedFloat64x4", argLength: 3, commutative: true}, {name: "FloorFloat64x4", argLength: 1, commutative: false}, {name: "FusedMultiplyAddFloat64x4", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddMaskedFloat64x4", argLength: 4, commutative: false}, {name: "FusedMultiplyAddSubFloat64x4", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSubMaskedFloat64x4", argLength: 4, commutative: false}, {name: "FusedMultiplySubAddFloat64x4", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAddMaskedFloat64x4", argLength: 4, commutative: false}, {name: "GreaterFloat64x4", argLength: 2, commutative: false}, {name: "GreaterEqualFloat64x4", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedFloat64x4", argLength: 3, commutative: false}, + {name: "GreaterMaskedFloat64x4", argLength: 3, commutative: false}, {name: "IsNanFloat64x4", argLength: 2, commutative: true}, + {name: "IsNanMaskedFloat64x4", argLength: 3, commutative: true}, {name: "LessFloat64x4", argLength: 2, commutative: false}, {name: "LessEqualFloat64x4", argLength: 2, commutative: false}, - {name: "MaskedAddFloat64x4", argLength: 3, commutative: true}, - {name: "MaskedApproximateReciprocalFloat64x4", argLength: 2, commutative: false}, - {name: "MaskedApproximateReciprocalOfSqrtFloat64x4", argLength: 2, commutative: false}, - {name: "MaskedDivFloat64x4", argLength: 3, commutative: false}, - {name: "MaskedEqualFloat64x4", argLength: 3, commutative: true}, - {name: "MaskedFusedMultiplyAddFloat64x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAddSubFloat64x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySubAddFloat64x4", argLength: 4, commutative: false}, - {name: "MaskedGreaterFloat64x4", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualFloat64x4", argLength: 3, commutative: false}, - {name: "MaskedIsNanFloat64x4", argLength: 3, commutative: true}, - {name: "MaskedLessFloat64x4", argLength: 3, commutative: false}, - {name: "MaskedLessEqualFloat64x4", argLength: 3, commutative: false}, - {name: "MaskedMaxFloat64x4", argLength: 3, commutative: true}, - {name: "MaskedMinFloat64x4", argLength: 3, commutative: true}, - {name: "MaskedMulFloat64x4", argLength: 3, commutative: true}, - {name: "MaskedMulByPowOf2Float64x4", argLength: 3, commutative: false}, - {name: "MaskedNotEqualFloat64x4", argLength: 3, commutative: true}, - {name: "MaskedSqrtFloat64x4", argLength: 2, commutative: false}, - {name: "MaskedSubFloat64x4", argLength: 3, commutative: false}, + {name: "LessEqualMaskedFloat64x4", argLength: 3, commutative: false}, + {name: "LessMaskedFloat64x4", argLength: 3, commutative: false}, {name: "MaxFloat64x4", argLength: 2, commutative: true}, + {name: 
"MaxMaskedFloat64x4", argLength: 3, commutative: true}, {name: "MinFloat64x4", argLength: 2, commutative: true}, + {name: "MinMaskedFloat64x4", argLength: 3, commutative: true}, {name: "MulFloat64x4", argLength: 2, commutative: true}, {name: "MulByPowOf2Float64x4", argLength: 2, commutative: false}, + {name: "MulByPowOf2MaskedFloat64x4", argLength: 3, commutative: false}, + {name: "MulMaskedFloat64x4", argLength: 3, commutative: true}, {name: "NotEqualFloat64x4", argLength: 2, commutative: true}, + {name: "NotEqualMaskedFloat64x4", argLength: 3, commutative: true}, {name: "PairwiseAddFloat64x4", argLength: 2, commutative: false}, {name: "PairwiseSubFloat64x4", argLength: 2, commutative: false}, {name: "RoundFloat64x4", argLength: 1, commutative: false}, {name: "SqrtFloat64x4", argLength: 1, commutative: false}, + {name: "SqrtMaskedFloat64x4", argLength: 2, commutative: false}, {name: "SubFloat64x4", argLength: 2, commutative: false}, + {name: "SubMaskedFloat64x4", argLength: 3, commutative: false}, {name: "TruncFloat64x4", argLength: 1, commutative: false}, {name: "AddFloat64x8", argLength: 2, commutative: true}, + {name: "AddMaskedFloat64x8", argLength: 3, commutative: true}, {name: "ApproximateReciprocalFloat64x8", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalMaskedFloat64x8", argLength: 2, commutative: false}, {name: "ApproximateReciprocalOfSqrtFloat64x8", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalOfSqrtMaskedFloat64x8", argLength: 2, commutative: false}, {name: "DivFloat64x8", argLength: 2, commutative: false}, + {name: "DivMaskedFloat64x8", argLength: 3, commutative: false}, {name: "EqualFloat64x8", argLength: 2, commutative: true}, + {name: "EqualMaskedFloat64x8", argLength: 3, commutative: true}, {name: "FusedMultiplyAddFloat64x8", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddMaskedFloat64x8", argLength: 4, commutative: false}, {name: "FusedMultiplyAddSubFloat64x8", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSubMaskedFloat64x8", argLength: 4, commutative: false}, {name: "FusedMultiplySubAddFloat64x8", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAddMaskedFloat64x8", argLength: 4, commutative: false}, {name: "GreaterFloat64x8", argLength: 2, commutative: false}, {name: "GreaterEqualFloat64x8", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedFloat64x8", argLength: 3, commutative: false}, + {name: "GreaterMaskedFloat64x8", argLength: 3, commutative: false}, {name: "IsNanFloat64x8", argLength: 2, commutative: true}, + {name: "IsNanMaskedFloat64x8", argLength: 3, commutative: true}, {name: "LessFloat64x8", argLength: 2, commutative: false}, {name: "LessEqualFloat64x8", argLength: 2, commutative: false}, - {name: "MaskedAddFloat64x8", argLength: 3, commutative: true}, - {name: "MaskedApproximateReciprocalFloat64x8", argLength: 2, commutative: false}, - {name: "MaskedApproximateReciprocalOfSqrtFloat64x8", argLength: 2, commutative: false}, - {name: "MaskedDivFloat64x8", argLength: 3, commutative: false}, - {name: "MaskedEqualFloat64x8", argLength: 3, commutative: true}, - {name: "MaskedFusedMultiplyAddFloat64x8", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAddSubFloat64x8", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySubAddFloat64x8", argLength: 4, commutative: false}, - {name: "MaskedGreaterFloat64x8", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualFloat64x8", argLength: 3, commutative: false}, - {name: 
"MaskedIsNanFloat64x8", argLength: 3, commutative: true}, - {name: "MaskedLessFloat64x8", argLength: 3, commutative: false}, - {name: "MaskedLessEqualFloat64x8", argLength: 3, commutative: false}, - {name: "MaskedMaxFloat64x8", argLength: 3, commutative: true}, - {name: "MaskedMinFloat64x8", argLength: 3, commutative: true}, - {name: "MaskedMulFloat64x8", argLength: 3, commutative: true}, - {name: "MaskedMulByPowOf2Float64x8", argLength: 3, commutative: false}, - {name: "MaskedNotEqualFloat64x8", argLength: 3, commutative: true}, - {name: "MaskedSqrtFloat64x8", argLength: 2, commutative: false}, - {name: "MaskedSubFloat64x8", argLength: 3, commutative: false}, + {name: "LessEqualMaskedFloat64x8", argLength: 3, commutative: false}, + {name: "LessMaskedFloat64x8", argLength: 3, commutative: false}, {name: "MaxFloat64x8", argLength: 2, commutative: true}, + {name: "MaxMaskedFloat64x8", argLength: 3, commutative: true}, {name: "MinFloat64x8", argLength: 2, commutative: true}, + {name: "MinMaskedFloat64x8", argLength: 3, commutative: true}, {name: "MulFloat64x8", argLength: 2, commutative: true}, {name: "MulByPowOf2Float64x8", argLength: 2, commutative: false}, + {name: "MulByPowOf2MaskedFloat64x8", argLength: 3, commutative: false}, + {name: "MulMaskedFloat64x8", argLength: 3, commutative: true}, {name: "NotEqualFloat64x8", argLength: 2, commutative: true}, + {name: "NotEqualMaskedFloat64x8", argLength: 3, commutative: true}, {name: "SqrtFloat64x8", argLength: 1, commutative: false}, + {name: "SqrtMaskedFloat64x8", argLength: 2, commutative: false}, {name: "SubFloat64x8", argLength: 2, commutative: false}, + {name: "SubMaskedFloat64x8", argLength: 3, commutative: false}, {name: "AbsoluteInt16x16", argLength: 1, commutative: false}, + {name: "AbsoluteMaskedInt16x16", argLength: 2, commutative: false}, {name: "AddInt16x16", argLength: 2, commutative: true}, + {name: "AddMaskedInt16x16", argLength: 3, commutative: true}, {name: "AndInt16x16", argLength: 2, commutative: true}, {name: "AndNotInt16x16", argLength: 2, commutative: false}, {name: "EqualInt16x16", argLength: 2, commutative: true}, + {name: "EqualMaskedInt16x16", argLength: 3, commutative: true}, {name: "GreaterInt16x16", argLength: 2, commutative: false}, {name: "GreaterEqualInt16x16", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedInt16x16", argLength: 3, commutative: false}, + {name: "GreaterMaskedInt16x16", argLength: 3, commutative: false}, {name: "LessInt16x16", argLength: 2, commutative: false}, {name: "LessEqualInt16x16", argLength: 2, commutative: false}, - {name: "MaskedAbsoluteInt16x16", argLength: 2, commutative: false}, - {name: "MaskedAddInt16x16", argLength: 3, commutative: true}, - {name: "MaskedEqualInt16x16", argLength: 3, commutative: true}, - {name: "MaskedGreaterInt16x16", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualInt16x16", argLength: 3, commutative: false}, - {name: "MaskedLessInt16x16", argLength: 3, commutative: false}, - {name: "MaskedLessEqualInt16x16", argLength: 3, commutative: false}, - {name: "MaskedMaxInt16x16", argLength: 3, commutative: true}, - {name: "MaskedMinInt16x16", argLength: 3, commutative: true}, - {name: "MaskedMulHighInt16x16", argLength: 3, commutative: true}, - {name: "MaskedMulLowInt16x16", argLength: 3, commutative: true}, - {name: "MaskedNotEqualInt16x16", argLength: 3, commutative: true}, - {name: "MaskedPairDotProdInt16x16", argLength: 3, commutative: false}, - {name: "MaskedPopCountInt16x16", argLength: 2, commutative: false}, - {name: 
"MaskedSaturatedAddInt16x16", argLength: 3, commutative: true}, - {name: "MaskedSaturatedSubInt16x16", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftInt16x16", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftAndFillUpperFromInt16x16", argLength: 4, commutative: false}, - {name: "MaskedShiftRightInt16x16", argLength: 3, commutative: false}, - {name: "MaskedShiftRightAndFillUpperFromInt16x16", argLength: 4, commutative: false}, - {name: "MaskedShiftRightSignExtendedInt16x16", argLength: 3, commutative: false}, - {name: "MaskedSubInt16x16", argLength: 3, commutative: false}, + {name: "LessEqualMaskedInt16x16", argLength: 3, commutative: false}, + {name: "LessMaskedInt16x16", argLength: 3, commutative: false}, {name: "MaxInt16x16", argLength: 2, commutative: true}, + {name: "MaxMaskedInt16x16", argLength: 3, commutative: true}, {name: "MinInt16x16", argLength: 2, commutative: true}, + {name: "MinMaskedInt16x16", argLength: 3, commutative: true}, {name: "MulHighInt16x16", argLength: 2, commutative: true}, + {name: "MulHighMaskedInt16x16", argLength: 3, commutative: true}, {name: "MulLowInt16x16", argLength: 2, commutative: true}, + {name: "MulLowMaskedInt16x16", argLength: 3, commutative: true}, {name: "NotEqualInt16x16", argLength: 2, commutative: true}, + {name: "NotEqualMaskedInt16x16", argLength: 3, commutative: true}, {name: "OrInt16x16", argLength: 2, commutative: true}, {name: "PairDotProdInt16x16", argLength: 2, commutative: false}, + {name: "PairDotProdMaskedInt16x16", argLength: 3, commutative: false}, {name: "PairwiseAddInt16x16", argLength: 2, commutative: false}, {name: "PairwiseSubInt16x16", argLength: 2, commutative: false}, {name: "PopCountInt16x16", argLength: 1, commutative: false}, + {name: "PopCountMaskedInt16x16", argLength: 2, commutative: false}, {name: "SaturatedAddInt16x16", argLength: 2, commutative: true}, + {name: "SaturatedAddMaskedInt16x16", argLength: 3, commutative: true}, {name: "SaturatedPairwiseAddInt16x16", argLength: 2, commutative: false}, {name: "SaturatedPairwiseSubInt16x16", argLength: 2, commutative: false}, {name: "SaturatedSubInt16x16", argLength: 2, commutative: false}, + {name: "SaturatedSubMaskedInt16x16", argLength: 3, commutative: false}, {name: "ShiftAllLeftInt16x16", argLength: 2, commutative: false}, {name: "ShiftAllRightInt16x16", argLength: 2, commutative: false}, {name: "ShiftAllRightSignExtendedInt16x16", argLength: 2, commutative: false}, {name: "ShiftLeftInt16x16", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromInt16x16", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedInt16x16", argLength: 4, commutative: false}, + {name: "ShiftLeftMaskedInt16x16", argLength: 3, commutative: false}, {name: "ShiftRightInt16x16", argLength: 2, commutative: false}, {name: "ShiftRightAndFillUpperFromInt16x16", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedInt16x16", argLength: 4, commutative: false}, + {name: "ShiftRightMaskedInt16x16", argLength: 3, commutative: false}, {name: "ShiftRightSignExtendedInt16x16", argLength: 2, commutative: false}, + {name: "ShiftRightSignExtendedMaskedInt16x16", argLength: 3, commutative: false}, {name: "SignInt16x16", argLength: 2, commutative: false}, {name: "SubInt16x16", argLength: 2, commutative: false}, + {name: "SubMaskedInt16x16", argLength: 3, commutative: false}, {name: "XorInt16x16", argLength: 2, commutative: true}, {name: "AbsoluteInt16x32", argLength: 1, commutative: false}, + {name: 
"AbsoluteMaskedInt16x32", argLength: 2, commutative: false}, {name: "AddInt16x32", argLength: 2, commutative: true}, + {name: "AddMaskedInt16x32", argLength: 3, commutative: true}, {name: "EqualInt16x32", argLength: 2, commutative: true}, + {name: "EqualMaskedInt16x32", argLength: 3, commutative: true}, {name: "GreaterInt16x32", argLength: 2, commutative: false}, {name: "GreaterEqualInt16x32", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedInt16x32", argLength: 3, commutative: false}, + {name: "GreaterMaskedInt16x32", argLength: 3, commutative: false}, {name: "LessInt16x32", argLength: 2, commutative: false}, {name: "LessEqualInt16x32", argLength: 2, commutative: false}, - {name: "MaskedAbsoluteInt16x32", argLength: 2, commutative: false}, - {name: "MaskedAddInt16x32", argLength: 3, commutative: true}, - {name: "MaskedEqualInt16x32", argLength: 3, commutative: true}, - {name: "MaskedGreaterInt16x32", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualInt16x32", argLength: 3, commutative: false}, - {name: "MaskedLessInt16x32", argLength: 3, commutative: false}, - {name: "MaskedLessEqualInt16x32", argLength: 3, commutative: false}, - {name: "MaskedMaxInt16x32", argLength: 3, commutative: true}, - {name: "MaskedMinInt16x32", argLength: 3, commutative: true}, - {name: "MaskedMulHighInt16x32", argLength: 3, commutative: true}, - {name: "MaskedMulLowInt16x32", argLength: 3, commutative: true}, - {name: "MaskedNotEqualInt16x32", argLength: 3, commutative: true}, - {name: "MaskedPairDotProdInt16x32", argLength: 3, commutative: false}, - {name: "MaskedPopCountInt16x32", argLength: 2, commutative: false}, - {name: "MaskedSaturatedAddInt16x32", argLength: 3, commutative: true}, - {name: "MaskedSaturatedSubInt16x32", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftInt16x32", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftAndFillUpperFromInt16x32", argLength: 4, commutative: false}, - {name: "MaskedShiftRightInt16x32", argLength: 3, commutative: false}, - {name: "MaskedShiftRightAndFillUpperFromInt16x32", argLength: 4, commutative: false}, - {name: "MaskedShiftRightSignExtendedInt16x32", argLength: 3, commutative: false}, - {name: "MaskedSubInt16x32", argLength: 3, commutative: false}, + {name: "LessEqualMaskedInt16x32", argLength: 3, commutative: false}, + {name: "LessMaskedInt16x32", argLength: 3, commutative: false}, {name: "MaxInt16x32", argLength: 2, commutative: true}, + {name: "MaxMaskedInt16x32", argLength: 3, commutative: true}, {name: "MinInt16x32", argLength: 2, commutative: true}, + {name: "MinMaskedInt16x32", argLength: 3, commutative: true}, {name: "MulHighInt16x32", argLength: 2, commutative: true}, + {name: "MulHighMaskedInt16x32", argLength: 3, commutative: true}, {name: "MulLowInt16x32", argLength: 2, commutative: true}, + {name: "MulLowMaskedInt16x32", argLength: 3, commutative: true}, {name: "NotEqualInt16x32", argLength: 2, commutative: true}, + {name: "NotEqualMaskedInt16x32", argLength: 3, commutative: true}, {name: "PairDotProdInt16x32", argLength: 2, commutative: false}, + {name: "PairDotProdMaskedInt16x32", argLength: 3, commutative: false}, {name: "PopCountInt16x32", argLength: 1, commutative: false}, + {name: "PopCountMaskedInt16x32", argLength: 2, commutative: false}, {name: "SaturatedAddInt16x32", argLength: 2, commutative: true}, + {name: "SaturatedAddMaskedInt16x32", argLength: 3, commutative: true}, {name: "SaturatedSubInt16x32", argLength: 2, commutative: false}, + {name: "SaturatedSubMaskedInt16x32", argLength: 3, 
commutative: false}, {name: "ShiftLeftInt16x32", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromInt16x32", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedInt16x32", argLength: 4, commutative: false}, + {name: "ShiftLeftMaskedInt16x32", argLength: 3, commutative: false}, {name: "ShiftRightInt16x32", argLength: 2, commutative: false}, {name: "ShiftRightAndFillUpperFromInt16x32", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedInt16x32", argLength: 4, commutative: false}, + {name: "ShiftRightMaskedInt16x32", argLength: 3, commutative: false}, {name: "ShiftRightSignExtendedInt16x32", argLength: 2, commutative: false}, + {name: "ShiftRightSignExtendedMaskedInt16x32", argLength: 3, commutative: false}, {name: "SubInt16x32", argLength: 2, commutative: false}, + {name: "SubMaskedInt16x32", argLength: 3, commutative: false}, {name: "AbsoluteInt16x8", argLength: 1, commutative: false}, + {name: "AbsoluteMaskedInt16x8", argLength: 2, commutative: false}, {name: "AddInt16x8", argLength: 2, commutative: true}, + {name: "AddMaskedInt16x8", argLength: 3, commutative: true}, {name: "AndInt16x8", argLength: 2, commutative: true}, {name: "AndNotInt16x8", argLength: 2, commutative: false}, {name: "EqualInt16x8", argLength: 2, commutative: true}, + {name: "EqualMaskedInt16x8", argLength: 3, commutative: true}, {name: "GreaterInt16x8", argLength: 2, commutative: false}, {name: "GreaterEqualInt16x8", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedInt16x8", argLength: 3, commutative: false}, + {name: "GreaterMaskedInt16x8", argLength: 3, commutative: false}, {name: "LessInt16x8", argLength: 2, commutative: false}, {name: "LessEqualInt16x8", argLength: 2, commutative: false}, - {name: "MaskedAbsoluteInt16x8", argLength: 2, commutative: false}, - {name: "MaskedAddInt16x8", argLength: 3, commutative: true}, - {name: "MaskedEqualInt16x8", argLength: 3, commutative: true}, - {name: "MaskedGreaterInt16x8", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualInt16x8", argLength: 3, commutative: false}, - {name: "MaskedLessInt16x8", argLength: 3, commutative: false}, - {name: "MaskedLessEqualInt16x8", argLength: 3, commutative: false}, - {name: "MaskedMaxInt16x8", argLength: 3, commutative: true}, - {name: "MaskedMinInt16x8", argLength: 3, commutative: true}, - {name: "MaskedMulHighInt16x8", argLength: 3, commutative: true}, - {name: "MaskedMulLowInt16x8", argLength: 3, commutative: true}, - {name: "MaskedNotEqualInt16x8", argLength: 3, commutative: true}, - {name: "MaskedPairDotProdInt16x8", argLength: 3, commutative: false}, - {name: "MaskedPopCountInt16x8", argLength: 2, commutative: false}, - {name: "MaskedSaturatedAddInt16x8", argLength: 3, commutative: true}, - {name: "MaskedSaturatedSubInt16x8", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftInt16x8", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftAndFillUpperFromInt16x8", argLength: 4, commutative: false}, - {name: "MaskedShiftRightInt16x8", argLength: 3, commutative: false}, - {name: "MaskedShiftRightAndFillUpperFromInt16x8", argLength: 4, commutative: false}, - {name: "MaskedShiftRightSignExtendedInt16x8", argLength: 3, commutative: false}, - {name: "MaskedSubInt16x8", argLength: 3, commutative: false}, + {name: "LessEqualMaskedInt16x8", argLength: 3, commutative: false}, + {name: "LessMaskedInt16x8", argLength: 3, commutative: false}, {name: "MaxInt16x8", argLength: 2, commutative: true}, + {name: "MaxMaskedInt16x8", argLength: 
3, commutative: true}, {name: "MinInt16x8", argLength: 2, commutative: true}, + {name: "MinMaskedInt16x8", argLength: 3, commutative: true}, {name: "MulHighInt16x8", argLength: 2, commutative: true}, + {name: "MulHighMaskedInt16x8", argLength: 3, commutative: true}, {name: "MulLowInt16x8", argLength: 2, commutative: true}, + {name: "MulLowMaskedInt16x8", argLength: 3, commutative: true}, {name: "NotEqualInt16x8", argLength: 2, commutative: true}, + {name: "NotEqualMaskedInt16x8", argLength: 3, commutative: true}, {name: "OrInt16x8", argLength: 2, commutative: true}, {name: "PairDotProdInt16x8", argLength: 2, commutative: false}, + {name: "PairDotProdMaskedInt16x8", argLength: 3, commutative: false}, {name: "PairwiseAddInt16x8", argLength: 2, commutative: false}, {name: "PairwiseSubInt16x8", argLength: 2, commutative: false}, {name: "PopCountInt16x8", argLength: 1, commutative: false}, + {name: "PopCountMaskedInt16x8", argLength: 2, commutative: false}, {name: "SaturatedAddInt16x8", argLength: 2, commutative: true}, + {name: "SaturatedAddMaskedInt16x8", argLength: 3, commutative: true}, {name: "SaturatedPairwiseAddInt16x8", argLength: 2, commutative: false}, {name: "SaturatedPairwiseSubInt16x8", argLength: 2, commutative: false}, {name: "SaturatedSubInt16x8", argLength: 2, commutative: false}, + {name: "SaturatedSubMaskedInt16x8", argLength: 3, commutative: false}, {name: "ShiftAllLeftInt16x8", argLength: 2, commutative: false}, {name: "ShiftAllRightInt16x8", argLength: 2, commutative: false}, {name: "ShiftAllRightSignExtendedInt16x8", argLength: 2, commutative: false}, {name: "ShiftLeftInt16x8", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromInt16x8", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedInt16x8", argLength: 4, commutative: false}, + {name: "ShiftLeftMaskedInt16x8", argLength: 3, commutative: false}, {name: "ShiftRightInt16x8", argLength: 2, commutative: false}, {name: "ShiftRightAndFillUpperFromInt16x8", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedInt16x8", argLength: 4, commutative: false}, + {name: "ShiftRightMaskedInt16x8", argLength: 3, commutative: false}, {name: "ShiftRightSignExtendedInt16x8", argLength: 2, commutative: false}, + {name: "ShiftRightSignExtendedMaskedInt16x8", argLength: 3, commutative: false}, {name: "SignInt16x8", argLength: 2, commutative: false}, {name: "SubInt16x8", argLength: 2, commutative: false}, + {name: "SubMaskedInt16x8", argLength: 3, commutative: false}, {name: "XorInt16x8", argLength: 2, commutative: true}, {name: "AbsoluteInt32x16", argLength: 1, commutative: false}, + {name: "AbsoluteMaskedInt32x16", argLength: 2, commutative: false}, {name: "AddInt32x16", argLength: 2, commutative: true}, + {name: "AddMaskedInt32x16", argLength: 3, commutative: true}, {name: "AndInt32x16", argLength: 2, commutative: true}, + {name: "AndMaskedInt32x16", argLength: 3, commutative: true}, {name: "AndNotInt32x16", argLength: 2, commutative: false}, + {name: "AndNotMaskedInt32x16", argLength: 3, commutative: false}, {name: "EqualInt32x16", argLength: 2, commutative: true}, + {name: "EqualMaskedInt32x16", argLength: 3, commutative: true}, {name: "GreaterInt32x16", argLength: 2, commutative: false}, {name: "GreaterEqualInt32x16", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedInt32x16", argLength: 3, commutative: false}, + {name: "GreaterMaskedInt32x16", argLength: 3, commutative: false}, {name: "LessInt32x16", argLength: 2, commutative: false}, {name: 
"LessEqualInt32x16", argLength: 2, commutative: false}, - {name: "MaskedAbsoluteInt32x16", argLength: 2, commutative: false}, - {name: "MaskedAddInt32x16", argLength: 3, commutative: true}, - {name: "MaskedAndInt32x16", argLength: 3, commutative: true}, - {name: "MaskedAndNotInt32x16", argLength: 3, commutative: false}, - {name: "MaskedEqualInt32x16", argLength: 3, commutative: true}, - {name: "MaskedGreaterInt32x16", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualInt32x16", argLength: 3, commutative: false}, - {name: "MaskedLessInt32x16", argLength: 3, commutative: false}, - {name: "MaskedLessEqualInt32x16", argLength: 3, commutative: false}, - {name: "MaskedMaxInt32x16", argLength: 3, commutative: true}, - {name: "MaskedMinInt32x16", argLength: 3, commutative: true}, - {name: "MaskedMulLowInt32x16", argLength: 3, commutative: true}, - {name: "MaskedNotEqualInt32x16", argLength: 3, commutative: true}, - {name: "MaskedOrInt32x16", argLength: 3, commutative: true}, - {name: "MaskedPairDotProdAccumulateInt32x16", argLength: 4, commutative: false}, - {name: "MaskedPopCountInt32x16", argLength: 2, commutative: false}, - {name: "MaskedRotateLeftInt32x16", argLength: 3, commutative: false}, - {name: "MaskedRotateRightInt32x16", argLength: 3, commutative: false}, - {name: "MaskedSaturatedPairDotProdAccumulateInt32x16", argLength: 4, commutative: false}, - {name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16", argLength: 4, commutative: false}, - {name: "MaskedShiftLeftInt32x16", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftAndFillUpperFromInt32x16", argLength: 4, commutative: false}, - {name: "MaskedShiftRightInt32x16", argLength: 3, commutative: false}, - {name: "MaskedShiftRightAndFillUpperFromInt32x16", argLength: 4, commutative: false}, - {name: "MaskedShiftRightSignExtendedInt32x16", argLength: 3, commutative: false}, - {name: "MaskedSubInt32x16", argLength: 3, commutative: false}, - {name: "MaskedUnsignedSignedQuadDotProdAccumulateInt32x16", argLength: 4, commutative: false}, - {name: "MaskedXorInt32x16", argLength: 3, commutative: true}, + {name: "LessEqualMaskedInt32x16", argLength: 3, commutative: false}, + {name: "LessMaskedInt32x16", argLength: 3, commutative: false}, {name: "MaxInt32x16", argLength: 2, commutative: true}, + {name: "MaxMaskedInt32x16", argLength: 3, commutative: true}, {name: "MinInt32x16", argLength: 2, commutative: true}, + {name: "MinMaskedInt32x16", argLength: 3, commutative: true}, {name: "MulLowInt32x16", argLength: 2, commutative: true}, + {name: "MulLowMaskedInt32x16", argLength: 3, commutative: true}, {name: "NotEqualInt32x16", argLength: 2, commutative: true}, + {name: "NotEqualMaskedInt32x16", argLength: 3, commutative: true}, {name: "OrInt32x16", argLength: 2, commutative: true}, + {name: "OrMaskedInt32x16", argLength: 3, commutative: true}, {name: "PairDotProdAccumulateInt32x16", argLength: 3, commutative: false}, + {name: "PairDotProdAccumulateMaskedInt32x16", argLength: 4, commutative: false}, {name: "PopCountInt32x16", argLength: 1, commutative: false}, + {name: "PopCountMaskedInt32x16", argLength: 2, commutative: false}, {name: "RotateLeftInt32x16", argLength: 2, commutative: false}, + {name: "RotateLeftMaskedInt32x16", argLength: 3, commutative: false}, {name: "RotateRightInt32x16", argLength: 2, commutative: false}, + {name: "RotateRightMaskedInt32x16", argLength: 3, commutative: false}, {name: "SaturatedPairDotProdAccumulateInt32x16", argLength: 3, commutative: false}, + {name: 
"SaturatedPairDotProdAccumulateMaskedInt32x16", argLength: 4, commutative: false}, {name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x16", argLength: 3, commutative: false}, + {name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16", argLength: 4, commutative: false}, {name: "ShiftLeftInt32x16", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromInt32x16", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedInt32x16", argLength: 4, commutative: false}, + {name: "ShiftLeftMaskedInt32x16", argLength: 3, commutative: false}, {name: "ShiftRightInt32x16", argLength: 2, commutative: false}, {name: "ShiftRightAndFillUpperFromInt32x16", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedInt32x16", argLength: 4, commutative: false}, + {name: "ShiftRightMaskedInt32x16", argLength: 3, commutative: false}, {name: "ShiftRightSignExtendedInt32x16", argLength: 2, commutative: false}, + {name: "ShiftRightSignExtendedMaskedInt32x16", argLength: 3, commutative: false}, {name: "SubInt32x16", argLength: 2, commutative: false}, + {name: "SubMaskedInt32x16", argLength: 3, commutative: false}, {name: "UnsignedSignedQuadDotProdAccumulateInt32x16", argLength: 3, commutative: false}, + {name: "UnsignedSignedQuadDotProdAccumulateMaskedInt32x16", argLength: 4, commutative: false}, {name: "XorInt32x16", argLength: 2, commutative: true}, + {name: "XorMaskedInt32x16", argLength: 3, commutative: true}, {name: "AbsoluteInt32x4", argLength: 1, commutative: false}, + {name: "AbsoluteMaskedInt32x4", argLength: 2, commutative: false}, {name: "AddInt32x4", argLength: 2, commutative: true}, + {name: "AddMaskedInt32x4", argLength: 3, commutative: true}, {name: "AndInt32x4", argLength: 2, commutative: true}, + {name: "AndMaskedInt32x4", argLength: 3, commutative: true}, {name: "AndNotInt32x4", argLength: 2, commutative: false}, + {name: "AndNotMaskedInt32x4", argLength: 3, commutative: false}, {name: "EqualInt32x4", argLength: 2, commutative: true}, + {name: "EqualMaskedInt32x4", argLength: 3, commutative: true}, {name: "GreaterInt32x4", argLength: 2, commutative: false}, {name: "GreaterEqualInt32x4", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedInt32x4", argLength: 3, commutative: false}, + {name: "GreaterMaskedInt32x4", argLength: 3, commutative: false}, {name: "LessInt32x4", argLength: 2, commutative: false}, {name: "LessEqualInt32x4", argLength: 2, commutative: false}, - {name: "MaskedAbsoluteInt32x4", argLength: 2, commutative: false}, - {name: "MaskedAddInt32x4", argLength: 3, commutative: true}, - {name: "MaskedAndInt32x4", argLength: 3, commutative: true}, - {name: "MaskedAndNotInt32x4", argLength: 3, commutative: false}, - {name: "MaskedEqualInt32x4", argLength: 3, commutative: true}, - {name: "MaskedGreaterInt32x4", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualInt32x4", argLength: 3, commutative: false}, - {name: "MaskedLessInt32x4", argLength: 3, commutative: false}, - {name: "MaskedLessEqualInt32x4", argLength: 3, commutative: false}, - {name: "MaskedMaxInt32x4", argLength: 3, commutative: true}, - {name: "MaskedMinInt32x4", argLength: 3, commutative: true}, - {name: "MaskedMulLowInt32x4", argLength: 3, commutative: true}, - {name: "MaskedNotEqualInt32x4", argLength: 3, commutative: true}, - {name: "MaskedOrInt32x4", argLength: 3, commutative: true}, - {name: "MaskedPairDotProdAccumulateInt32x4", argLength: 4, commutative: false}, - {name: "MaskedPopCountInt32x4", argLength: 2, commutative: false}, 
- {name: "MaskedRotateLeftInt32x4", argLength: 3, commutative: false}, - {name: "MaskedRotateRightInt32x4", argLength: 3, commutative: false}, - {name: "MaskedSaturatedPairDotProdAccumulateInt32x4", argLength: 4, commutative: false}, - {name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4", argLength: 4, commutative: false}, - {name: "MaskedShiftLeftInt32x4", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftAndFillUpperFromInt32x4", argLength: 4, commutative: false}, - {name: "MaskedShiftRightInt32x4", argLength: 3, commutative: false}, - {name: "MaskedShiftRightAndFillUpperFromInt32x4", argLength: 4, commutative: false}, - {name: "MaskedShiftRightSignExtendedInt32x4", argLength: 3, commutative: false}, - {name: "MaskedSubInt32x4", argLength: 3, commutative: false}, - {name: "MaskedUnsignedSignedQuadDotProdAccumulateInt32x4", argLength: 4, commutative: false}, - {name: "MaskedXorInt32x4", argLength: 3, commutative: true}, + {name: "LessEqualMaskedInt32x4", argLength: 3, commutative: false}, + {name: "LessMaskedInt32x4", argLength: 3, commutative: false}, {name: "MaxInt32x4", argLength: 2, commutative: true}, + {name: "MaxMaskedInt32x4", argLength: 3, commutative: true}, {name: "MinInt32x4", argLength: 2, commutative: true}, + {name: "MinMaskedInt32x4", argLength: 3, commutative: true}, {name: "MulEvenWidenInt32x4", argLength: 2, commutative: true}, {name: "MulLowInt32x4", argLength: 2, commutative: true}, + {name: "MulLowMaskedInt32x4", argLength: 3, commutative: true}, {name: "NotEqualInt32x4", argLength: 2, commutative: true}, + {name: "NotEqualMaskedInt32x4", argLength: 3, commutative: true}, {name: "OrInt32x4", argLength: 2, commutative: true}, + {name: "OrMaskedInt32x4", argLength: 3, commutative: true}, {name: "PairDotProdAccumulateInt32x4", argLength: 3, commutative: false}, + {name: "PairDotProdAccumulateMaskedInt32x4", argLength: 4, commutative: false}, {name: "PairwiseAddInt32x4", argLength: 2, commutative: false}, {name: "PairwiseSubInt32x4", argLength: 2, commutative: false}, {name: "PopCountInt32x4", argLength: 1, commutative: false}, + {name: "PopCountMaskedInt32x4", argLength: 2, commutative: false}, {name: "RotateLeftInt32x4", argLength: 2, commutative: false}, + {name: "RotateLeftMaskedInt32x4", argLength: 3, commutative: false}, {name: "RotateRightInt32x4", argLength: 2, commutative: false}, + {name: "RotateRightMaskedInt32x4", argLength: 3, commutative: false}, {name: "SaturatedPairDotProdAccumulateInt32x4", argLength: 3, commutative: false}, + {name: "SaturatedPairDotProdAccumulateMaskedInt32x4", argLength: 4, commutative: false}, {name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x4", argLength: 3, commutative: false}, + {name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4", argLength: 4, commutative: false}, {name: "ShiftAllLeftInt32x4", argLength: 2, commutative: false}, {name: "ShiftAllRightInt32x4", argLength: 2, commutative: false}, {name: "ShiftAllRightSignExtendedInt32x4", argLength: 2, commutative: false}, {name: "ShiftLeftInt32x4", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromInt32x4", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedInt32x4", argLength: 4, commutative: false}, + {name: "ShiftLeftMaskedInt32x4", argLength: 3, commutative: false}, {name: "ShiftRightInt32x4", argLength: 2, commutative: false}, {name: "ShiftRightAndFillUpperFromInt32x4", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedInt32x4", argLength: 4, commutative: 
false}, + {name: "ShiftRightMaskedInt32x4", argLength: 3, commutative: false}, {name: "ShiftRightSignExtendedInt32x4", argLength: 2, commutative: false}, + {name: "ShiftRightSignExtendedMaskedInt32x4", argLength: 3, commutative: false}, {name: "SignInt32x4", argLength: 2, commutative: false}, {name: "SubInt32x4", argLength: 2, commutative: false}, + {name: "SubMaskedInt32x4", argLength: 3, commutative: false}, {name: "UnsignedSignedQuadDotProdAccumulateInt32x4", argLength: 3, commutative: false}, + {name: "UnsignedSignedQuadDotProdAccumulateMaskedInt32x4", argLength: 4, commutative: false}, {name: "XorInt32x4", argLength: 2, commutative: true}, + {name: "XorMaskedInt32x4", argLength: 3, commutative: true}, {name: "AbsoluteInt32x8", argLength: 1, commutative: false}, + {name: "AbsoluteMaskedInt32x8", argLength: 2, commutative: false}, {name: "AddInt32x8", argLength: 2, commutative: true}, + {name: "AddMaskedInt32x8", argLength: 3, commutative: true}, {name: "AndInt32x8", argLength: 2, commutative: true}, + {name: "AndMaskedInt32x8", argLength: 3, commutative: true}, {name: "AndNotInt32x8", argLength: 2, commutative: false}, + {name: "AndNotMaskedInt32x8", argLength: 3, commutative: false}, {name: "EqualInt32x8", argLength: 2, commutative: true}, + {name: "EqualMaskedInt32x8", argLength: 3, commutative: true}, {name: "GreaterInt32x8", argLength: 2, commutative: false}, {name: "GreaterEqualInt32x8", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedInt32x8", argLength: 3, commutative: false}, + {name: "GreaterMaskedInt32x8", argLength: 3, commutative: false}, {name: "LessInt32x8", argLength: 2, commutative: false}, {name: "LessEqualInt32x8", argLength: 2, commutative: false}, - {name: "MaskedAbsoluteInt32x8", argLength: 2, commutative: false}, - {name: "MaskedAddInt32x8", argLength: 3, commutative: true}, - {name: "MaskedAndInt32x8", argLength: 3, commutative: true}, - {name: "MaskedAndNotInt32x8", argLength: 3, commutative: false}, - {name: "MaskedEqualInt32x8", argLength: 3, commutative: true}, - {name: "MaskedGreaterInt32x8", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualInt32x8", argLength: 3, commutative: false}, - {name: "MaskedLessInt32x8", argLength: 3, commutative: false}, - {name: "MaskedLessEqualInt32x8", argLength: 3, commutative: false}, - {name: "MaskedMaxInt32x8", argLength: 3, commutative: true}, - {name: "MaskedMinInt32x8", argLength: 3, commutative: true}, - {name: "MaskedMulLowInt32x8", argLength: 3, commutative: true}, - {name: "MaskedNotEqualInt32x8", argLength: 3, commutative: true}, - {name: "MaskedOrInt32x8", argLength: 3, commutative: true}, - {name: "MaskedPairDotProdAccumulateInt32x8", argLength: 4, commutative: false}, - {name: "MaskedPopCountInt32x8", argLength: 2, commutative: false}, - {name: "MaskedRotateLeftInt32x8", argLength: 3, commutative: false}, - {name: "MaskedRotateRightInt32x8", argLength: 3, commutative: false}, - {name: "MaskedSaturatedPairDotProdAccumulateInt32x8", argLength: 4, commutative: false}, - {name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8", argLength: 4, commutative: false}, - {name: "MaskedShiftLeftInt32x8", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftAndFillUpperFromInt32x8", argLength: 4, commutative: false}, - {name: "MaskedShiftRightInt32x8", argLength: 3, commutative: false}, - {name: "MaskedShiftRightAndFillUpperFromInt32x8", argLength: 4, commutative: false}, - {name: "MaskedShiftRightSignExtendedInt32x8", argLength: 3, commutative: false}, - {name: 
"MaskedSubInt32x8", argLength: 3, commutative: false}, - {name: "MaskedUnsignedSignedQuadDotProdAccumulateInt32x8", argLength: 4, commutative: false}, - {name: "MaskedXorInt32x8", argLength: 3, commutative: true}, + {name: "LessEqualMaskedInt32x8", argLength: 3, commutative: false}, + {name: "LessMaskedInt32x8", argLength: 3, commutative: false}, {name: "MaxInt32x8", argLength: 2, commutative: true}, + {name: "MaxMaskedInt32x8", argLength: 3, commutative: true}, {name: "MinInt32x8", argLength: 2, commutative: true}, + {name: "MinMaskedInt32x8", argLength: 3, commutative: true}, {name: "MulEvenWidenInt32x8", argLength: 2, commutative: true}, {name: "MulLowInt32x8", argLength: 2, commutative: true}, + {name: "MulLowMaskedInt32x8", argLength: 3, commutative: true}, {name: "NotEqualInt32x8", argLength: 2, commutative: true}, + {name: "NotEqualMaskedInt32x8", argLength: 3, commutative: true}, {name: "OrInt32x8", argLength: 2, commutative: true}, + {name: "OrMaskedInt32x8", argLength: 3, commutative: true}, {name: "PairDotProdAccumulateInt32x8", argLength: 3, commutative: false}, + {name: "PairDotProdAccumulateMaskedInt32x8", argLength: 4, commutative: false}, {name: "PairwiseAddInt32x8", argLength: 2, commutative: false}, {name: "PairwiseSubInt32x8", argLength: 2, commutative: false}, {name: "PopCountInt32x8", argLength: 1, commutative: false}, + {name: "PopCountMaskedInt32x8", argLength: 2, commutative: false}, {name: "RotateLeftInt32x8", argLength: 2, commutative: false}, + {name: "RotateLeftMaskedInt32x8", argLength: 3, commutative: false}, {name: "RotateRightInt32x8", argLength: 2, commutative: false}, + {name: "RotateRightMaskedInt32x8", argLength: 3, commutative: false}, {name: "SaturatedPairDotProdAccumulateInt32x8", argLength: 3, commutative: false}, + {name: "SaturatedPairDotProdAccumulateMaskedInt32x8", argLength: 4, commutative: false}, {name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x8", argLength: 3, commutative: false}, + {name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8", argLength: 4, commutative: false}, {name: "ShiftAllLeftInt32x8", argLength: 2, commutative: false}, {name: "ShiftAllRightInt32x8", argLength: 2, commutative: false}, {name: "ShiftAllRightSignExtendedInt32x8", argLength: 2, commutative: false}, {name: "ShiftLeftInt32x8", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromInt32x8", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedInt32x8", argLength: 4, commutative: false}, + {name: "ShiftLeftMaskedInt32x8", argLength: 3, commutative: false}, {name: "ShiftRightInt32x8", argLength: 2, commutative: false}, {name: "ShiftRightAndFillUpperFromInt32x8", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedInt32x8", argLength: 4, commutative: false}, + {name: "ShiftRightMaskedInt32x8", argLength: 3, commutative: false}, {name: "ShiftRightSignExtendedInt32x8", argLength: 2, commutative: false}, + {name: "ShiftRightSignExtendedMaskedInt32x8", argLength: 3, commutative: false}, {name: "SignInt32x8", argLength: 2, commutative: false}, {name: "SubInt32x8", argLength: 2, commutative: false}, + {name: "SubMaskedInt32x8", argLength: 3, commutative: false}, {name: "UnsignedSignedQuadDotProdAccumulateInt32x8", argLength: 3, commutative: false}, + {name: "UnsignedSignedQuadDotProdAccumulateMaskedInt32x8", argLength: 4, commutative: false}, {name: "XorInt32x8", argLength: 2, commutative: true}, + {name: "XorMaskedInt32x8", argLength: 3, commutative: true}, {name: "AbsoluteInt64x2", 
argLength: 1, commutative: false}, + {name: "AbsoluteMaskedInt64x2", argLength: 2, commutative: false}, {name: "AddInt64x2", argLength: 2, commutative: true}, + {name: "AddMaskedInt64x2", argLength: 3, commutative: true}, {name: "AndInt64x2", argLength: 2, commutative: true}, + {name: "AndMaskedInt64x2", argLength: 3, commutative: true}, {name: "AndNotInt64x2", argLength: 2, commutative: false}, + {name: "AndNotMaskedInt64x2", argLength: 3, commutative: false}, {name: "EqualInt64x2", argLength: 2, commutative: true}, + {name: "EqualMaskedInt64x2", argLength: 3, commutative: true}, {name: "GreaterInt64x2", argLength: 2, commutative: false}, {name: "GreaterEqualInt64x2", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedInt64x2", argLength: 3, commutative: false}, + {name: "GreaterMaskedInt64x2", argLength: 3, commutative: false}, {name: "LessInt64x2", argLength: 2, commutative: false}, {name: "LessEqualInt64x2", argLength: 2, commutative: false}, - {name: "MaskedAbsoluteInt64x2", argLength: 2, commutative: false}, - {name: "MaskedAddInt64x2", argLength: 3, commutative: true}, - {name: "MaskedAndInt64x2", argLength: 3, commutative: true}, - {name: "MaskedAndNotInt64x2", argLength: 3, commutative: false}, - {name: "MaskedEqualInt64x2", argLength: 3, commutative: true}, - {name: "MaskedGreaterInt64x2", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualInt64x2", argLength: 3, commutative: false}, - {name: "MaskedLessInt64x2", argLength: 3, commutative: false}, - {name: "MaskedLessEqualInt64x2", argLength: 3, commutative: false}, - {name: "MaskedMaxInt64x2", argLength: 3, commutative: true}, - {name: "MaskedMinInt64x2", argLength: 3, commutative: true}, - {name: "MaskedMulEvenWidenInt64x2", argLength: 3, commutative: true}, - {name: "MaskedMulLowInt64x2", argLength: 3, commutative: true}, - {name: "MaskedNotEqualInt64x2", argLength: 3, commutative: true}, - {name: "MaskedOrInt64x2", argLength: 3, commutative: true}, - {name: "MaskedPopCountInt64x2", argLength: 2, commutative: false}, - {name: "MaskedRotateLeftInt64x2", argLength: 3, commutative: false}, - {name: "MaskedRotateRightInt64x2", argLength: 3, commutative: false}, - {name: "MaskedShiftAllLeftInt64x2", argLength: 3, commutative: false}, - {name: "MaskedShiftAllRightInt64x2", argLength: 3, commutative: false}, - {name: "MaskedShiftAllRightSignExtendedInt64x2", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftInt64x2", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftAndFillUpperFromInt64x2", argLength: 4, commutative: false}, - {name: "MaskedShiftRightInt64x2", argLength: 3, commutative: false}, - {name: "MaskedShiftRightAndFillUpperFromInt64x2", argLength: 4, commutative: false}, - {name: "MaskedShiftRightSignExtendedInt64x2", argLength: 3, commutative: false}, - {name: "MaskedSubInt64x2", argLength: 3, commutative: false}, - {name: "MaskedXorInt64x2", argLength: 3, commutative: true}, + {name: "LessEqualMaskedInt64x2", argLength: 3, commutative: false}, + {name: "LessMaskedInt64x2", argLength: 3, commutative: false}, {name: "MaxInt64x2", argLength: 2, commutative: true}, + {name: "MaxMaskedInt64x2", argLength: 3, commutative: true}, {name: "MinInt64x2", argLength: 2, commutative: true}, + {name: "MinMaskedInt64x2", argLength: 3, commutative: true}, {name: "MulEvenWidenInt64x2", argLength: 2, commutative: true}, + {name: "MulEvenWidenMaskedInt64x2", argLength: 3, commutative: true}, {name: "MulLowInt64x2", argLength: 2, commutative: true}, + {name: "MulLowMaskedInt64x2", argLength: 3, 
commutative: true}, {name: "NotEqualInt64x2", argLength: 2, commutative: true}, + {name: "NotEqualMaskedInt64x2", argLength: 3, commutative: true}, {name: "OrInt64x2", argLength: 2, commutative: true}, + {name: "OrMaskedInt64x2", argLength: 3, commutative: true}, {name: "PopCountInt64x2", argLength: 1, commutative: false}, + {name: "PopCountMaskedInt64x2", argLength: 2, commutative: false}, {name: "RotateLeftInt64x2", argLength: 2, commutative: false}, + {name: "RotateLeftMaskedInt64x2", argLength: 3, commutative: false}, {name: "RotateRightInt64x2", argLength: 2, commutative: false}, + {name: "RotateRightMaskedInt64x2", argLength: 3, commutative: false}, {name: "ShiftAllLeftInt64x2", argLength: 2, commutative: false}, + {name: "ShiftAllLeftMaskedInt64x2", argLength: 3, commutative: false}, {name: "ShiftAllRightInt64x2", argLength: 2, commutative: false}, + {name: "ShiftAllRightMaskedInt64x2", argLength: 3, commutative: false}, {name: "ShiftAllRightSignExtendedInt64x2", argLength: 2, commutative: false}, + {name: "ShiftAllRightSignExtendedMaskedInt64x2", argLength: 3, commutative: false}, {name: "ShiftLeftInt64x2", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromInt64x2", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedInt64x2", argLength: 4, commutative: false}, + {name: "ShiftLeftMaskedInt64x2", argLength: 3, commutative: false}, {name: "ShiftRightInt64x2", argLength: 2, commutative: false}, {name: "ShiftRightAndFillUpperFromInt64x2", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedInt64x2", argLength: 4, commutative: false}, + {name: "ShiftRightMaskedInt64x2", argLength: 3, commutative: false}, {name: "ShiftRightSignExtendedInt64x2", argLength: 2, commutative: false}, + {name: "ShiftRightSignExtendedMaskedInt64x2", argLength: 3, commutative: false}, {name: "SubInt64x2", argLength: 2, commutative: false}, + {name: "SubMaskedInt64x2", argLength: 3, commutative: false}, {name: "XorInt64x2", argLength: 2, commutative: true}, + {name: "XorMaskedInt64x2", argLength: 3, commutative: true}, {name: "AbsoluteInt64x4", argLength: 1, commutative: false}, + {name: "AbsoluteMaskedInt64x4", argLength: 2, commutative: false}, {name: "AddInt64x4", argLength: 2, commutative: true}, + {name: "AddMaskedInt64x4", argLength: 3, commutative: true}, {name: "AndInt64x4", argLength: 2, commutative: true}, + {name: "AndMaskedInt64x4", argLength: 3, commutative: true}, {name: "AndNotInt64x4", argLength: 2, commutative: false}, + {name: "AndNotMaskedInt64x4", argLength: 3, commutative: false}, {name: "EqualInt64x4", argLength: 2, commutative: true}, + {name: "EqualMaskedInt64x4", argLength: 3, commutative: true}, {name: "GreaterInt64x4", argLength: 2, commutative: false}, {name: "GreaterEqualInt64x4", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedInt64x4", argLength: 3, commutative: false}, + {name: "GreaterMaskedInt64x4", argLength: 3, commutative: false}, {name: "LessInt64x4", argLength: 2, commutative: false}, {name: "LessEqualInt64x4", argLength: 2, commutative: false}, - {name: "MaskedAbsoluteInt64x4", argLength: 2, commutative: false}, - {name: "MaskedAddInt64x4", argLength: 3, commutative: true}, - {name: "MaskedAndInt64x4", argLength: 3, commutative: true}, - {name: "MaskedAndNotInt64x4", argLength: 3, commutative: false}, - {name: "MaskedEqualInt64x4", argLength: 3, commutative: true}, - {name: "MaskedGreaterInt64x4", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualInt64x4", argLength: 3, 
commutative: false}, - {name: "MaskedLessInt64x4", argLength: 3, commutative: false}, - {name: "MaskedLessEqualInt64x4", argLength: 3, commutative: false}, - {name: "MaskedMaxInt64x4", argLength: 3, commutative: true}, - {name: "MaskedMinInt64x4", argLength: 3, commutative: true}, - {name: "MaskedMulEvenWidenInt64x4", argLength: 3, commutative: true}, - {name: "MaskedMulLowInt64x4", argLength: 3, commutative: true}, - {name: "MaskedNotEqualInt64x4", argLength: 3, commutative: true}, - {name: "MaskedOrInt64x4", argLength: 3, commutative: true}, - {name: "MaskedPopCountInt64x4", argLength: 2, commutative: false}, - {name: "MaskedRotateLeftInt64x4", argLength: 3, commutative: false}, - {name: "MaskedRotateRightInt64x4", argLength: 3, commutative: false}, - {name: "MaskedShiftAllLeftInt64x4", argLength: 3, commutative: false}, - {name: "MaskedShiftAllRightInt64x4", argLength: 3, commutative: false}, - {name: "MaskedShiftAllRightSignExtendedInt64x4", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftInt64x4", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftAndFillUpperFromInt64x4", argLength: 4, commutative: false}, - {name: "MaskedShiftRightInt64x4", argLength: 3, commutative: false}, - {name: "MaskedShiftRightAndFillUpperFromInt64x4", argLength: 4, commutative: false}, - {name: "MaskedShiftRightSignExtendedInt64x4", argLength: 3, commutative: false}, - {name: "MaskedSubInt64x4", argLength: 3, commutative: false}, - {name: "MaskedXorInt64x4", argLength: 3, commutative: true}, + {name: "LessEqualMaskedInt64x4", argLength: 3, commutative: false}, + {name: "LessMaskedInt64x4", argLength: 3, commutative: false}, {name: "MaxInt64x4", argLength: 2, commutative: true}, + {name: "MaxMaskedInt64x4", argLength: 3, commutative: true}, {name: "MinInt64x4", argLength: 2, commutative: true}, + {name: "MinMaskedInt64x4", argLength: 3, commutative: true}, {name: "MulEvenWidenInt64x4", argLength: 2, commutative: true}, + {name: "MulEvenWidenMaskedInt64x4", argLength: 3, commutative: true}, {name: "MulLowInt64x4", argLength: 2, commutative: true}, + {name: "MulLowMaskedInt64x4", argLength: 3, commutative: true}, {name: "NotEqualInt64x4", argLength: 2, commutative: true}, + {name: "NotEqualMaskedInt64x4", argLength: 3, commutative: true}, {name: "OrInt64x4", argLength: 2, commutative: true}, + {name: "OrMaskedInt64x4", argLength: 3, commutative: true}, {name: "PopCountInt64x4", argLength: 1, commutative: false}, + {name: "PopCountMaskedInt64x4", argLength: 2, commutative: false}, {name: "RotateLeftInt64x4", argLength: 2, commutative: false}, + {name: "RotateLeftMaskedInt64x4", argLength: 3, commutative: false}, {name: "RotateRightInt64x4", argLength: 2, commutative: false}, + {name: "RotateRightMaskedInt64x4", argLength: 3, commutative: false}, {name: "ShiftAllLeftInt64x4", argLength: 2, commutative: false}, + {name: "ShiftAllLeftMaskedInt64x4", argLength: 3, commutative: false}, {name: "ShiftAllRightInt64x4", argLength: 2, commutative: false}, + {name: "ShiftAllRightMaskedInt64x4", argLength: 3, commutative: false}, {name: "ShiftAllRightSignExtendedInt64x4", argLength: 2, commutative: false}, + {name: "ShiftAllRightSignExtendedMaskedInt64x4", argLength: 3, commutative: false}, {name: "ShiftLeftInt64x4", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromInt64x4", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedInt64x4", argLength: 4, commutative: false}, + {name: "ShiftLeftMaskedInt64x4", argLength: 3, commutative: false}, {name: 
"ShiftRightInt64x4", argLength: 2, commutative: false}, {name: "ShiftRightAndFillUpperFromInt64x4", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedInt64x4", argLength: 4, commutative: false}, + {name: "ShiftRightMaskedInt64x4", argLength: 3, commutative: false}, {name: "ShiftRightSignExtendedInt64x4", argLength: 2, commutative: false}, + {name: "ShiftRightSignExtendedMaskedInt64x4", argLength: 3, commutative: false}, {name: "SubInt64x4", argLength: 2, commutative: false}, + {name: "SubMaskedInt64x4", argLength: 3, commutative: false}, {name: "XorInt64x4", argLength: 2, commutative: true}, + {name: "XorMaskedInt64x4", argLength: 3, commutative: true}, {name: "AbsoluteInt64x8", argLength: 1, commutative: false}, + {name: "AbsoluteMaskedInt64x8", argLength: 2, commutative: false}, {name: "AddInt64x8", argLength: 2, commutative: true}, + {name: "AddMaskedInt64x8", argLength: 3, commutative: true}, {name: "AndInt64x8", argLength: 2, commutative: true}, + {name: "AndMaskedInt64x8", argLength: 3, commutative: true}, {name: "AndNotInt64x8", argLength: 2, commutative: false}, + {name: "AndNotMaskedInt64x8", argLength: 3, commutative: false}, {name: "EqualInt64x8", argLength: 2, commutative: true}, + {name: "EqualMaskedInt64x8", argLength: 3, commutative: true}, {name: "GreaterInt64x8", argLength: 2, commutative: false}, {name: "GreaterEqualInt64x8", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedInt64x8", argLength: 3, commutative: false}, + {name: "GreaterMaskedInt64x8", argLength: 3, commutative: false}, {name: "LessInt64x8", argLength: 2, commutative: false}, {name: "LessEqualInt64x8", argLength: 2, commutative: false}, - {name: "MaskedAbsoluteInt64x8", argLength: 2, commutative: false}, - {name: "MaskedAddInt64x8", argLength: 3, commutative: true}, - {name: "MaskedAndInt64x8", argLength: 3, commutative: true}, - {name: "MaskedAndNotInt64x8", argLength: 3, commutative: false}, - {name: "MaskedEqualInt64x8", argLength: 3, commutative: true}, - {name: "MaskedGreaterInt64x8", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualInt64x8", argLength: 3, commutative: false}, - {name: "MaskedLessInt64x8", argLength: 3, commutative: false}, - {name: "MaskedLessEqualInt64x8", argLength: 3, commutative: false}, - {name: "MaskedMaxInt64x8", argLength: 3, commutative: true}, - {name: "MaskedMinInt64x8", argLength: 3, commutative: true}, - {name: "MaskedMulEvenWidenInt64x8", argLength: 3, commutative: true}, - {name: "MaskedMulLowInt64x8", argLength: 3, commutative: true}, - {name: "MaskedNotEqualInt64x8", argLength: 3, commutative: true}, - {name: "MaskedOrInt64x8", argLength: 3, commutative: true}, - {name: "MaskedPopCountInt64x8", argLength: 2, commutative: false}, - {name: "MaskedRotateLeftInt64x8", argLength: 3, commutative: false}, - {name: "MaskedRotateRightInt64x8", argLength: 3, commutative: false}, - {name: "MaskedShiftAllLeftInt64x8", argLength: 3, commutative: false}, - {name: "MaskedShiftAllRightInt64x8", argLength: 3, commutative: false}, - {name: "MaskedShiftAllRightSignExtendedInt64x8", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftInt64x8", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftAndFillUpperFromInt64x8", argLength: 4, commutative: false}, - {name: "MaskedShiftRightInt64x8", argLength: 3, commutative: false}, - {name: "MaskedShiftRightAndFillUpperFromInt64x8", argLength: 4, commutative: false}, - {name: "MaskedShiftRightSignExtendedInt64x8", argLength: 3, commutative: false}, - {name: 
"MaskedSubInt64x8", argLength: 3, commutative: false}, - {name: "MaskedXorInt64x8", argLength: 3, commutative: true}, + {name: "LessEqualMaskedInt64x8", argLength: 3, commutative: false}, + {name: "LessMaskedInt64x8", argLength: 3, commutative: false}, {name: "MaxInt64x8", argLength: 2, commutative: true}, + {name: "MaxMaskedInt64x8", argLength: 3, commutative: true}, {name: "MinInt64x8", argLength: 2, commutative: true}, + {name: "MinMaskedInt64x8", argLength: 3, commutative: true}, {name: "MulEvenWidenInt64x8", argLength: 2, commutative: true}, + {name: "MulEvenWidenMaskedInt64x8", argLength: 3, commutative: true}, {name: "MulLowInt64x8", argLength: 2, commutative: true}, + {name: "MulLowMaskedInt64x8", argLength: 3, commutative: true}, {name: "NotEqualInt64x8", argLength: 2, commutative: true}, + {name: "NotEqualMaskedInt64x8", argLength: 3, commutative: true}, {name: "OrInt64x8", argLength: 2, commutative: true}, + {name: "OrMaskedInt64x8", argLength: 3, commutative: true}, {name: "PopCountInt64x8", argLength: 1, commutative: false}, + {name: "PopCountMaskedInt64x8", argLength: 2, commutative: false}, {name: "RotateLeftInt64x8", argLength: 2, commutative: false}, + {name: "RotateLeftMaskedInt64x8", argLength: 3, commutative: false}, {name: "RotateRightInt64x8", argLength: 2, commutative: false}, + {name: "RotateRightMaskedInt64x8", argLength: 3, commutative: false}, {name: "ShiftAllLeftInt64x8", argLength: 2, commutative: false}, + {name: "ShiftAllLeftMaskedInt64x8", argLength: 3, commutative: false}, {name: "ShiftAllRightInt64x8", argLength: 2, commutative: false}, + {name: "ShiftAllRightMaskedInt64x8", argLength: 3, commutative: false}, {name: "ShiftAllRightSignExtendedInt64x8", argLength: 2, commutative: false}, + {name: "ShiftAllRightSignExtendedMaskedInt64x8", argLength: 3, commutative: false}, {name: "ShiftLeftInt64x8", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromInt64x8", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedInt64x8", argLength: 4, commutative: false}, + {name: "ShiftLeftMaskedInt64x8", argLength: 3, commutative: false}, {name: "ShiftRightInt64x8", argLength: 2, commutative: false}, {name: "ShiftRightAndFillUpperFromInt64x8", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedInt64x8", argLength: 4, commutative: false}, + {name: "ShiftRightMaskedInt64x8", argLength: 3, commutative: false}, {name: "ShiftRightSignExtendedInt64x8", argLength: 2, commutative: false}, + {name: "ShiftRightSignExtendedMaskedInt64x8", argLength: 3, commutative: false}, {name: "SubInt64x8", argLength: 2, commutative: false}, + {name: "SubMaskedInt64x8", argLength: 3, commutative: false}, {name: "XorInt64x8", argLength: 2, commutative: true}, + {name: "XorMaskedInt64x8", argLength: 3, commutative: true}, {name: "AbsoluteInt8x16", argLength: 1, commutative: false}, + {name: "AbsoluteMaskedInt8x16", argLength: 2, commutative: false}, {name: "AddInt8x16", argLength: 2, commutative: true}, + {name: "AddMaskedInt8x16", argLength: 3, commutative: true}, {name: "AndInt8x16", argLength: 2, commutative: true}, {name: "AndNotInt8x16", argLength: 2, commutative: false}, {name: "EqualInt8x16", argLength: 2, commutative: true}, + {name: "EqualMaskedInt8x16", argLength: 3, commutative: true}, {name: "GreaterInt8x16", argLength: 2, commutative: false}, {name: "GreaterEqualInt8x16", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedInt8x16", argLength: 3, commutative: false}, + {name: "GreaterMaskedInt8x16", argLength: 
3, commutative: false}, {name: "LessInt8x16", argLength: 2, commutative: false}, {name: "LessEqualInt8x16", argLength: 2, commutative: false}, - {name: "MaskedAbsoluteInt8x16", argLength: 2, commutative: false}, - {name: "MaskedAddInt8x16", argLength: 3, commutative: true}, - {name: "MaskedEqualInt8x16", argLength: 3, commutative: true}, - {name: "MaskedGreaterInt8x16", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualInt8x16", argLength: 3, commutative: false}, - {name: "MaskedLessInt8x16", argLength: 3, commutative: false}, - {name: "MaskedLessEqualInt8x16", argLength: 3, commutative: false}, - {name: "MaskedMaxInt8x16", argLength: 3, commutative: true}, - {name: "MaskedMinInt8x16", argLength: 3, commutative: true}, - {name: "MaskedNotEqualInt8x16", argLength: 3, commutative: true}, - {name: "MaskedPopCountInt8x16", argLength: 2, commutative: false}, - {name: "MaskedSaturatedAddInt8x16", argLength: 3, commutative: true}, - {name: "MaskedSaturatedSubInt8x16", argLength: 3, commutative: false}, - {name: "MaskedSubInt8x16", argLength: 3, commutative: false}, + {name: "LessEqualMaskedInt8x16", argLength: 3, commutative: false}, + {name: "LessMaskedInt8x16", argLength: 3, commutative: false}, {name: "MaxInt8x16", argLength: 2, commutative: true}, + {name: "MaxMaskedInt8x16", argLength: 3, commutative: true}, {name: "MinInt8x16", argLength: 2, commutative: true}, + {name: "MinMaskedInt8x16", argLength: 3, commutative: true}, {name: "NotEqualInt8x16", argLength: 2, commutative: true}, + {name: "NotEqualMaskedInt8x16", argLength: 3, commutative: true}, {name: "OrInt8x16", argLength: 2, commutative: true}, {name: "PopCountInt8x16", argLength: 1, commutative: false}, + {name: "PopCountMaskedInt8x16", argLength: 2, commutative: false}, {name: "SaturatedAddInt8x16", argLength: 2, commutative: true}, + {name: "SaturatedAddMaskedInt8x16", argLength: 3, commutative: true}, {name: "SaturatedSubInt8x16", argLength: 2, commutative: false}, + {name: "SaturatedSubMaskedInt8x16", argLength: 3, commutative: false}, {name: "SignInt8x16", argLength: 2, commutative: false}, {name: "SubInt8x16", argLength: 2, commutative: false}, + {name: "SubMaskedInt8x16", argLength: 3, commutative: false}, {name: "XorInt8x16", argLength: 2, commutative: true}, {name: "AbsoluteInt8x32", argLength: 1, commutative: false}, + {name: "AbsoluteMaskedInt8x32", argLength: 2, commutative: false}, {name: "AddInt8x32", argLength: 2, commutative: true}, + {name: "AddMaskedInt8x32", argLength: 3, commutative: true}, {name: "AndInt8x32", argLength: 2, commutative: true}, {name: "AndNotInt8x32", argLength: 2, commutative: false}, {name: "EqualInt8x32", argLength: 2, commutative: true}, + {name: "EqualMaskedInt8x32", argLength: 3, commutative: true}, {name: "GreaterInt8x32", argLength: 2, commutative: false}, {name: "GreaterEqualInt8x32", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedInt8x32", argLength: 3, commutative: false}, + {name: "GreaterMaskedInt8x32", argLength: 3, commutative: false}, {name: "LessInt8x32", argLength: 2, commutative: false}, {name: "LessEqualInt8x32", argLength: 2, commutative: false}, - {name: "MaskedAbsoluteInt8x32", argLength: 2, commutative: false}, - {name: "MaskedAddInt8x32", argLength: 3, commutative: true}, - {name: "MaskedEqualInt8x32", argLength: 3, commutative: true}, - {name: "MaskedGreaterInt8x32", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualInt8x32", argLength: 3, commutative: false}, - {name: "MaskedLessInt8x32", argLength: 3, commutative: false}, - 
{name: "MaskedLessEqualInt8x32", argLength: 3, commutative: false}, - {name: "MaskedMaxInt8x32", argLength: 3, commutative: true}, - {name: "MaskedMinInt8x32", argLength: 3, commutative: true}, - {name: "MaskedNotEqualInt8x32", argLength: 3, commutative: true}, - {name: "MaskedPopCountInt8x32", argLength: 2, commutative: false}, - {name: "MaskedSaturatedAddInt8x32", argLength: 3, commutative: true}, - {name: "MaskedSaturatedSubInt8x32", argLength: 3, commutative: false}, - {name: "MaskedSubInt8x32", argLength: 3, commutative: false}, + {name: "LessEqualMaskedInt8x32", argLength: 3, commutative: false}, + {name: "LessMaskedInt8x32", argLength: 3, commutative: false}, {name: "MaxInt8x32", argLength: 2, commutative: true}, + {name: "MaxMaskedInt8x32", argLength: 3, commutative: true}, {name: "MinInt8x32", argLength: 2, commutative: true}, + {name: "MinMaskedInt8x32", argLength: 3, commutative: true}, {name: "NotEqualInt8x32", argLength: 2, commutative: true}, + {name: "NotEqualMaskedInt8x32", argLength: 3, commutative: true}, {name: "OrInt8x32", argLength: 2, commutative: true}, {name: "PopCountInt8x32", argLength: 1, commutative: false}, + {name: "PopCountMaskedInt8x32", argLength: 2, commutative: false}, {name: "SaturatedAddInt8x32", argLength: 2, commutative: true}, + {name: "SaturatedAddMaskedInt8x32", argLength: 3, commutative: true}, {name: "SaturatedSubInt8x32", argLength: 2, commutative: false}, + {name: "SaturatedSubMaskedInt8x32", argLength: 3, commutative: false}, {name: "SignInt8x32", argLength: 2, commutative: false}, {name: "SubInt8x32", argLength: 2, commutative: false}, + {name: "SubMaskedInt8x32", argLength: 3, commutative: false}, {name: "XorInt8x32", argLength: 2, commutative: true}, {name: "AbsoluteInt8x64", argLength: 1, commutative: false}, + {name: "AbsoluteMaskedInt8x64", argLength: 2, commutative: false}, {name: "AddInt8x64", argLength: 2, commutative: true}, + {name: "AddMaskedInt8x64", argLength: 3, commutative: true}, {name: "EqualInt8x64", argLength: 2, commutative: true}, + {name: "EqualMaskedInt8x64", argLength: 3, commutative: true}, {name: "GreaterInt8x64", argLength: 2, commutative: false}, {name: "GreaterEqualInt8x64", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedInt8x64", argLength: 3, commutative: false}, + {name: "GreaterMaskedInt8x64", argLength: 3, commutative: false}, {name: "LessInt8x64", argLength: 2, commutative: false}, {name: "LessEqualInt8x64", argLength: 2, commutative: false}, - {name: "MaskedAbsoluteInt8x64", argLength: 2, commutative: false}, - {name: "MaskedAddInt8x64", argLength: 3, commutative: true}, - {name: "MaskedEqualInt8x64", argLength: 3, commutative: true}, - {name: "MaskedGreaterInt8x64", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualInt8x64", argLength: 3, commutative: false}, - {name: "MaskedLessInt8x64", argLength: 3, commutative: false}, - {name: "MaskedLessEqualInt8x64", argLength: 3, commutative: false}, - {name: "MaskedMaxInt8x64", argLength: 3, commutative: true}, - {name: "MaskedMinInt8x64", argLength: 3, commutative: true}, - {name: "MaskedNotEqualInt8x64", argLength: 3, commutative: true}, - {name: "MaskedPopCountInt8x64", argLength: 2, commutative: false}, - {name: "MaskedSaturatedAddInt8x64", argLength: 3, commutative: true}, - {name: "MaskedSaturatedSubInt8x64", argLength: 3, commutative: false}, - {name: "MaskedSubInt8x64", argLength: 3, commutative: false}, + {name: "LessEqualMaskedInt8x64", argLength: 3, commutative: false}, + {name: "LessMaskedInt8x64", argLength: 3, 
commutative: false}, {name: "MaxInt8x64", argLength: 2, commutative: true}, + {name: "MaxMaskedInt8x64", argLength: 3, commutative: true}, {name: "MinInt8x64", argLength: 2, commutative: true}, + {name: "MinMaskedInt8x64", argLength: 3, commutative: true}, {name: "NotEqualInt8x64", argLength: 2, commutative: true}, + {name: "NotEqualMaskedInt8x64", argLength: 3, commutative: true}, {name: "PopCountInt8x64", argLength: 1, commutative: false}, + {name: "PopCountMaskedInt8x64", argLength: 2, commutative: false}, {name: "SaturatedAddInt8x64", argLength: 2, commutative: true}, + {name: "SaturatedAddMaskedInt8x64", argLength: 3, commutative: true}, {name: "SaturatedSubInt8x64", argLength: 2, commutative: false}, + {name: "SaturatedSubMaskedInt8x64", argLength: 3, commutative: false}, {name: "SubInt8x64", argLength: 2, commutative: false}, + {name: "SubMaskedInt8x64", argLength: 3, commutative: false}, {name: "AddUint16x16", argLength: 2, commutative: true}, + {name: "AddMaskedUint16x16", argLength: 3, commutative: true}, {name: "AndUint16x16", argLength: 2, commutative: true}, {name: "AndNotUint16x16", argLength: 2, commutative: false}, {name: "AverageUint16x16", argLength: 2, commutative: true}, + {name: "AverageMaskedUint16x16", argLength: 3, commutative: true}, {name: "EqualUint16x16", argLength: 2, commutative: true}, + {name: "EqualMaskedUint16x16", argLength: 3, commutative: true}, {name: "GreaterUint16x16", argLength: 2, commutative: false}, {name: "GreaterEqualUint16x16", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedUint16x16", argLength: 3, commutative: false}, + {name: "GreaterMaskedUint16x16", argLength: 3, commutative: false}, {name: "LessUint16x16", argLength: 2, commutative: false}, {name: "LessEqualUint16x16", argLength: 2, commutative: false}, - {name: "MaskedAddUint16x16", argLength: 3, commutative: true}, - {name: "MaskedAverageUint16x16", argLength: 3, commutative: true}, - {name: "MaskedEqualUint16x16", argLength: 3, commutative: true}, - {name: "MaskedGreaterUint16x16", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualUint16x16", argLength: 3, commutative: false}, - {name: "MaskedLessUint16x16", argLength: 3, commutative: false}, - {name: "MaskedLessEqualUint16x16", argLength: 3, commutative: false}, - {name: "MaskedMaxUint16x16", argLength: 3, commutative: true}, - {name: "MaskedMinUint16x16", argLength: 3, commutative: true}, - {name: "MaskedMulHighUint16x16", argLength: 3, commutative: true}, - {name: "MaskedNotEqualUint16x16", argLength: 3, commutative: true}, - {name: "MaskedPopCountUint16x16", argLength: 2, commutative: false}, - {name: "MaskedSaturatedAddUint16x16", argLength: 3, commutative: true}, - {name: "MaskedSaturatedSubUint16x16", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftUint16x16", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftAndFillUpperFromUint16x16", argLength: 4, commutative: false}, - {name: "MaskedShiftRightUint16x16", argLength: 3, commutative: false}, - {name: "MaskedShiftRightAndFillUpperFromUint16x16", argLength: 4, commutative: false}, - {name: "MaskedShiftRightSignExtendedUint16x16", argLength: 3, commutative: false}, - {name: "MaskedSubUint16x16", argLength: 3, commutative: false}, + {name: "LessEqualMaskedUint16x16", argLength: 3, commutative: false}, + {name: "LessMaskedUint16x16", argLength: 3, commutative: false}, {name: "MaxUint16x16", argLength: 2, commutative: true}, + {name: "MaxMaskedUint16x16", argLength: 3, commutative: true}, {name: "MinUint16x16", argLength: 2, 
commutative: true}, + {name: "MinMaskedUint16x16", argLength: 3, commutative: true}, {name: "MulHighUint16x16", argLength: 2, commutative: true}, + {name: "MulHighMaskedUint16x16", argLength: 3, commutative: true}, {name: "NotEqualUint16x16", argLength: 2, commutative: true}, + {name: "NotEqualMaskedUint16x16", argLength: 3, commutative: true}, {name: "OrUint16x16", argLength: 2, commutative: true}, {name: "PairwiseAddUint16x16", argLength: 2, commutative: false}, {name: "PairwiseSubUint16x16", argLength: 2, commutative: false}, {name: "PopCountUint16x16", argLength: 1, commutative: false}, + {name: "PopCountMaskedUint16x16", argLength: 2, commutative: false}, {name: "SaturatedAddUint16x16", argLength: 2, commutative: true}, + {name: "SaturatedAddMaskedUint16x16", argLength: 3, commutative: true}, {name: "SaturatedSubUint16x16", argLength: 2, commutative: false}, + {name: "SaturatedSubMaskedUint16x16", argLength: 3, commutative: false}, {name: "ShiftAllLeftUint16x16", argLength: 2, commutative: false}, {name: "ShiftAllRightUint16x16", argLength: 2, commutative: false}, {name: "ShiftLeftUint16x16", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromUint16x16", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedUint16x16", argLength: 4, commutative: false}, + {name: "ShiftLeftMaskedUint16x16", argLength: 3, commutative: false}, {name: "ShiftRightUint16x16", argLength: 2, commutative: false}, {name: "ShiftRightAndFillUpperFromUint16x16", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedUint16x16", argLength: 4, commutative: false}, + {name: "ShiftRightMaskedUint16x16", argLength: 3, commutative: false}, {name: "ShiftRightSignExtendedUint16x16", argLength: 2, commutative: false}, + {name: "ShiftRightSignExtendedMaskedUint16x16", argLength: 3, commutative: false}, {name: "SubUint16x16", argLength: 2, commutative: false}, + {name: "SubMaskedUint16x16", argLength: 3, commutative: false}, {name: "XorUint16x16", argLength: 2, commutative: true}, {name: "AddUint16x32", argLength: 2, commutative: true}, + {name: "AddMaskedUint16x32", argLength: 3, commutative: true}, {name: "AverageUint16x32", argLength: 2, commutative: true}, + {name: "AverageMaskedUint16x32", argLength: 3, commutative: true}, {name: "EqualUint16x32", argLength: 2, commutative: true}, + {name: "EqualMaskedUint16x32", argLength: 3, commutative: true}, {name: "GreaterUint16x32", argLength: 2, commutative: false}, {name: "GreaterEqualUint16x32", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedUint16x32", argLength: 3, commutative: false}, + {name: "GreaterMaskedUint16x32", argLength: 3, commutative: false}, {name: "LessUint16x32", argLength: 2, commutative: false}, {name: "LessEqualUint16x32", argLength: 2, commutative: false}, - {name: "MaskedAddUint16x32", argLength: 3, commutative: true}, - {name: "MaskedAverageUint16x32", argLength: 3, commutative: true}, - {name: "MaskedEqualUint16x32", argLength: 3, commutative: true}, - {name: "MaskedGreaterUint16x32", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualUint16x32", argLength: 3, commutative: false}, - {name: "MaskedLessUint16x32", argLength: 3, commutative: false}, - {name: "MaskedLessEqualUint16x32", argLength: 3, commutative: false}, - {name: "MaskedMaxUint16x32", argLength: 3, commutative: true}, - {name: "MaskedMinUint16x32", argLength: 3, commutative: true}, - {name: "MaskedMulHighUint16x32", argLength: 3, commutative: true}, - {name: "MaskedNotEqualUint16x32", argLength: 3, 
commutative: true}, - {name: "MaskedPopCountUint16x32", argLength: 2, commutative: false}, - {name: "MaskedSaturatedAddUint16x32", argLength: 3, commutative: true}, - {name: "MaskedSaturatedSubUint16x32", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftUint16x32", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftAndFillUpperFromUint16x32", argLength: 4, commutative: false}, - {name: "MaskedShiftRightUint16x32", argLength: 3, commutative: false}, - {name: "MaskedShiftRightAndFillUpperFromUint16x32", argLength: 4, commutative: false}, - {name: "MaskedShiftRightSignExtendedUint16x32", argLength: 3, commutative: false}, - {name: "MaskedSubUint16x32", argLength: 3, commutative: false}, + {name: "LessEqualMaskedUint16x32", argLength: 3, commutative: false}, + {name: "LessMaskedUint16x32", argLength: 3, commutative: false}, {name: "MaxUint16x32", argLength: 2, commutative: true}, + {name: "MaxMaskedUint16x32", argLength: 3, commutative: true}, {name: "MinUint16x32", argLength: 2, commutative: true}, + {name: "MinMaskedUint16x32", argLength: 3, commutative: true}, {name: "MulHighUint16x32", argLength: 2, commutative: true}, + {name: "MulHighMaskedUint16x32", argLength: 3, commutative: true}, {name: "NotEqualUint16x32", argLength: 2, commutative: true}, + {name: "NotEqualMaskedUint16x32", argLength: 3, commutative: true}, {name: "PopCountUint16x32", argLength: 1, commutative: false}, + {name: "PopCountMaskedUint16x32", argLength: 2, commutative: false}, {name: "SaturatedAddUint16x32", argLength: 2, commutative: true}, + {name: "SaturatedAddMaskedUint16x32", argLength: 3, commutative: true}, {name: "SaturatedSubUint16x32", argLength: 2, commutative: false}, + {name: "SaturatedSubMaskedUint16x32", argLength: 3, commutative: false}, {name: "ShiftLeftUint16x32", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromUint16x32", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedUint16x32", argLength: 4, commutative: false}, + {name: "ShiftLeftMaskedUint16x32", argLength: 3, commutative: false}, {name: "ShiftRightUint16x32", argLength: 2, commutative: false}, {name: "ShiftRightAndFillUpperFromUint16x32", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedUint16x32", argLength: 4, commutative: false}, + {name: "ShiftRightMaskedUint16x32", argLength: 3, commutative: false}, {name: "ShiftRightSignExtendedUint16x32", argLength: 2, commutative: false}, + {name: "ShiftRightSignExtendedMaskedUint16x32", argLength: 3, commutative: false}, {name: "SubUint16x32", argLength: 2, commutative: false}, + {name: "SubMaskedUint16x32", argLength: 3, commutative: false}, {name: "AddUint16x8", argLength: 2, commutative: true}, + {name: "AddMaskedUint16x8", argLength: 3, commutative: true}, {name: "AndUint16x8", argLength: 2, commutative: true}, {name: "AndNotUint16x8", argLength: 2, commutative: false}, {name: "AverageUint16x8", argLength: 2, commutative: true}, + {name: "AverageMaskedUint16x8", argLength: 3, commutative: true}, {name: "EqualUint16x8", argLength: 2, commutative: true}, + {name: "EqualMaskedUint16x8", argLength: 3, commutative: true}, {name: "GreaterUint16x8", argLength: 2, commutative: false}, {name: "GreaterEqualUint16x8", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedUint16x8", argLength: 3, commutative: false}, + {name: "GreaterMaskedUint16x8", argLength: 3, commutative: false}, {name: "LessUint16x8", argLength: 2, commutative: false}, {name: "LessEqualUint16x8", argLength: 2, commutative: false}, - 
{name: "MaskedAddUint16x8", argLength: 3, commutative: true}, - {name: "MaskedAverageUint16x8", argLength: 3, commutative: true}, - {name: "MaskedEqualUint16x8", argLength: 3, commutative: true}, - {name: "MaskedGreaterUint16x8", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualUint16x8", argLength: 3, commutative: false}, - {name: "MaskedLessUint16x8", argLength: 3, commutative: false}, - {name: "MaskedLessEqualUint16x8", argLength: 3, commutative: false}, - {name: "MaskedMaxUint16x8", argLength: 3, commutative: true}, - {name: "MaskedMinUint16x8", argLength: 3, commutative: true}, - {name: "MaskedMulHighUint16x8", argLength: 3, commutative: true}, - {name: "MaskedNotEqualUint16x8", argLength: 3, commutative: true}, - {name: "MaskedPopCountUint16x8", argLength: 2, commutative: false}, - {name: "MaskedSaturatedAddUint16x8", argLength: 3, commutative: true}, - {name: "MaskedSaturatedSubUint16x8", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftUint16x8", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftAndFillUpperFromUint16x8", argLength: 4, commutative: false}, - {name: "MaskedShiftRightUint16x8", argLength: 3, commutative: false}, - {name: "MaskedShiftRightAndFillUpperFromUint16x8", argLength: 4, commutative: false}, - {name: "MaskedShiftRightSignExtendedUint16x8", argLength: 3, commutative: false}, - {name: "MaskedSubUint16x8", argLength: 3, commutative: false}, + {name: "LessEqualMaskedUint16x8", argLength: 3, commutative: false}, + {name: "LessMaskedUint16x8", argLength: 3, commutative: false}, {name: "MaxUint16x8", argLength: 2, commutative: true}, + {name: "MaxMaskedUint16x8", argLength: 3, commutative: true}, {name: "MinUint16x8", argLength: 2, commutative: true}, + {name: "MinMaskedUint16x8", argLength: 3, commutative: true}, {name: "MulHighUint16x8", argLength: 2, commutative: true}, + {name: "MulHighMaskedUint16x8", argLength: 3, commutative: true}, {name: "NotEqualUint16x8", argLength: 2, commutative: true}, + {name: "NotEqualMaskedUint16x8", argLength: 3, commutative: true}, {name: "OrUint16x8", argLength: 2, commutative: true}, {name: "PairwiseAddUint16x8", argLength: 2, commutative: false}, {name: "PairwiseSubUint16x8", argLength: 2, commutative: false}, {name: "PopCountUint16x8", argLength: 1, commutative: false}, + {name: "PopCountMaskedUint16x8", argLength: 2, commutative: false}, {name: "SaturatedAddUint16x8", argLength: 2, commutative: true}, + {name: "SaturatedAddMaskedUint16x8", argLength: 3, commutative: true}, {name: "SaturatedSubUint16x8", argLength: 2, commutative: false}, + {name: "SaturatedSubMaskedUint16x8", argLength: 3, commutative: false}, {name: "ShiftAllLeftUint16x8", argLength: 2, commutative: false}, {name: "ShiftAllRightUint16x8", argLength: 2, commutative: false}, {name: "ShiftLeftUint16x8", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromUint16x8", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedUint16x8", argLength: 4, commutative: false}, + {name: "ShiftLeftMaskedUint16x8", argLength: 3, commutative: false}, {name: "ShiftRightUint16x8", argLength: 2, commutative: false}, {name: "ShiftRightAndFillUpperFromUint16x8", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedUint16x8", argLength: 4, commutative: false}, + {name: "ShiftRightMaskedUint16x8", argLength: 3, commutative: false}, {name: "ShiftRightSignExtendedUint16x8", argLength: 2, commutative: false}, + {name: "ShiftRightSignExtendedMaskedUint16x8", argLength: 3, commutative: false}, 
{name: "SubUint16x8", argLength: 2, commutative: false}, + {name: "SubMaskedUint16x8", argLength: 3, commutative: false}, {name: "XorUint16x8", argLength: 2, commutative: true}, {name: "AddUint32x16", argLength: 2, commutative: true}, + {name: "AddMaskedUint32x16", argLength: 3, commutative: true}, {name: "AndUint32x16", argLength: 2, commutative: true}, + {name: "AndMaskedUint32x16", argLength: 3, commutative: true}, {name: "AndNotUint32x16", argLength: 2, commutative: false}, + {name: "AndNotMaskedUint32x16", argLength: 3, commutative: false}, {name: "EqualUint32x16", argLength: 2, commutative: true}, + {name: "EqualMaskedUint32x16", argLength: 3, commutative: true}, {name: "GreaterUint32x16", argLength: 2, commutative: false}, {name: "GreaterEqualUint32x16", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedUint32x16", argLength: 3, commutative: false}, + {name: "GreaterMaskedUint32x16", argLength: 3, commutative: false}, {name: "LessUint32x16", argLength: 2, commutative: false}, {name: "LessEqualUint32x16", argLength: 2, commutative: false}, - {name: "MaskedAddUint32x16", argLength: 3, commutative: true}, - {name: "MaskedAndUint32x16", argLength: 3, commutative: true}, - {name: "MaskedAndNotUint32x16", argLength: 3, commutative: false}, - {name: "MaskedEqualUint32x16", argLength: 3, commutative: true}, - {name: "MaskedGreaterUint32x16", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualUint32x16", argLength: 3, commutative: false}, - {name: "MaskedLessUint32x16", argLength: 3, commutative: false}, - {name: "MaskedLessEqualUint32x16", argLength: 3, commutative: false}, - {name: "MaskedMaxUint32x16", argLength: 3, commutative: true}, - {name: "MaskedMinUint32x16", argLength: 3, commutative: true}, - {name: "MaskedNotEqualUint32x16", argLength: 3, commutative: true}, - {name: "MaskedOrUint32x16", argLength: 3, commutative: true}, - {name: "MaskedPopCountUint32x16", argLength: 2, commutative: false}, - {name: "MaskedRotateLeftUint32x16", argLength: 3, commutative: false}, - {name: "MaskedRotateRightUint32x16", argLength: 3, commutative: false}, - {name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16", argLength: 4, commutative: false}, - {name: "MaskedShiftLeftUint32x16", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftAndFillUpperFromUint32x16", argLength: 4, commutative: false}, - {name: "MaskedShiftRightUint32x16", argLength: 3, commutative: false}, - {name: "MaskedShiftRightAndFillUpperFromUint32x16", argLength: 4, commutative: false}, - {name: "MaskedShiftRightSignExtendedUint32x16", argLength: 3, commutative: false}, - {name: "MaskedSubUint32x16", argLength: 3, commutative: false}, - {name: "MaskedUnsignedSignedQuadDotProdAccumulateUint32x16", argLength: 4, commutative: false}, - {name: "MaskedXorUint32x16", argLength: 3, commutative: true}, + {name: "LessEqualMaskedUint32x16", argLength: 3, commutative: false}, + {name: "LessMaskedUint32x16", argLength: 3, commutative: false}, {name: "MaxUint32x16", argLength: 2, commutative: true}, + {name: "MaxMaskedUint32x16", argLength: 3, commutative: true}, {name: "MinUint32x16", argLength: 2, commutative: true}, + {name: "MinMaskedUint32x16", argLength: 3, commutative: true}, {name: "NotEqualUint32x16", argLength: 2, commutative: true}, + {name: "NotEqualMaskedUint32x16", argLength: 3, commutative: true}, {name: "OrUint32x16", argLength: 2, commutative: true}, + {name: "OrMaskedUint32x16", argLength: 3, commutative: true}, {name: "PopCountUint32x16", argLength: 1, commutative: false}, + 
{name: "PopCountMaskedUint32x16", argLength: 2, commutative: false}, {name: "RotateLeftUint32x16", argLength: 2, commutative: false}, + {name: "RotateLeftMaskedUint32x16", argLength: 3, commutative: false}, {name: "RotateRightUint32x16", argLength: 2, commutative: false}, + {name: "RotateRightMaskedUint32x16", argLength: 3, commutative: false}, {name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x16", argLength: 3, commutative: false}, + {name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x16", argLength: 4, commutative: false}, {name: "ShiftLeftUint32x16", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromUint32x16", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedUint32x16", argLength: 4, commutative: false}, + {name: "ShiftLeftMaskedUint32x16", argLength: 3, commutative: false}, {name: "ShiftRightUint32x16", argLength: 2, commutative: false}, {name: "ShiftRightAndFillUpperFromUint32x16", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedUint32x16", argLength: 4, commutative: false}, + {name: "ShiftRightMaskedUint32x16", argLength: 3, commutative: false}, {name: "ShiftRightSignExtendedUint32x16", argLength: 2, commutative: false}, + {name: "ShiftRightSignExtendedMaskedUint32x16", argLength: 3, commutative: false}, {name: "SubUint32x16", argLength: 2, commutative: false}, + {name: "SubMaskedUint32x16", argLength: 3, commutative: false}, {name: "UnsignedSignedQuadDotProdAccumulateUint32x16", argLength: 3, commutative: false}, + {name: "UnsignedSignedQuadDotProdAccumulateMaskedUint32x16", argLength: 4, commutative: false}, {name: "XorUint32x16", argLength: 2, commutative: true}, + {name: "XorMaskedUint32x16", argLength: 3, commutative: true}, {name: "AddUint32x4", argLength: 2, commutative: true}, + {name: "AddMaskedUint32x4", argLength: 3, commutative: true}, {name: "AndUint32x4", argLength: 2, commutative: true}, + {name: "AndMaskedUint32x4", argLength: 3, commutative: true}, {name: "AndNotUint32x4", argLength: 2, commutative: false}, + {name: "AndNotMaskedUint32x4", argLength: 3, commutative: false}, {name: "EqualUint32x4", argLength: 2, commutative: true}, + {name: "EqualMaskedUint32x4", argLength: 3, commutative: true}, {name: "GreaterUint32x4", argLength: 2, commutative: false}, {name: "GreaterEqualUint32x4", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedUint32x4", argLength: 3, commutative: false}, + {name: "GreaterMaskedUint32x4", argLength: 3, commutative: false}, {name: "LessUint32x4", argLength: 2, commutative: false}, {name: "LessEqualUint32x4", argLength: 2, commutative: false}, - {name: "MaskedAddUint32x4", argLength: 3, commutative: true}, - {name: "MaskedAndUint32x4", argLength: 3, commutative: true}, - {name: "MaskedAndNotUint32x4", argLength: 3, commutative: false}, - {name: "MaskedEqualUint32x4", argLength: 3, commutative: true}, - {name: "MaskedGreaterUint32x4", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualUint32x4", argLength: 3, commutative: false}, - {name: "MaskedLessUint32x4", argLength: 3, commutative: false}, - {name: "MaskedLessEqualUint32x4", argLength: 3, commutative: false}, - {name: "MaskedMaxUint32x4", argLength: 3, commutative: true}, - {name: "MaskedMinUint32x4", argLength: 3, commutative: true}, - {name: "MaskedNotEqualUint32x4", argLength: 3, commutative: true}, - {name: "MaskedOrUint32x4", argLength: 3, commutative: true}, - {name: "MaskedPopCountUint32x4", argLength: 2, commutative: false}, - {name: "MaskedRotateLeftUint32x4", 
argLength: 3, commutative: false}, - {name: "MaskedRotateRightUint32x4", argLength: 3, commutative: false}, - {name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4", argLength: 4, commutative: false}, - {name: "MaskedShiftLeftUint32x4", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftAndFillUpperFromUint32x4", argLength: 4, commutative: false}, - {name: "MaskedShiftRightUint32x4", argLength: 3, commutative: false}, - {name: "MaskedShiftRightAndFillUpperFromUint32x4", argLength: 4, commutative: false}, - {name: "MaskedShiftRightSignExtendedUint32x4", argLength: 3, commutative: false}, - {name: "MaskedSubUint32x4", argLength: 3, commutative: false}, - {name: "MaskedUnsignedSignedQuadDotProdAccumulateUint32x4", argLength: 4, commutative: false}, - {name: "MaskedXorUint32x4", argLength: 3, commutative: true}, + {name: "LessEqualMaskedUint32x4", argLength: 3, commutative: false}, + {name: "LessMaskedUint32x4", argLength: 3, commutative: false}, {name: "MaxUint32x4", argLength: 2, commutative: true}, + {name: "MaxMaskedUint32x4", argLength: 3, commutative: true}, {name: "MinUint32x4", argLength: 2, commutative: true}, + {name: "MinMaskedUint32x4", argLength: 3, commutative: true}, {name: "MulEvenWidenUint32x4", argLength: 2, commutative: true}, {name: "NotEqualUint32x4", argLength: 2, commutative: true}, + {name: "NotEqualMaskedUint32x4", argLength: 3, commutative: true}, {name: "OrUint32x4", argLength: 2, commutative: true}, + {name: "OrMaskedUint32x4", argLength: 3, commutative: true}, {name: "PairwiseAddUint32x4", argLength: 2, commutative: false}, {name: "PairwiseSubUint32x4", argLength: 2, commutative: false}, {name: "PopCountUint32x4", argLength: 1, commutative: false}, + {name: "PopCountMaskedUint32x4", argLength: 2, commutative: false}, {name: "RotateLeftUint32x4", argLength: 2, commutative: false}, + {name: "RotateLeftMaskedUint32x4", argLength: 3, commutative: false}, {name: "RotateRightUint32x4", argLength: 2, commutative: false}, + {name: "RotateRightMaskedUint32x4", argLength: 3, commutative: false}, {name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x4", argLength: 3, commutative: false}, + {name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x4", argLength: 4, commutative: false}, {name: "ShiftAllLeftUint32x4", argLength: 2, commutative: false}, {name: "ShiftAllRightUint32x4", argLength: 2, commutative: false}, {name: "ShiftLeftUint32x4", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromUint32x4", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedUint32x4", argLength: 4, commutative: false}, + {name: "ShiftLeftMaskedUint32x4", argLength: 3, commutative: false}, {name: "ShiftRightUint32x4", argLength: 2, commutative: false}, {name: "ShiftRightAndFillUpperFromUint32x4", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedUint32x4", argLength: 4, commutative: false}, + {name: "ShiftRightMaskedUint32x4", argLength: 3, commutative: false}, {name: "ShiftRightSignExtendedUint32x4", argLength: 2, commutative: false}, + {name: "ShiftRightSignExtendedMaskedUint32x4", argLength: 3, commutative: false}, {name: "SubUint32x4", argLength: 2, commutative: false}, + {name: "SubMaskedUint32x4", argLength: 3, commutative: false}, {name: "UnsignedSignedQuadDotProdAccumulateUint32x4", argLength: 3, commutative: false}, + {name: "UnsignedSignedQuadDotProdAccumulateMaskedUint32x4", argLength: 4, commutative: false}, {name: "XorUint32x4", argLength: 2, commutative: true}, + {name: 
"XorMaskedUint32x4", argLength: 3, commutative: true}, {name: "AddUint32x8", argLength: 2, commutative: true}, + {name: "AddMaskedUint32x8", argLength: 3, commutative: true}, {name: "AndUint32x8", argLength: 2, commutative: true}, + {name: "AndMaskedUint32x8", argLength: 3, commutative: true}, {name: "AndNotUint32x8", argLength: 2, commutative: false}, + {name: "AndNotMaskedUint32x8", argLength: 3, commutative: false}, {name: "EqualUint32x8", argLength: 2, commutative: true}, + {name: "EqualMaskedUint32x8", argLength: 3, commutative: true}, {name: "GreaterUint32x8", argLength: 2, commutative: false}, {name: "GreaterEqualUint32x8", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedUint32x8", argLength: 3, commutative: false}, + {name: "GreaterMaskedUint32x8", argLength: 3, commutative: false}, {name: "LessUint32x8", argLength: 2, commutative: false}, {name: "LessEqualUint32x8", argLength: 2, commutative: false}, - {name: "MaskedAddUint32x8", argLength: 3, commutative: true}, - {name: "MaskedAndUint32x8", argLength: 3, commutative: true}, - {name: "MaskedAndNotUint32x8", argLength: 3, commutative: false}, - {name: "MaskedEqualUint32x8", argLength: 3, commutative: true}, - {name: "MaskedGreaterUint32x8", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualUint32x8", argLength: 3, commutative: false}, - {name: "MaskedLessUint32x8", argLength: 3, commutative: false}, - {name: "MaskedLessEqualUint32x8", argLength: 3, commutative: false}, - {name: "MaskedMaxUint32x8", argLength: 3, commutative: true}, - {name: "MaskedMinUint32x8", argLength: 3, commutative: true}, - {name: "MaskedNotEqualUint32x8", argLength: 3, commutative: true}, - {name: "MaskedOrUint32x8", argLength: 3, commutative: true}, - {name: "MaskedPopCountUint32x8", argLength: 2, commutative: false}, - {name: "MaskedRotateLeftUint32x8", argLength: 3, commutative: false}, - {name: "MaskedRotateRightUint32x8", argLength: 3, commutative: false}, - {name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8", argLength: 4, commutative: false}, - {name: "MaskedShiftLeftUint32x8", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftAndFillUpperFromUint32x8", argLength: 4, commutative: false}, - {name: "MaskedShiftRightUint32x8", argLength: 3, commutative: false}, - {name: "MaskedShiftRightAndFillUpperFromUint32x8", argLength: 4, commutative: false}, - {name: "MaskedShiftRightSignExtendedUint32x8", argLength: 3, commutative: false}, - {name: "MaskedSubUint32x8", argLength: 3, commutative: false}, - {name: "MaskedUnsignedSignedQuadDotProdAccumulateUint32x8", argLength: 4, commutative: false}, - {name: "MaskedXorUint32x8", argLength: 3, commutative: true}, + {name: "LessEqualMaskedUint32x8", argLength: 3, commutative: false}, + {name: "LessMaskedUint32x8", argLength: 3, commutative: false}, {name: "MaxUint32x8", argLength: 2, commutative: true}, + {name: "MaxMaskedUint32x8", argLength: 3, commutative: true}, {name: "MinUint32x8", argLength: 2, commutative: true}, + {name: "MinMaskedUint32x8", argLength: 3, commutative: true}, {name: "MulEvenWidenUint32x8", argLength: 2, commutative: true}, {name: "NotEqualUint32x8", argLength: 2, commutative: true}, + {name: "NotEqualMaskedUint32x8", argLength: 3, commutative: true}, {name: "OrUint32x8", argLength: 2, commutative: true}, + {name: "OrMaskedUint32x8", argLength: 3, commutative: true}, {name: "PairwiseAddUint32x8", argLength: 2, commutative: false}, {name: "PairwiseSubUint32x8", argLength: 2, commutative: false}, {name: "PopCountUint32x8", argLength: 1, 
commutative: false}, + {name: "PopCountMaskedUint32x8", argLength: 2, commutative: false}, {name: "RotateLeftUint32x8", argLength: 2, commutative: false}, + {name: "RotateLeftMaskedUint32x8", argLength: 3, commutative: false}, {name: "RotateRightUint32x8", argLength: 2, commutative: false}, + {name: "RotateRightMaskedUint32x8", argLength: 3, commutative: false}, {name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x8", argLength: 3, commutative: false}, + {name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x8", argLength: 4, commutative: false}, {name: "ShiftAllLeftUint32x8", argLength: 2, commutative: false}, {name: "ShiftAllRightUint32x8", argLength: 2, commutative: false}, {name: "ShiftLeftUint32x8", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromUint32x8", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedUint32x8", argLength: 4, commutative: false}, + {name: "ShiftLeftMaskedUint32x8", argLength: 3, commutative: false}, {name: "ShiftRightUint32x8", argLength: 2, commutative: false}, {name: "ShiftRightAndFillUpperFromUint32x8", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedUint32x8", argLength: 4, commutative: false}, + {name: "ShiftRightMaskedUint32x8", argLength: 3, commutative: false}, {name: "ShiftRightSignExtendedUint32x8", argLength: 2, commutative: false}, + {name: "ShiftRightSignExtendedMaskedUint32x8", argLength: 3, commutative: false}, {name: "SubUint32x8", argLength: 2, commutative: false}, + {name: "SubMaskedUint32x8", argLength: 3, commutative: false}, {name: "UnsignedSignedQuadDotProdAccumulateUint32x8", argLength: 3, commutative: false}, + {name: "UnsignedSignedQuadDotProdAccumulateMaskedUint32x8", argLength: 4, commutative: false}, {name: "XorUint32x8", argLength: 2, commutative: true}, + {name: "XorMaskedUint32x8", argLength: 3, commutative: true}, {name: "AddUint64x2", argLength: 2, commutative: true}, + {name: "AddMaskedUint64x2", argLength: 3, commutative: true}, {name: "AndUint64x2", argLength: 2, commutative: true}, + {name: "AndMaskedUint64x2", argLength: 3, commutative: true}, {name: "AndNotUint64x2", argLength: 2, commutative: false}, + {name: "AndNotMaskedUint64x2", argLength: 3, commutative: false}, {name: "EqualUint64x2", argLength: 2, commutative: true}, + {name: "EqualMaskedUint64x2", argLength: 3, commutative: true}, {name: "GreaterUint64x2", argLength: 2, commutative: false}, {name: "GreaterEqualUint64x2", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedUint64x2", argLength: 3, commutative: false}, + {name: "GreaterMaskedUint64x2", argLength: 3, commutative: false}, {name: "LessUint64x2", argLength: 2, commutative: false}, {name: "LessEqualUint64x2", argLength: 2, commutative: false}, - {name: "MaskedAddUint64x2", argLength: 3, commutative: true}, - {name: "MaskedAndUint64x2", argLength: 3, commutative: true}, - {name: "MaskedAndNotUint64x2", argLength: 3, commutative: false}, - {name: "MaskedEqualUint64x2", argLength: 3, commutative: true}, - {name: "MaskedGreaterUint64x2", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualUint64x2", argLength: 3, commutative: false}, - {name: "MaskedLessUint64x2", argLength: 3, commutative: false}, - {name: "MaskedLessEqualUint64x2", argLength: 3, commutative: false}, - {name: "MaskedMaxUint64x2", argLength: 3, commutative: true}, - {name: "MaskedMinUint64x2", argLength: 3, commutative: true}, - {name: "MaskedMulEvenWidenUint64x2", argLength: 3, commutative: true}, - {name: "MaskedNotEqualUint64x2", 
argLength: 3, commutative: true}, - {name: "MaskedOrUint64x2", argLength: 3, commutative: true}, - {name: "MaskedPopCountUint64x2", argLength: 2, commutative: false}, - {name: "MaskedRotateLeftUint64x2", argLength: 3, commutative: false}, - {name: "MaskedRotateRightUint64x2", argLength: 3, commutative: false}, - {name: "MaskedShiftAllLeftUint64x2", argLength: 3, commutative: false}, - {name: "MaskedShiftAllRightUint64x2", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftUint64x2", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftAndFillUpperFromUint64x2", argLength: 4, commutative: false}, - {name: "MaskedShiftRightUint64x2", argLength: 3, commutative: false}, - {name: "MaskedShiftRightAndFillUpperFromUint64x2", argLength: 4, commutative: false}, - {name: "MaskedShiftRightSignExtendedUint64x2", argLength: 3, commutative: false}, - {name: "MaskedSubUint64x2", argLength: 3, commutative: false}, - {name: "MaskedXorUint64x2", argLength: 3, commutative: true}, + {name: "LessEqualMaskedUint64x2", argLength: 3, commutative: false}, + {name: "LessMaskedUint64x2", argLength: 3, commutative: false}, {name: "MaxUint64x2", argLength: 2, commutative: true}, + {name: "MaxMaskedUint64x2", argLength: 3, commutative: true}, {name: "MinUint64x2", argLength: 2, commutative: true}, + {name: "MinMaskedUint64x2", argLength: 3, commutative: true}, {name: "MulEvenWidenUint64x2", argLength: 2, commutative: true}, + {name: "MulEvenWidenMaskedUint64x2", argLength: 3, commutative: true}, {name: "NotEqualUint64x2", argLength: 2, commutative: true}, + {name: "NotEqualMaskedUint64x2", argLength: 3, commutative: true}, {name: "OrUint64x2", argLength: 2, commutative: true}, + {name: "OrMaskedUint64x2", argLength: 3, commutative: true}, {name: "PopCountUint64x2", argLength: 1, commutative: false}, + {name: "PopCountMaskedUint64x2", argLength: 2, commutative: false}, {name: "RotateLeftUint64x2", argLength: 2, commutative: false}, + {name: "RotateLeftMaskedUint64x2", argLength: 3, commutative: false}, {name: "RotateRightUint64x2", argLength: 2, commutative: false}, + {name: "RotateRightMaskedUint64x2", argLength: 3, commutative: false}, {name: "ShiftAllLeftUint64x2", argLength: 2, commutative: false}, + {name: "ShiftAllLeftMaskedUint64x2", argLength: 3, commutative: false}, {name: "ShiftAllRightUint64x2", argLength: 2, commutative: false}, + {name: "ShiftAllRightMaskedUint64x2", argLength: 3, commutative: false}, {name: "ShiftLeftUint64x2", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromUint64x2", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedUint64x2", argLength: 4, commutative: false}, + {name: "ShiftLeftMaskedUint64x2", argLength: 3, commutative: false}, {name: "ShiftRightUint64x2", argLength: 2, commutative: false}, {name: "ShiftRightAndFillUpperFromUint64x2", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedUint64x2", argLength: 4, commutative: false}, + {name: "ShiftRightMaskedUint64x2", argLength: 3, commutative: false}, {name: "ShiftRightSignExtendedUint64x2", argLength: 2, commutative: false}, + {name: "ShiftRightSignExtendedMaskedUint64x2", argLength: 3, commutative: false}, {name: "SubUint64x2", argLength: 2, commutative: false}, + {name: "SubMaskedUint64x2", argLength: 3, commutative: false}, {name: "XorUint64x2", argLength: 2, commutative: true}, + {name: "XorMaskedUint64x2", argLength: 3, commutative: true}, {name: "AddUint64x4", argLength: 2, commutative: true}, + {name: "AddMaskedUint64x4", argLength: 3, 
commutative: true}, {name: "AndUint64x4", argLength: 2, commutative: true}, + {name: "AndMaskedUint64x4", argLength: 3, commutative: true}, {name: "AndNotUint64x4", argLength: 2, commutative: false}, + {name: "AndNotMaskedUint64x4", argLength: 3, commutative: false}, {name: "EqualUint64x4", argLength: 2, commutative: true}, + {name: "EqualMaskedUint64x4", argLength: 3, commutative: true}, {name: "GreaterUint64x4", argLength: 2, commutative: false}, {name: "GreaterEqualUint64x4", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedUint64x4", argLength: 3, commutative: false}, + {name: "GreaterMaskedUint64x4", argLength: 3, commutative: false}, {name: "LessUint64x4", argLength: 2, commutative: false}, {name: "LessEqualUint64x4", argLength: 2, commutative: false}, - {name: "MaskedAddUint64x4", argLength: 3, commutative: true}, - {name: "MaskedAndUint64x4", argLength: 3, commutative: true}, - {name: "MaskedAndNotUint64x4", argLength: 3, commutative: false}, - {name: "MaskedEqualUint64x4", argLength: 3, commutative: true}, - {name: "MaskedGreaterUint64x4", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualUint64x4", argLength: 3, commutative: false}, - {name: "MaskedLessUint64x4", argLength: 3, commutative: false}, - {name: "MaskedLessEqualUint64x4", argLength: 3, commutative: false}, - {name: "MaskedMaxUint64x4", argLength: 3, commutative: true}, - {name: "MaskedMinUint64x4", argLength: 3, commutative: true}, - {name: "MaskedMulEvenWidenUint64x4", argLength: 3, commutative: true}, - {name: "MaskedNotEqualUint64x4", argLength: 3, commutative: true}, - {name: "MaskedOrUint64x4", argLength: 3, commutative: true}, - {name: "MaskedPopCountUint64x4", argLength: 2, commutative: false}, - {name: "MaskedRotateLeftUint64x4", argLength: 3, commutative: false}, - {name: "MaskedRotateRightUint64x4", argLength: 3, commutative: false}, - {name: "MaskedShiftAllLeftUint64x4", argLength: 3, commutative: false}, - {name: "MaskedShiftAllRightUint64x4", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftUint64x4", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftAndFillUpperFromUint64x4", argLength: 4, commutative: false}, - {name: "MaskedShiftRightUint64x4", argLength: 3, commutative: false}, - {name: "MaskedShiftRightAndFillUpperFromUint64x4", argLength: 4, commutative: false}, - {name: "MaskedShiftRightSignExtendedUint64x4", argLength: 3, commutative: false}, - {name: "MaskedSubUint64x4", argLength: 3, commutative: false}, - {name: "MaskedXorUint64x4", argLength: 3, commutative: true}, + {name: "LessEqualMaskedUint64x4", argLength: 3, commutative: false}, + {name: "LessMaskedUint64x4", argLength: 3, commutative: false}, {name: "MaxUint64x4", argLength: 2, commutative: true}, + {name: "MaxMaskedUint64x4", argLength: 3, commutative: true}, {name: "MinUint64x4", argLength: 2, commutative: true}, + {name: "MinMaskedUint64x4", argLength: 3, commutative: true}, {name: "MulEvenWidenUint64x4", argLength: 2, commutative: true}, + {name: "MulEvenWidenMaskedUint64x4", argLength: 3, commutative: true}, {name: "NotEqualUint64x4", argLength: 2, commutative: true}, + {name: "NotEqualMaskedUint64x4", argLength: 3, commutative: true}, {name: "OrUint64x4", argLength: 2, commutative: true}, + {name: "OrMaskedUint64x4", argLength: 3, commutative: true}, {name: "PopCountUint64x4", argLength: 1, commutative: false}, + {name: "PopCountMaskedUint64x4", argLength: 2, commutative: false}, {name: "RotateLeftUint64x4", argLength: 2, commutative: false}, + {name: "RotateLeftMaskedUint64x4", 
argLength: 3, commutative: false}, {name: "RotateRightUint64x4", argLength: 2, commutative: false}, + {name: "RotateRightMaskedUint64x4", argLength: 3, commutative: false}, {name: "ShiftAllLeftUint64x4", argLength: 2, commutative: false}, + {name: "ShiftAllLeftMaskedUint64x4", argLength: 3, commutative: false}, {name: "ShiftAllRightUint64x4", argLength: 2, commutative: false}, + {name: "ShiftAllRightMaskedUint64x4", argLength: 3, commutative: false}, {name: "ShiftLeftUint64x4", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromUint64x4", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedUint64x4", argLength: 4, commutative: false}, + {name: "ShiftLeftMaskedUint64x4", argLength: 3, commutative: false}, {name: "ShiftRightUint64x4", argLength: 2, commutative: false}, {name: "ShiftRightAndFillUpperFromUint64x4", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedUint64x4", argLength: 4, commutative: false}, + {name: "ShiftRightMaskedUint64x4", argLength: 3, commutative: false}, {name: "ShiftRightSignExtendedUint64x4", argLength: 2, commutative: false}, + {name: "ShiftRightSignExtendedMaskedUint64x4", argLength: 3, commutative: false}, {name: "SubUint64x4", argLength: 2, commutative: false}, + {name: "SubMaskedUint64x4", argLength: 3, commutative: false}, {name: "XorUint64x4", argLength: 2, commutative: true}, + {name: "XorMaskedUint64x4", argLength: 3, commutative: true}, {name: "AddUint64x8", argLength: 2, commutative: true}, + {name: "AddMaskedUint64x8", argLength: 3, commutative: true}, {name: "AndUint64x8", argLength: 2, commutative: true}, + {name: "AndMaskedUint64x8", argLength: 3, commutative: true}, {name: "AndNotUint64x8", argLength: 2, commutative: false}, + {name: "AndNotMaskedUint64x8", argLength: 3, commutative: false}, {name: "EqualUint64x8", argLength: 2, commutative: true}, + {name: "EqualMaskedUint64x8", argLength: 3, commutative: true}, {name: "GreaterUint64x8", argLength: 2, commutative: false}, {name: "GreaterEqualUint64x8", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedUint64x8", argLength: 3, commutative: false}, + {name: "GreaterMaskedUint64x8", argLength: 3, commutative: false}, {name: "LessUint64x8", argLength: 2, commutative: false}, {name: "LessEqualUint64x8", argLength: 2, commutative: false}, - {name: "MaskedAddUint64x8", argLength: 3, commutative: true}, - {name: "MaskedAndUint64x8", argLength: 3, commutative: true}, - {name: "MaskedAndNotUint64x8", argLength: 3, commutative: false}, - {name: "MaskedEqualUint64x8", argLength: 3, commutative: true}, - {name: "MaskedGreaterUint64x8", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualUint64x8", argLength: 3, commutative: false}, - {name: "MaskedLessUint64x8", argLength: 3, commutative: false}, - {name: "MaskedLessEqualUint64x8", argLength: 3, commutative: false}, - {name: "MaskedMaxUint64x8", argLength: 3, commutative: true}, - {name: "MaskedMinUint64x8", argLength: 3, commutative: true}, - {name: "MaskedMulEvenWidenUint64x8", argLength: 3, commutative: true}, - {name: "MaskedNotEqualUint64x8", argLength: 3, commutative: true}, - {name: "MaskedOrUint64x8", argLength: 3, commutative: true}, - {name: "MaskedPopCountUint64x8", argLength: 2, commutative: false}, - {name: "MaskedRotateLeftUint64x8", argLength: 3, commutative: false}, - {name: "MaskedRotateRightUint64x8", argLength: 3, commutative: false}, - {name: "MaskedShiftAllLeftUint64x8", argLength: 3, commutative: false}, - {name: "MaskedShiftAllRightUint64x8", 
argLength: 3, commutative: false}, - {name: "MaskedShiftLeftUint64x8", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftAndFillUpperFromUint64x8", argLength: 4, commutative: false}, - {name: "MaskedShiftRightUint64x8", argLength: 3, commutative: false}, - {name: "MaskedShiftRightAndFillUpperFromUint64x8", argLength: 4, commutative: false}, - {name: "MaskedShiftRightSignExtendedUint64x8", argLength: 3, commutative: false}, - {name: "MaskedSubUint64x8", argLength: 3, commutative: false}, - {name: "MaskedXorUint64x8", argLength: 3, commutative: true}, + {name: "LessEqualMaskedUint64x8", argLength: 3, commutative: false}, + {name: "LessMaskedUint64x8", argLength: 3, commutative: false}, {name: "MaxUint64x8", argLength: 2, commutative: true}, + {name: "MaxMaskedUint64x8", argLength: 3, commutative: true}, {name: "MinUint64x8", argLength: 2, commutative: true}, + {name: "MinMaskedUint64x8", argLength: 3, commutative: true}, {name: "MulEvenWidenUint64x8", argLength: 2, commutative: true}, + {name: "MulEvenWidenMaskedUint64x8", argLength: 3, commutative: true}, {name: "NotEqualUint64x8", argLength: 2, commutative: true}, + {name: "NotEqualMaskedUint64x8", argLength: 3, commutative: true}, {name: "OrUint64x8", argLength: 2, commutative: true}, + {name: "OrMaskedUint64x8", argLength: 3, commutative: true}, {name: "PopCountUint64x8", argLength: 1, commutative: false}, + {name: "PopCountMaskedUint64x8", argLength: 2, commutative: false}, {name: "RotateLeftUint64x8", argLength: 2, commutative: false}, + {name: "RotateLeftMaskedUint64x8", argLength: 3, commutative: false}, {name: "RotateRightUint64x8", argLength: 2, commutative: false}, + {name: "RotateRightMaskedUint64x8", argLength: 3, commutative: false}, {name: "ShiftAllLeftUint64x8", argLength: 2, commutative: false}, + {name: "ShiftAllLeftMaskedUint64x8", argLength: 3, commutative: false}, {name: "ShiftAllRightUint64x8", argLength: 2, commutative: false}, + {name: "ShiftAllRightMaskedUint64x8", argLength: 3, commutative: false}, {name: "ShiftLeftUint64x8", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromUint64x8", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedUint64x8", argLength: 4, commutative: false}, + {name: "ShiftLeftMaskedUint64x8", argLength: 3, commutative: false}, {name: "ShiftRightUint64x8", argLength: 2, commutative: false}, {name: "ShiftRightAndFillUpperFromUint64x8", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedUint64x8", argLength: 4, commutative: false}, + {name: "ShiftRightMaskedUint64x8", argLength: 3, commutative: false}, {name: "ShiftRightSignExtendedUint64x8", argLength: 2, commutative: false}, + {name: "ShiftRightSignExtendedMaskedUint64x8", argLength: 3, commutative: false}, {name: "SubUint64x8", argLength: 2, commutative: false}, + {name: "SubMaskedUint64x8", argLength: 3, commutative: false}, {name: "XorUint64x8", argLength: 2, commutative: true}, + {name: "XorMaskedUint64x8", argLength: 3, commutative: true}, {name: "AddUint8x16", argLength: 2, commutative: true}, + {name: "AddMaskedUint8x16", argLength: 3, commutative: true}, {name: "AndUint8x16", argLength: 2, commutative: true}, {name: "AndNotUint8x16", argLength: 2, commutative: false}, {name: "AverageUint8x16", argLength: 2, commutative: true}, + {name: "AverageMaskedUint8x16", argLength: 3, commutative: true}, {name: "EqualUint8x16", argLength: 2, commutative: true}, + {name: "EqualMaskedUint8x16", argLength: 3, commutative: true}, {name: "GaloisFieldMulUint8x16", argLength: 
2, commutative: false}, + {name: "GaloisFieldMulMaskedUint8x16", argLength: 3, commutative: false}, {name: "GreaterUint8x16", argLength: 2, commutative: false}, {name: "GreaterEqualUint8x16", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedUint8x16", argLength: 3, commutative: false}, + {name: "GreaterMaskedUint8x16", argLength: 3, commutative: false}, {name: "LessUint8x16", argLength: 2, commutative: false}, {name: "LessEqualUint8x16", argLength: 2, commutative: false}, - {name: "MaskedAddUint8x16", argLength: 3, commutative: true}, - {name: "MaskedAverageUint8x16", argLength: 3, commutative: true}, - {name: "MaskedEqualUint8x16", argLength: 3, commutative: true}, - {name: "MaskedGaloisFieldMulUint8x16", argLength: 3, commutative: false}, - {name: "MaskedGreaterUint8x16", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualUint8x16", argLength: 3, commutative: false}, - {name: "MaskedLessUint8x16", argLength: 3, commutative: false}, - {name: "MaskedLessEqualUint8x16", argLength: 3, commutative: false}, - {name: "MaskedMaxUint8x16", argLength: 3, commutative: true}, - {name: "MaskedMinUint8x16", argLength: 3, commutative: true}, - {name: "MaskedNotEqualUint8x16", argLength: 3, commutative: true}, - {name: "MaskedPopCountUint8x16", argLength: 2, commutative: false}, - {name: "MaskedSaturatedAddUint8x16", argLength: 3, commutative: true}, - {name: "MaskedSaturatedSubUint8x16", argLength: 3, commutative: false}, - {name: "MaskedSaturatedUnsignedSignedPairDotProdUint8x16", argLength: 3, commutative: false}, - {name: "MaskedSubUint8x16", argLength: 3, commutative: false}, + {name: "LessEqualMaskedUint8x16", argLength: 3, commutative: false}, + {name: "LessMaskedUint8x16", argLength: 3, commutative: false}, {name: "MaxUint8x16", argLength: 2, commutative: true}, + {name: "MaxMaskedUint8x16", argLength: 3, commutative: true}, {name: "MinUint8x16", argLength: 2, commutative: true}, + {name: "MinMaskedUint8x16", argLength: 3, commutative: true}, {name: "NotEqualUint8x16", argLength: 2, commutative: true}, + {name: "NotEqualMaskedUint8x16", argLength: 3, commutative: true}, {name: "OrUint8x16", argLength: 2, commutative: true}, {name: "PopCountUint8x16", argLength: 1, commutative: false}, + {name: "PopCountMaskedUint8x16", argLength: 2, commutative: false}, {name: "SaturatedAddUint8x16", argLength: 2, commutative: true}, + {name: "SaturatedAddMaskedUint8x16", argLength: 3, commutative: true}, {name: "SaturatedSubUint8x16", argLength: 2, commutative: false}, + {name: "SaturatedSubMaskedUint8x16", argLength: 3, commutative: false}, {name: "SaturatedUnsignedSignedPairDotProdUint8x16", argLength: 2, commutative: false}, + {name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x16", argLength: 3, commutative: false}, {name: "SubUint8x16", argLength: 2, commutative: false}, + {name: "SubMaskedUint8x16", argLength: 3, commutative: false}, {name: "XorUint8x16", argLength: 2, commutative: true}, {name: "AddUint8x32", argLength: 2, commutative: true}, + {name: "AddMaskedUint8x32", argLength: 3, commutative: true}, {name: "AndUint8x32", argLength: 2, commutative: true}, {name: "AndNotUint8x32", argLength: 2, commutative: false}, {name: "AverageUint8x32", argLength: 2, commutative: true}, + {name: "AverageMaskedUint8x32", argLength: 3, commutative: true}, {name: "EqualUint8x32", argLength: 2, commutative: true}, + {name: "EqualMaskedUint8x32", argLength: 3, commutative: true}, {name: "GaloisFieldMulUint8x32", argLength: 2, commutative: false}, + {name: "GaloisFieldMulMaskedUint8x32", 
argLength: 3, commutative: false}, {name: "GreaterUint8x32", argLength: 2, commutative: false}, {name: "GreaterEqualUint8x32", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedUint8x32", argLength: 3, commutative: false}, + {name: "GreaterMaskedUint8x32", argLength: 3, commutative: false}, {name: "LessUint8x32", argLength: 2, commutative: false}, {name: "LessEqualUint8x32", argLength: 2, commutative: false}, - {name: "MaskedAddUint8x32", argLength: 3, commutative: true}, - {name: "MaskedAverageUint8x32", argLength: 3, commutative: true}, - {name: "MaskedEqualUint8x32", argLength: 3, commutative: true}, - {name: "MaskedGaloisFieldMulUint8x32", argLength: 3, commutative: false}, - {name: "MaskedGreaterUint8x32", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualUint8x32", argLength: 3, commutative: false}, - {name: "MaskedLessUint8x32", argLength: 3, commutative: false}, - {name: "MaskedLessEqualUint8x32", argLength: 3, commutative: false}, - {name: "MaskedMaxUint8x32", argLength: 3, commutative: true}, - {name: "MaskedMinUint8x32", argLength: 3, commutative: true}, - {name: "MaskedNotEqualUint8x32", argLength: 3, commutative: true}, - {name: "MaskedPopCountUint8x32", argLength: 2, commutative: false}, - {name: "MaskedSaturatedAddUint8x32", argLength: 3, commutative: true}, - {name: "MaskedSaturatedSubUint8x32", argLength: 3, commutative: false}, - {name: "MaskedSaturatedUnsignedSignedPairDotProdUint8x32", argLength: 3, commutative: false}, - {name: "MaskedSubUint8x32", argLength: 3, commutative: false}, + {name: "LessEqualMaskedUint8x32", argLength: 3, commutative: false}, + {name: "LessMaskedUint8x32", argLength: 3, commutative: false}, {name: "MaxUint8x32", argLength: 2, commutative: true}, + {name: "MaxMaskedUint8x32", argLength: 3, commutative: true}, {name: "MinUint8x32", argLength: 2, commutative: true}, + {name: "MinMaskedUint8x32", argLength: 3, commutative: true}, {name: "NotEqualUint8x32", argLength: 2, commutative: true}, + {name: "NotEqualMaskedUint8x32", argLength: 3, commutative: true}, {name: "OrUint8x32", argLength: 2, commutative: true}, {name: "PopCountUint8x32", argLength: 1, commutative: false}, + {name: "PopCountMaskedUint8x32", argLength: 2, commutative: false}, {name: "SaturatedAddUint8x32", argLength: 2, commutative: true}, + {name: "SaturatedAddMaskedUint8x32", argLength: 3, commutative: true}, {name: "SaturatedSubUint8x32", argLength: 2, commutative: false}, + {name: "SaturatedSubMaskedUint8x32", argLength: 3, commutative: false}, {name: "SaturatedUnsignedSignedPairDotProdUint8x32", argLength: 2, commutative: false}, + {name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x32", argLength: 3, commutative: false}, {name: "SubUint8x32", argLength: 2, commutative: false}, + {name: "SubMaskedUint8x32", argLength: 3, commutative: false}, {name: "XorUint8x32", argLength: 2, commutative: true}, {name: "AddUint8x64", argLength: 2, commutative: true}, + {name: "AddMaskedUint8x64", argLength: 3, commutative: true}, {name: "AverageUint8x64", argLength: 2, commutative: true}, + {name: "AverageMaskedUint8x64", argLength: 3, commutative: true}, {name: "EqualUint8x64", argLength: 2, commutative: true}, + {name: "EqualMaskedUint8x64", argLength: 3, commutative: true}, {name: "GaloisFieldMulUint8x64", argLength: 2, commutative: false}, + {name: "GaloisFieldMulMaskedUint8x64", argLength: 3, commutative: false}, {name: "GreaterUint8x64", argLength: 2, commutative: false}, {name: "GreaterEqualUint8x64", argLength: 2, commutative: false}, + {name: 
"GreaterEqualMaskedUint8x64", argLength: 3, commutative: false}, + {name: "GreaterMaskedUint8x64", argLength: 3, commutative: false}, {name: "LessUint8x64", argLength: 2, commutative: false}, {name: "LessEqualUint8x64", argLength: 2, commutative: false}, - {name: "MaskedAddUint8x64", argLength: 3, commutative: true}, - {name: "MaskedAverageUint8x64", argLength: 3, commutative: true}, - {name: "MaskedEqualUint8x64", argLength: 3, commutative: true}, - {name: "MaskedGaloisFieldMulUint8x64", argLength: 3, commutative: false}, - {name: "MaskedGreaterUint8x64", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualUint8x64", argLength: 3, commutative: false}, - {name: "MaskedLessUint8x64", argLength: 3, commutative: false}, - {name: "MaskedLessEqualUint8x64", argLength: 3, commutative: false}, - {name: "MaskedMaxUint8x64", argLength: 3, commutative: true}, - {name: "MaskedMinUint8x64", argLength: 3, commutative: true}, - {name: "MaskedNotEqualUint8x64", argLength: 3, commutative: true}, - {name: "MaskedPopCountUint8x64", argLength: 2, commutative: false}, - {name: "MaskedSaturatedAddUint8x64", argLength: 3, commutative: true}, - {name: "MaskedSaturatedSubUint8x64", argLength: 3, commutative: false}, - {name: "MaskedSaturatedUnsignedSignedPairDotProdUint8x64", argLength: 3, commutative: false}, - {name: "MaskedSubUint8x64", argLength: 3, commutative: false}, + {name: "LessEqualMaskedUint8x64", argLength: 3, commutative: false}, + {name: "LessMaskedUint8x64", argLength: 3, commutative: false}, {name: "MaxUint8x64", argLength: 2, commutative: true}, + {name: "MaxMaskedUint8x64", argLength: 3, commutative: true}, {name: "MinUint8x64", argLength: 2, commutative: true}, + {name: "MinMaskedUint8x64", argLength: 3, commutative: true}, {name: "NotEqualUint8x64", argLength: 2, commutative: true}, + {name: "NotEqualMaskedUint8x64", argLength: 3, commutative: true}, {name: "PopCountUint8x64", argLength: 1, commutative: false}, + {name: "PopCountMaskedUint8x64", argLength: 2, commutative: false}, {name: "SaturatedAddUint8x64", argLength: 2, commutative: true}, + {name: "SaturatedAddMaskedUint8x64", argLength: 3, commutative: true}, {name: "SaturatedSubUint8x64", argLength: 2, commutative: false}, + {name: "SaturatedSubMaskedUint8x64", argLength: 3, commutative: false}, {name: "SaturatedUnsignedSignedPairDotProdUint8x64", argLength: 2, commutative: false}, + {name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x64", argLength: 3, commutative: false}, {name: "SubUint8x64", argLength: 2, commutative: false}, + {name: "SubMaskedUint8x64", argLength: 3, commutative: false}, {name: "CeilWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithCeilWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithFloorWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithRoundWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithTruncWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncWithPrecisionMaskedFloat32x16", argLength: 2, 
commutative: false, aux: "Int8"}, {name: "FloorWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "MaskedCeilWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithCeilWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithFloorWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithRoundWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithTruncWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedFloorWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedRoundWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedTruncWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "FloorWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "RoundWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "TruncWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "CeilWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithCeilWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithFloorWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithRoundWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithTruncWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "FloorWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "MaskedCeilWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithCeilWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithFloorWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithRoundWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithTruncWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedFloorWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedRoundWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedTruncWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "FloorWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "RoundWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "TruncWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: 
"Int8"}, + {name: "TruncWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "CeilWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithCeilWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithFloorWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithRoundWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithTruncWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "FloorWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "Get128Float32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "MaskedCeilWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithCeilWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithFloorWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithRoundWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithTruncWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedFloorWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedRoundWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedTruncWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "RoundWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "Set128Float32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "TruncWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "CeilWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithCeilWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithFloorWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithRoundWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithTruncWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "FloorWithPrecisionFloat64x2", argLength: 1, 
commutative: false, aux: "Int8"}, - {name: "MaskedCeilWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithCeilWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithFloorWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithRoundWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithTruncWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedFloorWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedRoundWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedTruncWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "FloorWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "RoundWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "TruncWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "CeilWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithCeilWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithFloorWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithRoundWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithTruncWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "FloorWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "Get128Float64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "MaskedCeilWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithCeilWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithFloorWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithRoundWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithTruncWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedFloorWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedRoundWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedTruncWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "RoundWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "Set128Float64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: 
"TruncWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "CeilWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithCeilWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithFloorWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithRoundWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithTruncWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "FloorWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "MaskedCeilWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithCeilWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithFloorWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithRoundWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithTruncWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedFloorWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedRoundWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedTruncWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "FloorWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "RoundWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "TruncWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "Get128Int16x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllLeftAndFillUpperFromInt16x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllRightAndFillUpperFromInt16x16", argLength: 3, commutative: false, aux: "Int8"}, {name: "Set128Int16x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromInt16x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedInt16x16", argLength: 3, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromInt16x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllLeftAndFillUpperFromInt16x32", argLength: 3, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllRightAndFillUpperFromInt16x32", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedInt16x16", argLength: 3, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromInt16x32", argLength: 2, commutative: false, aux: "Int8"}, + {name: 
"ShiftAllLeftAndFillUpperFromMaskedInt16x32", argLength: 3, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromInt16x32", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedInt16x32", argLength: 3, commutative: false, aux: "Int8"}, {name: "GetElemInt16x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllLeftAndFillUpperFromInt16x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllRightAndFillUpperFromInt16x8", argLength: 3, commutative: false, aux: "Int8"}, {name: "SetElemInt16x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromInt16x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedInt16x8", argLength: 3, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromInt16x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedRotateAllLeftInt32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedRotateAllRightInt32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllLeftAndFillUpperFromInt32x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllRightAndFillUpperFromInt32x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedInt16x8", argLength: 3, commutative: false, aux: "Int8"}, {name: "RotateAllLeftInt32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftMaskedInt32x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "RotateAllRightInt32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightMaskedInt32x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromInt32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedInt32x16", argLength: 3, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromInt32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedInt32x16", argLength: 3, commutative: false, aux: "Int8"}, {name: "GetElemInt32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "MaskedRotateAllLeftInt32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedRotateAllRightInt32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllLeftAndFillUpperFromInt32x4", argLength: 3, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllRightAndFillUpperFromInt32x4", argLength: 3, commutative: false, aux: "Int8"}, {name: "RotateAllLeftInt32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftMaskedInt32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "RotateAllRightInt32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightMaskedInt32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "SetElemInt32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromInt32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedInt32x4", argLength: 3, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromInt32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedInt32x4", argLength: 3, commutative: false, aux: "Int8"}, {name: "Get128Int32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "MaskedRotateAllLeftInt32x8", argLength: 2, commutative: 
false, aux: "Int8"}, - {name: "MaskedRotateAllRightInt32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllLeftAndFillUpperFromInt32x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllRightAndFillUpperFromInt32x8", argLength: 3, commutative: false, aux: "Int8"}, {name: "RotateAllLeftInt32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftMaskedInt32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "RotateAllRightInt32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightMaskedInt32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "Set128Int32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromInt32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedInt32x8", argLength: 3, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromInt32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedInt32x8", argLength: 3, commutative: false, aux: "Int8"}, {name: "GetElemInt64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "MaskedRotateAllLeftInt64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedRotateAllRightInt64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllLeftAndFillUpperFromInt64x2", argLength: 3, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllRightAndFillUpperFromInt64x2", argLength: 3, commutative: false, aux: "Int8"}, {name: "RotateAllLeftInt64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftMaskedInt64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "RotateAllRightInt64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightMaskedInt64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "SetElemInt64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromInt64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedInt64x2", argLength: 3, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromInt64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedInt64x2", argLength: 3, commutative: false, aux: "Int8"}, {name: "Get128Int64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "MaskedRotateAllLeftInt64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedRotateAllRightInt64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllLeftAndFillUpperFromInt64x4", argLength: 3, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllRightAndFillUpperFromInt64x4", argLength: 3, commutative: false, aux: "Int8"}, {name: "RotateAllLeftInt64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftMaskedInt64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "RotateAllRightInt64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightMaskedInt64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "Set128Int64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromInt64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedInt64x4", argLength: 3, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromInt64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: 
"MaskedRotateAllLeftInt64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedRotateAllRightInt64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllLeftAndFillUpperFromInt64x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllRightAndFillUpperFromInt64x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedInt64x4", argLength: 3, commutative: false, aux: "Int8"}, {name: "RotateAllLeftInt64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftMaskedInt64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "RotateAllRightInt64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightMaskedInt64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromInt64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedInt64x8", argLength: 3, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromInt64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedInt64x8", argLength: 3, commutative: false, aux: "Int8"}, {name: "GetElemInt8x16", argLength: 1, commutative: false, aux: "Int8"}, {name: "SetElemInt8x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "Get128Int8x32", argLength: 1, commutative: false, aux: "Int8"}, {name: "Set128Int8x32", argLength: 2, commutative: false, aux: "Int8"}, {name: "Get128Uint16x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllLeftAndFillUpperFromUint16x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllRightAndFillUpperFromUint16x16", argLength: 3, commutative: false, aux: "Int8"}, {name: "Set128Uint16x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromUint16x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedUint16x16", argLength: 3, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromUint16x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllLeftAndFillUpperFromUint16x32", argLength: 3, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllRightAndFillUpperFromUint16x32", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedUint16x16", argLength: 3, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromUint16x32", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedUint16x32", argLength: 3, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromUint16x32", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedUint16x32", argLength: 3, commutative: false, aux: "Int8"}, {name: "GetElemUint16x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllLeftAndFillUpperFromUint16x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllRightAndFillUpperFromUint16x8", argLength: 3, commutative: false, aux: "Int8"}, {name: "SetElemUint16x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromUint16x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedUint16x8", argLength: 3, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromUint16x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedRotateAllLeftUint32x16", 
argLength: 2, commutative: false, aux: "Int8"},
-		{name: "MaskedRotateAllRightUint32x16", argLength: 2, commutative: false, aux: "Int8"},
-		{name: "MaskedShiftAllLeftAndFillUpperFromUint32x16", argLength: 3, commutative: false, aux: "Int8"},
-		{name: "MaskedShiftAllRightAndFillUpperFromUint32x16", argLength: 3, commutative: false, aux: "Int8"},
+		{name: "ShiftAllRightAndFillUpperFromMaskedUint16x8", argLength: 3, commutative: false, aux: "Int8"},
 		{name: "RotateAllLeftUint32x16", argLength: 1, commutative: false, aux: "Int8"},
+		{name: "RotateAllLeftMaskedUint32x16", argLength: 2, commutative: false, aux: "Int8"},
 		{name: "RotateAllRightUint32x16", argLength: 1, commutative: false, aux: "Int8"},
+		{name: "RotateAllRightMaskedUint32x16", argLength: 2, commutative: false, aux: "Int8"},
 		{name: "ShiftAllLeftAndFillUpperFromUint32x16", argLength: 2, commutative: false, aux: "Int8"},
+		{name: "ShiftAllLeftAndFillUpperFromMaskedUint32x16", argLength: 3, commutative: false, aux: "Int8"},
 		{name: "ShiftAllRightAndFillUpperFromUint32x16", argLength: 2, commutative: false, aux: "Int8"},
+		{name: "ShiftAllRightAndFillUpperFromMaskedUint32x16", argLength: 3, commutative: false, aux: "Int8"},
 		{name: "GetElemUint32x4", argLength: 1, commutative: false, aux: "Int8"},
-		{name: "MaskedRotateAllLeftUint32x4", argLength: 2, commutative: false, aux: "Int8"},
-		{name: "MaskedRotateAllRightUint32x4", argLength: 2, commutative: false, aux: "Int8"},
-		{name: "MaskedShiftAllLeftAndFillUpperFromUint32x4", argLength: 3, commutative: false, aux: "Int8"},
-		{name: "MaskedShiftAllRightAndFillUpperFromUint32x4", argLength: 3, commutative: false, aux: "Int8"},
 		{name: "RotateAllLeftUint32x4", argLength: 1, commutative: false, aux: "Int8"},
+		{name: "RotateAllLeftMaskedUint32x4", argLength: 2, commutative: false, aux: "Int8"},
 		{name: "RotateAllRightUint32x4", argLength: 1, commutative: false, aux: "Int8"},
+		{name: "RotateAllRightMaskedUint32x4", argLength: 2, commutative: false, aux: "Int8"},
 		{name: "SetElemUint32x4", argLength: 2, commutative: false, aux: "Int8"},
 		{name: "ShiftAllLeftAndFillUpperFromUint32x4", argLength: 2, commutative: false, aux: "Int8"},
+		{name: "ShiftAllLeftAndFillUpperFromMaskedUint32x4", argLength: 3, commutative: false, aux: "Int8"},
 		{name: "ShiftAllRightAndFillUpperFromUint32x4", argLength: 2, commutative: false, aux: "Int8"},
+		{name: "ShiftAllRightAndFillUpperFromMaskedUint32x4", argLength: 3, commutative: false, aux: "Int8"},
 		{name: "Get128Uint32x8", argLength: 1, commutative: false, aux: "Int8"},
-		{name: "MaskedRotateAllLeftUint32x8", argLength: 2, commutative: false, aux: "Int8"},
-		{name: "MaskedRotateAllRightUint32x8", argLength: 2, commutative: false, aux: "Int8"},
-		{name: "MaskedShiftAllLeftAndFillUpperFromUint32x8", argLength: 3, commutative: false, aux: "Int8"},
-		{name: "MaskedShiftAllRightAndFillUpperFromUint32x8", argLength: 3, commutative: false, aux: "Int8"},
 		{name: "RotateAllLeftUint32x8", argLength: 1, commutative: false, aux: "Int8"},
+		{name: "RotateAllLeftMaskedUint32x8", argLength: 2, commutative: false, aux: "Int8"},
 		{name: "RotateAllRightUint32x8", argLength: 1, commutative: false, aux: "Int8"},
+		{name: "RotateAllRightMaskedUint32x8", argLength: 2, commutative: false, aux: "Int8"},
 		{name: "Set128Uint32x8", argLength: 2, commutative: false, aux: "Int8"},
 		{name: "ShiftAllLeftAndFillUpperFromUint32x8", argLength: 2, commutative: false, aux: "Int8"},
+		{name: "ShiftAllLeftAndFillUpperFromMaskedUint32x8", argLength: 3, commutative: false, aux: "Int8"},
 		{name: "ShiftAllRightAndFillUpperFromUint32x8", argLength: 2, commutative: false, aux: "Int8"},
+		{name: "ShiftAllRightAndFillUpperFromMaskedUint32x8", argLength: 3, commutative: false, aux: "Int8"},
 		{name: "GetElemUint64x2", argLength: 1, commutative: false, aux: "Int8"},
-		{name: "MaskedRotateAllLeftUint64x2", argLength: 2, commutative: false, aux: "Int8"},
-		{name: "MaskedRotateAllRightUint64x2", argLength: 2, commutative: false, aux: "Int8"},
-		{name: "MaskedShiftAllLeftAndFillUpperFromUint64x2", argLength: 3, commutative: false, aux: "Int8"},
-		{name: "MaskedShiftAllRightAndFillUpperFromUint64x2", argLength: 3, commutative: false, aux: "Int8"},
 		{name: "RotateAllLeftUint64x2", argLength: 1, commutative: false, aux: "Int8"},
+		{name: "RotateAllLeftMaskedUint64x2", argLength: 2, commutative: false, aux: "Int8"},
 		{name: "RotateAllRightUint64x2", argLength: 1, commutative: false, aux: "Int8"},
+		{name: "RotateAllRightMaskedUint64x2", argLength: 2, commutative: false, aux: "Int8"},
 		{name: "SetElemUint64x2", argLength: 2, commutative: false, aux: "Int8"},
 		{name: "ShiftAllLeftAndFillUpperFromUint64x2", argLength: 2, commutative: false, aux: "Int8"},
+		{name: "ShiftAllLeftAndFillUpperFromMaskedUint64x2", argLength: 3, commutative: false, aux: "Int8"},
 		{name: "ShiftAllRightAndFillUpperFromUint64x2", argLength: 2, commutative: false, aux: "Int8"},
+		{name: "ShiftAllRightAndFillUpperFromMaskedUint64x2", argLength: 3, commutative: false, aux: "Int8"},
 		{name: "Get128Uint64x4", argLength: 1, commutative: false, aux: "Int8"},
-		{name: "MaskedRotateAllLeftUint64x4", argLength: 2, commutative: false, aux: "Int8"},
-		{name: "MaskedRotateAllRightUint64x4", argLength: 2, commutative: false, aux: "Int8"},
-		{name: "MaskedShiftAllLeftAndFillUpperFromUint64x4", argLength: 3, commutative: false, aux: "Int8"},
-		{name: "MaskedShiftAllRightAndFillUpperFromUint64x4", argLength: 3, commutative: false, aux: "Int8"},
 		{name: "RotateAllLeftUint64x4", argLength: 1, commutative: false, aux: "Int8"},
+		{name: "RotateAllLeftMaskedUint64x4", argLength: 2, commutative: false, aux: "Int8"},
 		{name: "RotateAllRightUint64x4", argLength: 1, commutative: false, aux: "Int8"},
+		{name: "RotateAllRightMaskedUint64x4", argLength: 2, commutative: false, aux: "Int8"},
 		{name: "Set128Uint64x4", argLength: 2, commutative: false, aux: "Int8"},
 		{name: "ShiftAllLeftAndFillUpperFromUint64x4", argLength: 2, commutative: false, aux: "Int8"},
+		{name: "ShiftAllLeftAndFillUpperFromMaskedUint64x4", argLength: 3, commutative: false, aux: "Int8"},
 		{name: "ShiftAllRightAndFillUpperFromUint64x4", argLength: 2, commutative: false, aux: "Int8"},
-		{name: "MaskedRotateAllLeftUint64x8", argLength: 2, commutative: false, aux: "Int8"},
-		{name: "MaskedRotateAllRightUint64x8", argLength: 2, commutative: false, aux: "Int8"},
-		{name: "MaskedShiftAllLeftAndFillUpperFromUint64x8", argLength: 3, commutative: false, aux: "Int8"},
-		{name: "MaskedShiftAllRightAndFillUpperFromUint64x8", argLength: 3, commutative: false, aux: "Int8"},
+		{name: "ShiftAllRightAndFillUpperFromMaskedUint64x4", argLength: 3, commutative: false, aux: "Int8"},
 		{name: "RotateAllLeftUint64x8", argLength: 1, commutative: false, aux: "Int8"},
+		{name: "RotateAllLeftMaskedUint64x8", argLength: 2, commutative: false, aux: "Int8"},
 		{name: "RotateAllRightUint64x8", argLength: 1, commutative: false, aux: "Int8"},
+		{name: "RotateAllRightMaskedUint64x8", argLength: 2, commutative: false, aux: "Int8"},
 		{name: "ShiftAllLeftAndFillUpperFromUint64x8", argLength: 2, commutative: false, aux: "Int8"},
+		{name: "ShiftAllLeftAndFillUpperFromMaskedUint64x8", argLength: 3, commutative: false, aux: "Int8"},
 		{name: "ShiftAllRightAndFillUpperFromUint64x8", argLength: 2, commutative: false, aux: "Int8"},
+		{name: "ShiftAllRightAndFillUpperFromMaskedUint64x8", argLength: 3, commutative: false, aux: "Int8"},
 		{name: "GaloisFieldAffineTransformUint8x16", argLength: 2, commutative: false, aux: "Int8"},
 		{name: "GaloisFieldAffineTransformInversedUint8x16", argLength: 2, commutative: false, aux: "Int8"},
+		{name: "GaloisFieldAffineTransformInversedMaskedUint8x16", argLength: 3, commutative: false, aux: "Int8"},
+		{name: "GaloisFieldAffineTransformMaskedUint8x16", argLength: 3, commutative: false, aux: "Int8"},
 		{name: "GetElemUint8x16", argLength: 1, commutative: false, aux: "Int8"},
-		{name: "MaskedGaloisFieldAffineTransformUint8x16", argLength: 3, commutative: false, aux: "Int8"},
-		{name: "MaskedGaloisFieldAffineTransformInversedUint8x16", argLength: 3, commutative: false, aux: "Int8"},
 		{name: "SetElemUint8x16", argLength: 2, commutative: false, aux: "Int8"},
 		{name: "GaloisFieldAffineTransformUint8x32", argLength: 2, commutative: false, aux: "Int8"},
 		{name: "GaloisFieldAffineTransformInversedUint8x32", argLength: 2, commutative: false, aux: "Int8"},
+		{name: "GaloisFieldAffineTransformInversedMaskedUint8x32", argLength: 3, commutative: false, aux: "Int8"},
+		{name: "GaloisFieldAffineTransformMaskedUint8x32", argLength: 3, commutative: false, aux: "Int8"},
 		{name: "Get128Uint8x32", argLength: 1, commutative: false, aux: "Int8"},
-		{name: "MaskedGaloisFieldAffineTransformUint8x32", argLength: 3, commutative: false, aux: "Int8"},
-		{name: "MaskedGaloisFieldAffineTransformInversedUint8x32", argLength: 3, commutative: false, aux: "Int8"},
 		{name: "Set128Uint8x32", argLength: 2, commutative: false, aux: "Int8"},
 		{name: "GaloisFieldAffineTransformUint8x64", argLength: 2, commutative: false, aux: "Int8"},
 		{name: "GaloisFieldAffineTransformInversedUint8x64", argLength: 2, commutative: false, aux: "Int8"},
-		{name: "MaskedGaloisFieldAffineTransformUint8x64", argLength: 3, commutative: false, aux: "Int8"},
-		{name: "MaskedGaloisFieldAffineTransformInversedUint8x64", argLength: 3, commutative: false, aux: "Int8"},
+		{name: "GaloisFieldAffineTransformInversedMaskedUint8x64", argLength: 3, commutative: false, aux: "Int8"},
+		{name: "GaloisFieldAffineTransformMaskedUint8x64", argLength: 3, commutative: false, aux: "Int8"},
 	}
 }
diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go
index 48428ead1f5735..4251c013a8cb1d 100644
--- a/src/cmd/compile/internal/ssa/opGen.go
+++ b/src/cmd/compile/internal/ssa/opGen.go
@@ -1197,836 +1197,836 @@ const (
 	OpAMD64Zero256
 	OpAMD64Zero512
 	OpAMD64VADDPS512
-	OpAMD64VRCP14PS512
-	OpAMD64VRSQRT14PS512
-	OpAMD64VDIVPS512
-	OpAMD64VFMADD213PS512
-	OpAMD64VFMADDSUB213PS512
-	OpAMD64VFMSUBADD213PS512
 	OpAMD64VADDPSMasked512
+	OpAMD64VRCP14PS512
 	OpAMD64VRCP14PSMasked512
+	OpAMD64VRSQRT14PS512
 	OpAMD64VRSQRT14PSMasked512
+	OpAMD64VDIVPS512
 	OpAMD64VDIVPSMasked512
+	OpAMD64VFMADD213PS512
 	OpAMD64VFMADD213PSMasked512
+	OpAMD64VFMADDSUB213PS512
 	OpAMD64VFMADDSUB213PSMasked512
+	OpAMD64VFMSUBADD213PS512
 	OpAMD64VFMSUBADD213PSMasked512
-	OpAMD64VMAXPSMasked512
-	OpAMD64VMINPSMasked512
-	OpAMD64VMULPSMasked512
-	OpAMD64VSCALEFPSMasked512
-	OpAMD64VSQRTPSMasked512
-	OpAMD64VSUBPSMasked512
 	OpAMD64VMAXPS512
+	OpAMD64VMAXPSMasked512
 	OpAMD64VMINPS512
+	OpAMD64VMINPSMasked512
 	OpAMD64VMULPS512
 	OpAMD64VSCALEFPS512
+	OpAMD64VSCALEFPSMasked512
+	OpAMD64VMULPSMasked512
 	OpAMD64VSQRTPS512
+ OpAMD64VSQRTPSMasked512 OpAMD64VSUBPS512 + OpAMD64VSUBPSMasked512 OpAMD64VADDPS128 + OpAMD64VADDPSMasked128 OpAMD64VADDSUBPS128 OpAMD64VRCP14PS128 - OpAMD64VRSQRTPS128 - OpAMD64VDIVPS128 - OpAMD64VFMADD213PS128 - OpAMD64VFMADDSUB213PS128 - OpAMD64VFMSUBADD213PS128 - OpAMD64VADDPSMasked128 OpAMD64VRCP14PSMasked128 + OpAMD64VRSQRTPS128 OpAMD64VRSQRT14PSMasked128 + OpAMD64VDIVPS128 OpAMD64VDIVPSMasked128 + OpAMD64VFMADD213PS128 OpAMD64VFMADD213PSMasked128 + OpAMD64VFMADDSUB213PS128 OpAMD64VFMADDSUB213PSMasked128 + OpAMD64VFMSUBADD213PS128 OpAMD64VFMSUBADD213PSMasked128 - OpAMD64VMAXPSMasked128 - OpAMD64VMINPSMasked128 - OpAMD64VMULPSMasked128 - OpAMD64VSCALEFPSMasked128 - OpAMD64VSQRTPSMasked128 - OpAMD64VSUBPSMasked128 OpAMD64VMAXPS128 + OpAMD64VMAXPSMasked128 OpAMD64VMINPS128 + OpAMD64VMINPSMasked128 OpAMD64VMULPS128 OpAMD64VSCALEFPS128 + OpAMD64VSCALEFPSMasked128 + OpAMD64VMULPSMasked128 OpAMD64VHADDPS128 OpAMD64VHSUBPS128 OpAMD64VSQRTPS128 + OpAMD64VSQRTPSMasked128 OpAMD64VSUBPS128 + OpAMD64VSUBPSMasked128 OpAMD64VADDPS256 + OpAMD64VADDPSMasked256 OpAMD64VADDSUBPS256 OpAMD64VRCP14PS256 - OpAMD64VRSQRTPS256 - OpAMD64VDIVPS256 - OpAMD64VFMADD213PS256 - OpAMD64VFMADDSUB213PS256 - OpAMD64VFMSUBADD213PS256 - OpAMD64VADDPSMasked256 OpAMD64VRCP14PSMasked256 + OpAMD64VRSQRTPS256 OpAMD64VRSQRT14PSMasked256 + OpAMD64VDIVPS256 OpAMD64VDIVPSMasked256 + OpAMD64VFMADD213PS256 OpAMD64VFMADD213PSMasked256 + OpAMD64VFMADDSUB213PS256 OpAMD64VFMADDSUB213PSMasked256 + OpAMD64VFMSUBADD213PS256 OpAMD64VFMSUBADD213PSMasked256 - OpAMD64VMAXPSMasked256 - OpAMD64VMINPSMasked256 - OpAMD64VMULPSMasked256 - OpAMD64VSCALEFPSMasked256 - OpAMD64VSQRTPSMasked256 - OpAMD64VSUBPSMasked256 OpAMD64VMAXPS256 + OpAMD64VMAXPSMasked256 OpAMD64VMINPS256 + OpAMD64VMINPSMasked256 OpAMD64VMULPS256 OpAMD64VSCALEFPS256 + OpAMD64VSCALEFPSMasked256 + OpAMD64VMULPSMasked256 OpAMD64VHADDPS256 OpAMD64VHSUBPS256 OpAMD64VSQRTPS256 + OpAMD64VSQRTPSMasked256 OpAMD64VSUBPS256 + OpAMD64VSUBPSMasked256 OpAMD64VADDPD128 + OpAMD64VADDPDMasked128 OpAMD64VADDSUBPD128 OpAMD64VRCP14PD128 - OpAMD64VRSQRT14PD128 - OpAMD64VDIVPD128 - OpAMD64VFMADD213PD128 - OpAMD64VFMADDSUB213PD128 - OpAMD64VFMSUBADD213PD128 - OpAMD64VADDPDMasked128 OpAMD64VRCP14PDMasked128 + OpAMD64VRSQRT14PD128 OpAMD64VRSQRT14PDMasked128 + OpAMD64VDIVPD128 OpAMD64VDIVPDMasked128 + OpAMD64VFMADD213PD128 OpAMD64VFMADD213PDMasked128 + OpAMD64VFMADDSUB213PD128 OpAMD64VFMADDSUB213PDMasked128 + OpAMD64VFMSUBADD213PD128 OpAMD64VFMSUBADD213PDMasked128 - OpAMD64VMAXPDMasked128 - OpAMD64VMINPDMasked128 - OpAMD64VMULPDMasked128 - OpAMD64VSCALEFPDMasked128 - OpAMD64VSQRTPDMasked128 - OpAMD64VSUBPDMasked128 OpAMD64VMAXPD128 + OpAMD64VMAXPDMasked128 OpAMD64VMINPD128 + OpAMD64VMINPDMasked128 OpAMD64VMULPD128 OpAMD64VSCALEFPD128 + OpAMD64VSCALEFPDMasked128 + OpAMD64VMULPDMasked128 OpAMD64VHADDPD128 OpAMD64VHSUBPD128 OpAMD64VSQRTPD128 + OpAMD64VSQRTPDMasked128 OpAMD64VSUBPD128 + OpAMD64VSUBPDMasked128 OpAMD64VADDPD256 + OpAMD64VADDPDMasked256 OpAMD64VADDSUBPD256 OpAMD64VRCP14PD256 - OpAMD64VRSQRT14PD256 - OpAMD64VDIVPD256 - OpAMD64VFMADD213PD256 - OpAMD64VFMADDSUB213PD256 - OpAMD64VFMSUBADD213PD256 - OpAMD64VADDPDMasked256 OpAMD64VRCP14PDMasked256 + OpAMD64VRSQRT14PD256 OpAMD64VRSQRT14PDMasked256 + OpAMD64VDIVPD256 OpAMD64VDIVPDMasked256 + OpAMD64VFMADD213PD256 OpAMD64VFMADD213PDMasked256 + OpAMD64VFMADDSUB213PD256 OpAMD64VFMADDSUB213PDMasked256 + OpAMD64VFMSUBADD213PD256 OpAMD64VFMSUBADD213PDMasked256 - OpAMD64VMAXPDMasked256 - OpAMD64VMINPDMasked256 - OpAMD64VMULPDMasked256 - 
OpAMD64VSCALEFPDMasked256 - OpAMD64VSQRTPDMasked256 - OpAMD64VSUBPDMasked256 OpAMD64VMAXPD256 + OpAMD64VMAXPDMasked256 OpAMD64VMINPD256 + OpAMD64VMINPDMasked256 OpAMD64VMULPD256 OpAMD64VSCALEFPD256 + OpAMD64VSCALEFPDMasked256 + OpAMD64VMULPDMasked256 OpAMD64VHADDPD256 OpAMD64VHSUBPD256 OpAMD64VSQRTPD256 + OpAMD64VSQRTPDMasked256 OpAMD64VSUBPD256 + OpAMD64VSUBPDMasked256 OpAMD64VADDPD512 - OpAMD64VRCP14PD512 - OpAMD64VRSQRT14PD512 - OpAMD64VDIVPD512 - OpAMD64VFMADD213PD512 - OpAMD64VFMADDSUB213PD512 - OpAMD64VFMSUBADD213PD512 OpAMD64VADDPDMasked512 + OpAMD64VRCP14PD512 OpAMD64VRCP14PDMasked512 + OpAMD64VRSQRT14PD512 OpAMD64VRSQRT14PDMasked512 + OpAMD64VDIVPD512 OpAMD64VDIVPDMasked512 + OpAMD64VFMADD213PD512 OpAMD64VFMADD213PDMasked512 + OpAMD64VFMADDSUB213PD512 OpAMD64VFMADDSUB213PDMasked512 + OpAMD64VFMSUBADD213PD512 OpAMD64VFMSUBADD213PDMasked512 - OpAMD64VMAXPDMasked512 - OpAMD64VMINPDMasked512 - OpAMD64VMULPDMasked512 - OpAMD64VSCALEFPDMasked512 - OpAMD64VSQRTPDMasked512 - OpAMD64VSUBPDMasked512 OpAMD64VMAXPD512 + OpAMD64VMAXPDMasked512 OpAMD64VMINPD512 + OpAMD64VMINPDMasked512 OpAMD64VMULPD512 OpAMD64VSCALEFPD512 + OpAMD64VSCALEFPDMasked512 + OpAMD64VMULPDMasked512 OpAMD64VSQRTPD512 + OpAMD64VSQRTPDMasked512 OpAMD64VSUBPD512 + OpAMD64VSUBPDMasked512 OpAMD64VPABSW256 + OpAMD64VPABSWMasked256 OpAMD64VPADDW256 + OpAMD64VPADDWMasked256 OpAMD64VPCMPEQW256 OpAMD64VPCMPGTW256 - OpAMD64VPABSWMasked256 - OpAMD64VPADDWMasked256 - OpAMD64VPMAXSWMasked256 - OpAMD64VPMINSWMasked256 - OpAMD64VPMULHWMasked256 - OpAMD64VPMULLWMasked256 - OpAMD64VPMADDWDMasked256 - OpAMD64VPOPCNTWMasked256 - OpAMD64VPADDSWMasked256 - OpAMD64VPSUBSWMasked256 - OpAMD64VPSLLVWMasked256 - OpAMD64VPSHLDVWMasked256 - OpAMD64VPSRLVWMasked256 - OpAMD64VPSHRDVWMasked256 - OpAMD64VPSRAVWMasked256 - OpAMD64VPSUBWMasked256 OpAMD64VPMAXSW256 + OpAMD64VPMAXSWMasked256 OpAMD64VPMINSW256 + OpAMD64VPMINSWMasked256 OpAMD64VPMULHW256 + OpAMD64VPMULHWMasked256 OpAMD64VPMULLW256 + OpAMD64VPMULLWMasked256 OpAMD64VPMADDWD256 + OpAMD64VPMADDWDMasked256 OpAMD64VPHADDW256 OpAMD64VPHSUBW256 OpAMD64VPOPCNTW256 + OpAMD64VPOPCNTWMasked256 OpAMD64VPADDSW256 + OpAMD64VPADDSWMasked256 OpAMD64VPHADDSW256 OpAMD64VPHSUBSW256 OpAMD64VPSUBSW256 + OpAMD64VPSUBSWMasked256 OpAMD64VPSLLW256 OpAMD64VPSRLW256 OpAMD64VPSRAW256 OpAMD64VPSLLVW256 OpAMD64VPSHLDVW256 + OpAMD64VPSHLDVWMasked256 + OpAMD64VPSLLVWMasked256 OpAMD64VPSRLVW256 OpAMD64VPSHRDVW256 + OpAMD64VPSHRDVWMasked256 + OpAMD64VPSRLVWMasked256 OpAMD64VPSRAVW256 + OpAMD64VPSRAVWMasked256 OpAMD64VPSIGNW256 OpAMD64VPSUBW256 + OpAMD64VPSUBWMasked256 OpAMD64VPABSW512 - OpAMD64VPADDW512 OpAMD64VPABSWMasked512 + OpAMD64VPADDW512 OpAMD64VPADDWMasked512 - OpAMD64VPMAXSWMasked512 - OpAMD64VPMINSWMasked512 - OpAMD64VPMULHWMasked512 - OpAMD64VPMULLWMasked512 - OpAMD64VPMADDWDMasked512 - OpAMD64VPOPCNTWMasked512 - OpAMD64VPADDSWMasked512 - OpAMD64VPSUBSWMasked512 - OpAMD64VPSLLVWMasked512 - OpAMD64VPSHLDVWMasked512 - OpAMD64VPSRLVWMasked512 - OpAMD64VPSHRDVWMasked512 - OpAMD64VPSRAVWMasked512 - OpAMD64VPSUBWMasked512 OpAMD64VPMAXSW512 + OpAMD64VPMAXSWMasked512 OpAMD64VPMINSW512 + OpAMD64VPMINSWMasked512 OpAMD64VPMULHW512 + OpAMD64VPMULHWMasked512 OpAMD64VPMULLW512 + OpAMD64VPMULLWMasked512 OpAMD64VPMADDWD512 + OpAMD64VPMADDWDMasked512 OpAMD64VPOPCNTW512 + OpAMD64VPOPCNTWMasked512 OpAMD64VPADDSW512 + OpAMD64VPADDSWMasked512 OpAMD64VPSUBSW512 + OpAMD64VPSUBSWMasked512 OpAMD64VPSLLVW512 OpAMD64VPSHLDVW512 + OpAMD64VPSHLDVWMasked512 + OpAMD64VPSLLVWMasked512 OpAMD64VPSRLVW512 OpAMD64VPSHRDVW512 + 
OpAMD64VPSHRDVWMasked512 + OpAMD64VPSRLVWMasked512 OpAMD64VPSRAVW512 + OpAMD64VPSRAVWMasked512 OpAMD64VPSUBW512 + OpAMD64VPSUBWMasked512 OpAMD64VPABSW128 + OpAMD64VPABSWMasked128 OpAMD64VPADDW128 + OpAMD64VPADDWMasked128 OpAMD64VPCMPEQW128 OpAMD64VPCMPGTW128 - OpAMD64VPABSWMasked128 - OpAMD64VPADDWMasked128 - OpAMD64VPMAXSWMasked128 - OpAMD64VPMINSWMasked128 - OpAMD64VPMULHWMasked128 - OpAMD64VPMULLWMasked128 - OpAMD64VPMADDWDMasked128 - OpAMD64VPOPCNTWMasked128 - OpAMD64VPADDSWMasked128 - OpAMD64VPSUBSWMasked128 - OpAMD64VPSLLVWMasked128 - OpAMD64VPSHLDVWMasked128 - OpAMD64VPSRLVWMasked128 - OpAMD64VPSHRDVWMasked128 - OpAMD64VPSRAVWMasked128 - OpAMD64VPSUBWMasked128 OpAMD64VPMAXSW128 + OpAMD64VPMAXSWMasked128 OpAMD64VPMINSW128 + OpAMD64VPMINSWMasked128 OpAMD64VPMULHW128 + OpAMD64VPMULHWMasked128 OpAMD64VPMULLW128 + OpAMD64VPMULLWMasked128 OpAMD64VPMADDWD128 + OpAMD64VPMADDWDMasked128 OpAMD64VPHADDW128 OpAMD64VPHSUBW128 OpAMD64VPOPCNTW128 + OpAMD64VPOPCNTWMasked128 OpAMD64VPADDSW128 + OpAMD64VPADDSWMasked128 OpAMD64VPHADDSW128 OpAMD64VPHSUBSW128 OpAMD64VPSUBSW128 + OpAMD64VPSUBSWMasked128 OpAMD64VPSLLW128 OpAMD64VPSRLW128 OpAMD64VPSRAW128 OpAMD64VPSLLVW128 OpAMD64VPSHLDVW128 + OpAMD64VPSHLDVWMasked128 + OpAMD64VPSLLVWMasked128 OpAMD64VPSRLVW128 OpAMD64VPSHRDVW128 + OpAMD64VPSHRDVWMasked128 + OpAMD64VPSRLVWMasked128 OpAMD64VPSRAVW128 + OpAMD64VPSRAVWMasked128 OpAMD64VPSIGNW128 OpAMD64VPSUBW128 + OpAMD64VPSUBWMasked128 OpAMD64VPABSD512 - OpAMD64VPADDD512 - OpAMD64VPANDD512 - OpAMD64VPANDND512 OpAMD64VPABSDMasked512 + OpAMD64VPADDD512 OpAMD64VPADDDMasked512 + OpAMD64VPANDD512 OpAMD64VPANDDMasked512 + OpAMD64VPANDND512 OpAMD64VPANDNDMasked512 - OpAMD64VPMAXSDMasked512 - OpAMD64VPMINSDMasked512 - OpAMD64VPMULLDMasked512 - OpAMD64VPORDMasked512 - OpAMD64VPDPWSSDMasked512 - OpAMD64VPOPCNTDMasked512 - OpAMD64VPROLVDMasked512 - OpAMD64VPRORVDMasked512 - OpAMD64VPDPWSSDSMasked512 - OpAMD64VPDPBUSDSMasked512 - OpAMD64VPSLLVDMasked512 - OpAMD64VPSHLDVDMasked512 - OpAMD64VPSRLVDMasked512 - OpAMD64VPSHRDVDMasked512 - OpAMD64VPSRAVDMasked512 - OpAMD64VPSUBDMasked512 - OpAMD64VPDPBUSDMasked512 - OpAMD64VPXORDMasked512 OpAMD64VPMAXSD512 + OpAMD64VPMAXSDMasked512 OpAMD64VPMINSD512 + OpAMD64VPMINSDMasked512 OpAMD64VPMULLD512 + OpAMD64VPMULLDMasked512 OpAMD64VPORD512 + OpAMD64VPORDMasked512 OpAMD64VPDPWSSD512 + OpAMD64VPDPWSSDMasked512 OpAMD64VPOPCNTD512 + OpAMD64VPOPCNTDMasked512 OpAMD64VPROLVD512 + OpAMD64VPROLVDMasked512 OpAMD64VPRORVD512 + OpAMD64VPRORVDMasked512 OpAMD64VPDPWSSDS512 + OpAMD64VPDPWSSDSMasked512 OpAMD64VPDPBUSDS512 + OpAMD64VPDPBUSDSMasked512 OpAMD64VPSLLVD512 OpAMD64VPSHLDVD512 + OpAMD64VPSHLDVDMasked512 + OpAMD64VPSLLVDMasked512 OpAMD64VPSRLVD512 OpAMD64VPSHRDVD512 + OpAMD64VPSHRDVDMasked512 + OpAMD64VPSRLVDMasked512 OpAMD64VPSRAVD512 + OpAMD64VPSRAVDMasked512 OpAMD64VPSUBD512 + OpAMD64VPSUBDMasked512 OpAMD64VPDPBUSD512 + OpAMD64VPDPBUSDMasked512 OpAMD64VPXORD512 + OpAMD64VPXORDMasked512 OpAMD64VPABSD128 - OpAMD64VPADDD128 - OpAMD64VPCMPEQD128 - OpAMD64VPCMPGTD128 OpAMD64VPABSDMasked128 + OpAMD64VPADDD128 OpAMD64VPADDDMasked128 OpAMD64VPANDDMasked128 OpAMD64VPANDNDMasked128 - OpAMD64VPMAXSDMasked128 - OpAMD64VPMINSDMasked128 - OpAMD64VPMULLDMasked128 - OpAMD64VPORDMasked128 - OpAMD64VPDPWSSDMasked128 - OpAMD64VPOPCNTDMasked128 - OpAMD64VPROLVDMasked128 - OpAMD64VPRORVDMasked128 - OpAMD64VPDPWSSDSMasked128 - OpAMD64VPDPBUSDSMasked128 - OpAMD64VPSLLVDMasked128 - OpAMD64VPSHLDVDMasked128 - OpAMD64VPSRLVDMasked128 - OpAMD64VPSHRDVDMasked128 - OpAMD64VPSRAVDMasked128 - OpAMD64VPSUBDMasked128 - 
OpAMD64VPDPBUSDMasked128 - OpAMD64VPXORDMasked128 + OpAMD64VPCMPEQD128 + OpAMD64VPCMPGTD128 OpAMD64VPMAXSD128 + OpAMD64VPMAXSDMasked128 OpAMD64VPMINSD128 + OpAMD64VPMINSDMasked128 OpAMD64VPMULDQ128 OpAMD64VPMULLD128 + OpAMD64VPMULLDMasked128 + OpAMD64VPORDMasked128 OpAMD64VPDPWSSD128 + OpAMD64VPDPWSSDMasked128 OpAMD64VPHADDD128 OpAMD64VPHSUBD128 OpAMD64VPOPCNTD128 + OpAMD64VPOPCNTDMasked128 OpAMD64VPROLVD128 + OpAMD64VPROLVDMasked128 OpAMD64VPRORVD128 + OpAMD64VPRORVDMasked128 OpAMD64VPDPWSSDS128 + OpAMD64VPDPWSSDSMasked128 OpAMD64VPDPBUSDS128 + OpAMD64VPDPBUSDSMasked128 OpAMD64VPSLLD128 OpAMD64VPSRLD128 OpAMD64VPSRAD128 OpAMD64VPSLLVD128 OpAMD64VPSHLDVD128 + OpAMD64VPSHLDVDMasked128 + OpAMD64VPSLLVDMasked128 OpAMD64VPSRLVD128 OpAMD64VPSHRDVD128 + OpAMD64VPSHRDVDMasked128 + OpAMD64VPSRLVDMasked128 OpAMD64VPSRAVD128 + OpAMD64VPSRAVDMasked128 OpAMD64VPSIGND128 OpAMD64VPSUBD128 + OpAMD64VPSUBDMasked128 OpAMD64VPDPBUSD128 + OpAMD64VPDPBUSDMasked128 + OpAMD64VPXORDMasked128 OpAMD64VPABSD256 - OpAMD64VPADDD256 - OpAMD64VPCMPEQD256 - OpAMD64VPCMPGTD256 OpAMD64VPABSDMasked256 + OpAMD64VPADDD256 OpAMD64VPADDDMasked256 OpAMD64VPANDDMasked256 OpAMD64VPANDNDMasked256 - OpAMD64VPMAXSDMasked256 - OpAMD64VPMINSDMasked256 - OpAMD64VPMULLDMasked256 - OpAMD64VPORDMasked256 - OpAMD64VPDPWSSDMasked256 - OpAMD64VPOPCNTDMasked256 - OpAMD64VPROLVDMasked256 - OpAMD64VPRORVDMasked256 - OpAMD64VPDPWSSDSMasked256 - OpAMD64VPDPBUSDSMasked256 - OpAMD64VPSLLVDMasked256 - OpAMD64VPSHLDVDMasked256 - OpAMD64VPSRLVDMasked256 - OpAMD64VPSHRDVDMasked256 - OpAMD64VPSRAVDMasked256 - OpAMD64VPSUBDMasked256 - OpAMD64VPDPBUSDMasked256 - OpAMD64VPXORDMasked256 + OpAMD64VPCMPEQD256 + OpAMD64VPCMPGTD256 OpAMD64VPMAXSD256 + OpAMD64VPMAXSDMasked256 OpAMD64VPMINSD256 + OpAMD64VPMINSDMasked256 OpAMD64VPMULDQ256 OpAMD64VPMULLD256 + OpAMD64VPMULLDMasked256 + OpAMD64VPORDMasked256 OpAMD64VPDPWSSD256 + OpAMD64VPDPWSSDMasked256 OpAMD64VPHADDD256 OpAMD64VPHSUBD256 OpAMD64VPOPCNTD256 + OpAMD64VPOPCNTDMasked256 OpAMD64VPROLVD256 + OpAMD64VPROLVDMasked256 OpAMD64VPRORVD256 + OpAMD64VPRORVDMasked256 OpAMD64VPDPWSSDS256 + OpAMD64VPDPWSSDSMasked256 OpAMD64VPDPBUSDS256 + OpAMD64VPDPBUSDSMasked256 OpAMD64VPSLLD256 OpAMD64VPSRLD256 OpAMD64VPSRAD256 OpAMD64VPSLLVD256 OpAMD64VPSHLDVD256 + OpAMD64VPSHLDVDMasked256 + OpAMD64VPSLLVDMasked256 OpAMD64VPSRLVD256 OpAMD64VPSHRDVD256 + OpAMD64VPSHRDVDMasked256 + OpAMD64VPSRLVDMasked256 OpAMD64VPSRAVD256 + OpAMD64VPSRAVDMasked256 OpAMD64VPSIGND256 OpAMD64VPSUBD256 + OpAMD64VPSUBDMasked256 OpAMD64VPDPBUSD256 + OpAMD64VPDPBUSDMasked256 + OpAMD64VPXORDMasked256 OpAMD64VPABSQ128 - OpAMD64VPADDQ128 - OpAMD64VPCMPEQQ128 OpAMD64VPABSQMasked128 + OpAMD64VPADDQ128 OpAMD64VPADDQMasked128 OpAMD64VPANDQMasked128 OpAMD64VPANDNQMasked128 + OpAMD64VPCMPEQQ128 + OpAMD64VPMAXSQ128 OpAMD64VPMAXSQMasked128 + OpAMD64VPMINSQ128 OpAMD64VPMINSQMasked128 OpAMD64VPMULDQMasked128 + OpAMD64VPMULLQ128 OpAMD64VPMULLQMasked128 OpAMD64VPORQMasked128 - OpAMD64VPOPCNTQMasked128 - OpAMD64VPROLVQMasked128 - OpAMD64VPRORVQMasked128 - OpAMD64VPSLLQMasked128 - OpAMD64VPSRLQMasked128 - OpAMD64VPSRAQMasked128 - OpAMD64VPSLLVQMasked128 - OpAMD64VPSHLDVQMasked128 - OpAMD64VPSRLVQMasked128 - OpAMD64VPSHRDVQMasked128 - OpAMD64VPSRAVQMasked128 - OpAMD64VPSUBQMasked128 - OpAMD64VPXORQMasked128 - OpAMD64VPMAXSQ128 - OpAMD64VPMINSQ128 - OpAMD64VPMULLQ128 OpAMD64VPOPCNTQ128 + OpAMD64VPOPCNTQMasked128 OpAMD64VPROLVQ128 + OpAMD64VPROLVQMasked128 OpAMD64VPRORVQ128 + OpAMD64VPRORVQMasked128 OpAMD64VPSLLQ128 + OpAMD64VPSLLQMasked128 OpAMD64VPSRLQ128 + 
OpAMD64VPSRLQMasked128 OpAMD64VPSRAQ128 + OpAMD64VPSRAQMasked128 OpAMD64VPSLLVQ128 OpAMD64VPSHLDVQ128 + OpAMD64VPSHLDVQMasked128 + OpAMD64VPSLLVQMasked128 OpAMD64VPSRLVQ128 OpAMD64VPSHRDVQ128 + OpAMD64VPSHRDVQMasked128 + OpAMD64VPSRLVQMasked128 OpAMD64VPSRAVQ128 + OpAMD64VPSRAVQMasked128 OpAMD64VPSUBQ128 + OpAMD64VPSUBQMasked128 + OpAMD64VPXORQMasked128 OpAMD64VPABSQ256 - OpAMD64VPADDQ256 - OpAMD64VPCMPEQQ256 - OpAMD64VPCMPGTQ256 OpAMD64VPABSQMasked256 + OpAMD64VPADDQ256 OpAMD64VPADDQMasked256 OpAMD64VPANDQMasked256 OpAMD64VPANDNQMasked256 + OpAMD64VPCMPEQQ256 + OpAMD64VPCMPGTQ256 + OpAMD64VPMAXSQ256 OpAMD64VPMAXSQMasked256 + OpAMD64VPMINSQ256 OpAMD64VPMINSQMasked256 OpAMD64VPMULDQMasked256 + OpAMD64VPMULLQ256 OpAMD64VPMULLQMasked256 OpAMD64VPORQMasked256 - OpAMD64VPOPCNTQMasked256 - OpAMD64VPROLVQMasked256 - OpAMD64VPRORVQMasked256 - OpAMD64VPSLLQMasked256 - OpAMD64VPSRLQMasked256 - OpAMD64VPSRAQMasked256 - OpAMD64VPSLLVQMasked256 - OpAMD64VPSHLDVQMasked256 - OpAMD64VPSRLVQMasked256 - OpAMD64VPSHRDVQMasked256 - OpAMD64VPSRAVQMasked256 - OpAMD64VPSUBQMasked256 - OpAMD64VPXORQMasked256 - OpAMD64VPMAXSQ256 - OpAMD64VPMINSQ256 - OpAMD64VPMULLQ256 OpAMD64VPOPCNTQ256 + OpAMD64VPOPCNTQMasked256 OpAMD64VPROLVQ256 + OpAMD64VPROLVQMasked256 OpAMD64VPRORVQ256 + OpAMD64VPRORVQMasked256 OpAMD64VPSLLQ256 + OpAMD64VPSLLQMasked256 OpAMD64VPSRLQ256 + OpAMD64VPSRLQMasked256 OpAMD64VPSRAQ256 + OpAMD64VPSRAQMasked256 OpAMD64VPSLLVQ256 OpAMD64VPSHLDVQ256 + OpAMD64VPSHLDVQMasked256 + OpAMD64VPSLLVQMasked256 OpAMD64VPSRLVQ256 OpAMD64VPSHRDVQ256 + OpAMD64VPSHRDVQMasked256 + OpAMD64VPSRLVQMasked256 OpAMD64VPSRAVQ256 + OpAMD64VPSRAVQMasked256 OpAMD64VPSUBQ256 + OpAMD64VPSUBQMasked256 + OpAMD64VPXORQMasked256 OpAMD64VPABSQ512 - OpAMD64VPADDQ512 - OpAMD64VPANDQ512 - OpAMD64VPANDNQ512 OpAMD64VPABSQMasked512 + OpAMD64VPADDQ512 OpAMD64VPADDQMasked512 + OpAMD64VPANDQ512 OpAMD64VPANDQMasked512 + OpAMD64VPANDNQ512 OpAMD64VPANDNQMasked512 - OpAMD64VPMAXSQMasked512 - OpAMD64VPMINSQMasked512 - OpAMD64VPMULDQMasked512 - OpAMD64VPMULLQMasked512 - OpAMD64VPORQMasked512 - OpAMD64VPOPCNTQMasked512 - OpAMD64VPROLVQMasked512 - OpAMD64VPRORVQMasked512 - OpAMD64VPSLLQMasked512 - OpAMD64VPSRLQMasked512 - OpAMD64VPSRAQMasked512 - OpAMD64VPSLLVQMasked512 - OpAMD64VPSHLDVQMasked512 - OpAMD64VPSRLVQMasked512 - OpAMD64VPSHRDVQMasked512 - OpAMD64VPSRAVQMasked512 - OpAMD64VPSUBQMasked512 - OpAMD64VPXORQMasked512 OpAMD64VPMAXSQ512 + OpAMD64VPMAXSQMasked512 OpAMD64VPMINSQ512 + OpAMD64VPMINSQMasked512 OpAMD64VPMULDQ512 + OpAMD64VPMULDQMasked512 OpAMD64VPMULLQ512 + OpAMD64VPMULLQMasked512 OpAMD64VPORQ512 + OpAMD64VPORQMasked512 OpAMD64VPOPCNTQ512 + OpAMD64VPOPCNTQMasked512 OpAMD64VPROLVQ512 + OpAMD64VPROLVQMasked512 OpAMD64VPRORVQ512 + OpAMD64VPRORVQMasked512 OpAMD64VPSLLQ512 + OpAMD64VPSLLQMasked512 OpAMD64VPSRLQ512 + OpAMD64VPSRLQMasked512 OpAMD64VPSRAQ512 + OpAMD64VPSRAQMasked512 OpAMD64VPSLLVQ512 OpAMD64VPSHLDVQ512 + OpAMD64VPSHLDVQMasked512 + OpAMD64VPSLLVQMasked512 OpAMD64VPSRLVQ512 OpAMD64VPSHRDVQ512 + OpAMD64VPSHRDVQMasked512 + OpAMD64VPSRLVQMasked512 OpAMD64VPSRAVQ512 + OpAMD64VPSRAVQMasked512 OpAMD64VPSUBQ512 + OpAMD64VPSUBQMasked512 OpAMD64VPXORQ512 + OpAMD64VPXORQMasked512 OpAMD64VPABSB128 + OpAMD64VPABSBMasked128 OpAMD64VPADDB128 + OpAMD64VPADDBMasked128 OpAMD64VPAND128 OpAMD64VPANDN128 OpAMD64VPCMPEQB128 OpAMD64VPCMPGTB128 - OpAMD64VPABSBMasked128 - OpAMD64VPADDBMasked128 - OpAMD64VPMAXSBMasked128 - OpAMD64VPMINSBMasked128 - OpAMD64VPOPCNTBMasked128 - OpAMD64VPADDSBMasked128 - OpAMD64VPSUBSBMasked128 - OpAMD64VPSUBBMasked128 
OpAMD64VPMAXSB128 + OpAMD64VPMAXSBMasked128 OpAMD64VPMINSB128 + OpAMD64VPMINSBMasked128 OpAMD64VPOR128 OpAMD64VPOPCNTB128 + OpAMD64VPOPCNTBMasked128 OpAMD64VPADDSB128 + OpAMD64VPADDSBMasked128 OpAMD64VPSUBSB128 + OpAMD64VPSUBSBMasked128 OpAMD64VPSIGNB128 OpAMD64VPSUBB128 + OpAMD64VPSUBBMasked128 OpAMD64VPXOR128 OpAMD64VPABSB256 + OpAMD64VPABSBMasked256 OpAMD64VPADDB256 + OpAMD64VPADDBMasked256 OpAMD64VPAND256 OpAMD64VPANDN256 OpAMD64VPCMPEQB256 OpAMD64VPCMPGTB256 - OpAMD64VPABSBMasked256 - OpAMD64VPADDBMasked256 - OpAMD64VPMAXSBMasked256 - OpAMD64VPMINSBMasked256 - OpAMD64VPOPCNTBMasked256 - OpAMD64VPADDSBMasked256 - OpAMD64VPSUBSBMasked256 - OpAMD64VPSUBBMasked256 OpAMD64VPMAXSB256 + OpAMD64VPMAXSBMasked256 OpAMD64VPMINSB256 + OpAMD64VPMINSBMasked256 OpAMD64VPOR256 OpAMD64VPOPCNTB256 + OpAMD64VPOPCNTBMasked256 OpAMD64VPADDSB256 + OpAMD64VPADDSBMasked256 OpAMD64VPSUBSB256 + OpAMD64VPSUBSBMasked256 OpAMD64VPSIGNB256 OpAMD64VPSUBB256 + OpAMD64VPSUBBMasked256 OpAMD64VPXOR256 OpAMD64VPABSB512 - OpAMD64VPADDB512 OpAMD64VPABSBMasked512 + OpAMD64VPADDB512 OpAMD64VPADDBMasked512 - OpAMD64VPMAXSBMasked512 - OpAMD64VPMINSBMasked512 - OpAMD64VPOPCNTBMasked512 - OpAMD64VPADDSBMasked512 - OpAMD64VPSUBSBMasked512 - OpAMD64VPSUBBMasked512 OpAMD64VPMAXSB512 + OpAMD64VPMAXSBMasked512 OpAMD64VPMINSB512 + OpAMD64VPMINSBMasked512 OpAMD64VPOPCNTB512 + OpAMD64VPOPCNTBMasked512 OpAMD64VPADDSB512 + OpAMD64VPADDSBMasked512 OpAMD64VPSUBSB512 + OpAMD64VPSUBSBMasked512 OpAMD64VPSUBB512 + OpAMD64VPSUBBMasked512 OpAMD64VPAVGW256 OpAMD64VPAVGWMasked256 - OpAMD64VPMAXUWMasked256 - OpAMD64VPMINUWMasked256 - OpAMD64VPMULHUWMasked256 OpAMD64VPMAXUW256 + OpAMD64VPMAXUWMasked256 OpAMD64VPMINUW256 + OpAMD64VPMINUWMasked256 OpAMD64VPMULHUW256 + OpAMD64VPMULHUWMasked256 OpAMD64VPAVGW512 OpAMD64VPAVGWMasked512 - OpAMD64VPMAXUWMasked512 - OpAMD64VPMINUWMasked512 - OpAMD64VPMULHUWMasked512 OpAMD64VPMAXUW512 + OpAMD64VPMAXUWMasked512 OpAMD64VPMINUW512 + OpAMD64VPMINUWMasked512 OpAMD64VPMULHUW512 + OpAMD64VPMULHUWMasked512 OpAMD64VPAVGW128 OpAMD64VPAVGWMasked128 - OpAMD64VPMAXUWMasked128 - OpAMD64VPMINUWMasked128 - OpAMD64VPMULHUWMasked128 OpAMD64VPMAXUW128 + OpAMD64VPMAXUWMasked128 OpAMD64VPMINUW128 + OpAMD64VPMINUWMasked128 OpAMD64VPMULHUW128 - OpAMD64VPMAXUDMasked512 - OpAMD64VPMINUDMasked512 + OpAMD64VPMULHUWMasked128 OpAMD64VPMAXUD512 + OpAMD64VPMAXUDMasked512 OpAMD64VPMINUD512 - OpAMD64VPMAXUDMasked128 - OpAMD64VPMINUDMasked128 + OpAMD64VPMINUDMasked512 OpAMD64VPMAXUD128 + OpAMD64VPMAXUDMasked128 OpAMD64VPMINUD128 + OpAMD64VPMINUDMasked128 OpAMD64VPMULUDQ128 - OpAMD64VPMAXUDMasked256 - OpAMD64VPMINUDMasked256 OpAMD64VPMAXUD256 + OpAMD64VPMAXUDMasked256 OpAMD64VPMINUD256 + OpAMD64VPMINUDMasked256 OpAMD64VPMULUDQ256 + OpAMD64VPMAXUQ128 OpAMD64VPMAXUQMasked128 + OpAMD64VPMINUQ128 OpAMD64VPMINUQMasked128 OpAMD64VPMULUDQMasked128 - OpAMD64VPMAXUQ128 - OpAMD64VPMINUQ128 + OpAMD64VPMAXUQ256 OpAMD64VPMAXUQMasked256 + OpAMD64VPMINUQ256 OpAMD64VPMINUQMasked256 OpAMD64VPMULUDQMasked256 - OpAMD64VPMAXUQ256 - OpAMD64VPMINUQ256 - OpAMD64VPMAXUQMasked512 - OpAMD64VPMINUQMasked512 - OpAMD64VPMULUDQMasked512 OpAMD64VPMAXUQ512 + OpAMD64VPMAXUQMasked512 OpAMD64VPMINUQ512 + OpAMD64VPMINUQMasked512 OpAMD64VPMULUDQ512 + OpAMD64VPMULUDQMasked512 OpAMD64VPAVGB128 - OpAMD64VGF2P8MULB128 OpAMD64VPAVGBMasked128 + OpAMD64VGF2P8MULB128 OpAMD64VGF2P8MULBMasked128 - OpAMD64VPMAXUBMasked128 - OpAMD64VPMINUBMasked128 - OpAMD64VPMADDUBSWMasked128 OpAMD64VPMAXUB128 + OpAMD64VPMAXUBMasked128 OpAMD64VPMINUB128 + OpAMD64VPMINUBMasked128 OpAMD64VPMADDUBSW128 + 
OpAMD64VPMADDUBSWMasked128 OpAMD64VPAVGB256 - OpAMD64VGF2P8MULB256 OpAMD64VPAVGBMasked256 + OpAMD64VGF2P8MULB256 OpAMD64VGF2P8MULBMasked256 - OpAMD64VPMAXUBMasked256 - OpAMD64VPMINUBMasked256 - OpAMD64VPMADDUBSWMasked256 OpAMD64VPMAXUB256 + OpAMD64VPMAXUBMasked256 OpAMD64VPMINUB256 + OpAMD64VPMINUBMasked256 OpAMD64VPMADDUBSW256 + OpAMD64VPMADDUBSWMasked256 OpAMD64VPAVGB512 - OpAMD64VGF2P8MULB512 OpAMD64VPAVGBMasked512 + OpAMD64VGF2P8MULB512 OpAMD64VGF2P8MULBMasked512 - OpAMD64VPMAXUBMasked512 - OpAMD64VPMINUBMasked512 - OpAMD64VPMADDUBSWMasked512 OpAMD64VPMAXUB512 + OpAMD64VPMAXUBMasked512 OpAMD64VPMINUB512 + OpAMD64VPMINUBMasked512 OpAMD64VPMADDUBSW512 + OpAMD64VPMADDUBSWMasked512 OpAMD64VRNDSCALEPS512 - OpAMD64VREDUCEPS512 - OpAMD64VCMPPS512 OpAMD64VRNDSCALEPSMasked512 + OpAMD64VREDUCEPS512 OpAMD64VREDUCEPSMasked512 + OpAMD64VCMPPS512 OpAMD64VCMPPSMasked512 OpAMD64VROUNDPS128 OpAMD64VRNDSCALEPS128 - OpAMD64VREDUCEPS128 - OpAMD64VCMPPS128 OpAMD64VRNDSCALEPSMasked128 + OpAMD64VREDUCEPS128 OpAMD64VREDUCEPSMasked128 + OpAMD64VCMPPS128 OpAMD64VCMPPSMasked128 OpAMD64VROUNDPS256 OpAMD64VRNDSCALEPS256 - OpAMD64VREDUCEPS256 - OpAMD64VCMPPS256 - OpAMD64VEXTRACTF128128 OpAMD64VRNDSCALEPSMasked256 + OpAMD64VREDUCEPS256 OpAMD64VREDUCEPSMasked256 + OpAMD64VCMPPS256 OpAMD64VCMPPSMasked256 + OpAMD64VEXTRACTF128128 OpAMD64VINSERTF128256 OpAMD64VROUNDPD128 OpAMD64VRNDSCALEPD128 + OpAMD64VRNDSCALEPDMasked128 OpAMD64VREDUCEPD128 + OpAMD64VREDUCEPDMasked128 OpAMD64VDPPD128 OpAMD64VCMPPD128 - OpAMD64VRNDSCALEPDMasked128 - OpAMD64VREDUCEPDMasked128 OpAMD64VCMPPDMasked128 OpAMD64VROUNDPD256 OpAMD64VRNDSCALEPD256 - OpAMD64VREDUCEPD256 - OpAMD64VCMPPD256 OpAMD64VRNDSCALEPDMasked256 + OpAMD64VREDUCEPD256 OpAMD64VREDUCEPDMasked256 + OpAMD64VCMPPD256 OpAMD64VCMPPDMasked256 OpAMD64VRNDSCALEPD512 - OpAMD64VREDUCEPD512 - OpAMD64VCMPPD512 OpAMD64VRNDSCALEPDMasked512 + OpAMD64VREDUCEPD512 OpAMD64VREDUCEPDMasked512 + OpAMD64VCMPPD512 OpAMD64VCMPPDMasked512 - OpAMD64VPCMPW256 OpAMD64VPCMPWMasked256 - OpAMD64VPSHLDWMasked256 - OpAMD64VPSHRDWMasked256 + OpAMD64VPCMPW256 OpAMD64VPSHLDW256 + OpAMD64VPSHLDWMasked256 OpAMD64VPSHRDW256 + OpAMD64VPSHRDWMasked256 OpAMD64VPCMPW512 OpAMD64VPCMPWMasked512 - OpAMD64VPSHLDWMasked512 - OpAMD64VPSHRDWMasked512 OpAMD64VPSHLDW512 + OpAMD64VPSHLDWMasked512 OpAMD64VPSHRDW512 + OpAMD64VPSHRDWMasked512 + OpAMD64VPCMPWMasked128 OpAMD64VPEXTRW128 OpAMD64VPCMPW128 - OpAMD64VPCMPWMasked128 - OpAMD64VPSHLDWMasked128 - OpAMD64VPSHRDWMasked128 OpAMD64VPINSRW128 OpAMD64VPSHLDW128 + OpAMD64VPSHLDWMasked128 OpAMD64VPSHRDW128 + OpAMD64VPSHRDWMasked128 OpAMD64VPCMPD512 OpAMD64VPCMPDMasked512 - OpAMD64VPROLDMasked512 - OpAMD64VPRORDMasked512 - OpAMD64VPSHLDDMasked512 - OpAMD64VPSHRDDMasked512 OpAMD64VPROLD512 + OpAMD64VPROLDMasked512 OpAMD64VPRORD512 + OpAMD64VPRORDMasked512 OpAMD64VPSHLDD512 + OpAMD64VPSHLDDMasked512 OpAMD64VPSHRDD512 + OpAMD64VPSHRDDMasked512 + OpAMD64VPCMPDMasked128 OpAMD64VPEXTRD128 OpAMD64VPCMPD128 - OpAMD64VPCMPDMasked128 - OpAMD64VPROLDMasked128 - OpAMD64VPRORDMasked128 - OpAMD64VPSHLDDMasked128 - OpAMD64VPSHRDDMasked128 OpAMD64VPROLD128 + OpAMD64VPROLDMasked128 OpAMD64VPRORD128 + OpAMD64VPRORDMasked128 OpAMD64VPINSRD128 OpAMD64VPSHLDD128 + OpAMD64VPSHLDDMasked128 OpAMD64VPSHRDD128 - OpAMD64VPCMPD256 + OpAMD64VPSHRDDMasked128 OpAMD64VPCMPDMasked256 - OpAMD64VPROLDMasked256 - OpAMD64VPRORDMasked256 - OpAMD64VPSHLDDMasked256 - OpAMD64VPSHRDDMasked256 + OpAMD64VPCMPD256 OpAMD64VPROLD256 + OpAMD64VPROLDMasked256 OpAMD64VPRORD256 + OpAMD64VPRORDMasked256 OpAMD64VPSHLDD256 + 
OpAMD64VPSHLDDMasked256 OpAMD64VPSHRDD256 + OpAMD64VPSHRDDMasked256 + OpAMD64VPCMPQMasked128 OpAMD64VPEXTRQ128 OpAMD64VPCMPQ128 - OpAMD64VPCMPQMasked128 - OpAMD64VPROLQMasked128 - OpAMD64VPRORQMasked128 - OpAMD64VPSHLDQMasked128 - OpAMD64VPSHRDQMasked128 OpAMD64VPROLQ128 + OpAMD64VPROLQMasked128 OpAMD64VPRORQ128 + OpAMD64VPRORQMasked128 OpAMD64VPINSRQ128 OpAMD64VPSHLDQ128 + OpAMD64VPSHLDQMasked128 OpAMD64VPSHRDQ128 - OpAMD64VPCMPQ256 + OpAMD64VPSHRDQMasked128 OpAMD64VPCMPQMasked256 - OpAMD64VPROLQMasked256 - OpAMD64VPRORQMasked256 - OpAMD64VPSHLDQMasked256 - OpAMD64VPSHRDQMasked256 + OpAMD64VPCMPQ256 OpAMD64VPROLQ256 + OpAMD64VPROLQMasked256 OpAMD64VPRORQ256 + OpAMD64VPRORQMasked256 OpAMD64VPSHLDQ256 + OpAMD64VPSHLDQMasked256 OpAMD64VPSHRDQ256 + OpAMD64VPSHRDQMasked256 OpAMD64VPCMPQ512 OpAMD64VPCMPQMasked512 - OpAMD64VPROLQMasked512 - OpAMD64VPRORQMasked512 - OpAMD64VPSHLDQMasked512 - OpAMD64VPSHRDQMasked512 OpAMD64VPROLQ512 + OpAMD64VPROLQMasked512 OpAMD64VPRORQ512 + OpAMD64VPRORQMasked512 OpAMD64VPSHLDQ512 + OpAMD64VPSHLDQMasked512 OpAMD64VPSHRDQ512 + OpAMD64VPSHRDQMasked512 + OpAMD64VPCMPBMasked128 OpAMD64VPEXTRB128 OpAMD64VPCMPB128 - OpAMD64VPCMPBMasked128 OpAMD64VPINSRB128 + OpAMD64VPCMPBMasked256 OpAMD64VEXTRACTI128128 OpAMD64VPCMPB256 - OpAMD64VPCMPBMasked256 OpAMD64VINSERTI128256 OpAMD64VPCMPB512 OpAMD64VPCMPBMasked512 @@ -2049,23 +2049,23 @@ const ( OpAMD64VPCMPUQ512 OpAMD64VPCMPUQMasked512 OpAMD64VPCMPUB128 + OpAMD64VPCMPUBMasked128 OpAMD64VGF2P8AFFINEQB128 OpAMD64VGF2P8AFFINEINVQB128 - OpAMD64VPCMPUBMasked128 - OpAMD64VGF2P8AFFINEQBMasked128 OpAMD64VGF2P8AFFINEINVQBMasked128 + OpAMD64VGF2P8AFFINEQBMasked128 OpAMD64VPCMPUB256 + OpAMD64VPCMPUBMasked256 OpAMD64VGF2P8AFFINEQB256 OpAMD64VGF2P8AFFINEINVQB256 - OpAMD64VPCMPUBMasked256 - OpAMD64VGF2P8AFFINEQBMasked256 OpAMD64VGF2P8AFFINEINVQBMasked256 + OpAMD64VGF2P8AFFINEQBMasked256 OpAMD64VPCMPUB512 + OpAMD64VPCMPUBMasked512 OpAMD64VGF2P8AFFINEQB512 OpAMD64VGF2P8AFFINEINVQB512 - OpAMD64VPCMPUBMasked512 - OpAMD64VGF2P8AFFINEQBMasked512 OpAMD64VGF2P8AFFINEINVQBMasked512 + OpAMD64VGF2P8AFFINEQBMasked512 OpARMADD OpARMADDconst @@ -4293,1682 +4293,1682 @@ const ( OpAdd32x4 OpZeroSIMD OpAddFloat32x16 + OpAddMaskedFloat32x16 OpApproximateReciprocalFloat32x16 + OpApproximateReciprocalMaskedFloat32x16 OpApproximateReciprocalOfSqrtFloat32x16 + OpApproximateReciprocalOfSqrtMaskedFloat32x16 OpDivFloat32x16 + OpDivMaskedFloat32x16 OpEqualFloat32x16 + OpEqualMaskedFloat32x16 OpFusedMultiplyAddFloat32x16 + OpFusedMultiplyAddMaskedFloat32x16 OpFusedMultiplyAddSubFloat32x16 + OpFusedMultiplyAddSubMaskedFloat32x16 OpFusedMultiplySubAddFloat32x16 + OpFusedMultiplySubAddMaskedFloat32x16 OpGreaterFloat32x16 OpGreaterEqualFloat32x16 + OpGreaterEqualMaskedFloat32x16 + OpGreaterMaskedFloat32x16 OpIsNanFloat32x16 + OpIsNanMaskedFloat32x16 OpLessFloat32x16 OpLessEqualFloat32x16 - OpMaskedAddFloat32x16 - OpMaskedApproximateReciprocalFloat32x16 - OpMaskedApproximateReciprocalOfSqrtFloat32x16 - OpMaskedDivFloat32x16 - OpMaskedEqualFloat32x16 - OpMaskedFusedMultiplyAddFloat32x16 - OpMaskedFusedMultiplyAddSubFloat32x16 - OpMaskedFusedMultiplySubAddFloat32x16 - OpMaskedGreaterFloat32x16 - OpMaskedGreaterEqualFloat32x16 - OpMaskedIsNanFloat32x16 - OpMaskedLessFloat32x16 - OpMaskedLessEqualFloat32x16 - OpMaskedMaxFloat32x16 - OpMaskedMinFloat32x16 - OpMaskedMulFloat32x16 - OpMaskedMulByPowOf2Float32x16 - OpMaskedNotEqualFloat32x16 - OpMaskedSqrtFloat32x16 - OpMaskedSubFloat32x16 + OpLessEqualMaskedFloat32x16 + OpLessMaskedFloat32x16 OpMaxFloat32x16 + 
OpMaxMaskedFloat32x16 OpMinFloat32x16 + OpMinMaskedFloat32x16 OpMulFloat32x16 OpMulByPowOf2Float32x16 + OpMulByPowOf2MaskedFloat32x16 + OpMulMaskedFloat32x16 OpNotEqualFloat32x16 + OpNotEqualMaskedFloat32x16 OpSqrtFloat32x16 + OpSqrtMaskedFloat32x16 OpSubFloat32x16 + OpSubMaskedFloat32x16 OpAddFloat32x4 + OpAddMaskedFloat32x4 OpAddSubFloat32x4 OpApproximateReciprocalFloat32x4 + OpApproximateReciprocalMaskedFloat32x4 OpApproximateReciprocalOfSqrtFloat32x4 + OpApproximateReciprocalOfSqrtMaskedFloat32x4 OpCeilFloat32x4 OpDivFloat32x4 + OpDivMaskedFloat32x4 OpEqualFloat32x4 + OpEqualMaskedFloat32x4 OpFloorFloat32x4 OpFusedMultiplyAddFloat32x4 + OpFusedMultiplyAddMaskedFloat32x4 OpFusedMultiplyAddSubFloat32x4 + OpFusedMultiplyAddSubMaskedFloat32x4 OpFusedMultiplySubAddFloat32x4 + OpFusedMultiplySubAddMaskedFloat32x4 OpGreaterFloat32x4 OpGreaterEqualFloat32x4 + OpGreaterEqualMaskedFloat32x4 + OpGreaterMaskedFloat32x4 OpIsNanFloat32x4 + OpIsNanMaskedFloat32x4 OpLessFloat32x4 OpLessEqualFloat32x4 - OpMaskedAddFloat32x4 - OpMaskedApproximateReciprocalFloat32x4 - OpMaskedApproximateReciprocalOfSqrtFloat32x4 - OpMaskedDivFloat32x4 - OpMaskedEqualFloat32x4 - OpMaskedFusedMultiplyAddFloat32x4 - OpMaskedFusedMultiplyAddSubFloat32x4 - OpMaskedFusedMultiplySubAddFloat32x4 - OpMaskedGreaterFloat32x4 - OpMaskedGreaterEqualFloat32x4 - OpMaskedIsNanFloat32x4 - OpMaskedLessFloat32x4 - OpMaskedLessEqualFloat32x4 - OpMaskedMaxFloat32x4 - OpMaskedMinFloat32x4 - OpMaskedMulFloat32x4 - OpMaskedMulByPowOf2Float32x4 - OpMaskedNotEqualFloat32x4 - OpMaskedSqrtFloat32x4 - OpMaskedSubFloat32x4 + OpLessEqualMaskedFloat32x4 + OpLessMaskedFloat32x4 OpMaxFloat32x4 + OpMaxMaskedFloat32x4 OpMinFloat32x4 + OpMinMaskedFloat32x4 OpMulFloat32x4 OpMulByPowOf2Float32x4 + OpMulByPowOf2MaskedFloat32x4 + OpMulMaskedFloat32x4 OpNotEqualFloat32x4 + OpNotEqualMaskedFloat32x4 OpPairwiseAddFloat32x4 OpPairwiseSubFloat32x4 OpRoundFloat32x4 OpSqrtFloat32x4 + OpSqrtMaskedFloat32x4 OpSubFloat32x4 + OpSubMaskedFloat32x4 OpTruncFloat32x4 OpAddFloat32x8 + OpAddMaskedFloat32x8 OpAddSubFloat32x8 OpApproximateReciprocalFloat32x8 + OpApproximateReciprocalMaskedFloat32x8 OpApproximateReciprocalOfSqrtFloat32x8 + OpApproximateReciprocalOfSqrtMaskedFloat32x8 OpCeilFloat32x8 OpDivFloat32x8 + OpDivMaskedFloat32x8 OpEqualFloat32x8 + OpEqualMaskedFloat32x8 OpFloorFloat32x8 OpFusedMultiplyAddFloat32x8 + OpFusedMultiplyAddMaskedFloat32x8 OpFusedMultiplyAddSubFloat32x8 + OpFusedMultiplyAddSubMaskedFloat32x8 OpFusedMultiplySubAddFloat32x8 + OpFusedMultiplySubAddMaskedFloat32x8 OpGreaterFloat32x8 OpGreaterEqualFloat32x8 + OpGreaterEqualMaskedFloat32x8 + OpGreaterMaskedFloat32x8 OpIsNanFloat32x8 + OpIsNanMaskedFloat32x8 OpLessFloat32x8 OpLessEqualFloat32x8 - OpMaskedAddFloat32x8 - OpMaskedApproximateReciprocalFloat32x8 - OpMaskedApproximateReciprocalOfSqrtFloat32x8 - OpMaskedDivFloat32x8 - OpMaskedEqualFloat32x8 - OpMaskedFusedMultiplyAddFloat32x8 - OpMaskedFusedMultiplyAddSubFloat32x8 - OpMaskedFusedMultiplySubAddFloat32x8 - OpMaskedGreaterFloat32x8 - OpMaskedGreaterEqualFloat32x8 - OpMaskedIsNanFloat32x8 - OpMaskedLessFloat32x8 - OpMaskedLessEqualFloat32x8 - OpMaskedMaxFloat32x8 - OpMaskedMinFloat32x8 - OpMaskedMulFloat32x8 - OpMaskedMulByPowOf2Float32x8 - OpMaskedNotEqualFloat32x8 - OpMaskedSqrtFloat32x8 - OpMaskedSubFloat32x8 + OpLessEqualMaskedFloat32x8 + OpLessMaskedFloat32x8 OpMaxFloat32x8 + OpMaxMaskedFloat32x8 OpMinFloat32x8 + OpMinMaskedFloat32x8 OpMulFloat32x8 OpMulByPowOf2Float32x8 + OpMulByPowOf2MaskedFloat32x8 + OpMulMaskedFloat32x8 
OpNotEqualFloat32x8 + OpNotEqualMaskedFloat32x8 OpPairwiseAddFloat32x8 OpPairwiseSubFloat32x8 OpRoundFloat32x8 OpSqrtFloat32x8 + OpSqrtMaskedFloat32x8 OpSubFloat32x8 + OpSubMaskedFloat32x8 OpTruncFloat32x8 OpAddFloat64x2 + OpAddMaskedFloat64x2 OpAddSubFloat64x2 OpApproximateReciprocalFloat64x2 + OpApproximateReciprocalMaskedFloat64x2 OpApproximateReciprocalOfSqrtFloat64x2 + OpApproximateReciprocalOfSqrtMaskedFloat64x2 OpCeilFloat64x2 OpDivFloat64x2 + OpDivMaskedFloat64x2 OpDotProdBroadcastFloat64x2 OpEqualFloat64x2 + OpEqualMaskedFloat64x2 OpFloorFloat64x2 OpFusedMultiplyAddFloat64x2 + OpFusedMultiplyAddMaskedFloat64x2 OpFusedMultiplyAddSubFloat64x2 + OpFusedMultiplyAddSubMaskedFloat64x2 OpFusedMultiplySubAddFloat64x2 + OpFusedMultiplySubAddMaskedFloat64x2 OpGreaterFloat64x2 OpGreaterEqualFloat64x2 + OpGreaterEqualMaskedFloat64x2 + OpGreaterMaskedFloat64x2 OpIsNanFloat64x2 + OpIsNanMaskedFloat64x2 OpLessFloat64x2 OpLessEqualFloat64x2 - OpMaskedAddFloat64x2 - OpMaskedApproximateReciprocalFloat64x2 - OpMaskedApproximateReciprocalOfSqrtFloat64x2 - OpMaskedDivFloat64x2 - OpMaskedEqualFloat64x2 - OpMaskedFusedMultiplyAddFloat64x2 - OpMaskedFusedMultiplyAddSubFloat64x2 - OpMaskedFusedMultiplySubAddFloat64x2 - OpMaskedGreaterFloat64x2 - OpMaskedGreaterEqualFloat64x2 - OpMaskedIsNanFloat64x2 - OpMaskedLessFloat64x2 - OpMaskedLessEqualFloat64x2 - OpMaskedMaxFloat64x2 - OpMaskedMinFloat64x2 - OpMaskedMulFloat64x2 - OpMaskedMulByPowOf2Float64x2 - OpMaskedNotEqualFloat64x2 - OpMaskedSqrtFloat64x2 - OpMaskedSubFloat64x2 + OpLessEqualMaskedFloat64x2 + OpLessMaskedFloat64x2 OpMaxFloat64x2 + OpMaxMaskedFloat64x2 OpMinFloat64x2 + OpMinMaskedFloat64x2 OpMulFloat64x2 OpMulByPowOf2Float64x2 + OpMulByPowOf2MaskedFloat64x2 + OpMulMaskedFloat64x2 OpNotEqualFloat64x2 + OpNotEqualMaskedFloat64x2 OpPairwiseAddFloat64x2 OpPairwiseSubFloat64x2 OpRoundFloat64x2 OpSqrtFloat64x2 + OpSqrtMaskedFloat64x2 OpSubFloat64x2 + OpSubMaskedFloat64x2 OpTruncFloat64x2 OpAddFloat64x4 + OpAddMaskedFloat64x4 OpAddSubFloat64x4 OpApproximateReciprocalFloat64x4 + OpApproximateReciprocalMaskedFloat64x4 OpApproximateReciprocalOfSqrtFloat64x4 + OpApproximateReciprocalOfSqrtMaskedFloat64x4 OpCeilFloat64x4 OpDivFloat64x4 + OpDivMaskedFloat64x4 OpEqualFloat64x4 + OpEqualMaskedFloat64x4 OpFloorFloat64x4 OpFusedMultiplyAddFloat64x4 + OpFusedMultiplyAddMaskedFloat64x4 OpFusedMultiplyAddSubFloat64x4 + OpFusedMultiplyAddSubMaskedFloat64x4 OpFusedMultiplySubAddFloat64x4 + OpFusedMultiplySubAddMaskedFloat64x4 OpGreaterFloat64x4 OpGreaterEqualFloat64x4 + OpGreaterEqualMaskedFloat64x4 + OpGreaterMaskedFloat64x4 OpIsNanFloat64x4 + OpIsNanMaskedFloat64x4 OpLessFloat64x4 OpLessEqualFloat64x4 - OpMaskedAddFloat64x4 - OpMaskedApproximateReciprocalFloat64x4 - OpMaskedApproximateReciprocalOfSqrtFloat64x4 - OpMaskedDivFloat64x4 - OpMaskedEqualFloat64x4 - OpMaskedFusedMultiplyAddFloat64x4 - OpMaskedFusedMultiplyAddSubFloat64x4 - OpMaskedFusedMultiplySubAddFloat64x4 - OpMaskedGreaterFloat64x4 - OpMaskedGreaterEqualFloat64x4 - OpMaskedIsNanFloat64x4 - OpMaskedLessFloat64x4 - OpMaskedLessEqualFloat64x4 - OpMaskedMaxFloat64x4 - OpMaskedMinFloat64x4 - OpMaskedMulFloat64x4 - OpMaskedMulByPowOf2Float64x4 - OpMaskedNotEqualFloat64x4 - OpMaskedSqrtFloat64x4 - OpMaskedSubFloat64x4 + OpLessEqualMaskedFloat64x4 + OpLessMaskedFloat64x4 OpMaxFloat64x4 + OpMaxMaskedFloat64x4 OpMinFloat64x4 + OpMinMaskedFloat64x4 OpMulFloat64x4 OpMulByPowOf2Float64x4 + OpMulByPowOf2MaskedFloat64x4 + OpMulMaskedFloat64x4 OpNotEqualFloat64x4 + OpNotEqualMaskedFloat64x4 OpPairwiseAddFloat64x4 
OpPairwiseSubFloat64x4 OpRoundFloat64x4 OpSqrtFloat64x4 + OpSqrtMaskedFloat64x4 OpSubFloat64x4 + OpSubMaskedFloat64x4 OpTruncFloat64x4 OpAddFloat64x8 + OpAddMaskedFloat64x8 OpApproximateReciprocalFloat64x8 + OpApproximateReciprocalMaskedFloat64x8 OpApproximateReciprocalOfSqrtFloat64x8 + OpApproximateReciprocalOfSqrtMaskedFloat64x8 OpDivFloat64x8 + OpDivMaskedFloat64x8 OpEqualFloat64x8 + OpEqualMaskedFloat64x8 OpFusedMultiplyAddFloat64x8 + OpFusedMultiplyAddMaskedFloat64x8 OpFusedMultiplyAddSubFloat64x8 + OpFusedMultiplyAddSubMaskedFloat64x8 OpFusedMultiplySubAddFloat64x8 + OpFusedMultiplySubAddMaskedFloat64x8 OpGreaterFloat64x8 OpGreaterEqualFloat64x8 + OpGreaterEqualMaskedFloat64x8 + OpGreaterMaskedFloat64x8 OpIsNanFloat64x8 + OpIsNanMaskedFloat64x8 OpLessFloat64x8 OpLessEqualFloat64x8 - OpMaskedAddFloat64x8 - OpMaskedApproximateReciprocalFloat64x8 - OpMaskedApproximateReciprocalOfSqrtFloat64x8 - OpMaskedDivFloat64x8 - OpMaskedEqualFloat64x8 - OpMaskedFusedMultiplyAddFloat64x8 - OpMaskedFusedMultiplyAddSubFloat64x8 - OpMaskedFusedMultiplySubAddFloat64x8 - OpMaskedGreaterFloat64x8 - OpMaskedGreaterEqualFloat64x8 - OpMaskedIsNanFloat64x8 - OpMaskedLessFloat64x8 - OpMaskedLessEqualFloat64x8 - OpMaskedMaxFloat64x8 - OpMaskedMinFloat64x8 - OpMaskedMulFloat64x8 - OpMaskedMulByPowOf2Float64x8 - OpMaskedNotEqualFloat64x8 - OpMaskedSqrtFloat64x8 - OpMaskedSubFloat64x8 + OpLessEqualMaskedFloat64x8 + OpLessMaskedFloat64x8 OpMaxFloat64x8 + OpMaxMaskedFloat64x8 OpMinFloat64x8 + OpMinMaskedFloat64x8 OpMulFloat64x8 OpMulByPowOf2Float64x8 + OpMulByPowOf2MaskedFloat64x8 + OpMulMaskedFloat64x8 OpNotEqualFloat64x8 + OpNotEqualMaskedFloat64x8 OpSqrtFloat64x8 + OpSqrtMaskedFloat64x8 OpSubFloat64x8 + OpSubMaskedFloat64x8 OpAbsoluteInt16x16 + OpAbsoluteMaskedInt16x16 OpAddInt16x16 + OpAddMaskedInt16x16 OpAndInt16x16 OpAndNotInt16x16 OpEqualInt16x16 + OpEqualMaskedInt16x16 OpGreaterInt16x16 OpGreaterEqualInt16x16 + OpGreaterEqualMaskedInt16x16 + OpGreaterMaskedInt16x16 OpLessInt16x16 OpLessEqualInt16x16 - OpMaskedAbsoluteInt16x16 - OpMaskedAddInt16x16 - OpMaskedEqualInt16x16 - OpMaskedGreaterInt16x16 - OpMaskedGreaterEqualInt16x16 - OpMaskedLessInt16x16 - OpMaskedLessEqualInt16x16 - OpMaskedMaxInt16x16 - OpMaskedMinInt16x16 - OpMaskedMulHighInt16x16 - OpMaskedMulLowInt16x16 - OpMaskedNotEqualInt16x16 - OpMaskedPairDotProdInt16x16 - OpMaskedPopCountInt16x16 - OpMaskedSaturatedAddInt16x16 - OpMaskedSaturatedSubInt16x16 - OpMaskedShiftLeftInt16x16 - OpMaskedShiftLeftAndFillUpperFromInt16x16 - OpMaskedShiftRightInt16x16 - OpMaskedShiftRightAndFillUpperFromInt16x16 - OpMaskedShiftRightSignExtendedInt16x16 - OpMaskedSubInt16x16 + OpLessEqualMaskedInt16x16 + OpLessMaskedInt16x16 OpMaxInt16x16 + OpMaxMaskedInt16x16 OpMinInt16x16 + OpMinMaskedInt16x16 OpMulHighInt16x16 + OpMulHighMaskedInt16x16 OpMulLowInt16x16 + OpMulLowMaskedInt16x16 OpNotEqualInt16x16 + OpNotEqualMaskedInt16x16 OpOrInt16x16 OpPairDotProdInt16x16 + OpPairDotProdMaskedInt16x16 OpPairwiseAddInt16x16 OpPairwiseSubInt16x16 OpPopCountInt16x16 + OpPopCountMaskedInt16x16 OpSaturatedAddInt16x16 + OpSaturatedAddMaskedInt16x16 OpSaturatedPairwiseAddInt16x16 OpSaturatedPairwiseSubInt16x16 OpSaturatedSubInt16x16 + OpSaturatedSubMaskedInt16x16 OpShiftAllLeftInt16x16 OpShiftAllRightInt16x16 OpShiftAllRightSignExtendedInt16x16 OpShiftLeftInt16x16 OpShiftLeftAndFillUpperFromInt16x16 + OpShiftLeftAndFillUpperFromMaskedInt16x16 + OpShiftLeftMaskedInt16x16 OpShiftRightInt16x16 OpShiftRightAndFillUpperFromInt16x16 + OpShiftRightAndFillUpperFromMaskedInt16x16 + 
OpShiftRightMaskedInt16x16 OpShiftRightSignExtendedInt16x16 + OpShiftRightSignExtendedMaskedInt16x16 OpSignInt16x16 OpSubInt16x16 + OpSubMaskedInt16x16 OpXorInt16x16 OpAbsoluteInt16x32 + OpAbsoluteMaskedInt16x32 OpAddInt16x32 + OpAddMaskedInt16x32 OpEqualInt16x32 + OpEqualMaskedInt16x32 OpGreaterInt16x32 OpGreaterEqualInt16x32 + OpGreaterEqualMaskedInt16x32 + OpGreaterMaskedInt16x32 OpLessInt16x32 OpLessEqualInt16x32 - OpMaskedAbsoluteInt16x32 - OpMaskedAddInt16x32 - OpMaskedEqualInt16x32 - OpMaskedGreaterInt16x32 - OpMaskedGreaterEqualInt16x32 - OpMaskedLessInt16x32 - OpMaskedLessEqualInt16x32 - OpMaskedMaxInt16x32 - OpMaskedMinInt16x32 - OpMaskedMulHighInt16x32 - OpMaskedMulLowInt16x32 - OpMaskedNotEqualInt16x32 - OpMaskedPairDotProdInt16x32 - OpMaskedPopCountInt16x32 - OpMaskedSaturatedAddInt16x32 - OpMaskedSaturatedSubInt16x32 - OpMaskedShiftLeftInt16x32 - OpMaskedShiftLeftAndFillUpperFromInt16x32 - OpMaskedShiftRightInt16x32 - OpMaskedShiftRightAndFillUpperFromInt16x32 - OpMaskedShiftRightSignExtendedInt16x32 - OpMaskedSubInt16x32 + OpLessEqualMaskedInt16x32 + OpLessMaskedInt16x32 OpMaxInt16x32 + OpMaxMaskedInt16x32 OpMinInt16x32 + OpMinMaskedInt16x32 OpMulHighInt16x32 + OpMulHighMaskedInt16x32 OpMulLowInt16x32 + OpMulLowMaskedInt16x32 OpNotEqualInt16x32 + OpNotEqualMaskedInt16x32 OpPairDotProdInt16x32 + OpPairDotProdMaskedInt16x32 OpPopCountInt16x32 + OpPopCountMaskedInt16x32 OpSaturatedAddInt16x32 + OpSaturatedAddMaskedInt16x32 OpSaturatedSubInt16x32 + OpSaturatedSubMaskedInt16x32 OpShiftLeftInt16x32 OpShiftLeftAndFillUpperFromInt16x32 + OpShiftLeftAndFillUpperFromMaskedInt16x32 + OpShiftLeftMaskedInt16x32 OpShiftRightInt16x32 OpShiftRightAndFillUpperFromInt16x32 + OpShiftRightAndFillUpperFromMaskedInt16x32 + OpShiftRightMaskedInt16x32 OpShiftRightSignExtendedInt16x32 + OpShiftRightSignExtendedMaskedInt16x32 OpSubInt16x32 + OpSubMaskedInt16x32 OpAbsoluteInt16x8 + OpAbsoluteMaskedInt16x8 OpAddInt16x8 + OpAddMaskedInt16x8 OpAndInt16x8 OpAndNotInt16x8 OpEqualInt16x8 + OpEqualMaskedInt16x8 OpGreaterInt16x8 OpGreaterEqualInt16x8 + OpGreaterEqualMaskedInt16x8 + OpGreaterMaskedInt16x8 OpLessInt16x8 OpLessEqualInt16x8 - OpMaskedAbsoluteInt16x8 - OpMaskedAddInt16x8 - OpMaskedEqualInt16x8 - OpMaskedGreaterInt16x8 - OpMaskedGreaterEqualInt16x8 - OpMaskedLessInt16x8 - OpMaskedLessEqualInt16x8 - OpMaskedMaxInt16x8 - OpMaskedMinInt16x8 - OpMaskedMulHighInt16x8 - OpMaskedMulLowInt16x8 - OpMaskedNotEqualInt16x8 - OpMaskedPairDotProdInt16x8 - OpMaskedPopCountInt16x8 - OpMaskedSaturatedAddInt16x8 - OpMaskedSaturatedSubInt16x8 - OpMaskedShiftLeftInt16x8 - OpMaskedShiftLeftAndFillUpperFromInt16x8 - OpMaskedShiftRightInt16x8 - OpMaskedShiftRightAndFillUpperFromInt16x8 - OpMaskedShiftRightSignExtendedInt16x8 - OpMaskedSubInt16x8 + OpLessEqualMaskedInt16x8 + OpLessMaskedInt16x8 OpMaxInt16x8 + OpMaxMaskedInt16x8 OpMinInt16x8 + OpMinMaskedInt16x8 OpMulHighInt16x8 + OpMulHighMaskedInt16x8 OpMulLowInt16x8 + OpMulLowMaskedInt16x8 OpNotEqualInt16x8 + OpNotEqualMaskedInt16x8 OpOrInt16x8 OpPairDotProdInt16x8 + OpPairDotProdMaskedInt16x8 OpPairwiseAddInt16x8 OpPairwiseSubInt16x8 OpPopCountInt16x8 + OpPopCountMaskedInt16x8 OpSaturatedAddInt16x8 + OpSaturatedAddMaskedInt16x8 OpSaturatedPairwiseAddInt16x8 OpSaturatedPairwiseSubInt16x8 OpSaturatedSubInt16x8 + OpSaturatedSubMaskedInt16x8 OpShiftAllLeftInt16x8 OpShiftAllRightInt16x8 OpShiftAllRightSignExtendedInt16x8 OpShiftLeftInt16x8 OpShiftLeftAndFillUpperFromInt16x8 + OpShiftLeftAndFillUpperFromMaskedInt16x8 + OpShiftLeftMaskedInt16x8 OpShiftRightInt16x8 
OpShiftRightAndFillUpperFromInt16x8 + OpShiftRightAndFillUpperFromMaskedInt16x8 + OpShiftRightMaskedInt16x8 OpShiftRightSignExtendedInt16x8 + OpShiftRightSignExtendedMaskedInt16x8 OpSignInt16x8 OpSubInt16x8 + OpSubMaskedInt16x8 OpXorInt16x8 OpAbsoluteInt32x16 + OpAbsoluteMaskedInt32x16 OpAddInt32x16 + OpAddMaskedInt32x16 OpAndInt32x16 + OpAndMaskedInt32x16 OpAndNotInt32x16 + OpAndNotMaskedInt32x16 OpEqualInt32x16 + OpEqualMaskedInt32x16 OpGreaterInt32x16 OpGreaterEqualInt32x16 + OpGreaterEqualMaskedInt32x16 + OpGreaterMaskedInt32x16 OpLessInt32x16 OpLessEqualInt32x16 - OpMaskedAbsoluteInt32x16 - OpMaskedAddInt32x16 - OpMaskedAndInt32x16 - OpMaskedAndNotInt32x16 - OpMaskedEqualInt32x16 - OpMaskedGreaterInt32x16 - OpMaskedGreaterEqualInt32x16 - OpMaskedLessInt32x16 - OpMaskedLessEqualInt32x16 - OpMaskedMaxInt32x16 - OpMaskedMinInt32x16 - OpMaskedMulLowInt32x16 - OpMaskedNotEqualInt32x16 - OpMaskedOrInt32x16 - OpMaskedPairDotProdAccumulateInt32x16 - OpMaskedPopCountInt32x16 - OpMaskedRotateLeftInt32x16 - OpMaskedRotateRightInt32x16 - OpMaskedSaturatedPairDotProdAccumulateInt32x16 - OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16 - OpMaskedShiftLeftInt32x16 - OpMaskedShiftLeftAndFillUpperFromInt32x16 - OpMaskedShiftRightInt32x16 - OpMaskedShiftRightAndFillUpperFromInt32x16 - OpMaskedShiftRightSignExtendedInt32x16 - OpMaskedSubInt32x16 - OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x16 - OpMaskedXorInt32x16 + OpLessEqualMaskedInt32x16 + OpLessMaskedInt32x16 OpMaxInt32x16 + OpMaxMaskedInt32x16 OpMinInt32x16 + OpMinMaskedInt32x16 OpMulLowInt32x16 + OpMulLowMaskedInt32x16 OpNotEqualInt32x16 + OpNotEqualMaskedInt32x16 OpOrInt32x16 + OpOrMaskedInt32x16 OpPairDotProdAccumulateInt32x16 + OpPairDotProdAccumulateMaskedInt32x16 OpPopCountInt32x16 + OpPopCountMaskedInt32x16 OpRotateLeftInt32x16 + OpRotateLeftMaskedInt32x16 OpRotateRightInt32x16 + OpRotateRightMaskedInt32x16 OpSaturatedPairDotProdAccumulateInt32x16 + OpSaturatedPairDotProdAccumulateMaskedInt32x16 OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16 + OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16 OpShiftLeftInt32x16 OpShiftLeftAndFillUpperFromInt32x16 + OpShiftLeftAndFillUpperFromMaskedInt32x16 + OpShiftLeftMaskedInt32x16 OpShiftRightInt32x16 OpShiftRightAndFillUpperFromInt32x16 + OpShiftRightAndFillUpperFromMaskedInt32x16 + OpShiftRightMaskedInt32x16 OpShiftRightSignExtendedInt32x16 + OpShiftRightSignExtendedMaskedInt32x16 OpSubInt32x16 + OpSubMaskedInt32x16 OpUnsignedSignedQuadDotProdAccumulateInt32x16 + OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x16 OpXorInt32x16 + OpXorMaskedInt32x16 OpAbsoluteInt32x4 + OpAbsoluteMaskedInt32x4 OpAddInt32x4 + OpAddMaskedInt32x4 OpAndInt32x4 + OpAndMaskedInt32x4 OpAndNotInt32x4 + OpAndNotMaskedInt32x4 OpEqualInt32x4 + OpEqualMaskedInt32x4 OpGreaterInt32x4 OpGreaterEqualInt32x4 + OpGreaterEqualMaskedInt32x4 + OpGreaterMaskedInt32x4 OpLessInt32x4 OpLessEqualInt32x4 - OpMaskedAbsoluteInt32x4 - OpMaskedAddInt32x4 - OpMaskedAndInt32x4 - OpMaskedAndNotInt32x4 - OpMaskedEqualInt32x4 - OpMaskedGreaterInt32x4 - OpMaskedGreaterEqualInt32x4 - OpMaskedLessInt32x4 - OpMaskedLessEqualInt32x4 - OpMaskedMaxInt32x4 - OpMaskedMinInt32x4 - OpMaskedMulLowInt32x4 - OpMaskedNotEqualInt32x4 - OpMaskedOrInt32x4 - OpMaskedPairDotProdAccumulateInt32x4 - OpMaskedPopCountInt32x4 - OpMaskedRotateLeftInt32x4 - OpMaskedRotateRightInt32x4 - OpMaskedSaturatedPairDotProdAccumulateInt32x4 - OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4 - OpMaskedShiftLeftInt32x4 - 
OpMaskedShiftLeftAndFillUpperFromInt32x4 - OpMaskedShiftRightInt32x4 - OpMaskedShiftRightAndFillUpperFromInt32x4 - OpMaskedShiftRightSignExtendedInt32x4 - OpMaskedSubInt32x4 - OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x4 - OpMaskedXorInt32x4 + OpLessEqualMaskedInt32x4 + OpLessMaskedInt32x4 OpMaxInt32x4 + OpMaxMaskedInt32x4 OpMinInt32x4 + OpMinMaskedInt32x4 OpMulEvenWidenInt32x4 OpMulLowInt32x4 + OpMulLowMaskedInt32x4 OpNotEqualInt32x4 + OpNotEqualMaskedInt32x4 OpOrInt32x4 + OpOrMaskedInt32x4 OpPairDotProdAccumulateInt32x4 + OpPairDotProdAccumulateMaskedInt32x4 OpPairwiseAddInt32x4 OpPairwiseSubInt32x4 OpPopCountInt32x4 + OpPopCountMaskedInt32x4 OpRotateLeftInt32x4 + OpRotateLeftMaskedInt32x4 OpRotateRightInt32x4 + OpRotateRightMaskedInt32x4 OpSaturatedPairDotProdAccumulateInt32x4 + OpSaturatedPairDotProdAccumulateMaskedInt32x4 OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4 + OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4 OpShiftAllLeftInt32x4 OpShiftAllRightInt32x4 OpShiftAllRightSignExtendedInt32x4 OpShiftLeftInt32x4 OpShiftLeftAndFillUpperFromInt32x4 + OpShiftLeftAndFillUpperFromMaskedInt32x4 + OpShiftLeftMaskedInt32x4 OpShiftRightInt32x4 OpShiftRightAndFillUpperFromInt32x4 + OpShiftRightAndFillUpperFromMaskedInt32x4 + OpShiftRightMaskedInt32x4 OpShiftRightSignExtendedInt32x4 + OpShiftRightSignExtendedMaskedInt32x4 OpSignInt32x4 OpSubInt32x4 + OpSubMaskedInt32x4 OpUnsignedSignedQuadDotProdAccumulateInt32x4 + OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x4 OpXorInt32x4 + OpXorMaskedInt32x4 OpAbsoluteInt32x8 + OpAbsoluteMaskedInt32x8 OpAddInt32x8 + OpAddMaskedInt32x8 OpAndInt32x8 + OpAndMaskedInt32x8 OpAndNotInt32x8 + OpAndNotMaskedInt32x8 OpEqualInt32x8 + OpEqualMaskedInt32x8 OpGreaterInt32x8 OpGreaterEqualInt32x8 + OpGreaterEqualMaskedInt32x8 + OpGreaterMaskedInt32x8 OpLessInt32x8 OpLessEqualInt32x8 - OpMaskedAbsoluteInt32x8 - OpMaskedAddInt32x8 - OpMaskedAndInt32x8 - OpMaskedAndNotInt32x8 - OpMaskedEqualInt32x8 - OpMaskedGreaterInt32x8 - OpMaskedGreaterEqualInt32x8 - OpMaskedLessInt32x8 - OpMaskedLessEqualInt32x8 - OpMaskedMaxInt32x8 - OpMaskedMinInt32x8 - OpMaskedMulLowInt32x8 - OpMaskedNotEqualInt32x8 - OpMaskedOrInt32x8 - OpMaskedPairDotProdAccumulateInt32x8 - OpMaskedPopCountInt32x8 - OpMaskedRotateLeftInt32x8 - OpMaskedRotateRightInt32x8 - OpMaskedSaturatedPairDotProdAccumulateInt32x8 - OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8 - OpMaskedShiftLeftInt32x8 - OpMaskedShiftLeftAndFillUpperFromInt32x8 - OpMaskedShiftRightInt32x8 - OpMaskedShiftRightAndFillUpperFromInt32x8 - OpMaskedShiftRightSignExtendedInt32x8 - OpMaskedSubInt32x8 - OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x8 - OpMaskedXorInt32x8 + OpLessEqualMaskedInt32x8 + OpLessMaskedInt32x8 OpMaxInt32x8 + OpMaxMaskedInt32x8 OpMinInt32x8 + OpMinMaskedInt32x8 OpMulEvenWidenInt32x8 OpMulLowInt32x8 + OpMulLowMaskedInt32x8 OpNotEqualInt32x8 + OpNotEqualMaskedInt32x8 OpOrInt32x8 + OpOrMaskedInt32x8 OpPairDotProdAccumulateInt32x8 + OpPairDotProdAccumulateMaskedInt32x8 OpPairwiseAddInt32x8 OpPairwiseSubInt32x8 OpPopCountInt32x8 + OpPopCountMaskedInt32x8 OpRotateLeftInt32x8 + OpRotateLeftMaskedInt32x8 OpRotateRightInt32x8 + OpRotateRightMaskedInt32x8 OpSaturatedPairDotProdAccumulateInt32x8 + OpSaturatedPairDotProdAccumulateMaskedInt32x8 OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8 + OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8 OpShiftAllLeftInt32x8 OpShiftAllRightInt32x8 OpShiftAllRightSignExtendedInt32x8 OpShiftLeftInt32x8 OpShiftLeftAndFillUpperFromInt32x8 + 
OpShiftLeftAndFillUpperFromMaskedInt32x8 + OpShiftLeftMaskedInt32x8 OpShiftRightInt32x8 OpShiftRightAndFillUpperFromInt32x8 + OpShiftRightAndFillUpperFromMaskedInt32x8 + OpShiftRightMaskedInt32x8 OpShiftRightSignExtendedInt32x8 + OpShiftRightSignExtendedMaskedInt32x8 OpSignInt32x8 OpSubInt32x8 + OpSubMaskedInt32x8 OpUnsignedSignedQuadDotProdAccumulateInt32x8 + OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x8 OpXorInt32x8 + OpXorMaskedInt32x8 OpAbsoluteInt64x2 + OpAbsoluteMaskedInt64x2 OpAddInt64x2 + OpAddMaskedInt64x2 OpAndInt64x2 + OpAndMaskedInt64x2 OpAndNotInt64x2 + OpAndNotMaskedInt64x2 OpEqualInt64x2 + OpEqualMaskedInt64x2 OpGreaterInt64x2 OpGreaterEqualInt64x2 + OpGreaterEqualMaskedInt64x2 + OpGreaterMaskedInt64x2 OpLessInt64x2 OpLessEqualInt64x2 - OpMaskedAbsoluteInt64x2 - OpMaskedAddInt64x2 - OpMaskedAndInt64x2 - OpMaskedAndNotInt64x2 - OpMaskedEqualInt64x2 - OpMaskedGreaterInt64x2 - OpMaskedGreaterEqualInt64x2 - OpMaskedLessInt64x2 - OpMaskedLessEqualInt64x2 - OpMaskedMaxInt64x2 - OpMaskedMinInt64x2 - OpMaskedMulEvenWidenInt64x2 - OpMaskedMulLowInt64x2 - OpMaskedNotEqualInt64x2 - OpMaskedOrInt64x2 - OpMaskedPopCountInt64x2 - OpMaskedRotateLeftInt64x2 - OpMaskedRotateRightInt64x2 - OpMaskedShiftAllLeftInt64x2 - OpMaskedShiftAllRightInt64x2 - OpMaskedShiftAllRightSignExtendedInt64x2 - OpMaskedShiftLeftInt64x2 - OpMaskedShiftLeftAndFillUpperFromInt64x2 - OpMaskedShiftRightInt64x2 - OpMaskedShiftRightAndFillUpperFromInt64x2 - OpMaskedShiftRightSignExtendedInt64x2 - OpMaskedSubInt64x2 - OpMaskedXorInt64x2 + OpLessEqualMaskedInt64x2 + OpLessMaskedInt64x2 OpMaxInt64x2 + OpMaxMaskedInt64x2 OpMinInt64x2 + OpMinMaskedInt64x2 OpMulEvenWidenInt64x2 + OpMulEvenWidenMaskedInt64x2 OpMulLowInt64x2 + OpMulLowMaskedInt64x2 OpNotEqualInt64x2 + OpNotEqualMaskedInt64x2 OpOrInt64x2 + OpOrMaskedInt64x2 OpPopCountInt64x2 + OpPopCountMaskedInt64x2 OpRotateLeftInt64x2 + OpRotateLeftMaskedInt64x2 OpRotateRightInt64x2 + OpRotateRightMaskedInt64x2 OpShiftAllLeftInt64x2 + OpShiftAllLeftMaskedInt64x2 OpShiftAllRightInt64x2 + OpShiftAllRightMaskedInt64x2 OpShiftAllRightSignExtendedInt64x2 + OpShiftAllRightSignExtendedMaskedInt64x2 OpShiftLeftInt64x2 OpShiftLeftAndFillUpperFromInt64x2 + OpShiftLeftAndFillUpperFromMaskedInt64x2 + OpShiftLeftMaskedInt64x2 OpShiftRightInt64x2 OpShiftRightAndFillUpperFromInt64x2 + OpShiftRightAndFillUpperFromMaskedInt64x2 + OpShiftRightMaskedInt64x2 OpShiftRightSignExtendedInt64x2 + OpShiftRightSignExtendedMaskedInt64x2 OpSubInt64x2 + OpSubMaskedInt64x2 OpXorInt64x2 + OpXorMaskedInt64x2 OpAbsoluteInt64x4 + OpAbsoluteMaskedInt64x4 OpAddInt64x4 + OpAddMaskedInt64x4 OpAndInt64x4 + OpAndMaskedInt64x4 OpAndNotInt64x4 + OpAndNotMaskedInt64x4 OpEqualInt64x4 + OpEqualMaskedInt64x4 OpGreaterInt64x4 OpGreaterEqualInt64x4 + OpGreaterEqualMaskedInt64x4 + OpGreaterMaskedInt64x4 OpLessInt64x4 OpLessEqualInt64x4 - OpMaskedAbsoluteInt64x4 - OpMaskedAddInt64x4 - OpMaskedAndInt64x4 - OpMaskedAndNotInt64x4 - OpMaskedEqualInt64x4 - OpMaskedGreaterInt64x4 - OpMaskedGreaterEqualInt64x4 - OpMaskedLessInt64x4 - OpMaskedLessEqualInt64x4 - OpMaskedMaxInt64x4 - OpMaskedMinInt64x4 - OpMaskedMulEvenWidenInt64x4 - OpMaskedMulLowInt64x4 - OpMaskedNotEqualInt64x4 - OpMaskedOrInt64x4 - OpMaskedPopCountInt64x4 - OpMaskedRotateLeftInt64x4 - OpMaskedRotateRightInt64x4 - OpMaskedShiftAllLeftInt64x4 - OpMaskedShiftAllRightInt64x4 - OpMaskedShiftAllRightSignExtendedInt64x4 - OpMaskedShiftLeftInt64x4 - OpMaskedShiftLeftAndFillUpperFromInt64x4 - OpMaskedShiftRightInt64x4 - OpMaskedShiftRightAndFillUpperFromInt64x4 - 
OpMaskedShiftRightSignExtendedInt64x4 - OpMaskedSubInt64x4 - OpMaskedXorInt64x4 + OpLessEqualMaskedInt64x4 + OpLessMaskedInt64x4 OpMaxInt64x4 + OpMaxMaskedInt64x4 OpMinInt64x4 + OpMinMaskedInt64x4 OpMulEvenWidenInt64x4 + OpMulEvenWidenMaskedInt64x4 OpMulLowInt64x4 + OpMulLowMaskedInt64x4 OpNotEqualInt64x4 + OpNotEqualMaskedInt64x4 OpOrInt64x4 + OpOrMaskedInt64x4 OpPopCountInt64x4 + OpPopCountMaskedInt64x4 OpRotateLeftInt64x4 + OpRotateLeftMaskedInt64x4 OpRotateRightInt64x4 + OpRotateRightMaskedInt64x4 OpShiftAllLeftInt64x4 + OpShiftAllLeftMaskedInt64x4 OpShiftAllRightInt64x4 + OpShiftAllRightMaskedInt64x4 OpShiftAllRightSignExtendedInt64x4 + OpShiftAllRightSignExtendedMaskedInt64x4 OpShiftLeftInt64x4 OpShiftLeftAndFillUpperFromInt64x4 + OpShiftLeftAndFillUpperFromMaskedInt64x4 + OpShiftLeftMaskedInt64x4 OpShiftRightInt64x4 OpShiftRightAndFillUpperFromInt64x4 + OpShiftRightAndFillUpperFromMaskedInt64x4 + OpShiftRightMaskedInt64x4 OpShiftRightSignExtendedInt64x4 + OpShiftRightSignExtendedMaskedInt64x4 OpSubInt64x4 + OpSubMaskedInt64x4 OpXorInt64x4 + OpXorMaskedInt64x4 OpAbsoluteInt64x8 + OpAbsoluteMaskedInt64x8 OpAddInt64x8 + OpAddMaskedInt64x8 OpAndInt64x8 + OpAndMaskedInt64x8 OpAndNotInt64x8 + OpAndNotMaskedInt64x8 OpEqualInt64x8 + OpEqualMaskedInt64x8 OpGreaterInt64x8 OpGreaterEqualInt64x8 + OpGreaterEqualMaskedInt64x8 + OpGreaterMaskedInt64x8 OpLessInt64x8 OpLessEqualInt64x8 - OpMaskedAbsoluteInt64x8 - OpMaskedAddInt64x8 - OpMaskedAndInt64x8 - OpMaskedAndNotInt64x8 - OpMaskedEqualInt64x8 - OpMaskedGreaterInt64x8 - OpMaskedGreaterEqualInt64x8 - OpMaskedLessInt64x8 - OpMaskedLessEqualInt64x8 - OpMaskedMaxInt64x8 - OpMaskedMinInt64x8 - OpMaskedMulEvenWidenInt64x8 - OpMaskedMulLowInt64x8 - OpMaskedNotEqualInt64x8 - OpMaskedOrInt64x8 - OpMaskedPopCountInt64x8 - OpMaskedRotateLeftInt64x8 - OpMaskedRotateRightInt64x8 - OpMaskedShiftAllLeftInt64x8 - OpMaskedShiftAllRightInt64x8 - OpMaskedShiftAllRightSignExtendedInt64x8 - OpMaskedShiftLeftInt64x8 - OpMaskedShiftLeftAndFillUpperFromInt64x8 - OpMaskedShiftRightInt64x8 - OpMaskedShiftRightAndFillUpperFromInt64x8 - OpMaskedShiftRightSignExtendedInt64x8 - OpMaskedSubInt64x8 - OpMaskedXorInt64x8 + OpLessEqualMaskedInt64x8 + OpLessMaskedInt64x8 OpMaxInt64x8 + OpMaxMaskedInt64x8 OpMinInt64x8 + OpMinMaskedInt64x8 OpMulEvenWidenInt64x8 + OpMulEvenWidenMaskedInt64x8 OpMulLowInt64x8 + OpMulLowMaskedInt64x8 OpNotEqualInt64x8 + OpNotEqualMaskedInt64x8 OpOrInt64x8 + OpOrMaskedInt64x8 OpPopCountInt64x8 + OpPopCountMaskedInt64x8 OpRotateLeftInt64x8 + OpRotateLeftMaskedInt64x8 OpRotateRightInt64x8 + OpRotateRightMaskedInt64x8 OpShiftAllLeftInt64x8 + OpShiftAllLeftMaskedInt64x8 OpShiftAllRightInt64x8 + OpShiftAllRightMaskedInt64x8 OpShiftAllRightSignExtendedInt64x8 + OpShiftAllRightSignExtendedMaskedInt64x8 OpShiftLeftInt64x8 OpShiftLeftAndFillUpperFromInt64x8 + OpShiftLeftAndFillUpperFromMaskedInt64x8 + OpShiftLeftMaskedInt64x8 OpShiftRightInt64x8 OpShiftRightAndFillUpperFromInt64x8 + OpShiftRightAndFillUpperFromMaskedInt64x8 + OpShiftRightMaskedInt64x8 OpShiftRightSignExtendedInt64x8 + OpShiftRightSignExtendedMaskedInt64x8 OpSubInt64x8 + OpSubMaskedInt64x8 OpXorInt64x8 + OpXorMaskedInt64x8 OpAbsoluteInt8x16 + OpAbsoluteMaskedInt8x16 OpAddInt8x16 + OpAddMaskedInt8x16 OpAndInt8x16 OpAndNotInt8x16 OpEqualInt8x16 + OpEqualMaskedInt8x16 OpGreaterInt8x16 OpGreaterEqualInt8x16 + OpGreaterEqualMaskedInt8x16 + OpGreaterMaskedInt8x16 OpLessInt8x16 OpLessEqualInt8x16 - OpMaskedAbsoluteInt8x16 - OpMaskedAddInt8x16 - OpMaskedEqualInt8x16 - OpMaskedGreaterInt8x16 - 
OpMaskedGreaterEqualInt8x16 - OpMaskedLessInt8x16 - OpMaskedLessEqualInt8x16 - OpMaskedMaxInt8x16 - OpMaskedMinInt8x16 - OpMaskedNotEqualInt8x16 - OpMaskedPopCountInt8x16 - OpMaskedSaturatedAddInt8x16 - OpMaskedSaturatedSubInt8x16 - OpMaskedSubInt8x16 + OpLessEqualMaskedInt8x16 + OpLessMaskedInt8x16 OpMaxInt8x16 + OpMaxMaskedInt8x16 OpMinInt8x16 + OpMinMaskedInt8x16 OpNotEqualInt8x16 + OpNotEqualMaskedInt8x16 OpOrInt8x16 OpPopCountInt8x16 + OpPopCountMaskedInt8x16 OpSaturatedAddInt8x16 + OpSaturatedAddMaskedInt8x16 OpSaturatedSubInt8x16 + OpSaturatedSubMaskedInt8x16 OpSignInt8x16 OpSubInt8x16 + OpSubMaskedInt8x16 OpXorInt8x16 OpAbsoluteInt8x32 + OpAbsoluteMaskedInt8x32 OpAddInt8x32 + OpAddMaskedInt8x32 OpAndInt8x32 OpAndNotInt8x32 OpEqualInt8x32 + OpEqualMaskedInt8x32 OpGreaterInt8x32 OpGreaterEqualInt8x32 + OpGreaterEqualMaskedInt8x32 + OpGreaterMaskedInt8x32 OpLessInt8x32 OpLessEqualInt8x32 - OpMaskedAbsoluteInt8x32 - OpMaskedAddInt8x32 - OpMaskedEqualInt8x32 - OpMaskedGreaterInt8x32 - OpMaskedGreaterEqualInt8x32 - OpMaskedLessInt8x32 - OpMaskedLessEqualInt8x32 - OpMaskedMaxInt8x32 - OpMaskedMinInt8x32 - OpMaskedNotEqualInt8x32 - OpMaskedPopCountInt8x32 - OpMaskedSaturatedAddInt8x32 - OpMaskedSaturatedSubInt8x32 - OpMaskedSubInt8x32 + OpLessEqualMaskedInt8x32 + OpLessMaskedInt8x32 OpMaxInt8x32 + OpMaxMaskedInt8x32 OpMinInt8x32 + OpMinMaskedInt8x32 OpNotEqualInt8x32 + OpNotEqualMaskedInt8x32 OpOrInt8x32 OpPopCountInt8x32 + OpPopCountMaskedInt8x32 OpSaturatedAddInt8x32 + OpSaturatedAddMaskedInt8x32 OpSaturatedSubInt8x32 + OpSaturatedSubMaskedInt8x32 OpSignInt8x32 OpSubInt8x32 + OpSubMaskedInt8x32 OpXorInt8x32 OpAbsoluteInt8x64 + OpAbsoluteMaskedInt8x64 OpAddInt8x64 + OpAddMaskedInt8x64 OpEqualInt8x64 + OpEqualMaskedInt8x64 OpGreaterInt8x64 OpGreaterEqualInt8x64 + OpGreaterEqualMaskedInt8x64 + OpGreaterMaskedInt8x64 OpLessInt8x64 OpLessEqualInt8x64 - OpMaskedAbsoluteInt8x64 - OpMaskedAddInt8x64 - OpMaskedEqualInt8x64 - OpMaskedGreaterInt8x64 - OpMaskedGreaterEqualInt8x64 - OpMaskedLessInt8x64 - OpMaskedLessEqualInt8x64 - OpMaskedMaxInt8x64 - OpMaskedMinInt8x64 - OpMaskedNotEqualInt8x64 - OpMaskedPopCountInt8x64 - OpMaskedSaturatedAddInt8x64 - OpMaskedSaturatedSubInt8x64 - OpMaskedSubInt8x64 + OpLessEqualMaskedInt8x64 + OpLessMaskedInt8x64 OpMaxInt8x64 + OpMaxMaskedInt8x64 OpMinInt8x64 + OpMinMaskedInt8x64 OpNotEqualInt8x64 + OpNotEqualMaskedInt8x64 OpPopCountInt8x64 + OpPopCountMaskedInt8x64 OpSaturatedAddInt8x64 + OpSaturatedAddMaskedInt8x64 OpSaturatedSubInt8x64 + OpSaturatedSubMaskedInt8x64 OpSubInt8x64 + OpSubMaskedInt8x64 OpAddUint16x16 + OpAddMaskedUint16x16 OpAndUint16x16 OpAndNotUint16x16 OpAverageUint16x16 + OpAverageMaskedUint16x16 OpEqualUint16x16 + OpEqualMaskedUint16x16 OpGreaterUint16x16 OpGreaterEqualUint16x16 + OpGreaterEqualMaskedUint16x16 + OpGreaterMaskedUint16x16 OpLessUint16x16 OpLessEqualUint16x16 - OpMaskedAddUint16x16 - OpMaskedAverageUint16x16 - OpMaskedEqualUint16x16 - OpMaskedGreaterUint16x16 - OpMaskedGreaterEqualUint16x16 - OpMaskedLessUint16x16 - OpMaskedLessEqualUint16x16 - OpMaskedMaxUint16x16 - OpMaskedMinUint16x16 - OpMaskedMulHighUint16x16 - OpMaskedNotEqualUint16x16 - OpMaskedPopCountUint16x16 - OpMaskedSaturatedAddUint16x16 - OpMaskedSaturatedSubUint16x16 - OpMaskedShiftLeftUint16x16 - OpMaskedShiftLeftAndFillUpperFromUint16x16 - OpMaskedShiftRightUint16x16 - OpMaskedShiftRightAndFillUpperFromUint16x16 - OpMaskedShiftRightSignExtendedUint16x16 - OpMaskedSubUint16x16 + OpLessEqualMaskedUint16x16 + OpLessMaskedUint16x16 OpMaxUint16x16 + 
OpMaxMaskedUint16x16 OpMinUint16x16 + OpMinMaskedUint16x16 OpMulHighUint16x16 + OpMulHighMaskedUint16x16 OpNotEqualUint16x16 + OpNotEqualMaskedUint16x16 OpOrUint16x16 OpPairwiseAddUint16x16 OpPairwiseSubUint16x16 OpPopCountUint16x16 + OpPopCountMaskedUint16x16 OpSaturatedAddUint16x16 + OpSaturatedAddMaskedUint16x16 OpSaturatedSubUint16x16 + OpSaturatedSubMaskedUint16x16 OpShiftAllLeftUint16x16 OpShiftAllRightUint16x16 OpShiftLeftUint16x16 OpShiftLeftAndFillUpperFromUint16x16 + OpShiftLeftAndFillUpperFromMaskedUint16x16 + OpShiftLeftMaskedUint16x16 OpShiftRightUint16x16 OpShiftRightAndFillUpperFromUint16x16 + OpShiftRightAndFillUpperFromMaskedUint16x16 + OpShiftRightMaskedUint16x16 OpShiftRightSignExtendedUint16x16 + OpShiftRightSignExtendedMaskedUint16x16 OpSubUint16x16 + OpSubMaskedUint16x16 OpXorUint16x16 OpAddUint16x32 + OpAddMaskedUint16x32 OpAverageUint16x32 + OpAverageMaskedUint16x32 OpEqualUint16x32 + OpEqualMaskedUint16x32 OpGreaterUint16x32 OpGreaterEqualUint16x32 + OpGreaterEqualMaskedUint16x32 + OpGreaterMaskedUint16x32 OpLessUint16x32 OpLessEqualUint16x32 - OpMaskedAddUint16x32 - OpMaskedAverageUint16x32 - OpMaskedEqualUint16x32 - OpMaskedGreaterUint16x32 - OpMaskedGreaterEqualUint16x32 - OpMaskedLessUint16x32 - OpMaskedLessEqualUint16x32 - OpMaskedMaxUint16x32 - OpMaskedMinUint16x32 - OpMaskedMulHighUint16x32 - OpMaskedNotEqualUint16x32 - OpMaskedPopCountUint16x32 - OpMaskedSaturatedAddUint16x32 - OpMaskedSaturatedSubUint16x32 - OpMaskedShiftLeftUint16x32 - OpMaskedShiftLeftAndFillUpperFromUint16x32 - OpMaskedShiftRightUint16x32 - OpMaskedShiftRightAndFillUpperFromUint16x32 - OpMaskedShiftRightSignExtendedUint16x32 - OpMaskedSubUint16x32 + OpLessEqualMaskedUint16x32 + OpLessMaskedUint16x32 OpMaxUint16x32 + OpMaxMaskedUint16x32 OpMinUint16x32 + OpMinMaskedUint16x32 OpMulHighUint16x32 + OpMulHighMaskedUint16x32 OpNotEqualUint16x32 + OpNotEqualMaskedUint16x32 OpPopCountUint16x32 + OpPopCountMaskedUint16x32 OpSaturatedAddUint16x32 + OpSaturatedAddMaskedUint16x32 OpSaturatedSubUint16x32 + OpSaturatedSubMaskedUint16x32 OpShiftLeftUint16x32 OpShiftLeftAndFillUpperFromUint16x32 + OpShiftLeftAndFillUpperFromMaskedUint16x32 + OpShiftLeftMaskedUint16x32 OpShiftRightUint16x32 OpShiftRightAndFillUpperFromUint16x32 + OpShiftRightAndFillUpperFromMaskedUint16x32 + OpShiftRightMaskedUint16x32 OpShiftRightSignExtendedUint16x32 + OpShiftRightSignExtendedMaskedUint16x32 OpSubUint16x32 + OpSubMaskedUint16x32 OpAddUint16x8 + OpAddMaskedUint16x8 OpAndUint16x8 OpAndNotUint16x8 OpAverageUint16x8 + OpAverageMaskedUint16x8 OpEqualUint16x8 + OpEqualMaskedUint16x8 OpGreaterUint16x8 OpGreaterEqualUint16x8 + OpGreaterEqualMaskedUint16x8 + OpGreaterMaskedUint16x8 OpLessUint16x8 OpLessEqualUint16x8 - OpMaskedAddUint16x8 - OpMaskedAverageUint16x8 - OpMaskedEqualUint16x8 - OpMaskedGreaterUint16x8 - OpMaskedGreaterEqualUint16x8 - OpMaskedLessUint16x8 - OpMaskedLessEqualUint16x8 - OpMaskedMaxUint16x8 - OpMaskedMinUint16x8 - OpMaskedMulHighUint16x8 - OpMaskedNotEqualUint16x8 - OpMaskedPopCountUint16x8 - OpMaskedSaturatedAddUint16x8 - OpMaskedSaturatedSubUint16x8 - OpMaskedShiftLeftUint16x8 - OpMaskedShiftLeftAndFillUpperFromUint16x8 - OpMaskedShiftRightUint16x8 - OpMaskedShiftRightAndFillUpperFromUint16x8 - OpMaskedShiftRightSignExtendedUint16x8 - OpMaskedSubUint16x8 + OpLessEqualMaskedUint16x8 + OpLessMaskedUint16x8 OpMaxUint16x8 + OpMaxMaskedUint16x8 OpMinUint16x8 + OpMinMaskedUint16x8 OpMulHighUint16x8 + OpMulHighMaskedUint16x8 OpNotEqualUint16x8 + OpNotEqualMaskedUint16x8 OpOrUint16x8 OpPairwiseAddUint16x8 
OpPairwiseSubUint16x8 OpPopCountUint16x8 + OpPopCountMaskedUint16x8 OpSaturatedAddUint16x8 + OpSaturatedAddMaskedUint16x8 OpSaturatedSubUint16x8 + OpSaturatedSubMaskedUint16x8 OpShiftAllLeftUint16x8 OpShiftAllRightUint16x8 OpShiftLeftUint16x8 OpShiftLeftAndFillUpperFromUint16x8 + OpShiftLeftAndFillUpperFromMaskedUint16x8 + OpShiftLeftMaskedUint16x8 OpShiftRightUint16x8 OpShiftRightAndFillUpperFromUint16x8 + OpShiftRightAndFillUpperFromMaskedUint16x8 + OpShiftRightMaskedUint16x8 OpShiftRightSignExtendedUint16x8 + OpShiftRightSignExtendedMaskedUint16x8 OpSubUint16x8 + OpSubMaskedUint16x8 OpXorUint16x8 OpAddUint32x16 + OpAddMaskedUint32x16 OpAndUint32x16 + OpAndMaskedUint32x16 OpAndNotUint32x16 + OpAndNotMaskedUint32x16 OpEqualUint32x16 + OpEqualMaskedUint32x16 OpGreaterUint32x16 OpGreaterEqualUint32x16 + OpGreaterEqualMaskedUint32x16 + OpGreaterMaskedUint32x16 OpLessUint32x16 OpLessEqualUint32x16 - OpMaskedAddUint32x16 - OpMaskedAndUint32x16 - OpMaskedAndNotUint32x16 - OpMaskedEqualUint32x16 - OpMaskedGreaterUint32x16 - OpMaskedGreaterEqualUint32x16 - OpMaskedLessUint32x16 - OpMaskedLessEqualUint32x16 - OpMaskedMaxUint32x16 - OpMaskedMinUint32x16 - OpMaskedNotEqualUint32x16 - OpMaskedOrUint32x16 - OpMaskedPopCountUint32x16 - OpMaskedRotateLeftUint32x16 - OpMaskedRotateRightUint32x16 - OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16 - OpMaskedShiftLeftUint32x16 - OpMaskedShiftLeftAndFillUpperFromUint32x16 - OpMaskedShiftRightUint32x16 - OpMaskedShiftRightAndFillUpperFromUint32x16 - OpMaskedShiftRightSignExtendedUint32x16 - OpMaskedSubUint32x16 - OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x16 - OpMaskedXorUint32x16 + OpLessEqualMaskedUint32x16 + OpLessMaskedUint32x16 OpMaxUint32x16 + OpMaxMaskedUint32x16 OpMinUint32x16 + OpMinMaskedUint32x16 OpNotEqualUint32x16 + OpNotEqualMaskedUint32x16 OpOrUint32x16 + OpOrMaskedUint32x16 OpPopCountUint32x16 + OpPopCountMaskedUint32x16 OpRotateLeftUint32x16 + OpRotateLeftMaskedUint32x16 OpRotateRightUint32x16 + OpRotateRightMaskedUint32x16 OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16 + OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x16 OpShiftLeftUint32x16 OpShiftLeftAndFillUpperFromUint32x16 + OpShiftLeftAndFillUpperFromMaskedUint32x16 + OpShiftLeftMaskedUint32x16 OpShiftRightUint32x16 OpShiftRightAndFillUpperFromUint32x16 + OpShiftRightAndFillUpperFromMaskedUint32x16 + OpShiftRightMaskedUint32x16 OpShiftRightSignExtendedUint32x16 + OpShiftRightSignExtendedMaskedUint32x16 OpSubUint32x16 + OpSubMaskedUint32x16 OpUnsignedSignedQuadDotProdAccumulateUint32x16 + OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x16 OpXorUint32x16 + OpXorMaskedUint32x16 OpAddUint32x4 + OpAddMaskedUint32x4 OpAndUint32x4 + OpAndMaskedUint32x4 OpAndNotUint32x4 + OpAndNotMaskedUint32x4 OpEqualUint32x4 + OpEqualMaskedUint32x4 OpGreaterUint32x4 OpGreaterEqualUint32x4 + OpGreaterEqualMaskedUint32x4 + OpGreaterMaskedUint32x4 OpLessUint32x4 OpLessEqualUint32x4 - OpMaskedAddUint32x4 - OpMaskedAndUint32x4 - OpMaskedAndNotUint32x4 - OpMaskedEqualUint32x4 - OpMaskedGreaterUint32x4 - OpMaskedGreaterEqualUint32x4 - OpMaskedLessUint32x4 - OpMaskedLessEqualUint32x4 - OpMaskedMaxUint32x4 - OpMaskedMinUint32x4 - OpMaskedNotEqualUint32x4 - OpMaskedOrUint32x4 - OpMaskedPopCountUint32x4 - OpMaskedRotateLeftUint32x4 - OpMaskedRotateRightUint32x4 - OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4 - OpMaskedShiftLeftUint32x4 - OpMaskedShiftLeftAndFillUpperFromUint32x4 - OpMaskedShiftRightUint32x4 - OpMaskedShiftRightAndFillUpperFromUint32x4 - 
OpMaskedShiftRightSignExtendedUint32x4 - OpMaskedSubUint32x4 - OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x4 - OpMaskedXorUint32x4 + OpLessEqualMaskedUint32x4 + OpLessMaskedUint32x4 OpMaxUint32x4 + OpMaxMaskedUint32x4 OpMinUint32x4 + OpMinMaskedUint32x4 OpMulEvenWidenUint32x4 OpNotEqualUint32x4 + OpNotEqualMaskedUint32x4 OpOrUint32x4 + OpOrMaskedUint32x4 OpPairwiseAddUint32x4 OpPairwiseSubUint32x4 OpPopCountUint32x4 + OpPopCountMaskedUint32x4 OpRotateLeftUint32x4 + OpRotateLeftMaskedUint32x4 OpRotateRightUint32x4 + OpRotateRightMaskedUint32x4 OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4 + OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x4 OpShiftAllLeftUint32x4 OpShiftAllRightUint32x4 OpShiftLeftUint32x4 OpShiftLeftAndFillUpperFromUint32x4 + OpShiftLeftAndFillUpperFromMaskedUint32x4 + OpShiftLeftMaskedUint32x4 OpShiftRightUint32x4 OpShiftRightAndFillUpperFromUint32x4 + OpShiftRightAndFillUpperFromMaskedUint32x4 + OpShiftRightMaskedUint32x4 OpShiftRightSignExtendedUint32x4 + OpShiftRightSignExtendedMaskedUint32x4 OpSubUint32x4 + OpSubMaskedUint32x4 OpUnsignedSignedQuadDotProdAccumulateUint32x4 + OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x4 OpXorUint32x4 + OpXorMaskedUint32x4 OpAddUint32x8 + OpAddMaskedUint32x8 OpAndUint32x8 + OpAndMaskedUint32x8 OpAndNotUint32x8 + OpAndNotMaskedUint32x8 OpEqualUint32x8 + OpEqualMaskedUint32x8 OpGreaterUint32x8 OpGreaterEqualUint32x8 + OpGreaterEqualMaskedUint32x8 + OpGreaterMaskedUint32x8 OpLessUint32x8 OpLessEqualUint32x8 - OpMaskedAddUint32x8 - OpMaskedAndUint32x8 - OpMaskedAndNotUint32x8 - OpMaskedEqualUint32x8 - OpMaskedGreaterUint32x8 - OpMaskedGreaterEqualUint32x8 - OpMaskedLessUint32x8 - OpMaskedLessEqualUint32x8 - OpMaskedMaxUint32x8 - OpMaskedMinUint32x8 - OpMaskedNotEqualUint32x8 - OpMaskedOrUint32x8 - OpMaskedPopCountUint32x8 - OpMaskedRotateLeftUint32x8 - OpMaskedRotateRightUint32x8 - OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8 - OpMaskedShiftLeftUint32x8 - OpMaskedShiftLeftAndFillUpperFromUint32x8 - OpMaskedShiftRightUint32x8 - OpMaskedShiftRightAndFillUpperFromUint32x8 - OpMaskedShiftRightSignExtendedUint32x8 - OpMaskedSubUint32x8 - OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x8 - OpMaskedXorUint32x8 + OpLessEqualMaskedUint32x8 + OpLessMaskedUint32x8 OpMaxUint32x8 + OpMaxMaskedUint32x8 OpMinUint32x8 + OpMinMaskedUint32x8 OpMulEvenWidenUint32x8 OpNotEqualUint32x8 + OpNotEqualMaskedUint32x8 OpOrUint32x8 + OpOrMaskedUint32x8 OpPairwiseAddUint32x8 OpPairwiseSubUint32x8 OpPopCountUint32x8 + OpPopCountMaskedUint32x8 OpRotateLeftUint32x8 + OpRotateLeftMaskedUint32x8 OpRotateRightUint32x8 + OpRotateRightMaskedUint32x8 OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8 + OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x8 OpShiftAllLeftUint32x8 OpShiftAllRightUint32x8 OpShiftLeftUint32x8 OpShiftLeftAndFillUpperFromUint32x8 + OpShiftLeftAndFillUpperFromMaskedUint32x8 + OpShiftLeftMaskedUint32x8 OpShiftRightUint32x8 OpShiftRightAndFillUpperFromUint32x8 + OpShiftRightAndFillUpperFromMaskedUint32x8 + OpShiftRightMaskedUint32x8 OpShiftRightSignExtendedUint32x8 + OpShiftRightSignExtendedMaskedUint32x8 OpSubUint32x8 + OpSubMaskedUint32x8 OpUnsignedSignedQuadDotProdAccumulateUint32x8 + OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x8 OpXorUint32x8 + OpXorMaskedUint32x8 OpAddUint64x2 + OpAddMaskedUint64x2 OpAndUint64x2 + OpAndMaskedUint64x2 OpAndNotUint64x2 + OpAndNotMaskedUint64x2 OpEqualUint64x2 + OpEqualMaskedUint64x2 OpGreaterUint64x2 OpGreaterEqualUint64x2 + OpGreaterEqualMaskedUint64x2 
+ OpGreaterMaskedUint64x2 OpLessUint64x2 OpLessEqualUint64x2 - OpMaskedAddUint64x2 - OpMaskedAndUint64x2 - OpMaskedAndNotUint64x2 - OpMaskedEqualUint64x2 - OpMaskedGreaterUint64x2 - OpMaskedGreaterEqualUint64x2 - OpMaskedLessUint64x2 - OpMaskedLessEqualUint64x2 - OpMaskedMaxUint64x2 - OpMaskedMinUint64x2 - OpMaskedMulEvenWidenUint64x2 - OpMaskedNotEqualUint64x2 - OpMaskedOrUint64x2 - OpMaskedPopCountUint64x2 - OpMaskedRotateLeftUint64x2 - OpMaskedRotateRightUint64x2 - OpMaskedShiftAllLeftUint64x2 - OpMaskedShiftAllRightUint64x2 - OpMaskedShiftLeftUint64x2 - OpMaskedShiftLeftAndFillUpperFromUint64x2 - OpMaskedShiftRightUint64x2 - OpMaskedShiftRightAndFillUpperFromUint64x2 - OpMaskedShiftRightSignExtendedUint64x2 - OpMaskedSubUint64x2 - OpMaskedXorUint64x2 + OpLessEqualMaskedUint64x2 + OpLessMaskedUint64x2 OpMaxUint64x2 + OpMaxMaskedUint64x2 OpMinUint64x2 + OpMinMaskedUint64x2 OpMulEvenWidenUint64x2 + OpMulEvenWidenMaskedUint64x2 OpNotEqualUint64x2 + OpNotEqualMaskedUint64x2 OpOrUint64x2 + OpOrMaskedUint64x2 OpPopCountUint64x2 + OpPopCountMaskedUint64x2 OpRotateLeftUint64x2 + OpRotateLeftMaskedUint64x2 OpRotateRightUint64x2 + OpRotateRightMaskedUint64x2 OpShiftAllLeftUint64x2 + OpShiftAllLeftMaskedUint64x2 OpShiftAllRightUint64x2 + OpShiftAllRightMaskedUint64x2 OpShiftLeftUint64x2 OpShiftLeftAndFillUpperFromUint64x2 + OpShiftLeftAndFillUpperFromMaskedUint64x2 + OpShiftLeftMaskedUint64x2 OpShiftRightUint64x2 OpShiftRightAndFillUpperFromUint64x2 + OpShiftRightAndFillUpperFromMaskedUint64x2 + OpShiftRightMaskedUint64x2 OpShiftRightSignExtendedUint64x2 + OpShiftRightSignExtendedMaskedUint64x2 OpSubUint64x2 + OpSubMaskedUint64x2 OpXorUint64x2 + OpXorMaskedUint64x2 OpAddUint64x4 + OpAddMaskedUint64x4 OpAndUint64x4 + OpAndMaskedUint64x4 OpAndNotUint64x4 + OpAndNotMaskedUint64x4 OpEqualUint64x4 + OpEqualMaskedUint64x4 OpGreaterUint64x4 OpGreaterEqualUint64x4 + OpGreaterEqualMaskedUint64x4 + OpGreaterMaskedUint64x4 OpLessUint64x4 OpLessEqualUint64x4 - OpMaskedAddUint64x4 - OpMaskedAndUint64x4 - OpMaskedAndNotUint64x4 - OpMaskedEqualUint64x4 - OpMaskedGreaterUint64x4 - OpMaskedGreaterEqualUint64x4 - OpMaskedLessUint64x4 - OpMaskedLessEqualUint64x4 - OpMaskedMaxUint64x4 - OpMaskedMinUint64x4 - OpMaskedMulEvenWidenUint64x4 - OpMaskedNotEqualUint64x4 - OpMaskedOrUint64x4 - OpMaskedPopCountUint64x4 - OpMaskedRotateLeftUint64x4 - OpMaskedRotateRightUint64x4 - OpMaskedShiftAllLeftUint64x4 - OpMaskedShiftAllRightUint64x4 - OpMaskedShiftLeftUint64x4 - OpMaskedShiftLeftAndFillUpperFromUint64x4 - OpMaskedShiftRightUint64x4 - OpMaskedShiftRightAndFillUpperFromUint64x4 - OpMaskedShiftRightSignExtendedUint64x4 - OpMaskedSubUint64x4 - OpMaskedXorUint64x4 + OpLessEqualMaskedUint64x4 + OpLessMaskedUint64x4 OpMaxUint64x4 + OpMaxMaskedUint64x4 OpMinUint64x4 + OpMinMaskedUint64x4 OpMulEvenWidenUint64x4 + OpMulEvenWidenMaskedUint64x4 OpNotEqualUint64x4 + OpNotEqualMaskedUint64x4 OpOrUint64x4 + OpOrMaskedUint64x4 OpPopCountUint64x4 + OpPopCountMaskedUint64x4 OpRotateLeftUint64x4 + OpRotateLeftMaskedUint64x4 OpRotateRightUint64x4 + OpRotateRightMaskedUint64x4 OpShiftAllLeftUint64x4 + OpShiftAllLeftMaskedUint64x4 OpShiftAllRightUint64x4 + OpShiftAllRightMaskedUint64x4 OpShiftLeftUint64x4 OpShiftLeftAndFillUpperFromUint64x4 + OpShiftLeftAndFillUpperFromMaskedUint64x4 + OpShiftLeftMaskedUint64x4 OpShiftRightUint64x4 OpShiftRightAndFillUpperFromUint64x4 + OpShiftRightAndFillUpperFromMaskedUint64x4 + OpShiftRightMaskedUint64x4 OpShiftRightSignExtendedUint64x4 + OpShiftRightSignExtendedMaskedUint64x4 OpSubUint64x4 + 
OpSubMaskedUint64x4 OpXorUint64x4 + OpXorMaskedUint64x4 OpAddUint64x8 + OpAddMaskedUint64x8 OpAndUint64x8 + OpAndMaskedUint64x8 OpAndNotUint64x8 + OpAndNotMaskedUint64x8 OpEqualUint64x8 + OpEqualMaskedUint64x8 OpGreaterUint64x8 OpGreaterEqualUint64x8 + OpGreaterEqualMaskedUint64x8 + OpGreaterMaskedUint64x8 OpLessUint64x8 OpLessEqualUint64x8 - OpMaskedAddUint64x8 - OpMaskedAndUint64x8 - OpMaskedAndNotUint64x8 - OpMaskedEqualUint64x8 - OpMaskedGreaterUint64x8 - OpMaskedGreaterEqualUint64x8 - OpMaskedLessUint64x8 - OpMaskedLessEqualUint64x8 - OpMaskedMaxUint64x8 - OpMaskedMinUint64x8 - OpMaskedMulEvenWidenUint64x8 - OpMaskedNotEqualUint64x8 - OpMaskedOrUint64x8 - OpMaskedPopCountUint64x8 - OpMaskedRotateLeftUint64x8 - OpMaskedRotateRightUint64x8 - OpMaskedShiftAllLeftUint64x8 - OpMaskedShiftAllRightUint64x8 - OpMaskedShiftLeftUint64x8 - OpMaskedShiftLeftAndFillUpperFromUint64x8 - OpMaskedShiftRightUint64x8 - OpMaskedShiftRightAndFillUpperFromUint64x8 - OpMaskedShiftRightSignExtendedUint64x8 - OpMaskedSubUint64x8 - OpMaskedXorUint64x8 + OpLessEqualMaskedUint64x8 + OpLessMaskedUint64x8 OpMaxUint64x8 + OpMaxMaskedUint64x8 OpMinUint64x8 + OpMinMaskedUint64x8 OpMulEvenWidenUint64x8 + OpMulEvenWidenMaskedUint64x8 OpNotEqualUint64x8 + OpNotEqualMaskedUint64x8 OpOrUint64x8 + OpOrMaskedUint64x8 OpPopCountUint64x8 + OpPopCountMaskedUint64x8 OpRotateLeftUint64x8 + OpRotateLeftMaskedUint64x8 OpRotateRightUint64x8 + OpRotateRightMaskedUint64x8 OpShiftAllLeftUint64x8 + OpShiftAllLeftMaskedUint64x8 OpShiftAllRightUint64x8 + OpShiftAllRightMaskedUint64x8 OpShiftLeftUint64x8 OpShiftLeftAndFillUpperFromUint64x8 + OpShiftLeftAndFillUpperFromMaskedUint64x8 + OpShiftLeftMaskedUint64x8 OpShiftRightUint64x8 OpShiftRightAndFillUpperFromUint64x8 + OpShiftRightAndFillUpperFromMaskedUint64x8 + OpShiftRightMaskedUint64x8 OpShiftRightSignExtendedUint64x8 + OpShiftRightSignExtendedMaskedUint64x8 OpSubUint64x8 + OpSubMaskedUint64x8 OpXorUint64x8 + OpXorMaskedUint64x8 OpAddUint8x16 + OpAddMaskedUint8x16 OpAndUint8x16 OpAndNotUint8x16 OpAverageUint8x16 + OpAverageMaskedUint8x16 OpEqualUint8x16 + OpEqualMaskedUint8x16 OpGaloisFieldMulUint8x16 + OpGaloisFieldMulMaskedUint8x16 OpGreaterUint8x16 OpGreaterEqualUint8x16 + OpGreaterEqualMaskedUint8x16 + OpGreaterMaskedUint8x16 OpLessUint8x16 OpLessEqualUint8x16 - OpMaskedAddUint8x16 - OpMaskedAverageUint8x16 - OpMaskedEqualUint8x16 - OpMaskedGaloisFieldMulUint8x16 - OpMaskedGreaterUint8x16 - OpMaskedGreaterEqualUint8x16 - OpMaskedLessUint8x16 - OpMaskedLessEqualUint8x16 - OpMaskedMaxUint8x16 - OpMaskedMinUint8x16 - OpMaskedNotEqualUint8x16 - OpMaskedPopCountUint8x16 - OpMaskedSaturatedAddUint8x16 - OpMaskedSaturatedSubUint8x16 - OpMaskedSaturatedUnsignedSignedPairDotProdUint8x16 - OpMaskedSubUint8x16 + OpLessEqualMaskedUint8x16 + OpLessMaskedUint8x16 OpMaxUint8x16 + OpMaxMaskedUint8x16 OpMinUint8x16 + OpMinMaskedUint8x16 OpNotEqualUint8x16 + OpNotEqualMaskedUint8x16 OpOrUint8x16 OpPopCountUint8x16 + OpPopCountMaskedUint8x16 OpSaturatedAddUint8x16 + OpSaturatedAddMaskedUint8x16 OpSaturatedSubUint8x16 + OpSaturatedSubMaskedUint8x16 OpSaturatedUnsignedSignedPairDotProdUint8x16 + OpSaturatedUnsignedSignedPairDotProdMaskedUint8x16 OpSubUint8x16 + OpSubMaskedUint8x16 OpXorUint8x16 OpAddUint8x32 + OpAddMaskedUint8x32 OpAndUint8x32 OpAndNotUint8x32 OpAverageUint8x32 + OpAverageMaskedUint8x32 OpEqualUint8x32 + OpEqualMaskedUint8x32 OpGaloisFieldMulUint8x32 + OpGaloisFieldMulMaskedUint8x32 OpGreaterUint8x32 OpGreaterEqualUint8x32 + OpGreaterEqualMaskedUint8x32 + OpGreaterMaskedUint8x32 
OpLessUint8x32 OpLessEqualUint8x32 - OpMaskedAddUint8x32 - OpMaskedAverageUint8x32 - OpMaskedEqualUint8x32 - OpMaskedGaloisFieldMulUint8x32 - OpMaskedGreaterUint8x32 - OpMaskedGreaterEqualUint8x32 - OpMaskedLessUint8x32 - OpMaskedLessEqualUint8x32 - OpMaskedMaxUint8x32 - OpMaskedMinUint8x32 - OpMaskedNotEqualUint8x32 - OpMaskedPopCountUint8x32 - OpMaskedSaturatedAddUint8x32 - OpMaskedSaturatedSubUint8x32 - OpMaskedSaturatedUnsignedSignedPairDotProdUint8x32 - OpMaskedSubUint8x32 + OpLessEqualMaskedUint8x32 + OpLessMaskedUint8x32 OpMaxUint8x32 + OpMaxMaskedUint8x32 OpMinUint8x32 + OpMinMaskedUint8x32 OpNotEqualUint8x32 + OpNotEqualMaskedUint8x32 OpOrUint8x32 OpPopCountUint8x32 + OpPopCountMaskedUint8x32 OpSaturatedAddUint8x32 + OpSaturatedAddMaskedUint8x32 OpSaturatedSubUint8x32 + OpSaturatedSubMaskedUint8x32 OpSaturatedUnsignedSignedPairDotProdUint8x32 + OpSaturatedUnsignedSignedPairDotProdMaskedUint8x32 OpSubUint8x32 + OpSubMaskedUint8x32 OpXorUint8x32 OpAddUint8x64 + OpAddMaskedUint8x64 OpAverageUint8x64 + OpAverageMaskedUint8x64 OpEqualUint8x64 + OpEqualMaskedUint8x64 OpGaloisFieldMulUint8x64 + OpGaloisFieldMulMaskedUint8x64 OpGreaterUint8x64 OpGreaterEqualUint8x64 + OpGreaterEqualMaskedUint8x64 + OpGreaterMaskedUint8x64 OpLessUint8x64 OpLessEqualUint8x64 - OpMaskedAddUint8x64 - OpMaskedAverageUint8x64 - OpMaskedEqualUint8x64 - OpMaskedGaloisFieldMulUint8x64 - OpMaskedGreaterUint8x64 - OpMaskedGreaterEqualUint8x64 - OpMaskedLessUint8x64 - OpMaskedLessEqualUint8x64 - OpMaskedMaxUint8x64 - OpMaskedMinUint8x64 - OpMaskedNotEqualUint8x64 - OpMaskedPopCountUint8x64 - OpMaskedSaturatedAddUint8x64 - OpMaskedSaturatedSubUint8x64 - OpMaskedSaturatedUnsignedSignedPairDotProdUint8x64 - OpMaskedSubUint8x64 + OpLessEqualMaskedUint8x64 + OpLessMaskedUint8x64 OpMaxUint8x64 + OpMaxMaskedUint8x64 OpMinUint8x64 + OpMinMaskedUint8x64 OpNotEqualUint8x64 + OpNotEqualMaskedUint8x64 OpPopCountUint8x64 + OpPopCountMaskedUint8x64 OpSaturatedAddUint8x64 + OpSaturatedAddMaskedUint8x64 OpSaturatedSubUint8x64 + OpSaturatedSubMaskedUint8x64 OpSaturatedUnsignedSignedPairDotProdUint8x64 + OpSaturatedUnsignedSignedPairDotProdMaskedUint8x64 OpSubUint8x64 + OpSubMaskedUint8x64 OpCeilWithPrecisionFloat32x16 + OpCeilWithPrecisionMaskedFloat32x16 OpDiffWithCeilWithPrecisionFloat32x16 + OpDiffWithCeilWithPrecisionMaskedFloat32x16 OpDiffWithFloorWithPrecisionFloat32x16 + OpDiffWithFloorWithPrecisionMaskedFloat32x16 OpDiffWithRoundWithPrecisionFloat32x16 + OpDiffWithRoundWithPrecisionMaskedFloat32x16 OpDiffWithTruncWithPrecisionFloat32x16 + OpDiffWithTruncWithPrecisionMaskedFloat32x16 OpFloorWithPrecisionFloat32x16 - OpMaskedCeilWithPrecisionFloat32x16 - OpMaskedDiffWithCeilWithPrecisionFloat32x16 - OpMaskedDiffWithFloorWithPrecisionFloat32x16 - OpMaskedDiffWithRoundWithPrecisionFloat32x16 - OpMaskedDiffWithTruncWithPrecisionFloat32x16 - OpMaskedFloorWithPrecisionFloat32x16 - OpMaskedRoundWithPrecisionFloat32x16 - OpMaskedTruncWithPrecisionFloat32x16 + OpFloorWithPrecisionMaskedFloat32x16 OpRoundWithPrecisionFloat32x16 + OpRoundWithPrecisionMaskedFloat32x16 OpTruncWithPrecisionFloat32x16 + OpTruncWithPrecisionMaskedFloat32x16 OpCeilWithPrecisionFloat32x4 + OpCeilWithPrecisionMaskedFloat32x4 OpDiffWithCeilWithPrecisionFloat32x4 + OpDiffWithCeilWithPrecisionMaskedFloat32x4 OpDiffWithFloorWithPrecisionFloat32x4 + OpDiffWithFloorWithPrecisionMaskedFloat32x4 OpDiffWithRoundWithPrecisionFloat32x4 + OpDiffWithRoundWithPrecisionMaskedFloat32x4 OpDiffWithTruncWithPrecisionFloat32x4 + OpDiffWithTruncWithPrecisionMaskedFloat32x4 
OpFloorWithPrecisionFloat32x4 - OpMaskedCeilWithPrecisionFloat32x4 - OpMaskedDiffWithCeilWithPrecisionFloat32x4 - OpMaskedDiffWithFloorWithPrecisionFloat32x4 - OpMaskedDiffWithRoundWithPrecisionFloat32x4 - OpMaskedDiffWithTruncWithPrecisionFloat32x4 - OpMaskedFloorWithPrecisionFloat32x4 - OpMaskedRoundWithPrecisionFloat32x4 - OpMaskedTruncWithPrecisionFloat32x4 + OpFloorWithPrecisionMaskedFloat32x4 OpRoundWithPrecisionFloat32x4 + OpRoundWithPrecisionMaskedFloat32x4 OpTruncWithPrecisionFloat32x4 + OpTruncWithPrecisionMaskedFloat32x4 OpCeilWithPrecisionFloat32x8 + OpCeilWithPrecisionMaskedFloat32x8 OpDiffWithCeilWithPrecisionFloat32x8 + OpDiffWithCeilWithPrecisionMaskedFloat32x8 OpDiffWithFloorWithPrecisionFloat32x8 + OpDiffWithFloorWithPrecisionMaskedFloat32x8 OpDiffWithRoundWithPrecisionFloat32x8 + OpDiffWithRoundWithPrecisionMaskedFloat32x8 OpDiffWithTruncWithPrecisionFloat32x8 + OpDiffWithTruncWithPrecisionMaskedFloat32x8 OpFloorWithPrecisionFloat32x8 + OpFloorWithPrecisionMaskedFloat32x8 OpGet128Float32x8 - OpMaskedCeilWithPrecisionFloat32x8 - OpMaskedDiffWithCeilWithPrecisionFloat32x8 - OpMaskedDiffWithFloorWithPrecisionFloat32x8 - OpMaskedDiffWithRoundWithPrecisionFloat32x8 - OpMaskedDiffWithTruncWithPrecisionFloat32x8 - OpMaskedFloorWithPrecisionFloat32x8 - OpMaskedRoundWithPrecisionFloat32x8 - OpMaskedTruncWithPrecisionFloat32x8 OpRoundWithPrecisionFloat32x8 + OpRoundWithPrecisionMaskedFloat32x8 OpSet128Float32x8 OpTruncWithPrecisionFloat32x8 + OpTruncWithPrecisionMaskedFloat32x8 OpCeilWithPrecisionFloat64x2 + OpCeilWithPrecisionMaskedFloat64x2 OpDiffWithCeilWithPrecisionFloat64x2 + OpDiffWithCeilWithPrecisionMaskedFloat64x2 OpDiffWithFloorWithPrecisionFloat64x2 + OpDiffWithFloorWithPrecisionMaskedFloat64x2 OpDiffWithRoundWithPrecisionFloat64x2 + OpDiffWithRoundWithPrecisionMaskedFloat64x2 OpDiffWithTruncWithPrecisionFloat64x2 + OpDiffWithTruncWithPrecisionMaskedFloat64x2 OpFloorWithPrecisionFloat64x2 - OpMaskedCeilWithPrecisionFloat64x2 - OpMaskedDiffWithCeilWithPrecisionFloat64x2 - OpMaskedDiffWithFloorWithPrecisionFloat64x2 - OpMaskedDiffWithRoundWithPrecisionFloat64x2 - OpMaskedDiffWithTruncWithPrecisionFloat64x2 - OpMaskedFloorWithPrecisionFloat64x2 - OpMaskedRoundWithPrecisionFloat64x2 - OpMaskedTruncWithPrecisionFloat64x2 + OpFloorWithPrecisionMaskedFloat64x2 OpRoundWithPrecisionFloat64x2 + OpRoundWithPrecisionMaskedFloat64x2 OpTruncWithPrecisionFloat64x2 + OpTruncWithPrecisionMaskedFloat64x2 OpCeilWithPrecisionFloat64x4 + OpCeilWithPrecisionMaskedFloat64x4 OpDiffWithCeilWithPrecisionFloat64x4 + OpDiffWithCeilWithPrecisionMaskedFloat64x4 OpDiffWithFloorWithPrecisionFloat64x4 + OpDiffWithFloorWithPrecisionMaskedFloat64x4 OpDiffWithRoundWithPrecisionFloat64x4 + OpDiffWithRoundWithPrecisionMaskedFloat64x4 OpDiffWithTruncWithPrecisionFloat64x4 + OpDiffWithTruncWithPrecisionMaskedFloat64x4 OpFloorWithPrecisionFloat64x4 + OpFloorWithPrecisionMaskedFloat64x4 OpGet128Float64x4 - OpMaskedCeilWithPrecisionFloat64x4 - OpMaskedDiffWithCeilWithPrecisionFloat64x4 - OpMaskedDiffWithFloorWithPrecisionFloat64x4 - OpMaskedDiffWithRoundWithPrecisionFloat64x4 - OpMaskedDiffWithTruncWithPrecisionFloat64x4 - OpMaskedFloorWithPrecisionFloat64x4 - OpMaskedRoundWithPrecisionFloat64x4 - OpMaskedTruncWithPrecisionFloat64x4 OpRoundWithPrecisionFloat64x4 + OpRoundWithPrecisionMaskedFloat64x4 OpSet128Float64x4 OpTruncWithPrecisionFloat64x4 + OpTruncWithPrecisionMaskedFloat64x4 OpCeilWithPrecisionFloat64x8 + OpCeilWithPrecisionMaskedFloat64x8 OpDiffWithCeilWithPrecisionFloat64x8 + 
OpDiffWithCeilWithPrecisionMaskedFloat64x8 OpDiffWithFloorWithPrecisionFloat64x8 + OpDiffWithFloorWithPrecisionMaskedFloat64x8 OpDiffWithRoundWithPrecisionFloat64x8 + OpDiffWithRoundWithPrecisionMaskedFloat64x8 OpDiffWithTruncWithPrecisionFloat64x8 + OpDiffWithTruncWithPrecisionMaskedFloat64x8 OpFloorWithPrecisionFloat64x8 - OpMaskedCeilWithPrecisionFloat64x8 - OpMaskedDiffWithCeilWithPrecisionFloat64x8 - OpMaskedDiffWithFloorWithPrecisionFloat64x8 - OpMaskedDiffWithRoundWithPrecisionFloat64x8 - OpMaskedDiffWithTruncWithPrecisionFloat64x8 - OpMaskedFloorWithPrecisionFloat64x8 - OpMaskedRoundWithPrecisionFloat64x8 - OpMaskedTruncWithPrecisionFloat64x8 + OpFloorWithPrecisionMaskedFloat64x8 OpRoundWithPrecisionFloat64x8 + OpRoundWithPrecisionMaskedFloat64x8 OpTruncWithPrecisionFloat64x8 + OpTruncWithPrecisionMaskedFloat64x8 OpGet128Int16x16 - OpMaskedShiftAllLeftAndFillUpperFromInt16x16 - OpMaskedShiftAllRightAndFillUpperFromInt16x16 OpSet128Int16x16 OpShiftAllLeftAndFillUpperFromInt16x16 + OpShiftAllLeftAndFillUpperFromMaskedInt16x16 OpShiftAllRightAndFillUpperFromInt16x16 - OpMaskedShiftAllLeftAndFillUpperFromInt16x32 - OpMaskedShiftAllRightAndFillUpperFromInt16x32 + OpShiftAllRightAndFillUpperFromMaskedInt16x16 OpShiftAllLeftAndFillUpperFromInt16x32 + OpShiftAllLeftAndFillUpperFromMaskedInt16x32 OpShiftAllRightAndFillUpperFromInt16x32 + OpShiftAllRightAndFillUpperFromMaskedInt16x32 OpGetElemInt16x8 - OpMaskedShiftAllLeftAndFillUpperFromInt16x8 - OpMaskedShiftAllRightAndFillUpperFromInt16x8 OpSetElemInt16x8 OpShiftAllLeftAndFillUpperFromInt16x8 + OpShiftAllLeftAndFillUpperFromMaskedInt16x8 OpShiftAllRightAndFillUpperFromInt16x8 - OpMaskedRotateAllLeftInt32x16 - OpMaskedRotateAllRightInt32x16 - OpMaskedShiftAllLeftAndFillUpperFromInt32x16 - OpMaskedShiftAllRightAndFillUpperFromInt32x16 + OpShiftAllRightAndFillUpperFromMaskedInt16x8 OpRotateAllLeftInt32x16 + OpRotateAllLeftMaskedInt32x16 OpRotateAllRightInt32x16 + OpRotateAllRightMaskedInt32x16 OpShiftAllLeftAndFillUpperFromInt32x16 + OpShiftAllLeftAndFillUpperFromMaskedInt32x16 OpShiftAllRightAndFillUpperFromInt32x16 + OpShiftAllRightAndFillUpperFromMaskedInt32x16 OpGetElemInt32x4 - OpMaskedRotateAllLeftInt32x4 - OpMaskedRotateAllRightInt32x4 - OpMaskedShiftAllLeftAndFillUpperFromInt32x4 - OpMaskedShiftAllRightAndFillUpperFromInt32x4 OpRotateAllLeftInt32x4 + OpRotateAllLeftMaskedInt32x4 OpRotateAllRightInt32x4 + OpRotateAllRightMaskedInt32x4 OpSetElemInt32x4 OpShiftAllLeftAndFillUpperFromInt32x4 + OpShiftAllLeftAndFillUpperFromMaskedInt32x4 OpShiftAllRightAndFillUpperFromInt32x4 + OpShiftAllRightAndFillUpperFromMaskedInt32x4 OpGet128Int32x8 - OpMaskedRotateAllLeftInt32x8 - OpMaskedRotateAllRightInt32x8 - OpMaskedShiftAllLeftAndFillUpperFromInt32x8 - OpMaskedShiftAllRightAndFillUpperFromInt32x8 OpRotateAllLeftInt32x8 + OpRotateAllLeftMaskedInt32x8 OpRotateAllRightInt32x8 + OpRotateAllRightMaskedInt32x8 OpSet128Int32x8 OpShiftAllLeftAndFillUpperFromInt32x8 + OpShiftAllLeftAndFillUpperFromMaskedInt32x8 OpShiftAllRightAndFillUpperFromInt32x8 + OpShiftAllRightAndFillUpperFromMaskedInt32x8 OpGetElemInt64x2 - OpMaskedRotateAllLeftInt64x2 - OpMaskedRotateAllRightInt64x2 - OpMaskedShiftAllLeftAndFillUpperFromInt64x2 - OpMaskedShiftAllRightAndFillUpperFromInt64x2 OpRotateAllLeftInt64x2 + OpRotateAllLeftMaskedInt64x2 OpRotateAllRightInt64x2 + OpRotateAllRightMaskedInt64x2 OpSetElemInt64x2 OpShiftAllLeftAndFillUpperFromInt64x2 + OpShiftAllLeftAndFillUpperFromMaskedInt64x2 OpShiftAllRightAndFillUpperFromInt64x2 + 
OpShiftAllRightAndFillUpperFromMaskedInt64x2 OpGet128Int64x4 - OpMaskedRotateAllLeftInt64x4 - OpMaskedRotateAllRightInt64x4 - OpMaskedShiftAllLeftAndFillUpperFromInt64x4 - OpMaskedShiftAllRightAndFillUpperFromInt64x4 OpRotateAllLeftInt64x4 + OpRotateAllLeftMaskedInt64x4 OpRotateAllRightInt64x4 + OpRotateAllRightMaskedInt64x4 OpSet128Int64x4 OpShiftAllLeftAndFillUpperFromInt64x4 + OpShiftAllLeftAndFillUpperFromMaskedInt64x4 OpShiftAllRightAndFillUpperFromInt64x4 - OpMaskedRotateAllLeftInt64x8 - OpMaskedRotateAllRightInt64x8 - OpMaskedShiftAllLeftAndFillUpperFromInt64x8 - OpMaskedShiftAllRightAndFillUpperFromInt64x8 + OpShiftAllRightAndFillUpperFromMaskedInt64x4 OpRotateAllLeftInt64x8 + OpRotateAllLeftMaskedInt64x8 OpRotateAllRightInt64x8 + OpRotateAllRightMaskedInt64x8 OpShiftAllLeftAndFillUpperFromInt64x8 + OpShiftAllLeftAndFillUpperFromMaskedInt64x8 OpShiftAllRightAndFillUpperFromInt64x8 + OpShiftAllRightAndFillUpperFromMaskedInt64x8 OpGetElemInt8x16 OpSetElemInt8x16 OpGet128Int8x32 OpSet128Int8x32 OpGet128Uint16x16 - OpMaskedShiftAllLeftAndFillUpperFromUint16x16 - OpMaskedShiftAllRightAndFillUpperFromUint16x16 OpSet128Uint16x16 OpShiftAllLeftAndFillUpperFromUint16x16 + OpShiftAllLeftAndFillUpperFromMaskedUint16x16 OpShiftAllRightAndFillUpperFromUint16x16 - OpMaskedShiftAllLeftAndFillUpperFromUint16x32 - OpMaskedShiftAllRightAndFillUpperFromUint16x32 + OpShiftAllRightAndFillUpperFromMaskedUint16x16 OpShiftAllLeftAndFillUpperFromUint16x32 + OpShiftAllLeftAndFillUpperFromMaskedUint16x32 OpShiftAllRightAndFillUpperFromUint16x32 + OpShiftAllRightAndFillUpperFromMaskedUint16x32 OpGetElemUint16x8 - OpMaskedShiftAllLeftAndFillUpperFromUint16x8 - OpMaskedShiftAllRightAndFillUpperFromUint16x8 OpSetElemUint16x8 OpShiftAllLeftAndFillUpperFromUint16x8 + OpShiftAllLeftAndFillUpperFromMaskedUint16x8 OpShiftAllRightAndFillUpperFromUint16x8 - OpMaskedRotateAllLeftUint32x16 - OpMaskedRotateAllRightUint32x16 - OpMaskedShiftAllLeftAndFillUpperFromUint32x16 - OpMaskedShiftAllRightAndFillUpperFromUint32x16 + OpShiftAllRightAndFillUpperFromMaskedUint16x8 OpRotateAllLeftUint32x16 + OpRotateAllLeftMaskedUint32x16 OpRotateAllRightUint32x16 + OpRotateAllRightMaskedUint32x16 OpShiftAllLeftAndFillUpperFromUint32x16 + OpShiftAllLeftAndFillUpperFromMaskedUint32x16 OpShiftAllRightAndFillUpperFromUint32x16 + OpShiftAllRightAndFillUpperFromMaskedUint32x16 OpGetElemUint32x4 - OpMaskedRotateAllLeftUint32x4 - OpMaskedRotateAllRightUint32x4 - OpMaskedShiftAllLeftAndFillUpperFromUint32x4 - OpMaskedShiftAllRightAndFillUpperFromUint32x4 OpRotateAllLeftUint32x4 + OpRotateAllLeftMaskedUint32x4 OpRotateAllRightUint32x4 + OpRotateAllRightMaskedUint32x4 OpSetElemUint32x4 OpShiftAllLeftAndFillUpperFromUint32x4 + OpShiftAllLeftAndFillUpperFromMaskedUint32x4 OpShiftAllRightAndFillUpperFromUint32x4 + OpShiftAllRightAndFillUpperFromMaskedUint32x4 OpGet128Uint32x8 - OpMaskedRotateAllLeftUint32x8 - OpMaskedRotateAllRightUint32x8 - OpMaskedShiftAllLeftAndFillUpperFromUint32x8 - OpMaskedShiftAllRightAndFillUpperFromUint32x8 OpRotateAllLeftUint32x8 + OpRotateAllLeftMaskedUint32x8 OpRotateAllRightUint32x8 + OpRotateAllRightMaskedUint32x8 OpSet128Uint32x8 OpShiftAllLeftAndFillUpperFromUint32x8 + OpShiftAllLeftAndFillUpperFromMaskedUint32x8 OpShiftAllRightAndFillUpperFromUint32x8 + OpShiftAllRightAndFillUpperFromMaskedUint32x8 OpGetElemUint64x2 - OpMaskedRotateAllLeftUint64x2 - OpMaskedRotateAllRightUint64x2 - OpMaskedShiftAllLeftAndFillUpperFromUint64x2 - OpMaskedShiftAllRightAndFillUpperFromUint64x2 OpRotateAllLeftUint64x2 + 
OpRotateAllLeftMaskedUint64x2 OpRotateAllRightUint64x2 + OpRotateAllRightMaskedUint64x2 OpSetElemUint64x2 OpShiftAllLeftAndFillUpperFromUint64x2 + OpShiftAllLeftAndFillUpperFromMaskedUint64x2 OpShiftAllRightAndFillUpperFromUint64x2 + OpShiftAllRightAndFillUpperFromMaskedUint64x2 OpGet128Uint64x4 - OpMaskedRotateAllLeftUint64x4 - OpMaskedRotateAllRightUint64x4 - OpMaskedShiftAllLeftAndFillUpperFromUint64x4 - OpMaskedShiftAllRightAndFillUpperFromUint64x4 OpRotateAllLeftUint64x4 + OpRotateAllLeftMaskedUint64x4 OpRotateAllRightUint64x4 + OpRotateAllRightMaskedUint64x4 OpSet128Uint64x4 OpShiftAllLeftAndFillUpperFromUint64x4 + OpShiftAllLeftAndFillUpperFromMaskedUint64x4 OpShiftAllRightAndFillUpperFromUint64x4 - OpMaskedRotateAllLeftUint64x8 - OpMaskedRotateAllRightUint64x8 - OpMaskedShiftAllLeftAndFillUpperFromUint64x8 - OpMaskedShiftAllRightAndFillUpperFromUint64x8 + OpShiftAllRightAndFillUpperFromMaskedUint64x4 OpRotateAllLeftUint64x8 + OpRotateAllLeftMaskedUint64x8 OpRotateAllRightUint64x8 + OpRotateAllRightMaskedUint64x8 OpShiftAllLeftAndFillUpperFromUint64x8 + OpShiftAllLeftAndFillUpperFromMaskedUint64x8 OpShiftAllRightAndFillUpperFromUint64x8 + OpShiftAllRightAndFillUpperFromMaskedUint64x8 OpGaloisFieldAffineTransformUint8x16 OpGaloisFieldAffineTransformInversedUint8x16 + OpGaloisFieldAffineTransformInversedMaskedUint8x16 + OpGaloisFieldAffineTransformMaskedUint8x16 OpGetElemUint8x16 - OpMaskedGaloisFieldAffineTransformUint8x16 - OpMaskedGaloisFieldAffineTransformInversedUint8x16 OpSetElemUint8x16 OpGaloisFieldAffineTransformUint8x32 OpGaloisFieldAffineTransformInversedUint8x32 + OpGaloisFieldAffineTransformInversedMaskedUint8x32 + OpGaloisFieldAffineTransformMaskedUint8x32 OpGet128Uint8x32 - OpMaskedGaloisFieldAffineTransformUint8x32 - OpMaskedGaloisFieldAffineTransformInversedUint8x32 OpSet128Uint8x32 OpGaloisFieldAffineTransformUint8x64 OpGaloisFieldAffineTransformInversedUint8x64 - OpMaskedGaloisFieldAffineTransformUint8x64 - OpMaskedGaloisFieldAffineTransformInversedUint8x64 + OpGaloisFieldAffineTransformInversedMaskedUint8x64 + OpGaloisFieldAffineTransformMaskedUint8x64 ) var opcodeTable = [...]opInfo{ @@ -18580,12 +18580,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PS512", - argLen: 1, - asm: x86.AVRCP14PS, + name: "VADDPSMasked512", + argLen: 3, + commutative: true, + asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18593,9 +18596,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PS512", + name: "VRCP14PS512", argLen: 1, - asm: x86.AVRSQRT14PS, + asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18606,13 +18609,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPS512", + name: "VRCP14PSMasked512", argLen: 2, - asm: x86.AVDIVPS, + asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 
X9 X10 X11 X12 X13 X14 @@ -18620,15 +18623,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADD213PS, + name: "VRSQRT14PS512", + argLen: 1, + asm: x86.AVRSQRT14PS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18636,15 +18636,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB213PS, + name: "VRSQRT14PSMasked512", + argLen: 2, + asm: x86.AVRSQRT14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18652,15 +18650,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD213PS, + name: "VDIVPS512", + argLen: 2, + asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18668,10 +18664,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPSMasked512", - argLen: 3, - commutative: true, - asm: x86.AVADDPS, + name: "VDIVPSMasked512", + argLen: 3, + asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -18684,13 +18679,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PSMasked512", - argLen: 2, - asm: x86.AVRCP14PS, + name: "VFMADD213PS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD213PS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18698,13 +18695,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PSMasked512", - argLen: 2, - asm: x86.AVRSQRT14PS, + name: "VFMADD213PSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADD213PS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18712,14 +18712,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPSMasked512", - argLen: 
3, - asm: x86.AVDIVPS, + name: "VFMADDSUB213PS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADDSUB213PS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18727,10 +18728,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PSMasked512", + name: "VFMADDSUB213PSMasked512", argLen: 4, resultInArg0: true, - asm: x86.AVFMADD213PS, + asm: x86.AVFMADDSUB213PS, reg: regInfo{ inputs: []inputInfo{ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -18744,16 +18745,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PSMasked512", - argLen: 4, + name: "VFMSUBADD213PS512", + argLen: 3, resultInArg0: true, - asm: x86.AVFMADDSUB213PS, + asm: x86.AVFMSUBADD213PS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18777,6 +18777,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VMAXPS512", + argLen: 2, + commutative: true, + asm: x86.AVMAXPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VMAXPSMasked512", argLen: 3, @@ -18794,15 +18809,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPSMasked512", - argLen: 3, + name: "VMINPS512", + argLen: 2, commutative: true, asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18810,10 +18824,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPSMasked512", + name: "VMINPSMasked512", argLen: 3, commutative: true, - asm: x86.AVMULPS, + asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -18826,14 +18840,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPSMasked512", - argLen: 3, - asm: x86.AVSCALEFPS, + name: "VMULPS512", + argLen: 2, + commutative: true, + asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18841,13 +18855,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPSMasked512", + name: "VSCALEFPS512", argLen: 2, - asm: x86.AVSQRTPS, + asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18855,9 +18869,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPSMasked512", + name: "VSCALEFPSMasked512", argLen: 3, - asm: x86.AVSUBPS, + asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -18870,14 +18884,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPS512", - argLen: 2, + name: "VMULPSMasked512", + argLen: 3, commutative: true, - asm: x86.AVMAXPS, + asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18885,14 +18900,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPS512", - argLen: 2, - commutative: true, - asm: x86.AVMINPS, + name: "VSQRTPS512", + argLen: 1, + asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18900,14 +18913,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPS512", - argLen: 2, - commutative: true, - asm: x86.AVMULPS, + name: "VSQRTPSMasked512", + argLen: 2, + asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18915,9 +18927,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPS512", + name: "VSUBPS512", argLen: 2, - asm: x86.AVSCALEFPS, + asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18929,12 +18941,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPS512", - argLen: 1, - asm: x86.AVSQRTPS, + name: "VSUBPSMasked512", + argLen: 3, + asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18942,9 +18956,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPS512", - argLen: 2, - asm: x86.AVSUBPS, + name: "VADDPS128", + argLen: 2, + commutative: true, + asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18956,14 +18971,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPS128", - argLen: 2, + name: "VADDPSMasked128", + argLen: 3, commutative: true, asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18998,26 +19014,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRTPS128", - argLen: 1, - asm: x86.AVRSQRTPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VDIVPS128", + name: "VRCP14PSMasked128", argLen: 2, - asm: x86.AVDIVPS, + asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19025,15 +19028,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PS128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADD213PS, + name: "VRSQRTPS128", + argLen: 1, + asm: x86.AVRSQRTPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19041,15 +19041,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PS128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB213PS, + name: "VRSQRT14PSMasked128", + argLen: 2, + asm: x86.AVRSQRT14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19057,15 +19055,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PS128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD213PS, + name: "VDIVPS128", + argLen: 2, + asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19073,10 +19069,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPSMasked128", - argLen: 3, - commutative: true, - asm: x86.AVADDPS, + name: "VDIVPSMasked128", + argLen: 3, + asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -19089,42 +19084,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PSMasked128", - argLen: 2, - asm: x86.AVRCP14PS, - reg: regInfo{ - inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VRSQRT14PSMasked128", - argLen: 2, - asm: x86.AVRSQRT14PS, + name: "VFMADD213PS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD213PS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VDIVPSMasked128", - argLen: 3, - asm: x86.AVDIVPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19149,16 +19117,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PSMasked128", - argLen: 4, + name: "VFMADDSUB213PS128", + argLen: 3, resultInArg0: true, asm: x86.AVFMADDSUB213PS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19166,10 +19133,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PSMasked128", + name: "VFMADDSUB213PSMasked128", argLen: 4, resultInArg0: true, - asm: x86.AVFMSUBADD213PS, + asm: x86.AVFMADDSUB213PS, reg: regInfo{ inputs: []inputInfo{ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -19183,15 +19150,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPSMasked128", - argLen: 3, - commutative: true, - asm: x86.AVMAXPS, + name: "VFMSUBADD213PS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD213PS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19199,15 +19166,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPSMasked128", - argLen: 3, - commutative: true, - asm: x86.AVMINPS, + name: "VFMSUBADD213PSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD213PS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19215,15 +19183,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPSMasked128", - argLen: 3, + name: "VMAXPS128", + argLen: 2, commutative: true, - asm: x86.AVMULPS, + asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19231,9 +19198,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPSMasked128", - argLen: 3, - asm: x86.AVSCALEFPS, + name: "VMAXPSMasked128", + argLen: 3, + commutative: true, + asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -19246,13 +19214,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPSMasked128", - argLen: 2, - asm: x86.AVSQRTPS, + name: "VMINPS128", + argLen: 2, + commutative: true, + asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19260,9 +19229,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPSMasked128", - argLen: 3, - asm: x86.AVSUBPS, + name: "VMINPSMasked128", + argLen: 3, + commutative: true, + asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -19275,10 +19245,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPS128", + name: "VMULPS128", argLen: 2, commutative: true, - asm: x86.AVMAXPS, + asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19290,10 +19260,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPS128", - argLen: 2, - commutative: true, - asm: x86.AVMINPS, + name: "VSCALEFPS128", + argLen: 2, + asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19305,14 +19274,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPS128", - argLen: 2, - commutative: true, - asm: x86.AVMULPS, + name: "VSCALEFPSMasked128", + argLen: 3, + asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19320,13 +19289,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPS128", - argLen: 2, - asm: x86.AVSCALEFPS, + name: "VMULPSMasked128", + argLen: 3, + commutative: true, + asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19375,13 +19346,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPS128", + name: "VSQRTPSMasked128", argLen: 2, - asm: x86.AVSUBPS, + asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19389,10 +19360,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPS256", - argLen: 2, - commutative: true, - asm: x86.AVADDPS, + name: "VSUBPS128", + argLen: 2, + asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19404,13 +19374,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDSUBPS256", - argLen: 2, - asm: x86.AVADDSUBPS, + name: "VSUBPSMasked128", + argLen: 3, + asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19418,12 +19389,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PS256", - argLen: 1, - asm: x86.AVRCP14PS, + name: "VADDPS256", + argLen: 2, + commutative: true, + asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19431,12 +19404,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRTPS256", - argLen: 1, - asm: x86.AVRSQRTPS, + name: "VADDPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19444,9 +19420,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPS256", + name: "VADDSUBPS256", argLen: 2, - asm: x86.AVDIVPS, + asm: x86.AVADDSUBPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19458,15 +19434,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADD213PS, + name: "VRCP14PS256", + argLen: 1, + asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19474,15 +19447,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB213PS, + name: "VRCP14PSMasked256", + argLen: 2, + asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19490,15 +19461,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD213PS, + name: "VRSQRTPS256", + argLen: 1, + asm: x86.AVRSQRTPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19506,15 +19474,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPSMasked256", - argLen: 3, - commutative: true, - asm: x86.AVADDPS, + name: "VRSQRT14PSMasked256", + argLen: 2, + asm: x86.AVRSQRT14PS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19522,13 +19488,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PSMasked256", + name: "VDIVPS256", argLen: 2, - asm: x86.AVRCP14PS, + asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19536,13 +19502,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PSMasked256", - argLen: 2, - asm: x86.AVRSQRT14PS, + name: "VDIVPSMasked256", + argLen: 3, + asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 
1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19550,14 +19517,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPSMasked256", - argLen: 3, - asm: x86.AVDIVPS, + name: "VFMADD213PS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD213PS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19582,16 +19550,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PSMasked256", - argLen: 4, + name: "VFMADDSUB213PS256", + argLen: 3, resultInArg0: true, asm: x86.AVFMADDSUB213PS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19599,10 +19566,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PSMasked256", + name: "VFMADDSUB213PSMasked256", argLen: 4, resultInArg0: true, - asm: x86.AVFMSUBADD213PS, + asm: x86.AVFMADDSUB213PS, reg: regInfo{ inputs: []inputInfo{ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -19616,15 +19583,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPSMasked256", - argLen: 3, - commutative: true, - asm: x86.AVMAXPS, + name: "VFMSUBADD213PS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD213PS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19632,15 +19599,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPSMasked256", - argLen: 3, - commutative: true, - asm: x86.AVMINPS, + name: "VFMSUBADD213PSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD213PS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19648,15 +19616,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPSMasked256", - argLen: 3, + name: "VMAXPS256", + argLen: 2, commutative: true, - asm: x86.AVMULPS, + asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19664,9 +19631,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPSMasked256", - argLen: 3, - asm: x86.AVSCALEFPS, + name: "VMAXPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -19679,13 +19647,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPSMasked256", - argLen: 2, - asm: x86.AVSQRTPS, + name: "VMINPS256", + argLen: 2, + commutative: true, + asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19693,9 +19662,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPSMasked256", - argLen: 3, - asm: x86.AVSUBPS, + name: "VMINPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -19708,10 +19678,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPS256", + name: "VMULPS256", argLen: 2, commutative: true, - asm: x86.AVMAXPS, + asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19723,10 +19693,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPS256", - argLen: 2, - commutative: true, - asm: x86.AVMINPS, + name: "VSCALEFPS256", + argLen: 2, + asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19738,14 +19707,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPS256", - argLen: 2, - commutative: true, - asm: x86.AVMULPS, + name: "VSCALEFPSMasked256", + argLen: 3, + asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19753,13 +19722,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPS256", - argLen: 2, - asm: x86.AVSCALEFPS, + name: "VMULPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 
K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19808,13 +19779,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPS256", + name: "VSQRTPSMasked256", argLen: 2, - asm: x86.AVSUBPS, + asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19822,10 +19793,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPD128", - argLen: 2, - commutative: true, - asm: x86.AVADDPD, + name: "VSUBPS256", + argLen: 2, + asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19837,13 +19807,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDSUBPD128", - argLen: 2, - asm: x86.AVADDSUBPD, + name: "VSUBPSMasked256", + argLen: 3, + asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19851,12 +19822,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PD128", - argLen: 1, - asm: x86.AVRCP14PD, + name: "VADDPD128", + argLen: 2, + commutative: true, + asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19864,12 +19837,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PD128", - argLen: 1, - asm: x86.AVRSQRT14PD, + name: "VADDPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19877,9 +19853,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPD128", + name: "VADDSUBPD128", argLen: 2, - asm: x86.AVDIVPD, + asm: x86.AVADDSUBPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19891,15 +19867,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADD213PD, + name: "VRCP14PD128", + argLen: 1, + asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, 
outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19907,15 +19880,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB213PD, + name: "VRCP14PDMasked128", + argLen: 2, + asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19923,31 +19894,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD213PD, + name: "VRSQRT14PD128", + argLen: 1, + asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VADDPDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVADDPD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19955,9 +19907,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PDMasked128", + name: "VRSQRT14PDMasked128", argLen: 2, - asm: x86.AVRCP14PD, + asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -19969,13 +19921,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PDMasked128", + name: "VDIVPD128", argLen: 2, - asm: x86.AVRSQRT14PD, + asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19998,16 +19950,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PDMasked128", - argLen: 4, + name: "VFMADD213PD128", + argLen: 3, resultInArg0: true, asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20015,10 +19966,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PDMasked128", + name: "VFMADD213PDMasked128", argLen: 4, 
resultInArg0: true, - asm: x86.AVFMADDSUB213PD, + asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -20032,32 +19983,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PDMasked128", - argLen: 4, + name: "VFMADDSUB213PD128", + argLen: 3, resultInArg0: true, - asm: x86.AVFMSUBADD213PD, + asm: x86.AVFMADDSUB213PD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VMAXPDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVMAXPD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20065,15 +19999,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVMINPD, + name: "VFMADDSUB213PDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADDSUB213PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20081,15 +20016,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVMULPD, + name: "VFMSUBADD213PD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20097,14 +20032,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPDMasked128", - argLen: 3, - asm: x86.AVSCALEFPD, + name: "VFMSUBADD213PDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20112,13 +20049,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: 
"VSQRTPDMasked128", - argLen: 2, - asm: x86.AVSQRTPD, + name: "VMAXPD128", + argLen: 2, + commutative: true, + asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20126,9 +20064,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPDMasked128", - argLen: 3, - asm: x86.AVSUBPD, + name: "VMAXPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -20141,10 +20080,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPD128", + name: "VMINPD128", argLen: 2, commutative: true, - asm: x86.AVMAXPD, + asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20156,14 +20095,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPD128", - argLen: 2, + name: "VMINPDMasked128", + argLen: 3, commutative: true, asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20200,13 +20140,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VHADDPD128", - argLen: 2, - asm: x86.AVHADDPD, + name: "VSCALEFPDMasked128", + argLen: 3, + asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20214,13 +20155,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VHSUBPD128", - argLen: 2, - asm: x86.AVHSUBPD, + name: "VMULPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20228,12 +20171,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPD128", - argLen: 1, - asm: x86.AVSQRTPD, + name: "VHADDPD128", + argLen: 2, + asm: x86.AVHADDPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20241,9 +20185,9 @@ var opcodeTable = [...]opInfo{ 
}, }, { - name: "VSUBPD128", + name: "VHSUBPD128", argLen: 2, - asm: x86.AVSUBPD, + asm: x86.AVHSUBPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20255,14 +20199,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPD256", - argLen: 2, - commutative: true, - asm: x86.AVADDPD, + name: "VSQRTPD128", + argLen: 1, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20270,13 +20212,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDSUBPD256", + name: "VSQRTPDMasked128", argLen: 2, - asm: x86.AVADDSUBPD, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20284,12 +20226,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PD256", - argLen: 1, - asm: x86.AVRCP14PD, + name: "VSUBPD128", + argLen: 2, + asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20297,12 +20240,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PD256", - argLen: 1, - asm: x86.AVRSQRT14PD, + name: "VSUBPDMasked128", + argLen: 3, + asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20310,9 +20255,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPD256", - argLen: 2, - asm: x86.AVDIVPD, + name: "VADDPD256", + argLen: 2, + commutative: true, + asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20324,15 +20270,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PD256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADD213PD, + name: "VADDPDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20340,15 +20286,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PD256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB213PD, + name: "VADDSUBPD256", + argLen: 2, + asm: 
x86.AVADDSUBPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20356,15 +20300,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PD256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD213PD, + name: "VRCP14PD256", + argLen: 1, + asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20372,15 +20313,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVADDPD, + name: "VRCP14PDMasked256", + argLen: 2, + asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20388,13 +20327,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PDMasked256", - argLen: 2, - asm: x86.AVRCP14PD, + name: "VRSQRT14PD256", + argLen: 1, + asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20416,14 +20354,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPDMasked256", - argLen: 3, + name: "VDIVPD256", + argLen: 2, asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20431,16 +20368,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PDMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADD213PD, + name: "VDIVPDMasked256", + argLen: 3, + asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20448,16 +20383,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PDMasked256", - argLen: 4, + name: "VFMADD213PD256", + argLen: 3, resultInArg0: true, - asm: x86.AVFMADDSUB213PD, + asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ - {3, 
1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20465,10 +20399,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PDMasked256", + name: "VFMADD213PDMasked256", argLen: 4, resultInArg0: true, - asm: x86.AVFMSUBADD213PD, + asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -20482,15 +20416,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVMAXPD, + name: "VFMADDSUB213PD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADDSUB213PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20498,15 +20432,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVMINPD, + name: "VFMADDSUB213PDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADDSUB213PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20514,15 +20449,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVMULPD, + name: "VFMSUBADD213PD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20530,14 +20465,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPDMasked256", - argLen: 3, - asm: x86.AVSCALEFPD, + name: "VFMSUBADD213PDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20545,13 +20482,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPDMasked256", - argLen: 2, - asm: x86.AVSQRTPD, + name: "VMAXPD256", + argLen: 2, + commutative: true, + asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20559,9 +20497,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPDMasked256", - argLen: 3, - asm: x86.AVSUBPD, + name: "VMAXPDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -20574,10 +20513,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPD256", + name: "VMINPD256", argLen: 2, commutative: true, - asm: x86.AVMAXPD, + asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20589,14 +20528,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPD256", - argLen: 2, + name: "VMINPDMasked256", + argLen: 3, commutative: true, asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20633,27 +20573,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VHADDPD256", - argLen: 2, - asm: x86.AVHADDPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VHSUBPD256", - argLen: 2, - asm: x86.AVHSUBPD, + name: "VSCALEFPDMasked256", + argLen: 3, + asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20661,12 +20588,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPD256", - argLen: 1, - asm: x86.AVSQRTPD, + name: "VMULPDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20674,9 +20604,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPD256", + name: "VHADDPD256", argLen: 2, - asm: x86.AVSUBPD, + asm: x86.AVHADDPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20688,10 +20618,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPD512", - argLen: 2, - commutative: true, - asm: x86.AVADDPD, + name: "VHSUBPD256", + argLen: 2, + asm: x86.AVHSUBPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20703,22 +20632,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PD512", - argLen: 1, - asm: x86.AVRCP14PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VRSQRT14PD512", + name: "VSQRTPD256", argLen: 1, - asm: x86.AVRSQRT14PD, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20729,13 +20645,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPD512", + name: "VSQRTPDMasked256", argLen: 2, - asm: x86.AVDIVPD, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20743,15 +20659,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PD512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADD213PD, + name: "VSUBPD256", + argLen: 2, + asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20759,15 +20673,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PD512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB213PD, + name: "VSUBPDMasked256", + argLen: 3, + asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20775,15 +20688,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PD512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD213PD, + name: "VADDPD512", + argLen: 2, + commutative: true, + asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: 
[]outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20807,13 +20719,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PDMasked512", - argLen: 2, + name: "VRCP14PD512", + argLen: 1, asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20821,9 +20732,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PDMasked512", + name: "VRCP14PDMasked512", argLen: 2, - asm: x86.AVRSQRT14PD, + asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -20835,128 +20746,26 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPDMasked512", - argLen: 3, - asm: x86.AVDIVPD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADD213PDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADD213PD, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADDSUB213PDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADDSUB213PD, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUBADD213PDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUBADD213PD, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VMAXPDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVMAXPD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VMINPDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVMINPD, + name: "VRSQRT14PD512", + argLen: 1, + asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, - }, - }, - { - name: "VMULPDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVMULPD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VSCALEFPDMasked512", - argLen: 3, - asm: x86.AVSCALEFPD, + name: "VRSQRT14PDMasked512", + argLen: 2, + asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20964,13 +20773,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPDMasked512", + name: "VDIVPD512", argLen: 2, - asm: x86.AVSQRTPD, + asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20978,9 +20787,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPDMasked512", + name: "VDIVPDMasked512", argLen: 3, - asm: x86.AVSUBPD, + asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -20993,29 +20802,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPD512", - argLen: 2, - commutative: true, - asm: x86.AVMAXPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VMINPD512", - argLen: 2, - commutative: true, - asm: x86.AVMINPD, + name: "VFMADD213PD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21023,14 +20818,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPD512", - argLen: 2, - commutative: true, - asm: x86.AVMULPD, + name: "VFMADD213PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21038,13 +20835,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPD512", - argLen: 2, - asm: x86.AVSCALEFPD, + name: "VFMADDSUB213PD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADDSUB213PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21052,12 +20851,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPD512", - argLen: 1, - asm: x86.AVSQRTPD, + name: "VFMADDSUB213PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADDSUB213PD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21065,13 +20868,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPD512", - argLen: 2, - asm: x86.AVSUBPD, + name: "VFMSUBADD213PD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21079,12 +20884,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSW256", - argLen: 1, - asm: x86.AVPABSW, + name: "VFMSUBADD213PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21092,10 +20901,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDW256", + name: "VMAXPD512", argLen: 2, commutative: true, - asm: x86.AVPADDW, + asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21107,14 +20916,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQW256", - argLen: 2, + name: "VMAXPDMasked512", + argLen: 3, commutative: true, - asm: x86.AVPCMPEQW, + asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21122,9 +20932,10 
@@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPGTW256", - argLen: 2, - asm: x86.AVPCMPGTW, + name: "VMINPD512", + argLen: 2, + commutative: true, + asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21136,24 +20947,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSWMasked256", - argLen: 2, - asm: x86.AVPABSW, - reg: regInfo{ - inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPADDWMasked256", + name: "VMINPDMasked512", argLen: 3, commutative: true, - asm: x86.AVPADDW, + asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -21166,15 +20963,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSWMasked256", - argLen: 3, + name: "VMULPD512", + argLen: 2, commutative: true, - asm: x86.AVPMAXSW, + asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21182,15 +20978,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSWMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMINSW, + name: "VSCALEFPD512", + argLen: 2, + asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21198,10 +20992,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHWMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMULHW, + name: "VSCALEFPDMasked512", + argLen: 3, + asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -21214,10 +21007,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLWMasked256", + name: "VMULPDMasked512", argLen: 3, commutative: true, - asm: x86.AVPMULLW, + asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -21230,14 +21023,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMADDWDMasked256", - argLen: 3, - asm: x86.AVPMADDWD, + name: "VSQRTPD512", + argLen: 1, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21245,9 +21036,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTWMasked256", + name: "VSQRTPDMasked512", argLen: 2, - asm: x86.AVPOPCNTW, + asm: x86.AVSQRTPD, reg: regInfo{ 
inputs: []inputInfo{ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -21259,15 +21050,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSWMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPADDSW, + name: "VSUBPD512", + argLen: 2, + asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21275,9 +21064,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSWMasked256", + name: "VSUBPDMasked512", argLen: 3, - asm: x86.AVPSUBSW, + asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -21290,14 +21079,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVWMasked256", - argLen: 3, - asm: x86.AVPSLLVW, + name: "VPABSW256", + argLen: 1, + asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21305,16 +21092,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVWMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHLDVW, + name: "VPABSWMasked256", + argLen: 2, + asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21322,14 +21106,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVWMasked256", - argLen: 3, - asm: x86.AVPSRLVW, + name: "VPADDW256", + argLen: 2, + commutative: true, + asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21337,16 +21121,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVWMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHRDVW, + name: "VPADDWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21354,14 
+21137,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVWMasked256", - argLen: 3, - asm: x86.AVPSRAVW, + name: "VPCMPEQW256", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21369,14 +21152,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBWMasked256", - argLen: 3, - asm: x86.AVPSUBW, + name: "VPCMPGTW256", + argLen: 2, + asm: x86.AVPCMPGTW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21399,14 +21181,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSW256", - argLen: 2, + name: "VPMAXSWMasked256", + argLen: 3, commutative: true, - asm: x86.AVPMINSW, + asm: x86.AVPMAXSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21414,10 +21197,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHW256", + name: "VPMINSW256", argLen: 2, commutative: true, - asm: x86.AVPMULHW, + asm: x86.AVPMINSW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21429,14 +21212,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLW256", - argLen: 2, + name: "VPMINSWMasked256", + argLen: 3, commutative: true, - asm: x86.AVPMULLW, + asm: x86.AVPMINSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21444,9 +21228,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMADDWD256", - argLen: 2, - asm: x86.AVPMADDWD, + name: "VPMULHW256", + argLen: 2, + commutative: true, + asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21458,13 +21243,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHADDW256", - argLen: 2, - asm: x86.AVPHADDW, + name: "VPMULHWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21472,9 +21259,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHSUBW256", - argLen: 2, - asm: x86.AVPHSUBW, + name: "VPMULLW256", + argLen: 2, + commutative: true, + asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21486,12 +21274,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTW256", - argLen: 1, - asm: x86.AVPOPCNTW, + name: "VPMULLWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21499,10 +21290,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSW256", - argLen: 2, - commutative: true, - asm: x86.AVPADDSW, + name: "VPMADDWD256", + argLen: 2, + asm: x86.AVPMADDWD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21514,13 +21304,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHADDSW256", - argLen: 2, - asm: x86.AVPHADDSW, + name: "VPMADDWDMasked256", + argLen: 3, + asm: x86.AVPMADDWD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21528,9 +21319,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHSUBSW256", + name: "VPHADDW256", argLen: 2, - asm: x86.AVPHSUBSW, + asm: x86.AVPHADDW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21542,9 +21333,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSW256", + name: "VPHSUBW256", argLen: 2, - asm: x86.AVPSUBSW, + asm: x86.AVPHSUBW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21556,13 +21347,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLW256", - argLen: 2, - asm: x86.AVPSLLW, + name: "VPOPCNTW256", + argLen: 1, + asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21570,13 +21360,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLW256", + name: "VPOPCNTWMasked256", argLen: 2, - asm: x86.AVPSRLW, + asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 
1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21584,9 +21374,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAW256", - argLen: 2, - asm: x86.AVPSRAW, + name: "VPADDSW256", + argLen: 2, + commutative: true, + asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21598,13 +21389,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVW256", - argLen: 2, - asm: x86.AVPSLLVW, + name: "VPADDSWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21612,15 +21405,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVW256", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHLDVW, + name: "VPHADDSW256", + argLen: 2, + asm: x86.AVPHADDSW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21628,9 +21419,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVW256", + name: "VPHSUBSW256", argLen: 2, - asm: x86.AVPSRLVW, + asm: x86.AVPHSUBSW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21642,15 +21433,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVW256", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHRDVW, + name: "VPSUBSW256", + argLen: 2, + asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21658,13 +21447,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVW256", - argLen: 2, - asm: x86.AVPSRAVW, + name: "VPSUBSWMasked256", + argLen: 3, + asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21672,9 +21462,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSIGNW256", + name: "VPSLLW256", argLen: 2, - asm: x86.AVPSIGNW, + asm: x86.AVPSLLW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21686,9 +21476,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBW256", + name: "VPSRLW256", 
argLen: 2, - asm: x86.AVPSUBW, + asm: x86.AVPSRLW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21700,12 +21490,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSW512", - argLen: 1, - asm: x86.AVPABSW, + name: "VPSRAW256", + argLen: 2, + asm: x86.AVPSRAW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21713,10 +21504,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDW512", - argLen: 2, - commutative: true, - asm: x86.AVPADDW, + name: "VPSLLVW256", + argLen: 2, + asm: x86.AVPSLLVW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21728,13 +21518,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSWMasked512", - argLen: 2, - asm: x86.AVPABSW, + name: "VPSHLDVW256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHLDVW, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21742,15 +21534,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDWMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPADDW, + name: "VPSHLDVWMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21758,10 +21551,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSWMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMAXSW, + name: "VPSLLVWMasked256", + argLen: 3, + asm: x86.AVPSLLVW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -21774,15 +21566,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSWMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMINSW, + name: "VPSRLVW256", + argLen: 2, + asm: x86.AVPSRLVW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21790,15 +21580,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHWMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMULHW, + name: "VPSHRDVW256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 
X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21806,15 +21596,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLWMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMULLW, + name: "VPSHRDVWMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21822,9 +21613,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMADDWDMasked512", + name: "VPSRLVWMasked256", argLen: 3, - asm: x86.AVPMADDWD, + asm: x86.AVPSRLVW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -21837,29 +21628,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTWMasked512", + name: "VPSRAVW256", argLen: 2, - asm: x86.AVPOPCNTW, + asm: x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPADDSWMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPADDSW, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21867,9 +21642,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSWMasked512", + name: "VPSRAVWMasked256", argLen: 3, - asm: x86.AVPSUBSW, + asm: x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -21882,14 +21657,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVWMasked512", - argLen: 3, - asm: x86.AVPSLLVW, + name: "VPSIGNW256", + argLen: 2, + asm: x86.AVPSIGNW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21897,16 +21671,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVWMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHLDVW, + name: "VPSUBW256", + argLen: 2, + asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21914,9 +21685,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVWMasked512", + name: "VPSUBWMasked256", argLen: 3, - asm: x86.AVPSRLVW, + asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -21929,16 +21700,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVWMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHRDVW, + name: "VPABSW512", + argLen: 1, + asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21946,14 +21713,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVWMasked512", - argLen: 3, - asm: x86.AVPSRAVW, + name: "VPABSWMasked512", + argLen: 2, + asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21961,14 +21727,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBWMasked512", - argLen: 3, - asm: x86.AVPSUBW, + name: "VPADDW512", + argLen: 2, + commutative: true, + asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21976,14 +21742,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSW512", - argLen: 2, + name: "VPADDWMasked512", + argLen: 3, commutative: true, - asm: x86.AVPMAXSW, + asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21991,10 +21758,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSW512", + name: "VPMAXSW512", argLen: 2, commutative: true, - asm: x86.AVPMINSW, + asm: x86.AVPMAXSW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22006,14 +21773,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHW512", - argLen: 2, + name: "VPMAXSWMasked512", + 
argLen: 3, commutative: true, - asm: x86.AVPMULHW, + asm: x86.AVPMAXSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22021,10 +21789,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLW512", + name: "VPMINSW512", argLen: 2, commutative: true, - asm: x86.AVPMULLW, + asm: x86.AVPMINSW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22036,13 +21804,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMADDWD512", - argLen: 2, - asm: x86.AVPMADDWD, + name: "VPMINSWMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMINSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22050,12 +21820,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTW512", - argLen: 1, - asm: x86.AVPOPCNTW, + name: "VPMULHW512", + argLen: 2, + commutative: true, + asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22063,14 +21835,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSW512", - argLen: 2, + name: "VPMULHWMasked512", + argLen: 3, commutative: true, - asm: x86.AVPADDSW, + asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22078,9 +21851,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSW512", - argLen: 2, - asm: x86.AVPSUBSW, + name: "VPMULLW512", + argLen: 2, + commutative: true, + asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22092,13 +21866,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVW512", - argLen: 2, - asm: x86.AVPSLLVW, + name: "VPMULLWMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22106,15 +21882,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVW512", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHLDVW, + name: "VPMADDWD512", + argLen: 2, + asm: x86.AVPMADDWD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22122,13 +21896,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVW512", - argLen: 2, - asm: x86.AVPSRLVW, + name: "VPMADDWDMasked512", + argLen: 3, + asm: x86.AVPMADDWD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22136,15 +21911,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVW512", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHRDVW, + name: "VPOPCNTW512", + argLen: 1, + asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22152,13 +21924,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVW512", + name: "VPOPCNTWMasked512", argLen: 2, - asm: x86.AVPSRAVW, + asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22166,9 +21938,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBW512", - argLen: 2, - asm: x86.AVPSUBW, + name: "VPADDSW512", + argLen: 2, + commutative: true, + asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22180,12 +21953,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSW128", - argLen: 1, - asm: x86.AVPABSW, + name: "VPADDSWMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22193,10 +21969,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDW128", - argLen: 2, - commutative: true, - asm: x86.AVPADDW, + name: "VPSUBSW512", + argLen: 2, + asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ 
-22208,14 +21983,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQW128", - argLen: 2, - commutative: true, - asm: x86.AVPCMPEQW, + name: "VPSUBSWMasked512", + argLen: 3, + asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22223,9 +21998,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPGTW128", + name: "VPSLLVW512", argLen: 2, - asm: x86.AVPCMPGTW, + asm: x86.AVPSLLVW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22237,13 +22012,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSWMasked128", - argLen: 2, - asm: x86.AVPABSW, + name: "VPSHLDVW512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHLDVW, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22251,15 +22028,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDWMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPADDW, + name: "VPSHLDVWMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22267,10 +22045,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSWMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMAXSW, + name: "VPSLLVWMasked512", + argLen: 3, + asm: x86.AVPSLLVW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22283,15 +22060,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSWMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMINSW, + name: "VPSRLVW512", + argLen: 2, + asm: x86.AVPSRLVW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22299,15 +22074,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHWMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMULHW, + name: "VPSHRDVW512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22315,15 +22090,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLWMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMULLW, + name: "VPSHRDVWMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22331,9 +22107,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMADDWDMasked128", + name: "VPSRLVWMasked512", argLen: 3, - asm: x86.AVPMADDWD, + asm: x86.AVPSRLVW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22346,13 +22122,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTWMasked128", + name: "VPSRAVW512", argLen: 2, - asm: x86.AVPOPCNTW, + asm: x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22360,10 +22136,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSWMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPADDSW, + name: "VPSRAVWMasked512", + argLen: 3, + asm: x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22376,14 +22151,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSWMasked128", - argLen: 3, - asm: x86.AVPSUBSW, + name: "VPSUBW512", + argLen: 2, + asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22391,9 +22165,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVWMasked128", + name: "VPSUBWMasked512", argLen: 3, - asm: x86.AVPSLLVW, + asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22406,16 +22180,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVWMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHLDVW, + name: "VPABSW128", + argLen: 1, + asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22423,14 +22193,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVWMasked128", - argLen: 3, - asm: x86.AVPSRLVW, + name: "VPABSWMasked128", + argLen: 2, + asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22438,16 +22207,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVWMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHRDVW, + name: "VPADDW128", + argLen: 2, + commutative: true, + asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22455,9 +22222,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVWMasked128", - argLen: 3, - asm: x86.AVPSRAVW, + name: "VPADDWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22470,14 +22238,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBWMasked128", - argLen: 3, - asm: x86.AVPSUBW, + name: "VPCMPEQW128", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22485,10 +22253,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSW128", - argLen: 2, - commutative: true, - asm: x86.AVPMAXSW, + name: "VPCMPGTW128", + argLen: 2, + asm: x86.AVPCMPGTW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22500,10 +22267,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSW128", + name: "VPMAXSW128", argLen: 2, commutative: true, - asm: x86.AVPMINSW, + asm: x86.AVPMAXSW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22515,14 +22282,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHW128", - argLen: 2, + name: "VPMAXSWMasked128", + argLen: 3, commutative: true, - asm: x86.AVPMULHW, + asm: x86.AVPMAXSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22530,10 +22298,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLW128", + name: "VPMINSW128", argLen: 2, commutative: true, - asm: x86.AVPMULLW, + asm: x86.AVPMINSW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22545,13 +22313,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMADDWD128", - argLen: 2, - asm: x86.AVPMADDWD, + name: "VPMINSWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMINSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22559,9 +22329,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHADDW128", - argLen: 2, - asm: x86.AVPHADDW, + name: "VPMULHW128", + argLen: 2, + commutative: true, + asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22573,13 +22344,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHSUBW128", - argLen: 2, - asm: x86.AVPHSUBW, + name: "VPMULHWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22587,12 +22360,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTW128", - argLen: 1, - asm: x86.AVPOPCNTW, + name: "VPMULLW128", + argLen: 2, + commutative: true, + asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22600,14 +22375,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSW128", - argLen: 2, + name: "VPMULLWMasked128", + argLen: 3, commutative: true, - asm: x86.AVPADDSW, + asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22615,9 +22391,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHADDSW128", + name: "VPMADDWD128", argLen: 2, - asm: x86.AVPHADDSW, + asm: x86.AVPMADDWD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ 
-22629,13 +22405,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHSUBSW128", - argLen: 2, - asm: x86.AVPHSUBSW, + name: "VPMADDWDMasked128", + argLen: 3, + asm: x86.AVPMADDWD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22643,9 +22420,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSW128", + name: "VPHADDW128", argLen: 2, - asm: x86.AVPSUBSW, + asm: x86.AVPHADDW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22657,9 +22434,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLW128", + name: "VPHSUBW128", argLen: 2, - asm: x86.AVPSLLW, + asm: x86.AVPHSUBW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22671,13 +22448,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLW128", - argLen: 2, - asm: x86.AVPSRLW, + name: "VPOPCNTW128", + argLen: 1, + asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22685,13 +22461,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAW128", + name: "VPOPCNTWMasked128", argLen: 2, - asm: x86.AVPSRAW, + asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22699,9 +22475,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVW128", - argLen: 2, - asm: x86.AVPSLLVW, + name: "VPADDSW128", + argLen: 2, + commutative: true, + asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22713,15 +22490,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVW128", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHLDVW, + name: "VPADDSWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22729,9 +22506,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVW128", + name: "VPHADDSW128", argLen: 2, - asm: x86.AVPSRLVW, + asm: x86.AVPHADDSW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22743,15 +22520,13 @@ var 
opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVW128", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHRDVW, + name: "VPHSUBSW128", + argLen: 2, + asm: x86.AVPHSUBSW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22759,9 +22534,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVW128", + name: "VPSUBSW128", argLen: 2, - asm: x86.AVPSRAVW, + asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22773,13 +22548,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSIGNW128", - argLen: 2, - asm: x86.AVPSIGNW, + name: "VPSUBSWMasked128", + argLen: 3, + asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22787,9 +22563,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBW128", + name: "VPSLLW128", argLen: 2, - asm: x86.AVPSUBW, + asm: x86.AVPSLLW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22801,12 +22577,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSD512", - argLen: 1, - asm: x86.AVPABSD, + name: "VPSRLW128", + argLen: 2, + asm: x86.AVPSRLW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22814,10 +22591,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDD512", - argLen: 2, - commutative: true, - asm: x86.AVPADDD, + name: "VPSRAW128", + argLen: 2, + asm: x86.AVPSRAW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22829,10 +22605,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDD512", - argLen: 2, - commutative: true, - asm: x86.AVPANDD, + name: "VPSLLVW128", + argLen: 2, + asm: x86.AVPSLLVW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22844,13 +22619,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDND512", - argLen: 2, - asm: x86.AVPANDND, + name: "VPSHLDVW128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHLDVW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22858,13 +22635,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSDMasked512", - argLen: 2, - asm: x86.AVPABSD, + name: "VPSHLDVWMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVW, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 
K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22872,10 +22652,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPADDD, + name: "VPSLLVWMasked128", + argLen: 3, + asm: x86.AVPSLLVW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22888,15 +22667,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPANDD, + name: "VPSRLVW128", + argLen: 2, + asm: x86.AVPSRLVW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22904,14 +22681,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNDMasked512", - argLen: 3, - asm: x86.AVPANDND, + name: "VPSHRDVW128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22919,15 +22697,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMAXSD, + name: "VPSHRDVWMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22935,10 +22714,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMINSD, + name: "VPSRLVWMasked128", + argLen: 3, + asm: x86.AVPSRLVW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22951,15 +22729,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMULLD, + name: "VPSRAVW128", + argLen: 2, + asm: x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22967,10 +22743,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPORD, + name: "VPSRAVWMasked128", + argLen: 3, + asm: x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22983,16 +22758,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPWSSDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVPDPWSSD, + name: "VPSIGNW128", + argLen: 2, + asm: x86.AVPSIGNW, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23000,13 +22772,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTDMasked512", + name: "VPSUBW128", argLen: 2, - asm: x86.AVPOPCNTD, + asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23014,9 +22786,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLVDMasked512", + name: "VPSUBWMasked128", argLen: 3, - asm: x86.AVPROLVD, + asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23029,14 +22801,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORVDMasked512", - argLen: 3, - asm: x86.AVPRORVD, + name: "VPABSD512", + argLen: 1, + asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23044,16 +22814,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPWSSDSMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVPDPWSSDS, + name: "VPABSDMasked512", + argLen: 2, + asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23061,16 +22828,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPBUSDSMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVPDPBUSDS, + name: "VPADDD512", + argLen: 2, + commutative: true, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23078,9 +22843,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVDMasked512", - argLen: 3, - asm: x86.AVPSLLVD, + name: "VPADDDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23093,16 +22859,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHLDVD, + name: "VPANDD512", + argLen: 2, + commutative: true, + asm: x86.AVPANDD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23110,9 +22874,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVDMasked512", - argLen: 3, - asm: x86.AVPSRLVD, + name: "VPANDDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPANDD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23125,16 +22890,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHRDVD, + name: "VPANDND512", + argLen: 2, + asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23142,9 +22904,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVDMasked512", + name: "VPANDNDMasked512", argLen: 3, - asm: x86.AVPSRAVD, + asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23157,9 +22919,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBDMasked512", - argLen: 3, - asm: x86.AVPSUBD, + name: "VPMAXSD512", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMAXSDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23172,16 +22950,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPBUSDMasked512", - argLen: 4, - resultInArg0: true, - asm: 
x86.AVPDPBUSD, + name: "VPMINSD512", + argLen: 2, + commutative: true, + asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23189,10 +22965,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORDMasked512", + name: "VPMINSDMasked512", argLen: 3, commutative: true, - asm: x86.AVPXORD, + asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23205,10 +22981,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSD512", + name: "VPMULLD512", argLen: 2, commutative: true, - asm: x86.AVPMAXSD, + asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23220,14 +22996,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSD512", - argLen: 2, + name: "VPMULLDMasked512", + argLen: 3, commutative: true, - asm: x86.AVPMINSD, + asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23235,10 +23012,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLD512", + name: "VPORD512", argLen: 2, commutative: true, - asm: x86.AVPMULLD, + asm: x86.AVPORD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23250,14 +23027,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORD512", - argLen: 2, + name: "VPORDMasked512", + argLen: 3, commutative: true, asm: x86.AVPORD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23280,6 +23058,23 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPDPWSSDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPWSSD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPOPCNTD512", argLen: 1, @@ -23294,13 +23089,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLVD512", + name: "VPOPCNTDMasked512", argLen: 2, - asm: x86.AVPROLVD, + asm: x86.AVPOPCNTD, reg: 
regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23308,9 +23103,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORVD512", + name: "VPROLVD512", argLen: 2, - asm: x86.AVPRORVD, + asm: x86.AVPROLVD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23322,15 +23117,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPWSSDS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVPDPWSSDS, + name: "VPROLVDMasked512", + argLen: 3, + asm: x86.AVPROLVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23338,15 +23132,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPBUSDS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVPDPBUSDS, + name: "VPRORVD512", + argLen: 2, + asm: x86.AVPRORVD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23354,13 +23146,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVD512", - argLen: 2, - asm: x86.AVPSLLVD, + name: "VPRORVDMasked512", + argLen: 3, + asm: x86.AVPRORVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23368,10 +23161,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVD512", + name: "VPDPWSSDS512", argLen: 3, resultInArg0: true, - asm: x86.AVPSHLDVD, + asm: x86.AVPDPWSSDS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23384,13 +23177,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVD512", - argLen: 2, - asm: x86.AVPSRLVD, + name: "VPDPWSSDSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPWSSDS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: 
[]outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23398,10 +23194,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVD512", + name: "VPDPBUSDS512", argLen: 3, resultInArg0: true, - asm: x86.AVPSHRDVD, + asm: x86.AVPDPBUSDS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23414,13 +23210,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVD512", - argLen: 2, - asm: x86.AVPSRAVD, + name: "VPDPBUSDSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPBUSDS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23428,9 +23227,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBD512", + name: "VPSLLVD512", argLen: 2, - asm: x86.AVPSUBD, + asm: x86.AVPSLLVD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23442,10 +23241,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPBUSD512", + name: "VPSHLDVD512", argLen: 3, resultInArg0: true, - asm: x86.AVPDPBUSD, + asm: x86.AVPSHLDVD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23458,27 +23257,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORD512", - argLen: 2, - commutative: true, - asm: x86.AVPXORD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPABSD128", - argLen: 1, - asm: x86.AVPABSD, + name: "VPSHLDVDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23486,14 +23274,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDD128", - argLen: 2, - commutative: true, - asm: x86.AVPADDD, + name: "VPSLLVDMasked512", + argLen: 3, + asm: x86.AVPSLLVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23501,10 +23289,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQD128", - argLen: 2, - commutative: true, - asm: x86.AVPCMPEQD, + name: 
"VPSRLVD512", + argLen: 2, + asm: x86.AVPSRLVD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23516,13 +23303,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPGTD128", - argLen: 2, - asm: x86.AVPCMPGTD, + name: "VPSHRDVD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23530,29 +23319,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSDMasked128", - argLen: 2, - asm: x86.AVPABSD, - reg: regInfo{ - inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPADDDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPADDD, + name: "VPSHRDVDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23560,10 +23336,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPANDD, + name: "VPSRLVDMasked512", + argLen: 3, + asm: x86.AVPSRLVD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23576,14 +23351,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNDMasked128", - argLen: 3, - asm: x86.AVPANDND, + name: "VPSRAVD512", + argLen: 2, + asm: x86.AVPSRAVD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23591,10 +23365,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMAXSD, + name: "VPSRAVDMasked512", + argLen: 3, + asm: x86.AVPSRAVD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23607,15 +23380,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMINSD, + name: "VPSUBD512", + argLen: 2, + asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23623,10 +23394,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMULLD, + name: "VPSUBDMasked512", + argLen: 3, + asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23639,15 +23409,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPORD, + name: "VPDPBUSD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPDPBUSD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23655,10 +23425,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPWSSDMasked128", + name: "VPDPBUSDMasked512", argLen: 4, resultInArg0: true, - asm: x86.AVPDPWSSD, + asm: x86.AVPDPBUSD, reg: regInfo{ inputs: []inputInfo{ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23672,13 +23442,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTDMasked128", - argLen: 2, - asm: x86.AVPOPCNTD, + name: "VPXORD512", + argLen: 2, + commutative: true, + asm: x86.AVPXORD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23686,9 +23457,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLVDMasked128", - argLen: 3, - asm: x86.AVPROLVD, + name: "VPXORDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPXORD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23701,14 +23473,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORVDMasked128", - argLen: 3, - asm: x86.AVPRORVD, + name: "VPABSD128", + argLen: 1, + asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23716,16 +23486,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPWSSDSMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVPDPWSSDS, + name: "VPABSDMasked128", + argLen: 2, + asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23733,16 +23500,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: 
"VPDPBUSDSMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVPDPBUSDS, + name: "VPADDD128", + argLen: 2, + commutative: true, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23750,9 +23515,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVDMasked128", - argLen: 3, - asm: x86.AVPSLLVD, + name: "VPADDDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23765,16 +23531,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVDMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHLDVD, + name: "VPANDDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPANDD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23782,9 +23547,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVDMasked128", + name: "VPANDNDMasked128", argLen: 3, - asm: x86.AVPSRLVD, + asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23797,16 +23562,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVDMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHRDVD, + name: "VPCMPEQD128", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23814,14 +23577,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVDMasked128", - argLen: 3, - asm: x86.AVPSRAVD, + name: "VPCMPGTD128", + argLen: 2, + asm: x86.AVPCMPGTD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23829,9 +23591,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBDMasked128", - argLen: 3, - asm: x86.AVPSUBD, + name: "VPMAXSD128", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSD, + reg: regInfo{ + inputs: []inputInfo{ + 
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMAXSDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23844,16 +23622,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPBUSDMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVPDPBUSD, + name: "VPMINSD128", + argLen: 2, + commutative: true, + asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23861,10 +23637,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORDMasked128", + name: "VPMINSDMasked128", argLen: 3, commutative: true, - asm: x86.AVPXORD, + asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23877,10 +23653,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSD128", + name: "VPMULDQ128", argLen: 2, commutative: true, - asm: x86.AVPMAXSD, + asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23892,10 +23668,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSD128", + name: "VPMULLD128", argLen: 2, commutative: true, - asm: x86.AVPMINSD, + asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23907,14 +23683,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULDQ128", - argLen: 2, + name: "VPMULLDMasked128", + argLen: 3, commutative: true, - asm: x86.AVPMULDQ, + asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23922,14 +23699,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLD128", - argLen: 2, + name: "VPORDMasked128", + argLen: 3, commutative: true, - asm: x86.AVPMULLD, + asm: x86.AVPORD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23952,6 +23730,23 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPDPWSSDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPWSSD, + reg: regInfo{ + inputs: 
[]inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPHADDD128", argLen: 2, @@ -23994,13 +23789,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLVD128", + name: "VPOPCNTDMasked128", argLen: 2, - asm: x86.AVPROLVD, + asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24008,29 +23803,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORVD128", + name: "VPROLVD128", argLen: 2, - asm: x86.AVPRORVD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPDPWSSDS128", - argLen: 3, - resultInArg0: true, - asm: x86.AVPDPWSSDS, + asm: x86.AVPROLVD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24038,15 +23817,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPBUSDS128", - argLen: 3, - resultInArg0: true, - asm: x86.AVPDPBUSDS, + name: "VPROLVDMasked128", + argLen: 3, + asm: x86.AVPROLVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24054,9 +23832,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLD128", + name: "VPRORVD128", argLen: 2, - asm: x86.AVPSLLD, + asm: x86.AVPRORVD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24068,13 +23846,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLD128", - argLen: 2, - asm: x86.AVPSRLD, + name: "VPRORVDMasked128", + argLen: 3, + asm: x86.AVPRORVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24082,13 
+23861,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAD128", - argLen: 2, - asm: x86.AVPSRAD, + name: "VPDPWSSDS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPDPWSSDS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24096,13 +23877,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVD128", - argLen: 2, - asm: x86.AVPSLLVD, + name: "VPDPWSSDSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPWSSDS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24110,10 +23894,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVD128", + name: "VPDPBUSDS128", argLen: 3, resultInArg0: true, - asm: x86.AVPSHLDVD, + asm: x86.AVPDPBUSDS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24126,13 +23910,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVD128", - argLen: 2, - asm: x86.AVPSRLVD, + name: "VPDPBUSDSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPBUSDS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24140,15 +23927,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHRDVD, + name: "VPSLLD128", + argLen: 2, + asm: x86.AVPSLLD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24156,9 +23941,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVD128", + name: "VPSRLD128", argLen: 2, - asm: x86.AVPSRAVD, + asm: x86.AVPSRLD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24170,9 +23955,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSIGND128", + name: "VPSRAD128", argLen: 2, - asm: x86.AVPSIGND, + asm: x86.AVPSRAD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24184,9 +23969,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBD128", + name: "VPSLLVD128", argLen: 2, - asm: x86.AVPSUBD, + asm: x86.AVPSLLVD, reg: 
regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24198,10 +23983,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPBUSD128", + name: "VPSHLDVD128", argLen: 3, resultInArg0: true, - asm: x86.AVPDPBUSD, + asm: x86.AVPSHLDVD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24214,12 +23999,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSD256", - argLen: 1, - asm: x86.AVPABSD, + name: "VPSHLDVDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24227,14 +24016,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDD256", - argLen: 2, - commutative: true, - asm: x86.AVPADDD, + name: "VPSLLVDMasked128", + argLen: 3, + asm: x86.AVPSLLVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24242,10 +24031,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQD256", - argLen: 2, - commutative: true, - asm: x86.AVPCMPEQD, + name: "VPSRLVD128", + argLen: 2, + asm: x86.AVPSRLVD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24257,13 +24045,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPGTD256", - argLen: 2, - asm: x86.AVPCMPGTD, + name: "VPSHRDVD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24271,13 +24061,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSDMasked256", - argLen: 2, - asm: x86.AVPABSD, + name: "VPSHRDVDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24285,10 +24078,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPADDD, + name: "VPSRLVDMasked128", + argLen: 3, + asm: x86.AVPSRLVD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24301,15 +24093,13 @@ var opcodeTable 
= [...]opInfo{ }, }, { - name: "VPANDDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPANDD, + name: "VPSRAVD128", + argLen: 2, + asm: x86.AVPSRAVD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24317,9 +24107,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNDMasked256", + name: "VPSRAVDMasked128", argLen: 3, - asm: x86.AVPANDND, + asm: x86.AVPSRAVD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24332,15 +24122,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMAXSD, + name: "VPSIGND128", + argLen: 2, + asm: x86.AVPSIGND, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24348,15 +24136,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMINSD, + name: "VPSUBD128", + argLen: 2, + asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24364,10 +24150,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMULLD, + name: "VPSUBDMasked128", + argLen: 3, + asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24380,15 +24165,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPORD, + name: "VPDPBUSD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPDPBUSD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24396,10 +24181,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPWSSDMasked256", + name: "VPDPBUSDMasked128", argLen: 4, resultInArg0: true, - asm: x86.AVPDPWSSD, + asm: x86.AVPDPBUSD, reg: regInfo{ inputs: []inputInfo{ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24413,23 
+24198,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTDMasked256", - argLen: 2, - asm: x86.AVPOPCNTD, - reg: regInfo{ - inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPROLVDMasked256", - argLen: 3, - asm: x86.AVPROLVD, + name: "VPXORDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPXORD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24442,14 +24214,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORVDMasked256", - argLen: 3, - asm: x86.AVPRORVD, + name: "VPABSD256", + argLen: 1, + asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24457,16 +24227,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPWSSDSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPDPWSSDS, + name: "VPABSDMasked256", + argLen: 2, + asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24474,16 +24241,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPBUSDSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPDPBUSDS, + name: "VPADDD256", + argLen: 2, + commutative: true, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24491,9 +24256,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVDMasked256", - argLen: 3, - asm: x86.AVPSLLVD, + name: "VPADDDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24506,16 +24272,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVDMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHLDVD, + name: "VPANDDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPANDD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 @@ -24523,9 +24288,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVDMasked256", + name: "VPANDNDMasked256", argLen: 3, - asm: x86.AVPSRLVD, + asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24538,16 +24303,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVDMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHRDVD, + name: "VPCMPEQD256", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24555,14 +24318,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVDMasked256", - argLen: 3, - asm: x86.AVPSRAVD, + name: "VPCMPGTD256", + argLen: 2, + asm: x86.AVPCMPGTD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24570,9 +24332,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBDMasked256", - argLen: 3, - asm: x86.AVPSUBD, + name: "VPMAXSD256", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMAXSDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24585,16 +24363,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPBUSDMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPDPBUSD, + name: "VPMINSD256", + argLen: 2, + commutative: true, + asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24602,10 +24378,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORDMasked256", + name: "VPMINSDMasked256", argLen: 3, commutative: true, - asm: x86.AVPXORD, + asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24618,10 +24394,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSD256", + name: "VPMULDQ256", argLen: 2, commutative: true, - asm: x86.AVPMAXSD, + asm: x86.AVPMULDQ, 
reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24633,10 +24409,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSD256", + name: "VPMULLD256", argLen: 2, commutative: true, - asm: x86.AVPMINSD, + asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24648,14 +24424,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULDQ256", - argLen: 2, + name: "VPMULLDMasked256", + argLen: 3, commutative: true, - asm: x86.AVPMULDQ, + asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24663,14 +24440,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLD256", - argLen: 2, + name: "VPORDMasked256", + argLen: 3, commutative: true, - asm: x86.AVPMULLD, + asm: x86.AVPORD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24693,6 +24471,23 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPDPWSSDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPWSSD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPHADDD256", argLen: 2, @@ -24734,6 +24529,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPOPCNTDMasked256", + argLen: 2, + asm: x86.AVPOPCNTD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPROLVD256", argLen: 2, @@ -24748,6 +24557,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPROLVDMasked256", + argLen: 3, + asm: x86.AVPROLVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPRORVD256", argLen: 2, @@ -24762,6 +24586,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPRORVDMasked256", + argLen: 3, + asm: x86.AVPRORVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPDPWSSDS256", argLen: 3, @@ -24778,6 +24617,23 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPDPWSSDSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPWSSDS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPDPBUSDS256", argLen: 3, @@ -24794,6 +24650,23 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPDPBUSDSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPBUSDS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSLLD256", argLen: 2, @@ -24866,6 +24739,38 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSHLDVDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSLLVDMasked256", + argLen: 3, + asm: x86.AVPSLLVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSRLVD256", argLen: 2, @@ -24896,6 +24801,38 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSHRDVDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLVDMasked256", + argLen: 3, + asm: x86.AVPSRLVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSRAVD256", argLen: 2, @@ -24910,6 +24847,21 @@ var opcodeTable = [...]opInfo{ }, }, }, 
+ { + name: "VPSRAVDMasked256", + argLen: 3, + asm: x86.AVPSRAVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSIGND256", argLen: 2, @@ -24938,6 +24890,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSUBDMasked256", + argLen: 3, + asm: x86.AVPSUBD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPDPBUSD256", argLen: 3, @@ -24955,12 +24922,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSQ128", - argLen: 1, - asm: x86.AVPABSQ, + name: "VPDPBUSDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPBUSD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24968,14 +24939,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDQ128", - argLen: 2, + name: "VPXORDMasked256", + argLen: 3, commutative: true, - asm: x86.AVPADDQ, + asm: x86.AVPXORD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24983,14 +24955,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQQ128", - argLen: 2, - commutative: true, - asm: x86.AVPCMPEQQ, + name: "VPABSQ128", + argLen: 1, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25011,6 +24981,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPADDQ128", + argLen: 2, + commutative: true, + asm: x86.AVPADDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPADDQMasked128", argLen: 3, @@ -25059,15 +25044,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSQMasked128", - argLen: 3, + name: "VPCMPEQQ128", + argLen: 2, commutative: true, - asm: x86.AVPMAXSQ, + asm: x86.AVPCMPEQQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25075,15 +25059,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSQMasked128", - argLen: 3, + name: "VPMAXSQ128", + argLen: 2, commutative: true, - asm: x86.AVPMINSQ, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25091,10 +25074,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULDQMasked128", + name: "VPMAXSQMasked128", argLen: 3, commutative: true, - asm: x86.AVPMULDQ, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25107,15 +25090,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQMasked128", - argLen: 3, + name: "VPMINSQ128", + argLen: 2, commutative: true, - asm: x86.AVPMULLQ, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25123,10 +25105,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORQMasked128", + name: "VPMINSQMasked128", argLen: 3, commutative: true, - asm: x86.AVPORQ, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25139,23 +25121,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTQMasked128", - argLen: 2, - asm: x86.AVPOPCNTQ, - reg: regInfo{ - inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPROLVQMasked128", - argLen: 3, - asm: x86.AVPROLVQ, + name: "VPMULDQMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25168,14 +25137,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORVQMasked128", - argLen: 3, - asm: x86.AVPRORVQ, + name: "VPMULLQ128", + argLen: 2, + commutative: true, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25183,9 +25152,10 @@ 
var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLQMasked128", - argLen: 3, - asm: x86.AVPSLLQ, + name: "VPMULLQMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25198,9 +25168,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLQMasked128", - argLen: 3, - asm: x86.AVPSRLQ, + name: "VPORQMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25213,14 +25184,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAQMasked128", - argLen: 3, - asm: x86.AVPSRAQ, + name: "VPOPCNTQ128", + argLen: 1, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25228,14 +25197,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVQMasked128", - argLen: 3, - asm: x86.AVPSLLVQ, + name: "VPOPCNTQMasked128", + argLen: 2, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25243,16 +25211,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVQMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHLDVQ, + name: "VPROLVQ128", + argLen: 2, + asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25260,9 +25225,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVQMasked128", + name: "VPROLVQMasked128", argLen: 3, - asm: x86.AVPSRLVQ, + asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25275,16 +25240,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVQMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHRDVQ, + name: "VPRORVQ128", + argLen: 2, + asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25292,9 +25254,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVQMasked128", + name: "VPRORVQMasked128", argLen: 3, - asm: 
x86.AVPSRAVQ, + asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25307,14 +25269,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBQMasked128", - argLen: 3, - asm: x86.AVPSUBQ, + name: "VPSLLQ128", + argLen: 2, + asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25322,10 +25283,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORQMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPXORQ, + name: "VPSLLQMasked128", + argLen: 3, + asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25338,10 +25298,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSQ128", - argLen: 2, - commutative: true, - asm: x86.AVPMAXSQ, + name: "VPSRLQ128", + argLen: 2, + asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25353,14 +25312,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSQ128", - argLen: 2, - commutative: true, - asm: x86.AVPMINSQ, + name: "VPSRLQMasked128", + argLen: 3, + asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25368,10 +25327,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQ128", - argLen: 2, - commutative: true, - asm: x86.AVPMULLQ, + name: "VPSRAQ128", + argLen: 2, + asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25383,12 +25341,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTQ128", - argLen: 1, - asm: x86.AVPOPCNTQ, + name: "VPSRAQMasked128", + argLen: 3, + asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25396,9 +25356,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLVQ128", + name: "VPSLLVQ128", argLen: 2, - asm: x86.AVPROLVQ, + asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25410,13 +25370,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORVQ128", - argLen: 2, - asm: x86.AVPRORVQ, + name: "VPSHLDVQ128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHLDVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25424,13 +25386,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLQ128", - argLen: 2, - asm: x86.AVPSLLQ, + name: "VPSHLDVQMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25438,13 +25403,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLQ128", - argLen: 2, - asm: x86.AVPSRLQ, + name: "VPSLLVQMasked128", + argLen: 3, + asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25452,9 +25418,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAQ128", + name: "VPSRLVQ128", argLen: 2, - asm: x86.AVPSRAQ, + asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25466,13 +25432,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVQ128", - argLen: 2, - asm: x86.AVPSLLVQ, + name: "VPSHRDVQ128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25480,15 +25448,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVQ128", - argLen: 3, + name: "VPSHRDVQMasked128", + argLen: 4, resultInArg0: true, - asm: x86.AVPSHLDVQ, + asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25496,13 +25465,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVQ128", - argLen: 2, + name: "VPSRLVQMasked128", + argLen: 3, asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25510,15 +25480,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVQ128", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHRDVQ, + name: "VPSRAVQ128", + argLen: 2, + asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25526,13 +25494,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVQ128", - argLen: 2, + name: "VPSRAVQMasked128", + argLen: 3, asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25554,12 +25523,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSQ256", - argLen: 1, - asm: x86.AVPABSQ, + name: "VPSUBQMasked128", + argLen: 3, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25567,14 +25538,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDQ256", - argLen: 2, + name: "VPXORQMasked128", + argLen: 3, commutative: true, - asm: x86.AVPADDQ, + asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25582,14 +25554,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQQ256", - argLen: 2, - commutative: true, - asm: x86.AVPCMPEQQ, + name: "VPABSQ256", + argLen: 1, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25597,13 +25567,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPGTQ256", + name: "VPABSQMasked256", argLen: 2, - asm: x86.AVPCMPGTQ, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25611,13 +25581,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSQMasked256", - argLen: 2, - asm: x86.AVPABSQ, + name: "VPADDQ256", + argLen: 2, + commutative: true, + asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25671,6 +25642,50 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPCMPEQQ256", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPCMPGTQ256", + argLen: 2, + asm: x86.AVPCMPGTQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMAXSQ256", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMAXSQMasked256", argLen: 3, @@ -25687,6 +25702,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMINSQ256", + argLen: 2, + commutative: true, + asm: x86.AVPMINSQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMINSQMasked256", argLen: 3, @@ -25720,15 +25750,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQMasked256", - argLen: 3, + name: "VPMULLQ256", + argLen: 2, commutative: true, asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25736,10 +25765,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORQMasked256", + name: "VPMULLQMasked256", argLen: 3, commutative: true, - asm: x86.AVPORQ, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25752,13 +25781,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTQMasked256", - argLen: 2, - asm: x86.AVPOPCNTQ, + name: "VPORQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPORQ, reg: regInfo{ inputs: 
[]inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25766,14 +25797,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLVQMasked256", - argLen: 3, - asm: x86.AVPROLVQ, + name: "VPOPCNTQ256", + argLen: 1, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25781,14 +25810,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORVQMasked256", - argLen: 3, - asm: x86.AVPRORVQ, + name: "VPOPCNTQMasked256", + argLen: 2, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25796,14 +25824,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLQMasked256", - argLen: 3, - asm: x86.AVPSLLQ, + name: "VPROLVQ256", + argLen: 2, + asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25811,9 +25838,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLQMasked256", + name: "VPROLVQMasked256", argLen: 3, - asm: x86.AVPSRLQ, + asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25826,14 +25853,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAQMasked256", - argLen: 3, - asm: x86.AVPSRAQ, + name: "VPRORVQ256", + argLen: 2, + asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25841,9 +25867,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVQMasked256", + name: "VPRORVQMasked256", argLen: 3, - asm: x86.AVPSLLVQ, + asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25856,16 +25882,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVQMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHLDVQ, + name: "VPSLLQ256", + argLen: 2, + asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25873,9 +25896,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVQMasked256", + name: "VPSLLQMasked256", argLen: 3, - asm: x86.AVPSRLVQ, + asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25888,16 +25911,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVQMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHRDVQ, + name: "VPSRLQ256", + argLen: 2, + asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25905,9 +25925,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVQMasked256", + name: "VPSRLQMasked256", argLen: 3, - asm: x86.AVPSRAVQ, + asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25920,14 +25940,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBQMasked256", - argLen: 3, - asm: x86.AVPSUBQ, + name: "VPSRAQ256", + argLen: 2, + asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25935,10 +25954,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORQMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPXORQ, + name: "VPSRAQMasked256", + argLen: 3, + asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25951,10 +25969,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSQ256", - argLen: 2, - commutative: true, - asm: x86.AVPMAXSQ, + name: "VPSLLVQ256", + argLen: 2, + asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25966,14 +25983,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSQ256", - argLen: 2, - commutative: true, - asm: x86.AVPMINSQ, + name: "VPSHLDVQ256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHLDVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25981,14 +25999,16 @@ var opcodeTable = [...]opInfo{ }, }, 
{ - name: "VPMULLQ256", - argLen: 2, - commutative: true, - asm: x86.AVPMULLQ, + name: "VPSHLDVQMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25996,12 +26016,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTQ256", - argLen: 1, - asm: x86.AVPOPCNTQ, + name: "VPSLLVQMasked256", + argLen: 3, + asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26009,9 +26031,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLVQ256", + name: "VPSRLVQ256", argLen: 2, - asm: x86.AVPROLVQ, + asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26023,13 +26045,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORVQ256", - argLen: 2, - asm: x86.AVPRORVQ, + name: "VPSHRDVQ256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26037,13 +26061,31 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLQ256", - argLen: 2, - asm: x86.AVPSLLQ, + name: "VPSHRDVQMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLVQMasked256", + argLen: 3, + asm: x86.AVPSRLVQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26051,9 +26093,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLQ256", + name: "VPSRAVQ256", argLen: 2, - asm: x86.AVPSRLQ, + asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26065,13 +26107,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAQ256", - argLen: 2, - asm: x86.AVPSRAQ, + 
name: "VPSRAVQMasked256", + argLen: 3, + asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26079,9 +26122,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVQ256", + name: "VPSUBQ256", argLen: 2, - asm: x86.AVPSLLVQ, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26093,15 +26136,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVQ256", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHLDVQ, + name: "VPSUBQMasked256", + argLen: 3, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26109,13 +26151,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVQ256", - argLen: 2, - asm: x86.AVPSRLVQ, + name: "VPXORQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26123,15 +26167,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVQ256", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHRDVQ, + name: "VPABSQ512", + argLen: 1, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26139,13 +26180,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVQ256", + name: "VPABSQMasked512", argLen: 2, - asm: x86.AVPSRAVQ, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26153,9 +26194,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBQ256", - argLen: 2, - asm: x86.AVPSUBQ, + name: "VPADDQ512", + argLen: 2, + commutative: true, + asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26167,12 +26209,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSQ512", - argLen: 1, - asm: x86.AVPABSQ, + name: "VPADDQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26180,10 +26225,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDQ512", + name: "VPANDQ512", argLen: 2, commutative: true, - asm: x86.AVPADDQ, + asm: x86.AVPANDQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26195,14 +26240,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDQ512", - argLen: 2, + name: "VPANDQMasked512", + argLen: 3, commutative: true, asm: x86.AVPANDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26224,13 +26270,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSQMasked512", - argLen: 2, - asm: x86.AVPABSQ, + name: "VPANDNQMasked512", + argLen: 3, + asm: x86.AVPANDNQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26238,15 +26285,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDQMasked512", - argLen: 3, + name: "VPMAXSQ512", + argLen: 2, commutative: true, - asm: x86.AVPADDQ, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26254,10 +26300,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDQMasked512", + name: "VPMAXSQMasked512", argLen: 3, commutative: true, - asm: x86.AVPANDQ, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26270,14 +26316,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNQMasked512", - argLen: 3, - asm: x86.AVPANDNQ, + name: "VPMINSQ512", + argLen: 2, + commutative: true, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26285,10 +26331,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSQMasked512", + name: "VPMINSQMasked512", argLen: 3, commutative: true, - asm: x86.AVPMAXSQ, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26301,15 +26347,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSQMasked512", - argLen: 3, + name: "VPMULDQ512", + argLen: 2, commutative: true, - asm: x86.AVPMINSQ, + asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26333,15 +26378,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQMasked512", - argLen: 3, + name: "VPMULLQ512", + argLen: 2, commutative: true, asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26349,10 +26393,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORQMasked512", + name: "VPMULLQMasked512", argLen: 3, commutative: true, - asm: x86.AVPORQ, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26365,28 +26409,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTQMasked512", - argLen: 2, - asm: x86.AVPOPCNTQ, + name: "VPORQ512", + argLen: 2, + commutative: true, + asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPROLVQMasked512", - argLen: 3, - asm: x86.AVPROLVQ, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26394,9 +26424,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORVQMasked512", - argLen: 3, - asm: x86.AVPRORVQ, + name: "VPORQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26409,14 +26440,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLQMasked512", - argLen: 3, - asm: x86.AVPSLLQ, + name: "VPOPCNTQ512", + argLen: 1, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26424,14 +26453,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLQMasked512", - argLen: 3, - asm: x86.AVPSRLQ, + name: "VPOPCNTQMasked512", + argLen: 2, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26439,14 +26467,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAQMasked512", - argLen: 3, - asm: x86.AVPSRAQ, + name: "VPROLVQ512", + argLen: 2, + asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26454,9 +26481,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVQMasked512", + name: "VPROLVQMasked512", argLen: 3, - asm: x86.AVPSLLVQ, + asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26469,16 +26496,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVQMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHLDVQ, + name: "VPRORVQ512", + argLen: 2, + asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26486,9 +26510,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVQMasked512", + name: "VPRORVQMasked512", argLen: 3, - asm: x86.AVPSRLVQ, + asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26501,16 +26525,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVQMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHRDVQ, + name: "VPSLLQ512", + argLen: 2, + asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26518,9 +26539,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVQMasked512", + name: 
"VPSLLQMasked512", argLen: 3, - asm: x86.AVPSRAVQ, + asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26533,14 +26554,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBQMasked512", - argLen: 3, - asm: x86.AVPSUBQ, + name: "VPSRLQ512", + argLen: 2, + asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26548,10 +26568,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORQMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPXORQ, + name: "VPSRLQMasked512", + argLen: 3, + asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26564,10 +26583,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSQ512", - argLen: 2, - commutative: true, - asm: x86.AVPMAXSQ, + name: "VPSRAQ512", + argLen: 2, + asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26579,14 +26597,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSQ512", - argLen: 2, - commutative: true, - asm: x86.AVPMINSQ, + name: "VPSRAQMasked512", + argLen: 3, + asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26594,10 +26612,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULDQ512", - argLen: 2, - commutative: true, - asm: x86.AVPMULDQ, + name: "VPSLLVQ512", + argLen: 2, + asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26609,14 +26626,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQ512", - argLen: 2, - commutative: true, - asm: x86.AVPMULLQ, + name: "VPSHLDVQ512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHLDVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26624,14 +26642,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORQ512", - argLen: 2, - commutative: true, - asm: x86.AVPORQ, + name: "VPSHLDVQMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 
X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26639,12 +26659,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTQ512", - argLen: 1, - asm: x86.AVPOPCNTQ, + name: "VPSLLVQMasked512", + argLen: 3, + asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26652,9 +26674,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLVQ512", + name: "VPSRLVQ512", argLen: 2, - asm: x86.AVPROLVQ, + asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26666,13 +26688,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORVQ512", - argLen: 2, - asm: x86.AVPRORVQ, + name: "VPSHRDVQ512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26680,13 +26704,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLQ512", - argLen: 2, - asm: x86.AVPSLLQ, + name: "VPSHRDVQMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26694,13 +26721,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLQ512", - argLen: 2, - asm: x86.AVPSRLQ, + name: "VPSRLVQMasked512", + argLen: 3, + asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26708,9 +26736,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAQ512", + name: "VPSRAVQ512", argLen: 2, - asm: x86.AVPSRAQ, + asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26722,13 +26750,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVQ512", - argLen: 2, - asm: x86.AVPSLLVQ, + name: "VPSRAVQMasked512", + argLen: 3, + asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 
K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26736,15 +26765,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVQ512", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHLDVQ, + name: "VPSUBQ512", + argLen: 2, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26752,13 +26779,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVQ512", - argLen: 2, - asm: x86.AVPSRLVQ, + name: "VPSUBQMasked512", + argLen: 3, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26766,15 +26794,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVQ512", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHRDVQ, + name: "VPXORQ512", + argLen: 2, + commutative: true, + asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26782,13 +26809,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVQ512", - argLen: 2, - asm: x86.AVPSRAVQ, + name: "VPXORQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26796,13 +26825,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBQ512", - argLen: 2, - asm: x86.AVPSUBQ, + name: "VPABSB128", + argLen: 1, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26810,14 +26838,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORQ512", - argLen: 2, - commutative: true, - asm: x86.AVPXORQ, + name: "VPABSBMasked128", + argLen: 2, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26825,12 +26852,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSB128", - argLen: 1, - asm: x86.AVPABSB, + name: "VPADDB128", + argLen: 2, + commutative: true, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26838,14 +26867,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDB128", - argLen: 2, + name: "VPADDBMasked128", + argLen: 3, commutative: true, asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26911,29 +26941,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSBMasked128", - argLen: 2, - asm: x86.AVPABSB, - reg: regInfo{ - inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPADDBMasked128", - argLen: 3, + name: "VPMAXSB128", + argLen: 2, commutative: true, - asm: x86.AVPADDB, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26957,29 +26972,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSBMasked128", - argLen: 3, + name: "VPMINSB128", + argLen: 2, commutative: true, asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPOPCNTBMasked128", - argLen: 2, - asm: x86.AVPOPCNTB, - reg: regInfo{ - inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26987,10 +26987,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSBMasked128", + name: "VPMINSBMasked128", argLen: 3, commutative: true, - asm: x86.AVPADDSB, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -27003,14 +27003,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSBMasked128", - 
argLen: 3, - asm: x86.AVPSUBSB, + name: "VPOR128", + argLen: 2, + commutative: true, + asm: x86.AVPOR, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27018,14 +27018,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBBMasked128", - argLen: 3, - asm: x86.AVPSUBB, + name: "VPOPCNTB128", + argLen: 1, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27033,14 +27031,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSB128", - argLen: 2, - commutative: true, - asm: x86.AVPMAXSB, + name: "VPOPCNTBMasked128", + argLen: 2, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27048,10 +27045,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSB128", + name: "VPADDSB128", argLen: 2, commutative: true, - asm: x86.AVPMINSB, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27063,14 +27060,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOR128", - argLen: 2, + name: "VPADDSBMasked128", + argLen: 3, commutative: true, - asm: x86.AVPOR, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27078,12 +27076,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTB128", - argLen: 1, - asm: x86.AVPOPCNTB, + name: "VPSUBSB128", + argLen: 2, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27091,14 +27090,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSB128", - argLen: 2, - commutative: true, - asm: x86.AVPADDSB, + name: "VPSUBSBMasked128", + argLen: 3, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 
K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27106,9 +27105,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSB128", + name: "VPSIGNB128", argLen: 2, - asm: x86.AVPSUBSB, + asm: x86.AVPSIGNB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27120,9 +27119,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSIGNB128", + name: "VPSUBB128", argLen: 2, - asm: x86.AVPSIGNB, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27134,13 +27133,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBB128", - argLen: 2, + name: "VPSUBBMasked128", + argLen: 3, asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27175,6 +27175,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPABSBMasked256", + argLen: 2, + asm: x86.AVPABSB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPADDB256", argLen: 2, @@ -27190,6 +27204,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPADDBMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPADDB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPAND256", argLen: 2, @@ -27249,29 +27279,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSBMasked256", - argLen: 2, - asm: x86.AVPABSB, - reg: regInfo{ - inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPADDBMasked256", - argLen: 3, + name: "VPMAXSB256", + argLen: 2, commutative: true, - asm: x86.AVPADDB, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27295,29 +27310,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSBMasked256", - argLen: 3, + name: "VPMINSB256", + argLen: 2, commutative: true, asm: x86.AVPMINSB, 
reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPOPCNTBMasked256", - argLen: 2, - asm: x86.AVPOPCNTB, - reg: regInfo{ - inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27325,10 +27325,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSBMasked256", + name: "VPMINSBMasked256", argLen: 3, commutative: true, - asm: x86.AVPADDSB, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -27341,14 +27341,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSBMasked256", - argLen: 3, - asm: x86.AVPSUBSB, + name: "VPOR256", + argLen: 2, + commutative: true, + asm: x86.AVPOR, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27356,14 +27356,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBBMasked256", - argLen: 3, - asm: x86.AVPSUBB, + name: "VPOPCNTB256", + argLen: 1, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27371,14 +27369,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSB256", - argLen: 2, - commutative: true, - asm: x86.AVPMAXSB, + name: "VPOPCNTBMasked256", + argLen: 2, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27386,10 +27383,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSB256", + name: "VPADDSB256", argLen: 2, commutative: true, - asm: x86.AVPMINSB, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27401,14 +27398,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOR256", - argLen: 2, + name: "VPADDSBMasked256", + argLen: 3, commutative: true, - asm: x86.AVPOR, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + 
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27416,12 +27414,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTB256", - argLen: 1, - asm: x86.AVPOPCNTB, + name: "VPSUBSB256", + argLen: 2, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27429,14 +27428,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSB256", - argLen: 2, - commutative: true, - asm: x86.AVPADDSB, + name: "VPSUBSBMasked256", + argLen: 3, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27444,9 +27443,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSB256", + name: "VPSIGNB256", argLen: 2, - asm: x86.AVPSUBSB, + asm: x86.AVPSIGNB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27458,9 +27457,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSIGNB256", + name: "VPSUBB256", argLen: 2, - asm: x86.AVPSIGNB, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27472,13 +27471,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBB256", - argLen: 2, + name: "VPSUBBMasked256", + argLen: 3, asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27514,14 +27514,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDB512", - argLen: 2, - commutative: true, - asm: x86.AVPADDB, + name: "VPABSBMasked512", + argLen: 2, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27529,13 +27528,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSBMasked512", - argLen: 2, - asm: x86.AVPABSB, + name: "VPADDB512", + argLen: 2, + commutative: true, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27559,15 +27559,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSBMasked512", - argLen: 3, + name: "VPMAXSB512", + argLen: 2, commutative: true, asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27575,10 +27574,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSBMasked512", + name: "VPMAXSBMasked512", argLen: 3, commutative: true, - asm: x86.AVPMINSB, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -27591,13 +27590,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTBMasked512", - argLen: 2, - asm: x86.AVPOPCNTB, + name: "VPMINSB512", + argLen: 2, + commutative: true, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27605,10 +27605,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSBMasked512", + name: "VPMINSBMasked512", argLen: 3, commutative: true, - asm: x86.AVPADDSB, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -27621,14 +27621,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSBMasked512", - argLen: 3, - asm: x86.AVPSUBSB, + name: "VPOPCNTB512", + argLen: 1, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27636,14 +27634,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBBMasked512", - argLen: 3, - asm: x86.AVPSUBB, + name: "VPOPCNTBMasked512", + argLen: 2, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27651,10 +27648,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSB512", + name: "VPADDSB512", argLen: 2, commutative: true, - asm: x86.AVPMAXSB, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27666,14 +27663,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSB512", - argLen: 2, + name: "VPADDSBMasked512", + argLen: 3, commutative: true, - asm: x86.AVPMINSB, + asm: x86.AVPADDSB, reg: 
regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27681,12 +27679,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTB512", - argLen: 1, - asm: x86.AVPOPCNTB, + name: "VPSUBSB512", + argLen: 2, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27694,14 +27693,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSB512", - argLen: 2, - commutative: true, - asm: x86.AVPADDSB, + name: "VPSUBSBMasked512", + argLen: 3, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27709,9 +27708,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSB512", + name: "VPSUBB512", argLen: 2, - asm: x86.AVPSUBSB, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27723,13 +27722,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBB512", - argLen: 2, + name: "VPSUBBMasked512", + argLen: 3, asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27768,15 +27768,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUWMasked256", - argLen: 3, + name: "VPMAXUW256", + argLen: 2, commutative: true, asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27784,10 +27783,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUWMasked256", + name: "VPMAXUWMasked256", argLen: 3, commutative: true, - asm: x86.AVPMINUW, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -27800,15 +27799,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHUWMasked256", - argLen: 3, + name: "VPMINUW256", + argLen: 2, commutative: true, - asm: 
x86.AVPMULHUW, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27816,14 +27814,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUW256", - argLen: 2, + name: "VPMINUWMasked256", + argLen: 3, commutative: true, - asm: x86.AVPMAXUW, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27831,10 +27830,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUW256", + name: "VPMULHUW256", argLen: 2, commutative: true, - asm: x86.AVPMINUW, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27846,14 +27845,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHUW256", - argLen: 2, + name: "VPMULHUWMasked256", + argLen: 3, commutative: true, asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27892,15 +27892,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUWMasked512", - argLen: 3, + name: "VPMAXUW512", + argLen: 2, commutative: true, asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27908,10 +27907,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUWMasked512", + name: "VPMAXUWMasked512", argLen: 3, commutative: true, - asm: x86.AVPMINUW, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -27924,15 +27923,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHUWMasked512", - argLen: 3, + name: "VPMINUW512", + argLen: 2, commutative: true, - asm: x86.AVPMULHUW, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27940,14 +27938,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUW512", - argLen: 2, + name: "VPMINUWMasked512", + argLen: 3, commutative: true, - asm: x86.AVPMAXUW, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27955,10 +27954,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUW512", + name: "VPMULHUW512", argLen: 2, commutative: true, - asm: x86.AVPMINUW, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27970,14 +27969,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHUW512", - argLen: 2, + name: "VPMULHUWMasked512", + argLen: 3, commutative: true, asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28015,6 +28015,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMAXUW128", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMAXUWMasked128", argLen: 3, @@ -28032,15 +28047,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUWMasked128", - argLen: 3, + name: "VPMINUW128", + argLen: 2, commutative: true, asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28048,10 +28062,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHUWMasked128", + name: "VPMINUWMasked128", argLen: 3, commutative: true, - asm: x86.AVPMULHUW, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -28064,10 +28078,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUW128", + name: "VPMULHUW128", argLen: 2, commutative: true, - asm: x86.AVPMAXUW, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28079,14 +28093,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUW128", - argLen: 2, + name: 
"VPMULHUWMasked128", + argLen: 3, commutative: true, - asm: x86.AVPMINUW, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28094,10 +28109,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHUW128", + name: "VPMAXUD512", argLen: 2, commutative: true, - asm: x86.AVPMULHUW, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28125,15 +28140,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUDMasked512", - argLen: 3, + name: "VPMINUD512", + argLen: 2, commutative: true, asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28141,14 +28155,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUD512", - argLen: 2, + name: "VPMINUDMasked512", + argLen: 3, commutative: true, - asm: x86.AVPMAXUD, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28156,10 +28171,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUD512", + name: "VPMAXUD128", argLen: 2, commutative: true, - asm: x86.AVPMINUD, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28187,15 +28202,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUDMasked128", - argLen: 3, + name: "VPMINUD128", + argLen: 2, commutative: true, asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28203,14 +28217,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUD128", - argLen: 2, + name: "VPMINUDMasked128", + argLen: 3, commutative: true, - asm: x86.AVPMAXUD, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28218,10 +28233,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUD128", + name: "VPMULUDQ128", argLen: 2, commutative: true, - asm: x86.AVPMINUD, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28233,10 +28248,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULUDQ128", + name: "VPMAXUD256", argLen: 2, commutative: true, - asm: x86.AVPMULUDQ, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28264,15 +28279,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUDMasked256", - argLen: 3, + name: "VPMINUD256", + argLen: 2, commutative: true, asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28280,14 +28294,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUD256", - argLen: 2, + name: "VPMINUDMasked256", + argLen: 3, commutative: true, - asm: x86.AVPMAXUD, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28295,10 +28310,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUD256", + name: "VPMULUDQ256", argLen: 2, commutative: true, - asm: x86.AVPMINUD, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28310,10 +28325,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULUDQ256", + name: "VPMAXUQ128", argLen: 2, commutative: true, - asm: x86.AVPMULUDQ, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28340,6 +28355,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMINUQ128", + argLen: 2, + commutative: true, + asm: x86.AVPMINUQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMINUQMasked128", argLen: 3, @@ -28373,7 +28403,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUQ128", + name: "VPMAXUQ256", argLen: 2, commutative: true, asm: x86.AVPMAXUQ, @@ -28388,14 +28418,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUQ128", - argLen: 2, + name: "VPMAXUQMasked256", + argLen: 3, commutative: true, - asm: x86.AVPMINUQ, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28403,15 +28434,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUQMasked256", - argLen: 3, + name: "VPMINUQ256", + argLen: 2, commutative: true, - asm: x86.AVPMAXUQ, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28451,7 +28481,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUQ256", + name: "VPMAXUQ512", argLen: 2, commutative: true, asm: x86.AVPMAXUQ, @@ -28465,21 +28495,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPMINUQ256", - argLen: 2, - commutative: true, - asm: x86.AVPMINUQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPMAXUQMasked512", argLen: 3, @@ -28497,15 +28512,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUQMasked512", - argLen: 3, + name: "VPMINUQ512", + argLen: 2, commutative: true, asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28513,10 +28527,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULUDQMasked512", + name: "VPMINUQMasked512", argLen: 3, commutative: true, - asm: x86.AVPMULUDQ, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -28529,10 +28543,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUQ512", + name: "VPMULUDQ512", argLen: 2, commutative: true, - asm: x86.AVPMAXUQ, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28544,14 +28558,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUQ512", - argLen: 2, + name: "VPMULUDQMasked512", + argLen: 3, commutative: true, - asm: x86.AVPMINUQ, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28559,10 +28574,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULUDQ512", + name: "VPAVGB128", argLen: 2, commutative: true, - asm: x86.AVPMULUDQ, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28574,14 +28589,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGB128", - argLen: 2, + name: "VPAVGBMasked128", + argLen: 3, commutative: true, asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28602,22 +28618,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPAVGBMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPAVGB, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VGF2P8MULBMasked128", argLen: 3, @@ -28634,15 +28634,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUBMasked128", - argLen: 3, + name: "VPMAXUB128", + argLen: 2, commutative: true, asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28650,25 +28649,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUBMasked128", + name: "VPMAXUBMasked128", argLen: 3, commutative: true, - asm: x86.AVPMINUB, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPMADDUBSWMasked128", - argLen: 3, - asm: x86.AVPMADDUBSW, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -28681,10 +28665,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUB128", + name: "VPMINUB128", argLen: 2, commutative: true, - asm: x86.AVPMAXUB, + asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28696,14 +28680,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUB128", - argLen: 2, + name: "VPMINUBMasked128", + argLen: 3, commutative: true, asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28725,14 +28710,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGB256", - argLen: 2, - commutative: true, - asm: x86.AVPAVGB, + name: "VPMADDUBSWMasked128", + argLen: 3, + asm: x86.AVPMADDUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28740,9 +28725,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VGF2P8MULB256", - argLen: 2, - asm: x86.AVGF2P8MULB, + name: "VPAVGB256", + argLen: 2, + commutative: true, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28770,14 +28756,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VGF2P8MULBMasked256", - argLen: 3, + name: "VGF2P8MULB256", + argLen: 2, asm: x86.AVGF2P8MULB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28785,10 +28770,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUBMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMAXUB, + name: "VGF2P8MULBMasked256", + argLen: 3, + asm: x86.AVGF2P8MULB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -28801,15 +28785,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUBMasked256", - argLen: 3, + name: "VPMAXUB256", + argLen: 2, commutative: true, - asm: x86.AVPMINUB, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28817,9 +28800,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMADDUBSWMasked256", - argLen: 3, - asm: x86.AVPMADDUBSW, + name: "VPMAXUBMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -28832,10 +28816,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUB256", + name: "VPMINUB256", argLen: 2, commutative: true, - asm: x86.AVPMAXUB, + asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28847,14 +28831,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUB256", - argLen: 2, + name: "VPMINUBMasked256", + argLen: 3, commutative: true, asm: 
x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28876,14 +28861,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGB512", - argLen: 2, - commutative: true, - asm: x86.AVPAVGB, + name: "VPMADDUBSWMasked256", + argLen: 3, + asm: x86.AVPMADDUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28891,9 +28876,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VGF2P8MULB512", - argLen: 2, - asm: x86.AVGF2P8MULB, + name: "VPAVGB512", + argLen: 2, + commutative: true, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28921,14 +28907,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VGF2P8MULBMasked512", - argLen: 3, + name: "VGF2P8MULB512", + argLen: 2, asm: x86.AVGF2P8MULB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28936,10 +28921,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUBMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMAXUB, + name: "VGF2P8MULBMasked512", + argLen: 3, + asm: x86.AVGF2P8MULB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -28952,15 +28936,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUBMasked512", - argLen: 3, + name: "VPMAXUB512", + argLen: 2, commutative: true, - asm: x86.AVPMINUB, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28968,9 +28951,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMADDUBSWMasked512", - argLen: 3, - asm: x86.AVPMADDUBSW, + name: "VPMAXUBMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -28983,10 +28967,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUB512", + name: "VPMINUB512", argLen: 2, commutative: true, - asm: x86.AVPMAXUB, + asm: x86.AVPMINUB, reg: regInfo{ inputs: 
[]inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28998,14 +28982,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUB512", - argLen: 2, + name: "VPMINUBMasked512", + argLen: 3, commutative: true, asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29026,6 +29011,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMADDUBSWMasked512", + argLen: 3, + asm: x86.AVPMADDUBSW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VRNDSCALEPS512", auxType: auxInt8, @@ -29041,13 +29041,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPS512", + name: "VRNDSCALEPSMasked512", auxType: auxInt8, - argLen: 1, - asm: x86.AVREDUCEPS, + argLen: 2, + asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29055,26 +29056,24 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPS512", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVCMPPS, + name: "VREDUCEPS512", + auxType: auxInt8, + argLen: 1, + asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VRNDSCALEPSMasked512", + name: "VREDUCEPSMasked512", auxType: auxInt8, argLen: 2, - asm: x86.AVRNDSCALEPS, + asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -29086,17 +29085,18 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPSMasked512", - auxType: auxInt8, - argLen: 2, - asm: x86.AVREDUCEPS, + name: "VCMPPS512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -29146,13 +29146,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPS128", + name: "VRNDSCALEPSMasked128", auxType: auxInt8, - argLen: 1, - asm: x86.AVREDUCEPS, + argLen: 2, + asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29160,15 +29161,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPS128", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVCMPPS, + name: "VREDUCEPS128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29176,10 +29175,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRNDSCALEPSMasked128", + name: "VREDUCEPSMasked128", auxType: auxInt8, argLen: 2, - asm: x86.AVRNDSCALEPS, + asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -29191,14 +29190,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPSMasked128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVREDUCEPS, + name: "VCMPPS128", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29251,29 +29251,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPS256", + name: "VRNDSCALEPSMasked256", auxType: auxInt8, - argLen: 1, - asm: x86.AVREDUCEPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VCMPPS256", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVCMPPS, + argLen: 2, + asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29281,10 +29266,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VEXTRACTF128128", + name: "VREDUCEPS256", auxType: auxInt8, argLen: 1, - asm: x86.AVEXTRACTF128, + asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29295,10 +29280,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRNDSCALEPSMasked256", + name: "VREDUCEPSMasked256", auxType: auxInt8, argLen: 2, - asm: x86.AVRNDSCALEPS, + asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -29310,14 +29295,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPSMasked256", - auxType: auxInt8, - argLen: 2, - asm: x86.AVREDUCEPS, + name: "VCMPPS256", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29341,6 +29327,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VEXTRACTF128128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVEXTRACTF128, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VINSERTF128256", auxType: auxInt8, @@ -29385,13 +29385,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPD128", + name: "VRNDSCALEPDMasked128", auxType: auxInt8, - argLen: 1, - asm: x86.AVREDUCEPD, + argLen: 2, + asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29399,15 +29400,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDPPD128", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVDPPD, + name: "VREDUCEPD128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29415,15 +29414,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPD128", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVCMPPD, + name: "VREDUCEPDMasked128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29431,14 +29429,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRNDSCALEPDMasked128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVRNDSCALEPD, + name: "VDPPD128", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVDPPD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29446,14 +29445,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPDMasked128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVREDUCEPD, + name: "VCMPPD128", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29506,13 +29506,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPD256", + name: "VRNDSCALEPDMasked256", auxType: auxInt8, - argLen: 1, - asm: x86.AVREDUCEPD, + argLen: 2, + asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29520,15 +29521,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPD256", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVCMPPD, + name: "VREDUCEPD256", + auxType: auxInt8, + argLen: 1, + asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29536,10 +29535,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRNDSCALEPDMasked256", + name: "VREDUCEPDMasked256", auxType: auxInt8, argLen: 2, - asm: x86.AVRNDSCALEPD, + asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -29551,14 +29550,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPDMasked256", - auxType: auxInt8, - argLen: 2, - asm: x86.AVREDUCEPD, + name: "VCMPPD256", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29597,13 +29597,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPD512", + name: "VRNDSCALEPDMasked512", auxType: auxInt8, - argLen: 1, - asm: x86.AVREDUCEPD, + argLen: 2, + asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29611,26 +29612,24 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPD512", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVCMPPD, + name: "VREDUCEPD512", + auxType: auxInt8, + argLen: 1, + asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VRNDSCALEPDMasked512", + name: "VREDUCEPDMasked512", auxType: auxInt8, argLen: 2, - asm: x86.AVRNDSCALEPD, + asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -29642,17 +29641,18 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPDMasked512", - auxType: auxInt8, - argLen: 2, - 
asm: x86.AVREDUCEPD, + name: "VCMPPD512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -29674,31 +29674,31 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPW256", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPW, + name: "VPCMPWMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, - }, - }, - { - name: "VPCMPWMasked256", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPW, + }, + }, + { + name: "VPCMPW256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -29706,15 +29706,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDWMasked256", + name: "VPSHLDW256", auxType: auxInt8, - argLen: 3, + argLen: 2, asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29722,10 +29721,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDWMasked256", + name: "VPSHLDWMasked256", auxType: auxInt8, argLen: 3, - asm: x86.AVPSHRDW, + asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -29738,10 +29737,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDW256", + name: "VPSHRDW256", auxType: auxInt8, argLen: 2, - asm: x86.AVPSHLDW, + asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29753,14 +29752,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDW256", + name: "VPSHRDWMasked256", auxType: auxInt8, - argLen: 2, + argLen: 3, asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29801,15 +29801,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDWMasked512", + name: "VPSHLDW512", auxType: auxInt8, - argLen: 3, + argLen: 2, asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29817,10 +29816,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDWMasked512", + name: "VPSHLDWMasked512", auxType: auxInt8, argLen: 3, - asm: x86.AVPSHRDW, + asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -29833,10 +29832,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDW512", + name: "VPSHRDW512", auxType: auxInt8, argLen: 2, - asm: x86.AVPSHLDW, + asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29848,20 +29847,38 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDW512", + name: "VPSHRDWMasked512", auxType: auxInt8, - argLen: 2, + argLen: 3, asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, + { + name: "VPCMPWMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, { name: "VPEXTRW128", auxType: auxInt8, @@ -29892,32 +29909,29 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPWMasked128", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPW, + name: "VPINSRW128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPINSRW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSHLDWMasked128", + name: "VPSHLDW128", auxType: auxInt8, - argLen: 3, + argLen: 2, asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29925,10 +29939,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDWMasked128", + name: "VPSHLDWMasked128", auxType: auxInt8, argLen: 3, - asm: x86.AVPSHRDW, + asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -29941,25 +29955,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPINSRW128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPINSRW, - reg: regInfo{ - inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSHLDW128", + name: "VPSHRDW128", auxType: auxInt8, argLen: 2, - asm: x86.AVPSHLDW, + asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29971,14 +29970,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDW128", + name: "VPSHRDWMasked128", auxType: auxInt8, - argLen: 2, + argLen: 3, asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30019,14 +30019,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLDMasked512", + name: "VPROLD512", auxType: auxInt8, - argLen: 2, + argLen: 1, asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30034,10 +30033,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORDMasked512", + name: "VPROLDMasked512", auxType: auxInt8, argLen: 2, - asm: x86.AVPRORD, + asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -30049,45 +30048,28 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDDMasked512", + name: "VPRORD512", auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHLDD, + argLen: 1, + asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, - }, - }, - { - name: "VPSHRDDMasked512", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHRDD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 }, }, }, { - name: "VPROLD512", + name: "VPRORDMasked512", auxType: auxInt8, - argLen: 1, - asm: x86.AVPROLD, + argLen: 2, + asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30095,13 +30077,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORD512", + name: "VPSHLDD512", auxType: auxInt8, - argLen: 1, - asm: x86.AVPRORD, + argLen: 2, + asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30109,14 +30092,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDD512", + name: "VPSHLDDMasked512", auxType: auxInt8, - argLen: 2, + argLen: 3, asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30139,31 +30123,18 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPEXTRD128", + name: "VPSHRDDMasked512", auxType: auxInt8, - argLen: 1, - asm: x86.AVPEXTRD, + argLen: 3, + asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - }, - }, - }, - { - name: "VPCMPD128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPD, - reg: regInfo{ - inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -30185,45 +30156,42 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLDMasked128", + name: "VPEXTRD128", auxType: auxInt8, - argLen: 2, - asm: x86.AVPROLD, + argLen: 1, + asm: x86.AVPEXTRD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, { - name: "VPRORDMasked128", + name: "VPCMPD128", auxType: auxInt8, argLen: 2, - asm: x86.AVPRORD, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: 
[]outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSHLDDMasked128", + name: "VPROLD128", auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHLDD, + argLen: 1, + asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30231,15 +30199,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDDMasked128", + name: "VPROLDMasked128", auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHRDD, + argLen: 2, + asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30247,10 +30214,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLD128", + name: "VPRORD128", auxType: auxInt8, argLen: 1, - asm: x86.AVPROLD, + asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30261,13 +30228,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORD128", + name: "VPRORDMasked128", auxType: auxInt8, - argLen: 1, + argLen: 2, asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30305,14 +30273,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDD128", + name: "VPSHLDDMasked128", auxType: auxInt8, - argLen: 2, - asm: x86.AVPSHRDD, + argLen: 3, + asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30320,26 +30289,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPD256", + name: "VPSHRDD128", auxType: auxInt8, argLen: 2, - asm: x86.AVPCMPD, + asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPDMasked256", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPD, + name: "VPSHRDDMasked128", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -30347,50 +30315,50 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPROLDMasked256", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPROLD, + name: "VPCMPDMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPRORDMasked256", + name: "VPCMPD256", auxType: auxInt8, argLen: 2, - asm: x86.AVPRORD, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSHLDDMasked256", + name: "VPROLD256", auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHLDD, + argLen: 1, + asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30398,15 +30366,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDDMasked256", + name: "VPROLDMasked256", auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHRDD, + argLen: 2, + asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30414,10 +30381,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLD256", + name: "VPRORD256", auxType: auxInt8, argLen: 1, - asm: x86.AVPROLD, + asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30428,13 +30395,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORD256", + name: "VPRORDMasked256", auxType: auxInt8, - argLen: 1, + argLen: 2, asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30457,14 +30425,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDD256", + name: "VPSHLDDMasked256", auxType: auxInt8, - argLen: 2, - asm: x86.AVPSHRDD, + argLen: 3, + asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30472,31 +30441,33 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPEXTRQ128", + name: "VPSHRDD256", auxType: auxInt8, - argLen: 1, - asm: x86.AVPEXTRQ, + argLen: 2, + asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPQ128", + name: "VPSHRDDMasked256", auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPQ, + argLen: 3, + asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -30518,45 +30489,42 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLQMasked128", + name: "VPEXTRQ128", auxType: auxInt8, - argLen: 2, - asm: x86.AVPROLQ, + argLen: 1, + asm: x86.AVPEXTRQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, { - name: "VPRORQMasked128", + name: "VPCMPQ128", auxType: auxInt8, argLen: 2, - asm: x86.AVPRORQ, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSHLDQMasked128", + name: "VPROLQ128", auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHLDQ, + argLen: 1, + asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30564,15 +30532,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDQMasked128", + name: "VPROLQMasked128", auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHRDQ, + argLen: 2, + asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, 
// K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30580,10 +30547,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLQ128", + name: "VPRORQ128", auxType: auxInt8, argLen: 1, - asm: x86.AVPROLQ, + asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30594,13 +30561,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORQ128", + name: "VPRORQMasked128", auxType: auxInt8, - argLen: 1, + argLen: 2, asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30637,6 +30605,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSHLDQMasked128", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHLDQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSHRDQ128", auxType: auxInt8, @@ -30653,17 +30637,18 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPQ256", + name: "VPSHRDQMasked128", auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPQ, + argLen: 3, + asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -30684,6 +30669,35 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPCMPQ256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPROLQ256", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPROLQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPROLQMasked256", auxType: auxInt8, @@ -30699,6 +30713,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPRORQ256", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPRORQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPRORQMasked256", auxType: auxInt8, @@ -30715,15 +30743,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: 
"VPSHLDQMasked256", + name: "VPSHLDQ256", auxType: auxInt8, - argLen: 3, + argLen: 2, asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30731,10 +30758,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDQMasked256", + name: "VPSHLDQMasked256", auxType: auxInt8, argLen: 3, - asm: x86.AVPSHRDQ, + asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -30747,38 +30774,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLQ256", - auxType: auxInt8, - argLen: 1, - asm: x86.AVPROLQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPRORQ256", - auxType: auxInt8, - argLen: 1, - asm: x86.AVPRORQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSHLDQ256", + name: "VPSHRDQ256", auxType: auxInt8, argLen: 2, - asm: x86.AVPSHLDQ, + asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30790,14 +30789,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDQ256", + name: "VPSHRDQMasked256", auxType: auxInt8, - argLen: 2, + argLen: 3, asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30838,14 +30838,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLQMasked512", + name: "VPROLQ512", auxType: auxInt8, - argLen: 2, + argLen: 1, asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30853,10 +30852,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORQMasked512", + name: "VPROLQMasked512", auxType: auxInt8, argLen: 2, - asm: x86.AVPRORQ, + asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -30868,15 +30867,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDQMasked512", + name: "VPRORQ512", auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHLDQ, + argLen: 1, + asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30884,15 +30881,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDQMasked512", + name: "VPRORQMasked512", auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHRDQ, + argLen: 2, + asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30900,13 +30896,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLQ512", + name: "VPSHLDQ512", auxType: auxInt8, - argLen: 1, - asm: x86.AVPROLQ, + argLen: 2, + asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30914,13 +30911,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORQ512", + name: "VPSHLDQMasked512", auxType: auxInt8, - argLen: 1, - asm: x86.AVPRORQ, + argLen: 3, + asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30928,10 +30927,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDQ512", + name: "VPSHRDQ512", auxType: auxInt8, argLen: 2, - asm: x86.AVPSHLDQ, + asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30943,20 +30942,38 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDQ512", + name: "VPSHRDQMasked512", auxType: auxInt8, - argLen: 2, + argLen: 3, asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, + { + name: "VPCMPBMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, { name: "VPEXTRB128", auxType: auxInt8, @@ -30987,34 +31004,34 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPBMasked128", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPB, + name: "VPINSRB128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPINSRB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPINSRB128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPINSRB, + name: "VPCMPBMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31047,23 +31064,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPBMasked256", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPB, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VINSERTI128256", auxType: auxInt8, @@ -31425,6 +31425,23 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPCMPUBMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, { name: "VGF2P8AFFINEQB128", auxType: auxInt8, @@ -31456,11 +31473,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUBMasked128", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPUB, + name: "VGF2P8AFFINEINVQBMasked128", + auxType: auxInt8, + argLen: 3, + asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -31468,7 +31484,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -31489,31 +31505,32 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VGF2P8AFFINEINVQBMasked128", - auxType: auxInt8, - argLen: 3, - asm: x86.AVGF2P8AFFINEINVQB, + name: "VPCMPUB256", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + 
{0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPCMPUB256", + name: "VPCMPUBMasked256", auxType: auxInt8, - argLen: 2, + argLen: 3, commutative: true, asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -31551,11 +31568,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUBMasked256", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPUB, + name: "VGF2P8AFFINEINVQBMasked256", + auxType: auxInt8, + argLen: 3, + asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -31563,7 +31579,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -31584,31 +31600,32 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VGF2P8AFFINEINVQBMasked256", - auxType: auxInt8, - argLen: 3, - asm: x86.AVGF2P8AFFINEINVQB, + name: "VPCMPUB512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPCMPUB512", + name: "VPCMPUBMasked512", auxType: auxInt8, - argLen: 2, + argLen: 3, commutative: true, asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -31646,27 +31663,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUBMasked512", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPUB, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VGF2P8AFFINEQBMasked512", + name: "VGF2P8AFFINEINVQBMasked512", auxType: auxInt8, argLen: 3, - asm: x86.AVGF2P8AFFINEQB, + asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -31679,10 +31679,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VGF2P8AFFINEINVQBMasked512", + name: "VGF2P8AFFINEQBMasked512", auxType: auxInt8, argLen: 3, - asm: x86.AVGF2P8AFFINEINVQB, + asm: 
x86.AVGF2P8AFFINEQB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -58852,207 +58852,202 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "AddMaskedFloat32x16", + argLen: 3, + commutative: true, + generic: true, + }, { name: "ApproximateReciprocalFloat32x16", argLen: 1, generic: true, }, + { + name: "ApproximateReciprocalMaskedFloat32x16", + argLen: 2, + generic: true, + }, { name: "ApproximateReciprocalOfSqrtFloat32x16", argLen: 1, generic: true, }, + { + name: "ApproximateReciprocalOfSqrtMaskedFloat32x16", + argLen: 2, + generic: true, + }, { name: "DivFloat32x16", argLen: 2, generic: true, }, + { + name: "DivMaskedFloat32x16", + argLen: 3, + generic: true, + }, { name: "EqualFloat32x16", argLen: 2, commutative: true, generic: true, }, + { + name: "EqualMaskedFloat32x16", + argLen: 3, + commutative: true, + generic: true, + }, { name: "FusedMultiplyAddFloat32x16", argLen: 3, generic: true, }, { - name: "FusedMultiplyAddSubFloat32x16", - argLen: 3, + name: "FusedMultiplyAddMaskedFloat32x16", + argLen: 4, generic: true, }, { - name: "FusedMultiplySubAddFloat32x16", + name: "FusedMultiplyAddSubFloat32x16", argLen: 3, generic: true, }, { - name: "GreaterFloat32x16", - argLen: 2, + name: "FusedMultiplyAddSubMaskedFloat32x16", + argLen: 4, generic: true, }, { - name: "GreaterEqualFloat32x16", - argLen: 2, + name: "FusedMultiplySubAddFloat32x16", + argLen: 3, generic: true, }, { - name: "IsNanFloat32x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "LessFloat32x16", - argLen: 2, + name: "FusedMultiplySubAddMaskedFloat32x16", + argLen: 4, generic: true, }, { - name: "LessEqualFloat32x16", + name: "GreaterFloat32x16", argLen: 2, generic: true, }, { - name: "MaskedAddFloat32x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedApproximateReciprocalFloat32x16", + name: "GreaterEqualFloat32x16", argLen: 2, generic: true, }, { - name: "MaskedApproximateReciprocalOfSqrtFloat32x16", - argLen: 2, + name: "GreaterEqualMaskedFloat32x16", + argLen: 3, generic: true, }, { - name: "MaskedDivFloat32x16", + name: "GreaterMaskedFloat32x16", argLen: 3, generic: true, }, { - name: "MaskedEqualFloat32x16", - argLen: 3, + name: "IsNanFloat32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedFusedMultiplyAddFloat32x16", - argLen: 4, - generic: true, + name: "IsNanMaskedFloat32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaskedFusedMultiplyAddSubFloat32x16", - argLen: 4, + name: "LessFloat32x16", + argLen: 2, generic: true, }, { - name: "MaskedFusedMultiplySubAddFloat32x16", - argLen: 4, + name: "LessEqualFloat32x16", + argLen: 2, generic: true, }, { - name: "MaskedGreaterFloat32x16", + name: "LessEqualMaskedFloat32x16", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualFloat32x16", + name: "LessMaskedFloat32x16", argLen: 3, generic: true, }, { - name: "MaskedIsNanFloat32x16", - argLen: 3, + name: "MaxFloat32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedLessFloat32x16", - argLen: 3, - generic: true, - }, - { - name: "MaskedLessEqualFloat32x16", - argLen: 3, - generic: true, - }, - { - name: "MaskedMaxFloat32x16", + name: "MaxMaskedFloat32x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinFloat32x16", - argLen: 3, + name: "MinFloat32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMulFloat32x16", + name: "MinMaskedFloat32x16", argLen: 3, commutative: true, generic: true, 
}, { - name: "MaskedMulByPowOf2Float32x16", - argLen: 3, - generic: true, - }, - { - name: "MaskedNotEqualFloat32x16", - argLen: 3, + name: "MulFloat32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedSqrtFloat32x16", + name: "MulByPowOf2Float32x16", argLen: 2, generic: true, }, { - name: "MaskedSubFloat32x16", + name: "MulByPowOf2MaskedFloat32x16", argLen: 3, generic: true, }, { - name: "MaxFloat32x16", - argLen: 2, + name: "MulMaskedFloat32x16", + argLen: 3, commutative: true, generic: true, }, { - name: "MinFloat32x16", + name: "NotEqualFloat32x16", argLen: 2, commutative: true, generic: true, }, { - name: "MulFloat32x16", - argLen: 2, + name: "NotEqualMaskedFloat32x16", + argLen: 3, commutative: true, generic: true, }, { - name: "MulByPowOf2Float32x16", - argLen: 2, + name: "SqrtFloat32x16", + argLen: 1, generic: true, }, { - name: "NotEqualFloat32x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "SqrtFloat32x16", - argLen: 1, + name: "SqrtMaskedFloat32x16", + argLen: 2, generic: true, }, { @@ -59060,12 +59055,23 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "SubMaskedFloat32x16", + argLen: 3, + generic: true, + }, { name: "AddFloat32x4", argLen: 2, commutative: true, generic: true, }, + { + name: "AddMaskedFloat32x4", + argLen: 3, + commutative: true, + generic: true, + }, { name: "AddSubFloat32x4", argLen: 2, @@ -59076,11 +59082,21 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "ApproximateReciprocalMaskedFloat32x4", + argLen: 2, + generic: true, + }, { name: "ApproximateReciprocalOfSqrtFloat32x4", argLen: 1, generic: true, }, + { + name: "ApproximateReciprocalOfSqrtMaskedFloat32x4", + argLen: 2, + generic: true, + }, { name: "CeilFloat32x4", argLen: 1, @@ -59091,12 +59107,23 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "DivMaskedFloat32x4", + argLen: 3, + generic: true, + }, { name: "EqualFloat32x4", argLen: 2, commutative: true, generic: true, }, + { + name: "EqualMaskedFloat32x4", + argLen: 3, + commutative: true, + generic: true, + }, { name: "FloorFloat32x4", argLen: 1, @@ -59108,174 +59135,137 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "FusedMultiplyAddSubFloat32x4", - argLen: 3, + name: "FusedMultiplyAddMaskedFloat32x4", + argLen: 4, generic: true, }, { - name: "FusedMultiplySubAddFloat32x4", + name: "FusedMultiplyAddSubFloat32x4", argLen: 3, generic: true, }, { - name: "GreaterFloat32x4", - argLen: 2, + name: "FusedMultiplyAddSubMaskedFloat32x4", + argLen: 4, generic: true, }, { - name: "GreaterEqualFloat32x4", - argLen: 2, + name: "FusedMultiplySubAddFloat32x4", + argLen: 3, generic: true, }, { - name: "IsNanFloat32x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "LessFloat32x4", - argLen: 2, + name: "FusedMultiplySubAddMaskedFloat32x4", + argLen: 4, generic: true, }, { - name: "LessEqualFloat32x4", + name: "GreaterFloat32x4", argLen: 2, generic: true, }, { - name: "MaskedAddFloat32x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedApproximateReciprocalFloat32x4", + name: "GreaterEqualFloat32x4", argLen: 2, generic: true, }, { - name: "MaskedApproximateReciprocalOfSqrtFloat32x4", - argLen: 2, + name: "GreaterEqualMaskedFloat32x4", + argLen: 3, generic: true, }, { - name: "MaskedDivFloat32x4", + name: "GreaterMaskedFloat32x4", argLen: 3, generic: true, }, { - name: "MaskedEqualFloat32x4", - argLen: 3, + name: "IsNanFloat32x4", + argLen: 2, commutative: true, generic: true, 
}, { - name: "MaskedFusedMultiplyAddFloat32x4", - argLen: 4, - generic: true, + name: "IsNanMaskedFloat32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaskedFusedMultiplyAddSubFloat32x4", - argLen: 4, + name: "LessFloat32x4", + argLen: 2, generic: true, }, { - name: "MaskedFusedMultiplySubAddFloat32x4", - argLen: 4, + name: "LessEqualFloat32x4", + argLen: 2, generic: true, }, { - name: "MaskedGreaterFloat32x4", + name: "LessEqualMaskedFloat32x4", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualFloat32x4", + name: "LessMaskedFloat32x4", argLen: 3, generic: true, }, { - name: "MaskedIsNanFloat32x4", - argLen: 3, + name: "MaxFloat32x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedLessFloat32x4", - argLen: 3, - generic: true, - }, - { - name: "MaskedLessEqualFloat32x4", - argLen: 3, - generic: true, - }, - { - name: "MaskedMaxFloat32x4", + name: "MaxMaskedFloat32x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinFloat32x4", - argLen: 3, + name: "MinFloat32x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMulFloat32x4", + name: "MinMaskedFloat32x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulByPowOf2Float32x4", - argLen: 3, - generic: true, - }, - { - name: "MaskedNotEqualFloat32x4", - argLen: 3, + name: "MulFloat32x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedSqrtFloat32x4", + name: "MulByPowOf2Float32x4", argLen: 2, generic: true, }, { - name: "MaskedSubFloat32x4", + name: "MulByPowOf2MaskedFloat32x4", argLen: 3, generic: true, }, { - name: "MaxFloat32x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "MinFloat32x4", - argLen: 2, + name: "MulMaskedFloat32x4", + argLen: 3, commutative: true, generic: true, }, { - name: "MulFloat32x4", + name: "NotEqualFloat32x4", argLen: 2, commutative: true, generic: true, }, { - name: "MulByPowOf2Float32x4", - argLen: 2, - generic: true, - }, - { - name: "NotEqualFloat32x4", - argLen: 2, + name: "NotEqualMaskedFloat32x4", + argLen: 3, commutative: true, generic: true, }, @@ -59299,11 +59289,21 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "SqrtMaskedFloat32x4", + argLen: 2, + generic: true, + }, { name: "SubFloat32x4", argLen: 2, generic: true, }, + { + name: "SubMaskedFloat32x4", + argLen: 3, + generic: true, + }, { name: "TruncFloat32x4", argLen: 1, @@ -59315,6 +59315,12 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "AddMaskedFloat32x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "AddSubFloat32x8", argLen: 2, @@ -59325,11 +59331,21 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "ApproximateReciprocalMaskedFloat32x8", + argLen: 2, + generic: true, + }, { name: "ApproximateReciprocalOfSqrtFloat32x8", argLen: 1, generic: true, }, + { + name: "ApproximateReciprocalOfSqrtMaskedFloat32x8", + argLen: 2, + generic: true, + }, { name: "CeilFloat32x8", argLen: 1, @@ -59340,12 +59356,23 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "DivMaskedFloat32x8", + argLen: 3, + generic: true, + }, { name: "EqualFloat32x8", argLen: 2, commutative: true, generic: true, }, + { + name: "EqualMaskedFloat32x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "FloorFloat32x8", argLen: 1, @@ -59357,174 +59384,137 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "FusedMultiplyAddSubFloat32x8", - argLen: 3, + name: 
"FusedMultiplyAddMaskedFloat32x8", + argLen: 4, generic: true, }, { - name: "FusedMultiplySubAddFloat32x8", + name: "FusedMultiplyAddSubFloat32x8", argLen: 3, generic: true, }, { - name: "GreaterFloat32x8", - argLen: 2, + name: "FusedMultiplyAddSubMaskedFloat32x8", + argLen: 4, generic: true, }, { - name: "GreaterEqualFloat32x8", - argLen: 2, + name: "FusedMultiplySubAddFloat32x8", + argLen: 3, generic: true, }, { - name: "IsNanFloat32x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "LessFloat32x8", - argLen: 2, + name: "FusedMultiplySubAddMaskedFloat32x8", + argLen: 4, generic: true, }, { - name: "LessEqualFloat32x8", + name: "GreaterFloat32x8", argLen: 2, generic: true, }, { - name: "MaskedAddFloat32x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedApproximateReciprocalFloat32x8", + name: "GreaterEqualFloat32x8", argLen: 2, generic: true, }, { - name: "MaskedApproximateReciprocalOfSqrtFloat32x8", - argLen: 2, + name: "GreaterEqualMaskedFloat32x8", + argLen: 3, generic: true, }, { - name: "MaskedDivFloat32x8", + name: "GreaterMaskedFloat32x8", argLen: 3, generic: true, }, { - name: "MaskedEqualFloat32x8", - argLen: 3, + name: "IsNanFloat32x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedFusedMultiplyAddFloat32x8", - argLen: 4, - generic: true, + name: "IsNanMaskedFloat32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaskedFusedMultiplyAddSubFloat32x8", - argLen: 4, + name: "LessFloat32x8", + argLen: 2, generic: true, }, { - name: "MaskedFusedMultiplySubAddFloat32x8", - argLen: 4, + name: "LessEqualFloat32x8", + argLen: 2, generic: true, }, { - name: "MaskedGreaterFloat32x8", + name: "LessEqualMaskedFloat32x8", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualFloat32x8", + name: "LessMaskedFloat32x8", argLen: 3, generic: true, }, { - name: "MaskedIsNanFloat32x8", - argLen: 3, + name: "MaxFloat32x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedLessFloat32x8", - argLen: 3, - generic: true, - }, - { - name: "MaskedLessEqualFloat32x8", - argLen: 3, - generic: true, - }, - { - name: "MaskedMaxFloat32x8", + name: "MaxMaskedFloat32x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinFloat32x8", - argLen: 3, + name: "MinFloat32x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMulFloat32x8", + name: "MinMaskedFloat32x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulByPowOf2Float32x8", - argLen: 3, - generic: true, - }, - { - name: "MaskedNotEqualFloat32x8", - argLen: 3, + name: "MulFloat32x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedSqrtFloat32x8", + name: "MulByPowOf2Float32x8", argLen: 2, generic: true, }, { - name: "MaskedSubFloat32x8", + name: "MulByPowOf2MaskedFloat32x8", argLen: 3, generic: true, }, { - name: "MaxFloat32x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "MinFloat32x8", - argLen: 2, + name: "MulMaskedFloat32x8", + argLen: 3, commutative: true, generic: true, }, { - name: "MulFloat32x8", + name: "NotEqualFloat32x8", argLen: 2, commutative: true, generic: true, }, { - name: "MulByPowOf2Float32x8", - argLen: 2, - generic: true, - }, - { - name: "NotEqualFloat32x8", - argLen: 2, + name: "NotEqualMaskedFloat32x8", + argLen: 3, commutative: true, generic: true, }, @@ -59548,11 +59538,21 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "SqrtMaskedFloat32x8", + argLen: 2, + generic: true, + }, { name: "SubFloat32x8", 
argLen: 2, generic: true, }, + { + name: "SubMaskedFloat32x8", + argLen: 3, + generic: true, + }, { name: "TruncFloat32x8", argLen: 1, @@ -59564,6 +59564,12 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "AddMaskedFloat64x2", + argLen: 3, + commutative: true, + generic: true, + }, { name: "AddSubFloat64x2", argLen: 2, @@ -59574,19 +59580,34 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "ApproximateReciprocalMaskedFloat64x2", + argLen: 2, + generic: true, + }, { name: "ApproximateReciprocalOfSqrtFloat64x2", argLen: 1, generic: true, }, + { + name: "ApproximateReciprocalOfSqrtMaskedFloat64x2", + argLen: 2, + generic: true, + }, { name: "CeilFloat64x2", argLen: 1, generic: true, }, { - name: "DivFloat64x2", - argLen: 2, + name: "DivFloat64x2", + argLen: 2, + generic: true, + }, + { + name: "DivMaskedFloat64x2", + argLen: 3, generic: true, }, { @@ -59601,6 +59622,12 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "EqualMaskedFloat64x2", + argLen: 3, + commutative: true, + generic: true, + }, { name: "FloorFloat64x2", argLen: 1, @@ -59612,174 +59639,137 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "FusedMultiplyAddSubFloat64x2", - argLen: 3, + name: "FusedMultiplyAddMaskedFloat64x2", + argLen: 4, generic: true, }, { - name: "FusedMultiplySubAddFloat64x2", + name: "FusedMultiplyAddSubFloat64x2", argLen: 3, generic: true, }, { - name: "GreaterFloat64x2", - argLen: 2, + name: "FusedMultiplyAddSubMaskedFloat64x2", + argLen: 4, generic: true, }, { - name: "GreaterEqualFloat64x2", - argLen: 2, + name: "FusedMultiplySubAddFloat64x2", + argLen: 3, generic: true, }, { - name: "IsNanFloat64x2", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "LessFloat64x2", - argLen: 2, + name: "FusedMultiplySubAddMaskedFloat64x2", + argLen: 4, generic: true, }, { - name: "LessEqualFloat64x2", + name: "GreaterFloat64x2", argLen: 2, generic: true, }, { - name: "MaskedAddFloat64x2", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedApproximateReciprocalFloat64x2", + name: "GreaterEqualFloat64x2", argLen: 2, generic: true, }, { - name: "MaskedApproximateReciprocalOfSqrtFloat64x2", - argLen: 2, + name: "GreaterEqualMaskedFloat64x2", + argLen: 3, generic: true, }, { - name: "MaskedDivFloat64x2", + name: "GreaterMaskedFloat64x2", argLen: 3, generic: true, }, { - name: "MaskedEqualFloat64x2", - argLen: 3, + name: "IsNanFloat64x2", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedFusedMultiplyAddFloat64x2", - argLen: 4, - generic: true, + name: "IsNanMaskedFloat64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaskedFusedMultiplyAddSubFloat64x2", - argLen: 4, + name: "LessFloat64x2", + argLen: 2, generic: true, }, { - name: "MaskedFusedMultiplySubAddFloat64x2", - argLen: 4, + name: "LessEqualFloat64x2", + argLen: 2, generic: true, }, { - name: "MaskedGreaterFloat64x2", + name: "LessEqualMaskedFloat64x2", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualFloat64x2", + name: "LessMaskedFloat64x2", argLen: 3, generic: true, }, { - name: "MaskedIsNanFloat64x2", - argLen: 3, + name: "MaxFloat64x2", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedLessFloat64x2", - argLen: 3, - generic: true, - }, - { - name: "MaskedLessEqualFloat64x2", - argLen: 3, - generic: true, - }, - { - name: "MaskedMaxFloat64x2", + name: "MaxMaskedFloat64x2", argLen: 3, commutative: true, generic: true, }, { - name: 
"MaskedMinFloat64x2", - argLen: 3, + name: "MinFloat64x2", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMulFloat64x2", + name: "MinMaskedFloat64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulByPowOf2Float64x2", - argLen: 3, - generic: true, - }, - { - name: "MaskedNotEqualFloat64x2", - argLen: 3, + name: "MulFloat64x2", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedSqrtFloat64x2", + name: "MulByPowOf2Float64x2", argLen: 2, generic: true, }, { - name: "MaskedSubFloat64x2", + name: "MulByPowOf2MaskedFloat64x2", argLen: 3, generic: true, }, { - name: "MaxFloat64x2", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "MinFloat64x2", - argLen: 2, + name: "MulMaskedFloat64x2", + argLen: 3, commutative: true, generic: true, }, { - name: "MulFloat64x2", + name: "NotEqualFloat64x2", argLen: 2, commutative: true, generic: true, }, { - name: "MulByPowOf2Float64x2", - argLen: 2, - generic: true, - }, - { - name: "NotEqualFloat64x2", - argLen: 2, + name: "NotEqualMaskedFloat64x2", + argLen: 3, commutative: true, generic: true, }, @@ -59803,11 +59793,21 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "SqrtMaskedFloat64x2", + argLen: 2, + generic: true, + }, { name: "SubFloat64x2", argLen: 2, generic: true, }, + { + name: "SubMaskedFloat64x2", + argLen: 3, + generic: true, + }, { name: "TruncFloat64x2", argLen: 1, @@ -59819,6 +59819,12 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "AddMaskedFloat64x4", + argLen: 3, + commutative: true, + generic: true, + }, { name: "AddSubFloat64x4", argLen: 2, @@ -59829,11 +59835,21 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "ApproximateReciprocalMaskedFloat64x4", + argLen: 2, + generic: true, + }, { name: "ApproximateReciprocalOfSqrtFloat64x4", argLen: 1, generic: true, }, + { + name: "ApproximateReciprocalOfSqrtMaskedFloat64x4", + argLen: 2, + generic: true, + }, { name: "CeilFloat64x4", argLen: 1, @@ -59844,12 +59860,23 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "DivMaskedFloat64x4", + argLen: 3, + generic: true, + }, { name: "EqualFloat64x4", argLen: 2, commutative: true, generic: true, }, + { + name: "EqualMaskedFloat64x4", + argLen: 3, + commutative: true, + generic: true, + }, { name: "FloorFloat64x4", argLen: 1, @@ -59861,174 +59888,137 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "FusedMultiplyAddSubFloat64x4", - argLen: 3, + name: "FusedMultiplyAddMaskedFloat64x4", + argLen: 4, generic: true, }, { - name: "FusedMultiplySubAddFloat64x4", + name: "FusedMultiplyAddSubFloat64x4", argLen: 3, generic: true, }, { - name: "GreaterFloat64x4", - argLen: 2, + name: "FusedMultiplyAddSubMaskedFloat64x4", + argLen: 4, generic: true, }, { - name: "GreaterEqualFloat64x4", - argLen: 2, + name: "FusedMultiplySubAddFloat64x4", + argLen: 3, generic: true, }, { - name: "IsNanFloat64x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "LessFloat64x4", - argLen: 2, + name: "FusedMultiplySubAddMaskedFloat64x4", + argLen: 4, generic: true, }, { - name: "LessEqualFloat64x4", + name: "GreaterFloat64x4", argLen: 2, generic: true, }, { - name: "MaskedAddFloat64x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedApproximateReciprocalFloat64x4", + name: "GreaterEqualFloat64x4", argLen: 2, generic: true, }, { - name: "MaskedApproximateReciprocalOfSqrtFloat64x4", - argLen: 2, + name: "GreaterEqualMaskedFloat64x4", + 
argLen: 3, generic: true, }, { - name: "MaskedDivFloat64x4", + name: "GreaterMaskedFloat64x4", argLen: 3, generic: true, }, { - name: "MaskedEqualFloat64x4", - argLen: 3, + name: "IsNanFloat64x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedFusedMultiplyAddFloat64x4", - argLen: 4, - generic: true, + name: "IsNanMaskedFloat64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaskedFusedMultiplyAddSubFloat64x4", - argLen: 4, + name: "LessFloat64x4", + argLen: 2, generic: true, }, { - name: "MaskedFusedMultiplySubAddFloat64x4", - argLen: 4, + name: "LessEqualFloat64x4", + argLen: 2, generic: true, }, { - name: "MaskedGreaterFloat64x4", + name: "LessEqualMaskedFloat64x4", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualFloat64x4", + name: "LessMaskedFloat64x4", argLen: 3, generic: true, }, { - name: "MaskedIsNanFloat64x4", - argLen: 3, + name: "MaxFloat64x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedLessFloat64x4", - argLen: 3, - generic: true, - }, - { - name: "MaskedLessEqualFloat64x4", - argLen: 3, - generic: true, - }, - { - name: "MaskedMaxFloat64x4", + name: "MaxMaskedFloat64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinFloat64x4", - argLen: 3, + name: "MinFloat64x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMulFloat64x4", + name: "MinMaskedFloat64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulByPowOf2Float64x4", - argLen: 3, - generic: true, - }, - { - name: "MaskedNotEqualFloat64x4", - argLen: 3, + name: "MulFloat64x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedSqrtFloat64x4", + name: "MulByPowOf2Float64x4", argLen: 2, generic: true, }, { - name: "MaskedSubFloat64x4", + name: "MulByPowOf2MaskedFloat64x4", argLen: 3, generic: true, }, { - name: "MaxFloat64x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "MinFloat64x4", - argLen: 2, + name: "MulMaskedFloat64x4", + argLen: 3, commutative: true, generic: true, }, { - name: "MulFloat64x4", + name: "NotEqualFloat64x4", argLen: 2, commutative: true, generic: true, }, { - name: "MulByPowOf2Float64x4", - argLen: 2, - generic: true, - }, - { - name: "NotEqualFloat64x4", - argLen: 2, + name: "NotEqualMaskedFloat64x4", + argLen: 3, commutative: true, generic: true, }, @@ -60052,11 +60042,21 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "SqrtMaskedFloat64x4", + argLen: 2, + generic: true, + }, { name: "SubFloat64x4", argLen: 2, generic: true, }, + { + name: "SubMaskedFloat64x4", + argLen: 3, + generic: true, + }, { name: "TruncFloat64x4", argLen: 1, @@ -60068,207 +60068,202 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "AddMaskedFloat64x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "ApproximateReciprocalFloat64x8", argLen: 1, generic: true, }, + { + name: "ApproximateReciprocalMaskedFloat64x8", + argLen: 2, + generic: true, + }, { name: "ApproximateReciprocalOfSqrtFloat64x8", argLen: 1, generic: true, }, + { + name: "ApproximateReciprocalOfSqrtMaskedFloat64x8", + argLen: 2, + generic: true, + }, { name: "DivFloat64x8", argLen: 2, generic: true, }, + { + name: "DivMaskedFloat64x8", + argLen: 3, + generic: true, + }, { name: "EqualFloat64x8", argLen: 2, commutative: true, generic: true, }, + { + name: "EqualMaskedFloat64x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "FusedMultiplyAddFloat64x8", argLen: 3, generic: true, }, { - name: 
"FusedMultiplyAddSubFloat64x8", - argLen: 3, + name: "FusedMultiplyAddMaskedFloat64x8", + argLen: 4, generic: true, }, { - name: "FusedMultiplySubAddFloat64x8", + name: "FusedMultiplyAddSubFloat64x8", argLen: 3, generic: true, }, { - name: "GreaterFloat64x8", - argLen: 2, + name: "FusedMultiplyAddSubMaskedFloat64x8", + argLen: 4, generic: true, }, { - name: "GreaterEqualFloat64x8", - argLen: 2, + name: "FusedMultiplySubAddFloat64x8", + argLen: 3, generic: true, }, { - name: "IsNanFloat64x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "LessFloat64x8", - argLen: 2, + name: "FusedMultiplySubAddMaskedFloat64x8", + argLen: 4, generic: true, }, { - name: "LessEqualFloat64x8", + name: "GreaterFloat64x8", argLen: 2, generic: true, }, { - name: "MaskedAddFloat64x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedApproximateReciprocalFloat64x8", + name: "GreaterEqualFloat64x8", argLen: 2, generic: true, }, { - name: "MaskedApproximateReciprocalOfSqrtFloat64x8", - argLen: 2, + name: "GreaterEqualMaskedFloat64x8", + argLen: 3, generic: true, }, { - name: "MaskedDivFloat64x8", + name: "GreaterMaskedFloat64x8", argLen: 3, generic: true, }, { - name: "MaskedEqualFloat64x8", - argLen: 3, + name: "IsNanFloat64x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedFusedMultiplyAddFloat64x8", - argLen: 4, - generic: true, + name: "IsNanMaskedFloat64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaskedFusedMultiplyAddSubFloat64x8", - argLen: 4, + name: "LessFloat64x8", + argLen: 2, generic: true, }, { - name: "MaskedFusedMultiplySubAddFloat64x8", - argLen: 4, + name: "LessEqualFloat64x8", + argLen: 2, generic: true, }, { - name: "MaskedGreaterFloat64x8", + name: "LessEqualMaskedFloat64x8", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualFloat64x8", + name: "LessMaskedFloat64x8", argLen: 3, generic: true, }, { - name: "MaskedIsNanFloat64x8", - argLen: 3, + name: "MaxFloat64x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedLessFloat64x8", - argLen: 3, - generic: true, - }, - { - name: "MaskedLessEqualFloat64x8", - argLen: 3, - generic: true, - }, - { - name: "MaskedMaxFloat64x8", + name: "MaxMaskedFloat64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinFloat64x8", - argLen: 3, + name: "MinFloat64x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMulFloat64x8", + name: "MinMaskedFloat64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulByPowOf2Float64x8", - argLen: 3, - generic: true, - }, - { - name: "MaskedNotEqualFloat64x8", - argLen: 3, + name: "MulFloat64x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedSqrtFloat64x8", + name: "MulByPowOf2Float64x8", argLen: 2, generic: true, }, { - name: "MaskedSubFloat64x8", + name: "MulByPowOf2MaskedFloat64x8", argLen: 3, generic: true, }, { - name: "MaxFloat64x8", - argLen: 2, + name: "MulMaskedFloat64x8", + argLen: 3, commutative: true, generic: true, }, { - name: "MinFloat64x8", + name: "NotEqualFloat64x8", argLen: 2, commutative: true, generic: true, }, { - name: "MulFloat64x8", - argLen: 2, + name: "NotEqualMaskedFloat64x8", + argLen: 3, commutative: true, generic: true, }, { - name: "MulByPowOf2Float64x8", - argLen: 2, + name: "SqrtFloat64x8", + argLen: 1, generic: true, }, { - name: "NotEqualFloat64x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "SqrtFloat64x8", - argLen: 1, + name: "SqrtMaskedFloat64x8", + argLen: 2, generic: 
true, }, { @@ -60276,17 +60271,33 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "SubMaskedFloat64x8", + argLen: 3, + generic: true, + }, { name: "AbsoluteInt16x16", argLen: 1, generic: true, }, + { + name: "AbsoluteMaskedInt16x16", + argLen: 2, + generic: true, + }, { name: "AddInt16x16", argLen: 2, commutative: true, generic: true, }, + { + name: "AddMaskedInt16x16", + argLen: 3, + commutative: true, + generic: true, + }, { name: "AndInt16x16", argLen: 2, @@ -60304,6 +60315,12 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "EqualMaskedInt16x16", + argLen: 3, + commutative: true, + generic: true, + }, { name: "GreaterInt16x16", argLen: 2, @@ -60315,160 +60332,92 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "LessInt16x16", - argLen: 2, + name: "GreaterEqualMaskedInt16x16", + argLen: 3, generic: true, }, { - name: "LessEqualInt16x16", - argLen: 2, + name: "GreaterMaskedInt16x16", + argLen: 3, generic: true, }, { - name: "MaskedAbsoluteInt16x16", + name: "LessInt16x16", argLen: 2, generic: true, }, { - name: "MaskedAddInt16x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedEqualInt16x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedGreaterInt16x16", - argLen: 3, - generic: true, - }, - { - name: "MaskedGreaterEqualInt16x16", - argLen: 3, + name: "LessEqualInt16x16", + argLen: 2, generic: true, }, { - name: "MaskedLessInt16x16", + name: "LessEqualMaskedInt16x16", argLen: 3, generic: true, }, { - name: "MaskedLessEqualInt16x16", + name: "LessMaskedInt16x16", argLen: 3, generic: true, }, { - name: "MaskedMaxInt16x16", - argLen: 3, + name: "MaxInt16x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMinInt16x16", + name: "MaxMaskedInt16x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulHighInt16x16", - argLen: 3, + name: "MinInt16x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMulLowInt16x16", + name: "MinMaskedInt16x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualInt16x16", - argLen: 3, + name: "MulHighInt16x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedPairDotProdInt16x16", - argLen: 3, - generic: true, - }, - { - name: "MaskedPopCountInt16x16", - argLen: 2, - generic: true, - }, - { - name: "MaskedSaturatedAddInt16x16", + name: "MulHighMaskedInt16x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedSaturatedSubInt16x16", - argLen: 3, - generic: true, - }, - { - name: "MaskedShiftLeftInt16x16", - argLen: 3, - generic: true, - }, - { - name: "MaskedShiftLeftAndFillUpperFromInt16x16", - argLen: 4, - generic: true, - }, - { - name: "MaskedShiftRightInt16x16", - argLen: 3, - generic: true, - }, - { - name: "MaskedShiftRightAndFillUpperFromInt16x16", - argLen: 4, - generic: true, - }, - { - name: "MaskedShiftRightSignExtendedInt16x16", - argLen: 3, - generic: true, - }, - { - name: "MaskedSubInt16x16", - argLen: 3, - generic: true, - }, - { - name: "MaxInt16x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "MinInt16x16", + name: "MulLowInt16x16", argLen: 2, commutative: true, generic: true, }, { - name: "MulHighInt16x16", - argLen: 2, + name: "MulLowMaskedInt16x16", + argLen: 3, commutative: true, generic: true, }, { - name: "MulLowInt16x16", + name: "NotEqualInt16x16", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualInt16x16", - argLen: 2, + name: "NotEqualMaskedInt16x16", + 
argLen: 3, commutative: true, generic: true, }, @@ -60483,6 +60432,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "PairDotProdMaskedInt16x16", + argLen: 3, + generic: true, + }, { name: "PairwiseAddInt16x16", argLen: 2, @@ -60498,12 +60452,23 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "PopCountMaskedInt16x16", + argLen: 2, + generic: true, + }, { name: "SaturatedAddInt16x16", argLen: 2, commutative: true, generic: true, }, + { + name: "SaturatedAddMaskedInt16x16", + argLen: 3, + commutative: true, + generic: true, + }, { name: "SaturatedPairwiseAddInt16x16", argLen: 2, @@ -60519,6 +60484,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "SaturatedSubMaskedInt16x16", + argLen: 3, + generic: true, + }, { name: "ShiftAllLeftInt16x16", argLen: 2, @@ -60545,188 +60515,137 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "ShiftRightInt16x16", - argLen: 2, + name: "ShiftLeftAndFillUpperFromMaskedInt16x16", + argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromInt16x16", + name: "ShiftLeftMaskedInt16x16", argLen: 3, generic: true, }, { - name: "ShiftRightSignExtendedInt16x16", - argLen: 2, - generic: true, - }, - { - name: "SignInt16x16", - argLen: 2, - generic: true, - }, - { - name: "SubInt16x16", - argLen: 2, - generic: true, - }, - { - name: "XorInt16x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AbsoluteInt16x32", - argLen: 1, - generic: true, - }, - { - name: "AddInt16x32", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "EqualInt16x32", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "GreaterInt16x32", - argLen: 2, - generic: true, - }, - { - name: "GreaterEqualInt16x32", - argLen: 2, - generic: true, - }, - { - name: "LessInt16x32", + name: "ShiftRightInt16x16", argLen: 2, generic: true, }, { - name: "LessEqualInt16x32", - argLen: 2, + name: "ShiftRightAndFillUpperFromInt16x16", + argLen: 3, generic: true, }, { - name: "MaskedAbsoluteInt16x32", - argLen: 2, + name: "ShiftRightAndFillUpperFromMaskedInt16x16", + argLen: 4, generic: true, }, - { - name: "MaskedAddInt16x32", - argLen: 3, - commutative: true, - generic: true, + { + name: "ShiftRightMaskedInt16x16", + argLen: 3, + generic: true, }, { - name: "MaskedEqualInt16x32", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftRightSignExtendedInt16x16", + argLen: 2, + generic: true, }, { - name: "MaskedGreaterInt16x32", + name: "ShiftRightSignExtendedMaskedInt16x16", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualInt16x32", - argLen: 3, + name: "SignInt16x16", + argLen: 2, generic: true, }, { - name: "MaskedLessInt16x32", - argLen: 3, + name: "SubInt16x16", + argLen: 2, generic: true, }, { - name: "MaskedLessEqualInt16x32", + name: "SubMaskedInt16x16", argLen: 3, generic: true, }, { - name: "MaskedMaxInt16x32", - argLen: 3, + name: "XorInt16x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMinInt16x32", - argLen: 3, + name: "AbsoluteInt16x32", + argLen: 1, + generic: true, + }, + { + name: "AbsoluteMaskedInt16x32", + argLen: 2, + generic: true, + }, + { + name: "AddInt16x32", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMulHighInt16x32", + name: "AddMaskedInt16x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulLowInt16x32", - argLen: 3, + name: "EqualInt16x32", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedNotEqualInt16x32", + name: 
"EqualMaskedInt16x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPairDotProdInt16x32", - argLen: 3, + name: "GreaterInt16x32", + argLen: 2, generic: true, }, { - name: "MaskedPopCountInt16x32", + name: "GreaterEqualInt16x32", argLen: 2, generic: true, }, { - name: "MaskedSaturatedAddInt16x32", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedSaturatedSubInt16x32", + name: "GreaterEqualMaskedInt16x32", argLen: 3, generic: true, }, { - name: "MaskedShiftLeftInt16x32", + name: "GreaterMaskedInt16x32", argLen: 3, generic: true, }, { - name: "MaskedShiftLeftAndFillUpperFromInt16x32", - argLen: 4, - generic: true, - }, - { - name: "MaskedShiftRightInt16x32", - argLen: 3, + name: "LessInt16x32", + argLen: 2, generic: true, }, { - name: "MaskedShiftRightAndFillUpperFromInt16x32", - argLen: 4, + name: "LessEqualInt16x32", + argLen: 2, generic: true, }, { - name: "MaskedShiftRightSignExtendedInt16x32", + name: "LessEqualMaskedInt16x32", argLen: 3, generic: true, }, { - name: "MaskedSubInt16x32", + name: "LessMaskedInt16x32", argLen: 3, generic: true, }, @@ -60736,51 +60655,102 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MaxMaskedInt16x32", + argLen: 3, + commutative: true, + generic: true, + }, { name: "MinInt16x32", argLen: 2, commutative: true, generic: true, }, + { + name: "MinMaskedInt16x32", + argLen: 3, + commutative: true, + generic: true, + }, { name: "MulHighInt16x32", argLen: 2, commutative: true, generic: true, }, + { + name: "MulHighMaskedInt16x32", + argLen: 3, + commutative: true, + generic: true, + }, { name: "MulLowInt16x32", argLen: 2, commutative: true, generic: true, }, + { + name: "MulLowMaskedInt16x32", + argLen: 3, + commutative: true, + generic: true, + }, { name: "NotEqualInt16x32", argLen: 2, commutative: true, generic: true, }, + { + name: "NotEqualMaskedInt16x32", + argLen: 3, + commutative: true, + generic: true, + }, { name: "PairDotProdInt16x32", argLen: 2, generic: true, }, + { + name: "PairDotProdMaskedInt16x32", + argLen: 3, + generic: true, + }, { name: "PopCountInt16x32", argLen: 1, generic: true, }, + { + name: "PopCountMaskedInt16x32", + argLen: 2, + generic: true, + }, { name: "SaturatedAddInt16x32", argLen: 2, commutative: true, generic: true, }, + { + name: "SaturatedAddMaskedInt16x32", + argLen: 3, + commutative: true, + generic: true, + }, { name: "SaturatedSubInt16x32", argLen: 2, generic: true, }, + { + name: "SaturatedSubMaskedInt16x32", + argLen: 3, + generic: true, + }, { name: "ShiftLeftInt16x32", argLen: 2, @@ -60791,6 +60761,16 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "ShiftLeftAndFillUpperFromMaskedInt16x32", + argLen: 4, + generic: true, + }, + { + name: "ShiftLeftMaskedInt16x32", + argLen: 3, + generic: true, + }, { name: "ShiftRightInt16x32", argLen: 2, @@ -60801,27 +60781,58 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "ShiftRightAndFillUpperFromMaskedInt16x32", + argLen: 4, + generic: true, + }, + { + name: "ShiftRightMaskedInt16x32", + argLen: 3, + generic: true, + }, { name: "ShiftRightSignExtendedInt16x32", argLen: 2, generic: true, }, + { + name: "ShiftRightSignExtendedMaskedInt16x32", + argLen: 3, + generic: true, + }, { name: "SubInt16x32", argLen: 2, generic: true, }, + { + name: "SubMaskedInt16x32", + argLen: 3, + generic: true, + }, { name: "AbsoluteInt16x8", argLen: 1, generic: true, }, + { + name: "AbsoluteMaskedInt16x8", + argLen: 2, + generic: true, + }, { name: "AddInt16x8", 
argLen: 2, commutative: true, generic: true, }, + { + name: "AddMaskedInt16x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "AndInt16x8", argLen: 2, @@ -60839,6 +60850,12 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "EqualMaskedInt16x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "GreaterInt16x8", argLen: 2, @@ -60850,160 +60867,92 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "LessInt16x8", - argLen: 2, + name: "GreaterEqualMaskedInt16x8", + argLen: 3, generic: true, }, { - name: "LessEqualInt16x8", - argLen: 2, + name: "GreaterMaskedInt16x8", + argLen: 3, generic: true, }, { - name: "MaskedAbsoluteInt16x8", + name: "LessInt16x8", argLen: 2, generic: true, }, { - name: "MaskedAddInt16x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedEqualInt16x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedGreaterInt16x8", - argLen: 3, - generic: true, - }, - { - name: "MaskedGreaterEqualInt16x8", - argLen: 3, + name: "LessEqualInt16x8", + argLen: 2, generic: true, }, { - name: "MaskedLessInt16x8", + name: "LessEqualMaskedInt16x8", argLen: 3, generic: true, }, { - name: "MaskedLessEqualInt16x8", + name: "LessMaskedInt16x8", argLen: 3, generic: true, }, { - name: "MaskedMaxInt16x8", - argLen: 3, + name: "MaxInt16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMinInt16x8", + name: "MaxMaskedInt16x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulHighInt16x8", - argLen: 3, + name: "MinInt16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMulLowInt16x8", + name: "MinMaskedInt16x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualInt16x8", - argLen: 3, + name: "MulHighInt16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedPairDotProdInt16x8", - argLen: 3, - generic: true, - }, - { - name: "MaskedPopCountInt16x8", - argLen: 2, - generic: true, - }, - { - name: "MaskedSaturatedAddInt16x8", + name: "MulHighMaskedInt16x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedSaturatedSubInt16x8", - argLen: 3, - generic: true, - }, - { - name: "MaskedShiftLeftInt16x8", - argLen: 3, - generic: true, - }, - { - name: "MaskedShiftLeftAndFillUpperFromInt16x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedShiftRightInt16x8", - argLen: 3, - generic: true, - }, - { - name: "MaskedShiftRightAndFillUpperFromInt16x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedShiftRightSignExtendedInt16x8", - argLen: 3, - generic: true, - }, - { - name: "MaskedSubInt16x8", - argLen: 3, - generic: true, - }, - { - name: "MaxInt16x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "MinInt16x8", + name: "MulLowInt16x8", argLen: 2, commutative: true, generic: true, }, { - name: "MulHighInt16x8", - argLen: 2, + name: "MulLowMaskedInt16x8", + argLen: 3, commutative: true, generic: true, }, { - name: "MulLowInt16x8", + name: "NotEqualInt16x8", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualInt16x8", - argLen: 2, + name: "NotEqualMaskedInt16x8", + argLen: 3, commutative: true, generic: true, }, @@ -61018,6 +60967,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "PairDotProdMaskedInt16x8", + argLen: 3, + generic: true, + }, { name: "PairwiseAddInt16x8", argLen: 2, @@ -61033,12 +60987,23 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: 
"PopCountMaskedInt16x8", + argLen: 2, + generic: true, + }, { name: "SaturatedAddInt16x8", argLen: 2, commutative: true, generic: true, }, + { + name: "SaturatedAddMaskedInt16x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "SaturatedPairwiseAddInt16x8", argLen: 2, @@ -61054,6 +61019,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "SaturatedSubMaskedInt16x8", + argLen: 3, + generic: true, + }, { name: "ShiftAllLeftInt16x8", argLen: 2, @@ -61080,254 +61050,207 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "ShiftRightInt16x8", - argLen: 2, + name: "ShiftLeftAndFillUpperFromMaskedInt16x8", + argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromInt16x8", + name: "ShiftLeftMaskedInt16x8", argLen: 3, generic: true, }, { - name: "ShiftRightSignExtendedInt16x8", + name: "ShiftRightInt16x8", argLen: 2, generic: true, }, { - name: "SignInt16x8", - argLen: 2, + name: "ShiftRightAndFillUpperFromInt16x8", + argLen: 3, generic: true, }, { - name: "SubInt16x8", - argLen: 2, + name: "ShiftRightAndFillUpperFromMaskedInt16x8", + argLen: 4, generic: true, }, { - name: "XorInt16x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AbsoluteInt32x16", - argLen: 1, + name: "ShiftRightMaskedInt16x8", + argLen: 3, generic: true, }, { - name: "AddInt32x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndInt32x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndNotInt32x16", + name: "ShiftRightSignExtendedInt16x8", argLen: 2, generic: true, }, { - name: "EqualInt32x16", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightSignExtendedMaskedInt16x8", + argLen: 3, + generic: true, }, { - name: "GreaterInt32x16", + name: "SignInt16x8", argLen: 2, generic: true, }, { - name: "GreaterEqualInt32x16", + name: "SubInt16x8", argLen: 2, generic: true, }, { - name: "LessInt32x16", - argLen: 2, + name: "SubMaskedInt16x8", + argLen: 3, generic: true, }, { - name: "LessEqualInt32x16", - argLen: 2, + name: "XorInt16x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AbsoluteInt32x16", + argLen: 1, generic: true, }, { - name: "MaskedAbsoluteInt32x16", + name: "AbsoluteMaskedInt32x16", argLen: 2, generic: true, }, { - name: "MaskedAddInt32x16", - argLen: 3, + name: "AddInt32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedAndInt32x16", + name: "AddMaskedInt32x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndNotInt32x16", - argLen: 3, - generic: true, + name: "AndInt32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedEqualInt32x16", + name: "AndMaskedInt32x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterInt32x16", - argLen: 3, - generic: true, - }, - { - name: "MaskedGreaterEqualInt32x16", - argLen: 3, - generic: true, - }, - { - name: "MaskedLessInt32x16", - argLen: 3, + name: "AndNotInt32x16", + argLen: 2, generic: true, }, { - name: "MaskedLessEqualInt32x16", + name: "AndNotMaskedInt32x16", argLen: 3, generic: true, }, { - name: "MaskedMaxInt32x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedMinInt32x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedMulLowInt32x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedNotEqualInt32x16", - argLen: 3, + name: "EqualInt32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedOrInt32x16", + name: 
"EqualMaskedInt32x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPairDotProdAccumulateInt32x16", - argLen: 4, + name: "GreaterInt32x16", + argLen: 2, generic: true, }, { - name: "MaskedPopCountInt32x16", + name: "GreaterEqualInt32x16", argLen: 2, generic: true, }, { - name: "MaskedRotateLeftInt32x16", + name: "GreaterEqualMaskedInt32x16", argLen: 3, generic: true, }, { - name: "MaskedRotateRightInt32x16", + name: "GreaterMaskedInt32x16", argLen: 3, generic: true, }, { - name: "MaskedSaturatedPairDotProdAccumulateInt32x16", - argLen: 4, + name: "LessInt32x16", + argLen: 2, generic: true, }, { - name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16", - argLen: 4, + name: "LessEqualInt32x16", + argLen: 2, generic: true, }, { - name: "MaskedShiftLeftInt32x16", + name: "LessEqualMaskedInt32x16", argLen: 3, generic: true, }, { - name: "MaskedShiftLeftAndFillUpperFromInt32x16", - argLen: 4, - generic: true, - }, - { - name: "MaskedShiftRightInt32x16", + name: "LessMaskedInt32x16", argLen: 3, generic: true, }, { - name: "MaskedShiftRightAndFillUpperFromInt32x16", - argLen: 4, - generic: true, - }, - { - name: "MaskedShiftRightSignExtendedInt32x16", - argLen: 3, - generic: true, + name: "MaxInt32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedSubInt32x16", - argLen: 3, - generic: true, + name: "MaxMaskedInt32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaskedUnsignedSignedQuadDotProdAccumulateInt32x16", - argLen: 4, - generic: true, + name: "MinInt32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedXorInt32x16", + name: "MinMaskedInt32x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaxInt32x16", + name: "MulLowInt32x16", argLen: 2, commutative: true, generic: true, }, { - name: "MinInt32x16", - argLen: 2, + name: "MulLowMaskedInt32x16", + argLen: 3, commutative: true, generic: true, }, { - name: "MulLowInt32x16", + name: "NotEqualInt32x16", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualInt32x16", - argLen: 2, + name: "NotEqualMaskedInt32x16", + argLen: 3, commutative: true, generic: true, }, @@ -61337,283 +61260,271 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "OrMaskedInt32x16", + argLen: 3, + commutative: true, + generic: true, + }, { name: "PairDotProdAccumulateInt32x16", argLen: 3, generic: true, }, + { + name: "PairDotProdAccumulateMaskedInt32x16", + argLen: 4, + generic: true, + }, { name: "PopCountInt32x16", argLen: 1, generic: true, }, { - name: "RotateLeftInt32x16", + name: "PopCountMaskedInt32x16", argLen: 2, generic: true, }, { - name: "RotateRightInt32x16", + name: "RotateLeftInt32x16", argLen: 2, generic: true, }, { - name: "SaturatedPairDotProdAccumulateInt32x16", + name: "RotateLeftMaskedInt32x16", argLen: 3, generic: true, }, { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x16", - argLen: 3, + name: "RotateRightInt32x16", + argLen: 2, generic: true, }, { - name: "ShiftLeftInt32x16", - argLen: 2, + name: "RotateRightMaskedInt32x16", + argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromInt32x16", + name: "SaturatedPairDotProdAccumulateInt32x16", argLen: 3, generic: true, }, { - name: "ShiftRightInt32x16", - argLen: 2, + name: "SaturatedPairDotProdAccumulateMaskedInt32x16", + argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromInt32x16", + name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x16", argLen: 3, generic: true, }, { - name: 
"ShiftRightSignExtendedInt32x16", - argLen: 2, + name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16", + argLen: 4, generic: true, }, { - name: "SubInt32x16", + name: "ShiftLeftInt32x16", argLen: 2, generic: true, }, { - name: "UnsignedSignedQuadDotProdAccumulateInt32x16", + name: "ShiftLeftAndFillUpperFromInt32x16", argLen: 3, generic: true, }, { - name: "XorInt32x16", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftLeftAndFillUpperFromMaskedInt32x16", + argLen: 4, + generic: true, }, { - name: "AbsoluteInt32x4", - argLen: 1, + name: "ShiftLeftMaskedInt32x16", + argLen: 3, generic: true, }, { - name: "AddInt32x4", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightInt32x16", + argLen: 2, + generic: true, }, { - name: "AndInt32x4", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightAndFillUpperFromInt32x16", + argLen: 3, + generic: true, }, { - name: "AndNotInt32x4", - argLen: 2, + name: "ShiftRightAndFillUpperFromMaskedInt32x16", + argLen: 4, generic: true, }, { - name: "EqualInt32x4", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightMaskedInt32x16", + argLen: 3, + generic: true, }, { - name: "GreaterInt32x4", + name: "ShiftRightSignExtendedInt32x16", argLen: 2, generic: true, }, { - name: "GreaterEqualInt32x4", - argLen: 2, + name: "ShiftRightSignExtendedMaskedInt32x16", + argLen: 3, generic: true, }, { - name: "LessInt32x4", + name: "SubInt32x16", argLen: 2, generic: true, }, { - name: "LessEqualInt32x4", - argLen: 2, + name: "SubMaskedInt32x16", + argLen: 3, generic: true, }, { - name: "MaskedAbsoluteInt32x4", - argLen: 2, + name: "UnsignedSignedQuadDotProdAccumulateInt32x16", + argLen: 3, generic: true, }, { - name: "MaskedAddInt32x4", - argLen: 3, - commutative: true, - generic: true, + name: "UnsignedSignedQuadDotProdAccumulateMaskedInt32x16", + argLen: 4, + generic: true, }, { - name: "MaskedAndInt32x4", - argLen: 3, + name: "XorInt32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedAndNotInt32x4", - argLen: 3, - generic: true, - }, - { - name: "MaskedEqualInt32x4", + name: "XorMaskedInt32x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterInt32x4", - argLen: 3, - generic: true, - }, - { - name: "MaskedGreaterEqualInt32x4", - argLen: 3, - generic: true, - }, - { - name: "MaskedLessInt32x4", - argLen: 3, + name: "AbsoluteInt32x4", + argLen: 1, generic: true, }, { - name: "MaskedLessEqualInt32x4", - argLen: 3, + name: "AbsoluteMaskedInt32x4", + argLen: 2, generic: true, }, { - name: "MaskedMaxInt32x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedMinInt32x4", - argLen: 3, + name: "AddInt32x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMulLowInt32x4", + name: "AddMaskedInt32x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualInt32x4", - argLen: 3, + name: "AndInt32x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedOrInt32x4", + name: "AndMaskedInt32x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPairDotProdAccumulateInt32x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedPopCountInt32x4", + name: "AndNotInt32x4", argLen: 2, generic: true, }, { - name: "MaskedRotateLeftInt32x4", + name: "AndNotMaskedInt32x4", argLen: 3, generic: true, }, { - name: "MaskedRotateRightInt32x4", - argLen: 3, - generic: true, + name: "EqualInt32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: 
"MaskedSaturatedPairDotProdAccumulateInt32x4", - argLen: 4, + name: "EqualMaskedInt32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "GreaterInt32x4", + argLen: 2, generic: true, }, { - name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4", - argLen: 4, + name: "GreaterEqualInt32x4", + argLen: 2, generic: true, }, { - name: "MaskedShiftLeftInt32x4", + name: "GreaterEqualMaskedInt32x4", argLen: 3, generic: true, }, { - name: "MaskedShiftLeftAndFillUpperFromInt32x4", - argLen: 4, + name: "GreaterMaskedInt32x4", + argLen: 3, generic: true, }, { - name: "MaskedShiftRightInt32x4", - argLen: 3, + name: "LessInt32x4", + argLen: 2, generic: true, }, { - name: "MaskedShiftRightAndFillUpperFromInt32x4", - argLen: 4, + name: "LessEqualInt32x4", + argLen: 2, generic: true, }, { - name: "MaskedShiftRightSignExtendedInt32x4", + name: "LessEqualMaskedInt32x4", argLen: 3, generic: true, }, { - name: "MaskedSubInt32x4", + name: "LessMaskedInt32x4", argLen: 3, generic: true, }, { - name: "MaskedUnsignedSignedQuadDotProdAccumulateInt32x4", - argLen: 4, - generic: true, + name: "MaxInt32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedXorInt32x4", + name: "MaxMaskedInt32x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaxInt32x4", + name: "MinInt32x4", argLen: 2, commutative: true, generic: true, }, { - name: "MinInt32x4", - argLen: 2, + name: "MinMaskedInt32x4", + argLen: 3, commutative: true, generic: true, }, @@ -61629,23 +61540,46 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MulLowMaskedInt32x4", + argLen: 3, + commutative: true, + generic: true, + }, { name: "NotEqualInt32x4", argLen: 2, commutative: true, generic: true, }, + { + name: "NotEqualMaskedInt32x4", + argLen: 3, + commutative: true, + generic: true, + }, { name: "OrInt32x4", argLen: 2, commutative: true, generic: true, }, + { + name: "OrMaskedInt32x4", + argLen: 3, + commutative: true, + generic: true, + }, { name: "PairDotProdAccumulateInt32x4", argLen: 3, generic: true, }, + { + name: "PairDotProdAccumulateMaskedInt32x4", + argLen: 4, + generic: true, + }, { name: "PairwiseAddInt32x4", argLen: 2, @@ -61661,26 +61595,51 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "PopCountMaskedInt32x4", + argLen: 2, + generic: true, + }, { name: "RotateLeftInt32x4", argLen: 2, generic: true, }, + { + name: "RotateLeftMaskedInt32x4", + argLen: 3, + generic: true, + }, { name: "RotateRightInt32x4", argLen: 2, generic: true, }, + { + name: "RotateRightMaskedInt32x4", + argLen: 3, + generic: true, + }, { name: "SaturatedPairDotProdAccumulateInt32x4", argLen: 3, generic: true, }, + { + name: "SaturatedPairDotProdAccumulateMaskedInt32x4", + argLen: 4, + generic: true, + }, { name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x4", argLen: 3, generic: true, }, + { + name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4", + argLen: 4, + generic: true, + }, { name: "ShiftAllLeftInt32x4", argLen: 2, @@ -61707,247 +61666,199 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "ShiftRightInt32x4", - argLen: 2, + name: "ShiftLeftAndFillUpperFromMaskedInt32x4", + argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromInt32x4", + name: "ShiftLeftMaskedInt32x4", argLen: 3, generic: true, }, { - name: "ShiftRightSignExtendedInt32x4", + name: "ShiftRightInt32x4", argLen: 2, generic: true, }, { - name: "SignInt32x4", - argLen: 2, + name: "ShiftRightAndFillUpperFromInt32x4", + argLen: 3, generic: 
true, }, { - name: "SubInt32x4", - argLen: 2, + name: "ShiftRightAndFillUpperFromMaskedInt32x4", + argLen: 4, generic: true, }, { - name: "UnsignedSignedQuadDotProdAccumulateInt32x4", + name: "ShiftRightMaskedInt32x4", argLen: 3, generic: true, }, { - name: "XorInt32x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AbsoluteInt32x8", - argLen: 1, - generic: true, - }, - { - name: "AddInt32x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndInt32x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndNotInt32x8", + name: "ShiftRightSignExtendedInt32x4", argLen: 2, generic: true, }, { - name: "EqualInt32x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "GreaterInt32x8", - argLen: 2, + name: "ShiftRightSignExtendedMaskedInt32x4", + argLen: 3, generic: true, }, { - name: "GreaterEqualInt32x8", + name: "SignInt32x4", argLen: 2, generic: true, }, { - name: "LessInt32x8", + name: "SubInt32x4", argLen: 2, generic: true, }, { - name: "LessEqualInt32x8", - argLen: 2, + name: "SubMaskedInt32x4", + argLen: 3, generic: true, }, { - name: "MaskedAbsoluteInt32x8", - argLen: 2, + name: "UnsignedSignedQuadDotProdAccumulateInt32x4", + argLen: 3, generic: true, }, { - name: "MaskedAddInt32x8", - argLen: 3, - commutative: true, - generic: true, + name: "UnsignedSignedQuadDotProdAccumulateMaskedInt32x4", + argLen: 4, + generic: true, }, { - name: "MaskedAndInt32x8", - argLen: 3, + name: "XorInt32x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedAndNotInt32x8", - argLen: 3, - generic: true, - }, - { - name: "MaskedEqualInt32x8", + name: "XorMaskedInt32x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterInt32x8", - argLen: 3, - generic: true, - }, - { - name: "MaskedGreaterEqualInt32x8", - argLen: 3, - generic: true, - }, - { - name: "MaskedLessInt32x8", - argLen: 3, + name: "AbsoluteInt32x8", + argLen: 1, generic: true, }, { - name: "MaskedLessEqualInt32x8", - argLen: 3, + name: "AbsoluteMaskedInt32x8", + argLen: 2, generic: true, }, { - name: "MaskedMaxInt32x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedMinInt32x8", - argLen: 3, + name: "AddInt32x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMulLowInt32x8", + name: "AddMaskedInt32x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualInt32x8", - argLen: 3, + name: "AndInt32x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedOrInt32x8", + name: "AndMaskedInt32x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPairDotProdAccumulateInt32x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedPopCountInt32x8", + name: "AndNotInt32x8", argLen: 2, generic: true, }, { - name: "MaskedRotateLeftInt32x8", + name: "AndNotMaskedInt32x8", argLen: 3, generic: true, }, { - name: "MaskedRotateRightInt32x8", - argLen: 3, - generic: true, + name: "EqualInt32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedSaturatedPairDotProdAccumulateInt32x8", - argLen: 4, + name: "EqualMaskedInt32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "GreaterInt32x8", + argLen: 2, generic: true, }, { - name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8", - argLen: 4, + name: "GreaterEqualInt32x8", + argLen: 2, generic: true, }, { - name: "MaskedShiftLeftInt32x8", + name: "GreaterEqualMaskedInt32x8", argLen: 3, generic: true, }, { - name: 
"MaskedShiftLeftAndFillUpperFromInt32x8", - argLen: 4, + name: "GreaterMaskedInt32x8", + argLen: 3, generic: true, }, { - name: "MaskedShiftRightInt32x8", - argLen: 3, + name: "LessInt32x8", + argLen: 2, generic: true, }, { - name: "MaskedShiftRightAndFillUpperFromInt32x8", - argLen: 4, + name: "LessEqualInt32x8", + argLen: 2, generic: true, }, { - name: "MaskedShiftRightSignExtendedInt32x8", + name: "LessEqualMaskedInt32x8", argLen: 3, generic: true, }, { - name: "MaskedSubInt32x8", + name: "LessMaskedInt32x8", argLen: 3, generic: true, }, { - name: "MaskedUnsignedSignedQuadDotProdAccumulateInt32x8", - argLen: 4, - generic: true, + name: "MaxInt32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedXorInt32x8", + name: "MaxMaskedInt32x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaxInt32x8", + name: "MinInt32x8", argLen: 2, commutative: true, generic: true, }, { - name: "MinInt32x8", - argLen: 2, + name: "MinMaskedInt32x8", + argLen: 3, commutative: true, generic: true, }, @@ -61963,23 +61874,46 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MulLowMaskedInt32x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "NotEqualInt32x8", argLen: 2, commutative: true, generic: true, }, + { + name: "NotEqualMaskedInt32x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "OrInt32x8", argLen: 2, commutative: true, generic: true, }, + { + name: "OrMaskedInt32x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "PairDotProdAccumulateInt32x8", argLen: 3, generic: true, }, + { + name: "PairDotProdAccumulateMaskedInt32x8", + argLen: 4, + generic: true, + }, { name: "PairwiseAddInt32x8", argLen: 2, @@ -61995,26 +61929,51 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "PopCountMaskedInt32x8", + argLen: 2, + generic: true, + }, { name: "RotateLeftInt32x8", argLen: 2, generic: true, }, + { + name: "RotateLeftMaskedInt32x8", + argLen: 3, + generic: true, + }, { name: "RotateRightInt32x8", argLen: 2, generic: true, }, + { + name: "RotateRightMaskedInt32x8", + argLen: 3, + generic: true, + }, { name: "SaturatedPairDotProdAccumulateInt32x8", argLen: 3, generic: true, }, + { + name: "SaturatedPairDotProdAccumulateMaskedInt32x8", + argLen: 4, + generic: true, + }, { name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x8", argLen: 3, generic: true, }, + { + name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8", + argLen: 4, + generic: true, + }, { name: "ShiftAllLeftInt32x8", argLen: 2, @@ -62041,260 +62000,223 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "ShiftRightInt32x8", - argLen: 2, + name: "ShiftLeftAndFillUpperFromMaskedInt32x8", + argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromInt32x8", + name: "ShiftLeftMaskedInt32x8", argLen: 3, generic: true, }, { - name: "ShiftRightSignExtendedInt32x8", + name: "ShiftRightInt32x8", argLen: 2, generic: true, }, { - name: "SignInt32x8", - argLen: 2, + name: "ShiftRightAndFillUpperFromInt32x8", + argLen: 3, generic: true, }, { - name: "SubInt32x8", - argLen: 2, + name: "ShiftRightAndFillUpperFromMaskedInt32x8", + argLen: 4, generic: true, }, { - name: "UnsignedSignedQuadDotProdAccumulateInt32x8", + name: "ShiftRightMaskedInt32x8", argLen: 3, generic: true, }, { - name: "XorInt32x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AbsoluteInt64x2", - argLen: 1, - generic: true, - }, - { - name: "AddInt64x2", - argLen: 2, - commutative: true, - generic: 
true, - }, - { - name: "AndInt64x2", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndNotInt64x2", + name: "ShiftRightSignExtendedInt32x8", argLen: 2, generic: true, }, { - name: "EqualInt64x2", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "GreaterInt64x2", - argLen: 2, + name: "ShiftRightSignExtendedMaskedInt32x8", + argLen: 3, generic: true, }, { - name: "GreaterEqualInt64x2", + name: "SignInt32x8", argLen: 2, generic: true, }, { - name: "LessInt64x2", + name: "SubInt32x8", argLen: 2, generic: true, }, { - name: "LessEqualInt64x2", - argLen: 2, + name: "SubMaskedInt32x8", + argLen: 3, generic: true, }, { - name: "MaskedAbsoluteInt64x2", - argLen: 2, + name: "UnsignedSignedQuadDotProdAccumulateInt32x8", + argLen: 3, generic: true, }, { - name: "MaskedAddInt64x2", - argLen: 3, - commutative: true, - generic: true, + name: "UnsignedSignedQuadDotProdAccumulateMaskedInt32x8", + argLen: 4, + generic: true, }, { - name: "MaskedAndInt64x2", - argLen: 3, + name: "XorInt32x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedAndNotInt64x2", - argLen: 3, - generic: true, - }, - { - name: "MaskedEqualInt64x2", + name: "XorMaskedInt32x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterInt64x2", - argLen: 3, - generic: true, - }, - { - name: "MaskedGreaterEqualInt64x2", - argLen: 3, - generic: true, - }, - { - name: "MaskedLessInt64x2", - argLen: 3, + name: "AbsoluteInt64x2", + argLen: 1, generic: true, }, { - name: "MaskedLessEqualInt64x2", - argLen: 3, + name: "AbsoluteMaskedInt64x2", + argLen: 2, generic: true, }, { - name: "MaskedMaxInt64x2", - argLen: 3, + name: "AddInt64x2", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMinInt64x2", + name: "AddMaskedInt64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulEvenWidenInt64x2", - argLen: 3, + name: "AndInt64x2", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMulLowInt64x2", + name: "AndMaskedInt64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualInt64x2", - argLen: 3, + name: "AndNotInt64x2", + argLen: 2, + generic: true, + }, + { + name: "AndNotMaskedInt64x2", + argLen: 3, + generic: true, + }, + { + name: "EqualInt64x2", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedOrInt64x2", + name: "EqualMaskedInt64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPopCountInt64x2", + name: "GreaterInt64x2", argLen: 2, generic: true, }, { - name: "MaskedRotateLeftInt64x2", - argLen: 3, + name: "GreaterEqualInt64x2", + argLen: 2, generic: true, }, { - name: "MaskedRotateRightInt64x2", + name: "GreaterEqualMaskedInt64x2", argLen: 3, generic: true, }, { - name: "MaskedShiftAllLeftInt64x2", + name: "GreaterMaskedInt64x2", argLen: 3, generic: true, }, { - name: "MaskedShiftAllRightInt64x2", - argLen: 3, + name: "LessInt64x2", + argLen: 2, generic: true, }, { - name: "MaskedShiftAllRightSignExtendedInt64x2", - argLen: 3, + name: "LessEqualInt64x2", + argLen: 2, generic: true, }, { - name: "MaskedShiftLeftInt64x2", + name: "LessEqualMaskedInt64x2", argLen: 3, generic: true, }, { - name: "MaskedShiftLeftAndFillUpperFromInt64x2", - argLen: 4, - generic: true, - }, - { - name: "MaskedShiftRightInt64x2", + name: "LessMaskedInt64x2", argLen: 3, generic: true, }, { - name: "MaskedShiftRightAndFillUpperFromInt64x2", - argLen: 4, - generic: true, + name: "MaxInt64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: 
"MaskedShiftRightSignExtendedInt64x2", - argLen: 3, - generic: true, + name: "MaxMaskedInt64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaskedSubInt64x2", - argLen: 3, - generic: true, + name: "MinInt64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedXorInt64x2", + name: "MinMaskedInt64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MaxInt64x2", + name: "MulEvenWidenInt64x2", argLen: 2, commutative: true, generic: true, }, { - name: "MinInt64x2", - argLen: 2, + name: "MulEvenWidenMaskedInt64x2", + argLen: 3, commutative: true, generic: true, }, { - name: "MulEvenWidenInt64x2", + name: "MulLowInt64x2", argLen: 2, commutative: true, generic: true, }, { - name: "MulLowInt64x2", - argLen: 2, + name: "MulLowMaskedInt64x2", + argLen: 3, commutative: true, generic: true, }, @@ -62304,279 +62226,261 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "NotEqualMaskedInt64x2", + argLen: 3, + commutative: true, + generic: true, + }, { name: "OrInt64x2", argLen: 2, commutative: true, generic: true, }, + { + name: "OrMaskedInt64x2", + argLen: 3, + commutative: true, + generic: true, + }, { name: "PopCountInt64x2", argLen: 1, generic: true, }, { - name: "RotateLeftInt64x2", + name: "PopCountMaskedInt64x2", argLen: 2, generic: true, }, { - name: "RotateRightInt64x2", + name: "RotateLeftInt64x2", argLen: 2, generic: true, }, { - name: "ShiftAllLeftInt64x2", - argLen: 2, + name: "RotateLeftMaskedInt64x2", + argLen: 3, generic: true, }, { - name: "ShiftAllRightInt64x2", + name: "RotateRightInt64x2", argLen: 2, generic: true, }, { - name: "ShiftAllRightSignExtendedInt64x2", - argLen: 2, + name: "RotateRightMaskedInt64x2", + argLen: 3, generic: true, }, { - name: "ShiftLeftInt64x2", + name: "ShiftAllLeftInt64x2", argLen: 2, generic: true, }, { - name: "ShiftLeftAndFillUpperFromInt64x2", + name: "ShiftAllLeftMaskedInt64x2", argLen: 3, generic: true, }, { - name: "ShiftRightInt64x2", + name: "ShiftAllRightInt64x2", argLen: 2, generic: true, }, { - name: "ShiftRightAndFillUpperFromInt64x2", + name: "ShiftAllRightMaskedInt64x2", argLen: 3, generic: true, }, { - name: "ShiftRightSignExtendedInt64x2", - argLen: 2, - generic: true, - }, - { - name: "SubInt64x2", + name: "ShiftAllRightSignExtendedInt64x2", argLen: 2, generic: true, }, { - name: "XorInt64x2", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AbsoluteInt64x4", - argLen: 1, + name: "ShiftAllRightSignExtendedMaskedInt64x2", + argLen: 3, generic: true, }, { - name: "AddInt64x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndInt64x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndNotInt64x4", + name: "ShiftLeftInt64x2", argLen: 2, generic: true, }, { - name: "EqualInt64x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "GreaterInt64x4", - argLen: 2, + name: "ShiftLeftAndFillUpperFromInt64x2", + argLen: 3, generic: true, }, { - name: "GreaterEqualInt64x4", - argLen: 2, + name: "ShiftLeftAndFillUpperFromMaskedInt64x2", + argLen: 4, generic: true, }, { - name: "LessInt64x4", - argLen: 2, + name: "ShiftLeftMaskedInt64x2", + argLen: 3, generic: true, }, { - name: "LessEqualInt64x4", + name: "ShiftRightInt64x2", argLen: 2, generic: true, }, { - name: "MaskedAbsoluteInt64x4", - argLen: 2, + name: "ShiftRightAndFillUpperFromInt64x2", + argLen: 3, generic: true, }, { - name: "MaskedAddInt64x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: 
"MaskedAndInt64x4", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftRightAndFillUpperFromMaskedInt64x2", + argLen: 4, + generic: true, }, { - name: "MaskedAndNotInt64x4", + name: "ShiftRightMaskedInt64x2", argLen: 3, generic: true, }, { - name: "MaskedEqualInt64x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedGreaterInt64x4", - argLen: 3, + name: "ShiftRightSignExtendedInt64x2", + argLen: 2, generic: true, }, { - name: "MaskedGreaterEqualInt64x4", + name: "ShiftRightSignExtendedMaskedInt64x2", argLen: 3, generic: true, }, { - name: "MaskedLessInt64x4", - argLen: 3, + name: "SubInt64x2", + argLen: 2, generic: true, }, { - name: "MaskedLessEqualInt64x4", + name: "SubMaskedInt64x2", argLen: 3, generic: true, }, { - name: "MaskedMaxInt64x4", - argLen: 3, + name: "XorInt64x2", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMinInt64x4", + name: "XorMaskedInt64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulEvenWidenInt64x4", - argLen: 3, + name: "AbsoluteInt64x4", + argLen: 1, + generic: true, + }, + { + name: "AbsoluteMaskedInt64x4", + argLen: 2, + generic: true, + }, + { + name: "AddInt64x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMulLowInt64x4", + name: "AddMaskedInt64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualInt64x4", - argLen: 3, + name: "AndInt64x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedOrInt64x4", + name: "AndMaskedInt64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPopCountInt64x4", + name: "AndNotInt64x4", argLen: 2, generic: true, }, { - name: "MaskedRotateLeftInt64x4", + name: "AndNotMaskedInt64x4", argLen: 3, generic: true, }, { - name: "MaskedRotateRightInt64x4", - argLen: 3, - generic: true, + name: "EqualInt64x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedShiftAllLeftInt64x4", - argLen: 3, - generic: true, + name: "EqualMaskedInt64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaskedShiftAllRightInt64x4", - argLen: 3, + name: "GreaterInt64x4", + argLen: 2, generic: true, }, { - name: "MaskedShiftAllRightSignExtendedInt64x4", - argLen: 3, + name: "GreaterEqualInt64x4", + argLen: 2, generic: true, }, { - name: "MaskedShiftLeftInt64x4", + name: "GreaterEqualMaskedInt64x4", argLen: 3, generic: true, }, { - name: "MaskedShiftLeftAndFillUpperFromInt64x4", - argLen: 4, + name: "GreaterMaskedInt64x4", + argLen: 3, generic: true, }, { - name: "MaskedShiftRightInt64x4", - argLen: 3, + name: "LessInt64x4", + argLen: 2, generic: true, }, { - name: "MaskedShiftRightAndFillUpperFromInt64x4", - argLen: 4, + name: "LessEqualInt64x4", + argLen: 2, generic: true, }, { - name: "MaskedShiftRightSignExtendedInt64x4", + name: "LessEqualMaskedInt64x4", argLen: 3, generic: true, }, { - name: "MaskedSubInt64x4", + name: "LessMaskedInt64x4", argLen: 3, generic: true, }, { - name: "MaskedXorInt64x4", - argLen: 3, + name: "MaxInt64x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MaxInt64x4", - argLen: 2, + name: "MaxMaskedInt64x4", + argLen: 3, commutative: true, generic: true, }, @@ -62586,315 +62490,333 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MinMaskedInt64x4", + argLen: 3, + commutative: true, + generic: true, + }, { name: "MulEvenWidenInt64x4", argLen: 2, commutative: true, generic: true, }, + { + name: "MulEvenWidenMaskedInt64x4", + argLen: 3, + commutative: true, + generic: true, + }, 
{ name: "MulLowInt64x4", argLen: 2, commutative: true, generic: true, }, + { + name: "MulLowMaskedInt64x4", + argLen: 3, + commutative: true, + generic: true, + }, { name: "NotEqualInt64x4", argLen: 2, commutative: true, generic: true, }, + { + name: "NotEqualMaskedInt64x4", + argLen: 3, + commutative: true, + generic: true, + }, { name: "OrInt64x4", argLen: 2, commutative: true, generic: true, }, + { + name: "OrMaskedInt64x4", + argLen: 3, + commutative: true, + generic: true, + }, { name: "PopCountInt64x4", argLen: 1, generic: true, }, { - name: "RotateLeftInt64x4", + name: "PopCountMaskedInt64x4", argLen: 2, generic: true, }, { - name: "RotateRightInt64x4", + name: "RotateLeftInt64x4", argLen: 2, generic: true, }, { - name: "ShiftAllLeftInt64x4", - argLen: 2, + name: "RotateLeftMaskedInt64x4", + argLen: 3, generic: true, }, { - name: "ShiftAllRightInt64x4", + name: "RotateRightInt64x4", argLen: 2, generic: true, }, { - name: "ShiftAllRightSignExtendedInt64x4", - argLen: 2, + name: "RotateRightMaskedInt64x4", + argLen: 3, generic: true, }, { - name: "ShiftLeftInt64x4", + name: "ShiftAllLeftInt64x4", argLen: 2, generic: true, }, { - name: "ShiftLeftAndFillUpperFromInt64x4", + name: "ShiftAllLeftMaskedInt64x4", argLen: 3, generic: true, }, { - name: "ShiftRightInt64x4", + name: "ShiftAllRightInt64x4", argLen: 2, generic: true, }, { - name: "ShiftRightAndFillUpperFromInt64x4", + name: "ShiftAllRightMaskedInt64x4", argLen: 3, generic: true, }, { - name: "ShiftRightSignExtendedInt64x4", + name: "ShiftAllRightSignExtendedInt64x4", argLen: 2, generic: true, }, { - name: "SubInt64x4", - argLen: 2, + name: "ShiftAllRightSignExtendedMaskedInt64x4", + argLen: 3, generic: true, }, { - name: "XorInt64x4", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftLeftInt64x4", + argLen: 2, + generic: true, }, { - name: "AbsoluteInt64x8", - argLen: 1, + name: "ShiftLeftAndFillUpperFromInt64x4", + argLen: 3, generic: true, }, { - name: "AddInt64x8", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftLeftAndFillUpperFromMaskedInt64x4", + argLen: 4, + generic: true, }, { - name: "AndInt64x8", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftLeftMaskedInt64x4", + argLen: 3, + generic: true, }, { - name: "AndNotInt64x8", + name: "ShiftRightInt64x4", argLen: 2, generic: true, }, { - name: "EqualInt64x8", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightAndFillUpperFromInt64x4", + argLen: 3, + generic: true, }, { - name: "GreaterInt64x8", - argLen: 2, + name: "ShiftRightAndFillUpperFromMaskedInt64x4", + argLen: 4, generic: true, }, { - name: "GreaterEqualInt64x8", - argLen: 2, + name: "ShiftRightMaskedInt64x4", + argLen: 3, generic: true, }, { - name: "LessInt64x8", + name: "ShiftRightSignExtendedInt64x4", argLen: 2, generic: true, }, { - name: "LessEqualInt64x8", - argLen: 2, + name: "ShiftRightSignExtendedMaskedInt64x4", + argLen: 3, generic: true, }, { - name: "MaskedAbsoluteInt64x8", + name: "SubInt64x4", argLen: 2, generic: true, }, { - name: "MaskedAddInt64x8", - argLen: 3, - commutative: true, - generic: true, + name: "SubMaskedInt64x4", + argLen: 3, + generic: true, }, { - name: "MaskedAndInt64x8", - argLen: 3, + name: "XorInt64x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedAndNotInt64x8", - argLen: 3, - generic: true, - }, - { - name: "MaskedEqualInt64x8", + name: "XorMaskedInt64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterInt64x8", - argLen: 3, - generic: true, - }, - { - name: 
"MaskedGreaterEqualInt64x8", - argLen: 3, - generic: true, - }, - { - name: "MaskedLessInt64x8", - argLen: 3, + name: "AbsoluteInt64x8", + argLen: 1, generic: true, }, { - name: "MaskedLessEqualInt64x8", - argLen: 3, + name: "AbsoluteMaskedInt64x8", + argLen: 2, generic: true, }, { - name: "MaskedMaxInt64x8", - argLen: 3, + name: "AddInt64x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMinInt64x8", + name: "AddMaskedInt64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulEvenWidenInt64x8", - argLen: 3, + name: "AndInt64x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMulLowInt64x8", + name: "AndMaskedInt64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualInt64x8", - argLen: 3, + name: "AndNotInt64x8", + argLen: 2, + generic: true, + }, + { + name: "AndNotMaskedInt64x8", + argLen: 3, + generic: true, + }, + { + name: "EqualInt64x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedOrInt64x8", + name: "EqualMaskedInt64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPopCountInt64x8", + name: "GreaterInt64x8", argLen: 2, generic: true, }, { - name: "MaskedRotateLeftInt64x8", - argLen: 3, + name: "GreaterEqualInt64x8", + argLen: 2, generic: true, }, { - name: "MaskedRotateRightInt64x8", + name: "GreaterEqualMaskedInt64x8", argLen: 3, generic: true, }, { - name: "MaskedShiftAllLeftInt64x8", + name: "GreaterMaskedInt64x8", argLen: 3, generic: true, }, { - name: "MaskedShiftAllRightInt64x8", - argLen: 3, + name: "LessInt64x8", + argLen: 2, generic: true, }, { - name: "MaskedShiftAllRightSignExtendedInt64x8", - argLen: 3, + name: "LessEqualInt64x8", + argLen: 2, generic: true, }, { - name: "MaskedShiftLeftInt64x8", + name: "LessEqualMaskedInt64x8", argLen: 3, generic: true, }, { - name: "MaskedShiftLeftAndFillUpperFromInt64x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedShiftRightInt64x8", + name: "LessMaskedInt64x8", argLen: 3, generic: true, }, { - name: "MaskedShiftRightAndFillUpperFromInt64x8", - argLen: 4, - generic: true, + name: "MaxInt64x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedShiftRightSignExtendedInt64x8", - argLen: 3, - generic: true, + name: "MaxMaskedInt64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaskedSubInt64x8", - argLen: 3, - generic: true, + name: "MinInt64x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedXorInt64x8", + name: "MinMaskedInt64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaxInt64x8", + name: "MulEvenWidenInt64x8", argLen: 2, commutative: true, generic: true, }, { - name: "MinInt64x8", - argLen: 2, + name: "MulEvenWidenMaskedInt64x8", + argLen: 3, commutative: true, generic: true, }, { - name: "MulEvenWidenInt64x8", + name: "MulLowInt64x8", argLen: 2, commutative: true, generic: true, }, { - name: "MulLowInt64x8", - argLen: 2, + name: "MulLowMaskedInt64x8", + argLen: 3, commutative: true, generic: true, }, @@ -62904,42 +62826,84 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "NotEqualMaskedInt64x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "OrInt64x8", argLen: 2, commutative: true, generic: true, }, + { + name: "OrMaskedInt64x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "PopCountInt64x8", argLen: 1, generic: true, }, + { + name: "PopCountMaskedInt64x8", + argLen: 2, + generic: true, + }, { name: "RotateLeftInt64x8", argLen: 2, generic: 
true, }, + { + name: "RotateLeftMaskedInt64x8", + argLen: 3, + generic: true, + }, { name: "RotateRightInt64x8", argLen: 2, generic: true, }, + { + name: "RotateRightMaskedInt64x8", + argLen: 3, + generic: true, + }, { name: "ShiftAllLeftInt64x8", argLen: 2, generic: true, }, + { + name: "ShiftAllLeftMaskedInt64x8", + argLen: 3, + generic: true, + }, { name: "ShiftAllRightInt64x8", argLen: 2, generic: true, }, + { + name: "ShiftAllRightMaskedInt64x8", + argLen: 3, + generic: true, + }, { name: "ShiftAllRightSignExtendedInt64x8", argLen: 2, generic: true, }, + { + name: "ShiftAllRightSignExtendedMaskedInt64x8", + argLen: 3, + generic: true, + }, { name: "ShiftLeftInt64x8", argLen: 2, @@ -62950,6 +62914,16 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "ShiftLeftAndFillUpperFromMaskedInt64x8", + argLen: 4, + generic: true, + }, + { + name: "ShiftLeftMaskedInt64x8", + argLen: 3, + generic: true, + }, { name: "ShiftRightInt64x8", argLen: 2, @@ -62960,33 +62934,70 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "ShiftRightAndFillUpperFromMaskedInt64x8", + argLen: 4, + generic: true, + }, + { + name: "ShiftRightMaskedInt64x8", + argLen: 3, + generic: true, + }, { name: "ShiftRightSignExtendedInt64x8", argLen: 2, generic: true, }, + { + name: "ShiftRightSignExtendedMaskedInt64x8", + argLen: 3, + generic: true, + }, { name: "SubInt64x8", argLen: 2, generic: true, }, + { + name: "SubMaskedInt64x8", + argLen: 3, + generic: true, + }, { name: "XorInt64x8", argLen: 2, commutative: true, generic: true, }, + { + name: "XorMaskedInt64x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "AbsoluteInt8x16", argLen: 1, generic: true, }, + { + name: "AbsoluteMaskedInt8x16", + argLen: 2, + generic: true, + }, { name: "AddInt8x16", argLen: 2, commutative: true, generic: true, }, + { + name: "AddMaskedInt8x16", + argLen: 3, + commutative: true, + generic: true, + }, { name: "AndInt8x16", argLen: 2, @@ -63004,6 +63015,12 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "EqualMaskedInt8x16", + argLen: 3, + commutative: true, + generic: true, + }, { name: "GreaterInt8x16", argLen: 2, @@ -63015,106 +63032,68 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "LessInt8x16", - argLen: 2, + name: "GreaterEqualMaskedInt8x16", + argLen: 3, generic: true, }, { - name: "LessEqualInt8x16", - argLen: 2, + name: "GreaterMaskedInt8x16", + argLen: 3, generic: true, }, { - name: "MaskedAbsoluteInt8x16", + name: "LessInt8x16", argLen: 2, generic: true, }, { - name: "MaskedAddInt8x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedEqualInt8x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedGreaterInt8x16", - argLen: 3, - generic: true, - }, - { - name: "MaskedGreaterEqualInt8x16", - argLen: 3, + name: "LessEqualInt8x16", + argLen: 2, generic: true, }, { - name: "MaskedLessInt8x16", + name: "LessEqualMaskedInt8x16", argLen: 3, generic: true, }, { - name: "MaskedLessEqualInt8x16", + name: "LessMaskedInt8x16", argLen: 3, generic: true, }, { - name: "MaskedMaxInt8x16", - argLen: 3, + name: "MaxInt8x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMinInt8x16", + name: "MaxMaskedInt8x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualInt8x16", - argLen: 3, + name: "MinInt8x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedPopCountInt8x16", - argLen: 2, - generic: true, - }, - { - name: 
"MaskedSaturatedAddInt8x16", + name: "MinMaskedInt8x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedSaturatedSubInt8x16", - argLen: 3, - generic: true, - }, - { - name: "MaskedSubInt8x16", - argLen: 3, - generic: true, - }, - { - name: "MaxInt8x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "MinInt8x16", + name: "NotEqualInt8x16", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualInt8x16", - argLen: 2, + name: "NotEqualMaskedInt8x16", + argLen: 3, commutative: true, generic: true, }, @@ -63129,17 +63108,33 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "PopCountMaskedInt8x16", + argLen: 2, + generic: true, + }, { name: "SaturatedAddInt8x16", argLen: 2, commutative: true, generic: true, }, + { + name: "SaturatedAddMaskedInt8x16", + argLen: 3, + commutative: true, + generic: true, + }, { name: "SaturatedSubInt8x16", argLen: 2, generic: true, }, + { + name: "SaturatedSubMaskedInt8x16", + argLen: 3, + generic: true, + }, { name: "SignInt8x16", argLen: 2, @@ -63150,6 +63145,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "SubMaskedInt8x16", + argLen: 3, + generic: true, + }, { name: "XorInt8x16", argLen: 2, @@ -63161,12 +63161,23 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "AbsoluteMaskedInt8x32", + argLen: 2, + generic: true, + }, { name: "AddInt8x32", argLen: 2, commutative: true, generic: true, }, + { + name: "AddMaskedInt8x32", + argLen: 3, + commutative: true, + generic: true, + }, { name: "AndInt8x32", argLen: 2, @@ -63184,6 +63195,12 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "EqualMaskedInt8x32", + argLen: 3, + commutative: true, + generic: true, + }, { name: "GreaterInt8x32", argLen: 2, @@ -63195,106 +63212,68 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "LessInt8x32", - argLen: 2, + name: "GreaterEqualMaskedInt8x32", + argLen: 3, generic: true, }, { - name: "LessEqualInt8x32", - argLen: 2, + name: "GreaterMaskedInt8x32", + argLen: 3, generic: true, }, { - name: "MaskedAbsoluteInt8x32", + name: "LessInt8x32", argLen: 2, generic: true, }, { - name: "MaskedAddInt8x32", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedEqualInt8x32", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedGreaterInt8x32", - argLen: 3, - generic: true, - }, - { - name: "MaskedGreaterEqualInt8x32", - argLen: 3, + name: "LessEqualInt8x32", + argLen: 2, generic: true, }, { - name: "MaskedLessInt8x32", + name: "LessEqualMaskedInt8x32", argLen: 3, generic: true, }, { - name: "MaskedLessEqualInt8x32", + name: "LessMaskedInt8x32", argLen: 3, generic: true, }, { - name: "MaskedMaxInt8x32", - argLen: 3, + name: "MaxInt8x32", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMinInt8x32", + name: "MaxMaskedInt8x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualInt8x32", - argLen: 3, + name: "MinInt8x32", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedPopCountInt8x32", - argLen: 2, - generic: true, - }, - { - name: "MaskedSaturatedAddInt8x32", + name: "MinMaskedInt8x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedSaturatedSubInt8x32", - argLen: 3, - generic: true, - }, - { - name: "MaskedSubInt8x32", - argLen: 3, - generic: true, - }, - { - name: "MaxInt8x32", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "MinInt8x32", + name: "NotEqualInt8x32", argLen: 2, 
commutative: true, generic: true, }, { - name: "NotEqualInt8x32", - argLen: 2, + name: "NotEqualMaskedInt8x32", + argLen: 3, commutative: true, generic: true, }, @@ -63309,17 +63288,33 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "PopCountMaskedInt8x32", + argLen: 2, + generic: true, + }, { name: "SaturatedAddInt8x32", argLen: 2, commutative: true, generic: true, }, + { + name: "SaturatedAddMaskedInt8x32", + argLen: 3, + commutative: true, + generic: true, + }, { name: "SaturatedSubInt8x32", argLen: 2, generic: true, }, + { + name: "SaturatedSubMaskedInt8x32", + argLen: 3, + generic: true, + }, { name: "SignInt8x32", argLen: 2, @@ -63330,6 +63325,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "SubMaskedInt8x32", + argLen: 3, + generic: true, + }, { name: "XorInt8x32", argLen: 2, @@ -63341,6 +63341,11 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "AbsoluteMaskedInt8x64", + argLen: 2, + generic: true, + }, { name: "AddInt8x64", argLen: 2, @@ -63348,104 +63353,60 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "EqualInt8x64", - argLen: 2, + name: "AddMaskedInt8x64", + argLen: 3, commutative: true, generic: true, }, { - name: "GreaterInt8x64", - argLen: 2, - generic: true, - }, - { - name: "GreaterEqualInt8x64", - argLen: 2, - generic: true, - }, - { - name: "LessInt8x64", - argLen: 2, - generic: true, - }, - { - name: "LessEqualInt8x64", - argLen: 2, - generic: true, - }, - { - name: "MaskedAbsoluteInt8x64", - argLen: 2, - generic: true, - }, - { - name: "MaskedAddInt8x64", - argLen: 3, + name: "EqualInt8x64", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedEqualInt8x64", + name: "EqualMaskedInt8x64", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterInt8x64", - argLen: 3, + name: "GreaterInt8x64", + argLen: 2, generic: true, }, { - name: "MaskedGreaterEqualInt8x64", - argLen: 3, + name: "GreaterEqualInt8x64", + argLen: 2, generic: true, }, { - name: "MaskedLessInt8x64", + name: "GreaterEqualMaskedInt8x64", argLen: 3, generic: true, }, { - name: "MaskedLessEqualInt8x64", + name: "GreaterMaskedInt8x64", argLen: 3, generic: true, }, { - name: "MaskedMaxInt8x64", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedMinInt8x64", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedNotEqualInt8x64", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedPopCountInt8x64", + name: "LessInt8x64", argLen: 2, generic: true, }, { - name: "MaskedSaturatedAddInt8x64", - argLen: 3, - commutative: true, - generic: true, + name: "LessEqualInt8x64", + argLen: 2, + generic: true, }, { - name: "MaskedSaturatedSubInt8x64", + name: "LessEqualMaskedInt8x64", argLen: 3, generic: true, }, { - name: "MaskedSubInt8x64", + name: "LessMaskedInt8x64", argLen: 3, generic: true, }, @@ -63456,192 +63417,161 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MinInt8x64", - argLen: 2, + name: "MaxMaskedInt8x64", + argLen: 3, commutative: true, generic: true, }, { - name: "NotEqualInt8x64", + name: "MinInt8x64", argLen: 2, commutative: true, generic: true, }, { - name: "PopCountInt8x64", - argLen: 1, - generic: true, - }, - { - name: "SaturatedAddInt8x64", - argLen: 2, + name: "MinMaskedInt8x64", + argLen: 3, commutative: true, generic: true, }, { - name: "SaturatedSubInt8x64", - argLen: 2, - generic: true, - }, - { - name: "SubInt8x64", - argLen: 2, - generic: true, - }, - { - name: "AddUint16x16", + name: 
"NotEqualInt8x64", argLen: 2, commutative: true, generic: true, }, { - name: "AndUint16x16", - argLen: 2, + name: "NotEqualMaskedInt8x64", + argLen: 3, commutative: true, generic: true, }, { - name: "AndNotUint16x16", + name: "PopCountInt8x64", + argLen: 1, + generic: true, + }, + { + name: "PopCountMaskedInt8x64", argLen: 2, generic: true, }, { - name: "AverageUint16x16", + name: "SaturatedAddInt8x64", argLen: 2, commutative: true, generic: true, }, { - name: "EqualUint16x16", - argLen: 2, + name: "SaturatedAddMaskedInt8x64", + argLen: 3, commutative: true, generic: true, }, { - name: "GreaterUint16x16", + name: "SaturatedSubInt8x64", argLen: 2, generic: true, }, { - name: "GreaterEqualUint16x16", - argLen: 2, + name: "SaturatedSubMaskedInt8x64", + argLen: 3, generic: true, }, { - name: "LessUint16x16", + name: "SubInt8x64", argLen: 2, generic: true, }, { - name: "LessEqualUint16x16", - argLen: 2, + name: "SubMaskedInt8x64", + argLen: 3, generic: true, }, { - name: "MaskedAddUint16x16", - argLen: 3, + name: "AddUint16x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedAverageUint16x16", + name: "AddMaskedUint16x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedEqualUint16x16", - argLen: 3, + name: "AndUint16x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedGreaterUint16x16", - argLen: 3, - generic: true, - }, - { - name: "MaskedGreaterEqualUint16x16", - argLen: 3, - generic: true, - }, - { - name: "MaskedLessUint16x16", - argLen: 3, - generic: true, - }, - { - name: "MaskedLessEqualUint16x16", - argLen: 3, + name: "AndNotUint16x16", + argLen: 2, generic: true, }, { - name: "MaskedMaxUint16x16", - argLen: 3, + name: "AverageUint16x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMinUint16x16", + name: "AverageMaskedUint16x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulHighUint16x16", - argLen: 3, + name: "EqualUint16x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedNotEqualUint16x16", + name: "EqualMaskedUint16x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPopCountUint16x16", + name: "GreaterUint16x16", argLen: 2, generic: true, }, { - name: "MaskedSaturatedAddUint16x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedSaturatedSubUint16x16", - argLen: 3, + name: "GreaterEqualUint16x16", + argLen: 2, generic: true, }, { - name: "MaskedShiftLeftUint16x16", + name: "GreaterEqualMaskedUint16x16", argLen: 3, generic: true, }, { - name: "MaskedShiftLeftAndFillUpperFromUint16x16", - argLen: 4, + name: "GreaterMaskedUint16x16", + argLen: 3, generic: true, }, { - name: "MaskedShiftRightUint16x16", - argLen: 3, + name: "LessUint16x16", + argLen: 2, generic: true, }, { - name: "MaskedShiftRightAndFillUpperFromUint16x16", - argLen: 4, + name: "LessEqualUint16x16", + argLen: 2, generic: true, }, { - name: "MaskedShiftRightSignExtendedUint16x16", + name: "LessEqualMaskedUint16x16", argLen: 3, generic: true, }, { - name: "MaskedSubUint16x16", + name: "LessMaskedUint16x16", argLen: 3, generic: true, }, @@ -63651,24 +63581,48 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MaxMaskedUint16x16", + argLen: 3, + commutative: true, + generic: true, + }, { name: "MinUint16x16", argLen: 2, commutative: true, generic: true, }, + { + name: "MinMaskedUint16x16", + argLen: 3, + commutative: true, + generic: true, + }, { name: "MulHighUint16x16", argLen: 2, commutative: true, generic: true, }, + { 
+ name: "MulHighMaskedUint16x16", + argLen: 3, + commutative: true, + generic: true, + }, { name: "NotEqualUint16x16", argLen: 2, commutative: true, generic: true, }, + { + name: "NotEqualMaskedUint16x16", + argLen: 3, + commutative: true, + generic: true, + }, { name: "OrUint16x16", argLen: 2, @@ -63690,17 +63644,33 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "PopCountMaskedUint16x16", + argLen: 2, + generic: true, + }, { name: "SaturatedAddUint16x16", argLen: 2, commutative: true, generic: true, }, + { + name: "SaturatedAddMaskedUint16x16", + argLen: 3, + commutative: true, + generic: true, + }, { name: "SaturatedSubUint16x16", argLen: 2, generic: true, }, + { + name: "SaturatedSubMaskedUint16x16", + argLen: 3, + generic: true, + }, { name: "ShiftAllLeftUint16x16", argLen: 2, @@ -63722,174 +63692,134 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "ShiftRightUint16x16", - argLen: 2, + name: "ShiftLeftAndFillUpperFromMaskedUint16x16", + argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint16x16", + name: "ShiftLeftMaskedUint16x16", argLen: 3, generic: true, }, { - name: "ShiftRightSignExtendedUint16x16", + name: "ShiftRightUint16x16", argLen: 2, generic: true, }, { - name: "SubUint16x16", - argLen: 2, + name: "ShiftRightAndFillUpperFromUint16x16", + argLen: 3, generic: true, }, { - name: "XorUint16x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AddUint16x32", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AverageUint16x32", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightAndFillUpperFromMaskedUint16x16", + argLen: 4, + generic: true, }, { - name: "EqualUint16x32", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightMaskedUint16x16", + argLen: 3, + generic: true, }, { - name: "GreaterUint16x32", + name: "ShiftRightSignExtendedUint16x16", argLen: 2, generic: true, }, { - name: "GreaterEqualUint16x32", - argLen: 2, + name: "ShiftRightSignExtendedMaskedUint16x16", + argLen: 3, generic: true, }, { - name: "LessUint16x32", + name: "SubUint16x16", argLen: 2, generic: true, }, { - name: "LessEqualUint16x32", - argLen: 2, + name: "SubMaskedUint16x16", + argLen: 3, generic: true, }, { - name: "MaskedAddUint16x32", - argLen: 3, + name: "XorUint16x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedAverageUint16x32", - argLen: 3, + name: "AddUint16x32", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedEqualUint16x32", + name: "AddMaskedUint16x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterUint16x32", - argLen: 3, - generic: true, - }, - { - name: "MaskedGreaterEqualUint16x32", - argLen: 3, - generic: true, - }, - { - name: "MaskedLessUint16x32", - argLen: 3, - generic: true, - }, - { - name: "MaskedLessEqualUint16x32", - argLen: 3, - generic: true, - }, - { - name: "MaskedMaxUint16x32", - argLen: 3, + name: "AverageUint16x32", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMinUint16x32", + name: "AverageMaskedUint16x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulHighUint16x32", - argLen: 3, + name: "EqualUint16x32", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedNotEqualUint16x32", + name: "EqualMaskedUint16x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPopCountUint16x32", + name: "GreaterUint16x32", argLen: 2, generic: true, }, { - name: "MaskedSaturatedAddUint16x32", - argLen: 3, - commutative: 
true, - generic: true, - }, - { - name: "MaskedSaturatedSubUint16x32", - argLen: 3, + name: "GreaterEqualUint16x32", + argLen: 2, generic: true, }, { - name: "MaskedShiftLeftUint16x32", + name: "GreaterEqualMaskedUint16x32", argLen: 3, generic: true, }, { - name: "MaskedShiftLeftAndFillUpperFromUint16x32", - argLen: 4, + name: "GreaterMaskedUint16x32", + argLen: 3, generic: true, }, { - name: "MaskedShiftRightUint16x32", - argLen: 3, + name: "LessUint16x32", + argLen: 2, generic: true, }, { - name: "MaskedShiftRightAndFillUpperFromUint16x32", - argLen: 4, + name: "LessEqualUint16x32", + argLen: 2, generic: true, }, { - name: "MaskedShiftRightSignExtendedUint16x32", + name: "LessEqualMaskedUint16x32", argLen: 3, generic: true, }, { - name: "MaskedSubUint16x32", + name: "LessMaskedUint16x32", argLen: 3, generic: true, }, @@ -63899,40 +63829,80 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MaxMaskedUint16x32", + argLen: 3, + commutative: true, + generic: true, + }, { name: "MinUint16x32", argLen: 2, commutative: true, generic: true, }, + { + name: "MinMaskedUint16x32", + argLen: 3, + commutative: true, + generic: true, + }, { name: "MulHighUint16x32", argLen: 2, commutative: true, generic: true, }, + { + name: "MulHighMaskedUint16x32", + argLen: 3, + commutative: true, + generic: true, + }, { name: "NotEqualUint16x32", argLen: 2, commutative: true, generic: true, }, + { + name: "NotEqualMaskedUint16x32", + argLen: 3, + commutative: true, + generic: true, + }, { name: "PopCountUint16x32", argLen: 1, generic: true, }, + { + name: "PopCountMaskedUint16x32", + argLen: 2, + generic: true, + }, { name: "SaturatedAddUint16x32", argLen: 2, commutative: true, generic: true, }, + { + name: "SaturatedAddMaskedUint16x32", + argLen: 3, + commutative: true, + generic: true, + }, { name: "SaturatedSubUint16x32", argLen: 2, generic: true, }, + { + name: "SaturatedSubMaskedUint16x32", + argLen: 3, + generic: true, + }, { name: "ShiftLeftUint16x32", argLen: 2, @@ -63944,179 +63914,139 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "ShiftRightUint16x32", - argLen: 2, + name: "ShiftLeftAndFillUpperFromMaskedUint16x32", + argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint16x32", + name: "ShiftLeftMaskedUint16x32", argLen: 3, generic: true, }, { - name: "ShiftRightSignExtendedUint16x32", + name: "ShiftRightUint16x32", argLen: 2, generic: true, }, { - name: "SubUint16x32", - argLen: 2, + name: "ShiftRightAndFillUpperFromUint16x32", + argLen: 3, generic: true, }, { - name: "AddUint16x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndUint16x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndNotUint16x8", - argLen: 2, + name: "ShiftRightAndFillUpperFromMaskedUint16x32", + argLen: 4, generic: true, }, { - name: "AverageUint16x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "EqualUint16x8", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightMaskedUint16x32", + argLen: 3, + generic: true, }, { - name: "GreaterUint16x8", + name: "ShiftRightSignExtendedUint16x32", argLen: 2, generic: true, }, { - name: "GreaterEqualUint16x8", - argLen: 2, + name: "ShiftRightSignExtendedMaskedUint16x32", + argLen: 3, generic: true, }, { - name: "LessUint16x8", + name: "SubUint16x32", argLen: 2, generic: true, }, { - name: "LessEqualUint16x8", - argLen: 2, + name: "SubMaskedUint16x32", + argLen: 3, generic: true, }, { - name: "MaskedAddUint16x8", - argLen: 3, + name: 
"AddUint16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedAverageUint16x8", + name: "AddMaskedUint16x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedEqualUint16x8", - argLen: 3, + name: "AndUint16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedGreaterUint16x8", - argLen: 3, - generic: true, - }, - { - name: "MaskedGreaterEqualUint16x8", - argLen: 3, - generic: true, - }, - { - name: "MaskedLessUint16x8", - argLen: 3, - generic: true, - }, - { - name: "MaskedLessEqualUint16x8", - argLen: 3, + name: "AndNotUint16x8", + argLen: 2, generic: true, }, { - name: "MaskedMaxUint16x8", - argLen: 3, + name: "AverageUint16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMinUint16x8", + name: "AverageMaskedUint16x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulHighUint16x8", - argLen: 3, + name: "EqualUint16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedNotEqualUint16x8", + name: "EqualMaskedUint16x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPopCountUint16x8", + name: "GreaterUint16x8", argLen: 2, generic: true, }, { - name: "MaskedSaturatedAddUint16x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedSaturatedSubUint16x8", - argLen: 3, + name: "GreaterEqualUint16x8", + argLen: 2, generic: true, }, { - name: "MaskedShiftLeftUint16x8", + name: "GreaterEqualMaskedUint16x8", argLen: 3, generic: true, }, { - name: "MaskedShiftLeftAndFillUpperFromUint16x8", - argLen: 4, + name: "GreaterMaskedUint16x8", + argLen: 3, generic: true, }, { - name: "MaskedShiftRightUint16x8", - argLen: 3, + name: "LessUint16x8", + argLen: 2, generic: true, }, { - name: "MaskedShiftRightAndFillUpperFromUint16x8", - argLen: 4, + name: "LessEqualUint16x8", + argLen: 2, generic: true, }, { - name: "MaskedShiftRightSignExtendedUint16x8", + name: "LessEqualMaskedUint16x8", argLen: 3, generic: true, }, { - name: "MaskedSubUint16x8", + name: "LessMaskedUint16x8", argLen: 3, generic: true, }, @@ -64126,24 +64056,48 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MaxMaskedUint16x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "MinUint16x8", argLen: 2, commutative: true, generic: true, }, + { + name: "MinMaskedUint16x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "MulHighUint16x8", argLen: 2, commutative: true, generic: true, }, + { + name: "MulHighMaskedUint16x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "NotEqualUint16x8", argLen: 2, commutative: true, generic: true, }, + { + name: "NotEqualMaskedUint16x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "OrUint16x8", argLen: 2, @@ -64165,17 +64119,33 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "PopCountMaskedUint16x8", + argLen: 2, + generic: true, + }, { name: "SaturatedAddUint16x8", argLen: 2, commutative: true, generic: true, }, + { + name: "SaturatedAddMaskedUint16x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "SaturatedSubUint16x8", argLen: 2, generic: true, }, + { + name: "SaturatedSubMaskedUint16x8", + argLen: 3, + generic: true, + }, { name: "ShiftAllLeftUint16x8", argLen: 2, @@ -64197,211 +64167,168 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "ShiftRightUint16x8", - argLen: 2, + name: "ShiftLeftAndFillUpperFromMaskedUint16x8", + argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint16x8", + 
name: "ShiftLeftMaskedUint16x8", argLen: 3, generic: true, }, { - name: "ShiftRightSignExtendedUint16x8", + name: "ShiftRightUint16x8", argLen: 2, generic: true, }, { - name: "SubUint16x8", - argLen: 2, + name: "ShiftRightAndFillUpperFromUint16x8", + argLen: 3, generic: true, }, { - name: "XorUint16x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AddUint32x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndUint32x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndNotUint32x16", - argLen: 2, + name: "ShiftRightAndFillUpperFromMaskedUint16x8", + argLen: 4, generic: true, }, { - name: "EqualUint32x16", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightMaskedUint16x8", + argLen: 3, + generic: true, }, { - name: "GreaterUint32x16", + name: "ShiftRightSignExtendedUint16x8", argLen: 2, generic: true, }, { - name: "GreaterEqualUint32x16", - argLen: 2, + name: "ShiftRightSignExtendedMaskedUint16x8", + argLen: 3, generic: true, }, { - name: "LessUint32x16", + name: "SubUint16x8", argLen: 2, generic: true, }, { - name: "LessEqualUint32x16", - argLen: 2, + name: "SubMaskedUint16x8", + argLen: 3, generic: true, }, { - name: "MaskedAddUint32x16", - argLen: 3, + name: "XorUint16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedAndUint32x16", - argLen: 3, + name: "AddUint32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedAndNotUint32x16", - argLen: 3, - generic: true, - }, - { - name: "MaskedEqualUint32x16", + name: "AddMaskedUint32x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterUint32x16", - argLen: 3, - generic: true, + name: "AndUint32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedGreaterEqualUint32x16", - argLen: 3, - generic: true, + name: "AndMaskedUint32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaskedLessUint32x16", - argLen: 3, + name: "AndNotUint32x16", + argLen: 2, generic: true, }, { - name: "MaskedLessEqualUint32x16", + name: "AndNotMaskedUint32x16", argLen: 3, generic: true, }, { - name: "MaskedMaxUint32x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedMinUint32x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedNotEqualUint32x16", - argLen: 3, + name: "EqualUint32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedOrUint32x16", + name: "EqualMaskedUint32x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPopCountUint32x16", + name: "GreaterUint32x16", argLen: 2, generic: true, }, { - name: "MaskedRotateLeftUint32x16", - argLen: 3, + name: "GreaterEqualUint32x16", + argLen: 2, generic: true, }, { - name: "MaskedRotateRightUint32x16", + name: "GreaterEqualMaskedUint32x16", argLen: 3, generic: true, }, { - name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16", - argLen: 4, - generic: true, - }, - { - name: "MaskedShiftLeftUint32x16", + name: "GreaterMaskedUint32x16", argLen: 3, generic: true, }, { - name: "MaskedShiftLeftAndFillUpperFromUint32x16", - argLen: 4, - generic: true, - }, - { - name: "MaskedShiftRightUint32x16", - argLen: 3, + name: "LessUint32x16", + argLen: 2, generic: true, }, { - name: "MaskedShiftRightAndFillUpperFromUint32x16", - argLen: 4, + name: "LessEqualUint32x16", + argLen: 2, generic: true, }, { - name: "MaskedShiftRightSignExtendedUint32x16", + name: "LessEqualMaskedUint32x16", argLen: 3, generic: true, }, { - name: 
"MaskedSubUint32x16", + name: "LessMaskedUint32x16", argLen: 3, generic: true, }, { - name: "MaskedUnsignedSignedQuadDotProdAccumulateUint32x16", - argLen: 4, - generic: true, + name: "MaxUint32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedXorUint32x16", + name: "MaxMaskedUint32x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaxUint32x16", + name: "MinUint32x16", argLen: 2, commutative: true, generic: true, }, { - name: "MinUint32x16", - argLen: 2, + name: "MinMaskedUint32x16", + argLen: 3, commutative: true, generic: true, }, @@ -64412,252 +64339,252 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "OrUint32x16", - argLen: 2, + name: "NotEqualMaskedUint32x16", + argLen: 3, commutative: true, generic: true, }, { - name: "PopCountUint32x16", - argLen: 1, - generic: true, - }, - { - name: "RotateLeftUint32x16", - argLen: 2, - generic: true, + name: "OrUint32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "RotateRightUint32x16", - argLen: 2, - generic: true, + name: "OrMaskedUint32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x16", - argLen: 3, + name: "PopCountUint32x16", + argLen: 1, generic: true, }, { - name: "ShiftLeftUint32x16", + name: "PopCountMaskedUint32x16", argLen: 2, generic: true, }, { - name: "ShiftLeftAndFillUpperFromUint32x16", - argLen: 3, - generic: true, - }, - { - name: "ShiftRightUint32x16", + name: "RotateLeftUint32x16", argLen: 2, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint32x16", + name: "RotateLeftMaskedUint32x16", argLen: 3, generic: true, }, { - name: "ShiftRightSignExtendedUint32x16", - argLen: 2, - generic: true, - }, - { - name: "SubUint32x16", + name: "RotateRightUint32x16", argLen: 2, generic: true, }, { - name: "UnsignedSignedQuadDotProdAccumulateUint32x16", + name: "RotateRightMaskedUint32x16", argLen: 3, generic: true, }, { - name: "XorUint32x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AddUint32x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndUint32x4", - argLen: 2, - commutative: true, - generic: true, + name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x16", + argLen: 3, + generic: true, }, { - name: "AndNotUint32x4", - argLen: 2, + name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x16", + argLen: 4, generic: true, }, { - name: "EqualUint32x4", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftLeftUint32x16", + argLen: 2, + generic: true, }, { - name: "GreaterUint32x4", - argLen: 2, + name: "ShiftLeftAndFillUpperFromUint32x16", + argLen: 3, generic: true, }, { - name: "GreaterEqualUint32x4", - argLen: 2, + name: "ShiftLeftAndFillUpperFromMaskedUint32x16", + argLen: 4, generic: true, }, { - name: "LessUint32x4", - argLen: 2, + name: "ShiftLeftMaskedUint32x16", + argLen: 3, generic: true, }, { - name: "LessEqualUint32x4", + name: "ShiftRightUint32x16", argLen: 2, generic: true, }, { - name: "MaskedAddUint32x4", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftRightAndFillUpperFromUint32x16", + argLen: 3, + generic: true, }, { - name: "MaskedAndUint32x4", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftRightAndFillUpperFromMaskedUint32x16", + argLen: 4, + generic: true, }, { - name: "MaskedAndNotUint32x4", + name: "ShiftRightMaskedUint32x16", argLen: 3, generic: true, }, { - name: "MaskedEqualUint32x4", - argLen: 3, - commutative: true, - generic: true, + name: 
"ShiftRightSignExtendedUint32x16", + argLen: 2, + generic: true, }, { - name: "MaskedGreaterUint32x4", + name: "ShiftRightSignExtendedMaskedUint32x16", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualUint32x4", - argLen: 3, + name: "SubUint32x16", + argLen: 2, generic: true, }, { - name: "MaskedLessUint32x4", + name: "SubMaskedUint32x16", argLen: 3, generic: true, }, { - name: "MaskedLessEqualUint32x4", + name: "UnsignedSignedQuadDotProdAccumulateUint32x16", argLen: 3, generic: true, }, { - name: "MaskedMaxUint32x4", - argLen: 3, + name: "UnsignedSignedQuadDotProdAccumulateMaskedUint32x16", + argLen: 4, + generic: true, + }, + { + name: "XorUint32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMinUint32x4", + name: "XorMaskedUint32x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualUint32x4", + name: "AddUint32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AddMaskedUint32x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedOrUint32x4", + name: "AndUint32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndMaskedUint32x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPopCountUint32x4", + name: "AndNotUint32x4", argLen: 2, generic: true, }, { - name: "MaskedRotateLeftUint32x4", + name: "AndNotMaskedUint32x4", argLen: 3, generic: true, }, { - name: "MaskedRotateRightUint32x4", - argLen: 3, + name: "EqualUint32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "EqualMaskedUint32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "GreaterUint32x4", + argLen: 2, generic: true, }, { - name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4", - argLen: 4, + name: "GreaterEqualUint32x4", + argLen: 2, generic: true, }, { - name: "MaskedShiftLeftUint32x4", + name: "GreaterEqualMaskedUint32x4", argLen: 3, generic: true, }, { - name: "MaskedShiftLeftAndFillUpperFromUint32x4", - argLen: 4, + name: "GreaterMaskedUint32x4", + argLen: 3, generic: true, }, { - name: "MaskedShiftRightUint32x4", - argLen: 3, + name: "LessUint32x4", + argLen: 2, generic: true, }, { - name: "MaskedShiftRightAndFillUpperFromUint32x4", - argLen: 4, + name: "LessEqualUint32x4", + argLen: 2, generic: true, }, { - name: "MaskedShiftRightSignExtendedUint32x4", + name: "LessEqualMaskedUint32x4", argLen: 3, generic: true, }, { - name: "MaskedSubUint32x4", + name: "LessMaskedUint32x4", argLen: 3, generic: true, }, { - name: "MaskedUnsignedSignedQuadDotProdAccumulateUint32x4", - argLen: 4, - generic: true, + name: "MaxUint32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedXorUint32x4", + name: "MaxMaskedUint32x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaxUint32x4", + name: "MinUint32x4", argLen: 2, commutative: true, generic: true, }, { - name: "MinUint32x4", - argLen: 2, + name: "MinMaskedUint32x4", + argLen: 3, commutative: true, generic: true, }, @@ -64673,12 +64600,24 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "NotEqualMaskedUint32x4", + argLen: 3, + commutative: true, + generic: true, + }, { name: "OrUint32x4", argLen: 2, commutative: true, generic: true, }, + { + name: "OrMaskedUint32x4", + argLen: 3, + commutative: true, + generic: true, + }, { name: "PairwiseAddUint32x4", argLen: 2, @@ -64694,21 +64633,41 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "PopCountMaskedUint32x4", + argLen: 2, + generic: true, + }, { 
name: "RotateLeftUint32x4", argLen: 2, generic: true, }, + { + name: "RotateLeftMaskedUint32x4", + argLen: 3, + generic: true, + }, { name: "RotateRightUint32x4", argLen: 2, generic: true, }, + { + name: "RotateRightMaskedUint32x4", + argLen: 3, + generic: true, + }, { name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x4", argLen: 3, generic: true, }, + { + name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x4", + argLen: 4, + generic: true, + }, { name: "ShiftAllLeftUint32x4", argLen: 2, @@ -64730,216 +64689,184 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "ShiftRightUint32x4", - argLen: 2, + name: "ShiftLeftAndFillUpperFromMaskedUint32x4", + argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint32x4", + name: "ShiftLeftMaskedUint32x4", argLen: 3, generic: true, }, { - name: "ShiftRightSignExtendedUint32x4", - argLen: 2, - generic: true, - }, - { - name: "SubUint32x4", + name: "ShiftRightUint32x4", argLen: 2, generic: true, }, { - name: "UnsignedSignedQuadDotProdAccumulateUint32x4", + name: "ShiftRightAndFillUpperFromUint32x4", argLen: 3, generic: true, }, { - name: "XorUint32x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AddUint32x8", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightAndFillUpperFromMaskedUint32x4", + argLen: 4, + generic: true, }, { - name: "AndUint32x8", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightMaskedUint32x4", + argLen: 3, + generic: true, }, { - name: "AndNotUint32x8", + name: "ShiftRightSignExtendedUint32x4", argLen: 2, generic: true, }, { - name: "EqualUint32x8", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightSignExtendedMaskedUint32x4", + argLen: 3, + generic: true, }, { - name: "GreaterUint32x8", + name: "SubUint32x4", argLen: 2, generic: true, }, { - name: "GreaterEqualUint32x8", - argLen: 2, + name: "SubMaskedUint32x4", + argLen: 3, generic: true, }, { - name: "LessUint32x8", - argLen: 2, + name: "UnsignedSignedQuadDotProdAccumulateUint32x4", + argLen: 3, generic: true, }, { - name: "LessEqualUint32x8", - argLen: 2, + name: "UnsignedSignedQuadDotProdAccumulateMaskedUint32x4", + argLen: 4, generic: true, }, { - name: "MaskedAddUint32x8", - argLen: 3, + name: "XorUint32x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedAndUint32x8", + name: "XorMaskedUint32x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndNotUint32x8", - argLen: 3, - generic: true, + name: "AddUint32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedEqualUint32x8", + name: "AddMaskedUint32x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterUint32x8", - argLen: 3, - generic: true, + name: "AndUint32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedGreaterEqualUint32x8", - argLen: 3, - generic: true, + name: "AndMaskedUint32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaskedLessUint32x8", - argLen: 3, + name: "AndNotUint32x8", + argLen: 2, generic: true, }, { - name: "MaskedLessEqualUint32x8", + name: "AndNotMaskedUint32x8", argLen: 3, generic: true, }, { - name: "MaskedMaxUint32x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedMinUint32x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedNotEqualUint32x8", - argLen: 3, + name: "EqualUint32x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedOrUint32x8", + name: 
"EqualMaskedUint32x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPopCountUint32x8", + name: "GreaterUint32x8", argLen: 2, generic: true, }, { - name: "MaskedRotateLeftUint32x8", - argLen: 3, + name: "GreaterEqualUint32x8", + argLen: 2, generic: true, }, { - name: "MaskedRotateRightUint32x8", + name: "GreaterEqualMaskedUint32x8", argLen: 3, generic: true, }, { - name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedShiftLeftUint32x8", + name: "GreaterMaskedUint32x8", argLen: 3, generic: true, }, { - name: "MaskedShiftLeftAndFillUpperFromUint32x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedShiftRightUint32x8", - argLen: 3, + name: "LessUint32x8", + argLen: 2, generic: true, }, { - name: "MaskedShiftRightAndFillUpperFromUint32x8", - argLen: 4, + name: "LessEqualUint32x8", + argLen: 2, generic: true, }, { - name: "MaskedShiftRightSignExtendedUint32x8", + name: "LessEqualMaskedUint32x8", argLen: 3, generic: true, }, { - name: "MaskedSubUint32x8", + name: "LessMaskedUint32x8", argLen: 3, generic: true, }, { - name: "MaskedUnsignedSignedQuadDotProdAccumulateUint32x8", - argLen: 4, - generic: true, + name: "MaxUint32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedXorUint32x8", + name: "MaxMaskedUint32x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaxUint32x8", + name: "MinUint32x8", argLen: 2, commutative: true, generic: true, }, { - name: "MinUint32x8", - argLen: 2, + name: "MinMaskedUint32x8", + argLen: 3, commutative: true, generic: true, }, @@ -64955,12 +64882,24 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "NotEqualMaskedUint32x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "OrUint32x8", argLen: 2, commutative: true, generic: true, }, + { + name: "OrMaskedUint32x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "PairwiseAddUint32x8", argLen: 2, @@ -64977,251 +64916,227 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "RotateLeftUint32x8", + name: "PopCountMaskedUint32x8", argLen: 2, generic: true, }, { - name: "RotateRightUint32x8", + name: "RotateLeftUint32x8", argLen: 2, generic: true, }, { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x8", + name: "RotateLeftMaskedUint32x8", argLen: 3, generic: true, }, { - name: "ShiftAllLeftUint32x8", - argLen: 2, - generic: true, - }, - { - name: "ShiftAllRightUint32x8", + name: "RotateRightUint32x8", argLen: 2, generic: true, }, { - name: "ShiftLeftUint32x8", - argLen: 2, + name: "RotateRightMaskedUint32x8", + argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromUint32x8", + name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x8", argLen: 3, generic: true, }, { - name: "ShiftRightUint32x8", - argLen: 2, + name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x8", + argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint32x8", - argLen: 3, + name: "ShiftAllLeftUint32x8", + argLen: 2, generic: true, }, { - name: "ShiftRightSignExtendedUint32x8", + name: "ShiftAllRightUint32x8", argLen: 2, generic: true, }, { - name: "SubUint32x8", + name: "ShiftLeftUint32x8", argLen: 2, generic: true, }, { - name: "UnsignedSignedQuadDotProdAccumulateUint32x8", + name: "ShiftLeftAndFillUpperFromUint32x8", argLen: 3, generic: true, }, { - name: "XorUint32x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AddUint64x2", - argLen: 2, - commutative: true, - generic: 
true, + name: "ShiftLeftAndFillUpperFromMaskedUint32x8", + argLen: 4, + generic: true, }, { - name: "AndUint64x2", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftLeftMaskedUint32x8", + argLen: 3, + generic: true, }, { - name: "AndNotUint64x2", + name: "ShiftRightUint32x8", argLen: 2, generic: true, }, { - name: "EqualUint64x2", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "GreaterUint64x2", - argLen: 2, + name: "ShiftRightAndFillUpperFromUint32x8", + argLen: 3, generic: true, }, { - name: "GreaterEqualUint64x2", - argLen: 2, + name: "ShiftRightAndFillUpperFromMaskedUint32x8", + argLen: 4, generic: true, }, { - name: "LessUint64x2", - argLen: 2, + name: "ShiftRightMaskedUint32x8", + argLen: 3, generic: true, }, { - name: "LessEqualUint64x2", + name: "ShiftRightSignExtendedUint32x8", argLen: 2, generic: true, }, { - name: "MaskedAddUint64x2", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedAndUint64x2", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedAndNotUint64x2", + name: "ShiftRightSignExtendedMaskedUint32x8", argLen: 3, generic: true, }, { - name: "MaskedEqualUint64x2", - argLen: 3, - commutative: true, - generic: true, + name: "SubUint32x8", + argLen: 2, + generic: true, }, { - name: "MaskedGreaterUint64x2", + name: "SubMaskedUint32x8", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualUint64x2", + name: "UnsignedSignedQuadDotProdAccumulateUint32x8", argLen: 3, generic: true, }, { - name: "MaskedLessUint64x2", - argLen: 3, + name: "UnsignedSignedQuadDotProdAccumulateMaskedUint32x8", + argLen: 4, generic: true, }, { - name: "MaskedLessEqualUint64x2", - argLen: 3, - generic: true, + name: "XorUint32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedMaxUint64x2", + name: "XorMaskedUint32x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinUint64x2", - argLen: 3, + name: "AddUint64x2", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMulEvenWidenUint64x2", + name: "AddMaskedUint64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualUint64x2", - argLen: 3, + name: "AndUint64x2", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedOrUint64x2", + name: "AndMaskedUint64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPopCountUint64x2", + name: "AndNotUint64x2", argLen: 2, generic: true, }, { - name: "MaskedRotateLeftUint64x2", + name: "AndNotMaskedUint64x2", argLen: 3, generic: true, }, { - name: "MaskedRotateRightUint64x2", - argLen: 3, - generic: true, + name: "EqualUint64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedShiftAllLeftUint64x2", - argLen: 3, + name: "EqualMaskedUint64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "GreaterUint64x2", + argLen: 2, generic: true, }, { - name: "MaskedShiftAllRightUint64x2", - argLen: 3, + name: "GreaterEqualUint64x2", + argLen: 2, generic: true, }, { - name: "MaskedShiftLeftUint64x2", + name: "GreaterEqualMaskedUint64x2", argLen: 3, generic: true, }, { - name: "MaskedShiftLeftAndFillUpperFromUint64x2", - argLen: 4, + name: "GreaterMaskedUint64x2", + argLen: 3, generic: true, }, { - name: "MaskedShiftRightUint64x2", - argLen: 3, + name: "LessUint64x2", + argLen: 2, generic: true, }, { - name: "MaskedShiftRightAndFillUpperFromUint64x2", - argLen: 4, + name: "LessEqualUint64x2", + argLen: 2, generic: true, }, { - name: "MaskedShiftRightSignExtendedUint64x2", + name: 
"LessEqualMaskedUint64x2", argLen: 3, generic: true, }, { - name: "MaskedSubUint64x2", + name: "LessMaskedUint64x2", argLen: 3, generic: true, }, { - name: "MaskedXorUint64x2", - argLen: 3, + name: "MaxUint64x2", + argLen: 2, commutative: true, generic: true, }, { - name: "MaxUint64x2", - argLen: 2, + name: "MaxMaskedUint64x2", + argLen: 3, commutative: true, generic: true, }, @@ -65231,18 +65146,36 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MinMaskedUint64x2", + argLen: 3, + commutative: true, + generic: true, + }, { name: "MulEvenWidenUint64x2", argLen: 2, commutative: true, generic: true, }, + { + name: "MulEvenWidenMaskedUint64x2", + argLen: 3, + commutative: true, + generic: true, + }, { name: "NotEqualUint64x2", argLen: 2, commutative: true, generic: true, }, + { + name: "NotEqualMaskedUint64x2", + argLen: 3, + commutative: true, + generic: true, + }, { name: "OrUint64x2", argLen: 2, @@ -65250,252 +65183,240 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PopCountUint64x2", - argLen: 1, - generic: true, + name: "OrMaskedUint64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "RotateLeftUint64x2", - argLen: 2, + name: "PopCountUint64x2", + argLen: 1, generic: true, }, { - name: "RotateRightUint64x2", + name: "PopCountMaskedUint64x2", argLen: 2, generic: true, }, { - name: "ShiftAllLeftUint64x2", + name: "RotateLeftUint64x2", argLen: 2, generic: true, }, { - name: "ShiftAllRightUint64x2", - argLen: 2, + name: "RotateLeftMaskedUint64x2", + argLen: 3, generic: true, }, { - name: "ShiftLeftUint64x2", + name: "RotateRightUint64x2", argLen: 2, generic: true, }, { - name: "ShiftLeftAndFillUpperFromUint64x2", + name: "RotateRightMaskedUint64x2", argLen: 3, generic: true, }, { - name: "ShiftRightUint64x2", + name: "ShiftAllLeftUint64x2", argLen: 2, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint64x2", + name: "ShiftAllLeftMaskedUint64x2", argLen: 3, generic: true, }, { - name: "ShiftRightSignExtendedUint64x2", + name: "ShiftAllRightUint64x2", argLen: 2, generic: true, }, { - name: "SubUint64x2", - argLen: 2, + name: "ShiftAllRightMaskedUint64x2", + argLen: 3, generic: true, }, { - name: "XorUint64x2", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AddUint64x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndUint64x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndNotUint64x4", + name: "ShiftLeftUint64x2", argLen: 2, generic: true, }, { - name: "EqualUint64x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "GreaterUint64x4", - argLen: 2, + name: "ShiftLeftAndFillUpperFromUint64x2", + argLen: 3, generic: true, }, { - name: "GreaterEqualUint64x4", - argLen: 2, + name: "ShiftLeftAndFillUpperFromMaskedUint64x2", + argLen: 4, generic: true, }, { - name: "LessUint64x4", - argLen: 2, + name: "ShiftLeftMaskedUint64x2", + argLen: 3, generic: true, }, { - name: "LessEqualUint64x4", + name: "ShiftRightUint64x2", argLen: 2, generic: true, }, { - name: "MaskedAddUint64x4", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftRightAndFillUpperFromUint64x2", + argLen: 3, + generic: true, }, { - name: "MaskedAndUint64x4", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftRightAndFillUpperFromMaskedUint64x2", + argLen: 4, + generic: true, }, { - name: "MaskedAndNotUint64x4", + name: "ShiftRightMaskedUint64x2", argLen: 3, generic: true, }, { - name: "MaskedEqualUint64x4", - argLen: 3, - commutative: true, - 
generic: true, + name: "ShiftRightSignExtendedUint64x2", + argLen: 2, + generic: true, }, { - name: "MaskedGreaterUint64x4", + name: "ShiftRightSignExtendedMaskedUint64x2", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualUint64x4", - argLen: 3, + name: "SubUint64x2", + argLen: 2, generic: true, }, { - name: "MaskedLessUint64x4", + name: "SubMaskedUint64x2", argLen: 3, generic: true, }, { - name: "MaskedLessEqualUint64x4", - argLen: 3, - generic: true, + name: "XorUint64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedMaxUint64x4", + name: "XorMaskedUint64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinUint64x4", - argLen: 3, + name: "AddUint64x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMulEvenWidenUint64x4", + name: "AddMaskedUint64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualUint64x4", - argLen: 3, + name: "AndUint64x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedOrUint64x4", + name: "AndMaskedUint64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPopCountUint64x4", + name: "AndNotUint64x4", argLen: 2, generic: true, }, { - name: "MaskedRotateLeftUint64x4", + name: "AndNotMaskedUint64x4", argLen: 3, generic: true, }, { - name: "MaskedRotateRightUint64x4", - argLen: 3, + name: "EqualUint64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "EqualMaskedUint64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "GreaterUint64x4", + argLen: 2, + generic: true, + }, + { + name: "GreaterEqualUint64x4", + argLen: 2, generic: true, }, { - name: "MaskedShiftAllLeftUint64x4", + name: "GreaterEqualMaskedUint64x4", argLen: 3, generic: true, }, { - name: "MaskedShiftAllRightUint64x4", + name: "GreaterMaskedUint64x4", argLen: 3, generic: true, }, { - name: "MaskedShiftLeftUint64x4", - argLen: 3, + name: "LessUint64x4", + argLen: 2, generic: true, }, { - name: "MaskedShiftLeftAndFillUpperFromUint64x4", - argLen: 4, + name: "LessEqualUint64x4", + argLen: 2, generic: true, }, { - name: "MaskedShiftRightUint64x4", + name: "LessEqualMaskedUint64x4", argLen: 3, generic: true, }, { - name: "MaskedShiftRightAndFillUpperFromUint64x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedShiftRightSignExtendedUint64x4", + name: "LessMaskedUint64x4", argLen: 3, generic: true, }, { - name: "MaskedSubUint64x4", - argLen: 3, - generic: true, + name: "MaxUint64x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedXorUint64x4", + name: "MaxMaskedUint64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaxUint64x4", + name: "MinUint64x4", argLen: 2, commutative: true, generic: true, }, { - name: "MinUint64x4", - argLen: 2, + name: "MinMaskedUint64x4", + argLen: 3, commutative: true, generic: true, }, @@ -65505,12 +65426,24 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MulEvenWidenMaskedUint64x4", + argLen: 3, + commutative: true, + generic: true, + }, { name: "NotEqualUint64x4", argLen: 2, commutative: true, generic: true, }, + { + name: "NotEqualMaskedUint64x4", + argLen: 3, + commutative: true, + generic: true, + }, { name: "OrUint64x4", argLen: 2, @@ -65518,246 +65451,228 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PopCountUint64x4", - argLen: 1, - generic: true, + name: "OrMaskedUint64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "RotateLeftUint64x4", - argLen: 2, + name: "PopCountUint64x4", + 
argLen: 1, generic: true, }, { - name: "RotateRightUint64x4", + name: "PopCountMaskedUint64x4", argLen: 2, generic: true, }, { - name: "ShiftAllLeftUint64x4", + name: "RotateLeftUint64x4", argLen: 2, generic: true, }, { - name: "ShiftAllRightUint64x4", - argLen: 2, + name: "RotateLeftMaskedUint64x4", + argLen: 3, generic: true, }, { - name: "ShiftLeftUint64x4", + name: "RotateRightUint64x4", argLen: 2, generic: true, }, { - name: "ShiftLeftAndFillUpperFromUint64x4", + name: "RotateRightMaskedUint64x4", argLen: 3, generic: true, }, { - name: "ShiftRightUint64x4", + name: "ShiftAllLeftUint64x4", argLen: 2, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint64x4", + name: "ShiftAllLeftMaskedUint64x4", argLen: 3, generic: true, }, { - name: "ShiftRightSignExtendedUint64x4", + name: "ShiftAllRightUint64x4", argLen: 2, generic: true, }, { - name: "SubUint64x4", - argLen: 2, + name: "ShiftAllRightMaskedUint64x4", + argLen: 3, generic: true, }, { - name: "XorUint64x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AddUint64x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndUint64x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndNotUint64x8", + name: "ShiftLeftUint64x4", argLen: 2, generic: true, }, { - name: "EqualUint64x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "GreaterUint64x8", - argLen: 2, + name: "ShiftLeftAndFillUpperFromUint64x4", + argLen: 3, generic: true, }, { - name: "GreaterEqualUint64x8", - argLen: 2, + name: "ShiftLeftAndFillUpperFromMaskedUint64x4", + argLen: 4, generic: true, }, { - name: "LessUint64x8", - argLen: 2, + name: "ShiftLeftMaskedUint64x4", + argLen: 3, generic: true, }, { - name: "LessEqualUint64x8", + name: "ShiftRightUint64x4", argLen: 2, generic: true, }, { - name: "MaskedAddUint64x8", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftRightAndFillUpperFromUint64x4", + argLen: 3, + generic: true, }, { - name: "MaskedAndUint64x8", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftRightAndFillUpperFromMaskedUint64x4", + argLen: 4, + generic: true, }, { - name: "MaskedAndNotUint64x8", + name: "ShiftRightMaskedUint64x4", argLen: 3, generic: true, }, { - name: "MaskedEqualUint64x8", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftRightSignExtendedUint64x4", + argLen: 2, + generic: true, }, { - name: "MaskedGreaterUint64x8", + name: "ShiftRightSignExtendedMaskedUint64x4", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualUint64x8", - argLen: 3, + name: "SubUint64x4", + argLen: 2, generic: true, }, { - name: "MaskedLessUint64x8", + name: "SubMaskedUint64x4", argLen: 3, generic: true, }, { - name: "MaskedLessEqualUint64x8", - argLen: 3, - generic: true, + name: "XorUint64x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedMaxUint64x8", + name: "XorMaskedUint64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinUint64x8", - argLen: 3, + name: "AddUint64x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMulEvenWidenUint64x8", + name: "AddMaskedUint64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualUint64x8", - argLen: 3, + name: "AndUint64x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedOrUint64x8", + name: "AndMaskedUint64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPopCountUint64x8", + name: "AndNotUint64x8", argLen: 2, generic: true, }, { - name: 
"MaskedRotateLeftUint64x8", + name: "AndNotMaskedUint64x8", argLen: 3, generic: true, }, { - name: "MaskedRotateRightUint64x8", - argLen: 3, - generic: true, + name: "EqualUint64x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedShiftAllLeftUint64x8", - argLen: 3, + name: "EqualMaskedUint64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "GreaterUint64x8", + argLen: 2, generic: true, }, { - name: "MaskedShiftAllRightUint64x8", - argLen: 3, + name: "GreaterEqualUint64x8", + argLen: 2, generic: true, }, { - name: "MaskedShiftLeftUint64x8", + name: "GreaterEqualMaskedUint64x8", argLen: 3, generic: true, }, { - name: "MaskedShiftLeftAndFillUpperFromUint64x8", - argLen: 4, + name: "GreaterMaskedUint64x8", + argLen: 3, generic: true, }, { - name: "MaskedShiftRightUint64x8", - argLen: 3, + name: "LessUint64x8", + argLen: 2, generic: true, }, { - name: "MaskedShiftRightAndFillUpperFromUint64x8", - argLen: 4, + name: "LessEqualUint64x8", + argLen: 2, generic: true, }, { - name: "MaskedShiftRightSignExtendedUint64x8", + name: "LessEqualMaskedUint64x8", argLen: 3, generic: true, }, { - name: "MaskedSubUint64x8", + name: "LessMaskedUint64x8", argLen: 3, generic: true, }, { - name: "MaskedXorUint64x8", - argLen: 3, + name: "MaxUint64x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaxUint64x8", - argLen: 2, + name: "MaxMaskedUint64x8", + argLen: 3, commutative: true, generic: true, }, @@ -65767,49 +65682,98 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MinMaskedUint64x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "MulEvenWidenUint64x8", argLen: 2, commutative: true, generic: true, }, + { + name: "MulEvenWidenMaskedUint64x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "NotEqualUint64x8", argLen: 2, commutative: true, generic: true, }, + { + name: "NotEqualMaskedUint64x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "OrUint64x8", argLen: 2, commutative: true, generic: true, }, + { + name: "OrMaskedUint64x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "PopCountUint64x8", argLen: 1, generic: true, }, + { + name: "PopCountMaskedUint64x8", + argLen: 2, + generic: true, + }, { name: "RotateLeftUint64x8", argLen: 2, generic: true, }, + { + name: "RotateLeftMaskedUint64x8", + argLen: 3, + generic: true, + }, { name: "RotateRightUint64x8", argLen: 2, generic: true, }, + { + name: "RotateRightMaskedUint64x8", + argLen: 3, + generic: true, + }, { name: "ShiftAllLeftUint64x8", argLen: 2, generic: true, }, + { + name: "ShiftAllLeftMaskedUint64x8", + argLen: 3, + generic: true, + }, { name: "ShiftAllRightUint64x8", argLen: 2, generic: true, }, + { + name: "ShiftAllRightMaskedUint64x8", + argLen: 3, + generic: true, + }, { name: "ShiftLeftUint64x8", argLen: 2, @@ -65820,6 +65784,16 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "ShiftLeftAndFillUpperFromMaskedUint64x8", + argLen: 4, + generic: true, + }, + { + name: "ShiftLeftMaskedUint64x8", + argLen: 3, + generic: true, + }, { name: "ShiftRightUint64x8", argLen: 2, @@ -65830,28 +65804,60 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "ShiftRightAndFillUpperFromMaskedUint64x8", + argLen: 4, + generic: true, + }, + { + name: "ShiftRightMaskedUint64x8", + argLen: 3, + generic: true, + }, { name: "ShiftRightSignExtendedUint64x8", argLen: 2, generic: true, }, + { + name: "ShiftRightSignExtendedMaskedUint64x8", + argLen: 3, + 
generic: true, + }, { name: "SubUint64x8", argLen: 2, generic: true, }, + { + name: "SubMaskedUint64x8", + argLen: 3, + generic: true, + }, { name: "XorUint64x8", argLen: 2, commutative: true, generic: true, }, + { + name: "XorMaskedUint64x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "AddUint8x16", argLen: 2, commutative: true, generic: true, }, + { + name: "AddMaskedUint8x16", + argLen: 3, + commutative: true, + generic: true, + }, { name: "AndUint8x16", argLen: 2, @@ -65869,6 +65875,12 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "AverageMaskedUint8x16", + argLen: 3, + commutative: true, + generic: true, + }, { name: "EqualUint8x16", argLen: 2, @@ -65876,132 +65888,94 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "GaloisFieldMulUint8x16", - argLen: 2, - generic: true, + name: "EqualMaskedUint8x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "GreaterUint8x16", + name: "GaloisFieldMulUint8x16", argLen: 2, generic: true, }, { - name: "GreaterEqualUint8x16", - argLen: 2, + name: "GaloisFieldMulMaskedUint8x16", + argLen: 3, generic: true, }, { - name: "LessUint8x16", + name: "GreaterUint8x16", argLen: 2, generic: true, }, { - name: "LessEqualUint8x16", + name: "GreaterEqualUint8x16", argLen: 2, generic: true, }, { - name: "MaskedAddUint8x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedAverageUint8x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedEqualUint8x16", - argLen: 3, - commutative: true, - generic: true, + name: "GreaterEqualMaskedUint8x16", + argLen: 3, + generic: true, }, { - name: "MaskedGaloisFieldMulUint8x16", + name: "GreaterMaskedUint8x16", argLen: 3, generic: true, }, { - name: "MaskedGreaterUint8x16", - argLen: 3, + name: "LessUint8x16", + argLen: 2, generic: true, }, { - name: "MaskedGreaterEqualUint8x16", - argLen: 3, + name: "LessEqualUint8x16", + argLen: 2, generic: true, }, { - name: "MaskedLessUint8x16", + name: "LessEqualMaskedUint8x16", argLen: 3, generic: true, }, { - name: "MaskedLessEqualUint8x16", + name: "LessMaskedUint8x16", argLen: 3, generic: true, }, { - name: "MaskedMaxUint8x16", - argLen: 3, + name: "MaxUint8x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMinUint8x16", + name: "MaxMaskedUint8x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualUint8x16", - argLen: 3, + name: "MinUint8x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedPopCountUint8x16", - argLen: 2, - generic: true, - }, - { - name: "MaskedSaturatedAddUint8x16", + name: "MinMaskedUint8x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedSaturatedSubUint8x16", - argLen: 3, - generic: true, - }, - { - name: "MaskedSaturatedUnsignedSignedPairDotProdUint8x16", - argLen: 3, - generic: true, - }, - { - name: "MaskedSubUint8x16", - argLen: 3, - generic: true, - }, - { - name: "MaxUint8x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "MinUint8x16", + name: "NotEqualUint8x16", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualUint8x16", - argLen: 2, + name: "NotEqualMaskedUint8x16", + argLen: 3, commutative: true, generic: true, }, @@ -66016,27 +65990,53 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "PopCountMaskedUint8x16", + argLen: 2, + generic: true, + }, { name: "SaturatedAddUint8x16", argLen: 2, commutative: true, generic: true, }, + { + name: "SaturatedAddMaskedUint8x16", + 
argLen: 3, + commutative: true, + generic: true, + }, { name: "SaturatedSubUint8x16", argLen: 2, generic: true, }, + { + name: "SaturatedSubMaskedUint8x16", + argLen: 3, + generic: true, + }, { name: "SaturatedUnsignedSignedPairDotProdUint8x16", argLen: 2, generic: true, }, + { + name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x16", + argLen: 3, + generic: true, + }, { name: "SubUint8x16", argLen: 2, generic: true, }, + { + name: "SubMaskedUint8x16", + argLen: 3, + generic: true, + }, { name: "XorUint8x16", argLen: 2, @@ -66049,6 +66049,12 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "AddMaskedUint8x32", + argLen: 3, + commutative: true, + generic: true, + }, { name: "AndUint8x32", argLen: 2, @@ -66066,6 +66072,12 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "AverageMaskedUint8x32", + argLen: 3, + commutative: true, + generic: true, + }, { name: "EqualUint8x32", argLen: 2, @@ -66073,132 +66085,94 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "GaloisFieldMulUint8x32", - argLen: 2, - generic: true, + name: "EqualMaskedUint8x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "GreaterUint8x32", + name: "GaloisFieldMulUint8x32", argLen: 2, generic: true, }, { - name: "GreaterEqualUint8x32", - argLen: 2, + name: "GaloisFieldMulMaskedUint8x32", + argLen: 3, generic: true, }, { - name: "LessUint8x32", + name: "GreaterUint8x32", argLen: 2, generic: true, }, { - name: "LessEqualUint8x32", + name: "GreaterEqualUint8x32", argLen: 2, generic: true, }, { - name: "MaskedAddUint8x32", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedAverageUint8x32", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedEqualUint8x32", - argLen: 3, - commutative: true, - generic: true, + name: "GreaterEqualMaskedUint8x32", + argLen: 3, + generic: true, }, { - name: "MaskedGaloisFieldMulUint8x32", + name: "GreaterMaskedUint8x32", argLen: 3, generic: true, }, { - name: "MaskedGreaterUint8x32", - argLen: 3, + name: "LessUint8x32", + argLen: 2, generic: true, }, { - name: "MaskedGreaterEqualUint8x32", - argLen: 3, + name: "LessEqualUint8x32", + argLen: 2, generic: true, }, { - name: "MaskedLessUint8x32", + name: "LessEqualMaskedUint8x32", argLen: 3, generic: true, }, { - name: "MaskedLessEqualUint8x32", + name: "LessMaskedUint8x32", argLen: 3, generic: true, }, { - name: "MaskedMaxUint8x32", - argLen: 3, + name: "MaxUint8x32", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMinUint8x32", + name: "MaxMaskedUint8x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualUint8x32", - argLen: 3, + name: "MinUint8x32", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedPopCountUint8x32", - argLen: 2, - generic: true, - }, - { - name: "MaskedSaturatedAddUint8x32", + name: "MinMaskedUint8x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedSaturatedSubUint8x32", - argLen: 3, - generic: true, - }, - { - name: "MaskedSaturatedUnsignedSignedPairDotProdUint8x32", - argLen: 3, - generic: true, - }, - { - name: "MaskedSubUint8x32", - argLen: 3, - generic: true, - }, - { - name: "MaxUint8x32", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "MinUint8x32", + name: "NotEqualUint8x32", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualUint8x32", - argLen: 2, + name: "NotEqualMaskedUint8x32", + argLen: 3, commutative: true, generic: true, }, @@ -66213,27 +66187,53 @@ var 
opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "PopCountMaskedUint8x32", + argLen: 2, + generic: true, + }, { name: "SaturatedAddUint8x32", argLen: 2, commutative: true, generic: true, }, + { + name: "SaturatedAddMaskedUint8x32", + argLen: 3, + commutative: true, + generic: true, + }, { name: "SaturatedSubUint8x32", argLen: 2, generic: true, }, + { + name: "SaturatedSubMaskedUint8x32", + argLen: 3, + generic: true, + }, { name: "SaturatedUnsignedSignedPairDotProdUint8x32", argLen: 2, generic: true, }, + { + name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x32", + argLen: 3, + generic: true, + }, { name: "SubUint8x32", argLen: 2, generic: true, }, + { + name: "SubMaskedUint8x32", + argLen: 3, + generic: true, + }, { name: "XorUint8x32", argLen: 2, @@ -66246,12 +66246,24 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "AddMaskedUint8x64", + argLen: 3, + commutative: true, + generic: true, + }, { name: "AverageUint8x64", argLen: 2, commutative: true, generic: true, }, + { + name: "AverageMaskedUint8x64", + argLen: 3, + commutative: true, + generic: true, + }, { name: "EqualUint8x64", argLen: 2, @@ -66259,138 +66271,105 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "GaloisFieldMulUint8x64", - argLen: 2, - generic: true, + name: "EqualMaskedUint8x64", + argLen: 3, + commutative: true, + generic: true, }, { - name: "GreaterUint8x64", + name: "GaloisFieldMulUint8x64", argLen: 2, generic: true, }, { - name: "GreaterEqualUint8x64", - argLen: 2, + name: "GaloisFieldMulMaskedUint8x64", + argLen: 3, generic: true, }, { - name: "LessUint8x64", + name: "GreaterUint8x64", argLen: 2, generic: true, }, { - name: "LessEqualUint8x64", + name: "GreaterEqualUint8x64", argLen: 2, generic: true, }, { - name: "MaskedAddUint8x64", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedAverageUint8x64", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedEqualUint8x64", - argLen: 3, - commutative: true, - generic: true, + name: "GreaterEqualMaskedUint8x64", + argLen: 3, + generic: true, }, { - name: "MaskedGaloisFieldMulUint8x64", + name: "GreaterMaskedUint8x64", argLen: 3, generic: true, }, { - name: "MaskedGreaterUint8x64", - argLen: 3, + name: "LessUint8x64", + argLen: 2, generic: true, }, { - name: "MaskedGreaterEqualUint8x64", - argLen: 3, + name: "LessEqualUint8x64", + argLen: 2, generic: true, }, { - name: "MaskedLessUint8x64", + name: "LessEqualMaskedUint8x64", argLen: 3, generic: true, }, { - name: "MaskedLessEqualUint8x64", + name: "LessMaskedUint8x64", argLen: 3, generic: true, }, { - name: "MaskedMaxUint8x64", - argLen: 3, + name: "MaxUint8x64", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMinUint8x64", + name: "MaxMaskedUint8x64", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualUint8x64", - argLen: 3, + name: "MinUint8x64", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedPopCountUint8x64", - argLen: 2, - generic: true, - }, - { - name: "MaskedSaturatedAddUint8x64", + name: "MinMaskedUint8x64", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedSaturatedSubUint8x64", - argLen: 3, - generic: true, - }, - { - name: "MaskedSaturatedUnsignedSignedPairDotProdUint8x64", - argLen: 3, - generic: true, - }, - { - name: "MaskedSubUint8x64", - argLen: 3, - generic: true, - }, - { - name: "MaxUint8x64", + name: "NotEqualUint8x64", argLen: 2, commutative: true, generic: true, }, { - name: "MinUint8x64", - argLen: 2, + 
name: "NotEqualMaskedUint8x64", + argLen: 3, commutative: true, generic: true, }, { - name: "NotEqualUint8x64", - argLen: 2, - commutative: true, - generic: true, + name: "PopCountUint8x64", + argLen: 1, + generic: true, }, { - name: "PopCountUint8x64", - argLen: 1, + name: "PopCountMaskedUint8x64", + argLen: 2, generic: true, }, { @@ -66399,101 +66378,110 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "SaturatedAddMaskedUint8x64", + argLen: 3, + commutative: true, + generic: true, + }, { name: "SaturatedSubUint8x64", argLen: 2, generic: true, }, + { + name: "SaturatedSubMaskedUint8x64", + argLen: 3, + generic: true, + }, { name: "SaturatedUnsignedSignedPairDotProdUint8x64", argLen: 2, generic: true, }, { - name: "SubUint8x64", - argLen: 2, + name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x64", + argLen: 3, generic: true, }, { - name: "CeilWithPrecisionFloat32x16", - auxType: auxInt8, - argLen: 1, + name: "SubUint8x64", + argLen: 2, generic: true, }, { - name: "DiffWithCeilWithPrecisionFloat32x16", - auxType: auxInt8, - argLen: 1, + name: "SubMaskedUint8x64", + argLen: 3, generic: true, }, { - name: "DiffWithFloorWithPrecisionFloat32x16", + name: "CeilWithPrecisionFloat32x16", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithRoundWithPrecisionFloat32x16", + name: "CeilWithPrecisionMaskedFloat32x16", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithTruncWithPrecisionFloat32x16", + name: "DiffWithCeilWithPrecisionFloat32x16", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "FloorWithPrecisionFloat32x16", + name: "DiffWithCeilWithPrecisionMaskedFloat32x16", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "MaskedCeilWithPrecisionFloat32x16", + name: "DiffWithFloorWithPrecisionFloat32x16", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedDiffWithCeilWithPrecisionFloat32x16", + name: "DiffWithFloorWithPrecisionMaskedFloat32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithFloorWithPrecisionFloat32x16", + name: "DiffWithRoundWithPrecisionFloat32x16", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedDiffWithRoundWithPrecisionFloat32x16", + name: "DiffWithRoundWithPrecisionMaskedFloat32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithTruncWithPrecisionFloat32x16", + name: "DiffWithTruncWithPrecisionFloat32x16", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedFloorWithPrecisionFloat32x16", + name: "DiffWithTruncWithPrecisionMaskedFloat32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedRoundWithPrecisionFloat32x16", + name: "FloorWithPrecisionFloat32x16", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedTruncWithPrecisionFloat32x16", + name: "FloorWithPrecisionMaskedFloat32x16", auxType: auxInt8, argLen: 2, generic: true, @@ -66505,91 +66493,91 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "TruncWithPrecisionFloat32x16", + name: "RoundWithPrecisionMaskedFloat32x16", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "CeilWithPrecisionFloat32x4", + name: "TruncWithPrecisionFloat32x16", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithCeilWithPrecisionFloat32x4", + name: "TruncWithPrecisionMaskedFloat32x16", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithFloorWithPrecisionFloat32x4", + name: 
"CeilWithPrecisionFloat32x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithRoundWithPrecisionFloat32x4", + name: "CeilWithPrecisionMaskedFloat32x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithTruncWithPrecisionFloat32x4", + name: "DiffWithCeilWithPrecisionFloat32x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "FloorWithPrecisionFloat32x4", + name: "DiffWithCeilWithPrecisionMaskedFloat32x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "MaskedCeilWithPrecisionFloat32x4", + name: "DiffWithFloorWithPrecisionFloat32x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedDiffWithCeilWithPrecisionFloat32x4", + name: "DiffWithFloorWithPrecisionMaskedFloat32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithFloorWithPrecisionFloat32x4", + name: "DiffWithRoundWithPrecisionFloat32x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedDiffWithRoundWithPrecisionFloat32x4", + name: "DiffWithRoundWithPrecisionMaskedFloat32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithTruncWithPrecisionFloat32x4", + name: "DiffWithTruncWithPrecisionFloat32x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedFloorWithPrecisionFloat32x4", + name: "DiffWithTruncWithPrecisionMaskedFloat32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedRoundWithPrecisionFloat32x4", + name: "FloorWithPrecisionFloat32x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedTruncWithPrecisionFloat32x4", + name: "FloorWithPrecisionMaskedFloat32x4", auxType: auxInt8, argLen: 2, generic: true, @@ -66601,99 +66589,99 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "TruncWithPrecisionFloat32x4", + name: "RoundWithPrecisionMaskedFloat32x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "CeilWithPrecisionFloat32x8", + name: "TruncWithPrecisionFloat32x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithCeilWithPrecisionFloat32x8", + name: "TruncWithPrecisionMaskedFloat32x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithFloorWithPrecisionFloat32x8", + name: "CeilWithPrecisionFloat32x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithRoundWithPrecisionFloat32x8", + name: "CeilWithPrecisionMaskedFloat32x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithTruncWithPrecisionFloat32x8", + name: "DiffWithCeilWithPrecisionFloat32x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "FloorWithPrecisionFloat32x8", + name: "DiffWithCeilWithPrecisionMaskedFloat32x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "Get128Float32x8", + name: "DiffWithFloorWithPrecisionFloat32x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "MaskedCeilWithPrecisionFloat32x8", + name: "DiffWithFloorWithPrecisionMaskedFloat32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithCeilWithPrecisionFloat32x8", + name: "DiffWithRoundWithPrecisionFloat32x8", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedDiffWithFloorWithPrecisionFloat32x8", + name: "DiffWithRoundWithPrecisionMaskedFloat32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithRoundWithPrecisionFloat32x8", + name: "DiffWithTruncWithPrecisionFloat32x8", auxType: auxInt8, - 
argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedDiffWithTruncWithPrecisionFloat32x8", + name: "DiffWithTruncWithPrecisionMaskedFloat32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedFloorWithPrecisionFloat32x8", + name: "FloorWithPrecisionFloat32x8", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedRoundWithPrecisionFloat32x8", + name: "FloorWithPrecisionMaskedFloat32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedTruncWithPrecisionFloat32x8", + name: "Get128Float32x8", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { @@ -66703,97 +66691,97 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Set128Float32x8", + name: "RoundWithPrecisionMaskedFloat32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "TruncWithPrecisionFloat32x8", + name: "Set128Float32x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "CeilWithPrecisionFloat64x2", + name: "TruncWithPrecisionFloat32x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithCeilWithPrecisionFloat64x2", + name: "TruncWithPrecisionMaskedFloat32x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithFloorWithPrecisionFloat64x2", + name: "CeilWithPrecisionFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithRoundWithPrecisionFloat64x2", + name: "CeilWithPrecisionMaskedFloat64x2", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithTruncWithPrecisionFloat64x2", + name: "DiffWithCeilWithPrecisionFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "FloorWithPrecisionFloat64x2", + name: "DiffWithCeilWithPrecisionMaskedFloat64x2", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "MaskedCeilWithPrecisionFloat64x2", + name: "DiffWithFloorWithPrecisionFloat64x2", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedDiffWithCeilWithPrecisionFloat64x2", + name: "DiffWithFloorWithPrecisionMaskedFloat64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithFloorWithPrecisionFloat64x2", + name: "DiffWithRoundWithPrecisionFloat64x2", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedDiffWithRoundWithPrecisionFloat64x2", + name: "DiffWithRoundWithPrecisionMaskedFloat64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithTruncWithPrecisionFloat64x2", + name: "DiffWithTruncWithPrecisionFloat64x2", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedFloorWithPrecisionFloat64x2", + name: "DiffWithTruncWithPrecisionMaskedFloat64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedRoundWithPrecisionFloat64x2", + name: "FloorWithPrecisionFloat64x2", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedTruncWithPrecisionFloat64x2", + name: "FloorWithPrecisionMaskedFloat64x2", auxType: auxInt8, argLen: 2, generic: true, @@ -66805,99 +66793,99 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "TruncWithPrecisionFloat64x2", + name: "RoundWithPrecisionMaskedFloat64x2", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "CeilWithPrecisionFloat64x4", + name: "TruncWithPrecisionFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithCeilWithPrecisionFloat64x4", + name: "TruncWithPrecisionMaskedFloat64x2", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: 
"DiffWithFloorWithPrecisionFloat64x4", + name: "CeilWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithRoundWithPrecisionFloat64x4", + name: "CeilWithPrecisionMaskedFloat64x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithTruncWithPrecisionFloat64x4", + name: "DiffWithCeilWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "FloorWithPrecisionFloat64x4", + name: "DiffWithCeilWithPrecisionMaskedFloat64x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "Get128Float64x4", + name: "DiffWithFloorWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "MaskedCeilWithPrecisionFloat64x4", + name: "DiffWithFloorWithPrecisionMaskedFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithCeilWithPrecisionFloat64x4", + name: "DiffWithRoundWithPrecisionFloat64x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedDiffWithFloorWithPrecisionFloat64x4", + name: "DiffWithRoundWithPrecisionMaskedFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithRoundWithPrecisionFloat64x4", + name: "DiffWithTruncWithPrecisionFloat64x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedDiffWithTruncWithPrecisionFloat64x4", + name: "DiffWithTruncWithPrecisionMaskedFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedFloorWithPrecisionFloat64x4", + name: "FloorWithPrecisionFloat64x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedRoundWithPrecisionFloat64x4", + name: "FloorWithPrecisionMaskedFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedTruncWithPrecisionFloat64x4", + name: "Get128Float64x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { @@ -66907,97 +66895,97 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Set128Float64x4", + name: "RoundWithPrecisionMaskedFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "TruncWithPrecisionFloat64x4", + name: "Set128Float64x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "CeilWithPrecisionFloat64x8", + name: "TruncWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithCeilWithPrecisionFloat64x8", + name: "TruncWithPrecisionMaskedFloat64x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithFloorWithPrecisionFloat64x8", + name: "CeilWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithRoundWithPrecisionFloat64x8", + name: "CeilWithPrecisionMaskedFloat64x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithTruncWithPrecisionFloat64x8", + name: "DiffWithCeilWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "FloorWithPrecisionFloat64x8", + name: "DiffWithCeilWithPrecisionMaskedFloat64x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "MaskedCeilWithPrecisionFloat64x8", + name: "DiffWithFloorWithPrecisionFloat64x8", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedDiffWithCeilWithPrecisionFloat64x8", + name: "DiffWithFloorWithPrecisionMaskedFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithFloorWithPrecisionFloat64x8", + name: "DiffWithRoundWithPrecisionFloat64x8", auxType: auxInt8, - argLen: 2, + argLen: 1, 
generic: true, }, { - name: "MaskedDiffWithRoundWithPrecisionFloat64x8", + name: "DiffWithRoundWithPrecisionMaskedFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithTruncWithPrecisionFloat64x8", + name: "DiffWithTruncWithPrecisionFloat64x8", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedFloorWithPrecisionFloat64x8", + name: "DiffWithTruncWithPrecisionMaskedFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedRoundWithPrecisionFloat64x8", + name: "FloorWithPrecisionFloat64x8", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedTruncWithPrecisionFloat64x8", + name: "FloorWithPrecisionMaskedFloat64x8", auxType: auxInt8, argLen: 2, generic: true, @@ -67009,27 +66997,27 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "TruncWithPrecisionFloat64x8", + name: "RoundWithPrecisionMaskedFloat64x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "Get128Int16x16", + name: "TruncWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "MaskedShiftAllLeftAndFillUpperFromInt16x16", + name: "TruncWithPrecisionMaskedFloat64x8", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, { - name: "MaskedShiftAllRightAndFillUpperFromInt16x16", + name: "Get128Int16x16", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { @@ -67045,19 +67033,19 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt16x16", + name: "ShiftAllLeftAndFillUpperFromMaskedInt16x16", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedShiftAllLeftAndFillUpperFromInt16x32", + name: "ShiftAllRightAndFillUpperFromInt16x16", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, { - name: "MaskedShiftAllRightAndFillUpperFromInt16x32", + name: "ShiftAllRightAndFillUpperFromMaskedInt16x16", auxType: auxInt8, argLen: 3, generic: true, @@ -67069,27 +67057,27 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt16x32", + name: "ShiftAllLeftAndFillUpperFromMaskedInt16x32", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "GetElemInt16x8", + name: "ShiftAllRightAndFillUpperFromInt16x32", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "MaskedShiftAllLeftAndFillUpperFromInt16x8", + name: "ShiftAllRightAndFillUpperFromMaskedInt16x32", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "MaskedShiftAllRightAndFillUpperFromInt16x8", + name: "GetElemInt16x8", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { @@ -67105,39 +67093,33 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt16x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - name: "MaskedRotateAllLeftInt32x16", + name: "ShiftAllLeftAndFillUpperFromMaskedInt16x8", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedRotateAllRightInt32x16", + name: "ShiftAllRightAndFillUpperFromInt16x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedShiftAllLeftAndFillUpperFromInt32x16", + name: "ShiftAllRightAndFillUpperFromMaskedInt16x8", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "MaskedShiftAllRightAndFillUpperFromInt32x16", + name: "RotateAllLeftInt32x16", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "RotateAllLeftInt32x16", + name: "RotateAllLeftMaskedInt32x16", auxType: auxInt8, - argLen: 1, + 
argLen: 2, generic: true, }, { @@ -67147,51 +67129,51 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromInt32x16", + name: "RotateAllRightMaskedInt32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt32x16", + name: "ShiftAllLeftAndFillUpperFromInt32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "GetElemInt32x4", + name: "ShiftAllLeftAndFillUpperFromMaskedInt32x16", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "MaskedRotateAllLeftInt32x4", + name: "ShiftAllRightAndFillUpperFromInt32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedRotateAllRightInt32x4", + name: "ShiftAllRightAndFillUpperFromMaskedInt32x16", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedShiftAllLeftAndFillUpperFromInt32x4", + name: "GetElemInt32x4", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "MaskedShiftAllRightAndFillUpperFromInt32x4", + name: "RotateAllLeftInt32x4", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "RotateAllLeftInt32x4", + name: "RotateAllLeftMaskedInt32x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { @@ -67201,57 +67183,57 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "SetElemInt32x4", + name: "RotateAllRightMaskedInt32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromInt32x4", + name: "SetElemInt32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt32x4", + name: "ShiftAllLeftAndFillUpperFromInt32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "Get128Int32x8", + name: "ShiftAllLeftAndFillUpperFromMaskedInt32x4", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "MaskedRotateAllLeftInt32x8", + name: "ShiftAllRightAndFillUpperFromInt32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedRotateAllRightInt32x8", + name: "ShiftAllRightAndFillUpperFromMaskedInt32x4", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedShiftAllLeftAndFillUpperFromInt32x8", + name: "Get128Int32x8", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "MaskedShiftAllRightAndFillUpperFromInt32x8", + name: "RotateAllLeftInt32x8", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "RotateAllLeftInt32x8", + name: "RotateAllLeftMaskedInt32x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { @@ -67261,57 +67243,57 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Set128Int32x8", + name: "RotateAllRightMaskedInt32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromInt32x8", + name: "Set128Int32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt32x8", + name: "ShiftAllLeftAndFillUpperFromInt32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "GetElemInt64x2", + name: "ShiftAllLeftAndFillUpperFromMaskedInt32x8", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "MaskedRotateAllLeftInt64x2", + name: "ShiftAllRightAndFillUpperFromInt32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedRotateAllRightInt64x2", + name: "ShiftAllRightAndFillUpperFromMaskedInt32x8", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedShiftAllLeftAndFillUpperFromInt64x2", + name: "GetElemInt64x2", auxType: 
auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "MaskedShiftAllRightAndFillUpperFromInt64x2", + name: "RotateAllLeftInt64x2", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "RotateAllLeftInt64x2", + name: "RotateAllLeftMaskedInt64x2", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { @@ -67321,57 +67303,57 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "SetElemInt64x2", + name: "RotateAllRightMaskedInt64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromInt64x2", + name: "SetElemInt64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt64x2", + name: "ShiftAllLeftAndFillUpperFromInt64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "Get128Int64x4", + name: "ShiftAllLeftAndFillUpperFromMaskedInt64x2", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "MaskedRotateAllLeftInt64x4", + name: "ShiftAllRightAndFillUpperFromInt64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedRotateAllRightInt64x4", + name: "ShiftAllRightAndFillUpperFromMaskedInt64x2", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedShiftAllLeftAndFillUpperFromInt64x4", + name: "Get128Int64x4", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "MaskedShiftAllRightAndFillUpperFromInt64x4", + name: "RotateAllLeftInt64x4", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "RotateAllLeftInt64x4", + name: "RotateAllLeftMaskedInt64x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { @@ -67381,51 +67363,51 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Set128Int64x4", + name: "RotateAllRightMaskedInt64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromInt64x4", + name: "Set128Int64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt64x4", + name: "ShiftAllLeftAndFillUpperFromInt64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedRotateAllLeftInt64x8", + name: "ShiftAllLeftAndFillUpperFromMaskedInt64x4", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedRotateAllRightInt64x8", + name: "ShiftAllRightAndFillUpperFromInt64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedShiftAllLeftAndFillUpperFromInt64x8", + name: "ShiftAllRightAndFillUpperFromMaskedInt64x4", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "MaskedShiftAllRightAndFillUpperFromInt64x8", + name: "RotateAllLeftInt64x8", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "RotateAllLeftInt64x8", + name: "RotateAllLeftMaskedInt64x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { @@ -67435,57 +67417,63 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromInt64x8", + name: "RotateAllRightMaskedInt64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt64x8", + name: "ShiftAllLeftAndFillUpperFromInt64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "GetElemInt8x16", + name: "ShiftAllLeftAndFillUpperFromMaskedInt64x8", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "SetElemInt8x16", + name: "ShiftAllRightAndFillUpperFromInt64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "Get128Int8x32", + name: "ShiftAllRightAndFillUpperFromMaskedInt64x8", + auxType: 
auxInt8, + argLen: 3, + generic: true, + }, + { + name: "GetElemInt8x16", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "Set128Int8x32", + name: "SetElemInt8x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "Get128Uint16x16", + name: "Get128Int8x32", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "MaskedShiftAllLeftAndFillUpperFromUint16x16", + name: "Set128Int8x32", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, { - name: "MaskedShiftAllRightAndFillUpperFromUint16x16", + name: "Get128Uint16x16", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { @@ -67501,19 +67489,19 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "ShiftAllRightAndFillUpperFromUint16x16", + name: "ShiftAllLeftAndFillUpperFromMaskedUint16x16", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedShiftAllLeftAndFillUpperFromUint16x32", + name: "ShiftAllRightAndFillUpperFromUint16x16", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, { - name: "MaskedShiftAllRightAndFillUpperFromUint16x32", + name: "ShiftAllRightAndFillUpperFromMaskedUint16x16", auxType: auxInt8, argLen: 3, generic: true, @@ -67525,27 +67513,27 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "ShiftAllRightAndFillUpperFromUint16x32", + name: "ShiftAllLeftAndFillUpperFromMaskedUint16x32", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "GetElemUint16x8", + name: "ShiftAllRightAndFillUpperFromUint16x32", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "MaskedShiftAllLeftAndFillUpperFromUint16x8", + name: "ShiftAllRightAndFillUpperFromMaskedUint16x32", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "MaskedShiftAllRightAndFillUpperFromUint16x8", + name: "GetElemUint16x8", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { @@ -67561,39 +67549,33 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "ShiftAllRightAndFillUpperFromUint16x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - name: "MaskedRotateAllLeftUint32x16", + name: "ShiftAllLeftAndFillUpperFromMaskedUint16x8", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedRotateAllRightUint32x16", + name: "ShiftAllRightAndFillUpperFromUint16x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedShiftAllLeftAndFillUpperFromUint32x16", + name: "ShiftAllRightAndFillUpperFromMaskedUint16x8", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "MaskedShiftAllRightAndFillUpperFromUint32x16", + name: "RotateAllLeftUint32x16", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "RotateAllLeftUint32x16", + name: "RotateAllLeftMaskedUint32x16", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { @@ -67603,51 +67585,51 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromUint32x16", + name: "RotateAllRightMaskedUint32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromUint32x16", + name: "ShiftAllLeftAndFillUpperFromUint32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "GetElemUint32x4", + name: "ShiftAllLeftAndFillUpperFromMaskedUint32x16", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "MaskedRotateAllLeftUint32x4", + name: "ShiftAllRightAndFillUpperFromUint32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedRotateAllRightUint32x4", + name: "ShiftAllRightAndFillUpperFromMaskedUint32x16", 
auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedShiftAllLeftAndFillUpperFromUint32x4", + name: "GetElemUint32x4", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "MaskedShiftAllRightAndFillUpperFromUint32x4", + name: "RotateAllLeftUint32x4", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "RotateAllLeftUint32x4", + name: "RotateAllLeftMaskedUint32x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { @@ -67657,57 +67639,57 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "SetElemUint32x4", + name: "RotateAllRightMaskedUint32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromUint32x4", + name: "SetElemUint32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromUint32x4", + name: "ShiftAllLeftAndFillUpperFromUint32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "Get128Uint32x8", + name: "ShiftAllLeftAndFillUpperFromMaskedUint32x4", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "MaskedRotateAllLeftUint32x8", + name: "ShiftAllRightAndFillUpperFromUint32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedRotateAllRightUint32x8", + name: "ShiftAllRightAndFillUpperFromMaskedUint32x4", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedShiftAllLeftAndFillUpperFromUint32x8", + name: "Get128Uint32x8", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "MaskedShiftAllRightAndFillUpperFromUint32x8", + name: "RotateAllLeftUint32x8", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "RotateAllLeftUint32x8", + name: "RotateAllLeftMaskedUint32x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { @@ -67717,57 +67699,57 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Set128Uint32x8", + name: "RotateAllRightMaskedUint32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromUint32x8", + name: "Set128Uint32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromUint32x8", + name: "ShiftAllLeftAndFillUpperFromUint32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "GetElemUint64x2", + name: "ShiftAllLeftAndFillUpperFromMaskedUint32x8", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "MaskedRotateAllLeftUint64x2", + name: "ShiftAllRightAndFillUpperFromUint32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedRotateAllRightUint64x2", + name: "ShiftAllRightAndFillUpperFromMaskedUint32x8", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedShiftAllLeftAndFillUpperFromUint64x2", + name: "GetElemUint64x2", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "MaskedShiftAllRightAndFillUpperFromUint64x2", + name: "RotateAllLeftUint64x2", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "RotateAllLeftUint64x2", + name: "RotateAllLeftMaskedUint64x2", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { @@ -67777,57 +67759,57 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "SetElemUint64x2", + name: "RotateAllRightMaskedUint64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromUint64x2", + name: "SetElemUint64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromUint64x2", + name: 
"ShiftAllLeftAndFillUpperFromUint64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "Get128Uint64x4", + name: "ShiftAllLeftAndFillUpperFromMaskedUint64x2", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "MaskedRotateAllLeftUint64x4", + name: "ShiftAllRightAndFillUpperFromUint64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedRotateAllRightUint64x4", + name: "ShiftAllRightAndFillUpperFromMaskedUint64x2", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedShiftAllLeftAndFillUpperFromUint64x4", + name: "Get128Uint64x4", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "MaskedShiftAllRightAndFillUpperFromUint64x4", + name: "RotateAllLeftUint64x4", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "RotateAllLeftUint64x4", + name: "RotateAllLeftMaskedUint64x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { @@ -67837,51 +67819,51 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Set128Uint64x4", + name: "RotateAllRightMaskedUint64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromUint64x4", + name: "Set128Uint64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromUint64x4", + name: "ShiftAllLeftAndFillUpperFromUint64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedRotateAllLeftUint64x8", + name: "ShiftAllLeftAndFillUpperFromMaskedUint64x4", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedRotateAllRightUint64x8", + name: "ShiftAllRightAndFillUpperFromUint64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedShiftAllLeftAndFillUpperFromUint64x8", + name: "ShiftAllRightAndFillUpperFromMaskedUint64x4", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "MaskedShiftAllRightAndFillUpperFromUint64x8", + name: "RotateAllLeftUint64x8", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "RotateAllLeftUint64x8", + name: "RotateAllLeftMaskedUint64x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { @@ -67890,18 +67872,36 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "RotateAllRightMaskedUint64x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, { name: "ShiftAllLeftAndFillUpperFromUint64x8", auxType: auxInt8, argLen: 2, generic: true, }, + { + name: "ShiftAllLeftAndFillUpperFromMaskedUint64x8", + auxType: auxInt8, + argLen: 3, + generic: true, + }, { name: "ShiftAllRightAndFillUpperFromUint64x8", auxType: auxInt8, argLen: 2, generic: true, }, + { + name: "ShiftAllRightAndFillUpperFromMaskedUint64x8", + auxType: auxInt8, + argLen: 3, + generic: true, + }, { name: "GaloisFieldAffineTransformUint8x16", auxType: auxInt8, @@ -67915,21 +67915,21 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "GetElemUint8x16", + name: "GaloisFieldAffineTransformInversedMaskedUint8x16", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "MaskedGaloisFieldAffineTransformUint8x16", + name: "GaloisFieldAffineTransformMaskedUint8x16", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "MaskedGaloisFieldAffineTransformInversedUint8x16", + name: "GetElemUint8x16", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { @@ -67951,21 +67951,21 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Get128Uint8x32", + name: "GaloisFieldAffineTransformInversedMaskedUint8x32", auxType: auxInt8, - argLen: 
1, + argLen: 3, generic: true, }, { - name: "MaskedGaloisFieldAffineTransformUint8x32", + name: "GaloisFieldAffineTransformMaskedUint8x32", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "MaskedGaloisFieldAffineTransformInversedUint8x32", + name: "Get128Uint8x32", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { @@ -67987,13 +67987,13 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MaskedGaloisFieldAffineTransformUint8x64", + name: "GaloisFieldAffineTransformInversedMaskedUint8x64", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "MaskedGaloisFieldAffineTransformInversedUint8x64", + name: "GaloisFieldAffineTransformMaskedUint8x64", auxType: auxInt8, argLen: 3, generic: true, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 2e6a9dfaec1690..2e27077e81926b 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -589,6 +589,30 @@ func rewriteValueAMD64(v *Value) bool { case OpAbsoluteInt8x64: v.Op = OpAMD64VPABSB512 return true + case OpAbsoluteMaskedInt16x16: + return rewriteValueAMD64_OpAbsoluteMaskedInt16x16(v) + case OpAbsoluteMaskedInt16x32: + return rewriteValueAMD64_OpAbsoluteMaskedInt16x32(v) + case OpAbsoluteMaskedInt16x8: + return rewriteValueAMD64_OpAbsoluteMaskedInt16x8(v) + case OpAbsoluteMaskedInt32x16: + return rewriteValueAMD64_OpAbsoluteMaskedInt32x16(v) + case OpAbsoluteMaskedInt32x4: + return rewriteValueAMD64_OpAbsoluteMaskedInt32x4(v) + case OpAbsoluteMaskedInt32x8: + return rewriteValueAMD64_OpAbsoluteMaskedInt32x8(v) + case OpAbsoluteMaskedInt64x2: + return rewriteValueAMD64_OpAbsoluteMaskedInt64x2(v) + case OpAbsoluteMaskedInt64x4: + return rewriteValueAMD64_OpAbsoluteMaskedInt64x4(v) + case OpAbsoluteMaskedInt64x8: + return rewriteValueAMD64_OpAbsoluteMaskedInt64x8(v) + case OpAbsoluteMaskedInt8x16: + return rewriteValueAMD64_OpAbsoluteMaskedInt8x16(v) + case OpAbsoluteMaskedInt8x32: + return rewriteValueAMD64_OpAbsoluteMaskedInt8x32(v) + case OpAbsoluteMaskedInt8x64: + return rewriteValueAMD64_OpAbsoluteMaskedInt8x64(v) case OpAdd16: v.Op = OpAMD64ADDL return true @@ -661,6 +685,66 @@ func rewriteValueAMD64(v *Value) bool { case OpAddInt8x64: v.Op = OpAMD64VPADDB512 return true + case OpAddMaskedFloat32x16: + return rewriteValueAMD64_OpAddMaskedFloat32x16(v) + case OpAddMaskedFloat32x4: + return rewriteValueAMD64_OpAddMaskedFloat32x4(v) + case OpAddMaskedFloat32x8: + return rewriteValueAMD64_OpAddMaskedFloat32x8(v) + case OpAddMaskedFloat64x2: + return rewriteValueAMD64_OpAddMaskedFloat64x2(v) + case OpAddMaskedFloat64x4: + return rewriteValueAMD64_OpAddMaskedFloat64x4(v) + case OpAddMaskedFloat64x8: + return rewriteValueAMD64_OpAddMaskedFloat64x8(v) + case OpAddMaskedInt16x16: + return rewriteValueAMD64_OpAddMaskedInt16x16(v) + case OpAddMaskedInt16x32: + return rewriteValueAMD64_OpAddMaskedInt16x32(v) + case OpAddMaskedInt16x8: + return rewriteValueAMD64_OpAddMaskedInt16x8(v) + case OpAddMaskedInt32x16: + return rewriteValueAMD64_OpAddMaskedInt32x16(v) + case OpAddMaskedInt32x4: + return rewriteValueAMD64_OpAddMaskedInt32x4(v) + case OpAddMaskedInt32x8: + return rewriteValueAMD64_OpAddMaskedInt32x8(v) + case OpAddMaskedInt64x2: + return rewriteValueAMD64_OpAddMaskedInt64x2(v) + case OpAddMaskedInt64x4: + return rewriteValueAMD64_OpAddMaskedInt64x4(v) + case OpAddMaskedInt64x8: + return rewriteValueAMD64_OpAddMaskedInt64x8(v) + case OpAddMaskedInt8x16: + return rewriteValueAMD64_OpAddMaskedInt8x16(v) + 
case OpAddMaskedInt8x32: + return rewriteValueAMD64_OpAddMaskedInt8x32(v) + case OpAddMaskedInt8x64: + return rewriteValueAMD64_OpAddMaskedInt8x64(v) + case OpAddMaskedUint16x16: + return rewriteValueAMD64_OpAddMaskedUint16x16(v) + case OpAddMaskedUint16x32: + return rewriteValueAMD64_OpAddMaskedUint16x32(v) + case OpAddMaskedUint16x8: + return rewriteValueAMD64_OpAddMaskedUint16x8(v) + case OpAddMaskedUint32x16: + return rewriteValueAMD64_OpAddMaskedUint32x16(v) + case OpAddMaskedUint32x4: + return rewriteValueAMD64_OpAddMaskedUint32x4(v) + case OpAddMaskedUint32x8: + return rewriteValueAMD64_OpAddMaskedUint32x8(v) + case OpAddMaskedUint64x2: + return rewriteValueAMD64_OpAddMaskedUint64x2(v) + case OpAddMaskedUint64x4: + return rewriteValueAMD64_OpAddMaskedUint64x4(v) + case OpAddMaskedUint64x8: + return rewriteValueAMD64_OpAddMaskedUint64x8(v) + case OpAddMaskedUint8x16: + return rewriteValueAMD64_OpAddMaskedUint8x16(v) + case OpAddMaskedUint8x32: + return rewriteValueAMD64_OpAddMaskedUint8x32(v) + case OpAddMaskedUint8x64: + return rewriteValueAMD64_OpAddMaskedUint8x64(v) case OpAddPtr: v.Op = OpAMD64ADDQ return true @@ -759,6 +843,30 @@ func rewriteValueAMD64(v *Value) bool { case OpAndInt8x32: v.Op = OpAMD64VPAND256 return true + case OpAndMaskedInt32x16: + return rewriteValueAMD64_OpAndMaskedInt32x16(v) + case OpAndMaskedInt32x4: + return rewriteValueAMD64_OpAndMaskedInt32x4(v) + case OpAndMaskedInt32x8: + return rewriteValueAMD64_OpAndMaskedInt32x8(v) + case OpAndMaskedInt64x2: + return rewriteValueAMD64_OpAndMaskedInt64x2(v) + case OpAndMaskedInt64x4: + return rewriteValueAMD64_OpAndMaskedInt64x4(v) + case OpAndMaskedInt64x8: + return rewriteValueAMD64_OpAndMaskedInt64x8(v) + case OpAndMaskedUint32x16: + return rewriteValueAMD64_OpAndMaskedUint32x16(v) + case OpAndMaskedUint32x4: + return rewriteValueAMD64_OpAndMaskedUint32x4(v) + case OpAndMaskedUint32x8: + return rewriteValueAMD64_OpAndMaskedUint32x8(v) + case OpAndMaskedUint64x2: + return rewriteValueAMD64_OpAndMaskedUint64x2(v) + case OpAndMaskedUint64x4: + return rewriteValueAMD64_OpAndMaskedUint64x4(v) + case OpAndMaskedUint64x8: + return rewriteValueAMD64_OpAndMaskedUint64x8(v) case OpAndNotInt16x16: v.Op = OpAMD64VPANDN256 return true @@ -789,6 +897,30 @@ func rewriteValueAMD64(v *Value) bool { case OpAndNotInt8x32: v.Op = OpAMD64VPANDN256 return true + case OpAndNotMaskedInt32x16: + return rewriteValueAMD64_OpAndNotMaskedInt32x16(v) + case OpAndNotMaskedInt32x4: + return rewriteValueAMD64_OpAndNotMaskedInt32x4(v) + case OpAndNotMaskedInt32x8: + return rewriteValueAMD64_OpAndNotMaskedInt32x8(v) + case OpAndNotMaskedInt64x2: + return rewriteValueAMD64_OpAndNotMaskedInt64x2(v) + case OpAndNotMaskedInt64x4: + return rewriteValueAMD64_OpAndNotMaskedInt64x4(v) + case OpAndNotMaskedInt64x8: + return rewriteValueAMD64_OpAndNotMaskedInt64x8(v) + case OpAndNotMaskedUint32x16: + return rewriteValueAMD64_OpAndNotMaskedUint32x16(v) + case OpAndNotMaskedUint32x4: + return rewriteValueAMD64_OpAndNotMaskedUint32x4(v) + case OpAndNotMaskedUint32x8: + return rewriteValueAMD64_OpAndNotMaskedUint32x8(v) + case OpAndNotMaskedUint64x2: + return rewriteValueAMD64_OpAndNotMaskedUint64x2(v) + case OpAndNotMaskedUint64x4: + return rewriteValueAMD64_OpAndNotMaskedUint64x4(v) + case OpAndNotMaskedUint64x8: + return rewriteValueAMD64_OpAndNotMaskedUint64x8(v) case OpAndNotUint16x16: v.Op = OpAMD64VPANDN256 return true @@ -867,6 +999,18 @@ func rewriteValueAMD64(v *Value) bool { case OpApproximateReciprocalFloat64x8: v.Op = OpAMD64VRCP14PD512 return 
true + case OpApproximateReciprocalMaskedFloat32x16: + return rewriteValueAMD64_OpApproximateReciprocalMaskedFloat32x16(v) + case OpApproximateReciprocalMaskedFloat32x4: + return rewriteValueAMD64_OpApproximateReciprocalMaskedFloat32x4(v) + case OpApproximateReciprocalMaskedFloat32x8: + return rewriteValueAMD64_OpApproximateReciprocalMaskedFloat32x8(v) + case OpApproximateReciprocalMaskedFloat64x2: + return rewriteValueAMD64_OpApproximateReciprocalMaskedFloat64x2(v) + case OpApproximateReciprocalMaskedFloat64x4: + return rewriteValueAMD64_OpApproximateReciprocalMaskedFloat64x4(v) + case OpApproximateReciprocalMaskedFloat64x8: + return rewriteValueAMD64_OpApproximateReciprocalMaskedFloat64x8(v) case OpApproximateReciprocalOfSqrtFloat32x16: v.Op = OpAMD64VRSQRT14PS512 return true @@ -885,6 +1029,18 @@ func rewriteValueAMD64(v *Value) bool { case OpApproximateReciprocalOfSqrtFloat64x8: v.Op = OpAMD64VRSQRT14PD512 return true + case OpApproximateReciprocalOfSqrtMaskedFloat32x16: + return rewriteValueAMD64_OpApproximateReciprocalOfSqrtMaskedFloat32x16(v) + case OpApproximateReciprocalOfSqrtMaskedFloat32x4: + return rewriteValueAMD64_OpApproximateReciprocalOfSqrtMaskedFloat32x4(v) + case OpApproximateReciprocalOfSqrtMaskedFloat32x8: + return rewriteValueAMD64_OpApproximateReciprocalOfSqrtMaskedFloat32x8(v) + case OpApproximateReciprocalOfSqrtMaskedFloat64x2: + return rewriteValueAMD64_OpApproximateReciprocalOfSqrtMaskedFloat64x2(v) + case OpApproximateReciprocalOfSqrtMaskedFloat64x4: + return rewriteValueAMD64_OpApproximateReciprocalOfSqrtMaskedFloat64x4(v) + case OpApproximateReciprocalOfSqrtMaskedFloat64x8: + return rewriteValueAMD64_OpApproximateReciprocalOfSqrtMaskedFloat64x8(v) case OpAtomicAdd32: return rewriteValueAMD64_OpAtomicAdd32(v) case OpAtomicAdd64: @@ -931,6 +1087,18 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAtomicStore8(v) case OpAtomicStorePtrNoWB: return rewriteValueAMD64_OpAtomicStorePtrNoWB(v) + case OpAverageMaskedUint16x16: + return rewriteValueAMD64_OpAverageMaskedUint16x16(v) + case OpAverageMaskedUint16x32: + return rewriteValueAMD64_OpAverageMaskedUint16x32(v) + case OpAverageMaskedUint16x8: + return rewriteValueAMD64_OpAverageMaskedUint16x8(v) + case OpAverageMaskedUint8x16: + return rewriteValueAMD64_OpAverageMaskedUint8x16(v) + case OpAverageMaskedUint8x32: + return rewriteValueAMD64_OpAverageMaskedUint8x32(v) + case OpAverageMaskedUint8x64: + return rewriteValueAMD64_OpAverageMaskedUint8x64(v) case OpAverageUint16x16: v.Op = OpAMD64VPAVGW256 return true @@ -990,6 +1158,18 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpCeilWithPrecisionFloat64x4(v) case OpCeilWithPrecisionFloat64x8: return rewriteValueAMD64_OpCeilWithPrecisionFloat64x8(v) + case OpCeilWithPrecisionMaskedFloat32x16: + return rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat32x16(v) + case OpCeilWithPrecisionMaskedFloat32x4: + return rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat32x4(v) + case OpCeilWithPrecisionMaskedFloat32x8: + return rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat32x8(v) + case OpCeilWithPrecisionMaskedFloat64x2: + return rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat64x2(v) + case OpCeilWithPrecisionMaskedFloat64x4: + return rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat64x4(v) + case OpCeilWithPrecisionMaskedFloat64x8: + return rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat64x8(v) case OpClosureCall: v.Op = OpAMD64CALLclosure return true @@ -1088,6 +1268,18 @@ func rewriteValueAMD64(v *Value) bool { return 
rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat64x4(v) case OpDiffWithCeilWithPrecisionFloat64x8: return rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat64x8(v) + case OpDiffWithCeilWithPrecisionMaskedFloat32x16: + return rewriteValueAMD64_OpDiffWithCeilWithPrecisionMaskedFloat32x16(v) + case OpDiffWithCeilWithPrecisionMaskedFloat32x4: + return rewriteValueAMD64_OpDiffWithCeilWithPrecisionMaskedFloat32x4(v) + case OpDiffWithCeilWithPrecisionMaskedFloat32x8: + return rewriteValueAMD64_OpDiffWithCeilWithPrecisionMaskedFloat32x8(v) + case OpDiffWithCeilWithPrecisionMaskedFloat64x2: + return rewriteValueAMD64_OpDiffWithCeilWithPrecisionMaskedFloat64x2(v) + case OpDiffWithCeilWithPrecisionMaskedFloat64x4: + return rewriteValueAMD64_OpDiffWithCeilWithPrecisionMaskedFloat64x4(v) + case OpDiffWithCeilWithPrecisionMaskedFloat64x8: + return rewriteValueAMD64_OpDiffWithCeilWithPrecisionMaskedFloat64x8(v) case OpDiffWithFloorWithPrecisionFloat32x16: return rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat32x16(v) case OpDiffWithFloorWithPrecisionFloat32x4: @@ -1100,6 +1292,18 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat64x4(v) case OpDiffWithFloorWithPrecisionFloat64x8: return rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat64x8(v) + case OpDiffWithFloorWithPrecisionMaskedFloat32x16: + return rewriteValueAMD64_OpDiffWithFloorWithPrecisionMaskedFloat32x16(v) + case OpDiffWithFloorWithPrecisionMaskedFloat32x4: + return rewriteValueAMD64_OpDiffWithFloorWithPrecisionMaskedFloat32x4(v) + case OpDiffWithFloorWithPrecisionMaskedFloat32x8: + return rewriteValueAMD64_OpDiffWithFloorWithPrecisionMaskedFloat32x8(v) + case OpDiffWithFloorWithPrecisionMaskedFloat64x2: + return rewriteValueAMD64_OpDiffWithFloorWithPrecisionMaskedFloat64x2(v) + case OpDiffWithFloorWithPrecisionMaskedFloat64x4: + return rewriteValueAMD64_OpDiffWithFloorWithPrecisionMaskedFloat64x4(v) + case OpDiffWithFloorWithPrecisionMaskedFloat64x8: + return rewriteValueAMD64_OpDiffWithFloorWithPrecisionMaskedFloat64x8(v) case OpDiffWithRoundWithPrecisionFloat32x16: return rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat32x16(v) case OpDiffWithRoundWithPrecisionFloat32x4: @@ -1112,6 +1316,18 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat64x4(v) case OpDiffWithRoundWithPrecisionFloat64x8: return rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat64x8(v) + case OpDiffWithRoundWithPrecisionMaskedFloat32x16: + return rewriteValueAMD64_OpDiffWithRoundWithPrecisionMaskedFloat32x16(v) + case OpDiffWithRoundWithPrecisionMaskedFloat32x4: + return rewriteValueAMD64_OpDiffWithRoundWithPrecisionMaskedFloat32x4(v) + case OpDiffWithRoundWithPrecisionMaskedFloat32x8: + return rewriteValueAMD64_OpDiffWithRoundWithPrecisionMaskedFloat32x8(v) + case OpDiffWithRoundWithPrecisionMaskedFloat64x2: + return rewriteValueAMD64_OpDiffWithRoundWithPrecisionMaskedFloat64x2(v) + case OpDiffWithRoundWithPrecisionMaskedFloat64x4: + return rewriteValueAMD64_OpDiffWithRoundWithPrecisionMaskedFloat64x4(v) + case OpDiffWithRoundWithPrecisionMaskedFloat64x8: + return rewriteValueAMD64_OpDiffWithRoundWithPrecisionMaskedFloat64x8(v) case OpDiffWithTruncWithPrecisionFloat32x16: return rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat32x16(v) case OpDiffWithTruncWithPrecisionFloat32x4: @@ -1124,6 +1340,18 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat64x4(v) case OpDiffWithTruncWithPrecisionFloat64x8: return 
rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat64x8(v) + case OpDiffWithTruncWithPrecisionMaskedFloat32x16: + return rewriteValueAMD64_OpDiffWithTruncWithPrecisionMaskedFloat32x16(v) + case OpDiffWithTruncWithPrecisionMaskedFloat32x4: + return rewriteValueAMD64_OpDiffWithTruncWithPrecisionMaskedFloat32x4(v) + case OpDiffWithTruncWithPrecisionMaskedFloat32x8: + return rewriteValueAMD64_OpDiffWithTruncWithPrecisionMaskedFloat32x8(v) + case OpDiffWithTruncWithPrecisionMaskedFloat64x2: + return rewriteValueAMD64_OpDiffWithTruncWithPrecisionMaskedFloat64x2(v) + case OpDiffWithTruncWithPrecisionMaskedFloat64x4: + return rewriteValueAMD64_OpDiffWithTruncWithPrecisionMaskedFloat64x4(v) + case OpDiffWithTruncWithPrecisionMaskedFloat64x8: + return rewriteValueAMD64_OpDiffWithTruncWithPrecisionMaskedFloat64x8(v) case OpDiv128u: v.Op = OpAMD64DIVQU2 return true @@ -1167,6 +1395,18 @@ func rewriteValueAMD64(v *Value) bool { case OpDivFloat64x8: v.Op = OpAMD64VDIVPD512 return true + case OpDivMaskedFloat32x16: + return rewriteValueAMD64_OpDivMaskedFloat32x16(v) + case OpDivMaskedFloat32x4: + return rewriteValueAMD64_OpDivMaskedFloat32x4(v) + case OpDivMaskedFloat32x8: + return rewriteValueAMD64_OpDivMaskedFloat32x8(v) + case OpDivMaskedFloat64x2: + return rewriteValueAMD64_OpDivMaskedFloat64x2(v) + case OpDivMaskedFloat64x4: + return rewriteValueAMD64_OpDivMaskedFloat64x4(v) + case OpDivMaskedFloat64x8: + return rewriteValueAMD64_OpDivMaskedFloat64x8(v) case OpDotProdBroadcastFloat64x2: return rewriteValueAMD64_OpDotProdBroadcastFloat64x2(v) case OpEq16: @@ -1229,6 +1469,66 @@ func rewriteValueAMD64(v *Value) bool { return true case OpEqualInt8x64: return rewriteValueAMD64_OpEqualInt8x64(v) + case OpEqualMaskedFloat32x16: + return rewriteValueAMD64_OpEqualMaskedFloat32x16(v) + case OpEqualMaskedFloat32x4: + return rewriteValueAMD64_OpEqualMaskedFloat32x4(v) + case OpEqualMaskedFloat32x8: + return rewriteValueAMD64_OpEqualMaskedFloat32x8(v) + case OpEqualMaskedFloat64x2: + return rewriteValueAMD64_OpEqualMaskedFloat64x2(v) + case OpEqualMaskedFloat64x4: + return rewriteValueAMD64_OpEqualMaskedFloat64x4(v) + case OpEqualMaskedFloat64x8: + return rewriteValueAMD64_OpEqualMaskedFloat64x8(v) + case OpEqualMaskedInt16x16: + return rewriteValueAMD64_OpEqualMaskedInt16x16(v) + case OpEqualMaskedInt16x32: + return rewriteValueAMD64_OpEqualMaskedInt16x32(v) + case OpEqualMaskedInt16x8: + return rewriteValueAMD64_OpEqualMaskedInt16x8(v) + case OpEqualMaskedInt32x16: + return rewriteValueAMD64_OpEqualMaskedInt32x16(v) + case OpEqualMaskedInt32x4: + return rewriteValueAMD64_OpEqualMaskedInt32x4(v) + case OpEqualMaskedInt32x8: + return rewriteValueAMD64_OpEqualMaskedInt32x8(v) + case OpEqualMaskedInt64x2: + return rewriteValueAMD64_OpEqualMaskedInt64x2(v) + case OpEqualMaskedInt64x4: + return rewriteValueAMD64_OpEqualMaskedInt64x4(v) + case OpEqualMaskedInt64x8: + return rewriteValueAMD64_OpEqualMaskedInt64x8(v) + case OpEqualMaskedInt8x16: + return rewriteValueAMD64_OpEqualMaskedInt8x16(v) + case OpEqualMaskedInt8x32: + return rewriteValueAMD64_OpEqualMaskedInt8x32(v) + case OpEqualMaskedInt8x64: + return rewriteValueAMD64_OpEqualMaskedInt8x64(v) + case OpEqualMaskedUint16x16: + return rewriteValueAMD64_OpEqualMaskedUint16x16(v) + case OpEqualMaskedUint16x32: + return rewriteValueAMD64_OpEqualMaskedUint16x32(v) + case OpEqualMaskedUint16x8: + return rewriteValueAMD64_OpEqualMaskedUint16x8(v) + case OpEqualMaskedUint32x16: + return rewriteValueAMD64_OpEqualMaskedUint32x16(v) + case OpEqualMaskedUint32x4: + return 
rewriteValueAMD64_OpEqualMaskedUint32x4(v) + case OpEqualMaskedUint32x8: + return rewriteValueAMD64_OpEqualMaskedUint32x8(v) + case OpEqualMaskedUint64x2: + return rewriteValueAMD64_OpEqualMaskedUint64x2(v) + case OpEqualMaskedUint64x4: + return rewriteValueAMD64_OpEqualMaskedUint64x4(v) + case OpEqualMaskedUint64x8: + return rewriteValueAMD64_OpEqualMaskedUint64x8(v) + case OpEqualMaskedUint8x16: + return rewriteValueAMD64_OpEqualMaskedUint8x16(v) + case OpEqualMaskedUint8x32: + return rewriteValueAMD64_OpEqualMaskedUint8x32(v) + case OpEqualMaskedUint8x64: + return rewriteValueAMD64_OpEqualMaskedUint8x64(v) case OpEqualUint16x16: return rewriteValueAMD64_OpEqualUint16x16(v) case OpEqualUint16x32: @@ -1277,6 +1577,18 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpFloorWithPrecisionFloat64x4(v) case OpFloorWithPrecisionFloat64x8: return rewriteValueAMD64_OpFloorWithPrecisionFloat64x8(v) + case OpFloorWithPrecisionMaskedFloat32x16: + return rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat32x16(v) + case OpFloorWithPrecisionMaskedFloat32x4: + return rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat32x4(v) + case OpFloorWithPrecisionMaskedFloat32x8: + return rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat32x8(v) + case OpFloorWithPrecisionMaskedFloat64x2: + return rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat64x2(v) + case OpFloorWithPrecisionMaskedFloat64x4: + return rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat64x4(v) + case OpFloorWithPrecisionMaskedFloat64x8: + return rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat64x8(v) case OpFusedMultiplyAddFloat32x16: v.Op = OpAMD64VFMADD213PS512 return true @@ -1295,6 +1607,18 @@ func rewriteValueAMD64(v *Value) bool { case OpFusedMultiplyAddFloat64x8: v.Op = OpAMD64VFMADD213PD512 return true + case OpFusedMultiplyAddMaskedFloat32x16: + return rewriteValueAMD64_OpFusedMultiplyAddMaskedFloat32x16(v) + case OpFusedMultiplyAddMaskedFloat32x4: + return rewriteValueAMD64_OpFusedMultiplyAddMaskedFloat32x4(v) + case OpFusedMultiplyAddMaskedFloat32x8: + return rewriteValueAMD64_OpFusedMultiplyAddMaskedFloat32x8(v) + case OpFusedMultiplyAddMaskedFloat64x2: + return rewriteValueAMD64_OpFusedMultiplyAddMaskedFloat64x2(v) + case OpFusedMultiplyAddMaskedFloat64x4: + return rewriteValueAMD64_OpFusedMultiplyAddMaskedFloat64x4(v) + case OpFusedMultiplyAddMaskedFloat64x8: + return rewriteValueAMD64_OpFusedMultiplyAddMaskedFloat64x8(v) case OpFusedMultiplyAddSubFloat32x16: v.Op = OpAMD64VFMADDSUB213PS512 return true @@ -1313,6 +1637,18 @@ func rewriteValueAMD64(v *Value) bool { case OpFusedMultiplyAddSubFloat64x8: v.Op = OpAMD64VFMADDSUB213PD512 return true + case OpFusedMultiplyAddSubMaskedFloat32x16: + return rewriteValueAMD64_OpFusedMultiplyAddSubMaskedFloat32x16(v) + case OpFusedMultiplyAddSubMaskedFloat32x4: + return rewriteValueAMD64_OpFusedMultiplyAddSubMaskedFloat32x4(v) + case OpFusedMultiplyAddSubMaskedFloat32x8: + return rewriteValueAMD64_OpFusedMultiplyAddSubMaskedFloat32x8(v) + case OpFusedMultiplyAddSubMaskedFloat64x2: + return rewriteValueAMD64_OpFusedMultiplyAddSubMaskedFloat64x2(v) + case OpFusedMultiplyAddSubMaskedFloat64x4: + return rewriteValueAMD64_OpFusedMultiplyAddSubMaskedFloat64x4(v) + case OpFusedMultiplyAddSubMaskedFloat64x8: + return rewriteValueAMD64_OpFusedMultiplyAddSubMaskedFloat64x8(v) case OpFusedMultiplySubAddFloat32x16: v.Op = OpAMD64VFMSUBADD213PS512 return true @@ -1331,18 +1667,48 @@ func rewriteValueAMD64(v *Value) bool { case OpFusedMultiplySubAddFloat64x8: v.Op = OpAMD64VFMSUBADD213PD512 
return true + case OpFusedMultiplySubAddMaskedFloat32x16: + return rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat32x16(v) + case OpFusedMultiplySubAddMaskedFloat32x4: + return rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat32x4(v) + case OpFusedMultiplySubAddMaskedFloat32x8: + return rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat32x8(v) + case OpFusedMultiplySubAddMaskedFloat64x2: + return rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat64x2(v) + case OpFusedMultiplySubAddMaskedFloat64x4: + return rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat64x4(v) + case OpFusedMultiplySubAddMaskedFloat64x8: + return rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat64x8(v) + case OpGaloisFieldAffineTransformInversedMaskedUint8x16: + return rewriteValueAMD64_OpGaloisFieldAffineTransformInversedMaskedUint8x16(v) + case OpGaloisFieldAffineTransformInversedMaskedUint8x32: + return rewriteValueAMD64_OpGaloisFieldAffineTransformInversedMaskedUint8x32(v) + case OpGaloisFieldAffineTransformInversedMaskedUint8x64: + return rewriteValueAMD64_OpGaloisFieldAffineTransformInversedMaskedUint8x64(v) case OpGaloisFieldAffineTransformInversedUint8x16: return rewriteValueAMD64_OpGaloisFieldAffineTransformInversedUint8x16(v) case OpGaloisFieldAffineTransformInversedUint8x32: return rewriteValueAMD64_OpGaloisFieldAffineTransformInversedUint8x32(v) case OpGaloisFieldAffineTransformInversedUint8x64: return rewriteValueAMD64_OpGaloisFieldAffineTransformInversedUint8x64(v) + case OpGaloisFieldAffineTransformMaskedUint8x16: + return rewriteValueAMD64_OpGaloisFieldAffineTransformMaskedUint8x16(v) + case OpGaloisFieldAffineTransformMaskedUint8x32: + return rewriteValueAMD64_OpGaloisFieldAffineTransformMaskedUint8x32(v) + case OpGaloisFieldAffineTransformMaskedUint8x64: + return rewriteValueAMD64_OpGaloisFieldAffineTransformMaskedUint8x64(v) case OpGaloisFieldAffineTransformUint8x16: return rewriteValueAMD64_OpGaloisFieldAffineTransformUint8x16(v) case OpGaloisFieldAffineTransformUint8x32: return rewriteValueAMD64_OpGaloisFieldAffineTransformUint8x32(v) case OpGaloisFieldAffineTransformUint8x64: return rewriteValueAMD64_OpGaloisFieldAffineTransformUint8x64(v) + case OpGaloisFieldMulMaskedUint8x16: + return rewriteValueAMD64_OpGaloisFieldMulMaskedUint8x16(v) + case OpGaloisFieldMulMaskedUint8x32: + return rewriteValueAMD64_OpGaloisFieldMulMaskedUint8x32(v) + case OpGaloisFieldMulMaskedUint8x64: + return rewriteValueAMD64_OpGaloisFieldMulMaskedUint8x64(v) case OpGaloisFieldMulUint8x16: v.Op = OpAMD64VGF2P8MULB128 return true @@ -1435,6 +1801,66 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpGreaterEqualInt8x32(v) case OpGreaterEqualInt8x64: return rewriteValueAMD64_OpGreaterEqualInt8x64(v) + case OpGreaterEqualMaskedFloat32x16: + return rewriteValueAMD64_OpGreaterEqualMaskedFloat32x16(v) + case OpGreaterEqualMaskedFloat32x4: + return rewriteValueAMD64_OpGreaterEqualMaskedFloat32x4(v) + case OpGreaterEqualMaskedFloat32x8: + return rewriteValueAMD64_OpGreaterEqualMaskedFloat32x8(v) + case OpGreaterEqualMaskedFloat64x2: + return rewriteValueAMD64_OpGreaterEqualMaskedFloat64x2(v) + case OpGreaterEqualMaskedFloat64x4: + return rewriteValueAMD64_OpGreaterEqualMaskedFloat64x4(v) + case OpGreaterEqualMaskedFloat64x8: + return rewriteValueAMD64_OpGreaterEqualMaskedFloat64x8(v) + case OpGreaterEqualMaskedInt16x16: + return rewriteValueAMD64_OpGreaterEqualMaskedInt16x16(v) + case OpGreaterEqualMaskedInt16x32: + return rewriteValueAMD64_OpGreaterEqualMaskedInt16x32(v) + case OpGreaterEqualMaskedInt16x8: 
+ return rewriteValueAMD64_OpGreaterEqualMaskedInt16x8(v) + case OpGreaterEqualMaskedInt32x16: + return rewriteValueAMD64_OpGreaterEqualMaskedInt32x16(v) + case OpGreaterEqualMaskedInt32x4: + return rewriteValueAMD64_OpGreaterEqualMaskedInt32x4(v) + case OpGreaterEqualMaskedInt32x8: + return rewriteValueAMD64_OpGreaterEqualMaskedInt32x8(v) + case OpGreaterEqualMaskedInt64x2: + return rewriteValueAMD64_OpGreaterEqualMaskedInt64x2(v) + case OpGreaterEqualMaskedInt64x4: + return rewriteValueAMD64_OpGreaterEqualMaskedInt64x4(v) + case OpGreaterEqualMaskedInt64x8: + return rewriteValueAMD64_OpGreaterEqualMaskedInt64x8(v) + case OpGreaterEqualMaskedInt8x16: + return rewriteValueAMD64_OpGreaterEqualMaskedInt8x16(v) + case OpGreaterEqualMaskedInt8x32: + return rewriteValueAMD64_OpGreaterEqualMaskedInt8x32(v) + case OpGreaterEqualMaskedInt8x64: + return rewriteValueAMD64_OpGreaterEqualMaskedInt8x64(v) + case OpGreaterEqualMaskedUint16x16: + return rewriteValueAMD64_OpGreaterEqualMaskedUint16x16(v) + case OpGreaterEqualMaskedUint16x32: + return rewriteValueAMD64_OpGreaterEqualMaskedUint16x32(v) + case OpGreaterEqualMaskedUint16x8: + return rewriteValueAMD64_OpGreaterEqualMaskedUint16x8(v) + case OpGreaterEqualMaskedUint32x16: + return rewriteValueAMD64_OpGreaterEqualMaskedUint32x16(v) + case OpGreaterEqualMaskedUint32x4: + return rewriteValueAMD64_OpGreaterEqualMaskedUint32x4(v) + case OpGreaterEqualMaskedUint32x8: + return rewriteValueAMD64_OpGreaterEqualMaskedUint32x8(v) + case OpGreaterEqualMaskedUint64x2: + return rewriteValueAMD64_OpGreaterEqualMaskedUint64x2(v) + case OpGreaterEqualMaskedUint64x4: + return rewriteValueAMD64_OpGreaterEqualMaskedUint64x4(v) + case OpGreaterEqualMaskedUint64x8: + return rewriteValueAMD64_OpGreaterEqualMaskedUint64x8(v) + case OpGreaterEqualMaskedUint8x16: + return rewriteValueAMD64_OpGreaterEqualMaskedUint8x16(v) + case OpGreaterEqualMaskedUint8x32: + return rewriteValueAMD64_OpGreaterEqualMaskedUint8x32(v) + case OpGreaterEqualMaskedUint8x64: + return rewriteValueAMD64_OpGreaterEqualMaskedUint8x64(v) case OpGreaterEqualUint16x16: return rewriteValueAMD64_OpGreaterEqualUint16x16(v) case OpGreaterEqualUint16x32: @@ -1502,6 +1928,66 @@ func rewriteValueAMD64(v *Value) bool { return true case OpGreaterInt8x64: return rewriteValueAMD64_OpGreaterInt8x64(v) + case OpGreaterMaskedFloat32x16: + return rewriteValueAMD64_OpGreaterMaskedFloat32x16(v) + case OpGreaterMaskedFloat32x4: + return rewriteValueAMD64_OpGreaterMaskedFloat32x4(v) + case OpGreaterMaskedFloat32x8: + return rewriteValueAMD64_OpGreaterMaskedFloat32x8(v) + case OpGreaterMaskedFloat64x2: + return rewriteValueAMD64_OpGreaterMaskedFloat64x2(v) + case OpGreaterMaskedFloat64x4: + return rewriteValueAMD64_OpGreaterMaskedFloat64x4(v) + case OpGreaterMaskedFloat64x8: + return rewriteValueAMD64_OpGreaterMaskedFloat64x8(v) + case OpGreaterMaskedInt16x16: + return rewriteValueAMD64_OpGreaterMaskedInt16x16(v) + case OpGreaterMaskedInt16x32: + return rewriteValueAMD64_OpGreaterMaskedInt16x32(v) + case OpGreaterMaskedInt16x8: + return rewriteValueAMD64_OpGreaterMaskedInt16x8(v) + case OpGreaterMaskedInt32x16: + return rewriteValueAMD64_OpGreaterMaskedInt32x16(v) + case OpGreaterMaskedInt32x4: + return rewriteValueAMD64_OpGreaterMaskedInt32x4(v) + case OpGreaterMaskedInt32x8: + return rewriteValueAMD64_OpGreaterMaskedInt32x8(v) + case OpGreaterMaskedInt64x2: + return rewriteValueAMD64_OpGreaterMaskedInt64x2(v) + case OpGreaterMaskedInt64x4: + return rewriteValueAMD64_OpGreaterMaskedInt64x4(v) + case 
OpGreaterMaskedInt64x8: + return rewriteValueAMD64_OpGreaterMaskedInt64x8(v) + case OpGreaterMaskedInt8x16: + return rewriteValueAMD64_OpGreaterMaskedInt8x16(v) + case OpGreaterMaskedInt8x32: + return rewriteValueAMD64_OpGreaterMaskedInt8x32(v) + case OpGreaterMaskedInt8x64: + return rewriteValueAMD64_OpGreaterMaskedInt8x64(v) + case OpGreaterMaskedUint16x16: + return rewriteValueAMD64_OpGreaterMaskedUint16x16(v) + case OpGreaterMaskedUint16x32: + return rewriteValueAMD64_OpGreaterMaskedUint16x32(v) + case OpGreaterMaskedUint16x8: + return rewriteValueAMD64_OpGreaterMaskedUint16x8(v) + case OpGreaterMaskedUint32x16: + return rewriteValueAMD64_OpGreaterMaskedUint32x16(v) + case OpGreaterMaskedUint32x4: + return rewriteValueAMD64_OpGreaterMaskedUint32x4(v) + case OpGreaterMaskedUint32x8: + return rewriteValueAMD64_OpGreaterMaskedUint32x8(v) + case OpGreaterMaskedUint64x2: + return rewriteValueAMD64_OpGreaterMaskedUint64x2(v) + case OpGreaterMaskedUint64x4: + return rewriteValueAMD64_OpGreaterMaskedUint64x4(v) + case OpGreaterMaskedUint64x8: + return rewriteValueAMD64_OpGreaterMaskedUint64x8(v) + case OpGreaterMaskedUint8x16: + return rewriteValueAMD64_OpGreaterMaskedUint8x16(v) + case OpGreaterMaskedUint8x32: + return rewriteValueAMD64_OpGreaterMaskedUint8x32(v) + case OpGreaterMaskedUint8x64: + return rewriteValueAMD64_OpGreaterMaskedUint8x64(v) case OpGreaterUint16x16: return rewriteValueAMD64_OpGreaterUint16x16(v) case OpGreaterUint16x32: @@ -1557,6 +2043,18 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpIsNanFloat64x4(v) case OpIsNanFloat64x8: return rewriteValueAMD64_OpIsNanFloat64x8(v) + case OpIsNanMaskedFloat32x16: + return rewriteValueAMD64_OpIsNanMaskedFloat32x16(v) + case OpIsNanMaskedFloat32x4: + return rewriteValueAMD64_OpIsNanMaskedFloat32x4(v) + case OpIsNanMaskedFloat32x8: + return rewriteValueAMD64_OpIsNanMaskedFloat32x8(v) + case OpIsNanMaskedFloat64x2: + return rewriteValueAMD64_OpIsNanMaskedFloat64x2(v) + case OpIsNanMaskedFloat64x4: + return rewriteValueAMD64_OpIsNanMaskedFloat64x4(v) + case OpIsNanMaskedFloat64x8: + return rewriteValueAMD64_OpIsNanMaskedFloat64x8(v) case OpIsNonNil: return rewriteValueAMD64_OpIsNonNil(v) case OpIsSliceInBounds: @@ -1637,6 +2135,66 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpLessEqualInt8x32(v) case OpLessEqualInt8x64: return rewriteValueAMD64_OpLessEqualInt8x64(v) + case OpLessEqualMaskedFloat32x16: + return rewriteValueAMD64_OpLessEqualMaskedFloat32x16(v) + case OpLessEqualMaskedFloat32x4: + return rewriteValueAMD64_OpLessEqualMaskedFloat32x4(v) + case OpLessEqualMaskedFloat32x8: + return rewriteValueAMD64_OpLessEqualMaskedFloat32x8(v) + case OpLessEqualMaskedFloat64x2: + return rewriteValueAMD64_OpLessEqualMaskedFloat64x2(v) + case OpLessEqualMaskedFloat64x4: + return rewriteValueAMD64_OpLessEqualMaskedFloat64x4(v) + case OpLessEqualMaskedFloat64x8: + return rewriteValueAMD64_OpLessEqualMaskedFloat64x8(v) + case OpLessEqualMaskedInt16x16: + return rewriteValueAMD64_OpLessEqualMaskedInt16x16(v) + case OpLessEqualMaskedInt16x32: + return rewriteValueAMD64_OpLessEqualMaskedInt16x32(v) + case OpLessEqualMaskedInt16x8: + return rewriteValueAMD64_OpLessEqualMaskedInt16x8(v) + case OpLessEqualMaskedInt32x16: + return rewriteValueAMD64_OpLessEqualMaskedInt32x16(v) + case OpLessEqualMaskedInt32x4: + return rewriteValueAMD64_OpLessEqualMaskedInt32x4(v) + case OpLessEqualMaskedInt32x8: + return rewriteValueAMD64_OpLessEqualMaskedInt32x8(v) + case OpLessEqualMaskedInt64x2: + return 
rewriteValueAMD64_OpLessEqualMaskedInt64x2(v) + case OpLessEqualMaskedInt64x4: + return rewriteValueAMD64_OpLessEqualMaskedInt64x4(v) + case OpLessEqualMaskedInt64x8: + return rewriteValueAMD64_OpLessEqualMaskedInt64x8(v) + case OpLessEqualMaskedInt8x16: + return rewriteValueAMD64_OpLessEqualMaskedInt8x16(v) + case OpLessEqualMaskedInt8x32: + return rewriteValueAMD64_OpLessEqualMaskedInt8x32(v) + case OpLessEqualMaskedInt8x64: + return rewriteValueAMD64_OpLessEqualMaskedInt8x64(v) + case OpLessEqualMaskedUint16x16: + return rewriteValueAMD64_OpLessEqualMaskedUint16x16(v) + case OpLessEqualMaskedUint16x32: + return rewriteValueAMD64_OpLessEqualMaskedUint16x32(v) + case OpLessEqualMaskedUint16x8: + return rewriteValueAMD64_OpLessEqualMaskedUint16x8(v) + case OpLessEqualMaskedUint32x16: + return rewriteValueAMD64_OpLessEqualMaskedUint32x16(v) + case OpLessEqualMaskedUint32x4: + return rewriteValueAMD64_OpLessEqualMaskedUint32x4(v) + case OpLessEqualMaskedUint32x8: + return rewriteValueAMD64_OpLessEqualMaskedUint32x8(v) + case OpLessEqualMaskedUint64x2: + return rewriteValueAMD64_OpLessEqualMaskedUint64x2(v) + case OpLessEqualMaskedUint64x4: + return rewriteValueAMD64_OpLessEqualMaskedUint64x4(v) + case OpLessEqualMaskedUint64x8: + return rewriteValueAMD64_OpLessEqualMaskedUint64x8(v) + case OpLessEqualMaskedUint8x16: + return rewriteValueAMD64_OpLessEqualMaskedUint8x16(v) + case OpLessEqualMaskedUint8x32: + return rewriteValueAMD64_OpLessEqualMaskedUint8x32(v) + case OpLessEqualMaskedUint8x64: + return rewriteValueAMD64_OpLessEqualMaskedUint8x64(v) case OpLessEqualUint16x16: return rewriteValueAMD64_OpLessEqualUint16x16(v) case OpLessEqualUint16x32: @@ -1697,6 +2255,66 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpLessInt8x32(v) case OpLessInt8x64: return rewriteValueAMD64_OpLessInt8x64(v) + case OpLessMaskedFloat32x16: + return rewriteValueAMD64_OpLessMaskedFloat32x16(v) + case OpLessMaskedFloat32x4: + return rewriteValueAMD64_OpLessMaskedFloat32x4(v) + case OpLessMaskedFloat32x8: + return rewriteValueAMD64_OpLessMaskedFloat32x8(v) + case OpLessMaskedFloat64x2: + return rewriteValueAMD64_OpLessMaskedFloat64x2(v) + case OpLessMaskedFloat64x4: + return rewriteValueAMD64_OpLessMaskedFloat64x4(v) + case OpLessMaskedFloat64x8: + return rewriteValueAMD64_OpLessMaskedFloat64x8(v) + case OpLessMaskedInt16x16: + return rewriteValueAMD64_OpLessMaskedInt16x16(v) + case OpLessMaskedInt16x32: + return rewriteValueAMD64_OpLessMaskedInt16x32(v) + case OpLessMaskedInt16x8: + return rewriteValueAMD64_OpLessMaskedInt16x8(v) + case OpLessMaskedInt32x16: + return rewriteValueAMD64_OpLessMaskedInt32x16(v) + case OpLessMaskedInt32x4: + return rewriteValueAMD64_OpLessMaskedInt32x4(v) + case OpLessMaskedInt32x8: + return rewriteValueAMD64_OpLessMaskedInt32x8(v) + case OpLessMaskedInt64x2: + return rewriteValueAMD64_OpLessMaskedInt64x2(v) + case OpLessMaskedInt64x4: + return rewriteValueAMD64_OpLessMaskedInt64x4(v) + case OpLessMaskedInt64x8: + return rewriteValueAMD64_OpLessMaskedInt64x8(v) + case OpLessMaskedInt8x16: + return rewriteValueAMD64_OpLessMaskedInt8x16(v) + case OpLessMaskedInt8x32: + return rewriteValueAMD64_OpLessMaskedInt8x32(v) + case OpLessMaskedInt8x64: + return rewriteValueAMD64_OpLessMaskedInt8x64(v) + case OpLessMaskedUint16x16: + return rewriteValueAMD64_OpLessMaskedUint16x16(v) + case OpLessMaskedUint16x32: + return rewriteValueAMD64_OpLessMaskedUint16x32(v) + case OpLessMaskedUint16x8: + return rewriteValueAMD64_OpLessMaskedUint16x8(v) + case OpLessMaskedUint32x16: + 
return rewriteValueAMD64_OpLessMaskedUint32x16(v) + case OpLessMaskedUint32x4: + return rewriteValueAMD64_OpLessMaskedUint32x4(v) + case OpLessMaskedUint32x8: + return rewriteValueAMD64_OpLessMaskedUint32x8(v) + case OpLessMaskedUint64x2: + return rewriteValueAMD64_OpLessMaskedUint64x2(v) + case OpLessMaskedUint64x4: + return rewriteValueAMD64_OpLessMaskedUint64x4(v) + case OpLessMaskedUint64x8: + return rewriteValueAMD64_OpLessMaskedUint64x8(v) + case OpLessMaskedUint8x16: + return rewriteValueAMD64_OpLessMaskedUint8x16(v) + case OpLessMaskedUint8x32: + return rewriteValueAMD64_OpLessMaskedUint8x32(v) + case OpLessMaskedUint8x64: + return rewriteValueAMD64_OpLessMaskedUint8x64(v) case OpLessUint16x16: return rewriteValueAMD64_OpLessUint16x16(v) case OpLessUint16x32: @@ -1757,1536 +2375,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpLsh8x64(v) case OpLsh8x8: return rewriteValueAMD64_OpLsh8x8(v) - case OpMaskedAbsoluteInt16x16: - return rewriteValueAMD64_OpMaskedAbsoluteInt16x16(v) - case OpMaskedAbsoluteInt16x32: - return rewriteValueAMD64_OpMaskedAbsoluteInt16x32(v) - case OpMaskedAbsoluteInt16x8: - return rewriteValueAMD64_OpMaskedAbsoluteInt16x8(v) - case OpMaskedAbsoluteInt32x16: - return rewriteValueAMD64_OpMaskedAbsoluteInt32x16(v) - case OpMaskedAbsoluteInt32x4: - return rewriteValueAMD64_OpMaskedAbsoluteInt32x4(v) - case OpMaskedAbsoluteInt32x8: - return rewriteValueAMD64_OpMaskedAbsoluteInt32x8(v) - case OpMaskedAbsoluteInt64x2: - return rewriteValueAMD64_OpMaskedAbsoluteInt64x2(v) - case OpMaskedAbsoluteInt64x4: - return rewriteValueAMD64_OpMaskedAbsoluteInt64x4(v) - case OpMaskedAbsoluteInt64x8: - return rewriteValueAMD64_OpMaskedAbsoluteInt64x8(v) - case OpMaskedAbsoluteInt8x16: - return rewriteValueAMD64_OpMaskedAbsoluteInt8x16(v) - case OpMaskedAbsoluteInt8x32: - return rewriteValueAMD64_OpMaskedAbsoluteInt8x32(v) - case OpMaskedAbsoluteInt8x64: - return rewriteValueAMD64_OpMaskedAbsoluteInt8x64(v) - case OpMaskedAddFloat32x16: - return rewriteValueAMD64_OpMaskedAddFloat32x16(v) - case OpMaskedAddFloat32x4: - return rewriteValueAMD64_OpMaskedAddFloat32x4(v) - case OpMaskedAddFloat32x8: - return rewriteValueAMD64_OpMaskedAddFloat32x8(v) - case OpMaskedAddFloat64x2: - return rewriteValueAMD64_OpMaskedAddFloat64x2(v) - case OpMaskedAddFloat64x4: - return rewriteValueAMD64_OpMaskedAddFloat64x4(v) - case OpMaskedAddFloat64x8: - return rewriteValueAMD64_OpMaskedAddFloat64x8(v) - case OpMaskedAddInt16x16: - return rewriteValueAMD64_OpMaskedAddInt16x16(v) - case OpMaskedAddInt16x32: - return rewriteValueAMD64_OpMaskedAddInt16x32(v) - case OpMaskedAddInt16x8: - return rewriteValueAMD64_OpMaskedAddInt16x8(v) - case OpMaskedAddInt32x16: - return rewriteValueAMD64_OpMaskedAddInt32x16(v) - case OpMaskedAddInt32x4: - return rewriteValueAMD64_OpMaskedAddInt32x4(v) - case OpMaskedAddInt32x8: - return rewriteValueAMD64_OpMaskedAddInt32x8(v) - case OpMaskedAddInt64x2: - return rewriteValueAMD64_OpMaskedAddInt64x2(v) - case OpMaskedAddInt64x4: - return rewriteValueAMD64_OpMaskedAddInt64x4(v) - case OpMaskedAddInt64x8: - return rewriteValueAMD64_OpMaskedAddInt64x8(v) - case OpMaskedAddInt8x16: - return rewriteValueAMD64_OpMaskedAddInt8x16(v) - case OpMaskedAddInt8x32: - return rewriteValueAMD64_OpMaskedAddInt8x32(v) - case OpMaskedAddInt8x64: - return rewriteValueAMD64_OpMaskedAddInt8x64(v) - case OpMaskedAddUint16x16: - return rewriteValueAMD64_OpMaskedAddUint16x16(v) - case OpMaskedAddUint16x32: - return rewriteValueAMD64_OpMaskedAddUint16x32(v) - case 
OpMaskedAddUint16x8: - return rewriteValueAMD64_OpMaskedAddUint16x8(v) - case OpMaskedAddUint32x16: - return rewriteValueAMD64_OpMaskedAddUint32x16(v) - case OpMaskedAddUint32x4: - return rewriteValueAMD64_OpMaskedAddUint32x4(v) - case OpMaskedAddUint32x8: - return rewriteValueAMD64_OpMaskedAddUint32x8(v) - case OpMaskedAddUint64x2: - return rewriteValueAMD64_OpMaskedAddUint64x2(v) - case OpMaskedAddUint64x4: - return rewriteValueAMD64_OpMaskedAddUint64x4(v) - case OpMaskedAddUint64x8: - return rewriteValueAMD64_OpMaskedAddUint64x8(v) - case OpMaskedAddUint8x16: - return rewriteValueAMD64_OpMaskedAddUint8x16(v) - case OpMaskedAddUint8x32: - return rewriteValueAMD64_OpMaskedAddUint8x32(v) - case OpMaskedAddUint8x64: - return rewriteValueAMD64_OpMaskedAddUint8x64(v) - case OpMaskedAndInt32x16: - return rewriteValueAMD64_OpMaskedAndInt32x16(v) - case OpMaskedAndInt32x4: - return rewriteValueAMD64_OpMaskedAndInt32x4(v) - case OpMaskedAndInt32x8: - return rewriteValueAMD64_OpMaskedAndInt32x8(v) - case OpMaskedAndInt64x2: - return rewriteValueAMD64_OpMaskedAndInt64x2(v) - case OpMaskedAndInt64x4: - return rewriteValueAMD64_OpMaskedAndInt64x4(v) - case OpMaskedAndInt64x8: - return rewriteValueAMD64_OpMaskedAndInt64x8(v) - case OpMaskedAndNotInt32x16: - return rewriteValueAMD64_OpMaskedAndNotInt32x16(v) - case OpMaskedAndNotInt32x4: - return rewriteValueAMD64_OpMaskedAndNotInt32x4(v) - case OpMaskedAndNotInt32x8: - return rewriteValueAMD64_OpMaskedAndNotInt32x8(v) - case OpMaskedAndNotInt64x2: - return rewriteValueAMD64_OpMaskedAndNotInt64x2(v) - case OpMaskedAndNotInt64x4: - return rewriteValueAMD64_OpMaskedAndNotInt64x4(v) - case OpMaskedAndNotInt64x8: - return rewriteValueAMD64_OpMaskedAndNotInt64x8(v) - case OpMaskedAndNotUint32x16: - return rewriteValueAMD64_OpMaskedAndNotUint32x16(v) - case OpMaskedAndNotUint32x4: - return rewriteValueAMD64_OpMaskedAndNotUint32x4(v) - case OpMaskedAndNotUint32x8: - return rewriteValueAMD64_OpMaskedAndNotUint32x8(v) - case OpMaskedAndNotUint64x2: - return rewriteValueAMD64_OpMaskedAndNotUint64x2(v) - case OpMaskedAndNotUint64x4: - return rewriteValueAMD64_OpMaskedAndNotUint64x4(v) - case OpMaskedAndNotUint64x8: - return rewriteValueAMD64_OpMaskedAndNotUint64x8(v) - case OpMaskedAndUint32x16: - return rewriteValueAMD64_OpMaskedAndUint32x16(v) - case OpMaskedAndUint32x4: - return rewriteValueAMD64_OpMaskedAndUint32x4(v) - case OpMaskedAndUint32x8: - return rewriteValueAMD64_OpMaskedAndUint32x8(v) - case OpMaskedAndUint64x2: - return rewriteValueAMD64_OpMaskedAndUint64x2(v) - case OpMaskedAndUint64x4: - return rewriteValueAMD64_OpMaskedAndUint64x4(v) - case OpMaskedAndUint64x8: - return rewriteValueAMD64_OpMaskedAndUint64x8(v) - case OpMaskedApproximateReciprocalFloat32x16: - return rewriteValueAMD64_OpMaskedApproximateReciprocalFloat32x16(v) - case OpMaskedApproximateReciprocalFloat32x4: - return rewriteValueAMD64_OpMaskedApproximateReciprocalFloat32x4(v) - case OpMaskedApproximateReciprocalFloat32x8: - return rewriteValueAMD64_OpMaskedApproximateReciprocalFloat32x8(v) - case OpMaskedApproximateReciprocalFloat64x2: - return rewriteValueAMD64_OpMaskedApproximateReciprocalFloat64x2(v) - case OpMaskedApproximateReciprocalFloat64x4: - return rewriteValueAMD64_OpMaskedApproximateReciprocalFloat64x4(v) - case OpMaskedApproximateReciprocalFloat64x8: - return rewriteValueAMD64_OpMaskedApproximateReciprocalFloat64x8(v) - case OpMaskedApproximateReciprocalOfSqrtFloat32x16: - return rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat32x16(v) - case 
OpMaskedApproximateReciprocalOfSqrtFloat32x4: - return rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat32x4(v) - case OpMaskedApproximateReciprocalOfSqrtFloat32x8: - return rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat32x8(v) - case OpMaskedApproximateReciprocalOfSqrtFloat64x2: - return rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat64x2(v) - case OpMaskedApproximateReciprocalOfSqrtFloat64x4: - return rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat64x4(v) - case OpMaskedApproximateReciprocalOfSqrtFloat64x8: - return rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat64x8(v) - case OpMaskedAverageUint16x16: - return rewriteValueAMD64_OpMaskedAverageUint16x16(v) - case OpMaskedAverageUint16x32: - return rewriteValueAMD64_OpMaskedAverageUint16x32(v) - case OpMaskedAverageUint16x8: - return rewriteValueAMD64_OpMaskedAverageUint16x8(v) - case OpMaskedAverageUint8x16: - return rewriteValueAMD64_OpMaskedAverageUint8x16(v) - case OpMaskedAverageUint8x32: - return rewriteValueAMD64_OpMaskedAverageUint8x32(v) - case OpMaskedAverageUint8x64: - return rewriteValueAMD64_OpMaskedAverageUint8x64(v) - case OpMaskedCeilWithPrecisionFloat32x16: - return rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat32x16(v) - case OpMaskedCeilWithPrecisionFloat32x4: - return rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat32x4(v) - case OpMaskedCeilWithPrecisionFloat32x8: - return rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat32x8(v) - case OpMaskedCeilWithPrecisionFloat64x2: - return rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat64x2(v) - case OpMaskedCeilWithPrecisionFloat64x4: - return rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat64x4(v) - case OpMaskedCeilWithPrecisionFloat64x8: - return rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat64x8(v) - case OpMaskedDiffWithCeilWithPrecisionFloat32x16: - return rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat32x16(v) - case OpMaskedDiffWithCeilWithPrecisionFloat32x4: - return rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat32x4(v) - case OpMaskedDiffWithCeilWithPrecisionFloat32x8: - return rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat32x8(v) - case OpMaskedDiffWithCeilWithPrecisionFloat64x2: - return rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat64x2(v) - case OpMaskedDiffWithCeilWithPrecisionFloat64x4: - return rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat64x4(v) - case OpMaskedDiffWithCeilWithPrecisionFloat64x8: - return rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat64x8(v) - case OpMaskedDiffWithFloorWithPrecisionFloat32x16: - return rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat32x16(v) - case OpMaskedDiffWithFloorWithPrecisionFloat32x4: - return rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat32x4(v) - case OpMaskedDiffWithFloorWithPrecisionFloat32x8: - return rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat32x8(v) - case OpMaskedDiffWithFloorWithPrecisionFloat64x2: - return rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat64x2(v) - case OpMaskedDiffWithFloorWithPrecisionFloat64x4: - return rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat64x4(v) - case OpMaskedDiffWithFloorWithPrecisionFloat64x8: - return rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat64x8(v) - case OpMaskedDiffWithRoundWithPrecisionFloat32x16: - return rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat32x16(v) - case OpMaskedDiffWithRoundWithPrecisionFloat32x4: - return 
rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat32x4(v) - case OpMaskedDiffWithRoundWithPrecisionFloat32x8: - return rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat32x8(v) - case OpMaskedDiffWithRoundWithPrecisionFloat64x2: - return rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat64x2(v) - case OpMaskedDiffWithRoundWithPrecisionFloat64x4: - return rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat64x4(v) - case OpMaskedDiffWithRoundWithPrecisionFloat64x8: - return rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat64x8(v) - case OpMaskedDiffWithTruncWithPrecisionFloat32x16: - return rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat32x16(v) - case OpMaskedDiffWithTruncWithPrecisionFloat32x4: - return rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat32x4(v) - case OpMaskedDiffWithTruncWithPrecisionFloat32x8: - return rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat32x8(v) - case OpMaskedDiffWithTruncWithPrecisionFloat64x2: - return rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat64x2(v) - case OpMaskedDiffWithTruncWithPrecisionFloat64x4: - return rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat64x4(v) - case OpMaskedDiffWithTruncWithPrecisionFloat64x8: - return rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat64x8(v) - case OpMaskedDivFloat32x16: - return rewriteValueAMD64_OpMaskedDivFloat32x16(v) - case OpMaskedDivFloat32x4: - return rewriteValueAMD64_OpMaskedDivFloat32x4(v) - case OpMaskedDivFloat32x8: - return rewriteValueAMD64_OpMaskedDivFloat32x8(v) - case OpMaskedDivFloat64x2: - return rewriteValueAMD64_OpMaskedDivFloat64x2(v) - case OpMaskedDivFloat64x4: - return rewriteValueAMD64_OpMaskedDivFloat64x4(v) - case OpMaskedDivFloat64x8: - return rewriteValueAMD64_OpMaskedDivFloat64x8(v) - case OpMaskedEqualFloat32x16: - return rewriteValueAMD64_OpMaskedEqualFloat32x16(v) - case OpMaskedEqualFloat32x4: - return rewriteValueAMD64_OpMaskedEqualFloat32x4(v) - case OpMaskedEqualFloat32x8: - return rewriteValueAMD64_OpMaskedEqualFloat32x8(v) - case OpMaskedEqualFloat64x2: - return rewriteValueAMD64_OpMaskedEqualFloat64x2(v) - case OpMaskedEqualFloat64x4: - return rewriteValueAMD64_OpMaskedEqualFloat64x4(v) - case OpMaskedEqualFloat64x8: - return rewriteValueAMD64_OpMaskedEqualFloat64x8(v) - case OpMaskedEqualInt16x16: - return rewriteValueAMD64_OpMaskedEqualInt16x16(v) - case OpMaskedEqualInt16x32: - return rewriteValueAMD64_OpMaskedEqualInt16x32(v) - case OpMaskedEqualInt16x8: - return rewriteValueAMD64_OpMaskedEqualInt16x8(v) - case OpMaskedEqualInt32x16: - return rewriteValueAMD64_OpMaskedEqualInt32x16(v) - case OpMaskedEqualInt32x4: - return rewriteValueAMD64_OpMaskedEqualInt32x4(v) - case OpMaskedEqualInt32x8: - return rewriteValueAMD64_OpMaskedEqualInt32x8(v) - case OpMaskedEqualInt64x2: - return rewriteValueAMD64_OpMaskedEqualInt64x2(v) - case OpMaskedEqualInt64x4: - return rewriteValueAMD64_OpMaskedEqualInt64x4(v) - case OpMaskedEqualInt64x8: - return rewriteValueAMD64_OpMaskedEqualInt64x8(v) - case OpMaskedEqualInt8x16: - return rewriteValueAMD64_OpMaskedEqualInt8x16(v) - case OpMaskedEqualInt8x32: - return rewriteValueAMD64_OpMaskedEqualInt8x32(v) - case OpMaskedEqualInt8x64: - return rewriteValueAMD64_OpMaskedEqualInt8x64(v) - case OpMaskedEqualUint16x16: - return rewriteValueAMD64_OpMaskedEqualUint16x16(v) - case OpMaskedEqualUint16x32: - return rewriteValueAMD64_OpMaskedEqualUint16x32(v) - case OpMaskedEqualUint16x8: - return rewriteValueAMD64_OpMaskedEqualUint16x8(v) - case 
OpMaskedEqualUint32x16: - return rewriteValueAMD64_OpMaskedEqualUint32x16(v) - case OpMaskedEqualUint32x4: - return rewriteValueAMD64_OpMaskedEqualUint32x4(v) - case OpMaskedEqualUint32x8: - return rewriteValueAMD64_OpMaskedEqualUint32x8(v) - case OpMaskedEqualUint64x2: - return rewriteValueAMD64_OpMaskedEqualUint64x2(v) - case OpMaskedEqualUint64x4: - return rewriteValueAMD64_OpMaskedEqualUint64x4(v) - case OpMaskedEqualUint64x8: - return rewriteValueAMD64_OpMaskedEqualUint64x8(v) - case OpMaskedEqualUint8x16: - return rewriteValueAMD64_OpMaskedEqualUint8x16(v) - case OpMaskedEqualUint8x32: - return rewriteValueAMD64_OpMaskedEqualUint8x32(v) - case OpMaskedEqualUint8x64: - return rewriteValueAMD64_OpMaskedEqualUint8x64(v) - case OpMaskedFloorWithPrecisionFloat32x16: - return rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat32x16(v) - case OpMaskedFloorWithPrecisionFloat32x4: - return rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat32x4(v) - case OpMaskedFloorWithPrecisionFloat32x8: - return rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat32x8(v) - case OpMaskedFloorWithPrecisionFloat64x2: - return rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat64x2(v) - case OpMaskedFloorWithPrecisionFloat64x4: - return rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat64x4(v) - case OpMaskedFloorWithPrecisionFloat64x8: - return rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat64x8(v) - case OpMaskedFusedMultiplyAddFloat32x16: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat32x16(v) - case OpMaskedFusedMultiplyAddFloat32x4: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat32x4(v) - case OpMaskedFusedMultiplyAddFloat32x8: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat32x8(v) - case OpMaskedFusedMultiplyAddFloat64x2: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat64x2(v) - case OpMaskedFusedMultiplyAddFloat64x4: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat64x4(v) - case OpMaskedFusedMultiplyAddFloat64x8: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat64x8(v) - case OpMaskedFusedMultiplyAddSubFloat32x16: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat32x16(v) - case OpMaskedFusedMultiplyAddSubFloat32x4: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat32x4(v) - case OpMaskedFusedMultiplyAddSubFloat32x8: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat32x8(v) - case OpMaskedFusedMultiplyAddSubFloat64x2: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat64x2(v) - case OpMaskedFusedMultiplyAddSubFloat64x4: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat64x4(v) - case OpMaskedFusedMultiplyAddSubFloat64x8: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat64x8(v) - case OpMaskedFusedMultiplySubAddFloat32x16: - return rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat32x16(v) - case OpMaskedFusedMultiplySubAddFloat32x4: - return rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat32x4(v) - case OpMaskedFusedMultiplySubAddFloat32x8: - return rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat32x8(v) - case OpMaskedFusedMultiplySubAddFloat64x2: - return rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat64x2(v) - case OpMaskedFusedMultiplySubAddFloat64x4: - return rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat64x4(v) - case OpMaskedFusedMultiplySubAddFloat64x8: - return rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat64x8(v) - case OpMaskedGaloisFieldAffineTransformInversedUint8x16: - return rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformInversedUint8x16(v) - case 
OpMaskedGaloisFieldAffineTransformInversedUint8x32: - return rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformInversedUint8x32(v) - case OpMaskedGaloisFieldAffineTransformInversedUint8x64: - return rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformInversedUint8x64(v) - case OpMaskedGaloisFieldAffineTransformUint8x16: - return rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformUint8x16(v) - case OpMaskedGaloisFieldAffineTransformUint8x32: - return rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformUint8x32(v) - case OpMaskedGaloisFieldAffineTransformUint8x64: - return rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformUint8x64(v) - case OpMaskedGaloisFieldMulUint8x16: - return rewriteValueAMD64_OpMaskedGaloisFieldMulUint8x16(v) - case OpMaskedGaloisFieldMulUint8x32: - return rewriteValueAMD64_OpMaskedGaloisFieldMulUint8x32(v) - case OpMaskedGaloisFieldMulUint8x64: - return rewriteValueAMD64_OpMaskedGaloisFieldMulUint8x64(v) - case OpMaskedGreaterEqualFloat32x16: - return rewriteValueAMD64_OpMaskedGreaterEqualFloat32x16(v) - case OpMaskedGreaterEqualFloat32x4: - return rewriteValueAMD64_OpMaskedGreaterEqualFloat32x4(v) - case OpMaskedGreaterEqualFloat32x8: - return rewriteValueAMD64_OpMaskedGreaterEqualFloat32x8(v) - case OpMaskedGreaterEqualFloat64x2: - return rewriteValueAMD64_OpMaskedGreaterEqualFloat64x2(v) - case OpMaskedGreaterEqualFloat64x4: - return rewriteValueAMD64_OpMaskedGreaterEqualFloat64x4(v) - case OpMaskedGreaterEqualFloat64x8: - return rewriteValueAMD64_OpMaskedGreaterEqualFloat64x8(v) - case OpMaskedGreaterEqualInt16x16: - return rewriteValueAMD64_OpMaskedGreaterEqualInt16x16(v) - case OpMaskedGreaterEqualInt16x32: - return rewriteValueAMD64_OpMaskedGreaterEqualInt16x32(v) - case OpMaskedGreaterEqualInt16x8: - return rewriteValueAMD64_OpMaskedGreaterEqualInt16x8(v) - case OpMaskedGreaterEqualInt32x16: - return rewriteValueAMD64_OpMaskedGreaterEqualInt32x16(v) - case OpMaskedGreaterEqualInt32x4: - return rewriteValueAMD64_OpMaskedGreaterEqualInt32x4(v) - case OpMaskedGreaterEqualInt32x8: - return rewriteValueAMD64_OpMaskedGreaterEqualInt32x8(v) - case OpMaskedGreaterEqualInt64x2: - return rewriteValueAMD64_OpMaskedGreaterEqualInt64x2(v) - case OpMaskedGreaterEqualInt64x4: - return rewriteValueAMD64_OpMaskedGreaterEqualInt64x4(v) - case OpMaskedGreaterEqualInt64x8: - return rewriteValueAMD64_OpMaskedGreaterEqualInt64x8(v) - case OpMaskedGreaterEqualInt8x16: - return rewriteValueAMD64_OpMaskedGreaterEqualInt8x16(v) - case OpMaskedGreaterEqualInt8x32: - return rewriteValueAMD64_OpMaskedGreaterEqualInt8x32(v) - case OpMaskedGreaterEqualInt8x64: - return rewriteValueAMD64_OpMaskedGreaterEqualInt8x64(v) - case OpMaskedGreaterEqualUint16x16: - return rewriteValueAMD64_OpMaskedGreaterEqualUint16x16(v) - case OpMaskedGreaterEqualUint16x32: - return rewriteValueAMD64_OpMaskedGreaterEqualUint16x32(v) - case OpMaskedGreaterEqualUint16x8: - return rewriteValueAMD64_OpMaskedGreaterEqualUint16x8(v) - case OpMaskedGreaterEqualUint32x16: - return rewriteValueAMD64_OpMaskedGreaterEqualUint32x16(v) - case OpMaskedGreaterEqualUint32x4: - return rewriteValueAMD64_OpMaskedGreaterEqualUint32x4(v) - case OpMaskedGreaterEqualUint32x8: - return rewriteValueAMD64_OpMaskedGreaterEqualUint32x8(v) - case OpMaskedGreaterEqualUint64x2: - return rewriteValueAMD64_OpMaskedGreaterEqualUint64x2(v) - case OpMaskedGreaterEqualUint64x4: - return rewriteValueAMD64_OpMaskedGreaterEqualUint64x4(v) - case OpMaskedGreaterEqualUint64x8: - return rewriteValueAMD64_OpMaskedGreaterEqualUint64x8(v) - case 
OpMaskedGreaterEqualUint8x16: - return rewriteValueAMD64_OpMaskedGreaterEqualUint8x16(v) - case OpMaskedGreaterEqualUint8x32: - return rewriteValueAMD64_OpMaskedGreaterEqualUint8x32(v) - case OpMaskedGreaterEqualUint8x64: - return rewriteValueAMD64_OpMaskedGreaterEqualUint8x64(v) - case OpMaskedGreaterFloat32x16: - return rewriteValueAMD64_OpMaskedGreaterFloat32x16(v) - case OpMaskedGreaterFloat32x4: - return rewriteValueAMD64_OpMaskedGreaterFloat32x4(v) - case OpMaskedGreaterFloat32x8: - return rewriteValueAMD64_OpMaskedGreaterFloat32x8(v) - case OpMaskedGreaterFloat64x2: - return rewriteValueAMD64_OpMaskedGreaterFloat64x2(v) - case OpMaskedGreaterFloat64x4: - return rewriteValueAMD64_OpMaskedGreaterFloat64x4(v) - case OpMaskedGreaterFloat64x8: - return rewriteValueAMD64_OpMaskedGreaterFloat64x8(v) - case OpMaskedGreaterInt16x16: - return rewriteValueAMD64_OpMaskedGreaterInt16x16(v) - case OpMaskedGreaterInt16x32: - return rewriteValueAMD64_OpMaskedGreaterInt16x32(v) - case OpMaskedGreaterInt16x8: - return rewriteValueAMD64_OpMaskedGreaterInt16x8(v) - case OpMaskedGreaterInt32x16: - return rewriteValueAMD64_OpMaskedGreaterInt32x16(v) - case OpMaskedGreaterInt32x4: - return rewriteValueAMD64_OpMaskedGreaterInt32x4(v) - case OpMaskedGreaterInt32x8: - return rewriteValueAMD64_OpMaskedGreaterInt32x8(v) - case OpMaskedGreaterInt64x2: - return rewriteValueAMD64_OpMaskedGreaterInt64x2(v) - case OpMaskedGreaterInt64x4: - return rewriteValueAMD64_OpMaskedGreaterInt64x4(v) - case OpMaskedGreaterInt64x8: - return rewriteValueAMD64_OpMaskedGreaterInt64x8(v) - case OpMaskedGreaterInt8x16: - return rewriteValueAMD64_OpMaskedGreaterInt8x16(v) - case OpMaskedGreaterInt8x32: - return rewriteValueAMD64_OpMaskedGreaterInt8x32(v) - case OpMaskedGreaterInt8x64: - return rewriteValueAMD64_OpMaskedGreaterInt8x64(v) - case OpMaskedGreaterUint16x16: - return rewriteValueAMD64_OpMaskedGreaterUint16x16(v) - case OpMaskedGreaterUint16x32: - return rewriteValueAMD64_OpMaskedGreaterUint16x32(v) - case OpMaskedGreaterUint16x8: - return rewriteValueAMD64_OpMaskedGreaterUint16x8(v) - case OpMaskedGreaterUint32x16: - return rewriteValueAMD64_OpMaskedGreaterUint32x16(v) - case OpMaskedGreaterUint32x4: - return rewriteValueAMD64_OpMaskedGreaterUint32x4(v) - case OpMaskedGreaterUint32x8: - return rewriteValueAMD64_OpMaskedGreaterUint32x8(v) - case OpMaskedGreaterUint64x2: - return rewriteValueAMD64_OpMaskedGreaterUint64x2(v) - case OpMaskedGreaterUint64x4: - return rewriteValueAMD64_OpMaskedGreaterUint64x4(v) - case OpMaskedGreaterUint64x8: - return rewriteValueAMD64_OpMaskedGreaterUint64x8(v) - case OpMaskedGreaterUint8x16: - return rewriteValueAMD64_OpMaskedGreaterUint8x16(v) - case OpMaskedGreaterUint8x32: - return rewriteValueAMD64_OpMaskedGreaterUint8x32(v) - case OpMaskedGreaterUint8x64: - return rewriteValueAMD64_OpMaskedGreaterUint8x64(v) - case OpMaskedIsNanFloat32x16: - return rewriteValueAMD64_OpMaskedIsNanFloat32x16(v) - case OpMaskedIsNanFloat32x4: - return rewriteValueAMD64_OpMaskedIsNanFloat32x4(v) - case OpMaskedIsNanFloat32x8: - return rewriteValueAMD64_OpMaskedIsNanFloat32x8(v) - case OpMaskedIsNanFloat64x2: - return rewriteValueAMD64_OpMaskedIsNanFloat64x2(v) - case OpMaskedIsNanFloat64x4: - return rewriteValueAMD64_OpMaskedIsNanFloat64x4(v) - case OpMaskedIsNanFloat64x8: - return rewriteValueAMD64_OpMaskedIsNanFloat64x8(v) - case OpMaskedLessEqualFloat32x16: - return rewriteValueAMD64_OpMaskedLessEqualFloat32x16(v) - case OpMaskedLessEqualFloat32x4: - return rewriteValueAMD64_OpMaskedLessEqualFloat32x4(v) 
- case OpMaskedLessEqualFloat32x8: - return rewriteValueAMD64_OpMaskedLessEqualFloat32x8(v) - case OpMaskedLessEqualFloat64x2: - return rewriteValueAMD64_OpMaskedLessEqualFloat64x2(v) - case OpMaskedLessEqualFloat64x4: - return rewriteValueAMD64_OpMaskedLessEqualFloat64x4(v) - case OpMaskedLessEqualFloat64x8: - return rewriteValueAMD64_OpMaskedLessEqualFloat64x8(v) - case OpMaskedLessEqualInt16x16: - return rewriteValueAMD64_OpMaskedLessEqualInt16x16(v) - case OpMaskedLessEqualInt16x32: - return rewriteValueAMD64_OpMaskedLessEqualInt16x32(v) - case OpMaskedLessEqualInt16x8: - return rewriteValueAMD64_OpMaskedLessEqualInt16x8(v) - case OpMaskedLessEqualInt32x16: - return rewriteValueAMD64_OpMaskedLessEqualInt32x16(v) - case OpMaskedLessEqualInt32x4: - return rewriteValueAMD64_OpMaskedLessEqualInt32x4(v) - case OpMaskedLessEqualInt32x8: - return rewriteValueAMD64_OpMaskedLessEqualInt32x8(v) - case OpMaskedLessEqualInt64x2: - return rewriteValueAMD64_OpMaskedLessEqualInt64x2(v) - case OpMaskedLessEqualInt64x4: - return rewriteValueAMD64_OpMaskedLessEqualInt64x4(v) - case OpMaskedLessEqualInt64x8: - return rewriteValueAMD64_OpMaskedLessEqualInt64x8(v) - case OpMaskedLessEqualInt8x16: - return rewriteValueAMD64_OpMaskedLessEqualInt8x16(v) - case OpMaskedLessEqualInt8x32: - return rewriteValueAMD64_OpMaskedLessEqualInt8x32(v) - case OpMaskedLessEqualInt8x64: - return rewriteValueAMD64_OpMaskedLessEqualInt8x64(v) - case OpMaskedLessEqualUint16x16: - return rewriteValueAMD64_OpMaskedLessEqualUint16x16(v) - case OpMaskedLessEqualUint16x32: - return rewriteValueAMD64_OpMaskedLessEqualUint16x32(v) - case OpMaskedLessEqualUint16x8: - return rewriteValueAMD64_OpMaskedLessEqualUint16x8(v) - case OpMaskedLessEqualUint32x16: - return rewriteValueAMD64_OpMaskedLessEqualUint32x16(v) - case OpMaskedLessEqualUint32x4: - return rewriteValueAMD64_OpMaskedLessEqualUint32x4(v) - case OpMaskedLessEqualUint32x8: - return rewriteValueAMD64_OpMaskedLessEqualUint32x8(v) - case OpMaskedLessEqualUint64x2: - return rewriteValueAMD64_OpMaskedLessEqualUint64x2(v) - case OpMaskedLessEqualUint64x4: - return rewriteValueAMD64_OpMaskedLessEqualUint64x4(v) - case OpMaskedLessEqualUint64x8: - return rewriteValueAMD64_OpMaskedLessEqualUint64x8(v) - case OpMaskedLessEqualUint8x16: - return rewriteValueAMD64_OpMaskedLessEqualUint8x16(v) - case OpMaskedLessEqualUint8x32: - return rewriteValueAMD64_OpMaskedLessEqualUint8x32(v) - case OpMaskedLessEqualUint8x64: - return rewriteValueAMD64_OpMaskedLessEqualUint8x64(v) - case OpMaskedLessFloat32x16: - return rewriteValueAMD64_OpMaskedLessFloat32x16(v) - case OpMaskedLessFloat32x4: - return rewriteValueAMD64_OpMaskedLessFloat32x4(v) - case OpMaskedLessFloat32x8: - return rewriteValueAMD64_OpMaskedLessFloat32x8(v) - case OpMaskedLessFloat64x2: - return rewriteValueAMD64_OpMaskedLessFloat64x2(v) - case OpMaskedLessFloat64x4: - return rewriteValueAMD64_OpMaskedLessFloat64x4(v) - case OpMaskedLessFloat64x8: - return rewriteValueAMD64_OpMaskedLessFloat64x8(v) - case OpMaskedLessInt16x16: - return rewriteValueAMD64_OpMaskedLessInt16x16(v) - case OpMaskedLessInt16x32: - return rewriteValueAMD64_OpMaskedLessInt16x32(v) - case OpMaskedLessInt16x8: - return rewriteValueAMD64_OpMaskedLessInt16x8(v) - case OpMaskedLessInt32x16: - return rewriteValueAMD64_OpMaskedLessInt32x16(v) - case OpMaskedLessInt32x4: - return rewriteValueAMD64_OpMaskedLessInt32x4(v) - case OpMaskedLessInt32x8: - return rewriteValueAMD64_OpMaskedLessInt32x8(v) - case OpMaskedLessInt64x2: - return 
rewriteValueAMD64_OpMaskedLessInt64x2(v) - case OpMaskedLessInt64x4: - return rewriteValueAMD64_OpMaskedLessInt64x4(v) - case OpMaskedLessInt64x8: - return rewriteValueAMD64_OpMaskedLessInt64x8(v) - case OpMaskedLessInt8x16: - return rewriteValueAMD64_OpMaskedLessInt8x16(v) - case OpMaskedLessInt8x32: - return rewriteValueAMD64_OpMaskedLessInt8x32(v) - case OpMaskedLessInt8x64: - return rewriteValueAMD64_OpMaskedLessInt8x64(v) - case OpMaskedLessUint16x16: - return rewriteValueAMD64_OpMaskedLessUint16x16(v) - case OpMaskedLessUint16x32: - return rewriteValueAMD64_OpMaskedLessUint16x32(v) - case OpMaskedLessUint16x8: - return rewriteValueAMD64_OpMaskedLessUint16x8(v) - case OpMaskedLessUint32x16: - return rewriteValueAMD64_OpMaskedLessUint32x16(v) - case OpMaskedLessUint32x4: - return rewriteValueAMD64_OpMaskedLessUint32x4(v) - case OpMaskedLessUint32x8: - return rewriteValueAMD64_OpMaskedLessUint32x8(v) - case OpMaskedLessUint64x2: - return rewriteValueAMD64_OpMaskedLessUint64x2(v) - case OpMaskedLessUint64x4: - return rewriteValueAMD64_OpMaskedLessUint64x4(v) - case OpMaskedLessUint64x8: - return rewriteValueAMD64_OpMaskedLessUint64x8(v) - case OpMaskedLessUint8x16: - return rewriteValueAMD64_OpMaskedLessUint8x16(v) - case OpMaskedLessUint8x32: - return rewriteValueAMD64_OpMaskedLessUint8x32(v) - case OpMaskedLessUint8x64: - return rewriteValueAMD64_OpMaskedLessUint8x64(v) - case OpMaskedMaxFloat32x16: - return rewriteValueAMD64_OpMaskedMaxFloat32x16(v) - case OpMaskedMaxFloat32x4: - return rewriteValueAMD64_OpMaskedMaxFloat32x4(v) - case OpMaskedMaxFloat32x8: - return rewriteValueAMD64_OpMaskedMaxFloat32x8(v) - case OpMaskedMaxFloat64x2: - return rewriteValueAMD64_OpMaskedMaxFloat64x2(v) - case OpMaskedMaxFloat64x4: - return rewriteValueAMD64_OpMaskedMaxFloat64x4(v) - case OpMaskedMaxFloat64x8: - return rewriteValueAMD64_OpMaskedMaxFloat64x8(v) - case OpMaskedMaxInt16x16: - return rewriteValueAMD64_OpMaskedMaxInt16x16(v) - case OpMaskedMaxInt16x32: - return rewriteValueAMD64_OpMaskedMaxInt16x32(v) - case OpMaskedMaxInt16x8: - return rewriteValueAMD64_OpMaskedMaxInt16x8(v) - case OpMaskedMaxInt32x16: - return rewriteValueAMD64_OpMaskedMaxInt32x16(v) - case OpMaskedMaxInt32x4: - return rewriteValueAMD64_OpMaskedMaxInt32x4(v) - case OpMaskedMaxInt32x8: - return rewriteValueAMD64_OpMaskedMaxInt32x8(v) - case OpMaskedMaxInt64x2: - return rewriteValueAMD64_OpMaskedMaxInt64x2(v) - case OpMaskedMaxInt64x4: - return rewriteValueAMD64_OpMaskedMaxInt64x4(v) - case OpMaskedMaxInt64x8: - return rewriteValueAMD64_OpMaskedMaxInt64x8(v) - case OpMaskedMaxInt8x16: - return rewriteValueAMD64_OpMaskedMaxInt8x16(v) - case OpMaskedMaxInt8x32: - return rewriteValueAMD64_OpMaskedMaxInt8x32(v) - case OpMaskedMaxInt8x64: - return rewriteValueAMD64_OpMaskedMaxInt8x64(v) - case OpMaskedMaxUint16x16: - return rewriteValueAMD64_OpMaskedMaxUint16x16(v) - case OpMaskedMaxUint16x32: - return rewriteValueAMD64_OpMaskedMaxUint16x32(v) - case OpMaskedMaxUint16x8: - return rewriteValueAMD64_OpMaskedMaxUint16x8(v) - case OpMaskedMaxUint32x16: - return rewriteValueAMD64_OpMaskedMaxUint32x16(v) - case OpMaskedMaxUint32x4: - return rewriteValueAMD64_OpMaskedMaxUint32x4(v) - case OpMaskedMaxUint32x8: - return rewriteValueAMD64_OpMaskedMaxUint32x8(v) - case OpMaskedMaxUint64x2: - return rewriteValueAMD64_OpMaskedMaxUint64x2(v) - case OpMaskedMaxUint64x4: - return rewriteValueAMD64_OpMaskedMaxUint64x4(v) - case OpMaskedMaxUint64x8: - return rewriteValueAMD64_OpMaskedMaxUint64x8(v) - case OpMaskedMaxUint8x16: - return 
rewriteValueAMD64_OpMaskedMaxUint8x16(v) - case OpMaskedMaxUint8x32: - return rewriteValueAMD64_OpMaskedMaxUint8x32(v) - case OpMaskedMaxUint8x64: - return rewriteValueAMD64_OpMaskedMaxUint8x64(v) - case OpMaskedMinFloat32x16: - return rewriteValueAMD64_OpMaskedMinFloat32x16(v) - case OpMaskedMinFloat32x4: - return rewriteValueAMD64_OpMaskedMinFloat32x4(v) - case OpMaskedMinFloat32x8: - return rewriteValueAMD64_OpMaskedMinFloat32x8(v) - case OpMaskedMinFloat64x2: - return rewriteValueAMD64_OpMaskedMinFloat64x2(v) - case OpMaskedMinFloat64x4: - return rewriteValueAMD64_OpMaskedMinFloat64x4(v) - case OpMaskedMinFloat64x8: - return rewriteValueAMD64_OpMaskedMinFloat64x8(v) - case OpMaskedMinInt16x16: - return rewriteValueAMD64_OpMaskedMinInt16x16(v) - case OpMaskedMinInt16x32: - return rewriteValueAMD64_OpMaskedMinInt16x32(v) - case OpMaskedMinInt16x8: - return rewriteValueAMD64_OpMaskedMinInt16x8(v) - case OpMaskedMinInt32x16: - return rewriteValueAMD64_OpMaskedMinInt32x16(v) - case OpMaskedMinInt32x4: - return rewriteValueAMD64_OpMaskedMinInt32x4(v) - case OpMaskedMinInt32x8: - return rewriteValueAMD64_OpMaskedMinInt32x8(v) - case OpMaskedMinInt64x2: - return rewriteValueAMD64_OpMaskedMinInt64x2(v) - case OpMaskedMinInt64x4: - return rewriteValueAMD64_OpMaskedMinInt64x4(v) - case OpMaskedMinInt64x8: - return rewriteValueAMD64_OpMaskedMinInt64x8(v) - case OpMaskedMinInt8x16: - return rewriteValueAMD64_OpMaskedMinInt8x16(v) - case OpMaskedMinInt8x32: - return rewriteValueAMD64_OpMaskedMinInt8x32(v) - case OpMaskedMinInt8x64: - return rewriteValueAMD64_OpMaskedMinInt8x64(v) - case OpMaskedMinUint16x16: - return rewriteValueAMD64_OpMaskedMinUint16x16(v) - case OpMaskedMinUint16x32: - return rewriteValueAMD64_OpMaskedMinUint16x32(v) - case OpMaskedMinUint16x8: - return rewriteValueAMD64_OpMaskedMinUint16x8(v) - case OpMaskedMinUint32x16: - return rewriteValueAMD64_OpMaskedMinUint32x16(v) - case OpMaskedMinUint32x4: - return rewriteValueAMD64_OpMaskedMinUint32x4(v) - case OpMaskedMinUint32x8: - return rewriteValueAMD64_OpMaskedMinUint32x8(v) - case OpMaskedMinUint64x2: - return rewriteValueAMD64_OpMaskedMinUint64x2(v) - case OpMaskedMinUint64x4: - return rewriteValueAMD64_OpMaskedMinUint64x4(v) - case OpMaskedMinUint64x8: - return rewriteValueAMD64_OpMaskedMinUint64x8(v) - case OpMaskedMinUint8x16: - return rewriteValueAMD64_OpMaskedMinUint8x16(v) - case OpMaskedMinUint8x32: - return rewriteValueAMD64_OpMaskedMinUint8x32(v) - case OpMaskedMinUint8x64: - return rewriteValueAMD64_OpMaskedMinUint8x64(v) - case OpMaskedMulByPowOf2Float32x16: - return rewriteValueAMD64_OpMaskedMulByPowOf2Float32x16(v) - case OpMaskedMulByPowOf2Float32x4: - return rewriteValueAMD64_OpMaskedMulByPowOf2Float32x4(v) - case OpMaskedMulByPowOf2Float32x8: - return rewriteValueAMD64_OpMaskedMulByPowOf2Float32x8(v) - case OpMaskedMulByPowOf2Float64x2: - return rewriteValueAMD64_OpMaskedMulByPowOf2Float64x2(v) - case OpMaskedMulByPowOf2Float64x4: - return rewriteValueAMD64_OpMaskedMulByPowOf2Float64x4(v) - case OpMaskedMulByPowOf2Float64x8: - return rewriteValueAMD64_OpMaskedMulByPowOf2Float64x8(v) - case OpMaskedMulEvenWidenInt64x2: - return rewriteValueAMD64_OpMaskedMulEvenWidenInt64x2(v) - case OpMaskedMulEvenWidenInt64x4: - return rewriteValueAMD64_OpMaskedMulEvenWidenInt64x4(v) - case OpMaskedMulEvenWidenInt64x8: - return rewriteValueAMD64_OpMaskedMulEvenWidenInt64x8(v) - case OpMaskedMulEvenWidenUint64x2: - return rewriteValueAMD64_OpMaskedMulEvenWidenUint64x2(v) - case OpMaskedMulEvenWidenUint64x4: - return 
rewriteValueAMD64_OpMaskedMulEvenWidenUint64x4(v) - case OpMaskedMulEvenWidenUint64x8: - return rewriteValueAMD64_OpMaskedMulEvenWidenUint64x8(v) - case OpMaskedMulFloat32x16: - return rewriteValueAMD64_OpMaskedMulFloat32x16(v) - case OpMaskedMulFloat32x4: - return rewriteValueAMD64_OpMaskedMulFloat32x4(v) - case OpMaskedMulFloat32x8: - return rewriteValueAMD64_OpMaskedMulFloat32x8(v) - case OpMaskedMulFloat64x2: - return rewriteValueAMD64_OpMaskedMulFloat64x2(v) - case OpMaskedMulFloat64x4: - return rewriteValueAMD64_OpMaskedMulFloat64x4(v) - case OpMaskedMulFloat64x8: - return rewriteValueAMD64_OpMaskedMulFloat64x8(v) - case OpMaskedMulHighInt16x16: - return rewriteValueAMD64_OpMaskedMulHighInt16x16(v) - case OpMaskedMulHighInt16x32: - return rewriteValueAMD64_OpMaskedMulHighInt16x32(v) - case OpMaskedMulHighInt16x8: - return rewriteValueAMD64_OpMaskedMulHighInt16x8(v) - case OpMaskedMulHighUint16x16: - return rewriteValueAMD64_OpMaskedMulHighUint16x16(v) - case OpMaskedMulHighUint16x32: - return rewriteValueAMD64_OpMaskedMulHighUint16x32(v) - case OpMaskedMulHighUint16x8: - return rewriteValueAMD64_OpMaskedMulHighUint16x8(v) - case OpMaskedMulLowInt16x16: - return rewriteValueAMD64_OpMaskedMulLowInt16x16(v) - case OpMaskedMulLowInt16x32: - return rewriteValueAMD64_OpMaskedMulLowInt16x32(v) - case OpMaskedMulLowInt16x8: - return rewriteValueAMD64_OpMaskedMulLowInt16x8(v) - case OpMaskedMulLowInt32x16: - return rewriteValueAMD64_OpMaskedMulLowInt32x16(v) - case OpMaskedMulLowInt32x4: - return rewriteValueAMD64_OpMaskedMulLowInt32x4(v) - case OpMaskedMulLowInt32x8: - return rewriteValueAMD64_OpMaskedMulLowInt32x8(v) - case OpMaskedMulLowInt64x2: - return rewriteValueAMD64_OpMaskedMulLowInt64x2(v) - case OpMaskedMulLowInt64x4: - return rewriteValueAMD64_OpMaskedMulLowInt64x4(v) - case OpMaskedMulLowInt64x8: - return rewriteValueAMD64_OpMaskedMulLowInt64x8(v) - case OpMaskedNotEqualFloat32x16: - return rewriteValueAMD64_OpMaskedNotEqualFloat32x16(v) - case OpMaskedNotEqualFloat32x4: - return rewriteValueAMD64_OpMaskedNotEqualFloat32x4(v) - case OpMaskedNotEqualFloat32x8: - return rewriteValueAMD64_OpMaskedNotEqualFloat32x8(v) - case OpMaskedNotEqualFloat64x2: - return rewriteValueAMD64_OpMaskedNotEqualFloat64x2(v) - case OpMaskedNotEqualFloat64x4: - return rewriteValueAMD64_OpMaskedNotEqualFloat64x4(v) - case OpMaskedNotEqualFloat64x8: - return rewriteValueAMD64_OpMaskedNotEqualFloat64x8(v) - case OpMaskedNotEqualInt16x16: - return rewriteValueAMD64_OpMaskedNotEqualInt16x16(v) - case OpMaskedNotEqualInt16x32: - return rewriteValueAMD64_OpMaskedNotEqualInt16x32(v) - case OpMaskedNotEqualInt16x8: - return rewriteValueAMD64_OpMaskedNotEqualInt16x8(v) - case OpMaskedNotEqualInt32x16: - return rewriteValueAMD64_OpMaskedNotEqualInt32x16(v) - case OpMaskedNotEqualInt32x4: - return rewriteValueAMD64_OpMaskedNotEqualInt32x4(v) - case OpMaskedNotEqualInt32x8: - return rewriteValueAMD64_OpMaskedNotEqualInt32x8(v) - case OpMaskedNotEqualInt64x2: - return rewriteValueAMD64_OpMaskedNotEqualInt64x2(v) - case OpMaskedNotEqualInt64x4: - return rewriteValueAMD64_OpMaskedNotEqualInt64x4(v) - case OpMaskedNotEqualInt64x8: - return rewriteValueAMD64_OpMaskedNotEqualInt64x8(v) - case OpMaskedNotEqualInt8x16: - return rewriteValueAMD64_OpMaskedNotEqualInt8x16(v) - case OpMaskedNotEqualInt8x32: - return rewriteValueAMD64_OpMaskedNotEqualInt8x32(v) - case OpMaskedNotEqualInt8x64: - return rewriteValueAMD64_OpMaskedNotEqualInt8x64(v) - case OpMaskedNotEqualUint16x16: - return 
rewriteValueAMD64_OpMaskedNotEqualUint16x16(v) - case OpMaskedNotEqualUint16x32: - return rewriteValueAMD64_OpMaskedNotEqualUint16x32(v) - case OpMaskedNotEqualUint16x8: - return rewriteValueAMD64_OpMaskedNotEqualUint16x8(v) - case OpMaskedNotEqualUint32x16: - return rewriteValueAMD64_OpMaskedNotEqualUint32x16(v) - case OpMaskedNotEqualUint32x4: - return rewriteValueAMD64_OpMaskedNotEqualUint32x4(v) - case OpMaskedNotEqualUint32x8: - return rewriteValueAMD64_OpMaskedNotEqualUint32x8(v) - case OpMaskedNotEqualUint64x2: - return rewriteValueAMD64_OpMaskedNotEqualUint64x2(v) - case OpMaskedNotEqualUint64x4: - return rewriteValueAMD64_OpMaskedNotEqualUint64x4(v) - case OpMaskedNotEqualUint64x8: - return rewriteValueAMD64_OpMaskedNotEqualUint64x8(v) - case OpMaskedNotEqualUint8x16: - return rewriteValueAMD64_OpMaskedNotEqualUint8x16(v) - case OpMaskedNotEqualUint8x32: - return rewriteValueAMD64_OpMaskedNotEqualUint8x32(v) - case OpMaskedNotEqualUint8x64: - return rewriteValueAMD64_OpMaskedNotEqualUint8x64(v) - case OpMaskedOrInt32x16: - return rewriteValueAMD64_OpMaskedOrInt32x16(v) - case OpMaskedOrInt32x4: - return rewriteValueAMD64_OpMaskedOrInt32x4(v) - case OpMaskedOrInt32x8: - return rewriteValueAMD64_OpMaskedOrInt32x8(v) - case OpMaskedOrInt64x2: - return rewriteValueAMD64_OpMaskedOrInt64x2(v) - case OpMaskedOrInt64x4: - return rewriteValueAMD64_OpMaskedOrInt64x4(v) - case OpMaskedOrInt64x8: - return rewriteValueAMD64_OpMaskedOrInt64x8(v) - case OpMaskedOrUint32x16: - return rewriteValueAMD64_OpMaskedOrUint32x16(v) - case OpMaskedOrUint32x4: - return rewriteValueAMD64_OpMaskedOrUint32x4(v) - case OpMaskedOrUint32x8: - return rewriteValueAMD64_OpMaskedOrUint32x8(v) - case OpMaskedOrUint64x2: - return rewriteValueAMD64_OpMaskedOrUint64x2(v) - case OpMaskedOrUint64x4: - return rewriteValueAMD64_OpMaskedOrUint64x4(v) - case OpMaskedOrUint64x8: - return rewriteValueAMD64_OpMaskedOrUint64x8(v) - case OpMaskedPairDotProdAccumulateInt32x16: - return rewriteValueAMD64_OpMaskedPairDotProdAccumulateInt32x16(v) - case OpMaskedPairDotProdAccumulateInt32x4: - return rewriteValueAMD64_OpMaskedPairDotProdAccumulateInt32x4(v) - case OpMaskedPairDotProdAccumulateInt32x8: - return rewriteValueAMD64_OpMaskedPairDotProdAccumulateInt32x8(v) - case OpMaskedPairDotProdInt16x16: - return rewriteValueAMD64_OpMaskedPairDotProdInt16x16(v) - case OpMaskedPairDotProdInt16x32: - return rewriteValueAMD64_OpMaskedPairDotProdInt16x32(v) - case OpMaskedPairDotProdInt16x8: - return rewriteValueAMD64_OpMaskedPairDotProdInt16x8(v) - case OpMaskedPopCountInt16x16: - return rewriteValueAMD64_OpMaskedPopCountInt16x16(v) - case OpMaskedPopCountInt16x32: - return rewriteValueAMD64_OpMaskedPopCountInt16x32(v) - case OpMaskedPopCountInt16x8: - return rewriteValueAMD64_OpMaskedPopCountInt16x8(v) - case OpMaskedPopCountInt32x16: - return rewriteValueAMD64_OpMaskedPopCountInt32x16(v) - case OpMaskedPopCountInt32x4: - return rewriteValueAMD64_OpMaskedPopCountInt32x4(v) - case OpMaskedPopCountInt32x8: - return rewriteValueAMD64_OpMaskedPopCountInt32x8(v) - case OpMaskedPopCountInt64x2: - return rewriteValueAMD64_OpMaskedPopCountInt64x2(v) - case OpMaskedPopCountInt64x4: - return rewriteValueAMD64_OpMaskedPopCountInt64x4(v) - case OpMaskedPopCountInt64x8: - return rewriteValueAMD64_OpMaskedPopCountInt64x8(v) - case OpMaskedPopCountInt8x16: - return rewriteValueAMD64_OpMaskedPopCountInt8x16(v) - case OpMaskedPopCountInt8x32: - return rewriteValueAMD64_OpMaskedPopCountInt8x32(v) - case OpMaskedPopCountInt8x64: - return 
rewriteValueAMD64_OpMaskedPopCountInt8x64(v) - case OpMaskedPopCountUint16x16: - return rewriteValueAMD64_OpMaskedPopCountUint16x16(v) - case OpMaskedPopCountUint16x32: - return rewriteValueAMD64_OpMaskedPopCountUint16x32(v) - case OpMaskedPopCountUint16x8: - return rewriteValueAMD64_OpMaskedPopCountUint16x8(v) - case OpMaskedPopCountUint32x16: - return rewriteValueAMD64_OpMaskedPopCountUint32x16(v) - case OpMaskedPopCountUint32x4: - return rewriteValueAMD64_OpMaskedPopCountUint32x4(v) - case OpMaskedPopCountUint32x8: - return rewriteValueAMD64_OpMaskedPopCountUint32x8(v) - case OpMaskedPopCountUint64x2: - return rewriteValueAMD64_OpMaskedPopCountUint64x2(v) - case OpMaskedPopCountUint64x4: - return rewriteValueAMD64_OpMaskedPopCountUint64x4(v) - case OpMaskedPopCountUint64x8: - return rewriteValueAMD64_OpMaskedPopCountUint64x8(v) - case OpMaskedPopCountUint8x16: - return rewriteValueAMD64_OpMaskedPopCountUint8x16(v) - case OpMaskedPopCountUint8x32: - return rewriteValueAMD64_OpMaskedPopCountUint8x32(v) - case OpMaskedPopCountUint8x64: - return rewriteValueAMD64_OpMaskedPopCountUint8x64(v) - case OpMaskedRotateAllLeftInt32x16: - return rewriteValueAMD64_OpMaskedRotateAllLeftInt32x16(v) - case OpMaskedRotateAllLeftInt32x4: - return rewriteValueAMD64_OpMaskedRotateAllLeftInt32x4(v) - case OpMaskedRotateAllLeftInt32x8: - return rewriteValueAMD64_OpMaskedRotateAllLeftInt32x8(v) - case OpMaskedRotateAllLeftInt64x2: - return rewriteValueAMD64_OpMaskedRotateAllLeftInt64x2(v) - case OpMaskedRotateAllLeftInt64x4: - return rewriteValueAMD64_OpMaskedRotateAllLeftInt64x4(v) - case OpMaskedRotateAllLeftInt64x8: - return rewriteValueAMD64_OpMaskedRotateAllLeftInt64x8(v) - case OpMaskedRotateAllLeftUint32x16: - return rewriteValueAMD64_OpMaskedRotateAllLeftUint32x16(v) - case OpMaskedRotateAllLeftUint32x4: - return rewriteValueAMD64_OpMaskedRotateAllLeftUint32x4(v) - case OpMaskedRotateAllLeftUint32x8: - return rewriteValueAMD64_OpMaskedRotateAllLeftUint32x8(v) - case OpMaskedRotateAllLeftUint64x2: - return rewriteValueAMD64_OpMaskedRotateAllLeftUint64x2(v) - case OpMaskedRotateAllLeftUint64x4: - return rewriteValueAMD64_OpMaskedRotateAllLeftUint64x4(v) - case OpMaskedRotateAllLeftUint64x8: - return rewriteValueAMD64_OpMaskedRotateAllLeftUint64x8(v) - case OpMaskedRotateAllRightInt32x16: - return rewriteValueAMD64_OpMaskedRotateAllRightInt32x16(v) - case OpMaskedRotateAllRightInt32x4: - return rewriteValueAMD64_OpMaskedRotateAllRightInt32x4(v) - case OpMaskedRotateAllRightInt32x8: - return rewriteValueAMD64_OpMaskedRotateAllRightInt32x8(v) - case OpMaskedRotateAllRightInt64x2: - return rewriteValueAMD64_OpMaskedRotateAllRightInt64x2(v) - case OpMaskedRotateAllRightInt64x4: - return rewriteValueAMD64_OpMaskedRotateAllRightInt64x4(v) - case OpMaskedRotateAllRightInt64x8: - return rewriteValueAMD64_OpMaskedRotateAllRightInt64x8(v) - case OpMaskedRotateAllRightUint32x16: - return rewriteValueAMD64_OpMaskedRotateAllRightUint32x16(v) - case OpMaskedRotateAllRightUint32x4: - return rewriteValueAMD64_OpMaskedRotateAllRightUint32x4(v) - case OpMaskedRotateAllRightUint32x8: - return rewriteValueAMD64_OpMaskedRotateAllRightUint32x8(v) - case OpMaskedRotateAllRightUint64x2: - return rewriteValueAMD64_OpMaskedRotateAllRightUint64x2(v) - case OpMaskedRotateAllRightUint64x4: - return rewriteValueAMD64_OpMaskedRotateAllRightUint64x4(v) - case OpMaskedRotateAllRightUint64x8: - return rewriteValueAMD64_OpMaskedRotateAllRightUint64x8(v) - case OpMaskedRotateLeftInt32x16: - return 
rewriteValueAMD64_OpMaskedRotateLeftInt32x16(v) - case OpMaskedRotateLeftInt32x4: - return rewriteValueAMD64_OpMaskedRotateLeftInt32x4(v) - case OpMaskedRotateLeftInt32x8: - return rewriteValueAMD64_OpMaskedRotateLeftInt32x8(v) - case OpMaskedRotateLeftInt64x2: - return rewriteValueAMD64_OpMaskedRotateLeftInt64x2(v) - case OpMaskedRotateLeftInt64x4: - return rewriteValueAMD64_OpMaskedRotateLeftInt64x4(v) - case OpMaskedRotateLeftInt64x8: - return rewriteValueAMD64_OpMaskedRotateLeftInt64x8(v) - case OpMaskedRotateLeftUint32x16: - return rewriteValueAMD64_OpMaskedRotateLeftUint32x16(v) - case OpMaskedRotateLeftUint32x4: - return rewriteValueAMD64_OpMaskedRotateLeftUint32x4(v) - case OpMaskedRotateLeftUint32x8: - return rewriteValueAMD64_OpMaskedRotateLeftUint32x8(v) - case OpMaskedRotateLeftUint64x2: - return rewriteValueAMD64_OpMaskedRotateLeftUint64x2(v) - case OpMaskedRotateLeftUint64x4: - return rewriteValueAMD64_OpMaskedRotateLeftUint64x4(v) - case OpMaskedRotateLeftUint64x8: - return rewriteValueAMD64_OpMaskedRotateLeftUint64x8(v) - case OpMaskedRotateRightInt32x16: - return rewriteValueAMD64_OpMaskedRotateRightInt32x16(v) - case OpMaskedRotateRightInt32x4: - return rewriteValueAMD64_OpMaskedRotateRightInt32x4(v) - case OpMaskedRotateRightInt32x8: - return rewriteValueAMD64_OpMaskedRotateRightInt32x8(v) - case OpMaskedRotateRightInt64x2: - return rewriteValueAMD64_OpMaskedRotateRightInt64x2(v) - case OpMaskedRotateRightInt64x4: - return rewriteValueAMD64_OpMaskedRotateRightInt64x4(v) - case OpMaskedRotateRightInt64x8: - return rewriteValueAMD64_OpMaskedRotateRightInt64x8(v) - case OpMaskedRotateRightUint32x16: - return rewriteValueAMD64_OpMaskedRotateRightUint32x16(v) - case OpMaskedRotateRightUint32x4: - return rewriteValueAMD64_OpMaskedRotateRightUint32x4(v) - case OpMaskedRotateRightUint32x8: - return rewriteValueAMD64_OpMaskedRotateRightUint32x8(v) - case OpMaskedRotateRightUint64x2: - return rewriteValueAMD64_OpMaskedRotateRightUint64x2(v) - case OpMaskedRotateRightUint64x4: - return rewriteValueAMD64_OpMaskedRotateRightUint64x4(v) - case OpMaskedRotateRightUint64x8: - return rewriteValueAMD64_OpMaskedRotateRightUint64x8(v) - case OpMaskedRoundWithPrecisionFloat32x16: - return rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat32x16(v) - case OpMaskedRoundWithPrecisionFloat32x4: - return rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat32x4(v) - case OpMaskedRoundWithPrecisionFloat32x8: - return rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat32x8(v) - case OpMaskedRoundWithPrecisionFloat64x2: - return rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat64x2(v) - case OpMaskedRoundWithPrecisionFloat64x4: - return rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat64x4(v) - case OpMaskedRoundWithPrecisionFloat64x8: - return rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat64x8(v) - case OpMaskedSaturatedAddInt16x16: - return rewriteValueAMD64_OpMaskedSaturatedAddInt16x16(v) - case OpMaskedSaturatedAddInt16x32: - return rewriteValueAMD64_OpMaskedSaturatedAddInt16x32(v) - case OpMaskedSaturatedAddInt16x8: - return rewriteValueAMD64_OpMaskedSaturatedAddInt16x8(v) - case OpMaskedSaturatedAddInt8x16: - return rewriteValueAMD64_OpMaskedSaturatedAddInt8x16(v) - case OpMaskedSaturatedAddInt8x32: - return rewriteValueAMD64_OpMaskedSaturatedAddInt8x32(v) - case OpMaskedSaturatedAddInt8x64: - return rewriteValueAMD64_OpMaskedSaturatedAddInt8x64(v) - case OpMaskedSaturatedAddUint16x16: - return rewriteValueAMD64_OpMaskedSaturatedAddUint16x16(v) - case OpMaskedSaturatedAddUint16x32: - return 
rewriteValueAMD64_OpMaskedSaturatedAddUint16x32(v) - case OpMaskedSaturatedAddUint16x8: - return rewriteValueAMD64_OpMaskedSaturatedAddUint16x8(v) - case OpMaskedSaturatedAddUint8x16: - return rewriteValueAMD64_OpMaskedSaturatedAddUint8x16(v) - case OpMaskedSaturatedAddUint8x32: - return rewriteValueAMD64_OpMaskedSaturatedAddUint8x32(v) - case OpMaskedSaturatedAddUint8x64: - return rewriteValueAMD64_OpMaskedSaturatedAddUint8x64(v) - case OpMaskedSaturatedPairDotProdAccumulateInt32x16: - return rewriteValueAMD64_OpMaskedSaturatedPairDotProdAccumulateInt32x16(v) - case OpMaskedSaturatedPairDotProdAccumulateInt32x4: - return rewriteValueAMD64_OpMaskedSaturatedPairDotProdAccumulateInt32x4(v) - case OpMaskedSaturatedPairDotProdAccumulateInt32x8: - return rewriteValueAMD64_OpMaskedSaturatedPairDotProdAccumulateInt32x8(v) - case OpMaskedSaturatedSubInt16x16: - return rewriteValueAMD64_OpMaskedSaturatedSubInt16x16(v) - case OpMaskedSaturatedSubInt16x32: - return rewriteValueAMD64_OpMaskedSaturatedSubInt16x32(v) - case OpMaskedSaturatedSubInt16x8: - return rewriteValueAMD64_OpMaskedSaturatedSubInt16x8(v) - case OpMaskedSaturatedSubInt8x16: - return rewriteValueAMD64_OpMaskedSaturatedSubInt8x16(v) - case OpMaskedSaturatedSubInt8x32: - return rewriteValueAMD64_OpMaskedSaturatedSubInt8x32(v) - case OpMaskedSaturatedSubInt8x64: - return rewriteValueAMD64_OpMaskedSaturatedSubInt8x64(v) - case OpMaskedSaturatedSubUint16x16: - return rewriteValueAMD64_OpMaskedSaturatedSubUint16x16(v) - case OpMaskedSaturatedSubUint16x32: - return rewriteValueAMD64_OpMaskedSaturatedSubUint16x32(v) - case OpMaskedSaturatedSubUint16x8: - return rewriteValueAMD64_OpMaskedSaturatedSubUint16x8(v) - case OpMaskedSaturatedSubUint8x16: - return rewriteValueAMD64_OpMaskedSaturatedSubUint8x16(v) - case OpMaskedSaturatedSubUint8x32: - return rewriteValueAMD64_OpMaskedSaturatedSubUint8x32(v) - case OpMaskedSaturatedSubUint8x64: - return rewriteValueAMD64_OpMaskedSaturatedSubUint8x64(v) - case OpMaskedSaturatedUnsignedSignedPairDotProdUint8x16: - return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint8x16(v) - case OpMaskedSaturatedUnsignedSignedPairDotProdUint8x32: - return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint8x32(v) - case OpMaskedSaturatedUnsignedSignedPairDotProdUint8x64: - return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint8x64(v) - case OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16: - return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16(v) - case OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4: - return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4(v) - case OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8: - return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8(v) - case OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16: - return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16(v) - case OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4: - return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4(v) - case OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8: - return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8(v) - case OpMaskedShiftAllLeftAndFillUpperFromInt16x16: - return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt16x16(v) - case OpMaskedShiftAllLeftAndFillUpperFromInt16x32: - return 
rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt16x32(v) - case OpMaskedShiftAllLeftAndFillUpperFromInt16x8: - return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt16x8(v) - case OpMaskedShiftAllLeftAndFillUpperFromInt32x16: - return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt32x16(v) - case OpMaskedShiftAllLeftAndFillUpperFromInt32x4: - return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt32x4(v) - case OpMaskedShiftAllLeftAndFillUpperFromInt32x8: - return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt32x8(v) - case OpMaskedShiftAllLeftAndFillUpperFromInt64x2: - return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt64x2(v) - case OpMaskedShiftAllLeftAndFillUpperFromInt64x4: - return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt64x4(v) - case OpMaskedShiftAllLeftAndFillUpperFromInt64x8: - return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt64x8(v) - case OpMaskedShiftAllLeftAndFillUpperFromUint16x16: - return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint16x16(v) - case OpMaskedShiftAllLeftAndFillUpperFromUint16x32: - return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint16x32(v) - case OpMaskedShiftAllLeftAndFillUpperFromUint16x8: - return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint16x8(v) - case OpMaskedShiftAllLeftAndFillUpperFromUint32x16: - return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint32x16(v) - case OpMaskedShiftAllLeftAndFillUpperFromUint32x4: - return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint32x4(v) - case OpMaskedShiftAllLeftAndFillUpperFromUint32x8: - return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint32x8(v) - case OpMaskedShiftAllLeftAndFillUpperFromUint64x2: - return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint64x2(v) - case OpMaskedShiftAllLeftAndFillUpperFromUint64x4: - return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint64x4(v) - case OpMaskedShiftAllLeftAndFillUpperFromUint64x8: - return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint64x8(v) - case OpMaskedShiftAllLeftInt64x2: - return rewriteValueAMD64_OpMaskedShiftAllLeftInt64x2(v) - case OpMaskedShiftAllLeftInt64x4: - return rewriteValueAMD64_OpMaskedShiftAllLeftInt64x4(v) - case OpMaskedShiftAllLeftInt64x8: - return rewriteValueAMD64_OpMaskedShiftAllLeftInt64x8(v) - case OpMaskedShiftAllLeftUint64x2: - return rewriteValueAMD64_OpMaskedShiftAllLeftUint64x2(v) - case OpMaskedShiftAllLeftUint64x4: - return rewriteValueAMD64_OpMaskedShiftAllLeftUint64x4(v) - case OpMaskedShiftAllLeftUint64x8: - return rewriteValueAMD64_OpMaskedShiftAllLeftUint64x8(v) - case OpMaskedShiftAllRightAndFillUpperFromInt16x16: - return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt16x16(v) - case OpMaskedShiftAllRightAndFillUpperFromInt16x32: - return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt16x32(v) - case OpMaskedShiftAllRightAndFillUpperFromInt16x8: - return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt16x8(v) - case OpMaskedShiftAllRightAndFillUpperFromInt32x16: - return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt32x16(v) - case OpMaskedShiftAllRightAndFillUpperFromInt32x4: - return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt32x4(v) - case OpMaskedShiftAllRightAndFillUpperFromInt32x8: - return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt32x8(v) - case OpMaskedShiftAllRightAndFillUpperFromInt64x2: - return 
rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt64x2(v) - case OpMaskedShiftAllRightAndFillUpperFromInt64x4: - return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt64x4(v) - case OpMaskedShiftAllRightAndFillUpperFromInt64x8: - return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt64x8(v) - case OpMaskedShiftAllRightAndFillUpperFromUint16x16: - return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint16x16(v) - case OpMaskedShiftAllRightAndFillUpperFromUint16x32: - return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint16x32(v) - case OpMaskedShiftAllRightAndFillUpperFromUint16x8: - return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint16x8(v) - case OpMaskedShiftAllRightAndFillUpperFromUint32x16: - return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint32x16(v) - case OpMaskedShiftAllRightAndFillUpperFromUint32x4: - return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint32x4(v) - case OpMaskedShiftAllRightAndFillUpperFromUint32x8: - return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint32x8(v) - case OpMaskedShiftAllRightAndFillUpperFromUint64x2: - return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint64x2(v) - case OpMaskedShiftAllRightAndFillUpperFromUint64x4: - return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint64x4(v) - case OpMaskedShiftAllRightAndFillUpperFromUint64x8: - return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint64x8(v) - case OpMaskedShiftAllRightInt64x2: - return rewriteValueAMD64_OpMaskedShiftAllRightInt64x2(v) - case OpMaskedShiftAllRightInt64x4: - return rewriteValueAMD64_OpMaskedShiftAllRightInt64x4(v) - case OpMaskedShiftAllRightInt64x8: - return rewriteValueAMD64_OpMaskedShiftAllRightInt64x8(v) - case OpMaskedShiftAllRightSignExtendedInt64x2: - return rewriteValueAMD64_OpMaskedShiftAllRightSignExtendedInt64x2(v) - case OpMaskedShiftAllRightSignExtendedInt64x4: - return rewriteValueAMD64_OpMaskedShiftAllRightSignExtendedInt64x4(v) - case OpMaskedShiftAllRightSignExtendedInt64x8: - return rewriteValueAMD64_OpMaskedShiftAllRightSignExtendedInt64x8(v) - case OpMaskedShiftAllRightUint64x2: - return rewriteValueAMD64_OpMaskedShiftAllRightUint64x2(v) - case OpMaskedShiftAllRightUint64x4: - return rewriteValueAMD64_OpMaskedShiftAllRightUint64x4(v) - case OpMaskedShiftAllRightUint64x8: - return rewriteValueAMD64_OpMaskedShiftAllRightUint64x8(v) - case OpMaskedShiftLeftAndFillUpperFromInt16x16: - return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt16x16(v) - case OpMaskedShiftLeftAndFillUpperFromInt16x32: - return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt16x32(v) - case OpMaskedShiftLeftAndFillUpperFromInt16x8: - return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt16x8(v) - case OpMaskedShiftLeftAndFillUpperFromInt32x16: - return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt32x16(v) - case OpMaskedShiftLeftAndFillUpperFromInt32x4: - return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt32x4(v) - case OpMaskedShiftLeftAndFillUpperFromInt32x8: - return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt32x8(v) - case OpMaskedShiftLeftAndFillUpperFromInt64x2: - return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt64x2(v) - case OpMaskedShiftLeftAndFillUpperFromInt64x4: - return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt64x4(v) - case OpMaskedShiftLeftAndFillUpperFromInt64x8: - return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt64x8(v) - case 
OpMaskedShiftLeftAndFillUpperFromUint16x16: - return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint16x16(v) - case OpMaskedShiftLeftAndFillUpperFromUint16x32: - return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint16x32(v) - case OpMaskedShiftLeftAndFillUpperFromUint16x8: - return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint16x8(v) - case OpMaskedShiftLeftAndFillUpperFromUint32x16: - return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint32x16(v) - case OpMaskedShiftLeftAndFillUpperFromUint32x4: - return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint32x4(v) - case OpMaskedShiftLeftAndFillUpperFromUint32x8: - return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint32x8(v) - case OpMaskedShiftLeftAndFillUpperFromUint64x2: - return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint64x2(v) - case OpMaskedShiftLeftAndFillUpperFromUint64x4: - return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint64x4(v) - case OpMaskedShiftLeftAndFillUpperFromUint64x8: - return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint64x8(v) - case OpMaskedShiftLeftInt16x16: - return rewriteValueAMD64_OpMaskedShiftLeftInt16x16(v) - case OpMaskedShiftLeftInt16x32: - return rewriteValueAMD64_OpMaskedShiftLeftInt16x32(v) - case OpMaskedShiftLeftInt16x8: - return rewriteValueAMD64_OpMaskedShiftLeftInt16x8(v) - case OpMaskedShiftLeftInt32x16: - return rewriteValueAMD64_OpMaskedShiftLeftInt32x16(v) - case OpMaskedShiftLeftInt32x4: - return rewriteValueAMD64_OpMaskedShiftLeftInt32x4(v) - case OpMaskedShiftLeftInt32x8: - return rewriteValueAMD64_OpMaskedShiftLeftInt32x8(v) - case OpMaskedShiftLeftInt64x2: - return rewriteValueAMD64_OpMaskedShiftLeftInt64x2(v) - case OpMaskedShiftLeftInt64x4: - return rewriteValueAMD64_OpMaskedShiftLeftInt64x4(v) - case OpMaskedShiftLeftInt64x8: - return rewriteValueAMD64_OpMaskedShiftLeftInt64x8(v) - case OpMaskedShiftLeftUint16x16: - return rewriteValueAMD64_OpMaskedShiftLeftUint16x16(v) - case OpMaskedShiftLeftUint16x32: - return rewriteValueAMD64_OpMaskedShiftLeftUint16x32(v) - case OpMaskedShiftLeftUint16x8: - return rewriteValueAMD64_OpMaskedShiftLeftUint16x8(v) - case OpMaskedShiftLeftUint32x16: - return rewriteValueAMD64_OpMaskedShiftLeftUint32x16(v) - case OpMaskedShiftLeftUint32x4: - return rewriteValueAMD64_OpMaskedShiftLeftUint32x4(v) - case OpMaskedShiftLeftUint32x8: - return rewriteValueAMD64_OpMaskedShiftLeftUint32x8(v) - case OpMaskedShiftLeftUint64x2: - return rewriteValueAMD64_OpMaskedShiftLeftUint64x2(v) - case OpMaskedShiftLeftUint64x4: - return rewriteValueAMD64_OpMaskedShiftLeftUint64x4(v) - case OpMaskedShiftLeftUint64x8: - return rewriteValueAMD64_OpMaskedShiftLeftUint64x8(v) - case OpMaskedShiftRightAndFillUpperFromInt16x16: - return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt16x16(v) - case OpMaskedShiftRightAndFillUpperFromInt16x32: - return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt16x32(v) - case OpMaskedShiftRightAndFillUpperFromInt16x8: - return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt16x8(v) - case OpMaskedShiftRightAndFillUpperFromInt32x16: - return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt32x16(v) - case OpMaskedShiftRightAndFillUpperFromInt32x4: - return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt32x4(v) - case OpMaskedShiftRightAndFillUpperFromInt32x8: - return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt32x8(v) - case OpMaskedShiftRightAndFillUpperFromInt64x2: - return 
rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt64x2(v) - case OpMaskedShiftRightAndFillUpperFromInt64x4: - return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt64x4(v) - case OpMaskedShiftRightAndFillUpperFromInt64x8: - return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt64x8(v) - case OpMaskedShiftRightAndFillUpperFromUint16x16: - return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint16x16(v) - case OpMaskedShiftRightAndFillUpperFromUint16x32: - return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint16x32(v) - case OpMaskedShiftRightAndFillUpperFromUint16x8: - return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint16x8(v) - case OpMaskedShiftRightAndFillUpperFromUint32x16: - return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint32x16(v) - case OpMaskedShiftRightAndFillUpperFromUint32x4: - return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint32x4(v) - case OpMaskedShiftRightAndFillUpperFromUint32x8: - return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint32x8(v) - case OpMaskedShiftRightAndFillUpperFromUint64x2: - return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint64x2(v) - case OpMaskedShiftRightAndFillUpperFromUint64x4: - return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint64x4(v) - case OpMaskedShiftRightAndFillUpperFromUint64x8: - return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint64x8(v) - case OpMaskedShiftRightInt16x16: - return rewriteValueAMD64_OpMaskedShiftRightInt16x16(v) - case OpMaskedShiftRightInt16x32: - return rewriteValueAMD64_OpMaskedShiftRightInt16x32(v) - case OpMaskedShiftRightInt16x8: - return rewriteValueAMD64_OpMaskedShiftRightInt16x8(v) - case OpMaskedShiftRightInt32x16: - return rewriteValueAMD64_OpMaskedShiftRightInt32x16(v) - case OpMaskedShiftRightInt32x4: - return rewriteValueAMD64_OpMaskedShiftRightInt32x4(v) - case OpMaskedShiftRightInt32x8: - return rewriteValueAMD64_OpMaskedShiftRightInt32x8(v) - case OpMaskedShiftRightInt64x2: - return rewriteValueAMD64_OpMaskedShiftRightInt64x2(v) - case OpMaskedShiftRightInt64x4: - return rewriteValueAMD64_OpMaskedShiftRightInt64x4(v) - case OpMaskedShiftRightInt64x8: - return rewriteValueAMD64_OpMaskedShiftRightInt64x8(v) - case OpMaskedShiftRightSignExtendedInt16x16: - return rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt16x16(v) - case OpMaskedShiftRightSignExtendedInt16x32: - return rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt16x32(v) - case OpMaskedShiftRightSignExtendedInt16x8: - return rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt16x8(v) - case OpMaskedShiftRightSignExtendedInt32x16: - return rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt32x16(v) - case OpMaskedShiftRightSignExtendedInt32x4: - return rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt32x4(v) - case OpMaskedShiftRightSignExtendedInt32x8: - return rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt32x8(v) - case OpMaskedShiftRightSignExtendedInt64x2: - return rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt64x2(v) - case OpMaskedShiftRightSignExtendedInt64x4: - return rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt64x4(v) - case OpMaskedShiftRightSignExtendedInt64x8: - return rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt64x8(v) - case OpMaskedShiftRightSignExtendedUint16x16: - return rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint16x16(v) - case OpMaskedShiftRightSignExtendedUint16x32: - return rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint16x32(v) - case 
OpMaskedShiftRightSignExtendedUint16x8: - return rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint16x8(v) - case OpMaskedShiftRightSignExtendedUint32x16: - return rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint32x16(v) - case OpMaskedShiftRightSignExtendedUint32x4: - return rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint32x4(v) - case OpMaskedShiftRightSignExtendedUint32x8: - return rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint32x8(v) - case OpMaskedShiftRightSignExtendedUint64x2: - return rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint64x2(v) - case OpMaskedShiftRightSignExtendedUint64x4: - return rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint64x4(v) - case OpMaskedShiftRightSignExtendedUint64x8: - return rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint64x8(v) - case OpMaskedShiftRightUint16x16: - return rewriteValueAMD64_OpMaskedShiftRightUint16x16(v) - case OpMaskedShiftRightUint16x32: - return rewriteValueAMD64_OpMaskedShiftRightUint16x32(v) - case OpMaskedShiftRightUint16x8: - return rewriteValueAMD64_OpMaskedShiftRightUint16x8(v) - case OpMaskedShiftRightUint32x16: - return rewriteValueAMD64_OpMaskedShiftRightUint32x16(v) - case OpMaskedShiftRightUint32x4: - return rewriteValueAMD64_OpMaskedShiftRightUint32x4(v) - case OpMaskedShiftRightUint32x8: - return rewriteValueAMD64_OpMaskedShiftRightUint32x8(v) - case OpMaskedShiftRightUint64x2: - return rewriteValueAMD64_OpMaskedShiftRightUint64x2(v) - case OpMaskedShiftRightUint64x4: - return rewriteValueAMD64_OpMaskedShiftRightUint64x4(v) - case OpMaskedShiftRightUint64x8: - return rewriteValueAMD64_OpMaskedShiftRightUint64x8(v) - case OpMaskedSqrtFloat32x16: - return rewriteValueAMD64_OpMaskedSqrtFloat32x16(v) - case OpMaskedSqrtFloat32x4: - return rewriteValueAMD64_OpMaskedSqrtFloat32x4(v) - case OpMaskedSqrtFloat32x8: - return rewriteValueAMD64_OpMaskedSqrtFloat32x8(v) - case OpMaskedSqrtFloat64x2: - return rewriteValueAMD64_OpMaskedSqrtFloat64x2(v) - case OpMaskedSqrtFloat64x4: - return rewriteValueAMD64_OpMaskedSqrtFloat64x4(v) - case OpMaskedSqrtFloat64x8: - return rewriteValueAMD64_OpMaskedSqrtFloat64x8(v) - case OpMaskedSubFloat32x16: - return rewriteValueAMD64_OpMaskedSubFloat32x16(v) - case OpMaskedSubFloat32x4: - return rewriteValueAMD64_OpMaskedSubFloat32x4(v) - case OpMaskedSubFloat32x8: - return rewriteValueAMD64_OpMaskedSubFloat32x8(v) - case OpMaskedSubFloat64x2: - return rewriteValueAMD64_OpMaskedSubFloat64x2(v) - case OpMaskedSubFloat64x4: - return rewriteValueAMD64_OpMaskedSubFloat64x4(v) - case OpMaskedSubFloat64x8: - return rewriteValueAMD64_OpMaskedSubFloat64x8(v) - case OpMaskedSubInt16x16: - return rewriteValueAMD64_OpMaskedSubInt16x16(v) - case OpMaskedSubInt16x32: - return rewriteValueAMD64_OpMaskedSubInt16x32(v) - case OpMaskedSubInt16x8: - return rewriteValueAMD64_OpMaskedSubInt16x8(v) - case OpMaskedSubInt32x16: - return rewriteValueAMD64_OpMaskedSubInt32x16(v) - case OpMaskedSubInt32x4: - return rewriteValueAMD64_OpMaskedSubInt32x4(v) - case OpMaskedSubInt32x8: - return rewriteValueAMD64_OpMaskedSubInt32x8(v) - case OpMaskedSubInt64x2: - return rewriteValueAMD64_OpMaskedSubInt64x2(v) - case OpMaskedSubInt64x4: - return rewriteValueAMD64_OpMaskedSubInt64x4(v) - case OpMaskedSubInt64x8: - return rewriteValueAMD64_OpMaskedSubInt64x8(v) - case OpMaskedSubInt8x16: - return rewriteValueAMD64_OpMaskedSubInt8x16(v) - case OpMaskedSubInt8x32: - return rewriteValueAMD64_OpMaskedSubInt8x32(v) - case OpMaskedSubInt8x64: - return rewriteValueAMD64_OpMaskedSubInt8x64(v) - case 
OpMaskedSubUint16x16: - return rewriteValueAMD64_OpMaskedSubUint16x16(v) - case OpMaskedSubUint16x32: - return rewriteValueAMD64_OpMaskedSubUint16x32(v) - case OpMaskedSubUint16x8: - return rewriteValueAMD64_OpMaskedSubUint16x8(v) - case OpMaskedSubUint32x16: - return rewriteValueAMD64_OpMaskedSubUint32x16(v) - case OpMaskedSubUint32x4: - return rewriteValueAMD64_OpMaskedSubUint32x4(v) - case OpMaskedSubUint32x8: - return rewriteValueAMD64_OpMaskedSubUint32x8(v) - case OpMaskedSubUint64x2: - return rewriteValueAMD64_OpMaskedSubUint64x2(v) - case OpMaskedSubUint64x4: - return rewriteValueAMD64_OpMaskedSubUint64x4(v) - case OpMaskedSubUint64x8: - return rewriteValueAMD64_OpMaskedSubUint64x8(v) - case OpMaskedSubUint8x16: - return rewriteValueAMD64_OpMaskedSubUint8x16(v) - case OpMaskedSubUint8x32: - return rewriteValueAMD64_OpMaskedSubUint8x32(v) - case OpMaskedSubUint8x64: - return rewriteValueAMD64_OpMaskedSubUint8x64(v) - case OpMaskedTruncWithPrecisionFloat32x16: - return rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat32x16(v) - case OpMaskedTruncWithPrecisionFloat32x4: - return rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat32x4(v) - case OpMaskedTruncWithPrecisionFloat32x8: - return rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat32x8(v) - case OpMaskedTruncWithPrecisionFloat64x2: - return rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat64x2(v) - case OpMaskedTruncWithPrecisionFloat64x4: - return rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat64x4(v) - case OpMaskedTruncWithPrecisionFloat64x8: - return rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat64x8(v) - case OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x16: - return rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x16(v) - case OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x4: - return rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x4(v) - case OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x8: - return rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x8(v) - case OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x16: - return rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x16(v) - case OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x4: - return rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x4(v) - case OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x8: - return rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x8(v) - case OpMaskedXorInt32x16: - return rewriteValueAMD64_OpMaskedXorInt32x16(v) - case OpMaskedXorInt32x4: - return rewriteValueAMD64_OpMaskedXorInt32x4(v) - case OpMaskedXorInt32x8: - return rewriteValueAMD64_OpMaskedXorInt32x8(v) - case OpMaskedXorInt64x2: - return rewriteValueAMD64_OpMaskedXorInt64x2(v) - case OpMaskedXorInt64x4: - return rewriteValueAMD64_OpMaskedXorInt64x4(v) - case OpMaskedXorInt64x8: - return rewriteValueAMD64_OpMaskedXorInt64x8(v) - case OpMaskedXorUint32x16: - return rewriteValueAMD64_OpMaskedXorUint32x16(v) - case OpMaskedXorUint32x4: - return rewriteValueAMD64_OpMaskedXorUint32x4(v) - case OpMaskedXorUint32x8: - return rewriteValueAMD64_OpMaskedXorUint32x8(v) - case OpMaskedXorUint64x2: - return rewriteValueAMD64_OpMaskedXorUint64x2(v) - case OpMaskedXorUint64x4: - return rewriteValueAMD64_OpMaskedXorUint64x4(v) - case OpMaskedXorUint64x8: - return rewriteValueAMD64_OpMaskedXorUint64x8(v) case OpMax32F: return rewriteValueAMD64_OpMax32F(v) case OpMax64F: @@ -3345,6 +2433,66 @@ func rewriteValueAMD64(v *Value) bool { case OpMaxInt8x64: v.Op = OpAMD64VPMAXSB512 
return true + case OpMaxMaskedFloat32x16: + return rewriteValueAMD64_OpMaxMaskedFloat32x16(v) + case OpMaxMaskedFloat32x4: + return rewriteValueAMD64_OpMaxMaskedFloat32x4(v) + case OpMaxMaskedFloat32x8: + return rewriteValueAMD64_OpMaxMaskedFloat32x8(v) + case OpMaxMaskedFloat64x2: + return rewriteValueAMD64_OpMaxMaskedFloat64x2(v) + case OpMaxMaskedFloat64x4: + return rewriteValueAMD64_OpMaxMaskedFloat64x4(v) + case OpMaxMaskedFloat64x8: + return rewriteValueAMD64_OpMaxMaskedFloat64x8(v) + case OpMaxMaskedInt16x16: + return rewriteValueAMD64_OpMaxMaskedInt16x16(v) + case OpMaxMaskedInt16x32: + return rewriteValueAMD64_OpMaxMaskedInt16x32(v) + case OpMaxMaskedInt16x8: + return rewriteValueAMD64_OpMaxMaskedInt16x8(v) + case OpMaxMaskedInt32x16: + return rewriteValueAMD64_OpMaxMaskedInt32x16(v) + case OpMaxMaskedInt32x4: + return rewriteValueAMD64_OpMaxMaskedInt32x4(v) + case OpMaxMaskedInt32x8: + return rewriteValueAMD64_OpMaxMaskedInt32x8(v) + case OpMaxMaskedInt64x2: + return rewriteValueAMD64_OpMaxMaskedInt64x2(v) + case OpMaxMaskedInt64x4: + return rewriteValueAMD64_OpMaxMaskedInt64x4(v) + case OpMaxMaskedInt64x8: + return rewriteValueAMD64_OpMaxMaskedInt64x8(v) + case OpMaxMaskedInt8x16: + return rewriteValueAMD64_OpMaxMaskedInt8x16(v) + case OpMaxMaskedInt8x32: + return rewriteValueAMD64_OpMaxMaskedInt8x32(v) + case OpMaxMaskedInt8x64: + return rewriteValueAMD64_OpMaxMaskedInt8x64(v) + case OpMaxMaskedUint16x16: + return rewriteValueAMD64_OpMaxMaskedUint16x16(v) + case OpMaxMaskedUint16x32: + return rewriteValueAMD64_OpMaxMaskedUint16x32(v) + case OpMaxMaskedUint16x8: + return rewriteValueAMD64_OpMaxMaskedUint16x8(v) + case OpMaxMaskedUint32x16: + return rewriteValueAMD64_OpMaxMaskedUint32x16(v) + case OpMaxMaskedUint32x4: + return rewriteValueAMD64_OpMaxMaskedUint32x4(v) + case OpMaxMaskedUint32x8: + return rewriteValueAMD64_OpMaxMaskedUint32x8(v) + case OpMaxMaskedUint64x2: + return rewriteValueAMD64_OpMaxMaskedUint64x2(v) + case OpMaxMaskedUint64x4: + return rewriteValueAMD64_OpMaxMaskedUint64x4(v) + case OpMaxMaskedUint64x8: + return rewriteValueAMD64_OpMaxMaskedUint64x8(v) + case OpMaxMaskedUint8x16: + return rewriteValueAMD64_OpMaxMaskedUint8x16(v) + case OpMaxMaskedUint8x32: + return rewriteValueAMD64_OpMaxMaskedUint8x32(v) + case OpMaxMaskedUint8x64: + return rewriteValueAMD64_OpMaxMaskedUint8x64(v) case OpMaxUint16x16: v.Op = OpAMD64VPMAXUW256 return true @@ -3439,6 +2587,66 @@ func rewriteValueAMD64(v *Value) bool { case OpMinInt8x64: v.Op = OpAMD64VPMINSB512 return true + case OpMinMaskedFloat32x16: + return rewriteValueAMD64_OpMinMaskedFloat32x16(v) + case OpMinMaskedFloat32x4: + return rewriteValueAMD64_OpMinMaskedFloat32x4(v) + case OpMinMaskedFloat32x8: + return rewriteValueAMD64_OpMinMaskedFloat32x8(v) + case OpMinMaskedFloat64x2: + return rewriteValueAMD64_OpMinMaskedFloat64x2(v) + case OpMinMaskedFloat64x4: + return rewriteValueAMD64_OpMinMaskedFloat64x4(v) + case OpMinMaskedFloat64x8: + return rewriteValueAMD64_OpMinMaskedFloat64x8(v) + case OpMinMaskedInt16x16: + return rewriteValueAMD64_OpMinMaskedInt16x16(v) + case OpMinMaskedInt16x32: + return rewriteValueAMD64_OpMinMaskedInt16x32(v) + case OpMinMaskedInt16x8: + return rewriteValueAMD64_OpMinMaskedInt16x8(v) + case OpMinMaskedInt32x16: + return rewriteValueAMD64_OpMinMaskedInt32x16(v) + case OpMinMaskedInt32x4: + return rewriteValueAMD64_OpMinMaskedInt32x4(v) + case OpMinMaskedInt32x8: + return rewriteValueAMD64_OpMinMaskedInt32x8(v) + case OpMinMaskedInt64x2: + return rewriteValueAMD64_OpMinMaskedInt64x2(v) + 
case OpMinMaskedInt64x4: + return rewriteValueAMD64_OpMinMaskedInt64x4(v) + case OpMinMaskedInt64x8: + return rewriteValueAMD64_OpMinMaskedInt64x8(v) + case OpMinMaskedInt8x16: + return rewriteValueAMD64_OpMinMaskedInt8x16(v) + case OpMinMaskedInt8x32: + return rewriteValueAMD64_OpMinMaskedInt8x32(v) + case OpMinMaskedInt8x64: + return rewriteValueAMD64_OpMinMaskedInt8x64(v) + case OpMinMaskedUint16x16: + return rewriteValueAMD64_OpMinMaskedUint16x16(v) + case OpMinMaskedUint16x32: + return rewriteValueAMD64_OpMinMaskedUint16x32(v) + case OpMinMaskedUint16x8: + return rewriteValueAMD64_OpMinMaskedUint16x8(v) + case OpMinMaskedUint32x16: + return rewriteValueAMD64_OpMinMaskedUint32x16(v) + case OpMinMaskedUint32x4: + return rewriteValueAMD64_OpMinMaskedUint32x4(v) + case OpMinMaskedUint32x8: + return rewriteValueAMD64_OpMinMaskedUint32x8(v) + case OpMinMaskedUint64x2: + return rewriteValueAMD64_OpMinMaskedUint64x2(v) + case OpMinMaskedUint64x4: + return rewriteValueAMD64_OpMinMaskedUint64x4(v) + case OpMinMaskedUint64x8: + return rewriteValueAMD64_OpMinMaskedUint64x8(v) + case OpMinMaskedUint8x16: + return rewriteValueAMD64_OpMinMaskedUint8x16(v) + case OpMinMaskedUint8x32: + return rewriteValueAMD64_OpMinMaskedUint8x32(v) + case OpMinMaskedUint8x64: + return rewriteValueAMD64_OpMinMaskedUint8x64(v) case OpMinUint16x16: v.Op = OpAMD64VPMINUW256 return true @@ -3532,6 +2740,18 @@ func rewriteValueAMD64(v *Value) bool { case OpMulByPowOf2Float64x8: v.Op = OpAMD64VSCALEFPD512 return true + case OpMulByPowOf2MaskedFloat32x16: + return rewriteValueAMD64_OpMulByPowOf2MaskedFloat32x16(v) + case OpMulByPowOf2MaskedFloat32x4: + return rewriteValueAMD64_OpMulByPowOf2MaskedFloat32x4(v) + case OpMulByPowOf2MaskedFloat32x8: + return rewriteValueAMD64_OpMulByPowOf2MaskedFloat32x8(v) + case OpMulByPowOf2MaskedFloat64x2: + return rewriteValueAMD64_OpMulByPowOf2MaskedFloat64x2(v) + case OpMulByPowOf2MaskedFloat64x4: + return rewriteValueAMD64_OpMulByPowOf2MaskedFloat64x4(v) + case OpMulByPowOf2MaskedFloat64x8: + return rewriteValueAMD64_OpMulByPowOf2MaskedFloat64x8(v) case OpMulEvenWidenInt32x4: v.Op = OpAMD64VPMULDQ128 return true @@ -3547,6 +2767,18 @@ func rewriteValueAMD64(v *Value) bool { case OpMulEvenWidenInt64x8: v.Op = OpAMD64VPMULDQ512 return true + case OpMulEvenWidenMaskedInt64x2: + return rewriteValueAMD64_OpMulEvenWidenMaskedInt64x2(v) + case OpMulEvenWidenMaskedInt64x4: + return rewriteValueAMD64_OpMulEvenWidenMaskedInt64x4(v) + case OpMulEvenWidenMaskedInt64x8: + return rewriteValueAMD64_OpMulEvenWidenMaskedInt64x8(v) + case OpMulEvenWidenMaskedUint64x2: + return rewriteValueAMD64_OpMulEvenWidenMaskedUint64x2(v) + case OpMulEvenWidenMaskedUint64x4: + return rewriteValueAMD64_OpMulEvenWidenMaskedUint64x4(v) + case OpMulEvenWidenMaskedUint64x8: + return rewriteValueAMD64_OpMulEvenWidenMaskedUint64x8(v) case OpMulEvenWidenUint32x4: v.Op = OpAMD64VPMULUDQ128 return true @@ -3589,6 +2821,18 @@ func rewriteValueAMD64(v *Value) bool { case OpMulHighInt16x8: v.Op = OpAMD64VPMULHW128 return true + case OpMulHighMaskedInt16x16: + return rewriteValueAMD64_OpMulHighMaskedInt16x16(v) + case OpMulHighMaskedInt16x32: + return rewriteValueAMD64_OpMulHighMaskedInt16x32(v) + case OpMulHighMaskedInt16x8: + return rewriteValueAMD64_OpMulHighMaskedInt16x8(v) + case OpMulHighMaskedUint16x16: + return rewriteValueAMD64_OpMulHighMaskedUint16x16(v) + case OpMulHighMaskedUint16x32: + return rewriteValueAMD64_OpMulHighMaskedUint16x32(v) + case OpMulHighMaskedUint16x8: + return 
rewriteValueAMD64_OpMulHighMaskedUint16x8(v) case OpMulHighUint16x16: v.Op = OpAMD64VPMULHUW256 return true @@ -3625,6 +2869,36 @@ func rewriteValueAMD64(v *Value) bool { case OpMulLowInt64x8: v.Op = OpAMD64VPMULLQ512 return true + case OpMulLowMaskedInt16x16: + return rewriteValueAMD64_OpMulLowMaskedInt16x16(v) + case OpMulLowMaskedInt16x32: + return rewriteValueAMD64_OpMulLowMaskedInt16x32(v) + case OpMulLowMaskedInt16x8: + return rewriteValueAMD64_OpMulLowMaskedInt16x8(v) + case OpMulLowMaskedInt32x16: + return rewriteValueAMD64_OpMulLowMaskedInt32x16(v) + case OpMulLowMaskedInt32x4: + return rewriteValueAMD64_OpMulLowMaskedInt32x4(v) + case OpMulLowMaskedInt32x8: + return rewriteValueAMD64_OpMulLowMaskedInt32x8(v) + case OpMulLowMaskedInt64x2: + return rewriteValueAMD64_OpMulLowMaskedInt64x2(v) + case OpMulLowMaskedInt64x4: + return rewriteValueAMD64_OpMulLowMaskedInt64x4(v) + case OpMulLowMaskedInt64x8: + return rewriteValueAMD64_OpMulLowMaskedInt64x8(v) + case OpMulMaskedFloat32x16: + return rewriteValueAMD64_OpMulMaskedFloat32x16(v) + case OpMulMaskedFloat32x4: + return rewriteValueAMD64_OpMulMaskedFloat32x4(v) + case OpMulMaskedFloat32x8: + return rewriteValueAMD64_OpMulMaskedFloat32x8(v) + case OpMulMaskedFloat64x2: + return rewriteValueAMD64_OpMulMaskedFloat64x2(v) + case OpMulMaskedFloat64x4: + return rewriteValueAMD64_OpMulMaskedFloat64x4(v) + case OpMulMaskedFloat64x8: + return rewriteValueAMD64_OpMulMaskedFloat64x8(v) case OpNeg16: v.Op = OpAMD64NEGL return true @@ -3698,6 +2972,66 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpNotEqualInt8x32(v) case OpNotEqualInt8x64: return rewriteValueAMD64_OpNotEqualInt8x64(v) + case OpNotEqualMaskedFloat32x16: + return rewriteValueAMD64_OpNotEqualMaskedFloat32x16(v) + case OpNotEqualMaskedFloat32x4: + return rewriteValueAMD64_OpNotEqualMaskedFloat32x4(v) + case OpNotEqualMaskedFloat32x8: + return rewriteValueAMD64_OpNotEqualMaskedFloat32x8(v) + case OpNotEqualMaskedFloat64x2: + return rewriteValueAMD64_OpNotEqualMaskedFloat64x2(v) + case OpNotEqualMaskedFloat64x4: + return rewriteValueAMD64_OpNotEqualMaskedFloat64x4(v) + case OpNotEqualMaskedFloat64x8: + return rewriteValueAMD64_OpNotEqualMaskedFloat64x8(v) + case OpNotEqualMaskedInt16x16: + return rewriteValueAMD64_OpNotEqualMaskedInt16x16(v) + case OpNotEqualMaskedInt16x32: + return rewriteValueAMD64_OpNotEqualMaskedInt16x32(v) + case OpNotEqualMaskedInt16x8: + return rewriteValueAMD64_OpNotEqualMaskedInt16x8(v) + case OpNotEqualMaskedInt32x16: + return rewriteValueAMD64_OpNotEqualMaskedInt32x16(v) + case OpNotEqualMaskedInt32x4: + return rewriteValueAMD64_OpNotEqualMaskedInt32x4(v) + case OpNotEqualMaskedInt32x8: + return rewriteValueAMD64_OpNotEqualMaskedInt32x8(v) + case OpNotEqualMaskedInt64x2: + return rewriteValueAMD64_OpNotEqualMaskedInt64x2(v) + case OpNotEqualMaskedInt64x4: + return rewriteValueAMD64_OpNotEqualMaskedInt64x4(v) + case OpNotEqualMaskedInt64x8: + return rewriteValueAMD64_OpNotEqualMaskedInt64x8(v) + case OpNotEqualMaskedInt8x16: + return rewriteValueAMD64_OpNotEqualMaskedInt8x16(v) + case OpNotEqualMaskedInt8x32: + return rewriteValueAMD64_OpNotEqualMaskedInt8x32(v) + case OpNotEqualMaskedInt8x64: + return rewriteValueAMD64_OpNotEqualMaskedInt8x64(v) + case OpNotEqualMaskedUint16x16: + return rewriteValueAMD64_OpNotEqualMaskedUint16x16(v) + case OpNotEqualMaskedUint16x32: + return rewriteValueAMD64_OpNotEqualMaskedUint16x32(v) + case OpNotEqualMaskedUint16x8: + return rewriteValueAMD64_OpNotEqualMaskedUint16x8(v) + case 
OpNotEqualMaskedUint32x16: + return rewriteValueAMD64_OpNotEqualMaskedUint32x16(v) + case OpNotEqualMaskedUint32x4: + return rewriteValueAMD64_OpNotEqualMaskedUint32x4(v) + case OpNotEqualMaskedUint32x8: + return rewriteValueAMD64_OpNotEqualMaskedUint32x8(v) + case OpNotEqualMaskedUint64x2: + return rewriteValueAMD64_OpNotEqualMaskedUint64x2(v) + case OpNotEqualMaskedUint64x4: + return rewriteValueAMD64_OpNotEqualMaskedUint64x4(v) + case OpNotEqualMaskedUint64x8: + return rewriteValueAMD64_OpNotEqualMaskedUint64x8(v) + case OpNotEqualMaskedUint8x16: + return rewriteValueAMD64_OpNotEqualMaskedUint8x16(v) + case OpNotEqualMaskedUint8x32: + return rewriteValueAMD64_OpNotEqualMaskedUint8x32(v) + case OpNotEqualMaskedUint8x64: + return rewriteValueAMD64_OpNotEqualMaskedUint8x64(v) case OpNotEqualUint16x16: return rewriteValueAMD64_OpNotEqualUint16x16(v) case OpNotEqualUint16x32: @@ -3769,6 +3103,30 @@ func rewriteValueAMD64(v *Value) bool { case OpOrInt8x32: v.Op = OpAMD64VPOR256 return true + case OpOrMaskedInt32x16: + return rewriteValueAMD64_OpOrMaskedInt32x16(v) + case OpOrMaskedInt32x4: + return rewriteValueAMD64_OpOrMaskedInt32x4(v) + case OpOrMaskedInt32x8: + return rewriteValueAMD64_OpOrMaskedInt32x8(v) + case OpOrMaskedInt64x2: + return rewriteValueAMD64_OpOrMaskedInt64x2(v) + case OpOrMaskedInt64x4: + return rewriteValueAMD64_OpOrMaskedInt64x4(v) + case OpOrMaskedInt64x8: + return rewriteValueAMD64_OpOrMaskedInt64x8(v) + case OpOrMaskedUint32x16: + return rewriteValueAMD64_OpOrMaskedUint32x16(v) + case OpOrMaskedUint32x4: + return rewriteValueAMD64_OpOrMaskedUint32x4(v) + case OpOrMaskedUint32x8: + return rewriteValueAMD64_OpOrMaskedUint32x8(v) + case OpOrMaskedUint64x2: + return rewriteValueAMD64_OpOrMaskedUint64x2(v) + case OpOrMaskedUint64x4: + return rewriteValueAMD64_OpOrMaskedUint64x4(v) + case OpOrMaskedUint64x8: + return rewriteValueAMD64_OpOrMaskedUint64x8(v) case OpOrUint16x16: v.Op = OpAMD64VPOR256 return true @@ -3808,6 +3166,12 @@ func rewriteValueAMD64(v *Value) bool { case OpPairDotProdAccumulateInt32x8: v.Op = OpAMD64VPDPWSSD256 return true + case OpPairDotProdAccumulateMaskedInt32x16: + return rewriteValueAMD64_OpPairDotProdAccumulateMaskedInt32x16(v) + case OpPairDotProdAccumulateMaskedInt32x4: + return rewriteValueAMD64_OpPairDotProdAccumulateMaskedInt32x4(v) + case OpPairDotProdAccumulateMaskedInt32x8: + return rewriteValueAMD64_OpPairDotProdAccumulateMaskedInt32x8(v) case OpPairDotProdInt16x16: v.Op = OpAMD64VPMADDWD256 return true @@ -3817,6 +3181,12 @@ func rewriteValueAMD64(v *Value) bool { case OpPairDotProdInt16x8: v.Op = OpAMD64VPMADDWD128 return true + case OpPairDotProdMaskedInt16x16: + return rewriteValueAMD64_OpPairDotProdMaskedInt16x16(v) + case OpPairDotProdMaskedInt16x32: + return rewriteValueAMD64_OpPairDotProdMaskedInt16x32(v) + case OpPairDotProdMaskedInt16x8: + return rewriteValueAMD64_OpPairDotProdMaskedInt16x8(v) case OpPairwiseAddFloat32x4: v.Op = OpAMD64VHADDPS128 return true @@ -3937,6 +3307,54 @@ func rewriteValueAMD64(v *Value) bool { case OpPopCountInt8x64: v.Op = OpAMD64VPOPCNTB512 return true + case OpPopCountMaskedInt16x16: + return rewriteValueAMD64_OpPopCountMaskedInt16x16(v) + case OpPopCountMaskedInt16x32: + return rewriteValueAMD64_OpPopCountMaskedInt16x32(v) + case OpPopCountMaskedInt16x8: + return rewriteValueAMD64_OpPopCountMaskedInt16x8(v) + case OpPopCountMaskedInt32x16: + return rewriteValueAMD64_OpPopCountMaskedInt32x16(v) + case OpPopCountMaskedInt32x4: + return rewriteValueAMD64_OpPopCountMaskedInt32x4(v) + case 
OpPopCountMaskedInt32x8: + return rewriteValueAMD64_OpPopCountMaskedInt32x8(v) + case OpPopCountMaskedInt64x2: + return rewriteValueAMD64_OpPopCountMaskedInt64x2(v) + case OpPopCountMaskedInt64x4: + return rewriteValueAMD64_OpPopCountMaskedInt64x4(v) + case OpPopCountMaskedInt64x8: + return rewriteValueAMD64_OpPopCountMaskedInt64x8(v) + case OpPopCountMaskedInt8x16: + return rewriteValueAMD64_OpPopCountMaskedInt8x16(v) + case OpPopCountMaskedInt8x32: + return rewriteValueAMD64_OpPopCountMaskedInt8x32(v) + case OpPopCountMaskedInt8x64: + return rewriteValueAMD64_OpPopCountMaskedInt8x64(v) + case OpPopCountMaskedUint16x16: + return rewriteValueAMD64_OpPopCountMaskedUint16x16(v) + case OpPopCountMaskedUint16x32: + return rewriteValueAMD64_OpPopCountMaskedUint16x32(v) + case OpPopCountMaskedUint16x8: + return rewriteValueAMD64_OpPopCountMaskedUint16x8(v) + case OpPopCountMaskedUint32x16: + return rewriteValueAMD64_OpPopCountMaskedUint32x16(v) + case OpPopCountMaskedUint32x4: + return rewriteValueAMD64_OpPopCountMaskedUint32x4(v) + case OpPopCountMaskedUint32x8: + return rewriteValueAMD64_OpPopCountMaskedUint32x8(v) + case OpPopCountMaskedUint64x2: + return rewriteValueAMD64_OpPopCountMaskedUint64x2(v) + case OpPopCountMaskedUint64x4: + return rewriteValueAMD64_OpPopCountMaskedUint64x4(v) + case OpPopCountMaskedUint64x8: + return rewriteValueAMD64_OpPopCountMaskedUint64x8(v) + case OpPopCountMaskedUint8x16: + return rewriteValueAMD64_OpPopCountMaskedUint8x16(v) + case OpPopCountMaskedUint8x32: + return rewriteValueAMD64_OpPopCountMaskedUint8x32(v) + case OpPopCountMaskedUint8x64: + return rewriteValueAMD64_OpPopCountMaskedUint8x64(v) case OpPopCountUint16x16: v.Op = OpAMD64VPOPCNTW256 return true @@ -3991,6 +3409,30 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpRotateAllLeftInt64x4(v) case OpRotateAllLeftInt64x8: return rewriteValueAMD64_OpRotateAllLeftInt64x8(v) + case OpRotateAllLeftMaskedInt32x16: + return rewriteValueAMD64_OpRotateAllLeftMaskedInt32x16(v) + case OpRotateAllLeftMaskedInt32x4: + return rewriteValueAMD64_OpRotateAllLeftMaskedInt32x4(v) + case OpRotateAllLeftMaskedInt32x8: + return rewriteValueAMD64_OpRotateAllLeftMaskedInt32x8(v) + case OpRotateAllLeftMaskedInt64x2: + return rewriteValueAMD64_OpRotateAllLeftMaskedInt64x2(v) + case OpRotateAllLeftMaskedInt64x4: + return rewriteValueAMD64_OpRotateAllLeftMaskedInt64x4(v) + case OpRotateAllLeftMaskedInt64x8: + return rewriteValueAMD64_OpRotateAllLeftMaskedInt64x8(v) + case OpRotateAllLeftMaskedUint32x16: + return rewriteValueAMD64_OpRotateAllLeftMaskedUint32x16(v) + case OpRotateAllLeftMaskedUint32x4: + return rewriteValueAMD64_OpRotateAllLeftMaskedUint32x4(v) + case OpRotateAllLeftMaskedUint32x8: + return rewriteValueAMD64_OpRotateAllLeftMaskedUint32x8(v) + case OpRotateAllLeftMaskedUint64x2: + return rewriteValueAMD64_OpRotateAllLeftMaskedUint64x2(v) + case OpRotateAllLeftMaskedUint64x4: + return rewriteValueAMD64_OpRotateAllLeftMaskedUint64x4(v) + case OpRotateAllLeftMaskedUint64x8: + return rewriteValueAMD64_OpRotateAllLeftMaskedUint64x8(v) case OpRotateAllLeftUint32x16: return rewriteValueAMD64_OpRotateAllLeftUint32x16(v) case OpRotateAllLeftUint32x4: @@ -4015,6 +3457,30 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpRotateAllRightInt64x4(v) case OpRotateAllRightInt64x8: return rewriteValueAMD64_OpRotateAllRightInt64x8(v) + case OpRotateAllRightMaskedInt32x16: + return rewriteValueAMD64_OpRotateAllRightMaskedInt32x16(v) + case OpRotateAllRightMaskedInt32x4: + return 
rewriteValueAMD64_OpRotateAllRightMaskedInt32x4(v) + case OpRotateAllRightMaskedInt32x8: + return rewriteValueAMD64_OpRotateAllRightMaskedInt32x8(v) + case OpRotateAllRightMaskedInt64x2: + return rewriteValueAMD64_OpRotateAllRightMaskedInt64x2(v) + case OpRotateAllRightMaskedInt64x4: + return rewriteValueAMD64_OpRotateAllRightMaskedInt64x4(v) + case OpRotateAllRightMaskedInt64x8: + return rewriteValueAMD64_OpRotateAllRightMaskedInt64x8(v) + case OpRotateAllRightMaskedUint32x16: + return rewriteValueAMD64_OpRotateAllRightMaskedUint32x16(v) + case OpRotateAllRightMaskedUint32x4: + return rewriteValueAMD64_OpRotateAllRightMaskedUint32x4(v) + case OpRotateAllRightMaskedUint32x8: + return rewriteValueAMD64_OpRotateAllRightMaskedUint32x8(v) + case OpRotateAllRightMaskedUint64x2: + return rewriteValueAMD64_OpRotateAllRightMaskedUint64x2(v) + case OpRotateAllRightMaskedUint64x4: + return rewriteValueAMD64_OpRotateAllRightMaskedUint64x4(v) + case OpRotateAllRightMaskedUint64x8: + return rewriteValueAMD64_OpRotateAllRightMaskedUint64x8(v) case OpRotateAllRightUint32x16: return rewriteValueAMD64_OpRotateAllRightUint32x16(v) case OpRotateAllRightUint32x4: @@ -4057,6 +3523,30 @@ func rewriteValueAMD64(v *Value) bool { case OpRotateLeftInt64x8: v.Op = OpAMD64VPROLVQ512 return true + case OpRotateLeftMaskedInt32x16: + return rewriteValueAMD64_OpRotateLeftMaskedInt32x16(v) + case OpRotateLeftMaskedInt32x4: + return rewriteValueAMD64_OpRotateLeftMaskedInt32x4(v) + case OpRotateLeftMaskedInt32x8: + return rewriteValueAMD64_OpRotateLeftMaskedInt32x8(v) + case OpRotateLeftMaskedInt64x2: + return rewriteValueAMD64_OpRotateLeftMaskedInt64x2(v) + case OpRotateLeftMaskedInt64x4: + return rewriteValueAMD64_OpRotateLeftMaskedInt64x4(v) + case OpRotateLeftMaskedInt64x8: + return rewriteValueAMD64_OpRotateLeftMaskedInt64x8(v) + case OpRotateLeftMaskedUint32x16: + return rewriteValueAMD64_OpRotateLeftMaskedUint32x16(v) + case OpRotateLeftMaskedUint32x4: + return rewriteValueAMD64_OpRotateLeftMaskedUint32x4(v) + case OpRotateLeftMaskedUint32x8: + return rewriteValueAMD64_OpRotateLeftMaskedUint32x8(v) + case OpRotateLeftMaskedUint64x2: + return rewriteValueAMD64_OpRotateLeftMaskedUint64x2(v) + case OpRotateLeftMaskedUint64x4: + return rewriteValueAMD64_OpRotateLeftMaskedUint64x4(v) + case OpRotateLeftMaskedUint64x8: + return rewriteValueAMD64_OpRotateLeftMaskedUint64x8(v) case OpRotateLeftUint32x16: v.Op = OpAMD64VPROLVD512 return true @@ -4093,6 +3583,30 @@ func rewriteValueAMD64(v *Value) bool { case OpRotateRightInt64x8: v.Op = OpAMD64VPRORVQ512 return true + case OpRotateRightMaskedInt32x16: + return rewriteValueAMD64_OpRotateRightMaskedInt32x16(v) + case OpRotateRightMaskedInt32x4: + return rewriteValueAMD64_OpRotateRightMaskedInt32x4(v) + case OpRotateRightMaskedInt32x8: + return rewriteValueAMD64_OpRotateRightMaskedInt32x8(v) + case OpRotateRightMaskedInt64x2: + return rewriteValueAMD64_OpRotateRightMaskedInt64x2(v) + case OpRotateRightMaskedInt64x4: + return rewriteValueAMD64_OpRotateRightMaskedInt64x4(v) + case OpRotateRightMaskedInt64x8: + return rewriteValueAMD64_OpRotateRightMaskedInt64x8(v) + case OpRotateRightMaskedUint32x16: + return rewriteValueAMD64_OpRotateRightMaskedUint32x16(v) + case OpRotateRightMaskedUint32x4: + return rewriteValueAMD64_OpRotateRightMaskedUint32x4(v) + case OpRotateRightMaskedUint32x8: + return rewriteValueAMD64_OpRotateRightMaskedUint32x8(v) + case OpRotateRightMaskedUint64x2: + return rewriteValueAMD64_OpRotateRightMaskedUint64x2(v) + case OpRotateRightMaskedUint64x4: + return 
rewriteValueAMD64_OpRotateRightMaskedUint64x4(v) + case OpRotateRightMaskedUint64x8: + return rewriteValueAMD64_OpRotateRightMaskedUint64x8(v) case OpRotateRightUint32x16: v.Op = OpAMD64VPRORVD512 return true @@ -4139,6 +3653,18 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpRoundWithPrecisionFloat64x4(v) case OpRoundWithPrecisionFloat64x8: return rewriteValueAMD64_OpRoundWithPrecisionFloat64x8(v) + case OpRoundWithPrecisionMaskedFloat32x16: + return rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat32x16(v) + case OpRoundWithPrecisionMaskedFloat32x4: + return rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat32x4(v) + case OpRoundWithPrecisionMaskedFloat32x8: + return rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat32x8(v) + case OpRoundWithPrecisionMaskedFloat64x2: + return rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat64x2(v) + case OpRoundWithPrecisionMaskedFloat64x4: + return rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat64x4(v) + case OpRoundWithPrecisionMaskedFloat64x8: + return rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat64x8(v) case OpRsh16Ux16: return rewriteValueAMD64_OpRsh16Ux16(v) case OpRsh16Ux32: @@ -4221,6 +3747,30 @@ func rewriteValueAMD64(v *Value) bool { case OpSaturatedAddInt8x64: v.Op = OpAMD64VPADDSB512 return true + case OpSaturatedAddMaskedInt16x16: + return rewriteValueAMD64_OpSaturatedAddMaskedInt16x16(v) + case OpSaturatedAddMaskedInt16x32: + return rewriteValueAMD64_OpSaturatedAddMaskedInt16x32(v) + case OpSaturatedAddMaskedInt16x8: + return rewriteValueAMD64_OpSaturatedAddMaskedInt16x8(v) + case OpSaturatedAddMaskedInt8x16: + return rewriteValueAMD64_OpSaturatedAddMaskedInt8x16(v) + case OpSaturatedAddMaskedInt8x32: + return rewriteValueAMD64_OpSaturatedAddMaskedInt8x32(v) + case OpSaturatedAddMaskedInt8x64: + return rewriteValueAMD64_OpSaturatedAddMaskedInt8x64(v) + case OpSaturatedAddMaskedUint16x16: + return rewriteValueAMD64_OpSaturatedAddMaskedUint16x16(v) + case OpSaturatedAddMaskedUint16x32: + return rewriteValueAMD64_OpSaturatedAddMaskedUint16x32(v) + case OpSaturatedAddMaskedUint16x8: + return rewriteValueAMD64_OpSaturatedAddMaskedUint16x8(v) + case OpSaturatedAddMaskedUint8x16: + return rewriteValueAMD64_OpSaturatedAddMaskedUint8x16(v) + case OpSaturatedAddMaskedUint8x32: + return rewriteValueAMD64_OpSaturatedAddMaskedUint8x32(v) + case OpSaturatedAddMaskedUint8x64: + return rewriteValueAMD64_OpSaturatedAddMaskedUint8x64(v) case OpSaturatedAddUint16x16: v.Op = OpAMD64VPADDSW256 return true @@ -4248,6 +3798,12 @@ func rewriteValueAMD64(v *Value) bool { case OpSaturatedPairDotProdAccumulateInt32x8: v.Op = OpAMD64VPDPWSSDS256 return true + case OpSaturatedPairDotProdAccumulateMaskedInt32x16: + return rewriteValueAMD64_OpSaturatedPairDotProdAccumulateMaskedInt32x16(v) + case OpSaturatedPairDotProdAccumulateMaskedInt32x4: + return rewriteValueAMD64_OpSaturatedPairDotProdAccumulateMaskedInt32x4(v) + case OpSaturatedPairDotProdAccumulateMaskedInt32x8: + return rewriteValueAMD64_OpSaturatedPairDotProdAccumulateMaskedInt32x8(v) case OpSaturatedPairwiseAddInt16x16: v.Op = OpAMD64VPHADDSW256 return true @@ -4278,6 +3834,30 @@ func rewriteValueAMD64(v *Value) bool { case OpSaturatedSubInt8x64: v.Op = OpAMD64VPSUBSB512 return true + case OpSaturatedSubMaskedInt16x16: + return rewriteValueAMD64_OpSaturatedSubMaskedInt16x16(v) + case OpSaturatedSubMaskedInt16x32: + return rewriteValueAMD64_OpSaturatedSubMaskedInt16x32(v) + case OpSaturatedSubMaskedInt16x8: + return rewriteValueAMD64_OpSaturatedSubMaskedInt16x8(v) + case 
OpSaturatedSubMaskedInt8x16: + return rewriteValueAMD64_OpSaturatedSubMaskedInt8x16(v) + case OpSaturatedSubMaskedInt8x32: + return rewriteValueAMD64_OpSaturatedSubMaskedInt8x32(v) + case OpSaturatedSubMaskedInt8x64: + return rewriteValueAMD64_OpSaturatedSubMaskedInt8x64(v) + case OpSaturatedSubMaskedUint16x16: + return rewriteValueAMD64_OpSaturatedSubMaskedUint16x16(v) + case OpSaturatedSubMaskedUint16x32: + return rewriteValueAMD64_OpSaturatedSubMaskedUint16x32(v) + case OpSaturatedSubMaskedUint16x8: + return rewriteValueAMD64_OpSaturatedSubMaskedUint16x8(v) + case OpSaturatedSubMaskedUint8x16: + return rewriteValueAMD64_OpSaturatedSubMaskedUint8x16(v) + case OpSaturatedSubMaskedUint8x32: + return rewriteValueAMD64_OpSaturatedSubMaskedUint8x32(v) + case OpSaturatedSubMaskedUint8x64: + return rewriteValueAMD64_OpSaturatedSubMaskedUint8x64(v) case OpSaturatedSubUint16x16: v.Op = OpAMD64VPSUBSW256 return true @@ -4296,6 +3876,12 @@ func rewriteValueAMD64(v *Value) bool { case OpSaturatedSubUint8x64: v.Op = OpAMD64VPSUBSB512 return true + case OpSaturatedUnsignedSignedPairDotProdMaskedUint8x16: + return rewriteValueAMD64_OpSaturatedUnsignedSignedPairDotProdMaskedUint8x16(v) + case OpSaturatedUnsignedSignedPairDotProdMaskedUint8x32: + return rewriteValueAMD64_OpSaturatedUnsignedSignedPairDotProdMaskedUint8x32(v) + case OpSaturatedUnsignedSignedPairDotProdMaskedUint8x64: + return rewriteValueAMD64_OpSaturatedUnsignedSignedPairDotProdMaskedUint8x64(v) case OpSaturatedUnsignedSignedPairDotProdUint8x16: v.Op = OpAMD64VPMADDUBSW128 return true @@ -4314,6 +3900,18 @@ func rewriteValueAMD64(v *Value) bool { case OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8: v.Op = OpAMD64VPDPBUSDS256 return true + case OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16: + return rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16(v) + case OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4: + return rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4(v) + case OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8: + return rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8(v) + case OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x16: + return rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x16(v) + case OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x4: + return rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x4(v) + case OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x8: + return rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x8(v) case OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16: v.Op = OpAMD64VPDPBUSDS512 return true @@ -4383,6 +3981,42 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt64x4(v) case OpShiftAllLeftAndFillUpperFromInt64x8: return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt64x8(v) + case OpShiftAllLeftAndFillUpperFromMaskedInt16x16: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt16x16(v) + case OpShiftAllLeftAndFillUpperFromMaskedInt16x32: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt16x32(v) + case OpShiftAllLeftAndFillUpperFromMaskedInt16x8: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt16x8(v) + case OpShiftAllLeftAndFillUpperFromMaskedInt32x16: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt32x16(v) + case 
OpShiftAllLeftAndFillUpperFromMaskedInt32x4: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt32x4(v) + case OpShiftAllLeftAndFillUpperFromMaskedInt32x8: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt32x8(v) + case OpShiftAllLeftAndFillUpperFromMaskedInt64x2: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt64x2(v) + case OpShiftAllLeftAndFillUpperFromMaskedInt64x4: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt64x4(v) + case OpShiftAllLeftAndFillUpperFromMaskedInt64x8: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt64x8(v) + case OpShiftAllLeftAndFillUpperFromMaskedUint16x16: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint16x16(v) + case OpShiftAllLeftAndFillUpperFromMaskedUint16x32: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint16x32(v) + case OpShiftAllLeftAndFillUpperFromMaskedUint16x8: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint16x8(v) + case OpShiftAllLeftAndFillUpperFromMaskedUint32x16: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint32x16(v) + case OpShiftAllLeftAndFillUpperFromMaskedUint32x4: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint32x4(v) + case OpShiftAllLeftAndFillUpperFromMaskedUint32x8: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint32x8(v) + case OpShiftAllLeftAndFillUpperFromMaskedUint64x2: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint64x2(v) + case OpShiftAllLeftAndFillUpperFromMaskedUint64x4: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint64x4(v) + case OpShiftAllLeftAndFillUpperFromMaskedUint64x8: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint64x8(v) case OpShiftAllLeftAndFillUpperFromUint16x16: return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint16x16(v) case OpShiftAllLeftAndFillUpperFromUint16x32: @@ -4422,6 +4056,18 @@ func rewriteValueAMD64(v *Value) bool { case OpShiftAllLeftInt64x8: v.Op = OpAMD64VPSLLQ512 return true + case OpShiftAllLeftMaskedInt64x2: + return rewriteValueAMD64_OpShiftAllLeftMaskedInt64x2(v) + case OpShiftAllLeftMaskedInt64x4: + return rewriteValueAMD64_OpShiftAllLeftMaskedInt64x4(v) + case OpShiftAllLeftMaskedInt64x8: + return rewriteValueAMD64_OpShiftAllLeftMaskedInt64x8(v) + case OpShiftAllLeftMaskedUint64x2: + return rewriteValueAMD64_OpShiftAllLeftMaskedUint64x2(v) + case OpShiftAllLeftMaskedUint64x4: + return rewriteValueAMD64_OpShiftAllLeftMaskedUint64x4(v) + case OpShiftAllLeftMaskedUint64x8: + return rewriteValueAMD64_OpShiftAllLeftMaskedUint64x8(v) case OpShiftAllLeftUint16x16: v.Op = OpAMD64VPSLLW256 return true @@ -4461,6 +4107,42 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt64x4(v) case OpShiftAllRightAndFillUpperFromInt64x8: return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt64x8(v) + case OpShiftAllRightAndFillUpperFromMaskedInt16x16: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt16x16(v) + case OpShiftAllRightAndFillUpperFromMaskedInt16x32: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt16x32(v) + case OpShiftAllRightAndFillUpperFromMaskedInt16x8: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt16x8(v) + case OpShiftAllRightAndFillUpperFromMaskedInt32x16: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt32x16(v) + case OpShiftAllRightAndFillUpperFromMaskedInt32x4: + return 
rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt32x4(v) + case OpShiftAllRightAndFillUpperFromMaskedInt32x8: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt32x8(v) + case OpShiftAllRightAndFillUpperFromMaskedInt64x2: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt64x2(v) + case OpShiftAllRightAndFillUpperFromMaskedInt64x4: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt64x4(v) + case OpShiftAllRightAndFillUpperFromMaskedInt64x8: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt64x8(v) + case OpShiftAllRightAndFillUpperFromMaskedUint16x16: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint16x16(v) + case OpShiftAllRightAndFillUpperFromMaskedUint16x32: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint16x32(v) + case OpShiftAllRightAndFillUpperFromMaskedUint16x8: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint16x8(v) + case OpShiftAllRightAndFillUpperFromMaskedUint32x16: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint32x16(v) + case OpShiftAllRightAndFillUpperFromMaskedUint32x4: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint32x4(v) + case OpShiftAllRightAndFillUpperFromMaskedUint32x8: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint32x8(v) + case OpShiftAllRightAndFillUpperFromMaskedUint64x2: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint64x2(v) + case OpShiftAllRightAndFillUpperFromMaskedUint64x4: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint64x4(v) + case OpShiftAllRightAndFillUpperFromMaskedUint64x8: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint64x8(v) case OpShiftAllRightAndFillUpperFromUint16x16: return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint16x16(v) case OpShiftAllRightAndFillUpperFromUint16x32: @@ -4500,6 +4182,18 @@ func rewriteValueAMD64(v *Value) bool { case OpShiftAllRightInt64x8: v.Op = OpAMD64VPSRLQ512 return true + case OpShiftAllRightMaskedInt64x2: + return rewriteValueAMD64_OpShiftAllRightMaskedInt64x2(v) + case OpShiftAllRightMaskedInt64x4: + return rewriteValueAMD64_OpShiftAllRightMaskedInt64x4(v) + case OpShiftAllRightMaskedInt64x8: + return rewriteValueAMD64_OpShiftAllRightMaskedInt64x8(v) + case OpShiftAllRightMaskedUint64x2: + return rewriteValueAMD64_OpShiftAllRightMaskedUint64x2(v) + case OpShiftAllRightMaskedUint64x4: + return rewriteValueAMD64_OpShiftAllRightMaskedUint64x4(v) + case OpShiftAllRightMaskedUint64x8: + return rewriteValueAMD64_OpShiftAllRightMaskedUint64x8(v) case OpShiftAllRightSignExtendedInt16x16: v.Op = OpAMD64VPSRAW256 return true @@ -4521,6 +4215,12 @@ func rewriteValueAMD64(v *Value) bool { case OpShiftAllRightSignExtendedInt64x8: v.Op = OpAMD64VPSRAQ512 return true + case OpShiftAllRightSignExtendedMaskedInt64x2: + return rewriteValueAMD64_OpShiftAllRightSignExtendedMaskedInt64x2(v) + case OpShiftAllRightSignExtendedMaskedInt64x4: + return rewriteValueAMD64_OpShiftAllRightSignExtendedMaskedInt64x4(v) + case OpShiftAllRightSignExtendedMaskedInt64x8: + return rewriteValueAMD64_OpShiftAllRightSignExtendedMaskedInt64x8(v) case OpShiftAllRightUint16x16: v.Op = OpAMD64VPSRLW256 return true @@ -4569,6 +4269,42 @@ func rewriteValueAMD64(v *Value) bool { case OpShiftLeftAndFillUpperFromInt64x8: v.Op = OpAMD64VPSHLDVQ512 return true + case OpShiftLeftAndFillUpperFromMaskedInt16x16: + return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt16x16(v) + case 
OpShiftLeftAndFillUpperFromMaskedInt16x32: + return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt16x32(v) + case OpShiftLeftAndFillUpperFromMaskedInt16x8: + return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt16x8(v) + case OpShiftLeftAndFillUpperFromMaskedInt32x16: + return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt32x16(v) + case OpShiftLeftAndFillUpperFromMaskedInt32x4: + return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt32x4(v) + case OpShiftLeftAndFillUpperFromMaskedInt32x8: + return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt32x8(v) + case OpShiftLeftAndFillUpperFromMaskedInt64x2: + return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt64x2(v) + case OpShiftLeftAndFillUpperFromMaskedInt64x4: + return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt64x4(v) + case OpShiftLeftAndFillUpperFromMaskedInt64x8: + return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt64x8(v) + case OpShiftLeftAndFillUpperFromMaskedUint16x16: + return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint16x16(v) + case OpShiftLeftAndFillUpperFromMaskedUint16x32: + return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint16x32(v) + case OpShiftLeftAndFillUpperFromMaskedUint16x8: + return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint16x8(v) + case OpShiftLeftAndFillUpperFromMaskedUint32x16: + return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint32x16(v) + case OpShiftLeftAndFillUpperFromMaskedUint32x4: + return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint32x4(v) + case OpShiftLeftAndFillUpperFromMaskedUint32x8: + return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint32x8(v) + case OpShiftLeftAndFillUpperFromMaskedUint64x2: + return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint64x2(v) + case OpShiftLeftAndFillUpperFromMaskedUint64x4: + return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint64x4(v) + case OpShiftLeftAndFillUpperFromMaskedUint64x8: + return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint64x8(v) case OpShiftLeftAndFillUpperFromUint16x16: v.Op = OpAMD64VPSHLDVW256 return true @@ -4623,6 +4359,42 @@ func rewriteValueAMD64(v *Value) bool { case OpShiftLeftInt64x8: v.Op = OpAMD64VPSLLVQ512 return true + case OpShiftLeftMaskedInt16x16: + return rewriteValueAMD64_OpShiftLeftMaskedInt16x16(v) + case OpShiftLeftMaskedInt16x32: + return rewriteValueAMD64_OpShiftLeftMaskedInt16x32(v) + case OpShiftLeftMaskedInt16x8: + return rewriteValueAMD64_OpShiftLeftMaskedInt16x8(v) + case OpShiftLeftMaskedInt32x16: + return rewriteValueAMD64_OpShiftLeftMaskedInt32x16(v) + case OpShiftLeftMaskedInt32x4: + return rewriteValueAMD64_OpShiftLeftMaskedInt32x4(v) + case OpShiftLeftMaskedInt32x8: + return rewriteValueAMD64_OpShiftLeftMaskedInt32x8(v) + case OpShiftLeftMaskedInt64x2: + return rewriteValueAMD64_OpShiftLeftMaskedInt64x2(v) + case OpShiftLeftMaskedInt64x4: + return rewriteValueAMD64_OpShiftLeftMaskedInt64x4(v) + case OpShiftLeftMaskedInt64x8: + return rewriteValueAMD64_OpShiftLeftMaskedInt64x8(v) + case OpShiftLeftMaskedUint16x16: + return rewriteValueAMD64_OpShiftLeftMaskedUint16x16(v) + case OpShiftLeftMaskedUint16x32: + return rewriteValueAMD64_OpShiftLeftMaskedUint16x32(v) + case OpShiftLeftMaskedUint16x8: + return rewriteValueAMD64_OpShiftLeftMaskedUint16x8(v) + case OpShiftLeftMaskedUint32x16: + return rewriteValueAMD64_OpShiftLeftMaskedUint32x16(v) + case OpShiftLeftMaskedUint32x4: + return rewriteValueAMD64_OpShiftLeftMaskedUint32x4(v) + case OpShiftLeftMaskedUint32x8: 
+ return rewriteValueAMD64_OpShiftLeftMaskedUint32x8(v) + case OpShiftLeftMaskedUint64x2: + return rewriteValueAMD64_OpShiftLeftMaskedUint64x2(v) + case OpShiftLeftMaskedUint64x4: + return rewriteValueAMD64_OpShiftLeftMaskedUint64x4(v) + case OpShiftLeftMaskedUint64x8: + return rewriteValueAMD64_OpShiftLeftMaskedUint64x8(v) case OpShiftLeftUint16x16: v.Op = OpAMD64VPSLLVW256 return true @@ -4677,6 +4449,42 @@ func rewriteValueAMD64(v *Value) bool { case OpShiftRightAndFillUpperFromInt64x8: v.Op = OpAMD64VPSHRDVQ512 return true + case OpShiftRightAndFillUpperFromMaskedInt16x16: + return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt16x16(v) + case OpShiftRightAndFillUpperFromMaskedInt16x32: + return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt16x32(v) + case OpShiftRightAndFillUpperFromMaskedInt16x8: + return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt16x8(v) + case OpShiftRightAndFillUpperFromMaskedInt32x16: + return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt32x16(v) + case OpShiftRightAndFillUpperFromMaskedInt32x4: + return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt32x4(v) + case OpShiftRightAndFillUpperFromMaskedInt32x8: + return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt32x8(v) + case OpShiftRightAndFillUpperFromMaskedInt64x2: + return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt64x2(v) + case OpShiftRightAndFillUpperFromMaskedInt64x4: + return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt64x4(v) + case OpShiftRightAndFillUpperFromMaskedInt64x8: + return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt64x8(v) + case OpShiftRightAndFillUpperFromMaskedUint16x16: + return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint16x16(v) + case OpShiftRightAndFillUpperFromMaskedUint16x32: + return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint16x32(v) + case OpShiftRightAndFillUpperFromMaskedUint16x8: + return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint16x8(v) + case OpShiftRightAndFillUpperFromMaskedUint32x16: + return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint32x16(v) + case OpShiftRightAndFillUpperFromMaskedUint32x4: + return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint32x4(v) + case OpShiftRightAndFillUpperFromMaskedUint32x8: + return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint32x8(v) + case OpShiftRightAndFillUpperFromMaskedUint64x2: + return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint64x2(v) + case OpShiftRightAndFillUpperFromMaskedUint64x4: + return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint64x4(v) + case OpShiftRightAndFillUpperFromMaskedUint64x8: + return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint64x8(v) case OpShiftRightAndFillUpperFromUint16x16: v.Op = OpAMD64VPSHRDVW256 return true @@ -4731,6 +4539,42 @@ func rewriteValueAMD64(v *Value) bool { case OpShiftRightInt64x8: v.Op = OpAMD64VPSRLVQ512 return true + case OpShiftRightMaskedInt16x16: + return rewriteValueAMD64_OpShiftRightMaskedInt16x16(v) + case OpShiftRightMaskedInt16x32: + return rewriteValueAMD64_OpShiftRightMaskedInt16x32(v) + case OpShiftRightMaskedInt16x8: + return rewriteValueAMD64_OpShiftRightMaskedInt16x8(v) + case OpShiftRightMaskedInt32x16: + return rewriteValueAMD64_OpShiftRightMaskedInt32x16(v) + case OpShiftRightMaskedInt32x4: + return rewriteValueAMD64_OpShiftRightMaskedInt32x4(v) + case OpShiftRightMaskedInt32x8: + return rewriteValueAMD64_OpShiftRightMaskedInt32x8(v) + case OpShiftRightMaskedInt64x2: + 
return rewriteValueAMD64_OpShiftRightMaskedInt64x2(v) + case OpShiftRightMaskedInt64x4: + return rewriteValueAMD64_OpShiftRightMaskedInt64x4(v) + case OpShiftRightMaskedInt64x8: + return rewriteValueAMD64_OpShiftRightMaskedInt64x8(v) + case OpShiftRightMaskedUint16x16: + return rewriteValueAMD64_OpShiftRightMaskedUint16x16(v) + case OpShiftRightMaskedUint16x32: + return rewriteValueAMD64_OpShiftRightMaskedUint16x32(v) + case OpShiftRightMaskedUint16x8: + return rewriteValueAMD64_OpShiftRightMaskedUint16x8(v) + case OpShiftRightMaskedUint32x16: + return rewriteValueAMD64_OpShiftRightMaskedUint32x16(v) + case OpShiftRightMaskedUint32x4: + return rewriteValueAMD64_OpShiftRightMaskedUint32x4(v) + case OpShiftRightMaskedUint32x8: + return rewriteValueAMD64_OpShiftRightMaskedUint32x8(v) + case OpShiftRightMaskedUint64x2: + return rewriteValueAMD64_OpShiftRightMaskedUint64x2(v) + case OpShiftRightMaskedUint64x4: + return rewriteValueAMD64_OpShiftRightMaskedUint64x4(v) + case OpShiftRightMaskedUint64x8: + return rewriteValueAMD64_OpShiftRightMaskedUint64x8(v) case OpShiftRightSignExtendedInt16x16: v.Op = OpAMD64VPSRAVW256 return true @@ -4758,6 +4602,42 @@ func rewriteValueAMD64(v *Value) bool { case OpShiftRightSignExtendedInt64x8: v.Op = OpAMD64VPSRAVQ512 return true + case OpShiftRightSignExtendedMaskedInt16x16: + return rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt16x16(v) + case OpShiftRightSignExtendedMaskedInt16x32: + return rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt16x32(v) + case OpShiftRightSignExtendedMaskedInt16x8: + return rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt16x8(v) + case OpShiftRightSignExtendedMaskedInt32x16: + return rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt32x16(v) + case OpShiftRightSignExtendedMaskedInt32x4: + return rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt32x4(v) + case OpShiftRightSignExtendedMaskedInt32x8: + return rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt32x8(v) + case OpShiftRightSignExtendedMaskedInt64x2: + return rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt64x2(v) + case OpShiftRightSignExtendedMaskedInt64x4: + return rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt64x4(v) + case OpShiftRightSignExtendedMaskedInt64x8: + return rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt64x8(v) + case OpShiftRightSignExtendedMaskedUint16x16: + return rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint16x16(v) + case OpShiftRightSignExtendedMaskedUint16x32: + return rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint16x32(v) + case OpShiftRightSignExtendedMaskedUint16x8: + return rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint16x8(v) + case OpShiftRightSignExtendedMaskedUint32x16: + return rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint32x16(v) + case OpShiftRightSignExtendedMaskedUint32x4: + return rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint32x4(v) + case OpShiftRightSignExtendedMaskedUint32x8: + return rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint32x8(v) + case OpShiftRightSignExtendedMaskedUint64x2: + return rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint64x2(v) + case OpShiftRightSignExtendedMaskedUint64x4: + return rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint64x4(v) + case OpShiftRightSignExtendedMaskedUint64x8: + return rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint64x8(v) case OpShiftRightSignExtendedUint16x16: v.Op = OpAMD64VPSRAVW256 return true @@ -4878,6 +4758,18 @@ func rewriteValueAMD64(v *Value) bool { case OpSqrtFloat64x8: v.Op = OpAMD64VSQRTPD512 
return true + case OpSqrtMaskedFloat32x16: + return rewriteValueAMD64_OpSqrtMaskedFloat32x16(v) + case OpSqrtMaskedFloat32x4: + return rewriteValueAMD64_OpSqrtMaskedFloat32x4(v) + case OpSqrtMaskedFloat32x8: + return rewriteValueAMD64_OpSqrtMaskedFloat32x8(v) + case OpSqrtMaskedFloat64x2: + return rewriteValueAMD64_OpSqrtMaskedFloat64x2(v) + case OpSqrtMaskedFloat64x4: + return rewriteValueAMD64_OpSqrtMaskedFloat64x4(v) + case OpSqrtMaskedFloat64x8: + return rewriteValueAMD64_OpSqrtMaskedFloat64x8(v) case OpStaticCall: v.Op = OpAMD64CALLstatic return true @@ -4955,6 +4847,66 @@ func rewriteValueAMD64(v *Value) bool { case OpSubInt8x64: v.Op = OpAMD64VPSUBB512 return true + case OpSubMaskedFloat32x16: + return rewriteValueAMD64_OpSubMaskedFloat32x16(v) + case OpSubMaskedFloat32x4: + return rewriteValueAMD64_OpSubMaskedFloat32x4(v) + case OpSubMaskedFloat32x8: + return rewriteValueAMD64_OpSubMaskedFloat32x8(v) + case OpSubMaskedFloat64x2: + return rewriteValueAMD64_OpSubMaskedFloat64x2(v) + case OpSubMaskedFloat64x4: + return rewriteValueAMD64_OpSubMaskedFloat64x4(v) + case OpSubMaskedFloat64x8: + return rewriteValueAMD64_OpSubMaskedFloat64x8(v) + case OpSubMaskedInt16x16: + return rewriteValueAMD64_OpSubMaskedInt16x16(v) + case OpSubMaskedInt16x32: + return rewriteValueAMD64_OpSubMaskedInt16x32(v) + case OpSubMaskedInt16x8: + return rewriteValueAMD64_OpSubMaskedInt16x8(v) + case OpSubMaskedInt32x16: + return rewriteValueAMD64_OpSubMaskedInt32x16(v) + case OpSubMaskedInt32x4: + return rewriteValueAMD64_OpSubMaskedInt32x4(v) + case OpSubMaskedInt32x8: + return rewriteValueAMD64_OpSubMaskedInt32x8(v) + case OpSubMaskedInt64x2: + return rewriteValueAMD64_OpSubMaskedInt64x2(v) + case OpSubMaskedInt64x4: + return rewriteValueAMD64_OpSubMaskedInt64x4(v) + case OpSubMaskedInt64x8: + return rewriteValueAMD64_OpSubMaskedInt64x8(v) + case OpSubMaskedInt8x16: + return rewriteValueAMD64_OpSubMaskedInt8x16(v) + case OpSubMaskedInt8x32: + return rewriteValueAMD64_OpSubMaskedInt8x32(v) + case OpSubMaskedInt8x64: + return rewriteValueAMD64_OpSubMaskedInt8x64(v) + case OpSubMaskedUint16x16: + return rewriteValueAMD64_OpSubMaskedUint16x16(v) + case OpSubMaskedUint16x32: + return rewriteValueAMD64_OpSubMaskedUint16x32(v) + case OpSubMaskedUint16x8: + return rewriteValueAMD64_OpSubMaskedUint16x8(v) + case OpSubMaskedUint32x16: + return rewriteValueAMD64_OpSubMaskedUint32x16(v) + case OpSubMaskedUint32x4: + return rewriteValueAMD64_OpSubMaskedUint32x4(v) + case OpSubMaskedUint32x8: + return rewriteValueAMD64_OpSubMaskedUint32x8(v) + case OpSubMaskedUint64x2: + return rewriteValueAMD64_OpSubMaskedUint64x2(v) + case OpSubMaskedUint64x4: + return rewriteValueAMD64_OpSubMaskedUint64x4(v) + case OpSubMaskedUint64x8: + return rewriteValueAMD64_OpSubMaskedUint64x8(v) + case OpSubMaskedUint8x16: + return rewriteValueAMD64_OpSubMaskedUint8x16(v) + case OpSubMaskedUint8x32: + return rewriteValueAMD64_OpSubMaskedUint8x32(v) + case OpSubMaskedUint8x64: + return rewriteValueAMD64_OpSubMaskedUint8x64(v) case OpSubPtr: v.Op = OpAMD64SUBQ return true @@ -5037,6 +4989,18 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpTruncWithPrecisionFloat64x4(v) case OpTruncWithPrecisionFloat64x8: return rewriteValueAMD64_OpTruncWithPrecisionFloat64x8(v) + case OpTruncWithPrecisionMaskedFloat32x16: + return rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat32x16(v) + case OpTruncWithPrecisionMaskedFloat32x4: + return rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat32x4(v) + case OpTruncWithPrecisionMaskedFloat32x8: + 
return rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat32x8(v) + case OpTruncWithPrecisionMaskedFloat64x2: + return rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat64x2(v) + case OpTruncWithPrecisionMaskedFloat64x4: + return rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat64x4(v) + case OpTruncWithPrecisionMaskedFloat64x8: + return rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat64x8(v) case OpUnsignedSignedQuadDotProdAccumulateInt32x16: v.Op = OpAMD64VPDPBUSD512 return true @@ -5046,6 +5010,18 @@ func rewriteValueAMD64(v *Value) bool { case OpUnsignedSignedQuadDotProdAccumulateInt32x8: v.Op = OpAMD64VPDPBUSD256 return true + case OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x16: + return rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x16(v) + case OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x4: + return rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x4(v) + case OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x8: + return rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x8(v) + case OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x16: + return rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x16(v) + case OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x4: + return rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x4(v) + case OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x8: + return rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x8(v) case OpUnsignedSignedQuadDotProdAccumulateUint32x16: v.Op = OpAMD64VPDPBUSD512 return true @@ -5100,6 +5076,30 @@ func rewriteValueAMD64(v *Value) bool { case OpXorInt8x32: v.Op = OpAMD64VPXOR256 return true + case OpXorMaskedInt32x16: + return rewriteValueAMD64_OpXorMaskedInt32x16(v) + case OpXorMaskedInt32x4: + return rewriteValueAMD64_OpXorMaskedInt32x4(v) + case OpXorMaskedInt32x8: + return rewriteValueAMD64_OpXorMaskedInt32x8(v) + case OpXorMaskedInt64x2: + return rewriteValueAMD64_OpXorMaskedInt64x2(v) + case OpXorMaskedInt64x4: + return rewriteValueAMD64_OpXorMaskedInt64x4(v) + case OpXorMaskedInt64x8: + return rewriteValueAMD64_OpXorMaskedInt64x8(v) + case OpXorMaskedUint32x16: + return rewriteValueAMD64_OpXorMaskedUint32x16(v) + case OpXorMaskedUint32x4: + return rewriteValueAMD64_OpXorMaskedUint32x4(v) + case OpXorMaskedUint32x8: + return rewriteValueAMD64_OpXorMaskedUint32x8(v) + case OpXorMaskedUint64x2: + return rewriteValueAMD64_OpXorMaskedUint64x2(v) + case OpXorMaskedUint64x4: + return rewriteValueAMD64_OpXorMaskedUint64x4(v) + case OpXorMaskedUint64x8: + return rewriteValueAMD64_OpXorMaskedUint64x8(v) case OpXorUint16x16: v.Op = OpAMD64VPXOR256 return true @@ -27834,8704 +27834,8578 @@ func rewriteValueAMD64_OpAMD64XORQmodify(v *Value) bool { } return false } -func rewriteValueAMD64_OpAddr(v *Value) bool { +func rewriteValueAMD64_OpAbsoluteMaskedInt16x16(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Addr {sym} base) - // result: (LEAQ {sym} base) + b := v.Block + // match: (AbsoluteMaskedInt16x16 x mask) + // result: (VPABSWMasked256 x (VPMOVVec16x16ToM mask)) for { - sym := auxToSym(v.Aux) - base := v_0 - v.reset(OpAMD64LEAQ) - v.Aux = symToAux(sym) - v.AddArg(base) + x := v_0 + mask := v_1 + v.reset(OpAMD64VPABSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAtomicAdd32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAbsoluteMaskedInt16x32(v *Value) bool { v_1 := 
v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (AtomicAdd32 ptr val mem) - // result: (AddTupleFirst32 val (XADDLlock val ptr mem)) + // match: (AbsoluteMaskedInt16x32 x mask) + // result: (VPABSWMasked512 x (VPMOVVec16x32ToM mask)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64AddTupleFirst32) - v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, types.NewTuple(typ.UInt32, types.TypeMem)) - v0.AddArg3(val, ptr, mem) - v.AddArg2(val, v0) + x := v_0 + mask := v_1 + v.reset(OpAMD64VPABSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAtomicAdd64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAbsoluteMaskedInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (AtomicAdd64 ptr val mem) - // result: (AddTupleFirst64 val (XADDQlock val ptr mem)) + // match: (AbsoluteMaskedInt16x8 x mask) + // result: (VPABSWMasked128 x (VPMOVVec16x8ToM mask)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64AddTupleFirst64) - v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem)) - v0.AddArg3(val, ptr, mem) - v.AddArg2(val, v0) + x := v_0 + mask := v_1 + v.reset(OpAMD64VPABSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAtomicAnd32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAbsoluteMaskedInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicAnd32 ptr val mem) - // result: (ANDLlock ptr val mem) + b := v.Block + // match: (AbsoluteMaskedInt32x16 x mask) + // result: (VPABSDMasked512 x (VPMOVVec32x16ToM mask)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64ANDLlock) - v.AddArg3(ptr, val, mem) + x := v_0 + mask := v_1 + v.reset(OpAMD64VPABSDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAtomicAnd32value(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAbsoluteMaskedInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicAnd32value ptr val mem) - // result: (LoweredAtomicAnd32 ptr val mem) + b := v.Block + // match: (AbsoluteMaskedInt32x4 x mask) + // result: (VPABSDMasked128 x (VPMOVVec32x4ToM mask)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64LoweredAtomicAnd32) - v.AddArg3(ptr, val, mem) + x := v_0 + mask := v_1 + v.reset(OpAMD64VPABSDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAtomicAnd64value(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAbsoluteMaskedInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicAnd64value ptr val mem) - // result: (LoweredAtomicAnd64 ptr val mem) + b := v.Block + // match: (AbsoluteMaskedInt32x8 x mask) + // result: (VPABSDMasked256 x (VPMOVVec32x8ToM mask)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64LoweredAtomicAnd64) - v.AddArg3(ptr, val, mem) + x := v_0 + mask := v_1 + v.reset(OpAMD64VPABSDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAtomicAnd8(v *Value) bool { - v_2 := v.Args[2] +func 
rewriteValueAMD64_OpAbsoluteMaskedInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicAnd8 ptr val mem) - // result: (ANDBlock ptr val mem) + b := v.Block + // match: (AbsoluteMaskedInt64x2 x mask) + // result: (VPABSQMasked128 x (VPMOVVec64x2ToM mask)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64ANDBlock) - v.AddArg3(ptr, val, mem) + x := v_0 + mask := v_1 + v.reset(OpAMD64VPABSQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAtomicCompareAndSwap32(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpAbsoluteMaskedInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicCompareAndSwap32 ptr old new_ mem) - // result: (CMPXCHGLlock ptr old new_ mem) + b := v.Block + // match: (AbsoluteMaskedInt64x4 x mask) + // result: (VPABSQMasked256 x (VPMOVVec64x4ToM mask)) for { - ptr := v_0 - old := v_1 - new_ := v_2 - mem := v_3 - v.reset(OpAMD64CMPXCHGLlock) - v.AddArg4(ptr, old, new_, mem) + x := v_0 + mask := v_1 + v.reset(OpAMD64VPABSQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAtomicCompareAndSwap64(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpAbsoluteMaskedInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicCompareAndSwap64 ptr old new_ mem) - // result: (CMPXCHGQlock ptr old new_ mem) + b := v.Block + // match: (AbsoluteMaskedInt64x8 x mask) + // result: (VPABSQMasked512 x (VPMOVVec64x8ToM mask)) for { - ptr := v_0 - old := v_1 - new_ := v_2 - mem := v_3 - v.reset(OpAMD64CMPXCHGQlock) - v.AddArg4(ptr, old, new_, mem) + x := v_0 + mask := v_1 + v.reset(OpAMD64VPABSQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAtomicExchange32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAbsoluteMaskedInt8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicExchange32 ptr val mem) - // result: (XCHGL val ptr mem) + b := v.Block + // match: (AbsoluteMaskedInt8x16 x mask) + // result: (VPABSBMasked128 x (VPMOVVec8x16ToM mask)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64XCHGL) - v.AddArg3(val, ptr, mem) + x := v_0 + mask := v_1 + v.reset(OpAMD64VPABSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAtomicExchange64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAbsoluteMaskedInt8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicExchange64 ptr val mem) - // result: (XCHGQ val ptr mem) + b := v.Block + // match: (AbsoluteMaskedInt8x32 x mask) + // result: (VPABSBMasked256 x (VPMOVVec8x32ToM mask)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64XCHGQ) - v.AddArg3(val, ptr, mem) + x := v_0 + mask := v_1 + v.reset(OpAMD64VPABSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAtomicExchange8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAbsoluteMaskedInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicExchange8 ptr val mem) - // result: (XCHGB val ptr mem) + b := v.Block + // 
match: (AbsoluteMaskedInt8x64 x mask) + // result: (VPABSBMasked512 x (VPMOVVec8x64ToM mask)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64XCHGB) - v.AddArg3(val, ptr, mem) + x := v_0 + mask := v_1 + v.reset(OpAMD64VPABSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAtomicLoad32(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedFloat32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicLoad32 ptr mem) - // result: (MOVLatomicload ptr mem) + b := v.Block + // match: (AddMaskedFloat32x16 x y mask) + // result: (VADDPSMasked512 x y (VPMOVVec32x16ToM mask)) for { - ptr := v_0 - mem := v_1 - v.reset(OpAMD64MOVLatomicload) - v.AddArg2(ptr, mem) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VADDPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpAtomicLoad64(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedFloat32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicLoad64 ptr mem) - // result: (MOVQatomicload ptr mem) + b := v.Block + // match: (AddMaskedFloat32x4 x y mask) + // result: (VADDPSMasked128 x y (VPMOVVec32x4ToM mask)) for { - ptr := v_0 - mem := v_1 - v.reset(OpAMD64MOVQatomicload) - v.AddArg2(ptr, mem) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VADDPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpAtomicLoad8(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedFloat32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicLoad8 ptr mem) - // result: (MOVBatomicload ptr mem) + b := v.Block + // match: (AddMaskedFloat32x8 x y mask) + // result: (VADDPSMasked256 x y (VPMOVVec32x8ToM mask)) for { - ptr := v_0 - mem := v_1 - v.reset(OpAMD64MOVBatomicload) - v.AddArg2(ptr, mem) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VADDPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpAtomicLoadPtr(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedFloat64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicLoadPtr ptr mem) - // result: (MOVQatomicload ptr mem) + b := v.Block + // match: (AddMaskedFloat64x2 x y mask) + // result: (VADDPDMasked128 x y (VPMOVVec64x2ToM mask)) for { - ptr := v_0 - mem := v_1 - v.reset(OpAMD64MOVQatomicload) - v.AddArg2(ptr, mem) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VADDPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpAtomicOr32(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedFloat64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicOr32 ptr val mem) - // result: (ORLlock ptr val mem) + b := v.Block + // match: (AddMaskedFloat64x4 x y mask) + // result: (VADDPDMasked256 x y (VPMOVVec64x4ToM mask)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64ORLlock) - v.AddArg3(ptr, val, mem) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VADDPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, 
v0) return true } } -func rewriteValueAMD64_OpAtomicOr32value(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedFloat64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicOr32value ptr val mem) - // result: (LoweredAtomicOr32 ptr val mem) + b := v.Block + // match: (AddMaskedFloat64x8 x y mask) + // result: (VADDPDMasked512 x y (VPMOVVec64x8ToM mask)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64LoweredAtomicOr32) - v.AddArg3(ptr, val, mem) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VADDPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpAtomicOr64value(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicOr64value ptr val mem) - // result: (LoweredAtomicOr64 ptr val mem) + b := v.Block + // match: (AddMaskedInt16x16 x y mask) + // result: (VPADDWMasked256 x y (VPMOVVec16x16ToM mask)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64LoweredAtomicOr64) - v.AddArg3(ptr, val, mem) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpAtomicOr8(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicOr8 ptr val mem) - // result: (ORBlock ptr val mem) + b := v.Block + // match: (AddMaskedInt16x32 x y mask) + // result: (VPADDWMasked512 x y (VPMOVVec16x32ToM mask)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64ORBlock) - v.AddArg3(ptr, val, mem) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpAtomicStore32(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (AtomicStore32 ptr val mem) - // result: (Select1 (XCHGL val ptr mem)) + // match: (AddMaskedInt16x8 x y mask) + // result: (VPADDWMasked128 x y (VPMOVVec16x8ToM mask)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem)) - v0.AddArg3(val, ptr, mem) - v.AddArg(v0) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpAtomicStore64(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (AtomicStore64 ptr val mem) - // result: (Select1 (XCHGQ val ptr mem)) + // match: (AddMaskedInt32x16 x y mask) + // result: (VPADDDMasked512 x y (VPMOVVec32x16ToM mask)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem)) - v0.AddArg3(val, ptr, mem) - v.AddArg(v0) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, 
v0) return true } } -func rewriteValueAMD64_OpAtomicStore8(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (AtomicStore8 ptr val mem) - // result: (Select1 (XCHGB val ptr mem)) + // match: (AddMaskedInt32x4 x y mask) + // result: (VPADDDMasked128 x y (VPMOVVec32x4ToM mask)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64XCHGB, types.NewTuple(typ.UInt8, types.TypeMem)) - v0.AddArg3(val, ptr, mem) - v.AddArg(v0) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpAtomicStorePtrNoWB(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (AtomicStorePtrNoWB ptr val mem) - // result: (Select1 (XCHGQ val ptr mem)) + // match: (AddMaskedInt32x8 x y mask) + // result: (VPADDDMasked256 x y (VPMOVVec32x8ToM mask)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem)) - v0.AddArg3(val, ptr, mem) - v.AddArg(v0) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpBitLen16(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (BitLen16 x) - // cond: buildcfg.GOAMD64 < 3 - // result: (BSRL (LEAL1 [1] (MOVWQZX x) (MOVWQZX x))) + // match: (AddMaskedInt64x2 x y mask) + // result: (VPADDQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpAMD64BSRL) - v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32) - v0.AuxInt = int32ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32) - v1.AddArg(x) - v0.AddArg2(v1, v1) - v.AddArg(v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (BitLen16 x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (NEGQ (ADDQconst [-32] (LZCNTL (MOVWQZX x)))) +} +func rewriteValueAMD64_OpAddMaskedInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AddMaskedInt64x4 x y mask) + // result: (VPADDQMasked256 x y (VPMOVVec64x4ToM mask)) for { - t := v.Type x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64NEGQ) - v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) - v0.AuxInt = int32ToAuxInt(-32) - v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, x.Type) - v2.AddArg(x) - v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpBitLen32(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := 
&b.Func.Config.Types - // match: (BitLen32 x) - // cond: buildcfg.GOAMD64 < 3 - // result: (Select0 (BSRQ (LEAQ1 [1] (MOVLQZX x) (MOVLQZX x)))) + // match: (AddMaskedInt64x8 x y mask) + // result: (VPADDQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v1 := b.NewValue0(v.Pos, OpAMD64LEAQ1, typ.UInt64) - v1.AuxInt = int32ToAuxInt(1) - v2 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64) - v2.AddArg(x) - v1.AddArg2(v2, v2) - v0.AddArg(v1) - v.AddArg(v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (BitLen32 x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (NEGQ (ADDQconst [-32] (LZCNTL x))) +} +func rewriteValueAMD64_OpAddMaskedInt8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AddMaskedInt8x16 x y mask) + // result: (VPADDBMasked128 x y (VPMOVVec8x16ToM mask)) for { - t := v.Type x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64NEGQ) - v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) - v0.AuxInt = int32ToAuxInt(-32) - v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) - v1.AddArg(x) - v0.AddArg(v1) - v.AddArg(v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpBitLen64(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedInt8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (BitLen64 x) - // cond: buildcfg.GOAMD64 < 3 - // result: (ADDQconst [1] (CMOVQEQ (Select0 (BSRQ x)) (MOVQconst [-1]) (Select1 (BSRQ x)))) + // match: (AddMaskedInt8x32 x y mask) + // result: (VPADDBMasked256 x y (VPMOVVec8x32ToM mask)) for { - t := v.Type x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpAMD64ADDQconst) - v.AuxInt = int32ToAuxInt(1) - v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t) - v1 := b.NewValue0(v.Pos, OpSelect0, t) - v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v2.AddArg(x) - v1.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t) - v3.AuxInt = int64ToAuxInt(-1) - v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v4.AddArg(v2) - v0.AddArg3(v1, v3, v4) - v.AddArg(v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (BitLen64 x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (NEGQ (ADDQconst [-64] (LZCNTQ x))) +} +func rewriteValueAMD64_OpAddMaskedInt8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AddMaskedInt8x64 x y mask) + // result: (VPADDBMasked512 x y (VPMOVVec8x64ToM mask)) for { - t := v.Type x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64NEGQ) - v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) - v0.AuxInt = int32ToAuxInt(-64) - v1 := b.NewValue0(v.Pos, OpAMD64LZCNTQ, typ.UInt64) - v1.AddArg(x) - v0.AddArg(v1) - v.AddArg(v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) 
return true } - return false } -func rewriteValueAMD64_OpBitLen8(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (BitLen8 x) - // cond: buildcfg.GOAMD64 < 3 - // result: (BSRL (LEAL1 [1] (MOVBQZX x) (MOVBQZX x))) + // match: (AddMaskedUint16x16 x y mask) + // result: (VPADDWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpAMD64BSRL) - v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32) - v0.AuxInt = int32ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32) - v1.AddArg(x) - v0.AddArg2(v1, v1) - v.AddArg(v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (BitLen8 x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (NEGQ (ADDQconst [-32] (LZCNTL (MOVBQZX x)))) +} +func rewriteValueAMD64_OpAddMaskedUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AddMaskedUint16x32 x y mask) + // result: (VPADDWMasked512 x y (VPMOVVec16x32ToM mask)) for { - t := v.Type x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64NEGQ) - v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) - v0.AuxInt = int32ToAuxInt(-32) - v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, x.Type) - v2.AddArg(x) - v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpBswap16(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Bswap16 x) - // result: (ROLWconst [8] x) + b := v.Block + // match: (AddMaskedUint16x8 x y mask) + // result: (VPADDWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 - v.reset(OpAMD64ROLWconst) - v.AuxInt = int8ToAuxInt(8) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpCeil(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Ceil x) - // result: (ROUNDSD [2] x) + b := v.Block + // match: (AddMaskedUint32x16 x y mask) + // result: (VPADDDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 - v.reset(OpAMD64ROUNDSD) - v.AuxInt = int8ToAuxInt(2) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpCeilFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (CeilFloat32x4 x) - // result: (VROUNDPS128 [2] x) + b := v.Block + // match: (AddMaskedUint32x4 x y mask) + // result: (VPADDDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 - v.reset(OpAMD64VROUNDPS128) - v.AuxInt = int8ToAuxInt(2) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, 
types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpCeilFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (CeilFloat32x8 x) - // result: (VROUNDPS256 [2] x) + b := v.Block + // match: (AddMaskedUint32x8 x y mask) + // result: (VPADDDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 - v.reset(OpAMD64VROUNDPS256) - v.AuxInt = int8ToAuxInt(2) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpCeilFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (CeilFloat64x2 x) - // result: (VROUNDPD128 [2] x) + b := v.Block + // match: (AddMaskedUint64x2 x y mask) + // result: (VPADDQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 - v.reset(OpAMD64VROUNDPD128) - v.AuxInt = int8ToAuxInt(2) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpCeilFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (CeilFloat64x4 x) - // result: (VROUNDPD256 [2] x) + b := v.Block + // match: (AddMaskedUint64x4 x y mask) + // result: (VPADDQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 - v.reset(OpAMD64VROUNDPD256) - v.AuxInt = int8ToAuxInt(2) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpCeilWithPrecisionFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (CeilWithPrecisionFloat32x16 [a] x) - // result: (VRNDSCALEPS512 [a+2] x) + b := v.Block + // match: (AddMaskedUint64x8 x y mask) + // result: (VPADDQMasked512 x y (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VRNDSCALEPS512) - v.AuxInt = int8ToAuxInt(a + 2) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpCeilWithPrecisionFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (CeilWithPrecisionFloat32x4 [a] x) - // result: (VRNDSCALEPS128 [a+2] x) + b := v.Block + // match: (AddMaskedUint8x16 x y mask) + // result: (VPADDBMasked128 x y (VPMOVVec8x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VRNDSCALEPS128) - v.AuxInt = int8ToAuxInt(a + 2) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpCeilWithPrecisionFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (CeilWithPrecisionFloat32x8 [a] x) - 
// result: (VRNDSCALEPS256 [a+2] x) + b := v.Block + // match: (AddMaskedUint8x32 x y mask) + // result: (VPADDBMasked256 x y (VPMOVVec8x32ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VRNDSCALEPS256) - v.AuxInt = int8ToAuxInt(a + 2) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpCeilWithPrecisionFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (CeilWithPrecisionFloat64x2 [a] x) - // result: (VRNDSCALEPD128 [a+2] x) + b := v.Block + // match: (AddMaskedUint8x64 x y mask) + // result: (VPADDBMasked512 x y (VPMOVVec8x64ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VRNDSCALEPD128) - v.AuxInt = int8ToAuxInt(a + 2) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpCeilWithPrecisionFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpAddr(v *Value) bool { v_0 := v.Args[0] - // match: (CeilWithPrecisionFloat64x4 [a] x) - // result: (VRNDSCALEPD256 [a+2] x) + // match: (Addr {sym} base) + // result: (LEAQ {sym} base) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPD256) - v.AuxInt = int8ToAuxInt(a + 2) - v.AddArg(x) + sym := auxToSym(v.Aux) + base := v_0 + v.reset(OpAMD64LEAQ) + v.Aux = symToAux(sym) + v.AddArg(base) return true } } -func rewriteValueAMD64_OpCeilWithPrecisionFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpAndMaskedInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (CeilWithPrecisionFloat64x8 [a] x) - // result: (VRNDSCALEPD512 [a+2] x) + b := v.Block + // match: (AndMaskedInt32x16 x y mask) + // result: (VPANDDMasked512 x y (VPMOVVec32x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VRNDSCALEPD512) - v.AuxInt = int8ToAuxInt(a + 2) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPANDDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpCondSelect(v *Value) bool { +func rewriteValueAMD64_OpAndMaskedInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (CondSelect x y (SETEQ cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQEQ y x cond) + // match: (AndMaskedInt32x4 x y mask) + // result: (VPANDDMasked128 x y (VPMOVVec32x4ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETEQ { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQEQ) - v.AddArg3(y, x, cond) + mask := v_2 + v.reset(OpAMD64VPANDDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (CondSelect x y (SETNE cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQNE y x cond) +} +func rewriteValueAMD64_OpAndMaskedInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AndMaskedInt32x8 x y mask) + // result: (VPANDDMasked256 x y (VPMOVVec32x8ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op 
!= OpAMD64SETNE { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQNE) - v.AddArg3(y, x, cond) + mask := v_2 + v.reset(OpAMD64VPANDDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (CondSelect x y (SETL cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQLT y x cond) +} +func rewriteValueAMD64_OpAndMaskedInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AndMaskedInt64x2 x y mask) + // result: (VPANDQMasked128 x y (VPMOVVec64x2ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETL { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQLT) - v.AddArg3(y, x, cond) + mask := v_2 + v.reset(OpAMD64VPANDQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (CondSelect x y (SETG cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQGT y x cond) +} +func rewriteValueAMD64_OpAndMaskedInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AndMaskedInt64x4 x y mask) + // result: (VPANDQMasked256 x y (VPMOVVec64x4ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETG { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQGT) - v.AddArg3(y, x, cond) + mask := v_2 + v.reset(OpAMD64VPANDQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (CondSelect x y (SETLE cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQLE y x cond) +} +func rewriteValueAMD64_OpAndMaskedInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AndMaskedInt64x8 x y mask) + // result: (VPANDQMasked512 x y (VPMOVVec64x8ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETLE { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQLE) - v.AddArg3(y, x, cond) + mask := v_2 + v.reset(OpAMD64VPANDQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (CondSelect x y (SETGE cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQGE y x cond) +} +func rewriteValueAMD64_OpAndMaskedUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AndMaskedUint32x16 x y mask) + // result: (VPANDDMasked512 x y (VPMOVVec32x16ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGE { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQGE) - v.AddArg3(y, x, cond) + mask := v_2 + v.reset(OpAMD64VPANDDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (CondSelect x y (SETA cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQHI y x cond) +} +func rewriteValueAMD64_OpAndMaskedUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AndMaskedUint32x4 x y mask) + // result: (VPANDDMasked128 x y (VPMOVVec32x4ToM mask)) for { - t := v.Type x := 
v_0 y := v_1 - if v_2.Op != OpAMD64SETA { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQHI) - v.AddArg3(y, x, cond) + mask := v_2 + v.reset(OpAMD64VPANDDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (CondSelect x y (SETB cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQCS y x cond) +} +func rewriteValueAMD64_OpAndMaskedUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AndMaskedUint32x8 x y mask) + // result: (VPANDDMasked256 x y (VPMOVVec32x8ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETB { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQCS) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETAE cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQCC y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETAE { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQCC) - v.AddArg3(y, x, cond) + mask := v_2 + v.reset(OpAMD64VPANDDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (CondSelect x y (SETBE cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQLS y x cond) +} +func rewriteValueAMD64_OpAndMaskedUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AndMaskedUint64x2 x y mask) + // result: (VPANDQMasked128 x y (VPMOVVec64x2ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETBE { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQLS) - v.AddArg3(y, x, cond) + mask := v_2 + v.reset(OpAMD64VPANDQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (CondSelect x y (SETEQF cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQEQF y x cond) +} +func rewriteValueAMD64_OpAndMaskedUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AndMaskedUint64x4 x y mask) + // result: (VPANDQMasked256 x y (VPMOVVec64x4ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETEQF { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQEQF) - v.AddArg3(y, x, cond) + mask := v_2 + v.reset(OpAMD64VPANDQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (CondSelect x y (SETNEF cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQNEF y x cond) +} +func rewriteValueAMD64_OpAndMaskedUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AndMaskedUint64x8 x y mask) + // result: (VPANDQMasked512 x y (VPMOVVec64x8ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETNEF { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQNEF) - v.AddArg3(y, x, cond) + mask := v_2 + v.reset(OpAMD64VPANDQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } 
- // match: (CondSelect x y (SETGF cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQGTF y x cond) +} +func rewriteValueAMD64_OpAndNotMaskedInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AndNotMaskedInt32x16 x y mask) + // result: (VPANDNDMasked512 x y (VPMOVVec32x16ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGF { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQGTF) - v.AddArg3(y, x, cond) + mask := v_2 + v.reset(OpAMD64VPANDNDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (CondSelect x y (SETGEF cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQGEF y x cond) +} +func rewriteValueAMD64_OpAndNotMaskedInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AndNotMaskedInt32x4 x y mask) + // result: (VPANDNDMasked128 x y (VPMOVVec32x4ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGEF { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQGEF) - v.AddArg3(y, x, cond) + mask := v_2 + v.reset(OpAMD64VPANDNDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (CondSelect x y (SETEQ cond)) - // cond: is32BitInt(t) - // result: (CMOVLEQ y x cond) +} +func rewriteValueAMD64_OpAndNotMaskedInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AndNotMaskedInt32x8 x y mask) + // result: (VPANDNDMasked256 x y (VPMOVVec32x8ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETEQ { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLEQ) - v.AddArg3(y, x, cond) + mask := v_2 + v.reset(OpAMD64VPANDNDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (CondSelect x y (SETNE cond)) - // cond: is32BitInt(t) - // result: (CMOVLNE y x cond) +} +func rewriteValueAMD64_OpAndNotMaskedInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AndNotMaskedInt64x2 x y mask) + // result: (VPANDNQMasked128 x y (VPMOVVec64x2ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETNE { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLNE) - v.AddArg3(y, x, cond) + mask := v_2 + v.reset(OpAMD64VPANDNQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (CondSelect x y (SETL cond)) - // cond: is32BitInt(t) - // result: (CMOVLLT y x cond) +} +func rewriteValueAMD64_OpAndNotMaskedInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AndNotMaskedInt64x4 x y mask) + // result: (VPANDNQMasked256 x y (VPMOVVec64x4ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETL { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLLT) - v.AddArg3(y, x, cond) + mask := v_2 + v.reset(OpAMD64VPANDNQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // 
match: (CondSelect x y (SETG cond)) - // cond: is32BitInt(t) - // result: (CMOVLGT y x cond) +} +func rewriteValueAMD64_OpAndNotMaskedInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AndNotMaskedInt64x8 x y mask) + // result: (VPANDNQMasked512 x y (VPMOVVec64x8ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETG { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLGT) - v.AddArg3(y, x, cond) + mask := v_2 + v.reset(OpAMD64VPANDNQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (CondSelect x y (SETLE cond)) - // cond: is32BitInt(t) - // result: (CMOVLLE y x cond) +} +func rewriteValueAMD64_OpAndNotMaskedUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AndNotMaskedUint32x16 x y mask) + // result: (VPANDNDMasked512 x y (VPMOVVec32x16ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETLE { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLLE) - v.AddArg3(y, x, cond) + mask := v_2 + v.reset(OpAMD64VPANDNDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (CondSelect x y (SETGE cond)) - // cond: is32BitInt(t) - // result: (CMOVLGE y x cond) +} +func rewriteValueAMD64_OpAndNotMaskedUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AndNotMaskedUint32x4 x y mask) + // result: (VPANDNDMasked128 x y (VPMOVVec32x4ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGE { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLGE) - v.AddArg3(y, x, cond) + mask := v_2 + v.reset(OpAMD64VPANDNDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (CondSelect x y (SETA cond)) - // cond: is32BitInt(t) - // result: (CMOVLHI y x cond) +} +func rewriteValueAMD64_OpAndNotMaskedUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AndNotMaskedUint32x8 x y mask) + // result: (VPANDNDMasked256 x y (VPMOVVec32x8ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETA { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLHI) - v.AddArg3(y, x, cond) + mask := v_2 + v.reset(OpAMD64VPANDNDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (CondSelect x y (SETB cond)) - // cond: is32BitInt(t) - // result: (CMOVLCS y x cond) +} +func rewriteValueAMD64_OpAndNotMaskedUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AndNotMaskedUint64x2 x y mask) + // result: (VPANDNQMasked128 x y (VPMOVVec64x2ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETB { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLCS) - v.AddArg3(y, x, cond) + mask := v_2 + v.reset(OpAMD64VPANDNQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (CondSelect x y (SETAE cond)) - // cond: is32BitInt(t) - 
// result: (CMOVLCC y x cond) +} +func rewriteValueAMD64_OpAndNotMaskedUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AndNotMaskedUint64x4 x y mask) + // result: (VPANDNQMasked256 x y (VPMOVVec64x4ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETAE { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLCC) - v.AddArg3(y, x, cond) + mask := v_2 + v.reset(OpAMD64VPANDNQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (CondSelect x y (SETBE cond)) - // cond: is32BitInt(t) - // result: (CMOVLLS y x cond) +} +func rewriteValueAMD64_OpAndNotMaskedUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AndNotMaskedUint64x8 x y mask) + // result: (VPANDNQMasked512 x y (VPMOVVec64x8ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETBE { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLLS) - v.AddArg3(y, x, cond) + mask := v_2 + v.reset(OpAMD64VPANDNQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (CondSelect x y (SETEQF cond)) - // cond: is32BitInt(t) - // result: (CMOVLEQF y x cond) +} +func rewriteValueAMD64_OpApproximateReciprocalMaskedFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ApproximateReciprocalMaskedFloat32x16 x mask) + // result: (VRCP14PSMasked512 x (VPMOVVec32x16ToM mask)) for { - t := v.Type x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETEQF { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLEQF) - v.AddArg3(y, x, cond) + mask := v_1 + v.reset(OpAMD64VRCP14PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - // match: (CondSelect x y (SETNEF cond)) - // cond: is32BitInt(t) - // result: (CMOVLNEF y x cond) +} +func rewriteValueAMD64_OpApproximateReciprocalMaskedFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ApproximateReciprocalMaskedFloat32x4 x mask) + // result: (VRCP14PSMasked128 x (VPMOVVec32x4ToM mask)) for { - t := v.Type x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETNEF { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLNEF) - v.AddArg3(y, x, cond) + mask := v_1 + v.reset(OpAMD64VRCP14PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - // match: (CondSelect x y (SETGF cond)) - // cond: is32BitInt(t) - // result: (CMOVLGTF y x cond) +} +func rewriteValueAMD64_OpApproximateReciprocalMaskedFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ApproximateReciprocalMaskedFloat32x8 x mask) + // result: (VRCP14PSMasked256 x (VPMOVVec32x8ToM mask)) for { - t := v.Type x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETGF { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLGTF) - v.AddArg3(y, x, cond) + mask := v_1 + v.reset(OpAMD64VRCP14PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - // match: (CondSelect x y (SETGEF cond)) - // cond: is32BitInt(t) - // result: 
(CMOVLGEF y x cond) +} +func rewriteValueAMD64_OpApproximateReciprocalMaskedFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ApproximateReciprocalMaskedFloat64x2 x mask) + // result: (VRCP14PDMasked128 x (VPMOVVec64x2ToM mask)) for { - t := v.Type x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETGEF { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLGEF) - v.AddArg3(y, x, cond) + mask := v_1 + v.reset(OpAMD64VRCP14PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - // match: (CondSelect x y (SETEQ cond)) - // cond: is16BitInt(t) - // result: (CMOVWEQ y x cond) +} +func rewriteValueAMD64_OpApproximateReciprocalMaskedFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ApproximateReciprocalMaskedFloat64x4 x mask) + // result: (VRCP14PDMasked256 x (VPMOVVec64x4ToM mask)) for { - t := v.Type x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETEQ { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWEQ) - v.AddArg3(y, x, cond) + mask := v_1 + v.reset(OpAMD64VRCP14PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - // match: (CondSelect x y (SETNE cond)) - // cond: is16BitInt(t) - // result: (CMOVWNE y x cond) +} +func rewriteValueAMD64_OpApproximateReciprocalMaskedFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ApproximateReciprocalMaskedFloat64x8 x mask) + // result: (VRCP14PDMasked512 x (VPMOVVec64x8ToM mask)) for { - t := v.Type x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETNE { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWNE) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETL cond)) - // cond: is16BitInt(t) - // result: (CMOVWLT y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETL { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWLT) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETG cond)) - // cond: is16BitInt(t) - // result: (CMOVWGT y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETG { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWGT) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETLE cond)) - // cond: is16BitInt(t) - // result: (CMOVWLE y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETLE { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWLE) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETGE cond)) - // cond: is16BitInt(t) - // result: (CMOVWGE y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETGE { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWGE) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETA cond)) - // cond: is16BitInt(t) - // result: (CMOVWHI y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETA { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWHI) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETB cond)) - // cond: is16BitInt(t) - // result: (CMOVWCS y x cond) - for { 
- t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETB { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWCS) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETAE cond)) - // cond: is16BitInt(t) - // result: (CMOVWCC y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETAE { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWCC) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETBE cond)) - // cond: is16BitInt(t) - // result: (CMOVWLS y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETBE { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWLS) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETEQF cond)) - // cond: is16BitInt(t) - // result: (CMOVWEQF y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETEQF { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWEQF) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETNEF cond)) - // cond: is16BitInt(t) - // result: (CMOVWNEF y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETNEF { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWNEF) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETGF cond)) - // cond: is16BitInt(t) - // result: (CMOVWGTF y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETGF { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWGTF) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETGEF cond)) - // cond: is16BitInt(t) - // result: (CMOVWGEF y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETGEF { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWGEF) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y check) - // cond: !check.Type.IsFlags() && check.Type.Size() == 1 - // result: (CondSelect x y (MOVBQZX check)) - for { - t := v.Type - x := v_0 - y := v_1 - check := v_2 - if !(!check.Type.IsFlags() && check.Type.Size() == 1) { - break - } - v.reset(OpCondSelect) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt64) - v0.AddArg(check) - v.AddArg3(x, y, v0) - return true - } - // match: (CondSelect x y check) - // cond: !check.Type.IsFlags() && check.Type.Size() == 2 - // result: (CondSelect x y (MOVWQZX check)) - for { - t := v.Type - x := v_0 - y := v_1 - check := v_2 - if !(!check.Type.IsFlags() && check.Type.Size() == 2) { - break - } - v.reset(OpCondSelect) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt64) - v0.AddArg(check) - v.AddArg3(x, y, v0) - return true - } - // match: (CondSelect x y check) - // cond: !check.Type.IsFlags() && check.Type.Size() == 4 - // result: (CondSelect x y (MOVLQZX check)) - for { - t := v.Type - x := v_0 - y := v_1 - check := v_2 - if !(!check.Type.IsFlags() && check.Type.Size() == 4) { - break - } - v.reset(OpCondSelect) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64) - v0.AddArg(check) - v.AddArg3(x, y, v0) - return true - } - // match: (CondSelect x y check) - // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t)) - // result: (CMOVQNE y x (CMPQconst [0] check)) - for { - t := v.Type - x := v_0 - y 
:= v_1 - check := v_2 - if !(!check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))) { - break - } - v.reset(OpAMD64CMOVQNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v0.AuxInt = int32ToAuxInt(0) - v0.AddArg(check) - v.AddArg3(y, x, v0) - return true - } - // match: (CondSelect x y check) - // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t) - // result: (CMOVLNE y x (CMPQconst [0] check)) - for { - t := v.Type - x := v_0 - y := v_1 - check := v_2 - if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v0.AuxInt = int32ToAuxInt(0) - v0.AddArg(check) - v.AddArg3(y, x, v0) - return true - } - // match: (CondSelect x y check) - // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t) - // result: (CMOVWNE y x (CMPQconst [0] check)) - for { - t := v.Type - x := v_0 - y := v_1 - check := v_2 - if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v0.AuxInt = int32ToAuxInt(0) - v0.AddArg(check) - v.AddArg3(y, x, v0) - return true - } - return false -} -func rewriteValueAMD64_OpConst16(v *Value) bool { - // match: (Const16 [c]) - // result: (MOVLconst [int32(c)]) - for { - c := auxIntToInt16(v.AuxInt) - v.reset(OpAMD64MOVLconst) - v.AuxInt = int32ToAuxInt(int32(c)) - return true - } -} -func rewriteValueAMD64_OpConst8(v *Value) bool { - // match: (Const8 [c]) - // result: (MOVLconst [int32(c)]) - for { - c := auxIntToInt8(v.AuxInt) - v.reset(OpAMD64MOVLconst) - v.AuxInt = int32ToAuxInt(int32(c)) - return true - } -} -func rewriteValueAMD64_OpConstBool(v *Value) bool { - // match: (ConstBool [c]) - // result: (MOVLconst [b2i32(c)]) - for { - c := auxIntToBool(v.AuxInt) - v.reset(OpAMD64MOVLconst) - v.AuxInt = int32ToAuxInt(b2i32(c)) - return true - } -} -func rewriteValueAMD64_OpConstNil(v *Value) bool { - // match: (ConstNil ) - // result: (MOVQconst [0]) - for { - v.reset(OpAMD64MOVQconst) - v.AuxInt = int64ToAuxInt(0) + mask := v_1 + v.reset(OpAMD64VRCP14PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpCtz16(v *Value) bool { +func rewriteValueAMD64_OpApproximateReciprocalOfSqrtMaskedFloat32x16(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Ctz16 x) - // result: (BSFL (ORLconst [1<<16] x)) + // match: (ApproximateReciprocalOfSqrtMaskedFloat32x16 x mask) + // result: (VRSQRT14PSMasked512 x (VPMOVVec32x16ToM mask)) for { x := v_0 - v.reset(OpAMD64BSFL) - v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32) - v0.AuxInt = int32ToAuxInt(1 << 16) - v0.AddArg(x) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VRSQRT14PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpCtz16NonZero(v *Value) bool { +func rewriteValueAMD64_OpApproximateReciprocalOfSqrtMaskedFloat32x4(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Ctz16NonZero x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (TZCNTL x) - for { - x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64TZCNTL) - v.AddArg(x) - return true - } - // match: (Ctz16NonZero x) - // cond: buildcfg.GOAMD64 < 3 - // result: (BSFL x) + b := 
v.Block + // match: (ApproximateReciprocalOfSqrtMaskedFloat32x4 x mask) + // result: (VRSQRT14PSMasked128 x (VPMOVVec32x4ToM mask)) for { x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpAMD64BSFL) - v.AddArg(x) + mask := v_1 + v.reset(OpAMD64VRSQRT14PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - return false } -func rewriteValueAMD64_OpCtz32(v *Value) bool { +func rewriteValueAMD64_OpApproximateReciprocalOfSqrtMaskedFloat32x8(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Ctz32 x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (TZCNTL x) - for { - x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64TZCNTL) - v.AddArg(x) - return true - } - // match: (Ctz32 x) - // cond: buildcfg.GOAMD64 < 3 - // result: (Select0 (BSFQ (BTSQconst [32] x))) + // match: (ApproximateReciprocalOfSqrtMaskedFloat32x8 x mask) + // result: (VRSQRT14PSMasked256 x (VPMOVVec32x8ToM mask)) for { x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v1 := b.NewValue0(v.Pos, OpAMD64BTSQconst, typ.UInt64) - v1.AuxInt = int8ToAuxInt(32) - v1.AddArg(x) - v0.AddArg(v1) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VRSQRT14PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - return false } -func rewriteValueAMD64_OpCtz32NonZero(v *Value) bool { +func rewriteValueAMD64_OpApproximateReciprocalOfSqrtMaskedFloat64x2(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Ctz32NonZero x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (TZCNTL x) - for { - x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64TZCNTL) - v.AddArg(x) - return true - } - // match: (Ctz32NonZero x) - // cond: buildcfg.GOAMD64 < 3 - // result: (BSFL x) + b := v.Block + // match: (ApproximateReciprocalOfSqrtMaskedFloat64x2 x mask) + // result: (VRSQRT14PDMasked128 x (VPMOVVec64x2ToM mask)) for { x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpAMD64BSFL) - v.AddArg(x) + mask := v_1 + v.reset(OpAMD64VRSQRT14PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - return false } -func rewriteValueAMD64_OpCtz64(v *Value) bool { +func rewriteValueAMD64_OpApproximateReciprocalOfSqrtMaskedFloat64x4(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Ctz64 x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (TZCNTQ x) - for { - x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64TZCNTQ) - v.AddArg(x) - return true - } - // match: (Ctz64 x) - // cond: buildcfg.GOAMD64 < 3 - // result: (CMOVQEQ (Select0 (BSFQ x)) (MOVQconst [64]) (Select1 (BSFQ x))) + // match: (ApproximateReciprocalOfSqrtMaskedFloat64x4 x mask) + // result: (VRSQRT14PDMasked256 x (VPMOVVec64x4ToM mask)) for { - t := v.Type x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpAMD64CMOVQEQ) - v0 := b.NewValue0(v.Pos, OpSelect0, t) - v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t) - v2.AuxInt = int64ToAuxInt(64) - v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v3.AddArg(v1) - v.AddArg3(v0, v2, v3) + mask := v_1 + 
v.reset(OpAMD64VRSQRT14PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - return false } -func rewriteValueAMD64_OpCtz64NonZero(v *Value) bool { +func rewriteValueAMD64_OpApproximateReciprocalOfSqrtMaskedFloat64x8(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Ctz64NonZero x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (TZCNTQ x) - for { - x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64TZCNTQ) - v.AddArg(x) - return true - } - // match: (Ctz64NonZero x) - // cond: buildcfg.GOAMD64 < 3 - // result: (Select0 (BSFQ x)) + // match: (ApproximateReciprocalOfSqrtMaskedFloat64x8 x mask) + // result: (VRSQRT14PDMasked512 x (VPMOVVec64x8ToM mask)) for { x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v0.AddArg(x) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VRSQRT14PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - return false } -func rewriteValueAMD64_OpCtz8(v *Value) bool { +func rewriteValueAMD64_OpAtomicAdd32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Ctz8 x) - // result: (BSFL (ORLconst [1<<8 ] x)) - for { - x := v_0 - v.reset(OpAMD64BSFL) - v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32) - v0.AuxInt = int32ToAuxInt(1 << 8) - v0.AddArg(x) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpCtz8NonZero(v *Value) bool { - v_0 := v.Args[0] - // match: (Ctz8NonZero x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (TZCNTL x) - for { - x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64TZCNTL) - v.AddArg(x) - return true - } - // match: (Ctz8NonZero x) - // cond: buildcfg.GOAMD64 < 3 - // result: (BSFL x) - for { - x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpAMD64BSFL) - v.AddArg(x) - return true - } - return false -} -func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithCeilWithPrecisionFloat32x16 [a] x) - // result: (VREDUCEPS512 [a+2] x) + // match: (AtomicAdd32 ptr val mem) + // result: (AddTupleFirst32 val (XADDLlock val ptr mem)) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS512) - v.AuxInt = int8ToAuxInt(a + 2) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64AddTupleFirst32) + v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, types.NewTuple(typ.UInt32, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg2(val, v0) return true } } -func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpAtomicAdd64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DiffWithCeilWithPrecisionFloat32x4 [a] x) - // result: (VREDUCEPS128 [a+2] x) + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicAdd64 ptr val mem) + // result: (AddTupleFirst64 val (XADDQlock val ptr mem)) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS128) - v.AuxInt = int8ToAuxInt(a + 2) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64AddTupleFirst64) + v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg2(val, v0) return true } } -func 
rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpAtomicAnd32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DiffWithCeilWithPrecisionFloat32x8 [a] x) - // result: (VREDUCEPS256 [a+2] x) + // match: (AtomicAnd32 ptr val mem) + // result: (ANDLlock ptr val mem) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS256) - v.AuxInt = int8ToAuxInt(a + 2) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64ANDLlock) + v.AddArg3(ptr, val, mem) return true } } -func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpAtomicAnd32value(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DiffWithCeilWithPrecisionFloat64x2 [a] x) - // result: (VREDUCEPD128 [a+2] x) + // match: (AtomicAnd32value ptr val mem) + // result: (LoweredAtomicAnd32 ptr val mem) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD128) - v.AuxInt = int8ToAuxInt(a + 2) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64LoweredAtomicAnd32) + v.AddArg3(ptr, val, mem) return true } } -func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpAtomicAnd64value(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DiffWithCeilWithPrecisionFloat64x4 [a] x) - // result: (VREDUCEPD256 [a+2] x) + // match: (AtomicAnd64value ptr val mem) + // result: (LoweredAtomicAnd64 ptr val mem) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD256) - v.AuxInt = int8ToAuxInt(a + 2) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64LoweredAtomicAnd64) + v.AddArg3(ptr, val, mem) return true } } -func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpAtomicAnd8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DiffWithCeilWithPrecisionFloat64x8 [a] x) - // result: (VREDUCEPD512 [a+2] x) + // match: (AtomicAnd8 ptr val mem) + // result: (ANDBlock ptr val mem) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD512) - v.AuxInt = int8ToAuxInt(a + 2) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64ANDBlock) + v.AddArg3(ptr, val, mem) return true } } -func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpAtomicCompareAndSwap32(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DiffWithFloorWithPrecisionFloat32x16 [a] x) - // result: (VREDUCEPS512 [a+1] x) + // match: (AtomicCompareAndSwap32 ptr old new_ mem) + // result: (CMPXCHGLlock ptr old new_ mem) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS512) - v.AuxInt = int8ToAuxInt(a + 1) - v.AddArg(x) + ptr := v_0 + old := v_1 + new_ := v_2 + mem := v_3 + v.reset(OpAMD64CMPXCHGLlock) + v.AddArg4(ptr, old, new_, mem) return true } } -func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpAtomicCompareAndSwap64(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DiffWithFloorWithPrecisionFloat32x4 [a] x) - // result: (VREDUCEPS128 [a+1] x) + // match: (AtomicCompareAndSwap64 ptr old new_ mem) + // result: (CMPXCHGQlock ptr old new_ mem) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - 
v.reset(OpAMD64VREDUCEPS128) - v.AuxInt = int8ToAuxInt(a + 1) - v.AddArg(x) + ptr := v_0 + old := v_1 + new_ := v_2 + mem := v_3 + v.reset(OpAMD64CMPXCHGQlock) + v.AddArg4(ptr, old, new_, mem) return true } } -func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpAtomicExchange32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DiffWithFloorWithPrecisionFloat32x8 [a] x) - // result: (VREDUCEPS256 [a+1] x) + // match: (AtomicExchange32 ptr val mem) + // result: (XCHGL val ptr mem) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS256) - v.AuxInt = int8ToAuxInt(a + 1) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64XCHGL) + v.AddArg3(val, ptr, mem) return true } } -func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpAtomicExchange64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DiffWithFloorWithPrecisionFloat64x2 [a] x) - // result: (VREDUCEPD128 [a+1] x) + // match: (AtomicExchange64 ptr val mem) + // result: (XCHGQ val ptr mem) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD128) - v.AuxInt = int8ToAuxInt(a + 1) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64XCHGQ) + v.AddArg3(val, ptr, mem) return true } } -func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpAtomicExchange8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DiffWithFloorWithPrecisionFloat64x4 [a] x) - // result: (VREDUCEPD256 [a+1] x) + // match: (AtomicExchange8 ptr val mem) + // result: (XCHGB val ptr mem) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD256) - v.AuxInt = int8ToAuxInt(a + 1) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64XCHGB) + v.AddArg3(val, ptr, mem) return true } } -func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpAtomicLoad32(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DiffWithFloorWithPrecisionFloat64x8 [a] x) - // result: (VREDUCEPD512 [a+1] x) + // match: (AtomicLoad32 ptr mem) + // result: (MOVLatomicload ptr mem) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD512) - v.AuxInt = int8ToAuxInt(a + 1) - v.AddArg(x) + ptr := v_0 + mem := v_1 + v.reset(OpAMD64MOVLatomicload) + v.AddArg2(ptr, mem) return true } } -func rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpAtomicLoad64(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DiffWithRoundWithPrecisionFloat32x16 [a] x) - // result: (VREDUCEPS512 [a+0] x) + // match: (AtomicLoad64 ptr mem) + // result: (MOVQatomicload ptr mem) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS512) - v.AuxInt = int8ToAuxInt(a + 0) - v.AddArg(x) + ptr := v_0 + mem := v_1 + v.reset(OpAMD64MOVQatomicload) + v.AddArg2(ptr, mem) return true } } -func rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpAtomicLoad8(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DiffWithRoundWithPrecisionFloat32x4 [a] x) - // result: (VREDUCEPS128 [a+0] x) + // match: (AtomicLoad8 ptr mem) + // result: (MOVBatomicload ptr mem) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS128) - v.AuxInt = int8ToAuxInt(a + 0) - 
v.AddArg(x) + ptr := v_0 + mem := v_1 + v.reset(OpAMD64MOVBatomicload) + v.AddArg2(ptr, mem) return true } } -func rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpAtomicLoadPtr(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DiffWithRoundWithPrecisionFloat32x8 [a] x) - // result: (VREDUCEPS256 [a+0] x) + // match: (AtomicLoadPtr ptr mem) + // result: (MOVQatomicload ptr mem) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS256) - v.AuxInt = int8ToAuxInt(a + 0) - v.AddArg(x) + ptr := v_0 + mem := v_1 + v.reset(OpAMD64MOVQatomicload) + v.AddArg2(ptr, mem) return true } } -func rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpAtomicOr32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DiffWithRoundWithPrecisionFloat64x2 [a] x) - // result: (VREDUCEPD128 [a+0] x) + // match: (AtomicOr32 ptr val mem) + // result: (ORLlock ptr val mem) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD128) - v.AuxInt = int8ToAuxInt(a + 0) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64ORLlock) + v.AddArg3(ptr, val, mem) return true } } -func rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpAtomicOr32value(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DiffWithRoundWithPrecisionFloat64x4 [a] x) - // result: (VREDUCEPD256 [a+0] x) + // match: (AtomicOr32value ptr val mem) + // result: (LoweredAtomicOr32 ptr val mem) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD256) - v.AuxInt = int8ToAuxInt(a + 0) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64LoweredAtomicOr32) + v.AddArg3(ptr, val, mem) return true } } -func rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpAtomicOr64value(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DiffWithRoundWithPrecisionFloat64x8 [a] x) - // result: (VREDUCEPD512 [a+0] x) + // match: (AtomicOr64value ptr val mem) + // result: (LoweredAtomicOr64 ptr val mem) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD512) - v.AuxInt = int8ToAuxInt(a + 0) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64LoweredAtomicOr64) + v.AddArg3(ptr, val, mem) return true } } -func rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpAtomicOr8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DiffWithTruncWithPrecisionFloat32x16 [a] x) - // result: (VREDUCEPS512 [a+3] x) + // match: (AtomicOr8 ptr val mem) + // result: (ORBlock ptr val mem) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS512) - v.AuxInt = int8ToAuxInt(a + 3) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64ORBlock) + v.AddArg3(ptr, val, mem) return true } } -func rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpAtomicStore32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DiffWithTruncWithPrecisionFloat32x4 [a] x) - // result: (VREDUCEPS128 [a+3] x) + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicStore32 ptr val mem) + // result: (Select1 (XCHGL val ptr mem)) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - 
v.reset(OpAMD64VREDUCEPS128) - v.AuxInt = int8ToAuxInt(a + 3) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpAtomicStore64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DiffWithTruncWithPrecisionFloat32x8 [a] x) - // result: (VREDUCEPS256 [a+3] x) + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicStore64 ptr val mem) + // result: (Select1 (XCHGQ val ptr mem)) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS256) - v.AuxInt = int8ToAuxInt(a + 3) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpAtomicStore8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DiffWithTruncWithPrecisionFloat64x2 [a] x) - // result: (VREDUCEPD128 [a+3] x) + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicStore8 ptr val mem) + // result: (Select1 (XCHGB val ptr mem)) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD128) - v.AuxInt = int8ToAuxInt(a + 3) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64XCHGB, types.NewTuple(typ.UInt8, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpAtomicStorePtrNoWB(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DiffWithTruncWithPrecisionFloat64x4 [a] x) - // result: (VREDUCEPD256 [a+3] x) + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicStorePtrNoWB ptr val mem) + // result: (Select1 (XCHGQ val ptr mem)) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD256) - v.AuxInt = int8ToAuxInt(a + 3) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpAverageMaskedUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DiffWithTruncWithPrecisionFloat64x8 [a] x) - // result: (VREDUCEPD512 [a+3] x) + b := v.Block + // match: (AverageMaskedUint16x16 x y mask) + // result: (VPAVGWMasked256 x y (VPMOVVec16x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VREDUCEPD512) - v.AuxInt = int8ToAuxInt(a + 3) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPAVGWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpDiv16(v *Value) bool { +func rewriteValueAMD64_OpAverageMaskedUint16x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Div16 [a] x y) - // result: (Select0 (DIVW [a] x y)) + // match: (AverageMaskedUint16x32 x y mask) + // 
result: (VPAVGWMasked512 x y (VPMOVVec16x32ToM mask)) for { - a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) - v0.AuxInt = boolToAuxInt(a) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPAVGWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpDiv16u(v *Value) bool { +func rewriteValueAMD64_OpAverageMaskedUint16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Div16u x y) - // result: (Select0 (DIVWU x y)) + // match: (AverageMaskedUint16x8 x y mask) + // result: (VPAVGWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPAVGWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpDiv32(v *Value) bool { +func rewriteValueAMD64_OpAverageMaskedUint8x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Div32 [a] x y) - // result: (Select0 (DIVL [a] x y)) + // match: (AverageMaskedUint8x16 x y mask) + // result: (VPAVGBMasked128 x y (VPMOVVec8x16ToM mask)) for { - a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) - v0.AuxInt = boolToAuxInt(a) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPAVGBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpDiv32u(v *Value) bool { +func rewriteValueAMD64_OpAverageMaskedUint8x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Div32u x y) - // result: (Select0 (DIVLU x y)) + // match: (AverageMaskedUint8x32 x y mask) + // result: (VPAVGBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32)) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPAVGBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpDiv64(v *Value) bool { +func rewriteValueAMD64_OpAverageMaskedUint8x64(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Div64 [a] x y) - // result: (Select0 (DIVQ [a] x y)) + // match: (AverageMaskedUint8x64 x y mask) + // result: (VPAVGBMasked512 x y (VPMOVVec8x64ToM mask)) for { - a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64)) - v0.AuxInt = boolToAuxInt(a) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPAVGBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpDiv64u(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpBitLen16(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := 
&b.Func.Config.Types - // match: (Div64u x y) - // result: (Select0 (DIVQU x y)) + // match: (BitLen16 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (BSRL (LEAL1 [1] (MOVWQZX x) (MOVWQZX x))) for { x := v_0 - y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) - v0.AddArg2(x, y) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64BSRL) + v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32) + v0.AuxInt = int32ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32) + v1.AddArg(x) + v0.AddArg2(v1, v1) + v.AddArg(v0) + return true + } + // match: (BitLen16 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (NEGQ (ADDQconst [-32] (LZCNTL (MOVWQZX x)))) + for { + t := v.Type + x := v_0 + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64NEGQ) + v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) + v0.AuxInt = int32ToAuxInt(-32) + v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, x.Type) + v2.AddArg(x) + v1.AddArg(v2) + v0.AddArg(v1) v.AddArg(v0) return true } + return false } -func rewriteValueAMD64_OpDiv8(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpBitLen32(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Div8 x y) - // result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y))) + // match: (BitLen32 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (Select0 (BSRQ (LEAQ1 [1] (MOVLQZX x) (MOVLQZX x)))) for { x := v_0 - y := v_1 + if !(buildcfg.GOAMD64 < 3) { + break + } v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) - v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) - v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) - v2.AddArg(y) - v0.AddArg2(v1, v2) + v0 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v1 := b.NewValue0(v.Pos, OpAMD64LEAQ1, typ.UInt64) + v1.AuxInt = int32ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64) + v2.AddArg(x) + v1.AddArg2(v2, v2) + v0.AddArg(v1) v.AddArg(v0) return true } -} -func rewriteValueAMD64_OpDiv8u(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Div8u x y) - // result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) + // match: (BitLen32 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (NEGQ (ADDQconst [-32] (LZCNTL x))) for { + t := v.Type x := v_0 - y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) - v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64NEGQ) + v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) + v0.AuxInt = int32ToAuxInt(-32) + v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) - v2.AddArg(y) - v0.AddArg2(v1, v2) + v0.AddArg(v1) v.AddArg(v0) return true } + return false } -func rewriteValueAMD64_OpDotProdBroadcastFloat64x2(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpBitLen64(v *Value) bool { v_0 := v.Args[0] - // match: (DotProdBroadcastFloat64x2 x y) - // result: (VDPPD128 [127] x y) + b := v.Block + typ := &b.Func.Config.Types + // match: (BitLen64 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (ADDQconst [1] (CMOVQEQ (Select0 (BSRQ x)) (MOVQconst [-1]) (Select1 (BSRQ x)))) for { + t := v.Type x := v_0 - y := v_1 - v.reset(OpAMD64VDPPD128) - v.AuxInt = int8ToAuxInt(127) - 
v.AddArg2(x, y) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64ADDQconst) + v.AuxInt = int32ToAuxInt(1) + v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t) + v1 := b.NewValue0(v.Pos, OpSelect0, t) + v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v2.AddArg(x) + v1.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t) + v3.AuxInt = int64ToAuxInt(-1) + v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v4.AddArg(v2) + v0.AddArg3(v1, v3, v4) + v.AddArg(v0) return true } -} -func rewriteValueAMD64_OpEq16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Eq16 x y) - // result: (SETEQ (CMPW x y)) + // match: (BitLen64 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (NEGQ (ADDQconst [-64] (LZCNTQ x))) for { + t := v.Type x := v_0 - y := v_1 - v.reset(OpAMD64SETEQ) - v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg2(x, y) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64NEGQ) + v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) + v0.AuxInt = int32ToAuxInt(-64) + v1 := b.NewValue0(v.Pos, OpAMD64LZCNTQ, typ.UInt64) + v1.AddArg(x) + v0.AddArg(v1) v.AddArg(v0) return true } + return false } -func rewriteValueAMD64_OpEq32(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpBitLen8(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (Eq32 x y) - // result: (SETEQ (CMPL x y)) + typ := &b.Func.Config.Types + // match: (BitLen8 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (BSRL (LEAL1 [1] (MOVBQZX x) (MOVBQZX x))) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETEQ) - v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg2(x, y) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64BSRL) + v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32) + v0.AuxInt = int32ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32) + v1.AddArg(x) + v0.AddArg2(v1, v1) v.AddArg(v0) return true } -} -func rewriteValueAMD64_OpEq32F(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Eq32F x y) - // result: (SETEQF (UCOMISS x y)) + // match: (BitLen8 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (NEGQ (ADDQconst [-32] (LZCNTL (MOVBQZX x)))) for { + t := v.Type x := v_0 - y := v_1 - v.reset(OpAMD64SETEQF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) - v0.AddArg2(x, y) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64NEGQ) + v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) + v0.AuxInt = int32ToAuxInt(-32) + v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, x.Type) + v2.AddArg(x) + v1.AddArg(v2) + v0.AddArg(v1) v.AddArg(v0) return true } + return false } -func rewriteValueAMD64_OpEq64(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpBswap16(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (Eq64 x y) - // result: (SETEQ (CMPQ x y)) + // match: (Bswap16 x) + // result: (ROLWconst [8] x) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETEQ) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64ROLWconst) + v.AuxInt = int8ToAuxInt(8) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpEq64F(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpCeil(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (Eq64F x y) - // result: (SETEQF (UCOMISD x y)) + // match: (Ceil x) + // result: (ROUNDSD [2] x) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETEQF) - v0 := b.NewValue0(v.Pos, 
OpAMD64UCOMISD, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64ROUNDSD) + v.AuxInt = int8ToAuxInt(2) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpEq8(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpCeilFloat32x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (Eq8 x y) - // result: (SETEQ (CMPB x y)) + // match: (CeilFloat32x4 x) + // result: (VROUNDPS128 [2] x) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETEQ) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VROUNDPS128) + v.AuxInt = int8ToAuxInt(2) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpEqB(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpCeilFloat32x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (EqB x y) - // result: (SETEQ (CMPB x y)) + // match: (CeilFloat32x8 x) + // result: (VROUNDPS256 [2] x) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETEQ) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VROUNDPS256) + v.AuxInt = int8ToAuxInt(2) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpEqPtr(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpCeilFloat64x2(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (EqPtr x y) - // result: (SETEQ (CMPQ x y)) + // match: (CeilFloat64x2 x) + // result: (VROUNDPD128 [2] x) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETEQ) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VROUNDPD128) + v.AuxInt = int8ToAuxInt(2) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpEqualFloat32x16(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpCeilFloat64x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualFloat32x16 x y) - // result: (VPMOVMToVec32x16 (VCMPPS512 [0] x y)) + // match: (CeilFloat64x4 x) + // result: (VROUNDPD256 [2] x) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VROUNDPD256) + v.AuxInt = int8ToAuxInt(2) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpEqualFloat32x4(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpCeilWithPrecisionFloat32x16(v *Value) bool { v_0 := v.Args[0] - // match: (EqualFloat32x4 x y) - // result: (VCMPPS128 [0] x y) + // match: (CeilWithPrecisionFloat32x16 [a] x) + // result: (VRNDSCALEPS512 [a+2] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(0) - v.AddArg2(x, y) + v.reset(OpAMD64VRNDSCALEPS512) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpEqualFloat32x8(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpCeilWithPrecisionFloat32x4(v *Value) bool { v_0 := v.Args[0] - // match: (EqualFloat32x8 x y) - // result: (VCMPPS256 [0] x y) + // match: (CeilWithPrecisionFloat32x4 [a] x) + // result: (VRNDSCALEPS128 [a+2] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(0) - v.AddArg2(x, y) + v.reset(OpAMD64VRNDSCALEPS128) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpEqualFloat64x2(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpCeilWithPrecisionFloat32x8(v *Value) bool { v_0 := v.Args[0] - // match: (EqualFloat64x2 
x y) - // result: (VCMPPD128 [0] x y) + // match: (CeilWithPrecisionFloat32x8 [a] x) + // result: (VRNDSCALEPS256 [a+2] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(0) - v.AddArg2(x, y) + v.reset(OpAMD64VRNDSCALEPS256) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpEqualFloat64x4(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpCeilWithPrecisionFloat64x2(v *Value) bool { v_0 := v.Args[0] - // match: (EqualFloat64x4 x y) - // result: (VCMPPD256 [0] x y) + // match: (CeilWithPrecisionFloat64x2 [a] x) + // result: (VRNDSCALEPD128 [a+2] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(0) - v.AddArg2(x, y) + v.reset(OpAMD64VRNDSCALEPD128) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpEqualFloat64x8(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpCeilWithPrecisionFloat64x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualFloat64x8 x y) - // result: (VPMOVMToVec64x8 (VCMPPD512 [0] x y)) + // match: (CeilWithPrecisionFloat64x4 [a] x) + // result: (VRNDSCALEPD256 [a+2] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VRNDSCALEPD256) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpEqualInt16x32(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpCeilWithPrecisionFloat64x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualInt16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPW512 [0] x y)) + // match: (CeilWithPrecisionFloat64x8 [a] x) + // result: (VRNDSCALEPD512 [a+2] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VRNDSCALEPD512) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpEqualInt32x16(v *Value) bool { +func rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (EqualInt32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPD512 [0] x y)) + // match: (CeilWithPrecisionMaskedFloat32x16 [a] x mask) + // result: (VRNDSCALEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpEqualInt64x8(v *Value) bool { +func rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (EqualInt64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPQ512 [0] x y)) + // match: (CeilWithPrecisionMaskedFloat32x4 [a] x mask) + // result: (VRNDSCALEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := 
v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpEqualInt8x64(v *Value) bool { +func rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (EqualInt8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPB512 [0] x y)) + // match: (CeilWithPrecisionMaskedFloat32x8 [a] x mask) + // result: (VRNDSCALEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpEqualUint16x16(v *Value) bool { +func rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (EqualUint16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPUW256 [0] x y)) + // match: (CeilWithPrecisionMaskedFloat64x2 [a] x mask) + // result: (VRNDSCALEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpEqualUint16x32(v *Value) bool { +func rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (EqualUint16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPUW512 [0] x y)) + // match: (CeilWithPrecisionMaskedFloat64x4 [a] x mask) + // result: (VRNDSCALEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpEqualUint16x8(v *Value) bool { +func rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (EqualUint16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPUW128 [0] x y)) + // match: (CeilWithPrecisionMaskedFloat64x8 [a] x mask) + // result: (VRNDSCALEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + 
v.reset(OpAMD64VRNDSCALEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpEqualUint32x16(v *Value) bool { +func rewriteValueAMD64_OpCondSelect(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualUint32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPUD512 [0] x y)) + // match: (CondSelect x y (SETEQ cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQEQ y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) + if v_2.Op != OpAMD64SETEQ { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQEQ) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpEqualUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualUint32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPUD128 [0] x y)) + // match: (CondSelect x y (SETNE cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQNE y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) + if v_2.Op != OpAMD64SETNE { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQNE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpEqualUint32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualUint32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPUD256 [0] x y)) + // match: (CondSelect x y (SETL cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQLT y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) + if v_2.Op != OpAMD64SETL { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQLT) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpEqualUint64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualUint64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPUQ128 [0] x y)) + // match: (CondSelect x y (SETG cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQGT y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) + if v_2.Op != OpAMD64SETG { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQGT) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpEqualUint64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualUint64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPUQ256 [0] x y)) + // match: (CondSelect x y (SETLE cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQLE y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - 
v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) + if v_2.Op != OpAMD64SETLE { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQLE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpEqualUint64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualUint64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPUQ512 [0] x y)) + // match: (CondSelect x y (SETGE cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQGE y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) + if v_2.Op != OpAMD64SETGE { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQGE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpEqualUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualUint8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPUB128 [0] x y)) + // match: (CondSelect x y (SETA cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQHI y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) + if v_2.Op != OpAMD64SETA { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQHI) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpEqualUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualUint8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPUB256 [0] x y)) + // match: (CondSelect x y (SETB cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQCS y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) + if v_2.Op != OpAMD64SETB { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQCS) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpEqualUint8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualUint8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPUB512 [0] x y)) + // match: (CondSelect x y (SETAE cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQCC y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) + if v_2.Op != OpAMD64SETAE { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQCC) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpFMA(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (FMA x y z) - // result: (VFMADD231SD z x y) + // match: (CondSelect x y (SETBE cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQLS y x cond) for { + t := v.Type x := v_0 y := v_1 - z := v_2 - v.reset(OpAMD64VFMADD231SD) - v.AddArg3(z, x, y) + if v_2.Op != OpAMD64SETBE { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } 
+ v.reset(OpAMD64CMOVQLS) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpFloor(v *Value) bool { - v_0 := v.Args[0] - // match: (Floor x) - // result: (ROUNDSD [1] x) + // match: (CondSelect x y (SETEQF cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQEQF y x cond) for { + t := v.Type x := v_0 - v.reset(OpAMD64ROUNDSD) - v.AuxInt = int8ToAuxInt(1) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETEQF { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQEQF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpFloorFloat32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (FloorFloat32x4 x) - // result: (VROUNDPS128 [1] x) + // match: (CondSelect x y (SETNEF cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQNEF y x cond) for { + t := v.Type x := v_0 - v.reset(OpAMD64VROUNDPS128) - v.AuxInt = int8ToAuxInt(1) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETNEF { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQNEF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpFloorFloat32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (FloorFloat32x8 x) - // result: (VROUNDPS256 [1] x) + // match: (CondSelect x y (SETGF cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQGTF y x cond) for { + t := v.Type x := v_0 - v.reset(OpAMD64VROUNDPS256) - v.AuxInt = int8ToAuxInt(1) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETGF { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQGTF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpFloorFloat64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (FloorFloat64x2 x) - // result: (VROUNDPD128 [1] x) + // match: (CondSelect x y (SETGEF cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQGEF y x cond) for { + t := v.Type x := v_0 - v.reset(OpAMD64VROUNDPD128) - v.AuxInt = int8ToAuxInt(1) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETGEF { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQGEF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpFloorFloat64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (FloorFloat64x4 x) - // result: (VROUNDPD256 [1] x) + // match: (CondSelect x y (SETEQ cond)) + // cond: is32BitInt(t) + // result: (CMOVLEQ y x cond) for { + t := v.Type x := v_0 - v.reset(OpAMD64VROUNDPD256) - v.AuxInt = int8ToAuxInt(1) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETEQ { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLEQ) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpFloorWithPrecisionFloat32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (FloorWithPrecisionFloat32x16 [a] x) - // result: (VRNDSCALEPS512 [a+1] x) + // match: (CondSelect x y (SETNE cond)) + // cond: is32BitInt(t) + // result: (CMOVLNE y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 - v.reset(OpAMD64VRNDSCALEPS512) - v.AuxInt = int8ToAuxInt(a + 1) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETNE { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLNE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpFloorWithPrecisionFloat32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (FloorWithPrecisionFloat32x4 [a] x) - // result: (VRNDSCALEPS128 [a+1] x) + // match: (CondSelect x 
y (SETL cond)) + // cond: is32BitInt(t) + // result: (CMOVLLT y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 - v.reset(OpAMD64VRNDSCALEPS128) - v.AuxInt = int8ToAuxInt(a + 1) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETL { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLLT) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpFloorWithPrecisionFloat32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (FloorWithPrecisionFloat32x8 [a] x) - // result: (VRNDSCALEPS256 [a+1] x) + // match: (CondSelect x y (SETG cond)) + // cond: is32BitInt(t) + // result: (CMOVLGT y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 - v.reset(OpAMD64VRNDSCALEPS256) - v.AuxInt = int8ToAuxInt(a + 1) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETG { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLGT) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpFloorWithPrecisionFloat64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (FloorWithPrecisionFloat64x2 [a] x) - // result: (VRNDSCALEPD128 [a+1] x) + // match: (CondSelect x y (SETLE cond)) + // cond: is32BitInt(t) + // result: (CMOVLLE y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 - v.reset(OpAMD64VRNDSCALEPD128) - v.AuxInt = int8ToAuxInt(a + 1) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETLE { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLLE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpFloorWithPrecisionFloat64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (FloorWithPrecisionFloat64x4 [a] x) - // result: (VRNDSCALEPD256 [a+1] x) + // match: (CondSelect x y (SETGE cond)) + // cond: is32BitInt(t) + // result: (CMOVLGE y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 - v.reset(OpAMD64VRNDSCALEPD256) - v.AuxInt = int8ToAuxInt(a + 1) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETGE { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLGE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpFloorWithPrecisionFloat64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (FloorWithPrecisionFloat64x8 [a] x) - // result: (VRNDSCALEPD512 [a+1] x) + // match: (CondSelect x y (SETA cond)) + // cond: is32BitInt(t) + // result: (CMOVLHI y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 - v.reset(OpAMD64VRNDSCALEPD512) - v.AuxInt = int8ToAuxInt(a + 1) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETA { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLHI) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpGaloisFieldAffineTransformInversedUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GaloisFieldAffineTransformInversedUint8x16 [a] x y) - // result: (VGF2P8AFFINEINVQB128 [a] x y) + // match: (CondSelect x y (SETB cond)) + // cond: is32BitInt(t) + // result: (CMOVLCS y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VGF2P8AFFINEINVQB128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + if v_2.Op != OpAMD64SETB { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLCS) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpGaloisFieldAffineTransformInversedUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: 
(GaloisFieldAffineTransformInversedUint8x32 [a] x y) - // result: (VGF2P8AFFINEINVQB256 [a] x y) + // match: (CondSelect x y (SETAE cond)) + // cond: is32BitInt(t) + // result: (CMOVLCC y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VGF2P8AFFINEINVQB256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + if v_2.Op != OpAMD64SETAE { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLCC) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpGaloisFieldAffineTransformInversedUint8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GaloisFieldAffineTransformInversedUint8x64 [a] x y) - // result: (VGF2P8AFFINEINVQB512 [a] x y) + // match: (CondSelect x y (SETBE cond)) + // cond: is32BitInt(t) + // result: (CMOVLLS y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VGF2P8AFFINEINVQB512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + if v_2.Op != OpAMD64SETBE { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLLS) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpGaloisFieldAffineTransformUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GaloisFieldAffineTransformUint8x16 [a] x y) - // result: (VGF2P8AFFINEQB128 [a] x y) + // match: (CondSelect x y (SETEQF cond)) + // cond: is32BitInt(t) + // result: (CMOVLEQF y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VGF2P8AFFINEQB128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + if v_2.Op != OpAMD64SETEQF { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLEQF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpGaloisFieldAffineTransformUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GaloisFieldAffineTransformUint8x32 [a] x y) - // result: (VGF2P8AFFINEQB256 [a] x y) + // match: (CondSelect x y (SETNEF cond)) + // cond: is32BitInt(t) + // result: (CMOVLNEF y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VGF2P8AFFINEQB256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + if v_2.Op != OpAMD64SETNEF { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLNEF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpGaloisFieldAffineTransformUint8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GaloisFieldAffineTransformUint8x64 [a] x y) - // result: (VGF2P8AFFINEQB512 [a] x y) + // match: (CondSelect x y (SETGF cond)) + // cond: is32BitInt(t) + // result: (CMOVLGTF y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VGF2P8AFFINEQB512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + if v_2.Op != OpAMD64SETGF { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLGTF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpGet128Float32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (Get128Float32x8 [a] x) - // result: (VEXTRACTF128128 [a] x) + // match: (CondSelect x y (SETGEF cond)) + // cond: is32BitInt(t) + // result: (CMOVLGEF y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 - v.reset(OpAMD64VEXTRACTF128128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETGEF { + break + } + cond := v_2.Args[0] + if 
!(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLGEF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpGet128Float64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (Get128Float64x4 [a] x) - // result: (VEXTRACTF128128 [a] x) + // match: (CondSelect x y (SETEQ cond)) + // cond: is16BitInt(t) + // result: (CMOVWEQ y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 - v.reset(OpAMD64VEXTRACTF128128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETEQ { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWEQ) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpGet128Int16x16(v *Value) bool { - v_0 := v.Args[0] - // match: (Get128Int16x16 [a] x) - // result: (VEXTRACTI128128 [a] x) + // match: (CondSelect x y (SETNE cond)) + // cond: is16BitInt(t) + // result: (CMOVWNE y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 - v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETNE { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWNE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpGet128Int32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (Get128Int32x8 [a] x) - // result: (VEXTRACTI128128 [a] x) + // match: (CondSelect x y (SETL cond)) + // cond: is16BitInt(t) + // result: (CMOVWLT y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 - v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETL { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWLT) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpGet128Int64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (Get128Int64x4 [a] x) - // result: (VEXTRACTI128128 [a] x) + // match: (CondSelect x y (SETG cond)) + // cond: is16BitInt(t) + // result: (CMOVWGT y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 - v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETG { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWGT) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpGet128Int8x32(v *Value) bool { - v_0 := v.Args[0] - // match: (Get128Int8x32 [a] x) - // result: (VEXTRACTI128128 [a] x) + // match: (CondSelect x y (SETLE cond)) + // cond: is16BitInt(t) + // result: (CMOVWLE y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 - v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETLE { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWLE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpGet128Uint16x16(v *Value) bool { - v_0 := v.Args[0] - // match: (Get128Uint16x16 [a] x) - // result: (VEXTRACTI128128 [a] x) + // match: (CondSelect x y (SETGE cond)) + // cond: is16BitInt(t) + // result: (CMOVWGE y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 - v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETGE { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWGE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpGet128Uint32x8(v *Value) bool { - v_0 := v.Args[0] - // match: 
(Get128Uint32x8 [a] x) - // result: (VEXTRACTI128128 [a] x) + // match: (CondSelect x y (SETA cond)) + // cond: is16BitInt(t) + // result: (CMOVWHI y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 - v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETA { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWHI) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpGet128Uint64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (Get128Uint64x4 [a] x) - // result: (VEXTRACTI128128 [a] x) + // match: (CondSelect x y (SETB cond)) + // cond: is16BitInt(t) + // result: (CMOVWCS y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 - v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETB { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWCS) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpGet128Uint8x32(v *Value) bool { - v_0 := v.Args[0] - // match: (Get128Uint8x32 [a] x) - // result: (VEXTRACTI128128 [a] x) + // match: (CondSelect x y (SETAE cond)) + // cond: is16BitInt(t) + // result: (CMOVWCC y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 - v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETAE { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWCC) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpGetElemInt16x8(v *Value) bool { - v_0 := v.Args[0] - // match: (GetElemInt16x8 [a] x) - // result: (VPEXTRW128 [a] x) + // match: (CondSelect x y (SETBE cond)) + // cond: is16BitInt(t) + // result: (CMOVWLS y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 - v.reset(OpAMD64VPEXTRW128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETBE { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWLS) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpGetElemInt32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (GetElemInt32x4 [a] x) - // result: (VPEXTRD128 [a] x) + // match: (CondSelect x y (SETEQF cond)) + // cond: is16BitInt(t) + // result: (CMOVWEQF y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 - v.reset(OpAMD64VPEXTRD128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETEQF { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWEQF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpGetElemInt64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (GetElemInt64x2 [a] x) - // result: (VPEXTRQ128 [a] x) + // match: (CondSelect x y (SETNEF cond)) + // cond: is16BitInt(t) + // result: (CMOVWNEF y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 - v.reset(OpAMD64VPEXTRQ128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETNEF { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWNEF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpGetElemInt8x16(v *Value) bool { - v_0 := v.Args[0] - // match: (GetElemInt8x16 [a] x) - // result: (VPEXTRB128 [a] x) + // match: (CondSelect x y (SETGF cond)) + // cond: is16BitInt(t) + // result: (CMOVWGTF y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x 
:= v_0 - v.reset(OpAMD64VPEXTRB128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETGF { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWGTF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpGetElemUint16x8(v *Value) bool { - v_0 := v.Args[0] - // match: (GetElemUint16x8 [a] x) - // result: (VPEXTRW128 [a] x) + // match: (CondSelect x y (SETGEF cond)) + // cond: is16BitInt(t) + // result: (CMOVWGEF y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 - v.reset(OpAMD64VPEXTRW128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETGEF { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWGEF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpGetElemUint32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (GetElemUint32x4 [a] x) - // result: (VPEXTRD128 [a] x) + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 1 + // result: (CondSelect x y (MOVBQZX check)) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 - v.reset(OpAMD64VPEXTRD128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 1) { + break + } + v.reset(OpCondSelect) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt64) + v0.AddArg(check) + v.AddArg3(x, y, v0) return true } -} -func rewriteValueAMD64_OpGetElemUint64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (GetElemUint64x2 [a] x) - // result: (VPEXTRQ128 [a] x) + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 2 + // result: (CondSelect x y (MOVWQZX check)) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 - v.reset(OpAMD64VPEXTRQ128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 2) { + break + } + v.reset(OpCondSelect) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt64) + v0.AddArg(check) + v.AddArg3(x, y, v0) return true } -} -func rewriteValueAMD64_OpGetElemUint8x16(v *Value) bool { - v_0 := v.Args[0] - // match: (GetElemUint8x16 [a] x) - // result: (VPEXTRB128 [a] x) + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 4 + // result: (CondSelect x y (MOVLQZX check)) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 - v.reset(OpAMD64VPEXTRB128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 4) { + break + } + v.reset(OpCondSelect) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64) + v0.AddArg(check) + v.AddArg3(x, y, v0) return true } -} -func rewriteValueAMD64_OpGetG(v *Value) bool { - v_0 := v.Args[0] - // match: (GetG mem) - // cond: v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal - // result: (LoweredGetG mem) + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t)) + // result: (CMOVQNE y x (CMPQconst [0] check)) for { - mem := v_0 - if !(v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal) { + t := v.Type + x := v_0 + y := v_1 + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))) { break } - v.reset(OpAMD64LoweredGetG) - v.AddArg(mem) + v.reset(OpAMD64CMOVQNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(0) + 
v0.AddArg(check) + v.AddArg3(y, x, v0) return true } - return false -} -func rewriteValueAMD64_OpGreaterEqualFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualFloat32x16 x y) - // result: (VPMOVMToVec32x16 (VCMPPS512 [13] x y)) + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t) + // result: (CMOVLNE y x (CMPQconst [0] check)) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(0) + v0.AddArg(check) + v.AddArg3(y, x, v0) return true } -} -func rewriteValueAMD64_OpGreaterEqualFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GreaterEqualFloat32x4 x y) - // result: (VCMPPS128 [13] x y) + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t) + // result: (CMOVWNE y x (CMPQconst [0] check)) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(13) - v.AddArg2(x, y) + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(0) + v0.AddArg(check) + v.AddArg3(y, x, v0) return true } + return false } -func rewriteValueAMD64_OpGreaterEqualFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GreaterEqualFloat32x8 x y) - // result: (VCMPPS256 [13] x y) +func rewriteValueAMD64_OpConst16(v *Value) bool { + // match: (Const16 [c]) + // result: (MOVLconst [int32(c)]) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(13) - v.AddArg2(x, y) + c := auxIntToInt16(v.AuxInt) + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(int32(c)) return true } } -func rewriteValueAMD64_OpGreaterEqualFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GreaterEqualFloat64x2 x y) - // result: (VCMPPD128 [13] x y) +func rewriteValueAMD64_OpConst8(v *Value) bool { + // match: (Const8 [c]) + // result: (MOVLconst [int32(c)]) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(13) - v.AddArg2(x, y) + c := auxIntToInt8(v.AuxInt) + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(int32(c)) return true } } -func rewriteValueAMD64_OpGreaterEqualFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GreaterEqualFloat64x4 x y) - // result: (VCMPPD256 [13] x y) +func rewriteValueAMD64_OpConstBool(v *Value) bool { + // match: (ConstBool [c]) + // result: (MOVLconst [b2i32(c)]) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(13) - v.AddArg2(x, y) + c := auxIntToBool(v.AuxInt) + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(b2i32(c)) return true } } -func rewriteValueAMD64_OpGreaterEqualFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualFloat64x8 x y) - // result: (VPMOVMToVec64x8 (VCMPPD512 [13] x y)) +func rewriteValueAMD64_OpConstNil(v *Value) bool { + // match: (ConstNil ) + // result: (MOVQconst [0]) 
for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64MOVQconst) + v.AuxInt = int64ToAuxInt(0) return true } } -func rewriteValueAMD64_OpGreaterEqualInt16x16(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpCtz16(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualInt16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPW256 [13] x y)) + // match: (Ctz16 x) + // result: (BSFL (ORLconst [1<<16] x)) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) + v.reset(OpAMD64BSFL) + v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(1 << 16) + v0.AddArg(x) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualInt16x32(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpCtz16NonZero(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualInt16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPW512 [13] x y)) + // match: (Ctz16NonZero x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTL x) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTL) + v.AddArg(x) return true } -} -func rewriteValueAMD64_OpGreaterEqualInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualInt16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPW128 [13] x y)) + // match: (Ctz16NonZero x) + // cond: buildcfg.GOAMD64 < 3 + // result: (BSFL x) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64BSFL) + v.AddArg(x) return true } + return false } -func rewriteValueAMD64_OpGreaterEqualInt32x16(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpCtz32(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualInt32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPD512 [13] x y)) + // match: (Ctz32 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTL x) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTL) + v.AddArg(x) return true } -} -func rewriteValueAMD64_OpGreaterEqualInt32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualInt32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPD128 [13] x y)) + // match: (Ctz32 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (Select0 (BSFQ (BTSQconst [32] x))) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v1 := b.NewValue0(v.Pos, OpAMD64BTSQconst, typ.UInt64) + 
v1.AuxInt = int8ToAuxInt(32) + v1.AddArg(x) + v0.AddArg(v1) v.AddArg(v0) return true } + return false } -func rewriteValueAMD64_OpGreaterEqualInt32x8(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpCtz32NonZero(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualInt32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPD256 [13] x y)) + // match: (Ctz32NonZero x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTL x) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTL) + v.AddArg(x) return true } -} -func rewriteValueAMD64_OpGreaterEqualInt64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualInt64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPQ128 [13] x y)) + // match: (Ctz32NonZero x) + // cond: buildcfg.GOAMD64 < 3 + // result: (BSFL x) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64BSFL) + v.AddArg(x) return true } + return false } -func rewriteValueAMD64_OpGreaterEqualInt64x4(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpCtz64(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualInt64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPQ256 [13] x y)) + // match: (Ctz64 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTQ x) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTQ) + v.AddArg(x) return true } -} -func rewriteValueAMD64_OpGreaterEqualInt64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualInt64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPQ512 [13] x y)) + // match: (Ctz64 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (CMOVQEQ (Select0 (BSFQ x)) (MOVQconst [64]) (Select1 (BSFQ x))) for { + t := v.Type x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64CMOVQEQ) + v0 := b.NewValue0(v.Pos, OpSelect0, t) + v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v3.AddArg(v1) + v.AddArg3(v0, v2, v3) return true } + return false } -func rewriteValueAMD64_OpGreaterEqualInt8x16(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpCtz64NonZero(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualInt8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPB128 [13] x y)) + // match: (Ctz64NonZero x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTQ x) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) 
+ if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTQ) + v.AddArg(x) return true } -} -func rewriteValueAMD64_OpGreaterEqualInt8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualInt8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPB256 [13] x y)) + // match: (Ctz64NonZero x) + // cond: buildcfg.GOAMD64 < 3 + // result: (Select0 (BSFQ x)) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v0.AddArg(x) v.AddArg(v0) return true } + return false } -func rewriteValueAMD64_OpGreaterEqualInt8x64(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpCtz8(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualInt8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPB512 [13] x y)) + // match: (Ctz8 x) + // result: (BSFL (ORLconst [1<<8 ] x)) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) + v.reset(OpAMD64BSFL) + v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(1 << 8) + v0.AddArg(x) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualUint16x16(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpCtz8NonZero(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualUint16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPUW256 [13] x y)) + // match: (Ctz8NonZero x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTL x) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTL) + v.AddArg(x) return true } -} -func rewriteValueAMD64_OpGreaterEqualUint16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualUint16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPUW512 [13] x y)) + // match: (Ctz8NonZero x) + // cond: buildcfg.GOAMD64 < 3 + // result: (BSFL x) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64BSFL) + v.AddArg(x) return true } + return false } -func rewriteValueAMD64_OpGreaterEqualUint16x8(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat32x16(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualUint16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPUW128 [13] x y)) + // match: (DiffWithCeilWithPrecisionFloat32x16 [a] x) + // result: (VREDUCEPS512 [a+2] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VREDUCEPS512) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpGreaterEqualUint32x16(v *Value) bool { - v_1 := v.Args[1] +func 
rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat32x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualUint32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPUD512 [13] x y)) + // match: (DiffWithCeilWithPrecisionFloat32x4 [a] x) + // result: (VREDUCEPS128 [a+2] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VREDUCEPS128) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpGreaterEqualUint32x4(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat32x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualUint32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPUD128 [13] x y)) + // match: (DiffWithCeilWithPrecisionFloat32x8 [a] x) + // result: (VREDUCEPS256 [a+2] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VREDUCEPS256) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpGreaterEqualUint32x8(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat64x2(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualUint32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPUD256 [13] x y)) + // match: (DiffWithCeilWithPrecisionFloat64x2 [a] x) + // result: (VREDUCEPD128 [a+2] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VREDUCEPD128) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpGreaterEqualUint64x2(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat64x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualUint64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPUQ128 [13] x y)) + // match: (DiffWithCeilWithPrecisionFloat64x4 [a] x) + // result: (VREDUCEPD256 [a+2] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VREDUCEPD256) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpGreaterEqualUint64x4(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat64x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualUint64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPUQ256 [13] x y)) + // match: (DiffWithCeilWithPrecisionFloat64x8 [a] x) + // result: (VREDUCEPD512 [a+2] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VREDUCEPD512) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpGreaterEqualUint64x8(v *Value) bool { +func 
rewriteValueAMD64_OpDiffWithCeilWithPrecisionMaskedFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualUint64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPUQ512 [13] x y)) + // match: (DiffWithCeilWithPrecisionMaskedFloat32x16 [a] x mask) + // result: (VREDUCEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpGreaterEqualUint8x16(v *Value) bool { +func rewriteValueAMD64_OpDiffWithCeilWithPrecisionMaskedFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualUint8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPUB128 [13] x y)) + // match: (DiffWithCeilWithPrecisionMaskedFloat32x4 [a] x mask) + // result: (VREDUCEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpGreaterEqualUint8x32(v *Value) bool { +func rewriteValueAMD64_OpDiffWithCeilWithPrecisionMaskedFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualUint8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPUB256 [13] x y)) + // match: (DiffWithCeilWithPrecisionMaskedFloat32x8 [a] x mask) + // result: (VREDUCEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpGreaterEqualUint8x64(v *Value) bool { +func rewriteValueAMD64_OpDiffWithCeilWithPrecisionMaskedFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualUint8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPUB512 [13] x y)) + // match: (DiffWithCeilWithPrecisionMaskedFloat64x2 [a] x mask) + // result: (VREDUCEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpGreaterFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpDiffWithCeilWithPrecisionMaskedFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := 
v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterFloat32x16 x y) - // result: (VPMOVMToVec32x16 (VCMPPS512 [14] x y)) + // match: (DiffWithCeilWithPrecisionMaskedFloat64x4 [a] x mask) + // result: (VREDUCEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpGreaterFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpDiffWithCeilWithPrecisionMaskedFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (GreaterFloat32x4 x y) - // result: (VCMPPS128 [14] x y) + b := v.Block + // match: (DiffWithCeilWithPrecisionMaskedFloat64x8 [a] x mask) + // result: (VREDUCEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(14) - v.AddArg2(x, y) + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpGreaterFloat32x8(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat32x16(v *Value) bool { v_0 := v.Args[0] - // match: (GreaterFloat32x8 x y) - // result: (VCMPPS256 [14] x y) + // match: (DiffWithFloorWithPrecisionFloat32x16 [a] x) + // result: (VREDUCEPS512 [a+1] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(14) - v.AddArg2(x, y) + v.reset(OpAMD64VREDUCEPS512) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpGreaterFloat64x2(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat32x4(v *Value) bool { v_0 := v.Args[0] - // match: (GreaterFloat64x2 x y) - // result: (VCMPPD128 [14] x y) + // match: (DiffWithFloorWithPrecisionFloat32x4 [a] x) + // result: (VREDUCEPS128 [a+1] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(14) - v.AddArg2(x, y) + v.reset(OpAMD64VREDUCEPS128) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpGreaterFloat64x4(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat32x8(v *Value) bool { v_0 := v.Args[0] - // match: (GreaterFloat64x4 x y) - // result: (VCMPPD256 [14] x y) + // match: (DiffWithFloorWithPrecisionFloat32x8 [a] x) + // result: (VREDUCEPS256 [a+1] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(14) - v.AddArg2(x, y) + v.reset(OpAMD64VREDUCEPS256) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpGreaterFloat64x8(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat64x2(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterFloat64x8 x y) - // result: (VPMOVMToVec64x8 (VCMPPD512 [14] x y)) + // match: (DiffWithFloorWithPrecisionFloat64x2 [a] x) + // result: (VREDUCEPD128 [a+1] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y 
:= v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VREDUCEPD128) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpGreaterInt16x32(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat64x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterInt16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPW512 [14] x y)) + // match: (DiffWithFloorWithPrecisionFloat64x4 [a] x) + // result: (VREDUCEPD256 [a+1] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VREDUCEPD256) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpGreaterInt32x16(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat64x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterInt32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPD512 [14] x y)) + // match: (DiffWithFloorWithPrecisionFloat64x8 [a] x) + // result: (VREDUCEPD512 [a+1] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VREDUCEPD512) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpGreaterInt64x2(v *Value) bool { +func rewriteValueAMD64_OpDiffWithFloorWithPrecisionMaskedFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterInt64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPQ128 [14] x y)) + // match: (DiffWithFloorWithPrecisionMaskedFloat32x16 [a] x mask) + // result: (VREDUCEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpGreaterInt64x8(v *Value) bool { +func rewriteValueAMD64_OpDiffWithFloorWithPrecisionMaskedFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterInt64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPQ512 [14] x y)) + // match: (DiffWithFloorWithPrecisionMaskedFloat32x4 [a] x mask) + // result: (VREDUCEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpGreaterInt8x64(v *Value) bool { +func rewriteValueAMD64_OpDiffWithFloorWithPrecisionMaskedFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := 
v.Block - typ := &b.Func.Config.Types - // match: (GreaterInt8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPB512 [14] x y)) + // match: (DiffWithFloorWithPrecisionMaskedFloat32x8 [a] x mask) + // result: (VREDUCEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpGreaterUint16x16(v *Value) bool { +func rewriteValueAMD64_OpDiffWithFloorWithPrecisionMaskedFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPUW256 [14] x y)) + // match: (DiffWithFloorWithPrecisionMaskedFloat64x2 [a] x mask) + // result: (VREDUCEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpGreaterUint16x32(v *Value) bool { +func rewriteValueAMD64_OpDiffWithFloorWithPrecisionMaskedFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPUW512 [14] x y)) + // match: (DiffWithFloorWithPrecisionMaskedFloat64x4 [a] x mask) + // result: (VREDUCEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpGreaterUint16x8(v *Value) bool { +func rewriteValueAMD64_OpDiffWithFloorWithPrecisionMaskedFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPUW128 [14] x y)) + // match: (DiffWithFloorWithPrecisionMaskedFloat64x8 [a] x mask) + // result: (VREDUCEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpGreaterUint32x16(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat32x16(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPUD512 [14] x y)) + 
// match: (DiffWithRoundWithPrecisionFloat32x16 [a] x) + // result: (VREDUCEPS512 [a+0] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VREDUCEPS512) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpGreaterUint32x4(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat32x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPUD128 [14] x y)) + // match: (DiffWithRoundWithPrecisionFloat32x4 [a] x) + // result: (VREDUCEPS128 [a+0] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VREDUCEPS128) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpGreaterUint32x8(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat32x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPUD256 [14] x y)) + // match: (DiffWithRoundWithPrecisionFloat32x8 [a] x) + // result: (VREDUCEPS256 [a+0] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VREDUCEPS256) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpGreaterUint64x2(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat64x2(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPUQ128 [14] x y)) + // match: (DiffWithRoundWithPrecisionFloat64x2 [a] x) + // result: (VREDUCEPD128 [a+0] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VREDUCEPD128) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpGreaterUint64x4(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat64x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPUQ256 [14] x y)) + // match: (DiffWithRoundWithPrecisionFloat64x4 [a] x) + // result: (VREDUCEPD256 [a+0] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VREDUCEPD256) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpGreaterUint64x8(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat64x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPUQ512 [14] x y)) + // match: 
(DiffWithRoundWithPrecisionFloat64x8 [a] x) + // result: (VREDUCEPD512 [a+0] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VREDUCEPD512) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpGreaterUint8x16(v *Value) bool { +func rewriteValueAMD64_OpDiffWithRoundWithPrecisionMaskedFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPUB128 [14] x y)) + // match: (DiffWithRoundWithPrecisionMaskedFloat32x16 [a] x mask) + // result: (VREDUCEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpGreaterUint8x32(v *Value) bool { +func rewriteValueAMD64_OpDiffWithRoundWithPrecisionMaskedFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPUB256 [14] x y)) + // match: (DiffWithRoundWithPrecisionMaskedFloat32x4 [a] x mask) + // result: (VREDUCEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpGreaterUint8x64(v *Value) bool { +func rewriteValueAMD64_OpDiffWithRoundWithPrecisionMaskedFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPUB512 [14] x y)) + // match: (DiffWithRoundWithPrecisionMaskedFloat32x8 [a] x mask) + // result: (VREDUCEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpHasCPUFeature(v *Value) bool { +func rewriteValueAMD64_OpDiffWithRoundWithPrecisionMaskedFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (HasCPUFeature {s}) - // result: (SETNE (CMPLconst [0] (LoweredHasCPUFeature {s}))) + // match: (DiffWithRoundWithPrecisionMaskedFloat64x2 [a] x mask) + // result: (VREDUCEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) for { - s := auxToSym(v.Aux) - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v0.AuxInt = int32ToAuxInt(0) - 
v1 := b.NewValue0(v.Pos, OpAMD64LoweredHasCPUFeature, typ.UInt64) - v1.Aux = symToAux(s) - v0.AddArg(v1) - v.AddArg(v0) + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpIsInBounds(v *Value) bool { +func rewriteValueAMD64_OpDiffWithRoundWithPrecisionMaskedFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (IsInBounds idx len) - // result: (SETB (CMPQ idx len)) + // match: (DiffWithRoundWithPrecisionMaskedFloat64x4 [a] x mask) + // result: (VREDUCEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) for { - idx := v_0 - len := v_1 - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(idx, len) - v.AddArg(v0) + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpIsNanFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpDiffWithRoundWithPrecisionMaskedFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (IsNanFloat32x16 x y) - // result: (VPMOVMToVec32x16 (VCMPPS512 [3] x y)) + // match: (DiffWithRoundWithPrecisionMaskedFloat64x8 [a] x mask) + // result: (VREDUCEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpIsNanFloat32x4(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat32x16(v *Value) bool { v_0 := v.Args[0] - // match: (IsNanFloat32x4 x y) - // result: (VCMPPS128 [3] x y) + // match: (DiffWithTruncWithPrecisionFloat32x16 [a] x) + // result: (VREDUCEPS512 [a+3] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(3) - v.AddArg2(x, y) + v.reset(OpAMD64VREDUCEPS512) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpIsNanFloat32x8(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat32x4(v *Value) bool { v_0 := v.Args[0] - // match: (IsNanFloat32x8 x y) - // result: (VCMPPS256 [3] x y) + // match: (DiffWithTruncWithPrecisionFloat32x4 [a] x) + // result: (VREDUCEPS128 [a+3] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(3) - v.AddArg2(x, y) + v.reset(OpAMD64VREDUCEPS128) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpIsNanFloat64x2(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat32x8(v *Value) bool { v_0 := v.Args[0] - // match: (IsNanFloat64x2 x y) - // result: (VCMPPD128 [3] x y) + // match: (DiffWithTruncWithPrecisionFloat32x8 [a] x) + // result: (VREDUCEPS256 [a+3] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPD128) - v.AuxInt = 
int8ToAuxInt(3) - v.AddArg2(x, y) + v.reset(OpAMD64VREDUCEPS256) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpIsNanFloat64x4(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat64x2(v *Value) bool { v_0 := v.Args[0] - // match: (IsNanFloat64x4 x y) - // result: (VCMPPD256 [3] x y) + // match: (DiffWithTruncWithPrecisionFloat64x2 [a] x) + // result: (VREDUCEPD128 [a+3] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(3) - v.AddArg2(x, y) + v.reset(OpAMD64VREDUCEPD128) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpIsNanFloat64x8(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat64x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (IsNanFloat64x8 x y) - // result: (VPMOVMToVec64x8 (VCMPPD512 [3] x y)) + // match: (DiffWithTruncWithPrecisionFloat64x4 [a] x) + // result: (VREDUCEPD256 [a+3] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VREDUCEPD256) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpIsNonNil(v *Value) bool { +func rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat64x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (IsNonNil p) - // result: (SETNE (TESTQ p p)) + // match: (DiffWithTruncWithPrecisionFloat64x8 [a] x) + // result: (VREDUCEPD512 [a+3] x) for { - p := v_0 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags) - v0.AddArg2(p, p) - v.AddArg(v0) + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD512) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpIsSliceInBounds(v *Value) bool { +func rewriteValueAMD64_OpDiffWithTruncWithPrecisionMaskedFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (IsSliceInBounds idx len) - // result: (SETBE (CMPQ idx len)) + // match: (DiffWithTruncWithPrecisionMaskedFloat32x16 [a] x mask) + // result: (VREDUCEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) for { - idx := v_0 - len := v_1 - v.reset(OpAMD64SETBE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(idx, len) - v.AddArg(v0) + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLeq16(v *Value) bool { +func rewriteValueAMD64_OpDiffWithTruncWithPrecisionMaskedFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Leq16 x y) - // result: (SETLE (CMPW x y)) + // match: (DiffWithTruncWithPrecisionMaskedFloat32x4 [a] x mask) + // result: (VREDUCEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64SETLE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLeq16U(v *Value) bool { 
+func rewriteValueAMD64_OpDiffWithTruncWithPrecisionMaskedFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Leq16U x y) - // result: (SETBE (CMPW x y)) + // match: (DiffWithTruncWithPrecisionMaskedFloat32x8 [a] x mask) + // result: (VREDUCEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64SETBE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLeq32(v *Value) bool { +func rewriteValueAMD64_OpDiffWithTruncWithPrecisionMaskedFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Leq32 x y) - // result: (SETLE (CMPL x y)) + // match: (DiffWithTruncWithPrecisionMaskedFloat64x2 [a] x mask) + // result: (VREDUCEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64SETLE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLeq32F(v *Value) bool { +func rewriteValueAMD64_OpDiffWithTruncWithPrecisionMaskedFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Leq32F x y) - // result: (SETGEF (UCOMISS y x)) + // match: (DiffWithTruncWithPrecisionMaskedFloat64x4 [a] x mask) + // result: (VREDUCEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64SETGEF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLeq32U(v *Value) bool { +func rewriteValueAMD64_OpDiffWithTruncWithPrecisionMaskedFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Leq32U x y) - // result: (SETBE (CMPL x y)) + // match: (DiffWithTruncWithPrecisionMaskedFloat64x8 [a] x mask) + // result: (VREDUCEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64SETBE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLeq64(v *Value) bool { +func rewriteValueAMD64_OpDiv16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Leq64 x y) - // result: (SETLE (CMPQ x y)) + typ := &b.Func.Config.Types + // match: (Div16 [a] x y) + // result: (Select0 (DIVW [a] x y)) for { + a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64SETLE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) + v0.AuxInt = boolToAuxInt(a) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func 
rewriteValueAMD64_OpLeq64F(v *Value) bool { +func rewriteValueAMD64_OpDiv16u(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Leq64F x y) - // result: (SETGEF (UCOMISD y x)) + typ := &b.Func.Config.Types + // match: (Div16u x y) + // result: (Select0 (DIVWU x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETGEF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) - v0.AddArg2(y, x) + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLeq64U(v *Value) bool { +func rewriteValueAMD64_OpDiv32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Leq64U x y) - // result: (SETBE (CMPQ x y)) + typ := &b.Func.Config.Types + // match: (Div32 [a] x y) + // result: (Select0 (DIVL [a] x y)) for { + a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64SETBE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) + v0.AuxInt = boolToAuxInt(a) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLeq8(v *Value) bool { +func rewriteValueAMD64_OpDiv32u(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Leq8 x y) - // result: (SETLE (CMPB x y)) + typ := &b.Func.Config.Types + // match: (Div32u x y) + // result: (Select0 (DIVLU x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETLE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32)) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLeq8U(v *Value) bool { +func rewriteValueAMD64_OpDiv64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Leq8U x y) - // result: (SETBE (CMPB x y)) + typ := &b.Func.Config.Types + // match: (Div64 [a] x y) + // result: (Select0 (DIVQ [a] x y)) for { + a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64SETBE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64)) + v0.AuxInt = boolToAuxInt(a) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLess16(v *Value) bool { +func rewriteValueAMD64_OpDiv64u(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less16 x y) - // result: (SETL (CMPW x y)) + typ := &b.Func.Config.Types + // match: (Div64u x y) + // result: (Select0 (DIVQU x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETL) - v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLess16U(v *Value) bool { +func rewriteValueAMD64_OpDiv8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less16U x y) - // result: (SETB (CMPW x y)) + typ := &b.Func.Config.Types + // match: (Div8 x y) + // result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y))) for { x := v_0 y := v_1 - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg2(x, y) + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) + v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) + v2.AddArg(y) 
+ v0.AddArg2(v1, v2) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLess32(v *Value) bool { +func rewriteValueAMD64_OpDiv8u(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less32 x y) - // result: (SETL (CMPL x y)) + typ := &b.Func.Config.Types + // match: (Div8u x y) + // result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) for { x := v_0 y := v_1 - v.reset(OpAMD64SETL) - v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg2(x, y) + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) + v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) + v2.AddArg(y) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLess32F(v *Value) bool { +func rewriteValueAMD64_OpDivMaskedFloat32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less32F x y) - // result: (SETGF (UCOMISS y x)) + // match: (DivMaskedFloat32x16 x y mask) + // result: (VDIVPSMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETGF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VDIVPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLess32U(v *Value) bool { +func rewriteValueAMD64_OpDivMaskedFloat32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less32U x y) - // result: (SETB (CMPL x y)) + // match: (DivMaskedFloat32x4 x y mask) + // result: (VDIVPSMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VDIVPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLess64(v *Value) bool { +func rewriteValueAMD64_OpDivMaskedFloat32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less64 x y) - // result: (SETL (CMPQ x y)) + // match: (DivMaskedFloat32x8 x y mask) + // result: (VDIVPSMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETL) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VDIVPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLess64F(v *Value) bool { +func rewriteValueAMD64_OpDivMaskedFloat64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less64F x y) - // result: (SETGF (UCOMISD y x)) + // match: (DivMaskedFloat64x2 x y mask) + // result: (VDIVPDMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETGF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VDIVPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLess64U(v *Value) bool { +func rewriteValueAMD64_OpDivMaskedFloat64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := 
v.Block - // match: (Less64U x y) - // result: (SETB (CMPQ x y)) + // match: (DivMaskedFloat64x4 x y mask) + // result: (VDIVPDMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VDIVPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLess8(v *Value) bool { +func rewriteValueAMD64_OpDivMaskedFloat64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less8 x y) - // result: (SETL (CMPB x y)) + // match: (DivMaskedFloat64x8 x y mask) + // result: (VDIVPDMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETL) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VDIVPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLess8U(v *Value) bool { +func rewriteValueAMD64_OpDotProdBroadcastFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Less8U x y) - // result: (SETB (CMPB x y)) + // match: (DotProdBroadcastFloat64x2 x y) + // result: (VDPPD128 [127] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VDPPD128) + v.AuxInt = int8ToAuxInt(127) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpLessEqualFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpEq16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualFloat32x16 x y) - // result: (VPMOVMToVec32x16 (VCMPPS512 [2] x y)) + // match: (Eq16 x y) + // result: (SETEQ (CMPW x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessEqualFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpEq32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (LessEqualFloat32x4 x y) - // result: (VCMPPS128 [2] x y) + b := v.Block + // match: (Eq32 x y) + // result: (SETEQ (CMPL x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(2) - v.AddArg2(x, y) + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessEqualFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpEq32F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (LessEqualFloat32x8 x y) - // result: (VCMPPS256 [2] x y) + b := v.Block + // match: (Eq32F x y) + // result: (SETEQF (UCOMISS x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(2) - v.AddArg2(x, y) + v.reset(OpAMD64SETEQF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessEqualFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpEq64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (LessEqualFloat64x2 x y) - // result: (VCMPPD128 [2] x y) + b := v.Block + // match: (Eq64 x y) + // result: 
(SETEQ (CMPQ x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(2) - v.AddArg2(x, y) + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessEqualFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpEq64F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (LessEqualFloat64x4 x y) - // result: (VCMPPD256 [2] x y) + b := v.Block + // match: (Eq64F x y) + // result: (SETEQF (UCOMISD x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(2) - v.AddArg2(x, y) + v.reset(OpAMD64SETEQF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessEqualFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpEq8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualFloat64x8 x y) - // result: (VPMOVMToVec64x8 (VCMPPD512 [2] x y)) + // match: (Eq8 x y) + // result: (SETEQ (CMPB x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessEqualInt16x16(v *Value) bool { +func rewriteValueAMD64_OpEqB(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPW256 [2] x y)) + // match: (EqB x y) + // result: (SETEQ (CMPB x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessEqualInt16x32(v *Value) bool { +func rewriteValueAMD64_OpEqPtr(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPW512 [2] x y)) + // match: (EqPtr x y) + // result: (SETEQ (CMPQ x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessEqualInt16x8(v *Value) bool { +func rewriteValueAMD64_OpEqualFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessEqualInt16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPW128 [2] x y)) + // match: (EqualFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [0] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessEqualInt32x16(v *Value) bool { +func rewriteValueAMD64_OpEqualFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPD512 [2] x y)) + // match: 
(EqualFloat32x4 x y) + // result: (VCMPPS128 [0] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VCMPPS128) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpLessEqualInt32x4(v *Value) bool { +func rewriteValueAMD64_OpEqualFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPD128 [2] x y)) + // match: (EqualFloat32x8 x y) + // result: (VCMPPS256 [0] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VCMPPS256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpLessEqualInt32x8(v *Value) bool { +func rewriteValueAMD64_OpEqualFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPD256 [2] x y)) + // match: (EqualFloat64x2 x y) + // result: (VCMPPD128 [0] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VCMPPD128) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpLessEqualInt64x2(v *Value) bool { +func rewriteValueAMD64_OpEqualFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPQ128 [2] x y)) + // match: (EqualFloat64x4 x y) + // result: (VCMPPD256 [0] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VCMPPD256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpLessEqualInt64x4(v *Value) bool { +func rewriteValueAMD64_OpEqualFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessEqualInt64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPQ256 [2] x y)) + // match: (EqualFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [0] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessEqualInt64x8(v *Value) bool { +func rewriteValueAMD64_OpEqualInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessEqualInt64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPQ512 [2] x y)) + // match: (EqualInt16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPW512 [0] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func 
rewriteValueAMD64_OpLessEqualInt8x16(v *Value) bool { +func rewriteValueAMD64_OpEqualInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessEqualInt8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPB128 [2] x y)) + // match: (EqualInt32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPD512 [0] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessEqualInt8x32(v *Value) bool { +func rewriteValueAMD64_OpEqualInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessEqualInt8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPB256 [2] x y)) + // match: (EqualInt64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPQ512 [0] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessEqualInt8x64(v *Value) bool { +func rewriteValueAMD64_OpEqualInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessEqualInt8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPB512 [2] x y)) + // match: (EqualInt8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPB512 [0] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = int8ToAuxInt(0) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessEqualUint16x16(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedFloat32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessEqualUint16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPUW256 [2] x y)) + // match: (EqualMaskedFloat32x16 x y mask) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [0] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessEqualUint16x32(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedFloat32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessEqualUint16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPUW512 [2] x y)) + // match: (EqualMaskedFloat32x4 x y mask) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [0] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessEqualUint16x8(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedFloat32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessEqualUint16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPUW128 [2] x y)) + // match: (EqualMaskedFloat32x8 x y mask) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [0] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessEqualUint32x16(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedFloat64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessEqualUint32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPUD512 [2] x y)) + // match: (EqualMaskedFloat64x2 x y mask) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [0] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessEqualUint32x4(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedFloat64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessEqualUint32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPUD128 [2] x y)) + // match: (EqualMaskedFloat64x4 x y mask) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [0] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessEqualUint32x8(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedFloat64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessEqualUint32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPUD256 [2] x y)) + // match: (EqualMaskedFloat64x8 x y mask) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [0] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + 
v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessEqualUint64x2(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedInt16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessEqualUint64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPUQ128 [2] x y)) + // match: (EqualMaskedInt16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [0] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessEqualUint64x4(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedInt16x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessEqualUint64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPUQ256 [2] x y)) + // match: (EqualMaskedInt16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [0] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessEqualUint64x8(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedInt16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessEqualUint64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPUQ512 [2] x y)) + // match: (EqualMaskedInt16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [0] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessEqualUint8x16(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedInt32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessEqualUint8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPUB128 [2] x y)) + // match: (EqualMaskedInt32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [0] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func 
rewriteValueAMD64_OpLessEqualUint8x32(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedInt32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessEqualUint8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPUB256 [2] x y)) + // match: (EqualMaskedInt32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [0] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessEqualUint8x64(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedInt32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessEqualUint8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPUB512 [2] x y)) + // match: (EqualMaskedInt32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [0] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedInt64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessFloat32x16 x y) - // result: (VPMOVMToVec32x16 (VCMPPS512 [1] x y)) + // match: (EqualMaskedInt64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [0] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedInt64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (LessFloat32x4 x y) - // result: (VCMPPS128 [1] x y) + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualMaskedInt64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [0] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(1) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedInt64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: 
(LessFloat32x8 x y) - // result: (VCMPPS256 [1] x y) + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualMaskedInt64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [0] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(1) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedInt8x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (LessFloat64x2 x y) - // result: (VCMPPD128 [1] x y) + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualMaskedInt8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [0] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(1) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedInt8x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (LessFloat64x4 x y) - // result: (VCMPPD256 [1] x y) + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualMaskedInt8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [0] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(1) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedInt8x64(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessFloat64x8 x y) - // result: (VPMOVMToVec64x8 (VCMPPD512 [1] x y)) + // match: (EqualMaskedInt8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [0] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessInt16x16(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedUint16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessInt16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPW256 [1] x y)) + // match: (EqualMaskedUint16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [0] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 + mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, 
typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(x, y) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessInt16x32(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedUint16x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessInt16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPW512 [1] x y)) + // match: (EqualMaskedUint16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [0] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 + mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(x, y) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessInt16x8(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedUint16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessInt16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPW128 [1] x y)) + // match: (EqualMaskedUint16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [0] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 + mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(x, y) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessInt32x16(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedUint32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessInt32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPD512 [1] x y)) + // match: (EqualMaskedUint32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [0] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 + mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(x, y) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessInt32x4(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedUint32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessInt32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPD128 [1] x y)) + // match: (EqualMaskedUint32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [0] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 + mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(x, y) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, 
v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessInt32x8(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedUint32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessInt32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPD256 [1] x y)) + // match: (EqualMaskedUint32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [0] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 + mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(x, y) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessInt64x2(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedUint64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessInt64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPQ128 [1] x y)) + // match: (EqualMaskedUint64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [0] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 + mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(x, y) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessInt64x4(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedUint64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessInt64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPQ256 [1] x y)) + // match: (EqualMaskedUint64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [0] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 + mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(x, y) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessInt64x8(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedUint64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessInt64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPQ512 [1] x y)) + // match: (EqualMaskedUint64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [0] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 + mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(x, y) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessInt8x16(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedUint8x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessInt8x16 x y) 
- // result: (VPMOVMToVec8x16 (VPCMPB128 [1] x y)) + // match: (EqualMaskedUint8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [0] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 + mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(x, y) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessInt8x32(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedUint8x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessInt8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPB256 [1] x y)) + // match: (EqualMaskedUint8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [0] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 + mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(x, y) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessInt8x64(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedUint8x64(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessInt8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPB512 [1] x y)) + // match: (EqualMaskedUint8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [0] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 + mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(x, y) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessUint16x16(v *Value) bool { +func rewriteValueAMD64_OpEqualUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessUint16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPUW256 [1] x y)) + // match: (EqualUint16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPUW256 [0] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(0) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessUint16x32(v *Value) bool { +func rewriteValueAMD64_OpEqualUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessUint16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPUW512 [1] x y)) + // match: (EqualUint16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPUW512 [0] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(0) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessUint16x8(v *Value) bool { +func rewriteValueAMD64_OpEqualUint16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := 
&b.Func.Config.Types - // match: (LessUint16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPUW128 [1] x y)) + // match: (EqualUint16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPUW128 [0] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(0) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessUint32x16(v *Value) bool { +func rewriteValueAMD64_OpEqualUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessUint32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPUD512 [1] x y)) + // match: (EqualUint32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [0] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(0) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessUint32x4(v *Value) bool { +func rewriteValueAMD64_OpEqualUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessUint32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPUD128 [1] x y)) + // match: (EqualUint32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPUD128 [0] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(0) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessUint32x8(v *Value) bool { +func rewriteValueAMD64_OpEqualUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessUint32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPUD256 [1] x y)) + // match: (EqualUint32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPUD256 [0] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(0) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessUint64x2(v *Value) bool { +func rewriteValueAMD64_OpEqualUint64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessUint64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPUQ128 [1] x y)) + // match: (EqualUint64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPUQ128 [0] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(0) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessUint64x4(v *Value) bool { +func rewriteValueAMD64_OpEqualUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessUint64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPUQ256 [1] x y)) + // match: (EqualUint64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPUQ256 [0] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(0) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessUint64x8(v *Value) bool { +func rewriteValueAMD64_OpEqualUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessUint64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPUQ512 [1] x y)) + // match: 
(EqualUint64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPUQ512 [0] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(0) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessUint8x16(v *Value) bool { +func rewriteValueAMD64_OpEqualUint8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessUint8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPUB128 [1] x y)) + // match: (EqualUint8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPUB128 [0] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(0) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessUint8x32(v *Value) bool { +func rewriteValueAMD64_OpEqualUint8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessUint8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPUB256 [1] x y)) + // match: (EqualUint8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPUB256 [0] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(0) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessUint8x64(v *Value) bool { +func rewriteValueAMD64_OpEqualUint8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessUint8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPUB512 [1] x y)) + // match: (EqualUint8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [0] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(0) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLoad(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (Load ptr mem) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (MOVQload ptr mem) - for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64MOVQload) - v.AddArg2(ptr, mem) - return true - } - // match: (Load ptr mem) - // cond: is32BitInt(t) - // result: (MOVLload ptr mem) - for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64MOVLload) - v.AddArg2(ptr, mem) - return true - } - // match: (Load ptr mem) - // cond: is16BitInt(t) - // result: (MOVWload ptr mem) - for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64MOVWload) - v.AddArg2(ptr, mem) - return true - } - // match: (Load ptr mem) - // cond: (t.IsBoolean() || is8BitInt(t)) - // result: (MOVBload ptr mem) - for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(t.IsBoolean() || is8BitInt(t)) { - break - } - v.reset(OpAMD64MOVBload) - v.AddArg2(ptr, mem) - return true - } - // match: (Load ptr mem) - // cond: is32BitFloat(t) - // result: (MOVSSload ptr mem) - for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(is32BitFloat(t)) { - break - } - v.reset(OpAMD64MOVSSload) - v.AddArg2(ptr, mem) - return true - } - // match: (Load ptr mem) - // cond: is64BitFloat(t) - // result: (MOVSDload ptr mem) - for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(is64BitFloat(t)) { - break - } - v.reset(OpAMD64MOVSDload) - v.AddArg2(ptr, mem) - 
return true - } - // match: (Load ptr mem) - // cond: t.Size() == 16 - // result: (VMOVDQUload128 ptr mem) - for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(t.Size() == 16) { - break - } - v.reset(OpAMD64VMOVDQUload128) - v.AddArg2(ptr, mem) - return true - } - // match: (Load ptr mem) - // cond: t.Size() == 32 - // result: (VMOVDQUload256 ptr mem) - for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(t.Size() == 32) { - break - } - v.reset(OpAMD64VMOVDQUload256) - v.AddArg2(ptr, mem) - return true - } - // match: (Load ptr mem) - // cond: t.Size() == 64 - // result: (VMOVDQUload512 ptr mem) - for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(t.Size() == 64) { - break - } - v.reset(OpAMD64VMOVDQUload512) - v.AddArg2(ptr, mem) - return true - } - return false -} -func rewriteValueAMD64_OpLocalAddr(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LocalAddr {sym} base mem) - // cond: t.Elem().HasPointers() - // result: (LEAQ {sym} (SPanchored base mem)) - for { - t := v.Type - sym := auxToSym(v.Aux) - base := v_0 - mem := v_1 - if !(t.Elem().HasPointers()) { - break - } - v.reset(OpAMD64LEAQ) - v.Aux = symToAux(sym) - v0 := b.NewValue0(v.Pos, OpSPanchored, typ.Uintptr) - v0.AddArg2(base, mem) - v.AddArg(v0) - return true - } - // match: (LocalAddr {sym} base _) - // cond: !t.Elem().HasPointers() - // result: (LEAQ {sym} base) - for { - t := v.Type - sym := auxToSym(v.Aux) - base := v_0 - if !(!t.Elem().HasPointers()) { - break - } - v.reset(OpAMD64LEAQ) - v.Aux = symToAux(sym) - v.AddArg(base) - return true - } - return false -} -func rewriteValueAMD64_OpLsh16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Lsh16x16 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) - for { - t := v.Type - x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v2.AuxInt = int16ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) - return true - } - // match: (Lsh16x16 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) - for { - x := v_0 - y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) - return true - } - return false -} -func rewriteValueAMD64_OpLsh16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Lsh16x32 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) - for { - t := v.Type - x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) - return true - } - // match: (Lsh16x32 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) - for { - x := v_0 - y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) - return true - } - return false -} -func rewriteValueAMD64_OpLsh16x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Lsh16x64 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) - for { - t := v.Type 
- x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) - return true - } - // match: (Lsh16x64 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +func rewriteValueAMD64_OpFMA(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FMA x y z) + // result: (VFMADD231SD z x y) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + z := v_2 + v.reset(OpAMD64VFMADD231SD) + v.AddArg3(z, x, y) return true } - return false } -func rewriteValueAMD64_OpLsh16x8(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpFloor(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (Lsh16x8 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) + // match: (Floor x) + // result: (ROUNDSD [1] x) for { - t := v.Type x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v2.AuxInt = int8ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.reset(OpAMD64ROUNDSD) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) return true } - // match: (Lsh16x8 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpFloorFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorFloat32x4 x) + // result: (VROUNDPS128 [1] x) for { x := v_0 - y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + v.reset(OpAMD64VROUNDPS128) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) return true } - return false } -func rewriteValueAMD64_OpLsh32x16(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpFloorFloat32x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (Lsh32x16 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) + // match: (FloorFloat32x8 x) + // result: (VROUNDPS256 [1] x) for { - t := v.Type x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v2.AuxInt = int16ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.reset(OpAMD64VROUNDPS256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) return true } - // match: (Lsh32x16 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpFloorFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorFloat64x2 x) + // result: (VROUNDPD128 [1] x) for { x := v_0 - y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + v.reset(OpAMD64VROUNDPD128) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) return true } - return false } -func rewriteValueAMD64_OpLsh32x32(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpFloorFloat64x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (Lsh32x32 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) + // match: (FloorFloat64x4 x) + 
// result: (VROUNDPD256 [1] x) for { - t := v.Type x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.reset(OpAMD64VROUNDPD256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) return true } - // match: (Lsh32x32 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpFloorWithPrecisionFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorWithPrecisionFloat32x16 [a] x) + // result: (VRNDSCALEPS512 [a+1] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + v.reset(OpAMD64VRNDSCALEPS512) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) return true } - return false } -func rewriteValueAMD64_OpLsh32x64(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpFloorWithPrecisionFloat32x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (Lsh32x64 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) + // match: (FloorWithPrecisionFloat32x4 [a] x) + // result: (VRNDSCALEPS128 [a+1] x) for { - t := v.Type + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.reset(OpAMD64VRNDSCALEPS128) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) return true } - // match: (Lsh32x64 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpFloorWithPrecisionFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorWithPrecisionFloat32x8 [a] x) + // result: (VRNDSCALEPS256 [a+1] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + v.reset(OpAMD64VRNDSCALEPS256) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) return true } - return false } -func rewriteValueAMD64_OpLsh32x8(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpFloorWithPrecisionFloat64x2(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (Lsh32x8 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) + // match: (FloorWithPrecisionFloat64x2 [a] x) + // result: (VRNDSCALEPD128 [a+1] x) for { - t := v.Type + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v2.AuxInt = int8ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.reset(OpAMD64VRNDSCALEPD128) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) return true } - // match: (Lsh32x8 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpFloorWithPrecisionFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorWithPrecisionFloat64x4 [a] x) + // result: (VRNDSCALEPD256 [a+1] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - if 
!(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + v.reset(OpAMD64VRNDSCALEPD256) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) return true } - return false } -func rewriteValueAMD64_OpLsh64x16(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpFloorWithPrecisionFloat64x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (Lsh64x16 x y) - // cond: !shiftIsBounded(v) - // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPWconst y [64]))) + // match: (FloorWithPrecisionFloat64x8 [a] x) + // result: (VRNDSCALEPD512 [a+1] x) for { - t := v.Type + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDQ) - v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v2.AuxInt = int16ToAuxInt(64) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.reset(OpAMD64VRNDSCALEPD512) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) return true } - // match: (Lsh64x16 x y) - // cond: shiftIsBounded(v) - // result: (SHLQ x y) +} +func rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (FloorWithPrecisionMaskedFloat32x16 [a] x mask) + // result: (VRNDSCALEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLQ) - v.AddArg2(x, y) + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - return false } -func rewriteValueAMD64_OpLsh64x32(v *Value) bool { +func rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh64x32 x y) - // cond: !shiftIsBounded(v) - // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPLconst y [64]))) + // match: (FloorWithPrecisionMaskedFloat32x4 [a] x mask) + // result: (VRNDSCALEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) for { - t := v.Type + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDQ) - v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(64) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - // match: (Lsh64x32 x y) - // cond: shiftIsBounded(v) - // result: (SHLQ x y) +} +func rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (FloorWithPrecisionMaskedFloat32x8 [a] x mask) + // result: (VRNDSCALEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLQ) - v.AddArg2(x, y) + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - return false } -func rewriteValueAMD64_OpLsh64x64(v *Value) bool { +func 
rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh64x64 x y) - // cond: !shiftIsBounded(v) - // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPQconst y [64]))) + // match: (FloorWithPrecisionMaskedFloat64x2 [a] x mask) + // result: (VRNDSCALEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) for { - t := v.Type + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDQ) - v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(64) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - // match: (Lsh64x64 x y) - // cond: shiftIsBounded(v) - // result: (SHLQ x y) +} +func rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (FloorWithPrecisionMaskedFloat64x4 [a] x mask) + // result: (VRNDSCALEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLQ) - v.AddArg2(x, y) + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - return false } -func rewriteValueAMD64_OpLsh64x8(v *Value) bool { +func rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh64x8 x y) - // cond: !shiftIsBounded(v) - // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPBconst y [64]))) + // match: (FloorWithPrecisionMaskedFloat64x8 [a] x mask) + // result: (VRNDSCALEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) for { - t := v.Type + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDQ) - v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v2.AuxInt = int8ToAuxInt(64) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - // match: (Lsh64x8 x y) - // cond: shiftIsBounded(v) - // result: (SHLQ x y) +} +func rewriteValueAMD64_OpFusedMultiplyAddMaskedFloat32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (FusedMultiplyAddMaskedFloat32x16 x y z mask) + // result: (VFMADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLQ) - v.AddArg2(x, y) + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD213PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - return false } -func rewriteValueAMD64_OpLsh8x16(v *Value) bool { +func rewriteValueAMD64_OpFusedMultiplyAddMaskedFloat32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := 
v.Args[0] b := v.Block - // match: (Lsh8x16 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) + // match: (FusedMultiplyAddMaskedFloat32x4 x y z mask) + // result: (VFMADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v2.AuxInt = int16ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD213PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - // match: (Lsh8x16 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpFusedMultiplyAddMaskedFloat32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (FusedMultiplyAddMaskedFloat32x8 x y z mask) + // result: (VFMADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD213PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - return false } -func rewriteValueAMD64_OpLsh8x32(v *Value) bool { +func rewriteValueAMD64_OpFusedMultiplyAddMaskedFloat64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh8x32 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) + // match: (FusedMultiplyAddMaskedFloat64x2 x y z mask) + // result: (VFMADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD213PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - // match: (Lsh8x32 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpFusedMultiplyAddMaskedFloat64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (FusedMultiplyAddMaskedFloat64x4 x y z mask) + // result: (VFMADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD213PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - return false } -func rewriteValueAMD64_OpLsh8x64(v *Value) bool { +func rewriteValueAMD64_OpFusedMultiplyAddMaskedFloat64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh8x64 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) + // match: 
(FusedMultiplyAddMaskedFloat64x8 x y z mask) + // result: (VFMADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD213PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - // match: (Lsh8x64 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpFusedMultiplyAddSubMaskedFloat32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (FusedMultiplyAddSubMaskedFloat32x16 x y z mask) + // result: (VFMADDSUB213PSMasked512 x y z (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB213PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - return false } -func rewriteValueAMD64_OpLsh8x8(v *Value) bool { +func rewriteValueAMD64_OpFusedMultiplyAddSubMaskedFloat32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh8x8 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) + // match: (FusedMultiplyAddSubMaskedFloat32x4 x y z mask) + // result: (VFMADDSUB213PSMasked128 x y z (VPMOVVec32x4ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v2.AuxInt = int8ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB213PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - // match: (Lsh8x8 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpFusedMultiplyAddSubMaskedFloat32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (FusedMultiplyAddSubMaskedFloat32x8 x y z mask) + // result: (VFMADDSUB213PSMasked256 x y z (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB213PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - return false } -func rewriteValueAMD64_OpMaskedAbsoluteInt16x16(v *Value) bool { +func rewriteValueAMD64_OpFusedMultiplyAddSubMaskedFloat64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt16x16 x mask) - // result: (VPABSWMasked256 x (VPMOVVec16x16ToM mask)) + // match: (FusedMultiplyAddSubMaskedFloat64x2 x y z mask) + // result: (VFMADDSUB213PDMasked128 x y z (VPMOVVec64x2ToM mask)) for { x := v_0 - mask := v_1 - 
v.reset(OpAMD64VPABSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB213PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg2(x, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt16x32(v *Value) bool { +func rewriteValueAMD64_OpFusedMultiplyAddSubMaskedFloat64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt16x32 x mask) - // result: (VPABSWMasked512 x (VPMOVVec16x32ToM mask)) + // match: (FusedMultiplyAddSubMaskedFloat64x4 x y z mask) + // result: (VFMADDSUB213PDMasked256 x y z (VPMOVVec64x4ToM mask)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB213PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg2(x, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt16x8(v *Value) bool { +func rewriteValueAMD64_OpFusedMultiplyAddSubMaskedFloat64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt16x8 x mask) - // result: (VPABSWMasked128 x (VPMOVVec16x8ToM mask)) + // match: (FusedMultiplyAddSubMaskedFloat64x8 x y z mask) + // result: (VFMADDSUB213PDMasked512 x y z (VPMOVVec64x8ToM mask)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB213PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg2(x, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt32x16(v *Value) bool { +func rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt32x16 x mask) - // result: (VPABSDMasked512 x (VPMOVVec32x16ToM mask)) + // match: (FusedMultiplySubAddMaskedFloat32x16 x y z mask) + // result: (VFMSUBADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSDMasked512) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD213PSMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg2(x, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt32x4(v *Value) bool { +func rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt32x4 x mask) - // result: (VPABSDMasked128 x (VPMOVVec32x4ToM mask)) + // match: (FusedMultiplySubAddMaskedFloat32x4 x y z mask) + // result: (VFMSUBADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSDMasked128) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD213PSMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg2(x, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt32x8(v *Value) bool { +func rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat32x8(v *Value) 
bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt32x8 x mask) - // result: (VPABSDMasked256 x (VPMOVVec32x8ToM mask)) + // match: (FusedMultiplySubAddMaskedFloat32x8 x y z mask) + // result: (VFMSUBADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSDMasked256) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD213PSMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg2(x, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt64x2(v *Value) bool { +func rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt64x2 x mask) - // result: (VPABSQMasked128 x (VPMOVVec64x2ToM mask)) + // match: (FusedMultiplySubAddMaskedFloat64x2 x y z mask) + // result: (VFMSUBADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSQMasked128) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD213PDMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg2(x, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt64x4(v *Value) bool { +func rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt64x4 x mask) - // result: (VPABSQMasked256 x (VPMOVVec64x4ToM mask)) + // match: (FusedMultiplySubAddMaskedFloat64x4 x y z mask) + // result: (VFMSUBADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSQMasked256) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD213PDMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg2(x, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt64x8(v *Value) bool { +func rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt64x8 x mask) - // result: (VPABSQMasked512 x (VPMOVVec64x8ToM mask)) + // match: (FusedMultiplySubAddMaskedFloat64x8 x y z mask) + // result: (VFMSUBADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSQMasked512) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD213PDMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg2(x, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt8x16(v *Value) bool { +func rewriteValueAMD64_OpGaloisFieldAffineTransformInversedMaskedUint8x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt8x16 x mask) - // result: (VPABSBMasked128 x (VPMOVVec8x16ToM mask)) + // match: (GaloisFieldAffineTransformInversedMaskedUint8x16 [a] x y mask) + // result: (VGF2P8AFFINEINVQBMasked128 [a] x y (VPMOVVec8x16ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSBMasked128) + y := v_1 + mask := v_2 + v.reset(OpAMD64VGF2P8AFFINEINVQBMasked128) + v.AuxInt = int8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) 
v0.AddArg(mask) - v.AddArg2(x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt8x32(v *Value) bool { +func rewriteValueAMD64_OpGaloisFieldAffineTransformInversedMaskedUint8x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt8x32 x mask) - // result: (VPABSBMasked256 x (VPMOVVec8x32ToM mask)) + // match: (GaloisFieldAffineTransformInversedMaskedUint8x32 [a] x y mask) + // result: (VGF2P8AFFINEINVQBMasked256 [a] x y (VPMOVVec8x32ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSBMasked256) + y := v_1 + mask := v_2 + v.reset(OpAMD64VGF2P8AFFINEINVQBMasked256) + v.AuxInt = int8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg2(x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt8x64(v *Value) bool { +func rewriteValueAMD64_OpGaloisFieldAffineTransformInversedMaskedUint8x64(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt8x64 x mask) - // result: (VPABSBMasked512 x (VPMOVVec8x64ToM mask)) + // match: (GaloisFieldAffineTransformInversedMaskedUint8x64 [a] x y mask) + // result: (VGF2P8AFFINEINVQBMasked512 [a] x y (VPMOVVec8x64ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSBMasked512) + y := v_1 + mask := v_2 + v.reset(OpAMD64VGF2P8AFFINEINVQBMasked512) + v.AuxInt = int8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg2(x, v0) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpGaloisFieldAffineTransformInversedUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GaloisFieldAffineTransformInversedUint8x16 [a] x y) + // result: (VGF2P8AFFINEINVQB128 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VGF2P8AFFINEINVQB128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpGaloisFieldAffineTransformInversedUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GaloisFieldAffineTransformInversedUint8x32 [a] x y) + // result: (VGF2P8AFFINEINVQB256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VGF2P8AFFINEINVQB256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpGaloisFieldAffineTransformInversedUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GaloisFieldAffineTransformInversedUint8x64 [a] x y) + // result: (VGF2P8AFFINEINVQB512 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VGF2P8AFFINEINVQB512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedAddFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpGaloisFieldAffineTransformMaskedUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddFloat32x16 x y mask) - // result: (VADDPSMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (GaloisFieldAffineTransformMaskedUint8x16 [a] x y mask) + // result: (VGF2P8AFFINEQBMasked128 [a] x y (VPMOVVec8x16ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VADDPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v.reset(OpAMD64VGF2P8AFFINEQBMasked128) + v.AuxInt = 
int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedAddFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpGaloisFieldAffineTransformMaskedUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddFloat32x4 x y mask) - // result: (VADDPSMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (GaloisFieldAffineTransformMaskedUint8x32 [a] x y mask) + // result: (VGF2P8AFFINEQBMasked256 [a] x y (VPMOVVec8x32ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VADDPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VGF2P8AFFINEQBMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedAddFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpGaloisFieldAffineTransformMaskedUint8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddFloat32x8 x y mask) - // result: (VADDPSMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (GaloisFieldAffineTransformMaskedUint8x64 [a] x y mask) + // result: (VGF2P8AFFINEQBMasked512 [a] x y (VPMOVVec8x64ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VADDPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VGF2P8AFFINEQBMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedAddFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpGaloisFieldAffineTransformUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GaloisFieldAffineTransformUint8x16 [a] x y) + // result: (VGF2P8AFFINEQB128 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VGF2P8AFFINEQB128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpGaloisFieldAffineTransformUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GaloisFieldAffineTransformUint8x32 [a] x y) + // result: (VGF2P8AFFINEQB256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VGF2P8AFFINEQB256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpGaloisFieldAffineTransformUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GaloisFieldAffineTransformUint8x64 [a] x y) + // result: (VGF2P8AFFINEQB512 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VGF2P8AFFINEQB512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpGaloisFieldMulMaskedUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddFloat64x2 x y mask) - // result: (VADDPDMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (GaloisFieldMulMaskedUint8x16 x y mask) + // result: (VGF2P8MULBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VADDPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VGF2P8MULBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) 
v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedAddFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpGaloisFieldMulMaskedUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddFloat64x4 x y mask) - // result: (VADDPDMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (GaloisFieldMulMaskedUint8x32 x y mask) + // result: (VGF2P8MULBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VADDPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VGF2P8MULBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedAddFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpGaloisFieldMulMaskedUint8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddFloat64x8 x y mask) - // result: (VADDPDMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (GaloisFieldMulMaskedUint8x64 x y mask) + // result: (VGF2P8MULBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VADDPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v.reset(OpAMD64VGF2P8MULBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedAddInt16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpGet128Float32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (Get128Float32x8 [a] x) + // result: (VEXTRACTF128128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VEXTRACTF128128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGet128Float64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (Get128Float64x4 [a] x) + // result: (VEXTRACTF128128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VEXTRACTF128128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGet128Int16x16(v *Value) bool { + v_0 := v.Args[0] + // match: (Get128Int16x16 [a] x) + // result: (VEXTRACTI128128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGet128Int32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (Get128Int32x8 [a] x) + // result: (VEXTRACTI128128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGet128Int64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (Get128Int64x4 [a] x) + // result: (VEXTRACTI128128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGet128Int8x32(v *Value) bool { + v_0 := v.Args[0] + // match: (Get128Int8x32 [a] x) + // result: (VEXTRACTI128128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGet128Uint16x16(v *Value) bool { + v_0 := v.Args[0] + // match: (Get128Uint16x16 [a] x) + // result: (VEXTRACTI128128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) 
+ v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGet128Uint32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (Get128Uint32x8 [a] x) + // result: (VEXTRACTI128128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGet128Uint64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (Get128Uint64x4 [a] x) + // result: (VEXTRACTI128128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGet128Uint8x32(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedAddInt16x16 x y mask) - // result: (VPADDWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (Get128Uint8x32 [a] x) + // result: (VEXTRACTI128128 [a] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedAddInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpGetElemInt16x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedAddInt16x32 x y mask) - // result: (VPADDWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (GetElemInt16x8 [a] x) + // result: (VPEXTRW128 [a] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPEXTRW128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedAddInt16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpGetElemInt32x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedAddInt16x8 x y mask) - // result: (VPADDWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (GetElemInt32x4 [a] x) + // result: (VPEXTRD128 [a] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPEXTRD128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedAddInt32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpGetElemInt64x2(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedAddInt32x16 x y mask) - // result: (VPADDDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (GetElemInt64x2 [a] x) + // result: (VPEXTRQ128 [a] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPEXTRQ128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedAddInt32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpGetElemInt8x16(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedAddInt32x4 x y mask) - // result: (VPADDDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (GetElemInt8x16 [a] x) + // result: (VPEXTRB128 [a] x) for { + a := 
auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPEXTRB128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedAddInt32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpGetElemUint16x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedAddInt32x8 x y mask) - // result: (VPADDDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (GetElemUint16x8 [a] x) + // result: (VPEXTRW128 [a] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPEXTRW128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedAddInt64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpGetElemUint32x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedAddInt64x2 x y mask) - // result: (VPADDQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (GetElemUint32x4 [a] x) + // result: (VPEXTRD128 [a] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPEXTRD128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedAddInt64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpGetElemUint64x2(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedAddInt64x4 x y mask) - // result: (VPADDQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (GetElemUint64x2 [a] x) + // result: (VPEXTRQ128 [a] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPEXTRQ128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedAddInt64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpGetElemUint8x16(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedAddInt64x8 x y mask) - // result: (VPADDQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (GetElemUint8x16 [a] x) + // result: (VPEXTRB128 [a] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPEXTRB128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedAddInt8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpGetG(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedAddInt8x16 x y mask) - // result: (VPADDBMasked128 x y (VPMOVVec8x16ToM mask)) + // match: (GetG mem) + // cond: v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal + // result: (LoweredGetG mem) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + mem := v_0 + if !(v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal) { + break + 
} + v.reset(OpAMD64LoweredGetG) + v.AddArg(mem) return true } + return false } -func rewriteValueAMD64_OpMaskedAddInt8x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddInt8x32 x y mask) - // result: (VPADDBMasked256 x y (VPMOVVec8x32ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [13] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddInt8x64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedAddInt8x64 x y mask) - // result: (VPADDBMasked512 x y (VPMOVVec8x64ToM mask)) + // match: (GreaterEqualFloat32x4 x y) + // result: (VCMPPS128 [13] x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VCMPPS128) + v.AuxInt = int8ToAuxInt(13) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedAddUint16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedAddUint16x16 x y mask) - // result: (VPADDWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (GreaterEqualFloat32x8 x y) + // result: (VCMPPS256 [13] x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VCMPPS256) + v.AuxInt = int8ToAuxInt(13) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedAddUint16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedAddUint16x32 x y mask) - // result: (VPADDWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (GreaterEqualFloat64x2 x y) + // result: (VCMPPD128 [13] x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VCMPPD128) + v.AuxInt = int8ToAuxInt(13) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedAddUint16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedAddUint16x8 x y mask) - // result: (VPADDWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (GreaterEqualFloat64x4 x y) + // result: (VCMPPD256 [13] x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VCMPPD256) + v.AuxInt = int8ToAuxInt(13) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedAddUint32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualFloat64x8(v *Value) bool { v_1 := 
v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint32x16 x y mask) - // result: (VPADDDMasked512 x y (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [13] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint32x4 x y mask) - // result: (VPADDDMasked128 x y (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualInt16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPW256 [13] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint32x8 x y mask) - // result: (VPADDDMasked256 x y (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualInt16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPW512 [13] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint64x2 x y mask) - // result: (VPADDQMasked128 x y (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualInt16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPW128 [13] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint64x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint64x4 x y mask) - // result: (VPADDQMasked256 x y (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualInt32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPD512 [13] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPCMPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint64x8 x y mask) - // result: (VPADDQMasked512 x y (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualInt32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPD128 [13] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint8x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint8x16 x y mask) - // result: (VPADDBMasked128 x y (VPMOVVec8x16ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualInt32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPD256 [13] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint8x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint8x32 x y mask) - // result: (VPADDBMasked256 x y (VPMOVVec8x32ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualInt64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPQ128 [13] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint8x64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint8x64 x y mask) - // result: (VPADDBMasked512 x y (VPMOVVec8x64ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualInt64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPQ256 [13] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndInt32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndInt32x16 x y mask) - // result: (VPANDDMasked512 x y (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualInt64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPQ512 [13] x y)) for 
{ x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPANDDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndInt32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualInt8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndInt32x4 x y mask) - // result: (VPANDDMasked128 x y (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualInt8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPB128 [13] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPANDDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndInt32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualInt8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndInt32x8 x y mask) - // result: (VPANDDMasked256 x y (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualInt8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPB256 [13] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPANDDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndInt64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndInt64x2 x y mask) - // result: (VPANDQMasked128 x y (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualInt8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPB512 [13] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPANDQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndInt64x4(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndInt64x4 x y mask) - // result: (VPANDQMasked256 x y (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedFloat32x16 x y mask) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [13] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func 
rewriteValueAMD64_OpMaskedAndInt64x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedFloat32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndInt64x8 x y mask) - // result: (VPANDQMasked512 x y (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedFloat32x4 x y mask) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [13] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotInt32x16(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedFloat32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotInt32x16 x y mask) - // result: (VPANDNDMasked512 x y (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedFloat32x8 x y mask) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [13] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotInt32x4(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedFloat64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotInt32x4 x y mask) - // result: (VPANDNDMasked128 x y (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedFloat64x2 x y mask) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [13] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotInt32x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedFloat64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotInt32x8 x y mask) - // result: (VPANDNDMasked256 x y (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedFloat64x4 x y mask) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [13] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + 
v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotInt64x2(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedFloat64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotInt64x2 x y mask) - // result: (VPANDNQMasked128 x y (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedFloat64x8 x y mask) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [13] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotInt64x4(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotInt64x4 x y mask) - // result: (VPANDNQMasked256 x y (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedInt16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [13] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotInt64x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotInt64x8 x y mask) - // result: (VPANDNQMasked512 x y (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedInt16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [13] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotUint32x16(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotUint32x16 x y mask) - // result: (VPANDNDMasked512 x y (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedInt16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [13] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) + v0.AuxInt = 
int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotUint32x4(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotUint32x4 x y mask) - // result: (VPANDNDMasked128 x y (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedInt32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [13] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotUint32x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotUint32x8 x y mask) - // result: (VPANDNDMasked256 x y (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedInt32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [13] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotUint64x2(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotUint64x2 x y mask) - // result: (VPANDNQMasked128 x y (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedInt32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [13] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotUint64x4(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotUint64x4 x y mask) - // result: (VPANDNQMasked256 x y (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedInt64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [13] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := 
b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotUint64x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotUint64x8 x y mask) - // result: (VPANDNQMasked512 x y (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedInt64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [13] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndUint32x16(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndUint32x16 x y mask) - // result: (VPANDDMasked512 x y (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedInt64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [13] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndUint32x4(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedInt8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndUint32x4 x y mask) - // result: (VPANDDMasked128 x y (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedInt8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [13] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndUint32x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedInt8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndUint32x8 x y mask) - // result: (VPANDDMasked256 x y (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedInt8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [13] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + 
v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndUint64x2(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedInt8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndUint64x2 x y mask) - // result: (VPANDQMasked128 x y (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedInt8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [13] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndUint64x4(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndUint64x4 x y mask) - // result: (VPANDQMasked256 x y (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedUint16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [13] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndUint64x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedUint16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndUint64x8 x y mask) - // result: (VPANDQMasked512 x y (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedUint16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [13] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedUint16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedApproximateReciprocalFloat32x16 x mask) - // result: (VRCP14PSMasked512 x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedUint16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [13] x y (VPMOVVec16x8ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VRCP14PSMasked512) - v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedUint32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedApproximateReciprocalFloat32x4 x mask) - // result: (VRCP14PSMasked128 x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedUint32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [13] x y (VPMOVVec32x16ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VRCP14PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedUint32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedApproximateReciprocalFloat32x8 x mask) - // result: (VRCP14PSMasked256 x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedUint32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [13] x y (VPMOVVec32x4ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VRCP14PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedUint32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedApproximateReciprocalFloat64x2 x mask) - // result: (VRCP14PDMasked128 x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedUint32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [13] x y (VPMOVVec32x8ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VRCP14PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedUint64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedApproximateReciprocalFloat64x4 x mask) - // result: 
(VRCP14PDMasked256 x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedUint64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [13] x y (VPMOVVec64x2ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VRCP14PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedUint64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedApproximateReciprocalFloat64x8 x mask) - // result: (VRCP14PDMasked512 x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedUint64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [13] x y (VPMOVVec64x4ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VRCP14PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedUint64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedApproximateReciprocalOfSqrtFloat32x16 x mask) - // result: (VRSQRT14PSMasked512 x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedUint64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [13] x y (VPMOVVec64x8ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VRSQRT14PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedUint8x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedApproximateReciprocalOfSqrtFloat32x4 x mask) - // result: (VRSQRT14PSMasked128 x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedUint8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [13] x y (VPMOVVec8x16ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VRSQRT14PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) 
return true } } -func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedUint8x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedApproximateReciprocalOfSqrtFloat32x8 x mask) - // result: (VRSQRT14PSMasked256 x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedUint8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [13] x y (VPMOVVec8x32ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VRSQRT14PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedUint8x64(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedApproximateReciprocalOfSqrtFloat64x2 x mask) - // result: (VRSQRT14PDMasked128 x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedUint8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [13] x y (VPMOVVec8x64ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VRSQRT14PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedApproximateReciprocalOfSqrtFloat64x4 x mask) - // result: (VRSQRT14PDMasked256 x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualUint16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPUW256 [13] x y)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VRSQRT14PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedApproximateReciprocalOfSqrtFloat64x8 x mask) - // result: (VRSQRT14PDMasked512 x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualUint16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPUW512 [13] x y)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VRSQRT14PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } 
} -func rewriteValueAMD64_OpMaskedAverageUint16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualUint16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAverageUint16x16 x y mask) - // result: (VPAVGWMasked256 x y (VPMOVVec16x16ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualUint16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPUW128 [13] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPAVGWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAverageUint16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAverageUint16x32 x y mask) - // result: (VPAVGWMasked512 x y (VPMOVVec16x32ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualUint32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [13] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPAVGWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAverageUint16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAverageUint16x8 x y mask) - // result: (VPAVGWMasked128 x y (VPMOVVec16x8ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualUint32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPUD128 [13] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPAVGWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAverageUint8x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAverageUint8x16 x y mask) - // result: (VPAVGBMasked128 x y (VPMOVVec8x16ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualUint32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPUD256 [13] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPAVGBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAverageUint8x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualUint64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAverageUint8x32 x y mask) - // result: (VPAVGBMasked256 x y (VPMOVVec8x32ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualUint64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPUQ128 [13] x y)) for { x := v_0 y := v_1 - mask := v_2 - 
v.reset(OpAMD64VPAVGBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAverageUint8x64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAverageUint8x64 x y mask) - // result: (VPAVGBMasked512 x y (VPMOVVec8x64ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualUint64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPUQ256 [13] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPAVGBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedCeilWithPrecisionFloat32x16 [a] x mask) - // result: (VRNDSCALEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualUint64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPUQ512 [13] x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 2) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualUint8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedCeilWithPrecisionFloat32x4 [a] x mask) - // result: (VRNDSCALEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualUint8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPUB128 [13] x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 2) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualUint8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedCeilWithPrecisionFloat32x8 [a] x mask) - // result: (VRNDSCALEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualUint8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPUB256 [13] x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 2) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, 
typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualUint8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedCeilWithPrecisionFloat64x2 [a] x mask) - // result: (VRNDSCALEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualUint8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [13] x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 2) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpGreaterFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedCeilWithPrecisionFloat64x4 [a] x mask) - // result: (VRNDSCALEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [14] x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 2) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedCeilWithPrecisionFloat64x8 [a] x mask) - // result: (VRNDSCALEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) + // match: (GreaterFloat32x4 x y) + // result: (VCMPPS128 [14] x y) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 2) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VCMPPS128) + v.AuxInt = int8ToAuxInt(14) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpGreaterFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithCeilWithPrecisionFloat32x16 [a] x mask) - // result: (VREDUCEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) + // match: (GreaterFloat32x8 x y) + // result: (VCMPPS256 [14] x y) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 2) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VCMPPS256) + v.AuxInt = int8ToAuxInt(14) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpGreaterFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithCeilWithPrecisionFloat32x4 [a] x mask) - // result: (VREDUCEPSMasked128 [a+2] x (VPMOVVec32x4ToM 
mask)) + // match: (GreaterFloat64x2 x y) + // result: (VCMPPD128 [14] x y) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 2) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VCMPPD128) + v.AuxInt = int8ToAuxInt(14) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithCeilWithPrecisionFloat32x8 [a] x mask) - // result: (VREDUCEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) + // match: (GreaterFloat64x4 x y) + // result: (VCMPPD256 [14] x y) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 2) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VCMPPD256) + v.AuxInt = int8ToAuxInt(14) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpGreaterFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDiffWithCeilWithPrecisionFloat64x2 [a] x mask) - // result: (VREDUCEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [14] x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 2) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpGreaterInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDiffWithCeilWithPrecisionFloat64x4 [a] x mask) - // result: (VREDUCEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterInt16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPW512 [14] x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 2) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDiffWithCeilWithPrecisionFloat64x8 [a] x mask) - // result: (VREDUCEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterInt32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPD512 [14] x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 2) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + 
v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpGreaterInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDiffWithFloorWithPrecisionFloat32x16 [a] x mask) - // result: (VREDUCEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterInt64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPQ128 [14] x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 1) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpGreaterInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDiffWithFloorWithPrecisionFloat32x4 [a] x mask) - // result: (VREDUCEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterInt64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPQ512 [14] x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 1) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDiffWithFloorWithPrecisionFloat32x8 [a] x mask) - // result: (VREDUCEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterInt8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPB512 [14] x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 1) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedFloat32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDiffWithFloorWithPrecisionFloat64x2 [a] x mask) - // result: (VREDUCEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterMaskedFloat32x16 x y mask) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [14] x y (VPMOVVec32x16ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 1) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := 
b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedFloat32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDiffWithFloorWithPrecisionFloat64x4 [a] x mask) - // result: (VREDUCEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterMaskedFloat32x4 x y mask) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [14] x y (VPMOVVec32x4ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 1) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedFloat32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDiffWithFloorWithPrecisionFloat64x8 [a] x mask) - // result: (VREDUCEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterMaskedFloat32x8 x y mask) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [14] x y (VPMOVVec32x8ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 1) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedFloat64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDiffWithRoundWithPrecisionFloat32x16 [a] x mask) - // result: (VREDUCEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterMaskedFloat64x2 x y mask) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [14] x y (VPMOVVec64x2ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 0) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedFloat64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: 
(MaskedDiffWithRoundWithPrecisionFloat32x4 [a] x mask) - // result: (VREDUCEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterMaskedFloat64x4 x y mask) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [14] x y (VPMOVVec64x4ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 0) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedFloat64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDiffWithRoundWithPrecisionFloat32x8 [a] x mask) - // result: (VREDUCEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterMaskedFloat64x8 x y mask) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [14] x y (VPMOVVec64x8ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 0) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedInt16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDiffWithRoundWithPrecisionFloat64x2 [a] x mask) - // result: (VREDUCEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterMaskedInt16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [14] x y (VPMOVVec16x16ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 0) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedInt16x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDiffWithRoundWithPrecisionFloat64x4 [a] x mask) - // result: (VREDUCEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterMaskedInt16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [14] x y (VPMOVVec16x32ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 0) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, 
types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedInt16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDiffWithRoundWithPrecisionFloat64x8 [a] x mask) - // result: (VREDUCEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterMaskedInt16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [14] x y (VPMOVVec16x8ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 0) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedInt32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDiffWithTruncWithPrecisionFloat32x16 [a] x mask) - // result: (VREDUCEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterMaskedInt32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [14] x y (VPMOVVec32x16ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 3) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedInt32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDiffWithTruncWithPrecisionFloat32x4 [a] x mask) - // result: (VREDUCEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterMaskedInt32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [14] x y (VPMOVVec32x4ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 3) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat32x8(v *Value) bool { +func 
rewriteValueAMD64_OpGreaterMaskedInt32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithTruncWithPrecisionFloat32x8 [a] x mask) - // result: (VREDUCEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 3) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterMaskedInt32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [14] x y (VPMOVVec32x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedInt64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDiffWithTruncWithPrecisionFloat64x2 [a] x mask) - // result: (VREDUCEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterMaskedInt64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [14] x y (VPMOVVec64x2ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 3) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedInt64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDiffWithTruncWithPrecisionFloat64x4 [a] x mask) - // result: (VREDUCEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterMaskedInt64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [14] x y (VPMOVVec64x4ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 3) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedInt64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDiffWithTruncWithPrecisionFloat64x8 [a] x mask) - // result: (VREDUCEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterMaskedInt64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [14] x y (VPMOVVec64x8ToM mask))) for { - a := 
auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 3) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDivFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedInt8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDivFloat32x16 x y mask) - // result: (VDIVPSMasked512 x y (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterMaskedInt8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [14] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VDIVPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDivFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedInt8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDivFloat32x4 x y mask) - // result: (VDIVPSMasked128 x y (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterMaskedInt8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [14] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VDIVPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDivFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedInt8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDivFloat32x8 x y mask) - // result: (VDIVPSMasked256 x y (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterMaskedInt8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [14] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VDIVPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDivFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDivFloat64x2 x y mask) - // result: (VDIVPDMasked128 x y (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterMaskedUint16x16 x y mask) + // 
result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [14] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VDIVPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDivFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedUint16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDivFloat64x4 x y mask) - // result: (VDIVPDMasked256 x y (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterMaskedUint16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [14] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VDIVPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDivFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedUint16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDivFloat64x8 x y mask) - // result: (VDIVPDMasked512 x y (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterMaskedUint16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [14] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VDIVPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualFloat32x16 x y mask) - // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [0] x y (VPMOVVec32x16ToM mask))) + // match: (GreaterMaskedUint32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [14] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -36539,21 +36413,21 @@ func rewriteValueAMD64_OpMaskedEqualFloat32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedEqualFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualFloat32x4 x y mask) - // result: (VPMOVMToVec32x4 
(VCMPPSMasked128 [0] x y (VPMOVVec32x4ToM mask))) + // match: (GreaterMaskedUint32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [14] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -36561,21 +36435,21 @@ func rewriteValueAMD64_OpMaskedEqualFloat32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedEqualFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualFloat32x8 x y mask) - // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [0] x y (VPMOVVec32x8ToM mask))) + // match: (GreaterMaskedUint32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [14] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -36583,21 +36457,21 @@ func rewriteValueAMD64_OpMaskedEqualFloat32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedEqualFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualFloat64x2 x y mask) - // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [0] x y (VPMOVVec64x2ToM mask))) + // match: (GreaterMaskedUint64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [14] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -36605,21 +36479,21 @@ func rewriteValueAMD64_OpMaskedEqualFloat64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedEqualFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualFloat64x4 x y mask) - // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [0] x y (VPMOVVec64x4ToM mask))) + // match: (GreaterMaskedUint64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [14] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -36627,21 +36501,21 @@ func rewriteValueAMD64_OpMaskedEqualFloat64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedEqualFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedUint64x8(v *Value) 
bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualFloat64x8 x y mask) - // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [0] x y (VPMOVVec64x8ToM mask))) + // match: (GreaterMaskedUint64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [14] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -36649,351 +36523,428 @@ func rewriteValueAMD64_OpMaskedEqualFloat64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedEqualInt16x16(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [0] x y (VPMOVVec16x16ToM mask))) + // match: (GreaterMaskedUint8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [14] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt16x32(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [0] x y (VPMOVVec16x32ToM mask))) + // match: (GreaterMaskedUint8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [14] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt16x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedUint8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [0] x y (VPMOVVec16x8ToM mask))) + // match: (GreaterMaskedUint8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [14] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) + v0.AuxInt = 
int8ToAuxInt(14) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [0] x y (VPMOVVec32x16ToM mask))) + // match: (GreaterUint16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPUW256 [14] x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPUW512 [14] x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPUW128 [14] x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [14] x y)) for { x := v_0 y := v_1 - mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [0] x y (VPMOVVec32x4ToM mask))) + // match: (GreaterUint32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPUD128 [14] x y)) for { x := v_0 y := v_1 - mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [0] x y (VPMOVVec32x8ToM mask))) + // match: (GreaterUint32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPUD256 [14] x y)) 
for { x := v_0 y := v_1 - mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterUint64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [0] x y (VPMOVVec64x2ToM mask))) + // match: (GreaterUint64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPUQ128 [14] x y)) for { x := v_0 y := v_1 - mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt64x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [0] x y (VPMOVVec64x4ToM mask))) + // match: (GreaterUint64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPUQ256 [14] x y)) for { x := v_0 y := v_1 - mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [0] x y (VPMOVVec64x8ToM mask))) + // match: (GreaterUint64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPUQ512 [14] x y)) for { x := v_0 y := v_1 - mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt8x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterUint8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [0] x y (VPMOVVec8x16ToM mask))) + // match: (GreaterUint8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPUB128 [14] x y)) for { x := v_0 y := v_1 - mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - 
v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt8x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterUint8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [0] x y (VPMOVVec8x32ToM mask))) + // match: (GreaterUint8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPUB256 [14] x y)) for { x := v_0 y := v_1 - mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt8x64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterUint8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [0] x y (VPMOVVec8x64ToM mask))) + // match: (GreaterUint8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [14] x y)) for { x := v_0 y := v_1 - mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualUint16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpHasCPUFeature(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (HasCPUFeature {s}) + // result: (SETNE (CMPLconst [0] (LoweredHasCPUFeature {s}))) + for { + s := auxToSym(v.Aux) + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64LoweredHasCPUFeature, typ.UInt64) + v1.Aux = symToAux(s) + v0.AddArg(v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpIsInBounds(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedEqualUint16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [0] x y (VPMOVVec16x16ToM mask))) + // match: (IsInBounds idx len) + // result: (SETB (CMPQ idx len)) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + idx := v_0 + len := v_1 + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(idx, len) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualUint16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpIsNanFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [0] x y (VPMOVVec16x32ToM mask))) + // match: (IsNanFloat32x16 x y) + // result: 
(VPMOVMToVec32x16 (VCMPPS512 [3] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualUint16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpIsNanFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (IsNanFloat32x4 x y) + // result: (VCMPPS128 [3] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPS128) + v.AuxInt = int8ToAuxInt(3) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpIsNanFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (IsNanFloat32x8 x y) + // result: (VCMPPS256 [3] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPS256) + v.AuxInt = int8ToAuxInt(3) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpIsNanFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (IsNanFloat64x2 x y) + // result: (VCMPPD128 [3] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPD128) + v.AuxInt = int8ToAuxInt(3) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpIsNanFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (IsNanFloat64x4 x y) + // result: (VCMPPD256 [3] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPD256) + v.AuxInt = int8ToAuxInt(3) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpIsNanFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [0] x y (VPMOVVec16x8ToM mask))) + // match: (IsNanFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [3] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualUint32x16(v *Value) bool { +func rewriteValueAMD64_OpIsNanMaskedFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [0] x y (VPMOVVec32x16ToM mask))) + // match: (IsNanMaskedFloat32x16 x y mask) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [3] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37001,21 +36952,21 @@ func rewriteValueAMD64_OpMaskedEqualUint32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedEqualUint32x4(v *Value) bool { +func rewriteValueAMD64_OpIsNanMaskedFloat32x4(v *Value) bool 
{ v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [0] x y (VPMOVVec32x4ToM mask))) + // match: (IsNanMaskedFloat32x4 x y mask) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [3] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37023,21 +36974,21 @@ func rewriteValueAMD64_OpMaskedEqualUint32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedEqualUint32x8(v *Value) bool { +func rewriteValueAMD64_OpIsNanMaskedFloat32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [0] x y (VPMOVVec32x8ToM mask))) + // match: (IsNanMaskedFloat32x8 x y mask) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [3] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37045,21 +36996,21 @@ func rewriteValueAMD64_OpMaskedEqualUint32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedEqualUint64x2(v *Value) bool { +func rewriteValueAMD64_OpIsNanMaskedFloat64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [0] x y (VPMOVVec64x2ToM mask))) + // match: (IsNanMaskedFloat64x2 x y mask) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [3] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37067,21 +37018,21 @@ func rewriteValueAMD64_OpMaskedEqualUint64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedEqualUint64x4(v *Value) bool { +func rewriteValueAMD64_OpIsNanMaskedFloat64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [0] x y (VPMOVVec64x4ToM mask))) + // match: (IsNanMaskedFloat64x4 x y mask) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [3] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37089,21 +37040,21 @@ func rewriteValueAMD64_OpMaskedEqualUint64x4(v *Value) bool { 
return true } } -func rewriteValueAMD64_OpMaskedEqualUint64x8(v *Value) bool { +func rewriteValueAMD64_OpIsNanMaskedFloat64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [0] x y (VPMOVVec64x8ToM mask))) + // match: (IsNanMaskedFloat64x8 x y mask) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [3] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37111,729 +37062,679 @@ func rewriteValueAMD64_OpMaskedEqualUint64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedEqualUint8x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpIsNonNil(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (IsNonNil p) + // result: (SETNE (TESTQ p p)) + for { + p := v_0 + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags) + v0.AddArg2(p, p) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpIsSliceInBounds(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedEqualUint8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [0] x y (VPMOVVec8x16ToM mask))) + // match: (IsSliceInBounds idx len) + // result: (SETBE (CMPQ idx len)) + for { + idx := v_0 + len := v_1 + v.reset(OpAMD64SETBE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(idx, len) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLeq16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq16 x y) + // result: (SETLE (CMPW x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v.reset(OpAMD64SETLE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualUint8x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLeq16U(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedEqualUint8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [0] x y (VPMOVVec8x32ToM mask))) + // match: (Leq16U x y) + // result: (SETBE (CMPW x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v.reset(OpAMD64SETBE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualUint8x64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLeq32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedEqualUint8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [0] x y (VPMOVVec8x64ToM mask))) + // match: (Leq32 x y) + // result: 
(SETLE (CMPL x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v.reset(OpAMD64SETLE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpLeq32F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFloorWithPrecisionFloat32x16 [a] x mask) - // result: (VRNDSCALEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) + // match: (Leq32F x y) + // result: (SETGEF (UCOMISS y x)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 1) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64SETGEF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpLeq32U(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFloorWithPrecisionFloat32x4 [a] x mask) - // result: (VRNDSCALEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) + // match: (Leq32U x y) + // result: (SETBE (CMPL x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 1) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64SETBE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpLeq64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFloorWithPrecisionFloat32x8 [a] x mask) - // result: (VRNDSCALEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) + // match: (Leq64 x y) + // result: (SETLE (CMPQ x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 1) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64SETLE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpLeq64F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFloorWithPrecisionFloat64x2 [a] x mask) - // result: (VRNDSCALEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) + // match: (Leq64F x y) + // result: (SETGEF (UCOMISD y x)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 1) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64SETGEF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpLeq64U(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // 
match: (MaskedFloorWithPrecisionFloat64x4 [a] x mask) - // result: (VRNDSCALEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) + // match: (Leq64U x y) + // result: (SETBE (CMPQ x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 1) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64SETBE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpLeq8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFloorWithPrecisionFloat64x8 [a] x mask) - // result: (VRNDSCALEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) + // match: (Leq8 x y) + // result: (SETLE (CMPB x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 1) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64SETLE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpLeq8U(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplyAddFloat32x16 x y z mask) - // result: (VFMADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) + // match: (Leq8U x y) + // result: (SETBE (CMPB x y)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD213PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64SETBE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpLess16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplyAddFloat32x4 x y z mask) - // result: (VFMADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) + // match: (Less16 x y) + // result: (SETL (CMPW x y)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD213PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64SETL) + v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpLess16U(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplyAddFloat32x8 x y z mask) - // result: (VFMADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) + // match: (Less16U x y) + // result: (SETB (CMPW x y)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD213PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] 
+func rewriteValueAMD64_OpLess32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplyAddFloat64x2 x y z mask) - // result: (VFMADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) + // match: (Less32 x y) + // result: (SETL (CMPL x y)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD213PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64SETL) + v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpLess32F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplyAddFloat64x4 x y z mask) - // result: (VFMADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) + // match: (Less32F x y) + // result: (SETGF (UCOMISS y x)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD213PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64SETGF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpLess32U(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplyAddFloat64x8 x y z mask) - // result: (VFMADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) + // match: (Less32U x y) + // result: (SETB (CMPL x y)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD213PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpLess64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplyAddSubFloat32x16 x y z mask) - // result: (VFMADDSUB213PSMasked512 x y z (VPMOVVec32x16ToM mask)) + // match: (Less64 x y) + // result: (SETL (CMPQ x y)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB213PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64SETL) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpLess64F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplyAddSubFloat32x4 x y z mask) - // result: (VFMADDSUB213PSMasked128 x y z (VPMOVVec32x4ToM mask)) + // match: (Less64F x y) + // result: (SETGF (UCOMISD y x)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB213PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64SETGF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) return true } } -func 
rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpLess64U(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplyAddSubFloat32x8 x y z mask) - // result: (VFMADDSUB213PSMasked256 x y z (VPMOVVec32x8ToM mask)) + // match: (Less64U x y) + // result: (SETB (CMPQ x y)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB213PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpLess8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplyAddSubFloat64x2 x y z mask) - // result: (VFMADDSUB213PDMasked128 x y z (VPMOVVec64x2ToM mask)) + // match: (Less8 x y) + // result: (SETL (CMPB x y)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB213PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64SETL) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpLess8U(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplyAddSubFloat64x4 x y z mask) - // result: (VFMADDSUB213PDMasked256 x y z (VPMOVVec64x4ToM mask)) + // match: (Less8U x y) + // result: (SETB (CMPB x y)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB213PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessEqualFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplyAddSubFloat64x8 x y z mask) - // result: (VFMADDSUB213PDMasked512 x y z (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (LessEqualFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [2] x y)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB213PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessEqualFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySubAddFloat32x16 x y z mask) - // result: (VFMSUBADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) + // match: (LessEqualFloat32x4 x y) + // result: (VCMPPS128 [2] x y) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD213PSMasked512) - v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VCMPPS128) + v.AuxInt = int8ToAuxInt(2) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessEqualFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySubAddFloat32x4 x y z mask) - // result: (VFMSUBADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) + // match: (LessEqualFloat32x8 x y) + // result: (VCMPPS256 [2] x y) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD213PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VCMPPS256) + v.AuxInt = int8ToAuxInt(2) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessEqualFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (LessEqualFloat64x2 x y) + // result: (VCMPPD128 [2] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPD128) + v.AuxInt = int8ToAuxInt(2) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpLessEqualFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (LessEqualFloat64x4 x y) + // result: (VCMPPD256 [2] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPD256) + v.AuxInt = int8ToAuxInt(2) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpLessEqualFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplySubAddFloat32x8 x y z mask) - // result: (VFMSUBADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (LessEqualFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [2] x y)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD213PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessEqualInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplySubAddFloat64x2 x y z mask) - // result: (VFMSUBADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (LessEqualInt16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPW256 [2] x y)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD213PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessEqualInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplySubAddFloat64x4 x y z mask) - // result: (VFMSUBADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) + typ := 
&b.Func.Config.Types + // match: (LessEqualInt16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPW512 [2] x y)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD213PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessEqualInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplySubAddFloat64x8 x y z mask) - // result: (VFMSUBADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (LessEqualInt16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPW128 [2] x y)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD213PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformInversedUint8x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessEqualInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedGaloisFieldAffineTransformInversedUint8x16 [a] x y mask) - // result: (VGF2P8AFFINEINVQBMasked128 [a] x y (VPMOVVec8x16ToM mask)) + typ := &b.Func.Config.Types + // match: (LessEqualInt32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPD512 [2] x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VGF2P8AFFINEINVQBMasked128) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformInversedUint8x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessEqualInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedGaloisFieldAffineTransformInversedUint8x32 [a] x y mask) - // result: (VGF2P8AFFINEINVQBMasked256 [a] x y (VPMOVVec8x32ToM mask)) + typ := &b.Func.Config.Types + // match: (LessEqualInt32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPD128 [2] x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VGF2P8AFFINEINVQBMasked256) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformInversedUint8x64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessEqualInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedGaloisFieldAffineTransformInversedUint8x64 [a] x y mask) - // result: (VGF2P8AFFINEINVQBMasked512 [a] x y (VPMOVVec8x64ToM mask)) + typ := &b.Func.Config.Types + // match: (LessEqualInt32x8 x y) 
+ // result: (VPMOVMToVec32x8 (VPCMPD256 [2] x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VGF2P8AFFINEINVQBMasked512) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformUint8x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessEqualInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedGaloisFieldAffineTransformUint8x16 [a] x y mask) - // result: (VGF2P8AFFINEQBMasked128 [a] x y (VPMOVVec8x16ToM mask)) + typ := &b.Func.Config.Types + // match: (LessEqualInt64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPQ128 [2] x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VGF2P8AFFINEQBMasked128) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformUint8x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessEqualInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedGaloisFieldAffineTransformUint8x32 [a] x y mask) - // result: (VGF2P8AFFINEQBMasked256 [a] x y (VPMOVVec8x32ToM mask)) + typ := &b.Func.Config.Types + // match: (LessEqualInt64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPQ256 [2] x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VGF2P8AFFINEQBMasked256) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformUint8x64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessEqualInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedGaloisFieldAffineTransformUint8x64 [a] x y mask) - // result: (VGF2P8AFFINEQBMasked512 [a] x y (VPMOVVec8x64ToM mask)) + typ := &b.Func.Config.Types + // match: (LessEqualInt64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPQ512 [2] x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VGF2P8AFFINEQBMasked512) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGaloisFieldMulUint8x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessEqualInt8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedGaloisFieldMulUint8x16 x y mask) - // result: (VGF2P8MULBMasked128 x y (VPMOVVec8x16ToM mask)) + typ := &b.Func.Config.Types + // match: (LessEqualInt8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPB128 [2] x y)) for { x := v_0 y := 
v_1 - mask := v_2 - v.reset(OpAMD64VGF2P8MULBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGaloisFieldMulUint8x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessEqualInt8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedGaloisFieldMulUint8x32 x y mask) - // result: (VGF2P8MULBMasked256 x y (VPMOVVec8x32ToM mask)) + typ := &b.Func.Config.Types + // match: (LessEqualInt8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPB256 [2] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VGF2P8MULBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGaloisFieldMulUint8x64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessEqualInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedGaloisFieldMulUint8x64 x y mask) - // result: (VGF2P8MULBMasked512 x y (VPMOVVec8x64ToM mask)) + typ := &b.Func.Config.Types + // match: (LessEqualInt8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPB512 [2] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VGF2P8MULBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualFloat32x16 x y mask) - // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [13] x y (VPMOVVec32x16ToM mask))) + // match: (LessEqualMaskedFloat32x16 x y mask) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [2] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37841,21 +37742,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualFloat32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedFloat32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualFloat32x4 x y mask) - // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [13] x y (VPMOVVec32x4ToM mask))) + // match: (LessEqualMaskedFloat32x4 x y mask) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [2] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) 
v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37863,21 +37764,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualFloat32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedFloat32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualFloat32x8 x y mask) - // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [13] x y (VPMOVVec32x8ToM mask))) + // match: (LessEqualMaskedFloat32x8 x y mask) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [2] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37885,21 +37786,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualFloat32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedFloat64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualFloat64x2 x y mask) - // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [13] x y (VPMOVVec64x2ToM mask))) + // match: (LessEqualMaskedFloat64x2 x y mask) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [2] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37907,21 +37808,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualFloat64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedFloat64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualFloat64x4 x y mask) - // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [13] x y (VPMOVVec64x4ToM mask))) + // match: (LessEqualMaskedFloat64x4 x y mask) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [2] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37929,21 +37830,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualFloat64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedFloat64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualFloat64x8 x y mask) - // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [13] x y (VPMOVVec64x8ToM mask))) + // match: (LessEqualMaskedFloat64x8 x y mask) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [2] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37951,21 +37852,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualFloat64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt16x16(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [13] x y (VPMOVVec16x16ToM mask))) + // match: (LessEqualMaskedInt16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [2] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37973,21 +37874,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualInt16x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt16x32(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [13] x y (VPMOVVec16x32ToM mask))) + // match: (LessEqualMaskedInt16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [2] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37995,21 +37896,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualInt16x32(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt16x8(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [13] x y (VPMOVVec16x8ToM mask))) + // match: (LessEqualMaskedInt16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [2] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38017,21 +37918,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualInt16x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt32x16(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [13] x y (VPMOVVec32x16ToM mask))) + // match: (LessEqualMaskedInt32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [2] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38039,21 +37940,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualInt32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt32x4(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [13] x y (VPMOVVec32x4ToM mask))) + // match: (LessEqualMaskedInt32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [2] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38061,21 +37962,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualInt32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt32x8(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [13] x y (VPMOVVec32x8ToM mask))) + // match: (LessEqualMaskedInt32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [2] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38083,21 +37984,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualInt32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt64x2(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [13] x y (VPMOVVec64x2ToM mask))) + // match: (LessEqualMaskedInt64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [2] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38105,21 +38006,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualInt64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt64x4(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [13] x y (VPMOVVec64x4ToM mask))) + // match: (LessEqualMaskedInt64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [2] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38127,21 +38028,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualInt64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt64x8(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [13] x y (VPMOVVec64x8ToM mask))) + // match: (LessEqualMaskedInt64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [2] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38149,21 +38050,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualInt64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt8x16(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedInt8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [13] x y (VPMOVVec8x16ToM mask))) + // match: (LessEqualMaskedInt8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [2] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38171,21 +38072,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualInt8x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt8x32(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedInt8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [13] x y (VPMOVVec8x32ToM mask))) + // match: (LessEqualMaskedInt8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [2] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38193,21 +38094,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualInt8x32(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt8x64(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedInt8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [13] x y (VPMOVVec8x64ToM mask))) + // match: (LessEqualMaskedInt8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [2] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38215,21 +38116,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualInt8x64(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint16x16(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [13] x y (VPMOVVec16x16ToM mask))) + // match: (LessEqualMaskedUint16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [2] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38237,21 +38138,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualUint16x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint16x32(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedUint16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [13] x y (VPMOVVec16x32ToM mask))) + // match: (LessEqualMaskedUint16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [2] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38259,21 +38160,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualUint16x32(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint16x8(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedUint16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [13] x y (VPMOVVec16x8ToM mask))) + // match: (LessEqualMaskedUint16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [2] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38281,21 +38182,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualUint16x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint32x16(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [13] x y (VPMOVVec32x16ToM mask))) + // match: (LessEqualMaskedUint32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [2] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = 
int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38303,21 +38204,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualUint32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint32x4(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [13] x y (VPMOVVec32x4ToM mask))) + // match: (LessEqualMaskedUint32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [2] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38325,21 +38226,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualUint32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint32x8(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [13] x y (VPMOVVec32x8ToM mask))) + // match: (LessEqualMaskedUint32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [2] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38347,21 +38248,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualUint32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint64x2(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [13] x y (VPMOVVec64x2ToM mask))) + // match: (LessEqualMaskedUint64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [2] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38369,21 +38270,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualUint64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint64x4(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [13] x y (VPMOVVec64x4ToM mask))) + // match: (LessEqualMaskedUint64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [2] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + 
v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38391,21 +38292,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualUint64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint64x8(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [13] x y (VPMOVVec64x8ToM mask))) + // match: (LessEqualMaskedUint64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [2] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38413,21 +38314,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualUint64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint8x16(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [13] x y (VPMOVVec8x16ToM mask))) + // match: (LessEqualMaskedUint8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [2] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38435,21 +38336,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualUint8x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint8x32(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [13] x y (VPMOVVec8x32ToM mask))) + // match: (LessEqualMaskedUint8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [2] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38457,21 +38358,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualUint8x32(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint8x64(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedUint8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [13] x y (VPMOVVec8x64ToM mask))) + // match: (LessEqualMaskedUint8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [2] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = 
int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38479,483 +38380,545 @@ func rewriteValueAMD64_OpMaskedGreaterEqualUint8x64(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterFloat32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessEqualUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterFloat32x16 x y mask) - // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [14] x y (VPMOVVec32x16ToM mask))) + // match: (LessEqualUint16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPUW256 [2] x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualUint16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPUW512 [2] x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualUint16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPUW128 [2] x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualUint32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [2] x y)) for { x := v_0 y := v_1 - mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterFloat32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessEqualUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterFloat32x4 x y mask) - // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [14] x y (VPMOVVec32x4ToM mask))) + // match: (LessEqualUint32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPUD128 [2] x y)) for { x := v_0 y := v_1 - mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterFloat32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessEqualUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterFloat32x8 x y mask) - // result: 
(VPMOVMToVec32x8 (VCMPPSMasked256 [14] x y (VPMOVVec32x8ToM mask))) + // match: (LessEqualUint32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPUD256 [2] x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualUint64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPUQ128 [2] x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualUint64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPUQ256 [2] x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualUint64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPUQ512 [2] x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualUint8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPUB128 [2] x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualUint8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPUB256 [2] x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualUint8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [2] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterFloat64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterFloat64x2 x y mask) - // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [14] x y (VPMOVVec64x2ToM 
mask))) + // match: (LessFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterFloat64x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterFloat64x4 x y mask) - // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [14] x y (VPMOVVec64x4ToM mask))) + // match: (LessFloat32x4 x y) + // result: (VCMPPS128 [1] x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + v.reset(OpAMD64VCMPPS128) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedGreaterFloat64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterFloat64x8 x y mask) - // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [14] x y (VPMOVVec64x8ToM mask))) + // match: (LessFloat32x8 x y) + // result: (VCMPPS256 [1] x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + v.reset(OpAMD64VCMPPS256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterInt16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [14] x y (VPMOVVec16x16ToM mask))) + // match: (LessFloat64x2 x y) + // result: (VCMPPD128 [1] x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + v.reset(OpAMD64VCMPPD128) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterInt16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [14] x y (VPMOVVec16x32ToM mask))) + // match: (LessFloat64x4 x y) + // result: (VCMPPD256 [1] x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) 
- v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + v.reset(OpAMD64VCMPPD256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterInt16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [14] x y (VPMOVVec16x8ToM mask))) + // match: (LessFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterInt32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [14] x y (VPMOVVec32x16ToM mask))) + // match: (LessInt16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPW256 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterInt32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [14] x y (VPMOVVec32x4ToM mask))) + // match: (LessInt16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPW512 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterInt32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [14] x y (VPMOVVec32x8ToM mask))) + // match: (LessInt16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPW128 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + 
v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterInt64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [14] x y (VPMOVVec64x2ToM mask))) + // match: (LessInt32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPD512 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt64x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterInt64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [14] x y (VPMOVVec64x4ToM mask))) + // match: (LessInt32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPD128 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterInt64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [14] x y (VPMOVVec64x8ToM mask))) + // match: (LessInt32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPD256 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt8x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterInt8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [14] x y (VPMOVVec8x16ToM mask))) + // match: (LessInt64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPQ128 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt8x32(v *Value) bool { - v_2 := 
v.Args[2] +func rewriteValueAMD64_OpLessInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterInt8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [14] x y (VPMOVVec8x32ToM mask))) + // match: (LessInt64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPQ256 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt8x64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterInt8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [14] x y (VPMOVVec8x64ToM mask))) + // match: (LessInt64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPQ512 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterUint16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessInt8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterUint16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [14] x y (VPMOVVec16x16ToM mask))) + // match: (LessInt8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPB128 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterUint16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessInt8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterUint16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [14] x y (VPMOVVec16x32ToM mask))) + // match: (LessInt8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPB256 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterUint16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b 
:= v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterUint16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [14] x y (VPMOVVec16x8ToM mask))) + // match: (LessInt8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPB512 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterUint32x16(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterUint32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [14] x y (VPMOVVec32x16ToM mask))) + // match: (LessMaskedFloat32x16 x y mask) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [1] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38963,21 +38926,21 @@ func rewriteValueAMD64_OpMaskedGreaterUint32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterUint32x4(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedFloat32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterUint32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [14] x y (VPMOVVec32x4ToM mask))) + // match: (LessMaskedFloat32x4 x y mask) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [1] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38985,21 +38948,21 @@ func rewriteValueAMD64_OpMaskedGreaterUint32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterUint32x8(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedFloat32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterUint32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [14] x y (VPMOVVec32x8ToM mask))) + // match: (LessMaskedFloat32x8 x y mask) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [1] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39007,21 +38970,21 @@ func rewriteValueAMD64_OpMaskedGreaterUint32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterUint64x2(v *Value) bool { 
+func rewriteValueAMD64_OpLessMaskedFloat64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterUint64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [14] x y (VPMOVVec64x2ToM mask))) + // match: (LessMaskedFloat64x2 x y mask) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [1] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39029,21 +38992,21 @@ func rewriteValueAMD64_OpMaskedGreaterUint64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterUint64x4(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedFloat64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterUint64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [14] x y (VPMOVVec64x4ToM mask))) + // match: (LessMaskedFloat64x4 x y mask) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [1] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39051,21 +39014,21 @@ func rewriteValueAMD64_OpMaskedGreaterUint64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterUint64x8(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedFloat64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterUint64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [14] x y (VPMOVVec64x8ToM mask))) + // match: (LessMaskedFloat64x8 x y mask) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [1] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39073,87 +39036,87 @@ func rewriteValueAMD64_OpMaskedGreaterUint64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterUint8x16(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterUint8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [14] x y (VPMOVVec8x16ToM mask))) + // match: (LessMaskedInt16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [1] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + 
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterUint8x32(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterUint8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [14] x y (VPMOVVec8x32ToM mask))) + // match: (LessMaskedInt16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [1] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterUint8x64(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterUint8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [14] x y (VPMOVVec8x64ToM mask))) + // match: (LessMaskedInt16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [1] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedIsNanFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedIsNanFloat32x16 x y mask) - // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [3] x y (VPMOVVec32x16ToM mask))) + // match: (LessMaskedInt32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [1] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39161,21 +39124,21 @@ func rewriteValueAMD64_OpMaskedIsNanFloat32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedIsNanFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedIsNanFloat32x4 x y mask) - // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [3] x y (VPMOVVec32x4ToM mask))) + // match: (LessMaskedInt32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [1] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, 
OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39183,21 +39146,21 @@ func rewriteValueAMD64_OpMaskedIsNanFloat32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedIsNanFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedIsNanFloat32x8 x y mask) - // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [3] x y (VPMOVVec32x8ToM mask))) + // match: (LessMaskedInt32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [1] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39205,21 +39168,21 @@ func rewriteValueAMD64_OpMaskedIsNanFloat32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedIsNanFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedIsNanFloat64x2 x y mask) - // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [3] x y (VPMOVVec64x2ToM mask))) + // match: (LessMaskedInt64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [1] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39227,21 +39190,21 @@ func rewriteValueAMD64_OpMaskedIsNanFloat64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedIsNanFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedIsNanFloat64x4 x y mask) - // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [3] x y (VPMOVVec64x4ToM mask))) + // match: (LessMaskedInt64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [1] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39249,21 +39212,21 @@ func rewriteValueAMD64_OpMaskedIsNanFloat64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedIsNanFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedIsNanFloat64x8 x y mask) - // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [3] x y (VPMOVVec64x8ToM mask))) + // match: (LessMaskedInt64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [1] x 
y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39271,153 +39234,87 @@ func rewriteValueAMD64_OpMaskedIsNanFloat64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedLessEqualFloat32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualFloat32x16 x y mask) - // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [2] x y (VPMOVVec32x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedLessEqualFloat32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualFloat32x4 x y mask) - // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [2] x y (VPMOVVec32x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedLessEqualFloat32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualFloat32x8 x y mask) - // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [2] x y (VPMOVVec32x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedLessEqualFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedInt8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualFloat64x2 x y mask) - // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [2] x y (VPMOVVec64x2ToM mask))) + // match: (LessMaskedInt8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [1] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedInt8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualFloat64x4 x y mask) - // 
result: (VPMOVMToVec64x4 (VCMPPDMasked256 [2] x y (VPMOVVec64x4ToM mask))) + // match: (LessMaskedInt8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [1] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedInt8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualFloat64x8 x y mask) - // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [2] x y (VPMOVVec64x8ToM mask))) + // match: (LessMaskedInt8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [1] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt16x16(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [2] x y (VPMOVVec16x16ToM mask))) + // match: (LessMaskedUint16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [1] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39425,21 +39322,21 @@ func rewriteValueAMD64_OpMaskedLessEqualInt16x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt16x32(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedUint16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [2] x y (VPMOVVec16x32ToM mask))) + // match: (LessMaskedUint16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [1] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39447,21 +39344,21 @@ func rewriteValueAMD64_OpMaskedLessEqualInt16x32(v *Value) bool { return true } } -func 
rewriteValueAMD64_OpMaskedLessEqualInt16x8(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedUint16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [2] x y (VPMOVVec16x8ToM mask))) + // match: (LessMaskedUint16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [1] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39469,21 +39366,21 @@ func rewriteValueAMD64_OpMaskedLessEqualInt16x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt32x16(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [2] x y (VPMOVVec32x16ToM mask))) + // match: (LessMaskedUint32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [1] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39491,21 +39388,21 @@ func rewriteValueAMD64_OpMaskedLessEqualInt32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt32x4(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [2] x y (VPMOVVec32x4ToM mask))) + // match: (LessMaskedUint32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [1] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39513,21 +39410,21 @@ func rewriteValueAMD64_OpMaskedLessEqualInt32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt32x8(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [2] x y (VPMOVVec32x8ToM mask))) + // match: (LessMaskedUint32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [1] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39535,21 +39432,21 @@ func rewriteValueAMD64_OpMaskedLessEqualInt32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt64x2(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [2] x y (VPMOVVec64x2ToM mask))) + // match: (LessMaskedUint64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [1] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39557,21 +39454,21 @@ func rewriteValueAMD64_OpMaskedLessEqualInt64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt64x4(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [2] x y (VPMOVVec64x4ToM mask))) + // match: (LessMaskedUint64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [1] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39579,21 +39476,21 @@ func rewriteValueAMD64_OpMaskedLessEqualInt64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt64x8(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [2] x y (VPMOVVec64x8ToM mask))) + // match: (LessMaskedUint64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [1] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39601,21 +39498,21 @@ func rewriteValueAMD64_OpMaskedLessEqualInt64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt8x16(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [2] x y (VPMOVVec8x16ToM mask))) + // match: (LessMaskedUint8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [1] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPCMPBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39623,21 +39520,21 @@ func rewriteValueAMD64_OpMaskedLessEqualInt8x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt8x32(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [2] x y (VPMOVVec8x32ToM mask))) + // match: (LessMaskedUint8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [1] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39645,21 +39542,21 @@ func rewriteValueAMD64_OpMaskedLessEqualInt8x32(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt8x64(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedUint8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [2] x y (VPMOVVec8x64ToM mask))) + // match: (LessMaskedUint8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [1] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39667,936 +39564,1083 @@ func rewriteValueAMD64_OpMaskedLessEqualInt8x64(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [2] x y (VPMOVVec16x16ToM mask))) + // match: (LessUint16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPUW256 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [2] x y (VPMOVVec16x32ToM mask))) + // match: (LessUint16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPUW512 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 
v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessUint16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [2] x y (VPMOVVec16x8ToM mask))) + // match: (LessUint16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPUW128 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [2] x y (VPMOVVec32x16ToM mask))) + // match: (LessUint32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [2] x y (VPMOVVec32x4ToM mask))) + // match: (LessUint32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPUD128 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [2] x y (VPMOVVec32x8ToM mask))) + // match: (LessUint32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPUD256 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - 
v0.AddArg3(x, y, v1) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessUint64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [2] x y (VPMOVVec64x2ToM mask))) + // match: (LessUint64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPUQ128 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint64x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [2] x y (VPMOVVec64x4ToM mask))) + // match: (LessUint64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPUQ256 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [2] x y (VPMOVVec64x8ToM mask))) + // match: (LessUint64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPUQ512 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint8x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessUint8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [2] x y (VPMOVVec8x16ToM mask))) + // match: (LessUint8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPUB128 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint8x32(v *Value) bool { - 
v_2 := v.Args[2] +func rewriteValueAMD64_OpLessUint8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [2] x y (VPMOVVec8x32ToM mask))) + // match: (LessUint8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPUB256 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint8x64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessUint8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [2] x y (VPMOVVec8x64ToM mask))) + // match: (LessUint8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessFloat32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLoad(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Load ptr mem) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (MOVQload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64MOVQload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is32BitInt(t) + // result: (MOVLload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64MOVLload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is16BitInt(t) + // result: (MOVWload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64MOVWload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: (t.IsBoolean() || is8BitInt(t)) + // result: (MOVBload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.IsBoolean() || is8BitInt(t)) { + break + } + v.reset(OpAMD64MOVBload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is32BitFloat(t) + // result: (MOVSSload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is32BitFloat(t)) { + break + } + v.reset(OpAMD64MOVSSload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is64BitFloat(t) + // result: (MOVSDload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is64BitFloat(t)) { + break + } + v.reset(OpAMD64MOVSDload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: t.Size() == 16 + // result: (VMOVDQUload128 ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.Size() == 16) { + break + } + v.reset(OpAMD64VMOVDQUload128) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: t.Size() == 32 + // 
result: (VMOVDQUload256 ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.Size() == 32) { + break + } + v.reset(OpAMD64VMOVDQUload256) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: t.Size() == 64 + // result: (VMOVDQUload512 ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.Size() == 64) { + break + } + v.reset(OpAMD64VMOVDQUload512) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpLocalAddr(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessFloat32x16 x y mask) - // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [1] x y (VPMOVVec32x16ToM mask))) + // match: (LocalAddr {sym} base mem) + // cond: t.Elem().HasPointers() + // result: (LEAQ {sym} (SPanchored base mem)) + for { + t := v.Type + sym := auxToSym(v.Aux) + base := v_0 + mem := v_1 + if !(t.Elem().HasPointers()) { + break + } + v.reset(OpAMD64LEAQ) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpSPanchored, typ.Uintptr) + v0.AddArg2(base, mem) + v.AddArg(v0) + return true + } + // match: (LocalAddr {sym} base _) + // cond: !t.Elem().HasPointers() + // result: (LEAQ {sym} base) + for { + t := v.Type + sym := auxToSym(v.Aux) + base := v_0 + if !(!t.Elem().HasPointers()) { + break + } + v.reset(OpAMD64LEAQ) + v.Aux = symToAux(sym) + v.AddArg(base) + return true + } + return false +} +func rewriteValueAMD64_OpLsh16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh16x16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh16x16 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedLessFloat32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLsh16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessFloat32x4 x y mask) - // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [1] x y (VPMOVVec32x4ToM mask))) + // match: (Lsh16x32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + 
v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh16x32 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedLessFloat32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLsh16x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessFloat32x8 x y mask) - // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [1] x y (VPMOVVec32x8ToM mask))) + // match: (Lsh16x64 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh16x64 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedLessFloat64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLsh16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessFloat64x2 x y mask) - // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [1] x y (VPMOVVec64x2ToM mask))) + // match: (Lsh16x8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedLessFloat64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessFloat64x4 x y mask) - // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [1] x y (VPMOVVec64x4ToM mask))) + // match: (Lsh16x8 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedLessFloat64x8(v *Value) bool { - v_2 := v.Args[2] 
+func rewriteValueAMD64_OpLsh32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessFloat64x8 x y mask) - // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [1] x y (VPMOVVec64x8ToM mask))) + // match: (Lsh32x16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedLessInt16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessInt16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [1] x y (VPMOVVec16x16ToM mask))) + // match: (Lsh32x16 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedLessInt16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLsh32x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessInt16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [1] x y (VPMOVVec16x32ToM mask))) + // match: (Lsh32x32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedLessInt16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessInt16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [1] x y (VPMOVVec16x8ToM mask))) + // match: (Lsh32x32 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if 
!(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedLessInt32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLsh32x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessInt32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [1] x y (VPMOVVec32x16ToM mask))) + // match: (Lsh32x64 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedLessInt32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessInt32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [1] x y (VPMOVVec32x4ToM mask))) + // match: (Lsh32x64 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedLessInt32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLsh32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessInt32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [1] x y (VPMOVVec32x8ToM mask))) + // match: (Lsh32x8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedLessInt64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessInt64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [1] x y (VPMOVVec64x2ToM mask))) + // match: (Lsh32x8 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) 
- v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedLessInt64x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLsh64x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessInt64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [1] x y (VPMOVVec64x4ToM mask))) + // match: (Lsh64x16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPWconst y [64]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedLessInt64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessInt64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [1] x y (VPMOVVec64x8ToM mask))) + // match: (Lsh64x16 x y) + // cond: shiftIsBounded(v) + // result: (SHLQ x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLQ) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedLessInt8x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLsh64x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessInt8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [1] x y (VPMOVVec8x16ToM mask))) + // match: (Lsh64x32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPLconst y [64]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedLessInt8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessInt8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [1] x y (VPMOVVec8x32ToM mask))) + // match: (Lsh64x32 x y) + // cond: shiftIsBounded(v) + 
// result: (SHLQ x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLQ) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedLessInt8x64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLsh64x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessInt8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [1] x y (VPMOVVec8x64ToM mask))) + // match: (Lsh64x64 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPQconst y [64]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedLessUint16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessUint16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [1] x y (VPMOVVec16x16ToM mask))) + // match: (Lsh64x64 x y) + // cond: shiftIsBounded(v) + // result: (SHLQ x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLQ) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedLessUint16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLsh64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessUint16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [1] x y (VPMOVVec16x32ToM mask))) + // match: (Lsh64x8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPBconst y [64]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedLessUint16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // 
match: (MaskedLessUint16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [1] x y (VPMOVVec16x8ToM mask))) + // match: (Lsh64x8 x y) + // cond: shiftIsBounded(v) + // result: (SHLQ x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLQ) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedLessUint32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLsh8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessUint32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [1] x y (VPMOVVec32x16ToM mask))) + // match: (Lsh8x16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh8x16 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedLessUint32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLsh8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessUint32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [1] x y (VPMOVVec32x4ToM mask))) + // match: (Lsh8x32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedLessUint32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessUint32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [1] x y (VPMOVVec32x8ToM mask))) + // match: (Lsh8x32 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedLessUint64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLsh8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessUint64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [1] x y (VPMOVVec64x2ToM mask))) + // match: (Lsh8x64 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedLessUint64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessUint64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [1] x y (VPMOVVec64x4ToM mask))) + // match: (Lsh8x64 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedLessUint64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLsh8x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessUint64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [1] x y (VPMOVVec64x8ToM mask))) + // match: (Lsh8x8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedLessUint8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessUint8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [1] x y (VPMOVVec8x16ToM mask))) + // match: (Lsh8x8 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := 
v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedLessUint8x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMax32F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessUint8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [1] x y (VPMOVVec8x32ToM mask))) + // match: (Max32F x y) + // result: (Neg32F (Min32F (Neg32F x) (Neg32F y))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v.reset(OpNeg32F) + v.Type = t + v0 := b.NewValue0(v.Pos, OpMin32F, t) + v1 := b.NewValue0(v.Pos, OpNeg32F, t) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpNeg32F, t) + v2.AddArg(y) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessUint8x64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMax64F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessUint8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [1] x y (VPMOVVec8x64ToM mask))) + // match: (Max64F x y) + // result: (Neg64F (Min64F (Neg64F x) (Neg64F y))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v.reset(OpNeg64F) + v.Type = t + v0 := b.NewValue0(v.Pos, OpMin64F, t) + v1 := b.NewValue0(v.Pos, OpNeg64F, t) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpNeg64F, t) + v2.AddArg(y) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedMaxFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxFloat32x16 x y mask) + // match: (MaxMaskedFloat32x16 x y mask) // result: (VMAXPSMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 @@ -40609,12 +40653,12 @@ func rewriteValueAMD64_OpMaskedMaxFloat32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedMaxFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedFloat32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxFloat32x4 x y mask) + // match: (MaxMaskedFloat32x4 x y mask) // result: (VMAXPSMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 @@ -40627,12 +40671,12 @@ func rewriteValueAMD64_OpMaskedMaxFloat32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedMaxFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedFloat32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxFloat32x8 x y mask) + // match: (MaxMaskedFloat32x8 x y mask) // result: (VMAXPSMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 @@ -40645,12 +40689,12 @@ func 
rewriteValueAMD64_OpMaskedMaxFloat32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedMaxFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedFloat64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxFloat64x2 x y mask) + // match: (MaxMaskedFloat64x2 x y mask) // result: (VMAXPDMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 @@ -40663,12 +40707,12 @@ func rewriteValueAMD64_OpMaskedMaxFloat64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedMaxFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedFloat64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxFloat64x4 x y mask) + // match: (MaxMaskedFloat64x4 x y mask) // result: (VMAXPDMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 @@ -40681,12 +40725,12 @@ func rewriteValueAMD64_OpMaskedMaxFloat64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedMaxFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedFloat64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxFloat64x8 x y mask) + // match: (MaxMaskedFloat64x8 x y mask) // result: (VMAXPDMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 @@ -40699,12 +40743,12 @@ func rewriteValueAMD64_OpMaskedMaxFloat64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedMaxInt16x16(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt16x16 x y mask) + // match: (MaxMaskedInt16x16 x y mask) // result: (VPMAXSWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 @@ -40717,12 +40761,12 @@ func rewriteValueAMD64_OpMaskedMaxInt16x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedMaxInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt16x32 x y mask) + // match: (MaxMaskedInt16x32 x y mask) // result: (VPMAXSWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 @@ -40735,12 +40779,12 @@ func rewriteValueAMD64_OpMaskedMaxInt16x32(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedMaxInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt16x8 x y mask) + // match: (MaxMaskedInt16x8 x y mask) // result: (VPMAXSWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 @@ -40753,12 +40797,12 @@ func rewriteValueAMD64_OpMaskedMaxInt16x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedMaxInt32x16(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt32x16 x y mask) + // match: (MaxMaskedInt32x16 x y mask) // result: (VPMAXSDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 @@ -40771,12 +40815,12 @@ func rewriteValueAMD64_OpMaskedMaxInt32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedMaxInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt32x4 x y mask) + // match: (MaxMaskedInt32x4 x y mask) // result: (VPMAXSDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 @@ -40789,12 +40833,12 @@ func 
rewriteValueAMD64_OpMaskedMaxInt32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedMaxInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt32x8 x y mask) + // match: (MaxMaskedInt32x8 x y mask) // result: (VPMAXSDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 @@ -40807,12 +40851,12 @@ func rewriteValueAMD64_OpMaskedMaxInt32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedMaxInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt64x2 x y mask) + // match: (MaxMaskedInt64x2 x y mask) // result: (VPMAXSQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 @@ -40825,12 +40869,12 @@ func rewriteValueAMD64_OpMaskedMaxInt64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedMaxInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt64x4 x y mask) + // match: (MaxMaskedInt64x4 x y mask) // result: (VPMAXSQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 @@ -40843,12 +40887,12 @@ func rewriteValueAMD64_OpMaskedMaxInt64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedMaxInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt64x8 x y mask) + // match: (MaxMaskedInt64x8 x y mask) // result: (VPMAXSQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 @@ -40861,12 +40905,12 @@ func rewriteValueAMD64_OpMaskedMaxInt64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedMaxInt8x16(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedInt8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt8x16 x y mask) + // match: (MaxMaskedInt8x16 x y mask) // result: (VPMAXSBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 @@ -40879,12 +40923,12 @@ func rewriteValueAMD64_OpMaskedMaxInt8x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedMaxInt8x32(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedInt8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt8x32 x y mask) + // match: (MaxMaskedInt8x32 x y mask) // result: (VPMAXSBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 @@ -40897,12 +40941,12 @@ func rewriteValueAMD64_OpMaskedMaxInt8x32(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedMaxInt8x64(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedInt8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt8x64 x y mask) + // match: (MaxMaskedInt8x64 x y mask) // result: (VPMAXSBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 @@ -40915,12 +40959,12 @@ func rewriteValueAMD64_OpMaskedMaxInt8x64(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedMaxUint16x16(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxUint16x16 x y mask) + // match: (MaxMaskedUint16x16 x y mask) // result: (VPMAXUWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 @@ -40933,12 +40977,12 @@ func rewriteValueAMD64_OpMaskedMaxUint16x16(v *Value) 
bool { return true } } -func rewriteValueAMD64_OpMaskedMaxUint16x32(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedUint16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxUint16x32 x y mask) + // match: (MaxMaskedUint16x32 x y mask) // result: (VPMAXUWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 @@ -40951,12 +40995,12 @@ func rewriteValueAMD64_OpMaskedMaxUint16x32(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedMaxUint16x8(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedUint16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxUint16x8 x y mask) + // match: (MaxMaskedUint16x8 x y mask) // result: (VPMAXUWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 @@ -40969,12 +41013,12 @@ func rewriteValueAMD64_OpMaskedMaxUint16x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedMaxUint32x16(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxUint32x16 x y mask) + // match: (MaxMaskedUint32x16 x y mask) // result: (VPMAXUDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 @@ -40987,12 +41031,12 @@ func rewriteValueAMD64_OpMaskedMaxUint32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedMaxUint32x4(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxUint32x4 x y mask) + // match: (MaxMaskedUint32x4 x y mask) // result: (VPMAXUDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 @@ -41005,12 +41049,12 @@ func rewriteValueAMD64_OpMaskedMaxUint32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedMaxUint32x8(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxUint32x8 x y mask) + // match: (MaxMaskedUint32x8 x y mask) // result: (VPMAXUDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 @@ -41023,12843 +41067,12799 @@ func rewriteValueAMD64_OpMaskedMaxUint32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedMaxUint64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMaxUint64x2 x y mask) - // result: (VPMAXUQMasked128 x y (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMaxUint64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMaxUint64x4 x y mask) - // result: (VPMAXUQMasked256 x y (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMaxUint64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMaxUint64x8 x y mask) - // result: (VPMAXUQMasked512 x y (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - 
return true - } -} -func rewriteValueAMD64_OpMaskedMaxUint8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMaxUint8x16 x y mask) - // result: (VPMAXUBMasked128 x y (VPMOVVec8x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMaxUint8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMaxUint8x32 x y mask) - // result: (VPMAXUBMasked256 x y (VPMOVVec8x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMaxUint8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMaxUint8x64 x y mask) - // result: (VPMAXUBMasked512 x y (VPMOVVec8x64ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMinFloat32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMinFloat32x16 x y mask) - // result: (VMINPSMasked512 x y (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMINPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMinFloat32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMinFloat32x4 x y mask) - // result: (VMINPSMasked128 x y (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMINPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMinFloat32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMinFloat32x8 x y mask) - // result: (VMINPSMasked256 x y (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMINPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMinFloat64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMinFloat64x2 x y mask) - // result: (VMINPDMasked128 x y (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMINPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMinFloat64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMinFloat64x4 x y mask) - // result: (VMINPDMasked256 x y (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMINPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func 
rewriteValueAMD64_OpMaskedMinFloat64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMinFloat64x8 x y mask) - // result: (VMINPDMasked512 x y (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMINPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMinInt16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMinInt16x16 x y mask) - // result: (VPMINSWMasked256 x y (VPMOVVec16x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMinInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMinInt16x32 x y mask) - // result: (VPMINSWMasked512 x y (VPMOVVec16x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMinInt16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMinInt16x8 x y mask) - // result: (VPMINSWMasked128 x y (VPMOVVec16x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMinInt32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMinInt32x16 x y mask) - // result: (VPMINSDMasked512 x y (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINSDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMinInt32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMinInt32x4 x y mask) - // result: (VPMINSDMasked128 x y (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINSDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMinInt32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMinInt32x8 x y mask) - // result: (VPMINSDMasked256 x y (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINSDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMinInt64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMinInt64x2 x y mask) - // result: (VPMINSQMasked128 x y (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINSQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func 
rewriteValueAMD64_OpMaskedMinInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinInt64x4 x y mask) - // result: (VPMINSQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (MaxMaskedUint64x2 x y mask) + // result: (VPMAXUQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINSQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPMAXUQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinInt64x8 x y mask) - // result: (VPMINSQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (MaxMaskedUint64x4 x y mask) + // result: (VPMAXUQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINSQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v.reset(OpAMD64VPMAXUQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinInt8x16(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinInt8x16 x y mask) - // result: (VPMINSBMasked128 x y (VPMOVVec8x16ToM mask)) + // match: (MaxMaskedUint64x8 x y mask) + // result: (VPMAXUQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINSBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v.reset(OpAMD64VPMAXUQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinInt8x32(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinInt8x32 x y mask) - // result: (VPMINSBMasked256 x y (VPMOVVec8x32ToM mask)) + // match: (MaxMaskedUint8x16 x y mask) + // result: (VPMAXUBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINSBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v.reset(OpAMD64VPMAXUBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinInt8x64(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinInt8x64 x y mask) - // result: (VPMINSBMasked512 x y (VPMOVVec8x64ToM mask)) + // match: (MaxMaskedUint8x32 x y mask) + // result: (VPMAXUBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINSBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v.reset(OpAMD64VPMAXUBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinUint16x16(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedUint8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := 
v.Block - // match: (MaskedMinUint16x16 x y mask) - // result: (VPMINUWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (MaxMaskedUint8x64 x y mask) + // result: (VPMAXUBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINUWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v.reset(OpAMD64VPMAXUBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinUint16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMin32F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinUint16x32 x y mask) - // result: (VPMINUWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (Min32F x y) + // result: (POR (MINSS (MINSS x y) x) (MINSS x y)) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINUWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64POR) + v0 := b.NewValue0(v.Pos, OpAMD64MINSS, t) + v1 := b.NewValue0(v.Pos, OpAMD64MINSS, t) + v1.AddArg2(x, y) + v0.AddArg2(v1, x) + v.AddArg2(v0, v1) return true } } -func rewriteValueAMD64_OpMaskedMinUint16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMin64F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinUint16x8 x y mask) - // result: (VPMINUWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (Min64F x y) + // result: (POR (MINSD (MINSD x y) x) (MINSD x y)) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINUWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64POR) + v0 := b.NewValue0(v.Pos, OpAMD64MINSD, t) + v1 := b.NewValue0(v.Pos, OpAMD64MINSD, t) + v1.AddArg2(x, y) + v0.AddArg2(v1, x) + v.AddArg2(v0, v1) return true } } -func rewriteValueAMD64_OpMaskedMinUint32x16(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinUint32x16 x y mask) - // result: (VPMINUDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (MinMaskedFloat32x16 x y mask) + // result: (VMINPSMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINUDMasked512) + v.reset(OpAMD64VMINPSMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinUint32x4(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedFloat32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinUint32x4 x y mask) - // result: (VPMINUDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (MinMaskedFloat32x4 x y mask) + // result: (VMINPSMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINUDMasked128) + v.reset(OpAMD64VMINPSMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinUint32x8(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedFloat32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinUint32x8 x y mask) - // result: (VPMINUDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (MinMaskedFloat32x8 x y mask) + // result: 
(VMINPSMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINUDMasked256) + v.reset(OpAMD64VMINPSMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinUint64x2(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedFloat64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinUint64x2 x y mask) - // result: (VPMINUQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (MinMaskedFloat64x2 x y mask) + // result: (VMINPDMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINUQMasked128) + v.reset(OpAMD64VMINPDMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinUint64x4(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedFloat64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinUint64x4 x y mask) - // result: (VPMINUQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (MinMaskedFloat64x4 x y mask) + // result: (VMINPDMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINUQMasked256) + v.reset(OpAMD64VMINPDMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinUint64x8(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedFloat64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinUint64x8 x y mask) - // result: (VPMINUQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (MinMaskedFloat64x8 x y mask) + // result: (VMINPDMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINUQMasked512) + v.reset(OpAMD64VMINPDMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinUint8x16(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinUint8x16 x y mask) - // result: (VPMINUBMasked128 x y (VPMOVVec8x16ToM mask)) + // match: (MinMaskedInt16x16 x y mask) + // result: (VPMINSWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINUBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v.reset(OpAMD64VPMINSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinUint8x32(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinUint8x32 x y mask) - // result: (VPMINUBMasked256 x y (VPMOVVec8x32ToM mask)) + // match: (MinMaskedInt16x32 x y mask) + // result: (VPMINSWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINUBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v.reset(OpAMD64VPMINSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinUint8x64(v *Value) bool { +func 
rewriteValueAMD64_OpMinMaskedInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinUint8x64 x y mask) - // result: (VPMINUBMasked512 x y (VPMOVVec8x64ToM mask)) + // match: (MinMaskedInt16x8 x y mask) + // result: (VPMINSWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINUBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v.reset(OpAMD64VPMINSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulByPowOf2Float32x16(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulByPowOf2Float32x16 x y mask) - // result: (VSCALEFPSMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (MinMaskedInt32x16 x y mask) + // result: (VPMINSDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VSCALEFPSMasked512) + v.reset(OpAMD64VPMINSDMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulByPowOf2Float32x4(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulByPowOf2Float32x4 x y mask) - // result: (VSCALEFPSMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (MinMaskedInt32x4 x y mask) + // result: (VPMINSDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VSCALEFPSMasked128) + v.reset(OpAMD64VPMINSDMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulByPowOf2Float32x8(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulByPowOf2Float32x8 x y mask) - // result: (VSCALEFPSMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (MinMaskedInt32x8 x y mask) + // result: (VPMINSDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VSCALEFPSMasked256) + v.reset(OpAMD64VPMINSDMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulByPowOf2Float64x2(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulByPowOf2Float64x2 x y mask) - // result: (VSCALEFPDMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (MinMaskedInt64x2 x y mask) + // result: (VPMINSQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VSCALEFPDMasked128) + v.reset(OpAMD64VPMINSQMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulByPowOf2Float64x4(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulByPowOf2Float64x4 x y mask) - // result: (VSCALEFPDMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (MinMaskedInt64x4 x y mask) + // result: (VPMINSQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y 
:= v_1 mask := v_2 - v.reset(OpAMD64VSCALEFPDMasked256) + v.reset(OpAMD64VPMINSQMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulByPowOf2Float64x8(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulByPowOf2Float64x8 x y mask) - // result: (VSCALEFPDMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (MinMaskedInt64x8 x y mask) + // result: (VPMINSQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VSCALEFPDMasked512) + v.reset(OpAMD64VPMINSQMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulEvenWidenInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedInt8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulEvenWidenInt64x2 x y mask) - // result: (VPMULDQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (MinMaskedInt8x16 x y mask) + // result: (VPMINSBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULDQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPMINSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulEvenWidenInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedInt8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulEvenWidenInt64x4 x y mask) - // result: (VPMULDQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (MinMaskedInt8x32 x y mask) + // result: (VPMINSBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULDQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPMINSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulEvenWidenInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedInt8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulEvenWidenInt64x8 x y mask) - // result: (VPMULDQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (MinMaskedInt8x64 x y mask) + // result: (VPMINSBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULDQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v.reset(OpAMD64VPMINSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulEvenWidenUint64x2(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulEvenWidenUint64x2 x y mask) - // result: (VPMULUDQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (MinMaskedUint16x16 x y mask) + // result: (VPMINUWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULUDQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPMINUWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, 
types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulEvenWidenUint64x4(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedUint16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulEvenWidenUint64x4 x y mask) - // result: (VPMULUDQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (MinMaskedUint16x32 x y mask) + // result: (VPMINUWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULUDQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPMINUWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulEvenWidenUint64x8(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedUint16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulEvenWidenUint64x8 x y mask) - // result: (VPMULUDQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (MinMaskedUint16x8 x y mask) + // result: (VPMINUWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULUDQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v.reset(OpAMD64VPMINUWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulFloat32x16 x y mask) - // result: (VMULPSMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (MinMaskedUint32x16 x y mask) + // result: (VPMINUDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VMULPSMasked512) + v.reset(OpAMD64VPMINUDMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulFloat32x4 x y mask) - // result: (VMULPSMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (MinMaskedUint32x4 x y mask) + // result: (VPMINUDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VMULPSMasked128) + v.reset(OpAMD64VPMINUDMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulFloat32x8 x y mask) - // result: (VMULPSMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (MinMaskedUint32x8 x y mask) + // result: (VPMINUDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VMULPSMasked256) + v.reset(OpAMD64VPMINUDMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulFloat64x2 x y mask) - // result: 
(VMULPDMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (MinMaskedUint64x2 x y mask) + // result: (VPMINUQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VMULPDMasked128) + v.reset(OpAMD64VPMINUQMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulFloat64x4 x y mask) - // result: (VMULPDMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (MinMaskedUint64x4 x y mask) + // result: (VPMINUQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VMULPDMasked256) + v.reset(OpAMD64VPMINUQMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulFloat64x8 x y mask) - // result: (VMULPDMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (MinMaskedUint64x8 x y mask) + // result: (VPMINUQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VMULPDMasked512) + v.reset(OpAMD64VPMINUQMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulHighInt16x16(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulHighInt16x16 x y mask) - // result: (VPMULHWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (MinMaskedUint8x16 x y mask) + // result: (VPMINUBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULHWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v.reset(OpAMD64VPMINUBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulHighInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulHighInt16x32 x y mask) - // result: (VPMULHWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (MinMaskedUint8x32 x y mask) + // result: (VPMINUBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULHWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v.reset(OpAMD64VPMINUBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulHighInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedUint8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulHighInt16x8 x y mask) - // result: (VPMULHWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (MinMaskedUint8x64 x y mask) + // result: (VPMINUBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULHWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v.reset(OpAMD64VPMINUBMasked512) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulHighUint16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMod16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulHighUint16x16 x y mask) - // result: (VPMULHUWMasked256 x y (VPMOVVec16x16ToM mask)) + typ := &b.Func.Config.Types + // match: (Mod16 [a] x y) + // result: (Select1 (DIVW [a] x y)) for { + a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULHUWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) + v0.AuxInt = boolToAuxInt(a) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedMulHighUint16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMod16u(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulHighUint16x32 x y mask) - // result: (VPMULHUWMasked512 x y (VPMOVVec16x32ToM mask)) + typ := &b.Func.Config.Types + // match: (Mod16u x y) + // result: (Select1 (DIVWU x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULHUWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedMulHighUint16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMod32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulHighUint16x8 x y mask) - // result: (VPMULHUWMasked128 x y (VPMOVVec16x8ToM mask)) + typ := &b.Func.Config.Types + // match: (Mod32 [a] x y) + // result: (Select1 (DIVL [a] x y)) for { + a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULHUWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) + v0.AuxInt = boolToAuxInt(a) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedMulLowInt16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMod32u(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulLowInt16x16 x y mask) - // result: (VPMULLWMasked256 x y (VPMOVVec16x16ToM mask)) + typ := &b.Func.Config.Types + // match: (Mod32u x y) + // result: (Select1 (DIVLU x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32)) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedMulLowInt16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMod64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulLowInt16x32 x y mask) - // result: (VPMULLWMasked512 x y (VPMOVVec16x32ToM mask)) + typ := &b.Func.Config.Types + // match: (Mod64 [a] x y) + // result: (Select1 (DIVQ [a] x y)) for { + a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLWMasked512) - v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64)) + v0.AuxInt = boolToAuxInt(a) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedMulLowInt16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMod64u(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulLowInt16x8 x y mask) - // result: (VPMULLWMasked128 x y (VPMOVVec16x8ToM mask)) + typ := &b.Func.Config.Types + // match: (Mod64u x y) + // result: (Select1 (DIVQU x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedMulLowInt32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMod8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulLowInt32x16 x y mask) - // result: (VPMULLDMasked512 x y (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (Mod8 x y) + // result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y))) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) + v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedMulLowInt32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMod8u(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulLowInt32x4 x y mask) - // result: (VPMULLDMasked128 x y (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (Mod8u x y) + // result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) + v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedMulLowInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMove(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulLowInt32x8 x y mask) - // result: (VPMULLDMasked256 x y (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (Move [0] _ _ mem) + // result: mem for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if auxIntToInt64(v.AuxInt) != 0 { + break + } + mem := v_2 + v.copyOf(mem) + return true + } + // match: (Move [1] dst src mem) + // result: (MOVBstore dst (MOVBload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 1 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + 
v.reset(OpAMD64MOVBstore) + v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [2] dst src mem) + // result: (MOVWstore dst (MOVWload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 2 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVWstore) + v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [4] dst src mem) + // result: (MOVLstore dst (MOVLload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 4 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVLstore) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [8] dst src mem) + // result: (MOVQstore dst (MOVQload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 8 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVQstore) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [16] dst src mem) + // result: (MOVOstore dst (MOVOload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 16 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVOstore) + v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [32] dst src mem) + // result: (Move [16] (OffPtr dst [16]) (OffPtr src [16]) (Move [16] dst src mem)) + for { + if auxIntToInt64(v.AuxInt) != 32 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(16) + v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) + v0.AuxInt = int64ToAuxInt(16) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) + v1.AuxInt = int64ToAuxInt(16) + v1.AddArg(src) + v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) + v2.AuxInt = int64ToAuxInt(16) + v2.AddArg3(dst, src, mem) + v.AddArg3(v0, v1, v2) + return true + } + // match: (Move [48] dst src mem) + // result: (Move [32] (OffPtr dst [16]) (OffPtr src [16]) (Move [16] dst src mem)) + for { + if auxIntToInt64(v.AuxInt) != 48 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(32) + v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) + v0.AuxInt = int64ToAuxInt(16) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) + v1.AuxInt = int64ToAuxInt(16) + v1.AddArg(src) + v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) + v2.AuxInt = int64ToAuxInt(16) + v2.AddArg3(dst, src, mem) + v.AddArg3(v0, v1, v2) + return true + } + // match: (Move [64] dst src mem) + // result: (Move [32] (OffPtr dst [32]) (OffPtr src [32]) (Move [32] dst src mem)) + for { + if auxIntToInt64(v.AuxInt) != 64 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(32) + v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) + v0.AuxInt = int64ToAuxInt(32) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) + v1.AuxInt = int64ToAuxInt(32) + v1.AddArg(src) + v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) + v2.AuxInt = int64ToAuxInt(32) + v2.AddArg3(dst, src, mem) + v.AddArg3(v0, v1, v2) + return true + } + // match: (Move [3] dst src mem) + // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 3 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + 
v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) + v0.AuxInt = int32ToAuxInt(2) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedMulLowInt64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulLowInt64x2 x y mask) - // result: (VPMULLQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (Move [5] dst src mem) + // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if auxIntToInt64(v.AuxInt) != 5 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(4) + v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) + v0.AuxInt = int32ToAuxInt(4) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedMulLowInt64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulLowInt64x4 x y mask) - // result: (VPMULLQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (Move [6] dst src mem) + // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if auxIntToInt64(v.AuxInt) != 6 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVWstore) + v.AuxInt = int32ToAuxInt(4) + v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v0.AuxInt = int32ToAuxInt(4) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedMulLowInt64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulLowInt64x8 x y mask) - // result: (VPMULLQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (Move [7] dst src mem) + // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem)) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if auxIntToInt64(v.AuxInt) != 7 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVLstore) + v.AuxInt = int32ToAuxInt(3) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v0.AuxInt = int32ToAuxInt(3) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedNotEqualFloat32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block 
- typ := &b.Func.Config.Types - // match: (MaskedNotEqualFloat32x16 x y mask) - // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [4] x y (VPMOVVec32x16ToM mask))) + // match: (Move [9] dst src mem) + // result: (MOVBstore [8] dst (MOVBload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if auxIntToInt64(v.AuxInt) != 9 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) + v0.AuxInt = int32ToAuxInt(8) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedNotEqualFloat32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualFloat32x4 x y mask) - // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [4] x y (VPMOVVec32x4ToM mask))) + // match: (Move [10] dst src mem) + // result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if auxIntToInt64(v.AuxInt) != 10 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVWstore) + v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v0.AuxInt = int32ToAuxInt(8) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedNotEqualFloat32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualFloat32x8 x y mask) - // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [4] x y (VPMOVVec32x8ToM mask))) + // match: (Move [11] dst src mem) + // result: (MOVLstore [7] dst (MOVLload [7] src mem) (MOVQstore dst (MOVQload src mem) mem)) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if auxIntToInt64(v.AuxInt) != 11 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVLstore) + v.AuxInt = int32ToAuxInt(7) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v0.AuxInt = int32ToAuxInt(7) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedNotEqualFloat64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := 
&b.Func.Config.Types - // match: (MaskedNotEqualFloat64x2 x y mask) - // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [4] x y (VPMOVVec64x2ToM mask))) + // match: (Move [12] dst src mem) + // result: (MOVLstore [8] dst (MOVLload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if auxIntToInt64(v.AuxInt) != 12 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVLstore) + v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v0.AuxInt = int32ToAuxInt(8) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedNotEqualFloat64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualFloat64x4 x y mask) - // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [4] x y (VPMOVVec64x4ToM mask))) + // match: (Move [s] dst src mem) + // cond: s >= 13 && s <= 15 + // result: (MOVQstore [int32(s-8)] dst (MOVQload [int32(s-8)] src mem) (MOVQstore dst (MOVQload src mem) mem)) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s >= 13 && s <= 15) { + break + } + v.reset(OpAMD64MOVQstore) + v.AuxInt = int32ToAuxInt(int32(s - 8)) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v0.AuxInt = int32ToAuxInt(int32(s - 8)) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedNotEqualFloat64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualFloat64x8 x y mask) - // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [4] x y (VPMOVVec64x8ToM mask))) + // match: (Move [s] dst src mem) + // cond: s > 16 && s%16 != 0 && s%16 <= 8 + // result: (Move [s-s%16] (OffPtr dst [s%16]) (OffPtr src [s%16]) (MOVQstore dst (MOVQload src mem) mem)) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s > 16 && s%16 != 0 && s%16 <= 8) { + break + } + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(s - s%16) + v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) + v0.AuxInt = int64ToAuxInt(s % 16) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) + v1.AuxInt = int64ToAuxInt(s % 16) + v1.AddArg(src) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v3 := b.NewValue0(v.Pos, 
OpAMD64MOVQload, typ.UInt64) + v3.AddArg2(src, mem) + v2.AddArg3(dst, v3, mem) + v.AddArg3(v0, v1, v2) return true } -} -func rewriteValueAMD64_OpMaskedNotEqualInt16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [4] x y (VPMOVVec16x16ToM mask))) + // match: (Move [s] dst src mem) + // cond: s > 16 && s%16 != 0 && s%16 > 8 + // result: (Move [s-s%16] (OffPtr dst [s%16]) (OffPtr src [s%16]) (MOVOstore dst (MOVOload src mem) mem)) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s > 16 && s%16 != 0 && s%16 > 8) { + break + } + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(s - s%16) + v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) + v0.AuxInt = int64ToAuxInt(s % 16) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) + v1.AuxInt = int64ToAuxInt(s % 16) + v1.AddArg(src) + v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) + v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) + v3.AddArg2(src, mem) + v2.AddArg3(dst, v3, mem) + v.AddArg3(v0, v1, v2) return true } -} -func rewriteValueAMD64_OpMaskedNotEqualInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [4] x y (VPMOVVec16x32ToM mask))) + // match: (Move [s] dst src mem) + // cond: s > 64 && s <= 16*64 && s%16 == 0 && logLargeCopy(v, s) + // result: (DUFFCOPY [s] dst src mem) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s > 64 && s <= 16*64 && s%16 == 0 && logLargeCopy(v, s)) { + break + } + v.reset(OpAMD64DUFFCOPY) + v.AuxInt = int64ToAuxInt(s) + v.AddArg3(dst, src, mem) return true } -} -func rewriteValueAMD64_OpMaskedNotEqualInt16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [4] x y (VPMOVVec16x8ToM mask))) + // match: (Move [s] dst src mem) + // cond: s > 16*64 && s%8 == 0 && logLargeCopy(v, s) + // result: (REPMOVSQ dst src (MOVQconst [s/8]) mem) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s > 16*64 && s%8 == 0 && logLargeCopy(v, s)) { + break + } + v.reset(OpAMD64REPMOVSQ) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(s / 8) + v.AddArg4(dst, src, v0, mem) return true } + return false } -func 
rewriteValueAMD64_OpMaskedNotEqualInt32x16(v *Value) bool { +func rewriteValueAMD64_OpMulByPowOf2MaskedFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [4] x y (VPMOVVec32x16ToM mask))) + // match: (MulByPowOf2MaskedFloat32x16 x y mask) + // result: (VSCALEFPSMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + v.reset(OpAMD64VSCALEFPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedNotEqualInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMulByPowOf2MaskedFloat32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [4] x y (VPMOVVec32x4ToM mask))) + // match: (MulByPowOf2MaskedFloat32x4 x y mask) + // result: (VSCALEFPSMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + v.reset(OpAMD64VSCALEFPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedNotEqualInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMulByPowOf2MaskedFloat32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [4] x y (VPMOVVec32x8ToM mask))) + // match: (MulByPowOf2MaskedFloat32x8 x y mask) + // result: (VSCALEFPSMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + v.reset(OpAMD64VSCALEFPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedNotEqualInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMulByPowOf2MaskedFloat64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [4] x y (VPMOVVec64x2ToM mask))) + // match: (MulByPowOf2MaskedFloat64x2 x y mask) + // result: (VSCALEFPDMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + v.reset(OpAMD64VSCALEFPDMasked128) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedNotEqualInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMulByPowOf2MaskedFloat64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [4] x y (VPMOVVec64x4ToM mask))) + // match: (MulByPowOf2MaskedFloat64x4 x y mask) + // result: (VSCALEFPDMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + v.reset(OpAMD64VSCALEFPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedNotEqualInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMulByPowOf2MaskedFloat64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [4] x y (VPMOVVec64x8ToM mask))) + // match: (MulByPowOf2MaskedFloat64x8 x y mask) + // result: (VSCALEFPDMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + v.reset(OpAMD64VSCALEFPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedNotEqualInt8x16(v *Value) bool { +func rewriteValueAMD64_OpMulEvenWidenMaskedInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [4] x y (VPMOVVec8x16ToM mask))) + // match: (MulEvenWidenMaskedInt64x2 x y mask) + // result: (VPMULDQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMULDQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedNotEqualInt8x32(v *Value) bool { +func rewriteValueAMD64_OpMulEvenWidenMaskedInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [4] x y (VPMOVVec8x32ToM mask))) + // match: (MulEvenWidenMaskedInt64x4 x y mask) + // result: (VPMULDQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - 
v.AddArg(v0) + v.reset(OpAMD64VPMULDQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedNotEqualInt8x64(v *Value) bool { +func rewriteValueAMD64_OpMulEvenWidenMaskedInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [4] x y (VPMOVVec8x64ToM mask))) + // match: (MulEvenWidenMaskedInt64x8 x y mask) + // result: (VPMULDQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMULDQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedNotEqualUint16x16(v *Value) bool { +func rewriteValueAMD64_OpMulEvenWidenMaskedUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [4] x y (VPMOVVec16x16ToM mask))) + // match: (MulEvenWidenMaskedUint64x2 x y mask) + // result: (VPMULUDQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMULUDQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedNotEqualUint16x32(v *Value) bool { +func rewriteValueAMD64_OpMulEvenWidenMaskedUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [4] x y (VPMOVVec16x32ToM mask))) + // match: (MulEvenWidenMaskedUint64x4 x y mask) + // result: (VPMULUDQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMULUDQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedNotEqualUint16x8(v *Value) bool { +func rewriteValueAMD64_OpMulEvenWidenMaskedUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [4] x y (VPMOVVec16x8ToM mask))) + // match: (MulEvenWidenMaskedUint64x8 x y mask) + // result: (VPMULUDQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMULUDQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedNotEqualUint32x16(v *Value) bool { +func rewriteValueAMD64_OpMulHighMaskedInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [4] x y (VPMOVVec32x16ToM mask))) + // match: (MulHighMaskedInt16x16 x y mask) + // result: (VPMULHWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMULHWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedNotEqualUint32x4(v *Value) bool { +func rewriteValueAMD64_OpMulHighMaskedInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [4] x y (VPMOVVec32x4ToM mask))) + // match: (MulHighMaskedInt16x32 x y mask) + // result: (VPMULHWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMULHWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedNotEqualUint32x8(v *Value) bool { +func rewriteValueAMD64_OpMulHighMaskedInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [4] x y (VPMOVVec32x8ToM mask))) + // match: (MulHighMaskedInt16x8 x y mask) + // result: (VPMULHWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMULHWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedNotEqualUint64x2(v *Value) bool { +func rewriteValueAMD64_OpMulHighMaskedUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [4] x y (VPMOVVec64x2ToM mask))) + // match: (MulHighMaskedUint16x16 x y mask) + // result: (VPMULHUWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMULHUWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedNotEqualUint64x4(v *Value) bool { +func rewriteValueAMD64_OpMulHighMaskedUint16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [4] x y (VPMOVVec64x4ToM mask))) + // match: (MulHighMaskedUint16x32 x y mask) + // result: (VPMULHUWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMULHUWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedNotEqualUint64x8(v *Value) bool { +func rewriteValueAMD64_OpMulHighMaskedUint16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [4] x y (VPMOVVec64x8ToM mask))) + // match: (MulHighMaskedUint16x8 x y mask) + // result: (VPMULHUWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMULHUWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedNotEqualUint8x16(v *Value) bool { +func rewriteValueAMD64_OpMulLowMaskedInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [4] x y (VPMOVVec8x16ToM mask))) + // match: (MulLowMaskedInt16x16 x y mask) + // result: (VPMULLWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMULLWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedNotEqualUint8x32(v *Value) bool { +func rewriteValueAMD64_OpMulLowMaskedInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [4] x y (VPMOVVec8x32ToM mask))) + // match: (MulLowMaskedInt16x32 x y mask) + // result: (VPMULLWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - 
v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMULLWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedNotEqualUint8x64(v *Value) bool { +func rewriteValueAMD64_OpMulLowMaskedInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [4] x y (VPMOVVec8x64ToM mask))) + // match: (MulLowMaskedInt16x8 x y mask) + // result: (VPMULLWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMULLWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedOrInt32x16(v *Value) bool { +func rewriteValueAMD64_OpMulLowMaskedInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedOrInt32x16 x y mask) - // result: (VPORDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (MulLowMaskedInt32x16 x y mask) + // result: (VPMULLDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPORDMasked512) + v.reset(OpAMD64VPMULLDMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedOrInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMulLowMaskedInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedOrInt32x4 x y mask) - // result: (VPORDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (MulLowMaskedInt32x4 x y mask) + // result: (VPMULLDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPORDMasked128) + v.reset(OpAMD64VPMULLDMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedOrInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMulLowMaskedInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedOrInt32x8 x y mask) - // result: (VPORDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (MulLowMaskedInt32x8 x y mask) + // result: (VPMULLDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPORDMasked256) + v.reset(OpAMD64VPMULLDMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedOrInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMulLowMaskedInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedOrInt64x2 x y mask) - // result: (VPORQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (MulLowMaskedInt64x2 x y mask) + // result: (VPMULLQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - 
v.reset(OpAMD64VPORQMasked128) + v.reset(OpAMD64VPMULLQMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedOrInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMulLowMaskedInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedOrInt64x4 x y mask) - // result: (VPORQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (MulLowMaskedInt64x4 x y mask) + // result: (VPMULLQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPORQMasked256) + v.reset(OpAMD64VPMULLQMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedOrInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMulLowMaskedInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedOrInt64x8 x y mask) - // result: (VPORQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (MulLowMaskedInt64x8 x y mask) + // result: (VPMULLQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPORQMasked512) + v.reset(OpAMD64VPMULLQMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedOrUint32x16(v *Value) bool { +func rewriteValueAMD64_OpMulMaskedFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedOrUint32x16 x y mask) - // result: (VPORDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (MulMaskedFloat32x16 x y mask) + // result: (VMULPSMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPORDMasked512) + v.reset(OpAMD64VMULPSMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedOrUint32x4(v *Value) bool { +func rewriteValueAMD64_OpMulMaskedFloat32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedOrUint32x4 x y mask) - // result: (VPORDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (MulMaskedFloat32x4 x y mask) + // result: (VMULPSMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPORDMasked128) + v.reset(OpAMD64VMULPSMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedOrUint32x8(v *Value) bool { +func rewriteValueAMD64_OpMulMaskedFloat32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedOrUint32x8 x y mask) - // result: (VPORDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (MulMaskedFloat32x8 x y mask) + // result: (VMULPSMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPORDMasked256) + v.reset(OpAMD64VMULPSMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedOrUint64x2(v *Value) bool { +func rewriteValueAMD64_OpMulMaskedFloat64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedOrUint64x2 x y mask) - // result: (VPORQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (MulMaskedFloat64x2 x y mask) + // result: 
(VMULPDMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPORQMasked128) + v.reset(OpAMD64VMULPDMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedOrUint64x4(v *Value) bool { +func rewriteValueAMD64_OpMulMaskedFloat64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedOrUint64x4 x y mask) - // result: (VPORQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (MulMaskedFloat64x4 x y mask) + // result: (VMULPDMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPORQMasked256) + v.reset(OpAMD64VMULPDMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedOrUint64x8(v *Value) bool { +func rewriteValueAMD64_OpMulMaskedFloat64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedOrUint64x8 x y mask) - // result: (VPORQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (MulMaskedFloat64x8 x y mask) + // result: (VMULPDMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPORQMasked512) + v.reset(OpAMD64VMULPDMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedPairDotProdAccumulateInt32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpNeg32F(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Neg32F x) + // result: (PXOR x (MOVSSconst [float32(math.Copysign(0, -1))])) + for { + x := v_0 + v.reset(OpAMD64PXOR) + v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32) + v0.AuxInt = float32ToAuxInt(float32(math.Copysign(0, -1))) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpNeg64F(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Neg64F x) + // result: (PXOR x (MOVSDconst [math.Copysign(0, -1)])) + for { + x := v_0 + v.reset(OpAMD64PXOR) + v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64) + v0.AuxInt = float64ToAuxInt(math.Copysign(0, -1)) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpNeq16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPairDotProdAccumulateInt32x16 x y z mask) - // result: (VPDPWSSDMasked512 x y z (VPMOVVec32x16ToM mask)) + // match: (Neq16 x y) + // result: (SETNE (CMPW x y)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPWSSDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPairDotProdAccumulateInt32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpNeq32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPairDotProdAccumulateInt32x4 x y z mask) - // result: (VPDPWSSDMasked128 x y z (VPMOVVec32x4ToM mask)) + // match: (Neq32 x y) + // result: (SETNE (CMPL x y)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPWSSDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - 
v.AddArg4(x, y, z, v0) + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPairDotProdAccumulateInt32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpNeq32F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPairDotProdAccumulateInt32x8 x y z mask) - // result: (VPDPWSSDMasked256 x y z (VPMOVVec32x8ToM mask)) + // match: (Neq32F x y) + // result: (SETNEF (UCOMISS x y)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPWSSDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64SETNEF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPairDotProdInt16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpNeq64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPairDotProdInt16x16 x y mask) - // result: (VPMADDWDMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (Neq64 x y) + // result: (SETNE (CMPQ x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMADDWDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpNeq64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Neq64F x y) + // result: (SETNEF (UCOMISD x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETNEF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpNeq8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Neq8 x y) + // result: (SETNE (CMPB x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpNeqB(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (NeqB x y) + // result: (SETNE (CMPB x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpNeqPtr(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (NeqPtr x y) + // result: (SETNE (CMPQ x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpNot(v *Value) bool { + v_0 := v.Args[0] + // match: (Not x) + // result: (XORLconst [1] x) + for { + x := v_0 + v.reset(OpAMD64XORLconst) + v.AuxInt = int32ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpNotEqualFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [4] x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func 
rewriteValueAMD64_OpNotEqualFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (NotEqualFloat32x4 x y) + // result: (VCMPPS128 [4] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPS128) + v.AuxInt = int8ToAuxInt(4) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpNotEqualFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (NotEqualFloat32x8 x y) + // result: (VCMPPS256 [4] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPS256) + v.AuxInt = int8ToAuxInt(4) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpNotEqualFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (NotEqualFloat64x2 x y) + // result: (VCMPPD128 [4] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPD128) + v.AuxInt = int8ToAuxInt(4) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpNotEqualFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (NotEqualFloat64x4 x y) + // result: (VCMPPD256 [4] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPD256) + v.AuxInt = int8ToAuxInt(4) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpNotEqualFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [4] x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPairDotProdInt16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpNotEqualInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPairDotProdInt16x32 x y mask) - // result: (VPMADDWDMasked512 x y (VPMOVVec16x32ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualInt16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPW256 [4] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMADDWDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPairDotProdInt16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpNotEqualInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPairDotProdInt16x8 x y mask) - // result: (VPMADDWDMasked128 x y (VPMOVVec16x8ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualInt16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPW512 [4] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMADDWDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPopCountInt16x16(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPopCountInt16x16 x mask) - // result: (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualInt16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPW128 [4] x y)) for { x := v_0 - 
mask := v_1 - v.reset(OpAMD64VPOPCNTWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPopCountInt16x32(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPopCountInt16x32 x mask) - // result: (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualInt32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPD512 [4] x y)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPopCountInt16x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPopCountInt16x8 x mask) - // result: (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualInt32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPD128 [4] x y)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPopCountInt32x16(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPopCountInt32x16 x mask) - // result: (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualInt32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPD256 [4] x y)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPopCountInt32x4(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPopCountInt32x4 x mask) - // result: (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualInt64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPQ128 [4] x y)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPopCountInt32x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPopCountInt32x8 x mask) - // result: (VPOPCNTDMasked256 x (VPMOVVec32x8ToM 
mask)) + typ := &b.Func.Config.Types + // match: (NotEqualInt64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPQ256 [4] x y)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPopCountInt64x2(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPopCountInt64x2 x mask) - // result: (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualInt64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPQ512 [4] x y)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPopCountInt64x4(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPopCountInt64x4 x mask) - // result: (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualInt8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPB128 [4] x y)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPopCountInt64x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPopCountInt64x8 x mask) - // result: (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualInt8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPB256 [4] x y)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPopCountInt8x16(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPopCountInt8x16 x mask) - // result: (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualInt8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPB512 [4] x y)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPopCountInt8x32(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedFloat32x16(v *Value) bool { + v_2 
:= v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPopCountInt8x32 x mask) - // result: (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedFloat32x16 x y mask) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [4] x y (VPMOVVec32x16ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPopCountInt8x64(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedFloat32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPopCountInt8x64 x mask) - // result: (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedFloat32x4 x y mask) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [4] x y (VPMOVVec32x4ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPopCountUint16x16(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedFloat32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPopCountUint16x16 x mask) - // result: (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedFloat32x8 x y mask) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [4] x y (VPMOVVec32x8ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPopCountUint16x32(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedFloat64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPopCountUint16x32 x mask) - // result: (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedFloat64x2 x y mask) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [4] x y (VPMOVVec64x2ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func 
rewriteValueAMD64_OpMaskedPopCountUint16x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedFloat64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPopCountUint16x8 x mask) - // result: (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedFloat64x4 x y mask) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [4] x y (VPMOVVec64x4ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPopCountUint32x16(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedFloat64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPopCountUint32x16 x mask) - // result: (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedFloat64x8 x y mask) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [4] x y (VPMOVVec64x8ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPopCountUint32x4(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedInt16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPopCountUint32x4 x mask) - // result: (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedInt16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [4] x y (VPMOVVec16x16ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPopCountUint32x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedInt16x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPopCountUint32x8 x mask) - // result: (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedInt16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [4] x y (VPMOVVec16x32ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPopCountUint64x2(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedInt16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPopCountUint64x2 x mask) - // result: (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedInt16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [4] x y (VPMOVVec16x8ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPopCountUint64x4(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedInt32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPopCountUint64x4 x mask) - // result: (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedInt32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [4] x y (VPMOVVec32x16ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPopCountUint64x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedInt32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPopCountUint64x8 x mask) - // result: (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedInt32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [4] x y (VPMOVVec32x4ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPopCountUint8x16(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedInt32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPopCountUint8x16 x mask) - // result: (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedInt32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [4] x y (VPMOVVec32x8ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := 
b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPopCountUint8x32(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedInt64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPopCountUint8x32 x mask) - // result: (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedInt64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [4] x y (VPMOVVec64x2ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPopCountUint8x64(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedInt64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPopCountUint8x64 x mask) - // result: (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedInt64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [4] x y (VPMOVVec64x4ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateAllLeftInt32x16(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedInt64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateAllLeftInt32x16 [a] x mask) - // result: (VPROLDMasked512 [a] x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedInt64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [4] x y (VPMOVVec64x8ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPROLDMasked512) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateAllLeftInt32x4(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedInt8x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateAllLeftInt32x4 [a] x mask) - // result: (VPROLDMasked128 [a] x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedInt8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [4] x y (VPMOVVec8x16ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask 
:= v_1 - v.reset(OpAMD64VPROLDMasked128) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateAllLeftInt32x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedInt8x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateAllLeftInt32x8 [a] x mask) - // result: (VPROLDMasked256 [a] x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedInt8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [4] x y (VPMOVVec8x32ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPROLDMasked256) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateAllLeftInt64x2(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedInt8x64(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateAllLeftInt64x2 [a] x mask) - // result: (VPROLQMasked128 [a] x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedInt8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [4] x y (VPMOVVec8x64ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPROLQMasked128) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateAllLeftInt64x4(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedUint16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateAllLeftInt64x4 [a] x mask) - // result: (VPROLQMasked256 [a] x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedUint16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [4] x y (VPMOVVec16x16ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPROLQMasked256) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateAllLeftInt64x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedUint16x32(v *Value) 
bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateAllLeftInt64x8 [a] x mask) - // result: (VPROLQMasked512 [a] x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedUint16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [4] x y (VPMOVVec16x32ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPROLQMasked512) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateAllLeftUint32x16(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedUint16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateAllLeftUint32x16 [a] x mask) - // result: (VPROLDMasked512 [a] x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedUint16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [4] x y (VPMOVVec16x8ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPROLDMasked512) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateAllLeftUint32x4(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedUint32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateAllLeftUint32x4 [a] x mask) - // result: (VPROLDMasked128 [a] x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedUint32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [4] x y (VPMOVVec32x16ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPROLDMasked128) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateAllLeftUint32x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedUint32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateAllLeftUint32x8 [a] x mask) - // result: (VPROLDMasked256 [a] x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedUint32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [4] x y (VPMOVVec32x4ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPROLDMasked256) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y 
:= v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateAllLeftUint64x2(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedUint32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateAllLeftUint64x2 [a] x mask) - // result: (VPROLQMasked128 [a] x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedUint32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [4] x y (VPMOVVec32x8ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPROLQMasked128) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateAllLeftUint64x4(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedUint64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateAllLeftUint64x4 [a] x mask) - // result: (VPROLQMasked256 [a] x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedUint64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [4] x y (VPMOVVec64x2ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPROLQMasked256) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateAllLeftUint64x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedUint64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateAllLeftUint64x8 [a] x mask) - // result: (VPROLQMasked512 [a] x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedUint64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [4] x y (VPMOVVec64x4ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPROLQMasked512) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateAllRightInt32x16(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedUint64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateAllRightInt32x16 [a] x mask) - // result: (VPRORDMasked512 [a] x 
(VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedUint64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [4] x y (VPMOVVec64x8ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPRORDMasked512) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateAllRightInt32x4(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedUint8x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateAllRightInt32x4 [a] x mask) - // result: (VPRORDMasked128 [a] x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedUint8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [4] x y (VPMOVVec8x16ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPRORDMasked128) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateAllRightInt32x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedUint8x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateAllRightInt32x8 [a] x mask) - // result: (VPRORDMasked256 [a] x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedUint8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [4] x y (VPMOVVec8x32ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPRORDMasked256) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateAllRightInt64x2(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedUint8x64(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateAllRightInt64x2 [a] x mask) - // result: (VPRORQMasked128 [a] x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedUint8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [4] x y (VPMOVVec8x64ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPRORQMasked128) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateAllRightInt64x4(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateAllRightInt64x4 [a] x mask) - // result: (VPRORQMasked256 [a] x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualUint16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPUW256 [4] x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPRORQMasked256) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateAllRightInt64x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateAllRightInt64x8 [a] x mask) - // result: (VPRORQMasked512 [a] x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualUint16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPUW512 [4] x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPRORQMasked512) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateAllRightUint32x16(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateAllRightUint32x16 [a] x mask) - // result: (VPRORDMasked512 [a] x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualUint16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPUW128 [4] x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPRORDMasked512) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateAllRightUint32x4(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateAllRightUint32x4 [a] x mask) - // result: (VPRORDMasked128 [a] x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualUint32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [4] x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPRORDMasked128) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateAllRightUint32x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := 
v.Args[0] b := v.Block - // match: (MaskedRotateAllRightUint32x8 [a] x mask) - // result: (VPRORDMasked256 [a] x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualUint32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPUD128 [4] x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPRORDMasked256) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateAllRightUint64x2(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateAllRightUint64x2 [a] x mask) - // result: (VPRORQMasked128 [a] x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualUint32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPUD256 [4] x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPRORQMasked128) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateAllRightUint64x4(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateAllRightUint64x4 [a] x mask) - // result: (VPRORQMasked256 [a] x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualUint64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPUQ128 [4] x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPRORQMasked256) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateAllRightUint64x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateAllRightUint64x8 [a] x mask) - // result: (VPRORQMasked512 [a] x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualUint64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPUQ256 [4] x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPRORQMasked512) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateLeftInt32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpNotEqualUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateLeftInt32x16 x y mask) - // result: (VPROLVDMasked512 x y (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualUint64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPUQ512 [4] x y)) for { x := v_0 y := v_1 
- mask := v_2 - v.reset(OpAMD64VPROLVDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateLeftInt32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpNotEqualUint8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateLeftInt32x4 x y mask) - // result: (VPROLVDMasked128 x y (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualUint8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPUB128 [4] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPROLVDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateLeftInt32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpNotEqualUint8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateLeftInt32x8 x y mask) - // result: (VPROLVDMasked256 x y (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualUint8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPUB256 [4] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPROLVDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateLeftInt64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpNotEqualUint8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateLeftInt64x2 x y mask) - // result: (VPROLVQMasked128 x y (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualUint8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [4] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPROLVQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateLeftInt64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpOffPtr(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateLeftInt64x4 x y mask) - // result: (VPROLVQMasked256 x y (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (OffPtr [off] ptr) + // cond: is32Bit(off) + // result: (ADDQconst [int32(off)] ptr) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPROLVQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + off := auxIntToInt64(v.AuxInt) + ptr := v_0 + if !(is32Bit(off)) { + break + } + v.reset(OpAMD64ADDQconst) + v.AuxInt = int32ToAuxInt(int32(off)) + v.AddArg(ptr) return true } -} -func rewriteValueAMD64_OpMaskedRotateLeftInt64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // 
match: (MaskedRotateLeftInt64x8 x y mask) - // result: (VPROLVQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (OffPtr [off] ptr) + // result: (ADDQ (MOVQconst [off]) ptr) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPROLVQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + off := auxIntToInt64(v.AuxInt) + ptr := v_0 + v.reset(OpAMD64ADDQ) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(off) + v.AddArg2(v0, ptr) return true } } -func rewriteValueAMD64_OpMaskedRotateLeftUint32x16(v *Value) bool { +func rewriteValueAMD64_OpOrMaskedInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateLeftUint32x16 x y mask) - // result: (VPROLVDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (OrMaskedInt32x16 x y mask) + // result: (VPORDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPROLVDMasked512) + v.reset(OpAMD64VPORDMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedRotateLeftUint32x4(v *Value) bool { +func rewriteValueAMD64_OpOrMaskedInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateLeftUint32x4 x y mask) - // result: (VPROLVDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (OrMaskedInt32x4 x y mask) + // result: (VPORDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPROLVDMasked128) + v.reset(OpAMD64VPORDMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedRotateLeftUint32x8(v *Value) bool { +func rewriteValueAMD64_OpOrMaskedInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateLeftUint32x8 x y mask) - // result: (VPROLVDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (OrMaskedInt32x8 x y mask) + // result: (VPORDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPROLVDMasked256) + v.reset(OpAMD64VPORDMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedRotateLeftUint64x2(v *Value) bool { +func rewriteValueAMD64_OpOrMaskedInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateLeftUint64x2 x y mask) - // result: (VPROLVQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (OrMaskedInt64x2 x y mask) + // result: (VPORQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPROLVQMasked128) + v.reset(OpAMD64VPORQMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedRotateLeftUint64x4(v *Value) bool { +func rewriteValueAMD64_OpOrMaskedInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateLeftUint64x4 x y mask) - // result: (VPROLVQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (OrMaskedInt64x4 x y mask) + // result: (VPORQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPROLVQMasked256) + v.reset(OpAMD64VPORQMasked256) v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedRotateLeftUint64x8(v *Value) bool { +func rewriteValueAMD64_OpOrMaskedInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateLeftUint64x8 x y mask) - // result: (VPROLVQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (OrMaskedInt64x8 x y mask) + // result: (VPORQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPROLVQMasked512) + v.reset(OpAMD64VPORQMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedRotateRightInt32x16(v *Value) bool { +func rewriteValueAMD64_OpOrMaskedUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateRightInt32x16 x y mask) - // result: (VPRORVDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (OrMaskedUint32x16 x y mask) + // result: (VPORDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPRORVDMasked512) + v.reset(OpAMD64VPORDMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedRotateRightInt32x4(v *Value) bool { +func rewriteValueAMD64_OpOrMaskedUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateRightInt32x4 x y mask) - // result: (VPRORVDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (OrMaskedUint32x4 x y mask) + // result: (VPORDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPRORVDMasked128) + v.reset(OpAMD64VPORDMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedRotateRightInt32x8(v *Value) bool { +func rewriteValueAMD64_OpOrMaskedUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateRightInt32x8 x y mask) - // result: (VPRORVDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (OrMaskedUint32x8 x y mask) + // result: (VPORDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPRORVDMasked256) + v.reset(OpAMD64VPORDMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedRotateRightInt64x2(v *Value) bool { +func rewriteValueAMD64_OpOrMaskedUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateRightInt64x2 x y mask) - // result: (VPRORVQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (OrMaskedUint64x2 x y mask) + // result: (VPORQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPRORVQMasked128) + v.reset(OpAMD64VPORQMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedRotateRightInt64x4(v *Value) bool { +func rewriteValueAMD64_OpOrMaskedUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateRightInt64x4 x y mask) - // result: (VPRORVQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (OrMaskedUint64x4 x y mask) + // result: (VPORQMasked256 x y 
(VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPRORVQMasked256) + v.reset(OpAMD64VPORQMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedRotateRightInt64x8(v *Value) bool { +func rewriteValueAMD64_OpOrMaskedUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateRightInt64x8 x y mask) - // result: (VPRORVQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (OrMaskedUint64x8 x y mask) + // result: (VPORQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPRORVQMasked512) + v.reset(OpAMD64VPORQMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedRotateRightUint32x16(v *Value) bool { +func rewriteValueAMD64_OpPairDotProdAccumulateMaskedInt32x16(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateRightUint32x16 x y mask) - // result: (VPRORVDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (PairDotProdAccumulateMaskedInt32x16 x y z mask) + // result: (VPDPWSSDMasked512 x y z (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPRORVDMasked512) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPWSSDMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMaskedRotateRightUint32x4(v *Value) bool { +func rewriteValueAMD64_OpPairDotProdAccumulateMaskedInt32x4(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateRightUint32x4 x y mask) - // result: (VPRORVDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (PairDotProdAccumulateMaskedInt32x4 x y z mask) + // result: (VPDPWSSDMasked128 x y z (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPRORVDMasked128) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPWSSDMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMaskedRotateRightUint32x8(v *Value) bool { +func rewriteValueAMD64_OpPairDotProdAccumulateMaskedInt32x8(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateRightUint32x8 x y mask) - // result: (VPRORVDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (PairDotProdAccumulateMaskedInt32x8 x y z mask) + // result: (VPDPWSSDMasked256 x y z (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPRORVDMasked256) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPWSSDMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMaskedRotateRightUint64x2(v *Value) bool { +func rewriteValueAMD64_OpPairDotProdMaskedInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateRightUint64x2 x y mask) - // result: (VPRORVQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (PairDotProdMaskedInt16x16 x y mask) + // result: (VPMADDWDMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - 
v.reset(OpAMD64VPRORVQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPMADDWDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedRotateRightUint64x4(v *Value) bool { +func rewriteValueAMD64_OpPairDotProdMaskedInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateRightUint64x4 x y mask) - // result: (VPRORVQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (PairDotProdMaskedInt16x32 x y mask) + // result: (VPMADDWDMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPRORVQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPMADDWDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedRotateRightUint64x8(v *Value) bool { +func rewriteValueAMD64_OpPairDotProdMaskedInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateRightUint64x8 x y mask) - // result: (VPRORVQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (PairDotProdMaskedInt16x8 x y mask) + // result: (VPMADDWDMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPRORVQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v.reset(OpAMD64VPMADDWDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpPanicBounds(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (PanicBounds [kind] x y mem) + // cond: boundsABI(kind) == 0 + // result: (LoweredPanicBoundsA [kind] x y mem) + for { + kind := auxIntToInt64(v.AuxInt) + x := v_0 + y := v_1 + mem := v_2 + if !(boundsABI(kind) == 0) { + break + } + v.reset(OpAMD64LoweredPanicBoundsA) + v.AuxInt = int64ToAuxInt(kind) + v.AddArg3(x, y, mem) + return true + } + // match: (PanicBounds [kind] x y mem) + // cond: boundsABI(kind) == 1 + // result: (LoweredPanicBoundsB [kind] x y mem) + for { + kind := auxIntToInt64(v.AuxInt) + x := v_0 + y := v_1 + mem := v_2 + if !(boundsABI(kind) == 1) { + break + } + v.reset(OpAMD64LoweredPanicBoundsB) + v.AuxInt = int64ToAuxInt(kind) + v.AddArg3(x, y, mem) + return true + } + // match: (PanicBounds [kind] x y mem) + // cond: boundsABI(kind) == 2 + // result: (LoweredPanicBoundsC [kind] x y mem) + for { + kind := auxIntToInt64(v.AuxInt) + x := v_0 + y := v_1 + mem := v_2 + if !(boundsABI(kind) == 2) { + break + } + v.reset(OpAMD64LoweredPanicBoundsC) + v.AuxInt = int64ToAuxInt(kind) + v.AddArg3(x, y, mem) + return true + } + return false +} +func rewriteValueAMD64_OpPopCount16(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (PopCount16 x) + // result: (POPCNTL (MOVWQZX x)) + for { + x := v_0 + v.reset(OpAMD64POPCNTL) + v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpPopCount8(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (PopCount8 x) + // result: (POPCNTL (MOVBQZX x)) + for { + x := v_0 + v.reset(OpAMD64POPCNTL) + v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, 
typ.UInt32) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpPopCountMaskedInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRoundWithPrecisionFloat32x16 [a] x mask) - // result: (VRNDSCALEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) + // match: (PopCountMaskedInt16x16 x mask) + // result: (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 0) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v.reset(OpAMD64VPOPCNTWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpPopCountMaskedInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRoundWithPrecisionFloat32x4 [a] x mask) - // result: (VRNDSCALEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) + // match: (PopCountMaskedInt16x32 x mask) + // result: (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 0) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VPOPCNTWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpPopCountMaskedInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRoundWithPrecisionFloat32x8 [a] x mask) - // result: (VRNDSCALEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) + // match: (PopCountMaskedInt16x8 x mask) + // result: (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 0) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VPOPCNTWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpPopCountMaskedInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRoundWithPrecisionFloat64x2 [a] x mask) - // result: (VRNDSCALEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) + // match: (PopCountMaskedInt32x16 x mask) + // result: (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 0) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPOPCNTDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpPopCountMaskedInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRoundWithPrecisionFloat64x4 [a] x mask) - // result: (VRNDSCALEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) + // match: (PopCountMaskedInt32x4 x mask) + // result: (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 mask := v_1 - 
v.reset(OpAMD64VRNDSCALEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 0) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPOPCNTDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpPopCountMaskedInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRoundWithPrecisionFloat64x8 [a] x mask) - // result: (VRNDSCALEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) + // match: (PopCountMaskedInt32x8 x mask) + // result: (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 0) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v.reset(OpAMD64VPOPCNTDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedAddInt16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpPopCountMaskedInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedAddInt16x16 x y mask) - // result: (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (PopCountMaskedInt64x2 x mask) + // result: (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedAddInt16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpPopCountMaskedInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedAddInt16x32 x y mask) - // result: (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (PopCountMaskedInt64x4 x mask) + // result: (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedAddInt16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpPopCountMaskedInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedAddInt16x8 x y mask) - // result: (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (PopCountMaskedInt64x8 x mask) + // result: (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedAddInt8x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpPopCountMaskedInt8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedAddInt8x16 x y mask) - // result: 
(VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) + // match: (PopCountMaskedInt8x16 x mask) + // result: (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked128) + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedAddInt8x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpPopCountMaskedInt8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedAddInt8x32 x y mask) - // result: (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) + // match: (PopCountMaskedInt8x32 x mask) + // result: (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked256) + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedAddInt8x64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpPopCountMaskedInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedAddInt8x64 x y mask) - // result: (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) + // match: (PopCountMaskedInt8x64 x mask) + // result: (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked512) + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedAddUint16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpPopCountMaskedUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedAddUint16x16 x y mask) - // result: (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (PopCountMaskedUint16x16 x mask) + // result: (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSWMasked256) + mask := v_1 + v.reset(OpAMD64VPOPCNTWMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedAddUint16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpPopCountMaskedUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedAddUint16x32 x y mask) - // result: (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (PopCountMaskedUint16x32 x mask) + // result: (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSWMasked512) + mask := v_1 + v.reset(OpAMD64VPOPCNTWMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedAddUint16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpPopCountMaskedUint16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedAddUint16x8 x y mask) - // result: (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (PopCountMaskedUint16x8 x mask) + // result: (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) for { x := v_0 - y := 
v_1 - mask := v_2 - v.reset(OpAMD64VPADDSWMasked128) + mask := v_1 + v.reset(OpAMD64VPOPCNTWMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedAddUint8x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpPopCountMaskedUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedAddUint8x16 x y mask) - // result: (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) + // match: (PopCountMaskedUint32x16 x mask) + // result: (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedAddUint8x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpPopCountMaskedUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedAddUint8x32 x y mask) - // result: (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) + // match: (PopCountMaskedUint32x4 x mask) + // result: (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedAddUint8x64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpPopCountMaskedUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedAddUint8x64 x y mask) - // result: (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) + // match: (PopCountMaskedUint32x8 x mask) + // result: (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedPairDotProdAccumulateInt32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpPopCountMaskedUint64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedPairDotProdAccumulateInt32x16 x y z mask) - // result: (VPDPWSSDSMasked512 x y z (VPMOVVec32x16ToM mask)) + // match: (PopCountMaskedUint64x2 x mask) + // result: (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) for { x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPWSSDSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedPairDotProdAccumulateInt32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpPopCountMaskedUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: 
(MaskedSaturatedPairDotProdAccumulateInt32x4 x y z mask) - // result: (VPDPWSSDSMasked128 x y z (VPMOVVec32x4ToM mask)) + // match: (PopCountMaskedUint64x4 x mask) + // result: (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) for { x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPWSSDSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedPairDotProdAccumulateInt32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpPopCountMaskedUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedPairDotProdAccumulateInt32x8 x y z mask) - // result: (VPDPWSSDSMasked256 x y z (VPMOVVec32x8ToM mask)) + // match: (PopCountMaskedUint64x8 x mask) + // result: (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) for { x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPWSSDSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedSubInt16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpPopCountMaskedUint8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedSubInt16x16 x y mask) - // result: (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (PopCountMaskedUint8x16 x mask) + // result: (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedSubInt16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpPopCountMaskedUint8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedSubInt16x32 x y mask) - // result: (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (PopCountMaskedUint8x32 x mask) + // result: (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedSubInt16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpPopCountMaskedUint8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedSubInt16x8 x y mask) - // result: (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (PopCountMaskedUint8x64 x mask) + // result: (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) 
v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedSubInt8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRotateAllLeftInt32x16(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedSubInt8x16 x y mask) - // result: (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) + // match: (RotateAllLeftInt32x16 [a] x) + // result: (VPROLD512 [a] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPROLD512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedSaturatedSubInt8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRotateAllLeftInt32x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedSubInt8x32 x y mask) - // result: (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) + // match: (RotateAllLeftInt32x4 [a] x) + // result: (VPROLD128 [a] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPROLD128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedSaturatedSubInt8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRotateAllLeftInt32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllLeftInt32x8 [a] x) + // result: (VPROLD256 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPROLD256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRotateAllLeftInt64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllLeftInt64x2 [a] x) + // result: (VPROLQ128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPROLQ128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRotateAllLeftInt64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllLeftInt64x4 [a] x) + // result: (VPROLQ256 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPROLQ256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRotateAllLeftInt64x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedSubInt8x64 x y mask) - // result: (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) + // match: (RotateAllLeftInt64x8 [a] x) + // result: (VPROLQ512 [a] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPROLQ512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedSaturatedSubUint16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRotateAllLeftMaskedInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedSubUint16x16 x y mask) - // result: (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (RotateAllLeftMaskedInt32x16 [a] x mask) + // result: (VPROLDMasked512 [a] x (VPMOVVec32x16ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSWMasked256) - v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPROLDMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedSubUint16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRotateAllLeftMaskedInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedSubUint16x32 x y mask) - // result: (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (RotateAllLeftMaskedInt32x4 [a] x mask) + // result: (VPROLDMasked128 [a] x (VPMOVVec32x4ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPROLDMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedSubUint16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRotateAllLeftMaskedInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedSubUint16x8 x y mask) - // result: (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (RotateAllLeftMaskedInt32x8 [a] x mask) + // result: (VPROLDMasked256 [a] x (VPMOVVec32x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPROLDMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedSubUint8x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRotateAllLeftMaskedInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedSubUint8x16 x y mask) - // result: (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) + // match: (RotateAllLeftMaskedInt64x2 [a] x mask) + // result: (VPROLQMasked128 [a] x (VPMOVVec64x2ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPROLQMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedSubUint8x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRotateAllLeftMaskedInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedSubUint8x32 x y mask) - // result: (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) + // match: (RotateAllLeftMaskedInt64x4 [a] x mask) + // result: (VPROLQMasked256 [a] x (VPMOVVec64x4ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPROLQMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func 
rewriteValueAMD64_OpMaskedSaturatedSubUint8x64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRotateAllLeftMaskedInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedSubUint8x64 x y mask) - // result: (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) + // match: (RotateAllLeftMaskedInt64x8 [a] x mask) + // result: (VPROLQMasked512 [a] x (VPMOVVec64x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPROLQMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint8x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRotateAllLeftMaskedUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedUnsignedSignedPairDotProdUint8x16 x y mask) - // result: (VPMADDUBSWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (RotateAllLeftMaskedUint32x16 [a] x mask) + // result: (VPROLDMasked512 [a] x (VPMOVVec32x16ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMADDUBSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPROLDMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint8x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRotateAllLeftMaskedUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedUnsignedSignedPairDotProdUint8x32 x y mask) - // result: (VPMADDUBSWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (RotateAllLeftMaskedUint32x4 [a] x mask) + // result: (VPROLDMasked128 [a] x (VPMOVVec32x4ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMADDUBSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPROLDMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint8x64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRotateAllLeftMaskedUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedUnsignedSignedPairDotProdUint8x64 x y mask) - // result: (VPMADDUBSWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (RotateAllLeftMaskedUint32x8 [a] x mask) + // result: (VPROLDMasked256 [a] x (VPMOVVec32x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMADDUBSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPROLDMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := 
v.Args[2] +func rewriteValueAMD64_OpRotateAllLeftMaskedUint64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16 x y z mask) - // result: (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) + // match: (RotateAllLeftMaskedUint64x2 [a] x mask) + // result: (VPROLQMasked128 [a] x (VPMOVVec64x2ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPROLQMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpRotateAllLeftMaskedUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4 x y z mask) - // result: (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) + // match: (RotateAllLeftMaskedUint64x4 [a] x mask) + // result: (VPROLQMasked256 [a] x (VPMOVVec64x4ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPROLQMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpRotateAllLeftMaskedUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8 x y z mask) - // result: (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) + // match: (RotateAllLeftMaskedUint64x8 [a] x mask) + // result: (VPROLQMasked512 [a] x (VPMOVVec64x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPROLQMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRotateAllLeftUint32x16(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16 x y z mask) - // result: (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) + // match: (RotateAllLeftUint32x16 [a] x) + // result: (VPROLD512 [a] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VPROLD512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4(v *Value) bool { - v_3 := v.Args[3] 
- v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRotateAllLeftUint32x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4 x y z mask) - // result: (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) + // match: (RotateAllLeftUint32x4 [a] x) + // result: (VPROLD128 [a] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VPROLD128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRotateAllLeftUint32x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8 x y z mask) - // result: (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) + // match: (RotateAllLeftUint32x8 [a] x) + // result: (VPROLD256 [a] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VPROLD256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRotateAllLeftUint64x2(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftAllLeftAndFillUpperFromInt16x16 [a] x y mask) - // result: (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) + // match: (RotateAllLeftUint64x2 [a] x) + // result: (VPROLQ128 [a] x) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDWMasked256) + v.reset(OpAMD64VPROLQ128) v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRotateAllLeftUint64x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftAllLeftAndFillUpperFromInt16x32 [a] x y mask) - // result: (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) + // match: (RotateAllLeftUint64x4 [a] x) + // result: (VPROLQ256 [a] x) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDWMasked512) + v.reset(OpAMD64VPROLQ256) v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRotateAllLeftUint64x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftAllLeftAndFillUpperFromInt16x8 [a] x y mask) - // result: (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) + // match: (RotateAllLeftUint64x8 [a] x) + // result: (VPROLQ512 [a] x) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDWMasked128) + v.reset(OpAMD64VPROLQ512) v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, 
types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRotateAllRightInt32x16(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftAllLeftAndFillUpperFromInt32x16 [a] x y mask) - // result: (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) + // match: (RotateAllRightInt32x16 [a] x) + // result: (VPRORD512 [a] x) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDDMasked512) + v.reset(OpAMD64VPRORD512) v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRotateAllRightInt32x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftAllLeftAndFillUpperFromInt32x4 [a] x y mask) - // result: (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) + // match: (RotateAllRightInt32x4 [a] x) + // result: (VPRORD128 [a] x) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDDMasked128) + v.reset(OpAMD64VPRORD128) v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRotateAllRightInt32x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftAllLeftAndFillUpperFromInt32x8 [a] x y mask) - // result: (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) + // match: (RotateAllRightInt32x8 [a] x) + // result: (VPRORD256 [a] x) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDDMasked256) + v.reset(OpAMD64VPRORD256) v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRotateAllRightInt64x2(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftAllLeftAndFillUpperFromInt64x2 [a] x y mask) - // result: (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) + // match: (RotateAllRightInt64x2 [a] x) + // result: (VPRORQ128 [a] x) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDQMasked128) + v.reset(OpAMD64VPRORQ128) v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRotateAllRightInt64x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftAllLeftAndFillUpperFromInt64x4 [a] x y mask) - // result: (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) + // match: (RotateAllRightInt64x4 [a] x) + // result: (VPRORQ256 [a] x) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDQMasked256) + v.reset(OpAMD64VPRORQ256) v.AuxInt = int8ToAuxInt(a) 
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRotateAllRightInt64x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftAllLeftAndFillUpperFromInt64x8 [a] x y mask) - // result: (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) + // match: (RotateAllRightInt64x8 [a] x) + // result: (VPRORQ512 [a] x) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDQMasked512) + v.reset(OpAMD64VPRORQ512) v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRotateAllRightMaskedInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllLeftAndFillUpperFromUint16x16 [a] x y mask) - // result: (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) + // match: (RotateAllRightMaskedInt32x16 [a] x mask) + // result: (VPRORDMasked512 [a] x (VPMOVVec32x16ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDWMasked256) + mask := v_1 + v.reset(OpAMD64VPRORDMasked512) v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRotateAllRightMaskedInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllLeftAndFillUpperFromUint16x32 [a] x y mask) - // result: (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) + // match: (RotateAllRightMaskedInt32x4 [a] x mask) + // result: (VPRORDMasked128 [a] x (VPMOVVec32x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDWMasked512) + mask := v_1 + v.reset(OpAMD64VPRORDMasked128) v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRotateAllRightMaskedInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllLeftAndFillUpperFromUint16x8 [a] x y mask) - // result: (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) + // match: (RotateAllRightMaskedInt32x8 [a] x mask) + // result: (VPRORDMasked256 [a] x (VPMOVVec32x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDWMasked128) + mask := v_1 + v.reset(OpAMD64VPRORDMasked256) v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint32x16(v *Value) bool { - v_2 := v.Args[2] +func 
rewriteValueAMD64_OpRotateAllRightMaskedInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllLeftAndFillUpperFromUint32x16 [a] x y mask) - // result: (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) + // match: (RotateAllRightMaskedInt64x2 [a] x mask) + // result: (VPRORQMasked128 [a] x (VPMOVVec64x2ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDDMasked512) + mask := v_1 + v.reset(OpAMD64VPRORQMasked128) v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRotateAllRightMaskedInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllLeftAndFillUpperFromUint32x4 [a] x y mask) - // result: (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) + // match: (RotateAllRightMaskedInt64x4 [a] x mask) + // result: (VPRORQMasked256 [a] x (VPMOVVec64x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDDMasked128) + mask := v_1 + v.reset(OpAMD64VPRORQMasked256) v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRotateAllRightMaskedInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllLeftAndFillUpperFromUint32x8 [a] x y mask) - // result: (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) + // match: (RotateAllRightMaskedInt64x8 [a] x mask) + // result: (VPRORQMasked512 [a] x (VPMOVVec64x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDDMasked256) + mask := v_1 + v.reset(OpAMD64VPRORQMasked512) v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRotateAllRightMaskedUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllLeftAndFillUpperFromUint64x2 [a] x y mask) - // result: (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) + // match: (RotateAllRightMaskedUint32x16 [a] x mask) + // result: (VPRORDMasked512 [a] x (VPMOVVec32x16ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDQMasked128) + mask := v_1 + v.reset(OpAMD64VPRORDMasked512) v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint64x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRotateAllRightMaskedUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: 
(MaskedShiftAllLeftAndFillUpperFromUint64x4 [a] x y mask) - // result: (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) + // match: (RotateAllRightMaskedUint32x4 [a] x mask) + // result: (VPRORDMasked128 [a] x (VPMOVVec32x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDQMasked256) + mask := v_1 + v.reset(OpAMD64VPRORDMasked128) v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRotateAllRightMaskedUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllLeftAndFillUpperFromUint64x8 [a] x y mask) - // result: (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) + // match: (RotateAllRightMaskedUint32x8 [a] x mask) + // result: (VPRORDMasked256 [a] x (VPMOVVec32x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDQMasked512) + mask := v_1 + v.reset(OpAMD64VPRORDMasked256) v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllLeftInt64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRotateAllRightMaskedUint64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllLeftInt64x2 x y mask) - // result: (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (RotateAllRightMaskedUint64x2 [a] x mask) + // result: (VPRORQMasked128 [a] x (VPMOVVec64x2ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLQMasked128) + mask := v_1 + v.reset(OpAMD64VPRORQMasked128) + v.AuxInt = int8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllLeftInt64x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRotateAllRightMaskedUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllLeftInt64x4 x y mask) - // result: (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (RotateAllRightMaskedUint64x4 [a] x mask) + // result: (VPRORQMasked256 [a] x (VPMOVVec64x4ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLQMasked256) + mask := v_1 + v.reset(OpAMD64VPRORQMasked256) + v.AuxInt = int8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllLeftInt64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRotateAllRightMaskedUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllLeftInt64x8 x y mask) - // result: (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (RotateAllRightMaskedUint64x8 [a] x mask) + // result: (VPRORQMasked512 [a] x (VPMOVVec64x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLQMasked512) + mask := v_1 + v.reset(OpAMD64VPRORQMasked512) + 
v.AuxInt = int8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllLeftUint64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRotateAllRightUint32x16(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftAllLeftUint64x2 x y mask) - // result: (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (RotateAllRightUint32x16 [a] x) + // result: (VPRORD512 [a] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPRORD512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftAllLeftUint64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRotateAllRightUint32x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftAllLeftUint64x4 x y mask) - // result: (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (RotateAllRightUint32x4 [a] x) + // result: (VPRORD128 [a] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPRORD128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftAllLeftUint64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRotateAllRightUint32x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftAllLeftUint64x8 x y mask) - // result: (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (RotateAllRightUint32x8 [a] x) + // result: (VPRORD256 [a] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPRORD256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRotateAllRightUint64x2(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftAllRightAndFillUpperFromInt16x16 [a] x y mask) - // result: (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) + // match: (RotateAllRightUint64x2 [a] x) + // result: (VPRORQ128 [a] x) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHRDWMasked256) + v.reset(OpAMD64VPRORQ128) v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRotateAllRightUint64x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftAllRightAndFillUpperFromInt16x32 [a] x y mask) - // result: (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) + // match: (RotateAllRightUint64x4 [a] x) + // result: (VPRORQ256 [a] x) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHRDWMasked512) + v.reset(OpAMD64VPRORQ256) v.AuxInt = int8ToAuxInt(a) - v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRotateAllRightUint64x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftAllRightAndFillUpperFromInt16x8 [a] x y mask) - // result: (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) + // match: (RotateAllRightUint64x8 [a] x) + // result: (VPRORQ512 [a] x) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHRDWMasked128) + v.reset(OpAMD64VPRORQ512) v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt32x16(v *Value) bool { +func rewriteValueAMD64_OpRotateLeftMaskedInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllRightAndFillUpperFromInt32x16 [a] x y mask) - // result: (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) + // match: (RotateLeftMaskedInt32x16 x y mask) + // result: (VPROLVDMasked512 x y (VPMOVVec32x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSHRDDMasked512) - v.AuxInt = int8ToAuxInt(a) + v.reset(OpAMD64VPROLVDMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt32x4(v *Value) bool { +func rewriteValueAMD64_OpRotateLeftMaskedInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllRightAndFillUpperFromInt32x4 [a] x y mask) - // result: (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) + // match: (RotateLeftMaskedInt32x4 x y mask) + // result: (VPROLVDMasked128 x y (VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSHRDDMasked128) - v.AuxInt = int8ToAuxInt(a) + v.reset(OpAMD64VPROLVDMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt32x8(v *Value) bool { +func rewriteValueAMD64_OpRotateLeftMaskedInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllRightAndFillUpperFromInt32x8 [a] x y mask) - // result: (VPSHRDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) + // match: (RotateLeftMaskedInt32x8 x y mask) + // result: (VPROLVDMasked256 x y (VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSHRDDMasked256) - v.AuxInt = int8ToAuxInt(a) + v.reset(OpAMD64VPROLVDMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt64x2(v *Value) bool { +func rewriteValueAMD64_OpRotateLeftMaskedInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllRightAndFillUpperFromInt64x2 [a] x y mask) - // result: (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) + // match: (RotateLeftMaskedInt64x2 x y mask) + // result: (VPROLVQMasked128 x y (VPMOVVec64x2ToM mask)) for { - a := 
auxIntToInt8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSHRDQMasked128) - v.AuxInt = int8ToAuxInt(a) + v.reset(OpAMD64VPROLVQMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt64x4(v *Value) bool { +func rewriteValueAMD64_OpRotateLeftMaskedInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllRightAndFillUpperFromInt64x4 [a] x y mask) - // result: (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) + // match: (RotateLeftMaskedInt64x4 x y mask) + // result: (VPROLVQMasked256 x y (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSHRDQMasked256) - v.AuxInt = int8ToAuxInt(a) + v.reset(OpAMD64VPROLVQMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt64x8(v *Value) bool { +func rewriteValueAMD64_OpRotateLeftMaskedInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllRightAndFillUpperFromInt64x8 [a] x y mask) - // result: (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) + // match: (RotateLeftMaskedInt64x8 x y mask) + // result: (VPROLVQMasked512 x y (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSHRDQMasked512) - v.AuxInt = int8ToAuxInt(a) + v.reset(OpAMD64VPROLVQMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint16x16(v *Value) bool { +func rewriteValueAMD64_OpRotateLeftMaskedUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllRightAndFillUpperFromUint16x16 [a] x y mask) - // result: (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) + // match: (RotateLeftMaskedUint32x16 x y mask) + // result: (VPROLVDMasked512 x y (VPMOVVec32x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSHRDWMasked256) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v.reset(OpAMD64VPROLVDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint16x32(v *Value) bool { +func rewriteValueAMD64_OpRotateLeftMaskedUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllRightAndFillUpperFromUint16x32 [a] x y mask) - // result: (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) + // match: (RotateLeftMaskedUint32x4 x y mask) + // result: (VPROLVDMasked128 x y (VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSHRDWMasked512) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v.reset(OpAMD64VPROLVDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint16x8(v *Value) bool { +func rewriteValueAMD64_OpRotateLeftMaskedUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 
:= v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllRightAndFillUpperFromUint16x8 [a] x y mask) - // result: (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) + // match: (RotateLeftMaskedUint32x8 x y mask) + // result: (VPROLVDMasked256 x y (VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSHRDWMasked128) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v.reset(OpAMD64VPROLVDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint32x16(v *Value) bool { +func rewriteValueAMD64_OpRotateLeftMaskedUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllRightAndFillUpperFromUint32x16 [a] x y mask) - // result: (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) + // match: (RotateLeftMaskedUint64x2 x y mask) + // result: (VPROLVQMasked128 x y (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSHRDDMasked512) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v.reset(OpAMD64VPROLVQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint32x4(v *Value) bool { +func rewriteValueAMD64_OpRotateLeftMaskedUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllRightAndFillUpperFromUint32x4 [a] x y mask) - // result: (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) + // match: (RotateLeftMaskedUint64x4 x y mask) + // result: (VPROLVQMasked256 x y (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSHRDDMasked128) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VPROLVQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint32x8(v *Value) bool { +func rewriteValueAMD64_OpRotateLeftMaskedUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllRightAndFillUpperFromUint32x8 [a] x y mask) - // result: (VPSHRDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) + // match: (RotateLeftMaskedUint64x8 x y mask) + // result: (VPROLVQMasked512 x y (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSHRDDMasked256) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VPROLVQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint64x2(v *Value) bool { +func rewriteValueAMD64_OpRotateRightMaskedInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllRightAndFillUpperFromUint64x2 [a] x y mask) - // result: (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) + // match: (RotateRightMaskedInt32x16 x y mask) + // result: (VPRORVDMasked512 x y (VPMOVVec32x16ToM mask)) for { - a := 
auxIntToInt8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSHRDQMasked128) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPRORVDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint64x4(v *Value) bool { +func rewriteValueAMD64_OpRotateRightMaskedInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllRightAndFillUpperFromUint64x4 [a] x y mask) - // result: (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) + // match: (RotateRightMaskedInt32x4 x y mask) + // result: (VPRORVDMasked128 x y (VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSHRDQMasked256) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPRORVDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint64x8(v *Value) bool { +func rewriteValueAMD64_OpRotateRightMaskedInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllRightAndFillUpperFromUint64x8 [a] x y mask) - // result: (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) + // match: (RotateRightMaskedInt32x8 x y mask) + // result: (VPRORVDMasked256 x y (VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSHRDQMasked512) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v.reset(OpAMD64VPRORVDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightInt64x2(v *Value) bool { +func rewriteValueAMD64_OpRotateRightMaskedInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllRightInt64x2 x y mask) - // result: (VPSRLQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (RotateRightMaskedInt64x2 x y mask) + // result: (VPRORVQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLQMasked128) + v.reset(OpAMD64VPRORVQMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightInt64x4(v *Value) bool { +func rewriteValueAMD64_OpRotateRightMaskedInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllRightInt64x4 x y mask) - // result: (VPSRLQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (RotateRightMaskedInt64x4 x y mask) + // result: (VPRORVQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLQMasked256) + v.reset(OpAMD64VPRORVQMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightInt64x8(v *Value) bool { +func rewriteValueAMD64_OpRotateRightMaskedInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllRightInt64x8 x y mask) - // result: (VPSRLQMasked512 x y (VPMOVVec64x8ToM mask)) + // 
match: (RotateRightMaskedInt64x8 x y mask) + // result: (VPRORVQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLQMasked512) + v.reset(OpAMD64VPRORVQMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightSignExtendedInt64x2(v *Value) bool { +func rewriteValueAMD64_OpRotateRightMaskedUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllRightSignExtendedInt64x2 x y mask) - // result: (VPSRAQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (RotateRightMaskedUint32x16 x y mask) + // result: (VPRORVDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRAQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPRORVDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightSignExtendedInt64x4(v *Value) bool { +func rewriteValueAMD64_OpRotateRightMaskedUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllRightSignExtendedInt64x4 x y mask) - // result: (VPSRAQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (RotateRightMaskedUint32x4 x y mask) + // result: (VPRORVDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRAQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPRORVDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightSignExtendedInt64x8(v *Value) bool { +func rewriteValueAMD64_OpRotateRightMaskedUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllRightSignExtendedInt64x8 x y mask) - // result: (VPSRAQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (RotateRightMaskedUint32x8 x y mask) + // result: (VPRORVDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRAQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v.reset(OpAMD64VPRORVDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightUint64x2(v *Value) bool { +func rewriteValueAMD64_OpRotateRightMaskedUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllRightUint64x2 x y mask) - // result: (VPSRLQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (RotateRightMaskedUint64x2 x y mask) + // result: (VPRORVQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLQMasked128) + v.reset(OpAMD64VPRORVQMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightUint64x4(v *Value) bool { +func rewriteValueAMD64_OpRotateRightMaskedUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllRightUint64x4 x y mask) - // result: (VPSRLQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (RotateRightMaskedUint64x4 x y mask) + // result: 
(VPRORVQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLQMasked256) + v.reset(OpAMD64VPRORVQMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightUint64x8(v *Value) bool { +func rewriteValueAMD64_OpRotateRightMaskedUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllRightUint64x8 x y mask) - // result: (VPSRLQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (RotateRightMaskedUint64x8 x y mask) + // result: (VPRORVQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLQMasked512) + v.reset(OpAMD64VPRORVQMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt16x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftLeftAndFillUpperFromInt16x16 x y z mask) - // result: (VPSHLDVWMasked256 x y z (VPMOVVec16x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt16x32(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRoundFloat32x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftLeftAndFillUpperFromInt16x32 x y z mask) - // result: (VPSHLDVWMasked512 x y z (VPMOVVec16x32ToM mask)) + // match: (RoundFloat32x4 x) + // result: (VROUNDPS128 [0] x) for { x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VROUNDPS128) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt16x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRoundFloat32x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftLeftAndFillUpperFromInt16x8 x y z mask) - // result: (VPSHLDVWMasked128 x y z (VPMOVVec16x8ToM mask)) + // match: (RoundFloat32x8 x) + // result: (VROUNDPS256 [0] x) for { x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VROUNDPS256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRoundFloat64x2(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftLeftAndFillUpperFromInt32x16 x y z mask) - // result: (VPSHLDVDMasked512 x y z (VPMOVVec32x16ToM mask)) + // match: (RoundFloat64x2 x) + // result: (VROUNDPD128 [0] x) for { x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VROUNDPD128) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) return true 
} } -func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRoundFloat64x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftLeftAndFillUpperFromInt32x4 x y z mask) - // result: (VPSHLDVDMasked128 x y z (VPMOVVec32x4ToM mask)) + // match: (RoundFloat64x4 x) + // result: (VROUNDPD256 [0] x) for { x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VROUNDPD256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRoundToEven(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftLeftAndFillUpperFromInt32x8 x y z mask) - // result: (VPSHLDVDMasked256 x y z (VPMOVVec32x8ToM mask)) + // match: (RoundToEven x) + // result: (ROUNDSD [0] x) for { x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64ROUNDSD) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRoundWithPrecisionFloat32x16(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftLeftAndFillUpperFromInt64x2 x y z mask) - // result: (VPSHLDVQMasked128 x y z (VPMOVVec64x2ToM mask)) + // match: (RoundWithPrecisionFloat32x16 [a] x) + // result: (VRNDSCALEPS512 [a+0] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VRNDSCALEPS512) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRoundWithPrecisionFloat32x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftLeftAndFillUpperFromInt64x4 x y z mask) - // result: (VPSHLDVQMasked256 x y z (VPMOVVec64x4ToM mask)) + // match: (RoundWithPrecisionFloat32x4 [a] x) + // result: (VRNDSCALEPS128 [a+0] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VRNDSCALEPS128) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRoundWithPrecisionFloat32x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftLeftAndFillUpperFromInt64x8 x y z mask) - // result: (VPSHLDVQMasked512 x y z (VPMOVVec64x8ToM mask)) + // match: (RoundWithPrecisionFloat32x8 [a] x) + // result: (VRNDSCALEPS256 [a+0] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVQMasked512) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VRNDSCALEPS256) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint16x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRoundWithPrecisionFloat64x2(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftLeftAndFillUpperFromUint16x16 x y z mask) - // result: (VPSHLDVWMasked256 x y z (VPMOVVec16x16ToM mask)) + // match: (RoundWithPrecisionFloat64x2 [a] x) + // result: (VRNDSCALEPD128 [a+0] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VRNDSCALEPD128) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint16x32(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRoundWithPrecisionFloat64x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftLeftAndFillUpperFromUint16x32 x y z mask) - // result: (VPSHLDVWMasked512 x y z (VPMOVVec16x32ToM mask)) + // match: (RoundWithPrecisionFloat64x4 [a] x) + // result: (VRNDSCALEPD256 [a+0] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VRNDSCALEPD256) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint16x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRoundWithPrecisionFloat64x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftLeftAndFillUpperFromUint16x8 x y z mask) - // result: (VPSHLDVWMasked128 x y z (VPMOVVec16x8ToM mask)) + // match: (RoundWithPrecisionFloat64x8 [a] x) + // result: (VRNDSCALEPD512 [a+0] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VRNDSCALEPD512) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftLeftAndFillUpperFromUint32x16 x y z mask) - // result: (VPSHLDVDMasked512 x y z (VPMOVVec32x16ToM mask)) + // match: (RoundWithPrecisionMaskedFloat32x16 [a] x mask) + // result: (VRNDSCALEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVDMasked512) + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 0) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func 
rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftLeftAndFillUpperFromUint32x4 x y z mask) - // result: (VPSHLDVDMasked128 x y z (VPMOVVec32x4ToM mask)) + // match: (RoundWithPrecisionMaskedFloat32x4 [a] x mask) + // result: (VRNDSCALEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVDMasked128) + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 0) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftLeftAndFillUpperFromUint32x8 x y z mask) - // result: (VPSHLDVDMasked256 x y z (VPMOVVec32x8ToM mask)) + // match: (RoundWithPrecisionMaskedFloat32x8 [a] x mask) + // result: (VRNDSCALEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVDMasked256) + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 0) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftLeftAndFillUpperFromUint64x2 x y z mask) - // result: (VPSHLDVQMasked128 x y z (VPMOVVec64x2ToM mask)) + // match: (RoundWithPrecisionMaskedFloat64x2 [a] x mask) + // result: (VRNDSCALEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVQMasked128) + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 0) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftLeftAndFillUpperFromUint64x4 x y z mask) - // result: (VPSHLDVQMasked256 x y z (VPMOVVec64x4ToM mask)) + // match: (RoundWithPrecisionMaskedFloat64x4 [a] x mask) + // result: (VRNDSCALEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVQMasked256) + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 0) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftLeftAndFillUpperFromUint64x8 x y z mask) - 
// result: (VPSHLDVQMasked512 x y z (VPMOVVec64x8ToM mask)) + // match: (RoundWithPrecisionMaskedFloat64x8 [a] x mask) + // result: (VRNDSCALEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVQMasked512) + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 0) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftLeftInt16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh16Ux16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftLeftInt16x16 x y mask) - // result: (VPSLLVWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (Rsh16Ux16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRW x y) (SBBLcarrymask (CMPWconst y [16]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(16) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedShiftLeftInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftLeftInt16x32 x y mask) - // result: (VPSLLVWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (Rsh16Ux16 x y) + // cond: shiftIsBounded(v) + // result: (SHRW x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRW) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftLeftInt16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh16Ux32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftLeftInt16x8 x y mask) - // result: (VPSLLVWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (Rsh16Ux32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRW x y) (SBBLcarrymask (CMPLconst y [16]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(16) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedShiftLeftInt32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftLeftInt32x16 x y mask) - // result: (VPSLLVDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (Rsh16Ux32 x y) + // cond: shiftIsBounded(v) + // result: (SHRW x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, 
v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRW) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftLeftInt32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh16Ux64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftLeftInt32x4 x y mask) - // result: (VPSLLVDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (Rsh16Ux64 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRW x y) (SBBLcarrymask (CMPQconst y [16]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(16) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedShiftLeftInt32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftLeftInt32x8 x y mask) - // result: (VPSLLVDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (Rsh16Ux64 x y) + // cond: shiftIsBounded(v) + // result: (SHRW x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRW) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftLeftInt64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh16Ux8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftLeftInt64x2 x y mask) - // result: (VPSLLVQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (Rsh16Ux8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRW x y) (SBBLcarrymask (CMPBconst y [16]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(16) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedShiftLeftInt64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftLeftInt64x4 x y mask) - // result: (VPSLLVQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (Rsh16Ux8 x y) + // cond: shiftIsBounded(v) + // result: (SHRW x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRW) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftLeftInt64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftLeftInt64x8 x y mask) - // result: (VPSLLVQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (Rsh16x16 x 
y) + // cond: !shiftIsBounded(v) + // result: (SARW x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [16]))))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARW) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v3.AuxInt = int16ToAuxInt(16) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } -} -func rewriteValueAMD64_OpMaskedShiftLeftUint16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftLeftUint16x16 x y mask) - // result: (VPSLLVWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (Rsh16x16 x y) + // cond: shiftIsBounded(v) + // result: (SARW x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARW) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftLeftUint16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftLeftUint16x32 x y mask) - // result: (VPSLLVWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (Rsh16x32 x y) + // cond: !shiftIsBounded(v) + // result: (SARW x (ORL y (NOTL (SBBLcarrymask (CMPLconst y [16]))))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARW) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(16) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } -} -func rewriteValueAMD64_OpMaskedShiftLeftUint16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftLeftUint16x8 x y mask) - // result: (VPSLLVWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (Rsh16x32 x y) + // cond: shiftIsBounded(v) + // result: (SARW x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARW) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftLeftUint32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh16x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftLeftUint32x16 x y mask) - // result: (VPSLLVDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (Rsh16x64 x y) + // cond: !shiftIsBounded(v) + // result: (SARW x (ORQ y (NOTQ (SBBQcarrymask (CMPQconst y [16]))))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVDMasked512) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARW) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(16) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } -} -func rewriteValueAMD64_OpMaskedShiftLeftUint32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftLeftUint32x4 x y mask) - // result: (VPSLLVDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (Rsh16x64 x y) + // cond: shiftIsBounded(v) + // result: (SARW x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARW) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftLeftUint32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftLeftUint32x8 x y mask) - // result: (VPSLLVDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (Rsh16x8 x y) + // cond: !shiftIsBounded(v) + // result: (SARW x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [16]))))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARW) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v3.AuxInt = int8ToAuxInt(16) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } -} -func rewriteValueAMD64_OpMaskedShiftLeftUint64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftLeftUint64x2 x y mask) - // result: (VPSLLVQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (Rsh16x8 x y) + // cond: shiftIsBounded(v) + // result: (SARW x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARW) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftLeftUint64x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh32Ux16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftLeftUint64x4 x y mask) - // result: (VPSLLVQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (Rsh32Ux16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRL x y) (SBBLcarrymask (CMPWconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, 
OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedShiftLeftUint64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftLeftUint64x8 x y mask) - // result: (VPSLLVQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (Rsh32Ux16 x y) + // cond: shiftIsBounded(v) + // result: (SHRL x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt16x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh32Ux32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightAndFillUpperFromInt16x16 x y z mask) - // result: (VPSHRDVWMasked256 x y z (VPMOVVec16x16ToM mask)) + // match: (Rsh32Ux32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRL x y) (SBBLcarrymask (CMPLconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt16x32(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftRightAndFillUpperFromInt16x32 x y z mask) - // result: (VPSHRDVWMasked512 x y z (VPMOVVec16x32ToM mask)) + // match: (Rsh32Ux32 x y) + // cond: shiftIsBounded(v) + // result: (SHRL x y) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt16x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh32Ux64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightAndFillUpperFromInt16x8 x y z mask) - // result: (VPSHRDVWMasked128 x y z (VPMOVVec16x8ToM mask)) + // match: (Rsh32Ux64 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRL x y) (SBBLcarrymask (CMPQconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func 
rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftRightAndFillUpperFromInt32x16 x y z mask) - // result: (VPSHRDVDMasked512 x y z (VPMOVVec32x16ToM mask)) + // match: (Rsh32Ux64 x y) + // cond: shiftIsBounded(v) + // result: (SHRL x y) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh32Ux8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightAndFillUpperFromInt32x4 x y z mask) - // result: (VPSHRDVDMasked128 x y z (VPMOVVec32x4ToM mask)) + // match: (Rsh32Ux8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRL x y) (SBBLcarrymask (CMPBconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftRightAndFillUpperFromInt32x8 x y z mask) - // result: (VPSHRDVDMasked256 x y z (VPMOVVec32x8ToM mask)) + // match: (Rsh32Ux8 x y) + // cond: shiftIsBounded(v) + // result: (SHRL x y) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightAndFillUpperFromInt64x2 x y z mask) - // result: (VPSHRDVQMasked128 x y z (VPMOVVec64x2ToM mask)) + // match: (Rsh32x16 x y) + // cond: !shiftIsBounded(v) + // result: (SARL x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [32]))))) for { + t := v.Type x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARL) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v3.AuxInt = int16ToAuxInt(32) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } -} -func 
rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftRightAndFillUpperFromInt64x4 x y z mask) - // result: (VPSHRDVQMasked256 x y z (VPMOVVec64x4ToM mask)) + // match: (Rsh32x16 x y) + // cond: shiftIsBounded(v) + // result: (SARL x y) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh32x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightAndFillUpperFromInt64x8 x y z mask) - // result: (VPSHRDVQMasked512 x y z (VPMOVVec64x8ToM mask)) + // match: (Rsh32x32 x y) + // cond: !shiftIsBounded(v) + // result: (SARL x (ORL y (NOTL (SBBLcarrymask (CMPLconst y [32]))))) for { + t := v.Type x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARL) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(32) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } -} -func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint16x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftRightAndFillUpperFromUint16x16 x y z mask) - // result: (VPSHRDVWMasked256 x y z (VPMOVVec16x16ToM mask)) + // match: (Rsh32x32 x y) + // cond: shiftIsBounded(v) + // result: (SARL x y) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint16x32(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh32x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightAndFillUpperFromUint16x32 x y z mask) - // result: (VPSHRDVWMasked512 x y z (VPMOVVec16x32ToM mask)) + // match: (Rsh32x64 x y) + // cond: !shiftIsBounded(v) + // result: (SARL x (ORQ y (NOTQ (SBBQcarrymask (CMPQconst y [32]))))) for { + t := v.Type x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARL) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(32) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + 
v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } -} -func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint16x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftRightAndFillUpperFromUint16x8 x y z mask) - // result: (VPSHRDVWMasked128 x y z (VPMOVVec16x8ToM mask)) + // match: (Rsh32x64 x y) + // cond: shiftIsBounded(v) + // result: (SARL x y) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightAndFillUpperFromUint32x16 x y z mask) - // result: (VPSHRDVDMasked512 x y z (VPMOVVec32x16ToM mask)) + // match: (Rsh32x8 x y) + // cond: !shiftIsBounded(v) + // result: (SARL x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [32]))))) for { + t := v.Type x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARL) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v3.AuxInt = int8ToAuxInt(32) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } -} -func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftRightAndFillUpperFromUint32x4 x y z mask) - // result: (VPSHRDVDMasked128 x y z (VPMOVVec32x4ToM mask)) + // match: (Rsh32x8 x y) + // cond: shiftIsBounded(v) + // result: (SARL x y) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh64Ux16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightAndFillUpperFromUint32x8 x y z mask) - // result: (VPSHRDVDMasked256 x y z (VPMOVVec32x8ToM mask)) + // match: (Rsh64Ux16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHRQ x y) (SBBQcarrymask (CMPWconst y [64]))) for { + t := v.Type x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) 
return true } -} -func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftRightAndFillUpperFromUint64x2 x y z mask) - // result: (VPSHRDVQMasked128 x y z (VPMOVVec64x2ToM mask)) + // match: (Rsh64Ux16 x y) + // cond: shiftIsBounded(v) + // result: (SHRQ x y) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRQ) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh64Ux32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightAndFillUpperFromUint64x4 x y z mask) - // result: (VPSHRDVQMasked256 x y z (VPMOVVec64x4ToM mask)) + // match: (Rsh64Ux32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHRQ x y) (SBBQcarrymask (CMPLconst y [64]))) for { + t := v.Type x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftRightAndFillUpperFromUint64x8 x y z mask) - // result: (VPSHRDVQMasked512 x y z (VPMOVVec64x8ToM mask)) + // match: (Rsh64Ux32 x y) + // cond: shiftIsBounded(v) + // result: (SHRQ x y) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRQ) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftRightInt16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh64Ux64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightInt16x16 x y mask) - // result: (VPSRLVWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (Rsh64Ux64 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHRQ x y) (SBBQcarrymask (CMPQconst y [64]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRLVWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedShiftRightInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftRightInt16x32 
x y mask) - // result: (VPSRLVWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (Rsh64Ux64 x y) + // cond: shiftIsBounded(v) + // result: (SHRQ x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRLVWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRQ) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftRightInt16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh64Ux8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightInt16x8 x y mask) - // result: (VPSRLVWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (Rsh64Ux8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHRQ x y) (SBBQcarrymask (CMPBconst y [64]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRLVWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedShiftRightInt32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftRightInt32x16 x y mask) - // result: (VPSRLVDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (Rsh64Ux8 x y) + // cond: shiftIsBounded(v) + // result: (SHRQ x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRLVDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRQ) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftRightInt32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh64x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightInt32x4 x y mask) - // result: (VPSRLVDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (Rsh64x16 x y) + // cond: !shiftIsBounded(v) + // result: (SARQ x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [64]))))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARQ) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v3.AuxInt = int16ToAuxInt(64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) + return true + } + // match: (Rsh64x16 x y) + // cond: shiftIsBounded(v) + // result: (SARQ x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRLVDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARQ) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftRightInt32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh64x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightInt32x8 x y mask) - // result: (VPSRLVDMasked256 x y 
(VPMOVVec32x8ToM mask)) + // match: (Rsh64x32 x y) + // cond: !shiftIsBounded(v) + // result: (SARQ x (ORL y (NOTL (SBBLcarrymask (CMPLconst y [64]))))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRLVDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARQ) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } -} -func rewriteValueAMD64_OpMaskedShiftRightInt64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftRightInt64x2 x y mask) - // result: (VPSRLVQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (Rsh64x32 x y) + // cond: shiftIsBounded(v) + // result: (SARQ x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRLVQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARQ) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftRightInt64x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh64x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightInt64x4 x y mask) - // result: (VPSRLVQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (Rsh64x64 x y) + // cond: !shiftIsBounded(v) + // result: (SARQ x (ORQ y (NOTQ (SBBQcarrymask (CMPQconst y [64]))))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRLVQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARQ) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } -} -func rewriteValueAMD64_OpMaskedShiftRightInt64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftRightInt64x8 x y mask) - // result: (VPSRLVQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (Rsh64x64 x y) + // cond: shiftIsBounded(v) + // result: (SARQ x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRLVQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARQ) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightSignExtendedInt16x16 x y mask) - // result: (VPSRAVWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (Rsh64x8 x y) + // cond: !shiftIsBounded(v) + // result: (SARQ x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [64]))))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - 
v.reset(OpAMD64VPSRAVWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARQ) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v3.AuxInt = int8ToAuxInt(64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } -} -func rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftRightSignExtendedInt16x32 x y mask) - // result: (VPSRAVWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (Rsh64x8 x y) + // cond: shiftIsBounded(v) + // result: (SARQ x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARQ) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh8Ux16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightSignExtendedInt16x8 x y mask) - // result: (VPSRAVWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (Rsh8Ux16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRB x y) (SBBLcarrymask (CMPWconst y [8]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(8) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftRightSignExtendedInt32x16 x y mask) - // result: (VPSRAVDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (Rsh8Ux16 x y) + // cond: shiftIsBounded(v) + // result: (SHRB x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRB) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh8Ux32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightSignExtendedInt32x4 x y mask) - // result: (VPSRAVDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (Rsh8Ux32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRB x y) (SBBLcarrymask (CMPLconst y [8]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, 
OpAMD64SHRB, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(8) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftRightSignExtendedInt32x8 x y mask) - // result: (VPSRAVDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (Rsh8Ux32 x y) + // cond: shiftIsBounded(v) + // result: (SHRB x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRB) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh8Ux64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightSignExtendedInt64x2 x y mask) - // result: (VPSRAVQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (Rsh8Ux64 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRB x y) (SBBLcarrymask (CMPQconst y [8]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(8) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftRightSignExtendedInt64x4 x y mask) - // result: (VPSRAVQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (Rsh8Ux64 x y) + // cond: shiftIsBounded(v) + // result: (SHRB x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRB) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh8Ux8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightSignExtendedInt64x8 x y mask) - // result: (VPSRAVQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (Rsh8Ux8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRB x y) (SBBLcarrymask (CMPBconst y [8]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(8) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint16x16(v *Value) bool { - v_2 
:= v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftRightSignExtendedUint16x16 x y mask) - // result: (VPSRAVWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (Rsh8Ux8 x y) + // cond: shiftIsBounded(v) + // result: (SHRB x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRB) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightSignExtendedUint16x32 x y mask) - // result: (VPSRAVWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (Rsh8x16 x y) + // cond: !shiftIsBounded(v) + // result: (SARB x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [8]))))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARB) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v3.AuxInt = int16ToAuxInt(8) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } -} -func rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftRightSignExtendedUint16x8 x y mask) - // result: (VPSRAVWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (Rsh8x16 x y) + // cond: shiftIsBounded(v) + // result: (SARB x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARB) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightSignExtendedUint32x16 x y mask) - // result: (VPSRAVDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (Rsh8x32 x y) + // cond: !shiftIsBounded(v) + // result: (SARB x (ORL y (NOTL (SBBLcarrymask (CMPLconst y [8]))))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARB) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(8) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } -} -func rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftRightSignExtendedUint32x4 x y mask) - // 
result: (VPSRAVDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (Rsh8x32 x y) + // cond: shiftIsBounded(v) + // result: (SARB x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARB) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightSignExtendedUint32x8 x y mask) - // result: (VPSRAVDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (Rsh8x64 x y) + // cond: !shiftIsBounded(v) + // result: (SARB x (ORQ y (NOTQ (SBBQcarrymask (CMPQconst y [8]))))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARB) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(8) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } -} -func rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftRightSignExtendedUint64x2 x y mask) - // result: (VPSRAVQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (Rsh8x64 x y) + // cond: shiftIsBounded(v) + // result: (SARB x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARB) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint64x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh8x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightSignExtendedUint64x4 x y mask) - // result: (VPSRAVQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (Rsh8x8 x y) + // cond: !shiftIsBounded(v) + // result: (SARB x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [8]))))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARB) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v3.AuxInt = int8ToAuxInt(8) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } -} -func rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftRightSignExtendedUint64x8 x y mask) - // result: (VPSRAVQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (Rsh8x8 x y) + // cond: shiftIsBounded(v) + // result: (SARB x y) for { - x := v_0 
- y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARB) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftRightUint16x16(v *Value) bool { +func rewriteValueAMD64_OpSaturatedAddMaskedInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightUint16x16 x y mask) - // result: (VPSRLVWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (SaturatedAddMaskedInt16x16 x y mask) + // result: (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLVWMasked256) + v.reset(OpAMD64VPADDSWMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftRightUint16x32(v *Value) bool { +func rewriteValueAMD64_OpSaturatedAddMaskedInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightUint16x32 x y mask) - // result: (VPSRLVWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (SaturatedAddMaskedInt16x32 x y mask) + // result: (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLVWMasked512) + v.reset(OpAMD64VPADDSWMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftRightUint16x8(v *Value) bool { +func rewriteValueAMD64_OpSaturatedAddMaskedInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightUint16x8 x y mask) - // result: (VPSRLVWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (SaturatedAddMaskedInt16x8 x y mask) + // result: (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLVWMasked128) + v.reset(OpAMD64VPADDSWMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftRightUint32x16(v *Value) bool { +func rewriteValueAMD64_OpSaturatedAddMaskedInt8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightUint32x16 x y mask) - // result: (VPSRLVDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (SaturatedAddMaskedInt8x16 x y mask) + // result: (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLVDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v.reset(OpAMD64VPADDSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftRightUint32x4(v *Value) bool { +func rewriteValueAMD64_OpSaturatedAddMaskedInt8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightUint32x4 x y mask) - // result: (VPSRLVDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (SaturatedAddMaskedInt8x32 x y mask) + // result: (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLVDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VPADDSBMasked256) + v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftRightUint32x8(v *Value) bool { +func rewriteValueAMD64_OpSaturatedAddMaskedInt8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightUint32x8 x y mask) - // result: (VPSRLVDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (SaturatedAddMaskedInt8x64 x y mask) + // result: (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLVDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VPADDSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftRightUint64x2(v *Value) bool { +func rewriteValueAMD64_OpSaturatedAddMaskedUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightUint64x2 x y mask) - // result: (VPSRLVQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (SaturatedAddMaskedUint16x16 x y mask) + // result: (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLVQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPADDSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftRightUint64x4(v *Value) bool { +func rewriteValueAMD64_OpSaturatedAddMaskedUint16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightUint64x4 x y mask) - // result: (VPSRLVQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (SaturatedAddMaskedUint16x32 x y mask) + // result: (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLVQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPADDSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftRightUint64x8(v *Value) bool { +func rewriteValueAMD64_OpSaturatedAddMaskedUint16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightUint64x8 x y mask) - // result: (VPSRLVQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (SaturatedAddMaskedUint16x8 x y mask) + // result: (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLVQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v.reset(OpAMD64VPADDSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSqrtFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpSaturatedAddMaskedUint8x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSqrtFloat32x16 x mask) - // result: (VSQRTPSMasked512 x (VPMOVVec32x16ToM mask)) + // match: (SaturatedAddMaskedUint8x16 x y mask) + // result: (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VSQRTPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + y := v_1 + mask := v_2 + 
v.reset(OpAMD64VPADDSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg2(x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSqrtFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpSaturatedAddMaskedUint8x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSqrtFloat32x4 x mask) - // result: (VSQRTPSMasked128 x (VPMOVVec32x4ToM mask)) + // match: (SaturatedAddMaskedUint8x32 x y mask) + // result: (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VSQRTPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg2(x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSqrtFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpSaturatedAddMaskedUint8x64(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSqrtFloat32x8 x mask) - // result: (VSQRTPSMasked256 x (VPMOVVec32x8ToM mask)) + // match: (SaturatedAddMaskedUint8x64 x y mask) + // result: (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VSQRTPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg2(x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSqrtFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpSaturatedPairDotProdAccumulateMaskedInt32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSqrtFloat64x2 x mask) - // result: (VSQRTPDMasked128 x (VPMOVVec64x2ToM mask)) + // match: (SaturatedPairDotProdAccumulateMaskedInt32x16 x y z mask) + // result: (VPDPWSSDSMasked512 x y z (VPMOVVec32x16ToM mask)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VSQRTPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPWSSDSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg2(x, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMaskedSqrtFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpSaturatedPairDotProdAccumulateMaskedInt32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSqrtFloat64x4 x mask) - // result: (VSQRTPDMasked256 x (VPMOVVec64x4ToM mask)) + // match: (SaturatedPairDotProdAccumulateMaskedInt32x4 x y z mask) + // result: (VPDPWSSDSMasked128 x y z (VPMOVVec32x4ToM mask)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VSQRTPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPWSSDSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg2(x, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMaskedSqrtFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpSaturatedPairDotProdAccumulateMaskedInt32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSqrtFloat64x8 x mask) - 
// result: (VSQRTPDMasked512 x (VPMOVVec64x8ToM mask)) + // match: (SaturatedPairDotProdAccumulateMaskedInt32x8 x y z mask) + // result: (VPDPWSSDSMasked256 x y z (VPMOVVec32x8ToM mask)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VSQRTPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPWSSDSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg2(x, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMaskedSubFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpSaturatedSubMaskedInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSubFloat32x16 x y mask) - // result: (VSUBPSMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (SaturatedSubMaskedInt16x16 x y mask) + // result: (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VSUBPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v.reset(OpAMD64VPSUBSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSubFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpSaturatedSubMaskedInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSubFloat32x4 x y mask) - // result: (VSUBPSMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (SaturatedSubMaskedInt16x32 x y mask) + // result: (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VSUBPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VPSUBSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSubFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpSaturatedSubMaskedInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSubFloat32x8 x y mask) - // result: (VSUBPSMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (SaturatedSubMaskedInt16x8 x y mask) + // result: (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VSUBPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VPSUBSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSubFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpSaturatedSubMaskedInt8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSubFloat64x2 x y mask) - // result: (VSUBPDMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (SaturatedSubMaskedInt8x16 x y mask) + // result: (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VSUBPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPSUBSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSubFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpSaturatedSubMaskedInt8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSubFloat64x4 x y 
mask) - // result: (VSUBPDMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (SaturatedSubMaskedInt8x32 x y mask) + // result: (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VSUBPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPSUBSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSubFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpSaturatedSubMaskedInt8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSubFloat64x8 x y mask) - // result: (VSUBPDMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (SaturatedSubMaskedInt8x64 x y mask) + // result: (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VSUBPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v.reset(OpAMD64VPSUBSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSubInt16x16(v *Value) bool { +func rewriteValueAMD64_OpSaturatedSubMaskedUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSubInt16x16 x y mask) - // result: (VPSUBWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (SaturatedSubMaskedUint16x16 x y mask) + // result: (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBWMasked256) + v.reset(OpAMD64VPSUBSWMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSubInt16x32(v *Value) bool { +func rewriteValueAMD64_OpSaturatedSubMaskedUint16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSubInt16x32 x y mask) - // result: (VPSUBWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (SaturatedSubMaskedUint16x32 x y mask) + // result: (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBWMasked512) + v.reset(OpAMD64VPSUBSWMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSubInt16x8(v *Value) bool { +func rewriteValueAMD64_OpSaturatedSubMaskedUint16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSubInt16x8 x y mask) - // result: (VPSUBWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (SaturatedSubMaskedUint16x8 x y mask) + // result: (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBWMasked128) + v.reset(OpAMD64VPSUBSWMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSubInt32x16(v *Value) bool { +func rewriteValueAMD64_OpSaturatedSubMaskedUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSubInt32x16 x y mask) - // result: (VPSUBDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (SaturatedSubMaskedUint8x16 x y mask) + // result: (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBDMasked512) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v.reset(OpAMD64VPSUBSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSubInt32x4(v *Value) bool { +func rewriteValueAMD64_OpSaturatedSubMaskedUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSubInt32x4 x y mask) - // result: (VPSUBDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (SaturatedSubMaskedUint8x32 x y mask) + // result: (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VPSUBSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSubInt32x8(v *Value) bool { +func rewriteValueAMD64_OpSaturatedSubMaskedUint8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSubInt32x8 x y mask) - // result: (VPSUBDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (SaturatedSubMaskedUint8x64 x y mask) + // result: (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VPSUBSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSubInt64x2(v *Value) bool { +func rewriteValueAMD64_OpSaturatedUnsignedSignedPairDotProdMaskedUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSubInt64x2 x y mask) - // result: (VPSUBQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (SaturatedUnsignedSignedPairDotProdMaskedUint8x16 x y mask) + // result: (VPMADDUBSWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPMADDUBSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSubInt64x4(v *Value) bool { +func rewriteValueAMD64_OpSaturatedUnsignedSignedPairDotProdMaskedUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSubInt64x4 x y mask) - // result: (VPSUBQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (SaturatedUnsignedSignedPairDotProdMaskedUint8x32 x y mask) + // result: (VPMADDUBSWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPMADDUBSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSubInt64x8(v *Value) bool { +func rewriteValueAMD64_OpSaturatedUnsignedSignedPairDotProdMaskedUint8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSubInt64x8 x y mask) - // result: (VPSUBQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (SaturatedUnsignedSignedPairDotProdMaskedUint8x64 x y mask) + // result: (VPMADDUBSWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - 
v.reset(OpAMD64VPSUBQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v.reset(OpAMD64VPMADDUBSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSubInt8x16(v *Value) bool { +func rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSubInt8x16 x y mask) - // result: (VPSUBBMasked128 x y (VPMOVVec8x16ToM mask)) + // match: (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16 x y z mask) + // result: (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMaskedSubInt8x32(v *Value) bool { +func rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSubInt8x32 x y mask) - // result: (VPSUBBMasked256 x y (VPMOVVec8x32ToM mask)) + // match: (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4 x y z mask) + // result: (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMaskedSubInt8x64(v *Value) bool { +func rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSubInt8x64 x y mask) - // result: (VPSUBBMasked512 x y (VPMOVVec8x64ToM mask)) + // match: (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8 x y z mask) + // result: (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMaskedSubUint16x16(v *Value) bool { +func rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x16(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSubUint16x16 x y mask) - // result: (VPSUBWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x16 x y z mask) + // result: (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - 
v.AddArg3(x, y, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMaskedSubUint16x32(v *Value) bool { +func rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x4(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSubUint16x32 x y mask) - // result: (VPSUBWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x4 x y z mask) + // result: (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMaskedSubUint16x8(v *Value) bool { +func rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x8(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSubUint16x8 x y mask) - // result: (VPSUBWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x8 x y z mask) + // result: (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMaskedSubUint32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpSelect0(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (MaskedSubUint32x16 x y mask) - // result: (VPSUBDMasked512 x y (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (Select0 (Mul64uover x y)) + // result: (Select0 (MULQU x y)) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if v_0.Op != OpMul64uover { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpSelect0) + v.Type = typ.UInt64 + v0 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags)) + v0.AddArg2(x, y) + v.AddArg(v0) return true } -} -func rewriteValueAMD64_OpMaskedSubUint32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubUint32x4 x y mask) - // result: (VPSUBDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (Select0 (Mul32uover x y)) + // result: (Select0 (MULLU x y)) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if v_0.Op != OpMul32uover { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpSelect0) + v.Type = typ.UInt32 + v0 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags)) + v0.AddArg2(x, y) + v.AddArg(v0) return true } -} -func rewriteValueAMD64_OpMaskedSubUint32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubUint32x8 x y mask) - // result: (VPSUBDMasked256 x y (VPMOVVec32x8ToM mask)) 
+ // match: (Select0 (Add64carry x y c)) + // result: (Select0 (ADCQ x y (Select1 (NEGLflags c)))) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if v_0.Op != OpAdd64carry { + break + } + c := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpSelect0) + v.Type = typ.UInt64 + v0 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags)) + v2.AddArg(c) + v1.AddArg(v2) + v0.AddArg3(x, y, v1) + v.AddArg(v0) + return true + } + // match: (Select0 (Sub64borrow x y c)) + // result: (Select0 (SBBQ x y (Select1 (NEGLflags c)))) + for { + if v_0.Op != OpSub64borrow { + break + } + c := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpSelect0) + v.Type = typ.UInt64 + v0 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags)) + v2.AddArg(c) + v1.AddArg(v2) + v0.AddArg3(x, y, v1) + v.AddArg(v0) + return true + } + // match: (Select0 (AddTupleFirst32 val tuple)) + // result: (ADDL val (Select0 tuple)) + for { + t := v.Type + if v_0.Op != OpAMD64AddTupleFirst32 { + break + } + tuple := v_0.Args[1] + val := v_0.Args[0] + v.reset(OpAMD64ADDL) + v0 := b.NewValue0(v.Pos, OpSelect0, t) + v0.AddArg(tuple) + v.AddArg2(val, v0) + return true + } + // match: (Select0 (AddTupleFirst64 val tuple)) + // result: (ADDQ val (Select0 tuple)) + for { + t := v.Type + if v_0.Op != OpAMD64AddTupleFirst64 { + break + } + tuple := v_0.Args[1] + val := v_0.Args[0] + v.reset(OpAMD64ADDQ) + v0 := b.NewValue0(v.Pos, OpSelect0, t) + v0.AddArg(tuple) + v.AddArg2(val, v0) + return true + } + // match: (Select0 a:(ADDQconstflags [c] x)) + // cond: a.Uses == 1 + // result: (ADDQconst [c] x) + for { + a := v_0 + if a.Op != OpAMD64ADDQconstflags { + break + } + c := auxIntToInt32(a.AuxInt) + x := a.Args[0] + if !(a.Uses == 1) { + break + } + v.reset(OpAMD64ADDQconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (Select0 a:(ADDLconstflags [c] x)) + // cond: a.Uses == 1 + // result: (ADDLconst [c] x) + for { + a := v_0 + if a.Op != OpAMD64ADDLconstflags { + break + } + c := auxIntToInt32(a.AuxInt) + x := a.Args[0] + if !(a.Uses == 1) { + break + } + v.reset(OpAMD64ADDLconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) return true } + return false } -func rewriteValueAMD64_OpMaskedSubUint64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpSelect1(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (MaskedSubUint64x2 x y mask) - // result: (VPSUBQMasked128 x y (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (Select1 (Mul64uover x y)) + // result: (SETO (Select1 (MULQU x y))) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if v_0.Op != OpMul64uover { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpAMD64SETO) + v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags)) + v1.AddArg2(x, y) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // 
match: (Select1 (Mul32uover x y)) + // result: (SETO (Select1 (MULLU x y))) + for { + if v_0.Op != OpMul32uover { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpAMD64SETO) + v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags)) + v1.AddArg2(x, y) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (Select1 (Add64carry x y c)) + // result: (NEGQ (SBBQcarrymask (Select1 (ADCQ x y (Select1 (NEGLflags c)))))) + for { + if v_0.Op != OpAdd64carry { + break + } + c := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpAMD64NEGQ) + v.Type = typ.UInt64 + v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v2 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags)) + v4.AddArg(c) + v3.AddArg(v4) + v2.AddArg3(x, y, v3) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) return true } -} -func rewriteValueAMD64_OpMaskedSubUint64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubUint64x4 x y mask) - // result: (VPSUBQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (Select1 (Sub64borrow x y c)) + // result: (NEGQ (SBBQcarrymask (Select1 (SBBQ x y (Select1 (NEGLflags c)))))) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if v_0.Op != OpSub64borrow { + break + } + c := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpAMD64NEGQ) + v.Type = typ.UInt64 + v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v2 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags)) + v4.AddArg(c) + v3.AddArg(v4) + v2.AddArg3(x, y, v3) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) return true } -} -func rewriteValueAMD64_OpMaskedSubUint64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubUint64x8 x y mask) - // result: (VPSUBQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (Select1 (NEGLflags (MOVQconst [0]))) + // result: (FlagEQ) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if v_0.Op != OpAMD64NEGLflags { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 0 { + break + } + v.reset(OpAMD64FlagEQ) return true } -} -func rewriteValueAMD64_OpMaskedSubUint8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubUint8x16 x y mask) - // result: (VPSUBBMasked128 x y (VPMOVVec8x16ToM mask)) + // match: (Select1 (NEGLflags (NEGQ (SBBQcarrymask x)))) + // result: x for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if v_0.Op != OpAMD64NEGLflags { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64NEGQ { + 
break + } + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpAMD64SBBQcarrymask { + break + } + x := v_0_0_0.Args[0] + v.copyOf(x) return true } -} -func rewriteValueAMD64_OpMaskedSubUint8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubUint8x32 x y mask) - // result: (VPSUBBMasked256 x y (VPMOVVec8x32ToM mask)) + // match: (Select1 (AddTupleFirst32 _ tuple)) + // result: (Select1 tuple) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if v_0.Op != OpAMD64AddTupleFirst32 { + break + } + tuple := v_0.Args[1] + v.reset(OpSelect1) + v.AddArg(tuple) return true } -} -func rewriteValueAMD64_OpMaskedSubUint8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubUint8x64 x y mask) - // result: (VPSUBBMasked512 x y (VPMOVVec8x64ToM mask)) + // match: (Select1 (AddTupleFirst64 _ tuple)) + // result: (Select1 tuple) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if v_0.Op != OpAMD64AddTupleFirst64 { + break + } + tuple := v_0.Args[1] + v.reset(OpSelect1) + v.AddArg(tuple) return true } -} -func rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedTruncWithPrecisionFloat32x16 [a] x mask) - // result: (VRNDSCALEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) + // match: (Select1 a:(LoweredAtomicAnd64 ptr val mem)) + // cond: a.Uses == 1 && clobber(a) + // result: (ANDQlock ptr val mem) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 3) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + a := v_0 + if a.Op != OpAMD64LoweredAtomicAnd64 { + break + } + mem := a.Args[2] + ptr := a.Args[0] + val := a.Args[1] + if !(a.Uses == 1 && clobber(a)) { + break + } + v.reset(OpAMD64ANDQlock) + v.AddArg3(ptr, val, mem) return true } -} -func rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedTruncWithPrecisionFloat32x4 [a] x mask) - // result: (VRNDSCALEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) + // match: (Select1 a:(LoweredAtomicAnd32 ptr val mem)) + // cond: a.Uses == 1 && clobber(a) + // result: (ANDLlock ptr val mem) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 3) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + a := v_0 + if a.Op != OpAMD64LoweredAtomicAnd32 { + break + } + mem := a.Args[2] + ptr := a.Args[0] + val := a.Args[1] + if !(a.Uses == 1 && clobber(a)) { + break + } + v.reset(OpAMD64ANDLlock) + v.AddArg3(ptr, val, mem) return true } -} -func rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedTruncWithPrecisionFloat32x8 [a] x mask) - // result: (VRNDSCALEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) + // match: (Select1 a:(LoweredAtomicOr64 ptr val mem)) + // cond: a.Uses == 1 && clobber(a) + // result: (ORQlock ptr val mem) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - 
mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 3) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + a := v_0 + if a.Op != OpAMD64LoweredAtomicOr64 { + break + } + mem := a.Args[2] + ptr := a.Args[0] + val := a.Args[1] + if !(a.Uses == 1 && clobber(a)) { + break + } + v.reset(OpAMD64ORQlock) + v.AddArg3(ptr, val, mem) return true } -} -func rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedTruncWithPrecisionFloat64x2 [a] x mask) - // result: (VRNDSCALEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) + // match: (Select1 a:(LoweredAtomicOr32 ptr val mem)) + // cond: a.Uses == 1 && clobber(a) + // result: (ORLlock ptr val mem) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 3) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + a := v_0 + if a.Op != OpAMD64LoweredAtomicOr32 { + break + } + mem := a.Args[2] + ptr := a.Args[0] + val := a.Args[1] + if !(a.Uses == 1 && clobber(a)) { + break + } + v.reset(OpAMD64ORLlock) + v.AddArg3(ptr, val, mem) return true } + return false } -func rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat64x4(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpSelectN(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (MaskedTruncWithPrecisionFloat64x4 [a] x mask) - // result: (VRNDSCALEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) + config := b.Func.Config + // match: (SelectN [0] call:(CALLstatic {sym} s1:(MOVQstoreconst _ [sc] s2:(MOVQstore _ src s3:(MOVQstore _ dst mem))))) + // cond: sc.Val64() >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sc.Val64(), config) && clobber(s1, s2, s3, call) + // result: (Move [sc.Val64()] dst src mem) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 3) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + if auxIntToInt64(v.AuxInt) != 0 { + break + } + call := v_0 + if call.Op != OpAMD64CALLstatic || len(call.Args) != 1 { + break + } + sym := auxToCall(call.Aux) + s1 := call.Args[0] + if s1.Op != OpAMD64MOVQstoreconst { + break + } + sc := auxIntToValAndOff(s1.AuxInt) + _ = s1.Args[1] + s2 := s1.Args[1] + if s2.Op != OpAMD64MOVQstore { + break + } + _ = s2.Args[2] + src := s2.Args[1] + s3 := s2.Args[2] + if s3.Op != OpAMD64MOVQstore { + break + } + mem := s3.Args[2] + dst := s3.Args[1] + if !(sc.Val64() >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sc.Val64(), config) && clobber(s1, s2, s3, call)) { + break + } + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(sc.Val64()) + v.AddArg3(dst, src, mem) return true } -} -func rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedTruncWithPrecisionFloat64x8 [a] x mask) - // result: (VRNDSCALEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) + // match: (SelectN [0] call:(CALLstatic {sym} dst src (MOVQconst [sz]) mem)) + // cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call) + // result: (Move [sz] dst src mem) for { - a := 
auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 3) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + if auxIntToInt64(v.AuxInt) != 0 { + break + } + call := v_0 + if call.Op != OpAMD64CALLstatic || len(call.Args) != 4 { + break + } + sym := auxToCall(call.Aux) + mem := call.Args[3] + dst := call.Args[0] + src := call.Args[1] + call_2 := call.Args[2] + if call_2.Op != OpAMD64MOVQconst { + break + } + sz := auxIntToInt64(call_2.AuxInt) + if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call)) { + break + } + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(sz) + v.AddArg3(dst, src, mem) return true } + return false } -func rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpSet128Float32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedUnsignedSignedQuadDotProdAccumulateInt32x16 x y z mask) - // result: (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) + // match: (Set128Float32x8 [a] x y) + // result: (VINSERTF128256 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VINSERTF128256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpSet128Float64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedUnsignedSignedQuadDotProdAccumulateInt32x4 x y z mask) - // result: (VPDPBUSDMasked128 x y z (VPMOVVec32x4ToM mask)) + // match: (Set128Float64x4 [a] x y) + // result: (VINSERTF128256 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VINSERTF128256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpSet128Int16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedUnsignedSignedQuadDotProdAccumulateInt32x8 x y z mask) - // result: (VPDPBUSDMasked256 x y z (VPMOVVec32x8ToM mask)) + // match: (Set128Int16x16 [a] x y) + // result: (VINSERTI128256 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpSet128Int32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedUnsignedSignedQuadDotProdAccumulateUint32x16 x y z mask) - // result: (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) + // match: (Set128Int32x8 [a] x y) + // 
result: (VINSERTI128256 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpSet128Int64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedUnsignedSignedQuadDotProdAccumulateUint32x4 x y z mask) - // result: (VPDPBUSDMasked128 x y z (VPMOVVec32x4ToM mask)) + // match: (Set128Int64x4 [a] x y) + // result: (VINSERTI128256 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpSet128Int8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedUnsignedSignedQuadDotProdAccumulateUint32x8 x y z mask) - // result: (VPDPBUSDMasked256 x y z (VPMOVVec32x8ToM mask)) + // match: (Set128Int8x32 [a] x y) + // result: (VINSERTI128256 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedXorInt32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSet128Uint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorInt32x16 x y mask) - // result: (VPXORDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (Set128Uint16x16 [a] x y) + // result: (VINSERTI128256 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedXorInt32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSet128Uint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorInt32x4 x y mask) - // result: (VPXORDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (Set128Uint32x8 [a] x y) + // result: (VINSERTI128256 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedXorInt32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSet128Uint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorInt32x8 x y mask) - // result: (VPXORDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (Set128Uint64x4 [a] x y) + // 
result: (VINSERTI128256 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedXorInt64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSet128Uint8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorInt64x2 x y mask) - // result: (VPXORQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (Set128Uint8x32 [a] x y) + // result: (VINSERTI128256 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedXorInt64x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetElemInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorInt64x4 x y mask) - // result: (VPXORQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (SetElemInt16x8 [a] x y) + // result: (VPINSRW128 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPINSRW128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedXorInt64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetElemInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorInt64x8 x y mask) - // result: (VPXORQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (SetElemInt32x4 [a] x y) + // result: (VPINSRD128 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPINSRD128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedXorUint32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetElemInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorUint32x16 x y mask) - // result: (VPXORDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (SetElemInt64x2 [a] x y) + // result: (VPINSRQ128 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPINSRQ128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedXorUint32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetElemInt8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorUint32x4 x y mask) - // result: (VPXORDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (SetElemInt8x16 [a] x y) + // result: (VPINSRB128 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + 
v.reset(OpAMD64VPINSRB128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedXorUint32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetElemUint16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorUint32x8 x y mask) - // result: (VPXORDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (SetElemUint16x8 [a] x y) + // result: (VPINSRW128 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPINSRW128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedXorUint64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetElemUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorUint64x2 x y mask) - // result: (VPXORQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (SetElemUint32x4 [a] x y) + // result: (VPINSRD128 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPINSRD128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedXorUint64x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetElemUint64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorUint64x4 x y mask) - // result: (VPXORQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (SetElemUint64x2 [a] x y) + // result: (VPINSRQ128 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPINSRQ128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedXorUint64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetElemUint8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorUint64x8 x y mask) - // result: (VPXORQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (SetElemUint8x16 [a] x y) + // result: (VPINSRB128 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPINSRB128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMax32F(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Max32F x y) - // result: (Neg32F (Min32F (Neg32F x) (Neg32F y))) + // match: (ShiftAllLeftAndFillUpperFromInt16x16 [a] x y) + // result: (VPSHLDW256 [a] x y) for { - t := v.Type + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpNeg32F) - v.Type = t - v0 := b.NewValue0(v.Pos, OpMin32F, t) - v1 := b.NewValue0(v.Pos, OpNeg32F, t) - v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpNeg32F, t) - v2.AddArg(y) - v0.AddArg2(v1, v2) - v.AddArg(v0) + v.reset(OpAMD64VPSHLDW256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMax64F(v *Value) bool { +func 
rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Max64F x y) - // result: (Neg64F (Min64F (Neg64F x) (Neg64F y))) + // match: (ShiftAllLeftAndFillUpperFromInt16x32 [a] x y) + // result: (VPSHLDW512 [a] x y) for { - t := v.Type + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpNeg64F) - v.Type = t - v0 := b.NewValue0(v.Pos, OpMin64F, t) - v1 := b.NewValue0(v.Pos, OpNeg64F, t) - v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpNeg64F, t) - v2.AddArg(y) - v0.AddArg2(v1, v2) - v.AddArg(v0) + v.reset(OpAMD64VPSHLDW512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMin32F(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Min32F x y) - // result: (POR (MINSS (MINSS x y) x) (MINSS x y)) + // match: (ShiftAllLeftAndFillUpperFromInt16x8 [a] x y) + // result: (VPSHLDW128 [a] x y) for { - t := v.Type + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64POR) - v0 := b.NewValue0(v.Pos, OpAMD64MINSS, t) - v1 := b.NewValue0(v.Pos, OpAMD64MINSS, t) - v1.AddArg2(x, y) - v0.AddArg2(v1, x) - v.AddArg2(v0, v1) + v.reset(OpAMD64VPSHLDW128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMin64F(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Min64F x y) - // result: (POR (MINSD (MINSD x y) x) (MINSD x y)) + // match: (ShiftAllLeftAndFillUpperFromInt32x16 [a] x y) + // result: (VPSHLDD512 [a] x y) for { - t := v.Type + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64POR) - v0 := b.NewValue0(v.Pos, OpAMD64MINSD, t) - v1 := b.NewValue0(v.Pos, OpAMD64MINSD, t) - v1.AddArg2(x, y) - v0.AddArg2(v1, x) - v.AddArg2(v0, v1) + v.reset(OpAMD64VPSHLDD512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMod16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Mod16 [a] x y) - // result: (Select1 (DIVW [a] x y)) + // match: (ShiftAllLeftAndFillUpperFromInt32x4 [a] x y) + // result: (VPSHLDD128 [a] x y) for { - a := auxIntToBool(v.AuxInt) + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) - v0.AuxInt = boolToAuxInt(a) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPSHLDD128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMod16u(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Mod16u x y) - // result: (Select1 (DIVWU x y)) + // match: (ShiftAllLeftAndFillUpperFromInt32x8 [a] x y) + // result: (VPSHLDD256 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPSHLDD256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMod32(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - 
typ := &b.Func.Config.Types - // match: (Mod32 [a] x y) - // result: (Select1 (DIVL [a] x y)) + // match: (ShiftAllLeftAndFillUpperFromInt64x2 [a] x y) + // result: (VPSHLDQ128 [a] x y) for { - a := auxIntToBool(v.AuxInt) + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) - v0.AuxInt = boolToAuxInt(a) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPSHLDQ128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMod32u(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Mod32u x y) - // result: (Select1 (DIVLU x y)) + // match: (ShiftAllLeftAndFillUpperFromInt64x4 [a] x y) + // result: (VPSHLDQ256 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32)) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPSHLDQ256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMod64(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Mod64 [a] x y) - // result: (Select1 (DIVQ [a] x y)) + // match: (ShiftAllLeftAndFillUpperFromInt64x8 [a] x y) + // result: (VPSHLDQ512 [a] x y) for { - a := auxIntToBool(v.AuxInt) + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64)) - v0.AuxInt = boolToAuxInt(a) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPSHLDQ512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMod64u(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Mod64u x y) - // result: (Select1 (DIVQU x y)) + // match: (ShiftAllLeftAndFillUpperFromMaskedInt16x16 [a] x y mask) + // result: (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPSHLDWMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMod8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt16x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Mod8 x y) - // result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y))) + // match: (ShiftAllLeftAndFillUpperFromMaskedInt16x32 [a] x y mask) + // result: (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) - v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) - v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) - v2.AddArg(y) - v0.AddArg2(v1, v2) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPSHLDWMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMod8u(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Mod8u x y) - // result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) + // match: (ShiftAllLeftAndFillUpperFromMaskedInt16x8 [a] x y mask) + // result: (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) - v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) - v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) - v2.AddArg(y) - v0.AddArg2(v1, v2) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPSHLDWMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMove(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Move [0] _ _ mem) - // result: mem - for { - if auxIntToInt64(v.AuxInt) != 0 { - break - } - mem := v_2 - v.copyOf(mem) - return true - } - // match: (Move [1] dst src mem) - // result: (MOVBstore dst (MOVBload src mem) mem) - for { - if auxIntToInt64(v.AuxInt) != 1 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVBstore) - v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) - v0.AddArg2(src, mem) - v.AddArg3(dst, v0, mem) - return true - } - // match: (Move [2] dst src mem) - // result: (MOVWstore dst (MOVWload src mem) mem) - for { - if auxIntToInt64(v.AuxInt) != 2 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVWstore) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) - v0.AddArg2(src, mem) - v.AddArg3(dst, v0, mem) - return true - } - // match: (Move [4] dst src mem) - // result: (MOVLstore dst (MOVLload src mem) mem) - for { - if auxIntToInt64(v.AuxInt) != 4 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVLstore) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v0.AddArg2(src, mem) - v.AddArg3(dst, v0, mem) - return true - } - // match: (Move [8] dst src mem) - // result: (MOVQstore dst (MOVQload src mem) mem) - for { - if auxIntToInt64(v.AuxInt) != 8 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVQstore) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v0.AddArg2(src, mem) - v.AddArg3(dst, v0, mem) - return true - } - // match: (Move [16] dst src mem) - // result: (MOVOstore dst (MOVOload src mem) mem) - for { - if auxIntToInt64(v.AuxInt) != 16 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVOstore) - v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) - v0.AddArg2(src, mem) - v.AddArg3(dst, v0, mem) - return true - } - // match: (Move [32] dst src mem) - // result: (Move [16] (OffPtr dst [16]) (OffPtr src [16]) (Move [16] dst src mem)) - for { - if auxIntToInt64(v.AuxInt) != 32 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpMove) - v.AuxInt = int64ToAuxInt(16) - v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) - v0.AuxInt = int64ToAuxInt(16) - v0.AddArg(dst) - v1 := b.NewValue0(v.Pos, OpOffPtr, 
src.Type) - v1.AuxInt = int64ToAuxInt(16) - v1.AddArg(src) - v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) - v2.AuxInt = int64ToAuxInt(16) - v2.AddArg3(dst, src, mem) - v.AddArg3(v0, v1, v2) - return true - } - // match: (Move [48] dst src mem) - // result: (Move [32] (OffPtr dst [16]) (OffPtr src [16]) (Move [16] dst src mem)) - for { - if auxIntToInt64(v.AuxInt) != 48 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpMove) - v.AuxInt = int64ToAuxInt(32) - v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) - v0.AuxInt = int64ToAuxInt(16) - v0.AddArg(dst) - v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) - v1.AuxInt = int64ToAuxInt(16) - v1.AddArg(src) - v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) - v2.AuxInt = int64ToAuxInt(16) - v2.AddArg3(dst, src, mem) - v.AddArg3(v0, v1, v2) - return true - } - // match: (Move [64] dst src mem) - // result: (Move [32] (OffPtr dst [32]) (OffPtr src [32]) (Move [32] dst src mem)) + // match: (ShiftAllLeftAndFillUpperFromMaskedInt32x16 [a] x y mask) + // result: (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) for { - if auxIntToInt64(v.AuxInt) != 64 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpMove) - v.AuxInt = int64ToAuxInt(32) - v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) - v0.AuxInt = int64ToAuxInt(32) - v0.AddArg(dst) - v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) - v1.AuxInt = int64ToAuxInt(32) - v1.AddArg(src) - v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) - v2.AuxInt = int64ToAuxInt(32) - v2.AddArg3(dst, src, mem) - v.AddArg3(v0, v1, v2) + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDDMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Move [3] dst src mem) - // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem)) - for { - if auxIntToInt64(v.AuxInt) != 3 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVBstore) - v.AuxInt = int32ToAuxInt(2) - v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) - v0.AuxInt = int32ToAuxInt(2) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftAndFillUpperFromMaskedInt32x4 [a] x y mask) + // result: (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDDMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Move [5] dst src mem) - // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftAndFillUpperFromMaskedInt32x8 [a] x y mask) + // result: (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) for { - if auxIntToInt64(v.AuxInt) != 5 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVBstore) - v.AuxInt = int32ToAuxInt(4) - v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, 
typ.UInt8) - v0.AuxInt = int32ToAuxInt(4) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDDMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Move [6] dst src mem) - // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftAndFillUpperFromMaskedInt64x2 [a] x y mask) + // result: (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) for { - if auxIntToInt64(v.AuxInt) != 6 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVWstore) - v.AuxInt = int32ToAuxInt(4) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) - v0.AuxInt = int32ToAuxInt(4) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDQMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Move [7] dst src mem) - // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem)) +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftAndFillUpperFromMaskedInt64x4 [a] x y mask) + // result: (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) for { - if auxIntToInt64(v.AuxInt) != 7 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVLstore) - v.AuxInt = int32ToAuxInt(3) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v0.AuxInt = int32ToAuxInt(3) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDQMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Move [9] dst src mem) - // result: (MOVBstore [8] dst (MOVBload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftAndFillUpperFromMaskedInt64x8 [a] x y mask) + // result: (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) for { - if auxIntToInt64(v.AuxInt) != 9 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVBstore) - v.AuxInt = int32ToAuxInt(8) - v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) - v0.AuxInt = int32ToAuxInt(8) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 
- v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDQMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Move [10] dst src mem) - // result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftAndFillUpperFromMaskedUint16x16 [a] x y mask) + // result: (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) for { - if auxIntToInt64(v.AuxInt) != 10 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVWstore) - v.AuxInt = int32ToAuxInt(8) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) - v0.AuxInt = int32ToAuxInt(8) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDWMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Move [11] dst src mem) - // result: (MOVLstore [7] dst (MOVLload [7] src mem) (MOVQstore dst (MOVQload src mem) mem)) +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftAndFillUpperFromMaskedUint16x32 [a] x y mask) + // result: (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) for { - if auxIntToInt64(v.AuxInt) != 11 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVLstore) - v.AuxInt = int32ToAuxInt(7) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v0.AuxInt = int32ToAuxInt(7) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDWMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Move [12] dst src mem) - // result: (MOVLstore [8] dst (MOVLload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftAndFillUpperFromMaskedUint16x8 [a] x y mask) + // result: (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) for { - if auxIntToInt64(v.AuxInt) != 12 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVLstore) - v.AuxInt = int32ToAuxInt(8) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v0.AuxInt = int32ToAuxInt(8) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + 
v.reset(OpAMD64VPSHLDWMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Move [s] dst src mem) - // cond: s >= 13 && s <= 15 - // result: (MOVQstore [int32(s-8)] dst (MOVQload [int32(s-8)] src mem) (MOVQstore dst (MOVQload src mem) mem)) +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftAndFillUpperFromMaskedUint32x16 [a] x y mask) + // result: (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) for { - s := auxIntToInt64(v.AuxInt) - dst := v_0 - src := v_1 - mem := v_2 - if !(s >= 13 && s <= 15) { - break - } - v.reset(OpAMD64MOVQstore) - v.AuxInt = int32ToAuxInt(int32(s - 8)) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v0.AuxInt = int32ToAuxInt(int32(s - 8)) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDDMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Move [s] dst src mem) - // cond: s > 16 && s%16 != 0 && s%16 <= 8 - // result: (Move [s-s%16] (OffPtr dst [s%16]) (OffPtr src [s%16]) (MOVQstore dst (MOVQload src mem) mem)) +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftAndFillUpperFromMaskedUint32x4 [a] x y mask) + // result: (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) for { - s := auxIntToInt64(v.AuxInt) - dst := v_0 - src := v_1 - mem := v_2 - if !(s > 16 && s%16 != 0 && s%16 <= 8) { - break - } - v.reset(OpMove) - v.AuxInt = int64ToAuxInt(s - s%16) - v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) - v0.AuxInt = int64ToAuxInt(s % 16) - v0.AddArg(dst) - v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) - v1.AuxInt = int64ToAuxInt(s % 16) - v1.AddArg(src) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v3.AddArg2(src, mem) - v2.AddArg3(dst, v3, mem) - v.AddArg3(v0, v1, v2) + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDDMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Move [s] dst src mem) - // cond: s > 16 && s%16 != 0 && s%16 > 8 - // result: (Move [s-s%16] (OffPtr dst [s%16]) (OffPtr src [s%16]) (MOVOstore dst (MOVOload src mem) mem)) +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftAndFillUpperFromMaskedUint32x8 [a] x y mask) + // result: (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) for { - s := auxIntToInt64(v.AuxInt) - dst := v_0 - src := v_1 - mem := v_2 - if !(s > 16 && s%16 != 0 && s%16 > 8) { - break - } - v.reset(OpMove) - v.AuxInt = int64ToAuxInt(s - s%16) - v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) - v0.AuxInt = int64ToAuxInt(s % 16) - v0.AddArg(dst) - v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) - v1.AuxInt = 
int64ToAuxInt(s % 16) - v1.AddArg(src) - v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) - v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) - v3.AddArg2(src, mem) - v2.AddArg3(dst, v3, mem) - v.AddArg3(v0, v1, v2) + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDDMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Move [s] dst src mem) - // cond: s > 64 && s <= 16*64 && s%16 == 0 && logLargeCopy(v, s) - // result: (DUFFCOPY [s] dst src mem) +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftAndFillUpperFromMaskedUint64x2 [a] x y mask) + // result: (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) for { - s := auxIntToInt64(v.AuxInt) - dst := v_0 - src := v_1 - mem := v_2 - if !(s > 64 && s <= 16*64 && s%16 == 0 && logLargeCopy(v, s)) { - break - } - v.reset(OpAMD64DUFFCOPY) - v.AuxInt = int64ToAuxInt(s) - v.AddArg3(dst, src, mem) + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDQMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Move [s] dst src mem) - // cond: s > 16*64 && s%8 == 0 && logLargeCopy(v, s) - // result: (REPMOVSQ dst src (MOVQconst [s/8]) mem) +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftAndFillUpperFromMaskedUint64x4 [a] x y mask) + // result: (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) for { - s := auxIntToInt64(v.AuxInt) - dst := v_0 - src := v_1 - mem := v_2 - if !(s > 16*64 && s%8 == 0 && logLargeCopy(v, s)) { - break - } - v.reset(OpAMD64REPMOVSQ) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) - v0.AuxInt = int64ToAuxInt(s / 8) - v.AddArg4(dst, src, v0, mem) + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDQMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpNeg32F(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Neg32F x) - // result: (PXOR x (MOVSSconst [float32(math.Copysign(0, -1))])) + // match: (ShiftAllLeftAndFillUpperFromMaskedUint64x8 [a] x y mask) + // result: (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64PXOR) - v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32) - v0.AuxInt = float32ToAuxInt(float32(math.Copysign(0, -1))) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDQMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNeg64F(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint16x16(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Neg64F x) - // result: (PXOR x (MOVSDconst 
[math.Copysign(0, -1)])) + // match: (ShiftAllLeftAndFillUpperFromUint16x16 [a] x y) + // result: (VPSHLDW256 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64PXOR) - v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64) - v0.AuxInt = float64ToAuxInt(math.Copysign(0, -1)) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPSHLDW256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpNeq16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Neq16 x y) - // result: (SETNE (CMPW x y)) + // match: (ShiftAllLeftAndFillUpperFromUint16x32 [a] x y) + // result: (VPSHLDW512 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPSHLDW512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpNeq32(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Neq32 x y) - // result: (SETNE (CMPL x y)) + // match: (ShiftAllLeftAndFillUpperFromUint16x8 [a] x y) + // result: (VPSHLDW128 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPSHLDW128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpNeq32F(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Neq32F x y) - // result: (SETNEF (UCOMISS x y)) + // match: (ShiftAllLeftAndFillUpperFromUint32x16 [a] x y) + // result: (VPSHLDD512 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64SETNEF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPSHLDD512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpNeq64(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Neq64 x y) - // result: (SETNE (CMPQ x y)) + // match: (ShiftAllLeftAndFillUpperFromUint32x4 [a] x y) + // result: (VPSHLDD128 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPSHLDD128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpNeq64F(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Neq64F x y) - // result: (SETNEF (UCOMISD x y)) + // match: (ShiftAllLeftAndFillUpperFromUint32x8 [a] x y) + // result: (VPSHLDD256 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64SETNEF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPSHLDD256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpNeq8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := 
v.Block - // match: (Neq8 x y) - // result: (SETNE (CMPB x y)) + // match: (ShiftAllLeftAndFillUpperFromUint64x2 [a] x y) + // result: (VPSHLDQ128 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPSHLDQ128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpNeqB(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (NeqB x y) - // result: (SETNE (CMPB x y)) + // match: (ShiftAllLeftAndFillUpperFromUint64x4 [a] x y) + // result: (VPSHLDQ256 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPSHLDQ256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpNeqPtr(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (NeqPtr x y) - // result: (SETNE (CMPQ x y)) + // match: (ShiftAllLeftAndFillUpperFromUint64x8 [a] x y) + // result: (VPSHLDQ512 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPSHLDQ512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpNot(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftMaskedInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Not x) - // result: (XORLconst [1] x) + b := v.Block + // match: (ShiftAllLeftMaskedInt64x2 x y mask) + // result: (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 - v.reset(OpAMD64XORLconst) - v.AuxInt = int32ToAuxInt(1) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftMaskedInt64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualFloat32x16 x y) - // result: (VPMOVMToVec32x16 (VCMPPS512 [4] x y)) + // match: (ShiftAllLeftMaskedInt64x4 x y mask) + // result: (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPSLLQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftMaskedInt64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (NotEqualFloat32x4 x y) - // result: (VCMPPS128 [4] x y) + b := v.Block + // match: (ShiftAllLeftMaskedInt64x8 x y mask) + // result: (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(4) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSLLQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, 
types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftMaskedUint64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (NotEqualFloat32x8 x y) - // result: (VCMPPS256 [4] x y) + b := v.Block + // match: (ShiftAllLeftMaskedUint64x2 x y mask) + // result: (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(4) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSLLQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftMaskedUint64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (NotEqualFloat64x2 x y) - // result: (VCMPPD128 [4] x y) + b := v.Block + // match: (ShiftAllLeftMaskedUint64x4 x y mask) + // result: (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(4) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSLLQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftMaskedUint64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (NotEqualFloat64x4 x y) - // result: (VCMPPD256 [4] x y) + b := v.Block + // match: (ShiftAllLeftMaskedUint64x8 x y mask) + // result: (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(4) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSLLQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualFloat64x8 x y) - // result: (VPMOVMToVec64x8 (VCMPPD512 [4] x y)) + // match: (ShiftAllRightAndFillUpperFromInt16x16 [a] x y) + // result: (VPSHRDW256 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPSHRDW256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpNotEqualInt16x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPW256 [4] x y)) + // match: (ShiftAllRightAndFillUpperFromInt16x32 [a] x y) + // result: (VPSHRDW512 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPSHRDW512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpNotEqualInt16x32(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt16x8(v *Value) bool { 
v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPW512 [4] x y)) + // match: (ShiftAllRightAndFillUpperFromInt16x8 [a] x y) + // result: (VPSHRDW128 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPSHRDW128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpNotEqualInt16x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPW128 [4] x y)) + // match: (ShiftAllRightAndFillUpperFromInt32x16 [a] x y) + // result: (VPSHRDD512 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPSHRDD512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpNotEqualInt32x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPD512 [4] x y)) + // match: (ShiftAllRightAndFillUpperFromInt32x4 [a] x y) + // result: (VPSHRDD128 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPSHRDD128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpNotEqualInt32x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPD128 [4] x y)) + // match: (ShiftAllRightAndFillUpperFromInt32x8 [a] x y) + // result: (VPSHRDD256 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPSHRDD256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpNotEqualInt32x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPD256 [4] x y)) + // match: (ShiftAllRightAndFillUpperFromInt64x2 [a] x y) + // result: (VPSHRDQ128 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPSHRDQ128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpNotEqualInt64x2(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := 
&b.Func.Config.Types - // match: (NotEqualInt64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPQ128 [4] x y)) + // match: (ShiftAllRightAndFillUpperFromInt64x4 [a] x y) + // result: (VPSHRDQ256 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPSHRDQ256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpNotEqualInt64x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPQ256 [4] x y)) + // match: (ShiftAllRightAndFillUpperFromInt64x8 [a] x y) + // result: (VPSHRDQ512 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPSHRDQ512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpNotEqualInt64x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPQ512 [4] x y)) + // match: (ShiftAllRightAndFillUpperFromMaskedInt16x16 [a] x y mask) + // result: (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPSHRDWMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualInt8x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt16x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPB128 [4] x y)) + // match: (ShiftAllRightAndFillUpperFromMaskedInt16x32 [a] x y mask) + // result: (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPSHRDWMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualInt8x32(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPB256 [4] x y)) + // match: (ShiftAllRightAndFillUpperFromMaskedInt16x8 [a] x y mask) + // result: (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) - v0.AuxInt = 
int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPSHRDWMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualInt8x64(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPB512 [4] x y)) + // match: (ShiftAllRightAndFillUpperFromMaskedInt32x16 [a] x y mask) + // result: (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPSHRDDMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualUint16x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPUW256 [4] x y)) + // match: (ShiftAllRightAndFillUpperFromMaskedInt32x4 [a] x y mask) + // result: (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPSHRDDMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualUint16x32(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPUW512 [4] x y)) + // match: (ShiftAllRightAndFillUpperFromMaskedInt32x8 [a] x y mask) + // result: (VPSHRDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPSHRDDMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualUint16x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPUW128 [4] x y)) + // match: (ShiftAllRightAndFillUpperFromMaskedInt64x2 [a] x y mask) + // result: (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := 
v_2 + v.reset(OpAMD64VPSHRDQMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualUint32x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPUD512 [4] x y)) + // match: (ShiftAllRightAndFillUpperFromMaskedInt64x4 [a] x y mask) + // result: (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPSHRDQMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualUint32x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPUD128 [4] x y)) + // match: (ShiftAllRightAndFillUpperFromMaskedInt64x8 [a] x y mask) + // result: (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPSHRDQMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualUint32x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPUD256 [4] x y)) + // match: (ShiftAllRightAndFillUpperFromMaskedUint16x16 [a] x y mask) + // result: (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPSHRDWMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualUint64x2(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint16x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPUQ128 [4] x y)) + // match: (ShiftAllRightAndFillUpperFromMaskedUint16x32 [a] x y mask) + // result: (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPSHRDWMasked512) + v.AuxInt = 
int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualUint64x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPUQ256 [4] x y)) + // match: (ShiftAllRightAndFillUpperFromMaskedUint16x8 [a] x y mask) + // result: (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHRDWMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualUint64x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPUQ512 [4] x y)) + // match: (ShiftAllRightAndFillUpperFromMaskedUint32x16 [a] x y mask) + // result: (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPSHRDDMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualUint8x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPUB128 [4] x y)) + // match: (ShiftAllRightAndFillUpperFromMaskedUint32x4 [a] x y mask) + // result: (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPSHRDDMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualUint8x32(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPUB256 [4] x y)) + // match: (ShiftAllRightAndFillUpperFromMaskedUint32x8 [a] x y mask) + // result: (VPSHRDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPSHRDDMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualUint8x64(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPUB512 [4] x y)) + // match: (ShiftAllRightAndFillUpperFromMaskedUint64x2 [a] x y mask) + // result: (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPSHRDQMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpOffPtr(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (OffPtr [off] ptr) - // cond: is32Bit(off) - // result: (ADDQconst [int32(off)] ptr) - for { - off := auxIntToInt64(v.AuxInt) - ptr := v_0 - if !(is32Bit(off)) { - break - } - v.reset(OpAMD64ADDQconst) - v.AuxInt = int32ToAuxInt(int32(off)) - v.AddArg(ptr) - return true - } - // match: (OffPtr [off] ptr) - // result: (ADDQ (MOVQconst [off]) ptr) + // match: (ShiftAllRightAndFillUpperFromMaskedUint64x4 [a] x y mask) + // result: (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) for { - off := auxIntToInt64(v.AuxInt) - ptr := v_0 - v.reset(OpAMD64ADDQ) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) - v0.AuxInt = int64ToAuxInt(off) - v.AddArg2(v0, ptr) + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHRDQMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpPanicBounds(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PanicBounds [kind] x y mem) - // cond: boundsABI(kind) == 0 - // result: (LoweredPanicBoundsA [kind] x y mem) + b := v.Block + // match: (ShiftAllRightAndFillUpperFromMaskedUint64x8 [a] x y mask) + // result: (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) for { - kind := auxIntToInt64(v.AuxInt) + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - mem := v_2 - if !(boundsABI(kind) == 0) { - break - } - v.reset(OpAMD64LoweredPanicBoundsA) - v.AuxInt = int64ToAuxInt(kind) - v.AddArg3(x, y, mem) + mask := v_2 + v.reset(OpAMD64VPSHRDQMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (PanicBounds [kind] x y mem) - // cond: boundsABI(kind) == 1 - // result: (LoweredPanicBoundsB [kind] x y mem) +} +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightAndFillUpperFromUint16x16 [a] x y) + // result: (VPSHRDW256 [a] x y) for { - kind := auxIntToInt64(v.AuxInt) + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - mem := v_2 - if !(boundsABI(kind) == 1) { - break - } - 
v.reset(OpAMD64LoweredPanicBoundsB) - v.AuxInt = int64ToAuxInt(kind) - v.AddArg3(x, y, mem) + v.reset(OpAMD64VPSHRDW256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } - // match: (PanicBounds [kind] x y mem) - // cond: boundsABI(kind) == 2 - // result: (LoweredPanicBoundsC [kind] x y mem) +} +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightAndFillUpperFromUint16x32 [a] x y) + // result: (VPSHRDW512 [a] x y) for { - kind := auxIntToInt64(v.AuxInt) + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - mem := v_2 - if !(boundsABI(kind) == 2) { - break - } - v.reset(OpAMD64LoweredPanicBoundsC) - v.AuxInt = int64ToAuxInt(kind) - v.AddArg3(x, y, mem) + v.reset(OpAMD64VPSHRDW512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } - return false } -func rewriteValueAMD64_OpPopCount16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint16x8(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (PopCount16 x) - // result: (POPCNTL (MOVWQZX x)) + // match: (ShiftAllRightAndFillUpperFromUint16x8 [a] x y) + // result: (VPSHRDW128 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64POPCNTL) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32) - v0.AddArg(x) - v.AddArg(v0) + y := v_1 + v.reset(OpAMD64VPSHRDW128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpPopCount8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint32x16(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (PopCount8 x) - // result: (POPCNTL (MOVBQZX x)) + // match: (ShiftAllRightAndFillUpperFromUint32x16 [a] x y) + // result: (VPSHRDD512 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64POPCNTL) - v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32) - v0.AddArg(x) - v.AddArg(v0) + y := v_1 + v.reset(OpAMD64VPSHRDD512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpRotateAllLeftInt32x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint32x4(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RotateAllLeftInt32x16 [a] x) - // result: (VPROLD512 [a] x) + // match: (ShiftAllRightAndFillUpperFromUint32x4 [a] x y) + // result: (VPSHRDD128 [a] x y) for { a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VPROLD512) + y := v_1 + v.reset(OpAMD64VPSHRDD128) v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpRotateAllLeftInt32x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint32x8(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RotateAllLeftInt32x4 [a] x) - // result: (VPROLD128 [a] x) + // match: (ShiftAllRightAndFillUpperFromUint32x8 [a] x y) + // result: (VPSHRDD256 [a] x y) for { a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VPROLD128) + y := v_1 + v.reset(OpAMD64VPSHRDD256) v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpRotateAllLeftInt32x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint64x2(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RotateAllLeftInt32x8 [a] x) - // result: (VPROLD256 [a] x) + // match: (ShiftAllRightAndFillUpperFromUint64x2 [a] x y) + // result: (VPSHRDQ128 [a] x y) for { a 
:= auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VPROLD256) + y := v_1 + v.reset(OpAMD64VPSHRDQ128) v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpRotateAllLeftInt64x2(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint64x4(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RotateAllLeftInt64x2 [a] x) - // result: (VPROLQ128 [a] x) + // match: (ShiftAllRightAndFillUpperFromUint64x4 [a] x y) + // result: (VPSHRDQ256 [a] x y) for { a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VPROLQ128) + y := v_1 + v.reset(OpAMD64VPSHRDQ256) v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpRotateAllLeftInt64x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint64x8(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RotateAllLeftInt64x4 [a] x) - // result: (VPROLQ256 [a] x) + // match: (ShiftAllRightAndFillUpperFromUint64x8 [a] x y) + // result: (VPSHRDQ512 [a] x y) for { a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VPROLQ256) + y := v_1 + v.reset(OpAMD64VPSHRDQ512) v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpRotateAllLeftInt64x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightMaskedInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RotateAllLeftInt64x8 [a] x) - // result: (VPROLQ512 [a] x) + b := v.Block + // match: (ShiftAllRightMaskedInt64x2 x y mask) + // result: (VPSRLQMasked128 x y (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VPROLQ512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpRotateAllLeftUint32x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightMaskedInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RotateAllLeftUint32x16 [a] x) - // result: (VPROLD512 [a] x) + b := v.Block + // match: (ShiftAllRightMaskedInt64x4 x y mask) + // result: (VPSRLQMasked256 x y (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VPROLD512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpRotateAllLeftUint32x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightMaskedInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RotateAllLeftUint32x4 [a] x) - // result: (VPROLD128 [a] x) + b := v.Block + // match: (ShiftAllRightMaskedInt64x8 x y mask) + // result: (VPSRLQMasked512 x y (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VPROLD128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpRotateAllLeftUint32x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightMaskedUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RotateAllLeftUint32x8 [a] x) - // result: (VPROLD256 
[a] x) + b := v.Block + // match: (ShiftAllRightMaskedUint64x2 x y mask) + // result: (VPSRLQMasked128 x y (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VPROLD256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpRotateAllLeftUint64x2(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightMaskedUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RotateAllLeftUint64x2 [a] x) - // result: (VPROLQ128 [a] x) + b := v.Block + // match: (ShiftAllRightMaskedUint64x4 x y mask) + // result: (VPSRLQMasked256 x y (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VPROLQ128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpRotateAllLeftUint64x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightMaskedUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RotateAllLeftUint64x4 [a] x) - // result: (VPROLQ256 [a] x) + b := v.Block + // match: (ShiftAllRightMaskedUint64x8 x y mask) + // result: (VPSRLQMasked512 x y (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VPROLQ256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpRotateAllLeftUint64x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightSignExtendedMaskedInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RotateAllLeftUint64x8 [a] x) - // result: (VPROLQ512 [a] x) + b := v.Block + // match: (ShiftAllRightSignExtendedMaskedInt64x2 x y mask) + // result: (VPSRAQMasked128 x y (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VPROLQ512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRAQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpRotateAllRightInt32x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightSignExtendedMaskedInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RotateAllRightInt32x16 [a] x) - // result: (VPRORD512 [a] x) + b := v.Block + // match: (ShiftAllRightSignExtendedMaskedInt64x4 x y mask) + // result: (VPSRAQMasked256 x y (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VPRORD512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRAQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpRotateAllRightInt32x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightSignExtendedMaskedInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RotateAllRightInt32x4 [a] x) - // result: (VPRORD128 [a] x) + b := v.Block + // match: 
(ShiftAllRightSignExtendedMaskedInt64x8 x y mask) + // result: (VPSRAQMasked512 x y (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VPRORD128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRAQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpRotateAllRightInt32x8(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt16x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RotateAllRightInt32x8 [a] x) - // result: (VPRORD256 [a] x) + b := v.Block + // match: (ShiftLeftAndFillUpperFromMaskedInt16x16 x y z mask) + // result: (VPSHLDVWMasked256 x y z (VPMOVVec16x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VPRORD256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpRotateAllRightInt64x2(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt16x32(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RotateAllRightInt64x2 [a] x) - // result: (VPRORQ128 [a] x) + b := v.Block + // match: (ShiftLeftAndFillUpperFromMaskedInt16x32 x y z mask) + // result: (VPSHLDVWMasked512 x y z (VPMOVVec16x32ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VPRORQ128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpRotateAllRightInt64x4(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt16x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RotateAllRightInt64x4 [a] x) - // result: (VPRORQ256 [a] x) + b := v.Block + // match: (ShiftLeftAndFillUpperFromMaskedInt16x8 x y z mask) + // result: (VPSHLDVWMasked128 x y z (VPMOVVec16x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VPRORQ256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpRotateAllRightInt64x8(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RotateAllRightInt64x8 [a] x) - // result: (VPRORQ512 [a] x) + b := v.Block + // match: (ShiftLeftAndFillUpperFromMaskedInt32x16 x y z mask) + // result: (VPSHLDVDMasked512 x y z (VPMOVVec32x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VPRORQ512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpRotateAllRightUint32x16(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt32x4(v 
*Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RotateAllRightUint32x16 [a] x) - // result: (VPRORD512 [a] x) + b := v.Block + // match: (ShiftLeftAndFillUpperFromMaskedInt32x4 x y z mask) + // result: (VPSHLDVDMasked128 x y z (VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VPRORD512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpRotateAllRightUint32x4(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RotateAllRightUint32x4 [a] x) - // result: (VPRORD128 [a] x) + b := v.Block + // match: (ShiftLeftAndFillUpperFromMaskedInt32x8 x y z mask) + // result: (VPSHLDVDMasked256 x y z (VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VPRORD128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpRotateAllRightUint32x8(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RotateAllRightUint32x8 [a] x) - // result: (VPRORD256 [a] x) + b := v.Block + // match: (ShiftLeftAndFillUpperFromMaskedInt64x2 x y z mask) + // result: (VPSHLDVQMasked128 x y z (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VPRORD256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpRotateAllRightUint64x2(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RotateAllRightUint64x2 [a] x) - // result: (VPRORQ128 [a] x) + b := v.Block + // match: (ShiftLeftAndFillUpperFromMaskedInt64x4 x y z mask) + // result: (VPSHLDVQMasked256 x y z (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VPRORQ128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpRotateAllRightUint64x4(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RotateAllRightUint64x4 [a] x) - // result: (VPRORQ256 [a] x) + b := v.Block + // match: (ShiftLeftAndFillUpperFromMaskedInt64x8 x y z mask) + // result: (VPSHLDVQMasked512 x y z (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VPRORQ256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, 
types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpRotateAllRightUint64x8(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint16x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RotateAllRightUint64x8 [a] x) - // result: (VPRORQ512 [a] x) + b := v.Block + // match: (ShiftLeftAndFillUpperFromMaskedUint16x16 x y z mask) + // result: (VPSHLDVWMasked256 x y z (VPMOVVec16x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VPRORQ512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpRoundFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint16x32(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RoundFloat32x4 x) - // result: (VROUNDPS128 [0] x) + b := v.Block + // match: (ShiftLeftAndFillUpperFromMaskedUint16x32 x y z mask) + // result: (VPSHLDVWMasked512 x y z (VPMOVVec16x32ToM mask)) for { x := v_0 - v.reset(OpAMD64VROUNDPS128) - v.AuxInt = int8ToAuxInt(0) - v.AddArg(x) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpRoundFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint16x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RoundFloat32x8 x) - // result: (VROUNDPS256 [0] x) + b := v.Block + // match: (ShiftLeftAndFillUpperFromMaskedUint16x8 x y z mask) + // result: (VPSHLDVWMasked128 x y z (VPMOVVec16x8ToM mask)) for { x := v_0 - v.reset(OpAMD64VROUNDPS256) - v.AuxInt = int8ToAuxInt(0) - v.AddArg(x) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpRoundFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RoundFloat64x2 x) - // result: (VROUNDPD128 [0] x) + b := v.Block + // match: (ShiftLeftAndFillUpperFromMaskedUint32x16 x y z mask) + // result: (VPSHLDVDMasked512 x y z (VPMOVVec32x16ToM mask)) for { x := v_0 - v.reset(OpAMD64VROUNDPD128) - v.AuxInt = int8ToAuxInt(0) - v.AddArg(x) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpRoundFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RoundFloat64x4 x) - // result: (VROUNDPD256 [0] x) + b := v.Block + // match: (ShiftLeftAndFillUpperFromMaskedUint32x4 x y z mask) + // result: (VPSHLDVDMasked128 x y z (VPMOVVec32x4ToM mask)) for { x := v_0 - v.reset(OpAMD64VROUNDPD256) - v.AuxInt = int8ToAuxInt(0) - v.AddArg(x) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVDMasked128) + v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpRoundToEven(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RoundToEven x) - // result: (ROUNDSD [0] x) + b := v.Block + // match: (ShiftLeftAndFillUpperFromMaskedUint32x8 x y z mask) + // result: (VPSHLDVDMasked256 x y z (VPMOVVec32x8ToM mask)) for { x := v_0 - v.reset(OpAMD64ROUNDSD) - v.AuxInt = int8ToAuxInt(0) - v.AddArg(x) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpRoundWithPrecisionFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RoundWithPrecisionFloat32x16 [a] x) - // result: (VRNDSCALEPS512 [a+0] x) + b := v.Block + // match: (ShiftLeftAndFillUpperFromMaskedUint64x2 x y z mask) + // result: (VPSHLDVQMasked128 x y z (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VRNDSCALEPS512) - v.AuxInt = int8ToAuxInt(a + 0) - v.AddArg(x) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpRoundWithPrecisionFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RoundWithPrecisionFloat32x4 [a] x) - // result: (VRNDSCALEPS128 [a+0] x) + b := v.Block + // match: (ShiftLeftAndFillUpperFromMaskedUint64x4 x y z mask) + // result: (VPSHLDVQMasked256 x y z (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VRNDSCALEPS128) - v.AuxInt = int8ToAuxInt(a + 0) - v.AddArg(x) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpRoundWithPrecisionFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RoundWithPrecisionFloat32x8 [a] x) - // result: (VRNDSCALEPS256 [a+0] x) + b := v.Block + // match: (ShiftLeftAndFillUpperFromMaskedUint64x8 x y z mask) + // result: (VPSHLDVQMasked512 x y z (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VRNDSCALEPS256) - v.AuxInt = int8ToAuxInt(a + 0) - v.AddArg(x) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpRoundWithPrecisionFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftMaskedInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RoundWithPrecisionFloat64x2 [a] x) - // result: (VRNDSCALEPD128 [a+0] x) + b := v.Block + // match: (ShiftLeftMaskedInt16x16 x y mask) + // result: (VPSLLVWMasked256 x y (VPMOVVec16x16ToM mask)) for 
{ - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VRNDSCALEPD128) - v.AuxInt = int8ToAuxInt(a + 0) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLVWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpRoundWithPrecisionFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftMaskedInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RoundWithPrecisionFloat64x4 [a] x) - // result: (VRNDSCALEPD256 [a+0] x) + b := v.Block + // match: (ShiftLeftMaskedInt16x32 x y mask) + // result: (VPSLLVWMasked512 x y (VPMOVVec16x32ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VRNDSCALEPD256) - v.AuxInt = int8ToAuxInt(a + 0) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLVWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpRoundWithPrecisionFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftMaskedInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RoundWithPrecisionFloat64x8 [a] x) - // result: (VRNDSCALEPD512 [a+0] x) + b := v.Block + // match: (ShiftLeftMaskedInt16x8 x y mask) + // result: (VPSLLVWMasked128 x y (VPMOVVec16x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VRNDSCALEPD512) - v.AuxInt = int8ToAuxInt(a + 0) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLVWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpRsh16Ux16(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftMaskedInt32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh16Ux16 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHRW x y) (SBBLcarrymask (CMPWconst y [16]))) + // match: (ShiftLeftMaskedInt32x16 x y mask) + // result: (VPSLLVDMasked512 x y (VPMOVVec32x16ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v2.AuxInt = int16ToAuxInt(16) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VPSLLVDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Rsh16Ux16 x y) - // cond: shiftIsBounded(v) - // result: (SHRW x y) +} +func rewriteValueAMD64_OpShiftLeftMaskedInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftLeftMaskedInt32x4 x y mask) + // result: (VPSLLVDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHRW) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSLLVDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpRsh16Ux32(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftMaskedInt32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh16Ux32 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL 
(SHRW x y) (SBBLcarrymask (CMPLconst y [16]))) + // match: (ShiftLeftMaskedInt32x8 x y mask) + // result: (VPSLLVDMasked256 x y (VPMOVVec32x8ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(16) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VPSLLVDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Rsh16Ux32 x y) - // cond: shiftIsBounded(v) - // result: (SHRW x y) +} +func rewriteValueAMD64_OpShiftLeftMaskedInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftLeftMaskedInt64x2 x y mask) + // result: (VPSLLVQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHRW) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSLLVQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpRsh16Ux64(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftMaskedInt64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh16Ux64 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHRW x y) (SBBLcarrymask (CMPQconst y [16]))) + // match: (ShiftLeftMaskedInt64x4 x y mask) + // result: (VPSLLVQMasked256 x y (VPMOVVec64x4ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(16) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VPSLLVQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Rsh16Ux64 x y) - // cond: shiftIsBounded(v) - // result: (SHRW x y) +} +func rewriteValueAMD64_OpShiftLeftMaskedInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftLeftMaskedInt64x8 x y mask) + // result: (VPSLLVQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHRW) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSLLVQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpRsh16Ux8(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftMaskedUint16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh16Ux8 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHRW x y) (SBBLcarrymask (CMPBconst y [16]))) + // match: (ShiftLeftMaskedUint16x16 x y mask) + // result: (VPSLLVWMasked256 x y (VPMOVVec16x16ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, 
types.TypeFlags) - v2.AuxInt = int8ToAuxInt(16) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VPSLLVWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Rsh16Ux8 x y) - // cond: shiftIsBounded(v) - // result: (SHRW x y) +} +func rewriteValueAMD64_OpShiftLeftMaskedUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftLeftMaskedUint16x32 x y mask) + // result: (VPSLLVWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHRW) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSLLVWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpRsh16x16(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftMaskedUint16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh16x16 x y) - // cond: !shiftIsBounded(v) - // result: (SARW x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [16]))))) + // match: (ShiftLeftMaskedUint16x8 x y mask) + // result: (VPSLLVWMasked128 x y (VPMOVVec16x8ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARW) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) - v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) - v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v3.AuxInt = int16ToAuxInt(16) - v3.AddArg(y) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg2(y, v1) - v.AddArg2(x, v0) + mask := v_2 + v.reset(OpAMD64VPSLLVWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Rsh16x16 x y) - // cond: shiftIsBounded(v) - // result: (SARW x y) +} +func rewriteValueAMD64_OpShiftLeftMaskedUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftLeftMaskedUint32x16 x y mask) + // result: (VPSLLVDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARW) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSLLVDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpRsh16x32(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftMaskedUint32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh16x32 x y) - // cond: !shiftIsBounded(v) - // result: (SARW x (ORL y (NOTL (SBBLcarrymask (CMPLconst y [16]))))) + // match: (ShiftLeftMaskedUint32x4 x y mask) + // result: (VPSLLVDMasked128 x y (VPMOVVec32x4ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARW) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) - v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) - v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v3.AuxInt = int32ToAuxInt(16) - v3.AddArg(y) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg2(y, v1) - v.AddArg2(x, v0) + mask := v_2 + v.reset(OpAMD64VPSLLVDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + 
v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Rsh16x32 x y) - // cond: shiftIsBounded(v) - // result: (SARW x y) +} +func rewriteValueAMD64_OpShiftLeftMaskedUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftLeftMaskedUint32x8 x y mask) + // result: (VPSLLVDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARW) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSLLVDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpRsh16x64(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftMaskedUint64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh16x64 x y) - // cond: !shiftIsBounded(v) - // result: (SARW x (ORQ y (NOTQ (SBBQcarrymask (CMPQconst y [16]))))) + // match: (ShiftLeftMaskedUint64x2 x y mask) + // result: (VPSLLVQMasked128 x y (VPMOVVec64x2ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARW) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) - v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) - v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) - v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v3.AuxInt = int32ToAuxInt(16) - v3.AddArg(y) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg2(y, v1) - v.AddArg2(x, v0) + mask := v_2 + v.reset(OpAMD64VPSLLVQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Rsh16x64 x y) - // cond: shiftIsBounded(v) - // result: (SARW x y) +} +func rewriteValueAMD64_OpShiftLeftMaskedUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftLeftMaskedUint64x4 x y mask) + // result: (VPSLLVQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARW) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSLLVQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpRsh16x8(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftMaskedUint64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh16x8 x y) - // cond: !shiftIsBounded(v) - // result: (SARW x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [16]))))) + // match: (ShiftLeftMaskedUint64x8 x y mask) + // result: (VPSLLVQMasked512 x y (VPMOVVec64x8ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARW) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) - v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) - v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v3.AuxInt = int8ToAuxInt(16) - v3.AddArg(y) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg2(y, v1) - v.AddArg2(x, v0) + mask := v_2 + v.reset(OpAMD64VPSLLVQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Rsh16x8 x y) - // cond: shiftIsBounded(v) - // result: (SARW x y) +} +func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt16x16(v *Value) bool { + v_3 := v.Args[3] + v_2 
:= v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightAndFillUpperFromMaskedInt16x16 x y z mask) + // result: (VPSHRDVWMasked256 x y z (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARW) - v.AddArg2(x, y) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - return false } -func rewriteValueAMD64_OpRsh32Ux16(v *Value) bool { +func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt16x32(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh32Ux16 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHRL x y) (SBBLcarrymask (CMPWconst y [32]))) + // match: (ShiftRightAndFillUpperFromMaskedInt16x32 x y z mask) + // result: (VPSHRDVWMasked512 x y z (VPMOVVec16x32ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v2.AuxInt = int16ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - // match: (Rsh32Ux16 x y) - // cond: shiftIsBounded(v) - // result: (SHRL x y) +} +func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt16x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightAndFillUpperFromMaskedInt16x8 x y z mask) + // result: (VPSHRDVWMasked128 x y z (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHRL) - v.AddArg2(x, y) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - return false } -func rewriteValueAMD64_OpRsh32Ux32(v *Value) bool { +func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh32Ux32 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHRL x y) (SBBLcarrymask (CMPLconst y [32]))) + // match: (ShiftRightAndFillUpperFromMaskedInt32x16 x y z mask) + // result: (VPSHRDVDMasked512 x y z (VPMOVVec32x16ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - // match: (Rsh32Ux32 x y) - // cond: shiftIsBounded(v) - // result: (SHRL x y) +} +func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightAndFillUpperFromMaskedInt32x4 x y 
z mask) + // result: (VPSHRDVDMasked128 x y z (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHRL) - v.AddArg2(x, y) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - return false } -func rewriteValueAMD64_OpRsh32Ux64(v *Value) bool { +func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh32Ux64 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHRL x y) (SBBLcarrymask (CMPQconst y [32]))) + // match: (ShiftRightAndFillUpperFromMaskedInt32x8 x y z mask) + // result: (VPSHRDVDMasked256 x y z (VPMOVVec32x8ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - // match: (Rsh32Ux64 x y) - // cond: shiftIsBounded(v) - // result: (SHRL x y) +} +func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightAndFillUpperFromMaskedInt64x2 x y z mask) + // result: (VPSHRDVQMasked128 x y z (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHRL) - v.AddArg2(x, y) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - return false } -func rewriteValueAMD64_OpRsh32Ux8(v *Value) bool { +func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh32Ux8 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHRL x y) (SBBLcarrymask (CMPBconst y [32]))) + // match: (ShiftRightAndFillUpperFromMaskedInt64x4 x y z mask) + // result: (VPSHRDVQMasked256 x y z (VPMOVVec64x4ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v2.AuxInt = int8ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - // match: (Rsh32Ux8 x y) - // cond: shiftIsBounded(v) - // result: (SHRL x y) +} +func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightAndFillUpperFromMaskedInt64x8 x y z mask) + // result: (VPSHRDVQMasked512 x y z (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - 
v.reset(OpAMD64SHRL) - v.AddArg2(x, y) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - return false } -func rewriteValueAMD64_OpRsh32x16(v *Value) bool { +func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint16x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh32x16 x y) - // cond: !shiftIsBounded(v) - // result: (SARL x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [32]))))) + // match: (ShiftRightAndFillUpperFromMaskedUint16x16 x y z mask) + // result: (VPSHRDVWMasked256 x y z (VPMOVVec16x16ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARL) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) - v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) - v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v3.AuxInt = int16ToAuxInt(32) - v3.AddArg(y) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg2(y, v1) - v.AddArg2(x, v0) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - // match: (Rsh32x16 x y) - // cond: shiftIsBounded(v) - // result: (SARL x y) +} +func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint16x32(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightAndFillUpperFromMaskedUint16x32 x y z mask) + // result: (VPSHRDVWMasked512 x y z (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARL) - v.AddArg2(x, y) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - return false } -func rewriteValueAMD64_OpRsh32x32(v *Value) bool { +func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint16x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh32x32 x y) - // cond: !shiftIsBounded(v) - // result: (SARL x (ORL y (NOTL (SBBLcarrymask (CMPLconst y [32]))))) + // match: (ShiftRightAndFillUpperFromMaskedUint16x8 x y z mask) + // result: (VPSHRDVWMasked128 x y z (VPMOVVec16x8ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARL) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) - v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) - v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v3.AuxInt = int32ToAuxInt(32) - v3.AddArg(y) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg2(y, v1) - v.AddArg2(x, v0) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - // match: (Rsh32x32 x y) - // cond: shiftIsBounded(v) - // result: (SARL x y) +} +func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightAndFillUpperFromMaskedUint32x16 x y z mask) + // result: (VPSHRDVDMasked512 x y z (VPMOVVec32x16ToM mask)) for 
{ x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARL) - v.AddArg2(x, y) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - return false } -func rewriteValueAMD64_OpRsh32x64(v *Value) bool { +func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh32x64 x y) - // cond: !shiftIsBounded(v) - // result: (SARL x (ORQ y (NOTQ (SBBQcarrymask (CMPQconst y [32]))))) + // match: (ShiftRightAndFillUpperFromMaskedUint32x4 x y z mask) + // result: (VPSHRDVDMasked128 x y z (VPMOVVec32x4ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARL) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) - v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) - v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) - v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v3.AuxInt = int32ToAuxInt(32) - v3.AddArg(y) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg2(y, v1) - v.AddArg2(x, v0) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - // match: (Rsh32x64 x y) - // cond: shiftIsBounded(v) - // result: (SARL x y) +} +func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightAndFillUpperFromMaskedUint32x8 x y z mask) + // result: (VPSHRDVDMasked256 x y z (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARL) - v.AddArg2(x, y) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightAndFillUpperFromMaskedUint64x2 x y z mask) + // result: (VPSHRDVQMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - return false } -func rewriteValueAMD64_OpRsh32x8(v *Value) bool { +func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh32x8 x y) - // cond: !shiftIsBounded(v) - // result: (SARL x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [32]))))) + // match: (ShiftRightAndFillUpperFromMaskedUint64x4 x y z mask) + // result: (VPSHRDVQMasked256 x y z (VPMOVVec64x4ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARL) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) - v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) - v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v3.AuxInt = int8ToAuxInt(32) - v3.AddArg(y) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg2(y, v1) - v.AddArg2(x, v0) 
+ z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - // match: (Rsh32x8 x y) - // cond: shiftIsBounded(v) - // result: (SARL x y) +} +func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightAndFillUpperFromMaskedUint64x8 x y z mask) + // result: (VPSHRDVQMasked512 x y z (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARL) - v.AddArg2(x, y) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - return false } -func rewriteValueAMD64_OpRsh64Ux16(v *Value) bool { +func rewriteValueAMD64_OpShiftRightMaskedInt16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh64Ux16 x y) - // cond: !shiftIsBounded(v) - // result: (ANDQ (SHRQ x y) (SBBQcarrymask (CMPWconst y [64]))) + // match: (ShiftRightMaskedInt16x16 x y mask) + // result: (VPSRLVWMasked256 x y (VPMOVVec16x16ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDQ) - v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v2.AuxInt = int16ToAuxInt(64) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VPSRLVWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Rsh64Ux16 x y) - // cond: shiftIsBounded(v) - // result: (SHRQ x y) +} +func rewriteValueAMD64_OpShiftRightMaskedInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightMaskedInt16x32 x y mask) + // result: (VPSRLVWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHRQ) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSRLVWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpRsh64Ux32(v *Value) bool { +func rewriteValueAMD64_OpShiftRightMaskedInt16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh64Ux32 x y) - // cond: !shiftIsBounded(v) - // result: (ANDQ (SHRQ x y) (SBBQcarrymask (CMPLconst y [64]))) + // match: (ShiftRightMaskedInt16x8 x y mask) + // result: (VPSRLVWMasked128 x y (VPMOVVec16x8ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDQ) - v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(64) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VPSRLVWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Rsh64Ux32 x y) - // cond: shiftIsBounded(v) - // result: (SHRQ x y) +} +func rewriteValueAMD64_OpShiftRightMaskedInt32x16(v *Value) bool { + 
v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightMaskedInt32x16 x y mask) + // result: (VPSRLVDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHRQ) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSRLVDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpRsh64Ux64(v *Value) bool { +func rewriteValueAMD64_OpShiftRightMaskedInt32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh64Ux64 x y) - // cond: !shiftIsBounded(v) - // result: (ANDQ (SHRQ x y) (SBBQcarrymask (CMPQconst y [64]))) + // match: (ShiftRightMaskedInt32x4 x y mask) + // result: (VPSRLVDMasked128 x y (VPMOVVec32x4ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDQ) - v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(64) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VPSRLVDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Rsh64Ux64 x y) - // cond: shiftIsBounded(v) - // result: (SHRQ x y) +} +func rewriteValueAMD64_OpShiftRightMaskedInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightMaskedInt32x8 x y mask) + // result: (VPSRLVDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHRQ) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSRLVDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpRsh64Ux8(v *Value) bool { +func rewriteValueAMD64_OpShiftRightMaskedInt64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh64Ux8 x y) - // cond: !shiftIsBounded(v) - // result: (ANDQ (SHRQ x y) (SBBQcarrymask (CMPBconst y [64]))) + // match: (ShiftRightMaskedInt64x2 x y mask) + // result: (VPSRLVQMasked128 x y (VPMOVVec64x2ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDQ) - v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v2.AuxInt = int8ToAuxInt(64) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VPSRLVQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Rsh64Ux8 x y) - // cond: shiftIsBounded(v) - // result: (SHRQ x y) +} +func rewriteValueAMD64_OpShiftRightMaskedInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightMaskedInt64x4 x y mask) + // result: (VPSRLVQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHRQ) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSRLVQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + 
v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpRsh64x16(v *Value) bool { +func rewriteValueAMD64_OpShiftRightMaskedInt64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh64x16 x y) - // cond: !shiftIsBounded(v) - // result: (SARQ x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [64]))))) + // match: (ShiftRightMaskedInt64x8 x y mask) + // result: (VPSRLVQMasked512 x y (VPMOVVec64x8ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARQ) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) - v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) - v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v3.AuxInt = int16ToAuxInt(64) - v3.AddArg(y) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg2(y, v1) - v.AddArg2(x, v0) + mask := v_2 + v.reset(OpAMD64VPSRLVQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Rsh64x16 x y) - // cond: shiftIsBounded(v) - // result: (SARQ x y) +} +func rewriteValueAMD64_OpShiftRightMaskedUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightMaskedUint16x16 x y mask) + // result: (VPSRLVWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARQ) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSRLVWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpRsh64x32(v *Value) bool { +func rewriteValueAMD64_OpShiftRightMaskedUint16x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh64x32 x y) - // cond: !shiftIsBounded(v) - // result: (SARQ x (ORL y (NOTL (SBBLcarrymask (CMPLconst y [64]))))) + // match: (ShiftRightMaskedUint16x32 x y mask) + // result: (VPSRLVWMasked512 x y (VPMOVVec16x32ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARQ) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) - v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) - v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v3.AuxInt = int32ToAuxInt(64) - v3.AddArg(y) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg2(y, v1) - v.AddArg2(x, v0) + mask := v_2 + v.reset(OpAMD64VPSRLVWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Rsh64x32 x y) - // cond: shiftIsBounded(v) - // result: (SARQ x y) +} +func rewriteValueAMD64_OpShiftRightMaskedUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightMaskedUint16x8 x y mask) + // result: (VPSRLVWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARQ) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSRLVWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpRsh64x64(v *Value) bool { +func rewriteValueAMD64_OpShiftRightMaskedUint32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b 
:= v.Block - // match: (Rsh64x64 x y) - // cond: !shiftIsBounded(v) - // result: (SARQ x (ORQ y (NOTQ (SBBQcarrymask (CMPQconst y [64]))))) + // match: (ShiftRightMaskedUint32x16 x y mask) + // result: (VPSRLVDMasked512 x y (VPMOVVec32x16ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARQ) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) - v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) - v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) - v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v3.AuxInt = int32ToAuxInt(64) - v3.AddArg(y) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg2(y, v1) - v.AddArg2(x, v0) + mask := v_2 + v.reset(OpAMD64VPSRLVDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Rsh64x64 x y) - // cond: shiftIsBounded(v) - // result: (SARQ x y) +} +func rewriteValueAMD64_OpShiftRightMaskedUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightMaskedUint32x4 x y mask) + // result: (VPSRLVDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARQ) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSRLVDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpRsh64x8(v *Value) bool { +func rewriteValueAMD64_OpShiftRightMaskedUint32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh64x8 x y) - // cond: !shiftIsBounded(v) - // result: (SARQ x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [64]))))) + // match: (ShiftRightMaskedUint32x8 x y mask) + // result: (VPSRLVDMasked256 x y (VPMOVVec32x8ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARQ) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) - v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) - v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v3.AuxInt = int8ToAuxInt(64) - v3.AddArg(y) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg2(y, v1) - v.AddArg2(x, v0) + mask := v_2 + v.reset(OpAMD64VPSRLVDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Rsh64x8 x y) - // cond: shiftIsBounded(v) - // result: (SARQ x y) +} +func rewriteValueAMD64_OpShiftRightMaskedUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightMaskedUint64x2 x y mask) + // result: (VPSRLVQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARQ) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSRLVQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpRsh8Ux16(v *Value) bool { +func rewriteValueAMD64_OpShiftRightMaskedUint64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh8Ux16 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHRB x y) (SBBLcarrymask (CMPWconst y [8]))) + // match: (ShiftRightMaskedUint64x4 x y mask) + // result: (VPSRLVQMasked256 x y (VPMOVVec64x4ToM 
mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v2.AuxInt = int16ToAuxInt(8) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VPSRLVQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Rsh8Ux16 x y) - // cond: shiftIsBounded(v) - // result: (SHRB x y) +} +func rewriteValueAMD64_OpShiftRightMaskedUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightMaskedUint64x8 x y mask) + // result: (VPSRLVQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHRB) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSRLVQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpRsh8Ux32(v *Value) bool { +func rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh8Ux32 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHRB x y) (SBBLcarrymask (CMPLconst y [8]))) + // match: (ShiftRightSignExtendedMaskedInt16x16 x y mask) + // result: (VPSRAVWMasked256 x y (VPMOVVec16x16ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(8) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VPSRAVWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Rsh8Ux32 x y) - // cond: shiftIsBounded(v) - // result: (SHRB x y) +} +func rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightSignExtendedMaskedInt16x32 x y mask) + // result: (VPSRAVWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHRB) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSRAVWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpRsh8Ux64(v *Value) bool { +func rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh8Ux64 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHRB x y) (SBBLcarrymask (CMPQconst y [8]))) + // match: (ShiftRightSignExtendedMaskedInt16x8 x y mask) + // result: (VPSRAVWMasked128 x y (VPMOVVec16x8ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(8) - v2.AddArg(y) - v1.AddArg(v2) 
- v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VPSRAVWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Rsh8Ux64 x y) - // cond: shiftIsBounded(v) - // result: (SHRB x y) +} +func rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightSignExtendedMaskedInt32x16 x y mask) + // result: (VPSRAVDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHRB) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSRAVDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpRsh8Ux8(v *Value) bool { +func rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh8Ux8 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHRB x y) (SBBLcarrymask (CMPBconst y [8]))) + // match: (ShiftRightSignExtendedMaskedInt32x4 x y mask) + // result: (VPSRAVDMasked128 x y (VPMOVVec32x4ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v2.AuxInt = int8ToAuxInt(8) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VPSRAVDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Rsh8Ux8 x y) - // cond: shiftIsBounded(v) - // result: (SHRB x y) +} +func rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightSignExtendedMaskedInt32x8 x y mask) + // result: (VPSRAVDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHRB) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSRAVDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpRsh8x16(v *Value) bool { +func rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh8x16 x y) - // cond: !shiftIsBounded(v) - // result: (SARB x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [8]))))) + // match: (ShiftRightSignExtendedMaskedInt64x2 x y mask) + // result: (VPSRAVQMasked128 x y (VPMOVVec64x2ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARB) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) - v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) - v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v3.AuxInt = int16ToAuxInt(8) - v3.AddArg(y) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg2(y, v1) - v.AddArg2(x, v0) + mask := v_2 + v.reset(OpAMD64VPSRAVQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Rsh8x16 x y) - // cond: 
shiftIsBounded(v) - // result: (SARB x y) +} +func rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightSignExtendedMaskedInt64x4 x y mask) + // result: (VPSRAVQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARB) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSRAVQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpRsh8x32(v *Value) bool { +func rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh8x32 x y) - // cond: !shiftIsBounded(v) - // result: (SARB x (ORL y (NOTL (SBBLcarrymask (CMPLconst y [8]))))) + // match: (ShiftRightSignExtendedMaskedInt64x8 x y mask) + // result: (VPSRAVQMasked512 x y (VPMOVVec64x8ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARB) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) - v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) - v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v3.AuxInt = int32ToAuxInt(8) - v3.AddArg(y) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg2(y, v1) - v.AddArg2(x, v0) + mask := v_2 + v.reset(OpAMD64VPSRAVQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Rsh8x32 x y) - // cond: shiftIsBounded(v) - // result: (SARB x y) +} +func rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightSignExtendedMaskedUint16x16 x y mask) + // result: (VPSRAVWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARB) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSRAVWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpRsh8x64(v *Value) bool { +func rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint16x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh8x64 x y) - // cond: !shiftIsBounded(v) - // result: (SARB x (ORQ y (NOTQ (SBBQcarrymask (CMPQconst y [8]))))) + // match: (ShiftRightSignExtendedMaskedUint16x32 x y mask) + // result: (VPSRAVWMasked512 x y (VPMOVVec16x32ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARB) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) - v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) - v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) - v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v3.AuxInt = int32ToAuxInt(8) - v3.AddArg(y) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg2(y, v1) - v.AddArg2(x, v0) + mask := v_2 + v.reset(OpAMD64VPSRAVWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Rsh8x64 x y) - // cond: shiftIsBounded(v) - // result: (SARB x y) +} +func rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint16x8(v *Value) bool { + v_2 := 
v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightSignExtendedMaskedUint16x8 x y mask) + // result: (VPSRAVWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARB) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSRAVWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpRsh8x8(v *Value) bool { +func rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh8x8 x y) - // cond: !shiftIsBounded(v) - // result: (SARB x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [8]))))) + // match: (ShiftRightSignExtendedMaskedUint32x16 x y mask) + // result: (VPSRAVDMasked512 x y (VPMOVVec32x16ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARB) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) - v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) - v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v3.AuxInt = int8ToAuxInt(8) - v3.AddArg(y) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg2(y, v1) - v.AddArg2(x, v0) + mask := v_2 + v.reset(OpAMD64VPSRAVDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Rsh8x8 x y) - // cond: shiftIsBounded(v) - // result: (SARB x y) +} +func rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightSignExtendedMaskedUint32x4 x y mask) + // result: (VPSRAVDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARB) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSRAVDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpSelect0(v *Value) bool { +func rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Select0 (Mul64uover x y)) - // result: (Select0 (MULQU x y)) + // match: (ShiftRightSignExtendedMaskedUint32x8 x y mask) + // result: (VPSRAVDMasked256 x y (VPMOVVec32x8ToM mask)) for { - if v_0.Op != OpMul64uover { - break - } - y := v_0.Args[1] - x := v_0.Args[0] - v.reset(OpSelect0) - v.Type = typ.UInt64 - v0 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags)) - v0.AddArg2(x, y) - v.AddArg(v0) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRAVDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Select0 (Mul32uover x y)) - // result: (Select0 (MULLU x y)) +} +func rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightSignExtendedMaskedUint64x2 x y mask) + // result: (VPSRAVQMasked128 x y (VPMOVVec64x2ToM mask)) for { - if v_0.Op != OpMul32uover { - break - } - y := v_0.Args[1] - x := v_0.Args[0] - v.reset(OpSelect0) - v.Type = typ.UInt32 - v0 := b.NewValue0(v.Pos, 
OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags)) - v0.AddArg2(x, y) - v.AddArg(v0) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRAVQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Select0 (Add64carry x y c)) - // result: (Select0 (ADCQ x y (Select1 (NEGLflags c)))) +} +func rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightSignExtendedMaskedUint64x4 x y mask) + // result: (VPSRAVQMasked256 x y (VPMOVVec64x4ToM mask)) for { - if v_0.Op != OpAdd64carry { - break - } - c := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - v.reset(OpSelect0) - v.Type = typ.UInt64 - v0 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags)) - v2.AddArg(c) - v1.AddArg(v2) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRAVQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Select0 (Sub64borrow x y c)) - // result: (Select0 (SBBQ x y (Select1 (NEGLflags c)))) +} +func rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightSignExtendedMaskedUint64x8 x y mask) + // result: (VPSRAVQMasked512 x y (VPMOVVec64x8ToM mask)) for { - if v_0.Op != OpSub64borrow { - break - } - c := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - v.reset(OpSelect0) - v.Type = typ.UInt64 - v0 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags)) - v2.AddArg(c) - v1.AddArg(v2) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRAVQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Select0 (AddTupleFirst32 val tuple)) - // result: (ADDL val (Select0 tuple)) +} +func rewriteValueAMD64_OpSlicemask(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (Slicemask x) + // result: (SARQconst (NEGQ x) [63]) for { t := v.Type - if v_0.Op != OpAMD64AddTupleFirst32 { - break - } - tuple := v_0.Args[1] - val := v_0.Args[0] - v.reset(OpAMD64ADDL) - v0 := b.NewValue0(v.Pos, OpSelect0, t) - v0.AddArg(tuple) - v.AddArg2(val, v0) + x := v_0 + v.reset(OpAMD64SARQconst) + v.AuxInt = int8ToAuxInt(63) + v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) + v0.AddArg(x) + v.AddArg(v0) return true } - // match: (Select0 (AddTupleFirst64 val tuple)) - // result: (ADDQ val (Select0 tuple)) +} +func rewriteValueAMD64_OpSpectreIndex(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (SpectreIndex x y) + // result: (CMOVQCC x (MOVQconst [0]) (CMPQ x y)) for { - t := v.Type - if v_0.Op != OpAMD64AddTupleFirst64 { - break - } - tuple := v_0.Args[1] - val := v_0.Args[0] - v.reset(OpAMD64ADDQ) - v0 := b.NewValue0(v.Pos, OpSelect0, t) - v0.AddArg(tuple) - v.AddArg2(val, v0) + x := v_0 + y := v_1 + v.reset(OpAMD64CMOVQCC) + v0 := b.NewValue0(v.Pos, 
OpAMD64MOVQconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v1.AddArg2(x, y) + v.AddArg3(x, v0, v1) return true } - // match: (Select0 a:(ADDQconstflags [c] x)) - // cond: a.Uses == 1 - // result: (ADDQconst [c] x) +} +func rewriteValueAMD64_OpSpectreSliceIndex(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (SpectreSliceIndex x y) + // result: (CMOVQHI x (MOVQconst [0]) (CMPQ x y)) for { - a := v_0 - if a.Op != OpAMD64ADDQconstflags { - break - } - c := auxIntToInt32(a.AuxInt) - x := a.Args[0] - if !(a.Uses == 1) { - break - } - v.reset(OpAMD64ADDQconst) - v.AuxInt = int32ToAuxInt(c) - v.AddArg(x) + x := v_0 + y := v_1 + v.reset(OpAMD64CMOVQHI) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v1.AddArg2(x, y) + v.AddArg3(x, v0, v1) return true } - // match: (Select0 a:(ADDLconstflags [c] x)) - // cond: a.Uses == 1 - // result: (ADDLconst [c] x) +} +func rewriteValueAMD64_OpSqrtMaskedFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SqrtMaskedFloat32x16 x mask) + // result: (VSQRTPSMasked512 x (VPMOVVec32x16ToM mask)) for { - a := v_0 - if a.Op != OpAMD64ADDLconstflags { - break - } - c := auxIntToInt32(a.AuxInt) - x := a.Args[0] - if !(a.Uses == 1) { - break - } - v.reset(OpAMD64ADDLconst) - v.AuxInt = int32ToAuxInt(c) - v.AddArg(x) + x := v_0 + mask := v_1 + v.reset(OpAMD64VSQRTPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - return false } -func rewriteValueAMD64_OpSelect1(v *Value) bool { +func rewriteValueAMD64_OpSqrtMaskedFloat32x4(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Select1 (Mul64uover x y)) - // result: (SETO (Select1 (MULQU x y))) - for { - if v_0.Op != OpMul64uover { - break - } - y := v_0.Args[1] - x := v_0.Args[0] - v.reset(OpAMD64SETO) - v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags)) - v1.AddArg2(x, y) - v0.AddArg(v1) - v.AddArg(v0) + // match: (SqrtMaskedFloat32x4 x mask) + // result: (VSQRTPSMasked128 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VSQRTPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - // match: (Select1 (Mul32uover x y)) - // result: (SETO (Select1 (MULLU x y))) +} +func rewriteValueAMD64_OpSqrtMaskedFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SqrtMaskedFloat32x8 x mask) + // result: (VSQRTPSMasked256 x (VPMOVVec32x8ToM mask)) for { - if v_0.Op != OpMul32uover { - break - } - y := v_0.Args[1] - x := v_0.Args[0] - v.reset(OpAMD64SETO) - v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags)) - v1.AddArg2(x, y) - v0.AddArg(v1) - v.AddArg(v0) + x := v_0 + mask := v_1 + v.reset(OpAMD64VSQRTPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - // match: (Select1 (Add64carry x y c)) - // result: (NEGQ (SBBQcarrymask (Select1 (ADCQ x y (Select1 (NEGLflags c)))))) +} +func rewriteValueAMD64_OpSqrtMaskedFloat64x2(v *Value) bool { + v_1 := 
v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SqrtMaskedFloat64x2 x mask) + // result: (VSQRTPDMasked128 x (VPMOVVec64x2ToM mask)) for { - if v_0.Op != OpAdd64carry { - break - } - c := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - v.reset(OpAMD64NEGQ) - v.Type = typ.UInt64 - v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v2 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags)) - v4.AddArg(c) - v3.AddArg(v4) - v2.AddArg3(x, y, v3) - v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + x := v_0 + mask := v_1 + v.reset(OpAMD64VSQRTPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - // match: (Select1 (Sub64borrow x y c)) - // result: (NEGQ (SBBQcarrymask (Select1 (SBBQ x y (Select1 (NEGLflags c)))))) +} +func rewriteValueAMD64_OpSqrtMaskedFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SqrtMaskedFloat64x4 x mask) + // result: (VSQRTPDMasked256 x (VPMOVVec64x4ToM mask)) for { - if v_0.Op != OpSub64borrow { - break - } - c := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - v.reset(OpAMD64NEGQ) - v.Type = typ.UInt64 - v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v2 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags)) - v4.AddArg(c) - v3.AddArg(v4) - v2.AddArg3(x, y, v3) - v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + x := v_0 + mask := v_1 + v.reset(OpAMD64VSQRTPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - // match: (Select1 (NEGLflags (MOVQconst [0]))) - // result: (FlagEQ) +} +func rewriteValueAMD64_OpSqrtMaskedFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SqrtMaskedFloat64x8 x mask) + // result: (VSQRTPDMasked512 x (VPMOVVec64x8ToM mask)) for { - if v_0.Op != OpAMD64NEGLflags { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 0 { - break - } - v.reset(OpAMD64FlagEQ) + x := v_0 + mask := v_1 + v.reset(OpAMD64VSQRTPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - // match: (Select1 (NEGLflags (NEGQ (SBBQcarrymask x)))) - // result: x +} +func rewriteValueAMD64_OpStore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Store {t} ptr val mem) + // cond: t.Size() == 8 && t.IsFloat() + // result: (MOVSDstore ptr val mem) for { - if v_0.Op != OpAMD64NEGLflags { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64NEGQ { - break - } - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAMD64SBBQcarrymask { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 8 && t.IsFloat()) { break } - x := v_0_0_0.Args[0] - v.copyOf(x) + v.reset(OpAMD64MOVSDstore) + v.AddArg3(ptr, val, mem) return true } - // match: (Select1 (AddTupleFirst32 _ tuple)) - // result: (Select1 tuple) + // match: (Store {t} ptr val mem) + // cond: t.Size() == 4 && t.IsFloat() + // result: 
(MOVSSstore ptr val mem) for { - if v_0.Op != OpAMD64AddTupleFirst32 { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 4 && t.IsFloat()) { break } - tuple := v_0.Args[1] - v.reset(OpSelect1) - v.AddArg(tuple) + v.reset(OpAMD64MOVSSstore) + v.AddArg3(ptr, val, mem) return true } - // match: (Select1 (AddTupleFirst64 _ tuple)) - // result: (Select1 tuple) + // match: (Store {t} ptr val mem) + // cond: t.Size() == 8 && !t.IsFloat() + // result: (MOVQstore ptr val mem) for { - if v_0.Op != OpAMD64AddTupleFirst64 { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 8 && !t.IsFloat()) { break } - tuple := v_0.Args[1] - v.reset(OpSelect1) - v.AddArg(tuple) + v.reset(OpAMD64MOVQstore) + v.AddArg3(ptr, val, mem) return true } - // match: (Select1 a:(LoweredAtomicAnd64 ptr val mem)) - // cond: a.Uses == 1 && clobber(a) - // result: (ANDQlock ptr val mem) + // match: (Store {t} ptr val mem) + // cond: t.Size() == 4 && !t.IsFloat() + // result: (MOVLstore ptr val mem) for { - a := v_0 - if a.Op != OpAMD64LoweredAtomicAnd64 { - break - } - mem := a.Args[2] - ptr := a.Args[0] - val := a.Args[1] - if !(a.Uses == 1 && clobber(a)) { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 4 && !t.IsFloat()) { break } - v.reset(OpAMD64ANDQlock) + v.reset(OpAMD64MOVLstore) v.AddArg3(ptr, val, mem) return true } - // match: (Select1 a:(LoweredAtomicAnd32 ptr val mem)) - // cond: a.Uses == 1 && clobber(a) - // result: (ANDLlock ptr val mem) + // match: (Store {t} ptr val mem) + // cond: t.Size() == 2 + // result: (MOVWstore ptr val mem) for { - a := v_0 - if a.Op != OpAMD64LoweredAtomicAnd32 { - break - } - mem := a.Args[2] - ptr := a.Args[0] - val := a.Args[1] - if !(a.Uses == 1 && clobber(a)) { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 2) { break } - v.reset(OpAMD64ANDLlock) + v.reset(OpAMD64MOVWstore) v.AddArg3(ptr, val, mem) return true } - // match: (Select1 a:(LoweredAtomicOr64 ptr val mem)) - // cond: a.Uses == 1 && clobber(a) - // result: (ORQlock ptr val mem) + // match: (Store {t} ptr val mem) + // cond: t.Size() == 1 + // result: (MOVBstore ptr val mem) for { - a := v_0 - if a.Op != OpAMD64LoweredAtomicOr64 { - break - } - mem := a.Args[2] - ptr := a.Args[0] - val := a.Args[1] - if !(a.Uses == 1 && clobber(a)) { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 1) { break } - v.reset(OpAMD64ORQlock) + v.reset(OpAMD64MOVBstore) v.AddArg3(ptr, val, mem) return true } - // match: (Select1 a:(LoweredAtomicOr32 ptr val mem)) - // cond: a.Uses == 1 && clobber(a) - // result: (ORLlock ptr val mem) + // match: (Store {t} ptr val mem) + // cond: t.Size() == 16 + // result: (VMOVDQUstore128 ptr val mem) for { - a := v_0 - if a.Op != OpAMD64LoweredAtomicOr32 { - break - } - mem := a.Args[2] - ptr := a.Args[0] - val := a.Args[1] - if !(a.Uses == 1 && clobber(a)) { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 16) { break } - v.reset(OpAMD64ORLlock) + v.reset(OpAMD64VMOVDQUstore128) v.AddArg3(ptr, val, mem) return true } - return false -} -func rewriteValueAMD64_OpSelectN(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - config := b.Func.Config - // match: (SelectN [0] call:(CALLstatic {sym} s1:(MOVQstoreconst _ [sc] s2:(MOVQstore _ src s3:(MOVQstore _ dst mem))))) - // cond: sc.Val64() >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, 
sc.Val64(), config) && clobber(s1, s2, s3, call) - // result: (Move [sc.Val64()] dst src mem) + // match: (Store {t} ptr val mem) + // cond: t.Size() == 32 + // result: (VMOVDQUstore256 ptr val mem) for { - if auxIntToInt64(v.AuxInt) != 0 { - break - } - call := v_0 - if call.Op != OpAMD64CALLstatic || len(call.Args) != 1 { - break - } - sym := auxToCall(call.Aux) - s1 := call.Args[0] - if s1.Op != OpAMD64MOVQstoreconst { - break - } - sc := auxIntToValAndOff(s1.AuxInt) - _ = s1.Args[1] - s2 := s1.Args[1] - if s2.Op != OpAMD64MOVQstore { - break - } - _ = s2.Args[2] - src := s2.Args[1] - s3 := s2.Args[2] - if s3.Op != OpAMD64MOVQstore { - break - } - mem := s3.Args[2] - dst := s3.Args[1] - if !(sc.Val64() >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sc.Val64(), config) && clobber(s1, s2, s3, call)) { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 32) { break } - v.reset(OpMove) - v.AuxInt = int64ToAuxInt(sc.Val64()) - v.AddArg3(dst, src, mem) + v.reset(OpAMD64VMOVDQUstore256) + v.AddArg3(ptr, val, mem) return true } - // match: (SelectN [0] call:(CALLstatic {sym} dst src (MOVQconst [sz]) mem)) - // cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call) - // result: (Move [sz] dst src mem) + // match: (Store {t} ptr val mem) + // cond: t.Size() == 64 + // result: (VMOVDQUstore512 ptr val mem) for { - if auxIntToInt64(v.AuxInt) != 0 { - break - } - call := v_0 - if call.Op != OpAMD64CALLstatic || len(call.Args) != 4 { - break - } - sym := auxToCall(call.Aux) - mem := call.Args[3] - dst := call.Args[0] - src := call.Args[1] - call_2 := call.Args[2] - if call_2.Op != OpAMD64MOVQconst { - break - } - sz := auxIntToInt64(call_2.AuxInt) - if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call)) { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 64) { break } - v.reset(OpMove) - v.AuxInt = int64ToAuxInt(sz) - v.AddArg3(dst, src, mem) + v.reset(OpAMD64VMOVDQUstore512) + v.AddArg3(ptr, val, mem) return true } return false } -func rewriteValueAMD64_OpSet128Float32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (Set128Float32x8 [a] x y) - // result: (VINSERTF128256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTF128256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSet128Float64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (Set128Float64x4 [a] x y) - // result: (VINSERTF128256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTF128256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSet128Int16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (Set128Int16x16 [a] x y) - // result: (VINSERTI128256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSet128Int32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (Set128Int32x8 [a] x y) - // result: (VINSERTI128256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - 
return true - } -} -func rewriteValueAMD64_OpSet128Int64x4(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedFloat32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Set128Int64x4 [a] x y) - // result: (VINSERTI128256 [a] x y) + b := v.Block + // match: (SubMaskedFloat32x16 x y mask) + // result: (VSUBPSMasked512 x y (VPMOVVec32x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VSUBPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpSet128Int8x32(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedFloat32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Set128Int8x32 [a] x y) - // result: (VINSERTI128256 [a] x y) + b := v.Block + // match: (SubMaskedFloat32x4 x y mask) + // result: (VSUBPSMasked128 x y (VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VSUBPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpSet128Uint16x16(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedFloat32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Set128Uint16x16 [a] x y) - // result: (VINSERTI128256 [a] x y) + b := v.Block + // match: (SubMaskedFloat32x8 x y mask) + // result: (VSUBPSMasked256 x y (VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VSUBPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpSet128Uint32x8(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedFloat64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Set128Uint32x8 [a] x y) - // result: (VINSERTI128256 [a] x y) + b := v.Block + // match: (SubMaskedFloat64x2 x y mask) + // result: (VSUBPDMasked128 x y (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VSUBPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpSet128Uint64x4(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedFloat64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Set128Uint64x4 [a] x y) - // result: (VINSERTI128256 [a] x y) + b := v.Block + // match: (SubMaskedFloat64x4 x y mask) + // result: (VSUBPDMasked256 x y (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VSUBPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpSet128Uint8x32(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedFloat64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: 
(Set128Uint8x32 [a] x y) - // result: (VINSERTI128256 [a] x y) + b := v.Block + // match: (SubMaskedFloat64x8 x y mask) + // result: (VSUBPDMasked512 x y (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VSUBPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpSetElemInt16x8(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedInt16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetElemInt16x8 [a] x y) - // result: (VPINSRW128 [a] x y) + b := v.Block + // match: (SubMaskedInt16x16 x y mask) + // result: (VPSUBWMasked256 x y (VPMOVVec16x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPINSRW128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSUBWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpSetElemInt32x4(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedInt16x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetElemInt32x4 [a] x y) - // result: (VPINSRD128 [a] x y) + b := v.Block + // match: (SubMaskedInt16x32 x y mask) + // result: (VPSUBWMasked512 x y (VPMOVVec16x32ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPINSRD128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSUBWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpSetElemInt64x2(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedInt16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetElemInt64x2 [a] x y) - // result: (VPINSRQ128 [a] x y) + b := v.Block + // match: (SubMaskedInt16x8 x y mask) + // result: (VPSUBWMasked128 x y (VPMOVVec16x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPINSRQ128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSUBWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpSetElemInt8x16(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedInt32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetElemInt8x16 [a] x y) - // result: (VPINSRB128 [a] x y) + b := v.Block + // match: (SubMaskedInt32x16 x y mask) + // result: (VPSUBDMasked512 x y (VPMOVVec32x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPINSRB128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSUBDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpSetElemUint16x8(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedInt32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetElemUint16x8 [a] x y) - // result: (VPINSRW128 [a] x y) + b := v.Block + // match: (SubMaskedInt32x4 x y mask) + // result: (VPSUBDMasked128 x y (VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - 
v.reset(OpAMD64VPINSRW128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSUBDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpSetElemUint32x4(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedInt32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetElemUint32x4 [a] x y) - // result: (VPINSRD128 [a] x y) + b := v.Block + // match: (SubMaskedInt32x8 x y mask) + // result: (VPSUBDMasked256 x y (VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPINSRD128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSUBDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpSetElemUint64x2(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedInt64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetElemUint64x2 [a] x y) - // result: (VPINSRQ128 [a] x y) + b := v.Block + // match: (SubMaskedInt64x2 x y mask) + // result: (VPSUBQMasked128 x y (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPINSRQ128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSUBQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpSetElemUint8x16(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedInt64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetElemUint8x16 [a] x y) - // result: (VPINSRB128 [a] x y) + b := v.Block + // match: (SubMaskedInt64x4 x y mask) + // result: (VPSUBQMasked256 x y (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPINSRB128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSUBQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt16x16(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedInt64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromInt16x16 [a] x y) - // result: (VPSHLDW256 [a] x y) + b := v.Block + // match: (SubMaskedInt64x8 x y mask) + // result: (VPSUBQMasked512 x y (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSHLDW256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSUBQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt16x32(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedInt8x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromInt16x32 [a] x y) - // result: (VPSHLDW512 [a] x y) + b := v.Block + // match: (SubMaskedInt8x16 x y mask) + // result: (VPSUBBMasked128 x y (VPMOVVec8x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSHLDW512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSUBBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, 
types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt16x8(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedInt8x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromInt16x8 [a] x y) - // result: (VPSHLDW128 [a] x y) + b := v.Block + // match: (SubMaskedInt8x32 x y mask) + // result: (VPSUBBMasked256 x y (VPMOVVec8x32ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSHLDW128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSUBBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt32x16(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedInt8x64(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromInt32x16 [a] x y) - // result: (VPSHLDD512 [a] x y) + b := v.Block + // match: (SubMaskedInt8x64 x y mask) + // result: (VPSUBBMasked512 x y (VPMOVVec8x64ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSHLDD512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSUBBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt32x4(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedUint16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromInt32x4 [a] x y) - // result: (VPSHLDD128 [a] x y) + b := v.Block + // match: (SubMaskedUint16x16 x y mask) + // result: (VPSUBWMasked256 x y (VPMOVVec16x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSHLDD128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSUBWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt32x8(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedUint16x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromInt32x8 [a] x y) - // result: (VPSHLDD256 [a] x y) + b := v.Block + // match: (SubMaskedUint16x32 x y mask) + // result: (VPSUBWMasked512 x y (VPMOVVec16x32ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSHLDD256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSUBWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt64x2(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedUint16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromInt64x2 [a] x y) - // result: (VPSHLDQ128 [a] x y) + b := v.Block + // match: (SubMaskedUint16x8 x y mask) + // result: (VPSUBWMasked128 x y (VPMOVVec16x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSHLDQ128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSUBWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, 
y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt64x4(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedUint32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromInt64x4 [a] x y) - // result: (VPSHLDQ256 [a] x y) + b := v.Block + // match: (SubMaskedUint32x16 x y mask) + // result: (VPSUBDMasked512 x y (VPMOVVec32x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSHLDQ256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSUBDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt64x8(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedUint32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromInt64x8 [a] x y) - // result: (VPSHLDQ512 [a] x y) + b := v.Block + // match: (SubMaskedUint32x4 x y mask) + // result: (VPSUBDMasked128 x y (VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSHLDQ512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSUBDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint16x16(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedUint32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromUint16x16 [a] x y) - // result: (VPSHLDW256 [a] x y) + b := v.Block + // match: (SubMaskedUint32x8 x y mask) + // result: (VPSUBDMasked256 x y (VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSHLDW256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSUBDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint16x32(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedUint64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromUint16x32 [a] x y) - // result: (VPSHLDW512 [a] x y) + b := v.Block + // match: (SubMaskedUint64x2 x y mask) + // result: (VPSUBQMasked128 x y (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSHLDW512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSUBQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint16x8(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedUint64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromUint16x8 [a] x y) - // result: (VPSHLDW128 [a] x y) + b := v.Block + // match: (SubMaskedUint64x4 x y mask) + // result: (VPSUBQMasked256 x y (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSHLDW128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSUBQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func 
rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint32x16(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedUint64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromUint32x16 [a] x y) - // result: (VPSHLDD512 [a] x y) + b := v.Block + // match: (SubMaskedUint64x8 x y mask) + // result: (VPSUBQMasked512 x y (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSHLDD512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSUBQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint32x4(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedUint8x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromUint32x4 [a] x y) - // result: (VPSHLDD128 [a] x y) + b := v.Block + // match: (SubMaskedUint8x16 x y mask) + // result: (VPSUBBMasked128 x y (VPMOVVec8x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSHLDD128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSUBBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint32x8(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedUint8x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromUint32x8 [a] x y) - // result: (VPSHLDD256 [a] x y) + b := v.Block + // match: (SubMaskedUint8x32 x y mask) + // result: (VPSUBBMasked256 x y (VPMOVVec8x32ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSHLDD256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSUBBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint64x2(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedUint8x64(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromUint64x2 [a] x y) - // result: (VPSHLDQ128 [a] x y) + b := v.Block + // match: (SubMaskedUint8x64 x y mask) + // result: (VPSUBBMasked512 x y (VPMOVVec8x64ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSHLDQ128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSUBBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint64x4(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpTrunc(v *Value) bool { v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromUint64x4 [a] x y) - // result: (VPSHLDQ256 [a] x y) + // match: (Trunc x) + // result: (ROUNDSD [3] x) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPSHLDQ256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + v.reset(OpAMD64ROUNDSD) + v.AuxInt = int8ToAuxInt(3) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint64x8(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpTruncFloat32x4(v *Value) bool { v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromUint64x8 
[a] x y) - // result: (VPSHLDQ512 [a] x y) + // match: (TruncFloat32x4 x) + // result: (VROUNDPS128 [3] x) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPSHLDQ512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + v.reset(OpAMD64VROUNDPS128) + v.AuxInt = int8ToAuxInt(3) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt16x16(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpTruncFloat32x8(v *Value) bool { v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromInt16x16 [a] x y) - // result: (VPSHRDW256 [a] x y) + // match: (TruncFloat32x8 x) + // result: (VROUNDPS256 [3] x) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDW256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + v.reset(OpAMD64VROUNDPS256) + v.AuxInt = int8ToAuxInt(3) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt16x32(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpTruncFloat64x2(v *Value) bool { v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromInt16x32 [a] x y) - // result: (VPSHRDW512 [a] x y) + // match: (TruncFloat64x2 x) + // result: (VROUNDPD128 [3] x) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDW512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + v.reset(OpAMD64VROUNDPD128) + v.AuxInt = int8ToAuxInt(3) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt16x8(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpTruncFloat64x4(v *Value) bool { v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromInt16x8 [a] x y) - // result: (VPSHRDW128 [a] x y) + // match: (TruncFloat64x4 x) + // result: (VROUNDPD256 [3] x) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDW128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + v.reset(OpAMD64VROUNDPD256) + v.AuxInt = int8ToAuxInt(3) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt32x16(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpTruncWithPrecisionFloat32x16(v *Value) bool { v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromInt32x16 [a] x y) - // result: (VPSHRDD512 [a] x y) + // match: (TruncWithPrecisionFloat32x16 [a] x) + // result: (VRNDSCALEPS512 [a+3] x) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDD512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + v.reset(OpAMD64VRNDSCALEPS512) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt32x4(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpTruncWithPrecisionFloat32x4(v *Value) bool { v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromInt32x4 [a] x y) - // result: (VPSHRDD128 [a] x y) + // match: (TruncWithPrecisionFloat32x4 [a] x) + // result: (VRNDSCALEPS128 [a+3] x) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDD128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + v.reset(OpAMD64VRNDSCALEPS128) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt32x8(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpTruncWithPrecisionFloat32x8(v *Value) bool { v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromInt32x8 [a] x y) - // result: (VPSHRDD256 [a] x y) + // match: (TruncWithPrecisionFloat32x8 [a] x) + // result: (VRNDSCALEPS256 [a+3] x) for { a := 
auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDD256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + v.reset(OpAMD64VRNDSCALEPS256) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt64x2(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpTruncWithPrecisionFloat64x2(v *Value) bool { v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromInt64x2 [a] x y) - // result: (VPSHRDQ128 [a] x y) + // match: (TruncWithPrecisionFloat64x2 [a] x) + // result: (VRNDSCALEPD128 [a+3] x) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDQ128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + v.reset(OpAMD64VRNDSCALEPD128) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt64x4(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpTruncWithPrecisionFloat64x4(v *Value) bool { v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromInt64x4 [a] x y) - // result: (VPSHRDQ256 [a] x y) + // match: (TruncWithPrecisionFloat64x4 [a] x) + // result: (VRNDSCALEPD256 [a+3] x) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDQ256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + v.reset(OpAMD64VRNDSCALEPD256) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt64x8(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpTruncWithPrecisionFloat64x8(v *Value) bool { v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromInt64x8 [a] x y) - // result: (VPSHRDQ512 [a] x y) + // match: (TruncWithPrecisionFloat64x8 [a] x) + // result: (VRNDSCALEPD512 [a+3] x) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDQ512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + v.reset(OpAMD64VRNDSCALEPD512) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint16x16(v *Value) bool { +func rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromUint16x16 [a] x y) - // result: (VPSHRDW256 [a] x y) + b := v.Block + // match: (TruncWithPrecisionMaskedFloat32x16 [a] x mask) + // result: (VRNDSCALEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDW256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint16x32(v *Value) bool { +func rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromUint16x32 [a] x y) - // result: (VPSHRDW512 [a] x y) + b := v.Block + // match: (TruncWithPrecisionMaskedFloat32x4 [a] x mask) + // result: (VRNDSCALEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDW512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func 
rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint16x8(v *Value) bool { +func rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromUint16x8 [a] x y) - // result: (VPSHRDW128 [a] x y) + b := v.Block + // match: (TruncWithPrecisionMaskedFloat32x8 [a] x mask) + // result: (VRNDSCALEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDW128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint32x16(v *Value) bool { +func rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromUint32x16 [a] x y) - // result: (VPSHRDD512 [a] x y) + b := v.Block + // match: (TruncWithPrecisionMaskedFloat64x2 [a] x mask) + // result: (VRNDSCALEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDD512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint32x4(v *Value) bool { +func rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromUint32x4 [a] x y) - // result: (VPSHRDD128 [a] x y) + b := v.Block + // match: (TruncWithPrecisionMaskedFloat64x4 [a] x mask) + // result: (VRNDSCALEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDD128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint32x8(v *Value) bool { +func rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromUint32x8 [a] x y) - // result: (VPSHRDD256 [a] x y) + b := v.Block + // match: (TruncWithPrecisionMaskedFloat64x8 [a] x mask) + // result: (VRNDSCALEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDD256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint64x2(v *Value) bool { +func rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromUint64x2 [a] x y) - // result: (VPSHRDQ128 [a] x y) + b := v.Block + // match: (UnsignedSignedQuadDotProdAccumulateMaskedInt32x16 x y z mask) + // result: (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) for { - a := 
auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSHRDQ128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint64x4(v *Value) bool { +func rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromUint64x4 [a] x y) - // result: (VPSHRDQ256 [a] x y) + b := v.Block + // match: (UnsignedSignedQuadDotProdAccumulateMaskedInt32x4 x y z mask) + // result: (VPDPBUSDMasked128 x y z (VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSHRDQ256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint64x8(v *Value) bool { +func rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromUint64x8 [a] x y) - // result: (VPSHRDQ512 [a] x y) + b := v.Block + // match: (UnsignedSignedQuadDotProdAccumulateMaskedInt32x8 x y z mask) + // result: (VPDPBUSDMasked256 x y z (VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSHRDQ512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpSlicemask(v *Value) bool { +func rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Slicemask x) - // result: (SARQconst (NEGQ x) [63]) + // match: (UnsignedSignedQuadDotProdAccumulateMaskedUint32x16 x y z mask) + // result: (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) for { - t := v.Type x := v_0 - v.reset(OpAMD64SARQconst) - v.AuxInt = int8ToAuxInt(63) - v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) - v0.AddArg(x) - v.AddArg(v0) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpSpectreIndex(v *Value) bool { +func rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (SpectreIndex x y) - // result: (CMOVQCC x (MOVQconst [0]) (CMPQ x y)) + // match: (UnsignedSignedQuadDotProdAccumulateMaskedUint32x4 x y z mask) + // result: (VPDPBUSDMasked128 x y z (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64CMOVQCC) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) - v0.AuxInt = int64ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v1.AddArg2(x, y) - v.AddArg3(x, v0, v1) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, 
types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpSpectreSliceIndex(v *Value) bool { +func rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (SpectreSliceIndex x y) - // result: (CMOVQHI x (MOVQconst [0]) (CMPQ x y)) + // match: (UnsignedSignedQuadDotProdAccumulateMaskedUint32x8 x y z mask) + // result: (VPDPBUSDMasked256 x y z (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64CMOVQHI) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) - v0.AuxInt = int64ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v1.AddArg2(x, y) - v.AddArg3(x, v0, v1) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpStore(v *Value) bool { +func rewriteValueAMD64_OpXorMaskedInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Store {t} ptr val mem) - // cond: t.Size() == 8 && t.IsFloat() - // result: (MOVSDstore ptr val mem) - for { - t := auxToType(v.Aux) - ptr := v_0 - val := v_1 - mem := v_2 - if !(t.Size() == 8 && t.IsFloat()) { - break - } - v.reset(OpAMD64MOVSDstore) - v.AddArg3(ptr, val, mem) - return true - } - // match: (Store {t} ptr val mem) - // cond: t.Size() == 4 && t.IsFloat() - // result: (MOVSSstore ptr val mem) - for { - t := auxToType(v.Aux) - ptr := v_0 - val := v_1 - mem := v_2 - if !(t.Size() == 4 && t.IsFloat()) { - break - } - v.reset(OpAMD64MOVSSstore) - v.AddArg3(ptr, val, mem) - return true - } - // match: (Store {t} ptr val mem) - // cond: t.Size() == 8 && !t.IsFloat() - // result: (MOVQstore ptr val mem) - for { - t := auxToType(v.Aux) - ptr := v_0 - val := v_1 - mem := v_2 - if !(t.Size() == 8 && !t.IsFloat()) { - break - } - v.reset(OpAMD64MOVQstore) - v.AddArg3(ptr, val, mem) - return true - } - // match: (Store {t} ptr val mem) - // cond: t.Size() == 4 && !t.IsFloat() - // result: (MOVLstore ptr val mem) - for { - t := auxToType(v.Aux) - ptr := v_0 - val := v_1 - mem := v_2 - if !(t.Size() == 4 && !t.IsFloat()) { - break - } - v.reset(OpAMD64MOVLstore) - v.AddArg3(ptr, val, mem) - return true - } - // match: (Store {t} ptr val mem) - // cond: t.Size() == 2 - // result: (MOVWstore ptr val mem) - for { - t := auxToType(v.Aux) - ptr := v_0 - val := v_1 - mem := v_2 - if !(t.Size() == 2) { - break - } - v.reset(OpAMD64MOVWstore) - v.AddArg3(ptr, val, mem) - return true - } - // match: (Store {t} ptr val mem) - // cond: t.Size() == 1 - // result: (MOVBstore ptr val mem) - for { - t := auxToType(v.Aux) - ptr := v_0 - val := v_1 - mem := v_2 - if !(t.Size() == 1) { - break - } - v.reset(OpAMD64MOVBstore) - v.AddArg3(ptr, val, mem) - return true - } - // match: (Store {t} ptr val mem) - // cond: t.Size() == 16 - // result: (VMOVDQUstore128 ptr val mem) - for { - t := auxToType(v.Aux) - ptr := v_0 - val := v_1 - mem := v_2 - if !(t.Size() == 16) { - break - } - v.reset(OpAMD64VMOVDQUstore128) - v.AddArg3(ptr, val, mem) - return true - } - // match: (Store {t} ptr val mem) - // cond: t.Size() == 32 - // result: (VMOVDQUstore256 ptr val mem) - for { - t := auxToType(v.Aux) - ptr := v_0 - val := v_1 - mem := v_2 - if !(t.Size() == 32) { - break - } - v.reset(OpAMD64VMOVDQUstore256) - v.AddArg3(ptr, val, mem) - return true - } - // match: (Store 
{t} ptr val mem) - // cond: t.Size() == 64 - // result: (VMOVDQUstore512 ptr val mem) + b := v.Block + // match: (XorMaskedInt32x16 x y mask) + // result: (VPXORDMasked512 x y (VPMOVVec32x16ToM mask)) for { - t := auxToType(v.Aux) - ptr := v_0 - val := v_1 - mem := v_2 - if !(t.Size() == 64) { - break - } - v.reset(OpAMD64VMOVDQUstore512) - v.AddArg3(ptr, val, mem) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpTrunc(v *Value) bool { +func rewriteValueAMD64_OpXorMaskedInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Trunc x) - // result: (ROUNDSD [3] x) + b := v.Block + // match: (XorMaskedInt32x4 x y mask) + // result: (VPXORDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 - v.reset(OpAMD64ROUNDSD) - v.AuxInt = int8ToAuxInt(3) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpTruncFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpXorMaskedInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (TruncFloat32x4 x) - // result: (VROUNDPS128 [3] x) + b := v.Block + // match: (XorMaskedInt32x8 x y mask) + // result: (VPXORDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 - v.reset(OpAMD64VROUNDPS128) - v.AuxInt = int8ToAuxInt(3) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpTruncFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpXorMaskedInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (TruncFloat32x8 x) - // result: (VROUNDPS256 [3] x) + b := v.Block + // match: (XorMaskedInt64x2 x y mask) + // result: (VPXORQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 - v.reset(OpAMD64VROUNDPS256) - v.AuxInt = int8ToAuxInt(3) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpTruncFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpXorMaskedInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (TruncFloat64x2 x) - // result: (VROUNDPD128 [3] x) + b := v.Block + // match: (XorMaskedInt64x4 x y mask) + // result: (VPXORQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 - v.reset(OpAMD64VROUNDPD128) - v.AuxInt = int8ToAuxInt(3) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpTruncFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpXorMaskedInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (TruncFloat64x4 x) - // result: (VROUNDPD256 [3] x) + b := v.Block + // match: (XorMaskedInt64x8 x y mask) + // result: (VPXORQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 - v.reset(OpAMD64VROUNDPD256) - v.AuxInt = int8ToAuxInt(3) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORQMasked512) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpTruncWithPrecisionFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpXorMaskedUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (TruncWithPrecisionFloat32x16 [a] x) - // result: (VRNDSCALEPS512 [a+3] x) + b := v.Block + // match: (XorMaskedUint32x16 x y mask) + // result: (VPXORDMasked512 x y (VPMOVVec32x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VRNDSCALEPS512) - v.AuxInt = int8ToAuxInt(a + 3) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpTruncWithPrecisionFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpXorMaskedUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (TruncWithPrecisionFloat32x4 [a] x) - // result: (VRNDSCALEPS128 [a+3] x) + b := v.Block + // match: (XorMaskedUint32x4 x y mask) + // result: (VPXORDMasked128 x y (VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VRNDSCALEPS128) - v.AuxInt = int8ToAuxInt(a + 3) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpTruncWithPrecisionFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpXorMaskedUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (TruncWithPrecisionFloat32x8 [a] x) - // result: (VRNDSCALEPS256 [a+3] x) + b := v.Block + // match: (XorMaskedUint32x8 x y mask) + // result: (VPXORDMasked256 x y (VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VRNDSCALEPS256) - v.AuxInt = int8ToAuxInt(a + 3) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpTruncWithPrecisionFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpXorMaskedUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (TruncWithPrecisionFloat64x2 [a] x) - // result: (VRNDSCALEPD128 [a+3] x) + b := v.Block + // match: (XorMaskedUint64x2 x y mask) + // result: (VPXORQMasked128 x y (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VRNDSCALEPD128) - v.AuxInt = int8ToAuxInt(a + 3) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpTruncWithPrecisionFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpXorMaskedUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (TruncWithPrecisionFloat64x4 [a] x) - // result: (VRNDSCALEPD256 [a+3] x) + b := v.Block + // match: (XorMaskedUint64x4 x y mask) + // result: (VPXORQMasked256 x y (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VRNDSCALEPD256) - v.AuxInt = int8ToAuxInt(a + 3) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, 
v0) return true } } -func rewriteValueAMD64_OpTruncWithPrecisionFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpXorMaskedUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (TruncWithPrecisionFloat64x8 [a] x) - // result: (VRNDSCALEPD512 [a+3] x) + b := v.Block + // match: (XorMaskedUint64x8 x y mask) + // result: (VPXORQMasked512 x y (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VRNDSCALEPD512) - v.AuxInt = int8ToAuxInt(a + 3) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index a476e66845627c..c6e8961738741b 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -23,6 +23,18 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Int64x2.Absolute", opLen1(ssa.OpAbsoluteInt64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int64x4.Absolute", opLen1(ssa.OpAbsoluteInt64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int64x8.Absolute", opLen1(ssa.OpAbsoluteInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.AbsoluteMasked", opLen2(ssa.OpAbsoluteMaskedInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.AbsoluteMasked", opLen2(ssa.OpAbsoluteMaskedInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.AbsoluteMasked", opLen2(ssa.OpAbsoluteMaskedInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.AbsoluteMasked", opLen2(ssa.OpAbsoluteMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.AbsoluteMasked", opLen2(ssa.OpAbsoluteMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.AbsoluteMasked", opLen2(ssa.OpAbsoluteMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.AbsoluteMasked", opLen2(ssa.OpAbsoluteMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.AbsoluteMasked", opLen2(ssa.OpAbsoluteMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.AbsoluteMasked", opLen2(ssa.OpAbsoluteMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.AbsoluteMasked", opLen2(ssa.OpAbsoluteMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.AbsoluteMasked", opLen2(ssa.OpAbsoluteMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.AbsoluteMasked", opLen2(ssa.OpAbsoluteMaskedInt64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Add", opLen2(ssa.OpAddFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Add", opLen2(ssa.OpAddFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.Add", opLen2(ssa.OpAddFloat32x16, types.TypeVec512), sys.AMD64) @@ -53,6 +65,36 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.Add", opLen2(ssa.OpAddUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.Add", opLen2(ssa.OpAddUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.Add", opLen2(ssa.OpAddUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.AddMasked", opLen3(ssa.OpAddMaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.AddMasked", opLen3(ssa.OpAddMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.AddMasked", opLen3(ssa.OpAddMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.AddMasked", opLen3(ssa.OpAddMaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.AddMasked", opLen3(ssa.OpAddMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.AddMasked", opLen3(ssa.OpAddMaskedFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.AddMasked", opLen3(ssa.OpAddMaskedInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.AddMasked", opLen3(ssa.OpAddMaskedInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.AddMasked", opLen3(ssa.OpAddMaskedInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.AddMasked", opLen3(ssa.OpAddMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.AddMasked", opLen3(ssa.OpAddMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.AddMasked", opLen3(ssa.OpAddMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.AddMasked", opLen3(ssa.OpAddMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.AddMasked", opLen3(ssa.OpAddMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.AddMasked", opLen3(ssa.OpAddMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.AddMasked", opLen3(ssa.OpAddMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.AddMasked", opLen3(ssa.OpAddMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.AddMasked", opLen3(ssa.OpAddMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.AddMasked", opLen3(ssa.OpAddMaskedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.AddMasked", opLen3(ssa.OpAddMaskedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.AddMasked", opLen3(ssa.OpAddMaskedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.AddMasked", opLen3(ssa.OpAddMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.AddMasked", opLen3(ssa.OpAddMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.AddMasked", opLen3(ssa.OpAddMaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.AddMasked", opLen3(ssa.OpAddMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.AddMasked", opLen3(ssa.OpAddMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.AddMasked", opLen3(ssa.OpAddMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.AddMasked", opLen3(ssa.OpAddMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.AddMasked", opLen3(ssa.OpAddMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.AddMasked", opLen3(ssa.OpAddMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.AddSub", opLen2(ssa.OpAddSubFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.AddSub", 
opLen2(ssa.OpAddSubFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.AddSub", opLen2(ssa.OpAddSubFloat64x2, types.TypeVec128), sys.AMD64) @@ -77,6 +119,18 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.And", opLen2(ssa.OpAndUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.And", opLen2(ssa.OpAndUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.And", opLen2(ssa.OpAndUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.AndMasked", opLen3(ssa.OpAndMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.AndMasked", opLen3(ssa.OpAndMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.AndMasked", opLen3(ssa.OpAndMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.AndMasked", opLen3(ssa.OpAndMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.AndMasked", opLen3(ssa.OpAndMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.AndMasked", opLen3(ssa.OpAndMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.AndMasked", opLen3(ssa.OpAndMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.AndMasked", opLen3(ssa.OpAndMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.AndMasked", opLen3(ssa.OpAndMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.AndMasked", opLen3(ssa.OpAndMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.AndMasked", opLen3(ssa.OpAndMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.AndMasked", opLen3(ssa.OpAndMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.AndNot", opLen2(ssa.OpAndNotInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.AndNot", opLen2(ssa.OpAndNotInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x8.AndNot", opLen2(ssa.OpAndNotInt16x8, types.TypeVec128), sys.AMD64) @@ -97,24 +151,54 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.AndNot", opLen2(ssa.OpAndNotUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.AndNot", opLen2(ssa.OpAndNotUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.AndNot", opLen2(ssa.OpAndNotUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.AndNotMasked", opLen3(ssa.OpAndNotMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.AndNotMasked", opLen3(ssa.OpAndNotMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.AndNotMasked", opLen3(ssa.OpAndNotMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.AndNotMasked", opLen3(ssa.OpAndNotMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.AndNotMasked", opLen3(ssa.OpAndNotMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.AndNotMasked", opLen3(ssa.OpAndNotMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.AndNotMasked", opLen3(ssa.OpAndNotMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.AndNotMasked", opLen3(ssa.OpAndNotMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.AndNotMasked", opLen3(ssa.OpAndNotMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.AndNotMasked", opLen3(ssa.OpAndNotMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.AndNotMasked", opLen3(ssa.OpAndNotMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.AndNotMasked", opLen3(ssa.OpAndNotMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.ApproximateReciprocalMasked", opLen2(ssa.OpApproximateReciprocalMaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.ApproximateReciprocalMasked", opLen2(ssa.OpApproximateReciprocalMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.ApproximateReciprocalMasked", opLen2(ssa.OpApproximateReciprocalMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.ApproximateReciprocalMasked", opLen2(ssa.OpApproximateReciprocalMaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.ApproximateReciprocalMasked", opLen2(ssa.OpApproximateReciprocalMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.ApproximateReciprocalMasked", opLen2(ssa.OpApproximateReciprocalMaskedFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, 
"Float32x16.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.ApproximateReciprocalOfSqrtMasked", opLen2(ssa.OpApproximateReciprocalOfSqrtMaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.ApproximateReciprocalOfSqrtMasked", opLen2(ssa.OpApproximateReciprocalOfSqrtMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.ApproximateReciprocalOfSqrtMasked", opLen2(ssa.OpApproximateReciprocalOfSqrtMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.ApproximateReciprocalOfSqrtMasked", opLen2(ssa.OpApproximateReciprocalOfSqrtMaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.ApproximateReciprocalOfSqrtMasked", opLen2(ssa.OpApproximateReciprocalOfSqrtMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.ApproximateReciprocalOfSqrtMasked", opLen2(ssa.OpApproximateReciprocalOfSqrtMaskedFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x16.Average", opLen2(ssa.OpAverageUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x32.Average", opLen2(ssa.OpAverageUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x64.Average", opLen2(ssa.OpAverageUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint16x8.Average", opLen2(ssa.OpAverageUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x16.Average", opLen2(ssa.OpAverageUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x32.Average", opLen2(ssa.OpAverageUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.AverageMasked", opLen3(ssa.OpAverageMaskedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.AverageMasked", opLen3(ssa.OpAverageMaskedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.AverageMasked", opLen3(ssa.OpAverageMaskedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.AverageMasked", opLen3(ssa.OpAverageMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.AverageMasked", opLen3(ssa.OpAverageMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.AverageMasked", opLen3(ssa.OpAverageMaskedUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Ceil", opLen1(ssa.OpCeilFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Ceil", opLen1(ssa.OpCeilFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.Ceil", opLen1(ssa.OpCeilFloat64x2, types.TypeVec128), sys.AMD64) @@ -125,36 +209,72 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float64x2.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.CeilWithPrecisionMasked", opLen2Imm8(ssa.OpCeilWithPrecisionMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.CeilWithPrecisionMasked", opLen2Imm8(ssa.OpCeilWithPrecisionMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.CeilWithPrecisionMasked", opLen2Imm8(ssa.OpCeilWithPrecisionMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.CeilWithPrecisionMasked", opLen2Imm8(ssa.OpCeilWithPrecisionMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.CeilWithPrecisionMasked", opLen2Imm8(ssa.OpCeilWithPrecisionMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.CeilWithPrecisionMasked", opLen2Imm8(ssa.OpCeilWithPrecisionMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.DiffWithCeilWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithCeilWithPrecisionMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.DiffWithCeilWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithCeilWithPrecisionMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithCeilWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithCeilWithPrecisionMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.DiffWithCeilWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithCeilWithPrecisionMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.DiffWithCeilWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithCeilWithPrecisionMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.DiffWithCeilWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithCeilWithPrecisionMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.DiffWithFloorWithPrecision", 
opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.DiffWithFloorWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithFloorWithPrecisionMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.DiffWithFloorWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithFloorWithPrecisionMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithFloorWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithFloorWithPrecisionMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.DiffWithFloorWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithFloorWithPrecisionMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.DiffWithFloorWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithFloorWithPrecisionMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.DiffWithFloorWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithFloorWithPrecisionMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.DiffWithRoundWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithRoundWithPrecisionMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.DiffWithRoundWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithRoundWithPrecisionMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithRoundWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithRoundWithPrecisionMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.DiffWithRoundWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithRoundWithPrecisionMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.DiffWithRoundWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithRoundWithPrecisionMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.DiffWithRoundWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithRoundWithPrecisionMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.DiffWithTruncWithPrecision", 
opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.DiffWithTruncWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithTruncWithPrecisionMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.DiffWithTruncWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithTruncWithPrecisionMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithTruncWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithTruncWithPrecisionMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.DiffWithTruncWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithTruncWithPrecisionMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.DiffWithTruncWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithTruncWithPrecisionMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.DiffWithTruncWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithTruncWithPrecisionMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.Div", opLen2(ssa.OpDivFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Div", opLen2(ssa.OpDivFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.Div", opLen2(ssa.OpDivFloat32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.Div", opLen2(ssa.OpDivFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.Div", opLen2(ssa.OpDivFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.Div", opLen2(ssa.OpDivFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.DivMasked", opLen3(ssa.OpDivMaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.DivMasked", opLen3(ssa.OpDivMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.DivMasked", opLen3(ssa.OpDivMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.DivMasked", opLen3(ssa.OpDivMaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.DivMasked", opLen3(ssa.OpDivMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.DivMasked", opLen3(ssa.OpDivMaskedFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.DotProdBroadcast", opLen2(ssa.OpDotProdBroadcastFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x16.Equal", opLen2(ssa.OpEqualInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Equal", opLen2(ssa.OpEqualInt8x32, types.TypeVec256), sys.AMD64) @@ -186,6 +306,36 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.Equal", opLen2(ssa.OpEqualUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.Equal", opLen2(ssa.OpEqualUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.Equal", opLen2(ssa.OpEqualUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.EqualMasked", opLen3(ssa.OpEqualMaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.EqualMasked", opLen3(ssa.OpEqualMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.EqualMasked", opLen3(ssa.OpEqualMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.EqualMasked", opLen3(ssa.OpEqualMaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.EqualMasked", opLen3(ssa.OpEqualMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.EqualMasked", opLen3(ssa.OpEqualMaskedFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.EqualMasked", opLen3(ssa.OpEqualMaskedInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.EqualMasked", opLen3(ssa.OpEqualMaskedInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.EqualMasked", opLen3(ssa.OpEqualMaskedInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.EqualMasked", opLen3(ssa.OpEqualMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.EqualMasked", opLen3(ssa.OpEqualMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.EqualMasked", opLen3(ssa.OpEqualMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.EqualMasked", opLen3(ssa.OpEqualMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.EqualMasked", opLen3(ssa.OpEqualMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.EqualMasked", opLen3(ssa.OpEqualMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.EqualMasked", opLen3(ssa.OpEqualMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.EqualMasked", opLen3(ssa.OpEqualMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.EqualMasked", opLen3(ssa.OpEqualMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.EqualMasked", opLen3(ssa.OpEqualMaskedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.EqualMasked", opLen3(ssa.OpEqualMaskedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.EqualMasked", opLen3(ssa.OpEqualMaskedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.EqualMasked", opLen3(ssa.OpEqualMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.EqualMasked", opLen3(ssa.OpEqualMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.EqualMasked", opLen3(ssa.OpEqualMaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.EqualMasked", opLen3(ssa.OpEqualMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.EqualMasked", opLen3(ssa.OpEqualMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.EqualMasked", opLen3(ssa.OpEqualMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.EqualMasked", opLen3(ssa.OpEqualMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.EqualMasked", opLen3(ssa.OpEqualMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.EqualMasked", opLen3(ssa.OpEqualMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, 
"Float32x4.Floor", opLen1(ssa.OpFloorFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Floor", opLen1(ssa.OpFloorFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.Floor", opLen1(ssa.OpFloorFloat64x2, types.TypeVec128), sys.AMD64) @@ -196,33 +346,66 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float64x2.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.FloorWithPrecisionMasked", opLen2Imm8(ssa.OpFloorWithPrecisionMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.FloorWithPrecisionMasked", opLen2Imm8(ssa.OpFloorWithPrecisionMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.FloorWithPrecisionMasked", opLen2Imm8(ssa.OpFloorWithPrecisionMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.FloorWithPrecisionMasked", opLen2Imm8(ssa.OpFloorWithPrecisionMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.FloorWithPrecisionMasked", opLen2Imm8(ssa.OpFloorWithPrecisionMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.FloorWithPrecisionMasked", opLen2Imm8(ssa.OpFloorWithPrecisionMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.FusedMultiplyAdd", opLen3(ssa.OpFusedMultiplyAddFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.FusedMultiplyAdd", opLen3(ssa.OpFusedMultiplyAddFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.FusedMultiplyAdd", opLen3(ssa.OpFusedMultiplyAddFloat32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.FusedMultiplyAdd", opLen3(ssa.OpFusedMultiplyAddFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.FusedMultiplyAdd", opLen3(ssa.OpFusedMultiplyAddFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.FusedMultiplyAdd", opLen3(ssa.OpFusedMultiplyAddFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplyAddMasked", opLen4(ssa.OpFusedMultiplyAddMaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplyAddMasked", opLen4(ssa.OpFusedMultiplyAddMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplyAddMasked", opLen4(ssa.OpFusedMultiplyAddMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.FusedMultiplyAddMasked", opLen4(ssa.OpFusedMultiplyAddMaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplyAddMasked", opLen4(ssa.OpFusedMultiplyAddMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplyAddMasked", opLen4(ssa.OpFusedMultiplyAddMaskedFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.FusedMultiplyAddSub", opLen3(ssa.OpFusedMultiplyAddSubFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.FusedMultiplyAddSub", opLen3(ssa.OpFusedMultiplyAddSubFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.FusedMultiplyAddSub", opLen3(ssa.OpFusedMultiplyAddSubFloat32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.FusedMultiplyAddSub", 
opLen3(ssa.OpFusedMultiplyAddSubFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.FusedMultiplyAddSub", opLen3(ssa.OpFusedMultiplyAddSubFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.FusedMultiplyAddSub", opLen3(ssa.OpFusedMultiplyAddSubFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplyAddSubMasked", opLen4(ssa.OpFusedMultiplyAddSubMaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplyAddSubMasked", opLen4(ssa.OpFusedMultiplyAddSubMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplyAddSubMasked", opLen4(ssa.OpFusedMultiplyAddSubMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.FusedMultiplyAddSubMasked", opLen4(ssa.OpFusedMultiplyAddSubMaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplyAddSubMasked", opLen4(ssa.OpFusedMultiplyAddSubMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplyAddSubMasked", opLen4(ssa.OpFusedMultiplyAddSubMaskedFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplySubAddMasked", opLen4(ssa.OpFusedMultiplySubAddMaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplySubAddMasked", opLen4(ssa.OpFusedMultiplySubAddMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplySubAddMasked", opLen4(ssa.OpFusedMultiplySubAddMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.FusedMultiplySubAddMasked", opLen4(ssa.OpFusedMultiplySubAddMaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplySubAddMasked", opLen4(ssa.OpFusedMultiplySubAddMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplySubAddMasked", opLen4(ssa.OpFusedMultiplySubAddMaskedFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x16.GaloisFieldAffineTransform", opLen2Imm8_2I(ssa.OpGaloisFieldAffineTransformUint8x16, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Uint8x32.GaloisFieldAffineTransform", opLen2Imm8_2I(ssa.OpGaloisFieldAffineTransformUint8x32, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Uint8x64.GaloisFieldAffineTransform", opLen2Imm8_2I(ssa.OpGaloisFieldAffineTransformUint8x64, types.TypeVec512, 0), sys.AMD64) addF(simdPackage, "Uint8x16.GaloisFieldAffineTransformInversed", opLen2Imm8_2I(ssa.OpGaloisFieldAffineTransformInversedUint8x16, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Uint8x32.GaloisFieldAffineTransformInversed", opLen2Imm8_2I(ssa.OpGaloisFieldAffineTransformInversedUint8x32, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Uint8x64.GaloisFieldAffineTransformInversed", 
opLen2Imm8_2I(ssa.OpGaloisFieldAffineTransformInversedUint8x64, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint8x16.GaloisFieldAffineTransformInversedMasked", opLen3Imm8_2I(ssa.OpGaloisFieldAffineTransformInversedMaskedUint8x16, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint8x32.GaloisFieldAffineTransformInversedMasked", opLen3Imm8_2I(ssa.OpGaloisFieldAffineTransformInversedMaskedUint8x32, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint8x64.GaloisFieldAffineTransformInversedMasked", opLen3Imm8_2I(ssa.OpGaloisFieldAffineTransformInversedMaskedUint8x64, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint8x16.GaloisFieldAffineTransformMasked", opLen3Imm8_2I(ssa.OpGaloisFieldAffineTransformMaskedUint8x16, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint8x32.GaloisFieldAffineTransformMasked", opLen3Imm8_2I(ssa.OpGaloisFieldAffineTransformMaskedUint8x32, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint8x64.GaloisFieldAffineTransformMasked", opLen3Imm8_2I(ssa.OpGaloisFieldAffineTransformMaskedUint8x64, types.TypeVec512, 0), sys.AMD64) addF(simdPackage, "Uint8x16.GaloisFieldMul", opLen2(ssa.OpGaloisFieldMulUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x32.GaloisFieldMul", opLen2(ssa.OpGaloisFieldMulUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x64.GaloisFieldMul", opLen2(ssa.OpGaloisFieldMulUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.GaloisFieldMulMasked", opLen3(ssa.OpGaloisFieldMulMaskedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.GaloisFieldMulMasked", opLen3(ssa.OpGaloisFieldMulMaskedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.GaloisFieldMulMasked", opLen3(ssa.OpGaloisFieldMulMaskedUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x8.Get128", opLen1Imm8(ssa.OpGet128Float32x8, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Float64x4.Get128", opLen1Imm8(ssa.OpGet128Float64x4, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Int8x32.Get128", opLen1Imm8(ssa.OpGet128Int8x32, types.TypeVec128, 0), sys.AMD64) @@ -301,12 +484,78 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.GreaterEqual", opLen2(ssa.OpGreaterEqualUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.GreaterEqual", opLen2(ssa.OpGreaterEqualUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.GreaterEqual", opLen2(ssa.OpGreaterEqualUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.GreaterEqualMasked", 
opLen3(ssa.OpGreaterEqualMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.GreaterMasked", opLen3(ssa.OpGreaterMaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.GreaterMasked", opLen3(ssa.OpGreaterMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.GreaterMasked", opLen3(ssa.OpGreaterMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.GreaterMasked", opLen3(ssa.OpGreaterMaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.GreaterMasked", opLen3(ssa.OpGreaterMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.GreaterMasked", opLen3(ssa.OpGreaterMaskedFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.GreaterMasked", opLen3(ssa.OpGreaterMaskedInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.GreaterMasked", opLen3(ssa.OpGreaterMaskedInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.GreaterMasked", opLen3(ssa.OpGreaterMaskedInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.GreaterMasked", opLen3(ssa.OpGreaterMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.GreaterMasked", opLen3(ssa.OpGreaterMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.GreaterMasked", opLen3(ssa.OpGreaterMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.GreaterMasked", opLen3(ssa.OpGreaterMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.GreaterMasked", opLen3(ssa.OpGreaterMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.GreaterMasked", opLen3(ssa.OpGreaterMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.GreaterMasked", opLen3(ssa.OpGreaterMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.GreaterMasked", opLen3(ssa.OpGreaterMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.GreaterMasked", opLen3(ssa.OpGreaterMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.GreaterMasked", opLen3(ssa.OpGreaterMaskedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.GreaterMasked", opLen3(ssa.OpGreaterMaskedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.GreaterMasked", opLen3(ssa.OpGreaterMaskedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.GreaterMasked", opLen3(ssa.OpGreaterMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.GreaterMasked", opLen3(ssa.OpGreaterMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.GreaterMasked", opLen3(ssa.OpGreaterMaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.GreaterMasked", opLen3(ssa.OpGreaterMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.GreaterMasked", opLen3(ssa.OpGreaterMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.GreaterMasked", opLen3(ssa.OpGreaterMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.GreaterMasked", opLen3(ssa.OpGreaterMaskedUint64x2, 
types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.GreaterMasked", opLen3(ssa.OpGreaterMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.GreaterMasked", opLen3(ssa.OpGreaterMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.IsNan", opLen2(ssa.OpIsNanFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.IsNan", opLen2(ssa.OpIsNanFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.IsNan", opLen2(ssa.OpIsNanFloat32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.IsNan", opLen2(ssa.OpIsNanFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.IsNan", opLen2(ssa.OpIsNanFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.IsNan", opLen2(ssa.OpIsNanFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.IsNanMasked", opLen3(ssa.OpIsNanMaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.IsNanMasked", opLen3(ssa.OpIsNanMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.IsNanMasked", opLen3(ssa.OpIsNanMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.IsNanMasked", opLen3(ssa.OpIsNanMaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.IsNanMasked", opLen3(ssa.OpIsNanMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.IsNanMasked", opLen3(ssa.OpIsNanMaskedFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Less", opLen2(ssa.OpLessFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Less", opLen2(ssa.OpLessFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.Less", opLen2(ssa.OpLessFloat32x16, types.TypeVec512), sys.AMD64) @@ -367,771 +616,66 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.LessEqual", opLen2(ssa.OpLessEqualUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.LessEqual", opLen2(ssa.OpLessEqualUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.LessEqual", opLen2(ssa.OpLessEqualUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedAdd", opLen3(ssa.OpMaskedAddFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedAdd", opLen3(ssa.OpMaskedAddFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedAdd", opLen3(ssa.OpMaskedAddFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedAdd", opLen3(ssa.OpMaskedAddFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedAdd", opLen3(ssa.OpMaskedAddFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedAdd", opLen3(ssa.OpMaskedAddFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.MaskedAdd", opLen3(ssa.OpMaskedAddInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.MaskedAdd", opLen3(ssa.OpMaskedAddInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.MaskedAdd", opLen3(ssa.OpMaskedAddInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedAdd", opLen3(ssa.OpMaskedAddInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedAdd", opLen3(ssa.OpMaskedAddInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedAdd", opLen3(ssa.OpMaskedAddInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedAdd", opLen3(ssa.OpMaskedAddInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedAdd", opLen3(ssa.OpMaskedAddInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedAdd", opLen3(ssa.OpMaskedAddInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedAdd", opLen3(ssa.OpMaskedAddInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedAdd", opLen3(ssa.OpMaskedAddInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedAdd", opLen3(ssa.OpMaskedAddInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, 
"Uint8x16.MaskedAdd", opLen3(ssa.OpMaskedAddUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedAdd", opLen3(ssa.OpMaskedAddUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.MaskedAdd", opLen3(ssa.OpMaskedAddUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedAdd", opLen3(ssa.OpMaskedAddUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedAdd", opLen3(ssa.OpMaskedAddUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedAdd", opLen3(ssa.OpMaskedAddUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedAdd", opLen3(ssa.OpMaskedAddUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedAdd", opLen3(ssa.OpMaskedAddUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedAdd", opLen3(ssa.OpMaskedAddUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedAdd", opLen3(ssa.OpMaskedAddUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedAdd", opLen3(ssa.OpMaskedAddUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedAdd", opLen3(ssa.OpMaskedAddUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedAnd", opLen3(ssa.OpMaskedAndInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedAnd", opLen3(ssa.OpMaskedAndInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedAnd", opLen3(ssa.OpMaskedAndInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedAnd", opLen3(ssa.OpMaskedAndInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedAnd", opLen3(ssa.OpMaskedAndInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedAnd", opLen3(ssa.OpMaskedAndInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedAnd", opLen3(ssa.OpMaskedAndUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedAnd", opLen3(ssa.OpMaskedAndUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedAnd", opLen3(ssa.OpMaskedAndUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedAnd", opLen3(ssa.OpMaskedAndUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedAnd", opLen3(ssa.OpMaskedAndUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedAnd", opLen3(ssa.OpMaskedAndUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint64x2, types.TypeVec128), 
sys.AMD64)
-	addF(simdPackage, "Uint64x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint64x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Float32x4.MaskedApproximateReciprocal", opLen2(ssa.OpMaskedApproximateReciprocalFloat32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Float32x8.MaskedApproximateReciprocal", opLen2(ssa.OpMaskedApproximateReciprocalFloat32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float32x16.MaskedApproximateReciprocal", opLen2(ssa.OpMaskedApproximateReciprocalFloat32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Float64x2.MaskedApproximateReciprocal", opLen2(ssa.OpMaskedApproximateReciprocalFloat64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Float64x4.MaskedApproximateReciprocal", opLen2(ssa.OpMaskedApproximateReciprocalFloat64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float64x8.MaskedApproximateReciprocal", opLen2(ssa.OpMaskedApproximateReciprocalFloat64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Float32x4.MaskedApproximateReciprocalOfSqrt", opLen2(ssa.OpMaskedApproximateReciprocalOfSqrtFloat32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Float32x8.MaskedApproximateReciprocalOfSqrt", opLen2(ssa.OpMaskedApproximateReciprocalOfSqrtFloat32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float32x16.MaskedApproximateReciprocalOfSqrt", opLen2(ssa.OpMaskedApproximateReciprocalOfSqrtFloat32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Float64x2.MaskedApproximateReciprocalOfSqrt", opLen2(ssa.OpMaskedApproximateReciprocalOfSqrtFloat64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Float64x4.MaskedApproximateReciprocalOfSqrt", opLen2(ssa.OpMaskedApproximateReciprocalOfSqrtFloat64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float64x8.MaskedApproximateReciprocalOfSqrt", opLen2(ssa.OpMaskedApproximateReciprocalOfSqrtFloat64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint8x16.MaskedAverage", opLen3(ssa.OpMaskedAverageUint8x16, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint8x32.MaskedAverage", opLen3(ssa.OpMaskedAverageUint8x32, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint8x64.MaskedAverage", opLen3(ssa.OpMaskedAverageUint8x64, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint16x8.MaskedAverage", opLen3(ssa.OpMaskedAverageUint16x8, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint16x16.MaskedAverage", opLen3(ssa.OpMaskedAverageUint16x16, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint16x32.MaskedAverage", opLen3(ssa.OpMaskedAverageUint16x32, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Float32x4.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64)
-	addF(simdPackage, "Float32x8.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64)
-	addF(simdPackage, "Float32x16.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64)
-	addF(simdPackage, "Float64x2.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64)
-	addF(simdPackage, "Float64x4.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64)
-	addF(simdPackage, "Float64x8.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64)
-	addF(simdPackage, "Float32x4.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64)
-	addF(simdPackage, "Float32x8.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64)
-	addF(simdPackage, "Float32x16.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64)
-	addF(simdPackage, "Float64x2.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64)
-	addF(simdPackage, "Float64x4.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64)
-	addF(simdPackage, "Float64x8.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64)
-	addF(simdPackage, "Float32x4.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64)
-	addF(simdPackage, "Float32x8.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64)
-	addF(simdPackage, "Float32x16.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64)
-	addF(simdPackage, "Float64x2.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64)
-	addF(simdPackage, "Float64x4.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64)
-	addF(simdPackage, "Float64x8.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64)
-	addF(simdPackage, "Float32x4.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64)
-	addF(simdPackage, "Float32x8.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64)
-	addF(simdPackage, "Float32x16.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64)
-	addF(simdPackage, "Float64x2.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64)
-	addF(simdPackage, "Float64x4.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64)
-	addF(simdPackage, "Float64x8.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64)
-	addF(simdPackage, "Float32x4.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64)
-	addF(simdPackage, "Float32x8.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64)
-	addF(simdPackage, "Float32x16.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64)
-	addF(simdPackage, "Float64x2.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64)
-	addF(simdPackage, "Float64x4.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64)
-	addF(simdPackage, "Float64x8.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64)
-	addF(simdPackage, "Float32x4.MaskedDiv", opLen3(ssa.OpMaskedDivFloat32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Float32x8.MaskedDiv", opLen3(ssa.OpMaskedDivFloat32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float32x16.MaskedDiv", opLen3(ssa.OpMaskedDivFloat32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Float64x2.MaskedDiv", opLen3(ssa.OpMaskedDivFloat64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Float64x4.MaskedDiv", opLen3(ssa.OpMaskedDivFloat64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float64x8.MaskedDiv", opLen3(ssa.OpMaskedDivFloat64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Float32x4.MaskedEqual", opLen3(ssa.OpMaskedEqualFloat32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Float32x8.MaskedEqual", opLen3(ssa.OpMaskedEqualFloat32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float32x16.MaskedEqual", opLen3(ssa.OpMaskedEqualFloat32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Float64x2.MaskedEqual", opLen3(ssa.OpMaskedEqualFloat64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Float64x4.MaskedEqual", opLen3(ssa.OpMaskedEqualFloat64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float64x8.MaskedEqual", opLen3(ssa.OpMaskedEqualFloat64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int8x16.MaskedEqual", opLen3(ssa.OpMaskedEqualInt8x16, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int8x32.MaskedEqual", opLen3(ssa.OpMaskedEqualInt8x32, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int8x64.MaskedEqual", opLen3(ssa.OpMaskedEqualInt8x64, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int16x8.MaskedEqual", opLen3(ssa.OpMaskedEqualInt16x8, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int16x16.MaskedEqual", opLen3(ssa.OpMaskedEqualInt16x16, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int16x32.MaskedEqual", opLen3(ssa.OpMaskedEqualInt16x32, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int32x4.MaskedEqual", opLen3(ssa.OpMaskedEqualInt32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int32x8.MaskedEqual", opLen3(ssa.OpMaskedEqualInt32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int32x16.MaskedEqual", opLen3(ssa.OpMaskedEqualInt32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int64x2.MaskedEqual", opLen3(ssa.OpMaskedEqualInt64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int64x4.MaskedEqual", opLen3(ssa.OpMaskedEqualInt64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int64x8.MaskedEqual", opLen3(ssa.OpMaskedEqualInt64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint8x16.MaskedEqual", opLen3(ssa.OpMaskedEqualUint8x16, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint8x32.MaskedEqual", opLen3(ssa.OpMaskedEqualUint8x32, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint8x64.MaskedEqual", opLen3(ssa.OpMaskedEqualUint8x64, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint16x8.MaskedEqual", opLen3(ssa.OpMaskedEqualUint16x8, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint16x16.MaskedEqual", opLen3(ssa.OpMaskedEqualUint16x16, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint16x32.MaskedEqual", opLen3(ssa.OpMaskedEqualUint16x32, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint32x4.MaskedEqual", opLen3(ssa.OpMaskedEqualUint32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint32x8.MaskedEqual", opLen3(ssa.OpMaskedEqualUint32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint32x16.MaskedEqual", opLen3(ssa.OpMaskedEqualUint32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint64x2.MaskedEqual", opLen3(ssa.OpMaskedEqualUint64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint64x4.MaskedEqual", opLen3(ssa.OpMaskedEqualUint64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint64x8.MaskedEqual", opLen3(ssa.OpMaskedEqualUint64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Float32x4.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64)
-	addF(simdPackage, "Float32x8.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64)
-	addF(simdPackage, "Float32x16.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64)
-	addF(simdPackage, "Float64x2.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64)
-	addF(simdPackage, "Float64x4.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64)
-	addF(simdPackage, "Float64x8.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64)
-	addF(simdPackage, "Float32x4.MaskedFusedMultiplyAdd", opLen4(ssa.OpMaskedFusedMultiplyAddFloat32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Float32x8.MaskedFusedMultiplyAdd", opLen4(ssa.OpMaskedFusedMultiplyAddFloat32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float32x16.MaskedFusedMultiplyAdd", opLen4(ssa.OpMaskedFusedMultiplyAddFloat32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Float64x2.MaskedFusedMultiplyAdd", opLen4(ssa.OpMaskedFusedMultiplyAddFloat64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Float64x4.MaskedFusedMultiplyAdd", opLen4(ssa.OpMaskedFusedMultiplyAddFloat64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float64x8.MaskedFusedMultiplyAdd", opLen4(ssa.OpMaskedFusedMultiplyAddFloat64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Float32x4.MaskedFusedMultiplyAddSub", opLen4(ssa.OpMaskedFusedMultiplyAddSubFloat32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Float32x8.MaskedFusedMultiplyAddSub", opLen4(ssa.OpMaskedFusedMultiplyAddSubFloat32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float32x16.MaskedFusedMultiplyAddSub", opLen4(ssa.OpMaskedFusedMultiplyAddSubFloat32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Float64x2.MaskedFusedMultiplyAddSub", opLen4(ssa.OpMaskedFusedMultiplyAddSubFloat64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Float64x4.MaskedFusedMultiplyAddSub", opLen4(ssa.OpMaskedFusedMultiplyAddSubFloat64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float64x8.MaskedFusedMultiplyAddSub", opLen4(ssa.OpMaskedFusedMultiplyAddSubFloat64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Float32x4.MaskedFusedMultiplySubAdd", opLen4(ssa.OpMaskedFusedMultiplySubAddFloat32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Float32x8.MaskedFusedMultiplySubAdd", opLen4(ssa.OpMaskedFusedMultiplySubAddFloat32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float32x16.MaskedFusedMultiplySubAdd", opLen4(ssa.OpMaskedFusedMultiplySubAddFloat32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Float64x2.MaskedFusedMultiplySubAdd", opLen4(ssa.OpMaskedFusedMultiplySubAddFloat64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Float64x4.MaskedFusedMultiplySubAdd", opLen4(ssa.OpMaskedFusedMultiplySubAddFloat64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float64x8.MaskedFusedMultiplySubAdd", opLen4(ssa.OpMaskedFusedMultiplySubAddFloat64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint8x16.MaskedGaloisFieldAffineTransform", opLen3Imm8_2I(ssa.OpMaskedGaloisFieldAffineTransformUint8x16, types.TypeVec128, 0), sys.AMD64)
-	addF(simdPackage, "Uint8x32.MaskedGaloisFieldAffineTransform", opLen3Imm8_2I(ssa.OpMaskedGaloisFieldAffineTransformUint8x32, types.TypeVec256, 0), sys.AMD64)
-	addF(simdPackage, "Uint8x64.MaskedGaloisFieldAffineTransform", opLen3Imm8_2I(ssa.OpMaskedGaloisFieldAffineTransformUint8x64, types.TypeVec512, 0), sys.AMD64)
-	addF(simdPackage, "Uint8x16.MaskedGaloisFieldAffineTransformInversed", opLen3Imm8_2I(ssa.OpMaskedGaloisFieldAffineTransformInversedUint8x16, types.TypeVec128, 0), sys.AMD64)
-	addF(simdPackage, "Uint8x32.MaskedGaloisFieldAffineTransformInversed", opLen3Imm8_2I(ssa.OpMaskedGaloisFieldAffineTransformInversedUint8x32, types.TypeVec256, 0), sys.AMD64)
-	addF(simdPackage, "Uint8x64.MaskedGaloisFieldAffineTransformInversed", opLen3Imm8_2I(ssa.OpMaskedGaloisFieldAffineTransformInversedUint8x64, types.TypeVec512, 0), sys.AMD64)
-	addF(simdPackage, "Uint8x16.MaskedGaloisFieldMul", opLen3(ssa.OpMaskedGaloisFieldMulUint8x16, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint8x32.MaskedGaloisFieldMul", opLen3(ssa.OpMaskedGaloisFieldMulUint8x32, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint8x64.MaskedGaloisFieldMul", opLen3(ssa.OpMaskedGaloisFieldMulUint8x64, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Float32x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Float32x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float32x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Float64x2.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Float64x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float64x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int8x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt8x16, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int8x32.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt8x32, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int8x64.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt8x64, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int16x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt16x8, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int16x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt16x16, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int16x32.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt16x32, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int32x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int32x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int32x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int64x2.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int64x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int64x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint8x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint8x16, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint8x32.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint8x32, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint8x64.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint8x64, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint16x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint16x8, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint16x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint16x16, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint16x32.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint16x32, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint32x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint32x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint32x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint64x2.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint64x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint64x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Float32x4.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualFloat32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Float32x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualFloat32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float32x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualFloat32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Float64x2.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualFloat64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Float64x4.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualFloat64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float64x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualFloat64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int8x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt8x16, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int8x32.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt8x32, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int8x64.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt8x64, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int16x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt16x8, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int16x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt16x16, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int16x32.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt16x32, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int32x4.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int32x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int32x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int64x2.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int64x4.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int64x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint8x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint8x16, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint8x32.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint8x32, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint8x64.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint8x64, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint16x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint16x8, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint16x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint16x16, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint16x32.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint16x32, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint32x4.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint32x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint32x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint64x2.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint64x4.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint64x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Float32x4.MaskedIsNan", opLen3(ssa.OpMaskedIsNanFloat32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Float32x8.MaskedIsNan", opLen3(ssa.OpMaskedIsNanFloat32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float32x16.MaskedIsNan", opLen3(ssa.OpMaskedIsNanFloat32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Float64x2.MaskedIsNan", opLen3(ssa.OpMaskedIsNanFloat64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Float64x4.MaskedIsNan", opLen3(ssa.OpMaskedIsNanFloat64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float64x8.MaskedIsNan", opLen3(ssa.OpMaskedIsNanFloat64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Float32x4.MaskedLess", opLen3(ssa.OpMaskedLessFloat32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Float32x8.MaskedLess", opLen3(ssa.OpMaskedLessFloat32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float32x16.MaskedLess", opLen3(ssa.OpMaskedLessFloat32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Float64x2.MaskedLess", opLen3(ssa.OpMaskedLessFloat64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Float64x4.MaskedLess", opLen3(ssa.OpMaskedLessFloat64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float64x8.MaskedLess", opLen3(ssa.OpMaskedLessFloat64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int8x16.MaskedLess", opLen3(ssa.OpMaskedLessInt8x16, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int8x32.MaskedLess", opLen3(ssa.OpMaskedLessInt8x32, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int8x64.MaskedLess", opLen3(ssa.OpMaskedLessInt8x64, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int16x8.MaskedLess", opLen3(ssa.OpMaskedLessInt16x8, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int16x16.MaskedLess", opLen3(ssa.OpMaskedLessInt16x16, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int16x32.MaskedLess", opLen3(ssa.OpMaskedLessInt16x32, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int32x4.MaskedLess", opLen3(ssa.OpMaskedLessInt32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int32x8.MaskedLess", opLen3(ssa.OpMaskedLessInt32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int32x16.MaskedLess", opLen3(ssa.OpMaskedLessInt32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int64x2.MaskedLess", opLen3(ssa.OpMaskedLessInt64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int64x4.MaskedLess", opLen3(ssa.OpMaskedLessInt64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int64x8.MaskedLess", opLen3(ssa.OpMaskedLessInt64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint8x16.MaskedLess", opLen3(ssa.OpMaskedLessUint8x16, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint8x32.MaskedLess", opLen3(ssa.OpMaskedLessUint8x32, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint8x64.MaskedLess", opLen3(ssa.OpMaskedLessUint8x64, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint16x8.MaskedLess", opLen3(ssa.OpMaskedLessUint16x8, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint16x16.MaskedLess", opLen3(ssa.OpMaskedLessUint16x16, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint16x32.MaskedLess", opLen3(ssa.OpMaskedLessUint16x32, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint32x4.MaskedLess", opLen3(ssa.OpMaskedLessUint32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint32x8.MaskedLess", opLen3(ssa.OpMaskedLessUint32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint32x16.MaskedLess", opLen3(ssa.OpMaskedLessUint32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint64x2.MaskedLess", opLen3(ssa.OpMaskedLessUint64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint64x4.MaskedLess", opLen3(ssa.OpMaskedLessUint64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint64x8.MaskedLess", opLen3(ssa.OpMaskedLessUint64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Float32x4.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualFloat32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Float32x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualFloat32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float32x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualFloat32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Float64x2.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualFloat64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Float64x4.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualFloat64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float64x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualFloat64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int8x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt8x16, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int8x32.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt8x32, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int8x64.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt8x64, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int16x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt16x8, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int16x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt16x16, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int16x32.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt16x32, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int32x4.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int32x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int32x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int64x2.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int64x4.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int64x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint8x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint8x16, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint8x32.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint8x32, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint8x64.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint8x64, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint16x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint16x8, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint16x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint16x16, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint16x32.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint16x32, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint32x4.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint32x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint32x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint64x2.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint64x4.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint64x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Float32x4.MaskedMax", opLen3(ssa.OpMaskedMaxFloat32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Float32x8.MaskedMax", opLen3(ssa.OpMaskedMaxFloat32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float32x16.MaskedMax", opLen3(ssa.OpMaskedMaxFloat32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Float64x2.MaskedMax", opLen3(ssa.OpMaskedMaxFloat64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Float64x4.MaskedMax", opLen3(ssa.OpMaskedMaxFloat64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float64x8.MaskedMax", opLen3(ssa.OpMaskedMaxFloat64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int8x16.MaskedMax", opLen3(ssa.OpMaskedMaxInt8x16, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int8x32.MaskedMax", opLen3(ssa.OpMaskedMaxInt8x32, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int8x64.MaskedMax", opLen3(ssa.OpMaskedMaxInt8x64, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int16x8.MaskedMax", opLen3(ssa.OpMaskedMaxInt16x8, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int16x16.MaskedMax", opLen3(ssa.OpMaskedMaxInt16x16, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int16x32.MaskedMax", opLen3(ssa.OpMaskedMaxInt16x32, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int32x4.MaskedMax", opLen3(ssa.OpMaskedMaxInt32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int32x8.MaskedMax", opLen3(ssa.OpMaskedMaxInt32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int32x16.MaskedMax", opLen3(ssa.OpMaskedMaxInt32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int64x2.MaskedMax", opLen3(ssa.OpMaskedMaxInt64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int64x4.MaskedMax", opLen3(ssa.OpMaskedMaxInt64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int64x8.MaskedMax", opLen3(ssa.OpMaskedMaxInt64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint8x16.MaskedMax", opLen3(ssa.OpMaskedMaxUint8x16, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint8x32.MaskedMax", opLen3(ssa.OpMaskedMaxUint8x32, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint8x64.MaskedMax", opLen3(ssa.OpMaskedMaxUint8x64, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint16x8.MaskedMax", opLen3(ssa.OpMaskedMaxUint16x8, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint16x16.MaskedMax", opLen3(ssa.OpMaskedMaxUint16x16, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint16x32.MaskedMax", opLen3(ssa.OpMaskedMaxUint16x32, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint32x4.MaskedMax", opLen3(ssa.OpMaskedMaxUint32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint32x8.MaskedMax", opLen3(ssa.OpMaskedMaxUint32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint32x16.MaskedMax", opLen3(ssa.OpMaskedMaxUint32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint64x2.MaskedMax", opLen3(ssa.OpMaskedMaxUint64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint64x4.MaskedMax", opLen3(ssa.OpMaskedMaxUint64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint64x8.MaskedMax", opLen3(ssa.OpMaskedMaxUint64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Float32x4.MaskedMin", opLen3(ssa.OpMaskedMinFloat32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Float32x8.MaskedMin", opLen3(ssa.OpMaskedMinFloat32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float32x16.MaskedMin", opLen3(ssa.OpMaskedMinFloat32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Float64x2.MaskedMin", opLen3(ssa.OpMaskedMinFloat64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Float64x4.MaskedMin", opLen3(ssa.OpMaskedMinFloat64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float64x8.MaskedMin", opLen3(ssa.OpMaskedMinFloat64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int8x16.MaskedMin", opLen3(ssa.OpMaskedMinInt8x16, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int8x32.MaskedMin", opLen3(ssa.OpMaskedMinInt8x32, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int8x64.MaskedMin", opLen3(ssa.OpMaskedMinInt8x64, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int16x8.MaskedMin", opLen3(ssa.OpMaskedMinInt16x8, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int16x16.MaskedMin", opLen3(ssa.OpMaskedMinInt16x16, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int16x32.MaskedMin", opLen3(ssa.OpMaskedMinInt16x32, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int32x4.MaskedMin", opLen3(ssa.OpMaskedMinInt32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int32x8.MaskedMin", opLen3(ssa.OpMaskedMinInt32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int32x16.MaskedMin", opLen3(ssa.OpMaskedMinInt32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int64x2.MaskedMin", opLen3(ssa.OpMaskedMinInt64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int64x4.MaskedMin", opLen3(ssa.OpMaskedMinInt64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int64x8.MaskedMin", opLen3(ssa.OpMaskedMinInt64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint8x16.MaskedMin", opLen3(ssa.OpMaskedMinUint8x16, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint8x32.MaskedMin", opLen3(ssa.OpMaskedMinUint8x32, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint8x64.MaskedMin", opLen3(ssa.OpMaskedMinUint8x64, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint16x8.MaskedMin", opLen3(ssa.OpMaskedMinUint16x8, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint16x16.MaskedMin", opLen3(ssa.OpMaskedMinUint16x16, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint16x32.MaskedMin", opLen3(ssa.OpMaskedMinUint16x32, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint32x4.MaskedMin", opLen3(ssa.OpMaskedMinUint32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint32x8.MaskedMin", opLen3(ssa.OpMaskedMinUint32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint32x16.MaskedMin", opLen3(ssa.OpMaskedMinUint32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint64x2.MaskedMin", opLen3(ssa.OpMaskedMinUint64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint64x4.MaskedMin", opLen3(ssa.OpMaskedMinUint64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint64x8.MaskedMin", opLen3(ssa.OpMaskedMinUint64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Float32x4.MaskedMul", opLen3(ssa.OpMaskedMulFloat32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Float32x8.MaskedMul", opLen3(ssa.OpMaskedMulFloat32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float32x16.MaskedMul", opLen3(ssa.OpMaskedMulFloat32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Float64x2.MaskedMul", opLen3(ssa.OpMaskedMulFloat64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Float64x4.MaskedMul", opLen3(ssa.OpMaskedMulFloat64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float64x8.MaskedMul", opLen3(ssa.OpMaskedMulFloat64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Float32x4.MaskedMulByPowOf2", opLen3(ssa.OpMaskedMulByPowOf2Float32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Float32x8.MaskedMulByPowOf2", opLen3(ssa.OpMaskedMulByPowOf2Float32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float32x16.MaskedMulByPowOf2", opLen3(ssa.OpMaskedMulByPowOf2Float32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Float64x2.MaskedMulByPowOf2", opLen3(ssa.OpMaskedMulByPowOf2Float64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Float64x4.MaskedMulByPowOf2", opLen3(ssa.OpMaskedMulByPowOf2Float64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float64x8.MaskedMulByPowOf2", opLen3(ssa.OpMaskedMulByPowOf2Float64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int64x2.MaskedMulEvenWiden", opLen3(ssa.OpMaskedMulEvenWidenInt64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int64x4.MaskedMulEvenWiden", opLen3(ssa.OpMaskedMulEvenWidenInt64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int64x8.MaskedMulEvenWiden", opLen3(ssa.OpMaskedMulEvenWidenInt64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint64x2.MaskedMulEvenWiden", opLen3(ssa.OpMaskedMulEvenWidenUint64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint64x4.MaskedMulEvenWiden", opLen3(ssa.OpMaskedMulEvenWidenUint64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint64x8.MaskedMulEvenWiden", opLen3(ssa.OpMaskedMulEvenWidenUint64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int16x8.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighInt16x8, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int16x16.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighInt16x16, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int16x32.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighInt16x32, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint16x8.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighUint16x8, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint16x16.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighUint16x16, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint16x32.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighUint16x32, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int16x8.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt16x8, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int16x16.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt16x16, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int16x32.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt16x32, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int32x4.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int32x8.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int32x16.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int64x2.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int64x4.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int64x8.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Float32x4.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualFloat32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Float32x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualFloat32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float32x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualFloat32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Float64x2.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualFloat64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Float64x4.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualFloat64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Float64x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualFloat64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int8x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt8x16, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int8x32.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt8x32, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int8x64.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt8x64, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int16x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt16x8, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int16x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt16x16, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int16x32.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt16x32, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int32x4.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int32x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int32x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int64x2.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int64x4.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int64x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint8x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint8x16, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint8x32.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint8x32, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint8x64.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint8x64, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint16x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint16x8, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint16x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint16x16, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint16x32.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint16x32, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint32x4.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint32x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint32x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint64x2.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint64x4.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint64x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int32x4.MaskedOr", opLen3(ssa.OpMaskedOrInt32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int32x8.MaskedOr", opLen3(ssa.OpMaskedOrInt32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int32x16.MaskedOr", opLen3(ssa.OpMaskedOrInt32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int64x2.MaskedOr", opLen3(ssa.OpMaskedOrInt64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int64x4.MaskedOr", opLen3(ssa.OpMaskedOrInt64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int64x8.MaskedOr", opLen3(ssa.OpMaskedOrInt64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint32x4.MaskedOr", opLen3(ssa.OpMaskedOrUint32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint32x8.MaskedOr", opLen3(ssa.OpMaskedOrUint32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint32x16.MaskedOr", opLen3(ssa.OpMaskedOrUint32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint64x2.MaskedOr", opLen3(ssa.OpMaskedOrUint64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint64x4.MaskedOr", opLen3(ssa.OpMaskedOrUint64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint64x8.MaskedOr", opLen3(ssa.OpMaskedOrUint64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int16x8.MaskedPairDotProd", opLen3(ssa.OpMaskedPairDotProdInt16x8, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int16x16.MaskedPairDotProd", opLen3(ssa.OpMaskedPairDotProdInt16x16, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int16x32.MaskedPairDotProd", opLen3(ssa.OpMaskedPairDotProdInt16x32, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int32x4.MaskedPairDotProdAccumulate", opLen4(ssa.OpMaskedPairDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int32x8.MaskedPairDotProdAccumulate", opLen4(ssa.OpMaskedPairDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int32x16.MaskedPairDotProdAccumulate", opLen4(ssa.OpMaskedPairDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int8x16.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt8x16, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int8x32.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt8x32, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int8x64.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt8x64, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int16x8.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt16x8, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int16x16.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt16x16, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int16x32.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt16x32, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int32x4.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int32x8.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int32x16.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int64x2.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int64x4.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int64x8.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint8x16.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint8x16, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint8x32.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint8x32, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint8x64.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint8x64, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint16x8.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint16x8, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint16x16.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint16x16, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint16x32.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint16x32, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint32x4.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint32x8.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint32x16.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint64x2.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint64x4.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint64x8.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int32x4.MaskedRotateAllLeft", opLen2Imm8(ssa.OpMaskedRotateAllLeftInt32x4, types.TypeVec128, 0), sys.AMD64)
-	addF(simdPackage, "Int32x8.MaskedRotateAllLeft", opLen2Imm8(ssa.OpMaskedRotateAllLeftInt32x8, types.TypeVec256, 0), sys.AMD64)
-	addF(simdPackage, "Int32x16.MaskedRotateAllLeft", opLen2Imm8(ssa.OpMaskedRotateAllLeftInt32x16, types.TypeVec512, 0), sys.AMD64)
-	addF(simdPackage, "Int64x2.MaskedRotateAllLeft", opLen2Imm8(ssa.OpMaskedRotateAllLeftInt64x2, types.TypeVec128, 0), sys.AMD64)
-	addF(simdPackage, "Int64x4.MaskedRotateAllLeft", opLen2Imm8(ssa.OpMaskedRotateAllLeftInt64x4, types.TypeVec256, 0), sys.AMD64)
-	addF(simdPackage, "Int64x8.MaskedRotateAllLeft", opLen2Imm8(ssa.OpMaskedRotateAllLeftInt64x8, types.TypeVec512, 0), sys.AMD64)
-	addF(simdPackage, "Uint32x4.MaskedRotateAllLeft", opLen2Imm8(ssa.OpMaskedRotateAllLeftUint32x4, types.TypeVec128, 0), sys.AMD64)
-	addF(simdPackage, "Uint32x8.MaskedRotateAllLeft", opLen2Imm8(ssa.OpMaskedRotateAllLeftUint32x8, types.TypeVec256, 0), sys.AMD64)
-	addF(simdPackage, "Uint32x16.MaskedRotateAllLeft", opLen2Imm8(ssa.OpMaskedRotateAllLeftUint32x16, types.TypeVec512, 0), sys.AMD64)
-	addF(simdPackage, "Uint64x2.MaskedRotateAllLeft", opLen2Imm8(ssa.OpMaskedRotateAllLeftUint64x2, types.TypeVec128, 0), sys.AMD64)
-	addF(simdPackage, "Uint64x4.MaskedRotateAllLeft", opLen2Imm8(ssa.OpMaskedRotateAllLeftUint64x4, types.TypeVec256, 0), sys.AMD64)
-	addF(simdPackage, "Uint64x8.MaskedRotateAllLeft", opLen2Imm8(ssa.OpMaskedRotateAllLeftUint64x8, types.TypeVec512, 0), sys.AMD64)
-	addF(simdPackage, "Int32x4.MaskedRotateAllRight", opLen2Imm8(ssa.OpMaskedRotateAllRightInt32x4, types.TypeVec128, 0), sys.AMD64)
-	addF(simdPackage, "Int32x8.MaskedRotateAllRight", opLen2Imm8(ssa.OpMaskedRotateAllRightInt32x8, types.TypeVec256, 0), sys.AMD64)
-	addF(simdPackage, "Int32x16.MaskedRotateAllRight", opLen2Imm8(ssa.OpMaskedRotateAllRightInt32x16, types.TypeVec512, 0), sys.AMD64)
-	addF(simdPackage, "Int64x2.MaskedRotateAllRight", opLen2Imm8(ssa.OpMaskedRotateAllRightInt64x2, types.TypeVec128, 0), sys.AMD64)
-	addF(simdPackage, "Int64x4.MaskedRotateAllRight", opLen2Imm8(ssa.OpMaskedRotateAllRightInt64x4, types.TypeVec256, 0), sys.AMD64)
-	addF(simdPackage, "Int64x8.MaskedRotateAllRight", opLen2Imm8(ssa.OpMaskedRotateAllRightInt64x8, types.TypeVec512, 0), sys.AMD64)
-	addF(simdPackage, "Uint32x4.MaskedRotateAllRight", opLen2Imm8(ssa.OpMaskedRotateAllRightUint32x4, types.TypeVec128, 0), sys.AMD64)
-	addF(simdPackage, "Uint32x8.MaskedRotateAllRight", opLen2Imm8(ssa.OpMaskedRotateAllRightUint32x8, types.TypeVec256, 0), sys.AMD64)
-	addF(simdPackage, "Uint32x16.MaskedRotateAllRight", opLen2Imm8(ssa.OpMaskedRotateAllRightUint32x16, types.TypeVec512, 0), sys.AMD64)
-	addF(simdPackage, "Uint64x2.MaskedRotateAllRight", opLen2Imm8(ssa.OpMaskedRotateAllRightUint64x2, types.TypeVec128, 0), sys.AMD64)
-	addF(simdPackage, "Uint64x4.MaskedRotateAllRight", opLen2Imm8(ssa.OpMaskedRotateAllRightUint64x4, types.TypeVec256, 0), sys.AMD64)
-	addF(simdPackage, "Uint64x8.MaskedRotateAllRight", opLen2Imm8(ssa.OpMaskedRotateAllRightUint64x8, types.TypeVec512, 0), sys.AMD64)
-	addF(simdPackage, "Int32x4.MaskedRotateLeft", opLen3(ssa.OpMaskedRotateLeftInt32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int32x8.MaskedRotateLeft", opLen3(ssa.OpMaskedRotateLeftInt32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int32x16.MaskedRotateLeft", opLen3(ssa.OpMaskedRotateLeftInt32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int64x2.MaskedRotateLeft", opLen3(ssa.OpMaskedRotateLeftInt64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int64x4.MaskedRotateLeft", opLen3(ssa.OpMaskedRotateLeftInt64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int64x8.MaskedRotateLeft", opLen3(ssa.OpMaskedRotateLeftInt64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint32x4.MaskedRotateLeft", opLen3(ssa.OpMaskedRotateLeftUint32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint32x8.MaskedRotateLeft", opLen3(ssa.OpMaskedRotateLeftUint32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint32x16.MaskedRotateLeft", opLen3(ssa.OpMaskedRotateLeftUint32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint64x2.MaskedRotateLeft", opLen3(ssa.OpMaskedRotateLeftUint64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint64x4.MaskedRotateLeft", opLen3(ssa.OpMaskedRotateLeftUint64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint64x8.MaskedRotateLeft", opLen3(ssa.OpMaskedRotateLeftUint64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int32x4.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightInt32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int32x8.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightInt32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int32x16.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightInt32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int64x2.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightInt64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int64x4.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightInt64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int64x8.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightInt64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint32x4.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightUint32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint32x8.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightUint32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint32x16.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightUint32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint64x2.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightUint64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint64x4.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightUint64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint64x8.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightUint64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Float32x4.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64)
-	addF(simdPackage, "Float32x8.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64)
-	addF(simdPackage, "Float32x16.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64)
-	addF(simdPackage, "Float64x2.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64)
-	addF(simdPackage, "Float64x4.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64)
-	addF(simdPackage, "Float64x8.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64)
-	addF(simdPackage, "Int8x16.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt8x16, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int8x32.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt8x32, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int8x64.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt8x64, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int16x8.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt16x8, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int16x16.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt16x16, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int16x32.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt16x32, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint8x16.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint8x16, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint8x32.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint8x32, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint8x64.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint8x64, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint16x8.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint16x8, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint16x16.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint16x16, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint16x32.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint16x32, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int32x4.MaskedSaturatedPairDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedPairDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int32x8.MaskedSaturatedPairDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedPairDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int32x16.MaskedSaturatedPairDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedPairDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int8x16.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt8x16, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int8x32.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt8x32, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int8x64.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt8x64, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int16x8.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt16x8, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int16x16.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt16x16, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int16x32.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt16x32, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint8x16.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint8x16, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint8x32.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint8x32, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint8x64.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint8x64, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint16x8.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint16x8, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint16x16.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint16x16, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint16x32.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint16x32, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint8x16.MaskedSaturatedUnsignedSignedPairDotProd", opLen3(ssa.OpMaskedSaturatedUnsignedSignedPairDotProdUint8x16, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint8x32.MaskedSaturatedUnsignedSignedPairDotProd", opLen3(ssa.OpMaskedSaturatedUnsignedSignedPairDotProdUint8x32, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint8x64.MaskedSaturatedUnsignedSignedPairDotProd", opLen3(ssa.OpMaskedSaturatedUnsignedSignedPairDotProdUint8x64, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int32x4.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int32x8.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int32x16.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint32x4.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint32x8.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint32x16.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int64x2.MaskedShiftAllLeft", opLen3(ssa.OpMaskedShiftAllLeftInt64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int64x4.MaskedShiftAllLeft", opLen3(ssa.OpMaskedShiftAllLeftInt64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int64x8.MaskedShiftAllLeft", opLen3(ssa.OpMaskedShiftAllLeftInt64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint64x2.MaskedShiftAllLeft", opLen3(ssa.OpMaskedShiftAllLeftUint64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint64x4.MaskedShiftAllLeft", opLen3(ssa.OpMaskedShiftAllLeftUint64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint64x8.MaskedShiftAllLeft", opLen3(ssa.OpMaskedShiftAllLeftUint64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int16x8.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromInt16x8, types.TypeVec128, 0), sys.AMD64)
-	addF(simdPackage, "Int16x16.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromInt16x16, types.TypeVec256, 0), sys.AMD64)
-	addF(simdPackage, "Int16x32.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromInt16x32, types.TypeVec512, 0), sys.AMD64)
-	addF(simdPackage, "Int32x4.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromInt32x4, types.TypeVec128, 0), sys.AMD64)
-	addF(simdPackage, "Int32x8.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromInt32x8, types.TypeVec256, 0), sys.AMD64)
-	addF(simdPackage, "Int32x16.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromInt32x16, types.TypeVec512, 0), sys.AMD64)
-	addF(simdPackage, "Int64x2.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromInt64x2, types.TypeVec128, 0), sys.AMD64)
-	addF(simdPackage, "Int64x4.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromInt64x4, types.TypeVec256, 0), sys.AMD64)
-	addF(simdPackage, "Int64x8.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromInt64x8, types.TypeVec512, 0), sys.AMD64)
-	addF(simdPackage, "Uint16x8.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromUint16x8, types.TypeVec128, 0), sys.AMD64)
-	addF(simdPackage, "Uint16x16.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromUint16x16, types.TypeVec256, 0), sys.AMD64)
-	addF(simdPackage, "Uint16x32.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromUint16x32, types.TypeVec512, 0), sys.AMD64)
-	addF(simdPackage, "Uint32x4.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromUint32x4, types.TypeVec128, 0), sys.AMD64)
-	addF(simdPackage, "Uint32x8.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromUint32x8, types.TypeVec256, 0), sys.AMD64)
-	addF(simdPackage, "Uint32x16.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromUint32x16, types.TypeVec512, 0), sys.AMD64)
-	addF(simdPackage, "Uint64x2.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromUint64x2, types.TypeVec128, 0), sys.AMD64)
-	addF(simdPackage, "Uint64x4.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromUint64x4, types.TypeVec256, 0), sys.AMD64)
-	addF(simdPackage, "Uint64x8.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromUint64x8, types.TypeVec512, 0), sys.AMD64)
-	addF(simdPackage, "Int64x2.MaskedShiftAllRight", opLen3(ssa.OpMaskedShiftAllRightInt64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int64x4.MaskedShiftAllRight", opLen3(ssa.OpMaskedShiftAllRightInt64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int64x8.MaskedShiftAllRight", opLen3(ssa.OpMaskedShiftAllRightInt64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint64x2.MaskedShiftAllRight", opLen3(ssa.OpMaskedShiftAllRightUint64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint64x4.MaskedShiftAllRight", opLen3(ssa.OpMaskedShiftAllRightUint64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint64x8.MaskedShiftAllRight", opLen3(ssa.OpMaskedShiftAllRightUint64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int16x8.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromInt16x8, types.TypeVec128, 0), sys.AMD64)
-	addF(simdPackage, "Int16x16.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromInt16x16, types.TypeVec256, 0), sys.AMD64)
-	addF(simdPackage, "Int16x32.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromInt16x32, types.TypeVec512, 0), sys.AMD64)
-	addF(simdPackage, "Int32x4.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromInt32x4, types.TypeVec128, 0), sys.AMD64)
-	addF(simdPackage, "Int32x8.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromInt32x8, types.TypeVec256, 0), sys.AMD64)
-	addF(simdPackage, "Int32x16.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromInt32x16, types.TypeVec512, 0), sys.AMD64)
-	addF(simdPackage, "Int64x2.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromInt64x2, types.TypeVec128, 0), sys.AMD64)
-	addF(simdPackage, "Int64x4.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromInt64x4, types.TypeVec256, 0), sys.AMD64)
-	addF(simdPackage, "Int64x8.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromInt64x8, types.TypeVec512, 0), sys.AMD64)
-	addF(simdPackage, "Uint16x8.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromUint16x8, types.TypeVec128, 0), sys.AMD64)
-	addF(simdPackage, "Uint16x16.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromUint16x16, types.TypeVec256, 0), sys.AMD64)
-	addF(simdPackage, "Uint16x32.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromUint16x32, types.TypeVec512, 0), sys.AMD64)
-	addF(simdPackage, "Uint32x4.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromUint32x4, types.TypeVec128, 0), sys.AMD64)
-	addF(simdPackage, "Uint32x8.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromUint32x8, types.TypeVec256, 0), sys.AMD64)
-	addF(simdPackage, "Uint32x16.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromUint32x16, types.TypeVec512, 0), sys.AMD64)
-	addF(simdPackage, "Uint64x2.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromUint64x2, types.TypeVec128, 0), sys.AMD64)
-	addF(simdPackage, "Uint64x4.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromUint64x4, types.TypeVec256, 0), sys.AMD64)
-	addF(simdPackage, "Uint64x8.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromUint64x8, types.TypeVec512, 0), sys.AMD64)
-	addF(simdPackage, "Int64x2.MaskedShiftAllRightSignExtended", opLen3(ssa.OpMaskedShiftAllRightSignExtendedInt64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int64x4.MaskedShiftAllRightSignExtended", opLen3(ssa.OpMaskedShiftAllRightSignExtendedInt64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int64x8.MaskedShiftAllRightSignExtended", opLen3(ssa.OpMaskedShiftAllRightSignExtendedInt64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int16x8.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftInt16x8, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int16x16.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftInt16x16, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int16x32.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftInt16x32, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int32x4.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftInt32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int32x8.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftInt32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int32x16.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftInt32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int64x2.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftInt64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int64x4.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftInt64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int64x8.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftInt64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint16x8.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftUint16x8, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint16x16.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftUint16x16, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint16x32.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftUint16x32, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint32x4.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftUint32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint32x8.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftUint32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint32x16.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftUint32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint64x2.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftUint64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint64x4.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftUint64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint64x8.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftUint64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int16x8.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromInt16x8, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int16x16.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromInt16x16, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int16x32.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromInt16x32, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int32x4.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromInt32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int32x8.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromInt32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int32x16.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromInt32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int64x2.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromInt64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int64x4.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromInt64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int64x8.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromInt64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint16x8.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromUint16x8, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint16x16.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromUint16x16, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint16x32.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromUint16x32, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint32x4.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromUint32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint32x8.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromUint32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint32x16.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromUint32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint64x2.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromUint64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint64x4.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromUint64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint64x8.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromUint64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int16x8.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightInt16x8, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int16x16.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightInt16x16, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int16x32.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightInt16x32, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int32x4.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightInt32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int32x8.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightInt32x8, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int32x16.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightInt32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int64x2.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightInt64x2, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Int64x4.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightInt64x4, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Int64x8.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightInt64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint16x8.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightUint16x8, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint16x16.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightUint16x16, types.TypeVec256), sys.AMD64)
-	addF(simdPackage, "Uint16x32.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightUint16x32, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint32x4.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightUint32x4, types.TypeVec128), sys.AMD64)
-	addF(simdPackage, "Uint32x8.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightUint32x8,
types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedShiftRightSignExtended", 
opLen3(ssa.OpMaskedShiftRightSignExtendedInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedSub", opLen3(ssa.OpMaskedSubFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedSub", opLen3(ssa.OpMaskedSubFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedSub", opLen3(ssa.OpMaskedSubFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedSub", opLen3(ssa.OpMaskedSubFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedSub", opLen3(ssa.OpMaskedSubFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedSub", opLen3(ssa.OpMaskedSubFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.MaskedSub", 
opLen3(ssa.OpMaskedSubInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.MaskedSub", opLen3(ssa.OpMaskedSubInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.MaskedSub", opLen3(ssa.OpMaskedSubInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedSub", opLen3(ssa.OpMaskedSubInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedSub", opLen3(ssa.OpMaskedSubInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedSub", opLen3(ssa.OpMaskedSubInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedSub", opLen3(ssa.OpMaskedSubInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedSub", opLen3(ssa.OpMaskedSubInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedSub", opLen3(ssa.OpMaskedSubInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedSub", opLen3(ssa.OpMaskedSubInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedSub", opLen3(ssa.OpMaskedSubInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedSub", opLen3(ssa.OpMaskedSubInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedSub", opLen3(ssa.OpMaskedSubUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedSub", opLen3(ssa.OpMaskedSubUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.MaskedSub", opLen3(ssa.OpMaskedSubUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedSub", opLen3(ssa.OpMaskedSubUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedSub", opLen3(ssa.OpMaskedSubUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedSub", opLen3(ssa.OpMaskedSubUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedSub", opLen3(ssa.OpMaskedSubUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedSub", opLen3(ssa.OpMaskedSubUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedSub", opLen3(ssa.OpMaskedSubUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedSub", opLen3(ssa.OpMaskedSubUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedSub", opLen3(ssa.OpMaskedSubUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedSub", opLen3(ssa.OpMaskedSubUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedUnsignedSignedQuadDotProdAccumulate", 
opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedXor", opLen3(ssa.OpMaskedXorInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedXor", opLen3(ssa.OpMaskedXorInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedXor", opLen3(ssa.OpMaskedXorInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedXor", opLen3(ssa.OpMaskedXorInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedXor", opLen3(ssa.OpMaskedXorInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedXor", opLen3(ssa.OpMaskedXorInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedXor", opLen3(ssa.OpMaskedXorUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedXor", opLen3(ssa.OpMaskedXorUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedXor", opLen3(ssa.OpMaskedXorUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedXor", opLen3(ssa.OpMaskedXorUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedXor", opLen3(ssa.OpMaskedXorUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedXor", opLen3(ssa.OpMaskedXorUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, 
"Int32x8.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.LessMasked", opLen3(ssa.OpLessMaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.LessMasked", opLen3(ssa.OpLessMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.LessMasked", opLen3(ssa.OpLessMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.LessMasked", opLen3(ssa.OpLessMaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.LessMasked", opLen3(ssa.OpLessMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.LessMasked", opLen3(ssa.OpLessMaskedFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.LessMasked", opLen3(ssa.OpLessMaskedInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.LessMasked", opLen3(ssa.OpLessMaskedInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.LessMasked", opLen3(ssa.OpLessMaskedInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.LessMasked", opLen3(ssa.OpLessMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.LessMasked", opLen3(ssa.OpLessMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.LessMasked", opLen3(ssa.OpLessMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.LessMasked", opLen3(ssa.OpLessMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.LessMasked", opLen3(ssa.OpLessMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.LessMasked", opLen3(ssa.OpLessMaskedInt32x16, types.TypeVec512), sys.AMD64) + 
addF(simdPackage, "Int64x2.LessMasked", opLen3(ssa.OpLessMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.LessMasked", opLen3(ssa.OpLessMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.LessMasked", opLen3(ssa.OpLessMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.LessMasked", opLen3(ssa.OpLessMaskedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.LessMasked", opLen3(ssa.OpLessMaskedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.LessMasked", opLen3(ssa.OpLessMaskedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.LessMasked", opLen3(ssa.OpLessMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.LessMasked", opLen3(ssa.OpLessMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.LessMasked", opLen3(ssa.OpLessMaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.LessMasked", opLen3(ssa.OpLessMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.LessMasked", opLen3(ssa.OpLessMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.LessMasked", opLen3(ssa.OpLessMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.LessMasked", opLen3(ssa.OpLessMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.LessMasked", opLen3(ssa.OpLessMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.LessMasked", opLen3(ssa.OpLessMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Max", opLen2(ssa.OpMaxFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Max", opLen2(ssa.OpMaxFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.Max", opLen2(ssa.OpMaxFloat32x16, types.TypeVec512), sys.AMD64) @@ -1162,6 +706,36 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.Max", opLen2(ssa.OpMaxUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.Max", opLen2(ssa.OpMaxUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.Max", opLen2(ssa.OpMaxUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaxMasked", opLen3(ssa.OpMaxMaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaxMasked", opLen3(ssa.OpMaxMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaxMasked", opLen3(ssa.OpMaxMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaxMasked", opLen3(ssa.OpMaxMaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaxMasked", opLen3(ssa.OpMaxMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MaxMasked", opLen3(ssa.OpMaxMaskedFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.MaxMasked", opLen3(ssa.OpMaxMaskedInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.MaxMasked", opLen3(ssa.OpMaxMaskedInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.MaxMasked", opLen3(ssa.OpMaxMaskedInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.MaxMasked", opLen3(ssa.OpMaxMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.MaxMasked", opLen3(ssa.OpMaxMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.MaxMasked", opLen3(ssa.OpMaxMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MaxMasked", opLen3(ssa.OpMaxMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MaxMasked", opLen3(ssa.OpMaxMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MaxMasked", opLen3(ssa.OpMaxMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MaxMasked", opLen3(ssa.OpMaxMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MaxMasked", opLen3(ssa.OpMaxMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MaxMasked", opLen3(ssa.OpMaxMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.MaxMasked", opLen3(ssa.OpMaxMaskedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.MaxMasked", opLen3(ssa.OpMaxMaskedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.MaxMasked", opLen3(ssa.OpMaxMaskedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.MaxMasked", opLen3(ssa.OpMaxMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.MaxMasked", opLen3(ssa.OpMaxMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.MaxMasked", opLen3(ssa.OpMaxMaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.MaxMasked", opLen3(ssa.OpMaxMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.MaxMasked", opLen3(ssa.OpMaxMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.MaxMasked", opLen3(ssa.OpMaxMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.MaxMasked", opLen3(ssa.OpMaxMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.MaxMasked", opLen3(ssa.OpMaxMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.MaxMasked", opLen3(ssa.OpMaxMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Min", opLen2(ssa.OpMinFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Min", opLen2(ssa.OpMinFloat32x8, 
types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.Min", opLen2(ssa.OpMinFloat32x16, types.TypeVec512), sys.AMD64) @@ -1192,6 +766,36 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.Min", opLen2(ssa.OpMinUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.Min", opLen2(ssa.OpMinUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.Min", opLen2(ssa.OpMinUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MinMasked", opLen3(ssa.OpMinMaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MinMasked", opLen3(ssa.OpMinMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MinMasked", opLen3(ssa.OpMinMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MinMasked", opLen3(ssa.OpMinMaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MinMasked", opLen3(ssa.OpMinMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MinMasked", opLen3(ssa.OpMinMaskedFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.MinMasked", opLen3(ssa.OpMinMaskedInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.MinMasked", opLen3(ssa.OpMinMaskedInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.MinMasked", opLen3(ssa.OpMinMaskedInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.MinMasked", opLen3(ssa.OpMinMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.MinMasked", opLen3(ssa.OpMinMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.MinMasked", opLen3(ssa.OpMinMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MinMasked", opLen3(ssa.OpMinMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MinMasked", opLen3(ssa.OpMinMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MinMasked", opLen3(ssa.OpMinMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MinMasked", opLen3(ssa.OpMinMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MinMasked", opLen3(ssa.OpMinMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MinMasked", opLen3(ssa.OpMinMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.MinMasked", opLen3(ssa.OpMinMaskedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.MinMasked", opLen3(ssa.OpMinMaskedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.MinMasked", opLen3(ssa.OpMinMaskedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.MinMasked", opLen3(ssa.OpMinMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.MinMasked", opLen3(ssa.OpMinMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.MinMasked", opLen3(ssa.OpMinMaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.MinMasked", opLen3(ssa.OpMinMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.MinMasked", opLen3(ssa.OpMinMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.MinMasked", opLen3(ssa.OpMinMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.MinMasked", opLen3(ssa.OpMinMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.MinMasked", opLen3(ssa.OpMinMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, 
"Uint64x8.MinMasked", opLen3(ssa.OpMinMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Mul", opLen2(ssa.OpMulFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Mul", opLen2(ssa.OpMulFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.Mul", opLen2(ssa.OpMulFloat32x16, types.TypeVec512), sys.AMD64) @@ -1204,6 +808,12 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float64x2.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MulByPowOf2Masked", opLen3(ssa.OpMulByPowOf2MaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MulByPowOf2Masked", opLen3(ssa.OpMulByPowOf2MaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MulByPowOf2Masked", opLen3(ssa.OpMulByPowOf2MaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MulByPowOf2Masked", opLen3(ssa.OpMulByPowOf2MaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MulByPowOf2Masked", opLen3(ssa.OpMulByPowOf2MaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MulByPowOf2Masked", opLen3(ssa.OpMulByPowOf2MaskedFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int64x2.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt64x2, types.TypeVec128), sys.AMD64) @@ -1214,12 +824,24 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.MulEvenWiden", opLen2(ssa.OpMulEvenWidenUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.MulEvenWiden", opLen2(ssa.OpMulEvenWidenUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.MulEvenWiden", opLen2(ssa.OpMulEvenWidenUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MulEvenWidenMasked", opLen3(ssa.OpMulEvenWidenMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MulEvenWidenMasked", opLen3(ssa.OpMulEvenWidenMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MulEvenWidenMasked", opLen3(ssa.OpMulEvenWidenMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.MulEvenWidenMasked", opLen3(ssa.OpMulEvenWidenMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.MulEvenWidenMasked", opLen3(ssa.OpMulEvenWidenMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.MulEvenWidenMasked", opLen3(ssa.OpMulEvenWidenMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.MulHigh", opLen2(ssa.OpMulHighInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.MulHigh", opLen2(ssa.OpMulHighInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.MulHigh", opLen2(ssa.OpMulHighInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint16x8.MulHigh", opLen2(ssa.OpMulHighUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x16.MulHigh", opLen2(ssa.OpMulHighUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x32.MulHigh", opLen2(ssa.OpMulHighUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.MulHighMasked", opLen3(ssa.OpMulHighMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.MulHighMasked", opLen3(ssa.OpMulHighMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.MulHighMasked", opLen3(ssa.OpMulHighMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.MulHighMasked", opLen3(ssa.OpMulHighMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.MulHighMasked", opLen3(ssa.OpMulHighMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.MulHighMasked", opLen3(ssa.OpMulHighMaskedUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.MulLow", opLen2(ssa.OpMulLowInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.MulLow", opLen2(ssa.OpMulLowInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.MulLow", opLen2(ssa.OpMulLowInt16x32, types.TypeVec512), sys.AMD64) @@ -1229,6 +851,21 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Int64x2.MulLow", opLen2(ssa.OpMulLowInt64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int64x4.MulLow", opLen2(ssa.OpMulLowInt64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int64x8.MulLow", opLen2(ssa.OpMulLowInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.MulLowMasked", opLen3(ssa.OpMulLowMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.MulLowMasked", opLen3(ssa.OpMulLowMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.MulLowMasked", opLen3(ssa.OpMulLowMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MulLowMasked", opLen3(ssa.OpMulLowMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MulLowMasked", opLen3(ssa.OpMulLowMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MulLowMasked", opLen3(ssa.OpMulLowMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MulLowMasked", opLen3(ssa.OpMulLowMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MulLowMasked", opLen3(ssa.OpMulLowMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MulLowMasked", opLen3(ssa.OpMulLowMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MulMasked", opLen3(ssa.OpMulMaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MulMasked", opLen3(ssa.OpMulMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MulMasked", opLen3(ssa.OpMulMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MulMasked", opLen3(ssa.OpMulMaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MulMasked", opLen3(ssa.OpMulMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MulMasked", opLen3(ssa.OpMulMaskedFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.NotEqual", opLen2(ssa.OpNotEqualFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.NotEqual", opLen2(ssa.OpNotEqualFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.NotEqual", opLen2(ssa.OpNotEqualFloat32x16, types.TypeVec512), sys.AMD64) @@ -1259,6 +896,36 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.NotEqual", opLen2(ssa.OpNotEqualUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.NotEqual", opLen2(ssa.OpNotEqualUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.NotEqual", opLen2(ssa.OpNotEqualUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.NotEqualMasked", 
opLen3(ssa.OpNotEqualMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.Or", opLen2(ssa.OpOrInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Or", opLen2(ssa.OpOrInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x8.Or", opLen2(ssa.OpOrInt16x8, types.TypeVec128), sys.AMD64) @@ -1279,12 +946,30 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.Or", opLen2(ssa.OpOrUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.Or", opLen2(ssa.OpOrUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.Or", opLen2(ssa.OpOrUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.OrMasked", opLen3(ssa.OpOrMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.OrMasked", opLen3(ssa.OpOrMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.OrMasked", opLen3(ssa.OpOrMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.OrMasked", opLen3(ssa.OpOrMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.OrMasked", opLen3(ssa.OpOrMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.OrMasked", opLen3(ssa.OpOrMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.OrMasked", opLen3(ssa.OpOrMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.OrMasked", opLen3(ssa.OpOrMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.OrMasked", opLen3(ssa.OpOrMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.OrMasked", opLen3(ssa.OpOrMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.OrMasked", opLen3(ssa.OpOrMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.OrMasked", opLen3(ssa.OpOrMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.PairDotProd", opLen2(ssa.OpPairDotProdInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.PairDotProd", opLen2(ssa.OpPairDotProdInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.PairDotProd", opLen2(ssa.OpPairDotProdInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.PairDotProdAccumulate", opLen3(ssa.OpPairDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.PairDotProdAccumulate", opLen3(ssa.OpPairDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x16.PairDotProdAccumulate", opLen3(ssa.OpPairDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.PairDotProdAccumulateMasked", opLen4(ssa.OpPairDotProdAccumulateMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.PairDotProdAccumulateMasked", opLen4(ssa.OpPairDotProdAccumulateMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.PairDotProdAccumulateMasked", opLen4(ssa.OpPairDotProdAccumulateMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.PairDotProdMasked", opLen3(ssa.OpPairDotProdMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.PairDotProdMasked", opLen3(ssa.OpPairDotProdMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.PairDotProdMasked", opLen3(ssa.OpPairDotProdMaskedInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.PairwiseAdd", 
opLen2(ssa.OpPairwiseAddFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.PairwiseAdd", opLen2(ssa.OpPairwiseAddFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.PairwiseAdd", opLen2(ssa.OpPairwiseAddFloat64x2, types.TypeVec128), sys.AMD64) @@ -1333,6 +1018,30 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.PopCount", opLen1(ssa.OpPopCountUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.PopCount", opLen1(ssa.OpPopCountUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.PopCount", opLen1(ssa.OpPopCountUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.PopCountMasked", opLen2(ssa.OpPopCountMaskedInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.PopCountMasked", opLen2(ssa.OpPopCountMaskedInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.PopCountMasked", opLen2(ssa.OpPopCountMaskedInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.PopCountMasked", opLen2(ssa.OpPopCountMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.PopCountMasked", opLen2(ssa.OpPopCountMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.PopCountMasked", opLen2(ssa.OpPopCountMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.PopCountMasked", opLen2(ssa.OpPopCountMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.PopCountMasked", opLen2(ssa.OpPopCountMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.PopCountMasked", opLen2(ssa.OpPopCountMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.PopCountMasked", opLen2(ssa.OpPopCountMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.PopCountMasked", opLen2(ssa.OpPopCountMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.PopCountMasked", opLen2(ssa.OpPopCountMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.PopCountMasked", opLen2(ssa.OpPopCountMaskedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.PopCountMasked", opLen2(ssa.OpPopCountMaskedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.PopCountMasked", opLen2(ssa.OpPopCountMaskedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.PopCountMasked", opLen2(ssa.OpPopCountMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.PopCountMasked", opLen2(ssa.OpPopCountMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.PopCountMasked", opLen2(ssa.OpPopCountMaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.PopCountMasked", opLen2(ssa.OpPopCountMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.PopCountMasked", opLen2(ssa.OpPopCountMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.PopCountMasked", opLen2(ssa.OpPopCountMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.PopCountMasked", opLen2(ssa.OpPopCountMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.PopCountMasked", opLen2(ssa.OpPopCountMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.PopCountMasked", opLen2(ssa.OpPopCountMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.RotateAllLeft", opLen1Imm8(ssa.OpRotateAllLeftInt32x4, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, 
"Int32x8.RotateAllLeft", opLen1Imm8(ssa.OpRotateAllLeftInt32x8, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Int32x16.RotateAllLeft", opLen1Imm8(ssa.OpRotateAllLeftInt32x16, types.TypeVec512, 0), sys.AMD64) @@ -1345,6 +1054,18 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.RotateAllLeft", opLen1Imm8(ssa.OpRotateAllLeftUint64x2, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Uint64x4.RotateAllLeft", opLen1Imm8(ssa.OpRotateAllLeftUint64x4, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Uint64x8.RotateAllLeft", opLen1Imm8(ssa.OpRotateAllLeftUint64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int32x4.RotateAllLeftMasked", opLen2Imm8(ssa.OpRotateAllLeftMaskedInt32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int32x8.RotateAllLeftMasked", opLen2Imm8(ssa.OpRotateAllLeftMaskedInt32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int32x16.RotateAllLeftMasked", opLen2Imm8(ssa.OpRotateAllLeftMaskedInt32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int64x2.RotateAllLeftMasked", opLen2Imm8(ssa.OpRotateAllLeftMaskedInt64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int64x4.RotateAllLeftMasked", opLen2Imm8(ssa.OpRotateAllLeftMaskedInt64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int64x8.RotateAllLeftMasked", opLen2Imm8(ssa.OpRotateAllLeftMaskedInt64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint32x4.RotateAllLeftMasked", opLen2Imm8(ssa.OpRotateAllLeftMaskedUint32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint32x8.RotateAllLeftMasked", opLen2Imm8(ssa.OpRotateAllLeftMaskedUint32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint32x16.RotateAllLeftMasked", opLen2Imm8(ssa.OpRotateAllLeftMaskedUint32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint64x2.RotateAllLeftMasked", opLen2Imm8(ssa.OpRotateAllLeftMaskedUint64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint64x4.RotateAllLeftMasked", opLen2Imm8(ssa.OpRotateAllLeftMaskedUint64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint64x8.RotateAllLeftMasked", opLen2Imm8(ssa.OpRotateAllLeftMaskedUint64x8, types.TypeVec512, 0), sys.AMD64) addF(simdPackage, "Int32x4.RotateAllRight", opLen1Imm8(ssa.OpRotateAllRightInt32x4, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Int32x8.RotateAllRight", opLen1Imm8(ssa.OpRotateAllRightInt32x8, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Int32x16.RotateAllRight", opLen1Imm8(ssa.OpRotateAllRightInt32x16, types.TypeVec512, 0), sys.AMD64) @@ -1357,6 +1078,18 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.RotateAllRight", opLen1Imm8(ssa.OpRotateAllRightUint64x2, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Uint64x4.RotateAllRight", opLen1Imm8(ssa.OpRotateAllRightUint64x4, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Uint64x8.RotateAllRight", opLen1Imm8(ssa.OpRotateAllRightUint64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int32x4.RotateAllRightMasked", opLen2Imm8(ssa.OpRotateAllRightMaskedInt32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int32x8.RotateAllRightMasked", opLen2Imm8(ssa.OpRotateAllRightMaskedInt32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int32x16.RotateAllRightMasked", opLen2Imm8(ssa.OpRotateAllRightMaskedInt32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int64x2.RotateAllRightMasked", opLen2Imm8(ssa.OpRotateAllRightMaskedInt64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int64x4.RotateAllRightMasked", opLen2Imm8(ssa.OpRotateAllRightMaskedInt64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int64x8.RotateAllRightMasked", opLen2Imm8(ssa.OpRotateAllRightMaskedInt64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint32x4.RotateAllRightMasked", opLen2Imm8(ssa.OpRotateAllRightMaskedUint32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint32x8.RotateAllRightMasked", opLen2Imm8(ssa.OpRotateAllRightMaskedUint32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint32x16.RotateAllRightMasked", opLen2Imm8(ssa.OpRotateAllRightMaskedUint32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint64x2.RotateAllRightMasked", opLen2Imm8(ssa.OpRotateAllRightMaskedUint64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint64x4.RotateAllRightMasked", opLen2Imm8(ssa.OpRotateAllRightMaskedUint64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint64x8.RotateAllRightMasked", opLen2Imm8(ssa.OpRotateAllRightMaskedUint64x8, types.TypeVec512, 0), sys.AMD64) addF(simdPackage, "Int32x4.RotateLeft", opLen2(ssa.OpRotateLeftInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.RotateLeft", opLen2(ssa.OpRotateLeftInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x16.RotateLeft", opLen2(ssa.OpRotateLeftInt32x16, types.TypeVec512), sys.AMD64) @@ -1369,6 +1102,18 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.RotateLeft", opLen2(ssa.OpRotateLeftUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.RotateLeft", opLen2(ssa.OpRotateLeftUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.RotateLeft", opLen2(ssa.OpRotateLeftUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.RotateLeftMasked", opLen3(ssa.OpRotateLeftMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.RotateLeftMasked", opLen3(ssa.OpRotateLeftMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.RotateLeftMasked", opLen3(ssa.OpRotateLeftMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.RotateLeftMasked", opLen3(ssa.OpRotateLeftMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.RotateLeftMasked", opLen3(ssa.OpRotateLeftMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.RotateLeftMasked", opLen3(ssa.OpRotateLeftMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.RotateLeftMasked", opLen3(ssa.OpRotateLeftMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.RotateLeftMasked", opLen3(ssa.OpRotateLeftMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.RotateLeftMasked", opLen3(ssa.OpRotateLeftMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.RotateLeftMasked", opLen3(ssa.OpRotateLeftMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.RotateLeftMasked", opLen3(ssa.OpRotateLeftMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.RotateLeftMasked", opLen3(ssa.OpRotateLeftMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.RotateRight", opLen2(ssa.OpRotateRightInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.RotateRight", opLen2(ssa.OpRotateRightInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x16.RotateRight", opLen2(ssa.OpRotateRightInt32x16, types.TypeVec512), sys.AMD64) @@ -1381,6 +1126,18 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.RotateRight", opLen2(ssa.OpRotateRightUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.RotateRight", opLen2(ssa.OpRotateRightUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.RotateRight", opLen2(ssa.OpRotateRightUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.RotateRightMasked", opLen3(ssa.OpRotateRightMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.RotateRightMasked", opLen3(ssa.OpRotateRightMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.RotateRightMasked", opLen3(ssa.OpRotateRightMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.RotateRightMasked", opLen3(ssa.OpRotateRightMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.RotateRightMasked", opLen3(ssa.OpRotateRightMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.RotateRightMasked", opLen3(ssa.OpRotateRightMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.RotateRightMasked", opLen3(ssa.OpRotateRightMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.RotateRightMasked", opLen3(ssa.OpRotateRightMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.RotateRightMasked", opLen3(ssa.OpRotateRightMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.RotateRightMasked", opLen3(ssa.OpRotateRightMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.RotateRightMasked", opLen3(ssa.OpRotateRightMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.RotateRightMasked", opLen3(ssa.OpRotateRightMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Round", opLen1(ssa.OpRoundFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Round", opLen1(ssa.OpRoundFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.Round", opLen1(ssa.OpRoundFloat64x2, types.TypeVec128), sys.AMD64) @@ -1391,6 +1148,12 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float64x2.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.RoundWithPrecisionMasked", opLen2Imm8(ssa.OpRoundWithPrecisionMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.RoundWithPrecisionMasked", opLen2Imm8(ssa.OpRoundWithPrecisionMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.RoundWithPrecisionMasked", opLen2Imm8(ssa.OpRoundWithPrecisionMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.RoundWithPrecisionMasked", opLen2Imm8(ssa.OpRoundWithPrecisionMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.RoundWithPrecisionMasked", opLen2Imm8(ssa.OpRoundWithPrecisionMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.RoundWithPrecisionMasked", opLen2Imm8(ssa.OpRoundWithPrecisionMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Int8x16.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x64.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt8x64, types.TypeVec512), sys.AMD64) @@ -1403,9 +1166,24 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint16x8.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x16.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x32.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.SaturatedPairDotProdAccumulate", 
opLen3(ssa.OpSaturatedPairDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.SaturatedPairDotProdAccumulate", opLen3(ssa.OpSaturatedPairDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x16.SaturatedPairDotProdAccumulate", opLen3(ssa.OpSaturatedPairDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.SaturatedPairDotProdAccumulateMasked", opLen4(ssa.OpSaturatedPairDotProdAccumulateMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.SaturatedPairDotProdAccumulateMasked", opLen4(ssa.OpSaturatedPairDotProdAccumulateMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.SaturatedPairDotProdAccumulateMasked", opLen4(ssa.OpSaturatedPairDotProdAccumulateMaskedInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.SaturatedPairwiseAdd", opLen2(ssa.OpSaturatedPairwiseAddInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.SaturatedPairwiseAdd", opLen2(ssa.OpSaturatedPairwiseAddInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x8.SaturatedPairwiseSub", opLen2(ssa.OpSaturatedPairwiseSubInt16x8, types.TypeVec128), sys.AMD64) @@ -1422,15 +1200,36 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint16x8.SaturatedSub", opLen2(ssa.OpSaturatedSubUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x16.SaturatedSub", opLen2(ssa.OpSaturatedSubUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x32.SaturatedSub", opLen2(ssa.OpSaturatedSubUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.SaturatedSubMasked", opLen3(ssa.OpSaturatedSubMaskedInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.SaturatedSubMasked", opLen3(ssa.OpSaturatedSubMaskedInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.SaturatedSubMasked", opLen3(ssa.OpSaturatedSubMaskedInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.SaturatedSubMasked", opLen3(ssa.OpSaturatedSubMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.SaturatedSubMasked", opLen3(ssa.OpSaturatedSubMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.SaturatedSubMasked", opLen3(ssa.OpSaturatedSubMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.SaturatedSubMasked", opLen3(ssa.OpSaturatedSubMaskedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.SaturatedSubMasked", opLen3(ssa.OpSaturatedSubMaskedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.SaturatedSubMasked", opLen3(ssa.OpSaturatedSubMaskedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.SaturatedSubMasked", opLen3(ssa.OpSaturatedSubMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.SaturatedSubMasked", opLen3(ssa.OpSaturatedSubMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.SaturatedSubMasked", opLen3(ssa.OpSaturatedSubMaskedUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x16.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x32.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x64.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint8x64, types.TypeVec512), sys.AMD64) 
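The registrations above all follow one pattern: a masked variant takes the mask as its final argument, so it is registered with the builder helper of the next arity up (opLen2 becomes opLen3, opLen3 becomes opLen4, and the immediate forms move from opLen1Imm8 to opLen2Imm8, and so on). The helpers themselves are defined earlier in this CL and are not shown in this excerpt; as a rough sketch of the shape they presumably have, based on the intrinsicBuilder signature that appears later in this file, opLen3 and opLen4 would look something like:

func opLen3(op ssa.Op, t *types.Type) intrinsicBuilder {
	return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
		// Three SSA arguments: receiver, operand, and (for the masked
		// registrations above) the mask as the trailing argument.
		return s.newValue3(op, t, args[0], args[1], args[2])
	}
}

func opLen4(op ssa.Op, t *types.Type) intrinsicBuilder {
	return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
		// Four SSA arguments, e.g. accumulator, x, y, and mask for the
		// masked dot-product-accumulate registrations above.
		return s.newValue4(op, t, args[0], args[1], args[2], args[3])
	}
}

This is a sketch only; the exact helper bodies (and how the Imm8 variants validate their constant argument) are whatever the earlier part of this patch defines.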
+ addF(simdPackage, "Uint8x16.SaturatedUnsignedSignedPairDotProdMasked", opLen3(ssa.OpSaturatedUnsignedSignedPairDotProdMaskedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.SaturatedUnsignedSignedPairDotProdMasked", opLen3(ssa.OpSaturatedUnsignedSignedPairDotProdMaskedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.SaturatedUnsignedSignedPairDotProdMasked", opLen3(ssa.OpSaturatedUnsignedSignedPairDotProdMaskedUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x16.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint32x4.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x8.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x16.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.SaturatedUnsignedSignedQuadDotProdAccumulateMasked", opLen4(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.SaturatedUnsignedSignedQuadDotProdAccumulateMasked", opLen4(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.SaturatedUnsignedSignedQuadDotProdAccumulateMasked", opLen4(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.SaturatedUnsignedSignedQuadDotProdAccumulateMasked", opLen4(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.SaturatedUnsignedSignedQuadDotProdAccumulateMasked", opLen4(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.SaturatedUnsignedSignedQuadDotProdAccumulateMasked", opLen4(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x8.Set128", opLen2Imm8(ssa.OpSet128Float32x8, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Float64x4.Set128", opLen2Imm8(ssa.OpSet128Float64x4, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Int8x32.Set128", opLen2Imm8(ssa.OpSet128Int8x32, types.TypeVec256, 0), sys.AMD64) @@ -1481,6 +1280,30 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromUint64x2, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Uint64x4.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromUint64x4, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Uint64x8.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromUint64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedInt16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedInt16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedInt16x32, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedInt32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedInt32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedInt32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedInt64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedInt64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedInt64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedUint16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedUint16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint16x32.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedUint16x32, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint32x4.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedUint32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedUint32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedUint32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedUint64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedUint64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint64x8.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedUint64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedInt64x4, 
types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.ShiftAllRight", opLen2(ssa.OpShiftAllRightInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.ShiftAllRight", opLen2(ssa.OpShiftAllRightInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x4.ShiftAllRight", opLen2(ssa.OpShiftAllRightInt32x4, types.TypeVec128), sys.AMD64) @@ -1513,6 +1336,30 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromUint64x2, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Uint64x4.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromUint64x4, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Uint64x8.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromUint64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedInt16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedInt16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedInt16x32, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedInt32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedInt32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedInt32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedInt64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedInt64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedInt64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedUint16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedUint16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint16x32.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedUint16x32, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint32x4.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedUint32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, 
"Uint32x8.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedUint32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedUint32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedUint64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedUint64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint64x8.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedUint64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.ShiftAllRightSignExtended", opLen2(ssa.OpShiftAllRightSignExtendedInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.ShiftAllRightSignExtended", opLen2(ssa.OpShiftAllRightSignExtendedInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x4.ShiftAllRightSignExtended", opLen2(ssa.OpShiftAllRightSignExtendedInt32x4, types.TypeVec128), sys.AMD64) @@ -1520,6 +1367,9 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Int64x2.ShiftAllRightSignExtended", opLen2(ssa.OpShiftAllRightSignExtendedInt64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int64x4.ShiftAllRightSignExtended", opLen2(ssa.OpShiftAllRightSignExtendedInt64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int64x8.ShiftAllRightSignExtended", opLen2(ssa.OpShiftAllRightSignExtendedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftAllRightSignExtendedMasked", opLen3(ssa.OpShiftAllRightSignExtendedMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftAllRightSignExtendedMasked", opLen3(ssa.OpShiftAllRightSignExtendedMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftAllRightSignExtendedMasked", opLen3(ssa.OpShiftAllRightSignExtendedMaskedInt64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.ShiftLeft", opLen2(ssa.OpShiftLeftInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.ShiftLeft", opLen2(ssa.OpShiftLeftInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.ShiftLeft", opLen2(ssa.OpShiftLeftInt16x32, types.TypeVec512), sys.AMD64) @@ -1556,6 +1406,42 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, 
"Int32x8.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.ShiftRight", opLen2(ssa.OpShiftRightInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.ShiftRight", opLen2(ssa.OpShiftRightInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.ShiftRight", opLen2(ssa.OpShiftRightInt16x32, types.TypeVec512), sys.AMD64) @@ -1592,6 +1478,42 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedInt32x4, 
types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedInt16x32, types.TypeVec512), sys.AMD64) @@ -1610,6 +1532,24 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.Sign", opLen2(ssa.OpSignInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Sign", opLen2(ssa.OpSignInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x8.Sign", opLen2(ssa.OpSignInt16x8, types.TypeVec128), sys.AMD64) @@ -1622,6 +1562,12 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float64x2.Sqrt", opLen1(ssa.OpSqrtFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.Sqrt", opLen1(ssa.OpSqrtFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.Sqrt", opLen1(ssa.OpSqrtFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.SqrtMasked", opLen2(ssa.OpSqrtMaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.SqrtMasked", opLen2(ssa.OpSqrtMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.SqrtMasked", opLen2(ssa.OpSqrtMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.SqrtMasked", opLen2(ssa.OpSqrtMaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.SqrtMasked", opLen2(ssa.OpSqrtMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.SqrtMasked", opLen2(ssa.OpSqrtMaskedFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Sub", opLen2(ssa.OpSubFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Sub", opLen2(ssa.OpSubFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.Sub", opLen2(ssa.OpSubFloat32x16, types.TypeVec512), sys.AMD64) @@ -1652,6 +1598,36 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.Sub", opLen2(ssa.OpSubUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.Sub", opLen2(ssa.OpSubUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.Sub", opLen2(ssa.OpSubUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.SubMasked", opLen3(ssa.OpSubMaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.SubMasked", opLen3(ssa.OpSubMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.SubMasked", opLen3(ssa.OpSubMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.SubMasked", opLen3(ssa.OpSubMaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.SubMasked", opLen3(ssa.OpSubMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.SubMasked", opLen3(ssa.OpSubMaskedFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.SubMasked", opLen3(ssa.OpSubMaskedInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.SubMasked", opLen3(ssa.OpSubMaskedInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.SubMasked", opLen3(ssa.OpSubMaskedInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.SubMasked", opLen3(ssa.OpSubMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.SubMasked", opLen3(ssa.OpSubMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.SubMasked", opLen3(ssa.OpSubMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.SubMasked", opLen3(ssa.OpSubMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.SubMasked", opLen3(ssa.OpSubMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.SubMasked", opLen3(ssa.OpSubMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.SubMasked", opLen3(ssa.OpSubMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.SubMasked", opLen3(ssa.OpSubMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.SubMasked", opLen3(ssa.OpSubMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.SubMasked", opLen3(ssa.OpSubMaskedUint8x16, types.TypeVec128), 
sys.AMD64) + addF(simdPackage, "Uint8x32.SubMasked", opLen3(ssa.OpSubMaskedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.SubMasked", opLen3(ssa.OpSubMaskedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.SubMasked", opLen3(ssa.OpSubMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.SubMasked", opLen3(ssa.OpSubMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.SubMasked", opLen3(ssa.OpSubMaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.SubMasked", opLen3(ssa.OpSubMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.SubMasked", opLen3(ssa.OpSubMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.SubMasked", opLen3(ssa.OpSubMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.SubMasked", opLen3(ssa.OpSubMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.SubMasked", opLen3(ssa.OpSubMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.SubMasked", opLen3(ssa.OpSubMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Trunc", opLen1(ssa.OpTruncFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Trunc", opLen1(ssa.OpTruncFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.Trunc", opLen1(ssa.OpTruncFloat64x2, types.TypeVec128), sys.AMD64) @@ -1662,12 +1638,24 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float64x2.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.TruncWithPrecisionMasked", opLen2Imm8(ssa.OpTruncWithPrecisionMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.TruncWithPrecisionMasked", opLen2Imm8(ssa.OpTruncWithPrecisionMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.TruncWithPrecisionMasked", opLen2Imm8(ssa.OpTruncWithPrecisionMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.TruncWithPrecisionMasked", opLen2Imm8(ssa.OpTruncWithPrecisionMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.TruncWithPrecisionMasked", opLen2Imm8(ssa.OpTruncWithPrecisionMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.TruncWithPrecisionMasked", opLen2Imm8(ssa.OpTruncWithPrecisionMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Int32x4.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x16.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint32x4.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateUint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x8.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateUint32x8, 
types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x16.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.UnsignedSignedQuadDotProdAccumulateMasked", opLen4(ssa.OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.UnsignedSignedQuadDotProdAccumulateMasked", opLen4(ssa.OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.UnsignedSignedQuadDotProdAccumulateMasked", opLen4(ssa.OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.UnsignedSignedQuadDotProdAccumulateMasked", opLen4(ssa.OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.UnsignedSignedQuadDotProdAccumulateMasked", opLen4(ssa.OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.UnsignedSignedQuadDotProdAccumulateMasked", opLen4(ssa.OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.Xor", opLen2(ssa.OpXorInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Xor", opLen2(ssa.OpXorInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x8.Xor", opLen2(ssa.OpXorInt16x8, types.TypeVec128), sys.AMD64) @@ -1688,6 +1676,18 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.Xor", opLen2(ssa.OpXorUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.Xor", opLen2(ssa.OpXorUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.Xor", opLen2(ssa.OpXorUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.XorMasked", opLen3(ssa.OpXorMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.XorMasked", opLen3(ssa.OpXorMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.XorMasked", opLen3(ssa.OpXorMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.XorMasked", opLen3(ssa.OpXorMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.XorMasked", opLen3(ssa.OpXorMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.XorMasked", opLen3(ssa.OpXorMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.XorMasked", opLen3(ssa.OpXorMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.XorMasked", opLen3(ssa.OpXorMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.XorMasked", opLen3(ssa.OpXorMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.XorMasked", opLen3(ssa.OpXorMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.XorMasked", opLen3(ssa.OpXorMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.XorMasked", opLen3(ssa.OpXorMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float32x4.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float32x4.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 
fa99bba7bb7bed..26a0d3e9ad4e74 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -66,6 +66,68 @@ func (x Int64x4) Absolute() Int64x4 // Asm: VPABSQ, CPU Feature: AVX512EVEX func (x Int64x8) Absolute() Int64x8 +/* AbsoluteMasked */ + +// Absolute computes the absolute value of each element. +// +// Asm: VPABSB, CPU Feature: AVX512EVEX +func (x Int8x16) AbsoluteMasked(y Mask8x16) Int8x16 + +// Absolute computes the absolute value of each element. +// +// Asm: VPABSB, CPU Feature: AVX512EVEX +func (x Int8x32) AbsoluteMasked(y Mask8x32) Int8x32 + +// Absolute computes the absolute value of each element. +// +// Asm: VPABSB, CPU Feature: AVX512EVEX +func (x Int8x64) AbsoluteMasked(y Mask8x64) Int8x64 + +// Absolute computes the absolute value of each element. +// +// Asm: VPABSW, CPU Feature: AVX512EVEX +func (x Int16x8) AbsoluteMasked(y Mask16x8) Int16x8 + +// Absolute computes the absolute value of each element. +// +// Asm: VPABSW, CPU Feature: AVX512EVEX +func (x Int16x16) AbsoluteMasked(y Mask16x16) Int16x16 + +// Absolute computes the absolute value of each element. +// +// Asm: VPABSW, CPU Feature: AVX512EVEX +func (x Int16x32) AbsoluteMasked(y Mask16x32) Int16x32 + +// Absolute computes the absolute value of each element. +// +// Asm: VPABSD, CPU Feature: AVX512EVEX +func (x Int32x4) AbsoluteMasked(y Mask32x4) Int32x4 + +// Absolute computes the absolute value of each element. +// +// Asm: VPABSD, CPU Feature: AVX512EVEX +func (x Int32x8) AbsoluteMasked(y Mask32x8) Int32x8 + +// Absolute computes the absolute value of each element. +// +// Asm: VPABSD, CPU Feature: AVX512EVEX +func (x Int32x16) AbsoluteMasked(y Mask32x16) Int32x16 + +// Absolute computes the absolute value of each element. +// +// Asm: VPABSQ, CPU Feature: AVX512EVEX +func (x Int64x2) AbsoluteMasked(y Mask64x2) Int64x2 + +// Absolute computes the absolute value of each element. +// +// Asm: VPABSQ, CPU Feature: AVX512EVEX +func (x Int64x4) AbsoluteMasked(y Mask64x4) Int64x4 + +// Absolute computes the absolute value of each element. +// +// Asm: VPABSQ, CPU Feature: AVX512EVEX +func (x Int64x8) AbsoluteMasked(y Mask64x8) Int64x8 + /* Add */ // Add adds corresponding elements of two vectors. @@ -218,6 +280,158 @@ func (x Uint64x4) Add(y Uint64x4) Uint64x4 // Asm: VPADDQ, CPU Feature: AVX512EVEX func (x Uint64x8) Add(y Uint64x8) Uint64x8 +/* AddMasked */ + +// Add adds corresponding elements of two vectors. +// +// Asm: VADDPS, CPU Feature: AVX512EVEX +func (x Float32x4) AddMasked(y Float32x4, z Mask32x4) Float32x4 + +// Add adds corresponding elements of two vectors. +// +// Asm: VADDPS, CPU Feature: AVX512EVEX +func (x Float32x8) AddMasked(y Float32x8, z Mask32x8) Float32x8 + +// Add adds corresponding elements of two vectors. +// +// Asm: VADDPS, CPU Feature: AVX512EVEX +func (x Float32x16) AddMasked(y Float32x16, z Mask32x16) Float32x16 + +// Add adds corresponding elements of two vectors. +// +// Asm: VADDPD, CPU Feature: AVX512EVEX +func (x Float64x2) AddMasked(y Float64x2, z Mask64x2) Float64x2 + +// Add adds corresponding elements of two vectors. +// +// Asm: VADDPD, CPU Feature: AVX512EVEX +func (x Float64x4) AddMasked(y Float64x4, z Mask64x4) Float64x4 + +// Add adds corresponding elements of two vectors. +// +// Asm: VADDPD, CPU Feature: AVX512EVEX +func (x Float64x8) AddMasked(y Float64x8, z Mask64x8) Float64x8 + +// Add adds corresponding elements of two vectors. 
+// +// Asm: VPADDB, CPU Feature: AVX512EVEX +func (x Int8x16) AddMasked(y Int8x16, z Mask8x16) Int8x16 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDB, CPU Feature: AVX512EVEX +func (x Int8x32) AddMasked(y Int8x32, z Mask8x32) Int8x32 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDB, CPU Feature: AVX512EVEX +func (x Int8x64) AddMasked(y Int8x64, z Mask8x64) Int8x64 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDW, CPU Feature: AVX512EVEX +func (x Int16x8) AddMasked(y Int16x8, z Mask16x8) Int16x8 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDW, CPU Feature: AVX512EVEX +func (x Int16x16) AddMasked(y Int16x16, z Mask16x16) Int16x16 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDW, CPU Feature: AVX512EVEX +func (x Int16x32) AddMasked(y Int16x32, z Mask16x32) Int16x32 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDD, CPU Feature: AVX512EVEX +func (x Int32x4) AddMasked(y Int32x4, z Mask32x4) Int32x4 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDD, CPU Feature: AVX512EVEX +func (x Int32x8) AddMasked(y Int32x8, z Mask32x8) Int32x8 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDD, CPU Feature: AVX512EVEX +func (x Int32x16) AddMasked(y Int32x16, z Mask32x16) Int32x16 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDQ, CPU Feature: AVX512EVEX +func (x Int64x2) AddMasked(y Int64x2, z Mask64x2) Int64x2 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDQ, CPU Feature: AVX512EVEX +func (x Int64x4) AddMasked(y Int64x4, z Mask64x4) Int64x4 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDQ, CPU Feature: AVX512EVEX +func (x Int64x8) AddMasked(y Int64x8, z Mask64x8) Int64x8 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDB, CPU Feature: AVX512EVEX +func (x Uint8x16) AddMasked(y Uint8x16, z Mask8x16) Uint8x16 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDB, CPU Feature: AVX512EVEX +func (x Uint8x32) AddMasked(y Uint8x32, z Mask8x32) Uint8x32 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDB, CPU Feature: AVX512EVEX +func (x Uint8x64) AddMasked(y Uint8x64, z Mask8x64) Uint8x64 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDW, CPU Feature: AVX512EVEX +func (x Uint16x8) AddMasked(y Uint16x8, z Mask16x8) Uint16x8 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDW, CPU Feature: AVX512EVEX +func (x Uint16x16) AddMasked(y Uint16x16, z Mask16x16) Uint16x16 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDW, CPU Feature: AVX512EVEX +func (x Uint16x32) AddMasked(y Uint16x32, z Mask16x32) Uint16x32 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDD, CPU Feature: AVX512EVEX +func (x Uint32x4) AddMasked(y Uint32x4, z Mask32x4) Uint32x4 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDD, CPU Feature: AVX512EVEX +func (x Uint32x8) AddMasked(y Uint32x8, z Mask32x8) Uint32x8 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDD, CPU Feature: AVX512EVEX +func (x Uint32x16) AddMasked(y Uint32x16, z Mask32x16) Uint32x16 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDQ, CPU Feature: AVX512EVEX +func (x Uint64x2) AddMasked(y Uint64x2, z Mask64x2) Uint64x2 + +// Add adds corresponding elements of two vectors. 
+// +// Asm: VPADDQ, CPU Feature: AVX512EVEX +func (x Uint64x4) AddMasked(y Uint64x4, z Mask64x4) Uint64x4 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDQ, CPU Feature: AVX512EVEX +func (x Uint64x8) AddMasked(y Uint64x8, z Mask64x8) Uint64x8 + /* AddSub */ // AddSub subtracts even elements and adds odd elements of two vectors. @@ -342,6 +556,68 @@ func (x Uint64x4) And(y Uint64x4) Uint64x4 // Asm: VPANDQ, CPU Feature: AVX512EVEX func (x Uint64x8) And(y Uint64x8) Uint64x8 +/* AndMasked */ + +// And performs a masked bitwise AND operation between two vectors. +// +// Asm: VPANDD, CPU Feature: AVX512EVEX +func (x Int32x4) AndMasked(y Int32x4, z Mask32x4) Int32x4 + +// And performs a masked bitwise AND operation between two vectors. +// +// Asm: VPANDD, CPU Feature: AVX512EVEX +func (x Int32x8) AndMasked(y Int32x8, z Mask32x8) Int32x8 + +// And performs a masked bitwise AND operation between two vectors. +// +// Asm: VPANDD, CPU Feature: AVX512EVEX +func (x Int32x16) AndMasked(y Int32x16, z Mask32x16) Int32x16 + +// And performs a masked bitwise AND operation between two vectors. +// +// Asm: VPANDQ, CPU Feature: AVX512EVEX +func (x Int64x2) AndMasked(y Int64x2, z Mask64x2) Int64x2 + +// And performs a masked bitwise AND operation between two vectors. +// +// Asm: VPANDQ, CPU Feature: AVX512EVEX +func (x Int64x4) AndMasked(y Int64x4, z Mask64x4) Int64x4 + +// And performs a masked bitwise AND operation between two vectors. +// +// Asm: VPANDQ, CPU Feature: AVX512EVEX +func (x Int64x8) AndMasked(y Int64x8, z Mask64x8) Int64x8 + +// And performs a masked bitwise AND operation between two vectors. +// +// Asm: VPANDD, CPU Feature: AVX512EVEX +func (x Uint32x4) AndMasked(y Uint32x4, z Mask32x4) Uint32x4 + +// And performs a masked bitwise AND operation between two vectors. +// +// Asm: VPANDD, CPU Feature: AVX512EVEX +func (x Uint32x8) AndMasked(y Uint32x8, z Mask32x8) Uint32x8 + +// And performs a masked bitwise AND operation between two vectors. +// +// Asm: VPANDD, CPU Feature: AVX512EVEX +func (x Uint32x16) AndMasked(y Uint32x16, z Mask32x16) Uint32x16 + +// And performs a masked bitwise AND operation between two vectors. +// +// Asm: VPANDQ, CPU Feature: AVX512EVEX +func (x Uint64x2) AndMasked(y Uint64x2, z Mask64x2) Uint64x2 + +// And performs a masked bitwise AND operation between two vectors. +// +// Asm: VPANDQ, CPU Feature: AVX512EVEX +func (x Uint64x4) AndMasked(y Uint64x4, z Mask64x4) Uint64x4 + +// And performs a masked bitwise AND operation between two vectors. +// +// Asm: VPANDQ, CPU Feature: AVX512EVEX +func (x Uint64x8) AndMasked(y Uint64x8, z Mask64x8) Uint64x8 + /* AndNot */ // AndNot performs a bitwise AND NOT operation between two vectors. @@ -444,41 +720,135 @@ func (x Uint64x4) AndNot(y Uint64x4) Uint64x4 // Asm: VPANDNQ, CPU Feature: AVX512EVEX func (x Uint64x8) AndNot(y Uint64x8) Uint64x8 -/* ApproximateReciprocal */ +/* AndNotMasked */ -// ApproximateReciprocal computes an approximate reciprocal of each element. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VRCP14PS, CPU Feature: AVX512EVEX -func (x Float32x4) ApproximateReciprocal() Float32x4 +// Asm: VPANDND, CPU Feature: AVX512EVEX +func (x Int32x4) AndNotMasked(y Int32x4, z Mask32x4) Int32x4 -// ApproximateReciprocal computes an approximate reciprocal of each element. +// AndNot performs a masked bitwise AND NOT operation between two vectors. 
// -// Asm: VRCP14PS, CPU Feature: AVX512EVEX -func (x Float32x8) ApproximateReciprocal() Float32x8 +// Asm: VPANDND, CPU Feature: AVX512EVEX +func (x Int32x8) AndNotMasked(y Int32x8, z Mask32x8) Int32x8 -// ApproximateReciprocal computes an approximate reciprocal of each element. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VRCP14PS, CPU Feature: AVX512EVEX -func (x Float32x16) ApproximateReciprocal() Float32x16 +// Asm: VPANDND, CPU Feature: AVX512EVEX +func (x Int32x16) AndNotMasked(y Int32x16, z Mask32x16) Int32x16 -// ApproximateReciprocal computes an approximate reciprocal of each element. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VRCP14PD, CPU Feature: AVX512EVEX -func (x Float64x2) ApproximateReciprocal() Float64x2 +// Asm: VPANDNQ, CPU Feature: AVX512EVEX +func (x Int64x2) AndNotMasked(y Int64x2, z Mask64x2) Int64x2 -// ApproximateReciprocal computes an approximate reciprocal of each element. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VRCP14PD, CPU Feature: AVX512EVEX -func (x Float64x4) ApproximateReciprocal() Float64x4 +// Asm: VPANDNQ, CPU Feature: AVX512EVEX +func (x Int64x4) AndNotMasked(y Int64x4, z Mask64x4) Int64x4 -// ApproximateReciprocal computes an approximate reciprocal of each element. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VRCP14PD, CPU Feature: AVX512EVEX -func (x Float64x8) ApproximateReciprocal() Float64x8 +// Asm: VPANDNQ, CPU Feature: AVX512EVEX +func (x Int64x8) AndNotMasked(y Int64x8, z Mask64x8) Int64x8 -/* ApproximateReciprocalOfSqrt */ +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// +// Asm: VPANDND, CPU Feature: AVX512EVEX +func (x Uint32x4) AndNotMasked(y Uint32x4, z Mask32x4) Uint32x4 -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// +// Asm: VPANDND, CPU Feature: AVX512EVEX +func (x Uint32x8) AndNotMasked(y Uint32x8, z Mask32x8) Uint32x8 + +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// +// Asm: VPANDND, CPU Feature: AVX512EVEX +func (x Uint32x16) AndNotMasked(y Uint32x16, z Mask32x16) Uint32x16 + +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// +// Asm: VPANDNQ, CPU Feature: AVX512EVEX +func (x Uint64x2) AndNotMasked(y Uint64x2, z Mask64x2) Uint64x2 + +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// +// Asm: VPANDNQ, CPU Feature: AVX512EVEX +func (x Uint64x4) AndNotMasked(y Uint64x4, z Mask64x4) Uint64x4 + +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// +// Asm: VPANDNQ, CPU Feature: AVX512EVEX +func (x Uint64x8) AndNotMasked(y Uint64x8, z Mask64x8) Uint64x8 + +/* ApproximateReciprocal */ + +// ApproximateReciprocal computes an approximate reciprocal of each element. +// +// Asm: VRCP14PS, CPU Feature: AVX512EVEX +func (x Float32x4) ApproximateReciprocal() Float32x4 + +// ApproximateReciprocal computes an approximate reciprocal of each element. +// +// Asm: VRCP14PS, CPU Feature: AVX512EVEX +func (x Float32x8) ApproximateReciprocal() Float32x8 + +// ApproximateReciprocal computes an approximate reciprocal of each element. 
+// +// Asm: VRCP14PS, CPU Feature: AVX512EVEX +func (x Float32x16) ApproximateReciprocal() Float32x16 + +// ApproximateReciprocal computes an approximate reciprocal of each element. +// +// Asm: VRCP14PD, CPU Feature: AVX512EVEX +func (x Float64x2) ApproximateReciprocal() Float64x2 + +// ApproximateReciprocal computes an approximate reciprocal of each element. +// +// Asm: VRCP14PD, CPU Feature: AVX512EVEX +func (x Float64x4) ApproximateReciprocal() Float64x4 + +// ApproximateReciprocal computes an approximate reciprocal of each element. +// +// Asm: VRCP14PD, CPU Feature: AVX512EVEX +func (x Float64x8) ApproximateReciprocal() Float64x8 + +/* ApproximateReciprocalMasked */ + +// ApproximateReciprocal computes an approximate reciprocal of each element. +// +// Asm: VRCP14PS, CPU Feature: AVX512EVEX +func (x Float32x4) ApproximateReciprocalMasked(y Mask32x4) Float32x4 + +// ApproximateReciprocal computes an approximate reciprocal of each element. +// +// Asm: VRCP14PS, CPU Feature: AVX512EVEX +func (x Float32x8) ApproximateReciprocalMasked(y Mask32x8) Float32x8 + +// ApproximateReciprocal computes an approximate reciprocal of each element. +// +// Asm: VRCP14PS, CPU Feature: AVX512EVEX +func (x Float32x16) ApproximateReciprocalMasked(y Mask32x16) Float32x16 + +// ApproximateReciprocal computes an approximate reciprocal of each element. +// +// Asm: VRCP14PD, CPU Feature: AVX512EVEX +func (x Float64x2) ApproximateReciprocalMasked(y Mask64x2) Float64x2 + +// ApproximateReciprocal computes an approximate reciprocal of each element. +// +// Asm: VRCP14PD, CPU Feature: AVX512EVEX +func (x Float64x4) ApproximateReciprocalMasked(y Mask64x4) Float64x4 + +// ApproximateReciprocal computes an approximate reciprocal of each element. +// +// Asm: VRCP14PD, CPU Feature: AVX512EVEX +func (x Float64x8) ApproximateReciprocalMasked(y Mask64x8) Float64x8 + +/* ApproximateReciprocalOfSqrt */ + +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. // // Asm: VRSQRTPS, CPU Feature: AVX func (x Float32x4) ApproximateReciprocalOfSqrt() Float32x4 @@ -508,6 +878,38 @@ func (x Float64x4) ApproximateReciprocalOfSqrt() Float64x4 // Asm: VRSQRT14PD, CPU Feature: AVX512EVEX func (x Float64x8) ApproximateReciprocalOfSqrt() Float64x8 +/* ApproximateReciprocalOfSqrtMasked */ + +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// +// Asm: VRSQRT14PS, CPU Feature: AVX512EVEX +func (x Float32x4) ApproximateReciprocalOfSqrtMasked(y Mask32x4) Float32x4 + +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// +// Asm: VRSQRT14PS, CPU Feature: AVX512EVEX +func (x Float32x8) ApproximateReciprocalOfSqrtMasked(y Mask32x8) Float32x8 + +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// +// Asm: VRSQRT14PS, CPU Feature: AVX512EVEX +func (x Float32x16) ApproximateReciprocalOfSqrtMasked(y Mask32x16) Float32x16 + +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// +// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX +func (x Float64x2) ApproximateReciprocalOfSqrtMasked(y Mask64x2) Float64x2 + +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. 
+// +// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX +func (x Float64x4) ApproximateReciprocalOfSqrtMasked(y Mask64x4) Float64x4 + +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// +// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX +func (x Float64x8) ApproximateReciprocalOfSqrtMasked(y Mask64x8) Float64x8 + /* Average */ // Average computes the rounded average of corresponding elements. @@ -540,6 +942,38 @@ func (x Uint16x16) Average(y Uint16x16) Uint16x16 // Asm: VPAVGW, CPU Feature: AVX512EVEX func (x Uint16x32) Average(y Uint16x32) Uint16x32 +/* AverageMasked */ + +// Average computes the rounded average of corresponding elements. +// +// Asm: VPAVGB, CPU Feature: AVX512EVEX +func (x Uint8x16) AverageMasked(y Uint8x16, z Mask8x16) Uint8x16 + +// Average computes the rounded average of corresponding elements. +// +// Asm: VPAVGB, CPU Feature: AVX512EVEX +func (x Uint8x32) AverageMasked(y Uint8x32, z Mask8x32) Uint8x32 + +// Average computes the rounded average of corresponding elements. +// +// Asm: VPAVGB, CPU Feature: AVX512EVEX +func (x Uint8x64) AverageMasked(y Uint8x64, z Mask8x64) Uint8x64 + +// Average computes the rounded average of corresponding elements. +// +// Asm: VPAVGW, CPU Feature: AVX512EVEX +func (x Uint16x8) AverageMasked(y Uint16x8, z Mask16x8) Uint16x8 + +// Average computes the rounded average of corresponding elements. +// +// Asm: VPAVGW, CPU Feature: AVX512EVEX +func (x Uint16x16) AverageMasked(y Uint16x16, z Mask16x16) Uint16x16 + +// Average computes the rounded average of corresponding elements. +// +// Asm: VPAVGW, CPU Feature: AVX512EVEX +func (x Uint16x32) AverageMasked(y Uint16x32, z Mask16x32) Uint16x32 + /* Ceil */ // Ceil rounds elements up to the nearest integer. @@ -594,6 +1028,38 @@ func (x Float64x4) CeilWithPrecision(imm uint8) Float64x4 // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) CeilWithPrecision(imm uint8) Float64x8 +/* CeilWithPrecisionMasked */ + +// CeilWithPrecision rounds elements up with specified precision, masked. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) CeilWithPrecisionMasked(imm uint8, y Mask32x4) Float32x4 + +// CeilWithPrecision rounds elements up with specified precision, masked. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) CeilWithPrecisionMasked(imm uint8, y Mask32x8) Float32x8 + +// CeilWithPrecision rounds elements up with specified precision, masked. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) CeilWithPrecisionMasked(imm uint8, y Mask32x16) Float32x16 + +// CeilWithPrecision rounds elements up with specified precision, masked. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) CeilWithPrecisionMasked(imm uint8, y Mask64x2) Float64x2 + +// CeilWithPrecision rounds elements up with specified precision, masked. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) CeilWithPrecisionMasked(imm uint8, y Mask64x4) Float64x4 + +// CeilWithPrecision rounds elements up with specified precision, masked. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) CeilWithPrecisionMasked(imm uint8, y Mask64x8) Float64x8 + /* DiffWithCeilWithPrecision */ // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. 
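[Editorial note, not part of the patch] The masked variants introduced in the hunks above all share one calling convention: the mask is appended as the final parameter and has the same lane shape as the operands (Mask32x4 for 32-bit lanes in a 128-bit vector, and so on). A minimal sketch of how a caller might use them follows; it is written as if it lived inside the simd package itself, purely to avoid guessing the import path, and the meaning of the precision immediate is an assumption based on VRNDSCALEPS semantics rather than anything the patch states.

package simd

// maskedCeil rounds up only the lanes selected by m, using the
// CeilWithPrecisionMasked method declared in the hunk above. The immediate 0
// requests rounding to whole integers; the exact interpretation of the
// precision immediate is an assumption, not confirmed by these doc comments.
func maskedCeil(x Float32x4, m Mask32x4) Float32x4 {
	return x.CeilWithPrecisionMasked(0, m)
}

// maskedSum adds y into x only on the lanes selected by m, following the
// AddMasked signature shown earlier in this file.
func maskedSum(x, y Int32x4, m Mask32x4) Int32x4 {
	return x.AddMasked(y, m)
}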
@@ -626,6 +1092,38 @@ func (x Float64x4) DiffWithCeilWithPrecision(imm uint8) Float64x4 // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) DiffWithCeilWithPrecision(imm uint8) Float64x8 +/* DiffWithCeilWithPrecisionMasked */ + +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) DiffWithCeilWithPrecisionMasked(imm uint8, y Mask32x4) Float32x4 + +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) DiffWithCeilWithPrecisionMasked(imm uint8, y Mask32x8) Float32x8 + +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) DiffWithCeilWithPrecisionMasked(imm uint8, y Mask32x16) Float32x16 + +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) DiffWithCeilWithPrecisionMasked(imm uint8, y Mask64x2) Float64x2 + +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) DiffWithCeilWithPrecisionMasked(imm uint8, y Mask64x4) Float64x4 + +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) DiffWithCeilWithPrecisionMasked(imm uint8, y Mask64x8) Float64x8 + /* DiffWithFloorWithPrecision */ // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. @@ -658,6 +1156,38 @@ func (x Float64x4) DiffWithFloorWithPrecision(imm uint8) Float64x4 // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) DiffWithFloorWithPrecision(imm uint8) Float64x8 +/* DiffWithFloorWithPrecisionMasked */ + +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) DiffWithFloorWithPrecisionMasked(imm uint8, y Mask32x4) Float32x4 + +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) DiffWithFloorWithPrecisionMasked(imm uint8, y Mask32x8) Float32x8 + +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) DiffWithFloorWithPrecisionMasked(imm uint8, y Mask32x16) Float32x16 + +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) DiffWithFloorWithPrecisionMasked(imm uint8, y Mask64x2) Float64x2 + +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) DiffWithFloorWithPrecisionMasked(imm uint8, y Mask64x4) Float64x4 + +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) DiffWithFloorWithPrecisionMasked(imm uint8, y Mask64x8) Float64x8 + /* DiffWithRoundWithPrecision */ // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. 
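[Editorial note, not part of the patch] The DiffWith*WithPrecision family (VREDUCEPS/VREDUCEPD) returns x minus its rounded value rather than the rounded value itself, so a floor-based reduction yields each lane's fractional remainder. A hedged sketch, again written as if inside the simd package and assuming immediate 0 means whole-integer flooring:

package simd

// fracParts returns, per lane, x - floor(x), using DiffWithFloorWithPrecision
// from the surrounding hunks. The immediate encoding is an assumption based on
// the VREDUCEPD instruction, not something these doc comments spell out.
func fracParts(x Float64x4) Float64x4 {
	return x.DiffWithFloorWithPrecision(0)
}

// fracPartsMasked does the same, but only for the lanes selected by m, via the
// masked variant added just above.
func fracPartsMasked(x Float64x4, m Mask64x4) Float64x4 {
	return x.DiffWithFloorWithPrecisionMasked(0, m)
}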
@@ -690,6 +1220,38 @@ func (x Float64x4) DiffWithRoundWithPrecision(imm uint8) Float64x4 // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) DiffWithRoundWithPrecision(imm uint8) Float64x8 +/* DiffWithRoundWithPrecisionMasked */ + +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) DiffWithRoundWithPrecisionMasked(imm uint8, y Mask32x4) Float32x4 + +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) DiffWithRoundWithPrecisionMasked(imm uint8, y Mask32x8) Float32x8 + +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) DiffWithRoundWithPrecisionMasked(imm uint8, y Mask32x16) Float32x16 + +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) DiffWithRoundWithPrecisionMasked(imm uint8, y Mask64x2) Float64x2 + +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) DiffWithRoundWithPrecisionMasked(imm uint8, y Mask64x4) Float64x4 + +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) DiffWithRoundWithPrecisionMasked(imm uint8, y Mask64x8) Float64x8 + /* DiffWithTruncWithPrecision */ // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. @@ -722,6 +1284,38 @@ func (x Float64x4) DiffWithTruncWithPrecision(imm uint8) Float64x4 // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) DiffWithTruncWithPrecision(imm uint8) Float64x8 +/* DiffWithTruncWithPrecisionMasked */ + +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) DiffWithTruncWithPrecisionMasked(imm uint8, y Mask32x4) Float32x4 + +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) DiffWithTruncWithPrecisionMasked(imm uint8, y Mask32x8) Float32x8 + +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) DiffWithTruncWithPrecisionMasked(imm uint8, y Mask32x16) Float32x16 + +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) DiffWithTruncWithPrecisionMasked(imm uint8, y Mask64x2) Float64x2 + +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) DiffWithTruncWithPrecisionMasked(imm uint8, y Mask64x4) Float64x4 + +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) DiffWithTruncWithPrecisionMasked(imm uint8, y Mask64x8) Float64x8 + /* Div */ // Div divides elements of two vectors. 
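[Editorial note, not part of the patch] The hunk that follows adds DivMasked, which differs from Div only in the trailing mask argument; the usual pattern is to compute a mask of "safe" lanes first and divide only those. A small sketch under the same assumptions as the earlier ones (written as if inside the simd package; the mask is supplied by the caller, for instance from one of the comparison methods later in this file):

package simd

// safeDiv divides x by y only on lanes the caller has marked as safe, e.g.
// lanes where y is nonzero. What DivMasked leaves in the unselected lanes is
// not specified by these doc comments, so this sketch makes no claim about it.
func safeDiv(x, y Float32x8, ok Mask32x8) Float32x8 {
	return x.DivMasked(y, ok)
}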
@@ -754,9 +1348,41 @@ func (x Float64x4) Div(y Float64x4) Float64x4 // Asm: VDIVPD, CPU Feature: AVX512EVEX func (x Float64x8) Div(y Float64x8) Float64x8 -/* DotProdBroadcast */ +/* DivMasked */ -// DotProdBroadcast multiplies all elements and broadcasts the sum. +// Div divides elements of two vectors. +// +// Asm: VDIVPS, CPU Feature: AVX512EVEX +func (x Float32x4) DivMasked(y Float32x4, z Mask32x4) Float32x4 + +// Div divides elements of two vectors. +// +// Asm: VDIVPS, CPU Feature: AVX512EVEX +func (x Float32x8) DivMasked(y Float32x8, z Mask32x8) Float32x8 + +// Div divides elements of two vectors. +// +// Asm: VDIVPS, CPU Feature: AVX512EVEX +func (x Float32x16) DivMasked(y Float32x16, z Mask32x16) Float32x16 + +// Div divides elements of two vectors. +// +// Asm: VDIVPD, CPU Feature: AVX512EVEX +func (x Float64x2) DivMasked(y Float64x2, z Mask64x2) Float64x2 + +// Div divides elements of two vectors. +// +// Asm: VDIVPD, CPU Feature: AVX512EVEX +func (x Float64x4) DivMasked(y Float64x4, z Mask64x4) Float64x4 + +// Div divides elements of two vectors. +// +// Asm: VDIVPD, CPU Feature: AVX512EVEX +func (x Float64x8) DivMasked(y Float64x8, z Mask64x8) Float64x8 + +/* DotProdBroadcast */ + +// DotProdBroadcast multiplies all elements and broadcasts the sum. // // Asm: VDPPD, CPU Feature: AVX func (x Float64x2) DotProdBroadcast(y Float64x2) Float64x2 @@ -913,6 +1539,158 @@ func (x Uint64x4) Equal(y Uint64x4) Mask64x4 // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) Equal(y Uint64x8) Mask64x8 +/* EqualMasked */ + +// Equal compares for equality, masked. +// +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x4) EqualMasked(y Float32x4, z Mask32x4) Mask32x4 + +// Equal compares for equality, masked. +// +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x8) EqualMasked(y Float32x8, z Mask32x8) Mask32x8 + +// Equal compares for equality, masked. +// +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) EqualMasked(y Float32x16, z Mask32x16) Mask32x16 + +// Equal compares for equality, masked. +// +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x2) EqualMasked(y Float64x2, z Mask64x2) Mask64x2 + +// Equal compares for equality, masked. +// +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x4) EqualMasked(y Float64x4, z Mask64x4) Mask64x4 + +// Equal compares for equality, masked. +// +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) EqualMasked(y Float64x8, z Mask64x8) Mask64x8 + +// Equal compares for equality, masked. +// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x16) EqualMasked(y Int8x16, z Mask8x16) Mask8x16 + +// Equal compares for equality, masked. +// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x32) EqualMasked(y Int8x32, z Mask8x32) Mask8x32 + +// Equal compares for equality, masked. +// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x64) EqualMasked(y Int8x64, z Mask8x64) Mask8x64 + +// Equal compares for equality, masked. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x8) EqualMasked(y Int16x8, z Mask16x8) Mask16x8 + +// Equal compares for equality, masked. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x16) EqualMasked(y Int16x16, z Mask16x16) Mask16x16 + +// Equal compares for equality, masked. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x32) EqualMasked(y Int16x32, z Mask16x32) Mask16x32 + +// Equal compares for equality, masked. 
+// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x4) EqualMasked(y Int32x4, z Mask32x4) Mask32x4 + +// Equal compares for equality, masked. +// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x8) EqualMasked(y Int32x8, z Mask32x8) Mask32x8 + +// Equal compares for equality, masked. +// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x16) EqualMasked(y Int32x16, z Mask32x16) Mask32x16 + +// Equal compares for equality, masked. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x2) EqualMasked(y Int64x2, z Mask64x2) Mask64x2 + +// Equal compares for equality, masked. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x4) EqualMasked(y Int64x4, z Mask64x4) Mask64x4 + +// Equal compares for equality, masked. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x8) EqualMasked(y Int64x8, z Mask64x8) Mask64x8 + +// Equal compares for equality, masked. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x16) EqualMasked(y Uint8x16, z Mask8x16) Mask8x16 + +// Equal compares for equality, masked. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x32) EqualMasked(y Uint8x32, z Mask8x32) Mask8x32 + +// Equal compares for equality, masked. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x64) EqualMasked(y Uint8x64, z Mask8x64) Mask8x64 + +// Equal compares for equality, masked. +// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x8) EqualMasked(y Uint16x8, z Mask16x8) Mask16x8 + +// Equal compares for equality, masked. +// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x16) EqualMasked(y Uint16x16, z Mask16x16) Mask16x16 + +// Equal compares for equality, masked. +// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x32) EqualMasked(y Uint16x32, z Mask16x32) Mask16x32 + +// Equal compares for equality, masked. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x4) EqualMasked(y Uint32x4, z Mask32x4) Mask32x4 + +// Equal compares for equality, masked. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x8) EqualMasked(y Uint32x8, z Mask32x8) Mask32x8 + +// Equal compares for equality, masked. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x16) EqualMasked(y Uint32x16, z Mask32x16) Mask32x16 + +// Equal compares for equality, masked. +// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) EqualMasked(y Uint64x2, z Mask64x2) Mask64x2 + +// Equal compares for equality, masked. +// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) EqualMasked(y Uint64x4, z Mask64x4) Mask64x4 + +// Equal compares for equality, masked. +// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) EqualMasked(y Uint64x8, z Mask64x8) Mask64x8 + /* Floor */ // Floor rounds elements down to the nearest integer. @@ -967,6 +1745,38 @@ func (x Float64x4) FloorWithPrecision(imm uint8) Float64x4 // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) FloorWithPrecision(imm uint8) Float64x8 +/* FloorWithPrecisionMasked */ + +// FloorWithPrecision rounds elements down with specified precision, masked. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) FloorWithPrecisionMasked(imm uint8, y Mask32x4) Float32x4 + +// FloorWithPrecision rounds elements down with specified precision, masked. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) FloorWithPrecisionMasked(imm uint8, y Mask32x8) Float32x8 + +// FloorWithPrecision rounds elements down with specified precision, masked. 
+// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) FloorWithPrecisionMasked(imm uint8, y Mask32x16) Float32x16 + +// FloorWithPrecision rounds elements down with specified precision, masked. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) FloorWithPrecisionMasked(imm uint8, y Mask64x2) Float64x2 + +// FloorWithPrecision rounds elements down with specified precision, masked. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) FloorWithPrecisionMasked(imm uint8, y Mask64x4) Float64x4 + +// FloorWithPrecision rounds elements down with specified precision, masked. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) FloorWithPrecisionMasked(imm uint8, y Mask64x8) Float64x8 + /* FusedMultiplyAdd */ // FusedMultiplyAdd performs `(v1 * v2) + v3`. @@ -999,6 +1809,38 @@ func (x Float64x4) FusedMultiplyAdd(y Float64x4, z Float64x4) Float64x4 // Asm: VFMADD213PD, CPU Feature: AVX512EVEX func (x Float64x8) FusedMultiplyAdd(y Float64x8, z Float64x8) Float64x8 +/* FusedMultiplyAddMasked */ + +// FusedMultiplyAdd performs `(v1 * v2) + v3`. +// +// Asm: VFMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplyAddMasked(y Float32x4, z Float32x4, u Mask32x4) Float32x4 + +// FusedMultiplyAdd performs `(v1 * v2) + v3`. +// +// Asm: VFMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplyAddMasked(y Float32x8, z Float32x8, u Mask32x8) Float32x8 + +// FusedMultiplyAdd performs `(v1 * v2) + v3`. +// +// Asm: VFMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplyAddMasked(y Float32x16, z Float32x16, u Mask32x16) Float32x16 + +// FusedMultiplyAdd performs `(v1 * v2) + v3`. +// +// Asm: VFMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplyAddMasked(y Float64x2, z Float64x2, u Mask64x2) Float64x2 + +// FusedMultiplyAdd performs `(v1 * v2) + v3`. +// +// Asm: VFMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplyAddMasked(y Float64x4, z Float64x4, u Mask64x4) Float64x4 + +// FusedMultiplyAdd performs `(v1 * v2) + v3`. +// +// Asm: VFMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplyAddMasked(y Float64x8, z Float64x8, u Mask64x8) Float64x8 + /* FusedMultiplyAddSub */ // FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. @@ -1031,6 +1873,38 @@ func (x Float64x4) FusedMultiplyAddSub(y Float64x4, z Float64x4) Float64x4 // Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX func (x Float64x8) FusedMultiplyAddSub(y Float64x8, z Float64x8) Float64x8 +/* FusedMultiplyAddSubMasked */ + +// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// +// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplyAddSubMasked(y Float32x4, z Float32x4, u Mask32x4) Float32x4 + +// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// +// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplyAddSubMasked(y Float32x8, z Float32x8, u Mask32x8) Float32x8 + +// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. 
+// +// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplyAddSubMasked(y Float32x16, z Float32x16, u Mask32x16) Float32x16 + +// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// +// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplyAddSubMasked(y Float64x2, z Float64x2, u Mask64x2) Float64x2 + +// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// +// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplyAddSubMasked(y Float64x4, z Float64x4, u Mask64x4) Float64x4 + +// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// +// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplyAddSubMasked(y Float64x8, z Float64x8, u Mask64x8) Float64x8 + /* FusedMultiplySubAdd */ // FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. @@ -1063,6 +1937,38 @@ func (x Float64x4) FusedMultiplySubAdd(y Float64x4, z Float64x4) Float64x4 // Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX func (x Float64x8) FusedMultiplySubAdd(y Float64x8, z Float64x8) Float64x8 +/* FusedMultiplySubAddMasked */ + +// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// +// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplySubAddMasked(y Float32x4, z Float32x4, u Mask32x4) Float32x4 + +// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// +// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplySubAddMasked(y Float32x8, z Float32x8, u Mask32x8) Float32x8 + +// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// +// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplySubAddMasked(y Float32x16, z Float32x16, u Mask32x16) Float32x16 + +// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// +// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplySubAddMasked(y Float64x2, z Float64x2, u Mask64x2) Float64x2 + +// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// +// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplySubAddMasked(y Float64x4, z Float64x4, u Mask64x4) Float64x4 + +// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. 
+// +// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplySubAddMasked(y Float64x8, z Float64x8, u Mask64x8) Float64x8 + /* GaloisFieldAffineTransform */ // GaloisFieldAffineTransform computes an affine transformation in GF(2^8): @@ -1091,7 +1997,7 @@ func (x Uint8x64) GaloisFieldAffineTransform(y Uint64x8, b uint8) Uint8x64 /* GaloisFieldAffineTransformInversed */ -// GaloisFieldAffineTransform computes an affine transformation in GF(2^8), +// GaloisFieldAffineTransformMasked computes an affine transformation in GF(2^8), // with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y @@ -1100,7 +2006,7 @@ func (x Uint8x64) GaloisFieldAffineTransform(y Uint64x8, b uint8) Uint8x64 // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX func (x Uint8x16) GaloisFieldAffineTransformInversed(y Uint64x2, b uint8) Uint8x16 -// GaloisFieldAffineTransform computes an affine transformation in GF(2^8), +// GaloisFieldAffineTransformMasked computes an affine transformation in GF(2^8), // with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y @@ -1109,7 +2015,7 @@ func (x Uint8x16) GaloisFieldAffineTransformInversed(y Uint64x2, b uint8) Uint8x // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX func (x Uint8x32) GaloisFieldAffineTransformInversed(y Uint64x4, b uint8) Uint8x32 -// GaloisFieldAffineTransform computes an affine transformation in GF(2^8), +// GaloisFieldAffineTransformMasked computes an affine transformation in GF(2^8), // with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y @@ -1118,12 +2024,67 @@ func (x Uint8x32) GaloisFieldAffineTransformInversed(y Uint64x4, b uint8) Uint8x // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX func (x Uint8x64) GaloisFieldAffineTransformInversed(y Uint64x8, b uint8) Uint8x64 -/* GaloisFieldMul */ +/* GaloisFieldAffineTransformInversedMasked */ -// GaloisFieldMul computes element-wise GF(2^8) multiplication with -// reduction polynomial x^8 + x^4 + x^3 + x + 1. +// GaloisFieldAffineTransformMasked computes an affine transformation in GF(2^8), +// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: +// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; +// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// corresponding to a group of 8 elements in x. // -// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX +// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX +func (x Uint8x16) GaloisFieldAffineTransformInversedMasked(y Uint64x2, b uint8, m Mask8x16) Uint8x16 + +// GaloisFieldAffineTransformMasked computes an affine transformation in GF(2^8), +// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: +// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; +// b is an 8-bit vector. 
The affine transformation is y * x + b, with each element of y +// corresponding to a group of 8 elements in x. +// +// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX +func (x Uint8x32) GaloisFieldAffineTransformInversedMasked(y Uint64x4, b uint8, m Mask8x32) Uint8x32 + +// GaloisFieldAffineTransformMasked computes an affine transformation in GF(2^8), +// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: +// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; +// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// corresponding to a group of 8 elements in x. +// +// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX +func (x Uint8x64) GaloisFieldAffineTransformInversedMasked(y Uint64x8, b uint8, m Mask8x64) Uint8x64 + +/* GaloisFieldAffineTransformMasked */ + +// GaloisFieldAffineTransform computes an affine transformation in GF(2^8): +// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; +// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// corresponding to a group of 8 elements in x. +// +// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX +func (x Uint8x16) GaloisFieldAffineTransformMasked(y Uint64x2, b uint8, m Mask8x16) Uint8x16 + +// GaloisFieldAffineTransform computes an affine transformation in GF(2^8): +// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; +// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// corresponding to a group of 8 elements in x. +// +// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX +func (x Uint8x32) GaloisFieldAffineTransformMasked(y Uint64x4, b uint8, m Mask8x32) Uint8x32 + +// GaloisFieldAffineTransform computes an affine transformation in GF(2^8): +// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; +// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// corresponding to a group of 8 elements in x. +// +// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX +func (x Uint8x64) GaloisFieldAffineTransformMasked(y Uint64x8, b uint8, m Mask8x64) Uint8x64 + +/* GaloisFieldMul */ + +// GaloisFieldMul computes element-wise GF(2^8) multiplication with +// reduction polynomial x^8 + x^4 + x^3 + x + 1. +// +// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX func (x Uint8x16) GaloisFieldMul(y Uint8x16) Uint8x16 // GaloisFieldMul computes element-wise GF(2^8) multiplication with @@ -1138,6 +2099,26 @@ func (x Uint8x32) GaloisFieldMul(y Uint8x32) Uint8x32 // Asm: VGF2P8MULB, CPU Feature: AVX512EVEX func (x Uint8x64) GaloisFieldMul(y Uint8x64) Uint8x64 +/* GaloisFieldMulMasked */ + +// GaloisFieldMul computes element-wise GF(2^8) multiplication with +// reduction polynomial x^8 + x^4 + x^3 + x + 1. +// +// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX +func (x Uint8x16) GaloisFieldMulMasked(y Uint8x16, z Mask8x16) Uint8x16 + +// GaloisFieldMul computes element-wise GF(2^8) multiplication with +// reduction polynomial x^8 + x^4 + x^3 + x + 1. +// +// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX +func (x Uint8x32) GaloisFieldMulMasked(y Uint8x32, z Mask8x32) Uint8x32 + +// GaloisFieldMul computes element-wise GF(2^8) multiplication with +// reduction polynomial x^8 + x^4 + x^3 + x + 1. 
+// +// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX +func (x Uint8x64) GaloisFieldMulMasked(y Uint8x64, z Mask8x64) Uint8x64 + /* Get128 */ // Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. @@ -1536,4552 +2517,1285 @@ func (x Uint64x4) GreaterEqual(y Uint64x4) Mask64x4 // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) GreaterEqual(y Uint64x8) Mask64x8 -/* IsNan */ - -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x4) IsNan(y Float32x4) Mask32x4 - -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x8) IsNan(y Float32x8) Mask32x8 +/* GreaterEqualMasked */ -// IsNan checks if elements are NaN. Use as x.IsNan(x). +// GreaterEqual compares for greater than or equal. // // Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) IsNan(y Float32x16) Mask32x16 - -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x2) IsNan(y Float64x2) Mask64x2 - -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x4) IsNan(y Float64x4) Mask64x4 - -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) IsNan(y Float64x8) Mask64x8 - -/* Less */ - -// Less compares for less than. -// -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x4) Less(y Float32x4) Mask32x4 +func (x Float32x4) GreaterEqualMasked(y Float32x4, z Mask32x4) Mask32x4 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x8) Less(y Float32x8) Mask32x8 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x8) GreaterEqualMasked(y Float32x8, z Mask32x8) Mask32x8 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // // Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) Less(y Float32x16) Mask32x16 +func (x Float32x16) GreaterEqualMasked(y Float32x16, z Mask32x16) Mask32x16 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x2) Less(y Float64x2) Mask64x2 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x2) GreaterEqualMasked(y Float64x2, z Mask64x2) Mask64x2 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x4) Less(y Float64x4) Mask64x4 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x4) GreaterEqualMasked(y Float64x4, z Mask64x4) Mask64x4 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // // Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) Less(y Float64x8) Mask64x8 +func (x Float64x8) GreaterEqualMasked(y Float64x8, z Mask64x8) Mask64x8 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // // Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x16) Less(y Int8x16) Mask8x16 +func (x Int8x16) GreaterEqualMasked(y Int8x16, z Mask8x16) Mask8x16 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // // Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x32) Less(y Int8x32) Mask8x32 +func (x Int8x32) GreaterEqualMasked(y Int8x32, z Mask8x32) Mask8x32 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. 
// // Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x64) Less(y Int8x64) Mask8x64 +func (x Int8x64) GreaterEqualMasked(y Int8x64, z Mask8x64) Mask8x64 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // // Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x8) Less(y Int16x8) Mask16x8 +func (x Int16x8) GreaterEqualMasked(y Int16x8, z Mask16x8) Mask16x8 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // // Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x16) Less(y Int16x16) Mask16x16 +func (x Int16x16) GreaterEqualMasked(y Int16x16, z Mask16x16) Mask16x16 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // // Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x32) Less(y Int16x32) Mask16x32 +func (x Int16x32) GreaterEqualMasked(y Int16x32, z Mask16x32) Mask16x32 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // // Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x4) Less(y Int32x4) Mask32x4 +func (x Int32x4) GreaterEqualMasked(y Int32x4, z Mask32x4) Mask32x4 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // // Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x8) Less(y Int32x8) Mask32x8 +func (x Int32x8) GreaterEqualMasked(y Int32x8, z Mask32x8) Mask32x8 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // // Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x16) Less(y Int32x16) Mask32x16 +func (x Int32x16) GreaterEqualMasked(y Int32x16, z Mask32x16) Mask32x16 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x2) Less(y Int64x2) Mask64x2 +func (x Int64x2) GreaterEqualMasked(y Int64x2, z Mask64x2) Mask64x2 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x4) Less(y Int64x4) Mask64x4 +func (x Int64x4) GreaterEqualMasked(y Int64x4, z Mask64x4) Mask64x4 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x8) Less(y Int64x8) Mask64x8 +func (x Int64x8) GreaterEqualMasked(y Int64x8, z Mask64x8) Mask64x8 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) Less(y Uint8x16) Mask8x16 +func (x Uint8x16) GreaterEqualMasked(y Uint8x16, z Mask8x16) Mask8x16 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) Less(y Uint8x32) Mask8x32 +func (x Uint8x32) GreaterEqualMasked(y Uint8x32, z Mask8x32) Mask8x32 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) Less(y Uint8x64) Mask8x64 +func (x Uint8x64) GreaterEqualMasked(y Uint8x64, z Mask8x64) Mask8x64 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) Less(y Uint16x8) Mask16x8 +func (x Uint16x8) GreaterEqualMasked(y Uint16x8, z Mask16x8) Mask16x8 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. 
// // Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) Less(y Uint16x16) Mask16x16 +func (x Uint16x16) GreaterEqualMasked(y Uint16x16, z Mask16x16) Mask16x16 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) Less(y Uint16x32) Mask16x32 +func (x Uint16x32) GreaterEqualMasked(y Uint16x32, z Mask16x32) Mask16x32 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) Less(y Uint32x4) Mask32x4 +func (x Uint32x4) GreaterEqualMasked(y Uint32x4, z Mask32x4) Mask32x4 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) Less(y Uint32x8) Mask32x8 +func (x Uint32x8) GreaterEqualMasked(y Uint32x8, z Mask32x8) Mask32x8 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) Less(y Uint32x16) Mask32x16 +func (x Uint32x16) GreaterEqualMasked(y Uint32x16, z Mask32x16) Mask32x16 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) Less(y Uint64x2) Mask64x2 +func (x Uint64x2) GreaterEqualMasked(y Uint64x2, z Mask64x2) Mask64x2 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) Less(y Uint64x4) Mask64x4 +func (x Uint64x4) GreaterEqualMasked(y Uint64x4, z Mask64x4) Mask64x4 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) Less(y Uint64x8) Mask64x8 +func (x Uint64x8) GreaterEqualMasked(y Uint64x8, z Mask64x8) Mask64x8 -/* LessEqual */ +/* GreaterMasked */ -// LessEqual compares for less than or equal. +// Greater compares for greater than. // -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x4) LessEqual(y Float32x4) Mask32x4 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x4) GreaterMasked(y Float32x4, z Mask32x4) Mask32x4 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x8) LessEqual(y Float32x8) Mask32x8 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x8) GreaterMasked(y Float32x8, z Mask32x8) Mask32x8 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // // Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) LessEqual(y Float32x16) Mask32x16 +func (x Float32x16) GreaterMasked(y Float32x16, z Mask32x16) Mask32x16 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x2) LessEqual(y Float64x2) Mask64x2 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x2) GreaterMasked(y Float64x2, z Mask64x2) Mask64x2 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x4) LessEqual(y Float64x4) Mask64x4 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x4) GreaterMasked(y Float64x4, z Mask64x4) Mask64x4 -// LessEqual compares for less than or equal. +// Greater compares for greater than. 
// // Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) LessEqual(y Float64x8) Mask64x8 +func (x Float64x8) GreaterMasked(y Float64x8, z Mask64x8) Mask64x8 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // // Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x16) LessEqual(y Int8x16) Mask8x16 +func (x Int8x16) GreaterMasked(y Int8x16, z Mask8x16) Mask8x16 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // // Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x32) LessEqual(y Int8x32) Mask8x32 +func (x Int8x32) GreaterMasked(y Int8x32, z Mask8x32) Mask8x32 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // // Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x64) LessEqual(y Int8x64) Mask8x64 +func (x Int8x64) GreaterMasked(y Int8x64, z Mask8x64) Mask8x64 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // // Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x8) LessEqual(y Int16x8) Mask16x8 +func (x Int16x8) GreaterMasked(y Int16x8, z Mask16x8) Mask16x8 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // // Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x16) LessEqual(y Int16x16) Mask16x16 +func (x Int16x16) GreaterMasked(y Int16x16, z Mask16x16) Mask16x16 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // // Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x32) LessEqual(y Int16x32) Mask16x32 +func (x Int16x32) GreaterMasked(y Int16x32, z Mask16x32) Mask16x32 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // // Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x4) LessEqual(y Int32x4) Mask32x4 +func (x Int32x4) GreaterMasked(y Int32x4, z Mask32x4) Mask32x4 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // // Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x8) LessEqual(y Int32x8) Mask32x8 +func (x Int32x8) GreaterMasked(y Int32x8, z Mask32x8) Mask32x8 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // // Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x16) LessEqual(y Int32x16) Mask32x16 +func (x Int32x16) GreaterMasked(y Int32x16, z Mask32x16) Mask32x16 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x2) LessEqual(y Int64x2) Mask64x2 +func (x Int64x2) GreaterMasked(y Int64x2, z Mask64x2) Mask64x2 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x4) LessEqual(y Int64x4) Mask64x4 +func (x Int64x4) GreaterMasked(y Int64x4, z Mask64x4) Mask64x4 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x8) LessEqual(y Int64x8) Mask64x8 +func (x Int64x8) GreaterMasked(y Int64x8, z Mask64x8) Mask64x8 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) LessEqual(y Uint8x16) Mask8x16 +func (x Uint8x16) GreaterMasked(y Uint8x16, z Mask8x16) Mask8x16 -// LessEqual compares for less than or equal. +// Greater compares for greater than. 
// // Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) LessEqual(y Uint8x32) Mask8x32 +func (x Uint8x32) GreaterMasked(y Uint8x32, z Mask8x32) Mask8x32 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) LessEqual(y Uint8x64) Mask8x64 +func (x Uint8x64) GreaterMasked(y Uint8x64, z Mask8x64) Mask8x64 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) LessEqual(y Uint16x8) Mask16x8 +func (x Uint16x8) GreaterMasked(y Uint16x8, z Mask16x8) Mask16x8 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) LessEqual(y Uint16x16) Mask16x16 +func (x Uint16x16) GreaterMasked(y Uint16x16, z Mask16x16) Mask16x16 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) LessEqual(y Uint16x32) Mask16x32 +func (x Uint16x32) GreaterMasked(y Uint16x32, z Mask16x32) Mask16x32 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) LessEqual(y Uint32x4) Mask32x4 +func (x Uint32x4) GreaterMasked(y Uint32x4, z Mask32x4) Mask32x4 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) LessEqual(y Uint32x8) Mask32x8 +func (x Uint32x8) GreaterMasked(y Uint32x8, z Mask32x8) Mask32x8 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) LessEqual(y Uint32x16) Mask32x16 +func (x Uint32x16) GreaterMasked(y Uint32x16, z Mask32x16) Mask32x16 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) LessEqual(y Uint64x2) Mask64x2 +func (x Uint64x2) GreaterMasked(y Uint64x2, z Mask64x2) Mask64x2 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) LessEqual(y Uint64x4) Mask64x4 +func (x Uint64x4) GreaterMasked(y Uint64x4, z Mask64x4) Mask64x4 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) LessEqual(y Uint64x8) Mask64x8 +func (x Uint64x8) GreaterMasked(y Uint64x8, z Mask64x8) Mask64x8 -/* MaskedAbsolute */ +/* IsNan */ -// Absolute computes the absolute value of each element. +// IsNan checks if elements are NaN. Use as x.IsNan(x). // -// Asm: VPABSB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedAbsolute(y Mask8x16) Int8x16 +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x4) IsNan(y Float32x4) Mask32x4 -// Absolute computes the absolute value of each element. +// IsNan checks if elements are NaN. Use as x.IsNan(x). // -// Asm: VPABSB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedAbsolute(y Mask8x32) Int8x32 +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x8) IsNan(y Float32x8) Mask32x8 -// Absolute computes the absolute value of each element. +// IsNan checks if elements are NaN. Use as x.IsNan(x). 
// -// Asm: VPABSB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedAbsolute(y Mask8x64) Int8x64 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) IsNan(y Float32x16) Mask32x16 -// Absolute computes the absolute value of each element. +// IsNan checks if elements are NaN. Use as x.IsNan(x). // -// Asm: VPABSW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedAbsolute(y Mask16x8) Int16x8 +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x2) IsNan(y Float64x2) Mask64x2 -// Absolute computes the absolute value of each element. +// IsNan checks if elements are NaN. Use as x.IsNan(x). // -// Asm: VPABSW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedAbsolute(y Mask16x16) Int16x16 +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x4) IsNan(y Float64x4) Mask64x4 -// Absolute computes the absolute value of each element. +// IsNan checks if elements are NaN. Use as x.IsNan(x). // -// Asm: VPABSW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedAbsolute(y Mask16x32) Int16x32 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) IsNan(y Float64x8) Mask64x8 -// Absolute computes the absolute value of each element. +/* IsNanMasked */ + +// IsNan checks if elements are NaN. Use as x.IsNan(x). // -// Asm: VPABSD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedAbsolute(y Mask32x4) Int32x4 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x4) IsNanMasked(y Float32x4, z Mask32x4) Mask32x4 -// Absolute computes the absolute value of each element. +// IsNan checks if elements are NaN. Use as x.IsNan(x). // -// Asm: VPABSD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedAbsolute(y Mask32x8) Int32x8 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x8) IsNanMasked(y Float32x8, z Mask32x8) Mask32x8 -// Absolute computes the absolute value of each element. +// IsNan checks if elements are NaN. Use as x.IsNan(x). // -// Asm: VPABSD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedAbsolute(y Mask32x16) Int32x16 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) IsNanMasked(y Float32x16, z Mask32x16) Mask32x16 -// Absolute computes the absolute value of each element. +// IsNan checks if elements are NaN. Use as x.IsNan(x). // -// Asm: VPABSQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedAbsolute(y Mask64x2) Int64x2 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x2) IsNanMasked(y Float64x2, z Mask64x2) Mask64x2 -// Absolute computes the absolute value of each element. +// IsNan checks if elements are NaN. Use as x.IsNan(x). // -// Asm: VPABSQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedAbsolute(y Mask64x4) Int64x4 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x4) IsNanMasked(y Float64x4, z Mask64x4) Mask64x4 -// Absolute computes the absolute value of each element. +// IsNan checks if elements are NaN. Use as x.IsNan(x). // -// Asm: VPABSQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedAbsolute(y Mask64x8) Int64x8 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) IsNanMasked(y Float64x8, z Mask64x8) Mask64x8 -/* MaskedAdd */ +/* Less */ -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VADDPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedAdd(y Float32x4, z Mask32x4) Float32x4 +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x4) Less(y Float32x4) Mask32x4 -// Add adds corresponding elements of two vectors. +// Less compares for less than. 
// -// Asm: VADDPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedAdd(y Float32x8, z Mask32x8) Float32x8 +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x8) Less(y Float32x8) Mask32x8 -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VADDPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedAdd(y Float32x16, z Mask32x16) Float32x16 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) Less(y Float32x16) Mask32x16 -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VADDPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedAdd(y Float64x2, z Mask64x2) Float64x2 +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x2) Less(y Float64x2) Mask64x2 -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VADDPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedAdd(y Float64x4, z Mask64x4) Float64x4 +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x4) Less(y Float64x4) Mask64x4 -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VADDPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedAdd(y Float64x8, z Mask64x8) Float64x8 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) Less(y Float64x8) Mask64x8 -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPADDB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedAdd(y Int8x16, z Mask8x16) Int8x16 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x16) Less(y Int8x16) Mask8x16 -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPADDB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedAdd(y Int8x32, z Mask8x32) Int8x32 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x32) Less(y Int8x32) Mask8x32 -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPADDB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedAdd(y Int8x64, z Mask8x64) Int8x64 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x64) Less(y Int8x64) Mask8x64 -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPADDW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedAdd(y Int16x8, z Mask16x8) Int16x8 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x8) Less(y Int16x8) Mask16x8 -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPADDW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedAdd(y Int16x16, z Mask16x16) Int16x16 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x16) Less(y Int16x16) Mask16x16 -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPADDW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedAdd(y Int16x32, z Mask16x32) Int16x32 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x32) Less(y Int16x32) Mask16x32 -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPADDD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedAdd(y Int32x4, z Mask32x4) Int32x4 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x4) Less(y Int32x4) Mask32x4 -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPADDD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedAdd(y Int32x8, z Mask32x8) Int32x8 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x8) Less(y Int32x8) Mask32x8 -// Add adds corresponding elements of two vectors. 
+// Less compares for less than. // -// Asm: VPADDD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedAdd(y Int32x16, z Mask32x16) Int32x16 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x16) Less(y Int32x16) Mask32x16 -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPADDQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedAdd(y Int64x2, z Mask64x2) Int64x2 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x2) Less(y Int64x2) Mask64x2 -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPADDQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedAdd(y Int64x4, z Mask64x4) Int64x4 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x4) Less(y Int64x4) Mask64x4 -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPADDQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedAdd(y Int64x8, z Mask64x8) Int64x8 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x8) Less(y Int64x8) Mask64x8 -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPADDB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedAdd(y Uint8x16, z Mask8x16) Uint8x16 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x16) Less(y Uint8x16) Mask8x16 -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPADDB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedAdd(y Uint8x32, z Mask8x32) Uint8x32 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x32) Less(y Uint8x32) Mask8x32 -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPADDB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedAdd(y Uint8x64, z Mask8x64) Uint8x64 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x64) Less(y Uint8x64) Mask8x64 -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPADDW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedAdd(y Uint16x8, z Mask16x8) Uint16x8 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x8) Less(y Uint16x8) Mask16x8 -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPADDW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedAdd(y Uint16x16, z Mask16x16) Uint16x16 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x16) Less(y Uint16x16) Mask16x16 -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPADDW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedAdd(y Uint16x32, z Mask16x32) Uint16x32 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x32) Less(y Uint16x32) Mask16x32 -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPADDD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedAdd(y Uint32x4, z Mask32x4) Uint32x4 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x4) Less(y Uint32x4) Mask32x4 -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPADDD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedAdd(y Uint32x8, z Mask32x8) Uint32x8 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x8) Less(y Uint32x8) Mask32x8 -// Add adds corresponding elements of two vectors. +// Less compares for less than. 
// -// Asm: VPADDD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedAdd(y Uint32x16, z Mask32x16) Uint32x16 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x16) Less(y Uint32x16) Mask32x16 -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPADDQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedAdd(y Uint64x2, z Mask64x2) Uint64x2 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) Less(y Uint64x2) Mask64x2 -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPADDQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedAdd(y Uint64x4, z Mask64x4) Uint64x4 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) Less(y Uint64x4) Mask64x4 -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPADDQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedAdd(y Uint64x8, z Mask64x8) Uint64x8 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) Less(y Uint64x8) Mask64x8 -/* MaskedAnd */ +/* LessEqual */ -// And performs a masked bitwise AND operation between two vectors. +// LessEqual compares for less than or equal. // -// Asm: VPANDD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedAnd(y Int32x4, z Mask32x4) Int32x4 +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x4) LessEqual(y Float32x4) Mask32x4 -// And performs a masked bitwise AND operation between two vectors. +// LessEqual compares for less than or equal. // -// Asm: VPANDD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedAnd(y Int32x8, z Mask32x8) Int32x8 +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x8) LessEqual(y Float32x8) Mask32x8 -// And performs a masked bitwise AND operation between two vectors. +// LessEqual compares for less than or equal. // -// Asm: VPANDD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedAnd(y Int32x16, z Mask32x16) Int32x16 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) LessEqual(y Float32x16) Mask32x16 -// And performs a masked bitwise AND operation between two vectors. +// LessEqual compares for less than or equal. // -// Asm: VPANDQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedAnd(y Int64x2, z Mask64x2) Int64x2 +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x2) LessEqual(y Float64x2) Mask64x2 -// And performs a masked bitwise AND operation between two vectors. +// LessEqual compares for less than or equal. // -// Asm: VPANDQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedAnd(y Int64x4, z Mask64x4) Int64x4 +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x4) LessEqual(y Float64x4) Mask64x4 -// And performs a masked bitwise AND operation between two vectors. +// LessEqual compares for less than or equal. // -// Asm: VPANDQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedAnd(y Int64x8, z Mask64x8) Int64x8 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) LessEqual(y Float64x8) Mask64x8 -// And performs a masked bitwise AND operation between two vectors. +// LessEqual compares for less than or equal. // -// Asm: VPANDD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedAnd(y Uint32x4, z Mask32x4) Uint32x4 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x16) LessEqual(y Int8x16) Mask8x16 -// And performs a masked bitwise AND operation between two vectors. +// LessEqual compares for less than or equal. 
// -// Asm: VPANDD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedAnd(y Uint32x8, z Mask32x8) Uint32x8 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x32) LessEqual(y Int8x32) Mask8x32 -// And performs a masked bitwise AND operation between two vectors. +// LessEqual compares for less than or equal. // -// Asm: VPANDD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedAnd(y Uint32x16, z Mask32x16) Uint32x16 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x64) LessEqual(y Int8x64) Mask8x64 -// And performs a masked bitwise AND operation between two vectors. +// LessEqual compares for less than or equal. // -// Asm: VPANDQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedAnd(y Uint64x2, z Mask64x2) Uint64x2 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x8) LessEqual(y Int16x8) Mask16x8 -// And performs a masked bitwise AND operation between two vectors. +// LessEqual compares for less than or equal. // -// Asm: VPANDQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedAnd(y Uint64x4, z Mask64x4) Uint64x4 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x16) LessEqual(y Int16x16) Mask16x16 -// And performs a masked bitwise AND operation between two vectors. +// LessEqual compares for less than or equal. // -// Asm: VPANDQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedAnd(y Uint64x8, z Mask64x8) Uint64x8 - -/* MaskedAndNot */ +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x32) LessEqual(y Int16x32) Mask16x32 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// LessEqual compares for less than or equal. // -// Asm: VPANDND, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedAndNot(y Int32x4, z Mask32x4) Int32x4 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x4) LessEqual(y Int32x4) Mask32x4 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// LessEqual compares for less than or equal. // -// Asm: VPANDND, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedAndNot(y Int32x8, z Mask32x8) Int32x8 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x8) LessEqual(y Int32x8) Mask32x8 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// LessEqual compares for less than or equal. // -// Asm: VPANDND, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedAndNot(y Int32x16, z Mask32x16) Int32x16 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x16) LessEqual(y Int32x16) Mask32x16 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// LessEqual compares for less than or equal. // -// Asm: VPANDNQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedAndNot(y Int64x2, z Mask64x2) Int64x2 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x2) LessEqual(y Int64x2) Mask64x2 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// LessEqual compares for less than or equal. // -// Asm: VPANDNQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedAndNot(y Int64x4, z Mask64x4) Int64x4 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x4) LessEqual(y Int64x4) Mask64x4 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// LessEqual compares for less than or equal. // -// Asm: VPANDNQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedAndNot(y Int64x8, z Mask64x8) Int64x8 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x8) LessEqual(y Int64x8) Mask64x8 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// LessEqual compares for less than or equal. 
// -// Asm: VPANDND, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedAndNot(y Uint32x4, z Mask32x4) Uint32x4 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x16) LessEqual(y Uint8x16) Mask8x16 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// LessEqual compares for less than or equal. // -// Asm: VPANDND, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedAndNot(y Uint32x8, z Mask32x8) Uint32x8 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x32) LessEqual(y Uint8x32) Mask8x32 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// LessEqual compares for less than or equal. // -// Asm: VPANDND, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedAndNot(y Uint32x16, z Mask32x16) Uint32x16 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x64) LessEqual(y Uint8x64) Mask8x64 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// LessEqual compares for less than or equal. // -// Asm: VPANDNQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedAndNot(y Uint64x2, z Mask64x2) Uint64x2 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x8) LessEqual(y Uint16x8) Mask16x8 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// LessEqual compares for less than or equal. // -// Asm: VPANDNQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedAndNot(y Uint64x4, z Mask64x4) Uint64x4 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x16) LessEqual(y Uint16x16) Mask16x16 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// LessEqual compares for less than or equal. // -// Asm: VPANDNQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedAndNot(y Uint64x8, z Mask64x8) Uint64x8 - -/* MaskedApproximateReciprocal */ +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x32) LessEqual(y Uint16x32) Mask16x32 -// ApproximateReciprocal computes an approximate reciprocal of each element. +// LessEqual compares for less than or equal. // -// Asm: VRCP14PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedApproximateReciprocal(y Mask32x4) Float32x4 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x4) LessEqual(y Uint32x4) Mask32x4 -// ApproximateReciprocal computes an approximate reciprocal of each element. +// LessEqual compares for less than or equal. // -// Asm: VRCP14PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedApproximateReciprocal(y Mask32x8) Float32x8 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x8) LessEqual(y Uint32x8) Mask32x8 -// ApproximateReciprocal computes an approximate reciprocal of each element. +// LessEqual compares for less than or equal. // -// Asm: VRCP14PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedApproximateReciprocal(y Mask32x16) Float32x16 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x16) LessEqual(y Uint32x16) Mask32x16 -// ApproximateReciprocal computes an approximate reciprocal of each element. +// LessEqual compares for less than or equal. // -// Asm: VRCP14PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedApproximateReciprocal(y Mask64x2) Float64x2 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) LessEqual(y Uint64x2) Mask64x2 -// ApproximateReciprocal computes an approximate reciprocal of each element. +// LessEqual compares for less than or equal. 
// -// Asm: VRCP14PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedApproximateReciprocal(y Mask64x4) Float64x4 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) LessEqual(y Uint64x4) Mask64x4 -// ApproximateReciprocal computes an approximate reciprocal of each element. +// LessEqual compares for less than or equal. // -// Asm: VRCP14PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedApproximateReciprocal(y Mask64x8) Float64x8 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) LessEqual(y Uint64x8) Mask64x8 -/* MaskedApproximateReciprocalOfSqrt */ +/* LessEqualMasked */ -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// LessEqual compares for less than or equal. // -// Asm: VRSQRT14PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedApproximateReciprocalOfSqrt(y Mask32x4) Float32x4 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x4) LessEqualMasked(y Float32x4, z Mask32x4) Mask32x4 -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// LessEqual compares for less than or equal. // -// Asm: VRSQRT14PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedApproximateReciprocalOfSqrt(y Mask32x8) Float32x8 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x8) LessEqualMasked(y Float32x8, z Mask32x8) Mask32x8 -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// LessEqual compares for less than or equal. // -// Asm: VRSQRT14PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedApproximateReciprocalOfSqrt(y Mask32x16) Float32x16 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) LessEqualMasked(y Float32x16, z Mask32x16) Mask32x16 -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// LessEqual compares for less than or equal. // -// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedApproximateReciprocalOfSqrt(y Mask64x2) Float64x2 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x2) LessEqualMasked(y Float64x2, z Mask64x2) Mask64x2 -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// LessEqual compares for less than or equal. // -// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedApproximateReciprocalOfSqrt(y Mask64x4) Float64x4 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x4) LessEqualMasked(y Float64x4, z Mask64x4) Mask64x4 -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// LessEqual compares for less than or equal. // -// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedApproximateReciprocalOfSqrt(y Mask64x8) Float64x8 - -/* MaskedAverage */ +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) LessEqualMasked(y Float64x8, z Mask64x8) Mask64x8 -// Average computes the rounded average of corresponding elements. +// LessEqual compares for less than or equal. // -// Asm: VPAVGB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedAverage(y Uint8x16, z Mask8x16) Uint8x16 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x16) LessEqualMasked(y Int8x16, z Mask8x16) Mask8x16 -// Average computes the rounded average of corresponding elements. +// LessEqual compares for less than or equal. 
// -// Asm: VPAVGB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedAverage(y Uint8x32, z Mask8x32) Uint8x32 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x32) LessEqualMasked(y Int8x32, z Mask8x32) Mask8x32 -// Average computes the rounded average of corresponding elements. +// LessEqual compares for less than or equal. // -// Asm: VPAVGB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedAverage(y Uint8x64, z Mask8x64) Uint8x64 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x64) LessEqualMasked(y Int8x64, z Mask8x64) Mask8x64 -// Average computes the rounded average of corresponding elements. +// LessEqual compares for less than or equal. // -// Asm: VPAVGW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedAverage(y Uint16x8, z Mask16x8) Uint16x8 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x8) LessEqualMasked(y Int16x8, z Mask16x8) Mask16x8 -// Average computes the rounded average of corresponding elements. +// LessEqual compares for less than or equal. // -// Asm: VPAVGW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedAverage(y Uint16x16, z Mask16x16) Uint16x16 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x16) LessEqualMasked(y Int16x16, z Mask16x16) Mask16x16 -// Average computes the rounded average of corresponding elements. +// LessEqual compares for less than or equal. // -// Asm: VPAVGW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedAverage(y Uint16x32, z Mask16x32) Uint16x32 - -/* MaskedCeilWithPrecision */ +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x32) LessEqualMasked(y Int16x32, z Mask16x32) Mask16x32 -// CeilWithPrecision rounds elements up with specified precision, masked. +// LessEqual compares for less than or equal. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedCeilWithPrecision(imm uint8, y Mask32x4) Float32x4 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x4) LessEqualMasked(y Int32x4, z Mask32x4) Mask32x4 -// CeilWithPrecision rounds elements up with specified precision, masked. +// LessEqual compares for less than or equal. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedCeilWithPrecision(imm uint8, y Mask32x8) Float32x8 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x8) LessEqualMasked(y Int32x8, z Mask32x8) Mask32x8 -// CeilWithPrecision rounds elements up with specified precision, masked. +// LessEqual compares for less than or equal. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedCeilWithPrecision(imm uint8, y Mask32x16) Float32x16 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x16) LessEqualMasked(y Int32x16, z Mask32x16) Mask32x16 -// CeilWithPrecision rounds elements up with specified precision, masked. +// LessEqual compares for less than or equal. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedCeilWithPrecision(imm uint8, y Mask64x2) Float64x2 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x2) LessEqualMasked(y Int64x2, z Mask64x2) Mask64x2 -// CeilWithPrecision rounds elements up with specified precision, masked. +// LessEqual compares for less than or equal. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedCeilWithPrecision(imm uint8, y Mask64x4) Float64x4 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x4) LessEqualMasked(y Int64x4, z Mask64x4) Mask64x4 -// CeilWithPrecision rounds elements up with specified precision, masked. +// LessEqual compares for less than or equal. 
// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedCeilWithPrecision(imm uint8, y Mask64x8) Float64x8 - -/* MaskedDiffWithCeilWithPrecision */ +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x8) LessEqualMasked(y Int64x8, z Mask64x8) Mask64x8 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// LessEqual compares for less than or equal. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask32x4) Float32x4 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x16) LessEqualMasked(y Uint8x16, z Mask8x16) Mask8x16 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// LessEqual compares for less than or equal. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask32x8) Float32x8 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x32) LessEqualMasked(y Uint8x32, z Mask8x32) Mask8x32 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// LessEqual compares for less than or equal. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask32x16) Float32x16 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x64) LessEqualMasked(y Uint8x64, z Mask8x64) Mask8x64 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// LessEqual compares for less than or equal. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask64x2) Float64x2 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x8) LessEqualMasked(y Uint16x8, z Mask16x8) Mask16x8 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// LessEqual compares for less than or equal. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask64x4) Float64x4 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x16) LessEqualMasked(y Uint16x16, z Mask16x16) Mask16x16 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// LessEqual compares for less than or equal. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask64x8) Float64x8 - -/* MaskedDiffWithFloorWithPrecision */ +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x32) LessEqualMasked(y Uint16x32, z Mask16x32) Mask16x32 -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// LessEqual compares for less than or equal. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask32x4) Float32x4 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x4) LessEqualMasked(y Uint32x4, z Mask32x4) Mask32x4 -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// LessEqual compares for less than or equal. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask32x8) Float32x8 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x8) LessEqualMasked(y Uint32x8, z Mask32x8) Mask32x8 -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// LessEqual compares for less than or equal. 
// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask32x16) Float32x16 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x16) LessEqualMasked(y Uint32x16, z Mask32x16) Mask32x16 -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// LessEqual compares for less than or equal. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask64x2) Float64x2 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) LessEqualMasked(y Uint64x2, z Mask64x2) Mask64x2 -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// LessEqual compares for less than or equal. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask64x4) Float64x4 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) LessEqualMasked(y Uint64x4, z Mask64x4) Mask64x4 -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask64x8) Float64x8 - -/* MaskedDiffWithRoundWithPrecision */ - -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask32x4) Float32x4 - -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask32x8) Float32x8 - -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask32x16) Float32x16 - -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask64x2) Float64x2 - -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask64x4) Float64x4 - -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask64x8) Float64x8 - -/* MaskedDiffWithTruncWithPrecision */ - -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask32x4) Float32x4 - -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask32x8) Float32x8 - -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask32x16) Float32x16 - -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. 
-// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask64x2) Float64x2 - -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask64x4) Float64x4 - -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask64x8) Float64x8 - -/* MaskedDiv */ - -// Div divides elements of two vectors. -// -// Asm: VDIVPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedDiv(y Float32x4, z Mask32x4) Float32x4 - -// Div divides elements of two vectors. -// -// Asm: VDIVPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedDiv(y Float32x8, z Mask32x8) Float32x8 - -// Div divides elements of two vectors. -// -// Asm: VDIVPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedDiv(y Float32x16, z Mask32x16) Float32x16 - -// Div divides elements of two vectors. -// -// Asm: VDIVPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedDiv(y Float64x2, z Mask64x2) Float64x2 - -// Div divides elements of two vectors. -// -// Asm: VDIVPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedDiv(y Float64x4, z Mask64x4) Float64x4 - -// Div divides elements of two vectors. +// LessEqual compares for less than or equal. // -// Asm: VDIVPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedDiv(y Float64x8, z Mask64x8) Float64x8 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) LessEqualMasked(y Uint64x8, z Mask64x8) Mask64x8 -/* MaskedEqual */ +/* LessMasked */ -// Equal compares for equality, masked. +// Less compares for less than. // // Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedEqual(y Float32x4, z Mask32x4) Mask32x4 +func (x Float32x4) LessMasked(y Float32x4, z Mask32x4) Mask32x4 -// Equal compares for equality, masked. +// Less compares for less than. // // Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedEqual(y Float32x8, z Mask32x8) Mask32x8 +func (x Float32x8) LessMasked(y Float32x8, z Mask32x8) Mask32x8 -// Equal compares for equality, masked. +// Less compares for less than. // // Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedEqual(y Float32x16, z Mask32x16) Mask32x16 +func (x Float32x16) LessMasked(y Float32x16, z Mask32x16) Mask32x16 -// Equal compares for equality, masked. +// Less compares for less than. // // Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedEqual(y Float64x2, z Mask64x2) Mask64x2 +func (x Float64x2) LessMasked(y Float64x2, z Mask64x2) Mask64x2 -// Equal compares for equality, masked. +// Less compares for less than. // // Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedEqual(y Float64x4, z Mask64x4) Mask64x4 +func (x Float64x4) LessMasked(y Float64x4, z Mask64x4) Mask64x4 -// Equal compares for equality, masked. +// Less compares for less than. // // Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedEqual(y Float64x8, z Mask64x8) Mask64x8 - -// Equal compares for equality, masked. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedEqual(y Int8x16, z Mask8x16) Mask8x16 - -// Equal compares for equality, masked. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedEqual(y Int8x32, z Mask8x32) Mask8x32 - -// Equal compares for equality, masked. 
-// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedEqual(y Int8x64, z Mask8x64) Mask8x64 - -// Equal compares for equality, masked. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedEqual(y Int16x8, z Mask16x8) Mask16x8 - -// Equal compares for equality, masked. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedEqual(y Int16x16, z Mask16x16) Mask16x16 - -// Equal compares for equality, masked. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedEqual(y Int16x32, z Mask16x32) Mask16x32 - -// Equal compares for equality, masked. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedEqual(y Int32x4, z Mask32x4) Mask32x4 - -// Equal compares for equality, masked. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedEqual(y Int32x8, z Mask32x8) Mask32x8 - -// Equal compares for equality, masked. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedEqual(y Int32x16, z Mask32x16) Mask32x16 - -// Equal compares for equality, masked. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedEqual(y Int64x2, z Mask64x2) Mask64x2 - -// Equal compares for equality, masked. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedEqual(y Int64x4, z Mask64x4) Mask64x4 - -// Equal compares for equality, masked. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedEqual(y Int64x8, z Mask64x8) Mask64x8 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedEqual(y Uint8x16, z Mask8x16) Mask8x16 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedEqual(y Uint8x32, z Mask8x32) Mask8x32 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedEqual(y Uint8x64, z Mask8x64) Mask8x64 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedEqual(y Uint16x8, z Mask16x8) Mask16x8 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedEqual(y Uint16x16, z Mask16x16) Mask16x16 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedEqual(y Uint16x32, z Mask16x32) Mask16x32 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedEqual(y Uint32x4, z Mask32x4) Mask32x4 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedEqual(y Uint32x8, z Mask32x8) Mask32x8 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedEqual(y Uint32x16, z Mask32x16) Mask32x16 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedEqual(y Uint64x2, z Mask64x2) Mask64x2 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedEqual(y Uint64x4, z Mask64x4) Mask64x4 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedEqual(y Uint64x8, z Mask64x8) Mask64x8 - -/* MaskedFloorWithPrecision */ - -// FloorWithPrecision rounds elements down with specified precision, masked. 
-// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFloorWithPrecision(imm uint8, y Mask32x4) Float32x4 +func (x Float64x8) LessMasked(y Float64x8, z Mask64x8) Mask64x8 -// FloorWithPrecision rounds elements down with specified precision, masked. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFloorWithPrecision(imm uint8, y Mask32x8) Float32x8 - -// FloorWithPrecision rounds elements down with specified precision, masked. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFloorWithPrecision(imm uint8, y Mask32x16) Float32x16 - -// FloorWithPrecision rounds elements down with specified precision, masked. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFloorWithPrecision(imm uint8, y Mask64x2) Float64x2 - -// FloorWithPrecision rounds elements down with specified precision, masked. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFloorWithPrecision(imm uint8, y Mask64x4) Float64x4 - -// FloorWithPrecision rounds elements down with specified precision, masked. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFloorWithPrecision(imm uint8, y Mask64x8) Float64x8 - -/* MaskedFusedMultiplyAdd */ - -// FusedMultiplyAdd performs `(v1 * v2) + v3`. -// -// Asm: VFMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplyAdd(y Float32x4, z Float32x4, u Mask32x4) Float32x4 - -// FusedMultiplyAdd performs `(v1 * v2) + v3`. -// -// Asm: VFMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplyAdd(y Float32x8, z Float32x8, u Mask32x8) Float32x8 - -// FusedMultiplyAdd performs `(v1 * v2) + v3`. -// -// Asm: VFMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplyAdd(y Float32x16, z Float32x16, u Mask32x16) Float32x16 - -// FusedMultiplyAdd performs `(v1 * v2) + v3`. -// -// Asm: VFMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplyAdd(y Float64x2, z Float64x2, u Mask64x2) Float64x2 - -// FusedMultiplyAdd performs `(v1 * v2) + v3`. -// -// Asm: VFMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplyAdd(y Float64x4, z Float64x4, u Mask64x4) Float64x4 - -// FusedMultiplyAdd performs `(v1 * v2) + v3`. -// -// Asm: VFMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplyAdd(y Float64x8, z Float64x8, u Mask64x8) Float64x8 - -/* MaskedFusedMultiplyAddSub */ - -// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. -// -// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplyAddSub(y Float32x4, z Float32x4, u Mask32x4) Float32x4 - -// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. -// -// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplyAddSub(y Float32x8, z Float32x8, u Mask32x8) Float32x8 - -// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. -// -// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplyAddSub(y Float32x16, z Float32x16, u Mask32x16) Float32x16 - -// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. 
-// -// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplyAddSub(y Float64x2, z Float64x2, u Mask64x2) Float64x2 - -// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. -// -// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplyAddSub(y Float64x4, z Float64x4, u Mask64x4) Float64x4 - -// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. -// -// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplyAddSub(y Float64x8, z Float64x8, u Mask64x8) Float64x8 - -/* MaskedFusedMultiplySubAdd */ - -// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. -// -// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplySubAdd(y Float32x4, z Float32x4, u Mask32x4) Float32x4 - -// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. -// -// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplySubAdd(y Float32x8, z Float32x8, u Mask32x8) Float32x8 - -// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. -// -// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplySubAdd(y Float32x16, z Float32x16, u Mask32x16) Float32x16 - -// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. -// -// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplySubAdd(y Float64x2, z Float64x2, u Mask64x2) Float64x2 - -// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. -// -// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplySubAdd(y Float64x4, z Float64x4, u Mask64x4) Float64x4 - -// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. -// -// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplySubAdd(y Float64x8, z Float64x8, u Mask64x8) Float64x8 - -/* MaskedGaloisFieldAffineTransform */ - -// GaloisFieldAffineTransform computes an affine transformation in GF(2^8): -// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y -// corresponding to a group of 8 elements in x. -// -// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedGaloisFieldAffineTransform(y Uint64x2, b uint8, m Mask8x16) Uint8x16 - -// GaloisFieldAffineTransform computes an affine transformation in GF(2^8): -// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y -// corresponding to a group of 8 elements in x. 
-// -// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedGaloisFieldAffineTransform(y Uint64x4, b uint8, m Mask8x32) Uint8x32 - -// GaloisFieldAffineTransform computes an affine transformation in GF(2^8): -// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y -// corresponding to a group of 8 elements in x. -// -// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedGaloisFieldAffineTransform(y Uint64x8, b uint8, m Mask8x64) Uint8x64 - -/* MaskedGaloisFieldAffineTransformInversed */ - -// GaloisFieldAffineTransform computes an affine transformation in GF(2^8), -// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: -// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y -// corresponding to a group of 8 elements in x. -// -// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedGaloisFieldAffineTransformInversed(y Uint64x2, b uint8, m Mask8x16) Uint8x16 - -// GaloisFieldAffineTransform computes an affine transformation in GF(2^8), -// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: -// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y -// corresponding to a group of 8 elements in x. -// -// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedGaloisFieldAffineTransformInversed(y Uint64x4, b uint8, m Mask8x32) Uint8x32 - -// GaloisFieldAffineTransform computes an affine transformation in GF(2^8), -// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: -// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y -// corresponding to a group of 8 elements in x. -// -// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedGaloisFieldAffineTransformInversed(y Uint64x8, b uint8, m Mask8x64) Uint8x64 - -/* MaskedGaloisFieldMul */ - -// GaloisFieldMul computes element-wise GF(2^8) multiplication with -// reduction polynomial x^8 + x^4 + x^3 + x + 1. -// -// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedGaloisFieldMul(y Uint8x16, z Mask8x16) Uint8x16 - -// GaloisFieldMul computes element-wise GF(2^8) multiplication with -// reduction polynomial x^8 + x^4 + x^3 + x + 1. -// -// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedGaloisFieldMul(y Uint8x32, z Mask8x32) Uint8x32 - -// GaloisFieldMul computes element-wise GF(2^8) multiplication with -// reduction polynomial x^8 + x^4 + x^3 + x + 1. -// -// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedGaloisFieldMul(y Uint8x64, z Mask8x64) Uint8x64 - -/* MaskedGreater */ - -// Greater compares for greater than. -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedGreater(y Float32x4, z Mask32x4) Mask32x4 - -// Greater compares for greater than. -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedGreater(y Float32x8, z Mask32x8) Mask32x8 - -// Greater compares for greater than. 
-// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedGreater(y Float32x16, z Mask32x16) Mask32x16 - -// Greater compares for greater than. -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedGreater(y Float64x2, z Mask64x2) Mask64x2 - -// Greater compares for greater than. -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedGreater(y Float64x4, z Mask64x4) Mask64x4 - -// Greater compares for greater than. -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedGreater(y Float64x8, z Mask64x8) Mask64x8 - -// Greater compares for greater than. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedGreater(y Int8x16, z Mask8x16) Mask8x16 - -// Greater compares for greater than. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedGreater(y Int8x32, z Mask8x32) Mask8x32 - -// Greater compares for greater than. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedGreater(y Int8x64, z Mask8x64) Mask8x64 - -// Greater compares for greater than. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedGreater(y Int16x8, z Mask16x8) Mask16x8 - -// Greater compares for greater than. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedGreater(y Int16x16, z Mask16x16) Mask16x16 - -// Greater compares for greater than. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedGreater(y Int16x32, z Mask16x32) Mask16x32 - -// Greater compares for greater than. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedGreater(y Int32x4, z Mask32x4) Mask32x4 - -// Greater compares for greater than. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedGreater(y Int32x8, z Mask32x8) Mask32x8 - -// Greater compares for greater than. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedGreater(y Int32x16, z Mask32x16) Mask32x16 - -// Greater compares for greater than. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedGreater(y Int64x2, z Mask64x2) Mask64x2 - -// Greater compares for greater than. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedGreater(y Int64x4, z Mask64x4) Mask64x4 - -// Greater compares for greater than. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedGreater(y Int64x8, z Mask64x8) Mask64x8 - -// Greater compares for greater than. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedGreater(y Uint8x16, z Mask8x16) Mask8x16 - -// Greater compares for greater than. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedGreater(y Uint8x32, z Mask8x32) Mask8x32 - -// Greater compares for greater than. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedGreater(y Uint8x64, z Mask8x64) Mask8x64 - -// Greater compares for greater than. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedGreater(y Uint16x8, z Mask16x8) Mask16x8 - -// Greater compares for greater than. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedGreater(y Uint16x16, z Mask16x16) Mask16x16 - -// Greater compares for greater than. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedGreater(y Uint16x32, z Mask16x32) Mask16x32 - -// Greater compares for greater than. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedGreater(y Uint32x4, z Mask32x4) Mask32x4 - -// Greater compares for greater than. 
-// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedGreater(y Uint32x8, z Mask32x8) Mask32x8 - -// Greater compares for greater than. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedGreater(y Uint32x16, z Mask32x16) Mask32x16 - -// Greater compares for greater than. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedGreater(y Uint64x2, z Mask64x2) Mask64x2 - -// Greater compares for greater than. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedGreater(y Uint64x4, z Mask64x4) Mask64x4 - -// Greater compares for greater than. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedGreater(y Uint64x8, z Mask64x8) Mask64x8 - -/* MaskedGreaterEqual */ - -// GreaterEqual compares for greater than or equal. -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedGreaterEqual(y Float32x4, z Mask32x4) Mask32x4 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedGreaterEqual(y Float32x8, z Mask32x8) Mask32x8 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedGreaterEqual(y Float32x16, z Mask32x16) Mask32x16 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedGreaterEqual(y Float64x2, z Mask64x2) Mask64x2 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedGreaterEqual(y Float64x4, z Mask64x4) Mask64x4 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedGreaterEqual(y Float64x8, z Mask64x8) Mask64x8 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedGreaterEqual(y Int8x16, z Mask8x16) Mask8x16 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedGreaterEqual(y Int8x32, z Mask8x32) Mask8x32 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedGreaterEqual(y Int8x64, z Mask8x64) Mask8x64 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedGreaterEqual(y Int16x8, z Mask16x8) Mask16x8 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedGreaterEqual(y Int16x16, z Mask16x16) Mask16x16 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedGreaterEqual(y Int16x32, z Mask16x32) Mask16x32 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedGreaterEqual(y Int32x4, z Mask32x4) Mask32x4 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedGreaterEqual(y Int32x8, z Mask32x8) Mask32x8 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedGreaterEqual(y Int32x16, z Mask32x16) Mask32x16 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedGreaterEqual(y Int64x2, z Mask64x2) Mask64x2 - -// GreaterEqual compares for greater than or equal. 
-// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedGreaterEqual(y Int64x4, z Mask64x4) Mask64x4 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedGreaterEqual(y Int64x8, z Mask64x8) Mask64x8 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedGreaterEqual(y Uint8x16, z Mask8x16) Mask8x16 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedGreaterEqual(y Uint8x32, z Mask8x32) Mask8x32 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedGreaterEqual(y Uint8x64, z Mask8x64) Mask8x64 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedGreaterEqual(y Uint16x8, z Mask16x8) Mask16x8 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedGreaterEqual(y Uint16x16, z Mask16x16) Mask16x16 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedGreaterEqual(y Uint16x32, z Mask16x32) Mask16x32 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedGreaterEqual(y Uint32x4, z Mask32x4) Mask32x4 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedGreaterEqual(y Uint32x8, z Mask32x8) Mask32x8 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedGreaterEqual(y Uint32x16, z Mask32x16) Mask32x16 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedGreaterEqual(y Uint64x2, z Mask64x2) Mask64x2 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedGreaterEqual(y Uint64x4, z Mask64x4) Mask64x4 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedGreaterEqual(y Uint64x8, z Mask64x8) Mask64x8 - -/* MaskedIsNan */ - -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedIsNan(y Float32x4, z Mask32x4) Mask32x4 - -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedIsNan(y Float32x8, z Mask32x8) Mask32x8 - -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedIsNan(y Float32x16, z Mask32x16) Mask32x16 - -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedIsNan(y Float64x2, z Mask64x2) Mask64x2 - -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedIsNan(y Float64x4, z Mask64x4) Mask64x4 - -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedIsNan(y Float64x8, z Mask64x8) Mask64x8 - -/* MaskedLess */ - -// Less compares for less than. 
-// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedLess(y Float32x4, z Mask32x4) Mask32x4 - -// Less compares for less than. -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedLess(y Float32x8, z Mask32x8) Mask32x8 - -// Less compares for less than. -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedLess(y Float32x16, z Mask32x16) Mask32x16 - -// Less compares for less than. -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedLess(y Float64x2, z Mask64x2) Mask64x2 - -// Less compares for less than. -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedLess(y Float64x4, z Mask64x4) Mask64x4 - -// Less compares for less than. -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedLess(y Float64x8, z Mask64x8) Mask64x8 - -// Less compares for less than. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedLess(y Int8x16, z Mask8x16) Mask8x16 - -// Less compares for less than. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedLess(y Int8x32, z Mask8x32) Mask8x32 - -// Less compares for less than. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedLess(y Int8x64, z Mask8x64) Mask8x64 - -// Less compares for less than. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedLess(y Int16x8, z Mask16x8) Mask16x8 - -// Less compares for less than. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedLess(y Int16x16, z Mask16x16) Mask16x16 - -// Less compares for less than. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedLess(y Int16x32, z Mask16x32) Mask16x32 - -// Less compares for less than. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedLess(y Int32x4, z Mask32x4) Mask32x4 - -// Less compares for less than. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedLess(y Int32x8, z Mask32x8) Mask32x8 - -// Less compares for less than. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedLess(y Int32x16, z Mask32x16) Mask32x16 - -// Less compares for less than. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedLess(y Int64x2, z Mask64x2) Mask64x2 - -// Less compares for less than. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedLess(y Int64x4, z Mask64x4) Mask64x4 - -// Less compares for less than. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedLess(y Int64x8, z Mask64x8) Mask64x8 - -// Less compares for less than. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedLess(y Uint8x16, z Mask8x16) Mask8x16 - -// Less compares for less than. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedLess(y Uint8x32, z Mask8x32) Mask8x32 - -// Less compares for less than. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedLess(y Uint8x64, z Mask8x64) Mask8x64 - -// Less compares for less than. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedLess(y Uint16x8, z Mask16x8) Mask16x8 - -// Less compares for less than. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedLess(y Uint16x16, z Mask16x16) Mask16x16 - -// Less compares for less than. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedLess(y Uint16x32, z Mask16x32) Mask16x32 - -// Less compares for less than. 
-// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedLess(y Uint32x4, z Mask32x4) Mask32x4 - -// Less compares for less than. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedLess(y Uint32x8, z Mask32x8) Mask32x8 - -// Less compares for less than. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedLess(y Uint32x16, z Mask32x16) Mask32x16 - -// Less compares for less than. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedLess(y Uint64x2, z Mask64x2) Mask64x2 - -// Less compares for less than. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedLess(y Uint64x4, z Mask64x4) Mask64x4 - -// Less compares for less than. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedLess(y Uint64x8, z Mask64x8) Mask64x8 - -/* MaskedLessEqual */ - -// LessEqual compares for less than or equal. -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedLessEqual(y Float32x4, z Mask32x4) Mask32x4 - -// LessEqual compares for less than or equal. -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedLessEqual(y Float32x8, z Mask32x8) Mask32x8 - -// LessEqual compares for less than or equal. -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedLessEqual(y Float32x16, z Mask32x16) Mask32x16 - -// LessEqual compares for less than or equal. -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedLessEqual(y Float64x2, z Mask64x2) Mask64x2 - -// LessEqual compares for less than or equal. -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedLessEqual(y Float64x4, z Mask64x4) Mask64x4 - -// LessEqual compares for less than or equal. -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedLessEqual(y Float64x8, z Mask64x8) Mask64x8 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedLessEqual(y Int8x16, z Mask8x16) Mask8x16 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedLessEqual(y Int8x32, z Mask8x32) Mask8x32 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedLessEqual(y Int8x64, z Mask8x64) Mask8x64 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedLessEqual(y Int16x8, z Mask16x8) Mask16x8 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedLessEqual(y Int16x16, z Mask16x16) Mask16x16 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedLessEqual(y Int16x32, z Mask16x32) Mask16x32 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedLessEqual(y Int32x4, z Mask32x4) Mask32x4 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedLessEqual(y Int32x8, z Mask32x8) Mask32x8 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedLessEqual(y Int32x16, z Mask32x16) Mask32x16 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedLessEqual(y Int64x2, z Mask64x2) Mask64x2 - -// LessEqual compares for less than or equal. 
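// exampleBounds is an illustrative sketch (editorial, not generated code) of
// a per-lane bounds test built from the masked comparisons above; how
// unselected lanes behave is defined by the implementation, not this comment.
func exampleBounds(v, lo, hi Int32x8, m Mask32x8) (below, atMost Mask32x8) {
	below = v.MaskedLess(lo, m)       // selected lanes where v < lo
	atMost = v.MaskedLessEqual(hi, m) // selected lanes where v <= hi
	return below, atMost
}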
-// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedLessEqual(y Int64x4, z Mask64x4) Mask64x4 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedLessEqual(y Int64x8, z Mask64x8) Mask64x8 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedLessEqual(y Uint8x16, z Mask8x16) Mask8x16 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedLessEqual(y Uint8x32, z Mask8x32) Mask8x32 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedLessEqual(y Uint8x64, z Mask8x64) Mask8x64 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedLessEqual(y Uint16x8, z Mask16x8) Mask16x8 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedLessEqual(y Uint16x16, z Mask16x16) Mask16x16 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedLessEqual(y Uint16x32, z Mask16x32) Mask16x32 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedLessEqual(y Uint32x4, z Mask32x4) Mask32x4 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedLessEqual(y Uint32x8, z Mask32x8) Mask32x8 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedLessEqual(y Uint32x16, z Mask32x16) Mask32x16 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedLessEqual(y Uint64x2, z Mask64x2) Mask64x2 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedLessEqual(y Uint64x4, z Mask64x4) Mask64x4 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedLessEqual(y Uint64x8, z Mask64x8) Mask64x8 - -/* MaskedMax */ - -// Max computes the maximum of corresponding elements. -// -// Asm: VMAXPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedMax(y Float32x4, z Mask32x4) Float32x4 - -// Max computes the maximum of corresponding elements. -// -// Asm: VMAXPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedMax(y Float32x8, z Mask32x8) Float32x8 - -// Max computes the maximum of corresponding elements. -// -// Asm: VMAXPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedMax(y Float32x16, z Mask32x16) Float32x16 - -// Max computes the maximum of corresponding elements. -// -// Asm: VMAXPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedMax(y Float64x2, z Mask64x2) Float64x2 - -// Max computes the maximum of corresponding elements. -// -// Asm: VMAXPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedMax(y Float64x4, z Mask64x4) Float64x4 - -// Max computes the maximum of corresponding elements. -// -// Asm: VMAXPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedMax(y Float64x8, z Mask64x8) Float64x8 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedMax(y Int8x16, z Mask8x16) Int8x16 - -// Max computes the maximum of corresponding elements. 
-// -// Asm: VPMAXSB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedMax(y Int8x32, z Mask8x32) Int8x32 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedMax(y Int8x64, z Mask8x64) Int8x64 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedMax(y Int16x8, z Mask16x8) Int16x8 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedMax(y Int16x16, z Mask16x16) Int16x16 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedMax(y Int16x32, z Mask16x32) Int16x32 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedMax(y Int32x4, z Mask32x4) Int32x4 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedMax(y Int32x8, z Mask32x8) Int32x8 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedMax(y Int32x16, z Mask32x16) Int32x16 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedMax(y Int64x2, z Mask64x2) Int64x2 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedMax(y Int64x4, z Mask64x4) Int64x4 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedMax(y Int64x8, z Mask64x8) Int64x8 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedMax(y Uint8x16, z Mask8x16) Uint8x16 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedMax(y Uint8x32, z Mask8x32) Uint8x32 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedMax(y Uint8x64, z Mask8x64) Uint8x64 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedMax(y Uint16x8, z Mask16x8) Uint16x8 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedMax(y Uint16x16, z Mask16x16) Uint16x16 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedMax(y Uint16x32, z Mask16x32) Uint16x32 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedMax(y Uint32x4, z Mask32x4) Uint32x4 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedMax(y Uint32x8, z Mask32x8) Uint32x8 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedMax(y Uint32x16, z Mask32x16) Uint32x16 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedMax(y Uint64x2, z Mask64x2) Uint64x2 - -// Max computes the maximum of corresponding elements. 
-// -// Asm: VPMAXUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedMax(y Uint64x4, z Mask64x4) Uint64x4 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedMax(y Uint64x8, z Mask64x8) Uint64x8 - -/* MaskedMin */ - -// Min computes the minimum of corresponding elements. -// -// Asm: VMINPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedMin(y Float32x4, z Mask32x4) Float32x4 - -// Min computes the minimum of corresponding elements. -// -// Asm: VMINPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedMin(y Float32x8, z Mask32x8) Float32x8 - -// Min computes the minimum of corresponding elements. -// -// Asm: VMINPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedMin(y Float32x16, z Mask32x16) Float32x16 - -// Min computes the minimum of corresponding elements. -// -// Asm: VMINPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedMin(y Float64x2, z Mask64x2) Float64x2 - -// Min computes the minimum of corresponding elements. -// -// Asm: VMINPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedMin(y Float64x4, z Mask64x4) Float64x4 - -// Min computes the minimum of corresponding elements. -// -// Asm: VMINPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedMin(y Float64x8, z Mask64x8) Float64x8 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedMin(y Int8x16, z Mask8x16) Int8x16 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedMin(y Int8x32, z Mask8x32) Int8x32 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedMin(y Int8x64, z Mask8x64) Int8x64 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedMin(y Int16x8, z Mask16x8) Int16x8 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedMin(y Int16x16, z Mask16x16) Int16x16 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedMin(y Int16x32, z Mask16x32) Int16x32 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedMin(y Int32x4, z Mask32x4) Int32x4 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedMin(y Int32x8, z Mask32x8) Int32x8 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedMin(y Int32x16, z Mask32x16) Int32x16 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedMin(y Int64x2, z Mask64x2) Int64x2 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedMin(y Int64x4, z Mask64x4) Int64x4 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedMin(y Int64x8, z Mask64x8) Int64x8 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedMin(y Uint8x16, z Mask8x16) Uint8x16 - -// Min computes the minimum of corresponding elements. 
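// exampleClamp is an editorial sketch of a clamp built from the masked
// Max/Min methods above; the name is hypothetical and only the selected
// lanes are meaningfully constrained.
func exampleClamp(v, lo, hi Float32x8, m Mask32x8) Float32x8 {
	return v.MaskedMax(lo, m).MaskedMin(hi, m) // clamp selected lanes to [lo, hi]
}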
-// -// Asm: VPMINUB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedMin(y Uint8x32, z Mask8x32) Uint8x32 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedMin(y Uint8x64, z Mask8x64) Uint8x64 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedMin(y Uint16x8, z Mask16x8) Uint16x8 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedMin(y Uint16x16, z Mask16x16) Uint16x16 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedMin(y Uint16x32, z Mask16x32) Uint16x32 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedMin(y Uint32x4, z Mask32x4) Uint32x4 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedMin(y Uint32x8, z Mask32x8) Uint32x8 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedMin(y Uint32x16, z Mask32x16) Uint32x16 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedMin(y Uint64x2, z Mask64x2) Uint64x2 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedMin(y Uint64x4, z Mask64x4) Uint64x4 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedMin(y Uint64x8, z Mask64x8) Uint64x8 - -/* MaskedMul */ - -// Mul multiplies corresponding elements of two vectors, masked. -// -// Asm: VMULPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedMul(y Float32x4, z Mask32x4) Float32x4 - -// Mul multiplies corresponding elements of two vectors, masked. -// -// Asm: VMULPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedMul(y Float32x8, z Mask32x8) Float32x8 - -// Mul multiplies corresponding elements of two vectors, masked. -// -// Asm: VMULPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedMul(y Float32x16, z Mask32x16) Float32x16 - -// Mul multiplies corresponding elements of two vectors, masked. -// -// Asm: VMULPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedMul(y Float64x2, z Mask64x2) Float64x2 - -// Mul multiplies corresponding elements of two vectors, masked. -// -// Asm: VMULPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedMul(y Float64x4, z Mask64x4) Float64x4 - -// Mul multiplies corresponding elements of two vectors, masked. -// -// Asm: VMULPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedMul(y Float64x8, z Mask64x8) Float64x8 - -/* MaskedMulByPowOf2 */ - -// MulByPowOf2 multiplies elements by a power of 2. -// -// Asm: VSCALEFPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedMulByPowOf2(y Float32x4, z Mask32x4) Float32x4 - -// MulByPowOf2 multiplies elements by a power of 2. -// -// Asm: VSCALEFPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedMulByPowOf2(y Float32x8, z Mask32x8) Float32x8 - -// MulByPowOf2 multiplies elements by a power of 2. -// -// Asm: VSCALEFPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedMulByPowOf2(y Float32x16, z Mask32x16) Float32x16 - -// MulByPowOf2 multiplies elements by a power of 2. 
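// exampleScale is an editorial sketch of the masked multiply forms above:
// a plain per-lane product and the power-of-two scaling variant (the
// exponent coming from b is an assumption based on the signature).
func exampleScale(a, b Float32x8, m Mask32x8) (prod, scaled Float32x8) {
	prod = a.MaskedMul(b, m)           // per-lane product on selected lanes
	scaled = a.MaskedMulByPowOf2(b, m) // a scaled by a power of 2, per the doc above
	return prod, scaled
}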
-// -// Asm: VSCALEFPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedMulByPowOf2(y Float64x2, z Mask64x2) Float64x2 - -// MulByPowOf2 multiplies elements by a power of 2. -// -// Asm: VSCALEFPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedMulByPowOf2(y Float64x4, z Mask64x4) Float64x4 - -// MulByPowOf2 multiplies elements by a power of 2. -// -// Asm: VSCALEFPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedMulByPowOf2(y Float64x8, z Mask64x8) Float64x8 - -/* MaskedMulEvenWiden */ - -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. -// -// Asm: VPMULDQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedMulEvenWiden(y Int64x2, z Mask64x2) Int64x2 - -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. -// -// Asm: VPMULDQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedMulEvenWiden(y Int64x4, z Mask64x4) Int64x4 - -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. -// -// Asm: VPMULDQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedMulEvenWiden(y Int64x8, z Mask64x8) Int64x8 - -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. -// -// Asm: VPMULUDQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedMulEvenWiden(y Uint64x2, z Mask64x2) Uint64x2 - -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. -// -// Asm: VPMULUDQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedMulEvenWiden(y Uint64x4, z Mask64x4) Uint64x4 - -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. -// -// Asm: VPMULUDQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedMulEvenWiden(y Uint64x8, z Mask64x8) Uint64x8 - -/* MaskedMulHigh */ - -// MulHigh multiplies elements and stores the high part of the result, masked. -// -// Asm: VPMULHW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedMulHigh(y Int16x8, z Mask16x8) Int16x8 - -// MulHigh multiplies elements and stores the high part of the result, masked. -// -// Asm: VPMULHW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedMulHigh(y Int16x16, z Mask16x16) Int16x16 - -// MulHigh multiplies elements and stores the high part of the result, masked. -// -// Asm: VPMULHW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedMulHigh(y Int16x32, z Mask16x32) Int16x32 - -// MulHigh multiplies elements and stores the high part of the result, masked. -// -// Asm: VPMULHUW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedMulHigh(y Uint16x8, z Mask16x8) Uint16x8 - -// MulHigh multiplies elements and stores the high part of the result, masked. -// -// Asm: VPMULHUW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedMulHigh(y Uint16x16, z Mask16x16) Uint16x16 - -// MulHigh multiplies elements and stores the high part of the result, masked. -// -// Asm: VPMULHUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedMulHigh(y Uint16x32, z Mask16x32) Uint16x32 - -/* MaskedMulLow */ - -// MulLow multiplies elements and stores the low part of the result, masked. -// -// Asm: VPMULLW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedMulLow(y Int16x8, z Mask16x8) Int16x8 - -// MulLow multiplies elements and stores the low part of the result, masked. 
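// exampleWideProduct is an editorial sketch: the masked high/low multiplies
// above recover both halves of the 16-bit products for the selected lanes.
func exampleWideProduct(a, b Int16x8, m Mask16x8) (lo, hi Int16x8) {
	lo = a.MaskedMulLow(b, m)  // low 16 bits of each selected product
	hi = a.MaskedMulHigh(b, m) // high 16 bits of each selected product
	return lo, hi
}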
-// -// Asm: VPMULLW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedMulLow(y Int16x16, z Mask16x16) Int16x16 - -// MulLow multiplies elements and stores the low part of the result, masked. -// -// Asm: VPMULLW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedMulLow(y Int16x32, z Mask16x32) Int16x32 - -// MulLow multiplies elements and stores the low part of the result, masked. -// -// Asm: VPMULLD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedMulLow(y Int32x4, z Mask32x4) Int32x4 - -// MulLow multiplies elements and stores the low part of the result, masked. -// -// Asm: VPMULLD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedMulLow(y Int32x8, z Mask32x8) Int32x8 - -// MulLow multiplies elements and stores the low part of the result, masked. -// -// Asm: VPMULLD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedMulLow(y Int32x16, z Mask32x16) Int32x16 - -// MulLow multiplies elements and stores the low part of the result, masked. -// -// Asm: VPMULLQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedMulLow(y Int64x2, z Mask64x2) Int64x2 - -// MulLow multiplies elements and stores the low part of the result, masked. -// -// Asm: VPMULLQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedMulLow(y Int64x4, z Mask64x4) Int64x4 - -// MulLow multiplies elements and stores the low part of the result, masked. -// -// Asm: VPMULLQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedMulLow(y Int64x8, z Mask64x8) Int64x8 - -/* MaskedNotEqual */ - -// NotEqual compares for inequality. -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedNotEqual(y Float32x4, z Mask32x4) Mask32x4 - -// NotEqual compares for inequality. -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedNotEqual(y Float32x8, z Mask32x8) Mask32x8 - -// NotEqual compares for inequality. -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedNotEqual(y Float32x16, z Mask32x16) Mask32x16 - -// NotEqual compares for inequality. -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedNotEqual(y Float64x2, z Mask64x2) Mask64x2 - -// NotEqual compares for inequality. -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedNotEqual(y Float64x4, z Mask64x4) Mask64x4 - -// NotEqual compares for inequality. -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedNotEqual(y Float64x8, z Mask64x8) Mask64x8 - -// NotEqual compares for inequality. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedNotEqual(y Int8x16, z Mask8x16) Mask8x16 - -// NotEqual compares for inequality. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedNotEqual(y Int8x32, z Mask8x32) Mask8x32 - -// NotEqual compares for inequality. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedNotEqual(y Int8x64, z Mask8x64) Mask8x64 - -// NotEqual compares for inequality. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedNotEqual(y Int16x8, z Mask16x8) Mask16x8 - -// NotEqual compares for inequality. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedNotEqual(y Int16x16, z Mask16x16) Mask16x16 - -// NotEqual compares for inequality. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedNotEqual(y Int16x32, z Mask16x32) Mask16x32 - -// NotEqual compares for inequality. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedNotEqual(y Int32x4, z Mask32x4) Mask32x4 - -// NotEqual compares for inequality. 
-// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedNotEqual(y Int32x8, z Mask32x8) Mask32x8 - -// NotEqual compares for inequality. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedNotEqual(y Int32x16, z Mask32x16) Mask32x16 - -// NotEqual compares for inequality. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedNotEqual(y Int64x2, z Mask64x2) Mask64x2 - -// NotEqual compares for inequality. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedNotEqual(y Int64x4, z Mask64x4) Mask64x4 - -// NotEqual compares for inequality. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedNotEqual(y Int64x8, z Mask64x8) Mask64x8 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedNotEqual(y Uint8x16, z Mask8x16) Mask8x16 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedNotEqual(y Uint8x32, z Mask8x32) Mask8x32 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedNotEqual(y Uint8x64, z Mask8x64) Mask8x64 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedNotEqual(y Uint16x8, z Mask16x8) Mask16x8 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedNotEqual(y Uint16x16, z Mask16x16) Mask16x16 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedNotEqual(y Uint16x32, z Mask16x32) Mask16x32 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedNotEqual(y Uint32x4, z Mask32x4) Mask32x4 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedNotEqual(y Uint32x8, z Mask32x8) Mask32x8 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedNotEqual(y Uint32x16, z Mask32x16) Mask32x16 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedNotEqual(y Uint64x2, z Mask64x2) Mask64x2 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedNotEqual(y Uint64x4, z Mask64x4) Mask64x4 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedNotEqual(y Uint64x8, z Mask64x8) Mask64x8 - -/* MaskedOr */ - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VPORD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedOr(y Int32x4, z Mask32x4) Int32x4 - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VPORD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedOr(y Int32x8, z Mask32x8) Int32x8 - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VPORD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedOr(y Int32x16, z Mask32x16) Int32x16 - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VPORQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedOr(y Int64x2, z Mask64x2) Int64x2 - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VPORQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedOr(y Int64x4, z Mask64x4) Int64x4 - -// Or performs a masked bitwise OR operation between two vectors. 
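// exampleDiffOr is an editorial sketch combining a masked compare with the
// masked bitwise OR listed above; the function and variable names are
// illustrative only.
func exampleDiffOr(a, b Int32x4, m Mask32x4) (diff Mask32x4, bits Int32x4) {
	diff = a.MaskedNotEqual(b, m) // selected lanes where a != b
	bits = a.MaskedOr(b, m)       // bitwise OR of selected lanes
	return diff, bits
}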
-// -// Asm: VPORQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedOr(y Int64x8, z Mask64x8) Int64x8 - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VPORD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedOr(y Uint32x4, z Mask32x4) Uint32x4 - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VPORD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedOr(y Uint32x8, z Mask32x8) Uint32x8 - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VPORD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedOr(y Uint32x16, z Mask32x16) Uint32x16 - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VPORQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedOr(y Uint64x2, z Mask64x2) Uint64x2 - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VPORQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedOr(y Uint64x4, z Mask64x4) Uint64x4 - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VPORQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedOr(y Uint64x8, z Mask64x8) Uint64x8 - -/* MaskedPairDotProd */ - -// PairDotProd multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. -// -// Asm: VPMADDWD, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedPairDotProd(y Int16x8, z Mask16x8) Int32x4 - -// PairDotProd multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. -// -// Asm: VPMADDWD, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedPairDotProd(y Int16x16, z Mask16x16) Int32x8 - -// PairDotProd multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. -// -// Asm: VPMADDWD, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedPairDotProd(y Int16x32, z Mask16x32) Int32x16 - -/* MaskedPairDotProdAccumulate */ - -// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. -// -// Asm: VPDPWSSD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedPairDotProdAccumulate(y Int16x8, z Int16x8, u Mask32x4) Int32x4 - -// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. -// -// Asm: VPDPWSSD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedPairDotProdAccumulate(y Int16x16, z Int16x16, u Mask32x8) Int32x8 - -// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. -// -// Asm: VPDPWSSD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedPairDotProdAccumulate(y Int16x32, z Int16x32, u Mask32x16) Int32x16 - -/* MaskedPopCount */ - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedPopCount(y Mask8x16) Int8x16 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedPopCount(y Mask8x32) Int8x32 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedPopCount(y Mask8x64) Int8x64 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedPopCount(y Mask16x8) Int16x8 - -// PopCount counts the number of set bits in each element. 
-// -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedPopCount(y Mask16x16) Int16x16 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedPopCount(y Mask16x32) Int16x32 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedPopCount(y Mask32x4) Int32x4 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedPopCount(y Mask32x8) Int32x8 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedPopCount(y Mask32x16) Int32x16 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedPopCount(y Mask64x2) Int64x2 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedPopCount(y Mask64x4) Int64x4 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedPopCount(y Mask64x8) Int64x8 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedPopCount(y Mask8x16) Uint8x16 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedPopCount(y Mask8x32) Uint8x32 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedPopCount(y Mask8x64) Uint8x64 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedPopCount(y Mask16x8) Uint16x8 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedPopCount(y Mask16x16) Uint16x16 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedPopCount(y Mask16x32) Uint16x32 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedPopCount(y Mask32x4) Uint32x4 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedPopCount(y Mask32x8) Uint32x8 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedPopCount(y Mask32x16) Uint32x16 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedPopCount(y Mask64x2) Uint64x2 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedPopCount(y Mask64x4) Uint64x4 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedPopCount(y Mask64x8) Uint64x8 - -/* MaskedRotateAllLeft */ - -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. 
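// exampleDotAndCount is an editorial sketch of the masked dot-product
// accumulation and population count above; exact lane behavior follows the
// generated docs, not this comment.
func exampleDotAndCount(acc Int32x4, y, z Int16x8, m Mask32x4, bytes Uint8x16, bm Mask8x16) (Int32x4, Uint8x16) {
	acc = acc.MaskedPairDotProdAccumulate(y, z, m) // accumulate pairwise products of y and z into acc, selected lanes
	counts := bytes.MaskedPopCount(bm)             // per-byte set-bit counts on selected lanes
	return acc, counts
}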
-// -// Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedRotateAllLeft(imm uint8, y Mask32x4) Int32x4 - -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. -// -// Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedRotateAllLeft(imm uint8, y Mask32x8) Int32x8 - -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. -// -// Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedRotateAllLeft(imm uint8, y Mask32x16) Int32x16 - -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. -// -// Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedRotateAllLeft(imm uint8, y Mask64x2) Int64x2 - -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. -// -// Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedRotateAllLeft(imm uint8, y Mask64x4) Int64x4 - -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. -// -// Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedRotateAllLeft(imm uint8, y Mask64x8) Int64x8 - -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. -// -// Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedRotateAllLeft(imm uint8, y Mask32x4) Uint32x4 - -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. -// -// Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedRotateAllLeft(imm uint8, y Mask32x8) Uint32x8 - -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. -// -// Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedRotateAllLeft(imm uint8, y Mask32x16) Uint32x16 - -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. -// -// Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedRotateAllLeft(imm uint8, y Mask64x2) Uint64x2 - -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. -// -// Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedRotateAllLeft(imm uint8, y Mask64x4) Uint64x4 - -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. -// -// Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedRotateAllLeft(imm uint8, y Mask64x8) Uint64x8 - -/* MaskedRotateAllRight */ - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. -// -// Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedRotateAllRight(imm uint8, y Mask32x4) Int32x4 - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. -// -// Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedRotateAllRight(imm uint8, y Mask32x8) Int32x8 - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. -// -// Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedRotateAllRight(imm uint8, y Mask32x16) Int32x16 - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. -// -// Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedRotateAllRight(imm uint8, y Mask64x2) Int64x2 - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. 
-// -// Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedRotateAllRight(imm uint8, y Mask64x4) Int64x4 - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. -// -// Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedRotateAllRight(imm uint8, y Mask64x8) Int64x8 - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. -// -// Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedRotateAllRight(imm uint8, y Mask32x4) Uint32x4 - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. -// -// Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedRotateAllRight(imm uint8, y Mask32x8) Uint32x8 - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. -// -// Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedRotateAllRight(imm uint8, y Mask32x16) Uint32x16 - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. -// -// Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedRotateAllRight(imm uint8, y Mask64x2) Uint64x2 - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. -// -// Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedRotateAllRight(imm uint8, y Mask64x4) Uint64x4 - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. -// -// Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedRotateAllRight(imm uint8, y Mask64x8) Uint64x8 - -/* MaskedRotateLeft */ - -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// Asm: VPROLVD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedRotateLeft(y Int32x4, z Mask32x4) Int32x4 - -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// Asm: VPROLVD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedRotateLeft(y Int32x8, z Mask32x8) Int32x8 - -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// Asm: VPROLVD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedRotateLeft(y Int32x16, z Mask32x16) Int32x16 - -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// Asm: VPROLVQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedRotateLeft(y Int64x2, z Mask64x2) Int64x2 - -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// Asm: VPROLVQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedRotateLeft(y Int64x4, z Mask64x4) Int64x4 - -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// Asm: VPROLVQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedRotateLeft(y Int64x8, z Mask64x8) Int64x8 - -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// Asm: VPROLVD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedRotateLeft(y Uint32x4, z Mask32x4) Uint32x4 - -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. 
-// -// Asm: VPROLVD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedRotateLeft(y Uint32x8, z Mask32x8) Uint32x8 - -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// Asm: VPROLVD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedRotateLeft(y Uint32x16, z Mask32x16) Uint32x16 - -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// Asm: VPROLVQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedRotateLeft(y Uint64x2, z Mask64x2) Uint64x2 - -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// Asm: VPROLVQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedRotateLeft(y Uint64x4, z Mask64x4) Uint64x4 - -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// Asm: VPROLVQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedRotateLeft(y Uint64x8, z Mask64x8) Uint64x8 - -/* MaskedRotateRight */ - -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// Asm: VPRORVD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedRotateRight(y Int32x4, z Mask32x4) Int32x4 - -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// Asm: VPRORVD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedRotateRight(y Int32x8, z Mask32x8) Int32x8 - -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// Asm: VPRORVD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedRotateRight(y Int32x16, z Mask32x16) Int32x16 - -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// Asm: VPRORVQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedRotateRight(y Int64x2, z Mask64x2) Int64x2 - -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// Asm: VPRORVQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedRotateRight(y Int64x4, z Mask64x4) Int64x4 - -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// Asm: VPRORVQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedRotateRight(y Int64x8, z Mask64x8) Int64x8 - -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// Asm: VPRORVD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedRotateRight(y Uint32x4, z Mask32x4) Uint32x4 - -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// Asm: VPRORVD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedRotateRight(y Uint32x8, z Mask32x8) Uint32x8 - -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// Asm: VPRORVD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedRotateRight(y Uint32x16, z Mask32x16) Uint32x16 - -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// Asm: VPRORVQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedRotateRight(y Uint64x2, z Mask64x2) Uint64x2 - -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. 
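// exampleRotate is an editorial sketch contrasting the two rotate flavors
// above: a constant count shared by all lanes versus per-lane counts taken
// from another vector.
func exampleRotate(v, counts Uint32x8, m Mask32x8) (Uint32x8, Uint32x8) {
	r1 := v.MaskedRotateAllLeft(3, m)    // rotate selected lanes left by 3 bits
	r2 := v.MaskedRotateRight(counts, m) // rotate selected lanes right by per-lane counts
	return r1, r2
}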
-// -// Asm: VPRORVQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedRotateRight(y Uint64x4, z Mask64x4) Uint64x4 - -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// Asm: VPRORVQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedRotateRight(y Uint64x8, z Mask64x8) Uint64x8 - -/* MaskedRoundWithPrecision */ - -// RoundWithPrecision rounds elements with specified precision. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedRoundWithPrecision(imm uint8, y Mask32x4) Float32x4 - -// RoundWithPrecision rounds elements with specified precision. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedRoundWithPrecision(imm uint8, y Mask32x8) Float32x8 - -// RoundWithPrecision rounds elements with specified precision. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedRoundWithPrecision(imm uint8, y Mask32x16) Float32x16 - -// RoundWithPrecision rounds elements with specified precision. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedRoundWithPrecision(imm uint8, y Mask64x2) Float64x2 - -// RoundWithPrecision rounds elements with specified precision. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedRoundWithPrecision(imm uint8, y Mask64x4) Float64x4 - -// RoundWithPrecision rounds elements with specified precision. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedRoundWithPrecision(imm uint8, y Mask64x8) Float64x8 - -/* MaskedSaturatedAdd */ - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedSaturatedAdd(y Int8x16, z Mask8x16) Int8x16 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedSaturatedAdd(y Int8x32, z Mask8x32) Int8x32 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedSaturatedAdd(y Int8x64, z Mask8x64) Int8x64 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedSaturatedAdd(y Int16x8, z Mask16x8) Int16x8 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedSaturatedAdd(y Int16x16, z Mask16x16) Int16x16 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedSaturatedAdd(y Int16x32, z Mask16x32) Int16x32 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedSaturatedAdd(y Uint8x16, z Mask8x16) Uint8x16 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedSaturatedAdd(y Uint8x32, z Mask8x32) Uint8x32 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedSaturatedAdd(y Uint8x64, z Mask8x64) Uint8x64 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. 
-// -// Asm: VPADDSW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedSaturatedAdd(y Uint16x8, z Mask16x8) Uint16x8 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedSaturatedAdd(y Uint16x16, z Mask16x16) Uint16x16 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedSaturatedAdd(y Uint16x32, z Mask16x32) Uint16x32 - -/* MaskedSaturatedPairDotProdAccumulate */ - -// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. -// -// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedSaturatedPairDotProdAccumulate(y Int16x8, z Int16x8, u Mask32x4) Int32x4 - -// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. -// -// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedSaturatedPairDotProdAccumulate(y Int16x16, z Int16x16, u Mask32x8) Int32x8 - -// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. -// -// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedSaturatedPairDotProdAccumulate(y Int16x32, z Int16x32, u Mask32x16) Int32x16 - -/* MaskedSaturatedSub */ - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. -// -// Asm: VPSUBSB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedSaturatedSub(y Int8x16, z Mask8x16) Int8x16 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. -// -// Asm: VPSUBSB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedSaturatedSub(y Int8x32, z Mask8x32) Int8x32 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. -// -// Asm: VPSUBSB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedSaturatedSub(y Int8x64, z Mask8x64) Int8x64 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. -// -// Asm: VPSUBSW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedSaturatedSub(y Int16x8, z Mask16x8) Int16x8 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. -// -// Asm: VPSUBSW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedSaturatedSub(y Int16x16, z Mask16x16) Int16x16 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. -// -// Asm: VPSUBSW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedSaturatedSub(y Int16x32, z Mask16x32) Int16x32 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. -// -// Asm: VPSUBSB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedSaturatedSub(y Uint8x16, z Mask8x16) Uint8x16 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. -// -// Asm: VPSUBSB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedSaturatedSub(y Uint8x32, z Mask8x32) Uint8x32 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. -// -// Asm: VPSUBSB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedSaturatedSub(y Uint8x64, z Mask8x64) Uint8x64 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. -// -// Asm: VPSUBSW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedSaturatedSub(y Uint16x8, z Mask16x8) Uint16x8 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. 
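// exampleSaturating is an editorial sketch of the masked saturating
// arithmetic above; results clamp to the int16 range rather than wrapping.
func exampleSaturating(a, b Int16x16, m Mask16x16) (sum, diff Int16x16) {
	sum = a.MaskedSaturatedAdd(b, m)  // saturating add on selected lanes
	diff = a.MaskedSaturatedSub(b, m) // saturating subtract on selected lanes
	return sum, diff
}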
-// -// Asm: VPSUBSW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedSaturatedSub(y Uint16x16, z Mask16x16) Uint16x16 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. -// -// Asm: VPSUBSW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedSaturatedSub(y Uint16x32, z Mask16x32) Uint16x32 - -/* MaskedSaturatedUnsignedSignedPairDotProd */ - -// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, -// yielding a vector of half as many elements with twice the input element size. -// -// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedSaturatedUnsignedSignedPairDotProd(y Int8x16, z Mask16x8) Int16x8 - -// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, -// yielding a vector of half as many elements with twice the input element size. -// -// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedSaturatedUnsignedSignedPairDotProd(y Int8x32, z Mask16x16) Int16x16 - -// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, -// yielding a vector of half as many elements with twice the input element size. -// -// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedSaturatedUnsignedSignedPairDotProd(y Int8x64, z Mask16x32) Int16x32 - -/* MaskedSaturatedUnsignedSignedQuadDotProdAccumulate */ - -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. -// -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16, u Mask32x4) Int32x4 - -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. -// -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32, u Mask32x8) Int32x8 - -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. -// -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64, u Mask32x16) Int32x16 - -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. -// -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16, u Mask32x4) Uint32x4 - -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. -// -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32, u Mask32x8) Uint32x8 - -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. -// -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64, u Mask32x16) Uint32x16 - -/* MaskedShiftAllLeft */ - -// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. 
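// exampleMixedDot is an editorial sketch of the mixed-sign dot products
// above, which pair unsigned bytes on one side with signed bytes on the
// other; names are illustrative only.
func exampleMixedDot(u Uint8x16, s Int8x16, acc Int32x4, m16 Mask16x8, m32 Mask32x4) (Int16x8, Int32x4) {
	pairs := u.MaskedSaturatedUnsignedSignedPairDotProd(s, m16)             // saturated pair sums, widened to 16 bits
	acc = acc.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(u, s, m32) // accumulate 4-element dot products into acc
	return pairs, acc
}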
-// -// Asm: VPSLLQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedShiftAllLeft(y uint64, z Mask64x2) Int64x2 - -// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. -// -// Asm: VPSLLQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedShiftAllLeft(y uint64, z Mask64x4) Int64x4 - -// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. -// -// Asm: VPSLLQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedShiftAllLeft(y uint64, z Mask64x8) Int64x8 - -// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. -// -// Asm: VPSLLQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedShiftAllLeft(y uint64, z Mask64x2) Uint64x2 - -// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. -// -// Asm: VPSLLQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedShiftAllLeft(y uint64, z Mask64x4) Uint64x4 - -// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. -// -// Asm: VPSLLQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedShiftAllLeft(y uint64, z Mask64x8) Uint64x8 - -/* MaskedShiftAllLeftAndFillUpperFrom */ - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int16x8, z Mask16x8) Int16x8 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int16x16, z Mask16x16) Int16x16 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int16x32, z Mask16x32) Int16x32 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int32x4, z Mask32x4) Int32x4 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int32x8, z Mask32x8) Int32x8 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. 
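// Illustrative sketch (same assumptions as above, zero values for shape only):
// MaskedShiftAllLeft applies one uint64 shift count to every lane, with the
// mask choosing which lanes participate:
//
//	var v simd.Int64x2
//	var m simd.Mask64x2
//	shifted := v.MaskedShiftAllLeft(3, m) // each selected lane shifted left by 3 bits
//	_ = shifted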
-// -// Asm: VPSHLDD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int32x16, z Mask32x16) Int32x16 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int64x2, z Mask64x2) Int64x2 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int64x4, z Mask64x4) Int64x4 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int64x8, z Mask64x8) Int64x8 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint16x8, z Mask16x8) Uint16x8 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint16x16, z Mask16x16) Uint16x16 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint16x32, z Mask16x32) Uint16x32 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint32x4, z Mask32x4) Uint32x4 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint32x8, z Mask32x8) Uint32x8 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. 
-// -// Asm: VPSHLDD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint32x16, z Mask32x16) Uint32x16 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint64x2, z Mask64x2) Uint64x2 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint64x4, z Mask64x4) Uint64x4 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint64x8, z Mask64x8) Uint64x8 - -/* MaskedShiftAllRight */ - -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. -// -// Asm: VPSRLQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedShiftAllRight(y uint64, z Mask64x2) Int64x2 - -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. -// -// Asm: VPSRLQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedShiftAllRight(y uint64, z Mask64x4) Int64x4 - -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. -// -// Asm: VPSRLQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedShiftAllRight(y uint64, z Mask64x8) Int64x8 - -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. -// -// Asm: VPSRLQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedShiftAllRight(y uint64, z Mask64x2) Uint64x2 - -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. -// -// Asm: VPSRLQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedShiftAllRight(y uint64, z Mask64x4) Uint64x4 - -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. -// -// Asm: VPSRLQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedShiftAllRight(y uint64, z Mask64x8) Uint64x8 - -/* MaskedShiftAllRightAndFillUpperFrom */ - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int16x8, z Mask16x8) Int16x8 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
-// -// Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int16x16, z Mask16x16) Int16x16 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int16x32, z Mask16x32) Int16x32 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int32x4, z Mask32x4) Int32x4 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int32x8, z Mask32x8) Int32x8 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int32x16, z Mask32x16) Int32x16 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int64x2, z Mask64x2) Int64x2 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int64x4, z Mask64x4) Int64x4 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int64x8, z Mask64x8) Int64x8 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint16x8, z Mask16x8) Uint16x8 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
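// Illustrative sketch (same assumptions): MaskedShiftAllRight also uses a
// single count for all lanes, while MaskedShiftAllRightAndFillUpperFrom
// additionally takes a second vector whose lower bits refill the vacated
// upper bits, giving a double-shift-style result:
//
//	var a simd.Uint64x2
//	var m simd.Mask64x2
//	r1 := a.MaskedShiftAllRight(7, m)
//	var c, d simd.Int16x8
//	var m16 simd.Mask16x8
//	r2 := c.MaskedShiftAllRightAndFillUpperFrom(4, d, m16) // lower bits of d fill the emptied upper bits
//	_, _ = r1, r2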
-// -// Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint16x16, z Mask16x16) Uint16x16 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint16x32, z Mask16x32) Uint16x32 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint32x4, z Mask32x4) Uint32x4 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint32x8, z Mask32x8) Uint32x8 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint32x16, z Mask32x16) Uint32x16 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint64x2, z Mask64x2) Uint64x2 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint64x4, z Mask64x4) Uint64x4 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint64x8, z Mask64x8) Uint64x8 - -/* MaskedShiftAllRightSignExtended */ - -// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedShiftAllRightSignExtended(y uint64, z Mask64x2) Int64x2 - -// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedShiftAllRightSignExtended(y uint64, z Mask64x4) Int64x4 - -// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. 
Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedShiftAllRightSignExtended(y uint64, z Mask64x8) Int64x8 - -/* MaskedShiftLeft */ - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedShiftLeft(y Int16x8, z Mask16x8) Int16x8 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedShiftLeft(y Int16x16, z Mask16x16) Int16x16 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedShiftLeft(y Int16x32, z Mask16x32) Int16x32 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedShiftLeft(y Int32x4, z Mask32x4) Int32x4 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedShiftLeft(y Int32x8, z Mask32x8) Int32x8 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedShiftLeft(y Int32x16, z Mask32x16) Int32x16 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedShiftLeft(y Int64x2, z Mask64x2) Int64x2 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedShiftLeft(y Int64x4, z Mask64x4) Int64x4 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedShiftLeft(y Int64x8, z Mask64x8) Int64x8 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedShiftLeft(y Uint16x8, z Mask16x8) Uint16x8 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedShiftLeft(y Uint16x16, z Mask16x16) Uint16x16 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedShiftLeft(y Uint16x32, z Mask16x32) Uint16x32 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. 
-// -// Asm: VPSLLVD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedShiftLeft(y Uint32x4, z Mask32x4) Uint32x4 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedShiftLeft(y Uint32x8, z Mask32x8) Uint32x8 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedShiftLeft(y Uint32x16, z Mask32x16) Uint32x16 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedShiftLeft(y Uint64x2, z Mask64x2) Uint64x2 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedShiftLeft(y Uint64x4, z Mask64x4) Uint64x4 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedShiftLeft(y Uint64x8, z Mask64x8) Uint64x8 - -/* MaskedShiftLeftAndFillUpperFrom */ - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedShiftLeftAndFillUpperFrom(y Int16x8, z Int16x8, u Mask16x8) Int16x8 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedShiftLeftAndFillUpperFrom(y Int16x16, z Int16x16, u Mask16x16) Int16x16 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedShiftLeftAndFillUpperFrom(y Int16x32, z Int16x32, u Mask16x32) Int16x32 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedShiftLeftAndFillUpperFrom(y Int32x4, z Int32x4, u Mask32x4) Int32x4 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
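// Illustrative sketch (same assumptions): unlike the ShiftAll variants, the
// MaskedShiftLeft declarations above take their shift counts from the
// corresponding lanes of a vector argument, so each lane can move by a
// different amount:
//
//	var data, counts simd.Int32x4
//	var m simd.Mask32x4
//	out := data.MaskedShiftLeft(counts, m) // lane i shifts left by counts' lane i, where m selects
//	_ = out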
-// -// Asm: VPSHLDVD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedShiftLeftAndFillUpperFrom(y Int32x8, z Int32x8, u Mask32x8) Int32x8 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedShiftLeftAndFillUpperFrom(y Int32x16, z Int32x16, u Mask32x16) Int32x16 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedShiftLeftAndFillUpperFrom(y Int64x2, z Int64x2, u Mask64x2) Int64x2 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedShiftLeftAndFillUpperFrom(y Int64x4, z Int64x4, u Mask64x4) Int64x4 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedShiftLeftAndFillUpperFrom(y Int64x8, z Int64x8, u Mask64x8) Int64x8 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedShiftLeftAndFillUpperFrom(y Uint16x8, z Uint16x8, u Mask16x8) Uint16x8 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedShiftLeftAndFillUpperFrom(y Uint16x16, z Uint16x16, u Mask16x16) Uint16x16 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedShiftLeftAndFillUpperFrom(y Uint16x32, z Uint16x32, u Mask16x32) Uint16x32 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
-// -// Asm: VPSHLDVD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedShiftLeftAndFillUpperFrom(y Uint32x4, z Uint32x4, u Mask32x4) Uint32x4 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedShiftLeftAndFillUpperFrom(y Uint32x8, z Uint32x8, u Mask32x8) Uint32x8 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedShiftLeftAndFillUpperFrom(y Uint32x16, z Uint32x16, u Mask32x16) Uint32x16 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedShiftLeftAndFillUpperFrom(y Uint64x2, z Uint64x2, u Mask64x2) Uint64x2 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedShiftLeftAndFillUpperFrom(y Uint64x4, z Uint64x4, u Mask64x4) Uint64x4 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedShiftLeftAndFillUpperFrom(y Uint64x8, z Uint64x8, u Mask64x8) Uint64x8 - -/* MaskedShiftRight */ - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedShiftRight(y Int16x8, z Mask16x8) Int16x8 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedShiftRight(y Int16x16, z Mask16x16) Int16x16 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedShiftRight(y Int16x32, z Mask16x32) Int16x32 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedShiftRight(y Int32x4, z Mask32x4) Int32x4 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. 
-// -// Asm: VPSRLVD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedShiftRight(y Int32x8, z Mask32x8) Int32x8 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedShiftRight(y Int32x16, z Mask32x16) Int32x16 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedShiftRight(y Int64x2, z Mask64x2) Int64x2 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedShiftRight(y Int64x4, z Mask64x4) Int64x4 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedShiftRight(y Int64x8, z Mask64x8) Int64x8 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedShiftRight(y Uint16x8, z Mask16x8) Uint16x8 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedShiftRight(y Uint16x16, z Mask16x16) Uint16x16 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedShiftRight(y Uint16x32, z Mask16x32) Uint16x32 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedShiftRight(y Uint32x4, z Mask32x4) Uint32x4 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedShiftRight(y Uint32x8, z Mask32x8) Uint32x8 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedShiftRight(y Uint32x16, z Mask32x16) Uint32x16 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedShiftRight(y Uint64x2, z Mask64x2) Uint64x2 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedShiftRight(y Uint64x4, z Mask64x4) Uint64x4 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. 
-// -// Asm: VPSRLVQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedShiftRight(y Uint64x8, z Mask64x8) Uint64x8 - -/* MaskedShiftRightAndFillUpperFrom */ - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedShiftRightAndFillUpperFrom(y Int16x8, z Int16x8, u Mask16x8) Int16x8 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedShiftRightAndFillUpperFrom(y Int16x16, z Int16x16, u Mask16x16) Int16x16 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedShiftRightAndFillUpperFrom(y Int16x32, z Int16x32, u Mask16x32) Int16x32 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedShiftRightAndFillUpperFrom(y Int32x4, z Int32x4, u Mask32x4) Int32x4 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedShiftRightAndFillUpperFrom(y Int32x8, z Int32x8, u Mask32x8) Int32x8 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedShiftRightAndFillUpperFrom(y Int32x16, z Int32x16, u Mask32x16) Int32x16 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedShiftRightAndFillUpperFrom(y Int64x2, z Int64x2, u Mask64x2) Int64x2 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
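// Illustrative sketch (same assumptions): the per-lane right shifts follow the
// same pattern, and the AndFillUpperFrom form takes both a counts vector and a
// second data vector whose lower bits refill the emptied upper bits:
//
//	var x, counts simd.Uint32x4
//	var m simd.Mask32x4
//	r := x.MaskedShiftRight(counts, m)
//	var a, cnt, fill simd.Int16x8
//	var m16 simd.Mask16x8
//	f := a.MaskedShiftRightAndFillUpperFrom(cnt, fill, m16)
//	_, _ = r, f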
-// -// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedShiftRightAndFillUpperFrom(y Int64x4, z Int64x4, u Mask64x4) Int64x4 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedShiftRightAndFillUpperFrom(y Int64x8, z Int64x8, u Mask64x8) Int64x8 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedShiftRightAndFillUpperFrom(y Uint16x8, z Uint16x8, u Mask16x8) Uint16x8 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedShiftRightAndFillUpperFrom(y Uint16x16, z Uint16x16, u Mask16x16) Uint16x16 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedShiftRightAndFillUpperFrom(y Uint16x32, z Uint16x32, u Mask16x32) Uint16x32 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedShiftRightAndFillUpperFrom(y Uint32x4, z Uint32x4, u Mask32x4) Uint32x4 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedShiftRightAndFillUpperFrom(y Uint32x8, z Uint32x8, u Mask32x8) Uint32x8 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedShiftRightAndFillUpperFrom(y Uint32x16, z Uint32x16, u Mask32x16) Uint32x16 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
-// -// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedShiftRightAndFillUpperFrom(y Uint64x2, z Uint64x2, u Mask64x2) Uint64x2 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedShiftRightAndFillUpperFrom(y Uint64x4, z Uint64x4, u Mask64x4) Uint64x4 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedShiftRightAndFillUpperFrom(y Uint64x8, z Uint64x8, u Mask64x8) Uint64x8 - -/* MaskedShiftRightSignExtended */ - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedShiftRightSignExtended(y Int16x8, z Mask16x8) Int16x8 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedShiftRightSignExtended(y Int16x16, z Mask16x16) Int16x16 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedShiftRightSignExtended(y Int16x32, z Mask16x32) Int16x32 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedShiftRightSignExtended(y Int32x4, z Mask32x4) Int32x4 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedShiftRightSignExtended(y Int32x8, z Mask32x8) Int32x8 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedShiftRightSignExtended(y Int32x16, z Mask32x16) Int32x16 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedShiftRightSignExtended(y Int64x2, z Mask64x2) Int64x2 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedShiftRightSignExtended(y Int64x4, z Mask64x4) Int64x4 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. 
Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedShiftRightSignExtended(y Int64x8, z Mask64x8) Int64x8 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedShiftRightSignExtended(y Uint16x8, z Mask16x8) Uint16x8 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedShiftRightSignExtended(y Uint16x16, z Mask16x16) Uint16x16 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedShiftRightSignExtended(y Uint16x32, z Mask16x32) Uint16x32 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedShiftRightSignExtended(y Uint32x4, z Mask32x4) Uint32x4 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedShiftRightSignExtended(y Uint32x8, z Mask32x8) Uint32x8 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedShiftRightSignExtended(y Uint32x16, z Mask32x16) Uint32x16 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedShiftRightSignExtended(y Uint64x2, z Mask64x2) Uint64x2 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedShiftRightSignExtended(y Uint64x4, z Mask64x4) Uint64x4 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedShiftRightSignExtended(y Uint64x8, z Mask64x8) Uint64x8 - -/* MaskedSqrt */ - -// Sqrt computes the square root of each element. -// -// Asm: VSQRTPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedSqrt(y Mask32x4) Float32x4 - -// Sqrt computes the square root of each element. -// -// Asm: VSQRTPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedSqrt(y Mask32x8) Float32x8 - -// Sqrt computes the square root of each element. -// -// Asm: VSQRTPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedSqrt(y Mask32x16) Float32x16 - -// Sqrt computes the square root of each element. 
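// Illustrative sketch (same assumptions): the SignExtended right shifts are
// the arithmetic counterparts of the logical shifts above, and MaskedSqrt
// shows the unary masked pattern, taking only a mask:
//
//	var v, counts simd.Int64x2
//	var m simd.Mask64x2
//	a := v.MaskedShiftRightSignExtended(counts, m) // vacated upper bits copy the sign bit
//	var f simd.Float32x4
//	var mf simd.Mask32x4
//	s := f.MaskedSqrt(mf) // per-lane square root, controlled by mf
//	_, _ = a, s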
-// -// Asm: VSQRTPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedSqrt(y Mask64x2) Float64x2 - -// Sqrt computes the square root of each element. -// -// Asm: VSQRTPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedSqrt(y Mask64x4) Float64x4 - -// Sqrt computes the square root of each element. -// -// Asm: VSQRTPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedSqrt(y Mask64x8) Float64x8 - -/* MaskedSub */ - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VSUBPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedSub(y Float32x4, z Mask32x4) Float32x4 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VSUBPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedSub(y Float32x8, z Mask32x8) Float32x8 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VSUBPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedSub(y Float32x16, z Mask32x16) Float32x16 - -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VSUBPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedSub(y Float64x2, z Mask64x2) Float64x2 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x16) LessMasked(y Int8x16, z Mask8x16) Mask8x16 -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VSUBPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedSub(y Float64x4, z Mask64x4) Float64x4 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x32) LessMasked(y Int8x32, z Mask8x32) Mask8x32 -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VSUBPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedSub(y Float64x8, z Mask64x8) Float64x8 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x64) LessMasked(y Int8x64, z Mask8x64) Mask8x64 -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPSUBB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedSub(y Int8x16, z Mask8x16) Int8x16 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x8) LessMasked(y Int16x8, z Mask16x8) Mask16x8 -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPSUBB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedSub(y Int8x32, z Mask8x32) Int8x32 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x16) LessMasked(y Int16x16, z Mask16x16) Mask16x16 -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPSUBB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedSub(y Int8x64, z Mask8x64) Int8x64 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x32) LessMasked(y Int16x32, z Mask16x32) Mask16x32 -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPSUBW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedSub(y Int16x8, z Mask16x8) Int16x8 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x4) LessMasked(y Int32x4, z Mask32x4) Mask32x4 -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPSUBW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedSub(y Int16x16, z Mask16x16) Int16x16 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x8) LessMasked(y Int32x8, z Mask32x8) Mask32x8 -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. 
// -// Asm: VPSUBW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedSub(y Int16x32, z Mask16x32) Int16x32 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x16) LessMasked(y Int32x16, z Mask32x16) Mask32x16 -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedSub(y Int32x4, z Mask32x4) Int32x4 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x2) LessMasked(y Int64x2, z Mask64x2) Mask64x2 -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedSub(y Int32x8, z Mask32x8) Int32x8 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x4) LessMasked(y Int64x4, z Mask64x4) Mask64x4 -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedSub(y Int32x16, z Mask32x16) Int32x16 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x8) LessMasked(y Int64x8, z Mask64x8) Mask64x8 -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedSub(y Int64x2, z Mask64x2) Int64x2 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x16) LessMasked(y Uint8x16, z Mask8x16) Mask8x16 -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedSub(y Int64x4, z Mask64x4) Int64x4 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x32) LessMasked(y Uint8x32, z Mask8x32) Mask8x32 -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedSub(y Int64x8, z Mask64x8) Int64x8 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x64) LessMasked(y Uint8x64, z Mask8x64) Mask8x64 -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPSUBB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedSub(y Uint8x16, z Mask8x16) Uint8x16 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x8) LessMasked(y Uint16x8, z Mask16x8) Mask16x8 -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPSUBB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedSub(y Uint8x32, z Mask8x32) Uint8x32 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x16) LessMasked(y Uint16x16, z Mask16x16) Mask16x16 -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPSUBB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedSub(y Uint8x64, z Mask8x64) Uint8x64 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x32) LessMasked(y Uint16x32, z Mask16x32) Mask16x32 -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPSUBW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedSub(y Uint16x8, z Mask16x8) Uint16x8 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x4) LessMasked(y Uint32x4, z Mask32x4) Mask32x4 -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. 
// -// Asm: VPSUBW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedSub(y Uint16x16, z Mask16x16) Uint16x16 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x8) LessMasked(y Uint32x8, z Mask32x8) Mask32x8 -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPSUBW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedSub(y Uint16x32, z Mask16x32) Uint16x32 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x16) LessMasked(y Uint32x16, z Mask32x16) Mask32x16 -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedSub(y Uint32x4, z Mask32x4) Uint32x4 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) LessMasked(y Uint64x2, z Mask64x2) Mask64x2 -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedSub(y Uint32x8, z Mask32x8) Uint32x8 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) LessMasked(y Uint64x4, z Mask64x4) Mask64x4 -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedSub(y Uint32x16, z Mask32x16) Uint32x16 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) LessMasked(y Uint64x8, z Mask64x8) Mask64x8 -// Sub subtracts corresponding elements of two vectors. +/* Max */ + +// Max computes the maximum of corresponding elements. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedSub(y Uint64x2, z Mask64x2) Uint64x2 +// Asm: VMAXPS, CPU Feature: AVX +func (x Float32x4) Max(y Float32x4) Float32x4 -// Sub subtracts corresponding elements of two vectors. +// Max computes the maximum of corresponding elements. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedSub(y Uint64x4, z Mask64x4) Uint64x4 +// Asm: VMAXPS, CPU Feature: AVX +func (x Float32x8) Max(y Float32x8) Float32x8 -// Sub subtracts corresponding elements of two vectors. +// Max computes the maximum of corresponding elements. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedSub(y Uint64x8, z Mask64x8) Uint64x8 +// Asm: VMAXPS, CPU Feature: AVX512EVEX +func (x Float32x16) Max(y Float32x16) Float32x16 -/* MaskedTruncWithPrecision */ +// Max computes the maximum of corresponding elements. +// +// Asm: VMAXPD, CPU Feature: AVX +func (x Float64x2) Max(y Float64x2) Float64x2 -// TruncWithPrecision truncates elements with specified precision. +// Max computes the maximum of corresponding elements. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedTruncWithPrecision(imm uint8, y Mask32x4) Float32x4 +// Asm: VMAXPD, CPU Feature: AVX +func (x Float64x4) Max(y Float64x4) Float64x4 -// TruncWithPrecision truncates elements with specified precision. +// Max computes the maximum of corresponding elements. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedTruncWithPrecision(imm uint8, y Mask32x8) Float32x8 +// Asm: VMAXPD, CPU Feature: AVX512EVEX +func (x Float64x8) Max(y Float64x8) Float64x8 -// TruncWithPrecision truncates elements with specified precision. +// Max computes the maximum of corresponding elements. 
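// Illustrative sketch (same assumptions, using the renamed spellings added on
// the "+" lines of this hunk): LessMasked yields a mask rather than a vector,
// which can then feed other masked operations, while Max is a plain
// two-operand elementwise maximum:
//
//	var a, b simd.Int32x4
//	var m simd.Mask32x4
//	lt := a.LessMasked(b, m) // Mask32x4 result of the masked less-than comparison
//	var p, q simd.Float32x4
//	hi := p.Max(q)
//	_, _ = lt, hi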
// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedTruncWithPrecision(imm uint8, y Mask32x16) Float32x16 +// Asm: VPMAXSB, CPU Feature: AVX +func (x Int8x16) Max(y Int8x16) Int8x16 -// TruncWithPrecision truncates elements with specified precision. +// Max computes the maximum of corresponding elements. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedTruncWithPrecision(imm uint8, y Mask64x2) Float64x2 +// Asm: VPMAXSB, CPU Feature: AVX2 +func (x Int8x32) Max(y Int8x32) Int8x32 -// TruncWithPrecision truncates elements with specified precision. +// Max computes the maximum of corresponding elements. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedTruncWithPrecision(imm uint8, y Mask64x4) Float64x4 +// Asm: VPMAXSB, CPU Feature: AVX512EVEX +func (x Int8x64) Max(y Int8x64) Int8x64 -// TruncWithPrecision truncates elements with specified precision. +// Max computes the maximum of corresponding elements. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedTruncWithPrecision(imm uint8, y Mask64x8) Float64x8 +// Asm: VPMAXSW, CPU Feature: AVX +func (x Int16x8) Max(y Int16x8) Int16x8 -/* MaskedUnsignedSignedQuadDotProdAccumulate */ +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXSW, CPU Feature: AVX2 +func (x Int16x16) Max(y Int16x16) Int16x16 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// Max computes the maximum of corresponding elements. // -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16, u Mask32x4) Int32x4 +// Asm: VPMAXSW, CPU Feature: AVX512EVEX +func (x Int16x32) Max(y Int16x32) Int16x32 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// Max computes the maximum of corresponding elements. // -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32, u Mask32x8) Int32x8 +// Asm: VPMAXSD, CPU Feature: AVX +func (x Int32x4) Max(y Int32x4) Int32x4 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// Max computes the maximum of corresponding elements. // -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64, u Mask32x16) Int32x16 +// Asm: VPMAXSD, CPU Feature: AVX2 +func (x Int32x8) Max(y Int32x8) Int32x8 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// Max computes the maximum of corresponding elements. // -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16, u Mask32x4) Uint32x4 +// Asm: VPMAXSD, CPU Feature: AVX512EVEX +func (x Int32x16) Max(y Int32x16) Int32x16 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// Max computes the maximum of corresponding elements. 
// -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32, u Mask32x8) Uint32x8 +// Asm: VPMAXSQ, CPU Feature: AVX512EVEX +func (x Int64x2) Max(y Int64x2) Int64x2 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// Max computes the maximum of corresponding elements. // -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64, u Mask32x16) Uint32x16 +// Asm: VPMAXSQ, CPU Feature: AVX512EVEX +func (x Int64x4) Max(y Int64x4) Int64x4 -/* MaskedXor */ +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXSQ, CPU Feature: AVX512EVEX +func (x Int64x8) Max(y Int64x8) Int64x8 -// Xor performs a masked bitwise XOR operation between two vectors. +// Max computes the maximum of corresponding elements. // -// Asm: VPXORD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedXor(y Int32x4, z Mask32x4) Int32x4 +// Asm: VPMAXUB, CPU Feature: AVX +func (x Uint8x16) Max(y Uint8x16) Uint8x16 -// Xor performs a masked bitwise XOR operation between two vectors. +// Max computes the maximum of corresponding elements. // -// Asm: VPXORD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedXor(y Int32x8, z Mask32x8) Int32x8 +// Asm: VPMAXUB, CPU Feature: AVX2 +func (x Uint8x32) Max(y Uint8x32) Uint8x32 -// Xor performs a masked bitwise XOR operation between two vectors. +// Max computes the maximum of corresponding elements. // -// Asm: VPXORD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedXor(y Int32x16, z Mask32x16) Int32x16 +// Asm: VPMAXUB, CPU Feature: AVX512EVEX +func (x Uint8x64) Max(y Uint8x64) Uint8x64 -// Xor performs a masked bitwise XOR operation between two vectors. +// Max computes the maximum of corresponding elements. // -// Asm: VPXORQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedXor(y Int64x2, z Mask64x2) Int64x2 +// Asm: VPMAXUW, CPU Feature: AVX +func (x Uint16x8) Max(y Uint16x8) Uint16x8 -// Xor performs a masked bitwise XOR operation between two vectors. +// Max computes the maximum of corresponding elements. // -// Asm: VPXORQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedXor(y Int64x4, z Mask64x4) Int64x4 +// Asm: VPMAXUW, CPU Feature: AVX2 +func (x Uint16x16) Max(y Uint16x16) Uint16x16 -// Xor performs a masked bitwise XOR operation between two vectors. +// Max computes the maximum of corresponding elements. // -// Asm: VPXORQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedXor(y Int64x8, z Mask64x8) Int64x8 +// Asm: VPMAXUW, CPU Feature: AVX512EVEX +func (x Uint16x32) Max(y Uint16x32) Uint16x32 -// Xor performs a masked bitwise XOR operation between two vectors. +// Max computes the maximum of corresponding elements. // -// Asm: VPXORD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedXor(y Uint32x4, z Mask32x4) Uint32x4 +// Asm: VPMAXUD, CPU Feature: AVX +func (x Uint32x4) Max(y Uint32x4) Uint32x4 -// Xor performs a masked bitwise XOR operation between two vectors. +// Max computes the maximum of corresponding elements. // -// Asm: VPXORD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedXor(y Uint32x8, z Mask32x8) Uint32x8 +// Asm: VPMAXUD, CPU Feature: AVX2 +func (x Uint32x8) Max(y Uint32x8) Uint32x8 -// Xor performs a masked bitwise XOR operation between two vectors. +// Max computes the maximum of corresponding elements. 
// -// Asm: VPXORD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedXor(y Uint32x16, z Mask32x16) Uint32x16 +// Asm: VPMAXUD, CPU Feature: AVX512EVEX +func (x Uint32x16) Max(y Uint32x16) Uint32x16 -// Xor performs a masked bitwise XOR operation between two vectors. +// Max computes the maximum of corresponding elements. // -// Asm: VPXORQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedXor(y Uint64x2, z Mask64x2) Uint64x2 +// Asm: VPMAXUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) Max(y Uint64x2) Uint64x2 -// Xor performs a masked bitwise XOR operation between two vectors. +// Max computes the maximum of corresponding elements. // -// Asm: VPXORQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedXor(y Uint64x4, z Mask64x4) Uint64x4 +// Asm: VPMAXUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) Max(y Uint64x4) Uint64x4 -// Xor performs a masked bitwise XOR operation between two vectors. +// Max computes the maximum of corresponding elements. // -// Asm: VPXORQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedXor(y Uint64x8, z Mask64x8) Uint64x8 +// Asm: VPMAXUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) Max(y Uint64x8) Uint64x8 -/* Max */ +/* MaxMasked */ // Max computes the maximum of corresponding elements. // -// Asm: VMAXPS, CPU Feature: AVX -func (x Float32x4) Max(y Float32x4) Float32x4 +// Asm: VMAXPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaxMasked(y Float32x4, z Mask32x4) Float32x4 // Max computes the maximum of corresponding elements. // -// Asm: VMAXPS, CPU Feature: AVX -func (x Float32x8) Max(y Float32x8) Float32x8 +// Asm: VMAXPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaxMasked(y Float32x8, z Mask32x8) Float32x8 // Max computes the maximum of corresponding elements. // // Asm: VMAXPS, CPU Feature: AVX512EVEX -func (x Float32x16) Max(y Float32x16) Float32x16 +func (x Float32x16) MaxMasked(y Float32x16, z Mask32x16) Float32x16 // Max computes the maximum of corresponding elements. // -// Asm: VMAXPD, CPU Feature: AVX -func (x Float64x2) Max(y Float64x2) Float64x2 +// Asm: VMAXPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaxMasked(y Float64x2, z Mask64x2) Float64x2 // Max computes the maximum of corresponding elements. // -// Asm: VMAXPD, CPU Feature: AVX -func (x Float64x4) Max(y Float64x4) Float64x4 +// Asm: VMAXPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaxMasked(y Float64x4, z Mask64x4) Float64x4 // Max computes the maximum of corresponding elements. // // Asm: VMAXPD, CPU Feature: AVX512EVEX -func (x Float64x8) Max(y Float64x8) Float64x8 +func (x Float64x8) MaxMasked(y Float64x8, z Mask64x8) Float64x8 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXSB, CPU Feature: AVX -func (x Int8x16) Max(y Int8x16) Int8x16 +// Asm: VPMAXSB, CPU Feature: AVX512EVEX +func (x Int8x16) MaxMasked(y Int8x16, z Mask8x16) Int8x16 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXSB, CPU Feature: AVX2 -func (x Int8x32) Max(y Int8x32) Int8x32 +// Asm: VPMAXSB, CPU Feature: AVX512EVEX +func (x Int8x32) MaxMasked(y Int8x32, z Mask8x32) Int8x32 // Max computes the maximum of corresponding elements. // // Asm: VPMAXSB, CPU Feature: AVX512EVEX -func (x Int8x64) Max(y Int8x64) Int8x64 +func (x Int8x64) MaxMasked(y Int8x64, z Mask8x64) Int8x64 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXSW, CPU Feature: AVX -func (x Int16x8) Max(y Int16x8) Int16x8 +// Asm: VPMAXSW, CPU Feature: AVX512EVEX +func (x Int16x8) MaxMasked(y Int16x8, z Mask16x8) Int16x8 // Max computes the maximum of corresponding elements. 
// -// Asm: VPMAXSW, CPU Feature: AVX2 -func (x Int16x16) Max(y Int16x16) Int16x16 +// Asm: VPMAXSW, CPU Feature: AVX512EVEX +func (x Int16x16) MaxMasked(y Int16x16, z Mask16x16) Int16x16 // Max computes the maximum of corresponding elements. // // Asm: VPMAXSW, CPU Feature: AVX512EVEX -func (x Int16x32) Max(y Int16x32) Int16x32 +func (x Int16x32) MaxMasked(y Int16x32, z Mask16x32) Int16x32 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXSD, CPU Feature: AVX -func (x Int32x4) Max(y Int32x4) Int32x4 +// Asm: VPMAXSD, CPU Feature: AVX512EVEX +func (x Int32x4) MaxMasked(y Int32x4, z Mask32x4) Int32x4 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXSD, CPU Feature: AVX2 -func (x Int32x8) Max(y Int32x8) Int32x8 +// Asm: VPMAXSD, CPU Feature: AVX512EVEX +func (x Int32x8) MaxMasked(y Int32x8, z Mask32x8) Int32x8 // Max computes the maximum of corresponding elements. // // Asm: VPMAXSD, CPU Feature: AVX512EVEX -func (x Int32x16) Max(y Int32x16) Int32x16 +func (x Int32x16) MaxMasked(y Int32x16, z Mask32x16) Int32x16 // Max computes the maximum of corresponding elements. // // Asm: VPMAXSQ, CPU Feature: AVX512EVEX -func (x Int64x2) Max(y Int64x2) Int64x2 +func (x Int64x2) MaxMasked(y Int64x2, z Mask64x2) Int64x2 // Max computes the maximum of corresponding elements. // // Asm: VPMAXSQ, CPU Feature: AVX512EVEX -func (x Int64x4) Max(y Int64x4) Int64x4 +func (x Int64x4) MaxMasked(y Int64x4, z Mask64x4) Int64x4 // Max computes the maximum of corresponding elements. // // Asm: VPMAXSQ, CPU Feature: AVX512EVEX -func (x Int64x8) Max(y Int64x8) Int64x8 +func (x Int64x8) MaxMasked(y Int64x8, z Mask64x8) Int64x8 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXUB, CPU Feature: AVX -func (x Uint8x16) Max(y Uint8x16) Uint8x16 +// Asm: VPMAXUB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaxMasked(y Uint8x16, z Mask8x16) Uint8x16 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXUB, CPU Feature: AVX2 -func (x Uint8x32) Max(y Uint8x32) Uint8x32 +// Asm: VPMAXUB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaxMasked(y Uint8x32, z Mask8x32) Uint8x32 // Max computes the maximum of corresponding elements. // // Asm: VPMAXUB, CPU Feature: AVX512EVEX -func (x Uint8x64) Max(y Uint8x64) Uint8x64 +func (x Uint8x64) MaxMasked(y Uint8x64, z Mask8x64) Uint8x64 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXUW, CPU Feature: AVX -func (x Uint16x8) Max(y Uint16x8) Uint16x8 +// Asm: VPMAXUW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaxMasked(y Uint16x8, z Mask16x8) Uint16x8 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXUW, CPU Feature: AVX2 -func (x Uint16x16) Max(y Uint16x16) Uint16x16 +// Asm: VPMAXUW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaxMasked(y Uint16x16, z Mask16x16) Uint16x16 // Max computes the maximum of corresponding elements. // // Asm: VPMAXUW, CPU Feature: AVX512EVEX -func (x Uint16x32) Max(y Uint16x32) Uint16x32 +func (x Uint16x32) MaxMasked(y Uint16x32, z Mask16x32) Uint16x32 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXUD, CPU Feature: AVX -func (x Uint32x4) Max(y Uint32x4) Uint32x4 +// Asm: VPMAXUD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaxMasked(y Uint32x4, z Mask32x4) Uint32x4 // Max computes the maximum of corresponding elements. 
// -// Asm: VPMAXUD, CPU Feature: AVX2 -func (x Uint32x8) Max(y Uint32x8) Uint32x8 +// Asm: VPMAXUD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaxMasked(y Uint32x8, z Mask32x8) Uint32x8 // Max computes the maximum of corresponding elements. // // Asm: VPMAXUD, CPU Feature: AVX512EVEX -func (x Uint32x16) Max(y Uint32x16) Uint32x16 +func (x Uint32x16) MaxMasked(y Uint32x16, z Mask32x16) Uint32x16 // Max computes the maximum of corresponding elements. // // Asm: VPMAXUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) Max(y Uint64x2) Uint64x2 +func (x Uint64x2) MaxMasked(y Uint64x2, z Mask64x2) Uint64x2 // Max computes the maximum of corresponding elements. // // Asm: VPMAXUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) Max(y Uint64x4) Uint64x4 +func (x Uint64x4) MaxMasked(y Uint64x4, z Mask64x4) Uint64x4 // Max computes the maximum of corresponding elements. // // Asm: VPMAXUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) Max(y Uint64x8) Uint64x8 +func (x Uint64x8) MaxMasked(y Uint64x8, z Mask64x8) Uint64x8 /* Min */ @@ -6235,6 +3949,158 @@ func (x Uint64x4) Min(y Uint64x4) Uint64x4 // Asm: VPMINUQ, CPU Feature: AVX512EVEX func (x Uint64x8) Min(y Uint64x8) Uint64x8 +/* MinMasked */ + +// Min computes the minimum of corresponding elements. +// +// Asm: VMINPS, CPU Feature: AVX512EVEX +func (x Float32x4) MinMasked(y Float32x4, z Mask32x4) Float32x4 + +// Min computes the minimum of corresponding elements. +// +// Asm: VMINPS, CPU Feature: AVX512EVEX +func (x Float32x8) MinMasked(y Float32x8, z Mask32x8) Float32x8 + +// Min computes the minimum of corresponding elements. +// +// Asm: VMINPS, CPU Feature: AVX512EVEX +func (x Float32x16) MinMasked(y Float32x16, z Mask32x16) Float32x16 + +// Min computes the minimum of corresponding elements. +// +// Asm: VMINPD, CPU Feature: AVX512EVEX +func (x Float64x2) MinMasked(y Float64x2, z Mask64x2) Float64x2 + +// Min computes the minimum of corresponding elements. +// +// Asm: VMINPD, CPU Feature: AVX512EVEX +func (x Float64x4) MinMasked(y Float64x4, z Mask64x4) Float64x4 + +// Min computes the minimum of corresponding elements. +// +// Asm: VMINPD, CPU Feature: AVX512EVEX +func (x Float64x8) MinMasked(y Float64x8, z Mask64x8) Float64x8 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSB, CPU Feature: AVX512EVEX +func (x Int8x16) MinMasked(y Int8x16, z Mask8x16) Int8x16 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSB, CPU Feature: AVX512EVEX +func (x Int8x32) MinMasked(y Int8x32, z Mask8x32) Int8x32 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSB, CPU Feature: AVX512EVEX +func (x Int8x64) MinMasked(y Int8x64, z Mask8x64) Int8x64 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSW, CPU Feature: AVX512EVEX +func (x Int16x8) MinMasked(y Int16x8, z Mask16x8) Int16x8 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSW, CPU Feature: AVX512EVEX +func (x Int16x16) MinMasked(y Int16x16, z Mask16x16) Int16x16 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSW, CPU Feature: AVX512EVEX +func (x Int16x32) MinMasked(y Int16x32, z Mask16x32) Int16x32 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSD, CPU Feature: AVX512EVEX +func (x Int32x4) MinMasked(y Int32x4, z Mask32x4) Int32x4 + +// Min computes the minimum of corresponding elements. 
+// +// Asm: VPMINSD, CPU Feature: AVX512EVEX +func (x Int32x8) MinMasked(y Int32x8, z Mask32x8) Int32x8 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSD, CPU Feature: AVX512EVEX +func (x Int32x16) MinMasked(y Int32x16, z Mask32x16) Int32x16 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSQ, CPU Feature: AVX512EVEX +func (x Int64x2) MinMasked(y Int64x2, z Mask64x2) Int64x2 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSQ, CPU Feature: AVX512EVEX +func (x Int64x4) MinMasked(y Int64x4, z Mask64x4) Int64x4 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSQ, CPU Feature: AVX512EVEX +func (x Int64x8) MinMasked(y Int64x8, z Mask64x8) Int64x8 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUB, CPU Feature: AVX512EVEX +func (x Uint8x16) MinMasked(y Uint8x16, z Mask8x16) Uint8x16 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUB, CPU Feature: AVX512EVEX +func (x Uint8x32) MinMasked(y Uint8x32, z Mask8x32) Uint8x32 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUB, CPU Feature: AVX512EVEX +func (x Uint8x64) MinMasked(y Uint8x64, z Mask8x64) Uint8x64 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUW, CPU Feature: AVX512EVEX +func (x Uint16x8) MinMasked(y Uint16x8, z Mask16x8) Uint16x8 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUW, CPU Feature: AVX512EVEX +func (x Uint16x16) MinMasked(y Uint16x16, z Mask16x16) Uint16x16 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUW, CPU Feature: AVX512EVEX +func (x Uint16x32) MinMasked(y Uint16x32, z Mask16x32) Uint16x32 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUD, CPU Feature: AVX512EVEX +func (x Uint32x4) MinMasked(y Uint32x4, z Mask32x4) Uint32x4 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUD, CPU Feature: AVX512EVEX +func (x Uint32x8) MinMasked(y Uint32x8, z Mask32x8) Uint32x8 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUD, CPU Feature: AVX512EVEX +func (x Uint32x16) MinMasked(y Uint32x16, z Mask32x16) Uint32x16 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MinMasked(y Uint64x2, z Mask64x2) Uint64x2 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MinMasked(y Uint64x4, z Mask64x4) Uint64x4 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MinMasked(y Uint64x8, z Mask64x8) Uint64x8 + /* Mul */ // Mul multiplies corresponding elements of two vectors. @@ -6299,6 +4165,38 @@ func (x Float64x4) MulByPowOf2(y Float64x4) Float64x4 // Asm: VSCALEFPD, CPU Feature: AVX512EVEX func (x Float64x8) MulByPowOf2(y Float64x8) Float64x8 +/* MulByPowOf2Masked */ + +// MulByPowOf2 multiplies elements by a power of 2. +// +// Asm: VSCALEFPS, CPU Feature: AVX512EVEX +func (x Float32x4) MulByPowOf2Masked(y Float32x4, z Mask32x4) Float32x4 + +// MulByPowOf2 multiplies elements by a power of 2. +// +// Asm: VSCALEFPS, CPU Feature: AVX512EVEX +func (x Float32x8) MulByPowOf2Masked(y Float32x8, z Mask32x8) Float32x8 + +// MulByPowOf2 multiplies elements by a power of 2. 
+// +// Asm: VSCALEFPS, CPU Feature: AVX512EVEX +func (x Float32x16) MulByPowOf2Masked(y Float32x16, z Mask32x16) Float32x16 + +// MulByPowOf2 multiplies elements by a power of 2. +// +// Asm: VSCALEFPD, CPU Feature: AVX512EVEX +func (x Float64x2) MulByPowOf2Masked(y Float64x2, z Mask64x2) Float64x2 + +// MulByPowOf2 multiplies elements by a power of 2. +// +// Asm: VSCALEFPD, CPU Feature: AVX512EVEX +func (x Float64x4) MulByPowOf2Masked(y Float64x4, z Mask64x4) Float64x4 + +// MulByPowOf2 multiplies elements by a power of 2. +// +// Asm: VSCALEFPD, CPU Feature: AVX512EVEX +func (x Float64x8) MulByPowOf2Masked(y Float64x8, z Mask64x8) Float64x8 + /* MulEvenWiden */ // MulEvenWiden multiplies even-indexed elements, widening the result. @@ -6361,6 +4259,44 @@ func (x Uint64x4) MulEvenWiden(y Uint64x4) Uint64x4 // Asm: VPMULUDQ, CPU Feature: AVX512EVEX func (x Uint64x8) MulEvenWiden(y Uint64x8) Uint64x8 +/* MulEvenWidenMasked */ + +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. +// +// Asm: VPMULDQ, CPU Feature: AVX512EVEX +func (x Int64x2) MulEvenWidenMasked(y Int64x2, z Mask64x2) Int64x2 + +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. +// +// Asm: VPMULDQ, CPU Feature: AVX512EVEX +func (x Int64x4) MulEvenWidenMasked(y Int64x4, z Mask64x4) Int64x4 + +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. +// +// Asm: VPMULDQ, CPU Feature: AVX512EVEX +func (x Int64x8) MulEvenWidenMasked(y Int64x8, z Mask64x8) Int64x8 + +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. +// +// Asm: VPMULUDQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MulEvenWidenMasked(y Uint64x2, z Mask64x2) Uint64x2 + +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. +// +// Asm: VPMULUDQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MulEvenWidenMasked(y Uint64x4, z Mask64x4) Uint64x4 + +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. +// +// Asm: VPMULUDQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MulEvenWidenMasked(y Uint64x8, z Mask64x8) Uint64x8 + /* MulHigh */ // MulHigh multiplies elements and stores the high part of the result. @@ -6368,30 +4304,62 @@ func (x Uint64x8) MulEvenWiden(y Uint64x8) Uint64x8 // Asm: VPMULHW, CPU Feature: AVX func (x Int16x8) MulHigh(y Int16x8) Int16x8 -// MulHigh multiplies elements and stores the high part of the result. +// MulHigh multiplies elements and stores the high part of the result. +// +// Asm: VPMULHW, CPU Feature: AVX2 +func (x Int16x16) MulHigh(y Int16x16) Int16x16 + +// MulHigh multiplies elements and stores the high part of the result, masked. +// +// Asm: VPMULHW, CPU Feature: AVX512EVEX +func (x Int16x32) MulHigh(y Int16x32) Int16x32 + +// MulHigh multiplies elements and stores the high part of the result. +// +// Asm: VPMULHUW, CPU Feature: AVX +func (x Uint16x8) MulHigh(y Uint16x8) Uint16x8 + +// MulHigh multiplies elements and stores the high part of the result. +// +// Asm: VPMULHUW, CPU Feature: AVX2 +func (x Uint16x16) MulHigh(y Uint16x16) Uint16x16 + +// MulHigh multiplies elements and stores the high part of the result, masked. 
+// +// Asm: VPMULHUW, CPU Feature: AVX512EVEX +func (x Uint16x32) MulHigh(y Uint16x32) Uint16x32 + +/* MulHighMasked */ + +// MulHigh multiplies elements and stores the high part of the result, masked. +// +// Asm: VPMULHW, CPU Feature: AVX512EVEX +func (x Int16x8) MulHighMasked(y Int16x8, z Mask16x8) Int16x8 + +// MulHigh multiplies elements and stores the high part of the result, masked. // -// Asm: VPMULHW, CPU Feature: AVX2 -func (x Int16x16) MulHigh(y Int16x16) Int16x16 +// Asm: VPMULHW, CPU Feature: AVX512EVEX +func (x Int16x16) MulHighMasked(y Int16x16, z Mask16x16) Int16x16 // MulHigh multiplies elements and stores the high part of the result, masked. // // Asm: VPMULHW, CPU Feature: AVX512EVEX -func (x Int16x32) MulHigh(y Int16x32) Int16x32 +func (x Int16x32) MulHighMasked(y Int16x32, z Mask16x32) Int16x32 -// MulHigh multiplies elements and stores the high part of the result. +// MulHigh multiplies elements and stores the high part of the result, masked. // -// Asm: VPMULHUW, CPU Feature: AVX -func (x Uint16x8) MulHigh(y Uint16x8) Uint16x8 +// Asm: VPMULHUW, CPU Feature: AVX512EVEX +func (x Uint16x8) MulHighMasked(y Uint16x8, z Mask16x8) Uint16x8 -// MulHigh multiplies elements and stores the high part of the result. +// MulHigh multiplies elements and stores the high part of the result, masked. // -// Asm: VPMULHUW, CPU Feature: AVX2 -func (x Uint16x16) MulHigh(y Uint16x16) Uint16x16 +// Asm: VPMULHUW, CPU Feature: AVX512EVEX +func (x Uint16x16) MulHighMasked(y Uint16x16, z Mask16x16) Uint16x16 // MulHigh multiplies elements and stores the high part of the result, masked. // // Asm: VPMULHUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MulHigh(y Uint16x32) Uint16x32 +func (x Uint16x32) MulHighMasked(y Uint16x32, z Mask16x32) Uint16x32 /* MulLow */ @@ -6440,6 +4408,85 @@ func (x Int64x4) MulLow(y Int64x4) Int64x4 // Asm: VPMULLQ, CPU Feature: AVX512EVEX func (x Int64x8) MulLow(y Int64x8) Int64x8 +/* MulLowMasked */ + +// MulLow multiplies elements and stores the low part of the result, masked. +// +// Asm: VPMULLW, CPU Feature: AVX512EVEX +func (x Int16x8) MulLowMasked(y Int16x8, z Mask16x8) Int16x8 + +// MulLow multiplies elements and stores the low part of the result, masked. +// +// Asm: VPMULLW, CPU Feature: AVX512EVEX +func (x Int16x16) MulLowMasked(y Int16x16, z Mask16x16) Int16x16 + +// MulLow multiplies elements and stores the low part of the result, masked. +// +// Asm: VPMULLW, CPU Feature: AVX512EVEX +func (x Int16x32) MulLowMasked(y Int16x32, z Mask16x32) Int16x32 + +// MulLow multiplies elements and stores the low part of the result, masked. +// +// Asm: VPMULLD, CPU Feature: AVX512EVEX +func (x Int32x4) MulLowMasked(y Int32x4, z Mask32x4) Int32x4 + +// MulLow multiplies elements and stores the low part of the result, masked. +// +// Asm: VPMULLD, CPU Feature: AVX512EVEX +func (x Int32x8) MulLowMasked(y Int32x8, z Mask32x8) Int32x8 + +// MulLow multiplies elements and stores the low part of the result, masked. +// +// Asm: VPMULLD, CPU Feature: AVX512EVEX +func (x Int32x16) MulLowMasked(y Int32x16, z Mask32x16) Int32x16 + +// MulLow multiplies elements and stores the low part of the result, masked. +// +// Asm: VPMULLQ, CPU Feature: AVX512EVEX +func (x Int64x2) MulLowMasked(y Int64x2, z Mask64x2) Int64x2 + +// MulLow multiplies elements and stores the low part of the result, masked. 
+// +// Asm: VPMULLQ, CPU Feature: AVX512EVEX +func (x Int64x4) MulLowMasked(y Int64x4, z Mask64x4) Int64x4 + +// MulLow multiplies elements and stores the low part of the result, masked. +// +// Asm: VPMULLQ, CPU Feature: AVX512EVEX +func (x Int64x8) MulLowMasked(y Int64x8, z Mask64x8) Int64x8 + +/* MulMasked */ + +// Mul multiplies corresponding elements of two vectors, masked. +// +// Asm: VMULPS, CPU Feature: AVX512EVEX +func (x Float32x4) MulMasked(y Float32x4, z Mask32x4) Float32x4 + +// Mul multiplies corresponding elements of two vectors, masked. +// +// Asm: VMULPS, CPU Feature: AVX512EVEX +func (x Float32x8) MulMasked(y Float32x8, z Mask32x8) Float32x8 + +// Mul multiplies corresponding elements of two vectors, masked. +// +// Asm: VMULPS, CPU Feature: AVX512EVEX +func (x Float32x16) MulMasked(y Float32x16, z Mask32x16) Float32x16 + +// Mul multiplies corresponding elements of two vectors, masked. +// +// Asm: VMULPD, CPU Feature: AVX512EVEX +func (x Float64x2) MulMasked(y Float64x2, z Mask64x2) Float64x2 + +// Mul multiplies corresponding elements of two vectors, masked. +// +// Asm: VMULPD, CPU Feature: AVX512EVEX +func (x Float64x4) MulMasked(y Float64x4, z Mask64x4) Float64x4 + +// Mul multiplies corresponding elements of two vectors, masked. +// +// Asm: VMULPD, CPU Feature: AVX512EVEX +func (x Float64x8) MulMasked(y Float64x8, z Mask64x8) Float64x8 + /* NotEqual */ // NotEqual compares for inequality. @@ -6592,6 +4639,158 @@ func (x Uint64x4) NotEqual(y Uint64x4) Mask64x4 // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) NotEqual(y Uint64x8) Mask64x8 +/* NotEqualMasked */ + +// NotEqual compares for inequality. +// +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x4) NotEqualMasked(y Float32x4, z Mask32x4) Mask32x4 + +// NotEqual compares for inequality. +// +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x8) NotEqualMasked(y Float32x8, z Mask32x8) Mask32x8 + +// NotEqual compares for inequality. +// +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) NotEqualMasked(y Float32x16, z Mask32x16) Mask32x16 + +// NotEqual compares for inequality. +// +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x2) NotEqualMasked(y Float64x2, z Mask64x2) Mask64x2 + +// NotEqual compares for inequality. +// +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x4) NotEqualMasked(y Float64x4, z Mask64x4) Mask64x4 + +// NotEqual compares for inequality. +// +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) NotEqualMasked(y Float64x8, z Mask64x8) Mask64x8 + +// NotEqual compares for inequality. +// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x16) NotEqualMasked(y Int8x16, z Mask8x16) Mask8x16 + +// NotEqual compares for inequality. +// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x32) NotEqualMasked(y Int8x32, z Mask8x32) Mask8x32 + +// NotEqual compares for inequality. +// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x64) NotEqualMasked(y Int8x64, z Mask8x64) Mask8x64 + +// NotEqual compares for inequality. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x8) NotEqualMasked(y Int16x8, z Mask16x8) Mask16x8 + +// NotEqual compares for inequality. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x16) NotEqualMasked(y Int16x16, z Mask16x16) Mask16x16 + +// NotEqual compares for inequality. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x32) NotEqualMasked(y Int16x32, z Mask16x32) Mask16x32 + +// NotEqual compares for inequality. 
+// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x4) NotEqualMasked(y Int32x4, z Mask32x4) Mask32x4 + +// NotEqual compares for inequality. +// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x8) NotEqualMasked(y Int32x8, z Mask32x8) Mask32x8 + +// NotEqual compares for inequality. +// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x16) NotEqualMasked(y Int32x16, z Mask32x16) Mask32x16 + +// NotEqual compares for inequality. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x2) NotEqualMasked(y Int64x2, z Mask64x2) Mask64x2 + +// NotEqual compares for inequality. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x4) NotEqualMasked(y Int64x4, z Mask64x4) Mask64x4 + +// NotEqual compares for inequality. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x8) NotEqualMasked(y Int64x8, z Mask64x8) Mask64x8 + +// NotEqual compares for inequality. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x16) NotEqualMasked(y Uint8x16, z Mask8x16) Mask8x16 + +// NotEqual compares for inequality. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x32) NotEqualMasked(y Uint8x32, z Mask8x32) Mask8x32 + +// NotEqual compares for inequality. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x64) NotEqualMasked(y Uint8x64, z Mask8x64) Mask8x64 + +// NotEqual compares for inequality. +// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x8) NotEqualMasked(y Uint16x8, z Mask16x8) Mask16x8 + +// NotEqual compares for inequality. +// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x16) NotEqualMasked(y Uint16x16, z Mask16x16) Mask16x16 + +// NotEqual compares for inequality. +// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x32) NotEqualMasked(y Uint16x32, z Mask16x32) Mask16x32 + +// NotEqual compares for inequality. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x4) NotEqualMasked(y Uint32x4, z Mask32x4) Mask32x4 + +// NotEqual compares for inequality. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x8) NotEqualMasked(y Uint32x8, z Mask32x8) Mask32x8 + +// NotEqual compares for inequality. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x16) NotEqualMasked(y Uint32x16, z Mask32x16) Mask32x16 + +// NotEqual compares for inequality. +// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) NotEqualMasked(y Uint64x2, z Mask64x2) Mask64x2 + +// NotEqual compares for inequality. +// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) NotEqualMasked(y Uint64x4, z Mask64x4) Mask64x4 + +// NotEqual compares for inequality. +// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) NotEqualMasked(y Uint64x8, z Mask64x8) Mask64x8 + /* Or */ // Or performs a bitwise OR operation between two vectors. @@ -6659,40 +4858,102 @@ func (x Uint8x32) Or(y Uint8x32) Uint8x32 // Asm: VPOR, CPU Feature: AVX func (x Uint16x8) Or(y Uint16x8) Uint16x8 -// Or performs a bitwise OR operation between two vectors. +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX2 +func (x Uint16x16) Or(y Uint16x16) Uint16x16 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX +func (x Uint32x4) Or(y Uint32x4) Uint32x4 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX2 +func (x Uint32x8) Or(y Uint32x8) Uint32x8 + +// Or performs a masked bitwise OR operation between two vectors. 
+// +// Asm: VPORD, CPU Feature: AVX512EVEX +func (x Uint32x16) Or(y Uint32x16) Uint32x16 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX +func (x Uint64x2) Or(y Uint64x2) Uint64x2 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX2 +func (x Uint64x4) Or(y Uint64x4) Uint64x4 + +// Or performs a masked bitwise OR operation between two vectors. +// +// Asm: VPORQ, CPU Feature: AVX512EVEX +func (x Uint64x8) Or(y Uint64x8) Uint64x8 + +/* OrMasked */ + +// Or performs a masked bitwise OR operation between two vectors. +// +// Asm: VPORD, CPU Feature: AVX512EVEX +func (x Int32x4) OrMasked(y Int32x4, z Mask32x4) Int32x4 + +// Or performs a masked bitwise OR operation between two vectors. +// +// Asm: VPORD, CPU Feature: AVX512EVEX +func (x Int32x8) OrMasked(y Int32x8, z Mask32x8) Int32x8 + +// Or performs a masked bitwise OR operation between two vectors. +// +// Asm: VPORD, CPU Feature: AVX512EVEX +func (x Int32x16) OrMasked(y Int32x16, z Mask32x16) Int32x16 + +// Or performs a masked bitwise OR operation between two vectors. +// +// Asm: VPORQ, CPU Feature: AVX512EVEX +func (x Int64x2) OrMasked(y Int64x2, z Mask64x2) Int64x2 + +// Or performs a masked bitwise OR operation between two vectors. +// +// Asm: VPORQ, CPU Feature: AVX512EVEX +func (x Int64x4) OrMasked(y Int64x4, z Mask64x4) Int64x4 + +// Or performs a masked bitwise OR operation between two vectors. // -// Asm: VPOR, CPU Feature: AVX2 -func (x Uint16x16) Or(y Uint16x16) Uint16x16 +// Asm: VPORQ, CPU Feature: AVX512EVEX +func (x Int64x8) OrMasked(y Int64x8, z Mask64x8) Int64x8 -// Or performs a bitwise OR operation between two vectors. +// Or performs a masked bitwise OR operation between two vectors. // -// Asm: VPOR, CPU Feature: AVX -func (x Uint32x4) Or(y Uint32x4) Uint32x4 +// Asm: VPORD, CPU Feature: AVX512EVEX +func (x Uint32x4) OrMasked(y Uint32x4, z Mask32x4) Uint32x4 -// Or performs a bitwise OR operation between two vectors. +// Or performs a masked bitwise OR operation between two vectors. // -// Asm: VPOR, CPU Feature: AVX2 -func (x Uint32x8) Or(y Uint32x8) Uint32x8 +// Asm: VPORD, CPU Feature: AVX512EVEX +func (x Uint32x8) OrMasked(y Uint32x8, z Mask32x8) Uint32x8 // Or performs a masked bitwise OR operation between two vectors. // // Asm: VPORD, CPU Feature: AVX512EVEX -func (x Uint32x16) Or(y Uint32x16) Uint32x16 +func (x Uint32x16) OrMasked(y Uint32x16, z Mask32x16) Uint32x16 -// Or performs a bitwise OR operation between two vectors. +// Or performs a masked bitwise OR operation between two vectors. // -// Asm: VPOR, CPU Feature: AVX -func (x Uint64x2) Or(y Uint64x2) Uint64x2 +// Asm: VPORQ, CPU Feature: AVX512EVEX +func (x Uint64x2) OrMasked(y Uint64x2, z Mask64x2) Uint64x2 -// Or performs a bitwise OR operation between two vectors. +// Or performs a masked bitwise OR operation between two vectors. // -// Asm: VPOR, CPU Feature: AVX2 -func (x Uint64x4) Or(y Uint64x4) Uint64x4 +// Asm: VPORQ, CPU Feature: AVX512EVEX +func (x Uint64x4) OrMasked(y Uint64x4, z Mask64x4) Uint64x4 // Or performs a masked bitwise OR operation between two vectors. 
// // Asm: VPORQ, CPU Feature: AVX512EVEX -func (x Uint64x8) Or(y Uint64x8) Uint64x8 +func (x Uint64x8) OrMasked(y Uint64x8, z Mask64x8) Uint64x8 /* PairDotProd */ @@ -6731,6 +4992,43 @@ func (x Int32x8) PairDotProdAccumulate(y Int16x16, z Int16x16) Int32x8 // Asm: VPDPWSSD, CPU Feature: AVX512EVEX func (x Int32x16) PairDotProdAccumulate(y Int16x32, z Int16x32) Int32x16 +/* PairDotProdAccumulateMasked */ + +// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSD, CPU Feature: AVX512EVEX +func (x Int32x4) PairDotProdAccumulateMasked(y Int16x8, z Int16x8, u Mask32x4) Int32x4 + +// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSD, CPU Feature: AVX512EVEX +func (x Int32x8) PairDotProdAccumulateMasked(y Int16x16, z Int16x16, u Mask32x8) Int32x8 + +// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSD, CPU Feature: AVX512EVEX +func (x Int32x16) PairDotProdAccumulateMasked(y Int16x32, z Int16x32, u Mask32x16) Int32x16 + +/* PairDotProdMasked */ + +// PairDotProd multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. +// +// Asm: VPMADDWD, CPU Feature: AVX512EVEX +func (x Int16x8) PairDotProdMasked(y Int16x8, z Mask16x8) Int32x4 + +// PairDotProd multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. +// +// Asm: VPMADDWD, CPU Feature: AVX512EVEX +func (x Int16x16) PairDotProdMasked(y Int16x16, z Mask16x16) Int32x8 + +// PairDotProd multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. +// +// Asm: VPMADDWD, CPU Feature: AVX512EVEX +func (x Int16x32) PairDotProdMasked(y Int16x32, z Mask16x32) Int32x16 + /* PairwiseAdd */ // PairwiseAdd horizontally adds adjacent pairs of elements. @@ -7001,6 +5299,128 @@ func (x Uint64x4) PopCount() Uint64x4 // Asm: VPOPCNTQ, CPU Feature: AVX512EVEX func (x Uint64x8) PopCount() Uint64x8 +/* PopCountMasked */ + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Int8x16) PopCountMasked(y Mask8x16) Int8x16 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Int8x32) PopCountMasked(y Mask8x32) Int8x32 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Int8x64) PopCountMasked(y Mask8x64) Int8x64 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Int16x8) PopCountMasked(y Mask16x8) Int16x8 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Int16x16) PopCountMasked(y Mask16x16) Int16x16 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Int16x32) PopCountMasked(y Mask16x32) Int16x32 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Int32x4) PopCountMasked(y Mask32x4) Int32x4 + +// PopCount counts the number of set bits in each element. 
+// +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Int32x8) PopCountMasked(y Mask32x8) Int32x8 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Int32x16) PopCountMasked(y Mask32x16) Int32x16 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Int64x2) PopCountMasked(y Mask64x2) Int64x2 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Int64x4) PopCountMasked(y Mask64x4) Int64x4 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Int64x8) PopCountMasked(y Mask64x8) Int64x8 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Uint8x16) PopCountMasked(y Mask8x16) Uint8x16 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Uint8x32) PopCountMasked(y Mask8x32) Uint8x32 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Uint8x64) PopCountMasked(y Mask8x64) Uint8x64 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Uint16x8) PopCountMasked(y Mask16x8) Uint16x8 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Uint16x16) PopCountMasked(y Mask16x16) Uint16x16 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Uint16x32) PopCountMasked(y Mask16x32) Uint16x32 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Uint32x4) PopCountMasked(y Mask32x4) Uint32x4 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Uint32x8) PopCountMasked(y Mask32x8) Uint32x8 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Uint32x16) PopCountMasked(y Mask32x16) Uint32x16 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Uint64x2) PopCountMasked(y Mask64x2) Uint64x2 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Uint64x4) PopCountMasked(y Mask64x4) Uint64x4 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Uint64x8) PopCountMasked(y Mask64x8) Uint64x8 + /* RotateAllLeft */ // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. @@ -7063,6 +5483,68 @@ func (x Uint64x4) RotateAllLeft(imm uint8) Uint64x4 // Asm: VPROLQ, CPU Feature: AVX512EVEX func (x Uint64x8) RotateAllLeft(imm uint8) Uint64x8 +/* RotateAllLeftMasked */ + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLD, CPU Feature: AVX512EVEX +func (x Int32x4) RotateAllLeftMasked(imm uint8, y Mask32x4) Int32x4 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. 
+// +// Asm: VPROLD, CPU Feature: AVX512EVEX +func (x Int32x8) RotateAllLeftMasked(imm uint8, y Mask32x8) Int32x8 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLD, CPU Feature: AVX512EVEX +func (x Int32x16) RotateAllLeftMasked(imm uint8, y Mask32x16) Int32x16 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLQ, CPU Feature: AVX512EVEX +func (x Int64x2) RotateAllLeftMasked(imm uint8, y Mask64x2) Int64x2 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLQ, CPU Feature: AVX512EVEX +func (x Int64x4) RotateAllLeftMasked(imm uint8, y Mask64x4) Int64x4 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLQ, CPU Feature: AVX512EVEX +func (x Int64x8) RotateAllLeftMasked(imm uint8, y Mask64x8) Int64x8 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLD, CPU Feature: AVX512EVEX +func (x Uint32x4) RotateAllLeftMasked(imm uint8, y Mask32x4) Uint32x4 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLD, CPU Feature: AVX512EVEX +func (x Uint32x8) RotateAllLeftMasked(imm uint8, y Mask32x8) Uint32x8 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLD, CPU Feature: AVX512EVEX +func (x Uint32x16) RotateAllLeftMasked(imm uint8, y Mask32x16) Uint32x16 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLQ, CPU Feature: AVX512EVEX +func (x Uint64x2) RotateAllLeftMasked(imm uint8, y Mask64x2) Uint64x2 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLQ, CPU Feature: AVX512EVEX +func (x Uint64x4) RotateAllLeftMasked(imm uint8, y Mask64x4) Uint64x4 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLQ, CPU Feature: AVX512EVEX +func (x Uint64x8) RotateAllLeftMasked(imm uint8, y Mask64x8) Uint64x8 + /* RotateAllRight */ // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. @@ -7108,22 +5590,84 @@ func (x Uint32x8) RotateAllRight(imm uint8) Uint32x8 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Uint32x16) RotateAllRight(imm uint8) Uint32x16 +func (x Uint32x16) RotateAllRight(imm uint8) Uint32x16 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORQ, CPU Feature: AVX512EVEX +func (x Uint64x2) RotateAllRight(imm uint8) Uint64x2 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORQ, CPU Feature: AVX512EVEX +func (x Uint64x4) RotateAllRight(imm uint8) Uint64x4 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORQ, CPU Feature: AVX512EVEX +func (x Uint64x8) RotateAllRight(imm uint8) Uint64x8 + +/* RotateAllRightMasked */ + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. 
+// +// Asm: VPRORD, CPU Feature: AVX512EVEX +func (x Int32x4) RotateAllRightMasked(imm uint8, y Mask32x4) Int32x4 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORD, CPU Feature: AVX512EVEX +func (x Int32x8) RotateAllRightMasked(imm uint8, y Mask32x8) Int32x8 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORD, CPU Feature: AVX512EVEX +func (x Int32x16) RotateAllRightMasked(imm uint8, y Mask32x16) Int32x16 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORQ, CPU Feature: AVX512EVEX +func (x Int64x2) RotateAllRightMasked(imm uint8, y Mask64x2) Int64x2 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORQ, CPU Feature: AVX512EVEX +func (x Int64x4) RotateAllRightMasked(imm uint8, y Mask64x4) Int64x4 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORQ, CPU Feature: AVX512EVEX +func (x Int64x8) RotateAllRightMasked(imm uint8, y Mask64x8) Int64x8 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORD, CPU Feature: AVX512EVEX +func (x Uint32x4) RotateAllRightMasked(imm uint8, y Mask32x4) Uint32x4 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORD, CPU Feature: AVX512EVEX +func (x Uint32x8) RotateAllRightMasked(imm uint8, y Mask32x8) Uint32x8 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORD, CPU Feature: AVX512EVEX +func (x Uint32x16) RotateAllRightMasked(imm uint8, y Mask32x16) Uint32x16 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Uint64x2) RotateAllRight(imm uint8) Uint64x2 +func (x Uint64x2) RotateAllRightMasked(imm uint8, y Mask64x2) Uint64x2 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Uint64x4) RotateAllRight(imm uint8) Uint64x4 +func (x Uint64x4) RotateAllRightMasked(imm uint8, y Mask64x4) Uint64x4 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Uint64x8) RotateAllRight(imm uint8) Uint64x8 +func (x Uint64x8) RotateAllRightMasked(imm uint8, y Mask64x8) Uint64x8 /* RotateLeft */ @@ -7187,6 +5731,68 @@ func (x Uint64x4) RotateLeft(y Uint64x4) Uint64x4 // Asm: VPROLVQ, CPU Feature: AVX512EVEX func (x Uint64x8) RotateLeft(y Uint64x8) Uint64x8 +/* RotateLeftMasked */ + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVD, CPU Feature: AVX512EVEX +func (x Int32x4) RotateLeftMasked(y Int32x4, z Mask32x4) Int32x4 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVD, CPU Feature: AVX512EVEX +func (x Int32x8) RotateLeftMasked(y Int32x8, z Mask32x8) Int32x8 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. 
+// +// Asm: VPROLVD, CPU Feature: AVX512EVEX +func (x Int32x16) RotateLeftMasked(y Int32x16, z Mask32x16) Int32x16 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVQ, CPU Feature: AVX512EVEX +func (x Int64x2) RotateLeftMasked(y Int64x2, z Mask64x2) Int64x2 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVQ, CPU Feature: AVX512EVEX +func (x Int64x4) RotateLeftMasked(y Int64x4, z Mask64x4) Int64x4 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVQ, CPU Feature: AVX512EVEX +func (x Int64x8) RotateLeftMasked(y Int64x8, z Mask64x8) Int64x8 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVD, CPU Feature: AVX512EVEX +func (x Uint32x4) RotateLeftMasked(y Uint32x4, z Mask32x4) Uint32x4 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVD, CPU Feature: AVX512EVEX +func (x Uint32x8) RotateLeftMasked(y Uint32x8, z Mask32x8) Uint32x8 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVD, CPU Feature: AVX512EVEX +func (x Uint32x16) RotateLeftMasked(y Uint32x16, z Mask32x16) Uint32x16 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVQ, CPU Feature: AVX512EVEX +func (x Uint64x2) RotateLeftMasked(y Uint64x2, z Mask64x2) Uint64x2 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) RotateLeftMasked(y Uint64x4, z Mask64x4) Uint64x4 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) RotateLeftMasked(y Uint64x8, z Mask64x8) Uint64x8 + /* RotateRight */ // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. @@ -7249,6 +5855,68 @@ func (x Uint64x4) RotateRight(y Uint64x4) Uint64x4 // Asm: VPRORVQ, CPU Feature: AVX512EVEX func (x Uint64x8) RotateRight(y Uint64x8) Uint64x8 +/* RotateRightMasked */ + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVD, CPU Feature: AVX512EVEX +func (x Int32x4) RotateRightMasked(y Int32x4, z Mask32x4) Int32x4 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVD, CPU Feature: AVX512EVEX +func (x Int32x8) RotateRightMasked(y Int32x8, z Mask32x8) Int32x8 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVD, CPU Feature: AVX512EVEX +func (x Int32x16) RotateRightMasked(y Int32x16, z Mask32x16) Int32x16 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. 
+// +// Asm: VPRORVQ, CPU Feature: AVX512EVEX +func (x Int64x2) RotateRightMasked(y Int64x2, z Mask64x2) Int64x2 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVQ, CPU Feature: AVX512EVEX +func (x Int64x4) RotateRightMasked(y Int64x4, z Mask64x4) Int64x4 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVQ, CPU Feature: AVX512EVEX +func (x Int64x8) RotateRightMasked(y Int64x8, z Mask64x8) Int64x8 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVD, CPU Feature: AVX512EVEX +func (x Uint32x4) RotateRightMasked(y Uint32x4, z Mask32x4) Uint32x4 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVD, CPU Feature: AVX512EVEX +func (x Uint32x8) RotateRightMasked(y Uint32x8, z Mask32x8) Uint32x8 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVD, CPU Feature: AVX512EVEX +func (x Uint32x16) RotateRightMasked(y Uint32x16, z Mask32x16) Uint32x16 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVQ, CPU Feature: AVX512EVEX +func (x Uint64x2) RotateRightMasked(y Uint64x2, z Mask64x2) Uint64x2 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) RotateRightMasked(y Uint64x4, z Mask64x4) Uint64x4 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) RotateRightMasked(y Uint64x8, z Mask64x8) Uint64x8 + /* Round */ // Round rounds elements to the nearest integer. @@ -7303,6 +5971,38 @@ func (x Float64x4) RoundWithPrecision(imm uint8) Float64x4 // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) RoundWithPrecision(imm uint8) Float64x8 +/* RoundWithPrecisionMasked */ + +// RoundWithPrecision rounds elements with specified precision. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) RoundWithPrecisionMasked(imm uint8, y Mask32x4) Float32x4 + +// RoundWithPrecision rounds elements with specified precision. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) RoundWithPrecisionMasked(imm uint8, y Mask32x8) Float32x8 + +// RoundWithPrecision rounds elements with specified precision. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) RoundWithPrecisionMasked(imm uint8, y Mask32x16) Float32x16 + +// RoundWithPrecision rounds elements with specified precision. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) RoundWithPrecisionMasked(imm uint8, y Mask64x2) Float64x2 + +// RoundWithPrecision rounds elements with specified precision. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) RoundWithPrecisionMasked(imm uint8, y Mask64x4) Float64x4 + +// RoundWithPrecision rounds elements with specified precision. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) RoundWithPrecisionMasked(imm uint8, y Mask64x8) Float64x8 + /* SaturatedAdd */ // SaturatedAdd adds corresponding elements of two vectors with saturation. 
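The masked variants declared above all follow one shape: the trailing Mask argument selects the lanes the operation acts on, and masks themselves come from the comparison methods (NotEqual and the other compares return MaskNxM values). Below is a minimal illustrative sketch of that composition, using only signatures declared in this file and assuming the package is importable as plain "simd" (at this point in the series it still lives under internal/):

    package vecdemo

    import "simd" // assumed import path; use internal/simd while the experiment lives there

    // maxWhereDiffer computes the per-lane unsigned maximum of x and y,
    // restricted to the lanes where the two vectors differ. What MaxMasked
    // leaves in the unselected lanes (merge vs. zero) is whatever the
    // generated op defines; this sketch does not depend on it.
    func maxWhereDiffer(x, y simd.Uint64x4) simd.Uint64x4 {
    	m := x.NotEqual(y)       // Mask64x4: set where x[i] != y[i]
    	return x.MaxMasked(y, m) // VPMAXUQ applied under mask m
    }

The same pattern holds for every FooMasked method in this file: the mask is always the last parameter, and its lane count and element width match the receiver's.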
@@ -7365,6 +6065,68 @@ func (x Uint16x16) SaturatedAdd(y Uint16x16) Uint16x16 // Asm: VPADDSW, CPU Feature: AVX512EVEX func (x Uint16x32) SaturatedAdd(y Uint16x32) Uint16x32 +/* SaturatedAddMasked */ + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSB, CPU Feature: AVX512EVEX +func (x Int8x16) SaturatedAddMasked(y Int8x16, z Mask8x16) Int8x16 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSB, CPU Feature: AVX512EVEX +func (x Int8x32) SaturatedAddMasked(y Int8x32, z Mask8x32) Int8x32 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSB, CPU Feature: AVX512EVEX +func (x Int8x64) SaturatedAddMasked(y Int8x64, z Mask8x64) Int8x64 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSW, CPU Feature: AVX512EVEX +func (x Int16x8) SaturatedAddMasked(y Int16x8, z Mask16x8) Int16x8 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSW, CPU Feature: AVX512EVEX +func (x Int16x16) SaturatedAddMasked(y Int16x16, z Mask16x16) Int16x16 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSW, CPU Feature: AVX512EVEX +func (x Int16x32) SaturatedAddMasked(y Int16x32, z Mask16x32) Int16x32 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSB, CPU Feature: AVX512EVEX +func (x Uint8x16) SaturatedAddMasked(y Uint8x16, z Mask8x16) Uint8x16 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSB, CPU Feature: AVX512EVEX +func (x Uint8x32) SaturatedAddMasked(y Uint8x32, z Mask8x32) Uint8x32 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSB, CPU Feature: AVX512EVEX +func (x Uint8x64) SaturatedAddMasked(y Uint8x64, z Mask8x64) Uint8x64 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSW, CPU Feature: AVX512EVEX +func (x Uint16x8) SaturatedAddMasked(y Uint16x8, z Mask16x8) Uint16x8 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSW, CPU Feature: AVX512EVEX +func (x Uint16x16) SaturatedAddMasked(y Uint16x16, z Mask16x16) Uint16x16 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSW, CPU Feature: AVX512EVEX +func (x Uint16x32) SaturatedAddMasked(y Uint16x32, z Mask16x32) Uint16x32 + /* SaturatedPairDotProdAccumulate */ // SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. @@ -7382,6 +6144,23 @@ func (x Int32x8) SaturatedPairDotProdAccumulate(y Int16x16, z Int16x16) Int32x8 // Asm: VPDPWSSDS, CPU Feature: AVX512EVEX func (x Int32x16) SaturatedPairDotProdAccumulate(y Int16x32, z Int16x32) Int32x16 +/* SaturatedPairDotProdAccumulateMasked */ + +// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX +func (x Int32x4) SaturatedPairDotProdAccumulateMasked(y Int16x8, z Int16x8, u Mask32x4) Int32x4 + +// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. 
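+//
+// A rough sketch of the accumulate pattern (illustrative; names are assumptions):
+//
+//	var acc Int32x8   // running dot-product accumulator
+//	var a, b Int16x16 // 16-bit inputs, taken in adjacent pairs
+//	var m Mask32x8
+//	acc = acc.SaturatedPairDotProdAccumulateMasked(a, b, m)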
+// +// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX +func (x Int32x8) SaturatedPairDotProdAccumulateMasked(y Int16x16, z Int16x16, u Mask32x8) Int32x8 + +// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX +func (x Int32x16) SaturatedPairDotProdAccumulateMasked(y Int16x32, z Int16x32, u Mask32x16) Int32x16 + /* SaturatedPairwiseAdd */ // SaturatedPairwiseAdd horizontally adds adjacent pairs of elements with saturation. @@ -7472,25 +6251,107 @@ func (x Uint16x16) SaturatedSub(y Uint16x16) Uint16x16 // Asm: VPSUBSW, CPU Feature: AVX512EVEX func (x Uint16x32) SaturatedSub(y Uint16x32) Uint16x32 -/* SaturatedUnsignedSignedPairDotProd */ +/* SaturatedSubMasked */ + +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSB, CPU Feature: AVX512EVEX +func (x Int8x16) SaturatedSubMasked(y Int8x16, z Mask8x16) Int8x16 + +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSB, CPU Feature: AVX512EVEX +func (x Int8x32) SaturatedSubMasked(y Int8x32, z Mask8x32) Int8x32 + +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSB, CPU Feature: AVX512EVEX +func (x Int8x64) SaturatedSubMasked(y Int8x64, z Mask8x64) Int8x64 + +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSW, CPU Feature: AVX512EVEX +func (x Int16x8) SaturatedSubMasked(y Int16x8, z Mask16x8) Int16x8 + +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSW, CPU Feature: AVX512EVEX +func (x Int16x16) SaturatedSubMasked(y Int16x16, z Mask16x16) Int16x16 + +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSW, CPU Feature: AVX512EVEX +func (x Int16x32) SaturatedSubMasked(y Int16x32, z Mask16x32) Int16x32 + +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSB, CPU Feature: AVX512EVEX +func (x Uint8x16) SaturatedSubMasked(y Uint8x16, z Mask8x16) Uint8x16 + +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSB, CPU Feature: AVX512EVEX +func (x Uint8x32) SaturatedSubMasked(y Uint8x32, z Mask8x32) Uint8x32 + +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSB, CPU Feature: AVX512EVEX +func (x Uint8x64) SaturatedSubMasked(y Uint8x64, z Mask8x64) Uint8x64 + +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSW, CPU Feature: AVX512EVEX +func (x Uint16x8) SaturatedSubMasked(y Uint16x8, z Mask16x8) Uint16x8 + +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSW, CPU Feature: AVX512EVEX +func (x Uint16x16) SaturatedSubMasked(y Uint16x16, z Mask16x16) Uint16x16 + +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSW, CPU Feature: AVX512EVEX +func (x Uint16x32) SaturatedSubMasked(y Uint16x32, z Mask16x32) Uint16x32 + +/* SaturatedUnsignedSignedPairDotProd */ + +// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. 
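+//
+// For example (a sketch; variable names are assumptions), 16 unsigned bytes paired
+// against 16 signed bytes yield 8 saturated 16-bit results:
+//
+//	var u Uint8x16
+//	var s Int8x16
+//	r := u.SaturatedUnsignedSignedPairDotProd(s) // r is an Int16x8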
+// +// Asm: VPMADDUBSW, CPU Feature: AVX +func (x Uint8x16) SaturatedUnsignedSignedPairDotProd(y Int8x16) Int16x8 + +// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. +// +// Asm: VPMADDUBSW, CPU Feature: AVX2 +func (x Uint8x32) SaturatedUnsignedSignedPairDotProd(y Int8x32) Int16x16 + +// SaturatedPairDotProdMasked multiplies the elements and add the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. +// +// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX +func (x Uint8x64) SaturatedUnsignedSignedPairDotProd(y Int8x64) Int16x32 + +/* SaturatedUnsignedSignedPairDotProdMasked */ -// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, +// SaturatedPairDotProdMasked multiplies the elements and add the pairs together with saturation, // yielding a vector of half as many elements with twice the input element size. // -// Asm: VPMADDUBSW, CPU Feature: AVX -func (x Uint8x16) SaturatedUnsignedSignedPairDotProd(y Int8x16) Int16x8 +// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX +func (x Uint8x16) SaturatedUnsignedSignedPairDotProdMasked(y Int8x16, z Mask16x8) Int16x8 -// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, +// SaturatedPairDotProdMasked multiplies the elements and add the pairs together with saturation, // yielding a vector of half as many elements with twice the input element size. // -// Asm: VPMADDUBSW, CPU Feature: AVX2 -func (x Uint8x32) SaturatedUnsignedSignedPairDotProd(y Int8x32) Int16x16 +// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX +func (x Uint8x32) SaturatedUnsignedSignedPairDotProdMasked(y Int8x32, z Mask16x16) Int16x16 -// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, +// SaturatedPairDotProdMasked multiplies the elements and add the pairs together with saturation, // yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDUBSW, CPU Feature: AVX512EVEX -func (x Uint8x64) SaturatedUnsignedSignedPairDotProd(y Int8x64) Int16x32 +func (x Uint8x64) SaturatedUnsignedSignedPairDotProdMasked(y Int8x64, z Mask16x32) Int16x32 /* SaturatedUnsignedSignedQuadDotProdAccumulate */ @@ -7524,6 +6385,38 @@ func (x Uint32x8) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int // Asm: VPDPBUSDS, CPU Feature: AVX512EVEX func (x Uint32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Uint32x16 +/* SaturatedUnsignedSignedQuadDotProdAccumulateMasked */ + +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +func (x Int32x4) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int8x16, u Mask32x4) Int32x4 + +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +func (x Int32x8) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int8x32, u Mask32x8) Int32x8 + +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
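+//
+// A rough sketch (illustrative; names are assumptions): four unsigned-by-signed
+// byte products are summed with saturation and accumulated into each Int32 lane.
+//
+//	var acc Int32x16
+//	var u Uint8x64
+//	var s Int8x64
+//	var m Mask32x16
+//	acc = acc.SaturatedUnsignedSignedQuadDotProdAccumulateMasked(u, s, m)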
+// +// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +func (x Int32x16) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int8x64, u Mask32x16) Int32x16 + +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +func (x Uint32x4) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int8x16, u Mask32x4) Uint32x4 + +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +func (x Uint32x8) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int8x32, u Mask32x8) Uint32x8 + +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +func (x Uint32x16) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int8x64, u Mask32x16) Uint32x16 + /* Set128 */ // Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. @@ -7800,6 +6693,148 @@ func (x Uint64x4) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint64x4) Uint64x4 // Asm: VPSHLDQ, CPU Feature: AVX512EVEX func (x Uint64x8) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint64x8) Uint64x8 +/* ShiftAllLeftAndFillUpperFromMasked */ + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDW, CPU Feature: AVX512EVEX +func (x Int16x8) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int16x8, z Mask16x8) Int16x8 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDW, CPU Feature: AVX512EVEX +func (x Int16x16) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int16x16, z Mask16x16) Int16x16 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDW, CPU Feature: AVX512EVEX +func (x Int16x32) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int16x32, z Mask16x32) Int16x32 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDD, CPU Feature: AVX512EVEX +func (x Int32x4) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int32x4, z Mask32x4) Int32x4 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. 
+// +// Asm: VPSHLDD, CPU Feature: AVX512EVEX +func (x Int32x8) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int32x8, z Mask32x8) Int32x8 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDD, CPU Feature: AVX512EVEX +func (x Int32x16) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int32x16, z Mask32x16) Int32x16 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +func (x Int64x2) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int64x2, z Mask64x2) Int64x2 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +func (x Int64x4) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int64x4, z Mask64x4) Int64x4 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int64x8, z Mask64x8) Int64x8 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDW, CPU Feature: AVX512EVEX +func (x Uint16x8) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint16x8, z Mask16x8) Uint16x8 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDW, CPU Feature: AVX512EVEX +func (x Uint16x16) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint16x16, z Mask16x16) Uint16x16 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDW, CPU Feature: AVX512EVEX +func (x Uint16x32) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint16x32, z Mask16x32) Uint16x32 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDD, CPU Feature: AVX512EVEX +func (x Uint32x4) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint32x4, z Mask32x4) Uint32x4 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. 
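+//
+// This is the immediate-count form of a funnel shift, with a mask operand. A short
+// sketch (illustrative; names are assumptions):
+//
+//	var hi, lo Uint32x8
+//	var m Mask32x8
+//	r := hi.ShiftAllLeftAndFillUpperFromMasked(3, lo, m) // shift hi left by 3, filling from lo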
+// +// Asm: VPSHLDD, CPU Feature: AVX512EVEX +func (x Uint32x8) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint32x8, z Mask32x8) Uint32x8 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDD, CPU Feature: AVX512EVEX +func (x Uint32x16) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint32x16, z Mask32x16) Uint32x16 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +func (x Uint64x2) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint64x2, z Mask64x2) Uint64x2 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +func (x Uint64x4) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint64x4, z Mask64x4) Uint64x4 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint64x8, z Mask64x8) Uint64x8 + +/* ShiftAllLeftMasked */ + +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLQ, CPU Feature: AVX512EVEX +func (x Int64x2) ShiftAllLeftMasked(y uint64, z Mask64x2) Int64x2 + +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLQ, CPU Feature: AVX512EVEX +func (x Int64x4) ShiftAllLeftMasked(y uint64, z Mask64x4) Int64x4 + +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftAllLeftMasked(y uint64, z Mask64x8) Int64x8 + +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLQ, CPU Feature: AVX512EVEX +func (x Uint64x2) ShiftAllLeftMasked(y uint64, z Mask64x2) Uint64x2 + +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLQ, CPU Feature: AVX512EVEX +func (x Uint64x4) ShiftAllLeftMasked(y uint64, z Mask64x4) Uint64x4 + +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftAllLeftMasked(y uint64, z Mask64x8) Uint64x8 + /* ShiftAllRight */ // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. @@ -7852,135 +6887,277 @@ func (x Uint16x16) ShiftAllRight(y uint64) Uint16x16 // Asm: VPSRLD, CPU Feature: AVX func (x Uint32x4) ShiftAllRight(y uint64) Uint32x4 -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. 
+// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// +// Asm: VPSRLD, CPU Feature: AVX2 +func (x Uint32x8) ShiftAllRight(y uint64) Uint32x8 + +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// +// Asm: VPSRLQ, CPU Feature: AVX +func (x Uint64x2) ShiftAllRight(y uint64) Uint64x2 + +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// +// Asm: VPSRLQ, CPU Feature: AVX2 +func (x Uint64x4) ShiftAllRight(y uint64) Uint64x4 + +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// +// Asm: VPSRLQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftAllRight(y uint64) Uint64x8 + +/* ShiftAllRightAndFillUpperFrom */ + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDW, CPU Feature: AVX512EVEX +func (x Int16x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Int16x8) Int16x8 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDW, CPU Feature: AVX512EVEX +func (x Int16x16) ShiftAllRightAndFillUpperFrom(imm uint8, y Int16x16) Int16x16 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDW, CPU Feature: AVX512EVEX +func (x Int16x32) ShiftAllRightAndFillUpperFrom(imm uint8, y Int16x32) Int16x32 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDD, CPU Feature: AVX512EVEX +func (x Int32x4) ShiftAllRightAndFillUpperFrom(imm uint8, y Int32x4) Int32x4 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDD, CPU Feature: AVX512EVEX +func (x Int32x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Int32x8) Int32x8 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDD, CPU Feature: AVX512EVEX +func (x Int32x16) ShiftAllRightAndFillUpperFrom(imm uint8, y Int32x16) Int32x16 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
+// +// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +func (x Int64x2) ShiftAllRightAndFillUpperFrom(imm uint8, y Int64x2) Int64x2 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +func (x Int64x4) ShiftAllRightAndFillUpperFrom(imm uint8, y Int64x4) Int64x4 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Int64x8) Int64x8 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDW, CPU Feature: AVX512EVEX +func (x Uint16x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint16x8) Uint16x8 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDW, CPU Feature: AVX512EVEX +func (x Uint16x16) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint16x16) Uint16x16 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDW, CPU Feature: AVX512EVEX +func (x Uint16x32) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint16x32) Uint16x32 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDD, CPU Feature: AVX512EVEX +func (x Uint32x4) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint32x4) Uint32x4 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDD, CPU Feature: AVX512EVEX +func (x Uint32x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint32x8) Uint32x8 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VPSRLD, CPU Feature: AVX2 -func (x Uint32x8) ShiftAllRight(y uint64) Uint32x8 +// Asm: VPSHRDD, CPU Feature: AVX512EVEX +func (x Uint32x16) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint32x16) Uint32x16 -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
// -// Asm: VPSRLQ, CPU Feature: AVX -func (x Uint64x2) ShiftAllRight(y uint64) Uint64x2 +// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +func (x Uint64x2) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint64x2) Uint64x2 -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VPSRLQ, CPU Feature: AVX2 -func (x Uint64x4) ShiftAllRight(y uint64) Uint64x4 +// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +func (x Uint64x4) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint64x4) Uint64x4 -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VPSRLQ, CPU Feature: AVX512EVEX -func (x Uint64x8) ShiftAllRight(y uint64) Uint64x8 +// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint64x8) Uint64x8 -/* ShiftAllRightAndFillUpperFrom */ +/* ShiftAllRightAndFillUpperFromMasked */ // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Int16x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Int16x8) Int16x8 +func (x Int16x8) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int16x8, z Mask16x8) Int16x8 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Int16x16) ShiftAllRightAndFillUpperFrom(imm uint8, y Int16x16) Int16x16 +func (x Int16x16) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int16x16, z Mask16x16) Int16x16 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Int16x32) ShiftAllRightAndFillUpperFrom(imm uint8, y Int16x32) Int16x32 +func (x Int16x32) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int16x32, z Mask16x32) Int16x32 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Int32x4) ShiftAllRightAndFillUpperFrom(imm uint8, y Int32x4) Int32x4 +func (x Int32x4) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int32x4, z Mask32x4) Int32x4 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
// // Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Int32x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Int32x8) Int32x8 +func (x Int32x8) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int32x8, z Mask32x8) Int32x8 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Int32x16) ShiftAllRightAndFillUpperFrom(imm uint8, y Int32x16) Int32x16 +func (x Int32x16) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int32x16, z Mask32x16) Int32x16 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Int64x2) ShiftAllRightAndFillUpperFrom(imm uint8, y Int64x2) Int64x2 +func (x Int64x2) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int64x2, z Mask64x2) Int64x2 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Int64x4) ShiftAllRightAndFillUpperFrom(imm uint8, y Int64x4) Int64x4 +func (x Int64x4) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int64x4, z Mask64x4) Int64x4 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Int64x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Int64x8) Int64x8 +func (x Int64x8) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int64x8, z Mask64x8) Int64x8 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Uint16x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint16x8) Uint16x8 +func (x Uint16x8) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint16x8, z Mask16x8) Uint16x8 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Uint16x16) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint16x16) Uint16x16 +func (x Uint16x16) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint16x16, z Mask16x16) Uint16x16 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
// // Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Uint16x32) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint16x32) Uint16x32 +func (x Uint16x32) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint16x32, z Mask16x32) Uint16x32 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Uint32x4) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint32x4) Uint32x4 +func (x Uint32x4) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint32x4, z Mask32x4) Uint32x4 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Uint32x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint32x8) Uint32x8 +func (x Uint32x8) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint32x8, z Mask32x8) Uint32x8 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Uint32x16) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint32x16) Uint32x16 +func (x Uint32x16) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint32x16, z Mask32x16) Uint32x16 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Uint64x2) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint64x2) Uint64x2 +func (x Uint64x2) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint64x2, z Mask64x2) Uint64x2 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Uint64x4) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint64x4) Uint64x4 +func (x Uint64x4) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint64x4, z Mask64x4) Uint64x4 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Uint64x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint64x8) Uint64x8 +func (x Uint64x8) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint64x8, z Mask64x8) Uint64x8 + +/* ShiftAllRightMasked */ + +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// +// Asm: VPSRLQ, CPU Feature: AVX512EVEX +func (x Int64x2) ShiftAllRightMasked(y uint64, z Mask64x2) Int64x2 + +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. 
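+//
+// A minimal sketch of the uniform-count form, where every lane is shifted by the
+// same scalar amount (illustrative; names are assumptions):
+//
+//	var v Int64x4
+//	var m Mask64x4
+//	v = v.ShiftAllRightMasked(8, m) // logical right shift of each lane by 8 bits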
+// +// Asm: VPSRLQ, CPU Feature: AVX512EVEX +func (x Int64x4) ShiftAllRightMasked(y uint64, z Mask64x4) Int64x4 + +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// +// Asm: VPSRLQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftAllRightMasked(y uint64, z Mask64x8) Int64x8 + +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// +// Asm: VPSRLQ, CPU Feature: AVX512EVEX +func (x Uint64x2) ShiftAllRightMasked(y uint64, z Mask64x2) Uint64x2 + +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// +// Asm: VPSRLQ, CPU Feature: AVX512EVEX +func (x Uint64x4) ShiftAllRightMasked(y uint64, z Mask64x4) Uint64x4 + +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// +// Asm: VPSRLQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftAllRightMasked(y uint64, z Mask64x8) Uint64x8 /* ShiftAllRightSignExtended */ @@ -8019,6 +7196,23 @@ func (x Int64x4) ShiftAllRightSignExtended(y uint64) Int64x4 // Asm: VPSRAQ, CPU Feature: AVX512EVEX func (x Int64x8) ShiftAllRightSignExtended(y uint64) Int64x8 +/* ShiftAllRightSignExtendedMasked */ + +// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAQ, CPU Feature: AVX512EVEX +func (x Int64x2) ShiftAllRightSignExtendedMasked(y uint64, z Mask64x2) Int64x2 + +// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAQ, CPU Feature: AVX512EVEX +func (x Int64x4) ShiftAllRightSignExtendedMasked(y uint64, z Mask64x4) Int64x4 + +// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftAllRightSignExtendedMasked(y uint64, z Mask64x8) Int64x8 + /* ShiftLeft */ // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. @@ -8209,17 +7403,219 @@ func (x Uint32x16) ShiftLeftAndFillUpperFrom(y Uint32x16, z Uint32x16) Uint32x16 // Asm: VPSHLDVQ, CPU Feature: AVX512EVEX func (x Uint64x2) ShiftLeftAndFillUpperFrom(y Uint64x2, z Uint64x2) Uint64x2 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) ShiftLeftAndFillUpperFrom(y Uint64x4, z Uint64x4) Uint64x4 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
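+//
+// This is the variable-count counterpart of ShiftAllLeftAndFillUpperFrom: the shift
+// amount comes from a vector rather than an immediate. A sketch (illustrative;
+// names are assumptions):
+//
+//	var hi, counts, lo Uint64x8
+//	r := hi.ShiftLeftAndFillUpperFrom(counts, lo)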
+// +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftLeftAndFillUpperFrom(y Uint64x8, z Uint64x8) Uint64x8 + +/* ShiftLeftAndFillUpperFromMasked */ + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +func (x Int16x8) ShiftLeftAndFillUpperFromMasked(y Int16x8, z Int16x8, u Mask16x8) Int16x8 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +func (x Int16x16) ShiftLeftAndFillUpperFromMasked(y Int16x16, z Int16x16, u Mask16x16) Int16x16 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +func (x Int16x32) ShiftLeftAndFillUpperFromMasked(y Int16x32, z Int16x32, u Mask16x32) Int16x32 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +func (x Int32x4) ShiftLeftAndFillUpperFromMasked(y Int32x4, z Int32x4, u Mask32x4) Int32x4 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +func (x Int32x8) ShiftLeftAndFillUpperFromMasked(y Int32x8, z Int32x8, u Mask32x8) Int32x8 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +func (x Int32x16) ShiftLeftAndFillUpperFromMasked(y Int32x16, z Int32x16, u Mask32x16) Int32x16 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Int64x2) ShiftLeftAndFillUpperFromMasked(y Int64x2, z Int64x2, u Mask64x2) Int64x2 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
+// +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Int64x4) ShiftLeftAndFillUpperFromMasked(y Int64x4, z Int64x4, u Mask64x4) Int64x4 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftLeftAndFillUpperFromMasked(y Int64x8, z Int64x8, u Mask64x8) Int64x8 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +func (x Uint16x8) ShiftLeftAndFillUpperFromMasked(y Uint16x8, z Uint16x8, u Mask16x8) Uint16x8 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +func (x Uint16x16) ShiftLeftAndFillUpperFromMasked(y Uint16x16, z Uint16x16, u Mask16x16) Uint16x16 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +func (x Uint16x32) ShiftLeftAndFillUpperFromMasked(y Uint16x32, z Uint16x32, u Mask16x32) Uint16x32 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +func (x Uint32x4) ShiftLeftAndFillUpperFromMasked(y Uint32x4, z Uint32x4, u Mask32x4) Uint32x4 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +func (x Uint32x8) ShiftLeftAndFillUpperFromMasked(y Uint32x8, z Uint32x8, u Mask32x8) Uint32x8 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +func (x Uint32x16) ShiftLeftAndFillUpperFromMasked(y Uint32x16, z Uint32x16, u Mask32x16) Uint32x16 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
+// +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Uint64x2) ShiftLeftAndFillUpperFromMasked(y Uint64x2, z Uint64x2, u Mask64x2) Uint64x2 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) ShiftLeftAndFillUpperFromMasked(y Uint64x4, z Uint64x4, u Mask64x4) Uint64x4 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftLeftAndFillUpperFromMasked(y Uint64x8, z Uint64x8, u Mask64x8) Uint64x8 + +/* ShiftLeftMasked */ + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVW, CPU Feature: AVX512EVEX +func (x Int16x8) ShiftLeftMasked(y Int16x8, z Mask16x8) Int16x8 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVW, CPU Feature: AVX512EVEX +func (x Int16x16) ShiftLeftMasked(y Int16x16, z Mask16x16) Int16x16 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVW, CPU Feature: AVX512EVEX +func (x Int16x32) ShiftLeftMasked(y Int16x32, z Mask16x32) Int16x32 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVD, CPU Feature: AVX512EVEX +func (x Int32x4) ShiftLeftMasked(y Int32x4, z Mask32x4) Int32x4 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVD, CPU Feature: AVX512EVEX +func (x Int32x8) ShiftLeftMasked(y Int32x8, z Mask32x8) Int32x8 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVD, CPU Feature: AVX512EVEX +func (x Int32x16) ShiftLeftMasked(y Int32x16, z Mask32x16) Int32x16 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +func (x Int64x2) ShiftLeftMasked(y Int64x2, z Mask64x2) Int64x2 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +func (x Int64x4) ShiftLeftMasked(y Int64x4, z Mask64x4) Int64x4 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftLeftMasked(y Int64x8, z Mask64x8) Int64x8 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. 
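+//
+// A short sketch of the per-lane variable shift (illustrative; names are assumptions):
+//
+//	var v, counts Uint16x8 // counts holds an independent shift amount for each lane
+//	var m Mask16x8
+//	v = v.ShiftLeftMasked(counts, m)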
+// +// Asm: VPSLLVW, CPU Feature: AVX512EVEX +func (x Uint16x8) ShiftLeftMasked(y Uint16x8, z Mask16x8) Uint16x8 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVW, CPU Feature: AVX512EVEX +func (x Uint16x16) ShiftLeftMasked(y Uint16x16, z Mask16x16) Uint16x16 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVW, CPU Feature: AVX512EVEX +func (x Uint16x32) ShiftLeftMasked(y Uint16x32, z Mask16x32) Uint16x32 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVD, CPU Feature: AVX512EVEX +func (x Uint32x4) ShiftLeftMasked(y Uint32x4, z Mask32x4) Uint32x4 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVD, CPU Feature: AVX512EVEX +func (x Uint32x8) ShiftLeftMasked(y Uint32x8, z Mask32x8) Uint32x8 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVD, CPU Feature: AVX512EVEX +func (x Uint32x16) ShiftLeftMasked(y Uint32x16, z Mask32x16) Uint32x16 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +func (x Uint64x2) ShiftLeftMasked(y Uint64x2, z Mask64x2) Uint64x2 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX -func (x Uint64x4) ShiftLeftAndFillUpperFrom(y Uint64x4, z Uint64x4) Uint64x4 +// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) ShiftLeftMasked(y Uint64x4, z Mask64x4) Uint64x4 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX -func (x Uint64x8) ShiftLeftAndFillUpperFrom(y Uint64x8, z Uint64x8) Uint64x8 +// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftLeftMasked(y Uint64x8, z Mask64x8) Uint64x8 /* ShiftRight */ @@ -8423,6 +7819,208 @@ func (x Uint64x4) ShiftRightAndFillUpperFrom(y Uint64x4, z Uint64x4) Uint64x4 // Asm: VPSHRDVQ, CPU Feature: AVX512EVEX func (x Uint64x8) ShiftRightAndFillUpperFrom(y Uint64x8, z Uint64x8) Uint64x8 +/* ShiftRightAndFillUpperFromMasked */ + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
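+//
+// A sketch of the masked, variable-count right funnel shift (illustrative; names
+// are assumptions):
+//
+//	var lo, counts, hi Int16x8
+//	var m Mask16x8
+//	r := lo.ShiftRightAndFillUpperFromMasked(counts, hi, m) // upper bits refilled from hi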
+// +// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +func (x Int16x8) ShiftRightAndFillUpperFromMasked(y Int16x8, z Int16x8, u Mask16x8) Int16x8 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +func (x Int16x16) ShiftRightAndFillUpperFromMasked(y Int16x16, z Int16x16, u Mask16x16) Int16x16 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +func (x Int16x32) ShiftRightAndFillUpperFromMasked(y Int16x32, z Int16x32, u Mask16x32) Int16x32 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +func (x Int32x4) ShiftRightAndFillUpperFromMasked(y Int32x4, z Int32x4, u Mask32x4) Int32x4 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +func (x Int32x8) ShiftRightAndFillUpperFromMasked(y Int32x8, z Int32x8, u Mask32x8) Int32x8 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +func (x Int32x16) ShiftRightAndFillUpperFromMasked(y Int32x16, z Int32x16, u Mask32x16) Int32x16 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +func (x Int64x2) ShiftRightAndFillUpperFromMasked(y Int64x2, z Int64x2, u Mask64x2) Int64x2 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +func (x Int64x4) ShiftRightAndFillUpperFromMasked(y Int64x4, z Int64x4, u Mask64x4) Int64x4 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
+// +// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftRightAndFillUpperFromMasked(y Int64x8, z Int64x8, u Mask64x8) Int64x8 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +func (x Uint16x8) ShiftRightAndFillUpperFromMasked(y Uint16x8, z Uint16x8, u Mask16x8) Uint16x8 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +func (x Uint16x16) ShiftRightAndFillUpperFromMasked(y Uint16x16, z Uint16x16, u Mask16x16) Uint16x16 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +func (x Uint16x32) ShiftRightAndFillUpperFromMasked(y Uint16x32, z Uint16x32, u Mask16x32) Uint16x32 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +func (x Uint32x4) ShiftRightAndFillUpperFromMasked(y Uint32x4, z Uint32x4, u Mask32x4) Uint32x4 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +func (x Uint32x8) ShiftRightAndFillUpperFromMasked(y Uint32x8, z Uint32x8, u Mask32x8) Uint32x8 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +func (x Uint32x16) ShiftRightAndFillUpperFromMasked(y Uint32x16, z Uint32x16, u Mask32x16) Uint32x16 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +func (x Uint64x2) ShiftRightAndFillUpperFromMasked(y Uint64x2, z Uint64x2, u Mask64x2) Uint64x2 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
+// +// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) ShiftRightAndFillUpperFromMasked(y Uint64x4, z Uint64x4, u Mask64x4) Uint64x4 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftRightAndFillUpperFromMasked(y Uint64x8, z Uint64x8, u Mask64x8) Uint64x8 + +/* ShiftRightMasked */ + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVW, CPU Feature: AVX512EVEX +func (x Int16x8) ShiftRightMasked(y Int16x8, z Mask16x8) Int16x8 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVW, CPU Feature: AVX512EVEX +func (x Int16x16) ShiftRightMasked(y Int16x16, z Mask16x16) Int16x16 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVW, CPU Feature: AVX512EVEX +func (x Int16x32) ShiftRightMasked(y Int16x32, z Mask16x32) Int16x32 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVD, CPU Feature: AVX512EVEX +func (x Int32x4) ShiftRightMasked(y Int32x4, z Mask32x4) Int32x4 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVD, CPU Feature: AVX512EVEX +func (x Int32x8) ShiftRightMasked(y Int32x8, z Mask32x8) Int32x8 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVD, CPU Feature: AVX512EVEX +func (x Int32x16) ShiftRightMasked(y Int32x16, z Mask32x16) Int32x16 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +func (x Int64x2) ShiftRightMasked(y Int64x2, z Mask64x2) Int64x2 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +func (x Int64x4) ShiftRightMasked(y Int64x4, z Mask64x4) Int64x4 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftRightMasked(y Int64x8, z Mask64x8) Int64x8 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVW, CPU Feature: AVX512EVEX +func (x Uint16x8) ShiftRightMasked(y Uint16x8, z Mask16x8) Uint16x8 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. 
+// +// Asm: VPSRLVW, CPU Feature: AVX512EVEX +func (x Uint16x16) ShiftRightMasked(y Uint16x16, z Mask16x16) Uint16x16 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVW, CPU Feature: AVX512EVEX +func (x Uint16x32) ShiftRightMasked(y Uint16x32, z Mask16x32) Uint16x32 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVD, CPU Feature: AVX512EVEX +func (x Uint32x4) ShiftRightMasked(y Uint32x4, z Mask32x4) Uint32x4 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVD, CPU Feature: AVX512EVEX +func (x Uint32x8) ShiftRightMasked(y Uint32x8, z Mask32x8) Uint32x8 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVD, CPU Feature: AVX512EVEX +func (x Uint32x16) ShiftRightMasked(y Uint32x16, z Mask32x16) Uint32x16 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +func (x Uint64x2) ShiftRightMasked(y Uint64x2, z Mask64x2) Uint64x2 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) ShiftRightMasked(y Uint64x4, z Mask64x4) Uint64x4 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftRightMasked(y Uint64x8, z Mask64x8) Uint64x8 + /* ShiftRightSignExtended */ // ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. @@ -8483,37 +8081,129 @@ func (x Uint16x16) ShiftRightSignExtended(y Uint16x16) Uint16x16 // ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVW, CPU Feature: AVX512EVEX -func (x Uint16x32) ShiftRightSignExtended(y Uint16x32) Uint16x32 +func (x Uint16x32) ShiftRightSignExtended(y Uint16x32) Uint16x32 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVD, CPU Feature: AVX2 +func (x Uint32x4) ShiftRightSignExtended(y Uint32x4) Uint32x4 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVD, CPU Feature: AVX2 +func (x Uint32x8) ShiftRightSignExtended(y Uint32x8) Uint32x8 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
+// +// Asm: VPSRAVD, CPU Feature: AVX512EVEX +func (x Uint32x16) ShiftRightSignExtended(y Uint32x16) Uint32x16 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +func (x Uint64x2) ShiftRightSignExtended(y Uint64x2) Uint64x2 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) ShiftRightSignExtended(y Uint64x4) Uint64x4 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftRightSignExtended(y Uint64x8) Uint64x8 + +/* ShiftRightSignExtendedMasked */ + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVW, CPU Feature: AVX512EVEX +func (x Int16x8) ShiftRightSignExtendedMasked(y Int16x8, z Mask16x8) Int16x8 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVW, CPU Feature: AVX512EVEX +func (x Int16x16) ShiftRightSignExtendedMasked(y Int16x16, z Mask16x16) Int16x16 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVW, CPU Feature: AVX512EVEX +func (x Int16x32) ShiftRightSignExtendedMasked(y Int16x32, z Mask16x32) Int16x32 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVD, CPU Feature: AVX512EVEX +func (x Int32x4) ShiftRightSignExtendedMasked(y Int32x4, z Mask32x4) Int32x4 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVD, CPU Feature: AVX512EVEX +func (x Int32x8) ShiftRightSignExtendedMasked(y Int32x8, z Mask32x8) Int32x8 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVD, CPU Feature: AVX512EVEX +func (x Int32x16) ShiftRightSignExtendedMasked(y Int32x16, z Mask32x16) Int32x16 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +func (x Int64x2) ShiftRightSignExtendedMasked(y Int64x2, z Mask64x2) Int64x2 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
+// +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +func (x Int64x4) ShiftRightSignExtendedMasked(y Int64x4, z Mask64x4) Int64x4 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftRightSignExtendedMasked(y Int64x8, z Mask64x8) Int64x8 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVW, CPU Feature: AVX512EVEX +func (x Uint16x8) ShiftRightSignExtendedMasked(y Uint16x8, z Mask16x8) Uint16x8 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVW, CPU Feature: AVX512EVEX +func (x Uint16x16) ShiftRightSignExtendedMasked(y Uint16x16, z Mask16x16) Uint16x16 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVW, CPU Feature: AVX512EVEX +func (x Uint16x32) ShiftRightSignExtendedMasked(y Uint16x32, z Mask16x32) Uint16x32 // ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAVD, CPU Feature: AVX2 -func (x Uint32x4) ShiftRightSignExtended(y Uint32x4) Uint32x4 +// Asm: VPSRAVD, CPU Feature: AVX512EVEX +func (x Uint32x4) ShiftRightSignExtendedMasked(y Uint32x4, z Mask32x4) Uint32x4 // ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAVD, CPU Feature: AVX2 -func (x Uint32x8) ShiftRightSignExtended(y Uint32x8) Uint32x8 +// Asm: VPSRAVD, CPU Feature: AVX512EVEX +func (x Uint32x8) ShiftRightSignExtendedMasked(y Uint32x8, z Mask32x8) Uint32x8 // ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVD, CPU Feature: AVX512EVEX -func (x Uint32x16) ShiftRightSignExtended(y Uint32x16) Uint32x16 +func (x Uint32x16) ShiftRightSignExtendedMasked(y Uint32x16, z Mask32x16) Uint32x16 // ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Uint64x2) ShiftRightSignExtended(y Uint64x2) Uint64x2 +func (x Uint64x2) ShiftRightSignExtendedMasked(y Uint64x2, z Mask64x2) Uint64x2 // ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Uint64x4) ShiftRightSignExtended(y Uint64x4) Uint64x4 +func (x Uint64x4) ShiftRightSignExtendedMasked(y Uint64x4, z Mask64x4) Uint64x4 // ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
// // Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Uint64x8) ShiftRightSignExtended(y Uint64x8) Uint64x8 +func (x Uint64x8) ShiftRightSignExtendedMasked(y Uint64x8, z Mask64x8) Uint64x8 /* Sign */ @@ -8585,6 +8275,38 @@ func (x Float64x4) Sqrt() Float64x4 // Asm: VSQRTPD, CPU Feature: AVX512EVEX func (x Float64x8) Sqrt() Float64x8 +/* SqrtMasked */ + +// Sqrt computes the square root of each element. +// +// Asm: VSQRTPS, CPU Feature: AVX512EVEX +func (x Float32x4) SqrtMasked(y Mask32x4) Float32x4 + +// Sqrt computes the square root of each element. +// +// Asm: VSQRTPS, CPU Feature: AVX512EVEX +func (x Float32x8) SqrtMasked(y Mask32x8) Float32x8 + +// Sqrt computes the square root of each element. +// +// Asm: VSQRTPS, CPU Feature: AVX512EVEX +func (x Float32x16) SqrtMasked(y Mask32x16) Float32x16 + +// Sqrt computes the square root of each element. +// +// Asm: VSQRTPD, CPU Feature: AVX512EVEX +func (x Float64x2) SqrtMasked(y Mask64x2) Float64x2 + +// Sqrt computes the square root of each element. +// +// Asm: VSQRTPD, CPU Feature: AVX512EVEX +func (x Float64x4) SqrtMasked(y Mask64x4) Float64x4 + +// Sqrt computes the square root of each element. +// +// Asm: VSQRTPD, CPU Feature: AVX512EVEX +func (x Float64x8) SqrtMasked(y Mask64x8) Float64x8 + /* Sub */ // Sub subtracts corresponding elements of two vectors. @@ -8737,6 +8459,158 @@ func (x Uint64x4) Sub(y Uint64x4) Uint64x4 // Asm: VPSUBQ, CPU Feature: AVX512EVEX func (x Uint64x8) Sub(y Uint64x8) Uint64x8 +/* SubMasked */ + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VSUBPS, CPU Feature: AVX512EVEX +func (x Float32x4) SubMasked(y Float32x4, z Mask32x4) Float32x4 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VSUBPS, CPU Feature: AVX512EVEX +func (x Float32x8) SubMasked(y Float32x8, z Mask32x8) Float32x8 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VSUBPS, CPU Feature: AVX512EVEX +func (x Float32x16) SubMasked(y Float32x16, z Mask32x16) Float32x16 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VSUBPD, CPU Feature: AVX512EVEX +func (x Float64x2) SubMasked(y Float64x2, z Mask64x2) Float64x2 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VSUBPD, CPU Feature: AVX512EVEX +func (x Float64x4) SubMasked(y Float64x4, z Mask64x4) Float64x4 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VSUBPD, CPU Feature: AVX512EVEX +func (x Float64x8) SubMasked(y Float64x8, z Mask64x8) Float64x8 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBB, CPU Feature: AVX512EVEX +func (x Int8x16) SubMasked(y Int8x16, z Mask8x16) Int8x16 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBB, CPU Feature: AVX512EVEX +func (x Int8x32) SubMasked(y Int8x32, z Mask8x32) Int8x32 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBB, CPU Feature: AVX512EVEX +func (x Int8x64) SubMasked(y Int8x64, z Mask8x64) Int8x64 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBW, CPU Feature: AVX512EVEX +func (x Int16x8) SubMasked(y Int16x8, z Mask16x8) Int16x8 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBW, CPU Feature: AVX512EVEX +func (x Int16x16) SubMasked(y Int16x16, z Mask16x16) Int16x16 + +// Sub subtracts corresponding elements of two vectors. 
+// +// Asm: VPSUBW, CPU Feature: AVX512EVEX +func (x Int16x32) SubMasked(y Int16x32, z Mask16x32) Int16x32 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBD, CPU Feature: AVX512EVEX +func (x Int32x4) SubMasked(y Int32x4, z Mask32x4) Int32x4 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBD, CPU Feature: AVX512EVEX +func (x Int32x8) SubMasked(y Int32x8, z Mask32x8) Int32x8 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBD, CPU Feature: AVX512EVEX +func (x Int32x16) SubMasked(y Int32x16, z Mask32x16) Int32x16 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBQ, CPU Feature: AVX512EVEX +func (x Int64x2) SubMasked(y Int64x2, z Mask64x2) Int64x2 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBQ, CPU Feature: AVX512EVEX +func (x Int64x4) SubMasked(y Int64x4, z Mask64x4) Int64x4 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBQ, CPU Feature: AVX512EVEX +func (x Int64x8) SubMasked(y Int64x8, z Mask64x8) Int64x8 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBB, CPU Feature: AVX512EVEX +func (x Uint8x16) SubMasked(y Uint8x16, z Mask8x16) Uint8x16 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBB, CPU Feature: AVX512EVEX +func (x Uint8x32) SubMasked(y Uint8x32, z Mask8x32) Uint8x32 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBB, CPU Feature: AVX512EVEX +func (x Uint8x64) SubMasked(y Uint8x64, z Mask8x64) Uint8x64 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBW, CPU Feature: AVX512EVEX +func (x Uint16x8) SubMasked(y Uint16x8, z Mask16x8) Uint16x8 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBW, CPU Feature: AVX512EVEX +func (x Uint16x16) SubMasked(y Uint16x16, z Mask16x16) Uint16x16 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBW, CPU Feature: AVX512EVEX +func (x Uint16x32) SubMasked(y Uint16x32, z Mask16x32) Uint16x32 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBD, CPU Feature: AVX512EVEX +func (x Uint32x4) SubMasked(y Uint32x4, z Mask32x4) Uint32x4 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBD, CPU Feature: AVX512EVEX +func (x Uint32x8) SubMasked(y Uint32x8, z Mask32x8) Uint32x8 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBD, CPU Feature: AVX512EVEX +func (x Uint32x16) SubMasked(y Uint32x16, z Mask32x16) Uint32x16 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBQ, CPU Feature: AVX512EVEX +func (x Uint64x2) SubMasked(y Uint64x2, z Mask64x2) Uint64x2 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBQ, CPU Feature: AVX512EVEX +func (x Uint64x4) SubMasked(y Uint64x4, z Mask64x4) Uint64x4 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBQ, CPU Feature: AVX512EVEX +func (x Uint64x8) SubMasked(y Uint64x8, z Mask64x8) Uint64x8 + /* Trunc */ // Trunc truncates elements towards zero. @@ -8791,6 +8665,38 @@ func (x Float64x4) TruncWithPrecision(imm uint8) Float64x4 // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) TruncWithPrecision(imm uint8) Float64x8 +/* TruncWithPrecisionMasked */ + +// TruncWithPrecision truncates elements with specified precision. 
+// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) TruncWithPrecisionMasked(imm uint8, y Mask32x4) Float32x4 + +// TruncWithPrecision truncates elements with specified precision. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) TruncWithPrecisionMasked(imm uint8, y Mask32x8) Float32x8 + +// TruncWithPrecision truncates elements with specified precision. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) TruncWithPrecisionMasked(imm uint8, y Mask32x16) Float32x16 + +// TruncWithPrecision truncates elements with specified precision. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) TruncWithPrecisionMasked(imm uint8, y Mask64x2) Float64x2 + +// TruncWithPrecision truncates elements with specified precision. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) TruncWithPrecisionMasked(imm uint8, y Mask64x4) Float64x4 + +// TruncWithPrecision truncates elements with specified precision. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) TruncWithPrecisionMasked(imm uint8, y Mask64x8) Float64x8 + /* UnsignedSignedQuadDotProdAccumulate */ // UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. @@ -8823,6 +8729,38 @@ func (x Uint32x8) UnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32) Uin // Asm: VPDPBUSD, CPU Feature: AVX512EVEX func (x Uint32x16) UnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Uint32x16 +/* UnsignedSignedQuadDotProdAccumulateMasked */ + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Int32x4) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int8x16, u Mask32x4) Int32x4 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Int32x8) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int8x32, u Mask32x8) Int32x8 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Int32x16) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int8x64, u Mask32x16) Int32x16 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Uint32x4) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int8x16, u Mask32x4) Uint32x4 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Uint32x8) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int8x32, u Mask32x8) Uint32x8 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Uint32x16) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int8x64, u Mask32x16) Uint32x16 + /* Xor */ // Xor performs a bitwise XOR operation between two vectors. 
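[Editorial note, not part of the patch] A minimal usage sketch of the *Masked API documented in the generated file above. It assumes GOEXPERIMENT=simd on amd64 with AVX-512 available and the "simd" import path used by the tests later in this patch; the real tests skip when the CPU lacks AVX-512, and this sketch omits that guard. The masked variants take the mask as their last argument and return zero in masked-off lanes, matching the expectation in TestMaskedAdd further down in this patch.

package main

import (
	"fmt"

	"simd" // import path as used by src/simd/simd_test.go in this patch
)

func main() {
	a := []int32{1, 2, 3, 4}
	b := []int32{5, 6, 7, 8}
	m := []int32{-1, -1, 0, 0} // all-ones lanes are selected, zero lanes are masked off

	x := simd.LoadInt32x4Slice(a)
	y := simd.LoadInt32x4Slice(b)
	mask := simd.LoadInt32x4Slice(m).AsMask32x4()

	// AddMasked adds only in the selected lanes; masked-off lanes come back zeroed.
	sum := x.AddMasked(y, mask)

	var out [4]int32
	sum.Store(&out)
	fmt.Println(out) // [6 8 0 0]
}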
@@ -8925,6 +8863,68 @@ func (x Uint64x4) Xor(y Uint64x4) Uint64x4 // Asm: VPXORQ, CPU Feature: AVX512EVEX func (x Uint64x8) Xor(y Uint64x8) Uint64x8 +/* XorMasked */ + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORD, CPU Feature: AVX512EVEX +func (x Int32x4) XorMasked(y Int32x4, z Mask32x4) Int32x4 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORD, CPU Feature: AVX512EVEX +func (x Int32x8) XorMasked(y Int32x8, z Mask32x8) Int32x8 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORD, CPU Feature: AVX512EVEX +func (x Int32x16) XorMasked(y Int32x16, z Mask32x16) Int32x16 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORQ, CPU Feature: AVX512EVEX +func (x Int64x2) XorMasked(y Int64x2, z Mask64x2) Int64x2 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORQ, CPU Feature: AVX512EVEX +func (x Int64x4) XorMasked(y Int64x4, z Mask64x4) Int64x4 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORQ, CPU Feature: AVX512EVEX +func (x Int64x8) XorMasked(y Int64x8, z Mask64x8) Int64x8 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORD, CPU Feature: AVX512EVEX +func (x Uint32x4) XorMasked(y Uint32x4, z Mask32x4) Uint32x4 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORD, CPU Feature: AVX512EVEX +func (x Uint32x8) XorMasked(y Uint32x8, z Mask32x8) Uint32x8 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORD, CPU Feature: AVX512EVEX +func (x Uint32x16) XorMasked(y Uint32x16, z Mask32x16) Uint32x16 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORQ, CPU Feature: AVX512EVEX +func (x Uint64x2) XorMasked(y Uint64x2, z Mask64x2) Uint64x2 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORQ, CPU Feature: AVX512EVEX +func (x Uint64x4) XorMasked(y Uint64x4, z Mask64x4) Uint64x4 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORQ, CPU Feature: AVX512EVEX +func (x Uint64x8) XorMasked(y Uint64x8, z Mask64x8) Uint64x8 + // Float64x2 converts from Float32x4 to Float64x2 func (from Float32x4) AsFloat64x2() (to Float64x2) diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index e2324e8da5c5ea..ebe241c4674466 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -43,7 +43,7 @@ func TestType(t *testing.T) { return } v.z = maskT(simd.LoadInt32x4(&maskv).AsMask32x4()) - *v.y = v.y.MaskedAdd(v.x, simd.Mask32x4(v.z)) + *v.y = v.y.AddMasked(v.x, simd.Mask32x4(v.z)) got := [4]int32{} v.y.Store(&got) @@ -125,7 +125,7 @@ func TestMaskConversion(t *testing.T) { mask := y.Sub(x).AsMask32x4() v = [4]int32{5, 6, 7, 8} y = simd.LoadInt32x4(&v) - y = y.MaskedAdd(x, mask) + y = y.AddMasked(x, mask) got := [4]int32{6, 0, 8, 0} y.Store(&v) for i := range 4 { @@ -148,7 +148,7 @@ func TestMaskedAdd(t *testing.T) { t.Skip("Test requires HasAVX512, not available on this hardware") return } - testInt32x4BinaryMasked(t, []int32{1, 2, 3, 4}, []int32{5, 6, 7, 8}, []int32{-1, -1, 0, 0}, []int32{6, 8, 0, 0}, "MaskedAdd") + testInt32x4BinaryMasked(t, []int32{1, 2, 3, 4}, []int32{5, 6, 7, 8}, []int32{-1, -1, 0, 0}, []int32{6, 8, 0, 0}, "AddMasked") } // checkInt8Slices ensures that b and a are equal, to the end of b. 
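[Editorial note, not part of the patch] The generated wrapped tests in the next file all follow one pattern per element shape: load the operand slices into vectors, build a mask from an int vector with AsMaskNxM, dispatch on the method name under test, and convert mask-typed results back to integer lanes with AsIntNxM so they can be compared against want. A condensed sketch of that pattern for a single Float32x4 masked compare follows; the lane-checking loop and the helper name are illustrative, and it assumes want has four elements. It belongs in a _test.go file that imports "simd" and "testing", mirroring testFloat32x4MaskedCompare below.

func checkEqualMaskedFloat32x4(t *testing.T, v0, v1 []float32, v2 []int32, want []int32) {
	vec0 := simd.LoadFloat32x4Slice(v0)
	vec1 := simd.LoadFloat32x4Slice(v1)
	mask := simd.LoadInt32x4Slice(v2).AsMask32x4()

	// EqualMasked yields a Mask32x4; AsInt32x4 converts it to integer lanes
	// so the result can be stored and compared element-wise against want.
	got := vec0.EqualMasked(vec1, mask).AsInt32x4()

	var out [4]int32
	got.Store(&out)
	for i := range out {
		if out[i] != want[i] {
			t.Errorf("lane %d: got %d, want %d", i, out[i], want[i])
		}
	}
}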
diff --git a/src/simd/simd_wrapped_test.go b/src/simd/simd_wrapped_test.go index d4cf7f6b7413e2..bdbb25bfce4e6a 100644 --- a/src/simd/simd_wrapped_test.go +++ b/src/simd/simd_wrapped_test.go @@ -56,20 +56,20 @@ func testFloat32x4BinaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []in vec1 := simd.LoadFloat32x4Slice(v1) vec2 := simd.LoadInt32x4Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x4()) - case "MaskedDiv": - gotv = vec0.MaskedDiv(vec1, vec2.AsMask32x4()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask32x4()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask32x4()) - case "MaskedMul": - gotv = vec0.MaskedMul(vec1, vec2.AsMask32x4()) - case "MaskedMulByPowOf2": - gotv = vec0.MaskedMulByPowOf2(vec1, vec2.AsMask32x4()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask32x4()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask32x4()) + case "DivMasked": + gotv = vec0.DivMasked(vec1, vec2.AsMask32x4()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask32x4()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask32x4()) + case "MulByPowOf2Masked": + gotv = vec0.MulByPowOf2Masked(vec1, vec2.AsMask32x4()) + case "MulMasked": + gotv = vec0.MulMasked(vec1, vec2.AsMask32x4()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask32x4()) default: t.Errorf("Unknown method: Float32x4.%s", which) @@ -123,20 +123,20 @@ func testFloat32x4MaskedCompare(t *testing.T, v0 []float32, v1 []float32, v2 []i vec1 := simd.LoadFloat32x4Slice(v1) vec2 := simd.LoadInt32x4Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask32x4()).AsInt32x4() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask32x4()).AsInt32x4() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask32x4()).AsInt32x4() - case "MaskedIsNan": - gotv = vec0.MaskedIsNan(vec1, vec2.AsMask32x4()).AsInt32x4() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask32x4()).AsInt32x4() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask32x4()).AsInt32x4() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask32x4()).AsInt32x4() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask32x4()).AsInt32x4() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask32x4()).AsInt32x4() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask32x4()).AsInt32x4() + case "IsNanMasked": + gotv = vec0.IsNanMasked(vec1, vec2.AsMask32x4()).AsInt32x4() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask32x4()).AsInt32x4() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask32x4()).AsInt32x4() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask32x4()).AsInt32x4() default: t.Errorf("Unknown method: Float32x4.%s", which) @@ -184,12 +184,12 @@ func testFloat32x4TernaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []f vec2 := simd.LoadFloat32x4Slice(v2) vec3 := simd.LoadInt32x4Slice(v3) switch which { - case "MaskedFusedMultiplyAdd": - gotv = vec0.MaskedFusedMultiplyAdd(vec1, vec2, vec3.AsMask32x4()) - case "MaskedFusedMultiplyAddSub": - gotv = vec0.MaskedFusedMultiplyAddSub(vec1, vec2, vec3.AsMask32x4()) - case "MaskedFusedMultiplySubAdd": - gotv = vec0.MaskedFusedMultiplySubAdd(vec1, vec2, vec3.AsMask32x4()) + case "FusedMultiplyAddMasked": + gotv = vec0.FusedMultiplyAddMasked(vec1, vec2, vec3.AsMask32x4()) + case 
"FusedMultiplyAddSubMasked": + gotv = vec0.FusedMultiplyAddSubMasked(vec1, vec2, vec3.AsMask32x4()) + case "FusedMultiplySubAddMasked": + gotv = vec0.FusedMultiplySubAddMasked(vec1, vec2, vec3.AsMask32x4()) default: t.Errorf("Unknown method: Float32x4.%s", which) @@ -241,12 +241,12 @@ func testFloat32x4UnaryMasked(t *testing.T, v0 []float32, v1 []int32, want []flo vec0 := simd.LoadFloat32x4Slice(v0) vec1 := simd.LoadInt32x4Slice(v1) switch which { - case "MaskedApproximateReciprocal": - gotv = vec0.MaskedApproximateReciprocal(vec1.AsMask32x4()) - case "MaskedApproximateReciprocalOfSqrt": - gotv = vec0.MaskedApproximateReciprocalOfSqrt(vec1.AsMask32x4()) - case "MaskedSqrt": - gotv = vec0.MaskedSqrt(vec1.AsMask32x4()) + case "ApproximateReciprocalMasked": + gotv = vec0.ApproximateReciprocalMasked(vec1.AsMask32x4()) + case "ApproximateReciprocalOfSqrtMasked": + gotv = vec0.ApproximateReciprocalOfSqrtMasked(vec1.AsMask32x4()) + case "SqrtMasked": + gotv = vec0.SqrtMasked(vec1.AsMask32x4()) default: t.Errorf("Unknown method: Float32x4.%s", which) @@ -306,20 +306,20 @@ func testFloat32x8BinaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []in vec1 := simd.LoadFloat32x8Slice(v1) vec2 := simd.LoadInt32x8Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x8()) - case "MaskedDiv": - gotv = vec0.MaskedDiv(vec1, vec2.AsMask32x8()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask32x8()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask32x8()) - case "MaskedMul": - gotv = vec0.MaskedMul(vec1, vec2.AsMask32x8()) - case "MaskedMulByPowOf2": - gotv = vec0.MaskedMulByPowOf2(vec1, vec2.AsMask32x8()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask32x8()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask32x8()) + case "DivMasked": + gotv = vec0.DivMasked(vec1, vec2.AsMask32x8()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask32x8()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask32x8()) + case "MulByPowOf2Masked": + gotv = vec0.MulByPowOf2Masked(vec1, vec2.AsMask32x8()) + case "MulMasked": + gotv = vec0.MulMasked(vec1, vec2.AsMask32x8()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask32x8()) default: t.Errorf("Unknown method: Float32x8.%s", which) @@ -373,20 +373,20 @@ func testFloat32x8MaskedCompare(t *testing.T, v0 []float32, v1 []float32, v2 []i vec1 := simd.LoadFloat32x8Slice(v1) vec2 := simd.LoadInt32x8Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask32x8()).AsInt32x8() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask32x8()).AsInt32x8() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask32x8()).AsInt32x8() - case "MaskedIsNan": - gotv = vec0.MaskedIsNan(vec1, vec2.AsMask32x8()).AsInt32x8() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask32x8()).AsInt32x8() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask32x8()).AsInt32x8() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask32x8()).AsInt32x8() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask32x8()).AsInt32x8() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask32x8()).AsInt32x8() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask32x8()).AsInt32x8() + case "IsNanMasked": + gotv = vec0.IsNanMasked(vec1, vec2.AsMask32x8()).AsInt32x8() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, 
vec2.AsMask32x8()).AsInt32x8() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask32x8()).AsInt32x8() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask32x8()).AsInt32x8() default: t.Errorf("Unknown method: Float32x8.%s", which) @@ -434,12 +434,12 @@ func testFloat32x8TernaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []f vec2 := simd.LoadFloat32x8Slice(v2) vec3 := simd.LoadInt32x8Slice(v3) switch which { - case "MaskedFusedMultiplyAdd": - gotv = vec0.MaskedFusedMultiplyAdd(vec1, vec2, vec3.AsMask32x8()) - case "MaskedFusedMultiplyAddSub": - gotv = vec0.MaskedFusedMultiplyAddSub(vec1, vec2, vec3.AsMask32x8()) - case "MaskedFusedMultiplySubAdd": - gotv = vec0.MaskedFusedMultiplySubAdd(vec1, vec2, vec3.AsMask32x8()) + case "FusedMultiplyAddMasked": + gotv = vec0.FusedMultiplyAddMasked(vec1, vec2, vec3.AsMask32x8()) + case "FusedMultiplyAddSubMasked": + gotv = vec0.FusedMultiplyAddSubMasked(vec1, vec2, vec3.AsMask32x8()) + case "FusedMultiplySubAddMasked": + gotv = vec0.FusedMultiplySubAddMasked(vec1, vec2, vec3.AsMask32x8()) default: t.Errorf("Unknown method: Float32x8.%s", which) @@ -491,12 +491,12 @@ func testFloat32x8UnaryMasked(t *testing.T, v0 []float32, v1 []int32, want []flo vec0 := simd.LoadFloat32x8Slice(v0) vec1 := simd.LoadInt32x8Slice(v1) switch which { - case "MaskedApproximateReciprocal": - gotv = vec0.MaskedApproximateReciprocal(vec1.AsMask32x8()) - case "MaskedApproximateReciprocalOfSqrt": - gotv = vec0.MaskedApproximateReciprocalOfSqrt(vec1.AsMask32x8()) - case "MaskedSqrt": - gotv = vec0.MaskedSqrt(vec1.AsMask32x8()) + case "ApproximateReciprocalMasked": + gotv = vec0.ApproximateReciprocalMasked(vec1.AsMask32x8()) + case "ApproximateReciprocalOfSqrtMasked": + gotv = vec0.ApproximateReciprocalOfSqrtMasked(vec1.AsMask32x8()) + case "SqrtMasked": + gotv = vec0.SqrtMasked(vec1.AsMask32x8()) default: t.Errorf("Unknown method: Float32x8.%s", which) @@ -550,20 +550,20 @@ func testFloat32x16BinaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []i vec1 := simd.LoadFloat32x16Slice(v1) vec2 := simd.LoadInt32x16Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x16()) - case "MaskedDiv": - gotv = vec0.MaskedDiv(vec1, vec2.AsMask32x16()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask32x16()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask32x16()) - case "MaskedMul": - gotv = vec0.MaskedMul(vec1, vec2.AsMask32x16()) - case "MaskedMulByPowOf2": - gotv = vec0.MaskedMulByPowOf2(vec1, vec2.AsMask32x16()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask32x16()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask32x16()) + case "DivMasked": + gotv = vec0.DivMasked(vec1, vec2.AsMask32x16()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask32x16()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask32x16()) + case "MulByPowOf2Masked": + gotv = vec0.MulByPowOf2Masked(vec1, vec2.AsMask32x16()) + case "MulMasked": + gotv = vec0.MulMasked(vec1, vec2.AsMask32x16()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask32x16()) default: t.Errorf("Unknown method: Float32x16.%s", which) @@ -617,20 +617,20 @@ func testFloat32x16MaskedCompare(t *testing.T, v0 []float32, v1 []float32, v2 [] vec1 := simd.LoadFloat32x16Slice(v1) vec2 := simd.LoadInt32x16Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask32x16()).AsInt32x16() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, 
vec2.AsMask32x16()).AsInt32x16() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask32x16()).AsInt32x16() - case "MaskedIsNan": - gotv = vec0.MaskedIsNan(vec1, vec2.AsMask32x16()).AsInt32x16() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask32x16()).AsInt32x16() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask32x16()).AsInt32x16() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask32x16()).AsInt32x16() + case "IsNanMasked": + gotv = vec0.IsNanMasked(vec1, vec2.AsMask32x16()).AsInt32x16() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask32x16()).AsInt32x16() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() default: t.Errorf("Unknown method: Float32x16.%s", which) @@ -678,12 +678,12 @@ func testFloat32x16TernaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 [] vec2 := simd.LoadFloat32x16Slice(v2) vec3 := simd.LoadInt32x16Slice(v3) switch which { - case "MaskedFusedMultiplyAdd": - gotv = vec0.MaskedFusedMultiplyAdd(vec1, vec2, vec3.AsMask32x16()) - case "MaskedFusedMultiplyAddSub": - gotv = vec0.MaskedFusedMultiplyAddSub(vec1, vec2, vec3.AsMask32x16()) - case "MaskedFusedMultiplySubAdd": - gotv = vec0.MaskedFusedMultiplySubAdd(vec1, vec2, vec3.AsMask32x16()) + case "FusedMultiplyAddMasked": + gotv = vec0.FusedMultiplyAddMasked(vec1, vec2, vec3.AsMask32x16()) + case "FusedMultiplyAddSubMasked": + gotv = vec0.FusedMultiplyAddSubMasked(vec1, vec2, vec3.AsMask32x16()) + case "FusedMultiplySubAddMasked": + gotv = vec0.FusedMultiplySubAddMasked(vec1, vec2, vec3.AsMask32x16()) default: t.Errorf("Unknown method: Float32x16.%s", which) @@ -727,12 +727,12 @@ func testFloat32x16UnaryMasked(t *testing.T, v0 []float32, v1 []int32, want []fl vec0 := simd.LoadFloat32x16Slice(v0) vec1 := simd.LoadInt32x16Slice(v1) switch which { - case "MaskedApproximateReciprocal": - gotv = vec0.MaskedApproximateReciprocal(vec1.AsMask32x16()) - case "MaskedApproximateReciprocalOfSqrt": - gotv = vec0.MaskedApproximateReciprocalOfSqrt(vec1.AsMask32x16()) - case "MaskedSqrt": - gotv = vec0.MaskedSqrt(vec1.AsMask32x16()) + case "ApproximateReciprocalMasked": + gotv = vec0.ApproximateReciprocalMasked(vec1.AsMask32x16()) + case "ApproximateReciprocalOfSqrtMasked": + gotv = vec0.ApproximateReciprocalOfSqrtMasked(vec1.AsMask32x16()) + case "SqrtMasked": + gotv = vec0.SqrtMasked(vec1.AsMask32x16()) default: t.Errorf("Unknown method: Float32x16.%s", which) @@ -794,20 +794,20 @@ func testFloat64x2BinaryMasked(t *testing.T, v0 []float64, v1 []float64, v2 []in vec1 := simd.LoadFloat64x2Slice(v1) vec2 := simd.LoadInt64x2Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x2()) - case "MaskedDiv": - gotv = vec0.MaskedDiv(vec1, vec2.AsMask64x2()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask64x2()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask64x2()) - case "MaskedMul": - gotv = vec0.MaskedMul(vec1, vec2.AsMask64x2()) - case "MaskedMulByPowOf2": - gotv = vec0.MaskedMulByPowOf2(vec1, vec2.AsMask64x2()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, 
vec2.AsMask64x2()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask64x2()) + case "DivMasked": + gotv = vec0.DivMasked(vec1, vec2.AsMask64x2()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask64x2()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask64x2()) + case "MulByPowOf2Masked": + gotv = vec0.MulByPowOf2Masked(vec1, vec2.AsMask64x2()) + case "MulMasked": + gotv = vec0.MulMasked(vec1, vec2.AsMask64x2()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask64x2()) default: t.Errorf("Unknown method: Float64x2.%s", which) @@ -861,20 +861,20 @@ func testFloat64x2MaskedCompare(t *testing.T, v0 []float64, v1 []float64, v2 []i vec1 := simd.LoadFloat64x2Slice(v1) vec2 := simd.LoadInt64x2Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask64x2()).AsInt64x2() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask64x2()).AsInt64x2() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask64x2()).AsInt64x2() - case "MaskedIsNan": - gotv = vec0.MaskedIsNan(vec1, vec2.AsMask64x2()).AsInt64x2() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask64x2()).AsInt64x2() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask64x2()).AsInt64x2() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask64x2()).AsInt64x2() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask64x2()).AsInt64x2() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask64x2()).AsInt64x2() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask64x2()).AsInt64x2() + case "IsNanMasked": + gotv = vec0.IsNanMasked(vec1, vec2.AsMask64x2()).AsInt64x2() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask64x2()).AsInt64x2() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask64x2()).AsInt64x2() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask64x2()).AsInt64x2() default: t.Errorf("Unknown method: Float64x2.%s", which) @@ -922,12 +922,12 @@ func testFloat64x2TernaryMasked(t *testing.T, v0 []float64, v1 []float64, v2 []f vec2 := simd.LoadFloat64x2Slice(v2) vec3 := simd.LoadInt64x2Slice(v3) switch which { - case "MaskedFusedMultiplyAdd": - gotv = vec0.MaskedFusedMultiplyAdd(vec1, vec2, vec3.AsMask64x2()) - case "MaskedFusedMultiplyAddSub": - gotv = vec0.MaskedFusedMultiplyAddSub(vec1, vec2, vec3.AsMask64x2()) - case "MaskedFusedMultiplySubAdd": - gotv = vec0.MaskedFusedMultiplySubAdd(vec1, vec2, vec3.AsMask64x2()) + case "FusedMultiplyAddMasked": + gotv = vec0.FusedMultiplyAddMasked(vec1, vec2, vec3.AsMask64x2()) + case "FusedMultiplyAddSubMasked": + gotv = vec0.FusedMultiplyAddSubMasked(vec1, vec2, vec3.AsMask64x2()) + case "FusedMultiplySubAddMasked": + gotv = vec0.FusedMultiplySubAddMasked(vec1, vec2, vec3.AsMask64x2()) default: t.Errorf("Unknown method: Float64x2.%s", which) @@ -979,12 +979,12 @@ func testFloat64x2UnaryMasked(t *testing.T, v0 []float64, v1 []int64, want []flo vec0 := simd.LoadFloat64x2Slice(v0) vec1 := simd.LoadInt64x2Slice(v1) switch which { - case "MaskedApproximateReciprocal": - gotv = vec0.MaskedApproximateReciprocal(vec1.AsMask64x2()) - case "MaskedApproximateReciprocalOfSqrt": - gotv = vec0.MaskedApproximateReciprocalOfSqrt(vec1.AsMask64x2()) - case "MaskedSqrt": - gotv = vec0.MaskedSqrt(vec1.AsMask64x2()) + case "ApproximateReciprocalMasked": + gotv = vec0.ApproximateReciprocalMasked(vec1.AsMask64x2()) + case "ApproximateReciprocalOfSqrtMasked": + 
gotv = vec0.ApproximateReciprocalOfSqrtMasked(vec1.AsMask64x2()) + case "SqrtMasked": + gotv = vec0.SqrtMasked(vec1.AsMask64x2()) default: t.Errorf("Unknown method: Float64x2.%s", which) @@ -1044,20 +1044,20 @@ func testFloat64x4BinaryMasked(t *testing.T, v0 []float64, v1 []float64, v2 []in vec1 := simd.LoadFloat64x4Slice(v1) vec2 := simd.LoadInt64x4Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x4()) - case "MaskedDiv": - gotv = vec0.MaskedDiv(vec1, vec2.AsMask64x4()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask64x4()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask64x4()) - case "MaskedMul": - gotv = vec0.MaskedMul(vec1, vec2.AsMask64x4()) - case "MaskedMulByPowOf2": - gotv = vec0.MaskedMulByPowOf2(vec1, vec2.AsMask64x4()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask64x4()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask64x4()) + case "DivMasked": + gotv = vec0.DivMasked(vec1, vec2.AsMask64x4()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask64x4()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask64x4()) + case "MulByPowOf2Masked": + gotv = vec0.MulByPowOf2Masked(vec1, vec2.AsMask64x4()) + case "MulMasked": + gotv = vec0.MulMasked(vec1, vec2.AsMask64x4()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask64x4()) default: t.Errorf("Unknown method: Float64x4.%s", which) @@ -1111,20 +1111,20 @@ func testFloat64x4MaskedCompare(t *testing.T, v0 []float64, v1 []float64, v2 []i vec1 := simd.LoadFloat64x4Slice(v1) vec2 := simd.LoadInt64x4Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask64x4()).AsInt64x4() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask64x4()).AsInt64x4() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask64x4()).AsInt64x4() - case "MaskedIsNan": - gotv = vec0.MaskedIsNan(vec1, vec2.AsMask64x4()).AsInt64x4() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask64x4()).AsInt64x4() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask64x4()).AsInt64x4() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask64x4()).AsInt64x4() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask64x4()).AsInt64x4() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask64x4()).AsInt64x4() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask64x4()).AsInt64x4() + case "IsNanMasked": + gotv = vec0.IsNanMasked(vec1, vec2.AsMask64x4()).AsInt64x4() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask64x4()).AsInt64x4() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask64x4()).AsInt64x4() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask64x4()).AsInt64x4() default: t.Errorf("Unknown method: Float64x4.%s", which) @@ -1172,12 +1172,12 @@ func testFloat64x4TernaryMasked(t *testing.T, v0 []float64, v1 []float64, v2 []f vec2 := simd.LoadFloat64x4Slice(v2) vec3 := simd.LoadInt64x4Slice(v3) switch which { - case "MaskedFusedMultiplyAdd": - gotv = vec0.MaskedFusedMultiplyAdd(vec1, vec2, vec3.AsMask64x4()) - case "MaskedFusedMultiplyAddSub": - gotv = vec0.MaskedFusedMultiplyAddSub(vec1, vec2, vec3.AsMask64x4()) - case "MaskedFusedMultiplySubAdd": - gotv = vec0.MaskedFusedMultiplySubAdd(vec1, vec2, vec3.AsMask64x4()) + case "FusedMultiplyAddMasked": + gotv = vec0.FusedMultiplyAddMasked(vec1, vec2, vec3.AsMask64x4()) + case 
"FusedMultiplyAddSubMasked": + gotv = vec0.FusedMultiplyAddSubMasked(vec1, vec2, vec3.AsMask64x4()) + case "FusedMultiplySubAddMasked": + gotv = vec0.FusedMultiplySubAddMasked(vec1, vec2, vec3.AsMask64x4()) default: t.Errorf("Unknown method: Float64x4.%s", which) @@ -1229,12 +1229,12 @@ func testFloat64x4UnaryMasked(t *testing.T, v0 []float64, v1 []int64, want []flo vec0 := simd.LoadFloat64x4Slice(v0) vec1 := simd.LoadInt64x4Slice(v1) switch which { - case "MaskedApproximateReciprocal": - gotv = vec0.MaskedApproximateReciprocal(vec1.AsMask64x4()) - case "MaskedApproximateReciprocalOfSqrt": - gotv = vec0.MaskedApproximateReciprocalOfSqrt(vec1.AsMask64x4()) - case "MaskedSqrt": - gotv = vec0.MaskedSqrt(vec1.AsMask64x4()) + case "ApproximateReciprocalMasked": + gotv = vec0.ApproximateReciprocalMasked(vec1.AsMask64x4()) + case "ApproximateReciprocalOfSqrtMasked": + gotv = vec0.ApproximateReciprocalOfSqrtMasked(vec1.AsMask64x4()) + case "SqrtMasked": + gotv = vec0.SqrtMasked(vec1.AsMask64x4()) default: t.Errorf("Unknown method: Float64x4.%s", which) @@ -1288,20 +1288,20 @@ func testFloat64x8BinaryMasked(t *testing.T, v0 []float64, v1 []float64, v2 []in vec1 := simd.LoadFloat64x8Slice(v1) vec2 := simd.LoadInt64x8Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x8()) - case "MaskedDiv": - gotv = vec0.MaskedDiv(vec1, vec2.AsMask64x8()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask64x8()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask64x8()) - case "MaskedMul": - gotv = vec0.MaskedMul(vec1, vec2.AsMask64x8()) - case "MaskedMulByPowOf2": - gotv = vec0.MaskedMulByPowOf2(vec1, vec2.AsMask64x8()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask64x8()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask64x8()) + case "DivMasked": + gotv = vec0.DivMasked(vec1, vec2.AsMask64x8()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask64x8()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask64x8()) + case "MulByPowOf2Masked": + gotv = vec0.MulByPowOf2Masked(vec1, vec2.AsMask64x8()) + case "MulMasked": + gotv = vec0.MulMasked(vec1, vec2.AsMask64x8()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask64x8()) default: t.Errorf("Unknown method: Float64x8.%s", which) @@ -1355,20 +1355,20 @@ func testFloat64x8MaskedCompare(t *testing.T, v0 []float64, v1 []float64, v2 []i vec1 := simd.LoadFloat64x8Slice(v1) vec2 := simd.LoadInt64x8Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask64x8()).AsInt64x8() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask64x8()).AsInt64x8() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask64x8()).AsInt64x8() - case "MaskedIsNan": - gotv = vec0.MaskedIsNan(vec1, vec2.AsMask64x8()).AsInt64x8() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask64x8()).AsInt64x8() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask64x8()).AsInt64x8() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask64x8()).AsInt64x8() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask64x8()).AsInt64x8() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask64x8()).AsInt64x8() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask64x8()).AsInt64x8() + case "IsNanMasked": + gotv = vec0.IsNanMasked(vec1, vec2.AsMask64x8()).AsInt64x8() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, 
vec2.AsMask64x8()).AsInt64x8() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask64x8()).AsInt64x8() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask64x8()).AsInt64x8() default: t.Errorf("Unknown method: Float64x8.%s", which) @@ -1416,12 +1416,12 @@ func testFloat64x8TernaryMasked(t *testing.T, v0 []float64, v1 []float64, v2 []f vec2 := simd.LoadFloat64x8Slice(v2) vec3 := simd.LoadInt64x8Slice(v3) switch which { - case "MaskedFusedMultiplyAdd": - gotv = vec0.MaskedFusedMultiplyAdd(vec1, vec2, vec3.AsMask64x8()) - case "MaskedFusedMultiplyAddSub": - gotv = vec0.MaskedFusedMultiplyAddSub(vec1, vec2, vec3.AsMask64x8()) - case "MaskedFusedMultiplySubAdd": - gotv = vec0.MaskedFusedMultiplySubAdd(vec1, vec2, vec3.AsMask64x8()) + case "FusedMultiplyAddMasked": + gotv = vec0.FusedMultiplyAddMasked(vec1, vec2, vec3.AsMask64x8()) + case "FusedMultiplyAddSubMasked": + gotv = vec0.FusedMultiplyAddSubMasked(vec1, vec2, vec3.AsMask64x8()) + case "FusedMultiplySubAddMasked": + gotv = vec0.FusedMultiplySubAddMasked(vec1, vec2, vec3.AsMask64x8()) default: t.Errorf("Unknown method: Float64x8.%s", which) @@ -1465,12 +1465,12 @@ func testFloat64x8UnaryMasked(t *testing.T, v0 []float64, v1 []int64, want []flo vec0 := simd.LoadFloat64x8Slice(v0) vec1 := simd.LoadInt64x8Slice(v1) switch which { - case "MaskedApproximateReciprocal": - gotv = vec0.MaskedApproximateReciprocal(vec1.AsMask64x8()) - case "MaskedApproximateReciprocalOfSqrt": - gotv = vec0.MaskedApproximateReciprocalOfSqrt(vec1.AsMask64x8()) - case "MaskedSqrt": - gotv = vec0.MaskedSqrt(vec1.AsMask64x8()) + case "ApproximateReciprocalMasked": + gotv = vec0.ApproximateReciprocalMasked(vec1.AsMask64x8()) + case "ApproximateReciprocalOfSqrtMasked": + gotv = vec0.ApproximateReciprocalOfSqrtMasked(vec1.AsMask64x8()) + case "SqrtMasked": + gotv = vec0.SqrtMasked(vec1.AsMask64x8()) default: t.Errorf("Unknown method: Float64x8.%s", which) @@ -1532,18 +1532,18 @@ func testInt8x16BinaryMasked(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want vec1 := simd.LoadInt8x16Slice(v1) vec2 := simd.LoadInt8x16Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask8x16()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask8x16()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask8x16()) - case "MaskedSaturatedAdd": - gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask8x16()) - case "MaskedSaturatedSub": - gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask8x16()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask8x16()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask8x16()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask8x16()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask8x16()) + case "SaturatedAddMasked": + gotv = vec0.SaturatedAddMasked(vec1, vec2.AsMask8x16()) + case "SaturatedSubMasked": + gotv = vec0.SaturatedSubMasked(vec1, vec2.AsMask8x16()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask8x16()) default: t.Errorf("Unknown method: Int8x16.%s", which) @@ -1595,18 +1595,18 @@ func testInt8x16MaskedCompare(t *testing.T, v0 []int8, v1 []int8, v2 []int8, wan vec1 := simd.LoadInt8x16Slice(v1) vec2 := simd.LoadInt8x16Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask8x16()).AsInt8x16() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask8x16()).AsInt8x16() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask8x16()).AsInt8x16() - case 
"MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask8x16()).AsInt8x16() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask8x16()).AsInt8x16() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask8x16()).AsInt8x16() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask8x16()).AsInt8x16() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask8x16()).AsInt8x16() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask8x16()).AsInt8x16() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask8x16()).AsInt8x16() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask8x16()).AsInt8x16() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask8x16()).AsInt8x16() default: t.Errorf("Unknown method: Int8x16.%s", which) @@ -1648,10 +1648,10 @@ func testInt8x16UnaryMasked(t *testing.T, v0 []int8, v1 []int8, want []int8, whi vec0 := simd.LoadInt8x16Slice(v0) vec1 := simd.LoadInt8x16Slice(v1) switch which { - case "MaskedAbsolute": - gotv = vec0.MaskedAbsolute(vec1.AsMask8x16()) - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask8x16()) + case "AbsoluteMasked": + gotv = vec0.AbsoluteMasked(vec1.AsMask8x16()) + case "PopCountMasked": + gotv = vec0.PopCountMasked(vec1.AsMask8x16()) default: t.Errorf("Unknown method: Int8x16.%s", which) @@ -1713,18 +1713,18 @@ func testInt8x32BinaryMasked(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want vec1 := simd.LoadInt8x32Slice(v1) vec2 := simd.LoadInt8x32Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask8x32()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask8x32()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask8x32()) - case "MaskedSaturatedAdd": - gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask8x32()) - case "MaskedSaturatedSub": - gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask8x32()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask8x32()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask8x32()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask8x32()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask8x32()) + case "SaturatedAddMasked": + gotv = vec0.SaturatedAddMasked(vec1, vec2.AsMask8x32()) + case "SaturatedSubMasked": + gotv = vec0.SaturatedSubMasked(vec1, vec2.AsMask8x32()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask8x32()) default: t.Errorf("Unknown method: Int8x32.%s", which) @@ -1776,18 +1776,18 @@ func testInt8x32MaskedCompare(t *testing.T, v0 []int8, v1 []int8, v2 []int8, wan vec1 := simd.LoadInt8x32Slice(v1) vec2 := simd.LoadInt8x32Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask8x32()).AsInt8x32() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask8x32()).AsInt8x32() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask8x32()).AsInt8x32() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask8x32()).AsInt8x32() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask8x32()).AsInt8x32() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask8x32()).AsInt8x32() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask8x32()).AsInt8x32() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask8x32()).AsInt8x32() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask8x32()).AsInt8x32() + case 
"LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask8x32()).AsInt8x32() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask8x32()).AsInt8x32() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask8x32()).AsInt8x32() default: t.Errorf("Unknown method: Int8x32.%s", which) @@ -1829,10 +1829,10 @@ func testInt8x32UnaryMasked(t *testing.T, v0 []int8, v1 []int8, want []int8, whi vec0 := simd.LoadInt8x32Slice(v0) vec1 := simd.LoadInt8x32Slice(v1) switch which { - case "MaskedAbsolute": - gotv = vec0.MaskedAbsolute(vec1.AsMask8x32()) - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask8x32()) + case "AbsoluteMasked": + gotv = vec0.AbsoluteMasked(vec1.AsMask8x32()) + case "PopCountMasked": + gotv = vec0.PopCountMasked(vec1.AsMask8x32()) default: t.Errorf("Unknown method: Int8x32.%s", which) @@ -1884,18 +1884,18 @@ func testInt8x64BinaryMasked(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want vec1 := simd.LoadInt8x64Slice(v1) vec2 := simd.LoadInt8x64Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask8x64()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask8x64()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask8x64()) - case "MaskedSaturatedAdd": - gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask8x64()) - case "MaskedSaturatedSub": - gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask8x64()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask8x64()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask8x64()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask8x64()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask8x64()) + case "SaturatedAddMasked": + gotv = vec0.SaturatedAddMasked(vec1, vec2.AsMask8x64()) + case "SaturatedSubMasked": + gotv = vec0.SaturatedSubMasked(vec1, vec2.AsMask8x64()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask8x64()) default: t.Errorf("Unknown method: Int8x64.%s", which) @@ -1947,18 +1947,18 @@ func testInt8x64MaskedCompare(t *testing.T, v0 []int8, v1 []int8, v2 []int8, wan vec1 := simd.LoadInt8x64Slice(v1) vec2 := simd.LoadInt8x64Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask8x64()).AsInt8x64() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask8x64()).AsInt8x64() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask8x64()).AsInt8x64() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask8x64()).AsInt8x64() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask8x64()).AsInt8x64() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask8x64()).AsInt8x64() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask8x64()).AsInt8x64() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask8x64()).AsInt8x64() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask8x64()).AsInt8x64() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask8x64()).AsInt8x64() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask8x64()).AsInt8x64() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask8x64()).AsInt8x64() default: t.Errorf("Unknown method: Int8x64.%s", which) @@ -2000,10 +2000,10 @@ func testInt8x64UnaryMasked(t *testing.T, v0 []int8, v1 []int8, want []int8, whi vec0 := simd.LoadInt8x64Slice(v0) vec1 := simd.LoadInt8x64Slice(v1) switch which { - case "MaskedAbsolute": - gotv = 
vec0.MaskedAbsolute(vec1.AsMask8x64()) - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask8x64()) + case "AbsoluteMasked": + gotv = vec0.AbsoluteMasked(vec1.AsMask8x64()) + case "PopCountMasked": + gotv = vec0.PopCountMasked(vec1.AsMask8x64()) default: t.Errorf("Unknown method: Int8x64.%s", which) @@ -2083,28 +2083,28 @@ func testInt16x8BinaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, w vec1 := simd.LoadInt16x8Slice(v1) vec2 := simd.LoadInt16x8Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask16x8()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask16x8()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask16x8()) - case "MaskedMulHigh": - gotv = vec0.MaskedMulHigh(vec1, vec2.AsMask16x8()) - case "MaskedMulLow": - gotv = vec0.MaskedMulLow(vec1, vec2.AsMask16x8()) - case "MaskedSaturatedAdd": - gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x8()) - case "MaskedSaturatedSub": - gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x8()) - case "MaskedShiftLeft": - gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask16x8()) - case "MaskedShiftRight": - gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask16x8()) - case "MaskedShiftRightSignExtended": - gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask16x8()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask16x8()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask16x8()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask16x8()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask16x8()) + case "MulHighMasked": + gotv = vec0.MulHighMasked(vec1, vec2.AsMask16x8()) + case "MulLowMasked": + gotv = vec0.MulLowMasked(vec1, vec2.AsMask16x8()) + case "SaturatedAddMasked": + gotv = vec0.SaturatedAddMasked(vec1, vec2.AsMask16x8()) + case "SaturatedSubMasked": + gotv = vec0.SaturatedSubMasked(vec1, vec2.AsMask16x8()) + case "ShiftLeftMasked": + gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask16x8()) + case "ShiftRightMasked": + gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask16x8()) + case "ShiftRightSignExtendedMasked": + gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask16x8()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask16x8()) default: t.Errorf("Unknown method: Int16x8.%s", which) @@ -2125,8 +2125,8 @@ func testInt16x8BinaryMaskedWiden(t *testing.T, v0 []int16, v1 []int16, v2 []int vec1 := simd.LoadInt16x8Slice(v1) vec2 := simd.LoadInt16x8Slice(v2) switch which { - case "MaskedPairDotProd": - gotv = vec0.MaskedPairDotProd(vec1, vec2.AsMask16x8()) + case "PairDotProdMasked": + gotv = vec0.PairDotProdMasked(vec1, vec2.AsMask16x8()) default: t.Errorf("Unknown method: Int16x8.%s", which) @@ -2199,18 +2199,18 @@ func testInt16x8MaskedCompare(t *testing.T, v0 []int16, v1 []int16, v2 []int16, vec1 := simd.LoadInt16x8Slice(v1) vec2 := simd.LoadInt16x8Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask16x8()).AsInt16x8() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask16x8()).AsInt16x8() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask16x8()).AsInt16x8() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask16x8()).AsInt16x8() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask16x8()).AsInt16x8() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask16x8()).AsInt16x8() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask16x8()).AsInt16x8() + case "GreaterEqualMasked": 
+ gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask16x8()).AsInt16x8() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask16x8()).AsInt16x8() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask16x8()).AsInt16x8() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask16x8()).AsInt16x8() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask16x8()).AsInt16x8() default: t.Errorf("Unknown method: Int16x8.%s", which) @@ -2256,10 +2256,10 @@ func testInt16x8TernaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, vec2 := simd.LoadInt16x8Slice(v2) vec3 := simd.LoadInt16x8Slice(v3) switch which { - case "MaskedShiftLeftAndFillUpperFrom": - gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask16x8()) - case "MaskedShiftRightAndFillUpperFrom": - gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask16x8()) + case "ShiftLeftAndFillUpperFromMasked": + gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask16x8()) + case "ShiftRightAndFillUpperFromMasked": + gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask16x8()) default: t.Errorf("Unknown method: Int16x8.%s", which) @@ -2301,10 +2301,10 @@ func testInt16x8UnaryMasked(t *testing.T, v0 []int16, v1 []int16, want []int16, vec0 := simd.LoadInt16x8Slice(v0) vec1 := simd.LoadInt16x8Slice(v1) switch which { - case "MaskedAbsolute": - gotv = vec0.MaskedAbsolute(vec1.AsMask16x8()) - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask16x8()) + case "AbsoluteMasked": + gotv = vec0.AbsoluteMasked(vec1.AsMask16x8()) + case "PopCountMasked": + gotv = vec0.PopCountMasked(vec1.AsMask16x8()) default: t.Errorf("Unknown method: Int16x8.%s", which) @@ -2384,28 +2384,28 @@ func testInt16x16BinaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, vec1 := simd.LoadInt16x16Slice(v1) vec2 := simd.LoadInt16x16Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask16x16()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask16x16()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask16x16()) - case "MaskedMulHigh": - gotv = vec0.MaskedMulHigh(vec1, vec2.AsMask16x16()) - case "MaskedMulLow": - gotv = vec0.MaskedMulLow(vec1, vec2.AsMask16x16()) - case "MaskedSaturatedAdd": - gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x16()) - case "MaskedSaturatedSub": - gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x16()) - case "MaskedShiftLeft": - gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask16x16()) - case "MaskedShiftRight": - gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask16x16()) - case "MaskedShiftRightSignExtended": - gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask16x16()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask16x16()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask16x16()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask16x16()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask16x16()) + case "MulHighMasked": + gotv = vec0.MulHighMasked(vec1, vec2.AsMask16x16()) + case "MulLowMasked": + gotv = vec0.MulLowMasked(vec1, vec2.AsMask16x16()) + case "SaturatedAddMasked": + gotv = vec0.SaturatedAddMasked(vec1, vec2.AsMask16x16()) + case "SaturatedSubMasked": + gotv = vec0.SaturatedSubMasked(vec1, vec2.AsMask16x16()) + case "ShiftLeftMasked": + gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask16x16()) + case "ShiftRightMasked": + gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask16x16()) + case "ShiftRightSignExtendedMasked": 
+ gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask16x16()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask16x16()) default: t.Errorf("Unknown method: Int16x16.%s", which) @@ -2426,8 +2426,8 @@ func testInt16x16BinaryMaskedWiden(t *testing.T, v0 []int16, v1 []int16, v2 []in vec1 := simd.LoadInt16x16Slice(v1) vec2 := simd.LoadInt16x16Slice(v2) switch which { - case "MaskedPairDotProd": - gotv = vec0.MaskedPairDotProd(vec1, vec2.AsMask16x16()) + case "PairDotProdMasked": + gotv = vec0.PairDotProdMasked(vec1, vec2.AsMask16x16()) default: t.Errorf("Unknown method: Int16x16.%s", which) @@ -2500,18 +2500,18 @@ func testInt16x16MaskedCompare(t *testing.T, v0 []int16, v1 []int16, v2 []int16, vec1 := simd.LoadInt16x16Slice(v1) vec2 := simd.LoadInt16x16Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask16x16()).AsInt16x16() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask16x16()).AsInt16x16() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask16x16()).AsInt16x16() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask16x16()).AsInt16x16() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask16x16()).AsInt16x16() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask16x16()).AsInt16x16() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask16x16()).AsInt16x16() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask16x16()).AsInt16x16() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask16x16()).AsInt16x16() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask16x16()).AsInt16x16() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask16x16()).AsInt16x16() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask16x16()).AsInt16x16() default: t.Errorf("Unknown method: Int16x16.%s", which) @@ -2557,10 +2557,10 @@ func testInt16x16TernaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, vec2 := simd.LoadInt16x16Slice(v2) vec3 := simd.LoadInt16x16Slice(v3) switch which { - case "MaskedShiftLeftAndFillUpperFrom": - gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask16x16()) - case "MaskedShiftRightAndFillUpperFrom": - gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask16x16()) + case "ShiftLeftAndFillUpperFromMasked": + gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask16x16()) + case "ShiftRightAndFillUpperFromMasked": + gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask16x16()) default: t.Errorf("Unknown method: Int16x16.%s", which) @@ -2602,10 +2602,10 @@ func testInt16x16UnaryMasked(t *testing.T, v0 []int16, v1 []int16, want []int16, vec0 := simd.LoadInt16x16Slice(v0) vec1 := simd.LoadInt16x16Slice(v1) switch which { - case "MaskedAbsolute": - gotv = vec0.MaskedAbsolute(vec1.AsMask16x16()) - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask16x16()) + case "AbsoluteMasked": + gotv = vec0.AbsoluteMasked(vec1.AsMask16x16()) + case "PopCountMasked": + gotv = vec0.PopCountMasked(vec1.AsMask16x16()) default: t.Errorf("Unknown method: Int16x16.%s", which) @@ -2667,28 +2667,28 @@ func testInt16x32BinaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, vec1 := simd.LoadInt16x32Slice(v1) vec2 := simd.LoadInt16x32Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask16x32()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, 
vec2.AsMask16x32()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask16x32()) - case "MaskedMulHigh": - gotv = vec0.MaskedMulHigh(vec1, vec2.AsMask16x32()) - case "MaskedMulLow": - gotv = vec0.MaskedMulLow(vec1, vec2.AsMask16x32()) - case "MaskedSaturatedAdd": - gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x32()) - case "MaskedSaturatedSub": - gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x32()) - case "MaskedShiftLeft": - gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask16x32()) - case "MaskedShiftRight": - gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask16x32()) - case "MaskedShiftRightSignExtended": - gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask16x32()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask16x32()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask16x32()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask16x32()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask16x32()) + case "MulHighMasked": + gotv = vec0.MulHighMasked(vec1, vec2.AsMask16x32()) + case "MulLowMasked": + gotv = vec0.MulLowMasked(vec1, vec2.AsMask16x32()) + case "SaturatedAddMasked": + gotv = vec0.SaturatedAddMasked(vec1, vec2.AsMask16x32()) + case "SaturatedSubMasked": + gotv = vec0.SaturatedSubMasked(vec1, vec2.AsMask16x32()) + case "ShiftLeftMasked": + gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask16x32()) + case "ShiftRightMasked": + gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask16x32()) + case "ShiftRightSignExtendedMasked": + gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask16x32()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask16x32()) default: t.Errorf("Unknown method: Int16x32.%s", which) @@ -2709,8 +2709,8 @@ func testInt16x32BinaryMaskedWiden(t *testing.T, v0 []int16, v1 []int16, v2 []in vec1 := simd.LoadInt16x32Slice(v1) vec2 := simd.LoadInt16x32Slice(v2) switch which { - case "MaskedPairDotProd": - gotv = vec0.MaskedPairDotProd(vec1, vec2.AsMask16x32()) + case "PairDotProdMasked": + gotv = vec0.PairDotProdMasked(vec1, vec2.AsMask16x32()) default: t.Errorf("Unknown method: Int16x32.%s", which) @@ -2783,18 +2783,18 @@ func testInt16x32MaskedCompare(t *testing.T, v0 []int16, v1 []int16, v2 []int16, vec1 := simd.LoadInt16x32Slice(v1) vec2 := simd.LoadInt16x32Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask16x32()).AsInt16x32() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask16x32()).AsInt16x32() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask16x32()).AsInt16x32() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask16x32()).AsInt16x32() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask16x32()).AsInt16x32() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask16x32()).AsInt16x32() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask16x32()).AsInt16x32() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask16x32()).AsInt16x32() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask16x32()).AsInt16x32() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask16x32()).AsInt16x32() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask16x32()).AsInt16x32() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask16x32()).AsInt16x32() default: t.Errorf("Unknown method: Int16x32.%s", which) @@ -2840,10 +2840,10 @@ func testInt16x32TernaryMasked(t *testing.T, v0 []int16, v1 
[]int16, v2 []int16, vec2 := simd.LoadInt16x32Slice(v2) vec3 := simd.LoadInt16x32Slice(v3) switch which { - case "MaskedShiftLeftAndFillUpperFrom": - gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask16x32()) - case "MaskedShiftRightAndFillUpperFrom": - gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask16x32()) + case "ShiftLeftAndFillUpperFromMasked": + gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask16x32()) + case "ShiftRightAndFillUpperFromMasked": + gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask16x32()) default: t.Errorf("Unknown method: Int16x32.%s", which) @@ -2885,10 +2885,10 @@ func testInt16x32UnaryMasked(t *testing.T, v0 []int16, v1 []int16, want []int16, vec0 := simd.LoadInt16x32Slice(v0) vec1 := simd.LoadInt16x32Slice(v1) switch which { - case "MaskedAbsolute": - gotv = vec0.MaskedAbsolute(vec1.AsMask16x32()) - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask16x32()) + case "AbsoluteMasked": + gotv = vec0.AbsoluteMasked(vec1.AsMask16x32()) + case "PopCountMasked": + gotv = vec0.PopCountMasked(vec1.AsMask16x32()) default: t.Errorf("Unknown method: Int16x32.%s", which) @@ -2962,34 +2962,34 @@ func testInt32x4BinaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, w vec1 := simd.LoadInt32x4Slice(v1) vec2 := simd.LoadInt32x4Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x4()) - case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x4()) - case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x4()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask32x4()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask32x4()) - case "MaskedMulLow": - gotv = vec0.MaskedMulLow(vec1, vec2.AsMask32x4()) - case "MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask32x4()) - case "MaskedRotateLeft": - gotv = vec0.MaskedRotateLeft(vec1, vec2.AsMask32x4()) - case "MaskedRotateRight": - gotv = vec0.MaskedRotateRight(vec1, vec2.AsMask32x4()) - case "MaskedShiftLeft": - gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask32x4()) - case "MaskedShiftRight": - gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask32x4()) - case "MaskedShiftRightSignExtended": - gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask32x4()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask32x4()) - case "MaskedXor": - gotv = vec0.MaskedXor(vec1, vec2.AsMask32x4()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask32x4()) + case "AndMasked": + gotv = vec0.AndMasked(vec1, vec2.AsMask32x4()) + case "AndNotMasked": + gotv = vec0.AndNotMasked(vec1, vec2.AsMask32x4()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask32x4()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask32x4()) + case "MulLowMasked": + gotv = vec0.MulLowMasked(vec1, vec2.AsMask32x4()) + case "OrMasked": + gotv = vec0.OrMasked(vec1, vec2.AsMask32x4()) + case "RotateLeftMasked": + gotv = vec0.RotateLeftMasked(vec1, vec2.AsMask32x4()) + case "RotateRightMasked": + gotv = vec0.RotateRightMasked(vec1, vec2.AsMask32x4()) + case "ShiftLeftMasked": + gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask32x4()) + case "ShiftRightMasked": + gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask32x4()) + case "ShiftRightSignExtendedMasked": + gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask32x4()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask32x4()) + case "XorMasked": + gotv = vec0.XorMasked(vec1, vec2.AsMask32x4()) default: t.Errorf("Unknown method: 
Int32x4.%s", which) @@ -3087,10 +3087,10 @@ func testInt32x4Int16x8Int16x8Mask32x4Int32x4(t *testing.T, v0 []int32, v1 []int vec2 := simd.LoadInt16x8Slice(v2) vec3 := simd.LoadInt32x4Slice(v3) switch which { - case "MaskedPairDotProdAccumulate": - gotv = vec0.MaskedPairDotProdAccumulate(vec1, vec2, vec3.AsMask32x4()) - case "MaskedSaturatedPairDotProdAccumulate": - gotv = vec0.MaskedSaturatedPairDotProdAccumulate(vec1, vec2, vec3.AsMask32x4()) + case "PairDotProdAccumulateMasked": + gotv = vec0.PairDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x4()) + case "SaturatedPairDotProdAccumulateMasked": + gotv = vec0.SaturatedPairDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x4()) default: t.Errorf("Unknown method: Int32x4.%s", which) @@ -3111,18 +3111,18 @@ func testInt32x4MaskedCompare(t *testing.T, v0 []int32, v1 []int32, v2 []int32, vec1 := simd.LoadInt32x4Slice(v1) vec2 := simd.LoadInt32x4Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask32x4()).AsInt32x4() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask32x4()).AsInt32x4() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask32x4()).AsInt32x4() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask32x4()).AsInt32x4() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask32x4()).AsInt32x4() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask32x4()).AsInt32x4() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask32x4()).AsInt32x4() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask32x4()).AsInt32x4() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask32x4()).AsInt32x4() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask32x4()).AsInt32x4() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask32x4()).AsInt32x4() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask32x4()).AsInt32x4() default: t.Errorf("Unknown method: Int32x4.%s", which) @@ -3168,10 +3168,10 @@ func testInt32x4TernaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, vec2 := simd.LoadInt32x4Slice(v2) vec3 := simd.LoadInt32x4Slice(v3) switch which { - case "MaskedShiftLeftAndFillUpperFrom": - gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask32x4()) - case "MaskedShiftRightAndFillUpperFrom": - gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask32x4()) + case "ShiftLeftAndFillUpperFromMasked": + gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask32x4()) + case "ShiftRightAndFillUpperFromMasked": + gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask32x4()) default: t.Errorf("Unknown method: Int32x4.%s", which) @@ -3217,10 +3217,10 @@ func testInt32x4Uint8x16Int8x16Mask32x4Int32x4(t *testing.T, v0 []int32, v1 []ui vec2 := simd.LoadInt8x16Slice(v2) vec3 := simd.LoadInt32x4Slice(v3) switch which { - case "MaskedSaturatedUnsignedSignedQuadDotProdAccumulate": - gotv = vec0.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x4()) - case "MaskedUnsignedSignedQuadDotProdAccumulate": - gotv = vec0.MaskedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x4()) + case "SaturatedUnsignedSignedQuadDotProdAccumulateMasked": + gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x4()) + case "UnsignedSignedQuadDotProdAccumulateMasked": + gotv = vec0.UnsignedSignedQuadDotProdAccumulateMasked(vec1, vec2, 
vec3.AsMask32x4()) default: t.Errorf("Unknown method: Int32x4.%s", which) @@ -3262,10 +3262,10 @@ func testInt32x4UnaryMasked(t *testing.T, v0 []int32, v1 []int32, want []int32, vec0 := simd.LoadInt32x4Slice(v0) vec1 := simd.LoadInt32x4Slice(v1) switch which { - case "MaskedAbsolute": - gotv = vec0.MaskedAbsolute(vec1.AsMask32x4()) - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask32x4()) + case "AbsoluteMasked": + gotv = vec0.AbsoluteMasked(vec1.AsMask32x4()) + case "PopCountMasked": + gotv = vec0.PopCountMasked(vec1.AsMask32x4()) default: t.Errorf("Unknown method: Int32x4.%s", which) @@ -3339,34 +3339,34 @@ func testInt32x8BinaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, w vec1 := simd.LoadInt32x8Slice(v1) vec2 := simd.LoadInt32x8Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x8()) - case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x8()) - case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x8()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask32x8()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask32x8()) - case "MaskedMulLow": - gotv = vec0.MaskedMulLow(vec1, vec2.AsMask32x8()) - case "MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask32x8()) - case "MaskedRotateLeft": - gotv = vec0.MaskedRotateLeft(vec1, vec2.AsMask32x8()) - case "MaskedRotateRight": - gotv = vec0.MaskedRotateRight(vec1, vec2.AsMask32x8()) - case "MaskedShiftLeft": - gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask32x8()) - case "MaskedShiftRight": - gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask32x8()) - case "MaskedShiftRightSignExtended": - gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask32x8()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask32x8()) - case "MaskedXor": - gotv = vec0.MaskedXor(vec1, vec2.AsMask32x8()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask32x8()) + case "AndMasked": + gotv = vec0.AndMasked(vec1, vec2.AsMask32x8()) + case "AndNotMasked": + gotv = vec0.AndNotMasked(vec1, vec2.AsMask32x8()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask32x8()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask32x8()) + case "MulLowMasked": + gotv = vec0.MulLowMasked(vec1, vec2.AsMask32x8()) + case "OrMasked": + gotv = vec0.OrMasked(vec1, vec2.AsMask32x8()) + case "RotateLeftMasked": + gotv = vec0.RotateLeftMasked(vec1, vec2.AsMask32x8()) + case "RotateRightMasked": + gotv = vec0.RotateRightMasked(vec1, vec2.AsMask32x8()) + case "ShiftLeftMasked": + gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask32x8()) + case "ShiftRightMasked": + gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask32x8()) + case "ShiftRightSignExtendedMasked": + gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask32x8()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask32x8()) + case "XorMasked": + gotv = vec0.XorMasked(vec1, vec2.AsMask32x8()) default: t.Errorf("Unknown method: Int32x8.%s", which) @@ -3464,10 +3464,10 @@ func testInt32x8Int16x16Int16x16Mask32x8Int32x8(t *testing.T, v0 []int32, v1 []i vec2 := simd.LoadInt16x16Slice(v2) vec3 := simd.LoadInt32x8Slice(v3) switch which { - case "MaskedPairDotProdAccumulate": - gotv = vec0.MaskedPairDotProdAccumulate(vec1, vec2, vec3.AsMask32x8()) - case "MaskedSaturatedPairDotProdAccumulate": - gotv = vec0.MaskedSaturatedPairDotProdAccumulate(vec1, vec2, vec3.AsMask32x8()) + case "PairDotProdAccumulateMasked": + gotv = vec0.PairDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x8()) + case 
"SaturatedPairDotProdAccumulateMasked": + gotv = vec0.SaturatedPairDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x8()) default: t.Errorf("Unknown method: Int32x8.%s", which) @@ -3488,18 +3488,18 @@ func testInt32x8MaskedCompare(t *testing.T, v0 []int32, v1 []int32, v2 []int32, vec1 := simd.LoadInt32x8Slice(v1) vec2 := simd.LoadInt32x8Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask32x8()).AsInt32x8() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask32x8()).AsInt32x8() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask32x8()).AsInt32x8() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask32x8()).AsInt32x8() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask32x8()).AsInt32x8() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask32x8()).AsInt32x8() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask32x8()).AsInt32x8() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask32x8()).AsInt32x8() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask32x8()).AsInt32x8() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask32x8()).AsInt32x8() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask32x8()).AsInt32x8() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask32x8()).AsInt32x8() default: t.Errorf("Unknown method: Int32x8.%s", which) @@ -3545,10 +3545,10 @@ func testInt32x8TernaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, vec2 := simd.LoadInt32x8Slice(v2) vec3 := simd.LoadInt32x8Slice(v3) switch which { - case "MaskedShiftLeftAndFillUpperFrom": - gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask32x8()) - case "MaskedShiftRightAndFillUpperFrom": - gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask32x8()) + case "ShiftLeftAndFillUpperFromMasked": + gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask32x8()) + case "ShiftRightAndFillUpperFromMasked": + gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask32x8()) default: t.Errorf("Unknown method: Int32x8.%s", which) @@ -3594,10 +3594,10 @@ func testInt32x8Uint8x32Int8x32Mask32x8Int32x8(t *testing.T, v0 []int32, v1 []ui vec2 := simd.LoadInt8x32Slice(v2) vec3 := simd.LoadInt32x8Slice(v3) switch which { - case "MaskedSaturatedUnsignedSignedQuadDotProdAccumulate": - gotv = vec0.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x8()) - case "MaskedUnsignedSignedQuadDotProdAccumulate": - gotv = vec0.MaskedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x8()) + case "SaturatedUnsignedSignedQuadDotProdAccumulateMasked": + gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x8()) + case "UnsignedSignedQuadDotProdAccumulateMasked": + gotv = vec0.UnsignedSignedQuadDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x8()) default: t.Errorf("Unknown method: Int32x8.%s", which) @@ -3639,10 +3639,10 @@ func testInt32x8UnaryMasked(t *testing.T, v0 []int32, v1 []int32, want []int32, vec0 := simd.LoadInt32x8Slice(v0) vec1 := simd.LoadInt32x8Slice(v1) switch which { - case "MaskedAbsolute": - gotv = vec0.MaskedAbsolute(vec1.AsMask32x8()) - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask32x8()) + case "AbsoluteMasked": + gotv = vec0.AbsoluteMasked(vec1.AsMask32x8()) + case "PopCountMasked": + gotv = vec0.PopCountMasked(vec1.AsMask32x8()) default: 
t.Errorf("Unknown method: Int32x8.%s", which) @@ -3710,34 +3710,34 @@ func testInt32x16BinaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, vec1 := simd.LoadInt32x16Slice(v1) vec2 := simd.LoadInt32x16Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x16()) - case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x16()) - case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x16()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask32x16()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask32x16()) - case "MaskedMulLow": - gotv = vec0.MaskedMulLow(vec1, vec2.AsMask32x16()) - case "MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask32x16()) - case "MaskedRotateLeft": - gotv = vec0.MaskedRotateLeft(vec1, vec2.AsMask32x16()) - case "MaskedRotateRight": - gotv = vec0.MaskedRotateRight(vec1, vec2.AsMask32x16()) - case "MaskedShiftLeft": - gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask32x16()) - case "MaskedShiftRight": - gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask32x16()) - case "MaskedShiftRightSignExtended": - gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask32x16()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask32x16()) - case "MaskedXor": - gotv = vec0.MaskedXor(vec1, vec2.AsMask32x16()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask32x16()) + case "AndMasked": + gotv = vec0.AndMasked(vec1, vec2.AsMask32x16()) + case "AndNotMasked": + gotv = vec0.AndNotMasked(vec1, vec2.AsMask32x16()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask32x16()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask32x16()) + case "MulLowMasked": + gotv = vec0.MulLowMasked(vec1, vec2.AsMask32x16()) + case "OrMasked": + gotv = vec0.OrMasked(vec1, vec2.AsMask32x16()) + case "RotateLeftMasked": + gotv = vec0.RotateLeftMasked(vec1, vec2.AsMask32x16()) + case "RotateRightMasked": + gotv = vec0.RotateRightMasked(vec1, vec2.AsMask32x16()) + case "ShiftLeftMasked": + gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask32x16()) + case "ShiftRightMasked": + gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask32x16()) + case "ShiftRightSignExtendedMasked": + gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask32x16()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask32x16()) + case "XorMasked": + gotv = vec0.XorMasked(vec1, vec2.AsMask32x16()) default: t.Errorf("Unknown method: Int32x16.%s", which) @@ -3814,10 +3814,10 @@ func testInt32x16Int16x32Int16x32Mask32x16Int32x16(t *testing.T, v0 []int32, v1 vec2 := simd.LoadInt16x32Slice(v2) vec3 := simd.LoadInt32x16Slice(v3) switch which { - case "MaskedPairDotProdAccumulate": - gotv = vec0.MaskedPairDotProdAccumulate(vec1, vec2, vec3.AsMask32x16()) - case "MaskedSaturatedPairDotProdAccumulate": - gotv = vec0.MaskedSaturatedPairDotProdAccumulate(vec1, vec2, vec3.AsMask32x16()) + case "PairDotProdAccumulateMasked": + gotv = vec0.PairDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x16()) + case "SaturatedPairDotProdAccumulateMasked": + gotv = vec0.SaturatedPairDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x16()) default: t.Errorf("Unknown method: Int32x16.%s", which) @@ -3838,18 +3838,18 @@ func testInt32x16MaskedCompare(t *testing.T, v0 []int32, v1 []int32, v2 []int32, vec1 := simd.LoadInt32x16Slice(v1) vec2 := simd.LoadInt32x16Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask32x16()).AsInt32x16() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, 
vec2.AsMask32x16()).AsInt32x16() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask32x16()).AsInt32x16() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask32x16()).AsInt32x16() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask32x16()).AsInt32x16() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask32x16()).AsInt32x16() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask32x16()).AsInt32x16() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() default: t.Errorf("Unknown method: Int32x16.%s", which) @@ -3895,10 +3895,10 @@ func testInt32x16TernaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, vec2 := simd.LoadInt32x16Slice(v2) vec3 := simd.LoadInt32x16Slice(v3) switch which { - case "MaskedShiftLeftAndFillUpperFrom": - gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask32x16()) - case "MaskedShiftRightAndFillUpperFrom": - gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask32x16()) + case "ShiftLeftAndFillUpperFromMasked": + gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask32x16()) + case "ShiftRightAndFillUpperFromMasked": + gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask32x16()) default: t.Errorf("Unknown method: Int32x16.%s", which) @@ -3944,10 +3944,10 @@ func testInt32x16Uint8x64Int8x64Mask32x16Int32x16(t *testing.T, v0 []int32, v1 [ vec2 := simd.LoadInt8x64Slice(v2) vec3 := simd.LoadInt32x16Slice(v3) switch which { - case "MaskedSaturatedUnsignedSignedQuadDotProdAccumulate": - gotv = vec0.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x16()) - case "MaskedUnsignedSignedQuadDotProdAccumulate": - gotv = vec0.MaskedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x16()) + case "SaturatedUnsignedSignedQuadDotProdAccumulateMasked": + gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x16()) + case "UnsignedSignedQuadDotProdAccumulateMasked": + gotv = vec0.UnsignedSignedQuadDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x16()) default: t.Errorf("Unknown method: Int32x16.%s", which) @@ -3989,10 +3989,10 @@ func testInt32x16UnaryMasked(t *testing.T, v0 []int32, v1 []int32, want []int32, vec0 := simd.LoadInt32x16Slice(v0) vec1 := simd.LoadInt32x16Slice(v1) switch which { - case "MaskedAbsolute": - gotv = vec0.MaskedAbsolute(vec1.AsMask32x16()) - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask32x16()) + case "AbsoluteMasked": + gotv = vec0.AbsoluteMasked(vec1.AsMask32x16()) + case "PopCountMasked": + gotv = vec0.PopCountMasked(vec1.AsMask32x16()) default: t.Errorf("Unknown method: Int32x16.%s", which) @@ -4062,36 +4062,36 @@ func testInt64x2BinaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, w vec1 := simd.LoadInt64x2Slice(v1) vec2 := simd.LoadInt64x2Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x2()) - case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x2()) - case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask64x2()) - case "MaskedMax": - 
gotv = vec0.MaskedMax(vec1, vec2.AsMask64x2()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask64x2()) - case "MaskedMulEvenWiden": - gotv = vec0.MaskedMulEvenWiden(vec1, vec2.AsMask64x2()) - case "MaskedMulLow": - gotv = vec0.MaskedMulLow(vec1, vec2.AsMask64x2()) - case "MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask64x2()) - case "MaskedRotateLeft": - gotv = vec0.MaskedRotateLeft(vec1, vec2.AsMask64x2()) - case "MaskedRotateRight": - gotv = vec0.MaskedRotateRight(vec1, vec2.AsMask64x2()) - case "MaskedShiftLeft": - gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask64x2()) - case "MaskedShiftRight": - gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask64x2()) - case "MaskedShiftRightSignExtended": - gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask64x2()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask64x2()) - case "MaskedXor": - gotv = vec0.MaskedXor(vec1, vec2.AsMask64x2()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask64x2()) + case "AndMasked": + gotv = vec0.AndMasked(vec1, vec2.AsMask64x2()) + case "AndNotMasked": + gotv = vec0.AndNotMasked(vec1, vec2.AsMask64x2()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask64x2()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask64x2()) + case "MulEvenWidenMasked": + gotv = vec0.MulEvenWidenMasked(vec1, vec2.AsMask64x2()) + case "MulLowMasked": + gotv = vec0.MulLowMasked(vec1, vec2.AsMask64x2()) + case "OrMasked": + gotv = vec0.OrMasked(vec1, vec2.AsMask64x2()) + case "RotateLeftMasked": + gotv = vec0.RotateLeftMasked(vec1, vec2.AsMask64x2()) + case "RotateRightMasked": + gotv = vec0.RotateRightMasked(vec1, vec2.AsMask64x2()) + case "ShiftLeftMasked": + gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask64x2()) + case "ShiftRightMasked": + gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask64x2()) + case "ShiftRightSignExtendedMasked": + gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask64x2()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask64x2()) + case "XorMasked": + gotv = vec0.XorMasked(vec1, vec2.AsMask64x2()) default: t.Errorf("Unknown method: Int64x2.%s", which) @@ -4143,18 +4143,18 @@ func testInt64x2MaskedCompare(t *testing.T, v0 []int64, v1 []int64, v2 []int64, vec1 := simd.LoadInt64x2Slice(v1) vec2 := simd.LoadInt64x2Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask64x2()).AsInt64x2() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask64x2()).AsInt64x2() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask64x2()).AsInt64x2() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask64x2()).AsInt64x2() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask64x2()).AsInt64x2() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask64x2()).AsInt64x2() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask64x2()).AsInt64x2() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask64x2()).AsInt64x2() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask64x2()).AsInt64x2() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask64x2()).AsInt64x2() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask64x2()).AsInt64x2() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask64x2()).AsInt64x2() default: t.Errorf("Unknown method: Int64x2.%s", which) @@ -4200,10 +4200,10 @@ func testInt64x2TernaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 
[]int64, vec2 := simd.LoadInt64x2Slice(v2) vec3 := simd.LoadInt64x2Slice(v3) switch which { - case "MaskedShiftLeftAndFillUpperFrom": - gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask64x2()) - case "MaskedShiftRightAndFillUpperFrom": - gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask64x2()) + case "ShiftLeftAndFillUpperFromMasked": + gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask64x2()) + case "ShiftRightAndFillUpperFromMasked": + gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask64x2()) default: t.Errorf("Unknown method: Int64x2.%s", which) @@ -4245,10 +4245,10 @@ func testInt64x2UnaryMasked(t *testing.T, v0 []int64, v1 []int64, want []int64, vec0 := simd.LoadInt64x2Slice(v0) vec1 := simd.LoadInt64x2Slice(v1) switch which { - case "MaskedAbsolute": - gotv = vec0.MaskedAbsolute(vec1.AsMask64x2()) - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask64x2()) + case "AbsoluteMasked": + gotv = vec0.AbsoluteMasked(vec1.AsMask64x2()) + case "PopCountMasked": + gotv = vec0.PopCountMasked(vec1.AsMask64x2()) default: t.Errorf("Unknown method: Int64x2.%s", which) @@ -4318,36 +4318,36 @@ func testInt64x4BinaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, w vec1 := simd.LoadInt64x4Slice(v1) vec2 := simd.LoadInt64x4Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x4()) - case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x4()) - case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask64x4()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask64x4()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask64x4()) - case "MaskedMulEvenWiden": - gotv = vec0.MaskedMulEvenWiden(vec1, vec2.AsMask64x4()) - case "MaskedMulLow": - gotv = vec0.MaskedMulLow(vec1, vec2.AsMask64x4()) - case "MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask64x4()) - case "MaskedRotateLeft": - gotv = vec0.MaskedRotateLeft(vec1, vec2.AsMask64x4()) - case "MaskedRotateRight": - gotv = vec0.MaskedRotateRight(vec1, vec2.AsMask64x4()) - case "MaskedShiftLeft": - gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask64x4()) - case "MaskedShiftRight": - gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask64x4()) - case "MaskedShiftRightSignExtended": - gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask64x4()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask64x4()) - case "MaskedXor": - gotv = vec0.MaskedXor(vec1, vec2.AsMask64x4()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask64x4()) + case "AndMasked": + gotv = vec0.AndMasked(vec1, vec2.AsMask64x4()) + case "AndNotMasked": + gotv = vec0.AndNotMasked(vec1, vec2.AsMask64x4()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask64x4()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask64x4()) + case "MulEvenWidenMasked": + gotv = vec0.MulEvenWidenMasked(vec1, vec2.AsMask64x4()) + case "MulLowMasked": + gotv = vec0.MulLowMasked(vec1, vec2.AsMask64x4()) + case "OrMasked": + gotv = vec0.OrMasked(vec1, vec2.AsMask64x4()) + case "RotateLeftMasked": + gotv = vec0.RotateLeftMasked(vec1, vec2.AsMask64x4()) + case "RotateRightMasked": + gotv = vec0.RotateRightMasked(vec1, vec2.AsMask64x4()) + case "ShiftLeftMasked": + gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask64x4()) + case "ShiftRightMasked": + gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask64x4()) + case "ShiftRightSignExtendedMasked": + gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask64x4()) + case "SubMasked": + gotv = 
vec0.SubMasked(vec1, vec2.AsMask64x4()) + case "XorMasked": + gotv = vec0.XorMasked(vec1, vec2.AsMask64x4()) default: t.Errorf("Unknown method: Int64x4.%s", which) @@ -4399,18 +4399,18 @@ func testInt64x4MaskedCompare(t *testing.T, v0 []int64, v1 []int64, v2 []int64, vec1 := simd.LoadInt64x4Slice(v1) vec2 := simd.LoadInt64x4Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask64x4()).AsInt64x4() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask64x4()).AsInt64x4() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask64x4()).AsInt64x4() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask64x4()).AsInt64x4() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask64x4()).AsInt64x4() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask64x4()).AsInt64x4() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask64x4()).AsInt64x4() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask64x4()).AsInt64x4() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask64x4()).AsInt64x4() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask64x4()).AsInt64x4() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask64x4()).AsInt64x4() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask64x4()).AsInt64x4() default: t.Errorf("Unknown method: Int64x4.%s", which) @@ -4456,10 +4456,10 @@ func testInt64x4TernaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, vec2 := simd.LoadInt64x4Slice(v2) vec3 := simd.LoadInt64x4Slice(v3) switch which { - case "MaskedShiftLeftAndFillUpperFrom": - gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask64x4()) - case "MaskedShiftRightAndFillUpperFrom": - gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask64x4()) + case "ShiftLeftAndFillUpperFromMasked": + gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask64x4()) + case "ShiftRightAndFillUpperFromMasked": + gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask64x4()) default: t.Errorf("Unknown method: Int64x4.%s", which) @@ -4501,10 +4501,10 @@ func testInt64x4UnaryMasked(t *testing.T, v0 []int64, v1 []int64, want []int64, vec0 := simd.LoadInt64x4Slice(v0) vec1 := simd.LoadInt64x4Slice(v1) switch which { - case "MaskedAbsolute": - gotv = vec0.MaskedAbsolute(vec1.AsMask64x4()) - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask64x4()) + case "AbsoluteMasked": + gotv = vec0.AbsoluteMasked(vec1.AsMask64x4()) + case "PopCountMasked": + gotv = vec0.PopCountMasked(vec1.AsMask64x4()) default: t.Errorf("Unknown method: Int64x4.%s", which) @@ -4574,36 +4574,36 @@ func testInt64x8BinaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, w vec1 := simd.LoadInt64x8Slice(v1) vec2 := simd.LoadInt64x8Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x8()) - case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x8()) - case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask64x8()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask64x8()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask64x8()) - case "MaskedMulEvenWiden": - gotv = vec0.MaskedMulEvenWiden(vec1, vec2.AsMask64x8()) - case "MaskedMulLow": - gotv = vec0.MaskedMulLow(vec1, vec2.AsMask64x8()) - case "MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask64x8()) - case "MaskedRotateLeft": - gotv = 
vec0.MaskedRotateLeft(vec1, vec2.AsMask64x8()) - case "MaskedRotateRight": - gotv = vec0.MaskedRotateRight(vec1, vec2.AsMask64x8()) - case "MaskedShiftLeft": - gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask64x8()) - case "MaskedShiftRight": - gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask64x8()) - case "MaskedShiftRightSignExtended": - gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask64x8()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask64x8()) - case "MaskedXor": - gotv = vec0.MaskedXor(vec1, vec2.AsMask64x8()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask64x8()) + case "AndMasked": + gotv = vec0.AndMasked(vec1, vec2.AsMask64x8()) + case "AndNotMasked": + gotv = vec0.AndNotMasked(vec1, vec2.AsMask64x8()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask64x8()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask64x8()) + case "MulEvenWidenMasked": + gotv = vec0.MulEvenWidenMasked(vec1, vec2.AsMask64x8()) + case "MulLowMasked": + gotv = vec0.MulLowMasked(vec1, vec2.AsMask64x8()) + case "OrMasked": + gotv = vec0.OrMasked(vec1, vec2.AsMask64x8()) + case "RotateLeftMasked": + gotv = vec0.RotateLeftMasked(vec1, vec2.AsMask64x8()) + case "RotateRightMasked": + gotv = vec0.RotateRightMasked(vec1, vec2.AsMask64x8()) + case "ShiftLeftMasked": + gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask64x8()) + case "ShiftRightMasked": + gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask64x8()) + case "ShiftRightSignExtendedMasked": + gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask64x8()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask64x8()) + case "XorMasked": + gotv = vec0.XorMasked(vec1, vec2.AsMask64x8()) default: t.Errorf("Unknown method: Int64x8.%s", which) @@ -4655,18 +4655,18 @@ func testInt64x8MaskedCompare(t *testing.T, v0 []int64, v1 []int64, v2 []int64, vec1 := simd.LoadInt64x8Slice(v1) vec2 := simd.LoadInt64x8Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask64x8()).AsInt64x8() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask64x8()).AsInt64x8() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask64x8()).AsInt64x8() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask64x8()).AsInt64x8() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask64x8()).AsInt64x8() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask64x8()).AsInt64x8() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask64x8()).AsInt64x8() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask64x8()).AsInt64x8() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask64x8()).AsInt64x8() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask64x8()).AsInt64x8() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask64x8()).AsInt64x8() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask64x8()).AsInt64x8() default: t.Errorf("Unknown method: Int64x8.%s", which) @@ -4712,10 +4712,10 @@ func testInt64x8TernaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, vec2 := simd.LoadInt64x8Slice(v2) vec3 := simd.LoadInt64x8Slice(v3) switch which { - case "MaskedShiftLeftAndFillUpperFrom": - gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask64x8()) - case "MaskedShiftRightAndFillUpperFrom": - gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask64x8()) + case "ShiftLeftAndFillUpperFromMasked": + gotv = 
vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask64x8()) + case "ShiftRightAndFillUpperFromMasked": + gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask64x8()) default: t.Errorf("Unknown method: Int64x8.%s", which) @@ -4757,10 +4757,10 @@ func testInt64x8UnaryMasked(t *testing.T, v0 []int64, v1 []int64, want []int64, vec0 := simd.LoadInt64x8Slice(v0) vec1 := simd.LoadInt64x8Slice(v1) switch which { - case "MaskedAbsolute": - gotv = vec0.MaskedAbsolute(vec1.AsMask64x8()) - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask64x8()) + case "AbsoluteMasked": + gotv = vec0.AbsoluteMasked(vec1.AsMask64x8()) + case "PopCountMasked": + gotv = vec0.PopCountMasked(vec1.AsMask64x8()) default: t.Errorf("Unknown method: Int64x8.%s", which) @@ -4824,22 +4824,22 @@ func testUint8x16BinaryMasked(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, w vec1 := simd.LoadUint8x16Slice(v1) vec2 := simd.LoadInt8x16Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask8x16()) - case "MaskedAverage": - gotv = vec0.MaskedAverage(vec1, vec2.AsMask8x16()) - case "MaskedGaloisFieldMul": - gotv = vec0.MaskedGaloisFieldMul(vec1, vec2.AsMask8x16()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask8x16()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask8x16()) - case "MaskedSaturatedAdd": - gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask8x16()) - case "MaskedSaturatedSub": - gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask8x16()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask8x16()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask8x16()) + case "AverageMasked": + gotv = vec0.AverageMasked(vec1, vec2.AsMask8x16()) + case "GaloisFieldMulMasked": + gotv = vec0.GaloisFieldMulMasked(vec1, vec2.AsMask8x16()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask8x16()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask8x16()) + case "SaturatedAddMasked": + gotv = vec0.SaturatedAddMasked(vec1, vec2.AsMask8x16()) + case "SaturatedSubMasked": + gotv = vec0.SaturatedSubMasked(vec1, vec2.AsMask8x16()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask8x16()) default: t.Errorf("Unknown method: Uint8x16.%s", which) @@ -4912,8 +4912,8 @@ func testUint8x16Int8x16Mask16x8Int16x8(t *testing.T, v0 []uint8, v1 []int8, v2 vec1 := simd.LoadInt8x16Slice(v1) vec2 := simd.LoadInt16x8Slice(v2) switch which { - case "MaskedSaturatedUnsignedSignedPairDotProd": - gotv = vec0.MaskedSaturatedUnsignedSignedPairDotProd(vec1, vec2.AsMask16x8()) + case "SaturatedUnsignedSignedPairDotProdMasked": + gotv = vec0.SaturatedUnsignedSignedPairDotProdMasked(vec1, vec2.AsMask16x8()) default: t.Errorf("Unknown method: Uint8x16.%s", which) @@ -4934,18 +4934,18 @@ func testUint8x16MaskedCompare(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, vec1 := simd.LoadUint8x16Slice(v1) vec2 := simd.LoadInt8x16Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask8x16()).AsInt8x16() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask8x16()).AsInt8x16() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask8x16()).AsInt8x16() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask8x16()).AsInt8x16() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask8x16()).AsInt8x16() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask8x16()).AsInt8x16() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, 
vec2.AsMask8x16()).AsInt8x16() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask8x16()).AsInt8x16() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask8x16()).AsInt8x16() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask8x16()).AsInt8x16() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask8x16()).AsInt8x16() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask8x16()).AsInt8x16() default: t.Errorf("Unknown method: Uint8x16.%s", which) @@ -4985,8 +4985,8 @@ func testUint8x16UnaryMasked(t *testing.T, v0 []uint8, v1 []int8, want []uint8, vec0 := simd.LoadUint8x16Slice(v0) vec1 := simd.LoadInt8x16Slice(v1) switch which { - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask8x16()) + case "PopCountMasked": + gotv = vec0.PopCountMasked(vec1.AsMask8x16()) default: t.Errorf("Unknown method: Uint8x16.%s", which) @@ -5050,22 +5050,22 @@ func testUint8x32BinaryMasked(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, w vec1 := simd.LoadUint8x32Slice(v1) vec2 := simd.LoadInt8x32Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask8x32()) - case "MaskedAverage": - gotv = vec0.MaskedAverage(vec1, vec2.AsMask8x32()) - case "MaskedGaloisFieldMul": - gotv = vec0.MaskedGaloisFieldMul(vec1, vec2.AsMask8x32()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask8x32()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask8x32()) - case "MaskedSaturatedAdd": - gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask8x32()) - case "MaskedSaturatedSub": - gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask8x32()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask8x32()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask8x32()) + case "AverageMasked": + gotv = vec0.AverageMasked(vec1, vec2.AsMask8x32()) + case "GaloisFieldMulMasked": + gotv = vec0.GaloisFieldMulMasked(vec1, vec2.AsMask8x32()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask8x32()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask8x32()) + case "SaturatedAddMasked": + gotv = vec0.SaturatedAddMasked(vec1, vec2.AsMask8x32()) + case "SaturatedSubMasked": + gotv = vec0.SaturatedSubMasked(vec1, vec2.AsMask8x32()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask8x32()) default: t.Errorf("Unknown method: Uint8x32.%s", which) @@ -5138,8 +5138,8 @@ func testUint8x32Int8x32Mask16x16Int16x16(t *testing.T, v0 []uint8, v1 []int8, v vec1 := simd.LoadInt8x32Slice(v1) vec2 := simd.LoadInt16x16Slice(v2) switch which { - case "MaskedSaturatedUnsignedSignedPairDotProd": - gotv = vec0.MaskedSaturatedUnsignedSignedPairDotProd(vec1, vec2.AsMask16x16()) + case "SaturatedUnsignedSignedPairDotProdMasked": + gotv = vec0.SaturatedUnsignedSignedPairDotProdMasked(vec1, vec2.AsMask16x16()) default: t.Errorf("Unknown method: Uint8x32.%s", which) @@ -5160,18 +5160,18 @@ func testUint8x32MaskedCompare(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, vec1 := simd.LoadUint8x32Slice(v1) vec2 := simd.LoadInt8x32Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask8x32()).AsInt8x32() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask8x32()).AsInt8x32() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask8x32()).AsInt8x32() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask8x32()).AsInt8x32() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask8x32()).AsInt8x32() 
- case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask8x32()).AsInt8x32() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask8x32()).AsInt8x32() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask8x32()).AsInt8x32() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask8x32()).AsInt8x32() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask8x32()).AsInt8x32() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask8x32()).AsInt8x32() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask8x32()).AsInt8x32() default: t.Errorf("Unknown method: Uint8x32.%s", which) @@ -5211,8 +5211,8 @@ func testUint8x32UnaryMasked(t *testing.T, v0 []uint8, v1 []int8, want []uint8, vec0 := simd.LoadUint8x32Slice(v0) vec1 := simd.LoadInt8x32Slice(v1) switch which { - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask8x32()) + case "PopCountMasked": + gotv = vec0.PopCountMasked(vec1.AsMask8x32()) default: t.Errorf("Unknown method: Uint8x32.%s", which) @@ -5268,22 +5268,22 @@ func testUint8x64BinaryMasked(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, w vec1 := simd.LoadUint8x64Slice(v1) vec2 := simd.LoadInt8x64Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask8x64()) - case "MaskedAverage": - gotv = vec0.MaskedAverage(vec1, vec2.AsMask8x64()) - case "MaskedGaloisFieldMul": - gotv = vec0.MaskedGaloisFieldMul(vec1, vec2.AsMask8x64()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask8x64()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask8x64()) - case "MaskedSaturatedAdd": - gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask8x64()) - case "MaskedSaturatedSub": - gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask8x64()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask8x64()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask8x64()) + case "AverageMasked": + gotv = vec0.AverageMasked(vec1, vec2.AsMask8x64()) + case "GaloisFieldMulMasked": + gotv = vec0.GaloisFieldMulMasked(vec1, vec2.AsMask8x64()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask8x64()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask8x64()) + case "SaturatedAddMasked": + gotv = vec0.SaturatedAddMasked(vec1, vec2.AsMask8x64()) + case "SaturatedSubMasked": + gotv = vec0.SaturatedSubMasked(vec1, vec2.AsMask8x64()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask8x64()) default: t.Errorf("Unknown method: Uint8x64.%s", which) @@ -5356,8 +5356,8 @@ func testUint8x64Int8x64Mask16x32Int16x32(t *testing.T, v0 []uint8, v1 []int8, v vec1 := simd.LoadInt8x64Slice(v1) vec2 := simd.LoadInt16x32Slice(v2) switch which { - case "MaskedSaturatedUnsignedSignedPairDotProd": - gotv = vec0.MaskedSaturatedUnsignedSignedPairDotProd(vec1, vec2.AsMask16x32()) + case "SaturatedUnsignedSignedPairDotProdMasked": + gotv = vec0.SaturatedUnsignedSignedPairDotProdMasked(vec1, vec2.AsMask16x32()) default: t.Errorf("Unknown method: Uint8x64.%s", which) @@ -5378,18 +5378,18 @@ func testUint8x64MaskedCompare(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, vec1 := simd.LoadUint8x64Slice(v1) vec2 := simd.LoadInt8x64Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask8x64()).AsInt8x64() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask8x64()).AsInt8x64() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask8x64()).AsInt8x64() - case "MaskedLess": - gotv = 
vec0.MaskedLess(vec1, vec2.AsMask8x64()).AsInt8x64() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask8x64()).AsInt8x64() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask8x64()).AsInt8x64() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask8x64()).AsInt8x64() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask8x64()).AsInt8x64() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask8x64()).AsInt8x64() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask8x64()).AsInt8x64() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask8x64()).AsInt8x64() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask8x64()).AsInt8x64() default: t.Errorf("Unknown method: Uint8x64.%s", which) @@ -5429,8 +5429,8 @@ func testUint8x64UnaryMasked(t *testing.T, v0 []uint8, v1 []int8, want []uint8, vec0 := simd.LoadUint8x64Slice(v0) vec1 := simd.LoadInt8x64Slice(v1) switch which { - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask8x64()) + case "PopCountMasked": + gotv = vec0.PopCountMasked(vec1.AsMask8x64()) default: t.Errorf("Unknown method: Uint8x64.%s", which) @@ -5504,28 +5504,28 @@ func testUint16x8BinaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16 vec1 := simd.LoadUint16x8Slice(v1) vec2 := simd.LoadInt16x8Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask16x8()) - case "MaskedAverage": - gotv = vec0.MaskedAverage(vec1, vec2.AsMask16x8()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask16x8()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask16x8()) - case "MaskedMulHigh": - gotv = vec0.MaskedMulHigh(vec1, vec2.AsMask16x8()) - case "MaskedSaturatedAdd": - gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x8()) - case "MaskedSaturatedSub": - gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x8()) - case "MaskedShiftLeft": - gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask16x8()) - case "MaskedShiftRight": - gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask16x8()) - case "MaskedShiftRightSignExtended": - gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask16x8()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask16x8()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask16x8()) + case "AverageMasked": + gotv = vec0.AverageMasked(vec1, vec2.AsMask16x8()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask16x8()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask16x8()) + case "MulHighMasked": + gotv = vec0.MulHighMasked(vec1, vec2.AsMask16x8()) + case "SaturatedAddMasked": + gotv = vec0.SaturatedAddMasked(vec1, vec2.AsMask16x8()) + case "SaturatedSubMasked": + gotv = vec0.SaturatedSubMasked(vec1, vec2.AsMask16x8()) + case "ShiftLeftMasked": + gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask16x8()) + case "ShiftRightMasked": + gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask16x8()) + case "ShiftRightSignExtendedMasked": + gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask16x8()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask16x8()) default: t.Errorf("Unknown method: Uint16x8.%s", which) @@ -5577,18 +5577,18 @@ func testUint16x8MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int1 vec1 := simd.LoadUint16x8Slice(v1) vec2 := simd.LoadInt16x8Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask16x8()).AsInt16x8() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, 
vec2.AsMask16x8()).AsInt16x8() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask16x8()).AsInt16x8() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask16x8()).AsInt16x8() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask16x8()).AsInt16x8() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask16x8()).AsInt16x8() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask16x8()).AsInt16x8() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask16x8()).AsInt16x8() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask16x8()).AsInt16x8() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask16x8()).AsInt16x8() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask16x8()).AsInt16x8() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask16x8()).AsInt16x8() default: t.Errorf("Unknown method: Uint16x8.%s", which) @@ -5634,10 +5634,10 @@ func testUint16x8TernaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []uint vec2 := simd.LoadUint16x8Slice(v2) vec3 := simd.LoadInt16x8Slice(v3) switch which { - case "MaskedShiftLeftAndFillUpperFrom": - gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask16x8()) - case "MaskedShiftRightAndFillUpperFrom": - gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask16x8()) + case "ShiftLeftAndFillUpperFromMasked": + gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask16x8()) + case "ShiftRightAndFillUpperFromMasked": + gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask16x8()) default: t.Errorf("Unknown method: Uint16x8.%s", which) @@ -5677,8 +5677,8 @@ func testUint16x8UnaryMasked(t *testing.T, v0 []uint16, v1 []int16, want []uint1 vec0 := simd.LoadUint16x8Slice(v0) vec1 := simd.LoadInt16x8Slice(v1) switch which { - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask16x8()) + case "PopCountMasked": + gotv = vec0.PopCountMasked(vec1.AsMask16x8()) default: t.Errorf("Unknown method: Uint16x8.%s", which) @@ -5752,28 +5752,28 @@ func testUint16x16BinaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []int1 vec1 := simd.LoadUint16x16Slice(v1) vec2 := simd.LoadInt16x16Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask16x16()) - case "MaskedAverage": - gotv = vec0.MaskedAverage(vec1, vec2.AsMask16x16()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask16x16()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask16x16()) - case "MaskedMulHigh": - gotv = vec0.MaskedMulHigh(vec1, vec2.AsMask16x16()) - case "MaskedSaturatedAdd": - gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x16()) - case "MaskedSaturatedSub": - gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x16()) - case "MaskedShiftLeft": - gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask16x16()) - case "MaskedShiftRight": - gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask16x16()) - case "MaskedShiftRightSignExtended": - gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask16x16()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask16x16()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask16x16()) + case "AverageMasked": + gotv = vec0.AverageMasked(vec1, vec2.AsMask16x16()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask16x16()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask16x16()) + case "MulHighMasked": + gotv = vec0.MulHighMasked(vec1, vec2.AsMask16x16()) + 
case "SaturatedAddMasked": + gotv = vec0.SaturatedAddMasked(vec1, vec2.AsMask16x16()) + case "SaturatedSubMasked": + gotv = vec0.SaturatedSubMasked(vec1, vec2.AsMask16x16()) + case "ShiftLeftMasked": + gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask16x16()) + case "ShiftRightMasked": + gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask16x16()) + case "ShiftRightSignExtendedMasked": + gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask16x16()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask16x16()) default: t.Errorf("Unknown method: Uint16x16.%s", which) @@ -5825,18 +5825,18 @@ func testUint16x16MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int vec1 := simd.LoadUint16x16Slice(v1) vec2 := simd.LoadInt16x16Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask16x16()).AsInt16x16() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask16x16()).AsInt16x16() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask16x16()).AsInt16x16() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask16x16()).AsInt16x16() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask16x16()).AsInt16x16() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask16x16()).AsInt16x16() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask16x16()).AsInt16x16() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask16x16()).AsInt16x16() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask16x16()).AsInt16x16() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask16x16()).AsInt16x16() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask16x16()).AsInt16x16() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask16x16()).AsInt16x16() default: t.Errorf("Unknown method: Uint16x16.%s", which) @@ -5882,10 +5882,10 @@ func testUint16x16TernaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []uin vec2 := simd.LoadUint16x16Slice(v2) vec3 := simd.LoadInt16x16Slice(v3) switch which { - case "MaskedShiftLeftAndFillUpperFrom": - gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask16x16()) - case "MaskedShiftRightAndFillUpperFrom": - gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask16x16()) + case "ShiftLeftAndFillUpperFromMasked": + gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask16x16()) + case "ShiftRightAndFillUpperFromMasked": + gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask16x16()) default: t.Errorf("Unknown method: Uint16x16.%s", which) @@ -5925,8 +5925,8 @@ func testUint16x16UnaryMasked(t *testing.T, v0 []uint16, v1 []int16, want []uint vec0 := simd.LoadUint16x16Slice(v0) vec1 := simd.LoadInt16x16Slice(v1) switch which { - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask16x16()) + case "PopCountMasked": + gotv = vec0.PopCountMasked(vec1.AsMask16x16()) default: t.Errorf("Unknown method: Uint16x16.%s", which) @@ -5988,28 +5988,28 @@ func testUint16x32BinaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []int1 vec1 := simd.LoadUint16x32Slice(v1) vec2 := simd.LoadInt16x32Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask16x32()) - case "MaskedAverage": - gotv = vec0.MaskedAverage(vec1, vec2.AsMask16x32()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask16x32()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask16x32()) - case 
"MaskedMulHigh": - gotv = vec0.MaskedMulHigh(vec1, vec2.AsMask16x32()) - case "MaskedSaturatedAdd": - gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x32()) - case "MaskedSaturatedSub": - gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x32()) - case "MaskedShiftLeft": - gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask16x32()) - case "MaskedShiftRight": - gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask16x32()) - case "MaskedShiftRightSignExtended": - gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask16x32()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask16x32()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask16x32()) + case "AverageMasked": + gotv = vec0.AverageMasked(vec1, vec2.AsMask16x32()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask16x32()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask16x32()) + case "MulHighMasked": + gotv = vec0.MulHighMasked(vec1, vec2.AsMask16x32()) + case "SaturatedAddMasked": + gotv = vec0.SaturatedAddMasked(vec1, vec2.AsMask16x32()) + case "SaturatedSubMasked": + gotv = vec0.SaturatedSubMasked(vec1, vec2.AsMask16x32()) + case "ShiftLeftMasked": + gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask16x32()) + case "ShiftRightMasked": + gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask16x32()) + case "ShiftRightSignExtendedMasked": + gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask16x32()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask16x32()) default: t.Errorf("Unknown method: Uint16x32.%s", which) @@ -6061,18 +6061,18 @@ func testUint16x32MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int vec1 := simd.LoadUint16x32Slice(v1) vec2 := simd.LoadInt16x32Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask16x32()).AsInt16x32() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask16x32()).AsInt16x32() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask16x32()).AsInt16x32() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask16x32()).AsInt16x32() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask16x32()).AsInt16x32() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask16x32()).AsInt16x32() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask16x32()).AsInt16x32() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask16x32()).AsInt16x32() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask16x32()).AsInt16x32() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask16x32()).AsInt16x32() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask16x32()).AsInt16x32() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask16x32()).AsInt16x32() default: t.Errorf("Unknown method: Uint16x32.%s", which) @@ -6118,10 +6118,10 @@ func testUint16x32TernaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []uin vec2 := simd.LoadUint16x32Slice(v2) vec3 := simd.LoadInt16x32Slice(v3) switch which { - case "MaskedShiftLeftAndFillUpperFrom": - gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask16x32()) - case "MaskedShiftRightAndFillUpperFrom": - gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask16x32()) + case "ShiftLeftAndFillUpperFromMasked": + gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask16x32()) + case "ShiftRightAndFillUpperFromMasked": + gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, 
vec3.AsMask16x32()) default: t.Errorf("Unknown method: Uint16x32.%s", which) @@ -6161,8 +6161,8 @@ func testUint16x32UnaryMasked(t *testing.T, v0 []uint16, v1 []int16, want []uint vec0 := simd.LoadUint16x32Slice(v0) vec1 := simd.LoadInt16x32Slice(v1) switch which { - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask16x32()) + case "PopCountMasked": + gotv = vec0.PopCountMasked(vec1.AsMask16x32()) default: t.Errorf("Unknown method: Uint16x32.%s", which) @@ -6232,32 +6232,32 @@ func testUint32x4BinaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32 vec1 := simd.LoadUint32x4Slice(v1) vec2 := simd.LoadInt32x4Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x4()) - case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x4()) - case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x4()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask32x4()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask32x4()) - case "MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask32x4()) - case "MaskedRotateLeft": - gotv = vec0.MaskedRotateLeft(vec1, vec2.AsMask32x4()) - case "MaskedRotateRight": - gotv = vec0.MaskedRotateRight(vec1, vec2.AsMask32x4()) - case "MaskedShiftLeft": - gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask32x4()) - case "MaskedShiftRight": - gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask32x4()) - case "MaskedShiftRightSignExtended": - gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask32x4()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask32x4()) - case "MaskedXor": - gotv = vec0.MaskedXor(vec1, vec2.AsMask32x4()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask32x4()) + case "AndMasked": + gotv = vec0.AndMasked(vec1, vec2.AsMask32x4()) + case "AndNotMasked": + gotv = vec0.AndNotMasked(vec1, vec2.AsMask32x4()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask32x4()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask32x4()) + case "OrMasked": + gotv = vec0.OrMasked(vec1, vec2.AsMask32x4()) + case "RotateLeftMasked": + gotv = vec0.RotateLeftMasked(vec1, vec2.AsMask32x4()) + case "RotateRightMasked": + gotv = vec0.RotateRightMasked(vec1, vec2.AsMask32x4()) + case "ShiftLeftMasked": + gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask32x4()) + case "ShiftRightMasked": + gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask32x4()) + case "ShiftRightSignExtendedMasked": + gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask32x4()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask32x4()) + case "XorMasked": + gotv = vec0.XorMasked(vec1, vec2.AsMask32x4()) default: t.Errorf("Unknown method: Uint32x4.%s", which) @@ -6330,18 +6330,18 @@ func testUint32x4MaskedCompare(t *testing.T, v0 []uint32, v1 []uint32, v2 []int3 vec1 := simd.LoadUint32x4Slice(v1) vec2 := simd.LoadInt32x4Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask32x4()).AsInt32x4() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask32x4()).AsInt32x4() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask32x4()).AsInt32x4() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask32x4()).AsInt32x4() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask32x4()).AsInt32x4() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask32x4()).AsInt32x4() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask32x4()).AsInt32x4() + case "GreaterEqualMasked": + 
gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask32x4()).AsInt32x4() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask32x4()).AsInt32x4() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask32x4()).AsInt32x4() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask32x4()).AsInt32x4() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask32x4()).AsInt32x4() default: t.Errorf("Unknown method: Uint32x4.%s", which) @@ -6387,10 +6387,10 @@ func testUint32x4TernaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []uint vec2 := simd.LoadUint32x4Slice(v2) vec3 := simd.LoadInt32x4Slice(v3) switch which { - case "MaskedShiftLeftAndFillUpperFrom": - gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask32x4()) - case "MaskedShiftRightAndFillUpperFrom": - gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask32x4()) + case "ShiftLeftAndFillUpperFromMasked": + gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask32x4()) + case "ShiftRightAndFillUpperFromMasked": + gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask32x4()) default: t.Errorf("Unknown method: Uint32x4.%s", which) @@ -6412,10 +6412,10 @@ func testUint32x4Uint8x16Int8x16Mask32x4Uint32x4(t *testing.T, v0 []uint32, v1 [ vec2 := simd.LoadInt8x16Slice(v2) vec3 := simd.LoadInt32x4Slice(v3) switch which { - case "MaskedSaturatedUnsignedSignedQuadDotProdAccumulate": - gotv = vec0.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x4()) - case "MaskedUnsignedSignedQuadDotProdAccumulate": - gotv = vec0.MaskedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x4()) + case "SaturatedUnsignedSignedQuadDotProdAccumulateMasked": + gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x4()) + case "UnsignedSignedQuadDotProdAccumulateMasked": + gotv = vec0.UnsignedSignedQuadDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x4()) default: t.Errorf("Unknown method: Uint32x4.%s", which) @@ -6479,8 +6479,8 @@ func testUint32x4UnaryMasked(t *testing.T, v0 []uint32, v1 []int32, want []uint3 vec0 := simd.LoadUint32x4Slice(v0) vec1 := simd.LoadInt32x4Slice(v1) switch which { - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask32x4()) + case "PopCountMasked": + gotv = vec0.PopCountMasked(vec1.AsMask32x4()) default: t.Errorf("Unknown method: Uint32x4.%s", which) @@ -6550,32 +6550,32 @@ func testUint32x8BinaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32 vec1 := simd.LoadUint32x8Slice(v1) vec2 := simd.LoadInt32x8Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x8()) - case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x8()) - case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x8()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask32x8()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask32x8()) - case "MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask32x8()) - case "MaskedRotateLeft": - gotv = vec0.MaskedRotateLeft(vec1, vec2.AsMask32x8()) - case "MaskedRotateRight": - gotv = vec0.MaskedRotateRight(vec1, vec2.AsMask32x8()) - case "MaskedShiftLeft": - gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask32x8()) - case "MaskedShiftRight": - gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask32x8()) - case "MaskedShiftRightSignExtended": - gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask32x8()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask32x8()) - case 
"MaskedXor": - gotv = vec0.MaskedXor(vec1, vec2.AsMask32x8()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask32x8()) + case "AndMasked": + gotv = vec0.AndMasked(vec1, vec2.AsMask32x8()) + case "AndNotMasked": + gotv = vec0.AndNotMasked(vec1, vec2.AsMask32x8()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask32x8()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask32x8()) + case "OrMasked": + gotv = vec0.OrMasked(vec1, vec2.AsMask32x8()) + case "RotateLeftMasked": + gotv = vec0.RotateLeftMasked(vec1, vec2.AsMask32x8()) + case "RotateRightMasked": + gotv = vec0.RotateRightMasked(vec1, vec2.AsMask32x8()) + case "ShiftLeftMasked": + gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask32x8()) + case "ShiftRightMasked": + gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask32x8()) + case "ShiftRightSignExtendedMasked": + gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask32x8()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask32x8()) + case "XorMasked": + gotv = vec0.XorMasked(vec1, vec2.AsMask32x8()) default: t.Errorf("Unknown method: Uint32x8.%s", which) @@ -6648,18 +6648,18 @@ func testUint32x8MaskedCompare(t *testing.T, v0 []uint32, v1 []uint32, v2 []int3 vec1 := simd.LoadUint32x8Slice(v1) vec2 := simd.LoadInt32x8Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask32x8()).AsInt32x8() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask32x8()).AsInt32x8() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask32x8()).AsInt32x8() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask32x8()).AsInt32x8() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask32x8()).AsInt32x8() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask32x8()).AsInt32x8() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask32x8()).AsInt32x8() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask32x8()).AsInt32x8() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask32x8()).AsInt32x8() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask32x8()).AsInt32x8() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask32x8()).AsInt32x8() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask32x8()).AsInt32x8() default: t.Errorf("Unknown method: Uint32x8.%s", which) @@ -6705,10 +6705,10 @@ func testUint32x8TernaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []uint vec2 := simd.LoadUint32x8Slice(v2) vec3 := simd.LoadInt32x8Slice(v3) switch which { - case "MaskedShiftLeftAndFillUpperFrom": - gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask32x8()) - case "MaskedShiftRightAndFillUpperFrom": - gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask32x8()) + case "ShiftLeftAndFillUpperFromMasked": + gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask32x8()) + case "ShiftRightAndFillUpperFromMasked": + gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask32x8()) default: t.Errorf("Unknown method: Uint32x8.%s", which) @@ -6730,10 +6730,10 @@ func testUint32x8Uint8x32Int8x32Mask32x8Uint32x8(t *testing.T, v0 []uint32, v1 [ vec2 := simd.LoadInt8x32Slice(v2) vec3 := simd.LoadInt32x8Slice(v3) switch which { - case "MaskedSaturatedUnsignedSignedQuadDotProdAccumulate": - gotv = vec0.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x8()) - case 
"MaskedUnsignedSignedQuadDotProdAccumulate": - gotv = vec0.MaskedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x8()) + case "SaturatedUnsignedSignedQuadDotProdAccumulateMasked": + gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x8()) + case "UnsignedSignedQuadDotProdAccumulateMasked": + gotv = vec0.UnsignedSignedQuadDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x8()) default: t.Errorf("Unknown method: Uint32x8.%s", which) @@ -6797,8 +6797,8 @@ func testUint32x8UnaryMasked(t *testing.T, v0 []uint32, v1 []int32, want []uint3 vec0 := simd.LoadUint32x8Slice(v0) vec1 := simd.LoadInt32x8Slice(v1) switch which { - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask32x8()) + case "PopCountMasked": + gotv = vec0.PopCountMasked(vec1.AsMask32x8()) default: t.Errorf("Unknown method: Uint32x8.%s", which) @@ -6864,32 +6864,32 @@ func testUint32x16BinaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []int3 vec1 := simd.LoadUint32x16Slice(v1) vec2 := simd.LoadInt32x16Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x16()) - case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x16()) - case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x16()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask32x16()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask32x16()) - case "MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask32x16()) - case "MaskedRotateLeft": - gotv = vec0.MaskedRotateLeft(vec1, vec2.AsMask32x16()) - case "MaskedRotateRight": - gotv = vec0.MaskedRotateRight(vec1, vec2.AsMask32x16()) - case "MaskedShiftLeft": - gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask32x16()) - case "MaskedShiftRight": - gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask32x16()) - case "MaskedShiftRightSignExtended": - gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask32x16()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask32x16()) - case "MaskedXor": - gotv = vec0.MaskedXor(vec1, vec2.AsMask32x16()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask32x16()) + case "AndMasked": + gotv = vec0.AndMasked(vec1, vec2.AsMask32x16()) + case "AndNotMasked": + gotv = vec0.AndNotMasked(vec1, vec2.AsMask32x16()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask32x16()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask32x16()) + case "OrMasked": + gotv = vec0.OrMasked(vec1, vec2.AsMask32x16()) + case "RotateLeftMasked": + gotv = vec0.RotateLeftMasked(vec1, vec2.AsMask32x16()) + case "RotateRightMasked": + gotv = vec0.RotateRightMasked(vec1, vec2.AsMask32x16()) + case "ShiftLeftMasked": + gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask32x16()) + case "ShiftRightMasked": + gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask32x16()) + case "ShiftRightSignExtendedMasked": + gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask32x16()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask32x16()) + case "XorMasked": + gotv = vec0.XorMasked(vec1, vec2.AsMask32x16()) default: t.Errorf("Unknown method: Uint32x16.%s", which) @@ -6941,18 +6941,18 @@ func testUint32x16MaskedCompare(t *testing.T, v0 []uint32, v1 []uint32, v2 []int vec1 := simd.LoadUint32x16Slice(v1) vec2 := simd.LoadInt32x16Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask32x16()).AsInt32x16() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask32x16()).AsInt32x16() - case "MaskedGreaterEqual": - gotv = 
vec0.MaskedGreaterEqual(vec1, vec2.AsMask32x16()).AsInt32x16() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask32x16()).AsInt32x16() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask32x16()).AsInt32x16() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask32x16()).AsInt32x16() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask32x16()).AsInt32x16() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() default: t.Errorf("Unknown method: Uint32x16.%s", which) @@ -6998,10 +6998,10 @@ func testUint32x16TernaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []uin vec2 := simd.LoadUint32x16Slice(v2) vec3 := simd.LoadInt32x16Slice(v3) switch which { - case "MaskedShiftLeftAndFillUpperFrom": - gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask32x16()) - case "MaskedShiftRightAndFillUpperFrom": - gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask32x16()) + case "ShiftLeftAndFillUpperFromMasked": + gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask32x16()) + case "ShiftRightAndFillUpperFromMasked": + gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask32x16()) default: t.Errorf("Unknown method: Uint32x16.%s", which) @@ -7023,10 +7023,10 @@ func testUint32x16Uint8x64Int8x64Mask32x16Uint32x16(t *testing.T, v0 []uint32, v vec2 := simd.LoadInt8x64Slice(v2) vec3 := simd.LoadInt32x16Slice(v3) switch which { - case "MaskedSaturatedUnsignedSignedQuadDotProdAccumulate": - gotv = vec0.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x16()) - case "MaskedUnsignedSignedQuadDotProdAccumulate": - gotv = vec0.MaskedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x16()) + case "SaturatedUnsignedSignedQuadDotProdAccumulateMasked": + gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x16()) + case "UnsignedSignedQuadDotProdAccumulateMasked": + gotv = vec0.UnsignedSignedQuadDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x16()) default: t.Errorf("Unknown method: Uint32x16.%s", which) @@ -7090,8 +7090,8 @@ func testUint32x16UnaryMasked(t *testing.T, v0 []uint32, v1 []int32, want []uint vec0 := simd.LoadUint32x16Slice(v0) vec1 := simd.LoadInt32x16Slice(v1) switch which { - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask32x16()) + case "PopCountMasked": + gotv = vec0.PopCountMasked(vec1.AsMask32x16()) default: t.Errorf("Unknown method: Uint32x16.%s", which) @@ -7159,34 +7159,34 @@ func testUint64x2BinaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64 vec1 := simd.LoadUint64x2Slice(v1) vec2 := simd.LoadInt64x2Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x2()) - case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x2()) - case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask64x2()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask64x2()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask64x2()) - case "MaskedMulEvenWiden": - gotv = vec0.MaskedMulEvenWiden(vec1, vec2.AsMask64x2()) - case 
"MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask64x2()) - case "MaskedRotateLeft": - gotv = vec0.MaskedRotateLeft(vec1, vec2.AsMask64x2()) - case "MaskedRotateRight": - gotv = vec0.MaskedRotateRight(vec1, vec2.AsMask64x2()) - case "MaskedShiftLeft": - gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask64x2()) - case "MaskedShiftRight": - gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask64x2()) - case "MaskedShiftRightSignExtended": - gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask64x2()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask64x2()) - case "MaskedXor": - gotv = vec0.MaskedXor(vec1, vec2.AsMask64x2()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask64x2()) + case "AndMasked": + gotv = vec0.AndMasked(vec1, vec2.AsMask64x2()) + case "AndNotMasked": + gotv = vec0.AndNotMasked(vec1, vec2.AsMask64x2()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask64x2()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask64x2()) + case "MulEvenWidenMasked": + gotv = vec0.MulEvenWidenMasked(vec1, vec2.AsMask64x2()) + case "OrMasked": + gotv = vec0.OrMasked(vec1, vec2.AsMask64x2()) + case "RotateLeftMasked": + gotv = vec0.RotateLeftMasked(vec1, vec2.AsMask64x2()) + case "RotateRightMasked": + gotv = vec0.RotateRightMasked(vec1, vec2.AsMask64x2()) + case "ShiftLeftMasked": + gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask64x2()) + case "ShiftRightMasked": + gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask64x2()) + case "ShiftRightSignExtendedMasked": + gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask64x2()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask64x2()) + case "XorMasked": + gotv = vec0.XorMasked(vec1, vec2.AsMask64x2()) default: t.Errorf("Unknown method: Uint64x2.%s", which) @@ -7238,18 +7238,18 @@ func testUint64x2MaskedCompare(t *testing.T, v0 []uint64, v1 []uint64, v2 []int6 vec1 := simd.LoadUint64x2Slice(v1) vec2 := simd.LoadInt64x2Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask64x2()).AsInt64x2() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask64x2()).AsInt64x2() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask64x2()).AsInt64x2() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask64x2()).AsInt64x2() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask64x2()).AsInt64x2() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask64x2()).AsInt64x2() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask64x2()).AsInt64x2() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask64x2()).AsInt64x2() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask64x2()).AsInt64x2() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask64x2()).AsInt64x2() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask64x2()).AsInt64x2() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask64x2()).AsInt64x2() default: t.Errorf("Unknown method: Uint64x2.%s", which) @@ -7295,10 +7295,10 @@ func testUint64x2TernaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []uint vec2 := simd.LoadUint64x2Slice(v2) vec3 := simd.LoadInt64x2Slice(v3) switch which { - case "MaskedShiftLeftAndFillUpperFrom": - gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask64x2()) - case "MaskedShiftRightAndFillUpperFrom": - gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask64x2()) + case 
"ShiftLeftAndFillUpperFromMasked": + gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask64x2()) + case "ShiftRightAndFillUpperFromMasked": + gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask64x2()) default: t.Errorf("Unknown method: Uint64x2.%s", which) @@ -7338,8 +7338,8 @@ func testUint64x2UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint6 vec0 := simd.LoadUint64x2Slice(v0) vec1 := simd.LoadInt64x2Slice(v1) switch which { - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask64x2()) + case "PopCountMasked": + gotv = vec0.PopCountMasked(vec1.AsMask64x2()) default: t.Errorf("Unknown method: Uint64x2.%s", which) @@ -7407,34 +7407,34 @@ func testUint64x4BinaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64 vec1 := simd.LoadUint64x4Slice(v1) vec2 := simd.LoadInt64x4Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x4()) - case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x4()) - case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask64x4()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask64x4()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask64x4()) - case "MaskedMulEvenWiden": - gotv = vec0.MaskedMulEvenWiden(vec1, vec2.AsMask64x4()) - case "MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask64x4()) - case "MaskedRotateLeft": - gotv = vec0.MaskedRotateLeft(vec1, vec2.AsMask64x4()) - case "MaskedRotateRight": - gotv = vec0.MaskedRotateRight(vec1, vec2.AsMask64x4()) - case "MaskedShiftLeft": - gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask64x4()) - case "MaskedShiftRight": - gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask64x4()) - case "MaskedShiftRightSignExtended": - gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask64x4()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask64x4()) - case "MaskedXor": - gotv = vec0.MaskedXor(vec1, vec2.AsMask64x4()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask64x4()) + case "AndMasked": + gotv = vec0.AndMasked(vec1, vec2.AsMask64x4()) + case "AndNotMasked": + gotv = vec0.AndNotMasked(vec1, vec2.AsMask64x4()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask64x4()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask64x4()) + case "MulEvenWidenMasked": + gotv = vec0.MulEvenWidenMasked(vec1, vec2.AsMask64x4()) + case "OrMasked": + gotv = vec0.OrMasked(vec1, vec2.AsMask64x4()) + case "RotateLeftMasked": + gotv = vec0.RotateLeftMasked(vec1, vec2.AsMask64x4()) + case "RotateRightMasked": + gotv = vec0.RotateRightMasked(vec1, vec2.AsMask64x4()) + case "ShiftLeftMasked": + gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask64x4()) + case "ShiftRightMasked": + gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask64x4()) + case "ShiftRightSignExtendedMasked": + gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask64x4()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask64x4()) + case "XorMasked": + gotv = vec0.XorMasked(vec1, vec2.AsMask64x4()) default: t.Errorf("Unknown method: Uint64x4.%s", which) @@ -7486,18 +7486,18 @@ func testUint64x4MaskedCompare(t *testing.T, v0 []uint64, v1 []uint64, v2 []int6 vec1 := simd.LoadUint64x4Slice(v1) vec2 := simd.LoadInt64x4Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask64x4()).AsInt64x4() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask64x4()).AsInt64x4() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask64x4()).AsInt64x4() - 
case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask64x4()).AsInt64x4() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask64x4()).AsInt64x4() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask64x4()).AsInt64x4() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask64x4()).AsInt64x4() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask64x4()).AsInt64x4() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask64x4()).AsInt64x4() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask64x4()).AsInt64x4() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask64x4()).AsInt64x4() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask64x4()).AsInt64x4() default: t.Errorf("Unknown method: Uint64x4.%s", which) @@ -7543,10 +7543,10 @@ func testUint64x4TernaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []uint vec2 := simd.LoadUint64x4Slice(v2) vec3 := simd.LoadInt64x4Slice(v3) switch which { - case "MaskedShiftLeftAndFillUpperFrom": - gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask64x4()) - case "MaskedShiftRightAndFillUpperFrom": - gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask64x4()) + case "ShiftLeftAndFillUpperFromMasked": + gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask64x4()) + case "ShiftRightAndFillUpperFromMasked": + gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask64x4()) default: t.Errorf("Unknown method: Uint64x4.%s", which) @@ -7586,8 +7586,8 @@ func testUint64x4UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint6 vec0 := simd.LoadUint64x4Slice(v0) vec1 := simd.LoadInt64x4Slice(v1) switch which { - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask64x4()) + case "PopCountMasked": + gotv = vec0.PopCountMasked(vec1.AsMask64x4()) default: t.Errorf("Unknown method: Uint64x4.%s", which) @@ -7655,34 +7655,34 @@ func testUint64x8BinaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64 vec1 := simd.LoadUint64x8Slice(v1) vec2 := simd.LoadInt64x8Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x8()) - case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x8()) - case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask64x8()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask64x8()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask64x8()) - case "MaskedMulEvenWiden": - gotv = vec0.MaskedMulEvenWiden(vec1, vec2.AsMask64x8()) - case "MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask64x8()) - case "MaskedRotateLeft": - gotv = vec0.MaskedRotateLeft(vec1, vec2.AsMask64x8()) - case "MaskedRotateRight": - gotv = vec0.MaskedRotateRight(vec1, vec2.AsMask64x8()) - case "MaskedShiftLeft": - gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask64x8()) - case "MaskedShiftRight": - gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask64x8()) - case "MaskedShiftRightSignExtended": - gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask64x8()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask64x8()) - case "MaskedXor": - gotv = vec0.MaskedXor(vec1, vec2.AsMask64x8()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask64x8()) + case "AndMasked": + gotv = vec0.AndMasked(vec1, vec2.AsMask64x8()) + case "AndNotMasked": + gotv = vec0.AndNotMasked(vec1, vec2.AsMask64x8()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask64x8()) + case "MinMasked": + gotv 
= vec0.MinMasked(vec1, vec2.AsMask64x8()) + case "MulEvenWidenMasked": + gotv = vec0.MulEvenWidenMasked(vec1, vec2.AsMask64x8()) + case "OrMasked": + gotv = vec0.OrMasked(vec1, vec2.AsMask64x8()) + case "RotateLeftMasked": + gotv = vec0.RotateLeftMasked(vec1, vec2.AsMask64x8()) + case "RotateRightMasked": + gotv = vec0.RotateRightMasked(vec1, vec2.AsMask64x8()) + case "ShiftLeftMasked": + gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask64x8()) + case "ShiftRightMasked": + gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask64x8()) + case "ShiftRightSignExtendedMasked": + gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask64x8()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask64x8()) + case "XorMasked": + gotv = vec0.XorMasked(vec1, vec2.AsMask64x8()) default: t.Errorf("Unknown method: Uint64x8.%s", which) @@ -7734,18 +7734,18 @@ func testUint64x8MaskedCompare(t *testing.T, v0 []uint64, v1 []uint64, v2 []int6 vec1 := simd.LoadUint64x8Slice(v1) vec2 := simd.LoadInt64x8Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask64x8()).AsInt64x8() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask64x8()).AsInt64x8() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask64x8()).AsInt64x8() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask64x8()).AsInt64x8() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask64x8()).AsInt64x8() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask64x8()).AsInt64x8() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask64x8()).AsInt64x8() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask64x8()).AsInt64x8() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask64x8()).AsInt64x8() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask64x8()).AsInt64x8() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask64x8()).AsInt64x8() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask64x8()).AsInt64x8() default: t.Errorf("Unknown method: Uint64x8.%s", which) @@ -7791,10 +7791,10 @@ func testUint64x8TernaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []uint vec2 := simd.LoadUint64x8Slice(v2) vec3 := simd.LoadInt64x8Slice(v3) switch which { - case "MaskedShiftLeftAndFillUpperFrom": - gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask64x8()) - case "MaskedShiftRightAndFillUpperFrom": - gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask64x8()) + case "ShiftLeftAndFillUpperFromMasked": + gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask64x8()) + case "ShiftRightAndFillUpperFromMasked": + gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask64x8()) default: t.Errorf("Unknown method: Uint64x8.%s", which) @@ -7834,8 +7834,8 @@ func testUint64x8UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint6 vec0 := simd.LoadUint64x8Slice(v0) vec1 := simd.LoadInt64x8Slice(v1) switch which { - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask64x8()) + case "PopCountMasked": + gotv = vec0.PopCountMasked(vec1.AsMask64x8()) default: t.Errorf("Unknown method: Uint64x8.%s", which) @@ -7851,40 +7851,40 @@ func testUint64x8UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint6 /* The operations below cannot be tested via wrappers, please test them directly */ // CeilWithPrecision +// CeilWithPrecisionMasked // DiffWithCeilWithPrecision +// 
DiffWithCeilWithPrecisionMasked // DiffWithFloorWithPrecision +// DiffWithFloorWithPrecisionMasked // DiffWithRoundWithPrecision +// DiffWithRoundWithPrecisionMasked // DiffWithTruncWithPrecision +// DiffWithTruncWithPrecisionMasked // FloorWithPrecision +// FloorWithPrecisionMasked // GaloisFieldAffineTransform // GaloisFieldAffineTransformInversed +// GaloisFieldAffineTransformInversedMasked +// GaloisFieldAffineTransformMasked // Get128 // GetElem -// MaskedCeilWithPrecision -// MaskedDiffWithCeilWithPrecision -// MaskedDiffWithFloorWithPrecision -// MaskedDiffWithRoundWithPrecision -// MaskedDiffWithTruncWithPrecision -// MaskedFloorWithPrecision -// MaskedGaloisFieldAffineTransform -// MaskedGaloisFieldAffineTransformInversed -// MaskedRotateAllLeft -// MaskedRotateAllRight -// MaskedRoundWithPrecision -// MaskedShiftAllLeft -// MaskedShiftAllLeftAndFillUpperFrom -// MaskedShiftAllRight -// MaskedShiftAllRightAndFillUpperFrom -// MaskedShiftAllRightSignExtended -// MaskedTruncWithPrecision // RotateAllLeft +// RotateAllLeftMasked // RotateAllRight +// RotateAllRightMasked // RoundWithPrecision +// RoundWithPrecisionMasked // Set128 // SetElem // ShiftAllLeft // ShiftAllLeftAndFillUpperFrom +// ShiftAllLeftAndFillUpperFromMasked +// ShiftAllLeftMasked // ShiftAllRight // ShiftAllRightAndFillUpperFrom +// ShiftAllRightAndFillUpperFromMasked +// ShiftAllRightMasked // ShiftAllRightSignExtended +// ShiftAllRightSignExtendedMasked // TruncWithPrecision +// TruncWithPrecisionMasked From 5429328b0cc6a6749c37a7a91ecee8b8eb644c2a Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Tue, 8 Jul 2025 20:12:24 +0000 Subject: [PATCH 069/139] [dev.simd] cmd/compile: change register mask names for simd ops This CL contains codes generated by CL 686556. Change-Id: I4d7287476b478efdc186a64c12de33528c7fb0af Reviewed-on: https://go-review.googlesource.com/c/go/+/686476 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/simdssa.go | 32 +- src/cmd/compile/internal/amd64/ssa.go | 104 +- src/cmd/compile/internal/ssa/_gen/AMD64Ops.go | 75 +- .../compile/internal/ssa/_gen/simdAMD64ops.go | 1742 ++++++++--------- 4 files changed, 997 insertions(+), 956 deletions(-) diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 50339bf202d09e..d87548c27faa5b 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -54,7 +54,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VSQRTPD128, ssa.OpAMD64VSQRTPD256, ssa.OpAMD64VSQRTPD512: - p = simdFp11(s, v) + p = simdV11(s, v) case ssa.OpAMD64VADDPS128, ssa.OpAMD64VADDPS256, @@ -318,7 +318,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPXOR256, ssa.OpAMD64VPXORD512, ssa.OpAMD64VPXORQ512: - p = simdFp21(s, v) + p = simdV21(s, v) case ssa.OpAMD64VADDPSMasked128, ssa.OpAMD64VADDPSMasked256, @@ -545,7 +545,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPXORQMasked128, ssa.OpAMD64VPXORQMasked256, ssa.OpAMD64VPXORQMasked512: - p = simdFp2kfp(s, v) + p = simdV2kv(s, v) case ssa.OpAMD64VPABSBMasked128, ssa.OpAMD64VPABSBMasked256, @@ -589,7 +589,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VSQRTPDMasked128, ssa.OpAMD64VSQRTPDMasked256, ssa.OpAMD64VSQRTPDMasked512: - p = simdFpkfp(s, v) + p = simdVkv(s, v) case ssa.OpAMD64VROUNDPS128, ssa.OpAMD64VROUNDPS256, @@ -621,7 +621,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { 
ssa.OpAMD64VPRORQ128, ssa.OpAMD64VPRORQ256, ssa.OpAMD64VPRORQ512: - p = simdFp11Imm8(s, v) + p = simdV11Imm8(s, v) case ssa.OpAMD64VRNDSCALEPSMasked128, ssa.OpAMD64VRNDSCALEPSMasked256, @@ -647,7 +647,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPRORQMasked128, ssa.OpAMD64VPRORQMasked256, ssa.OpAMD64VPRORQMasked512: - p = simdFpkfpImm8(s, v) + p = simdVkvImm8(s, v) case ssa.OpAMD64VDPPD128, ssa.OpAMD64VCMPPS128, @@ -680,7 +680,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSHRDQ128, ssa.OpAMD64VPSHRDQ256, ssa.OpAMD64VPSHRDQ512: - p = simdFp21Imm8(s, v) + p = simdV21Imm8(s, v) case ssa.OpAMD64VCMPPS512, ssa.OpAMD64VCMPPD512, @@ -708,7 +708,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPCMPD128, ssa.OpAMD64VPCMPD256, ssa.OpAMD64VPCMPQ256: - p = simdFp2kImm8(s, v) + p = simdV2kImm8(s, v) case ssa.OpAMD64VCMPPSMasked128, ssa.OpAMD64VCMPPSMasked256, @@ -740,7 +740,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPCMPUQMasked128, ssa.OpAMD64VPCMPUQMasked256, ssa.OpAMD64VPCMPUQMasked512: - p = simdFp2kkImm8(s, v) + p = simdV2kkImm8(s, v) case ssa.OpAMD64VFMADD213PS128, ssa.OpAMD64VFMADD213PS256, @@ -790,7 +790,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPDPBUSD128, ssa.OpAMD64VPDPBUSD256, ssa.OpAMD64VPDPBUSD512: - p = simdFp31ResultInArg0(s, v) + p = simdV31ResultInArg0(s, v) case ssa.OpAMD64VFMADD213PSMasked128, ssa.OpAMD64VFMADD213PSMasked256, @@ -840,7 +840,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPDPBUSDMasked128, ssa.OpAMD64VPDPBUSDMasked256, ssa.OpAMD64VPDPBUSDMasked512: - p = simdFp3kfpResultInArg0(s, v) + p = simdV3kvResultInArg0(s, v) case ssa.OpAMD64VPSLLW128, ssa.OpAMD64VPSLLW256, @@ -863,7 +863,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSRAQ128, ssa.OpAMD64VPSRAQ256, ssa.OpAMD64VPSRAQ512: - p = simdFpXfp(s, v) + p = simdVfpv(s, v) case ssa.OpAMD64VPSLLQMasked128, ssa.OpAMD64VPSLLQMasked256, @@ -874,19 +874,19 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSRAQMasked128, ssa.OpAMD64VPSRAQMasked256, ssa.OpAMD64VPSRAQMasked512: - p = simdFpXkfp(s, v) + p = simdVfpkv(s, v) case ssa.OpAMD64VPINSRB128, ssa.OpAMD64VPINSRW128, ssa.OpAMD64VPINSRD128, ssa.OpAMD64VPINSRQ128: - p = simdFpgpfpImm8(s, v) + p = simdVgpvImm8(s, v) case ssa.OpAMD64VPEXTRB128, ssa.OpAMD64VPEXTRW128, ssa.OpAMD64VPEXTRD128, ssa.OpAMD64VPEXTRQ128: - p = simdFpgpImm8(s, v) + p = simdVgpImm8(s, v) case ssa.OpAMD64VGF2P8AFFINEINVQBMasked128, ssa.OpAMD64VGF2P8AFFINEINVQBMasked256, @@ -912,7 +912,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSHRDQMasked128, ssa.OpAMD64VPSHRDQMasked256, ssa.OpAMD64VPSHRDQMasked512: - p = simdFp2kfpImm8(s, v) + p = simdV2kvImm8(s, v) default: // Unknown reg shape diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index fadac162820554..8bc7cf83a35be4 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -1518,7 +1518,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { } // Example instruction: VRSQRTPS X1, X1 -func simdFp11(s *ssagen.State, v *ssa.Value) *obj.Prog { +func simdV11(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = simdReg(v.Args[0]) @@ -1528,7 +1528,7 @@ func simdFp11(s *ssagen.State, v *ssa.Value) *obj.Prog { } // Example instruction: VPSUBD X1, X2, X3 -func 
simdFp21(s *ssagen.State, v *ssa.Value) *obj.Prog { +func simdV21(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG // Vector register operands follow a right-to-left order. @@ -1543,7 +1543,7 @@ func simdFp21(s *ssagen.State, v *ssa.Value) *obj.Prog { // This function is to accommodate the shifts. // The 2nd arg is an XMM, and this function merely checks that. // Example instruction: VPSLLQ Z1, X1, Z2 -func simdFpXfp(s *ssagen.State, v *ssa.Value) *obj.Prog { +func simdVfpv(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG // Vector register operands follow a right-to-left order. @@ -1556,13 +1556,18 @@ func simdFpXfp(s *ssagen.State, v *ssa.Value) *obj.Prog { } // Example instruction: VPCMPEQW Z26, Z30, K4 -func simdFp2k(s *ssagen.State, v *ssa.Value) *obj.Prog { - // simdReg handles mask and vector registers altogether - return simdFp21(s, v) +func simdV2k(s *ssagen.State, v *ssa.Value) *obj.Prog { + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = simdReg(v.Args[1]) + p.AddRestSourceReg(simdReg(v.Args[0])) + p.To.Type = obj.TYPE_REG + p.To.Reg = maskReg(v) + return p } // Example instruction: VPMINUQ X21, X3, K3, X31 -func simdFp2kfp(s *ssagen.State, v *ssa.Value) *obj.Prog { +func simdV2kv(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = simdReg(v.Args[1]) @@ -1572,7 +1577,7 @@ func simdFp2kfp(s *ssagen.State, v *ssa.Value) *obj.Prog { // or "predicate" for "predicated AVX512 instructions" // sits right at the end of the operand list. // TODO: verify this assumption. - p.AddRestSourceReg(simdReg(v.Args[2])) + p.AddRestSourceReg(maskReg(v.Args[2])) p.To.Type = obj.TYPE_REG p.To.Reg = simdReg(v) return p @@ -1581,35 +1586,42 @@ func simdFp2kfp(s *ssagen.State, v *ssa.Value) *obj.Prog { // This function is to accommodate the shifts. // The 2nd arg is an XMM, and this function merely checks that.
// Example instruction: VPSLLQ Z1, X1, K1, Z2 -func simdFpXkfp(s *ssagen.State, v *ssa.Value) *obj.Prog { +func simdVfpkv(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = v.Args[1].Reg() p.AddRestSourceReg(simdReg(v.Args[0])) - p.AddRestSourceReg(simdReg(v.Args[2])) + p.AddRestSourceReg(maskReg(v.Args[2])) p.To.Type = obj.TYPE_REG p.To.Reg = simdReg(v) return p } // Example instruction: VPCMPEQW Z26, Z30, K1, K4 -func simdFp2kk(s *ssagen.State, v *ssa.Value) *obj.Prog { - return simdFp2kfp(s, v) +func simdV2kk(s *ssagen.State, v *ssa.Value) *obj.Prog { + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = simdReg(v.Args[1]) + p.AddRestSourceReg(simdReg(v.Args[0])) + p.AddRestSourceReg(maskReg(v.Args[2])) + p.To.Type = obj.TYPE_REG + p.To.Reg = maskReg(v) + return p } // Example instruction: VPOPCNTB X14, K4, X16 -func simdFpkfp(s *ssagen.State, v *ssa.Value) *obj.Prog { +func simdVkv(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = simdReg(v.Args[0]) - p.AddRestSourceReg(simdReg(v.Args[1])) + p.AddRestSourceReg(maskReg(v.Args[1])) p.To.Type = obj.TYPE_REG p.To.Reg = simdReg(v) return p } // Example instruction: VROUNDPD $7, X2, X2 -func simdFp11Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog { +func simdV11Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) imm := v.AuxInt if imm < 0 || imm > 255 { @@ -1624,7 +1636,7 @@ func simdFp11Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog { } // Example instruction: VREDUCEPD $126, X1, K3, X31 -func simdFpkfpImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { +func simdVkvImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) imm := v.AuxInt if imm < 0 || imm > 255 { @@ -1633,14 +1645,14 @@ func simdFpkfpImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { p.From.Offset = imm p.From.Type = obj.TYPE_CONST p.AddRestSourceReg(simdReg(v.Args[0])) - p.AddRestSourceReg(simdReg(v.Args[1])) + p.AddRestSourceReg(maskReg(v.Args[1])) p.To.Type = obj.TYPE_REG p.To.Reg = simdReg(v) return p } // Example instruction: VCMPPS $7, X2, X9, X2 -func simdFp21Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog { +func simdV21Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) imm := v.AuxInt if imm < 0 || imm > 255 { @@ -1656,7 +1668,7 @@ func simdFp21Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog { } // Example instruction: VPINSRB $3, DX, X0, X0 -func simdFpgpfpImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { +func simdVgpvImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) imm := v.AuxInt if imm < 0 || imm > 255 { @@ -1672,12 +1684,23 @@ func simdFpgpfpImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { } // Example instruction: VPCMPD $1, Z1, Z2, K1 -func simdFp2kImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { - return simdFp21Imm8(s, v) +func simdV2kImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { + p := s.Prog(v.Op.Asm()) + imm := v.AuxInt + if imm < 0 || imm > 255 { + v.Fatalf("Invalid source selection immediate") + } + p.From.Offset = imm + p.From.Type = obj.TYPE_CONST + p.AddRestSourceReg(simdReg(v.Args[1])) + p.AddRestSourceReg(simdReg(v.Args[0])) + p.To.Type = obj.TYPE_REG + p.To.Reg = maskReg(v) + return p } // Example instruction: VPCMPD $1, Z1, Z2, K2, K1 -func simdFp2kkImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { +func simdV2kkImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) imm := v.AuxInt if imm < 0 || imm > 255 { @@ -1687,18 +1710,18 @@ func 
simdFp2kkImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { p.From.Type = obj.TYPE_CONST p.AddRestSourceReg(simdReg(v.Args[1])) p.AddRestSourceReg(simdReg(v.Args[0])) - p.AddRestSourceReg(simdReg(v.Args[2])) + p.AddRestSourceReg(maskReg(v.Args[2])) p.To.Type = obj.TYPE_REG - p.To.Reg = simdReg(v) + p.To.Reg = maskReg(v) return p } -func simdFp2kfpImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { - return simdFp2kkImm8(s, v) +func simdV2kvImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { + return simdV2kkImm8(s, v) } // Example instruction: VFMADD213PD Z2, Z1, Z0 -func simdFp31ResultInArg0(s *ssagen.State, v *ssa.Value) *obj.Prog { +func simdV31ResultInArg0(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = simdReg(v.Args[2]) @@ -1709,18 +1732,18 @@ func simdFp31ResultInArg0(s *ssagen.State, v *ssa.Value) *obj.Prog { } // Example instruction: VFMADD213PD Z2, Z1, K1, Z0 -func simdFp3kfpResultInArg0(s *ssagen.State, v *ssa.Value) *obj.Prog { +func simdV3kvResultInArg0(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = simdReg(v.Args[2]) p.AddRestSourceReg(simdReg(v.Args[1])) - p.AddRestSourceReg(simdReg(v.Args[3])) + p.AddRestSourceReg(maskReg(v.Args[3])) p.To.Type = obj.TYPE_REG p.To.Reg = simdReg(v) return p } -func simdFpgpImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { +func simdVgpImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) imm := v.AuxInt if imm < 0 || imm > 255 { @@ -1735,7 +1758,7 @@ func simdFpgpImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { } // Currently unused -func simdFp31(s *ssagen.State, v *ssa.Value) *obj.Prog { +func simdV31(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = simdReg(v.Args[2]) @@ -1747,13 +1770,13 @@ func simdFp31(s *ssagen.State, v *ssa.Value) *obj.Prog { } // Currently unused -func simdFp3kfp(s *ssagen.State, v *ssa.Value) *obj.Prog { +func simdV3kv(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = simdReg(v.Args[2]) p.AddRestSourceReg(simdReg(v.Args[1])) p.AddRestSourceReg(simdReg(v.Args[0])) - p.AddRestSourceReg(simdReg(v.Args[3])) + p.AddRestSourceReg(maskReg(v.Args[3])) p.To.Type = obj.TYPE_REG p.To.Reg = simdReg(v) return p @@ -1869,8 +1892,6 @@ func simdReg(v *ssa.Value) int16 { base.Fatalf("simdReg: not a simd type; v=%s, b=b%d, f=%s", v.LongString(), v.Block.ID, v.Block.Func.Name) } switch t.Size() { - case 8: - return v.Reg() // K registers case 16: return v.Reg() case 32: @@ -1881,6 +1902,19 @@ func simdReg(v *ssa.Value) int16 { panic("unreachable") } +// maskReg returns the register for a mask (K register) value. +func maskReg(v *ssa.Value) int16 { + t := v.Type + if !t.IsSIMD() { + base.Fatalf("maskReg: not a simd type; v=%s, b=b%d, f=%s", v.LongString(), v.Block.ID, v.Block.Func.Name) + } + switch t.Size() { + case 8: + return v.Reg() + } + panic("unreachable") +} + // XXX this is used for shift operations only.
// regalloc will issue OpCopy with incorrect type, but the assigned // register should be correct, and this function is merely checking diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go index 9ff77736f03cf7..17cc799b328d27 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go @@ -109,6 +109,7 @@ func init() { gp = buildReg("AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15") g = buildReg("g") fp = buildReg("X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14") + v = buildReg("X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14") x15 = buildReg("X15") mask = buildReg("K1 K2 K3 K4 K5 K6 K7") gpsp = gp | buildReg("SP") @@ -120,6 +121,7 @@ func init() { var ( gponly = []regMask{gp} fponly = []regMask{fp} + vonly = []regMask{v} maskonly = []regMask{mask} ) @@ -182,15 +184,20 @@ func init() { fpstore = regInfo{inputs: []regMask{gpspsb, fp, 0}} fpstoreidx = regInfo{inputs: []regMask{gpspsb, gpsp, fp, 0}} - fp1k1 = regInfo{inputs: fponly, outputs: maskonly} - k1fp1 = regInfo{inputs: maskonly, outputs: fponly} - fp2k1 = regInfo{inputs: []regMask{fp, fp}, outputs: maskonly} - fp1k1fp1 = regInfo{inputs: []regMask{fp, mask}, outputs: fponly} - fp2k1fp1 = regInfo{inputs: []regMask{fp, fp, mask}, outputs: fponly} - fp2k1k1 = regInfo{inputs: []regMask{fp, fp, mask}, outputs: maskonly} - fp3fp1 = regInfo{inputs: []regMask{fp, fp, fp}, outputs: fponly} - fp3k1fp1 = regInfo{inputs: []regMask{fp, fp, fp, mask}, outputs: fponly} - fp1gp1fp1 = regInfo{inputs: []regMask{fp, gp}, outputs: fponly} + v11 = regInfo{inputs: vonly, outputs: vonly} + v21 = regInfo{inputs: []regMask{v, v}, outputs: vonly} + vk = regInfo{inputs: vonly, outputs: maskonly} + kv = regInfo{inputs: maskonly, outputs: vonly} + v2k = regInfo{inputs: []regMask{v, v}, outputs: maskonly} + vkv = regInfo{inputs: []regMask{v, mask}, outputs: vonly} + v2kv = regInfo{inputs: []regMask{v, v, mask}, outputs: vonly} + v2kk = regInfo{inputs: []regMask{v, v, mask}, outputs: maskonly} + v31 = regInfo{inputs: []regMask{v, v, v}, outputs: vonly} + v3kv = regInfo{inputs: []regMask{v, v, v, mask}, outputs: vonly} + vgpv = regInfo{inputs: []regMask{v, gp}, outputs: vonly} + vgp = regInfo{inputs: vonly, outputs: gponly} + vfpv = regInfo{inputs: []regMask{v, fp}, outputs: vonly} + vfpkv = regInfo{inputs: []regMask{v, fp, mask}, outputs: vonly} prefreg = regInfo{inputs: []regMask{gpspsbg}} ) @@ -1234,37 +1241,37 @@ func init() { {name: "VMOVDQUload512", argLength: 2, reg: fpload, asm: "VMOVDQU64", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0+auxint+aux, arg1 = mem {name: "VMOVDQUstore512", argLength: 3, reg: fpstore, asm: "VMOVDQU64", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // store, *(arg0+auxint+aux) = arg1, arg2 = mem - {name: "VPMOVMToVec8x16", argLength: 1, reg: k1fp1, asm: "VPMOVM2B"}, - {name: "VPMOVMToVec8x32", argLength: 1, reg: k1fp1, asm: "VPMOVM2B"}, - {name: "VPMOVMToVec8x64", argLength: 1, reg: k1fp1, asm: "VPMOVM2B"}, + {name: "VPMOVMToVec8x16", argLength: 1, reg: kv, asm: "VPMOVM2B"}, + {name: "VPMOVMToVec8x32", argLength: 1, reg: kv, asm: "VPMOVM2B"}, + {name: "VPMOVMToVec8x64", argLength: 1, reg: kv, asm: "VPMOVM2B"}, - {name: "VPMOVMToVec16x8", argLength: 1, reg: k1fp1, asm: "VPMOVM2W"}, - {name: "VPMOVMToVec16x16", argLength: 1, reg: k1fp1, asm: "VPMOVM2W"}, - {name: "VPMOVMToVec16x32", argLength: 1, reg: k1fp1, asm: "VPMOVM2W"}, + {name: "VPMOVMToVec16x8", argLength: 1, reg: kv, asm: 
"VPMOVM2W"}, + {name: "VPMOVMToVec16x16", argLength: 1, reg: kv, asm: "VPMOVM2W"}, + {name: "VPMOVMToVec16x32", argLength: 1, reg: kv, asm: "VPMOVM2W"}, - {name: "VPMOVMToVec32x4", argLength: 1, reg: k1fp1, asm: "VPMOVM2D"}, - {name: "VPMOVMToVec32x8", argLength: 1, reg: k1fp1, asm: "VPMOVM2D"}, - {name: "VPMOVMToVec32x16", argLength: 1, reg: k1fp1, asm: "VPMOVM2D"}, + {name: "VPMOVMToVec32x4", argLength: 1, reg: kv, asm: "VPMOVM2D"}, + {name: "VPMOVMToVec32x8", argLength: 1, reg: kv, asm: "VPMOVM2D"}, + {name: "VPMOVMToVec32x16", argLength: 1, reg: kv, asm: "VPMOVM2D"}, - {name: "VPMOVMToVec64x2", argLength: 1, reg: k1fp1, asm: "VPMOVM2Q"}, - {name: "VPMOVMToVec64x4", argLength: 1, reg: k1fp1, asm: "VPMOVM2Q"}, - {name: "VPMOVMToVec64x8", argLength: 1, reg: k1fp1, asm: "VPMOVM2Q"}, + {name: "VPMOVMToVec64x2", argLength: 1, reg: kv, asm: "VPMOVM2Q"}, + {name: "VPMOVMToVec64x4", argLength: 1, reg: kv, asm: "VPMOVM2Q"}, + {name: "VPMOVMToVec64x8", argLength: 1, reg: kv, asm: "VPMOVM2Q"}, - {name: "VPMOVVec8x16ToM", argLength: 1, reg: fp1k1, asm: "VPMOVB2M"}, - {name: "VPMOVVec8x32ToM", argLength: 1, reg: fp1k1, asm: "VPMOVB2M"}, - {name: "VPMOVVec8x64ToM", argLength: 1, reg: fp1k1, asm: "VPMOVB2M"}, + {name: "VPMOVVec8x16ToM", argLength: 1, reg: vk, asm: "VPMOVB2M"}, + {name: "VPMOVVec8x32ToM", argLength: 1, reg: vk, asm: "VPMOVB2M"}, + {name: "VPMOVVec8x64ToM", argLength: 1, reg: vk, asm: "VPMOVB2M"}, - {name: "VPMOVVec16x8ToM", argLength: 1, reg: fp1k1, asm: "VPMOVW2M"}, - {name: "VPMOVVec16x16ToM", argLength: 1, reg: fp1k1, asm: "VPMOVW2M"}, - {name: "VPMOVVec16x32ToM", argLength: 1, reg: fp1k1, asm: "VPMOVW2M"}, + {name: "VPMOVVec16x8ToM", argLength: 1, reg: vk, asm: "VPMOVW2M"}, + {name: "VPMOVVec16x16ToM", argLength: 1, reg: vk, asm: "VPMOVW2M"}, + {name: "VPMOVVec16x32ToM", argLength: 1, reg: vk, asm: "VPMOVW2M"}, - {name: "VPMOVVec32x4ToM", argLength: 1, reg: fp1k1, asm: "VPMOVD2M"}, - {name: "VPMOVVec32x8ToM", argLength: 1, reg: fp1k1, asm: "VPMOVD2M"}, - {name: "VPMOVVec32x16ToM", argLength: 1, reg: fp1k1, asm: "VPMOVD2M"}, + {name: "VPMOVVec32x4ToM", argLength: 1, reg: vk, asm: "VPMOVD2M"}, + {name: "VPMOVVec32x8ToM", argLength: 1, reg: vk, asm: "VPMOVD2M"}, + {name: "VPMOVVec32x16ToM", argLength: 1, reg: vk, asm: "VPMOVD2M"}, - {name: "VPMOVVec64x2ToM", argLength: 1, reg: fp1k1, asm: "VPMOVQ2M"}, - {name: "VPMOVVec64x4ToM", argLength: 1, reg: fp1k1, asm: "VPMOVQ2M"}, - {name: "VPMOVVec64x8ToM", argLength: 1, reg: fp1k1, asm: "VPMOVQ2M"}, + {name: "VPMOVVec64x2ToM", argLength: 1, reg: vk, asm: "VPMOVQ2M"}, + {name: "VPMOVVec64x4ToM", argLength: 1, reg: vk, asm: "VPMOVQ2M"}, + {name: "VPMOVVec64x8ToM", argLength: 1, reg: vk, asm: "VPMOVQ2M"}, {name: "Zero128", argLength: 0, reg: fp01, asm: "VPXOR"}, {name: "Zero256", argLength: 0, reg: fp01, asm: "VPXOR"}, @@ -1301,7 +1308,7 @@ func init() { pkg: "cmd/internal/obj/x86", genfile: "../../amd64/ssa.go", genSIMDfile: "../../amd64/simdssa.go", - ops: append(AMD64ops, simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp3fp1, fp3k1fp1, fp1gp1fp1, fpgp)...), // AMD64ops, + ops: append(AMD64ops, simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vfpkv)...), // AMD64ops, blocks: AMD64blocks, regnames: regNamesAMD64, ParamIntRegNames: "AX BX CX DI SI R8 R9 R10 R11", diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 5abaa4a0bcc0a9..d16de27fddbaec 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ 
b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -1,877 +1,877 @@ // Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. package main -func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, fpgp regInfo) []opData { +func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vfpkv regInfo) []opData { return []opData{ - {name: "VADDPS512", argLength: 2, reg: fp21, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VADDPSMasked512", argLength: 3, reg: fp2kfp, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VRCP14PS512", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRCP14PSMasked512", argLength: 2, reg: fpkfp, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRSQRT14PS512", argLength: 1, reg: fp11, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRSQRT14PSMasked512", argLength: 2, reg: fpkfp, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VDIVPS512", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VDIVPSMasked512", argLength: 3, reg: fp2kfp, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VFMADD213PS512", argLength: 3, reg: fp31, asm: "VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADD213PSMasked512", argLength: 4, reg: fp3kfp, asm: "VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADDSUB213PS512", argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADDSUB213PSMasked512", argLength: 4, reg: fp3kfp, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUBADD213PS512", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUBADD213PSMasked512", argLength: 4, reg: fp3kfp, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VMAXPS512", argLength: 2, reg: fp21, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMAXPSMasked512", argLength: 3, reg: fp2kfp, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMINPS512", argLength: 2, reg: fp21, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMINPSMasked512", argLength: 3, reg: fp2kfp, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMULPS512", argLength: 2, reg: fp21, asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VSCALEFPS512", argLength: 2, reg: fp21, asm: "VSCALEFPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSCALEFPSMasked512", argLength: 3, reg: fp2kfp, asm: "VSCALEFPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VMULPSMasked512", argLength: 3, reg: fp2kfp, asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VSQRTPS512", argLength: 1, reg: fp11, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSQRTPSMasked512", argLength: 2, reg: fpkfp, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSUBPS512", 
argLength: 2, reg: fp21, asm: "VSUBPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSUBPSMasked512", argLength: 3, reg: fp2kfp, asm: "VSUBPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VADDPS128", argLength: 2, reg: fp21, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VADDPSMasked128", argLength: 3, reg: fp2kfp, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VADDSUBPS128", argLength: 2, reg: fp21, asm: "VADDSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRCP14PS128", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRCP14PSMasked128", argLength: 2, reg: fpkfp, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRSQRTPS128", argLength: 1, reg: fp11, asm: "VRSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRSQRT14PSMasked128", argLength: 2, reg: fpkfp, asm: "VRSQRT14PS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VDIVPS128", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VDIVPSMasked128", argLength: 3, reg: fp2kfp, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VFMADD213PS128", argLength: 3, reg: fp31, asm: "VFMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADD213PSMasked128", argLength: 4, reg: fp3kfp, asm: "VFMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADDSUB213PS128", argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADDSUB213PSMasked128", argLength: 4, reg: fp3kfp, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUBADD213PS128", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUBADD213PSMasked128", argLength: 4, reg: fp3kfp, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VMAXPS128", argLength: 2, reg: fp21, asm: "VMAXPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMAXPSMasked128", argLength: 3, reg: fp2kfp, asm: "VMAXPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMINPS128", argLength: 2, reg: fp21, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMINPSMasked128", argLength: 3, reg: fp2kfp, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMULPS128", argLength: 2, reg: fp21, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VSCALEFPS128", argLength: 2, reg: fp21, asm: "VSCALEFPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSCALEFPSMasked128", argLength: 3, reg: fp2kfp, asm: "VSCALEFPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VMULPSMasked128", argLength: 3, reg: fp2kfp, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VHADDPS128", argLength: 2, reg: fp21, asm: "VHADDPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VHSUBPS128", argLength: 2, reg: fp21, asm: "VHSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSQRTPS128", argLength: 1, reg: fp11, asm: "VSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSQRTPSMasked128", 
argLength: 2, reg: fpkfp, asm: "VSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSUBPS128", argLength: 2, reg: fp21, asm: "VSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSUBPSMasked128", argLength: 3, reg: fp2kfp, asm: "VSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VADDPS256", argLength: 2, reg: fp21, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VADDPSMasked256", argLength: 3, reg: fp2kfp, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VADDSUBPS256", argLength: 2, reg: fp21, asm: "VADDSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRCP14PS256", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRCP14PSMasked256", argLength: 2, reg: fpkfp, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRSQRTPS256", argLength: 1, reg: fp11, asm: "VRSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRSQRT14PSMasked256", argLength: 2, reg: fpkfp, asm: "VRSQRT14PS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VDIVPS256", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VDIVPSMasked256", argLength: 3, reg: fp2kfp, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VFMADD213PS256", argLength: 3, reg: fp31, asm: "VFMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADD213PSMasked256", argLength: 4, reg: fp3kfp, asm: "VFMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADDSUB213PS256", argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADDSUB213PSMasked256", argLength: 4, reg: fp3kfp, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUBADD213PS256", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUBADD213PSMasked256", argLength: 4, reg: fp3kfp, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VMAXPS256", argLength: 2, reg: fp21, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMAXPSMasked256", argLength: 3, reg: fp2kfp, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMINPS256", argLength: 2, reg: fp21, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMINPSMasked256", argLength: 3, reg: fp2kfp, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMULPS256", argLength: 2, reg: fp21, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VSCALEFPS256", argLength: 2, reg: fp21, asm: "VSCALEFPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSCALEFPSMasked256", argLength: 3, reg: fp2kfp, asm: "VSCALEFPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VMULPSMasked256", argLength: 3, reg: fp2kfp, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VHADDPS256", argLength: 2, reg: fp21, asm: "VHADDPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VHSUBPS256", argLength: 2, reg: fp21, asm: "VHSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSQRTPS256", argLength: 1, 
reg: fp11, asm: "VSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSQRTPSMasked256", argLength: 2, reg: fpkfp, asm: "VSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSUBPS256", argLength: 2, reg: fp21, asm: "VSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSUBPSMasked256", argLength: 3, reg: fp2kfp, asm: "VSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VADDPD128", argLength: 2, reg: fp21, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VADDPDMasked128", argLength: 3, reg: fp2kfp, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VADDSUBPD128", argLength: 2, reg: fp21, asm: "VADDSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRCP14PD128", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRCP14PDMasked128", argLength: 2, reg: fpkfp, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRSQRT14PD128", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRSQRT14PDMasked128", argLength: 2, reg: fpkfp, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VDIVPD128", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VDIVPDMasked128", argLength: 3, reg: fp2kfp, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VFMADD213PD128", argLength: 3, reg: fp31, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADD213PDMasked128", argLength: 4, reg: fp3kfp, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADDSUB213PD128", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADDSUB213PDMasked128", argLength: 4, reg: fp3kfp, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUBADD213PD128", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUBADD213PDMasked128", argLength: 4, reg: fp3kfp, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VMAXPD128", argLength: 2, reg: fp21, asm: "VMAXPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMAXPDMasked128", argLength: 3, reg: fp2kfp, asm: "VMAXPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMINPD128", argLength: 2, reg: fp21, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMINPDMasked128", argLength: 3, reg: fp2kfp, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMULPD128", argLength: 2, reg: fp21, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VSCALEFPD128", argLength: 2, reg: fp21, asm: "VSCALEFPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSCALEFPDMasked128", argLength: 3, reg: fp2kfp, asm: "VSCALEFPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VMULPDMasked128", argLength: 3, reg: fp2kfp, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VHADDPD128", argLength: 2, reg: fp21, asm: "VHADDPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VHSUBPD128", argLength: 2, 
reg: fp21, asm: "VHSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSQRTPD128", argLength: 1, reg: fp11, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSQRTPDMasked128", argLength: 2, reg: fpkfp, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSUBPD128", argLength: 2, reg: fp21, asm: "VSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSUBPDMasked128", argLength: 3, reg: fp2kfp, asm: "VSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VADDPD256", argLength: 2, reg: fp21, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VADDPDMasked256", argLength: 3, reg: fp2kfp, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VADDSUBPD256", argLength: 2, reg: fp21, asm: "VADDSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRCP14PD256", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRCP14PDMasked256", argLength: 2, reg: fpkfp, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRSQRT14PD256", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRSQRT14PDMasked256", argLength: 2, reg: fpkfp, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VDIVPD256", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VDIVPDMasked256", argLength: 3, reg: fp2kfp, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VFMADD213PD256", argLength: 3, reg: fp31, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADD213PDMasked256", argLength: 4, reg: fp3kfp, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADDSUB213PD256", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADDSUB213PDMasked256", argLength: 4, reg: fp3kfp, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUBADD213PD256", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUBADD213PDMasked256", argLength: 4, reg: fp3kfp, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VMAXPD256", argLength: 2, reg: fp21, asm: "VMAXPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMAXPDMasked256", argLength: 3, reg: fp2kfp, asm: "VMAXPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMINPD256", argLength: 2, reg: fp21, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMINPDMasked256", argLength: 3, reg: fp2kfp, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMULPD256", argLength: 2, reg: fp21, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VSCALEFPD256", argLength: 2, reg: fp21, asm: "VSCALEFPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSCALEFPDMasked256", argLength: 3, reg: fp2kfp, asm: "VSCALEFPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VMULPDMasked256", argLength: 3, reg: fp2kfp, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VHADDPD256", argLength: 2, 
reg: fp21, asm: "VHADDPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VHSUBPD256", argLength: 2, reg: fp21, asm: "VHSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSQRTPD256", argLength: 1, reg: fp11, asm: "VSQRTPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSQRTPDMasked256", argLength: 2, reg: fpkfp, asm: "VSQRTPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSUBPD256", argLength: 2, reg: fp21, asm: "VSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSUBPDMasked256", argLength: 3, reg: fp2kfp, asm: "VSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VADDPD512", argLength: 2, reg: fp21, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VADDPDMasked512", argLength: 3, reg: fp2kfp, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VRCP14PD512", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRCP14PDMasked512", argLength: 2, reg: fpkfp, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRSQRT14PD512", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRSQRT14PDMasked512", argLength: 2, reg: fpkfp, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VDIVPD512", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VDIVPDMasked512", argLength: 3, reg: fp2kfp, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VFMADD213PD512", argLength: 3, reg: fp31, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADD213PDMasked512", argLength: 4, reg: fp3kfp, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADDSUB213PD512", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADDSUB213PDMasked512", argLength: 4, reg: fp3kfp, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUBADD213PD512", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUBADD213PDMasked512", argLength: 4, reg: fp3kfp, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VMAXPD512", argLength: 2, reg: fp21, asm: "VMAXPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMAXPDMasked512", argLength: 3, reg: fp2kfp, asm: "VMAXPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMINPD512", argLength: 2, reg: fp21, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMINPDMasked512", argLength: 3, reg: fp2kfp, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMULPD512", argLength: 2, reg: fp21, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VSCALEFPD512", argLength: 2, reg: fp21, asm: "VSCALEFPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSCALEFPDMasked512", argLength: 3, reg: fp2kfp, asm: "VSCALEFPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VMULPDMasked512", argLength: 3, reg: fp2kfp, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VSQRTPD512", argLength: 1, reg: 
fp11, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSQRTPDMasked512", argLength: 2, reg: fpkfp, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSUBPD512", argLength: 2, reg: fp21, asm: "VSUBPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSUBPDMasked512", argLength: 3, reg: fp2kfp, asm: "VSUBPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPABSW256", argLength: 1, reg: fp11, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPABSWMasked256", argLength: 2, reg: fpkfp, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPADDW256", argLength: 2, reg: fp21, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPADDWMasked256", argLength: 3, reg: fp2kfp, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPEQW256", argLength: 2, reg: fp21, asm: "VPCMPEQW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPGTW256", argLength: 2, reg: fp21, asm: "VPCMPGTW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXSW256", argLength: 2, reg: fp21, asm: "VPMAXSW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXSWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXSW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINSW256", argLength: 2, reg: fp21, asm: "VPMINSW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINSWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINSW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULHW256", argLength: 2, reg: fp21, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULHWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULLW256", argLength: 2, reg: fp21, asm: "VPMULLW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULLWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULLW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMADDWD256", argLength: 2, reg: fp21, asm: "VPMADDWD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMADDWDMasked256", argLength: 3, reg: fp2kfp, asm: "VPMADDWD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPHADDW256", argLength: 2, reg: fp21, asm: "VPHADDW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPHSUBW256", argLength: 2, reg: fp21, asm: "VPHSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTW256", argLength: 1, reg: fp11, asm: "VPOPCNTW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTWMasked256", argLength: 2, reg: fpkfp, asm: "VPOPCNTW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPADDSW256", argLength: 2, reg: fp21, asm: "VPADDSW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPADDSWMasked256", argLength: 3, reg: fp2kfp, asm: "VPADDSW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPHADDSW256", argLength: 2, reg: fp21, asm: "VPHADDSW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPHSUBSW256", argLength: 2, reg: fp21, asm: "VPHSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBSW256", argLength: 2, reg: fp21, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: 
"VPSUBSWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSLLW256", argLength: 2, reg: fp21, asm: "VPSLLW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLW256", argLength: 2, reg: fp21, asm: "VPSRLW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAW256", argLength: 2, reg: fp21, asm: "VPSRAW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSLLVW256", argLength: 2, reg: fp21, asm: "VPSLLVW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDVW256", argLength: 3, reg: fp31, asm: "VPSHLDVW", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSHLDVWMasked256", argLength: 4, reg: fp3kfp, asm: "VPSHLDVW", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSLLVWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSLLVW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLVW256", argLength: 2, reg: fp21, asm: "VPSRLVW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDVW256", argLength: 3, reg: fp31, asm: "VPSHRDVW", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSHRDVWMasked256", argLength: 4, reg: fp3kfp, asm: "VPSHRDVW", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSRLVWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRLVW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAVW256", argLength: 2, reg: fp21, asm: "VPSRAVW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAVWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRAVW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSIGNW256", argLength: 2, reg: fp21, asm: "VPSIGNW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBW256", argLength: 2, reg: fp21, asm: "VPSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPABSW512", argLength: 1, reg: fp11, asm: "VPABSW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPABSWMasked512", argLength: 2, reg: fpkfp, asm: "VPABSW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDW512", argLength: 2, reg: fp21, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPADDWMasked512", argLength: 3, reg: fp2kfp, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXSW512", argLength: 2, reg: fp21, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXSWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSW512", argLength: 2, reg: fp21, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULHW512", argLength: 2, reg: fp21, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULHWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULLW512", argLength: 2, reg: fp21, asm: "VPMULLW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULLWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULLW", commutative: true, typ: 
"Vec512", resultInArg0: false}, - {name: "VPMADDWD512", argLength: 2, reg: fp21, asm: "VPMADDWD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPMADDWDMasked512", argLength: 3, reg: fp2kfp, asm: "VPMADDWD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPOPCNTW512", argLength: 1, reg: fp11, asm: "VPOPCNTW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPOPCNTWMasked512", argLength: 2, reg: fpkfp, asm: "VPOPCNTW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDSW512", argLength: 2, reg: fp21, asm: "VPADDSW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPADDSWMasked512", argLength: 3, reg: fp2kfp, asm: "VPADDSW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBSW512", argLength: 2, reg: fp21, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBSWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSLLVW512", argLength: 2, reg: fp21, asm: "VPSLLVW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDVW512", argLength: 3, reg: fp31, asm: "VPSHLDVW", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSHLDVWMasked512", argLength: 4, reg: fp3kfp, asm: "VPSHLDVW", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSLLVWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSLLVW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLVW512", argLength: 2, reg: fp21, asm: "VPSRLVW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDVW512", argLength: 3, reg: fp31, asm: "VPSHRDVW", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSHRDVWMasked512", argLength: 4, reg: fp3kfp, asm: "VPSHRDVW", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSRLVWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRLVW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAVW512", argLength: 2, reg: fp21, asm: "VPSRAVW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAVWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRAVW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBW512", argLength: 2, reg: fp21, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPABSW128", argLength: 1, reg: fp11, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPABSWMasked128", argLength: 2, reg: fpkfp, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPADDW128", argLength: 2, reg: fp21, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPADDWMasked128", argLength: 3, reg: fp2kfp, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPEQW128", argLength: 2, reg: fp21, asm: "VPCMPEQW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPGTW128", argLength: 2, reg: fp21, asm: "VPCMPGTW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXSW128", argLength: 2, reg: fp21, asm: "VPMAXSW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXSWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXSW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINSW128", argLength: 
2, reg: fp21, asm: "VPMINSW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINSWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINSW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULHW128", argLength: 2, reg: fp21, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULHWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULLW128", argLength: 2, reg: fp21, asm: "VPMULLW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULLWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULLW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMADDWD128", argLength: 2, reg: fp21, asm: "VPMADDWD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMADDWDMasked128", argLength: 3, reg: fp2kfp, asm: "VPMADDWD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPHADDW128", argLength: 2, reg: fp21, asm: "VPHADDW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPHSUBW128", argLength: 2, reg: fp21, asm: "VPHSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTW128", argLength: 1, reg: fp11, asm: "VPOPCNTW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTWMasked128", argLength: 2, reg: fpkfp, asm: "VPOPCNTW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPADDSW128", argLength: 2, reg: fp21, asm: "VPADDSW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPADDSWMasked128", argLength: 3, reg: fp2kfp, asm: "VPADDSW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPHADDSW128", argLength: 2, reg: fp21, asm: "VPHADDSW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPHSUBSW128", argLength: 2, reg: fp21, asm: "VPHSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBSW128", argLength: 2, reg: fp21, asm: "VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBSWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSLLW128", argLength: 2, reg: fp21, asm: "VPSLLW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLW128", argLength: 2, reg: fp21, asm: "VPSRLW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAW128", argLength: 2, reg: fp21, asm: "VPSRAW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSLLVW128", argLength: 2, reg: fp21, asm: "VPSLLVW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDVW128", argLength: 3, reg: fp31, asm: "VPSHLDVW", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSHLDVWMasked128", argLength: 4, reg: fp3kfp, asm: "VPSHLDVW", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSLLVWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSLLVW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLVW128", argLength: 2, reg: fp21, asm: "VPSRLVW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDVW128", argLength: 3, reg: fp31, asm: "VPSHRDVW", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSHRDVWMasked128", argLength: 4, reg: fp3kfp, asm: "VPSHRDVW", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSRLVWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRLVW", commutative: false, typ: "Vec128", 
resultInArg0: false}, - {name: "VPSRAVW128", argLength: 2, reg: fp21, asm: "VPSRAVW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAVWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRAVW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSIGNW128", argLength: 2, reg: fp21, asm: "VPSIGNW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBW128", argLength: 2, reg: fp21, asm: "VPSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPABSD512", argLength: 1, reg: fp11, asm: "VPABSD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPABSDMasked512", argLength: 2, reg: fpkfp, asm: "VPABSD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDD512", argLength: 2, reg: fp21, asm: "VPADDD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPADDDMasked512", argLength: 3, reg: fp2kfp, asm: "VPADDD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPANDD512", argLength: 2, reg: fp21, asm: "VPANDD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPANDDMasked512", argLength: 3, reg: fp2kfp, asm: "VPANDD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPANDND512", argLength: 2, reg: fp21, asm: "VPANDND", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPANDNDMasked512", argLength: 3, reg: fp2kfp, asm: "VPANDND", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXSD512", argLength: 2, reg: fp21, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXSDMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSD512", argLength: 2, reg: fp21, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSDMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULLD512", argLength: 2, reg: fp21, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULLDMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPORD512", argLength: 2, reg: fp21, asm: "VPORD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPORDMasked512", argLength: 3, reg: fp2kfp, asm: "VPORD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPDPWSSD512", argLength: 3, reg: fp31, asm: "VPDPWSSD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPDPWSSDMasked512", argLength: 4, reg: fp3kfp, asm: "VPDPWSSD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPOPCNTD512", argLength: 1, reg: fp11, asm: "VPOPCNTD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPOPCNTDMasked512", argLength: 2, reg: fpkfp, asm: "VPOPCNTD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPROLVD512", argLength: 2, reg: fp21, asm: "VPROLVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPROLVDMasked512", argLength: 3, reg: fp2kfp, asm: "VPROLVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORVD512", argLength: 2, reg: fp21, asm: "VPRORVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORVDMasked512", argLength: 3, reg: fp2kfp, asm: "VPRORVD", 
commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPDPWSSDS512", argLength: 3, reg: fp31, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPDPWSSDSMasked512", argLength: 4, reg: fp3kfp, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPDPBUSDS512", argLength: 3, reg: fp31, asm: "VPDPBUSDS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPDPBUSDSMasked512", argLength: 4, reg: fp3kfp, asm: "VPDPBUSDS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSLLVD512", argLength: 2, reg: fp21, asm: "VPSLLVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDVD512", argLength: 3, reg: fp31, asm: "VPSHLDVD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSHLDVDMasked512", argLength: 4, reg: fp3kfp, asm: "VPSHLDVD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSLLVDMasked512", argLength: 3, reg: fp2kfp, asm: "VPSLLVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLVD512", argLength: 2, reg: fp21, asm: "VPSRLVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDVD512", argLength: 3, reg: fp31, asm: "VPSHRDVD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSHRDVDMasked512", argLength: 4, reg: fp3kfp, asm: "VPSHRDVD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSRLVDMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRLVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAVD512", argLength: 2, reg: fp21, asm: "VPSRAVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAVDMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRAVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBD512", argLength: 2, reg: fp21, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBDMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPDPBUSD512", argLength: 3, reg: fp31, asm: "VPDPBUSD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPDPBUSDMasked512", argLength: 4, reg: fp3kfp, asm: "VPDPBUSD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPXORD512", argLength: 2, reg: fp21, asm: "VPXORD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPXORDMasked512", argLength: 3, reg: fp2kfp, asm: "VPXORD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPABSD128", argLength: 1, reg: fp11, asm: "VPABSD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPABSDMasked128", argLength: 2, reg: fpkfp, asm: "VPABSD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPADDD128", argLength: 2, reg: fp21, asm: "VPADDD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPADDDMasked128", argLength: 3, reg: fp2kfp, asm: "VPADDD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPANDDMasked128", argLength: 3, reg: fp2kfp, asm: "VPANDD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPANDNDMasked128", argLength: 3, reg: fp2kfp, asm: "VPANDND", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPEQD128", argLength: 2, reg: fp21, asm: "VPCMPEQD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPGTD128", argLength: 2, reg: fp21, asm: "VPCMPGTD", commutative: false, typ: "Vec128", resultInArg0: false}, 
- {name: "VPMAXSD128", argLength: 2, reg: fp21, asm: "VPMAXSD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXSDMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXSD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINSD128", argLength: 2, reg: fp21, asm: "VPMINSD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINSDMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINSD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULDQ128", argLength: 2, reg: fp21, asm: "VPMULDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULLD128", argLength: 2, reg: fp21, asm: "VPMULLD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULLDMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULLD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPORDMasked128", argLength: 3, reg: fp2kfp, asm: "VPORD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPDPWSSD128", argLength: 3, reg: fp31, asm: "VPDPWSSD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPDPWSSDMasked128", argLength: 4, reg: fp3kfp, asm: "VPDPWSSD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPHADDD128", argLength: 2, reg: fp21, asm: "VPHADDD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPHSUBD128", argLength: 2, reg: fp21, asm: "VPHSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTD128", argLength: 1, reg: fp11, asm: "VPOPCNTD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTDMasked128", argLength: 2, reg: fpkfp, asm: "VPOPCNTD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPROLVD128", argLength: 2, reg: fp21, asm: "VPROLVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPROLVDMasked128", argLength: 3, reg: fp2kfp, asm: "VPROLVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORVD128", argLength: 2, reg: fp21, asm: "VPRORVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORVDMasked128", argLength: 3, reg: fp2kfp, asm: "VPRORVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPDPWSSDS128", argLength: 3, reg: fp31, asm: "VPDPWSSDS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPDPWSSDSMasked128", argLength: 4, reg: fp3kfp, asm: "VPDPWSSDS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPDPBUSDS128", argLength: 3, reg: fp31, asm: "VPDPBUSDS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPDPBUSDSMasked128", argLength: 4, reg: fp3kfp, asm: "VPDPBUSDS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSLLD128", argLength: 2, reg: fp21, asm: "VPSLLD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLD128", argLength: 2, reg: fp21, asm: "VPSRLD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAD128", argLength: 2, reg: fp21, asm: "VPSRAD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSLLVD128", argLength: 2, reg: fp21, asm: "VPSLLVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDVD128", argLength: 3, reg: fp31, asm: "VPSHLDVD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSHLDVDMasked128", argLength: 4, reg: fp3kfp, asm: "VPSHLDVD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSLLVDMasked128", argLength: 3, reg: fp2kfp, asm: "VPSLLVD", 
commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLVD128", argLength: 2, reg: fp21, asm: "VPSRLVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDVD128", argLength: 3, reg: fp31, asm: "VPSHRDVD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSHRDVDMasked128", argLength: 4, reg: fp3kfp, asm: "VPSHRDVD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSRLVDMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRLVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAVD128", argLength: 2, reg: fp21, asm: "VPSRAVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAVDMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRAVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSIGND128", argLength: 2, reg: fp21, asm: "VPSIGND", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBD128", argLength: 2, reg: fp21, asm: "VPSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBDMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPDPBUSD128", argLength: 3, reg: fp31, asm: "VPDPBUSD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPDPBUSDMasked128", argLength: 4, reg: fp3kfp, asm: "VPDPBUSD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPXORDMasked128", argLength: 3, reg: fp2kfp, asm: "VPXORD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPABSD256", argLength: 1, reg: fp11, asm: "VPABSD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPABSDMasked256", argLength: 2, reg: fpkfp, asm: "VPABSD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPADDD256", argLength: 2, reg: fp21, asm: "VPADDD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPADDDMasked256", argLength: 3, reg: fp2kfp, asm: "VPADDD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPANDDMasked256", argLength: 3, reg: fp2kfp, asm: "VPANDD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPANDNDMasked256", argLength: 3, reg: fp2kfp, asm: "VPANDND", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPEQD256", argLength: 2, reg: fp21, asm: "VPCMPEQD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPGTD256", argLength: 2, reg: fp21, asm: "VPCMPGTD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXSD256", argLength: 2, reg: fp21, asm: "VPMAXSD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXSDMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXSD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINSD256", argLength: 2, reg: fp21, asm: "VPMINSD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINSDMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINSD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULDQ256", argLength: 2, reg: fp21, asm: "VPMULDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULLD256", argLength: 2, reg: fp21, asm: "VPMULLD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULLDMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULLD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPORDMasked256", argLength: 3, reg: fp2kfp, asm: "VPORD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPDPWSSD256", 
argLength: 3, reg: fp31, asm: "VPDPWSSD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPDPWSSDMasked256", argLength: 4, reg: fp3kfp, asm: "VPDPWSSD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPHADDD256", argLength: 2, reg: fp21, asm: "VPHADDD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPHSUBD256", argLength: 2, reg: fp21, asm: "VPHSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTD256", argLength: 1, reg: fp11, asm: "VPOPCNTD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTDMasked256", argLength: 2, reg: fpkfp, asm: "VPOPCNTD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPROLVD256", argLength: 2, reg: fp21, asm: "VPROLVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPROLVDMasked256", argLength: 3, reg: fp2kfp, asm: "VPROLVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORVD256", argLength: 2, reg: fp21, asm: "VPRORVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORVDMasked256", argLength: 3, reg: fp2kfp, asm: "VPRORVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPDPWSSDS256", argLength: 3, reg: fp31, asm: "VPDPWSSDS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPDPWSSDSMasked256", argLength: 4, reg: fp3kfp, asm: "VPDPWSSDS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPDPBUSDS256", argLength: 3, reg: fp31, asm: "VPDPBUSDS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPDPBUSDSMasked256", argLength: 4, reg: fp3kfp, asm: "VPDPBUSDS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSLLD256", argLength: 2, reg: fp21, asm: "VPSLLD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLD256", argLength: 2, reg: fp21, asm: "VPSRLD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAD256", argLength: 2, reg: fp21, asm: "VPSRAD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSLLVD256", argLength: 2, reg: fp21, asm: "VPSLLVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDVD256", argLength: 3, reg: fp31, asm: "VPSHLDVD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSHLDVDMasked256", argLength: 4, reg: fp3kfp, asm: "VPSHLDVD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSLLVDMasked256", argLength: 3, reg: fp2kfp, asm: "VPSLLVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLVD256", argLength: 2, reg: fp21, asm: "VPSRLVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDVD256", argLength: 3, reg: fp31, asm: "VPSHRDVD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSHRDVDMasked256", argLength: 4, reg: fp3kfp, asm: "VPSHRDVD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSRLVDMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRLVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAVD256", argLength: 2, reg: fp21, asm: "VPSRAVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAVDMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRAVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSIGND256", argLength: 2, reg: fp21, asm: "VPSIGND", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBD256", argLength: 2, reg: fp21, asm: "VPSUBD", commutative: false, typ: 
"Vec256", resultInArg0: false}, - {name: "VPSUBDMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPDPBUSD256", argLength: 3, reg: fp31, asm: "VPDPBUSD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPDPBUSDMasked256", argLength: 4, reg: fp3kfp, asm: "VPDPBUSD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPXORDMasked256", argLength: 3, reg: fp2kfp, asm: "VPXORD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPABSQ128", argLength: 1, reg: fp11, asm: "VPABSQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPABSQMasked128", argLength: 2, reg: fpkfp, asm: "VPABSQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPADDQ128", argLength: 2, reg: fp21, asm: "VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPADDQMasked128", argLength: 3, reg: fp2kfp, asm: "VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPANDQMasked128", argLength: 3, reg: fp2kfp, asm: "VPANDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPANDNQMasked128", argLength: 3, reg: fp2kfp, asm: "VPANDNQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPEQQ128", argLength: 2, reg: fp21, asm: "VPCMPEQQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXSQ128", argLength: 2, reg: fp21, asm: "VPMAXSQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXSQMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXSQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINSQ128", argLength: 2, reg: fp21, asm: "VPMINSQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINSQMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINSQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULDQMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULLQ128", argLength: 2, reg: fp21, asm: "VPMULLQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULLQMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULLQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPORQMasked128", argLength: 3, reg: fp2kfp, asm: "VPORQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTQ128", argLength: 1, reg: fp11, asm: "VPOPCNTQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTQMasked128", argLength: 2, reg: fpkfp, asm: "VPOPCNTQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPROLVQ128", argLength: 2, reg: fp21, asm: "VPROLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPROLVQMasked128", argLength: 3, reg: fp2kfp, asm: "VPROLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORVQ128", argLength: 2, reg: fp21, asm: "VPRORVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORVQMasked128", argLength: 3, reg: fp2kfp, asm: "VPRORVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSLLQ128", argLength: 2, reg: fp21, asm: "VPSLLQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSLLQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSLLQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLQ128", argLength: 2, reg: fp21, asm: "VPSRLQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLQMasked128", 
argLength: 3, reg: fp2kfp, asm: "VPSRLQ", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSRAQ128", argLength: 2, reg: fp21, asm: "VPSRAQ", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSRAQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRAQ", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSLLVQ128", argLength: 2, reg: fp21, asm: "VPSLLVQ", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSHLDVQ128", argLength: 3, reg: fp31, asm: "VPSHLDVQ", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VPSHLDVQMasked128", argLength: 4, reg: fp3kfp, asm: "VPSHLDVQ", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VPSLLVQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSLLVQ", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSRLVQ128", argLength: 2, reg: fp21, asm: "VPSRLVQ", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSHRDVQ128", argLength: 3, reg: fp31, asm: "VPSHRDVQ", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VPSHRDVQMasked128", argLength: 4, reg: fp3kfp, asm: "VPSHRDVQ", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VPSRLVQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRLVQ", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSRAVQ128", argLength: 2, reg: fp21, asm: "VPSRAVQ", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSRAVQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRAVQ", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSUBQ128", argLength: 2, reg: fp21, asm: "VPSUBQ", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSUBQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBQ", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPXORQMasked128", argLength: 3, reg: fp2kfp, asm: "VPXORQ", commutative: true, typ: "Vec128", resultInArg0: false},
- {name: "VPABSQ256", argLength: 1, reg: fp11, asm: "VPABSQ", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPABSQMasked256", argLength: 2, reg: fpkfp, asm: "VPABSQ", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPADDQ256", argLength: 2, reg: fp21, asm: "VPADDQ", commutative: true, typ: "Vec256", resultInArg0: false},
- {name: "VPADDQMasked256", argLength: 3, reg: fp2kfp, asm: "VPADDQ", commutative: true, typ: "Vec256", resultInArg0: false},
- {name: "VPANDQMasked256", argLength: 3, reg: fp2kfp, asm: "VPANDQ", commutative: true, typ: "Vec256", resultInArg0: false},
- {name: "VPANDNQMasked256", argLength: 3, reg: fp2kfp, asm: "VPANDNQ", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPCMPEQQ256", argLength: 2, reg: fp21, asm: "VPCMPEQQ", commutative: true, typ: "Vec256", resultInArg0: false},
- {name: "VPCMPGTQ256", argLength: 2, reg: fp21, asm: "VPCMPGTQ", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPMAXSQ256", argLength: 2, reg: fp21, asm: "VPMAXSQ", commutative: true, typ: "Vec256", resultInArg0: false},
- {name: "VPMAXSQMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXSQ", commutative: true, typ: "Vec256", resultInArg0: false},
- {name: "VPMINSQ256", argLength: 2, reg: fp21, asm: "VPMINSQ", commutative: true, typ: "Vec256", resultInArg0: false},
- {name: "VPMINSQMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINSQ", commutative: true, typ: "Vec256", resultInArg0: false},
- {name: "VPMULDQMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULDQ", commutative: true, typ: "Vec256", resultInArg0: false},
- {name: "VPMULLQ256", argLength: 2, reg: fp21, asm: "VPMULLQ", commutative: true, typ: "Vec256", resultInArg0: false},
- {name: "VPMULLQMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULLQ", commutative: true, typ: "Vec256", resultInArg0: false},
- {name: "VPORQMasked256", argLength: 3, reg: fp2kfp, asm: "VPORQ", commutative: true, typ: "Vec256", resultInArg0: false},
- {name: "VPOPCNTQ256", argLength: 1, reg: fp11, asm: "VPOPCNTQ", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPOPCNTQMasked256", argLength: 2, reg: fpkfp, asm: "VPOPCNTQ", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPROLVQ256", argLength: 2, reg: fp21, asm: "VPROLVQ", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPROLVQMasked256", argLength: 3, reg: fp2kfp, asm: "VPROLVQ", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPRORVQ256", argLength: 2, reg: fp21, asm: "VPRORVQ", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPRORVQMasked256", argLength: 3, reg: fp2kfp, asm: "VPRORVQ", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSLLQ256", argLength: 2, reg: fp21, asm: "VPSLLQ", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSLLQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSLLQ", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSRLQ256", argLength: 2, reg: fp21, asm: "VPSRLQ", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSRLQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRLQ", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSRAQ256", argLength: 2, reg: fp21, asm: "VPSRAQ", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSRAQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRAQ", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSLLVQ256", argLength: 2, reg: fp21, asm: "VPSLLVQ", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSHLDVQ256", argLength: 3, reg: fp31, asm: "VPSHLDVQ", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VPSHLDVQMasked256", argLength: 4, reg: fp3kfp, asm: "VPSHLDVQ", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VPSLLVQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSLLVQ", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSRLVQ256", argLength: 2, reg: fp21, asm: "VPSRLVQ", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSHRDVQ256", argLength: 3, reg: fp31, asm: "VPSHRDVQ", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VPSHRDVQMasked256", argLength: 4, reg: fp3kfp, asm: "VPSHRDVQ", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VPSRLVQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRLVQ", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSRAVQ256", argLength: 2, reg: fp21, asm: "VPSRAVQ", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSRAVQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRAVQ", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSUBQ256", argLength: 2, reg: fp21, asm: "VPSUBQ", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSUBQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBQ", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPXORQMasked256", argLength: 3, reg: fp2kfp, asm: "VPXORQ", commutative: true, typ: "Vec256", resultInArg0: false},
- {name: "VPABSQ512", argLength: 1, reg: fp11, asm: "VPABSQ", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPABSQMasked512", argLength: 2, reg: fpkfp, asm: "VPABSQ", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPADDQ512", argLength: 2, reg: fp21, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false},
- {name: "VPADDQMasked512", argLength: 3, reg: fp2kfp, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false},
- {name: "VPANDQ512", argLength: 2, reg: fp21, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false},
- {name: "VPANDQMasked512", argLength: 3, reg: fp2kfp, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false},
- {name: "VPANDNQ512", argLength: 2, reg: fp21, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPANDNQMasked512", argLength: 3, reg: fp2kfp, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPMAXSQ512", argLength: 2, reg: fp21, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false},
- {name: "VPMAXSQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false},
- {name: "VPMINSQ512", argLength: 2, reg: fp21, asm: "VPMINSQ", commutative: true, typ: "Vec512", resultInArg0: false},
- {name: "VPMINSQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINSQ", commutative: true, typ: "Vec512", resultInArg0: false},
- {name: "VPMULDQ512", argLength: 2, reg: fp21, asm: "VPMULDQ", commutative: true, typ: "Vec512", resultInArg0: false},
- {name: "VPMULDQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULDQ", commutative: true, typ: "Vec512", resultInArg0: false},
- {name: "VPMULLQ512", argLength: 2, reg: fp21, asm: "VPMULLQ", commutative: true, typ: "Vec512", resultInArg0: false},
- {name: "VPMULLQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULLQ", commutative: true, typ: "Vec512", resultInArg0: false},
- {name: "VPORQ512", argLength: 2, reg: fp21, asm: "VPORQ", commutative: true, typ: "Vec512", resultInArg0: false},
- {name: "VPORQMasked512", argLength: 3, reg: fp2kfp, asm: "VPORQ", commutative: true, typ: "Vec512", resultInArg0: false},
- {name: "VPOPCNTQ512", argLength: 1, reg: fp11, asm: "VPOPCNTQ", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPOPCNTQMasked512", argLength: 2, reg: fpkfp, asm: "VPOPCNTQ", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPROLVQ512", argLength: 2, reg: fp21, asm: "VPROLVQ", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPROLVQMasked512", argLength: 3, reg: fp2kfp, asm: "VPROLVQ", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPRORVQ512", argLength: 2, reg: fp21, asm: "VPRORVQ", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPRORVQMasked512", argLength: 3, reg: fp2kfp, asm: "VPRORVQ", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSLLQ512", argLength: 2, reg: fp21, asm: "VPSLLQ", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSLLQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSLLQ", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSRLQ512", argLength: 2, reg: fp21, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSRLQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSRAQ512", argLength: 2, reg: fp21, asm: "VPSRAQ", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSRAQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRAQ", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSLLVQ512", argLength: 2, reg: fp21, asm: "VPSLLVQ", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSHLDVQ512", argLength: 3, reg: fp31, asm: "VPSHLDVQ", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VPSHLDVQMasked512", argLength: 4, reg: fp3kfp, asm: "VPSHLDVQ", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VPSLLVQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSLLVQ", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSRLVQ512", argLength: 2, reg: fp21, asm: "VPSRLVQ", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSHRDVQ512", argLength: 3, reg: fp31, asm: "VPSHRDVQ", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VPSHRDVQMasked512", argLength: 4, reg: fp3kfp, asm: "VPSHRDVQ", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VPSRLVQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRLVQ", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSRAVQ512", argLength: 2, reg: fp21, asm: "VPSRAVQ", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSRAVQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRAVQ", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSUBQ512", argLength: 2, reg: fp21, asm: "VPSUBQ", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSUBQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBQ", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPXORQ512", argLength: 2, reg: fp21, asm: "VPXORQ", commutative: true, typ: "Vec512", resultInArg0: false},
- {name: "VPXORQMasked512", argLength: 3, reg: fp2kfp, asm: "VPXORQ", commutative: true, typ: "Vec512", resultInArg0: false},
- {name: "VPABSB128", argLength: 1, reg: fp11, asm: "VPABSB", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPABSBMasked128", argLength: 2, reg: fpkfp, asm: "VPABSB", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPADDB128", argLength: 2, reg: fp21, asm: "VPADDB", commutative: true, typ: "Vec128", resultInArg0: false},
- {name: "VPADDBMasked128", argLength: 3, reg: fp2kfp, asm: "VPADDB", commutative: true, typ: "Vec128", resultInArg0: false},
- {name: "VPAND128", argLength: 2, reg: fp21, asm: "VPAND", commutative: true, typ: "Vec128", resultInArg0: false},
- {name: "VPANDN128", argLength: 2, reg: fp21, asm: "VPANDN", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPCMPEQB128", argLength: 2, reg: fp21, asm: "VPCMPEQB", commutative: true, typ: "Vec128", resultInArg0: false},
- {name: "VPCMPGTB128", argLength: 2, reg: fp21, asm: "VPCMPGTB", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPMAXSB128", argLength: 2, reg: fp21, asm: "VPMAXSB", commutative: true, typ: "Vec128", resultInArg0: false},
- {name: "VPMAXSBMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXSB", commutative: true, typ: "Vec128", resultInArg0: false},
- {name: "VPMINSB128", argLength: 2, reg: fp21, asm: "VPMINSB", commutative: true, typ: "Vec128", resultInArg0: false},
- {name: "VPMINSBMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINSB", commutative: true, typ: "Vec128", resultInArg0: false},
- {name: "VPOR128", argLength: 2, reg: fp21, asm: "VPOR", commutative: true, typ: "Vec128", resultInArg0: false},
- {name: "VPOPCNTB128", argLength: 1, reg: fp11, asm: "VPOPCNTB", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPOPCNTBMasked128", argLength: 2, reg: fpkfp, asm: "VPOPCNTB", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPADDSB128", argLength: 2, reg: fp21, asm: "VPADDSB", commutative: true, typ: "Vec128", resultInArg0: false},
- {name: "VPADDSBMasked128", argLength: 3, reg: fp2kfp, asm: "VPADDSB", commutative: true, typ: "Vec128", resultInArg0: false},
- {name: "VPSUBSB128", argLength: 2, reg: fp21, asm: "VPSUBSB", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSUBSBMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBSB", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSIGNB128", argLength: 2, reg: fp21, asm: "VPSIGNB", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSUBB128", argLength: 2, reg: fp21, asm: "VPSUBB", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSUBBMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBB", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPXOR128", argLength: 2, reg: fp21, asm: "VPXOR", commutative: true, typ: "Vec128", resultInArg0: false},
- {name: "VPABSB256", argLength: 1, reg: fp11, asm: "VPABSB", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPABSBMasked256", argLength: 2, reg: fpkfp, asm: "VPABSB", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPADDB256", argLength: 2, reg: fp21, asm: "VPADDB", commutative: true, typ: "Vec256", resultInArg0: false},
- {name: "VPADDBMasked256", argLength: 3, reg: fp2kfp, asm: "VPADDB", commutative: true, typ: "Vec256", resultInArg0: false},
- {name: "VPAND256", argLength: 2, reg: fp21, asm: "VPAND", commutative: true, typ: "Vec256", resultInArg0: false},
- {name: "VPANDN256", argLength: 2, reg: fp21, asm: "VPANDN", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPCMPEQB256", argLength: 2, reg: fp21, asm: "VPCMPEQB", commutative: true, typ: "Vec256", resultInArg0: false},
- {name: "VPCMPGTB256", argLength: 2, reg: fp21, asm: "VPCMPGTB", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPMAXSB256", argLength: 2, reg: fp21, asm: "VPMAXSB", commutative: true, typ: "Vec256", resultInArg0: false},
- {name: "VPMAXSBMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXSB", commutative: true, typ: "Vec256", resultInArg0: false},
- {name: "VPMINSB256", argLength: 2, reg: fp21, asm: "VPMINSB", commutative: true, typ: "Vec256", resultInArg0: false},
- {name: "VPMINSBMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINSB", commutative: true, typ: "Vec256", resultInArg0: false},
- {name: "VPOR256", argLength: 2, reg: fp21, asm: "VPOR", commutative: true, typ: "Vec256", resultInArg0: false},
- {name: "VPOPCNTB256", argLength: 1, reg: fp11, asm: "VPOPCNTB", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPOPCNTBMasked256", argLength: 2, reg: fpkfp, asm: "VPOPCNTB", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPADDSB256", argLength: 2, reg: fp21, asm: "VPADDSB", commutative: true, typ: "Vec256", resultInArg0: false},
- {name: "VPADDSBMasked256", argLength: 3, reg: fp2kfp, asm: "VPADDSB", commutative: true, typ: "Vec256", resultInArg0: false},
- {name: "VPSUBSB256", argLength: 2, reg: fp21, asm: "VPSUBSB", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSUBSBMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBSB", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSIGNB256", argLength: 2, reg: fp21, asm: "VPSIGNB", commutative: false, typ:
"Vec256", resultInArg0: false}, - {name: "VPSUBB256", argLength: 2, reg: fp21, asm: "VPSUBB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBBMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPXOR256", argLength: 2, reg: fp21, asm: "VPXOR", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPABSB512", argLength: 1, reg: fp11, asm: "VPABSB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPABSBMasked512", argLength: 2, reg: fpkfp, asm: "VPABSB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDB512", argLength: 2, reg: fp21, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPADDBMasked512", argLength: 3, reg: fp2kfp, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXSB512", argLength: 2, reg: fp21, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXSBMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSB512", argLength: 2, reg: fp21, asm: "VPMINSB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSBMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINSB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPOPCNTB512", argLength: 1, reg: fp11, asm: "VPOPCNTB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPOPCNTBMasked512", argLength: 2, reg: fpkfp, asm: "VPOPCNTB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDSB512", argLength: 2, reg: fp21, asm: "VPADDSB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPADDSBMasked512", argLength: 3, reg: fp2kfp, asm: "VPADDSB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBSB512", argLength: 2, reg: fp21, asm: "VPSUBSB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBSBMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBSB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBB512", argLength: 2, reg: fp21, asm: "VPSUBB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBBMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPAVGW256", argLength: 2, reg: fp21, asm: "VPAVGW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPAVGWMasked256", argLength: 3, reg: fp2kfp, asm: "VPAVGW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUW256", argLength: 2, reg: fp21, asm: "VPMAXUW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXUW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUW256", argLength: 2, reg: fp21, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULHUW256", argLength: 2, reg: fp21, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULHUWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPAVGW512", argLength: 2, reg: fp21, asm: "VPAVGW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPAVGWMasked512", argLength: 3, reg: fp2kfp, asm: 
"VPAVGW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXUW512", argLength: 2, reg: fp21, asm: "VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXUWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUW512", argLength: 2, reg: fp21, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULHUW512", argLength: 2, reg: fp21, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULHUWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPAVGW128", argLength: 2, reg: fp21, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPAVGWMasked128", argLength: 3, reg: fp2kfp, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUW128", argLength: 2, reg: fp21, asm: "VPMAXUW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXUW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUW128", argLength: 2, reg: fp21, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULHUW128", argLength: 2, reg: fp21, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULHUWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUD512", argLength: 2, reg: fp21, asm: "VPMAXUD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXUDMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXUD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUD512", argLength: 2, reg: fp21, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUDMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXUD128", argLength: 2, reg: fp21, asm: "VPMAXUD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUDMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXUD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUD128", argLength: 2, reg: fp21, asm: "VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUDMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULUDQ128", argLength: 2, reg: fp21, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUD256", argLength: 2, reg: fp21, asm: "VPMAXUD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUDMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXUD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUD256", argLength: 2, reg: fp21, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUDMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULUDQ256", argLength: 2, reg: fp21, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: 
"VPMAXUQ128", argLength: 2, reg: fp21, asm: "VPMAXUQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUQMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXUQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUQ128", argLength: 2, reg: fp21, asm: "VPMINUQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUQMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINUQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULUDQMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUQ256", argLength: 2, reg: fp21, asm: "VPMAXUQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUQMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXUQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUQ256", argLength: 2, reg: fp21, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUQMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULUDQMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUQ512", argLength: 2, reg: fp21, asm: "VPMAXUQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXUQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXUQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUQ512", argLength: 2, reg: fp21, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULUDQ512", argLength: 2, reg: fp21, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULUDQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPAVGB128", argLength: 2, reg: fp21, asm: "VPAVGB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPAVGBMasked128", argLength: 3, reg: fp2kfp, asm: "VPAVGB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VGF2P8MULB128", argLength: 2, reg: fp21, asm: "VGF2P8MULB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VGF2P8MULBMasked128", argLength: 3, reg: fp2kfp, asm: "VGF2P8MULB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUB128", argLength: 2, reg: fp21, asm: "VPMAXUB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUBMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXUB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUB128", argLength: 2, reg: fp21, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUBMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMADDUBSW128", argLength: 2, reg: fp21, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMADDUBSWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPAVGB256", argLength: 2, reg: fp21, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPAVGBMasked256", argLength: 3, reg: fp2kfp, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VGF2P8MULB256", argLength: 2, reg: fp21, asm: 
"VGF2P8MULB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VGF2P8MULBMasked256", argLength: 3, reg: fp2kfp, asm: "VGF2P8MULB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUB256", argLength: 2, reg: fp21, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUBMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUB256", argLength: 2, reg: fp21, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUBMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMADDUBSW256", argLength: 2, reg: fp21, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMADDUBSWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPAVGB512", argLength: 2, reg: fp21, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPAVGBMasked512", argLength: 3, reg: fp2kfp, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VGF2P8MULB512", argLength: 2, reg: fp21, asm: "VGF2P8MULB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VGF2P8MULBMasked512", argLength: 3, reg: fp2kfp, asm: "VGF2P8MULB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXUB512", argLength: 2, reg: fp21, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXUBMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUB512", argLength: 2, reg: fp21, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUBMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMADDUBSW512", argLength: 2, reg: fp21, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPMADDUBSWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRNDSCALEPS512", argLength: 1, reg: fp11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRNDSCALEPSMasked512", argLength: 2, reg: fpkfp, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VREDUCEPS512", argLength: 1, reg: fp11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VREDUCEPSMasked512", argLength: 2, reg: fpkfp, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VCMPPS512", argLength: 2, reg: fp2k, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VCMPPSMasked512", argLength: 3, reg: fp2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VROUNDPS128", argLength: 1, reg: fp11, asm: "VROUNDPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRNDSCALEPS128", argLength: 1, reg: fp11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRNDSCALEPSMasked128", argLength: 2, reg: fpkfp, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VREDUCEPS128", argLength: 1, reg: fp11, asm: 
"VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VREDUCEPSMasked128", argLength: 2, reg: fpkfp, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VCMPPS128", argLength: 2, reg: fp21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VCMPPSMasked128", argLength: 3, reg: fp2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VROUNDPS256", argLength: 1, reg: fp11, asm: "VROUNDPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRNDSCALEPS256", argLength: 1, reg: fp11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRNDSCALEPSMasked256", argLength: 2, reg: fpkfp, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VREDUCEPS256", argLength: 1, reg: fp11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VREDUCEPSMasked256", argLength: 2, reg: fpkfp, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VCMPPS256", argLength: 2, reg: fp21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VCMPPSMasked256", argLength: 3, reg: fp2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VEXTRACTF128128", argLength: 1, reg: fp11, asm: "VEXTRACTF128", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VINSERTF128256", argLength: 2, reg: fp21, asm: "VINSERTF128", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VROUNDPD128", argLength: 1, reg: fp11, asm: "VROUNDPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRNDSCALEPD128", argLength: 1, reg: fp11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRNDSCALEPDMasked128", argLength: 2, reg: fpkfp, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VREDUCEPD128", argLength: 1, reg: fp11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VREDUCEPDMasked128", argLength: 2, reg: fpkfp, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VDPPD128", argLength: 2, reg: fp21, asm: "VDPPD", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VCMPPD128", argLength: 2, reg: fp21, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VCMPPDMasked128", argLength: 3, reg: fp2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VROUNDPD256", argLength: 1, reg: fp11, asm: "VROUNDPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRNDSCALEPD256", argLength: 1, reg: fp11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRNDSCALEPDMasked256", argLength: 2, reg: fpkfp, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VREDUCEPD256", argLength: 1, reg: fp11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VREDUCEPDMasked256", argLength: 2, reg: fpkfp, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec256", 
resultInArg0: false},
- {name: "VCMPPD256", argLength: 2, reg: fp21, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false},
- {name: "VCMPPDMasked256", argLength: 3, reg: fp2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VRNDSCALEPD512", argLength: 1, reg: fp11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VRNDSCALEPDMasked512", argLength: 2, reg: fpkfp, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VREDUCEPD512", argLength: 1, reg: fp11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VREDUCEPDMasked512", argLength: 2, reg: fpkfp, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VCMPPD512", argLength: 2, reg: fp2k, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VCMPPDMasked512", argLength: 3, reg: fp2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPWMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPW256", argLength: 2, reg: fp2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false},
- {name: "VPSHLDW256", argLength: 2, reg: fp21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSHLDWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSHRDW256", argLength: 2, reg: fp21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSHRDWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPCMPW512", argLength: 2, reg: fp2k, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPWMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPSHLDW512", argLength: 2, reg: fp21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSHLDWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSHRDW512", argLength: 2, reg: fp21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSHRDWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPCMPWMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPEXTRW128", argLength: 1, reg: fpgp, asm: "VPEXTRW", aux: "Int8", commutative: false, typ: "int16", resultInArg0: false},
- {name: "VPCMPW128", argLength: 2, reg: fp2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false},
- {name: "VPINSRW128", argLength: 2, reg: fpgpfp, asm: "VPINSRW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSHLDW128", argLength: 2, reg: fp21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSHLDWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSHRDW128", argLength: 2, reg: fp21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSHRDWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPCMPD512", argLength: 2, reg: fp2k, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPDMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPROLD512", argLength: 1, reg: fp11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPROLDMasked512", argLength: 2, reg: fpkfp, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPRORD512", argLength: 1, reg: fp11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPRORDMasked512", argLength: 2, reg: fpkfp, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSHLDD512", argLength: 2, reg: fp21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSHLDDMasked512", argLength: 3, reg: fp2kfp, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSHRDD512", argLength: 2, reg: fp21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSHRDDMasked512", argLength: 3, reg: fp2kfp, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPCMPDMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPEXTRD128", argLength: 1, reg: fpgp, asm: "VPEXTRD", aux: "Int8", commutative: false, typ: "int32", resultInArg0: false},
- {name: "VPCMPD128", argLength: 2, reg: fp2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false},
- {name: "VPROLD128", argLength: 1, reg: fp11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPROLDMasked128", argLength: 2, reg: fpkfp, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPRORD128", argLength: 1, reg: fp11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPRORDMasked128", argLength: 2, reg: fpkfp, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPINSRD128", argLength: 2, reg: fpgpfp, asm: "VPINSRD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSHLDD128", argLength: 2, reg: fp21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSHLDDMasked128", argLength: 3, reg: fp2kfp, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSHRDD128", argLength: 2, reg: fp21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSHRDDMasked128", argLength: 3, reg: fp2kfp, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPCMPDMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPD256", argLength: 2, reg: fp2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false},
- {name: "VPROLD256", argLength: 1, reg: fp11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPROLDMasked256", argLength: 2, reg: fpkfp, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPRORD256", argLength: 1, reg: fp11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPRORDMasked256", argLength: 2, reg: fpkfp, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSHLDD256", argLength: 2, reg: fp21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSHLDDMasked256", argLength: 3, reg: fp2kfp, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSHRDD256", argLength: 2, reg: fp21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSHRDDMasked256", argLength: 3, reg: fp2kfp, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPCMPQMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPEXTRQ128", argLength: 1, reg: fpgp, asm: "VPEXTRQ", aux: "Int8", commutative: false, typ: "int64", resultInArg0: false},
- {name: "VPCMPQ128", argLength: 2, reg: fp2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false},
- {name: "VPROLQ128", argLength: 1, reg: fp11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPROLQMasked128", argLength: 2, reg: fpkfp, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPRORQ128", argLength: 1, reg: fp11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPRORQMasked128", argLength: 2, reg: fpkfp, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPINSRQ128", argLength: 2, reg: fpgpfp, asm: "VPINSRQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSHLDQ128", argLength: 2, reg: fp21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSHLDQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSHRDQ128", argLength: 2, reg: fp21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSHRDQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPCMPQMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPQ256", argLength: 2, reg: fp2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false},
- {name: "VPROLQ256", argLength: 1, reg: fp11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPROLQMasked256", argLength: 2, reg: fpkfp, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPRORQ256", argLength: 1, reg: fp11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPRORQMasked256", argLength: 2, reg: fpkfp, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSHLDQ256", argLength: 2, reg: fp21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSHLDQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSHRDQ256", argLength: 2, reg: fp21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSHRDQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPCMPQ512", argLength: 2, reg: fp2k, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPQMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPROLQ512", argLength: 1, reg: fp11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPROLQMasked512", argLength: 2, reg: fpkfp, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPRORQ512", argLength: 1, reg: fp11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPRORQMasked512", argLength: 2, reg: fpkfp, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSHLDQ512", argLength: 2, reg: fp21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSHLDQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSHRDQ512", argLength: 2, reg: fp21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSHRDQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPCMPBMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPEXTRB128", argLength: 1, reg: fpgp, asm: "VPEXTRB", aux: "Int8", commutative: false, typ: "int8", resultInArg0: false},
- {name: "VPCMPB128", argLength: 2, reg: fp2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false},
- {name: "VPINSRB128", argLength: 2, reg: fpgpfp, asm: "VPINSRB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPCMPBMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VEXTRACTI128128", argLength: 1, reg: fp11, asm: "VEXTRACTI128", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPCMPB256", argLength: 2, reg: fp2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false},
- {name: "VINSERTI128256", argLength: 2, reg: fp21, asm: "VINSERTI128", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPCMPB512", argLength: 2, reg: fp2k, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPBMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPUW256", argLength: 2, reg: fp2k, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPUWMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPUW512", argLength: 2, reg: fp2k, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPUWMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPUW128", argLength: 2, reg: fp2k, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPUWMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPUD512", argLength: 2, reg: fp2k, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPUDMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPUD128", argLength: 2, reg: fp2k, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPUDMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPUD256", argLength: 2, reg: fp2k, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPUDMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPUQ128", argLength: 2, reg: fp2k, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPUQMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPUQ256", argLength: 2, reg: fp2k, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPUQMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPUQ512", argLength: 2, reg: fp2k, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPUQMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPUB128", argLength: 2, reg: fp2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPUBMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VGF2P8AFFINEQB128", argLength: 2, reg: fp21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VGF2P8AFFINEINVQB128", argLength: 2, reg: fp21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VGF2P8AFFINEINVQBMasked128", argLength: 3, reg: fp2kfp, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VGF2P8AFFINEQBMasked128", argLength: 3, reg: fp2kfp, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPCMPUB256", argLength: 2, reg: fp2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPUBMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VGF2P8AFFINEQB256", argLength: 2, reg: fp21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VGF2P8AFFINEINVQB256", argLength: 2, reg: fp21, asm: "VGF2P8AFFINEINVQB", commutative: false, typ: "Vec256",
resultInArg0: false}, - {name: "VGF2P8AFFINEINVQBMasked256", argLength: 3, reg: fp2kfp, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VGF2P8AFFINEQBMasked256", argLength: 3, reg: fp2kfp, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPUB512", argLength: 2, reg: fp2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUBMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VGF2P8AFFINEQB512", argLength: 2, reg: fp21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VGF2P8AFFINEINVQB512", argLength: 2, reg: fp21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VGF2P8AFFINEINVQBMasked512", argLength: 3, reg: fp2kfp, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VGF2P8AFFINEQBMasked512", argLength: 3, reg: fp2kfp, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VADDPS512", argLength: 2, reg: v21, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VADDPSMasked512", argLength: 3, reg: v2kv, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VRCP14PS512", argLength: 1, reg: v11, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRCP14PSMasked512", argLength: 2, reg: vkv, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRSQRT14PS512", argLength: 1, reg: v11, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRSQRT14PSMasked512", argLength: 2, reg: vkv, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VDIVPS512", argLength: 2, reg: v21, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VDIVPSMasked512", argLength: 3, reg: v2kv, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VFMADD213PS512", argLength: 3, reg: v31, asm: "VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADD213PSMasked512", argLength: 4, reg: v3kv, asm: "VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB213PS512", argLength: 3, reg: v31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB213PSMasked512", argLength: 4, reg: v3kv, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD213PS512", argLength: 3, reg: v31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD213PSMasked512", argLength: 4, reg: v3kv, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VMAXPS512", argLength: 2, reg: v21, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMAXPSMasked512", argLength: 3, reg: v2kv, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMINPS512", argLength: 2, reg: v21, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMINPSMasked512", argLength: 3, reg: v2kv, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMULPS512", argLength: 2, reg: 
v21, asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VSCALEFPS512", argLength: 2, reg: v21, asm: "VSCALEFPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSCALEFPSMasked512", argLength: 3, reg: v2kv, asm: "VSCALEFPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VMULPSMasked512", argLength: 3, reg: v2kv, asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VSQRTPS512", argLength: 1, reg: v11, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSQRTPSMasked512", argLength: 2, reg: vkv, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSUBPS512", argLength: 2, reg: v21, asm: "VSUBPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSUBPSMasked512", argLength: 3, reg: v2kv, asm: "VSUBPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VADDPS128", argLength: 2, reg: v21, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VADDPSMasked128", argLength: 3, reg: v2kv, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VADDSUBPS128", argLength: 2, reg: v21, asm: "VADDSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRCP14PS128", argLength: 1, reg: v11, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRCP14PSMasked128", argLength: 2, reg: vkv, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRSQRTPS128", argLength: 1, reg: v11, asm: "VRSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRSQRT14PSMasked128", argLength: 2, reg: vkv, asm: "VRSQRT14PS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VDIVPS128", argLength: 2, reg: v21, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VDIVPSMasked128", argLength: 3, reg: v2kv, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VFMADD213PS128", argLength: 3, reg: v31, asm: "VFMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADD213PSMasked128", argLength: 4, reg: v3kv, asm: "VFMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADDSUB213PS128", argLength: 3, reg: v31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADDSUB213PSMasked128", argLength: 4, reg: v3kv, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUBADD213PS128", argLength: 3, reg: v31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUBADD213PSMasked128", argLength: 4, reg: v3kv, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VMAXPS128", argLength: 2, reg: v21, asm: "VMAXPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMAXPSMasked128", argLength: 3, reg: v2kv, asm: "VMAXPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMINPS128", argLength: 2, reg: v21, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMINPSMasked128", argLength: 3, reg: v2kv, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMULPS128", argLength: 2, reg: v21, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VSCALEFPS128", argLength: 2, reg: v21, asm: "VSCALEFPS", commutative: false, typ: 
"Vec128", resultInArg0: false}, + {name: "VSCALEFPSMasked128", argLength: 3, reg: v2kv, asm: "VSCALEFPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VMULPSMasked128", argLength: 3, reg: v2kv, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VHADDPS128", argLength: 2, reg: v21, asm: "VHADDPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VHSUBPS128", argLength: 2, reg: v21, asm: "VHSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSQRTPS128", argLength: 1, reg: v11, asm: "VSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSQRTPSMasked128", argLength: 2, reg: vkv, asm: "VSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSUBPS128", argLength: 2, reg: v21, asm: "VSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSUBPSMasked128", argLength: 3, reg: v2kv, asm: "VSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VADDPS256", argLength: 2, reg: v21, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VADDPSMasked256", argLength: 3, reg: v2kv, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VADDSUBPS256", argLength: 2, reg: v21, asm: "VADDSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRCP14PS256", argLength: 1, reg: v11, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRCP14PSMasked256", argLength: 2, reg: vkv, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRSQRTPS256", argLength: 1, reg: v11, asm: "VRSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRSQRT14PSMasked256", argLength: 2, reg: vkv, asm: "VRSQRT14PS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VDIVPS256", argLength: 2, reg: v21, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VDIVPSMasked256", argLength: 3, reg: v2kv, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VFMADD213PS256", argLength: 3, reg: v31, asm: "VFMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADD213PSMasked256", argLength: 4, reg: v3kv, asm: "VFMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB213PS256", argLength: 3, reg: v31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB213PSMasked256", argLength: 4, reg: v3kv, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUBADD213PS256", argLength: 3, reg: v31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUBADD213PSMasked256", argLength: 4, reg: v3kv, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VMAXPS256", argLength: 2, reg: v21, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMAXPSMasked256", argLength: 3, reg: v2kv, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMINPS256", argLength: 2, reg: v21, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMINPSMasked256", argLength: 3, reg: v2kv, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMULPS256", argLength: 2, reg: v21, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: 
"VSCALEFPS256", argLength: 2, reg: v21, asm: "VSCALEFPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSCALEFPSMasked256", argLength: 3, reg: v2kv, asm: "VSCALEFPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VMULPSMasked256", argLength: 3, reg: v2kv, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VHADDPS256", argLength: 2, reg: v21, asm: "VHADDPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VHSUBPS256", argLength: 2, reg: v21, asm: "VHSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSQRTPS256", argLength: 1, reg: v11, asm: "VSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSQRTPSMasked256", argLength: 2, reg: vkv, asm: "VSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSUBPS256", argLength: 2, reg: v21, asm: "VSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSUBPSMasked256", argLength: 3, reg: v2kv, asm: "VSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VADDPD128", argLength: 2, reg: v21, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VADDPDMasked128", argLength: 3, reg: v2kv, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VADDSUBPD128", argLength: 2, reg: v21, asm: "VADDSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRCP14PD128", argLength: 1, reg: v11, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRCP14PDMasked128", argLength: 2, reg: vkv, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRSQRT14PD128", argLength: 1, reg: v11, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRSQRT14PDMasked128", argLength: 2, reg: vkv, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VDIVPD128", argLength: 2, reg: v21, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VDIVPDMasked128", argLength: 3, reg: v2kv, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VFMADD213PD128", argLength: 3, reg: v31, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADD213PDMasked128", argLength: 4, reg: v3kv, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADDSUB213PD128", argLength: 3, reg: v31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADDSUB213PDMasked128", argLength: 4, reg: v3kv, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUBADD213PD128", argLength: 3, reg: v31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUBADD213PDMasked128", argLength: 4, reg: v3kv, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VMAXPD128", argLength: 2, reg: v21, asm: "VMAXPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMAXPDMasked128", argLength: 3, reg: v2kv, asm: "VMAXPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMINPD128", argLength: 2, reg: v21, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMINPDMasked128", argLength: 3, reg: v2kv, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMULPD128", argLength: 2, reg: v21, asm: 
"VMULPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VSCALEFPD128", argLength: 2, reg: v21, asm: "VSCALEFPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSCALEFPDMasked128", argLength: 3, reg: v2kv, asm: "VSCALEFPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VMULPDMasked128", argLength: 3, reg: v2kv, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VHADDPD128", argLength: 2, reg: v21, asm: "VHADDPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VHSUBPD128", argLength: 2, reg: v21, asm: "VHSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSQRTPD128", argLength: 1, reg: v11, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSQRTPDMasked128", argLength: 2, reg: vkv, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSUBPD128", argLength: 2, reg: v21, asm: "VSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSUBPDMasked128", argLength: 3, reg: v2kv, asm: "VSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VADDPD256", argLength: 2, reg: v21, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VADDPDMasked256", argLength: 3, reg: v2kv, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VADDSUBPD256", argLength: 2, reg: v21, asm: "VADDSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRCP14PD256", argLength: 1, reg: v11, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRCP14PDMasked256", argLength: 2, reg: vkv, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRSQRT14PD256", argLength: 1, reg: v11, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRSQRT14PDMasked256", argLength: 2, reg: vkv, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VDIVPD256", argLength: 2, reg: v21, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VDIVPDMasked256", argLength: 3, reg: v2kv, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VFMADD213PD256", argLength: 3, reg: v31, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADD213PDMasked256", argLength: 4, reg: v3kv, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB213PD256", argLength: 3, reg: v31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB213PDMasked256", argLength: 4, reg: v3kv, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUBADD213PD256", argLength: 3, reg: v31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUBADD213PDMasked256", argLength: 4, reg: v3kv, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VMAXPD256", argLength: 2, reg: v21, asm: "VMAXPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMAXPDMasked256", argLength: 3, reg: v2kv, asm: "VMAXPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMINPD256", argLength: 2, reg: v21, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMINPDMasked256", argLength: 3, reg: v2kv, asm: "VMINPD", commutative: true, typ: 
"Vec256", resultInArg0: false}, + {name: "VMULPD256", argLength: 2, reg: v21, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VSCALEFPD256", argLength: 2, reg: v21, asm: "VSCALEFPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSCALEFPDMasked256", argLength: 3, reg: v2kv, asm: "VSCALEFPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VMULPDMasked256", argLength: 3, reg: v2kv, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VHADDPD256", argLength: 2, reg: v21, asm: "VHADDPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VHSUBPD256", argLength: 2, reg: v21, asm: "VHSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSQRTPD256", argLength: 1, reg: v11, asm: "VSQRTPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSQRTPDMasked256", argLength: 2, reg: vkv, asm: "VSQRTPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSUBPD256", argLength: 2, reg: v21, asm: "VSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSUBPDMasked256", argLength: 3, reg: v2kv, asm: "VSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VADDPD512", argLength: 2, reg: v21, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VADDPDMasked512", argLength: 3, reg: v2kv, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VRCP14PD512", argLength: 1, reg: v11, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRCP14PDMasked512", argLength: 2, reg: vkv, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRSQRT14PD512", argLength: 1, reg: v11, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRSQRT14PDMasked512", argLength: 2, reg: vkv, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VDIVPD512", argLength: 2, reg: v21, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VDIVPDMasked512", argLength: 3, reg: v2kv, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VFMADD213PD512", argLength: 3, reg: v31, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADD213PDMasked512", argLength: 4, reg: v3kv, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB213PD512", argLength: 3, reg: v31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB213PDMasked512", argLength: 4, reg: v3kv, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD213PD512", argLength: 3, reg: v31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD213PDMasked512", argLength: 4, reg: v3kv, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VMAXPD512", argLength: 2, reg: v21, asm: "VMAXPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMAXPDMasked512", argLength: 3, reg: v2kv, asm: "VMAXPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMINPD512", argLength: 2, reg: v21, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMINPDMasked512", argLength: 3, reg: v2kv, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: 
"VMULPD512", argLength: 2, reg: v21, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VSCALEFPD512", argLength: 2, reg: v21, asm: "VSCALEFPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSCALEFPDMasked512", argLength: 3, reg: v2kv, asm: "VSCALEFPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VMULPDMasked512", argLength: 3, reg: v2kv, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VSQRTPD512", argLength: 1, reg: v11, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSQRTPDMasked512", argLength: 2, reg: vkv, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSUBPD512", argLength: 2, reg: v21, asm: "VSUBPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSUBPDMasked512", argLength: 3, reg: v2kv, asm: "VSUBPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPABSW256", argLength: 1, reg: v11, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSWMasked256", argLength: 2, reg: vkv, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDW256", argLength: 2, reg: v21, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDWMasked256", argLength: 3, reg: v2kv, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPEQW256", argLength: 2, reg: v21, asm: "VPCMPEQW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPGTW256", argLength: 2, reg: v21, asm: "VPCMPGTW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSW256", argLength: 2, reg: v21, asm: "VPMAXSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSWMasked256", argLength: 3, reg: v2kv, asm: "VPMAXSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSW256", argLength: 2, reg: v21, asm: "VPMINSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSWMasked256", argLength: 3, reg: v2kv, asm: "VPMINSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULHW256", argLength: 2, reg: v21, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULHWMasked256", argLength: 3, reg: v2kv, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLW256", argLength: 2, reg: v21, asm: "VPMULLW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLWMasked256", argLength: 3, reg: v2kv, asm: "VPMULLW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMADDWD256", argLength: 2, reg: v21, asm: "VPMADDWD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMADDWDMasked256", argLength: 3, reg: v2kv, asm: "VPMADDWD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPHADDW256", argLength: 2, reg: v21, asm: "VPHADDW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPHSUBW256", argLength: 2, reg: v21, asm: "VPHSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTW256", argLength: 1, reg: v11, asm: "VPOPCNTW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTWMasked256", argLength: 2, reg: vkv, asm: "VPOPCNTW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDSW256", argLength: 2, reg: v21, asm: "VPADDSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: 
"VPADDSWMasked256", argLength: 3, reg: v2kv, asm: "VPADDSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPHADDSW256", argLength: 2, reg: v21, asm: "VPHADDSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPHSUBSW256", argLength: 2, reg: v21, asm: "VPHSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBSW256", argLength: 2, reg: v21, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBSWMasked256", argLength: 3, reg: v2kv, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLW256", argLength: 2, reg: vfpv, asm: "VPSLLW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLW256", argLength: 2, reg: vfpv, asm: "VPSRLW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAW256", argLength: 2, reg: vfpv, asm: "VPSRAW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLVW256", argLength: 2, reg: v21, asm: "VPSLLVW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDVW256", argLength: 3, reg: v31, asm: "VPSHLDVW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHLDVWMasked256", argLength: 4, reg: v3kv, asm: "VPSHLDVW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSLLVWMasked256", argLength: 3, reg: v2kv, asm: "VPSLLVW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLVW256", argLength: 2, reg: v21, asm: "VPSRLVW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDVW256", argLength: 3, reg: v31, asm: "VPSHRDVW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHRDVWMasked256", argLength: 4, reg: v3kv, asm: "VPSHRDVW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRLVWMasked256", argLength: 3, reg: v2kv, asm: "VPSRLVW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAVW256", argLength: 2, reg: v21, asm: "VPSRAVW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAVWMasked256", argLength: 3, reg: v2kv, asm: "VPSRAVW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSIGNW256", argLength: 2, reg: v21, asm: "VPSIGNW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBW256", argLength: 2, reg: v21, asm: "VPSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBWMasked256", argLength: 3, reg: v2kv, asm: "VPSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSW512", argLength: 1, reg: v11, asm: "VPABSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPABSWMasked512", argLength: 2, reg: vkv, asm: "VPABSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDW512", argLength: 2, reg: v21, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDWMasked512", argLength: 3, reg: v2kv, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSW512", argLength: 2, reg: v21, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSWMasked512", argLength: 3, reg: v2kv, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSW512", argLength: 2, reg: v21, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSWMasked512", argLength: 3, reg: v2kv, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: 
"VPMULHW512", argLength: 2, reg: v21, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULHWMasked512", argLength: 3, reg: v2kv, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLW512", argLength: 2, reg: v21, asm: "VPMULLW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLWMasked512", argLength: 3, reg: v2kv, asm: "VPMULLW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMADDWD512", argLength: 2, reg: v21, asm: "VPMADDWD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMADDWDMasked512", argLength: 3, reg: v2kv, asm: "VPMADDWD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTW512", argLength: 1, reg: v11, asm: "VPOPCNTW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTWMasked512", argLength: 2, reg: vkv, asm: "VPOPCNTW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDSW512", argLength: 2, reg: v21, asm: "VPADDSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDSWMasked512", argLength: 3, reg: v2kv, asm: "VPADDSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBSW512", argLength: 2, reg: v21, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBSWMasked512", argLength: 3, reg: v2kv, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLVW512", argLength: 2, reg: v21, asm: "VPSLLVW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDVW512", argLength: 3, reg: v31, asm: "VPSHLDVW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHLDVWMasked512", argLength: 4, reg: v3kv, asm: "VPSHLDVW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSLLVWMasked512", argLength: 3, reg: v2kv, asm: "VPSLLVW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLVW512", argLength: 2, reg: v21, asm: "VPSRLVW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDVW512", argLength: 3, reg: v31, asm: "VPSHRDVW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHRDVWMasked512", argLength: 4, reg: v3kv, asm: "VPSHRDVW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRLVWMasked512", argLength: 3, reg: v2kv, asm: "VPSRLVW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAVW512", argLength: 2, reg: v21, asm: "VPSRAVW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAVWMasked512", argLength: 3, reg: v2kv, asm: "VPSRAVW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBW512", argLength: 2, reg: v21, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBWMasked512", argLength: 3, reg: v2kv, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPABSW128", argLength: 1, reg: v11, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSWMasked128", argLength: 2, reg: vkv, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDW128", argLength: 2, reg: v21, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDWMasked128", argLength: 3, reg: v2kv, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPEQW128", argLength: 2, reg: v21, asm: "VPCMPEQW", commutative: true, typ: "Vec128", resultInArg0: 
false}, + {name: "VPCMPGTW128", argLength: 2, reg: v21, asm: "VPCMPGTW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSW128", argLength: 2, reg: v21, asm: "VPMAXSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSWMasked128", argLength: 3, reg: v2kv, asm: "VPMAXSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSW128", argLength: 2, reg: v21, asm: "VPMINSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSWMasked128", argLength: 3, reg: v2kv, asm: "VPMINSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULHW128", argLength: 2, reg: v21, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULHWMasked128", argLength: 3, reg: v2kv, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLW128", argLength: 2, reg: v21, asm: "VPMULLW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLWMasked128", argLength: 3, reg: v2kv, asm: "VPMULLW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMADDWD128", argLength: 2, reg: v21, asm: "VPMADDWD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMADDWDMasked128", argLength: 3, reg: v2kv, asm: "VPMADDWD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPHADDW128", argLength: 2, reg: v21, asm: "VPHADDW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPHSUBW128", argLength: 2, reg: v21, asm: "VPHSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTW128", argLength: 1, reg: v11, asm: "VPOPCNTW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTWMasked128", argLength: 2, reg: vkv, asm: "VPOPCNTW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDSW128", argLength: 2, reg: v21, asm: "VPADDSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDSWMasked128", argLength: 3, reg: v2kv, asm: "VPADDSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPHADDSW128", argLength: 2, reg: v21, asm: "VPHADDSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPHSUBSW128", argLength: 2, reg: v21, asm: "VPHSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBSW128", argLength: 2, reg: v21, asm: "VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBSWMasked128", argLength: 3, reg: v2kv, asm: "VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLW128", argLength: 2, reg: vfpv, asm: "VPSLLW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLW128", argLength: 2, reg: vfpv, asm: "VPSRLW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAW128", argLength: 2, reg: vfpv, asm: "VPSRAW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLVW128", argLength: 2, reg: v21, asm: "VPSLLVW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDVW128", argLength: 3, reg: v31, asm: "VPSHLDVW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHLDVWMasked128", argLength: 4, reg: v3kv, asm: "VPSHLDVW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSLLVWMasked128", argLength: 3, reg: v2kv, asm: "VPSLLVW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLVW128", argLength: 2, reg: v21, asm: "VPSRLVW", commutative: false, typ: "Vec128", resultInArg0: 
false}, + {name: "VPSHRDVW128", argLength: 3, reg: v31, asm: "VPSHRDVW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHRDVWMasked128", argLength: 4, reg: v3kv, asm: "VPSHRDVW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRLVWMasked128", argLength: 3, reg: v2kv, asm: "VPSRLVW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAVW128", argLength: 2, reg: v21, asm: "VPSRAVW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAVWMasked128", argLength: 3, reg: v2kv, asm: "VPSRAVW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSIGNW128", argLength: 2, reg: v21, asm: "VPSIGNW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBW128", argLength: 2, reg: v21, asm: "VPSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBWMasked128", argLength: 3, reg: v2kv, asm: "VPSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSD512", argLength: 1, reg: v11, asm: "VPABSD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPABSDMasked512", argLength: 2, reg: vkv, asm: "VPABSD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDD512", argLength: 2, reg: v21, asm: "VPADDD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDDMasked512", argLength: 3, reg: v2kv, asm: "VPADDD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDD512", argLength: 2, reg: v21, asm: "VPANDD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDDMasked512", argLength: 3, reg: v2kv, asm: "VPANDD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDND512", argLength: 2, reg: v21, asm: "VPANDND", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPANDNDMasked512", argLength: 3, reg: v2kv, asm: "VPANDND", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSD512", argLength: 2, reg: v21, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSDMasked512", argLength: 3, reg: v2kv, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSD512", argLength: 2, reg: v21, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSDMasked512", argLength: 3, reg: v2kv, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLD512", argLength: 2, reg: v21, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLDMasked512", argLength: 3, reg: v2kv, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPORD512", argLength: 2, reg: v21, asm: "VPORD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPORDMasked512", argLength: 3, reg: v2kv, asm: "VPORD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPDPWSSD512", argLength: 3, reg: v31, asm: "VPDPWSSD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPDPWSSDMasked512", argLength: 4, reg: v3kv, asm: "VPDPWSSD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPOPCNTD512", argLength: 1, reg: v11, asm: "VPOPCNTD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTDMasked512", argLength: 2, reg: vkv, asm: "VPOPCNTD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLVD512", argLength: 2, reg: v21, asm: "VPROLVD", commutative: false, typ: "Vec512", resultInArg0: 
false}, + {name: "VPROLVDMasked512", argLength: 3, reg: v2kv, asm: "VPROLVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORVD512", argLength: 2, reg: v21, asm: "VPRORVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORVDMasked512", argLength: 3, reg: v2kv, asm: "VPRORVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPDPWSSDS512", argLength: 3, reg: v31, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPDPWSSDSMasked512", argLength: 4, reg: v3kv, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPDPBUSDS512", argLength: 3, reg: v31, asm: "VPDPBUSDS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPDPBUSDSMasked512", argLength: 4, reg: v3kv, asm: "VPDPBUSDS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSLLVD512", argLength: 2, reg: v21, asm: "VPSLLVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDVD512", argLength: 3, reg: v31, asm: "VPSHLDVD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHLDVDMasked512", argLength: 4, reg: v3kv, asm: "VPSHLDVD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSLLVDMasked512", argLength: 3, reg: v2kv, asm: "VPSLLVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLVD512", argLength: 2, reg: v21, asm: "VPSRLVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDVD512", argLength: 3, reg: v31, asm: "VPSHRDVD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHRDVDMasked512", argLength: 4, reg: v3kv, asm: "VPSHRDVD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRLVDMasked512", argLength: 3, reg: v2kv, asm: "VPSRLVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAVD512", argLength: 2, reg: v21, asm: "VPSRAVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAVDMasked512", argLength: 3, reg: v2kv, asm: "VPSRAVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBD512", argLength: 2, reg: v21, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBDMasked512", argLength: 3, reg: v2kv, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPDPBUSD512", argLength: 3, reg: v31, asm: "VPDPBUSD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPDPBUSDMasked512", argLength: 4, reg: v3kv, asm: "VPDPBUSD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPXORD512", argLength: 2, reg: v21, asm: "VPXORD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPXORDMasked512", argLength: 3, reg: v2kv, asm: "VPXORD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPABSD128", argLength: 1, reg: v11, asm: "VPABSD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSDMasked128", argLength: 2, reg: vkv, asm: "VPABSD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDD128", argLength: 2, reg: v21, asm: "VPADDD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDDMasked128", argLength: 3, reg: v2kv, asm: "VPADDD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPANDDMasked128", argLength: 3, reg: v2kv, asm: "VPANDD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPANDNDMasked128", argLength: 3, reg: v2kv, asm: "VPANDND", commutative: 
false, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPEQD128", argLength: 2, reg: v21, asm: "VPCMPEQD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPGTD128", argLength: 2, reg: v21, asm: "VPCMPGTD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSD128", argLength: 2, reg: v21, asm: "VPMAXSD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSDMasked128", argLength: 3, reg: v2kv, asm: "VPMAXSD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSD128", argLength: 2, reg: v21, asm: "VPMINSD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSDMasked128", argLength: 3, reg: v2kv, asm: "VPMINSD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULDQ128", argLength: 2, reg: v21, asm: "VPMULDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLD128", argLength: 2, reg: v21, asm: "VPMULLD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLDMasked128", argLength: 3, reg: v2kv, asm: "VPMULLD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPORDMasked128", argLength: 3, reg: v2kv, asm: "VPORD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPDPWSSD128", argLength: 3, reg: v31, asm: "VPDPWSSD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPDPWSSDMasked128", argLength: 4, reg: v3kv, asm: "VPDPWSSD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPHADDD128", argLength: 2, reg: v21, asm: "VPHADDD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPHSUBD128", argLength: 2, reg: v21, asm: "VPHSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTD128", argLength: 1, reg: v11, asm: "VPOPCNTD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTDMasked128", argLength: 2, reg: vkv, asm: "VPOPCNTD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLVD128", argLength: 2, reg: v21, asm: "VPROLVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLVDMasked128", argLength: 3, reg: v2kv, asm: "VPROLVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORVD128", argLength: 2, reg: v21, asm: "VPRORVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORVDMasked128", argLength: 3, reg: v2kv, asm: "VPRORVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPDPWSSDS128", argLength: 3, reg: v31, asm: "VPDPWSSDS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPDPWSSDSMasked128", argLength: 4, reg: v3kv, asm: "VPDPWSSDS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPDPBUSDS128", argLength: 3, reg: v31, asm: "VPDPBUSDS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPDPBUSDSMasked128", argLength: 4, reg: v3kv, asm: "VPDPBUSDS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSLLD128", argLength: 2, reg: vfpv, asm: "VPSLLD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLD128", argLength: 2, reg: vfpv, asm: "VPSRLD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAD128", argLength: 2, reg: vfpv, asm: "VPSRAD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLVD128", argLength: 2, reg: v21, asm: "VPSLLVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDVD128", argLength: 3, reg: v31, asm: "VPSHLDVD", 
commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHLDVDMasked128", argLength: 4, reg: v3kv, asm: "VPSHLDVD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSLLVDMasked128", argLength: 3, reg: v2kv, asm: "VPSLLVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLVD128", argLength: 2, reg: v21, asm: "VPSRLVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDVD128", argLength: 3, reg: v31, asm: "VPSHRDVD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHRDVDMasked128", argLength: 4, reg: v3kv, asm: "VPSHRDVD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRLVDMasked128", argLength: 3, reg: v2kv, asm: "VPSRLVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAVD128", argLength: 2, reg: v21, asm: "VPSRAVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAVDMasked128", argLength: 3, reg: v2kv, asm: "VPSRAVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSIGND128", argLength: 2, reg: v21, asm: "VPSIGND", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBD128", argLength: 2, reg: v21, asm: "VPSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBDMasked128", argLength: 3, reg: v2kv, asm: "VPSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPDPBUSD128", argLength: 3, reg: v31, asm: "VPDPBUSD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPDPBUSDMasked128", argLength: 4, reg: v3kv, asm: "VPDPBUSD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPXORDMasked128", argLength: 3, reg: v2kv, asm: "VPXORD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPABSD256", argLength: 1, reg: v11, asm: "VPABSD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSDMasked256", argLength: 2, reg: vkv, asm: "VPABSD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDD256", argLength: 2, reg: v21, asm: "VPADDD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDDMasked256", argLength: 3, reg: v2kv, asm: "VPADDD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPANDDMasked256", argLength: 3, reg: v2kv, asm: "VPANDD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPANDNDMasked256", argLength: 3, reg: v2kv, asm: "VPANDND", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPEQD256", argLength: 2, reg: v21, asm: "VPCMPEQD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPGTD256", argLength: 2, reg: v21, asm: "VPCMPGTD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSD256", argLength: 2, reg: v21, asm: "VPMAXSD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSDMasked256", argLength: 3, reg: v2kv, asm: "VPMAXSD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSD256", argLength: 2, reg: v21, asm: "VPMINSD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSDMasked256", argLength: 3, reg: v2kv, asm: "VPMINSD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULDQ256", argLength: 2, reg: v21, asm: "VPMULDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLD256", argLength: 2, reg: v21, asm: "VPMULLD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLDMasked256", argLength: 3, reg: v2kv, asm: 
"VPMULLD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPORDMasked256", argLength: 3, reg: v2kv, asm: "VPORD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPDPWSSD256", argLength: 3, reg: v31, asm: "VPDPWSSD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPDPWSSDMasked256", argLength: 4, reg: v3kv, asm: "VPDPWSSD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPHADDD256", argLength: 2, reg: v21, asm: "VPHADDD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPHSUBD256", argLength: 2, reg: v21, asm: "VPHSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTD256", argLength: 1, reg: v11, asm: "VPOPCNTD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTDMasked256", argLength: 2, reg: vkv, asm: "VPOPCNTD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLVD256", argLength: 2, reg: v21, asm: "VPROLVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLVDMasked256", argLength: 3, reg: v2kv, asm: "VPROLVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORVD256", argLength: 2, reg: v21, asm: "VPRORVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORVDMasked256", argLength: 3, reg: v2kv, asm: "VPRORVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPDPWSSDS256", argLength: 3, reg: v31, asm: "VPDPWSSDS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPDPWSSDSMasked256", argLength: 4, reg: v3kv, asm: "VPDPWSSDS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPDPBUSDS256", argLength: 3, reg: v31, asm: "VPDPBUSDS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPDPBUSDSMasked256", argLength: 4, reg: v3kv, asm: "VPDPBUSDS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSLLD256", argLength: 2, reg: vfpv, asm: "VPSLLD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLD256", argLength: 2, reg: vfpv, asm: "VPSRLD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAD256", argLength: 2, reg: vfpv, asm: "VPSRAD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLVD256", argLength: 2, reg: v21, asm: "VPSLLVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDVD256", argLength: 3, reg: v31, asm: "VPSHLDVD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHLDVDMasked256", argLength: 4, reg: v3kv, asm: "VPSHLDVD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSLLVDMasked256", argLength: 3, reg: v2kv, asm: "VPSLLVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLVD256", argLength: 2, reg: v21, asm: "VPSRLVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDVD256", argLength: 3, reg: v31, asm: "VPSHRDVD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHRDVDMasked256", argLength: 4, reg: v3kv, asm: "VPSHRDVD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRLVDMasked256", argLength: 3, reg: v2kv, asm: "VPSRLVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAVD256", argLength: 2, reg: v21, asm: "VPSRAVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAVDMasked256", argLength: 3, reg: v2kv, asm: "VPSRAVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSIGND256", 
argLength: 2, reg: v21, asm: "VPSIGND", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBD256", argLength: 2, reg: v21, asm: "VPSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBDMasked256", argLength: 3, reg: v2kv, asm: "VPSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPDPBUSD256", argLength: 3, reg: v31, asm: "VPDPBUSD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPDPBUSDMasked256", argLength: 4, reg: v3kv, asm: "VPDPBUSD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPXORDMasked256", argLength: 3, reg: v2kv, asm: "VPXORD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPABSQ128", argLength: 1, reg: v11, asm: "VPABSQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSQMasked128", argLength: 2, reg: vkv, asm: "VPABSQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDQ128", argLength: 2, reg: v21, asm: "VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDQMasked128", argLength: 3, reg: v2kv, asm: "VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPANDQMasked128", argLength: 3, reg: v2kv, asm: "VPANDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPANDNQMasked128", argLength: 3, reg: v2kv, asm: "VPANDNQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPEQQ128", argLength: 2, reg: v21, asm: "VPCMPEQQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSQ128", argLength: 2, reg: v21, asm: "VPMAXSQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSQMasked128", argLength: 3, reg: v2kv, asm: "VPMAXSQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSQ128", argLength: 2, reg: v21, asm: "VPMINSQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSQMasked128", argLength: 3, reg: v2kv, asm: "VPMINSQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULDQMasked128", argLength: 3, reg: v2kv, asm: "VPMULDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLQ128", argLength: 2, reg: v21, asm: "VPMULLQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLQMasked128", argLength: 3, reg: v2kv, asm: "VPMULLQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPORQMasked128", argLength: 3, reg: v2kv, asm: "VPORQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTQ128", argLength: 1, reg: v11, asm: "VPOPCNTQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTQMasked128", argLength: 2, reg: vkv, asm: "VPOPCNTQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLVQ128", argLength: 2, reg: v21, asm: "VPROLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLVQMasked128", argLength: 3, reg: v2kv, asm: "VPROLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORVQ128", argLength: 2, reg: v21, asm: "VPRORVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORVQMasked128", argLength: 3, reg: v2kv, asm: "VPRORVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLQ128", argLength: 2, reg: vfpv, asm: "VPSLLQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLQMasked128", argLength: 3, reg: vfpkv, asm: "VPSLLQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: 
"VPSRLQ128", argLength: 2, reg: vfpv, asm: "VPSRLQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLQMasked128", argLength: 3, reg: vfpkv, asm: "VPSRLQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAQ128", argLength: 2, reg: vfpv, asm: "VPSRAQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAQMasked128", argLength: 3, reg: vfpkv, asm: "VPSRAQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLVQ128", argLength: 2, reg: v21, asm: "VPSLLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDVQ128", argLength: 3, reg: v31, asm: "VPSHLDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHLDVQMasked128", argLength: 4, reg: v3kv, asm: "VPSHLDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSLLVQMasked128", argLength: 3, reg: v2kv, asm: "VPSLLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLVQ128", argLength: 2, reg: v21, asm: "VPSRLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDVQ128", argLength: 3, reg: v31, asm: "VPSHRDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHRDVQMasked128", argLength: 4, reg: v3kv, asm: "VPSHRDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRLVQMasked128", argLength: 3, reg: v2kv, asm: "VPSRLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAVQ128", argLength: 2, reg: v21, asm: "VPSRAVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAVQMasked128", argLength: 3, reg: v2kv, asm: "VPSRAVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBQ128", argLength: 2, reg: v21, asm: "VPSUBQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBQMasked128", argLength: 3, reg: v2kv, asm: "VPSUBQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPXORQMasked128", argLength: 3, reg: v2kv, asm: "VPXORQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPABSQ256", argLength: 1, reg: v11, asm: "VPABSQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSQMasked256", argLength: 2, reg: vkv, asm: "VPABSQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDQ256", argLength: 2, reg: v21, asm: "VPADDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDQMasked256", argLength: 3, reg: v2kv, asm: "VPADDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPANDQMasked256", argLength: 3, reg: v2kv, asm: "VPANDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPANDNQMasked256", argLength: 3, reg: v2kv, asm: "VPANDNQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPEQQ256", argLength: 2, reg: v21, asm: "VPCMPEQQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPGTQ256", argLength: 2, reg: v21, asm: "VPCMPGTQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSQ256", argLength: 2, reg: v21, asm: "VPMAXSQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSQMasked256", argLength: 3, reg: v2kv, asm: "VPMAXSQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSQ256", argLength: 2, reg: v21, asm: "VPMINSQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSQMasked256", argLength: 3, reg: v2kv, asm: "VPMINSQ", commutative: true, typ: "Vec256", resultInArg0: false}, 
+ {name: "VPMULDQMasked256", argLength: 3, reg: v2kv, asm: "VPMULDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLQ256", argLength: 2, reg: v21, asm: "VPMULLQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLQMasked256", argLength: 3, reg: v2kv, asm: "VPMULLQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPORQMasked256", argLength: 3, reg: v2kv, asm: "VPORQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTQ256", argLength: 1, reg: v11, asm: "VPOPCNTQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTQMasked256", argLength: 2, reg: vkv, asm: "VPOPCNTQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLVQ256", argLength: 2, reg: v21, asm: "VPROLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLVQMasked256", argLength: 3, reg: v2kv, asm: "VPROLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORVQ256", argLength: 2, reg: v21, asm: "VPRORVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORVQMasked256", argLength: 3, reg: v2kv, asm: "VPRORVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLQ256", argLength: 2, reg: vfpv, asm: "VPSLLQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLQMasked256", argLength: 3, reg: vfpkv, asm: "VPSLLQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLQ256", argLength: 2, reg: vfpv, asm: "VPSRLQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLQMasked256", argLength: 3, reg: vfpkv, asm: "VPSRLQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAQ256", argLength: 2, reg: vfpv, asm: "VPSRAQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAQMasked256", argLength: 3, reg: vfpkv, asm: "VPSRAQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLVQ256", argLength: 2, reg: v21, asm: "VPSLLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDVQ256", argLength: 3, reg: v31, asm: "VPSHLDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHLDVQMasked256", argLength: 4, reg: v3kv, asm: "VPSHLDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSLLVQMasked256", argLength: 3, reg: v2kv, asm: "VPSLLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLVQ256", argLength: 2, reg: v21, asm: "VPSRLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDVQ256", argLength: 3, reg: v31, asm: "VPSHRDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHRDVQMasked256", argLength: 4, reg: v3kv, asm: "VPSHRDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRLVQMasked256", argLength: 3, reg: v2kv, asm: "VPSRLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAVQ256", argLength: 2, reg: v21, asm: "VPSRAVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAVQMasked256", argLength: 3, reg: v2kv, asm: "VPSRAVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBQ256", argLength: 2, reg: v21, asm: "VPSUBQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBQMasked256", argLength: 3, reg: v2kv, asm: "VPSUBQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPXORQMasked256", argLength: 3, reg: v2kv, asm: "VPXORQ", commutative: true, typ: 
"Vec256", resultInArg0: false}, + {name: "VPABSQ512", argLength: 1, reg: v11, asm: "VPABSQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPABSQMasked512", argLength: 2, reg: vkv, asm: "VPABSQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDQ512", argLength: 2, reg: v21, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDQMasked512", argLength: 3, reg: v2kv, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDQ512", argLength: 2, reg: v21, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDQMasked512", argLength: 3, reg: v2kv, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDNQ512", argLength: 2, reg: v21, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPANDNQMasked512", argLength: 3, reg: v2kv, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSQ512", argLength: 2, reg: v21, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSQMasked512", argLength: 3, reg: v2kv, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSQ512", argLength: 2, reg: v21, asm: "VPMINSQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSQMasked512", argLength: 3, reg: v2kv, asm: "VPMINSQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULDQ512", argLength: 2, reg: v21, asm: "VPMULDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULDQMasked512", argLength: 3, reg: v2kv, asm: "VPMULDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLQ512", argLength: 2, reg: v21, asm: "VPMULLQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLQMasked512", argLength: 3, reg: v2kv, asm: "VPMULLQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPORQ512", argLength: 2, reg: v21, asm: "VPORQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPORQMasked512", argLength: 3, reg: v2kv, asm: "VPORQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTQ512", argLength: 1, reg: v11, asm: "VPOPCNTQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTQMasked512", argLength: 2, reg: vkv, asm: "VPOPCNTQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLVQ512", argLength: 2, reg: v21, asm: "VPROLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLVQMasked512", argLength: 3, reg: v2kv, asm: "VPROLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORVQ512", argLength: 2, reg: v21, asm: "VPRORVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORVQMasked512", argLength: 3, reg: v2kv, asm: "VPRORVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLQ512", argLength: 2, reg: vfpv, asm: "VPSLLQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLQMasked512", argLength: 3, reg: vfpkv, asm: "VPSLLQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLQ512", argLength: 2, reg: vfpv, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLQMasked512", argLength: 3, reg: vfpkv, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAQ512", argLength: 2, reg: vfpv, asm: "VPSRAQ", commutative: false, typ: "Vec512", 
resultInArg0: false}, + {name: "VPSRAQMasked512", argLength: 3, reg: vfpkv, asm: "VPSRAQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLVQ512", argLength: 2, reg: v21, asm: "VPSLLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDVQ512", argLength: 3, reg: v31, asm: "VPSHLDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHLDVQMasked512", argLength: 4, reg: v3kv, asm: "VPSHLDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSLLVQMasked512", argLength: 3, reg: v2kv, asm: "VPSLLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLVQ512", argLength: 2, reg: v21, asm: "VPSRLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDVQ512", argLength: 3, reg: v31, asm: "VPSHRDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHRDVQMasked512", argLength: 4, reg: v3kv, asm: "VPSHRDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRLVQMasked512", argLength: 3, reg: v2kv, asm: "VPSRLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAVQ512", argLength: 2, reg: v21, asm: "VPSRAVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAVQMasked512", argLength: 3, reg: v2kv, asm: "VPSRAVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBQ512", argLength: 2, reg: v21, asm: "VPSUBQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBQMasked512", argLength: 3, reg: v2kv, asm: "VPSUBQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPXORQ512", argLength: 2, reg: v21, asm: "VPXORQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPXORQMasked512", argLength: 3, reg: v2kv, asm: "VPXORQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPABSB128", argLength: 1, reg: v11, asm: "VPABSB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSBMasked128", argLength: 2, reg: vkv, asm: "VPABSB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDB128", argLength: 2, reg: v21, asm: "VPADDB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDBMasked128", argLength: 3, reg: v2kv, asm: "VPADDB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPAND128", argLength: 2, reg: v21, asm: "VPAND", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPANDN128", argLength: 2, reg: v21, asm: "VPANDN", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPEQB128", argLength: 2, reg: v21, asm: "VPCMPEQB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPGTB128", argLength: 2, reg: v21, asm: "VPCMPGTB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSB128", argLength: 2, reg: v21, asm: "VPMAXSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSBMasked128", argLength: 3, reg: v2kv, asm: "VPMAXSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSB128", argLength: 2, reg: v21, asm: "VPMINSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSBMasked128", argLength: 3, reg: v2kv, asm: "VPMINSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPOR128", argLength: 2, reg: v21, asm: "VPOR", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTB128", argLength: 1, reg: v11, asm: "VPOPCNTB", commutative: false, typ: "Vec128", resultInArg0: false}, 
+ {name: "VPOPCNTBMasked128", argLength: 2, reg: vkv, asm: "VPOPCNTB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDSB128", argLength: 2, reg: v21, asm: "VPADDSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDSBMasked128", argLength: 3, reg: v2kv, asm: "VPADDSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBSB128", argLength: 2, reg: v21, asm: "VPSUBSB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBSBMasked128", argLength: 3, reg: v2kv, asm: "VPSUBSB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSIGNB128", argLength: 2, reg: v21, asm: "VPSIGNB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBB128", argLength: 2, reg: v21, asm: "VPSUBB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBBMasked128", argLength: 3, reg: v2kv, asm: "VPSUBB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPXOR128", argLength: 2, reg: v21, asm: "VPXOR", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPABSB256", argLength: 1, reg: v11, asm: "VPABSB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSBMasked256", argLength: 2, reg: vkv, asm: "VPABSB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDB256", argLength: 2, reg: v21, asm: "VPADDB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDBMasked256", argLength: 3, reg: v2kv, asm: "VPADDB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPAND256", argLength: 2, reg: v21, asm: "VPAND", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPANDN256", argLength: 2, reg: v21, asm: "VPANDN", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPEQB256", argLength: 2, reg: v21, asm: "VPCMPEQB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPGTB256", argLength: 2, reg: v21, asm: "VPCMPGTB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSB256", argLength: 2, reg: v21, asm: "VPMAXSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSBMasked256", argLength: 3, reg: v2kv, asm: "VPMAXSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSB256", argLength: 2, reg: v21, asm: "VPMINSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSBMasked256", argLength: 3, reg: v2kv, asm: "VPMINSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPOR256", argLength: 2, reg: v21, asm: "VPOR", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTB256", argLength: 1, reg: v11, asm: "VPOPCNTB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTBMasked256", argLength: 2, reg: vkv, asm: "VPOPCNTB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDSB256", argLength: 2, reg: v21, asm: "VPADDSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDSBMasked256", argLength: 3, reg: v2kv, asm: "VPADDSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBSB256", argLength: 2, reg: v21, asm: "VPSUBSB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBSBMasked256", argLength: 3, reg: v2kv, asm: "VPSUBSB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSIGNB256", argLength: 2, reg: v21, asm: "VPSIGNB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBB256", argLength: 
2, reg: v21, asm: "VPSUBB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBBMasked256", argLength: 3, reg: v2kv, asm: "VPSUBB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPXOR256", argLength: 2, reg: v21, asm: "VPXOR", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPABSB512", argLength: 1, reg: v11, asm: "VPABSB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPABSBMasked512", argLength: 2, reg: vkv, asm: "VPABSB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDB512", argLength: 2, reg: v21, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDBMasked512", argLength: 3, reg: v2kv, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSB512", argLength: 2, reg: v21, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSBMasked512", argLength: 3, reg: v2kv, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSB512", argLength: 2, reg: v21, asm: "VPMINSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSBMasked512", argLength: 3, reg: v2kv, asm: "VPMINSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTB512", argLength: 1, reg: v11, asm: "VPOPCNTB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTBMasked512", argLength: 2, reg: vkv, asm: "VPOPCNTB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDSB512", argLength: 2, reg: v21, asm: "VPADDSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDSBMasked512", argLength: 3, reg: v2kv, asm: "VPADDSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBSB512", argLength: 2, reg: v21, asm: "VPSUBSB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBSBMasked512", argLength: 3, reg: v2kv, asm: "VPSUBSB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBB512", argLength: 2, reg: v21, asm: "VPSUBB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBBMasked512", argLength: 3, reg: v2kv, asm: "VPSUBB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPAVGW256", argLength: 2, reg: v21, asm: "VPAVGW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPAVGWMasked256", argLength: 3, reg: v2kv, asm: "VPAVGW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUW256", argLength: 2, reg: v21, asm: "VPMAXUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUWMasked256", argLength: 3, reg: v2kv, asm: "VPMAXUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUW256", argLength: 2, reg: v21, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUWMasked256", argLength: 3, reg: v2kv, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULHUW256", argLength: 2, reg: v21, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULHUWMasked256", argLength: 3, reg: v2kv, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPAVGW512", argLength: 2, reg: v21, asm: "VPAVGW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPAVGWMasked512", argLength: 3, reg: v2kv, asm: "VPAVGW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUW512", argLength: 2, reg: v21, 
asm: "VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUWMasked512", argLength: 3, reg: v2kv, asm: "VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUW512", argLength: 2, reg: v21, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUWMasked512", argLength: 3, reg: v2kv, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULHUW512", argLength: 2, reg: v21, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULHUWMasked512", argLength: 3, reg: v2kv, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPAVGW128", argLength: 2, reg: v21, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPAVGWMasked128", argLength: 3, reg: v2kv, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUW128", argLength: 2, reg: v21, asm: "VPMAXUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUWMasked128", argLength: 3, reg: v2kv, asm: "VPMAXUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUW128", argLength: 2, reg: v21, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUWMasked128", argLength: 3, reg: v2kv, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULHUW128", argLength: 2, reg: v21, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULHUWMasked128", argLength: 3, reg: v2kv, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUD512", argLength: 2, reg: v21, asm: "VPMAXUD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUDMasked512", argLength: 3, reg: v2kv, asm: "VPMAXUD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUD512", argLength: 2, reg: v21, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUDMasked512", argLength: 3, reg: v2kv, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUD128", argLength: 2, reg: v21, asm: "VPMAXUD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUDMasked128", argLength: 3, reg: v2kv, asm: "VPMAXUD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUD128", argLength: 2, reg: v21, asm: "VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUDMasked128", argLength: 3, reg: v2kv, asm: "VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULUDQ128", argLength: 2, reg: v21, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUD256", argLength: 2, reg: v21, asm: "VPMAXUD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUDMasked256", argLength: 3, reg: v2kv, asm: "VPMAXUD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUD256", argLength: 2, reg: v21, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUDMasked256", argLength: 3, reg: v2kv, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULUDQ256", argLength: 2, reg: v21, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUQ128", argLength: 2, reg: v21, asm: "VPMAXUQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUQMasked128", argLength: 3, reg: 
v2kv, asm: "VPMAXUQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUQ128", argLength: 2, reg: v21, asm: "VPMINUQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUQMasked128", argLength: 3, reg: v2kv, asm: "VPMINUQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULUDQMasked128", argLength: 3, reg: v2kv, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUQ256", argLength: 2, reg: v21, asm: "VPMAXUQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUQMasked256", argLength: 3, reg: v2kv, asm: "VPMAXUQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUQ256", argLength: 2, reg: v21, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUQMasked256", argLength: 3, reg: v2kv, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULUDQMasked256", argLength: 3, reg: v2kv, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUQ512", argLength: 2, reg: v21, asm: "VPMAXUQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUQMasked512", argLength: 3, reg: v2kv, asm: "VPMAXUQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUQ512", argLength: 2, reg: v21, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUQMasked512", argLength: 3, reg: v2kv, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULUDQ512", argLength: 2, reg: v21, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULUDQMasked512", argLength: 3, reg: v2kv, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPAVGB128", argLength: 2, reg: v21, asm: "VPAVGB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPAVGBMasked128", argLength: 3, reg: v2kv, asm: "VPAVGB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8MULB128", argLength: 2, reg: v21, asm: "VGF2P8MULB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8MULBMasked128", argLength: 3, reg: v2kv, asm: "VGF2P8MULB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUB128", argLength: 2, reg: v21, asm: "VPMAXUB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUBMasked128", argLength: 3, reg: v2kv, asm: "VPMAXUB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUB128", argLength: 2, reg: v21, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUBMasked128", argLength: 3, reg: v2kv, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMADDUBSW128", argLength: 2, reg: v21, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMADDUBSWMasked128", argLength: 3, reg: v2kv, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPAVGB256", argLength: 2, reg: v21, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPAVGBMasked256", argLength: 3, reg: v2kv, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8MULB256", argLength: 2, reg: v21, asm: "VGF2P8MULB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8MULBMasked256", argLength: 3, reg: v2kv, asm: "VGF2P8MULB", commutative: false, typ: "Vec256", resultInArg0: 
false}, + {name: "VPMAXUB256", argLength: 2, reg: v21, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUBMasked256", argLength: 3, reg: v2kv, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUB256", argLength: 2, reg: v21, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUBMasked256", argLength: 3, reg: v2kv, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMADDUBSW256", argLength: 2, reg: v21, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMADDUBSWMasked256", argLength: 3, reg: v2kv, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPAVGB512", argLength: 2, reg: v21, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPAVGBMasked512", argLength: 3, reg: v2kv, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VGF2P8MULB512", argLength: 2, reg: v21, asm: "VGF2P8MULB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VGF2P8MULBMasked512", argLength: 3, reg: v2kv, asm: "VGF2P8MULB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUB512", argLength: 2, reg: v21, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUBMasked512", argLength: 3, reg: v2kv, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUB512", argLength: 2, reg: v21, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUBMasked512", argLength: 3, reg: v2kv, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMADDUBSW512", argLength: 2, reg: v21, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMADDUBSWMasked512", argLength: 3, reg: v2kv, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRNDSCALEPS512", argLength: 1, reg: v11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRNDSCALEPSMasked512", argLength: 2, reg: vkv, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VREDUCEPS512", argLength: 1, reg: v11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VREDUCEPSMasked512", argLength: 2, reg: vkv, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VCMPPS512", argLength: 2, reg: v2k, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPSMasked512", argLength: 3, reg: v2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VROUNDPS128", argLength: 1, reg: v11, asm: "VROUNDPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRNDSCALEPS128", argLength: 1, reg: v11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRNDSCALEPSMasked128", argLength: 2, reg: vkv, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VREDUCEPS128", argLength: 1, reg: v11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VREDUCEPSMasked128", argLength: 2, reg: vkv, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: 
"VCMPPS128", argLength: 2, reg: v21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VCMPPSMasked128", argLength: 3, reg: v2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VROUNDPS256", argLength: 1, reg: v11, asm: "VROUNDPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRNDSCALEPS256", argLength: 1, reg: v11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRNDSCALEPSMasked256", argLength: 2, reg: vkv, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPS256", argLength: 1, reg: v11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPSMasked256", argLength: 2, reg: vkv, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VCMPPS256", argLength: 2, reg: v21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VCMPPSMasked256", argLength: 3, reg: v2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VEXTRACTF128128", argLength: 1, reg: v11, asm: "VEXTRACTF128", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VINSERTF128256", argLength: 2, reg: v21, asm: "VINSERTF128", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VROUNDPD128", argLength: 1, reg: v11, asm: "VROUNDPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRNDSCALEPD128", argLength: 1, reg: v11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRNDSCALEPDMasked128", argLength: 2, reg: vkv, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VREDUCEPD128", argLength: 1, reg: v11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VREDUCEPDMasked128", argLength: 2, reg: vkv, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VDPPD128", argLength: 2, reg: v21, asm: "VDPPD", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VCMPPD128", argLength: 2, reg: v21, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VCMPPDMasked128", argLength: 3, reg: v2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VROUNDPD256", argLength: 1, reg: v11, asm: "VROUNDPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRNDSCALEPD256", argLength: 1, reg: v11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRNDSCALEPDMasked256", argLength: 2, reg: vkv, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPD256", argLength: 1, reg: v11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPDMasked256", argLength: 2, reg: vkv, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VCMPPD256", argLength: 2, reg: v21, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VCMPPDMasked256", argLength: 3, reg: v2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", 
resultInArg0: false}, + {name: "VRNDSCALEPD512", argLength: 1, reg: v11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRNDSCALEPDMasked512", argLength: 2, reg: vkv, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VREDUCEPD512", argLength: 1, reg: v11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VREDUCEPDMasked512", argLength: 2, reg: vkv, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VCMPPD512", argLength: 2, reg: v2k, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPDMasked512", argLength: 3, reg: v2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPWMasked256", argLength: 3, reg: v2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPW256", argLength: 2, reg: v2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPSHLDW256", argLength: 2, reg: v21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDWMasked256", argLength: 3, reg: v2kv, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDW256", argLength: 2, reg: v21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDWMasked256", argLength: 3, reg: v2kv, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPW512", argLength: 2, reg: v2k, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPWMasked512", argLength: 3, reg: v2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPSHLDW512", argLength: 2, reg: v21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDWMasked512", argLength: 3, reg: v2kv, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDW512", argLength: 2, reg: v21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDWMasked512", argLength: 3, reg: v2kv, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPWMasked128", argLength: 3, reg: v2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPEXTRW128", argLength: 1, reg: vgp, asm: "VPEXTRW", aux: "Int8", commutative: false, typ: "int16", resultInArg0: false}, + {name: "VPCMPW128", argLength: 2, reg: v2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPINSRW128", argLength: 2, reg: vgpv, asm: "VPINSRW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDW128", argLength: 2, reg: v21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDWMasked128", argLength: 3, reg: v2kv, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDW128", argLength: 2, reg: v21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDWMasked128", argLength: 3, reg: v2kv, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + 
{name: "VPCMPD512", argLength: 2, reg: v2k, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPDMasked512", argLength: 3, reg: v2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPROLD512", argLength: 1, reg: v11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLDMasked512", argLength: 2, reg: vkv, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORD512", argLength: 1, reg: v11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORDMasked512", argLength: 2, reg: vkv, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDD512", argLength: 2, reg: v21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDDMasked512", argLength: 3, reg: v2kv, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDD512", argLength: 2, reg: v21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDDMasked512", argLength: 3, reg: v2kv, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPDMasked128", argLength: 3, reg: v2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPEXTRD128", argLength: 1, reg: vgp, asm: "VPEXTRD", aux: "Int8", commutative: false, typ: "int32", resultInArg0: false}, + {name: "VPCMPD128", argLength: 2, reg: v2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPROLD128", argLength: 1, reg: v11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLDMasked128", argLength: 2, reg: vkv, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORD128", argLength: 1, reg: v11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORDMasked128", argLength: 2, reg: vkv, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPINSRD128", argLength: 2, reg: vgpv, asm: "VPINSRD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDD128", argLength: 2, reg: v21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDDMasked128", argLength: 3, reg: v2kv, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDD128", argLength: 2, reg: v21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDDMasked128", argLength: 3, reg: v2kv, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPDMasked256", argLength: 3, reg: v2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPD256", argLength: 2, reg: v2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPROLD256", argLength: 1, reg: v11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLDMasked256", argLength: 2, reg: vkv, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORD256", argLength: 1, reg: v11, asm: "VPRORD", aux: 
"Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORDMasked256", argLength: 2, reg: vkv, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDD256", argLength: 2, reg: v21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDDMasked256", argLength: 3, reg: v2kv, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDD256", argLength: 2, reg: v21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDDMasked256", argLength: 3, reg: v2kv, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPQMasked128", argLength: 3, reg: v2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPEXTRQ128", argLength: 1, reg: vgp, asm: "VPEXTRQ", aux: "Int8", commutative: false, typ: "int64", resultInArg0: false}, + {name: "VPCMPQ128", argLength: 2, reg: v2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPROLQ128", argLength: 1, reg: v11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLQMasked128", argLength: 2, reg: vkv, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORQ128", argLength: 1, reg: v11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORQMasked128", argLength: 2, reg: vkv, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPINSRQ128", argLength: 2, reg: vgpv, asm: "VPINSRQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDQ128", argLength: 2, reg: v21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDQMasked128", argLength: 3, reg: v2kv, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDQ128", argLength: 2, reg: v21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDQMasked128", argLength: 3, reg: v2kv, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPQMasked256", argLength: 3, reg: v2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQ256", argLength: 2, reg: v2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPROLQ256", argLength: 1, reg: v11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLQMasked256", argLength: 2, reg: vkv, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORQ256", argLength: 1, reg: v11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORQMasked256", argLength: 2, reg: vkv, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDQ256", argLength: 2, reg: v21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDQMasked256", argLength: 3, reg: v2kv, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDQ256", argLength: 2, reg: v21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec256", 
resultInArg0: false}, + {name: "VPSHRDQMasked256", argLength: 3, reg: v2kv, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPQ512", argLength: 2, reg: v2k, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQMasked512", argLength: 3, reg: v2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPROLQ512", argLength: 1, reg: v11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLQMasked512", argLength: 2, reg: vkv, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORQ512", argLength: 1, reg: v11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORQMasked512", argLength: 2, reg: vkv, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDQ512", argLength: 2, reg: v21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDQMasked512", argLength: 3, reg: v2kv, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDQ512", argLength: 2, reg: v21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDQMasked512", argLength: 3, reg: v2kv, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPBMasked128", argLength: 3, reg: v2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPEXTRB128", argLength: 1, reg: vgp, asm: "VPEXTRB", aux: "Int8", commutative: false, typ: "int8", resultInArg0: false}, + {name: "VPCMPB128", argLength: 2, reg: v2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPINSRB128", argLength: 2, reg: vgpv, asm: "VPINSRB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPBMasked256", argLength: 3, reg: v2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VEXTRACTI128128", argLength: 1, reg: v11, asm: "VEXTRACTI128", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPB256", argLength: 2, reg: v2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VINSERTI128256", argLength: 2, reg: v21, asm: "VINSERTI128", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPB512", argLength: 2, reg: v2k, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPBMasked512", argLength: 3, reg: v2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUW256", argLength: 2, reg: v2k, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUWMasked256", argLength: 3, reg: v2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUW512", argLength: 2, reg: v2k, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUWMasked512", argLength: 3, reg: v2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUW128", argLength: 2, reg: v2k, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUWMasked128", argLength: 3, 
reg: v2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUD512", argLength: 2, reg: v2k, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUDMasked512", argLength: 3, reg: v2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUD128", argLength: 2, reg: v2k, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUDMasked128", argLength: 3, reg: v2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUD256", argLength: 2, reg: v2k, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUDMasked256", argLength: 3, reg: v2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQ128", argLength: 2, reg: v2k, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQMasked128", argLength: 3, reg: v2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQ256", argLength: 2, reg: v2k, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQMasked256", argLength: 3, reg: v2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQ512", argLength: 2, reg: v2k, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQMasked512", argLength: 3, reg: v2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUB128", argLength: 2, reg: v2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUBMasked128", argLength: 3, reg: v2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VGF2P8AFFINEQB128", argLength: 2, reg: v21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQB128", argLength: 2, reg: v21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQBMasked128", argLength: 3, reg: v2kv, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8AFFINEQBMasked128", argLength: 3, reg: v2kv, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPUB256", argLength: 2, reg: v2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUBMasked256", argLength: 3, reg: v2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VGF2P8AFFINEQB256", argLength: 2, reg: v21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQB256", argLength: 2, reg: v21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQBMasked256", argLength: 3, reg: v2kv, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8AFFINEQBMasked256", argLength: 3, reg: v2kv, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPUB512", argLength: 2, reg: v2k, asm: "VPCMPUB", aux: "Int8", commutative: true, 
typ: "Mask", resultInArg0: false}, + {name: "VPCMPUBMasked512", argLength: 3, reg: v2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VGF2P8AFFINEQB512", argLength: 2, reg: v21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQB512", argLength: 2, reg: v21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQBMasked512", argLength: 3, reg: v2kv, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VGF2P8AFFINEQBMasked512", argLength: 3, reg: v2kv, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, } } From 574854fd863377a9467625c45ec842fd7d5fc341 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Tue, 8 Jul 2025 19:24:30 +0000 Subject: [PATCH 070/139] [dev.simd] runtime: save Z16-Z31 registers in async preempt The register allocation will use the upper register soon, this CL is to enable that. Change-Id: I4d7285e08b95f4e6ebee72594dfbe8d1199f09ed Reviewed-on: https://go-review.googlesource.com/c/go/+/686498 TryBot-Bypass: David Chase Reviewed-by: Cherry Mui Commit-Queue: David Chase --- src/runtime/mkpreempt.go | 2 +- src/runtime/preempt_amd64.go | 16 +++++++++ src/runtime/preempt_amd64.s | 64 +++++++++++++++++++++++++++--------- 3 files changed, 65 insertions(+), 17 deletions(-) diff --git a/src/runtime/mkpreempt.go b/src/runtime/mkpreempt.go index 2bd2ef07fa8292..7786f342b501d0 100644 --- a/src/runtime/mkpreempt.go +++ b/src/runtime/mkpreempt.go @@ -300,7 +300,7 @@ func genAMD64(g *gen) { // Create layouts for X, Y, and Z registers. const ( numXRegs = 16 - numZRegs = 16 // TODO: If we start using upper registers, change to 32 + numZRegs = 32 numKRegs = 8 ) lZRegs := layout{sp: xReg} // Non-GP registers diff --git a/src/runtime/preempt_amd64.go b/src/runtime/preempt_amd64.go index 88c0ddd34ade72..78dec40e1f1e94 100644 --- a/src/runtime/preempt_amd64.go +++ b/src/runtime/preempt_amd64.go @@ -19,6 +19,22 @@ type xRegs struct { Z13 [64]byte Z14 [64]byte Z15 [64]byte + Z16 [64]byte + Z17 [64]byte + Z18 [64]byte + Z19 [64]byte + Z20 [64]byte + Z21 [64]byte + Z22 [64]byte + Z23 [64]byte + Z24 [64]byte + Z25 [64]byte + Z26 [64]byte + Z27 [64]byte + Z28 [64]byte + Z29 [64]byte + Z30 [64]byte + Z31 [64]byte K0 uint64 K1 uint64 K2 uint64 diff --git a/src/runtime/preempt_amd64.s b/src/runtime/preempt_amd64.s index c35de7f3b75726..a5b949a242cf19 100644 --- a/src/runtime/preempt_amd64.s +++ b/src/runtime/preempt_amd64.s @@ -95,14 +95,30 @@ saveAVX512: VMOVDQU64 Z13, 832(AX) VMOVDQU64 Z14, 896(AX) VMOVDQU64 Z15, 960(AX) - KMOVQ K0, 1024(AX) - KMOVQ K1, 1032(AX) - KMOVQ K2, 1040(AX) - KMOVQ K3, 1048(AX) - KMOVQ K4, 1056(AX) - KMOVQ K5, 1064(AX) - KMOVQ K6, 1072(AX) - KMOVQ K7, 1080(AX) + VMOVDQU64 Z16, 1024(AX) + VMOVDQU64 Z17, 1088(AX) + VMOVDQU64 Z18, 1152(AX) + VMOVDQU64 Z19, 1216(AX) + VMOVDQU64 Z20, 1280(AX) + VMOVDQU64 Z21, 1344(AX) + VMOVDQU64 Z22, 1408(AX) + VMOVDQU64 Z23, 1472(AX) + VMOVDQU64 Z24, 1536(AX) + VMOVDQU64 Z25, 1600(AX) + VMOVDQU64 Z26, 1664(AX) + VMOVDQU64 Z27, 1728(AX) + VMOVDQU64 Z28, 1792(AX) + VMOVDQU64 Z29, 1856(AX) + VMOVDQU64 Z30, 1920(AX) + VMOVDQU64 Z31, 1984(AX) + KMOVQ K0, 2048(AX) + KMOVQ K1, 2056(AX) + KMOVQ K2, 2064(AX) + KMOVQ K3, 2072(AX) + KMOVQ K4, 2080(AX) + KMOVQ K5, 2088(AX) + KMOVQ K6, 2096(AX) + KMOVQ K7, 2104(AX) JMP preempt preempt: CALL ·asyncPreempt2(SB) @@ -153,14 +169,30 @@ 
restoreAVX2: VMOVDQU 0(AX), Y0 JMP restoreGPs restoreAVX512: - KMOVQ 1080(AX), K7 - KMOVQ 1072(AX), K6 - KMOVQ 1064(AX), K5 - KMOVQ 1056(AX), K4 - KMOVQ 1048(AX), K3 - KMOVQ 1040(AX), K2 - KMOVQ 1032(AX), K1 - KMOVQ 1024(AX), K0 + KMOVQ 2104(AX), K7 + KMOVQ 2096(AX), K6 + KMOVQ 2088(AX), K5 + KMOVQ 2080(AX), K4 + KMOVQ 2072(AX), K3 + KMOVQ 2064(AX), K2 + KMOVQ 2056(AX), K1 + KMOVQ 2048(AX), K0 + VMOVDQU64 1984(AX), Z31 + VMOVDQU64 1920(AX), Z30 + VMOVDQU64 1856(AX), Z29 + VMOVDQU64 1792(AX), Z28 + VMOVDQU64 1728(AX), Z27 + VMOVDQU64 1664(AX), Z26 + VMOVDQU64 1600(AX), Z25 + VMOVDQU64 1536(AX), Z24 + VMOVDQU64 1472(AX), Z23 + VMOVDQU64 1408(AX), Z22 + VMOVDQU64 1344(AX), Z21 + VMOVDQU64 1280(AX), Z20 + VMOVDQU64 1216(AX), Z19 + VMOVDQU64 1152(AX), Z18 + VMOVDQU64 1088(AX), Z17 + VMOVDQU64 1024(AX), Z16 VMOVDQU64 960(AX), Z15 VMOVDQU64 896(AX), Z14 VMOVDQU64 832(AX), Z13 From 8db7f41674c35452c8f364f7b31c6d89c567862b Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Tue, 8 Jul 2025 21:06:59 +0000 Subject: [PATCH 071/139] [dev.simd] cmd/compile: use upper registers for AVX512 simd ops This CL is generated by CL 686775. Change-Id: I10606cfdd4be015c8d251ba4275e1191d5bf0944 Reviewed-on: https://go-review.googlesource.com/c/go/+/686695 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/ssa/_gen/AMD64Ops.go | 59 +- .../compile/internal/ssa/_gen/simdAMD64ops.go | 1322 ++--- src/cmd/compile/internal/ssa/opGen.go | 4932 +++++++++-------- 3 files changed, 3182 insertions(+), 3131 deletions(-) diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go index 17cc799b328d27..150c609fc54776 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go @@ -62,6 +62,22 @@ var regNamesAMD64 = []string{ "X13", "X14", "X15", // constant 0 in ABIInternal + "X16", + "X17", + "X18", + "X19", + "X20", + "X21", + "X22", + "X23", + "X24", + "X25", + "X26", + "X27", + "X28", + "X29", + "X30", + "X31", // TODO: update asyncPreempt for K registers. // asyncPreempt also needs to store Z0-Z15 properly. 
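The asyncPreempt TODO above is what PATCH 070 addresses: once Z16-Z31 are saved, the opmask registers can no longer sit at offset 1024 in the save area. Each Z register takes 64 bytes and each K register 8 bytes, so K0 moves to 32*64 = 2048 and the xRegs struct grows from 1088 to 2112 bytes. A standalone sketch (illustrative only, not part of either CL; the constant names here are made up) that reproduces the offsets used in preempt_amd64.s:

package main

import "fmt"

// Recompute the xRegs save-area layout: 64-byte Z0-Z31 followed by
// 8-byte K0-K7, matching the VMOVDQU64/KMOVQ offsets in the patch.
func main() {
	const (
		zRegSize = 64 // bytes per Z register
		numZRegs = 32 // raised from 16 once Z16-Z31 are saved
		kRegSize = 8  // bytes per K (opmask) register
		numKRegs = 8
	)
	for i := 0; i < numZRegs; i++ {
		fmt.Printf("Z%-2d -> %4d(AX)\n", i, i*zRegSize)
	}
	kBase := numZRegs * zRegSize // 2048
	for i := 0; i < numKRegs; i++ {
		fmt.Printf("K%d  -> %4d(AX)\n", i, kBase+i*kRegSize)
	}
}

Running it prints Z31 at 1984(AX) and K7 at 2104(AX), which is consistent with the save and restore sequences above.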
@@ -110,6 +126,7 @@ func init() { g = buildReg("g") fp = buildReg("X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14") v = buildReg("X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14") + w = buildReg("X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31") x15 = buildReg("X15") mask = buildReg("K1 K2 K3 K4 K5 K6 K7") gpsp = gp | buildReg("SP") @@ -122,6 +139,7 @@ func init() { gponly = []regMask{gp} fponly = []regMask{fp} vonly = []regMask{v} + wonly = []regMask{w} maskonly = []regMask{mask} ) @@ -184,6 +202,7 @@ func init() { fpstore = regInfo{inputs: []regMask{gpspsb, fp, 0}} fpstoreidx = regInfo{inputs: []regMask{gpspsb, gpsp, fp, 0}} + v01 = regInfo{inputs: nil, outputs: vonly} v11 = regInfo{inputs: vonly, outputs: vonly} v21 = regInfo{inputs: []regMask{v, v}, outputs: vonly} vk = regInfo{inputs: vonly, outputs: maskonly} @@ -199,6 +218,22 @@ func init() { vfpv = regInfo{inputs: []regMask{v, fp}, outputs: vonly} vfpkv = regInfo{inputs: []regMask{v, fp, mask}, outputs: vonly} + w01 = regInfo{inputs: nil, outputs: wonly} + w11 = regInfo{inputs: wonly, outputs: wonly} + w21 = regInfo{inputs: []regMask{w, w}, outputs: wonly} + wk = regInfo{inputs: wonly, outputs: maskonly} + kw = regInfo{inputs: maskonly, outputs: wonly} + w2k = regInfo{inputs: []regMask{fp, fp}, outputs: maskonly} + wkw = regInfo{inputs: []regMask{fp, mask}, outputs: fponly} + w2kw = regInfo{inputs: []regMask{fp, fp, mask}, outputs: fponly} + w2kk = regInfo{inputs: []regMask{fp, fp, mask}, outputs: maskonly} + w31 = regInfo{inputs: []regMask{fp, fp, fp}, outputs: fponly} + w3kw = regInfo{inputs: []regMask{fp, fp, fp, mask}, outputs: fponly} + wgpw = regInfo{inputs: []regMask{fp, gp}, outputs: fponly} + wgp = regInfo{inputs: wonly, outputs: gponly} + wfpw = regInfo{inputs: []regMask{w, fp}, outputs: wonly} + wfpkw = regInfo{inputs: []regMask{w, fp, mask}, outputs: wonly} + prefreg = regInfo{inputs: []regMask{gpspsbg}} ) @@ -1243,39 +1278,39 @@ func init() { {name: "VPMOVMToVec8x16", argLength: 1, reg: kv, asm: "VPMOVM2B"}, {name: "VPMOVMToVec8x32", argLength: 1, reg: kv, asm: "VPMOVM2B"}, - {name: "VPMOVMToVec8x64", argLength: 1, reg: kv, asm: "VPMOVM2B"}, + {name: "VPMOVMToVec8x64", argLength: 1, reg: kw, asm: "VPMOVM2B"}, {name: "VPMOVMToVec16x8", argLength: 1, reg: kv, asm: "VPMOVM2W"}, {name: "VPMOVMToVec16x16", argLength: 1, reg: kv, asm: "VPMOVM2W"}, - {name: "VPMOVMToVec16x32", argLength: 1, reg: kv, asm: "VPMOVM2W"}, + {name: "VPMOVMToVec16x32", argLength: 1, reg: kw, asm: "VPMOVM2W"}, {name: "VPMOVMToVec32x4", argLength: 1, reg: kv, asm: "VPMOVM2D"}, {name: "VPMOVMToVec32x8", argLength: 1, reg: kv, asm: "VPMOVM2D"}, - {name: "VPMOVMToVec32x16", argLength: 1, reg: kv, asm: "VPMOVM2D"}, + {name: "VPMOVMToVec32x16", argLength: 1, reg: kw, asm: "VPMOVM2D"}, {name: "VPMOVMToVec64x2", argLength: 1, reg: kv, asm: "VPMOVM2Q"}, {name: "VPMOVMToVec64x4", argLength: 1, reg: kv, asm: "VPMOVM2Q"}, - {name: "VPMOVMToVec64x8", argLength: 1, reg: kv, asm: "VPMOVM2Q"}, + {name: "VPMOVMToVec64x8", argLength: 1, reg: kw, asm: "VPMOVM2Q"}, {name: "VPMOVVec8x16ToM", argLength: 1, reg: vk, asm: "VPMOVB2M"}, {name: "VPMOVVec8x32ToM", argLength: 1, reg: vk, asm: "VPMOVB2M"}, - {name: "VPMOVVec8x64ToM", argLength: 1, reg: vk, asm: "VPMOVB2M"}, + {name: "VPMOVVec8x64ToM", argLength: 1, reg: wk, asm: "VPMOVB2M"}, {name: "VPMOVVec16x8ToM", argLength: 1, reg: vk, asm: "VPMOVW2M"}, {name: "VPMOVVec16x16ToM", argLength: 1, reg: vk, asm: "VPMOVW2M"}, - {name: "VPMOVVec16x32ToM", 
argLength: 1, reg: vk, asm: "VPMOVW2M"}, + {name: "VPMOVVec16x32ToM", argLength: 1, reg: wk, asm: "VPMOVW2M"}, {name: "VPMOVVec32x4ToM", argLength: 1, reg: vk, asm: "VPMOVD2M"}, {name: "VPMOVVec32x8ToM", argLength: 1, reg: vk, asm: "VPMOVD2M"}, - {name: "VPMOVVec32x16ToM", argLength: 1, reg: vk, asm: "VPMOVD2M"}, + {name: "VPMOVVec32x16ToM", argLength: 1, reg: wk, asm: "VPMOVD2M"}, {name: "VPMOVVec64x2ToM", argLength: 1, reg: vk, asm: "VPMOVQ2M"}, {name: "VPMOVVec64x4ToM", argLength: 1, reg: vk, asm: "VPMOVQ2M"}, - {name: "VPMOVVec64x8ToM", argLength: 1, reg: vk, asm: "VPMOVQ2M"}, + {name: "VPMOVVec64x8ToM", argLength: 1, reg: wk, asm: "VPMOVQ2M"}, - {name: "Zero128", argLength: 0, reg: fp01, asm: "VPXOR"}, - {name: "Zero256", argLength: 0, reg: fp01, asm: "VPXOR"}, - {name: "Zero512", argLength: 0, reg: fp01, asm: "VPXORQ"}, + {name: "Zero128", argLength: 0, reg: v01, asm: "VPXOR"}, + {name: "Zero256", argLength: 0, reg: v01, asm: "VPXOR"}, + {name: "Zero512", argLength: 0, reg: w01, asm: "VPXORQ"}, } var AMD64blocks = []blockData{ @@ -1308,7 +1343,7 @@ func init() { pkg: "cmd/internal/obj/x86", genfile: "../../amd64/ssa.go", genSIMDfile: "../../amd64/simdssa.go", - ops: append(AMD64ops, simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vfpkv)...), // AMD64ops, + ops: append(AMD64ops, simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vfpkv, w11, w21, w2k, wkw, w2kw, w2kk, w31, w3kw, wgpw, wgp, wfpw, wfpkw)...), // AMD64ops, blocks: AMD64blocks, regnames: regNamesAMD64, ParamIntRegNames: "AX BX CX DI SI R8 R9 R10 R11", diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index d16de27fddbaec..09cfcfb4d9a41b 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -1,877 +1,877 @@ // Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. 
package main -func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vfpkv regInfo) []opData { +func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vfpkv, w11, w21, w2k, wkw, w2kw, w2kk, w31, w3kw, wgpw, wgp, wfpw, wfpkw regInfo) []opData { return []opData{ - {name: "VADDPS512", argLength: 2, reg: v21, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VADDPSMasked512", argLength: 3, reg: v2kv, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VRCP14PS512", argLength: 1, reg: v11, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRCP14PSMasked512", argLength: 2, reg: vkv, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRSQRT14PS512", argLength: 1, reg: v11, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRSQRT14PSMasked512", argLength: 2, reg: vkv, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VDIVPS512", argLength: 2, reg: v21, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VDIVPSMasked512", argLength: 3, reg: v2kv, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VFMADD213PS512", argLength: 3, reg: v31, asm: "VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADD213PSMasked512", argLength: 4, reg: v3kv, asm: "VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADDSUB213PS512", argLength: 3, reg: v31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADDSUB213PSMasked512", argLength: 4, reg: v3kv, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUBADD213PS512", argLength: 3, reg: v31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUBADD213PSMasked512", argLength: 4, reg: v3kv, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VMAXPS512", argLength: 2, reg: v21, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMAXPSMasked512", argLength: 3, reg: v2kv, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMINPS512", argLength: 2, reg: v21, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMINPSMasked512", argLength: 3, reg: v2kv, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMULPS512", argLength: 2, reg: v21, asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VSCALEFPS512", argLength: 2, reg: v21, asm: "VSCALEFPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSCALEFPSMasked512", argLength: 3, reg: v2kv, asm: "VSCALEFPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VMULPSMasked512", argLength: 3, reg: v2kv, asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VSQRTPS512", argLength: 1, reg: v11, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSQRTPSMasked512", argLength: 2, reg: vkv, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSUBPS512", argLength: 2, reg: v21, asm: "VSUBPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSUBPSMasked512", argLength: 3, reg: v2kv, asm: "VSUBPS", commutative: false, typ: "Vec512", 
resultInArg0: false}, + {name: "VADDPS512", argLength: 2, reg: w21, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VADDPSMasked512", argLength: 3, reg: w2kw, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VRCP14PS512", argLength: 1, reg: w11, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRCP14PSMasked512", argLength: 2, reg: wkw, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRSQRT14PS512", argLength: 1, reg: w11, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRSQRT14PSMasked512", argLength: 2, reg: wkw, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VDIVPS512", argLength: 2, reg: w21, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VDIVPSMasked512", argLength: 3, reg: w2kw, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VFMADD213PS512", argLength: 3, reg: w31, asm: "VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADD213PSMasked512", argLength: 4, reg: w3kw, asm: "VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB213PS512", argLength: 3, reg: w31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB213PSMasked512", argLength: 4, reg: w3kw, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD213PS512", argLength: 3, reg: w31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD213PSMasked512", argLength: 4, reg: w3kw, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VMAXPS512", argLength: 2, reg: w21, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMAXPSMasked512", argLength: 3, reg: w2kw, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMINPS512", argLength: 2, reg: w21, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMINPSMasked512", argLength: 3, reg: w2kw, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMULPS512", argLength: 2, reg: w21, asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VSCALEFPS512", argLength: 2, reg: w21, asm: "VSCALEFPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSCALEFPSMasked512", argLength: 3, reg: w2kw, asm: "VSCALEFPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VMULPSMasked512", argLength: 3, reg: w2kw, asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VSQRTPS512", argLength: 1, reg: w11, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSQRTPSMasked512", argLength: 2, reg: wkw, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSUBPS512", argLength: 2, reg: w21, asm: "VSUBPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSUBPSMasked512", argLength: 3, reg: w2kw, asm: "VSUBPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VADDPS128", argLength: 2, reg: v21, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VADDPSMasked128", argLength: 3, reg: v2kv, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VADDPSMasked128", 
argLength: 3, reg: w2kw, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VADDSUBPS128", argLength: 2, reg: v21, asm: "VADDSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRCP14PS128", argLength: 1, reg: v11, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRCP14PSMasked128", argLength: 2, reg: vkv, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRCP14PS128", argLength: 1, reg: w11, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRCP14PSMasked128", argLength: 2, reg: wkw, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRSQRTPS128", argLength: 1, reg: v11, asm: "VRSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRSQRT14PSMasked128", argLength: 2, reg: vkv, asm: "VRSQRT14PS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRSQRT14PSMasked128", argLength: 2, reg: wkw, asm: "VRSQRT14PS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPS128", argLength: 2, reg: v21, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VDIVPSMasked128", argLength: 3, reg: v2kv, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VFMADD213PS128", argLength: 3, reg: v31, asm: "VFMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADD213PSMasked128", argLength: 4, reg: v3kv, asm: "VFMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADDSUB213PS128", argLength: 3, reg: v31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADDSUB213PSMasked128", argLength: 4, reg: v3kv, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUBADD213PS128", argLength: 3, reg: v31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUBADD213PSMasked128", argLength: 4, reg: v3kv, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VDIVPSMasked128", argLength: 3, reg: w2kw, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VFMADD213PS128", argLength: 3, reg: w31, asm: "VFMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADD213PSMasked128", argLength: 4, reg: w3kw, asm: "VFMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADDSUB213PS128", argLength: 3, reg: w31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADDSUB213PSMasked128", argLength: 4, reg: w3kw, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUBADD213PS128", argLength: 3, reg: w31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUBADD213PSMasked128", argLength: 4, reg: w3kw, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VMAXPS128", argLength: 2, reg: v21, asm: "VMAXPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMAXPSMasked128", argLength: 3, reg: v2kv, asm: "VMAXPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMAXPSMasked128", argLength: 3, reg: w2kw, asm: "VMAXPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMINPS128", argLength: 2, reg: v21, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: 
false}, - {name: "VMINPSMasked128", argLength: 3, reg: v2kv, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMINPSMasked128", argLength: 3, reg: w2kw, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMULPS128", argLength: 2, reg: v21, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VSCALEFPS128", argLength: 2, reg: v21, asm: "VSCALEFPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSCALEFPSMasked128", argLength: 3, reg: v2kv, asm: "VSCALEFPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VMULPSMasked128", argLength: 3, reg: v2kv, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VSCALEFPS128", argLength: 2, reg: w21, asm: "VSCALEFPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSCALEFPSMasked128", argLength: 3, reg: w2kw, asm: "VSCALEFPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VMULPSMasked128", argLength: 3, reg: w2kw, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VHADDPS128", argLength: 2, reg: v21, asm: "VHADDPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VHSUBPS128", argLength: 2, reg: v21, asm: "VHSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VSQRTPS128", argLength: 1, reg: v11, asm: "VSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSQRTPSMasked128", argLength: 2, reg: vkv, asm: "VSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSQRTPSMasked128", argLength: 2, reg: wkw, asm: "VSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VSUBPS128", argLength: 2, reg: v21, asm: "VSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSUBPSMasked128", argLength: 3, reg: v2kv, asm: "VSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSUBPSMasked128", argLength: 3, reg: w2kw, asm: "VSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VADDPS256", argLength: 2, reg: v21, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VADDPSMasked256", argLength: 3, reg: v2kv, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VADDPSMasked256", argLength: 3, reg: w2kw, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VADDSUBPS256", argLength: 2, reg: v21, asm: "VADDSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRCP14PS256", argLength: 1, reg: v11, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRCP14PSMasked256", argLength: 2, reg: vkv, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRCP14PS256", argLength: 1, reg: w11, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRCP14PSMasked256", argLength: 2, reg: wkw, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRSQRTPS256", argLength: 1, reg: v11, asm: "VRSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRSQRT14PSMasked256", argLength: 2, reg: vkv, asm: "VRSQRT14PS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRSQRT14PSMasked256", argLength: 2, reg: wkw, asm: "VRSQRT14PS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPS256", argLength: 2, reg: v21, asm: "VDIVPS", commutative: false, typ: 
"Vec256", resultInArg0: false}, - {name: "VDIVPSMasked256", argLength: 3, reg: v2kv, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VFMADD213PS256", argLength: 3, reg: v31, asm: "VFMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADD213PSMasked256", argLength: 4, reg: v3kv, asm: "VFMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADDSUB213PS256", argLength: 3, reg: v31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADDSUB213PSMasked256", argLength: 4, reg: v3kv, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUBADD213PS256", argLength: 3, reg: v31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUBADD213PSMasked256", argLength: 4, reg: v3kv, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VDIVPSMasked256", argLength: 3, reg: w2kw, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VFMADD213PS256", argLength: 3, reg: w31, asm: "VFMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADD213PSMasked256", argLength: 4, reg: w3kw, asm: "VFMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB213PS256", argLength: 3, reg: w31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB213PSMasked256", argLength: 4, reg: w3kw, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUBADD213PS256", argLength: 3, reg: w31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUBADD213PSMasked256", argLength: 4, reg: w3kw, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VMAXPS256", argLength: 2, reg: v21, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMAXPSMasked256", argLength: 3, reg: v2kv, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMAXPSMasked256", argLength: 3, reg: w2kw, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMINPS256", argLength: 2, reg: v21, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMINPSMasked256", argLength: 3, reg: v2kv, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMINPSMasked256", argLength: 3, reg: w2kw, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMULPS256", argLength: 2, reg: v21, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VSCALEFPS256", argLength: 2, reg: v21, asm: "VSCALEFPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSCALEFPSMasked256", argLength: 3, reg: v2kv, asm: "VSCALEFPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VMULPSMasked256", argLength: 3, reg: v2kv, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VSCALEFPS256", argLength: 2, reg: w21, asm: "VSCALEFPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSCALEFPSMasked256", argLength: 3, reg: w2kw, asm: "VSCALEFPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VMULPSMasked256", argLength: 3, reg: w2kw, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VHADDPS256", argLength: 2, reg: v21, asm: 
"VHADDPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VHSUBPS256", argLength: 2, reg: v21, asm: "VHSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VSQRTPS256", argLength: 1, reg: v11, asm: "VSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSQRTPSMasked256", argLength: 2, reg: vkv, asm: "VSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSQRTPSMasked256", argLength: 2, reg: wkw, asm: "VSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VSUBPS256", argLength: 2, reg: v21, asm: "VSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSUBPSMasked256", argLength: 3, reg: v2kv, asm: "VSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSUBPSMasked256", argLength: 3, reg: w2kw, asm: "VSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VADDPD128", argLength: 2, reg: v21, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VADDPDMasked128", argLength: 3, reg: v2kv, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VADDPDMasked128", argLength: 3, reg: w2kw, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VADDSUBPD128", argLength: 2, reg: v21, asm: "VADDSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRCP14PD128", argLength: 1, reg: v11, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRCP14PDMasked128", argLength: 2, reg: vkv, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRSQRT14PD128", argLength: 1, reg: v11, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRSQRT14PDMasked128", argLength: 2, reg: vkv, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRCP14PD128", argLength: 1, reg: w11, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRCP14PDMasked128", argLength: 2, reg: wkw, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRSQRT14PD128", argLength: 1, reg: w11, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRSQRT14PDMasked128", argLength: 2, reg: wkw, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPD128", argLength: 2, reg: v21, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VDIVPDMasked128", argLength: 3, reg: v2kv, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VFMADD213PD128", argLength: 3, reg: v31, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADD213PDMasked128", argLength: 4, reg: v3kv, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADDSUB213PD128", argLength: 3, reg: v31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADDSUB213PDMasked128", argLength: 4, reg: v3kv, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUBADD213PD128", argLength: 3, reg: v31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUBADD213PDMasked128", argLength: 4, reg: v3kv, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VDIVPDMasked128", argLength: 3, reg: w2kw, asm: "VDIVPD", commutative: 
false, typ: "Vec128", resultInArg0: false}, + {name: "VFMADD213PD128", argLength: 3, reg: w31, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADD213PDMasked128", argLength: 4, reg: w3kw, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADDSUB213PD128", argLength: 3, reg: w31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADDSUB213PDMasked128", argLength: 4, reg: w3kw, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUBADD213PD128", argLength: 3, reg: w31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUBADD213PDMasked128", argLength: 4, reg: w3kw, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VMAXPD128", argLength: 2, reg: v21, asm: "VMAXPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMAXPDMasked128", argLength: 3, reg: v2kv, asm: "VMAXPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMAXPDMasked128", argLength: 3, reg: w2kw, asm: "VMAXPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMINPD128", argLength: 2, reg: v21, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMINPDMasked128", argLength: 3, reg: v2kv, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMINPDMasked128", argLength: 3, reg: w2kw, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMULPD128", argLength: 2, reg: v21, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VSCALEFPD128", argLength: 2, reg: v21, asm: "VSCALEFPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSCALEFPDMasked128", argLength: 3, reg: v2kv, asm: "VSCALEFPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VMULPDMasked128", argLength: 3, reg: v2kv, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VSCALEFPD128", argLength: 2, reg: w21, asm: "VSCALEFPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSCALEFPDMasked128", argLength: 3, reg: w2kw, asm: "VSCALEFPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VMULPDMasked128", argLength: 3, reg: w2kw, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VHADDPD128", argLength: 2, reg: v21, asm: "VHADDPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VHSUBPD128", argLength: 2, reg: v21, asm: "VHSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VSQRTPD128", argLength: 1, reg: v11, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSQRTPDMasked128", argLength: 2, reg: vkv, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSQRTPDMasked128", argLength: 2, reg: wkw, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VSUBPD128", argLength: 2, reg: v21, asm: "VSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSUBPDMasked128", argLength: 3, reg: v2kv, asm: "VSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSUBPDMasked128", argLength: 3, reg: w2kw, asm: "VSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VADDPD256", argLength: 2, reg: v21, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: 
"VADDPDMasked256", argLength: 3, reg: v2kv, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VADDPDMasked256", argLength: 3, reg: w2kw, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VADDSUBPD256", argLength: 2, reg: v21, asm: "VADDSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRCP14PD256", argLength: 1, reg: v11, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRCP14PDMasked256", argLength: 2, reg: vkv, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRSQRT14PD256", argLength: 1, reg: v11, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRSQRT14PDMasked256", argLength: 2, reg: vkv, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRCP14PD256", argLength: 1, reg: w11, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRCP14PDMasked256", argLength: 2, reg: wkw, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRSQRT14PD256", argLength: 1, reg: w11, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRSQRT14PDMasked256", argLength: 2, reg: wkw, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPD256", argLength: 2, reg: v21, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VDIVPDMasked256", argLength: 3, reg: v2kv, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VFMADD213PD256", argLength: 3, reg: v31, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADD213PDMasked256", argLength: 4, reg: v3kv, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADDSUB213PD256", argLength: 3, reg: v31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADDSUB213PDMasked256", argLength: 4, reg: v3kv, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUBADD213PD256", argLength: 3, reg: v31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUBADD213PDMasked256", argLength: 4, reg: v3kv, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VDIVPDMasked256", argLength: 3, reg: w2kw, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VFMADD213PD256", argLength: 3, reg: w31, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADD213PDMasked256", argLength: 4, reg: w3kw, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB213PD256", argLength: 3, reg: w31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB213PDMasked256", argLength: 4, reg: w3kw, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUBADD213PD256", argLength: 3, reg: w31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUBADD213PDMasked256", argLength: 4, reg: w3kw, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VMAXPD256", argLength: 2, reg: v21, asm: "VMAXPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMAXPDMasked256", argLength: 3, reg: v2kv, asm: "VMAXPD", commutative: 
true, typ: "Vec256", resultInArg0: false}, + {name: "VMAXPDMasked256", argLength: 3, reg: w2kw, asm: "VMAXPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMINPD256", argLength: 2, reg: v21, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMINPDMasked256", argLength: 3, reg: v2kv, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMINPDMasked256", argLength: 3, reg: w2kw, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMULPD256", argLength: 2, reg: v21, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VSCALEFPD256", argLength: 2, reg: v21, asm: "VSCALEFPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSCALEFPDMasked256", argLength: 3, reg: v2kv, asm: "VSCALEFPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VMULPDMasked256", argLength: 3, reg: v2kv, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VSCALEFPD256", argLength: 2, reg: w21, asm: "VSCALEFPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSCALEFPDMasked256", argLength: 3, reg: w2kw, asm: "VSCALEFPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VMULPDMasked256", argLength: 3, reg: w2kw, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VHADDPD256", argLength: 2, reg: v21, asm: "VHADDPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VHSUBPD256", argLength: 2, reg: v21, asm: "VHSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VSQRTPD256", argLength: 1, reg: v11, asm: "VSQRTPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSQRTPDMasked256", argLength: 2, reg: vkv, asm: "VSQRTPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSQRTPDMasked256", argLength: 2, reg: wkw, asm: "VSQRTPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VSUBPD256", argLength: 2, reg: v21, asm: "VSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSUBPDMasked256", argLength: 3, reg: v2kv, asm: "VSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VADDPD512", argLength: 2, reg: v21, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VADDPDMasked512", argLength: 3, reg: v2kv, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VRCP14PD512", argLength: 1, reg: v11, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRCP14PDMasked512", argLength: 2, reg: vkv, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRSQRT14PD512", argLength: 1, reg: v11, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRSQRT14PDMasked512", argLength: 2, reg: vkv, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VDIVPD512", argLength: 2, reg: v21, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VDIVPDMasked512", argLength: 3, reg: v2kv, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VFMADD213PD512", argLength: 3, reg: v31, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADD213PDMasked512", argLength: 4, reg: v3kv, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADDSUB213PD512", argLength: 3, reg: 
v31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADDSUB213PDMasked512", argLength: 4, reg: v3kv, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUBADD213PD512", argLength: 3, reg: v31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUBADD213PDMasked512", argLength: 4, reg: v3kv, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VMAXPD512", argLength: 2, reg: v21, asm: "VMAXPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMAXPDMasked512", argLength: 3, reg: v2kv, asm: "VMAXPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMINPD512", argLength: 2, reg: v21, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMINPDMasked512", argLength: 3, reg: v2kv, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMULPD512", argLength: 2, reg: v21, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VSCALEFPD512", argLength: 2, reg: v21, asm: "VSCALEFPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSCALEFPDMasked512", argLength: 3, reg: v2kv, asm: "VSCALEFPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VMULPDMasked512", argLength: 3, reg: v2kv, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VSQRTPD512", argLength: 1, reg: v11, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSQRTPDMasked512", argLength: 2, reg: vkv, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSUBPD512", argLength: 2, reg: v21, asm: "VSUBPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSUBPDMasked512", argLength: 3, reg: v2kv, asm: "VSUBPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSUBPDMasked256", argLength: 3, reg: w2kw, asm: "VSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VADDPD512", argLength: 2, reg: w21, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VADDPDMasked512", argLength: 3, reg: w2kw, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VRCP14PD512", argLength: 1, reg: w11, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRCP14PDMasked512", argLength: 2, reg: wkw, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRSQRT14PD512", argLength: 1, reg: w11, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRSQRT14PDMasked512", argLength: 2, reg: wkw, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VDIVPD512", argLength: 2, reg: w21, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VDIVPDMasked512", argLength: 3, reg: w2kw, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VFMADD213PD512", argLength: 3, reg: w31, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADD213PDMasked512", argLength: 4, reg: w3kw, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB213PD512", argLength: 3, reg: w31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB213PDMasked512", argLength: 4, reg: w3kw, asm: 
"VFMADDSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD213PD512", argLength: 3, reg: w31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD213PDMasked512", argLength: 4, reg: w3kw, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VMAXPD512", argLength: 2, reg: w21, asm: "VMAXPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMAXPDMasked512", argLength: 3, reg: w2kw, asm: "VMAXPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMINPD512", argLength: 2, reg: w21, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMINPDMasked512", argLength: 3, reg: w2kw, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMULPD512", argLength: 2, reg: w21, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VSCALEFPD512", argLength: 2, reg: w21, asm: "VSCALEFPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSCALEFPDMasked512", argLength: 3, reg: w2kw, asm: "VSCALEFPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VMULPDMasked512", argLength: 3, reg: w2kw, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VSQRTPD512", argLength: 1, reg: w11, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSQRTPDMasked512", argLength: 2, reg: wkw, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSUBPD512", argLength: 2, reg: w21, asm: "VSUBPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSUBPDMasked512", argLength: 3, reg: w2kw, asm: "VSUBPD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPABSW256", argLength: 1, reg: v11, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPABSWMasked256", argLength: 2, reg: vkv, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSWMasked256", argLength: 2, reg: wkw, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPADDW256", argLength: 2, reg: v21, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPADDWMasked256", argLength: 3, reg: v2kv, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDWMasked256", argLength: 3, reg: w2kw, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPCMPEQW256", argLength: 2, reg: v21, asm: "VPCMPEQW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPCMPGTW256", argLength: 2, reg: v21, asm: "VPCMPGTW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMAXSW256", argLength: 2, reg: v21, asm: "VPMAXSW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXSWMasked256", argLength: 3, reg: v2kv, asm: "VPMAXSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSWMasked256", argLength: 3, reg: w2kw, asm: "VPMAXSW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINSW256", argLength: 2, reg: v21, asm: "VPMINSW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINSWMasked256", argLength: 3, reg: v2kv, asm: "VPMINSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSWMasked256", argLength: 3, reg: w2kw, asm: "VPMINSW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULHW256", argLength: 2, 
reg: v21, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULHWMasked256", argLength: 3, reg: v2kv, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULHWMasked256", argLength: 3, reg: w2kw, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULLW256", argLength: 2, reg: v21, asm: "VPMULLW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULLWMasked256", argLength: 3, reg: v2kv, asm: "VPMULLW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLWMasked256", argLength: 3, reg: w2kw, asm: "VPMULLW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMADDWD256", argLength: 2, reg: v21, asm: "VPMADDWD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMADDWDMasked256", argLength: 3, reg: v2kv, asm: "VPMADDWD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMADDWDMasked256", argLength: 3, reg: w2kw, asm: "VPMADDWD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPHADDW256", argLength: 2, reg: v21, asm: "VPHADDW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPHSUBW256", argLength: 2, reg: v21, asm: "VPHSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTW256", argLength: 1, reg: v11, asm: "VPOPCNTW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTWMasked256", argLength: 2, reg: vkv, asm: "VPOPCNTW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTW256", argLength: 1, reg: w11, asm: "VPOPCNTW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTWMasked256", argLength: 2, reg: wkw, asm: "VPOPCNTW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPADDSW256", argLength: 2, reg: v21, asm: "VPADDSW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPADDSWMasked256", argLength: 3, reg: v2kv, asm: "VPADDSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDSWMasked256", argLength: 3, reg: w2kw, asm: "VPADDSW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPHADDSW256", argLength: 2, reg: v21, asm: "VPHADDSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPHSUBSW256", argLength: 2, reg: v21, asm: "VPHSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBSW256", argLength: 2, reg: v21, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBSWMasked256", argLength: 3, reg: v2kv, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBSWMasked256", argLength: 3, reg: w2kw, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSLLW256", argLength: 2, reg: vfpv, asm: "VPSLLW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLW256", argLength: 2, reg: vfpv, asm: "VPSRLW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRAW256", argLength: 2, reg: vfpv, asm: "VPSRAW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSLLVW256", argLength: 2, reg: v21, asm: "VPSLLVW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDVW256", argLength: 3, reg: v31, asm: "VPSHLDVW", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSHLDVWMasked256", argLength: 4, reg: v3kv, asm: "VPSHLDVW", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSLLVWMasked256", 
argLength: 3, reg: v2kv, asm: "VPSLLVW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLVW256", argLength: 2, reg: v21, asm: "VPSRLVW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDVW256", argLength: 3, reg: v31, asm: "VPSHRDVW", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSHRDVWMasked256", argLength: 4, reg: v3kv, asm: "VPSHRDVW", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSRLVWMasked256", argLength: 3, reg: v2kv, asm: "VPSRLVW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAVW256", argLength: 2, reg: v21, asm: "VPSRAVW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAVWMasked256", argLength: 3, reg: v2kv, asm: "VPSRAVW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLVW256", argLength: 2, reg: w21, asm: "VPSLLVW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDVW256", argLength: 3, reg: w31, asm: "VPSHLDVW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHLDVWMasked256", argLength: 4, reg: w3kw, asm: "VPSHLDVW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSLLVWMasked256", argLength: 3, reg: w2kw, asm: "VPSLLVW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLVW256", argLength: 2, reg: w21, asm: "VPSRLVW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDVW256", argLength: 3, reg: w31, asm: "VPSHRDVW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHRDVWMasked256", argLength: 4, reg: w3kw, asm: "VPSHRDVW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRLVWMasked256", argLength: 3, reg: w2kw, asm: "VPSRLVW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAVW256", argLength: 2, reg: w21, asm: "VPSRAVW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAVWMasked256", argLength: 3, reg: w2kw, asm: "VPSRAVW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSIGNW256", argLength: 2, reg: v21, asm: "VPSIGNW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBW256", argLength: 2, reg: v21, asm: "VPSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBWMasked256", argLength: 3, reg: v2kv, asm: "VPSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPABSW512", argLength: 1, reg: v11, asm: "VPABSW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPABSWMasked512", argLength: 2, reg: vkv, asm: "VPABSW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDW512", argLength: 2, reg: v21, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPADDWMasked512", argLength: 3, reg: v2kv, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXSW512", argLength: 2, reg: v21, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXSWMasked512", argLength: 3, reg: v2kv, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSW512", argLength: 2, reg: v21, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSWMasked512", argLength: 3, reg: v2kv, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULHW512", argLength: 2, reg: v21, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: 
"VPMULHWMasked512", argLength: 3, reg: v2kv, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULLW512", argLength: 2, reg: v21, asm: "VPMULLW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULLWMasked512", argLength: 3, reg: v2kv, asm: "VPMULLW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMADDWD512", argLength: 2, reg: v21, asm: "VPMADDWD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPMADDWDMasked512", argLength: 3, reg: v2kv, asm: "VPMADDWD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPOPCNTW512", argLength: 1, reg: v11, asm: "VPOPCNTW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPOPCNTWMasked512", argLength: 2, reg: vkv, asm: "VPOPCNTW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDSW512", argLength: 2, reg: v21, asm: "VPADDSW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPADDSWMasked512", argLength: 3, reg: v2kv, asm: "VPADDSW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBSW512", argLength: 2, reg: v21, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBSWMasked512", argLength: 3, reg: v2kv, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSLLVW512", argLength: 2, reg: v21, asm: "VPSLLVW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDVW512", argLength: 3, reg: v31, asm: "VPSHLDVW", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSHLDVWMasked512", argLength: 4, reg: v3kv, asm: "VPSHLDVW", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSLLVWMasked512", argLength: 3, reg: v2kv, asm: "VPSLLVW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLVW512", argLength: 2, reg: v21, asm: "VPSRLVW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDVW512", argLength: 3, reg: v31, asm: "VPSHRDVW", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSHRDVWMasked512", argLength: 4, reg: v3kv, asm: "VPSHRDVW", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSRLVWMasked512", argLength: 3, reg: v2kv, asm: "VPSRLVW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAVW512", argLength: 2, reg: v21, asm: "VPSRAVW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAVWMasked512", argLength: 3, reg: v2kv, asm: "VPSRAVW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBW512", argLength: 2, reg: v21, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBWMasked512", argLength: 3, reg: v2kv, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBWMasked256", argLength: 3, reg: w2kw, asm: "VPSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSW512", argLength: 1, reg: w11, asm: "VPABSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPABSWMasked512", argLength: 2, reg: wkw, asm: "VPABSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDW512", argLength: 2, reg: w21, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDWMasked512", argLength: 3, reg: w2kw, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSW512", argLength: 2, reg: w21, asm: "VPMAXSW", commutative: true, typ: "Vec512", 
resultInArg0: false}, + {name: "VPMAXSWMasked512", argLength: 3, reg: w2kw, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSW512", argLength: 2, reg: w21, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSWMasked512", argLength: 3, reg: w2kw, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULHW512", argLength: 2, reg: w21, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULHWMasked512", argLength: 3, reg: w2kw, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLW512", argLength: 2, reg: w21, asm: "VPMULLW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLWMasked512", argLength: 3, reg: w2kw, asm: "VPMULLW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMADDWD512", argLength: 2, reg: w21, asm: "VPMADDWD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMADDWDMasked512", argLength: 3, reg: w2kw, asm: "VPMADDWD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTW512", argLength: 1, reg: w11, asm: "VPOPCNTW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTWMasked512", argLength: 2, reg: wkw, asm: "VPOPCNTW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDSW512", argLength: 2, reg: w21, asm: "VPADDSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDSWMasked512", argLength: 3, reg: w2kw, asm: "VPADDSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBSW512", argLength: 2, reg: w21, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBSWMasked512", argLength: 3, reg: w2kw, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLVW512", argLength: 2, reg: w21, asm: "VPSLLVW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDVW512", argLength: 3, reg: w31, asm: "VPSHLDVW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHLDVWMasked512", argLength: 4, reg: w3kw, asm: "VPSHLDVW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSLLVWMasked512", argLength: 3, reg: w2kw, asm: "VPSLLVW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLVW512", argLength: 2, reg: w21, asm: "VPSRLVW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDVW512", argLength: 3, reg: w31, asm: "VPSHRDVW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHRDVWMasked512", argLength: 4, reg: w3kw, asm: "VPSHRDVW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRLVWMasked512", argLength: 3, reg: w2kw, asm: "VPSRLVW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAVW512", argLength: 2, reg: w21, asm: "VPSRAVW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAVWMasked512", argLength: 3, reg: w2kw, asm: "VPSRAVW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBW512", argLength: 2, reg: w21, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBWMasked512", argLength: 3, reg: w2kw, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPABSW128", argLength: 1, reg: v11, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPABSWMasked128", argLength: 2, reg: vkv, asm: "VPABSW", commutative: 
false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSWMasked128", argLength: 2, reg: wkw, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDW128", argLength: 2, reg: v21, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPADDWMasked128", argLength: 3, reg: v2kv, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDWMasked128", argLength: 3, reg: w2kw, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPCMPEQW128", argLength: 2, reg: v21, asm: "VPCMPEQW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPCMPGTW128", argLength: 2, reg: v21, asm: "VPCMPGTW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMAXSW128", argLength: 2, reg: v21, asm: "VPMAXSW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXSWMasked128", argLength: 3, reg: v2kv, asm: "VPMAXSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSWMasked128", argLength: 3, reg: w2kw, asm: "VPMAXSW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINSW128", argLength: 2, reg: v21, asm: "VPMINSW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINSWMasked128", argLength: 3, reg: v2kv, asm: "VPMINSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSWMasked128", argLength: 3, reg: w2kw, asm: "VPMINSW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULHW128", argLength: 2, reg: v21, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULHWMasked128", argLength: 3, reg: v2kv, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULHWMasked128", argLength: 3, reg: w2kw, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULLW128", argLength: 2, reg: v21, asm: "VPMULLW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULLWMasked128", argLength: 3, reg: v2kv, asm: "VPMULLW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLWMasked128", argLength: 3, reg: w2kw, asm: "VPMULLW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMADDWD128", argLength: 2, reg: v21, asm: "VPMADDWD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMADDWDMasked128", argLength: 3, reg: v2kv, asm: "VPMADDWD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMADDWDMasked128", argLength: 3, reg: w2kw, asm: "VPMADDWD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPHADDW128", argLength: 2, reg: v21, asm: "VPHADDW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPHSUBW128", argLength: 2, reg: v21, asm: "VPHSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTW128", argLength: 1, reg: v11, asm: "VPOPCNTW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTWMasked128", argLength: 2, reg: vkv, asm: "VPOPCNTW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTW128", argLength: 1, reg: w11, asm: "VPOPCNTW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTWMasked128", argLength: 2, reg: wkw, asm: "VPOPCNTW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDSW128", argLength: 2, reg: v21, asm: "VPADDSW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPADDSWMasked128", argLength: 3, reg: v2kv, asm: "VPADDSW", 
commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDSWMasked128", argLength: 3, reg: w2kw, asm: "VPADDSW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPHADDSW128", argLength: 2, reg: v21, asm: "VPHADDSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPHSUBSW128", argLength: 2, reg: v21, asm: "VPHSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBSW128", argLength: 2, reg: v21, asm: "VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBSWMasked128", argLength: 3, reg: v2kv, asm: "VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBSWMasked128", argLength: 3, reg: w2kw, asm: "VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSLLW128", argLength: 2, reg: vfpv, asm: "VPSLLW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRLW128", argLength: 2, reg: vfpv, asm: "VPSRLW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRAW128", argLength: 2, reg: vfpv, asm: "VPSRAW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSLLVW128", argLength: 2, reg: v21, asm: "VPSLLVW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDVW128", argLength: 3, reg: v31, asm: "VPSHLDVW", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSHLDVWMasked128", argLength: 4, reg: v3kv, asm: "VPSHLDVW", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSLLVWMasked128", argLength: 3, reg: v2kv, asm: "VPSLLVW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLVW128", argLength: 2, reg: v21, asm: "VPSRLVW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDVW128", argLength: 3, reg: v31, asm: "VPSHRDVW", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSHRDVWMasked128", argLength: 4, reg: v3kv, asm: "VPSHRDVW", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSRLVWMasked128", argLength: 3, reg: v2kv, asm: "VPSRLVW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAVW128", argLength: 2, reg: v21, asm: "VPSRAVW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAVWMasked128", argLength: 3, reg: v2kv, asm: "VPSRAVW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLVW128", argLength: 2, reg: w21, asm: "VPSLLVW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDVW128", argLength: 3, reg: w31, asm: "VPSHLDVW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHLDVWMasked128", argLength: 4, reg: w3kw, asm: "VPSHLDVW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSLLVWMasked128", argLength: 3, reg: w2kw, asm: "VPSLLVW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLVW128", argLength: 2, reg: w21, asm: "VPSRLVW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDVW128", argLength: 3, reg: w31, asm: "VPSHRDVW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHRDVWMasked128", argLength: 4, reg: w3kw, asm: "VPSHRDVW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRLVWMasked128", argLength: 3, reg: w2kw, asm: "VPSRLVW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAVW128", argLength: 2, reg: w21, asm: "VPSRAVW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAVWMasked128", argLength: 3, reg: w2kw, asm: 
"VPSRAVW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSIGNW128", argLength: 2, reg: v21, asm: "VPSIGNW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBW128", argLength: 2, reg: v21, asm: "VPSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBWMasked128", argLength: 3, reg: v2kv, asm: "VPSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPABSD512", argLength: 1, reg: v11, asm: "VPABSD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPABSDMasked512", argLength: 2, reg: vkv, asm: "VPABSD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDD512", argLength: 2, reg: v21, asm: "VPADDD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPADDDMasked512", argLength: 3, reg: v2kv, asm: "VPADDD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPANDD512", argLength: 2, reg: v21, asm: "VPANDD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPANDDMasked512", argLength: 3, reg: v2kv, asm: "VPANDD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPANDND512", argLength: 2, reg: v21, asm: "VPANDND", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPANDNDMasked512", argLength: 3, reg: v2kv, asm: "VPANDND", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXSD512", argLength: 2, reg: v21, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXSDMasked512", argLength: 3, reg: v2kv, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSD512", argLength: 2, reg: v21, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSDMasked512", argLength: 3, reg: v2kv, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULLD512", argLength: 2, reg: v21, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULLDMasked512", argLength: 3, reg: v2kv, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPORD512", argLength: 2, reg: v21, asm: "VPORD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPORDMasked512", argLength: 3, reg: v2kv, asm: "VPORD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPDPWSSD512", argLength: 3, reg: v31, asm: "VPDPWSSD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPDPWSSDMasked512", argLength: 4, reg: v3kv, asm: "VPDPWSSD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPOPCNTD512", argLength: 1, reg: v11, asm: "VPOPCNTD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPOPCNTDMasked512", argLength: 2, reg: vkv, asm: "VPOPCNTD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPROLVD512", argLength: 2, reg: v21, asm: "VPROLVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPROLVDMasked512", argLength: 3, reg: v2kv, asm: "VPROLVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORVD512", argLength: 2, reg: v21, asm: "VPRORVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORVDMasked512", argLength: 3, reg: v2kv, asm: "VPRORVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPDPWSSDS512", argLength: 3, reg: v31, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPDPWSSDSMasked512", argLength: 4, reg: v3kv, asm: 
"VPDPWSSDS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPDPBUSDS512", argLength: 3, reg: v31, asm: "VPDPBUSDS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPDPBUSDSMasked512", argLength: 4, reg: v3kv, asm: "VPDPBUSDS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSLLVD512", argLength: 2, reg: v21, asm: "VPSLLVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDVD512", argLength: 3, reg: v31, asm: "VPSHLDVD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSHLDVDMasked512", argLength: 4, reg: v3kv, asm: "VPSHLDVD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSLLVDMasked512", argLength: 3, reg: v2kv, asm: "VPSLLVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLVD512", argLength: 2, reg: v21, asm: "VPSRLVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDVD512", argLength: 3, reg: v31, asm: "VPSHRDVD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSHRDVDMasked512", argLength: 4, reg: v3kv, asm: "VPSHRDVD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSRLVDMasked512", argLength: 3, reg: v2kv, asm: "VPSRLVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAVD512", argLength: 2, reg: v21, asm: "VPSRAVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAVDMasked512", argLength: 3, reg: v2kv, asm: "VPSRAVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBD512", argLength: 2, reg: v21, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBDMasked512", argLength: 3, reg: v2kv, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPDPBUSD512", argLength: 3, reg: v31, asm: "VPDPBUSD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPDPBUSDMasked512", argLength: 4, reg: v3kv, asm: "VPDPBUSD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPXORD512", argLength: 2, reg: v21, asm: "VPXORD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPXORDMasked512", argLength: 3, reg: v2kv, asm: "VPXORD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBWMasked128", argLength: 3, reg: w2kw, asm: "VPSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSD512", argLength: 1, reg: w11, asm: "VPABSD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPABSDMasked512", argLength: 2, reg: wkw, asm: "VPABSD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDD512", argLength: 2, reg: w21, asm: "VPADDD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDDMasked512", argLength: 3, reg: w2kw, asm: "VPADDD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDD512", argLength: 2, reg: w21, asm: "VPANDD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDDMasked512", argLength: 3, reg: w2kw, asm: "VPANDD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDND512", argLength: 2, reg: w21, asm: "VPANDND", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPANDNDMasked512", argLength: 3, reg: w2kw, asm: "VPANDND", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSD512", argLength: 2, reg: w21, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSDMasked512", argLength: 3, 
reg: w2kw, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSD512", argLength: 2, reg: w21, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSDMasked512", argLength: 3, reg: w2kw, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLD512", argLength: 2, reg: w21, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLDMasked512", argLength: 3, reg: w2kw, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPORD512", argLength: 2, reg: w21, asm: "VPORD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPORDMasked512", argLength: 3, reg: w2kw, asm: "VPORD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPDPWSSD512", argLength: 3, reg: w31, asm: "VPDPWSSD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPDPWSSDMasked512", argLength: 4, reg: w3kw, asm: "VPDPWSSD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPOPCNTD512", argLength: 1, reg: w11, asm: "VPOPCNTD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTDMasked512", argLength: 2, reg: wkw, asm: "VPOPCNTD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLVD512", argLength: 2, reg: w21, asm: "VPROLVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLVDMasked512", argLength: 3, reg: w2kw, asm: "VPROLVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORVD512", argLength: 2, reg: w21, asm: "VPRORVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORVDMasked512", argLength: 3, reg: w2kw, asm: "VPRORVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPDPWSSDS512", argLength: 3, reg: w31, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPDPWSSDSMasked512", argLength: 4, reg: w3kw, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPDPBUSDS512", argLength: 3, reg: w31, asm: "VPDPBUSDS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPDPBUSDSMasked512", argLength: 4, reg: w3kw, asm: "VPDPBUSDS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSLLVD512", argLength: 2, reg: w21, asm: "VPSLLVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDVD512", argLength: 3, reg: w31, asm: "VPSHLDVD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHLDVDMasked512", argLength: 4, reg: w3kw, asm: "VPSHLDVD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSLLVDMasked512", argLength: 3, reg: w2kw, asm: "VPSLLVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLVD512", argLength: 2, reg: w21, asm: "VPSRLVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDVD512", argLength: 3, reg: w31, asm: "VPSHRDVD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHRDVDMasked512", argLength: 4, reg: w3kw, asm: "VPSHRDVD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRLVDMasked512", argLength: 3, reg: w2kw, asm: "VPSRLVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAVD512", argLength: 2, reg: w21, asm: "VPSRAVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAVDMasked512", argLength: 3, reg: w2kw, asm: "VPSRAVD", commutative: false, typ: "Vec512", resultInArg0: false}, + 
{name: "VPSUBD512", argLength: 2, reg: w21, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBDMasked512", argLength: 3, reg: w2kw, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPDPBUSD512", argLength: 3, reg: w31, asm: "VPDPBUSD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPDPBUSDMasked512", argLength: 4, reg: w3kw, asm: "VPDPBUSD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPXORD512", argLength: 2, reg: w21, asm: "VPXORD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPXORDMasked512", argLength: 3, reg: w2kw, asm: "VPXORD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPABSD128", argLength: 1, reg: v11, asm: "VPABSD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPABSDMasked128", argLength: 2, reg: vkv, asm: "VPABSD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSDMasked128", argLength: 2, reg: wkw, asm: "VPABSD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDD128", argLength: 2, reg: v21, asm: "VPADDD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPADDDMasked128", argLength: 3, reg: v2kv, asm: "VPADDD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPANDDMasked128", argLength: 3, reg: v2kv, asm: "VPANDD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPANDNDMasked128", argLength: 3, reg: v2kv, asm: "VPANDND", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDDMasked128", argLength: 3, reg: w2kw, asm: "VPADDD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPANDDMasked128", argLength: 3, reg: w2kw, asm: "VPANDD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPANDNDMasked128", argLength: 3, reg: w2kw, asm: "VPANDND", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPEQD128", argLength: 2, reg: v21, asm: "VPCMPEQD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPCMPGTD128", argLength: 2, reg: v21, asm: "VPCMPGTD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMAXSD128", argLength: 2, reg: v21, asm: "VPMAXSD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXSDMasked128", argLength: 3, reg: v2kv, asm: "VPMAXSD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSDMasked128", argLength: 3, reg: w2kw, asm: "VPMAXSD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINSD128", argLength: 2, reg: v21, asm: "VPMINSD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINSDMasked128", argLength: 3, reg: v2kv, asm: "VPMINSD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSDMasked128", argLength: 3, reg: w2kw, asm: "VPMINSD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULDQ128", argLength: 2, reg: v21, asm: "VPMULDQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULLD128", argLength: 2, reg: v21, asm: "VPMULLD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULLDMasked128", argLength: 3, reg: v2kv, asm: "VPMULLD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPORDMasked128", argLength: 3, reg: v2kv, asm: "VPORD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLDMasked128", argLength: 3, reg: w2kw, asm: "VPMULLD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: 
"VPORDMasked128", argLength: 3, reg: w2kw, asm: "VPORD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPDPWSSD128", argLength: 3, reg: v31, asm: "VPDPWSSD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPDPWSSDMasked128", argLength: 4, reg: v3kv, asm: "VPDPWSSD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPDPWSSDMasked128", argLength: 4, reg: w3kw, asm: "VPDPWSSD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPHADDD128", argLength: 2, reg: v21, asm: "VPHADDD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPHSUBD128", argLength: 2, reg: v21, asm: "VPHSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTD128", argLength: 1, reg: v11, asm: "VPOPCNTD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTDMasked128", argLength: 2, reg: vkv, asm: "VPOPCNTD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPROLVD128", argLength: 2, reg: v21, asm: "VPROLVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPROLVDMasked128", argLength: 3, reg: v2kv, asm: "VPROLVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORVD128", argLength: 2, reg: v21, asm: "VPRORVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORVDMasked128", argLength: 3, reg: v2kv, asm: "VPRORVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTD128", argLength: 1, reg: w11, asm: "VPOPCNTD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTDMasked128", argLength: 2, reg: wkw, asm: "VPOPCNTD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLVD128", argLength: 2, reg: w21, asm: "VPROLVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLVDMasked128", argLength: 3, reg: w2kw, asm: "VPROLVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORVD128", argLength: 2, reg: w21, asm: "VPRORVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORVDMasked128", argLength: 3, reg: w2kw, asm: "VPRORVD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPDPWSSDS128", argLength: 3, reg: v31, asm: "VPDPWSSDS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPDPWSSDSMasked128", argLength: 4, reg: v3kv, asm: "VPDPWSSDS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPDPWSSDSMasked128", argLength: 4, reg: w3kw, asm: "VPDPWSSDS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPDPBUSDS128", argLength: 3, reg: v31, asm: "VPDPBUSDS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPDPBUSDSMasked128", argLength: 4, reg: v3kv, asm: "VPDPBUSDS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPDPBUSDSMasked128", argLength: 4, reg: w3kw, asm: "VPDPBUSDS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPSLLD128", argLength: 2, reg: vfpv, asm: "VPSLLD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRLD128", argLength: 2, reg: vfpv, asm: "VPSRLD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRAD128", argLength: 2, reg: vfpv, asm: "VPSRAD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSLLVD128", argLength: 2, reg: v21, asm: "VPSLLVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDVD128", argLength: 3, reg: v31, asm: "VPSHLDVD", commutative: false, typ: "Vec128", 
resultInArg0: true}, - {name: "VPSHLDVDMasked128", argLength: 4, reg: v3kv, asm: "VPSHLDVD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSLLVDMasked128", argLength: 3, reg: v2kv, asm: "VPSLLVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDVD128", argLength: 3, reg: w31, asm: "VPSHLDVD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHLDVDMasked128", argLength: 4, reg: w3kw, asm: "VPSHLDVD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSLLVDMasked128", argLength: 3, reg: w2kw, asm: "VPSLLVD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRLVD128", argLength: 2, reg: v21, asm: "VPSRLVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDVD128", argLength: 3, reg: v31, asm: "VPSHRDVD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSHRDVDMasked128", argLength: 4, reg: v3kv, asm: "VPSHRDVD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSRLVDMasked128", argLength: 3, reg: v2kv, asm: "VPSRLVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDVD128", argLength: 3, reg: w31, asm: "VPSHRDVD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHRDVDMasked128", argLength: 4, reg: w3kw, asm: "VPSHRDVD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRLVDMasked128", argLength: 3, reg: w2kw, asm: "VPSRLVD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRAVD128", argLength: 2, reg: v21, asm: "VPSRAVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAVDMasked128", argLength: 3, reg: v2kv, asm: "VPSRAVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAVDMasked128", argLength: 3, reg: w2kw, asm: "VPSRAVD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSIGND128", argLength: 2, reg: v21, asm: "VPSIGND", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBD128", argLength: 2, reg: v21, asm: "VPSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBDMasked128", argLength: 3, reg: v2kv, asm: "VPSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBDMasked128", argLength: 3, reg: w2kw, asm: "VPSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPDPBUSD128", argLength: 3, reg: v31, asm: "VPDPBUSD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPDPBUSDMasked128", argLength: 4, reg: v3kv, asm: "VPDPBUSD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPXORDMasked128", argLength: 3, reg: v2kv, asm: "VPXORD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPDPBUSDMasked128", argLength: 4, reg: w3kw, asm: "VPDPBUSD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPXORDMasked128", argLength: 3, reg: w2kw, asm: "VPXORD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPABSD256", argLength: 1, reg: v11, asm: "VPABSD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPABSDMasked256", argLength: 2, reg: vkv, asm: "VPABSD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSDMasked256", argLength: 2, reg: wkw, asm: "VPABSD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPADDD256", argLength: 2, reg: v21, asm: "VPADDD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPADDDMasked256", argLength: 3, reg: v2kv, asm: "VPADDD", 
commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPANDDMasked256", argLength: 3, reg: v2kv, asm: "VPANDD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPANDNDMasked256", argLength: 3, reg: v2kv, asm: "VPANDND", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDDMasked256", argLength: 3, reg: w2kw, asm: "VPADDD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPANDDMasked256", argLength: 3, reg: w2kw, asm: "VPANDD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPANDNDMasked256", argLength: 3, reg: w2kw, asm: "VPANDND", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPEQD256", argLength: 2, reg: v21, asm: "VPCMPEQD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPCMPGTD256", argLength: 2, reg: v21, asm: "VPCMPGTD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMAXSD256", argLength: 2, reg: v21, asm: "VPMAXSD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXSDMasked256", argLength: 3, reg: v2kv, asm: "VPMAXSD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSDMasked256", argLength: 3, reg: w2kw, asm: "VPMAXSD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINSD256", argLength: 2, reg: v21, asm: "VPMINSD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINSDMasked256", argLength: 3, reg: v2kv, asm: "VPMINSD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSDMasked256", argLength: 3, reg: w2kw, asm: "VPMINSD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULDQ256", argLength: 2, reg: v21, asm: "VPMULDQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULLD256", argLength: 2, reg: v21, asm: "VPMULLD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULLDMasked256", argLength: 3, reg: v2kv, asm: "VPMULLD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPORDMasked256", argLength: 3, reg: v2kv, asm: "VPORD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLDMasked256", argLength: 3, reg: w2kw, asm: "VPMULLD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPORDMasked256", argLength: 3, reg: w2kw, asm: "VPORD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPDPWSSD256", argLength: 3, reg: v31, asm: "VPDPWSSD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPDPWSSDMasked256", argLength: 4, reg: v3kv, asm: "VPDPWSSD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPDPWSSDMasked256", argLength: 4, reg: w3kw, asm: "VPDPWSSD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPHADDD256", argLength: 2, reg: v21, asm: "VPHADDD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPHSUBD256", argLength: 2, reg: v21, asm: "VPHSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTD256", argLength: 1, reg: v11, asm: "VPOPCNTD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTDMasked256", argLength: 2, reg: vkv, asm: "VPOPCNTD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPROLVD256", argLength: 2, reg: v21, asm: "VPROLVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPROLVDMasked256", argLength: 3, reg: v2kv, asm: "VPROLVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORVD256", argLength: 2, reg: v21, asm: 
"VPRORVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORVDMasked256", argLength: 3, reg: v2kv, asm: "VPRORVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTD256", argLength: 1, reg: w11, asm: "VPOPCNTD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTDMasked256", argLength: 2, reg: wkw, asm: "VPOPCNTD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLVD256", argLength: 2, reg: w21, asm: "VPROLVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLVDMasked256", argLength: 3, reg: w2kw, asm: "VPROLVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORVD256", argLength: 2, reg: w21, asm: "VPRORVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORVDMasked256", argLength: 3, reg: w2kw, asm: "VPRORVD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPDPWSSDS256", argLength: 3, reg: v31, asm: "VPDPWSSDS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPDPWSSDSMasked256", argLength: 4, reg: v3kv, asm: "VPDPWSSDS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPDPWSSDSMasked256", argLength: 4, reg: w3kw, asm: "VPDPWSSDS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPDPBUSDS256", argLength: 3, reg: v31, asm: "VPDPBUSDS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPDPBUSDSMasked256", argLength: 4, reg: v3kv, asm: "VPDPBUSDS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPDPBUSDSMasked256", argLength: 4, reg: w3kw, asm: "VPDPBUSDS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPSLLD256", argLength: 2, reg: vfpv, asm: "VPSLLD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLD256", argLength: 2, reg: vfpv, asm: "VPSRLD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRAD256", argLength: 2, reg: vfpv, asm: "VPSRAD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSLLVD256", argLength: 2, reg: v21, asm: "VPSLLVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDVD256", argLength: 3, reg: v31, asm: "VPSHLDVD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSHLDVDMasked256", argLength: 4, reg: v3kv, asm: "VPSHLDVD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSLLVDMasked256", argLength: 3, reg: v2kv, asm: "VPSLLVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDVD256", argLength: 3, reg: w31, asm: "VPSHLDVD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHLDVDMasked256", argLength: 4, reg: w3kw, asm: "VPSHLDVD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSLLVDMasked256", argLength: 3, reg: w2kw, asm: "VPSLLVD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLVD256", argLength: 2, reg: v21, asm: "VPSRLVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDVD256", argLength: 3, reg: v31, asm: "VPSHRDVD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSHRDVDMasked256", argLength: 4, reg: v3kv, asm: "VPSHRDVD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSRLVDMasked256", argLength: 3, reg: v2kv, asm: "VPSRLVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDVD256", argLength: 3, reg: w31, asm: "VPSHRDVD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: 
"VPSHRDVDMasked256", argLength: 4, reg: w3kw, asm: "VPSHRDVD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRLVDMasked256", argLength: 3, reg: w2kw, asm: "VPSRLVD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRAVD256", argLength: 2, reg: v21, asm: "VPSRAVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAVDMasked256", argLength: 3, reg: v2kv, asm: "VPSRAVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAVDMasked256", argLength: 3, reg: w2kw, asm: "VPSRAVD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSIGND256", argLength: 2, reg: v21, asm: "VPSIGND", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBD256", argLength: 2, reg: v21, asm: "VPSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBDMasked256", argLength: 3, reg: v2kv, asm: "VPSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBDMasked256", argLength: 3, reg: w2kw, asm: "VPSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPDPBUSD256", argLength: 3, reg: v31, asm: "VPDPBUSD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPDPBUSDMasked256", argLength: 4, reg: v3kv, asm: "VPDPBUSD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPXORDMasked256", argLength: 3, reg: v2kv, asm: "VPXORD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPABSQ128", argLength: 1, reg: v11, asm: "VPABSQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPABSQMasked128", argLength: 2, reg: vkv, asm: "VPABSQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPDPBUSDMasked256", argLength: 4, reg: w3kw, asm: "VPDPBUSD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPXORDMasked256", argLength: 3, reg: w2kw, asm: "VPXORD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPABSQ128", argLength: 1, reg: w11, asm: "VPABSQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSQMasked128", argLength: 2, reg: wkw, asm: "VPABSQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDQ128", argLength: 2, reg: v21, asm: "VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPADDQMasked128", argLength: 3, reg: v2kv, asm: "VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPANDQMasked128", argLength: 3, reg: v2kv, asm: "VPANDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPANDNQMasked128", argLength: 3, reg: v2kv, asm: "VPANDNQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDQMasked128", argLength: 3, reg: w2kw, asm: "VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPANDQMasked128", argLength: 3, reg: w2kw, asm: "VPANDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPANDNQMasked128", argLength: 3, reg: w2kw, asm: "VPANDNQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPEQQ128", argLength: 2, reg: v21, asm: "VPCMPEQQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXSQ128", argLength: 2, reg: v21, asm: "VPMAXSQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXSQMasked128", argLength: 3, reg: v2kv, asm: "VPMAXSQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINSQ128", argLength: 2, reg: v21, asm: "VPMINSQ", commutative: true, typ: "Vec128", resultInArg0: 
false}, - {name: "VPMINSQMasked128", argLength: 3, reg: v2kv, asm: "VPMINSQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULDQMasked128", argLength: 3, reg: v2kv, asm: "VPMULDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULLQ128", argLength: 2, reg: v21, asm: "VPMULLQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULLQMasked128", argLength: 3, reg: v2kv, asm: "VPMULLQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPORQMasked128", argLength: 3, reg: v2kv, asm: "VPORQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTQ128", argLength: 1, reg: v11, asm: "VPOPCNTQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTQMasked128", argLength: 2, reg: vkv, asm: "VPOPCNTQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPROLVQ128", argLength: 2, reg: v21, asm: "VPROLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPROLVQMasked128", argLength: 3, reg: v2kv, asm: "VPROLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORVQ128", argLength: 2, reg: v21, asm: "VPRORVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORVQMasked128", argLength: 3, reg: v2kv, asm: "VPRORVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSQ128", argLength: 2, reg: w21, asm: "VPMAXSQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSQMasked128", argLength: 3, reg: w2kw, asm: "VPMAXSQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSQ128", argLength: 2, reg: w21, asm: "VPMINSQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSQMasked128", argLength: 3, reg: w2kw, asm: "VPMINSQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULDQMasked128", argLength: 3, reg: w2kw, asm: "VPMULDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLQ128", argLength: 2, reg: w21, asm: "VPMULLQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLQMasked128", argLength: 3, reg: w2kw, asm: "VPMULLQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPORQMasked128", argLength: 3, reg: w2kw, asm: "VPORQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTQ128", argLength: 1, reg: w11, asm: "VPOPCNTQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTQMasked128", argLength: 2, reg: wkw, asm: "VPOPCNTQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLVQ128", argLength: 2, reg: w21, asm: "VPROLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLVQMasked128", argLength: 3, reg: w2kw, asm: "VPROLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORVQ128", argLength: 2, reg: w21, asm: "VPRORVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORVQMasked128", argLength: 3, reg: w2kw, asm: "VPRORVQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSLLQ128", argLength: 2, reg: vfpv, asm: "VPSLLQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSLLQMasked128", argLength: 3, reg: vfpkv, asm: "VPSLLQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLQMasked128", argLength: 3, reg: wfpkw, asm: "VPSLLQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRLQ128", argLength: 2, reg: vfpv, asm: "VPSRLQ", commutative: false, typ: 
"Vec128", resultInArg0: false}, - {name: "VPSRLQMasked128", argLength: 3, reg: vfpkv, asm: "VPSRLQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAQ128", argLength: 2, reg: vfpv, asm: "VPSRAQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAQMasked128", argLength: 3, reg: vfpkv, asm: "VPSRAQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLQMasked128", argLength: 3, reg: wfpkw, asm: "VPSRLQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAQ128", argLength: 2, reg: wfpw, asm: "VPSRAQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAQMasked128", argLength: 3, reg: wfpkw, asm: "VPSRAQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSLLVQ128", argLength: 2, reg: v21, asm: "VPSLLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDVQ128", argLength: 3, reg: v31, asm: "VPSHLDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSHLDVQMasked128", argLength: 4, reg: v3kv, asm: "VPSHLDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSLLVQMasked128", argLength: 3, reg: v2kv, asm: "VPSLLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDVQ128", argLength: 3, reg: w31, asm: "VPSHLDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHLDVQMasked128", argLength: 4, reg: w3kw, asm: "VPSHLDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSLLVQMasked128", argLength: 3, reg: w2kw, asm: "VPSLLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRLVQ128", argLength: 2, reg: v21, asm: "VPSRLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDVQ128", argLength: 3, reg: v31, asm: "VPSHRDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSHRDVQMasked128", argLength: 4, reg: v3kv, asm: "VPSHRDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSRLVQMasked128", argLength: 3, reg: v2kv, asm: "VPSRLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAVQ128", argLength: 2, reg: v21, asm: "VPSRAVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAVQMasked128", argLength: 3, reg: v2kv, asm: "VPSRAVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDVQ128", argLength: 3, reg: w31, asm: "VPSHRDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHRDVQMasked128", argLength: 4, reg: w3kw, asm: "VPSHRDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRLVQMasked128", argLength: 3, reg: w2kw, asm: "VPSRLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAVQ128", argLength: 2, reg: w21, asm: "VPSRAVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAVQMasked128", argLength: 3, reg: w2kw, asm: "VPSRAVQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBQ128", argLength: 2, reg: v21, asm: "VPSUBQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBQMasked128", argLength: 3, reg: v2kv, asm: "VPSUBQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPXORQMasked128", argLength: 3, reg: v2kv, asm: "VPXORQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPABSQ256", argLength: 1, reg: v11, asm: "VPABSQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPABSQMasked256", argLength: 2, reg: vkv, asm: 
"VPABSQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBQMasked128", argLength: 3, reg: w2kw, asm: "VPSUBQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPXORQMasked128", argLength: 3, reg: w2kw, asm: "VPXORQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPABSQ256", argLength: 1, reg: w11, asm: "VPABSQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSQMasked256", argLength: 2, reg: wkw, asm: "VPABSQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPADDQ256", argLength: 2, reg: v21, asm: "VPADDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPADDQMasked256", argLength: 3, reg: v2kv, asm: "VPADDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPANDQMasked256", argLength: 3, reg: v2kv, asm: "VPANDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPANDNQMasked256", argLength: 3, reg: v2kv, asm: "VPANDNQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDQMasked256", argLength: 3, reg: w2kw, asm: "VPADDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPANDQMasked256", argLength: 3, reg: w2kw, asm: "VPANDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPANDNQMasked256", argLength: 3, reg: w2kw, asm: "VPANDNQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPEQQ256", argLength: 2, reg: v21, asm: "VPCMPEQQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPCMPGTQ256", argLength: 2, reg: v21, asm: "VPCMPGTQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXSQ256", argLength: 2, reg: v21, asm: "VPMAXSQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXSQMasked256", argLength: 3, reg: v2kv, asm: "VPMAXSQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINSQ256", argLength: 2, reg: v21, asm: "VPMINSQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINSQMasked256", argLength: 3, reg: v2kv, asm: "VPMINSQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULDQMasked256", argLength: 3, reg: v2kv, asm: "VPMULDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULLQ256", argLength: 2, reg: v21, asm: "VPMULLQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULLQMasked256", argLength: 3, reg: v2kv, asm: "VPMULLQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPORQMasked256", argLength: 3, reg: v2kv, asm: "VPORQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTQ256", argLength: 1, reg: v11, asm: "VPOPCNTQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTQMasked256", argLength: 2, reg: vkv, asm: "VPOPCNTQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPROLVQ256", argLength: 2, reg: v21, asm: "VPROLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPROLVQMasked256", argLength: 3, reg: v2kv, asm: "VPROLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORVQ256", argLength: 2, reg: v21, asm: "VPRORVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORVQMasked256", argLength: 3, reg: v2kv, asm: "VPRORVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSQ256", argLength: 2, reg: w21, asm: "VPMAXSQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSQMasked256", argLength: 3, 
reg: w2kw, asm: "VPMAXSQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSQ256", argLength: 2, reg: w21, asm: "VPMINSQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSQMasked256", argLength: 3, reg: w2kw, asm: "VPMINSQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULDQMasked256", argLength: 3, reg: w2kw, asm: "VPMULDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLQ256", argLength: 2, reg: w21, asm: "VPMULLQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLQMasked256", argLength: 3, reg: w2kw, asm: "VPMULLQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPORQMasked256", argLength: 3, reg: w2kw, asm: "VPORQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTQ256", argLength: 1, reg: w11, asm: "VPOPCNTQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTQMasked256", argLength: 2, reg: wkw, asm: "VPOPCNTQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLVQ256", argLength: 2, reg: w21, asm: "VPROLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLVQMasked256", argLength: 3, reg: w2kw, asm: "VPROLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORVQ256", argLength: 2, reg: w21, asm: "VPRORVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORVQMasked256", argLength: 3, reg: w2kw, asm: "VPRORVQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSLLQ256", argLength: 2, reg: vfpv, asm: "VPSLLQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSLLQMasked256", argLength: 3, reg: vfpkv, asm: "VPSLLQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLQMasked256", argLength: 3, reg: wfpkw, asm: "VPSLLQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLQ256", argLength: 2, reg: vfpv, asm: "VPSRLQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLQMasked256", argLength: 3, reg: vfpkv, asm: "VPSRLQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAQ256", argLength: 2, reg: vfpv, asm: "VPSRAQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAQMasked256", argLength: 3, reg: vfpkv, asm: "VPSRAQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLQMasked256", argLength: 3, reg: wfpkw, asm: "VPSRLQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAQ256", argLength: 2, reg: wfpw, asm: "VPSRAQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAQMasked256", argLength: 3, reg: wfpkw, asm: "VPSRAQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSLLVQ256", argLength: 2, reg: v21, asm: "VPSLLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDVQ256", argLength: 3, reg: v31, asm: "VPSHLDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSHLDVQMasked256", argLength: 4, reg: v3kv, asm: "VPSHLDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSLLVQMasked256", argLength: 3, reg: v2kv, asm: "VPSLLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDVQ256", argLength: 3, reg: w31, asm: "VPSHLDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHLDVQMasked256", argLength: 4, reg: w3kw, asm: "VPSHLDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: 
"VPSLLVQMasked256", argLength: 3, reg: w2kw, asm: "VPSLLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLVQ256", argLength: 2, reg: v21, asm: "VPSRLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDVQ256", argLength: 3, reg: v31, asm: "VPSHRDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSHRDVQMasked256", argLength: 4, reg: v3kv, asm: "VPSHRDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSRLVQMasked256", argLength: 3, reg: v2kv, asm: "VPSRLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAVQ256", argLength: 2, reg: v21, asm: "VPSRAVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAVQMasked256", argLength: 3, reg: v2kv, asm: "VPSRAVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDVQ256", argLength: 3, reg: w31, asm: "VPSHRDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHRDVQMasked256", argLength: 4, reg: w3kw, asm: "VPSHRDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRLVQMasked256", argLength: 3, reg: w2kw, asm: "VPSRLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAVQ256", argLength: 2, reg: w21, asm: "VPSRAVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAVQMasked256", argLength: 3, reg: w2kw, asm: "VPSRAVQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBQ256", argLength: 2, reg: v21, asm: "VPSUBQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBQMasked256", argLength: 3, reg: v2kv, asm: "VPSUBQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPXORQMasked256", argLength: 3, reg: v2kv, asm: "VPXORQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPABSQ512", argLength: 1, reg: v11, asm: "VPABSQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPABSQMasked512", argLength: 2, reg: vkv, asm: "VPABSQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDQ512", argLength: 2, reg: v21, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPADDQMasked512", argLength: 3, reg: v2kv, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPANDQ512", argLength: 2, reg: v21, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPANDQMasked512", argLength: 3, reg: v2kv, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPANDNQ512", argLength: 2, reg: v21, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPANDNQMasked512", argLength: 3, reg: v2kv, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXSQ512", argLength: 2, reg: v21, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXSQMasked512", argLength: 3, reg: v2kv, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSQ512", argLength: 2, reg: v21, asm: "VPMINSQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSQMasked512", argLength: 3, reg: v2kv, asm: "VPMINSQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULDQ512", argLength: 2, reg: v21, asm: "VPMULDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULDQMasked512", argLength: 3, reg: v2kv, asm: "VPMULDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - 
{name: "VPMULLQ512", argLength: 2, reg: v21, asm: "VPMULLQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULLQMasked512", argLength: 3, reg: v2kv, asm: "VPMULLQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPORQ512", argLength: 2, reg: v21, asm: "VPORQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPORQMasked512", argLength: 3, reg: v2kv, asm: "VPORQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPOPCNTQ512", argLength: 1, reg: v11, asm: "VPOPCNTQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPOPCNTQMasked512", argLength: 2, reg: vkv, asm: "VPOPCNTQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPROLVQ512", argLength: 2, reg: v21, asm: "VPROLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPROLVQMasked512", argLength: 3, reg: v2kv, asm: "VPROLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORVQ512", argLength: 2, reg: v21, asm: "VPRORVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORVQMasked512", argLength: 3, reg: v2kv, asm: "VPRORVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSLLQ512", argLength: 2, reg: vfpv, asm: "VPSLLQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSLLQMasked512", argLength: 3, reg: vfpkv, asm: "VPSLLQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLQ512", argLength: 2, reg: vfpv, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLQMasked512", argLength: 3, reg: vfpkv, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAQ512", argLength: 2, reg: vfpv, asm: "VPSRAQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAQMasked512", argLength: 3, reg: vfpkv, asm: "VPSRAQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSLLVQ512", argLength: 2, reg: v21, asm: "VPSLLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDVQ512", argLength: 3, reg: v31, asm: "VPSHLDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSHLDVQMasked512", argLength: 4, reg: v3kv, asm: "VPSHLDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSLLVQMasked512", argLength: 3, reg: v2kv, asm: "VPSLLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLVQ512", argLength: 2, reg: v21, asm: "VPSRLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDVQ512", argLength: 3, reg: v31, asm: "VPSHRDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSHRDVQMasked512", argLength: 4, reg: v3kv, asm: "VPSHRDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSRLVQMasked512", argLength: 3, reg: v2kv, asm: "VPSRLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAVQ512", argLength: 2, reg: v21, asm: "VPSRAVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAVQMasked512", argLength: 3, reg: v2kv, asm: "VPSRAVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBQ512", argLength: 2, reg: v21, asm: "VPSUBQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBQMasked512", argLength: 3, reg: v2kv, asm: "VPSUBQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPXORQ512", argLength: 2, reg: v21, asm: "VPXORQ", commutative: true, typ: "Vec512", resultInArg0: 
false}, - {name: "VPXORQMasked512", argLength: 3, reg: v2kv, asm: "VPXORQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBQMasked256", argLength: 3, reg: w2kw, asm: "VPSUBQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPXORQMasked256", argLength: 3, reg: w2kw, asm: "VPXORQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPABSQ512", argLength: 1, reg: w11, asm: "VPABSQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPABSQMasked512", argLength: 2, reg: wkw, asm: "VPABSQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDQ512", argLength: 2, reg: w21, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDQMasked512", argLength: 3, reg: w2kw, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDQ512", argLength: 2, reg: w21, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDQMasked512", argLength: 3, reg: w2kw, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDNQ512", argLength: 2, reg: w21, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPANDNQMasked512", argLength: 3, reg: w2kw, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSQ512", argLength: 2, reg: w21, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSQMasked512", argLength: 3, reg: w2kw, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSQ512", argLength: 2, reg: w21, asm: "VPMINSQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSQMasked512", argLength: 3, reg: w2kw, asm: "VPMINSQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULDQ512", argLength: 2, reg: w21, asm: "VPMULDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULDQMasked512", argLength: 3, reg: w2kw, asm: "VPMULDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLQ512", argLength: 2, reg: w21, asm: "VPMULLQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLQMasked512", argLength: 3, reg: w2kw, asm: "VPMULLQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPORQ512", argLength: 2, reg: w21, asm: "VPORQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPORQMasked512", argLength: 3, reg: w2kw, asm: "VPORQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTQ512", argLength: 1, reg: w11, asm: "VPOPCNTQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTQMasked512", argLength: 2, reg: wkw, asm: "VPOPCNTQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLVQ512", argLength: 2, reg: w21, asm: "VPROLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLVQMasked512", argLength: 3, reg: w2kw, asm: "VPROLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORVQ512", argLength: 2, reg: w21, asm: "VPRORVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORVQMasked512", argLength: 3, reg: w2kw, asm: "VPRORVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLQ512", argLength: 2, reg: wfpw, asm: "VPSLLQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLQMasked512", argLength: 3, reg: wfpkw, asm: "VPSLLQ", commutative: false, typ: "Vec512", resultInArg0: 
false}, + {name: "VPSRLQ512", argLength: 2, reg: wfpw, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLQMasked512", argLength: 3, reg: wfpkw, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAQ512", argLength: 2, reg: wfpw, asm: "VPSRAQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAQMasked512", argLength: 3, reg: wfpkw, asm: "VPSRAQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLVQ512", argLength: 2, reg: w21, asm: "VPSLLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDVQ512", argLength: 3, reg: w31, asm: "VPSHLDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHLDVQMasked512", argLength: 4, reg: w3kw, asm: "VPSHLDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSLLVQMasked512", argLength: 3, reg: w2kw, asm: "VPSLLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLVQ512", argLength: 2, reg: w21, asm: "VPSRLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDVQ512", argLength: 3, reg: w31, asm: "VPSHRDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHRDVQMasked512", argLength: 4, reg: w3kw, asm: "VPSHRDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRLVQMasked512", argLength: 3, reg: w2kw, asm: "VPSRLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAVQ512", argLength: 2, reg: w21, asm: "VPSRAVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAVQMasked512", argLength: 3, reg: w2kw, asm: "VPSRAVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBQ512", argLength: 2, reg: w21, asm: "VPSUBQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBQMasked512", argLength: 3, reg: w2kw, asm: "VPSUBQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPXORQ512", argLength: 2, reg: w21, asm: "VPXORQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPXORQMasked512", argLength: 3, reg: w2kw, asm: "VPXORQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPABSB128", argLength: 1, reg: v11, asm: "VPABSB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPABSBMasked128", argLength: 2, reg: vkv, asm: "VPABSB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSBMasked128", argLength: 2, reg: wkw, asm: "VPABSB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDB128", argLength: 2, reg: v21, asm: "VPADDB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPADDBMasked128", argLength: 3, reg: v2kv, asm: "VPADDB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDBMasked128", argLength: 3, reg: w2kw, asm: "VPADDB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPAND128", argLength: 2, reg: v21, asm: "VPAND", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPANDN128", argLength: 2, reg: v21, asm: "VPANDN", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPEQB128", argLength: 2, reg: v21, asm: "VPCMPEQB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPCMPGTB128", argLength: 2, reg: v21, asm: "VPCMPGTB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMAXSB128", argLength: 2, reg: v21, asm: "VPMAXSB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: 
"VPMAXSBMasked128", argLength: 3, reg: v2kv, asm: "VPMAXSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSBMasked128", argLength: 3, reg: w2kw, asm: "VPMAXSB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINSB128", argLength: 2, reg: v21, asm: "VPMINSB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINSBMasked128", argLength: 3, reg: v2kv, asm: "VPMINSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSBMasked128", argLength: 3, reg: w2kw, asm: "VPMINSB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPOR128", argLength: 2, reg: v21, asm: "VPOR", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTB128", argLength: 1, reg: v11, asm: "VPOPCNTB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTBMasked128", argLength: 2, reg: vkv, asm: "VPOPCNTB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTB128", argLength: 1, reg: w11, asm: "VPOPCNTB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTBMasked128", argLength: 2, reg: wkw, asm: "VPOPCNTB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDSB128", argLength: 2, reg: v21, asm: "VPADDSB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPADDSBMasked128", argLength: 3, reg: v2kv, asm: "VPADDSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDSBMasked128", argLength: 3, reg: w2kw, asm: "VPADDSB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPSUBSB128", argLength: 2, reg: v21, asm: "VPSUBSB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBSBMasked128", argLength: 3, reg: v2kv, asm: "VPSUBSB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBSBMasked128", argLength: 3, reg: w2kw, asm: "VPSUBSB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSIGNB128", argLength: 2, reg: v21, asm: "VPSIGNB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBB128", argLength: 2, reg: v21, asm: "VPSUBB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBBMasked128", argLength: 3, reg: v2kv, asm: "VPSUBB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBBMasked128", argLength: 3, reg: w2kw, asm: "VPSUBB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPXOR128", argLength: 2, reg: v21, asm: "VPXOR", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPABSB256", argLength: 1, reg: v11, asm: "VPABSB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPABSBMasked256", argLength: 2, reg: vkv, asm: "VPABSB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSBMasked256", argLength: 2, reg: wkw, asm: "VPABSB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPADDB256", argLength: 2, reg: v21, asm: "VPADDB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPADDBMasked256", argLength: 3, reg: v2kv, asm: "VPADDB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDBMasked256", argLength: 3, reg: w2kw, asm: "VPADDB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPAND256", argLength: 2, reg: v21, asm: "VPAND", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPANDN256", argLength: 2, reg: v21, asm: "VPANDN", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPEQB256", argLength: 2, 
reg: v21, asm: "VPCMPEQB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPCMPGTB256", argLength: 2, reg: v21, asm: "VPCMPGTB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMAXSB256", argLength: 2, reg: v21, asm: "VPMAXSB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXSBMasked256", argLength: 3, reg: v2kv, asm: "VPMAXSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSBMasked256", argLength: 3, reg: w2kw, asm: "VPMAXSB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINSB256", argLength: 2, reg: v21, asm: "VPMINSB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINSBMasked256", argLength: 3, reg: v2kv, asm: "VPMINSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSBMasked256", argLength: 3, reg: w2kw, asm: "VPMINSB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPOR256", argLength: 2, reg: v21, asm: "VPOR", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTB256", argLength: 1, reg: v11, asm: "VPOPCNTB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTBMasked256", argLength: 2, reg: vkv, asm: "VPOPCNTB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTB256", argLength: 1, reg: w11, asm: "VPOPCNTB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTBMasked256", argLength: 2, reg: wkw, asm: "VPOPCNTB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPADDSB256", argLength: 2, reg: v21, asm: "VPADDSB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPADDSBMasked256", argLength: 3, reg: v2kv, asm: "VPADDSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDSBMasked256", argLength: 3, reg: w2kw, asm: "VPADDSB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPSUBSB256", argLength: 2, reg: v21, asm: "VPSUBSB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBSBMasked256", argLength: 3, reg: v2kv, asm: "VPSUBSB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBSBMasked256", argLength: 3, reg: w2kw, asm: "VPSUBSB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSIGNB256", argLength: 2, reg: v21, asm: "VPSIGNB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBB256", argLength: 2, reg: v21, asm: "VPSUBB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBBMasked256", argLength: 3, reg: v2kv, asm: "VPSUBB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBBMasked256", argLength: 3, reg: w2kw, asm: "VPSUBB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPXOR256", argLength: 2, reg: v21, asm: "VPXOR", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPABSB512", argLength: 1, reg: v11, asm: "VPABSB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPABSBMasked512", argLength: 2, reg: vkv, asm: "VPABSB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDB512", argLength: 2, reg: v21, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPADDBMasked512", argLength: 3, reg: v2kv, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXSB512", argLength: 2, reg: v21, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXSBMasked512", argLength: 3, reg: v2kv, asm: 
"VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSB512", argLength: 2, reg: v21, asm: "VPMINSB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSBMasked512", argLength: 3, reg: v2kv, asm: "VPMINSB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPOPCNTB512", argLength: 1, reg: v11, asm: "VPOPCNTB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPOPCNTBMasked512", argLength: 2, reg: vkv, asm: "VPOPCNTB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDSB512", argLength: 2, reg: v21, asm: "VPADDSB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPADDSBMasked512", argLength: 3, reg: v2kv, asm: "VPADDSB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBSB512", argLength: 2, reg: v21, asm: "VPSUBSB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBSBMasked512", argLength: 3, reg: v2kv, asm: "VPSUBSB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBB512", argLength: 2, reg: v21, asm: "VPSUBB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBBMasked512", argLength: 3, reg: v2kv, asm: "VPSUBB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPABSB512", argLength: 1, reg: w11, asm: "VPABSB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPABSBMasked512", argLength: 2, reg: wkw, asm: "VPABSB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDB512", argLength: 2, reg: w21, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDBMasked512", argLength: 3, reg: w2kw, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSB512", argLength: 2, reg: w21, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSBMasked512", argLength: 3, reg: w2kw, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSB512", argLength: 2, reg: w21, asm: "VPMINSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSBMasked512", argLength: 3, reg: w2kw, asm: "VPMINSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTB512", argLength: 1, reg: w11, asm: "VPOPCNTB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTBMasked512", argLength: 2, reg: wkw, asm: "VPOPCNTB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDSB512", argLength: 2, reg: w21, asm: "VPADDSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDSBMasked512", argLength: 3, reg: w2kw, asm: "VPADDSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBSB512", argLength: 2, reg: w21, asm: "VPSUBSB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBSBMasked512", argLength: 3, reg: w2kw, asm: "VPSUBSB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBB512", argLength: 2, reg: w21, asm: "VPSUBB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBBMasked512", argLength: 3, reg: w2kw, asm: "VPSUBB", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPAVGW256", argLength: 2, reg: v21, asm: "VPAVGW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPAVGWMasked256", argLength: 3, reg: v2kv, asm: "VPAVGW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPAVGWMasked256", argLength: 3, reg: w2kw, asm: 
"VPAVGW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMAXUW256", argLength: 2, reg: v21, asm: "VPMAXUW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUWMasked256", argLength: 3, reg: v2kv, asm: "VPMAXUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUWMasked256", argLength: 3, reg: w2kw, asm: "VPMAXUW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUW256", argLength: 2, reg: v21, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUWMasked256", argLength: 3, reg: v2kv, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUWMasked256", argLength: 3, reg: w2kw, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULHUW256", argLength: 2, reg: v21, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULHUWMasked256", argLength: 3, reg: v2kv, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPAVGW512", argLength: 2, reg: v21, asm: "VPAVGW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPAVGWMasked512", argLength: 3, reg: v2kv, asm: "VPAVGW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXUW512", argLength: 2, reg: v21, asm: "VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXUWMasked512", argLength: 3, reg: v2kv, asm: "VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUW512", argLength: 2, reg: v21, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUWMasked512", argLength: 3, reg: v2kv, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULHUW512", argLength: 2, reg: v21, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULHUWMasked512", argLength: 3, reg: v2kv, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULHUWMasked256", argLength: 3, reg: w2kw, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPAVGW512", argLength: 2, reg: w21, asm: "VPAVGW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPAVGWMasked512", argLength: 3, reg: w2kw, asm: "VPAVGW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUW512", argLength: 2, reg: w21, asm: "VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUWMasked512", argLength: 3, reg: w2kw, asm: "VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUW512", argLength: 2, reg: w21, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUWMasked512", argLength: 3, reg: w2kw, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULHUW512", argLength: 2, reg: w21, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULHUWMasked512", argLength: 3, reg: w2kw, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPAVGW128", argLength: 2, reg: v21, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPAVGWMasked128", argLength: 3, reg: v2kv, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPAVGWMasked128", argLength: 3, reg: w2kw, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMAXUW128", argLength: 2, reg: v21, asm: 
"VPMAXUW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUWMasked128", argLength: 3, reg: v2kv, asm: "VPMAXUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUWMasked128", argLength: 3, reg: w2kw, asm: "VPMAXUW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUW128", argLength: 2, reg: v21, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUWMasked128", argLength: 3, reg: v2kv, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUWMasked128", argLength: 3, reg: w2kw, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULHUW128", argLength: 2, reg: v21, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULHUWMasked128", argLength: 3, reg: v2kv, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUD512", argLength: 2, reg: v21, asm: "VPMAXUD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXUDMasked512", argLength: 3, reg: v2kv, asm: "VPMAXUD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUD512", argLength: 2, reg: v21, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUDMasked512", argLength: 3, reg: v2kv, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULHUWMasked128", argLength: 3, reg: w2kw, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUD512", argLength: 2, reg: w21, asm: "VPMAXUD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUDMasked512", argLength: 3, reg: w2kw, asm: "VPMAXUD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUD512", argLength: 2, reg: w21, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUDMasked512", argLength: 3, reg: w2kw, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXUD128", argLength: 2, reg: v21, asm: "VPMAXUD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUDMasked128", argLength: 3, reg: v2kv, asm: "VPMAXUD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUDMasked128", argLength: 3, reg: w2kw, asm: "VPMAXUD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUD128", argLength: 2, reg: v21, asm: "VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUDMasked128", argLength: 3, reg: v2kv, asm: "VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUDMasked128", argLength: 3, reg: w2kw, asm: "VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULUDQ128", argLength: 2, reg: v21, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMAXUD256", argLength: 2, reg: v21, asm: "VPMAXUD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUDMasked256", argLength: 3, reg: v2kv, asm: "VPMAXUD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUDMasked256", argLength: 3, reg: w2kw, asm: "VPMAXUD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUD256", argLength: 2, reg: v21, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUDMasked256", argLength: 3, reg: v2kv, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUDMasked256", argLength: 3, 
reg: w2kw, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULUDQ256", argLength: 2, reg: v21, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUQ128", argLength: 2, reg: v21, asm: "VPMAXUQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUQMasked128", argLength: 3, reg: v2kv, asm: "VPMAXUQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUQ128", argLength: 2, reg: v21, asm: "VPMINUQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUQMasked128", argLength: 3, reg: v2kv, asm: "VPMINUQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULUDQMasked128", argLength: 3, reg: v2kv, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUQ256", argLength: 2, reg: v21, asm: "VPMAXUQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUQMasked256", argLength: 3, reg: v2kv, asm: "VPMAXUQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUQ256", argLength: 2, reg: v21, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUQMasked256", argLength: 3, reg: v2kv, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULUDQMasked256", argLength: 3, reg: v2kv, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUQ512", argLength: 2, reg: v21, asm: "VPMAXUQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXUQMasked512", argLength: 3, reg: v2kv, asm: "VPMAXUQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUQ512", argLength: 2, reg: v21, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUQMasked512", argLength: 3, reg: v2kv, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULUDQ512", argLength: 2, reg: v21, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULUDQMasked512", argLength: 3, reg: v2kv, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUQ128", argLength: 2, reg: w21, asm: "VPMAXUQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUQMasked128", argLength: 3, reg: w2kw, asm: "VPMAXUQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUQ128", argLength: 2, reg: w21, asm: "VPMINUQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUQMasked128", argLength: 3, reg: w2kw, asm: "VPMINUQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULUDQMasked128", argLength: 3, reg: w2kw, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUQ256", argLength: 2, reg: w21, asm: "VPMAXUQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUQMasked256", argLength: 3, reg: w2kw, asm: "VPMAXUQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUQ256", argLength: 2, reg: w21, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUQMasked256", argLength: 3, reg: w2kw, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULUDQMasked256", argLength: 3, reg: w2kw, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUQ512", argLength: 2, reg: w21, asm: "VPMAXUQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: 
"VPMAXUQMasked512", argLength: 3, reg: w2kw, asm: "VPMAXUQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUQ512", argLength: 2, reg: w21, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUQMasked512", argLength: 3, reg: w2kw, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULUDQ512", argLength: 2, reg: w21, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULUDQMasked512", argLength: 3, reg: w2kw, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPAVGB128", argLength: 2, reg: v21, asm: "VPAVGB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPAVGBMasked128", argLength: 3, reg: v2kv, asm: "VPAVGB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VGF2P8MULB128", argLength: 2, reg: v21, asm: "VGF2P8MULB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VGF2P8MULBMasked128", argLength: 3, reg: v2kv, asm: "VGF2P8MULB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPAVGBMasked128", argLength: 3, reg: w2kw, asm: "VPAVGB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8MULB128", argLength: 2, reg: w21, asm: "VGF2P8MULB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8MULBMasked128", argLength: 3, reg: w2kw, asm: "VGF2P8MULB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMAXUB128", argLength: 2, reg: v21, asm: "VPMAXUB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUBMasked128", argLength: 3, reg: v2kv, asm: "VPMAXUB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUBMasked128", argLength: 3, reg: w2kw, asm: "VPMAXUB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUB128", argLength: 2, reg: v21, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUBMasked128", argLength: 3, reg: v2kv, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUBMasked128", argLength: 3, reg: w2kw, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMADDUBSW128", argLength: 2, reg: v21, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMADDUBSWMasked128", argLength: 3, reg: v2kv, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMADDUBSWMasked128", argLength: 3, reg: w2kw, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPAVGB256", argLength: 2, reg: v21, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPAVGBMasked256", argLength: 3, reg: v2kv, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VGF2P8MULB256", argLength: 2, reg: v21, asm: "VGF2P8MULB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VGF2P8MULBMasked256", argLength: 3, reg: v2kv, asm: "VGF2P8MULB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPAVGBMasked256", argLength: 3, reg: w2kw, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8MULB256", argLength: 2, reg: w21, asm: "VGF2P8MULB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8MULBMasked256", argLength: 3, reg: w2kw, asm: "VGF2P8MULB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMAXUB256", argLength: 2, reg: v21, asm: 
"VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUBMasked256", argLength: 3, reg: v2kv, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUBMasked256", argLength: 3, reg: w2kw, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUB256", argLength: 2, reg: v21, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUBMasked256", argLength: 3, reg: v2kv, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUBMasked256", argLength: 3, reg: w2kw, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMADDUBSW256", argLength: 2, reg: v21, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMADDUBSWMasked256", argLength: 3, reg: v2kv, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPAVGB512", argLength: 2, reg: v21, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPAVGBMasked512", argLength: 3, reg: v2kv, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VGF2P8MULB512", argLength: 2, reg: v21, asm: "VGF2P8MULB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VGF2P8MULBMasked512", argLength: 3, reg: v2kv, asm: "VGF2P8MULB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXUB512", argLength: 2, reg: v21, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXUBMasked512", argLength: 3, reg: v2kv, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUB512", argLength: 2, reg: v21, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUBMasked512", argLength: 3, reg: v2kv, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMADDUBSW512", argLength: 2, reg: v21, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPMADDUBSWMasked512", argLength: 3, reg: v2kv, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRNDSCALEPS512", argLength: 1, reg: v11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRNDSCALEPSMasked512", argLength: 2, reg: vkv, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VREDUCEPS512", argLength: 1, reg: v11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VREDUCEPSMasked512", argLength: 2, reg: vkv, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VCMPPS512", argLength: 2, reg: v2k, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VCMPPSMasked512", argLength: 3, reg: v2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPMADDUBSWMasked256", argLength: 3, reg: w2kw, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPAVGB512", argLength: 2, reg: w21, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPAVGBMasked512", argLength: 3, reg: w2kw, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VGF2P8MULB512", argLength: 2, reg: w21, asm: "VGF2P8MULB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: 
"VGF2P8MULBMasked512", argLength: 3, reg: w2kw, asm: "VGF2P8MULB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUB512", argLength: 2, reg: w21, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUBMasked512", argLength: 3, reg: w2kw, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUB512", argLength: 2, reg: w21, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUBMasked512", argLength: 3, reg: w2kw, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMADDUBSW512", argLength: 2, reg: w21, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMADDUBSWMasked512", argLength: 3, reg: w2kw, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRNDSCALEPS512", argLength: 1, reg: w11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRNDSCALEPSMasked512", argLength: 2, reg: wkw, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VREDUCEPS512", argLength: 1, reg: w11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VREDUCEPSMasked512", argLength: 2, reg: wkw, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VCMPPS512", argLength: 2, reg: w2k, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPSMasked512", argLength: 3, reg: w2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VROUNDPS128", argLength: 1, reg: v11, asm: "VROUNDPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRNDSCALEPS128", argLength: 1, reg: v11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRNDSCALEPSMasked128", argLength: 2, reg: vkv, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VREDUCEPS128", argLength: 1, reg: v11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VREDUCEPSMasked128", argLength: 2, reg: vkv, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRNDSCALEPS128", argLength: 1, reg: w11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRNDSCALEPSMasked128", argLength: 2, reg: wkw, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VREDUCEPS128", argLength: 1, reg: w11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VREDUCEPSMasked128", argLength: 2, reg: wkw, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VCMPPS128", argLength: 2, reg: v21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VCMPPSMasked128", argLength: 3, reg: v2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPSMasked128", argLength: 3, reg: w2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VROUNDPS256", argLength: 1, reg: v11, asm: "VROUNDPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRNDSCALEPS256", argLength: 1, reg: v11, 
asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRNDSCALEPSMasked256", argLength: 2, reg: vkv, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VREDUCEPS256", argLength: 1, reg: v11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VREDUCEPSMasked256", argLength: 2, reg: vkv, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRNDSCALEPS256", argLength: 1, reg: w11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRNDSCALEPSMasked256", argLength: 2, reg: wkw, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPS256", argLength: 1, reg: w11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPSMasked256", argLength: 2, reg: wkw, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VCMPPS256", argLength: 2, reg: v21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VCMPPSMasked256", argLength: 3, reg: v2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPSMasked256", argLength: 3, reg: w2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VEXTRACTF128128", argLength: 1, reg: v11, asm: "VEXTRACTF128", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VINSERTF128256", argLength: 2, reg: v21, asm: "VINSERTF128", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VROUNDPD128", argLength: 1, reg: v11, asm: "VROUNDPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRNDSCALEPD128", argLength: 1, reg: v11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRNDSCALEPDMasked128", argLength: 2, reg: vkv, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VREDUCEPD128", argLength: 1, reg: v11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VREDUCEPDMasked128", argLength: 2, reg: vkv, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRNDSCALEPD128", argLength: 1, reg: w11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRNDSCALEPDMasked128", argLength: 2, reg: wkw, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VREDUCEPD128", argLength: 1, reg: w11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VREDUCEPDMasked128", argLength: 2, reg: wkw, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDPPD128", argLength: 2, reg: v21, asm: "VDPPD", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VCMPPD128", argLength: 2, reg: v21, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VCMPPDMasked128", argLength: 3, reg: v2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPDMasked128", argLength: 3, reg: w2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: 
false}, {name: "VROUNDPD256", argLength: 1, reg: v11, asm: "VROUNDPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRNDSCALEPD256", argLength: 1, reg: v11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRNDSCALEPDMasked256", argLength: 2, reg: vkv, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VREDUCEPD256", argLength: 1, reg: v11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VREDUCEPDMasked256", argLength: 2, reg: vkv, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRNDSCALEPD256", argLength: 1, reg: w11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRNDSCALEPDMasked256", argLength: 2, reg: wkw, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPD256", argLength: 1, reg: w11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPDMasked256", argLength: 2, reg: wkw, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VCMPPD256", argLength: 2, reg: v21, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VCMPPDMasked256", argLength: 3, reg: v2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VRNDSCALEPD512", argLength: 1, reg: v11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRNDSCALEPDMasked512", argLength: 2, reg: vkv, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VREDUCEPD512", argLength: 1, reg: v11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VREDUCEPDMasked512", argLength: 2, reg: vkv, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VCMPPD512", argLength: 2, reg: v2k, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VCMPPDMasked512", argLength: 3, reg: v2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPWMasked256", argLength: 3, reg: v2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPW256", argLength: 2, reg: v2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPSHLDW256", argLength: 2, reg: v21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDWMasked256", argLength: 3, reg: v2kv, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDW256", argLength: 2, reg: v21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDWMasked256", argLength: 3, reg: v2kv, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPW512", argLength: 2, reg: v2k, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPWMasked512", argLength: 3, reg: v2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPSHLDW512", argLength: 2, reg: v21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec512", 
resultInArg0: false}, - {name: "VPSHLDWMasked512", argLength: 3, reg: v2kv, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDW512", argLength: 2, reg: v21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDWMasked512", argLength: 3, reg: v2kv, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPCMPWMasked128", argLength: 3, reg: v2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPEXTRW128", argLength: 1, reg: vgp, asm: "VPEXTRW", aux: "Int8", commutative: false, typ: "int16", resultInArg0: false}, - {name: "VPCMPW128", argLength: 2, reg: v2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VCMPPDMasked256", argLength: 3, reg: w2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VRNDSCALEPD512", argLength: 1, reg: w11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRNDSCALEPDMasked512", argLength: 2, reg: wkw, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VREDUCEPD512", argLength: 1, reg: w11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VREDUCEPDMasked512", argLength: 2, reg: wkw, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VCMPPD512", argLength: 2, reg: w2k, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPDMasked512", argLength: 3, reg: w2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPWMasked256", argLength: 3, reg: w2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPW256", argLength: 2, reg: w2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPSHLDW256", argLength: 2, reg: w21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDWMasked256", argLength: 3, reg: w2kw, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDW256", argLength: 2, reg: w21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDWMasked256", argLength: 3, reg: w2kw, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPW512", argLength: 2, reg: w2k, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPWMasked512", argLength: 3, reg: w2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPSHLDW512", argLength: 2, reg: w21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDWMasked512", argLength: 3, reg: w2kw, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDW512", argLength: 2, reg: w21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDWMasked512", argLength: 3, reg: w2kw, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPWMasked128", argLength: 3, reg: w2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + 
{name: "VPEXTRW128", argLength: 1, reg: wgp, asm: "VPEXTRW", aux: "Int8", commutative: false, typ: "int16", resultInArg0: false}, + {name: "VPCMPW128", argLength: 2, reg: w2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPINSRW128", argLength: 2, reg: vgpv, asm: "VPINSRW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDW128", argLength: 2, reg: v21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDWMasked128", argLength: 3, reg: v2kv, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDW128", argLength: 2, reg: v21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDWMasked128", argLength: 3, reg: v2kv, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPD512", argLength: 2, reg: v2k, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPDMasked512", argLength: 3, reg: v2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPROLD512", argLength: 1, reg: v11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPROLDMasked512", argLength: 2, reg: vkv, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORD512", argLength: 1, reg: v11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORDMasked512", argLength: 2, reg: vkv, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDD512", argLength: 2, reg: v21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDDMasked512", argLength: 3, reg: v2kv, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDD512", argLength: 2, reg: v21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDDMasked512", argLength: 3, reg: v2kv, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPCMPDMasked128", argLength: 3, reg: v2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPSHLDW128", argLength: 2, reg: w21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDWMasked128", argLength: 3, reg: w2kw, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDW128", argLength: 2, reg: w21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDWMasked128", argLength: 3, reg: w2kw, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPD512", argLength: 2, reg: w2k, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPDMasked512", argLength: 3, reg: w2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPROLD512", argLength: 1, reg: w11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLDMasked512", argLength: 2, reg: wkw, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORD512", argLength: 1, reg: w11, asm: "VPRORD", 
aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORDMasked512", argLength: 2, reg: wkw, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDD512", argLength: 2, reg: w21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDDMasked512", argLength: 3, reg: w2kw, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDD512", argLength: 2, reg: w21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDDMasked512", argLength: 3, reg: w2kw, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPDMasked128", argLength: 3, reg: w2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPEXTRD128", argLength: 1, reg: vgp, asm: "VPEXTRD", aux: "Int8", commutative: false, typ: "int32", resultInArg0: false}, - {name: "VPCMPD128", argLength: 2, reg: v2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPROLD128", argLength: 1, reg: v11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPROLDMasked128", argLength: 2, reg: vkv, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORD128", argLength: 1, reg: v11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORDMasked128", argLength: 2, reg: vkv, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPD128", argLength: 2, reg: w2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPROLD128", argLength: 1, reg: w11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLDMasked128", argLength: 2, reg: wkw, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORD128", argLength: 1, reg: w11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORDMasked128", argLength: 2, reg: wkw, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPINSRD128", argLength: 2, reg: vgpv, asm: "VPINSRD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDD128", argLength: 2, reg: v21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDDMasked128", argLength: 3, reg: v2kv, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDD128", argLength: 2, reg: v21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDDMasked128", argLength: 3, reg: v2kv, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPDMasked256", argLength: 3, reg: v2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPD256", argLength: 2, reg: v2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPROLD256", argLength: 1, reg: v11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPROLDMasked256", argLength: 2, reg: vkv, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: 
false}, - {name: "VPRORD256", argLength: 1, reg: v11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORDMasked256", argLength: 2, reg: vkv, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDD256", argLength: 2, reg: v21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDDMasked256", argLength: 3, reg: v2kv, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDD256", argLength: 2, reg: v21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDDMasked256", argLength: 3, reg: v2kv, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPQMasked128", argLength: 3, reg: v2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPSHLDD128", argLength: 2, reg: w21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDDMasked128", argLength: 3, reg: w2kw, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDD128", argLength: 2, reg: w21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDDMasked128", argLength: 3, reg: w2kw, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPDMasked256", argLength: 3, reg: w2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPD256", argLength: 2, reg: w2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPROLD256", argLength: 1, reg: w11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLDMasked256", argLength: 2, reg: wkw, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORD256", argLength: 1, reg: w11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORDMasked256", argLength: 2, reg: wkw, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDD256", argLength: 2, reg: w21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDDMasked256", argLength: 3, reg: w2kw, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDD256", argLength: 2, reg: w21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDDMasked256", argLength: 3, reg: w2kw, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPQMasked128", argLength: 3, reg: w2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPEXTRQ128", argLength: 1, reg: vgp, asm: "VPEXTRQ", aux: "Int8", commutative: false, typ: "int64", resultInArg0: false}, - {name: "VPCMPQ128", argLength: 2, reg: v2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPROLQ128", argLength: 1, reg: v11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPROLQMasked128", argLength: 2, reg: vkv, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORQ128", argLength: 1, reg: 
v11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORQMasked128", argLength: 2, reg: vkv, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPQ128", argLength: 2, reg: w2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPROLQ128", argLength: 1, reg: w11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLQMasked128", argLength: 2, reg: wkw, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORQ128", argLength: 1, reg: w11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORQMasked128", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPINSRQ128", argLength: 2, reg: vgpv, asm: "VPINSRQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDQ128", argLength: 2, reg: v21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDQMasked128", argLength: 3, reg: v2kv, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDQ128", argLength: 2, reg: v21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDQMasked128", argLength: 3, reg: v2kv, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPQMasked256", argLength: 3, reg: v2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPQ256", argLength: 2, reg: v2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPROLQ256", argLength: 1, reg: v11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPROLQMasked256", argLength: 2, reg: vkv, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORQ256", argLength: 1, reg: v11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORQMasked256", argLength: 2, reg: vkv, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDQ256", argLength: 2, reg: v21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDQMasked256", argLength: 3, reg: v2kv, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDQ256", argLength: 2, reg: v21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDQMasked256", argLength: 3, reg: v2kv, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPQ512", argLength: 2, reg: v2k, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPQMasked512", argLength: 3, reg: v2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPROLQ512", argLength: 1, reg: v11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPROLQMasked512", argLength: 2, reg: vkv, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORQ512", argLength: 1, reg: v11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: 
"Vec512", resultInArg0: false}, - {name: "VPRORQMasked512", argLength: 2, reg: vkv, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDQ512", argLength: 2, reg: v21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDQMasked512", argLength: 3, reg: v2kv, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDQ512", argLength: 2, reg: v21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDQMasked512", argLength: 3, reg: v2kv, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPCMPBMasked128", argLength: 3, reg: v2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPEXTRB128", argLength: 1, reg: vgp, asm: "VPEXTRB", aux: "Int8", commutative: false, typ: "int8", resultInArg0: false}, - {name: "VPCMPB128", argLength: 2, reg: v2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPSHLDQ128", argLength: 2, reg: w21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDQMasked128", argLength: 3, reg: w2kw, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDQ128", argLength: 2, reg: w21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDQMasked128", argLength: 3, reg: w2kw, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPQMasked256", argLength: 3, reg: w2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQ256", argLength: 2, reg: w2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPROLQ256", argLength: 1, reg: w11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLQMasked256", argLength: 2, reg: wkw, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORQ256", argLength: 1, reg: w11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORQMasked256", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDQ256", argLength: 2, reg: w21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDQMasked256", argLength: 3, reg: w2kw, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDQ256", argLength: 2, reg: w21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDQMasked256", argLength: 3, reg: w2kw, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPQ512", argLength: 2, reg: w2k, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQMasked512", argLength: 3, reg: w2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPROLQ512", argLength: 1, reg: w11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLQMasked512", argLength: 2, reg: wkw, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORQ512", 
argLength: 1, reg: w11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORQMasked512", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDQ512", argLength: 2, reg: w21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDQMasked512", argLength: 3, reg: w2kw, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDQ512", argLength: 2, reg: w21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDQMasked512", argLength: 3, reg: w2kw, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPBMasked128", argLength: 3, reg: w2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPEXTRB128", argLength: 1, reg: wgp, asm: "VPEXTRB", aux: "Int8", commutative: false, typ: "int8", resultInArg0: false}, + {name: "VPCMPB128", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPINSRB128", argLength: 2, reg: vgpv, asm: "VPINSRB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPBMasked256", argLength: 3, reg: v2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPBMasked256", argLength: 3, reg: w2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VEXTRACTI128128", argLength: 1, reg: v11, asm: "VEXTRACTI128", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPB256", argLength: 2, reg: v2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPB256", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VINSERTI128256", argLength: 2, reg: v21, asm: "VINSERTI128", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPB512", argLength: 2, reg: v2k, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPBMasked512", argLength: 3, reg: v2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUW256", argLength: 2, reg: v2k, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUWMasked256", argLength: 3, reg: v2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUW512", argLength: 2, reg: v2k, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUWMasked512", argLength: 3, reg: v2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUW128", argLength: 2, reg: v2k, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUWMasked128", argLength: 3, reg: v2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUD512", argLength: 2, reg: v2k, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUDMasked512", argLength: 3, reg: v2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUD128", argLength: 2, reg: v2k, asm: "VPCMPUD", aux: "Int8", commutative: true, 
typ: "Mask", resultInArg0: false}, - {name: "VPCMPUDMasked128", argLength: 3, reg: v2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUD256", argLength: 2, reg: v2k, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUDMasked256", argLength: 3, reg: v2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQ128", argLength: 2, reg: v2k, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQMasked128", argLength: 3, reg: v2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQ256", argLength: 2, reg: v2k, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQMasked256", argLength: 3, reg: v2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQ512", argLength: 2, reg: v2k, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQMasked512", argLength: 3, reg: v2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUB128", argLength: 2, reg: v2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUBMasked128", argLength: 3, reg: v2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VGF2P8AFFINEQB128", argLength: 2, reg: v21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VGF2P8AFFINEINVQB128", argLength: 2, reg: v21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VGF2P8AFFINEINVQBMasked128", argLength: 3, reg: v2kv, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VGF2P8AFFINEQBMasked128", argLength: 3, reg: v2kv, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPUB256", argLength: 2, reg: v2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUBMasked256", argLength: 3, reg: v2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VGF2P8AFFINEQB256", argLength: 2, reg: v21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VGF2P8AFFINEINVQB256", argLength: 2, reg: v21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VGF2P8AFFINEINVQBMasked256", argLength: 3, reg: v2kv, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VGF2P8AFFINEQBMasked256", argLength: 3, reg: v2kv, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPUB512", argLength: 2, reg: v2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUBMasked512", argLength: 3, reg: v2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VGF2P8AFFINEQB512", argLength: 2, reg: v21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VGF2P8AFFINEINVQB512", argLength: 2, reg: v21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec512", 
resultInArg0: false}, - {name: "VGF2P8AFFINEINVQBMasked512", argLength: 3, reg: v2kv, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VGF2P8AFFINEQBMasked512", argLength: 3, reg: v2kv, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPB512", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPBMasked512", argLength: 3, reg: w2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUW256", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUWMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUW512", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUWMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUW128", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUWMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUD512", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUDMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUD128", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUDMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUD256", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUDMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQ128", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQ256", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQ512", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUB128", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUBMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VGF2P8AFFINEQB128", argLength: 2, reg: w21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQB128", argLength: 2, reg: w21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec128", 
resultInArg0: false}, + {name: "VGF2P8AFFINEINVQBMasked128", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8AFFINEQBMasked128", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPUB256", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUBMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VGF2P8AFFINEQB256", argLength: 2, reg: w21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQB256", argLength: 2, reg: w21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQBMasked256", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8AFFINEQBMasked256", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPUB512", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUBMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VGF2P8AFFINEQB512", argLength: 2, reg: w21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQB512", argLength: 2, reg: w21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQBMasked512", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VGF2P8AFFINEQBMasked512", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, } } diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 4251c013a8cb1d..edc88dfbc6d473 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -9439,7 +9439,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVSS, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -9455,7 +9455,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVSD, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -9495,8 +9495,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -9512,8 +9512,8 
@@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -9529,8 +9529,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -9546,8 +9546,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -9563,8 +9563,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVSS, reg: regInfo{ inputs: []inputInfo{ - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, }, }, @@ -9577,8 +9577,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVSD, reg: regInfo{ inputs: []inputInfo{ - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, }, }, @@ -9591,9 +9591,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, }, }, @@ -9606,9 +9606,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, }, }, @@ -9621,9 +9621,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 2147418112}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, }, }, @@ -9636,9 +9636,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, }, }, @@ -9652,8 +9652,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AADDSS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -9670,8 +9670,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AADDSD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -9688,8 +9688,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASUBSS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -9706,8 +9706,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASUBSD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -9724,8 +9724,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AMULSS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -9742,8 +9742,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AMULSD, reg: regInfo{ inputs: 
[]inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -9760,8 +9760,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ADIVSS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -9778,8 +9778,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ADIVSD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -9796,9 +9796,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -9815,9 +9815,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -9834,9 +9834,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -9853,9 +9853,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -9872,9 +9872,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -9891,9 +9891,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -9910,9 +9910,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -9929,9 +9929,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -9948,9 +9948,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 
X9 X10 X11 X12 X13 X14 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -9967,9 +9967,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -9986,9 +9986,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -10005,9 +10005,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -10024,9 +10024,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -10043,9 +10043,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -10062,9 +10062,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -10081,9 +10081,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -10162,7 +10162,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AADDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10176,7 +10176,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AADDL, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10810,7 +10810,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10824,7 +10824,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDL, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10904,7 +10904,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10918,7 +10918,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AORL, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10998,7 +10998,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AXORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11012,7 +11012,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AXORL, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX 
SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11113,8 +11113,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMPQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11127,8 +11127,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMPL, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11141,8 +11141,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMPW, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11155,8 +11155,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMPB, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11169,7 +11169,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMPQ, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11182,7 +11182,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMPL, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11195,7 +11195,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMPW, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11208,7 +11208,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMPB, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11221,9 +11221,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11237,9 +11237,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX 
BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11252,9 +11252,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11268,9 +11268,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11283,9 +11283,9 @@ var opcodeTable = [...]opInfo{ scale: 2, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11299,9 +11299,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11315,9 +11315,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11330,8 +11330,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11345,8 +11345,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: 
[]inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11359,8 +11359,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11374,8 +11374,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11388,8 +11388,8 @@ var opcodeTable = [...]opInfo{ scale: 2, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11403,8 +11403,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11418,8 +11418,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11643,7 +11643,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ABTSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11657,7 +11657,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ABTRQ, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11671,7 +11671,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ABTCQ, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12324,8 +12324,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AADDL, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP 
SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12343,8 +12343,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AADDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12362,8 +12362,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASUBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12381,8 +12381,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASUBL, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12400,8 +12400,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDL, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12419,8 +12419,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12438,8 +12438,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12457,8 +12457,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AORL, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12476,8 +12476,8 @@ var 
opcodeTable = [...]opInfo{ asm: x86.AXORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12495,8 +12495,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AXORL, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12514,9 +12514,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12534,9 +12534,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12554,9 +12554,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12574,9 +12574,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12594,9 +12594,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 
R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12614,9 +12614,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12634,9 +12634,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12654,9 +12654,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12674,9 +12674,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12694,9 +12694,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12714,9 +12714,9 
@@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12734,9 +12734,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12754,9 +12754,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12774,9 +12774,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12794,9 +12794,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12814,9 +12814,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 
R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12834,9 +12834,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12854,9 +12854,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12874,9 +12874,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12894,9 +12894,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12914,9 +12914,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12934,9 +12934,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12954,9 +12954,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12974,9 +12974,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12994,9 +12994,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13013,8 +13013,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AADDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13028,8 +13028,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASUBQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13043,8 +13043,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13058,8 +13058,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AORQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 
R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13073,8 +13073,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AXORQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13088,8 +13088,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AADDL, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13103,8 +13103,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASUBL, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13118,8 +13118,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDL, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13133,8 +13133,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AORL, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13148,8 +13148,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AXORL, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13163,9 +13163,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13179,9 +13179,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 
R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13195,9 +13195,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13211,9 +13211,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13227,9 +13227,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13243,9 +13243,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13259,9 +13259,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13275,9 +13275,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13291,9 +13291,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX 
BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13307,9 +13307,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13323,9 +13323,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13339,9 +13339,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13355,9 +13355,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13371,9 +13371,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13387,9 +13387,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 
R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13403,9 +13403,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13419,9 +13419,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13435,9 +13435,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13451,9 +13451,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13467,9 +13467,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13483,9 +13483,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13499,9 +13499,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP 
BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13515,9 +13515,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13531,9 +13531,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13547,9 +13547,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13563,8 +13563,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13578,8 +13578,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13593,8 +13593,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13608,8 +13608,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI 
R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13623,8 +13623,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13638,8 +13638,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13653,8 +13653,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13668,8 +13668,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13683,8 +13683,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13698,8 +13698,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13713,8 +13713,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13728,8 +13728,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13743,8 +13743,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 
49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13758,8 +13758,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13773,8 +13773,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13788,8 +13788,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13803,8 +13803,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13818,8 +13818,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13833,8 +13833,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13848,8 +13848,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -14925,7 +14925,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETEQ, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -14938,7 +14938,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETNE, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB 
}, }, }, @@ -14951,7 +14951,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETLT, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -14964,7 +14964,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETLE, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -14977,7 +14977,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETGT, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -14990,7 +14990,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETGE, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -15003,7 +15003,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETCS, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -15016,7 +15016,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETLS, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -15029,7 +15029,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETHI, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -15042,7 +15042,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETCC, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -15056,8 +15056,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -15071,8 +15071,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -15086,8 +15086,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -15101,8 +15101,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ 
inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -15116,8 +15116,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -15131,8 +15131,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -15146,8 +15146,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -15161,8 +15161,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -15176,8 +15176,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -15191,8 +15191,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -15581,7 +15581,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ALEAQ, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15597,7 +15597,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ALEAL, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15613,7 +15613,7 @@ var opcodeTable = [...]opInfo{ asm: 
x86.ALEAW, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15630,8 +15630,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15648,8 +15648,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15666,8 +15666,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15683,8 +15683,8 @@ var opcodeTable = [...]opInfo{ scale: 2, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15700,8 +15700,8 @@ var opcodeTable = [...]opInfo{ scale: 2, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15717,8 +15717,8 @@ var opcodeTable = [...]opInfo{ scale: 2, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15734,8 +15734,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, 
// AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15751,8 +15751,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15768,8 +15768,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15785,8 +15785,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15802,8 +15802,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15819,8 +15819,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15836,7 +15836,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVBLZX, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15852,7 +15852,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVBQSX, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15868,7 +15868,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVWLZX, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 
R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15884,7 +15884,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVWQSX, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15900,7 +15900,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVL, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15916,7 +15916,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVLQSX, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15932,7 +15932,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15948,8 +15948,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -15962,8 +15962,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -15976,8 +15976,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVL, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -15990,8 +15990,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16004,7 +16004,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVUPS, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 @@ -16020,8 +16020,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVUPS, reg: regInfo{ inputs: []inputInfo{ - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, }, }, @@ -16035,8 +16035,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -16053,8 +16053,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -16070,8 +16070,8 @@ var opcodeTable = [...]opInfo{ scale: 2, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -16088,8 +16088,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -16105,8 +16105,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -16122,8 +16122,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -16140,8 +16140,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI 
DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -16157,8 +16157,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -16175,9 +16175,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16191,9 +16191,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16206,9 +16206,9 @@ var opcodeTable = [...]opInfo{ scale: 2, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16222,9 +16222,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16237,9 +16237,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16252,9 +16252,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: 
[]inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16268,9 +16268,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16283,9 +16283,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16298,7 +16298,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16311,7 +16311,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16324,7 +16324,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVL, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16337,7 +16337,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16350,7 +16350,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVUPS, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16364,8 +16364,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16379,8 +16379,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI 
DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16393,8 +16393,8 @@ var opcodeTable = [...]opInfo{ scale: 2, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16408,8 +16408,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16422,8 +16422,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16437,8 +16437,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16451,8 +16451,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16707,7 +16707,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -16723,7 +16723,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVL, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -16739,7 +16739,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -16757,8 +16757,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AXCHGB, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB 
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -16776,8 +16776,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AXCHGL, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -16795,8 +16795,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AXCHGQ, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -16815,8 +16815,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AXADDL, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -16835,8 +16835,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AXADDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -16908,8 +16908,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDB, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16924,8 +16924,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDL, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16940,8 +16940,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16956,8 +16956,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AORB, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX 
BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16972,8 +16972,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AORL, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16988,8 +16988,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AORQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -17092,7 +17092,7 @@ var opcodeTable = [...]opInfo{ asm: x86.APREFETCHT0, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -17103,7 +17103,7 @@ var opcodeTable = [...]opInfo{ asm: x86.APREFETCHNTA, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -17286,8 +17286,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVBEW, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -17300,7 +17300,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVBEL, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17316,8 +17316,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVBEL, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -17330,7 +17330,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVBEQ, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17346,8 +17346,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVBEQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 
72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -17361,8 +17361,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17378,8 +17378,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17395,8 +17395,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17413,8 +17413,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17430,8 +17430,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17448,9 +17448,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -17463,9 +17463,9 @@ var opcodeTable = [...]opInfo{ scale: 2, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP 
SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -17479,9 +17479,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -17494,9 +17494,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -17509,9 +17509,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -17525,9 +17525,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -17540,9 +17540,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -17639,8 +17639,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASARXL, reg: regInfo{ inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17656,8 +17656,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASARXQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + 
{0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17673,8 +17673,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASHLXL, reg: regInfo{ inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17690,8 +17690,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASHLXQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17707,8 +17707,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASHRXL, reg: regInfo{ inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17724,8 +17724,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASHRXQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17742,9 +17742,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17761,9 +17761,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17780,9 +17780,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 
R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17799,9 +17799,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17818,9 +17818,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17837,9 +17837,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17856,9 +17856,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17875,9 +17875,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17894,9 +17894,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ 
inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17913,9 +17913,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17932,9 +17932,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17951,9 +17951,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17970,9 +17970,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17989,9 +17989,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, 
// AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -18008,9 +18008,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -18141,7 +18141,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMOVDQU, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18157,8 +18157,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMOVDQU, reg: regInfo{ inputs: []inputInfo{ - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, }, }, @@ -18171,7 +18171,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMOVDQU, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18187,8 +18187,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMOVDQU, reg: regInfo{ inputs: []inputInfo{ - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, }, }, @@ -18201,7 +18201,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMOVDQU64, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18217,8 +18217,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMOVDQU64, reg: regInfo{ inputs: []inputInfo{ - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, }, }, @@ -18228,7 +18228,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVM2B, reg: regInfo{ inputs: []inputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18241,7 +18241,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVM2B, reg: regInfo{ inputs: []inputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, outputs: 
[]outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18254,10 +18254,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVM2B, reg: regInfo{ inputs: []inputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18267,7 +18267,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVM2W, reg: regInfo{ inputs: []inputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18280,7 +18280,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVM2W, reg: regInfo{ inputs: []inputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18293,10 +18293,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVM2W, reg: regInfo{ inputs: []inputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18306,7 +18306,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVM2D, reg: regInfo{ inputs: []inputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18319,7 +18319,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVM2D, reg: regInfo{ inputs: []inputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18332,10 +18332,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVM2D, reg: regInfo{ inputs: []inputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18345,7 +18345,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVM2Q, reg: regInfo{ inputs: []inputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18358,7 +18358,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVM2Q, reg: regInfo{ inputs: []inputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18371,10 +18371,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVM2Q, reg: regInfo{ inputs: []inputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18387,7 +18387,7 @@ var opcodeTable = [...]opInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -18400,7 +18400,7 @@ var opcodeTable = [...]opInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -18410,10 +18410,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVB2M, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -18426,7 +18426,7 @@ var opcodeTable = [...]opInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -18439,7 +18439,7 @@ var opcodeTable = [...]opInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -18449,10 +18449,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVW2M, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -18465,7 +18465,7 @@ var opcodeTable = [...]opInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -18478,7 +18478,7 @@ var opcodeTable = [...]opInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -18488,10 +18488,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVD2M, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -18504,7 +18504,7 @@ var opcodeTable = [...]opInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -18517,7 +18517,7 @@ var opcodeTable = [...]opInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + 
{0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -18527,10 +18527,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVQ2M, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -18560,7 +18560,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPXORQ, reg: regInfo{ outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18571,11 +18571,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18586,9 +18586,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18601,10 +18601,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18614,8 +18614,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18628,10 +18628,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRSQRT14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 
X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18641,8 +18641,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRSQRT14PS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18655,11 +18655,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18669,9 +18669,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18701,10 +18701,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMADD213PS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18734,10 +18734,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMADDSUB213PS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18767,10 +18767,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMSUBADD213PS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18784,11 +18784,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18799,9 +18799,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18815,11 +18815,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18830,9 +18830,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 @@ -18846,11 +18846,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18860,11 +18860,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18874,9 +18874,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18890,9 +18890,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18905,10 +18905,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18918,8 +18918,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18932,11 +18932,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18946,9 +18946,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18977,9 +18977,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19006,10 +19006,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19019,8 +19019,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19046,8 +19046,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRSQRT14PS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19074,9 +19074,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19106,10 +19106,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMADD213PS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19139,10 +19139,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMADDSUB213PS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19172,10 +19172,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMSUBADD213PS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19204,9 +19204,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ 
-19235,9 +19235,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19265,11 +19265,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19279,9 +19279,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19295,9 +19295,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19351,8 +19351,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19379,9 +19379,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19410,9 +19410,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19439,10 +19439,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19452,8 +19452,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19479,8 +19479,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRSQRT14PS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19507,9 +19507,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19539,10 +19539,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMADD213PS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19572,10 +19572,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMADDSUB213PS, reg: regInfo{ inputs: []inputInfo{ - {3, 
1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19605,10 +19605,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMSUBADD213PS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19637,9 +19637,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19668,9 +19668,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19698,11 +19698,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19712,9 +19712,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19728,9 +19728,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19784,8 +19784,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19812,9 +19812,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19843,9 +19843,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19872,10 +19872,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19885,8 +19885,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19899,10 +19899,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19912,8 +19912,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19940,9 +19940,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19972,10 +19972,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20005,10 +20005,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMADDSUB213PD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20038,10 +20038,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20070,9 +20070,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20101,9 +20101,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20131,11 +20131,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20145,9 +20145,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20161,9 +20161,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20217,8 +20217,8 @@ var opcodeTable = 
[...]opInfo{ asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20245,9 +20245,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20276,9 +20276,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20305,10 +20305,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20318,8 +20318,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20332,10 +20332,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20345,8 +20345,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: 
[]outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20373,9 +20373,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20405,10 +20405,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20438,10 +20438,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMADDSUB213PD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20471,10 +20471,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20503,9 +20503,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20534,9 +20534,9 @@ var opcodeTable = [...]opInfo{ asm: 
x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20564,11 +20564,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20578,9 +20578,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20594,9 +20594,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20650,8 +20650,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20678,9 +20678,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20694,11 
+20694,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20709,9 +20709,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20724,10 +20724,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20737,8 +20737,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20751,10 +20751,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20764,8 +20764,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20778,11 +20778,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ - {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20792,9 +20792,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20824,10 +20824,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20857,10 +20857,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMADDSUB213PD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20890,10 +20890,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20907,11 +20907,11 @@ var opcodeTable = [...]opInfo{ asm: 
x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20922,9 +20922,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20938,11 +20938,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20953,9 +20953,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20969,11 +20969,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20983,11 +20983,11 @@ var opcodeTable = 
[...]opInfo{ asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20997,9 +20997,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21013,9 +21013,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21028,10 +21028,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21041,8 +21041,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21055,11 +21055,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21069,9 +21069,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21097,8 +21097,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21127,9 +21127,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21187,9 +21187,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21218,9 +21218,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21249,9 +21249,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21280,9 +21280,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21309,9 +21309,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMADDWD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21352,10 +21352,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21365,8 +21365,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21395,9 +21395,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21452,9 +21452,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21509,11 +21509,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLVW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21540,10 +21540,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDVW, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21556,9 +21556,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLVW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21571,11 +21571,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLVW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21602,10 +21602,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDVW, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21618,9 +21618,9 @@ var 
opcodeTable = [...]opInfo{ asm: x86.AVPSRLVW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21633,11 +21633,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21647,9 +21647,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21690,9 +21690,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21705,10 +21705,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21718,8 +21718,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 @@ -21733,11 +21733,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21748,9 +21748,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21764,11 +21764,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21779,9 +21779,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21795,11 +21795,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 
X30 X31 }, }, }, @@ -21810,9 +21810,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21826,11 +21826,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21841,9 +21841,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21857,11 +21857,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21872,9 +21872,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21887,11 +21887,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMADDWD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21901,9 +21901,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMADDWD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21916,10 +21916,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21929,8 +21929,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21944,11 +21944,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21959,9 +21959,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ 
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21974,11 +21974,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21988,9 +21988,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22003,11 +22003,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLVW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22034,10 +22034,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDVW, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22050,9 +22050,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLVW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 
X9 X10 X11 X12 X13 X14 @@ -22065,11 +22065,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLVW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22096,10 +22096,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDVW, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22112,9 +22112,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLVW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22127,11 +22127,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22141,9 +22141,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22156,11 +22156,11 
@@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22170,9 +22170,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22198,8 +22198,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22228,9 +22228,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22288,9 +22288,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22319,9 +22319,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 @@ -22350,9 +22350,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22381,9 +22381,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22410,9 +22410,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMADDWD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22453,10 +22453,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22466,8 +22466,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22496,9 +22496,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22553,9 +22553,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22610,11 +22610,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLVW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22641,10 +22641,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDVW, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22657,9 +22657,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLVW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22672,11 +22672,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLVW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22703,10 +22703,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDVW, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22719,9 +22719,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLVW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22734,11 +22734,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22748,9 +22748,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22791,9 +22791,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22806,10 +22806,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22819,8 +22819,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22834,11 +22834,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22849,9 +22849,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22865,11 +22865,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPANDD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22880,9 +22880,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPANDD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22895,11 +22895,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22909,9 +22909,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22925,11 +22925,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22940,9 +22940,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22956,11 +22956,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22971,9 +22971,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22987,11 +22987,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23002,9 +23002,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23018,11 +23018,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPORD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23033,9 +23033,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPORD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23065,10 +23065,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPDPWSSD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23081,10 +23081,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23094,8 +23094,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23108,11 +23108,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23122,9 +23122,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLVD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23137,11 +23137,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 
X31 }, }, }, @@ -23151,9 +23151,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORVD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23183,10 +23183,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPDPWSSDS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23216,10 +23216,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPDPBUSDS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23232,11 +23232,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23263,10 +23263,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDVD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23279,9 +23279,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLVD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23294,11 +23294,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23325,10 +23325,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDVD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23341,9 +23341,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLVD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23356,11 +23356,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 
X29 X30 X31 }, }, }, @@ -23370,9 +23370,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23385,11 +23385,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23399,9 +23399,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23431,10 +23431,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPDPBUSD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23448,11 +23448,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPXORD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23463,9 +23463,9 @@ var 
opcodeTable = [...]opInfo{ asm: x86.AVPXORD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23491,8 +23491,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23521,9 +23521,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23537,9 +23537,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPANDD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23552,9 +23552,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23612,9 +23612,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23643,9 +23643,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23689,9 +23689,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23705,9 +23705,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPORD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23737,10 +23737,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPDPWSSD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23781,10 +23781,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23794,8 +23794,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23808,11 +23808,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23822,9 +23822,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLVD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23837,11 +23837,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23851,9 +23851,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORVD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23883,10 +23883,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPDPWSSDS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23916,10 +23916,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPDPBUSDS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24005,10 +24005,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDVD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24021,9 +24021,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLVD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24067,10 +24067,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDVD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24083,9 +24083,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLVD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24112,9 +24112,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24155,9 +24155,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24187,10 +24187,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPDPBUSD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24204,9 +24204,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPXORD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24232,8 +24232,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24262,9 +24262,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24278,9 +24278,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPANDD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24293,9 +24293,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24353,9 +24353,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24384,9 +24384,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24430,9 +24430,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24446,9 +24446,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPORD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24478,10 +24478,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPDPWSSD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24522,10 +24522,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24535,8 +24535,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24549,11 +24549,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24563,9 +24563,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLVD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24578,11 +24578,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24592,9 +24592,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORVD, reg: 
regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24624,10 +24624,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPDPWSSDS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24657,10 +24657,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPDPBUSDS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24746,10 +24746,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDVD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24762,9 +24762,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLVD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24808,10 +24808,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDVD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24824,9 +24824,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLVD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24853,9 +24853,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24896,9 +24896,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24928,10 +24928,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPDPBUSD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24945,9 +24945,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPXORD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24960,10 +24960,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24973,8 +24973,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25003,9 +25003,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25019,9 +25019,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPANDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25034,9 +25034,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPANDNQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25065,11 +25065,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 
X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25080,9 +25080,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25096,11 +25096,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25111,9 +25111,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25127,9 +25127,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25143,11 +25143,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25158,9 +25158,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 
1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25174,9 +25174,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25189,10 +25189,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25202,8 +25202,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25216,11 +25216,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25230,9 +25230,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25245,11 +25245,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORVQ, reg: 
regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25259,9 +25259,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25288,12 +25288,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25317,12 +25317,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25332,11 +25332,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25346,12 +25346,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25392,10 +25392,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDVQ, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25408,9 +25408,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25454,10 +25454,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25470,9 +25470,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 @@ -25485,11 +25485,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25499,9 +25499,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25528,9 +25528,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25544,9 +25544,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25559,10 +25559,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25572,8 +25572,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25602,9 +25602,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25618,9 +25618,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPANDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25633,9 +25633,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPANDNQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25678,11 +25678,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25693,9 +25693,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25709,11 +25709,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 
X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25724,9 +25724,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25740,9 +25740,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25756,11 +25756,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25771,9 +25771,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25787,9 +25787,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25802,10 +25802,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25815,8 +25815,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25829,11 +25829,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25843,9 +25843,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25858,11 +25858,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25872,9 +25872,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25901,12 +25901,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25930,12 +25930,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25945,11 +25945,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25959,12 +25959,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26005,10 +26005,10 @@ var opcodeTable = [...]opInfo{ asm: 
x86.AVPSHLDVQ, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26021,9 +26021,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26067,10 +26067,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26083,9 +26083,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26098,11 +26098,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26112,9 +26112,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26141,9 +26141,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26157,9 +26157,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26172,10 +26172,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26185,8 +26185,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26200,11 +26200,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26215,9 +26215,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, 
// K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26231,11 +26231,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPANDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26246,9 +26246,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPANDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26261,11 +26261,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPANDNQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26275,9 +26275,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPANDNQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26291,11 +26291,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26306,9 +26306,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26322,11 +26322,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26337,9 +26337,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26353,11 +26353,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26368,9 +26368,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26384,11 +26384,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26399,9 +26399,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26415,11 +26415,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26430,9 +26430,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26445,10 +26445,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26458,8 +26458,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26472,11 +26472,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26486,9 +26486,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26501,11 +26501,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26515,9 +26515,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26530,11 +26530,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + 
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26544,12 +26544,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26559,11 +26559,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26573,12 +26573,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26588,11 +26588,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ 
-26602,12 +26602,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26617,11 +26617,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26648,10 +26648,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDVQ, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26664,9 +26664,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26679,11 +26679,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26710,10 +26710,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26726,9 +26726,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26741,11 +26741,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26755,9 +26755,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26770,11 +26770,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26784,9 +26784,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26800,11 +26800,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26815,9 +26815,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26843,8 +26843,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26873,9 +26873,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26962,9 +26962,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26993,9 +26993,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27023,10 +27023,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27036,8 +27036,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27066,9 +27066,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27095,9 +27095,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27138,9 +27138,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 @@ -27181,8 +27181,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27211,9 +27211,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27300,9 +27300,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27331,9 +27331,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27361,10 +27361,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27374,8 +27374,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27404,9 +27404,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + 
{2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27433,9 +27433,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27476,9 +27476,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27506,10 +27506,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27519,8 +27519,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27534,11 +27534,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27549,9 +27549,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27565,11 +27565,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27580,9 +27580,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27596,11 +27596,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27611,9 +27611,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27626,10 +27626,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27639,8 +27639,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27654,11 +27654,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27669,9 +27669,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27684,11 +27684,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27698,9 +27698,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27713,11 +27713,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27727,9 +27727,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27758,9 +27758,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27789,9 +27789,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27820,9 +27820,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27851,9 +27851,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27867,11 +27867,11 @@ var 
opcodeTable = [...]opInfo{ asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27882,9 +27882,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27898,11 +27898,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27913,9 +27913,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27929,11 +27929,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27944,9 
+27944,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27960,11 +27960,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27975,9 +27975,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28006,9 +28006,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28037,9 +28037,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28068,9 +28068,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28099,9 +28099,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28115,11 +28115,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28130,9 +28130,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28146,11 +28146,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28161,9 +28161,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28192,9 
+28192,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28223,9 +28223,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28269,9 +28269,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28300,9 +28300,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28331,11 +28331,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28346,9 +28346,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28362,11 +28362,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28377,9 +28377,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28393,9 +28393,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28409,11 +28409,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28424,9 +28424,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28440,11 
+28440,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28455,9 +28455,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28471,9 +28471,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28487,11 +28487,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28502,9 +28502,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28518,11 +28518,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28533,9 +28533,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28549,11 +28549,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28564,9 +28564,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28595,9 +28595,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28610,11 +28610,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8MULB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 
X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28624,9 +28624,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8MULB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28655,9 +28655,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28686,9 +28686,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28715,9 +28715,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMADDUBSW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28746,9 +28746,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28761,11 +28761,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8MULB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28775,9 +28775,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8MULB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28806,9 +28806,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28837,9 +28837,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28866,9 +28866,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMADDUBSW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28882,11 +28882,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28897,9 +28897,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28912,11 +28912,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8MULB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28926,9 +28926,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8MULB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28942,11 +28942,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28957,9 +28957,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28973,11 +28973,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28988,9 +28988,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29003,11 +29003,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMADDUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29017,9 +29017,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMADDUBSW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29033,10 +29033,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29047,8 +29047,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - 
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29062,10 +29062,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29076,8 +29076,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29096,7 +29096,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -29108,12 +29108,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -29138,10 +29138,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29152,8 +29152,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29167,10 +29167,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 
X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29181,8 +29181,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29213,12 +29213,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -29243,10 +29243,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29257,8 +29257,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29272,10 +29272,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29286,8 +29286,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29318,12 +29318,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - 
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -29377,10 +29377,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29391,8 +29391,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29406,10 +29406,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29420,8 +29420,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29468,12 +29468,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -29498,10 +29498,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29512,8 +29512,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29527,10 +29527,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29541,8 +29541,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29573,12 +29573,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -29589,10 +29589,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29603,8 +29603,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29618,10 +29618,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29632,8 +29632,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29652,7 +29652,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -29664,12 +29664,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -29681,12 +29681,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -29701,7 +29701,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -29712,11 +29712,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29727,9 +29727,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29743,11 +29743,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29758,9 +29758,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29779,7 +29779,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -29791,12 +29791,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -29807,11 +29807,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29822,9 +29822,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDW, reg: regInfo{ inputs: 
[]inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29838,11 +29838,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29853,9 +29853,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29870,12 +29870,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -29886,7 +29886,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPEXTRW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -29904,7 +29904,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -29930,11 +29930,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 
X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29945,9 +29945,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29961,11 +29961,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29976,9 +29976,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29997,7 +29997,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -30009,12 +30009,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -30025,10 +30025,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 
X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30039,8 +30039,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30054,10 +30054,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30068,8 +30068,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30083,11 +30083,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30098,9 +30098,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30114,11 +30114,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + 
{1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30129,9 +30129,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30146,12 +30146,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -30180,7 +30180,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -30191,10 +30191,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30205,8 +30205,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30220,10 +30220,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30234,8 +30234,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORD, reg: regInfo{ 
inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30264,11 +30264,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30279,9 +30279,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30295,11 +30295,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30310,9 +30310,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30327,12 +30327,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + 
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -30347,7 +30347,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -30358,10 +30358,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30372,8 +30372,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30387,10 +30387,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30401,8 +30401,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30416,11 +30416,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30431,9 +30431,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30447,11 +30447,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30462,9 +30462,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30479,12 +30479,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -30513,7 +30513,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -30524,10 +30524,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30538,8 +30538,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ 
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30553,10 +30553,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30567,8 +30567,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30597,11 +30597,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30612,9 +30612,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30628,11 +30628,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30643,9 +30643,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30660,12 +30660,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -30680,7 +30680,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -30691,10 +30691,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30705,8 +30705,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30720,10 +30720,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30734,8 +30734,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30749,11 +30749,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30764,9 +30764,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30780,11 +30780,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30795,9 +30795,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30816,7 +30816,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -30828,12 +30828,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -30844,10 +30844,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30858,8 +30858,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30873,10 +30873,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30887,8 +30887,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30902,11 +30902,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30917,9 +30917,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30933,11 +30933,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 
X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30948,9 +30948,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30965,12 +30965,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -30981,7 +30981,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPEXTRB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -30999,7 +30999,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31026,12 +31026,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31060,7 +31060,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31091,7 +31091,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31103,12 +31103,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPB, 
reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31124,7 +31124,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31136,12 +31136,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31157,7 +31157,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31169,12 +31169,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31190,7 +31190,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31202,12 +31202,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31223,7 +31223,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31235,12 +31235,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPUD, reg: regInfo{ inputs: 
[]inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31256,7 +31256,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31268,12 +31268,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31289,7 +31289,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31301,12 +31301,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31322,7 +31322,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31334,12 +31334,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31355,7 +31355,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31367,12 +31367,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {2, 
1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31388,7 +31388,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31400,12 +31400,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31421,7 +31421,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31433,12 +31433,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31449,11 +31449,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -31464,11 +31464,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -31479,9 +31479,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31495,9 +31495,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31516,7 +31516,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31528,12 +31528,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31544,11 +31544,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -31559,11 +31559,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -31574,9 +31574,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31590,9 +31590,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31611,7 +31611,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31623,12 +31623,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31639,11 +31639,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -31654,11 +31654,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -31669,9 +31669,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31685,9 +31685,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -68069,21 +68069,37 @@ var registersAMD64 = [...]Register{ {29, x86.REG_X13, "X13"}, {30, x86.REG_X14, "X14"}, {31, x86.REG_X15, "X15"}, - {32, x86.REG_K0, "K0"}, - {33, x86.REG_K1, "K1"}, - {34, x86.REG_K2, "K2"}, - {35, x86.REG_K3, "K3"}, - {36, x86.REG_K4, "K4"}, - {37, x86.REG_K5, "K5"}, - {38, x86.REG_K6, "K6"}, - {39, x86.REG_K7, "K7"}, - {40, 0, "SB"}, + {32, x86.REG_X16, "X16"}, + {33, x86.REG_X17, "X17"}, + {34, x86.REG_X18, "X18"}, + {35, x86.REG_X19, "X19"}, + {36, x86.REG_X20, "X20"}, + {37, x86.REG_X21, "X21"}, + {38, x86.REG_X22, "X22"}, + {39, x86.REG_X23, "X23"}, + {40, x86.REG_X24, "X24"}, + {41, x86.REG_X25, "X25"}, + {42, x86.REG_X26, "X26"}, + {43, x86.REG_X27, "X27"}, + {44, x86.REG_X28, "X28"}, + {45, x86.REG_X29, "X29"}, + {46, x86.REG_X30, "X30"}, + {47, x86.REG_X31, "X31"}, + {48, x86.REG_K0, "K0"}, + {49, x86.REG_K1, "K1"}, + {50, x86.REG_K2, "K2"}, + {51, x86.REG_K3, "K3"}, + {52, x86.REG_K4, "K4"}, + {53, x86.REG_K5, "K5"}, + {54, x86.REG_K6, "K6"}, + {55, x86.REG_K7, "K7"}, + {56, 0, "SB"}, } var paramIntRegAMD64 = []int8{0, 3, 1, 7, 6, 8, 9, 10, 11} var paramFloatRegAMD64 = []int8{16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30} var gpRegMaskAMD64 = regMask(49135) var fpRegMaskAMD64 = regMask(2147418112) -var specialRegMaskAMD64 = regMask(1093069176832) +var specialRegMaskAMD64 = regMask(71494646231990272) var framepointerRegAMD64 = int8(5) var linkRegAMD64 = int8(-1) var registersARM = [...]Register{ From aab8b173a96449110319455e8015fc140e43766e Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Wed, 9 Jul 2025 16:24:34 +0000 Subject: [PATCH 072/139] [dev.simd] cmd/compile, simd: Int64x2 Greater and Uint* Equal This CL is generated by CL 686817. 
Change-Id: I19b8e468594514b2b1c99f8ad766f78b5e194c80 Reviewed-on: https://go-review.googlesource.com/c/go/+/686876 TryBot-Bypass: David Chase Reviewed-by: David Chase --- src/cmd/compile/internal/amd64/simdssa.go | 11 +- .../compile/internal/ssa/_gen/simdAMD64.rules | 18 +- .../compile/internal/ssa/_gen/simdAMD64ops.go | 17 +- src/cmd/compile/internal/ssa/opGen.go | 173 ++++++++-------- src/cmd/compile/internal/ssa/rewriteAMD64.go | 189 ++---------------- .../compile/internal/ssagen/simdintrinsics.go | 18 +- src/simd/ops_amd64.go | 90 ++++----- src/simd/simd_wrapped_test.go | 4 +- 8 files changed, 188 insertions(+), 332 deletions(-) diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index d87548c27faa5b..12a8c857bd4fee 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -115,6 +115,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPCMPGTW256, ssa.OpAMD64VPCMPGTD128, ssa.OpAMD64VPCMPGTD256, + ssa.OpAMD64VPCMPGTQ128, ssa.OpAMD64VPCMPGTQ256, ssa.OpAMD64VMAXPS128, ssa.OpAMD64VMAXPS256, @@ -688,25 +689,25 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPCMPW512, ssa.OpAMD64VPCMPD512, ssa.OpAMD64VPCMPQ512, + ssa.OpAMD64VPCMPUB512, + ssa.OpAMD64VPCMPUW512, + ssa.OpAMD64VPCMPUD512, + ssa.OpAMD64VPCMPUQ512, ssa.OpAMD64VPCMPUB128, ssa.OpAMD64VPCMPUB256, - ssa.OpAMD64VPCMPUB512, ssa.OpAMD64VPCMPUW128, ssa.OpAMD64VPCMPUW256, - ssa.OpAMD64VPCMPUW512, ssa.OpAMD64VPCMPUD128, ssa.OpAMD64VPCMPUD256, - ssa.OpAMD64VPCMPUD512, ssa.OpAMD64VPCMPUQ128, ssa.OpAMD64VPCMPUQ256, - ssa.OpAMD64VPCMPUQ512, - ssa.OpAMD64VPCMPQ128, ssa.OpAMD64VPCMPB128, ssa.OpAMD64VPCMPB256, ssa.OpAMD64VPCMPW128, ssa.OpAMD64VPCMPW256, ssa.OpAMD64VPCMPD128, ssa.OpAMD64VPCMPD256, + ssa.OpAMD64VPCMPQ128, ssa.OpAMD64VPCMPQ256: p = simdV2kImm8(s, v) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 7ea24fe95cea22..09ab9b840aeff9 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -283,17 +283,17 @@ (EqualInt64x2 ...) => (VPCMPEQQ128 ...) (EqualInt64x4 ...) => (VPCMPEQQ256 ...) (EqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [0] x y)) -(EqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [0] x y)) -(EqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [0] x y)) +(EqualUint8x16 ...) => (VPCMPEQB128 ...) +(EqualUint8x32 ...) => (VPCMPEQB256 ...) (EqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [0] x y)) -(EqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [0] x y)) -(EqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [0] x y)) +(EqualUint16x8 ...) => (VPCMPEQW128 ...) +(EqualUint16x16 ...) => (VPCMPEQW256 ...) (EqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [0] x y)) -(EqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [0] x y)) -(EqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [0] x y)) +(EqualUint32x4 ...) => (VPCMPEQD128 ...) +(EqualUint32x8 ...) => (VPCMPEQD256 ...) (EqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [0] x y)) -(EqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [0] x y)) -(EqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [0] x y)) +(EqualUint64x2 ...) => (VPCMPEQQ128 ...) +(EqualUint64x4 ...) => (VPCMPEQQ256 ...) 
(EqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [0] x y)) (EqualMaskedFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [0] x y (VPMOVVec32x4ToM mask))) (EqualMaskedFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [0] x y (VPMOVVec32x8ToM mask))) @@ -428,7 +428,7 @@ (GreaterInt32x4 ...) => (VPCMPGTD128 ...) (GreaterInt32x8 ...) => (VPCMPGTD256 ...) (GreaterInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [14] x y)) -(GreaterInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [14] x y)) +(GreaterInt64x2 ...) => (VPCMPGTQ128 ...) (GreaterInt64x4 ...) => (VPCMPGTQ256 ...) (GreaterInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [14] x y)) (GreaterUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [14] x y)) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 09cfcfb4d9a41b..f0a149f7d8a88b 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -436,6 +436,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPANDQMasked128", argLength: 3, reg: w2kw, asm: "VPANDQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPANDNQMasked128", argLength: 3, reg: w2kw, asm: "VPANDNQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPEQQ128", argLength: 2, reg: v21, asm: "VPCMPEQQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPGTQ128", argLength: 2, reg: v21, asm: "VPCMPGTQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMAXSQ128", argLength: 2, reg: w21, asm: "VPMAXSQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMAXSQMasked128", argLength: 3, reg: w2kw, asm: "VPMAXSQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINSQ128", argLength: 2, reg: w21, asm: "VPMINSQ", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -837,36 +838,36 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VINSERTI128256", argLength: 2, reg: v21, asm: "VINSERTI128", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPB512", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPBMasked512", argLength: 3, reg: w2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUW256", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUWMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUW256", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUW512", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUWMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUW128", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUWMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUW128", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUD512", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: 
"Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUDMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUD128", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUDMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUD256", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUD128", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUDMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQ128", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUD256", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUQMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQ256", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQ128", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUQMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQ256", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUQ512", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUQMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUB128", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUBMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VGF2P8AFFINEQB128", argLength: 2, reg: w21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VGF2P8AFFINEINVQB128", argLength: 2, reg: w21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VGF2P8AFFINEINVQBMasked128", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VGF2P8AFFINEQBMasked128", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPUB256", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUB128", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUBMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VGF2P8AFFINEQB256", argLength: 2, reg: w21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VGF2P8AFFINEINVQB256", argLength: 2, reg: w21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", 
commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VGF2P8AFFINEINVQBMasked256", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VGF2P8AFFINEQBMasked256", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPUB256", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUB512", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUBMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VGF2P8AFFINEQB512", argLength: 2, reg: w21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index edc88dfbc6d473..d9fea94fc3f65e 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1629,6 +1629,7 @@ const ( OpAMD64VPANDQMasked128 OpAMD64VPANDNQMasked128 OpAMD64VPCMPEQQ128 + OpAMD64VPCMPGTQ128 OpAMD64VPMAXSQ128 OpAMD64VPMAXSQMasked128 OpAMD64VPMINSQ128 @@ -2030,36 +2031,36 @@ const ( OpAMD64VINSERTI128256 OpAMD64VPCMPB512 OpAMD64VPCMPBMasked512 - OpAMD64VPCMPUW256 OpAMD64VPCMPUWMasked256 + OpAMD64VPCMPUW256 OpAMD64VPCMPUW512 OpAMD64VPCMPUWMasked512 - OpAMD64VPCMPUW128 OpAMD64VPCMPUWMasked128 + OpAMD64VPCMPUW128 OpAMD64VPCMPUD512 OpAMD64VPCMPUDMasked512 - OpAMD64VPCMPUD128 OpAMD64VPCMPUDMasked128 - OpAMD64VPCMPUD256 + OpAMD64VPCMPUD128 OpAMD64VPCMPUDMasked256 - OpAMD64VPCMPUQ128 + OpAMD64VPCMPUD256 OpAMD64VPCMPUQMasked128 - OpAMD64VPCMPUQ256 + OpAMD64VPCMPUQ128 OpAMD64VPCMPUQMasked256 + OpAMD64VPCMPUQ256 OpAMD64VPCMPUQ512 OpAMD64VPCMPUQMasked512 - OpAMD64VPCMPUB128 OpAMD64VPCMPUBMasked128 OpAMD64VGF2P8AFFINEQB128 OpAMD64VGF2P8AFFINEINVQB128 OpAMD64VGF2P8AFFINEINVQBMasked128 OpAMD64VGF2P8AFFINEQBMasked128 - OpAMD64VPCMPUB256 + OpAMD64VPCMPUB128 OpAMD64VPCMPUBMasked256 OpAMD64VGF2P8AFFINEQB256 OpAMD64VGF2P8AFFINEINVQB256 OpAMD64VGF2P8AFFINEINVQBMasked256 OpAMD64VGF2P8AFFINEQBMasked256 + OpAMD64VPCMPUB256 OpAMD64VPCMPUB512 OpAMD64VPCMPUBMasked512 OpAMD64VGF2P8AFFINEQB512 @@ -25058,6 +25059,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPCMPGTQ128", + argLen: 2, + asm: x86.AVPCMPGTQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMAXSQ128", argLen: 2, @@ -31113,15 +31128,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUW256", + name: "VPCMPUWMasked256", auxType: auxInt8, - argLen: 2, + argLen: 3, commutative: true, asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -31129,16 +31145,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUWMasked256", - auxType: 
auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPUW, + name: "VPCMPUW256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -31179,15 +31193,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUW128", + name: "VPCMPUWMasked128", auxType: auxInt8, - argLen: 2, + argLen: 3, commutative: true, asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -31195,16 +31210,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUWMasked128", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPUW, + name: "VPCMPUW128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -31244,22 +31257,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPUD128", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVPCMPUD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VPCMPUDMasked128", auxType: auxInt8, @@ -31278,11 +31275,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUD256", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVPCMPUD, + name: "VPCMPUD128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31311,11 +31307,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUQ128", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVPCMPUQ, + name: "VPCMPUD256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31344,11 +31339,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUQ256", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVPCMPUQ, + name: "VPCMPUQ128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31377,11 +31371,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUQ512", - auxType: auxInt8, - 
argLen: 2, - commutative: true, - asm: x86.AVPCMPUQ, + name: "VPCMPUQ256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31393,16 +31386,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUQMasked512", + name: "VPCMPUQ512", auxType: auxInt8, - argLen: 3, + argLen: 2, commutative: true, asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -31410,15 +31402,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUB128", + name: "VPCMPUQMasked512", auxType: auxInt8, - argLen: 2, + argLen: 3, commutative: true, - asm: x86.AVPCMPUB, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -31505,11 +31498,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUB256", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVPCMPUB, + name: "VPCMPUB128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31599,6 +31591,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPCMPUB256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, { name: "VPCMPUB512", auxType: auxInt8, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 2e27077e81926b..4dd1fcbcb75815 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1530,27 +1530,35 @@ func rewriteValueAMD64(v *Value) bool { case OpEqualMaskedUint8x64: return rewriteValueAMD64_OpEqualMaskedUint8x64(v) case OpEqualUint16x16: - return rewriteValueAMD64_OpEqualUint16x16(v) + v.Op = OpAMD64VPCMPEQW256 + return true case OpEqualUint16x32: return rewriteValueAMD64_OpEqualUint16x32(v) case OpEqualUint16x8: - return rewriteValueAMD64_OpEqualUint16x8(v) + v.Op = OpAMD64VPCMPEQW128 + return true case OpEqualUint32x16: return rewriteValueAMD64_OpEqualUint32x16(v) case OpEqualUint32x4: - return rewriteValueAMD64_OpEqualUint32x4(v) + v.Op = OpAMD64VPCMPEQD128 + return true case OpEqualUint32x8: - return rewriteValueAMD64_OpEqualUint32x8(v) + v.Op = OpAMD64VPCMPEQD256 + return true case OpEqualUint64x2: - return rewriteValueAMD64_OpEqualUint64x2(v) + v.Op = OpAMD64VPCMPEQQ128 + return true case OpEqualUint64x4: - return rewriteValueAMD64_OpEqualUint64x4(v) + v.Op = OpAMD64VPCMPEQQ256 + return true case 
OpEqualUint64x8: return rewriteValueAMD64_OpEqualUint64x8(v) case OpEqualUint8x16: - return rewriteValueAMD64_OpEqualUint8x16(v) + v.Op = OpAMD64VPCMPEQB128 + return true case OpEqualUint8x32: - return rewriteValueAMD64_OpEqualUint8x32(v) + v.Op = OpAMD64VPCMPEQB256 + return true case OpEqualUint8x64: return rewriteValueAMD64_OpEqualUint8x64(v) case OpFMA: @@ -1914,7 +1922,8 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64VPCMPGTD256 return true case OpGreaterInt64x2: - return rewriteValueAMD64_OpGreaterInt64x2(v) + v.Op = OpAMD64VPCMPGTQ128 + return true case OpGreaterInt64x4: v.Op = OpAMD64VPCMPGTQ256 return true @@ -33212,24 +33221,6 @@ func rewriteValueAMD64_OpEqualMaskedUint8x64(v *Value) bool { return true } } -func rewriteValueAMD64_OpEqualUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualUint16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPUW256 [0] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpEqualUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -33248,24 +33239,6 @@ func rewriteValueAMD64_OpEqualUint16x32(v *Value) bool { return true } } -func rewriteValueAMD64_OpEqualUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualUint16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPUW128 [0] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpEqualUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -33284,78 +33257,6 @@ func rewriteValueAMD64_OpEqualUint32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpEqualUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualUint32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPUD128 [0] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEqualUint32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualUint32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPUD256 [0] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEqualUint64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualUint64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPUQ128 [0] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEqualUint64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualUint64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPUQ256 [0] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 
:= b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpEqualUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -33374,42 +33275,6 @@ func rewriteValueAMD64_OpEqualUint64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpEqualUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualUint8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPUB128 [0] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEqualUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualUint8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPUB256 [0] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpEqualUint8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -35875,24 +35740,6 @@ func rewriteValueAMD64_OpGreaterInt32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpGreaterInt64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterInt64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPQ128 [14] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpGreaterInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index c6e8961738741b..15351b678b49e8 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -284,6 +284,14 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Int32x8.Equal", opLen2(ssa.OpEqualInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int64x2.Equal", opLen2(ssa.OpEqualInt64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int64x4.Equal", opLen2(ssa.OpEqualInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x16.Equal", opLen2(ssa.OpEqualUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.Equal", opLen2(ssa.OpEqualUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x8.Equal", opLen2(ssa.OpEqualUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.Equal", opLen2(ssa.OpEqualUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x4.Equal", opLen2(ssa.OpEqualUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.Equal", opLen2(ssa.OpEqualUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x2.Equal", opLen2(ssa.OpEqualUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.Equal", opLen2(ssa.OpEqualUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x4.Equal", opLen2(ssa.OpEqualFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Equal", opLen2(ssa.OpEqualFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.Equal", opLen2(ssa.OpEqualFloat32x16, types.TypeVec512), sys.AMD64) @@ -294,17 +302,9 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Int16x32.Equal", opLen2(ssa.OpEqualInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x16.Equal", opLen2(ssa.OpEqualInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int64x8.Equal", opLen2(ssa.OpEqualInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.Equal", opLen2(ssa.OpEqualUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.Equal", opLen2(ssa.OpEqualUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x64.Equal", opLen2(ssa.OpEqualUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.Equal", opLen2(ssa.OpEqualUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.Equal", opLen2(ssa.OpEqualUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x32.Equal", opLen2(ssa.OpEqualUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.Equal", opLen2(ssa.OpEqualUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.Equal", opLen2(ssa.OpEqualUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x16.Equal", opLen2(ssa.OpEqualUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.Equal", opLen2(ssa.OpEqualUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.Equal", opLen2(ssa.OpEqualUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.Equal", opLen2(ssa.OpEqualUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.EqualMasked", opLen3(ssa.OpEqualMaskedFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.EqualMasked", opLen3(ssa.OpEqualMaskedFloat32x8, types.TypeVec256), sys.AMD64) @@ -430,6 +430,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Int16x16.Greater", opLen2(ssa.OpGreaterInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x4.Greater", opLen2(ssa.OpGreaterInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.Greater", opLen2(ssa.OpGreaterInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x2.Greater", opLen2(ssa.OpGreaterInt64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int64x4.Greater", opLen2(ssa.OpGreaterInt64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x4.Greater", opLen2(ssa.OpGreaterFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Greater", opLen2(ssa.OpGreaterFloat32x8, types.TypeVec256), sys.AMD64) @@ -440,7 +441,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Int8x64.Greater", opLen2(ssa.OpGreaterInt8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x32.Greater", opLen2(ssa.OpGreaterInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x16.Greater", opLen2(ssa.OpGreaterInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.Greater", opLen2(ssa.OpGreaterInt64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int64x8.Greater", opLen2(ssa.OpGreaterInt64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x16.Greater", opLen2(ssa.OpGreaterUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x32.Greater", opLen2(ssa.OpGreaterUint8x32, types.TypeVec256), sys.AMD64) diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 26a0d3e9ad4e74..55c4b32db00bf5 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -1429,6 +1429,46 @@ func (x Int64x2) Equal(y Int64x2) Mask64x2 // Asm: VPCMPEQQ, CPU Feature: AVX2 func (x Int64x4) Equal(y Int64x4) Mask64x4 +// Equal compares for equality. +// +// Asm: VPCMPEQB, CPU Feature: AVX +func (x Uint8x16) Equal(y Uint8x16) Mask8x16 + +// Equal compares for equality. +// +// Asm: VPCMPEQB, CPU Feature: AVX2 +func (x Uint8x32) Equal(y Uint8x32) Mask8x32 + +// Equal compares for equality. +// +// Asm: VPCMPEQW, CPU Feature: AVX +func (x Uint16x8) Equal(y Uint16x8) Mask16x8 + +// Equal compares for equality. +// +// Asm: VPCMPEQW, CPU Feature: AVX2 +func (x Uint16x16) Equal(y Uint16x16) Mask16x16 + +// Equal compares for equality. +// +// Asm: VPCMPEQD, CPU Feature: AVX +func (x Uint32x4) Equal(y Uint32x4) Mask32x4 + +// Equal compares for equality. +// +// Asm: VPCMPEQD, CPU Feature: AVX2 +func (x Uint32x8) Equal(y Uint32x8) Mask32x8 + +// Equal compares for equality. +// +// Asm: VPCMPEQQ, CPU Feature: AVX +func (x Uint64x2) Equal(y Uint64x2) Mask64x2 + +// Equal compares for equality. +// +// Asm: VPCMPEQQ, CPU Feature: AVX2 +func (x Uint64x4) Equal(y Uint64x4) Mask64x4 + // Equal compares for equality. // // Asm: VCMPPS, CPU Feature: AVX @@ -1479,61 +1519,21 @@ func (x Int32x16) Equal(y Int32x16) Mask32x16 // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) Equal(y Int64x8) Mask64x8 -// Equal compares for equality, masked. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) Equal(y Uint8x16) Mask8x16 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) Equal(y Uint8x32) Mask8x32 - // Equal compares for equality, masked. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) Equal(y Uint8x64) Mask8x64 -// Equal compares for equality, masked. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) Equal(y Uint16x8) Mask16x8 - -// Equal compares for equality, masked. 
-// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) Equal(y Uint16x16) Mask16x16 - // Equal compares for equality, masked. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) Equal(y Uint16x32) Mask16x32 -// Equal compares for equality, masked. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) Equal(y Uint32x4) Mask32x4 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) Equal(y Uint32x8) Mask32x8 - // Equal compares for equality, masked. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) Equal(y Uint32x16) Mask32x16 -// Equal compares for equality, masked. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) Equal(y Uint64x2) Mask64x2 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) Equal(y Uint64x4) Mask64x4 - // Equal compares for equality, masked. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX @@ -2245,6 +2245,11 @@ func (x Int32x4) Greater(y Int32x4) Mask32x4 // Asm: VPCMPGTD, CPU Feature: AVX2 func (x Int32x8) Greater(y Int32x8) Mask32x8 +// Greater compares for greater than. +// +// Asm: VPCMPGTQ, CPU Feature: AVX +func (x Int64x2) Greater(y Int64x2) Int64x2 + // Greater compares for greater than. // // Asm: VPCMPGTQ, CPU Feature: AVX2 @@ -2295,11 +2300,6 @@ func (x Int16x32) Greater(y Int16x32) Mask16x32 // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) Greater(y Int32x16) Mask32x16 -// Greater compares for greater than. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x2) Greater(y Int64x2) Mask64x2 - // Greater compares for greater than. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX diff --git a/src/simd/simd_wrapped_test.go b/src/simd/simd_wrapped_test.go index bdbb25bfce4e6a..181a937d7ebc24 100644 --- a/src/simd/simd_wrapped_test.go +++ b/src/simd/simd_wrapped_test.go @@ -4018,6 +4018,8 @@ func testInt64x2Binary(t *testing.T, v0 []int64, v1 []int64, want []int64, which gotv = vec0.And(vec1) case "AndNot": gotv = vec0.AndNot(vec1) + case "Greater": + gotv = vec0.Greater(vec1) case "Max": gotv = vec0.Max(vec1) case "Min": @@ -4113,8 +4115,6 @@ func testInt64x2Compare(t *testing.T, v0 []int64, v1 []int64, want []int64, whic switch which { case "Equal": gotv = vec0.Equal(vec1).AsInt64x2() - case "Greater": - gotv = vec0.Greater(vec1).AsInt64x2() case "GreaterEqual": gotv = vec0.GreaterEqual(vec1).AsInt64x2() case "Less": From 9ea33ed5388a42aed30af526af3fcc5b185fb62d Mon Sep 17 00:00:00 2001 From: David Chase Date: Tue, 8 Jul 2025 12:52:30 -0400 Subject: [PATCH 073/139] [dev.simd] cmd/compile: output of simd generator, more ... 
rewrite rules Generated by simdgen CL 686378 Change-Id: I876ab91085c266ced59fc82ea12be709dc7eb721 Reviewed-on: https://go-review.googlesource.com/c/go/+/686495 Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI --- .../compile/internal/ssa/_gen/simdAMD64.rules | 204 +- src/cmd/compile/internal/ssa/rewriteAMD64.go | 1752 ++--------------- 2 files changed, 306 insertions(+), 1650 deletions(-) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 09ab9b840aeff9..c55a1f3f639711 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -377,12 +377,12 @@ (FusedMultiplySubAddMaskedFloat64x2 x y z mask) => (VFMSUBADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) (FusedMultiplySubAddMaskedFloat64x4 x y z mask) => (VFMSUBADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) (FusedMultiplySubAddMaskedFloat64x8 x y z mask) => (VFMSUBADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(GaloisFieldAffineTransformUint8x16 [a] x y) => (VGF2P8AFFINEQB128 [a] x y) -(GaloisFieldAffineTransformUint8x32 [a] x y) => (VGF2P8AFFINEQB256 [a] x y) -(GaloisFieldAffineTransformUint8x64 [a] x y) => (VGF2P8AFFINEQB512 [a] x y) -(GaloisFieldAffineTransformInversedUint8x16 [a] x y) => (VGF2P8AFFINEINVQB128 [a] x y) -(GaloisFieldAffineTransformInversedUint8x32 [a] x y) => (VGF2P8AFFINEINVQB256 [a] x y) -(GaloisFieldAffineTransformInversedUint8x64 [a] x y) => (VGF2P8AFFINEINVQB512 [a] x y) +(GaloisFieldAffineTransformUint8x16 ...) => (VGF2P8AFFINEQB128 ...) +(GaloisFieldAffineTransformUint8x32 ...) => (VGF2P8AFFINEQB256 ...) +(GaloisFieldAffineTransformUint8x64 ...) => (VGF2P8AFFINEQB512 ...) +(GaloisFieldAffineTransformInversedUint8x16 ...) => (VGF2P8AFFINEINVQB128 ...) +(GaloisFieldAffineTransformInversedUint8x32 ...) => (VGF2P8AFFINEINVQB256 ...) +(GaloisFieldAffineTransformInversedUint8x64 ...) => (VGF2P8AFFINEINVQB512 ...) (GaloisFieldAffineTransformInversedMaskedUint8x16 [a] x y mask) => (VGF2P8AFFINEINVQBMasked128 [a] x y (VPMOVVec8x16ToM mask)) (GaloisFieldAffineTransformInversedMaskedUint8x32 [a] x y mask) => (VGF2P8AFFINEINVQBMasked256 [a] x y (VPMOVVec8x32ToM mask)) (GaloisFieldAffineTransformInversedMaskedUint8x64 [a] x y mask) => (VGF2P8AFFINEINVQBMasked512 [a] x y (VPMOVVec8x64ToM mask)) @@ -395,24 +395,24 @@ (GaloisFieldMulMaskedUint8x16 x y mask) => (VGF2P8MULBMasked128 x y (VPMOVVec8x16ToM mask)) (GaloisFieldMulMaskedUint8x32 x y mask) => (VGF2P8MULBMasked256 x y (VPMOVVec8x32ToM mask)) (GaloisFieldMulMaskedUint8x64 x y mask) => (VGF2P8MULBMasked512 x y (VPMOVVec8x64ToM mask)) -(Get128Float32x8 [a] x) => (VEXTRACTF128128 [a] x) -(Get128Float64x4 [a] x) => (VEXTRACTF128128 [a] x) -(Get128Int8x32 [a] x) => (VEXTRACTI128128 [a] x) -(Get128Int16x16 [a] x) => (VEXTRACTI128128 [a] x) -(Get128Int32x8 [a] x) => (VEXTRACTI128128 [a] x) -(Get128Int64x4 [a] x) => (VEXTRACTI128128 [a] x) -(Get128Uint8x32 [a] x) => (VEXTRACTI128128 [a] x) -(Get128Uint16x16 [a] x) => (VEXTRACTI128128 [a] x) -(Get128Uint32x8 [a] x) => (VEXTRACTI128128 [a] x) -(Get128Uint64x4 [a] x) => (VEXTRACTI128128 [a] x) -(GetElemInt8x16 [a] x) => (VPEXTRB128 [a] x) -(GetElemInt16x8 [a] x) => (VPEXTRW128 [a] x) -(GetElemInt32x4 [a] x) => (VPEXTRD128 [a] x) -(GetElemInt64x2 [a] x) => (VPEXTRQ128 [a] x) -(GetElemUint8x16 [a] x) => (VPEXTRB128 [a] x) -(GetElemUint16x8 [a] x) => (VPEXTRW128 [a] x) -(GetElemUint32x4 [a] x) => (VPEXTRD128 [a] x) -(GetElemUint64x2 [a] x) => (VPEXTRQ128 [a] x) +(Get128Float32x8 ...) 
=> (VEXTRACTF128128 ...) +(Get128Float64x4 ...) => (VEXTRACTF128128 ...) +(Get128Int8x32 ...) => (VEXTRACTI128128 ...) +(Get128Int16x16 ...) => (VEXTRACTI128128 ...) +(Get128Int32x8 ...) => (VEXTRACTI128128 ...) +(Get128Int64x4 ...) => (VEXTRACTI128128 ...) +(Get128Uint8x32 ...) => (VEXTRACTI128128 ...) +(Get128Uint16x16 ...) => (VEXTRACTI128128 ...) +(Get128Uint32x8 ...) => (VEXTRACTI128128 ...) +(Get128Uint64x4 ...) => (VEXTRACTI128128 ...) +(GetElemInt8x16 ...) => (VPEXTRB128 ...) +(GetElemInt16x8 ...) => (VPEXTRW128 ...) +(GetElemInt32x4 ...) => (VPEXTRD128 ...) +(GetElemInt64x2 ...) => (VPEXTRQ128 ...) +(GetElemUint8x16 ...) => (VPEXTRB128 ...) +(GetElemUint16x8 ...) => (VPEXTRW128 ...) +(GetElemUint32x4 ...) => (VPEXTRD128 ...) +(GetElemUint64x2 ...) => (VPEXTRQ128 ...) (GreaterFloat32x4 x y) => (VCMPPS128 [14] x y) (GreaterFloat32x8 x y) => (VCMPPS256 [14] x y) (GreaterFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [14] x y)) @@ -1031,18 +1031,18 @@ (PopCountMaskedUint64x2 x mask) => (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) (PopCountMaskedUint64x4 x mask) => (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) (PopCountMaskedUint64x8 x mask) => (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) -(RotateAllLeftInt32x4 [a] x) => (VPROLD128 [a] x) -(RotateAllLeftInt32x8 [a] x) => (VPROLD256 [a] x) -(RotateAllLeftInt32x16 [a] x) => (VPROLD512 [a] x) -(RotateAllLeftInt64x2 [a] x) => (VPROLQ128 [a] x) -(RotateAllLeftInt64x4 [a] x) => (VPROLQ256 [a] x) -(RotateAllLeftInt64x8 [a] x) => (VPROLQ512 [a] x) -(RotateAllLeftUint32x4 [a] x) => (VPROLD128 [a] x) -(RotateAllLeftUint32x8 [a] x) => (VPROLD256 [a] x) -(RotateAllLeftUint32x16 [a] x) => (VPROLD512 [a] x) -(RotateAllLeftUint64x2 [a] x) => (VPROLQ128 [a] x) -(RotateAllLeftUint64x4 [a] x) => (VPROLQ256 [a] x) -(RotateAllLeftUint64x8 [a] x) => (VPROLQ512 [a] x) +(RotateAllLeftInt32x4 ...) => (VPROLD128 ...) +(RotateAllLeftInt32x8 ...) => (VPROLD256 ...) +(RotateAllLeftInt32x16 ...) => (VPROLD512 ...) +(RotateAllLeftInt64x2 ...) => (VPROLQ128 ...) +(RotateAllLeftInt64x4 ...) => (VPROLQ256 ...) +(RotateAllLeftInt64x8 ...) => (VPROLQ512 ...) +(RotateAllLeftUint32x4 ...) => (VPROLD128 ...) +(RotateAllLeftUint32x8 ...) => (VPROLD256 ...) +(RotateAllLeftUint32x16 ...) => (VPROLD512 ...) +(RotateAllLeftUint64x2 ...) => (VPROLQ128 ...) +(RotateAllLeftUint64x4 ...) => (VPROLQ256 ...) +(RotateAllLeftUint64x8 ...) => (VPROLQ512 ...) 
(RotateAllLeftMaskedInt32x4 [a] x mask) => (VPROLDMasked128 [a] x (VPMOVVec32x4ToM mask)) (RotateAllLeftMaskedInt32x8 [a] x mask) => (VPROLDMasked256 [a] x (VPMOVVec32x8ToM mask)) (RotateAllLeftMaskedInt32x16 [a] x mask) => (VPROLDMasked512 [a] x (VPMOVVec32x16ToM mask)) @@ -1055,18 +1055,18 @@ (RotateAllLeftMaskedUint64x2 [a] x mask) => (VPROLQMasked128 [a] x (VPMOVVec64x2ToM mask)) (RotateAllLeftMaskedUint64x4 [a] x mask) => (VPROLQMasked256 [a] x (VPMOVVec64x4ToM mask)) (RotateAllLeftMaskedUint64x8 [a] x mask) => (VPROLQMasked512 [a] x (VPMOVVec64x8ToM mask)) -(RotateAllRightInt32x4 [a] x) => (VPRORD128 [a] x) -(RotateAllRightInt32x8 [a] x) => (VPRORD256 [a] x) -(RotateAllRightInt32x16 [a] x) => (VPRORD512 [a] x) -(RotateAllRightInt64x2 [a] x) => (VPRORQ128 [a] x) -(RotateAllRightInt64x4 [a] x) => (VPRORQ256 [a] x) -(RotateAllRightInt64x8 [a] x) => (VPRORQ512 [a] x) -(RotateAllRightUint32x4 [a] x) => (VPRORD128 [a] x) -(RotateAllRightUint32x8 [a] x) => (VPRORD256 [a] x) -(RotateAllRightUint32x16 [a] x) => (VPRORD512 [a] x) -(RotateAllRightUint64x2 [a] x) => (VPRORQ128 [a] x) -(RotateAllRightUint64x4 [a] x) => (VPRORQ256 [a] x) -(RotateAllRightUint64x8 [a] x) => (VPRORQ512 [a] x) +(RotateAllRightInt32x4 ...) => (VPRORD128 ...) +(RotateAllRightInt32x8 ...) => (VPRORD256 ...) +(RotateAllRightInt32x16 ...) => (VPRORD512 ...) +(RotateAllRightInt64x2 ...) => (VPRORQ128 ...) +(RotateAllRightInt64x4 ...) => (VPRORQ256 ...) +(RotateAllRightInt64x8 ...) => (VPRORQ512 ...) +(RotateAllRightUint32x4 ...) => (VPRORD128 ...) +(RotateAllRightUint32x8 ...) => (VPRORD256 ...) +(RotateAllRightUint32x16 ...) => (VPRORD512 ...) +(RotateAllRightUint64x2 ...) => (VPRORQ128 ...) +(RotateAllRightUint64x4 ...) => (VPRORQ256 ...) +(RotateAllRightUint64x8 ...) => (VPRORQ512 ...) (RotateAllRightMaskedInt32x4 [a] x mask) => (VPRORDMasked128 [a] x (VPMOVVec32x4ToM mask)) (RotateAllRightMaskedInt32x8 [a] x mask) => (VPRORDMasked256 [a] x (VPMOVVec32x8ToM mask)) (RotateAllRightMaskedInt32x16 [a] x mask) => (VPRORDMasked512 [a] x (VPMOVVec32x16ToM mask)) @@ -1219,24 +1219,24 @@ (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x4 x y z mask) => (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x8 x y z mask) => (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x16 x y z mask) => (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) -(Set128Float32x8 [a] x y) => (VINSERTF128256 [a] x y) -(Set128Float64x4 [a] x y) => (VINSERTF128256 [a] x y) -(Set128Int8x32 [a] x y) => (VINSERTI128256 [a] x y) -(Set128Int16x16 [a] x y) => (VINSERTI128256 [a] x y) -(Set128Int32x8 [a] x y) => (VINSERTI128256 [a] x y) -(Set128Int64x4 [a] x y) => (VINSERTI128256 [a] x y) -(Set128Uint8x32 [a] x y) => (VINSERTI128256 [a] x y) -(Set128Uint16x16 [a] x y) => (VINSERTI128256 [a] x y) -(Set128Uint32x8 [a] x y) => (VINSERTI128256 [a] x y) -(Set128Uint64x4 [a] x y) => (VINSERTI128256 [a] x y) -(SetElemInt8x16 [a] x y) => (VPINSRB128 [a] x y) -(SetElemInt16x8 [a] x y) => (VPINSRW128 [a] x y) -(SetElemInt32x4 [a] x y) => (VPINSRD128 [a] x y) -(SetElemInt64x2 [a] x y) => (VPINSRQ128 [a] x y) -(SetElemUint8x16 [a] x y) => (VPINSRB128 [a] x y) -(SetElemUint16x8 [a] x y) => (VPINSRW128 [a] x y) -(SetElemUint32x4 [a] x y) => (VPINSRD128 [a] x y) -(SetElemUint64x2 [a] x y) => (VPINSRQ128 [a] x y) +(Set128Float32x8 ...) => (VINSERTF128256 ...) +(Set128Float64x4 ...) => (VINSERTF128256 ...) +(Set128Int8x32 ...) => (VINSERTI128256 ...) 
+(Set128Int16x16 ...) => (VINSERTI128256 ...) +(Set128Int32x8 ...) => (VINSERTI128256 ...) +(Set128Int64x4 ...) => (VINSERTI128256 ...) +(Set128Uint8x32 ...) => (VINSERTI128256 ...) +(Set128Uint16x16 ...) => (VINSERTI128256 ...) +(Set128Uint32x8 ...) => (VINSERTI128256 ...) +(Set128Uint64x4 ...) => (VINSERTI128256 ...) +(SetElemInt8x16 ...) => (VPINSRB128 ...) +(SetElemInt16x8 ...) => (VPINSRW128 ...) +(SetElemInt32x4 ...) => (VPINSRD128 ...) +(SetElemInt64x2 ...) => (VPINSRQ128 ...) +(SetElemUint8x16 ...) => (VPINSRB128 ...) +(SetElemUint16x8 ...) => (VPINSRW128 ...) +(SetElemUint32x4 ...) => (VPINSRD128 ...) +(SetElemUint64x2 ...) => (VPINSRQ128 ...) (ShiftAllLeftInt16x8 ...) => (VPSLLW128 ...) (ShiftAllLeftInt16x16 ...) => (VPSLLW256 ...) (ShiftAllLeftInt32x4 ...) => (VPSLLD128 ...) @@ -1251,24 +1251,24 @@ (ShiftAllLeftUint64x2 ...) => (VPSLLQ128 ...) (ShiftAllLeftUint64x4 ...) => (VPSLLQ256 ...) (ShiftAllLeftUint64x8 ...) => (VPSLLQ512 ...) -(ShiftAllLeftAndFillUpperFromInt16x8 [a] x y) => (VPSHLDW128 [a] x y) -(ShiftAllLeftAndFillUpperFromInt16x16 [a] x y) => (VPSHLDW256 [a] x y) -(ShiftAllLeftAndFillUpperFromInt16x32 [a] x y) => (VPSHLDW512 [a] x y) -(ShiftAllLeftAndFillUpperFromInt32x4 [a] x y) => (VPSHLDD128 [a] x y) -(ShiftAllLeftAndFillUpperFromInt32x8 [a] x y) => (VPSHLDD256 [a] x y) -(ShiftAllLeftAndFillUpperFromInt32x16 [a] x y) => (VPSHLDD512 [a] x y) -(ShiftAllLeftAndFillUpperFromInt64x2 [a] x y) => (VPSHLDQ128 [a] x y) -(ShiftAllLeftAndFillUpperFromInt64x4 [a] x y) => (VPSHLDQ256 [a] x y) -(ShiftAllLeftAndFillUpperFromInt64x8 [a] x y) => (VPSHLDQ512 [a] x y) -(ShiftAllLeftAndFillUpperFromUint16x8 [a] x y) => (VPSHLDW128 [a] x y) -(ShiftAllLeftAndFillUpperFromUint16x16 [a] x y) => (VPSHLDW256 [a] x y) -(ShiftAllLeftAndFillUpperFromUint16x32 [a] x y) => (VPSHLDW512 [a] x y) -(ShiftAllLeftAndFillUpperFromUint32x4 [a] x y) => (VPSHLDD128 [a] x y) -(ShiftAllLeftAndFillUpperFromUint32x8 [a] x y) => (VPSHLDD256 [a] x y) -(ShiftAllLeftAndFillUpperFromUint32x16 [a] x y) => (VPSHLDD512 [a] x y) -(ShiftAllLeftAndFillUpperFromUint64x2 [a] x y) => (VPSHLDQ128 [a] x y) -(ShiftAllLeftAndFillUpperFromUint64x4 [a] x y) => (VPSHLDQ256 [a] x y) -(ShiftAllLeftAndFillUpperFromUint64x8 [a] x y) => (VPSHLDQ512 [a] x y) +(ShiftAllLeftAndFillUpperFromInt16x8 ...) => (VPSHLDW128 ...) +(ShiftAllLeftAndFillUpperFromInt16x16 ...) => (VPSHLDW256 ...) +(ShiftAllLeftAndFillUpperFromInt16x32 ...) => (VPSHLDW512 ...) +(ShiftAllLeftAndFillUpperFromInt32x4 ...) => (VPSHLDD128 ...) +(ShiftAllLeftAndFillUpperFromInt32x8 ...) => (VPSHLDD256 ...) +(ShiftAllLeftAndFillUpperFromInt32x16 ...) => (VPSHLDD512 ...) +(ShiftAllLeftAndFillUpperFromInt64x2 ...) => (VPSHLDQ128 ...) +(ShiftAllLeftAndFillUpperFromInt64x4 ...) => (VPSHLDQ256 ...) +(ShiftAllLeftAndFillUpperFromInt64x8 ...) => (VPSHLDQ512 ...) +(ShiftAllLeftAndFillUpperFromUint16x8 ...) => (VPSHLDW128 ...) +(ShiftAllLeftAndFillUpperFromUint16x16 ...) => (VPSHLDW256 ...) +(ShiftAllLeftAndFillUpperFromUint16x32 ...) => (VPSHLDW512 ...) +(ShiftAllLeftAndFillUpperFromUint32x4 ...) => (VPSHLDD128 ...) +(ShiftAllLeftAndFillUpperFromUint32x8 ...) => (VPSHLDD256 ...) +(ShiftAllLeftAndFillUpperFromUint32x16 ...) => (VPSHLDD512 ...) +(ShiftAllLeftAndFillUpperFromUint64x2 ...) => (VPSHLDQ128 ...) +(ShiftAllLeftAndFillUpperFromUint64x4 ...) => (VPSHLDQ256 ...) +(ShiftAllLeftAndFillUpperFromUint64x8 ...) => (VPSHLDQ512 ...) 
(ShiftAllLeftAndFillUpperFromMaskedInt16x8 [a] x y mask) => (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) (ShiftAllLeftAndFillUpperFromMaskedInt16x16 [a] x y mask) => (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) (ShiftAllLeftAndFillUpperFromMaskedInt16x32 [a] x y mask) => (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) @@ -1307,24 +1307,24 @@ (ShiftAllRightUint64x2 ...) => (VPSRLQ128 ...) (ShiftAllRightUint64x4 ...) => (VPSRLQ256 ...) (ShiftAllRightUint64x8 ...) => (VPSRLQ512 ...) -(ShiftAllRightAndFillUpperFromInt16x8 [a] x y) => (VPSHRDW128 [a] x y) -(ShiftAllRightAndFillUpperFromInt16x16 [a] x y) => (VPSHRDW256 [a] x y) -(ShiftAllRightAndFillUpperFromInt16x32 [a] x y) => (VPSHRDW512 [a] x y) -(ShiftAllRightAndFillUpperFromInt32x4 [a] x y) => (VPSHRDD128 [a] x y) -(ShiftAllRightAndFillUpperFromInt32x8 [a] x y) => (VPSHRDD256 [a] x y) -(ShiftAllRightAndFillUpperFromInt32x16 [a] x y) => (VPSHRDD512 [a] x y) -(ShiftAllRightAndFillUpperFromInt64x2 [a] x y) => (VPSHRDQ128 [a] x y) -(ShiftAllRightAndFillUpperFromInt64x4 [a] x y) => (VPSHRDQ256 [a] x y) -(ShiftAllRightAndFillUpperFromInt64x8 [a] x y) => (VPSHRDQ512 [a] x y) -(ShiftAllRightAndFillUpperFromUint16x8 [a] x y) => (VPSHRDW128 [a] x y) -(ShiftAllRightAndFillUpperFromUint16x16 [a] x y) => (VPSHRDW256 [a] x y) -(ShiftAllRightAndFillUpperFromUint16x32 [a] x y) => (VPSHRDW512 [a] x y) -(ShiftAllRightAndFillUpperFromUint32x4 [a] x y) => (VPSHRDD128 [a] x y) -(ShiftAllRightAndFillUpperFromUint32x8 [a] x y) => (VPSHRDD256 [a] x y) -(ShiftAllRightAndFillUpperFromUint32x16 [a] x y) => (VPSHRDD512 [a] x y) -(ShiftAllRightAndFillUpperFromUint64x2 [a] x y) => (VPSHRDQ128 [a] x y) -(ShiftAllRightAndFillUpperFromUint64x4 [a] x y) => (VPSHRDQ256 [a] x y) -(ShiftAllRightAndFillUpperFromUint64x8 [a] x y) => (VPSHRDQ512 [a] x y) +(ShiftAllRightAndFillUpperFromInt16x8 ...) => (VPSHRDW128 ...) +(ShiftAllRightAndFillUpperFromInt16x16 ...) => (VPSHRDW256 ...) +(ShiftAllRightAndFillUpperFromInt16x32 ...) => (VPSHRDW512 ...) +(ShiftAllRightAndFillUpperFromInt32x4 ...) => (VPSHRDD128 ...) +(ShiftAllRightAndFillUpperFromInt32x8 ...) => (VPSHRDD256 ...) +(ShiftAllRightAndFillUpperFromInt32x16 ...) => (VPSHRDD512 ...) +(ShiftAllRightAndFillUpperFromInt64x2 ...) => (VPSHRDQ128 ...) +(ShiftAllRightAndFillUpperFromInt64x4 ...) => (VPSHRDQ256 ...) +(ShiftAllRightAndFillUpperFromInt64x8 ...) => (VPSHRDQ512 ...) +(ShiftAllRightAndFillUpperFromUint16x8 ...) => (VPSHRDW128 ...) +(ShiftAllRightAndFillUpperFromUint16x16 ...) => (VPSHRDW256 ...) +(ShiftAllRightAndFillUpperFromUint16x32 ...) => (VPSHRDW512 ...) +(ShiftAllRightAndFillUpperFromUint32x4 ...) => (VPSHRDD128 ...) +(ShiftAllRightAndFillUpperFromUint32x8 ...) => (VPSHRDD256 ...) +(ShiftAllRightAndFillUpperFromUint32x16 ...) => (VPSHRDD512 ...) +(ShiftAllRightAndFillUpperFromUint64x2 ...) => (VPSHRDQ128 ...) +(ShiftAllRightAndFillUpperFromUint64x4 ...) => (VPSHRDQ256 ...) +(ShiftAllRightAndFillUpperFromUint64x8 ...) => (VPSHRDQ512 ...) 
(ShiftAllRightAndFillUpperFromMaskedInt16x8 [a] x y mask) => (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) (ShiftAllRightAndFillUpperFromMaskedInt16x16 [a] x y mask) => (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) (ShiftAllRightAndFillUpperFromMaskedInt16x32 [a] x y mask) => (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 4dd1fcbcb75815..98bc0779f65e42 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1694,11 +1694,14 @@ func rewriteValueAMD64(v *Value) bool { case OpGaloisFieldAffineTransformInversedMaskedUint8x64: return rewriteValueAMD64_OpGaloisFieldAffineTransformInversedMaskedUint8x64(v) case OpGaloisFieldAffineTransformInversedUint8x16: - return rewriteValueAMD64_OpGaloisFieldAffineTransformInversedUint8x16(v) + v.Op = OpAMD64VGF2P8AFFINEINVQB128 + return true case OpGaloisFieldAffineTransformInversedUint8x32: - return rewriteValueAMD64_OpGaloisFieldAffineTransformInversedUint8x32(v) + v.Op = OpAMD64VGF2P8AFFINEINVQB256 + return true case OpGaloisFieldAffineTransformInversedUint8x64: - return rewriteValueAMD64_OpGaloisFieldAffineTransformInversedUint8x64(v) + v.Op = OpAMD64VGF2P8AFFINEINVQB512 + return true case OpGaloisFieldAffineTransformMaskedUint8x16: return rewriteValueAMD64_OpGaloisFieldAffineTransformMaskedUint8x16(v) case OpGaloisFieldAffineTransformMaskedUint8x32: @@ -1706,11 +1709,14 @@ func rewriteValueAMD64(v *Value) bool { case OpGaloisFieldAffineTransformMaskedUint8x64: return rewriteValueAMD64_OpGaloisFieldAffineTransformMaskedUint8x64(v) case OpGaloisFieldAffineTransformUint8x16: - return rewriteValueAMD64_OpGaloisFieldAffineTransformUint8x16(v) + v.Op = OpAMD64VGF2P8AFFINEQB128 + return true case OpGaloisFieldAffineTransformUint8x32: - return rewriteValueAMD64_OpGaloisFieldAffineTransformUint8x32(v) + v.Op = OpAMD64VGF2P8AFFINEQB256 + return true case OpGaloisFieldAffineTransformUint8x64: - return rewriteValueAMD64_OpGaloisFieldAffineTransformUint8x64(v) + v.Op = OpAMD64VGF2P8AFFINEQB512 + return true case OpGaloisFieldMulMaskedUint8x16: return rewriteValueAMD64_OpGaloisFieldMulMaskedUint8x16(v) case OpGaloisFieldMulMaskedUint8x32: @@ -1727,25 +1733,35 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64VGF2P8MULB512 return true case OpGet128Float32x8: - return rewriteValueAMD64_OpGet128Float32x8(v) + v.Op = OpAMD64VEXTRACTF128128 + return true case OpGet128Float64x4: - return rewriteValueAMD64_OpGet128Float64x4(v) + v.Op = OpAMD64VEXTRACTF128128 + return true case OpGet128Int16x16: - return rewriteValueAMD64_OpGet128Int16x16(v) + v.Op = OpAMD64VEXTRACTI128128 + return true case OpGet128Int32x8: - return rewriteValueAMD64_OpGet128Int32x8(v) + v.Op = OpAMD64VEXTRACTI128128 + return true case OpGet128Int64x4: - return rewriteValueAMD64_OpGet128Int64x4(v) + v.Op = OpAMD64VEXTRACTI128128 + return true case OpGet128Int8x32: - return rewriteValueAMD64_OpGet128Int8x32(v) + v.Op = OpAMD64VEXTRACTI128128 + return true case OpGet128Uint16x16: - return rewriteValueAMD64_OpGet128Uint16x16(v) + v.Op = OpAMD64VEXTRACTI128128 + return true case OpGet128Uint32x8: - return rewriteValueAMD64_OpGet128Uint32x8(v) + v.Op = OpAMD64VEXTRACTI128128 + return true case OpGet128Uint64x4: - return rewriteValueAMD64_OpGet128Uint64x4(v) + v.Op = OpAMD64VEXTRACTI128128 + return true case OpGet128Uint8x32: - return rewriteValueAMD64_OpGet128Uint8x32(v) + v.Op = OpAMD64VEXTRACTI128128 + return true case 
OpGetCallerPC: v.Op = OpAMD64LoweredGetCallerPC return true @@ -1756,21 +1772,29 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64LoweredGetClosurePtr return true case OpGetElemInt16x8: - return rewriteValueAMD64_OpGetElemInt16x8(v) + v.Op = OpAMD64VPEXTRW128 + return true case OpGetElemInt32x4: - return rewriteValueAMD64_OpGetElemInt32x4(v) + v.Op = OpAMD64VPEXTRD128 + return true case OpGetElemInt64x2: - return rewriteValueAMD64_OpGetElemInt64x2(v) + v.Op = OpAMD64VPEXTRQ128 + return true case OpGetElemInt8x16: - return rewriteValueAMD64_OpGetElemInt8x16(v) + v.Op = OpAMD64VPEXTRB128 + return true case OpGetElemUint16x8: - return rewriteValueAMD64_OpGetElemUint16x8(v) + v.Op = OpAMD64VPEXTRW128 + return true case OpGetElemUint32x4: - return rewriteValueAMD64_OpGetElemUint32x4(v) + v.Op = OpAMD64VPEXTRD128 + return true case OpGetElemUint64x2: - return rewriteValueAMD64_OpGetElemUint64x2(v) + v.Op = OpAMD64VPEXTRQ128 + return true case OpGetElemUint8x16: - return rewriteValueAMD64_OpGetElemUint8x16(v) + v.Op = OpAMD64VPEXTRB128 + return true case OpGetG: return rewriteValueAMD64_OpGetG(v) case OpGreaterEqualFloat32x16: @@ -3407,17 +3431,23 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64PrefetchNTA return true case OpRotateAllLeftInt32x16: - return rewriteValueAMD64_OpRotateAllLeftInt32x16(v) + v.Op = OpAMD64VPROLD512 + return true case OpRotateAllLeftInt32x4: - return rewriteValueAMD64_OpRotateAllLeftInt32x4(v) + v.Op = OpAMD64VPROLD128 + return true case OpRotateAllLeftInt32x8: - return rewriteValueAMD64_OpRotateAllLeftInt32x8(v) + v.Op = OpAMD64VPROLD256 + return true case OpRotateAllLeftInt64x2: - return rewriteValueAMD64_OpRotateAllLeftInt64x2(v) + v.Op = OpAMD64VPROLQ128 + return true case OpRotateAllLeftInt64x4: - return rewriteValueAMD64_OpRotateAllLeftInt64x4(v) + v.Op = OpAMD64VPROLQ256 + return true case OpRotateAllLeftInt64x8: - return rewriteValueAMD64_OpRotateAllLeftInt64x8(v) + v.Op = OpAMD64VPROLQ512 + return true case OpRotateAllLeftMaskedInt32x16: return rewriteValueAMD64_OpRotateAllLeftMaskedInt32x16(v) case OpRotateAllLeftMaskedInt32x4: @@ -3443,29 +3473,41 @@ func rewriteValueAMD64(v *Value) bool { case OpRotateAllLeftMaskedUint64x8: return rewriteValueAMD64_OpRotateAllLeftMaskedUint64x8(v) case OpRotateAllLeftUint32x16: - return rewriteValueAMD64_OpRotateAllLeftUint32x16(v) + v.Op = OpAMD64VPROLD512 + return true case OpRotateAllLeftUint32x4: - return rewriteValueAMD64_OpRotateAllLeftUint32x4(v) + v.Op = OpAMD64VPROLD128 + return true case OpRotateAllLeftUint32x8: - return rewriteValueAMD64_OpRotateAllLeftUint32x8(v) + v.Op = OpAMD64VPROLD256 + return true case OpRotateAllLeftUint64x2: - return rewriteValueAMD64_OpRotateAllLeftUint64x2(v) + v.Op = OpAMD64VPROLQ128 + return true case OpRotateAllLeftUint64x4: - return rewriteValueAMD64_OpRotateAllLeftUint64x4(v) + v.Op = OpAMD64VPROLQ256 + return true case OpRotateAllLeftUint64x8: - return rewriteValueAMD64_OpRotateAllLeftUint64x8(v) + v.Op = OpAMD64VPROLQ512 + return true case OpRotateAllRightInt32x16: - return rewriteValueAMD64_OpRotateAllRightInt32x16(v) + v.Op = OpAMD64VPRORD512 + return true case OpRotateAllRightInt32x4: - return rewriteValueAMD64_OpRotateAllRightInt32x4(v) + v.Op = OpAMD64VPRORD128 + return true case OpRotateAllRightInt32x8: - return rewriteValueAMD64_OpRotateAllRightInt32x8(v) + v.Op = OpAMD64VPRORD256 + return true case OpRotateAllRightInt64x2: - return rewriteValueAMD64_OpRotateAllRightInt64x2(v) + v.Op = OpAMD64VPRORQ128 + return true case OpRotateAllRightInt64x4: - return 
rewriteValueAMD64_OpRotateAllRightInt64x4(v) + v.Op = OpAMD64VPRORQ256 + return true case OpRotateAllRightInt64x8: - return rewriteValueAMD64_OpRotateAllRightInt64x8(v) + v.Op = OpAMD64VPRORQ512 + return true case OpRotateAllRightMaskedInt32x16: return rewriteValueAMD64_OpRotateAllRightMaskedInt32x16(v) case OpRotateAllRightMaskedInt32x4: @@ -3491,17 +3533,23 @@ func rewriteValueAMD64(v *Value) bool { case OpRotateAllRightMaskedUint64x8: return rewriteValueAMD64_OpRotateAllRightMaskedUint64x8(v) case OpRotateAllRightUint32x16: - return rewriteValueAMD64_OpRotateAllRightUint32x16(v) + v.Op = OpAMD64VPRORD512 + return true case OpRotateAllRightUint32x4: - return rewriteValueAMD64_OpRotateAllRightUint32x4(v) + v.Op = OpAMD64VPRORD128 + return true case OpRotateAllRightUint32x8: - return rewriteValueAMD64_OpRotateAllRightUint32x8(v) + v.Op = OpAMD64VPRORD256 + return true case OpRotateAllRightUint64x2: - return rewriteValueAMD64_OpRotateAllRightUint64x2(v) + v.Op = OpAMD64VPRORQ128 + return true case OpRotateAllRightUint64x4: - return rewriteValueAMD64_OpRotateAllRightUint64x4(v) + v.Op = OpAMD64VPRORQ256 + return true case OpRotateAllRightUint64x8: - return rewriteValueAMD64_OpRotateAllRightUint64x8(v) + v.Op = OpAMD64VPRORQ512 + return true case OpRotateLeft16: v.Op = OpAMD64ROLW return true @@ -3937,59 +3985,86 @@ func rewriteValueAMD64(v *Value) bool { case OpSelectN: return rewriteValueAMD64_OpSelectN(v) case OpSet128Float32x8: - return rewriteValueAMD64_OpSet128Float32x8(v) + v.Op = OpAMD64VINSERTF128256 + return true case OpSet128Float64x4: - return rewriteValueAMD64_OpSet128Float64x4(v) + v.Op = OpAMD64VINSERTF128256 + return true case OpSet128Int16x16: - return rewriteValueAMD64_OpSet128Int16x16(v) + v.Op = OpAMD64VINSERTI128256 + return true case OpSet128Int32x8: - return rewriteValueAMD64_OpSet128Int32x8(v) + v.Op = OpAMD64VINSERTI128256 + return true case OpSet128Int64x4: - return rewriteValueAMD64_OpSet128Int64x4(v) + v.Op = OpAMD64VINSERTI128256 + return true case OpSet128Int8x32: - return rewriteValueAMD64_OpSet128Int8x32(v) + v.Op = OpAMD64VINSERTI128256 + return true case OpSet128Uint16x16: - return rewriteValueAMD64_OpSet128Uint16x16(v) + v.Op = OpAMD64VINSERTI128256 + return true case OpSet128Uint32x8: - return rewriteValueAMD64_OpSet128Uint32x8(v) + v.Op = OpAMD64VINSERTI128256 + return true case OpSet128Uint64x4: - return rewriteValueAMD64_OpSet128Uint64x4(v) + v.Op = OpAMD64VINSERTI128256 + return true case OpSet128Uint8x32: - return rewriteValueAMD64_OpSet128Uint8x32(v) + v.Op = OpAMD64VINSERTI128256 + return true case OpSetElemInt16x8: - return rewriteValueAMD64_OpSetElemInt16x8(v) + v.Op = OpAMD64VPINSRW128 + return true case OpSetElemInt32x4: - return rewriteValueAMD64_OpSetElemInt32x4(v) + v.Op = OpAMD64VPINSRD128 + return true case OpSetElemInt64x2: - return rewriteValueAMD64_OpSetElemInt64x2(v) + v.Op = OpAMD64VPINSRQ128 + return true case OpSetElemInt8x16: - return rewriteValueAMD64_OpSetElemInt8x16(v) + v.Op = OpAMD64VPINSRB128 + return true case OpSetElemUint16x8: - return rewriteValueAMD64_OpSetElemUint16x8(v) + v.Op = OpAMD64VPINSRW128 + return true case OpSetElemUint32x4: - return rewriteValueAMD64_OpSetElemUint32x4(v) + v.Op = OpAMD64VPINSRD128 + return true case OpSetElemUint64x2: - return rewriteValueAMD64_OpSetElemUint64x2(v) + v.Op = OpAMD64VPINSRQ128 + return true case OpSetElemUint8x16: - return rewriteValueAMD64_OpSetElemUint8x16(v) + v.Op = OpAMD64VPINSRB128 + return true case OpShiftAllLeftAndFillUpperFromInt16x16: - return 
rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt16x16(v) + v.Op = OpAMD64VPSHLDW256 + return true case OpShiftAllLeftAndFillUpperFromInt16x32: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt16x32(v) + v.Op = OpAMD64VPSHLDW512 + return true case OpShiftAllLeftAndFillUpperFromInt16x8: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt16x8(v) + v.Op = OpAMD64VPSHLDW128 + return true case OpShiftAllLeftAndFillUpperFromInt32x16: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt32x16(v) + v.Op = OpAMD64VPSHLDD512 + return true case OpShiftAllLeftAndFillUpperFromInt32x4: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt32x4(v) + v.Op = OpAMD64VPSHLDD128 + return true case OpShiftAllLeftAndFillUpperFromInt32x8: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt32x8(v) + v.Op = OpAMD64VPSHLDD256 + return true case OpShiftAllLeftAndFillUpperFromInt64x2: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt64x2(v) + v.Op = OpAMD64VPSHLDQ128 + return true case OpShiftAllLeftAndFillUpperFromInt64x4: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt64x4(v) + v.Op = OpAMD64VPSHLDQ256 + return true case OpShiftAllLeftAndFillUpperFromInt64x8: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt64x8(v) + v.Op = OpAMD64VPSHLDQ512 + return true case OpShiftAllLeftAndFillUpperFromMaskedInt16x16: return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt16x16(v) case OpShiftAllLeftAndFillUpperFromMaskedInt16x32: @@ -4027,23 +4102,32 @@ func rewriteValueAMD64(v *Value) bool { case OpShiftAllLeftAndFillUpperFromMaskedUint64x8: return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint64x8(v) case OpShiftAllLeftAndFillUpperFromUint16x16: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint16x16(v) + v.Op = OpAMD64VPSHLDW256 + return true case OpShiftAllLeftAndFillUpperFromUint16x32: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint16x32(v) + v.Op = OpAMD64VPSHLDW512 + return true case OpShiftAllLeftAndFillUpperFromUint16x8: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint16x8(v) + v.Op = OpAMD64VPSHLDW128 + return true case OpShiftAllLeftAndFillUpperFromUint32x16: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint32x16(v) + v.Op = OpAMD64VPSHLDD512 + return true case OpShiftAllLeftAndFillUpperFromUint32x4: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint32x4(v) + v.Op = OpAMD64VPSHLDD128 + return true case OpShiftAllLeftAndFillUpperFromUint32x8: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint32x8(v) + v.Op = OpAMD64VPSHLDD256 + return true case OpShiftAllLeftAndFillUpperFromUint64x2: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint64x2(v) + v.Op = OpAMD64VPSHLDQ128 + return true case OpShiftAllLeftAndFillUpperFromUint64x4: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint64x4(v) + v.Op = OpAMD64VPSHLDQ256 + return true case OpShiftAllLeftAndFillUpperFromUint64x8: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint64x8(v) + v.Op = OpAMD64VPSHLDQ512 + return true case OpShiftAllLeftInt16x16: v.Op = OpAMD64VPSLLW256 return true @@ -4099,23 +4183,32 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64VPSLLQ512 return true case OpShiftAllRightAndFillUpperFromInt16x16: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt16x16(v) + v.Op = OpAMD64VPSHRDW256 + return true case OpShiftAllRightAndFillUpperFromInt16x32: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt16x32(v) + v.Op = 
OpAMD64VPSHRDW512 + return true case OpShiftAllRightAndFillUpperFromInt16x8: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt16x8(v) + v.Op = OpAMD64VPSHRDW128 + return true case OpShiftAllRightAndFillUpperFromInt32x16: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt32x16(v) + v.Op = OpAMD64VPSHRDD512 + return true case OpShiftAllRightAndFillUpperFromInt32x4: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt32x4(v) + v.Op = OpAMD64VPSHRDD128 + return true case OpShiftAllRightAndFillUpperFromInt32x8: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt32x8(v) + v.Op = OpAMD64VPSHRDD256 + return true case OpShiftAllRightAndFillUpperFromInt64x2: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt64x2(v) + v.Op = OpAMD64VPSHRDQ128 + return true case OpShiftAllRightAndFillUpperFromInt64x4: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt64x4(v) + v.Op = OpAMD64VPSHRDQ256 + return true case OpShiftAllRightAndFillUpperFromInt64x8: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt64x8(v) + v.Op = OpAMD64VPSHRDQ512 + return true case OpShiftAllRightAndFillUpperFromMaskedInt16x16: return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt16x16(v) case OpShiftAllRightAndFillUpperFromMaskedInt16x32: @@ -4153,23 +4246,32 @@ func rewriteValueAMD64(v *Value) bool { case OpShiftAllRightAndFillUpperFromMaskedUint64x8: return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint64x8(v) case OpShiftAllRightAndFillUpperFromUint16x16: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint16x16(v) + v.Op = OpAMD64VPSHRDW256 + return true case OpShiftAllRightAndFillUpperFromUint16x32: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint16x32(v) + v.Op = OpAMD64VPSHRDW512 + return true case OpShiftAllRightAndFillUpperFromUint16x8: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint16x8(v) + v.Op = OpAMD64VPSHRDW128 + return true case OpShiftAllRightAndFillUpperFromUint32x16: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint32x16(v) + v.Op = OpAMD64VPSHRDD512 + return true case OpShiftAllRightAndFillUpperFromUint32x4: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint32x4(v) + v.Op = OpAMD64VPSHRDD128 + return true case OpShiftAllRightAndFillUpperFromUint32x8: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint32x8(v) + v.Op = OpAMD64VPSHRDD256 + return true case OpShiftAllRightAndFillUpperFromUint64x2: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint64x2(v) + v.Op = OpAMD64VPSHRDQ128 + return true case OpShiftAllRightAndFillUpperFromUint64x4: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint64x4(v) + v.Op = OpAMD64VPSHRDQ256 + return true case OpShiftAllRightAndFillUpperFromUint64x8: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint64x8(v) + v.Op = OpAMD64VPSHRDQ512 + return true case OpShiftAllRightInt16x16: v.Op = OpAMD64VPSRLW256 return true @@ -33974,51 +34076,6 @@ func rewriteValueAMD64_OpGaloisFieldAffineTransformInversedMaskedUint8x64(v *Val return true } } -func rewriteValueAMD64_OpGaloisFieldAffineTransformInversedUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GaloisFieldAffineTransformInversedUint8x16 [a] x y) - // result: (VGF2P8AFFINEINVQB128 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VGF2P8AFFINEINVQB128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func 
rewriteValueAMD64_OpGaloisFieldAffineTransformInversedUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GaloisFieldAffineTransformInversedUint8x32 [a] x y) - // result: (VGF2P8AFFINEINVQB256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VGF2P8AFFINEINVQB256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpGaloisFieldAffineTransformInversedUint8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GaloisFieldAffineTransformInversedUint8x64 [a] x y) - // result: (VGF2P8AFFINEINVQB512 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VGF2P8AFFINEINVQB512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} func rewriteValueAMD64_OpGaloisFieldAffineTransformMaskedUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -34079,51 +34136,6 @@ func rewriteValueAMD64_OpGaloisFieldAffineTransformMaskedUint8x64(v *Value) bool return true } } -func rewriteValueAMD64_OpGaloisFieldAffineTransformUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GaloisFieldAffineTransformUint8x16 [a] x y) - // result: (VGF2P8AFFINEQB128 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VGF2P8AFFINEQB128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpGaloisFieldAffineTransformUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GaloisFieldAffineTransformUint8x32 [a] x y) - // result: (VGF2P8AFFINEQB256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VGF2P8AFFINEQB256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpGaloisFieldAffineTransformUint8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GaloisFieldAffineTransformUint8x64 [a] x y) - // result: (VGF2P8AFFINEQB512 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VGF2P8AFFINEQB512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} func rewriteValueAMD64_OpGaloisFieldMulMaskedUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -34178,240 +34190,6 @@ func rewriteValueAMD64_OpGaloisFieldMulMaskedUint8x64(v *Value) bool { return true } } -func rewriteValueAMD64_OpGet128Float32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (Get128Float32x8 [a] x) - // result: (VEXTRACTF128128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VEXTRACTF128128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGet128Float64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (Get128Float64x4 [a] x) - // result: (VEXTRACTF128128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VEXTRACTF128128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGet128Int16x16(v *Value) bool { - v_0 := v.Args[0] - // match: (Get128Int16x16 [a] x) - // result: (VEXTRACTI128128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGet128Int32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (Get128Int32x8 [a] x) - // result: (VEXTRACTI128128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - 
return true - } -} -func rewriteValueAMD64_OpGet128Int64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (Get128Int64x4 [a] x) - // result: (VEXTRACTI128128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGet128Int8x32(v *Value) bool { - v_0 := v.Args[0] - // match: (Get128Int8x32 [a] x) - // result: (VEXTRACTI128128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGet128Uint16x16(v *Value) bool { - v_0 := v.Args[0] - // match: (Get128Uint16x16 [a] x) - // result: (VEXTRACTI128128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGet128Uint32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (Get128Uint32x8 [a] x) - // result: (VEXTRACTI128128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGet128Uint64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (Get128Uint64x4 [a] x) - // result: (VEXTRACTI128128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGet128Uint8x32(v *Value) bool { - v_0 := v.Args[0] - // match: (Get128Uint8x32 [a] x) - // result: (VEXTRACTI128128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetElemInt16x8(v *Value) bool { - v_0 := v.Args[0] - // match: (GetElemInt16x8 [a] x) - // result: (VPEXTRW128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPEXTRW128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetElemInt32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (GetElemInt32x4 [a] x) - // result: (VPEXTRD128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPEXTRD128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetElemInt64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (GetElemInt64x2 [a] x) - // result: (VPEXTRQ128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPEXTRQ128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetElemInt8x16(v *Value) bool { - v_0 := v.Args[0] - // match: (GetElemInt8x16 [a] x) - // result: (VPEXTRB128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPEXTRB128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetElemUint16x8(v *Value) bool { - v_0 := v.Args[0] - // match: (GetElemUint16x8 [a] x) - // result: (VPEXTRW128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPEXTRW128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetElemUint32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (GetElemUint32x4 [a] x) - // result: (VPEXTRD128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPEXTRD128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func 
rewriteValueAMD64_OpGetElemUint64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (GetElemUint64x2 [a] x) - // result: (VPEXTRQ128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPEXTRQ128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetElemUint8x16(v *Value) bool { - v_0 := v.Args[0] - // match: (GetElemUint8x16 [a] x) - // result: (VPEXTRB128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPEXTRB128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} func rewriteValueAMD64_OpGetG(v *Value) bool { v_0 := v.Args[0] // match: (GetG mem) @@ -44964,84 +44742,6 @@ func rewriteValueAMD64_OpPopCountMaskedUint8x64(v *Value) bool { return true } } -func rewriteValueAMD64_OpRotateAllLeftInt32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (RotateAllLeftInt32x16 [a] x) - // result: (VPROLD512 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPROLD512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRotateAllLeftInt32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (RotateAllLeftInt32x4 [a] x) - // result: (VPROLD128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPROLD128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRotateAllLeftInt32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (RotateAllLeftInt32x8 [a] x) - // result: (VPROLD256 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPROLD256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRotateAllLeftInt64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (RotateAllLeftInt64x2 [a] x) - // result: (VPROLQ128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPROLQ128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRotateAllLeftInt64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (RotateAllLeftInt64x4 [a] x) - // result: (VPROLQ256 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPROLQ256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRotateAllLeftInt64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (RotateAllLeftInt64x8 [a] x) - // result: (VPROLQ512 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPROLQ512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} func rewriteValueAMD64_OpRotateAllLeftMaskedInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -45258,162 +44958,6 @@ func rewriteValueAMD64_OpRotateAllLeftMaskedUint64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpRotateAllLeftUint32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (RotateAllLeftUint32x16 [a] x) - // result: (VPROLD512 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPROLD512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRotateAllLeftUint32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (RotateAllLeftUint32x4 [a] x) - // result: (VPROLD128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPROLD128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRotateAllLeftUint32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (RotateAllLeftUint32x8 [a] x) - // result: (VPROLD256 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := 
v_0 - v.reset(OpAMD64VPROLD256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRotateAllLeftUint64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (RotateAllLeftUint64x2 [a] x) - // result: (VPROLQ128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPROLQ128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRotateAllLeftUint64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (RotateAllLeftUint64x4 [a] x) - // result: (VPROLQ256 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPROLQ256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRotateAllLeftUint64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (RotateAllLeftUint64x8 [a] x) - // result: (VPROLQ512 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPROLQ512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRotateAllRightInt32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (RotateAllRightInt32x16 [a] x) - // result: (VPRORD512 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPRORD512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRotateAllRightInt32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (RotateAllRightInt32x4 [a] x) - // result: (VPRORD128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPRORD128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRotateAllRightInt32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (RotateAllRightInt32x8 [a] x) - // result: (VPRORD256 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPRORD256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRotateAllRightInt64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (RotateAllRightInt64x2 [a] x) - // result: (VPRORQ128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPRORQ128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRotateAllRightInt64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (RotateAllRightInt64x4 [a] x) - // result: (VPRORQ256 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPRORQ256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRotateAllRightInt64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (RotateAllRightInt64x8 [a] x) - // result: (VPRORQ512 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPRORQ512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} func rewriteValueAMD64_OpRotateAllRightMaskedInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -45630,84 +45174,6 @@ func rewriteValueAMD64_OpRotateAllRightMaskedUint64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpRotateAllRightUint32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (RotateAllRightUint32x16 [a] x) - // result: (VPRORD512 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPRORD512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRotateAllRightUint32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (RotateAllRightUint32x4 [a] x) - // result: (VPRORD128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPRORD128) - v.AuxInt = 
int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRotateAllRightUint32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (RotateAllRightUint32x8 [a] x) - // result: (VPRORD256 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPRORD256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRotateAllRightUint64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (RotateAllRightUint64x2 [a] x) - // result: (VPRORQ128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPRORQ128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRotateAllRightUint64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (RotateAllRightUint64x4 [a] x) - // result: (VPRORQ256 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPRORQ256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRotateAllRightUint64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (RotateAllRightUint64x8 [a] x) - // result: (VPRORQ512 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPRORQ512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} func rewriteValueAMD64_OpRotateLeftMaskedInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -48805,411 +48271,6 @@ func rewriteValueAMD64_OpSelectN(v *Value) bool { } return false } -func rewriteValueAMD64_OpSet128Float32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (Set128Float32x8 [a] x y) - // result: (VINSERTF128256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTF128256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSet128Float64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (Set128Float64x4 [a] x y) - // result: (VINSERTF128256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTF128256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSet128Int16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (Set128Int16x16 [a] x y) - // result: (VINSERTI128256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSet128Int32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (Set128Int32x8 [a] x y) - // result: (VINSERTI128256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSet128Int64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (Set128Int64x4 [a] x y) - // result: (VINSERTI128256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSet128Int8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (Set128Int8x32 [a] x y) - // result: (VINSERTI128256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSet128Uint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: 
(Set128Uint16x16 [a] x y) - // result: (VINSERTI128256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSet128Uint32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (Set128Uint32x8 [a] x y) - // result: (VINSERTI128256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSet128Uint64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (Set128Uint64x4 [a] x y) - // result: (VINSERTI128256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSet128Uint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (Set128Uint8x32 [a] x y) - // result: (VINSERTI128256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetElemInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetElemInt16x8 [a] x y) - // result: (VPINSRW128 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPINSRW128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetElemInt32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetElemInt32x4 [a] x y) - // result: (VPINSRD128 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPINSRD128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetElemInt64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetElemInt64x2 [a] x y) - // result: (VPINSRQ128 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPINSRQ128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetElemInt8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetElemInt8x16 [a] x y) - // result: (VPINSRB128 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPINSRB128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetElemUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetElemUint16x8 [a] x y) - // result: (VPINSRW128 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPINSRW128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetElemUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetElemUint32x4 [a] x y) - // result: (VPINSRD128 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPINSRD128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetElemUint64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetElemUint64x2 [a] x y) - // result: (VPINSRQ128 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPINSRQ128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetElemUint8x16(v *Value) bool { 
- v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetElemUint8x16 [a] x y) - // result: (VPINSRB128 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPINSRB128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromInt16x16 [a] x y) - // result: (VPSHLDW256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHLDW256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromInt16x32 [a] x y) - // result: (VPSHLDW512 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHLDW512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromInt16x8 [a] x y) - // result: (VPSHLDW128 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHLDW128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromInt32x16 [a] x y) - // result: (VPSHLDD512 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHLDD512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromInt32x4 [a] x y) - // result: (VPSHLDD128 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHLDD128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromInt32x8 [a] x y) - // result: (VPSHLDD256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHLDD256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromInt64x2 [a] x y) - // result: (VPSHLDQ128 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHLDQ128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromInt64x4 [a] x y) - // result: (VPSHLDQ256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHLDQ256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromInt64x8 [a] x y) - // result: (VPSHLDQ512 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHLDQ512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return 
true - } -} func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -49570,141 +48631,6 @@ func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint64x8(v *Value) bo return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromUint16x16 [a] x y) - // result: (VPSHLDW256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHLDW256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromUint16x32 [a] x y) - // result: (VPSHLDW512 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHLDW512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromUint16x8 [a] x y) - // result: (VPSHLDW128 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHLDW128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromUint32x16 [a] x y) - // result: (VPSHLDD512 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHLDD512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromUint32x4 [a] x y) - // result: (VPSHLDD128 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHLDD128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromUint32x8 [a] x y) - // result: (VPSHLDD256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHLDD256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromUint64x2 [a] x y) - // result: (VPSHLDQ128 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHLDQ128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromUint64x4 [a] x y) - // result: (VPSHLDQ256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHLDQ256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromUint64x8 [a] x y) - // result: (VPSHLDQ512 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHLDQ512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - 
return true - } -} func rewriteValueAMD64_OpShiftAllLeftMaskedInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -49813,141 +48739,6 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromInt16x16 [a] x y) - // result: (VPSHRDW256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDW256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromInt16x32 [a] x y) - // result: (VPSHRDW512 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDW512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromInt16x8 [a] x y) - // result: (VPSHRDW128 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDW128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromInt32x16 [a] x y) - // result: (VPSHRDD512 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDD512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromInt32x4 [a] x y) - // result: (VPSHRDD128 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDD128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromInt32x8 [a] x y) - // result: (VPSHRDD256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDD256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromInt64x2 [a] x y) - // result: (VPSHRDQ128 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDQ128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromInt64x4 [a] x y) - // result: (VPSHRDQ256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDQ256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromInt64x8 [a] x y) - // result: (VPSHRDQ512 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDQ512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} func 
rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -50308,141 +49099,6 @@ func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint64x8(v *Value) b return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromUint16x16 [a] x y) - // result: (VPSHRDW256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDW256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromUint16x32 [a] x y) - // result: (VPSHRDW512 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDW512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromUint16x8 [a] x y) - // result: (VPSHRDW128 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDW128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromUint32x16 [a] x y) - // result: (VPSHRDD512 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDD512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromUint32x4 [a] x y) - // result: (VPSHRDD128 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDD128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromUint32x8 [a] x y) - // result: (VPSHRDD256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDD256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromUint64x2 [a] x y) - // result: (VPSHRDQ128 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDQ128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromUint64x4 [a] x y) - // result: (VPSHRDQ256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDQ256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromUint64x8 [a] x y) - // result: (VPSHRDQ512 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDQ512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - 
return true - } -} func rewriteValueAMD64_OpShiftAllRightMaskedInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] From 08cd62e9f50b10a19f96b94c1e75f868b958d113 Mon Sep 17 00:00:00 2001 From: David Chase Date: Wed, 9 Jul 2025 14:43:30 -0400 Subject: [PATCH 074/139] [dev.simd] cmd/compile: remove X15 from register mask mistakes were made. X15 is reserved zero and cannot be allocated normally. Change-Id: I70b24aa07dc31f9b40e306a9aae1d53dfea794f9 Reviewed-on: https://go-review.googlesource.com/c/go/+/686996 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao --- src/cmd/compile/internal/ssa/_gen/AMD64Ops.go | 2 +- src/cmd/compile/internal/ssa/opGen.go | 1080 ++++++++--------- 2 files changed, 541 insertions(+), 541 deletions(-) diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go index 150c609fc54776..35d26dfdfa16dd 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go @@ -126,7 +126,7 @@ func init() { g = buildReg("g") fp = buildReg("X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14") v = buildReg("X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14") - w = buildReg("X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31") + w = buildReg("X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31") x15 = buildReg("X15") mask = buildReg("K1 K2 K3 K4 K5 K6 K7") gpsp = gp | buildReg("SP") diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index d9fea94fc3f65e..dc84a1f4fa813d 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -18258,7 +18258,7 @@ var opcodeTable = [...]opInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18297,7 +18297,7 @@ var opcodeTable = [...]opInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18336,7 +18336,7 @@ var opcodeTable = [...]opInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18375,7 +18375,7 @@ var opcodeTable = [...]opInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18411,7 +18411,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVB2M, reg: regInfo{ 
inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -18450,7 +18450,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVW2M, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -18489,7 +18489,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVD2M, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -18528,7 +18528,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVQ2M, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -18561,7 +18561,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPXORQ, reg: regInfo{ outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18572,11 +18572,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18602,10 +18602,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 
X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18629,10 +18629,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRSQRT14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18656,11 +18656,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18785,11 +18785,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18816,11 +18816,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18847,11 +18847,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18861,11 +18861,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18906,10 +18906,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18933,11 +18933,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19007,10 +19007,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19266,11 +19266,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19440,10 +19440,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19699,11 +19699,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 
X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19873,10 +19873,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19900,10 +19900,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20132,11 +20132,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20306,10 +20306,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20333,10 +20333,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20565,11 +20565,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20695,11 +20695,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20725,10 +20725,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20752,10 +20752,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20779,11 +20779,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20908,11 +20908,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20939,11 +20939,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ 
- {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20970,11 +20970,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20984,11 +20984,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21029,10 +21029,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21056,11 +21056,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 
X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21353,10 +21353,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21510,11 +21510,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLVW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21572,11 +21572,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLVW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21634,11 +21634,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 
X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21706,10 +21706,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21734,11 +21734,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21765,11 +21765,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21796,11 +21796,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21827,11 +21827,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21858,11 +21858,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21888,11 +21888,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMADDWD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21917,10 +21917,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21945,11 +21945,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21975,11 +21975,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22004,11 +22004,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLVW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 
X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22066,11 +22066,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLVW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22128,11 +22128,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22157,11 +22157,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22454,10 +22454,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTW, 
reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22611,11 +22611,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLVW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22673,11 +22673,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLVW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22735,11 +22735,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22807,10 +22807,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22835,11 +22835,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22866,11 +22866,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPANDD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22896,11 +22896,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22926,11 +22926,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22957,11 +22957,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22988,11 +22988,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23019,11 +23019,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPORD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23082,10 +23082,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23109,11 +23109,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLVD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23138,11 +23138,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORVD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23233,11 +23233,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLVD, reg: 
regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23295,11 +23295,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLVD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23357,11 +23357,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23386,11 +23386,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 
X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23449,11 +23449,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPXORD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23782,10 +23782,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23809,11 +23809,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLVD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23838,11 +23838,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORVD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24523,10 +24523,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24550,11 +24550,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLVD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24579,11 +24579,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORVD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24961,10 +24961,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25080,11 +25080,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25111,11 +25111,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25158,11 +25158,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25204,10 +25204,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTQ, reg: 
regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25231,11 +25231,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25260,11 +25260,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25305,10 +25305,10 @@ var opcodeTable = [...]opInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25334,10 +25334,10 @@ var opcodeTable = [...]opInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // 
K1 K2 K3 K4 K5 K6 K7 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25348,10 +25348,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25363,10 +25363,10 @@ var opcodeTable = [...]opInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25500,11 +25500,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25574,10 +25574,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25693,11 +25693,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25724,11 +25724,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25771,11 +25771,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25817,10 +25817,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: 
[]inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25844,11 +25844,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25873,11 +25873,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25918,10 +25918,10 @@ var opcodeTable = [...]opInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25947,10 +25947,10 @@ var opcodeTable = [...]opInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 
K7 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25961,10 +25961,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25976,10 +25976,10 @@ var opcodeTable = [...]opInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26113,11 +26113,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26187,10 +26187,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26215,11 +26215,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26246,11 +26246,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPANDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26276,11 +26276,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPANDNQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26306,11 +26306,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26337,11 +26337,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26368,11 +26368,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26399,11 +26399,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 
X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26430,11 +26430,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26460,10 +26460,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26487,11 +26487,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26516,11 +26516,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 
X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26546,10 +26546,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26561,10 +26561,10 @@ var opcodeTable = [...]opInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26575,10 +26575,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26590,10 +26590,10 @@ var opcodeTable = [...]opInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26604,10 +26604,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26619,10 +26619,10 @@ var opcodeTable = [...]opInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26632,11 +26632,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26694,11 +26694,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26756,11 +26756,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26785,11 +26785,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26815,11 +26815,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27038,10 +27038,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, 
outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27376,10 +27376,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27521,10 +27521,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27549,11 +27549,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27580,11 +27580,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 
X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27611,11 +27611,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27641,10 +27641,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27669,11 +27669,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27699,11 +27699,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27728,11 +27728,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27882,11 +27882,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27913,11 +27913,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27944,11 +27944,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27975,11 +27975,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28130,11 +28130,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28161,11 +28161,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: 
[]outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28346,11 +28346,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28377,11 +28377,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28424,11 +28424,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28455,11 +28455,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 
X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28502,11 +28502,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28533,11 +28533,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28564,11 +28564,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28625,11 +28625,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8MULB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28776,11 +28776,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8MULB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28897,11 +28897,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28927,11 +28927,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8MULB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28957,11 +28957,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28988,11 +28988,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29018,11 +29018,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMADDUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 
X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29048,10 +29048,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29077,10 +29077,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29153,10 +29153,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29182,10 +29182,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29258,10 +29258,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 
X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29287,10 +29287,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29392,10 +29392,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29421,10 +29421,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29513,10 +29513,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29542,10 +29542,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29604,10 +29604,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29633,10 +29633,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29727,11 +29727,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29758,11 +29758,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 
X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29822,11 +29822,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29853,11 +29853,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29901,7 +29901,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPEXTRW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -29945,11 +29945,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29976,11 +29976,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30040,10 +30040,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30069,10 +30069,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30098,11 +30098,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ 
-30129,11 +30129,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30206,10 +30206,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30235,10 +30235,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30279,11 +30279,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30310,11 +30310,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - 
{0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30373,10 +30373,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30402,10 +30402,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30431,11 +30431,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30462,11 +30462,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 
X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30539,10 +30539,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30568,10 +30568,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30612,11 +30612,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30643,11 +30643,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 
X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30706,10 +30706,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30735,10 +30735,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30764,11 +30764,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30795,11 +30795,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30859,10 +30859,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30888,10 +30888,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30917,11 +30917,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30948,11 +30948,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30996,7 +30996,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPEXTRB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -31442,11 +31442,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -31457,11 +31457,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -31536,11 +31536,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -31551,11 +31551,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -31646,11 +31646,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -31661,11 +31661,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, From 47b07a87a65584f7b1c1efa26cf94e551e72dc2c Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: 
Wed, 9 Jul 2025 21:16:03 +0000 Subject: [PATCH 075/139] [dev.simd] cmd/compile, simd: fix Int64x2 Greater output type to mask This CL is generated by CL 686821. Change-Id: I4bc4fa717ff858299b13955a40e750709a796fba Reviewed-on: https://go-review.googlesource.com/c/go/+/686998 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI Auto-Submit: Junyang Shao --- src/simd/ops_amd64.go | 2 +- src/simd/simd_wrapped_test.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 55c4b32db00bf5..6f1c1a1b23ae2f 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -2248,7 +2248,7 @@ func (x Int32x8) Greater(y Int32x8) Mask32x8 // Greater compares for greater than. // // Asm: VPCMPGTQ, CPU Feature: AVX -func (x Int64x2) Greater(y Int64x2) Int64x2 +func (x Int64x2) Greater(y Int64x2) Mask64x2 // Greater compares for greater than. // diff --git a/src/simd/simd_wrapped_test.go b/src/simd/simd_wrapped_test.go index 181a937d7ebc24..bdbb25bfce4e6a 100644 --- a/src/simd/simd_wrapped_test.go +++ b/src/simd/simd_wrapped_test.go @@ -4018,8 +4018,6 @@ func testInt64x2Binary(t *testing.T, v0 []int64, v1 []int64, want []int64, which gotv = vec0.And(vec1) case "AndNot": gotv = vec0.AndNot(vec1) - case "Greater": - gotv = vec0.Greater(vec1) case "Max": gotv = vec0.Max(vec1) case "Min": @@ -4115,6 +4113,8 @@ func testInt64x2Compare(t *testing.T, v0 []int64, v1 []int64, want []int64, whic switch which { case "Equal": gotv = vec0.Equal(vec1).AsInt64x2() + case "Greater": + gotv = vec0.Greater(vec1).AsInt64x2() case "GreaterEqual": gotv = vec0.GreaterEqual(vec1).AsInt64x2() case "Less": From ab7f839280df8734c388046f957f7f37ae5b0998 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Wed, 9 Jul 2025 22:33:25 +0000 Subject: [PATCH 076/139] [dev.simd] cmd/compile: fix maskreg/simdreg chaos This CL fixes some errors left by CL 685895. 
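In short, the errors were in the lowering of OpCopy, loads, and stores for SIMD-typed values: the register class has to be chosen from the value's type, because AVX-512 mask values live in K registers while vector values live in X/Y/Z registers. A minimal sketch of that rule, using the helper names that appear in the diff below (the comments here are explanatory additions, not part of the CL):

    func simdOrMaskReg(v *ssa.Value) int16 {
        if v.Type.Size() <= 8 {
            // Mask values are at most 8 bytes wide and are
            // assigned AVX-512 K registers.
            return maskReg(v)
        }
        // Anything wider is a vector and lives in an X/Y/Z register.
        return simdReg(v)
    }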
Change-Id: I35ee36287fc964a82fd3c88764b688bd4491be65 Reviewed-on: https://go-review.googlesource.com/c/go/+/687095 LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui --- src/cmd/compile/internal/amd64/ssa.go | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 8bc7cf83a35be4..3e45097edf4c0c 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -1043,8 +1043,8 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { x := v.Args[0].Reg() y := v.Reg() if v.Type.IsSIMD() { - x = simdReg(v.Args[0]) - y = simdReg(v) + x = simdOrMaskReg(v.Args[0]) + y = simdOrMaskReg(v) } if x != y { opregreg(s, moveByType(v.Type), y, x) @@ -1059,7 +1059,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.To.Type = obj.TYPE_REG r := v.Reg() if v.Type.IsSIMD() { - r = simdReg(v) + r = simdOrMaskReg(v) } p.To.Reg = r @@ -1070,7 +1070,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { } r := v.Args[0].Reg() if v.Type.IsSIMD() { - r = simdReg(v.Args[0]) + r = simdOrMaskReg(v.Args[0]) } p := s.Prog(storeByType(v.Type)) p.From.Type = obj.TYPE_REG @@ -1906,7 +1906,7 @@ func simdReg(v *ssa.Value) int16 { func maskReg(v *ssa.Value) int16 { t := v.Type if !t.IsSIMD() { - base.Fatalf("simdReg: not a simd type; v=%s, b=b%d, f=%s", v.LongString(), v.Block.ID, v.Block.Func.Name) + base.Fatalf("maskReg: not a simd type; v=%s, b=b%d, f=%s", v.LongString(), v.Block.ID, v.Block.Func.Name) } switch t.Size() { case 8: @@ -1915,6 +1915,15 @@ func maskReg(v *ssa.Value) int16 { panic("unreachable") } +// XXX k mask + vec +func simdOrMaskReg(v *ssa.Value) int16 { + t := v.Type + if t.Size() <= 8 { + return maskReg(v) + } + return simdReg(v) +} + // XXX this is used for shift operations only. 
// regalloc will issue OpCopy with incorrect type, but the assigned // register should be correct, and this function is merely checking From ccb43dcec791cb70431840ec2138addb489b828e Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Wed, 9 Jul 2025 19:06:13 +0000 Subject: [PATCH 077/139] [dev.simd] cmd/compile: add VZEROUPPER and VZEROALL inst Change-Id: I41d60561fefdfa676e8b22648871ff1004711ac9 Reviewed-on: https://go-review.googlesource.com/c/go/+/686840 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/amd64/ssa.go | 2 ++ src/cmd/compile/internal/ssa/_gen/AMD64Ops.go | 3 +++ src/cmd/compile/internal/ssa/opGen.go | 14 ++++++++++++++ 3 files changed, 19 insertions(+) diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 3e45097edf4c0c..9c31b77e7031fb 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -1445,6 +1445,8 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { // XXX SIMD // XXX may change depending on how we handle aliased registers + case ssa.OpAMD64VZEROUPPER, ssa.OpAMD64VZEROALL: + s.Prog(v.Op.Asm()) case ssa.OpAMD64Zero128, ssa.OpAMD64Zero256, ssa.OpAMD64Zero512: p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go index 35d26dfdfa16dd..543233f4d831fe 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go @@ -1311,6 +1311,9 @@ func init() { {name: "Zero128", argLength: 0, reg: v01, asm: "VPXOR"}, {name: "Zero256", argLength: 0, reg: v01, asm: "VPXOR"}, {name: "Zero512", argLength: 0, reg: w01, asm: "VPXORQ"}, + + {name: "VZEROUPPER", argLength: 0, asm: "VZEROUPPER"}, + {name: "VZEROALL", argLength: 0, asm: "VZEROALL"}, } var AMD64blocks = []blockData{ diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index dc84a1f4fa813d..119badedcc6e14 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1196,6 +1196,8 @@ const ( OpAMD64Zero128 OpAMD64Zero256 OpAMD64Zero512 + OpAMD64VZEROUPPER + OpAMD64VZEROALL OpAMD64VADDPS512 OpAMD64VADDPSMasked512 OpAMD64VRCP14PS512 @@ -18565,6 +18567,18 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VZEROUPPER", + argLen: 0, + asm: x86.AVZEROUPPER, + reg: regInfo{}, + }, + { + name: "VZEROALL", + argLen: 0, + asm: x86.AVZEROALL, + reg: regInfo{}, + }, { name: "VADDPS512", argLen: 2, From 1440ff70362f85c86b54b5c428fd95cb6cb35d91 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Thu, 10 Jul 2025 22:04:21 +0000 Subject: [PATCH 078/139] [dev.simd] cmd/compile: exclude simd vars from merge local It looks like mergelocals pass's liveness analysis does not handle simd variables well. The added test forces two vectors to spill in a way that does not work with mergelocals: if the added check is removed, then `v` and `m` will be marked merged and spilled to the same location, failing the test. 
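The shape of the failure, as a minimal sketch (it mirrors the test added in the diff below; addSpilled and barrier are illustrative names, barrier standing in for the test's forceSpill helper, while LoadInt64x4Slice, Add, and StoreSlice are the simd package calls the test itself uses): two Int64x4 values are live across a call, so both spill, and if mergelocals assigns them the same stack slot one clobbers the other.

    //go:noinline
    func barrier() {}

    func addSpilled(a, b []int64) []int64 {
        x := simd.LoadInt64x4Slice(a)
        y := simd.LoadInt64x4Slice(b)
        barrier() // the call forces x and y to be spilled
        out := make([]int64, 4)
        x.Add(y).StoreSlice(out) // wrong results if x and y shared a slot
        return out
    }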
Change-Id: Ife4e4e939565d817fc24f7180cb791a5084dd191 Reviewed-on: https://go-review.googlesource.com/c/go/+/687375 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/ssa/func.go | 8 ++++++++ src/simd/simd_test.go | 26 ++++++++++++++++++++++++++ 2 files changed, 34 insertions(+) diff --git a/src/cmd/compile/internal/ssa/func.go b/src/cmd/compile/internal/ssa/func.go index 5736f0b8126484..01ce89cf47e315 100644 --- a/src/cmd/compile/internal/ssa/func.go +++ b/src/cmd/compile/internal/ssa/func.go @@ -850,6 +850,13 @@ func (f *Func) NewLocal(pos src.XPos, typ *types.Type) *ir.Name { // items larger than what CanSSA would allow (approximateky, we disallow things // marked as open defer slots so as to avoid complicating liveness // analysis. +// +// TODO: make SIMD variables mergible. +// +// Right now this check excludes SIMD vars because sometimes two live SIMD +// vectors will be put into the same partition by mergelocals, we need to figure +// out why because these vectors are big and should be merged when possible. +// Details in CL 687375. func IsMergeCandidate(n *ir.Name) bool { if base.Debug.MergeLocals == 0 || base.Flag.N != 0 || @@ -857,6 +864,7 @@ func IsMergeCandidate(n *ir.Name) bool { n.Type().Size() <= int64(3*types.PtrSize) || n.Addrtaken() || n.NonMergeable() || + n.Type().IsSIMD() || n.OpenDeferSlot() { return false } diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index ebe241c4674466..36923319ff312c 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -364,3 +364,29 @@ func TestSlicesFloat64(t *testing.T) { } } } + +// TODO: try to reduce this test to be smaller. +func TestMergeLocals(t *testing.T) { + testMergeLocalswrapper(t, simd.Int64x4.Add) +} + +//go:noinline +func forceSpill() {} + +func testMergeLocalswrapper(t *testing.T, op func(simd.Int64x4, simd.Int64x4) simd.Int64x4) { + t.Helper() + s0 := []int64{0, 1, 2, 3} + s1 := []int64{-1, 0, -1, 0} + want := []int64{-1, 1, 1, 3} + v := simd.LoadInt64x4Slice(s0) + m := simd.LoadInt64x4Slice(s1) + forceSpill() + got := make([]int64, 4) + gotv := op(v, m) + gotv.StoreSlice(got) + for i := range len(want) { + if !(got[i] == want[i]) { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} From bbb6dccd8486d1dc0b3042865e7bc0fce54137fc Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Fri, 11 Jul 2025 02:11:22 +0000 Subject: [PATCH 079/139] [dev.simd] simd: fix documentations This CL is generated by CL 687415. Change-Id: I2d778717013af613c442116658f42a4a4cc5d734 Reviewed-on: https://go-review.googlesource.com/c/go/+/687376 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- .../compile/internal/ssa/_gen/simdAMD64.rules | 12 +- .../internal/ssa/_gen/simdgenericOps.go | 12 +- src/cmd/compile/internal/ssa/opGen.go | 24 +- src/cmd/compile/internal/ssa/rewriteAMD64.go | 30 +- .../compile/internal/ssagen/simdintrinsics.go | 12 +- src/simd/ops_amd64.go | 1570 ++++++++--------- src/simd/simd_wrapped_test.go | 4 +- 7 files changed, 832 insertions(+), 832 deletions(-) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index c55a1f3f639711..7ac4df59589e40 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -380,12 +380,12 @@ (GaloisFieldAffineTransformUint8x16 ...) => (VGF2P8AFFINEQB128 ...) (GaloisFieldAffineTransformUint8x32 ...) => (VGF2P8AFFINEQB256 ...) (GaloisFieldAffineTransformUint8x64 ...) 
=> (VGF2P8AFFINEQB512 ...) -(GaloisFieldAffineTransformInversedUint8x16 ...) => (VGF2P8AFFINEINVQB128 ...) -(GaloisFieldAffineTransformInversedUint8x32 ...) => (VGF2P8AFFINEINVQB256 ...) -(GaloisFieldAffineTransformInversedUint8x64 ...) => (VGF2P8AFFINEINVQB512 ...) -(GaloisFieldAffineTransformInversedMaskedUint8x16 [a] x y mask) => (VGF2P8AFFINEINVQBMasked128 [a] x y (VPMOVVec8x16ToM mask)) -(GaloisFieldAffineTransformInversedMaskedUint8x32 [a] x y mask) => (VGF2P8AFFINEINVQBMasked256 [a] x y (VPMOVVec8x32ToM mask)) -(GaloisFieldAffineTransformInversedMaskedUint8x64 [a] x y mask) => (VGF2P8AFFINEINVQBMasked512 [a] x y (VPMOVVec8x64ToM mask)) +(GaloisFieldAffineTransformInverseUint8x16 ...) => (VGF2P8AFFINEINVQB128 ...) +(GaloisFieldAffineTransformInverseUint8x32 ...) => (VGF2P8AFFINEINVQB256 ...) +(GaloisFieldAffineTransformInverseUint8x64 ...) => (VGF2P8AFFINEINVQB512 ...) +(GaloisFieldAffineTransformInverseMaskedUint8x16 [a] x y mask) => (VGF2P8AFFINEINVQBMasked128 [a] x y (VPMOVVec8x16ToM mask)) +(GaloisFieldAffineTransformInverseMaskedUint8x32 [a] x y mask) => (VGF2P8AFFINEINVQBMasked256 [a] x y (VPMOVVec8x32ToM mask)) +(GaloisFieldAffineTransformInverseMaskedUint8x64 [a] x y mask) => (VGF2P8AFFINEINVQBMasked512 [a] x y (VPMOVVec8x64ToM mask)) (GaloisFieldAffineTransformMaskedUint8x16 [a] x y mask) => (VGF2P8AFFINEQBMasked128 [a] x y (VPMOVVec8x16ToM mask)) (GaloisFieldAffineTransformMaskedUint8x32 [a] x y mask) => (VGF2P8AFFINEQBMasked256 [a] x y (VPMOVVec8x32ToM mask)) (GaloisFieldAffineTransformMaskedUint8x64 [a] x y mask) => (VGF2P8AFFINEQBMasked512 [a] x y (VPMOVVec8x64ToM mask)) diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 1079321da71b31..d07472b87684ed 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -1665,20 +1665,20 @@ func simdGenericOps() []opData { {name: "ShiftAllRightAndFillUpperFromUint64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromMaskedUint64x8", argLength: 3, commutative: false, aux: "Int8"}, {name: "GaloisFieldAffineTransformUint8x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformInversedUint8x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformInversedMaskedUint8x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformInverseUint8x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformInverseMaskedUint8x16", argLength: 3, commutative: false, aux: "Int8"}, {name: "GaloisFieldAffineTransformMaskedUint8x16", argLength: 3, commutative: false, aux: "Int8"}, {name: "GetElemUint8x16", argLength: 1, commutative: false, aux: "Int8"}, {name: "SetElemUint8x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "GaloisFieldAffineTransformUint8x32", argLength: 2, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformInversedUint8x32", argLength: 2, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformInversedMaskedUint8x32", argLength: 3, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformInverseUint8x32", argLength: 2, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformInverseMaskedUint8x32", argLength: 3, commutative: false, aux: "Int8"}, {name: "GaloisFieldAffineTransformMaskedUint8x32", argLength: 3, commutative: false, aux: "Int8"}, {name: "Get128Uint8x32", 
argLength: 1, commutative: false, aux: "Int8"}, {name: "Set128Uint8x32", argLength: 2, commutative: false, aux: "Int8"}, {name: "GaloisFieldAffineTransformUint8x64", argLength: 2, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformInversedUint8x64", argLength: 2, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformInversedMaskedUint8x64", argLength: 3, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformInverseUint8x64", argLength: 2, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformInverseMaskedUint8x64", argLength: 3, commutative: false, aux: "Int8"}, {name: "GaloisFieldAffineTransformMaskedUint8x64", argLength: 3, commutative: false, aux: "Int8"}, } } diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 119badedcc6e14..d5c5085949b055 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -5957,20 +5957,20 @@ const ( OpShiftAllRightAndFillUpperFromUint64x8 OpShiftAllRightAndFillUpperFromMaskedUint64x8 OpGaloisFieldAffineTransformUint8x16 - OpGaloisFieldAffineTransformInversedUint8x16 - OpGaloisFieldAffineTransformInversedMaskedUint8x16 + OpGaloisFieldAffineTransformInverseUint8x16 + OpGaloisFieldAffineTransformInverseMaskedUint8x16 OpGaloisFieldAffineTransformMaskedUint8x16 OpGetElemUint8x16 OpSetElemUint8x16 OpGaloisFieldAffineTransformUint8x32 - OpGaloisFieldAffineTransformInversedUint8x32 - OpGaloisFieldAffineTransformInversedMaskedUint8x32 + OpGaloisFieldAffineTransformInverseUint8x32 + OpGaloisFieldAffineTransformInverseMaskedUint8x32 OpGaloisFieldAffineTransformMaskedUint8x32 OpGet128Uint8x32 OpSet128Uint8x32 OpGaloisFieldAffineTransformUint8x64 - OpGaloisFieldAffineTransformInversedUint8x64 - OpGaloisFieldAffineTransformInversedMaskedUint8x64 + OpGaloisFieldAffineTransformInverseUint8x64 + OpGaloisFieldAffineTransformInverseMaskedUint8x64 OpGaloisFieldAffineTransformMaskedUint8x64 ) @@ -67930,13 +67930,13 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "GaloisFieldAffineTransformInversedUint8x16", + name: "GaloisFieldAffineTransformInverseUint8x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "GaloisFieldAffineTransformInversedMaskedUint8x16", + name: "GaloisFieldAffineTransformInverseMaskedUint8x16", auxType: auxInt8, argLen: 3, generic: true, @@ -67966,13 +67966,13 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "GaloisFieldAffineTransformInversedUint8x32", + name: "GaloisFieldAffineTransformInverseUint8x32", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "GaloisFieldAffineTransformInversedMaskedUint8x32", + name: "GaloisFieldAffineTransformInverseMaskedUint8x32", auxType: auxInt8, argLen: 3, generic: true, @@ -68002,13 +68002,13 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "GaloisFieldAffineTransformInversedUint8x64", + name: "GaloisFieldAffineTransformInverseUint8x64", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "GaloisFieldAffineTransformInversedMaskedUint8x64", + name: "GaloisFieldAffineTransformInverseMaskedUint8x64", auxType: auxInt8, argLen: 3, generic: true, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 98bc0779f65e42..d258b3bd0e7f97 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1687,19 +1687,19 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat64x4(v) 
case OpFusedMultiplySubAddMaskedFloat64x8: return rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat64x8(v) - case OpGaloisFieldAffineTransformInversedMaskedUint8x16: - return rewriteValueAMD64_OpGaloisFieldAffineTransformInversedMaskedUint8x16(v) - case OpGaloisFieldAffineTransformInversedMaskedUint8x32: - return rewriteValueAMD64_OpGaloisFieldAffineTransformInversedMaskedUint8x32(v) - case OpGaloisFieldAffineTransformInversedMaskedUint8x64: - return rewriteValueAMD64_OpGaloisFieldAffineTransformInversedMaskedUint8x64(v) - case OpGaloisFieldAffineTransformInversedUint8x16: + case OpGaloisFieldAffineTransformInverseMaskedUint8x16: + return rewriteValueAMD64_OpGaloisFieldAffineTransformInverseMaskedUint8x16(v) + case OpGaloisFieldAffineTransformInverseMaskedUint8x32: + return rewriteValueAMD64_OpGaloisFieldAffineTransformInverseMaskedUint8x32(v) + case OpGaloisFieldAffineTransformInverseMaskedUint8x64: + return rewriteValueAMD64_OpGaloisFieldAffineTransformInverseMaskedUint8x64(v) + case OpGaloisFieldAffineTransformInverseUint8x16: v.Op = OpAMD64VGF2P8AFFINEINVQB128 return true - case OpGaloisFieldAffineTransformInversedUint8x32: + case OpGaloisFieldAffineTransformInverseUint8x32: v.Op = OpAMD64VGF2P8AFFINEINVQB256 return true - case OpGaloisFieldAffineTransformInversedUint8x64: + case OpGaloisFieldAffineTransformInverseUint8x64: v.Op = OpAMD64VGF2P8AFFINEINVQB512 return true case OpGaloisFieldAffineTransformMaskedUint8x16: @@ -34016,12 +34016,12 @@ func rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpGaloisFieldAffineTransformInversedMaskedUint8x16(v *Value) bool { +func rewriteValueAMD64_OpGaloisFieldAffineTransformInverseMaskedUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (GaloisFieldAffineTransformInversedMaskedUint8x16 [a] x y mask) + // match: (GaloisFieldAffineTransformInverseMaskedUint8x16 [a] x y mask) // result: (VGF2P8AFFINEINVQBMasked128 [a] x y (VPMOVVec8x16ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -34036,12 +34036,12 @@ func rewriteValueAMD64_OpGaloisFieldAffineTransformInversedMaskedUint8x16(v *Val return true } } -func rewriteValueAMD64_OpGaloisFieldAffineTransformInversedMaskedUint8x32(v *Value) bool { +func rewriteValueAMD64_OpGaloisFieldAffineTransformInverseMaskedUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (GaloisFieldAffineTransformInversedMaskedUint8x32 [a] x y mask) + // match: (GaloisFieldAffineTransformInverseMaskedUint8x32 [a] x y mask) // result: (VGF2P8AFFINEINVQBMasked256 [a] x y (VPMOVVec8x32ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -34056,12 +34056,12 @@ func rewriteValueAMD64_OpGaloisFieldAffineTransformInversedMaskedUint8x32(v *Val return true } } -func rewriteValueAMD64_OpGaloisFieldAffineTransformInversedMaskedUint8x64(v *Value) bool { +func rewriteValueAMD64_OpGaloisFieldAffineTransformInverseMaskedUint8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (GaloisFieldAffineTransformInversedMaskedUint8x64 [a] x y mask) + // match: (GaloisFieldAffineTransformInverseMaskedUint8x64 [a] x y mask) // result: (VGF2P8AFFINEINVQBMasked512 [a] x y (VPMOVVec8x64ToM mask)) for { a := auxIntToInt8(v.AuxInt) diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 15351b678b49e8..ffd341d6aba47e 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ 
b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -391,12 +391,12 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint8x16.GaloisFieldAffineTransform", opLen2Imm8_2I(ssa.OpGaloisFieldAffineTransformUint8x16, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Uint8x32.GaloisFieldAffineTransform", opLen2Imm8_2I(ssa.OpGaloisFieldAffineTransformUint8x32, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Uint8x64.GaloisFieldAffineTransform", opLen2Imm8_2I(ssa.OpGaloisFieldAffineTransformUint8x64, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint8x16.GaloisFieldAffineTransformInversed", opLen2Imm8_2I(ssa.OpGaloisFieldAffineTransformInversedUint8x16, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint8x32.GaloisFieldAffineTransformInversed", opLen2Imm8_2I(ssa.OpGaloisFieldAffineTransformInversedUint8x32, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint8x64.GaloisFieldAffineTransformInversed", opLen2Imm8_2I(ssa.OpGaloisFieldAffineTransformInversedUint8x64, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint8x16.GaloisFieldAffineTransformInversedMasked", opLen3Imm8_2I(ssa.OpGaloisFieldAffineTransformInversedMaskedUint8x16, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint8x32.GaloisFieldAffineTransformInversedMasked", opLen3Imm8_2I(ssa.OpGaloisFieldAffineTransformInversedMaskedUint8x32, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint8x64.GaloisFieldAffineTransformInversedMasked", opLen3Imm8_2I(ssa.OpGaloisFieldAffineTransformInversedMaskedUint8x64, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint8x16.GaloisFieldAffineTransformInverse", opLen2Imm8_2I(ssa.OpGaloisFieldAffineTransformInverseUint8x16, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint8x32.GaloisFieldAffineTransformInverse", opLen2Imm8_2I(ssa.OpGaloisFieldAffineTransformInverseUint8x32, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint8x64.GaloisFieldAffineTransformInverse", opLen2Imm8_2I(ssa.OpGaloisFieldAffineTransformInverseUint8x64, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint8x16.GaloisFieldAffineTransformInverseMasked", opLen3Imm8_2I(ssa.OpGaloisFieldAffineTransformInverseMaskedUint8x16, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint8x32.GaloisFieldAffineTransformInverseMasked", opLen3Imm8_2I(ssa.OpGaloisFieldAffineTransformInverseMaskedUint8x32, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint8x64.GaloisFieldAffineTransformInverseMasked", opLen3Imm8_2I(ssa.OpGaloisFieldAffineTransformInverseMaskedUint8x64, types.TypeVec512, 0), sys.AMD64) addF(simdPackage, "Uint8x16.GaloisFieldAffineTransformMasked", opLen3Imm8_2I(ssa.OpGaloisFieldAffineTransformMaskedUint8x16, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Uint8x32.GaloisFieldAffineTransformMasked", opLen3Imm8_2I(ssa.OpGaloisFieldAffineTransformMaskedUint8x32, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Uint8x64.GaloisFieldAffineTransformMasked", opLen3Imm8_2I(ssa.OpGaloisFieldAffineTransformMaskedUint8x64, types.TypeVec512, 0), sys.AMD64) diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 6f1c1a1b23ae2f..e2f0460274f217 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -68,62 +68,62 @@ func (x Int64x8) Absolute() Int64x8 /* AbsoluteMasked */ -// Absolute computes the absolute value of each element. +// AbsoluteMasked computes the absolute value of each element. 
// // Asm: VPABSB, CPU Feature: AVX512EVEX func (x Int8x16) AbsoluteMasked(y Mask8x16) Int8x16 -// Absolute computes the absolute value of each element. +// AbsoluteMasked computes the absolute value of each element. // // Asm: VPABSB, CPU Feature: AVX512EVEX func (x Int8x32) AbsoluteMasked(y Mask8x32) Int8x32 -// Absolute computes the absolute value of each element. +// AbsoluteMasked computes the absolute value of each element. // // Asm: VPABSB, CPU Feature: AVX512EVEX func (x Int8x64) AbsoluteMasked(y Mask8x64) Int8x64 -// Absolute computes the absolute value of each element. +// AbsoluteMasked computes the absolute value of each element. // // Asm: VPABSW, CPU Feature: AVX512EVEX func (x Int16x8) AbsoluteMasked(y Mask16x8) Int16x8 -// Absolute computes the absolute value of each element. +// AbsoluteMasked computes the absolute value of each element. // // Asm: VPABSW, CPU Feature: AVX512EVEX func (x Int16x16) AbsoluteMasked(y Mask16x16) Int16x16 -// Absolute computes the absolute value of each element. +// AbsoluteMasked computes the absolute value of each element. // // Asm: VPABSW, CPU Feature: AVX512EVEX func (x Int16x32) AbsoluteMasked(y Mask16x32) Int16x32 -// Absolute computes the absolute value of each element. +// AbsoluteMasked computes the absolute value of each element. // // Asm: VPABSD, CPU Feature: AVX512EVEX func (x Int32x4) AbsoluteMasked(y Mask32x4) Int32x4 -// Absolute computes the absolute value of each element. +// AbsoluteMasked computes the absolute value of each element. // // Asm: VPABSD, CPU Feature: AVX512EVEX func (x Int32x8) AbsoluteMasked(y Mask32x8) Int32x8 -// Absolute computes the absolute value of each element. +// AbsoluteMasked computes the absolute value of each element. // // Asm: VPABSD, CPU Feature: AVX512EVEX func (x Int32x16) AbsoluteMasked(y Mask32x16) Int32x16 -// Absolute computes the absolute value of each element. +// AbsoluteMasked computes the absolute value of each element. // // Asm: VPABSQ, CPU Feature: AVX512EVEX func (x Int64x2) AbsoluteMasked(y Mask64x2) Int64x2 -// Absolute computes the absolute value of each element. +// AbsoluteMasked computes the absolute value of each element. // // Asm: VPABSQ, CPU Feature: AVX512EVEX func (x Int64x4) AbsoluteMasked(y Mask64x4) Int64x4 -// Absolute computes the absolute value of each element. +// AbsoluteMasked computes the absolute value of each element. // // Asm: VPABSQ, CPU Feature: AVX512EVEX func (x Int64x8) AbsoluteMasked(y Mask64x8) Int64x8 @@ -282,152 +282,152 @@ func (x Uint64x8) Add(y Uint64x8) Uint64x8 /* AddMasked */ -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VADDPS, CPU Feature: AVX512EVEX func (x Float32x4) AddMasked(y Float32x4, z Mask32x4) Float32x4 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VADDPS, CPU Feature: AVX512EVEX func (x Float32x8) AddMasked(y Float32x8, z Mask32x8) Float32x8 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VADDPS, CPU Feature: AVX512EVEX func (x Float32x16) AddMasked(y Float32x16, z Mask32x16) Float32x16 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VADDPD, CPU Feature: AVX512EVEX func (x Float64x2) AddMasked(y Float64x2, z Mask64x2) Float64x2 -// Add adds corresponding elements of two vectors. 
+// AddMasked adds corresponding elements of two vectors. // // Asm: VADDPD, CPU Feature: AVX512EVEX func (x Float64x4) AddMasked(y Float64x4, z Mask64x4) Float64x4 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VADDPD, CPU Feature: AVX512EVEX func (x Float64x8) AddMasked(y Float64x8, z Mask64x8) Float64x8 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VPADDB, CPU Feature: AVX512EVEX func (x Int8x16) AddMasked(y Int8x16, z Mask8x16) Int8x16 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VPADDB, CPU Feature: AVX512EVEX func (x Int8x32) AddMasked(y Int8x32, z Mask8x32) Int8x32 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VPADDB, CPU Feature: AVX512EVEX func (x Int8x64) AddMasked(y Int8x64, z Mask8x64) Int8x64 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VPADDW, CPU Feature: AVX512EVEX func (x Int16x8) AddMasked(y Int16x8, z Mask16x8) Int16x8 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VPADDW, CPU Feature: AVX512EVEX func (x Int16x16) AddMasked(y Int16x16, z Mask16x16) Int16x16 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VPADDW, CPU Feature: AVX512EVEX func (x Int16x32) AddMasked(y Int16x32, z Mask16x32) Int16x32 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VPADDD, CPU Feature: AVX512EVEX func (x Int32x4) AddMasked(y Int32x4, z Mask32x4) Int32x4 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VPADDD, CPU Feature: AVX512EVEX func (x Int32x8) AddMasked(y Int32x8, z Mask32x8) Int32x8 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VPADDD, CPU Feature: AVX512EVEX func (x Int32x16) AddMasked(y Int32x16, z Mask32x16) Int32x16 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VPADDQ, CPU Feature: AVX512EVEX func (x Int64x2) AddMasked(y Int64x2, z Mask64x2) Int64x2 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VPADDQ, CPU Feature: AVX512EVEX func (x Int64x4) AddMasked(y Int64x4, z Mask64x4) Int64x4 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VPADDQ, CPU Feature: AVX512EVEX func (x Int64x8) AddMasked(y Int64x8, z Mask64x8) Int64x8 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VPADDB, CPU Feature: AVX512EVEX func (x Uint8x16) AddMasked(y Uint8x16, z Mask8x16) Uint8x16 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VPADDB, CPU Feature: AVX512EVEX func (x Uint8x32) AddMasked(y Uint8x32, z Mask8x32) Uint8x32 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. 
// // Asm: VPADDB, CPU Feature: AVX512EVEX func (x Uint8x64) AddMasked(y Uint8x64, z Mask8x64) Uint8x64 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VPADDW, CPU Feature: AVX512EVEX func (x Uint16x8) AddMasked(y Uint16x8, z Mask16x8) Uint16x8 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VPADDW, CPU Feature: AVX512EVEX func (x Uint16x16) AddMasked(y Uint16x16, z Mask16x16) Uint16x16 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VPADDW, CPU Feature: AVX512EVEX func (x Uint16x32) AddMasked(y Uint16x32, z Mask16x32) Uint16x32 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VPADDD, CPU Feature: AVX512EVEX func (x Uint32x4) AddMasked(y Uint32x4, z Mask32x4) Uint32x4 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VPADDD, CPU Feature: AVX512EVEX func (x Uint32x8) AddMasked(y Uint32x8, z Mask32x8) Uint32x8 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VPADDD, CPU Feature: AVX512EVEX func (x Uint32x16) AddMasked(y Uint32x16, z Mask32x16) Uint32x16 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VPADDQ, CPU Feature: AVX512EVEX func (x Uint64x2) AddMasked(y Uint64x2, z Mask64x2) Uint64x2 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VPADDQ, CPU Feature: AVX512EVEX func (x Uint64x4) AddMasked(y Uint64x4, z Mask64x4) Uint64x4 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VPADDQ, CPU Feature: AVX512EVEX func (x Uint64x8) AddMasked(y Uint64x8, z Mask64x8) Uint64x8 @@ -558,62 +558,62 @@ func (x Uint64x8) And(y Uint64x8) Uint64x8 /* AndMasked */ -// And performs a masked bitwise AND operation between two vectors. +// AndMasked performs a masked bitwise AND operation between two vectors. // // Asm: VPANDD, CPU Feature: AVX512EVEX func (x Int32x4) AndMasked(y Int32x4, z Mask32x4) Int32x4 -// And performs a masked bitwise AND operation between two vectors. +// AndMasked performs a masked bitwise AND operation between two vectors. // // Asm: VPANDD, CPU Feature: AVX512EVEX func (x Int32x8) AndMasked(y Int32x8, z Mask32x8) Int32x8 -// And performs a masked bitwise AND operation between two vectors. +// AndMasked performs a masked bitwise AND operation between two vectors. // // Asm: VPANDD, CPU Feature: AVX512EVEX func (x Int32x16) AndMasked(y Int32x16, z Mask32x16) Int32x16 -// And performs a masked bitwise AND operation between two vectors. +// AndMasked performs a masked bitwise AND operation between two vectors. // // Asm: VPANDQ, CPU Feature: AVX512EVEX func (x Int64x2) AndMasked(y Int64x2, z Mask64x2) Int64x2 -// And performs a masked bitwise AND operation between two vectors. +// AndMasked performs a masked bitwise AND operation between two vectors. // // Asm: VPANDQ, CPU Feature: AVX512EVEX func (x Int64x4) AndMasked(y Int64x4, z Mask64x4) Int64x4 -// And performs a masked bitwise AND operation between two vectors. +// AndMasked performs a masked bitwise AND operation between two vectors. 
// // Asm: VPANDQ, CPU Feature: AVX512EVEX func (x Int64x8) AndMasked(y Int64x8, z Mask64x8) Int64x8 -// And performs a masked bitwise AND operation between two vectors. +// AndMasked performs a masked bitwise AND operation between two vectors. // // Asm: VPANDD, CPU Feature: AVX512EVEX func (x Uint32x4) AndMasked(y Uint32x4, z Mask32x4) Uint32x4 -// And performs a masked bitwise AND operation between two vectors. +// AndMasked performs a masked bitwise AND operation between two vectors. // // Asm: VPANDD, CPU Feature: AVX512EVEX func (x Uint32x8) AndMasked(y Uint32x8, z Mask32x8) Uint32x8 -// And performs a masked bitwise AND operation between two vectors. +// AndMasked performs a masked bitwise AND operation between two vectors. // // Asm: VPANDD, CPU Feature: AVX512EVEX func (x Uint32x16) AndMasked(y Uint32x16, z Mask32x16) Uint32x16 -// And performs a masked bitwise AND operation between two vectors. +// AndMasked performs a masked bitwise AND operation between two vectors. // // Asm: VPANDQ, CPU Feature: AVX512EVEX func (x Uint64x2) AndMasked(y Uint64x2, z Mask64x2) Uint64x2 -// And performs a masked bitwise AND operation between two vectors. +// AndMasked performs a masked bitwise AND operation between two vectors. // // Asm: VPANDQ, CPU Feature: AVX512EVEX func (x Uint64x4) AndMasked(y Uint64x4, z Mask64x4) Uint64x4 -// And performs a masked bitwise AND operation between two vectors. +// AndMasked performs a masked bitwise AND operation between two vectors. // // Asm: VPANDQ, CPU Feature: AVX512EVEX func (x Uint64x8) AndMasked(y Uint64x8, z Mask64x8) Uint64x8 @@ -722,62 +722,62 @@ func (x Uint64x8) AndNot(y Uint64x8) Uint64x8 /* AndNotMasked */ -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // // Asm: VPANDND, CPU Feature: AVX512EVEX func (x Int32x4) AndNotMasked(y Int32x4, z Mask32x4) Int32x4 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // // Asm: VPANDND, CPU Feature: AVX512EVEX func (x Int32x8) AndNotMasked(y Int32x8, z Mask32x8) Int32x8 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // // Asm: VPANDND, CPU Feature: AVX512EVEX func (x Int32x16) AndNotMasked(y Int32x16, z Mask32x16) Int32x16 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // // Asm: VPANDNQ, CPU Feature: AVX512EVEX func (x Int64x2) AndNotMasked(y Int64x2, z Mask64x2) Int64x2 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // // Asm: VPANDNQ, CPU Feature: AVX512EVEX func (x Int64x4) AndNotMasked(y Int64x4, z Mask64x4) Int64x4 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // // Asm: VPANDNQ, CPU Feature: AVX512EVEX func (x Int64x8) AndNotMasked(y Int64x8, z Mask64x8) Int64x8 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// AndNotMasked performs a masked bitwise AND NOT operation between two vectors. 
// // Asm: VPANDND, CPU Feature: AVX512EVEX func (x Uint32x4) AndNotMasked(y Uint32x4, z Mask32x4) Uint32x4 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // // Asm: VPANDND, CPU Feature: AVX512EVEX func (x Uint32x8) AndNotMasked(y Uint32x8, z Mask32x8) Uint32x8 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // // Asm: VPANDND, CPU Feature: AVX512EVEX func (x Uint32x16) AndNotMasked(y Uint32x16, z Mask32x16) Uint32x16 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // // Asm: VPANDNQ, CPU Feature: AVX512EVEX func (x Uint64x2) AndNotMasked(y Uint64x2, z Mask64x2) Uint64x2 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // // Asm: VPANDNQ, CPU Feature: AVX512EVEX func (x Uint64x4) AndNotMasked(y Uint64x4, z Mask64x4) Uint64x4 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // // Asm: VPANDNQ, CPU Feature: AVX512EVEX func (x Uint64x8) AndNotMasked(y Uint64x8, z Mask64x8) Uint64x8 @@ -816,32 +816,32 @@ func (x Float64x8) ApproximateReciprocal() Float64x8 /* ApproximateReciprocalMasked */ -// ApproximateReciprocal computes an approximate reciprocal of each element. +// ApproximateReciprocalMasked computes an approximate reciprocal of each element. // // Asm: VRCP14PS, CPU Feature: AVX512EVEX func (x Float32x4) ApproximateReciprocalMasked(y Mask32x4) Float32x4 -// ApproximateReciprocal computes an approximate reciprocal of each element. +// ApproximateReciprocalMasked computes an approximate reciprocal of each element. // // Asm: VRCP14PS, CPU Feature: AVX512EVEX func (x Float32x8) ApproximateReciprocalMasked(y Mask32x8) Float32x8 -// ApproximateReciprocal computes an approximate reciprocal of each element. +// ApproximateReciprocalMasked computes an approximate reciprocal of each element. // // Asm: VRCP14PS, CPU Feature: AVX512EVEX func (x Float32x16) ApproximateReciprocalMasked(y Mask32x16) Float32x16 -// ApproximateReciprocal computes an approximate reciprocal of each element. +// ApproximateReciprocalMasked computes an approximate reciprocal of each element. // // Asm: VRCP14PD, CPU Feature: AVX512EVEX func (x Float64x2) ApproximateReciprocalMasked(y Mask64x2) Float64x2 -// ApproximateReciprocal computes an approximate reciprocal of each element. +// ApproximateReciprocalMasked computes an approximate reciprocal of each element. // // Asm: VRCP14PD, CPU Feature: AVX512EVEX func (x Float64x4) ApproximateReciprocalMasked(y Mask64x4) Float64x4 -// ApproximateReciprocal computes an approximate reciprocal of each element. +// ApproximateReciprocalMasked computes an approximate reciprocal of each element. // // Asm: VRCP14PD, CPU Feature: AVX512EVEX func (x Float64x8) ApproximateReciprocalMasked(y Mask64x8) Float64x8 @@ -880,32 +880,32 @@ func (x Float64x8) ApproximateReciprocalOfSqrt() Float64x8 /* ApproximateReciprocalOfSqrtMasked */ -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. 
// // Asm: VRSQRT14PS, CPU Feature: AVX512EVEX func (x Float32x4) ApproximateReciprocalOfSqrtMasked(y Mask32x4) Float32x4 -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. // // Asm: VRSQRT14PS, CPU Feature: AVX512EVEX func (x Float32x8) ApproximateReciprocalOfSqrtMasked(y Mask32x8) Float32x8 -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. // // Asm: VRSQRT14PS, CPU Feature: AVX512EVEX func (x Float32x16) ApproximateReciprocalOfSqrtMasked(y Mask32x16) Float32x16 -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. // // Asm: VRSQRT14PD, CPU Feature: AVX512EVEX func (x Float64x2) ApproximateReciprocalOfSqrtMasked(y Mask64x2) Float64x2 -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. // // Asm: VRSQRT14PD, CPU Feature: AVX512EVEX func (x Float64x4) ApproximateReciprocalOfSqrtMasked(y Mask64x4) Float64x4 -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. // // Asm: VRSQRT14PD, CPU Feature: AVX512EVEX func (x Float64x8) ApproximateReciprocalOfSqrtMasked(y Mask64x8) Float64x8 @@ -944,32 +944,32 @@ func (x Uint16x32) Average(y Uint16x32) Uint16x32 /* AverageMasked */ -// Average computes the rounded average of corresponding elements. +// AverageMasked computes the rounded average of corresponding elements. // // Asm: VPAVGB, CPU Feature: AVX512EVEX func (x Uint8x16) AverageMasked(y Uint8x16, z Mask8x16) Uint8x16 -// Average computes the rounded average of corresponding elements. +// AverageMasked computes the rounded average of corresponding elements. // // Asm: VPAVGB, CPU Feature: AVX512EVEX func (x Uint8x32) AverageMasked(y Uint8x32, z Mask8x32) Uint8x32 -// Average computes the rounded average of corresponding elements. +// AverageMasked computes the rounded average of corresponding elements. // // Asm: VPAVGB, CPU Feature: AVX512EVEX func (x Uint8x64) AverageMasked(y Uint8x64, z Mask8x64) Uint8x64 -// Average computes the rounded average of corresponding elements. +// AverageMasked computes the rounded average of corresponding elements. // // Asm: VPAVGW, CPU Feature: AVX512EVEX func (x Uint16x8) AverageMasked(y Uint16x8, z Mask16x8) Uint16x8 -// Average computes the rounded average of corresponding elements. +// AverageMasked computes the rounded average of corresponding elements. // // Asm: VPAVGW, CPU Feature: AVX512EVEX func (x Uint16x16) AverageMasked(y Uint16x16, z Mask16x16) Uint16x16 -// Average computes the rounded average of corresponding elements. +// AverageMasked computes the rounded average of corresponding elements. 
// // Asm: VPAVGW, CPU Feature: AVX512EVEX func (x Uint16x32) AverageMasked(y Uint16x32, z Mask16x32) Uint16x32 @@ -1030,32 +1030,32 @@ func (x Float64x8) CeilWithPrecision(imm uint8) Float64x8 /* CeilWithPrecisionMasked */ -// CeilWithPrecision rounds elements up with specified precision, masked. +// CeilWithPrecisionMasked rounds elements up with specified precision, masked. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) CeilWithPrecisionMasked(imm uint8, y Mask32x4) Float32x4 -// CeilWithPrecision rounds elements up with specified precision, masked. +// CeilWithPrecisionMasked rounds elements up with specified precision, masked. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) CeilWithPrecisionMasked(imm uint8, y Mask32x8) Float32x8 -// CeilWithPrecision rounds elements up with specified precision, masked. +// CeilWithPrecisionMasked rounds elements up with specified precision, masked. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) CeilWithPrecisionMasked(imm uint8, y Mask32x16) Float32x16 -// CeilWithPrecision rounds elements up with specified precision, masked. +// CeilWithPrecisionMasked rounds elements up with specified precision, masked. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) CeilWithPrecisionMasked(imm uint8, y Mask64x2) Float64x2 -// CeilWithPrecision rounds elements up with specified precision, masked. +// CeilWithPrecisionMasked rounds elements up with specified precision, masked. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) CeilWithPrecisionMasked(imm uint8, y Mask64x4) Float64x4 -// CeilWithPrecision rounds elements up with specified precision, masked. +// CeilWithPrecisionMasked rounds elements up with specified precision, masked. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) CeilWithPrecisionMasked(imm uint8, y Mask64x8) Float64x8 @@ -1094,32 +1094,32 @@ func (x Float64x8) DiffWithCeilWithPrecision(imm uint8) Float64x8 /* DiffWithCeilWithPrecisionMasked */ -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) DiffWithCeilWithPrecisionMasked(imm uint8, y Mask32x4) Float32x4 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) DiffWithCeilWithPrecisionMasked(imm uint8, y Mask32x8) Float32x8 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) DiffWithCeilWithPrecisionMasked(imm uint8, y Mask32x16) Float32x16 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) DiffWithCeilWithPrecisionMasked(imm uint8, y Mask64x2) Float64x2 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. 
// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) DiffWithCeilWithPrecisionMasked(imm uint8, y Mask64x4) Float64x4 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) DiffWithCeilWithPrecisionMasked(imm uint8, y Mask64x8) Float64x8 @@ -1158,32 +1158,32 @@ func (x Float64x8) DiffWithFloorWithPrecision(imm uint8) Float64x8 /* DiffWithFloorWithPrecisionMasked */ -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) DiffWithFloorWithPrecisionMasked(imm uint8, y Mask32x4) Float32x4 -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) DiffWithFloorWithPrecisionMasked(imm uint8, y Mask32x8) Float32x8 -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) DiffWithFloorWithPrecisionMasked(imm uint8, y Mask32x16) Float32x16 -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) DiffWithFloorWithPrecisionMasked(imm uint8, y Mask64x2) Float64x2 -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) DiffWithFloorWithPrecisionMasked(imm uint8, y Mask64x4) Float64x4 -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) DiffWithFloorWithPrecisionMasked(imm uint8, y Mask64x8) Float64x8 @@ -1222,32 +1222,32 @@ func (x Float64x8) DiffWithRoundWithPrecision(imm uint8) Float64x8 /* DiffWithRoundWithPrecisionMasked */ -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) DiffWithRoundWithPrecisionMasked(imm uint8, y Mask32x4) Float32x4 -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) DiffWithRoundWithPrecisionMasked(imm uint8, y Mask32x8) Float32x8 -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. 
// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) DiffWithRoundWithPrecisionMasked(imm uint8, y Mask32x16) Float32x16 -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) DiffWithRoundWithPrecisionMasked(imm uint8, y Mask64x2) Float64x2 -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) DiffWithRoundWithPrecisionMasked(imm uint8, y Mask64x4) Float64x4 -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) DiffWithRoundWithPrecisionMasked(imm uint8, y Mask64x8) Float64x8 @@ -1286,32 +1286,32 @@ func (x Float64x8) DiffWithTruncWithPrecision(imm uint8) Float64x8 /* DiffWithTruncWithPrecisionMasked */ -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) DiffWithTruncWithPrecisionMasked(imm uint8, y Mask32x4) Float32x4 -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) DiffWithTruncWithPrecisionMasked(imm uint8, y Mask32x8) Float32x8 -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) DiffWithTruncWithPrecisionMasked(imm uint8, y Mask32x16) Float32x16 -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) DiffWithTruncWithPrecisionMasked(imm uint8, y Mask64x2) Float64x2 -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) DiffWithTruncWithPrecisionMasked(imm uint8, y Mask64x4) Float64x4 -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) DiffWithTruncWithPrecisionMasked(imm uint8, y Mask64x8) Float64x8 @@ -1350,32 +1350,32 @@ func (x Float64x8) Div(y Float64x8) Float64x8 /* DivMasked */ -// Div divides elements of two vectors. +// DivMasked divides elements of two vectors. 
// // Asm: VDIVPS, CPU Feature: AVX512EVEX func (x Float32x4) DivMasked(y Float32x4, z Mask32x4) Float32x4 -// Div divides elements of two vectors. +// DivMasked divides elements of two vectors. // // Asm: VDIVPS, CPU Feature: AVX512EVEX func (x Float32x8) DivMasked(y Float32x8, z Mask32x8) Float32x8 -// Div divides elements of two vectors. +// DivMasked divides elements of two vectors. // // Asm: VDIVPS, CPU Feature: AVX512EVEX func (x Float32x16) DivMasked(y Float32x16, z Mask32x16) Float32x16 -// Div divides elements of two vectors. +// DivMasked divides elements of two vectors. // // Asm: VDIVPD, CPU Feature: AVX512EVEX func (x Float64x2) DivMasked(y Float64x2, z Mask64x2) Float64x2 -// Div divides elements of two vectors. +// DivMasked divides elements of two vectors. // // Asm: VDIVPD, CPU Feature: AVX512EVEX func (x Float64x4) DivMasked(y Float64x4, z Mask64x4) Float64x4 -// Div divides elements of two vectors. +// DivMasked divides elements of two vectors. // // Asm: VDIVPD, CPU Feature: AVX512EVEX func (x Float64x8) DivMasked(y Float64x8, z Mask64x8) Float64x8 @@ -1541,152 +1541,152 @@ func (x Uint64x8) Equal(y Uint64x8) Mask64x8 /* EqualMasked */ -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x4) EqualMasked(y Float32x4, z Mask32x4) Mask32x4 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x8) EqualMasked(y Float32x8, z Mask32x8) Mask32x8 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) EqualMasked(y Float32x16, z Mask32x16) Mask32x16 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x2) EqualMasked(y Float64x2, z Mask64x2) Mask64x2 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x4) EqualMasked(y Float64x4, z Mask64x4) Mask64x4 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) EqualMasked(y Float64x8, z Mask64x8) Mask64x8 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x16) EqualMasked(y Int8x16, z Mask8x16) Mask8x16 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x32) EqualMasked(y Int8x32, z Mask8x32) Mask8x32 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) EqualMasked(y Int8x64, z Mask8x64) Mask8x64 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x8) EqualMasked(y Int16x8, z Mask16x8) Mask16x8 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x16) EqualMasked(y Int16x16, z Mask16x16) Mask16x16 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. 
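// Illustrative sketch: the comparison methods such as Equal return mask vectors, and those
// masks are what the *Masked arithmetic methods consume, so the typical pattern is "compare,
// then operate on the selected lanes". For example, dividing only the lanes picked by a mask:

func divSelected(num, den Float32x8, m Mask32x8) Float32x8 {
	// Lanes selected by m receive num/den; m would normally come from a
	// comparison such as Equal or Greater on vectors of the same shape.
	return num.DivMasked(den, m)
}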
// // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) EqualMasked(y Int16x32, z Mask16x32) Mask16x32 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x4) EqualMasked(y Int32x4, z Mask32x4) Mask32x4 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x8) EqualMasked(y Int32x8, z Mask32x8) Mask32x8 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) EqualMasked(y Int32x16, z Mask32x16) Mask32x16 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x2) EqualMasked(y Int64x2, z Mask64x2) Mask64x2 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x4) EqualMasked(y Int64x4, z Mask64x4) Mask64x4 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) EqualMasked(y Int64x8, z Mask64x8) Mask64x8 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) EqualMasked(y Uint8x16, z Mask8x16) Mask8x16 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) EqualMasked(y Uint8x32, z Mask8x32) Mask8x32 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) EqualMasked(y Uint8x64, z Mask8x64) Mask8x64 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) EqualMasked(y Uint16x8, z Mask16x8) Mask16x8 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) EqualMasked(y Uint16x16, z Mask16x16) Mask16x16 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) EqualMasked(y Uint16x32, z Mask16x32) Mask16x32 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) EqualMasked(y Uint32x4, z Mask32x4) Mask32x4 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) EqualMasked(y Uint32x8, z Mask32x8) Mask32x8 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) EqualMasked(y Uint32x16, z Mask32x16) Mask32x16 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) EqualMasked(y Uint64x2, z Mask64x2) Mask64x2 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) EqualMasked(y Uint64x4, z Mask64x4) Mask64x4 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. 
// // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) EqualMasked(y Uint64x8, z Mask64x8) Mask64x8 @@ -1747,32 +1747,32 @@ func (x Float64x8) FloorWithPrecision(imm uint8) Float64x8 /* FloorWithPrecisionMasked */ -// FloorWithPrecision rounds elements down with specified precision, masked. +// FloorWithPrecisionMasked rounds elements down with specified precision, masked. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) FloorWithPrecisionMasked(imm uint8, y Mask32x4) Float32x4 -// FloorWithPrecision rounds elements down with specified precision, masked. +// FloorWithPrecisionMasked rounds elements down with specified precision, masked. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) FloorWithPrecisionMasked(imm uint8, y Mask32x8) Float32x8 -// FloorWithPrecision rounds elements down with specified precision, masked. +// FloorWithPrecisionMasked rounds elements down with specified precision, masked. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) FloorWithPrecisionMasked(imm uint8, y Mask32x16) Float32x16 -// FloorWithPrecision rounds elements down with specified precision, masked. +// FloorWithPrecisionMasked rounds elements down with specified precision, masked. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) FloorWithPrecisionMasked(imm uint8, y Mask64x2) Float64x2 -// FloorWithPrecision rounds elements down with specified precision, masked. +// FloorWithPrecisionMasked rounds elements down with specified precision, masked. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) FloorWithPrecisionMasked(imm uint8, y Mask64x4) Float64x4 -// FloorWithPrecision rounds elements down with specified precision, masked. +// FloorWithPrecisionMasked rounds elements down with specified precision, masked. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) FloorWithPrecisionMasked(imm uint8, y Mask64x8) Float64x8 @@ -1811,32 +1811,32 @@ func (x Float64x8) FusedMultiplyAdd(y Float64x8, z Float64x8) Float64x8 /* FusedMultiplyAddMasked */ -// FusedMultiplyAdd performs `(v1 * v2) + v3`. +// FusedMultiplyAddMasked performs `(v1 * v2) + v3`. // // Asm: VFMADD213PS, CPU Feature: AVX512EVEX func (x Float32x4) FusedMultiplyAddMasked(y Float32x4, z Float32x4, u Mask32x4) Float32x4 -// FusedMultiplyAdd performs `(v1 * v2) + v3`. +// FusedMultiplyAddMasked performs `(v1 * v2) + v3`. // // Asm: VFMADD213PS, CPU Feature: AVX512EVEX func (x Float32x8) FusedMultiplyAddMasked(y Float32x8, z Float32x8, u Mask32x8) Float32x8 -// FusedMultiplyAdd performs `(v1 * v2) + v3`. +// FusedMultiplyAddMasked performs `(v1 * v2) + v3`. // // Asm: VFMADD213PS, CPU Feature: AVX512EVEX func (x Float32x16) FusedMultiplyAddMasked(y Float32x16, z Float32x16, u Mask32x16) Float32x16 -// FusedMultiplyAdd performs `(v1 * v2) + v3`. +// FusedMultiplyAddMasked performs `(v1 * v2) + v3`. // // Asm: VFMADD213PD, CPU Feature: AVX512EVEX func (x Float64x2) FusedMultiplyAddMasked(y Float64x2, z Float64x2, u Mask64x2) Float64x2 -// FusedMultiplyAdd performs `(v1 * v2) + v3`. +// FusedMultiplyAddMasked performs `(v1 * v2) + v3`. // // Asm: VFMADD213PD, CPU Feature: AVX512EVEX func (x Float64x4) FusedMultiplyAddMasked(y Float64x4, z Float64x4, u Mask64x4) Float64x4 -// FusedMultiplyAdd performs `(v1 * v2) + v3`. +// FusedMultiplyAddMasked performs `(v1 * v2) + v3`. 
// // Asm: VFMADD213PD, CPU Feature: AVX512EVEX func (x Float64x8) FusedMultiplyAddMasked(y Float64x8, z Float64x8, u Mask64x8) Float64x8 @@ -1875,32 +1875,32 @@ func (x Float64x8) FusedMultiplyAddSub(y Float64x8, z Float64x8) Float64x8 /* FusedMultiplyAddSubMasked */ -// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// FusedMultiplyAddSubMasked performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // // Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX func (x Float32x4) FusedMultiplyAddSubMasked(y Float32x4, z Float32x4, u Mask32x4) Float32x4 -// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// FusedMultiplyAddSubMasked performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // // Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX func (x Float32x8) FusedMultiplyAddSubMasked(y Float32x8, z Float32x8, u Mask32x8) Float32x8 -// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// FusedMultiplyAddSubMasked performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // // Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX func (x Float32x16) FusedMultiplyAddSubMasked(y Float32x16, z Float32x16, u Mask32x16) Float32x16 -// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// FusedMultiplyAddSubMasked performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // // Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX func (x Float64x2) FusedMultiplyAddSubMasked(y Float64x2, z Float64x2, u Mask64x2) Float64x2 -// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// FusedMultiplyAddSubMasked performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // // Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX func (x Float64x4) FusedMultiplyAddSubMasked(y Float64x4, z Float64x4, u Mask64x4) Float64x4 -// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// FusedMultiplyAddSubMasked performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // // Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX func (x Float64x8) FusedMultiplyAddSubMasked(y Float64x8, z Float64x8, u Mask64x8) Float64x8 @@ -1939,32 +1939,32 @@ func (x Float64x8) FusedMultiplySubAdd(y Float64x8, z Float64x8) Float64x8 /* FusedMultiplySubAddMasked */ -// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// FusedMultiplySubAddMasked performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // // Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX func (x Float32x4) FusedMultiplySubAddMasked(y Float32x4, z Float32x4, u Mask32x4) Float32x4 -// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// FusedMultiplySubAddMasked performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. 
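// Illustrative sketch: FusedMultiplyAddMasked computes (x*y)+z with a single rounding for the
// lanes selected by the mask, so a masked multiply-accumulate step looks like:

func fmaSelected(x, y, acc Float32x8, m Mask32x8) Float32x8 {
	// Selected lanes become x*y + acc; the other lanes follow the
	// package's masked-operation semantics.
	return x.FusedMultiplyAddMasked(y, acc, m)
}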
// // Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX func (x Float32x8) FusedMultiplySubAddMasked(y Float32x8, z Float32x8, u Mask32x8) Float32x8 -// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// FusedMultiplySubAddMasked performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // // Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX func (x Float32x16) FusedMultiplySubAddMasked(y Float32x16, z Float32x16, u Mask32x16) Float32x16 -// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// FusedMultiplySubAddMasked performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // // Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX func (x Float64x2) FusedMultiplySubAddMasked(y Float64x2, z Float64x2, u Mask64x2) Float64x2 -// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// FusedMultiplySubAddMasked performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // // Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX func (x Float64x4) FusedMultiplySubAddMasked(y Float64x4, z Float64x4, u Mask64x4) Float64x4 -// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// FusedMultiplySubAddMasked performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // // Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX func (x Float64x8) FusedMultiplySubAddMasked(y Float64x8, z Float64x8, u Mask64x8) Float64x8 @@ -1995,67 +1995,67 @@ func (x Uint8x32) GaloisFieldAffineTransform(y Uint64x4, b uint8) Uint8x32 // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX func (x Uint8x64) GaloisFieldAffineTransform(y Uint64x8, b uint8) Uint8x64 -/* GaloisFieldAffineTransformInversed */ +/* GaloisFieldAffineTransformInverse */ -// GaloisFieldAffineTransformMasked computes an affine transformation in GF(2^8), -// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: +// GaloisFieldAffineTransformInverse computes an affine transformation in GF(2^8), +// with x inverted with respect to reduction polynomial x^8 + x^4 + x^3 + x + 1: // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX -func (x Uint8x16) GaloisFieldAffineTransformInversed(y Uint64x2, b uint8) Uint8x16 +func (x Uint8x16) GaloisFieldAffineTransformInverse(y Uint64x2, b uint8) Uint8x16 -// GaloisFieldAffineTransformMasked computes an affine transformation in GF(2^8), -// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: +// GaloisFieldAffineTransformInverse computes an affine transformation in GF(2^8), +// with x inverted with respect to reduction polynomial x^8 + x^4 + x^3 + x + 1: // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. 
// // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX -func (x Uint8x32) GaloisFieldAffineTransformInversed(y Uint64x4, b uint8) Uint8x32 +func (x Uint8x32) GaloisFieldAffineTransformInverse(y Uint64x4, b uint8) Uint8x32 -// GaloisFieldAffineTransformMasked computes an affine transformation in GF(2^8), -// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: +// GaloisFieldAffineTransformInverse computes an affine transformation in GF(2^8), +// with x inverted with respect to reduction polynomial x^8 + x^4 + x^3 + x + 1: // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX -func (x Uint8x64) GaloisFieldAffineTransformInversed(y Uint64x8, b uint8) Uint8x64 +func (x Uint8x64) GaloisFieldAffineTransformInverse(y Uint64x8, b uint8) Uint8x64 -/* GaloisFieldAffineTransformInversedMasked */ +/* GaloisFieldAffineTransformInverseMasked */ -// GaloisFieldAffineTransformMasked computes an affine transformation in GF(2^8), -// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: +// GaloisFieldAffineTransformInverseMasked computes an affine transformation in GF(2^8), +// with x inverted with respect to reduction polynomial x^8 + x^4 + x^3 + x + 1: // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX -func (x Uint8x16) GaloisFieldAffineTransformInversedMasked(y Uint64x2, b uint8, m Mask8x16) Uint8x16 +func (x Uint8x16) GaloisFieldAffineTransformInverseMasked(y Uint64x2, b uint8, m Mask8x16) Uint8x16 -// GaloisFieldAffineTransformMasked computes an affine transformation in GF(2^8), -// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: +// GaloisFieldAffineTransformInverseMasked computes an affine transformation in GF(2^8), +// with x inverted with respect to reduction polynomial x^8 + x^4 + x^3 + x + 1: // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX -func (x Uint8x32) GaloisFieldAffineTransformInversedMasked(y Uint64x4, b uint8, m Mask8x32) Uint8x32 +func (x Uint8x32) GaloisFieldAffineTransformInverseMasked(y Uint64x4, b uint8, m Mask8x32) Uint8x32 -// GaloisFieldAffineTransformMasked computes an affine transformation in GF(2^8), -// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: +// GaloisFieldAffineTransformInverseMasked computes an affine transformation in GF(2^8), +// with x inverted with respect to reduction polynomial x^8 + x^4 + x^3 + x + 1: // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. 
// // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX -func (x Uint8x64) GaloisFieldAffineTransformInversedMasked(y Uint64x8, b uint8, m Mask8x64) Uint8x64 +func (x Uint8x64) GaloisFieldAffineTransformInverseMasked(y Uint64x8, b uint8, m Mask8x64) Uint8x64 /* GaloisFieldAffineTransformMasked */ -// GaloisFieldAffineTransform computes an affine transformation in GF(2^8): +// GaloisFieldAffineTransformMasked computes an affine transformation in GF(2^8): // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. @@ -2063,7 +2063,7 @@ func (x Uint8x64) GaloisFieldAffineTransformInversedMasked(y Uint64x8, b uint8, // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX func (x Uint8x16) GaloisFieldAffineTransformMasked(y Uint64x2, b uint8, m Mask8x16) Uint8x16 -// GaloisFieldAffineTransform computes an affine transformation in GF(2^8): +// GaloisFieldAffineTransformMasked computes an affine transformation in GF(2^8): // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. @@ -2071,7 +2071,7 @@ func (x Uint8x16) GaloisFieldAffineTransformMasked(y Uint64x2, b uint8, m Mask8x // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX func (x Uint8x32) GaloisFieldAffineTransformMasked(y Uint64x4, b uint8, m Mask8x32) Uint8x32 -// GaloisFieldAffineTransform computes an affine transformation in GF(2^8): +// GaloisFieldAffineTransformMasked computes an affine transformation in GF(2^8): // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. @@ -2101,19 +2101,19 @@ func (x Uint8x64) GaloisFieldMul(y Uint8x64) Uint8x64 /* GaloisFieldMulMasked */ -// GaloisFieldMul computes element-wise GF(2^8) multiplication with +// GaloisFieldMulMasked computes element-wise GF(2^8) multiplication with // reduction polynomial x^8 + x^4 + x^3 + x + 1. // // Asm: VGF2P8MULB, CPU Feature: AVX512EVEX func (x Uint8x16) GaloisFieldMulMasked(y Uint8x16, z Mask8x16) Uint8x16 -// GaloisFieldMul computes element-wise GF(2^8) multiplication with +// GaloisFieldMulMasked computes element-wise GF(2^8) multiplication with // reduction polynomial x^8 + x^4 + x^3 + x + 1. // // Asm: VGF2P8MULB, CPU Feature: AVX512EVEX func (x Uint8x32) GaloisFieldMulMasked(y Uint8x32, z Mask8x32) Uint8x32 -// GaloisFieldMul computes element-wise GF(2^8) multiplication with +// GaloisFieldMulMasked computes element-wise GF(2^8) multiplication with // reduction polynomial x^8 + x^4 + x^3 + x + 1. // // Asm: VGF2P8MULB, CPU Feature: AVX512EVEX @@ -2519,304 +2519,304 @@ func (x Uint64x8) GreaterEqual(y Uint64x8) Mask64x8 /* GreaterEqualMasked */ -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x4) GreaterEqualMasked(y Float32x4, z Mask32x4) Mask32x4 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. 
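// Illustrative sketch: GaloisFieldMul and GaloisFieldMulMasked treat each byte lane as an
// element of GF(2^8) reduced by x^8 + x^4 + x^3 + x + 1, the same polynomial AES uses, so a
// masked field multiplication is simply:

func gfMulSelected(a, b Uint8x16, m Mask8x16) Uint8x16 {
	// Each selected byte lane becomes the carry-less product of a and b,
	// reduced modulo the polynomial above.
	return a.GaloisFieldMulMasked(b, m)
}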
// // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x8) GreaterEqualMasked(y Float32x8, z Mask32x8) Mask32x8 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) GreaterEqualMasked(y Float32x16, z Mask32x16) Mask32x16 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x2) GreaterEqualMasked(y Float64x2, z Mask64x2) Mask64x2 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x4) GreaterEqualMasked(y Float64x4, z Mask64x4) Mask64x4 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) GreaterEqualMasked(y Float64x8, z Mask64x8) Mask64x8 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x16) GreaterEqualMasked(y Int8x16, z Mask8x16) Mask8x16 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x32) GreaterEqualMasked(y Int8x32, z Mask8x32) Mask8x32 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) GreaterEqualMasked(y Int8x64, z Mask8x64) Mask8x64 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x8) GreaterEqualMasked(y Int16x8, z Mask16x8) Mask16x8 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x16) GreaterEqualMasked(y Int16x16, z Mask16x16) Mask16x16 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) GreaterEqualMasked(y Int16x32, z Mask16x32) Mask16x32 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x4) GreaterEqualMasked(y Int32x4, z Mask32x4) Mask32x4 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x8) GreaterEqualMasked(y Int32x8, z Mask32x8) Mask32x8 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) GreaterEqualMasked(y Int32x16, z Mask32x16) Mask32x16 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x2) GreaterEqualMasked(y Int64x2, z Mask64x2) Mask64x2 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x4) GreaterEqualMasked(y Int64x4, z Mask64x4) Mask64x4 -// GreaterEqual compares for greater than or equal. 
+// GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) GreaterEqualMasked(y Int64x8, z Mask64x8) Mask64x8 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) GreaterEqualMasked(y Uint8x16, z Mask8x16) Mask8x16 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) GreaterEqualMasked(y Uint8x32, z Mask8x32) Mask8x32 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) GreaterEqualMasked(y Uint8x64, z Mask8x64) Mask8x64 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) GreaterEqualMasked(y Uint16x8, z Mask16x8) Mask16x8 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) GreaterEqualMasked(y Uint16x16, z Mask16x16) Mask16x16 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) GreaterEqualMasked(y Uint16x32, z Mask16x32) Mask16x32 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) GreaterEqualMasked(y Uint32x4, z Mask32x4) Mask32x4 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) GreaterEqualMasked(y Uint32x8, z Mask32x8) Mask32x8 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) GreaterEqualMasked(y Uint32x16, z Mask32x16) Mask32x16 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) GreaterEqualMasked(y Uint64x2, z Mask64x2) Mask64x2 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) GreaterEqualMasked(y Uint64x4, z Mask64x4) Mask64x4 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) GreaterEqualMasked(y Uint64x8, z Mask64x8) Mask64x8 /* GreaterMasked */ -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x4) GreaterMasked(y Float32x4, z Mask32x4) Mask32x4 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x8) GreaterMasked(y Float32x8, z Mask32x8) Mask32x8 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) GreaterMasked(y Float32x16, z Mask32x16) Mask32x16 -// Greater compares for greater than. 
+// GreaterMasked compares for greater than. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x2) GreaterMasked(y Float64x2, z Mask64x2) Mask64x2 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x4) GreaterMasked(y Float64x4, z Mask64x4) Mask64x4 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) GreaterMasked(y Float64x8, z Mask64x8) Mask64x8 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x16) GreaterMasked(y Int8x16, z Mask8x16) Mask8x16 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x32) GreaterMasked(y Int8x32, z Mask8x32) Mask8x32 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) GreaterMasked(y Int8x64, z Mask8x64) Mask8x64 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x8) GreaterMasked(y Int16x8, z Mask16x8) Mask16x8 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x16) GreaterMasked(y Int16x16, z Mask16x16) Mask16x16 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) GreaterMasked(y Int16x32, z Mask16x32) Mask16x32 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x4) GreaterMasked(y Int32x4, z Mask32x4) Mask32x4 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x8) GreaterMasked(y Int32x8, z Mask32x8) Mask32x8 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) GreaterMasked(y Int32x16, z Mask32x16) Mask32x16 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x2) GreaterMasked(y Int64x2, z Mask64x2) Mask64x2 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x4) GreaterMasked(y Int64x4, z Mask64x4) Mask64x4 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) GreaterMasked(y Int64x8, z Mask64x8) Mask64x8 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) GreaterMasked(y Uint8x16, z Mask8x16) Mask8x16 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) GreaterMasked(y Uint8x32, z Mask8x32) Mask8x32 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) GreaterMasked(y Uint8x64, z Mask8x64) Mask8x64 -// Greater compares for greater than. +// GreaterMasked compares for greater than. 
// // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) GreaterMasked(y Uint16x8, z Mask16x8) Mask16x8 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) GreaterMasked(y Uint16x16, z Mask16x16) Mask16x16 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) GreaterMasked(y Uint16x32, z Mask16x32) Mask16x32 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) GreaterMasked(y Uint32x4, z Mask32x4) Mask32x4 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) GreaterMasked(y Uint32x8, z Mask32x8) Mask32x8 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) GreaterMasked(y Uint32x16, z Mask32x16) Mask32x16 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) GreaterMasked(y Uint64x2, z Mask64x2) Mask64x2 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) GreaterMasked(y Uint64x4, z Mask64x4) Mask64x4 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) GreaterMasked(y Uint64x8, z Mask64x8) Mask64x8 @@ -2855,32 +2855,32 @@ func (x Float64x8) IsNan(y Float64x8) Mask64x8 /* IsNanMasked */ -// IsNan checks if elements are NaN. Use as x.IsNan(x). +// IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x4) IsNanMasked(y Float32x4, z Mask32x4) Mask32x4 -// IsNan checks if elements are NaN. Use as x.IsNan(x). +// IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x8) IsNanMasked(y Float32x8, z Mask32x8) Mask32x8 -// IsNan checks if elements are NaN. Use as x.IsNan(x). +// IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) IsNanMasked(y Float32x16, z Mask32x16) Mask32x16 -// IsNan checks if elements are NaN. Use as x.IsNan(x). +// IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x2) IsNanMasked(y Float64x2, z Mask64x2) Mask64x2 -// IsNan checks if elements are NaN. Use as x.IsNan(x). +// IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x4) IsNanMasked(y Float64x4, z Mask64x4) Mask64x4 -// IsNan checks if elements are NaN. Use as x.IsNan(x). +// IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) IsNanMasked(y Float64x8, z Mask64x8) Mask64x8 @@ -3191,304 +3191,304 @@ func (x Uint64x8) LessEqual(y Uint64x8) Mask64x8 /* LessEqualMasked */ -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x4) LessEqualMasked(y Float32x4, z Mask32x4) Mask32x4 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. 
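// Illustrative sketch: IsNan is applied to the receiver itself, per the "Use as x.IsNan(x)"
// note above, because NaN is the only value that compares unordered with itself; the masked
// form simply restricts that check to the selected lanes:

func nanLanesSelected(x Float64x4, m Mask64x4) Mask64x4 {
	// Lanes selected by m report whether x holds a NaN in that lane.
	return x.IsNanMasked(x, m)
}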
// // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x8) LessEqualMasked(y Float32x8, z Mask32x8) Mask32x8 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) LessEqualMasked(y Float32x16, z Mask32x16) Mask32x16 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x2) LessEqualMasked(y Float64x2, z Mask64x2) Mask64x2 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x4) LessEqualMasked(y Float64x4, z Mask64x4) Mask64x4 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) LessEqualMasked(y Float64x8, z Mask64x8) Mask64x8 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x16) LessEqualMasked(y Int8x16, z Mask8x16) Mask8x16 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x32) LessEqualMasked(y Int8x32, z Mask8x32) Mask8x32 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) LessEqualMasked(y Int8x64, z Mask8x64) Mask8x64 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x8) LessEqualMasked(y Int16x8, z Mask16x8) Mask16x8 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x16) LessEqualMasked(y Int16x16, z Mask16x16) Mask16x16 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) LessEqualMasked(y Int16x32, z Mask16x32) Mask16x32 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x4) LessEqualMasked(y Int32x4, z Mask32x4) Mask32x4 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x8) LessEqualMasked(y Int32x8, z Mask32x8) Mask32x8 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) LessEqualMasked(y Int32x16, z Mask32x16) Mask32x16 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x2) LessEqualMasked(y Int64x2, z Mask64x2) Mask64x2 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x4) LessEqualMasked(y Int64x4, z Mask64x4) Mask64x4 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) LessEqualMasked(y Int64x8, z Mask64x8) Mask64x8 -// LessEqual compares for less than or equal. 
+// LessEqualMasked compares for less than or equal. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) LessEqualMasked(y Uint8x16, z Mask8x16) Mask8x16 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) LessEqualMasked(y Uint8x32, z Mask8x32) Mask8x32 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) LessEqualMasked(y Uint8x64, z Mask8x64) Mask8x64 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) LessEqualMasked(y Uint16x8, z Mask16x8) Mask16x8 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) LessEqualMasked(y Uint16x16, z Mask16x16) Mask16x16 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) LessEqualMasked(y Uint16x32, z Mask16x32) Mask16x32 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) LessEqualMasked(y Uint32x4, z Mask32x4) Mask32x4 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) LessEqualMasked(y Uint32x8, z Mask32x8) Mask32x8 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) LessEqualMasked(y Uint32x16, z Mask32x16) Mask32x16 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) LessEqualMasked(y Uint64x2, z Mask64x2) Mask64x2 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) LessEqualMasked(y Uint64x4, z Mask64x4) Mask64x4 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) LessEqualMasked(y Uint64x8, z Mask64x8) Mask64x8 /* LessMasked */ -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x4) LessMasked(y Float32x4, z Mask32x4) Mask32x4 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x8) LessMasked(y Float32x8, z Mask32x8) Mask32x8 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) LessMasked(y Float32x16, z Mask32x16) Mask32x16 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x2) LessMasked(y Float64x2, z Mask64x2) Mask64x2 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x4) LessMasked(y Float64x4, z Mask64x4) Mask64x4 -// Less compares for less than. +// LessMasked compares for less than. 
// // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) LessMasked(y Float64x8, z Mask64x8) Mask64x8 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x16) LessMasked(y Int8x16, z Mask8x16) Mask8x16 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x32) LessMasked(y Int8x32, z Mask8x32) Mask8x32 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) LessMasked(y Int8x64, z Mask8x64) Mask8x64 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x8) LessMasked(y Int16x8, z Mask16x8) Mask16x8 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x16) LessMasked(y Int16x16, z Mask16x16) Mask16x16 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) LessMasked(y Int16x32, z Mask16x32) Mask16x32 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x4) LessMasked(y Int32x4, z Mask32x4) Mask32x4 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x8) LessMasked(y Int32x8, z Mask32x8) Mask32x8 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) LessMasked(y Int32x16, z Mask32x16) Mask32x16 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x2) LessMasked(y Int64x2, z Mask64x2) Mask64x2 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x4) LessMasked(y Int64x4, z Mask64x4) Mask64x4 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) LessMasked(y Int64x8, z Mask64x8) Mask64x8 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) LessMasked(y Uint8x16, z Mask8x16) Mask8x16 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) LessMasked(y Uint8x32, z Mask8x32) Mask8x32 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) LessMasked(y Uint8x64, z Mask8x64) Mask8x64 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) LessMasked(y Uint16x8, z Mask16x8) Mask16x8 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) LessMasked(y Uint16x16, z Mask16x16) Mask16x16 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) LessMasked(y Uint16x32, z Mask16x32) Mask16x32 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) LessMasked(y Uint32x4, z Mask32x4) Mask32x4 -// Less compares for less than. +// LessMasked compares for less than. 
// // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) LessMasked(y Uint32x8, z Mask32x8) Mask32x8 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) LessMasked(y Uint32x16, z Mask32x16) Mask32x16 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) LessMasked(y Uint64x2, z Mask64x2) Mask64x2 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) LessMasked(y Uint64x4, z Mask64x4) Mask64x4 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) LessMasked(y Uint64x8, z Mask64x8) Mask64x8 @@ -3647,152 +3647,152 @@ func (x Uint64x8) Max(y Uint64x8) Uint64x8 /* MaxMasked */ -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VMAXPS, CPU Feature: AVX512EVEX func (x Float32x4) MaxMasked(y Float32x4, z Mask32x4) Float32x4 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VMAXPS, CPU Feature: AVX512EVEX func (x Float32x8) MaxMasked(y Float32x8, z Mask32x8) Float32x8 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VMAXPS, CPU Feature: AVX512EVEX func (x Float32x16) MaxMasked(y Float32x16, z Mask32x16) Float32x16 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VMAXPD, CPU Feature: AVX512EVEX func (x Float64x2) MaxMasked(y Float64x2, z Mask64x2) Float64x2 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VMAXPD, CPU Feature: AVX512EVEX func (x Float64x4) MaxMasked(y Float64x4, z Mask64x4) Float64x4 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VMAXPD, CPU Feature: AVX512EVEX func (x Float64x8) MaxMasked(y Float64x8, z Mask64x8) Float64x8 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXSB, CPU Feature: AVX512EVEX func (x Int8x16) MaxMasked(y Int8x16, z Mask8x16) Int8x16 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXSB, CPU Feature: AVX512EVEX func (x Int8x32) MaxMasked(y Int8x32, z Mask8x32) Int8x32 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXSB, CPU Feature: AVX512EVEX func (x Int8x64) MaxMasked(y Int8x64, z Mask8x64) Int8x64 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXSW, CPU Feature: AVX512EVEX func (x Int16x8) MaxMasked(y Int16x8, z Mask16x8) Int16x8 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXSW, CPU Feature: AVX512EVEX func (x Int16x16) MaxMasked(y Int16x16, z Mask16x16) Int16x16 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. 
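// Illustrative sketch: because the masked comparisons both consume and produce masks, they can
// be chained. Assuming an unmasked Less that mirrors LessMasked, and the usual AVX-512
// behavior that lanes outside the input mask come back unset, a bounds check could look like:

func inRangeLanes(x, lo, hi Int32x8) Mask32x8 {
	aboveLo := lo.Less(x)            // lanes where lo < x
	return x.LessMasked(hi, aboveLo) // of those, lanes where x < hi
}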
// // Asm: VPMAXSW, CPU Feature: AVX512EVEX func (x Int16x32) MaxMasked(y Int16x32, z Mask16x32) Int16x32 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXSD, CPU Feature: AVX512EVEX func (x Int32x4) MaxMasked(y Int32x4, z Mask32x4) Int32x4 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXSD, CPU Feature: AVX512EVEX func (x Int32x8) MaxMasked(y Int32x8, z Mask32x8) Int32x8 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXSD, CPU Feature: AVX512EVEX func (x Int32x16) MaxMasked(y Int32x16, z Mask32x16) Int32x16 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXSQ, CPU Feature: AVX512EVEX func (x Int64x2) MaxMasked(y Int64x2, z Mask64x2) Int64x2 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXSQ, CPU Feature: AVX512EVEX func (x Int64x4) MaxMasked(y Int64x4, z Mask64x4) Int64x4 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXSQ, CPU Feature: AVX512EVEX func (x Int64x8) MaxMasked(y Int64x8, z Mask64x8) Int64x8 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXUB, CPU Feature: AVX512EVEX func (x Uint8x16) MaxMasked(y Uint8x16, z Mask8x16) Uint8x16 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXUB, CPU Feature: AVX512EVEX func (x Uint8x32) MaxMasked(y Uint8x32, z Mask8x32) Uint8x32 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXUB, CPU Feature: AVX512EVEX func (x Uint8x64) MaxMasked(y Uint8x64, z Mask8x64) Uint8x64 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXUW, CPU Feature: AVX512EVEX func (x Uint16x8) MaxMasked(y Uint16x8, z Mask16x8) Uint16x8 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXUW, CPU Feature: AVX512EVEX func (x Uint16x16) MaxMasked(y Uint16x16, z Mask16x16) Uint16x16 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXUW, CPU Feature: AVX512EVEX func (x Uint16x32) MaxMasked(y Uint16x32, z Mask16x32) Uint16x32 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXUD, CPU Feature: AVX512EVEX func (x Uint32x4) MaxMasked(y Uint32x4, z Mask32x4) Uint32x4 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXUD, CPU Feature: AVX512EVEX func (x Uint32x8) MaxMasked(y Uint32x8, z Mask32x8) Uint32x8 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXUD, CPU Feature: AVX512EVEX func (x Uint32x16) MaxMasked(y Uint32x16, z Mask32x16) Uint32x16 -// Max computes the maximum of corresponding elements. 
+// MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXUQ, CPU Feature: AVX512EVEX func (x Uint64x2) MaxMasked(y Uint64x2, z Mask64x2) Uint64x2 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXUQ, CPU Feature: AVX512EVEX func (x Uint64x4) MaxMasked(y Uint64x4, z Mask64x4) Uint64x4 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXUQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaxMasked(y Uint64x8, z Mask64x8) Uint64x8 @@ -3951,152 +3951,152 @@ func (x Uint64x8) Min(y Uint64x8) Uint64x8 /* MinMasked */ -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VMINPS, CPU Feature: AVX512EVEX func (x Float32x4) MinMasked(y Float32x4, z Mask32x4) Float32x4 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VMINPS, CPU Feature: AVX512EVEX func (x Float32x8) MinMasked(y Float32x8, z Mask32x8) Float32x8 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VMINPS, CPU Feature: AVX512EVEX func (x Float32x16) MinMasked(y Float32x16, z Mask32x16) Float32x16 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VMINPD, CPU Feature: AVX512EVEX func (x Float64x2) MinMasked(y Float64x2, z Mask64x2) Float64x2 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VMINPD, CPU Feature: AVX512EVEX func (x Float64x4) MinMasked(y Float64x4, z Mask64x4) Float64x4 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VMINPD, CPU Feature: AVX512EVEX func (x Float64x8) MinMasked(y Float64x8, z Mask64x8) Float64x8 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VPMINSB, CPU Feature: AVX512EVEX func (x Int8x16) MinMasked(y Int8x16, z Mask8x16) Int8x16 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VPMINSB, CPU Feature: AVX512EVEX func (x Int8x32) MinMasked(y Int8x32, z Mask8x32) Int8x32 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VPMINSB, CPU Feature: AVX512EVEX func (x Int8x64) MinMasked(y Int8x64, z Mask8x64) Int8x64 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VPMINSW, CPU Feature: AVX512EVEX func (x Int16x8) MinMasked(y Int16x8, z Mask16x8) Int16x8 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VPMINSW, CPU Feature: AVX512EVEX func (x Int16x16) MinMasked(y Int16x16, z Mask16x16) Int16x16 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VPMINSW, CPU Feature: AVX512EVEX func (x Int16x32) MinMasked(y Int16x32, z Mask16x32) Int16x32 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. 
// // Asm: VPMINSD, CPU Feature: AVX512EVEX func (x Int32x4) MinMasked(y Int32x4, z Mask32x4) Int32x4 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VPMINSD, CPU Feature: AVX512EVEX func (x Int32x8) MinMasked(y Int32x8, z Mask32x8) Int32x8 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VPMINSD, CPU Feature: AVX512EVEX func (x Int32x16) MinMasked(y Int32x16, z Mask32x16) Int32x16 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VPMINSQ, CPU Feature: AVX512EVEX func (x Int64x2) MinMasked(y Int64x2, z Mask64x2) Int64x2 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VPMINSQ, CPU Feature: AVX512EVEX func (x Int64x4) MinMasked(y Int64x4, z Mask64x4) Int64x4 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VPMINSQ, CPU Feature: AVX512EVEX func (x Int64x8) MinMasked(y Int64x8, z Mask64x8) Int64x8 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VPMINUB, CPU Feature: AVX512EVEX func (x Uint8x16) MinMasked(y Uint8x16, z Mask8x16) Uint8x16 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VPMINUB, CPU Feature: AVX512EVEX func (x Uint8x32) MinMasked(y Uint8x32, z Mask8x32) Uint8x32 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VPMINUB, CPU Feature: AVX512EVEX func (x Uint8x64) MinMasked(y Uint8x64, z Mask8x64) Uint8x64 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VPMINUW, CPU Feature: AVX512EVEX func (x Uint16x8) MinMasked(y Uint16x8, z Mask16x8) Uint16x8 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VPMINUW, CPU Feature: AVX512EVEX func (x Uint16x16) MinMasked(y Uint16x16, z Mask16x16) Uint16x16 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VPMINUW, CPU Feature: AVX512EVEX func (x Uint16x32) MinMasked(y Uint16x32, z Mask16x32) Uint16x32 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VPMINUD, CPU Feature: AVX512EVEX func (x Uint32x4) MinMasked(y Uint32x4, z Mask32x4) Uint32x4 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VPMINUD, CPU Feature: AVX512EVEX func (x Uint32x8) MinMasked(y Uint32x8, z Mask32x8) Uint32x8 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VPMINUD, CPU Feature: AVX512EVEX func (x Uint32x16) MinMasked(y Uint32x16, z Mask32x16) Uint32x16 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VPMINUQ, CPU Feature: AVX512EVEX func (x Uint64x2) MinMasked(y Uint64x2, z Mask64x2) Uint64x2 -// Min computes the minimum of corresponding elements. 
+// MinMasked computes the minimum of corresponding elements. // // Asm: VPMINUQ, CPU Feature: AVX512EVEX func (x Uint64x4) MinMasked(y Uint64x4, z Mask64x4) Uint64x4 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VPMINUQ, CPU Feature: AVX512EVEX func (x Uint64x8) MinMasked(y Uint64x8, z Mask64x8) Uint64x8 @@ -4167,32 +4167,32 @@ func (x Float64x8) MulByPowOf2(y Float64x8) Float64x8 /* MulByPowOf2Masked */ -// MulByPowOf2 multiplies elements by a power of 2. +// MulByPowOf2Masked multiplies elements by a power of 2. // // Asm: VSCALEFPS, CPU Feature: AVX512EVEX func (x Float32x4) MulByPowOf2Masked(y Float32x4, z Mask32x4) Float32x4 -// MulByPowOf2 multiplies elements by a power of 2. +// MulByPowOf2Masked multiplies elements by a power of 2. // // Asm: VSCALEFPS, CPU Feature: AVX512EVEX func (x Float32x8) MulByPowOf2Masked(y Float32x8, z Mask32x8) Float32x8 -// MulByPowOf2 multiplies elements by a power of 2. +// MulByPowOf2Masked multiplies elements by a power of 2. // // Asm: VSCALEFPS, CPU Feature: AVX512EVEX func (x Float32x16) MulByPowOf2Masked(y Float32x16, z Mask32x16) Float32x16 -// MulByPowOf2 multiplies elements by a power of 2. +// MulByPowOf2Masked multiplies elements by a power of 2. // // Asm: VSCALEFPD, CPU Feature: AVX512EVEX func (x Float64x2) MulByPowOf2Masked(y Float64x2, z Mask64x2) Float64x2 -// MulByPowOf2 multiplies elements by a power of 2. +// MulByPowOf2Masked multiplies elements by a power of 2. // // Asm: VSCALEFPD, CPU Feature: AVX512EVEX func (x Float64x4) MulByPowOf2Masked(y Float64x4, z Mask64x4) Float64x4 -// MulByPowOf2 multiplies elements by a power of 2. +// MulByPowOf2Masked multiplies elements by a power of 2. // // Asm: VSCALEFPD, CPU Feature: AVX512EVEX func (x Float64x8) MulByPowOf2Masked(y Float64x8, z Mask64x8) Float64x8 @@ -4261,37 +4261,37 @@ func (x Uint64x8) MulEvenWiden(y Uint64x8) Uint64x8 /* MulEvenWidenMasked */ -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// MulEvenWidenMasked multiplies even-indexed elements, widening the result, masked. // Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULDQ, CPU Feature: AVX512EVEX func (x Int64x2) MulEvenWidenMasked(y Int64x2, z Mask64x2) Int64x2 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// MulEvenWidenMasked multiplies even-indexed elements, widening the result, masked. // Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULDQ, CPU Feature: AVX512EVEX func (x Int64x4) MulEvenWidenMasked(y Int64x4, z Mask64x4) Int64x4 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// MulEvenWidenMasked multiplies even-indexed elements, widening the result, masked. // Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULDQ, CPU Feature: AVX512EVEX func (x Int64x8) MulEvenWidenMasked(y Int64x8, z Mask64x8) Int64x8 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// MulEvenWidenMasked multiplies even-indexed elements, widening the result, masked. // Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULUDQ, CPU Feature: AVX512EVEX func (x Uint64x2) MulEvenWidenMasked(y Uint64x2, z Mask64x2) Uint64x2 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// MulEvenWidenMasked multiplies even-indexed elements, widening the result, masked. // Result[i] = v1.Even[i] * v2.Even[i]. 
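A minimal usage sketch of the masked min/max methods declared above, modeled on the style of src/internal/simd/testdata/sample.go from this series. The package name, the import path while the package lives under internal/, and the GOEXPERIMENT=simd gating are assumptions; mask construction and vector loads/stores are outside this excerpt.

package sample

import "internal/simd" // assumed import path for the package added in this series

// clampMasked clamps x lane-wise into [lo, hi] using the masked VMAXPS/VMINPS
// forms declared above; only the lanes selected by m are meaningful in the result.
func clampMasked(x, lo, hi simd.Float32x8, m simd.Mask32x8) simd.Float32x8 {
	return x.MaxMasked(lo, m).MinMasked(hi, m)
}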
// // Asm: VPMULUDQ, CPU Feature: AVX512EVEX func (x Uint64x4) MulEvenWidenMasked(y Uint64x4, z Mask64x4) Uint64x4 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// MulEvenWidenMasked multiplies even-indexed elements, widening the result, masked. // Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULUDQ, CPU Feature: AVX512EVEX @@ -4331,32 +4331,32 @@ func (x Uint16x32) MulHigh(y Uint16x32) Uint16x32 /* MulHighMasked */ -// MulHigh multiplies elements and stores the high part of the result, masked. +// MulHighMasked multiplies elements and stores the high part of the result, masked. // // Asm: VPMULHW, CPU Feature: AVX512EVEX func (x Int16x8) MulHighMasked(y Int16x8, z Mask16x8) Int16x8 -// MulHigh multiplies elements and stores the high part of the result, masked. +// MulHighMasked multiplies elements and stores the high part of the result, masked. // // Asm: VPMULHW, CPU Feature: AVX512EVEX func (x Int16x16) MulHighMasked(y Int16x16, z Mask16x16) Int16x16 -// MulHigh multiplies elements and stores the high part of the result, masked. +// MulHighMasked multiplies elements and stores the high part of the result, masked. // // Asm: VPMULHW, CPU Feature: AVX512EVEX func (x Int16x32) MulHighMasked(y Int16x32, z Mask16x32) Int16x32 -// MulHigh multiplies elements and stores the high part of the result, masked. +// MulHighMasked multiplies elements and stores the high part of the result, masked. // // Asm: VPMULHUW, CPU Feature: AVX512EVEX func (x Uint16x8) MulHighMasked(y Uint16x8, z Mask16x8) Uint16x8 -// MulHigh multiplies elements and stores the high part of the result, masked. +// MulHighMasked multiplies elements and stores the high part of the result, masked. // // Asm: VPMULHUW, CPU Feature: AVX512EVEX func (x Uint16x16) MulHighMasked(y Uint16x16, z Mask16x16) Uint16x16 -// MulHigh multiplies elements and stores the high part of the result, masked. +// MulHighMasked multiplies elements and stores the high part of the result, masked. // // Asm: VPMULHUW, CPU Feature: AVX512EVEX func (x Uint16x32) MulHighMasked(y Uint16x32, z Mask16x32) Uint16x32 @@ -4410,79 +4410,79 @@ func (x Int64x8) MulLow(y Int64x8) Int64x8 /* MulLowMasked */ -// MulLow multiplies elements and stores the low part of the result, masked. +// MulLowMasked multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLW, CPU Feature: AVX512EVEX func (x Int16x8) MulLowMasked(y Int16x8, z Mask16x8) Int16x8 -// MulLow multiplies elements and stores the low part of the result, masked. +// MulLowMasked multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLW, CPU Feature: AVX512EVEX func (x Int16x16) MulLowMasked(y Int16x16, z Mask16x16) Int16x16 -// MulLow multiplies elements and stores the low part of the result, masked. +// MulLowMasked multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLW, CPU Feature: AVX512EVEX func (x Int16x32) MulLowMasked(y Int16x32, z Mask16x32) Int16x32 -// MulLow multiplies elements and stores the low part of the result, masked. +// MulLowMasked multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLD, CPU Feature: AVX512EVEX func (x Int32x4) MulLowMasked(y Int32x4, z Mask32x4) Int32x4 -// MulLow multiplies elements and stores the low part of the result, masked. +// MulLowMasked multiplies elements and stores the low part of the result, masked. 
// // Asm: VPMULLD, CPU Feature: AVX512EVEX func (x Int32x8) MulLowMasked(y Int32x8, z Mask32x8) Int32x8 -// MulLow multiplies elements and stores the low part of the result, masked. +// MulLowMasked multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLD, CPU Feature: AVX512EVEX func (x Int32x16) MulLowMasked(y Int32x16, z Mask32x16) Int32x16 -// MulLow multiplies elements and stores the low part of the result, masked. +// MulLowMasked multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLQ, CPU Feature: AVX512EVEX func (x Int64x2) MulLowMasked(y Int64x2, z Mask64x2) Int64x2 -// MulLow multiplies elements and stores the low part of the result, masked. +// MulLowMasked multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLQ, CPU Feature: AVX512EVEX func (x Int64x4) MulLowMasked(y Int64x4, z Mask64x4) Int64x4 -// MulLow multiplies elements and stores the low part of the result, masked. +// MulLowMasked multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLQ, CPU Feature: AVX512EVEX func (x Int64x8) MulLowMasked(y Int64x8, z Mask64x8) Int64x8 /* MulMasked */ -// Mul multiplies corresponding elements of two vectors, masked. +// MulMasked multiplies corresponding elements of two vectors, masked. // // Asm: VMULPS, CPU Feature: AVX512EVEX func (x Float32x4) MulMasked(y Float32x4, z Mask32x4) Float32x4 -// Mul multiplies corresponding elements of two vectors, masked. +// MulMasked multiplies corresponding elements of two vectors, masked. // // Asm: VMULPS, CPU Feature: AVX512EVEX func (x Float32x8) MulMasked(y Float32x8, z Mask32x8) Float32x8 -// Mul multiplies corresponding elements of two vectors, masked. +// MulMasked multiplies corresponding elements of two vectors, masked. // // Asm: VMULPS, CPU Feature: AVX512EVEX func (x Float32x16) MulMasked(y Float32x16, z Mask32x16) Float32x16 -// Mul multiplies corresponding elements of two vectors, masked. +// MulMasked multiplies corresponding elements of two vectors, masked. // // Asm: VMULPD, CPU Feature: AVX512EVEX func (x Float64x2) MulMasked(y Float64x2, z Mask64x2) Float64x2 -// Mul multiplies corresponding elements of two vectors, masked. +// MulMasked multiplies corresponding elements of two vectors, masked. // // Asm: VMULPD, CPU Feature: AVX512EVEX func (x Float64x4) MulMasked(y Float64x4, z Mask64x4) Float64x4 -// Mul multiplies corresponding elements of two vectors, masked. +// MulMasked multiplies corresponding elements of two vectors, masked. // // Asm: VMULPD, CPU Feature: AVX512EVEX func (x Float64x8) MulMasked(y Float64x8, z Mask64x8) Float64x8 @@ -4641,152 +4641,152 @@ func (x Uint64x8) NotEqual(y Uint64x8) Mask64x8 /* NotEqualMasked */ -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x4) NotEqualMasked(y Float32x4, z Mask32x4) Mask32x4 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x8) NotEqualMasked(y Float32x8, z Mask32x8) Mask32x8 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) NotEqualMasked(y Float32x16, z Mask32x16) Mask32x16 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. 
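Continuing the same sketch, the masked low/high multiply pair documented above can be used together to recover the full 32-bit product of 16-bit lanes; this pairing is an assumed usage pattern, not something the patch itself defines.

// mulParts returns the low and high 16 bits of the 32-bit products of the
// lanes of x and y selected by m, via the masked VPMULLW/VPMULHW forms.
func mulParts(x, y simd.Int16x8, m simd.Mask16x8) (lo, hi simd.Int16x8) {
	return x.MulLowMasked(y, m), x.MulHighMasked(y, m)
}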
// // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x2) NotEqualMasked(y Float64x2, z Mask64x2) Mask64x2 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x4) NotEqualMasked(y Float64x4, z Mask64x4) Mask64x4 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) NotEqualMasked(y Float64x8, z Mask64x8) Mask64x8 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x16) NotEqualMasked(y Int8x16, z Mask8x16) Mask8x16 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x32) NotEqualMasked(y Int8x32, z Mask8x32) Mask8x32 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) NotEqualMasked(y Int8x64, z Mask8x64) Mask8x64 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x8) NotEqualMasked(y Int16x8, z Mask16x8) Mask16x8 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x16) NotEqualMasked(y Int16x16, z Mask16x16) Mask16x16 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) NotEqualMasked(y Int16x32, z Mask16x32) Mask16x32 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x4) NotEqualMasked(y Int32x4, z Mask32x4) Mask32x4 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x8) NotEqualMasked(y Int32x8, z Mask32x8) Mask32x8 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) NotEqualMasked(y Int32x16, z Mask32x16) Mask32x16 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x2) NotEqualMasked(y Int64x2, z Mask64x2) Mask64x2 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x4) NotEqualMasked(y Int64x4, z Mask64x4) Mask64x4 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) NotEqualMasked(y Int64x8, z Mask64x8) Mask64x8 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) NotEqualMasked(y Uint8x16, z Mask8x16) Mask8x16 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) NotEqualMasked(y Uint8x32, z Mask8x32) Mask8x32 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) NotEqualMasked(y Uint8x64, z Mask8x64) Mask8x64 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. 
// // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) NotEqualMasked(y Uint16x8, z Mask16x8) Mask16x8 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) NotEqualMasked(y Uint16x16, z Mask16x16) Mask16x16 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) NotEqualMasked(y Uint16x32, z Mask16x32) Mask16x32 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) NotEqualMasked(y Uint32x4, z Mask32x4) Mask32x4 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) NotEqualMasked(y Uint32x8, z Mask32x8) Mask32x8 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) NotEqualMasked(y Uint32x16, z Mask32x16) Mask32x16 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) NotEqualMasked(y Uint64x2, z Mask64x2) Mask64x2 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) NotEqualMasked(y Uint64x4, z Mask64x4) Mask64x4 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) NotEqualMasked(y Uint64x8, z Mask64x8) Mask64x8 @@ -4895,62 +4895,62 @@ func (x Uint64x8) Or(y Uint64x8) Uint64x8 /* OrMasked */ -// Or performs a masked bitwise OR operation between two vectors. +// OrMasked performs a masked bitwise OR operation between two vectors. // // Asm: VPORD, CPU Feature: AVX512EVEX func (x Int32x4) OrMasked(y Int32x4, z Mask32x4) Int32x4 -// Or performs a masked bitwise OR operation between two vectors. +// OrMasked performs a masked bitwise OR operation between two vectors. // // Asm: VPORD, CPU Feature: AVX512EVEX func (x Int32x8) OrMasked(y Int32x8, z Mask32x8) Int32x8 -// Or performs a masked bitwise OR operation between two vectors. +// OrMasked performs a masked bitwise OR operation between two vectors. // // Asm: VPORD, CPU Feature: AVX512EVEX func (x Int32x16) OrMasked(y Int32x16, z Mask32x16) Int32x16 -// Or performs a masked bitwise OR operation between two vectors. +// OrMasked performs a masked bitwise OR operation between two vectors. // // Asm: VPORQ, CPU Feature: AVX512EVEX func (x Int64x2) OrMasked(y Int64x2, z Mask64x2) Int64x2 -// Or performs a masked bitwise OR operation between two vectors. +// OrMasked performs a masked bitwise OR operation between two vectors. // // Asm: VPORQ, CPU Feature: AVX512EVEX func (x Int64x4) OrMasked(y Int64x4, z Mask64x4) Int64x4 -// Or performs a masked bitwise OR operation between two vectors. +// OrMasked performs a masked bitwise OR operation between two vectors. // // Asm: VPORQ, CPU Feature: AVX512EVEX func (x Int64x8) OrMasked(y Int64x8, z Mask64x8) Int64x8 -// Or performs a masked bitwise OR operation between two vectors. +// OrMasked performs a masked bitwise OR operation between two vectors. // // Asm: VPORD, CPU Feature: AVX512EVEX func (x Uint32x4) OrMasked(y Uint32x4, z Mask32x4) Uint32x4 -// Or performs a masked bitwise OR operation between two vectors. 
+// OrMasked performs a masked bitwise OR operation between two vectors. // // Asm: VPORD, CPU Feature: AVX512EVEX func (x Uint32x8) OrMasked(y Uint32x8, z Mask32x8) Uint32x8 -// Or performs a masked bitwise OR operation between two vectors. +// OrMasked performs a masked bitwise OR operation between two vectors. // // Asm: VPORD, CPU Feature: AVX512EVEX func (x Uint32x16) OrMasked(y Uint32x16, z Mask32x16) Uint32x16 -// Or performs a masked bitwise OR operation between two vectors. +// OrMasked performs a masked bitwise OR operation between two vectors. // // Asm: VPORQ, CPU Feature: AVX512EVEX func (x Uint64x2) OrMasked(y Uint64x2, z Mask64x2) Uint64x2 -// Or performs a masked bitwise OR operation between two vectors. +// OrMasked performs a masked bitwise OR operation between two vectors. // // Asm: VPORQ, CPU Feature: AVX512EVEX func (x Uint64x4) OrMasked(y Uint64x4, z Mask64x4) Uint64x4 -// Or performs a masked bitwise OR operation between two vectors. +// OrMasked performs a masked bitwise OR operation between two vectors. // // Asm: VPORQ, CPU Feature: AVX512EVEX func (x Uint64x8) OrMasked(y Uint64x8, z Mask64x8) Uint64x8 @@ -4994,36 +4994,36 @@ func (x Int32x16) PairDotProdAccumulate(y Int16x32, z Int16x32) Int32x16 /* PairDotProdAccumulateMasked */ -// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// PairDotProdAccumulateMasked performs dot products on pairs of elements of y and z and accumulates the results to x. // // Asm: VPDPWSSD, CPU Feature: AVX512EVEX func (x Int32x4) PairDotProdAccumulateMasked(y Int16x8, z Int16x8, u Mask32x4) Int32x4 -// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// PairDotProdAccumulateMasked performs dot products on pairs of elements of y and z and accumulates the results to x. // // Asm: VPDPWSSD, CPU Feature: AVX512EVEX func (x Int32x8) PairDotProdAccumulateMasked(y Int16x16, z Int16x16, u Mask32x8) Int32x8 -// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// PairDotProdAccumulateMasked performs dot products on pairs of elements of y and z and accumulates the results to x. // // Asm: VPDPWSSD, CPU Feature: AVX512EVEX func (x Int32x16) PairDotProdAccumulateMasked(y Int16x32, z Int16x32, u Mask32x16) Int32x16 /* PairDotProdMasked */ -// PairDotProd multiplies the elements and add the pairs together, +// PairDotProdMasked multiplies the elements and add the pairs together, // yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDWD, CPU Feature: AVX512EVEX func (x Int16x8) PairDotProdMasked(y Int16x8, z Mask16x8) Int32x4 -// PairDotProd multiplies the elements and add the pairs together, +// PairDotProdMasked multiplies the elements and add the pairs together, // yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDWD, CPU Feature: AVX512EVEX func (x Int16x16) PairDotProdMasked(y Int16x16, z Mask16x16) Int32x8 -// PairDotProd multiplies the elements and add the pairs together, +// PairDotProdMasked multiplies the elements and add the pairs together, // yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDWD, CPU Feature: AVX512EVEX @@ -5301,122 +5301,122 @@ func (x Uint64x8) PopCount() Uint64x8 /* PopCountMasked */ -// PopCount counts the number of set bits in each element. 
+// PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTB, CPU Feature: AVX512EVEX func (x Int8x16) PopCountMasked(y Mask8x16) Int8x16 -// PopCount counts the number of set bits in each element. +// PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTB, CPU Feature: AVX512EVEX func (x Int8x32) PopCountMasked(y Mask8x32) Int8x32 -// PopCount counts the number of set bits in each element. +// PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTB, CPU Feature: AVX512EVEX func (x Int8x64) PopCountMasked(y Mask8x64) Int8x64 -// PopCount counts the number of set bits in each element. +// PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTW, CPU Feature: AVX512EVEX func (x Int16x8) PopCountMasked(y Mask16x8) Int16x8 -// PopCount counts the number of set bits in each element. +// PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTW, CPU Feature: AVX512EVEX func (x Int16x16) PopCountMasked(y Mask16x16) Int16x16 -// PopCount counts the number of set bits in each element. +// PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTW, CPU Feature: AVX512EVEX func (x Int16x32) PopCountMasked(y Mask16x32) Int16x32 -// PopCount counts the number of set bits in each element. +// PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTD, CPU Feature: AVX512EVEX func (x Int32x4) PopCountMasked(y Mask32x4) Int32x4 -// PopCount counts the number of set bits in each element. +// PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTD, CPU Feature: AVX512EVEX func (x Int32x8) PopCountMasked(y Mask32x8) Int32x8 -// PopCount counts the number of set bits in each element. +// PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTD, CPU Feature: AVX512EVEX func (x Int32x16) PopCountMasked(y Mask32x16) Int32x16 -// PopCount counts the number of set bits in each element. +// PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTQ, CPU Feature: AVX512EVEX func (x Int64x2) PopCountMasked(y Mask64x2) Int64x2 -// PopCount counts the number of set bits in each element. +// PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTQ, CPU Feature: AVX512EVEX func (x Int64x4) PopCountMasked(y Mask64x4) Int64x4 -// PopCount counts the number of set bits in each element. +// PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTQ, CPU Feature: AVX512EVEX func (x Int64x8) PopCountMasked(y Mask64x8) Int64x8 -// PopCount counts the number of set bits in each element. +// PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTB, CPU Feature: AVX512EVEX func (x Uint8x16) PopCountMasked(y Mask8x16) Uint8x16 -// PopCount counts the number of set bits in each element. +// PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTB, CPU Feature: AVX512EVEX func (x Uint8x32) PopCountMasked(y Mask8x32) Uint8x32 -// PopCount counts the number of set bits in each element. +// PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTB, CPU Feature: AVX512EVEX func (x Uint8x64) PopCountMasked(y Mask8x64) Uint8x64 -// PopCount counts the number of set bits in each element. +// PopCountMasked counts the number of set bits in each element. 
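Continuing the sketch, a mask produced by a masked compare can feed later masked operations; chaining NotEqualMasked, OrMasked, and PopCountMasked this way is an assumed pattern chosen only to show how the Mask types flow between the methods declared above.

// orChangedBits ORs x and y only in the lanes (within m) where they differ,
// then counts the set bits of that OR in those same lanes.
func orChangedBits(x, y simd.Int32x8, m simd.Mask32x8) simd.Int32x8 {
	changed := x.NotEqualMasked(y, m) // Mask32x8 of differing lanes
	return x.OrMasked(y, changed).PopCountMasked(changed)
}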
// // Asm: VPOPCNTW, CPU Feature: AVX512EVEX func (x Uint16x8) PopCountMasked(y Mask16x8) Uint16x8 -// PopCount counts the number of set bits in each element. +// PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTW, CPU Feature: AVX512EVEX func (x Uint16x16) PopCountMasked(y Mask16x16) Uint16x16 -// PopCount counts the number of set bits in each element. +// PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTW, CPU Feature: AVX512EVEX func (x Uint16x32) PopCountMasked(y Mask16x32) Uint16x32 -// PopCount counts the number of set bits in each element. +// PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTD, CPU Feature: AVX512EVEX func (x Uint32x4) PopCountMasked(y Mask32x4) Uint32x4 -// PopCount counts the number of set bits in each element. +// PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTD, CPU Feature: AVX512EVEX func (x Uint32x8) PopCountMasked(y Mask32x8) Uint32x8 -// PopCount counts the number of set bits in each element. +// PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTD, CPU Feature: AVX512EVEX func (x Uint32x16) PopCountMasked(y Mask32x16) Uint32x16 -// PopCount counts the number of set bits in each element. +// PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTQ, CPU Feature: AVX512EVEX func (x Uint64x2) PopCountMasked(y Mask64x2) Uint64x2 -// PopCount counts the number of set bits in each element. +// PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTQ, CPU Feature: AVX512EVEX func (x Uint64x4) PopCountMasked(y Mask64x4) Uint64x4 -// PopCount counts the number of set bits in each element. +// PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTQ, CPU Feature: AVX512EVEX func (x Uint64x8) PopCountMasked(y Mask64x8) Uint64x8 @@ -5485,62 +5485,62 @@ func (x Uint64x8) RotateAllLeft(imm uint8) Uint64x8 /* RotateAllLeftMasked */ -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // Asm: VPROLD, CPU Feature: AVX512EVEX func (x Int32x4) RotateAllLeftMasked(imm uint8, y Mask32x4) Int32x4 -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // Asm: VPROLD, CPU Feature: AVX512EVEX func (x Int32x8) RotateAllLeftMasked(imm uint8, y Mask32x8) Int32x8 -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // Asm: VPROLD, CPU Feature: AVX512EVEX func (x Int32x16) RotateAllLeftMasked(imm uint8, y Mask32x16) Int32x16 -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // Asm: VPROLQ, CPU Feature: AVX512EVEX func (x Int64x2) RotateAllLeftMasked(imm uint8, y Mask64x2) Int64x2 -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. 
// // Asm: VPROLQ, CPU Feature: AVX512EVEX func (x Int64x4) RotateAllLeftMasked(imm uint8, y Mask64x4) Int64x4 -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // Asm: VPROLQ, CPU Feature: AVX512EVEX func (x Int64x8) RotateAllLeftMasked(imm uint8, y Mask64x8) Int64x8 -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // Asm: VPROLD, CPU Feature: AVX512EVEX func (x Uint32x4) RotateAllLeftMasked(imm uint8, y Mask32x4) Uint32x4 -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // Asm: VPROLD, CPU Feature: AVX512EVEX func (x Uint32x8) RotateAllLeftMasked(imm uint8, y Mask32x8) Uint32x8 -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // Asm: VPROLD, CPU Feature: AVX512EVEX func (x Uint32x16) RotateAllLeftMasked(imm uint8, y Mask32x16) Uint32x16 -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // Asm: VPROLQ, CPU Feature: AVX512EVEX func (x Uint64x2) RotateAllLeftMasked(imm uint8, y Mask64x2) Uint64x2 -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // Asm: VPROLQ, CPU Feature: AVX512EVEX func (x Uint64x4) RotateAllLeftMasked(imm uint8, y Mask64x4) Uint64x4 -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // Asm: VPROLQ, CPU Feature: AVX512EVEX func (x Uint64x8) RotateAllLeftMasked(imm uint8, y Mask64x8) Uint64x8 @@ -5609,62 +5609,62 @@ func (x Uint64x8) RotateAllRight(imm uint8) Uint64x8 /* RotateAllRightMasked */ -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORD, CPU Feature: AVX512EVEX func (x Int32x4) RotateAllRightMasked(imm uint8, y Mask32x4) Int32x4 -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORD, CPU Feature: AVX512EVEX func (x Int32x8) RotateAllRightMasked(imm uint8, y Mask32x8) Int32x8 -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORD, CPU Feature: AVX512EVEX func (x Int32x16) RotateAllRightMasked(imm uint8, y Mask32x16) Int32x16 -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. 
+// RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORQ, CPU Feature: AVX512EVEX func (x Int64x2) RotateAllRightMasked(imm uint8, y Mask64x2) Int64x2 -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORQ, CPU Feature: AVX512EVEX func (x Int64x4) RotateAllRightMasked(imm uint8, y Mask64x4) Int64x4 -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORQ, CPU Feature: AVX512EVEX func (x Int64x8) RotateAllRightMasked(imm uint8, y Mask64x8) Int64x8 -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORD, CPU Feature: AVX512EVEX func (x Uint32x4) RotateAllRightMasked(imm uint8, y Mask32x4) Uint32x4 -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORD, CPU Feature: AVX512EVEX func (x Uint32x8) RotateAllRightMasked(imm uint8, y Mask32x8) Uint32x8 -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORD, CPU Feature: AVX512EVEX func (x Uint32x16) RotateAllRightMasked(imm uint8, y Mask32x16) Uint32x16 -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORQ, CPU Feature: AVX512EVEX func (x Uint64x2) RotateAllRightMasked(imm uint8, y Mask64x2) Uint64x2 -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORQ, CPU Feature: AVX512EVEX func (x Uint64x4) RotateAllRightMasked(imm uint8, y Mask64x4) Uint64x4 -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORQ, CPU Feature: AVX512EVEX func (x Uint64x8) RotateAllRightMasked(imm uint8, y Mask64x8) Uint64x8 @@ -5733,62 +5733,62 @@ func (x Uint64x8) RotateLeft(y Uint64x8) Uint64x8 /* RotateLeftMasked */ -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // Asm: VPROLVD, CPU Feature: AVX512EVEX func (x Int32x4) RotateLeftMasked(y Int32x4, z Mask32x4) Int32x4 -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. 
// // Asm: VPROLVD, CPU Feature: AVX512EVEX func (x Int32x8) RotateLeftMasked(y Int32x8, z Mask32x8) Int32x8 -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // Asm: VPROLVD, CPU Feature: AVX512EVEX func (x Int32x16) RotateLeftMasked(y Int32x16, z Mask32x16) Int32x16 -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // Asm: VPROLVQ, CPU Feature: AVX512EVEX func (x Int64x2) RotateLeftMasked(y Int64x2, z Mask64x2) Int64x2 -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // Asm: VPROLVQ, CPU Feature: AVX512EVEX func (x Int64x4) RotateLeftMasked(y Int64x4, z Mask64x4) Int64x4 -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // Asm: VPROLVQ, CPU Feature: AVX512EVEX func (x Int64x8) RotateLeftMasked(y Int64x8, z Mask64x8) Int64x8 -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // Asm: VPROLVD, CPU Feature: AVX512EVEX func (x Uint32x4) RotateLeftMasked(y Uint32x4, z Mask32x4) Uint32x4 -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // Asm: VPROLVD, CPU Feature: AVX512EVEX func (x Uint32x8) RotateLeftMasked(y Uint32x8, z Mask32x8) Uint32x8 -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // Asm: VPROLVD, CPU Feature: AVX512EVEX func (x Uint32x16) RotateLeftMasked(y Uint32x16, z Mask32x16) Uint32x16 -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // Asm: VPROLVQ, CPU Feature: AVX512EVEX func (x Uint64x2) RotateLeftMasked(y Uint64x2, z Mask64x2) Uint64x2 -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // Asm: VPROLVQ, CPU Feature: AVX512EVEX func (x Uint64x4) RotateLeftMasked(y Uint64x4, z Mask64x4) Uint64x4 -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. 
// // Asm: VPROLVQ, CPU Feature: AVX512EVEX func (x Uint64x8) RotateLeftMasked(y Uint64x8, z Mask64x8) Uint64x8 @@ -5857,62 +5857,62 @@ func (x Uint64x8) RotateRight(y Uint64x8) Uint64x8 /* RotateRightMasked */ -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // Asm: VPRORVD, CPU Feature: AVX512EVEX func (x Int32x4) RotateRightMasked(y Int32x4, z Mask32x4) Int32x4 -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // Asm: VPRORVD, CPU Feature: AVX512EVEX func (x Int32x8) RotateRightMasked(y Int32x8, z Mask32x8) Int32x8 -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // Asm: VPRORVD, CPU Feature: AVX512EVEX func (x Int32x16) RotateRightMasked(y Int32x16, z Mask32x16) Int32x16 -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // Asm: VPRORVQ, CPU Feature: AVX512EVEX func (x Int64x2) RotateRightMasked(y Int64x2, z Mask64x2) Int64x2 -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // Asm: VPRORVQ, CPU Feature: AVX512EVEX func (x Int64x4) RotateRightMasked(y Int64x4, z Mask64x4) Int64x4 -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // Asm: VPRORVQ, CPU Feature: AVX512EVEX func (x Int64x8) RotateRightMasked(y Int64x8, z Mask64x8) Int64x8 -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // Asm: VPRORVD, CPU Feature: AVX512EVEX func (x Uint32x4) RotateRightMasked(y Uint32x4, z Mask32x4) Uint32x4 -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // Asm: VPRORVD, CPU Feature: AVX512EVEX func (x Uint32x8) RotateRightMasked(y Uint32x8, z Mask32x8) Uint32x8 -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // Asm: VPRORVD, CPU Feature: AVX512EVEX func (x Uint32x16) RotateRightMasked(y Uint32x16, z Mask32x16) Uint32x16 -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. 
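Continuing the sketch, the two rotate flavors documented above differ in where the count comes from: RotateAllLeftMasked takes a single immediate for every lane, while RotateLeftMasked takes a per-lane count vector. The function below is an assumed usage showing both side by side.

// rotateBoth rotates the lanes of x selected by m left by a constant amount
// (VPROLD) and by per-lane counts (VPROLVD).
func rotateBoth(x, counts simd.Uint32x4, m simd.Mask32x4) (byConst, byLane simd.Uint32x4) {
	return x.RotateAllLeftMasked(3, m), x.RotateLeftMasked(counts, m)
}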
+// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // Asm: VPRORVQ, CPU Feature: AVX512EVEX func (x Uint64x2) RotateRightMasked(y Uint64x2, z Mask64x2) Uint64x2 -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // Asm: VPRORVQ, CPU Feature: AVX512EVEX func (x Uint64x4) RotateRightMasked(y Uint64x4, z Mask64x4) Uint64x4 -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // Asm: VPRORVQ, CPU Feature: AVX512EVEX func (x Uint64x8) RotateRightMasked(y Uint64x8, z Mask64x8) Uint64x8 @@ -5973,32 +5973,32 @@ func (x Float64x8) RoundWithPrecision(imm uint8) Float64x8 /* RoundWithPrecisionMasked */ -// RoundWithPrecision rounds elements with specified precision. +// RoundWithPrecisionMasked rounds elements with specified precision. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) RoundWithPrecisionMasked(imm uint8, y Mask32x4) Float32x4 -// RoundWithPrecision rounds elements with specified precision. +// RoundWithPrecisionMasked rounds elements with specified precision. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) RoundWithPrecisionMasked(imm uint8, y Mask32x8) Float32x8 -// RoundWithPrecision rounds elements with specified precision. +// RoundWithPrecisionMasked rounds elements with specified precision. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) RoundWithPrecisionMasked(imm uint8, y Mask32x16) Float32x16 -// RoundWithPrecision rounds elements with specified precision. +// RoundWithPrecisionMasked rounds elements with specified precision. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) RoundWithPrecisionMasked(imm uint8, y Mask64x2) Float64x2 -// RoundWithPrecision rounds elements with specified precision. +// RoundWithPrecisionMasked rounds elements with specified precision. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) RoundWithPrecisionMasked(imm uint8, y Mask64x4) Float64x4 -// RoundWithPrecision rounds elements with specified precision. +// RoundWithPrecisionMasked rounds elements with specified precision. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) RoundWithPrecisionMasked(imm uint8, y Mask64x8) Float64x8 @@ -6067,62 +6067,62 @@ func (x Uint16x32) SaturatedAdd(y Uint16x32) Uint16x32 /* SaturatedAddMasked */ -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// SaturatedAddMasked adds corresponding elements of two vectors with saturation. // // Asm: VPADDSB, CPU Feature: AVX512EVEX func (x Int8x16) SaturatedAddMasked(y Int8x16, z Mask8x16) Int8x16 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// SaturatedAddMasked adds corresponding elements of two vectors with saturation. // // Asm: VPADDSB, CPU Feature: AVX512EVEX func (x Int8x32) SaturatedAddMasked(y Int8x32, z Mask8x32) Int8x32 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// SaturatedAddMasked adds corresponding elements of two vectors with saturation. 
// // Asm: VPADDSB, CPU Feature: AVX512EVEX func (x Int8x64) SaturatedAddMasked(y Int8x64, z Mask8x64) Int8x64 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// SaturatedAddMasked adds corresponding elements of two vectors with saturation. // // Asm: VPADDSW, CPU Feature: AVX512EVEX func (x Int16x8) SaturatedAddMasked(y Int16x8, z Mask16x8) Int16x8 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// SaturatedAddMasked adds corresponding elements of two vectors with saturation. // // Asm: VPADDSW, CPU Feature: AVX512EVEX func (x Int16x16) SaturatedAddMasked(y Int16x16, z Mask16x16) Int16x16 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// SaturatedAddMasked adds corresponding elements of two vectors with saturation. // // Asm: VPADDSW, CPU Feature: AVX512EVEX func (x Int16x32) SaturatedAddMasked(y Int16x32, z Mask16x32) Int16x32 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// SaturatedAddMasked adds corresponding elements of two vectors with saturation. // // Asm: VPADDSB, CPU Feature: AVX512EVEX func (x Uint8x16) SaturatedAddMasked(y Uint8x16, z Mask8x16) Uint8x16 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// SaturatedAddMasked adds corresponding elements of two vectors with saturation. // // Asm: VPADDSB, CPU Feature: AVX512EVEX func (x Uint8x32) SaturatedAddMasked(y Uint8x32, z Mask8x32) Uint8x32 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// SaturatedAddMasked adds corresponding elements of two vectors with saturation. // // Asm: VPADDSB, CPU Feature: AVX512EVEX func (x Uint8x64) SaturatedAddMasked(y Uint8x64, z Mask8x64) Uint8x64 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// SaturatedAddMasked adds corresponding elements of two vectors with saturation. // // Asm: VPADDSW, CPU Feature: AVX512EVEX func (x Uint16x8) SaturatedAddMasked(y Uint16x8, z Mask16x8) Uint16x8 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// SaturatedAddMasked adds corresponding elements of two vectors with saturation. // // Asm: VPADDSW, CPU Feature: AVX512EVEX func (x Uint16x16) SaturatedAddMasked(y Uint16x16, z Mask16x16) Uint16x16 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// SaturatedAddMasked adds corresponding elements of two vectors with saturation. // // Asm: VPADDSW, CPU Feature: AVX512EVEX func (x Uint16x32) SaturatedAddMasked(y Uint16x32, z Mask16x32) Uint16x32 @@ -6146,17 +6146,17 @@ func (x Int32x16) SaturatedPairDotProdAccumulate(y Int16x32, z Int16x32) Int32x1 /* SaturatedPairDotProdAccumulateMasked */ -// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// SaturatedPairDotProdAccumulateMasked performs dot products on pairs of elements of y and z and accumulates the results to x. // // Asm: VPDPWSSDS, CPU Feature: AVX512EVEX func (x Int32x4) SaturatedPairDotProdAccumulateMasked(y Int16x8, z Int16x8, u Mask32x4) Int32x4 -// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// SaturatedPairDotProdAccumulateMasked performs dot products on pairs of elements of y and z and accumulates the results to x. 
// // Asm: VPDPWSSDS, CPU Feature: AVX512EVEX func (x Int32x8) SaturatedPairDotProdAccumulateMasked(y Int16x16, z Int16x16, u Mask32x8) Int32x8 -// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// SaturatedPairDotProdAccumulateMasked performs dot products on pairs of elements of y and z and accumulates the results to x. // // Asm: VPDPWSSDS, CPU Feature: AVX512EVEX func (x Int32x16) SaturatedPairDotProdAccumulateMasked(y Int16x32, z Int16x32, u Mask32x16) Int32x16 @@ -6253,81 +6253,81 @@ func (x Uint16x32) SaturatedSub(y Uint16x32) Uint16x32 /* SaturatedSubMasked */ -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // // Asm: VPSUBSB, CPU Feature: AVX512EVEX func (x Int8x16) SaturatedSubMasked(y Int8x16, z Mask8x16) Int8x16 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // // Asm: VPSUBSB, CPU Feature: AVX512EVEX func (x Int8x32) SaturatedSubMasked(y Int8x32, z Mask8x32) Int8x32 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // // Asm: VPSUBSB, CPU Feature: AVX512EVEX func (x Int8x64) SaturatedSubMasked(y Int8x64, z Mask8x64) Int8x64 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // // Asm: VPSUBSW, CPU Feature: AVX512EVEX func (x Int16x8) SaturatedSubMasked(y Int16x8, z Mask16x8) Int16x8 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // // Asm: VPSUBSW, CPU Feature: AVX512EVEX func (x Int16x16) SaturatedSubMasked(y Int16x16, z Mask16x16) Int16x16 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // // Asm: VPSUBSW, CPU Feature: AVX512EVEX func (x Int16x32) SaturatedSubMasked(y Int16x32, z Mask16x32) Int16x32 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // // Asm: VPSUBSB, CPU Feature: AVX512EVEX func (x Uint8x16) SaturatedSubMasked(y Uint8x16, z Mask8x16) Uint8x16 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // // Asm: VPSUBSB, CPU Feature: AVX512EVEX func (x Uint8x32) SaturatedSubMasked(y Uint8x32, z Mask8x32) Uint8x32 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // // Asm: VPSUBSB, CPU Feature: AVX512EVEX func (x Uint8x64) SaturatedSubMasked(y Uint8x64, z Mask8x64) Uint8x64 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. 
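Continuing the sketch, the saturating add/sub pair declared above clamps to the int16 range instead of wrapping; the helper below is an assumed usage of those two methods.

// satSumDiff returns x+y and x-y with int16 saturation in the lanes selected
// by m (VPADDSW/VPSUBSW).
func satSumDiff(x, y simd.Int16x8, m simd.Mask16x8) (sum, diff simd.Int16x8) {
	return x.SaturatedAddMasked(y, m), x.SaturatedSubMasked(y, m)
}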
// // Asm: VPSUBSW, CPU Feature: AVX512EVEX func (x Uint16x8) SaturatedSubMasked(y Uint16x8, z Mask16x8) Uint16x8 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // // Asm: VPSUBSW, CPU Feature: AVX512EVEX func (x Uint16x16) SaturatedSubMasked(y Uint16x16, z Mask16x16) Uint16x16 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // // Asm: VPSUBSW, CPU Feature: AVX512EVEX func (x Uint16x32) SaturatedSubMasked(y Uint16x32, z Mask16x32) Uint16x32 /* SaturatedUnsignedSignedPairDotProd */ -// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, +// SaturatedUnsignedSignedPairDotProd multiplies the elements and add the pairs together with saturation, // yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDUBSW, CPU Feature: AVX func (x Uint8x16) SaturatedUnsignedSignedPairDotProd(y Int8x16) Int16x8 -// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, +// SaturatedUnsignedSignedPairDotProd multiplies the elements and add the pairs together with saturation, // yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDUBSW, CPU Feature: AVX2 func (x Uint8x32) SaturatedUnsignedSignedPairDotProd(y Int8x32) Int16x16 -// SaturatedPairDotProdMasked multiplies the elements and add the pairs together with saturation, +// SaturatedUnsignedSignedPairDotProd multiplies the elements and add the pairs together with saturation, // yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDUBSW, CPU Feature: AVX512EVEX @@ -6335,19 +6335,19 @@ func (x Uint8x64) SaturatedUnsignedSignedPairDotProd(y Int8x64) Int16x32 /* SaturatedUnsignedSignedPairDotProdMasked */ -// SaturatedPairDotProdMasked multiplies the elements and add the pairs together with saturation, +// SaturatedUnsignedSignedPairDotProdMasked multiplies the elements and add the pairs together with saturation, // yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDUBSW, CPU Feature: AVX512EVEX func (x Uint8x16) SaturatedUnsignedSignedPairDotProdMasked(y Int8x16, z Mask16x8) Int16x8 -// SaturatedPairDotProdMasked multiplies the elements and add the pairs together with saturation, +// SaturatedUnsignedSignedPairDotProdMasked multiplies the elements and add the pairs together with saturation, // yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDUBSW, CPU Feature: AVX512EVEX func (x Uint8x32) SaturatedUnsignedSignedPairDotProdMasked(y Int8x32, z Mask16x16) Int16x16 -// SaturatedPairDotProdMasked multiplies the elements and add the pairs together with saturation, +// SaturatedUnsignedSignedPairDotProdMasked multiplies the elements and add the pairs together with saturation, // yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDUBSW, CPU Feature: AVX512EVEX @@ -6387,32 +6387,32 @@ func (x Uint32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z In /* SaturatedUnsignedSignedQuadDotProdAccumulateMasked */ -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
+// SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSDS, CPU Feature: AVX512EVEX func (x Int32x4) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int8x16, u Mask32x4) Int32x4 -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSDS, CPU Feature: AVX512EVEX func (x Int32x8) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int8x32, u Mask32x8) Int32x8 -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSDS, CPU Feature: AVX512EVEX func (x Int32x16) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int8x64, u Mask32x16) Int32x16 -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSDS, CPU Feature: AVX512EVEX func (x Uint32x4) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int8x16, u Mask32x4) Uint32x4 -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSDS, CPU Feature: AVX512EVEX func (x Uint32x8) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int8x32, u Mask32x8) Uint32x8 -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSDS, CPU Feature: AVX512EVEX func (x Uint32x16) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int8x64, u Mask32x16) Uint32x16 @@ -6695,109 +6695,109 @@ func (x Uint64x8) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint64x8) Uint64x8 /* ShiftAllLeftAndFillUpperFromMasked */ -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. 
// // Asm: VPSHLDW, CPU Feature: AVX512EVEX func (x Int16x8) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int16x8, z Mask16x8) Int16x8 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // Asm: VPSHLDW, CPU Feature: AVX512EVEX func (x Int16x16) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int16x16, z Mask16x16) Int16x16 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // Asm: VPSHLDW, CPU Feature: AVX512EVEX func (x Int16x32) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int16x32, z Mask16x32) Int16x32 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // Asm: VPSHLDD, CPU Feature: AVX512EVEX func (x Int32x4) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int32x4, z Mask32x4) Int32x4 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // Asm: VPSHLDD, CPU Feature: AVX512EVEX func (x Int32x8) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int32x8, z Mask32x8) Int32x8 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // Asm: VPSHLDD, CPU Feature: AVX512EVEX func (x Int32x16) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int32x16, z Mask32x16) Int32x16 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // Asm: VPSHLDQ, CPU Feature: AVX512EVEX func (x Int64x2) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int64x2, z Mask64x2) Int64x2 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. 
// // Asm: VPSHLDQ, CPU Feature: AVX512EVEX func (x Int64x4) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int64x4, z Mask64x4) Int64x4 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // Asm: VPSHLDQ, CPU Feature: AVX512EVEX func (x Int64x8) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int64x8, z Mask64x8) Int64x8 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // Asm: VPSHLDW, CPU Feature: AVX512EVEX func (x Uint16x8) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint16x8, z Mask16x8) Uint16x8 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // Asm: VPSHLDW, CPU Feature: AVX512EVEX func (x Uint16x16) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint16x16, z Mask16x16) Uint16x16 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // Asm: VPSHLDW, CPU Feature: AVX512EVEX func (x Uint16x32) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint16x32, z Mask16x32) Uint16x32 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // Asm: VPSHLDD, CPU Feature: AVX512EVEX func (x Uint32x4) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint32x4, z Mask32x4) Uint32x4 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // Asm: VPSHLDD, CPU Feature: AVX512EVEX func (x Uint32x8) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint32x8, z Mask32x8) Uint32x8 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. 
// // Asm: VPSHLDD, CPU Feature: AVX512EVEX func (x Uint32x16) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint32x16, z Mask32x16) Uint32x16 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // Asm: VPSHLDQ, CPU Feature: AVX512EVEX func (x Uint64x2) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint64x2, z Mask64x2) Uint64x2 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // Asm: VPSHLDQ, CPU Feature: AVX512EVEX func (x Uint64x4) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint64x4, z Mask64x4) Uint64x4 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // Asm: VPSHLDQ, CPU Feature: AVX512EVEX @@ -6805,32 +6805,32 @@ func (x Uint64x8) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint64x8, z Ma /* ShiftAllLeftMasked */ -// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLQ, CPU Feature: AVX512EVEX func (x Int64x2) ShiftAllLeftMasked(y uint64, z Mask64x2) Int64x2 -// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLQ, CPU Feature: AVX512EVEX func (x Int64x4) ShiftAllLeftMasked(y uint64, z Mask64x4) Int64x4 -// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLQ, CPU Feature: AVX512EVEX func (x Int64x8) ShiftAllLeftMasked(y uint64, z Mask64x8) Int64x8 -// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLQ, CPU Feature: AVX512EVEX func (x Uint64x2) ShiftAllLeftMasked(y uint64, z Mask64x2) Uint64x2 -// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLQ, CPU Feature: AVX512EVEX func (x Uint64x4) ShiftAllLeftMasked(y uint64, z Mask64x4) Uint64x4 -// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. 
+// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLQ, CPU Feature: AVX512EVEX func (x Uint64x8) ShiftAllLeftMasked(y uint64, z Mask64x8) Uint64x8 @@ -7019,109 +7019,109 @@ func (x Uint64x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint64x8) Uint64x8 /* ShiftAllRightAndFillUpperFromMasked */ -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDW, CPU Feature: AVX512EVEX func (x Int16x8) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int16x8, z Mask16x8) Int16x8 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDW, CPU Feature: AVX512EVEX func (x Int16x16) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int16x16, z Mask16x16) Int16x16 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDW, CPU Feature: AVX512EVEX func (x Int16x32) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int16x32, z Mask16x32) Int16x32 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDD, CPU Feature: AVX512EVEX func (x Int32x4) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int32x4, z Mask32x4) Int32x4 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDD, CPU Feature: AVX512EVEX func (x Int32x8) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int32x8, z Mask32x8) Int32x8 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
// // Asm: VPSHRDD, CPU Feature: AVX512EVEX func (x Int32x16) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int32x16, z Mask32x16) Int32x16 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDQ, CPU Feature: AVX512EVEX func (x Int64x2) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int64x2, z Mask64x2) Int64x2 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDQ, CPU Feature: AVX512EVEX func (x Int64x4) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int64x4, z Mask64x4) Int64x4 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDQ, CPU Feature: AVX512EVEX func (x Int64x8) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int64x8, z Mask64x8) Int64x8 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDW, CPU Feature: AVX512EVEX func (x Uint16x8) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint16x8, z Mask16x8) Uint16x8 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDW, CPU Feature: AVX512EVEX func (x Uint16x16) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint16x16, z Mask16x16) Uint16x16 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDW, CPU Feature: AVX512EVEX func (x Uint16x32) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint16x32, z Mask16x32) Uint16x32 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
// // Asm: VPSHRDD, CPU Feature: AVX512EVEX func (x Uint32x4) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint32x4, z Mask32x4) Uint32x4 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDD, CPU Feature: AVX512EVEX func (x Uint32x8) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint32x8, z Mask32x8) Uint32x8 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDD, CPU Feature: AVX512EVEX func (x Uint32x16) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint32x16, z Mask32x16) Uint32x16 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDQ, CPU Feature: AVX512EVEX func (x Uint64x2) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint64x2, z Mask64x2) Uint64x2 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDQ, CPU Feature: AVX512EVEX func (x Uint64x4) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint64x4, z Mask64x4) Uint64x4 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDQ, CPU Feature: AVX512EVEX @@ -7129,32 +7129,32 @@ func (x Uint64x8) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint64x8, z M /* ShiftAllRightMasked */ -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // // Asm: VPSRLQ, CPU Feature: AVX512EVEX func (x Int64x2) ShiftAllRightMasked(y uint64, z Mask64x2) Int64x2 -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // // Asm: VPSRLQ, CPU Feature: AVX512EVEX func (x Int64x4) ShiftAllRightMasked(y uint64, z Mask64x4) Int64x4 -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. 
+// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // // Asm: VPSRLQ, CPU Feature: AVX512EVEX func (x Int64x8) ShiftAllRightMasked(y uint64, z Mask64x8) Int64x8 -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // // Asm: VPSRLQ, CPU Feature: AVX512EVEX func (x Uint64x2) ShiftAllRightMasked(y uint64, z Mask64x2) Uint64x2 -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // // Asm: VPSRLQ, CPU Feature: AVX512EVEX func (x Uint64x4) ShiftAllRightMasked(y uint64, z Mask64x4) Uint64x4 -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // // Asm: VPSRLQ, CPU Feature: AVX512EVEX func (x Uint64x8) ShiftAllRightMasked(y uint64, z Mask64x8) Uint64x8 @@ -7198,17 +7198,17 @@ func (x Int64x8) ShiftAllRightSignExtended(y uint64) Int64x8 /* ShiftAllRightSignExtendedMasked */ -// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. +// ShiftAllRightSignExtendedMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAQ, CPU Feature: AVX512EVEX func (x Int64x2) ShiftAllRightSignExtendedMasked(y uint64, z Mask64x2) Int64x2 -// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. +// ShiftAllRightSignExtendedMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAQ, CPU Feature: AVX512EVEX func (x Int64x4) ShiftAllRightSignExtendedMasked(y uint64, z Mask64x4) Int64x4 -// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. +// ShiftAllRightSignExtendedMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAQ, CPU Feature: AVX512EVEX func (x Int64x8) ShiftAllRightSignExtendedMasked(y uint64, z Mask64x8) Int64x8 @@ -7417,109 +7417,109 @@ func (x Uint64x8) ShiftLeftAndFillUpperFrom(y Uint64x8, z Uint64x8) Uint64x8 /* ShiftLeftAndFillUpperFromMasked */ -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
// // Asm: VPSHLDVW, CPU Feature: AVX512EVEX func (x Int16x8) ShiftLeftAndFillUpperFromMasked(y Int16x8, z Int16x8, u Mask16x8) Int16x8 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVW, CPU Feature: AVX512EVEX func (x Int16x16) ShiftLeftAndFillUpperFromMasked(y Int16x16, z Int16x16, u Mask16x16) Int16x16 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVW, CPU Feature: AVX512EVEX func (x Int16x32) ShiftLeftAndFillUpperFromMasked(y Int16x32, z Int16x32, u Mask16x32) Int16x32 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVD, CPU Feature: AVX512EVEX func (x Int32x4) ShiftLeftAndFillUpperFromMasked(y Int32x4, z Int32x4, u Mask32x4) Int32x4 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVD, CPU Feature: AVX512EVEX func (x Int32x8) ShiftLeftAndFillUpperFromMasked(y Int32x8, z Int32x8, u Mask32x8) Int32x8 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVD, CPU Feature: AVX512EVEX func (x Int32x16) ShiftLeftAndFillUpperFromMasked(y Int32x16, z Int32x16, u Mask32x16) Int32x16 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVQ, CPU Feature: AVX512EVEX func (x Int64x2) ShiftLeftAndFillUpperFromMasked(y Int64x2, z Int64x2, u Mask64x2) Int64x2 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
// // Asm: VPSHLDVQ, CPU Feature: AVX512EVEX func (x Int64x4) ShiftLeftAndFillUpperFromMasked(y Int64x4, z Int64x4, u Mask64x4) Int64x4 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVQ, CPU Feature: AVX512EVEX func (x Int64x8) ShiftLeftAndFillUpperFromMasked(y Int64x8, z Int64x8, u Mask64x8) Int64x8 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVW, CPU Feature: AVX512EVEX func (x Uint16x8) ShiftLeftAndFillUpperFromMasked(y Uint16x8, z Uint16x8, u Mask16x8) Uint16x8 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVW, CPU Feature: AVX512EVEX func (x Uint16x16) ShiftLeftAndFillUpperFromMasked(y Uint16x16, z Uint16x16, u Mask16x16) Uint16x16 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVW, CPU Feature: AVX512EVEX func (x Uint16x32) ShiftLeftAndFillUpperFromMasked(y Uint16x32, z Uint16x32, u Mask16x32) Uint16x32 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVD, CPU Feature: AVX512EVEX func (x Uint32x4) ShiftLeftAndFillUpperFromMasked(y Uint32x4, z Uint32x4, u Mask32x4) Uint32x4 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVD, CPU Feature: AVX512EVEX func (x Uint32x8) ShiftLeftAndFillUpperFromMasked(y Uint32x8, z Uint32x8, u Mask32x8) Uint32x8 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
// // Asm: VPSHLDVD, CPU Feature: AVX512EVEX func (x Uint32x16) ShiftLeftAndFillUpperFromMasked(y Uint32x16, z Uint32x16, u Mask32x16) Uint32x16 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVQ, CPU Feature: AVX512EVEX func (x Uint64x2) ShiftLeftAndFillUpperFromMasked(y Uint64x2, z Uint64x2, u Mask64x2) Uint64x2 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVQ, CPU Feature: AVX512EVEX func (x Uint64x4) ShiftLeftAndFillUpperFromMasked(y Uint64x4, z Uint64x4, u Mask64x4) Uint64x4 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVQ, CPU Feature: AVX512EVEX @@ -7527,92 +7527,92 @@ func (x Uint64x8) ShiftLeftAndFillUpperFromMasked(y Uint64x8, z Uint64x8, u Mask /* ShiftLeftMasked */ -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVW, CPU Feature: AVX512EVEX func (x Int16x8) ShiftLeftMasked(y Int16x8, z Mask16x8) Int16x8 -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVW, CPU Feature: AVX512EVEX func (x Int16x16) ShiftLeftMasked(y Int16x16, z Mask16x16) Int16x16 -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVW, CPU Feature: AVX512EVEX func (x Int16x32) ShiftLeftMasked(y Int16x32, z Mask16x32) Int16x32 -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVD, CPU Feature: AVX512EVEX func (x Int32x4) ShiftLeftMasked(y Int32x4, z Mask32x4) Int32x4 -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. 
+// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVD, CPU Feature: AVX512EVEX func (x Int32x8) ShiftLeftMasked(y Int32x8, z Mask32x8) Int32x8 -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVD, CPU Feature: AVX512EVEX func (x Int32x16) ShiftLeftMasked(y Int32x16, z Mask32x16) Int32x16 -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVQ, CPU Feature: AVX512EVEX func (x Int64x2) ShiftLeftMasked(y Int64x2, z Mask64x2) Int64x2 -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVQ, CPU Feature: AVX512EVEX func (x Int64x4) ShiftLeftMasked(y Int64x4, z Mask64x4) Int64x4 -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVQ, CPU Feature: AVX512EVEX func (x Int64x8) ShiftLeftMasked(y Int64x8, z Mask64x8) Int64x8 -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVW, CPU Feature: AVX512EVEX func (x Uint16x8) ShiftLeftMasked(y Uint16x8, z Mask16x8) Uint16x8 -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVW, CPU Feature: AVX512EVEX func (x Uint16x16) ShiftLeftMasked(y Uint16x16, z Mask16x16) Uint16x16 -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVW, CPU Feature: AVX512EVEX func (x Uint16x32) ShiftLeftMasked(y Uint16x32, z Mask16x32) Uint16x32 -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. 
// // Asm: VPSLLVD, CPU Feature: AVX512EVEX func (x Uint32x4) ShiftLeftMasked(y Uint32x4, z Mask32x4) Uint32x4 -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVD, CPU Feature: AVX512EVEX func (x Uint32x8) ShiftLeftMasked(y Uint32x8, z Mask32x8) Uint32x8 -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVD, CPU Feature: AVX512EVEX func (x Uint32x16) ShiftLeftMasked(y Uint32x16, z Mask32x16) Uint32x16 -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVQ, CPU Feature: AVX512EVEX func (x Uint64x2) ShiftLeftMasked(y Uint64x2, z Mask64x2) Uint64x2 -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVQ, CPU Feature: AVX512EVEX func (x Uint64x4) ShiftLeftMasked(y Uint64x4, z Mask64x4) Uint64x4 -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVQ, CPU Feature: AVX512EVEX func (x Uint64x8) ShiftLeftMasked(y Uint64x8, z Mask64x8) Uint64x8 @@ -7821,109 +7821,109 @@ func (x Uint64x8) ShiftRightAndFillUpperFrom(y Uint64x8, z Uint64x8) Uint64x8 /* ShiftRightAndFillUpperFromMasked */ -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVW, CPU Feature: AVX512EVEX func (x Int16x8) ShiftRightAndFillUpperFromMasked(y Int16x8, z Int16x8, u Mask16x8) Int16x8 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// // Asm: VPSHRDVW, CPU Feature: AVX512EVEX func (x Int16x16) ShiftRightAndFillUpperFromMasked(y Int16x16, z Int16x16, u Mask16x16) Int16x16 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVW, CPU Feature: AVX512EVEX func (x Int16x32) ShiftRightAndFillUpperFromMasked(y Int16x32, z Int16x32, u Mask16x32) Int16x32 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVD, CPU Feature: AVX512EVEX func (x Int32x4) ShiftRightAndFillUpperFromMasked(y Int32x4, z Int32x4, u Mask32x4) Int32x4 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVD, CPU Feature: AVX512EVEX func (x Int32x8) ShiftRightAndFillUpperFromMasked(y Int32x8, z Int32x8, u Mask32x8) Int32x8 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVD, CPU Feature: AVX512EVEX func (x Int32x16) ShiftRightAndFillUpperFromMasked(y Int32x16, z Int32x16, u Mask32x16) Int32x16 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVQ, CPU Feature: AVX512EVEX func (x Int64x2) ShiftRightAndFillUpperFromMasked(y Int64x2, z Int64x2, u Mask64x2) Int64x2 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVQ, CPU Feature: AVX512EVEX func (x Int64x4) ShiftRightAndFillUpperFromMasked(y Int64x4, z Int64x4, u Mask64x4) Int64x4 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// // Asm: VPSHRDVQ, CPU Feature: AVX512EVEX func (x Int64x8) ShiftRightAndFillUpperFromMasked(y Int64x8, z Int64x8, u Mask64x8) Int64x8 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVW, CPU Feature: AVX512EVEX func (x Uint16x8) ShiftRightAndFillUpperFromMasked(y Uint16x8, z Uint16x8, u Mask16x8) Uint16x8 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVW, CPU Feature: AVX512EVEX func (x Uint16x16) ShiftRightAndFillUpperFromMasked(y Uint16x16, z Uint16x16, u Mask16x16) Uint16x16 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVW, CPU Feature: AVX512EVEX func (x Uint16x32) ShiftRightAndFillUpperFromMasked(y Uint16x32, z Uint16x32, u Mask16x32) Uint16x32 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVD, CPU Feature: AVX512EVEX func (x Uint32x4) ShiftRightAndFillUpperFromMasked(y Uint32x4, z Uint32x4, u Mask32x4) Uint32x4 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVD, CPU Feature: AVX512EVEX func (x Uint32x8) ShiftRightAndFillUpperFromMasked(y Uint32x8, z Uint32x8, u Mask32x8) Uint32x8 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// // Asm: VPSHRDVD, CPU Feature: AVX512EVEX func (x Uint32x16) ShiftRightAndFillUpperFromMasked(y Uint32x16, z Uint32x16, u Mask32x16) Uint32x16 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVQ, CPU Feature: AVX512EVEX func (x Uint64x2) ShiftRightAndFillUpperFromMasked(y Uint64x2, z Uint64x2, u Mask64x2) Uint64x2 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVQ, CPU Feature: AVX512EVEX func (x Uint64x4) ShiftRightAndFillUpperFromMasked(y Uint64x4, z Uint64x4, u Mask64x4) Uint64x4 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVQ, CPU Feature: AVX512EVEX @@ -7931,92 +7931,92 @@ func (x Uint64x8) ShiftRightAndFillUpperFromMasked(y Uint64x8, z Uint64x8, u Mas /* ShiftRightMasked */ -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // Asm: VPSRLVW, CPU Feature: AVX512EVEX func (x Int16x8) ShiftRightMasked(y Int16x8, z Mask16x8) Int16x8 -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // Asm: VPSRLVW, CPU Feature: AVX512EVEX func (x Int16x16) ShiftRightMasked(y Int16x16, z Mask16x16) Int16x16 -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // Asm: VPSRLVW, CPU Feature: AVX512EVEX func (x Int16x32) ShiftRightMasked(y Int16x32, z Mask16x32) Int16x32 -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // Asm: VPSRLVD, CPU Feature: AVX512EVEX func (x Int32x4) ShiftRightMasked(y Int32x4, z Mask32x4) Int32x4 -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. 
+// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // Asm: VPSRLVD, CPU Feature: AVX512EVEX func (x Int32x8) ShiftRightMasked(y Int32x8, z Mask32x8) Int32x8 -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // Asm: VPSRLVD, CPU Feature: AVX512EVEX func (x Int32x16) ShiftRightMasked(y Int32x16, z Mask32x16) Int32x16 -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // Asm: VPSRLVQ, CPU Feature: AVX512EVEX func (x Int64x2) ShiftRightMasked(y Int64x2, z Mask64x2) Int64x2 -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // Asm: VPSRLVQ, CPU Feature: AVX512EVEX func (x Int64x4) ShiftRightMasked(y Int64x4, z Mask64x4) Int64x4 -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // Asm: VPSRLVQ, CPU Feature: AVX512EVEX func (x Int64x8) ShiftRightMasked(y Int64x8, z Mask64x8) Int64x8 -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // Asm: VPSRLVW, CPU Feature: AVX512EVEX func (x Uint16x8) ShiftRightMasked(y Uint16x8, z Mask16x8) Uint16x8 -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // Asm: VPSRLVW, CPU Feature: AVX512EVEX func (x Uint16x16) ShiftRightMasked(y Uint16x16, z Mask16x16) Uint16x16 -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // Asm: VPSRLVW, CPU Feature: AVX512EVEX func (x Uint16x32) ShiftRightMasked(y Uint16x32, z Mask16x32) Uint16x32 -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. 
// // Asm: VPSRLVD, CPU Feature: AVX512EVEX func (x Uint32x4) ShiftRightMasked(y Uint32x4, z Mask32x4) Uint32x4 -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // Asm: VPSRLVD, CPU Feature: AVX512EVEX func (x Uint32x8) ShiftRightMasked(y Uint32x8, z Mask32x8) Uint32x8 -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // Asm: VPSRLVD, CPU Feature: AVX512EVEX func (x Uint32x16) ShiftRightMasked(y Uint32x16, z Mask32x16) Uint32x16 -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // Asm: VPSRLVQ, CPU Feature: AVX512EVEX func (x Uint64x2) ShiftRightMasked(y Uint64x2, z Mask64x2) Uint64x2 -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // Asm: VPSRLVQ, CPU Feature: AVX512EVEX func (x Uint64x4) ShiftRightMasked(y Uint64x4, z Mask64x4) Uint64x4 -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // Asm: VPSRLVQ, CPU Feature: AVX512EVEX func (x Uint64x8) ShiftRightMasked(y Uint64x8, z Mask64x8) Uint64x8 @@ -8115,92 +8115,92 @@ func (x Uint64x8) ShiftRightSignExtended(y Uint64x8) Uint64x8 /* ShiftRightSignExtendedMasked */ -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVW, CPU Feature: AVX512EVEX func (x Int16x8) ShiftRightSignExtendedMasked(y Int16x8, z Mask16x8) Int16x8 -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVW, CPU Feature: AVX512EVEX func (x Int16x16) ShiftRightSignExtendedMasked(y Int16x16, z Mask16x16) Int16x16 -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
+// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVW, CPU Feature: AVX512EVEX func (x Int16x32) ShiftRightSignExtendedMasked(y Int16x32, z Mask16x32) Int16x32 -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVD, CPU Feature: AVX512EVEX func (x Int32x4) ShiftRightSignExtendedMasked(y Int32x4, z Mask32x4) Int32x4 -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVD, CPU Feature: AVX512EVEX func (x Int32x8) ShiftRightSignExtendedMasked(y Int32x8, z Mask32x8) Int32x8 -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVD, CPU Feature: AVX512EVEX func (x Int32x16) ShiftRightSignExtendedMasked(y Int32x16, z Mask32x16) Int32x16 -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVQ, CPU Feature: AVX512EVEX func (x Int64x2) ShiftRightSignExtendedMasked(y Int64x2, z Mask64x2) Int64x2 -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVQ, CPU Feature: AVX512EVEX func (x Int64x4) ShiftRightSignExtendedMasked(y Int64x4, z Mask64x4) Int64x4 -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVQ, CPU Feature: AVX512EVEX func (x Int64x8) ShiftRightSignExtendedMasked(y Int64x8, z Mask64x8) Int64x8 -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. 
Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVW, CPU Feature: AVX512EVEX func (x Uint16x8) ShiftRightSignExtendedMasked(y Uint16x8, z Mask16x8) Uint16x8 -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVW, CPU Feature: AVX512EVEX func (x Uint16x16) ShiftRightSignExtendedMasked(y Uint16x16, z Mask16x16) Uint16x16 -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVW, CPU Feature: AVX512EVEX func (x Uint16x32) ShiftRightSignExtendedMasked(y Uint16x32, z Mask16x32) Uint16x32 -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVD, CPU Feature: AVX512EVEX func (x Uint32x4) ShiftRightSignExtendedMasked(y Uint32x4, z Mask32x4) Uint32x4 -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVD, CPU Feature: AVX512EVEX func (x Uint32x8) ShiftRightSignExtendedMasked(y Uint32x8, z Mask32x8) Uint32x8 -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVD, CPU Feature: AVX512EVEX func (x Uint32x16) ShiftRightSignExtendedMasked(y Uint32x16, z Mask32x16) Uint32x16 -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVQ, CPU Feature: AVX512EVEX func (x Uint64x2) ShiftRightSignExtendedMasked(y Uint64x2, z Mask64x2) Uint64x2 -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
// // Asm: VPSRAVQ, CPU Feature: AVX512EVEX func (x Uint64x4) ShiftRightSignExtendedMasked(y Uint64x4, z Mask64x4) Uint64x4 -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVQ, CPU Feature: AVX512EVEX func (x Uint64x8) ShiftRightSignExtendedMasked(y Uint64x8, z Mask64x8) Uint64x8 @@ -8277,32 +8277,32 @@ func (x Float64x8) Sqrt() Float64x8 /* SqrtMasked */ -// Sqrt computes the square root of each element. +// SqrtMasked computes the square root of each element. // // Asm: VSQRTPS, CPU Feature: AVX512EVEX func (x Float32x4) SqrtMasked(y Mask32x4) Float32x4 -// Sqrt computes the square root of each element. +// SqrtMasked computes the square root of each element. // // Asm: VSQRTPS, CPU Feature: AVX512EVEX func (x Float32x8) SqrtMasked(y Mask32x8) Float32x8 -// Sqrt computes the square root of each element. +// SqrtMasked computes the square root of each element. // // Asm: VSQRTPS, CPU Feature: AVX512EVEX func (x Float32x16) SqrtMasked(y Mask32x16) Float32x16 -// Sqrt computes the square root of each element. +// SqrtMasked computes the square root of each element. // // Asm: VSQRTPD, CPU Feature: AVX512EVEX func (x Float64x2) SqrtMasked(y Mask64x2) Float64x2 -// Sqrt computes the square root of each element. +// SqrtMasked computes the square root of each element. // // Asm: VSQRTPD, CPU Feature: AVX512EVEX func (x Float64x4) SqrtMasked(y Mask64x4) Float64x4 -// Sqrt computes the square root of each element. +// SqrtMasked computes the square root of each element. // // Asm: VSQRTPD, CPU Feature: AVX512EVEX func (x Float64x8) SqrtMasked(y Mask64x8) Float64x8 @@ -8461,152 +8461,152 @@ func (x Uint64x8) Sub(y Uint64x8) Uint64x8 /* SubMasked */ -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VSUBPS, CPU Feature: AVX512EVEX func (x Float32x4) SubMasked(y Float32x4, z Mask32x4) Float32x4 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VSUBPS, CPU Feature: AVX512EVEX func (x Float32x8) SubMasked(y Float32x8, z Mask32x8) Float32x8 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VSUBPS, CPU Feature: AVX512EVEX func (x Float32x16) SubMasked(y Float32x16, z Mask32x16) Float32x16 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VSUBPD, CPU Feature: AVX512EVEX func (x Float64x2) SubMasked(y Float64x2, z Mask64x2) Float64x2 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VSUBPD, CPU Feature: AVX512EVEX func (x Float64x4) SubMasked(y Float64x4, z Mask64x4) Float64x4 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VSUBPD, CPU Feature: AVX512EVEX func (x Float64x8) SubMasked(y Float64x8, z Mask64x8) Float64x8 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. 
// // Asm: VPSUBB, CPU Feature: AVX512EVEX func (x Int8x16) SubMasked(y Int8x16, z Mask8x16) Int8x16 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBB, CPU Feature: AVX512EVEX func (x Int8x32) SubMasked(y Int8x32, z Mask8x32) Int8x32 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBB, CPU Feature: AVX512EVEX func (x Int8x64) SubMasked(y Int8x64, z Mask8x64) Int8x64 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBW, CPU Feature: AVX512EVEX func (x Int16x8) SubMasked(y Int16x8, z Mask16x8) Int16x8 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBW, CPU Feature: AVX512EVEX func (x Int16x16) SubMasked(y Int16x16, z Mask16x16) Int16x16 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBW, CPU Feature: AVX512EVEX func (x Int16x32) SubMasked(y Int16x32, z Mask16x32) Int16x32 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBD, CPU Feature: AVX512EVEX func (x Int32x4) SubMasked(y Int32x4, z Mask32x4) Int32x4 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBD, CPU Feature: AVX512EVEX func (x Int32x8) SubMasked(y Int32x8, z Mask32x8) Int32x8 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBD, CPU Feature: AVX512EVEX func (x Int32x16) SubMasked(y Int32x16, z Mask32x16) Int32x16 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBQ, CPU Feature: AVX512EVEX func (x Int64x2) SubMasked(y Int64x2, z Mask64x2) Int64x2 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBQ, CPU Feature: AVX512EVEX func (x Int64x4) SubMasked(y Int64x4, z Mask64x4) Int64x4 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBQ, CPU Feature: AVX512EVEX func (x Int64x8) SubMasked(y Int64x8, z Mask64x8) Int64x8 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBB, CPU Feature: AVX512EVEX func (x Uint8x16) SubMasked(y Uint8x16, z Mask8x16) Uint8x16 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBB, CPU Feature: AVX512EVEX func (x Uint8x32) SubMasked(y Uint8x32, z Mask8x32) Uint8x32 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBB, CPU Feature: AVX512EVEX func (x Uint8x64) SubMasked(y Uint8x64, z Mask8x64) Uint8x64 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBW, CPU Feature: AVX512EVEX func (x Uint16x8) SubMasked(y Uint16x8, z Mask16x8) Uint16x8 -// Sub subtracts corresponding elements of two vectors. 
+// SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBW, CPU Feature: AVX512EVEX func (x Uint16x16) SubMasked(y Uint16x16, z Mask16x16) Uint16x16 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBW, CPU Feature: AVX512EVEX func (x Uint16x32) SubMasked(y Uint16x32, z Mask16x32) Uint16x32 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBD, CPU Feature: AVX512EVEX func (x Uint32x4) SubMasked(y Uint32x4, z Mask32x4) Uint32x4 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBD, CPU Feature: AVX512EVEX func (x Uint32x8) SubMasked(y Uint32x8, z Mask32x8) Uint32x8 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBD, CPU Feature: AVX512EVEX func (x Uint32x16) SubMasked(y Uint32x16, z Mask32x16) Uint32x16 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBQ, CPU Feature: AVX512EVEX func (x Uint64x2) SubMasked(y Uint64x2, z Mask64x2) Uint64x2 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBQ, CPU Feature: AVX512EVEX func (x Uint64x4) SubMasked(y Uint64x4, z Mask64x4) Uint64x4 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBQ, CPU Feature: AVX512EVEX func (x Uint64x8) SubMasked(y Uint64x8, z Mask64x8) Uint64x8 @@ -8667,32 +8667,32 @@ func (x Float64x8) TruncWithPrecision(imm uint8) Float64x8 /* TruncWithPrecisionMasked */ -// TruncWithPrecision truncates elements with specified precision. +// TruncWithPrecisionMasked truncates elements with specified precision. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) TruncWithPrecisionMasked(imm uint8, y Mask32x4) Float32x4 -// TruncWithPrecision truncates elements with specified precision. +// TruncWithPrecisionMasked truncates elements with specified precision. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) TruncWithPrecisionMasked(imm uint8, y Mask32x8) Float32x8 -// TruncWithPrecision truncates elements with specified precision. +// TruncWithPrecisionMasked truncates elements with specified precision. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) TruncWithPrecisionMasked(imm uint8, y Mask32x16) Float32x16 -// TruncWithPrecision truncates elements with specified precision. +// TruncWithPrecisionMasked truncates elements with specified precision. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) TruncWithPrecisionMasked(imm uint8, y Mask64x2) Float64x2 -// TruncWithPrecision truncates elements with specified precision. +// TruncWithPrecisionMasked truncates elements with specified precision. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) TruncWithPrecisionMasked(imm uint8, y Mask64x4) Float64x4 -// TruncWithPrecision truncates elements with specified precision. +// TruncWithPrecisionMasked truncates elements with specified precision. 
// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) TruncWithPrecisionMasked(imm uint8, y Mask64x8) Float64x8 @@ -8731,32 +8731,32 @@ func (x Uint32x16) UnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Ui /* UnsignedSignedQuadDotProdAccumulateMasked */ -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSD, CPU Feature: AVX512EVEX func (x Int32x4) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int8x16, u Mask32x4) Int32x4 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSD, CPU Feature: AVX512EVEX func (x Int32x8) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int8x32, u Mask32x8) Int32x8 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSD, CPU Feature: AVX512EVEX func (x Int32x16) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int8x64, u Mask32x16) Int32x16 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSD, CPU Feature: AVX512EVEX func (x Uint32x4) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int8x16, u Mask32x4) Uint32x4 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSD, CPU Feature: AVX512EVEX func (x Uint32x8) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int8x32, u Mask32x8) Uint32x8 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSD, CPU Feature: AVX512EVEX func (x Uint32x16) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int8x64, u Mask32x16) Uint32x16 @@ -8865,62 +8865,62 @@ func (x Uint64x8) Xor(y Uint64x8) Uint64x8 /* XorMasked */ -// Xor performs a masked bitwise XOR operation between two vectors. +// XorMasked performs a masked bitwise XOR operation between two vectors. // // Asm: VPXORD, CPU Feature: AVX512EVEX func (x Int32x4) XorMasked(y Int32x4, z Mask32x4) Int32x4 -// Xor performs a masked bitwise XOR operation between two vectors. +// XorMasked performs a masked bitwise XOR operation between two vectors. // // Asm: VPXORD, CPU Feature: AVX512EVEX func (x Int32x8) XorMasked(y Int32x8, z Mask32x8) Int32x8 -// Xor performs a masked bitwise XOR operation between two vectors. +// XorMasked performs a masked bitwise XOR operation between two vectors. 
// // Asm: VPXORD, CPU Feature: AVX512EVEX func (x Int32x16) XorMasked(y Int32x16, z Mask32x16) Int32x16 -// Xor performs a masked bitwise XOR operation between two vectors. +// XorMasked performs a masked bitwise XOR operation between two vectors. // // Asm: VPXORQ, CPU Feature: AVX512EVEX func (x Int64x2) XorMasked(y Int64x2, z Mask64x2) Int64x2 -// Xor performs a masked bitwise XOR operation between two vectors. +// XorMasked performs a masked bitwise XOR operation between two vectors. // // Asm: VPXORQ, CPU Feature: AVX512EVEX func (x Int64x4) XorMasked(y Int64x4, z Mask64x4) Int64x4 -// Xor performs a masked bitwise XOR operation between two vectors. +// XorMasked performs a masked bitwise XOR operation between two vectors. // // Asm: VPXORQ, CPU Feature: AVX512EVEX func (x Int64x8) XorMasked(y Int64x8, z Mask64x8) Int64x8 -// Xor performs a masked bitwise XOR operation between two vectors. +// XorMasked performs a masked bitwise XOR operation between two vectors. // // Asm: VPXORD, CPU Feature: AVX512EVEX func (x Uint32x4) XorMasked(y Uint32x4, z Mask32x4) Uint32x4 -// Xor performs a masked bitwise XOR operation between two vectors. +// XorMasked performs a masked bitwise XOR operation between two vectors. // // Asm: VPXORD, CPU Feature: AVX512EVEX func (x Uint32x8) XorMasked(y Uint32x8, z Mask32x8) Uint32x8 -// Xor performs a masked bitwise XOR operation between two vectors. +// XorMasked performs a masked bitwise XOR operation between two vectors. // // Asm: VPXORD, CPU Feature: AVX512EVEX func (x Uint32x16) XorMasked(y Uint32x16, z Mask32x16) Uint32x16 -// Xor performs a masked bitwise XOR operation between two vectors. +// XorMasked performs a masked bitwise XOR operation between two vectors. // // Asm: VPXORQ, CPU Feature: AVX512EVEX func (x Uint64x2) XorMasked(y Uint64x2, z Mask64x2) Uint64x2 -// Xor performs a masked bitwise XOR operation between two vectors. +// XorMasked performs a masked bitwise XOR operation between two vectors. // // Asm: VPXORQ, CPU Feature: AVX512EVEX func (x Uint64x4) XorMasked(y Uint64x4, z Mask64x4) Uint64x4 -// Xor performs a masked bitwise XOR operation between two vectors. +// XorMasked performs a masked bitwise XOR operation between two vectors. // // Asm: VPXORQ, CPU Feature: AVX512EVEX func (x Uint64x8) XorMasked(y Uint64x8, z Mask64x8) Uint64x8 diff --git a/src/simd/simd_wrapped_test.go b/src/simd/simd_wrapped_test.go index bdbb25bfce4e6a..62096a76cf7ab1 100644 --- a/src/simd/simd_wrapped_test.go +++ b/src/simd/simd_wrapped_test.go @@ -7863,8 +7863,8 @@ func testUint64x8UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint6 // FloorWithPrecision // FloorWithPrecisionMasked // GaloisFieldAffineTransform -// GaloisFieldAffineTransformInversed -// GaloisFieldAffineTransformInversedMasked +// GaloisFieldAffineTransformInverse +// GaloisFieldAffineTransformInverseMasked // GaloisFieldAffineTransformMasked // Get128 // GetElem From 4993a91ae18f0e0f0edf6d86ff5bb26fd9182731 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Fri, 11 Jul 2025 02:15:12 +0000 Subject: [PATCH 080/139] [dev.simd] simd: change imm param name to constant This CL is generated by CL 687416. 
Change-Id: I3e878264fe5269635309b904576e8807ac723573 Reviewed-on: https://go-review.googlesource.com/c/go/+/687377 LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui Reviewed-by: David Chase --- src/simd/ops_amd64.go | 1032 +++++++++++++++++++++++++++++++---------- 1 file changed, 780 insertions(+), 252 deletions(-) diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index e2f0460274f217..e98aca1abfba64 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -1000,321 +1000,441 @@ func (x Float64x4) Ceil() Float64x4 // CeilWithPrecision rounds elements up with specified precision, masked. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) CeilWithPrecision(imm uint8) Float32x4 +func (x Float32x4) CeilWithPrecision(prec uint8) Float32x4 // CeilWithPrecision rounds elements up with specified precision, masked. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) CeilWithPrecision(imm uint8) Float32x8 +func (x Float32x8) CeilWithPrecision(prec uint8) Float32x8 // CeilWithPrecision rounds elements up with specified precision, masked. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) CeilWithPrecision(imm uint8) Float32x16 +func (x Float32x16) CeilWithPrecision(prec uint8) Float32x16 // CeilWithPrecision rounds elements up with specified precision, masked. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) CeilWithPrecision(imm uint8) Float64x2 +func (x Float64x2) CeilWithPrecision(prec uint8) Float64x2 // CeilWithPrecision rounds elements up with specified precision, masked. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) CeilWithPrecision(imm uint8) Float64x4 +func (x Float64x4) CeilWithPrecision(prec uint8) Float64x4 // CeilWithPrecision rounds elements up with specified precision, masked. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) CeilWithPrecision(imm uint8) Float64x8 +func (x Float64x8) CeilWithPrecision(prec uint8) Float64x8 /* CeilWithPrecisionMasked */ // CeilWithPrecisionMasked rounds elements up with specified precision, masked. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) CeilWithPrecisionMasked(imm uint8, y Mask32x4) Float32x4 +func (x Float32x4) CeilWithPrecisionMasked(prec uint8, y Mask32x4) Float32x4 // CeilWithPrecisionMasked rounds elements up with specified precision, masked. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) CeilWithPrecisionMasked(imm uint8, y Mask32x8) Float32x8 +func (x Float32x8) CeilWithPrecisionMasked(prec uint8, y Mask32x8) Float32x8 // CeilWithPrecisionMasked rounds elements up with specified precision, masked. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) CeilWithPrecisionMasked(imm uint8, y Mask32x16) Float32x16 +func (x Float32x16) CeilWithPrecisionMasked(prec uint8, y Mask32x16) Float32x16 // CeilWithPrecisionMasked rounds elements up with specified precision, masked. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) CeilWithPrecisionMasked(imm uint8, y Mask64x2) Float64x2 +func (x Float64x2) CeilWithPrecisionMasked(prec uint8, y Mask64x2) Float64x2 // CeilWithPrecisionMasked rounds elements up with specified precision, masked. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) CeilWithPrecisionMasked(imm uint8, y Mask64x4) Float64x4 +func (x Float64x4) CeilWithPrecisionMasked(prec uint8, y Mask64x4) Float64x4 // CeilWithPrecisionMasked rounds elements up with specified precision, masked. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) CeilWithPrecisionMasked(imm uint8, y Mask64x8) Float64x8 +func (x Float64x8) CeilWithPrecisionMasked(prec uint8, y Mask64x8) Float64x8 /* DiffWithCeilWithPrecision */ // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithCeilWithPrecision(imm uint8) Float32x4 +func (x Float32x4) DiffWithCeilWithPrecision(prec uint8) Float32x4 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithCeilWithPrecision(imm uint8) Float32x8 +func (x Float32x8) DiffWithCeilWithPrecision(prec uint8) Float32x8 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithCeilWithPrecision(imm uint8) Float32x16 +func (x Float32x16) DiffWithCeilWithPrecision(prec uint8) Float32x16 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithCeilWithPrecision(imm uint8) Float64x2 +func (x Float64x2) DiffWithCeilWithPrecision(prec uint8) Float64x2 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithCeilWithPrecision(imm uint8) Float64x4 +func (x Float64x4) DiffWithCeilWithPrecision(prec uint8) Float64x4 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithCeilWithPrecision(imm uint8) Float64x8 +func (x Float64x8) DiffWithCeilWithPrecision(prec uint8) Float64x8 /* DiffWithCeilWithPrecisionMasked */ // DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithCeilWithPrecisionMasked(imm uint8, y Mask32x4) Float32x4 +func (x Float32x4) DiffWithCeilWithPrecisionMasked(prec uint8, y Mask32x4) Float32x4 // DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithCeilWithPrecisionMasked(imm uint8, y Mask32x8) Float32x8 +func (x Float32x8) DiffWithCeilWithPrecisionMasked(prec uint8, y Mask32x8) Float32x8 // DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithCeilWithPrecisionMasked(imm uint8, y Mask32x16) Float32x16 +func (x Float32x16) DiffWithCeilWithPrecisionMasked(prec uint8, y Mask32x16) Float32x16 // DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithCeilWithPrecisionMasked(imm uint8, y Mask64x2) Float64x2 +func (x Float64x2) DiffWithCeilWithPrecisionMasked(prec uint8, y Mask64x2) Float64x2 // DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithCeilWithPrecisionMasked(imm uint8, y Mask64x4) Float64x4 +func (x Float64x4) DiffWithCeilWithPrecisionMasked(prec uint8, y Mask64x4) Float64x4 // DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithCeilWithPrecisionMasked(imm uint8, y Mask64x8) Float64x8 +func (x Float64x8) DiffWithCeilWithPrecisionMasked(prec uint8, y Mask64x8) Float64x8 /* DiffWithFloorWithPrecision */ // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithFloorWithPrecision(imm uint8) Float32x4 +func (x Float32x4) DiffWithFloorWithPrecision(prec uint8) Float32x4 // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithFloorWithPrecision(imm uint8) Float32x8 +func (x Float32x8) DiffWithFloorWithPrecision(prec uint8) Float32x8 // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. 
// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithFloorWithPrecision(imm uint8) Float32x16 +func (x Float32x16) DiffWithFloorWithPrecision(prec uint8) Float32x16 // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithFloorWithPrecision(imm uint8) Float64x2 +func (x Float64x2) DiffWithFloorWithPrecision(prec uint8) Float64x2 // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithFloorWithPrecision(imm uint8) Float64x4 +func (x Float64x4) DiffWithFloorWithPrecision(prec uint8) Float64x4 // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithFloorWithPrecision(imm uint8) Float64x8 +func (x Float64x8) DiffWithFloorWithPrecision(prec uint8) Float64x8 /* DiffWithFloorWithPrecisionMasked */ // DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithFloorWithPrecisionMasked(imm uint8, y Mask32x4) Float32x4 +func (x Float32x4) DiffWithFloorWithPrecisionMasked(prec uint8, y Mask32x4) Float32x4 // DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithFloorWithPrecisionMasked(imm uint8, y Mask32x8) Float32x8 +func (x Float32x8) DiffWithFloorWithPrecisionMasked(prec uint8, y Mask32x8) Float32x8 // DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithFloorWithPrecisionMasked(imm uint8, y Mask32x16) Float32x16 +func (x Float32x16) DiffWithFloorWithPrecisionMasked(prec uint8, y Mask32x16) Float32x16 // DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithFloorWithPrecisionMasked(imm uint8, y Mask64x2) Float64x2 +func (x Float64x2) DiffWithFloorWithPrecisionMasked(prec uint8, y Mask64x2) Float64x2 // DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithFloorWithPrecisionMasked(imm uint8, y Mask64x4) Float64x4 +func (x Float64x4) DiffWithFloorWithPrecisionMasked(prec uint8, y Mask64x4) Float64x4 // DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithFloorWithPrecisionMasked(imm uint8, y Mask64x8) Float64x8 +func (x Float64x8) DiffWithFloorWithPrecisionMasked(prec uint8, y Mask64x8) Float64x8 /* DiffWithRoundWithPrecision */ // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithRoundWithPrecision(imm uint8) Float32x4 +func (x Float32x4) DiffWithRoundWithPrecision(prec uint8) Float32x4 // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithRoundWithPrecision(imm uint8) Float32x8 +func (x Float32x8) DiffWithRoundWithPrecision(prec uint8) Float32x8 // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithRoundWithPrecision(imm uint8) Float32x16 +func (x Float32x16) DiffWithRoundWithPrecision(prec uint8) Float32x16 // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithRoundWithPrecision(imm uint8) Float64x2 +func (x Float64x2) DiffWithRoundWithPrecision(prec uint8) Float64x2 // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithRoundWithPrecision(imm uint8) Float64x4 +func (x Float64x4) DiffWithRoundWithPrecision(prec uint8) Float64x4 // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithRoundWithPrecision(imm uint8) Float64x8 +func (x Float64x8) DiffWithRoundWithPrecision(prec uint8) Float64x8 /* DiffWithRoundWithPrecisionMasked */ // DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithRoundWithPrecisionMasked(imm uint8, y Mask32x4) Float32x4 +func (x Float32x4) DiffWithRoundWithPrecisionMasked(prec uint8, y Mask32x4) Float32x4 // DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithRoundWithPrecisionMasked(imm uint8, y Mask32x8) Float32x8 +func (x Float32x8) DiffWithRoundWithPrecisionMasked(prec uint8, y Mask32x8) Float32x8 // DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithRoundWithPrecisionMasked(imm uint8, y Mask32x16) Float32x16 +func (x Float32x16) DiffWithRoundWithPrecisionMasked(prec uint8, y Mask32x16) Float32x16 // DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithRoundWithPrecisionMasked(imm uint8, y Mask64x2) Float64x2 +func (x Float64x2) DiffWithRoundWithPrecisionMasked(prec uint8, y Mask64x2) Float64x2 // DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithRoundWithPrecisionMasked(imm uint8, y Mask64x4) Float64x4 +func (x Float64x4) DiffWithRoundWithPrecisionMasked(prec uint8, y Mask64x4) Float64x4 // DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithRoundWithPrecisionMasked(imm uint8, y Mask64x8) Float64x8 +func (x Float64x8) DiffWithRoundWithPrecisionMasked(prec uint8, y Mask64x8) Float64x8 /* DiffWithTruncWithPrecision */ // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithTruncWithPrecision(imm uint8) Float32x4 +func (x Float32x4) DiffWithTruncWithPrecision(prec uint8) Float32x4 // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithTruncWithPrecision(imm uint8) Float32x8 +func (x Float32x8) DiffWithTruncWithPrecision(prec uint8) Float32x8 // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithTruncWithPrecision(imm uint8) Float32x16 +func (x Float32x16) DiffWithTruncWithPrecision(prec uint8) Float32x16 // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithTruncWithPrecision(imm uint8) Float64x2 +func (x Float64x2) DiffWithTruncWithPrecision(prec uint8) Float64x2 // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. 
// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithTruncWithPrecision(imm uint8) Float64x4 +func (x Float64x4) DiffWithTruncWithPrecision(prec uint8) Float64x4 // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithTruncWithPrecision(imm uint8) Float64x8 +func (x Float64x8) DiffWithTruncWithPrecision(prec uint8) Float64x8 /* DiffWithTruncWithPrecisionMasked */ // DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithTruncWithPrecisionMasked(imm uint8, y Mask32x4) Float32x4 +func (x Float32x4) DiffWithTruncWithPrecisionMasked(prec uint8, y Mask32x4) Float32x4 // DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithTruncWithPrecisionMasked(imm uint8, y Mask32x8) Float32x8 +func (x Float32x8) DiffWithTruncWithPrecisionMasked(prec uint8, y Mask32x8) Float32x8 // DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithTruncWithPrecisionMasked(imm uint8, y Mask32x16) Float32x16 +func (x Float32x16) DiffWithTruncWithPrecisionMasked(prec uint8, y Mask32x16) Float32x16 // DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithTruncWithPrecisionMasked(imm uint8, y Mask64x2) Float64x2 +func (x Float64x2) DiffWithTruncWithPrecisionMasked(prec uint8, y Mask64x2) Float64x2 // DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithTruncWithPrecisionMasked(imm uint8, y Mask64x4) Float64x4 +func (x Float64x4) DiffWithTruncWithPrecisionMasked(prec uint8, y Mask64x4) Float64x4 // DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithTruncWithPrecisionMasked(imm uint8, y Mask64x8) Float64x8 +func (x Float64x8) DiffWithTruncWithPrecisionMasked(prec uint8, y Mask64x8) Float64x8 /* Div */ @@ -1717,65 +1837,89 @@ func (x Float64x4) Floor() Float64x4 // FloorWithPrecision rounds elements down with specified precision, masked. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) FloorWithPrecision(imm uint8) Float32x4 +func (x Float32x4) FloorWithPrecision(prec uint8) Float32x4 // FloorWithPrecision rounds elements down with specified precision, masked. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) FloorWithPrecision(imm uint8) Float32x8 +func (x Float32x8) FloorWithPrecision(prec uint8) Float32x8 // FloorWithPrecision rounds elements down with specified precision, masked. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) FloorWithPrecision(imm uint8) Float32x16 +func (x Float32x16) FloorWithPrecision(prec uint8) Float32x16 // FloorWithPrecision rounds elements down with specified precision, masked. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) FloorWithPrecision(imm uint8) Float64x2 +func (x Float64x2) FloorWithPrecision(prec uint8) Float64x2 // FloorWithPrecision rounds elements down with specified precision, masked. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) FloorWithPrecision(imm uint8) Float64x4 +func (x Float64x4) FloorWithPrecision(prec uint8) Float64x4 // FloorWithPrecision rounds elements down with specified precision, masked. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) FloorWithPrecision(imm uint8) Float64x8 +func (x Float64x8) FloorWithPrecision(prec uint8) Float64x8 /* FloorWithPrecisionMasked */ // FloorWithPrecisionMasked rounds elements down with specified precision, masked. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) FloorWithPrecisionMasked(imm uint8, y Mask32x4) Float32x4 +func (x Float32x4) FloorWithPrecisionMasked(prec uint8, y Mask32x4) Float32x4 // FloorWithPrecisionMasked rounds elements down with specified precision, masked. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) FloorWithPrecisionMasked(imm uint8, y Mask32x8) Float32x8 +func (x Float32x8) FloorWithPrecisionMasked(prec uint8, y Mask32x8) Float32x8 // FloorWithPrecisionMasked rounds elements down with specified precision, masked. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) FloorWithPrecisionMasked(imm uint8, y Mask32x16) Float32x16 +func (x Float32x16) FloorWithPrecisionMasked(prec uint8, y Mask32x16) Float32x16 // FloorWithPrecisionMasked rounds elements down with specified precision, masked. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) FloorWithPrecisionMasked(imm uint8, y Mask64x2) Float64x2 +func (x Float64x2) FloorWithPrecisionMasked(prec uint8, y Mask64x2) Float64x2 // FloorWithPrecisionMasked rounds elements down with specified precision, masked. 
// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) FloorWithPrecisionMasked(imm uint8, y Mask64x4) Float64x4 +func (x Float64x4) FloorWithPrecisionMasked(prec uint8, y Mask64x4) Float64x4 // FloorWithPrecisionMasked rounds elements down with specified precision, masked. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) FloorWithPrecisionMasked(imm uint8, y Mask64x8) Float64x8 +func (x Float64x8) FloorWithPrecisionMasked(prec uint8, y Mask64x8) Float64x8 /* FusedMultiplyAdd */ @@ -1976,6 +2120,8 @@ func (x Float64x8) FusedMultiplySubAddMasked(y Float64x8, z Float64x8, u Mask64x // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // +// b is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX func (x Uint8x16) GaloisFieldAffineTransform(y Uint64x2, b uint8) Uint8x16 @@ -1984,6 +2130,8 @@ func (x Uint8x16) GaloisFieldAffineTransform(y Uint64x2, b uint8) Uint8x16 // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // +// b is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX func (x Uint8x32) GaloisFieldAffineTransform(y Uint64x4, b uint8) Uint8x32 @@ -1992,6 +2140,8 @@ func (x Uint8x32) GaloisFieldAffineTransform(y Uint64x4, b uint8) Uint8x32 // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // +// b is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX func (x Uint8x64) GaloisFieldAffineTransform(y Uint64x8, b uint8) Uint8x64 @@ -2003,6 +2153,8 @@ func (x Uint8x64) GaloisFieldAffineTransform(y Uint64x8, b uint8) Uint8x64 // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // +// b is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX func (x Uint8x16) GaloisFieldAffineTransformInverse(y Uint64x2, b uint8) Uint8x16 @@ -2012,6 +2164,8 @@ func (x Uint8x16) GaloisFieldAffineTransformInverse(y Uint64x2, b uint8) Uint8x1 // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // +// b is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX func (x Uint8x32) GaloisFieldAffineTransformInverse(y Uint64x4, b uint8) Uint8x32 @@ -2021,6 +2175,8 @@ func (x Uint8x32) GaloisFieldAffineTransformInverse(y Uint64x4, b uint8) Uint8x3 // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // +// b is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX func (x Uint8x64) GaloisFieldAffineTransformInverse(y Uint64x8, b uint8) Uint8x64 @@ -2032,6 +2188,8 @@ func (x Uint8x64) GaloisFieldAffineTransformInverse(y Uint64x8, b uint8) Uint8x6 // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // +// b is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX func (x Uint8x16) GaloisFieldAffineTransformInverseMasked(y Uint64x2, b uint8, m Mask8x16) Uint8x16 @@ -2041,6 +2199,8 @@ func (x Uint8x16) GaloisFieldAffineTransformInverseMasked(y Uint64x2, b uint8, m // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // +// b is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX func (x Uint8x32) GaloisFieldAffineTransformInverseMasked(y Uint64x4, b uint8, m Mask8x32) Uint8x32 @@ -2050,6 +2210,8 @@ func (x Uint8x32) GaloisFieldAffineTransformInverseMasked(y Uint64x4, b uint8, m // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // +// b is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX func (x Uint8x64) GaloisFieldAffineTransformInverseMasked(y Uint64x8, b uint8, m Mask8x64) Uint8x64 @@ -2060,6 +2222,8 @@ func (x Uint8x64) GaloisFieldAffineTransformInverseMasked(y Uint64x8, b uint8, m // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // +// b is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX func (x Uint8x16) GaloisFieldAffineTransformMasked(y Uint64x2, b uint8, m Mask8x16) Uint8x16 @@ -2068,6 +2232,8 @@ func (x Uint8x16) GaloisFieldAffineTransformMasked(y Uint64x2, b uint8, m Mask8x // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // +// b is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX func (x Uint8x32) GaloisFieldAffineTransformMasked(y Uint64x4, b uint8, m Mask8x32) Uint8x32 @@ -2076,6 +2242,8 @@ func (x Uint8x32) GaloisFieldAffineTransformMasked(y Uint64x4, b uint8, m Mask8x // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // +// b is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX func (x Uint8x64) GaloisFieldAffineTransformMasked(y Uint64x8, b uint8, m Mask8x64) Uint8x64 @@ -2123,95 +2291,131 @@ func (x Uint8x64) GaloisFieldMulMasked(y Uint8x64, z Mask8x64) Uint8x64 // Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VEXTRACTF128, CPU Feature: AVX -func (x Float32x8) Get128(imm uint8) Float32x4 +func (x Float32x8) Get128(index uint8) Float32x4 // Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. 
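The affine-transform methods above lend themselves to a short sketch; the import path and the zero addend are assumptions, and only GaloisFieldAffineTransform as declared above is used.

	// affineBytes applies the 8x8 bit matrix packed into each 64-bit lane of m
	// to the corresponding group of 8 bytes of x, then adds the constant term b
	// (addition in GF(2) is XOR). b must be a constant; m may be a runtime value.
	func affineBytes(x simd.Uint8x16, m simd.Uint64x2) simd.Uint8x16 {
		const b = 0 // assumed addend, for illustration only
		return x.GaloisFieldAffineTransform(m, b)
	}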
// +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VEXTRACTF128, CPU Feature: AVX -func (x Float64x4) Get128(imm uint8) Float64x2 +func (x Float64x4) Get128(index uint8) Float64x2 // Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VEXTRACTI128, CPU Feature: AVX2 -func (x Int8x32) Get128(imm uint8) Int8x16 +func (x Int8x32) Get128(index uint8) Int8x16 // Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VEXTRACTI128, CPU Feature: AVX2 -func (x Int16x16) Get128(imm uint8) Int16x8 +func (x Int16x16) Get128(index uint8) Int16x8 // Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VEXTRACTI128, CPU Feature: AVX2 -func (x Int32x8) Get128(imm uint8) Int32x4 +func (x Int32x8) Get128(index uint8) Int32x4 // Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VEXTRACTI128, CPU Feature: AVX2 -func (x Int64x4) Get128(imm uint8) Int64x2 +func (x Int64x4) Get128(index uint8) Int64x2 // Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VEXTRACTI128, CPU Feature: AVX2 -func (x Uint8x32) Get128(imm uint8) Uint8x16 +func (x Uint8x32) Get128(index uint8) Uint8x16 // Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VEXTRACTI128, CPU Feature: AVX2 -func (x Uint16x16) Get128(imm uint8) Uint16x8 +func (x Uint16x16) Get128(index uint8) Uint16x8 // Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VEXTRACTI128, CPU Feature: AVX2 -func (x Uint32x8) Get128(imm uint8) Uint32x4 +func (x Uint32x8) Get128(index uint8) Uint32x4 // Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VEXTRACTI128, CPU Feature: AVX2 -func (x Uint64x4) Get128(imm uint8) Uint64x2 +func (x Uint64x4) Get128(index uint8) Uint64x2 /* GetElem */ // GetElem retrieves a single constant-indexed element's value. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPEXTRB, CPU Feature: AVX512EVEX -func (x Int8x16) GetElem(imm uint8) int8 +func (x Int8x16) GetElem(index uint8) int8 // GetElem retrieves a single constant-indexed element's value. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. 
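A minimal sketch of the half-extraction pattern above (import path assumed; the selector must be the constant 0 or 1):

	// halves splits a 256-bit vector into its 128-bit halves. Get128(0)
	// selects the lower half and Get128(1) the upper half; a non-constant
	// selector would panic at run time, as documented above.
	func halves(v simd.Int32x8) (lo, hi simd.Int32x4) {
		return v.Get128(0), v.Get128(1)
	}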
+// // Asm: VPEXTRW, CPU Feature: AVX512EVEX -func (x Int16x8) GetElem(imm uint8) int16 +func (x Int16x8) GetElem(index uint8) int16 // GetElem retrieves a single constant-indexed element's value. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPEXTRD, CPU Feature: AVX -func (x Int32x4) GetElem(imm uint8) int32 +func (x Int32x4) GetElem(index uint8) int32 // GetElem retrieves a single constant-indexed element's value. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPEXTRQ, CPU Feature: AVX -func (x Int64x2) GetElem(imm uint8) int64 +func (x Int64x2) GetElem(index uint8) int64 // GetElem retrieves a single constant-indexed element's value. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPEXTRB, CPU Feature: AVX512EVEX -func (x Uint8x16) GetElem(imm uint8) uint8 +func (x Uint8x16) GetElem(index uint8) uint8 // GetElem retrieves a single constant-indexed element's value. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPEXTRW, CPU Feature: AVX512EVEX -func (x Uint16x8) GetElem(imm uint8) uint16 +func (x Uint16x8) GetElem(index uint8) uint16 // GetElem retrieves a single constant-indexed element's value. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPEXTRD, CPU Feature: AVX -func (x Uint32x4) GetElem(imm uint8) uint32 +func (x Uint32x4) GetElem(index uint8) uint32 // GetElem retrieves a single constant-indexed element's value. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPEXTRQ, CPU Feature: AVX -func (x Uint64x2) GetElem(imm uint8) uint64 +func (x Uint64x2) GetElem(index uint8) uint64 /* Greater */ @@ -5425,249 +5629,345 @@ func (x Uint64x8) PopCountMasked(y Mask64x8) Uint64x8 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Int32x4) RotateAllLeft(imm uint8) Int32x4 +func (x Int32x4) RotateAllLeft(shift uint8) Int32x4 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Int32x8) RotateAllLeft(imm uint8) Int32x8 +func (x Int32x8) RotateAllLeft(shift uint8) Int32x8 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Int32x16) RotateAllLeft(imm uint8) Int32x16 +func (x Int32x16) RotateAllLeft(shift uint8) Int32x16 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Int64x2) RotateAllLeft(imm uint8) Int64x2 +func (x Int64x2) RotateAllLeft(shift uint8) Int64x2 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Int64x4) RotateAllLeft(imm uint8) Int64x4 +func (x Int64x4) RotateAllLeft(shift uint8) Int64x4 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Int64x8) RotateAllLeft(imm uint8) Int64x8 +func (x Int64x8) RotateAllLeft(shift uint8) Int64x8 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Uint32x4) RotateAllLeft(imm uint8) Uint32x4 +func (x Uint32x4) RotateAllLeft(shift uint8) Uint32x4 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Uint32x8) RotateAllLeft(imm uint8) Uint32x8 +func (x Uint32x8) RotateAllLeft(shift uint8) Uint32x8 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Uint32x16) RotateAllLeft(imm uint8) Uint32x16 +func (x Uint32x16) RotateAllLeft(shift uint8) Uint32x16 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Uint64x2) RotateAllLeft(imm uint8) Uint64x2 +func (x Uint64x2) RotateAllLeft(shift uint8) Uint64x2 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Uint64x4) RotateAllLeft(imm uint8) Uint64x4 +func (x Uint64x4) RotateAllLeft(shift uint8) Uint64x4 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Uint64x8) RotateAllLeft(imm uint8) Uint64x8 +func (x Uint64x8) RotateAllLeft(shift uint8) Uint64x8 /* RotateAllLeftMasked */ // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Int32x4) RotateAllLeftMasked(imm uint8, y Mask32x4) Int32x4 +func (x Int32x4) RotateAllLeftMasked(shift uint8, y Mask32x4) Int32x4 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Int32x8) RotateAllLeftMasked(imm uint8, y Mask32x8) Int32x8 +func (x Int32x8) RotateAllLeftMasked(shift uint8, y Mask32x8) Int32x8 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Int32x16) RotateAllLeftMasked(imm uint8, y Mask32x16) Int32x16 +func (x Int32x16) RotateAllLeftMasked(shift uint8, y Mask32x16) Int32x16 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Int64x2) RotateAllLeftMasked(imm uint8, y Mask64x2) Int64x2 +func (x Int64x2) RotateAllLeftMasked(shift uint8, y Mask64x2) Int64x2 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Int64x4) RotateAllLeftMasked(imm uint8, y Mask64x4) Int64x4 +func (x Int64x4) RotateAllLeftMasked(shift uint8, y Mask64x4) Int64x4 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Int64x8) RotateAllLeftMasked(imm uint8, y Mask64x8) Int64x8 +func (x Int64x8) RotateAllLeftMasked(shift uint8, y Mask64x8) Int64x8 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Uint32x4) RotateAllLeftMasked(imm uint8, y Mask32x4) Uint32x4 +func (x Uint32x4) RotateAllLeftMasked(shift uint8, y Mask32x4) Uint32x4 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Uint32x8) RotateAllLeftMasked(imm uint8, y Mask32x8) Uint32x8 +func (x Uint32x8) RotateAllLeftMasked(shift uint8, y Mask32x8) Uint32x8 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Uint32x16) RotateAllLeftMasked(imm uint8, y Mask32x16) Uint32x16 +func (x Uint32x16) RotateAllLeftMasked(shift uint8, y Mask32x16) Uint32x16 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Uint64x2) RotateAllLeftMasked(imm uint8, y Mask64x2) Uint64x2 +func (x Uint64x2) RotateAllLeftMasked(shift uint8, y Mask64x2) Uint64x2 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Uint64x4) RotateAllLeftMasked(imm uint8, y Mask64x4) Uint64x4 +func (x Uint64x4) RotateAllLeftMasked(shift uint8, y Mask64x4) Uint64x4 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Uint64x8) RotateAllLeftMasked(imm uint8, y Mask64x8) Uint64x8 +func (x Uint64x8) RotateAllLeftMasked(shift uint8, y Mask64x8) Uint64x8 /* RotateAllRight */ // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Int32x4) RotateAllRight(imm uint8) Int32x4 +func (x Int32x4) RotateAllRight(shift uint8) Int32x4 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Int32x8) RotateAllRight(imm uint8) Int32x8 +func (x Int32x8) RotateAllRight(shift uint8) Int32x8 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Int32x16) RotateAllRight(imm uint8) Int32x16 +func (x Int32x16) RotateAllRight(shift uint8) Int32x16 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Int64x2) RotateAllRight(imm uint8) Int64x2 +func (x Int64x2) RotateAllRight(shift uint8) Int64x2 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Int64x4) RotateAllRight(imm uint8) Int64x4 +func (x Int64x4) RotateAllRight(shift uint8) Int64x4 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Int64x8) RotateAllRight(imm uint8) Int64x8 +func (x Int64x8) RotateAllRight(shift uint8) Int64x8 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Uint32x4) RotateAllRight(imm uint8) Uint32x4 +func (x Uint32x4) RotateAllRight(shift uint8) Uint32x4 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Uint32x8) RotateAllRight(imm uint8) Uint32x8 +func (x Uint32x8) RotateAllRight(shift uint8) Uint32x8 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Uint32x16) RotateAllRight(imm uint8) Uint32x16 +func (x Uint32x16) RotateAllRight(shift uint8) Uint32x16 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. 
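For the rotate family above, a brief sketch of constant-count usage (import path assumed). Because these lanes are 32 bits wide, rotating left by k is equivalent to rotating right by 32-k.

	// rotateBothWays computes the same result two ways: a left rotation by the
	// constant k and the matching right rotation by 32-k. Both counts are
	// constant expressions, as the documentation above requires.
	func rotateBothWays(v simd.Uint32x8) (left, right simd.Uint32x8) {
		const k = 5
		return v.RotateAllLeft(k), v.RotateAllRight(32 - k)
	}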
+// // Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Uint64x2) RotateAllRight(imm uint8) Uint64x2 +func (x Uint64x2) RotateAllRight(shift uint8) Uint64x2 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Uint64x4) RotateAllRight(imm uint8) Uint64x4 +func (x Uint64x4) RotateAllRight(shift uint8) Uint64x4 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Uint64x8) RotateAllRight(imm uint8) Uint64x8 +func (x Uint64x8) RotateAllRight(shift uint8) Uint64x8 /* RotateAllRightMasked */ // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Int32x4) RotateAllRightMasked(imm uint8, y Mask32x4) Int32x4 +func (x Int32x4) RotateAllRightMasked(shift uint8, y Mask32x4) Int32x4 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Int32x8) RotateAllRightMasked(imm uint8, y Mask32x8) Int32x8 +func (x Int32x8) RotateAllRightMasked(shift uint8, y Mask32x8) Int32x8 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Int32x16) RotateAllRightMasked(imm uint8, y Mask32x16) Int32x16 +func (x Int32x16) RotateAllRightMasked(shift uint8, y Mask32x16) Int32x16 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Int64x2) RotateAllRightMasked(imm uint8, y Mask64x2) Int64x2 +func (x Int64x2) RotateAllRightMasked(shift uint8, y Mask64x2) Int64x2 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Int64x4) RotateAllRightMasked(imm uint8, y Mask64x4) Int64x4 +func (x Int64x4) RotateAllRightMasked(shift uint8, y Mask64x4) Int64x4 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Int64x8) RotateAllRightMasked(imm uint8, y Mask64x8) Int64x8 +func (x Int64x8) RotateAllRightMasked(shift uint8, y Mask64x8) Int64x8 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Uint32x4) RotateAllRightMasked(imm uint8, y Mask32x4) Uint32x4 +func (x Uint32x4) RotateAllRightMasked(shift uint8, y Mask32x4) Uint32x4 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Uint32x8) RotateAllRightMasked(imm uint8, y Mask32x8) Uint32x8 +func (x Uint32x8) RotateAllRightMasked(shift uint8, y Mask32x8) Uint32x8 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Uint32x16) RotateAllRightMasked(imm uint8, y Mask32x16) Uint32x16 +func (x Uint32x16) RotateAllRightMasked(shift uint8, y Mask32x16) Uint32x16 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Uint64x2) RotateAllRightMasked(imm uint8, y Mask64x2) Uint64x2 +func (x Uint64x2) RotateAllRightMasked(shift uint8, y Mask64x2) Uint64x2 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Uint64x4) RotateAllRightMasked(imm uint8, y Mask64x4) Uint64x4 +func (x Uint64x4) RotateAllRightMasked(shift uint8, y Mask64x4) Uint64x4 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Uint64x8) RotateAllRightMasked(imm uint8, y Mask64x8) Uint64x8 +func (x Uint64x8) RotateAllRightMasked(shift uint8, y Mask64x8) Uint64x8 /* RotateLeft */ @@ -5943,65 +6243,89 @@ func (x Float64x4) Round() Float64x4 // RoundWithPrecision rounds elements with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) RoundWithPrecision(imm uint8) Float32x4 +func (x Float32x4) RoundWithPrecision(prec uint8) Float32x4 // RoundWithPrecision rounds elements with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) RoundWithPrecision(imm uint8) Float32x8 +func (x Float32x8) RoundWithPrecision(prec uint8) Float32x8 // RoundWithPrecision rounds elements with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) RoundWithPrecision(imm uint8) Float32x16 +func (x Float32x16) RoundWithPrecision(prec uint8) Float32x16 // RoundWithPrecision rounds elements with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) RoundWithPrecision(imm uint8) Float64x2 +func (x Float64x2) RoundWithPrecision(prec uint8) Float64x2 // RoundWithPrecision rounds elements with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) RoundWithPrecision(imm uint8) Float64x4 +func (x Float64x4) RoundWithPrecision(prec uint8) Float64x4 // RoundWithPrecision rounds elements with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) RoundWithPrecision(imm uint8) Float64x8 +func (x Float64x8) RoundWithPrecision(prec uint8) Float64x8 /* RoundWithPrecisionMasked */ // RoundWithPrecisionMasked rounds elements with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) RoundWithPrecisionMasked(imm uint8, y Mask32x4) Float32x4 +func (x Float32x4) RoundWithPrecisionMasked(prec uint8, y Mask32x4) Float32x4 // RoundWithPrecisionMasked rounds elements with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) RoundWithPrecisionMasked(imm uint8, y Mask32x8) Float32x8 +func (x Float32x8) RoundWithPrecisionMasked(prec uint8, y Mask32x8) Float32x8 // RoundWithPrecisionMasked rounds elements with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) RoundWithPrecisionMasked(imm uint8, y Mask32x16) Float32x16 +func (x Float32x16) RoundWithPrecisionMasked(prec uint8, y Mask32x16) Float32x16 // RoundWithPrecisionMasked rounds elements with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) RoundWithPrecisionMasked(imm uint8, y Mask64x2) Float64x2 +func (x Float64x2) RoundWithPrecisionMasked(prec uint8, y Mask64x2) Float64x2 // RoundWithPrecisionMasked rounds elements with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) RoundWithPrecisionMasked(imm uint8, y Mask64x4) Float64x4 +func (x Float64x4) RoundWithPrecisionMasked(prec uint8, y Mask64x4) Float64x4 // RoundWithPrecisionMasked rounds elements with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) RoundWithPrecisionMasked(imm uint8, y Mask64x8) Float64x8 +func (x Float64x8) RoundWithPrecisionMasked(prec uint8, y Mask64x8) Float64x8 /* SaturatedAdd */ @@ -6421,95 +6745,131 @@ func (x Uint32x16) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64 // Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. 
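The masked variants follow the same pattern, with the mask as a trailing argument. A hedged sketch (import path assumed; how unselected lanes are filled is governed by the package's mask semantics, which this file does not spell out):

	// roundSelected rounds only the lanes selected by mask, using the constant
	// precision 3. The precision must still be a constant even though the mask
	// is an ordinary runtime value.
	func roundSelected(v simd.Float32x4, mask simd.Mask32x4) simd.Float32x4 {
		const prec = 3
		return v.RoundWithPrecisionMasked(prec, mask)
	}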
+// // Asm: VINSERTF128, CPU Feature: AVX -func (x Float32x8) Set128(imm uint8, y Float32x4) Float32x8 +func (x Float32x8) Set128(index uint8, y Float32x4) Float32x8 // Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VINSERTF128, CPU Feature: AVX -func (x Float64x4) Set128(imm uint8, y Float64x2) Float64x4 +func (x Float64x4) Set128(index uint8, y Float64x2) Float64x4 // Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VINSERTI128, CPU Feature: AVX2 -func (x Int8x32) Set128(imm uint8, y Int8x16) Int8x32 +func (x Int8x32) Set128(index uint8, y Int8x16) Int8x32 // Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VINSERTI128, CPU Feature: AVX2 -func (x Int16x16) Set128(imm uint8, y Int16x8) Int16x16 +func (x Int16x16) Set128(index uint8, y Int16x8) Int16x16 // Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VINSERTI128, CPU Feature: AVX2 -func (x Int32x8) Set128(imm uint8, y Int32x4) Int32x8 +func (x Int32x8) Set128(index uint8, y Int32x4) Int32x8 // Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VINSERTI128, CPU Feature: AVX2 -func (x Int64x4) Set128(imm uint8, y Int64x2) Int64x4 +func (x Int64x4) Set128(index uint8, y Int64x2) Int64x4 // Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VINSERTI128, CPU Feature: AVX2 -func (x Uint8x32) Set128(imm uint8, y Uint8x16) Uint8x32 +func (x Uint8x32) Set128(index uint8, y Uint8x16) Uint8x32 // Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VINSERTI128, CPU Feature: AVX2 -func (x Uint16x16) Set128(imm uint8, y Uint16x8) Uint16x16 +func (x Uint16x16) Set128(index uint8, y Uint16x8) Uint16x16 // Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VINSERTI128, CPU Feature: AVX2 -func (x Uint32x8) Set128(imm uint8, y Uint32x4) Uint32x8 +func (x Uint32x8) Set128(index uint8, y Uint32x4) Uint32x8 // Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VINSERTI128, CPU Feature: AVX2 -func (x Uint64x4) Set128(imm uint8, y Uint64x2) Uint64x4 +func (x Uint64x4) Set128(index uint8, y Uint64x2) Uint64x4 /* SetElem */ // SetElem sets a single constant-indexed element's value. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPINSRB, CPU Feature: AVX -func (x Int8x16) SetElem(imm uint8, y int8) Int8x16 +func (x Int8x16) SetElem(index uint8, y int8) Int8x16 // SetElem sets a single constant-indexed element's value. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPINSRW, CPU Feature: AVX -func (x Int16x8) SetElem(imm uint8, y int16) Int16x8 +func (x Int16x8) SetElem(index uint8, y int16) Int16x8 // SetElem sets a single constant-indexed element's value. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPINSRD, CPU Feature: AVX -func (x Int32x4) SetElem(imm uint8, y int32) Int32x4 +func (x Int32x4) SetElem(index uint8, y int32) Int32x4 // SetElem sets a single constant-indexed element's value. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPINSRQ, CPU Feature: AVX -func (x Int64x2) SetElem(imm uint8, y int64) Int64x2 +func (x Int64x2) SetElem(index uint8, y int64) Int64x2 // SetElem sets a single constant-indexed element's value. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPINSRB, CPU Feature: AVX -func (x Uint8x16) SetElem(imm uint8, y uint8) Uint8x16 +func (x Uint8x16) SetElem(index uint8, y uint8) Uint8x16 // SetElem sets a single constant-indexed element's value. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPINSRW, CPU Feature: AVX -func (x Uint16x8) SetElem(imm uint8, y uint16) Uint16x8 +func (x Uint16x8) SetElem(index uint8, y uint16) Uint16x8 // SetElem sets a single constant-indexed element's value. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPINSRD, CPU Feature: AVX -func (x Uint32x4) SetElem(imm uint8, y uint32) Uint32x4 +func (x Uint32x4) SetElem(index uint8, y uint32) Uint32x4 // SetElem sets a single constant-indexed element's value. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPINSRQ, CPU Feature: AVX -func (x Uint64x2) SetElem(imm uint8, y uint64) Uint64x2 +func (x Uint64x2) SetElem(index uint8, y uint64) Uint64x2 /* ShiftAllLeft */ @@ -6588,220 +6948,292 @@ func (x Uint64x8) ShiftAllLeft(y uint64) Uint64x8 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. 
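The element accessors above combine naturally; a small sketch (import path assumed, constant indices throughout):

	// swapEnds exchanges lane 0 and lane 3 of v. GetElem reads one lane;
	// SetElem returns a copy of the vector with one lane replaced.
	func swapEnds(v simd.Int32x4) simd.Int32x4 {
		a, b := v.GetElem(0), v.GetElem(3)
		return v.SetElem(0, b).SetElem(3, a)
	}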
+// // Asm: VPSHLDW, CPU Feature: AVX512EVEX -func (x Int16x8) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int16x8) Int16x8 +func (x Int16x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int16x8) Int16x8 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDW, CPU Feature: AVX512EVEX -func (x Int16x16) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int16x16) Int16x16 +func (x Int16x16) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int16x16) Int16x16 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDW, CPU Feature: AVX512EVEX -func (x Int16x32) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int16x32) Int16x32 +func (x Int16x32) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int16x32) Int16x32 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDD, CPU Feature: AVX512EVEX -func (x Int32x4) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int32x4) Int32x4 +func (x Int32x4) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int32x4) Int32x4 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDD, CPU Feature: AVX512EVEX -func (x Int32x8) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int32x8) Int32x8 +func (x Int32x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int32x8) Int32x8 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDD, CPU Feature: AVX512EVEX -func (x Int32x16) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int32x16) Int32x16 +func (x Int32x16) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int32x16) Int32x16 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VPSHLDQ, CPU Feature: AVX512EVEX -func (x Int64x2) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int64x2) Int64x2 +func (x Int64x2) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int64x2) Int64x2 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDQ, CPU Feature: AVX512EVEX -func (x Int64x4) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int64x4) Int64x4 +func (x Int64x4) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int64x4) Int64x4 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDQ, CPU Feature: AVX512EVEX -func (x Int64x8) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int64x8) Int64x8 +func (x Int64x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int64x8) Int64x8 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDW, CPU Feature: AVX512EVEX -func (x Uint16x8) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint16x8) Uint16x8 +func (x Uint16x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint16x8) Uint16x8 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDW, CPU Feature: AVX512EVEX -func (x Uint16x16) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint16x16) Uint16x16 +func (x Uint16x16) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint16x16) Uint16x16 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDW, CPU Feature: AVX512EVEX -func (x Uint16x32) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint16x32) Uint16x32 +func (x Uint16x32) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint16x32) Uint16x32 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VPSHLDD, CPU Feature: AVX512EVEX -func (x Uint32x4) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint32x4) Uint32x4 +func (x Uint32x4) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint32x4) Uint32x4 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDD, CPU Feature: AVX512EVEX -func (x Uint32x8) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint32x8) Uint32x8 +func (x Uint32x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint32x8) Uint32x8 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDD, CPU Feature: AVX512EVEX -func (x Uint32x16) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint32x16) Uint32x16 +func (x Uint32x16) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint32x16) Uint32x16 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDQ, CPU Feature: AVX512EVEX -func (x Uint64x2) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint64x2) Uint64x2 +func (x Uint64x2) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint64x2) Uint64x2 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDQ, CPU Feature: AVX512EVEX -func (x Uint64x4) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint64x4) Uint64x4 +func (x Uint64x4) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint64x4) Uint64x4 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDQ, CPU Feature: AVX512EVEX -func (x Uint64x8) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint64x8) Uint64x8 +func (x Uint64x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint64x8) Uint64x8 /* ShiftAllLeftAndFillUpperFromMasked */ // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VPSHLDW, CPU Feature: AVX512EVEX -func (x Int16x8) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int16x8, z Mask16x8) Int16x8 +func (x Int16x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x8, z Mask16x8) Int16x8 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDW, CPU Feature: AVX512EVEX -func (x Int16x16) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int16x16, z Mask16x16) Int16x16 +func (x Int16x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x16, z Mask16x16) Int16x16 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDW, CPU Feature: AVX512EVEX -func (x Int16x32) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int16x32, z Mask16x32) Int16x32 +func (x Int16x32) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x32, z Mask16x32) Int16x32 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDD, CPU Feature: AVX512EVEX -func (x Int32x4) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int32x4, z Mask32x4) Int32x4 +func (x Int32x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x4, z Mask32x4) Int32x4 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDD, CPU Feature: AVX512EVEX -func (x Int32x8) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int32x8, z Mask32x8) Int32x8 +func (x Int32x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x8, z Mask32x8) Int32x8 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDD, CPU Feature: AVX512EVEX -func (x Int32x16) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int32x16, z Mask32x16) Int32x16 +func (x Int32x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x16, z Mask32x16) Int32x16 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VPSHLDQ, CPU Feature: AVX512EVEX -func (x Int64x2) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int64x2, z Mask64x2) Int64x2 +func (x Int64x2) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x2, z Mask64x2) Int64x2 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDQ, CPU Feature: AVX512EVEX -func (x Int64x4) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int64x4, z Mask64x4) Int64x4 +func (x Int64x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x4, z Mask64x4) Int64x4 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDQ, CPU Feature: AVX512EVEX -func (x Int64x8) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int64x8, z Mask64x8) Int64x8 +func (x Int64x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x8, z Mask64x8) Int64x8 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDW, CPU Feature: AVX512EVEX -func (x Uint16x8) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint16x8, z Mask16x8) Uint16x8 +func (x Uint16x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x8, z Mask16x8) Uint16x8 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDW, CPU Feature: AVX512EVEX -func (x Uint16x16) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint16x16, z Mask16x16) Uint16x16 +func (x Uint16x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x16, z Mask16x16) Uint16x16 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDW, CPU Feature: AVX512EVEX -func (x Uint16x32) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint16x32, z Mask16x32) Uint16x32 +func (x Uint16x32) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x32, z Mask16x32) Uint16x32 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VPSHLDD, CPU Feature: AVX512EVEX -func (x Uint32x4) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint32x4, z Mask32x4) Uint32x4 +func (x Uint32x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x4, z Mask32x4) Uint32x4 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDD, CPU Feature: AVX512EVEX -func (x Uint32x8) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint32x8, z Mask32x8) Uint32x8 +func (x Uint32x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x8, z Mask32x8) Uint32x8 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDD, CPU Feature: AVX512EVEX -func (x Uint32x16) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint32x16, z Mask32x16) Uint32x16 +func (x Uint32x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x16, z Mask32x16) Uint32x16 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDQ, CPU Feature: AVX512EVEX -func (x Uint64x2) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint64x2, z Mask64x2) Uint64x2 +func (x Uint64x2) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint64x2, z Mask64x2) Uint64x2 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDQ, CPU Feature: AVX512EVEX -func (x Uint64x4) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint64x4, z Mask64x4) Uint64x4 +func (x Uint64x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint64x4, z Mask64x4) Uint64x4 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDQ, CPU Feature: AVX512EVEX -func (x Uint64x8) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint64x8, z Mask64x8) Uint64x8 +func (x Uint64x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint64x8, z Mask64x8) Uint64x8 /* ShiftAllLeftMasked */ @@ -6912,220 +7344,292 @@ func (x Uint64x8) ShiftAllRight(y uint64) Uint64x8 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
// +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Int16x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Int16x8) Int16x8 +func (x Int16x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Int16x8) Int16x8 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Int16x16) ShiftAllRightAndFillUpperFrom(imm uint8, y Int16x16) Int16x16 +func (x Int16x16) ShiftAllRightAndFillUpperFrom(shift uint8, y Int16x16) Int16x16 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Int16x32) ShiftAllRightAndFillUpperFrom(imm uint8, y Int16x32) Int16x32 +func (x Int16x32) ShiftAllRightAndFillUpperFrom(shift uint8, y Int16x32) Int16x32 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Int32x4) ShiftAllRightAndFillUpperFrom(imm uint8, y Int32x4) Int32x4 +func (x Int32x4) ShiftAllRightAndFillUpperFrom(shift uint8, y Int32x4) Int32x4 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Int32x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Int32x8) Int32x8 +func (x Int32x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Int32x8) Int32x8 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Int32x16) ShiftAllRightAndFillUpperFrom(imm uint8, y Int32x16) Int32x16 +func (x Int32x16) ShiftAllRightAndFillUpperFrom(shift uint8, y Int32x16) Int32x16 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Int64x2) ShiftAllRightAndFillUpperFrom(imm uint8, y Int64x2) Int64x2 +func (x Int64x2) ShiftAllRightAndFillUpperFrom(shift uint8, y Int64x2) Int64x2 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Int64x4) ShiftAllRightAndFillUpperFrom(imm uint8, y Int64x4) Int64x4 +func (x Int64x4) ShiftAllRightAndFillUpperFrom(shift uint8, y Int64x4) Int64x4 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Int64x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Int64x8) Int64x8 +func (x Int64x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Int64x8) Int64x8 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Uint16x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint16x8) Uint16x8 +func (x Uint16x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint16x8) Uint16x8 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Uint16x16) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint16x16) Uint16x16 +func (x Uint16x16) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint16x16) Uint16x16 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Uint16x32) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint16x32) Uint16x32 +func (x Uint16x32) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint16x32) Uint16x32 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Uint32x4) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint32x4) Uint32x4 +func (x Uint32x4) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint32x4) Uint32x4 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Uint32x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint32x8) Uint32x8 +func (x Uint32x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint32x8) Uint32x8 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Uint32x16) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint32x16) Uint32x16 +func (x Uint32x16) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint32x16) Uint32x16 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Uint64x2) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint64x2) Uint64x2 +func (x Uint64x2) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint64x2) Uint64x2 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Uint64x4) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint64x4) Uint64x4 +func (x Uint64x4) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint64x4) Uint64x4 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Uint64x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint64x8) Uint64x8 +func (x Uint64x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint64x8) Uint64x8 /* ShiftAllRightAndFillUpperFromMasked */ // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Int16x8) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int16x8, z Mask16x8) Int16x8 +func (x Int16x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x8, z Mask16x8) Int16x8 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Int16x16) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int16x16, z Mask16x16) Int16x16 +func (x Int16x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x16, z Mask16x16) Int16x16 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Int16x32) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int16x32, z Mask16x32) Int16x32 +func (x Int16x32) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x32, z Mask16x32) Int16x32 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Int32x4) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int32x4, z Mask32x4) Int32x4 +func (x Int32x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x4, z Mask32x4) Int32x4 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Int32x8) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int32x8, z Mask32x8) Int32x8 +func (x Int32x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x8, z Mask32x8) Int32x8 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Int32x16) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int32x16, z Mask32x16) Int32x16 +func (x Int32x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x16, z Mask32x16) Int32x16 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Int64x2) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int64x2, z Mask64x2) Int64x2 +func (x Int64x2) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x2, z Mask64x2) Int64x2 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Int64x4) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int64x4, z Mask64x4) Int64x4 +func (x Int64x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x4, z Mask64x4) Int64x4 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Int64x8) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int64x8, z Mask64x8) Int64x8 +func (x Int64x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x8, z Mask64x8) Int64x8 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Uint16x8) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint16x8, z Mask16x8) Uint16x8 +func (x Uint16x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x8, z Mask16x8) Uint16x8 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Uint16x16) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint16x16, z Mask16x16) Uint16x16 +func (x Uint16x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x16, z Mask16x16) Uint16x16 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Uint16x32) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint16x32, z Mask16x32) Uint16x32 +func (x Uint16x32) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x32, z Mask16x32) Uint16x32 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Uint32x4) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint32x4, z Mask32x4) Uint32x4 +func (x Uint32x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x4, z Mask32x4) Uint32x4 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Uint32x8) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint32x8, z Mask32x8) Uint32x8 +func (x Uint32x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x8, z Mask32x8) Uint32x8 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Uint32x16) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint32x16, z Mask32x16) Uint32x16 +func (x Uint32x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x16, z Mask32x16) Uint32x16 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Uint64x2) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint64x2, z Mask64x2) Uint64x2 +func (x Uint64x2) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint64x2, z Mask64x2) Uint64x2 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Uint64x4) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint64x4, z Mask64x4) Uint64x4 +func (x Uint64x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint64x4, z Mask64x4) Uint64x4 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Uint64x8) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint64x8, z Mask64x8) Uint64x8 +func (x Uint64x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint64x8, z Mask64x8) Uint64x8 /* ShiftAllRightMasked */ @@ -8637,65 +9141,89 @@ func (x Float64x4) Trunc() Float64x4 // TruncWithPrecision truncates elements with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) TruncWithPrecision(imm uint8) Float32x4 +func (x Float32x4) TruncWithPrecision(prec uint8) Float32x4 // TruncWithPrecision truncates elements with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) TruncWithPrecision(imm uint8) Float32x8 +func (x Float32x8) TruncWithPrecision(prec uint8) Float32x8 // TruncWithPrecision truncates elements with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) TruncWithPrecision(imm uint8) Float32x16 +func (x Float32x16) TruncWithPrecision(prec uint8) Float32x16 // TruncWithPrecision truncates elements with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) TruncWithPrecision(imm uint8) Float64x2 +func (x Float64x2) TruncWithPrecision(prec uint8) Float64x2 // TruncWithPrecision truncates elements with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) TruncWithPrecision(imm uint8) Float64x4 +func (x Float64x4) TruncWithPrecision(prec uint8) Float64x4 // TruncWithPrecision truncates elements with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) TruncWithPrecision(imm uint8) Float64x8 +func (x Float64x8) TruncWithPrecision(prec uint8) Float64x8 /* TruncWithPrecisionMasked */ // TruncWithPrecisionMasked truncates elements with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) TruncWithPrecisionMasked(imm uint8, y Mask32x4) Float32x4 +func (x Float32x4) TruncWithPrecisionMasked(prec uint8, y Mask32x4) Float32x4 // TruncWithPrecisionMasked truncates elements with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) TruncWithPrecisionMasked(imm uint8, y Mask32x8) Float32x8 +func (x Float32x8) TruncWithPrecisionMasked(prec uint8, y Mask32x8) Float32x8 // TruncWithPrecisionMasked truncates elements with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) TruncWithPrecisionMasked(imm uint8, y Mask32x16) Float32x16 +func (x Float32x16) TruncWithPrecisionMasked(prec uint8, y Mask32x16) Float32x16 // TruncWithPrecisionMasked truncates elements with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) TruncWithPrecisionMasked(imm uint8, y Mask64x2) Float64x2 +func (x Float64x2) TruncWithPrecisionMasked(prec uint8, y Mask64x2) Float64x2 // TruncWithPrecisionMasked truncates elements with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) TruncWithPrecisionMasked(imm uint8, y Mask64x4) Float64x4 +func (x Float64x4) TruncWithPrecisionMasked(prec uint8, y Mask64x4) Float64x4 // TruncWithPrecisionMasked truncates elements with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) TruncWithPrecisionMasked(imm uint8, y Mask64x8) Float64x8 +func (x Float64x8) TruncWithPrecisionMasked(prec uint8, y Mask64x8) Float64x8 /* UnsignedSignedQuadDotProdAccumulate */ From b69622b83e38b58a461938163fdef03683a2a871 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Fri, 11 Jul 2025 17:56:22 +0000 Subject: [PATCH 081/139] [dev.simd] cmd/compile, simd: adjust Shift.* operations This CL does: 1. Removes ShiftRightSignExtended, default signed vectors to shift arithmetic, and unsigned to shift logical. 2. Add the missing Shifts which were left out by YAML error in the generator. This CL is generated by CL 687595. Change-Id: I663115498adb91c82e89a8476e6748794e997cfa Reviewed-on: https://go-review.googlesource.com/c/go/+/687596 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase Reviewed-by: Cherry Mui --- src/cmd/compile/internal/amd64/simdssa.go | 128 +- .../compile/internal/ssa/_gen/simdAMD64.rules | 134 +- .../compile/internal/ssa/_gen/simdAMD64ops.go | 98 +- .../internal/ssa/_gen/simdgenericOps.go | 78 +- src/cmd/compile/internal/ssa/opGen.go | 1688 ++++++++++------- src/cmd/compile/internal/ssa/rewriteAMD64.go | 1025 +++++----- .../compile/internal/ssagen/simdintrinsics.go | 78 +- src/simd/ops_amd64.go | 490 ++--- src/simd/simd_wrapped_test.go | 74 - 9 files changed, 1984 insertions(+), 1809 deletions(-) diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 12a8c857bd4fee..e2d0dd17c65f73 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -273,15 +273,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSLLVQ128, ssa.OpAMD64VPSLLVQ256, ssa.OpAMD64VPSLLVQ512, - ssa.OpAMD64VPSRLVW128, - ssa.OpAMD64VPSRLVW256, - ssa.OpAMD64VPSRLVW512, - ssa.OpAMD64VPSRLVD128, - ssa.OpAMD64VPSRLVD256, - ssa.OpAMD64VPSRLVD512, - ssa.OpAMD64VPSRLVQ128, - ssa.OpAMD64VPSRLVQ256, - ssa.OpAMD64VPSRLVQ512, ssa.OpAMD64VPSRAVW128, ssa.OpAMD64VPSRAVW256, ssa.OpAMD64VPSRAVW512, @@ -291,6 +282,15 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSRAVQ128, ssa.OpAMD64VPSRAVQ256, ssa.OpAMD64VPSRAVQ512, + ssa.OpAMD64VPSRLVW128, + ssa.OpAMD64VPSRLVW256, + ssa.OpAMD64VPSRLVW512, + ssa.OpAMD64VPSRLVD128, + ssa.OpAMD64VPSRLVD256, + ssa.OpAMD64VPSRLVD512, + ssa.OpAMD64VPSRLVQ128, + ssa.OpAMD64VPSRLVQ256, + ssa.OpAMD64VPSRLVQ512, ssa.OpAMD64VPSIGNB128, ssa.OpAMD64VPSIGNB256, ssa.OpAMD64VPSIGNW128, @@ -504,15 +504,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSLLVQMasked128, ssa.OpAMD64VPSLLVQMasked256, ssa.OpAMD64VPSLLVQMasked512, - ssa.OpAMD64VPSRLVWMasked128, - ssa.OpAMD64VPSRLVWMasked256, - ssa.OpAMD64VPSRLVWMasked512, - ssa.OpAMD64VPSRLVDMasked128, - ssa.OpAMD64VPSRLVDMasked256, - ssa.OpAMD64VPSRLVDMasked512, - ssa.OpAMD64VPSRLVQMasked128, - ssa.OpAMD64VPSRLVQMasked256, - ssa.OpAMD64VPSRLVQMasked512, ssa.OpAMD64VPSRAVWMasked128, ssa.OpAMD64VPSRAVWMasked256, ssa.OpAMD64VPSRAVWMasked512, @@ -522,6 +513,15 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSRAVQMasked128, 
ssa.OpAMD64VPSRAVQMasked256, ssa.OpAMD64VPSRAVQMasked512, + ssa.OpAMD64VPSRLVWMasked128, + ssa.OpAMD64VPSRLVWMasked256, + ssa.OpAMD64VPSRLVWMasked512, + ssa.OpAMD64VPSRLVDMasked128, + ssa.OpAMD64VPSRLVDMasked256, + ssa.OpAMD64VPSRLVDMasked512, + ssa.OpAMD64VPSRLVQMasked128, + ssa.OpAMD64VPSRLVQMasked256, + ssa.OpAMD64VPSRLVQMasked512, ssa.OpAMD64VSUBPSMasked128, ssa.OpAMD64VSUBPSMasked256, ssa.OpAMD64VSUBPSMasked512, @@ -845,36 +845,60 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { case ssa.OpAMD64VPSLLW128, ssa.OpAMD64VPSLLW256, + ssa.OpAMD64VPSLLW512, ssa.OpAMD64VPSLLD128, ssa.OpAMD64VPSLLD256, + ssa.OpAMD64VPSLLD512, ssa.OpAMD64VPSLLQ128, ssa.OpAMD64VPSLLQ256, ssa.OpAMD64VPSLLQ512, - ssa.OpAMD64VPSRLW128, - ssa.OpAMD64VPSRLW256, - ssa.OpAMD64VPSRLD128, - ssa.OpAMD64VPSRLD256, - ssa.OpAMD64VPSRLQ128, - ssa.OpAMD64VPSRLQ256, - ssa.OpAMD64VPSRLQ512, ssa.OpAMD64VPSRAW128, ssa.OpAMD64VPSRAW256, + ssa.OpAMD64VPSRAW512, ssa.OpAMD64VPSRAD128, ssa.OpAMD64VPSRAD256, + ssa.OpAMD64VPSRAD512, ssa.OpAMD64VPSRAQ128, ssa.OpAMD64VPSRAQ256, - ssa.OpAMD64VPSRAQ512: + ssa.OpAMD64VPSRAQ512, + ssa.OpAMD64VPSRLW128, + ssa.OpAMD64VPSRLW256, + ssa.OpAMD64VPSRLW512, + ssa.OpAMD64VPSRLD128, + ssa.OpAMD64VPSRLD256, + ssa.OpAMD64VPSRLD512, + ssa.OpAMD64VPSRLQ128, + ssa.OpAMD64VPSRLQ256, + ssa.OpAMD64VPSRLQ512: p = simdVfpv(s, v) - case ssa.OpAMD64VPSLLQMasked128, + case ssa.OpAMD64VPSLLWMasked128, + ssa.OpAMD64VPSLLWMasked256, + ssa.OpAMD64VPSLLWMasked512, + ssa.OpAMD64VPSLLDMasked128, + ssa.OpAMD64VPSLLDMasked256, + ssa.OpAMD64VPSLLDMasked512, + ssa.OpAMD64VPSLLQMasked128, ssa.OpAMD64VPSLLQMasked256, ssa.OpAMD64VPSLLQMasked512, - ssa.OpAMD64VPSRLQMasked128, - ssa.OpAMD64VPSRLQMasked256, - ssa.OpAMD64VPSRLQMasked512, + ssa.OpAMD64VPSRAWMasked128, + ssa.OpAMD64VPSRAWMasked256, + ssa.OpAMD64VPSRAWMasked512, + ssa.OpAMD64VPSRADMasked128, + ssa.OpAMD64VPSRADMasked256, + ssa.OpAMD64VPSRADMasked512, ssa.OpAMD64VPSRAQMasked128, ssa.OpAMD64VPSRAQMasked256, - ssa.OpAMD64VPSRAQMasked512: + ssa.OpAMD64VPSRAQMasked512, + ssa.OpAMD64VPSRLWMasked128, + ssa.OpAMD64VPSRLWMasked256, + ssa.OpAMD64VPSRLWMasked512, + ssa.OpAMD64VPSRLDMasked128, + ssa.OpAMD64VPSRLDMasked256, + ssa.OpAMD64VPSRLDMasked512, + ssa.OpAMD64VPSRLQMasked128, + ssa.OpAMD64VPSRLQMasked256, + ssa.OpAMD64VPSRLQMasked512: p = simdVfpkv(s, v) case ssa.OpAMD64VPINSRB128, @@ -1198,6 +1222,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSHLDQMasked128, ssa.OpAMD64VPSHLDQMasked256, ssa.OpAMD64VPSHLDQMasked512, + ssa.OpAMD64VPSLLWMasked128, + ssa.OpAMD64VPSLLWMasked256, + ssa.OpAMD64VPSLLWMasked512, + ssa.OpAMD64VPSLLDMasked128, + ssa.OpAMD64VPSLLDMasked256, + ssa.OpAMD64VPSLLDMasked512, ssa.OpAMD64VPSLLQMasked128, ssa.OpAMD64VPSLLQMasked256, ssa.OpAMD64VPSLLQMasked512, @@ -1210,12 +1240,24 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSHRDQMasked128, ssa.OpAMD64VPSHRDQMasked256, ssa.OpAMD64VPSHRDQMasked512, - ssa.OpAMD64VPSRLQMasked128, - ssa.OpAMD64VPSRLQMasked256, - ssa.OpAMD64VPSRLQMasked512, + ssa.OpAMD64VPSRAWMasked128, + ssa.OpAMD64VPSRAWMasked256, + ssa.OpAMD64VPSRAWMasked512, + ssa.OpAMD64VPSRADMasked128, + ssa.OpAMD64VPSRADMasked256, + ssa.OpAMD64VPSRADMasked512, ssa.OpAMD64VPSRAQMasked128, ssa.OpAMD64VPSRAQMasked256, ssa.OpAMD64VPSRAQMasked512, + ssa.OpAMD64VPSRLWMasked128, + ssa.OpAMD64VPSRLWMasked256, + ssa.OpAMD64VPSRLWMasked512, + ssa.OpAMD64VPSRLDMasked128, + ssa.OpAMD64VPSRLDMasked256, + ssa.OpAMD64VPSRLDMasked512, + ssa.OpAMD64VPSRLQMasked128, + ssa.OpAMD64VPSRLQMasked256, + 
ssa.OpAMD64VPSRLQMasked512, ssa.OpAMD64VPSHLDVWMasked128, ssa.OpAMD64VPSHLDVWMasked256, ssa.OpAMD64VPSHLDVWMasked512, @@ -1243,15 +1285,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSHRDVQMasked128, ssa.OpAMD64VPSHRDVQMasked256, ssa.OpAMD64VPSHRDVQMasked512, - ssa.OpAMD64VPSRLVWMasked128, - ssa.OpAMD64VPSRLVWMasked256, - ssa.OpAMD64VPSRLVWMasked512, - ssa.OpAMD64VPSRLVDMasked128, - ssa.OpAMD64VPSRLVDMasked256, - ssa.OpAMD64VPSRLVDMasked512, - ssa.OpAMD64VPSRLVQMasked128, - ssa.OpAMD64VPSRLVQMasked256, - ssa.OpAMD64VPSRLVQMasked512, ssa.OpAMD64VPSRAVWMasked128, ssa.OpAMD64VPSRAVWMasked256, ssa.OpAMD64VPSRAVWMasked512, @@ -1261,6 +1294,15 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSRAVQMasked128, ssa.OpAMD64VPSRAVQMasked256, ssa.OpAMD64VPSRAVQMasked512, + ssa.OpAMD64VPSRLVWMasked128, + ssa.OpAMD64VPSRLVWMasked256, + ssa.OpAMD64VPSRLVWMasked512, + ssa.OpAMD64VPSRLVDMasked128, + ssa.OpAMD64VPSRLVDMasked256, + ssa.OpAMD64VPSRLVDMasked512, + ssa.OpAMD64VPSRLVQMasked128, + ssa.OpAMD64VPSRLVQMasked256, + ssa.OpAMD64VPSRLVQMasked512, ssa.OpAMD64VSQRTPSMasked128, ssa.OpAMD64VSQRTPSMasked256, ssa.OpAMD64VSQRTPSMasked512, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 7ac4df59589e40..6043edad703012 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -1239,15 +1239,19 @@ (SetElemUint64x2 ...) => (VPINSRQ128 ...) (ShiftAllLeftInt16x8 ...) => (VPSLLW128 ...) (ShiftAllLeftInt16x16 ...) => (VPSLLW256 ...) +(ShiftAllLeftInt16x32 ...) => (VPSLLW512 ...) (ShiftAllLeftInt32x4 ...) => (VPSLLD128 ...) (ShiftAllLeftInt32x8 ...) => (VPSLLD256 ...) +(ShiftAllLeftInt32x16 ...) => (VPSLLD512 ...) (ShiftAllLeftInt64x2 ...) => (VPSLLQ128 ...) (ShiftAllLeftInt64x4 ...) => (VPSLLQ256 ...) (ShiftAllLeftInt64x8 ...) => (VPSLLQ512 ...) (ShiftAllLeftUint16x8 ...) => (VPSLLW128 ...) (ShiftAllLeftUint16x16 ...) => (VPSLLW256 ...) +(ShiftAllLeftUint16x32 ...) => (VPSLLW512 ...) (ShiftAllLeftUint32x4 ...) => (VPSLLD128 ...) (ShiftAllLeftUint32x8 ...) => (VPSLLD256 ...) +(ShiftAllLeftUint32x16 ...) => (VPSLLD512 ...) (ShiftAllLeftUint64x2 ...) => (VPSLLQ128 ...) (ShiftAllLeftUint64x4 ...) => (VPSLLQ256 ...) (ShiftAllLeftUint64x8 ...) => (VPSLLQ512 ...) 
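[Editor's illustration, not part of the patch: the ShiftAllLeftAndFillUpperFrom rules above lower to the VPSHLD family, whose per-lane behaviour is easy to misread from the doc comments alone. The scalar helper below is a minimal sketch of one 16-bit lane; the helper name is invented, and a constant shift count below the lane width is assumed.]

package shiftsketch

// shldw16 models one 16-bit lane of ShiftAllLeftAndFillUpperFrom(shift, y) as
// described in the doc comments earlier in this patch: x is shifted left by
// shift bits and the vacated low bits are filled from the high bits of y.
// Equivalently, the lane is the upper half of the 32-bit concatenation x:y
// shifted left by shift. Sketch only; assumes 0 <= shift < 16, out-of-range
// counts are not modeled here. The plain ShiftAllLeft rules above are the
// simpler case: every lane is shifted by the same scalar count, with zeros
// filling the low bits.
func shldw16(x, y uint16, shift uint8) uint16 {
	concat := uint32(x)<<16 | uint32(y)
	return uint16((concat << shift) >> 16)
}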
@@ -1287,23 +1291,39 @@ (ShiftAllLeftAndFillUpperFromMaskedUint64x2 [a] x y mask) => (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) (ShiftAllLeftAndFillUpperFromMaskedUint64x4 [a] x y mask) => (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) (ShiftAllLeftAndFillUpperFromMaskedUint64x8 [a] x y mask) => (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) +(ShiftAllLeftMaskedInt16x8 x y mask) => (VPSLLWMasked128 x y (VPMOVVec16x8ToM mask)) +(ShiftAllLeftMaskedInt16x16 x y mask) => (VPSLLWMasked256 x y (VPMOVVec16x16ToM mask)) +(ShiftAllLeftMaskedInt16x32 x y mask) => (VPSLLWMasked512 x y (VPMOVVec16x32ToM mask)) +(ShiftAllLeftMaskedInt32x4 x y mask) => (VPSLLDMasked128 x y (VPMOVVec32x4ToM mask)) +(ShiftAllLeftMaskedInt32x8 x y mask) => (VPSLLDMasked256 x y (VPMOVVec32x8ToM mask)) +(ShiftAllLeftMaskedInt32x16 x y mask) => (VPSLLDMasked512 x y (VPMOVVec32x16ToM mask)) (ShiftAllLeftMaskedInt64x2 x y mask) => (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) (ShiftAllLeftMaskedInt64x4 x y mask) => (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) (ShiftAllLeftMaskedInt64x8 x y mask) => (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) +(ShiftAllLeftMaskedUint16x8 x y mask) => (VPSLLWMasked128 x y (VPMOVVec16x8ToM mask)) +(ShiftAllLeftMaskedUint16x16 x y mask) => (VPSLLWMasked256 x y (VPMOVVec16x16ToM mask)) +(ShiftAllLeftMaskedUint16x32 x y mask) => (VPSLLWMasked512 x y (VPMOVVec16x32ToM mask)) +(ShiftAllLeftMaskedUint32x4 x y mask) => (VPSLLDMasked128 x y (VPMOVVec32x4ToM mask)) +(ShiftAllLeftMaskedUint32x8 x y mask) => (VPSLLDMasked256 x y (VPMOVVec32x8ToM mask)) +(ShiftAllLeftMaskedUint32x16 x y mask) => (VPSLLDMasked512 x y (VPMOVVec32x16ToM mask)) (ShiftAllLeftMaskedUint64x2 x y mask) => (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) (ShiftAllLeftMaskedUint64x4 x y mask) => (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) (ShiftAllLeftMaskedUint64x8 x y mask) => (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) -(ShiftAllRightInt16x8 ...) => (VPSRLW128 ...) -(ShiftAllRightInt16x16 ...) => (VPSRLW256 ...) -(ShiftAllRightInt32x4 ...) => (VPSRLD128 ...) -(ShiftAllRightInt32x8 ...) => (VPSRLD256 ...) -(ShiftAllRightInt64x2 ...) => (VPSRLQ128 ...) -(ShiftAllRightInt64x4 ...) => (VPSRLQ256 ...) -(ShiftAllRightInt64x8 ...) => (VPSRLQ512 ...) +(ShiftAllRightInt16x8 ...) => (VPSRAW128 ...) +(ShiftAllRightInt16x16 ...) => (VPSRAW256 ...) +(ShiftAllRightInt16x32 ...) => (VPSRAW512 ...) +(ShiftAllRightInt32x4 ...) => (VPSRAD128 ...) +(ShiftAllRightInt32x8 ...) => (VPSRAD256 ...) +(ShiftAllRightInt32x16 ...) => (VPSRAD512 ...) +(ShiftAllRightInt64x2 ...) => (VPSRAQ128 ...) +(ShiftAllRightInt64x4 ...) => (VPSRAQ256 ...) +(ShiftAllRightInt64x8 ...) => (VPSRAQ512 ...) (ShiftAllRightUint16x8 ...) => (VPSRLW128 ...) (ShiftAllRightUint16x16 ...) => (VPSRLW256 ...) +(ShiftAllRightUint16x32 ...) => (VPSRLW512 ...) (ShiftAllRightUint32x4 ...) => (VPSRLD128 ...) (ShiftAllRightUint32x8 ...) => (VPSRLD256 ...) +(ShiftAllRightUint32x16 ...) => (VPSRLD512 ...) (ShiftAllRightUint64x2 ...) => (VPSRLQ128 ...) (ShiftAllRightUint64x4 ...) => (VPSRLQ256 ...) (ShiftAllRightUint64x8 ...) => (VPSRLQ512 ...) 
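[Editor's illustration, not part of the patch: the hunk above is the behavioural core of this CL. ShiftAllRight on signed element types now lowers to the arithmetic shifts VPSRAW/VPSRAD/VPSRAQ, while unsigned element types keep the logical shifts VPSRLW/VPSRLD/VPSRLQ. The scalar program below shows the per-lane difference that distinction makes, using plain Go integers rather than the simd package.]

package main

import "fmt"

func main() {
	const shift = 1
	s := int16(-8)      // bit pattern 0xFFF8, viewed as a signed lane
	u := uint16(0xFFF8) // the same bits, viewed as an unsigned lane

	fmt.Println(s >> shift) // -4: arithmetic shift, sign bits fill from the left
	fmt.Println(u >> shift) // 32764 (0x7FFC): logical shift, zeros fill from the left
}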
@@ -1343,22 +1363,24 @@ (ShiftAllRightAndFillUpperFromMaskedUint64x2 [a] x y mask) => (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) (ShiftAllRightAndFillUpperFromMaskedUint64x4 [a] x y mask) => (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) (ShiftAllRightAndFillUpperFromMaskedUint64x8 [a] x y mask) => (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) -(ShiftAllRightMaskedInt64x2 x y mask) => (VPSRLQMasked128 x y (VPMOVVec64x2ToM mask)) -(ShiftAllRightMaskedInt64x4 x y mask) => (VPSRLQMasked256 x y (VPMOVVec64x4ToM mask)) -(ShiftAllRightMaskedInt64x8 x y mask) => (VPSRLQMasked512 x y (VPMOVVec64x8ToM mask)) +(ShiftAllRightMaskedInt16x8 x y mask) => (VPSRAWMasked128 x y (VPMOVVec16x8ToM mask)) +(ShiftAllRightMaskedInt16x16 x y mask) => (VPSRAWMasked256 x y (VPMOVVec16x16ToM mask)) +(ShiftAllRightMaskedInt16x32 x y mask) => (VPSRAWMasked512 x y (VPMOVVec16x32ToM mask)) +(ShiftAllRightMaskedInt32x4 x y mask) => (VPSRADMasked128 x y (VPMOVVec32x4ToM mask)) +(ShiftAllRightMaskedInt32x8 x y mask) => (VPSRADMasked256 x y (VPMOVVec32x8ToM mask)) +(ShiftAllRightMaskedInt32x16 x y mask) => (VPSRADMasked512 x y (VPMOVVec32x16ToM mask)) +(ShiftAllRightMaskedInt64x2 x y mask) => (VPSRAQMasked128 x y (VPMOVVec64x2ToM mask)) +(ShiftAllRightMaskedInt64x4 x y mask) => (VPSRAQMasked256 x y (VPMOVVec64x4ToM mask)) +(ShiftAllRightMaskedInt64x8 x y mask) => (VPSRAQMasked512 x y (VPMOVVec64x8ToM mask)) +(ShiftAllRightMaskedUint16x8 x y mask) => (VPSRLWMasked128 x y (VPMOVVec16x8ToM mask)) +(ShiftAllRightMaskedUint16x16 x y mask) => (VPSRLWMasked256 x y (VPMOVVec16x16ToM mask)) +(ShiftAllRightMaskedUint16x32 x y mask) => (VPSRLWMasked512 x y (VPMOVVec16x32ToM mask)) +(ShiftAllRightMaskedUint32x4 x y mask) => (VPSRLDMasked128 x y (VPMOVVec32x4ToM mask)) +(ShiftAllRightMaskedUint32x8 x y mask) => (VPSRLDMasked256 x y (VPMOVVec32x8ToM mask)) +(ShiftAllRightMaskedUint32x16 x y mask) => (VPSRLDMasked512 x y (VPMOVVec32x16ToM mask)) (ShiftAllRightMaskedUint64x2 x y mask) => (VPSRLQMasked128 x y (VPMOVVec64x2ToM mask)) (ShiftAllRightMaskedUint64x4 x y mask) => (VPSRLQMasked256 x y (VPMOVVec64x4ToM mask)) (ShiftAllRightMaskedUint64x8 x y mask) => (VPSRLQMasked512 x y (VPMOVVec64x8ToM mask)) -(ShiftAllRightSignExtendedInt16x8 ...) => (VPSRAW128 ...) -(ShiftAllRightSignExtendedInt16x16 ...) => (VPSRAW256 ...) -(ShiftAllRightSignExtendedInt32x4 ...) => (VPSRAD128 ...) -(ShiftAllRightSignExtendedInt32x8 ...) => (VPSRAD256 ...) -(ShiftAllRightSignExtendedInt64x2 ...) => (VPSRAQ128 ...) -(ShiftAllRightSignExtendedInt64x4 ...) => (VPSRAQ256 ...) -(ShiftAllRightSignExtendedInt64x8 ...) => (VPSRAQ512 ...) -(ShiftAllRightSignExtendedMaskedInt64x2 x y mask) => (VPSRAQMasked128 x y (VPMOVVec64x2ToM mask)) -(ShiftAllRightSignExtendedMaskedInt64x4 x y mask) => (VPSRAQMasked256 x y (VPMOVVec64x4ToM mask)) -(ShiftAllRightSignExtendedMaskedInt64x8 x y mask) => (VPSRAQMasked512 x y (VPMOVVec64x8ToM mask)) (ShiftLeftInt16x8 ...) => (VPSLLVW128 ...) (ShiftLeftInt16x16 ...) => (VPSLLVW256 ...) (ShiftLeftInt16x32 ...) => (VPSLLVW512 ...) @@ -1431,15 +1453,15 @@ (ShiftLeftMaskedUint64x2 x y mask) => (VPSLLVQMasked128 x y (VPMOVVec64x2ToM mask)) (ShiftLeftMaskedUint64x4 x y mask) => (VPSLLVQMasked256 x y (VPMOVVec64x4ToM mask)) (ShiftLeftMaskedUint64x8 x y mask) => (VPSLLVQMasked512 x y (VPMOVVec64x8ToM mask)) -(ShiftRightInt16x8 ...) => (VPSRLVW128 ...) -(ShiftRightInt16x16 ...) => (VPSRLVW256 ...) -(ShiftRightInt16x32 ...) => (VPSRLVW512 ...) -(ShiftRightInt32x4 ...) => (VPSRLVD128 ...) -(ShiftRightInt32x8 ...) => (VPSRLVD256 ...) 
-(ShiftRightInt32x16 ...) => (VPSRLVD512 ...) -(ShiftRightInt64x2 ...) => (VPSRLVQ128 ...) -(ShiftRightInt64x4 ...) => (VPSRLVQ256 ...) -(ShiftRightInt64x8 ...) => (VPSRLVQ512 ...) +(ShiftRightInt16x8 ...) => (VPSRAVW128 ...) +(ShiftRightInt16x16 ...) => (VPSRAVW256 ...) +(ShiftRightInt16x32 ...) => (VPSRAVW512 ...) +(ShiftRightInt32x4 ...) => (VPSRAVD128 ...) +(ShiftRightInt32x8 ...) => (VPSRAVD256 ...) +(ShiftRightInt32x16 ...) => (VPSRAVD512 ...) +(ShiftRightInt64x2 ...) => (VPSRAVQ128 ...) +(ShiftRightInt64x4 ...) => (VPSRAVQ256 ...) +(ShiftRightInt64x8 ...) => (VPSRAVQ512 ...) (ShiftRightUint16x8 ...) => (VPSRLVW128 ...) (ShiftRightUint16x16 ...) => (VPSRLVW256 ...) (ShiftRightUint16x32 ...) => (VPSRLVW512 ...) @@ -1485,15 +1507,15 @@ (ShiftRightAndFillUpperFromMaskedUint64x2 x y z mask) => (VPSHRDVQMasked128 x y z (VPMOVVec64x2ToM mask)) (ShiftRightAndFillUpperFromMaskedUint64x4 x y z mask) => (VPSHRDVQMasked256 x y z (VPMOVVec64x4ToM mask)) (ShiftRightAndFillUpperFromMaskedUint64x8 x y z mask) => (VPSHRDVQMasked512 x y z (VPMOVVec64x8ToM mask)) -(ShiftRightMaskedInt16x8 x y mask) => (VPSRLVWMasked128 x y (VPMOVVec16x8ToM mask)) -(ShiftRightMaskedInt16x16 x y mask) => (VPSRLVWMasked256 x y (VPMOVVec16x16ToM mask)) -(ShiftRightMaskedInt16x32 x y mask) => (VPSRLVWMasked512 x y (VPMOVVec16x32ToM mask)) -(ShiftRightMaskedInt32x4 x y mask) => (VPSRLVDMasked128 x y (VPMOVVec32x4ToM mask)) -(ShiftRightMaskedInt32x8 x y mask) => (VPSRLVDMasked256 x y (VPMOVVec32x8ToM mask)) -(ShiftRightMaskedInt32x16 x y mask) => (VPSRLVDMasked512 x y (VPMOVVec32x16ToM mask)) -(ShiftRightMaskedInt64x2 x y mask) => (VPSRLVQMasked128 x y (VPMOVVec64x2ToM mask)) -(ShiftRightMaskedInt64x4 x y mask) => (VPSRLVQMasked256 x y (VPMOVVec64x4ToM mask)) -(ShiftRightMaskedInt64x8 x y mask) => (VPSRLVQMasked512 x y (VPMOVVec64x8ToM mask)) +(ShiftRightMaskedInt16x8 x y mask) => (VPSRAVWMasked128 x y (VPMOVVec16x8ToM mask)) +(ShiftRightMaskedInt16x16 x y mask) => (VPSRAVWMasked256 x y (VPMOVVec16x16ToM mask)) +(ShiftRightMaskedInt16x32 x y mask) => (VPSRAVWMasked512 x y (VPMOVVec16x32ToM mask)) +(ShiftRightMaskedInt32x4 x y mask) => (VPSRAVDMasked128 x y (VPMOVVec32x4ToM mask)) +(ShiftRightMaskedInt32x8 x y mask) => (VPSRAVDMasked256 x y (VPMOVVec32x8ToM mask)) +(ShiftRightMaskedInt32x16 x y mask) => (VPSRAVDMasked512 x y (VPMOVVec32x16ToM mask)) +(ShiftRightMaskedInt64x2 x y mask) => (VPSRAVQMasked128 x y (VPMOVVec64x2ToM mask)) +(ShiftRightMaskedInt64x4 x y mask) => (VPSRAVQMasked256 x y (VPMOVVec64x4ToM mask)) +(ShiftRightMaskedInt64x8 x y mask) => (VPSRAVQMasked512 x y (VPMOVVec64x8ToM mask)) (ShiftRightMaskedUint16x8 x y mask) => (VPSRLVWMasked128 x y (VPMOVVec16x8ToM mask)) (ShiftRightMaskedUint16x16 x y mask) => (VPSRLVWMasked256 x y (VPMOVVec16x16ToM mask)) (ShiftRightMaskedUint16x32 x y mask) => (VPSRLVWMasked512 x y (VPMOVVec16x32ToM mask)) @@ -1503,42 +1525,6 @@ (ShiftRightMaskedUint64x2 x y mask) => (VPSRLVQMasked128 x y (VPMOVVec64x2ToM mask)) (ShiftRightMaskedUint64x4 x y mask) => (VPSRLVQMasked256 x y (VPMOVVec64x4ToM mask)) (ShiftRightMaskedUint64x8 x y mask) => (VPSRLVQMasked512 x y (VPMOVVec64x8ToM mask)) -(ShiftRightSignExtendedInt16x8 ...) => (VPSRAVW128 ...) -(ShiftRightSignExtendedInt16x16 ...) => (VPSRAVW256 ...) -(ShiftRightSignExtendedInt16x32 ...) => (VPSRAVW512 ...) -(ShiftRightSignExtendedInt32x4 ...) => (VPSRAVD128 ...) -(ShiftRightSignExtendedInt32x8 ...) => (VPSRAVD256 ...) -(ShiftRightSignExtendedInt32x16 ...) => (VPSRAVD512 ...) -(ShiftRightSignExtendedInt64x2 ...) => (VPSRAVQ128 ...) 
-(ShiftRightSignExtendedInt64x4 ...) => (VPSRAVQ256 ...) -(ShiftRightSignExtendedInt64x8 ...) => (VPSRAVQ512 ...) -(ShiftRightSignExtendedUint16x8 ...) => (VPSRAVW128 ...) -(ShiftRightSignExtendedUint16x16 ...) => (VPSRAVW256 ...) -(ShiftRightSignExtendedUint16x32 ...) => (VPSRAVW512 ...) -(ShiftRightSignExtendedUint32x4 ...) => (VPSRAVD128 ...) -(ShiftRightSignExtendedUint32x8 ...) => (VPSRAVD256 ...) -(ShiftRightSignExtendedUint32x16 ...) => (VPSRAVD512 ...) -(ShiftRightSignExtendedUint64x2 ...) => (VPSRAVQ128 ...) -(ShiftRightSignExtendedUint64x4 ...) => (VPSRAVQ256 ...) -(ShiftRightSignExtendedUint64x8 ...) => (VPSRAVQ512 ...) -(ShiftRightSignExtendedMaskedInt16x8 x y mask) => (VPSRAVWMasked128 x y (VPMOVVec16x8ToM mask)) -(ShiftRightSignExtendedMaskedInt16x16 x y mask) => (VPSRAVWMasked256 x y (VPMOVVec16x16ToM mask)) -(ShiftRightSignExtendedMaskedInt16x32 x y mask) => (VPSRAVWMasked512 x y (VPMOVVec16x32ToM mask)) -(ShiftRightSignExtendedMaskedInt32x4 x y mask) => (VPSRAVDMasked128 x y (VPMOVVec32x4ToM mask)) -(ShiftRightSignExtendedMaskedInt32x8 x y mask) => (VPSRAVDMasked256 x y (VPMOVVec32x8ToM mask)) -(ShiftRightSignExtendedMaskedInt32x16 x y mask) => (VPSRAVDMasked512 x y (VPMOVVec32x16ToM mask)) -(ShiftRightSignExtendedMaskedInt64x2 x y mask) => (VPSRAVQMasked128 x y (VPMOVVec64x2ToM mask)) -(ShiftRightSignExtendedMaskedInt64x4 x y mask) => (VPSRAVQMasked256 x y (VPMOVVec64x4ToM mask)) -(ShiftRightSignExtendedMaskedInt64x8 x y mask) => (VPSRAVQMasked512 x y (VPMOVVec64x8ToM mask)) -(ShiftRightSignExtendedMaskedUint16x8 x y mask) => (VPSRAVWMasked128 x y (VPMOVVec16x8ToM mask)) -(ShiftRightSignExtendedMaskedUint16x16 x y mask) => (VPSRAVWMasked256 x y (VPMOVVec16x16ToM mask)) -(ShiftRightSignExtendedMaskedUint16x32 x y mask) => (VPSRAVWMasked512 x y (VPMOVVec16x32ToM mask)) -(ShiftRightSignExtendedMaskedUint32x4 x y mask) => (VPSRAVDMasked128 x y (VPMOVVec32x4ToM mask)) -(ShiftRightSignExtendedMaskedUint32x8 x y mask) => (VPSRAVDMasked256 x y (VPMOVVec32x8ToM mask)) -(ShiftRightSignExtendedMaskedUint32x16 x y mask) => (VPSRAVDMasked512 x y (VPMOVVec32x16ToM mask)) -(ShiftRightSignExtendedMaskedUint64x2 x y mask) => (VPSRAVQMasked128 x y (VPMOVVec64x2ToM mask)) -(ShiftRightSignExtendedMaskedUint64x4 x y mask) => (VPSRAVQMasked256 x y (VPMOVVec64x4ToM mask)) -(ShiftRightSignExtendedMaskedUint64x8 x y mask) => (VPSRAVQMasked512 x y (VPMOVVec64x8ToM mask)) (SignInt8x16 ...) => (VPSIGNB128 ...) (SignInt8x32 ...) => (VPSIGNB256 ...) (SignInt16x8 ...) => (VPSIGNW128 ...) 
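[Editor's illustration, not part of the patch: the ShiftRightSignExtended rules removed above are subsumed by ShiftRight itself, which for signed types now lowers to the variable-count arithmetic shifts VPSRAVW/VPSRAVD/VPSRAVQ and for unsigned types to VPSRLVW/VPSRLVD/VPSRLVQ. Unlike ShiftAllRight, the shift count here is taken per lane from a second vector. The scalar model below sketches the signed 32-bit case; the function name and array-based signature are invented for illustration, and counts are assumed to be smaller than the lane width.]

package shiftsketch

// shiftRightPerLane models the per-element ShiftRight operation on a signed
// 32-bit vector (VPSRAVD-style): each lane of x is shifted right by the count
// held in the matching lane of counts, with sign bits filling from the left.
// Sketch only; counts >= 32 are not modeled here.
func shiftRightPerLane(x [4]int32, counts [4]uint32) [4]int32 {
	var r [4]int32
	for i := range x {
		r[i] = x[i] >> counts[i]
	}
	return r
}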
diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index f0a149f7d8a88b..3f777db5b7d58e 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -198,17 +198,16 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPSUBSW256", argLength: 2, reg: v21, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBSWMasked256", argLength: 3, reg: w2kw, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSLLW256", argLength: 2, reg: vfpv, asm: "VPSLLW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLW256", argLength: 2, reg: vfpv, asm: "VPSRLW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLWMasked256", argLength: 3, reg: wfpkw, asm: "VPSLLW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRAW256", argLength: 2, reg: vfpv, asm: "VPSRAW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAWMasked256", argLength: 3, reg: wfpkw, asm: "VPSRAW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSLLVW256", argLength: 2, reg: w21, asm: "VPSLLVW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHLDVW256", argLength: 3, reg: w31, asm: "VPSHLDVW", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPSHLDVWMasked256", argLength: 4, reg: w3kw, asm: "VPSHLDVW", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPSLLVWMasked256", argLength: 3, reg: w2kw, asm: "VPSLLVW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLVW256", argLength: 2, reg: w21, asm: "VPSRLVW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAVW256", argLength: 2, reg: w21, asm: "VPSRAVW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHRDVW256", argLength: 3, reg: w31, asm: "VPSHRDVW", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPSHRDVWMasked256", argLength: 4, reg: w3kw, asm: "VPSHRDVW", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSRLVWMasked256", argLength: 3, reg: w2kw, asm: "VPSRLVW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAVW256", argLength: 2, reg: w21, asm: "VPSRAVW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRAVWMasked256", argLength: 3, reg: w2kw, asm: "VPSRAVW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSIGNW256", argLength: 2, reg: v21, asm: "VPSIGNW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBW256", argLength: 2, reg: v21, asm: "VPSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -233,15 +232,17 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPADDSWMasked512", argLength: 3, reg: w2kw, asm: "VPADDSW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPSUBSW512", argLength: 2, reg: w21, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSUBSWMasked512", argLength: 3, reg: w2kw, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLW512", argLength: 2, reg: wfpw, asm: "VPSLLW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLWMasked512", argLength: 3, reg: wfpkw, asm: "VPSLLW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAW512", argLength: 2, 
reg: wfpw, asm: "VPSRAW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAWMasked512", argLength: 3, reg: wfpkw, asm: "VPSRAW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSLLVW512", argLength: 2, reg: w21, asm: "VPSLLVW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSHLDVW512", argLength: 3, reg: w31, asm: "VPSHLDVW", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPSHLDVWMasked512", argLength: 4, reg: w3kw, asm: "VPSHLDVW", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPSLLVWMasked512", argLength: 3, reg: w2kw, asm: "VPSLLVW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLVW512", argLength: 2, reg: w21, asm: "VPSRLVW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAVW512", argLength: 2, reg: w21, asm: "VPSRAVW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSHRDVW512", argLength: 3, reg: w31, asm: "VPSHRDVW", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPSHRDVWMasked512", argLength: 4, reg: w3kw, asm: "VPSHRDVW", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSRLVWMasked512", argLength: 3, reg: w2kw, asm: "VPSRLVW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAVW512", argLength: 2, reg: w21, asm: "VPSRAVW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRAVWMasked512", argLength: 3, reg: w2kw, asm: "VPSRAVW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSUBW512", argLength: 2, reg: w21, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSUBWMasked512", argLength: 3, reg: w2kw, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -272,17 +273,16 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPSUBSW128", argLength: 2, reg: v21, asm: "VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBSWMasked128", argLength: 3, reg: w2kw, asm: "VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSLLW128", argLength: 2, reg: vfpv, asm: "VPSLLW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLW128", argLength: 2, reg: vfpv, asm: "VPSRLW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLWMasked128", argLength: 3, reg: wfpkw, asm: "VPSLLW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRAW128", argLength: 2, reg: vfpv, asm: "VPSRAW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAWMasked128", argLength: 3, reg: wfpkw, asm: "VPSRAW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSLLVW128", argLength: 2, reg: w21, asm: "VPSLLVW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSHLDVW128", argLength: 3, reg: w31, asm: "VPSHLDVW", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPSHLDVWMasked128", argLength: 4, reg: w3kw, asm: "VPSHLDVW", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPSLLVWMasked128", argLength: 3, reg: w2kw, asm: "VPSLLVW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLVW128", argLength: 2, reg: w21, asm: "VPSRLVW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAVW128", argLength: 2, reg: w21, asm: "VPSRAVW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSHRDVW128", argLength: 3, reg: w31, asm: "VPSHRDVW", commutative: false, 
typ: "Vec128", resultInArg0: true}, {name: "VPSHRDVWMasked128", argLength: 4, reg: w3kw, asm: "VPSHRDVW", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSRLVWMasked128", argLength: 3, reg: w2kw, asm: "VPSRLVW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAVW128", argLength: 2, reg: w21, asm: "VPSRAVW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRAVWMasked128", argLength: 3, reg: w2kw, asm: "VPSRAVW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSIGNW128", argLength: 2, reg: v21, asm: "VPSIGNW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBW128", argLength: 2, reg: v21, asm: "VPSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -315,15 +315,17 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPDPWSSDSMasked512", argLength: 4, reg: w3kw, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPDPBUSDS512", argLength: 3, reg: w31, asm: "VPDPBUSDS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPDPBUSDSMasked512", argLength: 4, reg: w3kw, asm: "VPDPBUSDS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSLLD512", argLength: 2, reg: wfpw, asm: "VPSLLD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLDMasked512", argLength: 3, reg: wfpkw, asm: "VPSLLD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAD512", argLength: 2, reg: wfpw, asm: "VPSRAD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRADMasked512", argLength: 3, reg: wfpkw, asm: "VPSRAD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSLLVD512", argLength: 2, reg: w21, asm: "VPSLLVD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSHLDVD512", argLength: 3, reg: w31, asm: "VPSHLDVD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPSHLDVDMasked512", argLength: 4, reg: w3kw, asm: "VPSHLDVD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPSLLVDMasked512", argLength: 3, reg: w2kw, asm: "VPSLLVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLVD512", argLength: 2, reg: w21, asm: "VPSRLVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAVD512", argLength: 2, reg: w21, asm: "VPSRAVD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSHRDVD512", argLength: 3, reg: w31, asm: "VPSHRDVD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPSHRDVDMasked512", argLength: 4, reg: w3kw, asm: "VPSHRDVD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSRLVDMasked512", argLength: 3, reg: w2kw, asm: "VPSRLVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAVD512", argLength: 2, reg: w21, asm: "VPSRAVD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRAVDMasked512", argLength: 3, reg: w2kw, asm: "VPSRAVD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSUBD512", argLength: 2, reg: w21, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSUBDMasked512", argLength: 3, reg: w2kw, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -362,17 +364,16 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPDPBUSDS128", argLength: 3, reg: v31, asm: "VPDPBUSDS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: 
"VPDPBUSDSMasked128", argLength: 4, reg: w3kw, asm: "VPDPBUSDS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPSLLD128", argLength: 2, reg: vfpv, asm: "VPSLLD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLD128", argLength: 2, reg: vfpv, asm: "VPSRLD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLDMasked128", argLength: 3, reg: wfpkw, asm: "VPSLLD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRAD128", argLength: 2, reg: vfpv, asm: "VPSRAD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRADMasked128", argLength: 3, reg: wfpkw, asm: "VPSRAD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSLLVD128", argLength: 2, reg: v21, asm: "VPSLLVD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSHLDVD128", argLength: 3, reg: w31, asm: "VPSHLDVD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPSHLDVDMasked128", argLength: 4, reg: w3kw, asm: "VPSHLDVD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPSLLVDMasked128", argLength: 3, reg: w2kw, asm: "VPSLLVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLVD128", argLength: 2, reg: v21, asm: "VPSRLVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAVD128", argLength: 2, reg: v21, asm: "VPSRAVD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSHRDVD128", argLength: 3, reg: w31, asm: "VPSHRDVD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPSHRDVDMasked128", argLength: 4, reg: w3kw, asm: "VPSHRDVD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSRLVDMasked128", argLength: 3, reg: w2kw, asm: "VPSRLVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAVD128", argLength: 2, reg: v21, asm: "VPSRAVD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRAVDMasked128", argLength: 3, reg: w2kw, asm: "VPSRAVD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSIGND128", argLength: 2, reg: v21, asm: "VPSIGND", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBD128", argLength: 2, reg: v21, asm: "VPSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -411,17 +412,16 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPDPBUSDS256", argLength: 3, reg: v31, asm: "VPDPBUSDS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPDPBUSDSMasked256", argLength: 4, reg: w3kw, asm: "VPDPBUSDS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPSLLD256", argLength: 2, reg: vfpv, asm: "VPSLLD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLD256", argLength: 2, reg: vfpv, asm: "VPSRLD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLDMasked256", argLength: 3, reg: wfpkw, asm: "VPSLLD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRAD256", argLength: 2, reg: vfpv, asm: "VPSRAD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRADMasked256", argLength: 3, reg: wfpkw, asm: "VPSRAD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSLLVD256", argLength: 2, reg: v21, asm: "VPSLLVD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHLDVD256", argLength: 3, reg: w31, asm: "VPSHLDVD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPSHLDVDMasked256", argLength: 4, reg: w3kw, 
asm: "VPSHLDVD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPSLLVDMasked256", argLength: 3, reg: w2kw, asm: "VPSLLVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLVD256", argLength: 2, reg: v21, asm: "VPSRLVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAVD256", argLength: 2, reg: v21, asm: "VPSRAVD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHRDVD256", argLength: 3, reg: w31, asm: "VPSHRDVD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPSHRDVDMasked256", argLength: 4, reg: w3kw, asm: "VPSHRDVD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSRLVDMasked256", argLength: 3, reg: w2kw, asm: "VPSRLVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAVD256", argLength: 2, reg: v21, asm: "VPSRAVD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRAVDMasked256", argLength: 3, reg: w2kw, asm: "VPSRAVD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSIGND256", argLength: 2, reg: v21, asm: "VPSIGND", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBD256", argLength: 2, reg: v21, asm: "VPSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -453,19 +453,15 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPRORVQMasked128", argLength: 3, reg: w2kw, asm: "VPRORVQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSLLQ128", argLength: 2, reg: vfpv, asm: "VPSLLQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSLLQMasked128", argLength: 3, reg: wfpkw, asm: "VPSLLQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLQ128", argLength: 2, reg: vfpv, asm: "VPSRLQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLQMasked128", argLength: 3, reg: wfpkw, asm: "VPSRLQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRAQ128", argLength: 2, reg: wfpw, asm: "VPSRAQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRAQMasked128", argLength: 3, reg: wfpkw, asm: "VPSRAQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSLLVQ128", argLength: 2, reg: v21, asm: "VPSLLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSHLDVQ128", argLength: 3, reg: w31, asm: "VPSHLDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPSHLDVQMasked128", argLength: 4, reg: w3kw, asm: "VPSHLDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPSLLVQMasked128", argLength: 3, reg: w2kw, asm: "VPSLLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLVQ128", argLength: 2, reg: v21, asm: "VPSRLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAVQ128", argLength: 2, reg: w21, asm: "VPSRAVQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSHRDVQ128", argLength: 3, reg: w31, asm: "VPSHRDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPSHRDVQMasked128", argLength: 4, reg: w3kw, asm: "VPSHRDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSRLVQMasked128", argLength: 3, reg: w2kw, asm: "VPSRLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAVQ128", argLength: 2, reg: w21, asm: "VPSRAVQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRAVQMasked128", argLength: 3, reg: w2kw, asm: "VPSRAVQ", commutative: false, 
typ: "Vec128", resultInArg0: false}, {name: "VPSUBQ128", argLength: 2, reg: v21, asm: "VPSUBQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBQMasked128", argLength: 3, reg: w2kw, asm: "VPSUBQ", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -494,19 +490,15 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPRORVQMasked256", argLength: 3, reg: w2kw, asm: "VPRORVQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSLLQ256", argLength: 2, reg: vfpv, asm: "VPSLLQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSLLQMasked256", argLength: 3, reg: wfpkw, asm: "VPSLLQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLQ256", argLength: 2, reg: vfpv, asm: "VPSRLQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLQMasked256", argLength: 3, reg: wfpkw, asm: "VPSRLQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRAQ256", argLength: 2, reg: wfpw, asm: "VPSRAQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRAQMasked256", argLength: 3, reg: wfpkw, asm: "VPSRAQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSLLVQ256", argLength: 2, reg: v21, asm: "VPSLLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHLDVQ256", argLength: 3, reg: w31, asm: "VPSHLDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPSHLDVQMasked256", argLength: 4, reg: w3kw, asm: "VPSHLDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPSLLVQMasked256", argLength: 3, reg: w2kw, asm: "VPSLLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLVQ256", argLength: 2, reg: v21, asm: "VPSRLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAVQ256", argLength: 2, reg: w21, asm: "VPSRAVQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHRDVQ256", argLength: 3, reg: w31, asm: "VPSHRDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPSHRDVQMasked256", argLength: 4, reg: w3kw, asm: "VPSHRDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSRLVQMasked256", argLength: 3, reg: w2kw, asm: "VPSRLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAVQ256", argLength: 2, reg: w21, asm: "VPSRAVQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRAVQMasked256", argLength: 3, reg: w2kw, asm: "VPSRAVQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBQ256", argLength: 2, reg: v21, asm: "VPSUBQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBQMasked256", argLength: 3, reg: w2kw, asm: "VPSUBQ", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -537,19 +529,15 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPRORVQMasked512", argLength: 3, reg: w2kw, asm: "VPRORVQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSLLQ512", argLength: 2, reg: wfpw, asm: "VPSLLQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSLLQMasked512", argLength: 3, reg: wfpkw, asm: "VPSLLQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLQ512", argLength: 2, reg: wfpw, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLQMasked512", argLength: 3, reg: wfpkw, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRAQ512", 
argLength: 2, reg: wfpw, asm: "VPSRAQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRAQMasked512", argLength: 3, reg: wfpkw, asm: "VPSRAQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSLLVQ512", argLength: 2, reg: w21, asm: "VPSLLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSHLDVQ512", argLength: 3, reg: w31, asm: "VPSHLDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPSHLDVQMasked512", argLength: 4, reg: w3kw, asm: "VPSHLDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPSLLVQMasked512", argLength: 3, reg: w2kw, asm: "VPSLLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLVQ512", argLength: 2, reg: w21, asm: "VPSRLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAVQ512", argLength: 2, reg: w21, asm: "VPSRAVQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSHRDVQ512", argLength: 3, reg: w31, asm: "VPSHRDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPSHRDVQMasked512", argLength: 4, reg: w3kw, asm: "VPSHRDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSRLVQMasked512", argLength: 3, reg: w2kw, asm: "VPSRLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAVQ512", argLength: 2, reg: w21, asm: "VPSRAVQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRAVQMasked512", argLength: 3, reg: w2kw, asm: "VPSRAVQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSUBQ512", argLength: 2, reg: w21, asm: "VPSUBQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSUBQMasked512", argLength: 3, reg: w2kw, asm: "VPSUBQ", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -625,6 +613,10 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMINUWMasked256", argLength: 3, reg: w2kw, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULHUW256", argLength: 2, reg: v21, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULHUWMasked256", argLength: 3, reg: w2kw, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLW256", argLength: 2, reg: vfpv, asm: "VPSRLW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLWMasked256", argLength: 3, reg: wfpkw, asm: "VPSRLW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLVW256", argLength: 2, reg: w21, asm: "VPSRLVW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLVWMasked256", argLength: 3, reg: w2kw, asm: "VPSRLVW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPAVGW512", argLength: 2, reg: w21, asm: "VPAVGW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPAVGWMasked512", argLength: 3, reg: w2kw, asm: "VPAVGW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXUW512", argLength: 2, reg: w21, asm: "VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -633,6 +625,10 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMINUWMasked512", argLength: 3, reg: w2kw, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULHUW512", argLength: 2, reg: w21, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULHUWMasked512", argLength: 3, reg: w2kw, asm: "VPMULHUW", commutative: 
true, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLW512", argLength: 2, reg: wfpw, asm: "VPSRLW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLWMasked512", argLength: 3, reg: wfpkw, asm: "VPSRLW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLVW512", argLength: 2, reg: w21, asm: "VPSRLVW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLVWMasked512", argLength: 3, reg: w2kw, asm: "VPSRLVW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPAVGW128", argLength: 2, reg: v21, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPAVGWMasked128", argLength: 3, reg: w2kw, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMAXUW128", argLength: 2, reg: v21, asm: "VPMAXUW", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -641,36 +637,64 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMINUWMasked128", argLength: 3, reg: w2kw, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULHUW128", argLength: 2, reg: v21, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULHUWMasked128", argLength: 3, reg: w2kw, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLW128", argLength: 2, reg: vfpv, asm: "VPSRLW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLWMasked128", argLength: 3, reg: wfpkw, asm: "VPSRLW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLVW128", argLength: 2, reg: w21, asm: "VPSRLVW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLVWMasked128", argLength: 3, reg: w2kw, asm: "VPSRLVW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMAXUD512", argLength: 2, reg: w21, asm: "VPMAXUD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXUDMasked512", argLength: 3, reg: w2kw, asm: "VPMAXUD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINUD512", argLength: 2, reg: w21, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINUDMasked512", argLength: 3, reg: w2kw, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLD512", argLength: 2, reg: wfpw, asm: "VPSRLD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLDMasked512", argLength: 3, reg: wfpkw, asm: "VPSRLD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLVD512", argLength: 2, reg: w21, asm: "VPSRLVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLVDMasked512", argLength: 3, reg: w2kw, asm: "VPSRLVD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPMAXUD128", argLength: 2, reg: v21, asm: "VPMAXUD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMAXUDMasked128", argLength: 3, reg: w2kw, asm: "VPMAXUD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUD128", argLength: 2, reg: v21, asm: "VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUDMasked128", argLength: 3, reg: w2kw, asm: "VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULUDQ128", argLength: 2, reg: v21, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLD128", argLength: 2, reg: vfpv, asm: "VPSRLD", commutative: false, typ: "Vec128", resultInArg0: false}, + 
{name: "VPSRLDMasked128", argLength: 3, reg: wfpkw, asm: "VPSRLD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLVD128", argLength: 2, reg: v21, asm: "VPSRLVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLVDMasked128", argLength: 3, reg: w2kw, asm: "VPSRLVD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMAXUD256", argLength: 2, reg: v21, asm: "VPMAXUD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMAXUDMasked256", argLength: 3, reg: w2kw, asm: "VPMAXUD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUD256", argLength: 2, reg: v21, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUDMasked256", argLength: 3, reg: w2kw, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULUDQ256", argLength: 2, reg: v21, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLD256", argLength: 2, reg: vfpv, asm: "VPSRLD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLDMasked256", argLength: 3, reg: wfpkw, asm: "VPSRLD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLVD256", argLength: 2, reg: v21, asm: "VPSRLVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLVDMasked256", argLength: 3, reg: w2kw, asm: "VPSRLVD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMAXUQ128", argLength: 2, reg: w21, asm: "VPMAXUQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMAXUQMasked128", argLength: 3, reg: w2kw, asm: "VPMAXUQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUQ128", argLength: 2, reg: w21, asm: "VPMINUQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUQMasked128", argLength: 3, reg: w2kw, asm: "VPMINUQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULUDQMasked128", argLength: 3, reg: w2kw, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLQ128", argLength: 2, reg: vfpv, asm: "VPSRLQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLQMasked128", argLength: 3, reg: wfpkw, asm: "VPSRLQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLVQ128", argLength: 2, reg: v21, asm: "VPSRLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLVQMasked128", argLength: 3, reg: w2kw, asm: "VPSRLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMAXUQ256", argLength: 2, reg: w21, asm: "VPMAXUQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMAXUQMasked256", argLength: 3, reg: w2kw, asm: "VPMAXUQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUQ256", argLength: 2, reg: w21, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUQMasked256", argLength: 3, reg: w2kw, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULUDQMasked256", argLength: 3, reg: w2kw, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLQ256", argLength: 2, reg: vfpv, asm: "VPSRLQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLQMasked256", argLength: 3, reg: wfpkw, asm: "VPSRLQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLVQ256", argLength: 2, reg: v21, asm: "VPSRLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: 
"VPSRLVQMasked256", argLength: 3, reg: w2kw, asm: "VPSRLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMAXUQ512", argLength: 2, reg: w21, asm: "VPMAXUQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXUQMasked512", argLength: 3, reg: w2kw, asm: "VPMAXUQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINUQ512", argLength: 2, reg: w21, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINUQMasked512", argLength: 3, reg: w2kw, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULUDQ512", argLength: 2, reg: w21, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULUDQMasked512", argLength: 3, reg: w2kw, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLQ512", argLength: 2, reg: wfpw, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLQMasked512", argLength: 3, reg: wfpkw, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLVQ512", argLength: 2, reg: w21, asm: "VPSRLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLVQMasked512", argLength: 3, reg: w2kw, asm: "VPSRLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPAVGB128", argLength: 2, reg: v21, asm: "VPAVGB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPAVGBMasked128", argLength: 3, reg: w2kw, asm: "VPAVGB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VGF2P8MULB128", argLength: 2, reg: w21, asm: "VGF2P8MULB", commutative: false, typ: "Vec128", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index d07472b87684ed..1180d32586b41d 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -312,8 +312,9 @@ func simdGenericOps() []opData { {name: "SaturatedSubInt16x16", argLength: 2, commutative: false}, {name: "SaturatedSubMaskedInt16x16", argLength: 3, commutative: false}, {name: "ShiftAllLeftInt16x16", argLength: 2, commutative: false}, + {name: "ShiftAllLeftMaskedInt16x16", argLength: 3, commutative: false}, {name: "ShiftAllRightInt16x16", argLength: 2, commutative: false}, - {name: "ShiftAllRightSignExtendedInt16x16", argLength: 2, commutative: false}, + {name: "ShiftAllRightMaskedInt16x16", argLength: 3, commutative: false}, {name: "ShiftLeftInt16x16", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromInt16x16", argLength: 3, commutative: false}, {name: "ShiftLeftAndFillUpperFromMaskedInt16x16", argLength: 4, commutative: false}, @@ -322,8 +323,6 @@ func simdGenericOps() []opData { {name: "ShiftRightAndFillUpperFromInt16x16", argLength: 3, commutative: false}, {name: "ShiftRightAndFillUpperFromMaskedInt16x16", argLength: 4, commutative: false}, {name: "ShiftRightMaskedInt16x16", argLength: 3, commutative: false}, - {name: "ShiftRightSignExtendedInt16x16", argLength: 2, commutative: false}, - {name: "ShiftRightSignExtendedMaskedInt16x16", argLength: 3, commutative: false}, {name: "SignInt16x16", argLength: 2, commutative: false}, {name: "SubInt16x16", argLength: 2, commutative: false}, {name: "SubMaskedInt16x16", argLength: 3, commutative: false}, @@ -360,6 +359,10 @@ func simdGenericOps() []opData { {name: "SaturatedAddMaskedInt16x32", argLength: 3, commutative: true}, {name: "SaturatedSubInt16x32", argLength: 
2, commutative: false}, {name: "SaturatedSubMaskedInt16x32", argLength: 3, commutative: false}, + {name: "ShiftAllLeftInt16x32", argLength: 2, commutative: false}, + {name: "ShiftAllLeftMaskedInt16x32", argLength: 3, commutative: false}, + {name: "ShiftAllRightInt16x32", argLength: 2, commutative: false}, + {name: "ShiftAllRightMaskedInt16x32", argLength: 3, commutative: false}, {name: "ShiftLeftInt16x32", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromInt16x32", argLength: 3, commutative: false}, {name: "ShiftLeftAndFillUpperFromMaskedInt16x32", argLength: 4, commutative: false}, @@ -368,8 +371,6 @@ func simdGenericOps() []opData { {name: "ShiftRightAndFillUpperFromInt16x32", argLength: 3, commutative: false}, {name: "ShiftRightAndFillUpperFromMaskedInt16x32", argLength: 4, commutative: false}, {name: "ShiftRightMaskedInt16x32", argLength: 3, commutative: false}, - {name: "ShiftRightSignExtendedInt16x32", argLength: 2, commutative: false}, - {name: "ShiftRightSignExtendedMaskedInt16x32", argLength: 3, commutative: false}, {name: "SubInt16x32", argLength: 2, commutative: false}, {name: "SubMaskedInt16x32", argLength: 3, commutative: false}, {name: "AbsoluteInt16x8", argLength: 1, commutative: false}, @@ -412,8 +413,9 @@ func simdGenericOps() []opData { {name: "SaturatedSubInt16x8", argLength: 2, commutative: false}, {name: "SaturatedSubMaskedInt16x8", argLength: 3, commutative: false}, {name: "ShiftAllLeftInt16x8", argLength: 2, commutative: false}, + {name: "ShiftAllLeftMaskedInt16x8", argLength: 3, commutative: false}, {name: "ShiftAllRightInt16x8", argLength: 2, commutative: false}, - {name: "ShiftAllRightSignExtendedInt16x8", argLength: 2, commutative: false}, + {name: "ShiftAllRightMaskedInt16x8", argLength: 3, commutative: false}, {name: "ShiftLeftInt16x8", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromInt16x8", argLength: 3, commutative: false}, {name: "ShiftLeftAndFillUpperFromMaskedInt16x8", argLength: 4, commutative: false}, @@ -422,8 +424,6 @@ func simdGenericOps() []opData { {name: "ShiftRightAndFillUpperFromInt16x8", argLength: 3, commutative: false}, {name: "ShiftRightAndFillUpperFromMaskedInt16x8", argLength: 4, commutative: false}, {name: "ShiftRightMaskedInt16x8", argLength: 3, commutative: false}, - {name: "ShiftRightSignExtendedInt16x8", argLength: 2, commutative: false}, - {name: "ShiftRightSignExtendedMaskedInt16x8", argLength: 3, commutative: false}, {name: "SignInt16x8", argLength: 2, commutative: false}, {name: "SubInt16x8", argLength: 2, commutative: false}, {name: "SubMaskedInt16x8", argLength: 3, commutative: false}, @@ -468,6 +468,10 @@ func simdGenericOps() []opData { {name: "SaturatedPairDotProdAccumulateMaskedInt32x16", argLength: 4, commutative: false}, {name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x16", argLength: 3, commutative: false}, {name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16", argLength: 4, commutative: false}, + {name: "ShiftAllLeftInt32x16", argLength: 2, commutative: false}, + {name: "ShiftAllLeftMaskedInt32x16", argLength: 3, commutative: false}, + {name: "ShiftAllRightInt32x16", argLength: 2, commutative: false}, + {name: "ShiftAllRightMaskedInt32x16", argLength: 3, commutative: false}, {name: "ShiftLeftInt32x16", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromInt32x16", argLength: 3, commutative: false}, {name: "ShiftLeftAndFillUpperFromMaskedInt32x16", argLength: 4, commutative: false}, @@ -476,8 +480,6 @@ func simdGenericOps() []opData { {name: 
"ShiftRightAndFillUpperFromInt32x16", argLength: 3, commutative: false}, {name: "ShiftRightAndFillUpperFromMaskedInt32x16", argLength: 4, commutative: false}, {name: "ShiftRightMaskedInt32x16", argLength: 3, commutative: false}, - {name: "ShiftRightSignExtendedInt32x16", argLength: 2, commutative: false}, - {name: "ShiftRightSignExtendedMaskedInt32x16", argLength: 3, commutative: false}, {name: "SubInt32x16", argLength: 2, commutative: false}, {name: "SubMaskedInt32x16", argLength: 3, commutative: false}, {name: "UnsignedSignedQuadDotProdAccumulateInt32x16", argLength: 3, commutative: false}, @@ -528,8 +530,9 @@ func simdGenericOps() []opData { {name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x4", argLength: 3, commutative: false}, {name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4", argLength: 4, commutative: false}, {name: "ShiftAllLeftInt32x4", argLength: 2, commutative: false}, + {name: "ShiftAllLeftMaskedInt32x4", argLength: 3, commutative: false}, {name: "ShiftAllRightInt32x4", argLength: 2, commutative: false}, - {name: "ShiftAllRightSignExtendedInt32x4", argLength: 2, commutative: false}, + {name: "ShiftAllRightMaskedInt32x4", argLength: 3, commutative: false}, {name: "ShiftLeftInt32x4", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromInt32x4", argLength: 3, commutative: false}, {name: "ShiftLeftAndFillUpperFromMaskedInt32x4", argLength: 4, commutative: false}, @@ -538,8 +541,6 @@ func simdGenericOps() []opData { {name: "ShiftRightAndFillUpperFromInt32x4", argLength: 3, commutative: false}, {name: "ShiftRightAndFillUpperFromMaskedInt32x4", argLength: 4, commutative: false}, {name: "ShiftRightMaskedInt32x4", argLength: 3, commutative: false}, - {name: "ShiftRightSignExtendedInt32x4", argLength: 2, commutative: false}, - {name: "ShiftRightSignExtendedMaskedInt32x4", argLength: 3, commutative: false}, {name: "SignInt32x4", argLength: 2, commutative: false}, {name: "SubInt32x4", argLength: 2, commutative: false}, {name: "SubMaskedInt32x4", argLength: 3, commutative: false}, @@ -591,8 +592,9 @@ func simdGenericOps() []opData { {name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x8", argLength: 3, commutative: false}, {name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8", argLength: 4, commutative: false}, {name: "ShiftAllLeftInt32x8", argLength: 2, commutative: false}, + {name: "ShiftAllLeftMaskedInt32x8", argLength: 3, commutative: false}, {name: "ShiftAllRightInt32x8", argLength: 2, commutative: false}, - {name: "ShiftAllRightSignExtendedInt32x8", argLength: 2, commutative: false}, + {name: "ShiftAllRightMaskedInt32x8", argLength: 3, commutative: false}, {name: "ShiftLeftInt32x8", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromInt32x8", argLength: 3, commutative: false}, {name: "ShiftLeftAndFillUpperFromMaskedInt32x8", argLength: 4, commutative: false}, @@ -601,8 +603,6 @@ func simdGenericOps() []opData { {name: "ShiftRightAndFillUpperFromInt32x8", argLength: 3, commutative: false}, {name: "ShiftRightAndFillUpperFromMaskedInt32x8", argLength: 4, commutative: false}, {name: "ShiftRightMaskedInt32x8", argLength: 3, commutative: false}, - {name: "ShiftRightSignExtendedInt32x8", argLength: 2, commutative: false}, - {name: "ShiftRightSignExtendedMaskedInt32x8", argLength: 3, commutative: false}, {name: "SignInt32x8", argLength: 2, commutative: false}, {name: "SubInt32x8", argLength: 2, commutative: false}, {name: "SubMaskedInt32x8", argLength: 3, commutative: false}, @@ -650,8 +650,6 @@ func simdGenericOps() 
[]opData { {name: "ShiftAllLeftMaskedInt64x2", argLength: 3, commutative: false}, {name: "ShiftAllRightInt64x2", argLength: 2, commutative: false}, {name: "ShiftAllRightMaskedInt64x2", argLength: 3, commutative: false}, - {name: "ShiftAllRightSignExtendedInt64x2", argLength: 2, commutative: false}, - {name: "ShiftAllRightSignExtendedMaskedInt64x2", argLength: 3, commutative: false}, {name: "ShiftLeftInt64x2", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromInt64x2", argLength: 3, commutative: false}, {name: "ShiftLeftAndFillUpperFromMaskedInt64x2", argLength: 4, commutative: false}, @@ -660,8 +658,6 @@ func simdGenericOps() []opData { {name: "ShiftRightAndFillUpperFromInt64x2", argLength: 3, commutative: false}, {name: "ShiftRightAndFillUpperFromMaskedInt64x2", argLength: 4, commutative: false}, {name: "ShiftRightMaskedInt64x2", argLength: 3, commutative: false}, - {name: "ShiftRightSignExtendedInt64x2", argLength: 2, commutative: false}, - {name: "ShiftRightSignExtendedMaskedInt64x2", argLength: 3, commutative: false}, {name: "SubInt64x2", argLength: 2, commutative: false}, {name: "SubMaskedInt64x2", argLength: 3, commutative: false}, {name: "XorInt64x2", argLength: 2, commutative: true}, @@ -706,8 +702,6 @@ func simdGenericOps() []opData { {name: "ShiftAllLeftMaskedInt64x4", argLength: 3, commutative: false}, {name: "ShiftAllRightInt64x4", argLength: 2, commutative: false}, {name: "ShiftAllRightMaskedInt64x4", argLength: 3, commutative: false}, - {name: "ShiftAllRightSignExtendedInt64x4", argLength: 2, commutative: false}, - {name: "ShiftAllRightSignExtendedMaskedInt64x4", argLength: 3, commutative: false}, {name: "ShiftLeftInt64x4", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromInt64x4", argLength: 3, commutative: false}, {name: "ShiftLeftAndFillUpperFromMaskedInt64x4", argLength: 4, commutative: false}, @@ -716,8 +710,6 @@ func simdGenericOps() []opData { {name: "ShiftRightAndFillUpperFromInt64x4", argLength: 3, commutative: false}, {name: "ShiftRightAndFillUpperFromMaskedInt64x4", argLength: 4, commutative: false}, {name: "ShiftRightMaskedInt64x4", argLength: 3, commutative: false}, - {name: "ShiftRightSignExtendedInt64x4", argLength: 2, commutative: false}, - {name: "ShiftRightSignExtendedMaskedInt64x4", argLength: 3, commutative: false}, {name: "SubInt64x4", argLength: 2, commutative: false}, {name: "SubMaskedInt64x4", argLength: 3, commutative: false}, {name: "XorInt64x4", argLength: 2, commutative: true}, @@ -762,8 +754,6 @@ func simdGenericOps() []opData { {name: "ShiftAllLeftMaskedInt64x8", argLength: 3, commutative: false}, {name: "ShiftAllRightInt64x8", argLength: 2, commutative: false}, {name: "ShiftAllRightMaskedInt64x8", argLength: 3, commutative: false}, - {name: "ShiftAllRightSignExtendedInt64x8", argLength: 2, commutative: false}, - {name: "ShiftAllRightSignExtendedMaskedInt64x8", argLength: 3, commutative: false}, {name: "ShiftLeftInt64x8", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromInt64x8", argLength: 3, commutative: false}, {name: "ShiftLeftAndFillUpperFromMaskedInt64x8", argLength: 4, commutative: false}, @@ -772,8 +762,6 @@ func simdGenericOps() []opData { {name: "ShiftRightAndFillUpperFromInt64x8", argLength: 3, commutative: false}, {name: "ShiftRightAndFillUpperFromMaskedInt64x8", argLength: 4, commutative: false}, {name: "ShiftRightMaskedInt64x8", argLength: 3, commutative: false}, - {name: "ShiftRightSignExtendedInt64x8", argLength: 2, commutative: false}, - {name: 
"ShiftRightSignExtendedMaskedInt64x8", argLength: 3, commutative: false}, {name: "SubInt64x8", argLength: 2, commutative: false}, {name: "SubMaskedInt64x8", argLength: 3, commutative: false}, {name: "XorInt64x8", argLength: 2, commutative: true}, @@ -906,7 +894,9 @@ func simdGenericOps() []opData { {name: "SaturatedSubUint16x16", argLength: 2, commutative: false}, {name: "SaturatedSubMaskedUint16x16", argLength: 3, commutative: false}, {name: "ShiftAllLeftUint16x16", argLength: 2, commutative: false}, + {name: "ShiftAllLeftMaskedUint16x16", argLength: 3, commutative: false}, {name: "ShiftAllRightUint16x16", argLength: 2, commutative: false}, + {name: "ShiftAllRightMaskedUint16x16", argLength: 3, commutative: false}, {name: "ShiftLeftUint16x16", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromUint16x16", argLength: 3, commutative: false}, {name: "ShiftLeftAndFillUpperFromMaskedUint16x16", argLength: 4, commutative: false}, @@ -915,8 +905,6 @@ func simdGenericOps() []opData { {name: "ShiftRightAndFillUpperFromUint16x16", argLength: 3, commutative: false}, {name: "ShiftRightAndFillUpperFromMaskedUint16x16", argLength: 4, commutative: false}, {name: "ShiftRightMaskedUint16x16", argLength: 3, commutative: false}, - {name: "ShiftRightSignExtendedUint16x16", argLength: 2, commutative: false}, - {name: "ShiftRightSignExtendedMaskedUint16x16", argLength: 3, commutative: false}, {name: "SubUint16x16", argLength: 2, commutative: false}, {name: "SubMaskedUint16x16", argLength: 3, commutative: false}, {name: "XorUint16x16", argLength: 2, commutative: true}, @@ -948,6 +936,10 @@ func simdGenericOps() []opData { {name: "SaturatedAddMaskedUint16x32", argLength: 3, commutative: true}, {name: "SaturatedSubUint16x32", argLength: 2, commutative: false}, {name: "SaturatedSubMaskedUint16x32", argLength: 3, commutative: false}, + {name: "ShiftAllLeftUint16x32", argLength: 2, commutative: false}, + {name: "ShiftAllLeftMaskedUint16x32", argLength: 3, commutative: false}, + {name: "ShiftAllRightUint16x32", argLength: 2, commutative: false}, + {name: "ShiftAllRightMaskedUint16x32", argLength: 3, commutative: false}, {name: "ShiftLeftUint16x32", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromUint16x32", argLength: 3, commutative: false}, {name: "ShiftLeftAndFillUpperFromMaskedUint16x32", argLength: 4, commutative: false}, @@ -956,8 +948,6 @@ func simdGenericOps() []opData { {name: "ShiftRightAndFillUpperFromUint16x32", argLength: 3, commutative: false}, {name: "ShiftRightAndFillUpperFromMaskedUint16x32", argLength: 4, commutative: false}, {name: "ShiftRightMaskedUint16x32", argLength: 3, commutative: false}, - {name: "ShiftRightSignExtendedUint16x32", argLength: 2, commutative: false}, - {name: "ShiftRightSignExtendedMaskedUint16x32", argLength: 3, commutative: false}, {name: "SubUint16x32", argLength: 2, commutative: false}, {name: "SubMaskedUint16x32", argLength: 3, commutative: false}, {name: "AddUint16x8", argLength: 2, commutative: true}, @@ -994,7 +984,9 @@ func simdGenericOps() []opData { {name: "SaturatedSubUint16x8", argLength: 2, commutative: false}, {name: "SaturatedSubMaskedUint16x8", argLength: 3, commutative: false}, {name: "ShiftAllLeftUint16x8", argLength: 2, commutative: false}, + {name: "ShiftAllLeftMaskedUint16x8", argLength: 3, commutative: false}, {name: "ShiftAllRightUint16x8", argLength: 2, commutative: false}, + {name: "ShiftAllRightMaskedUint16x8", argLength: 3, commutative: false}, {name: "ShiftLeftUint16x8", argLength: 2, commutative: false}, {name: 
"ShiftLeftAndFillUpperFromUint16x8", argLength: 3, commutative: false}, {name: "ShiftLeftAndFillUpperFromMaskedUint16x8", argLength: 4, commutative: false}, @@ -1003,8 +995,6 @@ func simdGenericOps() []opData { {name: "ShiftRightAndFillUpperFromUint16x8", argLength: 3, commutative: false}, {name: "ShiftRightAndFillUpperFromMaskedUint16x8", argLength: 4, commutative: false}, {name: "ShiftRightMaskedUint16x8", argLength: 3, commutative: false}, - {name: "ShiftRightSignExtendedUint16x8", argLength: 2, commutative: false}, - {name: "ShiftRightSignExtendedMaskedUint16x8", argLength: 3, commutative: false}, {name: "SubUint16x8", argLength: 2, commutative: false}, {name: "SubMaskedUint16x8", argLength: 3, commutative: false}, {name: "XorUint16x8", argLength: 2, commutative: true}, @@ -1040,6 +1030,10 @@ func simdGenericOps() []opData { {name: "RotateRightMaskedUint32x16", argLength: 3, commutative: false}, {name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x16", argLength: 3, commutative: false}, {name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x16", argLength: 4, commutative: false}, + {name: "ShiftAllLeftUint32x16", argLength: 2, commutative: false}, + {name: "ShiftAllLeftMaskedUint32x16", argLength: 3, commutative: false}, + {name: "ShiftAllRightUint32x16", argLength: 2, commutative: false}, + {name: "ShiftAllRightMaskedUint32x16", argLength: 3, commutative: false}, {name: "ShiftLeftUint32x16", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromUint32x16", argLength: 3, commutative: false}, {name: "ShiftLeftAndFillUpperFromMaskedUint32x16", argLength: 4, commutative: false}, @@ -1048,8 +1042,6 @@ func simdGenericOps() []opData { {name: "ShiftRightAndFillUpperFromUint32x16", argLength: 3, commutative: false}, {name: "ShiftRightAndFillUpperFromMaskedUint32x16", argLength: 4, commutative: false}, {name: "ShiftRightMaskedUint32x16", argLength: 3, commutative: false}, - {name: "ShiftRightSignExtendedUint32x16", argLength: 2, commutative: false}, - {name: "ShiftRightSignExtendedMaskedUint32x16", argLength: 3, commutative: false}, {name: "SubUint32x16", argLength: 2, commutative: false}, {name: "SubMaskedUint32x16", argLength: 3, commutative: false}, {name: "UnsignedSignedQuadDotProdAccumulateUint32x16", argLength: 3, commutative: false}, @@ -1092,7 +1084,9 @@ func simdGenericOps() []opData { {name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x4", argLength: 3, commutative: false}, {name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x4", argLength: 4, commutative: false}, {name: "ShiftAllLeftUint32x4", argLength: 2, commutative: false}, + {name: "ShiftAllLeftMaskedUint32x4", argLength: 3, commutative: false}, {name: "ShiftAllRightUint32x4", argLength: 2, commutative: false}, + {name: "ShiftAllRightMaskedUint32x4", argLength: 3, commutative: false}, {name: "ShiftLeftUint32x4", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromUint32x4", argLength: 3, commutative: false}, {name: "ShiftLeftAndFillUpperFromMaskedUint32x4", argLength: 4, commutative: false}, @@ -1101,8 +1095,6 @@ func simdGenericOps() []opData { {name: "ShiftRightAndFillUpperFromUint32x4", argLength: 3, commutative: false}, {name: "ShiftRightAndFillUpperFromMaskedUint32x4", argLength: 4, commutative: false}, {name: "ShiftRightMaskedUint32x4", argLength: 3, commutative: false}, - {name: "ShiftRightSignExtendedUint32x4", argLength: 2, commutative: false}, - {name: "ShiftRightSignExtendedMaskedUint32x4", argLength: 3, commutative: false}, {name: "SubUint32x4", 
argLength: 2, commutative: false}, {name: "SubMaskedUint32x4", argLength: 3, commutative: false}, {name: "UnsignedSignedQuadDotProdAccumulateUint32x4", argLength: 3, commutative: false}, @@ -1145,7 +1137,9 @@ func simdGenericOps() []opData { {name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x8", argLength: 3, commutative: false}, {name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x8", argLength: 4, commutative: false}, {name: "ShiftAllLeftUint32x8", argLength: 2, commutative: false}, + {name: "ShiftAllLeftMaskedUint32x8", argLength: 3, commutative: false}, {name: "ShiftAllRightUint32x8", argLength: 2, commutative: false}, + {name: "ShiftAllRightMaskedUint32x8", argLength: 3, commutative: false}, {name: "ShiftLeftUint32x8", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromUint32x8", argLength: 3, commutative: false}, {name: "ShiftLeftAndFillUpperFromMaskedUint32x8", argLength: 4, commutative: false}, @@ -1154,8 +1148,6 @@ func simdGenericOps() []opData { {name: "ShiftRightAndFillUpperFromUint32x8", argLength: 3, commutative: false}, {name: "ShiftRightAndFillUpperFromMaskedUint32x8", argLength: 4, commutative: false}, {name: "ShiftRightMaskedUint32x8", argLength: 3, commutative: false}, - {name: "ShiftRightSignExtendedUint32x8", argLength: 2, commutative: false}, - {name: "ShiftRightSignExtendedMaskedUint32x8", argLength: 3, commutative: false}, {name: "SubUint32x8", argLength: 2, commutative: false}, {name: "SubMaskedUint32x8", argLength: 3, commutative: false}, {name: "UnsignedSignedQuadDotProdAccumulateUint32x8", argLength: 3, commutative: false}, @@ -1206,8 +1198,6 @@ func simdGenericOps() []opData { {name: "ShiftRightAndFillUpperFromUint64x2", argLength: 3, commutative: false}, {name: "ShiftRightAndFillUpperFromMaskedUint64x2", argLength: 4, commutative: false}, {name: "ShiftRightMaskedUint64x2", argLength: 3, commutative: false}, - {name: "ShiftRightSignExtendedUint64x2", argLength: 2, commutative: false}, - {name: "ShiftRightSignExtendedMaskedUint64x2", argLength: 3, commutative: false}, {name: "SubUint64x2", argLength: 2, commutative: false}, {name: "SubMaskedUint64x2", argLength: 3, commutative: false}, {name: "XorUint64x2", argLength: 2, commutative: true}, @@ -1256,8 +1246,6 @@ func simdGenericOps() []opData { {name: "ShiftRightAndFillUpperFromUint64x4", argLength: 3, commutative: false}, {name: "ShiftRightAndFillUpperFromMaskedUint64x4", argLength: 4, commutative: false}, {name: "ShiftRightMaskedUint64x4", argLength: 3, commutative: false}, - {name: "ShiftRightSignExtendedUint64x4", argLength: 2, commutative: false}, - {name: "ShiftRightSignExtendedMaskedUint64x4", argLength: 3, commutative: false}, {name: "SubUint64x4", argLength: 2, commutative: false}, {name: "SubMaskedUint64x4", argLength: 3, commutative: false}, {name: "XorUint64x4", argLength: 2, commutative: true}, @@ -1306,8 +1294,6 @@ func simdGenericOps() []opData { {name: "ShiftRightAndFillUpperFromUint64x8", argLength: 3, commutative: false}, {name: "ShiftRightAndFillUpperFromMaskedUint64x8", argLength: 4, commutative: false}, {name: "ShiftRightMaskedUint64x8", argLength: 3, commutative: false}, - {name: "ShiftRightSignExtendedUint64x8", argLength: 2, commutative: false}, - {name: "ShiftRightSignExtendedMaskedUint64x8", argLength: 3, commutative: false}, {name: "SubUint64x8", argLength: 2, commutative: false}, {name: "SubMaskedUint64x8", argLength: 3, commutative: false}, {name: "XorUint64x8", argLength: 2, commutative: true}, diff --git a/src/cmd/compile/internal/ssa/opGen.go 
b/src/cmd/compile/internal/ssa/opGen.go index d5c5085949b055..9067023f3a7c19 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1393,17 +1393,16 @@ const ( OpAMD64VPSUBSW256 OpAMD64VPSUBSWMasked256 OpAMD64VPSLLW256 - OpAMD64VPSRLW256 + OpAMD64VPSLLWMasked256 OpAMD64VPSRAW256 + OpAMD64VPSRAWMasked256 OpAMD64VPSLLVW256 OpAMD64VPSHLDVW256 OpAMD64VPSHLDVWMasked256 OpAMD64VPSLLVWMasked256 - OpAMD64VPSRLVW256 + OpAMD64VPSRAVW256 OpAMD64VPSHRDVW256 OpAMD64VPSHRDVWMasked256 - OpAMD64VPSRLVWMasked256 - OpAMD64VPSRAVW256 OpAMD64VPSRAVWMasked256 OpAMD64VPSIGNW256 OpAMD64VPSUBW256 @@ -1428,15 +1427,17 @@ const ( OpAMD64VPADDSWMasked512 OpAMD64VPSUBSW512 OpAMD64VPSUBSWMasked512 + OpAMD64VPSLLW512 + OpAMD64VPSLLWMasked512 + OpAMD64VPSRAW512 + OpAMD64VPSRAWMasked512 OpAMD64VPSLLVW512 OpAMD64VPSHLDVW512 OpAMD64VPSHLDVWMasked512 OpAMD64VPSLLVWMasked512 - OpAMD64VPSRLVW512 + OpAMD64VPSRAVW512 OpAMD64VPSHRDVW512 OpAMD64VPSHRDVWMasked512 - OpAMD64VPSRLVWMasked512 - OpAMD64VPSRAVW512 OpAMD64VPSRAVWMasked512 OpAMD64VPSUBW512 OpAMD64VPSUBWMasked512 @@ -1467,17 +1468,16 @@ const ( OpAMD64VPSUBSW128 OpAMD64VPSUBSWMasked128 OpAMD64VPSLLW128 - OpAMD64VPSRLW128 + OpAMD64VPSLLWMasked128 OpAMD64VPSRAW128 + OpAMD64VPSRAWMasked128 OpAMD64VPSLLVW128 OpAMD64VPSHLDVW128 OpAMD64VPSHLDVWMasked128 OpAMD64VPSLLVWMasked128 - OpAMD64VPSRLVW128 + OpAMD64VPSRAVW128 OpAMD64VPSHRDVW128 OpAMD64VPSHRDVWMasked128 - OpAMD64VPSRLVWMasked128 - OpAMD64VPSRAVW128 OpAMD64VPSRAVWMasked128 OpAMD64VPSIGNW128 OpAMD64VPSUBW128 @@ -1510,15 +1510,17 @@ const ( OpAMD64VPDPWSSDSMasked512 OpAMD64VPDPBUSDS512 OpAMD64VPDPBUSDSMasked512 + OpAMD64VPSLLD512 + OpAMD64VPSLLDMasked512 + OpAMD64VPSRAD512 + OpAMD64VPSRADMasked512 OpAMD64VPSLLVD512 OpAMD64VPSHLDVD512 OpAMD64VPSHLDVDMasked512 OpAMD64VPSLLVDMasked512 - OpAMD64VPSRLVD512 + OpAMD64VPSRAVD512 OpAMD64VPSHRDVD512 OpAMD64VPSHRDVDMasked512 - OpAMD64VPSRLVDMasked512 - OpAMD64VPSRAVD512 OpAMD64VPSRAVDMasked512 OpAMD64VPSUBD512 OpAMD64VPSUBDMasked512 @@ -1557,17 +1559,16 @@ const ( OpAMD64VPDPBUSDS128 OpAMD64VPDPBUSDSMasked128 OpAMD64VPSLLD128 - OpAMD64VPSRLD128 + OpAMD64VPSLLDMasked128 OpAMD64VPSRAD128 + OpAMD64VPSRADMasked128 OpAMD64VPSLLVD128 OpAMD64VPSHLDVD128 OpAMD64VPSHLDVDMasked128 OpAMD64VPSLLVDMasked128 - OpAMD64VPSRLVD128 + OpAMD64VPSRAVD128 OpAMD64VPSHRDVD128 OpAMD64VPSHRDVDMasked128 - OpAMD64VPSRLVDMasked128 - OpAMD64VPSRAVD128 OpAMD64VPSRAVDMasked128 OpAMD64VPSIGND128 OpAMD64VPSUBD128 @@ -1606,17 +1607,16 @@ const ( OpAMD64VPDPBUSDS256 OpAMD64VPDPBUSDSMasked256 OpAMD64VPSLLD256 - OpAMD64VPSRLD256 + OpAMD64VPSLLDMasked256 OpAMD64VPSRAD256 + OpAMD64VPSRADMasked256 OpAMD64VPSLLVD256 OpAMD64VPSHLDVD256 OpAMD64VPSHLDVDMasked256 OpAMD64VPSLLVDMasked256 - OpAMD64VPSRLVD256 + OpAMD64VPSRAVD256 OpAMD64VPSHRDVD256 OpAMD64VPSHRDVDMasked256 - OpAMD64VPSRLVDMasked256 - OpAMD64VPSRAVD256 OpAMD64VPSRAVDMasked256 OpAMD64VPSIGND256 OpAMD64VPSUBD256 @@ -1648,19 +1648,15 @@ const ( OpAMD64VPRORVQMasked128 OpAMD64VPSLLQ128 OpAMD64VPSLLQMasked128 - OpAMD64VPSRLQ128 - OpAMD64VPSRLQMasked128 OpAMD64VPSRAQ128 OpAMD64VPSRAQMasked128 OpAMD64VPSLLVQ128 OpAMD64VPSHLDVQ128 OpAMD64VPSHLDVQMasked128 OpAMD64VPSLLVQMasked128 - OpAMD64VPSRLVQ128 + OpAMD64VPSRAVQ128 OpAMD64VPSHRDVQ128 OpAMD64VPSHRDVQMasked128 - OpAMD64VPSRLVQMasked128 - OpAMD64VPSRAVQ128 OpAMD64VPSRAVQMasked128 OpAMD64VPSUBQ128 OpAMD64VPSUBQMasked128 @@ -1689,19 +1685,15 @@ const ( OpAMD64VPRORVQMasked256 OpAMD64VPSLLQ256 OpAMD64VPSLLQMasked256 - OpAMD64VPSRLQ256 - OpAMD64VPSRLQMasked256 OpAMD64VPSRAQ256 
OpAMD64VPSRAQMasked256 OpAMD64VPSLLVQ256 OpAMD64VPSHLDVQ256 OpAMD64VPSHLDVQMasked256 OpAMD64VPSLLVQMasked256 - OpAMD64VPSRLVQ256 + OpAMD64VPSRAVQ256 OpAMD64VPSHRDVQ256 OpAMD64VPSHRDVQMasked256 - OpAMD64VPSRLVQMasked256 - OpAMD64VPSRAVQ256 OpAMD64VPSRAVQMasked256 OpAMD64VPSUBQ256 OpAMD64VPSUBQMasked256 @@ -1732,19 +1724,15 @@ const ( OpAMD64VPRORVQMasked512 OpAMD64VPSLLQ512 OpAMD64VPSLLQMasked512 - OpAMD64VPSRLQ512 - OpAMD64VPSRLQMasked512 OpAMD64VPSRAQ512 OpAMD64VPSRAQMasked512 OpAMD64VPSLLVQ512 OpAMD64VPSHLDVQ512 OpAMD64VPSHLDVQMasked512 OpAMD64VPSLLVQMasked512 - OpAMD64VPSRLVQ512 + OpAMD64VPSRAVQ512 OpAMD64VPSHRDVQ512 OpAMD64VPSHRDVQMasked512 - OpAMD64VPSRLVQMasked512 - OpAMD64VPSRAVQ512 OpAMD64VPSRAVQMasked512 OpAMD64VPSUBQ512 OpAMD64VPSUBQMasked512 @@ -1820,6 +1808,10 @@ const ( OpAMD64VPMINUWMasked256 OpAMD64VPMULHUW256 OpAMD64VPMULHUWMasked256 + OpAMD64VPSRLW256 + OpAMD64VPSRLWMasked256 + OpAMD64VPSRLVW256 + OpAMD64VPSRLVWMasked256 OpAMD64VPAVGW512 OpAMD64VPAVGWMasked512 OpAMD64VPMAXUW512 @@ -1828,6 +1820,10 @@ const ( OpAMD64VPMINUWMasked512 OpAMD64VPMULHUW512 OpAMD64VPMULHUWMasked512 + OpAMD64VPSRLW512 + OpAMD64VPSRLWMasked512 + OpAMD64VPSRLVW512 + OpAMD64VPSRLVWMasked512 OpAMD64VPAVGW128 OpAMD64VPAVGWMasked128 OpAMD64VPMAXUW128 @@ -1836,36 +1832,64 @@ const ( OpAMD64VPMINUWMasked128 OpAMD64VPMULHUW128 OpAMD64VPMULHUWMasked128 + OpAMD64VPSRLW128 + OpAMD64VPSRLWMasked128 + OpAMD64VPSRLVW128 + OpAMD64VPSRLVWMasked128 OpAMD64VPMAXUD512 OpAMD64VPMAXUDMasked512 OpAMD64VPMINUD512 OpAMD64VPMINUDMasked512 + OpAMD64VPSRLD512 + OpAMD64VPSRLDMasked512 + OpAMD64VPSRLVD512 + OpAMD64VPSRLVDMasked512 OpAMD64VPMAXUD128 OpAMD64VPMAXUDMasked128 OpAMD64VPMINUD128 OpAMD64VPMINUDMasked128 OpAMD64VPMULUDQ128 + OpAMD64VPSRLD128 + OpAMD64VPSRLDMasked128 + OpAMD64VPSRLVD128 + OpAMD64VPSRLVDMasked128 OpAMD64VPMAXUD256 OpAMD64VPMAXUDMasked256 OpAMD64VPMINUD256 OpAMD64VPMINUDMasked256 OpAMD64VPMULUDQ256 + OpAMD64VPSRLD256 + OpAMD64VPSRLDMasked256 + OpAMD64VPSRLVD256 + OpAMD64VPSRLVDMasked256 OpAMD64VPMAXUQ128 OpAMD64VPMAXUQMasked128 OpAMD64VPMINUQ128 OpAMD64VPMINUQMasked128 OpAMD64VPMULUDQMasked128 + OpAMD64VPSRLQ128 + OpAMD64VPSRLQMasked128 + OpAMD64VPSRLVQ128 + OpAMD64VPSRLVQMasked128 OpAMD64VPMAXUQ256 OpAMD64VPMAXUQMasked256 OpAMD64VPMINUQ256 OpAMD64VPMINUQMasked256 OpAMD64VPMULUDQMasked256 + OpAMD64VPSRLQ256 + OpAMD64VPSRLQMasked256 + OpAMD64VPSRLVQ256 + OpAMD64VPSRLVQMasked256 OpAMD64VPMAXUQ512 OpAMD64VPMAXUQMasked512 OpAMD64VPMINUQ512 OpAMD64VPMINUQMasked512 OpAMD64VPMULUDQ512 OpAMD64VPMULUDQMasked512 + OpAMD64VPSRLQ512 + OpAMD64VPSRLQMasked512 + OpAMD64VPSRLVQ512 + OpAMD64VPSRLVQMasked512 OpAMD64VPAVGB128 OpAMD64VPAVGBMasked128 OpAMD64VGF2P8MULB128 @@ -4604,8 +4628,9 @@ const ( OpSaturatedSubInt16x16 OpSaturatedSubMaskedInt16x16 OpShiftAllLeftInt16x16 + OpShiftAllLeftMaskedInt16x16 OpShiftAllRightInt16x16 - OpShiftAllRightSignExtendedInt16x16 + OpShiftAllRightMaskedInt16x16 OpShiftLeftInt16x16 OpShiftLeftAndFillUpperFromInt16x16 OpShiftLeftAndFillUpperFromMaskedInt16x16 @@ -4614,8 +4639,6 @@ const ( OpShiftRightAndFillUpperFromInt16x16 OpShiftRightAndFillUpperFromMaskedInt16x16 OpShiftRightMaskedInt16x16 - OpShiftRightSignExtendedInt16x16 - OpShiftRightSignExtendedMaskedInt16x16 OpSignInt16x16 OpSubInt16x16 OpSubMaskedInt16x16 @@ -4652,6 +4675,10 @@ const ( OpSaturatedAddMaskedInt16x32 OpSaturatedSubInt16x32 OpSaturatedSubMaskedInt16x32 + OpShiftAllLeftInt16x32 + OpShiftAllLeftMaskedInt16x32 + OpShiftAllRightInt16x32 + OpShiftAllRightMaskedInt16x32 OpShiftLeftInt16x32 
OpShiftLeftAndFillUpperFromInt16x32 OpShiftLeftAndFillUpperFromMaskedInt16x32 @@ -4660,8 +4687,6 @@ const ( OpShiftRightAndFillUpperFromInt16x32 OpShiftRightAndFillUpperFromMaskedInt16x32 OpShiftRightMaskedInt16x32 - OpShiftRightSignExtendedInt16x32 - OpShiftRightSignExtendedMaskedInt16x32 OpSubInt16x32 OpSubMaskedInt16x32 OpAbsoluteInt16x8 @@ -4704,8 +4729,9 @@ const ( OpSaturatedSubInt16x8 OpSaturatedSubMaskedInt16x8 OpShiftAllLeftInt16x8 + OpShiftAllLeftMaskedInt16x8 OpShiftAllRightInt16x8 - OpShiftAllRightSignExtendedInt16x8 + OpShiftAllRightMaskedInt16x8 OpShiftLeftInt16x8 OpShiftLeftAndFillUpperFromInt16x8 OpShiftLeftAndFillUpperFromMaskedInt16x8 @@ -4714,8 +4740,6 @@ const ( OpShiftRightAndFillUpperFromInt16x8 OpShiftRightAndFillUpperFromMaskedInt16x8 OpShiftRightMaskedInt16x8 - OpShiftRightSignExtendedInt16x8 - OpShiftRightSignExtendedMaskedInt16x8 OpSignInt16x8 OpSubInt16x8 OpSubMaskedInt16x8 @@ -4760,6 +4784,10 @@ const ( OpSaturatedPairDotProdAccumulateMaskedInt32x16 OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16 OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16 + OpShiftAllLeftInt32x16 + OpShiftAllLeftMaskedInt32x16 + OpShiftAllRightInt32x16 + OpShiftAllRightMaskedInt32x16 OpShiftLeftInt32x16 OpShiftLeftAndFillUpperFromInt32x16 OpShiftLeftAndFillUpperFromMaskedInt32x16 @@ -4768,8 +4796,6 @@ const ( OpShiftRightAndFillUpperFromInt32x16 OpShiftRightAndFillUpperFromMaskedInt32x16 OpShiftRightMaskedInt32x16 - OpShiftRightSignExtendedInt32x16 - OpShiftRightSignExtendedMaskedInt32x16 OpSubInt32x16 OpSubMaskedInt32x16 OpUnsignedSignedQuadDotProdAccumulateInt32x16 @@ -4820,8 +4846,9 @@ const ( OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4 OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4 OpShiftAllLeftInt32x4 + OpShiftAllLeftMaskedInt32x4 OpShiftAllRightInt32x4 - OpShiftAllRightSignExtendedInt32x4 + OpShiftAllRightMaskedInt32x4 OpShiftLeftInt32x4 OpShiftLeftAndFillUpperFromInt32x4 OpShiftLeftAndFillUpperFromMaskedInt32x4 @@ -4830,8 +4857,6 @@ const ( OpShiftRightAndFillUpperFromInt32x4 OpShiftRightAndFillUpperFromMaskedInt32x4 OpShiftRightMaskedInt32x4 - OpShiftRightSignExtendedInt32x4 - OpShiftRightSignExtendedMaskedInt32x4 OpSignInt32x4 OpSubInt32x4 OpSubMaskedInt32x4 @@ -4883,8 +4908,9 @@ const ( OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8 OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8 OpShiftAllLeftInt32x8 + OpShiftAllLeftMaskedInt32x8 OpShiftAllRightInt32x8 - OpShiftAllRightSignExtendedInt32x8 + OpShiftAllRightMaskedInt32x8 OpShiftLeftInt32x8 OpShiftLeftAndFillUpperFromInt32x8 OpShiftLeftAndFillUpperFromMaskedInt32x8 @@ -4893,8 +4919,6 @@ const ( OpShiftRightAndFillUpperFromInt32x8 OpShiftRightAndFillUpperFromMaskedInt32x8 OpShiftRightMaskedInt32x8 - OpShiftRightSignExtendedInt32x8 - OpShiftRightSignExtendedMaskedInt32x8 OpSignInt32x8 OpSubInt32x8 OpSubMaskedInt32x8 @@ -4942,8 +4966,6 @@ const ( OpShiftAllLeftMaskedInt64x2 OpShiftAllRightInt64x2 OpShiftAllRightMaskedInt64x2 - OpShiftAllRightSignExtendedInt64x2 - OpShiftAllRightSignExtendedMaskedInt64x2 OpShiftLeftInt64x2 OpShiftLeftAndFillUpperFromInt64x2 OpShiftLeftAndFillUpperFromMaskedInt64x2 @@ -4952,8 +4974,6 @@ const ( OpShiftRightAndFillUpperFromInt64x2 OpShiftRightAndFillUpperFromMaskedInt64x2 OpShiftRightMaskedInt64x2 - OpShiftRightSignExtendedInt64x2 - OpShiftRightSignExtendedMaskedInt64x2 OpSubInt64x2 OpSubMaskedInt64x2 OpXorInt64x2 @@ -4998,8 +5018,6 @@ const ( OpShiftAllLeftMaskedInt64x4 OpShiftAllRightInt64x4 OpShiftAllRightMaskedInt64x4 - 
OpShiftAllRightSignExtendedInt64x4 - OpShiftAllRightSignExtendedMaskedInt64x4 OpShiftLeftInt64x4 OpShiftLeftAndFillUpperFromInt64x4 OpShiftLeftAndFillUpperFromMaskedInt64x4 @@ -5008,8 +5026,6 @@ const ( OpShiftRightAndFillUpperFromInt64x4 OpShiftRightAndFillUpperFromMaskedInt64x4 OpShiftRightMaskedInt64x4 - OpShiftRightSignExtendedInt64x4 - OpShiftRightSignExtendedMaskedInt64x4 OpSubInt64x4 OpSubMaskedInt64x4 OpXorInt64x4 @@ -5054,8 +5070,6 @@ const ( OpShiftAllLeftMaskedInt64x8 OpShiftAllRightInt64x8 OpShiftAllRightMaskedInt64x8 - OpShiftAllRightSignExtendedInt64x8 - OpShiftAllRightSignExtendedMaskedInt64x8 OpShiftLeftInt64x8 OpShiftLeftAndFillUpperFromInt64x8 OpShiftLeftAndFillUpperFromMaskedInt64x8 @@ -5064,8 +5078,6 @@ const ( OpShiftRightAndFillUpperFromInt64x8 OpShiftRightAndFillUpperFromMaskedInt64x8 OpShiftRightMaskedInt64x8 - OpShiftRightSignExtendedInt64x8 - OpShiftRightSignExtendedMaskedInt64x8 OpSubInt64x8 OpSubMaskedInt64x8 OpXorInt64x8 @@ -5198,7 +5210,9 @@ const ( OpSaturatedSubUint16x16 OpSaturatedSubMaskedUint16x16 OpShiftAllLeftUint16x16 + OpShiftAllLeftMaskedUint16x16 OpShiftAllRightUint16x16 + OpShiftAllRightMaskedUint16x16 OpShiftLeftUint16x16 OpShiftLeftAndFillUpperFromUint16x16 OpShiftLeftAndFillUpperFromMaskedUint16x16 @@ -5207,8 +5221,6 @@ const ( OpShiftRightAndFillUpperFromUint16x16 OpShiftRightAndFillUpperFromMaskedUint16x16 OpShiftRightMaskedUint16x16 - OpShiftRightSignExtendedUint16x16 - OpShiftRightSignExtendedMaskedUint16x16 OpSubUint16x16 OpSubMaskedUint16x16 OpXorUint16x16 @@ -5240,6 +5252,10 @@ const ( OpSaturatedAddMaskedUint16x32 OpSaturatedSubUint16x32 OpSaturatedSubMaskedUint16x32 + OpShiftAllLeftUint16x32 + OpShiftAllLeftMaskedUint16x32 + OpShiftAllRightUint16x32 + OpShiftAllRightMaskedUint16x32 OpShiftLeftUint16x32 OpShiftLeftAndFillUpperFromUint16x32 OpShiftLeftAndFillUpperFromMaskedUint16x32 @@ -5248,8 +5264,6 @@ const ( OpShiftRightAndFillUpperFromUint16x32 OpShiftRightAndFillUpperFromMaskedUint16x32 OpShiftRightMaskedUint16x32 - OpShiftRightSignExtendedUint16x32 - OpShiftRightSignExtendedMaskedUint16x32 OpSubUint16x32 OpSubMaskedUint16x32 OpAddUint16x8 @@ -5286,7 +5300,9 @@ const ( OpSaturatedSubUint16x8 OpSaturatedSubMaskedUint16x8 OpShiftAllLeftUint16x8 + OpShiftAllLeftMaskedUint16x8 OpShiftAllRightUint16x8 + OpShiftAllRightMaskedUint16x8 OpShiftLeftUint16x8 OpShiftLeftAndFillUpperFromUint16x8 OpShiftLeftAndFillUpperFromMaskedUint16x8 @@ -5295,8 +5311,6 @@ const ( OpShiftRightAndFillUpperFromUint16x8 OpShiftRightAndFillUpperFromMaskedUint16x8 OpShiftRightMaskedUint16x8 - OpShiftRightSignExtendedUint16x8 - OpShiftRightSignExtendedMaskedUint16x8 OpSubUint16x8 OpSubMaskedUint16x8 OpXorUint16x8 @@ -5332,6 +5346,10 @@ const ( OpRotateRightMaskedUint32x16 OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16 OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x16 + OpShiftAllLeftUint32x16 + OpShiftAllLeftMaskedUint32x16 + OpShiftAllRightUint32x16 + OpShiftAllRightMaskedUint32x16 OpShiftLeftUint32x16 OpShiftLeftAndFillUpperFromUint32x16 OpShiftLeftAndFillUpperFromMaskedUint32x16 @@ -5340,8 +5358,6 @@ const ( OpShiftRightAndFillUpperFromUint32x16 OpShiftRightAndFillUpperFromMaskedUint32x16 OpShiftRightMaskedUint32x16 - OpShiftRightSignExtendedUint32x16 - OpShiftRightSignExtendedMaskedUint32x16 OpSubUint32x16 OpSubMaskedUint32x16 OpUnsignedSignedQuadDotProdAccumulateUint32x16 @@ -5384,7 +5400,9 @@ const ( OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4 OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x4 OpShiftAllLeftUint32x4 + 
OpShiftAllLeftMaskedUint32x4 OpShiftAllRightUint32x4 + OpShiftAllRightMaskedUint32x4 OpShiftLeftUint32x4 OpShiftLeftAndFillUpperFromUint32x4 OpShiftLeftAndFillUpperFromMaskedUint32x4 @@ -5393,8 +5411,6 @@ const ( OpShiftRightAndFillUpperFromUint32x4 OpShiftRightAndFillUpperFromMaskedUint32x4 OpShiftRightMaskedUint32x4 - OpShiftRightSignExtendedUint32x4 - OpShiftRightSignExtendedMaskedUint32x4 OpSubUint32x4 OpSubMaskedUint32x4 OpUnsignedSignedQuadDotProdAccumulateUint32x4 @@ -5437,7 +5453,9 @@ const ( OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8 OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x8 OpShiftAllLeftUint32x8 + OpShiftAllLeftMaskedUint32x8 OpShiftAllRightUint32x8 + OpShiftAllRightMaskedUint32x8 OpShiftLeftUint32x8 OpShiftLeftAndFillUpperFromUint32x8 OpShiftLeftAndFillUpperFromMaskedUint32x8 @@ -5446,8 +5464,6 @@ const ( OpShiftRightAndFillUpperFromUint32x8 OpShiftRightAndFillUpperFromMaskedUint32x8 OpShiftRightMaskedUint32x8 - OpShiftRightSignExtendedUint32x8 - OpShiftRightSignExtendedMaskedUint32x8 OpSubUint32x8 OpSubMaskedUint32x8 OpUnsignedSignedQuadDotProdAccumulateUint32x8 @@ -5498,8 +5514,6 @@ const ( OpShiftRightAndFillUpperFromUint64x2 OpShiftRightAndFillUpperFromMaskedUint64x2 OpShiftRightMaskedUint64x2 - OpShiftRightSignExtendedUint64x2 - OpShiftRightSignExtendedMaskedUint64x2 OpSubUint64x2 OpSubMaskedUint64x2 OpXorUint64x2 @@ -5548,8 +5562,6 @@ const ( OpShiftRightAndFillUpperFromUint64x4 OpShiftRightAndFillUpperFromMaskedUint64x4 OpShiftRightMaskedUint64x4 - OpShiftRightSignExtendedUint64x4 - OpShiftRightSignExtendedMaskedUint64x4 OpSubUint64x4 OpSubMaskedUint64x4 OpXorUint64x4 @@ -5598,8 +5610,6 @@ const ( OpShiftRightAndFillUpperFromUint64x8 OpShiftRightAndFillUpperFromMaskedUint64x8 OpShiftRightMaskedUint64x8 - OpShiftRightSignExtendedUint64x8 - OpShiftRightSignExtendedMaskedUint64x8 OpSubUint64x8 OpSubMaskedUint64x8 OpXorUint64x8 @@ -21491,16 +21501,17 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLW256", - argLen: 2, - asm: x86.AVPSRLW, + name: "VPSLLWMasked256", + argLen: 3, + asm: x86.AVPSLLW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21518,6 +21529,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSRAWMasked256", + argLen: 3, + asm: x86.AVPSRAW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, { name: "VPSLLVW256", argLen: 2, @@ -21581,9 +21607,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVW256", + name: "VPSRAVW256", argLen: 2, - asm: 
x86.AVPSRLVW, + asm: x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -21627,35 +21653,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPSRLVWMasked256", - argLen: 3, - asm: x86.AVPSRLVW, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSRAVW256", - argLen: 2, - asm: x86.AVPSRAVW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, { name: "VPSRAVWMasked256", argLen: 3, @@ -22012,6 +22009,64 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSLLW512", + argLen: 2, + asm: x86.AVPSLLW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSLLWMasked512", + argLen: 3, + asm: x86.AVPSLLW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRAW512", + argLen: 2, + asm: x86.AVPSRAW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRAWMasked512", + argLen: 3, + asm: x86.AVPSRAW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, { name: "VPSLLVW512", argLen: 2, @@ -22075,9 +22130,9 @@ var opcodeTable = [...]opInfo{ }, }, { - 
name: "VPSRLVW512", + name: "VPSRAVW512", argLen: 2, - asm: x86.AVPSRLVW, + asm: x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -22121,35 +22176,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPSRLVWMasked512", - argLen: 3, - asm: x86.AVPSRLVW, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSRAVW512", - argLen: 2, - asm: x86.AVPSRAVW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, { name: "VPSRAVWMasked512", argLen: 3, @@ -22592,16 +22618,17 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLW128", - argLen: 2, - asm: x86.AVPSRLW, + name: "VPSLLWMasked128", + argLen: 3, + asm: x86.AVPSLLW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22619,6 +22646,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSRAWMasked128", + argLen: 3, + asm: x86.AVPSRAW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, { name: "VPSLLVW128", argLen: 2, @@ -22682,9 +22724,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVW128", + name: "VPSRAVW128", argLen: 2, - asm: x86.AVPSRLVW, + asm: x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -22728,35 +22770,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPSRLVWMasked128", - argLen: 3, - asm: x86.AVPSRLVW, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSRAVW128", - argLen: 2, - asm: x86.AVPSRAVW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, { name: "VPSRAVWMasked128", argLen: 3, @@ -23241,6 +23254,64 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSLLD512", + argLen: 2, + asm: x86.AVPSLLD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSLLDMasked512", + argLen: 3, + asm: x86.AVPSLLD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRAD512", + argLen: 2, + asm: x86.AVPSRAD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRADMasked512", + argLen: 3, + asm: x86.AVPSRAD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, { name: "VPSLLVD512", argLen: 2, @@ -23304,9 +23375,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVD512", + name: "VPSRAVD512", argLen: 2, - asm: x86.AVPSRLVD, + asm: x86.AVPSRAVD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -23350,35 +23421,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPSRLVDMasked512", - argLen: 3, - asm: x86.AVPSRLVD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSRAVD512", - argLen: 2, - asm: x86.AVPSRAVD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, { name: "VPSRAVDMasked512", argLen: 3, @@ -23956,16 +23998,17 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLD128", - argLen: 2, - asm: x86.AVPSRLD, + name: "VPSLLDMasked128", + argLen: 3, + asm: x86.AVPSLLD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23983,6 +24026,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSRADMasked128", + argLen: 3, + asm: x86.AVPSRAD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, { name: "VPSLLVD128", argLen: 2, @@ -24046,9 +24104,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVD128", + name: "VPSRAVD128", argLen: 2, - asm: x86.AVPSRLVD, + asm: x86.AVPSRAVD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24092,35 +24150,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPSRLVDMasked128", - argLen: 3, - asm: x86.AVPSRLVD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSRAVD128", - argLen: 2, - asm: x86.AVPSRAVD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPSRAVDMasked128", argLen: 3, @@ -24697,16 +24726,17 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLD256", - 
argLen: 2, - asm: x86.AVPSRLD, + name: "VPSLLDMasked256", + argLen: 3, + asm: x86.AVPSLLD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24724,6 +24754,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSRADMasked256", + argLen: 3, + asm: x86.AVPSRAD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, { name: "VPSLLVD256", argLen: 2, @@ -24787,9 +24832,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVD256", + name: "VPSRAVD256", argLen: 2, - asm: x86.AVPSRLVD, + asm: x86.AVPSRAVD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24833,35 +24878,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPSRLVDMasked256", - argLen: 3, - asm: x86.AVPSRLVD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSRAVD256", - argLen: 2, - asm: x86.AVPSRAVD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPSRAVDMasked256", argLen: 3, @@ -25326,35 +25342,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPSRLQ128", - argLen: 2, - asm: x86.AVPSRLQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSRLQMasked128", - argLen: 3, - asm: x86.AVPSRLQ, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, { 
name: "VPSRAQ128", argLen: 2, @@ -25447,16 +25434,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVQ128", + name: "VPSRAVQ128", argLen: 2, - asm: x86.AVPSRLVQ, + asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25493,35 +25480,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPSRLVQMasked128", - argLen: 3, - asm: x86.AVPSRLVQ, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSRAVQ128", - argLen: 2, - asm: x86.AVPSRAVQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, { name: "VPSRAVQMasked128", argLen: 3, @@ -25939,35 +25897,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPSRLQ256", - argLen: 2, - asm: x86.AVPSRLQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSRLQMasked256", - argLen: 3, - asm: x86.AVPSRLQ, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, { name: "VPSRAQ256", argLen: 2, @@ -26060,16 +25989,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVQ256", + name: "VPSRAVQ256", argLen: 2, - asm: x86.AVPSRLVQ, + asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26106,35 +26035,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPSRLVQMasked256", - argLen: 3, - asm: x86.AVPSRLVQ, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSRAVQ256", - argLen: 2, - asm: x86.AVPSRAVQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, { name: "VPSRAVQMasked256", argLen: 3, @@ -26582,35 +26482,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPSRLQ512", - argLen: 2, - asm: x86.AVPSRLQ, - reg: regInfo{ - inputs: []inputInfo{ - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VPSRLQMasked512", - argLen: 3, - asm: x86.AVPSRLQ, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, { name: "VPSRAQ512", argLen: 2, @@ -26703,9 +26574,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVQ512", + name: "VPSRAVQ512", argLen: 2, - asm: x86.AVPSRLVQ, + asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -26749,35 +26620,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPSRLVQMasked512", - argLen: 3, - asm: x86.AVPSRLVQ, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSRAVQ512", - argLen: 2, - asm: x86.AVPSRAVQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 
X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, { name: "VPSRAVQMasked512", argLen: 3, @@ -27889,6 +27731,64 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSRLW256", + argLen: 2, + asm: x86.AVPSRLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLWMasked256", + argLen: 3, + asm: x86.AVPSRLW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLVW256", + argLen: 2, + asm: x86.AVPSRLVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLVWMasked256", + argLen: 3, + asm: x86.AVPSRLVW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPAVGW512", argLen: 2, @@ -28013,6 +27913,64 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSRLW512", + argLen: 2, + asm: x86.AVPSRLW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLWMasked512", + argLen: 3, + asm: x86.AVPSRLW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLVW512", + argLen: 2, + 
asm: x86.AVPSRLVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLVWMasked512", + argLen: 3, + asm: x86.AVPSRLVW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPAVGW128", argLen: 2, @@ -28137,6 +28095,64 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSRLW128", + argLen: 2, + asm: x86.AVPSRLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLWMasked128", + argLen: 3, + asm: x86.AVPSRLW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLVW128", + argLen: 2, + asm: x86.AVPSRLVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLVWMasked128", + argLen: 3, + asm: x86.AVPSRLVW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMAXUD512", argLen: 2, @@ -28199,6 +28215,64 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSRLD512", + argLen: 2, + asm: x86.AVPSRLD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: 
"VPSRLDMasked512", + argLen: 3, + asm: x86.AVPSRLD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLVD512", + argLen: 2, + asm: x86.AVPSRLVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLVDMasked512", + argLen: 3, + asm: x86.AVPSRLVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMAXUD128", argLen: 2, @@ -28276,6 +28350,64 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSRLD128", + argLen: 2, + asm: x86.AVPSRLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLDMasked128", + argLen: 3, + asm: x86.AVPSRLD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLVD128", + argLen: 2, + asm: x86.AVPSRLVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLVDMasked128", + argLen: 3, + asm: x86.AVPSRLVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMAXUD256", argLen: 2, @@ -28353,6 +28485,64 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSRLD256", + argLen: 2, + asm: x86.AVPSRLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLDMasked256", + argLen: 3, + asm: x86.AVPSRLD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLVD256", + argLen: 2, + asm: x86.AVPSRLVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLVDMasked256", + argLen: 3, + asm: x86.AVPSRLVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMAXUQ128", argLen: 2, @@ -28431,6 +28621,64 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSRLQ128", + argLen: 2, + asm: x86.AVPSRLQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLQMasked128", + argLen: 3, + asm: x86.AVPSRLQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLVQ128", + argLen: 2, + asm: x86.AVPSRLVQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLVQMasked128", + argLen: 3, + asm: x86.AVPSRLVQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMAXUQ256", argLen: 2, @@ -28509,6 +28757,64 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSRLQ256", + argLen: 2, + asm: x86.AVPSRLQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLQMasked256", + argLen: 3, + asm: x86.AVPSRLQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLVQ256", + argLen: 2, + asm: x86.AVPSRLVQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLVQMasked256", + argLen: 3, + asm: x86.AVPSRLVQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMAXUQ512", argLen: 2, @@ -28602,6 +28908,64 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSRLQ512", + argLen: 2, + asm: x86.AVPSRLQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLQMasked512", + argLen: 3, + asm: x86.AVPSRLQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLVQ512", + argLen: 2, + asm: x86.AVPSRLVQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLVQMasked512", + argLen: 3, + asm: x86.AVPSRLVQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPAVGB128", argLen: 2, @@ -60515,14 +60879,19 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "ShiftAllLeftMaskedInt16x16", + argLen: 3, + generic: true, + }, { name: "ShiftAllRightInt16x16", argLen: 2, generic: true, }, { - name: "ShiftAllRightSignExtendedInt16x16", - argLen: 2, + name: "ShiftAllRightMaskedInt16x16", + argLen: 3, generic: true, }, { @@ -60565,16 +60934,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "ShiftRightSignExtendedInt16x16", - argLen: 2, - generic: true, - }, - { - name: "ShiftRightSignExtendedMaskedInt16x16", - argLen: 3, - generic: true, - }, { name: "SignInt16x16", argLen: 2, @@ -60772,6 +61131,26 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "ShiftAllLeftInt16x32", + argLen: 2, + generic: true, + }, + { + name: "ShiftAllLeftMaskedInt16x32", + argLen: 3, + generic: true, + }, + { + name: "ShiftAllRightInt16x32", + argLen: 2, + generic: true, + }, + { + name: "ShiftAllRightMaskedInt16x32", + argLen: 3, + generic: true, + }, { name: "ShiftLeftInt16x32", argLen: 2, @@ -60812,16 +61191,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "ShiftRightSignExtendedInt16x32", - argLen: 2, - generic: true, - }, - { - name: "ShiftRightSignExtendedMaskedInt16x32", - argLen: 3, - generic: true, - }, { name: "SubInt16x32", argLen: 2, @@ -61050,14 +61419,19 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "ShiftAllLeftMaskedInt16x8", + argLen: 3, + generic: true, + }, { name: "ShiftAllRightInt16x8", argLen: 2, generic: true, }, { - name: "ShiftAllRightSignExtendedInt16x8", - argLen: 2, + name: "ShiftAllRightMaskedInt16x8", + argLen: 3, generic: true, }, { @@ -61100,16 +61474,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "ShiftRightSignExtendedInt16x8", - argLen: 2, - generic: true, - }, - { - name: "ShiftRightSignExtendedMaskedInt16x8", - argLen: 3, - generic: true, - }, { name: "SignInt16x8", argLen: 2, @@ -61347,6 +61711,26 @@ var opcodeTable = [...]opInfo{ argLen: 4, generic: true, }, + { + name: "ShiftAllLeftInt32x16", + argLen: 2, + generic: true, + }, + { + name: "ShiftAllLeftMaskedInt32x16", + argLen: 3, + generic: true, + }, + { + name: "ShiftAllRightInt32x16", + argLen: 2, + generic: true, + }, + { + name: "ShiftAllRightMaskedInt32x16", + argLen: 3, + generic: true, + }, { name: "ShiftLeftInt32x16", argLen: 2, @@ -61387,16 +61771,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "ShiftRightSignExtendedInt32x16", - argLen: 2, - generic: true, - }, - { - name: "ShiftRightSignExtendedMaskedInt32x16", - argLen: 3, - generic: true, - }, { name: "SubInt32x16", argLen: 2, @@ -61666,14 +62040,19 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "ShiftAllLeftMaskedInt32x4", + argLen: 3, + generic: true, + }, { name: "ShiftAllRightInt32x4", argLen: 2, generic: true, }, { - name: "ShiftAllRightSignExtendedInt32x4", - argLen: 2, + name: "ShiftAllRightMaskedInt32x4", + argLen: 3, generic: true, }, { @@ -61716,16 +62095,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "ShiftRightSignExtendedInt32x4", - argLen: 2, - generic: true, - }, - { - name: "ShiftRightSignExtendedMaskedInt32x4", - argLen: 3, - generic: true, - }, { name: "SignInt32x4", argLen: 2, @@ -62000,14 +62369,19 @@ var opcodeTable = 
[...]opInfo{ argLen: 2, generic: true, }, + { + name: "ShiftAllLeftMaskedInt32x8", + argLen: 3, + generic: true, + }, { name: "ShiftAllRightInt32x8", argLen: 2, generic: true, }, { - name: "ShiftAllRightSignExtendedInt32x8", - argLen: 2, + name: "ShiftAllRightMaskedInt32x8", + argLen: 3, generic: true, }, { @@ -62050,16 +62424,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "ShiftRightSignExtendedInt32x8", - argLen: 2, - generic: true, - }, - { - name: "ShiftRightSignExtendedMaskedInt32x8", - argLen: 3, - generic: true, - }, { name: "SignInt32x8", argLen: 2, @@ -62315,16 +62679,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "ShiftAllRightSignExtendedInt64x2", - argLen: 2, - generic: true, - }, - { - name: "ShiftAllRightSignExtendedMaskedInt64x2", - argLen: 3, - generic: true, - }, { name: "ShiftLeftInt64x2", argLen: 2, @@ -62365,16 +62719,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "ShiftRightSignExtendedInt64x2", - argLen: 2, - generic: true, - }, - { - name: "ShiftRightSignExtendedMaskedInt64x2", - argLen: 3, - generic: true, - }, { name: "SubInt64x2", argLen: 2, @@ -62615,16 +62959,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "ShiftAllRightSignExtendedInt64x4", - argLen: 2, - generic: true, - }, - { - name: "ShiftAllRightSignExtendedMaskedInt64x4", - argLen: 3, - generic: true, - }, { name: "ShiftLeftInt64x4", argLen: 2, @@ -62665,16 +62999,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "ShiftRightSignExtendedInt64x4", - argLen: 2, - generic: true, - }, - { - name: "ShiftRightSignExtendedMaskedInt64x4", - argLen: 3, - generic: true, - }, { name: "SubInt64x4", argLen: 2, @@ -62915,16 +63239,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "ShiftAllRightSignExtendedInt64x8", - argLen: 2, - generic: true, - }, - { - name: "ShiftAllRightSignExtendedMaskedInt64x8", - argLen: 3, - generic: true, - }, { name: "ShiftLeftInt64x8", argLen: 2, @@ -62965,16 +63279,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "ShiftRightSignExtendedInt64x8", - argLen: 2, - generic: true, - }, - { - name: "ShiftRightSignExtendedMaskedInt64x8", - argLen: 3, - generic: true, - }, { name: "SubInt64x8", argLen: 2, @@ -63697,11 +64001,21 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "ShiftAllLeftMaskedUint16x16", + argLen: 3, + generic: true, + }, { name: "ShiftAllRightUint16x16", argLen: 2, generic: true, }, + { + name: "ShiftAllRightMaskedUint16x16", + argLen: 3, + generic: true, + }, { name: "ShiftLeftUint16x16", argLen: 2, @@ -63742,16 +64056,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "ShiftRightSignExtendedUint16x16", - argLen: 2, - generic: true, - }, - { - name: "ShiftRightSignExtendedMaskedUint16x16", - argLen: 3, - generic: true, - }, { name: "SubUint16x16", argLen: 2, @@ -63924,6 +64228,26 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "ShiftAllLeftUint16x32", + argLen: 2, + generic: true, + }, + { + name: "ShiftAllLeftMaskedUint16x32", + argLen: 3, + generic: true, + }, + { + name: "ShiftAllRightUint16x32", + argLen: 2, + generic: true, + }, + { + name: "ShiftAllRightMaskedUint16x32", + argLen: 3, + generic: true, + }, { name: "ShiftLeftUint16x32", argLen: 2, @@ -63964,16 +64288,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "ShiftRightSignExtendedUint16x32", - argLen: 2, - generic: 
true, - }, - { - name: "ShiftRightSignExtendedMaskedUint16x32", - argLen: 3, - generic: true, - }, { name: "SubUint16x32", argLen: 2, @@ -64172,11 +64486,21 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "ShiftAllLeftMaskedUint16x8", + argLen: 3, + generic: true, + }, { name: "ShiftAllRightUint16x8", argLen: 2, generic: true, }, + { + name: "ShiftAllRightMaskedUint16x8", + argLen: 3, + generic: true, + }, { name: "ShiftLeftUint16x8", argLen: 2, @@ -64217,16 +64541,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "ShiftRightSignExtendedUint16x8", - argLen: 2, - generic: true, - }, - { - name: "ShiftRightSignExtendedMaskedUint16x8", - argLen: 3, - generic: true, - }, { name: "SubUint16x8", argLen: 2, @@ -64417,6 +64731,26 @@ var opcodeTable = [...]opInfo{ argLen: 4, generic: true, }, + { + name: "ShiftAllLeftUint32x16", + argLen: 2, + generic: true, + }, + { + name: "ShiftAllLeftMaskedUint32x16", + argLen: 3, + generic: true, + }, + { + name: "ShiftAllRightUint32x16", + argLen: 2, + generic: true, + }, + { + name: "ShiftAllRightMaskedUint32x16", + argLen: 3, + generic: true, + }, { name: "ShiftLeftUint32x16", argLen: 2, @@ -64457,16 +64791,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "ShiftRightSignExtendedUint32x16", - argLen: 2, - generic: true, - }, - { - name: "ShiftRightSignExtendedMaskedUint32x16", - argLen: 3, - generic: true, - }, { name: "SubUint32x16", argLen: 2, @@ -64694,11 +65018,21 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "ShiftAllLeftMaskedUint32x4", + argLen: 3, + generic: true, + }, { name: "ShiftAllRightUint32x4", argLen: 2, generic: true, }, + { + name: "ShiftAllRightMaskedUint32x4", + argLen: 3, + generic: true, + }, { name: "ShiftLeftUint32x4", argLen: 2, @@ -64739,16 +65073,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "ShiftRightSignExtendedUint32x4", - argLen: 2, - generic: true, - }, - { - name: "ShiftRightSignExtendedMaskedUint32x4", - argLen: 3, - generic: true, - }, { name: "SubUint32x4", argLen: 2, @@ -64976,11 +65300,21 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "ShiftAllLeftMaskedUint32x8", + argLen: 3, + generic: true, + }, { name: "ShiftAllRightUint32x8", argLen: 2, generic: true, }, + { + name: "ShiftAllRightMaskedUint32x8", + argLen: 3, + generic: true, + }, { name: "ShiftLeftUint32x8", argLen: 2, @@ -65021,16 +65355,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "ShiftRightSignExtendedUint32x8", - argLen: 2, - generic: true, - }, - { - name: "ShiftRightSignExtendedMaskedUint32x8", - argLen: 3, - generic: true, - }, { name: "SubUint32x8", argLen: 2, @@ -65299,16 +65623,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "ShiftRightSignExtendedUint64x2", - argLen: 2, - generic: true, - }, - { - name: "ShiftRightSignExtendedMaskedUint64x2", - argLen: 3, - generic: true, - }, { name: "SubUint64x2", argLen: 2, @@ -65567,16 +65881,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "ShiftRightSignExtendedUint64x4", - argLen: 2, - generic: true, - }, - { - name: "ShiftRightSignExtendedMaskedUint64x4", - argLen: 3, - generic: true, - }, { name: "SubUint64x4", argLen: 2, @@ -65835,16 +66139,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "ShiftRightSignExtendedUint64x8", - argLen: 2, - generic: true, - }, - { - name: "ShiftRightSignExtendedMaskedUint64x8", - argLen: 3, - 
generic: true, - }, { name: "SubUint64x8", argLen: 2, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index d258b3bd0e7f97..d78c9212cbae09 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -4131,9 +4131,15 @@ func rewriteValueAMD64(v *Value) bool { case OpShiftAllLeftInt16x16: v.Op = OpAMD64VPSLLW256 return true + case OpShiftAllLeftInt16x32: + v.Op = OpAMD64VPSLLW512 + return true case OpShiftAllLeftInt16x8: v.Op = OpAMD64VPSLLW128 return true + case OpShiftAllLeftInt32x16: + v.Op = OpAMD64VPSLLD512 + return true case OpShiftAllLeftInt32x4: v.Op = OpAMD64VPSLLD128 return true @@ -4149,12 +4155,36 @@ func rewriteValueAMD64(v *Value) bool { case OpShiftAllLeftInt64x8: v.Op = OpAMD64VPSLLQ512 return true + case OpShiftAllLeftMaskedInt16x16: + return rewriteValueAMD64_OpShiftAllLeftMaskedInt16x16(v) + case OpShiftAllLeftMaskedInt16x32: + return rewriteValueAMD64_OpShiftAllLeftMaskedInt16x32(v) + case OpShiftAllLeftMaskedInt16x8: + return rewriteValueAMD64_OpShiftAllLeftMaskedInt16x8(v) + case OpShiftAllLeftMaskedInt32x16: + return rewriteValueAMD64_OpShiftAllLeftMaskedInt32x16(v) + case OpShiftAllLeftMaskedInt32x4: + return rewriteValueAMD64_OpShiftAllLeftMaskedInt32x4(v) + case OpShiftAllLeftMaskedInt32x8: + return rewriteValueAMD64_OpShiftAllLeftMaskedInt32x8(v) case OpShiftAllLeftMaskedInt64x2: return rewriteValueAMD64_OpShiftAllLeftMaskedInt64x2(v) case OpShiftAllLeftMaskedInt64x4: return rewriteValueAMD64_OpShiftAllLeftMaskedInt64x4(v) case OpShiftAllLeftMaskedInt64x8: return rewriteValueAMD64_OpShiftAllLeftMaskedInt64x8(v) + case OpShiftAllLeftMaskedUint16x16: + return rewriteValueAMD64_OpShiftAllLeftMaskedUint16x16(v) + case OpShiftAllLeftMaskedUint16x32: + return rewriteValueAMD64_OpShiftAllLeftMaskedUint16x32(v) + case OpShiftAllLeftMaskedUint16x8: + return rewriteValueAMD64_OpShiftAllLeftMaskedUint16x8(v) + case OpShiftAllLeftMaskedUint32x16: + return rewriteValueAMD64_OpShiftAllLeftMaskedUint32x16(v) + case OpShiftAllLeftMaskedUint32x4: + return rewriteValueAMD64_OpShiftAllLeftMaskedUint32x4(v) + case OpShiftAllLeftMaskedUint32x8: + return rewriteValueAMD64_OpShiftAllLeftMaskedUint32x8(v) case OpShiftAllLeftMaskedUint64x2: return rewriteValueAMD64_OpShiftAllLeftMaskedUint64x2(v) case OpShiftAllLeftMaskedUint64x4: @@ -4164,9 +4194,15 @@ func rewriteValueAMD64(v *Value) bool { case OpShiftAllLeftUint16x16: v.Op = OpAMD64VPSLLW256 return true + case OpShiftAllLeftUint16x32: + v.Op = OpAMD64VPSLLW512 + return true case OpShiftAllLeftUint16x8: v.Op = OpAMD64VPSLLW128 return true + case OpShiftAllLeftUint32x16: + v.Op = OpAMD64VPSLLD512 + return true case OpShiftAllLeftUint32x4: v.Op = OpAMD64VPSLLD128 return true @@ -4273,71 +4309,80 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64VPSHRDQ512 return true case OpShiftAllRightInt16x16: - v.Op = OpAMD64VPSRLW256 + v.Op = OpAMD64VPSRAW256 + return true + case OpShiftAllRightInt16x32: + v.Op = OpAMD64VPSRAW512 return true case OpShiftAllRightInt16x8: - v.Op = OpAMD64VPSRLW128 + v.Op = OpAMD64VPSRAW128 + return true + case OpShiftAllRightInt32x16: + v.Op = OpAMD64VPSRAD512 return true case OpShiftAllRightInt32x4: - v.Op = OpAMD64VPSRLD128 + v.Op = OpAMD64VPSRAD128 return true case OpShiftAllRightInt32x8: - v.Op = OpAMD64VPSRLD256 + v.Op = OpAMD64VPSRAD256 return true case OpShiftAllRightInt64x2: - v.Op = OpAMD64VPSRLQ128 + v.Op = OpAMD64VPSRAQ128 return true case OpShiftAllRightInt64x4: - v.Op = OpAMD64VPSRLQ256 + 
v.Op = OpAMD64VPSRAQ256 return true case OpShiftAllRightInt64x8: - v.Op = OpAMD64VPSRLQ512 + v.Op = OpAMD64VPSRAQ512 return true + case OpShiftAllRightMaskedInt16x16: + return rewriteValueAMD64_OpShiftAllRightMaskedInt16x16(v) + case OpShiftAllRightMaskedInt16x32: + return rewriteValueAMD64_OpShiftAllRightMaskedInt16x32(v) + case OpShiftAllRightMaskedInt16x8: + return rewriteValueAMD64_OpShiftAllRightMaskedInt16x8(v) + case OpShiftAllRightMaskedInt32x16: + return rewriteValueAMD64_OpShiftAllRightMaskedInt32x16(v) + case OpShiftAllRightMaskedInt32x4: + return rewriteValueAMD64_OpShiftAllRightMaskedInt32x4(v) + case OpShiftAllRightMaskedInt32x8: + return rewriteValueAMD64_OpShiftAllRightMaskedInt32x8(v) case OpShiftAllRightMaskedInt64x2: return rewriteValueAMD64_OpShiftAllRightMaskedInt64x2(v) case OpShiftAllRightMaskedInt64x4: return rewriteValueAMD64_OpShiftAllRightMaskedInt64x4(v) case OpShiftAllRightMaskedInt64x8: return rewriteValueAMD64_OpShiftAllRightMaskedInt64x8(v) + case OpShiftAllRightMaskedUint16x16: + return rewriteValueAMD64_OpShiftAllRightMaskedUint16x16(v) + case OpShiftAllRightMaskedUint16x32: + return rewriteValueAMD64_OpShiftAllRightMaskedUint16x32(v) + case OpShiftAllRightMaskedUint16x8: + return rewriteValueAMD64_OpShiftAllRightMaskedUint16x8(v) + case OpShiftAllRightMaskedUint32x16: + return rewriteValueAMD64_OpShiftAllRightMaskedUint32x16(v) + case OpShiftAllRightMaskedUint32x4: + return rewriteValueAMD64_OpShiftAllRightMaskedUint32x4(v) + case OpShiftAllRightMaskedUint32x8: + return rewriteValueAMD64_OpShiftAllRightMaskedUint32x8(v) case OpShiftAllRightMaskedUint64x2: return rewriteValueAMD64_OpShiftAllRightMaskedUint64x2(v) case OpShiftAllRightMaskedUint64x4: return rewriteValueAMD64_OpShiftAllRightMaskedUint64x4(v) case OpShiftAllRightMaskedUint64x8: return rewriteValueAMD64_OpShiftAllRightMaskedUint64x8(v) - case OpShiftAllRightSignExtendedInt16x16: - v.Op = OpAMD64VPSRAW256 - return true - case OpShiftAllRightSignExtendedInt16x8: - v.Op = OpAMD64VPSRAW128 - return true - case OpShiftAllRightSignExtendedInt32x4: - v.Op = OpAMD64VPSRAD128 - return true - case OpShiftAllRightSignExtendedInt32x8: - v.Op = OpAMD64VPSRAD256 - return true - case OpShiftAllRightSignExtendedInt64x2: - v.Op = OpAMD64VPSRAQ128 - return true - case OpShiftAllRightSignExtendedInt64x4: - v.Op = OpAMD64VPSRAQ256 - return true - case OpShiftAllRightSignExtendedInt64x8: - v.Op = OpAMD64VPSRAQ512 - return true - case OpShiftAllRightSignExtendedMaskedInt64x2: - return rewriteValueAMD64_OpShiftAllRightSignExtendedMaskedInt64x2(v) - case OpShiftAllRightSignExtendedMaskedInt64x4: - return rewriteValueAMD64_OpShiftAllRightSignExtendedMaskedInt64x4(v) - case OpShiftAllRightSignExtendedMaskedInt64x8: - return rewriteValueAMD64_OpShiftAllRightSignExtendedMaskedInt64x8(v) case OpShiftAllRightUint16x16: v.Op = OpAMD64VPSRLW256 return true + case OpShiftAllRightUint16x32: + v.Op = OpAMD64VPSRLW512 + return true case OpShiftAllRightUint16x8: v.Op = OpAMD64VPSRLW128 return true + case OpShiftAllRightUint32x16: + v.Op = OpAMD64VPSRLD512 + return true case OpShiftAllRightUint32x4: v.Op = OpAMD64VPSRLD128 return true @@ -4624,31 +4669,31 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64VPSHRDVQ512 return true case OpShiftRightInt16x16: - v.Op = OpAMD64VPSRLVW256 + v.Op = OpAMD64VPSRAVW256 return true case OpShiftRightInt16x32: - v.Op = OpAMD64VPSRLVW512 + v.Op = OpAMD64VPSRAVW512 return true case OpShiftRightInt16x8: - v.Op = OpAMD64VPSRLVW128 + v.Op = OpAMD64VPSRAVW128 return true case 
OpShiftRightInt32x16: - v.Op = OpAMD64VPSRLVD512 + v.Op = OpAMD64VPSRAVD512 return true case OpShiftRightInt32x4: - v.Op = OpAMD64VPSRLVD128 + v.Op = OpAMD64VPSRAVD128 return true case OpShiftRightInt32x8: - v.Op = OpAMD64VPSRLVD256 + v.Op = OpAMD64VPSRAVD256 return true case OpShiftRightInt64x2: - v.Op = OpAMD64VPSRLVQ128 + v.Op = OpAMD64VPSRAVQ128 return true case OpShiftRightInt64x4: - v.Op = OpAMD64VPSRLVQ256 + v.Op = OpAMD64VPSRAVQ256 return true case OpShiftRightInt64x8: - v.Op = OpAMD64VPSRLVQ512 + v.Op = OpAMD64VPSRAVQ512 return true case OpShiftRightMaskedInt16x16: return rewriteValueAMD64_OpShiftRightMaskedInt16x16(v) @@ -4686,96 +4731,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpShiftRightMaskedUint64x4(v) case OpShiftRightMaskedUint64x8: return rewriteValueAMD64_OpShiftRightMaskedUint64x8(v) - case OpShiftRightSignExtendedInt16x16: - v.Op = OpAMD64VPSRAVW256 - return true - case OpShiftRightSignExtendedInt16x32: - v.Op = OpAMD64VPSRAVW512 - return true - case OpShiftRightSignExtendedInt16x8: - v.Op = OpAMD64VPSRAVW128 - return true - case OpShiftRightSignExtendedInt32x16: - v.Op = OpAMD64VPSRAVD512 - return true - case OpShiftRightSignExtendedInt32x4: - v.Op = OpAMD64VPSRAVD128 - return true - case OpShiftRightSignExtendedInt32x8: - v.Op = OpAMD64VPSRAVD256 - return true - case OpShiftRightSignExtendedInt64x2: - v.Op = OpAMD64VPSRAVQ128 - return true - case OpShiftRightSignExtendedInt64x4: - v.Op = OpAMD64VPSRAVQ256 - return true - case OpShiftRightSignExtendedInt64x8: - v.Op = OpAMD64VPSRAVQ512 - return true - case OpShiftRightSignExtendedMaskedInt16x16: - return rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt16x16(v) - case OpShiftRightSignExtendedMaskedInt16x32: - return rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt16x32(v) - case OpShiftRightSignExtendedMaskedInt16x8: - return rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt16x8(v) - case OpShiftRightSignExtendedMaskedInt32x16: - return rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt32x16(v) - case OpShiftRightSignExtendedMaskedInt32x4: - return rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt32x4(v) - case OpShiftRightSignExtendedMaskedInt32x8: - return rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt32x8(v) - case OpShiftRightSignExtendedMaskedInt64x2: - return rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt64x2(v) - case OpShiftRightSignExtendedMaskedInt64x4: - return rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt64x4(v) - case OpShiftRightSignExtendedMaskedInt64x8: - return rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt64x8(v) - case OpShiftRightSignExtendedMaskedUint16x16: - return rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint16x16(v) - case OpShiftRightSignExtendedMaskedUint16x32: - return rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint16x32(v) - case OpShiftRightSignExtendedMaskedUint16x8: - return rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint16x8(v) - case OpShiftRightSignExtendedMaskedUint32x16: - return rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint32x16(v) - case OpShiftRightSignExtendedMaskedUint32x4: - return rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint32x4(v) - case OpShiftRightSignExtendedMaskedUint32x8: - return rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint32x8(v) - case OpShiftRightSignExtendedMaskedUint64x2: - return rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint64x2(v) - case OpShiftRightSignExtendedMaskedUint64x4: - return rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint64x4(v) - case 
OpShiftRightSignExtendedMaskedUint64x8: - return rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint64x8(v) - case OpShiftRightSignExtendedUint16x16: - v.Op = OpAMD64VPSRAVW256 - return true - case OpShiftRightSignExtendedUint16x32: - v.Op = OpAMD64VPSRAVW512 - return true - case OpShiftRightSignExtendedUint16x8: - v.Op = OpAMD64VPSRAVW128 - return true - case OpShiftRightSignExtendedUint32x16: - v.Op = OpAMD64VPSRAVD512 - return true - case OpShiftRightSignExtendedUint32x4: - v.Op = OpAMD64VPSRAVD128 - return true - case OpShiftRightSignExtendedUint32x8: - v.Op = OpAMD64VPSRAVD256 - return true - case OpShiftRightSignExtendedUint64x2: - v.Op = OpAMD64VPSRAVQ128 - return true - case OpShiftRightSignExtendedUint64x4: - v.Op = OpAMD64VPSRAVQ256 - return true - case OpShiftRightSignExtendedUint64x8: - v.Op = OpAMD64VPSRAVQ512 - return true case OpShiftRightUint16x16: v.Op = OpAMD64VPSRLVW256 return true @@ -48631,6 +48586,114 @@ func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint64x8(v *Value) bo return true } } +func rewriteValueAMD64_OpShiftAllLeftMaskedInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftMaskedInt16x16 x y mask) + // result: (VPSLLWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftMaskedInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftMaskedInt16x32 x y mask) + // result: (VPSLLWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftMaskedInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftMaskedInt16x8 x y mask) + // result: (VPSLLWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftMaskedInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftMaskedInt32x16 x y mask) + // result: (VPSLLDMasked512 x y (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftMaskedInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftMaskedInt32x4 x y mask) + // result: (VPSLLDMasked128 x y (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftMaskedInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftMaskedInt32x8 x y mask) + // result: (VPSLLDMasked256 x y 
(VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} func rewriteValueAMD64_OpShiftAllLeftMaskedInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -48685,6 +48748,114 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt64x8(v *Value) bool { return true } } +func rewriteValueAMD64_OpShiftAllLeftMaskedUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftMaskedUint16x16 x y mask) + // result: (VPSLLWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftMaskedUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftMaskedUint16x32 x y mask) + // result: (VPSLLWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftMaskedUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftMaskedUint16x8 x y mask) + // result: (VPSLLWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftMaskedUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftMaskedUint32x16 x y mask) + // result: (VPSLLDMasked512 x y (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftMaskedUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftMaskedUint32x4 x y mask) + // result: (VPSLLDMasked128 x y (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftMaskedUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftMaskedUint32x8 x y mask) + // result: (VPSLLDMasked256 x y (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} func rewriteValueAMD64_OpShiftAllLeftMaskedUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -49099,18 +49270,126 @@ func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint64x8(v *Value) b return true } } +func rewriteValueAMD64_OpShiftAllRightMaskedInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := 
v.Block + // match: (ShiftAllRightMaskedInt16x16 x y mask) + // result: (VPSRAWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRAWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightMaskedInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllRightMaskedInt16x32 x y mask) + // result: (VPSRAWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRAWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightMaskedInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllRightMaskedInt16x8 x y mask) + // result: (VPSRAWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRAWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightMaskedInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllRightMaskedInt32x16 x y mask) + // result: (VPSRADMasked512 x y (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRADMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightMaskedInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllRightMaskedInt32x4 x y mask) + // result: (VPSRADMasked128 x y (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRADMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightMaskedInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllRightMaskedInt32x8 x y mask) + // result: (VPSRADMasked256 x y (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRADMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} func rewriteValueAMD64_OpShiftAllRightMaskedInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block // match: (ShiftAllRightMaskedInt64x2 x y mask) - // result: (VPSRLQMasked128 x y (VPMOVVec64x2ToM mask)) + // result: (VPSRAQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLQMasked128) + v.reset(OpAMD64VPSRAQMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -49123,12 +49402,12 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt64x4(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllRightMaskedInt64x4 x y mask) - // result: (VPSRLQMasked256 x y (VPMOVVec64x4ToM mask)) + // result: (VPSRAQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLQMasked256) + v.reset(OpAMD64VPSRAQMasked256) v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -49141,120 +49420,174 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt64x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllRightMaskedInt64x8 x y mask) - // result: (VPSRLQMasked512 x y (VPMOVVec64x8ToM mask)) + // result: (VPSRAQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLQMasked512) + v.reset(OpAMD64VPSRAQMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightMaskedUint64x2(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightMaskedUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightMaskedUint64x2 x y mask) - // result: (VPSRLQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (ShiftAllRightMaskedUint16x16 x y mask) + // result: (VPSRLWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPSRLWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightMaskedUint64x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightMaskedUint16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightMaskedUint64x4 x y mask) - // result: (VPSRLQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (ShiftAllRightMaskedUint16x32 x y mask) + // result: (VPSRLWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPSRLWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightMaskedUint64x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightMaskedUint16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightMaskedUint64x8 x y mask) - // result: (VPSRLQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (ShiftAllRightMaskedUint16x8 x y mask) + // result: (VPSRLWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v.reset(OpAMD64VPSRLWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightSignExtendedMaskedInt64x2(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightMaskedUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightSignExtendedMaskedInt64x2 x y mask) - // result: (VPSRAQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (ShiftAllRightMaskedUint32x16 x y mask) + // result: (VPSRLDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRAQMasked128) + v.reset(OpAMD64VPSRLDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightMaskedUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := 
v.Block + // match: (ShiftAllRightMaskedUint32x4 x y mask) + // result: (VPSRLDMasked128 x y (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightMaskedUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllRightMaskedUint32x8 x y mask) + // result: (VPSRLDMasked256 x y (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightMaskedUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllRightMaskedUint64x2 x y mask) + // result: (VPSRLQMasked128 x y (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLQMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightSignExtendedMaskedInt64x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightMaskedUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightSignExtendedMaskedInt64x4 x y mask) - // result: (VPSRAQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (ShiftAllRightMaskedUint64x4 x y mask) + // result: (VPSRLQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRAQMasked256) + v.reset(OpAMD64VPSRLQMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightSignExtendedMaskedInt64x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightMaskedUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightSignExtendedMaskedInt64x8 x y mask) - // result: (VPSRAQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (ShiftAllRightMaskedUint64x8 x y mask) + // result: (VPSRLQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRAQMasked512) + v.reset(OpAMD64VPSRLQMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -50311,12 +50644,12 @@ func rewriteValueAMD64_OpShiftRightMaskedInt16x16(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftRightMaskedInt16x16 x y mask) - // result: (VPSRLVWMasked256 x y (VPMOVVec16x16ToM mask)) + // result: (VPSRAVWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLVWMasked256) + v.reset(OpAMD64VPSRAVWMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -50329,12 +50662,12 @@ func rewriteValueAMD64_OpShiftRightMaskedInt16x32(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftRightMaskedInt16x32 x y mask) - // result: (VPSRLVWMasked512 x y (VPMOVVec16x32ToM mask)) + // result: (VPSRAVWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLVWMasked512) + v.reset(OpAMD64VPSRAVWMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -50347,12 
+50680,12 @@ func rewriteValueAMD64_OpShiftRightMaskedInt16x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftRightMaskedInt16x8 x y mask) - // result: (VPSRLVWMasked128 x y (VPMOVVec16x8ToM mask)) + // result: (VPSRAVWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLVWMasked128) + v.reset(OpAMD64VPSRAVWMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -50365,12 +50698,12 @@ func rewriteValueAMD64_OpShiftRightMaskedInt32x16(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftRightMaskedInt32x16 x y mask) - // result: (VPSRLVDMasked512 x y (VPMOVVec32x16ToM mask)) + // result: (VPSRAVDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLVDMasked512) + v.reset(OpAMD64VPSRAVDMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -50383,12 +50716,12 @@ func rewriteValueAMD64_OpShiftRightMaskedInt32x4(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftRightMaskedInt32x4 x y mask) - // result: (VPSRLVDMasked128 x y (VPMOVVec32x4ToM mask)) + // result: (VPSRAVDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLVDMasked128) + v.reset(OpAMD64VPSRAVDMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -50401,12 +50734,12 @@ func rewriteValueAMD64_OpShiftRightMaskedInt32x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftRightMaskedInt32x8 x y mask) - // result: (VPSRLVDMasked256 x y (VPMOVVec32x8ToM mask)) + // result: (VPSRAVDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLVDMasked256) + v.reset(OpAMD64VPSRAVDMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -50419,12 +50752,12 @@ func rewriteValueAMD64_OpShiftRightMaskedInt64x2(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftRightMaskedInt64x2 x y mask) - // result: (VPSRLVQMasked128 x y (VPMOVVec64x2ToM mask)) + // result: (VPSRAVQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLVQMasked128) + v.reset(OpAMD64VPSRAVQMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -50437,12 +50770,12 @@ func rewriteValueAMD64_OpShiftRightMaskedInt64x4(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftRightMaskedInt64x4 x y mask) - // result: (VPSRLVQMasked256 x y (VPMOVVec64x4ToM mask)) + // result: (VPSRAVQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLVQMasked256) + v.reset(OpAMD64VPSRAVQMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -50455,12 +50788,12 @@ func rewriteValueAMD64_OpShiftRightMaskedInt64x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftRightMaskedInt64x8 x y mask) - // result: (VPSRLVQMasked512 x y (VPMOVVec64x8ToM mask)) + // result: (VPSRAVQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLVQMasked512) + v.reset(OpAMD64VPSRAVQMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -50629,330 +50962,6 @@ func rewriteValueAMD64_OpShiftRightMaskedUint64x8(v *Value) bool { return true } } -func 
rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightSignExtendedMaskedInt16x16 x y mask) - // result: (VPSRAVWMasked256 x y (VPMOVVec16x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightSignExtendedMaskedInt16x32 x y mask) - // result: (VPSRAVWMasked512 x y (VPMOVVec16x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightSignExtendedMaskedInt16x8 x y mask) - // result: (VPSRAVWMasked128 x y (VPMOVVec16x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightSignExtendedMaskedInt32x16 x y mask) - // result: (VPSRAVDMasked512 x y (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightSignExtendedMaskedInt32x4 x y mask) - // result: (VPSRAVDMasked128 x y (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightSignExtendedMaskedInt32x8 x y mask) - // result: (VPSRAVDMasked256 x y (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightSignExtendedMaskedInt64x2 x y mask) - // result: (VPSRAVQMasked128 x y (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightSignExtendedMaskedInt64x4 x y mask) - // 
result: (VPSRAVQMasked256 x y (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightSignExtendedMaskedInt64x8 x y mask) - // result: (VPSRAVQMasked512 x y (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightSignExtendedMaskedUint16x16 x y mask) - // result: (VPSRAVWMasked256 x y (VPMOVVec16x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightSignExtendedMaskedUint16x32 x y mask) - // result: (VPSRAVWMasked512 x y (VPMOVVec16x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightSignExtendedMaskedUint16x8 x y mask) - // result: (VPSRAVWMasked128 x y (VPMOVVec16x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightSignExtendedMaskedUint32x16 x y mask) - // result: (VPSRAVDMasked512 x y (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightSignExtendedMaskedUint32x4 x y mask) - // result: (VPSRAVDMasked128 x y (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightSignExtendedMaskedUint32x8 x y mask) - // result: (VPSRAVDMasked256 x y (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, 
types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightSignExtendedMaskedUint64x2 x y mask) - // result: (VPSRAVQMasked128 x y (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightSignExtendedMaskedUint64x4 x y mask) - // result: (VPSRAVQMasked256 x y (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightSignExtendedMaskedUint64x8 x y mask) - // result: (VPSRAVQMasked512 x y (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} func rewriteValueAMD64_OpSlicemask(v *Value) bool { v_0 := v.Args[0] b := v.Block diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index ffd341d6aba47e..085c0b8d995b6c 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -1250,15 +1250,19 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.SetElem", opLen2Imm8(ssa.OpSetElemUint64x2, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Int16x8.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int64x2.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftInt64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int64x4.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftInt64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int64x8.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftInt64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint16x8.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x16.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint32x4.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftUint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x8.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftUint32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint64x2.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftUint64x8, types.TypeVec512), sys.AMD64) @@ -1298,23 +1302,39 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedUint64x2, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Uint64x4.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedUint64x4, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Uint64x8.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedUint64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int64x2.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedInt64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int64x4.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedInt64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int64x8.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedUint32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint64x2.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.ShiftAllRight", opLen2(ssa.OpShiftAllRightInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.ShiftAllRight", opLen2(ssa.OpShiftAllRightInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftAllRight", opLen2(ssa.OpShiftAllRightInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.ShiftAllRight", opLen2(ssa.OpShiftAllRightInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.ShiftAllRight", opLen2(ssa.OpShiftAllRightInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftAllRight", opLen2(ssa.OpShiftAllRightInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int64x2.ShiftAllRight", opLen2(ssa.OpShiftAllRightInt64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int64x4.ShiftAllRight", opLen2(ssa.OpShiftAllRightInt64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, 
"Int64x8.ShiftAllRight", opLen2(ssa.OpShiftAllRightInt64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint16x8.ShiftAllRight", opLen2(ssa.OpShiftAllRightUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x16.ShiftAllRight", opLen2(ssa.OpShiftAllRightUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.ShiftAllRight", opLen2(ssa.OpShiftAllRightUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint32x4.ShiftAllRight", opLen2(ssa.OpShiftAllRightUint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x8.ShiftAllRight", opLen2(ssa.OpShiftAllRightUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftAllRight", opLen2(ssa.OpShiftAllRightUint32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint64x2.ShiftAllRight", opLen2(ssa.OpShiftAllRightUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.ShiftAllRight", opLen2(ssa.OpShiftAllRightUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.ShiftAllRight", opLen2(ssa.OpShiftAllRightUint64x8, types.TypeVec512), sys.AMD64) @@ -1354,22 +1374,24 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedUint64x2, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Uint64x4.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedUint64x4, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Uint64x8.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedUint64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int64x2.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedInt64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int64x4.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedInt64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int64x8.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedUint32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, 
"Uint64x2.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.ShiftAllRightSignExtended", opLen2(ssa.OpShiftAllRightSignExtendedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.ShiftAllRightSignExtended", opLen2(ssa.OpShiftAllRightSignExtendedInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x4.ShiftAllRightSignExtended", opLen2(ssa.OpShiftAllRightSignExtendedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.ShiftAllRightSignExtended", opLen2(ssa.OpShiftAllRightSignExtendedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x2.ShiftAllRightSignExtended", opLen2(ssa.OpShiftAllRightSignExtendedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.ShiftAllRightSignExtended", opLen2(ssa.OpShiftAllRightSignExtendedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.ShiftAllRightSignExtended", opLen2(ssa.OpShiftAllRightSignExtendedInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.ShiftAllRightSignExtendedMasked", opLen3(ssa.OpShiftAllRightSignExtendedMaskedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.ShiftAllRightSignExtendedMasked", opLen3(ssa.OpShiftAllRightSignExtendedMaskedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.ShiftAllRightSignExtendedMasked", opLen3(ssa.OpShiftAllRightSignExtendedMaskedInt64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.ShiftLeft", opLen2(ssa.OpShiftLeftInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.ShiftLeft", opLen2(ssa.OpShiftLeftInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.ShiftLeft", opLen2(ssa.OpShiftLeftInt16x32, types.TypeVec512), sys.AMD64) @@ -1514,42 +1536,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, 
"Int64x2.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.Sign", opLen2(ssa.OpSignInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Sign", opLen2(ssa.OpSignInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x8.Sign", opLen2(ssa.OpSignInt16x8, types.TypeVec128), sys.AMD64) diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index e98aca1abfba64..38ccfaac8c6e37 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -6883,6 +6883,11 @@ func (x Int16x8) ShiftAllLeft(y uint64) Int16x8 // Asm: VPSLLW, CPU Feature: AVX2 func (x Int16x16) ShiftAllLeft(y uint64) Int16x16 +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLW, CPU Feature: AVX512EVEX +func (x Int16x32) ShiftAllLeft(y uint64) Int16x32 + // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLD, CPU Feature: AVX @@ -6893,6 +6898,11 @@ func (x Int32x4) ShiftAllLeft(y uint64) Int32x4 // Asm: VPSLLD, CPU Feature: AVX2 func (x Int32x8) ShiftAllLeft(y uint64) Int32x8 +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLD, CPU Feature: AVX512EVEX +func (x Int32x16) ShiftAllLeft(y uint64) Int32x16 + // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLQ, CPU Feature: AVX @@ -6918,6 +6928,11 @@ func (x Uint16x8) ShiftAllLeft(y uint64) Uint16x8 // Asm: VPSLLW, CPU Feature: AVX2 func (x Uint16x16) ShiftAllLeft(y uint64) Uint16x16 +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. 
+// +// Asm: VPSLLW, CPU Feature: AVX512EVEX +func (x Uint16x32) ShiftAllLeft(y uint64) Uint16x32 + // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLD, CPU Feature: AVX @@ -6928,6 +6943,11 @@ func (x Uint32x4) ShiftAllLeft(y uint64) Uint32x4 // Asm: VPSLLD, CPU Feature: AVX2 func (x Uint32x8) ShiftAllLeft(y uint64) Uint32x8 +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLD, CPU Feature: AVX512EVEX +func (x Uint32x16) ShiftAllLeft(y uint64) Uint32x16 + // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLQ, CPU Feature: AVX @@ -7237,6 +7257,36 @@ func (x Uint64x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint64x8, z /* ShiftAllLeftMasked */ +// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLW, CPU Feature: AVX512EVEX +func (x Int16x8) ShiftAllLeftMasked(y uint64, z Mask16x8) Int16x8 + +// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLW, CPU Feature: AVX512EVEX +func (x Int16x16) ShiftAllLeftMasked(y uint64, z Mask16x16) Int16x16 + +// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLW, CPU Feature: AVX512EVEX +func (x Int16x32) ShiftAllLeftMasked(y uint64, z Mask16x32) Int16x32 + +// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLD, CPU Feature: AVX512EVEX +func (x Int32x4) ShiftAllLeftMasked(y uint64, z Mask32x4) Int32x4 + +// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLD, CPU Feature: AVX512EVEX +func (x Int32x8) ShiftAllLeftMasked(y uint64, z Mask32x8) Int32x8 + +// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLD, CPU Feature: AVX512EVEX +func (x Int32x16) ShiftAllLeftMasked(y uint64, z Mask32x16) Int32x16 + // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLQ, CPU Feature: AVX512EVEX @@ -7252,6 +7302,36 @@ func (x Int64x4) ShiftAllLeftMasked(y uint64, z Mask64x4) Int64x4 // Asm: VPSLLQ, CPU Feature: AVX512EVEX func (x Int64x8) ShiftAllLeftMasked(y uint64, z Mask64x8) Int64x8 +// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLW, CPU Feature: AVX512EVEX +func (x Uint16x8) ShiftAllLeftMasked(y uint64, z Mask16x8) Uint16x8 + +// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLW, CPU Feature: AVX512EVEX +func (x Uint16x16) ShiftAllLeftMasked(y uint64, z Mask16x16) Uint16x16 + +// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLW, CPU Feature: AVX512EVEX +func (x Uint16x32) ShiftAllLeftMasked(y uint64, z Mask16x32) Uint16x32 + +// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. 
+// +// Asm: VPSLLD, CPU Feature: AVX512EVEX +func (x Uint32x4) ShiftAllLeftMasked(y uint64, z Mask32x4) Uint32x4 + +// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLD, CPU Feature: AVX512EVEX +func (x Uint32x8) ShiftAllLeftMasked(y uint64, z Mask32x8) Uint32x8 + +// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLD, CPU Feature: AVX512EVEX +func (x Uint32x16) ShiftAllLeftMasked(y uint64, z Mask32x16) Uint32x16 + // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLQ, CPU Feature: AVX512EVEX @@ -7269,39 +7349,49 @@ func (x Uint64x8) ShiftAllLeftMasked(y uint64, z Mask64x8) Uint64x8 /* ShiftAllRight */ -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLW, CPU Feature: AVX +// Asm: VPSRAW, CPU Feature: AVX func (x Int16x8) ShiftAllRight(y uint64) Int16x8 -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLW, CPU Feature: AVX2 +// Asm: VPSRAW, CPU Feature: AVX2 func (x Int16x16) ShiftAllRight(y uint64) Int16x16 -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLD, CPU Feature: AVX +// Asm: VPSRAW, CPU Feature: AVX512EVEX +func (x Int16x32) ShiftAllRight(y uint64) Int16x32 + +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAD, CPU Feature: AVX func (x Int32x4) ShiftAllRight(y uint64) Int32x4 -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLD, CPU Feature: AVX2 +// Asm: VPSRAD, CPU Feature: AVX2 func (x Int32x8) ShiftAllRight(y uint64) Int32x8 -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLQ, CPU Feature: AVX +// Asm: VPSRAD, CPU Feature: AVX512EVEX +func (x Int32x16) ShiftAllRight(y uint64) Int32x16 + +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAQ, CPU Feature: AVX512EVEX func (x Int64x2) ShiftAllRight(y uint64) Int64x2 -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. 
// -// Asm: VPSRLQ, CPU Feature: AVX2 +// Asm: VPSRAQ, CPU Feature: AVX512EVEX func (x Int64x4) ShiftAllRight(y uint64) Int64x4 -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLQ, CPU Feature: AVX512EVEX +// Asm: VPSRAQ, CPU Feature: AVX512EVEX func (x Int64x8) ShiftAllRight(y uint64) Int64x8 // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. @@ -7314,6 +7404,11 @@ func (x Uint16x8) ShiftAllRight(y uint64) Uint16x8 // Asm: VPSRLW, CPU Feature: AVX2 func (x Uint16x16) ShiftAllRight(y uint64) Uint16x16 +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// +// Asm: VPSRLW, CPU Feature: AVX512EVEX +func (x Uint16x32) ShiftAllRight(y uint64) Uint16x32 + // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // // Asm: VPSRLD, CPU Feature: AVX @@ -7324,6 +7419,11 @@ func (x Uint32x4) ShiftAllRight(y uint64) Uint32x4 // Asm: VPSRLD, CPU Feature: AVX2 func (x Uint32x8) ShiftAllRight(y uint64) Uint32x8 +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// +// Asm: VPSRLD, CPU Feature: AVX512EVEX +func (x Uint32x16) ShiftAllRight(y uint64) Uint32x16 + // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // // Asm: VPSRLQ, CPU Feature: AVX @@ -7633,89 +7733,95 @@ func (x Uint64x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint64x8, z /* ShiftAllRightMasked */ -// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLQ, CPU Feature: AVX512EVEX -func (x Int64x2) ShiftAllRightMasked(y uint64, z Mask64x2) Int64x2 +// Asm: VPSRAW, CPU Feature: AVX512EVEX +func (x Int16x8) ShiftAllRightMasked(y uint64, z Mask16x8) Int16x8 -// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLQ, CPU Feature: AVX512EVEX -func (x Int64x4) ShiftAllRightMasked(y uint64, z Mask64x4) Int64x4 +// Asm: VPSRAW, CPU Feature: AVX512EVEX +func (x Int16x16) ShiftAllRightMasked(y uint64, z Mask16x16) Int16x16 -// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLQ, CPU Feature: AVX512EVEX -func (x Int64x8) ShiftAllRightMasked(y uint64, z Mask64x8) Int64x8 +// Asm: VPSRAW, CPU Feature: AVX512EVEX +func (x Int16x32) ShiftAllRightMasked(y uint64, z Mask16x32) Int16x32 -// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. 
// -// Asm: VPSRLQ, CPU Feature: AVX512EVEX -func (x Uint64x2) ShiftAllRightMasked(y uint64, z Mask64x2) Uint64x2 +// Asm: VPSRAD, CPU Feature: AVX512EVEX +func (x Int32x4) ShiftAllRightMasked(y uint64, z Mask32x4) Int32x4 -// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLQ, CPU Feature: AVX512EVEX -func (x Uint64x4) ShiftAllRightMasked(y uint64, z Mask64x4) Uint64x4 +// Asm: VPSRAD, CPU Feature: AVX512EVEX +func (x Int32x8) ShiftAllRightMasked(y uint64, z Mask32x8) Int32x8 -// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLQ, CPU Feature: AVX512EVEX -func (x Uint64x8) ShiftAllRightMasked(y uint64, z Mask64x8) Uint64x8 +// Asm: VPSRAD, CPU Feature: AVX512EVEX +func (x Int32x16) ShiftAllRightMasked(y uint64, z Mask32x16) Int32x16 -/* ShiftAllRightSignExtended */ +// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAQ, CPU Feature: AVX512EVEX +func (x Int64x2) ShiftAllRightMasked(y uint64, z Mask64x2) Int64x2 -// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. +// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAW, CPU Feature: AVX -func (x Int16x8) ShiftAllRightSignExtended(y uint64) Int16x8 +// Asm: VPSRAQ, CPU Feature: AVX512EVEX +func (x Int64x4) ShiftAllRightMasked(y uint64, z Mask64x4) Int64x4 -// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. +// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAW, CPU Feature: AVX2 -func (x Int16x16) ShiftAllRightSignExtended(y uint64) Int16x16 +// Asm: VPSRAQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftAllRightMasked(y uint64, z Mask64x8) Int64x8 -// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. +// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSRAD, CPU Feature: AVX -func (x Int32x4) ShiftAllRightSignExtended(y uint64) Int32x4 +// Asm: VPSRLW, CPU Feature: AVX512EVEX +func (x Uint16x8) ShiftAllRightMasked(y uint64, z Mask16x8) Uint16x8 -// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. +// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSRAD, CPU Feature: AVX2 -func (x Int32x8) ShiftAllRightSignExtended(y uint64) Int32x8 +// Asm: VPSRLW, CPU Feature: AVX512EVEX +func (x Uint16x16) ShiftAllRightMasked(y uint64, z Mask16x16) Uint16x16 -// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. 
Emptied upper bits are filled with the sign bit. +// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSRAQ, CPU Feature: AVX512EVEX -func (x Int64x2) ShiftAllRightSignExtended(y uint64) Int64x2 +// Asm: VPSRLW, CPU Feature: AVX512EVEX +func (x Uint16x32) ShiftAllRightMasked(y uint64, z Mask16x32) Uint16x32 -// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. +// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSRAQ, CPU Feature: AVX512EVEX -func (x Int64x4) ShiftAllRightSignExtended(y uint64) Int64x4 +// Asm: VPSRLD, CPU Feature: AVX512EVEX +func (x Uint32x4) ShiftAllRightMasked(y uint64, z Mask32x4) Uint32x4 -// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. +// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSRAQ, CPU Feature: AVX512EVEX -func (x Int64x8) ShiftAllRightSignExtended(y uint64) Int64x8 +// Asm: VPSRLD, CPU Feature: AVX512EVEX +func (x Uint32x8) ShiftAllRightMasked(y uint64, z Mask32x8) Uint32x8 -/* ShiftAllRightSignExtendedMasked */ +// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// +// Asm: VPSRLD, CPU Feature: AVX512EVEX +func (x Uint32x16) ShiftAllRightMasked(y uint64, z Mask32x16) Uint32x16 -// ShiftAllRightSignExtendedMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. +// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSRAQ, CPU Feature: AVX512EVEX -func (x Int64x2) ShiftAllRightSignExtendedMasked(y uint64, z Mask64x2) Int64x2 +// Asm: VPSRLQ, CPU Feature: AVX512EVEX +func (x Uint64x2) ShiftAllRightMasked(y uint64, z Mask64x2) Uint64x2 -// ShiftAllRightSignExtendedMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. +// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSRAQ, CPU Feature: AVX512EVEX -func (x Int64x4) ShiftAllRightSignExtendedMasked(y uint64, z Mask64x4) Int64x4 +// Asm: VPSRLQ, CPU Feature: AVX512EVEX +func (x Uint64x4) ShiftAllRightMasked(y uint64, z Mask64x4) Uint64x4 -// ShiftAllRightSignExtendedMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. +// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSRAQ, CPU Feature: AVX512EVEX -func (x Int64x8) ShiftAllRightSignExtendedMasked(y uint64, z Mask64x8) Int64x8 +// Asm: VPSRLQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftAllRightMasked(y uint64, z Mask64x8) Uint64x8 /* ShiftLeft */ @@ -8123,49 +8229,49 @@ func (x Uint64x8) ShiftLeftMasked(y Uint64x8, z Mask64x8) Uint64x8 /* ShiftRight */ -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. 
Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLVW, CPU Feature: AVX512EVEX +// Asm: VPSRAVW, CPU Feature: AVX512EVEX func (x Int16x8) ShiftRight(y Int16x8) Int16x8 -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLVW, CPU Feature: AVX512EVEX +// Asm: VPSRAVW, CPU Feature: AVX512EVEX func (x Int16x16) ShiftRight(y Int16x16) Int16x16 -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLVW, CPU Feature: AVX512EVEX +// Asm: VPSRAVW, CPU Feature: AVX512EVEX func (x Int16x32) ShiftRight(y Int16x32) Int16x32 -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLVD, CPU Feature: AVX2 +// Asm: VPSRAVD, CPU Feature: AVX2 func (x Int32x4) ShiftRight(y Int32x4) Int32x4 -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLVD, CPU Feature: AVX2 +// Asm: VPSRAVD, CPU Feature: AVX2 func (x Int32x8) ShiftRight(y Int32x8) Int32x8 -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLVD, CPU Feature: AVX512EVEX +// Asm: VPSRAVD, CPU Feature: AVX512EVEX func (x Int32x16) ShiftRight(y Int32x16) Int32x16 -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLVQ, CPU Feature: AVX2 +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX func (x Int64x2) ShiftRight(y Int64x2) Int64x2 -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLVQ, CPU Feature: AVX2 +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX func (x Int64x4) ShiftRight(y Int64x4) Int64x4 -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. 
Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX func (x Int64x8) ShiftRight(y Int64x8) Int64x8 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. @@ -8435,49 +8541,49 @@ func (x Uint64x8) ShiftRightAndFillUpperFromMasked(y Uint64x8, z Uint64x8, u Mas /* ShiftRightMasked */ -// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLVW, CPU Feature: AVX512EVEX +// Asm: VPSRAVW, CPU Feature: AVX512EVEX func (x Int16x8) ShiftRightMasked(y Int16x8, z Mask16x8) Int16x8 -// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLVW, CPU Feature: AVX512EVEX +// Asm: VPSRAVW, CPU Feature: AVX512EVEX func (x Int16x16) ShiftRightMasked(y Int16x16, z Mask16x16) Int16x16 -// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLVW, CPU Feature: AVX512EVEX +// Asm: VPSRAVW, CPU Feature: AVX512EVEX func (x Int16x32) ShiftRightMasked(y Int16x32, z Mask16x32) Int16x32 -// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLVD, CPU Feature: AVX512EVEX +// Asm: VPSRAVD, CPU Feature: AVX512EVEX func (x Int32x4) ShiftRightMasked(y Int32x4, z Mask32x4) Int32x4 -// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLVD, CPU Feature: AVX512EVEX +// Asm: VPSRAVD, CPU Feature: AVX512EVEX func (x Int32x8) ShiftRightMasked(y Int32x8, z Mask32x8) Int32x8 -// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLVD, CPU Feature: AVX512EVEX +// Asm: VPSRAVD, CPU Feature: AVX512EVEX func (x Int32x16) ShiftRightMasked(y Int32x16, z Mask32x16) Int32x16 -// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. 
+// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX func (x Int64x2) ShiftRightMasked(y Int64x2, z Mask64x2) Int64x2 -// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX func (x Int64x4) ShiftRightMasked(y Int64x4, z Mask64x4) Int64x4 -// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX func (x Int64x8) ShiftRightMasked(y Int64x8, z Mask64x8) Int64x8 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. @@ -8525,190 +8631,6 @@ func (x Uint64x4) ShiftRightMasked(y Uint64x4, z Mask64x4) Uint64x4 // Asm: VPSRLVQ, CPU Feature: AVX512EVEX func (x Uint64x8) ShiftRightMasked(y Uint64x8, z Mask64x8) Uint64x8 -/* ShiftRightSignExtended */ - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVW, CPU Feature: AVX512EVEX -func (x Int16x8) ShiftRightSignExtended(y Int16x8) Int16x8 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVW, CPU Feature: AVX512EVEX -func (x Int16x16) ShiftRightSignExtended(y Int16x16) Int16x16 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVW, CPU Feature: AVX512EVEX -func (x Int16x32) ShiftRightSignExtended(y Int16x32) Int16x32 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVD, CPU Feature: AVX2 -func (x Int32x4) ShiftRightSignExtended(y Int32x4) Int32x4 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVD, CPU Feature: AVX2 -func (x Int32x8) ShiftRightSignExtended(y Int32x8) Int32x8 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVD, CPU Feature: AVX512EVEX -func (x Int32x16) ShiftRightSignExtended(y Int32x16) Int32x16 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
-// -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Int64x2) ShiftRightSignExtended(y Int64x2) Int64x2 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Int64x4) ShiftRightSignExtended(y Int64x4) Int64x4 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Int64x8) ShiftRightSignExtended(y Int64x8) Int64x8 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVW, CPU Feature: AVX512EVEX -func (x Uint16x8) ShiftRightSignExtended(y Uint16x8) Uint16x8 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVW, CPU Feature: AVX512EVEX -func (x Uint16x16) ShiftRightSignExtended(y Uint16x16) Uint16x16 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVW, CPU Feature: AVX512EVEX -func (x Uint16x32) ShiftRightSignExtended(y Uint16x32) Uint16x32 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVD, CPU Feature: AVX2 -func (x Uint32x4) ShiftRightSignExtended(y Uint32x4) Uint32x4 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVD, CPU Feature: AVX2 -func (x Uint32x8) ShiftRightSignExtended(y Uint32x8) Uint32x8 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVD, CPU Feature: AVX512EVEX -func (x Uint32x16) ShiftRightSignExtended(y Uint32x16) Uint32x16 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Uint64x2) ShiftRightSignExtended(y Uint64x2) Uint64x2 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Uint64x4) ShiftRightSignExtended(y Uint64x4) Uint64x4 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Uint64x8) ShiftRightSignExtended(y Uint64x8) Uint64x8 - -/* ShiftRightSignExtendedMasked */ - -// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
-// -// Asm: VPSRAVW, CPU Feature: AVX512EVEX -func (x Int16x8) ShiftRightSignExtendedMasked(y Int16x8, z Mask16x8) Int16x8 - -// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVW, CPU Feature: AVX512EVEX -func (x Int16x16) ShiftRightSignExtendedMasked(y Int16x16, z Mask16x16) Int16x16 - -// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVW, CPU Feature: AVX512EVEX -func (x Int16x32) ShiftRightSignExtendedMasked(y Int16x32, z Mask16x32) Int16x32 - -// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVD, CPU Feature: AVX512EVEX -func (x Int32x4) ShiftRightSignExtendedMasked(y Int32x4, z Mask32x4) Int32x4 - -// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVD, CPU Feature: AVX512EVEX -func (x Int32x8) ShiftRightSignExtendedMasked(y Int32x8, z Mask32x8) Int32x8 - -// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVD, CPU Feature: AVX512EVEX -func (x Int32x16) ShiftRightSignExtendedMasked(y Int32x16, z Mask32x16) Int32x16 - -// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Int64x2) ShiftRightSignExtendedMasked(y Int64x2, z Mask64x2) Int64x2 - -// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Int64x4) ShiftRightSignExtendedMasked(y Int64x4, z Mask64x4) Int64x4 - -// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Int64x8) ShiftRightSignExtendedMasked(y Int64x8, z Mask64x8) Int64x8 - -// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVW, CPU Feature: AVX512EVEX -func (x Uint16x8) ShiftRightSignExtendedMasked(y Uint16x8, z Mask16x8) Uint16x8 - -// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVW, CPU Feature: AVX512EVEX -func (x Uint16x16) ShiftRightSignExtendedMasked(y Uint16x16, z Mask16x16) Uint16x16 - -// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
-// -// Asm: VPSRAVW, CPU Feature: AVX512EVEX -func (x Uint16x32) ShiftRightSignExtendedMasked(y Uint16x32, z Mask16x32) Uint16x32 - -// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVD, CPU Feature: AVX512EVEX -func (x Uint32x4) ShiftRightSignExtendedMasked(y Uint32x4, z Mask32x4) Uint32x4 - -// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVD, CPU Feature: AVX512EVEX -func (x Uint32x8) ShiftRightSignExtendedMasked(y Uint32x8, z Mask32x8) Uint32x8 - -// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVD, CPU Feature: AVX512EVEX -func (x Uint32x16) ShiftRightSignExtendedMasked(y Uint32x16, z Mask32x16) Uint32x16 - -// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Uint64x2) ShiftRightSignExtendedMasked(y Uint64x2, z Mask64x2) Uint64x2 - -// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Uint64x4) ShiftRightSignExtendedMasked(y Uint64x4, z Mask64x4) Uint64x4 - -// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
-// -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Uint64x8) ShiftRightSignExtendedMasked(y Uint64x8, z Mask64x8) Uint64x8 - /* Sign */ // Sign returns the product of the first operand with -1, 0, or 1, diff --git a/src/simd/simd_wrapped_test.go b/src/simd/simd_wrapped_test.go index 62096a76cf7ab1..15e5c45097c526 100644 --- a/src/simd/simd_wrapped_test.go +++ b/src/simd/simd_wrapped_test.go @@ -2055,8 +2055,6 @@ func testInt16x8Binary(t *testing.T, v0 []int16, v1 []int16, want []int16, which gotv = vec0.ShiftLeft(vec1) case "ShiftRight": gotv = vec0.ShiftRight(vec1) - case "ShiftRightSignExtended": - gotv = vec0.ShiftRightSignExtended(vec1) case "Sign": gotv = vec0.Sign(vec1) case "Sub": @@ -2101,8 +2099,6 @@ func testInt16x8BinaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, w gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask16x8()) case "ShiftRightMasked": gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask16x8()) - case "ShiftRightSignExtendedMasked": - gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask16x8()) case "SubMasked": gotv = vec0.SubMasked(vec1, vec2.AsMask16x8()) @@ -2356,8 +2352,6 @@ func testInt16x16Binary(t *testing.T, v0 []int16, v1 []int16, want []int16, whic gotv = vec0.ShiftLeft(vec1) case "ShiftRight": gotv = vec0.ShiftRight(vec1) - case "ShiftRightSignExtended": - gotv = vec0.ShiftRightSignExtended(vec1) case "Sign": gotv = vec0.Sign(vec1) case "Sub": @@ -2402,8 +2396,6 @@ func testInt16x16BinaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask16x16()) case "ShiftRightMasked": gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask16x16()) - case "ShiftRightSignExtendedMasked": - gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask16x16()) case "SubMasked": gotv = vec0.SubMasked(vec1, vec2.AsMask16x16()) @@ -2643,8 +2635,6 @@ func testInt16x32Binary(t *testing.T, v0 []int16, v1 []int16, want []int16, whic gotv = vec0.ShiftLeft(vec1) case "ShiftRight": gotv = vec0.ShiftRight(vec1) - case "ShiftRightSignExtended": - gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) @@ -2685,8 +2675,6 @@ func testInt16x32BinaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask16x32()) case "ShiftRightMasked": gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask16x32()) - case "ShiftRightSignExtendedMasked": - gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask16x32()) case "SubMasked": gotv = vec0.SubMasked(vec1, vec2.AsMask16x32()) @@ -2934,8 +2922,6 @@ func testInt32x4Binary(t *testing.T, v0 []int32, v1 []int32, want []int32, which gotv = vec0.ShiftLeft(vec1) case "ShiftRight": gotv = vec0.ShiftRight(vec1) - case "ShiftRightSignExtended": - gotv = vec0.ShiftRightSignExtended(vec1) case "Sign": gotv = vec0.Sign(vec1) case "Sub": @@ -2984,8 +2970,6 @@ func testInt32x4BinaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, w gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask32x4()) case "ShiftRightMasked": gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask32x4()) - case "ShiftRightSignExtendedMasked": - gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask32x4()) case "SubMasked": gotv = vec0.SubMasked(vec1, vec2.AsMask32x4()) case "XorMasked": @@ -3311,8 +3295,6 @@ func testInt32x8Binary(t *testing.T, v0 []int32, v1 []int32, want []int32, which gotv = vec0.ShiftLeft(vec1) case "ShiftRight": gotv = vec0.ShiftRight(vec1) - case "ShiftRightSignExtended": - gotv = vec0.ShiftRightSignExtended(vec1) case "Sign": gotv = vec0.Sign(vec1) case "Sub": @@ 
-3361,8 +3343,6 @@ func testInt32x8BinaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, w gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask32x8()) case "ShiftRightMasked": gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask32x8()) - case "ShiftRightSignExtendedMasked": - gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask32x8()) case "SubMasked": gotv = vec0.SubMasked(vec1, vec2.AsMask32x8()) case "XorMasked": @@ -3684,8 +3664,6 @@ func testInt32x16Binary(t *testing.T, v0 []int32, v1 []int32, want []int32, whic gotv = vec0.ShiftLeft(vec1) case "ShiftRight": gotv = vec0.ShiftRight(vec1) - case "ShiftRightSignExtended": - gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": @@ -3732,8 +3710,6 @@ func testInt32x16BinaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask32x16()) case "ShiftRightMasked": gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask32x16()) - case "ShiftRightSignExtendedMasked": - gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask32x16()) case "SubMasked": gotv = vec0.SubMasked(vec1, vec2.AsMask32x16()) case "XorMasked": @@ -4036,8 +4012,6 @@ func testInt64x2Binary(t *testing.T, v0 []int64, v1 []int64, want []int64, which gotv = vec0.ShiftLeft(vec1) case "ShiftRight": gotv = vec0.ShiftRight(vec1) - case "ShiftRightSignExtended": - gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": @@ -4086,8 +4060,6 @@ func testInt64x2BinaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, w gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask64x2()) case "ShiftRightMasked": gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask64x2()) - case "ShiftRightSignExtendedMasked": - gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask64x2()) case "SubMasked": gotv = vec0.SubMasked(vec1, vec2.AsMask64x2()) case "XorMasked": @@ -4292,8 +4264,6 @@ func testInt64x4Binary(t *testing.T, v0 []int64, v1 []int64, want []int64, which gotv = vec0.ShiftLeft(vec1) case "ShiftRight": gotv = vec0.ShiftRight(vec1) - case "ShiftRightSignExtended": - gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": @@ -4342,8 +4312,6 @@ func testInt64x4BinaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, w gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask64x4()) case "ShiftRightMasked": gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask64x4()) - case "ShiftRightSignExtendedMasked": - gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask64x4()) case "SubMasked": gotv = vec0.SubMasked(vec1, vec2.AsMask64x4()) case "XorMasked": @@ -4548,8 +4516,6 @@ func testInt64x8Binary(t *testing.T, v0 []int64, v1 []int64, want []int64, which gotv = vec0.ShiftLeft(vec1) case "ShiftRight": gotv = vec0.ShiftRight(vec1) - case "ShiftRightSignExtended": - gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": @@ -4598,8 +4564,6 @@ func testInt64x8BinaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, w gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask64x8()) case "ShiftRightMasked": gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask64x8()) - case "ShiftRightSignExtendedMasked": - gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask64x8()) case "SubMasked": gotv = vec0.SubMasked(vec1, vec2.AsMask64x8()) case "XorMasked": @@ -5478,8 +5442,6 @@ func testUint16x8Binary(t *testing.T, v0 []uint16, v1 []uint16, want []uint16, w gotv = vec0.ShiftLeft(vec1) case "ShiftRight": gotv = vec0.ShiftRight(vec1) - case "ShiftRightSignExtended": - gotv = 
vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": @@ -5522,8 +5484,6 @@ func testUint16x8BinaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16 gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask16x8()) case "ShiftRightMasked": gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask16x8()) - case "ShiftRightSignExtendedMasked": - gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask16x8()) case "SubMasked": gotv = vec0.SubMasked(vec1, vec2.AsMask16x8()) @@ -5726,8 +5686,6 @@ func testUint16x16Binary(t *testing.T, v0 []uint16, v1 []uint16, want []uint16, gotv = vec0.ShiftLeft(vec1) case "ShiftRight": gotv = vec0.ShiftRight(vec1) - case "ShiftRightSignExtended": - gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": @@ -5770,8 +5728,6 @@ func testUint16x16BinaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []int1 gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask16x16()) case "ShiftRightMasked": gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask16x16()) - case "ShiftRightSignExtendedMasked": - gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask16x16()) case "SubMasked": gotv = vec0.SubMasked(vec1, vec2.AsMask16x16()) @@ -5964,8 +5920,6 @@ func testUint16x32Binary(t *testing.T, v0 []uint16, v1 []uint16, want []uint16, gotv = vec0.ShiftLeft(vec1) case "ShiftRight": gotv = vec0.ShiftRight(vec1) - case "ShiftRightSignExtended": - gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) @@ -6006,8 +5960,6 @@ func testUint16x32BinaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []int1 gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask16x32()) case "ShiftRightMasked": gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask16x32()) - case "ShiftRightSignExtendedMasked": - gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask16x32()) case "SubMasked": gotv = vec0.SubMasked(vec1, vec2.AsMask16x32()) @@ -6206,8 +6158,6 @@ func testUint32x4Binary(t *testing.T, v0 []uint32, v1 []uint32, want []uint32, w gotv = vec0.ShiftLeft(vec1) case "ShiftRight": gotv = vec0.ShiftRight(vec1) - case "ShiftRightSignExtended": - gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": @@ -6252,8 +6202,6 @@ func testUint32x4BinaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32 gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask32x4()) case "ShiftRightMasked": gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask32x4()) - case "ShiftRightSignExtendedMasked": - gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask32x4()) case "SubMasked": gotv = vec0.SubMasked(vec1, vec2.AsMask32x4()) case "XorMasked": @@ -6524,8 +6472,6 @@ func testUint32x8Binary(t *testing.T, v0 []uint32, v1 []uint32, want []uint32, w gotv = vec0.ShiftLeft(vec1) case "ShiftRight": gotv = vec0.ShiftRight(vec1) - case "ShiftRightSignExtended": - gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": @@ -6570,8 +6516,6 @@ func testUint32x8BinaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32 gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask32x8()) case "ShiftRightMasked": gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask32x8()) - case "ShiftRightSignExtendedMasked": - gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask32x8()) case "SubMasked": gotv = vec0.SubMasked(vec1, vec2.AsMask32x8()) case "XorMasked": @@ -6838,8 +6782,6 @@ func testUint32x16Binary(t *testing.T, v0 []uint32, v1 []uint32, want []uint32, gotv = vec0.ShiftLeft(vec1) case "ShiftRight": gotv = vec0.ShiftRight(vec1) - case 
"ShiftRightSignExtended": - gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": @@ -6884,8 +6826,6 @@ func testUint32x16BinaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []int3 gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask32x16()) case "ShiftRightMasked": gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask32x16()) - case "ShiftRightSignExtendedMasked": - gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask32x16()) case "SubMasked": gotv = vec0.SubMasked(vec1, vec2.AsMask32x16()) case "XorMasked": @@ -7133,8 +7073,6 @@ func testUint64x2Binary(t *testing.T, v0 []uint64, v1 []uint64, want []uint64, w gotv = vec0.ShiftLeft(vec1) case "ShiftRight": gotv = vec0.ShiftRight(vec1) - case "ShiftRightSignExtended": - gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": @@ -7181,8 +7119,6 @@ func testUint64x2BinaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64 gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask64x2()) case "ShiftRightMasked": gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask64x2()) - case "ShiftRightSignExtendedMasked": - gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask64x2()) case "SubMasked": gotv = vec0.SubMasked(vec1, vec2.AsMask64x2()) case "XorMasked": @@ -7381,8 +7317,6 @@ func testUint64x4Binary(t *testing.T, v0 []uint64, v1 []uint64, want []uint64, w gotv = vec0.ShiftLeft(vec1) case "ShiftRight": gotv = vec0.ShiftRight(vec1) - case "ShiftRightSignExtended": - gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": @@ -7429,8 +7363,6 @@ func testUint64x4BinaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64 gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask64x4()) case "ShiftRightMasked": gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask64x4()) - case "ShiftRightSignExtendedMasked": - gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask64x4()) case "SubMasked": gotv = vec0.SubMasked(vec1, vec2.AsMask64x4()) case "XorMasked": @@ -7629,8 +7561,6 @@ func testUint64x8Binary(t *testing.T, v0 []uint64, v1 []uint64, want []uint64, w gotv = vec0.ShiftLeft(vec1) case "ShiftRight": gotv = vec0.ShiftRight(vec1) - case "ShiftRightSignExtended": - gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": @@ -7677,8 +7607,6 @@ func testUint64x8BinaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64 gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask64x8()) case "ShiftRightMasked": gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask64x8()) - case "ShiftRightSignExtendedMasked": - gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask64x8()) case "SubMasked": gotv = vec0.SubMasked(vec1, vec2.AsMask64x8()) case "XorMasked": @@ -7884,7 +7812,5 @@ func testUint64x8UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint6 // ShiftAllRightAndFillUpperFrom // ShiftAllRightAndFillUpperFromMasked // ShiftAllRightMasked -// ShiftAllRightSignExtended -// ShiftAllRightSignExtendedMasked // TruncWithPrecision // TruncWithPrecisionMasked From 3f789721d6298b7f4406a0106670c4d4ad70a28d Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Sat, 12 Jul 2025 08:13:04 +0000 Subject: [PATCH 082/139] [dev.simd] cmd/compile: mark SIMD types non-fat This CL fixes the merge locals error. The culprit is that liveness analysis wrongly mark SIMD structs fat, hence making `StoreReg` of SIMD vectors not a varkill effect, making the liveness range of SIMD vectors not closed correctly, further making mergelocals merged 2 concurrently-live SIMD vectors. 
It looks like mergelocals will treat the live range as one instruction if it's not closed: [st, st+1). Should we make it [st, +inf) instead, so that we won't have similar errors in the future? Also, I feel we really need to examine every "case types.TSTRUCT" or "if t.Kind() == types.TSTRUCT" in the codebase to make sure it handles SIMD types correctly... Change-Id: I2f4f4f36a890bd317d582cfa73a8f6a789382d91 Reviewed-on: https://go-review.googlesource.com/c/go/+/687775 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/liveness/plive.go | 3 +++ src/cmd/compile/internal/ssa/func.go | 8 -------- 2 files changed, 3 insertions(+), 8 deletions(-) diff --git a/src/cmd/compile/internal/liveness/plive.go b/src/cmd/compile/internal/liveness/plive.go index 5a2a22ee8f5c82..b9d3030e96ab77 100644 --- a/src/cmd/compile/internal/liveness/plive.go +++ b/src/cmd/compile/internal/liveness/plive.go @@ -1534,6 +1534,9 @@ func isfat(t *types.Type) bool { } return true case types.TSTRUCT: + if t.IsSIMD() { + return false + } // Struct with 1 field, check if field is fat if t.NumFields() == 1 { return isfat(t.Field(0).Type) diff --git a/src/cmd/compile/internal/ssa/func.go b/src/cmd/compile/internal/ssa/func.go index 01ce89cf47e315..5736f0b8126484 100644 --- a/src/cmd/compile/internal/ssa/func.go +++ b/src/cmd/compile/internal/ssa/func.go @@ -850,13 +850,6 @@ func (f *Func) NewLocal(pos src.XPos, typ *types.Type) *ir.Name { // items larger than what CanSSA would allow (approximateky, we disallow things // marked as open defer slots so as to avoid complicating liveness // analysis. -// -// TODO: make SIMD variables mergible. -// -// Right now this check excludes SIMD vars because sometimes two live SIMD -// vectors will be put into the same partition by mergelocals, we need to figure -// out why because these vectors are big and should be merged when possible. -// Details in CL 687375. func IsMergeCandidate(n *ir.Name) bool { if base.Debug.MergeLocals == 0 || base.Flag.N != 0 || @@ -864,7 +857,6 @@ func IsMergeCandidate(n *ir.Name) bool { n.Type().Size() <= int64(3*types.PtrSize) || n.Addrtaken() || n.NonMergeable() || - n.Type().IsSIMD() || n.OpenDeferSlot() { return false } From 08ffd66ab25d55b5fe816be0b2a65bb4cc91f3bd Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Fri, 11 Jul 2025 20:03:00 +0000 Subject: [PATCH 083/139] [dev.simd] simd: updates CPU Feature in doc This CL is generated by CL 687655. Change-Id: I12d7516a9a51a1d65ec3aa6f0fd754248df1d6de Reviewed-on: https://go-review.googlesource.com/c/go/+/687675 LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui --- src/simd/ops_amd64.go | 2592 ++++++++++++++++++++--------------------- 1 file changed, 1296 insertions(+), 1296 deletions(-) diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 38ccfaac8c6e37..2c17300ae44d76 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -18,7 +18,7 @@ func (x Int8x32) Absolute() Int8x32 // Absolute computes the absolute value of each element. // -// Asm: VPABSB, CPU Feature: AVX512EVEX +// Asm: VPABSB, CPU Feature: AVX512BW func (x Int8x64) Absolute() Int8x64 // Absolute computes the absolute value of each element. @@ -33,7 +33,7 @@ func (x Int16x16) Absolute() Int16x16 // Absolute computes the absolute value of each element. // -// Asm: VPABSW, CPU Feature: AVX512EVEX +// Asm: VPABSW, CPU Feature: AVX512BW func (x Int16x32) Absolute() Int16x32 // Absolute computes the absolute value of each element.
@@ -48,84 +48,84 @@ func (x Int32x8) Absolute() Int32x8 // Absolute computes the absolute value of each element. // -// Asm: VPABSD, CPU Feature: AVX512EVEX +// Asm: VPABSD, CPU Feature: AVX512F func (x Int32x16) Absolute() Int32x16 // Absolute computes the absolute value of each element. // -// Asm: VPABSQ, CPU Feature: AVX512EVEX +// Asm: VPABSQ, CPU Feature: AVX512F func (x Int64x2) Absolute() Int64x2 // Absolute computes the absolute value of each element. // -// Asm: VPABSQ, CPU Feature: AVX512EVEX +// Asm: VPABSQ, CPU Feature: AVX512F func (x Int64x4) Absolute() Int64x4 // Absolute computes the absolute value of each element. // -// Asm: VPABSQ, CPU Feature: AVX512EVEX +// Asm: VPABSQ, CPU Feature: AVX512F func (x Int64x8) Absolute() Int64x8 /* AbsoluteMasked */ // AbsoluteMasked computes the absolute value of each element. // -// Asm: VPABSB, CPU Feature: AVX512EVEX +// Asm: VPABSB, CPU Feature: AVX512BW func (x Int8x16) AbsoluteMasked(y Mask8x16) Int8x16 // AbsoluteMasked computes the absolute value of each element. // -// Asm: VPABSB, CPU Feature: AVX512EVEX +// Asm: VPABSB, CPU Feature: AVX512BW func (x Int8x32) AbsoluteMasked(y Mask8x32) Int8x32 // AbsoluteMasked computes the absolute value of each element. // -// Asm: VPABSB, CPU Feature: AVX512EVEX +// Asm: VPABSB, CPU Feature: AVX512BW func (x Int8x64) AbsoluteMasked(y Mask8x64) Int8x64 // AbsoluteMasked computes the absolute value of each element. // -// Asm: VPABSW, CPU Feature: AVX512EVEX +// Asm: VPABSW, CPU Feature: AVX512BW func (x Int16x8) AbsoluteMasked(y Mask16x8) Int16x8 // AbsoluteMasked computes the absolute value of each element. // -// Asm: VPABSW, CPU Feature: AVX512EVEX +// Asm: VPABSW, CPU Feature: AVX512BW func (x Int16x16) AbsoluteMasked(y Mask16x16) Int16x16 // AbsoluteMasked computes the absolute value of each element. // -// Asm: VPABSW, CPU Feature: AVX512EVEX +// Asm: VPABSW, CPU Feature: AVX512BW func (x Int16x32) AbsoluteMasked(y Mask16x32) Int16x32 // AbsoluteMasked computes the absolute value of each element. // -// Asm: VPABSD, CPU Feature: AVX512EVEX +// Asm: VPABSD, CPU Feature: AVX512F func (x Int32x4) AbsoluteMasked(y Mask32x4) Int32x4 // AbsoluteMasked computes the absolute value of each element. // -// Asm: VPABSD, CPU Feature: AVX512EVEX +// Asm: VPABSD, CPU Feature: AVX512F func (x Int32x8) AbsoluteMasked(y Mask32x8) Int32x8 // AbsoluteMasked computes the absolute value of each element. // -// Asm: VPABSD, CPU Feature: AVX512EVEX +// Asm: VPABSD, CPU Feature: AVX512F func (x Int32x16) AbsoluteMasked(y Mask32x16) Int32x16 // AbsoluteMasked computes the absolute value of each element. // -// Asm: VPABSQ, CPU Feature: AVX512EVEX +// Asm: VPABSQ, CPU Feature: AVX512F func (x Int64x2) AbsoluteMasked(y Mask64x2) Int64x2 // AbsoluteMasked computes the absolute value of each element. // -// Asm: VPABSQ, CPU Feature: AVX512EVEX +// Asm: VPABSQ, CPU Feature: AVX512F func (x Int64x4) AbsoluteMasked(y Mask64x4) Int64x4 // AbsoluteMasked computes the absolute value of each element. // -// Asm: VPABSQ, CPU Feature: AVX512EVEX +// Asm: VPABSQ, CPU Feature: AVX512F func (x Int64x8) AbsoluteMasked(y Mask64x8) Int64x8 /* Add */ @@ -142,7 +142,7 @@ func (x Float32x8) Add(y Float32x8) Float32x8 // Add adds corresponding elements of two vectors. // -// Asm: VADDPS, CPU Feature: AVX512EVEX +// Asm: VADDPS, CPU Feature: AVX512F func (x Float32x16) Add(y Float32x16) Float32x16 // Add adds corresponding elements of two vectors. 
@@ -157,7 +157,7 @@ func (x Float64x4) Add(y Float64x4) Float64x4 // Add adds corresponding elements of two vectors. // -// Asm: VADDPD, CPU Feature: AVX512EVEX +// Asm: VADDPD, CPU Feature: AVX512F func (x Float64x8) Add(y Float64x8) Float64x8 // Add adds corresponding elements of two vectors. @@ -172,7 +172,7 @@ func (x Int8x32) Add(y Int8x32) Int8x32 // Add adds corresponding elements of two vectors. // -// Asm: VPADDB, CPU Feature: AVX512EVEX +// Asm: VPADDB, CPU Feature: AVX512BW func (x Int8x64) Add(y Int8x64) Int8x64 // Add adds corresponding elements of two vectors. @@ -187,7 +187,7 @@ func (x Int16x16) Add(y Int16x16) Int16x16 // Add adds corresponding elements of two vectors. // -// Asm: VPADDW, CPU Feature: AVX512EVEX +// Asm: VPADDW, CPU Feature: AVX512BW func (x Int16x32) Add(y Int16x32) Int16x32 // Add adds corresponding elements of two vectors. @@ -202,7 +202,7 @@ func (x Int32x8) Add(y Int32x8) Int32x8 // Add adds corresponding elements of two vectors. // -// Asm: VPADDD, CPU Feature: AVX512EVEX +// Asm: VPADDD, CPU Feature: AVX512F func (x Int32x16) Add(y Int32x16) Int32x16 // Add adds corresponding elements of two vectors. @@ -217,7 +217,7 @@ func (x Int64x4) Add(y Int64x4) Int64x4 // Add adds corresponding elements of two vectors. // -// Asm: VPADDQ, CPU Feature: AVX512EVEX +// Asm: VPADDQ, CPU Feature: AVX512F func (x Int64x8) Add(y Int64x8) Int64x8 // Add adds corresponding elements of two vectors. @@ -232,7 +232,7 @@ func (x Uint8x32) Add(y Uint8x32) Uint8x32 // Add adds corresponding elements of two vectors. // -// Asm: VPADDB, CPU Feature: AVX512EVEX +// Asm: VPADDB, CPU Feature: AVX512BW func (x Uint8x64) Add(y Uint8x64) Uint8x64 // Add adds corresponding elements of two vectors. @@ -247,7 +247,7 @@ func (x Uint16x16) Add(y Uint16x16) Uint16x16 // Add adds corresponding elements of two vectors. // -// Asm: VPADDW, CPU Feature: AVX512EVEX +// Asm: VPADDW, CPU Feature: AVX512BW func (x Uint16x32) Add(y Uint16x32) Uint16x32 // Add adds corresponding elements of two vectors. @@ -262,7 +262,7 @@ func (x Uint32x8) Add(y Uint32x8) Uint32x8 // Add adds corresponding elements of two vectors. // -// Asm: VPADDD, CPU Feature: AVX512EVEX +// Asm: VPADDD, CPU Feature: AVX512F func (x Uint32x16) Add(y Uint32x16) Uint32x16 // Add adds corresponding elements of two vectors. @@ -277,159 +277,159 @@ func (x Uint64x4) Add(y Uint64x4) Uint64x4 // Add adds corresponding elements of two vectors. // -// Asm: VPADDQ, CPU Feature: AVX512EVEX +// Asm: VPADDQ, CPU Feature: AVX512F func (x Uint64x8) Add(y Uint64x8) Uint64x8 /* AddMasked */ // AddMasked adds corresponding elements of two vectors. // -// Asm: VADDPS, CPU Feature: AVX512EVEX +// Asm: VADDPS, CPU Feature: AVX512F func (x Float32x4) AddMasked(y Float32x4, z Mask32x4) Float32x4 // AddMasked adds corresponding elements of two vectors. // -// Asm: VADDPS, CPU Feature: AVX512EVEX +// Asm: VADDPS, CPU Feature: AVX512F func (x Float32x8) AddMasked(y Float32x8, z Mask32x8) Float32x8 // AddMasked adds corresponding elements of two vectors. // -// Asm: VADDPS, CPU Feature: AVX512EVEX +// Asm: VADDPS, CPU Feature: AVX512F func (x Float32x16) AddMasked(y Float32x16, z Mask32x16) Float32x16 // AddMasked adds corresponding elements of two vectors. // -// Asm: VADDPD, CPU Feature: AVX512EVEX +// Asm: VADDPD, CPU Feature: AVX512F func (x Float64x2) AddMasked(y Float64x2, z Mask64x2) Float64x2 // AddMasked adds corresponding elements of two vectors. 
// -// Asm: VADDPD, CPU Feature: AVX512EVEX +// Asm: VADDPD, CPU Feature: AVX512F func (x Float64x4) AddMasked(y Float64x4, z Mask64x4) Float64x4 // AddMasked adds corresponding elements of two vectors. // -// Asm: VADDPD, CPU Feature: AVX512EVEX +// Asm: VADDPD, CPU Feature: AVX512F func (x Float64x8) AddMasked(y Float64x8, z Mask64x8) Float64x8 // AddMasked adds corresponding elements of two vectors. // -// Asm: VPADDB, CPU Feature: AVX512EVEX +// Asm: VPADDB, CPU Feature: AVX512BW func (x Int8x16) AddMasked(y Int8x16, z Mask8x16) Int8x16 // AddMasked adds corresponding elements of two vectors. // -// Asm: VPADDB, CPU Feature: AVX512EVEX +// Asm: VPADDB, CPU Feature: AVX512BW func (x Int8x32) AddMasked(y Int8x32, z Mask8x32) Int8x32 // AddMasked adds corresponding elements of two vectors. // -// Asm: VPADDB, CPU Feature: AVX512EVEX +// Asm: VPADDB, CPU Feature: AVX512BW func (x Int8x64) AddMasked(y Int8x64, z Mask8x64) Int8x64 // AddMasked adds corresponding elements of two vectors. // -// Asm: VPADDW, CPU Feature: AVX512EVEX +// Asm: VPADDW, CPU Feature: AVX512BW func (x Int16x8) AddMasked(y Int16x8, z Mask16x8) Int16x8 // AddMasked adds corresponding elements of two vectors. // -// Asm: VPADDW, CPU Feature: AVX512EVEX +// Asm: VPADDW, CPU Feature: AVX512BW func (x Int16x16) AddMasked(y Int16x16, z Mask16x16) Int16x16 // AddMasked adds corresponding elements of two vectors. // -// Asm: VPADDW, CPU Feature: AVX512EVEX +// Asm: VPADDW, CPU Feature: AVX512BW func (x Int16x32) AddMasked(y Int16x32, z Mask16x32) Int16x32 // AddMasked adds corresponding elements of two vectors. // -// Asm: VPADDD, CPU Feature: AVX512EVEX +// Asm: VPADDD, CPU Feature: AVX512F func (x Int32x4) AddMasked(y Int32x4, z Mask32x4) Int32x4 // AddMasked adds corresponding elements of two vectors. // -// Asm: VPADDD, CPU Feature: AVX512EVEX +// Asm: VPADDD, CPU Feature: AVX512F func (x Int32x8) AddMasked(y Int32x8, z Mask32x8) Int32x8 // AddMasked adds corresponding elements of two vectors. // -// Asm: VPADDD, CPU Feature: AVX512EVEX +// Asm: VPADDD, CPU Feature: AVX512F func (x Int32x16) AddMasked(y Int32x16, z Mask32x16) Int32x16 // AddMasked adds corresponding elements of two vectors. // -// Asm: VPADDQ, CPU Feature: AVX512EVEX +// Asm: VPADDQ, CPU Feature: AVX512F func (x Int64x2) AddMasked(y Int64x2, z Mask64x2) Int64x2 // AddMasked adds corresponding elements of two vectors. // -// Asm: VPADDQ, CPU Feature: AVX512EVEX +// Asm: VPADDQ, CPU Feature: AVX512F func (x Int64x4) AddMasked(y Int64x4, z Mask64x4) Int64x4 // AddMasked adds corresponding elements of two vectors. // -// Asm: VPADDQ, CPU Feature: AVX512EVEX +// Asm: VPADDQ, CPU Feature: AVX512F func (x Int64x8) AddMasked(y Int64x8, z Mask64x8) Int64x8 // AddMasked adds corresponding elements of two vectors. // -// Asm: VPADDB, CPU Feature: AVX512EVEX +// Asm: VPADDB, CPU Feature: AVX512BW func (x Uint8x16) AddMasked(y Uint8x16, z Mask8x16) Uint8x16 // AddMasked adds corresponding elements of two vectors. // -// Asm: VPADDB, CPU Feature: AVX512EVEX +// Asm: VPADDB, CPU Feature: AVX512BW func (x Uint8x32) AddMasked(y Uint8x32, z Mask8x32) Uint8x32 // AddMasked adds corresponding elements of two vectors. // -// Asm: VPADDB, CPU Feature: AVX512EVEX +// Asm: VPADDB, CPU Feature: AVX512BW func (x Uint8x64) AddMasked(y Uint8x64, z Mask8x64) Uint8x64 // AddMasked adds corresponding elements of two vectors. 
// -// Asm: VPADDW, CPU Feature: AVX512EVEX +// Asm: VPADDW, CPU Feature: AVX512BW func (x Uint16x8) AddMasked(y Uint16x8, z Mask16x8) Uint16x8 // AddMasked adds corresponding elements of two vectors. // -// Asm: VPADDW, CPU Feature: AVX512EVEX +// Asm: VPADDW, CPU Feature: AVX512BW func (x Uint16x16) AddMasked(y Uint16x16, z Mask16x16) Uint16x16 // AddMasked adds corresponding elements of two vectors. // -// Asm: VPADDW, CPU Feature: AVX512EVEX +// Asm: VPADDW, CPU Feature: AVX512BW func (x Uint16x32) AddMasked(y Uint16x32, z Mask16x32) Uint16x32 // AddMasked adds corresponding elements of two vectors. // -// Asm: VPADDD, CPU Feature: AVX512EVEX +// Asm: VPADDD, CPU Feature: AVX512F func (x Uint32x4) AddMasked(y Uint32x4, z Mask32x4) Uint32x4 // AddMasked adds corresponding elements of two vectors. // -// Asm: VPADDD, CPU Feature: AVX512EVEX +// Asm: VPADDD, CPU Feature: AVX512F func (x Uint32x8) AddMasked(y Uint32x8, z Mask32x8) Uint32x8 // AddMasked adds corresponding elements of two vectors. // -// Asm: VPADDD, CPU Feature: AVX512EVEX +// Asm: VPADDD, CPU Feature: AVX512F func (x Uint32x16) AddMasked(y Uint32x16, z Mask32x16) Uint32x16 // AddMasked adds corresponding elements of two vectors. // -// Asm: VPADDQ, CPU Feature: AVX512EVEX +// Asm: VPADDQ, CPU Feature: AVX512F func (x Uint64x2) AddMasked(y Uint64x2, z Mask64x2) Uint64x2 // AddMasked adds corresponding elements of two vectors. // -// Asm: VPADDQ, CPU Feature: AVX512EVEX +// Asm: VPADDQ, CPU Feature: AVX512F func (x Uint64x4) AddMasked(y Uint64x4, z Mask64x4) Uint64x4 // AddMasked adds corresponding elements of two vectors. // -// Asm: VPADDQ, CPU Feature: AVX512EVEX +// Asm: VPADDQ, CPU Feature: AVX512F func (x Uint64x8) AddMasked(y Uint64x8, z Mask64x8) Uint64x8 /* AddSub */ @@ -488,7 +488,7 @@ func (x Int32x8) And(y Int32x8) Int32x8 // And performs a masked bitwise AND operation between two vectors. // -// Asm: VPANDD, CPU Feature: AVX512EVEX +// Asm: VPANDD, CPU Feature: AVX512F func (x Int32x16) And(y Int32x16) Int32x16 // And performs a bitwise AND operation between two vectors. @@ -503,7 +503,7 @@ func (x Int64x4) And(y Int64x4) Int64x4 // And performs a masked bitwise AND operation between two vectors. // -// Asm: VPANDQ, CPU Feature: AVX512EVEX +// Asm: VPANDQ, CPU Feature: AVX512F func (x Int64x8) And(y Int64x8) Int64x8 // And performs a bitwise AND operation between two vectors. @@ -538,7 +538,7 @@ func (x Uint32x8) And(y Uint32x8) Uint32x8 // And performs a masked bitwise AND operation between two vectors. // -// Asm: VPANDD, CPU Feature: AVX512EVEX +// Asm: VPANDD, CPU Feature: AVX512F func (x Uint32x16) And(y Uint32x16) Uint32x16 // And performs a bitwise AND operation between two vectors. @@ -553,69 +553,69 @@ func (x Uint64x4) And(y Uint64x4) Uint64x4 // And performs a masked bitwise AND operation between two vectors. // -// Asm: VPANDQ, CPU Feature: AVX512EVEX +// Asm: VPANDQ, CPU Feature: AVX512F func (x Uint64x8) And(y Uint64x8) Uint64x8 /* AndMasked */ // AndMasked performs a masked bitwise AND operation between two vectors. // -// Asm: VPANDD, CPU Feature: AVX512EVEX +// Asm: VPANDD, CPU Feature: AVX512F func (x Int32x4) AndMasked(y Int32x4, z Mask32x4) Int32x4 // AndMasked performs a masked bitwise AND operation between two vectors. // -// Asm: VPANDD, CPU Feature: AVX512EVEX +// Asm: VPANDD, CPU Feature: AVX512F func (x Int32x8) AndMasked(y Int32x8, z Mask32x8) Int32x8 // AndMasked performs a masked bitwise AND operation between two vectors. 
// -// Asm: VPANDD, CPU Feature: AVX512EVEX +// Asm: VPANDD, CPU Feature: AVX512F func (x Int32x16) AndMasked(y Int32x16, z Mask32x16) Int32x16 // AndMasked performs a masked bitwise AND operation between two vectors. // -// Asm: VPANDQ, CPU Feature: AVX512EVEX +// Asm: VPANDQ, CPU Feature: AVX512F func (x Int64x2) AndMasked(y Int64x2, z Mask64x2) Int64x2 // AndMasked performs a masked bitwise AND operation between two vectors. // -// Asm: VPANDQ, CPU Feature: AVX512EVEX +// Asm: VPANDQ, CPU Feature: AVX512F func (x Int64x4) AndMasked(y Int64x4, z Mask64x4) Int64x4 // AndMasked performs a masked bitwise AND operation between two vectors. // -// Asm: VPANDQ, CPU Feature: AVX512EVEX +// Asm: VPANDQ, CPU Feature: AVX512F func (x Int64x8) AndMasked(y Int64x8, z Mask64x8) Int64x8 // AndMasked performs a masked bitwise AND operation between two vectors. // -// Asm: VPANDD, CPU Feature: AVX512EVEX +// Asm: VPANDD, CPU Feature: AVX512F func (x Uint32x4) AndMasked(y Uint32x4, z Mask32x4) Uint32x4 // AndMasked performs a masked bitwise AND operation between two vectors. // -// Asm: VPANDD, CPU Feature: AVX512EVEX +// Asm: VPANDD, CPU Feature: AVX512F func (x Uint32x8) AndMasked(y Uint32x8, z Mask32x8) Uint32x8 // AndMasked performs a masked bitwise AND operation between two vectors. // -// Asm: VPANDD, CPU Feature: AVX512EVEX +// Asm: VPANDD, CPU Feature: AVX512F func (x Uint32x16) AndMasked(y Uint32x16, z Mask32x16) Uint32x16 // AndMasked performs a masked bitwise AND operation between two vectors. // -// Asm: VPANDQ, CPU Feature: AVX512EVEX +// Asm: VPANDQ, CPU Feature: AVX512F func (x Uint64x2) AndMasked(y Uint64x2, z Mask64x2) Uint64x2 // AndMasked performs a masked bitwise AND operation between two vectors. // -// Asm: VPANDQ, CPU Feature: AVX512EVEX +// Asm: VPANDQ, CPU Feature: AVX512F func (x Uint64x4) AndMasked(y Uint64x4, z Mask64x4) Uint64x4 // AndMasked performs a masked bitwise AND operation between two vectors. // -// Asm: VPANDQ, CPU Feature: AVX512EVEX +// Asm: VPANDQ, CPU Feature: AVX512F func (x Uint64x8) AndMasked(y Uint64x8, z Mask64x8) Uint64x8 /* AndNot */ @@ -652,7 +652,7 @@ func (x Int32x8) AndNot(y Int32x8) Int32x8 // AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPANDND, CPU Feature: AVX512EVEX +// Asm: VPANDND, CPU Feature: AVX512F func (x Int32x16) AndNot(y Int32x16) Int32x16 // AndNot performs a bitwise AND NOT operation between two vectors. @@ -667,7 +667,7 @@ func (x Int64x4) AndNot(y Int64x4) Int64x4 // AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPANDNQ, CPU Feature: AVX512EVEX +// Asm: VPANDNQ, CPU Feature: AVX512F func (x Int64x8) AndNot(y Int64x8) Int64x8 // AndNot performs a bitwise AND NOT operation between two vectors. @@ -702,7 +702,7 @@ func (x Uint32x8) AndNot(y Uint32x8) Uint32x8 // AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPANDND, CPU Feature: AVX512EVEX +// Asm: VPANDND, CPU Feature: AVX512F func (x Uint32x16) AndNot(y Uint32x16) Uint32x16 // AndNot performs a bitwise AND NOT operation between two vectors. @@ -717,133 +717,133 @@ func (x Uint64x4) AndNot(y Uint64x4) Uint64x4 // AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPANDNQ, CPU Feature: AVX512EVEX +// Asm: VPANDNQ, CPU Feature: AVX512F func (x Uint64x8) AndNot(y Uint64x8) Uint64x8 /* AndNotMasked */ // AndNotMasked performs a masked bitwise AND NOT operation between two vectors. 
// -// Asm: VPANDND, CPU Feature: AVX512EVEX +// Asm: VPANDND, CPU Feature: AVX512F func (x Int32x4) AndNotMasked(y Int32x4, z Mask32x4) Int32x4 // AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPANDND, CPU Feature: AVX512EVEX +// Asm: VPANDND, CPU Feature: AVX512F func (x Int32x8) AndNotMasked(y Int32x8, z Mask32x8) Int32x8 // AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPANDND, CPU Feature: AVX512EVEX +// Asm: VPANDND, CPU Feature: AVX512F func (x Int32x16) AndNotMasked(y Int32x16, z Mask32x16) Int32x16 // AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPANDNQ, CPU Feature: AVX512EVEX +// Asm: VPANDNQ, CPU Feature: AVX512F func (x Int64x2) AndNotMasked(y Int64x2, z Mask64x2) Int64x2 // AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPANDNQ, CPU Feature: AVX512EVEX +// Asm: VPANDNQ, CPU Feature: AVX512F func (x Int64x4) AndNotMasked(y Int64x4, z Mask64x4) Int64x4 // AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPANDNQ, CPU Feature: AVX512EVEX +// Asm: VPANDNQ, CPU Feature: AVX512F func (x Int64x8) AndNotMasked(y Int64x8, z Mask64x8) Int64x8 // AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPANDND, CPU Feature: AVX512EVEX +// Asm: VPANDND, CPU Feature: AVX512F func (x Uint32x4) AndNotMasked(y Uint32x4, z Mask32x4) Uint32x4 // AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPANDND, CPU Feature: AVX512EVEX +// Asm: VPANDND, CPU Feature: AVX512F func (x Uint32x8) AndNotMasked(y Uint32x8, z Mask32x8) Uint32x8 // AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPANDND, CPU Feature: AVX512EVEX +// Asm: VPANDND, CPU Feature: AVX512F func (x Uint32x16) AndNotMasked(y Uint32x16, z Mask32x16) Uint32x16 // AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPANDNQ, CPU Feature: AVX512EVEX +// Asm: VPANDNQ, CPU Feature: AVX512F func (x Uint64x2) AndNotMasked(y Uint64x2, z Mask64x2) Uint64x2 // AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPANDNQ, CPU Feature: AVX512EVEX +// Asm: VPANDNQ, CPU Feature: AVX512F func (x Uint64x4) AndNotMasked(y Uint64x4, z Mask64x4) Uint64x4 // AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPANDNQ, CPU Feature: AVX512EVEX +// Asm: VPANDNQ, CPU Feature: AVX512F func (x Uint64x8) AndNotMasked(y Uint64x8, z Mask64x8) Uint64x8 /* ApproximateReciprocal */ // ApproximateReciprocal computes an approximate reciprocal of each element. // -// Asm: VRCP14PS, CPU Feature: AVX512EVEX +// Asm: VRCP14PS, CPU Feature: AVX512F func (x Float32x4) ApproximateReciprocal() Float32x4 // ApproximateReciprocal computes an approximate reciprocal of each element. // -// Asm: VRCP14PS, CPU Feature: AVX512EVEX +// Asm: VRCP14PS, CPU Feature: AVX512F func (x Float32x8) ApproximateReciprocal() Float32x8 // ApproximateReciprocal computes an approximate reciprocal of each element. // -// Asm: VRCP14PS, CPU Feature: AVX512EVEX +// Asm: VRCP14PS, CPU Feature: AVX512F func (x Float32x16) ApproximateReciprocal() Float32x16 // ApproximateReciprocal computes an approximate reciprocal of each element. 
// -// Asm: VRCP14PD, CPU Feature: AVX512EVEX +// Asm: VRCP14PD, CPU Feature: AVX512F func (x Float64x2) ApproximateReciprocal() Float64x2 // ApproximateReciprocal computes an approximate reciprocal of each element. // -// Asm: VRCP14PD, CPU Feature: AVX512EVEX +// Asm: VRCP14PD, CPU Feature: AVX512F func (x Float64x4) ApproximateReciprocal() Float64x4 // ApproximateReciprocal computes an approximate reciprocal of each element. // -// Asm: VRCP14PD, CPU Feature: AVX512EVEX +// Asm: VRCP14PD, CPU Feature: AVX512F func (x Float64x8) ApproximateReciprocal() Float64x8 /* ApproximateReciprocalMasked */ // ApproximateReciprocalMasked computes an approximate reciprocal of each element. // -// Asm: VRCP14PS, CPU Feature: AVX512EVEX +// Asm: VRCP14PS, CPU Feature: AVX512F func (x Float32x4) ApproximateReciprocalMasked(y Mask32x4) Float32x4 // ApproximateReciprocalMasked computes an approximate reciprocal of each element. // -// Asm: VRCP14PS, CPU Feature: AVX512EVEX +// Asm: VRCP14PS, CPU Feature: AVX512F func (x Float32x8) ApproximateReciprocalMasked(y Mask32x8) Float32x8 // ApproximateReciprocalMasked computes an approximate reciprocal of each element. // -// Asm: VRCP14PS, CPU Feature: AVX512EVEX +// Asm: VRCP14PS, CPU Feature: AVX512F func (x Float32x16) ApproximateReciprocalMasked(y Mask32x16) Float32x16 // ApproximateReciprocalMasked computes an approximate reciprocal of each element. // -// Asm: VRCP14PD, CPU Feature: AVX512EVEX +// Asm: VRCP14PD, CPU Feature: AVX512F func (x Float64x2) ApproximateReciprocalMasked(y Mask64x2) Float64x2 // ApproximateReciprocalMasked computes an approximate reciprocal of each element. // -// Asm: VRCP14PD, CPU Feature: AVX512EVEX +// Asm: VRCP14PD, CPU Feature: AVX512F func (x Float64x4) ApproximateReciprocalMasked(y Mask64x4) Float64x4 // ApproximateReciprocalMasked computes an approximate reciprocal of each element. // -// Asm: VRCP14PD, CPU Feature: AVX512EVEX +// Asm: VRCP14PD, CPU Feature: AVX512F func (x Float64x8) ApproximateReciprocalMasked(y Mask64x8) Float64x8 /* ApproximateReciprocalOfSqrt */ @@ -860,54 +860,54 @@ func (x Float32x8) ApproximateReciprocalOfSqrt() Float32x8 // ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. // -// Asm: VRSQRT14PS, CPU Feature: AVX512EVEX +// Asm: VRSQRT14PS, CPU Feature: AVX512F func (x Float32x16) ApproximateReciprocalOfSqrt() Float32x16 // ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. // -// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX +// Asm: VRSQRT14PD, CPU Feature: AVX512F func (x Float64x2) ApproximateReciprocalOfSqrt() Float64x2 // ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. // -// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX +// Asm: VRSQRT14PD, CPU Feature: AVX512F func (x Float64x4) ApproximateReciprocalOfSqrt() Float64x4 // ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. // -// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX +// Asm: VRSQRT14PD, CPU Feature: AVX512F func (x Float64x8) ApproximateReciprocalOfSqrt() Float64x8 /* ApproximateReciprocalOfSqrtMasked */ // ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. 
// -// Asm: VRSQRT14PS, CPU Feature: AVX512EVEX +// Asm: VRSQRT14PS, CPU Feature: AVX512F func (x Float32x4) ApproximateReciprocalOfSqrtMasked(y Mask32x4) Float32x4 // ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. // -// Asm: VRSQRT14PS, CPU Feature: AVX512EVEX +// Asm: VRSQRT14PS, CPU Feature: AVX512F func (x Float32x8) ApproximateReciprocalOfSqrtMasked(y Mask32x8) Float32x8 // ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. // -// Asm: VRSQRT14PS, CPU Feature: AVX512EVEX +// Asm: VRSQRT14PS, CPU Feature: AVX512F func (x Float32x16) ApproximateReciprocalOfSqrtMasked(y Mask32x16) Float32x16 // ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. // -// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX +// Asm: VRSQRT14PD, CPU Feature: AVX512F func (x Float64x2) ApproximateReciprocalOfSqrtMasked(y Mask64x2) Float64x2 // ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. // -// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX +// Asm: VRSQRT14PD, CPU Feature: AVX512F func (x Float64x4) ApproximateReciprocalOfSqrtMasked(y Mask64x4) Float64x4 // ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. // -// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX +// Asm: VRSQRT14PD, CPU Feature: AVX512F func (x Float64x8) ApproximateReciprocalOfSqrtMasked(y Mask64x8) Float64x8 /* Average */ @@ -924,7 +924,7 @@ func (x Uint8x32) Average(y Uint8x32) Uint8x32 // Average computes the rounded average of corresponding elements. // -// Asm: VPAVGB, CPU Feature: AVX512EVEX +// Asm: VPAVGB, CPU Feature: AVX512BW func (x Uint8x64) Average(y Uint8x64) Uint8x64 // Average computes the rounded average of corresponding elements. @@ -939,39 +939,39 @@ func (x Uint16x16) Average(y Uint16x16) Uint16x16 // Average computes the rounded average of corresponding elements. // -// Asm: VPAVGW, CPU Feature: AVX512EVEX +// Asm: VPAVGW, CPU Feature: AVX512BW func (x Uint16x32) Average(y Uint16x32) Uint16x32 /* AverageMasked */ // AverageMasked computes the rounded average of corresponding elements. // -// Asm: VPAVGB, CPU Feature: AVX512EVEX +// Asm: VPAVGB, CPU Feature: AVX512BW func (x Uint8x16) AverageMasked(y Uint8x16, z Mask8x16) Uint8x16 // AverageMasked computes the rounded average of corresponding elements. // -// Asm: VPAVGB, CPU Feature: AVX512EVEX +// Asm: VPAVGB, CPU Feature: AVX512BW func (x Uint8x32) AverageMasked(y Uint8x32, z Mask8x32) Uint8x32 // AverageMasked computes the rounded average of corresponding elements. // -// Asm: VPAVGB, CPU Feature: AVX512EVEX +// Asm: VPAVGB, CPU Feature: AVX512BW func (x Uint8x64) AverageMasked(y Uint8x64, z Mask8x64) Uint8x64 // AverageMasked computes the rounded average of corresponding elements. // -// Asm: VPAVGW, CPU Feature: AVX512EVEX +// Asm: VPAVGW, CPU Feature: AVX512BW func (x Uint16x8) AverageMasked(y Uint16x8, z Mask16x8) Uint16x8 // AverageMasked computes the rounded average of corresponding elements. // -// Asm: VPAVGW, CPU Feature: AVX512EVEX +// Asm: VPAVGW, CPU Feature: AVX512BW func (x Uint16x16) AverageMasked(y Uint16x16, z Mask16x16) Uint16x16 // AverageMasked computes the rounded average of corresponding elements. 
// -// Asm: VPAVGW, CPU Feature: AVX512EVEX +// Asm: VPAVGW, CPU Feature: AVX512BW func (x Uint16x32) AverageMasked(y Uint16x32, z Mask16x32) Uint16x32 /* Ceil */ @@ -1002,42 +1002,42 @@ func (x Float64x4) Ceil() Float64x4 // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x4) CeilWithPrecision(prec uint8) Float32x4 // CeilWithPrecision rounds elements up with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x8) CeilWithPrecision(prec uint8) Float32x8 // CeilWithPrecision rounds elements up with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x16) CeilWithPrecision(prec uint8) Float32x16 // CeilWithPrecision rounds elements up with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x2) CeilWithPrecision(prec uint8) Float64x2 // CeilWithPrecision rounds elements up with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x4) CeilWithPrecision(prec uint8) Float64x4 // CeilWithPrecision rounds elements up with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x8) CeilWithPrecision(prec uint8) Float64x8 /* CeilWithPrecisionMasked */ @@ -1046,42 +1046,42 @@ func (x Float64x8) CeilWithPrecision(prec uint8) Float64x8 // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x4) CeilWithPrecisionMasked(prec uint8, y Mask32x4) Float32x4 // CeilWithPrecisionMasked rounds elements up with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x8) CeilWithPrecisionMasked(prec uint8, y Mask32x8) Float32x8 // CeilWithPrecisionMasked rounds elements up with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x16) CeilWithPrecisionMasked(prec uint8, y Mask32x16) Float32x16 // CeilWithPrecisionMasked rounds elements up with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x2) CeilWithPrecisionMasked(prec uint8, y Mask64x2) Float64x2 // CeilWithPrecisionMasked rounds elements up with specified precision, masked. 
// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x4) CeilWithPrecisionMasked(prec uint8, y Mask64x4) Float64x4 // CeilWithPrecisionMasked rounds elements up with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x8) CeilWithPrecisionMasked(prec uint8, y Mask64x8) Float64x8 /* DiffWithCeilWithPrecision */ @@ -1090,42 +1090,42 @@ func (x Float64x8) CeilWithPrecisionMasked(prec uint8, y Mask64x8) Float64x8 // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +// Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x4) DiffWithCeilWithPrecision(prec uint8) Float32x4 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +// Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x8) DiffWithCeilWithPrecision(prec uint8) Float32x8 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +// Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x16) DiffWithCeilWithPrecision(prec uint8) Float32x16 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +// Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x2) DiffWithCeilWithPrecision(prec uint8) Float64x2 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +// Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x4) DiffWithCeilWithPrecision(prec uint8) Float64x4 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +// Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x8) DiffWithCeilWithPrecision(prec uint8) Float64x8 /* DiffWithCeilWithPrecisionMasked */ @@ -1134,42 +1134,42 @@ func (x Float64x8) DiffWithCeilWithPrecision(prec uint8) Float64x8 // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +// Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x4) DiffWithCeilWithPrecisionMasked(prec uint8, y Mask32x4) Float32x4 // DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +// Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x8) DiffWithCeilWithPrecisionMasked(prec uint8, y Mask32x8) Float32x8 // DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. 
// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +// Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x16) DiffWithCeilWithPrecisionMasked(prec uint8, y Mask32x16) Float32x16 // DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +// Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x2) DiffWithCeilWithPrecisionMasked(prec uint8, y Mask64x2) Float64x2 // DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +// Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x4) DiffWithCeilWithPrecisionMasked(prec uint8, y Mask64x4) Float64x4 // DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +// Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x8) DiffWithCeilWithPrecisionMasked(prec uint8, y Mask64x8) Float64x8 /* DiffWithFloorWithPrecision */ @@ -1178,42 +1178,42 @@ func (x Float64x8) DiffWithCeilWithPrecisionMasked(prec uint8, y Mask64x8) Float // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +// Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x4) DiffWithFloorWithPrecision(prec uint8) Float32x4 // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +// Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x8) DiffWithFloorWithPrecision(prec uint8) Float32x8 // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +// Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x16) DiffWithFloorWithPrecision(prec uint8) Float32x16 // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +// Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x2) DiffWithFloorWithPrecision(prec uint8) Float64x2 // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +// Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x4) DiffWithFloorWithPrecision(prec uint8) Float64x4 // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +// Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x8) DiffWithFloorWithPrecision(prec uint8) Float64x8 /* DiffWithFloorWithPrecisionMasked */ @@ -1222,42 +1222,42 @@ func (x Float64x8) DiffWithFloorWithPrecision(prec uint8) Float64x8 // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +// Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x4) DiffWithFloorWithPrecisionMasked(prec uint8, y Mask32x4) Float32x4 // DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +// Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x8) DiffWithFloorWithPrecisionMasked(prec uint8, y Mask32x8) Float32x8 // DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +// Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x16) DiffWithFloorWithPrecisionMasked(prec uint8, y Mask32x16) Float32x16 // DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +// Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x2) DiffWithFloorWithPrecisionMasked(prec uint8, y Mask64x2) Float64x2 // DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +// Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x4) DiffWithFloorWithPrecisionMasked(prec uint8, y Mask64x4) Float64x4 // DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +// Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x8) DiffWithFloorWithPrecisionMasked(prec uint8, y Mask64x8) Float64x8 /* DiffWithRoundWithPrecision */ @@ -1266,42 +1266,42 @@ func (x Float64x8) DiffWithFloorWithPrecisionMasked(prec uint8, y Mask64x8) Floa // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +// Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x4) DiffWithRoundWithPrecision(prec uint8) Float32x4 // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +// Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x8) DiffWithRoundWithPrecision(prec uint8) Float32x8 // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +// Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x16) DiffWithRoundWithPrecision(prec uint8) Float32x16 // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. 
// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +// Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x2) DiffWithRoundWithPrecision(prec uint8) Float64x2 // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +// Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x4) DiffWithRoundWithPrecision(prec uint8) Float64x4 // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +// Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x8) DiffWithRoundWithPrecision(prec uint8) Float64x8 /* DiffWithRoundWithPrecisionMasked */ @@ -1310,42 +1310,42 @@ func (x Float64x8) DiffWithRoundWithPrecision(prec uint8) Float64x8 // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +// Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x4) DiffWithRoundWithPrecisionMasked(prec uint8, y Mask32x4) Float32x4 // DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +// Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x8) DiffWithRoundWithPrecisionMasked(prec uint8, y Mask32x8) Float32x8 // DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +// Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x16) DiffWithRoundWithPrecisionMasked(prec uint8, y Mask32x16) Float32x16 // DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +// Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x2) DiffWithRoundWithPrecisionMasked(prec uint8, y Mask64x2) Float64x2 // DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +// Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x4) DiffWithRoundWithPrecisionMasked(prec uint8, y Mask64x4) Float64x4 // DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +// Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x8) DiffWithRoundWithPrecisionMasked(prec uint8, y Mask64x8) Float64x8 /* DiffWithTruncWithPrecision */ @@ -1354,42 +1354,42 @@ func (x Float64x8) DiffWithRoundWithPrecisionMasked(prec uint8, y Mask64x8) Floa // // prec is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +// Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x4) DiffWithTruncWithPrecision(prec uint8) Float32x4 // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +// Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x8) DiffWithTruncWithPrecision(prec uint8) Float32x8 // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +// Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x16) DiffWithTruncWithPrecision(prec uint8) Float32x16 // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +// Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x2) DiffWithTruncWithPrecision(prec uint8) Float64x2 // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +// Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x4) DiffWithTruncWithPrecision(prec uint8) Float64x4 // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +// Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x8) DiffWithTruncWithPrecision(prec uint8) Float64x8 /* DiffWithTruncWithPrecisionMasked */ @@ -1398,42 +1398,42 @@ func (x Float64x8) DiffWithTruncWithPrecision(prec uint8) Float64x8 // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +// Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x4) DiffWithTruncWithPrecisionMasked(prec uint8, y Mask32x4) Float32x4 // DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +// Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x8) DiffWithTruncWithPrecisionMasked(prec uint8, y Mask32x8) Float32x8 // DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +// Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x16) DiffWithTruncWithPrecisionMasked(prec uint8, y Mask32x16) Float32x16 // DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +// Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x2) DiffWithTruncWithPrecisionMasked(prec uint8, y Mask64x2) Float64x2 // DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. 
// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +// Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x4) DiffWithTruncWithPrecisionMasked(prec uint8, y Mask64x4) Float64x4 // DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +// Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x8) DiffWithTruncWithPrecisionMasked(prec uint8, y Mask64x8) Float64x8 /* Div */ @@ -1450,7 +1450,7 @@ func (x Float32x8) Div(y Float32x8) Float32x8 // Div divides elements of two vectors. // -// Asm: VDIVPS, CPU Feature: AVX512EVEX +// Asm: VDIVPS, CPU Feature: AVX512F func (x Float32x16) Div(y Float32x16) Float32x16 // Div divides elements of two vectors. @@ -1465,39 +1465,39 @@ func (x Float64x4) Div(y Float64x4) Float64x4 // Div divides elements of two vectors. // -// Asm: VDIVPD, CPU Feature: AVX512EVEX +// Asm: VDIVPD, CPU Feature: AVX512F func (x Float64x8) Div(y Float64x8) Float64x8 /* DivMasked */ // DivMasked divides elements of two vectors. // -// Asm: VDIVPS, CPU Feature: AVX512EVEX +// Asm: VDIVPS, CPU Feature: AVX512F func (x Float32x4) DivMasked(y Float32x4, z Mask32x4) Float32x4 // DivMasked divides elements of two vectors. // -// Asm: VDIVPS, CPU Feature: AVX512EVEX +// Asm: VDIVPS, CPU Feature: AVX512F func (x Float32x8) DivMasked(y Float32x8, z Mask32x8) Float32x8 // DivMasked divides elements of two vectors. // -// Asm: VDIVPS, CPU Feature: AVX512EVEX +// Asm: VDIVPS, CPU Feature: AVX512F func (x Float32x16) DivMasked(y Float32x16, z Mask32x16) Float32x16 // DivMasked divides elements of two vectors. // -// Asm: VDIVPD, CPU Feature: AVX512EVEX +// Asm: VDIVPD, CPU Feature: AVX512F func (x Float64x2) DivMasked(y Float64x2, z Mask64x2) Float64x2 // DivMasked divides elements of two vectors. // -// Asm: VDIVPD, CPU Feature: AVX512EVEX +// Asm: VDIVPD, CPU Feature: AVX512F func (x Float64x4) DivMasked(y Float64x4, z Mask64x4) Float64x4 // DivMasked divides elements of two vectors. // -// Asm: VDIVPD, CPU Feature: AVX512EVEX +// Asm: VDIVPD, CPU Feature: AVX512F func (x Float64x8) DivMasked(y Float64x8, z Mask64x8) Float64x8 /* DotProdBroadcast */ @@ -1601,7 +1601,7 @@ func (x Float32x8) Equal(y Float32x8) Mask32x8 // Equal compares for equality, masked. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x16) Equal(y Float32x16) Mask32x16 // Equal compares for equality. @@ -1616,199 +1616,199 @@ func (x Float64x4) Equal(y Float64x4) Mask64x4 // Equal compares for equality, masked. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x8) Equal(y Float64x8) Mask64x8 // Equal compares for equality, masked. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x64) Equal(y Int8x64) Mask8x64 // Equal compares for equality, masked. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x32) Equal(y Int16x32) Mask16x32 // Equal compares for equality, masked. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x16) Equal(y Int32x16) Mask32x16 // Equal compares for equality, masked. 
// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x8) Equal(y Int64x8) Mask64x8 // Equal compares for equality, masked. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x64) Equal(y Uint8x64) Mask8x64 // Equal compares for equality, masked. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x32) Equal(y Uint16x32) Mask16x32 // Equal compares for equality, masked. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x16) Equal(y Uint32x16) Mask32x16 // Equal compares for equality, masked. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x8) Equal(y Uint64x8) Mask64x8 /* EqualMasked */ // EqualMasked compares for equality, masked. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x4) EqualMasked(y Float32x4, z Mask32x4) Mask32x4 // EqualMasked compares for equality, masked. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x8) EqualMasked(y Float32x8, z Mask32x8) Mask32x8 // EqualMasked compares for equality, masked. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x16) EqualMasked(y Float32x16, z Mask32x16) Mask32x16 // EqualMasked compares for equality, masked. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x2) EqualMasked(y Float64x2, z Mask64x2) Mask64x2 // EqualMasked compares for equality, masked. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x4) EqualMasked(y Float64x4, z Mask64x4) Mask64x4 // EqualMasked compares for equality, masked. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x8) EqualMasked(y Float64x8, z Mask64x8) Mask64x8 // EqualMasked compares for equality, masked. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x16) EqualMasked(y Int8x16, z Mask8x16) Mask8x16 // EqualMasked compares for equality, masked. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x32) EqualMasked(y Int8x32, z Mask8x32) Mask8x32 // EqualMasked compares for equality, masked. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x64) EqualMasked(y Int8x64, z Mask8x64) Mask8x64 // EqualMasked compares for equality, masked. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x8) EqualMasked(y Int16x8, z Mask16x8) Mask16x8 // EqualMasked compares for equality, masked. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x16) EqualMasked(y Int16x16, z Mask16x16) Mask16x16 // EqualMasked compares for equality, masked. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x32) EqualMasked(y Int16x32, z Mask16x32) Mask16x32 // EqualMasked compares for equality, masked. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x4) EqualMasked(y Int32x4, z Mask32x4) Mask32x4 // EqualMasked compares for equality, masked. 
// -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x8) EqualMasked(y Int32x8, z Mask32x8) Mask32x8 // EqualMasked compares for equality, masked. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x16) EqualMasked(y Int32x16, z Mask32x16) Mask32x16 // EqualMasked compares for equality, masked. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x2) EqualMasked(y Int64x2, z Mask64x2) Mask64x2 // EqualMasked compares for equality, masked. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x4) EqualMasked(y Int64x4, z Mask64x4) Mask64x4 // EqualMasked compares for equality, masked. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x8) EqualMasked(y Int64x8, z Mask64x8) Mask64x8 // EqualMasked compares for equality, masked. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x16) EqualMasked(y Uint8x16, z Mask8x16) Mask8x16 // EqualMasked compares for equality, masked. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x32) EqualMasked(y Uint8x32, z Mask8x32) Mask8x32 // EqualMasked compares for equality, masked. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x64) EqualMasked(y Uint8x64, z Mask8x64) Mask8x64 // EqualMasked compares for equality, masked. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x8) EqualMasked(y Uint16x8, z Mask16x8) Mask16x8 // EqualMasked compares for equality, masked. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x16) EqualMasked(y Uint16x16, z Mask16x16) Mask16x16 // EqualMasked compares for equality, masked. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x32) EqualMasked(y Uint16x32, z Mask16x32) Mask16x32 // EqualMasked compares for equality, masked. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x4) EqualMasked(y Uint32x4, z Mask32x4) Mask32x4 // EqualMasked compares for equality, masked. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x8) EqualMasked(y Uint32x8, z Mask32x8) Mask32x8 // EqualMasked compares for equality, masked. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x16) EqualMasked(y Uint32x16, z Mask32x16) Mask32x16 // EqualMasked compares for equality, masked. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x2) EqualMasked(y Uint64x2, z Mask64x2) Mask64x2 // EqualMasked compares for equality, masked. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x4) EqualMasked(y Uint64x4, z Mask64x4) Mask64x4 // EqualMasked compares for equality, masked. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x8) EqualMasked(y Uint64x8, z Mask64x8) Mask64x8 /* Floor */ @@ -1839,42 +1839,42 @@ func (x Float64x4) Floor() Float64x4 // // prec is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x4) FloorWithPrecision(prec uint8) Float32x4 // FloorWithPrecision rounds elements down with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x8) FloorWithPrecision(prec uint8) Float32x8 // FloorWithPrecision rounds elements down with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x16) FloorWithPrecision(prec uint8) Float32x16 // FloorWithPrecision rounds elements down with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x2) FloorWithPrecision(prec uint8) Float64x2 // FloorWithPrecision rounds elements down with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x4) FloorWithPrecision(prec uint8) Float64x4 // FloorWithPrecision rounds elements down with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x8) FloorWithPrecision(prec uint8) Float64x8 /* FloorWithPrecisionMasked */ @@ -1883,234 +1883,234 @@ func (x Float64x8) FloorWithPrecision(prec uint8) Float64x8 // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x4) FloorWithPrecisionMasked(prec uint8, y Mask32x4) Float32x4 // FloorWithPrecisionMasked rounds elements down with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x8) FloorWithPrecisionMasked(prec uint8, y Mask32x8) Float32x8 // FloorWithPrecisionMasked rounds elements down with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x16) FloorWithPrecisionMasked(prec uint8, y Mask32x16) Float32x16 // FloorWithPrecisionMasked rounds elements down with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x2) FloorWithPrecisionMasked(prec uint8, y Mask64x2) Float64x2 // FloorWithPrecisionMasked rounds elements down with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x4) FloorWithPrecisionMasked(prec uint8, y Mask64x4) Float64x4 // FloorWithPrecisionMasked rounds elements down with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x8) FloorWithPrecisionMasked(prec uint8, y Mask64x8) Float64x8 /* FusedMultiplyAdd */ // FusedMultiplyAdd performs `(v1 * v2) + v3`. // -// Asm: VFMADD213PS, CPU Feature: AVX512EVEX +// Asm: VFMADD213PS, CPU Feature: AVX512F func (x Float32x4) FusedMultiplyAdd(y Float32x4, z Float32x4) Float32x4 // FusedMultiplyAdd performs `(v1 * v2) + v3`. // -// Asm: VFMADD213PS, CPU Feature: AVX512EVEX +// Asm: VFMADD213PS, CPU Feature: AVX512F func (x Float32x8) FusedMultiplyAdd(y Float32x8, z Float32x8) Float32x8 // FusedMultiplyAdd performs `(v1 * v2) + v3`. // -// Asm: VFMADD213PS, CPU Feature: AVX512EVEX +// Asm: VFMADD213PS, CPU Feature: AVX512F func (x Float32x16) FusedMultiplyAdd(y Float32x16, z Float32x16) Float32x16 // FusedMultiplyAdd performs `(v1 * v2) + v3`. // -// Asm: VFMADD213PD, CPU Feature: AVX512EVEX +// Asm: VFMADD213PD, CPU Feature: AVX512F func (x Float64x2) FusedMultiplyAdd(y Float64x2, z Float64x2) Float64x2 // FusedMultiplyAdd performs `(v1 * v2) + v3`. // -// Asm: VFMADD213PD, CPU Feature: AVX512EVEX +// Asm: VFMADD213PD, CPU Feature: AVX512F func (x Float64x4) FusedMultiplyAdd(y Float64x4, z Float64x4) Float64x4 // FusedMultiplyAdd performs `(v1 * v2) + v3`. // -// Asm: VFMADD213PD, CPU Feature: AVX512EVEX +// Asm: VFMADD213PD, CPU Feature: AVX512F func (x Float64x8) FusedMultiplyAdd(y Float64x8, z Float64x8) Float64x8 /* FusedMultiplyAddMasked */ // FusedMultiplyAddMasked performs `(v1 * v2) + v3`. // -// Asm: VFMADD213PS, CPU Feature: AVX512EVEX +// Asm: VFMADD213PS, CPU Feature: AVX512F func (x Float32x4) FusedMultiplyAddMasked(y Float32x4, z Float32x4, u Mask32x4) Float32x4 // FusedMultiplyAddMasked performs `(v1 * v2) + v3`. // -// Asm: VFMADD213PS, CPU Feature: AVX512EVEX +// Asm: VFMADD213PS, CPU Feature: AVX512F func (x Float32x8) FusedMultiplyAddMasked(y Float32x8, z Float32x8, u Mask32x8) Float32x8 // FusedMultiplyAddMasked performs `(v1 * v2) + v3`. // -// Asm: VFMADD213PS, CPU Feature: AVX512EVEX +// Asm: VFMADD213PS, CPU Feature: AVX512F func (x Float32x16) FusedMultiplyAddMasked(y Float32x16, z Float32x16, u Mask32x16) Float32x16 // FusedMultiplyAddMasked performs `(v1 * v2) + v3`. // -// Asm: VFMADD213PD, CPU Feature: AVX512EVEX +// Asm: VFMADD213PD, CPU Feature: AVX512F func (x Float64x2) FusedMultiplyAddMasked(y Float64x2, z Float64x2, u Mask64x2) Float64x2 // FusedMultiplyAddMasked performs `(v1 * v2) + v3`. // -// Asm: VFMADD213PD, CPU Feature: AVX512EVEX +// Asm: VFMADD213PD, CPU Feature: AVX512F func (x Float64x4) FusedMultiplyAddMasked(y Float64x4, z Float64x4, u Mask64x4) Float64x4 // FusedMultiplyAddMasked performs `(v1 * v2) + v3`. // -// Asm: VFMADD213PD, CPU Feature: AVX512EVEX +// Asm: VFMADD213PD, CPU Feature: AVX512F func (x Float64x8) FusedMultiplyAddMasked(y Float64x8, z Float64x8, u Mask64x8) Float64x8 /* FusedMultiplyAddSub */ // FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. 
// -// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX +// Asm: VFMADDSUB213PS, CPU Feature: AVX512F func (x Float32x4) FusedMultiplyAddSub(y Float32x4, z Float32x4) Float32x4 // FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // -// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX +// Asm: VFMADDSUB213PS, CPU Feature: AVX512F func (x Float32x8) FusedMultiplyAddSub(y Float32x8, z Float32x8) Float32x8 // FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // -// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX +// Asm: VFMADDSUB213PS, CPU Feature: AVX512F func (x Float32x16) FusedMultiplyAddSub(y Float32x16, z Float32x16) Float32x16 // FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // -// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX +// Asm: VFMADDSUB213PD, CPU Feature: AVX512F func (x Float64x2) FusedMultiplyAddSub(y Float64x2, z Float64x2) Float64x2 // FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // -// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX +// Asm: VFMADDSUB213PD, CPU Feature: AVX512F func (x Float64x4) FusedMultiplyAddSub(y Float64x4, z Float64x4) Float64x4 // FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // -// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX +// Asm: VFMADDSUB213PD, CPU Feature: AVX512F func (x Float64x8) FusedMultiplyAddSub(y Float64x8, z Float64x8) Float64x8 /* FusedMultiplyAddSubMasked */ // FusedMultiplyAddSubMasked performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // -// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX +// Asm: VFMADDSUB213PS, CPU Feature: AVX512F func (x Float32x4) FusedMultiplyAddSubMasked(y Float32x4, z Float32x4, u Mask32x4) Float32x4 // FusedMultiplyAddSubMasked performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // -// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX +// Asm: VFMADDSUB213PS, CPU Feature: AVX512F func (x Float32x8) FusedMultiplyAddSubMasked(y Float32x8, z Float32x8, u Mask32x8) Float32x8 // FusedMultiplyAddSubMasked performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // -// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX +// Asm: VFMADDSUB213PS, CPU Feature: AVX512F func (x Float32x16) FusedMultiplyAddSubMasked(y Float32x16, z Float32x16, u Mask32x16) Float32x16 // FusedMultiplyAddSubMasked performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // -// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX +// Asm: VFMADDSUB213PD, CPU Feature: AVX512F func (x Float64x2) FusedMultiplyAddSubMasked(y Float64x2, z Float64x2, u Mask64x2) Float64x2 // FusedMultiplyAddSubMasked performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // -// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX +// Asm: VFMADDSUB213PD, CPU Feature: AVX512F func (x Float64x4) FusedMultiplyAddSubMasked(y Float64x4, z Float64x4, u Mask64x4) Float64x4 // FusedMultiplyAddSubMasked performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. 
// -// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX +// Asm: VFMADDSUB213PD, CPU Feature: AVX512F func (x Float64x8) FusedMultiplyAddSubMasked(y Float64x8, z Float64x8, u Mask64x8) Float64x8 /* FusedMultiplySubAdd */ // FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // -// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX +// Asm: VFMSUBADD213PS, CPU Feature: AVX512F func (x Float32x4) FusedMultiplySubAdd(y Float32x4, z Float32x4) Float32x4 // FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // -// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX +// Asm: VFMSUBADD213PS, CPU Feature: AVX512F func (x Float32x8) FusedMultiplySubAdd(y Float32x8, z Float32x8) Float32x8 // FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // -// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX +// Asm: VFMSUBADD213PS, CPU Feature: AVX512F func (x Float32x16) FusedMultiplySubAdd(y Float32x16, z Float32x16) Float32x16 // FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // -// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX +// Asm: VFMSUBADD213PD, CPU Feature: AVX512F func (x Float64x2) FusedMultiplySubAdd(y Float64x2, z Float64x2) Float64x2 // FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // -// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX +// Asm: VFMSUBADD213PD, CPU Feature: AVX512F func (x Float64x4) FusedMultiplySubAdd(y Float64x4, z Float64x4) Float64x4 // FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // -// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX +// Asm: VFMSUBADD213PD, CPU Feature: AVX512F func (x Float64x8) FusedMultiplySubAdd(y Float64x8, z Float64x8) Float64x8 /* FusedMultiplySubAddMasked */ // FusedMultiplySubAddMasked performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // -// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX +// Asm: VFMSUBADD213PS, CPU Feature: AVX512F func (x Float32x4) FusedMultiplySubAddMasked(y Float32x4, z Float32x4, u Mask32x4) Float32x4 // FusedMultiplySubAddMasked performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // -// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX +// Asm: VFMSUBADD213PS, CPU Feature: AVX512F func (x Float32x8) FusedMultiplySubAddMasked(y Float32x8, z Float32x8, u Mask32x8) Float32x8 // FusedMultiplySubAddMasked performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // -// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX +// Asm: VFMSUBADD213PS, CPU Feature: AVX512F func (x Float32x16) FusedMultiplySubAddMasked(y Float32x16, z Float32x16, u Mask32x16) Float32x16 // FusedMultiplySubAddMasked performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // -// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX +// Asm: VFMSUBADD213PD, CPU Feature: AVX512F func (x Float64x2) FusedMultiplySubAddMasked(y Float64x2, z Float64x2, u Mask64x2) Float64x2 // FusedMultiplySubAddMasked performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. 
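// Illustrative usage sketch, not part of the generated API above; it
// continues the simdsketch file above. The AddSub/SubAdd variants alternate
// the sign applied to v3 by lane index, the usual building block for
// interleaved complex arithmetic; the lane parity here simply follows the
// doc comments in this hunk.

// mulAddSub returns (a*b)-c in odd-indexed lanes and (a*b)+c in
// even-indexed lanes, per FusedMultiplyAddSub above.
func mulAddSub(a, b, c simd.Float64x4) simd.Float64x4 {
	return a.FusedMultiplyAddSub(b, c)
}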
// -// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX +// Asm: VFMSUBADD213PD, CPU Feature: AVX512F func (x Float64x4) FusedMultiplySubAddMasked(y Float64x4, z Float64x4, u Mask64x4) Float64x4 // FusedMultiplySubAddMasked performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // -// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX +// Asm: VFMSUBADD213PD, CPU Feature: AVX512F func (x Float64x8) FusedMultiplySubAddMasked(y Float64x8, z Float64x8, u Mask64x8) Float64x8 /* GaloisFieldAffineTransform */ @@ -2122,7 +2122,7 @@ func (x Float64x8) FusedMultiplySubAddMasked(y Float64x8, z Float64x8, u Mask64x // // b is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX +// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512GFNI func (x Uint8x16) GaloisFieldAffineTransform(y Uint64x2, b uint8) Uint8x16 // GaloisFieldAffineTransform computes an affine transformation in GF(2^8): @@ -2132,7 +2132,7 @@ func (x Uint8x16) GaloisFieldAffineTransform(y Uint64x2, b uint8) Uint8x16 // // b is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX +// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512GFNI func (x Uint8x32) GaloisFieldAffineTransform(y Uint64x4, b uint8) Uint8x32 // GaloisFieldAffineTransform computes an affine transformation in GF(2^8): @@ -2142,7 +2142,7 @@ func (x Uint8x32) GaloisFieldAffineTransform(y Uint64x4, b uint8) Uint8x32 // // b is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX +// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512GFNI func (x Uint8x64) GaloisFieldAffineTransform(y Uint64x8, b uint8) Uint8x64 /* GaloisFieldAffineTransformInverse */ @@ -2155,7 +2155,7 @@ func (x Uint8x64) GaloisFieldAffineTransform(y Uint64x8, b uint8) Uint8x64 // // b is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX +// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512GFNI func (x Uint8x16) GaloisFieldAffineTransformInverse(y Uint64x2, b uint8) Uint8x16 // GaloisFieldAffineTransformInverse computes an affine transformation in GF(2^8), @@ -2166,7 +2166,7 @@ func (x Uint8x16) GaloisFieldAffineTransformInverse(y Uint64x2, b uint8) Uint8x1 // // b is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX +// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512GFNI func (x Uint8x32) GaloisFieldAffineTransformInverse(y Uint64x4, b uint8) Uint8x32 // GaloisFieldAffineTransformInverse computes an affine transformation in GF(2^8), @@ -2177,7 +2177,7 @@ func (x Uint8x32) GaloisFieldAffineTransformInverse(y Uint64x4, b uint8) Uint8x3 // // b is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX +// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512GFNI func (x Uint8x64) GaloisFieldAffineTransformInverse(y Uint64x8, b uint8) Uint8x64 /* GaloisFieldAffineTransformInverseMasked */ @@ -2190,7 +2190,7 @@ func (x Uint8x64) GaloisFieldAffineTransformInverse(y Uint64x8, b uint8) Uint8x6 // // b is expected to be a constant, non-constant value will trigger a runtime panic. 
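// Illustrative usage sketch, not part of the generated API above; it
// continues the simdsketch file above. GaloisFieldAffineTransformInverse
// inverts each byte in GF(2^8) and then applies the 8x8 bit matrix in y plus
// the constant b, the shape used by AES-S-box-style constructions. The
// constant 0x63 is only an example value; producing the real AES S-box would
// also require the specific bit matrix in y, which is not shown here.

// sboxLike applies an inverse-then-affine byte substitution to every lane
// of x. b must be a compile-time constant, per the doc comment above.
func sboxLike(x simd.Uint8x16, y simd.Uint64x2) simd.Uint8x16 {
	return x.GaloisFieldAffineTransformInverse(y, 0x63)
}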
// -// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX +// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512GFNI func (x Uint8x16) GaloisFieldAffineTransformInverseMasked(y Uint64x2, b uint8, m Mask8x16) Uint8x16 // GaloisFieldAffineTransformInverseMasked computes an affine transformation in GF(2^8), @@ -2201,7 +2201,7 @@ func (x Uint8x16) GaloisFieldAffineTransformInverseMasked(y Uint64x2, b uint8, m // // b is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX +// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512GFNI func (x Uint8x32) GaloisFieldAffineTransformInverseMasked(y Uint64x4, b uint8, m Mask8x32) Uint8x32 // GaloisFieldAffineTransformInverseMasked computes an affine transformation in GF(2^8), @@ -2212,7 +2212,7 @@ func (x Uint8x32) GaloisFieldAffineTransformInverseMasked(y Uint64x4, b uint8, m // // b is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX +// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512GFNI func (x Uint8x64) GaloisFieldAffineTransformInverseMasked(y Uint64x8, b uint8, m Mask8x64) Uint8x64 /* GaloisFieldAffineTransformMasked */ @@ -2224,7 +2224,7 @@ func (x Uint8x64) GaloisFieldAffineTransformInverseMasked(y Uint64x8, b uint8, m // // b is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX +// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512GFNI func (x Uint8x16) GaloisFieldAffineTransformMasked(y Uint64x2, b uint8, m Mask8x16) Uint8x16 // GaloisFieldAffineTransformMasked computes an affine transformation in GF(2^8): @@ -2234,7 +2234,7 @@ func (x Uint8x16) GaloisFieldAffineTransformMasked(y Uint64x2, b uint8, m Mask8x // // b is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX +// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512GFNI func (x Uint8x32) GaloisFieldAffineTransformMasked(y Uint64x4, b uint8, m Mask8x32) Uint8x32 // GaloisFieldAffineTransformMasked computes an affine transformation in GF(2^8): @@ -2244,7 +2244,7 @@ func (x Uint8x32) GaloisFieldAffineTransformMasked(y Uint64x4, b uint8, m Mask8x // // b is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX +// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512GFNI func (x Uint8x64) GaloisFieldAffineTransformMasked(y Uint64x8, b uint8, m Mask8x64) Uint8x64 /* GaloisFieldMul */ @@ -2252,19 +2252,19 @@ func (x Uint8x64) GaloisFieldAffineTransformMasked(y Uint64x8, b uint8, m Mask8x // GaloisFieldMul computes element-wise GF(2^8) multiplication with // reduction polynomial x^8 + x^4 + x^3 + x + 1. // -// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX +// Asm: VGF2P8MULB, CPU Feature: AVX512GFNI func (x Uint8x16) GaloisFieldMul(y Uint8x16) Uint8x16 // GaloisFieldMul computes element-wise GF(2^8) multiplication with // reduction polynomial x^8 + x^4 + x^3 + x + 1. // -// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX +// Asm: VGF2P8MULB, CPU Feature: AVX512GFNI func (x Uint8x32) GaloisFieldMul(y Uint8x32) Uint8x32 // GaloisFieldMul computes element-wise GF(2^8) multiplication with // reduction polynomial x^8 + x^4 + x^3 + x + 1. 
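// Illustrative usage sketch, not part of the generated API above; it
// continues the simdsketch file above and uses the reduction polynomial
// named in the GaloisFieldMul comments (VGF2P8MULB, AVX512GFNI).

// gfMul multiplies a and b lane-wise in GF(2^8) modulo x^8 + x^4 + x^3 + x + 1.
func gfMul(a, b simd.Uint8x16) simd.Uint8x16 {
	return a.GaloisFieldMul(b)
}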
// -// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX +// Asm: VGF2P8MULB, CPU Feature: AVX512GFNI func (x Uint8x64) GaloisFieldMul(y Uint8x64) Uint8x64 /* GaloisFieldMulMasked */ @@ -2272,19 +2272,19 @@ func (x Uint8x64) GaloisFieldMul(y Uint8x64) Uint8x64 // GaloisFieldMulMasked computes element-wise GF(2^8) multiplication with // reduction polynomial x^8 + x^4 + x^3 + x + 1. // -// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX +// Asm: VGF2P8MULB, CPU Feature: AVX512GFNI func (x Uint8x16) GaloisFieldMulMasked(y Uint8x16, z Mask8x16) Uint8x16 // GaloisFieldMulMasked computes element-wise GF(2^8) multiplication with // reduction polynomial x^8 + x^4 + x^3 + x + 1. // -// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX +// Asm: VGF2P8MULB, CPU Feature: AVX512GFNI func (x Uint8x32) GaloisFieldMulMasked(y Uint8x32, z Mask8x32) Uint8x32 // GaloisFieldMulMasked computes element-wise GF(2^8) multiplication with // reduction polynomial x^8 + x^4 + x^3 + x + 1. // -// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX +// Asm: VGF2P8MULB, CPU Feature: AVX512GFNI func (x Uint8x64) GaloisFieldMulMasked(y Uint8x64, z Mask8x64) Uint8x64 /* Get128 */ @@ -2365,14 +2365,14 @@ func (x Uint64x4) Get128(index uint8) Uint64x2 // // index is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPEXTRB, CPU Feature: AVX512EVEX +// Asm: VPEXTRB, CPU Feature: AVX512BW func (x Int8x16) GetElem(index uint8) int8 // GetElem retrieves a single constant-indexed element's value. // // index is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPEXTRW, CPU Feature: AVX512EVEX +// Asm: VPEXTRW, CPU Feature: AVX512BW func (x Int16x8) GetElem(index uint8) int16 // GetElem retrieves a single constant-indexed element's value. @@ -2393,14 +2393,14 @@ func (x Int64x2) GetElem(index uint8) int64 // // index is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPEXTRB, CPU Feature: AVX512EVEX +// Asm: VPEXTRB, CPU Feature: AVX512BW func (x Uint8x16) GetElem(index uint8) uint8 // GetElem retrieves a single constant-indexed element's value. // // index is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPEXTRW, CPU Feature: AVX512EVEX +// Asm: VPEXTRW, CPU Feature: AVX512BW func (x Uint16x8) GetElem(index uint8) uint16 // GetElem retrieves a single constant-indexed element's value. @@ -2471,7 +2471,7 @@ func (x Float32x8) Greater(y Float32x8) Mask32x8 // Greater compares for greater than. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x16) Greater(y Float32x16) Mask32x16 // Greater compares for greater than. @@ -2486,87 +2486,87 @@ func (x Float64x4) Greater(y Float64x4) Mask64x4 // Greater compares for greater than. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x8) Greater(y Float64x8) Mask64x8 // Greater compares for greater than. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x64) Greater(y Int8x64) Mask8x64 // Greater compares for greater than. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x32) Greater(y Int16x32) Mask16x32 // Greater compares for greater than. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x16) Greater(y Int32x16) Mask32x16 // Greater compares for greater than. 
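// Illustrative usage sketch, not part of the generated API above; it
// continues the simdsketch file above. GetElem's index must be a constant,
// per the doc comments above.

// firstByte returns lane 0 of x (VPEXTRB under AVX512BW).
func firstByte(x simd.Int8x16) int8 {
	return x.GetElem(0)
}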
// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x8) Greater(y Int64x8) Mask64x8 // Greater compares for greater than. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x16) Greater(y Uint8x16) Mask8x16 // Greater compares for greater than. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x32) Greater(y Uint8x32) Mask8x32 // Greater compares for greater than. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x64) Greater(y Uint8x64) Mask8x64 // Greater compares for greater than. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x8) Greater(y Uint16x8) Mask16x8 // Greater compares for greater than. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x16) Greater(y Uint16x16) Mask16x16 // Greater compares for greater than. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x32) Greater(y Uint16x32) Mask16x32 // Greater compares for greater than. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x4) Greater(y Uint32x4) Mask32x4 // Greater compares for greater than. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x8) Greater(y Uint32x8) Mask32x8 // Greater compares for greater than. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x16) Greater(y Uint32x16) Mask32x16 // Greater compares for greater than. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x2) Greater(y Uint64x2) Mask64x2 // Greater compares for greater than. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x4) Greater(y Uint64x4) Mask64x4 // Greater compares for greater than. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x8) Greater(y Uint64x8) Mask64x8 /* GreaterEqual */ @@ -2583,7 +2583,7 @@ func (x Float32x8) GreaterEqual(y Float32x8) Mask32x8 // GreaterEqual compares for greater than or equal. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x16) GreaterEqual(y Float32x16) Mask32x16 // GreaterEqual compares for greater than or equal. @@ -2598,431 +2598,431 @@ func (x Float64x4) GreaterEqual(y Float64x4) Mask64x4 // GreaterEqual compares for greater than or equal. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x8) GreaterEqual(y Float64x8) Mask64x8 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x16) GreaterEqual(y Int8x16) Mask8x16 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x32) GreaterEqual(y Int8x32) Mask8x32 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x64) GreaterEqual(y Int8x64) Mask8x64 // GreaterEqual compares for greater than or equal. 
// -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x8) GreaterEqual(y Int16x8) Mask16x8 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x16) GreaterEqual(y Int16x16) Mask16x16 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x32) GreaterEqual(y Int16x32) Mask16x32 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x4) GreaterEqual(y Int32x4) Mask32x4 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x8) GreaterEqual(y Int32x8) Mask32x8 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x16) GreaterEqual(y Int32x16) Mask32x16 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x2) GreaterEqual(y Int64x2) Mask64x2 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x4) GreaterEqual(y Int64x4) Mask64x4 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x8) GreaterEqual(y Int64x8) Mask64x8 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x16) GreaterEqual(y Uint8x16) Mask8x16 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x32) GreaterEqual(y Uint8x32) Mask8x32 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x64) GreaterEqual(y Uint8x64) Mask8x64 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x8) GreaterEqual(y Uint16x8) Mask16x8 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x16) GreaterEqual(y Uint16x16) Mask16x16 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x32) GreaterEqual(y Uint16x32) Mask16x32 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x4) GreaterEqual(y Uint32x4) Mask32x4 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x8) GreaterEqual(y Uint32x8) Mask32x8 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x16) GreaterEqual(y Uint32x16) Mask32x16 // GreaterEqual compares for greater than or equal. 
// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x2) GreaterEqual(y Uint64x2) Mask64x2 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x4) GreaterEqual(y Uint64x4) Mask64x4 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x8) GreaterEqual(y Uint64x8) Mask64x8 /* GreaterEqualMasked */ // GreaterEqualMasked compares for greater than or equal. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x4) GreaterEqualMasked(y Float32x4, z Mask32x4) Mask32x4 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x8) GreaterEqualMasked(y Float32x8, z Mask32x8) Mask32x8 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x16) GreaterEqualMasked(y Float32x16, z Mask32x16) Mask32x16 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x2) GreaterEqualMasked(y Float64x2, z Mask64x2) Mask64x2 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x4) GreaterEqualMasked(y Float64x4, z Mask64x4) Mask64x4 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x8) GreaterEqualMasked(y Float64x8, z Mask64x8) Mask64x8 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x16) GreaterEqualMasked(y Int8x16, z Mask8x16) Mask8x16 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x32) GreaterEqualMasked(y Int8x32, z Mask8x32) Mask8x32 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x64) GreaterEqualMasked(y Int8x64, z Mask8x64) Mask8x64 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x8) GreaterEqualMasked(y Int16x8, z Mask16x8) Mask16x8 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x16) GreaterEqualMasked(y Int16x16, z Mask16x16) Mask16x16 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x32) GreaterEqualMasked(y Int16x32, z Mask16x32) Mask16x32 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x4) GreaterEqualMasked(y Int32x4, z Mask32x4) Mask32x4 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x8) GreaterEqualMasked(y Int32x8, z Mask32x8) Mask32x8 // GreaterEqualMasked compares for greater than or equal. 
// -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x16) GreaterEqualMasked(y Int32x16, z Mask32x16) Mask32x16 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x2) GreaterEqualMasked(y Int64x2, z Mask64x2) Mask64x2 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x4) GreaterEqualMasked(y Int64x4, z Mask64x4) Mask64x4 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x8) GreaterEqualMasked(y Int64x8, z Mask64x8) Mask64x8 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x16) GreaterEqualMasked(y Uint8x16, z Mask8x16) Mask8x16 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x32) GreaterEqualMasked(y Uint8x32, z Mask8x32) Mask8x32 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x64) GreaterEqualMasked(y Uint8x64, z Mask8x64) Mask8x64 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x8) GreaterEqualMasked(y Uint16x8, z Mask16x8) Mask16x8 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x16) GreaterEqualMasked(y Uint16x16, z Mask16x16) Mask16x16 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x32) GreaterEqualMasked(y Uint16x32, z Mask16x32) Mask16x32 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x4) GreaterEqualMasked(y Uint32x4, z Mask32x4) Mask32x4 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x8) GreaterEqualMasked(y Uint32x8, z Mask32x8) Mask32x8 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x16) GreaterEqualMasked(y Uint32x16, z Mask32x16) Mask32x16 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x2) GreaterEqualMasked(y Uint64x2, z Mask64x2) Mask64x2 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x4) GreaterEqualMasked(y Uint64x4, z Mask64x4) Mask64x4 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x8) GreaterEqualMasked(y Uint64x8, z Mask64x8) Mask64x8 /* GreaterMasked */ // GreaterMasked compares for greater than. 
// -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x4) GreaterMasked(y Float32x4, z Mask32x4) Mask32x4 // GreaterMasked compares for greater than. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x8) GreaterMasked(y Float32x8, z Mask32x8) Mask32x8 // GreaterMasked compares for greater than. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x16) GreaterMasked(y Float32x16, z Mask32x16) Mask32x16 // GreaterMasked compares for greater than. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x2) GreaterMasked(y Float64x2, z Mask64x2) Mask64x2 // GreaterMasked compares for greater than. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x4) GreaterMasked(y Float64x4, z Mask64x4) Mask64x4 // GreaterMasked compares for greater than. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x8) GreaterMasked(y Float64x8, z Mask64x8) Mask64x8 // GreaterMasked compares for greater than. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x16) GreaterMasked(y Int8x16, z Mask8x16) Mask8x16 // GreaterMasked compares for greater than. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x32) GreaterMasked(y Int8x32, z Mask8x32) Mask8x32 // GreaterMasked compares for greater than. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x64) GreaterMasked(y Int8x64, z Mask8x64) Mask8x64 // GreaterMasked compares for greater than. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x8) GreaterMasked(y Int16x8, z Mask16x8) Mask16x8 // GreaterMasked compares for greater than. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x16) GreaterMasked(y Int16x16, z Mask16x16) Mask16x16 // GreaterMasked compares for greater than. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x32) GreaterMasked(y Int16x32, z Mask16x32) Mask16x32 // GreaterMasked compares for greater than. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x4) GreaterMasked(y Int32x4, z Mask32x4) Mask32x4 // GreaterMasked compares for greater than. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x8) GreaterMasked(y Int32x8, z Mask32x8) Mask32x8 // GreaterMasked compares for greater than. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x16) GreaterMasked(y Int32x16, z Mask32x16) Mask32x16 // GreaterMasked compares for greater than. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x2) GreaterMasked(y Int64x2, z Mask64x2) Mask64x2 // GreaterMasked compares for greater than. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x4) GreaterMasked(y Int64x4, z Mask64x4) Mask64x4 // GreaterMasked compares for greater than. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x8) GreaterMasked(y Int64x8, z Mask64x8) Mask64x8 // GreaterMasked compares for greater than. 
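// Illustrative usage sketch, not part of the generated API above; it
// continues the simdsketch file above. Comparisons return Mask values, and
// the *Masked variants take a mask that limits which lanes participate.
// That masked-off lanes compare as false is an assumption (the usual
// AVX-512 masked-compare behavior), not something this hunk states.

// inRange reports lane-wise whether lo < x && x < hi: the first compare
// produces the mask that gates the second (VPCMPUD under AVX512F).
func inRange(x, lo, hi simd.Uint32x4) simd.Mask32x4 {
	gtLo := x.Greater(lo)            // x > lo in every lane
	return hi.GreaterMasked(x, gtLo) // hi > x, only where x > lo
}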
// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x16) GreaterMasked(y Uint8x16, z Mask8x16) Mask8x16 // GreaterMasked compares for greater than. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x32) GreaterMasked(y Uint8x32, z Mask8x32) Mask8x32 // GreaterMasked compares for greater than. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x64) GreaterMasked(y Uint8x64, z Mask8x64) Mask8x64 // GreaterMasked compares for greater than. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x8) GreaterMasked(y Uint16x8, z Mask16x8) Mask16x8 // GreaterMasked compares for greater than. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x16) GreaterMasked(y Uint16x16, z Mask16x16) Mask16x16 // GreaterMasked compares for greater than. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x32) GreaterMasked(y Uint16x32, z Mask16x32) Mask16x32 // GreaterMasked compares for greater than. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x4) GreaterMasked(y Uint32x4, z Mask32x4) Mask32x4 // GreaterMasked compares for greater than. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x8) GreaterMasked(y Uint32x8, z Mask32x8) Mask32x8 // GreaterMasked compares for greater than. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x16) GreaterMasked(y Uint32x16, z Mask32x16) Mask32x16 // GreaterMasked compares for greater than. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x2) GreaterMasked(y Uint64x2, z Mask64x2) Mask64x2 // GreaterMasked compares for greater than. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x4) GreaterMasked(y Uint64x4, z Mask64x4) Mask64x4 // GreaterMasked compares for greater than. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x8) GreaterMasked(y Uint64x8, z Mask64x8) Mask64x8 /* IsNan */ @@ -3039,7 +3039,7 @@ func (x Float32x8) IsNan(y Float32x8) Mask32x8 // IsNan checks if elements are NaN. Use as x.IsNan(x). // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x16) IsNan(y Float32x16) Mask32x16 // IsNan checks if elements are NaN. Use as x.IsNan(x). @@ -3054,39 +3054,39 @@ func (x Float64x4) IsNan(y Float64x4) Mask64x4 // IsNan checks if elements are NaN. Use as x.IsNan(x). // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x8) IsNan(y Float64x8) Mask64x8 /* IsNanMasked */ // IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x4) IsNanMasked(y Float32x4, z Mask32x4) Mask32x4 // IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x8) IsNanMasked(y Float32x8, z Mask32x8) Mask32x8 // IsNanMasked checks if elements are NaN. Use as x.IsNan(x). 
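// Illustrative usage sketch, not part of the generated API above; it
// continues the simdsketch file above and uses the idiom the IsNan doc
// comment suggests (x.IsNan(x)).

// nanLanes returns a mask set in every lane of x that holds a NaN.
func nanLanes(x simd.Float64x8) simd.Mask64x8 {
	return x.IsNan(x)
}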
// -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x16) IsNanMasked(y Float32x16, z Mask32x16) Mask32x16 // IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x2) IsNanMasked(y Float64x2, z Mask64x2) Mask64x2 // IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x4) IsNanMasked(y Float64x4, z Mask64x4) Mask64x4 // IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x8) IsNanMasked(y Float64x8, z Mask64x8) Mask64x8 /* Less */ @@ -3103,7 +3103,7 @@ func (x Float32x8) Less(y Float32x8) Mask32x8 // Less compares for less than. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x16) Less(y Float32x16) Mask32x16 // Less compares for less than. @@ -3118,127 +3118,127 @@ func (x Float64x4) Less(y Float64x4) Mask64x4 // Less compares for less than. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x8) Less(y Float64x8) Mask64x8 // Less compares for less than. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x16) Less(y Int8x16) Mask8x16 // Less compares for less than. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x32) Less(y Int8x32) Mask8x32 // Less compares for less than. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x64) Less(y Int8x64) Mask8x64 // Less compares for less than. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x8) Less(y Int16x8) Mask16x8 // Less compares for less than. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x16) Less(y Int16x16) Mask16x16 // Less compares for less than. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x32) Less(y Int16x32) Mask16x32 // Less compares for less than. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x4) Less(y Int32x4) Mask32x4 // Less compares for less than. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x8) Less(y Int32x8) Mask32x8 // Less compares for less than. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x16) Less(y Int32x16) Mask32x16 // Less compares for less than. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x2) Less(y Int64x2) Mask64x2 // Less compares for less than. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x4) Less(y Int64x4) Mask64x4 // Less compares for less than. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x8) Less(y Int64x8) Mask64x8 // Less compares for less than. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x16) Less(y Uint8x16) Mask8x16 // Less compares for less than. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x32) Less(y Uint8x32) Mask8x32 // Less compares for less than. 
// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x64) Less(y Uint8x64) Mask8x64 // Less compares for less than. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x8) Less(y Uint16x8) Mask16x8 // Less compares for less than. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x16) Less(y Uint16x16) Mask16x16 // Less compares for less than. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x32) Less(y Uint16x32) Mask16x32 // Less compares for less than. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x4) Less(y Uint32x4) Mask32x4 // Less compares for less than. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x8) Less(y Uint32x8) Mask32x8 // Less compares for less than. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x16) Less(y Uint32x16) Mask32x16 // Less compares for less than. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x2) Less(y Uint64x2) Mask64x2 // Less compares for less than. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x4) Less(y Uint64x4) Mask64x4 // Less compares for less than. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x8) Less(y Uint64x8) Mask64x8 /* LessEqual */ @@ -3255,7 +3255,7 @@ func (x Float32x8) LessEqual(y Float32x8) Mask32x8 // LessEqual compares for less than or equal. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x16) LessEqual(y Float32x16) Mask32x16 // LessEqual compares for less than or equal. @@ -3270,431 +3270,431 @@ func (x Float64x4) LessEqual(y Float64x4) Mask64x4 // LessEqual compares for less than or equal. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x8) LessEqual(y Float64x8) Mask64x8 // LessEqual compares for less than or equal. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x16) LessEqual(y Int8x16) Mask8x16 // LessEqual compares for less than or equal. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x32) LessEqual(y Int8x32) Mask8x32 // LessEqual compares for less than or equal. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x64) LessEqual(y Int8x64) Mask8x64 // LessEqual compares for less than or equal. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x8) LessEqual(y Int16x8) Mask16x8 // LessEqual compares for less than or equal. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x16) LessEqual(y Int16x16) Mask16x16 // LessEqual compares for less than or equal. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x32) LessEqual(y Int16x32) Mask16x32 // LessEqual compares for less than or equal. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x4) LessEqual(y Int32x4) Mask32x4 // LessEqual compares for less than or equal. 
// -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x8) LessEqual(y Int32x8) Mask32x8 // LessEqual compares for less than or equal. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x16) LessEqual(y Int32x16) Mask32x16 // LessEqual compares for less than or equal. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x2) LessEqual(y Int64x2) Mask64x2 // LessEqual compares for less than or equal. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x4) LessEqual(y Int64x4) Mask64x4 // LessEqual compares for less than or equal. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x8) LessEqual(y Int64x8) Mask64x8 // LessEqual compares for less than or equal. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x16) LessEqual(y Uint8x16) Mask8x16 // LessEqual compares for less than or equal. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x32) LessEqual(y Uint8x32) Mask8x32 // LessEqual compares for less than or equal. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x64) LessEqual(y Uint8x64) Mask8x64 // LessEqual compares for less than or equal. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x8) LessEqual(y Uint16x8) Mask16x8 // LessEqual compares for less than or equal. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x16) LessEqual(y Uint16x16) Mask16x16 // LessEqual compares for less than or equal. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x32) LessEqual(y Uint16x32) Mask16x32 // LessEqual compares for less than or equal. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x4) LessEqual(y Uint32x4) Mask32x4 // LessEqual compares for less than or equal. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x8) LessEqual(y Uint32x8) Mask32x8 // LessEqual compares for less than or equal. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x16) LessEqual(y Uint32x16) Mask32x16 // LessEqual compares for less than or equal. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x2) LessEqual(y Uint64x2) Mask64x2 // LessEqual compares for less than or equal. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x4) LessEqual(y Uint64x4) Mask64x4 // LessEqual compares for less than or equal. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x8) LessEqual(y Uint64x8) Mask64x8 /* LessEqualMasked */ // LessEqualMasked compares for less than or equal. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x4) LessEqualMasked(y Float32x4, z Mask32x4) Mask32x4 // LessEqualMasked compares for less than or equal. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x8) LessEqualMasked(y Float32x8, z Mask32x8) Mask32x8 // LessEqualMasked compares for less than or equal. 
// -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x16) LessEqualMasked(y Float32x16, z Mask32x16) Mask32x16 // LessEqualMasked compares for less than or equal. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x2) LessEqualMasked(y Float64x2, z Mask64x2) Mask64x2 // LessEqualMasked compares for less than or equal. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x4) LessEqualMasked(y Float64x4, z Mask64x4) Mask64x4 // LessEqualMasked compares for less than or equal. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x8) LessEqualMasked(y Float64x8, z Mask64x8) Mask64x8 // LessEqualMasked compares for less than or equal. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x16) LessEqualMasked(y Int8x16, z Mask8x16) Mask8x16 // LessEqualMasked compares for less than or equal. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x32) LessEqualMasked(y Int8x32, z Mask8x32) Mask8x32 // LessEqualMasked compares for less than or equal. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x64) LessEqualMasked(y Int8x64, z Mask8x64) Mask8x64 // LessEqualMasked compares for less than or equal. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x8) LessEqualMasked(y Int16x8, z Mask16x8) Mask16x8 // LessEqualMasked compares for less than or equal. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x16) LessEqualMasked(y Int16x16, z Mask16x16) Mask16x16 // LessEqualMasked compares for less than or equal. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x32) LessEqualMasked(y Int16x32, z Mask16x32) Mask16x32 // LessEqualMasked compares for less than or equal. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x4) LessEqualMasked(y Int32x4, z Mask32x4) Mask32x4 // LessEqualMasked compares for less than or equal. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x8) LessEqualMasked(y Int32x8, z Mask32x8) Mask32x8 // LessEqualMasked compares for less than or equal. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x16) LessEqualMasked(y Int32x16, z Mask32x16) Mask32x16 // LessEqualMasked compares for less than or equal. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x2) LessEqualMasked(y Int64x2, z Mask64x2) Mask64x2 // LessEqualMasked compares for less than or equal. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x4) LessEqualMasked(y Int64x4, z Mask64x4) Mask64x4 // LessEqualMasked compares for less than or equal. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x8) LessEqualMasked(y Int64x8, z Mask64x8) Mask64x8 // LessEqualMasked compares for less than or equal. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x16) LessEqualMasked(y Uint8x16, z Mask8x16) Mask8x16 // LessEqualMasked compares for less than or equal. 
// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x32) LessEqualMasked(y Uint8x32, z Mask8x32) Mask8x32 // LessEqualMasked compares for less than or equal. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x64) LessEqualMasked(y Uint8x64, z Mask8x64) Mask8x64 // LessEqualMasked compares for less than or equal. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x8) LessEqualMasked(y Uint16x8, z Mask16x8) Mask16x8 // LessEqualMasked compares for less than or equal. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x16) LessEqualMasked(y Uint16x16, z Mask16x16) Mask16x16 // LessEqualMasked compares for less than or equal. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x32) LessEqualMasked(y Uint16x32, z Mask16x32) Mask16x32 // LessEqualMasked compares for less than or equal. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x4) LessEqualMasked(y Uint32x4, z Mask32x4) Mask32x4 // LessEqualMasked compares for less than or equal. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x8) LessEqualMasked(y Uint32x8, z Mask32x8) Mask32x8 // LessEqualMasked compares for less than or equal. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x16) LessEqualMasked(y Uint32x16, z Mask32x16) Mask32x16 // LessEqualMasked compares for less than or equal. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x2) LessEqualMasked(y Uint64x2, z Mask64x2) Mask64x2 // LessEqualMasked compares for less than or equal. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x4) LessEqualMasked(y Uint64x4, z Mask64x4) Mask64x4 // LessEqualMasked compares for less than or equal. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x8) LessEqualMasked(y Uint64x8, z Mask64x8) Mask64x8 /* LessMasked */ // LessMasked compares for less than. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x4) LessMasked(y Float32x4, z Mask32x4) Mask32x4 // LessMasked compares for less than. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x8) LessMasked(y Float32x8, z Mask32x8) Mask32x8 // LessMasked compares for less than. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x16) LessMasked(y Float32x16, z Mask32x16) Mask32x16 // LessMasked compares for less than. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x2) LessMasked(y Float64x2, z Mask64x2) Mask64x2 // LessMasked compares for less than. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x4) LessMasked(y Float64x4, z Mask64x4) Mask64x4 // LessMasked compares for less than. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x8) LessMasked(y Float64x8, z Mask64x8) Mask64x8 // LessMasked compares for less than. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x16) LessMasked(y Int8x16, z Mask8x16) Mask8x16 // LessMasked compares for less than. 
// -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x32) LessMasked(y Int8x32, z Mask8x32) Mask8x32 // LessMasked compares for less than. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x64) LessMasked(y Int8x64, z Mask8x64) Mask8x64 // LessMasked compares for less than. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x8) LessMasked(y Int16x8, z Mask16x8) Mask16x8 // LessMasked compares for less than. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x16) LessMasked(y Int16x16, z Mask16x16) Mask16x16 // LessMasked compares for less than. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x32) LessMasked(y Int16x32, z Mask16x32) Mask16x32 // LessMasked compares for less than. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x4) LessMasked(y Int32x4, z Mask32x4) Mask32x4 // LessMasked compares for less than. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x8) LessMasked(y Int32x8, z Mask32x8) Mask32x8 // LessMasked compares for less than. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x16) LessMasked(y Int32x16, z Mask32x16) Mask32x16 // LessMasked compares for less than. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x2) LessMasked(y Int64x2, z Mask64x2) Mask64x2 // LessMasked compares for less than. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x4) LessMasked(y Int64x4, z Mask64x4) Mask64x4 // LessMasked compares for less than. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x8) LessMasked(y Int64x8, z Mask64x8) Mask64x8 // LessMasked compares for less than. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x16) LessMasked(y Uint8x16, z Mask8x16) Mask8x16 // LessMasked compares for less than. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x32) LessMasked(y Uint8x32, z Mask8x32) Mask8x32 // LessMasked compares for less than. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x64) LessMasked(y Uint8x64, z Mask8x64) Mask8x64 // LessMasked compares for less than. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x8) LessMasked(y Uint16x8, z Mask16x8) Mask16x8 // LessMasked compares for less than. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x16) LessMasked(y Uint16x16, z Mask16x16) Mask16x16 // LessMasked compares for less than. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x32) LessMasked(y Uint16x32, z Mask16x32) Mask16x32 // LessMasked compares for less than. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x4) LessMasked(y Uint32x4, z Mask32x4) Mask32x4 // LessMasked compares for less than. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x8) LessMasked(y Uint32x8, z Mask32x8) Mask32x8 // LessMasked compares for less than. 
// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x16) LessMasked(y Uint32x16, z Mask32x16) Mask32x16 // LessMasked compares for less than. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x2) LessMasked(y Uint64x2, z Mask64x2) Mask64x2 // LessMasked compares for less than. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x4) LessMasked(y Uint64x4, z Mask64x4) Mask64x4 // LessMasked compares for less than. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x8) LessMasked(y Uint64x8, z Mask64x8) Mask64x8 /* Max */ @@ -3711,7 +3711,7 @@ func (x Float32x8) Max(y Float32x8) Float32x8 // Max computes the maximum of corresponding elements. // -// Asm: VMAXPS, CPU Feature: AVX512EVEX +// Asm: VMAXPS, CPU Feature: AVX512F func (x Float32x16) Max(y Float32x16) Float32x16 // Max computes the maximum of corresponding elements. @@ -3726,7 +3726,7 @@ func (x Float64x4) Max(y Float64x4) Float64x4 // Max computes the maximum of corresponding elements. // -// Asm: VMAXPD, CPU Feature: AVX512EVEX +// Asm: VMAXPD, CPU Feature: AVX512F func (x Float64x8) Max(y Float64x8) Float64x8 // Max computes the maximum of corresponding elements. @@ -3741,7 +3741,7 @@ func (x Int8x32) Max(y Int8x32) Int8x32 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXSB, CPU Feature: AVX512EVEX +// Asm: VPMAXSB, CPU Feature: AVX512BW func (x Int8x64) Max(y Int8x64) Int8x64 // Max computes the maximum of corresponding elements. @@ -3756,7 +3756,7 @@ func (x Int16x16) Max(y Int16x16) Int16x16 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXSW, CPU Feature: AVX512EVEX +// Asm: VPMAXSW, CPU Feature: AVX512BW func (x Int16x32) Max(y Int16x32) Int16x32 // Max computes the maximum of corresponding elements. @@ -3771,22 +3771,22 @@ func (x Int32x8) Max(y Int32x8) Int32x8 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXSD, CPU Feature: AVX512EVEX +// Asm: VPMAXSD, CPU Feature: AVX512F func (x Int32x16) Max(y Int32x16) Int32x16 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXSQ, CPU Feature: AVX512EVEX +// Asm: VPMAXSQ, CPU Feature: AVX512F func (x Int64x2) Max(y Int64x2) Int64x2 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXSQ, CPU Feature: AVX512EVEX +// Asm: VPMAXSQ, CPU Feature: AVX512F func (x Int64x4) Max(y Int64x4) Int64x4 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXSQ, CPU Feature: AVX512EVEX +// Asm: VPMAXSQ, CPU Feature: AVX512F func (x Int64x8) Max(y Int64x8) Int64x8 // Max computes the maximum of corresponding elements. @@ -3801,7 +3801,7 @@ func (x Uint8x32) Max(y Uint8x32) Uint8x32 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXUB, CPU Feature: AVX512EVEX +// Asm: VPMAXUB, CPU Feature: AVX512BW func (x Uint8x64) Max(y Uint8x64) Uint8x64 // Max computes the maximum of corresponding elements. @@ -3816,7 +3816,7 @@ func (x Uint16x16) Max(y Uint16x16) Uint16x16 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXUW, CPU Feature: AVX512EVEX +// Asm: VPMAXUW, CPU Feature: AVX512BW func (x Uint16x32) Max(y Uint16x32) Uint16x32 // Max computes the maximum of corresponding elements. @@ -3831,174 +3831,174 @@ func (x Uint32x8) Max(y Uint32x8) Uint32x8 // Max computes the maximum of corresponding elements. 
// -// Asm: VPMAXUD, CPU Feature: AVX512EVEX +// Asm: VPMAXUD, CPU Feature: AVX512F func (x Uint32x16) Max(y Uint32x16) Uint32x16 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXUQ, CPU Feature: AVX512EVEX +// Asm: VPMAXUQ, CPU Feature: AVX512F func (x Uint64x2) Max(y Uint64x2) Uint64x2 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXUQ, CPU Feature: AVX512EVEX +// Asm: VPMAXUQ, CPU Feature: AVX512F func (x Uint64x4) Max(y Uint64x4) Uint64x4 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXUQ, CPU Feature: AVX512EVEX +// Asm: VPMAXUQ, CPU Feature: AVX512F func (x Uint64x8) Max(y Uint64x8) Uint64x8 /* MaxMasked */ // MaxMasked computes the maximum of corresponding elements. // -// Asm: VMAXPS, CPU Feature: AVX512EVEX +// Asm: VMAXPS, CPU Feature: AVX512F func (x Float32x4) MaxMasked(y Float32x4, z Mask32x4) Float32x4 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VMAXPS, CPU Feature: AVX512EVEX +// Asm: VMAXPS, CPU Feature: AVX512F func (x Float32x8) MaxMasked(y Float32x8, z Mask32x8) Float32x8 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VMAXPS, CPU Feature: AVX512EVEX +// Asm: VMAXPS, CPU Feature: AVX512F func (x Float32x16) MaxMasked(y Float32x16, z Mask32x16) Float32x16 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VMAXPD, CPU Feature: AVX512EVEX +// Asm: VMAXPD, CPU Feature: AVX512F func (x Float64x2) MaxMasked(y Float64x2, z Mask64x2) Float64x2 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VMAXPD, CPU Feature: AVX512EVEX +// Asm: VMAXPD, CPU Feature: AVX512F func (x Float64x4) MaxMasked(y Float64x4, z Mask64x4) Float64x4 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VMAXPD, CPU Feature: AVX512EVEX +// Asm: VMAXPD, CPU Feature: AVX512F func (x Float64x8) MaxMasked(y Float64x8, z Mask64x8) Float64x8 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VPMAXSB, CPU Feature: AVX512EVEX +// Asm: VPMAXSB, CPU Feature: AVX512BW func (x Int8x16) MaxMasked(y Int8x16, z Mask8x16) Int8x16 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VPMAXSB, CPU Feature: AVX512EVEX +// Asm: VPMAXSB, CPU Feature: AVX512BW func (x Int8x32) MaxMasked(y Int8x32, z Mask8x32) Int8x32 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VPMAXSB, CPU Feature: AVX512EVEX +// Asm: VPMAXSB, CPU Feature: AVX512BW func (x Int8x64) MaxMasked(y Int8x64, z Mask8x64) Int8x64 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VPMAXSW, CPU Feature: AVX512EVEX +// Asm: VPMAXSW, CPU Feature: AVX512BW func (x Int16x8) MaxMasked(y Int16x8, z Mask16x8) Int16x8 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VPMAXSW, CPU Feature: AVX512EVEX +// Asm: VPMAXSW, CPU Feature: AVX512BW func (x Int16x16) MaxMasked(y Int16x16, z Mask16x16) Int16x16 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VPMAXSW, CPU Feature: AVX512EVEX +// Asm: VPMAXSW, CPU Feature: AVX512BW func (x Int16x32) MaxMasked(y Int16x32, z Mask16x32) Int16x32 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VPMAXSD, CPU Feature: AVX512EVEX +// Asm: VPMAXSD, CPU Feature: AVX512F func (x Int32x4) MaxMasked(y Int32x4, z Mask32x4) Int32x4 // MaxMasked computes the maximum of corresponding elements. 
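Since Max here is a plain per-lane maximum, a short fold shows a typical use; the reduction shape and names are assumptions, only Max comes from the documented API.

package example

import "simd" // assumed import path (internal/simd during development)

// runningMax folds vs into a per-lane maximum using the unmasked Max
// method documented above; seed supplies the starting value for every lane.
func runningMax(seed simd.Float64x8, vs []simd.Float64x8) simd.Float64x8 {
	acc := seed
	for _, v := range vs {
		acc = acc.Max(v)
	}
	return acc
}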
// -// Asm: VPMAXSD, CPU Feature: AVX512EVEX +// Asm: VPMAXSD, CPU Feature: AVX512F func (x Int32x8) MaxMasked(y Int32x8, z Mask32x8) Int32x8 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VPMAXSD, CPU Feature: AVX512EVEX +// Asm: VPMAXSD, CPU Feature: AVX512F func (x Int32x16) MaxMasked(y Int32x16, z Mask32x16) Int32x16 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VPMAXSQ, CPU Feature: AVX512EVEX +// Asm: VPMAXSQ, CPU Feature: AVX512F func (x Int64x2) MaxMasked(y Int64x2, z Mask64x2) Int64x2 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VPMAXSQ, CPU Feature: AVX512EVEX +// Asm: VPMAXSQ, CPU Feature: AVX512F func (x Int64x4) MaxMasked(y Int64x4, z Mask64x4) Int64x4 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VPMAXSQ, CPU Feature: AVX512EVEX +// Asm: VPMAXSQ, CPU Feature: AVX512F func (x Int64x8) MaxMasked(y Int64x8, z Mask64x8) Int64x8 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VPMAXUB, CPU Feature: AVX512EVEX +// Asm: VPMAXUB, CPU Feature: AVX512BW func (x Uint8x16) MaxMasked(y Uint8x16, z Mask8x16) Uint8x16 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VPMAXUB, CPU Feature: AVX512EVEX +// Asm: VPMAXUB, CPU Feature: AVX512BW func (x Uint8x32) MaxMasked(y Uint8x32, z Mask8x32) Uint8x32 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VPMAXUB, CPU Feature: AVX512EVEX +// Asm: VPMAXUB, CPU Feature: AVX512BW func (x Uint8x64) MaxMasked(y Uint8x64, z Mask8x64) Uint8x64 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VPMAXUW, CPU Feature: AVX512EVEX +// Asm: VPMAXUW, CPU Feature: AVX512BW func (x Uint16x8) MaxMasked(y Uint16x8, z Mask16x8) Uint16x8 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VPMAXUW, CPU Feature: AVX512EVEX +// Asm: VPMAXUW, CPU Feature: AVX512BW func (x Uint16x16) MaxMasked(y Uint16x16, z Mask16x16) Uint16x16 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VPMAXUW, CPU Feature: AVX512EVEX +// Asm: VPMAXUW, CPU Feature: AVX512BW func (x Uint16x32) MaxMasked(y Uint16x32, z Mask16x32) Uint16x32 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VPMAXUD, CPU Feature: AVX512EVEX +// Asm: VPMAXUD, CPU Feature: AVX512F func (x Uint32x4) MaxMasked(y Uint32x4, z Mask32x4) Uint32x4 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VPMAXUD, CPU Feature: AVX512EVEX +// Asm: VPMAXUD, CPU Feature: AVX512F func (x Uint32x8) MaxMasked(y Uint32x8, z Mask32x8) Uint32x8 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VPMAXUD, CPU Feature: AVX512EVEX +// Asm: VPMAXUD, CPU Feature: AVX512F func (x Uint32x16) MaxMasked(y Uint32x16, z Mask32x16) Uint32x16 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VPMAXUQ, CPU Feature: AVX512EVEX +// Asm: VPMAXUQ, CPU Feature: AVX512F func (x Uint64x2) MaxMasked(y Uint64x2, z Mask64x2) Uint64x2 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VPMAXUQ, CPU Feature: AVX512EVEX +// Asm: VPMAXUQ, CPU Feature: AVX512F func (x Uint64x4) MaxMasked(y Uint64x4, z Mask64x4) Uint64x4 // MaxMasked computes the maximum of corresponding elements. 
// -// Asm: VPMAXUQ, CPU Feature: AVX512EVEX +// Asm: VPMAXUQ, CPU Feature: AVX512F func (x Uint64x8) MaxMasked(y Uint64x8, z Mask64x8) Uint64x8 /* Min */ @@ -4015,7 +4015,7 @@ func (x Float32x8) Min(y Float32x8) Float32x8 // Min computes the minimum of corresponding elements. // -// Asm: VMINPS, CPU Feature: AVX512EVEX +// Asm: VMINPS, CPU Feature: AVX512F func (x Float32x16) Min(y Float32x16) Float32x16 // Min computes the minimum of corresponding elements. @@ -4030,7 +4030,7 @@ func (x Float64x4) Min(y Float64x4) Float64x4 // Min computes the minimum of corresponding elements. // -// Asm: VMINPD, CPU Feature: AVX512EVEX +// Asm: VMINPD, CPU Feature: AVX512F func (x Float64x8) Min(y Float64x8) Float64x8 // Min computes the minimum of corresponding elements. @@ -4045,7 +4045,7 @@ func (x Int8x32) Min(y Int8x32) Int8x32 // Min computes the minimum of corresponding elements. // -// Asm: VPMINSB, CPU Feature: AVX512EVEX +// Asm: VPMINSB, CPU Feature: AVX512BW func (x Int8x64) Min(y Int8x64) Int8x64 // Min computes the minimum of corresponding elements. @@ -4060,7 +4060,7 @@ func (x Int16x16) Min(y Int16x16) Int16x16 // Min computes the minimum of corresponding elements. // -// Asm: VPMINSW, CPU Feature: AVX512EVEX +// Asm: VPMINSW, CPU Feature: AVX512BW func (x Int16x32) Min(y Int16x32) Int16x32 // Min computes the minimum of corresponding elements. @@ -4075,22 +4075,22 @@ func (x Int32x8) Min(y Int32x8) Int32x8 // Min computes the minimum of corresponding elements. // -// Asm: VPMINSD, CPU Feature: AVX512EVEX +// Asm: VPMINSD, CPU Feature: AVX512F func (x Int32x16) Min(y Int32x16) Int32x16 // Min computes the minimum of corresponding elements. // -// Asm: VPMINSQ, CPU Feature: AVX512EVEX +// Asm: VPMINSQ, CPU Feature: AVX512F func (x Int64x2) Min(y Int64x2) Int64x2 // Min computes the minimum of corresponding elements. // -// Asm: VPMINSQ, CPU Feature: AVX512EVEX +// Asm: VPMINSQ, CPU Feature: AVX512F func (x Int64x4) Min(y Int64x4) Int64x4 // Min computes the minimum of corresponding elements. // -// Asm: VPMINSQ, CPU Feature: AVX512EVEX +// Asm: VPMINSQ, CPU Feature: AVX512F func (x Int64x8) Min(y Int64x8) Int64x8 // Min computes the minimum of corresponding elements. @@ -4105,7 +4105,7 @@ func (x Uint8x32) Min(y Uint8x32) Uint8x32 // Min computes the minimum of corresponding elements. // -// Asm: VPMINUB, CPU Feature: AVX512EVEX +// Asm: VPMINUB, CPU Feature: AVX512BW func (x Uint8x64) Min(y Uint8x64) Uint8x64 // Min computes the minimum of corresponding elements. @@ -4120,7 +4120,7 @@ func (x Uint16x16) Min(y Uint16x16) Uint16x16 // Min computes the minimum of corresponding elements. // -// Asm: VPMINUW, CPU Feature: AVX512EVEX +// Asm: VPMINUW, CPU Feature: AVX512BW func (x Uint16x32) Min(y Uint16x32) Uint16x32 // Min computes the minimum of corresponding elements. @@ -4135,174 +4135,174 @@ func (x Uint32x8) Min(y Uint32x8) Uint32x8 // Min computes the minimum of corresponding elements. // -// Asm: VPMINUD, CPU Feature: AVX512EVEX +// Asm: VPMINUD, CPU Feature: AVX512F func (x Uint32x16) Min(y Uint32x16) Uint32x16 // Min computes the minimum of corresponding elements. // -// Asm: VPMINUQ, CPU Feature: AVX512EVEX +// Asm: VPMINUQ, CPU Feature: AVX512F func (x Uint64x2) Min(y Uint64x2) Uint64x2 // Min computes the minimum of corresponding elements. // -// Asm: VPMINUQ, CPU Feature: AVX512EVEX +// Asm: VPMINUQ, CPU Feature: AVX512F func (x Uint64x4) Min(y Uint64x4) Uint64x4 // Min computes the minimum of corresponding elements. 
// -// Asm: VPMINUQ, CPU Feature: AVX512EVEX +// Asm: VPMINUQ, CPU Feature: AVX512F func (x Uint64x8) Min(y Uint64x8) Uint64x8 /* MinMasked */ // MinMasked computes the minimum of corresponding elements. // -// Asm: VMINPS, CPU Feature: AVX512EVEX +// Asm: VMINPS, CPU Feature: AVX512F func (x Float32x4) MinMasked(y Float32x4, z Mask32x4) Float32x4 // MinMasked computes the minimum of corresponding elements. // -// Asm: VMINPS, CPU Feature: AVX512EVEX +// Asm: VMINPS, CPU Feature: AVX512F func (x Float32x8) MinMasked(y Float32x8, z Mask32x8) Float32x8 // MinMasked computes the minimum of corresponding elements. // -// Asm: VMINPS, CPU Feature: AVX512EVEX +// Asm: VMINPS, CPU Feature: AVX512F func (x Float32x16) MinMasked(y Float32x16, z Mask32x16) Float32x16 // MinMasked computes the minimum of corresponding elements. // -// Asm: VMINPD, CPU Feature: AVX512EVEX +// Asm: VMINPD, CPU Feature: AVX512F func (x Float64x2) MinMasked(y Float64x2, z Mask64x2) Float64x2 // MinMasked computes the minimum of corresponding elements. // -// Asm: VMINPD, CPU Feature: AVX512EVEX +// Asm: VMINPD, CPU Feature: AVX512F func (x Float64x4) MinMasked(y Float64x4, z Mask64x4) Float64x4 // MinMasked computes the minimum of corresponding elements. // -// Asm: VMINPD, CPU Feature: AVX512EVEX +// Asm: VMINPD, CPU Feature: AVX512F func (x Float64x8) MinMasked(y Float64x8, z Mask64x8) Float64x8 // MinMasked computes the minimum of corresponding elements. // -// Asm: VPMINSB, CPU Feature: AVX512EVEX +// Asm: VPMINSB, CPU Feature: AVX512BW func (x Int8x16) MinMasked(y Int8x16, z Mask8x16) Int8x16 // MinMasked computes the minimum of corresponding elements. // -// Asm: VPMINSB, CPU Feature: AVX512EVEX +// Asm: VPMINSB, CPU Feature: AVX512BW func (x Int8x32) MinMasked(y Int8x32, z Mask8x32) Int8x32 // MinMasked computes the minimum of corresponding elements. // -// Asm: VPMINSB, CPU Feature: AVX512EVEX +// Asm: VPMINSB, CPU Feature: AVX512BW func (x Int8x64) MinMasked(y Int8x64, z Mask8x64) Int8x64 // MinMasked computes the minimum of corresponding elements. // -// Asm: VPMINSW, CPU Feature: AVX512EVEX +// Asm: VPMINSW, CPU Feature: AVX512BW func (x Int16x8) MinMasked(y Int16x8, z Mask16x8) Int16x8 // MinMasked computes the minimum of corresponding elements. // -// Asm: VPMINSW, CPU Feature: AVX512EVEX +// Asm: VPMINSW, CPU Feature: AVX512BW func (x Int16x16) MinMasked(y Int16x16, z Mask16x16) Int16x16 // MinMasked computes the minimum of corresponding elements. // -// Asm: VPMINSW, CPU Feature: AVX512EVEX +// Asm: VPMINSW, CPU Feature: AVX512BW func (x Int16x32) MinMasked(y Int16x32, z Mask16x32) Int16x32 // MinMasked computes the minimum of corresponding elements. // -// Asm: VPMINSD, CPU Feature: AVX512EVEX +// Asm: VPMINSD, CPU Feature: AVX512F func (x Int32x4) MinMasked(y Int32x4, z Mask32x4) Int32x4 // MinMasked computes the minimum of corresponding elements. // -// Asm: VPMINSD, CPU Feature: AVX512EVEX +// Asm: VPMINSD, CPU Feature: AVX512F func (x Int32x8) MinMasked(y Int32x8, z Mask32x8) Int32x8 // MinMasked computes the minimum of corresponding elements. // -// Asm: VPMINSD, CPU Feature: AVX512EVEX +// Asm: VPMINSD, CPU Feature: AVX512F func (x Int32x16) MinMasked(y Int32x16, z Mask32x16) Int32x16 // MinMasked computes the minimum of corresponding elements. // -// Asm: VPMINSQ, CPU Feature: AVX512EVEX +// Asm: VPMINSQ, CPU Feature: AVX512F func (x Int64x2) MinMasked(y Int64x2, z Mask64x2) Int64x2 // MinMasked computes the minimum of corresponding elements. 
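The Min and Max entries pair naturally into a per-lane clamp; a minimal sketch under the same import assumption as above.

package example

import "simd" // assumed import path (internal/simd during development)

// clamp limits every lane of x to the range [lo, hi] by combining the
// unmasked Max and Min methods documented in this section.
func clamp(x, lo, hi simd.Float64x8) simd.Float64x8 {
	return x.Max(lo).Min(hi)
}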
// -// Asm: VPMINSQ, CPU Feature: AVX512EVEX +// Asm: VPMINSQ, CPU Feature: AVX512F func (x Int64x4) MinMasked(y Int64x4, z Mask64x4) Int64x4 // MinMasked computes the minimum of corresponding elements. // -// Asm: VPMINSQ, CPU Feature: AVX512EVEX +// Asm: VPMINSQ, CPU Feature: AVX512F func (x Int64x8) MinMasked(y Int64x8, z Mask64x8) Int64x8 // MinMasked computes the minimum of corresponding elements. // -// Asm: VPMINUB, CPU Feature: AVX512EVEX +// Asm: VPMINUB, CPU Feature: AVX512BW func (x Uint8x16) MinMasked(y Uint8x16, z Mask8x16) Uint8x16 // MinMasked computes the minimum of corresponding elements. // -// Asm: VPMINUB, CPU Feature: AVX512EVEX +// Asm: VPMINUB, CPU Feature: AVX512BW func (x Uint8x32) MinMasked(y Uint8x32, z Mask8x32) Uint8x32 // MinMasked computes the minimum of corresponding elements. // -// Asm: VPMINUB, CPU Feature: AVX512EVEX +// Asm: VPMINUB, CPU Feature: AVX512BW func (x Uint8x64) MinMasked(y Uint8x64, z Mask8x64) Uint8x64 // MinMasked computes the minimum of corresponding elements. // -// Asm: VPMINUW, CPU Feature: AVX512EVEX +// Asm: VPMINUW, CPU Feature: AVX512BW func (x Uint16x8) MinMasked(y Uint16x8, z Mask16x8) Uint16x8 // MinMasked computes the minimum of corresponding elements. // -// Asm: VPMINUW, CPU Feature: AVX512EVEX +// Asm: VPMINUW, CPU Feature: AVX512BW func (x Uint16x16) MinMasked(y Uint16x16, z Mask16x16) Uint16x16 // MinMasked computes the minimum of corresponding elements. // -// Asm: VPMINUW, CPU Feature: AVX512EVEX +// Asm: VPMINUW, CPU Feature: AVX512BW func (x Uint16x32) MinMasked(y Uint16x32, z Mask16x32) Uint16x32 // MinMasked computes the minimum of corresponding elements. // -// Asm: VPMINUD, CPU Feature: AVX512EVEX +// Asm: VPMINUD, CPU Feature: AVX512F func (x Uint32x4) MinMasked(y Uint32x4, z Mask32x4) Uint32x4 // MinMasked computes the minimum of corresponding elements. // -// Asm: VPMINUD, CPU Feature: AVX512EVEX +// Asm: VPMINUD, CPU Feature: AVX512F func (x Uint32x8) MinMasked(y Uint32x8, z Mask32x8) Uint32x8 // MinMasked computes the minimum of corresponding elements. // -// Asm: VPMINUD, CPU Feature: AVX512EVEX +// Asm: VPMINUD, CPU Feature: AVX512F func (x Uint32x16) MinMasked(y Uint32x16, z Mask32x16) Uint32x16 // MinMasked computes the minimum of corresponding elements. // -// Asm: VPMINUQ, CPU Feature: AVX512EVEX +// Asm: VPMINUQ, CPU Feature: AVX512F func (x Uint64x2) MinMasked(y Uint64x2, z Mask64x2) Uint64x2 // MinMasked computes the minimum of corresponding elements. // -// Asm: VPMINUQ, CPU Feature: AVX512EVEX +// Asm: VPMINUQ, CPU Feature: AVX512F func (x Uint64x4) MinMasked(y Uint64x4, z Mask64x4) Uint64x4 // MinMasked computes the minimum of corresponding elements. // -// Asm: VPMINUQ, CPU Feature: AVX512EVEX +// Asm: VPMINUQ, CPU Feature: AVX512F func (x Uint64x8) MinMasked(y Uint64x8, z Mask64x8) Uint64x8 /* Mul */ @@ -4319,7 +4319,7 @@ func (x Float32x8) Mul(y Float32x8) Float32x8 // Mul multiplies corresponding elements of two vectors, masked. // -// Asm: VMULPS, CPU Feature: AVX512EVEX +// Asm: VMULPS, CPU Feature: AVX512F func (x Float32x16) Mul(y Float32x16) Float32x16 // Mul multiplies corresponding elements of two vectors. @@ -4334,71 +4334,71 @@ func (x Float64x4) Mul(y Float64x4) Float64x4 // Mul multiplies corresponding elements of two vectors, masked. // -// Asm: VMULPD, CPU Feature: AVX512EVEX +// Asm: VMULPD, CPU Feature: AVX512F func (x Float64x8) Mul(y Float64x8) Float64x8 /* MulByPowOf2 */ // MulByPowOf2 multiplies elements by a power of 2. 
// -// Asm: VSCALEFPS, CPU Feature: AVX512EVEX +// Asm: VSCALEFPS, CPU Feature: AVX512F func (x Float32x4) MulByPowOf2(y Float32x4) Float32x4 // MulByPowOf2 multiplies elements by a power of 2. // -// Asm: VSCALEFPS, CPU Feature: AVX512EVEX +// Asm: VSCALEFPS, CPU Feature: AVX512F func (x Float32x8) MulByPowOf2(y Float32x8) Float32x8 // MulByPowOf2 multiplies elements by a power of 2. // -// Asm: VSCALEFPS, CPU Feature: AVX512EVEX +// Asm: VSCALEFPS, CPU Feature: AVX512F func (x Float32x16) MulByPowOf2(y Float32x16) Float32x16 // MulByPowOf2 multiplies elements by a power of 2. // -// Asm: VSCALEFPD, CPU Feature: AVX512EVEX +// Asm: VSCALEFPD, CPU Feature: AVX512F func (x Float64x2) MulByPowOf2(y Float64x2) Float64x2 // MulByPowOf2 multiplies elements by a power of 2. // -// Asm: VSCALEFPD, CPU Feature: AVX512EVEX +// Asm: VSCALEFPD, CPU Feature: AVX512F func (x Float64x4) MulByPowOf2(y Float64x4) Float64x4 // MulByPowOf2 multiplies elements by a power of 2. // -// Asm: VSCALEFPD, CPU Feature: AVX512EVEX +// Asm: VSCALEFPD, CPU Feature: AVX512F func (x Float64x8) MulByPowOf2(y Float64x8) Float64x8 /* MulByPowOf2Masked */ // MulByPowOf2Masked multiplies elements by a power of 2. // -// Asm: VSCALEFPS, CPU Feature: AVX512EVEX +// Asm: VSCALEFPS, CPU Feature: AVX512F func (x Float32x4) MulByPowOf2Masked(y Float32x4, z Mask32x4) Float32x4 // MulByPowOf2Masked multiplies elements by a power of 2. // -// Asm: VSCALEFPS, CPU Feature: AVX512EVEX +// Asm: VSCALEFPS, CPU Feature: AVX512F func (x Float32x8) MulByPowOf2Masked(y Float32x8, z Mask32x8) Float32x8 // MulByPowOf2Masked multiplies elements by a power of 2. // -// Asm: VSCALEFPS, CPU Feature: AVX512EVEX +// Asm: VSCALEFPS, CPU Feature: AVX512F func (x Float32x16) MulByPowOf2Masked(y Float32x16, z Mask32x16) Float32x16 // MulByPowOf2Masked multiplies elements by a power of 2. // -// Asm: VSCALEFPD, CPU Feature: AVX512EVEX +// Asm: VSCALEFPD, CPU Feature: AVX512F func (x Float64x2) MulByPowOf2Masked(y Float64x2, z Mask64x2) Float64x2 // MulByPowOf2Masked multiplies elements by a power of 2. // -// Asm: VSCALEFPD, CPU Feature: AVX512EVEX +// Asm: VSCALEFPD, CPU Feature: AVX512F func (x Float64x4) MulByPowOf2Masked(y Float64x4, z Mask64x4) Float64x4 // MulByPowOf2Masked multiplies elements by a power of 2. // -// Asm: VSCALEFPD, CPU Feature: AVX512EVEX +// Asm: VSCALEFPD, CPU Feature: AVX512F func (x Float64x8) MulByPowOf2Masked(y Float64x8, z Mask64x8) Float64x8 /* MulEvenWiden */ @@ -4418,19 +4418,19 @@ func (x Int32x8) MulEvenWiden(y Int32x8) Int64x4 // MulEvenWiden multiplies even-indexed elements, widening the result, masked. // Result[i] = v1.Even[i] * v2.Even[i]. // -// Asm: VPMULDQ, CPU Feature: AVX512EVEX +// Asm: VPMULDQ, CPU Feature: AVX512F func (x Int64x2) MulEvenWiden(y Int64x2) Int64x2 // MulEvenWiden multiplies even-indexed elements, widening the result, masked. // Result[i] = v1.Even[i] * v2.Even[i]. // -// Asm: VPMULDQ, CPU Feature: AVX512EVEX +// Asm: VPMULDQ, CPU Feature: AVX512F func (x Int64x4) MulEvenWiden(y Int64x4) Int64x4 // MulEvenWiden multiplies even-indexed elements, widening the result, masked. // Result[i] = v1.Even[i] * v2.Even[i]. // -// Asm: VPMULDQ, CPU Feature: AVX512EVEX +// Asm: VPMULDQ, CPU Feature: AVX512F func (x Int64x8) MulEvenWiden(y Int64x8) Int64x8 // MulEvenWiden multiplies even-indexed elements, widening the result. @@ -4448,19 +4448,19 @@ func (x Uint32x8) MulEvenWiden(y Uint32x8) Uint64x4 // MulEvenWiden multiplies even-indexed elements, widening the result, masked. 
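MulByPowOf2 maps to VSCALEFPS/VSCALEFPD, which scales each lane by two raised to the corresponding lane of the second operand; a small sketch follows, with the exponent vector taken as a parameter since no constructor appears in this hunk.

package example

import "simd" // assumed import path (internal/simd during development)

// scaleByPow2 multiplies each lane of x by 2 raised to the matching lane
// of exps, using the MulByPowOf2 method documented above.
func scaleByPow2(x, exps simd.Float32x4) simd.Float32x4 {
	return x.MulByPowOf2(exps)
}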
// Result[i] = v1.Even[i] * v2.Even[i]. // -// Asm: VPMULUDQ, CPU Feature: AVX512EVEX +// Asm: VPMULUDQ, CPU Feature: AVX512F func (x Uint64x2) MulEvenWiden(y Uint64x2) Uint64x2 // MulEvenWiden multiplies even-indexed elements, widening the result, masked. // Result[i] = v1.Even[i] * v2.Even[i]. // -// Asm: VPMULUDQ, CPU Feature: AVX512EVEX +// Asm: VPMULUDQ, CPU Feature: AVX512F func (x Uint64x4) MulEvenWiden(y Uint64x4) Uint64x4 // MulEvenWiden multiplies even-indexed elements, widening the result, masked. // Result[i] = v1.Even[i] * v2.Even[i]. // -// Asm: VPMULUDQ, CPU Feature: AVX512EVEX +// Asm: VPMULUDQ, CPU Feature: AVX512F func (x Uint64x8) MulEvenWiden(y Uint64x8) Uint64x8 /* MulEvenWidenMasked */ @@ -4468,37 +4468,37 @@ func (x Uint64x8) MulEvenWiden(y Uint64x8) Uint64x8 // MulEvenWidenMasked multiplies even-indexed elements, widening the result, masked. // Result[i] = v1.Even[i] * v2.Even[i]. // -// Asm: VPMULDQ, CPU Feature: AVX512EVEX +// Asm: VPMULDQ, CPU Feature: AVX512F func (x Int64x2) MulEvenWidenMasked(y Int64x2, z Mask64x2) Int64x2 // MulEvenWidenMasked multiplies even-indexed elements, widening the result, masked. // Result[i] = v1.Even[i] * v2.Even[i]. // -// Asm: VPMULDQ, CPU Feature: AVX512EVEX +// Asm: VPMULDQ, CPU Feature: AVX512F func (x Int64x4) MulEvenWidenMasked(y Int64x4, z Mask64x4) Int64x4 // MulEvenWidenMasked multiplies even-indexed elements, widening the result, masked. // Result[i] = v1.Even[i] * v2.Even[i]. // -// Asm: VPMULDQ, CPU Feature: AVX512EVEX +// Asm: VPMULDQ, CPU Feature: AVX512F func (x Int64x8) MulEvenWidenMasked(y Int64x8, z Mask64x8) Int64x8 // MulEvenWidenMasked multiplies even-indexed elements, widening the result, masked. // Result[i] = v1.Even[i] * v2.Even[i]. // -// Asm: VPMULUDQ, CPU Feature: AVX512EVEX +// Asm: VPMULUDQ, CPU Feature: AVX512F func (x Uint64x2) MulEvenWidenMasked(y Uint64x2, z Mask64x2) Uint64x2 // MulEvenWidenMasked multiplies even-indexed elements, widening the result, masked. // Result[i] = v1.Even[i] * v2.Even[i]. // -// Asm: VPMULUDQ, CPU Feature: AVX512EVEX +// Asm: VPMULUDQ, CPU Feature: AVX512F func (x Uint64x4) MulEvenWidenMasked(y Uint64x4, z Mask64x4) Uint64x4 // MulEvenWidenMasked multiplies even-indexed elements, widening the result, masked. // Result[i] = v1.Even[i] * v2.Even[i]. // -// Asm: VPMULUDQ, CPU Feature: AVX512EVEX +// Asm: VPMULUDQ, CPU Feature: AVX512F func (x Uint64x8) MulEvenWidenMasked(y Uint64x8, z Mask64x8) Uint64x8 /* MulHigh */ @@ -4515,7 +4515,7 @@ func (x Int16x16) MulHigh(y Int16x16) Int16x16 // MulHigh multiplies elements and stores the high part of the result, masked. // -// Asm: VPMULHW, CPU Feature: AVX512EVEX +// Asm: VPMULHW, CPU Feature: AVX512BW func (x Int16x32) MulHigh(y Int16x32) Int16x32 // MulHigh multiplies elements and stores the high part of the result. @@ -4530,39 +4530,39 @@ func (x Uint16x16) MulHigh(y Uint16x16) Uint16x16 // MulHigh multiplies elements and stores the high part of the result, masked. // -// Asm: VPMULHUW, CPU Feature: AVX512EVEX +// Asm: VPMULHUW, CPU Feature: AVX512BW func (x Uint16x32) MulHigh(y Uint16x32) Uint16x32 /* MulHighMasked */ // MulHighMasked multiplies elements and stores the high part of the result, masked. // -// Asm: VPMULHW, CPU Feature: AVX512EVEX +// Asm: VPMULHW, CPU Feature: AVX512BW func (x Int16x8) MulHighMasked(y Int16x8, z Mask16x8) Int16x8 // MulHighMasked multiplies elements and stores the high part of the result, masked. 
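The MulEvenWiden entries spell out Result[i] = v1.Even[i] * v2.Even[i]; the sketch below simply names that operation for 32-bit lanes widening to 64 bits, under the same import assumption.

package example

import "simd" // assumed import path (internal/simd during development)

// evenProducts multiplies the even-indexed 32-bit lanes of x and y and
// widens each product to 64 bits, per the MulEvenWiden documentation above.
func evenProducts(x, y simd.Int32x8) simd.Int64x4 {
	return x.MulEvenWiden(y)
}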
// -// Asm: VPMULHW, CPU Feature: AVX512EVEX +// Asm: VPMULHW, CPU Feature: AVX512BW func (x Int16x16) MulHighMasked(y Int16x16, z Mask16x16) Int16x16 // MulHighMasked multiplies elements and stores the high part of the result, masked. // -// Asm: VPMULHW, CPU Feature: AVX512EVEX +// Asm: VPMULHW, CPU Feature: AVX512BW func (x Int16x32) MulHighMasked(y Int16x32, z Mask16x32) Int16x32 // MulHighMasked multiplies elements and stores the high part of the result, masked. // -// Asm: VPMULHUW, CPU Feature: AVX512EVEX +// Asm: VPMULHUW, CPU Feature: AVX512BW func (x Uint16x8) MulHighMasked(y Uint16x8, z Mask16x8) Uint16x8 // MulHighMasked multiplies elements and stores the high part of the result, masked. // -// Asm: VPMULHUW, CPU Feature: AVX512EVEX +// Asm: VPMULHUW, CPU Feature: AVX512BW func (x Uint16x16) MulHighMasked(y Uint16x16, z Mask16x16) Uint16x16 // MulHighMasked multiplies elements and stores the high part of the result, masked. // -// Asm: VPMULHUW, CPU Feature: AVX512EVEX +// Asm: VPMULHUW, CPU Feature: AVX512BW func (x Uint16x32) MulHighMasked(y Uint16x32, z Mask16x32) Uint16x32 /* MulLow */ @@ -4579,7 +4579,7 @@ func (x Int16x16) MulLow(y Int16x16) Int16x16 // MulLow multiplies elements and stores the low part of the result, masked. // -// Asm: VPMULLW, CPU Feature: AVX512EVEX +// Asm: VPMULLW, CPU Feature: AVX512BW func (x Int16x32) MulLow(y Int16x32) Int16x32 // MulLow multiplies elements and stores the low part of the result. @@ -4594,101 +4594,101 @@ func (x Int32x8) MulLow(y Int32x8) Int32x8 // MulLow multiplies elements and stores the low part of the result, masked. // -// Asm: VPMULLD, CPU Feature: AVX512EVEX +// Asm: VPMULLD, CPU Feature: AVX512F func (x Int32x16) MulLow(y Int32x16) Int32x16 // MulLow multiplies elements and stores the low part of the result, masked. // -// Asm: VPMULLQ, CPU Feature: AVX512EVEX +// Asm: VPMULLQ, CPU Feature: AVX512DQ func (x Int64x2) MulLow(y Int64x2) Int64x2 // MulLow multiplies elements and stores the low part of the result, masked. // -// Asm: VPMULLQ, CPU Feature: AVX512EVEX +// Asm: VPMULLQ, CPU Feature: AVX512DQ func (x Int64x4) MulLow(y Int64x4) Int64x4 // MulLow multiplies elements and stores the low part of the result, masked. // -// Asm: VPMULLQ, CPU Feature: AVX512EVEX +// Asm: VPMULLQ, CPU Feature: AVX512DQ func (x Int64x8) MulLow(y Int64x8) Int64x8 /* MulLowMasked */ // MulLowMasked multiplies elements and stores the low part of the result, masked. // -// Asm: VPMULLW, CPU Feature: AVX512EVEX +// Asm: VPMULLW, CPU Feature: AVX512BW func (x Int16x8) MulLowMasked(y Int16x8, z Mask16x8) Int16x8 // MulLowMasked multiplies elements and stores the low part of the result, masked. // -// Asm: VPMULLW, CPU Feature: AVX512EVEX +// Asm: VPMULLW, CPU Feature: AVX512BW func (x Int16x16) MulLowMasked(y Int16x16, z Mask16x16) Int16x16 // MulLowMasked multiplies elements and stores the low part of the result, masked. // -// Asm: VPMULLW, CPU Feature: AVX512EVEX +// Asm: VPMULLW, CPU Feature: AVX512BW func (x Int16x32) MulLowMasked(y Int16x32, z Mask16x32) Int16x32 // MulLowMasked multiplies elements and stores the low part of the result, masked. // -// Asm: VPMULLD, CPU Feature: AVX512EVEX +// Asm: VPMULLD, CPU Feature: AVX512F func (x Int32x4) MulLowMasked(y Int32x4, z Mask32x4) Int32x4 // MulLowMasked multiplies elements and stores the low part of the result, masked. 
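MulHigh and MulLow split the full 32-bit product of 16-bit lanes into its two halves; a brief sketch recovering both halves at once (helper name and import path assumed).

package example

import "simd" // assumed import path (internal/simd during development)

// fullProductHalves returns the high and low 16-bit halves of the 32-bit
// products of corresponding lanes, using the MulHigh and MulLow methods
// documented above.
func fullProductHalves(x, y simd.Int16x16) (hi, lo simd.Int16x16) {
	return x.MulHigh(y), x.MulLow(y)
}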
// -// Asm: VPMULLD, CPU Feature: AVX512EVEX +// Asm: VPMULLD, CPU Feature: AVX512F func (x Int32x8) MulLowMasked(y Int32x8, z Mask32x8) Int32x8 // MulLowMasked multiplies elements and stores the low part of the result, masked. // -// Asm: VPMULLD, CPU Feature: AVX512EVEX +// Asm: VPMULLD, CPU Feature: AVX512F func (x Int32x16) MulLowMasked(y Int32x16, z Mask32x16) Int32x16 // MulLowMasked multiplies elements and stores the low part of the result, masked. // -// Asm: VPMULLQ, CPU Feature: AVX512EVEX +// Asm: VPMULLQ, CPU Feature: AVX512DQ func (x Int64x2) MulLowMasked(y Int64x2, z Mask64x2) Int64x2 // MulLowMasked multiplies elements and stores the low part of the result, masked. // -// Asm: VPMULLQ, CPU Feature: AVX512EVEX +// Asm: VPMULLQ, CPU Feature: AVX512DQ func (x Int64x4) MulLowMasked(y Int64x4, z Mask64x4) Int64x4 // MulLowMasked multiplies elements and stores the low part of the result, masked. // -// Asm: VPMULLQ, CPU Feature: AVX512EVEX +// Asm: VPMULLQ, CPU Feature: AVX512DQ func (x Int64x8) MulLowMasked(y Int64x8, z Mask64x8) Int64x8 /* MulMasked */ // MulMasked multiplies corresponding elements of two vectors, masked. // -// Asm: VMULPS, CPU Feature: AVX512EVEX +// Asm: VMULPS, CPU Feature: AVX512F func (x Float32x4) MulMasked(y Float32x4, z Mask32x4) Float32x4 // MulMasked multiplies corresponding elements of two vectors, masked. // -// Asm: VMULPS, CPU Feature: AVX512EVEX +// Asm: VMULPS, CPU Feature: AVX512F func (x Float32x8) MulMasked(y Float32x8, z Mask32x8) Float32x8 // MulMasked multiplies corresponding elements of two vectors, masked. // -// Asm: VMULPS, CPU Feature: AVX512EVEX +// Asm: VMULPS, CPU Feature: AVX512F func (x Float32x16) MulMasked(y Float32x16, z Mask32x16) Float32x16 // MulMasked multiplies corresponding elements of two vectors, masked. // -// Asm: VMULPD, CPU Feature: AVX512EVEX +// Asm: VMULPD, CPU Feature: AVX512F func (x Float64x2) MulMasked(y Float64x2, z Mask64x2) Float64x2 // MulMasked multiplies corresponding elements of two vectors, masked. // -// Asm: VMULPD, CPU Feature: AVX512EVEX +// Asm: VMULPD, CPU Feature: AVX512F func (x Float64x4) MulMasked(y Float64x4, z Mask64x4) Float64x4 // MulMasked multiplies corresponding elements of two vectors, masked. // -// Asm: VMULPD, CPU Feature: AVX512EVEX +// Asm: VMULPD, CPU Feature: AVX512F func (x Float64x8) MulMasked(y Float64x8, z Mask64x8) Float64x8 /* NotEqual */ @@ -4705,7 +4705,7 @@ func (x Float32x8) NotEqual(y Float32x8) Mask32x8 // NotEqual compares for inequality. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x16) NotEqual(y Float32x16) Mask32x16 // NotEqual compares for inequality. @@ -4720,279 +4720,279 @@ func (x Float64x4) NotEqual(y Float64x4) Mask64x4 // NotEqual compares for inequality. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x8) NotEqual(y Float64x8) Mask64x8 // NotEqual compares for inequality. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x16) NotEqual(y Int8x16) Mask8x16 // NotEqual compares for inequality. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x32) NotEqual(y Int8x32) Mask8x32 // NotEqual compares for inequality. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x64) NotEqual(y Int8x64) Mask8x64 // NotEqual compares for inequality. 
// -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x8) NotEqual(y Int16x8) Mask16x8 // NotEqual compares for inequality. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x16) NotEqual(y Int16x16) Mask16x16 // NotEqual compares for inequality. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x32) NotEqual(y Int16x32) Mask16x32 // NotEqual compares for inequality. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x4) NotEqual(y Int32x4) Mask32x4 // NotEqual compares for inequality. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x8) NotEqual(y Int32x8) Mask32x8 // NotEqual compares for inequality. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x16) NotEqual(y Int32x16) Mask32x16 // NotEqual compares for inequality. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x2) NotEqual(y Int64x2) Mask64x2 // NotEqual compares for inequality. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x4) NotEqual(y Int64x4) Mask64x4 // NotEqual compares for inequality. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x8) NotEqual(y Int64x8) Mask64x8 // NotEqual compares for inequality. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x16) NotEqual(y Uint8x16) Mask8x16 // NotEqual compares for inequality. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x32) NotEqual(y Uint8x32) Mask8x32 // NotEqual compares for inequality. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x64) NotEqual(y Uint8x64) Mask8x64 // NotEqual compares for inequality. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x8) NotEqual(y Uint16x8) Mask16x8 // NotEqual compares for inequality. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x16) NotEqual(y Uint16x16) Mask16x16 // NotEqual compares for inequality. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x32) NotEqual(y Uint16x32) Mask16x32 // NotEqual compares for inequality. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x4) NotEqual(y Uint32x4) Mask32x4 // NotEqual compares for inequality. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x8) NotEqual(y Uint32x8) Mask32x8 // NotEqual compares for inequality. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x16) NotEqual(y Uint32x16) Mask32x16 // NotEqual compares for inequality. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x2) NotEqual(y Uint64x2) Mask64x2 // NotEqual compares for inequality. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x4) NotEqual(y Uint64x4) Mask64x4 // NotEqual compares for inequality. 
// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x8) NotEqual(y Uint64x8) Mask64x8 /* NotEqualMasked */ // NotEqualMasked compares for inequality. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x4) NotEqualMasked(y Float32x4, z Mask32x4) Mask32x4 // NotEqualMasked compares for inequality. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x8) NotEqualMasked(y Float32x8, z Mask32x8) Mask32x8 // NotEqualMasked compares for inequality. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x16) NotEqualMasked(y Float32x16, z Mask32x16) Mask32x16 // NotEqualMasked compares for inequality. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x2) NotEqualMasked(y Float64x2, z Mask64x2) Mask64x2 // NotEqualMasked compares for inequality. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x4) NotEqualMasked(y Float64x4, z Mask64x4) Mask64x4 // NotEqualMasked compares for inequality. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x8) NotEqualMasked(y Float64x8, z Mask64x8) Mask64x8 // NotEqualMasked compares for inequality. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x16) NotEqualMasked(y Int8x16, z Mask8x16) Mask8x16 // NotEqualMasked compares for inequality. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x32) NotEqualMasked(y Int8x32, z Mask8x32) Mask8x32 // NotEqualMasked compares for inequality. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x64) NotEqualMasked(y Int8x64, z Mask8x64) Mask8x64 // NotEqualMasked compares for inequality. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x8) NotEqualMasked(y Int16x8, z Mask16x8) Mask16x8 // NotEqualMasked compares for inequality. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x16) NotEqualMasked(y Int16x16, z Mask16x16) Mask16x16 // NotEqualMasked compares for inequality. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x32) NotEqualMasked(y Int16x32, z Mask16x32) Mask16x32 // NotEqualMasked compares for inequality. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x4) NotEqualMasked(y Int32x4, z Mask32x4) Mask32x4 // NotEqualMasked compares for inequality. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x8) NotEqualMasked(y Int32x8, z Mask32x8) Mask32x8 // NotEqualMasked compares for inequality. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x16) NotEqualMasked(y Int32x16, z Mask32x16) Mask32x16 // NotEqualMasked compares for inequality. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x2) NotEqualMasked(y Int64x2, z Mask64x2) Mask64x2 // NotEqualMasked compares for inequality. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x4) NotEqualMasked(y Int64x4, z Mask64x4) Mask64x4 // NotEqualMasked compares for inequality. 
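NotEqual yields a lane mask rather than a vector, so its natural role is feeding the masked operations elsewhere in this file; a minimal sketch of producing such a mask, under the usual import assumption.

package example

import "simd" // assumed import path (internal/simd during development)

// changedLanes reports which 32-bit lanes differ between two snapshots,
// as a mask suitable for the *Masked methods documented in this file.
func changedLanes(prev, cur simd.Int32x4) simd.Mask32x4 {
	return prev.NotEqual(cur)
}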
// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x8) NotEqualMasked(y Int64x8, z Mask64x8) Mask64x8 // NotEqualMasked compares for inequality. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x16) NotEqualMasked(y Uint8x16, z Mask8x16) Mask8x16 // NotEqualMasked compares for inequality. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x32) NotEqualMasked(y Uint8x32, z Mask8x32) Mask8x32 // NotEqualMasked compares for inequality. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x64) NotEqualMasked(y Uint8x64, z Mask8x64) Mask8x64 // NotEqualMasked compares for inequality. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x8) NotEqualMasked(y Uint16x8, z Mask16x8) Mask16x8 // NotEqualMasked compares for inequality. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x16) NotEqualMasked(y Uint16x16, z Mask16x16) Mask16x16 // NotEqualMasked compares for inequality. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x32) NotEqualMasked(y Uint16x32, z Mask16x32) Mask16x32 // NotEqualMasked compares for inequality. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x4) NotEqualMasked(y Uint32x4, z Mask32x4) Mask32x4 // NotEqualMasked compares for inequality. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x8) NotEqualMasked(y Uint32x8, z Mask32x8) Mask32x8 // NotEqualMasked compares for inequality. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x16) NotEqualMasked(y Uint32x16, z Mask32x16) Mask32x16 // NotEqualMasked compares for inequality. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x2) NotEqualMasked(y Uint64x2, z Mask64x2) Mask64x2 // NotEqualMasked compares for inequality. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x4) NotEqualMasked(y Uint64x4, z Mask64x4) Mask64x4 // NotEqualMasked compares for inequality. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x8) NotEqualMasked(y Uint64x8, z Mask64x8) Mask64x8 /* Or */ @@ -5029,7 +5029,7 @@ func (x Int32x8) Or(y Int32x8) Int32x8 // Or performs a masked bitwise OR operation between two vectors. // -// Asm: VPORD, CPU Feature: AVX512EVEX +// Asm: VPORD, CPU Feature: AVX512F func (x Int32x16) Or(y Int32x16) Int32x16 // Or performs a bitwise OR operation between two vectors. @@ -5044,7 +5044,7 @@ func (x Int64x4) Or(y Int64x4) Int64x4 // Or performs a masked bitwise OR operation between two vectors. // -// Asm: VPORQ, CPU Feature: AVX512EVEX +// Asm: VPORQ, CPU Feature: AVX512F func (x Int64x8) Or(y Int64x8) Int64x8 // Or performs a bitwise OR operation between two vectors. @@ -5079,7 +5079,7 @@ func (x Uint32x8) Or(y Uint32x8) Uint32x8 // Or performs a masked bitwise OR operation between two vectors. // -// Asm: VPORD, CPU Feature: AVX512EVEX +// Asm: VPORD, CPU Feature: AVX512F func (x Uint32x16) Or(y Uint32x16) Uint32x16 // Or performs a bitwise OR operation between two vectors. @@ -5094,69 +5094,69 @@ func (x Uint64x4) Or(y Uint64x4) Uint64x4 // Or performs a masked bitwise OR operation between two vectors. 
// -// Asm: VPORQ, CPU Feature: AVX512EVEX +// Asm: VPORQ, CPU Feature: AVX512F func (x Uint64x8) Or(y Uint64x8) Uint64x8 /* OrMasked */ // OrMasked performs a masked bitwise OR operation between two vectors. // -// Asm: VPORD, CPU Feature: AVX512EVEX +// Asm: VPORD, CPU Feature: AVX512F func (x Int32x4) OrMasked(y Int32x4, z Mask32x4) Int32x4 // OrMasked performs a masked bitwise OR operation between two vectors. // -// Asm: VPORD, CPU Feature: AVX512EVEX +// Asm: VPORD, CPU Feature: AVX512F func (x Int32x8) OrMasked(y Int32x8, z Mask32x8) Int32x8 // OrMasked performs a masked bitwise OR operation between two vectors. // -// Asm: VPORD, CPU Feature: AVX512EVEX +// Asm: VPORD, CPU Feature: AVX512F func (x Int32x16) OrMasked(y Int32x16, z Mask32x16) Int32x16 // OrMasked performs a masked bitwise OR operation between two vectors. // -// Asm: VPORQ, CPU Feature: AVX512EVEX +// Asm: VPORQ, CPU Feature: AVX512F func (x Int64x2) OrMasked(y Int64x2, z Mask64x2) Int64x2 // OrMasked performs a masked bitwise OR operation between two vectors. // -// Asm: VPORQ, CPU Feature: AVX512EVEX +// Asm: VPORQ, CPU Feature: AVX512F func (x Int64x4) OrMasked(y Int64x4, z Mask64x4) Int64x4 // OrMasked performs a masked bitwise OR operation between two vectors. // -// Asm: VPORQ, CPU Feature: AVX512EVEX +// Asm: VPORQ, CPU Feature: AVX512F func (x Int64x8) OrMasked(y Int64x8, z Mask64x8) Int64x8 // OrMasked performs a masked bitwise OR operation between two vectors. // -// Asm: VPORD, CPU Feature: AVX512EVEX +// Asm: VPORD, CPU Feature: AVX512F func (x Uint32x4) OrMasked(y Uint32x4, z Mask32x4) Uint32x4 // OrMasked performs a masked bitwise OR operation between two vectors. // -// Asm: VPORD, CPU Feature: AVX512EVEX +// Asm: VPORD, CPU Feature: AVX512F func (x Uint32x8) OrMasked(y Uint32x8, z Mask32x8) Uint32x8 // OrMasked performs a masked bitwise OR operation between two vectors. // -// Asm: VPORD, CPU Feature: AVX512EVEX +// Asm: VPORD, CPU Feature: AVX512F func (x Uint32x16) OrMasked(y Uint32x16, z Mask32x16) Uint32x16 // OrMasked performs a masked bitwise OR operation between two vectors. // -// Asm: VPORQ, CPU Feature: AVX512EVEX +// Asm: VPORQ, CPU Feature: AVX512F func (x Uint64x2) OrMasked(y Uint64x2, z Mask64x2) Uint64x2 // OrMasked performs a masked bitwise OR operation between two vectors. // -// Asm: VPORQ, CPU Feature: AVX512EVEX +// Asm: VPORQ, CPU Feature: AVX512F func (x Uint64x4) OrMasked(y Uint64x4, z Mask64x4) Uint64x4 // OrMasked performs a masked bitwise OR operation between two vectors. // -// Asm: VPORQ, CPU Feature: AVX512EVEX +// Asm: VPORQ, CPU Feature: AVX512F func (x Uint64x8) OrMasked(y Uint64x8, z Mask64x8) Uint64x8 /* PairDotProd */ @@ -5176,41 +5176,41 @@ func (x Int16x16) PairDotProd(y Int16x16) Int32x8 // PairDotProd multiplies the elements and add the pairs together, // yielding a vector of half as many elements with twice the input element size. // -// Asm: VPMADDWD, CPU Feature: AVX512EVEX +// Asm: VPMADDWD, CPU Feature: AVX512BW func (x Int16x32) PairDotProd(y Int16x32) Int32x16 /* PairDotProdAccumulate */ // PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // -// Asm: VPDPWSSD, CPU Feature: AVX_VNNI +// Asm: VPDPWSSD, CPU Feature: AVXVNNI func (x Int32x4) PairDotProdAccumulate(y Int16x8, z Int16x8) Int32x4 // PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. 
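OrMasked applies the bitwise OR under a lane mask; a one-line sketch under the usual import assumption.

package example

import "simd" // assumed import path (internal/simd during development)

// orUnderMask performs the masked bitwise OR of x and y documented above,
// with m selecting the lanes that participate.
func orUnderMask(x, y simd.Uint32x8, m simd.Mask32x8) simd.Uint32x8 {
	return x.OrMasked(y, m)
}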
// -// Asm: VPDPWSSD, CPU Feature: AVX_VNNI +// Asm: VPDPWSSD, CPU Feature: AVXVNNI func (x Int32x8) PairDotProdAccumulate(y Int16x16, z Int16x16) Int32x8 // PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // -// Asm: VPDPWSSD, CPU Feature: AVX512EVEX +// Asm: VPDPWSSD, CPU Feature: AVX512VNNI func (x Int32x16) PairDotProdAccumulate(y Int16x32, z Int16x32) Int32x16 /* PairDotProdAccumulateMasked */ // PairDotProdAccumulateMasked performs dot products on pairs of elements of y and z and accumulates the results to x. // -// Asm: VPDPWSSD, CPU Feature: AVX512EVEX +// Asm: VPDPWSSD, CPU Feature: AVX512VNNI func (x Int32x4) PairDotProdAccumulateMasked(y Int16x8, z Int16x8, u Mask32x4) Int32x4 // PairDotProdAccumulateMasked performs dot products on pairs of elements of y and z and accumulates the results to x. // -// Asm: VPDPWSSD, CPU Feature: AVX512EVEX +// Asm: VPDPWSSD, CPU Feature: AVX512VNNI func (x Int32x8) PairDotProdAccumulateMasked(y Int16x16, z Int16x16, u Mask32x8) Int32x8 // PairDotProdAccumulateMasked performs dot products on pairs of elements of y and z and accumulates the results to x. // -// Asm: VPDPWSSD, CPU Feature: AVX512EVEX +// Asm: VPDPWSSD, CPU Feature: AVX512VNNI func (x Int32x16) PairDotProdAccumulateMasked(y Int16x32, z Int16x32, u Mask32x16) Int32x16 /* PairDotProdMasked */ @@ -5218,19 +5218,19 @@ func (x Int32x16) PairDotProdAccumulateMasked(y Int16x32, z Int16x32, u Mask32x1 // PairDotProdMasked multiplies the elements and add the pairs together, // yielding a vector of half as many elements with twice the input element size. // -// Asm: VPMADDWD, CPU Feature: AVX512EVEX +// Asm: VPMADDWD, CPU Feature: AVX512BW func (x Int16x8) PairDotProdMasked(y Int16x8, z Mask16x8) Int32x4 // PairDotProdMasked multiplies the elements and add the pairs together, // yielding a vector of half as many elements with twice the input element size. // -// Asm: VPMADDWD, CPU Feature: AVX512EVEX +// Asm: VPMADDWD, CPU Feature: AVX512BW func (x Int16x16) PairDotProdMasked(y Int16x16, z Mask16x16) Int32x8 // PairDotProdMasked multiplies the elements and add the pairs together, // yielding a vector of half as many elements with twice the input element size. // -// Asm: VPMADDWD, CPU Feature: AVX512EVEX +// Asm: VPMADDWD, CPU Feature: AVX512BW func (x Int16x32) PairDotProdMasked(y Int16x32, z Mask16x32) Int32x16 /* PairwiseAdd */ @@ -5385,244 +5385,244 @@ func (x Uint32x8) PairwiseSub(y Uint32x8) Uint32x8 // PopCount counts the number of set bits in each element. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG func (x Int8x16) PopCount() Int8x16 // PopCount counts the number of set bits in each element. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG func (x Int8x32) PopCount() Int8x32 // PopCount counts the number of set bits in each element. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG func (x Int8x64) PopCount() Int8x64 // PopCount counts the number of set bits in each element. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG func (x Int16x8) PopCount() Int16x8 // PopCount counts the number of set bits in each element. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG func (x Int16x16) PopCount() Int16x16 // PopCount counts the number of set bits in each element. 
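PairDotProdAccumulate takes the receiver as the running accumulator, which makes a dot-product loop read naturally; a sketch assuming equal-length slices and the usual import path.

package example

import "simd" // assumed import path (internal/simd during development)

// dotAccumulate folds pairwise dot products of the int16 vectors in ys and
// zs into acc, using the PairDotProdAccumulate method documented above
// (VPDPWSSD); ys and zs must have equal length.
func dotAccumulate(acc simd.Int32x4, ys, zs []simd.Int16x8) simd.Int32x4 {
	for i := range ys {
		acc = acc.PairDotProdAccumulate(ys[i], zs[i])
	}
	return acc
}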
// -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG func (x Int16x32) PopCount() Int16x32 // PopCount counts the number of set bits in each element. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ func (x Int32x4) PopCount() Int32x4 // PopCount counts the number of set bits in each element. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ func (x Int32x8) PopCount() Int32x8 // PopCount counts the number of set bits in each element. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ func (x Int32x16) PopCount() Int32x16 // PopCount counts the number of set bits in each element. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ func (x Int64x2) PopCount() Int64x2 // PopCount counts the number of set bits in each element. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ func (x Int64x4) PopCount() Int64x4 // PopCount counts the number of set bits in each element. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ func (x Int64x8) PopCount() Int64x8 // PopCount counts the number of set bits in each element. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG func (x Uint8x16) PopCount() Uint8x16 // PopCount counts the number of set bits in each element. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG func (x Uint8x32) PopCount() Uint8x32 // PopCount counts the number of set bits in each element. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG func (x Uint8x64) PopCount() Uint8x64 // PopCount counts the number of set bits in each element. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG func (x Uint16x8) PopCount() Uint16x8 // PopCount counts the number of set bits in each element. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG func (x Uint16x16) PopCount() Uint16x16 // PopCount counts the number of set bits in each element. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG func (x Uint16x32) PopCount() Uint16x32 // PopCount counts the number of set bits in each element. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ func (x Uint32x4) PopCount() Uint32x4 // PopCount counts the number of set bits in each element. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ func (x Uint32x8) PopCount() Uint32x8 // PopCount counts the number of set bits in each element. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ func (x Uint32x16) PopCount() Uint32x16 // PopCount counts the number of set bits in each element. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ func (x Uint64x2) PopCount() Uint64x2 // PopCount counts the number of set bits in each element. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ func (x Uint64x4) PopCount() Uint64x4 // PopCount counts the number of set bits in each element. 
// -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ func (x Uint64x8) PopCount() Uint64x8 /* PopCountMasked */ // PopCountMasked counts the number of set bits in each element. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG func (x Int8x16) PopCountMasked(y Mask8x16) Int8x16 // PopCountMasked counts the number of set bits in each element. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG func (x Int8x32) PopCountMasked(y Mask8x32) Int8x32 // PopCountMasked counts the number of set bits in each element. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG func (x Int8x64) PopCountMasked(y Mask8x64) Int8x64 // PopCountMasked counts the number of set bits in each element. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG func (x Int16x8) PopCountMasked(y Mask16x8) Int16x8 // PopCountMasked counts the number of set bits in each element. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG func (x Int16x16) PopCountMasked(y Mask16x16) Int16x16 // PopCountMasked counts the number of set bits in each element. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG func (x Int16x32) PopCountMasked(y Mask16x32) Int16x32 // PopCountMasked counts the number of set bits in each element. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ func (x Int32x4) PopCountMasked(y Mask32x4) Int32x4 // PopCountMasked counts the number of set bits in each element. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ func (x Int32x8) PopCountMasked(y Mask32x8) Int32x8 // PopCountMasked counts the number of set bits in each element. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ func (x Int32x16) PopCountMasked(y Mask32x16) Int32x16 // PopCountMasked counts the number of set bits in each element. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ func (x Int64x2) PopCountMasked(y Mask64x2) Int64x2 // PopCountMasked counts the number of set bits in each element. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ func (x Int64x4) PopCountMasked(y Mask64x4) Int64x4 // PopCountMasked counts the number of set bits in each element. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ func (x Int64x8) PopCountMasked(y Mask64x8) Int64x8 // PopCountMasked counts the number of set bits in each element. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG func (x Uint8x16) PopCountMasked(y Mask8x16) Uint8x16 // PopCountMasked counts the number of set bits in each element. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG func (x Uint8x32) PopCountMasked(y Mask8x32) Uint8x32 // PopCountMasked counts the number of set bits in each element. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG func (x Uint8x64) PopCountMasked(y Mask8x64) Uint8x64 // PopCountMasked counts the number of set bits in each element. 
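PopCount is a per-lane population count; the sketch below names its common use of measuring per-byte Hamming weight, with only PopCount itself taken from the entries above.

package example

import "simd" // assumed import path (internal/simd during development)

// byteHammingWeights returns, for each 8-bit lane of x, the number of set
// bits in that lane, via the PopCount method documented above (VPOPCNTB).
func byteHammingWeights(x simd.Uint8x16) simd.Uint8x16 {
	return x.PopCount()
}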
// -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG func (x Uint16x8) PopCountMasked(y Mask16x8) Uint16x8 // PopCountMasked counts the number of set bits in each element. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG func (x Uint16x16) PopCountMasked(y Mask16x16) Uint16x16 // PopCountMasked counts the number of set bits in each element. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG func (x Uint16x32) PopCountMasked(y Mask16x32) Uint16x32 // PopCountMasked counts the number of set bits in each element. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ func (x Uint32x4) PopCountMasked(y Mask32x4) Uint32x4 // PopCountMasked counts the number of set bits in each element. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ func (x Uint32x8) PopCountMasked(y Mask32x8) Uint32x8 // PopCountMasked counts the number of set bits in each element. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ func (x Uint32x16) PopCountMasked(y Mask32x16) Uint32x16 // PopCountMasked counts the number of set bits in each element. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ func (x Uint64x2) PopCountMasked(y Mask64x2) Uint64x2 // PopCountMasked counts the number of set bits in each element. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ func (x Uint64x4) PopCountMasked(y Mask64x4) Uint64x4 // PopCountMasked counts the number of set bits in each element. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ func (x Uint64x8) PopCountMasked(y Mask64x8) Uint64x8 /* RotateAllLeft */ @@ -5631,84 +5631,84 @@ func (x Uint64x8) PopCountMasked(y Mask64x8) Uint64x8 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPROLD, CPU Feature: AVX512EVEX +// Asm: VPROLD, CPU Feature: AVX512F func (x Int32x4) RotateAllLeft(shift uint8) Int32x4 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPROLD, CPU Feature: AVX512EVEX +// Asm: VPROLD, CPU Feature: AVX512F func (x Int32x8) RotateAllLeft(shift uint8) Int32x8 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPROLD, CPU Feature: AVX512EVEX +// Asm: VPROLD, CPU Feature: AVX512F func (x Int32x16) RotateAllLeft(shift uint8) Int32x16 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPROLQ, CPU Feature: AVX512EVEX +// Asm: VPROLQ, CPU Feature: AVX512F func (x Int64x2) RotateAllLeft(shift uint8) Int64x2 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VPROLQ, CPU Feature: AVX512EVEX +// Asm: VPROLQ, CPU Feature: AVX512F func (x Int64x4) RotateAllLeft(shift uint8) Int64x4 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPROLQ, CPU Feature: AVX512EVEX +// Asm: VPROLQ, CPU Feature: AVX512F func (x Int64x8) RotateAllLeft(shift uint8) Int64x8 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPROLD, CPU Feature: AVX512EVEX +// Asm: VPROLD, CPU Feature: AVX512F func (x Uint32x4) RotateAllLeft(shift uint8) Uint32x4 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPROLD, CPU Feature: AVX512EVEX +// Asm: VPROLD, CPU Feature: AVX512F func (x Uint32x8) RotateAllLeft(shift uint8) Uint32x8 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPROLD, CPU Feature: AVX512EVEX +// Asm: VPROLD, CPU Feature: AVX512F func (x Uint32x16) RotateAllLeft(shift uint8) Uint32x16 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPROLQ, CPU Feature: AVX512EVEX +// Asm: VPROLQ, CPU Feature: AVX512F func (x Uint64x2) RotateAllLeft(shift uint8) Uint64x2 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPROLQ, CPU Feature: AVX512EVEX +// Asm: VPROLQ, CPU Feature: AVX512F func (x Uint64x4) RotateAllLeft(shift uint8) Uint64x4 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPROLQ, CPU Feature: AVX512EVEX +// Asm: VPROLQ, CPU Feature: AVX512F func (x Uint64x8) RotateAllLeft(shift uint8) Uint64x8 /* RotateAllLeftMasked */ @@ -5717,84 +5717,84 @@ func (x Uint64x8) RotateAllLeft(shift uint8) Uint64x8 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPROLD, CPU Feature: AVX512EVEX +// Asm: VPROLD, CPU Feature: AVX512F func (x Int32x4) RotateAllLeftMasked(shift uint8, y Mask32x4) Int32x4 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPROLD, CPU Feature: AVX512EVEX +// Asm: VPROLD, CPU Feature: AVX512F func (x Int32x8) RotateAllLeftMasked(shift uint8, y Mask32x8) Int32x8 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VPROLD, CPU Feature: AVX512EVEX +// Asm: VPROLD, CPU Feature: AVX512F func (x Int32x16) RotateAllLeftMasked(shift uint8, y Mask32x16) Int32x16 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPROLQ, CPU Feature: AVX512EVEX +// Asm: VPROLQ, CPU Feature: AVX512F func (x Int64x2) RotateAllLeftMasked(shift uint8, y Mask64x2) Int64x2 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPROLQ, CPU Feature: AVX512EVEX +// Asm: VPROLQ, CPU Feature: AVX512F func (x Int64x4) RotateAllLeftMasked(shift uint8, y Mask64x4) Int64x4 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPROLQ, CPU Feature: AVX512EVEX +// Asm: VPROLQ, CPU Feature: AVX512F func (x Int64x8) RotateAllLeftMasked(shift uint8, y Mask64x8) Int64x8 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPROLD, CPU Feature: AVX512EVEX +// Asm: VPROLD, CPU Feature: AVX512F func (x Uint32x4) RotateAllLeftMasked(shift uint8, y Mask32x4) Uint32x4 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPROLD, CPU Feature: AVX512EVEX +// Asm: VPROLD, CPU Feature: AVX512F func (x Uint32x8) RotateAllLeftMasked(shift uint8, y Mask32x8) Uint32x8 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPROLD, CPU Feature: AVX512EVEX +// Asm: VPROLD, CPU Feature: AVX512F func (x Uint32x16) RotateAllLeftMasked(shift uint8, y Mask32x16) Uint32x16 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPROLQ, CPU Feature: AVX512EVEX +// Asm: VPROLQ, CPU Feature: AVX512F func (x Uint64x2) RotateAllLeftMasked(shift uint8, y Mask64x2) Uint64x2 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPROLQ, CPU Feature: AVX512EVEX +// Asm: VPROLQ, CPU Feature: AVX512F func (x Uint64x4) RotateAllLeftMasked(shift uint8, y Mask64x4) Uint64x4 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPROLQ, CPU Feature: AVX512EVEX +// Asm: VPROLQ, CPU Feature: AVX512F func (x Uint64x8) RotateAllLeftMasked(shift uint8, y Mask64x8) Uint64x8 /* RotateAllRight */ @@ -5803,84 +5803,84 @@ func (x Uint64x8) RotateAllLeftMasked(shift uint8, y Mask64x8) Uint64x8 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VPRORD, CPU Feature: AVX512EVEX +// Asm: VPRORD, CPU Feature: AVX512F func (x Int32x4) RotateAllRight(shift uint8) Int32x4 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPRORD, CPU Feature: AVX512EVEX +// Asm: VPRORD, CPU Feature: AVX512F func (x Int32x8) RotateAllRight(shift uint8) Int32x8 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPRORD, CPU Feature: AVX512EVEX +// Asm: VPRORD, CPU Feature: AVX512F func (x Int32x16) RotateAllRight(shift uint8) Int32x16 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPRORQ, CPU Feature: AVX512EVEX +// Asm: VPRORQ, CPU Feature: AVX512F func (x Int64x2) RotateAllRight(shift uint8) Int64x2 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPRORQ, CPU Feature: AVX512EVEX +// Asm: VPRORQ, CPU Feature: AVX512F func (x Int64x4) RotateAllRight(shift uint8) Int64x4 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPRORQ, CPU Feature: AVX512EVEX +// Asm: VPRORQ, CPU Feature: AVX512F func (x Int64x8) RotateAllRight(shift uint8) Int64x8 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPRORD, CPU Feature: AVX512EVEX +// Asm: VPRORD, CPU Feature: AVX512F func (x Uint32x4) RotateAllRight(shift uint8) Uint32x4 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPRORD, CPU Feature: AVX512EVEX +// Asm: VPRORD, CPU Feature: AVX512F func (x Uint32x8) RotateAllRight(shift uint8) Uint32x8 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPRORD, CPU Feature: AVX512EVEX +// Asm: VPRORD, CPU Feature: AVX512F func (x Uint32x16) RotateAllRight(shift uint8) Uint32x16 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPRORQ, CPU Feature: AVX512EVEX +// Asm: VPRORQ, CPU Feature: AVX512F func (x Uint64x2) RotateAllRight(shift uint8) Uint64x2 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPRORQ, CPU Feature: AVX512EVEX +// Asm: VPRORQ, CPU Feature: AVX512F func (x Uint64x4) RotateAllRight(shift uint8) Uint64x4 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. 
// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPRORQ, CPU Feature: AVX512EVEX +// Asm: VPRORQ, CPU Feature: AVX512F func (x Uint64x8) RotateAllRight(shift uint8) Uint64x8 /* RotateAllRightMasked */ @@ -5889,332 +5889,332 @@ func (x Uint64x8) RotateAllRight(shift uint8) Uint64x8 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPRORD, CPU Feature: AVX512EVEX +// Asm: VPRORD, CPU Feature: AVX512F func (x Int32x4) RotateAllRightMasked(shift uint8, y Mask32x4) Int32x4 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPRORD, CPU Feature: AVX512EVEX +// Asm: VPRORD, CPU Feature: AVX512F func (x Int32x8) RotateAllRightMasked(shift uint8, y Mask32x8) Int32x8 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPRORD, CPU Feature: AVX512EVEX +// Asm: VPRORD, CPU Feature: AVX512F func (x Int32x16) RotateAllRightMasked(shift uint8, y Mask32x16) Int32x16 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPRORQ, CPU Feature: AVX512EVEX +// Asm: VPRORQ, CPU Feature: AVX512F func (x Int64x2) RotateAllRightMasked(shift uint8, y Mask64x2) Int64x2 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPRORQ, CPU Feature: AVX512EVEX +// Asm: VPRORQ, CPU Feature: AVX512F func (x Int64x4) RotateAllRightMasked(shift uint8, y Mask64x4) Int64x4 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPRORQ, CPU Feature: AVX512EVEX +// Asm: VPRORQ, CPU Feature: AVX512F func (x Int64x8) RotateAllRightMasked(shift uint8, y Mask64x8) Int64x8 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPRORD, CPU Feature: AVX512EVEX +// Asm: VPRORD, CPU Feature: AVX512F func (x Uint32x4) RotateAllRightMasked(shift uint8, y Mask32x4) Uint32x4 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPRORD, CPU Feature: AVX512EVEX +// Asm: VPRORD, CPU Feature: AVX512F func (x Uint32x8) RotateAllRightMasked(shift uint8, y Mask32x8) Uint32x8 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPRORD, CPU Feature: AVX512EVEX +// Asm: VPRORD, CPU Feature: AVX512F func (x Uint32x16) RotateAllRightMasked(shift uint8, y Mask32x16) Uint32x16 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. 
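The immediate-count rotates might be composed like this, continuing the sketch package above (same assumptions: import "simd", GOEXPERIMENT=simd, illustrative names):

// rotl7 rotates every 32-bit lane of x left by 7 bits (VPROLD, AVX512F).
// The count must be a compile-time constant; a non-constant value panics at runtime.
func rotl7(x simd.Uint32x4) simd.Uint32x4 {
	return x.RotateAllLeft(7)
}

// rotr1Masked rotates only the 64-bit lanes selected by m right by one bit (masked VPRORQ).
func rotr1Masked(x simd.Uint64x2, m simd.Mask64x2) simd.Uint64x2 {
	return x.RotateAllRightMasked(1, m)
}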
// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPRORQ, CPU Feature: AVX512EVEX +// Asm: VPRORQ, CPU Feature: AVX512F func (x Uint64x2) RotateAllRightMasked(shift uint8, y Mask64x2) Uint64x2 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPRORQ, CPU Feature: AVX512EVEX +// Asm: VPRORQ, CPU Feature: AVX512F func (x Uint64x4) RotateAllRightMasked(shift uint8, y Mask64x4) Uint64x4 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPRORQ, CPU Feature: AVX512EVEX +// Asm: VPRORQ, CPU Feature: AVX512F func (x Uint64x8) RotateAllRightMasked(shift uint8, y Mask64x8) Uint64x8 /* RotateLeft */ // RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVD, CPU Feature: AVX512EVEX +// Asm: VPROLVD, CPU Feature: AVX512F func (x Int32x4) RotateLeft(y Int32x4) Int32x4 // RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVD, CPU Feature: AVX512EVEX +// Asm: VPROLVD, CPU Feature: AVX512F func (x Int32x8) RotateLeft(y Int32x8) Int32x8 // RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVD, CPU Feature: AVX512EVEX +// Asm: VPROLVD, CPU Feature: AVX512F func (x Int32x16) RotateLeft(y Int32x16) Int32x16 // RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVQ, CPU Feature: AVX512EVEX +// Asm: VPROLVQ, CPU Feature: AVX512F func (x Int64x2) RotateLeft(y Int64x2) Int64x2 // RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVQ, CPU Feature: AVX512EVEX +// Asm: VPROLVQ, CPU Feature: AVX512F func (x Int64x4) RotateLeft(y Int64x4) Int64x4 // RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVQ, CPU Feature: AVX512EVEX +// Asm: VPROLVQ, CPU Feature: AVX512F func (x Int64x8) RotateLeft(y Int64x8) Int64x8 // RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVD, CPU Feature: AVX512EVEX +// Asm: VPROLVD, CPU Feature: AVX512F func (x Uint32x4) RotateLeft(y Uint32x4) Uint32x4 // RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVD, CPU Feature: AVX512EVEX +// Asm: VPROLVD, CPU Feature: AVX512F func (x Uint32x8) RotateLeft(y Uint32x8) Uint32x8 // RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVD, CPU Feature: AVX512EVEX +// Asm: VPROLVD, CPU Feature: AVX512F func (x Uint32x16) RotateLeft(y Uint32x16) Uint32x16 // RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. 
// -// Asm: VPROLVQ, CPU Feature: AVX512EVEX +// Asm: VPROLVQ, CPU Feature: AVX512F func (x Uint64x2) RotateLeft(y Uint64x2) Uint64x2 // RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVQ, CPU Feature: AVX512EVEX +// Asm: VPROLVQ, CPU Feature: AVX512F func (x Uint64x4) RotateLeft(y Uint64x4) Uint64x4 // RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVQ, CPU Feature: AVX512EVEX +// Asm: VPROLVQ, CPU Feature: AVX512F func (x Uint64x8) RotateLeft(y Uint64x8) Uint64x8 /* RotateLeftMasked */ // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVD, CPU Feature: AVX512EVEX +// Asm: VPROLVD, CPU Feature: AVX512F func (x Int32x4) RotateLeftMasked(y Int32x4, z Mask32x4) Int32x4 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVD, CPU Feature: AVX512EVEX +// Asm: VPROLVD, CPU Feature: AVX512F func (x Int32x8) RotateLeftMasked(y Int32x8, z Mask32x8) Int32x8 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVD, CPU Feature: AVX512EVEX +// Asm: VPROLVD, CPU Feature: AVX512F func (x Int32x16) RotateLeftMasked(y Int32x16, z Mask32x16) Int32x16 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVQ, CPU Feature: AVX512EVEX +// Asm: VPROLVQ, CPU Feature: AVX512F func (x Int64x2) RotateLeftMasked(y Int64x2, z Mask64x2) Int64x2 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVQ, CPU Feature: AVX512EVEX +// Asm: VPROLVQ, CPU Feature: AVX512F func (x Int64x4) RotateLeftMasked(y Int64x4, z Mask64x4) Int64x4 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVQ, CPU Feature: AVX512EVEX +// Asm: VPROLVQ, CPU Feature: AVX512F func (x Int64x8) RotateLeftMasked(y Int64x8, z Mask64x8) Int64x8 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVD, CPU Feature: AVX512EVEX +// Asm: VPROLVD, CPU Feature: AVX512F func (x Uint32x4) RotateLeftMasked(y Uint32x4, z Mask32x4) Uint32x4 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVD, CPU Feature: AVX512EVEX +// Asm: VPROLVD, CPU Feature: AVX512F func (x Uint32x8) RotateLeftMasked(y Uint32x8, z Mask32x8) Uint32x8 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVD, CPU Feature: AVX512EVEX +// Asm: VPROLVD, CPU Feature: AVX512F func (x Uint32x16) RotateLeftMasked(y Uint32x16, z Mask32x16) Uint32x16 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVQ, CPU Feature: AVX512EVEX +// Asm: VPROLVQ, CPU Feature: AVX512F func (x Uint64x2) RotateLeftMasked(y Uint64x2, z Mask64x2) Uint64x2 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. 
// -// Asm: VPROLVQ, CPU Feature: AVX512EVEX +// Asm: VPROLVQ, CPU Feature: AVX512F func (x Uint64x4) RotateLeftMasked(y Uint64x4, z Mask64x4) Uint64x4 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVQ, CPU Feature: AVX512EVEX +// Asm: VPROLVQ, CPU Feature: AVX512F func (x Uint64x8) RotateLeftMasked(y Uint64x8, z Mask64x8) Uint64x8 /* RotateRight */ // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVD, CPU Feature: AVX512EVEX +// Asm: VPRORVD, CPU Feature: AVX512F func (x Int32x4) RotateRight(y Int32x4) Int32x4 // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVD, CPU Feature: AVX512EVEX +// Asm: VPRORVD, CPU Feature: AVX512F func (x Int32x8) RotateRight(y Int32x8) Int32x8 // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVD, CPU Feature: AVX512EVEX +// Asm: VPRORVD, CPU Feature: AVX512F func (x Int32x16) RotateRight(y Int32x16) Int32x16 // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVQ, CPU Feature: AVX512EVEX +// Asm: VPRORVQ, CPU Feature: AVX512F func (x Int64x2) RotateRight(y Int64x2) Int64x2 // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVQ, CPU Feature: AVX512EVEX +// Asm: VPRORVQ, CPU Feature: AVX512F func (x Int64x4) RotateRight(y Int64x4) Int64x4 // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVQ, CPU Feature: AVX512EVEX +// Asm: VPRORVQ, CPU Feature: AVX512F func (x Int64x8) RotateRight(y Int64x8) Int64x8 // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVD, CPU Feature: AVX512EVEX +// Asm: VPRORVD, CPU Feature: AVX512F func (x Uint32x4) RotateRight(y Uint32x4) Uint32x4 // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVD, CPU Feature: AVX512EVEX +// Asm: VPRORVD, CPU Feature: AVX512F func (x Uint32x8) RotateRight(y Uint32x8) Uint32x8 // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVD, CPU Feature: AVX512EVEX +// Asm: VPRORVD, CPU Feature: AVX512F func (x Uint32x16) RotateRight(y Uint32x16) Uint32x16 // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVQ, CPU Feature: AVX512EVEX +// Asm: VPRORVQ, CPU Feature: AVX512F func (x Uint64x2) RotateRight(y Uint64x2) Uint64x2 // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVQ, CPU Feature: AVX512EVEX +// Asm: VPRORVQ, CPU Feature: AVX512F func (x Uint64x4) RotateRight(y Uint64x4) Uint64x4 // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. 
// -// Asm: VPRORVQ, CPU Feature: AVX512EVEX +// Asm: VPRORVQ, CPU Feature: AVX512F func (x Uint64x8) RotateRight(y Uint64x8) Uint64x8 /* RotateRightMasked */ // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVD, CPU Feature: AVX512EVEX +// Asm: VPRORVD, CPU Feature: AVX512F func (x Int32x4) RotateRightMasked(y Int32x4, z Mask32x4) Int32x4 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVD, CPU Feature: AVX512EVEX +// Asm: VPRORVD, CPU Feature: AVX512F func (x Int32x8) RotateRightMasked(y Int32x8, z Mask32x8) Int32x8 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVD, CPU Feature: AVX512EVEX +// Asm: VPRORVD, CPU Feature: AVX512F func (x Int32x16) RotateRightMasked(y Int32x16, z Mask32x16) Int32x16 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVQ, CPU Feature: AVX512EVEX +// Asm: VPRORVQ, CPU Feature: AVX512F func (x Int64x2) RotateRightMasked(y Int64x2, z Mask64x2) Int64x2 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVQ, CPU Feature: AVX512EVEX +// Asm: VPRORVQ, CPU Feature: AVX512F func (x Int64x4) RotateRightMasked(y Int64x4, z Mask64x4) Int64x4 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVQ, CPU Feature: AVX512EVEX +// Asm: VPRORVQ, CPU Feature: AVX512F func (x Int64x8) RotateRightMasked(y Int64x8, z Mask64x8) Int64x8 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVD, CPU Feature: AVX512EVEX +// Asm: VPRORVD, CPU Feature: AVX512F func (x Uint32x4) RotateRightMasked(y Uint32x4, z Mask32x4) Uint32x4 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVD, CPU Feature: AVX512EVEX +// Asm: VPRORVD, CPU Feature: AVX512F func (x Uint32x8) RotateRightMasked(y Uint32x8, z Mask32x8) Uint32x8 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVD, CPU Feature: AVX512EVEX +// Asm: VPRORVD, CPU Feature: AVX512F func (x Uint32x16) RotateRightMasked(y Uint32x16, z Mask32x16) Uint32x16 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVQ, CPU Feature: AVX512EVEX +// Asm: VPRORVQ, CPU Feature: AVX512F func (x Uint64x2) RotateRightMasked(y Uint64x2, z Mask64x2) Uint64x2 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVQ, CPU Feature: AVX512EVEX +// Asm: VPRORVQ, CPU Feature: AVX512F func (x Uint64x4) RotateRightMasked(y Uint64x4, z Mask64x4) Uint64x4 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. 
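A sketch of the variable-count rotates, where each lane takes its own count from the matching lane of the second operand (continuing the same sketch file and assumptions):

// rotByLane rotates each 32-bit lane of x left by the count held in the matching lane of n (VPROLVD).
func rotByLane(x, n simd.Uint32x8) simd.Uint32x8 {
	return x.RotateLeft(n)
}

// rotRightByLaneMasked rotates right by per-lane counts, updating only the lanes selected by m (masked VPRORVQ).
func rotRightByLaneMasked(x, n simd.Uint64x4, m simd.Mask64x4) simd.Uint64x4 {
	return x.RotateRightMasked(n, m)
}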
// -// Asm: VPRORVQ, CPU Feature: AVX512EVEX +// Asm: VPRORVQ, CPU Feature: AVX512F func (x Uint64x8) RotateRightMasked(y Uint64x8, z Mask64x8) Uint64x8 /* Round */ @@ -6245,42 +6245,42 @@ func (x Float64x4) Round() Float64x4 // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x4) RoundWithPrecision(prec uint8) Float32x4 // RoundWithPrecision rounds elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x8) RoundWithPrecision(prec uint8) Float32x8 // RoundWithPrecision rounds elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x16) RoundWithPrecision(prec uint8) Float32x16 // RoundWithPrecision rounds elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x2) RoundWithPrecision(prec uint8) Float64x2 // RoundWithPrecision rounds elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x4) RoundWithPrecision(prec uint8) Float64x4 // RoundWithPrecision rounds elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x8) RoundWithPrecision(prec uint8) Float64x8 /* RoundWithPrecisionMasked */ @@ -6289,42 +6289,42 @@ func (x Float64x8) RoundWithPrecision(prec uint8) Float64x8 // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x4) RoundWithPrecisionMasked(prec uint8, y Mask32x4) Float32x4 // RoundWithPrecisionMasked rounds elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x8) RoundWithPrecisionMasked(prec uint8, y Mask32x8) Float32x8 // RoundWithPrecisionMasked rounds elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x16) RoundWithPrecisionMasked(prec uint8, y Mask32x16) Float32x16 // RoundWithPrecisionMasked rounds elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x2) RoundWithPrecisionMasked(prec uint8, y Mask64x2) Float64x2 // RoundWithPrecisionMasked rounds elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x4) RoundWithPrecisionMasked(prec uint8, y Mask64x4) Float64x4 // RoundWithPrecisionMasked rounds elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x8) RoundWithPrecisionMasked(prec uint8, y Mask64x8) Float64x8 /* SaturatedAdd */ @@ -6341,7 +6341,7 @@ func (x Int8x32) SaturatedAdd(y Int8x32) Int8x32 // SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VPADDSB, CPU Feature: AVX512EVEX +// Asm: VPADDSB, CPU Feature: AVX512BW func (x Int8x64) SaturatedAdd(y Int8x64) Int8x64 // SaturatedAdd adds corresponding elements of two vectors with saturation. @@ -6356,7 +6356,7 @@ func (x Int16x16) SaturatedAdd(y Int16x16) Int16x16 // SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VPADDSW, CPU Feature: AVX512EVEX +// Asm: VPADDSW, CPU Feature: AVX512BW func (x Int16x32) SaturatedAdd(y Int16x32) Int16x32 // SaturatedAdd adds corresponding elements of two vectors with saturation. @@ -6371,7 +6371,7 @@ func (x Uint8x32) SaturatedAdd(y Uint8x32) Uint8x32 // SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VPADDSB, CPU Feature: AVX512EVEX +// Asm: VPADDSB, CPU Feature: AVX512BW func (x Uint8x64) SaturatedAdd(y Uint8x64) Uint8x64 // SaturatedAdd adds corresponding elements of two vectors with saturation. @@ -6386,103 +6386,103 @@ func (x Uint16x16) SaturatedAdd(y Uint16x16) Uint16x16 // SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VPADDSW, CPU Feature: AVX512EVEX +// Asm: VPADDSW, CPU Feature: AVX512BW func (x Uint16x32) SaturatedAdd(y Uint16x32) Uint16x32 /* SaturatedAddMasked */ // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // -// Asm: VPADDSB, CPU Feature: AVX512EVEX +// Asm: VPADDSB, CPU Feature: AVX512BW func (x Int8x16) SaturatedAddMasked(y Int8x16, z Mask8x16) Int8x16 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // -// Asm: VPADDSB, CPU Feature: AVX512EVEX +// Asm: VPADDSB, CPU Feature: AVX512BW func (x Int8x32) SaturatedAddMasked(y Int8x32, z Mask8x32) Int8x32 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // -// Asm: VPADDSB, CPU Feature: AVX512EVEX +// Asm: VPADDSB, CPU Feature: AVX512BW func (x Int8x64) SaturatedAddMasked(y Int8x64, z Mask8x64) Int8x64 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // -// Asm: VPADDSW, CPU Feature: AVX512EVEX +// Asm: VPADDSW, CPU Feature: AVX512BW func (x Int16x8) SaturatedAddMasked(y Int16x8, z Mask16x8) Int16x8 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // -// Asm: VPADDSW, CPU Feature: AVX512EVEX +// Asm: VPADDSW, CPU Feature: AVX512BW func (x Int16x16) SaturatedAddMasked(y Int16x16, z Mask16x16) Int16x16 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // -// Asm: VPADDSW, CPU Feature: AVX512EVEX +// Asm: VPADDSW, CPU Feature: AVX512BW func (x Int16x32) SaturatedAddMasked(y Int16x32, z Mask16x32) Int16x32 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. 
// -// Asm: VPADDSB, CPU Feature: AVX512EVEX +// Asm: VPADDSB, CPU Feature: AVX512BW func (x Uint8x16) SaturatedAddMasked(y Uint8x16, z Mask8x16) Uint8x16 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // -// Asm: VPADDSB, CPU Feature: AVX512EVEX +// Asm: VPADDSB, CPU Feature: AVX512BW func (x Uint8x32) SaturatedAddMasked(y Uint8x32, z Mask8x32) Uint8x32 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // -// Asm: VPADDSB, CPU Feature: AVX512EVEX +// Asm: VPADDSB, CPU Feature: AVX512BW func (x Uint8x64) SaturatedAddMasked(y Uint8x64, z Mask8x64) Uint8x64 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // -// Asm: VPADDSW, CPU Feature: AVX512EVEX +// Asm: VPADDSW, CPU Feature: AVX512BW func (x Uint16x8) SaturatedAddMasked(y Uint16x8, z Mask16x8) Uint16x8 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // -// Asm: VPADDSW, CPU Feature: AVX512EVEX +// Asm: VPADDSW, CPU Feature: AVX512BW func (x Uint16x16) SaturatedAddMasked(y Uint16x16, z Mask16x16) Uint16x16 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // -// Asm: VPADDSW, CPU Feature: AVX512EVEX +// Asm: VPADDSW, CPU Feature: AVX512BW func (x Uint16x32) SaturatedAddMasked(y Uint16x32, z Mask16x32) Uint16x32 /* SaturatedPairDotProdAccumulate */ // SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // -// Asm: VPDPWSSDS, CPU Feature: AVX_VNNI +// Asm: VPDPWSSDS, CPU Feature: AVXVNNI func (x Int32x4) SaturatedPairDotProdAccumulate(y Int16x8, z Int16x8) Int32x4 // SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // -// Asm: VPDPWSSDS, CPU Feature: AVX_VNNI +// Asm: VPDPWSSDS, CPU Feature: AVXVNNI func (x Int32x8) SaturatedPairDotProdAccumulate(y Int16x16, z Int16x16) Int32x8 // SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // -// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX +// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI func (x Int32x16) SaturatedPairDotProdAccumulate(y Int16x32, z Int16x32) Int32x16 /* SaturatedPairDotProdAccumulateMasked */ // SaturatedPairDotProdAccumulateMasked performs dot products on pairs of elements of y and z and accumulates the results to x. // -// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX +// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI func (x Int32x4) SaturatedPairDotProdAccumulateMasked(y Int16x8, z Int16x8, u Mask32x4) Int32x4 // SaturatedPairDotProdAccumulateMasked performs dot products on pairs of elements of y and z and accumulates the results to x. // -// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX +// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI func (x Int32x8) SaturatedPairDotProdAccumulateMasked(y Int16x16, z Int16x16, u Mask32x8) Int32x8 // SaturatedPairDotProdAccumulateMasked performs dot products on pairs of elements of y and z and accumulates the results to x. // -// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX +// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI func (x Int32x16) SaturatedPairDotProdAccumulateMasked(y Int16x32, z Int16x32, u Mask32x16) Int32x16 /* SaturatedPairwiseAdd */ @@ -6527,7 +6527,7 @@ func (x Int8x32) SaturatedSub(y Int8x32) Int8x32 // SaturatedSub subtracts corresponding elements of two vectors with saturation. 
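The saturated adds clamp instead of wrapping on overflow, which a small sketch makes concrete (continuing the same sketch file and assumptions):

// addClamped adds int8 lanes with saturation: 100 + 100 yields 127 rather than wrapping to -56 (VPADDSB).
func addClamped(x, y simd.Int8x32) simd.Int8x32 {
	return x.SaturatedAdd(y)
}

// addClampedMasked adds with saturation only in the lanes selected by m (masked VPADDSW, AVX512BW).
func addClampedMasked(x, y simd.Int16x16, m simd.Mask16x16) simd.Int16x16 {
	return x.SaturatedAddMasked(y, m)
}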
// -// Asm: VPSUBSB, CPU Feature: AVX512EVEX +// Asm: VPSUBSB, CPU Feature: AVX512BW func (x Int8x64) SaturatedSub(y Int8x64) Int8x64 // SaturatedSub subtracts corresponding elements of two vectors with saturation. @@ -6542,7 +6542,7 @@ func (x Int16x16) SaturatedSub(y Int16x16) Int16x16 // SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VPSUBSW, CPU Feature: AVX512EVEX +// Asm: VPSUBSW, CPU Feature: AVX512BW func (x Int16x32) SaturatedSub(y Int16x32) Int16x32 // SaturatedSub subtracts corresponding elements of two vectors with saturation. @@ -6557,7 +6557,7 @@ func (x Uint8x32) SaturatedSub(y Uint8x32) Uint8x32 // SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VPSUBSB, CPU Feature: AVX512EVEX +// Asm: VPSUBSB, CPU Feature: AVX512BW func (x Uint8x64) SaturatedSub(y Uint8x64) Uint8x64 // SaturatedSub subtracts corresponding elements of two vectors with saturation. @@ -6572,69 +6572,69 @@ func (x Uint16x16) SaturatedSub(y Uint16x16) Uint16x16 // SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VPSUBSW, CPU Feature: AVX512EVEX +// Asm: VPSUBSW, CPU Feature: AVX512BW func (x Uint16x32) SaturatedSub(y Uint16x32) Uint16x32 /* SaturatedSubMasked */ // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // -// Asm: VPSUBSB, CPU Feature: AVX512EVEX +// Asm: VPSUBSB, CPU Feature: AVX512BW func (x Int8x16) SaturatedSubMasked(y Int8x16, z Mask8x16) Int8x16 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // -// Asm: VPSUBSB, CPU Feature: AVX512EVEX +// Asm: VPSUBSB, CPU Feature: AVX512BW func (x Int8x32) SaturatedSubMasked(y Int8x32, z Mask8x32) Int8x32 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // -// Asm: VPSUBSB, CPU Feature: AVX512EVEX +// Asm: VPSUBSB, CPU Feature: AVX512BW func (x Int8x64) SaturatedSubMasked(y Int8x64, z Mask8x64) Int8x64 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // -// Asm: VPSUBSW, CPU Feature: AVX512EVEX +// Asm: VPSUBSW, CPU Feature: AVX512BW func (x Int16x8) SaturatedSubMasked(y Int16x8, z Mask16x8) Int16x8 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // -// Asm: VPSUBSW, CPU Feature: AVX512EVEX +// Asm: VPSUBSW, CPU Feature: AVX512BW func (x Int16x16) SaturatedSubMasked(y Int16x16, z Mask16x16) Int16x16 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // -// Asm: VPSUBSW, CPU Feature: AVX512EVEX +// Asm: VPSUBSW, CPU Feature: AVX512BW func (x Int16x32) SaturatedSubMasked(y Int16x32, z Mask16x32) Int16x32 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // -// Asm: VPSUBSB, CPU Feature: AVX512EVEX +// Asm: VPSUBSB, CPU Feature: AVX512BW func (x Uint8x16) SaturatedSubMasked(y Uint8x16, z Mask8x16) Uint8x16 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // -// Asm: VPSUBSB, CPU Feature: AVX512EVEX +// Asm: VPSUBSB, CPU Feature: AVX512BW func (x Uint8x32) SaturatedSubMasked(y Uint8x32, z Mask8x32) Uint8x32 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // -// Asm: VPSUBSB, CPU Feature: AVX512EVEX +// Asm: VPSUBSB, CPU Feature: AVX512BW func (x Uint8x64) SaturatedSubMasked(y Uint8x64, z Mask8x64) Uint8x64 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. 
// -// Asm: VPSUBSW, CPU Feature: AVX512EVEX +// Asm: VPSUBSW, CPU Feature: AVX512BW func (x Uint16x8) SaturatedSubMasked(y Uint16x8, z Mask16x8) Uint16x8 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // -// Asm: VPSUBSW, CPU Feature: AVX512EVEX +// Asm: VPSUBSW, CPU Feature: AVX512BW func (x Uint16x16) SaturatedSubMasked(y Uint16x16, z Mask16x16) Uint16x16 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // -// Asm: VPSUBSW, CPU Feature: AVX512EVEX +// Asm: VPSUBSW, CPU Feature: AVX512BW func (x Uint16x32) SaturatedSubMasked(y Uint16x32, z Mask16x32) Uint16x32 /* SaturatedUnsignedSignedPairDotProd */ @@ -6654,7 +6654,7 @@ func (x Uint8x32) SaturatedUnsignedSignedPairDotProd(y Int8x32) Int16x16 // SaturatedUnsignedSignedPairDotProd multiplies the elements and add the pairs together with saturation, // yielding a vector of half as many elements with twice the input element size. // -// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX +// Asm: VPMADDUBSW, CPU Feature: AVX512BW func (x Uint8x64) SaturatedUnsignedSignedPairDotProd(y Int8x64) Int16x32 /* SaturatedUnsignedSignedPairDotProdMasked */ @@ -6662,83 +6662,83 @@ func (x Uint8x64) SaturatedUnsignedSignedPairDotProd(y Int8x64) Int16x32 // SaturatedUnsignedSignedPairDotProdMasked multiplies the elements and add the pairs together with saturation, // yielding a vector of half as many elements with twice the input element size. // -// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX +// Asm: VPMADDUBSW, CPU Feature: AVX512BW func (x Uint8x16) SaturatedUnsignedSignedPairDotProdMasked(y Int8x16, z Mask16x8) Int16x8 // SaturatedUnsignedSignedPairDotProdMasked multiplies the elements and add the pairs together with saturation, // yielding a vector of half as many elements with twice the input element size. // -// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX +// Asm: VPMADDUBSW, CPU Feature: AVX512BW func (x Uint8x32) SaturatedUnsignedSignedPairDotProdMasked(y Int8x32, z Mask16x16) Int16x16 // SaturatedUnsignedSignedPairDotProdMasked multiplies the elements and add the pairs together with saturation, // yielding a vector of half as many elements with twice the input element size. // -// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX +// Asm: VPMADDUBSW, CPU Feature: AVX512BW func (x Uint8x64) SaturatedUnsignedSignedPairDotProdMasked(y Int8x64, z Mask16x32) Int16x32 /* SaturatedUnsignedSignedQuadDotProdAccumulate */ // SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI +// Asm: VPDPBUSDS, CPU Feature: AVXVNNI func (x Int32x4) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16) Int32x4 // SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI +// Asm: VPDPBUSDS, CPU Feature: AVXVNNI func (x Int32x8) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32) Int32x8 // SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
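Saturated subtraction follows the same clamping pattern; a matching sketch under the same assumptions:

// subClamped subtracts int8 lanes with saturation, clamping at -128 instead of wrapping (VPSUBSB).
func subClamped(x, y simd.Int8x32) simd.Int8x32 {
	return x.SaturatedSub(y)
}

// subClampedMasked subtracts with saturation only in the lanes selected by m (masked VPSUBSW, AVX512BW).
func subClampedMasked(x, y simd.Uint16x8, m simd.Mask16x8) simd.Uint16x8 {
	return x.SaturatedSubMasked(y, m)
}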
// -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI func (x Int32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Int32x16 // SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI +// Asm: VPDPBUSDS, CPU Feature: AVXVNNI func (x Uint32x4) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16) Uint32x4 // SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI +// Asm: VPDPBUSDS, CPU Feature: AVXVNNI func (x Uint32x8) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32) Uint32x8 // SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI func (x Uint32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Uint32x16 /* SaturatedUnsignedSignedQuadDotProdAccumulateMasked */ // SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI func (x Int32x4) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int8x16, u Mask32x4) Int32x4 // SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI func (x Int32x8) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int8x32, u Mask32x8) Int32x8 // SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI func (x Int32x16) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int8x64, u Mask32x16) Int32x16 // SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI func (x Uint32x4) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int8x16, u Mask32x4) Uint32x4 // SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI func (x Uint32x8) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int8x32, u Mask32x8) Uint32x8 // SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
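The VNNI-style accumulating dot products take the accumulator as the receiver and two byte vectors as inputs; a sketch continuing the same assumptions:

// quadDot adds to each int32 lane of acc the saturating dot product of the four
// matching uint8 lanes of y with the four int8 lanes of z (VPDPBUSDS via AVX-VNNI).
func quadDot(acc simd.Int32x4, y simd.Uint8x16, z simd.Int8x16) simd.Int32x4 {
	return acc.SaturatedUnsignedSignedQuadDotProdAccumulate(y, z)
}

// quadDotMasked updates only the accumulator lanes selected by m (the AVX512-VNNI masked form).
func quadDotMasked(acc simd.Int32x16, y simd.Uint8x64, z simd.Int8x64, m simd.Mask32x16) simd.Int32x16 {
	return acc.SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y, z, m)
}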
// -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI func (x Uint32x16) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int8x64, u Mask32x16) Uint32x16 /* Set128 */ @@ -6885,7 +6885,7 @@ func (x Int16x16) ShiftAllLeft(y uint64) Int16x16 // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLW, CPU Feature: AVX512EVEX +// Asm: VPSLLW, CPU Feature: AVX512BW func (x Int16x32) ShiftAllLeft(y uint64) Int16x32 // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. @@ -6900,7 +6900,7 @@ func (x Int32x8) ShiftAllLeft(y uint64) Int32x8 // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLD, CPU Feature: AVX512EVEX +// Asm: VPSLLD, CPU Feature: AVX512F func (x Int32x16) ShiftAllLeft(y uint64) Int32x16 // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. @@ -6915,7 +6915,7 @@ func (x Int64x4) ShiftAllLeft(y uint64) Int64x4 // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLQ, CPU Feature: AVX512EVEX +// Asm: VPSLLQ, CPU Feature: AVX512F func (x Int64x8) ShiftAllLeft(y uint64) Int64x8 // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. @@ -6930,7 +6930,7 @@ func (x Uint16x16) ShiftAllLeft(y uint64) Uint16x16 // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLW, CPU Feature: AVX512EVEX +// Asm: VPSLLW, CPU Feature: AVX512BW func (x Uint16x32) ShiftAllLeft(y uint64) Uint16x32 // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. @@ -6945,7 +6945,7 @@ func (x Uint32x8) ShiftAllLeft(y uint64) Uint32x8 // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLD, CPU Feature: AVX512EVEX +// Asm: VPSLLD, CPU Feature: AVX512F func (x Uint32x16) ShiftAllLeft(y uint64) Uint32x16 // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. @@ -6960,7 +6960,7 @@ func (x Uint64x4) ShiftAllLeft(y uint64) Uint64x4 // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLQ, CPU Feature: AVX512EVEX +// Asm: VPSLLQ, CPU Feature: AVX512F func (x Uint64x8) ShiftAllLeft(y uint64) Uint64x8 /* ShiftAllLeftAndFillUpperFrom */ @@ -6970,7 +6970,7 @@ func (x Uint64x8) ShiftAllLeft(y uint64) Uint64x8 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDW, CPU Feature: AVX512EVEX +// Asm: VPSHLDW, CPU Feature: AVX512VBMI2 func (x Int16x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int16x8) Int16x8 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the @@ -6978,7 +6978,7 @@ func (x Int16x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int16x8) Int16x8 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
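ShiftAllLeft applies one scalar count to every lane and zeroes the vacated low bits; a sketch under the same assumptions:

// shiftAll shifts every 32-bit lane of x left by n bits; vacated bits become zero (VPSLLD, AVX512F for the 512-bit form).
func shiftAll(x simd.Uint32x16, n uint64) simd.Uint32x16 {
	return x.ShiftAllLeft(n)
}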
// -// Asm: VPSHLDW, CPU Feature: AVX512EVEX +// Asm: VPSHLDW, CPU Feature: AVX512VBMI2 func (x Int16x16) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int16x16) Int16x16 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the @@ -6986,7 +6986,7 @@ func (x Int16x16) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int16x16) Int16x16 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDW, CPU Feature: AVX512EVEX +// Asm: VPSHLDW, CPU Feature: AVX512VBMI2 func (x Int16x32) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int16x32) Int16x32 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the @@ -6994,7 +6994,7 @@ func (x Int16x32) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int16x32) Int16x32 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDD, CPU Feature: AVX512EVEX +// Asm: VPSHLDD, CPU Feature: AVX512VBMI2 func (x Int32x4) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int32x4) Int32x4 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the @@ -7002,7 +7002,7 @@ func (x Int32x4) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int32x4) Int32x4 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDD, CPU Feature: AVX512EVEX +// Asm: VPSHLDD, CPU Feature: AVX512VBMI2 func (x Int32x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int32x8) Int32x8 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the @@ -7010,7 +7010,7 @@ func (x Int32x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int32x8) Int32x8 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDD, CPU Feature: AVX512EVEX +// Asm: VPSHLDD, CPU Feature: AVX512VBMI2 func (x Int32x16) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int32x16) Int32x16 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the @@ -7018,7 +7018,7 @@ func (x Int32x16) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int32x16) Int32x16 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +// Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 func (x Int64x2) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int64x2) Int64x2 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the @@ -7026,7 +7026,7 @@ func (x Int64x2) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int64x2) Int64x2 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +// Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 func (x Int64x4) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int64x4) Int64x4 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the @@ -7034,7 +7034,7 @@ func (x Int64x4) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int64x4) Int64x4 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +// Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 func (x Int64x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int64x8) Int64x8 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the @@ -7042,7 +7042,7 @@ func (x Int64x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int64x8) Int64x8 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDW, CPU Feature: AVX512EVEX +// Asm: VPSHLDW, CPU Feature: AVX512VBMI2 func (x Uint16x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint16x8) Uint16x8 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the @@ -7050,7 +7050,7 @@ func (x Uint16x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint16x8) Uint16x8 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDW, CPU Feature: AVX512EVEX +// Asm: VPSHLDW, CPU Feature: AVX512VBMI2 func (x Uint16x16) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint16x16) Uint16x16 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the @@ -7058,7 +7058,7 @@ func (x Uint16x16) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint16x16) Uint16 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDW, CPU Feature: AVX512EVEX +// Asm: VPSHLDW, CPU Feature: AVX512VBMI2 func (x Uint16x32) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint16x32) Uint16x32 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the @@ -7066,7 +7066,7 @@ func (x Uint16x32) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint16x32) Uint16 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDD, CPU Feature: AVX512EVEX +// Asm: VPSHLDD, CPU Feature: AVX512VBMI2 func (x Uint32x4) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint32x4) Uint32x4 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the @@ -7074,7 +7074,7 @@ func (x Uint32x4) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint32x4) Uint32x4 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDD, CPU Feature: AVX512EVEX +// Asm: VPSHLDD, CPU Feature: AVX512VBMI2 func (x Uint32x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint32x8) Uint32x8 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the @@ -7082,7 +7082,7 @@ func (x Uint32x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint32x8) Uint32x8 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDD, CPU Feature: AVX512EVEX +// Asm: VPSHLDD, CPU Feature: AVX512VBMI2 func (x Uint32x16) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint32x16) Uint32x16 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the @@ -7090,7 +7090,7 @@ func (x Uint32x16) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint32x16) Uint32 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
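// A minimal scalar sketch of the lane-wise semantics described above, not the vector
// API itself: for one 16-bit lane of ShiftAllLeftAndFillUpperFrom (VPSHLDW-style),
// x supplies the bits shifted up and the top bits of y fill the vacated low bits.
// The helper name and the modulo-lane-width reduction of the count are assumptions
// of this sketch.
func shld16(x, y uint16, shift uint8) uint16 {
	s := uint(shift) & 15 // assume the count is reduced modulo the 16-bit lane width
	if s == 0 {
		return x // a zero shift leaves x unchanged; no bits of y are copied in
	}
	return x<<s | y>>(16-s) // upper s bits of y land in the emptied lower bits of x
}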
// -// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +// Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 func (x Uint64x2) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint64x2) Uint64x2 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the @@ -7098,7 +7098,7 @@ func (x Uint64x2) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint64x2) Uint64x2 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +// Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 func (x Uint64x4) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint64x4) Uint64x4 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the @@ -7106,7 +7106,7 @@ func (x Uint64x4) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint64x4) Uint64x4 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +// Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 func (x Uint64x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint64x8) Uint64x8 /* ShiftAllLeftAndFillUpperFromMasked */ @@ -7116,7 +7116,7 @@ func (x Uint64x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint64x8) Uint64x8 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDW, CPU Feature: AVX512EVEX +// Asm: VPSHLDW, CPU Feature: AVX512VBMI2 func (x Int16x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x8, z Mask16x8) Int16x8 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the @@ -7124,7 +7124,7 @@ func (x Int16x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x8, z Ma // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDW, CPU Feature: AVX512EVEX +// Asm: VPSHLDW, CPU Feature: AVX512VBMI2 func (x Int16x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x16, z Mask16x16) Int16x16 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the @@ -7132,7 +7132,7 @@ func (x Int16x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x16, z // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDW, CPU Feature: AVX512EVEX +// Asm: VPSHLDW, CPU Feature: AVX512VBMI2 func (x Int16x32) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x32, z Mask16x32) Int16x32 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the @@ -7140,7 +7140,7 @@ func (x Int16x32) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x32, z // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDD, CPU Feature: AVX512EVEX +// Asm: VPSHLDD, CPU Feature: AVX512VBMI2 func (x Int32x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x4, z Mask32x4) Int32x4 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the @@ -7148,7 +7148,7 @@ func (x Int32x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x4, z Ma // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VPSHLDD, CPU Feature: AVX512EVEX +// Asm: VPSHLDD, CPU Feature: AVX512VBMI2 func (x Int32x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x8, z Mask32x8) Int32x8 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the @@ -7156,7 +7156,7 @@ func (x Int32x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x8, z Ma // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDD, CPU Feature: AVX512EVEX +// Asm: VPSHLDD, CPU Feature: AVX512VBMI2 func (x Int32x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x16, z Mask32x16) Int32x16 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the @@ -7164,7 +7164,7 @@ func (x Int32x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x16, z // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +// Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 func (x Int64x2) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x2, z Mask64x2) Int64x2 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the @@ -7172,7 +7172,7 @@ func (x Int64x2) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x2, z Ma // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +// Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 func (x Int64x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x4, z Mask64x4) Int64x4 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the @@ -7180,7 +7180,7 @@ func (x Int64x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x4, z Ma // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +// Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 func (x Int64x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x8, z Mask64x8) Int64x8 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the @@ -7188,7 +7188,7 @@ func (x Int64x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x8, z Ma // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDW, CPU Feature: AVX512EVEX +// Asm: VPSHLDW, CPU Feature: AVX512VBMI2 func (x Uint16x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x8, z Mask16x8) Uint16x8 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the @@ -7196,7 +7196,7 @@ func (x Uint16x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x8, z // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDW, CPU Feature: AVX512EVEX +// Asm: VPSHLDW, CPU Feature: AVX512VBMI2 func (x Uint16x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x16, z Mask16x16) Uint16x16 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the @@ -7204,7 +7204,7 @@ func (x Uint16x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x16, // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VPSHLDW, CPU Feature: AVX512EVEX +// Asm: VPSHLDW, CPU Feature: AVX512VBMI2 func (x Uint16x32) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x32, z Mask16x32) Uint16x32 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the @@ -7212,7 +7212,7 @@ func (x Uint16x32) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x32, // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDD, CPU Feature: AVX512EVEX +// Asm: VPSHLDD, CPU Feature: AVX512VBMI2 func (x Uint32x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x4, z Mask32x4) Uint32x4 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the @@ -7220,7 +7220,7 @@ func (x Uint32x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x4, z // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDD, CPU Feature: AVX512EVEX +// Asm: VPSHLDD, CPU Feature: AVX512VBMI2 func (x Uint32x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x8, z Mask32x8) Uint32x8 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the @@ -7228,7 +7228,7 @@ func (x Uint32x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x8, z // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDD, CPU Feature: AVX512EVEX +// Asm: VPSHLDD, CPU Feature: AVX512VBMI2 func (x Uint32x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x16, z Mask32x16) Uint32x16 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the @@ -7236,7 +7236,7 @@ func (x Uint32x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x16, // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +// Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 func (x Uint64x2) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint64x2, z Mask64x2) Uint64x2 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the @@ -7244,7 +7244,7 @@ func (x Uint64x2) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint64x2, z // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +// Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 func (x Uint64x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint64x4, z Mask64x4) Uint64x4 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the @@ -7252,99 +7252,99 @@ func (x Uint64x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint64x4, z // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +// Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 func (x Uint64x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint64x8, z Mask64x8) Uint64x8 /* ShiftAllLeftMasked */ // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLW, CPU Feature: AVX512EVEX +// Asm: VPSLLW, CPU Feature: AVX512BW func (x Int16x8) ShiftAllLeftMasked(y uint64, z Mask16x8) Int16x8 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. 
Emptied lower bits are zeroed. // -// Asm: VPSLLW, CPU Feature: AVX512EVEX +// Asm: VPSLLW, CPU Feature: AVX512BW func (x Int16x16) ShiftAllLeftMasked(y uint64, z Mask16x16) Int16x16 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLW, CPU Feature: AVX512EVEX +// Asm: VPSLLW, CPU Feature: AVX512BW func (x Int16x32) ShiftAllLeftMasked(y uint64, z Mask16x32) Int16x32 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLD, CPU Feature: AVX512EVEX +// Asm: VPSLLD, CPU Feature: AVX512F func (x Int32x4) ShiftAllLeftMasked(y uint64, z Mask32x4) Int32x4 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLD, CPU Feature: AVX512EVEX +// Asm: VPSLLD, CPU Feature: AVX512F func (x Int32x8) ShiftAllLeftMasked(y uint64, z Mask32x8) Int32x8 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLD, CPU Feature: AVX512EVEX +// Asm: VPSLLD, CPU Feature: AVX512F func (x Int32x16) ShiftAllLeftMasked(y uint64, z Mask32x16) Int32x16 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLQ, CPU Feature: AVX512EVEX +// Asm: VPSLLQ, CPU Feature: AVX512F func (x Int64x2) ShiftAllLeftMasked(y uint64, z Mask64x2) Int64x2 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLQ, CPU Feature: AVX512EVEX +// Asm: VPSLLQ, CPU Feature: AVX512F func (x Int64x4) ShiftAllLeftMasked(y uint64, z Mask64x4) Int64x4 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLQ, CPU Feature: AVX512EVEX +// Asm: VPSLLQ, CPU Feature: AVX512F func (x Int64x8) ShiftAllLeftMasked(y uint64, z Mask64x8) Int64x8 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLW, CPU Feature: AVX512EVEX +// Asm: VPSLLW, CPU Feature: AVX512BW func (x Uint16x8) ShiftAllLeftMasked(y uint64, z Mask16x8) Uint16x8 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLW, CPU Feature: AVX512EVEX +// Asm: VPSLLW, CPU Feature: AVX512BW func (x Uint16x16) ShiftAllLeftMasked(y uint64, z Mask16x16) Uint16x16 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLW, CPU Feature: AVX512EVEX +// Asm: VPSLLW, CPU Feature: AVX512BW func (x Uint16x32) ShiftAllLeftMasked(y uint64, z Mask16x32) Uint16x32 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLD, CPU Feature: AVX512EVEX +// Asm: VPSLLD, CPU Feature: AVX512F func (x Uint32x4) ShiftAllLeftMasked(y uint64, z Mask32x4) Uint32x4 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLD, CPU Feature: AVX512EVEX +// Asm: VPSLLD, CPU Feature: AVX512F func (x Uint32x8) ShiftAllLeftMasked(y uint64, z Mask32x8) Uint32x8 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. 
// -// Asm: VPSLLD, CPU Feature: AVX512EVEX +// Asm: VPSLLD, CPU Feature: AVX512F func (x Uint32x16) ShiftAllLeftMasked(y uint64, z Mask32x16) Uint32x16 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLQ, CPU Feature: AVX512EVEX +// Asm: VPSLLQ, CPU Feature: AVX512F func (x Uint64x2) ShiftAllLeftMasked(y uint64, z Mask64x2) Uint64x2 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLQ, CPU Feature: AVX512EVEX +// Asm: VPSLLQ, CPU Feature: AVX512F func (x Uint64x4) ShiftAllLeftMasked(y uint64, z Mask64x4) Uint64x4 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLQ, CPU Feature: AVX512EVEX +// Asm: VPSLLQ, CPU Feature: AVX512F func (x Uint64x8) ShiftAllLeftMasked(y uint64, z Mask64x8) Uint64x8 /* ShiftAllRight */ @@ -7361,7 +7361,7 @@ func (x Int16x16) ShiftAllRight(y uint64) Int16x16 // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAW, CPU Feature: AVX512EVEX +// Asm: VPSRAW, CPU Feature: AVX512BW func (x Int16x32) ShiftAllRight(y uint64) Int16x32 // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. @@ -7376,22 +7376,22 @@ func (x Int32x8) ShiftAllRight(y uint64) Int32x8 // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAD, CPU Feature: AVX512EVEX +// Asm: VPSRAD, CPU Feature: AVX512F func (x Int32x16) ShiftAllRight(y uint64) Int32x16 // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAQ, CPU Feature: AVX512EVEX +// Asm: VPSRAQ, CPU Feature: AVX512F func (x Int64x2) ShiftAllRight(y uint64) Int64x2 // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAQ, CPU Feature: AVX512EVEX +// Asm: VPSRAQ, CPU Feature: AVX512F func (x Int64x4) ShiftAllRight(y uint64) Int64x4 // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAQ, CPU Feature: AVX512EVEX +// Asm: VPSRAQ, CPU Feature: AVX512F func (x Int64x8) ShiftAllRight(y uint64) Int64x8 // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. @@ -7406,7 +7406,7 @@ func (x Uint16x16) ShiftAllRight(y uint64) Uint16x16 // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSRLW, CPU Feature: AVX512EVEX +// Asm: VPSRLW, CPU Feature: AVX512BW func (x Uint16x32) ShiftAllRight(y uint64) Uint16x32 // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. @@ -7421,7 +7421,7 @@ func (x Uint32x8) ShiftAllRight(y uint64) Uint32x8 // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSRLD, CPU Feature: AVX512EVEX +// Asm: VPSRLD, CPU Feature: AVX512F func (x Uint32x16) ShiftAllRight(y uint64) Uint32x16 // ShiftAllRight shifts each element to the right by the specified number of bits. 
Emptied upper bits are zeroed. @@ -7436,7 +7436,7 @@ func (x Uint64x4) ShiftAllRight(y uint64) Uint64x4 // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSRLQ, CPU Feature: AVX512EVEX +// Asm: VPSRLQ, CPU Feature: AVX512F func (x Uint64x8) ShiftAllRight(y uint64) Uint64x8 /* ShiftAllRightAndFillUpperFrom */ @@ -7446,7 +7446,7 @@ func (x Uint64x8) ShiftAllRight(y uint64) Uint64x8 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDW, CPU Feature: AVX512EVEX +// Asm: VPSHRDW, CPU Feature: AVX512VBMI2 func (x Int16x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Int16x8) Int16x8 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the @@ -7454,7 +7454,7 @@ func (x Int16x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Int16x8) Int16x8 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDW, CPU Feature: AVX512EVEX +// Asm: VPSHRDW, CPU Feature: AVX512VBMI2 func (x Int16x16) ShiftAllRightAndFillUpperFrom(shift uint8, y Int16x16) Int16x16 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the @@ -7462,7 +7462,7 @@ func (x Int16x16) ShiftAllRightAndFillUpperFrom(shift uint8, y Int16x16) Int16x1 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDW, CPU Feature: AVX512EVEX +// Asm: VPSHRDW, CPU Feature: AVX512VBMI2 func (x Int16x32) ShiftAllRightAndFillUpperFrom(shift uint8, y Int16x32) Int16x32 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the @@ -7470,7 +7470,7 @@ func (x Int16x32) ShiftAllRightAndFillUpperFrom(shift uint8, y Int16x32) Int16x3 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDD, CPU Feature: AVX512EVEX +// Asm: VPSHRDD, CPU Feature: AVX512VBMI2 func (x Int32x4) ShiftAllRightAndFillUpperFrom(shift uint8, y Int32x4) Int32x4 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the @@ -7478,7 +7478,7 @@ func (x Int32x4) ShiftAllRightAndFillUpperFrom(shift uint8, y Int32x4) Int32x4 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDD, CPU Feature: AVX512EVEX +// Asm: VPSHRDD, CPU Feature: AVX512VBMI2 func (x Int32x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Int32x8) Int32x8 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the @@ -7486,7 +7486,7 @@ func (x Int32x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Int32x8) Int32x8 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDD, CPU Feature: AVX512EVEX +// Asm: VPSHRDD, CPU Feature: AVX512VBMI2 func (x Int32x16) ShiftAllRightAndFillUpperFrom(shift uint8, y Int32x16) Int32x16 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the @@ -7494,7 +7494,7 @@ func (x Int32x16) ShiftAllRightAndFillUpperFrom(shift uint8, y Int32x16) Int32x1 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +// Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 func (x Int64x2) ShiftAllRightAndFillUpperFrom(shift uint8, y Int64x2) Int64x2 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the @@ -7502,7 +7502,7 @@ func (x Int64x2) ShiftAllRightAndFillUpperFrom(shift uint8, y Int64x2) Int64x2 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +// Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 func (x Int64x4) ShiftAllRightAndFillUpperFrom(shift uint8, y Int64x4) Int64x4 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the @@ -7510,7 +7510,7 @@ func (x Int64x4) ShiftAllRightAndFillUpperFrom(shift uint8, y Int64x4) Int64x4 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +// Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 func (x Int64x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Int64x8) Int64x8 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the @@ -7518,7 +7518,7 @@ func (x Int64x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Int64x8) Int64x8 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDW, CPU Feature: AVX512EVEX +// Asm: VPSHRDW, CPU Feature: AVX512VBMI2 func (x Uint16x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint16x8) Uint16x8 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the @@ -7526,7 +7526,7 @@ func (x Uint16x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint16x8) Uint16x // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDW, CPU Feature: AVX512EVEX +// Asm: VPSHRDW, CPU Feature: AVX512VBMI2 func (x Uint16x16) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint16x16) Uint16x16 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the @@ -7534,7 +7534,7 @@ func (x Uint16x16) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint16x16) Uint1 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDW, CPU Feature: AVX512EVEX +// Asm: VPSHRDW, CPU Feature: AVX512VBMI2 func (x Uint16x32) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint16x32) Uint16x32 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the @@ -7542,7 +7542,7 @@ func (x Uint16x32) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint16x32) Uint1 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDD, CPU Feature: AVX512EVEX +// Asm: VPSHRDD, CPU Feature: AVX512VBMI2 func (x Uint32x4) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint32x4) Uint32x4 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the @@ -7550,7 +7550,7 @@ func (x Uint32x4) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint32x4) Uint32x // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VPSHRDD, CPU Feature: AVX512EVEX +// Asm: VPSHRDD, CPU Feature: AVX512VBMI2 func (x Uint32x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint32x8) Uint32x8 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the @@ -7558,7 +7558,7 @@ func (x Uint32x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint32x8) Uint32x // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDD, CPU Feature: AVX512EVEX +// Asm: VPSHRDD, CPU Feature: AVX512VBMI2 func (x Uint32x16) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint32x16) Uint32x16 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the @@ -7566,7 +7566,7 @@ func (x Uint32x16) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint32x16) Uint3 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +// Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 func (x Uint64x2) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint64x2) Uint64x2 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the @@ -7574,7 +7574,7 @@ func (x Uint64x2) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint64x2) Uint64x // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +// Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 func (x Uint64x4) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint64x4) Uint64x4 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the @@ -7582,7 +7582,7 @@ func (x Uint64x4) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint64x4) Uint64x // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +// Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 func (x Uint64x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint64x8) Uint64x8 /* ShiftAllRightAndFillUpperFromMasked */ @@ -7592,7 +7592,7 @@ func (x Uint64x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint64x8) Uint64x // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDW, CPU Feature: AVX512EVEX +// Asm: VPSHRDW, CPU Feature: AVX512VBMI2 func (x Int16x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x8, z Mask16x8) Int16x8 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the @@ -7600,7 +7600,7 @@ func (x Int16x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x8, z M // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDW, CPU Feature: AVX512EVEX +// Asm: VPSHRDW, CPU Feature: AVX512VBMI2 func (x Int16x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x16, z Mask16x16) Int16x16 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the @@ -7608,7 +7608,7 @@ func (x Int16x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x16, z // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VPSHRDW, CPU Feature: AVX512EVEX +// Asm: VPSHRDW, CPU Feature: AVX512VBMI2 func (x Int16x32) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x32, z Mask16x32) Int16x32 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the @@ -7616,7 +7616,7 @@ func (x Int16x32) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x32, z // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDD, CPU Feature: AVX512EVEX +// Asm: VPSHRDD, CPU Feature: AVX512VBMI2 func (x Int32x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x4, z Mask32x4) Int32x4 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the @@ -7624,7 +7624,7 @@ func (x Int32x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x4, z M // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDD, CPU Feature: AVX512EVEX +// Asm: VPSHRDD, CPU Feature: AVX512VBMI2 func (x Int32x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x8, z Mask32x8) Int32x8 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the @@ -7632,7 +7632,7 @@ func (x Int32x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x8, z M // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDD, CPU Feature: AVX512EVEX +// Asm: VPSHRDD, CPU Feature: AVX512VBMI2 func (x Int32x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x16, z Mask32x16) Int32x16 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the @@ -7640,7 +7640,7 @@ func (x Int32x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x16, z // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +// Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 func (x Int64x2) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x2, z Mask64x2) Int64x2 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the @@ -7648,7 +7648,7 @@ func (x Int64x2) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x2, z M // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +// Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 func (x Int64x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x4, z Mask64x4) Int64x4 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the @@ -7656,7 +7656,7 @@ func (x Int64x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x4, z M // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +// Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 func (x Int64x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x8, z Mask64x8) Int64x8 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the @@ -7664,7 +7664,7 @@ func (x Int64x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x8, z M // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VPSHRDW, CPU Feature: AVX512EVEX +// Asm: VPSHRDW, CPU Feature: AVX512VBMI2 func (x Uint16x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x8, z Mask16x8) Uint16x8 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the @@ -7672,7 +7672,7 @@ func (x Uint16x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x8, z // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDW, CPU Feature: AVX512EVEX +// Asm: VPSHRDW, CPU Feature: AVX512VBMI2 func (x Uint16x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x16, z Mask16x16) Uint16x16 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the @@ -7680,7 +7680,7 @@ func (x Uint16x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x16, // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDW, CPU Feature: AVX512EVEX +// Asm: VPSHRDW, CPU Feature: AVX512VBMI2 func (x Uint16x32) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x32, z Mask16x32) Uint16x32 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the @@ -7688,7 +7688,7 @@ func (x Uint16x32) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x32, // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDD, CPU Feature: AVX512EVEX +// Asm: VPSHRDD, CPU Feature: AVX512VBMI2 func (x Uint32x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x4, z Mask32x4) Uint32x4 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the @@ -7696,7 +7696,7 @@ func (x Uint32x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x4, z // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDD, CPU Feature: AVX512EVEX +// Asm: VPSHRDD, CPU Feature: AVX512VBMI2 func (x Uint32x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x8, z Mask32x8) Uint32x8 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the @@ -7704,7 +7704,7 @@ func (x Uint32x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x8, z // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDD, CPU Feature: AVX512EVEX +// Asm: VPSHRDD, CPU Feature: AVX512VBMI2 func (x Uint32x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x16, z Mask32x16) Uint32x16 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the @@ -7712,7 +7712,7 @@ func (x Uint32x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x16, // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +// Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 func (x Uint64x2) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint64x2, z Mask64x2) Uint64x2 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the @@ -7720,7 +7720,7 @@ func (x Uint64x2) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint64x2, z // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +// Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 func (x Uint64x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint64x4, z Mask64x4) Uint64x4 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the @@ -7728,116 +7728,116 @@ func (x Uint64x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint64x4, z // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +// Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 func (x Uint64x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint64x8, z Mask64x8) Uint64x8 /* ShiftAllRightMasked */ // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAW, CPU Feature: AVX512EVEX +// Asm: VPSRAW, CPU Feature: AVX512BW func (x Int16x8) ShiftAllRightMasked(y uint64, z Mask16x8) Int16x8 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAW, CPU Feature: AVX512EVEX +// Asm: VPSRAW, CPU Feature: AVX512BW func (x Int16x16) ShiftAllRightMasked(y uint64, z Mask16x16) Int16x16 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAW, CPU Feature: AVX512EVEX +// Asm: VPSRAW, CPU Feature: AVX512BW func (x Int16x32) ShiftAllRightMasked(y uint64, z Mask16x32) Int16x32 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAD, CPU Feature: AVX512EVEX +// Asm: VPSRAD, CPU Feature: AVX512F func (x Int32x4) ShiftAllRightMasked(y uint64, z Mask32x4) Int32x4 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAD, CPU Feature: AVX512EVEX +// Asm: VPSRAD, CPU Feature: AVX512F func (x Int32x8) ShiftAllRightMasked(y uint64, z Mask32x8) Int32x8 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAD, CPU Feature: AVX512EVEX +// Asm: VPSRAD, CPU Feature: AVX512F func (x Int32x16) ShiftAllRightMasked(y uint64, z Mask32x16) Int32x16 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAQ, CPU Feature: AVX512EVEX +// Asm: VPSRAQ, CPU Feature: AVX512F func (x Int64x2) ShiftAllRightMasked(y uint64, z Mask64x2) Int64x2 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAQ, CPU Feature: AVX512EVEX +// Asm: VPSRAQ, CPU Feature: AVX512F func (x Int64x4) ShiftAllRightMasked(y uint64, z Mask64x4) Int64x4 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAQ, CPU Feature: AVX512EVEX +// Asm: VPSRAQ, CPU Feature: AVX512F func (x Int64x8) ShiftAllRightMasked(y uint64, z Mask64x8) Int64x8 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. 
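// A minimal scalar sketch of the distinction documented above, assuming ordinary Go
// shifts model one lane: signed lanes (VPSRAW/VPSRAD/VPSRAQ) replicate the sign bit
// into the emptied upper bits, while unsigned lanes (VPSRLW/VPSRLD/VPSRLQ) shift in
// zeros. The helper name is an assumption of this sketch.
func shiftRightLane(si int16, ui uint16, s uint) (int16, uint16) {
	// e.g. si = -8 gives -4 (sign bit copied in); ui = 0xFFF8 gives 0x7FFC (zero filled)
	return si >> s, ui >> s
}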
// -// Asm: VPSRLW, CPU Feature: AVX512EVEX +// Asm: VPSRLW, CPU Feature: AVX512BW func (x Uint16x8) ShiftAllRightMasked(y uint64, z Mask16x8) Uint16x8 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSRLW, CPU Feature: AVX512EVEX +// Asm: VPSRLW, CPU Feature: AVX512BW func (x Uint16x16) ShiftAllRightMasked(y uint64, z Mask16x16) Uint16x16 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSRLW, CPU Feature: AVX512EVEX +// Asm: VPSRLW, CPU Feature: AVX512BW func (x Uint16x32) ShiftAllRightMasked(y uint64, z Mask16x32) Uint16x32 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSRLD, CPU Feature: AVX512EVEX +// Asm: VPSRLD, CPU Feature: AVX512F func (x Uint32x4) ShiftAllRightMasked(y uint64, z Mask32x4) Uint32x4 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSRLD, CPU Feature: AVX512EVEX +// Asm: VPSRLD, CPU Feature: AVX512F func (x Uint32x8) ShiftAllRightMasked(y uint64, z Mask32x8) Uint32x8 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSRLD, CPU Feature: AVX512EVEX +// Asm: VPSRLD, CPU Feature: AVX512F func (x Uint32x16) ShiftAllRightMasked(y uint64, z Mask32x16) Uint32x16 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSRLQ, CPU Feature: AVX512EVEX +// Asm: VPSRLQ, CPU Feature: AVX512F func (x Uint64x2) ShiftAllRightMasked(y uint64, z Mask64x2) Uint64x2 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSRLQ, CPU Feature: AVX512EVEX +// Asm: VPSRLQ, CPU Feature: AVX512F func (x Uint64x4) ShiftAllRightMasked(y uint64, z Mask64x4) Uint64x4 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSRLQ, CPU Feature: AVX512EVEX +// Asm: VPSRLQ, CPU Feature: AVX512F func (x Uint64x8) ShiftAllRightMasked(y uint64, z Mask64x8) Uint64x8 /* ShiftLeft */ // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVW, CPU Feature: AVX512EVEX +// Asm: VPSLLVW, CPU Feature: AVX512BW func (x Int16x8) ShiftLeft(y Int16x8) Int16x8 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVW, CPU Feature: AVX512EVEX +// Asm: VPSLLVW, CPU Feature: AVX512BW func (x Int16x16) ShiftLeft(y Int16x16) Int16x16 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVW, CPU Feature: AVX512EVEX +// Asm: VPSLLVW, CPU Feature: AVX512BW func (x Int16x32) ShiftLeft(y Int16x32) Int16x32 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. @@ -7852,7 +7852,7 @@ func (x Int32x8) ShiftLeft(y Int32x8) Int32x8 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. 
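// A minimal scalar sketch of the per-element behavior described above, with a slice
// standing in for the vector lanes: unlike ShiftAllLeft, which applies one count to
// every lane, ShiftLeft takes its count from the matching lane of y. The helper name,
// the equal-length assumption on x and y, and the clearing of lanes whose count
// reaches the lane width are assumptions of this sketch.
func shiftLeftLanes(x, y []uint16) []uint16 {
	r := make([]uint16, len(x)) // x and y are assumed to have the same length
	for i := range x {
		if c := y[i]; c < 16 {
			r[i] = x[i] << c // per-lane count from y
		} // lanes with counts of 16 or more are left at zero
	}
	return r
}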
// -// Asm: VPSLLVD, CPU Feature: AVX512EVEX +// Asm: VPSLLVD, CPU Feature: AVX512F func (x Int32x16) ShiftLeft(y Int32x16) Int32x16 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. @@ -7867,22 +7867,22 @@ func (x Int64x4) ShiftLeft(y Int64x4) Int64x4 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +// Asm: VPSLLVQ, CPU Feature: AVX512F func (x Int64x8) ShiftLeft(y Int64x8) Int64x8 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVW, CPU Feature: AVX512EVEX +// Asm: VPSLLVW, CPU Feature: AVX512BW func (x Uint16x8) ShiftLeft(y Uint16x8) Uint16x8 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVW, CPU Feature: AVX512EVEX +// Asm: VPSLLVW, CPU Feature: AVX512BW func (x Uint16x16) ShiftLeft(y Uint16x16) Uint16x16 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVW, CPU Feature: AVX512EVEX +// Asm: VPSLLVW, CPU Feature: AVX512BW func (x Uint16x32) ShiftLeft(y Uint16x32) Uint16x32 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. @@ -7897,7 +7897,7 @@ func (x Uint32x8) ShiftLeft(y Uint32x8) Uint32x8 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVD, CPU Feature: AVX512EVEX +// Asm: VPSLLVD, CPU Feature: AVX512F func (x Uint32x16) ShiftLeft(y Uint32x16) Uint32x16 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. @@ -7912,7 +7912,7 @@ func (x Uint64x4) ShiftLeft(y Uint64x4) Uint64x4 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +// Asm: VPSLLVQ, CPU Feature: AVX512F func (x Uint64x8) ShiftLeft(y Uint64x8) Uint64x8 /* ShiftLeftAndFillUpperFrom */ @@ -7920,109 +7920,109 @@ func (x Uint64x8) ShiftLeft(y Uint64x8) Uint64x8 // ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +// Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 func (x Int16x8) ShiftLeftAndFillUpperFrom(y Int16x8, z Int16x8) Int16x8 // ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
// -// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +// Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 func (x Int16x16) ShiftLeftAndFillUpperFrom(y Int16x16, z Int16x16) Int16x16 // ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +// Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 func (x Int16x32) ShiftLeftAndFillUpperFrom(y Int16x32, z Int16x32) Int16x32 // ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +// Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 func (x Int32x4) ShiftLeftAndFillUpperFrom(y Int32x4, z Int32x4) Int32x4 // ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +// Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 func (x Int32x8) ShiftLeftAndFillUpperFrom(y Int32x8, z Int32x8) Int32x8 // ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +// Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 func (x Int32x16) ShiftLeftAndFillUpperFrom(y Int32x16, z Int32x16) Int32x16 // ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +// Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 func (x Int64x2) ShiftLeftAndFillUpperFrom(y Int64x2, z Int64x2) Int64x2 // ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +// Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 func (x Int64x4) ShiftLeftAndFillUpperFrom(y Int64x4, z Int64x4) Int64x4 // ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +// Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 func (x Int64x8) ShiftLeftAndFillUpperFrom(y Int64x8, z Int64x8) Int64x8 // ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
// -// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +// Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 func (x Uint16x8) ShiftLeftAndFillUpperFrom(y Uint16x8, z Uint16x8) Uint16x8 // ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +// Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 func (x Uint16x16) ShiftLeftAndFillUpperFrom(y Uint16x16, z Uint16x16) Uint16x16 // ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +// Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 func (x Uint16x32) ShiftLeftAndFillUpperFrom(y Uint16x32, z Uint16x32) Uint16x32 // ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +// Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 func (x Uint32x4) ShiftLeftAndFillUpperFrom(y Uint32x4, z Uint32x4) Uint32x4 // ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +// Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 func (x Uint32x8) ShiftLeftAndFillUpperFrom(y Uint32x8, z Uint32x8) Uint32x8 // ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +// Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 func (x Uint32x16) ShiftLeftAndFillUpperFrom(y Uint32x16, z Uint32x16) Uint32x16 // ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +// Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 func (x Uint64x2) ShiftLeftAndFillUpperFrom(y Uint64x2, z Uint64x2) Uint64x2 // ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +// Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 func (x Uint64x4) ShiftLeftAndFillUpperFrom(y Uint64x4, z Uint64x4) Uint64x4 // ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
// -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +// Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 func (x Uint64x8) ShiftLeftAndFillUpperFrom(y Uint64x8, z Uint64x8) Uint64x8 /* ShiftLeftAndFillUpperFromMasked */ @@ -8030,218 +8030,218 @@ func (x Uint64x8) ShiftLeftAndFillUpperFrom(y Uint64x8, z Uint64x8) Uint64x8 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +// Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 func (x Int16x8) ShiftLeftAndFillUpperFromMasked(y Int16x8, z Int16x8, u Mask16x8) Int16x8 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +// Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 func (x Int16x16) ShiftLeftAndFillUpperFromMasked(y Int16x16, z Int16x16, u Mask16x16) Int16x16 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +// Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 func (x Int16x32) ShiftLeftAndFillUpperFromMasked(y Int16x32, z Int16x32, u Mask16x32) Int16x32 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +// Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 func (x Int32x4) ShiftLeftAndFillUpperFromMasked(y Int32x4, z Int32x4, u Mask32x4) Int32x4 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +// Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 func (x Int32x8) ShiftLeftAndFillUpperFromMasked(y Int32x8, z Int32x8, u Mask32x8) Int32x8 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +// Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 func (x Int32x16) ShiftLeftAndFillUpperFromMasked(y Int32x16, z Int32x16, u Mask32x16) Int32x16 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
// -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +// Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 func (x Int64x2) ShiftLeftAndFillUpperFromMasked(y Int64x2, z Int64x2, u Mask64x2) Int64x2 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +// Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 func (x Int64x4) ShiftLeftAndFillUpperFromMasked(y Int64x4, z Int64x4, u Mask64x4) Int64x4 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +// Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 func (x Int64x8) ShiftLeftAndFillUpperFromMasked(y Int64x8, z Int64x8, u Mask64x8) Int64x8 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +// Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 func (x Uint16x8) ShiftLeftAndFillUpperFromMasked(y Uint16x8, z Uint16x8, u Mask16x8) Uint16x8 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +// Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 func (x Uint16x16) ShiftLeftAndFillUpperFromMasked(y Uint16x16, z Uint16x16, u Mask16x16) Uint16x16 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +// Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 func (x Uint16x32) ShiftLeftAndFillUpperFromMasked(y Uint16x32, z Uint16x32, u Mask16x32) Uint16x32 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +// Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 func (x Uint32x4) ShiftLeftAndFillUpperFromMasked(y Uint32x4, z Uint32x4, u Mask32x4) Uint32x4 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +// Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 func (x Uint32x8) ShiftLeftAndFillUpperFromMasked(y Uint32x8, z Uint32x8, u Mask32x8) Uint32x8 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
// -// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +// Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 func (x Uint32x16) ShiftLeftAndFillUpperFromMasked(y Uint32x16, z Uint32x16, u Mask32x16) Uint32x16 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +// Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 func (x Uint64x2) ShiftLeftAndFillUpperFromMasked(y Uint64x2, z Uint64x2, u Mask64x2) Uint64x2 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +// Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 func (x Uint64x4) ShiftLeftAndFillUpperFromMasked(y Uint64x4, z Uint64x4, u Mask64x4) Uint64x4 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +// Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 func (x Uint64x8) ShiftLeftAndFillUpperFromMasked(y Uint64x8, z Uint64x8, u Mask64x8) Uint64x8 /* ShiftLeftMasked */ // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVW, CPU Feature: AVX512EVEX +// Asm: VPSLLVW, CPU Feature: AVX512BW func (x Int16x8) ShiftLeftMasked(y Int16x8, z Mask16x8) Int16x8 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVW, CPU Feature: AVX512EVEX +// Asm: VPSLLVW, CPU Feature: AVX512BW func (x Int16x16) ShiftLeftMasked(y Int16x16, z Mask16x16) Int16x16 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVW, CPU Feature: AVX512EVEX +// Asm: VPSLLVW, CPU Feature: AVX512BW func (x Int16x32) ShiftLeftMasked(y Int16x32, z Mask16x32) Int16x32 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVD, CPU Feature: AVX512EVEX +// Asm: VPSLLVD, CPU Feature: AVX512F func (x Int32x4) ShiftLeftMasked(y Int32x4, z Mask32x4) Int32x4 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVD, CPU Feature: AVX512EVEX +// Asm: VPSLLVD, CPU Feature: AVX512F func (x Int32x8) ShiftLeftMasked(y Int32x8, z Mask32x8) Int32x8 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVD, CPU Feature: AVX512EVEX +// Asm: VPSLLVD, CPU Feature: AVX512F func (x Int32x16) ShiftLeftMasked(y Int32x16, z Mask32x16) Int32x16 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. 
// -// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +// Asm: VPSLLVQ, CPU Feature: AVX512F func (x Int64x2) ShiftLeftMasked(y Int64x2, z Mask64x2) Int64x2 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +// Asm: VPSLLVQ, CPU Feature: AVX512F func (x Int64x4) ShiftLeftMasked(y Int64x4, z Mask64x4) Int64x4 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +// Asm: VPSLLVQ, CPU Feature: AVX512F func (x Int64x8) ShiftLeftMasked(y Int64x8, z Mask64x8) Int64x8 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVW, CPU Feature: AVX512EVEX +// Asm: VPSLLVW, CPU Feature: AVX512BW func (x Uint16x8) ShiftLeftMasked(y Uint16x8, z Mask16x8) Uint16x8 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVW, CPU Feature: AVX512EVEX +// Asm: VPSLLVW, CPU Feature: AVX512BW func (x Uint16x16) ShiftLeftMasked(y Uint16x16, z Mask16x16) Uint16x16 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVW, CPU Feature: AVX512EVEX +// Asm: VPSLLVW, CPU Feature: AVX512BW func (x Uint16x32) ShiftLeftMasked(y Uint16x32, z Mask16x32) Uint16x32 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVD, CPU Feature: AVX512EVEX +// Asm: VPSLLVD, CPU Feature: AVX512F func (x Uint32x4) ShiftLeftMasked(y Uint32x4, z Mask32x4) Uint32x4 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVD, CPU Feature: AVX512EVEX +// Asm: VPSLLVD, CPU Feature: AVX512F func (x Uint32x8) ShiftLeftMasked(y Uint32x8, z Mask32x8) Uint32x8 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVD, CPU Feature: AVX512EVEX +// Asm: VPSLLVD, CPU Feature: AVX512F func (x Uint32x16) ShiftLeftMasked(y Uint32x16, z Mask32x16) Uint32x16 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +// Asm: VPSLLVQ, CPU Feature: AVX512F func (x Uint64x2) ShiftLeftMasked(y Uint64x2, z Mask64x2) Uint64x2 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +// Asm: VPSLLVQ, CPU Feature: AVX512F func (x Uint64x4) ShiftLeftMasked(y Uint64x4, z Mask64x4) Uint64x4 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. 
// -// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +// Asm: VPSLLVQ, CPU Feature: AVX512F func (x Uint64x8) ShiftLeftMasked(y Uint64x8, z Mask64x8) Uint64x8 /* ShiftRight */ // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAVW, CPU Feature: AVX512EVEX +// Asm: VPSRAVW, CPU Feature: AVX512BW func (x Int16x8) ShiftRight(y Int16x8) Int16x8 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAVW, CPU Feature: AVX512EVEX +// Asm: VPSRAVW, CPU Feature: AVX512BW func (x Int16x16) ShiftRight(y Int16x16) Int16x16 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAVW, CPU Feature: AVX512EVEX +// Asm: VPSRAVW, CPU Feature: AVX512BW func (x Int16x32) ShiftRight(y Int16x32) Int16x32 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. @@ -8256,37 +8256,37 @@ func (x Int32x8) ShiftRight(y Int32x8) Int32x8 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAVD, CPU Feature: AVX512EVEX +// Asm: VPSRAVD, CPU Feature: AVX512F func (x Int32x16) ShiftRight(y Int32x16) Int32x16 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +// Asm: VPSRAVQ, CPU Feature: AVX512F func (x Int64x2) ShiftRight(y Int64x2) Int64x2 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +// Asm: VPSRAVQ, CPU Feature: AVX512F func (x Int64x4) ShiftRight(y Int64x4) Int64x4 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +// Asm: VPSRAVQ, CPU Feature: AVX512F func (x Int64x8) ShiftRight(y Int64x8) Int64x8 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPSRLVW, CPU Feature: AVX512EVEX +// Asm: VPSRLVW, CPU Feature: AVX512BW func (x Uint16x8) ShiftRight(y Uint16x8) Uint16x8 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPSRLVW, CPU Feature: AVX512EVEX +// Asm: VPSRLVW, CPU Feature: AVX512BW func (x Uint16x16) ShiftRight(y Uint16x16) Uint16x16 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPSRLVW, CPU Feature: AVX512EVEX +// Asm: VPSRLVW, CPU Feature: AVX512BW func (x Uint16x32) ShiftRight(y Uint16x32) Uint16x32 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. 
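The ShiftRight comments above distinguish sign-bit fill (signed lanes) from zero fill (unsigned lanes); the same split follows from operand signedness in scalar Go, as in this illustrative snippet (not from the simd package):

// rightShiftExample contrasts arithmetic and logical right shifts on one
// lane's worth of data.
func rightShiftExample() (int16, uint16) {
	var a int16 = -8      // bit pattern 0xfff8
	var b uint16 = 0xfff8 // same bit pattern, unsigned
	return a >> 1, b >> 1 // -4 (0xfffc): sign bit replicated; 0x7ffc: zero filled
}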
@@ -8301,7 +8301,7 @@ func (x Uint32x8) ShiftRight(y Uint32x8) Uint32x8 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPSRLVD, CPU Feature: AVX512EVEX +// Asm: VPSRLVD, CPU Feature: AVX512F func (x Uint32x16) ShiftRight(y Uint32x16) Uint32x16 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. @@ -8316,7 +8316,7 @@ func (x Uint64x4) ShiftRight(y Uint64x4) Uint64x4 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +// Asm: VPSRLVQ, CPU Feature: AVX512F func (x Uint64x8) ShiftRight(y Uint64x8) Uint64x8 /* ShiftRightAndFillUpperFrom */ @@ -8324,109 +8324,109 @@ func (x Uint64x8) ShiftRight(y Uint64x8) Uint64x8 // ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +// Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 func (x Int16x8) ShiftRightAndFillUpperFrom(y Int16x8, z Int16x8) Int16x8 // ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +// Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 func (x Int16x16) ShiftRightAndFillUpperFrom(y Int16x16, z Int16x16) Int16x16 // ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +// Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 func (x Int16x32) ShiftRightAndFillUpperFrom(y Int16x32, z Int16x32) Int16x32 // ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +// Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 func (x Int32x4) ShiftRightAndFillUpperFrom(y Int32x4, z Int32x4) Int32x4 // ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +// Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 func (x Int32x8) ShiftRightAndFillUpperFrom(y Int32x8, z Int32x8) Int32x8 // ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// -// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +// Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 func (x Int32x16) ShiftRightAndFillUpperFrom(y Int32x16, z Int32x16) Int32x16 // ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +// Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 func (x Int64x2) ShiftRightAndFillUpperFrom(y Int64x2, z Int64x2) Int64x2 // ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +// Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 func (x Int64x4) ShiftRightAndFillUpperFrom(y Int64x4, z Int64x4) Int64x4 // ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +// Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 func (x Int64x8) ShiftRightAndFillUpperFrom(y Int64x8, z Int64x8) Int64x8 // ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +// Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 func (x Uint16x8) ShiftRightAndFillUpperFrom(y Uint16x8, z Uint16x8) Uint16x8 // ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +// Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 func (x Uint16x16) ShiftRightAndFillUpperFrom(y Uint16x16, z Uint16x16) Uint16x16 // ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +// Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 func (x Uint16x32) ShiftRightAndFillUpperFrom(y Uint16x32, z Uint16x32) Uint16x32 // ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +// Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 func (x Uint32x4) ShiftRightAndFillUpperFrom(y Uint32x4, z Uint32x4) Uint32x4 // ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// -// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +// Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 func (x Uint32x8) ShiftRightAndFillUpperFrom(y Uint32x8, z Uint32x8) Uint32x8 // ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +// Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 func (x Uint32x16) ShiftRightAndFillUpperFrom(y Uint32x16, z Uint32x16) Uint32x16 // ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +// Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 func (x Uint64x2) ShiftRightAndFillUpperFrom(y Uint64x2, z Uint64x2) Uint64x2 // ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +// Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 func (x Uint64x4) ShiftRightAndFillUpperFrom(y Uint64x4, z Uint64x4) Uint64x4 // ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +// Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 func (x Uint64x8) ShiftRightAndFillUpperFrom(y Uint64x8, z Uint64x8) Uint64x8 /* ShiftRightAndFillUpperFromMasked */ @@ -8434,201 +8434,201 @@ func (x Uint64x8) ShiftRightAndFillUpperFrom(y Uint64x8, z Uint64x8) Uint64x8 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +// Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 func (x Int16x8) ShiftRightAndFillUpperFromMasked(y Int16x8, z Int16x8, u Mask16x8) Int16x8 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +// Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 func (x Int16x16) ShiftRightAndFillUpperFromMasked(y Int16x16, z Int16x16, u Mask16x16) Int16x16 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// -// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +// Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 func (x Int16x32) ShiftRightAndFillUpperFromMasked(y Int16x32, z Int16x32, u Mask16x32) Int16x32 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +// Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 func (x Int32x4) ShiftRightAndFillUpperFromMasked(y Int32x4, z Int32x4, u Mask32x4) Int32x4 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +// Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 func (x Int32x8) ShiftRightAndFillUpperFromMasked(y Int32x8, z Int32x8, u Mask32x8) Int32x8 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +// Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 func (x Int32x16) ShiftRightAndFillUpperFromMasked(y Int32x16, z Int32x16, u Mask32x16) Int32x16 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +// Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 func (x Int64x2) ShiftRightAndFillUpperFromMasked(y Int64x2, z Int64x2, u Mask64x2) Int64x2 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +// Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 func (x Int64x4) ShiftRightAndFillUpperFromMasked(y Int64x4, z Int64x4, u Mask64x4) Int64x4 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +// Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 func (x Int64x8) ShiftRightAndFillUpperFromMasked(y Int64x8, z Int64x8, u Mask64x8) Int64x8 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +// Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 func (x Uint16x8) ShiftRightAndFillUpperFromMasked(y Uint16x8, z Uint16x8, u Mask16x8) Uint16x8 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// -// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +// Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 func (x Uint16x16) ShiftRightAndFillUpperFromMasked(y Uint16x16, z Uint16x16, u Mask16x16) Uint16x16 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +// Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 func (x Uint16x32) ShiftRightAndFillUpperFromMasked(y Uint16x32, z Uint16x32, u Mask16x32) Uint16x32 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +// Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 func (x Uint32x4) ShiftRightAndFillUpperFromMasked(y Uint32x4, z Uint32x4, u Mask32x4) Uint32x4 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +// Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 func (x Uint32x8) ShiftRightAndFillUpperFromMasked(y Uint32x8, z Uint32x8, u Mask32x8) Uint32x8 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +// Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 func (x Uint32x16) ShiftRightAndFillUpperFromMasked(y Uint32x16, z Uint32x16, u Mask32x16) Uint32x16 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +// Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 func (x Uint64x2) ShiftRightAndFillUpperFromMasked(y Uint64x2, z Uint64x2, u Mask64x2) Uint64x2 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +// Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 func (x Uint64x4) ShiftRightAndFillUpperFromMasked(y Uint64x4, z Uint64x4, u Mask64x4) Uint64x4 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +// Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 func (x Uint64x8) ShiftRightAndFillUpperFromMasked(y Uint64x8, z Uint64x8, u Mask64x8) Uint64x8 /* ShiftRightMasked */ // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
// -// Asm: VPSRAVW, CPU Feature: AVX512EVEX +// Asm: VPSRAVW, CPU Feature: AVX512BW func (x Int16x8) ShiftRightMasked(y Int16x8, z Mask16x8) Int16x8 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAVW, CPU Feature: AVX512EVEX +// Asm: VPSRAVW, CPU Feature: AVX512BW func (x Int16x16) ShiftRightMasked(y Int16x16, z Mask16x16) Int16x16 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAVW, CPU Feature: AVX512EVEX +// Asm: VPSRAVW, CPU Feature: AVX512BW func (x Int16x32) ShiftRightMasked(y Int16x32, z Mask16x32) Int16x32 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAVD, CPU Feature: AVX512EVEX +// Asm: VPSRAVD, CPU Feature: AVX512F func (x Int32x4) ShiftRightMasked(y Int32x4, z Mask32x4) Int32x4 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAVD, CPU Feature: AVX512EVEX +// Asm: VPSRAVD, CPU Feature: AVX512F func (x Int32x8) ShiftRightMasked(y Int32x8, z Mask32x8) Int32x8 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAVD, CPU Feature: AVX512EVEX +// Asm: VPSRAVD, CPU Feature: AVX512F func (x Int32x16) ShiftRightMasked(y Int32x16, z Mask32x16) Int32x16 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +// Asm: VPSRAVQ, CPU Feature: AVX512F func (x Int64x2) ShiftRightMasked(y Int64x2, z Mask64x2) Int64x2 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +// Asm: VPSRAVQ, CPU Feature: AVX512F func (x Int64x4) ShiftRightMasked(y Int64x4, z Mask64x4) Int64x4 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +// Asm: VPSRAVQ, CPU Feature: AVX512F func (x Int64x8) ShiftRightMasked(y Int64x8, z Mask64x8) Int64x8 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPSRLVW, CPU Feature: AVX512EVEX +// Asm: VPSRLVW, CPU Feature: AVX512BW func (x Uint16x8) ShiftRightMasked(y Uint16x8, z Mask16x8) Uint16x8 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPSRLVW, CPU Feature: AVX512EVEX +// Asm: VPSRLVW, CPU Feature: AVX512BW func (x Uint16x16) ShiftRightMasked(y Uint16x16, z Mask16x16) Uint16x16 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. 
// -// Asm: VPSRLVW, CPU Feature: AVX512EVEX +// Asm: VPSRLVW, CPU Feature: AVX512BW func (x Uint16x32) ShiftRightMasked(y Uint16x32, z Mask16x32) Uint16x32 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPSRLVD, CPU Feature: AVX512EVEX +// Asm: VPSRLVD, CPU Feature: AVX512F func (x Uint32x4) ShiftRightMasked(y Uint32x4, z Mask32x4) Uint32x4 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPSRLVD, CPU Feature: AVX512EVEX +// Asm: VPSRLVD, CPU Feature: AVX512F func (x Uint32x8) ShiftRightMasked(y Uint32x8, z Mask32x8) Uint32x8 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPSRLVD, CPU Feature: AVX512EVEX +// Asm: VPSRLVD, CPU Feature: AVX512F func (x Uint32x16) ShiftRightMasked(y Uint32x16, z Mask32x16) Uint32x16 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +// Asm: VPSRLVQ, CPU Feature: AVX512F func (x Uint64x2) ShiftRightMasked(y Uint64x2, z Mask64x2) Uint64x2 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +// Asm: VPSRLVQ, CPU Feature: AVX512F func (x Uint64x4) ShiftRightMasked(y Uint64x4, z Mask64x4) Uint64x4 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +// Asm: VPSRLVQ, CPU Feature: AVX512F func (x Uint64x8) ShiftRightMasked(y Uint64x8, z Mask64x8) Uint64x8 /* Sign */ @@ -8683,7 +8683,7 @@ func (x Float32x8) Sqrt() Float32x8 // Sqrt computes the square root of each element. // -// Asm: VSQRTPS, CPU Feature: AVX512EVEX +// Asm: VSQRTPS, CPU Feature: AVX512F func (x Float32x16) Sqrt() Float32x16 // Sqrt computes the square root of each element. @@ -8698,39 +8698,39 @@ func (x Float64x4) Sqrt() Float64x4 // Sqrt computes the square root of each element. // -// Asm: VSQRTPD, CPU Feature: AVX512EVEX +// Asm: VSQRTPD, CPU Feature: AVX512F func (x Float64x8) Sqrt() Float64x8 /* SqrtMasked */ // SqrtMasked computes the square root of each element. // -// Asm: VSQRTPS, CPU Feature: AVX512EVEX +// Asm: VSQRTPS, CPU Feature: AVX512F func (x Float32x4) SqrtMasked(y Mask32x4) Float32x4 // SqrtMasked computes the square root of each element. // -// Asm: VSQRTPS, CPU Feature: AVX512EVEX +// Asm: VSQRTPS, CPU Feature: AVX512F func (x Float32x8) SqrtMasked(y Mask32x8) Float32x8 // SqrtMasked computes the square root of each element. // -// Asm: VSQRTPS, CPU Feature: AVX512EVEX +// Asm: VSQRTPS, CPU Feature: AVX512F func (x Float32x16) SqrtMasked(y Mask32x16) Float32x16 // SqrtMasked computes the square root of each element. // -// Asm: VSQRTPD, CPU Feature: AVX512EVEX +// Asm: VSQRTPD, CPU Feature: AVX512F func (x Float64x2) SqrtMasked(y Mask64x2) Float64x2 // SqrtMasked computes the square root of each element. // -// Asm: VSQRTPD, CPU Feature: AVX512EVEX +// Asm: VSQRTPD, CPU Feature: AVX512F func (x Float64x4) SqrtMasked(y Mask64x4) Float64x4 // SqrtMasked computes the square root of each element. 
// -// Asm: VSQRTPD, CPU Feature: AVX512EVEX +// Asm: VSQRTPD, CPU Feature: AVX512F func (x Float64x8) SqrtMasked(y Mask64x8) Float64x8 /* Sub */ @@ -8747,7 +8747,7 @@ func (x Float32x8) Sub(y Float32x8) Float32x8 // Sub subtracts corresponding elements of two vectors. // -// Asm: VSUBPS, CPU Feature: AVX512EVEX +// Asm: VSUBPS, CPU Feature: AVX512F func (x Float32x16) Sub(y Float32x16) Float32x16 // Sub subtracts corresponding elements of two vectors. @@ -8762,7 +8762,7 @@ func (x Float64x4) Sub(y Float64x4) Float64x4 // Sub subtracts corresponding elements of two vectors. // -// Asm: VSUBPD, CPU Feature: AVX512EVEX +// Asm: VSUBPD, CPU Feature: AVX512F func (x Float64x8) Sub(y Float64x8) Float64x8 // Sub subtracts corresponding elements of two vectors. @@ -8777,7 +8777,7 @@ func (x Int8x32) Sub(y Int8x32) Int8x32 // Sub subtracts corresponding elements of two vectors. // -// Asm: VPSUBB, CPU Feature: AVX512EVEX +// Asm: VPSUBB, CPU Feature: AVX512BW func (x Int8x64) Sub(y Int8x64) Int8x64 // Sub subtracts corresponding elements of two vectors. @@ -8792,7 +8792,7 @@ func (x Int16x16) Sub(y Int16x16) Int16x16 // Sub subtracts corresponding elements of two vectors. // -// Asm: VPSUBW, CPU Feature: AVX512EVEX +// Asm: VPSUBW, CPU Feature: AVX512BW func (x Int16x32) Sub(y Int16x32) Int16x32 // Sub subtracts corresponding elements of two vectors. @@ -8807,7 +8807,7 @@ func (x Int32x8) Sub(y Int32x8) Int32x8 // Sub subtracts corresponding elements of two vectors. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX +// Asm: VPSUBD, CPU Feature: AVX512F func (x Int32x16) Sub(y Int32x16) Int32x16 // Sub subtracts corresponding elements of two vectors. @@ -8822,7 +8822,7 @@ func (x Int64x4) Sub(y Int64x4) Int64x4 // Sub subtracts corresponding elements of two vectors. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX +// Asm: VPSUBQ, CPU Feature: AVX512F func (x Int64x8) Sub(y Int64x8) Int64x8 // Sub subtracts corresponding elements of two vectors. @@ -8837,7 +8837,7 @@ func (x Uint8x32) Sub(y Uint8x32) Uint8x32 // Sub subtracts corresponding elements of two vectors. // -// Asm: VPSUBB, CPU Feature: AVX512EVEX +// Asm: VPSUBB, CPU Feature: AVX512BW func (x Uint8x64) Sub(y Uint8x64) Uint8x64 // Sub subtracts corresponding elements of two vectors. @@ -8852,7 +8852,7 @@ func (x Uint16x16) Sub(y Uint16x16) Uint16x16 // Sub subtracts corresponding elements of two vectors. // -// Asm: VPSUBW, CPU Feature: AVX512EVEX +// Asm: VPSUBW, CPU Feature: AVX512BW func (x Uint16x32) Sub(y Uint16x32) Uint16x32 // Sub subtracts corresponding elements of two vectors. @@ -8867,7 +8867,7 @@ func (x Uint32x8) Sub(y Uint32x8) Uint32x8 // Sub subtracts corresponding elements of two vectors. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX +// Asm: VPSUBD, CPU Feature: AVX512F func (x Uint32x16) Sub(y Uint32x16) Uint32x16 // Sub subtracts corresponding elements of two vectors. @@ -8882,159 +8882,159 @@ func (x Uint64x4) Sub(y Uint64x4) Uint64x4 // Sub subtracts corresponding elements of two vectors. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX +// Asm: VPSUBQ, CPU Feature: AVX512F func (x Uint64x8) Sub(y Uint64x8) Uint64x8 /* SubMasked */ // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VSUBPS, CPU Feature: AVX512EVEX +// Asm: VSUBPS, CPU Feature: AVX512F func (x Float32x4) SubMasked(y Float32x4, z Mask32x4) Float32x4 // SubMasked subtracts corresponding elements of two vectors. 
// -// Asm: VSUBPS, CPU Feature: AVX512EVEX +// Asm: VSUBPS, CPU Feature: AVX512F func (x Float32x8) SubMasked(y Float32x8, z Mask32x8) Float32x8 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VSUBPS, CPU Feature: AVX512EVEX +// Asm: VSUBPS, CPU Feature: AVX512F func (x Float32x16) SubMasked(y Float32x16, z Mask32x16) Float32x16 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VSUBPD, CPU Feature: AVX512EVEX +// Asm: VSUBPD, CPU Feature: AVX512F func (x Float64x2) SubMasked(y Float64x2, z Mask64x2) Float64x2 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VSUBPD, CPU Feature: AVX512EVEX +// Asm: VSUBPD, CPU Feature: AVX512F func (x Float64x4) SubMasked(y Float64x4, z Mask64x4) Float64x4 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VSUBPD, CPU Feature: AVX512EVEX +// Asm: VSUBPD, CPU Feature: AVX512F func (x Float64x8) SubMasked(y Float64x8, z Mask64x8) Float64x8 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VPSUBB, CPU Feature: AVX512EVEX +// Asm: VPSUBB, CPU Feature: AVX512BW func (x Int8x16) SubMasked(y Int8x16, z Mask8x16) Int8x16 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VPSUBB, CPU Feature: AVX512EVEX +// Asm: VPSUBB, CPU Feature: AVX512BW func (x Int8x32) SubMasked(y Int8x32, z Mask8x32) Int8x32 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VPSUBB, CPU Feature: AVX512EVEX +// Asm: VPSUBB, CPU Feature: AVX512BW func (x Int8x64) SubMasked(y Int8x64, z Mask8x64) Int8x64 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VPSUBW, CPU Feature: AVX512EVEX +// Asm: VPSUBW, CPU Feature: AVX512BW func (x Int16x8) SubMasked(y Int16x8, z Mask16x8) Int16x8 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VPSUBW, CPU Feature: AVX512EVEX +// Asm: VPSUBW, CPU Feature: AVX512BW func (x Int16x16) SubMasked(y Int16x16, z Mask16x16) Int16x16 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VPSUBW, CPU Feature: AVX512EVEX +// Asm: VPSUBW, CPU Feature: AVX512BW func (x Int16x32) SubMasked(y Int16x32, z Mask16x32) Int16x32 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX +// Asm: VPSUBD, CPU Feature: AVX512F func (x Int32x4) SubMasked(y Int32x4, z Mask32x4) Int32x4 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX +// Asm: VPSUBD, CPU Feature: AVX512F func (x Int32x8) SubMasked(y Int32x8, z Mask32x8) Int32x8 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX +// Asm: VPSUBD, CPU Feature: AVX512F func (x Int32x16) SubMasked(y Int32x16, z Mask32x16) Int32x16 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX +// Asm: VPSUBQ, CPU Feature: AVX512F func (x Int64x2) SubMasked(y Int64x2, z Mask64x2) Int64x2 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX +// Asm: VPSUBQ, CPU Feature: AVX512F func (x Int64x4) SubMasked(y Int64x4, z Mask64x4) Int64x4 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX +// Asm: VPSUBQ, CPU Feature: AVX512F func (x Int64x8) SubMasked(y Int64x8, z Mask64x8) Int64x8 // SubMasked subtracts corresponding elements of two vectors. 
// -// Asm: VPSUBB, CPU Feature: AVX512EVEX +// Asm: VPSUBB, CPU Feature: AVX512BW func (x Uint8x16) SubMasked(y Uint8x16, z Mask8x16) Uint8x16 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VPSUBB, CPU Feature: AVX512EVEX +// Asm: VPSUBB, CPU Feature: AVX512BW func (x Uint8x32) SubMasked(y Uint8x32, z Mask8x32) Uint8x32 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VPSUBB, CPU Feature: AVX512EVEX +// Asm: VPSUBB, CPU Feature: AVX512BW func (x Uint8x64) SubMasked(y Uint8x64, z Mask8x64) Uint8x64 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VPSUBW, CPU Feature: AVX512EVEX +// Asm: VPSUBW, CPU Feature: AVX512BW func (x Uint16x8) SubMasked(y Uint16x8, z Mask16x8) Uint16x8 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VPSUBW, CPU Feature: AVX512EVEX +// Asm: VPSUBW, CPU Feature: AVX512BW func (x Uint16x16) SubMasked(y Uint16x16, z Mask16x16) Uint16x16 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VPSUBW, CPU Feature: AVX512EVEX +// Asm: VPSUBW, CPU Feature: AVX512BW func (x Uint16x32) SubMasked(y Uint16x32, z Mask16x32) Uint16x32 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX +// Asm: VPSUBD, CPU Feature: AVX512F func (x Uint32x4) SubMasked(y Uint32x4, z Mask32x4) Uint32x4 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX +// Asm: VPSUBD, CPU Feature: AVX512F func (x Uint32x8) SubMasked(y Uint32x8, z Mask32x8) Uint32x8 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX +// Asm: VPSUBD, CPU Feature: AVX512F func (x Uint32x16) SubMasked(y Uint32x16, z Mask32x16) Uint32x16 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX +// Asm: VPSUBQ, CPU Feature: AVX512F func (x Uint64x2) SubMasked(y Uint64x2, z Mask64x2) Uint64x2 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX +// Asm: VPSUBQ, CPU Feature: AVX512F func (x Uint64x4) SubMasked(y Uint64x4, z Mask64x4) Uint64x4 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX +// Asm: VPSUBQ, CPU Feature: AVX512F func (x Uint64x8) SubMasked(y Uint64x8, z Mask64x8) Uint64x8 /* Trunc */ @@ -9065,42 +9065,42 @@ func (x Float64x4) Trunc() Float64x4 // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x4) TruncWithPrecision(prec uint8) Float32x4 // TruncWithPrecision truncates elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x8) TruncWithPrecision(prec uint8) Float32x8 // TruncWithPrecision truncates elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x16) TruncWithPrecision(prec uint8) Float32x16 // TruncWithPrecision truncates elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. 
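TruncWithPrecision's doc comment leaves "precision" abstract; one plausible scalar reading, assuming prec counts the binary fraction bits kept (the usual interpretation of VRNDSCALE's immediate), is sketched below. The helper uses package math and is illustrative only:

// truncWithPrecision models one float64 lane under the assumption that prec
// is the number of preserved fraction bits: scale up by 2^prec, truncate
// toward zero, scale back down.
func truncWithPrecision(x float64, prec uint8) float64 {
	scale := math.Ldexp(1, int(prec)) // 2^prec
	return math.Trunc(x*scale) / scale
}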
// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x2) TruncWithPrecision(prec uint8) Float64x2 // TruncWithPrecision truncates elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x4) TruncWithPrecision(prec uint8) Float64x4 // TruncWithPrecision truncates elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x8) TruncWithPrecision(prec uint8) Float64x8 /* TruncWithPrecisionMasked */ @@ -9109,106 +9109,106 @@ func (x Float64x8) TruncWithPrecision(prec uint8) Float64x8 // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x4) TruncWithPrecisionMasked(prec uint8, y Mask32x4) Float32x4 // TruncWithPrecisionMasked truncates elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x8) TruncWithPrecisionMasked(prec uint8, y Mask32x8) Float32x8 // TruncWithPrecisionMasked truncates elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x16) TruncWithPrecisionMasked(prec uint8, y Mask32x16) Float32x16 // TruncWithPrecisionMasked truncates elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x2) TruncWithPrecisionMasked(prec uint8, y Mask64x2) Float64x2 // TruncWithPrecisionMasked truncates elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x4) TruncWithPrecisionMasked(prec uint8, y Mask64x4) Float64x4 // TruncWithPrecisionMasked truncates elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x8) TruncWithPrecisionMasked(prec uint8, y Mask64x8) Float64x8 /* UnsignedSignedQuadDotProdAccumulate */ // UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPDPBUSD, CPU Feature: AVX_VNNI +// Asm: VPDPBUSD, CPU Feature: AVXVNNI func (x Int32x4) UnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16) Int32x4 // UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
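The "groups of 4 elements" wording above means each 32-bit accumulator lane consumes four byte lanes of y (unsigned) and four of z (signed). A minimal scalar sketch of a single accumulator lane, with an illustrative helper that is not from the simd package:

// quadDotProdAccumulate models one int32 lane: the four unsigned bytes of y
// and the four signed bytes of z belonging to that lane are multiplied
// pairwise, summed, and added to the accumulator, matching the non-saturating
// VPDPBUSD description.
func quadDotProdAccumulate(acc int32, y [4]uint8, z [4]int8) int32 {
	for j := 0; j < 4; j++ {
		acc += int32(y[j]) * int32(z[j])
	}
	return acc
}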
// -// Asm: VPDPBUSD, CPU Feature: AVX_VNNI +// Asm: VPDPBUSD, CPU Feature: AVXVNNI func (x Int32x8) UnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32) Int32x8 // UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +// Asm: VPDPBUSD, CPU Feature: AVX512VNNI func (x Int32x16) UnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Int32x16 // UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPDPBUSD, CPU Feature: AVX_VNNI +// Asm: VPDPBUSD, CPU Feature: AVXVNNI func (x Uint32x4) UnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16) Uint32x4 // UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPDPBUSD, CPU Feature: AVX_VNNI +// Asm: VPDPBUSD, CPU Feature: AVXVNNI func (x Uint32x8) UnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32) Uint32x8 // UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +// Asm: VPDPBUSD, CPU Feature: AVX512VNNI func (x Uint32x16) UnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Uint32x16 /* UnsignedSignedQuadDotProdAccumulateMasked */ // UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +// Asm: VPDPBUSD, CPU Feature: AVX512VNNI func (x Int32x4) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int8x16, u Mask32x4) Int32x4 // UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +// Asm: VPDPBUSD, CPU Feature: AVX512VNNI func (x Int32x8) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int8x32, u Mask32x8) Int32x8 // UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +// Asm: VPDPBUSD, CPU Feature: AVX512VNNI func (x Int32x16) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int8x64, u Mask32x16) Int32x16 // UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +// Asm: VPDPBUSD, CPU Feature: AVX512VNNI func (x Uint32x4) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int8x16, u Mask32x4) Uint32x4 // UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +// Asm: VPDPBUSD, CPU Feature: AVX512VNNI func (x Uint32x8) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int8x32, u Mask32x8) Uint32x8 // UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +// Asm: VPDPBUSD, CPU Feature: AVX512VNNI func (x Uint32x16) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int8x64, u Mask32x16) Uint32x16 /* Xor */ @@ -9245,7 +9245,7 @@ func (x Int32x8) Xor(y Int32x8) Int32x8 // Xor performs a masked bitwise XOR operation between two vectors. 
// -// Asm: VPXORD, CPU Feature: AVX512EVEX +// Asm: VPXORD, CPU Feature: AVX512F func (x Int32x16) Xor(y Int32x16) Int32x16 // Xor performs a bitwise XOR operation between two vectors. @@ -9260,7 +9260,7 @@ func (x Int64x4) Xor(y Int64x4) Int64x4 // Xor performs a masked bitwise XOR operation between two vectors. // -// Asm: VPXORQ, CPU Feature: AVX512EVEX +// Asm: VPXORQ, CPU Feature: AVX512F func (x Int64x8) Xor(y Int64x8) Int64x8 // Xor performs a bitwise XOR operation between two vectors. @@ -9295,7 +9295,7 @@ func (x Uint32x8) Xor(y Uint32x8) Uint32x8 // Xor performs a masked bitwise XOR operation between two vectors. // -// Asm: VPXORD, CPU Feature: AVX512EVEX +// Asm: VPXORD, CPU Feature: AVX512F func (x Uint32x16) Xor(y Uint32x16) Uint32x16 // Xor performs a bitwise XOR operation between two vectors. @@ -9310,69 +9310,69 @@ func (x Uint64x4) Xor(y Uint64x4) Uint64x4 // Xor performs a masked bitwise XOR operation between two vectors. // -// Asm: VPXORQ, CPU Feature: AVX512EVEX +// Asm: VPXORQ, CPU Feature: AVX512F func (x Uint64x8) Xor(y Uint64x8) Uint64x8 /* XorMasked */ // XorMasked performs a masked bitwise XOR operation between two vectors. // -// Asm: VPXORD, CPU Feature: AVX512EVEX +// Asm: VPXORD, CPU Feature: AVX512F func (x Int32x4) XorMasked(y Int32x4, z Mask32x4) Int32x4 // XorMasked performs a masked bitwise XOR operation between two vectors. // -// Asm: VPXORD, CPU Feature: AVX512EVEX +// Asm: VPXORD, CPU Feature: AVX512F func (x Int32x8) XorMasked(y Int32x8, z Mask32x8) Int32x8 // XorMasked performs a masked bitwise XOR operation between two vectors. // -// Asm: VPXORD, CPU Feature: AVX512EVEX +// Asm: VPXORD, CPU Feature: AVX512F func (x Int32x16) XorMasked(y Int32x16, z Mask32x16) Int32x16 // XorMasked performs a masked bitwise XOR operation between two vectors. // -// Asm: VPXORQ, CPU Feature: AVX512EVEX +// Asm: VPXORQ, CPU Feature: AVX512F func (x Int64x2) XorMasked(y Int64x2, z Mask64x2) Int64x2 // XorMasked performs a masked bitwise XOR operation between two vectors. // -// Asm: VPXORQ, CPU Feature: AVX512EVEX +// Asm: VPXORQ, CPU Feature: AVX512F func (x Int64x4) XorMasked(y Int64x4, z Mask64x4) Int64x4 // XorMasked performs a masked bitwise XOR operation between two vectors. // -// Asm: VPXORQ, CPU Feature: AVX512EVEX +// Asm: VPXORQ, CPU Feature: AVX512F func (x Int64x8) XorMasked(y Int64x8, z Mask64x8) Int64x8 // XorMasked performs a masked bitwise XOR operation between two vectors. // -// Asm: VPXORD, CPU Feature: AVX512EVEX +// Asm: VPXORD, CPU Feature: AVX512F func (x Uint32x4) XorMasked(y Uint32x4, z Mask32x4) Uint32x4 // XorMasked performs a masked bitwise XOR operation between two vectors. // -// Asm: VPXORD, CPU Feature: AVX512EVEX +// Asm: VPXORD, CPU Feature: AVX512F func (x Uint32x8) XorMasked(y Uint32x8, z Mask32x8) Uint32x8 // XorMasked performs a masked bitwise XOR operation between two vectors. // -// Asm: VPXORD, CPU Feature: AVX512EVEX +// Asm: VPXORD, CPU Feature: AVX512F func (x Uint32x16) XorMasked(y Uint32x16, z Mask32x16) Uint32x16 // XorMasked performs a masked bitwise XOR operation between two vectors. // -// Asm: VPXORQ, CPU Feature: AVX512EVEX +// Asm: VPXORQ, CPU Feature: AVX512F func (x Uint64x2) XorMasked(y Uint64x2, z Mask64x2) Uint64x2 // XorMasked performs a masked bitwise XOR operation between two vectors. // -// Asm: VPXORQ, CPU Feature: AVX512EVEX +// Asm: VPXORQ, CPU Feature: AVX512F func (x Uint64x4) XorMasked(y Uint64x4, z Mask64x4) Uint64x4 // XorMasked performs a masked bitwise XOR operation between two vectors. 
// -// Asm: VPXORQ, CPU Feature: AVX512EVEX +// Asm: VPXORQ, CPU Feature: AVX512F func (x Uint64x8) XorMasked(y Uint64x8, z Mask64x8) Uint64x8 // Float64x2 converts from Float32x4 to Float64x2 From f5f42753ab7653fea7b3e4ae9f0c5cf72c8b6a47 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Mon, 14 Jul 2025 17:23:19 +0000 Subject: [PATCH 084/139] [dev.simd] cmd/compile, simd: add VDPPS This CL is generated by CL 687915. Change-Id: I1a2fb031c086b2b23fd135c48f8494ba5122493a Reviewed-on: https://go-review.googlesource.com/c/go/+/687916 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/amd64/simdssa.go | 4 +- .../compile/internal/ssa/_gen/simdAMD64.rules | 2 + .../compile/internal/ssa/_gen/simdAMD64ops.go | 2 + .../internal/ssa/_gen/simdgenericOps.go | 2 + src/cmd/compile/internal/ssa/opGen.go | 48 +++++++++++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 32 +++++++++++++ .../compile/internal/ssagen/simdintrinsics.go | 2 + src/simd/ops_amd64.go | 10 ++++ src/simd/simd_wrapped_test.go | 4 ++ 9 files changed, 105 insertions(+), 1 deletion(-) diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index e2d0dd17c65f73..0ebb955accda3f 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -650,7 +650,9 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPRORQMasked512: p = simdVkvImm8(s, v) - case ssa.OpAMD64VDPPD128, + case ssa.OpAMD64VDPPS128, + ssa.OpAMD64VDPPS256, + ssa.OpAMD64VDPPD128, ssa.OpAMD64VCMPPS128, ssa.OpAMD64VCMPPS256, ssa.OpAMD64VCMPPD128, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 6043edad703012..0cbca8bf72491b 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -264,6 +264,8 @@ (DivMaskedFloat64x2 x y mask) => (VDIVPDMasked128 x y (VPMOVVec64x2ToM mask)) (DivMaskedFloat64x4 x y mask) => (VDIVPDMasked256 x y (VPMOVVec64x4ToM mask)) (DivMaskedFloat64x8 x y mask) => (VDIVPDMasked512 x y (VPMOVVec64x8ToM mask)) +(DotProdBroadcastFloat32x4 x y) => (VDPPS128 [127] x y) +(DotProdBroadcastFloat32x8 x y) => (VDPPS256 [127] x y) (DotProdBroadcastFloat64x2 x y) => (VDPPD128 [127] x y) (EqualFloat32x4 x y) => (VCMPPS128 [0] x y) (EqualFloat32x8 x y) => (VCMPPS256 [0] x y) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 3f777db5b7d58e..6985daa04bcaaa 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -736,6 +736,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VRNDSCALEPSMasked128", argLength: 2, reg: wkw, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VREDUCEPS128", argLength: 1, reg: w11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VREDUCEPSMasked128", argLength: 2, reg: wkw, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VDPPS128", argLength: 2, reg: v21, asm: "VDPPS", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VCMPPS128", argLength: 2, reg: v21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VCMPPSMasked128", argLength: 3, reg: w2kk, asm: "VCMPPS", aux: "Int8", commutative: 
true, typ: "Mask", resultInArg0: false}, {name: "VROUNDPS256", argLength: 1, reg: v11, asm: "VROUNDPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -743,6 +744,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VRNDSCALEPSMasked256", argLength: 2, reg: wkw, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VREDUCEPS256", argLength: 1, reg: w11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VREDUCEPSMasked256", argLength: 2, reg: wkw, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VDPPS256", argLength: 2, reg: v21, asm: "VDPPS", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VCMPPS256", argLength: 2, reg: v21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VCMPPSMasked256", argLength: 3, reg: w2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VEXTRACTF128128", argLength: 1, reg: v11, asm: "VEXTRACTF128", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 1180d32586b41d..a1dfc1e7da7a6b 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -53,6 +53,7 @@ func simdGenericOps() []opData { {name: "CeilFloat32x4", argLength: 1, commutative: false}, {name: "DivFloat32x4", argLength: 2, commutative: false}, {name: "DivMaskedFloat32x4", argLength: 3, commutative: false}, + {name: "DotProdBroadcastFloat32x4", argLength: 2, commutative: true}, {name: "EqualFloat32x4", argLength: 2, commutative: true}, {name: "EqualMaskedFloat32x4", argLength: 3, commutative: true}, {name: "FloorFloat32x4", argLength: 1, commutative: false}, @@ -100,6 +101,7 @@ func simdGenericOps() []opData { {name: "CeilFloat32x8", argLength: 1, commutative: false}, {name: "DivFloat32x8", argLength: 2, commutative: false}, {name: "DivMaskedFloat32x8", argLength: 3, commutative: false}, + {name: "DotProdBroadcastFloat32x8", argLength: 2, commutative: true}, {name: "EqualFloat32x8", argLength: 2, commutative: true}, {name: "EqualMaskedFloat32x8", argLength: 3, commutative: true}, {name: "FloorFloat32x8", argLength: 1, commutative: false}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 9067023f3a7c19..ba28c58b7edf50 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1931,6 +1931,7 @@ const ( OpAMD64VRNDSCALEPSMasked128 OpAMD64VREDUCEPS128 OpAMD64VREDUCEPSMasked128 + OpAMD64VDPPS128 OpAMD64VCMPPS128 OpAMD64VCMPPSMasked128 OpAMD64VROUNDPS256 @@ -1938,6 +1939,7 @@ const ( OpAMD64VRNDSCALEPSMasked256 OpAMD64VREDUCEPS256 OpAMD64VREDUCEPSMasked256 + OpAMD64VDPPS256 OpAMD64VCMPPS256 OpAMD64VCMPPSMasked256 OpAMD64VEXTRACTF128128 @@ -4369,6 +4371,7 @@ const ( OpCeilFloat32x4 OpDivFloat32x4 OpDivMaskedFloat32x4 + OpDotProdBroadcastFloat32x4 OpEqualFloat32x4 OpEqualMaskedFloat32x4 OpFloorFloat32x4 @@ -4416,6 +4419,7 @@ const ( OpCeilFloat32x8 OpDivFloat32x8 OpDivMaskedFloat32x8 + OpDotProdBroadcastFloat32x8 OpEqualFloat32x8 OpEqualMaskedFloat32x8 OpFloorFloat32x8 @@ -29582,6 +29586,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VDPPS128", + auxType: auxInt8, + argLen: 2, + commutative: true, + 
asm: x86.AVDPPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VCMPPS128", auxType: auxInt8, @@ -29687,6 +29707,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VDPPS256", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVDPPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VCMPPS256", auxType: auxInt8, @@ -59497,6 +59533,12 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "DotProdBroadcastFloat32x4", + argLen: 2, + commutative: true, + generic: true, + }, { name: "EqualFloat32x4", argLen: 2, @@ -59746,6 +59788,12 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "DotProdBroadcastFloat32x8", + argLen: 2, + commutative: true, + generic: true, + }, { name: "EqualFloat32x8", argLen: 2, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index d78c9212cbae09..6d10b009bb90cf 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1407,6 +1407,10 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpDivMaskedFloat64x4(v) case OpDivMaskedFloat64x8: return rewriteValueAMD64_OpDivMaskedFloat64x8(v) + case OpDotProdBroadcastFloat32x4: + return rewriteValueAMD64_OpDotProdBroadcastFloat32x4(v) + case OpDotProdBroadcastFloat32x8: + return rewriteValueAMD64_OpDotProdBroadcastFloat32x8(v) case OpDotProdBroadcastFloat64x2: return rewriteValueAMD64_OpDotProdBroadcastFloat64x2(v) case OpEq16: @@ -32312,6 +32316,34 @@ func rewriteValueAMD64_OpDivMaskedFloat64x8(v *Value) bool { return true } } +func rewriteValueAMD64_OpDotProdBroadcastFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (DotProdBroadcastFloat32x4 x y) + // result: (VDPPS128 [127] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VDPPS128) + v.AuxInt = int8ToAuxInt(127) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpDotProdBroadcastFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (DotProdBroadcastFloat32x8 x y) + // result: (VDPPS256 [127] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VDPPS256) + v.AuxInt = int8ToAuxInt(127) + v.AddArg2(x, y) + return true + } +} func rewriteValueAMD64_OpDotProdBroadcastFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 085c0b8d995b6c..58bc420fc4e304 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -275,6 +275,8 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float64x2.DivMasked", opLen3(ssa.OpDivMaskedFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.DivMasked", opLen3(ssa.OpDivMaskedFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.DivMasked", opLen3(ssa.OpDivMaskedFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.DotProdBroadcast", opLen2(ssa.OpDotProdBroadcastFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.DotProdBroadcast", opLen2(ssa.OpDotProdBroadcastFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.DotProdBroadcast", opLen2(ssa.OpDotProdBroadcastFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x16.Equal", opLen2(ssa.OpEqualInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Equal", opLen2(ssa.OpEqualInt8x32, types.TypeVec256), sys.AMD64) diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 2c17300ae44d76..7a8780e5cba790 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -1502,6 +1502,16 @@ func (x Float64x8) DivMasked(y Float64x8, z Mask64x8) Float64x8 /* DotProdBroadcast */ +// DotProdBroadcast multiplies all elements and broadcasts the sum. +// +// Asm: VDPPS, CPU Feature: AVX +func (x Float32x4) DotProdBroadcast(y Float32x4) Float32x4 + +// DotProdBroadcast multiplies all elements and broadcasts the sum. +// +// Asm: VDPPS, CPU Feature: AVX +func (x Float32x8) DotProdBroadcast(y Float32x8) Float32x8 + // DotProdBroadcast multiplies all elements and broadcasts the sum. // // Asm: VDPPD, CPU Feature: AVX diff --git a/src/simd/simd_wrapped_test.go b/src/simd/simd_wrapped_test.go index 15e5c45097c526..6466684068e964 100644 --- a/src/simd/simd_wrapped_test.go +++ b/src/simd/simd_wrapped_test.go @@ -22,6 +22,8 @@ func testFloat32x4Binary(t *testing.T, v0 []float32, v1 []float32, want []float3 gotv = vec0.AddSub(vec1) case "Div": gotv = vec0.Div(vec1) + case "DotProdBroadcast": + gotv = vec0.DotProdBroadcast(vec1) case "Max": gotv = vec0.Max(vec1) case "Min": @@ -272,6 +274,8 @@ func testFloat32x8Binary(t *testing.T, v0 []float32, v1 []float32, want []float3 gotv = vec0.AddSub(vec1) case "Div": gotv = vec0.Div(vec1) + case "DotProdBroadcast": + gotv = vec0.DotProdBroadcast(vec1) case "Max": gotv = vec0.Max(vec1) case "Min": From 01f7f57025b017de6a50686c77945e3f99285505 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Mon, 14 Jul 2025 19:39:44 +0000 Subject: [PATCH 085/139] [dev.simd] cmd/compile, simd: add variable Permute This CL also added some tests for them. This CL is generated by CL 687919. 
Change-Id: I9ddd2cd23bb98ecca91bfbeaffd62faa4bd85e0d Reviewed-on: https://go-review.googlesource.com/c/go/+/687939 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/amd64/simdssa.go | 96 + .../compile/internal/ssa/_gen/simdAMD64.rules | 108 ++ .../compile/internal/ssa/_gen/simdAMD64ops.go | 64 + .../internal/ssa/_gen/simdgenericOps.go | 108 ++ src/cmd/compile/internal/ssa/opGen.go | 1712 +++++++++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 1302 +++++++++++++ src/cmd/compile/internal/ssagen/intrinsics.go | 24 + .../compile/internal/ssagen/simdintrinsics.go | 108 ++ src/simd/ops_amd64.go | 824 ++++++++ src/simd/simd_test.go | 35 + src/simd/simd_wrapped_test.go | 4 + 11 files changed, 4385 insertions(+) diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 0ebb955accda3f..1a7e3be9e50d27 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -233,6 +233,20 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPHSUBW256, ssa.OpAMD64VPHSUBD128, ssa.OpAMD64VPHSUBD256, + ssa.OpAMD64VPERMB128, + ssa.OpAMD64VPERMB256, + ssa.OpAMD64VPERMB512, + ssa.OpAMD64VPERMW128, + ssa.OpAMD64VPERMW256, + ssa.OpAMD64VPERMW512, + ssa.OpAMD64VPERMPS256, + ssa.OpAMD64VPERMD256, + ssa.OpAMD64VPERMPS512, + ssa.OpAMD64VPERMD512, + ssa.OpAMD64VPERMPD256, + ssa.OpAMD64VPERMQ256, + ssa.OpAMD64VPERMPD512, + ssa.OpAMD64VPERMQ512, ssa.OpAMD64VPROLVD128, ssa.OpAMD64VPROLVD256, ssa.OpAMD64VPROLVD512, @@ -468,6 +482,20 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMADDWDMasked128, ssa.OpAMD64VPMADDWDMasked256, ssa.OpAMD64VPMADDWDMasked512, + ssa.OpAMD64VPERMBMasked128, + ssa.OpAMD64VPERMBMasked256, + ssa.OpAMD64VPERMBMasked512, + ssa.OpAMD64VPERMWMasked128, + ssa.OpAMD64VPERMWMasked256, + ssa.OpAMD64VPERMWMasked512, + ssa.OpAMD64VPERMPSMasked256, + ssa.OpAMD64VPERMDMasked256, + ssa.OpAMD64VPERMPSMasked512, + ssa.OpAMD64VPERMDMasked512, + ssa.OpAMD64VPERMPDMasked256, + ssa.OpAMD64VPERMQMasked256, + ssa.OpAMD64VPERMPDMasked512, + ssa.OpAMD64VPERMQMasked512, ssa.OpAMD64VPROLVDMasked128, ssa.OpAMD64VPROLVDMasked256, ssa.OpAMD64VPROLVDMasked512, @@ -766,6 +794,24 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPDPWSSD128, ssa.OpAMD64VPDPWSSD256, ssa.OpAMD64VPDPWSSD512, + ssa.OpAMD64VPERMI2B128, + ssa.OpAMD64VPERMI2B256, + ssa.OpAMD64VPERMI2B512, + ssa.OpAMD64VPERMI2W128, + ssa.OpAMD64VPERMI2W256, + ssa.OpAMD64VPERMI2W512, + ssa.OpAMD64VPERMI2PS128, + ssa.OpAMD64VPERMI2D128, + ssa.OpAMD64VPERMI2PS256, + ssa.OpAMD64VPERMI2D256, + ssa.OpAMD64VPERMI2PS512, + ssa.OpAMD64VPERMI2D512, + ssa.OpAMD64VPERMI2PD128, + ssa.OpAMD64VPERMI2Q128, + ssa.OpAMD64VPERMI2PD256, + ssa.OpAMD64VPERMI2Q256, + ssa.OpAMD64VPERMI2PD512, + ssa.OpAMD64VPERMI2Q512, ssa.OpAMD64VPDPWSSDS128, ssa.OpAMD64VPDPWSSDS256, ssa.OpAMD64VPDPWSSDS512, @@ -816,6 +862,24 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPDPWSSDMasked128, ssa.OpAMD64VPDPWSSDMasked256, ssa.OpAMD64VPDPWSSDMasked512, + ssa.OpAMD64VPERMI2BMasked128, + ssa.OpAMD64VPERMI2BMasked256, + ssa.OpAMD64VPERMI2BMasked512, + ssa.OpAMD64VPERMI2WMasked128, + ssa.OpAMD64VPERMI2WMasked256, + ssa.OpAMD64VPERMI2WMasked512, + ssa.OpAMD64VPERMI2PSMasked128, + ssa.OpAMD64VPERMI2DMasked128, + ssa.OpAMD64VPERMI2PSMasked256, + ssa.OpAMD64VPERMI2DMasked256, + ssa.OpAMD64VPERMI2PSMasked512, + ssa.OpAMD64VPERMI2DMasked512, + ssa.OpAMD64VPERMI2PDMasked128, + ssa.OpAMD64VPERMI2QMasked128, 
+ ssa.OpAMD64VPERMI2PDMasked256, + ssa.OpAMD64VPERMI2QMasked256, + ssa.OpAMD64VPERMI2PDMasked512, + ssa.OpAMD64VPERMI2QMasked512, ssa.OpAMD64VPDPWSSDSMasked128, ssa.OpAMD64VPDPWSSDSMasked256, ssa.OpAMD64VPDPWSSDSMasked512, @@ -1158,6 +1222,38 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMADDWDMasked128, ssa.OpAMD64VPMADDWDMasked256, ssa.OpAMD64VPMADDWDMasked512, + ssa.OpAMD64VPERMI2BMasked128, + ssa.OpAMD64VPERMI2BMasked256, + ssa.OpAMD64VPERMI2BMasked512, + ssa.OpAMD64VPERMI2WMasked128, + ssa.OpAMD64VPERMI2WMasked256, + ssa.OpAMD64VPERMI2WMasked512, + ssa.OpAMD64VPERMI2PSMasked128, + ssa.OpAMD64VPERMI2DMasked128, + ssa.OpAMD64VPERMI2PSMasked256, + ssa.OpAMD64VPERMI2DMasked256, + ssa.OpAMD64VPERMI2PSMasked512, + ssa.OpAMD64VPERMI2DMasked512, + ssa.OpAMD64VPERMI2PDMasked128, + ssa.OpAMD64VPERMI2QMasked128, + ssa.OpAMD64VPERMI2PDMasked256, + ssa.OpAMD64VPERMI2QMasked256, + ssa.OpAMD64VPERMI2PDMasked512, + ssa.OpAMD64VPERMI2QMasked512, + ssa.OpAMD64VPERMBMasked128, + ssa.OpAMD64VPERMBMasked256, + ssa.OpAMD64VPERMBMasked512, + ssa.OpAMD64VPERMWMasked128, + ssa.OpAMD64VPERMWMasked256, + ssa.OpAMD64VPERMWMasked512, + ssa.OpAMD64VPERMPSMasked256, + ssa.OpAMD64VPERMDMasked256, + ssa.OpAMD64VPERMPSMasked512, + ssa.OpAMD64VPERMDMasked512, + ssa.OpAMD64VPERMPDMasked256, + ssa.OpAMD64VPERMQMasked256, + ssa.OpAMD64VPERMPDMasked512, + ssa.OpAMD64VPERMQMasked512, ssa.OpAMD64VPOPCNTBMasked128, ssa.OpAMD64VPOPCNTBMasked256, ssa.OpAMD64VPOPCNTBMasked512, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 0cbca8bf72491b..5898406e9d351c 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -985,6 +985,114 @@ (PairwiseSubUint16x16 ...) => (VPHSUBW256 ...) (PairwiseSubUint32x4 ...) => (VPHSUBD128 ...) (PairwiseSubUint32x8 ...) => (VPHSUBD256 ...) +(PermuteFloat32x8 ...) => (VPERMPS256 ...) +(PermuteFloat32x16 ...) => (VPERMPS512 ...) +(PermuteFloat64x4 ...) => (VPERMPD256 ...) +(PermuteFloat64x8 ...) => (VPERMPD512 ...) +(PermuteInt8x16 ...) => (VPERMB128 ...) +(PermuteInt8x32 ...) => (VPERMB256 ...) +(PermuteInt8x64 ...) => (VPERMB512 ...) +(PermuteInt16x8 ...) => (VPERMW128 ...) +(PermuteInt16x16 ...) => (VPERMW256 ...) +(PermuteInt16x32 ...) => (VPERMW512 ...) +(PermuteInt32x8 ...) => (VPERMD256 ...) +(PermuteInt32x16 ...) => (VPERMD512 ...) +(PermuteInt64x4 ...) => (VPERMQ256 ...) +(PermuteInt64x8 ...) => (VPERMQ512 ...) +(PermuteUint8x16 ...) => (VPERMB128 ...) +(PermuteUint8x32 ...) => (VPERMB256 ...) +(PermuteUint8x64 ...) => (VPERMB512 ...) +(PermuteUint16x8 ...) => (VPERMW128 ...) +(PermuteUint16x16 ...) => (VPERMW256 ...) +(PermuteUint16x32 ...) => (VPERMW512 ...) +(PermuteUint32x8 ...) => (VPERMD256 ...) +(PermuteUint32x16 ...) => (VPERMD512 ...) +(PermuteUint64x4 ...) => (VPERMQ256 ...) +(PermuteUint64x8 ...) => (VPERMQ512 ...) +(Permute2Float32x4 ...) => (VPERMI2PS128 ...) +(Permute2Float32x8 ...) => (VPERMI2PS256 ...) +(Permute2Float32x16 ...) => (VPERMI2PS512 ...) +(Permute2Float64x2 ...) => (VPERMI2PD128 ...) +(Permute2Float64x4 ...) => (VPERMI2PD256 ...) +(Permute2Float64x8 ...) => (VPERMI2PD512 ...) +(Permute2Int8x16 ...) => (VPERMI2B128 ...) +(Permute2Int8x32 ...) => (VPERMI2B256 ...) +(Permute2Int8x64 ...) => (VPERMI2B512 ...) +(Permute2Int16x8 ...) => (VPERMI2W128 ...) +(Permute2Int16x16 ...) => (VPERMI2W256 ...) +(Permute2Int16x32 ...) => (VPERMI2W512 ...) +(Permute2Int32x4 ...) => (VPERMI2D128 ...) +(Permute2Int32x8 ...) 
=> (VPERMI2D256 ...) +(Permute2Int32x16 ...) => (VPERMI2D512 ...) +(Permute2Int64x2 ...) => (VPERMI2Q128 ...) +(Permute2Int64x4 ...) => (VPERMI2Q256 ...) +(Permute2Int64x8 ...) => (VPERMI2Q512 ...) +(Permute2Uint8x16 ...) => (VPERMI2B128 ...) +(Permute2Uint8x32 ...) => (VPERMI2B256 ...) +(Permute2Uint8x64 ...) => (VPERMI2B512 ...) +(Permute2Uint16x8 ...) => (VPERMI2W128 ...) +(Permute2Uint16x16 ...) => (VPERMI2W256 ...) +(Permute2Uint16x32 ...) => (VPERMI2W512 ...) +(Permute2Uint32x4 ...) => (VPERMI2D128 ...) +(Permute2Uint32x8 ...) => (VPERMI2D256 ...) +(Permute2Uint32x16 ...) => (VPERMI2D512 ...) +(Permute2Uint64x2 ...) => (VPERMI2Q128 ...) +(Permute2Uint64x4 ...) => (VPERMI2Q256 ...) +(Permute2Uint64x8 ...) => (VPERMI2Q512 ...) +(Permute2MaskedFloat32x4 x y z mask) => (VPERMI2PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(Permute2MaskedFloat32x8 x y z mask) => (VPERMI2PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(Permute2MaskedFloat32x16 x y z mask) => (VPERMI2PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(Permute2MaskedFloat64x2 x y z mask) => (VPERMI2PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(Permute2MaskedFloat64x4 x y z mask) => (VPERMI2PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(Permute2MaskedFloat64x8 x y z mask) => (VPERMI2PDMasked512 x y z (VPMOVVec64x8ToM mask)) +(Permute2MaskedInt8x16 x y z mask) => (VPERMI2BMasked128 x y z (VPMOVVec8x16ToM mask)) +(Permute2MaskedInt8x32 x y z mask) => (VPERMI2BMasked256 x y z (VPMOVVec8x32ToM mask)) +(Permute2MaskedInt8x64 x y z mask) => (VPERMI2BMasked512 x y z (VPMOVVec8x64ToM mask)) +(Permute2MaskedInt16x8 x y z mask) => (VPERMI2WMasked128 x y z (VPMOVVec16x8ToM mask)) +(Permute2MaskedInt16x16 x y z mask) => (VPERMI2WMasked256 x y z (VPMOVVec16x16ToM mask)) +(Permute2MaskedInt16x32 x y z mask) => (VPERMI2WMasked512 x y z (VPMOVVec16x32ToM mask)) +(Permute2MaskedInt32x4 x y z mask) => (VPERMI2DMasked128 x y z (VPMOVVec32x4ToM mask)) +(Permute2MaskedInt32x8 x y z mask) => (VPERMI2DMasked256 x y z (VPMOVVec32x8ToM mask)) +(Permute2MaskedInt32x16 x y z mask) => (VPERMI2DMasked512 x y z (VPMOVVec32x16ToM mask)) +(Permute2MaskedInt64x2 x y z mask) => (VPERMI2QMasked128 x y z (VPMOVVec64x2ToM mask)) +(Permute2MaskedInt64x4 x y z mask) => (VPERMI2QMasked256 x y z (VPMOVVec64x4ToM mask)) +(Permute2MaskedInt64x8 x y z mask) => (VPERMI2QMasked512 x y z (VPMOVVec64x8ToM mask)) +(Permute2MaskedUint8x16 x y z mask) => (VPERMI2BMasked128 x y z (VPMOVVec8x16ToM mask)) +(Permute2MaskedUint8x32 x y z mask) => (VPERMI2BMasked256 x y z (VPMOVVec8x32ToM mask)) +(Permute2MaskedUint8x64 x y z mask) => (VPERMI2BMasked512 x y z (VPMOVVec8x64ToM mask)) +(Permute2MaskedUint16x8 x y z mask) => (VPERMI2WMasked128 x y z (VPMOVVec16x8ToM mask)) +(Permute2MaskedUint16x16 x y z mask) => (VPERMI2WMasked256 x y z (VPMOVVec16x16ToM mask)) +(Permute2MaskedUint16x32 x y z mask) => (VPERMI2WMasked512 x y z (VPMOVVec16x32ToM mask)) +(Permute2MaskedUint32x4 x y z mask) => (VPERMI2DMasked128 x y z (VPMOVVec32x4ToM mask)) +(Permute2MaskedUint32x8 x y z mask) => (VPERMI2DMasked256 x y z (VPMOVVec32x8ToM mask)) +(Permute2MaskedUint32x16 x y z mask) => (VPERMI2DMasked512 x y z (VPMOVVec32x16ToM mask)) +(Permute2MaskedUint64x2 x y z mask) => (VPERMI2QMasked128 x y z (VPMOVVec64x2ToM mask)) +(Permute2MaskedUint64x4 x y z mask) => (VPERMI2QMasked256 x y z (VPMOVVec64x4ToM mask)) +(Permute2MaskedUint64x8 x y z mask) => (VPERMI2QMasked512 x y z (VPMOVVec64x8ToM mask)) +(PermuteMaskedFloat32x8 x y mask) => (VPERMPSMasked256 x y (VPMOVVec32x8ToM mask)) +(PermuteMaskedFloat32x16 x y mask) => 
(VPERMPSMasked512 x y (VPMOVVec32x16ToM mask)) +(PermuteMaskedFloat64x4 x y mask) => (VPERMPDMasked256 x y (VPMOVVec64x4ToM mask)) +(PermuteMaskedFloat64x8 x y mask) => (VPERMPDMasked512 x y (VPMOVVec64x8ToM mask)) +(PermuteMaskedInt8x16 x y mask) => (VPERMBMasked128 x y (VPMOVVec8x16ToM mask)) +(PermuteMaskedInt8x32 x y mask) => (VPERMBMasked256 x y (VPMOVVec8x32ToM mask)) +(PermuteMaskedInt8x64 x y mask) => (VPERMBMasked512 x y (VPMOVVec8x64ToM mask)) +(PermuteMaskedInt16x8 x y mask) => (VPERMWMasked128 x y (VPMOVVec16x8ToM mask)) +(PermuteMaskedInt16x16 x y mask) => (VPERMWMasked256 x y (VPMOVVec16x16ToM mask)) +(PermuteMaskedInt16x32 x y mask) => (VPERMWMasked512 x y (VPMOVVec16x32ToM mask)) +(PermuteMaskedInt32x8 x y mask) => (VPERMDMasked256 x y (VPMOVVec32x8ToM mask)) +(PermuteMaskedInt32x16 x y mask) => (VPERMDMasked512 x y (VPMOVVec32x16ToM mask)) +(PermuteMaskedInt64x4 x y mask) => (VPERMQMasked256 x y (VPMOVVec64x4ToM mask)) +(PermuteMaskedInt64x8 x y mask) => (VPERMQMasked512 x y (VPMOVVec64x8ToM mask)) +(PermuteMaskedUint8x16 x y mask) => (VPERMBMasked128 x y (VPMOVVec8x16ToM mask)) +(PermuteMaskedUint8x32 x y mask) => (VPERMBMasked256 x y (VPMOVVec8x32ToM mask)) +(PermuteMaskedUint8x64 x y mask) => (VPERMBMasked512 x y (VPMOVVec8x64ToM mask)) +(PermuteMaskedUint16x8 x y mask) => (VPERMWMasked128 x y (VPMOVVec16x8ToM mask)) +(PermuteMaskedUint16x16 x y mask) => (VPERMWMasked256 x y (VPMOVVec16x16ToM mask)) +(PermuteMaskedUint16x32 x y mask) => (VPERMWMasked512 x y (VPMOVVec16x32ToM mask)) +(PermuteMaskedUint32x8 x y mask) => (VPERMDMasked256 x y (VPMOVVec32x8ToM mask)) +(PermuteMaskedUint32x16 x y mask) => (VPERMDMasked512 x y (VPMOVVec32x16ToM mask)) +(PermuteMaskedUint64x4 x y mask) => (VPERMQMasked256 x y (VPMOVVec64x4ToM mask)) +(PermuteMaskedUint64x8 x y mask) => (VPERMQMasked512 x y (VPMOVVec64x8ToM mask)) (PopCountInt8x16 ...) => (VPOPCNTB128 ...) (PopCountInt8x32 ...) => (VPOPCNTB256 ...) (PopCountInt8x64 ...) => (VPOPCNTB512 ...) 
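Unlike the unmasked Permute rules above, which lower with a plain "..." passthrough, each masked rule wraps the mask argument in a VPMOVVec*ToM conversion. The following is a sketch of the shape of the rewrite function the rule generator emits for one such rule, modeled on the generated DotProdBroadcast rewrites shown earlier in this series; the real code lives in rewriteAMD64.go and may differ in detail, and types.TypeMask is assumed here as the type of the converted mask.

func rewriteValueAMD64_OpPermute2MaskedFloat32x4(v *Value) bool {
	v_3 := v.Args[3]
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Permute2MaskedFloat32x4 x y z mask)
	// result: (VPERMI2PSMasked128 x y z (VPMOVVec32x4ToM mask))
	for {
		x := v_0
		y := v_1
		z := v_2
		mask := v_3
		v.reset(OpAMD64VPERMI2PSMasked128)
		// The mask is converted from its vector representation to a
		// K-register value before being passed to the masked machine op.
		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
		v0.AddArg(mask)
		v.AddArg4(x, y, z, v0)
		return true
	}
}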
diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 6985daa04bcaaa..19ac0b0dea6583 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -613,6 +613,10 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMINUWMasked256", argLength: 3, reg: w2kw, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULHUW256", argLength: 2, reg: v21, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULHUWMasked256", argLength: 3, reg: w2kw, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPERMW256", argLength: 2, reg: w21, asm: "VPERMW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPERMI2W256", argLength: 3, reg: w31, asm: "VPERMI2W", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMI2WMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2W", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMWMasked256", argLength: 3, reg: w2kw, asm: "VPERMW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLW256", argLength: 2, reg: vfpv, asm: "VPSRLW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLWMasked256", argLength: 3, reg: wfpkw, asm: "VPSRLW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLVW256", argLength: 2, reg: w21, asm: "VPSRLVW", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -625,6 +629,10 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMINUWMasked512", argLength: 3, reg: w2kw, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULHUW512", argLength: 2, reg: w21, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULHUWMasked512", argLength: 3, reg: w2kw, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPERMW512", argLength: 2, reg: w21, asm: "VPERMW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPERMI2W512", argLength: 3, reg: w31, asm: "VPERMI2W", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMI2WMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2W", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMWMasked512", argLength: 3, reg: w2kw, asm: "VPERMW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRLW512", argLength: 2, reg: wfpw, asm: "VPSRLW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRLWMasked512", argLength: 3, reg: wfpkw, asm: "VPSRLW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRLVW512", argLength: 2, reg: w21, asm: "VPSRLVW", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -637,6 +645,10 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMINUWMasked128", argLength: 3, reg: w2kw, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULHUW128", argLength: 2, reg: v21, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULHUWMasked128", argLength: 3, reg: w2kw, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPERMW128", argLength: 2, reg: w21, asm: "VPERMW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPERMI2W128", argLength: 3, reg: w31, 
asm: "VPERMI2W", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPERMI2WMasked128", argLength: 4, reg: w3kw, asm: "VPERMI2W", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPERMWMasked128", argLength: 3, reg: w2kw, asm: "VPERMW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRLW128", argLength: 2, reg: vfpv, asm: "VPSRLW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRLWMasked128", argLength: 3, reg: wfpkw, asm: "VPSRLW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRLVW128", argLength: 2, reg: w21, asm: "VPSRLVW", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -645,6 +657,14 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMAXUDMasked512", argLength: 3, reg: w2kw, asm: "VPMAXUD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINUD512", argLength: 2, reg: w21, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINUDMasked512", argLength: 3, reg: w2kw, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPERMPS512", argLength: 2, reg: w21, asm: "VPERMPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPERMD512", argLength: 2, reg: w21, asm: "VPERMD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPERMI2D512", argLength: 3, reg: w31, asm: "VPERMI2D", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMI2PS512", argLength: 3, reg: w31, asm: "VPERMI2PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMI2DMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2D", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMI2PSMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMPSMasked512", argLength: 3, reg: w2kw, asm: "VPERMPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPERMDMasked512", argLength: 3, reg: w2kw, asm: "VPERMD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRLD512", argLength: 2, reg: wfpw, asm: "VPSRLD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRLDMasked512", argLength: 3, reg: wfpkw, asm: "VPSRLD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRLVD512", argLength: 2, reg: w21, asm: "VPSRLVD", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -654,6 +674,10 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMINUD128", argLength: 2, reg: v21, asm: "VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUDMasked128", argLength: 3, reg: w2kw, asm: "VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULUDQ128", argLength: 2, reg: v21, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPERMI2D128", argLength: 3, reg: w31, asm: "VPERMI2D", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPERMI2PS128", argLength: 3, reg: w31, asm: "VPERMI2PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPERMI2PSMasked128", argLength: 4, reg: w3kw, asm: "VPERMI2PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPERMI2DMasked128", argLength: 4, reg: w3kw, asm: "VPERMI2D", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPSRLD128", argLength: 2, reg: vfpv, asm: "VPSRLD", commutative: false, typ: 
"Vec128", resultInArg0: false}, {name: "VPSRLDMasked128", argLength: 3, reg: wfpkw, asm: "VPSRLD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRLVD128", argLength: 2, reg: v21, asm: "VPSRLVD", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -663,6 +687,14 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMINUD256", argLength: 2, reg: v21, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUDMasked256", argLength: 3, reg: w2kw, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULUDQ256", argLength: 2, reg: v21, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPERMD256", argLength: 2, reg: v21, asm: "VPERMD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPERMPS256", argLength: 2, reg: v21, asm: "VPERMPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPERMI2D256", argLength: 3, reg: w31, asm: "VPERMI2D", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMI2PS256", argLength: 3, reg: w31, asm: "VPERMI2PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMI2PSMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMI2DMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2D", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMPSMasked256", argLength: 3, reg: w2kw, asm: "VPERMPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPERMDMasked256", argLength: 3, reg: w2kw, asm: "VPERMD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLD256", argLength: 2, reg: vfpv, asm: "VPSRLD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLDMasked256", argLength: 3, reg: wfpkw, asm: "VPSRLD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLVD256", argLength: 2, reg: v21, asm: "VPSRLVD", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -672,6 +704,10 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMINUQ128", argLength: 2, reg: w21, asm: "VPMINUQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUQMasked128", argLength: 3, reg: w2kw, asm: "VPMINUQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULUDQMasked128", argLength: 3, reg: w2kw, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPERMI2PD128", argLength: 3, reg: w31, asm: "VPERMI2PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPERMI2Q128", argLength: 3, reg: w31, asm: "VPERMI2Q", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPERMI2QMasked128", argLength: 4, reg: w3kw, asm: "VPERMI2Q", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPERMI2PDMasked128", argLength: 4, reg: w3kw, asm: "VPERMI2PD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPSRLQ128", argLength: 2, reg: vfpv, asm: "VPSRLQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRLQMasked128", argLength: 3, reg: wfpkw, asm: "VPSRLQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRLVQ128", argLength: 2, reg: v21, asm: "VPSRLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -681,6 +717,14 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMINUQ256", argLength: 2, 
reg: w21, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUQMasked256", argLength: 3, reg: w2kw, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULUDQMasked256", argLength: 3, reg: w2kw, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPERMQ256", argLength: 2, reg: w21, asm: "VPERMQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPERMPD256", argLength: 2, reg: w21, asm: "VPERMPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPERMI2PD256", argLength: 3, reg: w31, asm: "VPERMI2PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMI2Q256", argLength: 3, reg: w31, asm: "VPERMI2Q", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMI2PDMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMI2QMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2Q", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMPDMasked256", argLength: 3, reg: w2kw, asm: "VPERMPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPERMQMasked256", argLength: 3, reg: w2kw, asm: "VPERMQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLQ256", argLength: 2, reg: vfpv, asm: "VPSRLQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLQMasked256", argLength: 3, reg: wfpkw, asm: "VPSRLQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLVQ256", argLength: 2, reg: v21, asm: "VPSRLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -691,6 +735,14 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMINUQMasked512", argLength: 3, reg: w2kw, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULUDQ512", argLength: 2, reg: w21, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULUDQMasked512", argLength: 3, reg: w2kw, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPERMPD512", argLength: 2, reg: w21, asm: "VPERMPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPERMQ512", argLength: 2, reg: w21, asm: "VPERMQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPERMI2Q512", argLength: 3, reg: w31, asm: "VPERMI2Q", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMI2PD512", argLength: 3, reg: w31, asm: "VPERMI2PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMI2QMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2Q", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMI2PDMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMPDMasked512", argLength: 3, reg: w2kw, asm: "VPERMPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPERMQMasked512", argLength: 3, reg: w2kw, asm: "VPERMQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRLQ512", argLength: 2, reg: wfpw, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRLQMasked512", argLength: 3, reg: wfpkw, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRLVQ512", argLength: 2, reg: w21, asm: "VPSRLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -703,6 +755,10 @@ func simdAMD64Ops(v11, v21, 
v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMAXUBMasked128", argLength: 3, reg: w2kw, asm: "VPMAXUB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUB128", argLength: 2, reg: v21, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUBMasked128", argLength: 3, reg: w2kw, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPERMB128", argLength: 2, reg: w21, asm: "VPERMB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPERMI2B128", argLength: 3, reg: w31, asm: "VPERMI2B", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPERMI2BMasked128", argLength: 4, reg: w3kw, asm: "VPERMI2B", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPERMBMasked128", argLength: 3, reg: w2kw, asm: "VPERMB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMADDUBSW128", argLength: 2, reg: v21, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMADDUBSWMasked128", argLength: 3, reg: w2kw, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPAVGB256", argLength: 2, reg: v21, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, @@ -713,6 +769,10 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMAXUBMasked256", argLength: 3, reg: w2kw, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUB256", argLength: 2, reg: v21, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUBMasked256", argLength: 3, reg: w2kw, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPERMB256", argLength: 2, reg: w21, asm: "VPERMB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPERMI2B256", argLength: 3, reg: w31, asm: "VPERMI2B", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMI2BMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2B", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMBMasked256", argLength: 3, reg: w2kw, asm: "VPERMB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMADDUBSW256", argLength: 2, reg: v21, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMADDUBSWMasked256", argLength: 3, reg: w2kw, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPAVGB512", argLength: 2, reg: w21, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -723,6 +783,10 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMAXUBMasked512", argLength: 3, reg: w2kw, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINUB512", argLength: 2, reg: w21, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINUBMasked512", argLength: 3, reg: w2kw, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPERMB512", argLength: 2, reg: w21, asm: "VPERMB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPERMI2B512", argLength: 3, reg: w31, asm: "VPERMI2B", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMI2BMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2B", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMBMasked512", argLength: 3, reg: w2kw, asm: "VPERMB", commutative: false, typ: "Vec512", 
resultInArg0: false}, {name: "VPMADDUBSW512", argLength: 2, reg: w21, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPMADDUBSWMasked512", argLength: 3, reg: w2kw, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRNDSCALEPS512", argLength: 1, reg: w11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index a1dfc1e7da7a6b..dd27d0cc9411dd 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -889,6 +889,14 @@ func simdGenericOps() []opData { {name: "OrUint16x16", argLength: 2, commutative: true}, {name: "PairwiseAddUint16x16", argLength: 2, commutative: false}, {name: "PairwiseSubUint16x16", argLength: 2, commutative: false}, + {name: "PermuteInt16x16", argLength: 2, commutative: false}, + {name: "PermuteUint16x16", argLength: 2, commutative: false}, + {name: "Permute2Uint16x16", argLength: 3, commutative: false}, + {name: "Permute2Int16x16", argLength: 3, commutative: false}, + {name: "Permute2MaskedUint16x16", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt16x16", argLength: 4, commutative: false}, + {name: "PermuteMaskedUint16x16", argLength: 3, commutative: false}, + {name: "PermuteMaskedInt16x16", argLength: 3, commutative: false}, {name: "PopCountUint16x16", argLength: 1, commutative: false}, {name: "PopCountMaskedUint16x16", argLength: 2, commutative: false}, {name: "SaturatedAddUint16x16", argLength: 2, commutative: true}, @@ -932,6 +940,14 @@ func simdGenericOps() []opData { {name: "MulHighMaskedUint16x32", argLength: 3, commutative: true}, {name: "NotEqualUint16x32", argLength: 2, commutative: true}, {name: "NotEqualMaskedUint16x32", argLength: 3, commutative: true}, + {name: "PermuteUint16x32", argLength: 2, commutative: false}, + {name: "PermuteInt16x32", argLength: 2, commutative: false}, + {name: "Permute2Int16x32", argLength: 3, commutative: false}, + {name: "Permute2Uint16x32", argLength: 3, commutative: false}, + {name: "Permute2MaskedUint16x32", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt16x32", argLength: 4, commutative: false}, + {name: "PermuteMaskedUint16x32", argLength: 3, commutative: false}, + {name: "PermuteMaskedInt16x32", argLength: 3, commutative: false}, {name: "PopCountUint16x32", argLength: 1, commutative: false}, {name: "PopCountMaskedUint16x32", argLength: 2, commutative: false}, {name: "SaturatedAddUint16x32", argLength: 2, commutative: true}, @@ -979,6 +995,14 @@ func simdGenericOps() []opData { {name: "OrUint16x8", argLength: 2, commutative: true}, {name: "PairwiseAddUint16x8", argLength: 2, commutative: false}, {name: "PairwiseSubUint16x8", argLength: 2, commutative: false}, + {name: "PermuteUint16x8", argLength: 2, commutative: false}, + {name: "PermuteInt16x8", argLength: 2, commutative: false}, + {name: "Permute2Int16x8", argLength: 3, commutative: false}, + {name: "Permute2Uint16x8", argLength: 3, commutative: false}, + {name: "Permute2MaskedUint16x8", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt16x8", argLength: 4, commutative: false}, + {name: "PermuteMaskedInt16x8", argLength: 3, commutative: false}, + {name: "PermuteMaskedUint16x8", argLength: 3, commutative: false}, {name: "PopCountUint16x8", argLength: 1, commutative: false}, {name: "PopCountMaskedUint16x8", argLength: 2, commutative: false}, 
{name: "SaturatedAddUint16x8", argLength: 2, commutative: true}, @@ -1024,6 +1048,18 @@ func simdGenericOps() []opData { {name: "NotEqualMaskedUint32x16", argLength: 3, commutative: true}, {name: "OrUint32x16", argLength: 2, commutative: true}, {name: "OrMaskedUint32x16", argLength: 3, commutative: true}, + {name: "PermuteInt32x16", argLength: 2, commutative: false}, + {name: "PermuteUint32x16", argLength: 2, commutative: false}, + {name: "PermuteFloat32x16", argLength: 2, commutative: false}, + {name: "Permute2Int32x16", argLength: 3, commutative: false}, + {name: "Permute2Uint32x16", argLength: 3, commutative: false}, + {name: "Permute2Float32x16", argLength: 3, commutative: false}, + {name: "Permute2MaskedUint32x16", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt32x16", argLength: 4, commutative: false}, + {name: "Permute2MaskedFloat32x16", argLength: 4, commutative: false}, + {name: "PermuteMaskedUint32x16", argLength: 3, commutative: false}, + {name: "PermuteMaskedInt32x16", argLength: 3, commutative: false}, + {name: "PermuteMaskedFloat32x16", argLength: 3, commutative: false}, {name: "PopCountUint32x16", argLength: 1, commutative: false}, {name: "PopCountMaskedUint32x16", argLength: 2, commutative: false}, {name: "RotateLeftUint32x16", argLength: 2, commutative: false}, @@ -1077,6 +1113,12 @@ func simdGenericOps() []opData { {name: "OrMaskedUint32x4", argLength: 3, commutative: true}, {name: "PairwiseAddUint32x4", argLength: 2, commutative: false}, {name: "PairwiseSubUint32x4", argLength: 2, commutative: false}, + {name: "Permute2Uint32x4", argLength: 3, commutative: false}, + {name: "Permute2Float32x4", argLength: 3, commutative: false}, + {name: "Permute2Int32x4", argLength: 3, commutative: false}, + {name: "Permute2MaskedUint32x4", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt32x4", argLength: 4, commutative: false}, + {name: "Permute2MaskedFloat32x4", argLength: 4, commutative: false}, {name: "PopCountUint32x4", argLength: 1, commutative: false}, {name: "PopCountMaskedUint32x4", argLength: 2, commutative: false}, {name: "RotateLeftUint32x4", argLength: 2, commutative: false}, @@ -1130,6 +1172,18 @@ func simdGenericOps() []opData { {name: "OrMaskedUint32x8", argLength: 3, commutative: true}, {name: "PairwiseAddUint32x8", argLength: 2, commutative: false}, {name: "PairwiseSubUint32x8", argLength: 2, commutative: false}, + {name: "PermuteInt32x8", argLength: 2, commutative: false}, + {name: "PermuteFloat32x8", argLength: 2, commutative: false}, + {name: "PermuteUint32x8", argLength: 2, commutative: false}, + {name: "Permute2Uint32x8", argLength: 3, commutative: false}, + {name: "Permute2Float32x8", argLength: 3, commutative: false}, + {name: "Permute2Int32x8", argLength: 3, commutative: false}, + {name: "Permute2MaskedFloat32x8", argLength: 4, commutative: false}, + {name: "Permute2MaskedUint32x8", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt32x8", argLength: 4, commutative: false}, + {name: "PermuteMaskedInt32x8", argLength: 3, commutative: false}, + {name: "PermuteMaskedFloat32x8", argLength: 3, commutative: false}, + {name: "PermuteMaskedUint32x8", argLength: 3, commutative: false}, {name: "PopCountUint32x8", argLength: 1, commutative: false}, {name: "PopCountMaskedUint32x8", argLength: 2, commutative: false}, {name: "RotateLeftUint32x8", argLength: 2, commutative: false}, @@ -1182,6 +1236,12 @@ func simdGenericOps() []opData { {name: "NotEqualMaskedUint64x2", argLength: 3, commutative: true}, {name: "OrUint64x2", argLength: 2, 
commutative: true}, {name: "OrMaskedUint64x2", argLength: 3, commutative: true}, + {name: "Permute2Uint64x2", argLength: 3, commutative: false}, + {name: "Permute2Int64x2", argLength: 3, commutative: false}, + {name: "Permute2Float64x2", argLength: 3, commutative: false}, + {name: "Permute2MaskedUint64x2", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt64x2", argLength: 4, commutative: false}, + {name: "Permute2MaskedFloat64x2", argLength: 4, commutative: false}, {name: "PopCountUint64x2", argLength: 1, commutative: false}, {name: "PopCountMaskedUint64x2", argLength: 2, commutative: false}, {name: "RotateLeftUint64x2", argLength: 2, commutative: false}, @@ -1230,6 +1290,18 @@ func simdGenericOps() []opData { {name: "NotEqualMaskedUint64x4", argLength: 3, commutative: true}, {name: "OrUint64x4", argLength: 2, commutative: true}, {name: "OrMaskedUint64x4", argLength: 3, commutative: true}, + {name: "PermuteUint64x4", argLength: 2, commutative: false}, + {name: "PermuteInt64x4", argLength: 2, commutative: false}, + {name: "PermuteFloat64x4", argLength: 2, commutative: false}, + {name: "Permute2Uint64x4", argLength: 3, commutative: false}, + {name: "Permute2Int64x4", argLength: 3, commutative: false}, + {name: "Permute2Float64x4", argLength: 3, commutative: false}, + {name: "Permute2MaskedInt64x4", argLength: 4, commutative: false}, + {name: "Permute2MaskedUint64x4", argLength: 4, commutative: false}, + {name: "Permute2MaskedFloat64x4", argLength: 4, commutative: false}, + {name: "PermuteMaskedFloat64x4", argLength: 3, commutative: false}, + {name: "PermuteMaskedInt64x4", argLength: 3, commutative: false}, + {name: "PermuteMaskedUint64x4", argLength: 3, commutative: false}, {name: "PopCountUint64x4", argLength: 1, commutative: false}, {name: "PopCountMaskedUint64x4", argLength: 2, commutative: false}, {name: "RotateLeftUint64x4", argLength: 2, commutative: false}, @@ -1278,6 +1350,18 @@ func simdGenericOps() []opData { {name: "NotEqualMaskedUint64x8", argLength: 3, commutative: true}, {name: "OrUint64x8", argLength: 2, commutative: true}, {name: "OrMaskedUint64x8", argLength: 3, commutative: true}, + {name: "PermuteUint64x8", argLength: 2, commutative: false}, + {name: "PermuteInt64x8", argLength: 2, commutative: false}, + {name: "PermuteFloat64x8", argLength: 2, commutative: false}, + {name: "Permute2Int64x8", argLength: 3, commutative: false}, + {name: "Permute2Uint64x8", argLength: 3, commutative: false}, + {name: "Permute2Float64x8", argLength: 3, commutative: false}, + {name: "Permute2MaskedUint64x8", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt64x8", argLength: 4, commutative: false}, + {name: "Permute2MaskedFloat64x8", argLength: 4, commutative: false}, + {name: "PermuteMaskedFloat64x8", argLength: 3, commutative: false}, + {name: "PermuteMaskedInt64x8", argLength: 3, commutative: false}, + {name: "PermuteMaskedUint64x8", argLength: 3, commutative: false}, {name: "PopCountUint64x8", argLength: 1, commutative: false}, {name: "PopCountMaskedUint64x8", argLength: 2, commutative: false}, {name: "RotateLeftUint64x8", argLength: 2, commutative: false}, @@ -1325,6 +1409,14 @@ func simdGenericOps() []opData { {name: "NotEqualUint8x16", argLength: 2, commutative: true}, {name: "NotEqualMaskedUint8x16", argLength: 3, commutative: true}, {name: "OrUint8x16", argLength: 2, commutative: true}, + {name: "PermuteUint8x16", argLength: 2, commutative: false}, + {name: "PermuteInt8x16", argLength: 2, commutative: false}, + {name: "Permute2Uint8x16", argLength: 3, 
commutative: false}, + {name: "Permute2Int8x16", argLength: 3, commutative: false}, + {name: "Permute2MaskedInt8x16", argLength: 4, commutative: false}, + {name: "Permute2MaskedUint8x16", argLength: 4, commutative: false}, + {name: "PermuteMaskedInt8x16", argLength: 3, commutative: false}, + {name: "PermuteMaskedUint8x16", argLength: 3, commutative: false}, {name: "PopCountUint8x16", argLength: 1, commutative: false}, {name: "PopCountMaskedUint8x16", argLength: 2, commutative: false}, {name: "SaturatedAddUint8x16", argLength: 2, commutative: true}, @@ -1361,6 +1453,14 @@ func simdGenericOps() []opData { {name: "NotEqualUint8x32", argLength: 2, commutative: true}, {name: "NotEqualMaskedUint8x32", argLength: 3, commutative: true}, {name: "OrUint8x32", argLength: 2, commutative: true}, + {name: "PermuteUint8x32", argLength: 2, commutative: false}, + {name: "PermuteInt8x32", argLength: 2, commutative: false}, + {name: "Permute2Int8x32", argLength: 3, commutative: false}, + {name: "Permute2Uint8x32", argLength: 3, commutative: false}, + {name: "Permute2MaskedUint8x32", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt8x32", argLength: 4, commutative: false}, + {name: "PermuteMaskedUint8x32", argLength: 3, commutative: false}, + {name: "PermuteMaskedInt8x32", argLength: 3, commutative: false}, {name: "PopCountUint8x32", argLength: 1, commutative: false}, {name: "PopCountMaskedUint8x32", argLength: 2, commutative: false}, {name: "SaturatedAddUint8x32", argLength: 2, commutative: true}, @@ -1394,6 +1494,14 @@ func simdGenericOps() []opData { {name: "MinMaskedUint8x64", argLength: 3, commutative: true}, {name: "NotEqualUint8x64", argLength: 2, commutative: true}, {name: "NotEqualMaskedUint8x64", argLength: 3, commutative: true}, + {name: "PermuteUint8x64", argLength: 2, commutative: false}, + {name: "PermuteInt8x64", argLength: 2, commutative: false}, + {name: "Permute2Int8x64", argLength: 3, commutative: false}, + {name: "Permute2Uint8x64", argLength: 3, commutative: false}, + {name: "Permute2MaskedUint8x64", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt8x64", argLength: 4, commutative: false}, + {name: "PermuteMaskedInt8x64", argLength: 3, commutative: false}, + {name: "PermuteMaskedUint8x64", argLength: 3, commutative: false}, {name: "PopCountUint8x64", argLength: 1, commutative: false}, {name: "PopCountMaskedUint8x64", argLength: 2, commutative: false}, {name: "SaturatedAddUint8x64", argLength: 2, commutative: true}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index ba28c58b7edf50..60a12e21fb198e 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1808,6 +1808,10 @@ const ( OpAMD64VPMINUWMasked256 OpAMD64VPMULHUW256 OpAMD64VPMULHUWMasked256 + OpAMD64VPERMW256 + OpAMD64VPERMI2W256 + OpAMD64VPERMI2WMasked256 + OpAMD64VPERMWMasked256 OpAMD64VPSRLW256 OpAMD64VPSRLWMasked256 OpAMD64VPSRLVW256 @@ -1820,6 +1824,10 @@ const ( OpAMD64VPMINUWMasked512 OpAMD64VPMULHUW512 OpAMD64VPMULHUWMasked512 + OpAMD64VPERMW512 + OpAMD64VPERMI2W512 + OpAMD64VPERMI2WMasked512 + OpAMD64VPERMWMasked512 OpAMD64VPSRLW512 OpAMD64VPSRLWMasked512 OpAMD64VPSRLVW512 @@ -1832,6 +1840,10 @@ const ( OpAMD64VPMINUWMasked128 OpAMD64VPMULHUW128 OpAMD64VPMULHUWMasked128 + OpAMD64VPERMW128 + OpAMD64VPERMI2W128 + OpAMD64VPERMI2WMasked128 + OpAMD64VPERMWMasked128 OpAMD64VPSRLW128 OpAMD64VPSRLWMasked128 OpAMD64VPSRLVW128 @@ -1840,6 +1852,14 @@ const ( OpAMD64VPMAXUDMasked512 OpAMD64VPMINUD512 OpAMD64VPMINUDMasked512 + 
OpAMD64VPERMPS512 + OpAMD64VPERMD512 + OpAMD64VPERMI2D512 + OpAMD64VPERMI2PS512 + OpAMD64VPERMI2DMasked512 + OpAMD64VPERMI2PSMasked512 + OpAMD64VPERMPSMasked512 + OpAMD64VPERMDMasked512 OpAMD64VPSRLD512 OpAMD64VPSRLDMasked512 OpAMD64VPSRLVD512 @@ -1849,6 +1869,10 @@ const ( OpAMD64VPMINUD128 OpAMD64VPMINUDMasked128 OpAMD64VPMULUDQ128 + OpAMD64VPERMI2D128 + OpAMD64VPERMI2PS128 + OpAMD64VPERMI2PSMasked128 + OpAMD64VPERMI2DMasked128 OpAMD64VPSRLD128 OpAMD64VPSRLDMasked128 OpAMD64VPSRLVD128 @@ -1858,6 +1882,14 @@ const ( OpAMD64VPMINUD256 OpAMD64VPMINUDMasked256 OpAMD64VPMULUDQ256 + OpAMD64VPERMD256 + OpAMD64VPERMPS256 + OpAMD64VPERMI2D256 + OpAMD64VPERMI2PS256 + OpAMD64VPERMI2PSMasked256 + OpAMD64VPERMI2DMasked256 + OpAMD64VPERMPSMasked256 + OpAMD64VPERMDMasked256 OpAMD64VPSRLD256 OpAMD64VPSRLDMasked256 OpAMD64VPSRLVD256 @@ -1867,6 +1899,10 @@ const ( OpAMD64VPMINUQ128 OpAMD64VPMINUQMasked128 OpAMD64VPMULUDQMasked128 + OpAMD64VPERMI2PD128 + OpAMD64VPERMI2Q128 + OpAMD64VPERMI2QMasked128 + OpAMD64VPERMI2PDMasked128 OpAMD64VPSRLQ128 OpAMD64VPSRLQMasked128 OpAMD64VPSRLVQ128 @@ -1876,6 +1912,14 @@ const ( OpAMD64VPMINUQ256 OpAMD64VPMINUQMasked256 OpAMD64VPMULUDQMasked256 + OpAMD64VPERMQ256 + OpAMD64VPERMPD256 + OpAMD64VPERMI2PD256 + OpAMD64VPERMI2Q256 + OpAMD64VPERMI2PDMasked256 + OpAMD64VPERMI2QMasked256 + OpAMD64VPERMPDMasked256 + OpAMD64VPERMQMasked256 OpAMD64VPSRLQ256 OpAMD64VPSRLQMasked256 OpAMD64VPSRLVQ256 @@ -1886,6 +1930,14 @@ const ( OpAMD64VPMINUQMasked512 OpAMD64VPMULUDQ512 OpAMD64VPMULUDQMasked512 + OpAMD64VPERMPD512 + OpAMD64VPERMQ512 + OpAMD64VPERMI2Q512 + OpAMD64VPERMI2PD512 + OpAMD64VPERMI2QMasked512 + OpAMD64VPERMI2PDMasked512 + OpAMD64VPERMPDMasked512 + OpAMD64VPERMQMasked512 OpAMD64VPSRLQ512 OpAMD64VPSRLQMasked512 OpAMD64VPSRLVQ512 @@ -1898,6 +1950,10 @@ const ( OpAMD64VPMAXUBMasked128 OpAMD64VPMINUB128 OpAMD64VPMINUBMasked128 + OpAMD64VPERMB128 + OpAMD64VPERMI2B128 + OpAMD64VPERMI2BMasked128 + OpAMD64VPERMBMasked128 OpAMD64VPMADDUBSW128 OpAMD64VPMADDUBSWMasked128 OpAMD64VPAVGB256 @@ -1908,6 +1964,10 @@ const ( OpAMD64VPMAXUBMasked256 OpAMD64VPMINUB256 OpAMD64VPMINUBMasked256 + OpAMD64VPERMB256 + OpAMD64VPERMI2B256 + OpAMD64VPERMI2BMasked256 + OpAMD64VPERMBMasked256 OpAMD64VPMADDUBSW256 OpAMD64VPMADDUBSWMasked256 OpAMD64VPAVGB512 @@ -1918,6 +1978,10 @@ const ( OpAMD64VPMAXUBMasked512 OpAMD64VPMINUB512 OpAMD64VPMINUBMasked512 + OpAMD64VPERMB512 + OpAMD64VPERMI2B512 + OpAMD64VPERMI2BMasked512 + OpAMD64VPERMBMasked512 OpAMD64VPMADDUBSW512 OpAMD64VPMADDUBSWMasked512 OpAMD64VRNDSCALEPS512 @@ -5207,6 +5271,14 @@ const ( OpOrUint16x16 OpPairwiseAddUint16x16 OpPairwiseSubUint16x16 + OpPermuteInt16x16 + OpPermuteUint16x16 + OpPermute2Uint16x16 + OpPermute2Int16x16 + OpPermute2MaskedUint16x16 + OpPermute2MaskedInt16x16 + OpPermuteMaskedUint16x16 + OpPermuteMaskedInt16x16 OpPopCountUint16x16 OpPopCountMaskedUint16x16 OpSaturatedAddUint16x16 @@ -5250,6 +5322,14 @@ const ( OpMulHighMaskedUint16x32 OpNotEqualUint16x32 OpNotEqualMaskedUint16x32 + OpPermuteUint16x32 + OpPermuteInt16x32 + OpPermute2Int16x32 + OpPermute2Uint16x32 + OpPermute2MaskedUint16x32 + OpPermute2MaskedInt16x32 + OpPermuteMaskedUint16x32 + OpPermuteMaskedInt16x32 OpPopCountUint16x32 OpPopCountMaskedUint16x32 OpSaturatedAddUint16x32 @@ -5297,6 +5377,14 @@ const ( OpOrUint16x8 OpPairwiseAddUint16x8 OpPairwiseSubUint16x8 + OpPermuteUint16x8 + OpPermuteInt16x8 + OpPermute2Int16x8 + OpPermute2Uint16x8 + OpPermute2MaskedUint16x8 + OpPermute2MaskedInt16x8 + OpPermuteMaskedInt16x8 + OpPermuteMaskedUint16x8 OpPopCountUint16x8 
OpPopCountMaskedUint16x8 OpSaturatedAddUint16x8 @@ -5342,6 +5430,18 @@ const ( OpNotEqualMaskedUint32x16 OpOrUint32x16 OpOrMaskedUint32x16 + OpPermuteInt32x16 + OpPermuteUint32x16 + OpPermuteFloat32x16 + OpPermute2Int32x16 + OpPermute2Uint32x16 + OpPermute2Float32x16 + OpPermute2MaskedUint32x16 + OpPermute2MaskedInt32x16 + OpPermute2MaskedFloat32x16 + OpPermuteMaskedUint32x16 + OpPermuteMaskedInt32x16 + OpPermuteMaskedFloat32x16 OpPopCountUint32x16 OpPopCountMaskedUint32x16 OpRotateLeftUint32x16 @@ -5395,6 +5495,12 @@ const ( OpOrMaskedUint32x4 OpPairwiseAddUint32x4 OpPairwiseSubUint32x4 + OpPermute2Uint32x4 + OpPermute2Float32x4 + OpPermute2Int32x4 + OpPermute2MaskedUint32x4 + OpPermute2MaskedInt32x4 + OpPermute2MaskedFloat32x4 OpPopCountUint32x4 OpPopCountMaskedUint32x4 OpRotateLeftUint32x4 @@ -5448,6 +5554,18 @@ const ( OpOrMaskedUint32x8 OpPairwiseAddUint32x8 OpPairwiseSubUint32x8 + OpPermuteInt32x8 + OpPermuteFloat32x8 + OpPermuteUint32x8 + OpPermute2Uint32x8 + OpPermute2Float32x8 + OpPermute2Int32x8 + OpPermute2MaskedFloat32x8 + OpPermute2MaskedUint32x8 + OpPermute2MaskedInt32x8 + OpPermuteMaskedInt32x8 + OpPermuteMaskedFloat32x8 + OpPermuteMaskedUint32x8 OpPopCountUint32x8 OpPopCountMaskedUint32x8 OpRotateLeftUint32x8 @@ -5500,6 +5618,12 @@ const ( OpNotEqualMaskedUint64x2 OpOrUint64x2 OpOrMaskedUint64x2 + OpPermute2Uint64x2 + OpPermute2Int64x2 + OpPermute2Float64x2 + OpPermute2MaskedUint64x2 + OpPermute2MaskedInt64x2 + OpPermute2MaskedFloat64x2 OpPopCountUint64x2 OpPopCountMaskedUint64x2 OpRotateLeftUint64x2 @@ -5548,6 +5672,18 @@ const ( OpNotEqualMaskedUint64x4 OpOrUint64x4 OpOrMaskedUint64x4 + OpPermuteUint64x4 + OpPermuteInt64x4 + OpPermuteFloat64x4 + OpPermute2Uint64x4 + OpPermute2Int64x4 + OpPermute2Float64x4 + OpPermute2MaskedInt64x4 + OpPermute2MaskedUint64x4 + OpPermute2MaskedFloat64x4 + OpPermuteMaskedFloat64x4 + OpPermuteMaskedInt64x4 + OpPermuteMaskedUint64x4 OpPopCountUint64x4 OpPopCountMaskedUint64x4 OpRotateLeftUint64x4 @@ -5596,6 +5732,18 @@ const ( OpNotEqualMaskedUint64x8 OpOrUint64x8 OpOrMaskedUint64x8 + OpPermuteUint64x8 + OpPermuteInt64x8 + OpPermuteFloat64x8 + OpPermute2Int64x8 + OpPermute2Uint64x8 + OpPermute2Float64x8 + OpPermute2MaskedUint64x8 + OpPermute2MaskedInt64x8 + OpPermute2MaskedFloat64x8 + OpPermuteMaskedFloat64x8 + OpPermuteMaskedInt64x8 + OpPermuteMaskedUint64x8 OpPopCountUint64x8 OpPopCountMaskedUint64x8 OpRotateLeftUint64x8 @@ -5643,6 +5791,14 @@ const ( OpNotEqualUint8x16 OpNotEqualMaskedUint8x16 OpOrUint8x16 + OpPermuteUint8x16 + OpPermuteInt8x16 + OpPermute2Uint8x16 + OpPermute2Int8x16 + OpPermute2MaskedInt8x16 + OpPermute2MaskedUint8x16 + OpPermuteMaskedInt8x16 + OpPermuteMaskedUint8x16 OpPopCountUint8x16 OpPopCountMaskedUint8x16 OpSaturatedAddUint8x16 @@ -5679,6 +5835,14 @@ const ( OpNotEqualUint8x32 OpNotEqualMaskedUint8x32 OpOrUint8x32 + OpPermuteUint8x32 + OpPermuteInt8x32 + OpPermute2Int8x32 + OpPermute2Uint8x32 + OpPermute2MaskedUint8x32 + OpPermute2MaskedInt8x32 + OpPermuteMaskedUint8x32 + OpPermuteMaskedInt8x32 OpPopCountUint8x32 OpPopCountMaskedUint8x32 OpSaturatedAddUint8x32 @@ -5712,6 +5876,14 @@ const ( OpMinMaskedUint8x64 OpNotEqualUint8x64 OpNotEqualMaskedUint8x64 + OpPermuteUint8x64 + OpPermuteInt8x64 + OpPermute2Int8x64 + OpPermute2Uint8x64 + OpPermute2MaskedUint8x64 + OpPermute2MaskedInt8x64 + OpPermuteMaskedInt8x64 + OpPermuteMaskedUint8x64 OpPopCountUint8x64 OpPopCountMaskedUint8x64 OpSaturatedAddUint8x64 @@ -27735,6 +27907,68 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPERMW256", + argLen: 2, + asm: 
x86.AVPERMW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMI2W256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2W, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2WMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2W, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMWMasked256", + argLen: 3, + asm: x86.AVPERMW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSRLW256", argLen: 2, @@ -27917,6 +28151,68 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPERMW512", + argLen: 2, + asm: x86.AVPERMW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMI2W512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2W, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2WMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2W, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: 
"VPERMWMasked512", + argLen: 3, + asm: x86.AVPERMW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSRLW512", argLen: 2, @@ -28099,6 +28395,68 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPERMW128", + argLen: 2, + asm: x86.AVPERMW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMI2W128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2W, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2WMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2W, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMWMasked128", + argLen: 3, + asm: x86.AVPERMW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSRLW128", argLen: 2, @@ -28219,6 +28577,130 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPERMPS512", + argLen: 2, + asm: x86.AVPERMPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMD512", + argLen: 2, + asm: x86.AVPERMD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMI2D512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2D, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2PS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2DMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2D, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2PSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMPSMasked512", + argLen: 3, + asm: x86.AVPERMPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMDMasked512", + argLen: 3, + asm: x86.AVPERMD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSRLD512", argLen: 2, @@ -28354,6 +28836,72 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPERMI2D128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2D, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2PS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2PS, + reg: regInfo{ + 
inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2PSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2DMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2D, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSRLD128", argLen: 2, @@ -28489,6 +29037,130 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPERMD256", + argLen: 2, + asm: x86.AVPERMD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMPS256", + argLen: 2, + asm: x86.AVPERMPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2D256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2D, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2PS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2PSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2DMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2D, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMPSMasked256", + argLen: 3, + asm: x86.AVPERMPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMDMasked256", + argLen: 3, + asm: x86.AVPERMD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSRLD256", argLen: 2, @@ -28625,6 +29297,72 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPERMI2PD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2Q128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2Q, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2QMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2Q, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2PDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSRLQ128", argLen: 2, @@ -28761,6 +29499,130 @@ var opcodeTable = [...]opInfo{ }, }, 
}, + { + name: "VPERMQ256", + argLen: 2, + asm: x86.AVPERMQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMPD256", + argLen: 2, + asm: x86.AVPERMPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMI2PD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2Q256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2Q, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2PDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2QMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2Q, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMPDMasked256", + argLen: 3, + asm: x86.AVPERMPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMQMasked256", + argLen: 3, + asm: x86.AVPERMQ, 
+ reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSRLQ256", argLen: 2, @@ -28912,6 +29774,130 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPERMPD512", + argLen: 2, + asm: x86.AVPERMPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMQ512", + argLen: 2, + asm: x86.AVPERMQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMI2Q512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2Q, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2PD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2QMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2Q, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMPDMasked512", + 
argLen: 3, + asm: x86.AVPERMPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMQMasked512", + argLen: 3, + asm: x86.AVPERMQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSRLQ512", argLen: 2, @@ -29092,6 +30078,68 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPERMB128", + argLen: 2, + asm: x86.AVPERMB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMI2B128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2B, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2BMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2B, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMBMasked128", + argLen: 3, + asm: x86.AVPERMB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMADDUBSW128", argLen: 2, @@ -29243,6 +30291,68 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPERMB256", + argLen: 2, + asm: x86.AVPERMB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMI2B256", + argLen: 3, + 
resultInArg0: true, + asm: x86.AVPERMI2B, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2BMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2B, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMBMasked256", + argLen: 3, + asm: x86.AVPERMB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMADDUBSW256", argLen: 2, @@ -29394,6 +30504,68 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPERMB512", + argLen: 2, + asm: x86.AVPERMB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMI2B512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2B, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2BMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2B, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMBMasked512", + argLen: 3, + asm: x86.AVPERMB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMADDUBSW512", argLen: 2, @@ -64012,6 +65184,46 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "PermuteInt16x16", + argLen: 2, + 
generic: true, + }, + { + name: "PermuteUint16x16", + argLen: 2, + generic: true, + }, + { + name: "Permute2Uint16x16", + argLen: 3, + generic: true, + }, + { + name: "Permute2Int16x16", + argLen: 3, + generic: true, + }, + { + name: "Permute2MaskedUint16x16", + argLen: 4, + generic: true, + }, + { + name: "Permute2MaskedInt16x16", + argLen: 4, + generic: true, + }, + { + name: "PermuteMaskedUint16x16", + argLen: 3, + generic: true, + }, + { + name: "PermuteMaskedInt16x16", + argLen: 3, + generic: true, + }, { name: "PopCountUint16x16", argLen: 1, @@ -64244,6 +65456,46 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "PermuteUint16x32", + argLen: 2, + generic: true, + }, + { + name: "PermuteInt16x32", + argLen: 2, + generic: true, + }, + { + name: "Permute2Int16x32", + argLen: 3, + generic: true, + }, + { + name: "Permute2Uint16x32", + argLen: 3, + generic: true, + }, + { + name: "Permute2MaskedUint16x32", + argLen: 4, + generic: true, + }, + { + name: "Permute2MaskedInt16x32", + argLen: 4, + generic: true, + }, + { + name: "PermuteMaskedUint16x32", + argLen: 3, + generic: true, + }, + { + name: "PermuteMaskedInt16x32", + argLen: 3, + generic: true, + }, { name: "PopCountUint16x32", argLen: 1, @@ -64497,6 +65749,46 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "PermuteUint16x8", + argLen: 2, + generic: true, + }, + { + name: "PermuteInt16x8", + argLen: 2, + generic: true, + }, + { + name: "Permute2Int16x8", + argLen: 3, + generic: true, + }, + { + name: "Permute2Uint16x8", + argLen: 3, + generic: true, + }, + { + name: "Permute2MaskedUint16x8", + argLen: 4, + generic: true, + }, + { + name: "Permute2MaskedInt16x8", + argLen: 4, + generic: true, + }, + { + name: "PermuteMaskedInt16x8", + argLen: 3, + generic: true, + }, + { + name: "PermuteMaskedUint16x8", + argLen: 3, + generic: true, + }, { name: "PopCountUint16x8", argLen: 1, @@ -64739,6 +66031,66 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "PermuteInt32x16", + argLen: 2, + generic: true, + }, + { + name: "PermuteUint32x16", + argLen: 2, + generic: true, + }, + { + name: "PermuteFloat32x16", + argLen: 2, + generic: true, + }, + { + name: "Permute2Int32x16", + argLen: 3, + generic: true, + }, + { + name: "Permute2Uint32x16", + argLen: 3, + generic: true, + }, + { + name: "Permute2Float32x16", + argLen: 3, + generic: true, + }, + { + name: "Permute2MaskedUint32x16", + argLen: 4, + generic: true, + }, + { + name: "Permute2MaskedInt32x16", + argLen: 4, + generic: true, + }, + { + name: "Permute2MaskedFloat32x16", + argLen: 4, + generic: true, + }, + { + name: "PermuteMaskedUint32x16", + argLen: 3, + generic: true, + }, + { + name: "PermuteMaskedInt32x16", + argLen: 3, + generic: true, + }, + { + name: "PermuteMaskedFloat32x16", + argLen: 3, + generic: true, + }, { name: "PopCountUint32x16", argLen: 1, @@ -65021,6 +66373,36 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "Permute2Uint32x4", + argLen: 3, + generic: true, + }, + { + name: "Permute2Float32x4", + argLen: 3, + generic: true, + }, + { + name: "Permute2Int32x4", + argLen: 3, + generic: true, + }, + { + name: "Permute2MaskedUint32x4", + argLen: 4, + generic: true, + }, + { + name: "Permute2MaskedInt32x4", + argLen: 4, + generic: true, + }, + { + name: "Permute2MaskedFloat32x4", + argLen: 4, + generic: true, + }, { name: "PopCountUint32x4", argLen: 1, @@ -65303,6 +66685,66 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: 
"PermuteInt32x8", + argLen: 2, + generic: true, + }, + { + name: "PermuteFloat32x8", + argLen: 2, + generic: true, + }, + { + name: "PermuteUint32x8", + argLen: 2, + generic: true, + }, + { + name: "Permute2Uint32x8", + argLen: 3, + generic: true, + }, + { + name: "Permute2Float32x8", + argLen: 3, + generic: true, + }, + { + name: "Permute2Int32x8", + argLen: 3, + generic: true, + }, + { + name: "Permute2MaskedFloat32x8", + argLen: 4, + generic: true, + }, + { + name: "Permute2MaskedUint32x8", + argLen: 4, + generic: true, + }, + { + name: "Permute2MaskedInt32x8", + argLen: 4, + generic: true, + }, + { + name: "PermuteMaskedInt32x8", + argLen: 3, + generic: true, + }, + { + name: "PermuteMaskedFloat32x8", + argLen: 3, + generic: true, + }, + { + name: "PermuteMaskedUint32x8", + argLen: 3, + generic: true, + }, { name: "PopCountUint32x8", argLen: 1, @@ -65581,6 +67023,36 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "Permute2Uint64x2", + argLen: 3, + generic: true, + }, + { + name: "Permute2Int64x2", + argLen: 3, + generic: true, + }, + { + name: "Permute2Float64x2", + argLen: 3, + generic: true, + }, + { + name: "Permute2MaskedUint64x2", + argLen: 4, + generic: true, + }, + { + name: "Permute2MaskedInt64x2", + argLen: 4, + generic: true, + }, + { + name: "Permute2MaskedFloat64x2", + argLen: 4, + generic: true, + }, { name: "PopCountUint64x2", argLen: 1, @@ -65839,6 +67311,66 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "PermuteUint64x4", + argLen: 2, + generic: true, + }, + { + name: "PermuteInt64x4", + argLen: 2, + generic: true, + }, + { + name: "PermuteFloat64x4", + argLen: 2, + generic: true, + }, + { + name: "Permute2Uint64x4", + argLen: 3, + generic: true, + }, + { + name: "Permute2Int64x4", + argLen: 3, + generic: true, + }, + { + name: "Permute2Float64x4", + argLen: 3, + generic: true, + }, + { + name: "Permute2MaskedInt64x4", + argLen: 4, + generic: true, + }, + { + name: "Permute2MaskedUint64x4", + argLen: 4, + generic: true, + }, + { + name: "Permute2MaskedFloat64x4", + argLen: 4, + generic: true, + }, + { + name: "PermuteMaskedFloat64x4", + argLen: 3, + generic: true, + }, + { + name: "PermuteMaskedInt64x4", + argLen: 3, + generic: true, + }, + { + name: "PermuteMaskedUint64x4", + argLen: 3, + generic: true, + }, { name: "PopCountUint64x4", argLen: 1, @@ -66097,6 +67629,66 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "PermuteUint64x8", + argLen: 2, + generic: true, + }, + { + name: "PermuteInt64x8", + argLen: 2, + generic: true, + }, + { + name: "PermuteFloat64x8", + argLen: 2, + generic: true, + }, + { + name: "Permute2Int64x8", + argLen: 3, + generic: true, + }, + { + name: "Permute2Uint64x8", + argLen: 3, + generic: true, + }, + { + name: "Permute2Float64x8", + argLen: 3, + generic: true, + }, + { + name: "Permute2MaskedUint64x8", + argLen: 4, + generic: true, + }, + { + name: "Permute2MaskedInt64x8", + argLen: 4, + generic: true, + }, + { + name: "Permute2MaskedFloat64x8", + argLen: 4, + generic: true, + }, + { + name: "PermuteMaskedFloat64x8", + argLen: 3, + generic: true, + }, + { + name: "PermuteMaskedInt64x8", + argLen: 3, + generic: true, + }, + { + name: "PermuteMaskedUint64x8", + argLen: 3, + generic: true, + }, { name: "PopCountUint64x8", argLen: 1, @@ -66348,6 +67940,46 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "PermuteUint8x16", + argLen: 2, + generic: true, + }, + { + name: "PermuteInt8x16", + argLen: 2, + 
generic: true, + }, + { + name: "Permute2Uint8x16", + argLen: 3, + generic: true, + }, + { + name: "Permute2Int8x16", + argLen: 3, + generic: true, + }, + { + name: "Permute2MaskedInt8x16", + argLen: 4, + generic: true, + }, + { + name: "Permute2MaskedUint8x16", + argLen: 4, + generic: true, + }, + { + name: "PermuteMaskedInt8x16", + argLen: 3, + generic: true, + }, + { + name: "PermuteMaskedUint8x16", + argLen: 3, + generic: true, + }, { name: "PopCountUint8x16", argLen: 1, @@ -66545,6 +68177,46 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "PermuteUint8x32", + argLen: 2, + generic: true, + }, + { + name: "PermuteInt8x32", + argLen: 2, + generic: true, + }, + { + name: "Permute2Int8x32", + argLen: 3, + generic: true, + }, + { + name: "Permute2Uint8x32", + argLen: 3, + generic: true, + }, + { + name: "Permute2MaskedUint8x32", + argLen: 4, + generic: true, + }, + { + name: "Permute2MaskedInt8x32", + argLen: 4, + generic: true, + }, + { + name: "PermuteMaskedUint8x32", + argLen: 3, + generic: true, + }, + { + name: "PermuteMaskedInt8x32", + argLen: 3, + generic: true, + }, { name: "PopCountUint8x32", argLen: 1, @@ -66725,6 +68397,46 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "PermuteUint8x64", + argLen: 2, + generic: true, + }, + { + name: "PermuteInt8x64", + argLen: 2, + generic: true, + }, + { + name: "Permute2Int8x64", + argLen: 3, + generic: true, + }, + { + name: "Permute2Uint8x64", + argLen: 3, + generic: true, + }, + { + name: "Permute2MaskedUint8x64", + argLen: 4, + generic: true, + }, + { + name: "Permute2MaskedInt8x64", + argLen: 4, + generic: true, + }, + { + name: "PermuteMaskedInt8x64", + argLen: 3, + generic: true, + }, + { + name: "PermuteMaskedUint8x64", + argLen: 3, + generic: true, + }, { name: "PopCountUint8x64", argLen: 1, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 6d10b009bb90cf..1aa36bee04202a 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -3298,6 +3298,276 @@ func rewriteValueAMD64(v *Value) bool { return true case OpPanicBounds: return rewriteValueAMD64_OpPanicBounds(v) + case OpPermute2Float32x16: + v.Op = OpAMD64VPERMI2PS512 + return true + case OpPermute2Float32x4: + v.Op = OpAMD64VPERMI2PS128 + return true + case OpPermute2Float32x8: + v.Op = OpAMD64VPERMI2PS256 + return true + case OpPermute2Float64x2: + v.Op = OpAMD64VPERMI2PD128 + return true + case OpPermute2Float64x4: + v.Op = OpAMD64VPERMI2PD256 + return true + case OpPermute2Float64x8: + v.Op = OpAMD64VPERMI2PD512 + return true + case OpPermute2Int16x16: + v.Op = OpAMD64VPERMI2W256 + return true + case OpPermute2Int16x32: + v.Op = OpAMD64VPERMI2W512 + return true + case OpPermute2Int16x8: + v.Op = OpAMD64VPERMI2W128 + return true + case OpPermute2Int32x16: + v.Op = OpAMD64VPERMI2D512 + return true + case OpPermute2Int32x4: + v.Op = OpAMD64VPERMI2D128 + return true + case OpPermute2Int32x8: + v.Op = OpAMD64VPERMI2D256 + return true + case OpPermute2Int64x2: + v.Op = OpAMD64VPERMI2Q128 + return true + case OpPermute2Int64x4: + v.Op = OpAMD64VPERMI2Q256 + return true + case OpPermute2Int64x8: + v.Op = OpAMD64VPERMI2Q512 + return true + case OpPermute2Int8x16: + v.Op = OpAMD64VPERMI2B128 + return true + case OpPermute2Int8x32: + v.Op = OpAMD64VPERMI2B256 + return true + case OpPermute2Int8x64: + v.Op = OpAMD64VPERMI2B512 + return true + case OpPermute2MaskedFloat32x16: + return 
rewriteValueAMD64_OpPermute2MaskedFloat32x16(v) + case OpPermute2MaskedFloat32x4: + return rewriteValueAMD64_OpPermute2MaskedFloat32x4(v) + case OpPermute2MaskedFloat32x8: + return rewriteValueAMD64_OpPermute2MaskedFloat32x8(v) + case OpPermute2MaskedFloat64x2: + return rewriteValueAMD64_OpPermute2MaskedFloat64x2(v) + case OpPermute2MaskedFloat64x4: + return rewriteValueAMD64_OpPermute2MaskedFloat64x4(v) + case OpPermute2MaskedFloat64x8: + return rewriteValueAMD64_OpPermute2MaskedFloat64x8(v) + case OpPermute2MaskedInt16x16: + return rewriteValueAMD64_OpPermute2MaskedInt16x16(v) + case OpPermute2MaskedInt16x32: + return rewriteValueAMD64_OpPermute2MaskedInt16x32(v) + case OpPermute2MaskedInt16x8: + return rewriteValueAMD64_OpPermute2MaskedInt16x8(v) + case OpPermute2MaskedInt32x16: + return rewriteValueAMD64_OpPermute2MaskedInt32x16(v) + case OpPermute2MaskedInt32x4: + return rewriteValueAMD64_OpPermute2MaskedInt32x4(v) + case OpPermute2MaskedInt32x8: + return rewriteValueAMD64_OpPermute2MaskedInt32x8(v) + case OpPermute2MaskedInt64x2: + return rewriteValueAMD64_OpPermute2MaskedInt64x2(v) + case OpPermute2MaskedInt64x4: + return rewriteValueAMD64_OpPermute2MaskedInt64x4(v) + case OpPermute2MaskedInt64x8: + return rewriteValueAMD64_OpPermute2MaskedInt64x8(v) + case OpPermute2MaskedInt8x16: + return rewriteValueAMD64_OpPermute2MaskedInt8x16(v) + case OpPermute2MaskedInt8x32: + return rewriteValueAMD64_OpPermute2MaskedInt8x32(v) + case OpPermute2MaskedInt8x64: + return rewriteValueAMD64_OpPermute2MaskedInt8x64(v) + case OpPermute2MaskedUint16x16: + return rewriteValueAMD64_OpPermute2MaskedUint16x16(v) + case OpPermute2MaskedUint16x32: + return rewriteValueAMD64_OpPermute2MaskedUint16x32(v) + case OpPermute2MaskedUint16x8: + return rewriteValueAMD64_OpPermute2MaskedUint16x8(v) + case OpPermute2MaskedUint32x16: + return rewriteValueAMD64_OpPermute2MaskedUint32x16(v) + case OpPermute2MaskedUint32x4: + return rewriteValueAMD64_OpPermute2MaskedUint32x4(v) + case OpPermute2MaskedUint32x8: + return rewriteValueAMD64_OpPermute2MaskedUint32x8(v) + case OpPermute2MaskedUint64x2: + return rewriteValueAMD64_OpPermute2MaskedUint64x2(v) + case OpPermute2MaskedUint64x4: + return rewriteValueAMD64_OpPermute2MaskedUint64x4(v) + case OpPermute2MaskedUint64x8: + return rewriteValueAMD64_OpPermute2MaskedUint64x8(v) + case OpPermute2MaskedUint8x16: + return rewriteValueAMD64_OpPermute2MaskedUint8x16(v) + case OpPermute2MaskedUint8x32: + return rewriteValueAMD64_OpPermute2MaskedUint8x32(v) + case OpPermute2MaskedUint8x64: + return rewriteValueAMD64_OpPermute2MaskedUint8x64(v) + case OpPermute2Uint16x16: + v.Op = OpAMD64VPERMI2W256 + return true + case OpPermute2Uint16x32: + v.Op = OpAMD64VPERMI2W512 + return true + case OpPermute2Uint16x8: + v.Op = OpAMD64VPERMI2W128 + return true + case OpPermute2Uint32x16: + v.Op = OpAMD64VPERMI2D512 + return true + case OpPermute2Uint32x4: + v.Op = OpAMD64VPERMI2D128 + return true + case OpPermute2Uint32x8: + v.Op = OpAMD64VPERMI2D256 + return true + case OpPermute2Uint64x2: + v.Op = OpAMD64VPERMI2Q128 + return true + case OpPermute2Uint64x4: + v.Op = OpAMD64VPERMI2Q256 + return true + case OpPermute2Uint64x8: + v.Op = OpAMD64VPERMI2Q512 + return true + case OpPermute2Uint8x16: + v.Op = OpAMD64VPERMI2B128 + return true + case OpPermute2Uint8x32: + v.Op = OpAMD64VPERMI2B256 + return true + case OpPermute2Uint8x64: + v.Op = OpAMD64VPERMI2B512 + return true + case OpPermuteFloat32x16: + v.Op = OpAMD64VPERMPS512 + return true + case OpPermuteFloat32x8: + v.Op = OpAMD64VPERMPS256 + 
return true + case OpPermuteFloat64x4: + v.Op = OpAMD64VPERMPD256 + return true + case OpPermuteFloat64x8: + v.Op = OpAMD64VPERMPD512 + return true + case OpPermuteInt16x16: + v.Op = OpAMD64VPERMW256 + return true + case OpPermuteInt16x32: + v.Op = OpAMD64VPERMW512 + return true + case OpPermuteInt16x8: + v.Op = OpAMD64VPERMW128 + return true + case OpPermuteInt32x16: + v.Op = OpAMD64VPERMD512 + return true + case OpPermuteInt32x8: + v.Op = OpAMD64VPERMD256 + return true + case OpPermuteInt64x4: + v.Op = OpAMD64VPERMQ256 + return true + case OpPermuteInt64x8: + v.Op = OpAMD64VPERMQ512 + return true + case OpPermuteInt8x16: + v.Op = OpAMD64VPERMB128 + return true + case OpPermuteInt8x32: + v.Op = OpAMD64VPERMB256 + return true + case OpPermuteInt8x64: + v.Op = OpAMD64VPERMB512 + return true + case OpPermuteMaskedFloat32x16: + return rewriteValueAMD64_OpPermuteMaskedFloat32x16(v) + case OpPermuteMaskedFloat32x8: + return rewriteValueAMD64_OpPermuteMaskedFloat32x8(v) + case OpPermuteMaskedFloat64x4: + return rewriteValueAMD64_OpPermuteMaskedFloat64x4(v) + case OpPermuteMaskedFloat64x8: + return rewriteValueAMD64_OpPermuteMaskedFloat64x8(v) + case OpPermuteMaskedInt16x16: + return rewriteValueAMD64_OpPermuteMaskedInt16x16(v) + case OpPermuteMaskedInt16x32: + return rewriteValueAMD64_OpPermuteMaskedInt16x32(v) + case OpPermuteMaskedInt16x8: + return rewriteValueAMD64_OpPermuteMaskedInt16x8(v) + case OpPermuteMaskedInt32x16: + return rewriteValueAMD64_OpPermuteMaskedInt32x16(v) + case OpPermuteMaskedInt32x8: + return rewriteValueAMD64_OpPermuteMaskedInt32x8(v) + case OpPermuteMaskedInt64x4: + return rewriteValueAMD64_OpPermuteMaskedInt64x4(v) + case OpPermuteMaskedInt64x8: + return rewriteValueAMD64_OpPermuteMaskedInt64x8(v) + case OpPermuteMaskedInt8x16: + return rewriteValueAMD64_OpPermuteMaskedInt8x16(v) + case OpPermuteMaskedInt8x32: + return rewriteValueAMD64_OpPermuteMaskedInt8x32(v) + case OpPermuteMaskedInt8x64: + return rewriteValueAMD64_OpPermuteMaskedInt8x64(v) + case OpPermuteMaskedUint16x16: + return rewriteValueAMD64_OpPermuteMaskedUint16x16(v) + case OpPermuteMaskedUint16x32: + return rewriteValueAMD64_OpPermuteMaskedUint16x32(v) + case OpPermuteMaskedUint16x8: + return rewriteValueAMD64_OpPermuteMaskedUint16x8(v) + case OpPermuteMaskedUint32x16: + return rewriteValueAMD64_OpPermuteMaskedUint32x16(v) + case OpPermuteMaskedUint32x8: + return rewriteValueAMD64_OpPermuteMaskedUint32x8(v) + case OpPermuteMaskedUint64x4: + return rewriteValueAMD64_OpPermuteMaskedUint64x4(v) + case OpPermuteMaskedUint64x8: + return rewriteValueAMD64_OpPermuteMaskedUint64x8(v) + case OpPermuteMaskedUint8x16: + return rewriteValueAMD64_OpPermuteMaskedUint8x16(v) + case OpPermuteMaskedUint8x32: + return rewriteValueAMD64_OpPermuteMaskedUint8x32(v) + case OpPermuteMaskedUint8x64: + return rewriteValueAMD64_OpPermuteMaskedUint8x64(v) + case OpPermuteUint16x16: + v.Op = OpAMD64VPERMW256 + return true + case OpPermuteUint16x32: + v.Op = OpAMD64VPERMW512 + return true + case OpPermuteUint16x8: + v.Op = OpAMD64VPERMW128 + return true + case OpPermuteUint32x16: + v.Op = OpAMD64VPERMD512 + return true + case OpPermuteUint32x8: + v.Op = OpAMD64VPERMD256 + return true + case OpPermuteUint64x4: + v.Op = OpAMD64VPERMQ256 + return true + case OpPermuteUint64x8: + v.Op = OpAMD64VPERMQ512 + return true + case OpPermuteUint8x16: + v.Op = OpAMD64VPERMB128 + return true + case OpPermuteUint8x32: + v.Op = OpAMD64VPERMB256 + return true + case OpPermuteUint8x64: + v.Op = OpAMD64VPERMB512 + return true case OpPopCount16: 
return rewriteValueAMD64_OpPopCount16(v) case OpPopCount32: @@ -44315,6 +44585,1038 @@ func rewriteValueAMD64_OpPanicBounds(v *Value) bool { } return false } +func rewriteValueAMD64_OpPermute2MaskedFloat32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedFloat32x16 x y z mask) + // result: (VPERMI2PSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedFloat32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedFloat32x4 x y z mask) + // result: (VPERMI2PSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedFloat32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedFloat32x8 x y z mask) + // result: (VPERMI2PSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedFloat64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedFloat64x2 x y z mask) + // result: (VPERMI2PDMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedFloat64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedFloat64x4 x y z mask) + // result: (VPERMI2PDMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedFloat64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedFloat64x8 x y z mask) + // result: (VPERMI2PDMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedInt16x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedInt16x16 x y z mask) + // result: (VPERMI2WMasked256 x y z (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2WMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + 
v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedInt16x32(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedInt16x32 x y z mask) + // result: (VPERMI2WMasked512 x y z (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2WMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedInt16x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedInt16x8 x y z mask) + // result: (VPERMI2WMasked128 x y z (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2WMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedInt32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedInt32x16 x y z mask) + // result: (VPERMI2DMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2DMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedInt32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedInt32x4 x y z mask) + // result: (VPERMI2DMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2DMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedInt32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedInt32x8 x y z mask) + // result: (VPERMI2DMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2DMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedInt64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedInt64x2 x y z mask) + // result: (VPERMI2QMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2QMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedInt64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedInt64x4 x y z mask) + // result: (VPERMI2QMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2QMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedInt64x8(v *Value) bool { + v_3 := 
v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedInt64x8 x y z mask) + // result: (VPERMI2QMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2QMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedInt8x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedInt8x16 x y z mask) + // result: (VPERMI2BMasked128 x y z (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2BMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedInt8x32(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedInt8x32 x y z mask) + // result: (VPERMI2BMasked256 x y z (VPMOVVec8x32ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2BMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedInt8x64(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedInt8x64 x y z mask) + // result: (VPERMI2BMasked512 x y z (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2BMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedUint16x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedUint16x16 x y z mask) + // result: (VPERMI2WMasked256 x y z (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2WMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedUint16x32(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedUint16x32 x y z mask) + // result: (VPERMI2WMasked512 x y z (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2WMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedUint16x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedUint16x8 x y z mask) + // result: (VPERMI2WMasked128 x y z (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2WMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedUint32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedUint32x16 x y z 
mask) + // result: (VPERMI2DMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2DMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedUint32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedUint32x4 x y z mask) + // result: (VPERMI2DMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2DMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedUint32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedUint32x8 x y z mask) + // result: (VPERMI2DMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2DMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedUint64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedUint64x2 x y z mask) + // result: (VPERMI2QMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2QMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedUint64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedUint64x4 x y z mask) + // result: (VPERMI2QMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2QMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedUint64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedUint64x8 x y z mask) + // result: (VPERMI2QMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2QMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedUint8x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedUint8x16 x y z mask) + // result: (VPERMI2BMasked128 x y z (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2BMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedUint8x32(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedUint8x32 x y z mask) + // result: (VPERMI2BMasked256 x y z (VPMOVVec8x32ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + 
v.reset(OpAMD64VPERMI2BMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedUint8x64(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedUint8x64 x y z mask) + // result: (VPERMI2BMasked512 x y z (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2BMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermuteMaskedFloat32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (PermuteMaskedFloat32x16 x y mask) + // result: (VPERMPSMasked512 x y (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPERMPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpPermuteMaskedFloat32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (PermuteMaskedFloat32x8 x y mask) + // result: (VPERMPSMasked256 x y (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPERMPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpPermuteMaskedFloat64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (PermuteMaskedFloat64x4 x y mask) + // result: (VPERMPDMasked256 x y (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPERMPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpPermuteMaskedFloat64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (PermuteMaskedFloat64x8 x y mask) + // result: (VPERMPDMasked512 x y (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPERMPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpPermuteMaskedInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (PermuteMaskedInt16x16 x y mask) + // result: (VPERMWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPERMWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpPermuteMaskedInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (PermuteMaskedInt16x32 x y mask) + // result: (VPERMWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPERMWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpPermuteMaskedInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (PermuteMaskedInt16x8 x y mask) + // result: 
(VPERMWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPERMWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpPermuteMaskedInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (PermuteMaskedInt32x16 x y mask) + // result: (VPERMDMasked512 x y (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPERMDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpPermuteMaskedInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (PermuteMaskedInt32x8 x y mask) + // result: (VPERMDMasked256 x y (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPERMDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpPermuteMaskedInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (PermuteMaskedInt64x4 x y mask) + // result: (VPERMQMasked256 x y (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPERMQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpPermuteMaskedInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (PermuteMaskedInt64x8 x y mask) + // result: (VPERMQMasked512 x y (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPERMQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpPermuteMaskedInt8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (PermuteMaskedInt8x16 x y mask) + // result: (VPERMBMasked128 x y (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPERMBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpPermuteMaskedInt8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (PermuteMaskedInt8x32 x y mask) + // result: (VPERMBMasked256 x y (VPMOVVec8x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPERMBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpPermuteMaskedInt8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (PermuteMaskedInt8x64 x y mask) + // result: (VPERMBMasked512 x y (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPERMBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpPermuteMaskedUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (PermuteMaskedUint16x16 x y mask) + // 
result: (VPERMWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPERMWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpPermuteMaskedUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (PermuteMaskedUint16x32 x y mask) + // result: (VPERMWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPERMWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpPermuteMaskedUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (PermuteMaskedUint16x8 x y mask) + // result: (VPERMWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPERMWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpPermuteMaskedUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (PermuteMaskedUint32x16 x y mask) + // result: (VPERMDMasked512 x y (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPERMDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpPermuteMaskedUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (PermuteMaskedUint32x8 x y mask) + // result: (VPERMDMasked256 x y (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPERMDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpPermuteMaskedUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (PermuteMaskedUint64x4 x y mask) + // result: (VPERMQMasked256 x y (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPERMQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpPermuteMaskedUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (PermuteMaskedUint64x8 x y mask) + // result: (VPERMQMasked512 x y (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPERMQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpPermuteMaskedUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (PermuteMaskedUint8x16 x y mask) + // result: (VPERMBMasked128 x y (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPERMBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpPermuteMaskedUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: 
(PermuteMaskedUint8x32 x y mask) + // result: (VPERMBMasked256 x y (VPMOVVec8x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPERMBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpPermuteMaskedUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (PermuteMaskedUint8x64 x y mask) + // result: (VPERMBMasked512 x y (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPERMBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} func rewriteValueAMD64_OpPopCount16(v *Value) bool { v_0 := v.Args[0] b := v.Block diff --git a/src/cmd/compile/internal/ssagen/intrinsics.go b/src/cmd/compile/internal/ssagen/intrinsics.go index c47b0898150b97..fd7ebb20a34015 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics.go +++ b/src/cmd/compile/internal/ssagen/intrinsics.go @@ -1622,18 +1622,42 @@ func opLen2(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa } } +func opLen2_21(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue2(op, t, args[1], args[0]) + } +} + func opLen3(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue3(op, t, args[0], args[1], args[2]) } } +func opLen3_21(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue3(op, t, args[1], args[0], args[2]) + } +} + +func opLen3_231(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue3(op, t, args[2], args[0], args[1]) + } +} + func opLen4(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue4(op, t, args[0], args[1], args[2], args[3]) } } +func opLen4_231(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue4(op, t, args[2], args[0], args[1], args[3]) + } +} + func plainPanicSimdImm(s *state) { cmp := s.newValue0(ssa.OpConstBool, types.Types[types.TBOOL]) cmp.AuxInt = 0 diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 58bc420fc4e304..3805ca35a872c9 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -996,6 +996,114 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
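Aside on the opLen*_21 / opLen*_231 helpers added in the intrinsics.go hunk above: the digit suffix appears to encode, for each Go-level call argument in order, the SSA operand slot it lands in (with an unchanged trailing argument omitted). So opLen2_21 builds (op args[1] args[0]), opLen3_231 builds (op args[2] args[0] args[1]), and opLen4_231 builds (op args[2] args[0] args[1] args[3]); in every case the intrinsic's index vector becomes the first SSA operand and the receiver moves behind it. The standalone sketch below only illustrates that reordering on plain values under this reading of the naming convention; reorder21 and reorder231 are invented names for the illustration and are not part of the patch.

package main

import "fmt"

// reorder21 mirrors opLen2_21: the SSA value is built as (op args[1] args[0]),
// so a call like x.Permute(indices) yields SSA operands (indices, x).
func reorder21(args []string) []string {
	return []string{args[1], args[0]}
}

// reorder231 mirrors opLen3_231 (and the four-argument opLen4_231 analogue):
// the value is built as (op args[2] args[0] args[1]), so x.Permute2(y, indices)
// yields SSA operands (indices, x, y).
func reorder231(args []string) []string {
	return []string{args[2], args[0], args[1]}
}

func main() {
	fmt.Println(reorder21([]string{"x", "indices"}))       // [indices x]
	fmt.Println(reorder231([]string{"x", "y", "indices"})) // [indices x y]
}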
addF(simdPackage, "Uint16x16.PairwiseSub", opLen2(ssa.OpPairwiseSubUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x4.PairwiseSub", opLen2(ssa.OpPairwiseSubUint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x8.PairwiseSub", opLen2(ssa.OpPairwiseSubUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x16.Permute", opLen2_21(ssa.OpPermuteInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.Permute", opLen2_21(ssa.OpPermuteUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.Permute", opLen2_21(ssa.OpPermuteInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.Permute", opLen2_21(ssa.OpPermuteUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.Permute", opLen2_21(ssa.OpPermuteInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.Permute", opLen2_21(ssa.OpPermuteUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.Permute", opLen2_21(ssa.OpPermuteInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.Permute", opLen2_21(ssa.OpPermuteUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.Permute", opLen2_21(ssa.OpPermuteInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.Permute", opLen2_21(ssa.OpPermuteUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.Permute", opLen2_21(ssa.OpPermuteInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.Permute", opLen2_21(ssa.OpPermuteUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x8.Permute", opLen2_21(ssa.OpPermuteFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.Permute", opLen2_21(ssa.OpPermuteInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.Permute", opLen2_21(ssa.OpPermuteUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.Permute", opLen2_21(ssa.OpPermuteFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.Permute", opLen2_21(ssa.OpPermuteInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.Permute", opLen2_21(ssa.OpPermuteUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x4.Permute", opLen2_21(ssa.OpPermuteFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.Permute", opLen2_21(ssa.OpPermuteInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.Permute", opLen2_21(ssa.OpPermuteUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.Permute", opLen2_21(ssa.OpPermuteFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.Permute", opLen2_21(ssa.OpPermuteInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.Permute", opLen2_21(ssa.OpPermuteUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.Permute2", opLen3_231(ssa.OpPermute2Int8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.Permute2", opLen3_231(ssa.OpPermute2Uint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.Permute2", opLen3_231(ssa.OpPermute2Int8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.Permute2", opLen3_231(ssa.OpPermute2Uint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.Permute2", opLen3_231(ssa.OpPermute2Int8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.Permute2", opLen3_231(ssa.OpPermute2Uint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.Permute2", opLen3_231(ssa.OpPermute2Int16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, 
"Uint16x8.Permute2", opLen3_231(ssa.OpPermute2Uint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.Permute2", opLen3_231(ssa.OpPermute2Int16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.Permute2", opLen3_231(ssa.OpPermute2Uint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.Permute2", opLen3_231(ssa.OpPermute2Int16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.Permute2", opLen3_231(ssa.OpPermute2Uint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.Permute2", opLen3_231(ssa.OpPermute2Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.Permute2", opLen3_231(ssa.OpPermute2Int32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.Permute2", opLen3_231(ssa.OpPermute2Uint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.Permute2", opLen3_231(ssa.OpPermute2Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.Permute2", opLen3_231(ssa.OpPermute2Int32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.Permute2", opLen3_231(ssa.OpPermute2Uint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.Permute2", opLen3_231(ssa.OpPermute2Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.Permute2", opLen3_231(ssa.OpPermute2Int32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.Permute2", opLen3_231(ssa.OpPermute2Uint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.Permute2", opLen3_231(ssa.OpPermute2Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.Permute2", opLen3_231(ssa.OpPermute2Int64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.Permute2", opLen3_231(ssa.OpPermute2Uint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.Permute2", opLen3_231(ssa.OpPermute2Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.Permute2", opLen3_231(ssa.OpPermute2Int64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.Permute2", opLen3_231(ssa.OpPermute2Uint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.Permute2", opLen3_231(ssa.OpPermute2Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.Permute2", opLen3_231(ssa.OpPermute2Int64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.Permute2", opLen3_231(ssa.OpPermute2Uint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.Permute2Masked", 
opLen4_231(ssa.OpPermute2MaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedUint16x16, 
types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x8.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x4.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.PopCount", opLen1(ssa.OpPopCountInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.PopCount", opLen1(ssa.OpPopCountInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x64.PopCount", opLen1(ssa.OpPopCountInt8x64, types.TypeVec512), sys.AMD64) diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 7a8780e5cba790..29899f8cb13a9c 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -5391,6 +5391,830 @@ func (x Uint32x4) PairwiseSub(y Uint32x4) Uint32x4 // Asm: VPHSUBD, CPU Feature: AVX2 func (x Uint32x8) PairwiseSub(y Uint32x8) Uint32x8 +/* Permute */ + +// Permute performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMB, CPU Feature: AVX512VBMI +func (x Int8x16) Permute(indices Uint8x16) Int8x16 + +// Permute performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMB, CPU Feature: AVX512VBMI +func (x Uint8x16) Permute(indices Uint8x16) Uint8x16 + +// Permute performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMB, CPU Feature: AVX512VBMI +func (x Int8x32) Permute(indices Uint8x32) Int8x32 + +// Permute performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. 
+// +// Asm: VPERMB, CPU Feature: AVX512VBMI +func (x Uint8x32) Permute(indices Uint8x32) Uint8x32 + +// Permute performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMB, CPU Feature: AVX512VBMI +func (x Int8x64) Permute(indices Uint8x64) Int8x64 + +// Permute performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMB, CPU Feature: AVX512VBMI +func (x Uint8x64) Permute(indices Uint8x64) Uint8x64 + +// Permute performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMW, CPU Feature: AVX512BW +func (x Int16x8) Permute(indices Uint16x8) Int16x8 + +// Permute performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMW, CPU Feature: AVX512BW +func (x Uint16x8) Permute(indices Uint16x8) Uint16x8 + +// Permute performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMW, CPU Feature: AVX512BW +func (x Int16x16) Permute(indices Uint16x16) Int16x16 + +// Permute performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMW, CPU Feature: AVX512BW +func (x Uint16x16) Permute(indices Uint16x16) Uint16x16 + +// Permute performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMW, CPU Feature: AVX512BW +func (x Int16x32) Permute(indices Uint16x32) Int16x32 + +// Permute performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMW, CPU Feature: AVX512BW +func (x Uint16x32) Permute(indices Uint16x32) Uint16x32 + +// Permute performs a full permutation of vector x using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMPS, CPU Feature: AVX2 +func (x Float32x8) Permute(indices Uint32x8) Float32x8 + +// Permute performs a full permutation of vector x using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMD, CPU Feature: AVX2 +func (x Int32x8) Permute(indices Uint32x8) Int32x8 + +// Permute performs a full permutation of vector x using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. 
+// +// Asm: VPERMD, CPU Feature: AVX2 +func (x Uint32x8) Permute(indices Uint32x8) Uint32x8 + +// Permute performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMPS, CPU Feature: AVX512F +func (x Float32x16) Permute(indices Uint32x16) Float32x16 + +// Permute performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMD, CPU Feature: AVX512F +func (x Int32x16) Permute(indices Uint32x16) Int32x16 + +// Permute performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMD, CPU Feature: AVX512F +func (x Uint32x16) Permute(indices Uint32x16) Uint32x16 + +// Permute performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMPD, CPU Feature: AVX512F +func (x Float64x4) Permute(indices Uint64x4) Float64x4 + +// Permute performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMQ, CPU Feature: AVX512F +func (x Int64x4) Permute(indices Uint64x4) Int64x4 + +// Permute performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMQ, CPU Feature: AVX512F +func (x Uint64x4) Permute(indices Uint64x4) Uint64x4 + +// Permute performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMPD, CPU Feature: AVX512F +func (x Float64x8) Permute(indices Uint64x8) Float64x8 + +// Permute performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMQ, CPU Feature: AVX512F +func (x Int64x8) Permute(indices Uint64x8) Int64x8 + +// Permute performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMQ, CPU Feature: AVX512F +func (x Uint64x8) Permute(indices Uint64x8) Uint64x8 + +/* Permute2 */ + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2B, CPU Feature: AVX512VBMI +func (x Int8x16) Permute2(y Int8x16, indices Uint8x16) Int8x16 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. 
+// +// Asm: VPERMI2B, CPU Feature: AVX512VBMI +func (x Uint8x16) Permute2(y Uint8x16, indices Uint8x16) Uint8x16 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2B, CPU Feature: AVX512VBMI +func (x Int8x32) Permute2(y Int8x32, indices Uint8x32) Int8x32 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2B, CPU Feature: AVX512VBMI +func (x Uint8x32) Permute2(y Uint8x32, indices Uint8x32) Uint8x32 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2B, CPU Feature: AVX512VBMI +func (x Int8x64) Permute2(y Int8x64, indices Uint8x64) Int8x64 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2B, CPU Feature: AVX512VBMI +func (x Uint8x64) Permute2(y Uint8x64, indices Uint8x64) Uint8x64 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2W, CPU Feature: AVX512BW +func (x Int16x8) Permute2(y Int16x8, indices Uint16x8) Int16x8 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2W, CPU Feature: AVX512BW +func (x Uint16x8) Permute2(y Uint16x8, indices Uint16x8) Uint16x8 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2W, CPU Feature: AVX512BW +func (x Int16x16) Permute2(y Int16x16, indices Uint16x16) Int16x16 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2W, CPU Feature: AVX512BW +func (x Uint16x16) Permute2(y Uint16x16, indices Uint16x16) Uint16x16 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. 
+// +// Asm: VPERMI2W, CPU Feature: AVX512BW +func (x Int16x32) Permute2(y Int16x32, indices Uint16x32) Int16x32 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2W, CPU Feature: AVX512BW +func (x Uint16x32) Permute2(y Uint16x32, indices Uint16x32) Uint16x32 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2PS, CPU Feature: AVX512F +func (x Float32x4) Permute2(y Float32x4, indices Uint32x4) Float32x4 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2D, CPU Feature: AVX512F +func (x Int32x4) Permute2(y Int32x4, indices Uint32x4) Int32x4 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2D, CPU Feature: AVX512F +func (x Uint32x4) Permute2(y Uint32x4, indices Uint32x4) Uint32x4 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2PS, CPU Feature: AVX512F +func (x Float32x8) Permute2(y Float32x8, indices Uint32x8) Float32x8 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2D, CPU Feature: AVX512F +func (x Int32x8) Permute2(y Int32x8, indices Uint32x8) Int32x8 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2D, CPU Feature: AVX512F +func (x Uint32x8) Permute2(y Uint32x8, indices Uint32x8) Uint32x8 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2PS, CPU Feature: AVX512F +func (x Float32x16) Permute2(y Float32x16, indices Uint32x16) Float32x16 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. 
+// +// Asm: VPERMI2D, CPU Feature: AVX512F +func (x Int32x16) Permute2(y Int32x16, indices Uint32x16) Int32x16 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2D, CPU Feature: AVX512F +func (x Uint32x16) Permute2(y Uint32x16, indices Uint32x16) Uint32x16 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2PD, CPU Feature: AVX512F +func (x Float64x2) Permute2(y Float64x2, indices Uint64x2) Float64x2 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2Q, CPU Feature: AVX512F +func (x Int64x2) Permute2(y Int64x2, indices Uint64x2) Int64x2 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2Q, CPU Feature: AVX512F +func (x Uint64x2) Permute2(y Uint64x2, indices Uint64x2) Uint64x2 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2PD, CPU Feature: AVX512F +func (x Float64x4) Permute2(y Float64x4, indices Uint64x4) Float64x4 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2Q, CPU Feature: AVX512F +func (x Int64x4) Permute2(y Int64x4, indices Uint64x4) Int64x4 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2Q, CPU Feature: AVX512F +func (x Uint64x4) Permute2(y Uint64x4, indices Uint64x4) Uint64x4 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2PD, CPU Feature: AVX512F +func (x Float64x8) Permute2(y Float64x8, indices Uint64x8) Float64x8 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. 
+// +// Asm: VPERMI2Q, CPU Feature: AVX512F +func (x Int64x8) Permute2(y Int64x8, indices Uint64x8) Int64x8 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2Q, CPU Feature: AVX512F +func (x Uint64x8) Permute2(y Uint64x8, indices Uint64x8) Uint64x8 + +/* Permute2Masked */ + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2B, CPU Feature: AVX512VBMI +func (x Int8x16) Permute2Masked(y Int8x16, indices Uint8x16, u Mask8x16) Int8x16 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2B, CPU Feature: AVX512VBMI +func (x Uint8x16) Permute2Masked(y Uint8x16, indices Uint8x16, u Mask8x16) Uint8x16 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2B, CPU Feature: AVX512VBMI +func (x Int8x32) Permute2Masked(y Int8x32, indices Uint8x32, u Mask8x32) Int8x32 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2B, CPU Feature: AVX512VBMI +func (x Uint8x32) Permute2Masked(y Uint8x32, indices Uint8x32, u Mask8x32) Uint8x32 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2B, CPU Feature: AVX512VBMI +func (x Int8x64) Permute2Masked(y Int8x64, indices Uint8x64, u Mask8x64) Int8x64 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2B, CPU Feature: AVX512VBMI +func (x Uint8x64) Permute2Masked(y Uint8x64, indices Uint8x64, u Mask8x64) Uint8x64 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2W, CPU Feature: AVX512BW +func (x Int16x8) Permute2Masked(y Int16x8, indices Uint16x8, u Mask16x8) Int16x8 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. 
+// +// Asm: VPERMI2W, CPU Feature: AVX512BW +func (x Uint16x8) Permute2Masked(y Uint16x8, indices Uint16x8, u Mask16x8) Uint16x8 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2W, CPU Feature: AVX512BW +func (x Int16x16) Permute2Masked(y Int16x16, indices Uint16x16, u Mask16x16) Int16x16 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2W, CPU Feature: AVX512BW +func (x Uint16x16) Permute2Masked(y Uint16x16, indices Uint16x16, u Mask16x16) Uint16x16 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2W, CPU Feature: AVX512BW +func (x Int16x32) Permute2Masked(y Int16x32, indices Uint16x32, u Mask16x32) Int16x32 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2W, CPU Feature: AVX512BW +func (x Uint16x32) Permute2Masked(y Uint16x32, indices Uint16x32, u Mask16x32) Uint16x32 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2PS, CPU Feature: AVX512F +func (x Float32x4) Permute2Masked(y Float32x4, indices Uint32x4, u Mask32x4) Float32x4 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2D, CPU Feature: AVX512F +func (x Int32x4) Permute2Masked(y Int32x4, indices Uint32x4, u Mask32x4) Int32x4 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2D, CPU Feature: AVX512F +func (x Uint32x4) Permute2Masked(y Uint32x4, indices Uint32x4, u Mask32x4) Uint32x4 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2PS, CPU Feature: AVX512F +func (x Float32x8) Permute2Masked(y Float32x8, indices Uint32x8, u Mask32x8) Float32x8 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. 
+// +// Asm: VPERMI2D, CPU Feature: AVX512F +func (x Int32x8) Permute2Masked(y Int32x8, indices Uint32x8, u Mask32x8) Int32x8 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2D, CPU Feature: AVX512F +func (x Uint32x8) Permute2Masked(y Uint32x8, indices Uint32x8, u Mask32x8) Uint32x8 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2PS, CPU Feature: AVX512F +func (x Float32x16) Permute2Masked(y Float32x16, indices Uint32x16, u Mask32x16) Float32x16 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2D, CPU Feature: AVX512F +func (x Int32x16) Permute2Masked(y Int32x16, indices Uint32x16, u Mask32x16) Int32x16 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2D, CPU Feature: AVX512F +func (x Uint32x16) Permute2Masked(y Uint32x16, indices Uint32x16, u Mask32x16) Uint32x16 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2PD, CPU Feature: AVX512F +func (x Float64x2) Permute2Masked(y Float64x2, indices Uint64x2, u Mask64x2) Float64x2 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2Q, CPU Feature: AVX512F +func (x Int64x2) Permute2Masked(y Int64x2, indices Uint64x2, u Mask64x2) Int64x2 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2Q, CPU Feature: AVX512F +func (x Uint64x2) Permute2Masked(y Uint64x2, indices Uint64x2, u Mask64x2) Uint64x2 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2PD, CPU Feature: AVX512F +func (x Float64x4) Permute2Masked(y Float64x4, indices Uint64x4, u Mask64x4) Float64x4 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. 
+// +// Asm: VPERMI2Q, CPU Feature: AVX512F +func (x Int64x4) Permute2Masked(y Int64x4, indices Uint64x4, u Mask64x4) Int64x4 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2Q, CPU Feature: AVX512F +func (x Uint64x4) Permute2Masked(y Uint64x4, indices Uint64x4, u Mask64x4) Uint64x4 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2PD, CPU Feature: AVX512F +func (x Float64x8) Permute2Masked(y Float64x8, indices Uint64x8, u Mask64x8) Float64x8 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2Q, CPU Feature: AVX512F +func (x Int64x8) Permute2Masked(y Int64x8, indices Uint64x8, u Mask64x8) Int64x8 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2Q, CPU Feature: AVX512F +func (x Uint64x8) Permute2Masked(y Uint64x8, indices Uint64x8, u Mask64x8) Uint64x8 + +/* PermuteMasked */ + +// PermuteMasked performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMB, CPU Feature: AVX512VBMI +func (x Int8x16) PermuteMasked(indices Uint8x16, z Mask8x16) Int8x16 + +// PermuteMasked performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMB, CPU Feature: AVX512VBMI +func (x Uint8x16) PermuteMasked(indices Uint8x16, z Mask8x16) Uint8x16 + +// PermuteMasked performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMB, CPU Feature: AVX512VBMI +func (x Int8x32) PermuteMasked(indices Uint8x32, z Mask8x32) Int8x32 + +// PermuteMasked performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMB, CPU Feature: AVX512VBMI +func (x Uint8x32) PermuteMasked(indices Uint8x32, z Mask8x32) Uint8x32 + +// PermuteMasked performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. 
+// +// Asm: VPERMB, CPU Feature: AVX512VBMI +func (x Int8x64) PermuteMasked(indices Uint8x64, z Mask8x64) Int8x64 + +// PermuteMasked performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMB, CPU Feature: AVX512VBMI +func (x Uint8x64) PermuteMasked(indices Uint8x64, z Mask8x64) Uint8x64 + +// PermuteMasked performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMW, CPU Feature: AVX512BW +func (x Int16x8) PermuteMasked(indices Uint16x8, z Mask16x8) Int16x8 + +// PermuteMasked performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMW, CPU Feature: AVX512BW +func (x Uint16x8) PermuteMasked(indices Uint16x8, z Mask16x8) Uint16x8 + +// PermuteMasked performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMW, CPU Feature: AVX512BW +func (x Int16x16) PermuteMasked(indices Uint16x16, z Mask16x16) Int16x16 + +// PermuteMasked performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMW, CPU Feature: AVX512BW +func (x Uint16x16) PermuteMasked(indices Uint16x16, z Mask16x16) Uint16x16 + +// PermuteMasked performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMW, CPU Feature: AVX512BW +func (x Int16x32) PermuteMasked(indices Uint16x32, z Mask16x32) Int16x32 + +// PermuteMasked performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMW, CPU Feature: AVX512BW +func (x Uint16x32) PermuteMasked(indices Uint16x32, z Mask16x32) Uint16x32 + +// PermuteMasked performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMPS, CPU Feature: AVX512F +func (x Float32x8) PermuteMasked(indices Uint32x8, z Mask32x8) Float32x8 + +// PermuteMasked performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMD, CPU Feature: AVX512F +func (x Int32x8) PermuteMasked(indices Uint32x8, z Mask32x8) Int32x8 + +// PermuteMasked performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. 
+// +// Asm: VPERMD, CPU Feature: AVX512F +func (x Uint32x8) PermuteMasked(indices Uint32x8, z Mask32x8) Uint32x8 + +// PermuteMasked performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMPS, CPU Feature: AVX512F +func (x Float32x16) PermuteMasked(indices Uint32x16, z Mask32x16) Float32x16 + +// PermuteMasked performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMD, CPU Feature: AVX512F +func (x Int32x16) PermuteMasked(indices Uint32x16, z Mask32x16) Int32x16 + +// PermuteMasked performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMD, CPU Feature: AVX512F +func (x Uint32x16) PermuteMasked(indices Uint32x16, z Mask32x16) Uint32x16 + +// PermuteMasked performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMPD, CPU Feature: AVX512F +func (x Float64x4) PermuteMasked(indices Uint64x4, z Mask64x4) Float64x4 + +// PermuteMasked performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMQ, CPU Feature: AVX512F +func (x Int64x4) PermuteMasked(indices Uint64x4, z Mask64x4) Int64x4 + +// PermuteMasked performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMQ, CPU Feature: AVX512F +func (x Uint64x4) PermuteMasked(indices Uint64x4, z Mask64x4) Uint64x4 + +// PermuteMasked performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMPD, CPU Feature: AVX512F +func (x Float64x8) PermuteMasked(indices Uint64x8, z Mask64x8) Float64x8 + +// PermuteMasked performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMQ, CPU Feature: AVX512F +func (x Int64x8) PermuteMasked(indices Uint64x8, z Mask64x8) Int64x8 + +// PermuteMasked performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMQ, CPU Feature: AVX512F +func (x Uint64x8) PermuteMasked(indices Uint64x8, z Mask64x8) Uint64x8 + /* PopCount */ // PopCount counts the number of set bits in each element. 
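// Illustrative sketch of the Permute/Permute2 semantics documented above, not
// taken from this CL. The doc comments say "only the needed bits to represent
// x's index are used in indices' elements"; for an Int64x8 there are eight
// lanes, so only the low three bits of each index are consulted and an index
// of 8 wraps back to lane 0. The sketch assumes the same package layout and
// helpers as the simd_test.go changes in this series (package simd_test
// importing "simd", with HasAVX512, LoadInt64x8Slice, LoadUint64x8Slice,
// Permute, Permute2, StoreSlice); the function and variable names here are
// illustrative only.

package simd_test

import "simd"

// rotateAndBlend rotates x left by one lane using the index wrap-around, then
// uses Permute2 to interleave lanes of the original vector (indices 0..7) with
// lanes of the rotated vector (indices 8..15, which select from the appended
// y operand, per "xy is x appending y" in the doc comments).
func rotateAndBlend(x []int64) (rotated, blended []int64) {
	if !simd.HasAVX512() {
		return nil, nil // Permute and Permute2 require AVX512 per the docs above
	}
	v := simd.LoadInt64x8Slice(x)

	// Index 8 has its low three bits equal to 0, so it selects lane 0 again:
	// rotated = {x[1], x[2], ..., x[7], x[0]}.
	rotIdx := simd.LoadUint64x8Slice([]uint64{1, 2, 3, 4, 5, 6, 7, 8})
	rotated = make([]int64, 8)
	v.Permute(rotIdx).StoreSlice(rotated)

	// Alternating indices 0,8,1,9,... pick lane i of v, then lane i of r:
	// blended = {x[0], rotated[0], x[1], rotated[1], x[2], rotated[2], x[3], rotated[3]}.
	r := simd.LoadInt64x8Slice(rotated)
	blendIdx := simd.LoadUint64x8Slice([]uint64{0, 8, 1, 9, 2, 10, 3, 11})
	blended = make([]int64, 8)
	v.Permute2(r, blendIdx).StoreSlice(blended)
	return rotated, blended
}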
diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index 36923319ff312c..f1a2f11738c2e5 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -151,6 +151,41 @@ func TestMaskedAdd(t *testing.T) { testInt32x4BinaryMasked(t, []int32{1, 2, 3, 4}, []int32{5, 6, 7, 8}, []int32{-1, -1, 0, 0}, []int32{6, 8, 0, 0}, "AddMasked") } +func TestPermute(t *testing.T) { + if !simd.HasAVX512() { + t.Skip("Test requires HasAVX512, not available on this hardware") + return + } + x := []int64{1, 2, 3, 4, 5, 6, 7, 8} + indices := []uint64{7, 6, 5, 4, 3, 2, 1, 0} + want := []int64{8, 7, 6, 5, 4, 3, 2, 1} + got := make([]int64, 8) + simd.LoadInt64x8Slice(x).Permute(simd.LoadUint64x8Slice(indices)).StoreSlice(got) + for i := range 8 { + if want[i] != got[i] { + t.Errorf("want and got differ at index %d, want=%d, got=%d", i, want[i], got[i]) + } + } +} + +func TestPermute2(t *testing.T) { + if !simd.HasAVX512() { + t.Skip("Test requires HasAVX512, not available on this hardware") + return + } + x := []int64{1, 2, 3, 4, 5, 6, 7, 8} + y := []int64{-1, -2, -3, -4, -5, -6, -7, -8} + indices := []uint64{7 + 8, 6, 5 + 8, 4, 3 + 8, 2, 1 + 8, 0} + want := []int64{-8, 7, -6, 5, -4, 3, -2, 1} + got := make([]int64, 8) + simd.LoadInt64x8Slice(x).Permute2(simd.LoadInt64x8Slice(y), simd.LoadUint64x8Slice(indices)).StoreSlice(got) + for i := range 8 { + if want[i] != got[i] { + t.Errorf("want and got differ at index %d, want=%d, got=%d", i, want[i], got[i]) + } + } +} + // checkInt8Slices ensures that b and a are equal, to the end of b. // also serves to use the slices, to prevent accidental optimization. func checkInt8Slices(t *testing.T, a, b []int8) { diff --git a/src/simd/simd_wrapped_test.go b/src/simd/simd_wrapped_test.go index 6466684068e964..29452bdad0e9b4 100644 --- a/src/simd/simd_wrapped_test.go +++ b/src/simd/simd_wrapped_test.go @@ -7800,6 +7800,10 @@ func testUint64x8UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint6 // GaloisFieldAffineTransformMasked // Get128 // GetElem +// Permute +// Permute2 +// Permute2Masked +// PermuteMasked // RotateAllLeft // RotateAllLeftMasked // RotateAllRight From 17baae72db6f31275383ecb091ee3ec722e290ad Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Mon, 14 Jul 2025 19:44:57 +0000 Subject: [PATCH 086/139] [dev.simd] simd: default mask param's name to mask This CL is generated by CL 687920. Change-Id: Iab0d7c28c923380df51806ba572ec59f9b031de8 Reviewed-on: https://go-review.googlesource.com/c/go/+/687955 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/simd/ops_amd64.go | 1632 ++++++++++++++++++++--------------------- 1 file changed, 816 insertions(+), 816 deletions(-) diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 29899f8cb13a9c..ebb626358f8a24 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -71,62 +71,62 @@ func (x Int64x8) Absolute() Int64x8 // AbsoluteMasked computes the absolute value of each element. // // Asm: VPABSB, CPU Feature: AVX512BW -func (x Int8x16) AbsoluteMasked(y Mask8x16) Int8x16 +func (x Int8x16) AbsoluteMasked(mask Mask8x16) Int8x16 // AbsoluteMasked computes the absolute value of each element. // // Asm: VPABSB, CPU Feature: AVX512BW -func (x Int8x32) AbsoluteMasked(y Mask8x32) Int8x32 +func (x Int8x32) AbsoluteMasked(mask Mask8x32) Int8x32 // AbsoluteMasked computes the absolute value of each element. 
// // Asm: VPABSB, CPU Feature: AVX512BW -func (x Int8x64) AbsoluteMasked(y Mask8x64) Int8x64 +func (x Int8x64) AbsoluteMasked(mask Mask8x64) Int8x64 // AbsoluteMasked computes the absolute value of each element. // // Asm: VPABSW, CPU Feature: AVX512BW -func (x Int16x8) AbsoluteMasked(y Mask16x8) Int16x8 +func (x Int16x8) AbsoluteMasked(mask Mask16x8) Int16x8 // AbsoluteMasked computes the absolute value of each element. // // Asm: VPABSW, CPU Feature: AVX512BW -func (x Int16x16) AbsoluteMasked(y Mask16x16) Int16x16 +func (x Int16x16) AbsoluteMasked(mask Mask16x16) Int16x16 // AbsoluteMasked computes the absolute value of each element. // // Asm: VPABSW, CPU Feature: AVX512BW -func (x Int16x32) AbsoluteMasked(y Mask16x32) Int16x32 +func (x Int16x32) AbsoluteMasked(mask Mask16x32) Int16x32 // AbsoluteMasked computes the absolute value of each element. // // Asm: VPABSD, CPU Feature: AVX512F -func (x Int32x4) AbsoluteMasked(y Mask32x4) Int32x4 +func (x Int32x4) AbsoluteMasked(mask Mask32x4) Int32x4 // AbsoluteMasked computes the absolute value of each element. // // Asm: VPABSD, CPU Feature: AVX512F -func (x Int32x8) AbsoluteMasked(y Mask32x8) Int32x8 +func (x Int32x8) AbsoluteMasked(mask Mask32x8) Int32x8 // AbsoluteMasked computes the absolute value of each element. // // Asm: VPABSD, CPU Feature: AVX512F -func (x Int32x16) AbsoluteMasked(y Mask32x16) Int32x16 +func (x Int32x16) AbsoluteMasked(mask Mask32x16) Int32x16 // AbsoluteMasked computes the absolute value of each element. // // Asm: VPABSQ, CPU Feature: AVX512F -func (x Int64x2) AbsoluteMasked(y Mask64x2) Int64x2 +func (x Int64x2) AbsoluteMasked(mask Mask64x2) Int64x2 // AbsoluteMasked computes the absolute value of each element. // // Asm: VPABSQ, CPU Feature: AVX512F -func (x Int64x4) AbsoluteMasked(y Mask64x4) Int64x4 +func (x Int64x4) AbsoluteMasked(mask Mask64x4) Int64x4 // AbsoluteMasked computes the absolute value of each element. // // Asm: VPABSQ, CPU Feature: AVX512F -func (x Int64x8) AbsoluteMasked(y Mask64x8) Int64x8 +func (x Int64x8) AbsoluteMasked(mask Mask64x8) Int64x8 /* Add */ @@ -285,152 +285,152 @@ func (x Uint64x8) Add(y Uint64x8) Uint64x8 // AddMasked adds corresponding elements of two vectors. // // Asm: VADDPS, CPU Feature: AVX512F -func (x Float32x4) AddMasked(y Float32x4, z Mask32x4) Float32x4 +func (x Float32x4) AddMasked(y Float32x4, mask Mask32x4) Float32x4 // AddMasked adds corresponding elements of two vectors. // // Asm: VADDPS, CPU Feature: AVX512F -func (x Float32x8) AddMasked(y Float32x8, z Mask32x8) Float32x8 +func (x Float32x8) AddMasked(y Float32x8, mask Mask32x8) Float32x8 // AddMasked adds corresponding elements of two vectors. // // Asm: VADDPS, CPU Feature: AVX512F -func (x Float32x16) AddMasked(y Float32x16, z Mask32x16) Float32x16 +func (x Float32x16) AddMasked(y Float32x16, mask Mask32x16) Float32x16 // AddMasked adds corresponding elements of two vectors. // // Asm: VADDPD, CPU Feature: AVX512F -func (x Float64x2) AddMasked(y Float64x2, z Mask64x2) Float64x2 +func (x Float64x2) AddMasked(y Float64x2, mask Mask64x2) Float64x2 // AddMasked adds corresponding elements of two vectors. // // Asm: VADDPD, CPU Feature: AVX512F -func (x Float64x4) AddMasked(y Float64x4, z Mask64x4) Float64x4 +func (x Float64x4) AddMasked(y Float64x4, mask Mask64x4) Float64x4 // AddMasked adds corresponding elements of two vectors. 
// // Asm: VADDPD, CPU Feature: AVX512F -func (x Float64x8) AddMasked(y Float64x8, z Mask64x8) Float64x8 +func (x Float64x8) AddMasked(y Float64x8, mask Mask64x8) Float64x8 // AddMasked adds corresponding elements of two vectors. // // Asm: VPADDB, CPU Feature: AVX512BW -func (x Int8x16) AddMasked(y Int8x16, z Mask8x16) Int8x16 +func (x Int8x16) AddMasked(y Int8x16, mask Mask8x16) Int8x16 // AddMasked adds corresponding elements of two vectors. // // Asm: VPADDB, CPU Feature: AVX512BW -func (x Int8x32) AddMasked(y Int8x32, z Mask8x32) Int8x32 +func (x Int8x32) AddMasked(y Int8x32, mask Mask8x32) Int8x32 // AddMasked adds corresponding elements of two vectors. // // Asm: VPADDB, CPU Feature: AVX512BW -func (x Int8x64) AddMasked(y Int8x64, z Mask8x64) Int8x64 +func (x Int8x64) AddMasked(y Int8x64, mask Mask8x64) Int8x64 // AddMasked adds corresponding elements of two vectors. // // Asm: VPADDW, CPU Feature: AVX512BW -func (x Int16x8) AddMasked(y Int16x8, z Mask16x8) Int16x8 +func (x Int16x8) AddMasked(y Int16x8, mask Mask16x8) Int16x8 // AddMasked adds corresponding elements of two vectors. // // Asm: VPADDW, CPU Feature: AVX512BW -func (x Int16x16) AddMasked(y Int16x16, z Mask16x16) Int16x16 +func (x Int16x16) AddMasked(y Int16x16, mask Mask16x16) Int16x16 // AddMasked adds corresponding elements of two vectors. // // Asm: VPADDW, CPU Feature: AVX512BW -func (x Int16x32) AddMasked(y Int16x32, z Mask16x32) Int16x32 +func (x Int16x32) AddMasked(y Int16x32, mask Mask16x32) Int16x32 // AddMasked adds corresponding elements of two vectors. // // Asm: VPADDD, CPU Feature: AVX512F -func (x Int32x4) AddMasked(y Int32x4, z Mask32x4) Int32x4 +func (x Int32x4) AddMasked(y Int32x4, mask Mask32x4) Int32x4 // AddMasked adds corresponding elements of two vectors. // // Asm: VPADDD, CPU Feature: AVX512F -func (x Int32x8) AddMasked(y Int32x8, z Mask32x8) Int32x8 +func (x Int32x8) AddMasked(y Int32x8, mask Mask32x8) Int32x8 // AddMasked adds corresponding elements of two vectors. // // Asm: VPADDD, CPU Feature: AVX512F -func (x Int32x16) AddMasked(y Int32x16, z Mask32x16) Int32x16 +func (x Int32x16) AddMasked(y Int32x16, mask Mask32x16) Int32x16 // AddMasked adds corresponding elements of two vectors. // // Asm: VPADDQ, CPU Feature: AVX512F -func (x Int64x2) AddMasked(y Int64x2, z Mask64x2) Int64x2 +func (x Int64x2) AddMasked(y Int64x2, mask Mask64x2) Int64x2 // AddMasked adds corresponding elements of two vectors. // // Asm: VPADDQ, CPU Feature: AVX512F -func (x Int64x4) AddMasked(y Int64x4, z Mask64x4) Int64x4 +func (x Int64x4) AddMasked(y Int64x4, mask Mask64x4) Int64x4 // AddMasked adds corresponding elements of two vectors. // // Asm: VPADDQ, CPU Feature: AVX512F -func (x Int64x8) AddMasked(y Int64x8, z Mask64x8) Int64x8 +func (x Int64x8) AddMasked(y Int64x8, mask Mask64x8) Int64x8 // AddMasked adds corresponding elements of two vectors. // // Asm: VPADDB, CPU Feature: AVX512BW -func (x Uint8x16) AddMasked(y Uint8x16, z Mask8x16) Uint8x16 +func (x Uint8x16) AddMasked(y Uint8x16, mask Mask8x16) Uint8x16 // AddMasked adds corresponding elements of two vectors. // // Asm: VPADDB, CPU Feature: AVX512BW -func (x Uint8x32) AddMasked(y Uint8x32, z Mask8x32) Uint8x32 +func (x Uint8x32) AddMasked(y Uint8x32, mask Mask8x32) Uint8x32 // AddMasked adds corresponding elements of two vectors. // // Asm: VPADDB, CPU Feature: AVX512BW -func (x Uint8x64) AddMasked(y Uint8x64, z Mask8x64) Uint8x64 +func (x Uint8x64) AddMasked(y Uint8x64, mask Mask8x64) Uint8x64 // AddMasked adds corresponding elements of two vectors. 
// // Asm: VPADDW, CPU Feature: AVX512BW -func (x Uint16x8) AddMasked(y Uint16x8, z Mask16x8) Uint16x8 +func (x Uint16x8) AddMasked(y Uint16x8, mask Mask16x8) Uint16x8 // AddMasked adds corresponding elements of two vectors. // // Asm: VPADDW, CPU Feature: AVX512BW -func (x Uint16x16) AddMasked(y Uint16x16, z Mask16x16) Uint16x16 +func (x Uint16x16) AddMasked(y Uint16x16, mask Mask16x16) Uint16x16 // AddMasked adds corresponding elements of two vectors. // // Asm: VPADDW, CPU Feature: AVX512BW -func (x Uint16x32) AddMasked(y Uint16x32, z Mask16x32) Uint16x32 +func (x Uint16x32) AddMasked(y Uint16x32, mask Mask16x32) Uint16x32 // AddMasked adds corresponding elements of two vectors. // // Asm: VPADDD, CPU Feature: AVX512F -func (x Uint32x4) AddMasked(y Uint32x4, z Mask32x4) Uint32x4 +func (x Uint32x4) AddMasked(y Uint32x4, mask Mask32x4) Uint32x4 // AddMasked adds corresponding elements of two vectors. // // Asm: VPADDD, CPU Feature: AVX512F -func (x Uint32x8) AddMasked(y Uint32x8, z Mask32x8) Uint32x8 +func (x Uint32x8) AddMasked(y Uint32x8, mask Mask32x8) Uint32x8 // AddMasked adds corresponding elements of two vectors. // // Asm: VPADDD, CPU Feature: AVX512F -func (x Uint32x16) AddMasked(y Uint32x16, z Mask32x16) Uint32x16 +func (x Uint32x16) AddMasked(y Uint32x16, mask Mask32x16) Uint32x16 // AddMasked adds corresponding elements of two vectors. // // Asm: VPADDQ, CPU Feature: AVX512F -func (x Uint64x2) AddMasked(y Uint64x2, z Mask64x2) Uint64x2 +func (x Uint64x2) AddMasked(y Uint64x2, mask Mask64x2) Uint64x2 // AddMasked adds corresponding elements of two vectors. // // Asm: VPADDQ, CPU Feature: AVX512F -func (x Uint64x4) AddMasked(y Uint64x4, z Mask64x4) Uint64x4 +func (x Uint64x4) AddMasked(y Uint64x4, mask Mask64x4) Uint64x4 // AddMasked adds corresponding elements of two vectors. // // Asm: VPADDQ, CPU Feature: AVX512F -func (x Uint64x8) AddMasked(y Uint64x8, z Mask64x8) Uint64x8 +func (x Uint64x8) AddMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* AddSub */ @@ -561,62 +561,62 @@ func (x Uint64x8) And(y Uint64x8) Uint64x8 // AndMasked performs a masked bitwise AND operation between two vectors. // // Asm: VPANDD, CPU Feature: AVX512F -func (x Int32x4) AndMasked(y Int32x4, z Mask32x4) Int32x4 +func (x Int32x4) AndMasked(y Int32x4, mask Mask32x4) Int32x4 // AndMasked performs a masked bitwise AND operation between two vectors. // // Asm: VPANDD, CPU Feature: AVX512F -func (x Int32x8) AndMasked(y Int32x8, z Mask32x8) Int32x8 +func (x Int32x8) AndMasked(y Int32x8, mask Mask32x8) Int32x8 // AndMasked performs a masked bitwise AND operation between two vectors. // // Asm: VPANDD, CPU Feature: AVX512F -func (x Int32x16) AndMasked(y Int32x16, z Mask32x16) Int32x16 +func (x Int32x16) AndMasked(y Int32x16, mask Mask32x16) Int32x16 // AndMasked performs a masked bitwise AND operation between two vectors. // // Asm: VPANDQ, CPU Feature: AVX512F -func (x Int64x2) AndMasked(y Int64x2, z Mask64x2) Int64x2 +func (x Int64x2) AndMasked(y Int64x2, mask Mask64x2) Int64x2 // AndMasked performs a masked bitwise AND operation between two vectors. // // Asm: VPANDQ, CPU Feature: AVX512F -func (x Int64x4) AndMasked(y Int64x4, z Mask64x4) Int64x4 +func (x Int64x4) AndMasked(y Int64x4, mask Mask64x4) Int64x4 // AndMasked performs a masked bitwise AND operation between two vectors. 
// // Asm: VPANDQ, CPU Feature: AVX512F -func (x Int64x8) AndMasked(y Int64x8, z Mask64x8) Int64x8 +func (x Int64x8) AndMasked(y Int64x8, mask Mask64x8) Int64x8 // AndMasked performs a masked bitwise AND operation between two vectors. // // Asm: VPANDD, CPU Feature: AVX512F -func (x Uint32x4) AndMasked(y Uint32x4, z Mask32x4) Uint32x4 +func (x Uint32x4) AndMasked(y Uint32x4, mask Mask32x4) Uint32x4 // AndMasked performs a masked bitwise AND operation between two vectors. // // Asm: VPANDD, CPU Feature: AVX512F -func (x Uint32x8) AndMasked(y Uint32x8, z Mask32x8) Uint32x8 +func (x Uint32x8) AndMasked(y Uint32x8, mask Mask32x8) Uint32x8 // AndMasked performs a masked bitwise AND operation between two vectors. // // Asm: VPANDD, CPU Feature: AVX512F -func (x Uint32x16) AndMasked(y Uint32x16, z Mask32x16) Uint32x16 +func (x Uint32x16) AndMasked(y Uint32x16, mask Mask32x16) Uint32x16 // AndMasked performs a masked bitwise AND operation between two vectors. // // Asm: VPANDQ, CPU Feature: AVX512F -func (x Uint64x2) AndMasked(y Uint64x2, z Mask64x2) Uint64x2 +func (x Uint64x2) AndMasked(y Uint64x2, mask Mask64x2) Uint64x2 // AndMasked performs a masked bitwise AND operation between two vectors. // // Asm: VPANDQ, CPU Feature: AVX512F -func (x Uint64x4) AndMasked(y Uint64x4, z Mask64x4) Uint64x4 +func (x Uint64x4) AndMasked(y Uint64x4, mask Mask64x4) Uint64x4 // AndMasked performs a masked bitwise AND operation between two vectors. // // Asm: VPANDQ, CPU Feature: AVX512F -func (x Uint64x8) AndMasked(y Uint64x8, z Mask64x8) Uint64x8 +func (x Uint64x8) AndMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* AndNot */ @@ -725,62 +725,62 @@ func (x Uint64x8) AndNot(y Uint64x8) Uint64x8 // AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // // Asm: VPANDND, CPU Feature: AVX512F -func (x Int32x4) AndNotMasked(y Int32x4, z Mask32x4) Int32x4 +func (x Int32x4) AndNotMasked(y Int32x4, mask Mask32x4) Int32x4 // AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // // Asm: VPANDND, CPU Feature: AVX512F -func (x Int32x8) AndNotMasked(y Int32x8, z Mask32x8) Int32x8 +func (x Int32x8) AndNotMasked(y Int32x8, mask Mask32x8) Int32x8 // AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // // Asm: VPANDND, CPU Feature: AVX512F -func (x Int32x16) AndNotMasked(y Int32x16, z Mask32x16) Int32x16 +func (x Int32x16) AndNotMasked(y Int32x16, mask Mask32x16) Int32x16 // AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // // Asm: VPANDNQ, CPU Feature: AVX512F -func (x Int64x2) AndNotMasked(y Int64x2, z Mask64x2) Int64x2 +func (x Int64x2) AndNotMasked(y Int64x2, mask Mask64x2) Int64x2 // AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // // Asm: VPANDNQ, CPU Feature: AVX512F -func (x Int64x4) AndNotMasked(y Int64x4, z Mask64x4) Int64x4 +func (x Int64x4) AndNotMasked(y Int64x4, mask Mask64x4) Int64x4 // AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // // Asm: VPANDNQ, CPU Feature: AVX512F -func (x Int64x8) AndNotMasked(y Int64x8, z Mask64x8) Int64x8 +func (x Int64x8) AndNotMasked(y Int64x8, mask Mask64x8) Int64x8 // AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // // Asm: VPANDND, CPU Feature: AVX512F -func (x Uint32x4) AndNotMasked(y Uint32x4, z Mask32x4) Uint32x4 +func (x Uint32x4) AndNotMasked(y Uint32x4, mask Mask32x4) Uint32x4 // AndNotMasked performs a masked bitwise AND NOT operation between two vectors. 
// // Asm: VPANDND, CPU Feature: AVX512F -func (x Uint32x8) AndNotMasked(y Uint32x8, z Mask32x8) Uint32x8 +func (x Uint32x8) AndNotMasked(y Uint32x8, mask Mask32x8) Uint32x8 // AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // // Asm: VPANDND, CPU Feature: AVX512F -func (x Uint32x16) AndNotMasked(y Uint32x16, z Mask32x16) Uint32x16 +func (x Uint32x16) AndNotMasked(y Uint32x16, mask Mask32x16) Uint32x16 // AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // // Asm: VPANDNQ, CPU Feature: AVX512F -func (x Uint64x2) AndNotMasked(y Uint64x2, z Mask64x2) Uint64x2 +func (x Uint64x2) AndNotMasked(y Uint64x2, mask Mask64x2) Uint64x2 // AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // // Asm: VPANDNQ, CPU Feature: AVX512F -func (x Uint64x4) AndNotMasked(y Uint64x4, z Mask64x4) Uint64x4 +func (x Uint64x4) AndNotMasked(y Uint64x4, mask Mask64x4) Uint64x4 // AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // // Asm: VPANDNQ, CPU Feature: AVX512F -func (x Uint64x8) AndNotMasked(y Uint64x8, z Mask64x8) Uint64x8 +func (x Uint64x8) AndNotMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* ApproximateReciprocal */ @@ -819,32 +819,32 @@ func (x Float64x8) ApproximateReciprocal() Float64x8 // ApproximateReciprocalMasked computes an approximate reciprocal of each element. // // Asm: VRCP14PS, CPU Feature: AVX512F -func (x Float32x4) ApproximateReciprocalMasked(y Mask32x4) Float32x4 +func (x Float32x4) ApproximateReciprocalMasked(mask Mask32x4) Float32x4 // ApproximateReciprocalMasked computes an approximate reciprocal of each element. // // Asm: VRCP14PS, CPU Feature: AVX512F -func (x Float32x8) ApproximateReciprocalMasked(y Mask32x8) Float32x8 +func (x Float32x8) ApproximateReciprocalMasked(mask Mask32x8) Float32x8 // ApproximateReciprocalMasked computes an approximate reciprocal of each element. // // Asm: VRCP14PS, CPU Feature: AVX512F -func (x Float32x16) ApproximateReciprocalMasked(y Mask32x16) Float32x16 +func (x Float32x16) ApproximateReciprocalMasked(mask Mask32x16) Float32x16 // ApproximateReciprocalMasked computes an approximate reciprocal of each element. // // Asm: VRCP14PD, CPU Feature: AVX512F -func (x Float64x2) ApproximateReciprocalMasked(y Mask64x2) Float64x2 +func (x Float64x2) ApproximateReciprocalMasked(mask Mask64x2) Float64x2 // ApproximateReciprocalMasked computes an approximate reciprocal of each element. // // Asm: VRCP14PD, CPU Feature: AVX512F -func (x Float64x4) ApproximateReciprocalMasked(y Mask64x4) Float64x4 +func (x Float64x4) ApproximateReciprocalMasked(mask Mask64x4) Float64x4 // ApproximateReciprocalMasked computes an approximate reciprocal of each element. // // Asm: VRCP14PD, CPU Feature: AVX512F -func (x Float64x8) ApproximateReciprocalMasked(y Mask64x8) Float64x8 +func (x Float64x8) ApproximateReciprocalMasked(mask Mask64x8) Float64x8 /* ApproximateReciprocalOfSqrt */ @@ -883,32 +883,32 @@ func (x Float64x8) ApproximateReciprocalOfSqrt() Float64x8 // ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. // // Asm: VRSQRT14PS, CPU Feature: AVX512F -func (x Float32x4) ApproximateReciprocalOfSqrtMasked(y Mask32x4) Float32x4 +func (x Float32x4) ApproximateReciprocalOfSqrtMasked(mask Mask32x4) Float32x4 // ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. 
// // Asm: VRSQRT14PS, CPU Feature: AVX512F -func (x Float32x8) ApproximateReciprocalOfSqrtMasked(y Mask32x8) Float32x8 +func (x Float32x8) ApproximateReciprocalOfSqrtMasked(mask Mask32x8) Float32x8 // ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. // // Asm: VRSQRT14PS, CPU Feature: AVX512F -func (x Float32x16) ApproximateReciprocalOfSqrtMasked(y Mask32x16) Float32x16 +func (x Float32x16) ApproximateReciprocalOfSqrtMasked(mask Mask32x16) Float32x16 // ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. // // Asm: VRSQRT14PD, CPU Feature: AVX512F -func (x Float64x2) ApproximateReciprocalOfSqrtMasked(y Mask64x2) Float64x2 +func (x Float64x2) ApproximateReciprocalOfSqrtMasked(mask Mask64x2) Float64x2 // ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. // // Asm: VRSQRT14PD, CPU Feature: AVX512F -func (x Float64x4) ApproximateReciprocalOfSqrtMasked(y Mask64x4) Float64x4 +func (x Float64x4) ApproximateReciprocalOfSqrtMasked(mask Mask64x4) Float64x4 // ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. // // Asm: VRSQRT14PD, CPU Feature: AVX512F -func (x Float64x8) ApproximateReciprocalOfSqrtMasked(y Mask64x8) Float64x8 +func (x Float64x8) ApproximateReciprocalOfSqrtMasked(mask Mask64x8) Float64x8 /* Average */ @@ -947,32 +947,32 @@ func (x Uint16x32) Average(y Uint16x32) Uint16x32 // AverageMasked computes the rounded average of corresponding elements. // // Asm: VPAVGB, CPU Feature: AVX512BW -func (x Uint8x16) AverageMasked(y Uint8x16, z Mask8x16) Uint8x16 +func (x Uint8x16) AverageMasked(y Uint8x16, mask Mask8x16) Uint8x16 // AverageMasked computes the rounded average of corresponding elements. // // Asm: VPAVGB, CPU Feature: AVX512BW -func (x Uint8x32) AverageMasked(y Uint8x32, z Mask8x32) Uint8x32 +func (x Uint8x32) AverageMasked(y Uint8x32, mask Mask8x32) Uint8x32 // AverageMasked computes the rounded average of corresponding elements. // // Asm: VPAVGB, CPU Feature: AVX512BW -func (x Uint8x64) AverageMasked(y Uint8x64, z Mask8x64) Uint8x64 +func (x Uint8x64) AverageMasked(y Uint8x64, mask Mask8x64) Uint8x64 // AverageMasked computes the rounded average of corresponding elements. // // Asm: VPAVGW, CPU Feature: AVX512BW -func (x Uint16x8) AverageMasked(y Uint16x8, z Mask16x8) Uint16x8 +func (x Uint16x8) AverageMasked(y Uint16x8, mask Mask16x8) Uint16x8 // AverageMasked computes the rounded average of corresponding elements. // // Asm: VPAVGW, CPU Feature: AVX512BW -func (x Uint16x16) AverageMasked(y Uint16x16, z Mask16x16) Uint16x16 +func (x Uint16x16) AverageMasked(y Uint16x16, mask Mask16x16) Uint16x16 // AverageMasked computes the rounded average of corresponding elements. // // Asm: VPAVGW, CPU Feature: AVX512BW -func (x Uint16x32) AverageMasked(y Uint16x32, z Mask16x32) Uint16x32 +func (x Uint16x32) AverageMasked(y Uint16x32, mask Mask16x32) Uint16x32 /* Ceil */ @@ -1047,42 +1047,42 @@ func (x Float64x8) CeilWithPrecision(prec uint8) Float64x8 // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x4) CeilWithPrecisionMasked(prec uint8, y Mask32x4) Float32x4 +func (x Float32x4) CeilWithPrecisionMasked(prec uint8, mask Mask32x4) Float32x4 // CeilWithPrecisionMasked rounds elements up with specified precision, masked. 
// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x8) CeilWithPrecisionMasked(prec uint8, y Mask32x8) Float32x8 +func (x Float32x8) CeilWithPrecisionMasked(prec uint8, mask Mask32x8) Float32x8 // CeilWithPrecisionMasked rounds elements up with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x16) CeilWithPrecisionMasked(prec uint8, y Mask32x16) Float32x16 +func (x Float32x16) CeilWithPrecisionMasked(prec uint8, mask Mask32x16) Float32x16 // CeilWithPrecisionMasked rounds elements up with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x2) CeilWithPrecisionMasked(prec uint8, y Mask64x2) Float64x2 +func (x Float64x2) CeilWithPrecisionMasked(prec uint8, mask Mask64x2) Float64x2 // CeilWithPrecisionMasked rounds elements up with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x4) CeilWithPrecisionMasked(prec uint8, y Mask64x4) Float64x4 +func (x Float64x4) CeilWithPrecisionMasked(prec uint8, mask Mask64x4) Float64x4 // CeilWithPrecisionMasked rounds elements up with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x8) CeilWithPrecisionMasked(prec uint8, y Mask64x8) Float64x8 +func (x Float64x8) CeilWithPrecisionMasked(prec uint8, mask Mask64x8) Float64x8 /* DiffWithCeilWithPrecision */ @@ -1135,42 +1135,42 @@ func (x Float64x8) DiffWithCeilWithPrecision(prec uint8) Float64x8 // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x4) DiffWithCeilWithPrecisionMasked(prec uint8, y Mask32x4) Float32x4 +func (x Float32x4) DiffWithCeilWithPrecisionMasked(prec uint8, mask Mask32x4) Float32x4 // DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x8) DiffWithCeilWithPrecisionMasked(prec uint8, y Mask32x8) Float32x8 +func (x Float32x8) DiffWithCeilWithPrecisionMasked(prec uint8, mask Mask32x8) Float32x8 // DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x16) DiffWithCeilWithPrecisionMasked(prec uint8, y Mask32x16) Float32x16 +func (x Float32x16) DiffWithCeilWithPrecisionMasked(prec uint8, mask Mask32x16) Float32x16 // DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x2) DiffWithCeilWithPrecisionMasked(prec uint8, y Mask64x2) Float64x2 +func (x Float64x2) DiffWithCeilWithPrecisionMasked(prec uint8, mask Mask64x2) Float64x2 // DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. 
// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x4) DiffWithCeilWithPrecisionMasked(prec uint8, y Mask64x4) Float64x4 +func (x Float64x4) DiffWithCeilWithPrecisionMasked(prec uint8, mask Mask64x4) Float64x4 // DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x8) DiffWithCeilWithPrecisionMasked(prec uint8, y Mask64x8) Float64x8 +func (x Float64x8) DiffWithCeilWithPrecisionMasked(prec uint8, mask Mask64x8) Float64x8 /* DiffWithFloorWithPrecision */ @@ -1223,42 +1223,42 @@ func (x Float64x8) DiffWithFloorWithPrecision(prec uint8) Float64x8 // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x4) DiffWithFloorWithPrecisionMasked(prec uint8, y Mask32x4) Float32x4 +func (x Float32x4) DiffWithFloorWithPrecisionMasked(prec uint8, mask Mask32x4) Float32x4 // DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x8) DiffWithFloorWithPrecisionMasked(prec uint8, y Mask32x8) Float32x8 +func (x Float32x8) DiffWithFloorWithPrecisionMasked(prec uint8, mask Mask32x8) Float32x8 // DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x16) DiffWithFloorWithPrecisionMasked(prec uint8, y Mask32x16) Float32x16 +func (x Float32x16) DiffWithFloorWithPrecisionMasked(prec uint8, mask Mask32x16) Float32x16 // DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x2) DiffWithFloorWithPrecisionMasked(prec uint8, y Mask64x2) Float64x2 +func (x Float64x2) DiffWithFloorWithPrecisionMasked(prec uint8, mask Mask64x2) Float64x2 // DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x4) DiffWithFloorWithPrecisionMasked(prec uint8, y Mask64x4) Float64x4 +func (x Float64x4) DiffWithFloorWithPrecisionMasked(prec uint8, mask Mask64x4) Float64x4 // DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x8) DiffWithFloorWithPrecisionMasked(prec uint8, y Mask64x8) Float64x8 +func (x Float64x8) DiffWithFloorWithPrecisionMasked(prec uint8, mask Mask64x8) Float64x8 /* DiffWithRoundWithPrecision */ @@ -1311,42 +1311,42 @@ func (x Float64x8) DiffWithRoundWithPrecision(prec uint8) Float64x8 // prec is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x4) DiffWithRoundWithPrecisionMasked(prec uint8, y Mask32x4) Float32x4 +func (x Float32x4) DiffWithRoundWithPrecisionMasked(prec uint8, mask Mask32x4) Float32x4 // DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x8) DiffWithRoundWithPrecisionMasked(prec uint8, y Mask32x8) Float32x8 +func (x Float32x8) DiffWithRoundWithPrecisionMasked(prec uint8, mask Mask32x8) Float32x8 // DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x16) DiffWithRoundWithPrecisionMasked(prec uint8, y Mask32x16) Float32x16 +func (x Float32x16) DiffWithRoundWithPrecisionMasked(prec uint8, mask Mask32x16) Float32x16 // DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x2) DiffWithRoundWithPrecisionMasked(prec uint8, y Mask64x2) Float64x2 +func (x Float64x2) DiffWithRoundWithPrecisionMasked(prec uint8, mask Mask64x2) Float64x2 // DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x4) DiffWithRoundWithPrecisionMasked(prec uint8, y Mask64x4) Float64x4 +func (x Float64x4) DiffWithRoundWithPrecisionMasked(prec uint8, mask Mask64x4) Float64x4 // DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x8) DiffWithRoundWithPrecisionMasked(prec uint8, y Mask64x8) Float64x8 +func (x Float64x8) DiffWithRoundWithPrecisionMasked(prec uint8, mask Mask64x8) Float64x8 /* DiffWithTruncWithPrecision */ @@ -1399,42 +1399,42 @@ func (x Float64x8) DiffWithTruncWithPrecision(prec uint8) Float64x8 // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x4) DiffWithTruncWithPrecisionMasked(prec uint8, y Mask32x4) Float32x4 +func (x Float32x4) DiffWithTruncWithPrecisionMasked(prec uint8, mask Mask32x4) Float32x4 // DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x8) DiffWithTruncWithPrecisionMasked(prec uint8, y Mask32x8) Float32x8 +func (x Float32x8) DiffWithTruncWithPrecisionMasked(prec uint8, mask Mask32x8) Float32x8 // DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x16) DiffWithTruncWithPrecisionMasked(prec uint8, y Mask32x16) Float32x16 +func (x Float32x16) DiffWithTruncWithPrecisionMasked(prec uint8, mask Mask32x16) Float32x16 // DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x2) DiffWithTruncWithPrecisionMasked(prec uint8, y Mask64x2) Float64x2 +func (x Float64x2) DiffWithTruncWithPrecisionMasked(prec uint8, mask Mask64x2) Float64x2 // DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x4) DiffWithTruncWithPrecisionMasked(prec uint8, y Mask64x4) Float64x4 +func (x Float64x4) DiffWithTruncWithPrecisionMasked(prec uint8, mask Mask64x4) Float64x4 // DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x8) DiffWithTruncWithPrecisionMasked(prec uint8, y Mask64x8) Float64x8 +func (x Float64x8) DiffWithTruncWithPrecisionMasked(prec uint8, mask Mask64x8) Float64x8 /* Div */ @@ -1473,32 +1473,32 @@ func (x Float64x8) Div(y Float64x8) Float64x8 // DivMasked divides elements of two vectors. // // Asm: VDIVPS, CPU Feature: AVX512F -func (x Float32x4) DivMasked(y Float32x4, z Mask32x4) Float32x4 +func (x Float32x4) DivMasked(y Float32x4, mask Mask32x4) Float32x4 // DivMasked divides elements of two vectors. // // Asm: VDIVPS, CPU Feature: AVX512F -func (x Float32x8) DivMasked(y Float32x8, z Mask32x8) Float32x8 +func (x Float32x8) DivMasked(y Float32x8, mask Mask32x8) Float32x8 // DivMasked divides elements of two vectors. // // Asm: VDIVPS, CPU Feature: AVX512F -func (x Float32x16) DivMasked(y Float32x16, z Mask32x16) Float32x16 +func (x Float32x16) DivMasked(y Float32x16, mask Mask32x16) Float32x16 // DivMasked divides elements of two vectors. // // Asm: VDIVPD, CPU Feature: AVX512F -func (x Float64x2) DivMasked(y Float64x2, z Mask64x2) Float64x2 +func (x Float64x2) DivMasked(y Float64x2, mask Mask64x2) Float64x2 // DivMasked divides elements of two vectors. // // Asm: VDIVPD, CPU Feature: AVX512F -func (x Float64x4) DivMasked(y Float64x4, z Mask64x4) Float64x4 +func (x Float64x4) DivMasked(y Float64x4, mask Mask64x4) Float64x4 // DivMasked divides elements of two vectors. // // Asm: VDIVPD, CPU Feature: AVX512F -func (x Float64x8) DivMasked(y Float64x8, z Mask64x8) Float64x8 +func (x Float64x8) DivMasked(y Float64x8, mask Mask64x8) Float64x8 /* DotProdBroadcast */ @@ -1674,152 +1674,152 @@ func (x Uint64x8) Equal(y Uint64x8) Mask64x8 // EqualMasked compares for equality, masked. // // Asm: VCMPPS, CPU Feature: AVX512F -func (x Float32x4) EqualMasked(y Float32x4, z Mask32x4) Mask32x4 +func (x Float32x4) EqualMasked(y Float32x4, mask Mask32x4) Mask32x4 // EqualMasked compares for equality, masked. // // Asm: VCMPPS, CPU Feature: AVX512F -func (x Float32x8) EqualMasked(y Float32x8, z Mask32x8) Mask32x8 +func (x Float32x8) EqualMasked(y Float32x8, mask Mask32x8) Mask32x8 // EqualMasked compares for equality, masked. 
// // Asm: VCMPPS, CPU Feature: AVX512F -func (x Float32x16) EqualMasked(y Float32x16, z Mask32x16) Mask32x16 +func (x Float32x16) EqualMasked(y Float32x16, mask Mask32x16) Mask32x16 // EqualMasked compares for equality, masked. // // Asm: VCMPPD, CPU Feature: AVX512F -func (x Float64x2) EqualMasked(y Float64x2, z Mask64x2) Mask64x2 +func (x Float64x2) EqualMasked(y Float64x2, mask Mask64x2) Mask64x2 // EqualMasked compares for equality, masked. // // Asm: VCMPPD, CPU Feature: AVX512F -func (x Float64x4) EqualMasked(y Float64x4, z Mask64x4) Mask64x4 +func (x Float64x4) EqualMasked(y Float64x4, mask Mask64x4) Mask64x4 // EqualMasked compares for equality, masked. // // Asm: VCMPPD, CPU Feature: AVX512F -func (x Float64x8) EqualMasked(y Float64x8, z Mask64x8) Mask64x8 +func (x Float64x8) EqualMasked(y Float64x8, mask Mask64x8) Mask64x8 // EqualMasked compares for equality, masked. // // Asm: VPCMPB, CPU Feature: AVX512BW -func (x Int8x16) EqualMasked(y Int8x16, z Mask8x16) Mask8x16 +func (x Int8x16) EqualMasked(y Int8x16, mask Mask8x16) Mask8x16 // EqualMasked compares for equality, masked. // // Asm: VPCMPB, CPU Feature: AVX512BW -func (x Int8x32) EqualMasked(y Int8x32, z Mask8x32) Mask8x32 +func (x Int8x32) EqualMasked(y Int8x32, mask Mask8x32) Mask8x32 // EqualMasked compares for equality, masked. // // Asm: VPCMPB, CPU Feature: AVX512BW -func (x Int8x64) EqualMasked(y Int8x64, z Mask8x64) Mask8x64 +func (x Int8x64) EqualMasked(y Int8x64, mask Mask8x64) Mask8x64 // EqualMasked compares for equality, masked. // // Asm: VPCMPW, CPU Feature: AVX512BW -func (x Int16x8) EqualMasked(y Int16x8, z Mask16x8) Mask16x8 +func (x Int16x8) EqualMasked(y Int16x8, mask Mask16x8) Mask16x8 // EqualMasked compares for equality, masked. // // Asm: VPCMPW, CPU Feature: AVX512BW -func (x Int16x16) EqualMasked(y Int16x16, z Mask16x16) Mask16x16 +func (x Int16x16) EqualMasked(y Int16x16, mask Mask16x16) Mask16x16 // EqualMasked compares for equality, masked. // // Asm: VPCMPW, CPU Feature: AVX512BW -func (x Int16x32) EqualMasked(y Int16x32, z Mask16x32) Mask16x32 +func (x Int16x32) EqualMasked(y Int16x32, mask Mask16x32) Mask16x32 // EqualMasked compares for equality, masked. // // Asm: VPCMPD, CPU Feature: AVX512F -func (x Int32x4) EqualMasked(y Int32x4, z Mask32x4) Mask32x4 +func (x Int32x4) EqualMasked(y Int32x4, mask Mask32x4) Mask32x4 // EqualMasked compares for equality, masked. // // Asm: VPCMPD, CPU Feature: AVX512F -func (x Int32x8) EqualMasked(y Int32x8, z Mask32x8) Mask32x8 +func (x Int32x8) EqualMasked(y Int32x8, mask Mask32x8) Mask32x8 // EqualMasked compares for equality, masked. // // Asm: VPCMPD, CPU Feature: AVX512F -func (x Int32x16) EqualMasked(y Int32x16, z Mask32x16) Mask32x16 +func (x Int32x16) EqualMasked(y Int32x16, mask Mask32x16) Mask32x16 // EqualMasked compares for equality, masked. // // Asm: VPCMPQ, CPU Feature: AVX512F -func (x Int64x2) EqualMasked(y Int64x2, z Mask64x2) Mask64x2 +func (x Int64x2) EqualMasked(y Int64x2, mask Mask64x2) Mask64x2 // EqualMasked compares for equality, masked. // // Asm: VPCMPQ, CPU Feature: AVX512F -func (x Int64x4) EqualMasked(y Int64x4, z Mask64x4) Mask64x4 +func (x Int64x4) EqualMasked(y Int64x4, mask Mask64x4) Mask64x4 // EqualMasked compares for equality, masked. // // Asm: VPCMPQ, CPU Feature: AVX512F -func (x Int64x8) EqualMasked(y Int64x8, z Mask64x8) Mask64x8 +func (x Int64x8) EqualMasked(y Int64x8, mask Mask64x8) Mask64x8 // EqualMasked compares for equality, masked. 
// // Asm: VPCMPUB, CPU Feature: AVX512BW -func (x Uint8x16) EqualMasked(y Uint8x16, z Mask8x16) Mask8x16 +func (x Uint8x16) EqualMasked(y Uint8x16, mask Mask8x16) Mask8x16 // EqualMasked compares for equality, masked. // // Asm: VPCMPUB, CPU Feature: AVX512BW -func (x Uint8x32) EqualMasked(y Uint8x32, z Mask8x32) Mask8x32 +func (x Uint8x32) EqualMasked(y Uint8x32, mask Mask8x32) Mask8x32 // EqualMasked compares for equality, masked. // // Asm: VPCMPUB, CPU Feature: AVX512BW -func (x Uint8x64) EqualMasked(y Uint8x64, z Mask8x64) Mask8x64 +func (x Uint8x64) EqualMasked(y Uint8x64, mask Mask8x64) Mask8x64 // EqualMasked compares for equality, masked. // // Asm: VPCMPUW, CPU Feature: AVX512BW -func (x Uint16x8) EqualMasked(y Uint16x8, z Mask16x8) Mask16x8 +func (x Uint16x8) EqualMasked(y Uint16x8, mask Mask16x8) Mask16x8 // EqualMasked compares for equality, masked. // // Asm: VPCMPUW, CPU Feature: AVX512BW -func (x Uint16x16) EqualMasked(y Uint16x16, z Mask16x16) Mask16x16 +func (x Uint16x16) EqualMasked(y Uint16x16, mask Mask16x16) Mask16x16 // EqualMasked compares for equality, masked. // // Asm: VPCMPUW, CPU Feature: AVX512BW -func (x Uint16x32) EqualMasked(y Uint16x32, z Mask16x32) Mask16x32 +func (x Uint16x32) EqualMasked(y Uint16x32, mask Mask16x32) Mask16x32 // EqualMasked compares for equality, masked. // // Asm: VPCMPUD, CPU Feature: AVX512F -func (x Uint32x4) EqualMasked(y Uint32x4, z Mask32x4) Mask32x4 +func (x Uint32x4) EqualMasked(y Uint32x4, mask Mask32x4) Mask32x4 // EqualMasked compares for equality, masked. // // Asm: VPCMPUD, CPU Feature: AVX512F -func (x Uint32x8) EqualMasked(y Uint32x8, z Mask32x8) Mask32x8 +func (x Uint32x8) EqualMasked(y Uint32x8, mask Mask32x8) Mask32x8 // EqualMasked compares for equality, masked. // // Asm: VPCMPUD, CPU Feature: AVX512F -func (x Uint32x16) EqualMasked(y Uint32x16, z Mask32x16) Mask32x16 +func (x Uint32x16) EqualMasked(y Uint32x16, mask Mask32x16) Mask32x16 // EqualMasked compares for equality, masked. // // Asm: VPCMPUQ, CPU Feature: AVX512F -func (x Uint64x2) EqualMasked(y Uint64x2, z Mask64x2) Mask64x2 +func (x Uint64x2) EqualMasked(y Uint64x2, mask Mask64x2) Mask64x2 // EqualMasked compares for equality, masked. // // Asm: VPCMPUQ, CPU Feature: AVX512F -func (x Uint64x4) EqualMasked(y Uint64x4, z Mask64x4) Mask64x4 +func (x Uint64x4) EqualMasked(y Uint64x4, mask Mask64x4) Mask64x4 // EqualMasked compares for equality, masked. // // Asm: VPCMPUQ, CPU Feature: AVX512F -func (x Uint64x8) EqualMasked(y Uint64x8, z Mask64x8) Mask64x8 +func (x Uint64x8) EqualMasked(y Uint64x8, mask Mask64x8) Mask64x8 /* Floor */ @@ -1894,42 +1894,42 @@ func (x Float64x8) FloorWithPrecision(prec uint8) Float64x8 // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x4) FloorWithPrecisionMasked(prec uint8, y Mask32x4) Float32x4 +func (x Float32x4) FloorWithPrecisionMasked(prec uint8, mask Mask32x4) Float32x4 // FloorWithPrecisionMasked rounds elements down with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x8) FloorWithPrecisionMasked(prec uint8, y Mask32x8) Float32x8 +func (x Float32x8) FloorWithPrecisionMasked(prec uint8, mask Mask32x8) Float32x8 // FloorWithPrecisionMasked rounds elements down with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. 
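// NOTE (editorial illustration, not part of this patch): masked compares both
// consume and produce a mask, so they compose with an existing mask. Hypothetical
// helper; it reports equality only for lanes already selected by m, assuming
// lanes outside m come back unset.
func equalWithin(x, y Int32x8, m Mask32x8) Mask32x8 {
	return x.EqualMasked(y, m) // VPCMPD restricted to the lanes of m
}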
// // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x16) FloorWithPrecisionMasked(prec uint8, y Mask32x16) Float32x16 +func (x Float32x16) FloorWithPrecisionMasked(prec uint8, mask Mask32x16) Float32x16 // FloorWithPrecisionMasked rounds elements down with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x2) FloorWithPrecisionMasked(prec uint8, y Mask64x2) Float64x2 +func (x Float64x2) FloorWithPrecisionMasked(prec uint8, mask Mask64x2) Float64x2 // FloorWithPrecisionMasked rounds elements down with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x4) FloorWithPrecisionMasked(prec uint8, y Mask64x4) Float64x4 +func (x Float64x4) FloorWithPrecisionMasked(prec uint8, mask Mask64x4) Float64x4 // FloorWithPrecisionMasked rounds elements down with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x8) FloorWithPrecisionMasked(prec uint8, y Mask64x8) Float64x8 +func (x Float64x8) FloorWithPrecisionMasked(prec uint8, mask Mask64x8) Float64x8 /* FusedMultiplyAdd */ @@ -1968,32 +1968,32 @@ func (x Float64x8) FusedMultiplyAdd(y Float64x8, z Float64x8) Float64x8 // FusedMultiplyAddMasked performs `(v1 * v2) + v3`. // // Asm: VFMADD213PS, CPU Feature: AVX512F -func (x Float32x4) FusedMultiplyAddMasked(y Float32x4, z Float32x4, u Mask32x4) Float32x4 +func (x Float32x4) FusedMultiplyAddMasked(y Float32x4, z Float32x4, mask Mask32x4) Float32x4 // FusedMultiplyAddMasked performs `(v1 * v2) + v3`. // // Asm: VFMADD213PS, CPU Feature: AVX512F -func (x Float32x8) FusedMultiplyAddMasked(y Float32x8, z Float32x8, u Mask32x8) Float32x8 +func (x Float32x8) FusedMultiplyAddMasked(y Float32x8, z Float32x8, mask Mask32x8) Float32x8 // FusedMultiplyAddMasked performs `(v1 * v2) + v3`. // // Asm: VFMADD213PS, CPU Feature: AVX512F -func (x Float32x16) FusedMultiplyAddMasked(y Float32x16, z Float32x16, u Mask32x16) Float32x16 +func (x Float32x16) FusedMultiplyAddMasked(y Float32x16, z Float32x16, mask Mask32x16) Float32x16 // FusedMultiplyAddMasked performs `(v1 * v2) + v3`. // // Asm: VFMADD213PD, CPU Feature: AVX512F -func (x Float64x2) FusedMultiplyAddMasked(y Float64x2, z Float64x2, u Mask64x2) Float64x2 +func (x Float64x2) FusedMultiplyAddMasked(y Float64x2, z Float64x2, mask Mask64x2) Float64x2 // FusedMultiplyAddMasked performs `(v1 * v2) + v3`. // // Asm: VFMADD213PD, CPU Feature: AVX512F -func (x Float64x4) FusedMultiplyAddMasked(y Float64x4, z Float64x4, u Mask64x4) Float64x4 +func (x Float64x4) FusedMultiplyAddMasked(y Float64x4, z Float64x4, mask Mask64x4) Float64x4 // FusedMultiplyAddMasked performs `(v1 * v2) + v3`. // // Asm: VFMADD213PD, CPU Feature: AVX512F -func (x Float64x8) FusedMultiplyAddMasked(y Float64x8, z Float64x8, u Mask64x8) Float64x8 +func (x Float64x8) FusedMultiplyAddMasked(y Float64x8, z Float64x8, mask Mask64x8) Float64x8 /* FusedMultiplyAddSub */ @@ -2032,32 +2032,32 @@ func (x Float64x8) FusedMultiplyAddSub(y Float64x8, z Float64x8) Float64x8 // FusedMultiplyAddSubMasked performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. 
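// NOTE (editorial illustration, not part of this patch): per the comments above,
// prec must be a compile-time constant, so it is written as a literal here; the
// value 2 is arbitrary and the helper name is hypothetical.
func floorWithPrecWhere(x Float64x8, m Mask64x8) Float64x8 {
	return x.FloorWithPrecisionMasked(2, m) // VRNDSCALEPD with a constant precision
}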
// // Asm: VFMADDSUB213PS, CPU Feature: AVX512F -func (x Float32x4) FusedMultiplyAddSubMasked(y Float32x4, z Float32x4, u Mask32x4) Float32x4 +func (x Float32x4) FusedMultiplyAddSubMasked(y Float32x4, z Float32x4, mask Mask32x4) Float32x4 // FusedMultiplyAddSubMasked performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // // Asm: VFMADDSUB213PS, CPU Feature: AVX512F -func (x Float32x8) FusedMultiplyAddSubMasked(y Float32x8, z Float32x8, u Mask32x8) Float32x8 +func (x Float32x8) FusedMultiplyAddSubMasked(y Float32x8, z Float32x8, mask Mask32x8) Float32x8 // FusedMultiplyAddSubMasked performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // // Asm: VFMADDSUB213PS, CPU Feature: AVX512F -func (x Float32x16) FusedMultiplyAddSubMasked(y Float32x16, z Float32x16, u Mask32x16) Float32x16 +func (x Float32x16) FusedMultiplyAddSubMasked(y Float32x16, z Float32x16, mask Mask32x16) Float32x16 // FusedMultiplyAddSubMasked performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // // Asm: VFMADDSUB213PD, CPU Feature: AVX512F -func (x Float64x2) FusedMultiplyAddSubMasked(y Float64x2, z Float64x2, u Mask64x2) Float64x2 +func (x Float64x2) FusedMultiplyAddSubMasked(y Float64x2, z Float64x2, mask Mask64x2) Float64x2 // FusedMultiplyAddSubMasked performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // // Asm: VFMADDSUB213PD, CPU Feature: AVX512F -func (x Float64x4) FusedMultiplyAddSubMasked(y Float64x4, z Float64x4, u Mask64x4) Float64x4 +func (x Float64x4) FusedMultiplyAddSubMasked(y Float64x4, z Float64x4, mask Mask64x4) Float64x4 // FusedMultiplyAddSubMasked performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // // Asm: VFMADDSUB213PD, CPU Feature: AVX512F -func (x Float64x8) FusedMultiplyAddSubMasked(y Float64x8, z Float64x8, u Mask64x8) Float64x8 +func (x Float64x8) FusedMultiplyAddSubMasked(y Float64x8, z Float64x8, mask Mask64x8) Float64x8 /* FusedMultiplySubAdd */ @@ -2096,32 +2096,32 @@ func (x Float64x8) FusedMultiplySubAdd(y Float64x8, z Float64x8) Float64x8 // FusedMultiplySubAddMasked performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // // Asm: VFMSUBADD213PS, CPU Feature: AVX512F -func (x Float32x4) FusedMultiplySubAddMasked(y Float32x4, z Float32x4, u Mask32x4) Float32x4 +func (x Float32x4) FusedMultiplySubAddMasked(y Float32x4, z Float32x4, mask Mask32x4) Float32x4 // FusedMultiplySubAddMasked performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // // Asm: VFMSUBADD213PS, CPU Feature: AVX512F -func (x Float32x8) FusedMultiplySubAddMasked(y Float32x8, z Float32x8, u Mask32x8) Float32x8 +func (x Float32x8) FusedMultiplySubAddMasked(y Float32x8, z Float32x8, mask Mask32x8) Float32x8 // FusedMultiplySubAddMasked performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // // Asm: VFMSUBADD213PS, CPU Feature: AVX512F -func (x Float32x16) FusedMultiplySubAddMasked(y Float32x16, z Float32x16, u Mask32x16) Float32x16 +func (x Float32x16) FusedMultiplySubAddMasked(y Float32x16, z Float32x16, mask Mask32x16) Float32x16 // FusedMultiplySubAddMasked performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. 
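// NOTE (editorial illustration, not part of this patch): per the comments above,
// x.FusedMultiplyAddMasked(y, z, mask) computes (x*y)+z lane-wise under the mask.
// A hypothetical masked accumulate step built on that:
func fmaAccumulate(a, b, acc Float32x8, m Mask32x8) Float32x8 {
	return a.FusedMultiplyAddMasked(b, acc, m) // (a*b)+acc on the lanes m selects
}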
// // Asm: VFMSUBADD213PD, CPU Feature: AVX512F -func (x Float64x2) FusedMultiplySubAddMasked(y Float64x2, z Float64x2, u Mask64x2) Float64x2 +func (x Float64x2) FusedMultiplySubAddMasked(y Float64x2, z Float64x2, mask Mask64x2) Float64x2 // FusedMultiplySubAddMasked performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // // Asm: VFMSUBADD213PD, CPU Feature: AVX512F -func (x Float64x4) FusedMultiplySubAddMasked(y Float64x4, z Float64x4, u Mask64x4) Float64x4 +func (x Float64x4) FusedMultiplySubAddMasked(y Float64x4, z Float64x4, mask Mask64x4) Float64x4 // FusedMultiplySubAddMasked performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // // Asm: VFMSUBADD213PD, CPU Feature: AVX512F -func (x Float64x8) FusedMultiplySubAddMasked(y Float64x8, z Float64x8, u Mask64x8) Float64x8 +func (x Float64x8) FusedMultiplySubAddMasked(y Float64x8, z Float64x8, mask Mask64x8) Float64x8 /* GaloisFieldAffineTransform */ @@ -2283,19 +2283,19 @@ func (x Uint8x64) GaloisFieldMul(y Uint8x64) Uint8x64 // reduction polynomial x^8 + x^4 + x^3 + x + 1. // // Asm: VGF2P8MULB, CPU Feature: AVX512GFNI -func (x Uint8x16) GaloisFieldMulMasked(y Uint8x16, z Mask8x16) Uint8x16 +func (x Uint8x16) GaloisFieldMulMasked(y Uint8x16, mask Mask8x16) Uint8x16 // GaloisFieldMulMasked computes element-wise GF(2^8) multiplication with // reduction polynomial x^8 + x^4 + x^3 + x + 1. // // Asm: VGF2P8MULB, CPU Feature: AVX512GFNI -func (x Uint8x32) GaloisFieldMulMasked(y Uint8x32, z Mask8x32) Uint8x32 +func (x Uint8x32) GaloisFieldMulMasked(y Uint8x32, mask Mask8x32) Uint8x32 // GaloisFieldMulMasked computes element-wise GF(2^8) multiplication with // reduction polynomial x^8 + x^4 + x^3 + x + 1. // // Asm: VGF2P8MULB, CPU Feature: AVX512GFNI -func (x Uint8x64) GaloisFieldMulMasked(y Uint8x64, z Mask8x64) Uint8x64 +func (x Uint8x64) GaloisFieldMulMasked(y Uint8x64, mask Mask8x64) Uint8x64 /* Get128 */ @@ -2736,304 +2736,304 @@ func (x Uint64x8) GreaterEqual(y Uint64x8) Mask64x8 // GreaterEqualMasked compares for greater than or equal. // // Asm: VCMPPS, CPU Feature: AVX512F -func (x Float32x4) GreaterEqualMasked(y Float32x4, z Mask32x4) Mask32x4 +func (x Float32x4) GreaterEqualMasked(y Float32x4, mask Mask32x4) Mask32x4 // GreaterEqualMasked compares for greater than or equal. // // Asm: VCMPPS, CPU Feature: AVX512F -func (x Float32x8) GreaterEqualMasked(y Float32x8, z Mask32x8) Mask32x8 +func (x Float32x8) GreaterEqualMasked(y Float32x8, mask Mask32x8) Mask32x8 // GreaterEqualMasked compares for greater than or equal. // // Asm: VCMPPS, CPU Feature: AVX512F -func (x Float32x16) GreaterEqualMasked(y Float32x16, z Mask32x16) Mask32x16 +func (x Float32x16) GreaterEqualMasked(y Float32x16, mask Mask32x16) Mask32x16 // GreaterEqualMasked compares for greater than or equal. // // Asm: VCMPPD, CPU Feature: AVX512F -func (x Float64x2) GreaterEqualMasked(y Float64x2, z Mask64x2) Mask64x2 +func (x Float64x2) GreaterEqualMasked(y Float64x2, mask Mask64x2) Mask64x2 // GreaterEqualMasked compares for greater than or equal. // // Asm: VCMPPD, CPU Feature: AVX512F -func (x Float64x4) GreaterEqualMasked(y Float64x4, z Mask64x4) Mask64x4 +func (x Float64x4) GreaterEqualMasked(y Float64x4, mask Mask64x4) Mask64x4 // GreaterEqualMasked compares for greater than or equal. 
// // Asm: VCMPPD, CPU Feature: AVX512F -func (x Float64x8) GreaterEqualMasked(y Float64x8, z Mask64x8) Mask64x8 +func (x Float64x8) GreaterEqualMasked(y Float64x8, mask Mask64x8) Mask64x8 // GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPB, CPU Feature: AVX512BW -func (x Int8x16) GreaterEqualMasked(y Int8x16, z Mask8x16) Mask8x16 +func (x Int8x16) GreaterEqualMasked(y Int8x16, mask Mask8x16) Mask8x16 // GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPB, CPU Feature: AVX512BW -func (x Int8x32) GreaterEqualMasked(y Int8x32, z Mask8x32) Mask8x32 +func (x Int8x32) GreaterEqualMasked(y Int8x32, mask Mask8x32) Mask8x32 // GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPB, CPU Feature: AVX512BW -func (x Int8x64) GreaterEqualMasked(y Int8x64, z Mask8x64) Mask8x64 +func (x Int8x64) GreaterEqualMasked(y Int8x64, mask Mask8x64) Mask8x64 // GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPW, CPU Feature: AVX512BW -func (x Int16x8) GreaterEqualMasked(y Int16x8, z Mask16x8) Mask16x8 +func (x Int16x8) GreaterEqualMasked(y Int16x8, mask Mask16x8) Mask16x8 // GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPW, CPU Feature: AVX512BW -func (x Int16x16) GreaterEqualMasked(y Int16x16, z Mask16x16) Mask16x16 +func (x Int16x16) GreaterEqualMasked(y Int16x16, mask Mask16x16) Mask16x16 // GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPW, CPU Feature: AVX512BW -func (x Int16x32) GreaterEqualMasked(y Int16x32, z Mask16x32) Mask16x32 +func (x Int16x32) GreaterEqualMasked(y Int16x32, mask Mask16x32) Mask16x32 // GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPD, CPU Feature: AVX512F -func (x Int32x4) GreaterEqualMasked(y Int32x4, z Mask32x4) Mask32x4 +func (x Int32x4) GreaterEqualMasked(y Int32x4, mask Mask32x4) Mask32x4 // GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPD, CPU Feature: AVX512F -func (x Int32x8) GreaterEqualMasked(y Int32x8, z Mask32x8) Mask32x8 +func (x Int32x8) GreaterEqualMasked(y Int32x8, mask Mask32x8) Mask32x8 // GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPD, CPU Feature: AVX512F -func (x Int32x16) GreaterEqualMasked(y Int32x16, z Mask32x16) Mask32x16 +func (x Int32x16) GreaterEqualMasked(y Int32x16, mask Mask32x16) Mask32x16 // GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPQ, CPU Feature: AVX512F -func (x Int64x2) GreaterEqualMasked(y Int64x2, z Mask64x2) Mask64x2 +func (x Int64x2) GreaterEqualMasked(y Int64x2, mask Mask64x2) Mask64x2 // GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPQ, CPU Feature: AVX512F -func (x Int64x4) GreaterEqualMasked(y Int64x4, z Mask64x4) Mask64x4 +func (x Int64x4) GreaterEqualMasked(y Int64x4, mask Mask64x4) Mask64x4 // GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPQ, CPU Feature: AVX512F -func (x Int64x8) GreaterEqualMasked(y Int64x8, z Mask64x8) Mask64x8 +func (x Int64x8) GreaterEqualMasked(y Int64x8, mask Mask64x8) Mask64x8 // GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPUB, CPU Feature: AVX512BW -func (x Uint8x16) GreaterEqualMasked(y Uint8x16, z Mask8x16) Mask8x16 +func (x Uint8x16) GreaterEqualMasked(y Uint8x16, mask Mask8x16) Mask8x16 // GreaterEqualMasked compares for greater than or equal. 
// // Asm: VPCMPUB, CPU Feature: AVX512BW -func (x Uint8x32) GreaterEqualMasked(y Uint8x32, z Mask8x32) Mask8x32 +func (x Uint8x32) GreaterEqualMasked(y Uint8x32, mask Mask8x32) Mask8x32 // GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPUB, CPU Feature: AVX512BW -func (x Uint8x64) GreaterEqualMasked(y Uint8x64, z Mask8x64) Mask8x64 +func (x Uint8x64) GreaterEqualMasked(y Uint8x64, mask Mask8x64) Mask8x64 // GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPUW, CPU Feature: AVX512BW -func (x Uint16x8) GreaterEqualMasked(y Uint16x8, z Mask16x8) Mask16x8 +func (x Uint16x8) GreaterEqualMasked(y Uint16x8, mask Mask16x8) Mask16x8 // GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPUW, CPU Feature: AVX512BW -func (x Uint16x16) GreaterEqualMasked(y Uint16x16, z Mask16x16) Mask16x16 +func (x Uint16x16) GreaterEqualMasked(y Uint16x16, mask Mask16x16) Mask16x16 // GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPUW, CPU Feature: AVX512BW -func (x Uint16x32) GreaterEqualMasked(y Uint16x32, z Mask16x32) Mask16x32 +func (x Uint16x32) GreaterEqualMasked(y Uint16x32, mask Mask16x32) Mask16x32 // GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPUD, CPU Feature: AVX512F -func (x Uint32x4) GreaterEqualMasked(y Uint32x4, z Mask32x4) Mask32x4 +func (x Uint32x4) GreaterEqualMasked(y Uint32x4, mask Mask32x4) Mask32x4 // GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPUD, CPU Feature: AVX512F -func (x Uint32x8) GreaterEqualMasked(y Uint32x8, z Mask32x8) Mask32x8 +func (x Uint32x8) GreaterEqualMasked(y Uint32x8, mask Mask32x8) Mask32x8 // GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPUD, CPU Feature: AVX512F -func (x Uint32x16) GreaterEqualMasked(y Uint32x16, z Mask32x16) Mask32x16 +func (x Uint32x16) GreaterEqualMasked(y Uint32x16, mask Mask32x16) Mask32x16 // GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPUQ, CPU Feature: AVX512F -func (x Uint64x2) GreaterEqualMasked(y Uint64x2, z Mask64x2) Mask64x2 +func (x Uint64x2) GreaterEqualMasked(y Uint64x2, mask Mask64x2) Mask64x2 // GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPUQ, CPU Feature: AVX512F -func (x Uint64x4) GreaterEqualMasked(y Uint64x4, z Mask64x4) Mask64x4 +func (x Uint64x4) GreaterEqualMasked(y Uint64x4, mask Mask64x4) Mask64x4 // GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPUQ, CPU Feature: AVX512F -func (x Uint64x8) GreaterEqualMasked(y Uint64x8, z Mask64x8) Mask64x8 +func (x Uint64x8) GreaterEqualMasked(y Uint64x8, mask Mask64x8) Mask64x8 /* GreaterMasked */ // GreaterMasked compares for greater than. // // Asm: VCMPPS, CPU Feature: AVX512F -func (x Float32x4) GreaterMasked(y Float32x4, z Mask32x4) Mask32x4 +func (x Float32x4) GreaterMasked(y Float32x4, mask Mask32x4) Mask32x4 // GreaterMasked compares for greater than. // // Asm: VCMPPS, CPU Feature: AVX512F -func (x Float32x8) GreaterMasked(y Float32x8, z Mask32x8) Mask32x8 +func (x Float32x8) GreaterMasked(y Float32x8, mask Mask32x8) Mask32x8 // GreaterMasked compares for greater than. // // Asm: VCMPPS, CPU Feature: AVX512F -func (x Float32x16) GreaterMasked(y Float32x16, z Mask32x16) Mask32x16 +func (x Float32x16) GreaterMasked(y Float32x16, mask Mask32x16) Mask32x16 // GreaterMasked compares for greater than. 
// // Asm: VCMPPD, CPU Feature: AVX512F -func (x Float64x2) GreaterMasked(y Float64x2, z Mask64x2) Mask64x2 +func (x Float64x2) GreaterMasked(y Float64x2, mask Mask64x2) Mask64x2 // GreaterMasked compares for greater than. // // Asm: VCMPPD, CPU Feature: AVX512F -func (x Float64x4) GreaterMasked(y Float64x4, z Mask64x4) Mask64x4 +func (x Float64x4) GreaterMasked(y Float64x4, mask Mask64x4) Mask64x4 // GreaterMasked compares for greater than. // // Asm: VCMPPD, CPU Feature: AVX512F -func (x Float64x8) GreaterMasked(y Float64x8, z Mask64x8) Mask64x8 +func (x Float64x8) GreaterMasked(y Float64x8, mask Mask64x8) Mask64x8 // GreaterMasked compares for greater than. // // Asm: VPCMPB, CPU Feature: AVX512BW -func (x Int8x16) GreaterMasked(y Int8x16, z Mask8x16) Mask8x16 +func (x Int8x16) GreaterMasked(y Int8x16, mask Mask8x16) Mask8x16 // GreaterMasked compares for greater than. // // Asm: VPCMPB, CPU Feature: AVX512BW -func (x Int8x32) GreaterMasked(y Int8x32, z Mask8x32) Mask8x32 +func (x Int8x32) GreaterMasked(y Int8x32, mask Mask8x32) Mask8x32 // GreaterMasked compares for greater than. // // Asm: VPCMPB, CPU Feature: AVX512BW -func (x Int8x64) GreaterMasked(y Int8x64, z Mask8x64) Mask8x64 +func (x Int8x64) GreaterMasked(y Int8x64, mask Mask8x64) Mask8x64 // GreaterMasked compares for greater than. // // Asm: VPCMPW, CPU Feature: AVX512BW -func (x Int16x8) GreaterMasked(y Int16x8, z Mask16x8) Mask16x8 +func (x Int16x8) GreaterMasked(y Int16x8, mask Mask16x8) Mask16x8 // GreaterMasked compares for greater than. // // Asm: VPCMPW, CPU Feature: AVX512BW -func (x Int16x16) GreaterMasked(y Int16x16, z Mask16x16) Mask16x16 +func (x Int16x16) GreaterMasked(y Int16x16, mask Mask16x16) Mask16x16 // GreaterMasked compares for greater than. // // Asm: VPCMPW, CPU Feature: AVX512BW -func (x Int16x32) GreaterMasked(y Int16x32, z Mask16x32) Mask16x32 +func (x Int16x32) GreaterMasked(y Int16x32, mask Mask16x32) Mask16x32 // GreaterMasked compares for greater than. // // Asm: VPCMPD, CPU Feature: AVX512F -func (x Int32x4) GreaterMasked(y Int32x4, z Mask32x4) Mask32x4 +func (x Int32x4) GreaterMasked(y Int32x4, mask Mask32x4) Mask32x4 // GreaterMasked compares for greater than. // // Asm: VPCMPD, CPU Feature: AVX512F -func (x Int32x8) GreaterMasked(y Int32x8, z Mask32x8) Mask32x8 +func (x Int32x8) GreaterMasked(y Int32x8, mask Mask32x8) Mask32x8 // GreaterMasked compares for greater than. // // Asm: VPCMPD, CPU Feature: AVX512F -func (x Int32x16) GreaterMasked(y Int32x16, z Mask32x16) Mask32x16 +func (x Int32x16) GreaterMasked(y Int32x16, mask Mask32x16) Mask32x16 // GreaterMasked compares for greater than. // // Asm: VPCMPQ, CPU Feature: AVX512F -func (x Int64x2) GreaterMasked(y Int64x2, z Mask64x2) Mask64x2 +func (x Int64x2) GreaterMasked(y Int64x2, mask Mask64x2) Mask64x2 // GreaterMasked compares for greater than. // // Asm: VPCMPQ, CPU Feature: AVX512F -func (x Int64x4) GreaterMasked(y Int64x4, z Mask64x4) Mask64x4 +func (x Int64x4) GreaterMasked(y Int64x4, mask Mask64x4) Mask64x4 // GreaterMasked compares for greater than. // // Asm: VPCMPQ, CPU Feature: AVX512F -func (x Int64x8) GreaterMasked(y Int64x8, z Mask64x8) Mask64x8 +func (x Int64x8) GreaterMasked(y Int64x8, mask Mask64x8) Mask64x8 // GreaterMasked compares for greater than. // // Asm: VPCMPUB, CPU Feature: AVX512BW -func (x Uint8x16) GreaterMasked(y Uint8x16, z Mask8x16) Mask8x16 +func (x Uint8x16) GreaterMasked(y Uint8x16, mask Mask8x16) Mask8x16 // GreaterMasked compares for greater than. 
// // Asm: VPCMPUB, CPU Feature: AVX512BW -func (x Uint8x32) GreaterMasked(y Uint8x32, z Mask8x32) Mask8x32 +func (x Uint8x32) GreaterMasked(y Uint8x32, mask Mask8x32) Mask8x32 // GreaterMasked compares for greater than. // // Asm: VPCMPUB, CPU Feature: AVX512BW -func (x Uint8x64) GreaterMasked(y Uint8x64, z Mask8x64) Mask8x64 +func (x Uint8x64) GreaterMasked(y Uint8x64, mask Mask8x64) Mask8x64 // GreaterMasked compares for greater than. // // Asm: VPCMPUW, CPU Feature: AVX512BW -func (x Uint16x8) GreaterMasked(y Uint16x8, z Mask16x8) Mask16x8 +func (x Uint16x8) GreaterMasked(y Uint16x8, mask Mask16x8) Mask16x8 // GreaterMasked compares for greater than. // // Asm: VPCMPUW, CPU Feature: AVX512BW -func (x Uint16x16) GreaterMasked(y Uint16x16, z Mask16x16) Mask16x16 +func (x Uint16x16) GreaterMasked(y Uint16x16, mask Mask16x16) Mask16x16 // GreaterMasked compares for greater than. // // Asm: VPCMPUW, CPU Feature: AVX512BW -func (x Uint16x32) GreaterMasked(y Uint16x32, z Mask16x32) Mask16x32 +func (x Uint16x32) GreaterMasked(y Uint16x32, mask Mask16x32) Mask16x32 // GreaterMasked compares for greater than. // // Asm: VPCMPUD, CPU Feature: AVX512F -func (x Uint32x4) GreaterMasked(y Uint32x4, z Mask32x4) Mask32x4 +func (x Uint32x4) GreaterMasked(y Uint32x4, mask Mask32x4) Mask32x4 // GreaterMasked compares for greater than. // // Asm: VPCMPUD, CPU Feature: AVX512F -func (x Uint32x8) GreaterMasked(y Uint32x8, z Mask32x8) Mask32x8 +func (x Uint32x8) GreaterMasked(y Uint32x8, mask Mask32x8) Mask32x8 // GreaterMasked compares for greater than. // // Asm: VPCMPUD, CPU Feature: AVX512F -func (x Uint32x16) GreaterMasked(y Uint32x16, z Mask32x16) Mask32x16 +func (x Uint32x16) GreaterMasked(y Uint32x16, mask Mask32x16) Mask32x16 // GreaterMasked compares for greater than. // // Asm: VPCMPUQ, CPU Feature: AVX512F -func (x Uint64x2) GreaterMasked(y Uint64x2, z Mask64x2) Mask64x2 +func (x Uint64x2) GreaterMasked(y Uint64x2, mask Mask64x2) Mask64x2 // GreaterMasked compares for greater than. // // Asm: VPCMPUQ, CPU Feature: AVX512F -func (x Uint64x4) GreaterMasked(y Uint64x4, z Mask64x4) Mask64x4 +func (x Uint64x4) GreaterMasked(y Uint64x4, mask Mask64x4) Mask64x4 // GreaterMasked compares for greater than. // // Asm: VPCMPUQ, CPU Feature: AVX512F -func (x Uint64x8) GreaterMasked(y Uint64x8, z Mask64x8) Mask64x8 +func (x Uint64x8) GreaterMasked(y Uint64x8, mask Mask64x8) Mask64x8 /* IsNan */ @@ -3072,32 +3072,32 @@ func (x Float64x8) IsNan(y Float64x8) Mask64x8 // IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // // Asm: VCMPPS, CPU Feature: AVX512F -func (x Float32x4) IsNanMasked(y Float32x4, z Mask32x4) Mask32x4 +func (x Float32x4) IsNanMasked(y Float32x4, mask Mask32x4) Mask32x4 // IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // // Asm: VCMPPS, CPU Feature: AVX512F -func (x Float32x8) IsNanMasked(y Float32x8, z Mask32x8) Mask32x8 +func (x Float32x8) IsNanMasked(y Float32x8, mask Mask32x8) Mask32x8 // IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // // Asm: VCMPPS, CPU Feature: AVX512F -func (x Float32x16) IsNanMasked(y Float32x16, z Mask32x16) Mask32x16 +func (x Float32x16) IsNanMasked(y Float32x16, mask Mask32x16) Mask32x16 // IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // // Asm: VCMPPD, CPU Feature: AVX512F -func (x Float64x2) IsNanMasked(y Float64x2, z Mask64x2) Mask64x2 +func (x Float64x2) IsNanMasked(y Float64x2, mask Mask64x2) Mask64x2 // IsNanMasked checks if elements are NaN. Use as x.IsNan(x). 
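// NOTE (editorial illustration, not part of this patch): chaining masked compares
// gives a lane-wise range test. Hypothetical sketch; it assumes lanes excluded by
// the incoming mask stay excluded in the result.
func betweenWithin(x, lo, hi Float64x4, m Mask64x4) Mask64x4 {
	gt := x.GreaterMasked(lo, m)   // x > lo, restricted to m
	return hi.GreaterMasked(x, gt) // and hi > x, restricted further
}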
// // Asm: VCMPPD, CPU Feature: AVX512F -func (x Float64x4) IsNanMasked(y Float64x4, z Mask64x4) Mask64x4 +func (x Float64x4) IsNanMasked(y Float64x4, mask Mask64x4) Mask64x4 // IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // // Asm: VCMPPD, CPU Feature: AVX512F -func (x Float64x8) IsNanMasked(y Float64x8, z Mask64x8) Mask64x8 +func (x Float64x8) IsNanMasked(y Float64x8, mask Mask64x8) Mask64x8 /* Less */ @@ -3408,304 +3408,304 @@ func (x Uint64x8) LessEqual(y Uint64x8) Mask64x8 // LessEqualMasked compares for less than or equal. // // Asm: VCMPPS, CPU Feature: AVX512F -func (x Float32x4) LessEqualMasked(y Float32x4, z Mask32x4) Mask32x4 +func (x Float32x4) LessEqualMasked(y Float32x4, mask Mask32x4) Mask32x4 // LessEqualMasked compares for less than or equal. // // Asm: VCMPPS, CPU Feature: AVX512F -func (x Float32x8) LessEqualMasked(y Float32x8, z Mask32x8) Mask32x8 +func (x Float32x8) LessEqualMasked(y Float32x8, mask Mask32x8) Mask32x8 // LessEqualMasked compares for less than or equal. // // Asm: VCMPPS, CPU Feature: AVX512F -func (x Float32x16) LessEqualMasked(y Float32x16, z Mask32x16) Mask32x16 +func (x Float32x16) LessEqualMasked(y Float32x16, mask Mask32x16) Mask32x16 // LessEqualMasked compares for less than or equal. // // Asm: VCMPPD, CPU Feature: AVX512F -func (x Float64x2) LessEqualMasked(y Float64x2, z Mask64x2) Mask64x2 +func (x Float64x2) LessEqualMasked(y Float64x2, mask Mask64x2) Mask64x2 // LessEqualMasked compares for less than or equal. // // Asm: VCMPPD, CPU Feature: AVX512F -func (x Float64x4) LessEqualMasked(y Float64x4, z Mask64x4) Mask64x4 +func (x Float64x4) LessEqualMasked(y Float64x4, mask Mask64x4) Mask64x4 // LessEqualMasked compares for less than or equal. // // Asm: VCMPPD, CPU Feature: AVX512F -func (x Float64x8) LessEqualMasked(y Float64x8, z Mask64x8) Mask64x8 +func (x Float64x8) LessEqualMasked(y Float64x8, mask Mask64x8) Mask64x8 // LessEqualMasked compares for less than or equal. // // Asm: VPCMPB, CPU Feature: AVX512BW -func (x Int8x16) LessEqualMasked(y Int8x16, z Mask8x16) Mask8x16 +func (x Int8x16) LessEqualMasked(y Int8x16, mask Mask8x16) Mask8x16 // LessEqualMasked compares for less than or equal. // // Asm: VPCMPB, CPU Feature: AVX512BW -func (x Int8x32) LessEqualMasked(y Int8x32, z Mask8x32) Mask8x32 +func (x Int8x32) LessEqualMasked(y Int8x32, mask Mask8x32) Mask8x32 // LessEqualMasked compares for less than or equal. // // Asm: VPCMPB, CPU Feature: AVX512BW -func (x Int8x64) LessEqualMasked(y Int8x64, z Mask8x64) Mask8x64 +func (x Int8x64) LessEqualMasked(y Int8x64, mask Mask8x64) Mask8x64 // LessEqualMasked compares for less than or equal. // // Asm: VPCMPW, CPU Feature: AVX512BW -func (x Int16x8) LessEqualMasked(y Int16x8, z Mask16x8) Mask16x8 +func (x Int16x8) LessEqualMasked(y Int16x8, mask Mask16x8) Mask16x8 // LessEqualMasked compares for less than or equal. // // Asm: VPCMPW, CPU Feature: AVX512BW -func (x Int16x16) LessEqualMasked(y Int16x16, z Mask16x16) Mask16x16 +func (x Int16x16) LessEqualMasked(y Int16x16, mask Mask16x16) Mask16x16 // LessEqualMasked compares for less than or equal. // // Asm: VPCMPW, CPU Feature: AVX512BW -func (x Int16x32) LessEqualMasked(y Int16x32, z Mask16x32) Mask16x32 +func (x Int16x32) LessEqualMasked(y Int16x32, mask Mask16x32) Mask16x32 // LessEqualMasked compares for less than or equal. 
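// NOTE (editorial illustration, not part of this patch): following the
// "Use as x.IsNan(x)" guidance above, the masked form passes the receiver twice
// plus a mask. Hypothetical helper:
func nanLanesWhere(x Float64x8, m Mask64x8) Mask64x8 {
	return x.IsNanMasked(x, m) // NaN lanes of x, restricted to m
}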
// // Asm: VPCMPD, CPU Feature: AVX512F -func (x Int32x4) LessEqualMasked(y Int32x4, z Mask32x4) Mask32x4 +func (x Int32x4) LessEqualMasked(y Int32x4, mask Mask32x4) Mask32x4 // LessEqualMasked compares for less than or equal. // // Asm: VPCMPD, CPU Feature: AVX512F -func (x Int32x8) LessEqualMasked(y Int32x8, z Mask32x8) Mask32x8 +func (x Int32x8) LessEqualMasked(y Int32x8, mask Mask32x8) Mask32x8 // LessEqualMasked compares for less than or equal. // // Asm: VPCMPD, CPU Feature: AVX512F -func (x Int32x16) LessEqualMasked(y Int32x16, z Mask32x16) Mask32x16 +func (x Int32x16) LessEqualMasked(y Int32x16, mask Mask32x16) Mask32x16 // LessEqualMasked compares for less than or equal. // // Asm: VPCMPQ, CPU Feature: AVX512F -func (x Int64x2) LessEqualMasked(y Int64x2, z Mask64x2) Mask64x2 +func (x Int64x2) LessEqualMasked(y Int64x2, mask Mask64x2) Mask64x2 // LessEqualMasked compares for less than or equal. // // Asm: VPCMPQ, CPU Feature: AVX512F -func (x Int64x4) LessEqualMasked(y Int64x4, z Mask64x4) Mask64x4 +func (x Int64x4) LessEqualMasked(y Int64x4, mask Mask64x4) Mask64x4 // LessEqualMasked compares for less than or equal. // // Asm: VPCMPQ, CPU Feature: AVX512F -func (x Int64x8) LessEqualMasked(y Int64x8, z Mask64x8) Mask64x8 +func (x Int64x8) LessEqualMasked(y Int64x8, mask Mask64x8) Mask64x8 // LessEqualMasked compares for less than or equal. // // Asm: VPCMPUB, CPU Feature: AVX512BW -func (x Uint8x16) LessEqualMasked(y Uint8x16, z Mask8x16) Mask8x16 +func (x Uint8x16) LessEqualMasked(y Uint8x16, mask Mask8x16) Mask8x16 // LessEqualMasked compares for less than or equal. // // Asm: VPCMPUB, CPU Feature: AVX512BW -func (x Uint8x32) LessEqualMasked(y Uint8x32, z Mask8x32) Mask8x32 +func (x Uint8x32) LessEqualMasked(y Uint8x32, mask Mask8x32) Mask8x32 // LessEqualMasked compares for less than or equal. // // Asm: VPCMPUB, CPU Feature: AVX512BW -func (x Uint8x64) LessEqualMasked(y Uint8x64, z Mask8x64) Mask8x64 +func (x Uint8x64) LessEqualMasked(y Uint8x64, mask Mask8x64) Mask8x64 // LessEqualMasked compares for less than or equal. // // Asm: VPCMPUW, CPU Feature: AVX512BW -func (x Uint16x8) LessEqualMasked(y Uint16x8, z Mask16x8) Mask16x8 +func (x Uint16x8) LessEqualMasked(y Uint16x8, mask Mask16x8) Mask16x8 // LessEqualMasked compares for less than or equal. // // Asm: VPCMPUW, CPU Feature: AVX512BW -func (x Uint16x16) LessEqualMasked(y Uint16x16, z Mask16x16) Mask16x16 +func (x Uint16x16) LessEqualMasked(y Uint16x16, mask Mask16x16) Mask16x16 // LessEqualMasked compares for less than or equal. // // Asm: VPCMPUW, CPU Feature: AVX512BW -func (x Uint16x32) LessEqualMasked(y Uint16x32, z Mask16x32) Mask16x32 +func (x Uint16x32) LessEqualMasked(y Uint16x32, mask Mask16x32) Mask16x32 // LessEqualMasked compares for less than or equal. // // Asm: VPCMPUD, CPU Feature: AVX512F -func (x Uint32x4) LessEqualMasked(y Uint32x4, z Mask32x4) Mask32x4 +func (x Uint32x4) LessEqualMasked(y Uint32x4, mask Mask32x4) Mask32x4 // LessEqualMasked compares for less than or equal. // // Asm: VPCMPUD, CPU Feature: AVX512F -func (x Uint32x8) LessEqualMasked(y Uint32x8, z Mask32x8) Mask32x8 +func (x Uint32x8) LessEqualMasked(y Uint32x8, mask Mask32x8) Mask32x8 // LessEqualMasked compares for less than or equal. // // Asm: VPCMPUD, CPU Feature: AVX512F -func (x Uint32x16) LessEqualMasked(y Uint32x16, z Mask32x16) Mask32x16 +func (x Uint32x16) LessEqualMasked(y Uint32x16, mask Mask32x16) Mask32x16 // LessEqualMasked compares for less than or equal. 
// // Asm: VPCMPUQ, CPU Feature: AVX512F -func (x Uint64x2) LessEqualMasked(y Uint64x2, z Mask64x2) Mask64x2 +func (x Uint64x2) LessEqualMasked(y Uint64x2, mask Mask64x2) Mask64x2 // LessEqualMasked compares for less than or equal. // // Asm: VPCMPUQ, CPU Feature: AVX512F -func (x Uint64x4) LessEqualMasked(y Uint64x4, z Mask64x4) Mask64x4 +func (x Uint64x4) LessEqualMasked(y Uint64x4, mask Mask64x4) Mask64x4 // LessEqualMasked compares for less than or equal. // // Asm: VPCMPUQ, CPU Feature: AVX512F -func (x Uint64x8) LessEqualMasked(y Uint64x8, z Mask64x8) Mask64x8 +func (x Uint64x8) LessEqualMasked(y Uint64x8, mask Mask64x8) Mask64x8 /* LessMasked */ // LessMasked compares for less than. // // Asm: VCMPPS, CPU Feature: AVX512F -func (x Float32x4) LessMasked(y Float32x4, z Mask32x4) Mask32x4 +func (x Float32x4) LessMasked(y Float32x4, mask Mask32x4) Mask32x4 // LessMasked compares for less than. // // Asm: VCMPPS, CPU Feature: AVX512F -func (x Float32x8) LessMasked(y Float32x8, z Mask32x8) Mask32x8 +func (x Float32x8) LessMasked(y Float32x8, mask Mask32x8) Mask32x8 // LessMasked compares for less than. // // Asm: VCMPPS, CPU Feature: AVX512F -func (x Float32x16) LessMasked(y Float32x16, z Mask32x16) Mask32x16 +func (x Float32x16) LessMasked(y Float32x16, mask Mask32x16) Mask32x16 // LessMasked compares for less than. // // Asm: VCMPPD, CPU Feature: AVX512F -func (x Float64x2) LessMasked(y Float64x2, z Mask64x2) Mask64x2 +func (x Float64x2) LessMasked(y Float64x2, mask Mask64x2) Mask64x2 // LessMasked compares for less than. // // Asm: VCMPPD, CPU Feature: AVX512F -func (x Float64x4) LessMasked(y Float64x4, z Mask64x4) Mask64x4 +func (x Float64x4) LessMasked(y Float64x4, mask Mask64x4) Mask64x4 // LessMasked compares for less than. // // Asm: VCMPPD, CPU Feature: AVX512F -func (x Float64x8) LessMasked(y Float64x8, z Mask64x8) Mask64x8 +func (x Float64x8) LessMasked(y Float64x8, mask Mask64x8) Mask64x8 // LessMasked compares for less than. // // Asm: VPCMPB, CPU Feature: AVX512BW -func (x Int8x16) LessMasked(y Int8x16, z Mask8x16) Mask8x16 +func (x Int8x16) LessMasked(y Int8x16, mask Mask8x16) Mask8x16 // LessMasked compares for less than. // // Asm: VPCMPB, CPU Feature: AVX512BW -func (x Int8x32) LessMasked(y Int8x32, z Mask8x32) Mask8x32 +func (x Int8x32) LessMasked(y Int8x32, mask Mask8x32) Mask8x32 // LessMasked compares for less than. // // Asm: VPCMPB, CPU Feature: AVX512BW -func (x Int8x64) LessMasked(y Int8x64, z Mask8x64) Mask8x64 +func (x Int8x64) LessMasked(y Int8x64, mask Mask8x64) Mask8x64 // LessMasked compares for less than. // // Asm: VPCMPW, CPU Feature: AVX512BW -func (x Int16x8) LessMasked(y Int16x8, z Mask16x8) Mask16x8 +func (x Int16x8) LessMasked(y Int16x8, mask Mask16x8) Mask16x8 // LessMasked compares for less than. // // Asm: VPCMPW, CPU Feature: AVX512BW -func (x Int16x16) LessMasked(y Int16x16, z Mask16x16) Mask16x16 +func (x Int16x16) LessMasked(y Int16x16, mask Mask16x16) Mask16x16 // LessMasked compares for less than. // // Asm: VPCMPW, CPU Feature: AVX512BW -func (x Int16x32) LessMasked(y Int16x32, z Mask16x32) Mask16x32 +func (x Int16x32) LessMasked(y Int16x32, mask Mask16x32) Mask16x32 // LessMasked compares for less than. // // Asm: VPCMPD, CPU Feature: AVX512F -func (x Int32x4) LessMasked(y Int32x4, z Mask32x4) Mask32x4 +func (x Int32x4) LessMasked(y Int32x4, mask Mask32x4) Mask32x4 // LessMasked compares for less than. 
// // Asm: VPCMPD, CPU Feature: AVX512F -func (x Int32x8) LessMasked(y Int32x8, z Mask32x8) Mask32x8 +func (x Int32x8) LessMasked(y Int32x8, mask Mask32x8) Mask32x8 // LessMasked compares for less than. // // Asm: VPCMPD, CPU Feature: AVX512F -func (x Int32x16) LessMasked(y Int32x16, z Mask32x16) Mask32x16 +func (x Int32x16) LessMasked(y Int32x16, mask Mask32x16) Mask32x16 // LessMasked compares for less than. // // Asm: VPCMPQ, CPU Feature: AVX512F -func (x Int64x2) LessMasked(y Int64x2, z Mask64x2) Mask64x2 +func (x Int64x2) LessMasked(y Int64x2, mask Mask64x2) Mask64x2 // LessMasked compares for less than. // // Asm: VPCMPQ, CPU Feature: AVX512F -func (x Int64x4) LessMasked(y Int64x4, z Mask64x4) Mask64x4 +func (x Int64x4) LessMasked(y Int64x4, mask Mask64x4) Mask64x4 // LessMasked compares for less than. // // Asm: VPCMPQ, CPU Feature: AVX512F -func (x Int64x8) LessMasked(y Int64x8, z Mask64x8) Mask64x8 +func (x Int64x8) LessMasked(y Int64x8, mask Mask64x8) Mask64x8 // LessMasked compares for less than. // // Asm: VPCMPUB, CPU Feature: AVX512BW -func (x Uint8x16) LessMasked(y Uint8x16, z Mask8x16) Mask8x16 +func (x Uint8x16) LessMasked(y Uint8x16, mask Mask8x16) Mask8x16 // LessMasked compares for less than. // // Asm: VPCMPUB, CPU Feature: AVX512BW -func (x Uint8x32) LessMasked(y Uint8x32, z Mask8x32) Mask8x32 +func (x Uint8x32) LessMasked(y Uint8x32, mask Mask8x32) Mask8x32 // LessMasked compares for less than. // // Asm: VPCMPUB, CPU Feature: AVX512BW -func (x Uint8x64) LessMasked(y Uint8x64, z Mask8x64) Mask8x64 +func (x Uint8x64) LessMasked(y Uint8x64, mask Mask8x64) Mask8x64 // LessMasked compares for less than. // // Asm: VPCMPUW, CPU Feature: AVX512BW -func (x Uint16x8) LessMasked(y Uint16x8, z Mask16x8) Mask16x8 +func (x Uint16x8) LessMasked(y Uint16x8, mask Mask16x8) Mask16x8 // LessMasked compares for less than. // // Asm: VPCMPUW, CPU Feature: AVX512BW -func (x Uint16x16) LessMasked(y Uint16x16, z Mask16x16) Mask16x16 +func (x Uint16x16) LessMasked(y Uint16x16, mask Mask16x16) Mask16x16 // LessMasked compares for less than. // // Asm: VPCMPUW, CPU Feature: AVX512BW -func (x Uint16x32) LessMasked(y Uint16x32, z Mask16x32) Mask16x32 +func (x Uint16x32) LessMasked(y Uint16x32, mask Mask16x32) Mask16x32 // LessMasked compares for less than. // // Asm: VPCMPUD, CPU Feature: AVX512F -func (x Uint32x4) LessMasked(y Uint32x4, z Mask32x4) Mask32x4 +func (x Uint32x4) LessMasked(y Uint32x4, mask Mask32x4) Mask32x4 // LessMasked compares for less than. // // Asm: VPCMPUD, CPU Feature: AVX512F -func (x Uint32x8) LessMasked(y Uint32x8, z Mask32x8) Mask32x8 +func (x Uint32x8) LessMasked(y Uint32x8, mask Mask32x8) Mask32x8 // LessMasked compares for less than. // // Asm: VPCMPUD, CPU Feature: AVX512F -func (x Uint32x16) LessMasked(y Uint32x16, z Mask32x16) Mask32x16 +func (x Uint32x16) LessMasked(y Uint32x16, mask Mask32x16) Mask32x16 // LessMasked compares for less than. // // Asm: VPCMPUQ, CPU Feature: AVX512F -func (x Uint64x2) LessMasked(y Uint64x2, z Mask64x2) Mask64x2 +func (x Uint64x2) LessMasked(y Uint64x2, mask Mask64x2) Mask64x2 // LessMasked compares for less than. // // Asm: VPCMPUQ, CPU Feature: AVX512F -func (x Uint64x4) LessMasked(y Uint64x4, z Mask64x4) Mask64x4 +func (x Uint64x4) LessMasked(y Uint64x4, mask Mask64x4) Mask64x4 // LessMasked compares for less than. 
// // Asm: VPCMPUQ, CPU Feature: AVX512F -func (x Uint64x8) LessMasked(y Uint64x8, z Mask64x8) Mask64x8 +func (x Uint64x8) LessMasked(y Uint64x8, mask Mask64x8) Mask64x8 /* Max */ @@ -3864,152 +3864,152 @@ func (x Uint64x8) Max(y Uint64x8) Uint64x8 // MaxMasked computes the maximum of corresponding elements. // // Asm: VMAXPS, CPU Feature: AVX512F -func (x Float32x4) MaxMasked(y Float32x4, z Mask32x4) Float32x4 +func (x Float32x4) MaxMasked(y Float32x4, mask Mask32x4) Float32x4 // MaxMasked computes the maximum of corresponding elements. // // Asm: VMAXPS, CPU Feature: AVX512F -func (x Float32x8) MaxMasked(y Float32x8, z Mask32x8) Float32x8 +func (x Float32x8) MaxMasked(y Float32x8, mask Mask32x8) Float32x8 // MaxMasked computes the maximum of corresponding elements. // // Asm: VMAXPS, CPU Feature: AVX512F -func (x Float32x16) MaxMasked(y Float32x16, z Mask32x16) Float32x16 +func (x Float32x16) MaxMasked(y Float32x16, mask Mask32x16) Float32x16 // MaxMasked computes the maximum of corresponding elements. // // Asm: VMAXPD, CPU Feature: AVX512F -func (x Float64x2) MaxMasked(y Float64x2, z Mask64x2) Float64x2 +func (x Float64x2) MaxMasked(y Float64x2, mask Mask64x2) Float64x2 // MaxMasked computes the maximum of corresponding elements. // // Asm: VMAXPD, CPU Feature: AVX512F -func (x Float64x4) MaxMasked(y Float64x4, z Mask64x4) Float64x4 +func (x Float64x4) MaxMasked(y Float64x4, mask Mask64x4) Float64x4 // MaxMasked computes the maximum of corresponding elements. // // Asm: VMAXPD, CPU Feature: AVX512F -func (x Float64x8) MaxMasked(y Float64x8, z Mask64x8) Float64x8 +func (x Float64x8) MaxMasked(y Float64x8, mask Mask64x8) Float64x8 // MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXSB, CPU Feature: AVX512BW -func (x Int8x16) MaxMasked(y Int8x16, z Mask8x16) Int8x16 +func (x Int8x16) MaxMasked(y Int8x16, mask Mask8x16) Int8x16 // MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXSB, CPU Feature: AVX512BW -func (x Int8x32) MaxMasked(y Int8x32, z Mask8x32) Int8x32 +func (x Int8x32) MaxMasked(y Int8x32, mask Mask8x32) Int8x32 // MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXSB, CPU Feature: AVX512BW -func (x Int8x64) MaxMasked(y Int8x64, z Mask8x64) Int8x64 +func (x Int8x64) MaxMasked(y Int8x64, mask Mask8x64) Int8x64 // MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXSW, CPU Feature: AVX512BW -func (x Int16x8) MaxMasked(y Int16x8, z Mask16x8) Int16x8 +func (x Int16x8) MaxMasked(y Int16x8, mask Mask16x8) Int16x8 // MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXSW, CPU Feature: AVX512BW -func (x Int16x16) MaxMasked(y Int16x16, z Mask16x16) Int16x16 +func (x Int16x16) MaxMasked(y Int16x16, mask Mask16x16) Int16x16 // MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXSW, CPU Feature: AVX512BW -func (x Int16x32) MaxMasked(y Int16x32, z Mask16x32) Int16x32 +func (x Int16x32) MaxMasked(y Int16x32, mask Mask16x32) Int16x32 // MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXSD, CPU Feature: AVX512F -func (x Int32x4) MaxMasked(y Int32x4, z Mask32x4) Int32x4 +func (x Int32x4) MaxMasked(y Int32x4, mask Mask32x4) Int32x4 // MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXSD, CPU Feature: AVX512F -func (x Int32x8) MaxMasked(y Int32x8, z Mask32x8) Int32x8 +func (x Int32x8) MaxMasked(y Int32x8, mask Mask32x8) Int32x8 // MaxMasked computes the maximum of corresponding elements. 
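// NOTE (editorial illustration, not part of this patch): MaxMasked can act as a
// lane-wise lower bound applied only where the mask selects. Helper name is
// hypothetical; what the unselected lanes hold afterwards depends on the masked
// VMAXPS semantics and is not asserted here.
func floorToWhere(x, lo Float32x4, m Mask32x4) Float32x4 {
	return x.MaxMasked(lo, m) // max(x, lo) on the selected lanes
}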
// // Asm: VPMAXSD, CPU Feature: AVX512F -func (x Int32x16) MaxMasked(y Int32x16, z Mask32x16) Int32x16 +func (x Int32x16) MaxMasked(y Int32x16, mask Mask32x16) Int32x16 // MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXSQ, CPU Feature: AVX512F -func (x Int64x2) MaxMasked(y Int64x2, z Mask64x2) Int64x2 +func (x Int64x2) MaxMasked(y Int64x2, mask Mask64x2) Int64x2 // MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXSQ, CPU Feature: AVX512F -func (x Int64x4) MaxMasked(y Int64x4, z Mask64x4) Int64x4 +func (x Int64x4) MaxMasked(y Int64x4, mask Mask64x4) Int64x4 // MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXSQ, CPU Feature: AVX512F -func (x Int64x8) MaxMasked(y Int64x8, z Mask64x8) Int64x8 +func (x Int64x8) MaxMasked(y Int64x8, mask Mask64x8) Int64x8 // MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXUB, CPU Feature: AVX512BW -func (x Uint8x16) MaxMasked(y Uint8x16, z Mask8x16) Uint8x16 +func (x Uint8x16) MaxMasked(y Uint8x16, mask Mask8x16) Uint8x16 // MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXUB, CPU Feature: AVX512BW -func (x Uint8x32) MaxMasked(y Uint8x32, z Mask8x32) Uint8x32 +func (x Uint8x32) MaxMasked(y Uint8x32, mask Mask8x32) Uint8x32 // MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXUB, CPU Feature: AVX512BW -func (x Uint8x64) MaxMasked(y Uint8x64, z Mask8x64) Uint8x64 +func (x Uint8x64) MaxMasked(y Uint8x64, mask Mask8x64) Uint8x64 // MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXUW, CPU Feature: AVX512BW -func (x Uint16x8) MaxMasked(y Uint16x8, z Mask16x8) Uint16x8 +func (x Uint16x8) MaxMasked(y Uint16x8, mask Mask16x8) Uint16x8 // MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXUW, CPU Feature: AVX512BW -func (x Uint16x16) MaxMasked(y Uint16x16, z Mask16x16) Uint16x16 +func (x Uint16x16) MaxMasked(y Uint16x16, mask Mask16x16) Uint16x16 // MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXUW, CPU Feature: AVX512BW -func (x Uint16x32) MaxMasked(y Uint16x32, z Mask16x32) Uint16x32 +func (x Uint16x32) MaxMasked(y Uint16x32, mask Mask16x32) Uint16x32 // MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXUD, CPU Feature: AVX512F -func (x Uint32x4) MaxMasked(y Uint32x4, z Mask32x4) Uint32x4 +func (x Uint32x4) MaxMasked(y Uint32x4, mask Mask32x4) Uint32x4 // MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXUD, CPU Feature: AVX512F -func (x Uint32x8) MaxMasked(y Uint32x8, z Mask32x8) Uint32x8 +func (x Uint32x8) MaxMasked(y Uint32x8, mask Mask32x8) Uint32x8 // MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXUD, CPU Feature: AVX512F -func (x Uint32x16) MaxMasked(y Uint32x16, z Mask32x16) Uint32x16 +func (x Uint32x16) MaxMasked(y Uint32x16, mask Mask32x16) Uint32x16 // MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXUQ, CPU Feature: AVX512F -func (x Uint64x2) MaxMasked(y Uint64x2, z Mask64x2) Uint64x2 +func (x Uint64x2) MaxMasked(y Uint64x2, mask Mask64x2) Uint64x2 // MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXUQ, CPU Feature: AVX512F -func (x Uint64x4) MaxMasked(y Uint64x4, z Mask64x4) Uint64x4 +func (x Uint64x4) MaxMasked(y Uint64x4, mask Mask64x4) Uint64x4 // MaxMasked computes the maximum of corresponding elements. 
// // Asm: VPMAXUQ, CPU Feature: AVX512F -func (x Uint64x8) MaxMasked(y Uint64x8, z Mask64x8) Uint64x8 +func (x Uint64x8) MaxMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* Min */ @@ -4168,152 +4168,152 @@ func (x Uint64x8) Min(y Uint64x8) Uint64x8 // MinMasked computes the minimum of corresponding elements. // // Asm: VMINPS, CPU Feature: AVX512F -func (x Float32x4) MinMasked(y Float32x4, z Mask32x4) Float32x4 +func (x Float32x4) MinMasked(y Float32x4, mask Mask32x4) Float32x4 // MinMasked computes the minimum of corresponding elements. // // Asm: VMINPS, CPU Feature: AVX512F -func (x Float32x8) MinMasked(y Float32x8, z Mask32x8) Float32x8 +func (x Float32x8) MinMasked(y Float32x8, mask Mask32x8) Float32x8 // MinMasked computes the minimum of corresponding elements. // // Asm: VMINPS, CPU Feature: AVX512F -func (x Float32x16) MinMasked(y Float32x16, z Mask32x16) Float32x16 +func (x Float32x16) MinMasked(y Float32x16, mask Mask32x16) Float32x16 // MinMasked computes the minimum of corresponding elements. // // Asm: VMINPD, CPU Feature: AVX512F -func (x Float64x2) MinMasked(y Float64x2, z Mask64x2) Float64x2 +func (x Float64x2) MinMasked(y Float64x2, mask Mask64x2) Float64x2 // MinMasked computes the minimum of corresponding elements. // // Asm: VMINPD, CPU Feature: AVX512F -func (x Float64x4) MinMasked(y Float64x4, z Mask64x4) Float64x4 +func (x Float64x4) MinMasked(y Float64x4, mask Mask64x4) Float64x4 // MinMasked computes the minimum of corresponding elements. // // Asm: VMINPD, CPU Feature: AVX512F -func (x Float64x8) MinMasked(y Float64x8, z Mask64x8) Float64x8 +func (x Float64x8) MinMasked(y Float64x8, mask Mask64x8) Float64x8 // MinMasked computes the minimum of corresponding elements. // // Asm: VPMINSB, CPU Feature: AVX512BW -func (x Int8x16) MinMasked(y Int8x16, z Mask8x16) Int8x16 +func (x Int8x16) MinMasked(y Int8x16, mask Mask8x16) Int8x16 // MinMasked computes the minimum of corresponding elements. // // Asm: VPMINSB, CPU Feature: AVX512BW -func (x Int8x32) MinMasked(y Int8x32, z Mask8x32) Int8x32 +func (x Int8x32) MinMasked(y Int8x32, mask Mask8x32) Int8x32 // MinMasked computes the minimum of corresponding elements. // // Asm: VPMINSB, CPU Feature: AVX512BW -func (x Int8x64) MinMasked(y Int8x64, z Mask8x64) Int8x64 +func (x Int8x64) MinMasked(y Int8x64, mask Mask8x64) Int8x64 // MinMasked computes the minimum of corresponding elements. // // Asm: VPMINSW, CPU Feature: AVX512BW -func (x Int16x8) MinMasked(y Int16x8, z Mask16x8) Int16x8 +func (x Int16x8) MinMasked(y Int16x8, mask Mask16x8) Int16x8 // MinMasked computes the minimum of corresponding elements. // // Asm: VPMINSW, CPU Feature: AVX512BW -func (x Int16x16) MinMasked(y Int16x16, z Mask16x16) Int16x16 +func (x Int16x16) MinMasked(y Int16x16, mask Mask16x16) Int16x16 // MinMasked computes the minimum of corresponding elements. // // Asm: VPMINSW, CPU Feature: AVX512BW -func (x Int16x32) MinMasked(y Int16x32, z Mask16x32) Int16x32 +func (x Int16x32) MinMasked(y Int16x32, mask Mask16x32) Int16x32 // MinMasked computes the minimum of corresponding elements. // // Asm: VPMINSD, CPU Feature: AVX512F -func (x Int32x4) MinMasked(y Int32x4, z Mask32x4) Int32x4 +func (x Int32x4) MinMasked(y Int32x4, mask Mask32x4) Int32x4 // MinMasked computes the minimum of corresponding elements. // // Asm: VPMINSD, CPU Feature: AVX512F -func (x Int32x8) MinMasked(y Int32x8, z Mask32x8) Int32x8 +func (x Int32x8) MinMasked(y Int32x8, mask Mask32x8) Int32x8 // MinMasked computes the minimum of corresponding elements. 
// // Asm: VPMINSD, CPU Feature: AVX512F -func (x Int32x16) MinMasked(y Int32x16, z Mask32x16) Int32x16 +func (x Int32x16) MinMasked(y Int32x16, mask Mask32x16) Int32x16 // MinMasked computes the minimum of corresponding elements. // // Asm: VPMINSQ, CPU Feature: AVX512F -func (x Int64x2) MinMasked(y Int64x2, z Mask64x2) Int64x2 +func (x Int64x2) MinMasked(y Int64x2, mask Mask64x2) Int64x2 // MinMasked computes the minimum of corresponding elements. // // Asm: VPMINSQ, CPU Feature: AVX512F -func (x Int64x4) MinMasked(y Int64x4, z Mask64x4) Int64x4 +func (x Int64x4) MinMasked(y Int64x4, mask Mask64x4) Int64x4 // MinMasked computes the minimum of corresponding elements. // // Asm: VPMINSQ, CPU Feature: AVX512F -func (x Int64x8) MinMasked(y Int64x8, z Mask64x8) Int64x8 +func (x Int64x8) MinMasked(y Int64x8, mask Mask64x8) Int64x8 // MinMasked computes the minimum of corresponding elements. // // Asm: VPMINUB, CPU Feature: AVX512BW -func (x Uint8x16) MinMasked(y Uint8x16, z Mask8x16) Uint8x16 +func (x Uint8x16) MinMasked(y Uint8x16, mask Mask8x16) Uint8x16 // MinMasked computes the minimum of corresponding elements. // // Asm: VPMINUB, CPU Feature: AVX512BW -func (x Uint8x32) MinMasked(y Uint8x32, z Mask8x32) Uint8x32 +func (x Uint8x32) MinMasked(y Uint8x32, mask Mask8x32) Uint8x32 // MinMasked computes the minimum of corresponding elements. // // Asm: VPMINUB, CPU Feature: AVX512BW -func (x Uint8x64) MinMasked(y Uint8x64, z Mask8x64) Uint8x64 +func (x Uint8x64) MinMasked(y Uint8x64, mask Mask8x64) Uint8x64 // MinMasked computes the minimum of corresponding elements. // // Asm: VPMINUW, CPU Feature: AVX512BW -func (x Uint16x8) MinMasked(y Uint16x8, z Mask16x8) Uint16x8 +func (x Uint16x8) MinMasked(y Uint16x8, mask Mask16x8) Uint16x8 // MinMasked computes the minimum of corresponding elements. // // Asm: VPMINUW, CPU Feature: AVX512BW -func (x Uint16x16) MinMasked(y Uint16x16, z Mask16x16) Uint16x16 +func (x Uint16x16) MinMasked(y Uint16x16, mask Mask16x16) Uint16x16 // MinMasked computes the minimum of corresponding elements. // // Asm: VPMINUW, CPU Feature: AVX512BW -func (x Uint16x32) MinMasked(y Uint16x32, z Mask16x32) Uint16x32 +func (x Uint16x32) MinMasked(y Uint16x32, mask Mask16x32) Uint16x32 // MinMasked computes the minimum of corresponding elements. // // Asm: VPMINUD, CPU Feature: AVX512F -func (x Uint32x4) MinMasked(y Uint32x4, z Mask32x4) Uint32x4 +func (x Uint32x4) MinMasked(y Uint32x4, mask Mask32x4) Uint32x4 // MinMasked computes the minimum of corresponding elements. // // Asm: VPMINUD, CPU Feature: AVX512F -func (x Uint32x8) MinMasked(y Uint32x8, z Mask32x8) Uint32x8 +func (x Uint32x8) MinMasked(y Uint32x8, mask Mask32x8) Uint32x8 // MinMasked computes the minimum of corresponding elements. // // Asm: VPMINUD, CPU Feature: AVX512F -func (x Uint32x16) MinMasked(y Uint32x16, z Mask32x16) Uint32x16 +func (x Uint32x16) MinMasked(y Uint32x16, mask Mask32x16) Uint32x16 // MinMasked computes the minimum of corresponding elements. // // Asm: VPMINUQ, CPU Feature: AVX512F -func (x Uint64x2) MinMasked(y Uint64x2, z Mask64x2) Uint64x2 +func (x Uint64x2) MinMasked(y Uint64x2, mask Mask64x2) Uint64x2 // MinMasked computes the minimum of corresponding elements. // // Asm: VPMINUQ, CPU Feature: AVX512F -func (x Uint64x4) MinMasked(y Uint64x4, z Mask64x4) Uint64x4 +func (x Uint64x4) MinMasked(y Uint64x4, mask Mask64x4) Uint64x4 // MinMasked computes the minimum of corresponding elements. 
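// NOTE (editorial illustration, not part of this patch): combining MaxMasked and
// MinMasked gives a masked clamp. Hypothetical sketch over Int32x16; both calls
// use the same mask, and the contents of unselected lanes are left to the
// masked-operation semantics rather than asserted here.
func clampWhere(x, lo, hi Int32x16, m Mask32x16) Int32x16 {
	return x.MaxMasked(lo, m).MinMasked(hi, m) // lo <= result <= hi on selected lanes
}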
// // Asm: VPMINUQ, CPU Feature: AVX512F -func (x Uint64x8) MinMasked(y Uint64x8, z Mask64x8) Uint64x8 +func (x Uint64x8) MinMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* Mul */ @@ -4384,32 +4384,32 @@ func (x Float64x8) MulByPowOf2(y Float64x8) Float64x8 // MulByPowOf2Masked multiplies elements by a power of 2. // // Asm: VSCALEFPS, CPU Feature: AVX512F -func (x Float32x4) MulByPowOf2Masked(y Float32x4, z Mask32x4) Float32x4 +func (x Float32x4) MulByPowOf2Masked(y Float32x4, mask Mask32x4) Float32x4 // MulByPowOf2Masked multiplies elements by a power of 2. // // Asm: VSCALEFPS, CPU Feature: AVX512F -func (x Float32x8) MulByPowOf2Masked(y Float32x8, z Mask32x8) Float32x8 +func (x Float32x8) MulByPowOf2Masked(y Float32x8, mask Mask32x8) Float32x8 // MulByPowOf2Masked multiplies elements by a power of 2. // // Asm: VSCALEFPS, CPU Feature: AVX512F -func (x Float32x16) MulByPowOf2Masked(y Float32x16, z Mask32x16) Float32x16 +func (x Float32x16) MulByPowOf2Masked(y Float32x16, mask Mask32x16) Float32x16 // MulByPowOf2Masked multiplies elements by a power of 2. // // Asm: VSCALEFPD, CPU Feature: AVX512F -func (x Float64x2) MulByPowOf2Masked(y Float64x2, z Mask64x2) Float64x2 +func (x Float64x2) MulByPowOf2Masked(y Float64x2, mask Mask64x2) Float64x2 // MulByPowOf2Masked multiplies elements by a power of 2. // // Asm: VSCALEFPD, CPU Feature: AVX512F -func (x Float64x4) MulByPowOf2Masked(y Float64x4, z Mask64x4) Float64x4 +func (x Float64x4) MulByPowOf2Masked(y Float64x4, mask Mask64x4) Float64x4 // MulByPowOf2Masked multiplies elements by a power of 2. // // Asm: VSCALEFPD, CPU Feature: AVX512F -func (x Float64x8) MulByPowOf2Masked(y Float64x8, z Mask64x8) Float64x8 +func (x Float64x8) MulByPowOf2Masked(y Float64x8, mask Mask64x8) Float64x8 /* MulEvenWiden */ @@ -4479,37 +4479,37 @@ func (x Uint64x8) MulEvenWiden(y Uint64x8) Uint64x8 // Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULDQ, CPU Feature: AVX512F -func (x Int64x2) MulEvenWidenMasked(y Int64x2, z Mask64x2) Int64x2 +func (x Int64x2) MulEvenWidenMasked(y Int64x2, mask Mask64x2) Int64x2 // MulEvenWidenMasked multiplies even-indexed elements, widening the result, masked. // Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULDQ, CPU Feature: AVX512F -func (x Int64x4) MulEvenWidenMasked(y Int64x4, z Mask64x4) Int64x4 +func (x Int64x4) MulEvenWidenMasked(y Int64x4, mask Mask64x4) Int64x4 // MulEvenWidenMasked multiplies even-indexed elements, widening the result, masked. // Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULDQ, CPU Feature: AVX512F -func (x Int64x8) MulEvenWidenMasked(y Int64x8, z Mask64x8) Int64x8 +func (x Int64x8) MulEvenWidenMasked(y Int64x8, mask Mask64x8) Int64x8 // MulEvenWidenMasked multiplies even-indexed elements, widening the result, masked. // Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULUDQ, CPU Feature: AVX512F -func (x Uint64x2) MulEvenWidenMasked(y Uint64x2, z Mask64x2) Uint64x2 +func (x Uint64x2) MulEvenWidenMasked(y Uint64x2, mask Mask64x2) Uint64x2 // MulEvenWidenMasked multiplies even-indexed elements, widening the result, masked. // Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULUDQ, CPU Feature: AVX512F -func (x Uint64x4) MulEvenWidenMasked(y Uint64x4, z Mask64x4) Uint64x4 +func (x Uint64x4) MulEvenWidenMasked(y Uint64x4, mask Mask64x4) Uint64x4 // MulEvenWidenMasked multiplies even-indexed elements, widening the result, masked. // Result[i] = v1.Even[i] * v2.Even[i]. 
// // Asm: VPMULUDQ, CPU Feature: AVX512F -func (x Uint64x8) MulEvenWidenMasked(y Uint64x8, z Mask64x8) Uint64x8 +func (x Uint64x8) MulEvenWidenMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* MulHigh */ @@ -4548,32 +4548,32 @@ func (x Uint16x32) MulHigh(y Uint16x32) Uint16x32 // MulHighMasked multiplies elements and stores the high part of the result, masked. // // Asm: VPMULHW, CPU Feature: AVX512BW -func (x Int16x8) MulHighMasked(y Int16x8, z Mask16x8) Int16x8 +func (x Int16x8) MulHighMasked(y Int16x8, mask Mask16x8) Int16x8 // MulHighMasked multiplies elements and stores the high part of the result, masked. // // Asm: VPMULHW, CPU Feature: AVX512BW -func (x Int16x16) MulHighMasked(y Int16x16, z Mask16x16) Int16x16 +func (x Int16x16) MulHighMasked(y Int16x16, mask Mask16x16) Int16x16 // MulHighMasked multiplies elements and stores the high part of the result, masked. // // Asm: VPMULHW, CPU Feature: AVX512BW -func (x Int16x32) MulHighMasked(y Int16x32, z Mask16x32) Int16x32 +func (x Int16x32) MulHighMasked(y Int16x32, mask Mask16x32) Int16x32 // MulHighMasked multiplies elements and stores the high part of the result, masked. // // Asm: VPMULHUW, CPU Feature: AVX512BW -func (x Uint16x8) MulHighMasked(y Uint16x8, z Mask16x8) Uint16x8 +func (x Uint16x8) MulHighMasked(y Uint16x8, mask Mask16x8) Uint16x8 // MulHighMasked multiplies elements and stores the high part of the result, masked. // // Asm: VPMULHUW, CPU Feature: AVX512BW -func (x Uint16x16) MulHighMasked(y Uint16x16, z Mask16x16) Uint16x16 +func (x Uint16x16) MulHighMasked(y Uint16x16, mask Mask16x16) Uint16x16 // MulHighMasked multiplies elements and stores the high part of the result, masked. // // Asm: VPMULHUW, CPU Feature: AVX512BW -func (x Uint16x32) MulHighMasked(y Uint16x32, z Mask16x32) Uint16x32 +func (x Uint16x32) MulHighMasked(y Uint16x32, mask Mask16x32) Uint16x32 /* MulLow */ @@ -4627,79 +4627,79 @@ func (x Int64x8) MulLow(y Int64x8) Int64x8 // MulLowMasked multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLW, CPU Feature: AVX512BW -func (x Int16x8) MulLowMasked(y Int16x8, z Mask16x8) Int16x8 +func (x Int16x8) MulLowMasked(y Int16x8, mask Mask16x8) Int16x8 // MulLowMasked multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLW, CPU Feature: AVX512BW -func (x Int16x16) MulLowMasked(y Int16x16, z Mask16x16) Int16x16 +func (x Int16x16) MulLowMasked(y Int16x16, mask Mask16x16) Int16x16 // MulLowMasked multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLW, CPU Feature: AVX512BW -func (x Int16x32) MulLowMasked(y Int16x32, z Mask16x32) Int16x32 +func (x Int16x32) MulLowMasked(y Int16x32, mask Mask16x32) Int16x32 // MulLowMasked multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLD, CPU Feature: AVX512F -func (x Int32x4) MulLowMasked(y Int32x4, z Mask32x4) Int32x4 +func (x Int32x4) MulLowMasked(y Int32x4, mask Mask32x4) Int32x4 // MulLowMasked multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLD, CPU Feature: AVX512F -func (x Int32x8) MulLowMasked(y Int32x8, z Mask32x8) Int32x8 +func (x Int32x8) MulLowMasked(y Int32x8, mask Mask32x8) Int32x8 // MulLowMasked multiplies elements and stores the low part of the result, masked. 
// // Asm: VPMULLD, CPU Feature: AVX512F -func (x Int32x16) MulLowMasked(y Int32x16, z Mask32x16) Int32x16 +func (x Int32x16) MulLowMasked(y Int32x16, mask Mask32x16) Int32x16 // MulLowMasked multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLQ, CPU Feature: AVX512DQ -func (x Int64x2) MulLowMasked(y Int64x2, z Mask64x2) Int64x2 +func (x Int64x2) MulLowMasked(y Int64x2, mask Mask64x2) Int64x2 // MulLowMasked multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLQ, CPU Feature: AVX512DQ -func (x Int64x4) MulLowMasked(y Int64x4, z Mask64x4) Int64x4 +func (x Int64x4) MulLowMasked(y Int64x4, mask Mask64x4) Int64x4 // MulLowMasked multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLQ, CPU Feature: AVX512DQ -func (x Int64x8) MulLowMasked(y Int64x8, z Mask64x8) Int64x8 +func (x Int64x8) MulLowMasked(y Int64x8, mask Mask64x8) Int64x8 /* MulMasked */ // MulMasked multiplies corresponding elements of two vectors, masked. // // Asm: VMULPS, CPU Feature: AVX512F -func (x Float32x4) MulMasked(y Float32x4, z Mask32x4) Float32x4 +func (x Float32x4) MulMasked(y Float32x4, mask Mask32x4) Float32x4 // MulMasked multiplies corresponding elements of two vectors, masked. // // Asm: VMULPS, CPU Feature: AVX512F -func (x Float32x8) MulMasked(y Float32x8, z Mask32x8) Float32x8 +func (x Float32x8) MulMasked(y Float32x8, mask Mask32x8) Float32x8 // MulMasked multiplies corresponding elements of two vectors, masked. // // Asm: VMULPS, CPU Feature: AVX512F -func (x Float32x16) MulMasked(y Float32x16, z Mask32x16) Float32x16 +func (x Float32x16) MulMasked(y Float32x16, mask Mask32x16) Float32x16 // MulMasked multiplies corresponding elements of two vectors, masked. // // Asm: VMULPD, CPU Feature: AVX512F -func (x Float64x2) MulMasked(y Float64x2, z Mask64x2) Float64x2 +func (x Float64x2) MulMasked(y Float64x2, mask Mask64x2) Float64x2 // MulMasked multiplies corresponding elements of two vectors, masked. // // Asm: VMULPD, CPU Feature: AVX512F -func (x Float64x4) MulMasked(y Float64x4, z Mask64x4) Float64x4 +func (x Float64x4) MulMasked(y Float64x4, mask Mask64x4) Float64x4 // MulMasked multiplies corresponding elements of two vectors, masked. // // Asm: VMULPD, CPU Feature: AVX512F -func (x Float64x8) MulMasked(y Float64x8, z Mask64x8) Float64x8 +func (x Float64x8) MulMasked(y Float64x8, mask Mask64x8) Float64x8 /* NotEqual */ @@ -4858,152 +4858,152 @@ func (x Uint64x8) NotEqual(y Uint64x8) Mask64x8 // NotEqualMasked compares for inequality. // // Asm: VCMPPS, CPU Feature: AVX512F -func (x Float32x4) NotEqualMasked(y Float32x4, z Mask32x4) Mask32x4 +func (x Float32x4) NotEqualMasked(y Float32x4, mask Mask32x4) Mask32x4 // NotEqualMasked compares for inequality. // // Asm: VCMPPS, CPU Feature: AVX512F -func (x Float32x8) NotEqualMasked(y Float32x8, z Mask32x8) Mask32x8 +func (x Float32x8) NotEqualMasked(y Float32x8, mask Mask32x8) Mask32x8 // NotEqualMasked compares for inequality. // // Asm: VCMPPS, CPU Feature: AVX512F -func (x Float32x16) NotEqualMasked(y Float32x16, z Mask32x16) Mask32x16 +func (x Float32x16) NotEqualMasked(y Float32x16, mask Mask32x16) Mask32x16 // NotEqualMasked compares for inequality. // // Asm: VCMPPD, CPU Feature: AVX512F -func (x Float64x2) NotEqualMasked(y Float64x2, z Mask64x2) Mask64x2 +func (x Float64x2) NotEqualMasked(y Float64x2, mask Mask64x2) Mask64x2 // NotEqualMasked compares for inequality. 
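// Another illustrative call-site sketch (same assumed package and import as the
// clampMin example above; the helper name is made up). MulMasked keeps the
// two-vector shape of Mul and only appends the trailing mask argument.
func scaleSelected(x, y simd.Float64x8, m simd.Mask64x8) simd.Float64x8 {
	return x.MulMasked(y, m) // VMULPD under a k-mask
}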
// // Asm: VCMPPD, CPU Feature: AVX512F -func (x Float64x4) NotEqualMasked(y Float64x4, z Mask64x4) Mask64x4 +func (x Float64x4) NotEqualMasked(y Float64x4, mask Mask64x4) Mask64x4 // NotEqualMasked compares for inequality. // // Asm: VCMPPD, CPU Feature: AVX512F -func (x Float64x8) NotEqualMasked(y Float64x8, z Mask64x8) Mask64x8 +func (x Float64x8) NotEqualMasked(y Float64x8, mask Mask64x8) Mask64x8 // NotEqualMasked compares for inequality. // // Asm: VPCMPB, CPU Feature: AVX512BW -func (x Int8x16) NotEqualMasked(y Int8x16, z Mask8x16) Mask8x16 +func (x Int8x16) NotEqualMasked(y Int8x16, mask Mask8x16) Mask8x16 // NotEqualMasked compares for inequality. // // Asm: VPCMPB, CPU Feature: AVX512BW -func (x Int8x32) NotEqualMasked(y Int8x32, z Mask8x32) Mask8x32 +func (x Int8x32) NotEqualMasked(y Int8x32, mask Mask8x32) Mask8x32 // NotEqualMasked compares for inequality. // // Asm: VPCMPB, CPU Feature: AVX512BW -func (x Int8x64) NotEqualMasked(y Int8x64, z Mask8x64) Mask8x64 +func (x Int8x64) NotEqualMasked(y Int8x64, mask Mask8x64) Mask8x64 // NotEqualMasked compares for inequality. // // Asm: VPCMPW, CPU Feature: AVX512BW -func (x Int16x8) NotEqualMasked(y Int16x8, z Mask16x8) Mask16x8 +func (x Int16x8) NotEqualMasked(y Int16x8, mask Mask16x8) Mask16x8 // NotEqualMasked compares for inequality. // // Asm: VPCMPW, CPU Feature: AVX512BW -func (x Int16x16) NotEqualMasked(y Int16x16, z Mask16x16) Mask16x16 +func (x Int16x16) NotEqualMasked(y Int16x16, mask Mask16x16) Mask16x16 // NotEqualMasked compares for inequality. // // Asm: VPCMPW, CPU Feature: AVX512BW -func (x Int16x32) NotEqualMasked(y Int16x32, z Mask16x32) Mask16x32 +func (x Int16x32) NotEqualMasked(y Int16x32, mask Mask16x32) Mask16x32 // NotEqualMasked compares for inequality. // // Asm: VPCMPD, CPU Feature: AVX512F -func (x Int32x4) NotEqualMasked(y Int32x4, z Mask32x4) Mask32x4 +func (x Int32x4) NotEqualMasked(y Int32x4, mask Mask32x4) Mask32x4 // NotEqualMasked compares for inequality. // // Asm: VPCMPD, CPU Feature: AVX512F -func (x Int32x8) NotEqualMasked(y Int32x8, z Mask32x8) Mask32x8 +func (x Int32x8) NotEqualMasked(y Int32x8, mask Mask32x8) Mask32x8 // NotEqualMasked compares for inequality. // // Asm: VPCMPD, CPU Feature: AVX512F -func (x Int32x16) NotEqualMasked(y Int32x16, z Mask32x16) Mask32x16 +func (x Int32x16) NotEqualMasked(y Int32x16, mask Mask32x16) Mask32x16 // NotEqualMasked compares for inequality. // // Asm: VPCMPQ, CPU Feature: AVX512F -func (x Int64x2) NotEqualMasked(y Int64x2, z Mask64x2) Mask64x2 +func (x Int64x2) NotEqualMasked(y Int64x2, mask Mask64x2) Mask64x2 // NotEqualMasked compares for inequality. // // Asm: VPCMPQ, CPU Feature: AVX512F -func (x Int64x4) NotEqualMasked(y Int64x4, z Mask64x4) Mask64x4 +func (x Int64x4) NotEqualMasked(y Int64x4, mask Mask64x4) Mask64x4 // NotEqualMasked compares for inequality. // // Asm: VPCMPQ, CPU Feature: AVX512F -func (x Int64x8) NotEqualMasked(y Int64x8, z Mask64x8) Mask64x8 +func (x Int64x8) NotEqualMasked(y Int64x8, mask Mask64x8) Mask64x8 // NotEqualMasked compares for inequality. // // Asm: VPCMPUB, CPU Feature: AVX512BW -func (x Uint8x16) NotEqualMasked(y Uint8x16, z Mask8x16) Mask8x16 +func (x Uint8x16) NotEqualMasked(y Uint8x16, mask Mask8x16) Mask8x16 // NotEqualMasked compares for inequality. // // Asm: VPCMPUB, CPU Feature: AVX512BW -func (x Uint8x32) NotEqualMasked(y Uint8x32, z Mask8x32) Mask8x32 +func (x Uint8x32) NotEqualMasked(y Uint8x32, mask Mask8x32) Mask8x32 // NotEqualMasked compares for inequality. 
// // Asm: VPCMPUB, CPU Feature: AVX512BW -func (x Uint8x64) NotEqualMasked(y Uint8x64, z Mask8x64) Mask8x64 +func (x Uint8x64) NotEqualMasked(y Uint8x64, mask Mask8x64) Mask8x64 // NotEqualMasked compares for inequality. // // Asm: VPCMPUW, CPU Feature: AVX512BW -func (x Uint16x8) NotEqualMasked(y Uint16x8, z Mask16x8) Mask16x8 +func (x Uint16x8) NotEqualMasked(y Uint16x8, mask Mask16x8) Mask16x8 // NotEqualMasked compares for inequality. // // Asm: VPCMPUW, CPU Feature: AVX512BW -func (x Uint16x16) NotEqualMasked(y Uint16x16, z Mask16x16) Mask16x16 +func (x Uint16x16) NotEqualMasked(y Uint16x16, mask Mask16x16) Mask16x16 // NotEqualMasked compares for inequality. // // Asm: VPCMPUW, CPU Feature: AVX512BW -func (x Uint16x32) NotEqualMasked(y Uint16x32, z Mask16x32) Mask16x32 +func (x Uint16x32) NotEqualMasked(y Uint16x32, mask Mask16x32) Mask16x32 // NotEqualMasked compares for inequality. // // Asm: VPCMPUD, CPU Feature: AVX512F -func (x Uint32x4) NotEqualMasked(y Uint32x4, z Mask32x4) Mask32x4 +func (x Uint32x4) NotEqualMasked(y Uint32x4, mask Mask32x4) Mask32x4 // NotEqualMasked compares for inequality. // // Asm: VPCMPUD, CPU Feature: AVX512F -func (x Uint32x8) NotEqualMasked(y Uint32x8, z Mask32x8) Mask32x8 +func (x Uint32x8) NotEqualMasked(y Uint32x8, mask Mask32x8) Mask32x8 // NotEqualMasked compares for inequality. // // Asm: VPCMPUD, CPU Feature: AVX512F -func (x Uint32x16) NotEqualMasked(y Uint32x16, z Mask32x16) Mask32x16 +func (x Uint32x16) NotEqualMasked(y Uint32x16, mask Mask32x16) Mask32x16 // NotEqualMasked compares for inequality. // // Asm: VPCMPUQ, CPU Feature: AVX512F -func (x Uint64x2) NotEqualMasked(y Uint64x2, z Mask64x2) Mask64x2 +func (x Uint64x2) NotEqualMasked(y Uint64x2, mask Mask64x2) Mask64x2 // NotEqualMasked compares for inequality. // // Asm: VPCMPUQ, CPU Feature: AVX512F -func (x Uint64x4) NotEqualMasked(y Uint64x4, z Mask64x4) Mask64x4 +func (x Uint64x4) NotEqualMasked(y Uint64x4, mask Mask64x4) Mask64x4 // NotEqualMasked compares for inequality. // // Asm: VPCMPUQ, CPU Feature: AVX512F -func (x Uint64x8) NotEqualMasked(y Uint64x8, z Mask64x8) Mask64x8 +func (x Uint64x8) NotEqualMasked(y Uint64x8, mask Mask64x8) Mask64x8 /* Or */ @@ -5112,62 +5112,62 @@ func (x Uint64x8) Or(y Uint64x8) Uint64x8 // OrMasked performs a masked bitwise OR operation between two vectors. // // Asm: VPORD, CPU Feature: AVX512F -func (x Int32x4) OrMasked(y Int32x4, z Mask32x4) Int32x4 +func (x Int32x4) OrMasked(y Int32x4, mask Mask32x4) Int32x4 // OrMasked performs a masked bitwise OR operation between two vectors. // // Asm: VPORD, CPU Feature: AVX512F -func (x Int32x8) OrMasked(y Int32x8, z Mask32x8) Int32x8 +func (x Int32x8) OrMasked(y Int32x8, mask Mask32x8) Int32x8 // OrMasked performs a masked bitwise OR operation between two vectors. // // Asm: VPORD, CPU Feature: AVX512F -func (x Int32x16) OrMasked(y Int32x16, z Mask32x16) Int32x16 +func (x Int32x16) OrMasked(y Int32x16, mask Mask32x16) Int32x16 // OrMasked performs a masked bitwise OR operation between two vectors. // // Asm: VPORQ, CPU Feature: AVX512F -func (x Int64x2) OrMasked(y Int64x2, z Mask64x2) Int64x2 +func (x Int64x2) OrMasked(y Int64x2, mask Mask64x2) Int64x2 // OrMasked performs a masked bitwise OR operation between two vectors. // // Asm: VPORQ, CPU Feature: AVX512F -func (x Int64x4) OrMasked(y Int64x4, z Mask64x4) Int64x4 +func (x Int64x4) OrMasked(y Int64x4, mask Mask64x4) Int64x4 // OrMasked performs a masked bitwise OR operation between two vectors. 
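// Illustrative sketch (same assumptions as above): the masked comparison forms
// return Mask values, so their results can feed other masked operations directly.
// The helper name and this particular combination are made up for illustration.
func orWhereChanged(x, y simd.Int32x16, m simd.Mask32x16) simd.Int32x16 {
	changed := x.NotEqualMasked(y, m) // VPCMPD, yields a Mask32x16
	return x.OrMasked(y, changed)     // VPORD gated by that mask
}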
// // Asm: VPORQ, CPU Feature: AVX512F -func (x Int64x8) OrMasked(y Int64x8, z Mask64x8) Int64x8 +func (x Int64x8) OrMasked(y Int64x8, mask Mask64x8) Int64x8 // OrMasked performs a masked bitwise OR operation between two vectors. // // Asm: VPORD, CPU Feature: AVX512F -func (x Uint32x4) OrMasked(y Uint32x4, z Mask32x4) Uint32x4 +func (x Uint32x4) OrMasked(y Uint32x4, mask Mask32x4) Uint32x4 // OrMasked performs a masked bitwise OR operation between two vectors. // // Asm: VPORD, CPU Feature: AVX512F -func (x Uint32x8) OrMasked(y Uint32x8, z Mask32x8) Uint32x8 +func (x Uint32x8) OrMasked(y Uint32x8, mask Mask32x8) Uint32x8 // OrMasked performs a masked bitwise OR operation between two vectors. // // Asm: VPORD, CPU Feature: AVX512F -func (x Uint32x16) OrMasked(y Uint32x16, z Mask32x16) Uint32x16 +func (x Uint32x16) OrMasked(y Uint32x16, mask Mask32x16) Uint32x16 // OrMasked performs a masked bitwise OR operation between two vectors. // // Asm: VPORQ, CPU Feature: AVX512F -func (x Uint64x2) OrMasked(y Uint64x2, z Mask64x2) Uint64x2 +func (x Uint64x2) OrMasked(y Uint64x2, mask Mask64x2) Uint64x2 // OrMasked performs a masked bitwise OR operation between two vectors. // // Asm: VPORQ, CPU Feature: AVX512F -func (x Uint64x4) OrMasked(y Uint64x4, z Mask64x4) Uint64x4 +func (x Uint64x4) OrMasked(y Uint64x4, mask Mask64x4) Uint64x4 // OrMasked performs a masked bitwise OR operation between two vectors. // // Asm: VPORQ, CPU Feature: AVX512F -func (x Uint64x8) OrMasked(y Uint64x8, z Mask64x8) Uint64x8 +func (x Uint64x8) OrMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* PairDotProd */ @@ -5211,17 +5211,17 @@ func (x Int32x16) PairDotProdAccumulate(y Int16x32, z Int16x32) Int32x16 // PairDotProdAccumulateMasked performs dot products on pairs of elements of y and z and accumulates the results to x. // // Asm: VPDPWSSD, CPU Feature: AVX512VNNI -func (x Int32x4) PairDotProdAccumulateMasked(y Int16x8, z Int16x8, u Mask32x4) Int32x4 +func (x Int32x4) PairDotProdAccumulateMasked(y Int16x8, z Int16x8, mask Mask32x4) Int32x4 // PairDotProdAccumulateMasked performs dot products on pairs of elements of y and z and accumulates the results to x. // // Asm: VPDPWSSD, CPU Feature: AVX512VNNI -func (x Int32x8) PairDotProdAccumulateMasked(y Int16x16, z Int16x16, u Mask32x8) Int32x8 +func (x Int32x8) PairDotProdAccumulateMasked(y Int16x16, z Int16x16, mask Mask32x8) Int32x8 // PairDotProdAccumulateMasked performs dot products on pairs of elements of y and z and accumulates the results to x. // // Asm: VPDPWSSD, CPU Feature: AVX512VNNI -func (x Int32x16) PairDotProdAccumulateMasked(y Int16x32, z Int16x32, u Mask32x16) Int32x16 +func (x Int32x16) PairDotProdAccumulateMasked(y Int16x32, z Int16x32, mask Mask32x16) Int32x16 /* PairDotProdMasked */ @@ -5229,19 +5229,19 @@ func (x Int32x16) PairDotProdAccumulateMasked(y Int16x32, z Int16x32, u Mask32x1 // yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDWD, CPU Feature: AVX512BW -func (x Int16x8) PairDotProdMasked(y Int16x8, z Mask16x8) Int32x4 +func (x Int16x8) PairDotProdMasked(y Int16x8, mask Mask16x8) Int32x4 // PairDotProdMasked multiplies the elements and add the pairs together, // yielding a vector of half as many elements with twice the input element size. 
// // Asm: VPMADDWD, CPU Feature: AVX512BW -func (x Int16x16) PairDotProdMasked(y Int16x16, z Mask16x16) Int32x8 +func (x Int16x16) PairDotProdMasked(y Int16x16, mask Mask16x16) Int32x8 // PairDotProdMasked multiplies the elements and add the pairs together, // yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDWD, CPU Feature: AVX512BW -func (x Int16x32) PairDotProdMasked(y Int16x32, z Mask16x32) Int32x16 +func (x Int16x32) PairDotProdMasked(y Int16x32, mask Mask16x32) Int32x16 /* PairwiseAdd */ @@ -5811,7 +5811,7 @@ func (x Uint64x8) Permute2(y Uint64x8, indices Uint64x8) Uint64x8 // Only the needed bits to represent xy's index are used in indices' elements. // // Asm: VPERMI2B, CPU Feature: AVX512VBMI -func (x Int8x16) Permute2Masked(y Int8x16, indices Uint8x16, u Mask8x16) Int8x16 +func (x Int8x16) Permute2Masked(y Int8x16, indices Uint8x16, mask Mask8x16) Int8x16 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -5819,7 +5819,7 @@ func (x Int8x16) Permute2Masked(y Int8x16, indices Uint8x16, u Mask8x16) Int8x16 // Only the needed bits to represent xy's index are used in indices' elements. // // Asm: VPERMI2B, CPU Feature: AVX512VBMI -func (x Uint8x16) Permute2Masked(y Uint8x16, indices Uint8x16, u Mask8x16) Uint8x16 +func (x Uint8x16) Permute2Masked(y Uint8x16, indices Uint8x16, mask Mask8x16) Uint8x16 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -5827,7 +5827,7 @@ func (x Uint8x16) Permute2Masked(y Uint8x16, indices Uint8x16, u Mask8x16) Uint8 // Only the needed bits to represent xy's index are used in indices' elements. // // Asm: VPERMI2B, CPU Feature: AVX512VBMI -func (x Int8x32) Permute2Masked(y Int8x32, indices Uint8x32, u Mask8x32) Int8x32 +func (x Int8x32) Permute2Masked(y Int8x32, indices Uint8x32, mask Mask8x32) Int8x32 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -5835,7 +5835,7 @@ func (x Int8x32) Permute2Masked(y Int8x32, indices Uint8x32, u Mask8x32) Int8x32 // Only the needed bits to represent xy's index are used in indices' elements. // // Asm: VPERMI2B, CPU Feature: AVX512VBMI -func (x Uint8x32) Permute2Masked(y Uint8x32, indices Uint8x32, u Mask8x32) Uint8x32 +func (x Uint8x32) Permute2Masked(y Uint8x32, indices Uint8x32, mask Mask8x32) Uint8x32 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -5843,7 +5843,7 @@ func (x Uint8x32) Permute2Masked(y Uint8x32, indices Uint8x32, u Mask8x32) Uint8 // Only the needed bits to represent xy's index are used in indices' elements. // // Asm: VPERMI2B, CPU Feature: AVX512VBMI -func (x Int8x64) Permute2Masked(y Int8x64, indices Uint8x64, u Mask8x64) Int8x64 +func (x Int8x64) Permute2Masked(y Int8x64, indices Uint8x64, mask Mask8x64) Int8x64 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -5851,7 +5851,7 @@ func (x Int8x64) Permute2Masked(y Int8x64, indices Uint8x64, u Mask8x64) Int8x64 // Only the needed bits to represent xy's index are used in indices' elements. 
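// Illustrative sketch (same assumptions as above) of the widening dot product:
// the mask matches the 16-bit input shape, while the result has half as many
// lanes at twice the width, so the parameter is Mask16x8 even though an Int32x4
// comes back.
func dotPairs(x, y simd.Int16x8, m simd.Mask16x8) simd.Int32x4 {
	return x.PairDotProdMasked(y, m) // VPMADDWD under a k-mask
}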
// // Asm: VPERMI2B, CPU Feature: AVX512VBMI -func (x Uint8x64) Permute2Masked(y Uint8x64, indices Uint8x64, u Mask8x64) Uint8x64 +func (x Uint8x64) Permute2Masked(y Uint8x64, indices Uint8x64, mask Mask8x64) Uint8x64 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -5859,7 +5859,7 @@ func (x Uint8x64) Permute2Masked(y Uint8x64, indices Uint8x64, u Mask8x64) Uint8 // Only the needed bits to represent xy's index are used in indices' elements. // // Asm: VPERMI2W, CPU Feature: AVX512BW -func (x Int16x8) Permute2Masked(y Int16x8, indices Uint16x8, u Mask16x8) Int16x8 +func (x Int16x8) Permute2Masked(y Int16x8, indices Uint16x8, mask Mask16x8) Int16x8 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -5867,7 +5867,7 @@ func (x Int16x8) Permute2Masked(y Int16x8, indices Uint16x8, u Mask16x8) Int16x8 // Only the needed bits to represent xy's index are used in indices' elements. // // Asm: VPERMI2W, CPU Feature: AVX512BW -func (x Uint16x8) Permute2Masked(y Uint16x8, indices Uint16x8, u Mask16x8) Uint16x8 +func (x Uint16x8) Permute2Masked(y Uint16x8, indices Uint16x8, mask Mask16x8) Uint16x8 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -5875,7 +5875,7 @@ func (x Uint16x8) Permute2Masked(y Uint16x8, indices Uint16x8, u Mask16x8) Uint1 // Only the needed bits to represent xy's index are used in indices' elements. // // Asm: VPERMI2W, CPU Feature: AVX512BW -func (x Int16x16) Permute2Masked(y Int16x16, indices Uint16x16, u Mask16x16) Int16x16 +func (x Int16x16) Permute2Masked(y Int16x16, indices Uint16x16, mask Mask16x16) Int16x16 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -5883,7 +5883,7 @@ func (x Int16x16) Permute2Masked(y Int16x16, indices Uint16x16, u Mask16x16) Int // Only the needed bits to represent xy's index are used in indices' elements. // // Asm: VPERMI2W, CPU Feature: AVX512BW -func (x Uint16x16) Permute2Masked(y Uint16x16, indices Uint16x16, u Mask16x16) Uint16x16 +func (x Uint16x16) Permute2Masked(y Uint16x16, indices Uint16x16, mask Mask16x16) Uint16x16 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -5891,7 +5891,7 @@ func (x Uint16x16) Permute2Masked(y Uint16x16, indices Uint16x16, u Mask16x16) U // Only the needed bits to represent xy's index are used in indices' elements. // // Asm: VPERMI2W, CPU Feature: AVX512BW -func (x Int16x32) Permute2Masked(y Int16x32, indices Uint16x32, u Mask16x32) Int16x32 +func (x Int16x32) Permute2Masked(y Int16x32, indices Uint16x32, mask Mask16x32) Int16x32 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -5899,7 +5899,7 @@ func (x Int16x32) Permute2Masked(y Int16x32, indices Uint16x32, u Mask16x32) Int // Only the needed bits to represent xy's index are used in indices' elements. 
// // Asm: VPERMI2W, CPU Feature: AVX512BW -func (x Uint16x32) Permute2Masked(y Uint16x32, indices Uint16x32, u Mask16x32) Uint16x32 +func (x Uint16x32) Permute2Masked(y Uint16x32, indices Uint16x32, mask Mask16x32) Uint16x32 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -5907,7 +5907,7 @@ func (x Uint16x32) Permute2Masked(y Uint16x32, indices Uint16x32, u Mask16x32) U // Only the needed bits to represent xy's index are used in indices' elements. // // Asm: VPERMI2PS, CPU Feature: AVX512F -func (x Float32x4) Permute2Masked(y Float32x4, indices Uint32x4, u Mask32x4) Float32x4 +func (x Float32x4) Permute2Masked(y Float32x4, indices Uint32x4, mask Mask32x4) Float32x4 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -5915,7 +5915,7 @@ func (x Float32x4) Permute2Masked(y Float32x4, indices Uint32x4, u Mask32x4) Flo // Only the needed bits to represent xy's index are used in indices' elements. // // Asm: VPERMI2D, CPU Feature: AVX512F -func (x Int32x4) Permute2Masked(y Int32x4, indices Uint32x4, u Mask32x4) Int32x4 +func (x Int32x4) Permute2Masked(y Int32x4, indices Uint32x4, mask Mask32x4) Int32x4 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -5923,7 +5923,7 @@ func (x Int32x4) Permute2Masked(y Int32x4, indices Uint32x4, u Mask32x4) Int32x4 // Only the needed bits to represent xy's index are used in indices' elements. // // Asm: VPERMI2D, CPU Feature: AVX512F -func (x Uint32x4) Permute2Masked(y Uint32x4, indices Uint32x4, u Mask32x4) Uint32x4 +func (x Uint32x4) Permute2Masked(y Uint32x4, indices Uint32x4, mask Mask32x4) Uint32x4 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -5931,7 +5931,7 @@ func (x Uint32x4) Permute2Masked(y Uint32x4, indices Uint32x4, u Mask32x4) Uint3 // Only the needed bits to represent xy's index are used in indices' elements. // // Asm: VPERMI2PS, CPU Feature: AVX512F -func (x Float32x8) Permute2Masked(y Float32x8, indices Uint32x8, u Mask32x8) Float32x8 +func (x Float32x8) Permute2Masked(y Float32x8, indices Uint32x8, mask Mask32x8) Float32x8 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -5939,7 +5939,7 @@ func (x Float32x8) Permute2Masked(y Float32x8, indices Uint32x8, u Mask32x8) Flo // Only the needed bits to represent xy's index are used in indices' elements. // // Asm: VPERMI2D, CPU Feature: AVX512F -func (x Int32x8) Permute2Masked(y Int32x8, indices Uint32x8, u Mask32x8) Int32x8 +func (x Int32x8) Permute2Masked(y Int32x8, indices Uint32x8, mask Mask32x8) Int32x8 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -5947,7 +5947,7 @@ func (x Int32x8) Permute2Masked(y Int32x8, indices Uint32x8, u Mask32x8) Int32x8 // Only the needed bits to represent xy's index are used in indices' elements. 
// // Asm: VPERMI2D, CPU Feature: AVX512F -func (x Uint32x8) Permute2Masked(y Uint32x8, indices Uint32x8, u Mask32x8) Uint32x8 +func (x Uint32x8) Permute2Masked(y Uint32x8, indices Uint32x8, mask Mask32x8) Uint32x8 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -5955,7 +5955,7 @@ func (x Uint32x8) Permute2Masked(y Uint32x8, indices Uint32x8, u Mask32x8) Uint3 // Only the needed bits to represent xy's index are used in indices' elements. // // Asm: VPERMI2PS, CPU Feature: AVX512F -func (x Float32x16) Permute2Masked(y Float32x16, indices Uint32x16, u Mask32x16) Float32x16 +func (x Float32x16) Permute2Masked(y Float32x16, indices Uint32x16, mask Mask32x16) Float32x16 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -5963,7 +5963,7 @@ func (x Float32x16) Permute2Masked(y Float32x16, indices Uint32x16, u Mask32x16) // Only the needed bits to represent xy's index are used in indices' elements. // // Asm: VPERMI2D, CPU Feature: AVX512F -func (x Int32x16) Permute2Masked(y Int32x16, indices Uint32x16, u Mask32x16) Int32x16 +func (x Int32x16) Permute2Masked(y Int32x16, indices Uint32x16, mask Mask32x16) Int32x16 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -5971,7 +5971,7 @@ func (x Int32x16) Permute2Masked(y Int32x16, indices Uint32x16, u Mask32x16) Int // Only the needed bits to represent xy's index are used in indices' elements. // // Asm: VPERMI2D, CPU Feature: AVX512F -func (x Uint32x16) Permute2Masked(y Uint32x16, indices Uint32x16, u Mask32x16) Uint32x16 +func (x Uint32x16) Permute2Masked(y Uint32x16, indices Uint32x16, mask Mask32x16) Uint32x16 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -5979,7 +5979,7 @@ func (x Uint32x16) Permute2Masked(y Uint32x16, indices Uint32x16, u Mask32x16) U // Only the needed bits to represent xy's index are used in indices' elements. // // Asm: VPERMI2PD, CPU Feature: AVX512F -func (x Float64x2) Permute2Masked(y Float64x2, indices Uint64x2, u Mask64x2) Float64x2 +func (x Float64x2) Permute2Masked(y Float64x2, indices Uint64x2, mask Mask64x2) Float64x2 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -5987,7 +5987,7 @@ func (x Float64x2) Permute2Masked(y Float64x2, indices Uint64x2, u Mask64x2) Flo // Only the needed bits to represent xy's index are used in indices' elements. // // Asm: VPERMI2Q, CPU Feature: AVX512F -func (x Int64x2) Permute2Masked(y Int64x2, indices Uint64x2, u Mask64x2) Int64x2 +func (x Int64x2) Permute2Masked(y Int64x2, indices Uint64x2, mask Mask64x2) Int64x2 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -5995,7 +5995,7 @@ func (x Int64x2) Permute2Masked(y Int64x2, indices Uint64x2, u Mask64x2) Int64x2 // Only the needed bits to represent xy's index are used in indices' elements. 
// // Asm: VPERMI2Q, CPU Feature: AVX512F -func (x Uint64x2) Permute2Masked(y Uint64x2, indices Uint64x2, u Mask64x2) Uint64x2 +func (x Uint64x2) Permute2Masked(y Uint64x2, indices Uint64x2, mask Mask64x2) Uint64x2 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -6003,7 +6003,7 @@ func (x Uint64x2) Permute2Masked(y Uint64x2, indices Uint64x2, u Mask64x2) Uint6 // Only the needed bits to represent xy's index are used in indices' elements. // // Asm: VPERMI2PD, CPU Feature: AVX512F -func (x Float64x4) Permute2Masked(y Float64x4, indices Uint64x4, u Mask64x4) Float64x4 +func (x Float64x4) Permute2Masked(y Float64x4, indices Uint64x4, mask Mask64x4) Float64x4 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -6011,7 +6011,7 @@ func (x Float64x4) Permute2Masked(y Float64x4, indices Uint64x4, u Mask64x4) Flo // Only the needed bits to represent xy's index are used in indices' elements. // // Asm: VPERMI2Q, CPU Feature: AVX512F -func (x Int64x4) Permute2Masked(y Int64x4, indices Uint64x4, u Mask64x4) Int64x4 +func (x Int64x4) Permute2Masked(y Int64x4, indices Uint64x4, mask Mask64x4) Int64x4 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -6019,7 +6019,7 @@ func (x Int64x4) Permute2Masked(y Int64x4, indices Uint64x4, u Mask64x4) Int64x4 // Only the needed bits to represent xy's index are used in indices' elements. // // Asm: VPERMI2Q, CPU Feature: AVX512F -func (x Uint64x4) Permute2Masked(y Uint64x4, indices Uint64x4, u Mask64x4) Uint64x4 +func (x Uint64x4) Permute2Masked(y Uint64x4, indices Uint64x4, mask Mask64x4) Uint64x4 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -6027,7 +6027,7 @@ func (x Uint64x4) Permute2Masked(y Uint64x4, indices Uint64x4, u Mask64x4) Uint6 // Only the needed bits to represent xy's index are used in indices' elements. // // Asm: VPERMI2PD, CPU Feature: AVX512F -func (x Float64x8) Permute2Masked(y Float64x8, indices Uint64x8, u Mask64x8) Float64x8 +func (x Float64x8) Permute2Masked(y Float64x8, indices Uint64x8, mask Mask64x8) Float64x8 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -6035,7 +6035,7 @@ func (x Float64x8) Permute2Masked(y Float64x8, indices Uint64x8, u Mask64x8) Flo // Only the needed bits to represent xy's index are used in indices' elements. // // Asm: VPERMI2Q, CPU Feature: AVX512F -func (x Int64x8) Permute2Masked(y Int64x8, indices Uint64x8, u Mask64x8) Int64x8 +func (x Int64x8) Permute2Masked(y Int64x8, indices Uint64x8, mask Mask64x8) Int64x8 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -6043,7 +6043,7 @@ func (x Int64x8) Permute2Masked(y Int64x8, indices Uint64x8, u Mask64x8) Int64x8 // Only the needed bits to represent xy's index are used in indices' elements. 
// // Asm: VPERMI2Q, CPU Feature: AVX512F -func (x Uint64x8) Permute2Masked(y Uint64x8, indices Uint64x8, u Mask64x8) Uint64x8 +func (x Uint64x8) Permute2Masked(y Uint64x8, indices Uint64x8, mask Mask64x8) Uint64x8 /* PermuteMasked */ @@ -6052,168 +6052,168 @@ func (x Uint64x8) Permute2Masked(y Uint64x8, indices Uint64x8, u Mask64x8) Uint6 // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMB, CPU Feature: AVX512VBMI -func (x Int8x16) PermuteMasked(indices Uint8x16, z Mask8x16) Int8x16 +func (x Int8x16) PermuteMasked(indices Uint8x16, mask Mask8x16) Int8x16 // PermuteMasked performs a full permutation of vector y using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMB, CPU Feature: AVX512VBMI -func (x Uint8x16) PermuteMasked(indices Uint8x16, z Mask8x16) Uint8x16 +func (x Uint8x16) PermuteMasked(indices Uint8x16, mask Mask8x16) Uint8x16 // PermuteMasked performs a full permutation of vector y using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMB, CPU Feature: AVX512VBMI -func (x Int8x32) PermuteMasked(indices Uint8x32, z Mask8x32) Int8x32 +func (x Int8x32) PermuteMasked(indices Uint8x32, mask Mask8x32) Int8x32 // PermuteMasked performs a full permutation of vector y using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMB, CPU Feature: AVX512VBMI -func (x Uint8x32) PermuteMasked(indices Uint8x32, z Mask8x32) Uint8x32 +func (x Uint8x32) PermuteMasked(indices Uint8x32, mask Mask8x32) Uint8x32 // PermuteMasked performs a full permutation of vector y using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMB, CPU Feature: AVX512VBMI -func (x Int8x64) PermuteMasked(indices Uint8x64, z Mask8x64) Int8x64 +func (x Int8x64) PermuteMasked(indices Uint8x64, mask Mask8x64) Int8x64 // PermuteMasked performs a full permutation of vector y using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMB, CPU Feature: AVX512VBMI -func (x Uint8x64) PermuteMasked(indices Uint8x64, z Mask8x64) Uint8x64 +func (x Uint8x64) PermuteMasked(indices Uint8x64, mask Mask8x64) Uint8x64 // PermuteMasked performs a full permutation of vector y using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMW, CPU Feature: AVX512BW -func (x Int16x8) PermuteMasked(indices Uint16x8, z Mask16x8) Int16x8 +func (x Int16x8) PermuteMasked(indices Uint16x8, mask Mask16x8) Int16x8 // PermuteMasked performs a full permutation of vector y using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. 
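// Illustrative sketch (same assumptions as above): the two-source permutation
// draws result lanes from the concatenation of x and y according to indices,
// with the trailing mask gating the operation. The helper name is made up.
func shuffleTwo(x, y simd.Int8x16, idx simd.Uint8x16, m simd.Mask8x16) simd.Int8x16 {
	return x.Permute2Masked(y, idx, m) // VPERMI2B
}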
// // Asm: VPERMW, CPU Feature: AVX512BW -func (x Uint16x8) PermuteMasked(indices Uint16x8, z Mask16x8) Uint16x8 +func (x Uint16x8) PermuteMasked(indices Uint16x8, mask Mask16x8) Uint16x8 // PermuteMasked performs a full permutation of vector y using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMW, CPU Feature: AVX512BW -func (x Int16x16) PermuteMasked(indices Uint16x16, z Mask16x16) Int16x16 +func (x Int16x16) PermuteMasked(indices Uint16x16, mask Mask16x16) Int16x16 // PermuteMasked performs a full permutation of vector y using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMW, CPU Feature: AVX512BW -func (x Uint16x16) PermuteMasked(indices Uint16x16, z Mask16x16) Uint16x16 +func (x Uint16x16) PermuteMasked(indices Uint16x16, mask Mask16x16) Uint16x16 // PermuteMasked performs a full permutation of vector y using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMW, CPU Feature: AVX512BW -func (x Int16x32) PermuteMasked(indices Uint16x32, z Mask16x32) Int16x32 +func (x Int16x32) PermuteMasked(indices Uint16x32, mask Mask16x32) Int16x32 // PermuteMasked performs a full permutation of vector y using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMW, CPU Feature: AVX512BW -func (x Uint16x32) PermuteMasked(indices Uint16x32, z Mask16x32) Uint16x32 +func (x Uint16x32) PermuteMasked(indices Uint16x32, mask Mask16x32) Uint16x32 // PermuteMasked performs a full permutation of vector y using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMPS, CPU Feature: AVX512F -func (x Float32x8) PermuteMasked(indices Uint32x8, z Mask32x8) Float32x8 +func (x Float32x8) PermuteMasked(indices Uint32x8, mask Mask32x8) Float32x8 // PermuteMasked performs a full permutation of vector y using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMD, CPU Feature: AVX512F -func (x Int32x8) PermuteMasked(indices Uint32x8, z Mask32x8) Int32x8 +func (x Int32x8) PermuteMasked(indices Uint32x8, mask Mask32x8) Int32x8 // PermuteMasked performs a full permutation of vector y using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMD, CPU Feature: AVX512F -func (x Uint32x8) PermuteMasked(indices Uint32x8, z Mask32x8) Uint32x8 +func (x Uint32x8) PermuteMasked(indices Uint32x8, mask Mask32x8) Uint32x8 // PermuteMasked performs a full permutation of vector y using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. 
// // Asm: VPERMPS, CPU Feature: AVX512F -func (x Float32x16) PermuteMasked(indices Uint32x16, z Mask32x16) Float32x16 +func (x Float32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Float32x16 // PermuteMasked performs a full permutation of vector y using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMD, CPU Feature: AVX512F -func (x Int32x16) PermuteMasked(indices Uint32x16, z Mask32x16) Int32x16 +func (x Int32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Int32x16 // PermuteMasked performs a full permutation of vector y using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMD, CPU Feature: AVX512F -func (x Uint32x16) PermuteMasked(indices Uint32x16, z Mask32x16) Uint32x16 +func (x Uint32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Uint32x16 // PermuteMasked performs a full permutation of vector y using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMPD, CPU Feature: AVX512F -func (x Float64x4) PermuteMasked(indices Uint64x4, z Mask64x4) Float64x4 +func (x Float64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Float64x4 // PermuteMasked performs a full permutation of vector y using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMQ, CPU Feature: AVX512F -func (x Int64x4) PermuteMasked(indices Uint64x4, z Mask64x4) Int64x4 +func (x Int64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Int64x4 // PermuteMasked performs a full permutation of vector y using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMQ, CPU Feature: AVX512F -func (x Uint64x4) PermuteMasked(indices Uint64x4, z Mask64x4) Uint64x4 +func (x Uint64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Uint64x4 // PermuteMasked performs a full permutation of vector y using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMPD, CPU Feature: AVX512F -func (x Float64x8) PermuteMasked(indices Uint64x8, z Mask64x8) Float64x8 +func (x Float64x8) PermuteMasked(indices Uint64x8, mask Mask64x8) Float64x8 // PermuteMasked performs a full permutation of vector y using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMQ, CPU Feature: AVX512F -func (x Int64x8) PermuteMasked(indices Uint64x8, z Mask64x8) Int64x8 +func (x Int64x8) PermuteMasked(indices Uint64x8, mask Mask64x8) Int64x8 // PermuteMasked performs a full permutation of vector y using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. 
// // Asm: VPERMQ, CPU Feature: AVX512F -func (x Uint64x8) PermuteMasked(indices Uint64x8, z Mask64x8) Uint64x8 +func (x Uint64x8) PermuteMasked(indices Uint64x8, mask Mask64x8) Uint64x8 /* PopCount */ @@ -6342,122 +6342,122 @@ func (x Uint64x8) PopCount() Uint64x8 // PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTB, CPU Feature: AVX512BITALG -func (x Int8x16) PopCountMasked(y Mask8x16) Int8x16 +func (x Int8x16) PopCountMasked(mask Mask8x16) Int8x16 // PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTB, CPU Feature: AVX512BITALG -func (x Int8x32) PopCountMasked(y Mask8x32) Int8x32 +func (x Int8x32) PopCountMasked(mask Mask8x32) Int8x32 // PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTB, CPU Feature: AVX512BITALG -func (x Int8x64) PopCountMasked(y Mask8x64) Int8x64 +func (x Int8x64) PopCountMasked(mask Mask8x64) Int8x64 // PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTW, CPU Feature: AVX512BITALG -func (x Int16x8) PopCountMasked(y Mask16x8) Int16x8 +func (x Int16x8) PopCountMasked(mask Mask16x8) Int16x8 // PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTW, CPU Feature: AVX512BITALG -func (x Int16x16) PopCountMasked(y Mask16x16) Int16x16 +func (x Int16x16) PopCountMasked(mask Mask16x16) Int16x16 // PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTW, CPU Feature: AVX512BITALG -func (x Int16x32) PopCountMasked(y Mask16x32) Int16x32 +func (x Int16x32) PopCountMasked(mask Mask16x32) Int16x32 // PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ -func (x Int32x4) PopCountMasked(y Mask32x4) Int32x4 +func (x Int32x4) PopCountMasked(mask Mask32x4) Int32x4 // PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ -func (x Int32x8) PopCountMasked(y Mask32x8) Int32x8 +func (x Int32x8) PopCountMasked(mask Mask32x8) Int32x8 // PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ -func (x Int32x16) PopCountMasked(y Mask32x16) Int32x16 +func (x Int32x16) PopCountMasked(mask Mask32x16) Int32x16 // PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ -func (x Int64x2) PopCountMasked(y Mask64x2) Int64x2 +func (x Int64x2) PopCountMasked(mask Mask64x2) Int64x2 // PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ -func (x Int64x4) PopCountMasked(y Mask64x4) Int64x4 +func (x Int64x4) PopCountMasked(mask Mask64x4) Int64x4 // PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ -func (x Int64x8) PopCountMasked(y Mask64x8) Int64x8 +func (x Int64x8) PopCountMasked(mask Mask64x8) Int64x8 // PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTB, CPU Feature: AVX512BITALG -func (x Uint8x16) PopCountMasked(y Mask8x16) Uint8x16 +func (x Uint8x16) PopCountMasked(mask Mask8x16) Uint8x16 // PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTB, CPU Feature: AVX512BITALG -func (x Uint8x32) PopCountMasked(y Mask8x32) Uint8x32 +func (x Uint8x32) PopCountMasked(mask Mask8x32) Uint8x32 // PopCountMasked counts the number of set bits in each element. 
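// Illustrative sketch (same assumptions as above): the single-source form takes
// only indices and the mask; compare with the two-source shuffleTwo sketch earlier.
func shuffleOne(x, idx simd.Uint64x8, m simd.Mask64x8) simd.Uint64x8 {
	return x.PermuteMasked(idx, m) // VPERMQ
}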
// // Asm: VPOPCNTB, CPU Feature: AVX512BITALG -func (x Uint8x64) PopCountMasked(y Mask8x64) Uint8x64 +func (x Uint8x64) PopCountMasked(mask Mask8x64) Uint8x64 // PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTW, CPU Feature: AVX512BITALG -func (x Uint16x8) PopCountMasked(y Mask16x8) Uint16x8 +func (x Uint16x8) PopCountMasked(mask Mask16x8) Uint16x8 // PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTW, CPU Feature: AVX512BITALG -func (x Uint16x16) PopCountMasked(y Mask16x16) Uint16x16 +func (x Uint16x16) PopCountMasked(mask Mask16x16) Uint16x16 // PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTW, CPU Feature: AVX512BITALG -func (x Uint16x32) PopCountMasked(y Mask16x32) Uint16x32 +func (x Uint16x32) PopCountMasked(mask Mask16x32) Uint16x32 // PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ -func (x Uint32x4) PopCountMasked(y Mask32x4) Uint32x4 +func (x Uint32x4) PopCountMasked(mask Mask32x4) Uint32x4 // PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ -func (x Uint32x8) PopCountMasked(y Mask32x8) Uint32x8 +func (x Uint32x8) PopCountMasked(mask Mask32x8) Uint32x8 // PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ -func (x Uint32x16) PopCountMasked(y Mask32x16) Uint32x16 +func (x Uint32x16) PopCountMasked(mask Mask32x16) Uint32x16 // PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ -func (x Uint64x2) PopCountMasked(y Mask64x2) Uint64x2 +func (x Uint64x2) PopCountMasked(mask Mask64x2) Uint64x2 // PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ -func (x Uint64x4) PopCountMasked(y Mask64x4) Uint64x4 +func (x Uint64x4) PopCountMasked(mask Mask64x4) Uint64x4 // PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ -func (x Uint64x8) PopCountMasked(y Mask64x8) Uint64x8 +func (x Uint64x8) PopCountMasked(mask Mask64x8) Uint64x8 /* RotateAllLeft */ @@ -6552,84 +6552,84 @@ func (x Uint64x8) RotateAllLeft(shift uint8) Uint64x8 // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPROLD, CPU Feature: AVX512F -func (x Int32x4) RotateAllLeftMasked(shift uint8, y Mask32x4) Int32x4 +func (x Int32x4) RotateAllLeftMasked(shift uint8, mask Mask32x4) Int32x4 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPROLD, CPU Feature: AVX512F -func (x Int32x8) RotateAllLeftMasked(shift uint8, y Mask32x8) Int32x8 +func (x Int32x8) RotateAllLeftMasked(shift uint8, mask Mask32x8) Int32x8 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPROLD, CPU Feature: AVX512F -func (x Int32x16) RotateAllLeftMasked(shift uint8, y Mask32x16) Int32x16 +func (x Int32x16) RotateAllLeftMasked(shift uint8, mask Mask32x16) Int32x16 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. 
// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPROLQ, CPU Feature: AVX512F -func (x Int64x2) RotateAllLeftMasked(shift uint8, y Mask64x2) Int64x2 +func (x Int64x2) RotateAllLeftMasked(shift uint8, mask Mask64x2) Int64x2 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPROLQ, CPU Feature: AVX512F -func (x Int64x4) RotateAllLeftMasked(shift uint8, y Mask64x4) Int64x4 +func (x Int64x4) RotateAllLeftMasked(shift uint8, mask Mask64x4) Int64x4 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPROLQ, CPU Feature: AVX512F -func (x Int64x8) RotateAllLeftMasked(shift uint8, y Mask64x8) Int64x8 +func (x Int64x8) RotateAllLeftMasked(shift uint8, mask Mask64x8) Int64x8 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPROLD, CPU Feature: AVX512F -func (x Uint32x4) RotateAllLeftMasked(shift uint8, y Mask32x4) Uint32x4 +func (x Uint32x4) RotateAllLeftMasked(shift uint8, mask Mask32x4) Uint32x4 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPROLD, CPU Feature: AVX512F -func (x Uint32x8) RotateAllLeftMasked(shift uint8, y Mask32x8) Uint32x8 +func (x Uint32x8) RotateAllLeftMasked(shift uint8, mask Mask32x8) Uint32x8 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPROLD, CPU Feature: AVX512F -func (x Uint32x16) RotateAllLeftMasked(shift uint8, y Mask32x16) Uint32x16 +func (x Uint32x16) RotateAllLeftMasked(shift uint8, mask Mask32x16) Uint32x16 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPROLQ, CPU Feature: AVX512F -func (x Uint64x2) RotateAllLeftMasked(shift uint8, y Mask64x2) Uint64x2 +func (x Uint64x2) RotateAllLeftMasked(shift uint8, mask Mask64x2) Uint64x2 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPROLQ, CPU Feature: AVX512F -func (x Uint64x4) RotateAllLeftMasked(shift uint8, y Mask64x4) Uint64x4 +func (x Uint64x4) RotateAllLeftMasked(shift uint8, mask Mask64x4) Uint64x4 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPROLQ, CPU Feature: AVX512F -func (x Uint64x8) RotateAllLeftMasked(shift uint8, y Mask64x8) Uint64x8 +func (x Uint64x8) RotateAllLeftMasked(shift uint8, mask Mask64x8) Uint64x8 /* RotateAllRight */ @@ -6724,84 +6724,84 @@ func (x Uint64x8) RotateAllRight(shift uint8) Uint64x8 // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPRORD, CPU Feature: AVX512F -func (x Int32x4) RotateAllRightMasked(shift uint8, y Mask32x4) Int32x4 +func (x Int32x4) RotateAllRightMasked(shift uint8, mask Mask32x4) Int32x4 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPRORD, CPU Feature: AVX512F -func (x Int32x8) RotateAllRightMasked(shift uint8, y Mask32x8) Int32x8 +func (x Int32x8) RotateAllRightMasked(shift uint8, mask Mask32x8) Int32x8 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPRORD, CPU Feature: AVX512F -func (x Int32x16) RotateAllRightMasked(shift uint8, y Mask32x16) Int32x16 +func (x Int32x16) RotateAllRightMasked(shift uint8, mask Mask32x16) Int32x16 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPRORQ, CPU Feature: AVX512F -func (x Int64x2) RotateAllRightMasked(shift uint8, y Mask64x2) Int64x2 +func (x Int64x2) RotateAllRightMasked(shift uint8, mask Mask64x2) Int64x2 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPRORQ, CPU Feature: AVX512F -func (x Int64x4) RotateAllRightMasked(shift uint8, y Mask64x4) Int64x4 +func (x Int64x4) RotateAllRightMasked(shift uint8, mask Mask64x4) Int64x4 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPRORQ, CPU Feature: AVX512F -func (x Int64x8) RotateAllRightMasked(shift uint8, y Mask64x8) Int64x8 +func (x Int64x8) RotateAllRightMasked(shift uint8, mask Mask64x8) Int64x8 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPRORD, CPU Feature: AVX512F -func (x Uint32x4) RotateAllRightMasked(shift uint8, y Mask32x4) Uint32x4 +func (x Uint32x4) RotateAllRightMasked(shift uint8, mask Mask32x4) Uint32x4 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPRORD, CPU Feature: AVX512F -func (x Uint32x8) RotateAllRightMasked(shift uint8, y Mask32x8) Uint32x8 +func (x Uint32x8) RotateAllRightMasked(shift uint8, mask Mask32x8) Uint32x8 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPRORD, CPU Feature: AVX512F -func (x Uint32x16) RotateAllRightMasked(shift uint8, y Mask32x16) Uint32x16 +func (x Uint32x16) RotateAllRightMasked(shift uint8, mask Mask32x16) Uint32x16 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
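// Illustrative sketch (same assumptions as above): the RotateAll* forms take the
// rotate count as an immediate, and the doc comments above require it to be a
// constant; a non-constant count triggers a runtime panic.
func rotr3(x simd.Uint32x8, m simd.Mask32x8) simd.Uint32x8 {
	return x.RotateAllRightMasked(3, m) // VPRORD with an immediate count of 3
}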
// // Asm: VPRORQ, CPU Feature: AVX512F -func (x Uint64x2) RotateAllRightMasked(shift uint8, y Mask64x2) Uint64x2 +func (x Uint64x2) RotateAllRightMasked(shift uint8, mask Mask64x2) Uint64x2 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPRORQ, CPU Feature: AVX512F -func (x Uint64x4) RotateAllRightMasked(shift uint8, y Mask64x4) Uint64x4 +func (x Uint64x4) RotateAllRightMasked(shift uint8, mask Mask64x4) Uint64x4 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPRORQ, CPU Feature: AVX512F -func (x Uint64x8) RotateAllRightMasked(shift uint8, y Mask64x8) Uint64x8 +func (x Uint64x8) RotateAllRightMasked(shift uint8, mask Mask64x8) Uint64x8 /* RotateLeft */ @@ -6870,62 +6870,62 @@ func (x Uint64x8) RotateLeft(y Uint64x8) Uint64x8 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // Asm: VPROLVD, CPU Feature: AVX512F -func (x Int32x4) RotateLeftMasked(y Int32x4, z Mask32x4) Int32x4 +func (x Int32x4) RotateLeftMasked(y Int32x4, mask Mask32x4) Int32x4 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // Asm: VPROLVD, CPU Feature: AVX512F -func (x Int32x8) RotateLeftMasked(y Int32x8, z Mask32x8) Int32x8 +func (x Int32x8) RotateLeftMasked(y Int32x8, mask Mask32x8) Int32x8 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // Asm: VPROLVD, CPU Feature: AVX512F -func (x Int32x16) RotateLeftMasked(y Int32x16, z Mask32x16) Int32x16 +func (x Int32x16) RotateLeftMasked(y Int32x16, mask Mask32x16) Int32x16 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // Asm: VPROLVQ, CPU Feature: AVX512F -func (x Int64x2) RotateLeftMasked(y Int64x2, z Mask64x2) Int64x2 +func (x Int64x2) RotateLeftMasked(y Int64x2, mask Mask64x2) Int64x2 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // Asm: VPROLVQ, CPU Feature: AVX512F -func (x Int64x4) RotateLeftMasked(y Int64x4, z Mask64x4) Int64x4 +func (x Int64x4) RotateLeftMasked(y Int64x4, mask Mask64x4) Int64x4 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // Asm: VPROLVQ, CPU Feature: AVX512F -func (x Int64x8) RotateLeftMasked(y Int64x8, z Mask64x8) Int64x8 +func (x Int64x8) RotateLeftMasked(y Int64x8, mask Mask64x8) Int64x8 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // Asm: VPROLVD, CPU Feature: AVX512F -func (x Uint32x4) RotateLeftMasked(y Uint32x4, z Mask32x4) Uint32x4 +func (x Uint32x4) RotateLeftMasked(y Uint32x4, mask Mask32x4) Uint32x4 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. 
// // Asm: VPROLVD, CPU Feature: AVX512F -func (x Uint32x8) RotateLeftMasked(y Uint32x8, z Mask32x8) Uint32x8 +func (x Uint32x8) RotateLeftMasked(y Uint32x8, mask Mask32x8) Uint32x8 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // Asm: VPROLVD, CPU Feature: AVX512F -func (x Uint32x16) RotateLeftMasked(y Uint32x16, z Mask32x16) Uint32x16 +func (x Uint32x16) RotateLeftMasked(y Uint32x16, mask Mask32x16) Uint32x16 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // Asm: VPROLVQ, CPU Feature: AVX512F -func (x Uint64x2) RotateLeftMasked(y Uint64x2, z Mask64x2) Uint64x2 +func (x Uint64x2) RotateLeftMasked(y Uint64x2, mask Mask64x2) Uint64x2 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // Asm: VPROLVQ, CPU Feature: AVX512F -func (x Uint64x4) RotateLeftMasked(y Uint64x4, z Mask64x4) Uint64x4 +func (x Uint64x4) RotateLeftMasked(y Uint64x4, mask Mask64x4) Uint64x4 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // Asm: VPROLVQ, CPU Feature: AVX512F -func (x Uint64x8) RotateLeftMasked(y Uint64x8, z Mask64x8) Uint64x8 +func (x Uint64x8) RotateLeftMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* RotateRight */ @@ -6994,62 +6994,62 @@ func (x Uint64x8) RotateRight(y Uint64x8) Uint64x8 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // Asm: VPRORVD, CPU Feature: AVX512F -func (x Int32x4) RotateRightMasked(y Int32x4, z Mask32x4) Int32x4 +func (x Int32x4) RotateRightMasked(y Int32x4, mask Mask32x4) Int32x4 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // Asm: VPRORVD, CPU Feature: AVX512F -func (x Int32x8) RotateRightMasked(y Int32x8, z Mask32x8) Int32x8 +func (x Int32x8) RotateRightMasked(y Int32x8, mask Mask32x8) Int32x8 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // Asm: VPRORVD, CPU Feature: AVX512F -func (x Int32x16) RotateRightMasked(y Int32x16, z Mask32x16) Int32x16 +func (x Int32x16) RotateRightMasked(y Int32x16, mask Mask32x16) Int32x16 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // Asm: VPRORVQ, CPU Feature: AVX512F -func (x Int64x2) RotateRightMasked(y Int64x2, z Mask64x2) Int64x2 +func (x Int64x2) RotateRightMasked(y Int64x2, mask Mask64x2) Int64x2 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // Asm: VPRORVQ, CPU Feature: AVX512F -func (x Int64x4) RotateRightMasked(y Int64x4, z Mask64x4) Int64x4 +func (x Int64x4) RotateRightMasked(y Int64x4, mask Mask64x4) Int64x4 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // Asm: VPRORVQ, CPU Feature: AVX512F -func (x Int64x8) RotateRightMasked(y Int64x8, z Mask64x8) Int64x8 +func (x Int64x8) RotateRightMasked(y Int64x8, mask Mask64x8) Int64x8 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. 
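Unlike the rotate-by-immediate forms, RotateLeftMasked and RotateRightMasked take a per-lane rotate count in a second vector. A sketch under the same import assumptions as the first example; the helper is hypothetical:

	// rotatePerLane rotates each 32-bit lane of v left by the count held in the
	// corresponding lane of counts, in the lanes selected by m.
	func rotatePerLane(v, counts simd.Int32x4, m simd.Mask32x4) simd.Int32x4 {
		return v.RotateLeftMasked(counts, m)
	}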
// // Asm: VPRORVD, CPU Feature: AVX512F -func (x Uint32x4) RotateRightMasked(y Uint32x4, z Mask32x4) Uint32x4 +func (x Uint32x4) RotateRightMasked(y Uint32x4, mask Mask32x4) Uint32x4 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // Asm: VPRORVD, CPU Feature: AVX512F -func (x Uint32x8) RotateRightMasked(y Uint32x8, z Mask32x8) Uint32x8 +func (x Uint32x8) RotateRightMasked(y Uint32x8, mask Mask32x8) Uint32x8 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // Asm: VPRORVD, CPU Feature: AVX512F -func (x Uint32x16) RotateRightMasked(y Uint32x16, z Mask32x16) Uint32x16 +func (x Uint32x16) RotateRightMasked(y Uint32x16, mask Mask32x16) Uint32x16 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // Asm: VPRORVQ, CPU Feature: AVX512F -func (x Uint64x2) RotateRightMasked(y Uint64x2, z Mask64x2) Uint64x2 +func (x Uint64x2) RotateRightMasked(y Uint64x2, mask Mask64x2) Uint64x2 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // Asm: VPRORVQ, CPU Feature: AVX512F -func (x Uint64x4) RotateRightMasked(y Uint64x4, z Mask64x4) Uint64x4 +func (x Uint64x4) RotateRightMasked(y Uint64x4, mask Mask64x4) Uint64x4 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // Asm: VPRORVQ, CPU Feature: AVX512F -func (x Uint64x8) RotateRightMasked(y Uint64x8, z Mask64x8) Uint64x8 +func (x Uint64x8) RotateRightMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* Round */ @@ -7124,42 +7124,42 @@ func (x Float64x8) RoundWithPrecision(prec uint8) Float64x8 // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x4) RoundWithPrecisionMasked(prec uint8, y Mask32x4) Float32x4 +func (x Float32x4) RoundWithPrecisionMasked(prec uint8, mask Mask32x4) Float32x4 // RoundWithPrecisionMasked rounds elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x8) RoundWithPrecisionMasked(prec uint8, y Mask32x8) Float32x8 +func (x Float32x8) RoundWithPrecisionMasked(prec uint8, mask Mask32x8) Float32x8 // RoundWithPrecisionMasked rounds elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x16) RoundWithPrecisionMasked(prec uint8, y Mask32x16) Float32x16 +func (x Float32x16) RoundWithPrecisionMasked(prec uint8, mask Mask32x16) Float32x16 // RoundWithPrecisionMasked rounds elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x2) RoundWithPrecisionMasked(prec uint8, y Mask64x2) Float64x2 +func (x Float64x2) RoundWithPrecisionMasked(prec uint8, mask Mask64x2) Float64x2 // RoundWithPrecisionMasked rounds elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x4) RoundWithPrecisionMasked(prec uint8, y Mask64x4) Float64x4 +func (x Float64x4) RoundWithPrecisionMasked(prec uint8, mask Mask64x4) Float64x4 // RoundWithPrecisionMasked rounds elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x8) RoundWithPrecisionMasked(prec uint8, y Mask64x8) Float64x8 +func (x Float64x8) RoundWithPrecisionMasked(prec uint8, mask Mask64x8) Float64x8 /* SaturatedAdd */ @@ -7228,62 +7228,62 @@ func (x Uint16x32) SaturatedAdd(y Uint16x32) Uint16x32 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // // Asm: VPADDSB, CPU Feature: AVX512BW -func (x Int8x16) SaturatedAddMasked(y Int8x16, z Mask8x16) Int8x16 +func (x Int8x16) SaturatedAddMasked(y Int8x16, mask Mask8x16) Int8x16 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // // Asm: VPADDSB, CPU Feature: AVX512BW -func (x Int8x32) SaturatedAddMasked(y Int8x32, z Mask8x32) Int8x32 +func (x Int8x32) SaturatedAddMasked(y Int8x32, mask Mask8x32) Int8x32 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // // Asm: VPADDSB, CPU Feature: AVX512BW -func (x Int8x64) SaturatedAddMasked(y Int8x64, z Mask8x64) Int8x64 +func (x Int8x64) SaturatedAddMasked(y Int8x64, mask Mask8x64) Int8x64 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // // Asm: VPADDSW, CPU Feature: AVX512BW -func (x Int16x8) SaturatedAddMasked(y Int16x8, z Mask16x8) Int16x8 +func (x Int16x8) SaturatedAddMasked(y Int16x8, mask Mask16x8) Int16x8 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // // Asm: VPADDSW, CPU Feature: AVX512BW -func (x Int16x16) SaturatedAddMasked(y Int16x16, z Mask16x16) Int16x16 +func (x Int16x16) SaturatedAddMasked(y Int16x16, mask Mask16x16) Int16x16 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // // Asm: VPADDSW, CPU Feature: AVX512BW -func (x Int16x32) SaturatedAddMasked(y Int16x32, z Mask16x32) Int16x32 +func (x Int16x32) SaturatedAddMasked(y Int16x32, mask Mask16x32) Int16x32 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // // Asm: VPADDSB, CPU Feature: AVX512BW -func (x Uint8x16) SaturatedAddMasked(y Uint8x16, z Mask8x16) Uint8x16 +func (x Uint8x16) SaturatedAddMasked(y Uint8x16, mask Mask8x16) Uint8x16 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // // Asm: VPADDSB, CPU Feature: AVX512BW -func (x Uint8x32) SaturatedAddMasked(y Uint8x32, z Mask8x32) Uint8x32 +func (x Uint8x32) SaturatedAddMasked(y Uint8x32, mask Mask8x32) Uint8x32 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // // Asm: VPADDSB, CPU Feature: AVX512BW -func (x Uint8x64) SaturatedAddMasked(y Uint8x64, z Mask8x64) Uint8x64 +func (x Uint8x64) SaturatedAddMasked(y Uint8x64, mask Mask8x64) Uint8x64 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // // Asm: VPADDSW, CPU Feature: AVX512BW -func (x Uint16x8) SaturatedAddMasked(y Uint16x8, z Mask16x8) Uint16x8 +func (x Uint16x8) SaturatedAddMasked(y Uint16x8, mask Mask16x8) Uint16x8 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. 
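The RoundWithPrecisionMasked methods above take the precision as a constant immediate. A sketch, same assumptions as before and with a hypothetical helper name:

	// roundSelected rounds the lanes of v selected by m at precision 2; the
	// precision argument must be a compile-time constant.
	func roundSelected(v simd.Float32x8, m simd.Mask32x8) simd.Float32x8 {
		return v.RoundWithPrecisionMasked(2, m)
	}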
// // Asm: VPADDSW, CPU Feature: AVX512BW -func (x Uint16x16) SaturatedAddMasked(y Uint16x16, z Mask16x16) Uint16x16 +func (x Uint16x16) SaturatedAddMasked(y Uint16x16, mask Mask16x16) Uint16x16 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // // Asm: VPADDSW, CPU Feature: AVX512BW -func (x Uint16x32) SaturatedAddMasked(y Uint16x32, z Mask16x32) Uint16x32 +func (x Uint16x32) SaturatedAddMasked(y Uint16x32, mask Mask16x32) Uint16x32 /* SaturatedPairDotProdAccumulate */ @@ -7307,17 +7307,17 @@ func (x Int32x16) SaturatedPairDotProdAccumulate(y Int16x32, z Int16x32) Int32x1 // SaturatedPairDotProdAccumulateMasked performs dot products on pairs of elements of y and z and accumulates the results to x. // // Asm: VPDPWSSDS, CPU Feature: AVX512VNNI -func (x Int32x4) SaturatedPairDotProdAccumulateMasked(y Int16x8, z Int16x8, u Mask32x4) Int32x4 +func (x Int32x4) SaturatedPairDotProdAccumulateMasked(y Int16x8, z Int16x8, mask Mask32x4) Int32x4 // SaturatedPairDotProdAccumulateMasked performs dot products on pairs of elements of y and z and accumulates the results to x. // // Asm: VPDPWSSDS, CPU Feature: AVX512VNNI -func (x Int32x8) SaturatedPairDotProdAccumulateMasked(y Int16x16, z Int16x16, u Mask32x8) Int32x8 +func (x Int32x8) SaturatedPairDotProdAccumulateMasked(y Int16x16, z Int16x16, mask Mask32x8) Int32x8 // SaturatedPairDotProdAccumulateMasked performs dot products on pairs of elements of y and z and accumulates the results to x. // // Asm: VPDPWSSDS, CPU Feature: AVX512VNNI -func (x Int32x16) SaturatedPairDotProdAccumulateMasked(y Int16x32, z Int16x32, u Mask32x16) Int32x16 +func (x Int32x16) SaturatedPairDotProdAccumulateMasked(y Int16x32, z Int16x32, mask Mask32x16) Int32x16 /* SaturatedPairwiseAdd */ @@ -7414,62 +7414,62 @@ func (x Uint16x32) SaturatedSub(y Uint16x32) Uint16x32 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // // Asm: VPSUBSB, CPU Feature: AVX512BW -func (x Int8x16) SaturatedSubMasked(y Int8x16, z Mask8x16) Int8x16 +func (x Int8x16) SaturatedSubMasked(y Int8x16, mask Mask8x16) Int8x16 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // // Asm: VPSUBSB, CPU Feature: AVX512BW -func (x Int8x32) SaturatedSubMasked(y Int8x32, z Mask8x32) Int8x32 +func (x Int8x32) SaturatedSubMasked(y Int8x32, mask Mask8x32) Int8x32 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // // Asm: VPSUBSB, CPU Feature: AVX512BW -func (x Int8x64) SaturatedSubMasked(y Int8x64, z Mask8x64) Int8x64 +func (x Int8x64) SaturatedSubMasked(y Int8x64, mask Mask8x64) Int8x64 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // // Asm: VPSUBSW, CPU Feature: AVX512BW -func (x Int16x8) SaturatedSubMasked(y Int16x8, z Mask16x8) Int16x8 +func (x Int16x8) SaturatedSubMasked(y Int16x8, mask Mask16x8) Int16x8 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // // Asm: VPSUBSW, CPU Feature: AVX512BW -func (x Int16x16) SaturatedSubMasked(y Int16x16, z Mask16x16) Int16x16 +func (x Int16x16) SaturatedSubMasked(y Int16x16, mask Mask16x16) Int16x16 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. 
// // Asm: VPSUBSW, CPU Feature: AVX512BW -func (x Int16x32) SaturatedSubMasked(y Int16x32, z Mask16x32) Int16x32 +func (x Int16x32) SaturatedSubMasked(y Int16x32, mask Mask16x32) Int16x32 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // // Asm: VPSUBSB, CPU Feature: AVX512BW -func (x Uint8x16) SaturatedSubMasked(y Uint8x16, z Mask8x16) Uint8x16 +func (x Uint8x16) SaturatedSubMasked(y Uint8x16, mask Mask8x16) Uint8x16 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // // Asm: VPSUBSB, CPU Feature: AVX512BW -func (x Uint8x32) SaturatedSubMasked(y Uint8x32, z Mask8x32) Uint8x32 +func (x Uint8x32) SaturatedSubMasked(y Uint8x32, mask Mask8x32) Uint8x32 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // // Asm: VPSUBSB, CPU Feature: AVX512BW -func (x Uint8x64) SaturatedSubMasked(y Uint8x64, z Mask8x64) Uint8x64 +func (x Uint8x64) SaturatedSubMasked(y Uint8x64, mask Mask8x64) Uint8x64 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // // Asm: VPSUBSW, CPU Feature: AVX512BW -func (x Uint16x8) SaturatedSubMasked(y Uint16x8, z Mask16x8) Uint16x8 +func (x Uint16x8) SaturatedSubMasked(y Uint16x8, mask Mask16x8) Uint16x8 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // // Asm: VPSUBSW, CPU Feature: AVX512BW -func (x Uint16x16) SaturatedSubMasked(y Uint16x16, z Mask16x16) Uint16x16 +func (x Uint16x16) SaturatedSubMasked(y Uint16x16, mask Mask16x16) Uint16x16 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // // Asm: VPSUBSW, CPU Feature: AVX512BW -func (x Uint16x32) SaturatedSubMasked(y Uint16x32, z Mask16x32) Uint16x32 +func (x Uint16x32) SaturatedSubMasked(y Uint16x32, mask Mask16x32) Uint16x32 /* SaturatedUnsignedSignedPairDotProd */ @@ -7497,19 +7497,19 @@ func (x Uint8x64) SaturatedUnsignedSignedPairDotProd(y Int8x64) Int16x32 // yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDUBSW, CPU Feature: AVX512BW -func (x Uint8x16) SaturatedUnsignedSignedPairDotProdMasked(y Int8x16, z Mask16x8) Int16x8 +func (x Uint8x16) SaturatedUnsignedSignedPairDotProdMasked(y Int8x16, mask Mask16x8) Int16x8 // SaturatedUnsignedSignedPairDotProdMasked multiplies the elements and add the pairs together with saturation, // yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDUBSW, CPU Feature: AVX512BW -func (x Uint8x32) SaturatedUnsignedSignedPairDotProdMasked(y Int8x32, z Mask16x16) Int16x16 +func (x Uint8x32) SaturatedUnsignedSignedPairDotProdMasked(y Int8x32, mask Mask16x16) Int16x16 // SaturatedUnsignedSignedPairDotProdMasked multiplies the elements and add the pairs together with saturation, // yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDUBSW, CPU Feature: AVX512BW -func (x Uint8x64) SaturatedUnsignedSignedPairDotProdMasked(y Int8x64, z Mask16x32) Int16x32 +func (x Uint8x64) SaturatedUnsignedSignedPairDotProdMasked(y Int8x64, mask Mask16x32) Int16x32 /* SaturatedUnsignedSignedQuadDotProdAccumulate */ @@ -7548,32 +7548,32 @@ func (x Uint32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z In // SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
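The saturated add and subtract pairs clamp to the element type's range instead of wrapping on overflow. A sketch of the masked forms, same assumptions, hypothetical helper:

	// mixSaturating adds a and b with saturation in the lanes selected by m,
	// then subtracts c from that result under the same mask, again saturating.
	func mixSaturating(a, b, c simd.Int16x16, m simd.Mask16x16) simd.Int16x16 {
		sum := a.SaturatedAddMasked(b, m)
		return sum.SaturatedSubMasked(c, m)
	}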
// // Asm: VPDPBUSDS, CPU Feature: AVX512VNNI -func (x Int32x4) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int8x16, u Mask32x4) Int32x4 +func (x Int32x4) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int8x16, mask Mask32x4) Int32x4 // SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSDS, CPU Feature: AVX512VNNI -func (x Int32x8) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int8x32, u Mask32x8) Int32x8 +func (x Int32x8) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int8x32, mask Mask32x8) Int32x8 // SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSDS, CPU Feature: AVX512VNNI -func (x Int32x16) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int8x64, u Mask32x16) Int32x16 +func (x Int32x16) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int8x64, mask Mask32x16) Int32x16 // SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSDS, CPU Feature: AVX512VNNI -func (x Uint32x4) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int8x16, u Mask32x4) Uint32x4 +func (x Uint32x4) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int8x16, mask Mask32x4) Uint32x4 // SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSDS, CPU Feature: AVX512VNNI -func (x Uint32x8) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int8x32, u Mask32x8) Uint32x8 +func (x Uint32x8) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int8x32, mask Mask32x8) Uint32x8 // SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSDS, CPU Feature: AVX512VNNI -func (x Uint32x16) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int8x64, u Mask32x16) Uint32x16 +func (x Uint32x16) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int8x64, mask Mask32x16) Uint32x16 /* Set128 */ @@ -7951,7 +7951,7 @@ func (x Uint64x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint64x8) Uint64x8 // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 -func (x Int16x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x8, z Mask16x8) Int16x8 +func (x Int16x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x8, mask Mask16x8) Int16x8 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. @@ -7959,7 +7959,7 @@ func (x Int16x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x8, z Ma // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
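The quad dot-product accumulators above multiply groups of four unsigned bytes of y by signed bytes of z and accumulate the results, with saturation, into the 32-bit lanes of the receiver. A sketch, same assumptions, hypothetical helper:

	// accumulateDot folds the 4-wide unsigned-by-signed byte dot products of y
	// and z into acc, in the 32-bit lanes selected by m.
	func accumulateDot(acc simd.Int32x4, y simd.Uint8x16, z simd.Int8x16, m simd.Mask32x4) simd.Int32x4 {
		return acc.SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y, z, m)
	}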
// // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 -func (x Int16x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x16, z Mask16x16) Int16x16 +func (x Int16x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x16, mask Mask16x16) Int16x16 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. @@ -7967,7 +7967,7 @@ func (x Int16x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x16, z // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 -func (x Int16x32) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x32, z Mask16x32) Int16x32 +func (x Int16x32) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x32, mask Mask16x32) Int16x32 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. @@ -7975,7 +7975,7 @@ func (x Int16x32) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x32, z // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 -func (x Int32x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x4, z Mask32x4) Int32x4 +func (x Int32x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x4, mask Mask32x4) Int32x4 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. @@ -7983,7 +7983,7 @@ func (x Int32x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x4, z Ma // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 -func (x Int32x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x8, z Mask32x8) Int32x8 +func (x Int32x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x8, mask Mask32x8) Int32x8 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. @@ -7991,7 +7991,7 @@ func (x Int32x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x8, z Ma // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 -func (x Int32x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x16, z Mask32x16) Int32x16 +func (x Int32x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x16, mask Mask32x16) Int32x16 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. @@ -7999,7 +7999,7 @@ func (x Int32x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x16, z // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 -func (x Int64x2) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x2, z Mask64x2) Int64x2 +func (x Int64x2) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x2, mask Mask64x2) Int64x2 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. @@ -8007,7 +8007,7 @@ func (x Int64x2) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x2, z Ma // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 -func (x Int64x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x4, z Mask64x4) Int64x4 +func (x Int64x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x4, mask Mask64x4) Int64x4 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. @@ -8015,7 +8015,7 @@ func (x Int64x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x4, z Ma // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 -func (x Int64x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x8, z Mask64x8) Int64x8 +func (x Int64x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x8, mask Mask64x8) Int64x8 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. @@ -8023,7 +8023,7 @@ func (x Int64x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x8, z Ma // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 -func (x Uint16x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x8, z Mask16x8) Uint16x8 +func (x Uint16x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x8, mask Mask16x8) Uint16x8 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. @@ -8031,7 +8031,7 @@ func (x Uint16x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x8, z // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 -func (x Uint16x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x16, z Mask16x16) Uint16x16 +func (x Uint16x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x16, mask Mask16x16) Uint16x16 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. @@ -8039,7 +8039,7 @@ func (x Uint16x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x16, // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 -func (x Uint16x32) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x32, z Mask16x32) Uint16x32 +func (x Uint16x32) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x32, mask Mask16x32) Uint16x32 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. @@ -8047,7 +8047,7 @@ func (x Uint16x32) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x32, // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 -func (x Uint32x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x4, z Mask32x4) Uint32x4 +func (x Uint32x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x4, mask Mask32x4) Uint32x4 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. @@ -8055,7 +8055,7 @@ func (x Uint32x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x4, z // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 -func (x Uint32x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x8, z Mask32x8) Uint32x8 +func (x Uint32x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x8, mask Mask32x8) Uint32x8 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. @@ -8063,7 +8063,7 @@ func (x Uint32x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x8, z // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 -func (x Uint32x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x16, z Mask32x16) Uint32x16 +func (x Uint32x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x16, mask Mask32x16) Uint32x16 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. @@ -8071,7 +8071,7 @@ func (x Uint32x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x16, // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 -func (x Uint64x2) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint64x2, z Mask64x2) Uint64x2 +func (x Uint64x2) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint64x2, mask Mask64x2) Uint64x2 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. @@ -8079,7 +8079,7 @@ func (x Uint64x2) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint64x2, z // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 -func (x Uint64x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint64x4, z Mask64x4) Uint64x4 +func (x Uint64x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint64x4, mask Mask64x4) Uint64x4 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. @@ -8087,99 +8087,99 @@ func (x Uint64x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint64x4, z // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 -func (x Uint64x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint64x8, z Mask64x8) Uint64x8 +func (x Uint64x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint64x8, mask Mask64x8) Uint64x8 /* ShiftAllLeftMasked */ // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLW, CPU Feature: AVX512BW -func (x Int16x8) ShiftAllLeftMasked(y uint64, z Mask16x8) Int16x8 +func (x Int16x8) ShiftAllLeftMasked(y uint64, mask Mask16x8) Int16x8 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLW, CPU Feature: AVX512BW -func (x Int16x16) ShiftAllLeftMasked(y uint64, z Mask16x16) Int16x16 +func (x Int16x16) ShiftAllLeftMasked(y uint64, mask Mask16x16) Int16x16 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLW, CPU Feature: AVX512BW -func (x Int16x32) ShiftAllLeftMasked(y uint64, z Mask16x32) Int16x32 +func (x Int16x32) ShiftAllLeftMasked(y uint64, mask Mask16x32) Int16x32 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLD, CPU Feature: AVX512F -func (x Int32x4) ShiftAllLeftMasked(y uint64, z Mask32x4) Int32x4 +func (x Int32x4) ShiftAllLeftMasked(y uint64, mask Mask32x4) Int32x4 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLD, CPU Feature: AVX512F -func (x Int32x8) ShiftAllLeftMasked(y uint64, z Mask32x8) Int32x8 +func (x Int32x8) ShiftAllLeftMasked(y uint64, mask Mask32x8) Int32x8 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLD, CPU Feature: AVX512F -func (x Int32x16) ShiftAllLeftMasked(y uint64, z Mask32x16) Int32x16 +func (x Int32x16) ShiftAllLeftMasked(y uint64, mask Mask32x16) Int32x16 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLQ, CPU Feature: AVX512F -func (x Int64x2) ShiftAllLeftMasked(y uint64, z Mask64x2) Int64x2 +func (x Int64x2) ShiftAllLeftMasked(y uint64, mask Mask64x2) Int64x2 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLQ, CPU Feature: AVX512F -func (x Int64x4) ShiftAllLeftMasked(y uint64, z Mask64x4) Int64x4 +func (x Int64x4) ShiftAllLeftMasked(y uint64, mask Mask64x4) Int64x4 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. 
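ShiftAllLeftAndFillUpperFromMasked is a funnel shift by a constant: the bits vacated at the bottom of each lane of x are filled from the upper bits of the corresponding lane of y. A sketch, same assumptions, hypothetical helper:

	// funnelLeft shifts each 32-bit lane of x left by 8 bits and fills the
	// emptied low bits from the upper bits of hi, in the lanes selected by m.
	func funnelLeft(x, hi simd.Uint32x8, m simd.Mask32x8) simd.Uint32x8 {
		return x.ShiftAllLeftAndFillUpperFromMasked(8, hi, m)
	}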
// // Asm: VPSLLQ, CPU Feature: AVX512F -func (x Int64x8) ShiftAllLeftMasked(y uint64, z Mask64x8) Int64x8 +func (x Int64x8) ShiftAllLeftMasked(y uint64, mask Mask64x8) Int64x8 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLW, CPU Feature: AVX512BW -func (x Uint16x8) ShiftAllLeftMasked(y uint64, z Mask16x8) Uint16x8 +func (x Uint16x8) ShiftAllLeftMasked(y uint64, mask Mask16x8) Uint16x8 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLW, CPU Feature: AVX512BW -func (x Uint16x16) ShiftAllLeftMasked(y uint64, z Mask16x16) Uint16x16 +func (x Uint16x16) ShiftAllLeftMasked(y uint64, mask Mask16x16) Uint16x16 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLW, CPU Feature: AVX512BW -func (x Uint16x32) ShiftAllLeftMasked(y uint64, z Mask16x32) Uint16x32 +func (x Uint16x32) ShiftAllLeftMasked(y uint64, mask Mask16x32) Uint16x32 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLD, CPU Feature: AVX512F -func (x Uint32x4) ShiftAllLeftMasked(y uint64, z Mask32x4) Uint32x4 +func (x Uint32x4) ShiftAllLeftMasked(y uint64, mask Mask32x4) Uint32x4 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLD, CPU Feature: AVX512F -func (x Uint32x8) ShiftAllLeftMasked(y uint64, z Mask32x8) Uint32x8 +func (x Uint32x8) ShiftAllLeftMasked(y uint64, mask Mask32x8) Uint32x8 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLD, CPU Feature: AVX512F -func (x Uint32x16) ShiftAllLeftMasked(y uint64, z Mask32x16) Uint32x16 +func (x Uint32x16) ShiftAllLeftMasked(y uint64, mask Mask32x16) Uint32x16 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLQ, CPU Feature: AVX512F -func (x Uint64x2) ShiftAllLeftMasked(y uint64, z Mask64x2) Uint64x2 +func (x Uint64x2) ShiftAllLeftMasked(y uint64, mask Mask64x2) Uint64x2 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLQ, CPU Feature: AVX512F -func (x Uint64x4) ShiftAllLeftMasked(y uint64, z Mask64x4) Uint64x4 +func (x Uint64x4) ShiftAllLeftMasked(y uint64, mask Mask64x4) Uint64x4 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLQ, CPU Feature: AVX512F -func (x Uint64x8) ShiftAllLeftMasked(y uint64, z Mask64x8) Uint64x8 +func (x Uint64x8) ShiftAllLeftMasked(y uint64, mask Mask64x8) Uint64x8 /* ShiftAllRight */ @@ -8427,7 +8427,7 @@ func (x Uint64x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint64x8) Uint64x // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
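ShiftAllLeftMasked takes a single scalar count as a plain uint64 rather than a constant immediate, and applies it to every lane. A sketch, same assumptions, hypothetical helper:

	// shiftLanesLeft shifts every 64-bit lane of v left by n bits in the lanes
	// selected by m.
	func shiftLanesLeft(v simd.Uint64x4, n uint64, m simd.Mask64x4) simd.Uint64x4 {
		return v.ShiftAllLeftMasked(n, m)
	}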
// // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 -func (x Int16x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x8, z Mask16x8) Int16x8 +func (x Int16x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x8, mask Mask16x8) Int16x8 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. @@ -8435,7 +8435,7 @@ func (x Int16x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x8, z M // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 -func (x Int16x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x16, z Mask16x16) Int16x16 +func (x Int16x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x16, mask Mask16x16) Int16x16 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. @@ -8443,7 +8443,7 @@ func (x Int16x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x16, z // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 -func (x Int16x32) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x32, z Mask16x32) Int16x32 +func (x Int16x32) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x32, mask Mask16x32) Int16x32 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. @@ -8451,7 +8451,7 @@ func (x Int16x32) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x32, z // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 -func (x Int32x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x4, z Mask32x4) Int32x4 +func (x Int32x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x4, mask Mask32x4) Int32x4 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. @@ -8459,7 +8459,7 @@ func (x Int32x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x4, z M // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 -func (x Int32x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x8, z Mask32x8) Int32x8 +func (x Int32x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x8, mask Mask32x8) Int32x8 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. @@ -8467,7 +8467,7 @@ func (x Int32x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x8, z M // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 -func (x Int32x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x16, z Mask32x16) Int32x16 +func (x Int32x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x16, mask Mask32x16) Int32x16 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. @@ -8475,7 +8475,7 @@ func (x Int32x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x16, z // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 -func (x Int64x2) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x2, z Mask64x2) Int64x2 +func (x Int64x2) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x2, mask Mask64x2) Int64x2 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. @@ -8483,7 +8483,7 @@ func (x Int64x2) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x2, z M // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 -func (x Int64x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x4, z Mask64x4) Int64x4 +func (x Int64x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x4, mask Mask64x4) Int64x4 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. @@ -8491,7 +8491,7 @@ func (x Int64x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x4, z M // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 -func (x Int64x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x8, z Mask64x8) Int64x8 +func (x Int64x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x8, mask Mask64x8) Int64x8 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. @@ -8499,7 +8499,7 @@ func (x Int64x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x8, z M // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 -func (x Uint16x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x8, z Mask16x8) Uint16x8 +func (x Uint16x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x8, mask Mask16x8) Uint16x8 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. @@ -8507,7 +8507,7 @@ func (x Uint16x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x8, z // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 -func (x Uint16x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x16, z Mask16x16) Uint16x16 +func (x Uint16x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x16, mask Mask16x16) Uint16x16 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. @@ -8515,7 +8515,7 @@ func (x Uint16x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x16, // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 -func (x Uint16x32) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x32, z Mask16x32) Uint16x32 +func (x Uint16x32) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x32, mask Mask16x32) Uint16x32 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. @@ -8523,7 +8523,7 @@ func (x Uint16x32) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x32, // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 -func (x Uint32x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x4, z Mask32x4) Uint32x4 +func (x Uint32x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x4, mask Mask32x4) Uint32x4 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. @@ -8531,7 +8531,7 @@ func (x Uint32x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x4, z // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 -func (x Uint32x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x8, z Mask32x8) Uint32x8 +func (x Uint32x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x8, mask Mask32x8) Uint32x8 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. @@ -8539,7 +8539,7 @@ func (x Uint32x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x8, z // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 -func (x Uint32x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x16, z Mask32x16) Uint32x16 +func (x Uint32x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x16, mask Mask32x16) Uint32x16 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. @@ -8547,7 +8547,7 @@ func (x Uint32x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x16, // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 -func (x Uint64x2) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint64x2, z Mask64x2) Uint64x2 +func (x Uint64x2) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint64x2, mask Mask64x2) Uint64x2 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. @@ -8555,7 +8555,7 @@ func (x Uint64x2) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint64x2, z // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 -func (x Uint64x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint64x4, z Mask64x4) Uint64x4 +func (x Uint64x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint64x4, mask Mask64x4) Uint64x4 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. @@ -8563,99 +8563,99 @@ func (x Uint64x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint64x4, z // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 -func (x Uint64x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint64x8, z Mask64x8) Uint64x8 +func (x Uint64x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint64x8, mask Mask64x8) Uint64x8 /* ShiftAllRightMasked */ // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAW, CPU Feature: AVX512BW -func (x Int16x8) ShiftAllRightMasked(y uint64, z Mask16x8) Int16x8 +func (x Int16x8) ShiftAllRightMasked(y uint64, mask Mask16x8) Int16x8 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAW, CPU Feature: AVX512BW -func (x Int16x16) ShiftAllRightMasked(y uint64, z Mask16x16) Int16x16 +func (x Int16x16) ShiftAllRightMasked(y uint64, mask Mask16x16) Int16x16 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAW, CPU Feature: AVX512BW -func (x Int16x32) ShiftAllRightMasked(y uint64, z Mask16x32) Int16x32 +func (x Int16x32) ShiftAllRightMasked(y uint64, mask Mask16x32) Int16x32 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAD, CPU Feature: AVX512F -func (x Int32x4) ShiftAllRightMasked(y uint64, z Mask32x4) Int32x4 +func (x Int32x4) ShiftAllRightMasked(y uint64, mask Mask32x4) Int32x4 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAD, CPU Feature: AVX512F -func (x Int32x8) ShiftAllRightMasked(y uint64, z Mask32x8) Int32x8 +func (x Int32x8) ShiftAllRightMasked(y uint64, mask Mask32x8) Int32x8 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. 
// // Asm: VPSRAD, CPU Feature: AVX512F -func (x Int32x16) ShiftAllRightMasked(y uint64, z Mask32x16) Int32x16 +func (x Int32x16) ShiftAllRightMasked(y uint64, mask Mask32x16) Int32x16 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAQ, CPU Feature: AVX512F -func (x Int64x2) ShiftAllRightMasked(y uint64, z Mask64x2) Int64x2 +func (x Int64x2) ShiftAllRightMasked(y uint64, mask Mask64x2) Int64x2 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAQ, CPU Feature: AVX512F -func (x Int64x4) ShiftAllRightMasked(y uint64, z Mask64x4) Int64x4 +func (x Int64x4) ShiftAllRightMasked(y uint64, mask Mask64x4) Int64x4 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAQ, CPU Feature: AVX512F -func (x Int64x8) ShiftAllRightMasked(y uint64, z Mask64x8) Int64x8 +func (x Int64x8) ShiftAllRightMasked(y uint64, mask Mask64x8) Int64x8 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // // Asm: VPSRLW, CPU Feature: AVX512BW -func (x Uint16x8) ShiftAllRightMasked(y uint64, z Mask16x8) Uint16x8 +func (x Uint16x8) ShiftAllRightMasked(y uint64, mask Mask16x8) Uint16x8 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // // Asm: VPSRLW, CPU Feature: AVX512BW -func (x Uint16x16) ShiftAllRightMasked(y uint64, z Mask16x16) Uint16x16 +func (x Uint16x16) ShiftAllRightMasked(y uint64, mask Mask16x16) Uint16x16 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // // Asm: VPSRLW, CPU Feature: AVX512BW -func (x Uint16x32) ShiftAllRightMasked(y uint64, z Mask16x32) Uint16x32 +func (x Uint16x32) ShiftAllRightMasked(y uint64, mask Mask16x32) Uint16x32 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // // Asm: VPSRLD, CPU Feature: AVX512F -func (x Uint32x4) ShiftAllRightMasked(y uint64, z Mask32x4) Uint32x4 +func (x Uint32x4) ShiftAllRightMasked(y uint64, mask Mask32x4) Uint32x4 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // // Asm: VPSRLD, CPU Feature: AVX512F -func (x Uint32x8) ShiftAllRightMasked(y uint64, z Mask32x8) Uint32x8 +func (x Uint32x8) ShiftAllRightMasked(y uint64, mask Mask32x8) Uint32x8 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // // Asm: VPSRLD, CPU Feature: AVX512F -func (x Uint32x16) ShiftAllRightMasked(y uint64, z Mask32x16) Uint32x16 +func (x Uint32x16) ShiftAllRightMasked(y uint64, mask Mask32x16) Uint32x16 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // // Asm: VPSRLQ, CPU Feature: AVX512F -func (x Uint64x2) ShiftAllRightMasked(y uint64, z Mask64x2) Uint64x2 +func (x Uint64x2) ShiftAllRightMasked(y uint64, mask Mask64x2) Uint64x2 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. 
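Note the distinction the doc comments draw above: on signed element types ShiftAllRightMasked fills the vacated upper bits with the sign bit (arithmetic shift), while on unsigned types it zeroes them (logical shift). A sketch of the arithmetic case, same assumptions, hypothetical helper:

	// scaleDown shifts each signed 32-bit lane of v right by n bits in the lanes
	// selected by m, replicating the sign bit into the vacated upper bits.
	func scaleDown(v simd.Int32x8, n uint64, m simd.Mask32x8) simd.Int32x8 {
		return v.ShiftAllRightMasked(n, m)
	}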
// // Asm: VPSRLQ, CPU Feature: AVX512F -func (x Uint64x4) ShiftAllRightMasked(y uint64, z Mask64x4) Uint64x4 +func (x Uint64x4) ShiftAllRightMasked(y uint64, mask Mask64x4) Uint64x4 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // // Asm: VPSRLQ, CPU Feature: AVX512F -func (x Uint64x8) ShiftAllRightMasked(y uint64, z Mask64x8) Uint64x8 +func (x Uint64x8) ShiftAllRightMasked(y uint64, mask Mask64x8) Uint64x8 /* ShiftLeft */ @@ -8865,201 +8865,201 @@ func (x Uint64x8) ShiftLeftAndFillUpperFrom(y Uint64x8, z Uint64x8) Uint64x8 // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 -func (x Int16x8) ShiftLeftAndFillUpperFromMasked(y Int16x8, z Int16x8, u Mask16x8) Int16x8 +func (x Int16x8) ShiftLeftAndFillUpperFromMasked(y Int16x8, z Int16x8, mask Mask16x8) Int16x8 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 -func (x Int16x16) ShiftLeftAndFillUpperFromMasked(y Int16x16, z Int16x16, u Mask16x16) Int16x16 +func (x Int16x16) ShiftLeftAndFillUpperFromMasked(y Int16x16, z Int16x16, mask Mask16x16) Int16x16 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 -func (x Int16x32) ShiftLeftAndFillUpperFromMasked(y Int16x32, z Int16x32, u Mask16x32) Int16x32 +func (x Int16x32) ShiftLeftAndFillUpperFromMasked(y Int16x32, z Int16x32, mask Mask16x32) Int16x32 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 -func (x Int32x4) ShiftLeftAndFillUpperFromMasked(y Int32x4, z Int32x4, u Mask32x4) Int32x4 +func (x Int32x4) ShiftLeftAndFillUpperFromMasked(y Int32x4, z Int32x4, mask Mask32x4) Int32x4 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 -func (x Int32x8) ShiftLeftAndFillUpperFromMasked(y Int32x8, z Int32x8, u Mask32x8) Int32x8 +func (x Int32x8) ShiftLeftAndFillUpperFromMasked(y Int32x8, z Int32x8, mask Mask32x8) Int32x8 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
// // Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 -func (x Int32x16) ShiftLeftAndFillUpperFromMasked(y Int32x16, z Int32x16, u Mask32x16) Int32x16 +func (x Int32x16) ShiftLeftAndFillUpperFromMasked(y Int32x16, z Int32x16, mask Mask32x16) Int32x16 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 -func (x Int64x2) ShiftLeftAndFillUpperFromMasked(y Int64x2, z Int64x2, u Mask64x2) Int64x2 +func (x Int64x2) ShiftLeftAndFillUpperFromMasked(y Int64x2, z Int64x2, mask Mask64x2) Int64x2 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 -func (x Int64x4) ShiftLeftAndFillUpperFromMasked(y Int64x4, z Int64x4, u Mask64x4) Int64x4 +func (x Int64x4) ShiftLeftAndFillUpperFromMasked(y Int64x4, z Int64x4, mask Mask64x4) Int64x4 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 -func (x Int64x8) ShiftLeftAndFillUpperFromMasked(y Int64x8, z Int64x8, u Mask64x8) Int64x8 +func (x Int64x8) ShiftLeftAndFillUpperFromMasked(y Int64x8, z Int64x8, mask Mask64x8) Int64x8 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 -func (x Uint16x8) ShiftLeftAndFillUpperFromMasked(y Uint16x8, z Uint16x8, u Mask16x8) Uint16x8 +func (x Uint16x8) ShiftLeftAndFillUpperFromMasked(y Uint16x8, z Uint16x8, mask Mask16x8) Uint16x8 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 -func (x Uint16x16) ShiftLeftAndFillUpperFromMasked(y Uint16x16, z Uint16x16, u Mask16x16) Uint16x16 +func (x Uint16x16) ShiftLeftAndFillUpperFromMasked(y Uint16x16, z Uint16x16, mask Mask16x16) Uint16x16 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 -func (x Uint16x32) ShiftLeftAndFillUpperFromMasked(y Uint16x32, z Uint16x32, u Mask16x32) Uint16x32 +func (x Uint16x32) ShiftLeftAndFillUpperFromMasked(y Uint16x32, z Uint16x32, mask Mask16x32) Uint16x32 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
// // Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 -func (x Uint32x4) ShiftLeftAndFillUpperFromMasked(y Uint32x4, z Uint32x4, u Mask32x4) Uint32x4 +func (x Uint32x4) ShiftLeftAndFillUpperFromMasked(y Uint32x4, z Uint32x4, mask Mask32x4) Uint32x4 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 -func (x Uint32x8) ShiftLeftAndFillUpperFromMasked(y Uint32x8, z Uint32x8, u Mask32x8) Uint32x8 +func (x Uint32x8) ShiftLeftAndFillUpperFromMasked(y Uint32x8, z Uint32x8, mask Mask32x8) Uint32x8 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 -func (x Uint32x16) ShiftLeftAndFillUpperFromMasked(y Uint32x16, z Uint32x16, u Mask32x16) Uint32x16 +func (x Uint32x16) ShiftLeftAndFillUpperFromMasked(y Uint32x16, z Uint32x16, mask Mask32x16) Uint32x16 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 -func (x Uint64x2) ShiftLeftAndFillUpperFromMasked(y Uint64x2, z Uint64x2, u Mask64x2) Uint64x2 +func (x Uint64x2) ShiftLeftAndFillUpperFromMasked(y Uint64x2, z Uint64x2, mask Mask64x2) Uint64x2 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 -func (x Uint64x4) ShiftLeftAndFillUpperFromMasked(y Uint64x4, z Uint64x4, u Mask64x4) Uint64x4 +func (x Uint64x4) ShiftLeftAndFillUpperFromMasked(y Uint64x4, z Uint64x4, mask Mask64x4) Uint64x4 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 -func (x Uint64x8) ShiftLeftAndFillUpperFromMasked(y Uint64x8, z Uint64x8, u Mask64x8) Uint64x8 +func (x Uint64x8) ShiftLeftAndFillUpperFromMasked(y Uint64x8, z Uint64x8, mask Mask64x8) Uint64x8 /* ShiftLeftMasked */ // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVW, CPU Feature: AVX512BW -func (x Int16x8) ShiftLeftMasked(y Int16x8, z Mask16x8) Int16x8 +func (x Int16x8) ShiftLeftMasked(y Int16x8, mask Mask16x8) Int16x8 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVW, CPU Feature: AVX512BW -func (x Int16x16) ShiftLeftMasked(y Int16x16, z Mask16x16) Int16x16 +func (x Int16x16) ShiftLeftMasked(y Int16x16, mask Mask16x16) Int16x16 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. 
Emptied lower bits are zeroed. // // Asm: VPSLLVW, CPU Feature: AVX512BW -func (x Int16x32) ShiftLeftMasked(y Int16x32, z Mask16x32) Int16x32 +func (x Int16x32) ShiftLeftMasked(y Int16x32, mask Mask16x32) Int16x32 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVD, CPU Feature: AVX512F -func (x Int32x4) ShiftLeftMasked(y Int32x4, z Mask32x4) Int32x4 +func (x Int32x4) ShiftLeftMasked(y Int32x4, mask Mask32x4) Int32x4 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVD, CPU Feature: AVX512F -func (x Int32x8) ShiftLeftMasked(y Int32x8, z Mask32x8) Int32x8 +func (x Int32x8) ShiftLeftMasked(y Int32x8, mask Mask32x8) Int32x8 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVD, CPU Feature: AVX512F -func (x Int32x16) ShiftLeftMasked(y Int32x16, z Mask32x16) Int32x16 +func (x Int32x16) ShiftLeftMasked(y Int32x16, mask Mask32x16) Int32x16 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVQ, CPU Feature: AVX512F -func (x Int64x2) ShiftLeftMasked(y Int64x2, z Mask64x2) Int64x2 +func (x Int64x2) ShiftLeftMasked(y Int64x2, mask Mask64x2) Int64x2 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVQ, CPU Feature: AVX512F -func (x Int64x4) ShiftLeftMasked(y Int64x4, z Mask64x4) Int64x4 +func (x Int64x4) ShiftLeftMasked(y Int64x4, mask Mask64x4) Int64x4 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVQ, CPU Feature: AVX512F -func (x Int64x8) ShiftLeftMasked(y Int64x8, z Mask64x8) Int64x8 +func (x Int64x8) ShiftLeftMasked(y Int64x8, mask Mask64x8) Int64x8 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVW, CPU Feature: AVX512BW -func (x Uint16x8) ShiftLeftMasked(y Uint16x8, z Mask16x8) Uint16x8 +func (x Uint16x8) ShiftLeftMasked(y Uint16x8, mask Mask16x8) Uint16x8 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVW, CPU Feature: AVX512BW -func (x Uint16x16) ShiftLeftMasked(y Uint16x16, z Mask16x16) Uint16x16 +func (x Uint16x16) ShiftLeftMasked(y Uint16x16, mask Mask16x16) Uint16x16 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVW, CPU Feature: AVX512BW -func (x Uint16x32) ShiftLeftMasked(y Uint16x32, z Mask16x32) Uint16x32 +func (x Uint16x32) ShiftLeftMasked(y Uint16x32, mask Mask16x32) Uint16x32 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. 
// // Asm: VPSLLVD, CPU Feature: AVX512F -func (x Uint32x4) ShiftLeftMasked(y Uint32x4, z Mask32x4) Uint32x4 +func (x Uint32x4) ShiftLeftMasked(y Uint32x4, mask Mask32x4) Uint32x4 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVD, CPU Feature: AVX512F -func (x Uint32x8) ShiftLeftMasked(y Uint32x8, z Mask32x8) Uint32x8 +func (x Uint32x8) ShiftLeftMasked(y Uint32x8, mask Mask32x8) Uint32x8 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVD, CPU Feature: AVX512F -func (x Uint32x16) ShiftLeftMasked(y Uint32x16, z Mask32x16) Uint32x16 +func (x Uint32x16) ShiftLeftMasked(y Uint32x16, mask Mask32x16) Uint32x16 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVQ, CPU Feature: AVX512F -func (x Uint64x2) ShiftLeftMasked(y Uint64x2, z Mask64x2) Uint64x2 +func (x Uint64x2) ShiftLeftMasked(y Uint64x2, mask Mask64x2) Uint64x2 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVQ, CPU Feature: AVX512F -func (x Uint64x4) ShiftLeftMasked(y Uint64x4, z Mask64x4) Uint64x4 +func (x Uint64x4) ShiftLeftMasked(y Uint64x4, mask Mask64x4) Uint64x4 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVQ, CPU Feature: AVX512F -func (x Uint64x8) ShiftLeftMasked(y Uint64x8, z Mask64x8) Uint64x8 +func (x Uint64x8) ShiftLeftMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* ShiftRight */ @@ -9269,201 +9269,201 @@ func (x Uint64x8) ShiftRightAndFillUpperFrom(y Uint64x8, z Uint64x8) Uint64x8 // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 -func (x Int16x8) ShiftRightAndFillUpperFromMasked(y Int16x8, z Int16x8, u Mask16x8) Int16x8 +func (x Int16x8) ShiftRightAndFillUpperFromMasked(y Int16x8, z Int16x8, mask Mask16x8) Int16x8 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 -func (x Int16x16) ShiftRightAndFillUpperFromMasked(y Int16x16, z Int16x16, u Mask16x16) Int16x16 +func (x Int16x16) ShiftRightAndFillUpperFromMasked(y Int16x16, z Int16x16, mask Mask16x16) Int16x16 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// // Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 -func (x Int16x32) ShiftRightAndFillUpperFromMasked(y Int16x32, z Int16x32, u Mask16x32) Int16x32 +func (x Int16x32) ShiftRightAndFillUpperFromMasked(y Int16x32, z Int16x32, mask Mask16x32) Int16x32 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 -func (x Int32x4) ShiftRightAndFillUpperFromMasked(y Int32x4, z Int32x4, u Mask32x4) Int32x4 +func (x Int32x4) ShiftRightAndFillUpperFromMasked(y Int32x4, z Int32x4, mask Mask32x4) Int32x4 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 -func (x Int32x8) ShiftRightAndFillUpperFromMasked(y Int32x8, z Int32x8, u Mask32x8) Int32x8 +func (x Int32x8) ShiftRightAndFillUpperFromMasked(y Int32x8, z Int32x8, mask Mask32x8) Int32x8 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 -func (x Int32x16) ShiftRightAndFillUpperFromMasked(y Int32x16, z Int32x16, u Mask32x16) Int32x16 +func (x Int32x16) ShiftRightAndFillUpperFromMasked(y Int32x16, z Int32x16, mask Mask32x16) Int32x16 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 -func (x Int64x2) ShiftRightAndFillUpperFromMasked(y Int64x2, z Int64x2, u Mask64x2) Int64x2 +func (x Int64x2) ShiftRightAndFillUpperFromMasked(y Int64x2, z Int64x2, mask Mask64x2) Int64x2 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 -func (x Int64x4) ShiftRightAndFillUpperFromMasked(y Int64x4, z Int64x4, u Mask64x4) Int64x4 +func (x Int64x4) ShiftRightAndFillUpperFromMasked(y Int64x4, z Int64x4, mask Mask64x4) Int64x4 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 -func (x Int64x8) ShiftRightAndFillUpperFromMasked(y Int64x8, z Int64x8, u Mask64x8) Int64x8 +func (x Int64x8) ShiftRightAndFillUpperFromMasked(y Int64x8, z Int64x8, mask Mask64x8) Int64x8 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// // Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 -func (x Uint16x8) ShiftRightAndFillUpperFromMasked(y Uint16x8, z Uint16x8, u Mask16x8) Uint16x8 +func (x Uint16x8) ShiftRightAndFillUpperFromMasked(y Uint16x8, z Uint16x8, mask Mask16x8) Uint16x8 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 -func (x Uint16x16) ShiftRightAndFillUpperFromMasked(y Uint16x16, z Uint16x16, u Mask16x16) Uint16x16 +func (x Uint16x16) ShiftRightAndFillUpperFromMasked(y Uint16x16, z Uint16x16, mask Mask16x16) Uint16x16 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 -func (x Uint16x32) ShiftRightAndFillUpperFromMasked(y Uint16x32, z Uint16x32, u Mask16x32) Uint16x32 +func (x Uint16x32) ShiftRightAndFillUpperFromMasked(y Uint16x32, z Uint16x32, mask Mask16x32) Uint16x32 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 -func (x Uint32x4) ShiftRightAndFillUpperFromMasked(y Uint32x4, z Uint32x4, u Mask32x4) Uint32x4 +func (x Uint32x4) ShiftRightAndFillUpperFromMasked(y Uint32x4, z Uint32x4, mask Mask32x4) Uint32x4 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 -func (x Uint32x8) ShiftRightAndFillUpperFromMasked(y Uint32x8, z Uint32x8, u Mask32x8) Uint32x8 +func (x Uint32x8) ShiftRightAndFillUpperFromMasked(y Uint32x8, z Uint32x8, mask Mask32x8) Uint32x8 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 -func (x Uint32x16) ShiftRightAndFillUpperFromMasked(y Uint32x16, z Uint32x16, u Mask32x16) Uint32x16 +func (x Uint32x16) ShiftRightAndFillUpperFromMasked(y Uint32x16, z Uint32x16, mask Mask32x16) Uint32x16 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 -func (x Uint64x2) ShiftRightAndFillUpperFromMasked(y Uint64x2, z Uint64x2, u Mask64x2) Uint64x2 +func (x Uint64x2) ShiftRightAndFillUpperFromMasked(y Uint64x2, z Uint64x2, mask Mask64x2) Uint64x2 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// // Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 -func (x Uint64x4) ShiftRightAndFillUpperFromMasked(y Uint64x4, z Uint64x4, u Mask64x4) Uint64x4 +func (x Uint64x4) ShiftRightAndFillUpperFromMasked(y Uint64x4, z Uint64x4, mask Mask64x4) Uint64x4 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 -func (x Uint64x8) ShiftRightAndFillUpperFromMasked(y Uint64x8, z Uint64x8, u Mask64x8) Uint64x8 +func (x Uint64x8) ShiftRightAndFillUpperFromMasked(y Uint64x8, z Uint64x8, mask Mask64x8) Uint64x8 /* ShiftRightMasked */ // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVW, CPU Feature: AVX512BW -func (x Int16x8) ShiftRightMasked(y Int16x8, z Mask16x8) Int16x8 +func (x Int16x8) ShiftRightMasked(y Int16x8, mask Mask16x8) Int16x8 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVW, CPU Feature: AVX512BW -func (x Int16x16) ShiftRightMasked(y Int16x16, z Mask16x16) Int16x16 +func (x Int16x16) ShiftRightMasked(y Int16x16, mask Mask16x16) Int16x16 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVW, CPU Feature: AVX512BW -func (x Int16x32) ShiftRightMasked(y Int16x32, z Mask16x32) Int16x32 +func (x Int16x32) ShiftRightMasked(y Int16x32, mask Mask16x32) Int16x32 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVD, CPU Feature: AVX512F -func (x Int32x4) ShiftRightMasked(y Int32x4, z Mask32x4) Int32x4 +func (x Int32x4) ShiftRightMasked(y Int32x4, mask Mask32x4) Int32x4 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVD, CPU Feature: AVX512F -func (x Int32x8) ShiftRightMasked(y Int32x8, z Mask32x8) Int32x8 +func (x Int32x8) ShiftRightMasked(y Int32x8, mask Mask32x8) Int32x8 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVD, CPU Feature: AVX512F -func (x Int32x16) ShiftRightMasked(y Int32x16, z Mask32x16) Int32x16 +func (x Int32x16) ShiftRightMasked(y Int32x16, mask Mask32x16) Int32x16 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVQ, CPU Feature: AVX512F -func (x Int64x2) ShiftRightMasked(y Int64x2, z Mask64x2) Int64x2 +func (x Int64x2) ShiftRightMasked(y Int64x2, mask Mask64x2) Int64x2 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
// // Asm: VPSRAVQ, CPU Feature: AVX512F -func (x Int64x4) ShiftRightMasked(y Int64x4, z Mask64x4) Int64x4 +func (x Int64x4) ShiftRightMasked(y Int64x4, mask Mask64x4) Int64x4 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVQ, CPU Feature: AVX512F -func (x Int64x8) ShiftRightMasked(y Int64x8, z Mask64x8) Int64x8 +func (x Int64x8) ShiftRightMasked(y Int64x8, mask Mask64x8) Int64x8 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // Asm: VPSRLVW, CPU Feature: AVX512BW -func (x Uint16x8) ShiftRightMasked(y Uint16x8, z Mask16x8) Uint16x8 +func (x Uint16x8) ShiftRightMasked(y Uint16x8, mask Mask16x8) Uint16x8 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // Asm: VPSRLVW, CPU Feature: AVX512BW -func (x Uint16x16) ShiftRightMasked(y Uint16x16, z Mask16x16) Uint16x16 +func (x Uint16x16) ShiftRightMasked(y Uint16x16, mask Mask16x16) Uint16x16 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // Asm: VPSRLVW, CPU Feature: AVX512BW -func (x Uint16x32) ShiftRightMasked(y Uint16x32, z Mask16x32) Uint16x32 +func (x Uint16x32) ShiftRightMasked(y Uint16x32, mask Mask16x32) Uint16x32 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // Asm: VPSRLVD, CPU Feature: AVX512F -func (x Uint32x4) ShiftRightMasked(y Uint32x4, z Mask32x4) Uint32x4 +func (x Uint32x4) ShiftRightMasked(y Uint32x4, mask Mask32x4) Uint32x4 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // Asm: VPSRLVD, CPU Feature: AVX512F -func (x Uint32x8) ShiftRightMasked(y Uint32x8, z Mask32x8) Uint32x8 +func (x Uint32x8) ShiftRightMasked(y Uint32x8, mask Mask32x8) Uint32x8 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // Asm: VPSRLVD, CPU Feature: AVX512F -func (x Uint32x16) ShiftRightMasked(y Uint32x16, z Mask32x16) Uint32x16 +func (x Uint32x16) ShiftRightMasked(y Uint32x16, mask Mask32x16) Uint32x16 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // Asm: VPSRLVQ, CPU Feature: AVX512F -func (x Uint64x2) ShiftRightMasked(y Uint64x2, z Mask64x2) Uint64x2 +func (x Uint64x2) ShiftRightMasked(y Uint64x2, mask Mask64x2) Uint64x2 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // Asm: VPSRLVQ, CPU Feature: AVX512F -func (x Uint64x4) ShiftRightMasked(y Uint64x4, z Mask64x4) Uint64x4 +func (x Uint64x4) ShiftRightMasked(y Uint64x4, mask Mask64x4) Uint64x4 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. 
// // Asm: VPSRLVQ, CPU Feature: AVX512F -func (x Uint64x8) ShiftRightMasked(y Uint64x8, z Mask64x8) Uint64x8 +func (x Uint64x8) ShiftRightMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* Sign */ @@ -9540,32 +9540,32 @@ func (x Float64x8) Sqrt() Float64x8 // SqrtMasked computes the square root of each element. // // Asm: VSQRTPS, CPU Feature: AVX512F -func (x Float32x4) SqrtMasked(y Mask32x4) Float32x4 +func (x Float32x4) SqrtMasked(mask Mask32x4) Float32x4 // SqrtMasked computes the square root of each element. // // Asm: VSQRTPS, CPU Feature: AVX512F -func (x Float32x8) SqrtMasked(y Mask32x8) Float32x8 +func (x Float32x8) SqrtMasked(mask Mask32x8) Float32x8 // SqrtMasked computes the square root of each element. // // Asm: VSQRTPS, CPU Feature: AVX512F -func (x Float32x16) SqrtMasked(y Mask32x16) Float32x16 +func (x Float32x16) SqrtMasked(mask Mask32x16) Float32x16 // SqrtMasked computes the square root of each element. // // Asm: VSQRTPD, CPU Feature: AVX512F -func (x Float64x2) SqrtMasked(y Mask64x2) Float64x2 +func (x Float64x2) SqrtMasked(mask Mask64x2) Float64x2 // SqrtMasked computes the square root of each element. // // Asm: VSQRTPD, CPU Feature: AVX512F -func (x Float64x4) SqrtMasked(y Mask64x4) Float64x4 +func (x Float64x4) SqrtMasked(mask Mask64x4) Float64x4 // SqrtMasked computes the square root of each element. // // Asm: VSQRTPD, CPU Feature: AVX512F -func (x Float64x8) SqrtMasked(y Mask64x8) Float64x8 +func (x Float64x8) SqrtMasked(mask Mask64x8) Float64x8 /* Sub */ @@ -9724,152 +9724,152 @@ func (x Uint64x8) Sub(y Uint64x8) Uint64x8 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VSUBPS, CPU Feature: AVX512F -func (x Float32x4) SubMasked(y Float32x4, z Mask32x4) Float32x4 +func (x Float32x4) SubMasked(y Float32x4, mask Mask32x4) Float32x4 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VSUBPS, CPU Feature: AVX512F -func (x Float32x8) SubMasked(y Float32x8, z Mask32x8) Float32x8 +func (x Float32x8) SubMasked(y Float32x8, mask Mask32x8) Float32x8 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VSUBPS, CPU Feature: AVX512F -func (x Float32x16) SubMasked(y Float32x16, z Mask32x16) Float32x16 +func (x Float32x16) SubMasked(y Float32x16, mask Mask32x16) Float32x16 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VSUBPD, CPU Feature: AVX512F -func (x Float64x2) SubMasked(y Float64x2, z Mask64x2) Float64x2 +func (x Float64x2) SubMasked(y Float64x2, mask Mask64x2) Float64x2 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VSUBPD, CPU Feature: AVX512F -func (x Float64x4) SubMasked(y Float64x4, z Mask64x4) Float64x4 +func (x Float64x4) SubMasked(y Float64x4, mask Mask64x4) Float64x4 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VSUBPD, CPU Feature: AVX512F -func (x Float64x8) SubMasked(y Float64x8, z Mask64x8) Float64x8 +func (x Float64x8) SubMasked(y Float64x8, mask Mask64x8) Float64x8 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBB, CPU Feature: AVX512BW -func (x Int8x16) SubMasked(y Int8x16, z Mask8x16) Int8x16 +func (x Int8x16) SubMasked(y Int8x16, mask Mask8x16) Int8x16 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBB, CPU Feature: AVX512BW -func (x Int8x32) SubMasked(y Int8x32, z Mask8x32) Int8x32 +func (x Int8x32) SubMasked(y Int8x32, mask Mask8x32) Int8x32 // SubMasked subtracts corresponding elements of two vectors. 
// // Asm: VPSUBB, CPU Feature: AVX512BW -func (x Int8x64) SubMasked(y Int8x64, z Mask8x64) Int8x64 +func (x Int8x64) SubMasked(y Int8x64, mask Mask8x64) Int8x64 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBW, CPU Feature: AVX512BW -func (x Int16x8) SubMasked(y Int16x8, z Mask16x8) Int16x8 +func (x Int16x8) SubMasked(y Int16x8, mask Mask16x8) Int16x8 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBW, CPU Feature: AVX512BW -func (x Int16x16) SubMasked(y Int16x16, z Mask16x16) Int16x16 +func (x Int16x16) SubMasked(y Int16x16, mask Mask16x16) Int16x16 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBW, CPU Feature: AVX512BW -func (x Int16x32) SubMasked(y Int16x32, z Mask16x32) Int16x32 +func (x Int16x32) SubMasked(y Int16x32, mask Mask16x32) Int16x32 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBD, CPU Feature: AVX512F -func (x Int32x4) SubMasked(y Int32x4, z Mask32x4) Int32x4 +func (x Int32x4) SubMasked(y Int32x4, mask Mask32x4) Int32x4 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBD, CPU Feature: AVX512F -func (x Int32x8) SubMasked(y Int32x8, z Mask32x8) Int32x8 +func (x Int32x8) SubMasked(y Int32x8, mask Mask32x8) Int32x8 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBD, CPU Feature: AVX512F -func (x Int32x16) SubMasked(y Int32x16, z Mask32x16) Int32x16 +func (x Int32x16) SubMasked(y Int32x16, mask Mask32x16) Int32x16 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBQ, CPU Feature: AVX512F -func (x Int64x2) SubMasked(y Int64x2, z Mask64x2) Int64x2 +func (x Int64x2) SubMasked(y Int64x2, mask Mask64x2) Int64x2 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBQ, CPU Feature: AVX512F -func (x Int64x4) SubMasked(y Int64x4, z Mask64x4) Int64x4 +func (x Int64x4) SubMasked(y Int64x4, mask Mask64x4) Int64x4 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBQ, CPU Feature: AVX512F -func (x Int64x8) SubMasked(y Int64x8, z Mask64x8) Int64x8 +func (x Int64x8) SubMasked(y Int64x8, mask Mask64x8) Int64x8 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBB, CPU Feature: AVX512BW -func (x Uint8x16) SubMasked(y Uint8x16, z Mask8x16) Uint8x16 +func (x Uint8x16) SubMasked(y Uint8x16, mask Mask8x16) Uint8x16 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBB, CPU Feature: AVX512BW -func (x Uint8x32) SubMasked(y Uint8x32, z Mask8x32) Uint8x32 +func (x Uint8x32) SubMasked(y Uint8x32, mask Mask8x32) Uint8x32 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBB, CPU Feature: AVX512BW -func (x Uint8x64) SubMasked(y Uint8x64, z Mask8x64) Uint8x64 +func (x Uint8x64) SubMasked(y Uint8x64, mask Mask8x64) Uint8x64 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBW, CPU Feature: AVX512BW -func (x Uint16x8) SubMasked(y Uint16x8, z Mask16x8) Uint16x8 +func (x Uint16x8) SubMasked(y Uint16x8, mask Mask16x8) Uint16x8 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBW, CPU Feature: AVX512BW -func (x Uint16x16) SubMasked(y Uint16x16, z Mask16x16) Uint16x16 +func (x Uint16x16) SubMasked(y Uint16x16, mask Mask16x16) Uint16x16 // SubMasked subtracts corresponding elements of two vectors. 
// // Asm: VPSUBW, CPU Feature: AVX512BW -func (x Uint16x32) SubMasked(y Uint16x32, z Mask16x32) Uint16x32 +func (x Uint16x32) SubMasked(y Uint16x32, mask Mask16x32) Uint16x32 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBD, CPU Feature: AVX512F -func (x Uint32x4) SubMasked(y Uint32x4, z Mask32x4) Uint32x4 +func (x Uint32x4) SubMasked(y Uint32x4, mask Mask32x4) Uint32x4 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBD, CPU Feature: AVX512F -func (x Uint32x8) SubMasked(y Uint32x8, z Mask32x8) Uint32x8 +func (x Uint32x8) SubMasked(y Uint32x8, mask Mask32x8) Uint32x8 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBD, CPU Feature: AVX512F -func (x Uint32x16) SubMasked(y Uint32x16, z Mask32x16) Uint32x16 +func (x Uint32x16) SubMasked(y Uint32x16, mask Mask32x16) Uint32x16 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBQ, CPU Feature: AVX512F -func (x Uint64x2) SubMasked(y Uint64x2, z Mask64x2) Uint64x2 +func (x Uint64x2) SubMasked(y Uint64x2, mask Mask64x2) Uint64x2 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBQ, CPU Feature: AVX512F -func (x Uint64x4) SubMasked(y Uint64x4, z Mask64x4) Uint64x4 +func (x Uint64x4) SubMasked(y Uint64x4, mask Mask64x4) Uint64x4 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBQ, CPU Feature: AVX512F -func (x Uint64x8) SubMasked(y Uint64x8, z Mask64x8) Uint64x8 +func (x Uint64x8) SubMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* Trunc */ @@ -9944,42 +9944,42 @@ func (x Float64x8) TruncWithPrecision(prec uint8) Float64x8 // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x4) TruncWithPrecisionMasked(prec uint8, y Mask32x4) Float32x4 +func (x Float32x4) TruncWithPrecisionMasked(prec uint8, mask Mask32x4) Float32x4 // TruncWithPrecisionMasked truncates elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x8) TruncWithPrecisionMasked(prec uint8, y Mask32x8) Float32x8 +func (x Float32x8) TruncWithPrecisionMasked(prec uint8, mask Mask32x8) Float32x8 // TruncWithPrecisionMasked truncates elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x16) TruncWithPrecisionMasked(prec uint8, y Mask32x16) Float32x16 +func (x Float32x16) TruncWithPrecisionMasked(prec uint8, mask Mask32x16) Float32x16 // TruncWithPrecisionMasked truncates elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x2) TruncWithPrecisionMasked(prec uint8, y Mask64x2) Float64x2 +func (x Float64x2) TruncWithPrecisionMasked(prec uint8, mask Mask64x2) Float64x2 // TruncWithPrecisionMasked truncates elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x4) TruncWithPrecisionMasked(prec uint8, y Mask64x4) Float64x4 +func (x Float64x4) TruncWithPrecisionMasked(prec uint8, mask Mask64x4) Float64x4 // TruncWithPrecisionMasked truncates elements with specified precision. 
// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x8) TruncWithPrecisionMasked(prec uint8, y Mask64x8) Float64x8 +func (x Float64x8) TruncWithPrecisionMasked(prec uint8, mask Mask64x8) Float64x8 /* UnsignedSignedQuadDotProdAccumulate */ @@ -10018,32 +10018,32 @@ func (x Uint32x16) UnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Ui // UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSD, CPU Feature: AVX512VNNI -func (x Int32x4) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int8x16, u Mask32x4) Int32x4 +func (x Int32x4) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int8x16, mask Mask32x4) Int32x4 // UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSD, CPU Feature: AVX512VNNI -func (x Int32x8) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int8x32, u Mask32x8) Int32x8 +func (x Int32x8) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int8x32, mask Mask32x8) Int32x8 // UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSD, CPU Feature: AVX512VNNI -func (x Int32x16) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int8x64, u Mask32x16) Int32x16 +func (x Int32x16) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int8x64, mask Mask32x16) Int32x16 // UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSD, CPU Feature: AVX512VNNI -func (x Uint32x4) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int8x16, u Mask32x4) Uint32x4 +func (x Uint32x4) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int8x16, mask Mask32x4) Uint32x4 // UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSD, CPU Feature: AVX512VNNI -func (x Uint32x8) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int8x32, u Mask32x8) Uint32x8 +func (x Uint32x8) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int8x32, mask Mask32x8) Uint32x8 // UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSD, CPU Feature: AVX512VNNI -func (x Uint32x16) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int8x64, u Mask32x16) Uint32x16 +func (x Uint32x16) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int8x64, mask Mask32x16) Uint32x16 /* Xor */ @@ -10152,62 +10152,62 @@ func (x Uint64x8) Xor(y Uint64x8) Uint64x8 // XorMasked performs a masked bitwise XOR operation between two vectors. // // Asm: VPXORD, CPU Feature: AVX512F -func (x Int32x4) XorMasked(y Int32x4, z Mask32x4) Int32x4 +func (x Int32x4) XorMasked(y Int32x4, mask Mask32x4) Int32x4 // XorMasked performs a masked bitwise XOR operation between two vectors. // // Asm: VPXORD, CPU Feature: AVX512F -func (x Int32x8) XorMasked(y Int32x8, z Mask32x8) Int32x8 +func (x Int32x8) XorMasked(y Int32x8, mask Mask32x8) Int32x8 // XorMasked performs a masked bitwise XOR operation between two vectors. 
// // Asm: VPXORD, CPU Feature: AVX512F -func (x Int32x16) XorMasked(y Int32x16, z Mask32x16) Int32x16 +func (x Int32x16) XorMasked(y Int32x16, mask Mask32x16) Int32x16 // XorMasked performs a masked bitwise XOR operation between two vectors. // // Asm: VPXORQ, CPU Feature: AVX512F -func (x Int64x2) XorMasked(y Int64x2, z Mask64x2) Int64x2 +func (x Int64x2) XorMasked(y Int64x2, mask Mask64x2) Int64x2 // XorMasked performs a masked bitwise XOR operation between two vectors. // // Asm: VPXORQ, CPU Feature: AVX512F -func (x Int64x4) XorMasked(y Int64x4, z Mask64x4) Int64x4 +func (x Int64x4) XorMasked(y Int64x4, mask Mask64x4) Int64x4 // XorMasked performs a masked bitwise XOR operation between two vectors. // // Asm: VPXORQ, CPU Feature: AVX512F -func (x Int64x8) XorMasked(y Int64x8, z Mask64x8) Int64x8 +func (x Int64x8) XorMasked(y Int64x8, mask Mask64x8) Int64x8 // XorMasked performs a masked bitwise XOR operation between two vectors. // // Asm: VPXORD, CPU Feature: AVX512F -func (x Uint32x4) XorMasked(y Uint32x4, z Mask32x4) Uint32x4 +func (x Uint32x4) XorMasked(y Uint32x4, mask Mask32x4) Uint32x4 // XorMasked performs a masked bitwise XOR operation between two vectors. // // Asm: VPXORD, CPU Feature: AVX512F -func (x Uint32x8) XorMasked(y Uint32x8, z Mask32x8) Uint32x8 +func (x Uint32x8) XorMasked(y Uint32x8, mask Mask32x8) Uint32x8 // XorMasked performs a masked bitwise XOR operation between two vectors. // // Asm: VPXORD, CPU Feature: AVX512F -func (x Uint32x16) XorMasked(y Uint32x16, z Mask32x16) Uint32x16 +func (x Uint32x16) XorMasked(y Uint32x16, mask Mask32x16) Uint32x16 // XorMasked performs a masked bitwise XOR operation between two vectors. // // Asm: VPXORQ, CPU Feature: AVX512F -func (x Uint64x2) XorMasked(y Uint64x2, z Mask64x2) Uint64x2 +func (x Uint64x2) XorMasked(y Uint64x2, mask Mask64x2) Uint64x2 // XorMasked performs a masked bitwise XOR operation between two vectors. // // Asm: VPXORQ, CPU Feature: AVX512F -func (x Uint64x4) XorMasked(y Uint64x4, z Mask64x4) Uint64x4 +func (x Uint64x4) XorMasked(y Uint64x4, mask Mask64x4) Uint64x4 // XorMasked performs a masked bitwise XOR operation between two vectors. // // Asm: VPXORQ, CPU Feature: AVX512F -func (x Uint64x8) XorMasked(y Uint64x8, z Mask64x8) Uint64x8 +func (x Uint64x8) XorMasked(y Uint64x8, mask Mask64x8) Uint64x8 // Float64x2 converts from Float32x4 to Float64x2 func (from Float32x4) AsFloat64x2() (to Float64x2) From 6d1068014168da26b2f5bcaab15a137aee4d7d05 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Mon, 14 Jul 2025 20:29:46 +0000 Subject: [PATCH 087/139] [dev.simd] cmd/compile, simd: add Compress This CL is generated by CL 687975. 
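A minimal usage sketch (not part of this CL, and only buildable with GOEXPERIMENT=simd on amd64): the lowering rules added below imply a generated method of the form (x Float32x4) Compress(mask Mask32x4) Float32x4, and the masked VCOMPRESSPS/VPCOMPRESS* forms pack the lanes selected by the mask into the low lanes of the result. Lane order and the zeroing of unselected result lanes are assumptions based on the zeroing-masked instruction behavior.

	package example

	import "simd"

	// packSelected returns the lanes of x whose corresponding mask bit is
	// set, moved down into the low lanes of the result; the remaining
	// result lanes are assumed to come back zeroed by the masked
	// compress instruction.
	func packSelected(x simd.Float32x4, m simd.Mask32x4) simd.Float32x4 {
		return x.Compress(m)
	}

The same pattern applies to the integer variants (VPCOMPRESSB/W/D/Q) wired up in this CL.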
Change-Id: I21707d108773cc6d8e6f07aaed60e756faa1e6cb Reviewed-on: https://go-review.googlesource.com/c/go/+/687995 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/amd64/simdssa.go | 36 + .../compile/internal/ssa/_gen/simdAMD64.rules | 30 + .../compile/internal/ssa/_gen/simdAMD64ops.go | 36 +- .../internal/ssa/_gen/simdgenericOps.go | 94 ++- src/cmd/compile/internal/ssa/opGen.go | 732 ++++++++++++++---- src/cmd/compile/internal/ssa/rewriteAMD64.go | 540 +++++++++++++ .../compile/internal/ssagen/simdintrinsics.go | 30 + src/simd/ops_amd64.go | 182 +++++ src/simd/simd_test.go | 10 + src/simd/simd_wrapped_test.go | 638 ++++++++++++++- 10 files changed, 2142 insertions(+), 186 deletions(-) diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 1a7e3be9e50d27..67179ef12d6594 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -600,6 +600,24 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VRSQRT14PDMasked128, ssa.OpAMD64VRSQRT14PDMasked256, ssa.OpAMD64VRSQRT14PDMasked512, + ssa.OpAMD64VCOMPRESSPSMasked128, + ssa.OpAMD64VCOMPRESSPSMasked256, + ssa.OpAMD64VCOMPRESSPSMasked512, + ssa.OpAMD64VCOMPRESSPDMasked128, + ssa.OpAMD64VCOMPRESSPDMasked256, + ssa.OpAMD64VCOMPRESSPDMasked512, + ssa.OpAMD64VPCOMPRESSBMasked128, + ssa.OpAMD64VPCOMPRESSBMasked256, + ssa.OpAMD64VPCOMPRESSBMasked512, + ssa.OpAMD64VPCOMPRESSWMasked128, + ssa.OpAMD64VPCOMPRESSWMasked256, + ssa.OpAMD64VPCOMPRESSWMasked512, + ssa.OpAMD64VPCOMPRESSDMasked128, + ssa.OpAMD64VPCOMPRESSDMasked256, + ssa.OpAMD64VPCOMPRESSDMasked512, + ssa.OpAMD64VPCOMPRESSQMasked128, + ssa.OpAMD64VPCOMPRESSQMasked256, + ssa.OpAMD64VPCOMPRESSQMasked512, ssa.OpAMD64VPOPCNTBMasked128, ssa.OpAMD64VPOPCNTBMasked256, ssa.OpAMD64VPOPCNTBMasked512, @@ -1078,6 +1096,24 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VRNDSCALEPDMasked128, ssa.OpAMD64VRNDSCALEPDMasked256, ssa.OpAMD64VRNDSCALEPDMasked512, + ssa.OpAMD64VCOMPRESSPSMasked128, + ssa.OpAMD64VCOMPRESSPSMasked256, + ssa.OpAMD64VCOMPRESSPSMasked512, + ssa.OpAMD64VCOMPRESSPDMasked128, + ssa.OpAMD64VCOMPRESSPDMasked256, + ssa.OpAMD64VCOMPRESSPDMasked512, + ssa.OpAMD64VPCOMPRESSBMasked128, + ssa.OpAMD64VPCOMPRESSBMasked256, + ssa.OpAMD64VPCOMPRESSBMasked512, + ssa.OpAMD64VPCOMPRESSWMasked128, + ssa.OpAMD64VPCOMPRESSWMasked256, + ssa.OpAMD64VPCOMPRESSWMasked512, + ssa.OpAMD64VPCOMPRESSDMasked128, + ssa.OpAMD64VPCOMPRESSDMasked256, + ssa.OpAMD64VPCOMPRESSDMasked512, + ssa.OpAMD64VPCOMPRESSQMasked128, + ssa.OpAMD64VPCOMPRESSQMasked256, + ssa.OpAMD64VPCOMPRESSQMasked512, ssa.OpAMD64VREDUCEPSMasked128, ssa.OpAMD64VREDUCEPSMasked256, ssa.OpAMD64VREDUCEPSMasked512, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 5898406e9d351c..88744174300ced 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -204,6 +204,36 @@ (CeilWithPrecisionMaskedFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) (CeilWithPrecisionMaskedFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) (CeilWithPrecisionMaskedFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) +(CompressFloat32x4 x mask) => (VCOMPRESSPSMasked128 x (VPMOVVec32x4ToM mask)) +(CompressFloat32x8 x mask) => (VCOMPRESSPSMasked256 x (VPMOVVec32x8ToM mask)) +(CompressFloat32x16 x mask) 
=> (VCOMPRESSPSMasked512 x (VPMOVVec32x16ToM mask)) +(CompressFloat64x2 x mask) => (VCOMPRESSPDMasked128 x (VPMOVVec64x2ToM mask)) +(CompressFloat64x4 x mask) => (VCOMPRESSPDMasked256 x (VPMOVVec64x4ToM mask)) +(CompressFloat64x8 x mask) => (VCOMPRESSPDMasked512 x (VPMOVVec64x8ToM mask)) +(CompressInt8x16 x mask) => (VPCOMPRESSBMasked128 x (VPMOVVec8x16ToM mask)) +(CompressInt8x32 x mask) => (VPCOMPRESSBMasked256 x (VPMOVVec8x32ToM mask)) +(CompressInt8x64 x mask) => (VPCOMPRESSBMasked512 x (VPMOVVec8x64ToM mask)) +(CompressInt16x8 x mask) => (VPCOMPRESSWMasked128 x (VPMOVVec16x8ToM mask)) +(CompressInt16x16 x mask) => (VPCOMPRESSWMasked256 x (VPMOVVec16x16ToM mask)) +(CompressInt16x32 x mask) => (VPCOMPRESSWMasked512 x (VPMOVVec16x32ToM mask)) +(CompressInt32x4 x mask) => (VPCOMPRESSDMasked128 x (VPMOVVec32x4ToM mask)) +(CompressInt32x8 x mask) => (VPCOMPRESSDMasked256 x (VPMOVVec32x8ToM mask)) +(CompressInt32x16 x mask) => (VPCOMPRESSDMasked512 x (VPMOVVec32x16ToM mask)) +(CompressInt64x2 x mask) => (VPCOMPRESSQMasked128 x (VPMOVVec64x2ToM mask)) +(CompressInt64x4 x mask) => (VPCOMPRESSQMasked256 x (VPMOVVec64x4ToM mask)) +(CompressInt64x8 x mask) => (VPCOMPRESSQMasked512 x (VPMOVVec64x8ToM mask)) +(CompressUint8x16 x mask) => (VPCOMPRESSBMasked128 x (VPMOVVec8x16ToM mask)) +(CompressUint8x32 x mask) => (VPCOMPRESSBMasked256 x (VPMOVVec8x32ToM mask)) +(CompressUint8x64 x mask) => (VPCOMPRESSBMasked512 x (VPMOVVec8x64ToM mask)) +(CompressUint16x8 x mask) => (VPCOMPRESSWMasked128 x (VPMOVVec16x8ToM mask)) +(CompressUint16x16 x mask) => (VPCOMPRESSWMasked256 x (VPMOVVec16x16ToM mask)) +(CompressUint16x32 x mask) => (VPCOMPRESSWMasked512 x (VPMOVVec16x32ToM mask)) +(CompressUint32x4 x mask) => (VPCOMPRESSDMasked128 x (VPMOVVec32x4ToM mask)) +(CompressUint32x8 x mask) => (VPCOMPRESSDMasked256 x (VPMOVVec32x8ToM mask)) +(CompressUint32x16 x mask) => (VPCOMPRESSDMasked512 x (VPMOVVec32x16ToM mask)) +(CompressUint64x2 x mask) => (VPCOMPRESSQMasked128 x (VPMOVVec64x2ToM mask)) +(CompressUint64x4 x mask) => (VPCOMPRESSQMasked256 x (VPMOVVec64x4ToM mask)) +(CompressUint64x8 x mask) => (VPCOMPRESSQMasked512 x (VPMOVVec64x8ToM mask)) (DiffWithCeilWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+2] x) (DiffWithCeilWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+2] x) (DiffWithCeilWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+2] x) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 19ac0b0dea6583..a7a3c9715c45cf 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -9,6 +9,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VRCP14PSMasked512", argLength: 2, reg: wkw, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRSQRT14PS512", argLength: 1, reg: w11, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRSQRT14PSMasked512", argLength: 2, reg: wkw, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VCOMPRESSPSMasked512", argLength: 2, reg: wkw, asm: "VCOMPRESSPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VDIVPS512", argLength: 2, reg: w21, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VDIVPSMasked512", argLength: 3, reg: w2kw, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VFMADD213PS512", argLength: 3, reg: w31, asm: 
"VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, @@ -36,6 +37,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VRCP14PSMasked128", argLength: 2, reg: wkw, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRSQRTPS128", argLength: 1, reg: v11, asm: "VRSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRSQRT14PSMasked128", argLength: 2, reg: wkw, asm: "VRSQRT14PS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VCOMPRESSPSMasked128", argLength: 2, reg: wkw, asm: "VCOMPRESSPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPS128", argLength: 2, reg: v21, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPSMasked128", argLength: 3, reg: w2kw, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VFMADD213PS128", argLength: 3, reg: w31, asm: "VFMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, @@ -65,6 +67,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VRCP14PSMasked256", argLength: 2, reg: wkw, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRSQRTPS256", argLength: 1, reg: v11, asm: "VRSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRSQRT14PSMasked256", argLength: 2, reg: wkw, asm: "VRSQRT14PS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VCOMPRESSPSMasked256", argLength: 2, reg: wkw, asm: "VCOMPRESSPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPS256", argLength: 2, reg: v21, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPSMasked256", argLength: 3, reg: w2kw, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VFMADD213PS256", argLength: 3, reg: w31, asm: "VFMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, @@ -94,6 +97,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VRCP14PDMasked128", argLength: 2, reg: wkw, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRSQRT14PD128", argLength: 1, reg: w11, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRSQRT14PDMasked128", argLength: 2, reg: wkw, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VCOMPRESSPDMasked128", argLength: 2, reg: wkw, asm: "VCOMPRESSPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPD128", argLength: 2, reg: v21, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPDMasked128", argLength: 3, reg: w2kw, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VFMADD213PD128", argLength: 3, reg: w31, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, @@ -123,6 +127,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VRCP14PDMasked256", argLength: 2, reg: wkw, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRSQRT14PD256", argLength: 1, reg: w11, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRSQRT14PDMasked256", argLength: 2, reg: wkw, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VCOMPRESSPDMasked256", argLength: 2, reg: wkw, asm: "VCOMPRESSPD", 
commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPD256", argLength: 2, reg: v21, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPDMasked256", argLength: 3, reg: w2kw, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VFMADD213PD256", argLength: 3, reg: w31, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, @@ -151,6 +156,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VRCP14PDMasked512", argLength: 2, reg: wkw, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRSQRT14PD512", argLength: 1, reg: w11, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRSQRT14PDMasked512", argLength: 2, reg: wkw, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VCOMPRESSPDMasked512", argLength: 2, reg: wkw, asm: "VCOMPRESSPD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VDIVPD512", argLength: 2, reg: w21, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VDIVPDMasked512", argLength: 3, reg: w2kw, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VFMADD213PD512", argLength: 3, reg: w31, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, @@ -175,6 +181,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPABSWMasked256", argLength: 2, reg: wkw, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPADDW256", argLength: 2, reg: v21, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPADDWMasked256", argLength: 3, reg: w2kw, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCOMPRESSWMasked256", argLength: 2, reg: wkw, asm: "VPCOMPRESSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPEQW256", argLength: 2, reg: v21, asm: "VPCMPEQW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPCMPGTW256", argLength: 2, reg: v21, asm: "VPCMPGTW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMAXSW256", argLength: 2, reg: v21, asm: "VPMAXSW", commutative: true, typ: "Vec256", resultInArg0: false}, @@ -216,6 +223,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPABSWMasked512", argLength: 2, reg: wkw, asm: "VPABSW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPADDW512", argLength: 2, reg: w21, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPADDWMasked512", argLength: 3, reg: w2kw, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPCOMPRESSWMasked512", argLength: 2, reg: wkw, asm: "VPCOMPRESSW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPMAXSW512", argLength: 2, reg: w21, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXSWMasked512", argLength: 3, reg: w2kw, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINSW512", argLength: 2, reg: w21, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -250,6 +258,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPABSWMasked128", argLength: 2, reg: wkw, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDW128", 
argLength: 2, reg: v21, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPADDWMasked128", argLength: 3, reg: w2kw, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCOMPRESSWMasked128", argLength: 2, reg: wkw, asm: "VPCOMPRESSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPEQW128", argLength: 2, reg: v21, asm: "VPCMPEQW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPCMPGTW128", argLength: 2, reg: v21, asm: "VPCMPGTW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMAXSW128", argLength: 2, reg: v21, asm: "VPMAXSW", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -295,6 +304,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPANDDMasked512", argLength: 3, reg: w2kw, asm: "VPANDD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPANDND512", argLength: 2, reg: w21, asm: "VPANDND", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPANDNDMasked512", argLength: 3, reg: w2kw, asm: "VPANDND", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPCOMPRESSDMasked512", argLength: 2, reg: wkw, asm: "VPCOMPRESSD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPMAXSD512", argLength: 2, reg: w21, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXSDMasked512", argLength: 3, reg: w2kw, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINSD512", argLength: 2, reg: w21, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -339,6 +349,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPADDDMasked128", argLength: 3, reg: w2kw, asm: "VPADDD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPANDDMasked128", argLength: 3, reg: w2kw, asm: "VPANDD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPANDNDMasked128", argLength: 3, reg: w2kw, asm: "VPANDND", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCOMPRESSDMasked128", argLength: 2, reg: wkw, asm: "VPCOMPRESSD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPEQD128", argLength: 2, reg: v21, asm: "VPCMPEQD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPCMPGTD128", argLength: 2, reg: v21, asm: "VPCMPGTD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMAXSD128", argLength: 2, reg: v21, asm: "VPMAXSD", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -387,6 +398,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPADDDMasked256", argLength: 3, reg: w2kw, asm: "VPADDD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPANDDMasked256", argLength: 3, reg: w2kw, asm: "VPANDD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPANDNDMasked256", argLength: 3, reg: w2kw, asm: "VPANDND", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCOMPRESSDMasked256", argLength: 2, reg: wkw, asm: "VPCOMPRESSD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPEQD256", argLength: 2, reg: v21, asm: "VPCMPEQD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPCMPGTD256", argLength: 2, reg: v21, asm: "VPCMPGTD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMAXSD256", argLength: 2, reg: v21, asm: "VPMAXSD", commutative: true, typ: 
"Vec256", resultInArg0: false}, @@ -435,6 +447,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPADDQMasked128", argLength: 3, reg: w2kw, asm: "VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPANDQMasked128", argLength: 3, reg: w2kw, asm: "VPANDQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPANDNQMasked128", argLength: 3, reg: w2kw, asm: "VPANDNQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCOMPRESSQMasked128", argLength: 2, reg: wkw, asm: "VPCOMPRESSQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPEQQ128", argLength: 2, reg: v21, asm: "VPCMPEQQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPCMPGTQ128", argLength: 2, reg: v21, asm: "VPCMPGTQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMAXSQ128", argLength: 2, reg: w21, asm: "VPMAXSQ", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -472,6 +485,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPADDQMasked256", argLength: 3, reg: w2kw, asm: "VPADDQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPANDQMasked256", argLength: 3, reg: w2kw, asm: "VPANDQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPANDNQMasked256", argLength: 3, reg: w2kw, asm: "VPANDNQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCOMPRESSQMasked256", argLength: 2, reg: wkw, asm: "VPCOMPRESSQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPEQQ256", argLength: 2, reg: v21, asm: "VPCMPEQQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPCMPGTQ256", argLength: 2, reg: v21, asm: "VPCMPGTQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMAXSQ256", argLength: 2, reg: w21, asm: "VPMAXSQ", commutative: true, typ: "Vec256", resultInArg0: false}, @@ -511,6 +525,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPANDQMasked512", argLength: 3, reg: w2kw, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPANDNQ512", argLength: 2, reg: w21, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPANDNQMasked512", argLength: 3, reg: w2kw, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPCOMPRESSQMasked512", argLength: 2, reg: wkw, asm: "VPCOMPRESSQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPMAXSQ512", argLength: 2, reg: w21, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXSQMasked512", argLength: 3, reg: w2kw, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINSQ512", argLength: 2, reg: w21, asm: "VPMINSQ", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -549,6 +564,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPADDBMasked128", argLength: 3, reg: w2kw, asm: "VPADDB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPAND128", argLength: 2, reg: v21, asm: "VPAND", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPANDN128", argLength: 2, reg: v21, asm: "VPANDN", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCOMPRESSBMasked128", argLength: 2, reg: wkw, asm: "VPCOMPRESSB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPEQB128", argLength: 2, reg: v21, asm: 
"VPCMPEQB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPCMPGTB128", argLength: 2, reg: v21, asm: "VPCMPGTB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMAXSB128", argLength: 2, reg: v21, asm: "VPMAXSB", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -572,6 +588,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPADDBMasked256", argLength: 3, reg: w2kw, asm: "VPADDB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPAND256", argLength: 2, reg: v21, asm: "VPAND", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPANDN256", argLength: 2, reg: v21, asm: "VPANDN", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCOMPRESSBMasked256", argLength: 2, reg: wkw, asm: "VPCOMPRESSB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPEQB256", argLength: 2, reg: v21, asm: "VPCMPEQB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPCMPGTB256", argLength: 2, reg: v21, asm: "VPCMPGTB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMAXSB256", argLength: 2, reg: v21, asm: "VPMAXSB", commutative: true, typ: "Vec256", resultInArg0: false}, @@ -593,6 +610,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPABSBMasked512", argLength: 2, reg: wkw, asm: "VPABSB", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPADDB512", argLength: 2, reg: w21, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPADDBMasked512", argLength: 3, reg: w2kw, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPCOMPRESSBMasked512", argLength: 2, reg: wkw, asm: "VPCOMPRESSB", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPMAXSB512", argLength: 2, reg: w21, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXSBMasked512", argLength: 3, reg: w2kw, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINSB512", argLength: 2, reg: w21, asm: "VPMINSB", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -657,12 +675,12 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMAXUDMasked512", argLength: 3, reg: w2kw, asm: "VPMAXUD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINUD512", argLength: 2, reg: w21, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINUDMasked512", argLength: 3, reg: w2kw, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPERMPS512", argLength: 2, reg: w21, asm: "VPERMPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPERMD512", argLength: 2, reg: w21, asm: "VPERMD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPERMI2D512", argLength: 3, reg: w31, asm: "VPERMI2D", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMPS512", argLength: 2, reg: w21, asm: "VPERMPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPERMI2PS512", argLength: 3, reg: w31, asm: "VPERMI2PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPERMI2DMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2D", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMI2D512", argLength: 3, reg: w31, asm: "VPERMI2D", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPERMI2PSMasked512", argLength: 
4, reg: w3kw, asm: "VPERMI2PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMI2DMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2D", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPERMPSMasked512", argLength: 3, reg: w2kw, asm: "VPERMPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPERMDMasked512", argLength: 3, reg: w2kw, asm: "VPERMD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRLD512", argLength: 2, reg: wfpw, asm: "VPSRLD", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -687,12 +705,12 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMINUD256", argLength: 2, reg: v21, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUDMasked256", argLength: 3, reg: w2kw, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULUDQ256", argLength: 2, reg: v21, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPERMD256", argLength: 2, reg: v21, asm: "VPERMD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPERMPS256", argLength: 2, reg: v21, asm: "VPERMPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPERMD256", argLength: 2, reg: v21, asm: "VPERMD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPERMI2D256", argLength: 3, reg: w31, asm: "VPERMI2D", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPERMI2PS256", argLength: 3, reg: w31, asm: "VPERMI2PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPERMI2PSMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2PS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPERMI2DMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2D", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMI2PSMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2PS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPERMPSMasked256", argLength: 3, reg: w2kw, asm: "VPERMPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPERMDMasked256", argLength: 3, reg: w2kw, asm: "VPERMD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLD256", argLength: 2, reg: vfpv, asm: "VPSRLD", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -706,8 +724,8 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMULUDQMasked128", argLength: 3, reg: w2kw, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPERMI2PD128", argLength: 3, reg: w31, asm: "VPERMI2PD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPERMI2Q128", argLength: 3, reg: w31, asm: "VPERMI2Q", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPERMI2QMasked128", argLength: 4, reg: w3kw, asm: "VPERMI2Q", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPERMI2PDMasked128", argLength: 4, reg: w3kw, asm: "VPERMI2PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPERMI2QMasked128", argLength: 4, reg: w3kw, asm: "VPERMI2Q", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPSRLQ128", argLength: 2, reg: vfpv, asm: "VPSRLQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRLQMasked128", argLength: 3, reg: wfpkw, asm: "VPSRLQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRLVQ128", argLength: 2, reg: v21, asm: "VPSRLVQ", commutative: 
false, typ: "Vec128", resultInArg0: false}, @@ -719,12 +737,12 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMULUDQMasked256", argLength: 3, reg: w2kw, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPERMQ256", argLength: 2, reg: w21, asm: "VPERMQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPERMPD256", argLength: 2, reg: w21, asm: "VPERMPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPERMI2PD256", argLength: 3, reg: w31, asm: "VPERMI2PD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPERMI2Q256", argLength: 3, reg: w31, asm: "VPERMI2Q", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMI2PD256", argLength: 3, reg: w31, asm: "VPERMI2PD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPERMI2PDMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2PD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPERMI2QMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2Q", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPERMPDMasked256", argLength: 3, reg: w2kw, asm: "VPERMPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPERMQMasked256", argLength: 3, reg: w2kw, asm: "VPERMQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPERMPDMasked256", argLength: 3, reg: w2kw, asm: "VPERMPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLQ256", argLength: 2, reg: vfpv, asm: "VPSRLQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLQMasked256", argLength: 3, reg: wfpkw, asm: "VPSRLQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLVQ256", argLength: 2, reg: v21, asm: "VPSRLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -741,8 +759,8 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPERMI2PD512", argLength: 3, reg: w31, asm: "VPERMI2PD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPERMI2QMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2Q", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPERMI2PDMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPERMPDMasked512", argLength: 3, reg: w2kw, asm: "VPERMPD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPERMQMasked512", argLength: 3, reg: w2kw, asm: "VPERMQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPERMPDMasked512", argLength: 3, reg: w2kw, asm: "VPERMPD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRLQ512", argLength: 2, reg: wfpw, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRLQMasked512", argLength: 3, reg: wfpkw, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRLVQ512", argLength: 2, reg: w21, asm: "VPSRLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index dd27d0cc9411dd..00e4baf141de5f 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -9,6 +9,7 @@ func simdGenericOps() []opData { {name: "ApproximateReciprocalMaskedFloat32x16", argLength: 2, commutative: false}, {name: "ApproximateReciprocalOfSqrtFloat32x16", argLength: 1, 
commutative: false}, {name: "ApproximateReciprocalOfSqrtMaskedFloat32x16", argLength: 2, commutative: false}, + {name: "CompressFloat32x16", argLength: 2, commutative: false}, {name: "DivFloat32x16", argLength: 2, commutative: false}, {name: "DivMaskedFloat32x16", argLength: 3, commutative: false}, {name: "EqualFloat32x16", argLength: 2, commutative: true}, @@ -51,6 +52,7 @@ func simdGenericOps() []opData { {name: "ApproximateReciprocalOfSqrtFloat32x4", argLength: 1, commutative: false}, {name: "ApproximateReciprocalOfSqrtMaskedFloat32x4", argLength: 2, commutative: false}, {name: "CeilFloat32x4", argLength: 1, commutative: false}, + {name: "CompressFloat32x4", argLength: 2, commutative: false}, {name: "DivFloat32x4", argLength: 2, commutative: false}, {name: "DivMaskedFloat32x4", argLength: 3, commutative: false}, {name: "DotProdBroadcastFloat32x4", argLength: 2, commutative: true}, @@ -99,6 +101,7 @@ func simdGenericOps() []opData { {name: "ApproximateReciprocalOfSqrtFloat32x8", argLength: 1, commutative: false}, {name: "ApproximateReciprocalOfSqrtMaskedFloat32x8", argLength: 2, commutative: false}, {name: "CeilFloat32x8", argLength: 1, commutative: false}, + {name: "CompressFloat32x8", argLength: 2, commutative: false}, {name: "DivFloat32x8", argLength: 2, commutative: false}, {name: "DivMaskedFloat32x8", argLength: 3, commutative: false}, {name: "DotProdBroadcastFloat32x8", argLength: 2, commutative: true}, @@ -147,6 +150,7 @@ func simdGenericOps() []opData { {name: "ApproximateReciprocalOfSqrtFloat64x2", argLength: 1, commutative: false}, {name: "ApproximateReciprocalOfSqrtMaskedFloat64x2", argLength: 2, commutative: false}, {name: "CeilFloat64x2", argLength: 1, commutative: false}, + {name: "CompressFloat64x2", argLength: 2, commutative: false}, {name: "DivFloat64x2", argLength: 2, commutative: false}, {name: "DivMaskedFloat64x2", argLength: 3, commutative: false}, {name: "DotProdBroadcastFloat64x2", argLength: 2, commutative: true}, @@ -195,6 +199,7 @@ func simdGenericOps() []opData { {name: "ApproximateReciprocalOfSqrtFloat64x4", argLength: 1, commutative: false}, {name: "ApproximateReciprocalOfSqrtMaskedFloat64x4", argLength: 2, commutative: false}, {name: "CeilFloat64x4", argLength: 1, commutative: false}, + {name: "CompressFloat64x4", argLength: 2, commutative: false}, {name: "DivFloat64x4", argLength: 2, commutative: false}, {name: "DivMaskedFloat64x4", argLength: 3, commutative: false}, {name: "EqualFloat64x4", argLength: 2, commutative: true}, @@ -240,6 +245,7 @@ func simdGenericOps() []opData { {name: "ApproximateReciprocalMaskedFloat64x8", argLength: 2, commutative: false}, {name: "ApproximateReciprocalOfSqrtFloat64x8", argLength: 1, commutative: false}, {name: "ApproximateReciprocalOfSqrtMaskedFloat64x8", argLength: 2, commutative: false}, + {name: "CompressFloat64x8", argLength: 2, commutative: false}, {name: "DivFloat64x8", argLength: 2, commutative: false}, {name: "DivMaskedFloat64x8", argLength: 3, commutative: false}, {name: "EqualFloat64x8", argLength: 2, commutative: true}, @@ -280,6 +286,7 @@ func simdGenericOps() []opData { {name: "AddMaskedInt16x16", argLength: 3, commutative: true}, {name: "AndInt16x16", argLength: 2, commutative: true}, {name: "AndNotInt16x16", argLength: 2, commutative: false}, + {name: "CompressInt16x16", argLength: 2, commutative: false}, {name: "EqualInt16x16", argLength: 2, commutative: true}, {name: "EqualMaskedInt16x16", argLength: 3, commutative: true}, {name: "GreaterInt16x16", argLength: 2, commutative: false}, @@ -333,6 +340,7 @@ 
func simdGenericOps() []opData { {name: "AbsoluteMaskedInt16x32", argLength: 2, commutative: false}, {name: "AddInt16x32", argLength: 2, commutative: true}, {name: "AddMaskedInt16x32", argLength: 3, commutative: true}, + {name: "CompressInt16x32", argLength: 2, commutative: false}, {name: "EqualInt16x32", argLength: 2, commutative: true}, {name: "EqualMaskedInt16x32", argLength: 3, commutative: true}, {name: "GreaterInt16x32", argLength: 2, commutative: false}, @@ -381,6 +389,7 @@ func simdGenericOps() []opData { {name: "AddMaskedInt16x8", argLength: 3, commutative: true}, {name: "AndInt16x8", argLength: 2, commutative: true}, {name: "AndNotInt16x8", argLength: 2, commutative: false}, + {name: "CompressInt16x8", argLength: 2, commutative: false}, {name: "EqualInt16x8", argLength: 2, commutative: true}, {name: "EqualMaskedInt16x8", argLength: 3, commutative: true}, {name: "GreaterInt16x8", argLength: 2, commutative: false}, @@ -438,6 +447,7 @@ func simdGenericOps() []opData { {name: "AndMaskedInt32x16", argLength: 3, commutative: true}, {name: "AndNotInt32x16", argLength: 2, commutative: false}, {name: "AndNotMaskedInt32x16", argLength: 3, commutative: false}, + {name: "CompressInt32x16", argLength: 2, commutative: false}, {name: "EqualInt32x16", argLength: 2, commutative: true}, {name: "EqualMaskedInt32x16", argLength: 3, commutative: true}, {name: "GreaterInt32x16", argLength: 2, commutative: false}, @@ -496,6 +506,7 @@ func simdGenericOps() []opData { {name: "AndMaskedInt32x4", argLength: 3, commutative: true}, {name: "AndNotInt32x4", argLength: 2, commutative: false}, {name: "AndNotMaskedInt32x4", argLength: 3, commutative: false}, + {name: "CompressInt32x4", argLength: 2, commutative: false}, {name: "EqualInt32x4", argLength: 2, commutative: true}, {name: "EqualMaskedInt32x4", argLength: 3, commutative: true}, {name: "GreaterInt32x4", argLength: 2, commutative: false}, @@ -558,6 +569,7 @@ func simdGenericOps() []opData { {name: "AndMaskedInt32x8", argLength: 3, commutative: true}, {name: "AndNotInt32x8", argLength: 2, commutative: false}, {name: "AndNotMaskedInt32x8", argLength: 3, commutative: false}, + {name: "CompressInt32x8", argLength: 2, commutative: false}, {name: "EqualInt32x8", argLength: 2, commutative: true}, {name: "EqualMaskedInt32x8", argLength: 3, commutative: true}, {name: "GreaterInt32x8", argLength: 2, commutative: false}, @@ -620,6 +632,7 @@ func simdGenericOps() []opData { {name: "AndMaskedInt64x2", argLength: 3, commutative: true}, {name: "AndNotInt64x2", argLength: 2, commutative: false}, {name: "AndNotMaskedInt64x2", argLength: 3, commutative: false}, + {name: "CompressInt64x2", argLength: 2, commutative: false}, {name: "EqualInt64x2", argLength: 2, commutative: true}, {name: "EqualMaskedInt64x2", argLength: 3, commutative: true}, {name: "GreaterInt64x2", argLength: 2, commutative: false}, @@ -672,6 +685,7 @@ func simdGenericOps() []opData { {name: "AndMaskedInt64x4", argLength: 3, commutative: true}, {name: "AndNotInt64x4", argLength: 2, commutative: false}, {name: "AndNotMaskedInt64x4", argLength: 3, commutative: false}, + {name: "CompressInt64x4", argLength: 2, commutative: false}, {name: "EqualInt64x4", argLength: 2, commutative: true}, {name: "EqualMaskedInt64x4", argLength: 3, commutative: true}, {name: "GreaterInt64x4", argLength: 2, commutative: false}, @@ -724,6 +738,7 @@ func simdGenericOps() []opData { {name: "AndMaskedInt64x8", argLength: 3, commutative: true}, {name: "AndNotInt64x8", argLength: 2, commutative: false}, {name: "AndNotMaskedInt64x8", 
argLength: 3, commutative: false}, + {name: "CompressInt64x8", argLength: 2, commutative: false}, {name: "EqualInt64x8", argLength: 2, commutative: true}, {name: "EqualMaskedInt64x8", argLength: 3, commutative: true}, {name: "GreaterInt64x8", argLength: 2, commutative: false}, @@ -774,6 +789,7 @@ func simdGenericOps() []opData { {name: "AddMaskedInt8x16", argLength: 3, commutative: true}, {name: "AndInt8x16", argLength: 2, commutative: true}, {name: "AndNotInt8x16", argLength: 2, commutative: false}, + {name: "CompressInt8x16", argLength: 2, commutative: false}, {name: "EqualInt8x16", argLength: 2, commutative: true}, {name: "EqualMaskedInt8x16", argLength: 3, commutative: true}, {name: "GreaterInt8x16", argLength: 2, commutative: false}, @@ -807,6 +823,7 @@ func simdGenericOps() []opData { {name: "AddMaskedInt8x32", argLength: 3, commutative: true}, {name: "AndInt8x32", argLength: 2, commutative: true}, {name: "AndNotInt8x32", argLength: 2, commutative: false}, + {name: "CompressInt8x32", argLength: 2, commutative: false}, {name: "EqualInt8x32", argLength: 2, commutative: true}, {name: "EqualMaskedInt8x32", argLength: 3, commutative: true}, {name: "GreaterInt8x32", argLength: 2, commutative: false}, @@ -838,6 +855,7 @@ func simdGenericOps() []opData { {name: "AbsoluteMaskedInt8x64", argLength: 2, commutative: false}, {name: "AddInt8x64", argLength: 2, commutative: true}, {name: "AddMaskedInt8x64", argLength: 3, commutative: true}, + {name: "CompressInt8x64", argLength: 2, commutative: false}, {name: "EqualInt8x64", argLength: 2, commutative: true}, {name: "EqualMaskedInt8x64", argLength: 3, commutative: true}, {name: "GreaterInt8x64", argLength: 2, commutative: false}, @@ -868,6 +886,7 @@ func simdGenericOps() []opData { {name: "AndNotUint16x16", argLength: 2, commutative: false}, {name: "AverageUint16x16", argLength: 2, commutative: true}, {name: "AverageMaskedUint16x16", argLength: 3, commutative: true}, + {name: "CompressUint16x16", argLength: 2, commutative: false}, {name: "EqualUint16x16", argLength: 2, commutative: true}, {name: "EqualMaskedUint16x16", argLength: 3, commutative: true}, {name: "GreaterUint16x16", argLength: 2, commutative: false}, @@ -893,10 +912,10 @@ func simdGenericOps() []opData { {name: "PermuteUint16x16", argLength: 2, commutative: false}, {name: "Permute2Uint16x16", argLength: 3, commutative: false}, {name: "Permute2Int16x16", argLength: 3, commutative: false}, - {name: "Permute2MaskedUint16x16", argLength: 4, commutative: false}, {name: "Permute2MaskedInt16x16", argLength: 4, commutative: false}, - {name: "PermuteMaskedUint16x16", argLength: 3, commutative: false}, + {name: "Permute2MaskedUint16x16", argLength: 4, commutative: false}, {name: "PermuteMaskedInt16x16", argLength: 3, commutative: false}, + {name: "PermuteMaskedUint16x16", argLength: 3, commutative: false}, {name: "PopCountUint16x16", argLength: 1, commutative: false}, {name: "PopCountMaskedUint16x16", argLength: 2, commutative: false}, {name: "SaturatedAddUint16x16", argLength: 2, commutative: true}, @@ -922,6 +941,7 @@ func simdGenericOps() []opData { {name: "AddMaskedUint16x32", argLength: 3, commutative: true}, {name: "AverageUint16x32", argLength: 2, commutative: true}, {name: "AverageMaskedUint16x32", argLength: 3, commutative: true}, + {name: "CompressUint16x32", argLength: 2, commutative: false}, {name: "EqualUint16x32", argLength: 2, commutative: true}, {name: "EqualMaskedUint16x32", argLength: 3, commutative: true}, {name: "GreaterUint16x32", argLength: 2, commutative: false}, @@ -940,12 
+960,12 @@ func simdGenericOps() []opData { {name: "MulHighMaskedUint16x32", argLength: 3, commutative: true}, {name: "NotEqualUint16x32", argLength: 2, commutative: true}, {name: "NotEqualMaskedUint16x32", argLength: 3, commutative: true}, - {name: "PermuteUint16x32", argLength: 2, commutative: false}, {name: "PermuteInt16x32", argLength: 2, commutative: false}, + {name: "PermuteUint16x32", argLength: 2, commutative: false}, {name: "Permute2Int16x32", argLength: 3, commutative: false}, {name: "Permute2Uint16x32", argLength: 3, commutative: false}, - {name: "Permute2MaskedUint16x32", argLength: 4, commutative: false}, {name: "Permute2MaskedInt16x32", argLength: 4, commutative: false}, + {name: "Permute2MaskedUint16x32", argLength: 4, commutative: false}, {name: "PermuteMaskedUint16x32", argLength: 3, commutative: false}, {name: "PermuteMaskedInt16x32", argLength: 3, commutative: false}, {name: "PopCountUint16x32", argLength: 1, commutative: false}, @@ -974,6 +994,7 @@ func simdGenericOps() []opData { {name: "AndNotUint16x8", argLength: 2, commutative: false}, {name: "AverageUint16x8", argLength: 2, commutative: true}, {name: "AverageMaskedUint16x8", argLength: 3, commutative: true}, + {name: "CompressUint16x8", argLength: 2, commutative: false}, {name: "EqualUint16x8", argLength: 2, commutative: true}, {name: "EqualMaskedUint16x8", argLength: 3, commutative: true}, {name: "GreaterUint16x8", argLength: 2, commutative: false}, @@ -1030,6 +1051,7 @@ func simdGenericOps() []opData { {name: "AndMaskedUint32x16", argLength: 3, commutative: true}, {name: "AndNotUint32x16", argLength: 2, commutative: false}, {name: "AndNotMaskedUint32x16", argLength: 3, commutative: false}, + {name: "CompressUint32x16", argLength: 2, commutative: false}, {name: "EqualUint32x16", argLength: 2, commutative: true}, {name: "EqualMaskedUint32x16", argLength: 3, commutative: true}, {name: "GreaterUint32x16", argLength: 2, commutative: false}, @@ -1049,17 +1071,17 @@ func simdGenericOps() []opData { {name: "OrUint32x16", argLength: 2, commutative: true}, {name: "OrMaskedUint32x16", argLength: 3, commutative: true}, {name: "PermuteInt32x16", argLength: 2, commutative: false}, - {name: "PermuteUint32x16", argLength: 2, commutative: false}, {name: "PermuteFloat32x16", argLength: 2, commutative: false}, - {name: "Permute2Int32x16", argLength: 3, commutative: false}, + {name: "PermuteUint32x16", argLength: 2, commutative: false}, {name: "Permute2Uint32x16", argLength: 3, commutative: false}, {name: "Permute2Float32x16", argLength: 3, commutative: false}, + {name: "Permute2Int32x16", argLength: 3, commutative: false}, {name: "Permute2MaskedUint32x16", argLength: 4, commutative: false}, {name: "Permute2MaskedInt32x16", argLength: 4, commutative: false}, {name: "Permute2MaskedFloat32x16", argLength: 4, commutative: false}, + {name: "PermuteMaskedFloat32x16", argLength: 3, commutative: false}, {name: "PermuteMaskedUint32x16", argLength: 3, commutative: false}, {name: "PermuteMaskedInt32x16", argLength: 3, commutative: false}, - {name: "PermuteMaskedFloat32x16", argLength: 3, commutative: false}, {name: "PopCountUint32x16", argLength: 1, commutative: false}, {name: "PopCountMaskedUint32x16", argLength: 2, commutative: false}, {name: "RotateLeftUint32x16", argLength: 2, commutative: false}, @@ -1092,6 +1114,7 @@ func simdGenericOps() []opData { {name: "AndMaskedUint32x4", argLength: 3, commutative: true}, {name: "AndNotUint32x4", argLength: 2, commutative: false}, {name: "AndNotMaskedUint32x4", argLength: 3, commutative: false}, + 
{name: "CompressUint32x4", argLength: 2, commutative: false}, {name: "EqualUint32x4", argLength: 2, commutative: true}, {name: "EqualMaskedUint32x4", argLength: 3, commutative: true}, {name: "GreaterUint32x4", argLength: 2, commutative: false}, @@ -1114,11 +1137,11 @@ func simdGenericOps() []opData { {name: "PairwiseAddUint32x4", argLength: 2, commutative: false}, {name: "PairwiseSubUint32x4", argLength: 2, commutative: false}, {name: "Permute2Uint32x4", argLength: 3, commutative: false}, - {name: "Permute2Float32x4", argLength: 3, commutative: false}, {name: "Permute2Int32x4", argLength: 3, commutative: false}, - {name: "Permute2MaskedUint32x4", argLength: 4, commutative: false}, - {name: "Permute2MaskedInt32x4", argLength: 4, commutative: false}, + {name: "Permute2Float32x4", argLength: 3, commutative: false}, {name: "Permute2MaskedFloat32x4", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt32x4", argLength: 4, commutative: false}, + {name: "Permute2MaskedUint32x4", argLength: 4, commutative: false}, {name: "PopCountUint32x4", argLength: 1, commutative: false}, {name: "PopCountMaskedUint32x4", argLength: 2, commutative: false}, {name: "RotateLeftUint32x4", argLength: 2, commutative: false}, @@ -1151,6 +1174,7 @@ func simdGenericOps() []opData { {name: "AndMaskedUint32x8", argLength: 3, commutative: true}, {name: "AndNotUint32x8", argLength: 2, commutative: false}, {name: "AndNotMaskedUint32x8", argLength: 3, commutative: false}, + {name: "CompressUint32x8", argLength: 2, commutative: false}, {name: "EqualUint32x8", argLength: 2, commutative: true}, {name: "EqualMaskedUint32x8", argLength: 3, commutative: true}, {name: "GreaterUint32x8", argLength: 2, commutative: false}, @@ -1172,18 +1196,18 @@ func simdGenericOps() []opData { {name: "OrMaskedUint32x8", argLength: 3, commutative: true}, {name: "PairwiseAddUint32x8", argLength: 2, commutative: false}, {name: "PairwiseSubUint32x8", argLength: 2, commutative: false}, + {name: "PermuteUint32x8", argLength: 2, commutative: false}, {name: "PermuteInt32x8", argLength: 2, commutative: false}, {name: "PermuteFloat32x8", argLength: 2, commutative: false}, - {name: "PermuteUint32x8", argLength: 2, commutative: false}, {name: "Permute2Uint32x8", argLength: 3, commutative: false}, {name: "Permute2Float32x8", argLength: 3, commutative: false}, {name: "Permute2Int32x8", argLength: 3, commutative: false}, {name: "Permute2MaskedFloat32x8", argLength: 4, commutative: false}, - {name: "Permute2MaskedUint32x8", argLength: 4, commutative: false}, {name: "Permute2MaskedInt32x8", argLength: 4, commutative: false}, + {name: "Permute2MaskedUint32x8", argLength: 4, commutative: false}, {name: "PermuteMaskedInt32x8", argLength: 3, commutative: false}, - {name: "PermuteMaskedFloat32x8", argLength: 3, commutative: false}, {name: "PermuteMaskedUint32x8", argLength: 3, commutative: false}, + {name: "PermuteMaskedFloat32x8", argLength: 3, commutative: false}, {name: "PopCountUint32x8", argLength: 1, commutative: false}, {name: "PopCountMaskedUint32x8", argLength: 2, commutative: false}, {name: "RotateLeftUint32x8", argLength: 2, commutative: false}, @@ -1216,6 +1240,7 @@ func simdGenericOps() []opData { {name: "AndMaskedUint64x2", argLength: 3, commutative: true}, {name: "AndNotUint64x2", argLength: 2, commutative: false}, {name: "AndNotMaskedUint64x2", argLength: 3, commutative: false}, + {name: "CompressUint64x2", argLength: 2, commutative: false}, {name: "EqualUint64x2", argLength: 2, commutative: true}, {name: "EqualMaskedUint64x2", argLength: 3, 
commutative: true}, {name: "GreaterUint64x2", argLength: 2, commutative: false}, @@ -1236,11 +1261,11 @@ func simdGenericOps() []opData { {name: "NotEqualMaskedUint64x2", argLength: 3, commutative: true}, {name: "OrUint64x2", argLength: 2, commutative: true}, {name: "OrMaskedUint64x2", argLength: 3, commutative: true}, + {name: "Permute2Float64x2", argLength: 3, commutative: false}, {name: "Permute2Uint64x2", argLength: 3, commutative: false}, {name: "Permute2Int64x2", argLength: 3, commutative: false}, - {name: "Permute2Float64x2", argLength: 3, commutative: false}, - {name: "Permute2MaskedUint64x2", argLength: 4, commutative: false}, {name: "Permute2MaskedInt64x2", argLength: 4, commutative: false}, + {name: "Permute2MaskedUint64x2", argLength: 4, commutative: false}, {name: "Permute2MaskedFloat64x2", argLength: 4, commutative: false}, {name: "PopCountUint64x2", argLength: 1, commutative: false}, {name: "PopCountMaskedUint64x2", argLength: 2, commutative: false}, @@ -1270,6 +1295,7 @@ func simdGenericOps() []opData { {name: "AndMaskedUint64x4", argLength: 3, commutative: true}, {name: "AndNotUint64x4", argLength: 2, commutative: false}, {name: "AndNotMaskedUint64x4", argLength: 3, commutative: false}, + {name: "CompressUint64x4", argLength: 2, commutative: false}, {name: "EqualUint64x4", argLength: 2, commutative: true}, {name: "EqualMaskedUint64x4", argLength: 3, commutative: true}, {name: "GreaterUint64x4", argLength: 2, commutative: false}, @@ -1290,18 +1316,18 @@ func simdGenericOps() []opData { {name: "NotEqualMaskedUint64x4", argLength: 3, commutative: true}, {name: "OrUint64x4", argLength: 2, commutative: true}, {name: "OrMaskedUint64x4", argLength: 3, commutative: true}, + {name: "PermuteFloat64x4", argLength: 2, commutative: false}, {name: "PermuteUint64x4", argLength: 2, commutative: false}, {name: "PermuteInt64x4", argLength: 2, commutative: false}, - {name: "PermuteFloat64x4", argLength: 2, commutative: false}, - {name: "Permute2Uint64x4", argLength: 3, commutative: false}, {name: "Permute2Int64x4", argLength: 3, commutative: false}, + {name: "Permute2Uint64x4", argLength: 3, commutative: false}, {name: "Permute2Float64x4", argLength: 3, commutative: false}, - {name: "Permute2MaskedInt64x4", argLength: 4, commutative: false}, - {name: "Permute2MaskedUint64x4", argLength: 4, commutative: false}, {name: "Permute2MaskedFloat64x4", argLength: 4, commutative: false}, + {name: "Permute2MaskedUint64x4", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt64x4", argLength: 4, commutative: false}, {name: "PermuteMaskedFloat64x4", argLength: 3, commutative: false}, - {name: "PermuteMaskedInt64x4", argLength: 3, commutative: false}, {name: "PermuteMaskedUint64x4", argLength: 3, commutative: false}, + {name: "PermuteMaskedInt64x4", argLength: 3, commutative: false}, {name: "PopCountUint64x4", argLength: 1, commutative: false}, {name: "PopCountMaskedUint64x4", argLength: 2, commutative: false}, {name: "RotateLeftUint64x4", argLength: 2, commutative: false}, @@ -1330,6 +1356,7 @@ func simdGenericOps() []opData { {name: "AndMaskedUint64x8", argLength: 3, commutative: true}, {name: "AndNotUint64x8", argLength: 2, commutative: false}, {name: "AndNotMaskedUint64x8", argLength: 3, commutative: false}, + {name: "CompressUint64x8", argLength: 2, commutative: false}, {name: "EqualUint64x8", argLength: 2, commutative: true}, {name: "EqualMaskedUint64x8", argLength: 3, commutative: true}, {name: "GreaterUint64x8", argLength: 2, commutative: false}, @@ -1350,18 +1377,18 @@ func 
simdGenericOps() []opData { {name: "NotEqualMaskedUint64x8", argLength: 3, commutative: true}, {name: "OrUint64x8", argLength: 2, commutative: true}, {name: "OrMaskedUint64x8", argLength: 3, commutative: true}, - {name: "PermuteUint64x8", argLength: 2, commutative: false}, {name: "PermuteInt64x8", argLength: 2, commutative: false}, + {name: "PermuteUint64x8", argLength: 2, commutative: false}, {name: "PermuteFloat64x8", argLength: 2, commutative: false}, - {name: "Permute2Int64x8", argLength: 3, commutative: false}, {name: "Permute2Uint64x8", argLength: 3, commutative: false}, {name: "Permute2Float64x8", argLength: 3, commutative: false}, + {name: "Permute2Int64x8", argLength: 3, commutative: false}, {name: "Permute2MaskedUint64x8", argLength: 4, commutative: false}, - {name: "Permute2MaskedInt64x8", argLength: 4, commutative: false}, {name: "Permute2MaskedFloat64x8", argLength: 4, commutative: false}, - {name: "PermuteMaskedFloat64x8", argLength: 3, commutative: false}, - {name: "PermuteMaskedInt64x8", argLength: 3, commutative: false}, + {name: "Permute2MaskedInt64x8", argLength: 4, commutative: false}, {name: "PermuteMaskedUint64x8", argLength: 3, commutative: false}, + {name: "PermuteMaskedInt64x8", argLength: 3, commutative: false}, + {name: "PermuteMaskedFloat64x8", argLength: 3, commutative: false}, {name: "PopCountUint64x8", argLength: 1, commutative: false}, {name: "PopCountMaskedUint64x8", argLength: 2, commutative: false}, {name: "RotateLeftUint64x8", argLength: 2, commutative: false}, @@ -1390,6 +1417,7 @@ func simdGenericOps() []opData { {name: "AndNotUint8x16", argLength: 2, commutative: false}, {name: "AverageUint8x16", argLength: 2, commutative: true}, {name: "AverageMaskedUint8x16", argLength: 3, commutative: true}, + {name: "CompressUint8x16", argLength: 2, commutative: false}, {name: "EqualUint8x16", argLength: 2, commutative: true}, {name: "EqualMaskedUint8x16", argLength: 3, commutative: true}, {name: "GaloisFieldMulUint8x16", argLength: 2, commutative: false}, @@ -1411,12 +1439,12 @@ func simdGenericOps() []opData { {name: "OrUint8x16", argLength: 2, commutative: true}, {name: "PermuteUint8x16", argLength: 2, commutative: false}, {name: "PermuteInt8x16", argLength: 2, commutative: false}, - {name: "Permute2Uint8x16", argLength: 3, commutative: false}, {name: "Permute2Int8x16", argLength: 3, commutative: false}, + {name: "Permute2Uint8x16", argLength: 3, commutative: false}, {name: "Permute2MaskedInt8x16", argLength: 4, commutative: false}, {name: "Permute2MaskedUint8x16", argLength: 4, commutative: false}, - {name: "PermuteMaskedInt8x16", argLength: 3, commutative: false}, {name: "PermuteMaskedUint8x16", argLength: 3, commutative: false}, + {name: "PermuteMaskedInt8x16", argLength: 3, commutative: false}, {name: "PopCountUint8x16", argLength: 1, commutative: false}, {name: "PopCountMaskedUint8x16", argLength: 2, commutative: false}, {name: "SaturatedAddUint8x16", argLength: 2, commutative: true}, @@ -1434,6 +1462,7 @@ func simdGenericOps() []opData { {name: "AndNotUint8x32", argLength: 2, commutative: false}, {name: "AverageUint8x32", argLength: 2, commutative: true}, {name: "AverageMaskedUint8x32", argLength: 3, commutative: true}, + {name: "CompressUint8x32", argLength: 2, commutative: false}, {name: "EqualUint8x32", argLength: 2, commutative: true}, {name: "EqualMaskedUint8x32", argLength: 3, commutative: true}, {name: "GaloisFieldMulUint8x32", argLength: 2, commutative: false}, @@ -1457,10 +1486,10 @@ func simdGenericOps() []opData { {name: "PermuteInt8x32", 
argLength: 2, commutative: false}, {name: "Permute2Int8x32", argLength: 3, commutative: false}, {name: "Permute2Uint8x32", argLength: 3, commutative: false}, - {name: "Permute2MaskedUint8x32", argLength: 4, commutative: false}, {name: "Permute2MaskedInt8x32", argLength: 4, commutative: false}, - {name: "PermuteMaskedUint8x32", argLength: 3, commutative: false}, + {name: "Permute2MaskedUint8x32", argLength: 4, commutative: false}, {name: "PermuteMaskedInt8x32", argLength: 3, commutative: false}, + {name: "PermuteMaskedUint8x32", argLength: 3, commutative: false}, {name: "PopCountUint8x32", argLength: 1, commutative: false}, {name: "PopCountMaskedUint8x32", argLength: 2, commutative: false}, {name: "SaturatedAddUint8x32", argLength: 2, commutative: true}, @@ -1476,6 +1505,7 @@ func simdGenericOps() []opData { {name: "AddMaskedUint8x64", argLength: 3, commutative: true}, {name: "AverageUint8x64", argLength: 2, commutative: true}, {name: "AverageMaskedUint8x64", argLength: 3, commutative: true}, + {name: "CompressUint8x64", argLength: 2, commutative: false}, {name: "EqualUint8x64", argLength: 2, commutative: true}, {name: "EqualMaskedUint8x64", argLength: 3, commutative: true}, {name: "GaloisFieldMulUint8x64", argLength: 2, commutative: false}, @@ -1494,14 +1524,14 @@ func simdGenericOps() []opData { {name: "MinMaskedUint8x64", argLength: 3, commutative: true}, {name: "NotEqualUint8x64", argLength: 2, commutative: true}, {name: "NotEqualMaskedUint8x64", argLength: 3, commutative: true}, - {name: "PermuteUint8x64", argLength: 2, commutative: false}, {name: "PermuteInt8x64", argLength: 2, commutative: false}, - {name: "Permute2Int8x64", argLength: 3, commutative: false}, + {name: "PermuteUint8x64", argLength: 2, commutative: false}, {name: "Permute2Uint8x64", argLength: 3, commutative: false}, + {name: "Permute2Int8x64", argLength: 3, commutative: false}, {name: "Permute2MaskedUint8x64", argLength: 4, commutative: false}, {name: "Permute2MaskedInt8x64", argLength: 4, commutative: false}, - {name: "PermuteMaskedInt8x64", argLength: 3, commutative: false}, {name: "PermuteMaskedUint8x64", argLength: 3, commutative: false}, + {name: "PermuteMaskedInt8x64", argLength: 3, commutative: false}, {name: "PopCountUint8x64", argLength: 1, commutative: false}, {name: "PopCountMaskedUint8x64", argLength: 2, commutative: false}, {name: "SaturatedAddUint8x64", argLength: 2, commutative: true}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 60a12e21fb198e..35612493ea39e4 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1204,6 +1204,7 @@ const ( OpAMD64VRCP14PSMasked512 OpAMD64VRSQRT14PS512 OpAMD64VRSQRT14PSMasked512 + OpAMD64VCOMPRESSPSMasked512 OpAMD64VDIVPS512 OpAMD64VDIVPSMasked512 OpAMD64VFMADD213PS512 @@ -1231,6 +1232,7 @@ const ( OpAMD64VRCP14PSMasked128 OpAMD64VRSQRTPS128 OpAMD64VRSQRT14PSMasked128 + OpAMD64VCOMPRESSPSMasked128 OpAMD64VDIVPS128 OpAMD64VDIVPSMasked128 OpAMD64VFMADD213PS128 @@ -1260,6 +1262,7 @@ const ( OpAMD64VRCP14PSMasked256 OpAMD64VRSQRTPS256 OpAMD64VRSQRT14PSMasked256 + OpAMD64VCOMPRESSPSMasked256 OpAMD64VDIVPS256 OpAMD64VDIVPSMasked256 OpAMD64VFMADD213PS256 @@ -1289,6 +1292,7 @@ const ( OpAMD64VRCP14PDMasked128 OpAMD64VRSQRT14PD128 OpAMD64VRSQRT14PDMasked128 + OpAMD64VCOMPRESSPDMasked128 OpAMD64VDIVPD128 OpAMD64VDIVPDMasked128 OpAMD64VFMADD213PD128 @@ -1318,6 +1322,7 @@ const ( OpAMD64VRCP14PDMasked256 OpAMD64VRSQRT14PD256 OpAMD64VRSQRT14PDMasked256 + OpAMD64VCOMPRESSPDMasked256 
OpAMD64VDIVPD256 OpAMD64VDIVPDMasked256 OpAMD64VFMADD213PD256 @@ -1346,6 +1351,7 @@ const ( OpAMD64VRCP14PDMasked512 OpAMD64VRSQRT14PD512 OpAMD64VRSQRT14PDMasked512 + OpAMD64VCOMPRESSPDMasked512 OpAMD64VDIVPD512 OpAMD64VDIVPDMasked512 OpAMD64VFMADD213PD512 @@ -1370,6 +1376,7 @@ const ( OpAMD64VPABSWMasked256 OpAMD64VPADDW256 OpAMD64VPADDWMasked256 + OpAMD64VPCOMPRESSWMasked256 OpAMD64VPCMPEQW256 OpAMD64VPCMPGTW256 OpAMD64VPMAXSW256 @@ -1411,6 +1418,7 @@ const ( OpAMD64VPABSWMasked512 OpAMD64VPADDW512 OpAMD64VPADDWMasked512 + OpAMD64VPCOMPRESSWMasked512 OpAMD64VPMAXSW512 OpAMD64VPMAXSWMasked512 OpAMD64VPMINSW512 @@ -1445,6 +1453,7 @@ const ( OpAMD64VPABSWMasked128 OpAMD64VPADDW128 OpAMD64VPADDWMasked128 + OpAMD64VPCOMPRESSWMasked128 OpAMD64VPCMPEQW128 OpAMD64VPCMPGTW128 OpAMD64VPMAXSW128 @@ -1490,6 +1499,7 @@ const ( OpAMD64VPANDDMasked512 OpAMD64VPANDND512 OpAMD64VPANDNDMasked512 + OpAMD64VPCOMPRESSDMasked512 OpAMD64VPMAXSD512 OpAMD64VPMAXSDMasked512 OpAMD64VPMINSD512 @@ -1534,6 +1544,7 @@ const ( OpAMD64VPADDDMasked128 OpAMD64VPANDDMasked128 OpAMD64VPANDNDMasked128 + OpAMD64VPCOMPRESSDMasked128 OpAMD64VPCMPEQD128 OpAMD64VPCMPGTD128 OpAMD64VPMAXSD128 @@ -1582,6 +1593,7 @@ const ( OpAMD64VPADDDMasked256 OpAMD64VPANDDMasked256 OpAMD64VPANDNDMasked256 + OpAMD64VPCOMPRESSDMasked256 OpAMD64VPCMPEQD256 OpAMD64VPCMPGTD256 OpAMD64VPMAXSD256 @@ -1630,6 +1642,7 @@ const ( OpAMD64VPADDQMasked128 OpAMD64VPANDQMasked128 OpAMD64VPANDNQMasked128 + OpAMD64VPCOMPRESSQMasked128 OpAMD64VPCMPEQQ128 OpAMD64VPCMPGTQ128 OpAMD64VPMAXSQ128 @@ -1667,6 +1680,7 @@ const ( OpAMD64VPADDQMasked256 OpAMD64VPANDQMasked256 OpAMD64VPANDNQMasked256 + OpAMD64VPCOMPRESSQMasked256 OpAMD64VPCMPEQQ256 OpAMD64VPCMPGTQ256 OpAMD64VPMAXSQ256 @@ -1706,6 +1720,7 @@ const ( OpAMD64VPANDQMasked512 OpAMD64VPANDNQ512 OpAMD64VPANDNQMasked512 + OpAMD64VPCOMPRESSQMasked512 OpAMD64VPMAXSQ512 OpAMD64VPMAXSQMasked512 OpAMD64VPMINSQ512 @@ -1744,6 +1759,7 @@ const ( OpAMD64VPADDBMasked128 OpAMD64VPAND128 OpAMD64VPANDN128 + OpAMD64VPCOMPRESSBMasked128 OpAMD64VPCMPEQB128 OpAMD64VPCMPGTB128 OpAMD64VPMAXSB128 @@ -1767,6 +1783,7 @@ const ( OpAMD64VPADDBMasked256 OpAMD64VPAND256 OpAMD64VPANDN256 + OpAMD64VPCOMPRESSBMasked256 OpAMD64VPCMPEQB256 OpAMD64VPCMPGTB256 OpAMD64VPMAXSB256 @@ -1788,6 +1805,7 @@ const ( OpAMD64VPABSBMasked512 OpAMD64VPADDB512 OpAMD64VPADDBMasked512 + OpAMD64VPCOMPRESSBMasked512 OpAMD64VPMAXSB512 OpAMD64VPMAXSBMasked512 OpAMD64VPMINSB512 @@ -1852,12 +1870,12 @@ const ( OpAMD64VPMAXUDMasked512 OpAMD64VPMINUD512 OpAMD64VPMINUDMasked512 - OpAMD64VPERMPS512 OpAMD64VPERMD512 - OpAMD64VPERMI2D512 + OpAMD64VPERMPS512 OpAMD64VPERMI2PS512 - OpAMD64VPERMI2DMasked512 + OpAMD64VPERMI2D512 OpAMD64VPERMI2PSMasked512 + OpAMD64VPERMI2DMasked512 OpAMD64VPERMPSMasked512 OpAMD64VPERMDMasked512 OpAMD64VPSRLD512 @@ -1882,12 +1900,12 @@ const ( OpAMD64VPMINUD256 OpAMD64VPMINUDMasked256 OpAMD64VPMULUDQ256 - OpAMD64VPERMD256 OpAMD64VPERMPS256 + OpAMD64VPERMD256 OpAMD64VPERMI2D256 OpAMD64VPERMI2PS256 - OpAMD64VPERMI2PSMasked256 OpAMD64VPERMI2DMasked256 + OpAMD64VPERMI2PSMasked256 OpAMD64VPERMPSMasked256 OpAMD64VPERMDMasked256 OpAMD64VPSRLD256 @@ -1901,8 +1919,8 @@ const ( OpAMD64VPMULUDQMasked128 OpAMD64VPERMI2PD128 OpAMD64VPERMI2Q128 - OpAMD64VPERMI2QMasked128 OpAMD64VPERMI2PDMasked128 + OpAMD64VPERMI2QMasked128 OpAMD64VPSRLQ128 OpAMD64VPSRLQMasked128 OpAMD64VPSRLVQ128 @@ -1914,12 +1932,12 @@ const ( OpAMD64VPMULUDQMasked256 OpAMD64VPERMQ256 OpAMD64VPERMPD256 - OpAMD64VPERMI2PD256 OpAMD64VPERMI2Q256 + OpAMD64VPERMI2PD256 OpAMD64VPERMI2PDMasked256 
OpAMD64VPERMI2QMasked256 - OpAMD64VPERMPDMasked256 OpAMD64VPERMQMasked256 + OpAMD64VPERMPDMasked256 OpAMD64VPSRLQ256 OpAMD64VPSRLQMasked256 OpAMD64VPSRLVQ256 @@ -1936,8 +1954,8 @@ const ( OpAMD64VPERMI2PD512 OpAMD64VPERMI2QMasked512 OpAMD64VPERMI2PDMasked512 - OpAMD64VPERMPDMasked512 OpAMD64VPERMQMasked512 + OpAMD64VPERMPDMasked512 OpAMD64VPSRLQ512 OpAMD64VPSRLQMasked512 OpAMD64VPSRLVQ512 @@ -4391,6 +4409,7 @@ const ( OpApproximateReciprocalMaskedFloat32x16 OpApproximateReciprocalOfSqrtFloat32x16 OpApproximateReciprocalOfSqrtMaskedFloat32x16 + OpCompressFloat32x16 OpDivFloat32x16 OpDivMaskedFloat32x16 OpEqualFloat32x16 @@ -4433,6 +4452,7 @@ const ( OpApproximateReciprocalOfSqrtFloat32x4 OpApproximateReciprocalOfSqrtMaskedFloat32x4 OpCeilFloat32x4 + OpCompressFloat32x4 OpDivFloat32x4 OpDivMaskedFloat32x4 OpDotProdBroadcastFloat32x4 @@ -4481,6 +4501,7 @@ const ( OpApproximateReciprocalOfSqrtFloat32x8 OpApproximateReciprocalOfSqrtMaskedFloat32x8 OpCeilFloat32x8 + OpCompressFloat32x8 OpDivFloat32x8 OpDivMaskedFloat32x8 OpDotProdBroadcastFloat32x8 @@ -4529,6 +4550,7 @@ const ( OpApproximateReciprocalOfSqrtFloat64x2 OpApproximateReciprocalOfSqrtMaskedFloat64x2 OpCeilFloat64x2 + OpCompressFloat64x2 OpDivFloat64x2 OpDivMaskedFloat64x2 OpDotProdBroadcastFloat64x2 @@ -4577,6 +4599,7 @@ const ( OpApproximateReciprocalOfSqrtFloat64x4 OpApproximateReciprocalOfSqrtMaskedFloat64x4 OpCeilFloat64x4 + OpCompressFloat64x4 OpDivFloat64x4 OpDivMaskedFloat64x4 OpEqualFloat64x4 @@ -4622,6 +4645,7 @@ const ( OpApproximateReciprocalMaskedFloat64x8 OpApproximateReciprocalOfSqrtFloat64x8 OpApproximateReciprocalOfSqrtMaskedFloat64x8 + OpCompressFloat64x8 OpDivFloat64x8 OpDivMaskedFloat64x8 OpEqualFloat64x8 @@ -4662,6 +4686,7 @@ const ( OpAddMaskedInt16x16 OpAndInt16x16 OpAndNotInt16x16 + OpCompressInt16x16 OpEqualInt16x16 OpEqualMaskedInt16x16 OpGreaterInt16x16 @@ -4715,6 +4740,7 @@ const ( OpAbsoluteMaskedInt16x32 OpAddInt16x32 OpAddMaskedInt16x32 + OpCompressInt16x32 OpEqualInt16x32 OpEqualMaskedInt16x32 OpGreaterInt16x32 @@ -4763,6 +4789,7 @@ const ( OpAddMaskedInt16x8 OpAndInt16x8 OpAndNotInt16x8 + OpCompressInt16x8 OpEqualInt16x8 OpEqualMaskedInt16x8 OpGreaterInt16x8 @@ -4820,6 +4847,7 @@ const ( OpAndMaskedInt32x16 OpAndNotInt32x16 OpAndNotMaskedInt32x16 + OpCompressInt32x16 OpEqualInt32x16 OpEqualMaskedInt32x16 OpGreaterInt32x16 @@ -4878,6 +4906,7 @@ const ( OpAndMaskedInt32x4 OpAndNotInt32x4 OpAndNotMaskedInt32x4 + OpCompressInt32x4 OpEqualInt32x4 OpEqualMaskedInt32x4 OpGreaterInt32x4 @@ -4940,6 +4969,7 @@ const ( OpAndMaskedInt32x8 OpAndNotInt32x8 OpAndNotMaskedInt32x8 + OpCompressInt32x8 OpEqualInt32x8 OpEqualMaskedInt32x8 OpGreaterInt32x8 @@ -5002,6 +5032,7 @@ const ( OpAndMaskedInt64x2 OpAndNotInt64x2 OpAndNotMaskedInt64x2 + OpCompressInt64x2 OpEqualInt64x2 OpEqualMaskedInt64x2 OpGreaterInt64x2 @@ -5054,6 +5085,7 @@ const ( OpAndMaskedInt64x4 OpAndNotInt64x4 OpAndNotMaskedInt64x4 + OpCompressInt64x4 OpEqualInt64x4 OpEqualMaskedInt64x4 OpGreaterInt64x4 @@ -5106,6 +5138,7 @@ const ( OpAndMaskedInt64x8 OpAndNotInt64x8 OpAndNotMaskedInt64x8 + OpCompressInt64x8 OpEqualInt64x8 OpEqualMaskedInt64x8 OpGreaterInt64x8 @@ -5156,6 +5189,7 @@ const ( OpAddMaskedInt8x16 OpAndInt8x16 OpAndNotInt8x16 + OpCompressInt8x16 OpEqualInt8x16 OpEqualMaskedInt8x16 OpGreaterInt8x16 @@ -5189,6 +5223,7 @@ const ( OpAddMaskedInt8x32 OpAndInt8x32 OpAndNotInt8x32 + OpCompressInt8x32 OpEqualInt8x32 OpEqualMaskedInt8x32 OpGreaterInt8x32 @@ -5220,6 +5255,7 @@ const ( OpAbsoluteMaskedInt8x64 OpAddInt8x64 OpAddMaskedInt8x64 + 
OpCompressInt8x64 OpEqualInt8x64 OpEqualMaskedInt8x64 OpGreaterInt8x64 @@ -5250,6 +5286,7 @@ const ( OpAndNotUint16x16 OpAverageUint16x16 OpAverageMaskedUint16x16 + OpCompressUint16x16 OpEqualUint16x16 OpEqualMaskedUint16x16 OpGreaterUint16x16 @@ -5275,10 +5312,10 @@ const ( OpPermuteUint16x16 OpPermute2Uint16x16 OpPermute2Int16x16 - OpPermute2MaskedUint16x16 OpPermute2MaskedInt16x16 - OpPermuteMaskedUint16x16 + OpPermute2MaskedUint16x16 OpPermuteMaskedInt16x16 + OpPermuteMaskedUint16x16 OpPopCountUint16x16 OpPopCountMaskedUint16x16 OpSaturatedAddUint16x16 @@ -5304,6 +5341,7 @@ const ( OpAddMaskedUint16x32 OpAverageUint16x32 OpAverageMaskedUint16x32 + OpCompressUint16x32 OpEqualUint16x32 OpEqualMaskedUint16x32 OpGreaterUint16x32 @@ -5322,12 +5360,12 @@ const ( OpMulHighMaskedUint16x32 OpNotEqualUint16x32 OpNotEqualMaskedUint16x32 - OpPermuteUint16x32 OpPermuteInt16x32 + OpPermuteUint16x32 OpPermute2Int16x32 OpPermute2Uint16x32 - OpPermute2MaskedUint16x32 OpPermute2MaskedInt16x32 + OpPermute2MaskedUint16x32 OpPermuteMaskedUint16x32 OpPermuteMaskedInt16x32 OpPopCountUint16x32 @@ -5356,6 +5394,7 @@ const ( OpAndNotUint16x8 OpAverageUint16x8 OpAverageMaskedUint16x8 + OpCompressUint16x8 OpEqualUint16x8 OpEqualMaskedUint16x8 OpGreaterUint16x8 @@ -5412,6 +5451,7 @@ const ( OpAndMaskedUint32x16 OpAndNotUint32x16 OpAndNotMaskedUint32x16 + OpCompressUint32x16 OpEqualUint32x16 OpEqualMaskedUint32x16 OpGreaterUint32x16 @@ -5431,17 +5471,17 @@ const ( OpOrUint32x16 OpOrMaskedUint32x16 OpPermuteInt32x16 - OpPermuteUint32x16 OpPermuteFloat32x16 - OpPermute2Int32x16 + OpPermuteUint32x16 OpPermute2Uint32x16 OpPermute2Float32x16 + OpPermute2Int32x16 OpPermute2MaskedUint32x16 OpPermute2MaskedInt32x16 OpPermute2MaskedFloat32x16 + OpPermuteMaskedFloat32x16 OpPermuteMaskedUint32x16 OpPermuteMaskedInt32x16 - OpPermuteMaskedFloat32x16 OpPopCountUint32x16 OpPopCountMaskedUint32x16 OpRotateLeftUint32x16 @@ -5474,6 +5514,7 @@ const ( OpAndMaskedUint32x4 OpAndNotUint32x4 OpAndNotMaskedUint32x4 + OpCompressUint32x4 OpEqualUint32x4 OpEqualMaskedUint32x4 OpGreaterUint32x4 @@ -5496,11 +5537,11 @@ const ( OpPairwiseAddUint32x4 OpPairwiseSubUint32x4 OpPermute2Uint32x4 - OpPermute2Float32x4 OpPermute2Int32x4 - OpPermute2MaskedUint32x4 - OpPermute2MaskedInt32x4 + OpPermute2Float32x4 OpPermute2MaskedFloat32x4 + OpPermute2MaskedInt32x4 + OpPermute2MaskedUint32x4 OpPopCountUint32x4 OpPopCountMaskedUint32x4 OpRotateLeftUint32x4 @@ -5533,6 +5574,7 @@ const ( OpAndMaskedUint32x8 OpAndNotUint32x8 OpAndNotMaskedUint32x8 + OpCompressUint32x8 OpEqualUint32x8 OpEqualMaskedUint32x8 OpGreaterUint32x8 @@ -5554,18 +5596,18 @@ const ( OpOrMaskedUint32x8 OpPairwiseAddUint32x8 OpPairwiseSubUint32x8 + OpPermuteUint32x8 OpPermuteInt32x8 OpPermuteFloat32x8 - OpPermuteUint32x8 OpPermute2Uint32x8 OpPermute2Float32x8 OpPermute2Int32x8 OpPermute2MaskedFloat32x8 - OpPermute2MaskedUint32x8 OpPermute2MaskedInt32x8 + OpPermute2MaskedUint32x8 OpPermuteMaskedInt32x8 - OpPermuteMaskedFloat32x8 OpPermuteMaskedUint32x8 + OpPermuteMaskedFloat32x8 OpPopCountUint32x8 OpPopCountMaskedUint32x8 OpRotateLeftUint32x8 @@ -5598,6 +5640,7 @@ const ( OpAndMaskedUint64x2 OpAndNotUint64x2 OpAndNotMaskedUint64x2 + OpCompressUint64x2 OpEqualUint64x2 OpEqualMaskedUint64x2 OpGreaterUint64x2 @@ -5618,11 +5661,11 @@ const ( OpNotEqualMaskedUint64x2 OpOrUint64x2 OpOrMaskedUint64x2 + OpPermute2Float64x2 OpPermute2Uint64x2 OpPermute2Int64x2 - OpPermute2Float64x2 - OpPermute2MaskedUint64x2 OpPermute2MaskedInt64x2 + OpPermute2MaskedUint64x2 OpPermute2MaskedFloat64x2 
OpPopCountUint64x2 OpPopCountMaskedUint64x2 @@ -5652,6 +5695,7 @@ const ( OpAndMaskedUint64x4 OpAndNotUint64x4 OpAndNotMaskedUint64x4 + OpCompressUint64x4 OpEqualUint64x4 OpEqualMaskedUint64x4 OpGreaterUint64x4 @@ -5672,18 +5716,18 @@ const ( OpNotEqualMaskedUint64x4 OpOrUint64x4 OpOrMaskedUint64x4 + OpPermuteFloat64x4 OpPermuteUint64x4 OpPermuteInt64x4 - OpPermuteFloat64x4 - OpPermute2Uint64x4 OpPermute2Int64x4 + OpPermute2Uint64x4 OpPermute2Float64x4 - OpPermute2MaskedInt64x4 - OpPermute2MaskedUint64x4 OpPermute2MaskedFloat64x4 + OpPermute2MaskedUint64x4 + OpPermute2MaskedInt64x4 OpPermuteMaskedFloat64x4 - OpPermuteMaskedInt64x4 OpPermuteMaskedUint64x4 + OpPermuteMaskedInt64x4 OpPopCountUint64x4 OpPopCountMaskedUint64x4 OpRotateLeftUint64x4 @@ -5712,6 +5756,7 @@ const ( OpAndMaskedUint64x8 OpAndNotUint64x8 OpAndNotMaskedUint64x8 + OpCompressUint64x8 OpEqualUint64x8 OpEqualMaskedUint64x8 OpGreaterUint64x8 @@ -5732,18 +5777,18 @@ const ( OpNotEqualMaskedUint64x8 OpOrUint64x8 OpOrMaskedUint64x8 - OpPermuteUint64x8 OpPermuteInt64x8 + OpPermuteUint64x8 OpPermuteFloat64x8 - OpPermute2Int64x8 OpPermute2Uint64x8 OpPermute2Float64x8 + OpPermute2Int64x8 OpPermute2MaskedUint64x8 - OpPermute2MaskedInt64x8 OpPermute2MaskedFloat64x8 - OpPermuteMaskedFloat64x8 - OpPermuteMaskedInt64x8 + OpPermute2MaskedInt64x8 OpPermuteMaskedUint64x8 + OpPermuteMaskedInt64x8 + OpPermuteMaskedFloat64x8 OpPopCountUint64x8 OpPopCountMaskedUint64x8 OpRotateLeftUint64x8 @@ -5772,6 +5817,7 @@ const ( OpAndNotUint8x16 OpAverageUint8x16 OpAverageMaskedUint8x16 + OpCompressUint8x16 OpEqualUint8x16 OpEqualMaskedUint8x16 OpGaloisFieldMulUint8x16 @@ -5793,12 +5839,12 @@ const ( OpOrUint8x16 OpPermuteUint8x16 OpPermuteInt8x16 - OpPermute2Uint8x16 OpPermute2Int8x16 + OpPermute2Uint8x16 OpPermute2MaskedInt8x16 OpPermute2MaskedUint8x16 - OpPermuteMaskedInt8x16 OpPermuteMaskedUint8x16 + OpPermuteMaskedInt8x16 OpPopCountUint8x16 OpPopCountMaskedUint8x16 OpSaturatedAddUint8x16 @@ -5816,6 +5862,7 @@ const ( OpAndNotUint8x32 OpAverageUint8x32 OpAverageMaskedUint8x32 + OpCompressUint8x32 OpEqualUint8x32 OpEqualMaskedUint8x32 OpGaloisFieldMulUint8x32 @@ -5839,10 +5886,10 @@ const ( OpPermuteInt8x32 OpPermute2Int8x32 OpPermute2Uint8x32 - OpPermute2MaskedUint8x32 OpPermute2MaskedInt8x32 - OpPermuteMaskedUint8x32 + OpPermute2MaskedUint8x32 OpPermuteMaskedInt8x32 + OpPermuteMaskedUint8x32 OpPopCountUint8x32 OpPopCountMaskedUint8x32 OpSaturatedAddUint8x32 @@ -5858,6 +5905,7 @@ const ( OpAddMaskedUint8x64 OpAverageUint8x64 OpAverageMaskedUint8x64 + OpCompressUint8x64 OpEqualUint8x64 OpEqualMaskedUint8x64 OpGaloisFieldMulUint8x64 @@ -5876,14 +5924,14 @@ const ( OpMinMaskedUint8x64 OpNotEqualUint8x64 OpNotEqualMaskedUint8x64 - OpPermuteUint8x64 OpPermuteInt8x64 - OpPermute2Int8x64 + OpPermuteUint8x64 OpPermute2Uint8x64 + OpPermute2Int8x64 OpPermute2MaskedUint8x64 OpPermute2MaskedInt8x64 - OpPermuteMaskedInt8x64 OpPermuteMaskedUint8x64 + OpPermuteMaskedInt8x64 OpPopCountUint8x64 OpPopCountMaskedUint8x64 OpSaturatedAddUint8x64 @@ -18850,6 +18898,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VCOMPRESSPSMasked512", + argLen: 2, + asm: x86.AVCOMPRESSPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VDIVPS512", argLen: 2, @@ -19255,6 +19317,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: 
"VCOMPRESSPSMasked128", + argLen: 2, + asm: x86.AVCOMPRESSPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VDIVPS128", argLen: 2, @@ -19688,6 +19764,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VCOMPRESSPSMasked256", + argLen: 2, + asm: x86.AVCOMPRESSPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VDIVPS256", argLen: 2, @@ -20121,6 +20211,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VCOMPRESSPDMasked128", + argLen: 2, + asm: x86.AVCOMPRESSPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VDIVPD128", argLen: 2, @@ -20554,6 +20658,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VCOMPRESSPDMasked256", + argLen: 2, + asm: x86.AVCOMPRESSPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VDIVPD256", argLen: 2, @@ -20973,6 +21091,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VCOMPRESSPDMasked512", + argLen: 2, + asm: x86.AVCOMPRESSPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VDIVPD512", argLen: 2, @@ -21337,6 +21469,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPCOMPRESSWMasked256", + argLen: 2, + asm: x86.AVPCOMPRESSW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPEQW256", argLen: 2, @@ -21945,6 +22091,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPCOMPRESSWMasked512", + argLen: 2, + asm: x86.AVPCOMPRESSW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMAXSW512", argLen: 2, @@ -22454,6 +22614,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPCOMPRESSWMasked128", + argLen: 2, + asm: x86.AVPCOMPRESSW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPEQW128", argLen: 2, @@ -23122,6 +23296,20 @@ var 
opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPCOMPRESSDMasked512", + argLen: 2, + asm: x86.AVPCOMPRESSD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMAXSD512", argLen: 2, @@ -23794,6 +23982,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPCOMPRESSDMasked128", + argLen: 2, + asm: x86.AVPCOMPRESSD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPEQD128", argLen: 2, @@ -24522,6 +24724,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPCOMPRESSDMasked256", + argLen: 2, + asm: x86.AVPCOMPRESSD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPEQD256", argLen: 2, @@ -25250,6 +25466,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPCOMPRESSQMasked128", + argLen: 2, + asm: x86.AVPCOMPRESSQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPEQQ128", argLen: 2, @@ -25805,6 +26035,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPCOMPRESSQMasked256", + argLen: 2, + asm: x86.AVPCOMPRESSQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPEQQ256", argLen: 2, @@ -26389,6 +26633,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPCOMPRESSQMasked512", + argLen: 2, + asm: x86.AVPCOMPRESSQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMAXSQ512", argLen: 2, @@ -26958,6 +27216,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPCOMPRESSBMasked128", + argLen: 2, + asm: x86.AVPCOMPRESSB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPEQB128", argLen: 2, @@ -27296,6 +27568,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPCOMPRESSBMasked256", + argLen: 2, + asm: x86.AVPCOMPRESSB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: 
"VPCMPEQB256", argLen: 2, @@ -27605,6 +27891,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPCOMPRESSBMasked512", + argLen: 2, + asm: x86.AVPCOMPRESSB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMAXSB512", argLen: 2, @@ -28578,9 +28878,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMPS512", + name: "VPERMD512", argLen: 2, - asm: x86.AVPERMPS, + asm: x86.AVPERMD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28592,9 +28892,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMD512", + name: "VPERMPS512", argLen: 2, - asm: x86.AVPERMD, + asm: x86.AVPERMPS, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28606,10 +28906,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2D512", + name: "VPERMI2PS512", argLen: 3, resultInArg0: true, - asm: x86.AVPERMI2D, + asm: x86.AVPERMI2PS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28622,10 +28922,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2PS512", + name: "VPERMI2D512", argLen: 3, resultInArg0: true, - asm: x86.AVPERMI2PS, + asm: x86.AVPERMI2D, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28638,10 +28938,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2DMasked512", + name: "VPERMI2PSMasked512", argLen: 4, resultInArg0: true, - asm: x86.AVPERMI2D, + asm: x86.AVPERMI2PS, reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -28655,10 +28955,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2PSMasked512", + name: "VPERMI2DMasked512", argLen: 4, resultInArg0: true, - asm: x86.AVPERMI2PS, + asm: x86.AVPERMI2D, reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -29038,9 +29338,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMD256", + name: "VPERMPS256", argLen: 2, - asm: x86.AVPERMD, + asm: x86.AVPERMPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29052,9 +29352,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMPS256", + name: "VPERMD256", argLen: 2, - asm: x86.AVPERMPS, + asm: x86.AVPERMD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29098,10 +29398,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2PSMasked256", + name: "VPERMI2DMasked256", argLen: 4, resultInArg0: true, - asm: x86.AVPERMI2PS, + asm: x86.AVPERMI2D, reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -29115,10 +29415,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2DMasked256", + name: "VPERMI2PSMasked256", argLen: 4, resultInArg0: true, - asm: x86.AVPERMI2D, + asm: x86.AVPERMI2PS, reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -29330,10 +29630,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2QMasked128", + name: "VPERMI2PDMasked128", argLen: 4, resultInArg0: true, - asm: 
x86.AVPERMI2Q, + asm: x86.AVPERMI2PD, reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -29347,10 +29647,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2PDMasked128", + name: "VPERMI2QMasked128", argLen: 4, resultInArg0: true, - asm: x86.AVPERMI2PD, + asm: x86.AVPERMI2Q, reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -29528,10 +29828,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2PD256", + name: "VPERMI2Q256", argLen: 3, resultInArg0: true, - asm: x86.AVPERMI2PD, + asm: x86.AVPERMI2Q, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29544,10 +29844,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2Q256", + name: "VPERMI2PD256", argLen: 3, resultInArg0: true, - asm: x86.AVPERMI2Q, + asm: x86.AVPERMI2PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29594,9 +29894,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMPDMasked256", + name: "VPERMQMasked256", argLen: 3, - asm: x86.AVPERMPD, + asm: x86.AVPERMQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -29609,9 +29909,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMQMasked256", + name: "VPERMPDMasked256", argLen: 3, - asm: x86.AVPERMQ, + asm: x86.AVPERMPD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -29869,9 +30169,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMPDMasked512", + name: "VPERMQMasked512", argLen: 3, - asm: x86.AVPERMPD, + asm: x86.AVPERMQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -29884,9 +30184,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMQMasked512", + name: "VPERMPDMasked512", argLen: 3, - asm: x86.AVPERMQ, + asm: x86.AVPERMPD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -60471,6 +60771,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "CompressFloat32x16", + argLen: 2, + generic: true, + }, { name: "DivFloat32x16", argLen: 2, @@ -60695,6 +61000,11 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "CompressFloat32x4", + argLen: 2, + generic: true, + }, { name: "DivFloat32x4", argLen: 2, @@ -60950,6 +61260,11 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "CompressFloat32x8", + argLen: 2, + generic: true, + }, { name: "DivFloat32x8", argLen: 2, @@ -61205,6 +61520,11 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "CompressFloat64x2", + argLen: 2, + generic: true, + }, { name: "DivFloat64x2", argLen: 2, @@ -61460,6 +61780,11 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "CompressFloat64x4", + argLen: 2, + generic: true, + }, { name: "DivFloat64x4", argLen: 2, @@ -61699,6 +62024,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "CompressFloat64x8", + argLen: 2, + generic: true, + }, { name: "DivFloat64x8", argLen: 2, @@ -61914,6 +62244,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "CompressInt16x16", + argLen: 2, + generic: true, + }, { name: "EqualInt16x16", argLen: 2, @@ -62197,6 +62532,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "CompressInt16x32", + argLen: 2, + generic: true, + }, { name: "EqualInt16x32", argLen: 2, @@ -62454,6 +62794,11 @@ var 
opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "CompressInt16x8", + argLen: 2, + generic: true, + }, { name: "EqualInt16x8", argLen: 2, @@ -62759,6 +63104,11 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "CompressInt32x16", + argLen: 2, + generic: true, + }, { name: "EqualInt32x16", argLen: 2, @@ -63067,6 +63417,11 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "CompressInt32x4", + argLen: 2, + generic: true, + }, { name: "EqualInt32x4", argLen: 2, @@ -63396,6 +63751,11 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "CompressInt32x8", + argLen: 2, + generic: true, + }, { name: "EqualInt32x8", argLen: 2, @@ -63725,6 +64085,11 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "CompressInt64x2", + argLen: 2, + generic: true, + }, { name: "EqualInt64x2", argLen: 2, @@ -64005,6 +64370,11 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "CompressInt64x4", + argLen: 2, + generic: true, + }, { name: "EqualInt64x4", argLen: 2, @@ -64285,6 +64655,11 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "CompressInt64x8", + argLen: 2, + generic: true, + }, { name: "EqualInt64x8", argLen: 2, @@ -64554,6 +64929,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "CompressInt8x16", + argLen: 2, + generic: true, + }, { name: "EqualInt8x16", argLen: 2, @@ -64734,6 +65114,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "CompressInt8x32", + argLen: 2, + generic: true, + }, { name: "EqualInt8x32", argLen: 2, @@ -64903,6 +65288,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "CompressInt8x64", + argLen: 2, + generic: true, + }, { name: "EqualInt8x64", argLen: 2, @@ -65068,6 +65458,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "CompressUint16x16", + argLen: 2, + generic: true, + }, { name: "EqualUint16x16", argLen: 2, @@ -65205,22 +65600,22 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2MaskedUint16x16", + name: "Permute2MaskedInt16x16", argLen: 4, generic: true, }, { - name: "Permute2MaskedInt16x16", + name: "Permute2MaskedUint16x16", argLen: 4, generic: true, }, { - name: "PermuteMaskedUint16x16", + name: "PermuteMaskedInt16x16", argLen: 3, generic: true, }, { - name: "PermuteMaskedInt16x16", + name: "PermuteMaskedUint16x16", argLen: 3, generic: true, }, @@ -65356,6 +65751,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "CompressUint16x32", + argLen: 2, + generic: true, + }, { name: "EqualUint16x32", argLen: 2, @@ -65457,12 +65857,12 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteUint16x32", + name: "PermuteInt16x32", argLen: 2, generic: true, }, { - name: "PermuteInt16x32", + name: "PermuteUint16x32", argLen: 2, generic: true, }, @@ -65477,12 +65877,12 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2MaskedUint16x32", + name: "Permute2MaskedInt16x32", argLen: 4, generic: true, }, { - name: "Permute2MaskedInt16x32", + name: "Permute2MaskedUint16x32", argLen: 4, generic: true, }, @@ -65633,6 +66033,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "CompressUint16x8", + argLen: 2, + generic: true, + }, { name: "EqualUint16x8", argLen: 2, @@ -65931,6 +66336,11 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "CompressUint32x16", + argLen: 
2, + generic: true, + }, { name: "EqualUint32x16", argLen: 2, @@ -66037,27 +66447,27 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteUint32x16", + name: "PermuteFloat32x16", argLen: 2, generic: true, }, { - name: "PermuteFloat32x16", + name: "PermuteUint32x16", argLen: 2, generic: true, }, { - name: "Permute2Int32x16", + name: "Permute2Uint32x16", argLen: 3, generic: true, }, { - name: "Permute2Uint32x16", + name: "Permute2Float32x16", argLen: 3, generic: true, }, { - name: "Permute2Float32x16", + name: "Permute2Int32x16", argLen: 3, generic: true, }, @@ -66077,17 +66487,17 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteMaskedUint32x16", + name: "PermuteMaskedFloat32x16", argLen: 3, generic: true, }, { - name: "PermuteMaskedInt32x16", + name: "PermuteMaskedUint32x16", argLen: 3, generic: true, }, { - name: "PermuteMaskedFloat32x16", + name: "PermuteMaskedInt32x16", argLen: 3, generic: true, }, @@ -66257,6 +66667,11 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "CompressUint32x4", + argLen: 2, + generic: true, + }, { name: "EqualUint32x4", argLen: 2, @@ -66379,17 +66794,17 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2Float32x4", + name: "Permute2Int32x4", argLen: 3, generic: true, }, { - name: "Permute2Int32x4", + name: "Permute2Float32x4", argLen: 3, generic: true, }, { - name: "Permute2MaskedUint32x4", + name: "Permute2MaskedFloat32x4", argLen: 4, generic: true, }, @@ -66399,7 +66814,7 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2MaskedFloat32x4", + name: "Permute2MaskedUint32x4", argLen: 4, generic: true, }, @@ -66569,6 +66984,11 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "CompressUint32x8", + argLen: 2, + generic: true, + }, { name: "EqualUint32x8", argLen: 2, @@ -66686,17 +67106,17 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteInt32x8", + name: "PermuteUint32x8", argLen: 2, generic: true, }, { - name: "PermuteFloat32x8", + name: "PermuteInt32x8", argLen: 2, generic: true, }, { - name: "PermuteUint32x8", + name: "PermuteFloat32x8", argLen: 2, generic: true, }, @@ -66721,12 +67141,12 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2MaskedUint32x8", + name: "Permute2MaskedInt32x8", argLen: 4, generic: true, }, { - name: "Permute2MaskedInt32x8", + name: "Permute2MaskedUint32x8", argLen: 4, generic: true, }, @@ -66736,12 +67156,12 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteMaskedFloat32x8", + name: "PermuteMaskedUint32x8", argLen: 3, generic: true, }, { - name: "PermuteMaskedUint32x8", + name: "PermuteMaskedFloat32x8", argLen: 3, generic: true, }, @@ -66911,6 +67331,11 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "CompressUint64x2", + argLen: 2, + generic: true, + }, { name: "EqualUint64x2", argLen: 2, @@ -67024,27 +67449,27 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2Uint64x2", + name: "Permute2Float64x2", argLen: 3, generic: true, }, { - name: "Permute2Int64x2", + name: "Permute2Uint64x2", argLen: 3, generic: true, }, { - name: "Permute2Float64x2", + name: "Permute2Int64x2", argLen: 3, generic: true, }, { - name: "Permute2MaskedUint64x2", + name: "Permute2MaskedInt64x2", argLen: 4, generic: true, }, { - name: "Permute2MaskedInt64x2", + name: "Permute2MaskedUint64x2", argLen: 4, generic: true, }, @@ -67199,6 +67624,11 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: 
"CompressUint64x4", + argLen: 2, + generic: true, + }, { name: "EqualUint64x4", argLen: 2, @@ -67312,27 +67742,27 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteUint64x4", + name: "PermuteFloat64x4", argLen: 2, generic: true, }, { - name: "PermuteInt64x4", + name: "PermuteUint64x4", argLen: 2, generic: true, }, { - name: "PermuteFloat64x4", + name: "PermuteInt64x4", argLen: 2, generic: true, }, { - name: "Permute2Uint64x4", + name: "Permute2Int64x4", argLen: 3, generic: true, }, { - name: "Permute2Int64x4", + name: "Permute2Uint64x4", argLen: 3, generic: true, }, @@ -67342,7 +67772,7 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2MaskedInt64x4", + name: "Permute2MaskedFloat64x4", argLen: 4, generic: true, }, @@ -67352,7 +67782,7 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2MaskedFloat64x4", + name: "Permute2MaskedInt64x4", argLen: 4, generic: true, }, @@ -67362,12 +67792,12 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteMaskedInt64x4", + name: "PermuteMaskedUint64x4", argLen: 3, generic: true, }, { - name: "PermuteMaskedUint64x4", + name: "PermuteMaskedInt64x4", argLen: 3, generic: true, }, @@ -67517,6 +67947,11 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "CompressUint64x8", + argLen: 2, + generic: true, + }, { name: "EqualUint64x8", argLen: 2, @@ -67630,12 +68065,12 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteUint64x8", + name: "PermuteInt64x8", argLen: 2, generic: true, }, { - name: "PermuteInt64x8", + name: "PermuteUint64x8", argLen: 2, generic: true, }, @@ -67645,17 +68080,17 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2Int64x8", + name: "Permute2Uint64x8", argLen: 3, generic: true, }, { - name: "Permute2Uint64x8", + name: "Permute2Float64x8", argLen: 3, generic: true, }, { - name: "Permute2Float64x8", + name: "Permute2Int64x8", argLen: 3, generic: true, }, @@ -67665,17 +68100,17 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2MaskedInt64x8", + name: "Permute2MaskedFloat64x8", argLen: 4, generic: true, }, { - name: "Permute2MaskedFloat64x8", + name: "Permute2MaskedInt64x8", argLen: 4, generic: true, }, { - name: "PermuteMaskedFloat64x8", + name: "PermuteMaskedUint64x8", argLen: 3, generic: true, }, @@ -67685,7 +68120,7 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteMaskedUint64x8", + name: "PermuteMaskedFloat64x8", argLen: 3, generic: true, }, @@ -67836,6 +68271,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "CompressUint8x16", + argLen: 2, + generic: true, + }, { name: "EqualUint8x16", argLen: 2, @@ -67951,12 +68391,12 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2Uint8x16", + name: "Permute2Int8x16", argLen: 3, generic: true, }, { - name: "Permute2Int8x16", + name: "Permute2Uint8x16", argLen: 3, generic: true, }, @@ -67971,12 +68411,12 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteMaskedInt8x16", + name: "PermuteMaskedUint8x16", argLen: 3, generic: true, }, { - name: "PermuteMaskedUint8x16", + name: "PermuteMaskedInt8x16", argLen: 3, generic: true, }, @@ -68073,6 +68513,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "CompressUint8x32", + argLen: 2, + generic: true, + }, { name: "EqualUint8x32", argLen: 2, @@ -68198,22 +68643,22 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2MaskedUint8x32", + name: 
"Permute2MaskedInt8x32", argLen: 4, generic: true, }, { - name: "Permute2MaskedInt8x32", + name: "Permute2MaskedUint8x32", argLen: 4, generic: true, }, { - name: "PermuteMaskedUint8x32", + name: "PermuteMaskedInt8x32", argLen: 3, generic: true, }, { - name: "PermuteMaskedInt8x32", + name: "PermuteMaskedUint8x32", argLen: 3, generic: true, }, @@ -68299,6 +68744,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "CompressUint8x64", + argLen: 2, + generic: true, + }, { name: "EqualUint8x64", argLen: 2, @@ -68398,22 +68848,22 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteUint8x64", + name: "PermuteInt8x64", argLen: 2, generic: true, }, { - name: "PermuteInt8x64", + name: "PermuteUint8x64", argLen: 2, generic: true, }, { - name: "Permute2Int8x64", + name: "Permute2Uint8x64", argLen: 3, generic: true, }, { - name: "Permute2Uint8x64", + name: "Permute2Int8x64", argLen: 3, generic: true, }, @@ -68428,12 +68878,12 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteMaskedInt8x64", + name: "PermuteMaskedUint8x64", argLen: 3, generic: true, }, { - name: "PermuteMaskedUint8x64", + name: "PermuteMaskedInt8x64", argLen: 3, generic: true, }, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 1aa36bee04202a..53dffe10e4e036 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1185,6 +1185,66 @@ func rewriteValueAMD64(v *Value) bool { case OpCom8: v.Op = OpAMD64NOTL return true + case OpCompressFloat32x16: + return rewriteValueAMD64_OpCompressFloat32x16(v) + case OpCompressFloat32x4: + return rewriteValueAMD64_OpCompressFloat32x4(v) + case OpCompressFloat32x8: + return rewriteValueAMD64_OpCompressFloat32x8(v) + case OpCompressFloat64x2: + return rewriteValueAMD64_OpCompressFloat64x2(v) + case OpCompressFloat64x4: + return rewriteValueAMD64_OpCompressFloat64x4(v) + case OpCompressFloat64x8: + return rewriteValueAMD64_OpCompressFloat64x8(v) + case OpCompressInt16x16: + return rewriteValueAMD64_OpCompressInt16x16(v) + case OpCompressInt16x32: + return rewriteValueAMD64_OpCompressInt16x32(v) + case OpCompressInt16x8: + return rewriteValueAMD64_OpCompressInt16x8(v) + case OpCompressInt32x16: + return rewriteValueAMD64_OpCompressInt32x16(v) + case OpCompressInt32x4: + return rewriteValueAMD64_OpCompressInt32x4(v) + case OpCompressInt32x8: + return rewriteValueAMD64_OpCompressInt32x8(v) + case OpCompressInt64x2: + return rewriteValueAMD64_OpCompressInt64x2(v) + case OpCompressInt64x4: + return rewriteValueAMD64_OpCompressInt64x4(v) + case OpCompressInt64x8: + return rewriteValueAMD64_OpCompressInt64x8(v) + case OpCompressInt8x16: + return rewriteValueAMD64_OpCompressInt8x16(v) + case OpCompressInt8x32: + return rewriteValueAMD64_OpCompressInt8x32(v) + case OpCompressInt8x64: + return rewriteValueAMD64_OpCompressInt8x64(v) + case OpCompressUint16x16: + return rewriteValueAMD64_OpCompressUint16x16(v) + case OpCompressUint16x32: + return rewriteValueAMD64_OpCompressUint16x32(v) + case OpCompressUint16x8: + return rewriteValueAMD64_OpCompressUint16x8(v) + case OpCompressUint32x16: + return rewriteValueAMD64_OpCompressUint32x16(v) + case OpCompressUint32x4: + return rewriteValueAMD64_OpCompressUint32x4(v) + case OpCompressUint32x8: + return rewriteValueAMD64_OpCompressUint32x8(v) + case OpCompressUint64x2: + return rewriteValueAMD64_OpCompressUint64x2(v) + case OpCompressUint64x4: + return 
rewriteValueAMD64_OpCompressUint64x4(v) + case OpCompressUint64x8: + return rewriteValueAMD64_OpCompressUint64x8(v) + case OpCompressUint8x16: + return rewriteValueAMD64_OpCompressUint8x16(v) + case OpCompressUint8x32: + return rewriteValueAMD64_OpCompressUint8x32(v) + case OpCompressUint8x64: + return rewriteValueAMD64_OpCompressUint8x64(v) case OpCondSelect: return rewriteValueAMD64_OpCondSelect(v) case OpConst16: @@ -30451,6 +30511,486 @@ func rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat64x8(v *Value) bool { return true } } +func rewriteValueAMD64_OpCompressFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressFloat32x16 x mask) + // result: (VCOMPRESSPSMasked512 x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VCOMPRESSPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressFloat32x4 x mask) + // result: (VCOMPRESSPSMasked128 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VCOMPRESSPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressFloat32x8 x mask) + // result: (VCOMPRESSPSMasked256 x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VCOMPRESSPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressFloat64x2 x mask) + // result: (VCOMPRESSPDMasked128 x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VCOMPRESSPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressFloat64x4 x mask) + // result: (VCOMPRESSPDMasked256 x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VCOMPRESSPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressFloat64x8 x mask) + // result: (VCOMPRESSPDMasked512 x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VCOMPRESSPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressInt16x16 x mask) + // result: (VPCOMPRESSWMasked256 x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPCOMPRESSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressInt16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: 
(CompressInt16x32 x mask) + // result: (VPCOMPRESSWMasked512 x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPCOMPRESSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressInt16x8 x mask) + // result: (VPCOMPRESSWMasked128 x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPCOMPRESSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressInt32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressInt32x16 x mask) + // result: (VPCOMPRESSDMasked512 x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPCOMPRESSDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressInt32x4 x mask) + // result: (VPCOMPRESSDMasked128 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPCOMPRESSDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressInt32x8 x mask) + // result: (VPCOMPRESSDMasked256 x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPCOMPRESSDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressInt64x2 x mask) + // result: (VPCOMPRESSQMasked128 x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPCOMPRESSQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressInt64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressInt64x4 x mask) + // result: (VPCOMPRESSQMasked256 x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPCOMPRESSQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressInt64x8 x mask) + // result: (VPCOMPRESSQMasked512 x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPCOMPRESSQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressInt8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressInt8x16 x mask) + // result: (VPCOMPRESSBMasked128 x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPCOMPRESSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func 
rewriteValueAMD64_OpCompressInt8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressInt8x32 x mask) + // result: (VPCOMPRESSBMasked256 x (VPMOVVec8x32ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPCOMPRESSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressInt8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressInt8x64 x mask) + // result: (VPCOMPRESSBMasked512 x (VPMOVVec8x64ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPCOMPRESSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressUint16x16 x mask) + // result: (VPCOMPRESSWMasked256 x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPCOMPRESSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressUint16x32 x mask) + // result: (VPCOMPRESSWMasked512 x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPCOMPRESSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressUint16x8 x mask) + // result: (VPCOMPRESSWMasked128 x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPCOMPRESSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressUint32x16 x mask) + // result: (VPCOMPRESSDMasked512 x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPCOMPRESSDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressUint32x4 x mask) + // result: (VPCOMPRESSDMasked128 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPCOMPRESSDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressUint32x8 x mask) + // result: (VPCOMPRESSDMasked256 x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPCOMPRESSDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressUint64x2 x mask) + // result: (VPCOMPRESSQMasked128 x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + mask := v_1 + 
v.reset(OpAMD64VPCOMPRESSQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressUint64x4 x mask) + // result: (VPCOMPRESSQMasked256 x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPCOMPRESSQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressUint64x8 x mask) + // result: (VPCOMPRESSQMasked512 x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPCOMPRESSQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressUint8x16 x mask) + // result: (VPCOMPRESSBMasked128 x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPCOMPRESSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressUint8x32 x mask) + // result: (VPCOMPRESSBMasked256 x (VPMOVVec8x32ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPCOMPRESSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressUint8x64 x mask) + // result: (VPCOMPRESSBMasked512 x (VPMOVVec8x64ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPCOMPRESSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} func rewriteValueAMD64_OpCondSelect(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 3805ca35a872c9..1ef4369fa27bc4 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -215,6 +215,36 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float64x2.CeilWithPrecisionMasked", opLen2Imm8(ssa.OpCeilWithPrecisionMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.CeilWithPrecisionMasked", opLen2Imm8(ssa.OpCeilWithPrecisionMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.CeilWithPrecisionMasked", opLen2Imm8(ssa.OpCeilWithPrecisionMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.Compress", opLen2(ssa.OpCompressFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.Compress", opLen2(ssa.OpCompressFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.Compress", opLen2(ssa.OpCompressFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.Compress", opLen2(ssa.OpCompressFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.Compress", opLen2(ssa.OpCompressFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.Compress", opLen2(ssa.OpCompressFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.Compress", opLen2(ssa.OpCompressInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.Compress", opLen2(ssa.OpCompressInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.Compress", opLen2(ssa.OpCompressInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.Compress", opLen2(ssa.OpCompressInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.Compress", opLen2(ssa.OpCompressInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.Compress", opLen2(ssa.OpCompressInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.Compress", opLen2(ssa.OpCompressInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.Compress", opLen2(ssa.OpCompressInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.Compress", opLen2(ssa.OpCompressInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.Compress", opLen2(ssa.OpCompressInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.Compress", opLen2(ssa.OpCompressInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.Compress", opLen2(ssa.OpCompressInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.Compress", opLen2(ssa.OpCompressUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.Compress", opLen2(ssa.OpCompressUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.Compress", opLen2(ssa.OpCompressUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.Compress", opLen2(ssa.OpCompressUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.Compress", opLen2(ssa.OpCompressUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.Compress", opLen2(ssa.OpCompressUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.Compress", opLen2(ssa.OpCompressUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.Compress", opLen2(ssa.OpCompressUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.Compress", opLen2(ssa.OpCompressUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.Compress", opLen2(ssa.OpCompressUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.Compress", opLen2(ssa.OpCompressUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.Compress", opLen2(ssa.OpCompressUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.DiffWithCeilWithPrecision", 
opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index ebb626358f8a24..7121a6d208ffd7 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -1084,6 +1084,188 @@ func (x Float64x4) CeilWithPrecisionMasked(prec uint8, mask Mask64x4) Float64x4 // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x8) CeilWithPrecisionMasked(prec uint8, mask Mask64x8) Float64x8 +/* Compress */ + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VCOMPRESSPS, CPU Feature: AVX512F +func (x Float32x4) Compress(mask Mask32x4) Float32x4 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VCOMPRESSPS, CPU Feature: AVX512F +func (x Float32x8) Compress(mask Mask32x8) Float32x8 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VCOMPRESSPS, CPU Feature: AVX512F +func (x Float32x16) Compress(mask Mask32x16) Float32x16 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VCOMPRESSPD, CPU Feature: AVX512F +func (x Float64x2) Compress(mask Mask64x2) Float64x2 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VCOMPRESSPD, CPU Feature: AVX512F +func (x Float64x4) Compress(mask Mask64x4) Float64x4 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VCOMPRESSPD, CPU Feature: AVX512F +func (x Float64x8) Compress(mask Mask64x8) Float64x8 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VPCOMPRESSB, CPU Feature: AVX512VBMI2 +func (x Int8x16) Compress(mask Mask8x16) Int8x16 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VPCOMPRESSB, CPU Feature: AVX512VBMI2 +func (x Int8x32) Compress(mask Mask8x32) Int8x32 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VPCOMPRESSB, CPU Feature: AVX512VBMI2 +func (x Int8x64) Compress(mask Mask8x64) Int8x64 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VPCOMPRESSW, CPU Feature: AVX512VBMI2 +func (x Int16x8) Compress(mask Mask16x8) Int16x8 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. 
+// +// Asm: VPCOMPRESSW, CPU Feature: AVX512VBMI2 +func (x Int16x16) Compress(mask Mask16x16) Int16x16 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VPCOMPRESSW, CPU Feature: AVX512VBMI2 +func (x Int16x32) Compress(mask Mask16x32) Int16x32 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VPCOMPRESSD, CPU Feature: AVX512F +func (x Int32x4) Compress(mask Mask32x4) Int32x4 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VPCOMPRESSD, CPU Feature: AVX512F +func (x Int32x8) Compress(mask Mask32x8) Int32x8 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VPCOMPRESSD, CPU Feature: AVX512F +func (x Int32x16) Compress(mask Mask32x16) Int32x16 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VPCOMPRESSQ, CPU Feature: AVX512F +func (x Int64x2) Compress(mask Mask64x2) Int64x2 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VPCOMPRESSQ, CPU Feature: AVX512F +func (x Int64x4) Compress(mask Mask64x4) Int64x4 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VPCOMPRESSQ, CPU Feature: AVX512F +func (x Int64x8) Compress(mask Mask64x8) Int64x8 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VPCOMPRESSB, CPU Feature: AVX512VBMI2 +func (x Uint8x16) Compress(mask Mask8x16) Uint8x16 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VPCOMPRESSB, CPU Feature: AVX512VBMI2 +func (x Uint8x32) Compress(mask Mask8x32) Uint8x32 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VPCOMPRESSB, CPU Feature: AVX512VBMI2 +func (x Uint8x64) Compress(mask Mask8x64) Uint8x64 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VPCOMPRESSW, CPU Feature: AVX512VBMI2 +func (x Uint16x8) Compress(mask Mask16x8) Uint16x8 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VPCOMPRESSW, CPU Feature: AVX512VBMI2 +func (x Uint16x16) Compress(mask Mask16x16) Uint16x16 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VPCOMPRESSW, CPU Feature: AVX512VBMI2 +func (x Uint16x32) Compress(mask Mask16x32) Uint16x32 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. 
+// +// Asm: VPCOMPRESSD, CPU Feature: AVX512F +func (x Uint32x4) Compress(mask Mask32x4) Uint32x4 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VPCOMPRESSD, CPU Feature: AVX512F +func (x Uint32x8) Compress(mask Mask32x8) Uint32x8 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VPCOMPRESSD, CPU Feature: AVX512F +func (x Uint32x16) Compress(mask Mask32x16) Uint32x16 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VPCOMPRESSQ, CPU Feature: AVX512F +func (x Uint64x2) Compress(mask Mask64x2) Uint64x2 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VPCOMPRESSQ, CPU Feature: AVX512F +func (x Uint64x4) Compress(mask Mask64x4) Uint64x4 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VPCOMPRESSQ, CPU Feature: AVX512F +func (x Uint64x8) Compress(mask Mask64x8) Uint64x8 + /* DiffWithCeilWithPrecision */ // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index f1a2f11738c2e5..d7010de10a94d0 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -186,6 +186,16 @@ func TestPermute2(t *testing.T) { } } +func TestCompress(t *testing.T) { + if !simd.HasAVX512() { + t.Skip("Test requires HasAVX512, not available on this hardware") + return + } + testInt32x4Mask32x4Int32x4(t, []int32{1, 2, 3, 4}, + []int32{0, -1, 0, -1}, + []int32{2, 4, 0, 0}, "Compress") +} + // checkInt8Slices ensures that b and a are equal, to the end of b. // also serves to use the slices, to prevent accidental optimization. 
func checkInt8Slices(t *testing.T, a, b []int8) { diff --git a/src/simd/simd_wrapped_test.go b/src/simd/simd_wrapped_test.go index 29452bdad0e9b4..8f0fb665be6e57 100644 --- a/src/simd/simd_wrapped_test.go +++ b/src/simd/simd_wrapped_test.go @@ -117,6 +117,27 @@ func testFloat32x4Compare(t *testing.T, v0 []float32, v1 []float32, want []int32 } } +func testFloat32x4Mask32x4Float32x4(t *testing.T, v0 []float32, v1 []int32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x4 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x4Slice(v0) + vec1 := simd.LoadInt32x4Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask32x4()) + + default: + t.Errorf("Unknown method: Float32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testFloat32x4MaskedCompare(t *testing.T, v0 []float32, v1 []float32, v2 []int32, want []int32, which string) { t.Helper() var gotv simd.Int32x4 @@ -369,6 +390,27 @@ func testFloat32x8Compare(t *testing.T, v0 []float32, v1 []float32, want []int32 } } +func testFloat32x8Mask32x8Float32x8(t *testing.T, v0 []float32, v1 []int32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x8 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x8Slice(v0) + vec1 := simd.LoadInt32x8Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask32x8()) + + default: + t.Errorf("Unknown method: Float32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testFloat32x8MaskedCompare(t *testing.T, v0 []float32, v1 []float32, v2 []int32, want []int32, which string) { t.Helper() var gotv simd.Int32x8 @@ -613,6 +655,27 @@ func testFloat32x16Compare(t *testing.T, v0 []float32, v1 []float32, want []int3 } } +func testFloat32x16Mask32x16Float32x16(t *testing.T, v0 []float32, v1 []int32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x16 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x16Slice(v0) + vec1 := simd.LoadInt32x16Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask32x16()) + + default: + t.Errorf("Unknown method: Float32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testFloat32x16MaskedCompare(t *testing.T, v0 []float32, v1 []float32, v2 []int32, want []int32, which string) { t.Helper() var gotv simd.Int32x16 @@ -857,6 +920,27 @@ func testFloat64x2Compare(t *testing.T, v0 []float64, v1 []float64, want []int64 } } +func testFloat64x2Mask64x2Float64x2(t *testing.T, v0 []float64, v1 []int64, want []float64, which string) { + t.Helper() + var gotv simd.Float64x2 + got := make([]float64, len(want)) + vec0 := simd.LoadFloat64x2Slice(v0) + vec1 := simd.LoadInt64x2Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask64x2()) + + default: + t.Errorf("Unknown method: Float64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testFloat64x2MaskedCompare(t *testing.T, v0 []float64, v1 []float64, v2 []int64, want []int64, which string) { t.Helper() var gotv simd.Int64x2 @@ -1107,6 
+1191,27 @@ func testFloat64x4Compare(t *testing.T, v0 []float64, v1 []float64, want []int64 } } +func testFloat64x4Mask64x4Float64x4(t *testing.T, v0 []float64, v1 []int64, want []float64, which string) { + t.Helper() + var gotv simd.Float64x4 + got := make([]float64, len(want)) + vec0 := simd.LoadFloat64x4Slice(v0) + vec1 := simd.LoadInt64x4Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask64x4()) + + default: + t.Errorf("Unknown method: Float64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testFloat64x4MaskedCompare(t *testing.T, v0 []float64, v1 []float64, v2 []int64, want []int64, which string) { t.Helper() var gotv simd.Int64x4 @@ -1351,6 +1456,27 @@ func testFloat64x8Compare(t *testing.T, v0 []float64, v1 []float64, want []int64 } } +func testFloat64x8Mask64x8Float64x8(t *testing.T, v0 []float64, v1 []int64, want []float64, which string) { + t.Helper() + var gotv simd.Float64x8 + got := make([]float64, len(want)) + vec0 := simd.LoadFloat64x8Slice(v0) + vec1 := simd.LoadInt64x8Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask64x8()) + + default: + t.Errorf("Unknown method: Float64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testFloat64x8MaskedCompare(t *testing.T, v0 []float64, v1 []float64, v2 []int64, want []int64, which string) { t.Helper() var gotv simd.Int64x8 @@ -1591,6 +1717,27 @@ func testInt8x16Compare(t *testing.T, v0 []int8, v1 []int8, want []int8, which s } } +func testInt8x16Mask8x16Int8x16(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x16 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x16Slice(v0) + vec1 := simd.LoadInt8x16Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask8x16()) + + default: + t.Errorf("Unknown method: Int8x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testInt8x16MaskedCompare(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) { t.Helper() var gotv simd.Int8x16 @@ -1772,6 +1919,27 @@ func testInt8x32Compare(t *testing.T, v0 []int8, v1 []int8, want []int8, which s } } +func testInt8x32Mask8x32Int8x32(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x32 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x32Slice(v0) + vec1 := simd.LoadInt8x32Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask8x32()) + + default: + t.Errorf("Unknown method: Int8x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testInt8x32MaskedCompare(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) { t.Helper() var gotv simd.Int8x32 @@ -1943,6 +2111,27 @@ func testInt8x64Compare(t *testing.T, v0 []int8, v1 []int8, want []int8, which s } } +func testInt8x64Mask8x64Int8x64(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x64 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x64Slice(v0) + vec1 := 
simd.LoadInt8x64Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask8x64()) + + default: + t.Errorf("Unknown method: Int8x64.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testInt8x64MaskedCompare(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) { t.Helper() var gotv simd.Int8x64 @@ -2191,6 +2380,27 @@ func testInt16x8Compare(t *testing.T, v0 []int16, v1 []int16, want []int16, whic } } +func testInt16x8Mask16x8Int16x8(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x8 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x8Slice(v0) + vec1 := simd.LoadInt16x8Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask16x8()) + + default: + t.Errorf("Unknown method: Int16x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testInt16x8MaskedCompare(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { t.Helper() var gotv simd.Int16x8 @@ -2488,6 +2698,27 @@ func testInt16x16Compare(t *testing.T, v0 []int16, v1 []int16, want []int16, whi } } +func testInt16x16Mask16x16Int16x16(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x16 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x16Slice(v0) + vec1 := simd.LoadInt16x16Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask16x16()) + + default: + t.Errorf("Unknown method: Int16x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testInt16x16MaskedCompare(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { t.Helper() var gotv simd.Int16x16 @@ -2767,6 +2998,27 @@ func testInt16x32Compare(t *testing.T, v0 []int16, v1 []int16, want []int16, whi } } +func testInt16x32Mask16x32Int16x32(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x32 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x32Slice(v0) + vec1 := simd.LoadInt16x32Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask16x32()) + + default: + t.Errorf("Unknown method: Int16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testInt16x32MaskedCompare(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { t.Helper() var gotv simd.Int16x32 @@ -3091,6 +3343,27 @@ func testInt32x4Int16x8Int16x8Mask32x4Int32x4(t *testing.T, v0 []int32, v1 []int } } +func testInt32x4Mask32x4Int32x4(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x4 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x4Slice(v0) + vec1 := simd.LoadInt32x4Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask32x4()) + + default: + t.Errorf("Unknown method: Int32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} 
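The wrapped-test helpers above all dispatch through vec0.Compress(mask) and then compare lane by lane against want. As a plain-Go sketch of the semantics that the Compress documentation and TestCompress expect (lanes whose mask element is set are packed toward index 0, the remaining lanes stay zero), the scalar reference model below is illustrative only and is not part of this patch:

    // compressInt32 is a scalar reference model for Compress on int32 lanes:
    // elements whose mask lane is non-zero are packed toward index 0, and the
    // remaining output lanes are left zero. Illustration only, not in the patch.
    func compressInt32(x, mask []int32) []int32 {
        out := make([]int32, len(x)) // zeroed, like the unselected lanes
        n := 0
        for i := range x {
            if mask[i] != 0 { // this lane is selected by the mask
                out[n] = x[i]
                n++
            }
        }
        return out
    }

With x = {1, 2, 3, 4} and mask = {0, -1, 0, -1} this yields {2, 4, 0, 0}, matching the expectation encoded in TestCompress above.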
+ func testInt32x4MaskedCompare(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { t.Helper() var gotv simd.Int32x4 @@ -3464,6 +3737,27 @@ func testInt32x8Int16x16Int16x16Mask32x8Int32x8(t *testing.T, v0 []int32, v1 []i } } +func testInt32x8Mask32x8Int32x8(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x8 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x8Slice(v0) + vec1 := simd.LoadInt32x8Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask32x8()) + + default: + t.Errorf("Unknown method: Int32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testInt32x8MaskedCompare(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { t.Helper() var gotv simd.Int32x8 @@ -3810,16 +4104,37 @@ func testInt32x16Int16x32Int16x32Mask32x16Int32x16(t *testing.T, v0 []int32, v1 } } -func testInt32x16MaskedCompare(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { +func testInt32x16Mask32x16Int32x16(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { t.Helper() var gotv simd.Int32x16 got := make([]int32, len(want)) vec0 := simd.LoadInt32x16Slice(v0) vec1 := simd.LoadInt32x16Slice(v1) - vec2 := simd.LoadInt32x16Slice(v2) switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() + case "Compress": + gotv = vec0.Compress(vec1.AsMask32x16()) + + default: + t.Errorf("Unknown method: Int32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x16MaskedCompare(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x16Slice(v0) + vec1 := simd.LoadInt32x16Slice(v1) + vec2 := simd.LoadInt32x16Slice(v2) + switch which { + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() case "GreaterEqualMasked": gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() case "GreaterMasked": @@ -4111,6 +4426,27 @@ func testInt64x2Compare(t *testing.T, v0 []int64, v1 []int64, want []int64, whic } } +func testInt64x2Mask64x2Int64x2(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x2 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x2Slice(v0) + vec1 := simd.LoadInt64x2Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask64x2()) + + default: + t.Errorf("Unknown method: Int64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testInt64x2MaskedCompare(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { t.Helper() var gotv simd.Int64x2 @@ -4363,6 +4699,27 @@ func testInt64x4Compare(t *testing.T, v0 []int64, v1 []int64, want []int64, whic } } +func testInt64x4Mask64x4Int64x4(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x4 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x4Slice(v0) + vec1 := simd.LoadInt64x4Slice(v1) + switch which { + case "Compress": + gotv = 
vec0.Compress(vec1.AsMask64x4()) + + default: + t.Errorf("Unknown method: Int64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testInt64x4MaskedCompare(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { t.Helper() var gotv simd.Int64x4 @@ -4615,6 +4972,27 @@ func testInt64x8Compare(t *testing.T, v0 []int64, v1 []int64, want []int64, whic } } +func testInt64x8Mask64x8Int64x8(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x8 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x8Slice(v0) + vec1 := simd.LoadInt64x8Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask64x8()) + + default: + t.Errorf("Unknown method: Int64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testInt64x8MaskedCompare(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { t.Helper() var gotv simd.Int64x8 @@ -4894,6 +5272,27 @@ func testUint8x16Int8x16Mask16x8Int16x8(t *testing.T, v0 []uint8, v1 []int8, v2 } } +func testUint8x16Mask8x16Uint8x16(t *testing.T, v0 []uint8, v1 []int8, want []uint8, which string) { + t.Helper() + var gotv simd.Uint8x16 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x16Slice(v0) + vec1 := simd.LoadInt8x16Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask8x16()) + + default: + t.Errorf("Unknown method: Uint8x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testUint8x16MaskedCompare(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []int8, which string) { t.Helper() var gotv simd.Int8x16 @@ -5120,6 +5519,27 @@ func testUint8x32Int8x32Mask16x16Int16x16(t *testing.T, v0 []uint8, v1 []int8, v } } +func testUint8x32Mask8x32Uint8x32(t *testing.T, v0 []uint8, v1 []int8, want []uint8, which string) { + t.Helper() + var gotv simd.Uint8x32 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x32Slice(v0) + vec1 := simd.LoadInt8x32Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask8x32()) + + default: + t.Errorf("Unknown method: Uint8x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testUint8x32MaskedCompare(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []int8, which string) { t.Helper() var gotv simd.Int8x32 @@ -5338,6 +5758,27 @@ func testUint8x64Int8x64Mask16x32Int16x32(t *testing.T, v0 []uint8, v1 []int8, v } } +func testUint8x64Mask8x64Uint8x64(t *testing.T, v0 []uint8, v1 []int8, want []uint8, which string) { + t.Helper() + var gotv simd.Uint8x64 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x64Slice(v0) + vec1 := simd.LoadInt8x64Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask8x64()) + + default: + t.Errorf("Unknown method: Uint8x64.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testUint8x64MaskedCompare(t *testing.T, v0 []uint8, v1 []uint8, v2 
[]int8, want []int8, which string) { t.Helper() var gotv simd.Int8x64 @@ -5533,6 +5974,27 @@ func testUint16x8Compare(t *testing.T, v0 []uint16, v1 []uint16, want []int16, w } } +func testUint16x8Mask16x8Uint16x8(t *testing.T, v0 []uint16, v1 []int16, want []uint16, which string) { + t.Helper() + var gotv simd.Uint16x8 + got := make([]uint16, len(want)) + vec0 := simd.LoadUint16x8Slice(v0) + vec1 := simd.LoadInt16x8Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask16x8()) + + default: + t.Errorf("Unknown method: Uint16x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testUint16x8MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []int16, which string) { t.Helper() var gotv simd.Int16x8 @@ -5777,6 +6239,27 @@ func testUint16x16Compare(t *testing.T, v0 []uint16, v1 []uint16, want []int16, } } +func testUint16x16Mask16x16Uint16x16(t *testing.T, v0 []uint16, v1 []int16, want []uint16, which string) { + t.Helper() + var gotv simd.Uint16x16 + got := make([]uint16, len(want)) + vec0 := simd.LoadUint16x16Slice(v0) + vec1 := simd.LoadInt16x16Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask16x16()) + + default: + t.Errorf("Unknown method: Uint16x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testUint16x16MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []int16, which string) { t.Helper() var gotv simd.Int16x16 @@ -6009,6 +6492,27 @@ func testUint16x32Compare(t *testing.T, v0 []uint16, v1 []uint16, want []int16, } } +func testUint16x32Mask16x32Uint16x32(t *testing.T, v0 []uint16, v1 []int16, want []uint16, which string) { + t.Helper() + var gotv simd.Uint16x32 + got := make([]uint16, len(want)) + vec0 := simd.LoadUint16x32Slice(v0) + vec1 := simd.LoadInt16x32Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask16x32()) + + default: + t.Errorf("Unknown method: Uint16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testUint16x32MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []int16, which string) { t.Helper() var gotv simd.Int16x32 @@ -6274,6 +6778,27 @@ func testUint32x4Compare(t *testing.T, v0 []uint32, v1 []uint32, want []int32, w } } +func testUint32x4Mask32x4Uint32x4(t *testing.T, v0 []uint32, v1 []int32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x4 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x4Slice(v0) + vec1 := simd.LoadInt32x4Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask32x4()) + + default: + t.Errorf("Unknown method: Uint32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testUint32x4MaskedCompare(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []int32, which string) { t.Helper() var gotv simd.Int32x4 @@ -6588,6 +7113,27 @@ func testUint32x8Compare(t *testing.T, v0 []uint32, v1 []uint32, want []int32, w } } +func testUint32x8Mask32x8Uint32x8(t *testing.T, v0 []uint32, v1 []int32, want []uint32, which string) { + 
t.Helper() + var gotv simd.Uint32x8 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x8Slice(v0) + vec1 := simd.LoadInt32x8Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask32x8()) + + default: + t.Errorf("Unknown method: Uint32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testUint32x8MaskedCompare(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []int32, which string) { t.Helper() var gotv simd.Int32x8 @@ -6877,6 +7423,27 @@ func testUint32x16Compare(t *testing.T, v0 []uint32, v1 []uint32, want []int32, } } +func testUint32x16Mask32x16Uint32x16(t *testing.T, v0 []uint32, v1 []int32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x16 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x16Slice(v0) + vec1 := simd.LoadInt32x16Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask32x16()) + + default: + t.Errorf("Unknown method: Uint32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testUint32x16MaskedCompare(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []int32, which string) { t.Helper() var gotv simd.Int32x16 @@ -7170,6 +7737,27 @@ func testUint64x2Compare(t *testing.T, v0 []uint64, v1 []uint64, want []int64, w } } +func testUint64x2Mask64x2Uint64x2(t *testing.T, v0 []uint64, v1 []int64, want []uint64, which string) { + t.Helper() + var gotv simd.Uint64x2 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x2Slice(v0) + vec1 := simd.LoadInt64x2Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask64x2()) + + default: + t.Errorf("Unknown method: Uint64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testUint64x2MaskedCompare(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64, want []int64, which string) { t.Helper() var gotv simd.Int64x2 @@ -7414,6 +8002,27 @@ func testUint64x4Compare(t *testing.T, v0 []uint64, v1 []uint64, want []int64, w } } +func testUint64x4Mask64x4Uint64x4(t *testing.T, v0 []uint64, v1 []int64, want []uint64, which string) { + t.Helper() + var gotv simd.Uint64x4 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x4Slice(v0) + vec1 := simd.LoadInt64x4Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask64x4()) + + default: + t.Errorf("Unknown method: Uint64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testUint64x4MaskedCompare(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64, want []int64, which string) { t.Helper() var gotv simd.Int64x4 @@ -7658,6 +8267,27 @@ func testUint64x8Compare(t *testing.T, v0 []uint64, v1 []uint64, want []int64, w } } +func testUint64x8Mask64x8Uint64x8(t *testing.T, v0 []uint64, v1 []int64, want []uint64, which string) { + t.Helper() + var gotv simd.Uint64x8 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x8Slice(v0) + vec1 := simd.LoadInt64x8Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask64x8()) + + default: + t.Errorf("Unknown method: Uint64x8.%s", which) + } + 
gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testUint64x8MaskedCompare(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64, want []int64, which string) { t.Helper() var gotv simd.Int64x8 From ef5f6cc92109ee18d978f81650f93fd8a254b8d2 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Mon, 14 Jul 2025 22:00:29 +0000 Subject: [PATCH 088/139] [dev.simd] cmd/compile: adjust param order for AndNot This CL adjusts the parameter order of AndNot, making it x &^ y instead of ^x & y. This CL also added a test. This CL is partially generated by CL 687977. Change-Id: I244e7b887991dc97e695131a5287af1b0e6fc3ce Reviewed-on: https://go-review.googlesource.com/c/go/+/687996 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- .../compile/internal/ssagen/simdintrinsics.go | 64 +++++++++---------- src/simd/ops_amd64.go | 64 +++++++++---------- src/simd/simd_test.go | 6 ++ 3 files changed, 70 insertions(+), 64 deletions(-) diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 1ef4369fa27bc4..1472f5ec1a7fb5 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -131,38 +131,38 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.AndMasked", opLen3(ssa.OpAndMaskedUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.AndMasked", opLen3(ssa.OpAndMaskedUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.AndMasked", opLen3(ssa.OpAndMaskedUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.AndNot", opLen2(ssa.OpAndNotInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.AndNot", opLen2(ssa.OpAndNotInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x8.AndNot", opLen2(ssa.OpAndNotInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.AndNot", opLen2(ssa.OpAndNotInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x4.AndNot", opLen2(ssa.OpAndNotInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.AndNot", opLen2(ssa.OpAndNotInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.AndNot", opLen2(ssa.OpAndNotInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.AndNot", opLen2(ssa.OpAndNotInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.AndNot", opLen2(ssa.OpAndNotInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.AndNot", opLen2(ssa.OpAndNotInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.AndNot", opLen2(ssa.OpAndNotUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.AndNot", opLen2(ssa.OpAndNotUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x8.AndNot", opLen2(ssa.OpAndNotUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.AndNot", opLen2(ssa.OpAndNotUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x4.AndNot", opLen2(ssa.OpAndNotUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.AndNot", opLen2(ssa.OpAndNotUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.AndNot", opLen2(ssa.OpAndNotUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.AndNot", opLen2(ssa.OpAndNotUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.AndNot", opLen2(ssa.OpAndNotUint64x4, 
types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.AndNot", opLen2(ssa.OpAndNotUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.AndNotMasked", opLen3(ssa.OpAndNotMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.AndNotMasked", opLen3(ssa.OpAndNotMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.AndNotMasked", opLen3(ssa.OpAndNotMaskedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.AndNotMasked", opLen3(ssa.OpAndNotMaskedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.AndNotMasked", opLen3(ssa.OpAndNotMaskedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.AndNotMasked", opLen3(ssa.OpAndNotMaskedInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.AndNotMasked", opLen3(ssa.OpAndNotMaskedUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.AndNotMasked", opLen3(ssa.OpAndNotMaskedUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.AndNotMasked", opLen3(ssa.OpAndNotMaskedUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.AndNotMasked", opLen3(ssa.OpAndNotMaskedUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.AndNotMasked", opLen3(ssa.OpAndNotMaskedUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.AndNotMasked", opLen3(ssa.OpAndNotMaskedUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.AndNot", opLen2_21(ssa.OpAndNotInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.AndNot", opLen2_21(ssa.OpAndNotInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x8.AndNot", opLen2_21(ssa.OpAndNotInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.AndNot", opLen2_21(ssa.OpAndNotInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x4.AndNot", opLen2_21(ssa.OpAndNotInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.AndNot", opLen2_21(ssa.OpAndNotInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.AndNot", opLen2_21(ssa.OpAndNotInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.AndNot", opLen2_21(ssa.OpAndNotInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.AndNot", opLen2_21(ssa.OpAndNotInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.AndNot", opLen2_21(ssa.OpAndNotInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.AndNot", opLen2_21(ssa.OpAndNotUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.AndNot", opLen2_21(ssa.OpAndNotUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x8.AndNot", opLen2_21(ssa.OpAndNotUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.AndNot", opLen2_21(ssa.OpAndNotUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x4.AndNot", opLen2_21(ssa.OpAndNotUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.AndNot", opLen2_21(ssa.OpAndNotUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.AndNot", opLen2_21(ssa.OpAndNotUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.AndNot", opLen2_21(ssa.OpAndNotUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.AndNot", opLen2_21(ssa.OpAndNotUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.AndNot", opLen2_21(ssa.OpAndNotUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.AndNotMasked", opLen3_21(ssa.OpAndNotMaskedInt32x4, types.TypeVec128), 
sys.AMD64) + addF(simdPackage, "Int32x8.AndNotMasked", opLen3_21(ssa.OpAndNotMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.AndNotMasked", opLen3_21(ssa.OpAndNotMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.AndNotMasked", opLen3_21(ssa.OpAndNotMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.AndNotMasked", opLen3_21(ssa.OpAndNotMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.AndNotMasked", opLen3_21(ssa.OpAndNotMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.AndNotMasked", opLen3_21(ssa.OpAndNotMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.AndNotMasked", opLen3_21(ssa.OpAndNotMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.AndNotMasked", opLen3_21(ssa.OpAndNotMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.AndNotMasked", opLen3_21(ssa.OpAndNotMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.AndNotMasked", opLen3_21(ssa.OpAndNotMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.AndNotMasked", opLen3_21(ssa.OpAndNotMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat32x16, types.TypeVec512), sys.AMD64) diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 7121a6d208ffd7..3b87836962a134 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -620,164 +620,164 @@ func (x Uint64x8) AndMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* AndNot */ -// AndNot performs a bitwise AND NOT operation between two vectors. +// AndNot performs a bitwise x &^ y. // // Asm: VPANDN, CPU Feature: AVX func (x Int8x16) AndNot(y Int8x16) Int8x16 -// AndNot performs a bitwise AND NOT operation between two vectors. +// AndNot performs a bitwise x &^ y. // // Asm: VPANDN, CPU Feature: AVX2 func (x Int8x32) AndNot(y Int8x32) Int8x32 -// AndNot performs a bitwise AND NOT operation between two vectors. +// AndNot performs a bitwise x &^ y. // // Asm: VPANDN, CPU Feature: AVX func (x Int16x8) AndNot(y Int16x8) Int16x8 -// AndNot performs a bitwise AND NOT operation between two vectors. +// AndNot performs a bitwise x &^ y. // // Asm: VPANDN, CPU Feature: AVX2 func (x Int16x16) AndNot(y Int16x16) Int16x16 -// AndNot performs a bitwise AND NOT operation between two vectors. +// AndNot performs a bitwise x &^ y. // // Asm: VPANDN, CPU Feature: AVX func (x Int32x4) AndNot(y Int32x4) Int32x4 -// AndNot performs a bitwise AND NOT operation between two vectors. +// AndNot performs a bitwise x &^ y. // // Asm: VPANDN, CPU Feature: AVX2 func (x Int32x8) AndNot(y Int32x8) Int32x8 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// AndNot performs a bitwise x &^ y. // // Asm: VPANDND, CPU Feature: AVX512F func (x Int32x16) AndNot(y Int32x16) Int32x16 -// AndNot performs a bitwise AND NOT operation between two vectors. +// AndNot performs a bitwise x &^ y. // // Asm: VPANDN, CPU Feature: AVX func (x Int64x2) AndNot(y Int64x2) Int64x2 -// AndNot performs a bitwise AND NOT operation between two vectors. +// AndNot performs a bitwise x &^ y. 
// // Asm: VPANDN, CPU Feature: AVX2 func (x Int64x4) AndNot(y Int64x4) Int64x4 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// AndNot performs a bitwise x &^ y. // // Asm: VPANDNQ, CPU Feature: AVX512F func (x Int64x8) AndNot(y Int64x8) Int64x8 -// AndNot performs a bitwise AND NOT operation between two vectors. +// AndNot performs a bitwise x &^ y. // // Asm: VPANDN, CPU Feature: AVX func (x Uint8x16) AndNot(y Uint8x16) Uint8x16 -// AndNot performs a bitwise AND NOT operation between two vectors. +// AndNot performs a bitwise x &^ y. // // Asm: VPANDN, CPU Feature: AVX2 func (x Uint8x32) AndNot(y Uint8x32) Uint8x32 -// AndNot performs a bitwise AND NOT operation between two vectors. +// AndNot performs a bitwise x &^ y. // // Asm: VPANDN, CPU Feature: AVX func (x Uint16x8) AndNot(y Uint16x8) Uint16x8 -// AndNot performs a bitwise AND NOT operation between two vectors. +// AndNot performs a bitwise x &^ y. // // Asm: VPANDN, CPU Feature: AVX2 func (x Uint16x16) AndNot(y Uint16x16) Uint16x16 -// AndNot performs a bitwise AND NOT operation between two vectors. +// AndNot performs a bitwise x &^ y. // // Asm: VPANDN, CPU Feature: AVX func (x Uint32x4) AndNot(y Uint32x4) Uint32x4 -// AndNot performs a bitwise AND NOT operation between two vectors. +// AndNot performs a bitwise x &^ y. // // Asm: VPANDN, CPU Feature: AVX2 func (x Uint32x8) AndNot(y Uint32x8) Uint32x8 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// AndNot performs a bitwise x &^ y. // // Asm: VPANDND, CPU Feature: AVX512F func (x Uint32x16) AndNot(y Uint32x16) Uint32x16 -// AndNot performs a bitwise AND NOT operation between two vectors. +// AndNot performs a bitwise x &^ y. // // Asm: VPANDN, CPU Feature: AVX func (x Uint64x2) AndNot(y Uint64x2) Uint64x2 -// AndNot performs a bitwise AND NOT operation between two vectors. +// AndNot performs a bitwise x &^ y. // // Asm: VPANDN, CPU Feature: AVX2 func (x Uint64x4) AndNot(y Uint64x4) Uint64x4 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// AndNot performs a bitwise x &^ y. // // Asm: VPANDNQ, CPU Feature: AVX512F func (x Uint64x8) AndNot(y Uint64x8) Uint64x8 /* AndNotMasked */ -// AndNotMasked performs a masked bitwise AND NOT operation between two vectors. +// AndNotMasked performs a bitwise x &^ y. // // Asm: VPANDND, CPU Feature: AVX512F func (x Int32x4) AndNotMasked(y Int32x4, mask Mask32x4) Int32x4 -// AndNotMasked performs a masked bitwise AND NOT operation between two vectors. +// AndNotMasked performs a bitwise x &^ y. // // Asm: VPANDND, CPU Feature: AVX512F func (x Int32x8) AndNotMasked(y Int32x8, mask Mask32x8) Int32x8 -// AndNotMasked performs a masked bitwise AND NOT operation between two vectors. +// AndNotMasked performs a bitwise x &^ y. // // Asm: VPANDND, CPU Feature: AVX512F func (x Int32x16) AndNotMasked(y Int32x16, mask Mask32x16) Int32x16 -// AndNotMasked performs a masked bitwise AND NOT operation between two vectors. +// AndNotMasked performs a bitwise x &^ y. // // Asm: VPANDNQ, CPU Feature: AVX512F func (x Int64x2) AndNotMasked(y Int64x2, mask Mask64x2) Int64x2 -// AndNotMasked performs a masked bitwise AND NOT operation between two vectors. +// AndNotMasked performs a bitwise x &^ y. // // Asm: VPANDNQ, CPU Feature: AVX512F func (x Int64x4) AndNotMasked(y Int64x4, mask Mask64x4) Int64x4 -// AndNotMasked performs a masked bitwise AND NOT operation between two vectors. +// AndNotMasked performs a bitwise x &^ y. 
// // Asm: VPANDNQ, CPU Feature: AVX512F func (x Int64x8) AndNotMasked(y Int64x8, mask Mask64x8) Int64x8 -// AndNotMasked performs a masked bitwise AND NOT operation between two vectors. +// AndNotMasked performs a bitwise x &^ y. // // Asm: VPANDND, CPU Feature: AVX512F func (x Uint32x4) AndNotMasked(y Uint32x4, mask Mask32x4) Uint32x4 -// AndNotMasked performs a masked bitwise AND NOT operation between two vectors. +// AndNotMasked performs a bitwise x &^ y. // // Asm: VPANDND, CPU Feature: AVX512F func (x Uint32x8) AndNotMasked(y Uint32x8, mask Mask32x8) Uint32x8 -// AndNotMasked performs a masked bitwise AND NOT operation between two vectors. +// AndNotMasked performs a bitwise x &^ y. // // Asm: VPANDND, CPU Feature: AVX512F func (x Uint32x16) AndNotMasked(y Uint32x16, mask Mask32x16) Uint32x16 -// AndNotMasked performs a masked bitwise AND NOT operation between two vectors. +// AndNotMasked performs a bitwise x &^ y. // // Asm: VPANDNQ, CPU Feature: AVX512F func (x Uint64x2) AndNotMasked(y Uint64x2, mask Mask64x2) Uint64x2 -// AndNotMasked performs a masked bitwise AND NOT operation between two vectors. +// AndNotMasked performs a bitwise x &^ y. // // Asm: VPANDNQ, CPU Feature: AVX512F func (x Uint64x4) AndNotMasked(y Uint64x4, mask Mask64x4) Uint64x4 -// AndNotMasked performs a masked bitwise AND NOT operation between two vectors. +// AndNotMasked performs a bitwise x &^ y. // // Asm: VPANDNQ, CPU Feature: AVX512F func (x Uint64x8) AndNotMasked(y Uint64x8, mask Mask64x8) Uint64x8 diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index d7010de10a94d0..d19889cc76482e 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -196,6 +196,12 @@ func TestCompress(t *testing.T) { []int32{2, 4, 0, 0}, "Compress") } +func TestAndNot(t *testing.T) { + testInt32x4Binary(t, []int32{0b11, 0b00, 0b11, 0b00}, + []int32{0b01, 0b01, 0b01, 0b01}, + []int32{0b10, 0b00, 0b10, 0b00}, "AndNot") +} + // checkInt8Slices ensures that b and a are equal, to the end of b. // also serves to use the slices, to prevent accidental optimization. func checkInt8Slices(t *testing.T, a, b []int8) { From c61743e4f0dde8870df5ac157f88353362d76b55 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Tue, 15 Jul 2025 05:13:55 +0000 Subject: [PATCH 089/139] [dev.simd] cmd/compile, simd: reorder PairDotProdAccumulate This CL reorderes the param order of PairDotProdAccumulate family to be dotprod(x, y) + z instead of the old dotprod(y, z) + x. This CL also updates some documentation of other ML Ops. This CL added a test to test the behavior is correct. This CL is partially generated by CL 688115. 
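As a scalar sketch of the new ordering described in this commit message, dotprod(x, y) accumulated into z, the helper below uses plain slices rather than the simd API; it assumes 16-bit input lanes summed pairwise into 32-bit accumulator lanes, which is an assumption made for illustration, and it is not one of the generated methods:

    // pairDotProdAccumulate models the new argument order: each 32-bit result
    // lane i is z[i] + x[2i]*y[2i] + x[2i+1]*y[2i+1]. Lane widths are assumed;
    // the real signatures are in the generated files changed by this CL.
    func pairDotProdAccumulate(x, y []int16, z []int32) []int32 {
        out := make([]int32, len(z))
        for i := range z {
            out[i] = z[i] + int32(x[2*i])*int32(y[2*i]) + int32(x[2*i+1])*int32(y[2*i+1])
        }
        return out
    }

Under the old ordering the receiver carried the accumulator; with this change the receiver and the first argument are the multiplicands and the final vector argument is the accumulator.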
Change-Id: I76a6ee55a2ad8e3aff388d7e4fa5218ec0e4800d Reviewed-on: https://go-review.googlesource.com/c/go/+/688095 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- .../compile/internal/ssa/_gen/simdAMD64.rules | 12 - .../internal/ssa/_gen/simdgenericOps.go | 68 ++- src/cmd/compile/internal/ssa/opGen.go | 246 ++++------ src/cmd/compile/internal/ssa/rewriteAMD64.go | 150 ------ src/cmd/compile/internal/ssagen/intrinsics.go | 12 + .../compile/internal/ssagen/simdintrinsics.go | 60 +-- src/simd/ops_amd64.go | 228 ++++----- src/simd/simd_test.go | 19 + src/simd/simd_wrapped_test.go | 449 +----------------- 9 files changed, 262 insertions(+), 982 deletions(-) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 88744174300ced..e5f17bdb1b29dd 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -1350,15 +1350,9 @@ (SaturatedUnsignedSignedQuadDotProdAccumulateInt32x4 ...) => (VPDPBUSDS128 ...) (SaturatedUnsignedSignedQuadDotProdAccumulateInt32x8 ...) => (VPDPBUSDS256 ...) (SaturatedUnsignedSignedQuadDotProdAccumulateInt32x16 ...) => (VPDPBUSDS512 ...) -(SaturatedUnsignedSignedQuadDotProdAccumulateUint32x4 ...) => (VPDPBUSDS128 ...) -(SaturatedUnsignedSignedQuadDotProdAccumulateUint32x8 ...) => (VPDPBUSDS256 ...) -(SaturatedUnsignedSignedQuadDotProdAccumulateUint32x16 ...) => (VPDPBUSDS512 ...) (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4 x y z mask) => (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8 x y z mask) => (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16 x y z mask) => (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) -(SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x4 x y z mask) => (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) -(SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x8 x y z mask) => (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) -(SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x16 x y z mask) => (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) (Set128Float32x8 ...) => (VINSERTF128256 ...) (Set128Float64x4 ...) => (VINSERTF128256 ...) (Set128Int8x32 ...) => (VINSERTI128256 ...) @@ -1762,15 +1756,9 @@ (UnsignedSignedQuadDotProdAccumulateInt32x4 ...) => (VPDPBUSD128 ...) (UnsignedSignedQuadDotProdAccumulateInt32x8 ...) => (VPDPBUSD256 ...) (UnsignedSignedQuadDotProdAccumulateInt32x16 ...) => (VPDPBUSD512 ...) -(UnsignedSignedQuadDotProdAccumulateUint32x4 ...) => (VPDPBUSD128 ...) -(UnsignedSignedQuadDotProdAccumulateUint32x8 ...) => (VPDPBUSD256 ...) -(UnsignedSignedQuadDotProdAccumulateUint32x16 ...) => (VPDPBUSD512 ...) (UnsignedSignedQuadDotProdAccumulateMaskedInt32x4 x y z mask) => (VPDPBUSDMasked128 x y z (VPMOVVec32x4ToM mask)) (UnsignedSignedQuadDotProdAccumulateMaskedInt32x8 x y z mask) => (VPDPBUSDMasked256 x y z (VPMOVVec32x8ToM mask)) (UnsignedSignedQuadDotProdAccumulateMaskedInt32x16 x y z mask) => (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) -(UnsignedSignedQuadDotProdAccumulateMaskedUint32x4 x y z mask) => (VPDPBUSDMasked128 x y z (VPMOVVec32x4ToM mask)) -(UnsignedSignedQuadDotProdAccumulateMaskedUint32x8 x y z mask) => (VPDPBUSDMasked256 x y z (VPMOVVec32x8ToM mask)) -(UnsignedSignedQuadDotProdAccumulateMaskedUint32x16 x y z mask) => (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) (XorInt8x16 ...) => (VPXOR128 ...) (XorInt8x32 ...) 
=> (VPXOR256 ...) (XorInt16x8 ...) => (VPXOR128 ...) diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 00e4baf141de5f..c8fe1e9eeee6d3 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -914,8 +914,8 @@ func simdGenericOps() []opData { {name: "Permute2Int16x16", argLength: 3, commutative: false}, {name: "Permute2MaskedInt16x16", argLength: 4, commutative: false}, {name: "Permute2MaskedUint16x16", argLength: 4, commutative: false}, - {name: "PermuteMaskedInt16x16", argLength: 3, commutative: false}, {name: "PermuteMaskedUint16x16", argLength: 3, commutative: false}, + {name: "PermuteMaskedInt16x16", argLength: 3, commutative: false}, {name: "PopCountUint16x16", argLength: 1, commutative: false}, {name: "PopCountMaskedUint16x16", argLength: 2, commutative: false}, {name: "SaturatedAddUint16x16", argLength: 2, commutative: true}, @@ -960,12 +960,12 @@ func simdGenericOps() []opData { {name: "MulHighMaskedUint16x32", argLength: 3, commutative: true}, {name: "NotEqualUint16x32", argLength: 2, commutative: true}, {name: "NotEqualMaskedUint16x32", argLength: 3, commutative: true}, - {name: "PermuteInt16x32", argLength: 2, commutative: false}, {name: "PermuteUint16x32", argLength: 2, commutative: false}, - {name: "Permute2Int16x32", argLength: 3, commutative: false}, + {name: "PermuteInt16x32", argLength: 2, commutative: false}, {name: "Permute2Uint16x32", argLength: 3, commutative: false}, - {name: "Permute2MaskedInt16x32", argLength: 4, commutative: false}, + {name: "Permute2Int16x32", argLength: 3, commutative: false}, {name: "Permute2MaskedUint16x32", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt16x32", argLength: 4, commutative: false}, {name: "PermuteMaskedUint16x32", argLength: 3, commutative: false}, {name: "PermuteMaskedInt16x32", argLength: 3, commutative: false}, {name: "PopCountUint16x32", argLength: 1, commutative: false}, @@ -1016,14 +1016,14 @@ func simdGenericOps() []opData { {name: "OrUint16x8", argLength: 2, commutative: true}, {name: "PairwiseAddUint16x8", argLength: 2, commutative: false}, {name: "PairwiseSubUint16x8", argLength: 2, commutative: false}, - {name: "PermuteUint16x8", argLength: 2, commutative: false}, {name: "PermuteInt16x8", argLength: 2, commutative: false}, + {name: "PermuteUint16x8", argLength: 2, commutative: false}, {name: "Permute2Int16x8", argLength: 3, commutative: false}, {name: "Permute2Uint16x8", argLength: 3, commutative: false}, - {name: "Permute2MaskedUint16x8", argLength: 4, commutative: false}, {name: "Permute2MaskedInt16x8", argLength: 4, commutative: false}, - {name: "PermuteMaskedInt16x8", argLength: 3, commutative: false}, + {name: "Permute2MaskedUint16x8", argLength: 4, commutative: false}, {name: "PermuteMaskedUint16x8", argLength: 3, commutative: false}, + {name: "PermuteMaskedInt16x8", argLength: 3, commutative: false}, {name: "PopCountUint16x8", argLength: 1, commutative: false}, {name: "PopCountMaskedUint16x8", argLength: 2, commutative: false}, {name: "SaturatedAddUint16x8", argLength: 2, commutative: true}, @@ -1070,26 +1070,24 @@ func simdGenericOps() []opData { {name: "NotEqualMaskedUint32x16", argLength: 3, commutative: true}, {name: "OrUint32x16", argLength: 2, commutative: true}, {name: "OrMaskedUint32x16", argLength: 3, commutative: true}, - {name: "PermuteInt32x16", argLength: 2, commutative: false}, {name: "PermuteFloat32x16", argLength: 2, commutative: false}, + 
{name: "PermuteInt32x16", argLength: 2, commutative: false}, {name: "PermuteUint32x16", argLength: 2, commutative: false}, {name: "Permute2Uint32x16", argLength: 3, commutative: false}, {name: "Permute2Float32x16", argLength: 3, commutative: false}, {name: "Permute2Int32x16", argLength: 3, commutative: false}, - {name: "Permute2MaskedUint32x16", argLength: 4, commutative: false}, {name: "Permute2MaskedInt32x16", argLength: 4, commutative: false}, {name: "Permute2MaskedFloat32x16", argLength: 4, commutative: false}, + {name: "Permute2MaskedUint32x16", argLength: 4, commutative: false}, + {name: "PermuteMaskedInt32x16", argLength: 3, commutative: false}, {name: "PermuteMaskedFloat32x16", argLength: 3, commutative: false}, {name: "PermuteMaskedUint32x16", argLength: 3, commutative: false}, - {name: "PermuteMaskedInt32x16", argLength: 3, commutative: false}, {name: "PopCountUint32x16", argLength: 1, commutative: false}, {name: "PopCountMaskedUint32x16", argLength: 2, commutative: false}, {name: "RotateLeftUint32x16", argLength: 2, commutative: false}, {name: "RotateLeftMaskedUint32x16", argLength: 3, commutative: false}, {name: "RotateRightUint32x16", argLength: 2, commutative: false}, {name: "RotateRightMaskedUint32x16", argLength: 3, commutative: false}, - {name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x16", argLength: 3, commutative: false}, - {name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x16", argLength: 4, commutative: false}, {name: "ShiftAllLeftUint32x16", argLength: 2, commutative: false}, {name: "ShiftAllLeftMaskedUint32x16", argLength: 3, commutative: false}, {name: "ShiftAllRightUint32x16", argLength: 2, commutative: false}, @@ -1104,8 +1102,6 @@ func simdGenericOps() []opData { {name: "ShiftRightMaskedUint32x16", argLength: 3, commutative: false}, {name: "SubUint32x16", argLength: 2, commutative: false}, {name: "SubMaskedUint32x16", argLength: 3, commutative: false}, - {name: "UnsignedSignedQuadDotProdAccumulateUint32x16", argLength: 3, commutative: false}, - {name: "UnsignedSignedQuadDotProdAccumulateMaskedUint32x16", argLength: 4, commutative: false}, {name: "XorUint32x16", argLength: 2, commutative: true}, {name: "XorMaskedUint32x16", argLength: 3, commutative: true}, {name: "AddUint32x4", argLength: 2, commutative: true}, @@ -1136,20 +1132,18 @@ func simdGenericOps() []opData { {name: "OrMaskedUint32x4", argLength: 3, commutative: true}, {name: "PairwiseAddUint32x4", argLength: 2, commutative: false}, {name: "PairwiseSubUint32x4", argLength: 2, commutative: false}, + {name: "Permute2Float32x4", argLength: 3, commutative: false}, {name: "Permute2Uint32x4", argLength: 3, commutative: false}, {name: "Permute2Int32x4", argLength: 3, commutative: false}, - {name: "Permute2Float32x4", argLength: 3, commutative: false}, - {name: "Permute2MaskedFloat32x4", argLength: 4, commutative: false}, {name: "Permute2MaskedInt32x4", argLength: 4, commutative: false}, {name: "Permute2MaskedUint32x4", argLength: 4, commutative: false}, + {name: "Permute2MaskedFloat32x4", argLength: 4, commutative: false}, {name: "PopCountUint32x4", argLength: 1, commutative: false}, {name: "PopCountMaskedUint32x4", argLength: 2, commutative: false}, {name: "RotateLeftUint32x4", argLength: 2, commutative: false}, {name: "RotateLeftMaskedUint32x4", argLength: 3, commutative: false}, {name: "RotateRightUint32x4", argLength: 2, commutative: false}, {name: "RotateRightMaskedUint32x4", argLength: 3, commutative: false}, - {name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x4", 
argLength: 3, commutative: false}, - {name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x4", argLength: 4, commutative: false}, {name: "ShiftAllLeftUint32x4", argLength: 2, commutative: false}, {name: "ShiftAllLeftMaskedUint32x4", argLength: 3, commutative: false}, {name: "ShiftAllRightUint32x4", argLength: 2, commutative: false}, @@ -1164,8 +1158,6 @@ func simdGenericOps() []opData { {name: "ShiftRightMaskedUint32x4", argLength: 3, commutative: false}, {name: "SubUint32x4", argLength: 2, commutative: false}, {name: "SubMaskedUint32x4", argLength: 3, commutative: false}, - {name: "UnsignedSignedQuadDotProdAccumulateUint32x4", argLength: 3, commutative: false}, - {name: "UnsignedSignedQuadDotProdAccumulateMaskedUint32x4", argLength: 4, commutative: false}, {name: "XorUint32x4", argLength: 2, commutative: true}, {name: "XorMaskedUint32x4", argLength: 3, commutative: true}, {name: "AddUint32x8", argLength: 2, commutative: true}, @@ -1197,14 +1189,14 @@ func simdGenericOps() []opData { {name: "PairwiseAddUint32x8", argLength: 2, commutative: false}, {name: "PairwiseSubUint32x8", argLength: 2, commutative: false}, {name: "PermuteUint32x8", argLength: 2, commutative: false}, - {name: "PermuteInt32x8", argLength: 2, commutative: false}, {name: "PermuteFloat32x8", argLength: 2, commutative: false}, - {name: "Permute2Uint32x8", argLength: 3, commutative: false}, - {name: "Permute2Float32x8", argLength: 3, commutative: false}, + {name: "PermuteInt32x8", argLength: 2, commutative: false}, {name: "Permute2Int32x8", argLength: 3, commutative: false}, + {name: "Permute2Float32x8", argLength: 3, commutative: false}, + {name: "Permute2Uint32x8", argLength: 3, commutative: false}, {name: "Permute2MaskedFloat32x8", argLength: 4, commutative: false}, - {name: "Permute2MaskedInt32x8", argLength: 4, commutative: false}, {name: "Permute2MaskedUint32x8", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt32x8", argLength: 4, commutative: false}, {name: "PermuteMaskedInt32x8", argLength: 3, commutative: false}, {name: "PermuteMaskedUint32x8", argLength: 3, commutative: false}, {name: "PermuteMaskedFloat32x8", argLength: 3, commutative: false}, @@ -1214,8 +1206,6 @@ func simdGenericOps() []opData { {name: "RotateLeftMaskedUint32x8", argLength: 3, commutative: false}, {name: "RotateRightUint32x8", argLength: 2, commutative: false}, {name: "RotateRightMaskedUint32x8", argLength: 3, commutative: false}, - {name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x8", argLength: 3, commutative: false}, - {name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x8", argLength: 4, commutative: false}, {name: "ShiftAllLeftUint32x8", argLength: 2, commutative: false}, {name: "ShiftAllLeftMaskedUint32x8", argLength: 3, commutative: false}, {name: "ShiftAllRightUint32x8", argLength: 2, commutative: false}, @@ -1230,8 +1220,6 @@ func simdGenericOps() []opData { {name: "ShiftRightMaskedUint32x8", argLength: 3, commutative: false}, {name: "SubUint32x8", argLength: 2, commutative: false}, {name: "SubMaskedUint32x8", argLength: 3, commutative: false}, - {name: "UnsignedSignedQuadDotProdAccumulateUint32x8", argLength: 3, commutative: false}, - {name: "UnsignedSignedQuadDotProdAccumulateMaskedUint32x8", argLength: 4, commutative: false}, {name: "XorUint32x8", argLength: 2, commutative: true}, {name: "XorMaskedUint32x8", argLength: 3, commutative: true}, {name: "AddUint64x2", argLength: 2, commutative: true}, @@ -1265,8 +1253,8 @@ func simdGenericOps() []opData { {name: "Permute2Uint64x2", argLength: 
3, commutative: false}, {name: "Permute2Int64x2", argLength: 3, commutative: false}, {name: "Permute2MaskedInt64x2", argLength: 4, commutative: false}, - {name: "Permute2MaskedUint64x2", argLength: 4, commutative: false}, {name: "Permute2MaskedFloat64x2", argLength: 4, commutative: false}, + {name: "Permute2MaskedUint64x2", argLength: 4, commutative: false}, {name: "PopCountUint64x2", argLength: 1, commutative: false}, {name: "PopCountMaskedUint64x2", argLength: 2, commutative: false}, {name: "RotateLeftUint64x2", argLength: 2, commutative: false}, @@ -1316,18 +1304,18 @@ func simdGenericOps() []opData { {name: "NotEqualMaskedUint64x4", argLength: 3, commutative: true}, {name: "OrUint64x4", argLength: 2, commutative: true}, {name: "OrMaskedUint64x4", argLength: 3, commutative: true}, - {name: "PermuteFloat64x4", argLength: 2, commutative: false}, {name: "PermuteUint64x4", argLength: 2, commutative: false}, {name: "PermuteInt64x4", argLength: 2, commutative: false}, + {name: "PermuteFloat64x4", argLength: 2, commutative: false}, + {name: "Permute2Float64x4", argLength: 3, commutative: false}, {name: "Permute2Int64x4", argLength: 3, commutative: false}, {name: "Permute2Uint64x4", argLength: 3, commutative: false}, - {name: "Permute2Float64x4", argLength: 3, commutative: false}, {name: "Permute2MaskedFloat64x4", argLength: 4, commutative: false}, {name: "Permute2MaskedUint64x4", argLength: 4, commutative: false}, {name: "Permute2MaskedInt64x4", argLength: 4, commutative: false}, {name: "PermuteMaskedFloat64x4", argLength: 3, commutative: false}, - {name: "PermuteMaskedUint64x4", argLength: 3, commutative: false}, {name: "PermuteMaskedInt64x4", argLength: 3, commutative: false}, + {name: "PermuteMaskedUint64x4", argLength: 3, commutative: false}, {name: "PopCountUint64x4", argLength: 1, commutative: false}, {name: "PopCountMaskedUint64x4", argLength: 2, commutative: false}, {name: "RotateLeftUint64x4", argLength: 2, commutative: false}, @@ -1377,18 +1365,18 @@ func simdGenericOps() []opData { {name: "NotEqualMaskedUint64x8", argLength: 3, commutative: true}, {name: "OrUint64x8", argLength: 2, commutative: true}, {name: "OrMaskedUint64x8", argLength: 3, commutative: true}, + {name: "PermuteFloat64x8", argLength: 2, commutative: false}, {name: "PermuteInt64x8", argLength: 2, commutative: false}, {name: "PermuteUint64x8", argLength: 2, commutative: false}, - {name: "PermuteFloat64x8", argLength: 2, commutative: false}, - {name: "Permute2Uint64x8", argLength: 3, commutative: false}, - {name: "Permute2Float64x8", argLength: 3, commutative: false}, {name: "Permute2Int64x8", argLength: 3, commutative: false}, + {name: "Permute2Float64x8", argLength: 3, commutative: false}, + {name: "Permute2Uint64x8", argLength: 3, commutative: false}, {name: "Permute2MaskedUint64x8", argLength: 4, commutative: false}, - {name: "Permute2MaskedFloat64x8", argLength: 4, commutative: false}, {name: "Permute2MaskedInt64x8", argLength: 4, commutative: false}, + {name: "Permute2MaskedFloat64x8", argLength: 4, commutative: false}, {name: "PermuteMaskedUint64x8", argLength: 3, commutative: false}, - {name: "PermuteMaskedInt64x8", argLength: 3, commutative: false}, {name: "PermuteMaskedFloat64x8", argLength: 3, commutative: false}, + {name: "PermuteMaskedInt64x8", argLength: 3, commutative: false}, {name: "PopCountUint64x8", argLength: 1, commutative: false}, {name: "PopCountMaskedUint64x8", argLength: 2, commutative: false}, {name: "RotateLeftUint64x8", argLength: 2, commutative: false}, @@ -1439,8 +1427,8 @@ func 
simdGenericOps() []opData { {name: "OrUint8x16", argLength: 2, commutative: true}, {name: "PermuteUint8x16", argLength: 2, commutative: false}, {name: "PermuteInt8x16", argLength: 2, commutative: false}, - {name: "Permute2Int8x16", argLength: 3, commutative: false}, {name: "Permute2Uint8x16", argLength: 3, commutative: false}, + {name: "Permute2Int8x16", argLength: 3, commutative: false}, {name: "Permute2MaskedInt8x16", argLength: 4, commutative: false}, {name: "Permute2MaskedUint8x16", argLength: 4, commutative: false}, {name: "PermuteMaskedUint8x16", argLength: 3, commutative: false}, @@ -1486,10 +1474,10 @@ func simdGenericOps() []opData { {name: "PermuteInt8x32", argLength: 2, commutative: false}, {name: "Permute2Int8x32", argLength: 3, commutative: false}, {name: "Permute2Uint8x32", argLength: 3, commutative: false}, - {name: "Permute2MaskedInt8x32", argLength: 4, commutative: false}, {name: "Permute2MaskedUint8x32", argLength: 4, commutative: false}, - {name: "PermuteMaskedInt8x32", argLength: 3, commutative: false}, + {name: "Permute2MaskedInt8x32", argLength: 4, commutative: false}, {name: "PermuteMaskedUint8x32", argLength: 3, commutative: false}, + {name: "PermuteMaskedInt8x32", argLength: 3, commutative: false}, {name: "PopCountUint8x32", argLength: 1, commutative: false}, {name: "PopCountMaskedUint8x32", argLength: 2, commutative: false}, {name: "SaturatedAddUint8x32", argLength: 2, commutative: true}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 35612493ea39e4..29058f0b193bc0 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -5314,8 +5314,8 @@ const ( OpPermute2Int16x16 OpPermute2MaskedInt16x16 OpPermute2MaskedUint16x16 - OpPermuteMaskedInt16x16 OpPermuteMaskedUint16x16 + OpPermuteMaskedInt16x16 OpPopCountUint16x16 OpPopCountMaskedUint16x16 OpSaturatedAddUint16x16 @@ -5360,12 +5360,12 @@ const ( OpMulHighMaskedUint16x32 OpNotEqualUint16x32 OpNotEqualMaskedUint16x32 - OpPermuteInt16x32 OpPermuteUint16x32 - OpPermute2Int16x32 + OpPermuteInt16x32 OpPermute2Uint16x32 - OpPermute2MaskedInt16x32 + OpPermute2Int16x32 OpPermute2MaskedUint16x32 + OpPermute2MaskedInt16x32 OpPermuteMaskedUint16x32 OpPermuteMaskedInt16x32 OpPopCountUint16x32 @@ -5416,14 +5416,14 @@ const ( OpOrUint16x8 OpPairwiseAddUint16x8 OpPairwiseSubUint16x8 - OpPermuteUint16x8 OpPermuteInt16x8 + OpPermuteUint16x8 OpPermute2Int16x8 OpPermute2Uint16x8 - OpPermute2MaskedUint16x8 OpPermute2MaskedInt16x8 - OpPermuteMaskedInt16x8 + OpPermute2MaskedUint16x8 OpPermuteMaskedUint16x8 + OpPermuteMaskedInt16x8 OpPopCountUint16x8 OpPopCountMaskedUint16x8 OpSaturatedAddUint16x8 @@ -5470,26 +5470,24 @@ const ( OpNotEqualMaskedUint32x16 OpOrUint32x16 OpOrMaskedUint32x16 - OpPermuteInt32x16 OpPermuteFloat32x16 + OpPermuteInt32x16 OpPermuteUint32x16 OpPermute2Uint32x16 OpPermute2Float32x16 OpPermute2Int32x16 - OpPermute2MaskedUint32x16 OpPermute2MaskedInt32x16 OpPermute2MaskedFloat32x16 + OpPermute2MaskedUint32x16 + OpPermuteMaskedInt32x16 OpPermuteMaskedFloat32x16 OpPermuteMaskedUint32x16 - OpPermuteMaskedInt32x16 OpPopCountUint32x16 OpPopCountMaskedUint32x16 OpRotateLeftUint32x16 OpRotateLeftMaskedUint32x16 OpRotateRightUint32x16 OpRotateRightMaskedUint32x16 - OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16 - OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x16 OpShiftAllLeftUint32x16 OpShiftAllLeftMaskedUint32x16 OpShiftAllRightUint32x16 @@ -5504,8 +5502,6 @@ const ( OpShiftRightMaskedUint32x16 OpSubUint32x16 
OpSubMaskedUint32x16 - OpUnsignedSignedQuadDotProdAccumulateUint32x16 - OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x16 OpXorUint32x16 OpXorMaskedUint32x16 OpAddUint32x4 @@ -5536,20 +5532,18 @@ const ( OpOrMaskedUint32x4 OpPairwiseAddUint32x4 OpPairwiseSubUint32x4 + OpPermute2Float32x4 OpPermute2Uint32x4 OpPermute2Int32x4 - OpPermute2Float32x4 - OpPermute2MaskedFloat32x4 OpPermute2MaskedInt32x4 OpPermute2MaskedUint32x4 + OpPermute2MaskedFloat32x4 OpPopCountUint32x4 OpPopCountMaskedUint32x4 OpRotateLeftUint32x4 OpRotateLeftMaskedUint32x4 OpRotateRightUint32x4 OpRotateRightMaskedUint32x4 - OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4 - OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x4 OpShiftAllLeftUint32x4 OpShiftAllLeftMaskedUint32x4 OpShiftAllRightUint32x4 @@ -5564,8 +5558,6 @@ const ( OpShiftRightMaskedUint32x4 OpSubUint32x4 OpSubMaskedUint32x4 - OpUnsignedSignedQuadDotProdAccumulateUint32x4 - OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x4 OpXorUint32x4 OpXorMaskedUint32x4 OpAddUint32x8 @@ -5597,14 +5589,14 @@ const ( OpPairwiseAddUint32x8 OpPairwiseSubUint32x8 OpPermuteUint32x8 - OpPermuteInt32x8 OpPermuteFloat32x8 - OpPermute2Uint32x8 - OpPermute2Float32x8 + OpPermuteInt32x8 OpPermute2Int32x8 + OpPermute2Float32x8 + OpPermute2Uint32x8 OpPermute2MaskedFloat32x8 - OpPermute2MaskedInt32x8 OpPermute2MaskedUint32x8 + OpPermute2MaskedInt32x8 OpPermuteMaskedInt32x8 OpPermuteMaskedUint32x8 OpPermuteMaskedFloat32x8 @@ -5614,8 +5606,6 @@ const ( OpRotateLeftMaskedUint32x8 OpRotateRightUint32x8 OpRotateRightMaskedUint32x8 - OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8 - OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x8 OpShiftAllLeftUint32x8 OpShiftAllLeftMaskedUint32x8 OpShiftAllRightUint32x8 @@ -5630,8 +5620,6 @@ const ( OpShiftRightMaskedUint32x8 OpSubUint32x8 OpSubMaskedUint32x8 - OpUnsignedSignedQuadDotProdAccumulateUint32x8 - OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x8 OpXorUint32x8 OpXorMaskedUint32x8 OpAddUint64x2 @@ -5665,8 +5653,8 @@ const ( OpPermute2Uint64x2 OpPermute2Int64x2 OpPermute2MaskedInt64x2 - OpPermute2MaskedUint64x2 OpPermute2MaskedFloat64x2 + OpPermute2MaskedUint64x2 OpPopCountUint64x2 OpPopCountMaskedUint64x2 OpRotateLeftUint64x2 @@ -5716,18 +5704,18 @@ const ( OpNotEqualMaskedUint64x4 OpOrUint64x4 OpOrMaskedUint64x4 - OpPermuteFloat64x4 OpPermuteUint64x4 OpPermuteInt64x4 + OpPermuteFloat64x4 + OpPermute2Float64x4 OpPermute2Int64x4 OpPermute2Uint64x4 - OpPermute2Float64x4 OpPermute2MaskedFloat64x4 OpPermute2MaskedUint64x4 OpPermute2MaskedInt64x4 OpPermuteMaskedFloat64x4 - OpPermuteMaskedUint64x4 OpPermuteMaskedInt64x4 + OpPermuteMaskedUint64x4 OpPopCountUint64x4 OpPopCountMaskedUint64x4 OpRotateLeftUint64x4 @@ -5777,18 +5765,18 @@ const ( OpNotEqualMaskedUint64x8 OpOrUint64x8 OpOrMaskedUint64x8 + OpPermuteFloat64x8 OpPermuteInt64x8 OpPermuteUint64x8 - OpPermuteFloat64x8 - OpPermute2Uint64x8 - OpPermute2Float64x8 OpPermute2Int64x8 + OpPermute2Float64x8 + OpPermute2Uint64x8 OpPermute2MaskedUint64x8 - OpPermute2MaskedFloat64x8 OpPermute2MaskedInt64x8 + OpPermute2MaskedFloat64x8 OpPermuteMaskedUint64x8 - OpPermuteMaskedInt64x8 OpPermuteMaskedFloat64x8 + OpPermuteMaskedInt64x8 OpPopCountUint64x8 OpPopCountMaskedUint64x8 OpRotateLeftUint64x8 @@ -5839,8 +5827,8 @@ const ( OpOrUint8x16 OpPermuteUint8x16 OpPermuteInt8x16 - OpPermute2Int8x16 OpPermute2Uint8x16 + OpPermute2Int8x16 OpPermute2MaskedInt8x16 OpPermute2MaskedUint8x16 OpPermuteMaskedUint8x16 @@ -5886,10 +5874,10 @@ const ( OpPermuteInt8x32 OpPermute2Int8x32 
OpPermute2Uint8x32 - OpPermute2MaskedInt8x32 OpPermute2MaskedUint8x32 - OpPermuteMaskedInt8x32 + OpPermute2MaskedInt8x32 OpPermuteMaskedUint8x32 + OpPermuteMaskedInt8x32 OpPopCountUint8x32 OpPopCountMaskedUint8x32 OpSaturatedAddUint8x32 @@ -65610,12 +65598,12 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteMaskedInt16x16", + name: "PermuteMaskedUint16x16", argLen: 3, generic: true, }, { - name: "PermuteMaskedUint16x16", + name: "PermuteMaskedInt16x16", argLen: 3, generic: true, }, @@ -65857,32 +65845,32 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteInt16x32", + name: "PermuteUint16x32", argLen: 2, generic: true, }, { - name: "PermuteUint16x32", + name: "PermuteInt16x32", argLen: 2, generic: true, }, { - name: "Permute2Int16x32", + name: "Permute2Uint16x32", argLen: 3, generic: true, }, { - name: "Permute2Uint16x32", + name: "Permute2Int16x32", argLen: 3, generic: true, }, { - name: "Permute2MaskedInt16x32", + name: "Permute2MaskedUint16x32", argLen: 4, generic: true, }, { - name: "Permute2MaskedUint16x32", + name: "Permute2MaskedInt16x32", argLen: 4, generic: true, }, @@ -66155,12 +66143,12 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteUint16x8", + name: "PermuteInt16x8", argLen: 2, generic: true, }, { - name: "PermuteInt16x8", + name: "PermuteUint16x8", argLen: 2, generic: true, }, @@ -66175,22 +66163,22 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2MaskedUint16x8", + name: "Permute2MaskedInt16x8", argLen: 4, generic: true, }, { - name: "Permute2MaskedInt16x8", + name: "Permute2MaskedUint16x8", argLen: 4, generic: true, }, { - name: "PermuteMaskedInt16x8", + name: "PermuteMaskedUint16x8", argLen: 3, generic: true, }, { - name: "PermuteMaskedUint16x8", + name: "PermuteMaskedInt16x8", argLen: 3, generic: true, }, @@ -66442,12 +66430,12 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteInt32x16", + name: "PermuteFloat32x16", argLen: 2, generic: true, }, { - name: "PermuteFloat32x16", + name: "PermuteInt32x16", argLen: 2, generic: true, }, @@ -66472,32 +66460,32 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2MaskedUint32x16", + name: "Permute2MaskedInt32x16", argLen: 4, generic: true, }, { - name: "Permute2MaskedInt32x16", + name: "Permute2MaskedFloat32x16", argLen: 4, generic: true, }, { - name: "Permute2MaskedFloat32x16", + name: "Permute2MaskedUint32x16", argLen: 4, generic: true, }, { - name: "PermuteMaskedFloat32x16", + name: "PermuteMaskedInt32x16", argLen: 3, generic: true, }, { - name: "PermuteMaskedUint32x16", + name: "PermuteMaskedFloat32x16", argLen: 3, generic: true, }, { - name: "PermuteMaskedInt32x16", + name: "PermuteMaskedUint32x16", argLen: 3, generic: true, }, @@ -66531,16 +66519,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x16", - argLen: 3, - generic: true, - }, - { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x16", - argLen: 4, - generic: true, - }, { name: "ShiftAllLeftUint32x16", argLen: 2, @@ -66611,16 +66589,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "UnsignedSignedQuadDotProdAccumulateUint32x16", - argLen: 3, - generic: true, - }, - { - name: "UnsignedSignedQuadDotProdAccumulateMaskedUint32x16", - argLen: 4, - generic: true, - }, { name: "XorUint32x16", argLen: 2, @@ -66789,32 +66757,32 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2Uint32x4", + name: "Permute2Float32x4", 
argLen: 3, generic: true, }, { - name: "Permute2Int32x4", + name: "Permute2Uint32x4", argLen: 3, generic: true, }, { - name: "Permute2Float32x4", + name: "Permute2Int32x4", argLen: 3, generic: true, }, { - name: "Permute2MaskedFloat32x4", + name: "Permute2MaskedInt32x4", argLen: 4, generic: true, }, { - name: "Permute2MaskedInt32x4", + name: "Permute2MaskedUint32x4", argLen: 4, generic: true, }, { - name: "Permute2MaskedUint32x4", + name: "Permute2MaskedFloat32x4", argLen: 4, generic: true, }, @@ -66848,16 +66816,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x4", - argLen: 3, - generic: true, - }, - { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x4", - argLen: 4, - generic: true, - }, { name: "ShiftAllLeftUint32x4", argLen: 2, @@ -66928,16 +66886,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "UnsignedSignedQuadDotProdAccumulateUint32x4", - argLen: 3, - generic: true, - }, - { - name: "UnsignedSignedQuadDotProdAccumulateMaskedUint32x4", - argLen: 4, - generic: true, - }, { name: "XorUint32x4", argLen: 2, @@ -67111,17 +67059,17 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteInt32x8", + name: "PermuteFloat32x8", argLen: 2, generic: true, }, { - name: "PermuteFloat32x8", + name: "PermuteInt32x8", argLen: 2, generic: true, }, { - name: "Permute2Uint32x8", + name: "Permute2Int32x8", argLen: 3, generic: true, }, @@ -67131,7 +67079,7 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2Int32x8", + name: "Permute2Uint32x8", argLen: 3, generic: true, }, @@ -67141,12 +67089,12 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2MaskedInt32x8", + name: "Permute2MaskedUint32x8", argLen: 4, generic: true, }, { - name: "Permute2MaskedUint32x8", + name: "Permute2MaskedInt32x8", argLen: 4, generic: true, }, @@ -67195,16 +67143,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x8", - argLen: 3, - generic: true, - }, - { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x8", - argLen: 4, - generic: true, - }, { name: "ShiftAllLeftUint32x8", argLen: 2, @@ -67275,16 +67213,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "UnsignedSignedQuadDotProdAccumulateUint32x8", - argLen: 3, - generic: true, - }, - { - name: "UnsignedSignedQuadDotProdAccumulateMaskedUint32x8", - argLen: 4, - generic: true, - }, { name: "XorUint32x8", argLen: 2, @@ -67469,12 +67397,12 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2MaskedUint64x2", + name: "Permute2MaskedFloat64x2", argLen: 4, generic: true, }, { - name: "Permute2MaskedFloat64x2", + name: "Permute2MaskedUint64x2", argLen: 4, generic: true, }, @@ -67742,32 +67670,32 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteFloat64x4", + name: "PermuteUint64x4", argLen: 2, generic: true, }, { - name: "PermuteUint64x4", + name: "PermuteInt64x4", argLen: 2, generic: true, }, { - name: "PermuteInt64x4", + name: "PermuteFloat64x4", argLen: 2, generic: true, }, { - name: "Permute2Int64x4", + name: "Permute2Float64x4", argLen: 3, generic: true, }, { - name: "Permute2Uint64x4", + name: "Permute2Int64x4", argLen: 3, generic: true, }, { - name: "Permute2Float64x4", + name: "Permute2Uint64x4", argLen: 3, generic: true, }, @@ -67792,12 +67720,12 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteMaskedUint64x4", + name: 
"PermuteMaskedInt64x4", argLen: 3, generic: true, }, { - name: "PermuteMaskedInt64x4", + name: "PermuteMaskedUint64x4", argLen: 3, generic: true, }, @@ -68065,22 +67993,22 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteInt64x8", + name: "PermuteFloat64x8", argLen: 2, generic: true, }, { - name: "PermuteUint64x8", + name: "PermuteInt64x8", argLen: 2, generic: true, }, { - name: "PermuteFloat64x8", + name: "PermuteUint64x8", argLen: 2, generic: true, }, { - name: "Permute2Uint64x8", + name: "Permute2Int64x8", argLen: 3, generic: true, }, @@ -68090,7 +68018,7 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2Int64x8", + name: "Permute2Uint64x8", argLen: 3, generic: true, }, @@ -68100,12 +68028,12 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2MaskedFloat64x8", + name: "Permute2MaskedInt64x8", argLen: 4, generic: true, }, { - name: "Permute2MaskedInt64x8", + name: "Permute2MaskedFloat64x8", argLen: 4, generic: true, }, @@ -68115,12 +68043,12 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteMaskedInt64x8", + name: "PermuteMaskedFloat64x8", argLen: 3, generic: true, }, { - name: "PermuteMaskedFloat64x8", + name: "PermuteMaskedInt64x8", argLen: 3, generic: true, }, @@ -68391,12 +68319,12 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2Int8x16", + name: "Permute2Uint8x16", argLen: 3, generic: true, }, { - name: "Permute2Uint8x16", + name: "Permute2Int8x16", argLen: 3, generic: true, }, @@ -68643,22 +68571,22 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2MaskedInt8x32", + name: "Permute2MaskedUint8x32", argLen: 4, generic: true, }, { - name: "Permute2MaskedUint8x32", + name: "Permute2MaskedInt8x32", argLen: 4, generic: true, }, { - name: "PermuteMaskedInt8x32", + name: "PermuteMaskedUint8x32", argLen: 3, generic: true, }, { - name: "PermuteMaskedUint8x32", + name: "PermuteMaskedInt8x32", argLen: 3, generic: true, }, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 53dffe10e4e036..5c7cafd6f23697 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -4297,21 +4297,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4(v) case OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8: return rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8(v) - case OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x16: - return rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x16(v) - case OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x4: - return rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x4(v) - case OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x8: - return rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x8(v) - case OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16: - v.Op = OpAMD64VPDPBUSDS512 - return true - case OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4: - v.Op = OpAMD64VPDPBUSDS128 - return true - case OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8: - v.Op = OpAMD64VPDPBUSDS256 - return true case OpSelect0: return rewriteValueAMD64_OpSelect0(v) case OpSelect1: @@ -5416,21 +5401,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x4(v) case 
OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x8: return rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x8(v) - case OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x16: - return rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x16(v) - case OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x4: - return rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x4(v) - case OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x8: - return rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x8(v) - case OpUnsignedSignedQuadDotProdAccumulateUint32x16: - v.Op = OpAMD64VPDPBUSD512 - return true - case OpUnsignedSignedQuadDotProdAccumulateUint32x4: - v.Op = OpAMD64VPDPBUSD128 - return true - case OpUnsignedSignedQuadDotProdAccumulateUint32x8: - v.Op = OpAMD64VPDPBUSD256 - return true case OpWB: v.Op = OpAMD64LoweredWB return true @@ -49615,66 +49585,6 @@ func rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32 return true } } -func rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x16 x y z mask) - // result: (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x4 x y z mask) - // result: (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x8 x y z mask) - // result: (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} func rewriteValueAMD64_OpSelect0(v *Value) bool { v_0 := v.Args[0] b := v.Block @@ -53973,66 +53883,6 @@ func rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x8(v *Val return true } } -func rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (UnsignedSignedQuadDotProdAccumulateMaskedUint32x16 x y z mask) - // result: (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x4(v *Value) bool { - 
v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (UnsignedSignedQuadDotProdAccumulateMaskedUint32x4 x y z mask) - // result: (VPDPBUSDMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (UnsignedSignedQuadDotProdAccumulateMaskedUint32x8 x y z mask) - // result: (VPDPBUSDMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} func rewriteValueAMD64_OpXorMaskedInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] diff --git a/src/cmd/compile/internal/ssagen/intrinsics.go b/src/cmd/compile/internal/ssagen/intrinsics.go index fd7ebb20a34015..337f0b86e61136 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics.go +++ b/src/cmd/compile/internal/ssagen/intrinsics.go @@ -1634,6 +1634,12 @@ func opLen3(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa } } +func opLen3_31(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue3(op, t, args[2], args[1], args[0]) + } +} + func opLen3_21(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue3(op, t, args[1], args[0], args[2]) @@ -1658,6 +1664,12 @@ func opLen4_231(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args [] } } +func opLen4_31(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue4(op, t, args[2], args[1], args[0], args[3]) + } +} + func plainPanicSimdImm(s *state) { cmp := s.newValue0(ssa.OpConstBool, types.Types[types.TBOOL]) cmp.AuxInt = 0 diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 1472f5ec1a7fb5..3d9294990853e6 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -993,12 +993,12 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Int16x8.PairDotProd", opLen2(ssa.OpPairDotProdInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.PairDotProd", opLen2(ssa.OpPairDotProdInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.PairDotProd", opLen2(ssa.OpPairDotProdInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.PairDotProdAccumulate", opLen3(ssa.OpPairDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.PairDotProdAccumulate", opLen3(ssa.OpPairDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.PairDotProdAccumulate", opLen3(ssa.OpPairDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.PairDotProdAccumulateMasked", opLen4(ssa.OpPairDotProdAccumulateMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.PairDotProdAccumulateMasked", opLen4(ssa.OpPairDotProdAccumulateMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.PairDotProdAccumulateMasked", opLen4(ssa.OpPairDotProdAccumulateMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.PairDotProdAccumulate", opLen3_31(ssa.OpPairDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.PairDotProdAccumulate", opLen3_31(ssa.OpPairDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.PairDotProdAccumulate", opLen3_31(ssa.OpPairDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.PairDotProdAccumulateMasked", opLen4_31(ssa.OpPairDotProdAccumulateMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.PairDotProdAccumulateMasked", opLen4_31(ssa.OpPairDotProdAccumulateMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.PairDotProdAccumulateMasked", opLen4_31(ssa.OpPairDotProdAccumulateMaskedInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.PairDotProdMasked", opLen3(ssa.OpPairDotProdMaskedInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.PairDotProdMasked", opLen3(ssa.OpPairDotProdMaskedInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.PairDotProdMasked", opLen3(ssa.OpPairDotProdMaskedInt16x32, types.TypeVec512), sys.AMD64) @@ -1318,12 +1318,12 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint16x8.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x16.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x32.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.SaturatedPairDotProdAccumulate", opLen3(ssa.OpSaturatedPairDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.SaturatedPairDotProdAccumulate", opLen3(ssa.OpSaturatedPairDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.SaturatedPairDotProdAccumulate", opLen3(ssa.OpSaturatedPairDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.SaturatedPairDotProdAccumulateMasked", opLen4(ssa.OpSaturatedPairDotProdAccumulateMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.SaturatedPairDotProdAccumulateMasked", opLen4(ssa.OpSaturatedPairDotProdAccumulateMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.SaturatedPairDotProdAccumulateMasked", opLen4(ssa.OpSaturatedPairDotProdAccumulateMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.SaturatedPairDotProdAccumulate", opLen3_31(ssa.OpSaturatedPairDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.SaturatedPairDotProdAccumulate", opLen3_31(ssa.OpSaturatedPairDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.SaturatedPairDotProdAccumulate", opLen3_31(ssa.OpSaturatedPairDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.SaturatedPairDotProdAccumulateMasked", opLen4_31(ssa.OpSaturatedPairDotProdAccumulateMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.SaturatedPairDotProdAccumulateMasked", opLen4_31(ssa.OpSaturatedPairDotProdAccumulateMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.SaturatedPairDotProdAccumulateMasked", opLen4_31(ssa.OpSaturatedPairDotProdAccumulateMaskedInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.SaturatedPairwiseAdd", opLen2(ssa.OpSaturatedPairwiseAddInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.SaturatedPairwiseAdd", opLen2(ssa.OpSaturatedPairwiseAddInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x8.SaturatedPairwiseSub", opLen2(ssa.OpSaturatedPairwiseSubInt16x8, types.TypeVec128), sys.AMD64) @@ -1358,18 +1358,12 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint8x16.SaturatedUnsignedSignedPairDotProdMasked", opLen3(ssa.OpSaturatedUnsignedSignedPairDotProdMaskedUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x32.SaturatedUnsignedSignedPairDotProdMasked", opLen3(ssa.OpSaturatedUnsignedSignedPairDotProdMaskedUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x64.SaturatedUnsignedSignedPairDotProdMasked", opLen3(ssa.OpSaturatedUnsignedSignedPairDotProdMaskedUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.SaturatedUnsignedSignedQuadDotProdAccumulateMasked", opLen4(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.SaturatedUnsignedSignedQuadDotProdAccumulateMasked", opLen4(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.SaturatedUnsignedSignedQuadDotProdAccumulateMasked", opLen4(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.SaturatedUnsignedSignedQuadDotProdAccumulateMasked", opLen4(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.SaturatedUnsignedSignedQuadDotProdAccumulateMasked", opLen4(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.SaturatedUnsignedSignedQuadDotProdAccumulateMasked", opLen4(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3_31(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3_31(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3_31(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.SaturatedUnsignedSignedQuadDotProdAccumulateMasked", opLen4_31(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.SaturatedUnsignedSignedQuadDotProdAccumulateMasked", opLen4_31(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8, types.TypeVec256), sys.AMD64) + 
addF(simdPackage, "Int8x64.SaturatedUnsignedSignedQuadDotProdAccumulateMasked", opLen4_31(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x8.Set128", opLen2Imm8(ssa.OpSet128Float32x8, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Float64x4.Set128", opLen2Imm8(ssa.OpSet128Float64x4, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Int8x32.Set128", opLen2Imm8(ssa.OpSet128Int8x32, types.TypeVec256, 0), sys.AMD64) @@ -1770,18 +1764,12 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float64x2.TruncWithPrecisionMasked", opLen2Imm8(ssa.OpTruncWithPrecisionMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.TruncWithPrecisionMasked", opLen2Imm8(ssa.OpTruncWithPrecisionMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.TruncWithPrecisionMasked", opLen2Imm8(ssa.OpTruncWithPrecisionMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Int32x4.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.UnsignedSignedQuadDotProdAccumulateMasked", opLen4(ssa.OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.UnsignedSignedQuadDotProdAccumulateMasked", opLen4(ssa.OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.UnsignedSignedQuadDotProdAccumulateMasked", opLen4(ssa.OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.UnsignedSignedQuadDotProdAccumulateMasked", opLen4(ssa.OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.UnsignedSignedQuadDotProdAccumulateMasked", opLen4(ssa.OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.UnsignedSignedQuadDotProdAccumulateMasked", opLen4(ssa.OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.UnsignedSignedQuadDotProdAccumulate", opLen3_31(ssa.OpUnsignedSignedQuadDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.UnsignedSignedQuadDotProdAccumulate", opLen3_31(ssa.OpUnsignedSignedQuadDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.UnsignedSignedQuadDotProdAccumulate", opLen3_31(ssa.OpUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.UnsignedSignedQuadDotProdAccumulateMasked", 
opLen4_31(ssa.OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.UnsignedSignedQuadDotProdAccumulateMasked", opLen4_31(ssa.OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.UnsignedSignedQuadDotProdAccumulateMasked", opLen4_31(ssa.OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.Xor", opLen2(ssa.OpXorInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Xor", opLen2(ssa.OpXorInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x8.Xor", opLen2(ssa.OpXorInt16x8, types.TypeVec128), sys.AMD64) diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 3b87836962a134..4624105d7998ca 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -2115,192 +2115,192 @@ func (x Float64x8) FloorWithPrecisionMasked(prec uint8, mask Mask64x8) Float64x8 /* FusedMultiplyAdd */ -// FusedMultiplyAdd performs `(v1 * v2) + v3`. +// FusedMultiplyAdd performs (x * y) + z. // // Asm: VFMADD213PS, CPU Feature: AVX512F func (x Float32x4) FusedMultiplyAdd(y Float32x4, z Float32x4) Float32x4 -// FusedMultiplyAdd performs `(v1 * v2) + v3`. +// FusedMultiplyAdd performs (x * y) + z. // // Asm: VFMADD213PS, CPU Feature: AVX512F func (x Float32x8) FusedMultiplyAdd(y Float32x8, z Float32x8) Float32x8 -// FusedMultiplyAdd performs `(v1 * v2) + v3`. +// FusedMultiplyAdd performs (x * y) + z. // // Asm: VFMADD213PS, CPU Feature: AVX512F func (x Float32x16) FusedMultiplyAdd(y Float32x16, z Float32x16) Float32x16 -// FusedMultiplyAdd performs `(v1 * v2) + v3`. +// FusedMultiplyAdd performs (x * y) + z. // // Asm: VFMADD213PD, CPU Feature: AVX512F func (x Float64x2) FusedMultiplyAdd(y Float64x2, z Float64x2) Float64x2 -// FusedMultiplyAdd performs `(v1 * v2) + v3`. +// FusedMultiplyAdd performs (x * y) + z. // // Asm: VFMADD213PD, CPU Feature: AVX512F func (x Float64x4) FusedMultiplyAdd(y Float64x4, z Float64x4) Float64x4 -// FusedMultiplyAdd performs `(v1 * v2) + v3`. +// FusedMultiplyAdd performs (x * y) + z. // // Asm: VFMADD213PD, CPU Feature: AVX512F func (x Float64x8) FusedMultiplyAdd(y Float64x8, z Float64x8) Float64x8 /* FusedMultiplyAddMasked */ -// FusedMultiplyAddMasked performs `(v1 * v2) + v3`. +// FusedMultiplyAddMasked performs (x * y) + z. // // Asm: VFMADD213PS, CPU Feature: AVX512F func (x Float32x4) FusedMultiplyAddMasked(y Float32x4, z Float32x4, mask Mask32x4) Float32x4 -// FusedMultiplyAddMasked performs `(v1 * v2) + v3`. +// FusedMultiplyAddMasked performs (x * y) + z. // // Asm: VFMADD213PS, CPU Feature: AVX512F func (x Float32x8) FusedMultiplyAddMasked(y Float32x8, z Float32x8, mask Mask32x8) Float32x8 -// FusedMultiplyAddMasked performs `(v1 * v2) + v3`. +// FusedMultiplyAddMasked performs (x * y) + z. // // Asm: VFMADD213PS, CPU Feature: AVX512F func (x Float32x16) FusedMultiplyAddMasked(y Float32x16, z Float32x16, mask Mask32x16) Float32x16 -// FusedMultiplyAddMasked performs `(v1 * v2) + v3`. +// FusedMultiplyAddMasked performs (x * y) + z. // // Asm: VFMADD213PD, CPU Feature: AVX512F func (x Float64x2) FusedMultiplyAddMasked(y Float64x2, z Float64x2, mask Mask64x2) Float64x2 -// FusedMultiplyAddMasked performs `(v1 * v2) + v3`. +// FusedMultiplyAddMasked performs (x * y) + z. // // Asm: VFMADD213PD, CPU Feature: AVX512F func (x Float64x4) FusedMultiplyAddMasked(y Float64x4, z Float64x4, mask Mask64x4) Float64x4 -// FusedMultiplyAddMasked performs `(v1 * v2) + v3`. 
+// FusedMultiplyAddMasked performs (x * y) + z. // // Asm: VFMADD213PD, CPU Feature: AVX512F func (x Float64x8) FusedMultiplyAddMasked(y Float64x8, z Float64x8, mask Mask64x8) Float64x8 /* FusedMultiplyAddSub */ -// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// FusedMultiplyAddSub performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // // Asm: VFMADDSUB213PS, CPU Feature: AVX512F func (x Float32x4) FusedMultiplyAddSub(y Float32x4, z Float32x4) Float32x4 -// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// FusedMultiplyAddSub performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // // Asm: VFMADDSUB213PS, CPU Feature: AVX512F func (x Float32x8) FusedMultiplyAddSub(y Float32x8, z Float32x8) Float32x8 -// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// FusedMultiplyAddSub performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // // Asm: VFMADDSUB213PS, CPU Feature: AVX512F func (x Float32x16) FusedMultiplyAddSub(y Float32x16, z Float32x16) Float32x16 -// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// FusedMultiplyAddSub performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // // Asm: VFMADDSUB213PD, CPU Feature: AVX512F func (x Float64x2) FusedMultiplyAddSub(y Float64x2, z Float64x2) Float64x2 -// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// FusedMultiplyAddSub performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // // Asm: VFMADDSUB213PD, CPU Feature: AVX512F func (x Float64x4) FusedMultiplyAddSub(y Float64x4, z Float64x4) Float64x4 -// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// FusedMultiplyAddSub performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // // Asm: VFMADDSUB213PD, CPU Feature: AVX512F func (x Float64x8) FusedMultiplyAddSub(y Float64x8, z Float64x8) Float64x8 /* FusedMultiplyAddSubMasked */ -// FusedMultiplyAddSubMasked performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// FusedMultiplyAddSubMasked performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // // Asm: VFMADDSUB213PS, CPU Feature: AVX512F func (x Float32x4) FusedMultiplyAddSubMasked(y Float32x4, z Float32x4, mask Mask32x4) Float32x4 -// FusedMultiplyAddSubMasked performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// FusedMultiplyAddSubMasked performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // // Asm: VFMADDSUB213PS, CPU Feature: AVX512F func (x Float32x8) FusedMultiplyAddSubMasked(y Float32x8, z Float32x8, mask Mask32x8) Float32x8 -// FusedMultiplyAddSubMasked performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// FusedMultiplyAddSubMasked performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. 
// // Asm: VFMADDSUB213PS, CPU Feature: AVX512F func (x Float32x16) FusedMultiplyAddSubMasked(y Float32x16, z Float32x16, mask Mask32x16) Float32x16 -// FusedMultiplyAddSubMasked performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// FusedMultiplyAddSubMasked performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // // Asm: VFMADDSUB213PD, CPU Feature: AVX512F func (x Float64x2) FusedMultiplyAddSubMasked(y Float64x2, z Float64x2, mask Mask64x2) Float64x2 -// FusedMultiplyAddSubMasked performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// FusedMultiplyAddSubMasked performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // // Asm: VFMADDSUB213PD, CPU Feature: AVX512F func (x Float64x4) FusedMultiplyAddSubMasked(y Float64x4, z Float64x4, mask Mask64x4) Float64x4 -// FusedMultiplyAddSubMasked performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// FusedMultiplyAddSubMasked performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // // Asm: VFMADDSUB213PD, CPU Feature: AVX512F func (x Float64x8) FusedMultiplyAddSubMasked(y Float64x8, z Float64x8, mask Mask64x8) Float64x8 /* FusedMultiplySubAdd */ -// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// FusedMultiplySubAdd performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // // Asm: VFMSUBADD213PS, CPU Feature: AVX512F func (x Float32x4) FusedMultiplySubAdd(y Float32x4, z Float32x4) Float32x4 -// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// FusedMultiplySubAdd performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // // Asm: VFMSUBADD213PS, CPU Feature: AVX512F func (x Float32x8) FusedMultiplySubAdd(y Float32x8, z Float32x8) Float32x8 -// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// FusedMultiplySubAdd performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // // Asm: VFMSUBADD213PS, CPU Feature: AVX512F func (x Float32x16) FusedMultiplySubAdd(y Float32x16, z Float32x16) Float32x16 -// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// FusedMultiplySubAdd performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // // Asm: VFMSUBADD213PD, CPU Feature: AVX512F func (x Float64x2) FusedMultiplySubAdd(y Float64x2, z Float64x2) Float64x2 -// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// FusedMultiplySubAdd performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // // Asm: VFMSUBADD213PD, CPU Feature: AVX512F func (x Float64x4) FusedMultiplySubAdd(y Float64x4, z Float64x4) Float64x4 -// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// FusedMultiplySubAdd performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. 
// // Asm: VFMSUBADD213PD, CPU Feature: AVX512F func (x Float64x8) FusedMultiplySubAdd(y Float64x8, z Float64x8) Float64x8 /* FusedMultiplySubAddMasked */ -// FusedMultiplySubAddMasked performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// FusedMultiplySubAddMasked performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // // Asm: VFMSUBADD213PS, CPU Feature: AVX512F func (x Float32x4) FusedMultiplySubAddMasked(y Float32x4, z Float32x4, mask Mask32x4) Float32x4 -// FusedMultiplySubAddMasked performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// FusedMultiplySubAddMasked performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // // Asm: VFMSUBADD213PS, CPU Feature: AVX512F func (x Float32x8) FusedMultiplySubAddMasked(y Float32x8, z Float32x8, mask Mask32x8) Float32x8 -// FusedMultiplySubAddMasked performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// FusedMultiplySubAddMasked performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // // Asm: VFMSUBADD213PS, CPU Feature: AVX512F func (x Float32x16) FusedMultiplySubAddMasked(y Float32x16, z Float32x16, mask Mask32x16) Float32x16 -// FusedMultiplySubAddMasked performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// FusedMultiplySubAddMasked performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // // Asm: VFMSUBADD213PD, CPU Feature: AVX512F func (x Float64x2) FusedMultiplySubAddMasked(y Float64x2, z Float64x2, mask Mask64x2) Float64x2 -// FusedMultiplySubAddMasked performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// FusedMultiplySubAddMasked performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // // Asm: VFMSUBADD213PD, CPU Feature: AVX512F func (x Float64x4) FusedMultiplySubAddMasked(y Float64x4, z Float64x4, mask Mask64x4) Float64x4 -// FusedMultiplySubAddMasked performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// FusedMultiplySubAddMasked performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // // Asm: VFMSUBADD213PD, CPU Feature: AVX512F func (x Float64x8) FusedMultiplySubAddMasked(y Float64x8, z Float64x8, mask Mask64x8) Float64x8 @@ -5373,37 +5373,37 @@ func (x Int16x32) PairDotProd(y Int16x32) Int32x16 /* PairDotProdAccumulate */ -// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// PairDotProdAccumulate performs dot products on pairs of elements of x and y and then adds z. // // Asm: VPDPWSSD, CPU Feature: AVXVNNI -func (x Int32x4) PairDotProdAccumulate(y Int16x8, z Int16x8) Int32x4 +func (x Int16x8) PairDotProdAccumulate(y Int16x8, z Int32x4) Int32x4 -// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// PairDotProdAccumulate performs dot products on pairs of elements of x and y and then adds z. // // Asm: VPDPWSSD, CPU Feature: AVXVNNI -func (x Int32x8) PairDotProdAccumulate(y Int16x16, z Int16x16) Int32x8 +func (x Int16x16) PairDotProdAccumulate(y Int16x16, z Int32x8) Int32x8 -// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. 
+// PairDotProdAccumulate performs dot products on pairs of elements of x and y and then adds z. // // Asm: VPDPWSSD, CPU Feature: AVX512VNNI -func (x Int32x16) PairDotProdAccumulate(y Int16x32, z Int16x32) Int32x16 +func (x Int16x32) PairDotProdAccumulate(y Int16x32, z Int32x16) Int32x16 /* PairDotProdAccumulateMasked */ -// PairDotProdAccumulateMasked performs dot products on pairs of elements of y and z and accumulates the results to x. +// PairDotProdAccumulateMasked performs dot products on pairs of elements of x and y and then adds z. // // Asm: VPDPWSSD, CPU Feature: AVX512VNNI -func (x Int32x4) PairDotProdAccumulateMasked(y Int16x8, z Int16x8, mask Mask32x4) Int32x4 +func (x Int16x8) PairDotProdAccumulateMasked(y Int16x8, z Int32x4, mask Mask32x4) Int32x4 -// PairDotProdAccumulateMasked performs dot products on pairs of elements of y and z and accumulates the results to x. +// PairDotProdAccumulateMasked performs dot products on pairs of elements of x and y and then adds z. // // Asm: VPDPWSSD, CPU Feature: AVX512VNNI -func (x Int32x8) PairDotProdAccumulateMasked(y Int16x16, z Int16x16, mask Mask32x8) Int32x8 +func (x Int16x16) PairDotProdAccumulateMasked(y Int16x16, z Int32x8, mask Mask32x8) Int32x8 -// PairDotProdAccumulateMasked performs dot products on pairs of elements of y and z and accumulates the results to x. +// PairDotProdAccumulateMasked performs dot products on pairs of elements of x and y and then adds z. // // Asm: VPDPWSSD, CPU Feature: AVX512VNNI -func (x Int32x16) PairDotProdAccumulateMasked(y Int16x32, z Int16x32, mask Mask32x16) Int32x16 +func (x Int16x32) PairDotProdAccumulateMasked(y Int16x32, z Int32x16, mask Mask32x16) Int32x16 /* PairDotProdMasked */ @@ -7469,37 +7469,37 @@ func (x Uint16x32) SaturatedAddMasked(y Uint16x32, mask Mask16x32) Uint16x32 /* SaturatedPairDotProdAccumulate */ -// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of x and y and then adds z. // // Asm: VPDPWSSDS, CPU Feature: AVXVNNI -func (x Int32x4) SaturatedPairDotProdAccumulate(y Int16x8, z Int16x8) Int32x4 +func (x Int16x8) SaturatedPairDotProdAccumulate(y Int16x8, z Int32x4) Int32x4 -// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of x and y and then adds z. // // Asm: VPDPWSSDS, CPU Feature: AVXVNNI -func (x Int32x8) SaturatedPairDotProdAccumulate(y Int16x16, z Int16x16) Int32x8 +func (x Int16x16) SaturatedPairDotProdAccumulate(y Int16x16, z Int32x8) Int32x8 -// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of x and y and then adds z. // // Asm: VPDPWSSDS, CPU Feature: AVX512VNNI -func (x Int32x16) SaturatedPairDotProdAccumulate(y Int16x32, z Int16x32) Int32x16 +func (x Int16x32) SaturatedPairDotProdAccumulate(y Int16x32, z Int32x16) Int32x16 /* SaturatedPairDotProdAccumulateMasked */ -// SaturatedPairDotProdAccumulateMasked performs dot products on pairs of elements of y and z and accumulates the results to x. +// SaturatedPairDotProdAccumulateMasked performs dot products on pairs of elements of x and y and then adds z. 
// // Asm: VPDPWSSDS, CPU Feature: AVX512VNNI -func (x Int32x4) SaturatedPairDotProdAccumulateMasked(y Int16x8, z Int16x8, mask Mask32x4) Int32x4 +func (x Int16x8) SaturatedPairDotProdAccumulateMasked(y Int16x8, z Int32x4, mask Mask32x4) Int32x4 -// SaturatedPairDotProdAccumulateMasked performs dot products on pairs of elements of y and z and accumulates the results to x. +// SaturatedPairDotProdAccumulateMasked performs dot products on pairs of elements of x and y and then adds z. // // Asm: VPDPWSSDS, CPU Feature: AVX512VNNI -func (x Int32x8) SaturatedPairDotProdAccumulateMasked(y Int16x16, z Int16x16, mask Mask32x8) Int32x8 +func (x Int16x16) SaturatedPairDotProdAccumulateMasked(y Int16x16, z Int32x8, mask Mask32x8) Int32x8 -// SaturatedPairDotProdAccumulateMasked performs dot products on pairs of elements of y and z and accumulates the results to x. +// SaturatedPairDotProdAccumulateMasked performs dot products on pairs of elements of x and y and then adds z. // // Asm: VPDPWSSDS, CPU Feature: AVX512VNNI -func (x Int32x16) SaturatedPairDotProdAccumulateMasked(y Int16x32, z Int16x32, mask Mask32x16) Int32x16 +func (x Int16x32) SaturatedPairDotProdAccumulateMasked(y Int16x32, z Int32x16, mask Mask32x16) Int32x16 /* SaturatedPairwiseAdd */ @@ -7695,67 +7695,37 @@ func (x Uint8x64) SaturatedUnsignedSignedPairDotProdMasked(y Int8x64, mask Mask1 /* SaturatedUnsignedSignedQuadDotProdAccumulate */ -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of x and y and then adds z. // // Asm: VPDPBUSDS, CPU Feature: AVXVNNI -func (x Int32x4) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16) Int32x4 +func (x Int8x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int32x4) Int32x4 -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of x and y and then adds z. // // Asm: VPDPBUSDS, CPU Feature: AVXVNNI -func (x Int32x8) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32) Int32x8 +func (x Int8x32) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int32x8) Int32x8 -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of x and y and then adds z. // // Asm: VPDPBUSDS, CPU Feature: AVX512VNNI -func (x Int32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Int32x16 - -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. -// -// Asm: VPDPBUSDS, CPU Feature: AVXVNNI -func (x Uint32x4) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16) Uint32x4 - -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
-// -// Asm: VPDPBUSDS, CPU Feature: AVXVNNI -func (x Uint32x8) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32) Uint32x8 - -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. -// -// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI -func (x Uint32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Uint32x16 +func (x Int8x64) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16) Int32x16 /* SaturatedUnsignedSignedQuadDotProdAccumulateMasked */ -// SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. -// -// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI -func (x Int32x4) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int8x16, mask Mask32x4) Int32x4 - -// SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. -// -// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI -func (x Int32x8) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int8x32, mask Mask32x8) Int32x8 - -// SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. -// -// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI -func (x Int32x16) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int8x64, mask Mask32x16) Int32x16 - -// SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of x and y and then adds z. // // Asm: VPDPBUSDS, CPU Feature: AVX512VNNI -func (x Uint32x4) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int8x16, mask Mask32x4) Uint32x4 +func (x Int8x16) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int32x4, mask Mask32x4) Int32x4 -// SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of x and y and then adds z. // // Asm: VPDPBUSDS, CPU Feature: AVX512VNNI -func (x Uint32x8) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int8x32, mask Mask32x8) Uint32x8 +func (x Int8x32) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int32x8, mask Mask32x8) Int32x8 -// SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of x and y and then adds z. 
// // Asm: VPDPBUSDS, CPU Feature: AVX512VNNI -func (x Uint32x16) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int8x64, mask Mask32x16) Uint32x16 +func (x Int8x64) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int32x16, mask Mask32x16) Int32x16 /* Set128 */ @@ -10165,67 +10135,37 @@ func (x Float64x8) TruncWithPrecisionMasked(prec uint8, mask Mask64x8) Float64x8 /* UnsignedSignedQuadDotProdAccumulate */ -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of x and y and then adds z. // // Asm: VPDPBUSD, CPU Feature: AVXVNNI -func (x Int32x4) UnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16) Int32x4 +func (x Int8x16) UnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int32x4) Int32x4 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of x and y and then adds z. // // Asm: VPDPBUSD, CPU Feature: AVXVNNI -func (x Int32x8) UnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32) Int32x8 +func (x Int8x32) UnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int32x8) Int32x8 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of x and y and then adds z. // // Asm: VPDPBUSD, CPU Feature: AVX512VNNI -func (x Int32x16) UnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Int32x16 - -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. -// -// Asm: VPDPBUSD, CPU Feature: AVXVNNI -func (x Uint32x4) UnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16) Uint32x4 - -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. -// -// Asm: VPDPBUSD, CPU Feature: AVXVNNI -func (x Uint32x8) UnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32) Uint32x8 - -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. -// -// Asm: VPDPBUSD, CPU Feature: AVX512VNNI -func (x Uint32x16) UnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Uint32x16 +func (x Int8x64) UnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16) Int32x16 /* UnsignedSignedQuadDotProdAccumulateMasked */ -// UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x. -// -// Asm: VPDPBUSD, CPU Feature: AVX512VNNI -func (x Int32x4) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int8x16, mask Mask32x4) Int32x4 - -// UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x. -// -// Asm: VPDPBUSD, CPU Feature: AVX512VNNI -func (x Int32x8) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int8x32, mask Mask32x8) Int32x8 - -// UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
-// -// Asm: VPDPBUSD, CPU Feature: AVX512VNNI -func (x Int32x16) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int8x64, mask Mask32x16) Int32x16 - -// UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of x and y and then adds z. // // Asm: VPDPBUSD, CPU Feature: AVX512VNNI -func (x Uint32x4) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int8x16, mask Mask32x4) Uint32x4 +func (x Int8x16) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int32x4, mask Mask32x4) Int32x4 -// UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of x and y and then adds z. // // Asm: VPDPBUSD, CPU Feature: AVX512VNNI -func (x Uint32x8) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int8x32, mask Mask32x8) Uint32x8 +func (x Int8x32) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int32x8, mask Mask32x8) Int32x8 -// UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of x and y and then adds z. // // Asm: VPDPBUSD, CPU Feature: AVX512VNNI -func (x Uint32x16) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int8x64, mask Mask32x16) Uint32x16 +func (x Int8x64) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int32x16, mask Mask32x16) Int32x16 /* Xor */ diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index d19889cc76482e..14e5fe31794f99 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -202,6 +202,25 @@ func TestAndNot(t *testing.T) { []int32{0b10, 0b00, 0b10, 0b00}, "AndNot") } +func TestPairDotProdAccumulate(t *testing.T) { + if !simd.HasAVX512GFNI() { + // TODO: this function is actually VNNI, let's implement and call the right check. + t.Skip("Test requires HasAVX512GFNI, not available on this hardware") + return + } + x := simd.LoadInt16x8Slice([]int16{2, 2, 2, 2, 2, 2, 2, 2}) + z := simd.LoadInt32x4Slice([]int32{3, 3, 3, 3}) + want := []int32{11, 11, 11, 11} + got := make([]int32, 4) + z = x.PairDotProdAccumulate(x, z) + z.StoreSlice(got) + for i := range 4 { + if got[i] != want[i] { + t.Errorf("a and b differ at index %d, got=%d, want=%d", i, got[i], want[i]) + } + } +} + // checkInt8Slices ensures that b and a are equal, to the end of b. // also serves to use the slices, to prevent accidental optimization. 
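The TestPairDotProdAccumulate test above shows the call pattern for the new receiver ordering. A similar sketch for the quad dot product variant, under the same caveats: the HasAVX512GFNI check is the same placeholder the test above uses (its TODO notes the proper VNNI check is not wired up yet), the test name and input values are illustrative, and the expected lane value follows the documented semantics (dot products of groups of 4 elements of x and y, then add z, so 4*(1*2)+3 = 11):

func TestUnsignedSignedQuadDotProdAccumulateSketch(t *testing.T) {
	if !simd.HasAVX512GFNI() {
		// TODO: as in TestPairDotProdAccumulate, the proper check would be for VNNI.
		t.Skip("Test requires HasAVX512GFNI, not available on this hardware")
	}
	x := simd.LoadInt8x16Slice([]int8{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})
	y := simd.LoadUint8x16Slice([]uint8{2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2})
	z := simd.LoadInt32x4Slice([]int32{3, 3, 3, 3})
	// Each Int32 lane covers a group of 4 byte lanes: 4*(1*2) + 3 = 11.
	z = x.UnsignedSignedQuadDotProdAccumulate(y, z)
	got := make([]int32, 4)
	z.StoreSlice(got)
	for i := range got {
		if got[i] != 11 {
			t.Errorf("lane %d: got %d, want %d", i, got[i], 11)
		}
	}
}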
func checkInt8Slices(t *testing.T, a, b []int8) { diff --git a/src/simd/simd_wrapped_test.go b/src/simd/simd_wrapped_test.go index 8f0fb665be6e57..d46c05e5290c4e 100644 --- a/src/simd/simd_wrapped_test.go +++ b/src/simd/simd_wrapped_test.go @@ -3294,55 +3294,6 @@ func testInt32x4Compare(t *testing.T, v0 []int32, v1 []int32, want []int32, whic } } -func testInt32x4Int16x8Int16x8Int32x4(t *testing.T, v0 []int32, v1 []int16, v2 []int16, want []int32, which string) { - t.Helper() - var gotv simd.Int32x4 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x4Slice(v0) - vec1 := simd.LoadInt16x8Slice(v1) - vec2 := simd.LoadInt16x8Slice(v2) - switch which { - case "PairDotProdAccumulate": - gotv = vec0.PairDotProdAccumulate(vec1, vec2) - case "SaturatedPairDotProdAccumulate": - gotv = vec0.SaturatedPairDotProdAccumulate(vec1, vec2) - - default: - t.Errorf("Unknown method: Int32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x4Int16x8Int16x8Mask32x4Int32x4(t *testing.T, v0 []int32, v1 []int16, v2 []int16, v3 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x4 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x4Slice(v0) - vec1 := simd.LoadInt16x8Slice(v1) - vec2 := simd.LoadInt16x8Slice(v2) - vec3 := simd.LoadInt32x4Slice(v3) - switch which { - case "PairDotProdAccumulateMasked": - gotv = vec0.PairDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x4()) - case "SaturatedPairDotProdAccumulateMasked": - gotv = vec0.SaturatedPairDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x4()) - - default: - t.Errorf("Unknown method: Int32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - func testInt32x4Mask32x4Int32x4(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { t.Helper() var gotv simd.Int32x4 @@ -3445,55 +3396,6 @@ func testInt32x4TernaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, } } -func testInt32x4Uint8x16Int8x16Int32x4(t *testing.T, v0 []int32, v1 []uint8, v2 []int8, want []int32, which string) { - t.Helper() - var gotv simd.Int32x4 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x4Slice(v0) - vec1 := simd.LoadUint8x16Slice(v1) - vec2 := simd.LoadInt8x16Slice(v2) - switch which { - case "SaturatedUnsignedSignedQuadDotProdAccumulate": - gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2) - case "UnsignedSignedQuadDotProdAccumulate": - gotv = vec0.UnsignedSignedQuadDotProdAccumulate(vec1, vec2) - - default: - t.Errorf("Unknown method: Int32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x4Uint8x16Int8x16Mask32x4Int32x4(t *testing.T, v0 []int32, v1 []uint8, v2 []int8, v3 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x4 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x4Slice(v0) - vec1 := simd.LoadUint8x16Slice(v1) - vec2 := simd.LoadInt8x16Slice(v2) - vec3 := simd.LoadInt32x4Slice(v3) - switch which { - case "SaturatedUnsignedSignedQuadDotProdAccumulateMasked": - gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x4()) - case "UnsignedSignedQuadDotProdAccumulateMasked": - gotv = 
vec0.UnsignedSignedQuadDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x4()) - - default: - t.Errorf("Unknown method: Int32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - func testInt32x4Unary(t *testing.T, v0 []int32, want []int32, which string) { t.Helper() var gotv simd.Int32x4 @@ -3688,55 +3590,6 @@ func testInt32x8Compare(t *testing.T, v0 []int32, v1 []int32, want []int32, whic } } -func testInt32x8Int16x16Int16x16Int32x8(t *testing.T, v0 []int32, v1 []int16, v2 []int16, want []int32, which string) { - t.Helper() - var gotv simd.Int32x8 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x8Slice(v0) - vec1 := simd.LoadInt16x16Slice(v1) - vec2 := simd.LoadInt16x16Slice(v2) - switch which { - case "PairDotProdAccumulate": - gotv = vec0.PairDotProdAccumulate(vec1, vec2) - case "SaturatedPairDotProdAccumulate": - gotv = vec0.SaturatedPairDotProdAccumulate(vec1, vec2) - - default: - t.Errorf("Unknown method: Int32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x8Int16x16Int16x16Mask32x8Int32x8(t *testing.T, v0 []int32, v1 []int16, v2 []int16, v3 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x8 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x8Slice(v0) - vec1 := simd.LoadInt16x16Slice(v1) - vec2 := simd.LoadInt16x16Slice(v2) - vec3 := simd.LoadInt32x8Slice(v3) - switch which { - case "PairDotProdAccumulateMasked": - gotv = vec0.PairDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x8()) - case "SaturatedPairDotProdAccumulateMasked": - gotv = vec0.SaturatedPairDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x8()) - - default: - t.Errorf("Unknown method: Int32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - func testInt32x8Mask32x8Int32x8(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { t.Helper() var gotv simd.Int32x8 @@ -3839,55 +3692,6 @@ func testInt32x8TernaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, } } -func testInt32x8Uint8x32Int8x32Int32x8(t *testing.T, v0 []int32, v1 []uint8, v2 []int8, want []int32, which string) { - t.Helper() - var gotv simd.Int32x8 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x8Slice(v0) - vec1 := simd.LoadUint8x32Slice(v1) - vec2 := simd.LoadInt8x32Slice(v2) - switch which { - case "SaturatedUnsignedSignedQuadDotProdAccumulate": - gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2) - case "UnsignedSignedQuadDotProdAccumulate": - gotv = vec0.UnsignedSignedQuadDotProdAccumulate(vec1, vec2) - - default: - t.Errorf("Unknown method: Int32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x8Uint8x32Int8x32Mask32x8Int32x8(t *testing.T, v0 []int32, v1 []uint8, v2 []int8, v3 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x8 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x8Slice(v0) - vec1 := simd.LoadUint8x32Slice(v1) - vec2 := simd.LoadInt8x32Slice(v2) - vec3 := simd.LoadInt32x8Slice(v3) - switch which { - case "SaturatedUnsignedSignedQuadDotProdAccumulateMasked": - gotv 
= vec0.SaturatedUnsignedSignedQuadDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x8()) - case "UnsignedSignedQuadDotProdAccumulateMasked": - gotv = vec0.UnsignedSignedQuadDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x8()) - - default: - t.Errorf("Unknown method: Int32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - func testInt32x8Unary(t *testing.T, v0 []int32, want []int32, which string) { t.Helper() var gotv simd.Int32x8 @@ -4055,55 +3859,6 @@ func testInt32x16Compare(t *testing.T, v0 []int32, v1 []int32, want []int32, whi } } -func testInt32x16Int16x32Int16x32Int32x16(t *testing.T, v0 []int32, v1 []int16, v2 []int16, want []int32, which string) { - t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x16Slice(v0) - vec1 := simd.LoadInt16x32Slice(v1) - vec2 := simd.LoadInt16x32Slice(v2) - switch which { - case "PairDotProdAccumulate": - gotv = vec0.PairDotProdAccumulate(vec1, vec2) - case "SaturatedPairDotProdAccumulate": - gotv = vec0.SaturatedPairDotProdAccumulate(vec1, vec2) - - default: - t.Errorf("Unknown method: Int32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x16Int16x32Int16x32Mask32x16Int32x16(t *testing.T, v0 []int32, v1 []int16, v2 []int16, v3 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x16Slice(v0) - vec1 := simd.LoadInt16x32Slice(v1) - vec2 := simd.LoadInt16x32Slice(v2) - vec3 := simd.LoadInt32x16Slice(v3) - switch which { - case "PairDotProdAccumulateMasked": - gotv = vec0.PairDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x16()) - case "SaturatedPairDotProdAccumulateMasked": - gotv = vec0.SaturatedPairDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x16()) - - default: - t.Errorf("Unknown method: Int32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - func testInt32x16Mask32x16Int32x16(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { t.Helper() var gotv simd.Int32x16 @@ -4206,55 +3961,6 @@ func testInt32x16TernaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, } } -func testInt32x16Uint8x64Int8x64Int32x16(t *testing.T, v0 []int32, v1 []uint8, v2 []int8, want []int32, which string) { - t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x16Slice(v0) - vec1 := simd.LoadUint8x64Slice(v1) - vec2 := simd.LoadInt8x64Slice(v2) - switch which { - case "SaturatedUnsignedSignedQuadDotProdAccumulate": - gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2) - case "UnsignedSignedQuadDotProdAccumulate": - gotv = vec0.UnsignedSignedQuadDotProdAccumulate(vec1, vec2) - - default: - t.Errorf("Unknown method: Int32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x16Uint8x64Int8x64Mask32x16Int32x16(t *testing.T, v0 []int32, v1 []uint8, v2 []int8, v3 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x16Slice(v0) - vec1 := 
simd.LoadUint8x64Slice(v1) - vec2 := simd.LoadInt8x64Slice(v2) - vec3 := simd.LoadInt32x16Slice(v3) - switch which { - case "SaturatedUnsignedSignedQuadDotProdAccumulateMasked": - gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x16()) - case "UnsignedSignedQuadDotProdAccumulateMasked": - gotv = vec0.UnsignedSignedQuadDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x16()) - - default: - t.Errorf("Unknown method: Int32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - func testInt32x16Unary(t *testing.T, v0 []int32, want []int32, which string) { t.Helper() var gotv simd.Int32x16 @@ -6880,55 +6586,6 @@ func testUint32x4TernaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []uint } } -func testUint32x4Uint8x16Int8x16Mask32x4Uint32x4(t *testing.T, v0 []uint32, v1 []uint8, v2 []int8, v3 []int32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x4 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x4Slice(v0) - vec1 := simd.LoadUint8x16Slice(v1) - vec2 := simd.LoadInt8x16Slice(v2) - vec3 := simd.LoadInt32x4Slice(v3) - switch which { - case "SaturatedUnsignedSignedQuadDotProdAccumulateMasked": - gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x4()) - case "UnsignedSignedQuadDotProdAccumulateMasked": - gotv = vec0.UnsignedSignedQuadDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x4()) - - default: - t.Errorf("Unknown method: Uint32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x4Uint8x16Int8x16Uint32x4(t *testing.T, v0 []uint32, v1 []uint8, v2 []int8, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x4 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x4Slice(v0) - vec1 := simd.LoadUint8x16Slice(v1) - vec2 := simd.LoadInt8x16Slice(v2) - switch which { - case "SaturatedUnsignedSignedQuadDotProdAccumulate": - gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2) - case "UnsignedSignedQuadDotProdAccumulate": - gotv = vec0.UnsignedSignedQuadDotProdAccumulate(vec1, vec2) - - default: - t.Errorf("Unknown method: Uint32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - func testUint32x4Unary(t *testing.T, v0 []uint32, want []uint32, which string) { t.Helper() var gotv simd.Uint32x4 @@ -7215,55 +6872,6 @@ func testUint32x8TernaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []uint } } -func testUint32x8Uint8x32Int8x32Mask32x8Uint32x8(t *testing.T, v0 []uint32, v1 []uint8, v2 []int8, v3 []int32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x8 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x8Slice(v0) - vec1 := simd.LoadUint8x32Slice(v1) - vec2 := simd.LoadInt8x32Slice(v2) - vec3 := simd.LoadInt32x8Slice(v3) - switch which { - case "SaturatedUnsignedSignedQuadDotProdAccumulateMasked": - gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x8()) - case "UnsignedSignedQuadDotProdAccumulateMasked": - gotv = vec0.UnsignedSignedQuadDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x8()) - - default: - t.Errorf("Unknown method: Uint32x8.%s", which) - } - gotv.StoreSlice(got) - for i 
:= range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x8Uint8x32Int8x32Uint32x8(t *testing.T, v0 []uint32, v1 []uint8, v2 []int8, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x8 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x8Slice(v0) - vec1 := simd.LoadUint8x32Slice(v1) - vec2 := simd.LoadInt8x32Slice(v2) - switch which { - case "SaturatedUnsignedSignedQuadDotProdAccumulate": - gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2) - case "UnsignedSignedQuadDotProdAccumulate": - gotv = vec0.UnsignedSignedQuadDotProdAccumulate(vec1, vec2) - - default: - t.Errorf("Unknown method: Uint32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - func testUint32x8Unary(t *testing.T, v0 []uint32, want []uint32, which string) { t.Helper() var gotv simd.Uint32x8 @@ -7525,55 +7133,6 @@ func testUint32x16TernaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []uin } } -func testUint32x16Uint8x64Int8x64Mask32x16Uint32x16(t *testing.T, v0 []uint32, v1 []uint8, v2 []int8, v3 []int32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x16 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x16Slice(v0) - vec1 := simd.LoadUint8x64Slice(v1) - vec2 := simd.LoadInt8x64Slice(v2) - vec3 := simd.LoadInt32x16Slice(v3) - switch which { - case "SaturatedUnsignedSignedQuadDotProdAccumulateMasked": - gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x16()) - case "UnsignedSignedQuadDotProdAccumulateMasked": - gotv = vec0.UnsignedSignedQuadDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x16()) - - default: - t.Errorf("Unknown method: Uint32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x16Uint8x64Int8x64Uint32x16(t *testing.T, v0 []uint32, v1 []uint8, v2 []int8, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x16 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x16Slice(v0) - vec1 := simd.LoadUint8x64Slice(v1) - vec2 := simd.LoadInt8x64Slice(v2) - switch which { - case "SaturatedUnsignedSignedQuadDotProdAccumulate": - gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2) - case "UnsignedSignedQuadDotProdAccumulate": - gotv = vec0.UnsignedSignedQuadDotProdAccumulate(vec1, vec2) - - default: - t.Errorf("Unknown method: Uint32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - func testUint32x16Unary(t *testing.T, v0 []uint32, want []uint32, which string) { t.Helper() var gotv simd.Uint32x16 @@ -8430,6 +7989,8 @@ func testUint64x8UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint6 // GaloisFieldAffineTransformMasked // Get128 // GetElem +// PairDotProdAccumulate +// PairDotProdAccumulateMasked // Permute // Permute2 // Permute2Masked @@ -8440,6 +8001,10 @@ func testUint64x8UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint6 // RotateAllRightMasked // RoundWithPrecision // RoundWithPrecisionMasked +// SaturatedPairDotProdAccumulate +// SaturatedPairDotProdAccumulateMasked +// SaturatedUnsignedSignedQuadDotProdAccumulate +// 
SaturatedUnsignedSignedQuadDotProdAccumulateMasked // Set128 // SetElem // ShiftAllLeft @@ -8452,3 +8017,5 @@ func testUint64x8UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint6 // ShiftAllRightMasked // TruncWithPrecision // TruncWithPrecisionMasked +// UnsignedSignedQuadDotProdAccumulate +// UnsignedSignedQuadDotProdAccumulateMasked From 03a3887f31264e778c9aaf62247a478eedd3633d Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Wed, 16 Jul 2025 17:02:47 +0000 Subject: [PATCH 090/139] [dev.simd] simd: clean up masked op doc This CL is generated by CL 688395. Change-Id: I40c6a64c6002b28040e6af746481b4deb2049179 Reviewed-on: https://go-review.googlesource.com/c/go/+/688396 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/simd/ops_amd64.go | 1940 +++++++++++++++++++++++++++++++++++++---- 1 file changed, 1786 insertions(+), 154 deletions(-) diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 4624105d7998ca..a5c2f2d5c28187 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -70,61 +70,85 @@ func (x Int64x8) Absolute() Int64x8 // AbsoluteMasked computes the absolute value of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPABSB, CPU Feature: AVX512BW func (x Int8x16) AbsoluteMasked(mask Mask8x16) Int8x16 // AbsoluteMasked computes the absolute value of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPABSB, CPU Feature: AVX512BW func (x Int8x32) AbsoluteMasked(mask Mask8x32) Int8x32 // AbsoluteMasked computes the absolute value of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPABSB, CPU Feature: AVX512BW func (x Int8x64) AbsoluteMasked(mask Mask8x64) Int8x64 // AbsoluteMasked computes the absolute value of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPABSW, CPU Feature: AVX512BW func (x Int16x8) AbsoluteMasked(mask Mask16x8) Int16x8 // AbsoluteMasked computes the absolute value of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPABSW, CPU Feature: AVX512BW func (x Int16x16) AbsoluteMasked(mask Mask16x16) Int16x16 // AbsoluteMasked computes the absolute value of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPABSW, CPU Feature: AVX512BW func (x Int16x32) AbsoluteMasked(mask Mask16x32) Int16x32 // AbsoluteMasked computes the absolute value of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPABSD, CPU Feature: AVX512F func (x Int32x4) AbsoluteMasked(mask Mask32x4) Int32x4 // AbsoluteMasked computes the absolute value of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPABSD, CPU Feature: AVX512F func (x Int32x8) AbsoluteMasked(mask Mask32x8) Int32x8 // AbsoluteMasked computes the absolute value of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPABSD, CPU Feature: AVX512F func (x Int32x16) AbsoluteMasked(mask Mask32x16) Int32x16 // AbsoluteMasked computes the absolute value of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPABSQ, CPU Feature: AVX512F func (x Int64x2) AbsoluteMasked(mask Mask64x2) Int64x2 // AbsoluteMasked computes the absolute value of each element. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPABSQ, CPU Feature: AVX512F func (x Int64x4) AbsoluteMasked(mask Mask64x4) Int64x4 // AbsoluteMasked computes the absolute value of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPABSQ, CPU Feature: AVX512F func (x Int64x8) AbsoluteMasked(mask Mask64x8) Int64x8 @@ -284,151 +308,211 @@ func (x Uint64x8) Add(y Uint64x8) Uint64x8 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VADDPS, CPU Feature: AVX512F func (x Float32x4) AddMasked(y Float32x4, mask Mask32x4) Float32x4 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VADDPS, CPU Feature: AVX512F func (x Float32x8) AddMasked(y Float32x8, mask Mask32x8) Float32x8 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VADDPS, CPU Feature: AVX512F func (x Float32x16) AddMasked(y Float32x16, mask Mask32x16) Float32x16 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VADDPD, CPU Feature: AVX512F func (x Float64x2) AddMasked(y Float64x2, mask Mask64x2) Float64x2 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VADDPD, CPU Feature: AVX512F func (x Float64x4) AddMasked(y Float64x4, mask Mask64x4) Float64x4 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VADDPD, CPU Feature: AVX512F func (x Float64x8) AddMasked(y Float64x8, mask Mask64x8) Float64x8 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDB, CPU Feature: AVX512BW func (x Int8x16) AddMasked(y Int8x16, mask Mask8x16) Int8x16 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDB, CPU Feature: AVX512BW func (x Int8x32) AddMasked(y Int8x32, mask Mask8x32) Int8x32 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDB, CPU Feature: AVX512BW func (x Int8x64) AddMasked(y Int8x64, mask Mask8x64) Int8x64 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDW, CPU Feature: AVX512BW func (x Int16x8) AddMasked(y Int16x8, mask Mask16x8) Int16x8 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDW, CPU Feature: AVX512BW func (x Int16x16) AddMasked(y Int16x16, mask Mask16x16) Int16x16 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDW, CPU Feature: AVX512BW func (x Int16x32) AddMasked(y Int16x32, mask Mask16x32) Int16x32 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDD, CPU Feature: AVX512F func (x Int32x4) AddMasked(y Int32x4, mask Mask32x4) Int32x4 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPADDD, CPU Feature: AVX512F func (x Int32x8) AddMasked(y Int32x8, mask Mask32x8) Int32x8 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDD, CPU Feature: AVX512F func (x Int32x16) AddMasked(y Int32x16, mask Mask32x16) Int32x16 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDQ, CPU Feature: AVX512F func (x Int64x2) AddMasked(y Int64x2, mask Mask64x2) Int64x2 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDQ, CPU Feature: AVX512F func (x Int64x4) AddMasked(y Int64x4, mask Mask64x4) Int64x4 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDQ, CPU Feature: AVX512F func (x Int64x8) AddMasked(y Int64x8, mask Mask64x8) Int64x8 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDB, CPU Feature: AVX512BW func (x Uint8x16) AddMasked(y Uint8x16, mask Mask8x16) Uint8x16 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDB, CPU Feature: AVX512BW func (x Uint8x32) AddMasked(y Uint8x32, mask Mask8x32) Uint8x32 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDB, CPU Feature: AVX512BW func (x Uint8x64) AddMasked(y Uint8x64, mask Mask8x64) Uint8x64 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDW, CPU Feature: AVX512BW func (x Uint16x8) AddMasked(y Uint16x8, mask Mask16x8) Uint16x8 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDW, CPU Feature: AVX512BW func (x Uint16x16) AddMasked(y Uint16x16, mask Mask16x16) Uint16x16 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDW, CPU Feature: AVX512BW func (x Uint16x32) AddMasked(y Uint16x32, mask Mask16x32) Uint16x32 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDD, CPU Feature: AVX512F func (x Uint32x4) AddMasked(y Uint32x4, mask Mask32x4) Uint32x4 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDD, CPU Feature: AVX512F func (x Uint32x8) AddMasked(y Uint32x8, mask Mask32x8) Uint32x8 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDD, CPU Feature: AVX512F func (x Uint32x16) AddMasked(y Uint32x16, mask Mask32x16) Uint32x16 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDQ, CPU Feature: AVX512F func (x Uint64x2) AddMasked(y Uint64x2, mask Mask64x2) Uint64x2 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPADDQ, CPU Feature: AVX512F func (x Uint64x4) AddMasked(y Uint64x4, mask Mask64x4) Uint64x4 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDQ, CPU Feature: AVX512F func (x Uint64x8) AddMasked(y Uint64x8, mask Mask64x8) Uint64x8 @@ -486,7 +570,7 @@ func (x Int32x4) And(y Int32x4) Int32x4 // Asm: VPAND, CPU Feature: AVX2 func (x Int32x8) And(y Int32x8) Int32x8 -// And performs a masked bitwise AND operation between two vectors. +// And performs a bitwise AND operation between two vectors. // // Asm: VPANDD, CPU Feature: AVX512F func (x Int32x16) And(y Int32x16) Int32x16 @@ -501,7 +585,7 @@ func (x Int64x2) And(y Int64x2) Int64x2 // Asm: VPAND, CPU Feature: AVX2 func (x Int64x4) And(y Int64x4) Int64x4 -// And performs a masked bitwise AND operation between two vectors. +// And performs a bitwise AND operation between two vectors. // // Asm: VPANDQ, CPU Feature: AVX512F func (x Int64x8) And(y Int64x8) Int64x8 @@ -536,7 +620,7 @@ func (x Uint32x4) And(y Uint32x4) Uint32x4 // Asm: VPAND, CPU Feature: AVX2 func (x Uint32x8) And(y Uint32x8) Uint32x8 -// And performs a masked bitwise AND operation between two vectors. +// And performs a bitwise AND operation between two vectors. // // Asm: VPANDD, CPU Feature: AVX512F func (x Uint32x16) And(y Uint32x16) Uint32x16 @@ -551,69 +635,93 @@ func (x Uint64x2) And(y Uint64x2) Uint64x2 // Asm: VPAND, CPU Feature: AVX2 func (x Uint64x4) And(y Uint64x4) Uint64x4 -// And performs a masked bitwise AND operation between two vectors. +// And performs a bitwise AND operation between two vectors. // // Asm: VPANDQ, CPU Feature: AVX512F func (x Uint64x8) And(y Uint64x8) Uint64x8 /* AndMasked */ -// AndMasked performs a masked bitwise AND operation between two vectors. +// AndMasked performs a bitwise AND operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPANDD, CPU Feature: AVX512F func (x Int32x4) AndMasked(y Int32x4, mask Mask32x4) Int32x4 -// AndMasked performs a masked bitwise AND operation between two vectors. +// AndMasked performs a bitwise AND operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPANDD, CPU Feature: AVX512F func (x Int32x8) AndMasked(y Int32x8, mask Mask32x8) Int32x8 -// AndMasked performs a masked bitwise AND operation between two vectors. +// AndMasked performs a bitwise AND operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPANDD, CPU Feature: AVX512F func (x Int32x16) AndMasked(y Int32x16, mask Mask32x16) Int32x16 -// AndMasked performs a masked bitwise AND operation between two vectors. +// AndMasked performs a bitwise AND operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPANDQ, CPU Feature: AVX512F func (x Int64x2) AndMasked(y Int64x2, mask Mask64x2) Int64x2 -// AndMasked performs a masked bitwise AND operation between two vectors. +// AndMasked performs a bitwise AND operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPANDQ, CPU Feature: AVX512F func (x Int64x4) AndMasked(y Int64x4, mask Mask64x4) Int64x4 -// AndMasked performs a masked bitwise AND operation between two vectors. +// AndMasked performs a bitwise AND operation between two vectors. +// +// This operation is applied selectively under a write mask. 
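The sentence added throughout this hunk, "This operation is applied selectively under a write mask.", can be read against a small example. The sketch below builds a Mask32x4 with the AsMask32x4 conversion used by the wrapped tests in this CL and applies AddMasked; whether masked-off lanes come back zeroed or merged is not stated by these docs, so the sketch returns the result rather than asserting it, and the -1/0 lane encoding for the mask source is an assumption:

// maskedAddSketch is illustrative only; it assumes it lives in a test file
// that already imports the simd package.
func maskedAddSketch() []int32 {
	x := simd.LoadInt32x4Slice([]int32{1, 2, 3, 4})
	y := simd.LoadInt32x4Slice([]int32{10, 20, 30, 40})
	// Assumed encoding: -1 (all bits set) selects a lane, 0 masks it off.
	m := simd.LoadInt32x4Slice([]int32{-1, 0, -1, 0}).AsMask32x4()
	sum := x.AddMasked(y, m) // lanes 0 and 2 are computed (11, 33); lanes 1 and 3 are masked off
	out := make([]int32, 4)
	sum.StoreSlice(out)
	return out
}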
// // Asm: VPANDQ, CPU Feature: AVX512F func (x Int64x8) AndMasked(y Int64x8, mask Mask64x8) Int64x8 -// AndMasked performs a masked bitwise AND operation between two vectors. +// AndMasked performs a bitwise AND operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPANDD, CPU Feature: AVX512F func (x Uint32x4) AndMasked(y Uint32x4, mask Mask32x4) Uint32x4 -// AndMasked performs a masked bitwise AND operation between two vectors. +// AndMasked performs a bitwise AND operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPANDD, CPU Feature: AVX512F func (x Uint32x8) AndMasked(y Uint32x8, mask Mask32x8) Uint32x8 -// AndMasked performs a masked bitwise AND operation between two vectors. +// AndMasked performs a bitwise AND operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPANDD, CPU Feature: AVX512F func (x Uint32x16) AndMasked(y Uint32x16, mask Mask32x16) Uint32x16 -// AndMasked performs a masked bitwise AND operation between two vectors. +// AndMasked performs a bitwise AND operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPANDQ, CPU Feature: AVX512F func (x Uint64x2) AndMasked(y Uint64x2, mask Mask64x2) Uint64x2 -// AndMasked performs a masked bitwise AND operation between two vectors. +// AndMasked performs a bitwise AND operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPANDQ, CPU Feature: AVX512F func (x Uint64x4) AndMasked(y Uint64x4, mask Mask64x4) Uint64x4 -// AndMasked performs a masked bitwise AND operation between two vectors. +// AndMasked performs a bitwise AND operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPANDQ, CPU Feature: AVX512F func (x Uint64x8) AndMasked(y Uint64x8, mask Mask64x8) Uint64x8 @@ -724,61 +832,85 @@ func (x Uint64x8) AndNot(y Uint64x8) Uint64x8 // AndNotMasked performs a bitwise x &^ y. // +// This operation is applied selectively under a write mask. +// // Asm: VPANDND, CPU Feature: AVX512F func (x Int32x4) AndNotMasked(y Int32x4, mask Mask32x4) Int32x4 // AndNotMasked performs a bitwise x &^ y. // +// This operation is applied selectively under a write mask. +// // Asm: VPANDND, CPU Feature: AVX512F func (x Int32x8) AndNotMasked(y Int32x8, mask Mask32x8) Int32x8 // AndNotMasked performs a bitwise x &^ y. // +// This operation is applied selectively under a write mask. +// // Asm: VPANDND, CPU Feature: AVX512F func (x Int32x16) AndNotMasked(y Int32x16, mask Mask32x16) Int32x16 // AndNotMasked performs a bitwise x &^ y. // +// This operation is applied selectively under a write mask. +// // Asm: VPANDNQ, CPU Feature: AVX512F func (x Int64x2) AndNotMasked(y Int64x2, mask Mask64x2) Int64x2 // AndNotMasked performs a bitwise x &^ y. // +// This operation is applied selectively under a write mask. +// // Asm: VPANDNQ, CPU Feature: AVX512F func (x Int64x4) AndNotMasked(y Int64x4, mask Mask64x4) Int64x4 // AndNotMasked performs a bitwise x &^ y. // +// This operation is applied selectively under a write mask. +// // Asm: VPANDNQ, CPU Feature: AVX512F func (x Int64x8) AndNotMasked(y Int64x8, mask Mask64x8) Int64x8 // AndNotMasked performs a bitwise x &^ y. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPANDND, CPU Feature: AVX512F func (x Uint32x4) AndNotMasked(y Uint32x4, mask Mask32x4) Uint32x4 // AndNotMasked performs a bitwise x &^ y. // +// This operation is applied selectively under a write mask. +// // Asm: VPANDND, CPU Feature: AVX512F func (x Uint32x8) AndNotMasked(y Uint32x8, mask Mask32x8) Uint32x8 // AndNotMasked performs a bitwise x &^ y. // +// This operation is applied selectively under a write mask. +// // Asm: VPANDND, CPU Feature: AVX512F func (x Uint32x16) AndNotMasked(y Uint32x16, mask Mask32x16) Uint32x16 // AndNotMasked performs a bitwise x &^ y. // +// This operation is applied selectively under a write mask. +// // Asm: VPANDNQ, CPU Feature: AVX512F func (x Uint64x2) AndNotMasked(y Uint64x2, mask Mask64x2) Uint64x2 // AndNotMasked performs a bitwise x &^ y. // +// This operation is applied selectively under a write mask. +// // Asm: VPANDNQ, CPU Feature: AVX512F func (x Uint64x4) AndNotMasked(y Uint64x4, mask Mask64x4) Uint64x4 // AndNotMasked performs a bitwise x &^ y. // +// This operation is applied selectively under a write mask. +// // Asm: VPANDNQ, CPU Feature: AVX512F func (x Uint64x8) AndNotMasked(y Uint64x8, mask Mask64x8) Uint64x8 @@ -818,31 +950,43 @@ func (x Float64x8) ApproximateReciprocal() Float64x8 // ApproximateReciprocalMasked computes an approximate reciprocal of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VRCP14PS, CPU Feature: AVX512F func (x Float32x4) ApproximateReciprocalMasked(mask Mask32x4) Float32x4 // ApproximateReciprocalMasked computes an approximate reciprocal of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VRCP14PS, CPU Feature: AVX512F func (x Float32x8) ApproximateReciprocalMasked(mask Mask32x8) Float32x8 // ApproximateReciprocalMasked computes an approximate reciprocal of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VRCP14PS, CPU Feature: AVX512F func (x Float32x16) ApproximateReciprocalMasked(mask Mask32x16) Float32x16 // ApproximateReciprocalMasked computes an approximate reciprocal of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VRCP14PD, CPU Feature: AVX512F func (x Float64x2) ApproximateReciprocalMasked(mask Mask64x2) Float64x2 // ApproximateReciprocalMasked computes an approximate reciprocal of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VRCP14PD, CPU Feature: AVX512F func (x Float64x4) ApproximateReciprocalMasked(mask Mask64x4) Float64x4 // ApproximateReciprocalMasked computes an approximate reciprocal of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VRCP14PD, CPU Feature: AVX512F func (x Float64x8) ApproximateReciprocalMasked(mask Mask64x8) Float64x8 @@ -882,31 +1026,43 @@ func (x Float64x8) ApproximateReciprocalOfSqrt() Float64x8 // ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VRSQRT14PS, CPU Feature: AVX512F func (x Float32x4) ApproximateReciprocalOfSqrtMasked(mask Mask32x4) Float32x4 // ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. // +// This operation is applied selectively under a write mask. 
+// // Asm: VRSQRT14PS, CPU Feature: AVX512F func (x Float32x8) ApproximateReciprocalOfSqrtMasked(mask Mask32x8) Float32x8 // ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VRSQRT14PS, CPU Feature: AVX512F func (x Float32x16) ApproximateReciprocalOfSqrtMasked(mask Mask32x16) Float32x16 // ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VRSQRT14PD, CPU Feature: AVX512F func (x Float64x2) ApproximateReciprocalOfSqrtMasked(mask Mask64x2) Float64x2 // ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VRSQRT14PD, CPU Feature: AVX512F func (x Float64x4) ApproximateReciprocalOfSqrtMasked(mask Mask64x4) Float64x4 // ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VRSQRT14PD, CPU Feature: AVX512F func (x Float64x8) ApproximateReciprocalOfSqrtMasked(mask Mask64x8) Float64x8 @@ -946,31 +1102,43 @@ func (x Uint16x32) Average(y Uint16x32) Uint16x32 // AverageMasked computes the rounded average of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPAVGB, CPU Feature: AVX512BW func (x Uint8x16) AverageMasked(y Uint8x16, mask Mask8x16) Uint8x16 // AverageMasked computes the rounded average of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPAVGB, CPU Feature: AVX512BW func (x Uint8x32) AverageMasked(y Uint8x32, mask Mask8x32) Uint8x32 // AverageMasked computes the rounded average of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPAVGB, CPU Feature: AVX512BW func (x Uint8x64) AverageMasked(y Uint8x64, mask Mask8x64) Uint8x64 // AverageMasked computes the rounded average of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPAVGW, CPU Feature: AVX512BW func (x Uint16x8) AverageMasked(y Uint16x8, mask Mask16x8) Uint16x8 // AverageMasked computes the rounded average of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPAVGW, CPU Feature: AVX512BW func (x Uint16x16) AverageMasked(y Uint16x16, mask Mask16x16) Uint16x16 // AverageMasked computes the rounded average of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPAVGW, CPU Feature: AVX512BW func (x Uint16x32) AverageMasked(y Uint16x32, mask Mask16x32) Uint16x32 @@ -998,42 +1166,42 @@ func (x Float64x4) Ceil() Float64x4 /* CeilWithPrecision */ -// CeilWithPrecision rounds elements up with specified precision, masked. +// CeilWithPrecision rounds elements up with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x4) CeilWithPrecision(prec uint8) Float32x4 -// CeilWithPrecision rounds elements up with specified precision, masked. +// CeilWithPrecision rounds elements up with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. 
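Several of the ...WithPrecision operations in this hunk note that prec must be a constant. A short sketch of the intended call shape; the Float32x4 slice loader name is assumed to follow the Load...Slice convention of the integer loaders used elsewhere in this CL, and reading prec as the number of fraction bits to keep (per VRNDSCALEPS) is likewise an assumption:

v := simd.LoadFloat32x4Slice([]float32{1.26, 2.5, -3.74, 4.0})
up := v.CeilWithPrecision(1) // literal constant: allowed
// prec := someRuntimeValue()
// v.CeilWithPrecision(prec)  // non-constant prec triggers a runtime panic, per the doc above
_ = up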
// // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x8) CeilWithPrecision(prec uint8) Float32x8 -// CeilWithPrecision rounds elements up with specified precision, masked. +// CeilWithPrecision rounds elements up with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x16) CeilWithPrecision(prec uint8) Float32x16 -// CeilWithPrecision rounds elements up with specified precision, masked. +// CeilWithPrecision rounds elements up with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x2) CeilWithPrecision(prec uint8) Float64x2 -// CeilWithPrecision rounds elements up with specified precision, masked. +// CeilWithPrecision rounds elements up with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x4) CeilWithPrecision(prec uint8) Float64x4 -// CeilWithPrecision rounds elements up with specified precision, masked. +// CeilWithPrecision rounds elements up with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // @@ -1042,42 +1210,54 @@ func (x Float64x8) CeilWithPrecision(prec uint8) Float64x8 /* CeilWithPrecisionMasked */ -// CeilWithPrecisionMasked rounds elements up with specified precision, masked. +// CeilWithPrecisionMasked rounds elements up with specified precision. +// +// This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x4) CeilWithPrecisionMasked(prec uint8, mask Mask32x4) Float32x4 -// CeilWithPrecisionMasked rounds elements up with specified precision, masked. +// CeilWithPrecisionMasked rounds elements up with specified precision. +// +// This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x8) CeilWithPrecisionMasked(prec uint8, mask Mask32x8) Float32x8 -// CeilWithPrecisionMasked rounds elements up with specified precision, masked. +// CeilWithPrecisionMasked rounds elements up with specified precision. +// +// This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x16) CeilWithPrecisionMasked(prec uint8, mask Mask32x16) Float32x16 -// CeilWithPrecisionMasked rounds elements up with specified precision, masked. +// CeilWithPrecisionMasked rounds elements up with specified precision. +// +// This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x2) CeilWithPrecisionMasked(prec uint8, mask Mask64x2) Float64x2 -// CeilWithPrecisionMasked rounds elements up with specified precision, masked. +// CeilWithPrecisionMasked rounds elements up with specified precision. +// +// This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x4) CeilWithPrecisionMasked(prec uint8, mask Mask64x4) Float64x4 -// CeilWithPrecisionMasked rounds elements up with specified precision, masked. +// CeilWithPrecisionMasked rounds elements up with specified precision. +// +// This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // @@ -1314,6 +1494,8 @@ func (x Float64x8) DiffWithCeilWithPrecision(prec uint8) Float64x8 // DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ @@ -1321,6 +1503,8 @@ func (x Float32x4) DiffWithCeilWithPrecisionMasked(prec uint8, mask Mask32x4) Fl // DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ @@ -1328,6 +1512,8 @@ func (x Float32x8) DiffWithCeilWithPrecisionMasked(prec uint8, mask Mask32x8) Fl // DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ @@ -1335,6 +1521,8 @@ func (x Float32x16) DiffWithCeilWithPrecisionMasked(prec uint8, mask Mask32x16) // DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ @@ -1342,6 +1530,8 @@ func (x Float64x2) DiffWithCeilWithPrecisionMasked(prec uint8, mask Mask64x2) Fl // DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ @@ -1349,6 +1539,8 @@ func (x Float64x4) DiffWithCeilWithPrecisionMasked(prec uint8, mask Mask64x4) Fl // DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ @@ -1402,6 +1594,8 @@ func (x Float64x8) DiffWithFloorWithPrecision(prec uint8) Float64x8 // DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ @@ -1409,6 +1603,8 @@ func (x Float32x4) DiffWithFloorWithPrecisionMasked(prec uint8, mask Mask32x4) F // DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // +// This operation is applied selectively under a write mask. 
+// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ @@ -1416,6 +1612,8 @@ func (x Float32x8) DiffWithFloorWithPrecisionMasked(prec uint8, mask Mask32x8) F // DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ @@ -1423,6 +1621,8 @@ func (x Float32x16) DiffWithFloorWithPrecisionMasked(prec uint8, mask Mask32x16) // DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ @@ -1430,6 +1630,8 @@ func (x Float64x2) DiffWithFloorWithPrecisionMasked(prec uint8, mask Mask64x2) F // DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ @@ -1437,6 +1639,8 @@ func (x Float64x4) DiffWithFloorWithPrecisionMasked(prec uint8, mask Mask64x4) F // DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ @@ -1490,6 +1694,8 @@ func (x Float64x8) DiffWithRoundWithPrecision(prec uint8) Float64x8 // DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ @@ -1497,6 +1703,8 @@ func (x Float32x4) DiffWithRoundWithPrecisionMasked(prec uint8, mask Mask32x4) F // DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ @@ -1504,6 +1712,8 @@ func (x Float32x8) DiffWithRoundWithPrecisionMasked(prec uint8, mask Mask32x8) F // DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ @@ -1511,6 +1721,8 @@ func (x Float32x16) DiffWithRoundWithPrecisionMasked(prec uint8, mask Mask32x16) // DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VREDUCEPD, CPU Feature: AVX512DQ @@ -1518,6 +1730,8 @@ func (x Float64x2) DiffWithRoundWithPrecisionMasked(prec uint8, mask Mask64x2) F // DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ @@ -1525,6 +1739,8 @@ func (x Float64x4) DiffWithRoundWithPrecisionMasked(prec uint8, mask Mask64x4) F // DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ @@ -1578,6 +1794,8 @@ func (x Float64x8) DiffWithTruncWithPrecision(prec uint8) Float64x8 // DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ @@ -1585,6 +1803,8 @@ func (x Float32x4) DiffWithTruncWithPrecisionMasked(prec uint8, mask Mask32x4) F // DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ @@ -1592,6 +1812,8 @@ func (x Float32x8) DiffWithTruncWithPrecisionMasked(prec uint8, mask Mask32x8) F // DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ @@ -1599,6 +1821,8 @@ func (x Float32x16) DiffWithTruncWithPrecisionMasked(prec uint8, mask Mask32x16) // DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ @@ -1606,6 +1830,8 @@ func (x Float64x2) DiffWithTruncWithPrecisionMasked(prec uint8, mask Mask64x2) F // DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ @@ -1613,6 +1839,8 @@ func (x Float64x4) DiffWithTruncWithPrecisionMasked(prec uint8, mask Mask64x4) F // DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ @@ -1654,31 +1882,43 @@ func (x Float64x8) Div(y Float64x8) Float64x8 // DivMasked divides elements of two vectors. // +// This operation is applied selectively under a write mask. 
+// // Asm: VDIVPS, CPU Feature: AVX512F func (x Float32x4) DivMasked(y Float32x4, mask Mask32x4) Float32x4 // DivMasked divides elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VDIVPS, CPU Feature: AVX512F func (x Float32x8) DivMasked(y Float32x8, mask Mask32x8) Float32x8 // DivMasked divides elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VDIVPS, CPU Feature: AVX512F func (x Float32x16) DivMasked(y Float32x16, mask Mask32x16) Float32x16 // DivMasked divides elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VDIVPD, CPU Feature: AVX512F func (x Float64x2) DivMasked(y Float64x2, mask Mask64x2) Float64x2 // DivMasked divides elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VDIVPD, CPU Feature: AVX512F func (x Float64x4) DivMasked(y Float64x4, mask Mask64x4) Float64x4 // DivMasked divides elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VDIVPD, CPU Feature: AVX512F func (x Float64x8) DivMasked(y Float64x8, mask Mask64x8) Float64x8 @@ -1791,7 +2031,7 @@ func (x Float32x4) Equal(y Float32x4) Mask32x4 // Asm: VCMPPS, CPU Feature: AVX func (x Float32x8) Equal(y Float32x8) Mask32x8 -// Equal compares for equality, masked. +// Equal compares for equality. // // Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x16) Equal(y Float32x16) Mask32x16 @@ -1806,199 +2046,259 @@ func (x Float64x2) Equal(y Float64x2) Mask64x2 // Asm: VCMPPD, CPU Feature: AVX func (x Float64x4) Equal(y Float64x4) Mask64x4 -// Equal compares for equality, masked. +// Equal compares for equality. // // Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x8) Equal(y Float64x8) Mask64x8 -// Equal compares for equality, masked. +// Equal compares for equality. // // Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x64) Equal(y Int8x64) Mask8x64 -// Equal compares for equality, masked. +// Equal compares for equality. // // Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x32) Equal(y Int16x32) Mask16x32 -// Equal compares for equality, masked. +// Equal compares for equality. // // Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x16) Equal(y Int32x16) Mask32x16 -// Equal compares for equality, masked. +// Equal compares for equality. // // Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x8) Equal(y Int64x8) Mask64x8 -// Equal compares for equality, masked. +// Equal compares for equality. // // Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x64) Equal(y Uint8x64) Mask8x64 -// Equal compares for equality, masked. +// Equal compares for equality. // // Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x32) Equal(y Uint16x32) Mask16x32 -// Equal compares for equality, masked. +// Equal compares for equality. // // Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x16) Equal(y Uint32x16) Mask32x16 -// Equal compares for equality, masked. +// Equal compares for equality. // // Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x8) Equal(y Uint64x8) Mask64x8 /* EqualMasked */ -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x4) EqualMasked(y Float32x4, mask Mask32x4) Mask32x4 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. 
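Illustrative only (not part of the patch): a masked division where the caller supplies the write mask. The simd import name is assumed, inputs are taken as parameters because the vector loaders are not shown in this hunk, and what unselected lanes hold afterwards is left to the write-mask semantics described above.

	// divLanes divides x by y only in the lanes selected by m
	// (VDIVPS under a write mask, per the DivMasked doc comment above).
	func divLanes(x, y simd.Float32x4, m simd.Mask32x4) simd.Float32x4 {
		return x.DivMasked(y, m)
	}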
// // Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x8) EqualMasked(y Float32x8, mask Mask32x8) Mask32x8 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x16) EqualMasked(y Float32x16, mask Mask32x16) Mask32x16 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x2) EqualMasked(y Float64x2, mask Mask64x2) Mask64x2 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x4) EqualMasked(y Float64x4, mask Mask64x4) Mask64x4 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x8) EqualMasked(y Float64x8, mask Mask64x8) Mask64x8 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x16) EqualMasked(y Int8x16, mask Mask8x16) Mask8x16 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x32) EqualMasked(y Int8x32, mask Mask8x32) Mask8x32 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x64) EqualMasked(y Int8x64, mask Mask8x64) Mask8x64 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x8) EqualMasked(y Int16x8, mask Mask16x8) Mask16x8 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x16) EqualMasked(y Int16x16, mask Mask16x16) Mask16x16 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x32) EqualMasked(y Int16x32, mask Mask16x32) Mask16x32 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x4) EqualMasked(y Int32x4, mask Mask32x4) Mask32x4 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x8) EqualMasked(y Int32x8, mask Mask32x8) Mask32x8 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x16) EqualMasked(y Int32x16, mask Mask32x16) Mask32x16 -// EqualMasked compares for equality, masked. 
+// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x2) EqualMasked(y Int64x2, mask Mask64x2) Mask64x2 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x4) EqualMasked(y Int64x4, mask Mask64x4) Mask64x4 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x8) EqualMasked(y Int64x8, mask Mask64x8) Mask64x8 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x16) EqualMasked(y Uint8x16, mask Mask8x16) Mask8x16 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x32) EqualMasked(y Uint8x32, mask Mask8x32) Mask8x32 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x64) EqualMasked(y Uint8x64, mask Mask8x64) Mask8x64 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x8) EqualMasked(y Uint16x8, mask Mask16x8) Mask16x8 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x16) EqualMasked(y Uint16x16, mask Mask16x16) Mask16x16 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x32) EqualMasked(y Uint16x32, mask Mask16x32) Mask16x32 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x4) EqualMasked(y Uint32x4, mask Mask32x4) Mask32x4 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x8) EqualMasked(y Uint32x8, mask Mask32x8) Mask32x8 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x16) EqualMasked(y Uint32x16, mask Mask32x16) Mask32x16 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x2) EqualMasked(y Uint64x2, mask Mask64x2) Mask64x2 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. 
// // Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x4) EqualMasked(y Uint64x4, mask Mask64x4) Mask64x4 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x8) EqualMasked(y Uint64x8, mask Mask64x8) Mask64x8 @@ -2027,42 +2327,42 @@ func (x Float64x4) Floor() Float64x4 /* FloorWithPrecision */ -// FloorWithPrecision rounds elements down with specified precision, masked. +// FloorWithPrecision rounds elements down with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x4) FloorWithPrecision(prec uint8) Float32x4 -// FloorWithPrecision rounds elements down with specified precision, masked. +// FloorWithPrecision rounds elements down with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x8) FloorWithPrecision(prec uint8) Float32x8 -// FloorWithPrecision rounds elements down with specified precision, masked. +// FloorWithPrecision rounds elements down with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x16) FloorWithPrecision(prec uint8) Float32x16 -// FloorWithPrecision rounds elements down with specified precision, masked. +// FloorWithPrecision rounds elements down with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x2) FloorWithPrecision(prec uint8) Float64x2 -// FloorWithPrecision rounds elements down with specified precision, masked. +// FloorWithPrecision rounds elements down with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x4) FloorWithPrecision(prec uint8) Float64x4 -// FloorWithPrecision rounds elements down with specified precision, masked. +// FloorWithPrecision rounds elements down with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // @@ -2071,42 +2371,54 @@ func (x Float64x8) FloorWithPrecision(prec uint8) Float64x8 /* FloorWithPrecisionMasked */ -// FloorWithPrecisionMasked rounds elements down with specified precision, masked. +// FloorWithPrecisionMasked rounds elements down with specified precision. +// +// This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x4) FloorWithPrecisionMasked(prec uint8, mask Mask32x4) Float32x4 -// FloorWithPrecisionMasked rounds elements down with specified precision, masked. +// FloorWithPrecisionMasked rounds elements down with specified precision. +// +// This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x8) FloorWithPrecisionMasked(prec uint8, mask Mask32x8) Float32x8 -// FloorWithPrecisionMasked rounds elements down with specified precision, masked. 
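A hedged sketch (not part of the patch) of chaining the unmasked and masked comparisons declared above: the mask produced by one comparison narrows the next. It assumes the import name simd and, as with AVX-512 compares, that result lanes outside the write mask come back cleared.

	// equalToBoth reports, per lane, where x equals y and also equals z:
	// the first comparison produces the write mask for the second.
	func equalToBoth(x, y, z simd.Float32x4) simd.Mask32x4 {
		m := x.Equal(y)            // lanes where x == y
		return x.EqualMasked(z, m) // of those, lanes where x == z
	}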
+// FloorWithPrecisionMasked rounds elements down with specified precision. +// +// This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x16) FloorWithPrecisionMasked(prec uint8, mask Mask32x16) Float32x16 -// FloorWithPrecisionMasked rounds elements down with specified precision, masked. +// FloorWithPrecisionMasked rounds elements down with specified precision. +// +// This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x2) FloorWithPrecisionMasked(prec uint8, mask Mask64x2) Float64x2 -// FloorWithPrecisionMasked rounds elements down with specified precision, masked. +// FloorWithPrecisionMasked rounds elements down with specified precision. +// +// This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x4) FloorWithPrecisionMasked(prec uint8, mask Mask64x4) Float64x4 -// FloorWithPrecisionMasked rounds elements down with specified precision, masked. +// FloorWithPrecisionMasked rounds elements down with specified precision. +// +// This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // @@ -2149,31 +2461,43 @@ func (x Float64x8) FusedMultiplyAdd(y Float64x8, z Float64x8) Float64x8 // FusedMultiplyAddMasked performs (x * y) + z. // +// This operation is applied selectively under a write mask. +// // Asm: VFMADD213PS, CPU Feature: AVX512F func (x Float32x4) FusedMultiplyAddMasked(y Float32x4, z Float32x4, mask Mask32x4) Float32x4 // FusedMultiplyAddMasked performs (x * y) + z. // +// This operation is applied selectively under a write mask. +// // Asm: VFMADD213PS, CPU Feature: AVX512F func (x Float32x8) FusedMultiplyAddMasked(y Float32x8, z Float32x8, mask Mask32x8) Float32x8 // FusedMultiplyAddMasked performs (x * y) + z. // +// This operation is applied selectively under a write mask. +// // Asm: VFMADD213PS, CPU Feature: AVX512F func (x Float32x16) FusedMultiplyAddMasked(y Float32x16, z Float32x16, mask Mask32x16) Float32x16 // FusedMultiplyAddMasked performs (x * y) + z. // +// This operation is applied selectively under a write mask. +// // Asm: VFMADD213PD, CPU Feature: AVX512F func (x Float64x2) FusedMultiplyAddMasked(y Float64x2, z Float64x2, mask Mask64x2) Float64x2 // FusedMultiplyAddMasked performs (x * y) + z. // +// This operation is applied selectively under a write mask. +// // Asm: VFMADD213PD, CPU Feature: AVX512F func (x Float64x4) FusedMultiplyAddMasked(y Float64x4, z Float64x4, mask Mask64x4) Float64x4 // FusedMultiplyAddMasked performs (x * y) + z. // +// This operation is applied selectively under a write mask. +// // Asm: VFMADD213PD, CPU Feature: AVX512F func (x Float64x8) FusedMultiplyAddMasked(y Float64x8, z Float64x8, mask Mask64x8) Float64x8 @@ -2213,31 +2537,43 @@ func (x Float64x8) FusedMultiplyAddSub(y Float64x8, z Float64x8) Float64x8 // FusedMultiplyAddSubMasked performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // +// This operation is applied selectively under a write mask. 
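A small sketch, not part of the patch, of the masked fused multiply-add declared above; the simd import name is an assumption and the operands are passed in as parameters since loaders are outside this hunk.

	// axpySelected computes (a * x) + y in the lanes selected by m, with a
	// single rounding step (VFMADD213PS under a write mask, per the doc above).
	func axpySelected(a, x, y simd.Float32x4, m simd.Mask32x4) simd.Float32x4 {
		return a.FusedMultiplyAddMasked(x, y, m)
	}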
+// // Asm: VFMADDSUB213PS, CPU Feature: AVX512F func (x Float32x4) FusedMultiplyAddSubMasked(y Float32x4, z Float32x4, mask Mask32x4) Float32x4 // FusedMultiplyAddSubMasked performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // +// This operation is applied selectively under a write mask. +// // Asm: VFMADDSUB213PS, CPU Feature: AVX512F func (x Float32x8) FusedMultiplyAddSubMasked(y Float32x8, z Float32x8, mask Mask32x8) Float32x8 // FusedMultiplyAddSubMasked performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // +// This operation is applied selectively under a write mask. +// // Asm: VFMADDSUB213PS, CPU Feature: AVX512F func (x Float32x16) FusedMultiplyAddSubMasked(y Float32x16, z Float32x16, mask Mask32x16) Float32x16 // FusedMultiplyAddSubMasked performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // +// This operation is applied selectively under a write mask. +// // Asm: VFMADDSUB213PD, CPU Feature: AVX512F func (x Float64x2) FusedMultiplyAddSubMasked(y Float64x2, z Float64x2, mask Mask64x2) Float64x2 // FusedMultiplyAddSubMasked performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // +// This operation is applied selectively under a write mask. +// // Asm: VFMADDSUB213PD, CPU Feature: AVX512F func (x Float64x4) FusedMultiplyAddSubMasked(y Float64x4, z Float64x4, mask Mask64x4) Float64x4 // FusedMultiplyAddSubMasked performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // +// This operation is applied selectively under a write mask. +// // Asm: VFMADDSUB213PD, CPU Feature: AVX512F func (x Float64x8) FusedMultiplyAddSubMasked(y Float64x8, z Float64x8, mask Mask64x8) Float64x8 @@ -2277,31 +2613,43 @@ func (x Float64x8) FusedMultiplySubAdd(y Float64x8, z Float64x8) Float64x8 // FusedMultiplySubAddMasked performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // +// This operation is applied selectively under a write mask. +// // Asm: VFMSUBADD213PS, CPU Feature: AVX512F func (x Float32x4) FusedMultiplySubAddMasked(y Float32x4, z Float32x4, mask Mask32x4) Float32x4 // FusedMultiplySubAddMasked performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // +// This operation is applied selectively under a write mask. +// // Asm: VFMSUBADD213PS, CPU Feature: AVX512F func (x Float32x8) FusedMultiplySubAddMasked(y Float32x8, z Float32x8, mask Mask32x8) Float32x8 // FusedMultiplySubAddMasked performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // +// This operation is applied selectively under a write mask. +// // Asm: VFMSUBADD213PS, CPU Feature: AVX512F func (x Float32x16) FusedMultiplySubAddMasked(y Float32x16, z Float32x16, mask Mask32x16) Float32x16 // FusedMultiplySubAddMasked performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // +// This operation is applied selectively under a write mask. +// // Asm: VFMSUBADD213PD, CPU Feature: AVX512F func (x Float64x2) FusedMultiplySubAddMasked(y Float64x2, z Float64x2, mask Mask64x2) Float64x2 // FusedMultiplySubAddMasked performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // +// This operation is applied selectively under a write mask. 
+// // Asm: VFMSUBADD213PD, CPU Feature: AVX512F func (x Float64x4) FusedMultiplySubAddMasked(y Float64x4, z Float64x4, mask Mask64x4) Float64x4 // FusedMultiplySubAddMasked performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // +// This operation is applied selectively under a write mask. +// // Asm: VFMSUBADD213PD, CPU Feature: AVX512F func (x Float64x8) FusedMultiplySubAddMasked(y Float64x8, z Float64x8, mask Mask64x8) Float64x8 @@ -2380,6 +2728,8 @@ func (x Uint8x64) GaloisFieldAffineTransformInverse(y Uint64x8, b uint8) Uint8x6 // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // +// This operation is applied selectively under a write mask. +// // b is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512GFNI @@ -2391,6 +2741,8 @@ func (x Uint8x16) GaloisFieldAffineTransformInverseMasked(y Uint64x2, b uint8, m // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // +// This operation is applied selectively under a write mask. +// // b is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512GFNI @@ -2402,6 +2754,8 @@ func (x Uint8x32) GaloisFieldAffineTransformInverseMasked(y Uint64x4, b uint8, m // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // +// This operation is applied selectively under a write mask. +// // b is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512GFNI @@ -2414,6 +2768,8 @@ func (x Uint8x64) GaloisFieldAffineTransformInverseMasked(y Uint64x8, b uint8, m // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // +// This operation is applied selectively under a write mask. +// // b is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512GFNI @@ -2424,6 +2780,8 @@ func (x Uint8x16) GaloisFieldAffineTransformMasked(y Uint64x2, b uint8, m Mask8x // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // +// This operation is applied selectively under a write mask. +// // b is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512GFNI @@ -2434,6 +2792,8 @@ func (x Uint8x32) GaloisFieldAffineTransformMasked(y Uint64x4, b uint8, m Mask8x // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // +// This operation is applied selectively under a write mask. +// // b is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512GFNI @@ -2464,18 +2824,24 @@ func (x Uint8x64) GaloisFieldMul(y Uint8x64) Uint8x64 // GaloisFieldMulMasked computes element-wise GF(2^8) multiplication with // reduction polynomial x^8 + x^4 + x^3 + x + 1. // +// This operation is applied selectively under a write mask. 
+// // Asm: VGF2P8MULB, CPU Feature: AVX512GFNI func (x Uint8x16) GaloisFieldMulMasked(y Uint8x16, mask Mask8x16) Uint8x16 // GaloisFieldMulMasked computes element-wise GF(2^8) multiplication with // reduction polynomial x^8 + x^4 + x^3 + x + 1. // +// This operation is applied selectively under a write mask. +// // Asm: VGF2P8MULB, CPU Feature: AVX512GFNI func (x Uint8x32) GaloisFieldMulMasked(y Uint8x32, mask Mask8x32) Uint8x32 // GaloisFieldMulMasked computes element-wise GF(2^8) multiplication with // reduction polynomial x^8 + x^4 + x^3 + x + 1. // +// This operation is applied selectively under a write mask. +// // Asm: VGF2P8MULB, CPU Feature: AVX512GFNI func (x Uint8x64) GaloisFieldMulMasked(y Uint8x64, mask Mask8x64) Uint8x64 @@ -2917,151 +3283,211 @@ func (x Uint64x8) GreaterEqual(y Uint64x8) Mask64x8 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x4) GreaterEqualMasked(y Float32x4, mask Mask32x4) Mask32x4 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x8) GreaterEqualMasked(y Float32x8, mask Mask32x8) Mask32x8 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x16) GreaterEqualMasked(y Float32x16, mask Mask32x16) Mask32x16 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x2) GreaterEqualMasked(y Float64x2, mask Mask64x2) Mask64x2 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x4) GreaterEqualMasked(y Float64x4, mask Mask64x4) Mask64x4 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x8) GreaterEqualMasked(y Float64x8, mask Mask64x8) Mask64x8 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x16) GreaterEqualMasked(y Int8x16, mask Mask8x16) Mask8x16 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x32) GreaterEqualMasked(y Int8x32, mask Mask8x32) Mask8x32 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x64) GreaterEqualMasked(y Int8x64, mask Mask8x64) Mask8x64 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x8) GreaterEqualMasked(y Int16x8, mask Mask16x8) Mask16x8 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x16) GreaterEqualMasked(y Int16x16, mask Mask16x16) Mask16x16 // GreaterEqualMasked compares for greater than or equal. 
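Sketch only (simd import name assumed): the masked GF(2^8) multiply documented above, the kind of building block used for AES-style field arithmetic on selected bytes.

	// gfMulLanes multiplies the bytes of x and y in GF(2^8) with reduction
	// polynomial x^8 + x^4 + x^3 + x + 1, only in the lanes selected by m.
	func gfMulLanes(x, y simd.Uint8x16, m simd.Mask8x16) simd.Uint8x16 {
		return x.GaloisFieldMulMasked(y, m)
	}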
// +// This operation is applied selectively under a write mask. +// // Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x32) GreaterEqualMasked(y Int16x32, mask Mask16x32) Mask16x32 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x4) GreaterEqualMasked(y Int32x4, mask Mask32x4) Mask32x4 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x8) GreaterEqualMasked(y Int32x8, mask Mask32x8) Mask32x8 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x16) GreaterEqualMasked(y Int32x16, mask Mask32x16) Mask32x16 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x2) GreaterEqualMasked(y Int64x2, mask Mask64x2) Mask64x2 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x4) GreaterEqualMasked(y Int64x4, mask Mask64x4) Mask64x4 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x8) GreaterEqualMasked(y Int64x8, mask Mask64x8) Mask64x8 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x16) GreaterEqualMasked(y Uint8x16, mask Mask8x16) Mask8x16 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x32) GreaterEqualMasked(y Uint8x32, mask Mask8x32) Mask8x32 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x64) GreaterEqualMasked(y Uint8x64, mask Mask8x64) Mask8x64 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x8) GreaterEqualMasked(y Uint16x8, mask Mask16x8) Mask16x8 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x16) GreaterEqualMasked(y Uint16x16, mask Mask16x16) Mask16x16 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x32) GreaterEqualMasked(y Uint16x32, mask Mask16x32) Mask16x32 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x4) GreaterEqualMasked(y Uint32x4, mask Mask32x4) Mask32x4 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x8) GreaterEqualMasked(y Uint32x8, mask Mask32x8) Mask32x8 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x16) GreaterEqualMasked(y Uint32x16, mask Mask32x16) Mask32x16 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x2) GreaterEqualMasked(y Uint64x2, mask Mask64x2) Mask64x2 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x4) GreaterEqualMasked(y Uint64x4, mask Mask64x4) Mask64x4 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x8) GreaterEqualMasked(y Uint64x8, mask Mask64x8) Mask64x8 @@ -3069,151 +3495,211 @@ func (x Uint64x8) GreaterEqualMasked(y Uint64x8, mask Mask64x8) Mask64x8 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x4) GreaterMasked(y Float32x4, mask Mask32x4) Mask32x4 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x8) GreaterMasked(y Float32x8, mask Mask32x8) Mask32x8 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x16) GreaterMasked(y Float32x16, mask Mask32x16) Mask32x16 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x2) GreaterMasked(y Float64x2, mask Mask64x2) Mask64x2 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x4) GreaterMasked(y Float64x4, mask Mask64x4) Mask64x4 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x8) GreaterMasked(y Float64x8, mask Mask64x8) Mask64x8 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x16) GreaterMasked(y Int8x16, mask Mask8x16) Mask8x16 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x32) GreaterMasked(y Int8x32, mask Mask8x32) Mask8x32 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x64) GreaterMasked(y Int8x64, mask Mask8x64) Mask8x64 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x8) GreaterMasked(y Int16x8, mask Mask16x8) Mask16x8 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x16) GreaterMasked(y Int16x16, mask Mask16x16) Mask16x16 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x32) GreaterMasked(y Int16x32, mask Mask16x32) Mask16x32 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x4) GreaterMasked(y Int32x4, mask Mask32x4) Mask32x4 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x8) GreaterMasked(y Int32x8, mask Mask32x8) Mask32x8 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x16) GreaterMasked(y Int32x16, mask Mask32x16) Mask32x16 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x2) GreaterMasked(y Int64x2, mask Mask64x2) Mask64x2 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x4) GreaterMasked(y Int64x4, mask Mask64x4) Mask64x4 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x8) GreaterMasked(y Int64x8, mask Mask64x8) Mask64x8 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x16) GreaterMasked(y Uint8x16, mask Mask8x16) Mask8x16 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x32) GreaterMasked(y Uint8x32, mask Mask8x32) Mask8x32 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x64) GreaterMasked(y Uint8x64, mask Mask8x64) Mask8x64 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x8) GreaterMasked(y Uint16x8, mask Mask16x8) Mask16x8 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x16) GreaterMasked(y Uint16x16, mask Mask16x16) Mask16x16 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x32) GreaterMasked(y Uint16x32, mask Mask16x32) Mask16x32 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x4) GreaterMasked(y Uint32x4, mask Mask32x4) Mask32x4 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x8) GreaterMasked(y Uint32x8, mask Mask32x8) Mask32x8 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x16) GreaterMasked(y Uint32x16, mask Mask32x16) Mask32x16 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x2) GreaterMasked(y Uint64x2, mask Mask64x2) Mask64x2 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x4) GreaterMasked(y Uint64x4, mask Mask64x4) Mask64x4 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x8) GreaterMasked(y Uint64x8, mask Mask64x8) Mask64x8 @@ -3253,31 +3739,43 @@ func (x Float64x8) IsNan(y Float64x8) Mask64x8 // IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x4) IsNanMasked(y Float32x4, mask Mask32x4) Mask32x4 // IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x8) IsNanMasked(y Float32x8, mask Mask32x8) Mask32x8 // IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x16) IsNanMasked(y Float32x16, mask Mask32x16) Mask32x16 // IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x2) IsNanMasked(y Float64x2, mask Mask64x2) Mask64x2 // IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x4) IsNanMasked(y Float64x4, mask Mask64x4) Mask64x4 // IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x8) IsNanMasked(y Float64x8, mask Mask64x8) Mask64x8 @@ -3589,151 +4087,211 @@ func (x Uint64x8) LessEqual(y Uint64x8) Mask64x8 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x4) LessEqualMasked(y Float32x4, mask Mask32x4) Mask32x4 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x8) LessEqualMasked(y Float32x8, mask Mask32x8) Mask32x8 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x16) LessEqualMasked(y Float32x16, mask Mask32x16) Mask32x16 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x2) LessEqualMasked(y Float64x2, mask Mask64x2) Mask64x2 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x4) LessEqualMasked(y Float64x4, mask Mask64x4) Mask64x4 // LessEqualMasked compares for less than or equal. 
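An illustrative use of IsNanMasked (not part of the patch), following the "Use as x.IsNan(x)" self-comparison pattern from the doc comment above but under a write mask; simd is an assumed import name.

	// nanLanes reports which of the lanes selected by m hold NaN, using the
	// self-comparison form suggested by the IsNan documentation.
	func nanLanes(x simd.Float32x4, m simd.Mask32x4) simd.Mask32x4 {
		return x.IsNanMasked(x, m)
	}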
// +// This operation is applied selectively under a write mask. +// // Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x8) LessEqualMasked(y Float64x8, mask Mask64x8) Mask64x8 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x16) LessEqualMasked(y Int8x16, mask Mask8x16) Mask8x16 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x32) LessEqualMasked(y Int8x32, mask Mask8x32) Mask8x32 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x64) LessEqualMasked(y Int8x64, mask Mask8x64) Mask8x64 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x8) LessEqualMasked(y Int16x8, mask Mask16x8) Mask16x8 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x16) LessEqualMasked(y Int16x16, mask Mask16x16) Mask16x16 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x32) LessEqualMasked(y Int16x32, mask Mask16x32) Mask16x32 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x4) LessEqualMasked(y Int32x4, mask Mask32x4) Mask32x4 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x8) LessEqualMasked(y Int32x8, mask Mask32x8) Mask32x8 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x16) LessEqualMasked(y Int32x16, mask Mask32x16) Mask32x16 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x2) LessEqualMasked(y Int64x2, mask Mask64x2) Mask64x2 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x4) LessEqualMasked(y Int64x4, mask Mask64x4) Mask64x4 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x8) LessEqualMasked(y Int64x8, mask Mask64x8) Mask64x8 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x16) LessEqualMasked(y Uint8x16, mask Mask8x16) Mask8x16 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x32) LessEqualMasked(y Uint8x32, mask Mask8x32) Mask8x32 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x64) LessEqualMasked(y Uint8x64, mask Mask8x64) Mask8x64 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x8) LessEqualMasked(y Uint16x8, mask Mask16x8) Mask16x8 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x16) LessEqualMasked(y Uint16x16, mask Mask16x16) Mask16x16 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x32) LessEqualMasked(y Uint16x32, mask Mask16x32) Mask16x32 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x4) LessEqualMasked(y Uint32x4, mask Mask32x4) Mask32x4 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x8) LessEqualMasked(y Uint32x8, mask Mask32x8) Mask32x8 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x16) LessEqualMasked(y Uint32x16, mask Mask32x16) Mask32x16 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x2) LessEqualMasked(y Uint64x2, mask Mask64x2) Mask64x2 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x4) LessEqualMasked(y Uint64x4, mask Mask64x4) Mask64x4 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x8) LessEqualMasked(y Uint64x8, mask Mask64x8) Mask64x8 @@ -3741,151 +4299,211 @@ func (x Uint64x8) LessEqualMasked(y Uint64x8, mask Mask64x8) Mask64x8 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x4) LessMasked(y Float32x4, mask Mask32x4) Mask32x4 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x8) LessMasked(y Float32x8, mask Mask32x8) Mask32x8 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x16) LessMasked(y Float32x16, mask Mask32x16) Mask32x16 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x2) LessMasked(y Float64x2, mask Mask64x2) Mask64x2 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x4) LessMasked(y Float64x4, mask Mask64x4) Mask64x4 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. 
+// // Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x8) LessMasked(y Float64x8, mask Mask64x8) Mask64x8 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x16) LessMasked(y Int8x16, mask Mask8x16) Mask8x16 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x32) LessMasked(y Int8x32, mask Mask8x32) Mask8x32 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x64) LessMasked(y Int8x64, mask Mask8x64) Mask8x64 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x8) LessMasked(y Int16x8, mask Mask16x8) Mask16x8 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x16) LessMasked(y Int16x16, mask Mask16x16) Mask16x16 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x32) LessMasked(y Int16x32, mask Mask16x32) Mask16x32 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x4) LessMasked(y Int32x4, mask Mask32x4) Mask32x4 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x8) LessMasked(y Int32x8, mask Mask32x8) Mask32x8 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x16) LessMasked(y Int32x16, mask Mask32x16) Mask32x16 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x2) LessMasked(y Int64x2, mask Mask64x2) Mask64x2 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x4) LessMasked(y Int64x4, mask Mask64x4) Mask64x4 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x8) LessMasked(y Int64x8, mask Mask64x8) Mask64x8 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x16) LessMasked(y Uint8x16, mask Mask8x16) Mask8x16 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x32) LessMasked(y Uint8x32, mask Mask8x32) Mask8x32 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x64) LessMasked(y Uint8x64, mask Mask8x64) Mask8x64 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x8) LessMasked(y Uint16x8, mask Mask16x8) Mask16x8 // LessMasked compares for less than. 
// +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x16) LessMasked(y Uint16x16, mask Mask16x16) Mask16x16 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x32) LessMasked(y Uint16x32, mask Mask16x32) Mask16x32 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x4) LessMasked(y Uint32x4, mask Mask32x4) Mask32x4 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x8) LessMasked(y Uint32x8, mask Mask32x8) Mask32x8 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x16) LessMasked(y Uint32x16, mask Mask32x16) Mask32x16 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x2) LessMasked(y Uint64x2, mask Mask64x2) Mask64x2 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x4) LessMasked(y Uint64x4, mask Mask64x4) Mask64x4 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x8) LessMasked(y Uint64x8, mask Mask64x8) Mask64x8 @@ -4045,151 +4663,211 @@ func (x Uint64x8) Max(y Uint64x8) Uint64x8 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VMAXPS, CPU Feature: AVX512F func (x Float32x4) MaxMasked(y Float32x4, mask Mask32x4) Float32x4 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VMAXPS, CPU Feature: AVX512F func (x Float32x8) MaxMasked(y Float32x8, mask Mask32x8) Float32x8 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VMAXPS, CPU Feature: AVX512F func (x Float32x16) MaxMasked(y Float32x16, mask Mask32x16) Float32x16 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VMAXPD, CPU Feature: AVX512F func (x Float64x2) MaxMasked(y Float64x2, mask Mask64x2) Float64x2 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VMAXPD, CPU Feature: AVX512F func (x Float64x4) MaxMasked(y Float64x4, mask Mask64x4) Float64x4 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VMAXPD, CPU Feature: AVX512F func (x Float64x8) MaxMasked(y Float64x8, mask Mask64x8) Float64x8 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMAXSB, CPU Feature: AVX512BW func (x Int8x16) MaxMasked(y Int8x16, mask Mask8x16) Int8x16 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. 
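A sketch (not part of the patch) of composing the masked comparisons above into a range test. It assumes the import name simd and, as with AVX-512 compares, that result lanes outside the write mask come back cleared, so each comparison narrows the mask used by the next.

	// inRangeLanes reports, within the lanes selected by m, where lo <= x < hi.
	func inRangeLanes(x, lo, hi simd.Float32x4, m simd.Mask32x4) simd.Mask32x4 {
		ge := x.GreaterEqualMasked(lo, m) // lanes of m where x >= lo
		return x.LessMasked(hi, ge)       // of those, lanes where x < hi
	}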
+// // Asm: VPMAXSB, CPU Feature: AVX512BW func (x Int8x32) MaxMasked(y Int8x32, mask Mask8x32) Int8x32 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMAXSB, CPU Feature: AVX512BW func (x Int8x64) MaxMasked(y Int8x64, mask Mask8x64) Int8x64 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMAXSW, CPU Feature: AVX512BW func (x Int16x8) MaxMasked(y Int16x8, mask Mask16x8) Int16x8 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMAXSW, CPU Feature: AVX512BW func (x Int16x16) MaxMasked(y Int16x16, mask Mask16x16) Int16x16 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMAXSW, CPU Feature: AVX512BW func (x Int16x32) MaxMasked(y Int16x32, mask Mask16x32) Int16x32 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMAXSD, CPU Feature: AVX512F func (x Int32x4) MaxMasked(y Int32x4, mask Mask32x4) Int32x4 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMAXSD, CPU Feature: AVX512F func (x Int32x8) MaxMasked(y Int32x8, mask Mask32x8) Int32x8 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMAXSD, CPU Feature: AVX512F func (x Int32x16) MaxMasked(y Int32x16, mask Mask32x16) Int32x16 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMAXSQ, CPU Feature: AVX512F func (x Int64x2) MaxMasked(y Int64x2, mask Mask64x2) Int64x2 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMAXSQ, CPU Feature: AVX512F func (x Int64x4) MaxMasked(y Int64x4, mask Mask64x4) Int64x4 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMAXSQ, CPU Feature: AVX512F func (x Int64x8) MaxMasked(y Int64x8, mask Mask64x8) Int64x8 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMAXUB, CPU Feature: AVX512BW func (x Uint8x16) MaxMasked(y Uint8x16, mask Mask8x16) Uint8x16 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMAXUB, CPU Feature: AVX512BW func (x Uint8x32) MaxMasked(y Uint8x32, mask Mask8x32) Uint8x32 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMAXUB, CPU Feature: AVX512BW func (x Uint8x64) MaxMasked(y Uint8x64, mask Mask8x64) Uint8x64 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMAXUW, CPU Feature: AVX512BW func (x Uint16x8) MaxMasked(y Uint16x8, mask Mask16x8) Uint16x8 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPMAXUW, CPU Feature: AVX512BW func (x Uint16x16) MaxMasked(y Uint16x16, mask Mask16x16) Uint16x16 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMAXUW, CPU Feature: AVX512BW func (x Uint16x32) MaxMasked(y Uint16x32, mask Mask16x32) Uint16x32 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMAXUD, CPU Feature: AVX512F func (x Uint32x4) MaxMasked(y Uint32x4, mask Mask32x4) Uint32x4 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMAXUD, CPU Feature: AVX512F func (x Uint32x8) MaxMasked(y Uint32x8, mask Mask32x8) Uint32x8 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMAXUD, CPU Feature: AVX512F func (x Uint32x16) MaxMasked(y Uint32x16, mask Mask32x16) Uint32x16 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMAXUQ, CPU Feature: AVX512F func (x Uint64x2) MaxMasked(y Uint64x2, mask Mask64x2) Uint64x2 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMAXUQ, CPU Feature: AVX512F func (x Uint64x4) MaxMasked(y Uint64x4, mask Mask64x4) Uint64x4 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMAXUQ, CPU Feature: AVX512F func (x Uint64x8) MaxMasked(y Uint64x8, mask Mask64x8) Uint64x8 @@ -4349,151 +5027,211 @@ func (x Uint64x8) Min(y Uint64x8) Uint64x8 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VMINPS, CPU Feature: AVX512F func (x Float32x4) MinMasked(y Float32x4, mask Mask32x4) Float32x4 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VMINPS, CPU Feature: AVX512F func (x Float32x8) MinMasked(y Float32x8, mask Mask32x8) Float32x8 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VMINPS, CPU Feature: AVX512F func (x Float32x16) MinMasked(y Float32x16, mask Mask32x16) Float32x16 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VMINPD, CPU Feature: AVX512F func (x Float64x2) MinMasked(y Float64x2, mask Mask64x2) Float64x2 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VMINPD, CPU Feature: AVX512F func (x Float64x4) MinMasked(y Float64x4, mask Mask64x4) Float64x4 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VMINPD, CPU Feature: AVX512F func (x Float64x8) MinMasked(y Float64x8, mask Mask64x8) Float64x8 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMINSB, CPU Feature: AVX512BW func (x Int8x16) MinMasked(y Int8x16, mask Mask8x16) Int8x16 // MinMasked computes the minimum of corresponding elements. 
// +// This operation is applied selectively under a write mask. +// // Asm: VPMINSB, CPU Feature: AVX512BW func (x Int8x32) MinMasked(y Int8x32, mask Mask8x32) Int8x32 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMINSB, CPU Feature: AVX512BW func (x Int8x64) MinMasked(y Int8x64, mask Mask8x64) Int8x64 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMINSW, CPU Feature: AVX512BW func (x Int16x8) MinMasked(y Int16x8, mask Mask16x8) Int16x8 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMINSW, CPU Feature: AVX512BW func (x Int16x16) MinMasked(y Int16x16, mask Mask16x16) Int16x16 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMINSW, CPU Feature: AVX512BW func (x Int16x32) MinMasked(y Int16x32, mask Mask16x32) Int16x32 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMINSD, CPU Feature: AVX512F func (x Int32x4) MinMasked(y Int32x4, mask Mask32x4) Int32x4 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMINSD, CPU Feature: AVX512F func (x Int32x8) MinMasked(y Int32x8, mask Mask32x8) Int32x8 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMINSD, CPU Feature: AVX512F func (x Int32x16) MinMasked(y Int32x16, mask Mask32x16) Int32x16 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMINSQ, CPU Feature: AVX512F func (x Int64x2) MinMasked(y Int64x2, mask Mask64x2) Int64x2 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMINSQ, CPU Feature: AVX512F func (x Int64x4) MinMasked(y Int64x4, mask Mask64x4) Int64x4 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMINSQ, CPU Feature: AVX512F func (x Int64x8) MinMasked(y Int64x8, mask Mask64x8) Int64x8 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMINUB, CPU Feature: AVX512BW func (x Uint8x16) MinMasked(y Uint8x16, mask Mask8x16) Uint8x16 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMINUB, CPU Feature: AVX512BW func (x Uint8x32) MinMasked(y Uint8x32, mask Mask8x32) Uint8x32 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMINUB, CPU Feature: AVX512BW func (x Uint8x64) MinMasked(y Uint8x64, mask Mask8x64) Uint8x64 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMINUW, CPU Feature: AVX512BW func (x Uint16x8) MinMasked(y Uint16x8, mask Mask16x8) Uint16x8 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPMINUW, CPU Feature: AVX512BW func (x Uint16x16) MinMasked(y Uint16x16, mask Mask16x16) Uint16x16 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMINUW, CPU Feature: AVX512BW func (x Uint16x32) MinMasked(y Uint16x32, mask Mask16x32) Uint16x32 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMINUD, CPU Feature: AVX512F func (x Uint32x4) MinMasked(y Uint32x4, mask Mask32x4) Uint32x4 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMINUD, CPU Feature: AVX512F func (x Uint32x8) MinMasked(y Uint32x8, mask Mask32x8) Uint32x8 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMINUD, CPU Feature: AVX512F func (x Uint32x16) MinMasked(y Uint32x16, mask Mask32x16) Uint32x16 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMINUQ, CPU Feature: AVX512F func (x Uint64x2) MinMasked(y Uint64x2, mask Mask64x2) Uint64x2 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMINUQ, CPU Feature: AVX512F func (x Uint64x4) MinMasked(y Uint64x4, mask Mask64x4) Uint64x4 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMINUQ, CPU Feature: AVX512F func (x Uint64x8) MinMasked(y Uint64x8, mask Mask64x8) Uint64x8 @@ -4509,7 +5247,7 @@ func (x Float32x4) Mul(y Float32x4) Float32x4 // Asm: VMULPS, CPU Feature: AVX func (x Float32x8) Mul(y Float32x8) Float32x8 -// Mul multiplies corresponding elements of two vectors, masked. +// Mul multiplies corresponding elements of two vectors. // // Asm: VMULPS, CPU Feature: AVX512F func (x Float32x16) Mul(y Float32x16) Float32x16 @@ -4524,7 +5262,7 @@ func (x Float64x2) Mul(y Float64x2) Float64x2 // Asm: VMULPD, CPU Feature: AVX func (x Float64x4) Mul(y Float64x4) Float64x4 -// Mul multiplies corresponding elements of two vectors, masked. +// Mul multiplies corresponding elements of two vectors. // // Asm: VMULPD, CPU Feature: AVX512F func (x Float64x8) Mul(y Float64x8) Float64x8 @@ -4565,31 +5303,43 @@ func (x Float64x8) MulByPowOf2(y Float64x8) Float64x8 // MulByPowOf2Masked multiplies elements by a power of 2. // +// This operation is applied selectively under a write mask. +// // Asm: VSCALEFPS, CPU Feature: AVX512F func (x Float32x4) MulByPowOf2Masked(y Float32x4, mask Mask32x4) Float32x4 // MulByPowOf2Masked multiplies elements by a power of 2. // +// This operation is applied selectively under a write mask. +// // Asm: VSCALEFPS, CPU Feature: AVX512F func (x Float32x8) MulByPowOf2Masked(y Float32x8, mask Mask32x8) Float32x8 // MulByPowOf2Masked multiplies elements by a power of 2. // +// This operation is applied selectively under a write mask. +// // Asm: VSCALEFPS, CPU Feature: AVX512F func (x Float32x16) MulByPowOf2Masked(y Float32x16, mask Mask32x16) Float32x16 // MulByPowOf2Masked multiplies elements by a power of 2. // +// This operation is applied selectively under a write mask. 
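A sketch of chaining the masked min and max documented above into a per-lane clamp, under the same assumed simd import as the earlier sketch; the helper is hypothetical, and how unselected lanes propagate is governed by the write-mask semantics, not by this code.

// clampMasked bounds the selected lanes of v to [lo, hi] by chaining the
// masked max and min shown above; treat this as a sketch only.
func clampMasked(v, lo, hi simd.Float32x4, m simd.Mask32x4) simd.Float32x4 {
	return v.MaxMasked(lo, m).MinMasked(hi, m)
}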
+// // Asm: VSCALEFPD, CPU Feature: AVX512F func (x Float64x2) MulByPowOf2Masked(y Float64x2, mask Mask64x2) Float64x2 // MulByPowOf2Masked multiplies elements by a power of 2. // +// This operation is applied selectively under a write mask. +// // Asm: VSCALEFPD, CPU Feature: AVX512F func (x Float64x4) MulByPowOf2Masked(y Float64x4, mask Mask64x4) Float64x4 // MulByPowOf2Masked multiplies elements by a power of 2. // +// This operation is applied selectively under a write mask. +// // Asm: VSCALEFPD, CPU Feature: AVX512F func (x Float64x8) MulByPowOf2Masked(y Float64x8, mask Mask64x8) Float64x8 @@ -4607,19 +5357,19 @@ func (x Int32x4) MulEvenWiden(y Int32x4) Int64x2 // Asm: VPMULDQ, CPU Feature: AVX2 func (x Int32x8) MulEvenWiden(y Int32x8) Int64x4 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// MulEvenWiden multiplies even-indexed elements, widening the result. // Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULDQ, CPU Feature: AVX512F func (x Int64x2) MulEvenWiden(y Int64x2) Int64x2 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// MulEvenWiden multiplies even-indexed elements, widening the result. // Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULDQ, CPU Feature: AVX512F func (x Int64x4) MulEvenWiden(y Int64x4) Int64x4 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// MulEvenWiden multiplies even-indexed elements, widening the result. // Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULDQ, CPU Feature: AVX512F @@ -4637,19 +5387,19 @@ func (x Uint32x4) MulEvenWiden(y Uint32x4) Uint64x2 // Asm: VPMULUDQ, CPU Feature: AVX2 func (x Uint32x8) MulEvenWiden(y Uint32x8) Uint64x4 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// MulEvenWiden multiplies even-indexed elements, widening the result. // Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULUDQ, CPU Feature: AVX512F func (x Uint64x2) MulEvenWiden(y Uint64x2) Uint64x2 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// MulEvenWiden multiplies even-indexed elements, widening the result. // Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULUDQ, CPU Feature: AVX512F func (x Uint64x4) MulEvenWiden(y Uint64x4) Uint64x4 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// MulEvenWiden multiplies even-indexed elements, widening the result. // Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULUDQ, CPU Feature: AVX512F @@ -4657,39 +5407,51 @@ func (x Uint64x8) MulEvenWiden(y Uint64x8) Uint64x8 /* MulEvenWidenMasked */ -// MulEvenWidenMasked multiplies even-indexed elements, widening the result, masked. +// MulEvenWidenMasked multiplies even-indexed elements, widening the result. // Result[i] = v1.Even[i] * v2.Even[i]. // +// This operation is applied selectively under a write mask. +// // Asm: VPMULDQ, CPU Feature: AVX512F func (x Int64x2) MulEvenWidenMasked(y Int64x2, mask Mask64x2) Int64x2 -// MulEvenWidenMasked multiplies even-indexed elements, widening the result, masked. +// MulEvenWidenMasked multiplies even-indexed elements, widening the result. // Result[i] = v1.Even[i] * v2.Even[i]. // +// This operation is applied selectively under a write mask. +// // Asm: VPMULDQ, CPU Feature: AVX512F func (x Int64x4) MulEvenWidenMasked(y Int64x4, mask Mask64x4) Int64x4 -// MulEvenWidenMasked multiplies even-indexed elements, widening the result, masked. +// MulEvenWidenMasked multiplies even-indexed elements, widening the result. 
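A hedged sketch of the masked power-of-two scaling above. The patch only says "multiplies elements by a power of 2"; treating the second operand as the per-lane exponents is an assumption based on VSCALEFPD, and the helper name is hypothetical.

// scaleSelected multiplies the lanes of x selected by m by a power of two
// taken from exps (VSCALEFPD-style semantics are assumed here).
func scaleSelected(x, exps simd.Float64x2, m simd.Mask64x2) simd.Float64x2 {
	return x.MulByPowOf2Masked(exps, m)
}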
// Result[i] = v1.Even[i] * v2.Even[i]. // +// This operation is applied selectively under a write mask. +// // Asm: VPMULDQ, CPU Feature: AVX512F func (x Int64x8) MulEvenWidenMasked(y Int64x8, mask Mask64x8) Int64x8 -// MulEvenWidenMasked multiplies even-indexed elements, widening the result, masked. +// MulEvenWidenMasked multiplies even-indexed elements, widening the result. // Result[i] = v1.Even[i] * v2.Even[i]. // +// This operation is applied selectively under a write mask. +// // Asm: VPMULUDQ, CPU Feature: AVX512F func (x Uint64x2) MulEvenWidenMasked(y Uint64x2, mask Mask64x2) Uint64x2 -// MulEvenWidenMasked multiplies even-indexed elements, widening the result, masked. +// MulEvenWidenMasked multiplies even-indexed elements, widening the result. // Result[i] = v1.Even[i] * v2.Even[i]. // +// This operation is applied selectively under a write mask. +// // Asm: VPMULUDQ, CPU Feature: AVX512F func (x Uint64x4) MulEvenWidenMasked(y Uint64x4, mask Mask64x4) Uint64x4 -// MulEvenWidenMasked multiplies even-indexed elements, widening the result, masked. +// MulEvenWidenMasked multiplies even-indexed elements, widening the result. // Result[i] = v1.Even[i] * v2.Even[i]. // +// This operation is applied selectively under a write mask. +// // Asm: VPMULUDQ, CPU Feature: AVX512F func (x Uint64x8) MulEvenWidenMasked(y Uint64x8, mask Mask64x8) Uint64x8 @@ -4705,7 +5467,7 @@ func (x Int16x8) MulHigh(y Int16x8) Int16x8 // Asm: VPMULHW, CPU Feature: AVX2 func (x Int16x16) MulHigh(y Int16x16) Int16x16 -// MulHigh multiplies elements and stores the high part of the result, masked. +// MulHigh multiplies elements and stores the high part of the result. // // Asm: VPMULHW, CPU Feature: AVX512BW func (x Int16x32) MulHigh(y Int16x32) Int16x32 @@ -4720,39 +5482,51 @@ func (x Uint16x8) MulHigh(y Uint16x8) Uint16x8 // Asm: VPMULHUW, CPU Feature: AVX2 func (x Uint16x16) MulHigh(y Uint16x16) Uint16x16 -// MulHigh multiplies elements and stores the high part of the result, masked. +// MulHigh multiplies elements and stores the high part of the result. // // Asm: VPMULHUW, CPU Feature: AVX512BW func (x Uint16x32) MulHigh(y Uint16x32) Uint16x32 /* MulHighMasked */ -// MulHighMasked multiplies elements and stores the high part of the result, masked. +// MulHighMasked multiplies elements and stores the high part of the result. +// +// This operation is applied selectively under a write mask. // // Asm: VPMULHW, CPU Feature: AVX512BW func (x Int16x8) MulHighMasked(y Int16x8, mask Mask16x8) Int16x8 -// MulHighMasked multiplies elements and stores the high part of the result, masked. +// MulHighMasked multiplies elements and stores the high part of the result. +// +// This operation is applied selectively under a write mask. // // Asm: VPMULHW, CPU Feature: AVX512BW func (x Int16x16) MulHighMasked(y Int16x16, mask Mask16x16) Int16x16 -// MulHighMasked multiplies elements and stores the high part of the result, masked. +// MulHighMasked multiplies elements and stores the high part of the result. +// +// This operation is applied selectively under a write mask. // // Asm: VPMULHW, CPU Feature: AVX512BW func (x Int16x32) MulHighMasked(y Int16x32, mask Mask16x32) Int16x32 -// MulHighMasked multiplies elements and stores the high part of the result, masked. +// MulHighMasked multiplies elements and stores the high part of the result. +// +// This operation is applied selectively under a write mask. 
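For the widening multiply, a minimal sketch using only the MulEvenWiden signature and the Result[i] = v1.Even[i] * v2.Even[i] description from the patch; the helper name is hypothetical.

// evenProducts returns the widened products of the even-indexed 32-bit lanes,
// following the documented Result[i] = v1.Even[i] * v2.Even[i].
func evenProducts(x, y simd.Int32x4) simd.Int64x2 {
	return x.MulEvenWiden(y)
}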
// // Asm: VPMULHUW, CPU Feature: AVX512BW func (x Uint16x8) MulHighMasked(y Uint16x8, mask Mask16x8) Uint16x8 -// MulHighMasked multiplies elements and stores the high part of the result, masked. +// MulHighMasked multiplies elements and stores the high part of the result. +// +// This operation is applied selectively under a write mask. // // Asm: VPMULHUW, CPU Feature: AVX512BW func (x Uint16x16) MulHighMasked(y Uint16x16, mask Mask16x16) Uint16x16 -// MulHighMasked multiplies elements and stores the high part of the result, masked. +// MulHighMasked multiplies elements and stores the high part of the result. +// +// This operation is applied selectively under a write mask. // // Asm: VPMULHUW, CPU Feature: AVX512BW func (x Uint16x32) MulHighMasked(y Uint16x32, mask Mask16x32) Uint16x32 @@ -4769,7 +5543,7 @@ func (x Int16x8) MulLow(y Int16x8) Int16x8 // Asm: VPMULLW, CPU Feature: AVX2 func (x Int16x16) MulLow(y Int16x16) Int16x16 -// MulLow multiplies elements and stores the low part of the result, masked. +// MulLow multiplies elements and stores the low part of the result. // // Asm: VPMULLW, CPU Feature: AVX512BW func (x Int16x32) MulLow(y Int16x32) Int16x32 @@ -4784,101 +5558,131 @@ func (x Int32x4) MulLow(y Int32x4) Int32x4 // Asm: VPMULLD, CPU Feature: AVX2 func (x Int32x8) MulLow(y Int32x8) Int32x8 -// MulLow multiplies elements and stores the low part of the result, masked. +// MulLow multiplies elements and stores the low part of the result. // // Asm: VPMULLD, CPU Feature: AVX512F func (x Int32x16) MulLow(y Int32x16) Int32x16 -// MulLow multiplies elements and stores the low part of the result, masked. +// MulLow multiplies elements and stores the low part of the result. // // Asm: VPMULLQ, CPU Feature: AVX512DQ func (x Int64x2) MulLow(y Int64x2) Int64x2 -// MulLow multiplies elements and stores the low part of the result, masked. +// MulLow multiplies elements and stores the low part of the result. // // Asm: VPMULLQ, CPU Feature: AVX512DQ func (x Int64x4) MulLow(y Int64x4) Int64x4 -// MulLow multiplies elements and stores the low part of the result, masked. +// MulLow multiplies elements and stores the low part of the result. // // Asm: VPMULLQ, CPU Feature: AVX512DQ func (x Int64x8) MulLow(y Int64x8) Int64x8 /* MulLowMasked */ -// MulLowMasked multiplies elements and stores the low part of the result, masked. +// MulLowMasked multiplies elements and stores the low part of the result. +// +// This operation is applied selectively under a write mask. // // Asm: VPMULLW, CPU Feature: AVX512BW func (x Int16x8) MulLowMasked(y Int16x8, mask Mask16x8) Int16x8 -// MulLowMasked multiplies elements and stores the low part of the result, masked. +// MulLowMasked multiplies elements and stores the low part of the result. +// +// This operation is applied selectively under a write mask. // // Asm: VPMULLW, CPU Feature: AVX512BW func (x Int16x16) MulLowMasked(y Int16x16, mask Mask16x16) Int16x16 -// MulLowMasked multiplies elements and stores the low part of the result, masked. +// MulLowMasked multiplies elements and stores the low part of the result. +// +// This operation is applied selectively under a write mask. // // Asm: VPMULLW, CPU Feature: AVX512BW func (x Int16x32) MulLowMasked(y Int16x32, mask Mask16x32) Int16x32 -// MulLowMasked multiplies elements and stores the low part of the result, masked. +// MulLowMasked multiplies elements and stores the low part of the result. +// +// This operation is applied selectively under a write mask. 
// // Asm: VPMULLD, CPU Feature: AVX512F func (x Int32x4) MulLowMasked(y Int32x4, mask Mask32x4) Int32x4 -// MulLowMasked multiplies elements and stores the low part of the result, masked. +// MulLowMasked multiplies elements and stores the low part of the result. +// +// This operation is applied selectively under a write mask. // // Asm: VPMULLD, CPU Feature: AVX512F func (x Int32x8) MulLowMasked(y Int32x8, mask Mask32x8) Int32x8 -// MulLowMasked multiplies elements and stores the low part of the result, masked. +// MulLowMasked multiplies elements and stores the low part of the result. +// +// This operation is applied selectively under a write mask. // // Asm: VPMULLD, CPU Feature: AVX512F func (x Int32x16) MulLowMasked(y Int32x16, mask Mask32x16) Int32x16 -// MulLowMasked multiplies elements and stores the low part of the result, masked. +// MulLowMasked multiplies elements and stores the low part of the result. +// +// This operation is applied selectively under a write mask. // // Asm: VPMULLQ, CPU Feature: AVX512DQ func (x Int64x2) MulLowMasked(y Int64x2, mask Mask64x2) Int64x2 -// MulLowMasked multiplies elements and stores the low part of the result, masked. +// MulLowMasked multiplies elements and stores the low part of the result. +// +// This operation is applied selectively under a write mask. // // Asm: VPMULLQ, CPU Feature: AVX512DQ func (x Int64x4) MulLowMasked(y Int64x4, mask Mask64x4) Int64x4 -// MulLowMasked multiplies elements and stores the low part of the result, masked. +// MulLowMasked multiplies elements and stores the low part of the result. +// +// This operation is applied selectively under a write mask. // // Asm: VPMULLQ, CPU Feature: AVX512DQ func (x Int64x8) MulLowMasked(y Int64x8, mask Mask64x8) Int64x8 /* MulMasked */ -// MulMasked multiplies corresponding elements of two vectors, masked. +// MulMasked multiplies corresponding elements of two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VMULPS, CPU Feature: AVX512F func (x Float32x4) MulMasked(y Float32x4, mask Mask32x4) Float32x4 -// MulMasked multiplies corresponding elements of two vectors, masked. +// MulMasked multiplies corresponding elements of two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VMULPS, CPU Feature: AVX512F func (x Float32x8) MulMasked(y Float32x8, mask Mask32x8) Float32x8 -// MulMasked multiplies corresponding elements of two vectors, masked. +// MulMasked multiplies corresponding elements of two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VMULPS, CPU Feature: AVX512F func (x Float32x16) MulMasked(y Float32x16, mask Mask32x16) Float32x16 -// MulMasked multiplies corresponding elements of two vectors, masked. +// MulMasked multiplies corresponding elements of two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VMULPD, CPU Feature: AVX512F func (x Float64x2) MulMasked(y Float64x2, mask Mask64x2) Float64x2 -// MulMasked multiplies corresponding elements of two vectors, masked. +// MulMasked multiplies corresponding elements of two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VMULPD, CPU Feature: AVX512F func (x Float64x4) MulMasked(y Float64x4, mask Mask64x4) Float64x4 -// MulMasked multiplies corresponding elements of two vectors, masked. +// MulMasked multiplies corresponding elements of two vectors. +// +// This operation is applied selectively under a write mask. 
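A sketch showing how the low/high halves documented above pair up: MulLow and MulHigh together cover the full 32-bit product of each pair of 16-bit lanes. The helper is hypothetical and only the two unmasked signatures are taken from the patch.

// productHalves returns the low and high 16-bit halves of the 32-bit products
// of corresponding lanes; recombining them into full products is left to the
// caller.
func productHalves(x, y simd.Int16x8) (lo, hi simd.Int16x8) {
	return x.MulLow(y), x.MulHigh(y)
}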
// // Asm: VMULPD, CPU Feature: AVX512F func (x Float64x8) MulMasked(y Float64x8, mask Mask64x8) Float64x8 @@ -5039,151 +5843,211 @@ func (x Uint64x8) NotEqual(y Uint64x8) Mask64x8 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x4) NotEqualMasked(y Float32x4, mask Mask32x4) Mask32x4 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x8) NotEqualMasked(y Float32x8, mask Mask32x8) Mask32x8 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x16) NotEqualMasked(y Float32x16, mask Mask32x16) Mask32x16 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x2) NotEqualMasked(y Float64x2, mask Mask64x2) Mask64x2 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x4) NotEqualMasked(y Float64x4, mask Mask64x4) Mask64x4 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x8) NotEqualMasked(y Float64x8, mask Mask64x8) Mask64x8 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x16) NotEqualMasked(y Int8x16, mask Mask8x16) Mask8x16 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x32) NotEqualMasked(y Int8x32, mask Mask8x32) Mask8x32 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x64) NotEqualMasked(y Int8x64, mask Mask8x64) Mask8x64 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x8) NotEqualMasked(y Int16x8, mask Mask16x8) Mask16x8 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x16) NotEqualMasked(y Int16x16, mask Mask16x16) Mask16x16 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x32) NotEqualMasked(y Int16x32, mask Mask16x32) Mask16x32 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x4) NotEqualMasked(y Int32x4, mask Mask32x4) Mask32x4 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x8) NotEqualMasked(y Int32x8, mask Mask32x8) Mask32x8 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x16) NotEqualMasked(y Int32x16, mask Mask32x16) Mask32x16 // NotEqualMasked compares for inequality. 
// +// This operation is applied selectively under a write mask. +// // Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x2) NotEqualMasked(y Int64x2, mask Mask64x2) Mask64x2 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x4) NotEqualMasked(y Int64x4, mask Mask64x4) Mask64x4 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x8) NotEqualMasked(y Int64x8, mask Mask64x8) Mask64x8 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x16) NotEqualMasked(y Uint8x16, mask Mask8x16) Mask8x16 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x32) NotEqualMasked(y Uint8x32, mask Mask8x32) Mask8x32 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x64) NotEqualMasked(y Uint8x64, mask Mask8x64) Mask8x64 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x8) NotEqualMasked(y Uint16x8, mask Mask16x8) Mask16x8 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x16) NotEqualMasked(y Uint16x16, mask Mask16x16) Mask16x16 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x32) NotEqualMasked(y Uint16x32, mask Mask16x32) Mask16x32 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x4) NotEqualMasked(y Uint32x4, mask Mask32x4) Mask32x4 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x8) NotEqualMasked(y Uint32x8, mask Mask32x8) Mask32x8 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x16) NotEqualMasked(y Uint32x16, mask Mask32x16) Mask32x16 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x2) NotEqualMasked(y Uint64x2, mask Mask64x2) Mask64x2 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x4) NotEqualMasked(y Uint64x4, mask Mask64x4) Mask64x4 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x8) NotEqualMasked(y Uint64x8, mask Mask64x8) Mask64x8 @@ -5219,7 +6083,7 @@ func (x Int32x4) Or(y Int32x4) Int32x4 // Asm: VPOR, CPU Feature: AVX2 func (x Int32x8) Or(y Int32x8) Int32x8 -// Or performs a masked bitwise OR operation between two vectors. +// Or performs a bitwise OR operation between two vectors. 
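Masks are both produced and consumed by these methods; a sketch, under the same assumed import, of a masked inequality test feeding a masked multiply. The composition and helper name are illustrative only; the NotEqualMasked and MulMasked signatures come from the patch.

// mulWhereChanged multiplies x and y only in lanes where they already differ,
// restricted to the lanes selected by active; the comparison result is itself
// a mask, so it can feed the masked multiply directly.
func mulWhereChanged(x, y simd.Float32x4, active simd.Mask32x4) simd.Float32x4 {
	changed := x.NotEqualMasked(y, active)
	return x.MulMasked(y, changed)
}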
// // Asm: VPORD, CPU Feature: AVX512F func (x Int32x16) Or(y Int32x16) Int32x16 @@ -5234,7 +6098,7 @@ func (x Int64x2) Or(y Int64x2) Int64x2 // Asm: VPOR, CPU Feature: AVX2 func (x Int64x4) Or(y Int64x4) Int64x4 -// Or performs a masked bitwise OR operation between two vectors. +// Or performs a bitwise OR operation between two vectors. // // Asm: VPORQ, CPU Feature: AVX512F func (x Int64x8) Or(y Int64x8) Int64x8 @@ -5269,7 +6133,7 @@ func (x Uint32x4) Or(y Uint32x4) Uint32x4 // Asm: VPOR, CPU Feature: AVX2 func (x Uint32x8) Or(y Uint32x8) Uint32x8 -// Or performs a masked bitwise OR operation between two vectors. +// Or performs a bitwise OR operation between two vectors. // // Asm: VPORD, CPU Feature: AVX512F func (x Uint32x16) Or(y Uint32x16) Uint32x16 @@ -5284,69 +6148,93 @@ func (x Uint64x2) Or(y Uint64x2) Uint64x2 // Asm: VPOR, CPU Feature: AVX2 func (x Uint64x4) Or(y Uint64x4) Uint64x4 -// Or performs a masked bitwise OR operation between two vectors. +// Or performs a bitwise OR operation between two vectors. // // Asm: VPORQ, CPU Feature: AVX512F func (x Uint64x8) Or(y Uint64x8) Uint64x8 /* OrMasked */ -// OrMasked performs a masked bitwise OR operation between two vectors. +// OrMasked performs a bitwise OR operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPORD, CPU Feature: AVX512F func (x Int32x4) OrMasked(y Int32x4, mask Mask32x4) Int32x4 -// OrMasked performs a masked bitwise OR operation between two vectors. +// OrMasked performs a bitwise OR operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPORD, CPU Feature: AVX512F func (x Int32x8) OrMasked(y Int32x8, mask Mask32x8) Int32x8 -// OrMasked performs a masked bitwise OR operation between two vectors. +// OrMasked performs a bitwise OR operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPORD, CPU Feature: AVX512F func (x Int32x16) OrMasked(y Int32x16, mask Mask32x16) Int32x16 -// OrMasked performs a masked bitwise OR operation between two vectors. +// OrMasked performs a bitwise OR operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPORQ, CPU Feature: AVX512F func (x Int64x2) OrMasked(y Int64x2, mask Mask64x2) Int64x2 -// OrMasked performs a masked bitwise OR operation between two vectors. +// OrMasked performs a bitwise OR operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPORQ, CPU Feature: AVX512F func (x Int64x4) OrMasked(y Int64x4, mask Mask64x4) Int64x4 -// OrMasked performs a masked bitwise OR operation between two vectors. +// OrMasked performs a bitwise OR operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPORQ, CPU Feature: AVX512F func (x Int64x8) OrMasked(y Int64x8, mask Mask64x8) Int64x8 -// OrMasked performs a masked bitwise OR operation between two vectors. +// OrMasked performs a bitwise OR operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPORD, CPU Feature: AVX512F func (x Uint32x4) OrMasked(y Uint32x4, mask Mask32x4) Uint32x4 -// OrMasked performs a masked bitwise OR operation between two vectors. +// OrMasked performs a bitwise OR operation between two vectors. +// +// This operation is applied selectively under a write mask. 
// // Asm: VPORD, CPU Feature: AVX512F func (x Uint32x8) OrMasked(y Uint32x8, mask Mask32x8) Uint32x8 -// OrMasked performs a masked bitwise OR operation between two vectors. +// OrMasked performs a bitwise OR operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPORD, CPU Feature: AVX512F func (x Uint32x16) OrMasked(y Uint32x16, mask Mask32x16) Uint32x16 -// OrMasked performs a masked bitwise OR operation between two vectors. +// OrMasked performs a bitwise OR operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPORQ, CPU Feature: AVX512F func (x Uint64x2) OrMasked(y Uint64x2, mask Mask64x2) Uint64x2 -// OrMasked performs a masked bitwise OR operation between two vectors. +// OrMasked performs a bitwise OR operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPORQ, CPU Feature: AVX512F func (x Uint64x4) OrMasked(y Uint64x4, mask Mask64x4) Uint64x4 -// OrMasked performs a masked bitwise OR operation between two vectors. +// OrMasked performs a bitwise OR operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPORQ, CPU Feature: AVX512F func (x Uint64x8) OrMasked(y Uint64x8, mask Mask64x8) Uint64x8 @@ -5392,16 +6280,22 @@ func (x Int16x32) PairDotProdAccumulate(y Int16x32, z Int32x16) Int32x16 // PairDotProdAccumulateMasked performs dot products on pairs of elements of x and y and then adds z. // +// This operation is applied selectively under a write mask. +// // Asm: VPDPWSSD, CPU Feature: AVX512VNNI func (x Int16x8) PairDotProdAccumulateMasked(y Int16x8, z Int32x4, mask Mask32x4) Int32x4 // PairDotProdAccumulateMasked performs dot products on pairs of elements of x and y and then adds z. // +// This operation is applied selectively under a write mask. +// // Asm: VPDPWSSD, CPU Feature: AVX512VNNI func (x Int16x16) PairDotProdAccumulateMasked(y Int16x16, z Int32x8, mask Mask32x8) Int32x8 // PairDotProdAccumulateMasked performs dot products on pairs of elements of x and y and then adds z. // +// This operation is applied selectively under a write mask. +// // Asm: VPDPWSSD, CPU Feature: AVX512VNNI func (x Int16x32) PairDotProdAccumulateMasked(y Int16x32, z Int32x16, mask Mask32x16) Int32x16 @@ -5410,18 +6304,24 @@ func (x Int16x32) PairDotProdAccumulateMasked(y Int16x32, z Int32x16, mask Mask3 // PairDotProdMasked multiplies the elements and add the pairs together, // yielding a vector of half as many elements with twice the input element size. // +// This operation is applied selectively under a write mask. +// // Asm: VPMADDWD, CPU Feature: AVX512BW func (x Int16x8) PairDotProdMasked(y Int16x8, mask Mask16x8) Int32x4 // PairDotProdMasked multiplies the elements and add the pairs together, // yielding a vector of half as many elements with twice the input element size. // +// This operation is applied selectively under a write mask. +// // Asm: VPMADDWD, CPU Feature: AVX512BW func (x Int16x16) PairDotProdMasked(y Int16x16, mask Mask16x16) Int32x8 // PairDotProdMasked multiplies the elements and add the pairs together, // yielding a vector of half as many elements with twice the input element size. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPMADDWD, CPU Feature: AVX512BW func (x Int16x32) PairDotProdMasked(y Int16x32, mask Mask16x32) Int32x16 @@ -5992,6 +6892,8 @@ func (x Uint64x8) Permute2(y Uint64x8, indices Uint64x8) Uint64x8 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2B, CPU Feature: AVX512VBMI func (x Int8x16) Permute2Masked(y Int8x16, indices Uint8x16, mask Mask8x16) Int8x16 @@ -6000,6 +6902,8 @@ func (x Int8x16) Permute2Masked(y Int8x16, indices Uint8x16, mask Mask8x16) Int8 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2B, CPU Feature: AVX512VBMI func (x Uint8x16) Permute2Masked(y Uint8x16, indices Uint8x16, mask Mask8x16) Uint8x16 @@ -6008,6 +6912,8 @@ func (x Uint8x16) Permute2Masked(y Uint8x16, indices Uint8x16, mask Mask8x16) Ui // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2B, CPU Feature: AVX512VBMI func (x Int8x32) Permute2Masked(y Int8x32, indices Uint8x32, mask Mask8x32) Int8x32 @@ -6016,6 +6922,8 @@ func (x Int8x32) Permute2Masked(y Int8x32, indices Uint8x32, mask Mask8x32) Int8 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2B, CPU Feature: AVX512VBMI func (x Uint8x32) Permute2Masked(y Uint8x32, indices Uint8x32, mask Mask8x32) Uint8x32 @@ -6024,6 +6932,8 @@ func (x Uint8x32) Permute2Masked(y Uint8x32, indices Uint8x32, mask Mask8x32) Ui // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2B, CPU Feature: AVX512VBMI func (x Int8x64) Permute2Masked(y Int8x64, indices Uint8x64, mask Mask8x64) Int8x64 @@ -6032,6 +6942,8 @@ func (x Int8x64) Permute2Masked(y Int8x64, indices Uint8x64, mask Mask8x64) Int8 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2B, CPU Feature: AVX512VBMI func (x Uint8x64) Permute2Masked(y Uint8x64, indices Uint8x64, mask Mask8x64) Uint8x64 @@ -6040,6 +6952,8 @@ func (x Uint8x64) Permute2Masked(y Uint8x64, indices Uint8x64, mask Mask8x64) Ui // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2W, CPU Feature: AVX512BW func (x Int16x8) Permute2Masked(y Int16x8, indices Uint16x8, mask Mask16x8) Int16x8 @@ -6048,6 +6962,8 @@ func (x Int16x8) Permute2Masked(y Int16x8, indices Uint16x8, mask Mask16x8) Int1 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2W, CPU Feature: AVX512BW func (x Uint16x8) Permute2Masked(y Uint16x8, indices Uint16x8, mask Mask16x8) Uint16x8 @@ -6056,6 +6972,8 @@ func (x Uint16x8) Permute2Masked(y Uint16x8, indices Uint16x8, mask Mask16x8) Ui // where xy is x appending y. 
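A sketch of the masked dot-product accumulation above, matching the documented "performs dot products on pairs of elements of x and y and then adds z"; the helper name is hypothetical.

// accumulateDot adds the pairwise dot products of x and y into acc for the
// lanes selected by m (VPDPWSSD).
func accumulateDot(acc simd.Int32x4, x, y simd.Int16x8, m simd.Mask32x4) simd.Int32x4 {
	return x.PairDotProdAccumulateMasked(y, acc, m)
}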
// Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2W, CPU Feature: AVX512BW func (x Int16x16) Permute2Masked(y Int16x16, indices Uint16x16, mask Mask16x16) Int16x16 @@ -6064,6 +6982,8 @@ func (x Int16x16) Permute2Masked(y Int16x16, indices Uint16x16, mask Mask16x16) // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2W, CPU Feature: AVX512BW func (x Uint16x16) Permute2Masked(y Uint16x16, indices Uint16x16, mask Mask16x16) Uint16x16 @@ -6072,6 +6992,8 @@ func (x Uint16x16) Permute2Masked(y Uint16x16, indices Uint16x16, mask Mask16x16 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2W, CPU Feature: AVX512BW func (x Int16x32) Permute2Masked(y Int16x32, indices Uint16x32, mask Mask16x32) Int16x32 @@ -6080,6 +7002,8 @@ func (x Int16x32) Permute2Masked(y Int16x32, indices Uint16x32, mask Mask16x32) // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2W, CPU Feature: AVX512BW func (x Uint16x32) Permute2Masked(y Uint16x32, indices Uint16x32, mask Mask16x32) Uint16x32 @@ -6088,6 +7012,8 @@ func (x Uint16x32) Permute2Masked(y Uint16x32, indices Uint16x32, mask Mask16x32 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2PS, CPU Feature: AVX512F func (x Float32x4) Permute2Masked(y Float32x4, indices Uint32x4, mask Mask32x4) Float32x4 @@ -6096,6 +7022,8 @@ func (x Float32x4) Permute2Masked(y Float32x4, indices Uint32x4, mask Mask32x4) // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2D, CPU Feature: AVX512F func (x Int32x4) Permute2Masked(y Int32x4, indices Uint32x4, mask Mask32x4) Int32x4 @@ -6104,6 +7032,8 @@ func (x Int32x4) Permute2Masked(y Int32x4, indices Uint32x4, mask Mask32x4) Int3 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2D, CPU Feature: AVX512F func (x Uint32x4) Permute2Masked(y Uint32x4, indices Uint32x4, mask Mask32x4) Uint32x4 @@ -6112,6 +7042,8 @@ func (x Uint32x4) Permute2Masked(y Uint32x4, indices Uint32x4, mask Mask32x4) Ui // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2PS, CPU Feature: AVX512F func (x Float32x8) Permute2Masked(y Float32x8, indices Uint32x8, mask Mask32x8) Float32x8 @@ -6120,6 +7052,8 @@ func (x Float32x8) Permute2Masked(y Float32x8, indices Uint32x8, mask Mask32x8) // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPERMI2D, CPU Feature: AVX512F func (x Int32x8) Permute2Masked(y Int32x8, indices Uint32x8, mask Mask32x8) Int32x8 @@ -6128,6 +7062,8 @@ func (x Int32x8) Permute2Masked(y Int32x8, indices Uint32x8, mask Mask32x8) Int3 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2D, CPU Feature: AVX512F func (x Uint32x8) Permute2Masked(y Uint32x8, indices Uint32x8, mask Mask32x8) Uint32x8 @@ -6136,6 +7072,8 @@ func (x Uint32x8) Permute2Masked(y Uint32x8, indices Uint32x8, mask Mask32x8) Ui // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2PS, CPU Feature: AVX512F func (x Float32x16) Permute2Masked(y Float32x16, indices Uint32x16, mask Mask32x16) Float32x16 @@ -6144,6 +7082,8 @@ func (x Float32x16) Permute2Masked(y Float32x16, indices Uint32x16, mask Mask32x // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2D, CPU Feature: AVX512F func (x Int32x16) Permute2Masked(y Int32x16, indices Uint32x16, mask Mask32x16) Int32x16 @@ -6152,6 +7092,8 @@ func (x Int32x16) Permute2Masked(y Int32x16, indices Uint32x16, mask Mask32x16) // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2D, CPU Feature: AVX512F func (x Uint32x16) Permute2Masked(y Uint32x16, indices Uint32x16, mask Mask32x16) Uint32x16 @@ -6160,6 +7102,8 @@ func (x Uint32x16) Permute2Masked(y Uint32x16, indices Uint32x16, mask Mask32x16 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2PD, CPU Feature: AVX512F func (x Float64x2) Permute2Masked(y Float64x2, indices Uint64x2, mask Mask64x2) Float64x2 @@ -6168,6 +7112,8 @@ func (x Float64x2) Permute2Masked(y Float64x2, indices Uint64x2, mask Mask64x2) // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2Q, CPU Feature: AVX512F func (x Int64x2) Permute2Masked(y Int64x2, indices Uint64x2, mask Mask64x2) Int64x2 @@ -6176,6 +7122,8 @@ func (x Int64x2) Permute2Masked(y Int64x2, indices Uint64x2, mask Mask64x2) Int6 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2Q, CPU Feature: AVX512F func (x Uint64x2) Permute2Masked(y Uint64x2, indices Uint64x2, mask Mask64x2) Uint64x2 @@ -6184,6 +7132,8 @@ func (x Uint64x2) Permute2Masked(y Uint64x2, indices Uint64x2, mask Mask64x2) Ui // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2PD, CPU Feature: AVX512F func (x Float64x4) Permute2Masked(y Float64x4, indices Uint64x4, mask Mask64x4) Float64x4 @@ -6192,6 +7142,8 @@ func (x Float64x4) Permute2Masked(y Float64x4, indices Uint64x4, mask Mask64x4) // where xy is x appending y. 
// Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2Q, CPU Feature: AVX512F func (x Int64x4) Permute2Masked(y Int64x4, indices Uint64x4, mask Mask64x4) Int64x4 @@ -6200,6 +7152,8 @@ func (x Int64x4) Permute2Masked(y Int64x4, indices Uint64x4, mask Mask64x4) Int6 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2Q, CPU Feature: AVX512F func (x Uint64x4) Permute2Masked(y Uint64x4, indices Uint64x4, mask Mask64x4) Uint64x4 @@ -6208,6 +7162,8 @@ func (x Uint64x4) Permute2Masked(y Uint64x4, indices Uint64x4, mask Mask64x4) Ui // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2PD, CPU Feature: AVX512F func (x Float64x8) Permute2Masked(y Float64x8, indices Uint64x8, mask Mask64x8) Float64x8 @@ -6216,6 +7172,8 @@ func (x Float64x8) Permute2Masked(y Float64x8, indices Uint64x8, mask Mask64x8) // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2Q, CPU Feature: AVX512F func (x Int64x8) Permute2Masked(y Int64x8, indices Uint64x8, mask Mask64x8) Int64x8 @@ -6224,6 +7182,8 @@ func (x Int64x8) Permute2Masked(y Int64x8, indices Uint64x8, mask Mask64x8) Int6 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2Q, CPU Feature: AVX512F func (x Uint64x8) Permute2Masked(y Uint64x8, indices Uint64x8, mask Mask64x8) Uint64x8 @@ -6233,6 +7193,8 @@ func (x Uint64x8) Permute2Masked(y Uint64x8, indices Uint64x8, mask Mask64x8) Ui // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMB, CPU Feature: AVX512VBMI func (x Int8x16) PermuteMasked(indices Uint8x16, mask Mask8x16) Int8x16 @@ -6240,6 +7202,8 @@ func (x Int8x16) PermuteMasked(indices Uint8x16, mask Mask8x16) Int8x16 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMB, CPU Feature: AVX512VBMI func (x Uint8x16) PermuteMasked(indices Uint8x16, mask Mask8x16) Uint8x16 @@ -6247,6 +7211,8 @@ func (x Uint8x16) PermuteMasked(indices Uint8x16, mask Mask8x16) Uint8x16 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMB, CPU Feature: AVX512VBMI func (x Int8x32) PermuteMasked(indices Uint8x32, mask Mask8x32) Int8x32 @@ -6254,6 +7220,8 @@ func (x Int8x32) PermuteMasked(indices Uint8x32, mask Mask8x32) Int8x32 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // +// This operation is applied selectively under a write mask. 
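A sketch of the two-source masked shuffle documented above, with the indices interpreted per the doc comment (they index into xy, the concatenation of x and y); the helper name is hypothetical.

// shuffleTwo gathers bytes from the concatenation of x and y as directed by
// indices, under the write mask m (VPERMI2B).
func shuffleTwo(x, y simd.Int8x16, indices simd.Uint8x16, m simd.Mask8x16) simd.Int8x16 {
	return x.Permute2Masked(y, indices, m)
}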
+// // Asm: VPERMB, CPU Feature: AVX512VBMI func (x Uint8x32) PermuteMasked(indices Uint8x32, mask Mask8x32) Uint8x32 @@ -6261,6 +7229,8 @@ func (x Uint8x32) PermuteMasked(indices Uint8x32, mask Mask8x32) Uint8x32 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMB, CPU Feature: AVX512VBMI func (x Int8x64) PermuteMasked(indices Uint8x64, mask Mask8x64) Int8x64 @@ -6268,6 +7238,8 @@ func (x Int8x64) PermuteMasked(indices Uint8x64, mask Mask8x64) Int8x64 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMB, CPU Feature: AVX512VBMI func (x Uint8x64) PermuteMasked(indices Uint8x64, mask Mask8x64) Uint8x64 @@ -6275,6 +7247,8 @@ func (x Uint8x64) PermuteMasked(indices Uint8x64, mask Mask8x64) Uint8x64 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMW, CPU Feature: AVX512BW func (x Int16x8) PermuteMasked(indices Uint16x8, mask Mask16x8) Int16x8 @@ -6282,6 +7256,8 @@ func (x Int16x8) PermuteMasked(indices Uint16x8, mask Mask16x8) Int16x8 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMW, CPU Feature: AVX512BW func (x Uint16x8) PermuteMasked(indices Uint16x8, mask Mask16x8) Uint16x8 @@ -6289,6 +7265,8 @@ func (x Uint16x8) PermuteMasked(indices Uint16x8, mask Mask16x8) Uint16x8 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMW, CPU Feature: AVX512BW func (x Int16x16) PermuteMasked(indices Uint16x16, mask Mask16x16) Int16x16 @@ -6296,6 +7274,8 @@ func (x Int16x16) PermuteMasked(indices Uint16x16, mask Mask16x16) Int16x16 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMW, CPU Feature: AVX512BW func (x Uint16x16) PermuteMasked(indices Uint16x16, mask Mask16x16) Uint16x16 @@ -6303,6 +7283,8 @@ func (x Uint16x16) PermuteMasked(indices Uint16x16, mask Mask16x16) Uint16x16 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMW, CPU Feature: AVX512BW func (x Int16x32) PermuteMasked(indices Uint16x32, mask Mask16x32) Int16x32 @@ -6310,6 +7292,8 @@ func (x Int16x32) PermuteMasked(indices Uint16x32, mask Mask16x32) Int16x32 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPERMW, CPU Feature: AVX512BW func (x Uint16x32) PermuteMasked(indices Uint16x32, mask Mask16x32) Uint16x32 @@ -6317,6 +7301,8 @@ func (x Uint16x32) PermuteMasked(indices Uint16x32, mask Mask16x32) Uint16x32 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMPS, CPU Feature: AVX512F func (x Float32x8) PermuteMasked(indices Uint32x8, mask Mask32x8) Float32x8 @@ -6324,6 +7310,8 @@ func (x Float32x8) PermuteMasked(indices Uint32x8, mask Mask32x8) Float32x8 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMD, CPU Feature: AVX512F func (x Int32x8) PermuteMasked(indices Uint32x8, mask Mask32x8) Int32x8 @@ -6331,6 +7319,8 @@ func (x Int32x8) PermuteMasked(indices Uint32x8, mask Mask32x8) Int32x8 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMD, CPU Feature: AVX512F func (x Uint32x8) PermuteMasked(indices Uint32x8, mask Mask32x8) Uint32x8 @@ -6338,6 +7328,8 @@ func (x Uint32x8) PermuteMasked(indices Uint32x8, mask Mask32x8) Uint32x8 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMPS, CPU Feature: AVX512F func (x Float32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Float32x16 @@ -6345,6 +7337,8 @@ func (x Float32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Float32x16 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMD, CPU Feature: AVX512F func (x Int32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Int32x16 @@ -6352,6 +7346,8 @@ func (x Int32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Int32x16 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMD, CPU Feature: AVX512F func (x Uint32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Uint32x16 @@ -6359,6 +7355,8 @@ func (x Uint32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Uint32x16 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMPD, CPU Feature: AVX512F func (x Float64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Float64x4 @@ -6366,6 +7364,8 @@ func (x Float64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Float64x4 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // +// This operation is applied selectively under a write mask. 
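// Illustrative sketch (editor's addition, not part of the generated API or this
// patch): a masked permute writes only the lanes selected by the mask. The
// function name below is hypothetical; the mask would come from a comparison or
// conversion elsewhere.
func examplePermuteMasked(x Float64x4, idx Uint64x4, m Mask64x4) Float64x4 {
	// Gather x's elements in the order given by idx, applied selectively to
	// the lanes selected by m (VPERMPD under AVX512F).
	return x.PermuteMasked(idx, m)
}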
+// // Asm: VPERMQ, CPU Feature: AVX512F func (x Int64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Int64x4 @@ -6373,6 +7373,8 @@ func (x Int64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Int64x4 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMQ, CPU Feature: AVX512F func (x Uint64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Uint64x4 @@ -6380,6 +7382,8 @@ func (x Uint64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Uint64x4 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMPD, CPU Feature: AVX512F func (x Float64x8) PermuteMasked(indices Uint64x8, mask Mask64x8) Float64x8 @@ -6387,6 +7391,8 @@ func (x Float64x8) PermuteMasked(indices Uint64x8, mask Mask64x8) Float64x8 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMQ, CPU Feature: AVX512F func (x Int64x8) PermuteMasked(indices Uint64x8, mask Mask64x8) Int64x8 @@ -6394,6 +7400,8 @@ func (x Int64x8) PermuteMasked(indices Uint64x8, mask Mask64x8) Int64x8 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMQ, CPU Feature: AVX512F func (x Uint64x8) PermuteMasked(indices Uint64x8, mask Mask64x8) Uint64x8 @@ -6523,121 +7531,169 @@ func (x Uint64x8) PopCount() Uint64x8 // PopCountMasked counts the number of set bits in each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPOPCNTB, CPU Feature: AVX512BITALG func (x Int8x16) PopCountMasked(mask Mask8x16) Int8x16 // PopCountMasked counts the number of set bits in each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPOPCNTB, CPU Feature: AVX512BITALG func (x Int8x32) PopCountMasked(mask Mask8x32) Int8x32 // PopCountMasked counts the number of set bits in each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPOPCNTB, CPU Feature: AVX512BITALG func (x Int8x64) PopCountMasked(mask Mask8x64) Int8x64 // PopCountMasked counts the number of set bits in each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPOPCNTW, CPU Feature: AVX512BITALG func (x Int16x8) PopCountMasked(mask Mask16x8) Int16x8 // PopCountMasked counts the number of set bits in each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPOPCNTW, CPU Feature: AVX512BITALG func (x Int16x16) PopCountMasked(mask Mask16x16) Int16x16 // PopCountMasked counts the number of set bits in each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPOPCNTW, CPU Feature: AVX512BITALG func (x Int16x32) PopCountMasked(mask Mask16x32) Int16x32 // PopCountMasked counts the number of set bits in each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ func (x Int32x4) PopCountMasked(mask Mask32x4) Int32x4 // PopCountMasked counts the number of set bits in each element. 
// +// This operation is applied selectively under a write mask. +// // Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ func (x Int32x8) PopCountMasked(mask Mask32x8) Int32x8 // PopCountMasked counts the number of set bits in each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ func (x Int32x16) PopCountMasked(mask Mask32x16) Int32x16 // PopCountMasked counts the number of set bits in each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ func (x Int64x2) PopCountMasked(mask Mask64x2) Int64x2 // PopCountMasked counts the number of set bits in each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ func (x Int64x4) PopCountMasked(mask Mask64x4) Int64x4 // PopCountMasked counts the number of set bits in each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ func (x Int64x8) PopCountMasked(mask Mask64x8) Int64x8 // PopCountMasked counts the number of set bits in each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPOPCNTB, CPU Feature: AVX512BITALG func (x Uint8x16) PopCountMasked(mask Mask8x16) Uint8x16 // PopCountMasked counts the number of set bits in each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPOPCNTB, CPU Feature: AVX512BITALG func (x Uint8x32) PopCountMasked(mask Mask8x32) Uint8x32 // PopCountMasked counts the number of set bits in each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPOPCNTB, CPU Feature: AVX512BITALG func (x Uint8x64) PopCountMasked(mask Mask8x64) Uint8x64 // PopCountMasked counts the number of set bits in each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPOPCNTW, CPU Feature: AVX512BITALG func (x Uint16x8) PopCountMasked(mask Mask16x8) Uint16x8 // PopCountMasked counts the number of set bits in each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPOPCNTW, CPU Feature: AVX512BITALG func (x Uint16x16) PopCountMasked(mask Mask16x16) Uint16x16 // PopCountMasked counts the number of set bits in each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPOPCNTW, CPU Feature: AVX512BITALG func (x Uint16x32) PopCountMasked(mask Mask16x32) Uint16x32 // PopCountMasked counts the number of set bits in each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ func (x Uint32x4) PopCountMasked(mask Mask32x4) Uint32x4 // PopCountMasked counts the number of set bits in each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ func (x Uint32x8) PopCountMasked(mask Mask32x8) Uint32x8 // PopCountMasked counts the number of set bits in each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ func (x Uint32x16) PopCountMasked(mask Mask32x16) Uint32x16 // PopCountMasked counts the number of set bits in each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ func (x Uint64x2) PopCountMasked(mask Mask64x2) Uint64x2 // PopCountMasked counts the number of set bits in each element. 
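// Illustrative sketch (editor's addition, not part of the generated API or this
// patch): per-lane population count applied only where the write mask selects.
// The helper name is hypothetical.
func examplePopCountMasked(x Uint32x4, m Mask32x4) Uint32x4 {
	// Count the set bits in each 32-bit lane selected by m (VPOPCNTD).
	return x.PopCountMasked(m)
}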
// +// This operation is applied selectively under a write mask. +// // Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ func (x Uint64x4) PopCountMasked(mask Mask64x4) Uint64x4 // PopCountMasked counts the number of set bits in each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ func (x Uint64x8) PopCountMasked(mask Mask64x8) Uint64x8 @@ -6731,6 +7787,8 @@ func (x Uint64x8) RotateAllLeft(shift uint8) Uint64x8 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPROLD, CPU Feature: AVX512F @@ -6738,6 +7796,8 @@ func (x Int32x4) RotateAllLeftMasked(shift uint8, mask Mask32x4) Int32x4 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPROLD, CPU Feature: AVX512F @@ -6745,6 +7805,8 @@ func (x Int32x8) RotateAllLeftMasked(shift uint8, mask Mask32x8) Int32x8 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPROLD, CPU Feature: AVX512F @@ -6752,6 +7814,8 @@ func (x Int32x16) RotateAllLeftMasked(shift uint8, mask Mask32x16) Int32x16 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPROLQ, CPU Feature: AVX512F @@ -6759,6 +7823,8 @@ func (x Int64x2) RotateAllLeftMasked(shift uint8, mask Mask64x2) Int64x2 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPROLQ, CPU Feature: AVX512F @@ -6766,6 +7832,8 @@ func (x Int64x4) RotateAllLeftMasked(shift uint8, mask Mask64x4) Int64x4 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPROLQ, CPU Feature: AVX512F @@ -6773,6 +7841,8 @@ func (x Int64x8) RotateAllLeftMasked(shift uint8, mask Mask64x8) Int64x8 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPROLD, CPU Feature: AVX512F @@ -6780,6 +7850,8 @@ func (x Uint32x4) RotateAllLeftMasked(shift uint8, mask Mask32x4) Uint32x4 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPROLD, CPU Feature: AVX512F @@ -6787,6 +7859,8 @@ func (x Uint32x8) RotateAllLeftMasked(shift uint8, mask Mask32x8) Uint32x8 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPROLD, CPU Feature: AVX512F @@ -6794,6 +7868,8 @@ func (x Uint32x16) RotateAllLeftMasked(shift uint8, mask Mask32x16) Uint32x16 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPROLQ, CPU Feature: AVX512F @@ -6801,6 +7877,8 @@ func (x Uint64x2) RotateAllLeftMasked(shift uint8, mask Mask64x2) Uint64x2 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPROLQ, CPU Feature: AVX512F @@ -6808,6 +7886,8 @@ func (x Uint64x4) RotateAllLeftMasked(shift uint8, mask Mask64x4) Uint64x4 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPROLQ, CPU Feature: AVX512F @@ -6903,6 +7983,8 @@ func (x Uint64x8) RotateAllRight(shift uint8) Uint64x8 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPRORD, CPU Feature: AVX512F @@ -6910,6 +7992,8 @@ func (x Int32x4) RotateAllRightMasked(shift uint8, mask Mask32x4) Int32x4 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPRORD, CPU Feature: AVX512F @@ -6917,6 +8001,8 @@ func (x Int32x8) RotateAllRightMasked(shift uint8, mask Mask32x8) Int32x8 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPRORD, CPU Feature: AVX512F @@ -6924,6 +8010,8 @@ func (x Int32x16) RotateAllRightMasked(shift uint8, mask Mask32x16) Int32x16 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPRORQ, CPU Feature: AVX512F @@ -6931,6 +8019,8 @@ func (x Int64x2) RotateAllRightMasked(shift uint8, mask Mask64x2) Int64x2 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // +// This operation is applied selectively under a write mask. 
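// Illustrative sketch (editor's addition, not part of the generated API or this
// patch): the shift count of the "All" rotate is an immediate, so it must be a
// constant literal; a non-constant argument panics at run time.
func exampleRotateAllLeftMasked(x Uint32x8, m Mask32x8) Uint32x8 {
	// Rotate every selected 32-bit lane left by 3 bits (VPROLD).
	return x.RotateAllLeftMasked(3, m)
}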
+// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPRORQ, CPU Feature: AVX512F @@ -6938,6 +8028,8 @@ func (x Int64x4) RotateAllRightMasked(shift uint8, mask Mask64x4) Int64x4 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPRORQ, CPU Feature: AVX512F @@ -6945,6 +8037,8 @@ func (x Int64x8) RotateAllRightMasked(shift uint8, mask Mask64x8) Int64x8 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPRORD, CPU Feature: AVX512F @@ -6952,6 +8046,8 @@ func (x Uint32x4) RotateAllRightMasked(shift uint8, mask Mask32x4) Uint32x4 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPRORD, CPU Feature: AVX512F @@ -6959,6 +8055,8 @@ func (x Uint32x8) RotateAllRightMasked(shift uint8, mask Mask32x8) Uint32x8 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPRORD, CPU Feature: AVX512F @@ -6966,6 +8064,8 @@ func (x Uint32x16) RotateAllRightMasked(shift uint8, mask Mask32x16) Uint32x16 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPRORQ, CPU Feature: AVX512F @@ -6973,6 +8073,8 @@ func (x Uint64x2) RotateAllRightMasked(shift uint8, mask Mask64x2) Uint64x2 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPRORQ, CPU Feature: AVX512F @@ -6980,6 +8082,8 @@ func (x Uint64x4) RotateAllRightMasked(shift uint8, mask Mask64x4) Uint64x4 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPRORQ, CPU Feature: AVX512F @@ -7051,61 +8155,85 @@ func (x Uint64x8) RotateLeft(y Uint64x8) Uint64x8 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPROLVD, CPU Feature: AVX512F func (x Int32x4) RotateLeftMasked(y Int32x4, mask Mask32x4) Int32x4 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPROLVD, CPU Feature: AVX512F func (x Int32x8) RotateLeftMasked(y Int32x8, mask Mask32x8) Int32x8 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPROLVD, CPU Feature: AVX512F func (x Int32x16) RotateLeftMasked(y Int32x16, mask Mask32x16) Int32x16 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPROLVQ, CPU Feature: AVX512F func (x Int64x2) RotateLeftMasked(y Int64x2, mask Mask64x2) Int64x2 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPROLVQ, CPU Feature: AVX512F func (x Int64x4) RotateLeftMasked(y Int64x4, mask Mask64x4) Int64x4 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPROLVQ, CPU Feature: AVX512F func (x Int64x8) RotateLeftMasked(y Int64x8, mask Mask64x8) Int64x8 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPROLVD, CPU Feature: AVX512F func (x Uint32x4) RotateLeftMasked(y Uint32x4, mask Mask32x4) Uint32x4 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPROLVD, CPU Feature: AVX512F func (x Uint32x8) RotateLeftMasked(y Uint32x8, mask Mask32x8) Uint32x8 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPROLVD, CPU Feature: AVX512F func (x Uint32x16) RotateLeftMasked(y Uint32x16, mask Mask32x16) Uint32x16 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPROLVQ, CPU Feature: AVX512F func (x Uint64x2) RotateLeftMasked(y Uint64x2, mask Mask64x2) Uint64x2 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPROLVQ, CPU Feature: AVX512F func (x Uint64x4) RotateLeftMasked(y Uint64x4, mask Mask64x4) Uint64x4 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPROLVQ, CPU Feature: AVX512F func (x Uint64x8) RotateLeftMasked(y Uint64x8, mask Mask64x8) Uint64x8 @@ -7175,61 +8303,85 @@ func (x Uint64x8) RotateRight(y Uint64x8) Uint64x8 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPRORVD, CPU Feature: AVX512F func (x Int32x4) RotateRightMasked(y Int32x4, mask Mask32x4) Int32x4 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPRORVD, CPU Feature: AVX512F func (x Int32x8) RotateRightMasked(y Int32x8, mask Mask32x8) Int32x8 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPRORVD, CPU Feature: AVX512F func (x Int32x16) RotateRightMasked(y Int32x16, mask Mask32x16) Int32x16 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPRORVQ, CPU Feature: AVX512F func (x Int64x2) RotateRightMasked(y Int64x2, mask Mask64x2) Int64x2 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPRORVQ, CPU Feature: AVX512F func (x Int64x4) RotateRightMasked(y Int64x4, mask Mask64x4) Int64x4 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPRORVQ, CPU Feature: AVX512F func (x Int64x8) RotateRightMasked(y Int64x8, mask Mask64x8) Int64x8 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPRORVD, CPU Feature: AVX512F func (x Uint32x4) RotateRightMasked(y Uint32x4, mask Mask32x4) Uint32x4 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPRORVD, CPU Feature: AVX512F func (x Uint32x8) RotateRightMasked(y Uint32x8, mask Mask32x8) Uint32x8 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPRORVD, CPU Feature: AVX512F func (x Uint32x16) RotateRightMasked(y Uint32x16, mask Mask32x16) Uint32x16 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPRORVQ, CPU Feature: AVX512F func (x Uint64x2) RotateRightMasked(y Uint64x2, mask Mask64x2) Uint64x2 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPRORVQ, CPU Feature: AVX512F func (x Uint64x4) RotateRightMasked(y Uint64x4, mask Mask64x4) Uint64x4 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // +// This operation is applied selectively under a write mask. 
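// Illustrative sketch (editor's addition, not part of the generated API or this
// patch): unlike the "All" variants, the per-lane rotate takes its counts from
// the corresponding lanes of another vector.
func exampleRotateRightMasked(x, counts Uint64x2, m Mask64x2) Uint64x2 {
	// Rotate each selected lane of x right by the count held in the matching
	// lane of counts (VPRORVQ).
	return x.RotateRightMasked(counts, m)
}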
+// // Asm: VPRORVQ, CPU Feature: AVX512F func (x Uint64x8) RotateRightMasked(y Uint64x8, mask Mask64x8) Uint64x8 @@ -7303,6 +8455,8 @@ func (x Float64x8) RoundWithPrecision(prec uint8) Float64x8 // RoundWithPrecisionMasked rounds elements with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F @@ -7310,6 +8464,8 @@ func (x Float32x4) RoundWithPrecisionMasked(prec uint8, mask Mask32x4) Float32x4 // RoundWithPrecisionMasked rounds elements with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F @@ -7317,6 +8473,8 @@ func (x Float32x8) RoundWithPrecisionMasked(prec uint8, mask Mask32x8) Float32x8 // RoundWithPrecisionMasked rounds elements with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F @@ -7324,6 +8482,8 @@ func (x Float32x16) RoundWithPrecisionMasked(prec uint8, mask Mask32x16) Float32 // RoundWithPrecisionMasked rounds elements with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F @@ -7331,6 +8491,8 @@ func (x Float64x2) RoundWithPrecisionMasked(prec uint8, mask Mask64x2) Float64x2 // RoundWithPrecisionMasked rounds elements with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F @@ -7338,6 +8500,8 @@ func (x Float64x4) RoundWithPrecisionMasked(prec uint8, mask Mask64x4) Float64x4 // RoundWithPrecisionMasked rounds elements with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F @@ -7409,61 +8573,85 @@ func (x Uint16x32) SaturatedAdd(y Uint16x32) Uint16x32 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDSB, CPU Feature: AVX512BW func (x Int8x16) SaturatedAddMasked(y Int8x16, mask Mask8x16) Int8x16 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDSB, CPU Feature: AVX512BW func (x Int8x32) SaturatedAddMasked(y Int8x32, mask Mask8x32) Int8x32 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDSB, CPU Feature: AVX512BW func (x Int8x64) SaturatedAddMasked(y Int8x64, mask Mask8x64) Int8x64 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDSW, CPU Feature: AVX512BW func (x Int16x8) SaturatedAddMasked(y Int16x8, mask Mask16x8) Int16x8 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. 
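// Illustrative sketch (editor's addition, not part of the generated API or this
// patch): prec must be a compile-time constant, so it appears as a literal;
// non-constant values panic at run time.
func exampleRoundWithPrecisionMasked(x Float32x4, m Mask32x4) Float32x4 {
	// Round each selected lane with precision 2 (VRNDSCALEPS).
	return x.RoundWithPrecisionMasked(2, m)
}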
// +// This operation is applied selectively under a write mask. +// // Asm: VPADDSW, CPU Feature: AVX512BW func (x Int16x16) SaturatedAddMasked(y Int16x16, mask Mask16x16) Int16x16 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDSW, CPU Feature: AVX512BW func (x Int16x32) SaturatedAddMasked(y Int16x32, mask Mask16x32) Int16x32 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDSB, CPU Feature: AVX512BW func (x Uint8x16) SaturatedAddMasked(y Uint8x16, mask Mask8x16) Uint8x16 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDSB, CPU Feature: AVX512BW func (x Uint8x32) SaturatedAddMasked(y Uint8x32, mask Mask8x32) Uint8x32 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDSB, CPU Feature: AVX512BW func (x Uint8x64) SaturatedAddMasked(y Uint8x64, mask Mask8x64) Uint8x64 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDSW, CPU Feature: AVX512BW func (x Uint16x8) SaturatedAddMasked(y Uint16x8, mask Mask16x8) Uint16x8 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDSW, CPU Feature: AVX512BW func (x Uint16x16) SaturatedAddMasked(y Uint16x16, mask Mask16x16) Uint16x16 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDSW, CPU Feature: AVX512BW func (x Uint16x32) SaturatedAddMasked(y Uint16x32, mask Mask16x32) Uint16x32 @@ -7488,16 +8676,22 @@ func (x Int16x32) SaturatedPairDotProdAccumulate(y Int16x32, z Int32x16) Int32x1 // SaturatedPairDotProdAccumulateMasked performs dot products on pairs of elements of x and y and then adds z. // +// This operation is applied selectively under a write mask. +// // Asm: VPDPWSSDS, CPU Feature: AVX512VNNI func (x Int16x8) SaturatedPairDotProdAccumulateMasked(y Int16x8, z Int32x4, mask Mask32x4) Int32x4 // SaturatedPairDotProdAccumulateMasked performs dot products on pairs of elements of x and y and then adds z. // +// This operation is applied selectively under a write mask. +// // Asm: VPDPWSSDS, CPU Feature: AVX512VNNI func (x Int16x16) SaturatedPairDotProdAccumulateMasked(y Int16x16, z Int32x8, mask Mask32x8) Int32x8 // SaturatedPairDotProdAccumulateMasked performs dot products on pairs of elements of x and y and then adds z. // +// This operation is applied selectively under a write mask. +// // Asm: VPDPWSSDS, CPU Feature: AVX512VNNI func (x Int16x32) SaturatedPairDotProdAccumulateMasked(y Int16x32, z Int32x16, mask Mask32x16) Int32x16 @@ -7595,61 +8789,85 @@ func (x Uint16x32) SaturatedSub(y Uint16x32) Uint16x32 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBSB, CPU Feature: AVX512BW func (x Int8x16) SaturatedSubMasked(y Int8x16, mask Mask8x16) Int8x16 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. 
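// Illustrative sketch (editor's addition, not part of the generated API or this
// patch): a saturating add under a mask, so selected lanes clamp at the type's
// limits instead of wrapping.
func exampleSaturatedAddMasked(x, y Int16x8, m Mask16x8) Int16x8 {
	// x[i]+y[i] with signed saturation, only for lanes selected by m (VPADDSW).
	return x.SaturatedAddMasked(y, m)
}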
// +// This operation is applied selectively under a write mask. +// // Asm: VPSUBSB, CPU Feature: AVX512BW func (x Int8x32) SaturatedSubMasked(y Int8x32, mask Mask8x32) Int8x32 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBSB, CPU Feature: AVX512BW func (x Int8x64) SaturatedSubMasked(y Int8x64, mask Mask8x64) Int8x64 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBSW, CPU Feature: AVX512BW func (x Int16x8) SaturatedSubMasked(y Int16x8, mask Mask16x8) Int16x8 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBSW, CPU Feature: AVX512BW func (x Int16x16) SaturatedSubMasked(y Int16x16, mask Mask16x16) Int16x16 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBSW, CPU Feature: AVX512BW func (x Int16x32) SaturatedSubMasked(y Int16x32, mask Mask16x32) Int16x32 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBSB, CPU Feature: AVX512BW func (x Uint8x16) SaturatedSubMasked(y Uint8x16, mask Mask8x16) Uint8x16 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBSB, CPU Feature: AVX512BW func (x Uint8x32) SaturatedSubMasked(y Uint8x32, mask Mask8x32) Uint8x32 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBSB, CPU Feature: AVX512BW func (x Uint8x64) SaturatedSubMasked(y Uint8x64, mask Mask8x64) Uint8x64 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBSW, CPU Feature: AVX512BW func (x Uint16x8) SaturatedSubMasked(y Uint16x8, mask Mask16x8) Uint16x8 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBSW, CPU Feature: AVX512BW func (x Uint16x16) SaturatedSubMasked(y Uint16x16, mask Mask16x16) Uint16x16 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBSW, CPU Feature: AVX512BW func (x Uint16x32) SaturatedSubMasked(y Uint16x32, mask Mask16x32) Uint16x32 @@ -7678,18 +8896,24 @@ func (x Uint8x64) SaturatedUnsignedSignedPairDotProd(y Int8x64) Int16x32 // SaturatedUnsignedSignedPairDotProdMasked multiplies the elements and add the pairs together with saturation, // yielding a vector of half as many elements with twice the input element size. // +// This operation is applied selectively under a write mask. +// // Asm: VPMADDUBSW, CPU Feature: AVX512BW func (x Uint8x16) SaturatedUnsignedSignedPairDotProdMasked(y Int8x16, mask Mask16x8) Int16x8 // SaturatedUnsignedSignedPairDotProdMasked multiplies the elements and add the pairs together with saturation, // yielding a vector of half as many elements with twice the input element size. 
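// Illustrative sketch (editor's addition, not part of the generated API or this
// patch): pairwise dot products of x and y accumulated into an existing vector,
// written only to the lanes selected by the 32-bit mask.
func exampleSaturatedPairDotProdAccumulateMasked(x, y Int16x8, acc Int32x4, m Mask32x4) Int32x4 {
	// Each selected 32-bit lane receives acc[i] plus the dot product of the
	// corresponding pair of 16-bit lanes, with saturation (VPDPWSSDS).
	return x.SaturatedPairDotProdAccumulateMasked(y, acc, m)
}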
// +// This operation is applied selectively under a write mask. +// // Asm: VPMADDUBSW, CPU Feature: AVX512BW func (x Uint8x32) SaturatedUnsignedSignedPairDotProdMasked(y Int8x32, mask Mask16x16) Int16x16 // SaturatedUnsignedSignedPairDotProdMasked multiplies the elements and add the pairs together with saturation, // yielding a vector of half as many elements with twice the input element size. // +// This operation is applied selectively under a write mask. +// // Asm: VPMADDUBSW, CPU Feature: AVX512BW func (x Uint8x64) SaturatedUnsignedSignedPairDotProdMasked(y Int8x64, mask Mask16x32) Int16x32 @@ -7714,16 +8938,22 @@ func (x Int8x64) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int3 // SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of x and y and then adds z. // +// This operation is applied selectively under a write mask. +// // Asm: VPDPBUSDS, CPU Feature: AVX512VNNI func (x Int8x16) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int32x4, mask Mask32x4) Int32x4 // SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of x and y and then adds z. // +// This operation is applied selectively under a write mask. +// // Asm: VPDPBUSDS, CPU Feature: AVX512VNNI func (x Int8x32) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int32x8, mask Mask32x8) Int32x8 // SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of x and y and then adds z. // +// This operation is applied selectively under a write mask. +// // Asm: VPDPBUSDS, CPU Feature: AVX512VNNI func (x Int8x64) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int32x16, mask Mask32x16) Int32x16 @@ -8100,6 +9330,8 @@ func (x Uint64x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint64x8) Uint64x8 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 @@ -8108,6 +9340,8 @@ func (x Int16x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x8, mask // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 @@ -8116,6 +9350,8 @@ func (x Int16x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x16, ma // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 @@ -8124,6 +9360,8 @@ func (x Int16x32) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x32, ma // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 @@ -8132,6 +9370,8 @@ func (x Int32x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x4, mask // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 @@ -8140,6 +9380,8 @@ func (x Int32x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x8, mask // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 @@ -8148,6 +9390,8 @@ func (x Int32x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x16, ma // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 @@ -8156,6 +9400,8 @@ func (x Int64x2) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x2, mask // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 @@ -8164,6 +9410,8 @@ func (x Int64x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x4, mask // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 @@ -8172,6 +9420,8 @@ func (x Int64x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x8, mask // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 @@ -8180,6 +9430,8 @@ func (x Uint16x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x8, ma // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 @@ -8188,6 +9440,8 @@ func (x Uint16x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x16, // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 @@ -8196,6 +9450,8 @@ func (x Uint16x32) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x32, // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 @@ -8204,6 +9460,8 @@ func (x Uint32x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x4, ma // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 @@ -8212,6 +9470,8 @@ func (x Uint32x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x8, ma // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
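// Illustrative sketch (editor's addition, not part of the generated API or this
// patch): a funnel-style left shift where the bits vacated in x are filled from
// y's upper bits; the shift count must be a constant.
func exampleShiftAllLeftAndFillUpperFromMasked(x, y Uint32x4, m Mask32x4) Uint32x4 {
	// Shift each selected lane of x left by 4 bits and fill the emptied lower
	// bits from the upper bits of y (VPSHLDD).
	return x.ShiftAllLeftAndFillUpperFromMasked(4, y, m)
}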
// // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 @@ -8220,6 +9480,8 @@ func (x Uint32x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x16, // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 @@ -8228,6 +9490,8 @@ func (x Uint64x2) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint64x2, ma // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 @@ -8236,6 +9500,8 @@ func (x Uint64x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint64x4, ma // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 @@ -8245,91 +9511,127 @@ func (x Uint64x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint64x8, ma // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLW, CPU Feature: AVX512BW func (x Int16x8) ShiftAllLeftMasked(y uint64, mask Mask16x8) Int16x8 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLW, CPU Feature: AVX512BW func (x Int16x16) ShiftAllLeftMasked(y uint64, mask Mask16x16) Int16x16 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLW, CPU Feature: AVX512BW func (x Int16x32) ShiftAllLeftMasked(y uint64, mask Mask16x32) Int16x32 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLD, CPU Feature: AVX512F func (x Int32x4) ShiftAllLeftMasked(y uint64, mask Mask32x4) Int32x4 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLD, CPU Feature: AVX512F func (x Int32x8) ShiftAllLeftMasked(y uint64, mask Mask32x8) Int32x8 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPSLLD, CPU Feature: AVX512F func (x Int32x16) ShiftAllLeftMasked(y uint64, mask Mask32x16) Int32x16 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLQ, CPU Feature: AVX512F func (x Int64x2) ShiftAllLeftMasked(y uint64, mask Mask64x2) Int64x2 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLQ, CPU Feature: AVX512F func (x Int64x4) ShiftAllLeftMasked(y uint64, mask Mask64x4) Int64x4 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLQ, CPU Feature: AVX512F func (x Int64x8) ShiftAllLeftMasked(y uint64, mask Mask64x8) Int64x8 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLW, CPU Feature: AVX512BW func (x Uint16x8) ShiftAllLeftMasked(y uint64, mask Mask16x8) Uint16x8 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLW, CPU Feature: AVX512BW func (x Uint16x16) ShiftAllLeftMasked(y uint64, mask Mask16x16) Uint16x16 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLW, CPU Feature: AVX512BW func (x Uint16x32) ShiftAllLeftMasked(y uint64, mask Mask16x32) Uint16x32 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLD, CPU Feature: AVX512F func (x Uint32x4) ShiftAllLeftMasked(y uint64, mask Mask32x4) Uint32x4 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLD, CPU Feature: AVX512F func (x Uint32x8) ShiftAllLeftMasked(y uint64, mask Mask32x8) Uint32x8 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLD, CPU Feature: AVX512F func (x Uint32x16) ShiftAllLeftMasked(y uint64, mask Mask32x16) Uint32x16 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLQ, CPU Feature: AVX512F func (x Uint64x2) ShiftAllLeftMasked(y uint64, mask Mask64x2) Uint64x2 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLQ, CPU Feature: AVX512F func (x Uint64x4) ShiftAllLeftMasked(y uint64, mask Mask64x4) Uint64x4 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. 
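// Illustrative sketch (editor's addition, not part of the generated API or this
// patch): here the shift count is an ordinary uint64 value, applied uniformly to
// every selected lane.
func exampleShiftAllLeftMasked(x Uint16x8, n uint64, m Mask16x8) Uint16x8 {
	// Shift each selected 16-bit lane left by n bits; emptied lower bits are
	// zeroed (VPSLLW).
	return x.ShiftAllLeftMasked(n, m)
}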
+// // Asm: VPSLLQ, CPU Feature: AVX512F func (x Uint64x8) ShiftAllLeftMasked(y uint64, mask Mask64x8) Uint64x8 @@ -8576,6 +9878,8 @@ func (x Uint64x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint64x8) Uint64x // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 @@ -8584,6 +9888,8 @@ func (x Int16x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x8, mas // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 @@ -8592,6 +9898,8 @@ func (x Int16x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x16, m // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 @@ -8600,6 +9908,8 @@ func (x Int16x32) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x32, m // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 @@ -8608,6 +9918,8 @@ func (x Int32x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x4, mas // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 @@ -8616,6 +9928,8 @@ func (x Int32x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x8, mas // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 @@ -8624,6 +9938,8 @@ func (x Int32x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x16, m // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 @@ -8632,6 +9948,8 @@ func (x Int64x2) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x2, mas // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 @@ -8640,6 +9958,8 @@ func (x Int64x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x4, mas // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 @@ -8648,6 +9968,8 @@ func (x Int64x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x8, mas // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 @@ -8656,6 +9978,8 @@ func (x Uint16x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x8, m // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 @@ -8664,6 +9988,8 @@ func (x Uint16x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x16, // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 @@ -8672,6 +9998,8 @@ func (x Uint16x32) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x32, // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 @@ -8680,6 +10008,8 @@ func (x Uint32x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x4, m // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 @@ -8688,6 +10018,8 @@ func (x Uint32x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x8, m // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 @@ -8696,6 +10028,8 @@ func (x Uint32x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x16, // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 @@ -8704,6 +10038,8 @@ func (x Uint64x2) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint64x2, m // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 @@ -8712,6 +10048,8 @@ func (x Uint64x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint64x4, m // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 @@ -8721,91 +10059,127 @@ func (x Uint64x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint64x8, m // ShiftAllRightMasked shifts each element to the right by the specified number of bits. 
Emptied upper bits are filled with the sign bit. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRAW, CPU Feature: AVX512BW func (x Int16x8) ShiftAllRightMasked(y uint64, mask Mask16x8) Int16x8 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRAW, CPU Feature: AVX512BW func (x Int16x16) ShiftAllRightMasked(y uint64, mask Mask16x16) Int16x16 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRAW, CPU Feature: AVX512BW func (x Int16x32) ShiftAllRightMasked(y uint64, mask Mask16x32) Int16x32 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRAD, CPU Feature: AVX512F func (x Int32x4) ShiftAllRightMasked(y uint64, mask Mask32x4) Int32x4 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRAD, CPU Feature: AVX512F func (x Int32x8) ShiftAllRightMasked(y uint64, mask Mask32x8) Int32x8 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRAD, CPU Feature: AVX512F func (x Int32x16) ShiftAllRightMasked(y uint64, mask Mask32x16) Int32x16 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRAQ, CPU Feature: AVX512F func (x Int64x2) ShiftAllRightMasked(y uint64, mask Mask64x2) Int64x2 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRAQ, CPU Feature: AVX512F func (x Int64x4) ShiftAllRightMasked(y uint64, mask Mask64x4) Int64x4 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRAQ, CPU Feature: AVX512F func (x Int64x8) ShiftAllRightMasked(y uint64, mask Mask64x8) Int64x8 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRLW, CPU Feature: AVX512BW func (x Uint16x8) ShiftAllRightMasked(y uint64, mask Mask16x8) Uint16x8 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRLW, CPU Feature: AVX512BW func (x Uint16x16) ShiftAllRightMasked(y uint64, mask Mask16x16) Uint16x16 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPSRLW, CPU Feature: AVX512BW func (x Uint16x32) ShiftAllRightMasked(y uint64, mask Mask16x32) Uint16x32 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRLD, CPU Feature: AVX512F func (x Uint32x4) ShiftAllRightMasked(y uint64, mask Mask32x4) Uint32x4 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRLD, CPU Feature: AVX512F func (x Uint32x8) ShiftAllRightMasked(y uint64, mask Mask32x8) Uint32x8 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRLD, CPU Feature: AVX512F func (x Uint32x16) ShiftAllRightMasked(y uint64, mask Mask32x16) Uint32x16 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRLQ, CPU Feature: AVX512F func (x Uint64x2) ShiftAllRightMasked(y uint64, mask Mask64x2) Uint64x2 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRLQ, CPU Feature: AVX512F func (x Uint64x4) ShiftAllRightMasked(y uint64, mask Mask64x4) Uint64x4 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRLQ, CPU Feature: AVX512F func (x Uint64x8) ShiftAllRightMasked(y uint64, mask Mask64x8) Uint64x8 @@ -9016,108 +10390,144 @@ func (x Uint64x8) ShiftLeftAndFillUpperFrom(y Uint64x8, z Uint64x8) Uint64x8 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 func (x Int16x8) ShiftLeftAndFillUpperFromMasked(y Int16x8, z Int16x8, mask Mask16x8) Int16x8 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 func (x Int16x16) ShiftLeftAndFillUpperFromMasked(y Int16x16, z Int16x16, mask Mask16x16) Int16x16 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 func (x Int16x32) ShiftLeftAndFillUpperFromMasked(y Int16x32, z Int16x32, mask Mask16x32) Int16x32 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 func (x Int32x4) ShiftLeftAndFillUpperFromMasked(y Int32x4, z Int32x4, mask Mask32x4) Int32x4 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 func (x Int32x8) ShiftLeftAndFillUpperFromMasked(y Int32x8, z Int32x8, mask Mask32x8) Int32x8 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 func (x Int32x16) ShiftLeftAndFillUpperFromMasked(y Int32x16, z Int32x16, mask Mask32x16) Int32x16 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 func (x Int64x2) ShiftLeftAndFillUpperFromMasked(y Int64x2, z Int64x2, mask Mask64x2) Int64x2 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 func (x Int64x4) ShiftLeftAndFillUpperFromMasked(y Int64x4, z Int64x4, mask Mask64x4) Int64x4 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 func (x Int64x8) ShiftLeftAndFillUpperFromMasked(y Int64x8, z Int64x8, mask Mask64x8) Int64x8 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 func (x Uint16x8) ShiftLeftAndFillUpperFromMasked(y Uint16x8, z Uint16x8, mask Mask16x8) Uint16x8 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 func (x Uint16x16) ShiftLeftAndFillUpperFromMasked(y Uint16x16, z Uint16x16, mask Mask16x16) Uint16x16 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 func (x Uint16x32) ShiftLeftAndFillUpperFromMasked(y Uint16x32, z Uint16x32, mask Mask16x32) Uint16x32 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 func (x Uint32x4) ShiftLeftAndFillUpperFromMasked(y Uint32x4, z Uint32x4, mask Mask32x4) Uint32x4 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 func (x Uint32x8) ShiftLeftAndFillUpperFromMasked(y Uint32x8, z Uint32x8, mask Mask32x8) Uint32x8 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 func (x Uint32x16) ShiftLeftAndFillUpperFromMasked(y Uint32x16, z Uint32x16, mask Mask32x16) Uint32x16 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 func (x Uint64x2) ShiftLeftAndFillUpperFromMasked(y Uint64x2, z Uint64x2, mask Mask64x2) Uint64x2 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 func (x Uint64x4) ShiftLeftAndFillUpperFromMasked(y Uint64x4, z Uint64x4, mask Mask64x4) Uint64x4 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 func (x Uint64x8) ShiftLeftAndFillUpperFromMasked(y Uint64x8, z Uint64x8, mask Mask64x8) Uint64x8 @@ -9125,91 +10535,127 @@ func (x Uint64x8) ShiftLeftAndFillUpperFromMasked(y Uint64x8, z Uint64x8, mask M // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLVW, CPU Feature: AVX512BW func (x Int16x8) ShiftLeftMasked(y Int16x8, mask Mask16x8) Int16x8 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLVW, CPU Feature: AVX512BW func (x Int16x16) ShiftLeftMasked(y Int16x16, mask Mask16x16) Int16x16 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLVW, CPU Feature: AVX512BW func (x Int16x32) ShiftLeftMasked(y Int16x32, mask Mask16x32) Int16x32 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLVD, CPU Feature: AVX512F func (x Int32x4) ShiftLeftMasked(y Int32x4, mask Mask32x4) Int32x4 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLVD, CPU Feature: AVX512F func (x Int32x8) ShiftLeftMasked(y Int32x8, mask Mask32x8) Int32x8 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLVD, CPU Feature: AVX512F func (x Int32x16) ShiftLeftMasked(y Int32x16, mask Mask32x16) Int32x16 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLVQ, CPU Feature: AVX512F func (x Int64x2) ShiftLeftMasked(y Int64x2, mask Mask64x2) Int64x2 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLVQ, CPU Feature: AVX512F func (x Int64x4) ShiftLeftMasked(y Int64x4, mask Mask64x4) Int64x4 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPSLLVQ, CPU Feature: AVX512F func (x Int64x8) ShiftLeftMasked(y Int64x8, mask Mask64x8) Int64x8 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLVW, CPU Feature: AVX512BW func (x Uint16x8) ShiftLeftMasked(y Uint16x8, mask Mask16x8) Uint16x8 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLVW, CPU Feature: AVX512BW func (x Uint16x16) ShiftLeftMasked(y Uint16x16, mask Mask16x16) Uint16x16 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLVW, CPU Feature: AVX512BW func (x Uint16x32) ShiftLeftMasked(y Uint16x32, mask Mask16x32) Uint16x32 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLVD, CPU Feature: AVX512F func (x Uint32x4) ShiftLeftMasked(y Uint32x4, mask Mask32x4) Uint32x4 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLVD, CPU Feature: AVX512F func (x Uint32x8) ShiftLeftMasked(y Uint32x8, mask Mask32x8) Uint32x8 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLVD, CPU Feature: AVX512F func (x Uint32x16) ShiftLeftMasked(y Uint32x16, mask Mask32x16) Uint32x16 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLVQ, CPU Feature: AVX512F func (x Uint64x2) ShiftLeftMasked(y Uint64x2, mask Mask64x2) Uint64x2 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLVQ, CPU Feature: AVX512F func (x Uint64x4) ShiftLeftMasked(y Uint64x4, mask Mask64x4) Uint64x4 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLVQ, CPU Feature: AVX512F func (x Uint64x8) ShiftLeftMasked(y Uint64x8, mask Mask64x8) Uint64x8 @@ -9420,108 +10866,144 @@ func (x Uint64x8) ShiftRightAndFillUpperFrom(y Uint64x8, z Uint64x8) Uint64x8 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 func (x Int16x8) ShiftRightAndFillUpperFromMasked(y Int16x8, z Int16x8, mask Mask16x8) Int16x8 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 func (x Int16x16) ShiftRightAndFillUpperFromMasked(y Int16x16, z Int16x16, mask Mask16x16) Int16x16 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 func (x Int16x32) ShiftRightAndFillUpperFromMasked(y Int16x32, z Int16x32, mask Mask16x32) Int16x32 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 func (x Int32x4) ShiftRightAndFillUpperFromMasked(y Int32x4, z Int32x4, mask Mask32x4) Int32x4 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 func (x Int32x8) ShiftRightAndFillUpperFromMasked(y Int32x8, z Int32x8, mask Mask32x8) Int32x8 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 func (x Int32x16) ShiftRightAndFillUpperFromMasked(y Int32x16, z Int32x16, mask Mask32x16) Int32x16 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 func (x Int64x2) ShiftRightAndFillUpperFromMasked(y Int64x2, z Int64x2, mask Mask64x2) Int64x2 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 func (x Int64x4) ShiftRightAndFillUpperFromMasked(y Int64x4, z Int64x4, mask Mask64x4) Int64x4 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 func (x Int64x8) ShiftRightAndFillUpperFromMasked(y Int64x8, z Int64x8, mask Mask64x8) Int64x8 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 func (x Uint16x8) ShiftRightAndFillUpperFromMasked(y Uint16x8, z Uint16x8, mask Mask16x8) Uint16x8 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 func (x Uint16x16) ShiftRightAndFillUpperFromMasked(y Uint16x16, z Uint16x16, mask Mask16x16) Uint16x16 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 func (x Uint16x32) ShiftRightAndFillUpperFromMasked(y Uint16x32, z Uint16x32, mask Mask16x32) Uint16x32 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 func (x Uint32x4) ShiftRightAndFillUpperFromMasked(y Uint32x4, z Uint32x4, mask Mask32x4) Uint32x4 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 func (x Uint32x8) ShiftRightAndFillUpperFromMasked(y Uint32x8, z Uint32x8, mask Mask32x8) Uint32x8 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 func (x Uint32x16) ShiftRightAndFillUpperFromMasked(y Uint32x16, z Uint32x16, mask Mask32x16) Uint32x16 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 func (x Uint64x2) ShiftRightAndFillUpperFromMasked(y Uint64x2, z Uint64x2, mask Mask64x2) Uint64x2 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 func (x Uint64x4) ShiftRightAndFillUpperFromMasked(y Uint64x4, z Uint64x4, mask Mask64x4) Uint64x4 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 func (x Uint64x8) ShiftRightAndFillUpperFromMasked(y Uint64x8, z Uint64x8, mask Mask64x8) Uint64x8 @@ -9529,91 +11011,127 @@ func (x Uint64x8) ShiftRightAndFillUpperFromMasked(y Uint64x8, z Uint64x8, mask // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRAVW, CPU Feature: AVX512BW func (x Int16x8) ShiftRightMasked(y Int16x8, mask Mask16x8) Int16x8 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRAVW, CPU Feature: AVX512BW func (x Int16x16) ShiftRightMasked(y Int16x16, mask Mask16x16) Int16x16 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRAVW, CPU Feature: AVX512BW func (x Int16x32) ShiftRightMasked(y Int16x32, mask Mask16x32) Int16x32 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRAVD, CPU Feature: AVX512F func (x Int32x4) ShiftRightMasked(y Int32x4, mask Mask32x4) Int32x4 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRAVD, CPU Feature: AVX512F func (x Int32x8) ShiftRightMasked(y Int32x8, mask Mask32x8) Int32x8 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. 
Emptied upper bits are filled with the sign bit. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRAVD, CPU Feature: AVX512F func (x Int32x16) ShiftRightMasked(y Int32x16, mask Mask32x16) Int32x16 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRAVQ, CPU Feature: AVX512F func (x Int64x2) ShiftRightMasked(y Int64x2, mask Mask64x2) Int64x2 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRAVQ, CPU Feature: AVX512F func (x Int64x4) ShiftRightMasked(y Int64x4, mask Mask64x4) Int64x4 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRAVQ, CPU Feature: AVX512F func (x Int64x8) ShiftRightMasked(y Int64x8, mask Mask64x8) Int64x8 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRLVW, CPU Feature: AVX512BW func (x Uint16x8) ShiftRightMasked(y Uint16x8, mask Mask16x8) Uint16x8 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRLVW, CPU Feature: AVX512BW func (x Uint16x16) ShiftRightMasked(y Uint16x16, mask Mask16x16) Uint16x16 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRLVW, CPU Feature: AVX512BW func (x Uint16x32) ShiftRightMasked(y Uint16x32, mask Mask16x32) Uint16x32 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRLVD, CPU Feature: AVX512F func (x Uint32x4) ShiftRightMasked(y Uint32x4, mask Mask32x4) Uint32x4 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRLVD, CPU Feature: AVX512F func (x Uint32x8) ShiftRightMasked(y Uint32x8, mask Mask32x8) Uint32x8 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRLVD, CPU Feature: AVX512F func (x Uint32x16) ShiftRightMasked(y Uint32x16, mask Mask32x16) Uint32x16 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPSRLVQ, CPU Feature: AVX512F func (x Uint64x2) ShiftRightMasked(y Uint64x2, mask Mask64x2) Uint64x2 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRLVQ, CPU Feature: AVX512F func (x Uint64x4) ShiftRightMasked(y Uint64x4, mask Mask64x4) Uint64x4 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRLVQ, CPU Feature: AVX512F func (x Uint64x8) ShiftRightMasked(y Uint64x8, mask Mask64x8) Uint64x8 @@ -9691,31 +11209,43 @@ func (x Float64x8) Sqrt() Float64x8 // SqrtMasked computes the square root of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VSQRTPS, CPU Feature: AVX512F func (x Float32x4) SqrtMasked(mask Mask32x4) Float32x4 // SqrtMasked computes the square root of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VSQRTPS, CPU Feature: AVX512F func (x Float32x8) SqrtMasked(mask Mask32x8) Float32x8 // SqrtMasked computes the square root of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VSQRTPS, CPU Feature: AVX512F func (x Float32x16) SqrtMasked(mask Mask32x16) Float32x16 // SqrtMasked computes the square root of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VSQRTPD, CPU Feature: AVX512F func (x Float64x2) SqrtMasked(mask Mask64x2) Float64x2 // SqrtMasked computes the square root of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VSQRTPD, CPU Feature: AVX512F func (x Float64x4) SqrtMasked(mask Mask64x4) Float64x4 // SqrtMasked computes the square root of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VSQRTPD, CPU Feature: AVX512F func (x Float64x8) SqrtMasked(mask Mask64x8) Float64x8 @@ -9875,151 +11405,211 @@ func (x Uint64x8) Sub(y Uint64x8) Uint64x8 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VSUBPS, CPU Feature: AVX512F func (x Float32x4) SubMasked(y Float32x4, mask Mask32x4) Float32x4 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VSUBPS, CPU Feature: AVX512F func (x Float32x8) SubMasked(y Float32x8, mask Mask32x8) Float32x8 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VSUBPS, CPU Feature: AVX512F func (x Float32x16) SubMasked(y Float32x16, mask Mask32x16) Float32x16 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VSUBPD, CPU Feature: AVX512F func (x Float64x2) SubMasked(y Float64x2, mask Mask64x2) Float64x2 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VSUBPD, CPU Feature: AVX512F func (x Float64x4) SubMasked(y Float64x4, mask Mask64x4) Float64x4 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. 
+// // Asm: VSUBPD, CPU Feature: AVX512F func (x Float64x8) SubMasked(y Float64x8, mask Mask64x8) Float64x8 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBB, CPU Feature: AVX512BW func (x Int8x16) SubMasked(y Int8x16, mask Mask8x16) Int8x16 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBB, CPU Feature: AVX512BW func (x Int8x32) SubMasked(y Int8x32, mask Mask8x32) Int8x32 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBB, CPU Feature: AVX512BW func (x Int8x64) SubMasked(y Int8x64, mask Mask8x64) Int8x64 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBW, CPU Feature: AVX512BW func (x Int16x8) SubMasked(y Int16x8, mask Mask16x8) Int16x8 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBW, CPU Feature: AVX512BW func (x Int16x16) SubMasked(y Int16x16, mask Mask16x16) Int16x16 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBW, CPU Feature: AVX512BW func (x Int16x32) SubMasked(y Int16x32, mask Mask16x32) Int16x32 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBD, CPU Feature: AVX512F func (x Int32x4) SubMasked(y Int32x4, mask Mask32x4) Int32x4 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBD, CPU Feature: AVX512F func (x Int32x8) SubMasked(y Int32x8, mask Mask32x8) Int32x8 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBD, CPU Feature: AVX512F func (x Int32x16) SubMasked(y Int32x16, mask Mask32x16) Int32x16 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBQ, CPU Feature: AVX512F func (x Int64x2) SubMasked(y Int64x2, mask Mask64x2) Int64x2 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBQ, CPU Feature: AVX512F func (x Int64x4) SubMasked(y Int64x4, mask Mask64x4) Int64x4 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBQ, CPU Feature: AVX512F func (x Int64x8) SubMasked(y Int64x8, mask Mask64x8) Int64x8 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBB, CPU Feature: AVX512BW func (x Uint8x16) SubMasked(y Uint8x16, mask Mask8x16) Uint8x16 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBB, CPU Feature: AVX512BW func (x Uint8x32) SubMasked(y Uint8x32, mask Mask8x32) Uint8x32 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPSUBB, CPU Feature: AVX512BW func (x Uint8x64) SubMasked(y Uint8x64, mask Mask8x64) Uint8x64 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBW, CPU Feature: AVX512BW func (x Uint16x8) SubMasked(y Uint16x8, mask Mask16x8) Uint16x8 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBW, CPU Feature: AVX512BW func (x Uint16x16) SubMasked(y Uint16x16, mask Mask16x16) Uint16x16 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBW, CPU Feature: AVX512BW func (x Uint16x32) SubMasked(y Uint16x32, mask Mask16x32) Uint16x32 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBD, CPU Feature: AVX512F func (x Uint32x4) SubMasked(y Uint32x4, mask Mask32x4) Uint32x4 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBD, CPU Feature: AVX512F func (x Uint32x8) SubMasked(y Uint32x8, mask Mask32x8) Uint32x8 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBD, CPU Feature: AVX512F func (x Uint32x16) SubMasked(y Uint32x16, mask Mask32x16) Uint32x16 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBQ, CPU Feature: AVX512F func (x Uint64x2) SubMasked(y Uint64x2, mask Mask64x2) Uint64x2 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBQ, CPU Feature: AVX512F func (x Uint64x4) SubMasked(y Uint64x4, mask Mask64x4) Uint64x4 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBQ, CPU Feature: AVX512F func (x Uint64x8) SubMasked(y Uint64x8, mask Mask64x8) Uint64x8 @@ -10093,6 +11683,8 @@ func (x Float64x8) TruncWithPrecision(prec uint8) Float64x8 // TruncWithPrecisionMasked truncates elements with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F @@ -10100,6 +11692,8 @@ func (x Float32x4) TruncWithPrecisionMasked(prec uint8, mask Mask32x4) Float32x4 // TruncWithPrecisionMasked truncates elements with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F @@ -10107,6 +11701,8 @@ func (x Float32x8) TruncWithPrecisionMasked(prec uint8, mask Mask32x8) Float32x8 // TruncWithPrecisionMasked truncates elements with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F @@ -10114,6 +11710,8 @@ func (x Float32x16) TruncWithPrecisionMasked(prec uint8, mask Mask32x16) Float32 // TruncWithPrecisionMasked truncates elements with specified precision. 
// +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F @@ -10121,6 +11719,8 @@ func (x Float64x2) TruncWithPrecisionMasked(prec uint8, mask Mask64x2) Float64x2 // TruncWithPrecisionMasked truncates elements with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F @@ -10128,6 +11728,8 @@ func (x Float64x4) TruncWithPrecisionMasked(prec uint8, mask Mask64x4) Float64x4 // TruncWithPrecisionMasked truncates elements with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F @@ -10154,16 +11756,22 @@ func (x Int8x64) UnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16) Int // UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of x and y and then adds z. // +// This operation is applied selectively under a write mask. +// // Asm: VPDPBUSD, CPU Feature: AVX512VNNI func (x Int8x16) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int32x4, mask Mask32x4) Int32x4 // UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of x and y and then adds z. // +// This operation is applied selectively under a write mask. +// // Asm: VPDPBUSD, CPU Feature: AVX512VNNI func (x Int8x32) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int32x8, mask Mask32x8) Int32x8 // UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of x and y and then adds z. // +// This operation is applied selectively under a write mask. +// // Asm: VPDPBUSD, CPU Feature: AVX512VNNI func (x Int8x64) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int32x16, mask Mask32x16) Int32x16 @@ -10199,7 +11807,7 @@ func (x Int32x4) Xor(y Int32x4) Int32x4 // Asm: VPXOR, CPU Feature: AVX2 func (x Int32x8) Xor(y Int32x8) Int32x8 -// Xor performs a masked bitwise XOR operation between two vectors. +// Xor performs a bitwise XOR operation between two vectors. // // Asm: VPXORD, CPU Feature: AVX512F func (x Int32x16) Xor(y Int32x16) Int32x16 @@ -10214,7 +11822,7 @@ func (x Int64x2) Xor(y Int64x2) Int64x2 // Asm: VPXOR, CPU Feature: AVX2 func (x Int64x4) Xor(y Int64x4) Int64x4 -// Xor performs a masked bitwise XOR operation between two vectors. +// Xor performs a bitwise XOR operation between two vectors. // // Asm: VPXORQ, CPU Feature: AVX512F func (x Int64x8) Xor(y Int64x8) Int64x8 @@ -10249,7 +11857,7 @@ func (x Uint32x4) Xor(y Uint32x4) Uint32x4 // Asm: VPXOR, CPU Feature: AVX2 func (x Uint32x8) Xor(y Uint32x8) Uint32x8 -// Xor performs a masked bitwise XOR operation between two vectors. +// Xor performs a bitwise XOR operation between two vectors. // // Asm: VPXORD, CPU Feature: AVX512F func (x Uint32x16) Xor(y Uint32x16) Uint32x16 @@ -10264,69 +11872,93 @@ func (x Uint64x2) Xor(y Uint64x2) Uint64x2 // Asm: VPXOR, CPU Feature: AVX2 func (x Uint64x4) Xor(y Uint64x4) Uint64x4 -// Xor performs a masked bitwise XOR operation between two vectors. +// Xor performs a bitwise XOR operation between two vectors. 
// // Asm: VPXORQ, CPU Feature: AVX512F func (x Uint64x8) Xor(y Uint64x8) Uint64x8 /* XorMasked */ -// XorMasked performs a masked bitwise XOR operation between two vectors. +// XorMasked performs a bitwise XOR operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPXORD, CPU Feature: AVX512F func (x Int32x4) XorMasked(y Int32x4, mask Mask32x4) Int32x4 -// XorMasked performs a masked bitwise XOR operation between two vectors. +// XorMasked performs a bitwise XOR operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPXORD, CPU Feature: AVX512F func (x Int32x8) XorMasked(y Int32x8, mask Mask32x8) Int32x8 -// XorMasked performs a masked bitwise XOR operation between two vectors. +// XorMasked performs a bitwise XOR operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPXORD, CPU Feature: AVX512F func (x Int32x16) XorMasked(y Int32x16, mask Mask32x16) Int32x16 -// XorMasked performs a masked bitwise XOR operation between two vectors. +// XorMasked performs a bitwise XOR operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPXORQ, CPU Feature: AVX512F func (x Int64x2) XorMasked(y Int64x2, mask Mask64x2) Int64x2 -// XorMasked performs a masked bitwise XOR operation between two vectors. +// XorMasked performs a bitwise XOR operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPXORQ, CPU Feature: AVX512F func (x Int64x4) XorMasked(y Int64x4, mask Mask64x4) Int64x4 -// XorMasked performs a masked bitwise XOR operation between two vectors. +// XorMasked performs a bitwise XOR operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPXORQ, CPU Feature: AVX512F func (x Int64x8) XorMasked(y Int64x8, mask Mask64x8) Int64x8 -// XorMasked performs a masked bitwise XOR operation between two vectors. +// XorMasked performs a bitwise XOR operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPXORD, CPU Feature: AVX512F func (x Uint32x4) XorMasked(y Uint32x4, mask Mask32x4) Uint32x4 -// XorMasked performs a masked bitwise XOR operation between two vectors. +// XorMasked performs a bitwise XOR operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPXORD, CPU Feature: AVX512F func (x Uint32x8) XorMasked(y Uint32x8, mask Mask32x8) Uint32x8 -// XorMasked performs a masked bitwise XOR operation between two vectors. +// XorMasked performs a bitwise XOR operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPXORD, CPU Feature: AVX512F func (x Uint32x16) XorMasked(y Uint32x16, mask Mask32x16) Uint32x16 -// XorMasked performs a masked bitwise XOR operation between two vectors. +// XorMasked performs a bitwise XOR operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPXORQ, CPU Feature: AVX512F func (x Uint64x2) XorMasked(y Uint64x2, mask Mask64x2) Uint64x2 -// XorMasked performs a masked bitwise XOR operation between two vectors. +// XorMasked performs a bitwise XOR operation between two vectors. +// +// This operation is applied selectively under a write mask. 
// // Asm: VPXORQ, CPU Feature: AVX512F func (x Uint64x4) XorMasked(y Uint64x4, mask Mask64x4) Uint64x4 -// XorMasked performs a masked bitwise XOR operation between two vectors. +// XorMasked performs a bitwise XOR operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPXORQ, CPU Feature: AVX512F func (x Uint64x8) XorMasked(y Uint64x8, mask Mask64x8) Uint64x8 From f0e9dc09752cc2f03fcedff458660ab2276bcf8d Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Thu, 17 Jul 2025 22:23:15 +0000 Subject: [PATCH 091/139] [dev.simd] cmd/compile: fix opLen(2|3)Imm8_2I intrinsic function This function reads the const from the wrong arg, this CL fixes it. Change-Id: Icd38977a35f0df9064efb290fa6390453d6b9e5b Reviewed-on: https://go-review.googlesource.com/c/go/+/688595 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/ssagen/intrinsics.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/cmd/compile/internal/ssagen/intrinsics.go b/src/cmd/compile/internal/ssagen/intrinsics.go index 337f0b86e61136..5415143ec3199c 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics.go +++ b/src/cmd/compile/internal/ssagen/intrinsics.go @@ -1722,7 +1722,7 @@ func opLen3Imm8(op ssa.Op, t *types.Type, offset int) func(s *state, n *ir.CallE func opLen2Imm8_2I(op ssa.Op, t *types.Type, offset int) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { - if args[1].Op == ssa.OpConst8 { + if args[2].Op == ssa.OpConst8 { return s.newValue2I(op, t, args[2].AuxInt< Date: Fri, 18 Jul 2025 04:26:59 +0000 Subject: [PATCH 092/139] [dev.simd] cmd/compile, simd: support load from bits for mask This CL is partially generated by CL 688855. 
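Concretely, the new loads let a caller turn a plain bit pattern into a mask and hand it to the *Masked operations documented above, whose unselected lanes come back zeroed. The following is a minimal illustrative sketch, not part of the patch itself, mirroring the TestBitMask case added later in this CL; it assumes GOEXPERIMENT=simd on amd64:

package main

import (
	"fmt"
	"simd"
)

func main() {
	if !simd.HasAVX512() {
		return // the masked add below needs AVX-512
	}
	var bits uint64 = 0b10 // bit i selects lane i, so only lane 1 is active
	m := simd.LoadMask64x2FromBits(&bits)
	a := simd.LoadInt64x2Slice([]int64{1, 2})
	b := simd.LoadInt64x2Slice([]int64{3, 4})
	var out [2]int64
	a.AddMasked(b, m).Store(&out)
	fmt.Println(out) // prints [0 6]: lane 0 is zeroed by the mask, lane 1 is 2+4
}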
Change-Id: I68d5fbad9445a3d2cf671822be1c0b82e7290396 Reviewed-on: https://go-review.googlesource.com/c/go/+/688875 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/amd64/ssa.go | 4 +- src/cmd/compile/internal/ssa/_gen/AMD64.rules | 16 ++ src/cmd/compile/internal/ssa/_gen/AMD64Ops.go | 4 + .../compile/internal/ssa/_gen/genericOps.go | 12 + src/cmd/compile/internal/ssa/opGen.go | 89 +++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 240 ++++++++++++++++++ src/cmd/compile/internal/ssagen/intrinsics.go | 16 ++ .../compile/internal/ssagen/simdintrinsics.go | 36 +-- src/simd/simd_test.go | 17 ++ src/simd/types_amd64.go | 72 ++++++ 10 files changed, 480 insertions(+), 26 deletions(-) diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 9c31b77e7031fb..0fafd69f54ba6a 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -1461,13 +1461,13 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.AddRestSourceReg(simdReg(v.Args[1])) p.To.Type = obj.TYPE_REG p.To.Reg = simdReg(v) - case ssa.OpAMD64VMOVDQUload128, ssa.OpAMD64VMOVDQUload256, ssa.OpAMD64VMOVDQUload512: + case ssa.OpAMD64VMOVDQUload128, ssa.OpAMD64VMOVDQUload256, ssa.OpAMD64VMOVDQUload512, ssa.OpAMD64KMOVQload: p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_MEM p.From.Reg = v.Args[0].Reg() ssagen.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG - p.To.Reg = simdReg(v) + p.To.Reg = simdOrMaskReg(v) case ssa.OpAMD64VMOVDQUstore128, ssa.OpAMD64VMOVDQUstore256, ssa.OpAMD64VMOVDQUstore512: p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64.rules b/src/cmd/compile/internal/ssa/_gen/AMD64.rules index 2972eae87d5479..bb7513795d9d62 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/AMD64.rules @@ -1682,6 +1682,22 @@ (Select0 a:(ADD(Q|L)constflags [c] x)) && a.Uses == 1 => (ADD(Q|L)const [c] x) // XXX SIMD +(LoadMask8x16 ptr mem) => (VPMOVMToVec8x16 (KMOVQload ptr mem)) +(LoadMask8x32 ptr mem) => (VPMOVMToVec8x32 (KMOVQload ptr mem)) +(LoadMask8x64 ptr mem) => (VPMOVMToVec8x64 (KMOVQload ptr mem)) + +(LoadMask16x8 ptr mem) => (VPMOVMToVec16x8 (KMOVQload ptr mem)) +(LoadMask16x16 ptr mem) => (VPMOVMToVec16x16 (KMOVQload ptr mem)) +(LoadMask16x32 ptr mem) => (VPMOVMToVec16x32 (KMOVQload ptr mem)) + +(LoadMask32x4 ptr mem) => (VPMOVMToVec32x4 (KMOVQload ptr mem)) +(LoadMask32x8 ptr mem) => (VPMOVMToVec32x8 (KMOVQload ptr mem)) +(LoadMask32x16 ptr mem) => (VPMOVMToVec32x16 (KMOVQload ptr mem)) + +(LoadMask64x2 ptr mem) => (VPMOVMToVec64x2 (KMOVQload ptr mem)) +(LoadMask64x4 ptr mem) => (VPMOVMToVec64x4 (KMOVQload ptr mem)) +(LoadMask64x8 ptr mem) => (VPMOVMToVec64x8 (KMOVQload ptr mem)) + (Load ptr mem) && t.Size() == 16 => (VMOVDQUload128 ptr mem) (Store {t} ptr val mem) && t.Size() == 16 => (VMOVDQUstore128 ptr val mem) diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go index 543233f4d831fe..ec335f67f87679 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go @@ -234,6 +234,8 @@ func init() { wfpw = regInfo{inputs: []regMask{w, fp}, outputs: wonly} wfpkw = regInfo{inputs: []regMask{w, fp, mask}, outputs: wonly} + kload = regInfo{inputs: []regMask{gpspsb, 0}, outputs: maskonly} + prefreg = regInfo{inputs: []regMask{gpspsbg}} ) @@ -1314,6 +1316,8 @@ func init() { {name: "VZEROUPPER", argLength: 0, asm: "VZEROUPPER"}, {name: 
"VZEROALL", argLength: 0, asm: "VZEROALL"}, + + {name: "KMOVQload", argLength: 2, reg: kload, asm: "KMOVQ", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, } var AMD64blocks = []blockData{ diff --git a/src/cmd/compile/internal/ssa/_gen/genericOps.go b/src/cmd/compile/internal/ssa/_gen/genericOps.go index 2d44cc85f8242e..6257396a6f5d24 100644 --- a/src/cmd/compile/internal/ssa/_gen/genericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/genericOps.go @@ -666,6 +666,18 @@ var genericOps = []opData{ // XXX SIMD {name: "Add32x4", argLength: 2}, // arg0 + arg1 {name: "ZeroSIMD", argLength: 0}, + {name: "LoadMask8x16", argLength: 2}, // arg0 = ptr, arg1 = mem + {name: "LoadMask8x32", argLength: 2}, // arg0 = ptr, arg1 = mem + {name: "LoadMask8x64", argLength: 2}, // arg0 = ptr, arg1 = mem + {name: "LoadMask16x8", argLength: 2}, // arg0 = ptr, arg1 = mem + {name: "LoadMask16x16", argLength: 2}, // arg0 = ptr, arg1 = mem + {name: "LoadMask16x32", argLength: 2}, // arg0 = ptr, arg1 = mem + {name: "LoadMask32x4", argLength: 2}, // arg0 = ptr, arg1 = mem + {name: "LoadMask32x8", argLength: 2}, // arg0 = ptr, arg1 = mem + {name: "LoadMask32x16", argLength: 2}, // arg0 = ptr, arg1 = mem + {name: "LoadMask64x2", argLength: 2}, // arg0 = ptr, arg1 = mem + {name: "LoadMask64x4", argLength: 2}, // arg0 = ptr, arg1 = mem + {name: "LoadMask64x8", argLength: 2}, // arg0 = ptr, arg1 = mem } // kind controls successors implicit exit diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 29058f0b193bc0..d69e714082b9b3 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1198,6 +1198,7 @@ const ( OpAMD64Zero512 OpAMD64VZEROUPPER OpAMD64VZEROALL + OpAMD64KMOVQload OpAMD64VADDPS512 OpAMD64VADDPSMasked512 OpAMD64VRCP14PS512 @@ -4403,6 +4404,18 @@ const ( OpPrefetchCacheStreamed OpAdd32x4 OpZeroSIMD + OpLoadMask8x16 + OpLoadMask8x32 + OpLoadMask8x64 + OpLoadMask16x8 + OpLoadMask16x16 + OpLoadMask16x32 + OpLoadMask32x4 + OpLoadMask32x8 + OpLoadMask32x16 + OpLoadMask64x2 + OpLoadMask64x4 + OpLoadMask64x8 OpAddFloat32x16 OpAddMaskedFloat32x16 OpApproximateReciprocalFloat32x16 @@ -18801,6 +18814,22 @@ var opcodeTable = [...]opInfo{ asm: x86.AVZEROALL, reg: regInfo{}, }, + { + name: "KMOVQload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AKMOVQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, { name: "VADDPS512", argLen: 2, @@ -60727,6 +60756,66 @@ var opcodeTable = [...]opInfo{ argLen: 0, generic: true, }, + { + name: "LoadMask8x16", + argLen: 2, + generic: true, + }, + { + name: "LoadMask8x32", + argLen: 2, + generic: true, + }, + { + name: "LoadMask8x64", + argLen: 2, + generic: true, + }, + { + name: "LoadMask16x8", + argLen: 2, + generic: true, + }, + { + name: "LoadMask16x16", + argLen: 2, + generic: true, + }, + { + name: "LoadMask16x32", + argLen: 2, + generic: true, + }, + { + name: "LoadMask32x4", + argLen: 2, + generic: true, + }, + { + name: "LoadMask32x8", + argLen: 2, + generic: true, + }, + { + name: "LoadMask32x16", + argLen: 2, + generic: true, + }, + { + name: "LoadMask64x2", + argLen: 2, + generic: true, + }, + { + name: "LoadMask64x4", + argLen: 2, + generic: true, + }, + { + name: "LoadMask64x8", + argLen: 2, + generic: true, + }, { name: "AddFloat32x16", argLen: 2, diff --git 
a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 5c7cafd6f23697..0ff19a680e4e20 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -2438,6 +2438,30 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpLessUint8x64(v) case OpLoad: return rewriteValueAMD64_OpLoad(v) + case OpLoadMask16x16: + return rewriteValueAMD64_OpLoadMask16x16(v) + case OpLoadMask16x32: + return rewriteValueAMD64_OpLoadMask16x32(v) + case OpLoadMask16x8: + return rewriteValueAMD64_OpLoadMask16x8(v) + case OpLoadMask32x16: + return rewriteValueAMD64_OpLoadMask32x16(v) + case OpLoadMask32x4: + return rewriteValueAMD64_OpLoadMask32x4(v) + case OpLoadMask32x8: + return rewriteValueAMD64_OpLoadMask32x8(v) + case OpLoadMask64x2: + return rewriteValueAMD64_OpLoadMask64x2(v) + case OpLoadMask64x4: + return rewriteValueAMD64_OpLoadMask64x4(v) + case OpLoadMask64x8: + return rewriteValueAMD64_OpLoadMask64x8(v) + case OpLoadMask8x16: + return rewriteValueAMD64_OpLoadMask8x16(v) + case OpLoadMask8x32: + return rewriteValueAMD64_OpLoadMask8x32(v) + case OpLoadMask8x64: + return rewriteValueAMD64_OpLoadMask8x64(v) case OpLocalAddr: return rewriteValueAMD64_OpLocalAddr(v) case OpLsh16x16: @@ -40303,6 +40327,222 @@ func rewriteValueAMD64_OpLoad(v *Value) bool { } return false } +func rewriteValueAMD64_OpLoadMask16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (LoadMask16x16 ptr mem) + // result: (VPMOVMToVec16x16 (KMOVQload ptr mem)) + for { + t := v.Type + ptr := v_0 + mem := v_1 + v.reset(OpAMD64VPMOVMToVec16x16) + v.Type = types.TypeVec256 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0.AddArg2(ptr, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLoadMask16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (LoadMask16x32 ptr mem) + // result: (VPMOVMToVec16x32 (KMOVQload ptr mem)) + for { + t := v.Type + ptr := v_0 + mem := v_1 + v.reset(OpAMD64VPMOVMToVec16x32) + v.Type = types.TypeVec512 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0.AddArg2(ptr, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLoadMask16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (LoadMask16x8 ptr mem) + // result: (VPMOVMToVec16x8 (KMOVQload ptr mem)) + for { + t := v.Type + ptr := v_0 + mem := v_1 + v.reset(OpAMD64VPMOVMToVec16x8) + v.Type = types.TypeVec128 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0.AddArg2(ptr, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLoadMask32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (LoadMask32x16 ptr mem) + // result: (VPMOVMToVec32x16 (KMOVQload ptr mem)) + for { + t := v.Type + ptr := v_0 + mem := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v.Type = types.TypeVec512 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0.AddArg2(ptr, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLoadMask32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (LoadMask32x4 ptr mem) + // result: (VPMOVMToVec32x4 (KMOVQload ptr mem)) + for { + t := v.Type + ptr := v_0 + mem := v_1 + v.reset(OpAMD64VPMOVMToVec32x4) + v.Type = types.TypeVec128 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0.AddArg2(ptr, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLoadMask32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := 
v.Block + // match: (LoadMask32x8 ptr mem) + // result: (VPMOVMToVec32x8 (KMOVQload ptr mem)) + for { + t := v.Type + ptr := v_0 + mem := v_1 + v.reset(OpAMD64VPMOVMToVec32x8) + v.Type = types.TypeVec256 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0.AddArg2(ptr, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLoadMask64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (LoadMask64x2 ptr mem) + // result: (VPMOVMToVec64x2 (KMOVQload ptr mem)) + for { + t := v.Type + ptr := v_0 + mem := v_1 + v.reset(OpAMD64VPMOVMToVec64x2) + v.Type = types.TypeVec128 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0.AddArg2(ptr, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLoadMask64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (LoadMask64x4 ptr mem) + // result: (VPMOVMToVec64x4 (KMOVQload ptr mem)) + for { + t := v.Type + ptr := v_0 + mem := v_1 + v.reset(OpAMD64VPMOVMToVec64x4) + v.Type = types.TypeVec256 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0.AddArg2(ptr, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLoadMask64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (LoadMask64x8 ptr mem) + // result: (VPMOVMToVec64x8 (KMOVQload ptr mem)) + for { + t := v.Type + ptr := v_0 + mem := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v.Type = types.TypeVec512 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0.AddArg2(ptr, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLoadMask8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (LoadMask8x16 ptr mem) + // result: (VPMOVMToVec8x16 (KMOVQload ptr mem)) + for { + t := v.Type + ptr := v_0 + mem := v_1 + v.reset(OpAMD64VPMOVMToVec8x16) + v.Type = types.TypeVec128 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0.AddArg2(ptr, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLoadMask8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (LoadMask8x32 ptr mem) + // result: (VPMOVMToVec8x32 (KMOVQload ptr mem)) + for { + t := v.Type + ptr := v_0 + mem := v_1 + v.reset(OpAMD64VPMOVMToVec8x32) + v.Type = types.TypeVec256 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0.AddArg2(ptr, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLoadMask8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (LoadMask8x64 ptr mem) + // result: (VPMOVMToVec8x64 (KMOVQload ptr mem)) + for { + t := v.Type + ptr := v_0 + mem := v_1 + v.reset(OpAMD64VPMOVMToVec8x64) + v.Type = types.TypeVec512 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0.AddArg2(ptr, mem) + v.AddArg(v0) + return true + } +} func rewriteValueAMD64_OpLocalAddr(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] diff --git a/src/cmd/compile/internal/ssagen/intrinsics.go b/src/cmd/compile/internal/ssagen/intrinsics.go index 5415143ec3199c..e012b536b55e1b 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics.go +++ b/src/cmd/compile/internal/ssagen/intrinsics.go @@ -1775,6 +1775,22 @@ func simdStore() func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { } } +func simdLoadMask(elemBits, lanes int) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + opCodes := map[int]map[int]ssa.Op{ + 8: {16: ssa.OpLoadMask8x16, 32: ssa.OpLoadMask8x32, 64: ssa.OpLoadMask8x64}, + 16: {8: ssa.OpLoadMask16x8, 
16: ssa.OpLoadMask16x16, 32: ssa.OpLoadMask16x32}, + 32: {4: ssa.OpLoadMask32x4, 8: ssa.OpLoadMask32x8, 16: ssa.OpLoadMask32x16}, + 64: {2: ssa.OpLoadMask64x2, 4: ssa.OpLoadMask64x4, 8: ssa.OpLoadMask64x8}, + } + op := opCodes[elemBits][lanes] + if op == 0 { + panic(fmt.Sprintf("Unknown mask shape: Mask%dx%d", elemBits, lanes)) + } + return s.newValue2(op, types.TypeMask, args[0], s.mem()) + } +} + // findIntrinsic returns a function which builds the SSA equivalent of the // function identified by the symbol sym. If sym is not an intrinsic call, returns nil. func findIntrinsic(sym *types.Sym) intrinsicBuilder { diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 3d9294990853e6..8040a187bda6d1 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -2132,76 +2132,64 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x4.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadUint64x8", simdLoad(), sys.AMD64) addF(simdPackage, "Uint64x8.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask8x16", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask8x16.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask8x32", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask8x32.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask8x64", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask8x64.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask16x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask16x8.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask16x16", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask16x16.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask16x32", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask16x32.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask32x4", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask32x4.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask32x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask32x8.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask32x16", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask32x16.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask64x2", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask64x2.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask64x4", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask64x4.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask64x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask64x8.Store", simdStore(), sys.AMD64) addF(simdPackage, "Mask8x16.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x16.AsMask8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask8x16.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Mask8x16.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "LoadMask8x16FromBits", simdLoadMask(8, 16), sys.AMD64) addF(simdPackage, "Mask8x32.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x32.AsMask8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask8x32.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Mask8x32.Or", 
opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "LoadMask8x32FromBits", simdLoadMask(8, 32), sys.AMD64) addF(simdPackage, "Mask8x64.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x64.AsMask8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask8x64.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask8x64.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "LoadMask8x64FromBits", simdLoadMask(8, 64), sys.AMD64) addF(simdPackage, "Mask16x8.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x8.AsMask16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask16x8.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Mask16x8.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "LoadMask16x8FromBits", simdLoadMask(16, 8), sys.AMD64) addF(simdPackage, "Mask16x16.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x16.AsMask16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask16x16.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Mask16x16.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "LoadMask16x16FromBits", simdLoadMask(16, 16), sys.AMD64) addF(simdPackage, "Mask16x32.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x32.AsMask16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask16x32.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask16x32.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "LoadMask16x32FromBits", simdLoadMask(16, 32), sys.AMD64) addF(simdPackage, "Mask32x4.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x4.AsMask32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask32x4.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Mask32x4.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "LoadMask32x4FromBits", simdLoadMask(32, 4), sys.AMD64) addF(simdPackage, "Mask32x8.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x8.AsMask32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask32x8.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Mask32x8.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "LoadMask32x8FromBits", simdLoadMask(32, 8), sys.AMD64) addF(simdPackage, "Mask32x16.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x16.AsMask32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask32x16.And", 
opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask32x16.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "LoadMask32x16FromBits", simdLoadMask(32, 16), sys.AMD64) addF(simdPackage, "Mask64x2.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x2.AsMask64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask64x2.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Mask64x2.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "LoadMask64x2FromBits", simdLoadMask(64, 2), sys.AMD64) addF(simdPackage, "Mask64x4.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x4.AsMask64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask64x4.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Mask64x4.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "LoadMask64x4FromBits", simdLoadMask(64, 4), sys.AMD64) addF(simdPackage, "Mask64x8.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x8.AsMask64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask64x8.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask64x8.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "LoadMask64x8FromBits", simdLoadMask(64, 8), sys.AMD64) } diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index 14e5fe31794f99..276ae9ed5d6924 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -460,3 +460,20 @@ func testMergeLocalswrapper(t *testing.T, op func(simd.Int64x4, simd.Int64x4) si } } } + +func TestBitMask(t *testing.T) { + if !simd.HasAVX512() { + t.Skip("Test requires HasAVX512, not available on this hardware") + return + } + var bits uint64 = 0b10 + results := [2]int64{} + want := [2]int64{0, 6} + m := simd.LoadMask64x2FromBits(&bits) + simd.LoadInt64x2Slice([]int64{1, 2}).AddMasked(simd.LoadInt64x2Slice([]int64{3, 4}), m).Store(&results) + for i := range 2 { + if results[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], results[i]) + } + } +} diff --git a/src/simd/types_amd64.go b/src/simd/types_amd64.go index 6cc79275767c4c..ccc8427bb3efe1 100644 --- a/src/simd/types_amd64.go +++ b/src/simd/types_amd64.go @@ -205,24 +205,48 @@ type Mask8x16 struct { vals [16]int8 } +// Mask8x16FromBits constructs a Mask8x16 from an a bitmap, where 1 means set for the indexed element, 0 means unset. +// Only the lower 16 bits of y are used. +// +//go:noescape +func LoadMask8x16FromBits(y *uint64) Mask8x16 + // Mask16x8 is a 128-bit SIMD vector of 8 int16 type Mask16x8 struct { int16x8 v128 vals [8]int16 } +// Mask16x8FromBits constructs a Mask16x8 from an a bitmap, where 1 means set for the indexed element, 0 means unset. +// Only the lower 8 bits of y are used. +// +//go:noescape +func LoadMask16x8FromBits(y *uint64) Mask16x8 + // Mask32x4 is a 128-bit SIMD vector of 4 int32 type Mask32x4 struct { int32x4 v128 vals [4]int32 } +// Mask32x4FromBits constructs a Mask32x4 from an a bitmap, where 1 means set for the indexed element, 0 means unset. 
+// Only the lower 4 bits of y are used. +// +//go:noescape +func LoadMask32x4FromBits(y *uint64) Mask32x4 + // Mask64x2 is a 128-bit SIMD vector of 2 int64 type Mask64x2 struct { int64x2 v128 vals [2]int64 }
+// LoadMask64x2FromBits constructs a Mask64x2 from a bitmap, where 1 means set for the indexed element, 0 means unset. +// Only the lower 2 bits of y are used. +// +//go:noescape +func LoadMask64x2FromBits(y *uint64) Mask64x2 + // v256 is a tag type that tells the compiler that this is really 256-bit SIMD type v256 struct { _256 struct{} @@ -424,24 +448,48 @@ type Mask8x32 struct { vals [32]int8 }
+// LoadMask8x32FromBits constructs a Mask8x32 from a bitmap, where 1 means set for the indexed element, 0 means unset. +// Only the lower 32 bits of y are used. +// +//go:noescape +func LoadMask8x32FromBits(y *uint64) Mask8x32 + // Mask16x16 is a 256-bit SIMD vector of 16 int16 type Mask16x16 struct { int16x16 v256 vals [16]int16 }
+// LoadMask16x16FromBits constructs a Mask16x16 from a bitmap, where 1 means set for the indexed element, 0 means unset. +// Only the lower 16 bits of y are used. +// +//go:noescape +func LoadMask16x16FromBits(y *uint64) Mask16x16 + // Mask32x8 is a 256-bit SIMD vector of 8 int32 type Mask32x8 struct { int32x8 v256 vals [8]int32 }
+// LoadMask32x8FromBits constructs a Mask32x8 from a bitmap, where 1 means set for the indexed element, 0 means unset. +// Only the lower 8 bits of y are used. +// +//go:noescape +func LoadMask32x8FromBits(y *uint64) Mask32x8 + // Mask64x4 is a 256-bit SIMD vector of 4 int64 type Mask64x4 struct { int64x4 v256 vals [4]int64 }
+// LoadMask64x4FromBits constructs a Mask64x4 from a bitmap, where 1 means set for the indexed element, 0 means unset. +// Only the lower 4 bits of y are used. +// +//go:noescape +func LoadMask64x4FromBits(y *uint64) Mask64x4 + // v512 is a tag type that tells the compiler that this is really 512-bit SIMD type v512 struct { _512 struct{} @@ -643,20 +691,44 @@ type Mask8x64 struct { vals [64]int8 }
+// LoadMask8x64FromBits constructs a Mask8x64 from a bitmap, where 1 means set for the indexed element, 0 means unset. +// Only the lower 64 bits of y are used. +// +//go:noescape +func LoadMask8x64FromBits(y *uint64) Mask8x64 + // Mask16x32 is a 512-bit SIMD vector of 32 int16 type Mask16x32 struct { int16x32 v512 vals [32]int16 }
+// LoadMask16x32FromBits constructs a Mask16x32 from a bitmap, where 1 means set for the indexed element, 0 means unset. +// Only the lower 32 bits of y are used. +// +//go:noescape +func LoadMask16x32FromBits(y *uint64) Mask16x32 + // Mask32x16 is a 512-bit SIMD vector of 16 int32 type Mask32x16 struct { int32x16 v512 vals [16]int32 }
+// LoadMask32x16FromBits constructs a Mask32x16 from a bitmap, where 1 means set for the indexed element, 0 means unset. +// Only the lower 16 bits of y are used. +// +//go:noescape +func LoadMask32x16FromBits(y *uint64) Mask32x16 + // Mask64x8 is a 512-bit SIMD vector of 8 int64 type Mask64x8 struct { int64x8 v512 vals [8]int64 } +
+// LoadMask64x8FromBits constructs a Mask64x8 from a bitmap, where 1 means set for the indexed element, 0 means unset. +// Only the lower 8 bits of y are used. 
+// +//go:noescape +func LoadMask64x8FromBits(y *uint64) Mask64x8 From 41054cdb1cd9f2a7400668d385ec1a030d90389c Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Tue, 15 Jul 2025 21:38:28 +0000 Subject: [PATCH 093/139] [dev.simd] simd, internal/cpu: support more AVX CPU Feature checks This CL adds more checks, it also changes HasAVX512GFNI to be exactly checking GFNI instead of being a virtual feature. This CL copies its logic from x/sys/arch. Change-Id: I4612b0409b8a3518928300562ae08bcf123d53a7 Reviewed-on: https://go-review.googlesource.com/c/go/+/688276 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/internal/cpu/cpu.go | 61 ++++++++++++++++++++----------------- src/internal/cpu/cpu_x86.go | 46 +++++++++++++++++++--------- src/simd/cpu.go | 46 ++++++++++++++++++++++++++-- 3 files changed, 108 insertions(+), 45 deletions(-) diff --git a/src/internal/cpu/cpu.go b/src/internal/cpu/cpu.go index 1eeb580711439e..53633c7ca8047f 100644 --- a/src/internal/cpu/cpu.go +++ b/src/internal/cpu/cpu.go @@ -26,34 +26,39 @@ var CacheLineSize uintptr = CacheLinePadSize // in addition to the cpuid feature bit being set. // The struct is padded to avoid false sharing. var X86 struct { - _ CacheLinePad - HasAES bool - HasADX bool - HasAVX bool - HasAVX2 bool - HasAVX512GFNI bool // Virtual feature: F+CD+BW+DQ+VL+GFNI - HasAVX512 bool // Virtual feature: F+CD+BW+DQ+VL - HasAVX512F bool - HasAVX512CD bool - HasAVX512BW bool - HasAVX512DQ bool - HasAVX512VL bool - HasBMI1 bool - HasBMI2 bool - HasERMS bool - HasFSRM bool - HasFMA bool - HasGFNI bool - HasOSXSAVE bool - HasPCLMULQDQ bool - HasPOPCNT bool - HasRDTSCP bool - HasSHA bool - HasSSE3 bool - HasSSSE3 bool - HasSSE41 bool - HasSSE42 bool - _ CacheLinePad + _ CacheLinePad + HasAES bool + HasADX bool + HasAVX bool + HasAVXVNNI bool + HasAVX2 bool + HasAVX512 bool // Virtual feature: F+CD+BW+DQ+VL + HasAVX512F bool + HasAVX512CD bool + HasAVX512BW bool + HasAVX512DQ bool + HasAVX512VL bool + HasAVX512GFNI bool + HasAVX512VNNI bool + HasAVX512VBMI bool + HasAVX512VBMI2 bool + HasAVX512BITALG bool + HasAVX512VPOPCNTDQ bool + HasBMI1 bool + HasBMI2 bool + HasERMS bool + HasFSRM bool + HasFMA bool + HasOSXSAVE bool + HasPCLMULQDQ bool + HasPOPCNT bool + HasRDTSCP bool + HasSHA bool + HasSSE3 bool + HasSSSE3 bool + HasSSE41 bool + HasSSE42 bool + _ CacheLinePad } // The booleans in ARM contain the correspondingly named cpu feature bit. 
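These bits exist so that code can pick a vector path at run time only when the exact features it needs are present. A small hedged sketch of that dispatch pattern, using the exported helpers this CL adds to package simd below; the kernel labels are placeholders rather than real functions, and GOEXPERIMENT=simd on amd64 is assumed:

package main

import (
	"fmt"
	"simd"
)

// pickKernel chooses an implementation tier from the detected CPU features.
// The returned labels only stand in for real implementations.
func pickKernel() string {
	switch {
	case simd.HasAVX512() && simd.HasAVX512VNNI():
		return "avx512vnni" // could use VPDPBUSD-based dot products
	case simd.HasAVX2() && simd.HasAVXVNNI():
		return "avxvnni"
	case simd.HasAVX2():
		return "avx2"
	default:
		return "scalar"
	}
}

func main() { fmt.Println("using", pickKernel()) }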
diff --git a/src/internal/cpu/cpu_x86.go b/src/internal/cpu/cpu_x86.go index 152a08cdbfd11a..04d89955dae09d 100644 --- a/src/internal/cpu/cpu_x86.go +++ b/src/internal/cpu/cpu_x86.go @@ -18,18 +18,26 @@ func xgetbv() (eax, edx uint32) func getGOAMD64level() int32 const ( + // eax bits + cpuid_AVXVNNI = 1 << 4 + // ecx bits - cpuid_SSE3 = 1 << 0 - cpuid_PCLMULQDQ = 1 << 1 - cpuid_SSSE3 = 1 << 9 - cpuid_GFNI = 1 << 8 - cpuid_FMA = 1 << 12 - cpuid_SSE41 = 1 << 19 - cpuid_SSE42 = 1 << 20 - cpuid_POPCNT = 1 << 23 - cpuid_AES = 1 << 25 - cpuid_OSXSAVE = 1 << 27 - cpuid_AVX = 1 << 28 + cpuid_SSE3 = 1 << 0 + cpuid_PCLMULQDQ = 1 << 1 + cpuid_AVX512VBMI = 1 << 1 + cpuid_AVX512VBMI2 = 1 << 6 + cpuid_SSSE3 = 1 << 9 + cpuid_AVX512GFNI = 1 << 8 + cpuid_AVX512VNNI = 1 << 11 + cpuid_AVX512BITALG = 1 << 12 + cpuid_FMA = 1 << 12 + cpuid_AVX512VPOPCNTDQ = 1 << 14 + cpuid_SSE41 = 1 << 19 + cpuid_SSE42 = 1 << 20 + cpuid_POPCNT = 1 << 23 + cpuid_AES = 1 << 25 + cpuid_OSXSAVE = 1 << 27 + cpuid_AVX = 1 << 28 // ebx bits cpuid_BMI1 = 1 << 3 @@ -144,7 +152,7 @@ func doinit() { return } - _, ebx7, ecx7, edx7 := cpuid(7, 0) + eax7, ebx7, ecx7, edx7 := cpuid(7, 0) X86.HasBMI1 = isSet(ebx7, cpuid_BMI1) X86.HasAVX2 = isSet(ebx7, cpuid_AVX2) && osSupportsAVX X86.HasBMI2 = isSet(ebx7, cpuid_BMI2) @@ -158,10 +166,15 @@ func doinit() { X86.HasAVX512BW = isSet(ebx7, cpuid_AVX512BW) X86.HasAVX512DQ = isSet(ebx7, cpuid_AVX512DQ) X86.HasAVX512VL = isSet(ebx7, cpuid_AVX512VL) + X86.HasAVX512GFNI = isSet(ecx7, cpuid_AVX512GFNI) + X86.HasAVX512BITALG = isSet(ecx7, cpuid_AVX512BITALG) + X86.HasAVX512VPOPCNTDQ = isSet(ecx7, cpuid_AVX512VPOPCNTDQ) + X86.HasAVX512VBMI = isSet(ecx7, cpuid_AVX512VBMI) + X86.HasAVX512VBMI2 = isSet(ecx7, cpuid_AVX512VBMI2) + X86.HasAVX512VNNI = isSet(ecx7, cpuid_AVX512VNNI) } X86.HasFSRM = isSet(edx7, cpuid_FSRM) - X86.HasGFNI = isSet(ecx7, cpuid_GFNI) var maxExtendedInformation uint32 maxExtendedInformation, _, _, _ = cpuid(0x80000000, 0) @@ -182,7 +195,12 @@ func doinit() { // it. GOAMD64=v4 also implies exactly this set, and these are all // included in AVX10.1. X86.HasAVX512 = X86.HasAVX512F && X86.HasAVX512CD && X86.HasAVX512BW && X86.HasAVX512DQ && X86.HasAVX512VL - X86.HasAVX512GFNI = X86.HasAVX512 && X86.HasGFNI + } + if eax7 >= 1 { + eax71, _, _, _ := cpuid(7, 1) + if X86.HasAVX { + X86.HasAVXVNNI = isSet(4, eax71) + } } } diff --git a/src/simd/cpu.go b/src/simd/cpu.go index 5ff47b8873488d..7bc511652549c2 100644 --- a/src/simd/cpu.go +++ b/src/simd/cpu.go @@ -11,12 +11,52 @@ package simd import "internal/cpu" -// HasAVX512GFNI checks AVX512 CPU feature F+CD+BW+DQ+VL+GFNI. -func HasAVX512GFNI() bool { - return cpu.X86.HasAVX512GFNI +// HasAVX checks AVX CPU feature. +func HasAVX() bool { + return cpu.X86.HasAVX +} + +// HasAVXVNNI checks AVX CPU feature VNNI. +func HasAVXVNNI() bool { + return cpu.X86.HasAVXVNNI +} + +// HasAVX2 checks AVX2 CPU feature. +func HasAVX2() bool { + return cpu.X86.HasAVX2 } // HasAVX512 checks AVX512 CPU feature F+CD+BW+DQ+VL. func HasAVX512() bool { return cpu.X86.HasAVX512 } + +// HasAVX512GFNI checks AVX512 CPU feature GFNI. 
+func HasAVX512GFNI() bool { + return cpu.X86.HasAVX512GFNI +} + +// HasAVX512VBMI checks AVX512 CPU feature VBMI +func HasAVX512VBMI() bool { + return cpu.X86.HasAVX512VBMI +} + +// HasAVX512VBMI2 checks AVX512 CPU feature VBMI2 +func HasAVX512VBMI2() bool { + return cpu.X86.HasAVX512VBMI2 +} + +// HasAVX512VNNI checks AVX512 CPU feature VNNI +func HasAVX512VNNI() bool { + return cpu.X86.HasAVX512VNNI +} + +// HasAVX512VPOPCNTDQ checks AVX512 CPU feature VPOPCNTDQ +func HasAVX512VPOPCNTDQ() bool { + return cpu.X86.HasAVX512VPOPCNTDQ +} + +// HasAVX512BITALG checks AVX512 CPU feature BITALG +func HasAVX512BITALG() bool { + return cpu.X86.HasAVX512BITALG +} From 6f7a1164e797f694c535ebf5f2c9722845a732cd Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Wed, 23 Jul 2025 07:37:14 +0000 Subject: [PATCH 094/139] [dev.simd] cmd/compile, simd: support store to bits for mask This CL is partially generated by CL 689775. Change-Id: I0c36fd2a44706c88db1a1d5ea4a6d0b9f891d85f Reviewed-on: https://go-review.googlesource.com/c/go/+/689795 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/simdssa.go | 32 +- src/cmd/compile/internal/amd64/ssa.go | 4 +- src/cmd/compile/internal/ssa/_gen/AMD64.rules | 16 + src/cmd/compile/internal/ssa/_gen/AMD64Ops.go | 4 +- .../compile/internal/ssa/_gen/genericOps.go | 13 + .../compile/internal/ssa/_gen/simdAMD64.rules | 28 +- .../compile/internal/ssa/_gen/simdAMD64ops.go | 44 +- .../internal/ssa/_gen/simdgenericOps.go | 34 +- src/cmd/compile/internal/ssa/opGen.go | 635 ++++++++++++------ src/cmd/compile/internal/ssa/rewriteAMD64.go | 316 +++++++-- src/cmd/compile/internal/ssagen/intrinsics.go | 17 + .../compile/internal/ssagen/simdintrinsics.go | 36 +- src/simd/ops_amd64.go | 226 +++---- src/simd/simd_test.go | 18 +- src/simd/types_amd64.go | 144 +++- 15 files changed, 1118 insertions(+), 449 deletions(-) diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 67179ef12d6594..f374cd25d0a561 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -24,8 +24,8 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPABSQ128, ssa.OpAMD64VPABSQ256, ssa.OpAMD64VPABSQ512, - ssa.OpAMD64VRCP14PS128, - ssa.OpAMD64VRCP14PS256, + ssa.OpAMD64VRCPPS128, + ssa.OpAMD64VRCPPS256, ssa.OpAMD64VRCP14PS512, ssa.OpAMD64VRCP14PD128, ssa.OpAMD64VRCP14PD256, @@ -335,6 +335,16 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPXORQ512: p = simdV21(s, v) + case ssa.OpAMD64VPCMPEQB512, + ssa.OpAMD64VPCMPEQW512, + ssa.OpAMD64VPCMPEQD512, + ssa.OpAMD64VPCMPEQQ512, + ssa.OpAMD64VPCMPGTB512, + ssa.OpAMD64VPCMPGTW512, + ssa.OpAMD64VPCMPGTD512, + ssa.OpAMD64VPCMPGTQ512: + p = simdV2k(s, v) + case ssa.OpAMD64VADDPSMasked128, ssa.OpAMD64VADDPSMasked256, ssa.OpAMD64VADDPSMasked512, @@ -733,30 +743,30 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { case ssa.OpAMD64VCMPPS512, ssa.OpAMD64VCMPPD512, - ssa.OpAMD64VPCMPB512, - ssa.OpAMD64VPCMPW512, - ssa.OpAMD64VPCMPD512, - ssa.OpAMD64VPCMPQ512, - ssa.OpAMD64VPCMPUB512, - ssa.OpAMD64VPCMPUW512, - ssa.OpAMD64VPCMPUD512, - ssa.OpAMD64VPCMPUQ512, ssa.OpAMD64VPCMPUB128, ssa.OpAMD64VPCMPUB256, + ssa.OpAMD64VPCMPUB512, ssa.OpAMD64VPCMPUW128, ssa.OpAMD64VPCMPUW256, + ssa.OpAMD64VPCMPUW512, ssa.OpAMD64VPCMPUD128, ssa.OpAMD64VPCMPUD256, + ssa.OpAMD64VPCMPUD512, ssa.OpAMD64VPCMPUQ128, ssa.OpAMD64VPCMPUQ256, + ssa.OpAMD64VPCMPUQ512, ssa.OpAMD64VPCMPB128, ssa.OpAMD64VPCMPB256, + 
ssa.OpAMD64VPCMPB512, ssa.OpAMD64VPCMPW128, ssa.OpAMD64VPCMPW256, + ssa.OpAMD64VPCMPW512, ssa.OpAMD64VPCMPD128, ssa.OpAMD64VPCMPD256, + ssa.OpAMD64VPCMPD512, ssa.OpAMD64VPCMPQ128, - ssa.OpAMD64VPCMPQ256: + ssa.OpAMD64VPCMPQ256, + ssa.OpAMD64VPCMPQ512: p = simdV2kImm8(s, v) case ssa.OpAMD64VCMPPSMasked128, diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 0fafd69f54ba6a..7338c16cdad9b0 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -1468,10 +1468,10 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { ssagen.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = simdOrMaskReg(v) - case ssa.OpAMD64VMOVDQUstore128, ssa.OpAMD64VMOVDQUstore256, ssa.OpAMD64VMOVDQUstore512: + case ssa.OpAMD64VMOVDQUstore128, ssa.OpAMD64VMOVDQUstore256, ssa.OpAMD64VMOVDQUstore512, ssa.OpAMD64KMOVQstore: p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG - p.From.Reg = simdReg(v.Args[1]) + p.From.Reg = simdOrMaskReg(v.Args[1]) p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() ssagen.AddAux(&p.To, v) diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64.rules b/src/cmd/compile/internal/ssa/_gen/AMD64.rules index bb7513795d9d62..5a21c95df9e4f8 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/AMD64.rules @@ -1698,6 +1698,22 @@ (LoadMask64x4 ptr mem) => (VPMOVMToVec64x4 (KMOVQload ptr mem)) (LoadMask64x8 ptr mem) => (VPMOVMToVec64x8 (KMOVQload ptr mem)) +(StoreMask8x16 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec8x16ToM val) mem) +(StoreMask8x32 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec8x32ToM val) mem) +(StoreMask8x64 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec8x64ToM val) mem) + +(StoreMask16x8 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec16x8ToM val) mem) +(StoreMask16x16 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec16x16ToM val) mem) +(StoreMask16x32 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec16x32ToM val) mem) + +(StoreMask32x4 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec32x4ToM val) mem) +(StoreMask32x8 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec32x8ToM val) mem) +(StoreMask32x16 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec32x16ToM val) mem) + +(StoreMask64x2 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec64x2ToM val) mem) +(StoreMask64x4 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec64x4ToM val) mem) +(StoreMask64x8 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec64x8ToM val) mem) + (Load ptr mem) && t.Size() == 16 => (VMOVDQUload128 ptr mem) (Store {t} ptr val mem) && t.Size() == 16 => (VMOVDQUstore128 ptr val mem) diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go index ec335f67f87679..cd4b5b2a06a8f0 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go @@ -234,7 +234,8 @@ func init() { wfpw = regInfo{inputs: []regMask{w, fp}, outputs: wonly} wfpkw = regInfo{inputs: []regMask{w, fp, mask}, outputs: wonly} - kload = regInfo{inputs: []regMask{gpspsb, 0}, outputs: maskonly} + kload = regInfo{inputs: []regMask{gpspsb, 0}, outputs: maskonly} + kstore = regInfo{inputs: []regMask{gpspsb, mask, 0}} prefreg = regInfo{inputs: []regMask{gpspsbg}} ) @@ -1318,6 +1319,7 @@ func init() { {name: "VZEROALL", argLength: 0, asm: "VZEROALL"}, {name: "KMOVQload", argLength: 2, reg: kload, asm: "KMOVQ", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, + {name: "KMOVQstore", argLength: 3, reg: kstore, asm: "KMOVQ", aux: "SymOff", faultOnNilArg0: true, 
symEffect: "Write"}, } var AMD64blocks = []blockData{ diff --git a/src/cmd/compile/internal/ssa/_gen/genericOps.go b/src/cmd/compile/internal/ssa/_gen/genericOps.go index 6257396a6f5d24..716fe9b881843a 100644 --- a/src/cmd/compile/internal/ssa/_gen/genericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/genericOps.go @@ -678,6 +678,19 @@ var genericOps = []opData{ {name: "LoadMask64x2", argLength: 2}, // arg0 = ptr, arg1 = mem {name: "LoadMask64x4", argLength: 2}, // arg0 = ptr, arg1 = mem {name: "LoadMask64x8", argLength: 2}, // arg0 = ptr, arg1 = mem + + {name: "StoreMask8x16", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory. + {name: "StoreMask8x32", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory. + {name: "StoreMask8x64", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory. + {name: "StoreMask16x8", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory. + {name: "StoreMask16x16", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory. + {name: "StoreMask16x32", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory. + {name: "StoreMask32x4", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory. + {name: "StoreMask32x8", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory. + {name: "StoreMask32x16", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory. + {name: "StoreMask64x2", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory. + {name: "StoreMask64x4", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory. + {name: "StoreMask64x8", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory. } // kind controls successors implicit exit diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index e5f17bdb1b29dd..fb153acf66e074 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -152,8 +152,8 @@ (AndNotMaskedUint64x2 x y mask) => (VPANDNQMasked128 x y (VPMOVVec64x2ToM mask)) (AndNotMaskedUint64x4 x y mask) => (VPANDNQMasked256 x y (VPMOVVec64x4ToM mask)) (AndNotMaskedUint64x8 x y mask) => (VPANDNQMasked512 x y (VPMOVVec64x8ToM mask)) -(ApproximateReciprocalFloat32x4 ...) => (VRCP14PS128 ...) -(ApproximateReciprocalFloat32x8 ...) => (VRCP14PS256 ...) +(ApproximateReciprocalFloat32x4 ...) => (VRCPPS128 ...) +(ApproximateReciprocalFloat32x8 ...) => (VRCPPS256 ...) (ApproximateReciprocalFloat32x16 ...) => (VRCP14PS512 ...) (ApproximateReciprocalFloat64x2 ...) => (VRCP14PD128 ...) (ApproximateReciprocalFloat64x4 ...) => (VRCP14PD256 ...) @@ -305,28 +305,28 @@ (EqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [0] x y)) (EqualInt8x16 ...) => (VPCMPEQB128 ...) (EqualInt8x32 ...) => (VPCMPEQB256 ...) -(EqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [0] x y)) +(EqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPEQB512 x y)) (EqualInt16x8 ...) => (VPCMPEQW128 ...) (EqualInt16x16 ...) => (VPCMPEQW256 ...) 
-(EqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [0] x y)) +(EqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPEQW512 x y)) (EqualInt32x4 ...) => (VPCMPEQD128 ...) (EqualInt32x8 ...) => (VPCMPEQD256 ...) -(EqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [0] x y)) +(EqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPEQD512 x y)) (EqualInt64x2 ...) => (VPCMPEQQ128 ...) (EqualInt64x4 ...) => (VPCMPEQQ256 ...) -(EqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [0] x y)) +(EqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPEQQ512 x y)) (EqualUint8x16 ...) => (VPCMPEQB128 ...) (EqualUint8x32 ...) => (VPCMPEQB256 ...) -(EqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [0] x y)) +(EqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPEQB512 x y)) (EqualUint16x8 ...) => (VPCMPEQW128 ...) (EqualUint16x16 ...) => (VPCMPEQW256 ...) -(EqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [0] x y)) +(EqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPEQW512 x y)) (EqualUint32x4 ...) => (VPCMPEQD128 ...) (EqualUint32x8 ...) => (VPCMPEQD256 ...) -(EqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [0] x y)) +(EqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPEQD512 x y)) (EqualUint64x2 ...) => (VPCMPEQQ128 ...) (EqualUint64x4 ...) => (VPCMPEQQ256 ...) -(EqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [0] x y)) +(EqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPEQQ512 x y)) (EqualMaskedFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [0] x y (VPMOVVec32x4ToM mask))) (EqualMaskedFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [0] x y (VPMOVVec32x8ToM mask))) (EqualMaskedFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [0] x y (VPMOVVec32x16ToM mask))) @@ -453,16 +453,16 @@ (GreaterFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [14] x y)) (GreaterInt8x16 ...) => (VPCMPGTB128 ...) (GreaterInt8x32 ...) => (VPCMPGTB256 ...) -(GreaterInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [14] x y)) +(GreaterInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPGTB512 x y)) (GreaterInt16x8 ...) => (VPCMPGTW128 ...) (GreaterInt16x16 ...) => (VPCMPGTW256 ...) -(GreaterInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [14] x y)) +(GreaterInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPGTW512 x y)) (GreaterInt32x4 ...) => (VPCMPGTD128 ...) (GreaterInt32x8 ...) => (VPCMPGTD256 ...) -(GreaterInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [14] x y)) +(GreaterInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPGTD512 x y)) (GreaterInt64x2 ...) => (VPCMPGTQ128 ...) (GreaterInt64x4 ...) => (VPCMPGTQ256 ...) 
-(GreaterInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [14] x y)) +(GreaterInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPGTQ512 x y)) (GreaterUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [14] x y)) (GreaterUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [14] x y)) (GreaterUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [14] x y)) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index a7a3c9715c45cf..5a51e4400ad65f 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -33,7 +33,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VADDPS128", argLength: 2, reg: v21, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VADDPSMasked128", argLength: 3, reg: w2kw, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VADDSUBPS128", argLength: 2, reg: v21, asm: "VADDSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRCP14PS128", argLength: 1, reg: w11, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRCPPS128", argLength: 1, reg: v11, asm: "VRCPPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRCP14PSMasked128", argLength: 2, reg: wkw, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRSQRTPS128", argLength: 1, reg: v11, asm: "VRSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRSQRT14PSMasked128", argLength: 2, reg: wkw, asm: "VRSQRT14PS", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -63,7 +63,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VADDPS256", argLength: 2, reg: v21, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VADDPSMasked256", argLength: 3, reg: w2kw, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VADDSUBPS256", argLength: 2, reg: v21, asm: "VADDSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRCP14PS256", argLength: 1, reg: w11, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRCPPS256", argLength: 1, reg: v11, asm: "VRCPPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRCP14PSMasked256", argLength: 2, reg: wkw, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRSQRTPS256", argLength: 1, reg: v11, asm: "VRSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRSQRT14PSMasked256", argLength: 2, reg: wkw, asm: "VRSQRT14PS", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -224,6 +224,8 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPADDW512", argLength: 2, reg: w21, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPADDWMasked512", argLength: 3, reg: w2kw, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPCOMPRESSWMasked512", argLength: 2, reg: wkw, asm: "VPCOMPRESSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPEQW512", argLength: 2, reg: w2k, asm: "VPCMPEQW", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTW512", argLength: 2, reg: w2k, asm: "VPCMPGTW", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPMAXSW512", argLength: 2, reg: w21, asm: "VPMAXSW", commutative: true, typ: "Vec512", 
resultInArg0: false}, {name: "VPMAXSWMasked512", argLength: 3, reg: w2kw, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINSW512", argLength: 2, reg: w21, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -305,6 +307,8 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPANDND512", argLength: 2, reg: w21, asm: "VPANDND", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPANDNDMasked512", argLength: 3, reg: w2kw, asm: "VPANDND", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPCOMPRESSDMasked512", argLength: 2, reg: wkw, asm: "VPCOMPRESSD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPEQD512", argLength: 2, reg: w2k, asm: "VPCMPEQD", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTD512", argLength: 2, reg: w2k, asm: "VPCMPGTD", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPMAXSD512", argLength: 2, reg: w21, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXSDMasked512", argLength: 3, reg: w2kw, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINSD512", argLength: 2, reg: w21, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -526,6 +530,8 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPANDNQ512", argLength: 2, reg: w21, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPANDNQMasked512", argLength: 3, reg: w2kw, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPCOMPRESSQMasked512", argLength: 2, reg: wkw, asm: "VPCOMPRESSQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPEQQ512", argLength: 2, reg: w2k, asm: "VPCMPEQQ", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTQ512", argLength: 2, reg: w2k, asm: "VPCMPGTQ", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPMAXSQ512", argLength: 2, reg: w21, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXSQMasked512", argLength: 3, reg: w2kw, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINSQ512", argLength: 2, reg: w21, asm: "VPMINSQ", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -611,6 +617,8 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPADDB512", argLength: 2, reg: w21, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPADDBMasked512", argLength: 3, reg: w2kw, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPCOMPRESSBMasked512", argLength: 2, reg: wkw, asm: "VPCOMPRESSB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPEQB512", argLength: 2, reg: w2k, asm: "VPCMPEQB", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTB512", argLength: 2, reg: w2k, asm: "VPCMPGTB", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPMAXSB512", argLength: 2, reg: w21, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXSBMasked512", argLength: 3, reg: w2kw, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINSB512", argLength: 2, reg: w21, asm: "VPMINSB", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -692,10 +700,10 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, 
v3kv, vgpv, vgp, vfpv, vf {name: "VPMINUD128", argLength: 2, reg: v21, asm: "VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUDMasked128", argLength: 3, reg: w2kw, asm: "VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULUDQ128", argLength: 2, reg: v21, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPERMI2D128", argLength: 3, reg: w31, asm: "VPERMI2D", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPERMI2PS128", argLength: 3, reg: w31, asm: "VPERMI2PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPERMI2PSMasked128", argLength: 4, reg: w3kw, asm: "VPERMI2PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPERMI2D128", argLength: 3, reg: w31, asm: "VPERMI2D", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPERMI2DMasked128", argLength: 4, reg: w3kw, asm: "VPERMI2D", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPERMI2PSMasked128", argLength: 4, reg: w3kw, asm: "VPERMI2PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPSRLD128", argLength: 2, reg: vfpv, asm: "VPSRLD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRLDMasked128", argLength: 3, reg: wfpkw, asm: "VPSRLD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRLVD128", argLength: 2, reg: v21, asm: "VPSRLVD", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -705,12 +713,12 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMINUD256", argLength: 2, reg: v21, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUDMasked256", argLength: 3, reg: w2kw, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULUDQ256", argLength: 2, reg: v21, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPERMPS256", argLength: 2, reg: v21, asm: "VPERMPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPERMD256", argLength: 2, reg: v21, asm: "VPERMD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPERMI2D256", argLength: 3, reg: w31, asm: "VPERMI2D", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMPS256", argLength: 2, reg: v21, asm: "VPERMPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPERMI2PS256", argLength: 3, reg: w31, asm: "VPERMI2PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPERMI2DMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2D", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMI2D256", argLength: 3, reg: w31, asm: "VPERMI2D", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPERMI2PSMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMI2DMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2D", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPERMPSMasked256", argLength: 3, reg: w2kw, asm: "VPERMPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPERMDMasked256", argLength: 3, reg: w2kw, asm: "VPERMD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLD256", argLength: 2, reg: vfpv, asm: "VPSRLD", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -735,10 +743,10 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMINUQ256", 
argLength: 2, reg: w21, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUQMasked256", argLength: 3, reg: w2kw, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULUDQMasked256", argLength: 3, reg: w2kw, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPERMQ256", argLength: 2, reg: w21, asm: "VPERMQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPERMPD256", argLength: 2, reg: w21, asm: "VPERMPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPERMI2Q256", argLength: 3, reg: w31, asm: "VPERMI2Q", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMQ256", argLength: 2, reg: w21, asm: "VPERMQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPERMI2PD256", argLength: 3, reg: w31, asm: "VPERMI2PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMI2Q256", argLength: 3, reg: w31, asm: "VPERMI2Q", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPERMI2PDMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2PD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPERMI2QMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2Q", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPERMQMasked256", argLength: 3, reg: w2kw, asm: "VPERMQ", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -759,8 +767,8 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPERMI2PD512", argLength: 3, reg: w31, asm: "VPERMI2PD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPERMI2QMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2Q", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPERMI2PDMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPERMQMasked512", argLength: 3, reg: w2kw, asm: "VPERMQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPERMPDMasked512", argLength: 3, reg: w2kw, asm: "VPERMPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPERMQMasked512", argLength: 3, reg: w2kw, asm: "VPERMQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRLQ512", argLength: 2, reg: wfpw, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRLQMasked512", argLength: 3, reg: wfpkw, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRLVQ512", argLength: 2, reg: w21, asm: "VPSRLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -858,8 +866,8 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPSHLDWMasked256", argLength: 3, reg: w2kw, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHRDW256", argLength: 2, reg: w21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHRDWMasked256", argLength: 3, reg: w2kw, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPW512", argLength: 2, reg: w2k, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPWMasked512", argLength: 3, reg: w2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPW512", argLength: 2, reg: w2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: 
"VPSHLDW512", argLength: 2, reg: w21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSHLDWMasked512", argLength: 3, reg: w2kw, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSHRDW512", argLength: 2, reg: w21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -872,8 +880,8 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPSHLDWMasked128", argLength: 3, reg: w2kw, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSHRDW128", argLength: 2, reg: w21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSHRDWMasked128", argLength: 3, reg: w2kw, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPD512", argLength: 2, reg: w2k, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPDMasked512", argLength: 3, reg: w2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPD512", argLength: 2, reg: w2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPROLD512", argLength: 1, reg: w11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPROLDMasked512", argLength: 2, reg: wkw, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPRORD512", argLength: 1, reg: w11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -926,8 +934,8 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPSHLDQMasked256", argLength: 3, reg: w2kw, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHRDQ256", argLength: 2, reg: w21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHRDQMasked256", argLength: 3, reg: w2kw, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPQ512", argLength: 2, reg: w2k, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPQMasked512", argLength: 3, reg: w2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQ512", argLength: 2, reg: w2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPROLQ512", argLength: 1, reg: w11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPROLQMasked512", argLength: 2, reg: wkw, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPRORQ512", argLength: 1, reg: w11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -944,16 +952,16 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VEXTRACTI128128", argLength: 1, reg: v11, asm: "VEXTRACTI128", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPB256", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VINSERTI128256", argLength: 2, reg: v21, asm: "VINSERTI128", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPB512", argLength: 2, reg: w2k, asm: "VPCMPB", aux: 
"Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPBMasked512", argLength: 3, reg: w2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPB512", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUWMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUW256", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUW512", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUWMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUW512", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUWMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUW128", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUD512", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUDMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUD512", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUDMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUD128", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUDMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, @@ -962,8 +970,8 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPCMPUQ128", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUQMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUQ256", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQ512", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUQMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQ512", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUBMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VGF2P8AFFINEQB128", argLength: 2, reg: w21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VGF2P8AFFINEINVQB128", argLength: 2, reg: w21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -976,11 +984,11 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VGF2P8AFFINEINVQBMasked256", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEINVQB", 
aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VGF2P8AFFINEQBMasked256", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPUB256", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUB512", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUBMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VGF2P8AFFINEQB512", argLength: 2, reg: w21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VGF2P8AFFINEINVQB512", argLength: 2, reg: w21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VGF2P8AFFINEINVQBMasked512", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VGF2P8AFFINEQBMasked512", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPUB512", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, } } diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index c8fe1e9eeee6d3..7b016b517d2253 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -912,10 +912,10 @@ func simdGenericOps() []opData { {name: "PermuteUint16x16", argLength: 2, commutative: false}, {name: "Permute2Uint16x16", argLength: 3, commutative: false}, {name: "Permute2Int16x16", argLength: 3, commutative: false}, - {name: "Permute2MaskedInt16x16", argLength: 4, commutative: false}, {name: "Permute2MaskedUint16x16", argLength: 4, commutative: false}, - {name: "PermuteMaskedUint16x16", argLength: 3, commutative: false}, + {name: "Permute2MaskedInt16x16", argLength: 4, commutative: false}, {name: "PermuteMaskedInt16x16", argLength: 3, commutative: false}, + {name: "PermuteMaskedUint16x16", argLength: 3, commutative: false}, {name: "PopCountUint16x16", argLength: 1, commutative: false}, {name: "PopCountMaskedUint16x16", argLength: 2, commutative: false}, {name: "SaturatedAddUint16x16", argLength: 2, commutative: true}, @@ -966,8 +966,8 @@ func simdGenericOps() []opData { {name: "Permute2Int16x32", argLength: 3, commutative: false}, {name: "Permute2MaskedUint16x32", argLength: 4, commutative: false}, {name: "Permute2MaskedInt16x32", argLength: 4, commutative: false}, - {name: "PermuteMaskedUint16x32", argLength: 3, commutative: false}, {name: "PermuteMaskedInt16x32", argLength: 3, commutative: false}, + {name: "PermuteMaskedUint16x32", argLength: 3, commutative: false}, {name: "PopCountUint16x32", argLength: 1, commutative: false}, {name: "PopCountMaskedUint16x32", argLength: 2, commutative: false}, {name: "SaturatedAddUint16x32", argLength: 2, commutative: true}, @@ -1018,12 +1018,12 @@ func simdGenericOps() []opData { {name: "PairwiseSubUint16x8", argLength: 2, commutative: false}, {name: "PermuteInt16x8", argLength: 2, commutative: false}, {name: "PermuteUint16x8", argLength: 2, commutative: false}, - {name: "Permute2Int16x8", argLength: 3, commutative: false}, {name: "Permute2Uint16x8", argLength: 3, commutative: false}, + {name: "Permute2Int16x8", 
argLength: 3, commutative: false}, {name: "Permute2MaskedInt16x8", argLength: 4, commutative: false}, {name: "Permute2MaskedUint16x8", argLength: 4, commutative: false}, - {name: "PermuteMaskedUint16x8", argLength: 3, commutative: false}, {name: "PermuteMaskedInt16x8", argLength: 3, commutative: false}, + {name: "PermuteMaskedUint16x8", argLength: 3, commutative: false}, {name: "PopCountUint16x8", argLength: 1, commutative: false}, {name: "PopCountMaskedUint16x8", argLength: 2, commutative: false}, {name: "SaturatedAddUint16x8", argLength: 2, commutative: true}, @@ -1070,17 +1070,17 @@ func simdGenericOps() []opData { {name: "NotEqualMaskedUint32x16", argLength: 3, commutative: true}, {name: "OrUint32x16", argLength: 2, commutative: true}, {name: "OrMaskedUint32x16", argLength: 3, commutative: true}, - {name: "PermuteFloat32x16", argLength: 2, commutative: false}, {name: "PermuteInt32x16", argLength: 2, commutative: false}, + {name: "PermuteFloat32x16", argLength: 2, commutative: false}, {name: "PermuteUint32x16", argLength: 2, commutative: false}, {name: "Permute2Uint32x16", argLength: 3, commutative: false}, {name: "Permute2Float32x16", argLength: 3, commutative: false}, {name: "Permute2Int32x16", argLength: 3, commutative: false}, + {name: "Permute2MaskedUint32x16", argLength: 4, commutative: false}, {name: "Permute2MaskedInt32x16", argLength: 4, commutative: false}, {name: "Permute2MaskedFloat32x16", argLength: 4, commutative: false}, - {name: "Permute2MaskedUint32x16", argLength: 4, commutative: false}, - {name: "PermuteMaskedInt32x16", argLength: 3, commutative: false}, {name: "PermuteMaskedFloat32x16", argLength: 3, commutative: false}, + {name: "PermuteMaskedInt32x16", argLength: 3, commutative: false}, {name: "PermuteMaskedUint32x16", argLength: 3, commutative: false}, {name: "PopCountUint32x16", argLength: 1, commutative: false}, {name: "PopCountMaskedUint32x16", argLength: 2, commutative: false}, @@ -1307,15 +1307,15 @@ func simdGenericOps() []opData { {name: "PermuteUint64x4", argLength: 2, commutative: false}, {name: "PermuteInt64x4", argLength: 2, commutative: false}, {name: "PermuteFloat64x4", argLength: 2, commutative: false}, - {name: "Permute2Float64x4", argLength: 3, commutative: false}, - {name: "Permute2Int64x4", argLength: 3, commutative: false}, {name: "Permute2Uint64x4", argLength: 3, commutative: false}, - {name: "Permute2MaskedFloat64x4", argLength: 4, commutative: false}, + {name: "Permute2Int64x4", argLength: 3, commutative: false}, + {name: "Permute2Float64x4", argLength: 3, commutative: false}, {name: "Permute2MaskedUint64x4", argLength: 4, commutative: false}, + {name: "Permute2MaskedFloat64x4", argLength: 4, commutative: false}, {name: "Permute2MaskedInt64x4", argLength: 4, commutative: false}, + {name: "PermuteMaskedUint64x4", argLength: 3, commutative: false}, {name: "PermuteMaskedFloat64x4", argLength: 3, commutative: false}, {name: "PermuteMaskedInt64x4", argLength: 3, commutative: false}, - {name: "PermuteMaskedUint64x4", argLength: 3, commutative: false}, {name: "PopCountUint64x4", argLength: 1, commutative: false}, {name: "PopCountMaskedUint64x4", argLength: 2, commutative: false}, {name: "RotateLeftUint64x4", argLength: 2, commutative: false}, @@ -1365,18 +1365,18 @@ func simdGenericOps() []opData { {name: "NotEqualMaskedUint64x8", argLength: 3, commutative: true}, {name: "OrUint64x8", argLength: 2, commutative: true}, {name: "OrMaskedUint64x8", argLength: 3, commutative: true}, + {name: "PermuteUint64x8", argLength: 2, commutative: false}, {name: 
"PermuteFloat64x8", argLength: 2, commutative: false}, {name: "PermuteInt64x8", argLength: 2, commutative: false}, - {name: "PermuteUint64x8", argLength: 2, commutative: false}, - {name: "Permute2Int64x8", argLength: 3, commutative: false}, {name: "Permute2Float64x8", argLength: 3, commutative: false}, {name: "Permute2Uint64x8", argLength: 3, commutative: false}, + {name: "Permute2Int64x8", argLength: 3, commutative: false}, + {name: "Permute2MaskedFloat64x8", argLength: 4, commutative: false}, {name: "Permute2MaskedUint64x8", argLength: 4, commutative: false}, {name: "Permute2MaskedInt64x8", argLength: 4, commutative: false}, - {name: "Permute2MaskedFloat64x8", argLength: 4, commutative: false}, - {name: "PermuteMaskedUint64x8", argLength: 3, commutative: false}, - {name: "PermuteMaskedFloat64x8", argLength: 3, commutative: false}, {name: "PermuteMaskedInt64x8", argLength: 3, commutative: false}, + {name: "PermuteMaskedFloat64x8", argLength: 3, commutative: false}, + {name: "PermuteMaskedUint64x8", argLength: 3, commutative: false}, {name: "PopCountUint64x8", argLength: 1, commutative: false}, {name: "PopCountMaskedUint64x8", argLength: 2, commutative: false}, {name: "RotateLeftUint64x8", argLength: 2, commutative: false}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index d69e714082b9b3..9db3dbaf572198 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1199,6 +1199,7 @@ const ( OpAMD64VZEROUPPER OpAMD64VZEROALL OpAMD64KMOVQload + OpAMD64KMOVQstore OpAMD64VADDPS512 OpAMD64VADDPSMasked512 OpAMD64VRCP14PS512 @@ -1229,7 +1230,7 @@ const ( OpAMD64VADDPS128 OpAMD64VADDPSMasked128 OpAMD64VADDSUBPS128 - OpAMD64VRCP14PS128 + OpAMD64VRCPPS128 OpAMD64VRCP14PSMasked128 OpAMD64VRSQRTPS128 OpAMD64VRSQRT14PSMasked128 @@ -1259,7 +1260,7 @@ const ( OpAMD64VADDPS256 OpAMD64VADDPSMasked256 OpAMD64VADDSUBPS256 - OpAMD64VRCP14PS256 + OpAMD64VRCPPS256 OpAMD64VRCP14PSMasked256 OpAMD64VRSQRTPS256 OpAMD64VRSQRT14PSMasked256 @@ -1420,6 +1421,8 @@ const ( OpAMD64VPADDW512 OpAMD64VPADDWMasked512 OpAMD64VPCOMPRESSWMasked512 + OpAMD64VPCMPEQW512 + OpAMD64VPCMPGTW512 OpAMD64VPMAXSW512 OpAMD64VPMAXSWMasked512 OpAMD64VPMINSW512 @@ -1501,6 +1504,8 @@ const ( OpAMD64VPANDND512 OpAMD64VPANDNDMasked512 OpAMD64VPCOMPRESSDMasked512 + OpAMD64VPCMPEQD512 + OpAMD64VPCMPGTD512 OpAMD64VPMAXSD512 OpAMD64VPMAXSDMasked512 OpAMD64VPMINSD512 @@ -1722,6 +1727,8 @@ const ( OpAMD64VPANDNQ512 OpAMD64VPANDNQMasked512 OpAMD64VPCOMPRESSQMasked512 + OpAMD64VPCMPEQQ512 + OpAMD64VPCMPGTQ512 OpAMD64VPMAXSQ512 OpAMD64VPMAXSQMasked512 OpAMD64VPMINSQ512 @@ -1807,6 +1814,8 @@ const ( OpAMD64VPADDB512 OpAMD64VPADDBMasked512 OpAMD64VPCOMPRESSBMasked512 + OpAMD64VPCMPEQB512 + OpAMD64VPCMPGTB512 OpAMD64VPMAXSB512 OpAMD64VPMAXSBMasked512 OpAMD64VPMINSB512 @@ -1888,10 +1897,10 @@ const ( OpAMD64VPMINUD128 OpAMD64VPMINUDMasked128 OpAMD64VPMULUDQ128 - OpAMD64VPERMI2D128 OpAMD64VPERMI2PS128 - OpAMD64VPERMI2PSMasked128 + OpAMD64VPERMI2D128 OpAMD64VPERMI2DMasked128 + OpAMD64VPERMI2PSMasked128 OpAMD64VPSRLD128 OpAMD64VPSRLDMasked128 OpAMD64VPSRLVD128 @@ -1901,12 +1910,12 @@ const ( OpAMD64VPMINUD256 OpAMD64VPMINUDMasked256 OpAMD64VPMULUDQ256 - OpAMD64VPERMPS256 OpAMD64VPERMD256 - OpAMD64VPERMI2D256 + OpAMD64VPERMPS256 OpAMD64VPERMI2PS256 - OpAMD64VPERMI2DMasked256 + OpAMD64VPERMI2D256 OpAMD64VPERMI2PSMasked256 + OpAMD64VPERMI2DMasked256 OpAMD64VPERMPSMasked256 OpAMD64VPERMDMasked256 OpAMD64VPSRLD256 @@ -1931,10 +1940,10 @@ const ( OpAMD64VPMINUQ256 
OpAMD64VPMINUQMasked256 OpAMD64VPMULUDQMasked256 - OpAMD64VPERMQ256 OpAMD64VPERMPD256 - OpAMD64VPERMI2Q256 + OpAMD64VPERMQ256 OpAMD64VPERMI2PD256 + OpAMD64VPERMI2Q256 OpAMD64VPERMI2PDMasked256 OpAMD64VPERMI2QMasked256 OpAMD64VPERMQMasked256 @@ -1955,8 +1964,8 @@ const ( OpAMD64VPERMI2PD512 OpAMD64VPERMI2QMasked512 OpAMD64VPERMI2PDMasked512 - OpAMD64VPERMQMasked512 OpAMD64VPERMPDMasked512 + OpAMD64VPERMQMasked512 OpAMD64VPSRLQ512 OpAMD64VPSRLQMasked512 OpAMD64VPSRLVQ512 @@ -2054,8 +2063,8 @@ const ( OpAMD64VPSHLDWMasked256 OpAMD64VPSHRDW256 OpAMD64VPSHRDWMasked256 - OpAMD64VPCMPW512 OpAMD64VPCMPWMasked512 + OpAMD64VPCMPW512 OpAMD64VPSHLDW512 OpAMD64VPSHLDWMasked512 OpAMD64VPSHRDW512 @@ -2068,8 +2077,8 @@ const ( OpAMD64VPSHLDWMasked128 OpAMD64VPSHRDW128 OpAMD64VPSHRDWMasked128 - OpAMD64VPCMPD512 OpAMD64VPCMPDMasked512 + OpAMD64VPCMPD512 OpAMD64VPROLD512 OpAMD64VPROLDMasked512 OpAMD64VPRORD512 @@ -2122,8 +2131,8 @@ const ( OpAMD64VPSHLDQMasked256 OpAMD64VPSHRDQ256 OpAMD64VPSHRDQMasked256 - OpAMD64VPCMPQ512 OpAMD64VPCMPQMasked512 + OpAMD64VPCMPQ512 OpAMD64VPROLQ512 OpAMD64VPROLQMasked512 OpAMD64VPRORQ512 @@ -2140,16 +2149,16 @@ const ( OpAMD64VEXTRACTI128128 OpAMD64VPCMPB256 OpAMD64VINSERTI128256 - OpAMD64VPCMPB512 OpAMD64VPCMPBMasked512 + OpAMD64VPCMPB512 OpAMD64VPCMPUWMasked256 OpAMD64VPCMPUW256 - OpAMD64VPCMPUW512 OpAMD64VPCMPUWMasked512 + OpAMD64VPCMPUW512 OpAMD64VPCMPUWMasked128 OpAMD64VPCMPUW128 - OpAMD64VPCMPUD512 OpAMD64VPCMPUDMasked512 + OpAMD64VPCMPUD512 OpAMD64VPCMPUDMasked128 OpAMD64VPCMPUD128 OpAMD64VPCMPUDMasked256 @@ -2158,8 +2167,8 @@ const ( OpAMD64VPCMPUQ128 OpAMD64VPCMPUQMasked256 OpAMD64VPCMPUQ256 - OpAMD64VPCMPUQ512 OpAMD64VPCMPUQMasked512 + OpAMD64VPCMPUQ512 OpAMD64VPCMPUBMasked128 OpAMD64VGF2P8AFFINEQB128 OpAMD64VGF2P8AFFINEINVQB128 @@ -2172,12 +2181,12 @@ const ( OpAMD64VGF2P8AFFINEINVQBMasked256 OpAMD64VGF2P8AFFINEQBMasked256 OpAMD64VPCMPUB256 - OpAMD64VPCMPUB512 OpAMD64VPCMPUBMasked512 OpAMD64VGF2P8AFFINEQB512 OpAMD64VGF2P8AFFINEINVQB512 OpAMD64VGF2P8AFFINEINVQBMasked512 OpAMD64VGF2P8AFFINEQBMasked512 + OpAMD64VPCMPUB512 OpARMADD OpARMADDconst @@ -4416,6 +4425,18 @@ const ( OpLoadMask64x2 OpLoadMask64x4 OpLoadMask64x8 + OpStoreMask8x16 + OpStoreMask8x32 + OpStoreMask8x64 + OpStoreMask16x8 + OpStoreMask16x16 + OpStoreMask16x32 + OpStoreMask32x4 + OpStoreMask32x8 + OpStoreMask32x16 + OpStoreMask64x2 + OpStoreMask64x4 + OpStoreMask64x8 OpAddFloat32x16 OpAddMaskedFloat32x16 OpApproximateReciprocalFloat32x16 @@ -5325,10 +5346,10 @@ const ( OpPermuteUint16x16 OpPermute2Uint16x16 OpPermute2Int16x16 - OpPermute2MaskedInt16x16 OpPermute2MaskedUint16x16 - OpPermuteMaskedUint16x16 + OpPermute2MaskedInt16x16 OpPermuteMaskedInt16x16 + OpPermuteMaskedUint16x16 OpPopCountUint16x16 OpPopCountMaskedUint16x16 OpSaturatedAddUint16x16 @@ -5379,8 +5400,8 @@ const ( OpPermute2Int16x32 OpPermute2MaskedUint16x32 OpPermute2MaskedInt16x32 - OpPermuteMaskedUint16x32 OpPermuteMaskedInt16x32 + OpPermuteMaskedUint16x32 OpPopCountUint16x32 OpPopCountMaskedUint16x32 OpSaturatedAddUint16x32 @@ -5431,12 +5452,12 @@ const ( OpPairwiseSubUint16x8 OpPermuteInt16x8 OpPermuteUint16x8 - OpPermute2Int16x8 OpPermute2Uint16x8 + OpPermute2Int16x8 OpPermute2MaskedInt16x8 OpPermute2MaskedUint16x8 - OpPermuteMaskedUint16x8 OpPermuteMaskedInt16x8 + OpPermuteMaskedUint16x8 OpPopCountUint16x8 OpPopCountMaskedUint16x8 OpSaturatedAddUint16x8 @@ -5483,17 +5504,17 @@ const ( OpNotEqualMaskedUint32x16 OpOrUint32x16 OpOrMaskedUint32x16 - OpPermuteFloat32x16 OpPermuteInt32x16 + OpPermuteFloat32x16 OpPermuteUint32x16 
OpPermute2Uint32x16 OpPermute2Float32x16 OpPermute2Int32x16 + OpPermute2MaskedUint32x16 OpPermute2MaskedInt32x16 OpPermute2MaskedFloat32x16 - OpPermute2MaskedUint32x16 - OpPermuteMaskedInt32x16 OpPermuteMaskedFloat32x16 + OpPermuteMaskedInt32x16 OpPermuteMaskedUint32x16 OpPopCountUint32x16 OpPopCountMaskedUint32x16 @@ -5720,15 +5741,15 @@ const ( OpPermuteUint64x4 OpPermuteInt64x4 OpPermuteFloat64x4 - OpPermute2Float64x4 - OpPermute2Int64x4 OpPermute2Uint64x4 - OpPermute2MaskedFloat64x4 + OpPermute2Int64x4 + OpPermute2Float64x4 OpPermute2MaskedUint64x4 + OpPermute2MaskedFloat64x4 OpPermute2MaskedInt64x4 + OpPermuteMaskedUint64x4 OpPermuteMaskedFloat64x4 OpPermuteMaskedInt64x4 - OpPermuteMaskedUint64x4 OpPopCountUint64x4 OpPopCountMaskedUint64x4 OpRotateLeftUint64x4 @@ -5778,18 +5799,18 @@ const ( OpNotEqualMaskedUint64x8 OpOrUint64x8 OpOrMaskedUint64x8 + OpPermuteUint64x8 OpPermuteFloat64x8 OpPermuteInt64x8 - OpPermuteUint64x8 - OpPermute2Int64x8 OpPermute2Float64x8 OpPermute2Uint64x8 + OpPermute2Int64x8 + OpPermute2MaskedFloat64x8 OpPermute2MaskedUint64x8 OpPermute2MaskedInt64x8 - OpPermute2MaskedFloat64x8 - OpPermuteMaskedUint64x8 - OpPermuteMaskedFloat64x8 OpPermuteMaskedInt64x8 + OpPermuteMaskedFloat64x8 + OpPermuteMaskedUint64x8 OpPopCountUint64x8 OpPopCountMaskedUint64x8 OpRotateLeftUint64x8 @@ -18830,6 +18851,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "KMOVQstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AKMOVQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + }, + }, { name: "VADDPS512", argLen: 2, @@ -19281,15 +19316,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PS128", + name: "VRCPPS128", argLen: 1, - asm: x86.AVRCP14PS, + asm: x86.AVRCPPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -19728,15 +19763,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PS256", + name: "VRCPPS256", argLen: 1, - asm: x86.AVRCP14PS, + asm: x86.AVRCPPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -22122,6 +22157,35 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPCMPEQW512", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPCMPGTW512", + argLen: 2, + asm: x86.AVPCMPGTW, + reg: regInfo{ + 
inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, { name: "VPMAXSW512", argLen: 2, @@ -23327,6 +23391,35 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPCMPEQD512", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPCMPGTD512", + argLen: 2, + asm: x86.AVPCMPGTD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, { name: "VPMAXSD512", argLen: 2, @@ -26664,6 +26757,35 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPCMPEQQ512", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPCMPGTQ512", + argLen: 2, + asm: x86.AVPCMPGTQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, { name: "VPMAXSQ512", argLen: 2, @@ -27922,6 +28044,35 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPCMPEQB512", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPCMPGTB512", + argLen: 2, + asm: x86.AVPCMPGTB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, { name: "VPMAXSB512", argLen: 2, @@ -29154,10 +29305,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2D128", + name: "VPERMI2PS128", argLen: 3, resultInArg0: true, - asm: x86.AVPERMI2D, + asm: x86.AVPERMI2PS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29170,10 +29321,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2PS128", + name: "VPERMI2D128", argLen: 3, resultInArg0: true, - asm: x86.AVPERMI2PS, + asm: x86.AVPERMI2D, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29186,10 +29337,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2PSMasked128", + name: "VPERMI2DMasked128", argLen: 4, resultInArg0: true, - asm: x86.AVPERMI2PS, + asm: x86.AVPERMI2D, reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 
K6 K7 @@ -29203,10 +29354,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2DMasked128", + name: "VPERMI2PSMasked128", argLen: 4, resultInArg0: true, - asm: x86.AVPERMI2D, + asm: x86.AVPERMI2PS, reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -29355,9 +29506,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMPS256", + name: "VPERMD256", argLen: 2, - asm: x86.AVPERMPS, + asm: x86.AVPERMD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29369,9 +29520,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMD256", + name: "VPERMPS256", argLen: 2, - asm: x86.AVPERMD, + asm: x86.AVPERMPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29383,10 +29534,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2D256", + name: "VPERMI2PS256", argLen: 3, resultInArg0: true, - asm: x86.AVPERMI2D, + asm: x86.AVPERMI2PS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29399,10 +29550,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2PS256", + name: "VPERMI2D256", argLen: 3, resultInArg0: true, - asm: x86.AVPERMI2PS, + asm: x86.AVPERMI2D, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29415,10 +29566,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2DMasked256", + name: "VPERMI2PSMasked256", argLen: 4, resultInArg0: true, - asm: x86.AVPERMI2D, + asm: x86.AVPERMI2PS, reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -29432,10 +29583,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2PSMasked256", + name: "VPERMI2DMasked256", argLen: 4, resultInArg0: true, - asm: x86.AVPERMI2PS, + asm: x86.AVPERMI2D, reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -29817,9 +29968,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMQ256", + name: "VPERMPD256", argLen: 2, - asm: x86.AVPERMQ, + asm: x86.AVPERMPD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29831,9 +29982,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMPD256", + name: "VPERMQ256", argLen: 2, - asm: x86.AVPERMPD, + asm: x86.AVPERMQ, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29845,10 +29996,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2Q256", + name: "VPERMI2PD256", argLen: 3, resultInArg0: true, - asm: x86.AVPERMI2Q, + asm: x86.AVPERMI2PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29861,10 +30012,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2PD256", + name: "VPERMI2Q256", argLen: 3, resultInArg0: true, - asm: x86.AVPERMI2PD, + asm: x86.AVPERMI2Q, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30186,9 +30337,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMQMasked512", + name: "VPERMPDMasked512", argLen: 3, - asm: x86.AVPERMQ, + asm: x86.AVPERMPD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -30201,9 +30352,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: 
"VPERMPDMasked512", + name: "VPERMQMasked512", argLen: 3, - asm: x86.AVPERMPD, + asm: x86.AVPERMQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -31686,15 +31837,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPW512", + name: "VPCMPWMasked512", auxType: auxInt8, - argLen: 2, + argLen: 3, commutative: true, asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -31702,16 +31854,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPWMasked512", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPW, + name: "VPCMPW512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -31904,15 +32054,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPD512", + name: "VPCMPDMasked512", auxType: auxInt8, - argLen: 2, + argLen: 3, commutative: true, asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -31920,16 +32071,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPDMasked512", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPD, + name: "VPCMPD512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -32723,15 +32872,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPQ512", + name: "VPCMPQMasked512", auxType: auxInt8, - argLen: 2, + argLen: 3, commutative: true, asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -32739,16 +32889,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: 
"VPCMPQMasked512", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPQ, + name: "VPCMPQ512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -32998,15 +33146,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPB512", + name: "VPCMPBMasked512", auxType: auxInt8, - argLen: 2, + argLen: 3, commutative: true, asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -33014,16 +33163,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPBMasked512", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPB, + name: "VPCMPB512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -33063,15 +33210,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUW512", + name: "VPCMPUWMasked512", auxType: auxInt8, - argLen: 2, + argLen: 3, commutative: true, asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -33079,16 +33227,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUWMasked512", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPUW, + name: "VPCMPUW512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -33128,15 +33274,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUD512", + name: "VPCMPUDMasked512", auxType: auxInt8, - argLen: 2, + argLen: 3, commutative: true, asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -33144,16 +33291,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUDMasked512", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPUD, + name: "VPCMPUD512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -33289,15 +33434,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUQ512", + name: "VPCMPUQMasked512", auxType: auxInt8, - argLen: 2, + argLen: 3, commutative: true, asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -33305,16 +33451,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUQMasked512", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPUQ, + name: "VPCMPUQ512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -33509,22 +33653,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPUB512", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVPCMPUB, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VPCMPUBMasked512", auxType: auxInt8, @@ -33604,6 +33732,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPCMPUB512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, { name: "ADD", @@ -60816,6 +60959,78 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "StoreMask8x16", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "StoreMask8x32", + auxType: auxTyp, + argLen: 3, + 
generic: true, + }, + { + name: "StoreMask8x64", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "StoreMask16x8", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "StoreMask16x16", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "StoreMask16x32", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "StoreMask32x4", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "StoreMask32x8", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "StoreMask32x16", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "StoreMask64x2", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "StoreMask64x4", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "StoreMask64x8", + auxType: auxTyp, + argLen: 3, + generic: true, + }, { name: "AddFloat32x16", argLen: 2, @@ -65677,22 +65892,22 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2MaskedInt16x16", + name: "Permute2MaskedUint16x16", argLen: 4, generic: true, }, { - name: "Permute2MaskedUint16x16", + name: "Permute2MaskedInt16x16", argLen: 4, generic: true, }, { - name: "PermuteMaskedUint16x16", + name: "PermuteMaskedInt16x16", argLen: 3, generic: true, }, { - name: "PermuteMaskedInt16x16", + name: "PermuteMaskedUint16x16", argLen: 3, generic: true, }, @@ -65964,12 +66179,12 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteMaskedUint16x32", + name: "PermuteMaskedInt16x32", argLen: 3, generic: true, }, { - name: "PermuteMaskedInt16x32", + name: "PermuteMaskedUint16x32", argLen: 3, generic: true, }, @@ -66242,12 +66457,12 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2Int16x8", + name: "Permute2Uint16x8", argLen: 3, generic: true, }, { - name: "Permute2Uint16x8", + name: "Permute2Int16x8", argLen: 3, generic: true, }, @@ -66262,12 +66477,12 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteMaskedUint16x8", + name: "PermuteMaskedInt16x8", argLen: 3, generic: true, }, { - name: "PermuteMaskedInt16x8", + name: "PermuteMaskedUint16x8", argLen: 3, generic: true, }, @@ -66519,12 +66734,12 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteFloat32x16", + name: "PermuteInt32x16", argLen: 2, generic: true, }, { - name: "PermuteInt32x16", + name: "PermuteFloat32x16", argLen: 2, generic: true, }, @@ -66549,27 +66764,27 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2MaskedInt32x16", + name: "Permute2MaskedUint32x16", argLen: 4, generic: true, }, { - name: "Permute2MaskedFloat32x16", + name: "Permute2MaskedInt32x16", argLen: 4, generic: true, }, { - name: "Permute2MaskedUint32x16", + name: "Permute2MaskedFloat32x16", argLen: 4, generic: true, }, { - name: "PermuteMaskedInt32x16", + name: "PermuteMaskedFloat32x16", argLen: 3, generic: true, }, { - name: "PermuteMaskedFloat32x16", + name: "PermuteMaskedInt32x16", argLen: 3, generic: true, }, @@ -67774,7 +67989,7 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2Float64x4", + name: "Permute2Uint64x4", argLen: 3, generic: true, }, @@ -67784,17 +67999,17 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2Uint64x4", + name: "Permute2Float64x4", argLen: 3, generic: true, }, { - name: "Permute2MaskedFloat64x4", + name: "Permute2MaskedUint64x4", argLen: 4, generic: true, }, { - name: "Permute2MaskedUint64x4", + name: "Permute2MaskedFloat64x4", argLen: 4, generic: true, }, @@ -67804,17 +68019,17 @@ var opcodeTable = [...]opInfo{ 
generic: true, }, { - name: "PermuteMaskedFloat64x4", + name: "PermuteMaskedUint64x4", argLen: 3, generic: true, }, { - name: "PermuteMaskedInt64x4", + name: "PermuteMaskedFloat64x4", argLen: 3, generic: true, }, { - name: "PermuteMaskedUint64x4", + name: "PermuteMaskedInt64x4", argLen: 3, generic: true, }, @@ -68082,52 +68297,52 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteFloat64x8", + name: "PermuteUint64x8", argLen: 2, generic: true, }, { - name: "PermuteInt64x8", + name: "PermuteFloat64x8", argLen: 2, generic: true, }, { - name: "PermuteUint64x8", + name: "PermuteInt64x8", argLen: 2, generic: true, }, { - name: "Permute2Int64x8", + name: "Permute2Float64x8", argLen: 3, generic: true, }, { - name: "Permute2Float64x8", + name: "Permute2Uint64x8", argLen: 3, generic: true, }, { - name: "Permute2Uint64x8", + name: "Permute2Int64x8", argLen: 3, generic: true, }, { - name: "Permute2MaskedUint64x8", + name: "Permute2MaskedFloat64x8", argLen: 4, generic: true, }, { - name: "Permute2MaskedInt64x8", + name: "Permute2MaskedUint64x8", argLen: 4, generic: true, }, { - name: "Permute2MaskedFloat64x8", + name: "Permute2MaskedInt64x8", argLen: 4, generic: true, }, { - name: "PermuteMaskedUint64x8", + name: "PermuteMaskedInt64x8", argLen: 3, generic: true, }, @@ -68137,7 +68352,7 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteMaskedInt64x8", + name: "PermuteMaskedUint64x8", argLen: 3, generic: true, }, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 0ff19a680e4e20..ecd4a21f43d8a8 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -985,10 +985,10 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64VRCP14PS512 return true case OpApproximateReciprocalFloat32x4: - v.Op = OpAMD64VRCP14PS128 + v.Op = OpAMD64VRCPPS128 return true case OpApproximateReciprocalFloat32x8: - v.Op = OpAMD64VRCP14PS256 + v.Op = OpAMD64VRCPPS256 return true case OpApproximateReciprocalFloat64x2: v.Op = OpAMD64VRCP14PD128 @@ -5184,6 +5184,30 @@ func rewriteValueAMD64(v *Value) bool { return true case OpStore: return rewriteValueAMD64_OpStore(v) + case OpStoreMask16x16: + return rewriteValueAMD64_OpStoreMask16x16(v) + case OpStoreMask16x32: + return rewriteValueAMD64_OpStoreMask16x32(v) + case OpStoreMask16x8: + return rewriteValueAMD64_OpStoreMask16x8(v) + case OpStoreMask32x16: + return rewriteValueAMD64_OpStoreMask32x16(v) + case OpStoreMask32x4: + return rewriteValueAMD64_OpStoreMask32x4(v) + case OpStoreMask32x8: + return rewriteValueAMD64_OpStoreMask32x8(v) + case OpStoreMask64x2: + return rewriteValueAMD64_OpStoreMask64x2(v) + case OpStoreMask64x4: + return rewriteValueAMD64_OpStoreMask64x4(v) + case OpStoreMask64x8: + return rewriteValueAMD64_OpStoreMask64x8(v) + case OpStoreMask8x16: + return rewriteValueAMD64_OpStoreMask8x16(v) + case OpStoreMask8x32: + return rewriteValueAMD64_OpStoreMask8x32(v) + case OpStoreMask8x64: + return rewriteValueAMD64_OpStoreMask8x64(v) case OpSub16: v.Op = OpAMD64SUBL return true @@ -33388,13 +33412,12 @@ func rewriteValueAMD64_OpEqualInt16x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (EqualInt16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPW512 [0] x y)) + // result: (VPMOVMToVec16x32 (VPCMPEQW512 x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQW512, 
typ.Mask) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -33406,13 +33429,12 @@ func rewriteValueAMD64_OpEqualInt32x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (EqualInt32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPD512 [0] x y)) + // result: (VPMOVMToVec32x16 (VPCMPEQD512 x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQD512, typ.Mask) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -33424,13 +33446,12 @@ func rewriteValueAMD64_OpEqualInt64x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (EqualInt64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPQ512 [0] x y)) + // result: (VPMOVMToVec64x8 (VPCMPEQQ512 x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQQ512, typ.Mask) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -33442,13 +33463,12 @@ func rewriteValueAMD64_OpEqualInt8x64(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (EqualInt8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPB512 [0] x y)) + // result: (VPMOVMToVec8x64 (VPCMPEQB512 x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQB512, typ.Mask) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -34120,13 +34140,12 @@ func rewriteValueAMD64_OpEqualUint16x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (EqualUint16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPUW512 [0] x y)) + // result: (VPMOVMToVec16x32 (VPCMPEQW512 x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQW512, typ.Mask) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -34138,13 +34157,12 @@ func rewriteValueAMD64_OpEqualUint32x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (EqualUint32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPUD512 [0] x y)) + // result: (VPMOVMToVec32x16 (VPCMPEQD512 x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQD512, typ.Mask) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -34156,13 +34174,12 @@ func rewriteValueAMD64_OpEqualUint64x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (EqualUint64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPUQ512 [0] x y)) + // result: (VPMOVMToVec64x8 (VPCMPEQQ512 x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQQ512, typ.Mask) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -34174,13 +34191,12 @@ func rewriteValueAMD64_OpEqualUint8x64(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (EqualUint8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPUB512 [0] x y)) + // result: (VPMOVMToVec8x64 (VPCMPEQB512 x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQB512, typ.Mask) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -36279,13 +36295,12 @@ func 
rewriteValueAMD64_OpGreaterInt16x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterInt16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPW512 [14] x y)) + // result: (VPMOVMToVec16x32 (VPCMPGTW512 x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTW512, typ.Mask) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -36297,13 +36312,12 @@ func rewriteValueAMD64_OpGreaterInt32x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterInt32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPD512 [14] x y)) + // result: (VPMOVMToVec32x16 (VPCMPGTD512 x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTD512, typ.Mask) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -36315,13 +36329,12 @@ func rewriteValueAMD64_OpGreaterInt64x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterInt64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPQ512 [14] x y)) + // result: (VPMOVMToVec64x8 (VPCMPGTQ512 x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQ512, typ.Mask) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -36333,13 +36346,12 @@ func rewriteValueAMD64_OpGreaterInt8x64(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterInt8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPB512 [14] x y)) + // result: (VPMOVMToVec8x64 (VPCMPGTB512 x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTB512, typ.Mask) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -53277,6 +53289,234 @@ func rewriteValueAMD64_OpStore(v *Value) bool { } return false } +func rewriteValueAMD64_OpStoreMask16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (StoreMask16x16 {t} ptr val mem) + // result: (KMOVQstore ptr (VPMOVVec16x16ToM val) mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64KMOVQstore) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, t) + v0.AddArg(val) + v.AddArg3(ptr, v0, mem) + return true + } +} +func rewriteValueAMD64_OpStoreMask16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (StoreMask16x32 {t} ptr val mem) + // result: (KMOVQstore ptr (VPMOVVec16x32ToM val) mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64KMOVQstore) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, t) + v0.AddArg(val) + v.AddArg3(ptr, v0, mem) + return true + } +} +func rewriteValueAMD64_OpStoreMask16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (StoreMask16x8 {t} ptr val mem) + // result: (KMOVQstore ptr (VPMOVVec16x8ToM val) mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64KMOVQstore) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, t) + v0.AddArg(val) + v.AddArg3(ptr, v0, mem) + return true + } +} +func rewriteValueAMD64_OpStoreMask32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: 
(StoreMask32x16 {t} ptr val mem) + // result: (KMOVQstore ptr (VPMOVVec32x16ToM val) mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64KMOVQstore) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, t) + v0.AddArg(val) + v.AddArg3(ptr, v0, mem) + return true + } +} +func rewriteValueAMD64_OpStoreMask32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (StoreMask32x4 {t} ptr val mem) + // result: (KMOVQstore ptr (VPMOVVec32x4ToM val) mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64KMOVQstore) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, t) + v0.AddArg(val) + v.AddArg3(ptr, v0, mem) + return true + } +} +func rewriteValueAMD64_OpStoreMask32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (StoreMask32x8 {t} ptr val mem) + // result: (KMOVQstore ptr (VPMOVVec32x8ToM val) mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64KMOVQstore) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, t) + v0.AddArg(val) + v.AddArg3(ptr, v0, mem) + return true + } +} +func rewriteValueAMD64_OpStoreMask64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (StoreMask64x2 {t} ptr val mem) + // result: (KMOVQstore ptr (VPMOVVec64x2ToM val) mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64KMOVQstore) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, t) + v0.AddArg(val) + v.AddArg3(ptr, v0, mem) + return true + } +} +func rewriteValueAMD64_OpStoreMask64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (StoreMask64x4 {t} ptr val mem) + // result: (KMOVQstore ptr (VPMOVVec64x4ToM val) mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64KMOVQstore) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, t) + v0.AddArg(val) + v.AddArg3(ptr, v0, mem) + return true + } +} +func rewriteValueAMD64_OpStoreMask64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (StoreMask64x8 {t} ptr val mem) + // result: (KMOVQstore ptr (VPMOVVec64x8ToM val) mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64KMOVQstore) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, t) + v0.AddArg(val) + v.AddArg3(ptr, v0, mem) + return true + } +} +func rewriteValueAMD64_OpStoreMask8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (StoreMask8x16 {t} ptr val mem) + // result: (KMOVQstore ptr (VPMOVVec8x16ToM val) mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64KMOVQstore) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, t) + v0.AddArg(val) + v.AddArg3(ptr, v0, mem) + return true + } +} +func rewriteValueAMD64_OpStoreMask8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (StoreMask8x32 {t} ptr val mem) + // result: (KMOVQstore ptr (VPMOVVec8x32ToM val) mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64KMOVQstore) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, t) + v0.AddArg(val) + v.AddArg3(ptr, v0, mem) + return true + } +} +func rewriteValueAMD64_OpStoreMask8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + 
b := v.Block + // match: (StoreMask8x64 {t} ptr val mem) + // result: (KMOVQstore ptr (VPMOVVec8x64ToM val) mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64KMOVQstore) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, t) + v0.AddArg(val) + v.AddArg3(ptr, v0, mem) + return true + } +} func rewriteValueAMD64_OpSubMaskedFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] diff --git a/src/cmd/compile/internal/ssagen/intrinsics.go b/src/cmd/compile/internal/ssagen/intrinsics.go index e012b536b55e1b..0284729a525502 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics.go +++ b/src/cmd/compile/internal/ssagen/intrinsics.go @@ -1791,6 +1791,23 @@ func simdLoadMask(elemBits, lanes int) func(s *state, n *ir.CallExpr, args []*ss } } +func simdStoreMask(elemBits, lanes int) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + opCodes := map[int]map[int]ssa.Op{ + 8: {16: ssa.OpStoreMask8x16, 32: ssa.OpStoreMask8x32, 64: ssa.OpStoreMask8x64}, + 16: {8: ssa.OpStoreMask16x8, 16: ssa.OpStoreMask16x16, 32: ssa.OpStoreMask16x32}, + 32: {4: ssa.OpStoreMask32x4, 8: ssa.OpStoreMask32x8, 16: ssa.OpStoreMask32x16}, + 64: {2: ssa.OpStoreMask64x2, 4: ssa.OpStoreMask64x4, 8: ssa.OpStoreMask64x8}, + } + op := opCodes[elemBits][lanes] + if op == 0 { + panic(fmt.Sprintf("Unknown mask shape: Mask%dx%d", elemBits, lanes)) + } + s.vars[memVar] = s.newValue3A(op, types.TypeMem, types.TypeMask, args[1], args[0], s.mem()) + return nil + } +} + // findIntrinsic returns a function which builds the SSA equivalent of the // function identified by the symbol sym. If sym is not an intrinsic call, returns nil. func findIntrinsic(sym *types.Sym) intrinsicBuilder { diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 8040a187bda6d1..8b3b08f886f197 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -310,34 +310,34 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float64x2.DotProdBroadcast", opLen2(ssa.OpDotProdBroadcastFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x16.Equal", opLen2(ssa.OpEqualInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Equal", opLen2(ssa.OpEqualInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.Equal", opLen2(ssa.OpEqualInt8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.Equal", opLen2(ssa.OpEqualInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.Equal", opLen2(ssa.OpEqualInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.Equal", opLen2(ssa.OpEqualInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.Equal", opLen2(ssa.OpEqualInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.Equal", opLen2(ssa.OpEqualInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.Equal", opLen2(ssa.OpEqualInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int64x2.Equal", opLen2(ssa.OpEqualInt64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int64x4.Equal", opLen2(ssa.OpEqualInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.Equal", opLen2(ssa.OpEqualInt64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x16.Equal", opLen2(ssa.OpEqualUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x32.Equal", opLen2(ssa.OpEqualUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.Equal", opLen2(ssa.OpEqualUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint16x8.Equal", opLen2(ssa.OpEqualUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x16.Equal", opLen2(ssa.OpEqualUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.Equal", opLen2(ssa.OpEqualUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint32x4.Equal", opLen2(ssa.OpEqualUint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x8.Equal", opLen2(ssa.OpEqualUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.Equal", opLen2(ssa.OpEqualUint32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint64x2.Equal", opLen2(ssa.OpEqualUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.Equal", opLen2(ssa.OpEqualUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.Equal", opLen2(ssa.OpEqualUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Equal", opLen2(ssa.OpEqualFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Equal", opLen2(ssa.OpEqualFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.Equal", opLen2(ssa.OpEqualFloat32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.Equal", opLen2(ssa.OpEqualFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.Equal", opLen2(ssa.OpEqualFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.Equal", opLen2(ssa.OpEqualFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.Equal", opLen2(ssa.OpEqualInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.Equal", opLen2(ssa.OpEqualInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.Equal", opLen2(ssa.OpEqualInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.Equal", opLen2(ssa.OpEqualInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.Equal", opLen2(ssa.OpEqualUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.Equal", opLen2(ssa.OpEqualUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, 
"Uint32x16.Equal", opLen2(ssa.OpEqualUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.Equal", opLen2(ssa.OpEqualUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.EqualMasked", opLen3(ssa.OpEqualMaskedFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.EqualMasked", opLen3(ssa.OpEqualMaskedFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.EqualMasked", opLen3(ssa.OpEqualMaskedFloat32x16, types.TypeVec512), sys.AMD64) @@ -458,22 +458,22 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.GetElem", opLen1Imm8(ssa.OpGetElemUint64x2, types.Types[types.TUINT64], 0), sys.AMD64) addF(simdPackage, "Int8x16.Greater", opLen2(ssa.OpGreaterInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Greater", opLen2(ssa.OpGreaterInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.Greater", opLen2(ssa.OpGreaterInt8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.Greater", opLen2(ssa.OpGreaterInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.Greater", opLen2(ssa.OpGreaterInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.Greater", opLen2(ssa.OpGreaterInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.Greater", opLen2(ssa.OpGreaterInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.Greater", opLen2(ssa.OpGreaterInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.Greater", opLen2(ssa.OpGreaterInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int64x2.Greater", opLen2(ssa.OpGreaterInt64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int64x4.Greater", opLen2(ssa.OpGreaterInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.Greater", opLen2(ssa.OpGreaterInt64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Greater", opLen2(ssa.OpGreaterFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Greater", opLen2(ssa.OpGreaterFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.Greater", opLen2(ssa.OpGreaterFloat32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.Greater", opLen2(ssa.OpGreaterFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.Greater", opLen2(ssa.OpGreaterFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.Greater", opLen2(ssa.OpGreaterFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.Greater", opLen2(ssa.OpGreaterInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.Greater", opLen2(ssa.OpGreaterInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.Greater", opLen2(ssa.OpGreaterInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.Greater", opLen2(ssa.OpGreaterInt64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x16.Greater", opLen2(ssa.OpGreaterUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x32.Greater", opLen2(ssa.OpGreaterUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x64.Greater", opLen2(ssa.OpGreaterUint8x64, types.TypeVec512), sys.AMD64) @@ -2137,59 +2137,71 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Mask8x16.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Mask8x16.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "LoadMask8x16FromBits", simdLoadMask(8, 16), sys.AMD64) + addF(simdPackage, "Mask8x16.StoreToBits", simdStoreMask(8, 16), sys.AMD64) addF(simdPackage, "Mask8x32.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x32.AsMask8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask8x32.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Mask8x32.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "LoadMask8x32FromBits", simdLoadMask(8, 32), sys.AMD64) + addF(simdPackage, "Mask8x32.StoreToBits", simdStoreMask(8, 32), sys.AMD64) addF(simdPackage, "Mask8x64.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x64.AsMask8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask8x64.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask8x64.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "LoadMask8x64FromBits", simdLoadMask(8, 64), sys.AMD64) + addF(simdPackage, "Mask8x64.StoreToBits", simdStoreMask(8, 64), sys.AMD64) addF(simdPackage, "Mask16x8.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x8.AsMask16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask16x8.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Mask16x8.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "LoadMask16x8FromBits", simdLoadMask(16, 8), sys.AMD64) + addF(simdPackage, "Mask16x8.StoreToBits", simdStoreMask(16, 8), sys.AMD64) addF(simdPackage, "Mask16x16.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x16.AsMask16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask16x16.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Mask16x16.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "LoadMask16x16FromBits", simdLoadMask(16, 16), sys.AMD64) + addF(simdPackage, "Mask16x16.StoreToBits", simdStoreMask(16, 16), sys.AMD64) addF(simdPackage, "Mask16x32.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x32.AsMask16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask16x32.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask16x32.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "LoadMask16x32FromBits", simdLoadMask(16, 32), sys.AMD64) + addF(simdPackage, "Mask16x32.StoreToBits", simdStoreMask(16, 32), sys.AMD64) addF(simdPackage, "Mask32x4.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x4.AsMask32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return 
args[0] }, sys.AMD64) addF(simdPackage, "Mask32x4.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Mask32x4.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "LoadMask32x4FromBits", simdLoadMask(32, 4), sys.AMD64) + addF(simdPackage, "Mask32x4.StoreToBits", simdStoreMask(32, 4), sys.AMD64) addF(simdPackage, "Mask32x8.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x8.AsMask32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask32x8.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Mask32x8.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "LoadMask32x8FromBits", simdLoadMask(32, 8), sys.AMD64) + addF(simdPackage, "Mask32x8.StoreToBits", simdStoreMask(32, 8), sys.AMD64) addF(simdPackage, "Mask32x16.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x16.AsMask32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask32x16.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask32x16.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "LoadMask32x16FromBits", simdLoadMask(32, 16), sys.AMD64) + addF(simdPackage, "Mask32x16.StoreToBits", simdStoreMask(32, 16), sys.AMD64) addF(simdPackage, "Mask64x2.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x2.AsMask64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask64x2.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Mask64x2.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "LoadMask64x2FromBits", simdLoadMask(64, 2), sys.AMD64) + addF(simdPackage, "Mask64x2.StoreToBits", simdStoreMask(64, 2), sys.AMD64) addF(simdPackage, "Mask64x4.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x4.AsMask64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask64x4.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Mask64x4.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "LoadMask64x4FromBits", simdLoadMask(64, 4), sys.AMD64) + addF(simdPackage, "Mask64x4.StoreToBits", simdStoreMask(64, 4), sys.AMD64) addF(simdPackage, "Mask64x8.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x8.AsMask64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask64x8.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask64x8.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "LoadMask64x8FromBits", simdLoadMask(64, 8), sys.AMD64) + addF(simdPackage, "Mask64x8.StoreToBits", simdStoreMask(64, 8), sys.AMD64) } diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index a5c2f2d5c28187..318883ea19c63c 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -918,12 +918,12 @@ func (x Uint64x8) AndNotMasked(y Uint64x8, mask 
Mask64x8) Uint64x8 // ApproximateReciprocal computes an approximate reciprocal of each element. // -// Asm: VRCP14PS, CPU Feature: AVX512F +// Asm: VRCPPS, CPU Feature: AVX func (x Float32x4) ApproximateReciprocal() Float32x4 // ApproximateReciprocal computes an approximate reciprocal of each element. // -// Asm: VRCP14PS, CPU Feature: AVX512F +// Asm: VRCPPS, CPU Feature: AVX func (x Float32x8) ApproximateReciprocal() Float32x8 // ApproximateReciprocal computes an approximate reciprocal of each element. @@ -1951,6 +1951,11 @@ func (x Int8x16) Equal(y Int8x16) Mask8x16 // Asm: VPCMPEQB, CPU Feature: AVX2 func (x Int8x32) Equal(y Int8x32) Mask8x32 +// Equal compares for equality. +// +// Asm: VPCMPEQB, CPU Feature: AVX512BW +func (x Int8x64) Equal(y Int8x64) Mask8x64 + // Equal compares for equality. // // Asm: VPCMPEQW, CPU Feature: AVX @@ -1961,6 +1966,11 @@ func (x Int16x8) Equal(y Int16x8) Mask16x8 // Asm: VPCMPEQW, CPU Feature: AVX2 func (x Int16x16) Equal(y Int16x16) Mask16x16 +// Equal compares for equality. +// +// Asm: VPCMPEQW, CPU Feature: AVX512BW +func (x Int16x32) Equal(y Int16x32) Mask16x32 + // Equal compares for equality. // // Asm: VPCMPEQD, CPU Feature: AVX @@ -1971,6 +1981,11 @@ func (x Int32x4) Equal(y Int32x4) Mask32x4 // Asm: VPCMPEQD, CPU Feature: AVX2 func (x Int32x8) Equal(y Int32x8) Mask32x8 +// Equal compares for equality. +// +// Asm: VPCMPEQD, CPU Feature: AVX512F +func (x Int32x16) Equal(y Int32x16) Mask32x16 + // Equal compares for equality. // // Asm: VPCMPEQQ, CPU Feature: AVX @@ -1981,6 +1996,11 @@ func (x Int64x2) Equal(y Int64x2) Mask64x2 // Asm: VPCMPEQQ, CPU Feature: AVX2 func (x Int64x4) Equal(y Int64x4) Mask64x4 +// Equal compares for equality. +// +// Asm: VPCMPEQQ, CPU Feature: AVX512F +func (x Int64x8) Equal(y Int64x8) Mask64x8 + // Equal compares for equality. // // Asm: VPCMPEQB, CPU Feature: AVX @@ -1991,6 +2011,11 @@ func (x Uint8x16) Equal(y Uint8x16) Mask8x16 // Asm: VPCMPEQB, CPU Feature: AVX2 func (x Uint8x32) Equal(y Uint8x32) Mask8x32 +// Equal compares for equality. +// +// Asm: VPCMPEQB, CPU Feature: AVX512BW +func (x Uint8x64) Equal(y Uint8x64) Mask8x64 + // Equal compares for equality. // // Asm: VPCMPEQW, CPU Feature: AVX @@ -2001,6 +2026,11 @@ func (x Uint16x8) Equal(y Uint16x8) Mask16x8 // Asm: VPCMPEQW, CPU Feature: AVX2 func (x Uint16x16) Equal(y Uint16x16) Mask16x16 +// Equal compares for equality. +// +// Asm: VPCMPEQW, CPU Feature: AVX512BW +func (x Uint16x32) Equal(y Uint16x32) Mask16x32 + // Equal compares for equality. // // Asm: VPCMPEQD, CPU Feature: AVX @@ -2011,6 +2041,11 @@ func (x Uint32x4) Equal(y Uint32x4) Mask32x4 // Asm: VPCMPEQD, CPU Feature: AVX2 func (x Uint32x8) Equal(y Uint32x8) Mask32x8 +// Equal compares for equality. +// +// Asm: VPCMPEQD, CPU Feature: AVX512F +func (x Uint32x16) Equal(y Uint32x16) Mask32x16 + // Equal compares for equality. // // Asm: VPCMPEQQ, CPU Feature: AVX @@ -2021,6 +2056,11 @@ func (x Uint64x2) Equal(y Uint64x2) Mask64x2 // Asm: VPCMPEQQ, CPU Feature: AVX2 func (x Uint64x4) Equal(y Uint64x4) Mask64x4 +// Equal compares for equality. +// +// Asm: VPCMPEQQ, CPU Feature: AVX512F +func (x Uint64x8) Equal(y Uint64x8) Mask64x8 + // Equal compares for equality. // // Asm: VCMPPS, CPU Feature: AVX @@ -2051,46 +2091,6 @@ func (x Float64x4) Equal(y Float64x4) Mask64x4 // Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x8) Equal(y Float64x8) Mask64x8 -// Equal compares for equality. 
-// -// Asm: VPCMPB, CPU Feature: AVX512BW -func (x Int8x64) Equal(y Int8x64) Mask8x64 - -// Equal compares for equality. -// -// Asm: VPCMPW, CPU Feature: AVX512BW -func (x Int16x32) Equal(y Int16x32) Mask16x32 - -// Equal compares for equality. -// -// Asm: VPCMPD, CPU Feature: AVX512F -func (x Int32x16) Equal(y Int32x16) Mask32x16 - -// Equal compares for equality. -// -// Asm: VPCMPQ, CPU Feature: AVX512F -func (x Int64x8) Equal(y Int64x8) Mask64x8 - -// Equal compares for equality. -// -// Asm: VPCMPUB, CPU Feature: AVX512BW -func (x Uint8x64) Equal(y Uint8x64) Mask8x64 - -// Equal compares for equality. -// -// Asm: VPCMPUW, CPU Feature: AVX512BW -func (x Uint16x32) Equal(y Uint16x32) Mask16x32 - -// Equal compares for equality. -// -// Asm: VPCMPUD, CPU Feature: AVX512F -func (x Uint32x16) Equal(y Uint32x16) Mask32x16 - -// Equal compares for equality. -// -// Asm: VPCMPUQ, CPU Feature: AVX512F -func (x Uint64x8) Equal(y Uint64x8) Mask64x8 - /* EqualMasked */ // EqualMasked compares for equality. @@ -2733,7 +2733,7 @@ func (x Uint8x64) GaloisFieldAffineTransformInverse(y Uint64x8, b uint8) Uint8x6 // b is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512GFNI -func (x Uint8x16) GaloisFieldAffineTransformInverseMasked(y Uint64x2, b uint8, m Mask8x16) Uint8x16 +func (x Uint8x16) GaloisFieldAffineTransformInverseMasked(y Uint64x2, b uint8, mask Mask8x16) Uint8x16 // GaloisFieldAffineTransformInverseMasked computes an affine transformation in GF(2^8), // with x inverted with respect to reduction polynomial x^8 + x^4 + x^3 + x + 1: @@ -2746,7 +2746,7 @@ func (x Uint8x16) GaloisFieldAffineTransformInverseMasked(y Uint64x2, b uint8, m // b is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512GFNI -func (x Uint8x32) GaloisFieldAffineTransformInverseMasked(y Uint64x4, b uint8, m Mask8x32) Uint8x32 +func (x Uint8x32) GaloisFieldAffineTransformInverseMasked(y Uint64x4, b uint8, mask Mask8x32) Uint8x32 // GaloisFieldAffineTransformInverseMasked computes an affine transformation in GF(2^8), // with x inverted with respect to reduction polynomial x^8 + x^4 + x^3 + x + 1: @@ -2759,7 +2759,7 @@ func (x Uint8x32) GaloisFieldAffineTransformInverseMasked(y Uint64x4, b uint8, m // b is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512GFNI -func (x Uint8x64) GaloisFieldAffineTransformInverseMasked(y Uint64x8, b uint8, m Mask8x64) Uint8x64 +func (x Uint8x64) GaloisFieldAffineTransformInverseMasked(y Uint64x8, b uint8, mask Mask8x64) Uint8x64 /* GaloisFieldAffineTransformMasked */ @@ -2773,7 +2773,7 @@ func (x Uint8x64) GaloisFieldAffineTransformInverseMasked(y Uint64x8, b uint8, m // b is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512GFNI -func (x Uint8x16) GaloisFieldAffineTransformMasked(y Uint64x2, b uint8, m Mask8x16) Uint8x16 +func (x Uint8x16) GaloisFieldAffineTransformMasked(y Uint64x2, b uint8, mask Mask8x16) Uint8x16 // GaloisFieldAffineTransformMasked computes an affine transformation in GF(2^8): // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; @@ -2785,7 +2785,7 @@ func (x Uint8x16) GaloisFieldAffineTransformMasked(y Uint64x2, b uint8, m Mask8x // b is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512GFNI -func (x Uint8x32) GaloisFieldAffineTransformMasked(y Uint64x4, b uint8, m Mask8x32) Uint8x32 +func (x Uint8x32) GaloisFieldAffineTransformMasked(y Uint64x4, b uint8, mask Mask8x32) Uint8x32 // GaloisFieldAffineTransformMasked computes an affine transformation in GF(2^8): // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; @@ -2797,7 +2797,7 @@ func (x Uint8x32) GaloisFieldAffineTransformMasked(y Uint64x4, b uint8, m Mask8x // b is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512GFNI -func (x Uint8x64) GaloisFieldAffineTransformMasked(y Uint64x8, b uint8, m Mask8x64) Uint8x64 +func (x Uint8x64) GaloisFieldAffineTransformMasked(y Uint64x8, b uint8, mask Mask8x64) Uint8x64 /* GaloisFieldMul */ @@ -2987,6 +2987,11 @@ func (x Int8x16) Greater(y Int8x16) Mask8x16 // Asm: VPCMPGTB, CPU Feature: AVX2 func (x Int8x32) Greater(y Int8x32) Mask8x32 +// Greater compares for greater than. +// +// Asm: VPCMPGTB, CPU Feature: AVX512BW +func (x Int8x64) Greater(y Int8x64) Mask8x64 + // Greater compares for greater than. // // Asm: VPCMPGTW, CPU Feature: AVX @@ -2997,6 +3002,11 @@ func (x Int16x8) Greater(y Int16x8) Mask16x8 // Asm: VPCMPGTW, CPU Feature: AVX2 func (x Int16x16) Greater(y Int16x16) Mask16x16 +// Greater compares for greater than. +// +// Asm: VPCMPGTW, CPU Feature: AVX512BW +func (x Int16x32) Greater(y Int16x32) Mask16x32 + // Greater compares for greater than. // // Asm: VPCMPGTD, CPU Feature: AVX @@ -3007,6 +3017,11 @@ func (x Int32x4) Greater(y Int32x4) Mask32x4 // Asm: VPCMPGTD, CPU Feature: AVX2 func (x Int32x8) Greater(y Int32x8) Mask32x8 +// Greater compares for greater than. +// +// Asm: VPCMPGTD, CPU Feature: AVX512F +func (x Int32x16) Greater(y Int32x16) Mask32x16 + // Greater compares for greater than. // // Asm: VPCMPGTQ, CPU Feature: AVX @@ -3017,6 +3032,11 @@ func (x Int64x2) Greater(y Int64x2) Mask64x2 // Asm: VPCMPGTQ, CPU Feature: AVX2 func (x Int64x4) Greater(y Int64x4) Mask64x4 +// Greater compares for greater than. +// +// Asm: VPCMPGTQ, CPU Feature: AVX512F +func (x Int64x8) Greater(y Int64x8) Mask64x8 + // Greater compares for greater than. // // Asm: VCMPPS, CPU Feature: AVX @@ -3047,26 +3067,6 @@ func (x Float64x4) Greater(y Float64x4) Mask64x4 // Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x8) Greater(y Float64x8) Mask64x8 -// Greater compares for greater than. -// -// Asm: VPCMPB, CPU Feature: AVX512BW -func (x Int8x64) Greater(y Int8x64) Mask8x64 - -// Greater compares for greater than. -// -// Asm: VPCMPW, CPU Feature: AVX512BW -func (x Int16x32) Greater(y Int16x32) Mask16x32 - -// Greater compares for greater than. -// -// Asm: VPCMPD, CPU Feature: AVX512F -func (x Int32x16) Greater(y Int32x16) Mask32x16 - -// Greater compares for greater than. -// -// Asm: VPCMPQ, CPU Feature: AVX512F -func (x Int64x8) Greater(y Int64x8) Mask64x8 - // Greater compares for greater than. // // Asm: VPCMPUB, CPU Feature: AVX512BW @@ -6475,84 +6475,84 @@ func (x Uint32x8) PairwiseSub(y Uint32x8) Uint32x8 /* Permute */ -// Permute performs a full permutation of vector y using indices: +// Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. 
// // Asm: VPERMB, CPU Feature: AVX512VBMI func (x Int8x16) Permute(indices Uint8x16) Int8x16 -// Permute performs a full permutation of vector y using indices: +// Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMB, CPU Feature: AVX512VBMI func (x Uint8x16) Permute(indices Uint8x16) Uint8x16 -// Permute performs a full permutation of vector y using indices: +// Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMB, CPU Feature: AVX512VBMI func (x Int8x32) Permute(indices Uint8x32) Int8x32 -// Permute performs a full permutation of vector y using indices: +// Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMB, CPU Feature: AVX512VBMI func (x Uint8x32) Permute(indices Uint8x32) Uint8x32 -// Permute performs a full permutation of vector y using indices: +// Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMB, CPU Feature: AVX512VBMI func (x Int8x64) Permute(indices Uint8x64) Int8x64 -// Permute performs a full permutation of vector y using indices: +// Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMB, CPU Feature: AVX512VBMI func (x Uint8x64) Permute(indices Uint8x64) Uint8x64 -// Permute performs a full permutation of vector y using indices: +// Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMW, CPU Feature: AVX512BW func (x Int16x8) Permute(indices Uint16x8) Int16x8 -// Permute performs a full permutation of vector y using indices: +// Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMW, CPU Feature: AVX512BW func (x Uint16x8) Permute(indices Uint16x8) Uint16x8 -// Permute performs a full permutation of vector y using indices: +// Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMW, CPU Feature: AVX512BW func (x Int16x16) Permute(indices Uint16x16) Int16x16 -// Permute performs a full permutation of vector y using indices: +// Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. 
// // Asm: VPERMW, CPU Feature: AVX512BW func (x Uint16x16) Permute(indices Uint16x16) Uint16x16 -// Permute performs a full permutation of vector y using indices: +// Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMW, CPU Feature: AVX512BW func (x Int16x32) Permute(indices Uint16x32) Int16x32 -// Permute performs a full permutation of vector y using indices: +// Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // @@ -6580,63 +6580,63 @@ func (x Int32x8) Permute(indices Uint32x8) Int32x8 // Asm: VPERMD, CPU Feature: AVX2 func (x Uint32x8) Permute(indices Uint32x8) Uint32x8 -// Permute performs a full permutation of vector y using indices: +// Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMPS, CPU Feature: AVX512F func (x Float32x16) Permute(indices Uint32x16) Float32x16 -// Permute performs a full permutation of vector y using indices: +// Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMD, CPU Feature: AVX512F func (x Int32x16) Permute(indices Uint32x16) Int32x16 -// Permute performs a full permutation of vector y using indices: +// Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMD, CPU Feature: AVX512F func (x Uint32x16) Permute(indices Uint32x16) Uint32x16 -// Permute performs a full permutation of vector y using indices: +// Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMPD, CPU Feature: AVX512F func (x Float64x4) Permute(indices Uint64x4) Float64x4 -// Permute performs a full permutation of vector y using indices: +// Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMQ, CPU Feature: AVX512F func (x Int64x4) Permute(indices Uint64x4) Int64x4 -// Permute performs a full permutation of vector y using indices: +// Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMQ, CPU Feature: AVX512F func (x Uint64x4) Permute(indices Uint64x4) Uint64x4 -// Permute performs a full permutation of vector y using indices: +// Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. 
// // Asm: VPERMPD, CPU Feature: AVX512F func (x Float64x8) Permute(indices Uint64x8) Float64x8 -// Permute performs a full permutation of vector y using indices: +// Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMQ, CPU Feature: AVX512F func (x Int64x8) Permute(indices Uint64x8) Int64x8 -// Permute performs a full permutation of vector y using indices: +// Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // @@ -7189,7 +7189,7 @@ func (x Uint64x8) Permute2Masked(y Uint64x8, indices Uint64x8, mask Mask64x8) Ui /* PermuteMasked */ -// PermuteMasked performs a full permutation of vector y using indices: +// PermuteMasked performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // @@ -7198,7 +7198,7 @@ func (x Uint64x8) Permute2Masked(y Uint64x8, indices Uint64x8, mask Mask64x8) Ui // Asm: VPERMB, CPU Feature: AVX512VBMI func (x Int8x16) PermuteMasked(indices Uint8x16, mask Mask8x16) Int8x16 -// PermuteMasked performs a full permutation of vector y using indices: +// PermuteMasked performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // @@ -7207,7 +7207,7 @@ func (x Int8x16) PermuteMasked(indices Uint8x16, mask Mask8x16) Int8x16 // Asm: VPERMB, CPU Feature: AVX512VBMI func (x Uint8x16) PermuteMasked(indices Uint8x16, mask Mask8x16) Uint8x16 -// PermuteMasked performs a full permutation of vector y using indices: +// PermuteMasked performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // @@ -7216,7 +7216,7 @@ func (x Uint8x16) PermuteMasked(indices Uint8x16, mask Mask8x16) Uint8x16 // Asm: VPERMB, CPU Feature: AVX512VBMI func (x Int8x32) PermuteMasked(indices Uint8x32, mask Mask8x32) Int8x32 -// PermuteMasked performs a full permutation of vector y using indices: +// PermuteMasked performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // @@ -7225,7 +7225,7 @@ func (x Int8x32) PermuteMasked(indices Uint8x32, mask Mask8x32) Int8x32 // Asm: VPERMB, CPU Feature: AVX512VBMI func (x Uint8x32) PermuteMasked(indices Uint8x32, mask Mask8x32) Uint8x32 -// PermuteMasked performs a full permutation of vector y using indices: +// PermuteMasked performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. 
// @@ -7234,7 +7234,7 @@ func (x Uint8x32) PermuteMasked(indices Uint8x32, mask Mask8x32) Uint8x32 // Asm: VPERMB, CPU Feature: AVX512VBMI func (x Int8x64) PermuteMasked(indices Uint8x64, mask Mask8x64) Int8x64 -// PermuteMasked performs a full permutation of vector y using indices: +// PermuteMasked performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // @@ -7243,7 +7243,7 @@ func (x Int8x64) PermuteMasked(indices Uint8x64, mask Mask8x64) Int8x64 // Asm: VPERMB, CPU Feature: AVX512VBMI func (x Uint8x64) PermuteMasked(indices Uint8x64, mask Mask8x64) Uint8x64 -// PermuteMasked performs a full permutation of vector y using indices: +// PermuteMasked performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // @@ -7252,7 +7252,7 @@ func (x Uint8x64) PermuteMasked(indices Uint8x64, mask Mask8x64) Uint8x64 // Asm: VPERMW, CPU Feature: AVX512BW func (x Int16x8) PermuteMasked(indices Uint16x8, mask Mask16x8) Int16x8 -// PermuteMasked performs a full permutation of vector y using indices: +// PermuteMasked performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // @@ -7261,7 +7261,7 @@ func (x Int16x8) PermuteMasked(indices Uint16x8, mask Mask16x8) Int16x8 // Asm: VPERMW, CPU Feature: AVX512BW func (x Uint16x8) PermuteMasked(indices Uint16x8, mask Mask16x8) Uint16x8 -// PermuteMasked performs a full permutation of vector y using indices: +// PermuteMasked performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // @@ -7270,7 +7270,7 @@ func (x Uint16x8) PermuteMasked(indices Uint16x8, mask Mask16x8) Uint16x8 // Asm: VPERMW, CPU Feature: AVX512BW func (x Int16x16) PermuteMasked(indices Uint16x16, mask Mask16x16) Int16x16 -// PermuteMasked performs a full permutation of vector y using indices: +// PermuteMasked performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // @@ -7279,7 +7279,7 @@ func (x Int16x16) PermuteMasked(indices Uint16x16, mask Mask16x16) Int16x16 // Asm: VPERMW, CPU Feature: AVX512BW func (x Uint16x16) PermuteMasked(indices Uint16x16, mask Mask16x16) Uint16x16 -// PermuteMasked performs a full permutation of vector y using indices: +// PermuteMasked performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // @@ -7288,7 +7288,7 @@ func (x Uint16x16) PermuteMasked(indices Uint16x16, mask Mask16x16) Uint16x16 // Asm: VPERMW, CPU Feature: AVX512BW func (x Int16x32) PermuteMasked(indices Uint16x32, mask Mask16x32) Int16x32 -// PermuteMasked performs a full permutation of vector y using indices: +// PermuteMasked performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. 
// @@ -7297,7 +7297,7 @@ func (x Int16x32) PermuteMasked(indices Uint16x32, mask Mask16x32) Int16x32 // Asm: VPERMW, CPU Feature: AVX512BW func (x Uint16x32) PermuteMasked(indices Uint16x32, mask Mask16x32) Uint16x32 -// PermuteMasked performs a full permutation of vector y using indices: +// PermuteMasked performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // @@ -7306,7 +7306,7 @@ func (x Uint16x32) PermuteMasked(indices Uint16x32, mask Mask16x32) Uint16x32 // Asm: VPERMPS, CPU Feature: AVX512F func (x Float32x8) PermuteMasked(indices Uint32x8, mask Mask32x8) Float32x8 -// PermuteMasked performs a full permutation of vector y using indices: +// PermuteMasked performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // @@ -7315,7 +7315,7 @@ func (x Float32x8) PermuteMasked(indices Uint32x8, mask Mask32x8) Float32x8 // Asm: VPERMD, CPU Feature: AVX512F func (x Int32x8) PermuteMasked(indices Uint32x8, mask Mask32x8) Int32x8 -// PermuteMasked performs a full permutation of vector y using indices: +// PermuteMasked performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // @@ -7324,7 +7324,7 @@ func (x Int32x8) PermuteMasked(indices Uint32x8, mask Mask32x8) Int32x8 // Asm: VPERMD, CPU Feature: AVX512F func (x Uint32x8) PermuteMasked(indices Uint32x8, mask Mask32x8) Uint32x8 -// PermuteMasked performs a full permutation of vector y using indices: +// PermuteMasked performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // @@ -7333,7 +7333,7 @@ func (x Uint32x8) PermuteMasked(indices Uint32x8, mask Mask32x8) Uint32x8 // Asm: VPERMPS, CPU Feature: AVX512F func (x Float32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Float32x16 -// PermuteMasked performs a full permutation of vector y using indices: +// PermuteMasked performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // @@ -7342,7 +7342,7 @@ func (x Float32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Float32x16 // Asm: VPERMD, CPU Feature: AVX512F func (x Int32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Int32x16 -// PermuteMasked performs a full permutation of vector y using indices: +// PermuteMasked performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // @@ -7351,7 +7351,7 @@ func (x Int32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Int32x16 // Asm: VPERMD, CPU Feature: AVX512F func (x Uint32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Uint32x16 -// PermuteMasked performs a full permutation of vector y using indices: +// PermuteMasked performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. 
// @@ -7360,7 +7360,7 @@ func (x Uint32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Uint32x16 // Asm: VPERMPD, CPU Feature: AVX512F func (x Float64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Float64x4 -// PermuteMasked performs a full permutation of vector y using indices: +// PermuteMasked performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // @@ -7369,7 +7369,7 @@ func (x Float64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Float64x4 // Asm: VPERMQ, CPU Feature: AVX512F func (x Int64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Int64x4 -// PermuteMasked performs a full permutation of vector y using indices: +// PermuteMasked performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // @@ -7378,7 +7378,7 @@ func (x Int64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Int64x4 // Asm: VPERMQ, CPU Feature: AVX512F func (x Uint64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Uint64x4 -// PermuteMasked performs a full permutation of vector y using indices: +// PermuteMasked performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // @@ -7387,7 +7387,7 @@ func (x Uint64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Uint64x4 // Asm: VPERMPD, CPU Feature: AVX512F func (x Float64x8) PermuteMasked(indices Uint64x8, mask Mask64x8) Float64x8 -// PermuteMasked performs a full permutation of vector y using indices: +// PermuteMasked performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // @@ -7396,7 +7396,7 @@ func (x Float64x8) PermuteMasked(indices Uint64x8, mask Mask64x8) Float64x8 // Asm: VPERMQ, CPU Feature: AVX512F func (x Int64x8) PermuteMasked(indices Uint64x8, mask Mask64x8) Int64x8 -// PermuteMasked performs a full permutation of vector y using indices: +// PermuteMasked performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. 
// diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index 276ae9ed5d6924..d4f539eea2c1a3 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -461,7 +461,7 @@ func testMergeLocalswrapper(t *testing.T, op func(simd.Int64x4, simd.Int64x4) si } } -func TestBitMask(t *testing.T) { +func TestBitMaskLoad(t *testing.T) { if !simd.HasAVX512() { t.Skip("Test requires HasAVX512, not available on this hardware") return @@ -477,3 +477,19 @@ func TestBitMask(t *testing.T) { } } } + +func TestBitMaskStore(t *testing.T) { + if !simd.HasAVX512() { + t.Skip("Test requires HasAVX512, not available on this hardware") + return + } + var want uint64 = 0b101 + var got uint64 + x := simd.LoadInt32x4Slice([]int32{1, 2, 3, 4}) + y := simd.LoadInt32x4Slice([]int32{5, 0, 5, 0}) + m := y.Greater(x) + m.StoreToBits(&got) + if got != want { + t.Errorf("Result incorrect: want %b, got %b", want, got) + } +} diff --git a/src/simd/types_amd64.go b/src/simd/types_amd64.go index ccc8427bb3efe1..998a8f9fe1df0b 100644 --- a/src/simd/types_amd64.go +++ b/src/simd/types_amd64.go @@ -205,48 +205,88 @@ type Mask8x16 struct { vals [16]int8 } -// Mask8x16FromBits constructs a Mask8x16 from an a bitmap, where 1 means set for the indexed element, 0 means unset. +// LoadMask8x16FromBits constructs a Mask8x16 from a bitmap, where 1 means set for the indexed element, 0 means unset. // Only the lower 16 bits of y are used. // +// CPU Features: AVX512 +// //go:noescape func LoadMask8x16FromBits(y *uint64) Mask8x16 +// StoreToBits stores a Mask8x16 as a bitmap, where 1 means set for the indexed element, 0 means unset. +// Only the lower 16 bits of y are used. +// +// CPU Features: AVX512 +// +//go:noescape +func (x Mask8x16) StoreToBits(y *uint64) + // Mask16x8 is a 128-bit SIMD vector of 8 int16 type Mask16x8 struct { int16x8 v128 vals [8]int16 } -// Mask16x8FromBits constructs a Mask16x8 from an a bitmap, where 1 means set for the indexed element, 0 means unset. +// LoadMask16x8FromBits constructs a Mask16x8 from a bitmap, where 1 means set for the indexed element, 0 means unset. // Only the lower 8 bits of y are used. // +// CPU Features: AVX512 +// //go:noescape func LoadMask16x8FromBits(y *uint64) Mask16x8 +// StoreToBits stores a Mask16x8 as a bitmap, where 1 means set for the indexed element, 0 means unset. +// Only the lower 8 bits of y are used. +// +// CPU Features: AVX512 +// +//go:noescape +func (x Mask16x8) StoreToBits(y *uint64) + // Mask32x4 is a 128-bit SIMD vector of 4 int32 type Mask32x4 struct { int32x4 v128 vals [4]int32 } -// Mask32x4FromBits constructs a Mask32x4 from an a bitmap, where 1 means set for the indexed element, 0 means unset. +// LoadMask32x4FromBits constructs a Mask32x4 from a bitmap, where 1 means set for the indexed element, 0 means unset. // Only the lower 4 bits of y are used. // +// CPU Features: AVX512 +// //go:noescape func LoadMask32x4FromBits(y *uint64) Mask32x4 +// StoreToBits stores a Mask32x4 as a bitmap, where 1 means set for the indexed element, 0 means unset. +// Only the lower 4 bits of y are used. +// +// CPU Features: AVX512 +// +//go:noescape +func (x Mask32x4) StoreToBits(y *uint64) + // Mask64x2 is a 128-bit SIMD vector of 2 int64 type Mask64x2 struct { int64x2 v128 vals [2]int64 } -// Mask64x2FromBits constructs a Mask64x2 from an a bitmap, where 1 means set for the indexed element, 0 means unset. +// LoadMask64x2FromBits constructs a Mask64x2 from a bitmap, where 1 means set for the indexed element, 0 means unset. // Only the lower 2 bits of y are used. 
// +// CPU Features: AVX512 +// //go:noescape func LoadMask64x2FromBits(y *uint64) Mask64x2 +// StoreToBits stores a Mask64x2 as a bitmap, where 1 means set for the indexed element, 0 means unset. +// Only the lower 2 bits of y are used. +// +// CPU Features: AVX512 +// +//go:noescape +func (x Mask64x2) StoreToBits(y *uint64) + // v256 is a tag type that tells the compiler that this is really 256-bit SIMD type v256 struct { _256 struct{} @@ -448,48 +488,88 @@ type Mask8x32 struct { vals [32]int8 } -// Mask8x32FromBits constructs a Mask8x32 from an a bitmap, where 1 means set for the indexed element, 0 means unset. +// LoadMask8x32FromBits constructs a Mask8x32 from a bitmap, where 1 means set for the indexed element, 0 means unset. // Only the lower 32 bits of y are used. // +// CPU Features: AVX512 +// //go:noescape func LoadMask8x32FromBits(y *uint64) Mask8x32 +// StoreToBits stores a Mask8x32 as a bitmap, where 1 means set for the indexed element, 0 means unset. +// Only the lower 32 bits of y are used. +// +// CPU Features: AVX512 +// +//go:noescape +func (x Mask8x32) StoreToBits(y *uint64) + // Mask16x16 is a 256-bit SIMD vector of 16 int16 type Mask16x16 struct { int16x16 v256 vals [16]int16 } -// Mask16x16FromBits constructs a Mask16x16 from an a bitmap, where 1 means set for the indexed element, 0 means unset. +// LoadMask16x16FromBits constructs a Mask16x16 from a bitmap, where 1 means set for the indexed element, 0 means unset. // Only the lower 16 bits of y are used. // +// CPU Features: AVX512 +// //go:noescape func LoadMask16x16FromBits(y *uint64) Mask16x16 +// StoreToBits stores a Mask16x16 as a bitmap, where 1 means set for the indexed element, 0 means unset. +// Only the lower 16 bits of y are used. +// +// CPU Features: AVX512 +// +//go:noescape +func (x Mask16x16) StoreToBits(y *uint64) + // Mask32x8 is a 256-bit SIMD vector of 8 int32 type Mask32x8 struct { int32x8 v256 vals [8]int32 } -// Mask32x8FromBits constructs a Mask32x8 from an a bitmap, where 1 means set for the indexed element, 0 means unset. +// LoadMask32x8FromBits constructs a Mask32x8 from a bitmap, where 1 means set for the indexed element, 0 means unset. // Only the lower 8 bits of y are used. // +// CPU Features: AVX512 +// //go:noescape func LoadMask32x8FromBits(y *uint64) Mask32x8 +// StoreToBits stores a Mask32x8 as a bitmap, where 1 means set for the indexed element, 0 means unset. +// Only the lower 8 bits of y are used. +// +// CPU Features: AVX512 +// +//go:noescape +func (x Mask32x8) StoreToBits(y *uint64) + // Mask64x4 is a 256-bit SIMD vector of 4 int64 type Mask64x4 struct { int64x4 v256 vals [4]int64 } -// Mask64x4FromBits constructs a Mask64x4 from an a bitmap, where 1 means set for the indexed element, 0 means unset. +// LoadMask64x4FromBits constructs a Mask64x4 from a bitmap, where 1 means set for the indexed element, 0 means unset. // Only the lower 4 bits of y are used. // +// CPU Features: AVX512 +// //go:noescape func LoadMask64x4FromBits(y *uint64) Mask64x4 +// StoreToBits stores a Mask64x4 as a bitmap, where 1 means set for the indexed element, 0 means unset. +// Only the lower 4 bits of y are used. +// +// CPU Features: AVX512 +// +//go:noescape +func (x Mask64x4) StoreToBits(y *uint64) + // v512 is a tag type that tells the compiler that this is really 512-bit SIMD type v512 struct { _512 struct{} @@ -691,44 +771,84 @@ type Mask8x64 struct { vals [64]int8 } -// Mask8x64FromBits constructs a Mask8x64 from an a bitmap, where 1 means set for the indexed element, 0 means unset. 
+// LoadMask8x64FromBits constructs a Mask8x64 from a bitmap, where 1 means set for the indexed element, 0 means unset. // Only the lower 64 bits of y are used. // +// CPU Features: AVX512 +// //go:noescape func LoadMask8x64FromBits(y *uint64) Mask8x64 +// StoreToBits stores a Mask8x64 as a bitmap, where 1 means set for the indexed element, 0 means unset. +// Only the lower 64 bits of y are used. +// +// CPU Features: AVX512 +// +//go:noescape +func (x Mask8x64) StoreToBits(y *uint64) + // Mask16x32 is a 512-bit SIMD vector of 32 int16 type Mask16x32 struct { int16x32 v512 vals [32]int16 } -// Mask16x32FromBits constructs a Mask16x32 from an a bitmap, where 1 means set for the indexed element, 0 means unset. +// LoadMask16x32FromBits constructs a Mask16x32 from a bitmap, where 1 means set for the indexed element, 0 means unset. // Only the lower 32 bits of y are used. // +// CPU Features: AVX512 +// //go:noescape func LoadMask16x32FromBits(y *uint64) Mask16x32 +// StoreToBits stores a Mask16x32 as a bitmap, where 1 means set for the indexed element, 0 means unset. +// Only the lower 32 bits of y are used. +// +// CPU Features: AVX512 +// +//go:noescape +func (x Mask16x32) StoreToBits(y *uint64) + // Mask32x16 is a 512-bit SIMD vector of 16 int32 type Mask32x16 struct { int32x16 v512 vals [16]int32 } -// Mask32x16FromBits constructs a Mask32x16 from an a bitmap, where 1 means set for the indexed element, 0 means unset. +// LoadMask32x16FromBits constructs a Mask32x16 from a bitmap, where 1 means set for the indexed element, 0 means unset. // Only the lower 16 bits of y are used. // +// CPU Features: AVX512 +// //go:noescape func LoadMask32x16FromBits(y *uint64) Mask32x16 +// StoreToBits stores a Mask32x16 as a bitmap, where 1 means set for the indexed element, 0 means unset. +// Only the lower 16 bits of y are used. +// +// CPU Features: AVX512 +// +//go:noescape +func (x Mask32x16) StoreToBits(y *uint64) + // Mask64x8 is a 512-bit SIMD vector of 8 int64 type Mask64x8 struct { int64x8 v512 vals [8]int64 } -// Mask64x8FromBits constructs a Mask64x8 from an a bitmap, where 1 means set for the indexed element, 0 means unset. +// LoadMask64x8FromBits constructs a Mask64x8 from a bitmap, where 1 means set for the indexed element, 0 means unset. // Only the lower 8 bits of y are used. // +// CPU Features: AVX512 +// //go:noescape func LoadMask64x8FromBits(y *uint64) Mask64x8 + +// StoreToBits stores a Mask64x8 as a bitmap, where 1 means set for the indexed element, 0 means unset. +// Only the lower 8 bits of y are used. +// +// CPU Features: AVX512 +// +//go:noescape +func (x Mask64x8) StoreToBits(y *uint64) From 88568519b416190d264f5e5f02c41b5a139498b2 Mon Sep 17 00:00:00 2001 From: David Chase Date: Mon, 7 Jul 2025 17:48:24 -0400 Subject: [PATCH 095/139] [dev.simd] simd: move test generation into Go repo This pairs with CL 689275 which removes test generation from simdgen This uses generics and attempts to encode the tests as compactly as possible. Some files, *_helpers_test.go, are generated. Use t.Helper() to get the line number right for a failure. Adds helper error return values and early exits to only report a single test failure per operations and vector shape, for the generated test failures. Include the entire got and wanted vectors for that failure. Provide an option to include the input vectors to failures, also report the type of the test. 
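As an illustrative sketch only (assuming the generated testInt8x16Binary helper below and the package's existing Int8x16.Add method; the test name is hypothetical), a generated binary test reduces to pairing the SIMD method with a scalar reference implementation:

	func TestAddInt8x16(t *testing.T) {
		testInt8x16Binary(t, simd.Int8x16.Add, func(x, y []int8) []int8 {
			// Scalar reference: element-wise wrap-around addition.
			got := make([]int8, len(x))
			for i := range x {
				got[i] = x[i] + y[i]
			}
			return got
		})
	}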
Sample failure test output (obtained by intentionally breaking the "want" value for AndNot): === RUN TestAndNot binary_test.go:214: For int16 vector elements: binary_test.go:214: got =[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] binary_test.go:214: want=[-1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1] binary_test.go:214: x=[1 -1 0 2 4 8 1024 3 5 7 11 13 3000 5555 7777 11111] binary_test.go:214: y=[1 -1 0 2 4 8 1024 3 5 7 11 13 3000 5555 7777 11111] binary_test.go:214: at index 0, got=0, want=-1 binary_test.go:215: For int16 vector elements: binary_test.go:215: got =[0 0 0 0 0 0 0 0] binary_test.go:215: want=[-1 -1 -1 -1 -1 -1 -1 -1] binary_test.go:215: x=[1 -1 0 2 4 8 1024 3] binary_test.go:215: y=[1 -1 0 2 4 8 1024 3] binary_test.go:215: at index 0, got=0, want=-1 binary_test.go:216: For int32 vector elements: binary_test.go:216: got =[0 0 0 0] binary_test.go:216: want=[-1 -1 -1 -1] binary_test.go:216: x=[1 -1 0 2] binary_test.go:216: y=[1 -1 0 2] binary_test.go:216: at index 0, got=0, want=-1 (etc) Change-Id: I0f6ee8390ebe7a2333002e9415b4d71527fa3c38 Reviewed-on: https://go-review.googlesource.com/c/go/+/686057 Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui --- src/simd/binary_helpers_test.go | 464 ++ src/simd/binary_test.go | 361 ++ src/simd/compare_helpers_test.go | 464 ++ src/simd/compare_test.go | 295 + src/simd/comparemasked_helpers_test.go | 734 +++ src/simd/genfiles.go | 287 + src/simd/genslice.go | 117 - src/simd/helpers_test.go | 299 + src/simd/no_tag.go | 2 +- src/simd/simd_test.go | 101 +- src/simd/simd_wrapped_test.go | 8021 ------------------------ src/simd/simulation_helpers_test.go | 204 + src/simd/slice_amd64.go | 5 +- src/simd/slicepart_test.go | 35 +- src/simd/ternary_helpers_test.go | 494 ++ src/simd/ternary_test.go | 23 + src/simd/unary_helpers_test.go | 434 ++ src/simd/unary_test.go | 84 + 18 files changed, 4182 insertions(+), 8242 deletions(-) create mode 100644 src/simd/binary_helpers_test.go create mode 100644 src/simd/binary_test.go create mode 100644 src/simd/compare_helpers_test.go create mode 100644 src/simd/compare_test.go create mode 100644 src/simd/comparemasked_helpers_test.go create mode 100644 src/simd/genfiles.go delete mode 100644 src/simd/genslice.go create mode 100644 src/simd/helpers_test.go delete mode 100644 src/simd/simd_wrapped_test.go create mode 100644 src/simd/simulation_helpers_test.go create mode 100644 src/simd/ternary_helpers_test.go create mode 100644 src/simd/ternary_test.go create mode 100644 src/simd/unary_helpers_test.go create mode 100644 src/simd/unary_test.go diff --git a/src/simd/binary_helpers_test.go b/src/simd/binary_helpers_test.go new file mode 100644 index 00000000000000..b5055980586d74 --- /dev/null +++ b/src/simd/binary_helpers_test.go @@ -0,0 +1,464 @@ +// Code generated by 'go run genfiles.go'; DO NOT EDIT. + +//go:build goexperiment.simd + +// This file contains functions testing binary simd methods. +// Each function in this file is specialized for a +// particular simd type x. 
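+//
+// For example, binary_test.go calls these helpers with a method expression and a
+// scalar reference function, as in testInt16x8Binary(t, simd.Int16x8.Add, addSlice[int16]).
+// Each helper loads x and y into SIMD values, applies f, stores the result to a plain
+// slice, and checks it against want(x, y) via checkSlicesLogInput, logging the inputs
+// on failure.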
+ +package simd_test + +import ( + "simd" + "testing" +) + +// testInt8x16Binary tests the simd binary method f against the expected behavior generated by want +func testInt8x16Binary(t *testing.T, f func(_, _ simd.Int8x16) simd.Int8x16, want func(_, _ []int8) []int8) { + n := 16 + t.Helper() + forSlicePair(t, int8s, n, func(x, y []int8) bool { + t.Helper() + a := simd.LoadInt8x16Slice(x) + b := simd.LoadInt8x16Slice(y) + g := make([]int8, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint8x16Binary tests the simd binary method f against the expected behavior generated by want +func testUint8x16Binary(t *testing.T, f func(_, _ simd.Uint8x16) simd.Uint8x16, want func(_, _ []uint8) []uint8) { + n := 16 + t.Helper() + forSlicePair(t, uint8s, n, func(x, y []uint8) bool { + t.Helper() + a := simd.LoadUint8x16Slice(x) + b := simd.LoadUint8x16Slice(y) + g := make([]uint8, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt16x8Binary tests the simd binary method f against the expected behavior generated by want +func testInt16x8Binary(t *testing.T, f func(_, _ simd.Int16x8) simd.Int16x8, want func(_, _ []int16) []int16) { + n := 8 + t.Helper() + forSlicePair(t, int16s, n, func(x, y []int16) bool { + t.Helper() + a := simd.LoadInt16x8Slice(x) + b := simd.LoadInt16x8Slice(y) + g := make([]int16, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint16x8Binary tests the simd binary method f against the expected behavior generated by want +func testUint16x8Binary(t *testing.T, f func(_, _ simd.Uint16x8) simd.Uint16x8, want func(_, _ []uint16) []uint16) { + n := 8 + t.Helper() + forSlicePair(t, uint16s, n, func(x, y []uint16) bool { + t.Helper() + a := simd.LoadUint16x8Slice(x) + b := simd.LoadUint16x8Slice(y) + g := make([]uint16, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt32x4Binary tests the simd binary method f against the expected behavior generated by want +func testInt32x4Binary(t *testing.T, f func(_, _ simd.Int32x4) simd.Int32x4, want func(_, _ []int32) []int32) { + n := 4 + t.Helper() + forSlicePair(t, int32s, n, func(x, y []int32) bool { + t.Helper() + a := simd.LoadInt32x4Slice(x) + b := simd.LoadInt32x4Slice(y) + g := make([]int32, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint32x4Binary tests the simd binary method f against the expected behavior generated by want +func testUint32x4Binary(t *testing.T, f func(_, _ simd.Uint32x4) simd.Uint32x4, want func(_, _ []uint32) []uint32) { + n := 4 + t.Helper() + forSlicePair(t, uint32s, n, func(x, y []uint32) bool { + t.Helper() + a := simd.LoadUint32x4Slice(x) + b := simd.LoadUint32x4Slice(y) + g := make([]uint32, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt64x2Binary tests the simd binary method f against the expected behavior generated by want +func testInt64x2Binary(t *testing.T, f func(_, _ simd.Int64x2) simd.Int64x2, want func(_, _ []int64) []int64) 
{ + n := 2 + t.Helper() + forSlicePair(t, int64s, n, func(x, y []int64) bool { + t.Helper() + a := simd.LoadInt64x2Slice(x) + b := simd.LoadInt64x2Slice(y) + g := make([]int64, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint64x2Binary tests the simd binary method f against the expected behavior generated by want +func testUint64x2Binary(t *testing.T, f func(_, _ simd.Uint64x2) simd.Uint64x2, want func(_, _ []uint64) []uint64) { + n := 2 + t.Helper() + forSlicePair(t, uint64s, n, func(x, y []uint64) bool { + t.Helper() + a := simd.LoadUint64x2Slice(x) + b := simd.LoadUint64x2Slice(y) + g := make([]uint64, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testFloat32x4Binary tests the simd binary method f against the expected behavior generated by want +func testFloat32x4Binary(t *testing.T, f func(_, _ simd.Float32x4) simd.Float32x4, want func(_, _ []float32) []float32) { + n := 4 + t.Helper() + forSlicePair(t, float32s, n, func(x, y []float32) bool { + t.Helper() + a := simd.LoadFloat32x4Slice(x) + b := simd.LoadFloat32x4Slice(y) + g := make([]float32, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testFloat64x2Binary tests the simd binary method f against the expected behavior generated by want +func testFloat64x2Binary(t *testing.T, f func(_, _ simd.Float64x2) simd.Float64x2, want func(_, _ []float64) []float64) { + n := 2 + t.Helper() + forSlicePair(t, float64s, n, func(x, y []float64) bool { + t.Helper() + a := simd.LoadFloat64x2Slice(x) + b := simd.LoadFloat64x2Slice(y) + g := make([]float64, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt8x32Binary tests the simd binary method f against the expected behavior generated by want +func testInt8x32Binary(t *testing.T, f func(_, _ simd.Int8x32) simd.Int8x32, want func(_, _ []int8) []int8) { + n := 32 + t.Helper() + forSlicePair(t, int8s, n, func(x, y []int8) bool { + t.Helper() + a := simd.LoadInt8x32Slice(x) + b := simd.LoadInt8x32Slice(y) + g := make([]int8, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint8x32Binary tests the simd binary method f against the expected behavior generated by want +func testUint8x32Binary(t *testing.T, f func(_, _ simd.Uint8x32) simd.Uint8x32, want func(_, _ []uint8) []uint8) { + n := 32 + t.Helper() + forSlicePair(t, uint8s, n, func(x, y []uint8) bool { + t.Helper() + a := simd.LoadUint8x32Slice(x) + b := simd.LoadUint8x32Slice(y) + g := make([]uint8, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt16x16Binary tests the simd binary method f against the expected behavior generated by want +func testInt16x16Binary(t *testing.T, f func(_, _ simd.Int16x16) simd.Int16x16, want func(_, _ []int16) []int16) { + n := 16 + t.Helper() + forSlicePair(t, int16s, n, func(x, y []int16) bool { + t.Helper() + a := simd.LoadInt16x16Slice(x) + b := simd.LoadInt16x16Slice(y) + g := make([]int16, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return 
checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint16x16Binary tests the simd binary method f against the expected behavior generated by want +func testUint16x16Binary(t *testing.T, f func(_, _ simd.Uint16x16) simd.Uint16x16, want func(_, _ []uint16) []uint16) { + n := 16 + t.Helper() + forSlicePair(t, uint16s, n, func(x, y []uint16) bool { + t.Helper() + a := simd.LoadUint16x16Slice(x) + b := simd.LoadUint16x16Slice(y) + g := make([]uint16, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt32x8Binary tests the simd binary method f against the expected behavior generated by want +func testInt32x8Binary(t *testing.T, f func(_, _ simd.Int32x8) simd.Int32x8, want func(_, _ []int32) []int32) { + n := 8 + t.Helper() + forSlicePair(t, int32s, n, func(x, y []int32) bool { + t.Helper() + a := simd.LoadInt32x8Slice(x) + b := simd.LoadInt32x8Slice(y) + g := make([]int32, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint32x8Binary tests the simd binary method f against the expected behavior generated by want +func testUint32x8Binary(t *testing.T, f func(_, _ simd.Uint32x8) simd.Uint32x8, want func(_, _ []uint32) []uint32) { + n := 8 + t.Helper() + forSlicePair(t, uint32s, n, func(x, y []uint32) bool { + t.Helper() + a := simd.LoadUint32x8Slice(x) + b := simd.LoadUint32x8Slice(y) + g := make([]uint32, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt64x4Binary tests the simd binary method f against the expected behavior generated by want +func testInt64x4Binary(t *testing.T, f func(_, _ simd.Int64x4) simd.Int64x4, want func(_, _ []int64) []int64) { + n := 4 + t.Helper() + forSlicePair(t, int64s, n, func(x, y []int64) bool { + t.Helper() + a := simd.LoadInt64x4Slice(x) + b := simd.LoadInt64x4Slice(y) + g := make([]int64, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint64x4Binary tests the simd binary method f against the expected behavior generated by want +func testUint64x4Binary(t *testing.T, f func(_, _ simd.Uint64x4) simd.Uint64x4, want func(_, _ []uint64) []uint64) { + n := 4 + t.Helper() + forSlicePair(t, uint64s, n, func(x, y []uint64) bool { + t.Helper() + a := simd.LoadUint64x4Slice(x) + b := simd.LoadUint64x4Slice(y) + g := make([]uint64, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testFloat32x8Binary tests the simd binary method f against the expected behavior generated by want +func testFloat32x8Binary(t *testing.T, f func(_, _ simd.Float32x8) simd.Float32x8, want func(_, _ []float32) []float32) { + n := 8 + t.Helper() + forSlicePair(t, float32s, n, func(x, y []float32) bool { + t.Helper() + a := simd.LoadFloat32x8Slice(x) + b := simd.LoadFloat32x8Slice(y) + g := make([]float32, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testFloat64x4Binary tests the simd binary method f against the expected behavior generated by want +func testFloat64x4Binary(t 
*testing.T, f func(_, _ simd.Float64x4) simd.Float64x4, want func(_, _ []float64) []float64) { + n := 4 + t.Helper() + forSlicePair(t, float64s, n, func(x, y []float64) bool { + t.Helper() + a := simd.LoadFloat64x4Slice(x) + b := simd.LoadFloat64x4Slice(y) + g := make([]float64, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt8x64Binary tests the simd binary method f against the expected behavior generated by want +func testInt8x64Binary(t *testing.T, f func(_, _ simd.Int8x64) simd.Int8x64, want func(_, _ []int8) []int8) { + n := 64 + t.Helper() + forSlicePair(t, int8s, n, func(x, y []int8) bool { + t.Helper() + a := simd.LoadInt8x64Slice(x) + b := simd.LoadInt8x64Slice(y) + g := make([]int8, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint8x64Binary tests the simd binary method f against the expected behavior generated by want +func testUint8x64Binary(t *testing.T, f func(_, _ simd.Uint8x64) simd.Uint8x64, want func(_, _ []uint8) []uint8) { + n := 64 + t.Helper() + forSlicePair(t, uint8s, n, func(x, y []uint8) bool { + t.Helper() + a := simd.LoadUint8x64Slice(x) + b := simd.LoadUint8x64Slice(y) + g := make([]uint8, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt16x32Binary tests the simd binary method f against the expected behavior generated by want +func testInt16x32Binary(t *testing.T, f func(_, _ simd.Int16x32) simd.Int16x32, want func(_, _ []int16) []int16) { + n := 32 + t.Helper() + forSlicePair(t, int16s, n, func(x, y []int16) bool { + t.Helper() + a := simd.LoadInt16x32Slice(x) + b := simd.LoadInt16x32Slice(y) + g := make([]int16, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint16x32Binary tests the simd binary method f against the expected behavior generated by want +func testUint16x32Binary(t *testing.T, f func(_, _ simd.Uint16x32) simd.Uint16x32, want func(_, _ []uint16) []uint16) { + n := 32 + t.Helper() + forSlicePair(t, uint16s, n, func(x, y []uint16) bool { + t.Helper() + a := simd.LoadUint16x32Slice(x) + b := simd.LoadUint16x32Slice(y) + g := make([]uint16, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt32x16Binary tests the simd binary method f against the expected behavior generated by want +func testInt32x16Binary(t *testing.T, f func(_, _ simd.Int32x16) simd.Int32x16, want func(_, _ []int32) []int32) { + n := 16 + t.Helper() + forSlicePair(t, int32s, n, func(x, y []int32) bool { + t.Helper() + a := simd.LoadInt32x16Slice(x) + b := simd.LoadInt32x16Slice(y) + g := make([]int32, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint32x16Binary tests the simd binary method f against the expected behavior generated by want +func testUint32x16Binary(t *testing.T, f func(_, _ simd.Uint32x16) simd.Uint32x16, want func(_, _ []uint32) []uint32) { + n := 16 + t.Helper() + forSlicePair(t, uint32s, n, func(x, y []uint32) bool { + t.Helper() + a := simd.LoadUint32x16Slice(x) + b := 
simd.LoadUint32x16Slice(y) + g := make([]uint32, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt64x8Binary tests the simd binary method f against the expected behavior generated by want +func testInt64x8Binary(t *testing.T, f func(_, _ simd.Int64x8) simd.Int64x8, want func(_, _ []int64) []int64) { + n := 8 + t.Helper() + forSlicePair(t, int64s, n, func(x, y []int64) bool { + t.Helper() + a := simd.LoadInt64x8Slice(x) + b := simd.LoadInt64x8Slice(y) + g := make([]int64, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint64x8Binary tests the simd binary method f against the expected behavior generated by want +func testUint64x8Binary(t *testing.T, f func(_, _ simd.Uint64x8) simd.Uint64x8, want func(_, _ []uint64) []uint64) { + n := 8 + t.Helper() + forSlicePair(t, uint64s, n, func(x, y []uint64) bool { + t.Helper() + a := simd.LoadUint64x8Slice(x) + b := simd.LoadUint64x8Slice(y) + g := make([]uint64, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testFloat32x16Binary tests the simd binary method f against the expected behavior generated by want +func testFloat32x16Binary(t *testing.T, f func(_, _ simd.Float32x16) simd.Float32x16, want func(_, _ []float32) []float32) { + n := 16 + t.Helper() + forSlicePair(t, float32s, n, func(x, y []float32) bool { + t.Helper() + a := simd.LoadFloat32x16Slice(x) + b := simd.LoadFloat32x16Slice(y) + g := make([]float32, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testFloat64x8Binary tests the simd binary method f against the expected behavior generated by want +func testFloat64x8Binary(t *testing.T, f func(_, _ simd.Float64x8) simd.Float64x8, want func(_, _ []float64) []float64) { + n := 8 + t.Helper() + forSlicePair(t, float64s, n, func(x, y []float64) bool { + t.Helper() + a := simd.LoadFloat64x8Slice(x) + b := simd.LoadFloat64x8Slice(y) + g := make([]float64, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} diff --git a/src/simd/binary_test.go b/src/simd/binary_test.go new file mode 100644 index 00000000000000..4221e741449f15 --- /dev/null +++ b/src/simd/binary_test.go @@ -0,0 +1,361 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build goexperiment.simd && amd64 + +package simd_test + +import ( + "simd" + "testing" +) + +func TestAdd(t *testing.T) { + testFloat32x4Binary(t, simd.Float32x4.Add, addSlice[float32]) + testFloat32x8Binary(t, simd.Float32x8.Add, addSlice[float32]) + testFloat64x2Binary(t, simd.Float64x2.Add, addSlice[float64]) + testFloat64x4Binary(t, simd.Float64x4.Add, addSlice[float64]) + + testInt16x16Binary(t, simd.Int16x16.Add, addSlice[int16]) + testInt16x8Binary(t, simd.Int16x8.Add, addSlice[int16]) + testInt32x4Binary(t, simd.Int32x4.Add, addSlice[int32]) + testInt32x8Binary(t, simd.Int32x8.Add, addSlice[int32]) + testInt64x2Binary(t, simd.Int64x2.Add, addSlice[int64]) + testInt64x4Binary(t, simd.Int64x4.Add, addSlice[int64]) + testInt8x16Binary(t, simd.Int8x16.Add, addSlice[int8]) + testInt8x32Binary(t, simd.Int8x32.Add, addSlice[int8]) + + testUint16x16Binary(t, simd.Uint16x16.Add, addSlice[uint16]) + testUint16x8Binary(t, simd.Uint16x8.Add, addSlice[uint16]) + testUint32x4Binary(t, simd.Uint32x4.Add, addSlice[uint32]) + testUint32x8Binary(t, simd.Uint32x8.Add, addSlice[uint32]) + testUint64x2Binary(t, simd.Uint64x2.Add, addSlice[uint64]) + testUint64x4Binary(t, simd.Uint64x4.Add, addSlice[uint64]) + testUint8x16Binary(t, simd.Uint8x16.Add, addSlice[uint8]) + testUint8x32Binary(t, simd.Uint8x32.Add, addSlice[uint8]) + + if simd.HasAVX512() { + testFloat32x16Binary(t, simd.Float32x16.Add, addSlice[float32]) + testFloat64x8Binary(t, simd.Float64x8.Add, addSlice[float64]) + testInt8x64Binary(t, simd.Int8x64.Add, addSlice[int8]) + testInt16x32Binary(t, simd.Int16x32.Add, addSlice[int16]) + testInt32x16Binary(t, simd.Int32x16.Add, addSlice[int32]) + testInt64x8Binary(t, simd.Int64x8.Add, addSlice[int64]) + testUint8x64Binary(t, simd.Uint8x64.Add, addSlice[uint8]) + testUint16x32Binary(t, simd.Uint16x32.Add, addSlice[uint16]) + testUint32x16Binary(t, simd.Uint32x16.Add, addSlice[uint32]) + testUint64x8Binary(t, simd.Uint64x8.Add, addSlice[uint64]) + } +} + +func TestSub(t *testing.T) { + testFloat32x4Binary(t, simd.Float32x4.Sub, subSlice[float32]) + testFloat32x8Binary(t, simd.Float32x8.Sub, subSlice[float32]) + testFloat64x2Binary(t, simd.Float64x2.Sub, subSlice[float64]) + testFloat64x4Binary(t, simd.Float64x4.Sub, subSlice[float64]) + + testInt16x16Binary(t, simd.Int16x16.Sub, subSlice[int16]) + testInt16x8Binary(t, simd.Int16x8.Sub, subSlice[int16]) + testInt32x4Binary(t, simd.Int32x4.Sub, subSlice[int32]) + testInt32x8Binary(t, simd.Int32x8.Sub, subSlice[int32]) + testInt64x2Binary(t, simd.Int64x2.Sub, subSlice[int64]) + testInt64x4Binary(t, simd.Int64x4.Sub, subSlice[int64]) + testInt8x16Binary(t, simd.Int8x16.Sub, subSlice[int8]) + testInt8x32Binary(t, simd.Int8x32.Sub, subSlice[int8]) + + testUint16x16Binary(t, simd.Uint16x16.Sub, subSlice[uint16]) + testUint16x8Binary(t, simd.Uint16x8.Sub, subSlice[uint16]) + testUint32x4Binary(t, simd.Uint32x4.Sub, subSlice[uint32]) + testUint32x8Binary(t, simd.Uint32x8.Sub, subSlice[uint32]) + testUint64x2Binary(t, simd.Uint64x2.Sub, subSlice[uint64]) + testUint64x4Binary(t, simd.Uint64x4.Sub, subSlice[uint64]) + testUint8x16Binary(t, simd.Uint8x16.Sub, subSlice[uint8]) + testUint8x32Binary(t, simd.Uint8x32.Sub, subSlice[uint8]) + + if simd.HasAVX512() { + testFloat32x16Binary(t, simd.Float32x16.Sub, subSlice[float32]) + testFloat64x8Binary(t, simd.Float64x8.Sub, subSlice[float64]) + testInt8x64Binary(t, simd.Int8x64.Sub, subSlice[int8]) + testInt16x32Binary(t, simd.Int16x32.Sub, subSlice[int16]) + testInt32x16Binary(t, simd.Int32x16.Sub, 
subSlice[int32]) + testInt64x8Binary(t, simd.Int64x8.Sub, subSlice[int64]) + testUint8x64Binary(t, simd.Uint8x64.Sub, subSlice[uint8]) + testUint16x32Binary(t, simd.Uint16x32.Sub, subSlice[uint16]) + testUint32x16Binary(t, simd.Uint32x16.Sub, subSlice[uint32]) + testUint64x8Binary(t, simd.Uint64x8.Sub, subSlice[uint64]) + } +} + +func TestMax(t *testing.T) { + // testFloat32x4Binary(t, simd.Float32x4.Max, maxSlice[float32]) // nan is wrong + // testFloat32x8Binary(t, simd.Float32x8.Max, maxSlice[float32]) // nan is wrong + // testFloat64x2Binary(t, simd.Float64x2.Max, maxSlice[float64]) // nan is wrong + // testFloat64x4Binary(t, simd.Float64x4.Max, maxSlice[float64]) // nan is wrong + + testInt16x16Binary(t, simd.Int16x16.Max, maxSlice[int16]) + testInt16x8Binary(t, simd.Int16x8.Max, maxSlice[int16]) + testInt32x4Binary(t, simd.Int32x4.Max, maxSlice[int32]) + testInt32x8Binary(t, simd.Int32x8.Max, maxSlice[int32]) + + if simd.HasAVX512() { + testInt64x2Binary(t, simd.Int64x2.Max, maxSlice[int64]) + testInt64x4Binary(t, simd.Int64x4.Max, maxSlice[int64]) + } + + testInt8x16Binary(t, simd.Int8x16.Max, maxSlice[int8]) + testInt8x32Binary(t, simd.Int8x32.Max, maxSlice[int8]) + + testUint16x16Binary(t, simd.Uint16x16.Max, maxSlice[uint16]) + testUint16x8Binary(t, simd.Uint16x8.Max, maxSlice[uint16]) + testUint32x4Binary(t, simd.Uint32x4.Max, maxSlice[uint32]) + testUint32x8Binary(t, simd.Uint32x8.Max, maxSlice[uint32]) + + if simd.HasAVX512() { + testUint64x2Binary(t, simd.Uint64x2.Max, maxSlice[uint64]) + testUint64x4Binary(t, simd.Uint64x4.Max, maxSlice[uint64]) + } + + testUint8x16Binary(t, simd.Uint8x16.Max, maxSlice[uint8]) + testUint8x32Binary(t, simd.Uint8x32.Max, maxSlice[uint8]) + + if simd.HasAVX512() { + // testFloat32x16Binary(t, simd.Float32x16.Max, maxSlice[float32]) // nan is wrong + // testFloat64x8Binary(t, simd.Float64x8.Max, maxSlice[float64]) // nan is wrong + testInt8x64Binary(t, simd.Int8x64.Max, maxSlice[int8]) + testInt16x32Binary(t, simd.Int16x32.Max, maxSlice[int16]) + testInt32x16Binary(t, simd.Int32x16.Max, maxSlice[int32]) + testInt64x8Binary(t, simd.Int64x8.Max, maxSlice[int64]) + testUint8x64Binary(t, simd.Uint8x64.Max, maxSlice[uint8]) + testUint16x32Binary(t, simd.Uint16x32.Max, maxSlice[uint16]) + testUint32x16Binary(t, simd.Uint32x16.Max, maxSlice[uint32]) + testUint64x8Binary(t, simd.Uint64x8.Max, maxSlice[uint64]) + } +} + +func TestMin(t *testing.T) { + // testFloat32x4Binary(t, simd.Float32x4.Min, minSlice[float32]) // nan is wrong + // testFloat32x8Binary(t, simd.Float32x8.Min, minSlice[float32]) // nan is wrong + // testFloat64x2Binary(t, simd.Float64x2.Min, minSlice[float64]) // nan is wrong + // testFloat64x4Binary(t, simd.Float64x4.Min, minSlice[float64]) // nan is wrong + + testInt16x16Binary(t, simd.Int16x16.Min, minSlice[int16]) + testInt16x8Binary(t, simd.Int16x8.Min, minSlice[int16]) + testInt32x4Binary(t, simd.Int32x4.Min, minSlice[int32]) + testInt32x8Binary(t, simd.Int32x8.Min, minSlice[int32]) + + if simd.HasAVX512() { + testInt64x2Binary(t, simd.Int64x2.Min, minSlice[int64]) + testInt64x4Binary(t, simd.Int64x4.Min, minSlice[int64]) + } + + testInt8x16Binary(t, simd.Int8x16.Min, minSlice[int8]) + testInt8x32Binary(t, simd.Int8x32.Min, minSlice[int8]) + + testUint16x16Binary(t, simd.Uint16x16.Min, minSlice[uint16]) + testUint16x8Binary(t, simd.Uint16x8.Min, minSlice[uint16]) + testUint32x4Binary(t, simd.Uint32x4.Min, minSlice[uint32]) + testUint32x8Binary(t, simd.Uint32x8.Min, minSlice[uint32]) + + if simd.HasAVX512() { + 
testUint64x2Binary(t, simd.Uint64x2.Min, minSlice[uint64]) + testUint64x4Binary(t, simd.Uint64x4.Min, minSlice[uint64]) + } + + testUint8x16Binary(t, simd.Uint8x16.Min, minSlice[uint8]) + testUint8x32Binary(t, simd.Uint8x32.Min, minSlice[uint8]) + + if simd.HasAVX512() { + // testFloat32x16Binary(t, simd.Float32x16.Min, minSlice[float32]) // nan is wrong + // testFloat64x8Binary(t, simd.Float64x8.Min, minSlice[float64]) // nan is wrong + testInt8x64Binary(t, simd.Int8x64.Min, minSlice[int8]) + testInt16x32Binary(t, simd.Int16x32.Min, minSlice[int16]) + testInt32x16Binary(t, simd.Int32x16.Min, minSlice[int32]) + testInt64x8Binary(t, simd.Int64x8.Min, minSlice[int64]) + testUint8x64Binary(t, simd.Uint8x64.Min, minSlice[uint8]) + testUint16x32Binary(t, simd.Uint16x32.Min, minSlice[uint16]) + testUint32x16Binary(t, simd.Uint32x16.Min, minSlice[uint32]) + testUint64x8Binary(t, simd.Uint64x8.Min, minSlice[uint64]) + } +} + +func TestAnd(t *testing.T) { + testInt16x16Binary(t, simd.Int16x16.And, andSlice[int16]) + testInt16x8Binary(t, simd.Int16x8.And, andSlice[int16]) + testInt32x4Binary(t, simd.Int32x4.And, andSlice[int32]) + testInt32x8Binary(t, simd.Int32x8.And, andSlice[int32]) + testInt64x2Binary(t, simd.Int64x2.And, andSlice[int64]) + testInt64x4Binary(t, simd.Int64x4.And, andSlice[int64]) + testInt8x16Binary(t, simd.Int8x16.And, andSlice[int8]) + testInt8x32Binary(t, simd.Int8x32.And, andSlice[int8]) + + testUint16x16Binary(t, simd.Uint16x16.And, andSlice[uint16]) + testUint16x8Binary(t, simd.Uint16x8.And, andSlice[uint16]) + testUint32x4Binary(t, simd.Uint32x4.And, andSlice[uint32]) + testUint32x8Binary(t, simd.Uint32x8.And, andSlice[uint32]) + testUint64x2Binary(t, simd.Uint64x2.And, andSlice[uint64]) + testUint64x4Binary(t, simd.Uint64x4.And, andSlice[uint64]) + testUint8x16Binary(t, simd.Uint8x16.And, andSlice[uint8]) + testUint8x32Binary(t, simd.Uint8x32.And, andSlice[uint8]) + + if simd.HasAVX512() { + // testInt8x64Binary(t, simd.Int8x64.And, andISlice[int8]) // missing + // testInt16x32Binary(t, simd.Int16x32.And, andISlice[int16]) // missing + testInt32x16Binary(t, simd.Int32x16.And, andSlice[int32]) + testInt64x8Binary(t, simd.Int64x8.And, andSlice[int64]) + // testUint8x64Binary(t, simd.Uint8x64.And, andISlice[uint8]) // missing + // testUint16x32Binary(t, simd.Uint16x32.And, andISlice[uint16]) // missing + testUint32x16Binary(t, simd.Uint32x16.And, andSlice[uint32]) + testUint64x8Binary(t, simd.Uint64x8.And, andSlice[uint64]) + } +} + +func TestAndNot(t *testing.T) { + testInt16x16Binary(t, simd.Int16x16.AndNot, andNotSlice[int16]) + testInt16x8Binary(t, simd.Int16x8.AndNot, andNotSlice[int16]) + testInt32x4Binary(t, simd.Int32x4.AndNot, andNotSlice[int32]) + testInt32x8Binary(t, simd.Int32x8.AndNot, andNotSlice[int32]) + testInt64x2Binary(t, simd.Int64x2.AndNot, andNotSlice[int64]) + testInt64x4Binary(t, simd.Int64x4.AndNot, andNotSlice[int64]) + testInt8x16Binary(t, simd.Int8x16.AndNot, andNotSlice[int8]) + testInt8x32Binary(t, simd.Int8x32.AndNot, andNotSlice[int8]) + + testUint16x16Binary(t, simd.Uint16x16.AndNot, andNotSlice[uint16]) + testUint16x8Binary(t, simd.Uint16x8.AndNot, andNotSlice[uint16]) + testUint32x4Binary(t, simd.Uint32x4.AndNot, andNotSlice[uint32]) + testUint32x8Binary(t, simd.Uint32x8.AndNot, andNotSlice[uint32]) + testUint64x2Binary(t, simd.Uint64x2.AndNot, andNotSlice[uint64]) + testUint64x4Binary(t, simd.Uint64x4.AndNot, andNotSlice[uint64]) + testUint8x16Binary(t, simd.Uint8x16.AndNot, andNotSlice[uint8]) + testUint8x32Binary(t, simd.Uint8x32.AndNot, 
andNotSlice[uint8]) + + if simd.HasAVX512() { + // testInt8x64Binary(t, simd.Int8x64.AndNot, andNotSlice[int8]) // missing + // testInt16x32Binary(t, simd.Int16x32.AndNot, andNotSlice[int16]) // missing + testInt32x16Binary(t, simd.Int32x16.AndNot, andNotSlice[int32]) + testInt64x8Binary(t, simd.Int64x8.AndNot, andNotSlice[int64]) + // testUint8x64Binary(t, simd.Uint8x64.AndNot, andNotSlice[uint8]) // missing + // testUint16x32Binary(t, simd.Uint16x32.AndNot, andNotSlice[uint16]) // missing + testUint32x16Binary(t, simd.Uint32x16.AndNot, andNotSlice[uint32]) + testUint64x8Binary(t, simd.Uint64x8.AndNot, andNotSlice[uint64]) + } +} + +func TestXor(t *testing.T) { + testInt16x16Binary(t, simd.Int16x16.Xor, xorSlice[int16]) + testInt16x8Binary(t, simd.Int16x8.Xor, xorSlice[int16]) + testInt32x4Binary(t, simd.Int32x4.Xor, xorSlice[int32]) + testInt32x8Binary(t, simd.Int32x8.Xor, xorSlice[int32]) + testInt64x2Binary(t, simd.Int64x2.Xor, xorSlice[int64]) + testInt64x4Binary(t, simd.Int64x4.Xor, xorSlice[int64]) + testInt8x16Binary(t, simd.Int8x16.Xor, xorSlice[int8]) + testInt8x32Binary(t, simd.Int8x32.Xor, xorSlice[int8]) + + testUint16x16Binary(t, simd.Uint16x16.Xor, xorSlice[uint16]) + testUint16x8Binary(t, simd.Uint16x8.Xor, xorSlice[uint16]) + testUint32x4Binary(t, simd.Uint32x4.Xor, xorSlice[uint32]) + testUint32x8Binary(t, simd.Uint32x8.Xor, xorSlice[uint32]) + testUint64x2Binary(t, simd.Uint64x2.Xor, xorSlice[uint64]) + testUint64x4Binary(t, simd.Uint64x4.Xor, xorSlice[uint64]) + testUint8x16Binary(t, simd.Uint8x16.Xor, xorSlice[uint8]) + testUint8x32Binary(t, simd.Uint8x32.Xor, xorSlice[uint8]) + + if simd.HasAVX512() { + // testInt8x64Binary(t, simd.Int8x64.Xor, andISlice[int8]) // missing + // testInt16x32Binary(t, simd.Int16x32.Xor, andISlice[int16]) // missing + testInt32x16Binary(t, simd.Int32x16.Xor, xorSlice[int32]) + testInt64x8Binary(t, simd.Int64x8.Xor, xorSlice[int64]) + // testUint8x64Binary(t, simd.Uint8x64.Xor, andISlice[uint8]) // missing + // testUint16x32Binary(t, simd.Uint16x32.Xor, andISlice[uint16]) // missing + testUint32x16Binary(t, simd.Uint32x16.Xor, xorSlice[uint32]) + testUint64x8Binary(t, simd.Uint64x8.Xor, xorSlice[uint64]) + } +} + +func TestOr(t *testing.T) { + testInt16x16Binary(t, simd.Int16x16.Or, orSlice[int16]) + testInt16x8Binary(t, simd.Int16x8.Or, orSlice[int16]) + testInt32x4Binary(t, simd.Int32x4.Or, orSlice[int32]) + testInt32x8Binary(t, simd.Int32x8.Or, orSlice[int32]) + testInt64x2Binary(t, simd.Int64x2.Or, orSlice[int64]) + testInt64x4Binary(t, simd.Int64x4.Or, orSlice[int64]) + testInt8x16Binary(t, simd.Int8x16.Or, orSlice[int8]) + testInt8x32Binary(t, simd.Int8x32.Or, orSlice[int8]) + + testUint16x16Binary(t, simd.Uint16x16.Or, orSlice[uint16]) + testUint16x8Binary(t, simd.Uint16x8.Or, orSlice[uint16]) + testUint32x4Binary(t, simd.Uint32x4.Or, orSlice[uint32]) + testUint32x8Binary(t, simd.Uint32x8.Or, orSlice[uint32]) + testUint64x2Binary(t, simd.Uint64x2.Or, orSlice[uint64]) + testUint64x4Binary(t, simd.Uint64x4.Or, orSlice[uint64]) + testUint8x16Binary(t, simd.Uint8x16.Or, orSlice[uint8]) + testUint8x32Binary(t, simd.Uint8x32.Or, orSlice[uint8]) + + if simd.HasAVX512() { + // testInt8x64Binary(t, simd.Int8x64.Or, andISlice[int8]) // missing + // testInt16x32Binary(t, simd.Int16x32.Or, andISlice[int16]) // missing + testInt32x16Binary(t, simd.Int32x16.Or, orSlice[int32]) + testInt64x8Binary(t, simd.Int64x8.Or, orSlice[int64]) + // testUint8x64Binary(t, simd.Uint8x64.Or, andISlice[uint8]) // missing + // testUint16x32Binary(t, 
simd.Uint16x32.Or, andISlice[uint16]) // missing + testUint32x16Binary(t, simd.Uint32x16.Or, orSlice[uint32]) + testUint64x8Binary(t, simd.Uint64x8.Or, orSlice[uint64]) + } +} + +func TestMul(t *testing.T) { + testFloat32x4Binary(t, simd.Float32x4.Mul, mulSlice[float32]) + testFloat32x8Binary(t, simd.Float32x8.Mul, mulSlice[float32]) + testFloat64x2Binary(t, simd.Float64x2.Mul, mulSlice[float64]) + testFloat64x4Binary(t, simd.Float64x4.Mul, mulSlice[float64]) + + testInt16x16Binary(t, simd.Int16x16.MulLow, mulSlice[int16]) + testInt16x8Binary(t, simd.Int16x8.MulLow, mulSlice[int16]) + testInt32x4Binary(t, simd.Int32x4.MulLow, mulSlice[int32]) + testInt32x8Binary(t, simd.Int32x8.MulLow, mulSlice[int32]) + + // testInt8x16Binary(t, simd.Int8x16.MulLow, mulSlice[int8]) // nope + // testInt8x32Binary(t, simd.Int8x32.MulLow, mulSlice[int8]) + + // TODO we should be able to do these, there's no difference between signed/unsigned mulLow + // testUint16x16Binary(t, simd.Uint16x16.MulLow, mulSlice[uint16]) + // testUint16x8Binary(t, simd.Uint16x8.MulLow, mulSlice[uint16]) + // testUint32x4Binary(t, simd.Uint32x4.MulLow, mulSlice[uint32]) + // testUint32x8Binary(t, simd.Uint32x8.MulLow, mulSlice[uint32]) + // testUint64x2Binary(t, simd.Uint64x2.MulLow, mulSlice[uint64]) + // testUint64x4Binary(t, simd.Uint64x4.MulLow, mulSlice[uint64]) + + // testUint8x16Binary(t, simd.Uint8x16.MulLow, mulSlice[uint8]) // nope + // testUint8x32Binary(t, simd.Uint8x32.MulLow, mulSlice[uint8]) + + if simd.HasAVX512() { + testInt64x2Binary(t, simd.Int64x2.MulLow, mulSlice[int64]) // avx512 only + testInt64x4Binary(t, simd.Int64x4.MulLow, mulSlice[int64]) + + testFloat32x16Binary(t, simd.Float32x16.Mul, mulSlice[float32]) + testFloat64x8Binary(t, simd.Float64x8.Mul, mulSlice[float64]) + + // testInt8x64Binary(t, simd.Int8x64.MulLow, mulSlice[int8]) // nope + testInt16x32Binary(t, simd.Int16x32.MulLow, mulSlice[int16]) + testInt32x16Binary(t, simd.Int32x16.MulLow, mulSlice[int32]) + testInt64x8Binary(t, simd.Int64x8.MulLow, mulSlice[int64]) + // testUint8x64Binary(t, simd.Uint8x64.MulLow, mulSlice[uint8]) // nope + + // TODO signed should do the job + // testUint16x32Binary(t, simd.Uint16x32.MulLow, mulSlice[uint16]) + // testUint32x16Binary(t, simd.Uint32x16.MulLow, mulSlice[uint32]) + // testUint64x8Binary(t, simd.Uint64x8.MulLow, mulSlice[uint64]) + } +} + +func TestDiv(t *testing.T) { + testFloat32x4Binary(t, simd.Float32x4.Div, divSlice[float32]) + testFloat32x8Binary(t, simd.Float32x8.Div, divSlice[float32]) + testFloat64x2Binary(t, simd.Float64x2.Div, divSlice[float64]) + testFloat64x4Binary(t, simd.Float64x4.Div, divSlice[float64]) + + if simd.HasAVX512() { + testFloat32x16Binary(t, simd.Float32x16.Div, divSlice[float32]) + testFloat64x8Binary(t, simd.Float64x8.Div, divSlice[float64]) + } +} diff --git a/src/simd/compare_helpers_test.go b/src/simd/compare_helpers_test.go new file mode 100644 index 00000000000000..948386307ca6b7 --- /dev/null +++ b/src/simd/compare_helpers_test.go @@ -0,0 +1,464 @@ +// Code generated by 'go run genfiles.go'; DO NOT EDIT. + +//go:build goexperiment.simd + +// This file contains functions testing simd methods that compare two operands. +// Each function in this file is specialized for a +// particular simd type x. 
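+//
+// The mask returned by f is converted to a signed integer vector of the same shape
+// (AsInt8x16, AsInt16x8, and so on), stored to a slice, and widened to []int64 with
+// the s64 helper, so it can be compared against the []int64 that want produces
+// (a set lane reads back as -1, a clear lane as 0).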
+ +package simd_test + +import ( + "simd" + "testing" +) + +// testInt8x16Compare tests the simd comparison method f against the expected behavior generated by want +func testInt8x16Compare(t *testing.T, f func(_, _ simd.Int8x16) simd.Mask8x16, want func(_, _ []int8) []int64) { + n := 16 + t.Helper() + forSlicePair(t, int8s, n, func(x, y []int8) bool { + t.Helper() + a := simd.LoadInt8x16Slice(x) + b := simd.LoadInt8x16Slice(y) + g := make([]int8, n) + f(a, b).AsInt8x16().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint8x16Compare tests the simd comparison method f against the expected behavior generated by want +func testUint8x16Compare(t *testing.T, f func(_, _ simd.Uint8x16) simd.Mask8x16, want func(_, _ []uint8) []int64) { + n := 16 + t.Helper() + forSlicePair(t, uint8s, n, func(x, y []uint8) bool { + t.Helper() + a := simd.LoadUint8x16Slice(x) + b := simd.LoadUint8x16Slice(y) + g := make([]int8, n) + f(a, b).AsInt8x16().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt16x8Compare tests the simd comparison method f against the expected behavior generated by want +func testInt16x8Compare(t *testing.T, f func(_, _ simd.Int16x8) simd.Mask16x8, want func(_, _ []int16) []int64) { + n := 8 + t.Helper() + forSlicePair(t, int16s, n, func(x, y []int16) bool { + t.Helper() + a := simd.LoadInt16x8Slice(x) + b := simd.LoadInt16x8Slice(y) + g := make([]int16, n) + f(a, b).AsInt16x8().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint16x8Compare tests the simd comparison method f against the expected behavior generated by want +func testUint16x8Compare(t *testing.T, f func(_, _ simd.Uint16x8) simd.Mask16x8, want func(_, _ []uint16) []int64) { + n := 8 + t.Helper() + forSlicePair(t, uint16s, n, func(x, y []uint16) bool { + t.Helper() + a := simd.LoadUint16x8Slice(x) + b := simd.LoadUint16x8Slice(y) + g := make([]int16, n) + f(a, b).AsInt16x8().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt32x4Compare tests the simd comparison method f against the expected behavior generated by want +func testInt32x4Compare(t *testing.T, f func(_, _ simd.Int32x4) simd.Mask32x4, want func(_, _ []int32) []int64) { + n := 4 + t.Helper() + forSlicePair(t, int32s, n, func(x, y []int32) bool { + t.Helper() + a := simd.LoadInt32x4Slice(x) + b := simd.LoadInt32x4Slice(y) + g := make([]int32, n) + f(a, b).AsInt32x4().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint32x4Compare tests the simd comparison method f against the expected behavior generated by want +func testUint32x4Compare(t *testing.T, f func(_, _ simd.Uint32x4) simd.Mask32x4, want func(_, _ []uint32) []int64) { + n := 4 + t.Helper() + forSlicePair(t, uint32s, n, func(x, y []uint32) bool { + t.Helper() + a := simd.LoadUint32x4Slice(x) + b := simd.LoadUint32x4Slice(y) + g := make([]int32, n) + f(a, b).AsInt32x4().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt64x2Compare tests the simd comparison method f against the 
expected behavior generated by want +func testInt64x2Compare(t *testing.T, f func(_, _ simd.Int64x2) simd.Mask64x2, want func(_, _ []int64) []int64) { + n := 2 + t.Helper() + forSlicePair(t, int64s, n, func(x, y []int64) bool { + t.Helper() + a := simd.LoadInt64x2Slice(x) + b := simd.LoadInt64x2Slice(y) + g := make([]int64, n) + f(a, b).AsInt64x2().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint64x2Compare tests the simd comparison method f against the expected behavior generated by want +func testUint64x2Compare(t *testing.T, f func(_, _ simd.Uint64x2) simd.Mask64x2, want func(_, _ []uint64) []int64) { + n := 2 + t.Helper() + forSlicePair(t, uint64s, n, func(x, y []uint64) bool { + t.Helper() + a := simd.LoadUint64x2Slice(x) + b := simd.LoadUint64x2Slice(y) + g := make([]int64, n) + f(a, b).AsInt64x2().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testFloat32x4Compare tests the simd comparison method f against the expected behavior generated by want +func testFloat32x4Compare(t *testing.T, f func(_, _ simd.Float32x4) simd.Mask32x4, want func(_, _ []float32) []int64) { + n := 4 + t.Helper() + forSlicePair(t, float32s, n, func(x, y []float32) bool { + t.Helper() + a := simd.LoadFloat32x4Slice(x) + b := simd.LoadFloat32x4Slice(y) + g := make([]int32, n) + f(a, b).AsInt32x4().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testFloat64x2Compare tests the simd comparison method f against the expected behavior generated by want +func testFloat64x2Compare(t *testing.T, f func(_, _ simd.Float64x2) simd.Mask64x2, want func(_, _ []float64) []int64) { + n := 2 + t.Helper() + forSlicePair(t, float64s, n, func(x, y []float64) bool { + t.Helper() + a := simd.LoadFloat64x2Slice(x) + b := simd.LoadFloat64x2Slice(y) + g := make([]int64, n) + f(a, b).AsInt64x2().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt8x32Compare tests the simd comparison method f against the expected behavior generated by want +func testInt8x32Compare(t *testing.T, f func(_, _ simd.Int8x32) simd.Mask8x32, want func(_, _ []int8) []int64) { + n := 32 + t.Helper() + forSlicePair(t, int8s, n, func(x, y []int8) bool { + t.Helper() + a := simd.LoadInt8x32Slice(x) + b := simd.LoadInt8x32Slice(y) + g := make([]int8, n) + f(a, b).AsInt8x32().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint8x32Compare tests the simd comparison method f against the expected behavior generated by want +func testUint8x32Compare(t *testing.T, f func(_, _ simd.Uint8x32) simd.Mask8x32, want func(_, _ []uint8) []int64) { + n := 32 + t.Helper() + forSlicePair(t, uint8s, n, func(x, y []uint8) bool { + t.Helper() + a := simd.LoadUint8x32Slice(x) + b := simd.LoadUint8x32Slice(y) + g := make([]int8, n) + f(a, b).AsInt8x32().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt16x16Compare tests the simd comparison method f against the expected behavior generated by want +func testInt16x16Compare(t *testing.T, f func(_, _ simd.Int16x16) 
simd.Mask16x16, want func(_, _ []int16) []int64) { + n := 16 + t.Helper() + forSlicePair(t, int16s, n, func(x, y []int16) bool { + t.Helper() + a := simd.LoadInt16x16Slice(x) + b := simd.LoadInt16x16Slice(y) + g := make([]int16, n) + f(a, b).AsInt16x16().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint16x16Compare tests the simd comparison method f against the expected behavior generated by want +func testUint16x16Compare(t *testing.T, f func(_, _ simd.Uint16x16) simd.Mask16x16, want func(_, _ []uint16) []int64) { + n := 16 + t.Helper() + forSlicePair(t, uint16s, n, func(x, y []uint16) bool { + t.Helper() + a := simd.LoadUint16x16Slice(x) + b := simd.LoadUint16x16Slice(y) + g := make([]int16, n) + f(a, b).AsInt16x16().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt32x8Compare tests the simd comparison method f against the expected behavior generated by want +func testInt32x8Compare(t *testing.T, f func(_, _ simd.Int32x8) simd.Mask32x8, want func(_, _ []int32) []int64) { + n := 8 + t.Helper() + forSlicePair(t, int32s, n, func(x, y []int32) bool { + t.Helper() + a := simd.LoadInt32x8Slice(x) + b := simd.LoadInt32x8Slice(y) + g := make([]int32, n) + f(a, b).AsInt32x8().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint32x8Compare tests the simd comparison method f against the expected behavior generated by want +func testUint32x8Compare(t *testing.T, f func(_, _ simd.Uint32x8) simd.Mask32x8, want func(_, _ []uint32) []int64) { + n := 8 + t.Helper() + forSlicePair(t, uint32s, n, func(x, y []uint32) bool { + t.Helper() + a := simd.LoadUint32x8Slice(x) + b := simd.LoadUint32x8Slice(y) + g := make([]int32, n) + f(a, b).AsInt32x8().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt64x4Compare tests the simd comparison method f against the expected behavior generated by want +func testInt64x4Compare(t *testing.T, f func(_, _ simd.Int64x4) simd.Mask64x4, want func(_, _ []int64) []int64) { + n := 4 + t.Helper() + forSlicePair(t, int64s, n, func(x, y []int64) bool { + t.Helper() + a := simd.LoadInt64x4Slice(x) + b := simd.LoadInt64x4Slice(y) + g := make([]int64, n) + f(a, b).AsInt64x4().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint64x4Compare tests the simd comparison method f against the expected behavior generated by want +func testUint64x4Compare(t *testing.T, f func(_, _ simd.Uint64x4) simd.Mask64x4, want func(_, _ []uint64) []int64) { + n := 4 + t.Helper() + forSlicePair(t, uint64s, n, func(x, y []uint64) bool { + t.Helper() + a := simd.LoadUint64x4Slice(x) + b := simd.LoadUint64x4Slice(y) + g := make([]int64, n) + f(a, b).AsInt64x4().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testFloat32x8Compare tests the simd comparison method f against the expected behavior generated by want +func testFloat32x8Compare(t *testing.T, f func(_, _ simd.Float32x8) simd.Mask32x8, want func(_, _ []float32) []int64) { + n := 8 + t.Helper() + forSlicePair(t, float32s, n, 
func(x, y []float32) bool { + t.Helper() + a := simd.LoadFloat32x8Slice(x) + b := simd.LoadFloat32x8Slice(y) + g := make([]int32, n) + f(a, b).AsInt32x8().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testFloat64x4Compare tests the simd comparison method f against the expected behavior generated by want +func testFloat64x4Compare(t *testing.T, f func(_, _ simd.Float64x4) simd.Mask64x4, want func(_, _ []float64) []int64) { + n := 4 + t.Helper() + forSlicePair(t, float64s, n, func(x, y []float64) bool { + t.Helper() + a := simd.LoadFloat64x4Slice(x) + b := simd.LoadFloat64x4Slice(y) + g := make([]int64, n) + f(a, b).AsInt64x4().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt8x64Compare tests the simd comparison method f against the expected behavior generated by want +func testInt8x64Compare(t *testing.T, f func(_, _ simd.Int8x64) simd.Mask8x64, want func(_, _ []int8) []int64) { + n := 64 + t.Helper() + forSlicePair(t, int8s, n, func(x, y []int8) bool { + t.Helper() + a := simd.LoadInt8x64Slice(x) + b := simd.LoadInt8x64Slice(y) + g := make([]int8, n) + f(a, b).AsInt8x64().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint8x64Compare tests the simd comparison method f against the expected behavior generated by want +func testUint8x64Compare(t *testing.T, f func(_, _ simd.Uint8x64) simd.Mask8x64, want func(_, _ []uint8) []int64) { + n := 64 + t.Helper() + forSlicePair(t, uint8s, n, func(x, y []uint8) bool { + t.Helper() + a := simd.LoadUint8x64Slice(x) + b := simd.LoadUint8x64Slice(y) + g := make([]int8, n) + f(a, b).AsInt8x64().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt16x32Compare tests the simd comparison method f against the expected behavior generated by want +func testInt16x32Compare(t *testing.T, f func(_, _ simd.Int16x32) simd.Mask16x32, want func(_, _ []int16) []int64) { + n := 32 + t.Helper() + forSlicePair(t, int16s, n, func(x, y []int16) bool { + t.Helper() + a := simd.LoadInt16x32Slice(x) + b := simd.LoadInt16x32Slice(y) + g := make([]int16, n) + f(a, b).AsInt16x32().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint16x32Compare tests the simd comparison method f against the expected behavior generated by want +func testUint16x32Compare(t *testing.T, f func(_, _ simd.Uint16x32) simd.Mask16x32, want func(_, _ []uint16) []int64) { + n := 32 + t.Helper() + forSlicePair(t, uint16s, n, func(x, y []uint16) bool { + t.Helper() + a := simd.LoadUint16x32Slice(x) + b := simd.LoadUint16x32Slice(y) + g := make([]int16, n) + f(a, b).AsInt16x32().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt32x16Compare tests the simd comparison method f against the expected behavior generated by want +func testInt32x16Compare(t *testing.T, f func(_, _ simd.Int32x16) simd.Mask32x16, want func(_, _ []int32) []int64) { + n := 16 + t.Helper() + forSlicePair(t, int32s, n, func(x, y []int32) bool { + t.Helper() + a := simd.LoadInt32x16Slice(x) + b := 
simd.LoadInt32x16Slice(y) + g := make([]int32, n) + f(a, b).AsInt32x16().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint32x16Compare tests the simd comparison method f against the expected behavior generated by want +func testUint32x16Compare(t *testing.T, f func(_, _ simd.Uint32x16) simd.Mask32x16, want func(_, _ []uint32) []int64) { + n := 16 + t.Helper() + forSlicePair(t, uint32s, n, func(x, y []uint32) bool { + t.Helper() + a := simd.LoadUint32x16Slice(x) + b := simd.LoadUint32x16Slice(y) + g := make([]int32, n) + f(a, b).AsInt32x16().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt64x8Compare tests the simd comparison method f against the expected behavior generated by want +func testInt64x8Compare(t *testing.T, f func(_, _ simd.Int64x8) simd.Mask64x8, want func(_, _ []int64) []int64) { + n := 8 + t.Helper() + forSlicePair(t, int64s, n, func(x, y []int64) bool { + t.Helper() + a := simd.LoadInt64x8Slice(x) + b := simd.LoadInt64x8Slice(y) + g := make([]int64, n) + f(a, b).AsInt64x8().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint64x8Compare tests the simd comparison method f against the expected behavior generated by want +func testUint64x8Compare(t *testing.T, f func(_, _ simd.Uint64x8) simd.Mask64x8, want func(_, _ []uint64) []int64) { + n := 8 + t.Helper() + forSlicePair(t, uint64s, n, func(x, y []uint64) bool { + t.Helper() + a := simd.LoadUint64x8Slice(x) + b := simd.LoadUint64x8Slice(y) + g := make([]int64, n) + f(a, b).AsInt64x8().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testFloat32x16Compare tests the simd comparison method f against the expected behavior generated by want +func testFloat32x16Compare(t *testing.T, f func(_, _ simd.Float32x16) simd.Mask32x16, want func(_, _ []float32) []int64) { + n := 16 + t.Helper() + forSlicePair(t, float32s, n, func(x, y []float32) bool { + t.Helper() + a := simd.LoadFloat32x16Slice(x) + b := simd.LoadFloat32x16Slice(y) + g := make([]int32, n) + f(a, b).AsInt32x16().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testFloat64x8Compare tests the simd comparison method f against the expected behavior generated by want +func testFloat64x8Compare(t *testing.T, f func(_, _ simd.Float64x8) simd.Mask64x8, want func(_, _ []float64) []int64) { + n := 8 + t.Helper() + forSlicePair(t, float64s, n, func(x, y []float64) bool { + t.Helper() + a := simd.LoadFloat64x8Slice(x) + b := simd.LoadFloat64x8Slice(y) + g := make([]int64, n) + f(a, b).AsInt64x8().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} diff --git a/src/simd/compare_test.go b/src/simd/compare_test.go new file mode 100644 index 00000000000000..19b1f3886d7d1b --- /dev/null +++ b/src/simd/compare_test.go @@ -0,0 +1,295 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build goexperiment.simd && amd64 + +package simd_test + +import ( + "simd" + "testing" +) + +// AVX 2 lacks most comparisons, but they can be synthesized +// from > and = +var comparisonFixed bool = simd.HasAVX512() + +func TestLessMasked(t *testing.T) { + if simd.HasAVX512() { + testFloat32x4CompareMasked(t, simd.Float32x4.LessMasked, lessSlice[float32]) + testFloat32x8CompareMasked(t, simd.Float32x8.LessMasked, lessSlice[float32]) + testFloat64x2CompareMasked(t, simd.Float64x2.LessMasked, lessSlice[float64]) + testFloat64x4CompareMasked(t, simd.Float64x4.LessMasked, lessSlice[float64]) + + testInt16x16CompareMasked(t, simd.Int16x16.LessMasked, lessSlice[int16]) + testInt16x8CompareMasked(t, simd.Int16x8.LessMasked, lessSlice[int16]) + testInt32x4CompareMasked(t, simd.Int32x4.LessMasked, lessSlice[int32]) + testInt32x8CompareMasked(t, simd.Int32x8.LessMasked, lessSlice[int32]) + testInt64x2CompareMasked(t, simd.Int64x2.LessMasked, lessSlice[int64]) + testInt64x4CompareMasked(t, simd.Int64x4.LessMasked, lessSlice[int64]) + testInt8x16CompareMasked(t, simd.Int8x16.LessMasked, lessSlice[int8]) + testInt8x32CompareMasked(t, simd.Int8x32.LessMasked, lessSlice[int8]) + + testUint16x16CompareMasked(t, simd.Uint16x16.LessMasked, lessSlice[uint16]) + testUint16x8CompareMasked(t, simd.Uint16x8.LessMasked, lessSlice[uint16]) + testUint32x4CompareMasked(t, simd.Uint32x4.LessMasked, lessSlice[uint32]) + testUint32x8CompareMasked(t, simd.Uint32x8.LessMasked, lessSlice[uint32]) + testUint64x2CompareMasked(t, simd.Uint64x2.LessMasked, lessSlice[uint64]) + testUint64x4CompareMasked(t, simd.Uint64x4.LessMasked, lessSlice[uint64]) + testUint8x16CompareMasked(t, simd.Uint8x16.LessMasked, lessSlice[uint8]) + testUint8x32CompareMasked(t, simd.Uint8x32.LessMasked, lessSlice[uint8]) + + testFloat32x16CompareMasked(t, simd.Float32x16.LessMasked, lessSlice[float32]) + testFloat64x8CompareMasked(t, simd.Float64x8.LessMasked, lessSlice[float64]) + testInt8x64CompareMasked(t, simd.Int8x64.LessMasked, lessSlice[int8]) + testInt16x32CompareMasked(t, simd.Int16x32.LessMasked, lessSlice[int16]) + testInt32x16CompareMasked(t, simd.Int32x16.LessMasked, lessSlice[int32]) + testInt64x8CompareMasked(t, simd.Int64x8.LessMasked, lessSlice[int64]) + testUint8x64CompareMasked(t, simd.Uint8x64.LessMasked, lessSlice[uint8]) + testUint16x32CompareMasked(t, simd.Uint16x32.LessMasked, lessSlice[uint16]) + testUint32x16CompareMasked(t, simd.Uint32x16.LessMasked, lessSlice[uint32]) + testUint64x8CompareMasked(t, simd.Uint64x8.LessMasked, lessSlice[uint64]) + } +} + +func TestLess(t *testing.T) { + testFloat32x4Compare(t, simd.Float32x4.Less, lessSlice[float32]) + testFloat32x8Compare(t, simd.Float32x8.Less, lessSlice[float32]) + testFloat64x2Compare(t, simd.Float64x2.Less, lessSlice[float64]) + testFloat64x4Compare(t, simd.Float64x4.Less, lessSlice[float64]) + + if comparisonFixed { + testInt16x16Compare(t, simd.Int16x16.Less, lessSlice[int16]) + testInt16x8Compare(t, simd.Int16x8.Less, lessSlice[int16]) + testInt32x4Compare(t, simd.Int32x4.Less, lessSlice[int32]) + testInt32x8Compare(t, simd.Int32x8.Less, lessSlice[int32]) + testInt64x2Compare(t, simd.Int64x2.Less, lessSlice[int64]) + testInt64x4Compare(t, simd.Int64x4.Less, lessSlice[int64]) + testInt8x16Compare(t, simd.Int8x16.Less, lessSlice[int8]) + testInt8x32Compare(t, simd.Int8x32.Less, lessSlice[int8]) + + } + + if simd.HasAVX512() { + testUint16x16Compare(t, simd.Uint16x16.Less, lessSlice[uint16]) + testUint16x8Compare(t, simd.Uint16x8.Less, lessSlice[uint16]) + 
testUint32x4Compare(t, simd.Uint32x4.Less, lessSlice[uint32]) + testUint32x8Compare(t, simd.Uint32x8.Less, lessSlice[uint32]) + testUint64x2Compare(t, simd.Uint64x2.Less, lessSlice[uint64]) + testUint64x4Compare(t, simd.Uint64x4.Less, lessSlice[uint64]) + testUint8x16Compare(t, simd.Uint8x16.Less, lessSlice[uint8]) + testUint8x32Compare(t, simd.Uint8x32.Less, lessSlice[uint8]) + + testFloat32x16Compare(t, simd.Float32x16.Less, lessSlice[float32]) + testFloat64x8Compare(t, simd.Float64x8.Less, lessSlice[float64]) + testInt8x64Compare(t, simd.Int8x64.Less, lessSlice[int8]) + testInt16x32Compare(t, simd.Int16x32.Less, lessSlice[int16]) + testInt32x16Compare(t, simd.Int32x16.Less, lessSlice[int32]) + testInt64x8Compare(t, simd.Int64x8.Less, lessSlice[int64]) + testUint8x64Compare(t, simd.Uint8x64.Less, lessSlice[uint8]) + testUint16x32Compare(t, simd.Uint16x32.Less, lessSlice[uint16]) + testUint32x16Compare(t, simd.Uint32x16.Less, lessSlice[uint32]) + testUint64x8Compare(t, simd.Uint64x8.Less, lessSlice[uint64]) + } +} + +func TestLessEqual(t *testing.T) { + testFloat32x4Compare(t, simd.Float32x4.LessEqual, lessEqualSlice[float32]) + testFloat32x8Compare(t, simd.Float32x8.LessEqual, lessEqualSlice[float32]) + testFloat64x2Compare(t, simd.Float64x2.LessEqual, lessEqualSlice[float64]) + testFloat64x4Compare(t, simd.Float64x4.LessEqual, lessEqualSlice[float64]) + + if comparisonFixed { + testInt16x16Compare(t, simd.Int16x16.LessEqual, lessEqualSlice[int16]) + testInt16x8Compare(t, simd.Int16x8.LessEqual, lessEqualSlice[int16]) + testInt32x4Compare(t, simd.Int32x4.LessEqual, lessEqualSlice[int32]) + testInt32x8Compare(t, simd.Int32x8.LessEqual, lessEqualSlice[int32]) + testInt64x2Compare(t, simd.Int64x2.LessEqual, lessEqualSlice[int64]) + testInt64x4Compare(t, simd.Int64x4.LessEqual, lessEqualSlice[int64]) + testInt8x16Compare(t, simd.Int8x16.LessEqual, lessEqualSlice[int8]) + testInt8x32Compare(t, simd.Int8x32.LessEqual, lessEqualSlice[int8]) + + } + + if simd.HasAVX512() { + testUint16x16Compare(t, simd.Uint16x16.LessEqual, lessEqualSlice[uint16]) + testUint16x8Compare(t, simd.Uint16x8.LessEqual, lessEqualSlice[uint16]) + testUint32x4Compare(t, simd.Uint32x4.LessEqual, lessEqualSlice[uint32]) + testUint32x8Compare(t, simd.Uint32x8.LessEqual, lessEqualSlice[uint32]) + testUint64x2Compare(t, simd.Uint64x2.LessEqual, lessEqualSlice[uint64]) + testUint64x4Compare(t, simd.Uint64x4.LessEqual, lessEqualSlice[uint64]) + testUint8x16Compare(t, simd.Uint8x16.LessEqual, lessEqualSlice[uint8]) + testUint8x32Compare(t, simd.Uint8x32.LessEqual, lessEqualSlice[uint8]) + + testFloat32x16Compare(t, simd.Float32x16.LessEqual, lessEqualSlice[float32]) + testFloat64x8Compare(t, simd.Float64x8.LessEqual, lessEqualSlice[float64]) + testInt8x64Compare(t, simd.Int8x64.LessEqual, lessEqualSlice[int8]) + testInt16x32Compare(t, simd.Int16x32.LessEqual, lessEqualSlice[int16]) + testInt32x16Compare(t, simd.Int32x16.LessEqual, lessEqualSlice[int32]) + testInt64x8Compare(t, simd.Int64x8.LessEqual, lessEqualSlice[int64]) + testUint8x64Compare(t, simd.Uint8x64.LessEqual, lessEqualSlice[uint8]) + testUint16x32Compare(t, simd.Uint16x32.LessEqual, lessEqualSlice[uint16]) + testUint32x16Compare(t, simd.Uint32x16.LessEqual, lessEqualSlice[uint32]) + testUint64x8Compare(t, simd.Uint64x8.LessEqual, lessEqualSlice[uint64]) + } +} + +func TestGreater(t *testing.T) { + testFloat32x4Compare(t, simd.Float32x4.Greater, greaterSlice[float32]) + testFloat32x8Compare(t, simd.Float32x8.Greater, greaterSlice[float32]) + testFloat64x2Compare(t, 
simd.Float64x2.Greater, greaterSlice[float64]) + testFloat64x4Compare(t, simd.Float64x4.Greater, greaterSlice[float64]) + + testInt16x16Compare(t, simd.Int16x16.Greater, greaterSlice[int16]) + testInt16x8Compare(t, simd.Int16x8.Greater, greaterSlice[int16]) + testInt32x4Compare(t, simd.Int32x4.Greater, greaterSlice[int32]) + testInt32x8Compare(t, simd.Int32x8.Greater, greaterSlice[int32]) + + testInt64x2Compare(t, simd.Int64x2.Greater, greaterSlice[int64]) + testInt64x4Compare(t, simd.Int64x4.Greater, greaterSlice[int64]) + testInt8x16Compare(t, simd.Int8x16.Greater, greaterSlice[int8]) + testInt8x32Compare(t, simd.Int8x32.Greater, greaterSlice[int8]) + + if simd.HasAVX512() { + testUint16x16Compare(t, simd.Uint16x16.Greater, greaterSlice[uint16]) + testUint16x8Compare(t, simd.Uint16x8.Greater, greaterSlice[uint16]) + testUint32x4Compare(t, simd.Uint32x4.Greater, greaterSlice[uint32]) + testUint32x8Compare(t, simd.Uint32x8.Greater, greaterSlice[uint32]) + + testUint64x2Compare(t, simd.Uint64x2.Greater, greaterSlice[uint64]) + testUint64x4Compare(t, simd.Uint64x4.Greater, greaterSlice[uint64]) + testUint8x16Compare(t, simd.Uint8x16.Greater, greaterSlice[uint8]) + testUint8x32Compare(t, simd.Uint8x32.Greater, greaterSlice[uint8]) + + testFloat32x16Compare(t, simd.Float32x16.Greater, greaterSlice[float32]) + testFloat64x8Compare(t, simd.Float64x8.Greater, greaterSlice[float64]) + testInt8x64Compare(t, simd.Int8x64.Greater, greaterSlice[int8]) + testInt16x32Compare(t, simd.Int16x32.Greater, greaterSlice[int16]) + testInt32x16Compare(t, simd.Int32x16.Greater, greaterSlice[int32]) + testInt64x8Compare(t, simd.Int64x8.Greater, greaterSlice[int64]) + testUint8x64Compare(t, simd.Uint8x64.Greater, greaterSlice[uint8]) + testUint16x32Compare(t, simd.Uint16x32.Greater, greaterSlice[uint16]) + testUint32x16Compare(t, simd.Uint32x16.Greater, greaterSlice[uint32]) + testUint64x8Compare(t, simd.Uint64x8.Greater, greaterSlice[uint64]) + } +} + +func TestGreaterEqual(t *testing.T) { + testFloat32x4Compare(t, simd.Float32x4.GreaterEqual, greaterEqualSlice[float32]) + testFloat32x8Compare(t, simd.Float32x8.GreaterEqual, greaterEqualSlice[float32]) + testFloat64x2Compare(t, simd.Float64x2.GreaterEqual, greaterEqualSlice[float64]) + testFloat64x4Compare(t, simd.Float64x4.GreaterEqual, greaterEqualSlice[float64]) + + if comparisonFixed { + testInt16x16Compare(t, simd.Int16x16.GreaterEqual, greaterEqualSlice[int16]) + testInt16x8Compare(t, simd.Int16x8.GreaterEqual, greaterEqualSlice[int16]) + testInt32x4Compare(t, simd.Int32x4.GreaterEqual, greaterEqualSlice[int32]) + testInt32x8Compare(t, simd.Int32x8.GreaterEqual, greaterEqualSlice[int32]) + testInt64x2Compare(t, simd.Int64x2.GreaterEqual, greaterEqualSlice[int64]) + testInt64x4Compare(t, simd.Int64x4.GreaterEqual, greaterEqualSlice[int64]) + testInt8x16Compare(t, simd.Int8x16.GreaterEqual, greaterEqualSlice[int8]) + testInt8x32Compare(t, simd.Int8x32.GreaterEqual, greaterEqualSlice[int8]) + + } + + if simd.HasAVX512() { + testUint16x16Compare(t, simd.Uint16x16.GreaterEqual, greaterEqualSlice[uint16]) + testUint16x8Compare(t, simd.Uint16x8.GreaterEqual, greaterEqualSlice[uint16]) + testUint32x4Compare(t, simd.Uint32x4.GreaterEqual, greaterEqualSlice[uint32]) + testUint32x8Compare(t, simd.Uint32x8.GreaterEqual, greaterEqualSlice[uint32]) + testUint64x2Compare(t, simd.Uint64x2.GreaterEqual, greaterEqualSlice[uint64]) + testUint64x4Compare(t, simd.Uint64x4.GreaterEqual, greaterEqualSlice[uint64]) + testUint8x16Compare(t, simd.Uint8x16.GreaterEqual, 
greaterEqualSlice[uint8]) + testUint8x32Compare(t, simd.Uint8x32.GreaterEqual, greaterEqualSlice[uint8]) + + testFloat32x16Compare(t, simd.Float32x16.GreaterEqual, greaterEqualSlice[float32]) + testFloat64x8Compare(t, simd.Float64x8.GreaterEqual, greaterEqualSlice[float64]) + testInt8x64Compare(t, simd.Int8x64.GreaterEqual, greaterEqualSlice[int8]) + testInt16x32Compare(t, simd.Int16x32.GreaterEqual, greaterEqualSlice[int16]) + testInt32x16Compare(t, simd.Int32x16.GreaterEqual, greaterEqualSlice[int32]) + testInt64x8Compare(t, simd.Int64x8.GreaterEqual, greaterEqualSlice[int64]) + testUint8x64Compare(t, simd.Uint8x64.GreaterEqual, greaterEqualSlice[uint8]) + testUint16x32Compare(t, simd.Uint16x32.GreaterEqual, greaterEqualSlice[uint16]) + testUint32x16Compare(t, simd.Uint32x16.GreaterEqual, greaterEqualSlice[uint32]) + testUint64x8Compare(t, simd.Uint64x8.GreaterEqual, greaterEqualSlice[uint64]) + } +} + +func TestEqual(t *testing.T) { + testFloat32x4Compare(t, simd.Float32x4.Equal, equalSlice[float32]) + testFloat32x8Compare(t, simd.Float32x8.Equal, equalSlice[float32]) + testFloat64x2Compare(t, simd.Float64x2.Equal, equalSlice[float64]) + testFloat64x4Compare(t, simd.Float64x4.Equal, equalSlice[float64]) + + testInt16x16Compare(t, simd.Int16x16.Equal, equalSlice[int16]) + testInt16x8Compare(t, simd.Int16x8.Equal, equalSlice[int16]) + testInt32x4Compare(t, simd.Int32x4.Equal, equalSlice[int32]) + testInt32x8Compare(t, simd.Int32x8.Equal, equalSlice[int32]) + testInt64x2Compare(t, simd.Int64x2.Equal, equalSlice[int64]) + testInt64x4Compare(t, simd.Int64x4.Equal, equalSlice[int64]) + testInt8x16Compare(t, simd.Int8x16.Equal, equalSlice[int8]) + testInt8x32Compare(t, simd.Int8x32.Equal, equalSlice[int8]) + + testUint16x16Compare(t, simd.Uint16x16.Equal, equalSlice[uint16]) + testUint16x8Compare(t, simd.Uint16x8.Equal, equalSlice[uint16]) + testUint32x4Compare(t, simd.Uint32x4.Equal, equalSlice[uint32]) + testUint32x8Compare(t, simd.Uint32x8.Equal, equalSlice[uint32]) + testUint64x2Compare(t, simd.Uint64x2.Equal, equalSlice[uint64]) + testUint64x4Compare(t, simd.Uint64x4.Equal, equalSlice[uint64]) + testUint8x16Compare(t, simd.Uint8x16.Equal, equalSlice[uint8]) + testUint8x32Compare(t, simd.Uint8x32.Equal, equalSlice[uint8]) + + if simd.HasAVX512() { + testFloat32x16Compare(t, simd.Float32x16.Equal, equalSlice[float32]) + testFloat64x8Compare(t, simd.Float64x8.Equal, equalSlice[float64]) + testInt8x64Compare(t, simd.Int8x64.Equal, equalSlice[int8]) + testInt16x32Compare(t, simd.Int16x32.Equal, equalSlice[int16]) + testInt32x16Compare(t, simd.Int32x16.Equal, equalSlice[int32]) + testInt64x8Compare(t, simd.Int64x8.Equal, equalSlice[int64]) + testUint8x64Compare(t, simd.Uint8x64.Equal, equalSlice[uint8]) + testUint16x32Compare(t, simd.Uint16x32.Equal, equalSlice[uint16]) + testUint32x16Compare(t, simd.Uint32x16.Equal, equalSlice[uint32]) + testUint64x8Compare(t, simd.Uint64x8.Equal, equalSlice[uint64]) + } +} + +func TestNotEqual(t *testing.T) { + testFloat32x4Compare(t, simd.Float32x4.NotEqual, notEqualSlice[float32]) + testFloat32x8Compare(t, simd.Float32x8.NotEqual, notEqualSlice[float32]) + testFloat64x2Compare(t, simd.Float64x2.NotEqual, notEqualSlice[float64]) + testFloat64x4Compare(t, simd.Float64x4.NotEqual, notEqualSlice[float64]) + + if comparisonFixed { + testInt16x16Compare(t, simd.Int16x16.NotEqual, notEqualSlice[int16]) + testInt16x8Compare(t, simd.Int16x8.NotEqual, notEqualSlice[int16]) + testInt32x4Compare(t, simd.Int32x4.NotEqual, notEqualSlice[int32]) + testInt32x8Compare(t, 
simd.Int32x8.NotEqual, notEqualSlice[int32]) + testInt64x2Compare(t, simd.Int64x2.NotEqual, notEqualSlice[int64]) + testInt64x4Compare(t, simd.Int64x4.NotEqual, notEqualSlice[int64]) + testInt8x16Compare(t, simd.Int8x16.NotEqual, notEqualSlice[int8]) + testInt8x32Compare(t, simd.Int8x32.NotEqual, notEqualSlice[int8]) + + testUint16x16Compare(t, simd.Uint16x16.NotEqual, notEqualSlice[uint16]) + testUint16x8Compare(t, simd.Uint16x8.NotEqual, notEqualSlice[uint16]) + testUint32x4Compare(t, simd.Uint32x4.NotEqual, notEqualSlice[uint32]) + testUint32x8Compare(t, simd.Uint32x8.NotEqual, notEqualSlice[uint32]) + testUint64x2Compare(t, simd.Uint64x2.NotEqual, notEqualSlice[uint64]) + testUint64x4Compare(t, simd.Uint64x4.NotEqual, notEqualSlice[uint64]) + testUint8x16Compare(t, simd.Uint8x16.NotEqual, notEqualSlice[uint8]) + testUint8x32Compare(t, simd.Uint8x32.NotEqual, notEqualSlice[uint8]) + } + + if simd.HasAVX512() { + testFloat32x16Compare(t, simd.Float32x16.NotEqual, notEqualSlice[float32]) + testFloat64x8Compare(t, simd.Float64x8.NotEqual, notEqualSlice[float64]) + testInt8x64Compare(t, simd.Int8x64.NotEqual, notEqualSlice[int8]) + testInt16x32Compare(t, simd.Int16x32.NotEqual, notEqualSlice[int16]) + testInt32x16Compare(t, simd.Int32x16.NotEqual, notEqualSlice[int32]) + testInt64x8Compare(t, simd.Int64x8.NotEqual, notEqualSlice[int64]) + testUint8x64Compare(t, simd.Uint8x64.NotEqual, notEqualSlice[uint8]) + testUint16x32Compare(t, simd.Uint16x32.NotEqual, notEqualSlice[uint16]) + testUint32x16Compare(t, simd.Uint32x16.NotEqual, notEqualSlice[uint32]) + testUint64x8Compare(t, simd.Uint64x8.NotEqual, notEqualSlice[uint64]) + } +} diff --git a/src/simd/comparemasked_helpers_test.go b/src/simd/comparemasked_helpers_test.go new file mode 100644 index 00000000000000..5a70f92f265805 --- /dev/null +++ b/src/simd/comparemasked_helpers_test.go @@ -0,0 +1,734 @@ +// Code generated by 'go run genfiles.go'; DO NOT EDIT. + +//go:build goexperiment.simd + +// This file contains functions testing simd methods that compare two operands under a mask. +// Each function in this file is specialized for a +// particular simd type x. + +package simd_test + +import ( + "simd" + "testing" +) + +// testInt8x16CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testInt8x16CompareMasked(t *testing.T, + f func(_, _ simd.Int8x16, m simd.Mask8x16) simd.Mask8x16, + want func(_, _ []int8) []int64) { + n := 16 + t.Helper() + forSlicePairMasked(t, int8s, n, func(x, y []int8, m []bool) bool { + t.Helper() + a := simd.LoadInt8x16Slice(x) + b := simd.LoadInt8x16Slice(y) + k := simd.LoadInt8x16Slice(toVect[int8](m)).AsMask8x16() + g := make([]int8, n) + f(a, b, k).AsInt8x16().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testUint8x16CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. 
+func testUint8x16CompareMasked(t *testing.T, + f func(_, _ simd.Uint8x16, m simd.Mask8x16) simd.Mask8x16, + want func(_, _ []uint8) []int64) { + n := 16 + t.Helper() + forSlicePairMasked(t, uint8s, n, func(x, y []uint8, m []bool) bool { + t.Helper() + a := simd.LoadUint8x16Slice(x) + b := simd.LoadUint8x16Slice(y) + k := simd.LoadInt8x16Slice(toVect[int8](m)).AsMask8x16() + g := make([]int8, n) + f(a, b, k).AsInt8x16().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testInt16x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testInt16x8CompareMasked(t *testing.T, + f func(_, _ simd.Int16x8, m simd.Mask16x8) simd.Mask16x8, + want func(_, _ []int16) []int64) { + n := 8 + t.Helper() + forSlicePairMasked(t, int16s, n, func(x, y []int16, m []bool) bool { + t.Helper() + a := simd.LoadInt16x8Slice(x) + b := simd.LoadInt16x8Slice(y) + k := simd.LoadInt16x8Slice(toVect[int16](m)).AsMask16x8() + g := make([]int16, n) + f(a, b, k).AsInt16x8().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testUint16x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testUint16x8CompareMasked(t *testing.T, + f func(_, _ simd.Uint16x8, m simd.Mask16x8) simd.Mask16x8, + want func(_, _ []uint16) []int64) { + n := 8 + t.Helper() + forSlicePairMasked(t, uint16s, n, func(x, y []uint16, m []bool) bool { + t.Helper() + a := simd.LoadUint16x8Slice(x) + b := simd.LoadUint16x8Slice(y) + k := simd.LoadInt16x8Slice(toVect[int16](m)).AsMask16x8() + g := make([]int16, n) + f(a, b, k).AsInt16x8().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testInt32x4CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testInt32x4CompareMasked(t *testing.T, + f func(_, _ simd.Int32x4, m simd.Mask32x4) simd.Mask32x4, + want func(_, _ []int32) []int64) { + n := 4 + t.Helper() + forSlicePairMasked(t, int32s, n, func(x, y []int32, m []bool) bool { + t.Helper() + a := simd.LoadInt32x4Slice(x) + b := simd.LoadInt32x4Slice(y) + k := simd.LoadInt32x4Slice(toVect[int32](m)).AsMask32x4() + g := make([]int32, n) + f(a, b, k).AsInt32x4().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testUint32x4CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. 
+func testUint32x4CompareMasked(t *testing.T, + f func(_, _ simd.Uint32x4, m simd.Mask32x4) simd.Mask32x4, + want func(_, _ []uint32) []int64) { + n := 4 + t.Helper() + forSlicePairMasked(t, uint32s, n, func(x, y []uint32, m []bool) bool { + t.Helper() + a := simd.LoadUint32x4Slice(x) + b := simd.LoadUint32x4Slice(y) + k := simd.LoadInt32x4Slice(toVect[int32](m)).AsMask32x4() + g := make([]int32, n) + f(a, b, k).AsInt32x4().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testInt64x2CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testInt64x2CompareMasked(t *testing.T, + f func(_, _ simd.Int64x2, m simd.Mask64x2) simd.Mask64x2, + want func(_, _ []int64) []int64) { + n := 2 + t.Helper() + forSlicePairMasked(t, int64s, n, func(x, y []int64, m []bool) bool { + t.Helper() + a := simd.LoadInt64x2Slice(x) + b := simd.LoadInt64x2Slice(y) + k := simd.LoadInt64x2Slice(toVect[int64](m)).AsMask64x2() + g := make([]int64, n) + f(a, b, k).AsInt64x2().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testUint64x2CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testUint64x2CompareMasked(t *testing.T, + f func(_, _ simd.Uint64x2, m simd.Mask64x2) simd.Mask64x2, + want func(_, _ []uint64) []int64) { + n := 2 + t.Helper() + forSlicePairMasked(t, uint64s, n, func(x, y []uint64, m []bool) bool { + t.Helper() + a := simd.LoadUint64x2Slice(x) + b := simd.LoadUint64x2Slice(y) + k := simd.LoadInt64x2Slice(toVect[int64](m)).AsMask64x2() + g := make([]int64, n) + f(a, b, k).AsInt64x2().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testFloat32x4CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testFloat32x4CompareMasked(t *testing.T, + f func(_, _ simd.Float32x4, m simd.Mask32x4) simd.Mask32x4, + want func(_, _ []float32) []int64) { + n := 4 + t.Helper() + forSlicePairMasked(t, float32s, n, func(x, y []float32, m []bool) bool { + t.Helper() + a := simd.LoadFloat32x4Slice(x) + b := simd.LoadFloat32x4Slice(y) + k := simd.LoadInt32x4Slice(toVect[int32](m)).AsMask32x4() + g := make([]int32, n) + f(a, b, k).AsInt32x4().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testFloat64x2CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. 
+func testFloat64x2CompareMasked(t *testing.T, + f func(_, _ simd.Float64x2, m simd.Mask64x2) simd.Mask64x2, + want func(_, _ []float64) []int64) { + n := 2 + t.Helper() + forSlicePairMasked(t, float64s, n, func(x, y []float64, m []bool) bool { + t.Helper() + a := simd.LoadFloat64x2Slice(x) + b := simd.LoadFloat64x2Slice(y) + k := simd.LoadInt64x2Slice(toVect[int64](m)).AsMask64x2() + g := make([]int64, n) + f(a, b, k).AsInt64x2().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testInt8x32CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testInt8x32CompareMasked(t *testing.T, + f func(_, _ simd.Int8x32, m simd.Mask8x32) simd.Mask8x32, + want func(_, _ []int8) []int64) { + n := 32 + t.Helper() + forSlicePairMasked(t, int8s, n, func(x, y []int8, m []bool) bool { + t.Helper() + a := simd.LoadInt8x32Slice(x) + b := simd.LoadInt8x32Slice(y) + k := simd.LoadInt8x32Slice(toVect[int8](m)).AsMask8x32() + g := make([]int8, n) + f(a, b, k).AsInt8x32().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testUint8x32CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testUint8x32CompareMasked(t *testing.T, + f func(_, _ simd.Uint8x32, m simd.Mask8x32) simd.Mask8x32, + want func(_, _ []uint8) []int64) { + n := 32 + t.Helper() + forSlicePairMasked(t, uint8s, n, func(x, y []uint8, m []bool) bool { + t.Helper() + a := simd.LoadUint8x32Slice(x) + b := simd.LoadUint8x32Slice(y) + k := simd.LoadInt8x32Slice(toVect[int8](m)).AsMask8x32() + g := make([]int8, n) + f(a, b, k).AsInt8x32().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testInt16x16CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testInt16x16CompareMasked(t *testing.T, + f func(_, _ simd.Int16x16, m simd.Mask16x16) simd.Mask16x16, + want func(_, _ []int16) []int64) { + n := 16 + t.Helper() + forSlicePairMasked(t, int16s, n, func(x, y []int16, m []bool) bool { + t.Helper() + a := simd.LoadInt16x16Slice(x) + b := simd.LoadInt16x16Slice(y) + k := simd.LoadInt16x16Slice(toVect[int16](m)).AsMask16x16() + g := make([]int16, n) + f(a, b, k).AsInt16x16().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testUint16x16CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. 
+func testUint16x16CompareMasked(t *testing.T, + f func(_, _ simd.Uint16x16, m simd.Mask16x16) simd.Mask16x16, + want func(_, _ []uint16) []int64) { + n := 16 + t.Helper() + forSlicePairMasked(t, uint16s, n, func(x, y []uint16, m []bool) bool { + t.Helper() + a := simd.LoadUint16x16Slice(x) + b := simd.LoadUint16x16Slice(y) + k := simd.LoadInt16x16Slice(toVect[int16](m)).AsMask16x16() + g := make([]int16, n) + f(a, b, k).AsInt16x16().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testInt32x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testInt32x8CompareMasked(t *testing.T, + f func(_, _ simd.Int32x8, m simd.Mask32x8) simd.Mask32x8, + want func(_, _ []int32) []int64) { + n := 8 + t.Helper() + forSlicePairMasked(t, int32s, n, func(x, y []int32, m []bool) bool { + t.Helper() + a := simd.LoadInt32x8Slice(x) + b := simd.LoadInt32x8Slice(y) + k := simd.LoadInt32x8Slice(toVect[int32](m)).AsMask32x8() + g := make([]int32, n) + f(a, b, k).AsInt32x8().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testUint32x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testUint32x8CompareMasked(t *testing.T, + f func(_, _ simd.Uint32x8, m simd.Mask32x8) simd.Mask32x8, + want func(_, _ []uint32) []int64) { + n := 8 + t.Helper() + forSlicePairMasked(t, uint32s, n, func(x, y []uint32, m []bool) bool { + t.Helper() + a := simd.LoadUint32x8Slice(x) + b := simd.LoadUint32x8Slice(y) + k := simd.LoadInt32x8Slice(toVect[int32](m)).AsMask32x8() + g := make([]int32, n) + f(a, b, k).AsInt32x8().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testInt64x4CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testInt64x4CompareMasked(t *testing.T, + f func(_, _ simd.Int64x4, m simd.Mask64x4) simd.Mask64x4, + want func(_, _ []int64) []int64) { + n := 4 + t.Helper() + forSlicePairMasked(t, int64s, n, func(x, y []int64, m []bool) bool { + t.Helper() + a := simd.LoadInt64x4Slice(x) + b := simd.LoadInt64x4Slice(y) + k := simd.LoadInt64x4Slice(toVect[int64](m)).AsMask64x4() + g := make([]int64, n) + f(a, b, k).AsInt64x4().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testUint64x4CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. 
+func testUint64x4CompareMasked(t *testing.T, + f func(_, _ simd.Uint64x4, m simd.Mask64x4) simd.Mask64x4, + want func(_, _ []uint64) []int64) { + n := 4 + t.Helper() + forSlicePairMasked(t, uint64s, n, func(x, y []uint64, m []bool) bool { + t.Helper() + a := simd.LoadUint64x4Slice(x) + b := simd.LoadUint64x4Slice(y) + k := simd.LoadInt64x4Slice(toVect[int64](m)).AsMask64x4() + g := make([]int64, n) + f(a, b, k).AsInt64x4().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testFloat32x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testFloat32x8CompareMasked(t *testing.T, + f func(_, _ simd.Float32x8, m simd.Mask32x8) simd.Mask32x8, + want func(_, _ []float32) []int64) { + n := 8 + t.Helper() + forSlicePairMasked(t, float32s, n, func(x, y []float32, m []bool) bool { + t.Helper() + a := simd.LoadFloat32x8Slice(x) + b := simd.LoadFloat32x8Slice(y) + k := simd.LoadInt32x8Slice(toVect[int32](m)).AsMask32x8() + g := make([]int32, n) + f(a, b, k).AsInt32x8().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testFloat64x4CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testFloat64x4CompareMasked(t *testing.T, + f func(_, _ simd.Float64x4, m simd.Mask64x4) simd.Mask64x4, + want func(_, _ []float64) []int64) { + n := 4 + t.Helper() + forSlicePairMasked(t, float64s, n, func(x, y []float64, m []bool) bool { + t.Helper() + a := simd.LoadFloat64x4Slice(x) + b := simd.LoadFloat64x4Slice(y) + k := simd.LoadInt64x4Slice(toVect[int64](m)).AsMask64x4() + g := make([]int64, n) + f(a, b, k).AsInt64x4().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testInt8x64CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testInt8x64CompareMasked(t *testing.T, + f func(_, _ simd.Int8x64, m simd.Mask8x64) simd.Mask8x64, + want func(_, _ []int8) []int64) { + n := 64 + t.Helper() + forSlicePairMasked(t, int8s, n, func(x, y []int8, m []bool) bool { + t.Helper() + a := simd.LoadInt8x64Slice(x) + b := simd.LoadInt8x64Slice(y) + k := simd.LoadInt8x64Slice(toVect[int8](m)).AsMask8x64() + g := make([]int8, n) + f(a, b, k).AsInt8x64().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testUint8x64CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. 
+func testUint8x64CompareMasked(t *testing.T, + f func(_, _ simd.Uint8x64, m simd.Mask8x64) simd.Mask8x64, + want func(_, _ []uint8) []int64) { + n := 64 + t.Helper() + forSlicePairMasked(t, uint8s, n, func(x, y []uint8, m []bool) bool { + t.Helper() + a := simd.LoadUint8x64Slice(x) + b := simd.LoadUint8x64Slice(y) + k := simd.LoadInt8x64Slice(toVect[int8](m)).AsMask8x64() + g := make([]int8, n) + f(a, b, k).AsInt8x64().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testInt16x32CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testInt16x32CompareMasked(t *testing.T, + f func(_, _ simd.Int16x32, m simd.Mask16x32) simd.Mask16x32, + want func(_, _ []int16) []int64) { + n := 32 + t.Helper() + forSlicePairMasked(t, int16s, n, func(x, y []int16, m []bool) bool { + t.Helper() + a := simd.LoadInt16x32Slice(x) + b := simd.LoadInt16x32Slice(y) + k := simd.LoadInt16x32Slice(toVect[int16](m)).AsMask16x32() + g := make([]int16, n) + f(a, b, k).AsInt16x32().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testUint16x32CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testUint16x32CompareMasked(t *testing.T, + f func(_, _ simd.Uint16x32, m simd.Mask16x32) simd.Mask16x32, + want func(_, _ []uint16) []int64) { + n := 32 + t.Helper() + forSlicePairMasked(t, uint16s, n, func(x, y []uint16, m []bool) bool { + t.Helper() + a := simd.LoadUint16x32Slice(x) + b := simd.LoadUint16x32Slice(y) + k := simd.LoadInt16x32Slice(toVect[int16](m)).AsMask16x32() + g := make([]int16, n) + f(a, b, k).AsInt16x32().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testInt32x16CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testInt32x16CompareMasked(t *testing.T, + f func(_, _ simd.Int32x16, m simd.Mask32x16) simd.Mask32x16, + want func(_, _ []int32) []int64) { + n := 16 + t.Helper() + forSlicePairMasked(t, int32s, n, func(x, y []int32, m []bool) bool { + t.Helper() + a := simd.LoadInt32x16Slice(x) + b := simd.LoadInt32x16Slice(y) + k := simd.LoadInt32x16Slice(toVect[int32](m)).AsMask32x16() + g := make([]int32, n) + f(a, b, k).AsInt32x16().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testUint32x16CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. 
+func testUint32x16CompareMasked(t *testing.T, + f func(_, _ simd.Uint32x16, m simd.Mask32x16) simd.Mask32x16, + want func(_, _ []uint32) []int64) { + n := 16 + t.Helper() + forSlicePairMasked(t, uint32s, n, func(x, y []uint32, m []bool) bool { + t.Helper() + a := simd.LoadUint32x16Slice(x) + b := simd.LoadUint32x16Slice(y) + k := simd.LoadInt32x16Slice(toVect[int32](m)).AsMask32x16() + g := make([]int32, n) + f(a, b, k).AsInt32x16().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testInt64x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testInt64x8CompareMasked(t *testing.T, + f func(_, _ simd.Int64x8, m simd.Mask64x8) simd.Mask64x8, + want func(_, _ []int64) []int64) { + n := 8 + t.Helper() + forSlicePairMasked(t, int64s, n, func(x, y []int64, m []bool) bool { + t.Helper() + a := simd.LoadInt64x8Slice(x) + b := simd.LoadInt64x8Slice(y) + k := simd.LoadInt64x8Slice(toVect[int64](m)).AsMask64x8() + g := make([]int64, n) + f(a, b, k).AsInt64x8().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testUint64x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testUint64x8CompareMasked(t *testing.T, + f func(_, _ simd.Uint64x8, m simd.Mask64x8) simd.Mask64x8, + want func(_, _ []uint64) []int64) { + n := 8 + t.Helper() + forSlicePairMasked(t, uint64s, n, func(x, y []uint64, m []bool) bool { + t.Helper() + a := simd.LoadUint64x8Slice(x) + b := simd.LoadUint64x8Slice(y) + k := simd.LoadInt64x8Slice(toVect[int64](m)).AsMask64x8() + g := make([]int64, n) + f(a, b, k).AsInt64x8().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testFloat32x16CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testFloat32x16CompareMasked(t *testing.T, + f func(_, _ simd.Float32x16, m simd.Mask32x16) simd.Mask32x16, + want func(_, _ []float32) []int64) { + n := 16 + t.Helper() + forSlicePairMasked(t, float32s, n, func(x, y []float32, m []bool) bool { + t.Helper() + a := simd.LoadFloat32x16Slice(x) + b := simd.LoadFloat32x16Slice(y) + k := simd.LoadInt32x16Slice(toVect[int32](m)).AsMask32x16() + g := make([]int32, n) + f(a, b, k).AsInt32x16().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testFloat64x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. 
+func testFloat64x8CompareMasked(t *testing.T, + f func(_, _ simd.Float64x8, m simd.Mask64x8) simd.Mask64x8, + want func(_, _ []float64) []int64) { + n := 8 + t.Helper() + forSlicePairMasked(t, float64s, n, func(x, y []float64, m []bool) bool { + t.Helper() + a := simd.LoadFloat64x8Slice(x) + b := simd.LoadFloat64x8Slice(y) + k := simd.LoadInt64x8Slice(toVect[int64](m)).AsMask64x8() + g := make([]int64, n) + f(a, b, k).AsInt64x8().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} diff --git a/src/simd/genfiles.go b/src/simd/genfiles.go new file mode 100644 index 00000000000000..8dac158fe43956 --- /dev/null +++ b/src/simd/genfiles.go @@ -0,0 +1,287 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore + +package main + +// this generates type-instantiated boilerplate code for +// slice operations and tests + +import ( + "bytes" + "flag" + "fmt" + "go/format" + "io" + "os" + "strings" + "text/template" +) + +func oneTemplate(t *template.Template, baseType string, width, count int, out io.Writer) { + b := width * count + if b < 128 || b > 512 { + return + } + BaseType := strings.ToUpper(baseType[:1]) + baseType[1:] + eType := fmt.Sprintf("%s%d", baseType, width) + wxc := fmt.Sprintf("%dx%d", width, count) + vType := fmt.Sprintf("%s%s", BaseType, wxc) + aOrAn := "a" + if strings.Contains("aeiou", baseType[:1]) { + aOrAn = "an" + } + t.Execute(out, struct { + Vec string + AOrAn string + Width int + Count int + WxC string + Type string + }{ + Vec: vType, + AOrAn: aOrAn, + Width: width, + Count: count, + WxC: wxc, + Type: eType, + }) +} + +func forTemplates(t *template.Template, out io.Writer) { + vecs := []int{128, 256, 512} + ints := []int{8, 16, 32, 64} + floats := []int{32, 64} + for _, v := range vecs { + for _, w := range ints { + c := v / w + oneTemplate(t, "int", w, c, out) + oneTemplate(t, "uint", w, c, out) + } + for _, w := range floats { + c := v / w + oneTemplate(t, "float", w, c, out) + } + } +} + +func prologue(s string, out io.Writer) { + fmt.Fprintf(out, + `// Code generated by '%s'; DO NOT EDIT. + +//go:build goexperiment.simd + +package simd + +`, s) +} + +func testPrologue(t, s string, out io.Writer) { + fmt.Fprintf(out, + `// Code generated by '%s'; DO NOT EDIT. + +//go:build goexperiment.simd + +// This file contains functions testing %s. +// Each function in this file is specialized for a +// particular simd type x. 
+ +package simd_test + +import ( + "simd" + "testing" +) + +`, s, t) +} + +func curryTestPrologue(t string) func(s string, out io.Writer) { + return func(s string, out io.Writer) { + testPrologue(t, s, out) + } +} + +// //go:noescape +// func LoadUint8x16Slice(s []uint8) Uint8x16 { +// return LoadUint8x16((*[16]uint8)(s[:16])) +// } + +// //go:noescape +// func (x Uint8x16) StoreSlice(s []uint8) { +// x.Store((*[16]uint8)(s[:16])) +// } + +func templateOf(name, temp string) *template.Template { + return template.Must(template.New(name).Parse(temp)) +} + +var sliceTemplate = templateOf("slice", ` +// Load{{.Vec}}Slice loads {{.AOrAn}} {{.Vec}} from a slice of at least {{.Count}} {{.Type}}s +func Load{{.Vec}}Slice(s []{{.Type}}) {{.Vec}} { + return Load{{.Vec}}((*[{{.Count}}]{{.Type}})(s)) +} + +// StoreSlice stores x into a slice of at least {{.Count}} {{.Type}}s +func (x {{.Vec}}) StoreSlice(s []{{.Type}}) { + x.Store((*[{{.Count}}]{{.Type}})(s)) +} +`) + +var unaryTemplate = templateOf("unary_helpers", ` +// test{{.Vec}}Unary tests the simd unary method f against the expected behavior generated by want +func test{{.Vec}}Unary(t *testing.T, f func(_ simd.{{.Vec}}) simd.{{.Vec}}, want func(_ []{{.Type}}) []{{.Type}}) { + n := {{.Count}} + t.Helper() + forSlice(t, {{.Type}}s, n, func(x []{{.Type}}) bool { + t.Helper() + a := simd.Load{{.Vec}}Slice(x) + g := make([]{{.Type}}, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() {t.Helper(); t.Logf("x=%v", x)}) + }) +} +`) + +var binaryTemplate = templateOf("binary_helpers", ` +// test{{.Vec}}Binary tests the simd binary method f against the expected behavior generated by want +func test{{.Vec}}Binary(t *testing.T, f func(_, _ simd.{{.Vec}}) simd.{{.Vec}}, want func(_, _ []{{.Type}}) []{{.Type}}) { + n := {{.Count}} + t.Helper() + forSlicePair(t, {{.Type}}s, n, func(x, y []{{.Type}}) bool { + t.Helper() + a := simd.Load{{.Vec}}Slice(x) + b := simd.Load{{.Vec}}Slice(y) + g := make([]{{.Type}}, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() {t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); }) + }) +} +`) + +var ternaryTemplate = templateOf("ternary_helpers", ` +// test{{.Vec}}Ternary tests the simd ternary method f against the expected behavior generated by want +func test{{.Vec}}Ternary(t *testing.T, f func(_, _, _ simd.{{.Vec}}) simd.{{.Vec}}, want func(_, _, _ []{{.Type}}) []{{.Type}}) { + n := {{.Count}} + t.Helper() + forSliceTriple(t, {{.Type}}s, n, func(x, y, z []{{.Type}}) bool { + t.Helper() + a := simd.Load{{.Vec}}Slice(x) + b := simd.Load{{.Vec}}Slice(y) + c := simd.Load{{.Vec}}Slice(z) + g := make([]{{.Type}}, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() {t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z); }) + }) +} +`) + +var compareTemplate = templateOf("compare_helpers", ` +// test{{.Vec}}Compare tests the simd comparison method f against the expected behavior generated by want +func test{{.Vec}}Compare(t *testing.T, f func(_, _ simd.{{.Vec}}) simd.Mask{{.WxC}}, want func(_, _ []{{.Type}}) []int64) { + n := {{.Count}} + t.Helper() + forSlicePair(t, {{.Type}}s, n, func(x, y []{{.Type}}) bool { + t.Helper() + a := simd.Load{{.Vec}}Slice(x) + b := simd.Load{{.Vec}}Slice(y) + g := make([]int{{.Width}}, n) + f(a, b).AsInt{{.WxC}}().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() {t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); }) + }) +} +`) + +// TODO this 
has not been tested yet. +var compareMaskedTemplate = templateOf("comparemasked_helpers", ` +// test{{.Vec}}CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func test{{.Vec}}CompareMasked(t *testing.T, + f func(_, _ simd.{{.Vec}}, m simd.Mask{{.WxC}}) simd.Mask{{.WxC}}, + want func(_, _ []{{.Type}}) []int64) { + n := {{.Count}} + t.Helper() + forSlicePairMasked(t, {{.Type}}s, n, func(x, y []{{.Type}}, m []bool) bool { + t.Helper() + a := simd.Load{{.Vec}}Slice(x) + b := simd.Load{{.Vec}}Slice(y) + k := simd.LoadInt{{.WxC}}Slice(toVect[int{{.Width}}](m)).AsMask{{.WxC}}() + g := make([]int{{.Width}}, n) + f(a, b, k).AsInt{{.WxC}}().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() {t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m); }) + }) +} +`) + +func main() { + sl := flag.String("sl", "slice_amd64.go", "file name for slice operations") + bh := flag.String("bh", "binary_helpers_test.go", "file name for binary test helpers") + uh := flag.String("uh", "unary_helpers_test.go", "file name for unary test helpers") + th := flag.String("th", "ternary_helpers_test.go", "file name for ternary test helpers") + ch := flag.String("ch", "compare_helpers_test.go", "file name for compare test helpers") + cmh := flag.String("cmh", "comparemasked_helpers_test.go", "file name for compare-masked test helpers") + flag.Parse() + + if *sl != "" { + one(*sl, prologue, sliceTemplate) + } + if *uh != "" { + one(*uh, curryTestPrologue("unary simd methods"), unaryTemplate) + } + if *bh != "" { + one(*bh, curryTestPrologue("binary simd methods"), binaryTemplate) + } + if *th != "" { + one(*th, curryTestPrologue("ternary simd methods"), ternaryTemplate) + } + if *ch != "" { + one(*ch, curryTestPrologue("simd methods that compare two operands"), compareTemplate) + } + if *cmh != "" { + one(*cmh, curryTestPrologue("simd methods that compare two operands under a mask"), compareMaskedTemplate) + } +} + +func one(filename string, prologue func(s string, out io.Writer), t *template.Template) { + if filename == "" { + return + } + + ofile := os.Stdout + + if filename != "-" { + var err error + ofile, err = os.Create(filename) + if err != nil { + fmt.Fprintf(os.Stderr, "Could not create the output file %s for the generated code, %v", filename, err) + os.Exit(1) + } + } + + out := new(bytes.Buffer) + + prologue("go run genfiles.go", out) + forTemplates(t, out) + + b, err := format.Source(out.Bytes()) + if err != nil { + fmt.Fprintf(os.Stderr, "There was a problem formatting the generated code for %s, %v", filename, err) + os.Exit(1) + } else { + ofile.Write(b) + ofile.Close() + } + +} diff --git a/src/simd/genslice.go b/src/simd/genslice.go deleted file mode 100644 index 77b9b41c09754e..00000000000000 --- a/src/simd/genslice.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build ignore - -package main - -// this generates all the code to load and store simd -// vectors to/from slices. 
- -import ( - "bytes" - "flag" - "fmt" - "go/format" - "io" - "os" - "strings" -) - -// //go:noescape -// func LoadUint8x16Slice(s []uint8) Uint8x16 { -// return LoadUint8x16((*[16]uint8)(s[:16])) -// } - -// //go:noescape -// func (x Uint8x16) StoreSlice(s []uint8) { -// x.Store((*[16]uint8)(s[:16])) -// } - -func slice(e string, w, c int, out io.Writer) { - b := w * c - if b < 128 || b > 512 { - return - } - E := strings.ToUpper(e[:1]) + e[1:] - t := fmt.Sprintf("%s%d", e, w) - v := fmt.Sprintf("%s%dx%d", E, w, c) - a := "a" - if strings.Contains("aeiou", e[:1]) { - a = "an" - } - fmt.Fprintf(out, - ` -// Load%sSlice loads %s %s from a slice of at least %d %ss -func Load%sSlice(s []%s) %s { - return Load%s((*[%d]%s)(s)) -} -`, v, a, v, c, t, v, t, v, v, c, t) - - fmt.Fprintf(out, - ` -// StoreSlice stores x into a slice of at least %d %ss -func (x %s) StoreSlice(s []%s) { - x.Store((*[%d]%s)(s)) -} -`, c, t, v, t, c, t) - -} - -func prologue(s string, out io.Writer) { - fmt.Fprintf(out, - `// Code generated by '%s'; DO NOT EDIT. - -//go:build goexperiment.simd - -// The build condition == if the experiment is not on, cmd/api TestCheck will see this and complain -// see also go/doc/comment, where "simd" is inserted to the package list of the experiment is not on. - -package simd - -`, s) -} - -func main() { - filename := flag.String("o", "", "write generated code to this file") - flag.Parse() - - ofile := os.Stdout - - if *filename != "" { - var err error - ofile, err = os.Create(*filename) - if err != nil { - fmt.Fprintf(os.Stderr, "Could not create the output file for the generated code, %v", err) - os.Exit(1) - } - } - - out := new(bytes.Buffer) - - prologue("go run genslice.go -o slice_amd64.go", out) - - vecs := []int{128, 256, 512} - ints := []int{8, 16, 32, 64} - floats := []int{32, 64} - for _, v := range vecs { - for _, w := range ints { - c := v / w - slice("int", w, c, out) - slice("uint", w, c, out) - } - for _, w := range floats { - c := v / w - slice("float", w, c, out) - } - } - b, err := format.Source(out.Bytes()) - if err != nil { - fmt.Fprintf(os.Stderr, "There was a problem formatting the generated code, %v", err) - os.Exit(1) - } else { - ofile.Write(b) - ofile.Close() - } -} diff --git a/src/simd/helpers_test.go b/src/simd/helpers_test.go new file mode 100644 index 00000000000000..14490a84b2a9e0 --- /dev/null +++ b/src/simd/helpers_test.go @@ -0,0 +1,299 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build goexperiment.simd && amd64 + +package simd_test + +import ( + "math" + "testing" +) + +type signed interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 +} + +type integer interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 | ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr +} + +type float interface { + ~float32 | ~float64 +} + +type number interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 | ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr | ~float32 | ~float64 +} + +func checkSlices[T number](t *testing.T, got, want []T) bool { + t.Helper() + return checkSlicesLogInput[T](t, got, want, nil) +} + +// checkSlices compares two slices for equality, +// reporting a test error if there is a problem, +// and also consumes the two slices so that a +// test/benchmark won't be dead-code eliminated. 
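+// NaN lanes are treated as equal to NaN lanes, and zero lanes are additionally
+// compared bitwise, so that +0.0 and -0.0 are reported as a mismatch.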
+func checkSlicesLogInput[T number](t *testing.T, got, want []T, logInput func()) bool { + t.Helper() + var z T + for i := range want { + if got[i] != want[i] { + var ia any = got[i] + var ib any = want[i] + switch x := ia.(type) { + case float32: + y := ib.(float32) + if math.IsNaN(float64(x)) && math.IsNaN(float64(y)) { + continue + } + case float64: + y := ib.(float64) + if math.IsNaN(x) && math.IsNaN(y) { + continue + } + default: + } + + t.Logf("For %T vector elements:", z) + t.Logf("got =%v", got) + t.Logf("want=%v", want) + if logInput != nil { + logInput() + } + t.Errorf("at index %d, got=%v, want=%v", i, got[i], want[i]) + return false + } else if got[i] == 0 { // for floating point, 0.0 == -0.0 but a bitwise check can see the difference + var ia any = got[i] + var ib any = want[i] + switch x := ia.(type) { + case float32: + y := ib.(float32) + if math.Float32bits(x) != math.Float32bits(y) { + t.Logf("For %T vector elements:", z) + t.Logf("got =%v", got) + t.Logf("want=%v", want) + if logInput != nil { + logInput() + } + t.Errorf("at index %d, different signs of zero", i) + return false + } + case float64: + y := ib.(float64) + if math.Float64bits(x) != math.Float64bits(y) { + t.Logf("For %T vector elements:", z) + t.Logf("got =%v", got) + t.Logf("want=%v", want) + if logInput != nil { + logInput() + } + t.Errorf("at index %d, different signs of zero", i) + return false + } + default: + } + + } + } + return true +} + +// sliceOf returns a slice n T's, with each +// element of the slice initialized to its +// index + 1. +func sliceOf[T number](n int) []T { + s := make([]T, n) + for i := 0; i < n; i++ { + s[i] = T(i + 1) + } + return s +} + +func toVect[T signed](b []bool) []T { + s := make([]T, len(b)) + for i := range b { + if b[i] { + s[i] = -1 + } + } + return s +} + +// s64 converts a slice of some integer type into a slice of int64 +func s64[T number](s []T) []int64 { + var is any = s + if r, ok := is.([]int64); ok { + return r + } + r := make([]int64, len(s)) + for i := range s { + r[i] = int64(s[i]) + } + return r +} + +// Do implements slice part testing. It repeatedly calls +// body on smaller and smaller slices and an output slice +// for the result, then compares the result to its own +// calculation of what the result should be. +func Do[T number](t *testing.T, n int, body func(a, c []T)) { + a := sliceOf[T](n) + b := sliceOf[T](n) + + for i := n; i >= 0; i-- { + c := make([]T, n, n) + body(a[:i], c) + checkSlices(t, c, b) + if i > 0 { + b[i-1] = T(0) + } + } +} + +// map3 returns a function that returns the slice of the results of applying +// input parameter elem to the respective elements of its 3 slice inputs. +func map3[T, U any](elem func(x, y, z T) U) func(x, y, z []T) []U { + return func(x, y, z []T) []U { + s := make([]U, len(x)) + for i := range s { + s[i] = elem(x[i], y[i], z[i]) + } + return s + } +} + +// map2 returns a function that returns the slice of the results of applying +// input parameter elem to the respective elements of its 2 slice inputs. +func map2[T, U any](elem func(x, y T) U) func(x, y []T) []U { + return func(x, y []T) []U { + s := make([]U, len(x)) + for i := range s { + s[i] = elem(x[i], y[i]) + } + return s + } +} + +// map1 returns a function that returns the slice of the results of applying +// input parameter elem to the respective elements of its single slice input. 
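+// For example, map1(func(x float64) float64 { return -x }) builds a reference
+// implementation for a lane-wise negation.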
+func map1[T, U any](elem func(x T) U) func(x []T) []U { + return func(x []T) []U { + s := make([]U, len(x)) + for i := range s { + s[i] = elem(x[i]) + } + return s + } +} + +// map1 returns a function that returns the slice of the results of applying +// comparison function elem to the respective elements of its two slice inputs. +func mapCompare[T number](elem func(x, y T) bool) func(x, y []T) []int64 { + return func(x, y []T) []int64 { + s := make([]int64, len(x)) + for i := range s { + if elem(x[i], y[i]) { + s[i] = -1 + } + } + return s + } +} + +// nOf returns a slice of length n whose elements are taken +// from input slice s. +func nOf[T any](n int, s []T) []T { + if len(s) >= n { + return s + } + r := make([]T, n) + for i := range r { + r[i] = s[i%len(s)] + } + return r +} + +const ( + PN22 = 1.0 / 1024 / 1024 / 4 + PN24 = 1.0 / 1024 / 1024 / 16 + PN53 = PN24 * PN24 / 32 + F0 = float32(1.0 + 513*PN22/2) + F1 = float32(1.0 + 511*PN22*8) + Aeasy = float32(2046 * PN53) + Ahard = float32(2047 * PN53) // 2047 provokes a 2-rounding in 64-bit FMA rounded to 32-bit +) + +var zero = 0.0 +var nan = math.NaN() + +// N controls how large the test vectors are +const N = 144 + +var float32s = nOf(N, []float32{1, float32(nan), float32(zero), 2, float32(nan), float32(zero), 3, float32(-zero), float32(1 / zero), float32(-1 / zero), 1 / 2, 1 / 4, 1 / 8, 1 / 1000, 1 / 1000000, 1, -1, 0, 2, -2, 3, -3, math.MaxFloat32, 1 / math.MaxFloat32, 10, -10, 100, 20, -20, 300, -300, -4000, -80, -160, -3200, -64, -4, -8, -16, -32, -64}) +var float64s = nOf(N, []float64{nan, zero, -zero, 1 / zero, -1 / zero, 1 / 1000, 1 / 1000000, 1, -1, 0, 2, -2, 3, -3, math.MaxFloat64, 1 / math.MaxFloat64, 10, -10, 100, 20, -20, 300, -300, -4000, -80, -16, -32, -64}) + +var int32s = nOf(N, []int32{1, -1, 0, 2, 4, 8, 1024, 0xffffff, -0xffffff, 0x55555, 0x77777, 0xccccc, -0x55555, -0x77777, -0xccccc, -4, -8, -16, -32, -64}) +var uint32s = nOf(N, []uint32{1, 0, 2, 4, 8, 1024, 0xffffff, ^uint32(0xffffff), 0x55555, 0x77777, 0xccccc, ^uint32(0x55555), ^uint32(0x77777), ^uint32(0xccccc)}) + +var int64s = nOf(N, []int64{1, -1, 0, 2, 4, 8, 1024, 0xffffff, -0xffffff, 0x55555, 0x77777, 0xccccc, -0x55555, -0x77777, -0xccccc, -4, -8, -16, -32, -64}) +var uint64s = nOf(N, []uint64{1, 0, 2, 4, 8, 1024, 0xffffff, ^uint64(0xffffff), 0x55555, 0x77777, 0xccccc, ^uint64(0x55555), ^uint64(0x77777), ^uint64(0xccccc)}) + +var int16s = nOf(N, []int16{1, -1, 0, 2, 4, 8, 1024, 3, 5, 7, 11, 13, 3000, 5555, 7777, 11111, 32767, 32766, -32767, -32768, -11111, -4, -8, -16, -32, -64}) +var uint16s = nOf(N, []uint16{1, 0, 2, 4, 8, 1024, 3, 5, 7, 11, 13, 3000, 5555, 7777, 11111, 32767, 32766, 32768, 65535, 45678, 56789}) + +var int8s = nOf(N, []int8{0, 1, 2, 3, 5, 7, 11, 22, 33, 55, 77, 121, 127, -1, -2, -3, -5, -7, -11, -77, -121, -127, -128, 4, 8, 16, 32, 64, -4, -8, -16, -32, -64}) +var uint8s = nOf(N, []uint8{0, 1, 2, 3, 5, 7, 11, 22, 33, 55, 77, 121, 127, 128, 255, 233, 211, 177, 144, 4, 8, 16, 32, 64}) + +var bools = nOf(N, []bool{ + true, false, true, true, false, false, true, true, true, false, false, false, true, true, true, true, false, false, false, false}) + +func forSlice[T number](t *testing.T, s []T, n int, f func(a []T) bool) { + t.Helper() + for i := 0; i < len(s)-n; i++ { + if !f(s[i : i+n]) { + return + } + } +} + +func forSlicePair[T number](t *testing.T, s []T, n int, f func(a, b []T) bool) { + t.Helper() + for i := 0; i < len(s)-n; i++ { + for j := 0; j < len(s)-n; j++ { + if !f(s[i:i+n], s[j:j+n]) { + return + } + } + } +} + +func 
forSliceTriple[T number](t *testing.T, s []T, n int, f func(a, b, c []T) bool) { + t.Helper() + for i := 0; i < len(s)-n; i += 3 { + for j := 0; j < len(s)-n; j += 3 { + for k := 0; k < len(s)-n; k += 3 { + if !f(s[i:i+n], s[j:j+n], s[k:k+n]) { + return + } + } + } + } +} + +func forSlicePairMasked[T number](t *testing.T, s []T, n int, f func(a, b []T, m []bool) bool) { + t.Helper() + m := bools + // Step slice pair masked forward much more quickly, otherwise it is slooooow + for i := 0; i < len(s)-n; i += 3 { + for j := 0; j < len(s)-n; j += 3 { + for k := 0; k < len(m)-n; k += 3 { + if !f(s[i:i+n], s[j:j+n], m[k:k+n]) { + return + } + } + } + } +} diff --git a/src/simd/no_tag.go b/src/simd/no_tag.go index c11fd51b2345d6..976a2155d9f3ab 100644 --- a/src/simd/no_tag.go +++ b/src/simd/no_tag.go @@ -6,4 +6,4 @@ package simd // This file has no build tag, so that go generate can run without a build tag. -//go:generate go run genslice.go -o slice_amd64.go +//go:generate go run genfiles.go diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index d4f539eea2c1a3..06af3458b5280c 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -9,6 +9,7 @@ package simd_test import ( "reflect" "simd" + "slices" "testing" ) @@ -135,22 +136,6 @@ func TestMaskConversion(t *testing.T) { } } -func TestAdd(t *testing.T) { - testInt32x4Binary(t, []int32{1, 2, 3, 4}, []int32{5, 6, 7, 8}, []int32{6, 8, 10, 12}, "Add") -} - -func TestSub(t *testing.T) { - testInt32x4Binary(t, []int32{5, 5, 5, 3}, []int32{3, 3, 3, 3}, []int32{2, 2, 2, 0}, "Sub") -} - -func TestMaskedAdd(t *testing.T) { - if !simd.HasAVX512GFNI() { - t.Skip("Test requires HasAVX512, not available on this hardware") - return - } - testInt32x4BinaryMasked(t, []int32{1, 2, 3, 4}, []int32{5, 6, 7, 8}, []int32{-1, -1, 0, 0}, []int32{6, 8, 0, 0}, "AddMasked") -} - func TestPermute(t *testing.T) { if !simd.HasAVX512() { t.Skip("Test requires HasAVX512, not available on this hardware") @@ -191,15 +176,15 @@ func TestCompress(t *testing.T) { t.Skip("Test requires HasAVX512, not available on this hardware") return } - testInt32x4Mask32x4Int32x4(t, []int32{1, 2, 3, 4}, - []int32{0, -1, 0, -1}, - []int32{2, 4, 0, 0}, "Compress") -} - -func TestAndNot(t *testing.T) { - testInt32x4Binary(t, []int32{0b11, 0b00, 0b11, 0b00}, - []int32{0b01, 0b01, 0b01, 0b01}, - []int32{0b10, 0b00, 0b10, 0b00}, "AndNot") + v1234 := simd.LoadInt32x4Slice([]int32{1, 2, 3, 4}) + v0101 := simd.LoadInt32x4Slice([]int32{0, -1, 0, -1}) + v2400 := v1234.Compress(v0101.AsMask32x4()) + got := make([]int32, 4) + v2400.StoreSlice(got) + want := []int32{2, 4, 0, 0} + if !slices.Equal(got, want) { + t.Errorf("want and got differ, want=%v, got=%v", want, got) + } } func TestPairDotProdAccumulate(t *testing.T) { @@ -231,53 +216,13 @@ func checkInt8Slices(t *testing.T, a, b []int8) { } } -func checkUint8Slices(t *testing.T, a, b []uint8) { - for i := range b { - if a[i] != b[i] { - t.Errorf("a and b differ at index %d, a=%d, b=%d", i, a[i], b[i]) - } - } -} - -func checkInt16Slices(t *testing.T, a, b []int16) { - for i := range b { - if a[i] != b[i] { - t.Errorf("a and b differ at index %d, a=%d, b=%d", i, a[i], b[i]) - } - } -} - -func checkUint16Slices(t *testing.T, a, b []uint16) { - for i := range b { - if a[i] != b[i] { - t.Errorf("a and b differ at index %d, a=%d, b=%d", i, a[i], b[i]) - } - } -} - -func checkFloat32Slices(t *testing.T, a, b []float32) { - for i := range b { - if a[i] != b[i] { - t.Errorf("a and b differ at index %d, a=%3.0f, b=%3.0f", i, a[i], b[i]) - } - } -} - 
-func checkFloat64Slices(t *testing.T, a, b []float64) { - for i := range b { - if a[i] != b[i] { - t.Errorf("a and b differ at index %d, a=%3.0f, b=%3.0f", i, a[i], b[i]) - } - } -} - func TestSlicesInt8(t *testing.T) { a := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} v := simd.LoadInt8x32Slice(a) b := make([]int8, 32, 32) v.StoreSlice(b) - checkInt8Slices(t, a, b) + checkSlices(t, a, b) } func TestSlicesInt8SetElem(t *testing.T) { @@ -290,7 +235,7 @@ func TestSlicesInt8SetElem(t *testing.T) { b := make([]int8, 16, 16) v.StoreSlice(b) - checkInt8Slices(t, a, b) + checkSlices(t, a, b) } func TestSlicesInt8GetElem(t *testing.T) { @@ -315,8 +260,8 @@ func TestSlicesInt8Set128(t *testing.T) { b := make([]int8, 32, 32) w.StoreSlice(b) - checkInt8Slices(t, a, b[:16]) - checkInt8Slices(t, a, b[16:]) + checkSlices(t, a, b[:16]) + checkSlices(t, a, b[16:]) } func TestSlicesInt8Get128(t *testing.T) { @@ -330,7 +275,7 @@ func TestSlicesInt8Get128(t *testing.T) { v.StoreSlice(b[:16]) w.StoreSlice(b[16:]) - checkInt8Slices(t, a, b) + checkSlices(t, a, b) } func TestSlicesFloat32Set128(t *testing.T) { @@ -344,8 +289,8 @@ func TestSlicesFloat32Set128(t *testing.T) { b := make([]float32, 8, 8) w.StoreSlice(b) - checkFloat32Slices(t, a, b[:4]) - checkFloat32Slices(t, a, b[4:]) + checkSlices(t, a, b[:4]) + checkSlices(t, a, b[4:]) } func TestSlicesFloat32Get128(t *testing.T) { @@ -359,7 +304,7 @@ func TestSlicesFloat32Get128(t *testing.T) { v.StoreSlice(b[:4]) w.StoreSlice(b[4:]) - checkFloat32Slices(t, a, b) + checkSlices(t, a, b) } func TestSlicesFloat64Set128(t *testing.T) { @@ -373,8 +318,8 @@ func TestSlicesFloat64Set128(t *testing.T) { b := make([]float64, 4, 4) w.StoreSlice(b) - checkFloat64Slices(t, a, b[:2]) - checkFloat64Slices(t, a, b[2:]) + checkSlices(t, a, b[:2]) + checkSlices(t, a, b[2:]) } func TestSlicesFloat64Get128(t *testing.T) { @@ -388,7 +333,7 @@ func TestSlicesFloat64Get128(t *testing.T) { v.StoreSlice(b[:2]) w.StoreSlice(b[2:]) - checkFloat64Slices(t, a, b) + checkSlices(t, a, b) } func TestSlicesInt8TooShortLoad(t *testing.T) { @@ -404,7 +349,7 @@ func TestSlicesInt8TooShortLoad(t *testing.T) { v := simd.LoadInt8x32Slice(a) b := make([]int8, 32, 32) v.StoreSlice(b) - checkInt8Slices(t, a, b) + checkSlices(t, a, b) } func TestSlicesInt8TooShortStore(t *testing.T) { @@ -420,7 +365,7 @@ func TestSlicesInt8TooShortStore(t *testing.T) { v := simd.LoadInt8x32Slice(a) b := make([]int8, 31) // TOO SHORT, should panic v.StoreSlice(b) - checkInt8Slices(t, a, b) + checkSlices(t, a, b) } func TestSlicesFloat64(t *testing.T) { diff --git a/src/simd/simd_wrapped_test.go b/src/simd/simd_wrapped_test.go deleted file mode 100644 index d46c05e5290c4e..00000000000000 --- a/src/simd/simd_wrapped_test.go +++ /dev/null @@ -1,8021 +0,0 @@ -// Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. 
- -//go:build goexperiment.simd - -package simd_test - -import ( - "simd" - "testing" -) - -func testFloat32x4Binary(t *testing.T, v0 []float32, v1 []float32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x4 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x4Slice(v0) - vec1 := simd.LoadFloat32x4Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "AddSub": - gotv = vec0.AddSub(vec1) - case "Div": - gotv = vec0.Div(vec1) - case "DotProdBroadcast": - gotv = vec0.DotProdBroadcast(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "Mul": - gotv = vec0.Mul(vec1) - case "MulByPowOf2": - gotv = vec0.MulByPowOf2(vec1) - case "PairwiseAdd": - gotv = vec0.PairwiseAdd(vec1) - case "PairwiseSub": - gotv = vec0.PairwiseSub(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - - default: - t.Errorf("Unknown method: Float32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x4BinaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []int32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x4 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x4Slice(v0) - vec1 := simd.LoadFloat32x4Slice(v1) - vec2 := simd.LoadInt32x4Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask32x4()) - case "DivMasked": - gotv = vec0.DivMasked(vec1, vec2.AsMask32x4()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask32x4()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask32x4()) - case "MulByPowOf2Masked": - gotv = vec0.MulByPowOf2Masked(vec1, vec2.AsMask32x4()) - case "MulMasked": - gotv = vec0.MulMasked(vec1, vec2.AsMask32x4()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask32x4()) - - default: - t.Errorf("Unknown method: Float32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x4Compare(t *testing.T, v0 []float32, v1 []float32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x4 - got := make([]int32, len(want)) - vec0 := simd.LoadFloat32x4Slice(v0) - vec1 := simd.LoadFloat32x4Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt32x4() - case "Greater": - gotv = vec0.Greater(vec1).AsInt32x4() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt32x4() - case "IsNan": - gotv = vec0.IsNan(vec1).AsInt32x4() - case "Less": - gotv = vec0.Less(vec1).AsInt32x4() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt32x4() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt32x4() - - default: - t.Errorf("Unknown method: Float32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x4Mask32x4Float32x4(t *testing.T, v0 []float32, v1 []int32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x4 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x4Slice(v0) - vec1 := simd.LoadInt32x4Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask32x4()) - - default: - t.Errorf("Unknown method: Float32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d 
incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x4MaskedCompare(t *testing.T, v0 []float32, v1 []float32, v2 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x4 - got := make([]int32, len(want)) - vec0 := simd.LoadFloat32x4Slice(v0) - vec1 := simd.LoadFloat32x4Slice(v1) - vec2 := simd.LoadInt32x4Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask32x4()).AsInt32x4() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask32x4()).AsInt32x4() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask32x4()).AsInt32x4() - case "IsNanMasked": - gotv = vec0.IsNanMasked(vec1, vec2.AsMask32x4()).AsInt32x4() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask32x4()).AsInt32x4() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask32x4()).AsInt32x4() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask32x4()).AsInt32x4() - - default: - t.Errorf("Unknown method: Float32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x4Ternary(t *testing.T, v0 []float32, v1 []float32, v2 []float32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x4 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x4Slice(v0) - vec1 := simd.LoadFloat32x4Slice(v1) - vec2 := simd.LoadFloat32x4Slice(v2) - switch which { - case "FusedMultiplyAdd": - gotv = vec0.FusedMultiplyAdd(vec1, vec2) - case "FusedMultiplyAddSub": - gotv = vec0.FusedMultiplyAddSub(vec1, vec2) - case "FusedMultiplySubAdd": - gotv = vec0.FusedMultiplySubAdd(vec1, vec2) - - default: - t.Errorf("Unknown method: Float32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x4TernaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []float32, v3 []int32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x4 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x4Slice(v0) - vec1 := simd.LoadFloat32x4Slice(v1) - vec2 := simd.LoadFloat32x4Slice(v2) - vec3 := simd.LoadInt32x4Slice(v3) - switch which { - case "FusedMultiplyAddMasked": - gotv = vec0.FusedMultiplyAddMasked(vec1, vec2, vec3.AsMask32x4()) - case "FusedMultiplyAddSubMasked": - gotv = vec0.FusedMultiplyAddSubMasked(vec1, vec2, vec3.AsMask32x4()) - case "FusedMultiplySubAddMasked": - gotv = vec0.FusedMultiplySubAddMasked(vec1, vec2, vec3.AsMask32x4()) - - default: - t.Errorf("Unknown method: Float32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x4Unary(t *testing.T, v0 []float32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x4 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x4Slice(v0) - switch which { - case "ApproximateReciprocal": - gotv = vec0.ApproximateReciprocal() - case "ApproximateReciprocalOfSqrt": - gotv = vec0.ApproximateReciprocalOfSqrt() - case "Ceil": - gotv = vec0.Ceil() - case "Floor": - gotv = vec0.Floor() - case "Round": - gotv = vec0.Round() - case "Sqrt": - gotv = vec0.Sqrt() - case "Trunc": - gotv = vec0.Trunc() - - default: - t.Errorf("Unknown method: Float32x4.%s", which) - } - 
gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x4UnaryMasked(t *testing.T, v0 []float32, v1 []int32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x4 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x4Slice(v0) - vec1 := simd.LoadInt32x4Slice(v1) - switch which { - case "ApproximateReciprocalMasked": - gotv = vec0.ApproximateReciprocalMasked(vec1.AsMask32x4()) - case "ApproximateReciprocalOfSqrtMasked": - gotv = vec0.ApproximateReciprocalOfSqrtMasked(vec1.AsMask32x4()) - case "SqrtMasked": - gotv = vec0.SqrtMasked(vec1.AsMask32x4()) - - default: - t.Errorf("Unknown method: Float32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x8Binary(t *testing.T, v0 []float32, v1 []float32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x8 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x8Slice(v0) - vec1 := simd.LoadFloat32x8Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "AddSub": - gotv = vec0.AddSub(vec1) - case "Div": - gotv = vec0.Div(vec1) - case "DotProdBroadcast": - gotv = vec0.DotProdBroadcast(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "Mul": - gotv = vec0.Mul(vec1) - case "MulByPowOf2": - gotv = vec0.MulByPowOf2(vec1) - case "PairwiseAdd": - gotv = vec0.PairwiseAdd(vec1) - case "PairwiseSub": - gotv = vec0.PairwiseSub(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - - default: - t.Errorf("Unknown method: Float32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x8BinaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []int32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x8 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x8Slice(v0) - vec1 := simd.LoadFloat32x8Slice(v1) - vec2 := simd.LoadInt32x8Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask32x8()) - case "DivMasked": - gotv = vec0.DivMasked(vec1, vec2.AsMask32x8()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask32x8()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask32x8()) - case "MulByPowOf2Masked": - gotv = vec0.MulByPowOf2Masked(vec1, vec2.AsMask32x8()) - case "MulMasked": - gotv = vec0.MulMasked(vec1, vec2.AsMask32x8()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask32x8()) - - default: - t.Errorf("Unknown method: Float32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x8Compare(t *testing.T, v0 []float32, v1 []float32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x8 - got := make([]int32, len(want)) - vec0 := simd.LoadFloat32x8Slice(v0) - vec1 := simd.LoadFloat32x8Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt32x8() - case "Greater": - gotv = vec0.Greater(vec1).AsInt32x8() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt32x8() - case "IsNan": - gotv = vec0.IsNan(vec1).AsInt32x8() - case "Less": - gotv = vec0.Less(vec1).AsInt32x8() - case "LessEqual": 
- gotv = vec0.LessEqual(vec1).AsInt32x8() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt32x8() - - default: - t.Errorf("Unknown method: Float32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x8Mask32x8Float32x8(t *testing.T, v0 []float32, v1 []int32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x8 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x8Slice(v0) - vec1 := simd.LoadInt32x8Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask32x8()) - - default: - t.Errorf("Unknown method: Float32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x8MaskedCompare(t *testing.T, v0 []float32, v1 []float32, v2 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x8 - got := make([]int32, len(want)) - vec0 := simd.LoadFloat32x8Slice(v0) - vec1 := simd.LoadFloat32x8Slice(v1) - vec2 := simd.LoadInt32x8Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask32x8()).AsInt32x8() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask32x8()).AsInt32x8() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask32x8()).AsInt32x8() - case "IsNanMasked": - gotv = vec0.IsNanMasked(vec1, vec2.AsMask32x8()).AsInt32x8() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask32x8()).AsInt32x8() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask32x8()).AsInt32x8() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask32x8()).AsInt32x8() - - default: - t.Errorf("Unknown method: Float32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x8Ternary(t *testing.T, v0 []float32, v1 []float32, v2 []float32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x8 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x8Slice(v0) - vec1 := simd.LoadFloat32x8Slice(v1) - vec2 := simd.LoadFloat32x8Slice(v2) - switch which { - case "FusedMultiplyAdd": - gotv = vec0.FusedMultiplyAdd(vec1, vec2) - case "FusedMultiplyAddSub": - gotv = vec0.FusedMultiplyAddSub(vec1, vec2) - case "FusedMultiplySubAdd": - gotv = vec0.FusedMultiplySubAdd(vec1, vec2) - - default: - t.Errorf("Unknown method: Float32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x8TernaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []float32, v3 []int32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x8 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x8Slice(v0) - vec1 := simd.LoadFloat32x8Slice(v1) - vec2 := simd.LoadFloat32x8Slice(v2) - vec3 := simd.LoadInt32x8Slice(v3) - switch which { - case "FusedMultiplyAddMasked": - gotv = vec0.FusedMultiplyAddMasked(vec1, vec2, vec3.AsMask32x8()) - case "FusedMultiplyAddSubMasked": - gotv = vec0.FusedMultiplyAddSubMasked(vec1, vec2, vec3.AsMask32x8()) - case "FusedMultiplySubAddMasked": - gotv = vec0.FusedMultiplySubAddMasked(vec1, vec2, vec3.AsMask32x8()) - - default: - 
t.Errorf("Unknown method: Float32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x8Unary(t *testing.T, v0 []float32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x8 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x8Slice(v0) - switch which { - case "ApproximateReciprocal": - gotv = vec0.ApproximateReciprocal() - case "ApproximateReciprocalOfSqrt": - gotv = vec0.ApproximateReciprocalOfSqrt() - case "Ceil": - gotv = vec0.Ceil() - case "Floor": - gotv = vec0.Floor() - case "Round": - gotv = vec0.Round() - case "Sqrt": - gotv = vec0.Sqrt() - case "Trunc": - gotv = vec0.Trunc() - - default: - t.Errorf("Unknown method: Float32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x8UnaryMasked(t *testing.T, v0 []float32, v1 []int32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x8 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x8Slice(v0) - vec1 := simd.LoadInt32x8Slice(v1) - switch which { - case "ApproximateReciprocalMasked": - gotv = vec0.ApproximateReciprocalMasked(vec1.AsMask32x8()) - case "ApproximateReciprocalOfSqrtMasked": - gotv = vec0.ApproximateReciprocalOfSqrtMasked(vec1.AsMask32x8()) - case "SqrtMasked": - gotv = vec0.SqrtMasked(vec1.AsMask32x8()) - - default: - t.Errorf("Unknown method: Float32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x16Binary(t *testing.T, v0 []float32, v1 []float32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x16 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x16Slice(v0) - vec1 := simd.LoadFloat32x16Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "Div": - gotv = vec0.Div(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "Mul": - gotv = vec0.Mul(vec1) - case "MulByPowOf2": - gotv = vec0.MulByPowOf2(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - - default: - t.Errorf("Unknown method: Float32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x16BinaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []int32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x16 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x16Slice(v0) - vec1 := simd.LoadFloat32x16Slice(v1) - vec2 := simd.LoadInt32x16Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask32x16()) - case "DivMasked": - gotv = vec0.DivMasked(vec1, vec2.AsMask32x16()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask32x16()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask32x16()) - case "MulByPowOf2Masked": - gotv = vec0.MulByPowOf2Masked(vec1, vec2.AsMask32x16()) - case "MulMasked": - gotv = vec0.MulMasked(vec1, vec2.AsMask32x16()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask32x16()) - - default: - t.Errorf("Unknown method: Float32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - 
t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x16Compare(t *testing.T, v0 []float32, v1 []float32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadFloat32x16Slice(v0) - vec1 := simd.LoadFloat32x16Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt32x16() - case "Greater": - gotv = vec0.Greater(vec1).AsInt32x16() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt32x16() - case "IsNan": - gotv = vec0.IsNan(vec1).AsInt32x16() - case "Less": - gotv = vec0.Less(vec1).AsInt32x16() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt32x16() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt32x16() - - default: - t.Errorf("Unknown method: Float32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x16Mask32x16Float32x16(t *testing.T, v0 []float32, v1 []int32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x16 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x16Slice(v0) - vec1 := simd.LoadInt32x16Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask32x16()) - - default: - t.Errorf("Unknown method: Float32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x16MaskedCompare(t *testing.T, v0 []float32, v1 []float32, v2 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadFloat32x16Slice(v0) - vec1 := simd.LoadFloat32x16Slice(v1) - vec2 := simd.LoadInt32x16Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask32x16()).AsInt32x16() - case "IsNanMasked": - gotv = vec0.IsNanMasked(vec1, vec2.AsMask32x16()).AsInt32x16() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask32x16()).AsInt32x16() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() - - default: - t.Errorf("Unknown method: Float32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x16Ternary(t *testing.T, v0 []float32, v1 []float32, v2 []float32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x16 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x16Slice(v0) - vec1 := simd.LoadFloat32x16Slice(v1) - vec2 := simd.LoadFloat32x16Slice(v2) - switch which { - case "FusedMultiplyAdd": - gotv = vec0.FusedMultiplyAdd(vec1, vec2) - case "FusedMultiplyAddSub": - gotv = vec0.FusedMultiplyAddSub(vec1, vec2) - case "FusedMultiplySubAdd": - gotv = vec0.FusedMultiplySubAdd(vec1, vec2) - - default: - t.Errorf("Unknown method: Float32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func 
testFloat32x16TernaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []float32, v3 []int32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x16 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x16Slice(v0) - vec1 := simd.LoadFloat32x16Slice(v1) - vec2 := simd.LoadFloat32x16Slice(v2) - vec3 := simd.LoadInt32x16Slice(v3) - switch which { - case "FusedMultiplyAddMasked": - gotv = vec0.FusedMultiplyAddMasked(vec1, vec2, vec3.AsMask32x16()) - case "FusedMultiplyAddSubMasked": - gotv = vec0.FusedMultiplyAddSubMasked(vec1, vec2, vec3.AsMask32x16()) - case "FusedMultiplySubAddMasked": - gotv = vec0.FusedMultiplySubAddMasked(vec1, vec2, vec3.AsMask32x16()) - - default: - t.Errorf("Unknown method: Float32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x16Unary(t *testing.T, v0 []float32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x16 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x16Slice(v0) - switch which { - case "ApproximateReciprocal": - gotv = vec0.ApproximateReciprocal() - case "ApproximateReciprocalOfSqrt": - gotv = vec0.ApproximateReciprocalOfSqrt() - case "Sqrt": - gotv = vec0.Sqrt() - - default: - t.Errorf("Unknown method: Float32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x16UnaryMasked(t *testing.T, v0 []float32, v1 []int32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x16 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x16Slice(v0) - vec1 := simd.LoadInt32x16Slice(v1) - switch which { - case "ApproximateReciprocalMasked": - gotv = vec0.ApproximateReciprocalMasked(vec1.AsMask32x16()) - case "ApproximateReciprocalOfSqrtMasked": - gotv = vec0.ApproximateReciprocalOfSqrtMasked(vec1.AsMask32x16()) - case "SqrtMasked": - gotv = vec0.SqrtMasked(vec1.AsMask32x16()) - - default: - t.Errorf("Unknown method: Float32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x2Binary(t *testing.T, v0 []float64, v1 []float64, want []float64, which string) { - t.Helper() - var gotv simd.Float64x2 - got := make([]float64, len(want)) - vec0 := simd.LoadFloat64x2Slice(v0) - vec1 := simd.LoadFloat64x2Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "AddSub": - gotv = vec0.AddSub(vec1) - case "Div": - gotv = vec0.Div(vec1) - case "DotProdBroadcast": - gotv = vec0.DotProdBroadcast(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "Mul": - gotv = vec0.Mul(vec1) - case "MulByPowOf2": - gotv = vec0.MulByPowOf2(vec1) - case "PairwiseAdd": - gotv = vec0.PairwiseAdd(vec1) - case "PairwiseSub": - gotv = vec0.PairwiseSub(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - - default: - t.Errorf("Unknown method: Float64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x2BinaryMasked(t *testing.T, v0 []float64, v1 []float64, v2 []int64, want []float64, which string) { - t.Helper() - var gotv simd.Float64x2 - got := make([]float64, len(want)) - vec0 := 
simd.LoadFloat64x2Slice(v0) - vec1 := simd.LoadFloat64x2Slice(v1) - vec2 := simd.LoadInt64x2Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask64x2()) - case "DivMasked": - gotv = vec0.DivMasked(vec1, vec2.AsMask64x2()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask64x2()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask64x2()) - case "MulByPowOf2Masked": - gotv = vec0.MulByPowOf2Masked(vec1, vec2.AsMask64x2()) - case "MulMasked": - gotv = vec0.MulMasked(vec1, vec2.AsMask64x2()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask64x2()) - - default: - t.Errorf("Unknown method: Float64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x2Compare(t *testing.T, v0 []float64, v1 []float64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x2 - got := make([]int64, len(want)) - vec0 := simd.LoadFloat64x2Slice(v0) - vec1 := simd.LoadFloat64x2Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt64x2() - case "Greater": - gotv = vec0.Greater(vec1).AsInt64x2() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt64x2() - case "IsNan": - gotv = vec0.IsNan(vec1).AsInt64x2() - case "Less": - gotv = vec0.Less(vec1).AsInt64x2() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt64x2() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt64x2() - - default: - t.Errorf("Unknown method: Float64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x2Mask64x2Float64x2(t *testing.T, v0 []float64, v1 []int64, want []float64, which string) { - t.Helper() - var gotv simd.Float64x2 - got := make([]float64, len(want)) - vec0 := simd.LoadFloat64x2Slice(v0) - vec1 := simd.LoadInt64x2Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask64x2()) - - default: - t.Errorf("Unknown method: Float64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x2MaskedCompare(t *testing.T, v0 []float64, v1 []float64, v2 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x2 - got := make([]int64, len(want)) - vec0 := simd.LoadFloat64x2Slice(v0) - vec1 := simd.LoadFloat64x2Slice(v1) - vec2 := simd.LoadInt64x2Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask64x2()).AsInt64x2() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask64x2()).AsInt64x2() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask64x2()).AsInt64x2() - case "IsNanMasked": - gotv = vec0.IsNanMasked(vec1, vec2.AsMask64x2()).AsInt64x2() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask64x2()).AsInt64x2() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask64x2()).AsInt64x2() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask64x2()).AsInt64x2() - - default: - t.Errorf("Unknown method: Float64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x2Ternary(t *testing.T, v0 []float64, v1 
[]float64, v2 []float64, want []float64, which string) { - t.Helper() - var gotv simd.Float64x2 - got := make([]float64, len(want)) - vec0 := simd.LoadFloat64x2Slice(v0) - vec1 := simd.LoadFloat64x2Slice(v1) - vec2 := simd.LoadFloat64x2Slice(v2) - switch which { - case "FusedMultiplyAdd": - gotv = vec0.FusedMultiplyAdd(vec1, vec2) - case "FusedMultiplyAddSub": - gotv = vec0.FusedMultiplyAddSub(vec1, vec2) - case "FusedMultiplySubAdd": - gotv = vec0.FusedMultiplySubAdd(vec1, vec2) - - default: - t.Errorf("Unknown method: Float64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x2TernaryMasked(t *testing.T, v0 []float64, v1 []float64, v2 []float64, v3 []int64, want []float64, which string) { - t.Helper() - var gotv simd.Float64x2 - got := make([]float64, len(want)) - vec0 := simd.LoadFloat64x2Slice(v0) - vec1 := simd.LoadFloat64x2Slice(v1) - vec2 := simd.LoadFloat64x2Slice(v2) - vec3 := simd.LoadInt64x2Slice(v3) - switch which { - case "FusedMultiplyAddMasked": - gotv = vec0.FusedMultiplyAddMasked(vec1, vec2, vec3.AsMask64x2()) - case "FusedMultiplyAddSubMasked": - gotv = vec0.FusedMultiplyAddSubMasked(vec1, vec2, vec3.AsMask64x2()) - case "FusedMultiplySubAddMasked": - gotv = vec0.FusedMultiplySubAddMasked(vec1, vec2, vec3.AsMask64x2()) - - default: - t.Errorf("Unknown method: Float64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x2Unary(t *testing.T, v0 []float64, want []float64, which string) { - t.Helper() - var gotv simd.Float64x2 - got := make([]float64, len(want)) - vec0 := simd.LoadFloat64x2Slice(v0) - switch which { - case "ApproximateReciprocal": - gotv = vec0.ApproximateReciprocal() - case "ApproximateReciprocalOfSqrt": - gotv = vec0.ApproximateReciprocalOfSqrt() - case "Ceil": - gotv = vec0.Ceil() - case "Floor": - gotv = vec0.Floor() - case "Round": - gotv = vec0.Round() - case "Sqrt": - gotv = vec0.Sqrt() - case "Trunc": - gotv = vec0.Trunc() - - default: - t.Errorf("Unknown method: Float64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x2UnaryMasked(t *testing.T, v0 []float64, v1 []int64, want []float64, which string) { - t.Helper() - var gotv simd.Float64x2 - got := make([]float64, len(want)) - vec0 := simd.LoadFloat64x2Slice(v0) - vec1 := simd.LoadInt64x2Slice(v1) - switch which { - case "ApproximateReciprocalMasked": - gotv = vec0.ApproximateReciprocalMasked(vec1.AsMask64x2()) - case "ApproximateReciprocalOfSqrtMasked": - gotv = vec0.ApproximateReciprocalOfSqrtMasked(vec1.AsMask64x2()) - case "SqrtMasked": - gotv = vec0.SqrtMasked(vec1.AsMask64x2()) - - default: - t.Errorf("Unknown method: Float64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x4Binary(t *testing.T, v0 []float64, v1 []float64, want []float64, which string) { - t.Helper() - var gotv simd.Float64x4 - got := make([]float64, len(want)) - vec0 := simd.LoadFloat64x4Slice(v0) - vec1 := simd.LoadFloat64x4Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "AddSub": - gotv = vec0.AddSub(vec1) - case 
"Div": - gotv = vec0.Div(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "Mul": - gotv = vec0.Mul(vec1) - case "MulByPowOf2": - gotv = vec0.MulByPowOf2(vec1) - case "PairwiseAdd": - gotv = vec0.PairwiseAdd(vec1) - case "PairwiseSub": - gotv = vec0.PairwiseSub(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - - default: - t.Errorf("Unknown method: Float64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x4BinaryMasked(t *testing.T, v0 []float64, v1 []float64, v2 []int64, want []float64, which string) { - t.Helper() - var gotv simd.Float64x4 - got := make([]float64, len(want)) - vec0 := simd.LoadFloat64x4Slice(v0) - vec1 := simd.LoadFloat64x4Slice(v1) - vec2 := simd.LoadInt64x4Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask64x4()) - case "DivMasked": - gotv = vec0.DivMasked(vec1, vec2.AsMask64x4()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask64x4()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask64x4()) - case "MulByPowOf2Masked": - gotv = vec0.MulByPowOf2Masked(vec1, vec2.AsMask64x4()) - case "MulMasked": - gotv = vec0.MulMasked(vec1, vec2.AsMask64x4()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask64x4()) - - default: - t.Errorf("Unknown method: Float64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x4Compare(t *testing.T, v0 []float64, v1 []float64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x4 - got := make([]int64, len(want)) - vec0 := simd.LoadFloat64x4Slice(v0) - vec1 := simd.LoadFloat64x4Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt64x4() - case "Greater": - gotv = vec0.Greater(vec1).AsInt64x4() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt64x4() - case "IsNan": - gotv = vec0.IsNan(vec1).AsInt64x4() - case "Less": - gotv = vec0.Less(vec1).AsInt64x4() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt64x4() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt64x4() - - default: - t.Errorf("Unknown method: Float64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x4Mask64x4Float64x4(t *testing.T, v0 []float64, v1 []int64, want []float64, which string) { - t.Helper() - var gotv simd.Float64x4 - got := make([]float64, len(want)) - vec0 := simd.LoadFloat64x4Slice(v0) - vec1 := simd.LoadInt64x4Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask64x4()) - - default: - t.Errorf("Unknown method: Float64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x4MaskedCompare(t *testing.T, v0 []float64, v1 []float64, v2 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x4 - got := make([]int64, len(want)) - vec0 := simd.LoadFloat64x4Slice(v0) - vec1 := simd.LoadFloat64x4Slice(v1) - vec2 := simd.LoadInt64x4Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask64x4()).AsInt64x4() - case "GreaterEqualMasked": - gotv = 
vec0.GreaterEqualMasked(vec1, vec2.AsMask64x4()).AsInt64x4() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask64x4()).AsInt64x4() - case "IsNanMasked": - gotv = vec0.IsNanMasked(vec1, vec2.AsMask64x4()).AsInt64x4() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask64x4()).AsInt64x4() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask64x4()).AsInt64x4() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask64x4()).AsInt64x4() - - default: - t.Errorf("Unknown method: Float64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x4Ternary(t *testing.T, v0 []float64, v1 []float64, v2 []float64, want []float64, which string) { - t.Helper() - var gotv simd.Float64x4 - got := make([]float64, len(want)) - vec0 := simd.LoadFloat64x4Slice(v0) - vec1 := simd.LoadFloat64x4Slice(v1) - vec2 := simd.LoadFloat64x4Slice(v2) - switch which { - case "FusedMultiplyAdd": - gotv = vec0.FusedMultiplyAdd(vec1, vec2) - case "FusedMultiplyAddSub": - gotv = vec0.FusedMultiplyAddSub(vec1, vec2) - case "FusedMultiplySubAdd": - gotv = vec0.FusedMultiplySubAdd(vec1, vec2) - - default: - t.Errorf("Unknown method: Float64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x4TernaryMasked(t *testing.T, v0 []float64, v1 []float64, v2 []float64, v3 []int64, want []float64, which string) { - t.Helper() - var gotv simd.Float64x4 - got := make([]float64, len(want)) - vec0 := simd.LoadFloat64x4Slice(v0) - vec1 := simd.LoadFloat64x4Slice(v1) - vec2 := simd.LoadFloat64x4Slice(v2) - vec3 := simd.LoadInt64x4Slice(v3) - switch which { - case "FusedMultiplyAddMasked": - gotv = vec0.FusedMultiplyAddMasked(vec1, vec2, vec3.AsMask64x4()) - case "FusedMultiplyAddSubMasked": - gotv = vec0.FusedMultiplyAddSubMasked(vec1, vec2, vec3.AsMask64x4()) - case "FusedMultiplySubAddMasked": - gotv = vec0.FusedMultiplySubAddMasked(vec1, vec2, vec3.AsMask64x4()) - - default: - t.Errorf("Unknown method: Float64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x4Unary(t *testing.T, v0 []float64, want []float64, which string) { - t.Helper() - var gotv simd.Float64x4 - got := make([]float64, len(want)) - vec0 := simd.LoadFloat64x4Slice(v0) - switch which { - case "ApproximateReciprocal": - gotv = vec0.ApproximateReciprocal() - case "ApproximateReciprocalOfSqrt": - gotv = vec0.ApproximateReciprocalOfSqrt() - case "Ceil": - gotv = vec0.Ceil() - case "Floor": - gotv = vec0.Floor() - case "Round": - gotv = vec0.Round() - case "Sqrt": - gotv = vec0.Sqrt() - case "Trunc": - gotv = vec0.Trunc() - - default: - t.Errorf("Unknown method: Float64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x4UnaryMasked(t *testing.T, v0 []float64, v1 []int64, want []float64, which string) { - t.Helper() - var gotv simd.Float64x4 - got := make([]float64, len(want)) - vec0 := simd.LoadFloat64x4Slice(v0) - vec1 := simd.LoadInt64x4Slice(v1) - switch which { - case "ApproximateReciprocalMasked": - gotv = 
vec0.ApproximateReciprocalMasked(vec1.AsMask64x4()) - case "ApproximateReciprocalOfSqrtMasked": - gotv = vec0.ApproximateReciprocalOfSqrtMasked(vec1.AsMask64x4()) - case "SqrtMasked": - gotv = vec0.SqrtMasked(vec1.AsMask64x4()) - - default: - t.Errorf("Unknown method: Float64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x8Binary(t *testing.T, v0 []float64, v1 []float64, want []float64, which string) { - t.Helper() - var gotv simd.Float64x8 - got := make([]float64, len(want)) - vec0 := simd.LoadFloat64x8Slice(v0) - vec1 := simd.LoadFloat64x8Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "Div": - gotv = vec0.Div(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "Mul": - gotv = vec0.Mul(vec1) - case "MulByPowOf2": - gotv = vec0.MulByPowOf2(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - - default: - t.Errorf("Unknown method: Float64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x8BinaryMasked(t *testing.T, v0 []float64, v1 []float64, v2 []int64, want []float64, which string) { - t.Helper() - var gotv simd.Float64x8 - got := make([]float64, len(want)) - vec0 := simd.LoadFloat64x8Slice(v0) - vec1 := simd.LoadFloat64x8Slice(v1) - vec2 := simd.LoadInt64x8Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask64x8()) - case "DivMasked": - gotv = vec0.DivMasked(vec1, vec2.AsMask64x8()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask64x8()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask64x8()) - case "MulByPowOf2Masked": - gotv = vec0.MulByPowOf2Masked(vec1, vec2.AsMask64x8()) - case "MulMasked": - gotv = vec0.MulMasked(vec1, vec2.AsMask64x8()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask64x8()) - - default: - t.Errorf("Unknown method: Float64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x8Compare(t *testing.T, v0 []float64, v1 []float64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x8 - got := make([]int64, len(want)) - vec0 := simd.LoadFloat64x8Slice(v0) - vec1 := simd.LoadFloat64x8Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt64x8() - case "Greater": - gotv = vec0.Greater(vec1).AsInt64x8() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt64x8() - case "IsNan": - gotv = vec0.IsNan(vec1).AsInt64x8() - case "Less": - gotv = vec0.Less(vec1).AsInt64x8() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt64x8() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt64x8() - - default: - t.Errorf("Unknown method: Float64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x8Mask64x8Float64x8(t *testing.T, v0 []float64, v1 []int64, want []float64, which string) { - t.Helper() - var gotv simd.Float64x8 - got := make([]float64, len(want)) - vec0 := simd.LoadFloat64x8Slice(v0) - vec1 := simd.LoadInt64x8Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask64x8()) - - default: - 
t.Errorf("Unknown method: Float64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x8MaskedCompare(t *testing.T, v0 []float64, v1 []float64, v2 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x8 - got := make([]int64, len(want)) - vec0 := simd.LoadFloat64x8Slice(v0) - vec1 := simd.LoadFloat64x8Slice(v1) - vec2 := simd.LoadInt64x8Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask64x8()).AsInt64x8() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask64x8()).AsInt64x8() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask64x8()).AsInt64x8() - case "IsNanMasked": - gotv = vec0.IsNanMasked(vec1, vec2.AsMask64x8()).AsInt64x8() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask64x8()).AsInt64x8() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask64x8()).AsInt64x8() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask64x8()).AsInt64x8() - - default: - t.Errorf("Unknown method: Float64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x8Ternary(t *testing.T, v0 []float64, v1 []float64, v2 []float64, want []float64, which string) { - t.Helper() - var gotv simd.Float64x8 - got := make([]float64, len(want)) - vec0 := simd.LoadFloat64x8Slice(v0) - vec1 := simd.LoadFloat64x8Slice(v1) - vec2 := simd.LoadFloat64x8Slice(v2) - switch which { - case "FusedMultiplyAdd": - gotv = vec0.FusedMultiplyAdd(vec1, vec2) - case "FusedMultiplyAddSub": - gotv = vec0.FusedMultiplyAddSub(vec1, vec2) - case "FusedMultiplySubAdd": - gotv = vec0.FusedMultiplySubAdd(vec1, vec2) - - default: - t.Errorf("Unknown method: Float64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x8TernaryMasked(t *testing.T, v0 []float64, v1 []float64, v2 []float64, v3 []int64, want []float64, which string) { - t.Helper() - var gotv simd.Float64x8 - got := make([]float64, len(want)) - vec0 := simd.LoadFloat64x8Slice(v0) - vec1 := simd.LoadFloat64x8Slice(v1) - vec2 := simd.LoadFloat64x8Slice(v2) - vec3 := simd.LoadInt64x8Slice(v3) - switch which { - case "FusedMultiplyAddMasked": - gotv = vec0.FusedMultiplyAddMasked(vec1, vec2, vec3.AsMask64x8()) - case "FusedMultiplyAddSubMasked": - gotv = vec0.FusedMultiplyAddSubMasked(vec1, vec2, vec3.AsMask64x8()) - case "FusedMultiplySubAddMasked": - gotv = vec0.FusedMultiplySubAddMasked(vec1, vec2, vec3.AsMask64x8()) - - default: - t.Errorf("Unknown method: Float64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x8Unary(t *testing.T, v0 []float64, want []float64, which string) { - t.Helper() - var gotv simd.Float64x8 - got := make([]float64, len(want)) - vec0 := simd.LoadFloat64x8Slice(v0) - switch which { - case "ApproximateReciprocal": - gotv = vec0.ApproximateReciprocal() - case "ApproximateReciprocalOfSqrt": - gotv = vec0.ApproximateReciprocalOfSqrt() - case "Sqrt": - gotv = vec0.Sqrt() - - default: - t.Errorf("Unknown method: Float64x8.%s", which) - } 
- gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x8UnaryMasked(t *testing.T, v0 []float64, v1 []int64, want []float64, which string) { - t.Helper() - var gotv simd.Float64x8 - got := make([]float64, len(want)) - vec0 := simd.LoadFloat64x8Slice(v0) - vec1 := simd.LoadInt64x8Slice(v1) - switch which { - case "ApproximateReciprocalMasked": - gotv = vec0.ApproximateReciprocalMasked(vec1.AsMask64x8()) - case "ApproximateReciprocalOfSqrtMasked": - gotv = vec0.ApproximateReciprocalOfSqrtMasked(vec1.AsMask64x8()) - case "SqrtMasked": - gotv = vec0.SqrtMasked(vec1.AsMask64x8()) - - default: - t.Errorf("Unknown method: Float64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt8x16Binary(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x16 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x16Slice(v0) - vec1 := simd.LoadInt8x16Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "SaturatedAdd": - gotv = vec0.SaturatedAdd(vec1) - case "SaturatedSub": - gotv = vec0.SaturatedSub(vec1) - case "Sign": - gotv = vec0.Sign(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) - - default: - t.Errorf("Unknown method: Int8x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt8x16BinaryMasked(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x16 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x16Slice(v0) - vec1 := simd.LoadInt8x16Slice(v1) - vec2 := simd.LoadInt8x16Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask8x16()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask8x16()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask8x16()) - case "SaturatedAddMasked": - gotv = vec0.SaturatedAddMasked(vec1, vec2.AsMask8x16()) - case "SaturatedSubMasked": - gotv = vec0.SaturatedSubMasked(vec1, vec2.AsMask8x16()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask8x16()) - - default: - t.Errorf("Unknown method: Int8x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt8x16Compare(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x16 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x16Slice(v0) - vec1 := simd.LoadInt8x16Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt8x16() - case "Greater": - gotv = vec0.Greater(vec1).AsInt8x16() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt8x16() - case "Less": - gotv = vec0.Less(vec1).AsInt8x16() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt8x16() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt8x16() - - default: - t.Errorf("Unknown method: Int8x16.%s", which) - } - gotv.StoreSlice(got) - for i 
:= range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt8x16Mask8x16Int8x16(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x16 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x16Slice(v0) - vec1 := simd.LoadInt8x16Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask8x16()) - - default: - t.Errorf("Unknown method: Int8x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt8x16MaskedCompare(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x16 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x16Slice(v0) - vec1 := simd.LoadInt8x16Slice(v1) - vec2 := simd.LoadInt8x16Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask8x16()).AsInt8x16() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask8x16()).AsInt8x16() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask8x16()).AsInt8x16() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask8x16()).AsInt8x16() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask8x16()).AsInt8x16() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask8x16()).AsInt8x16() - - default: - t.Errorf("Unknown method: Int8x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt8x16Unary(t *testing.T, v0 []int8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x16 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x16Slice(v0) - switch which { - case "Absolute": - gotv = vec0.Absolute() - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Int8x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt8x16UnaryMasked(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x16 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x16Slice(v0) - vec1 := simd.LoadInt8x16Slice(v1) - switch which { - case "AbsoluteMasked": - gotv = vec0.AbsoluteMasked(vec1.AsMask8x16()) - case "PopCountMasked": - gotv = vec0.PopCountMasked(vec1.AsMask8x16()) - - default: - t.Errorf("Unknown method: Int8x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt8x32Binary(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x32 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x32Slice(v0) - vec1 := simd.LoadInt8x32Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "SaturatedAdd": - gotv = vec0.SaturatedAdd(vec1) - case "SaturatedSub": - gotv = vec0.SaturatedSub(vec1) - case "Sign": - gotv = vec0.Sign(vec1) - case "Sub": - gotv = 
vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) - - default: - t.Errorf("Unknown method: Int8x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt8x32BinaryMasked(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x32 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x32Slice(v0) - vec1 := simd.LoadInt8x32Slice(v1) - vec2 := simd.LoadInt8x32Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask8x32()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask8x32()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask8x32()) - case "SaturatedAddMasked": - gotv = vec0.SaturatedAddMasked(vec1, vec2.AsMask8x32()) - case "SaturatedSubMasked": - gotv = vec0.SaturatedSubMasked(vec1, vec2.AsMask8x32()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask8x32()) - - default: - t.Errorf("Unknown method: Int8x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt8x32Compare(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x32 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x32Slice(v0) - vec1 := simd.LoadInt8x32Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt8x32() - case "Greater": - gotv = vec0.Greater(vec1).AsInt8x32() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt8x32() - case "Less": - gotv = vec0.Less(vec1).AsInt8x32() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt8x32() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt8x32() - - default: - t.Errorf("Unknown method: Int8x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt8x32Mask8x32Int8x32(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x32 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x32Slice(v0) - vec1 := simd.LoadInt8x32Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask8x32()) - - default: - t.Errorf("Unknown method: Int8x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt8x32MaskedCompare(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x32 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x32Slice(v0) - vec1 := simd.LoadInt8x32Slice(v1) - vec2 := simd.LoadInt8x32Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask8x32()).AsInt8x32() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask8x32()).AsInt8x32() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask8x32()).AsInt8x32() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask8x32()).AsInt8x32() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask8x32()).AsInt8x32() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask8x32()).AsInt8x32() - - default: - t.Errorf("Unknown method: Int8x32.%s", which) - } - 
gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt8x32Unary(t *testing.T, v0 []int8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x32 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x32Slice(v0) - switch which { - case "Absolute": - gotv = vec0.Absolute() - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Int8x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt8x32UnaryMasked(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x32 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x32Slice(v0) - vec1 := simd.LoadInt8x32Slice(v1) - switch which { - case "AbsoluteMasked": - gotv = vec0.AbsoluteMasked(vec1.AsMask8x32()) - case "PopCountMasked": - gotv = vec0.PopCountMasked(vec1.AsMask8x32()) - - default: - t.Errorf("Unknown method: Int8x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt8x64Binary(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x64 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x64Slice(v0) - vec1 := simd.LoadInt8x64Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "SaturatedAdd": - gotv = vec0.SaturatedAdd(vec1) - case "SaturatedSub": - gotv = vec0.SaturatedSub(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - - default: - t.Errorf("Unknown method: Int8x64.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt8x64BinaryMasked(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x64 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x64Slice(v0) - vec1 := simd.LoadInt8x64Slice(v1) - vec2 := simd.LoadInt8x64Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask8x64()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask8x64()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask8x64()) - case "SaturatedAddMasked": - gotv = vec0.SaturatedAddMasked(vec1, vec2.AsMask8x64()) - case "SaturatedSubMasked": - gotv = vec0.SaturatedSubMasked(vec1, vec2.AsMask8x64()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask8x64()) - - default: - t.Errorf("Unknown method: Int8x64.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt8x64Compare(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x64 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x64Slice(v0) - vec1 := simd.LoadInt8x64Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt8x64() - case "Greater": - gotv = vec0.Greater(vec1).AsInt8x64() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt8x64() - case "Less": - gotv = vec0.Less(vec1).AsInt8x64() - case "LessEqual": - gotv = 
vec0.LessEqual(vec1).AsInt8x64() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt8x64() - - default: - t.Errorf("Unknown method: Int8x64.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt8x64Mask8x64Int8x64(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x64 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x64Slice(v0) - vec1 := simd.LoadInt8x64Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask8x64()) - - default: - t.Errorf("Unknown method: Int8x64.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt8x64MaskedCompare(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x64 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x64Slice(v0) - vec1 := simd.LoadInt8x64Slice(v1) - vec2 := simd.LoadInt8x64Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask8x64()).AsInt8x64() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask8x64()).AsInt8x64() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask8x64()).AsInt8x64() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask8x64()).AsInt8x64() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask8x64()).AsInt8x64() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask8x64()).AsInt8x64() - - default: - t.Errorf("Unknown method: Int8x64.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt8x64Unary(t *testing.T, v0 []int8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x64 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x64Slice(v0) - switch which { - case "Absolute": - gotv = vec0.Absolute() - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Int8x64.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt8x64UnaryMasked(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x64 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x64Slice(v0) - vec1 := simd.LoadInt8x64Slice(v1) - switch which { - case "AbsoluteMasked": - gotv = vec0.AbsoluteMasked(vec1.AsMask8x64()) - case "PopCountMasked": - gotv = vec0.PopCountMasked(vec1.AsMask8x64()) - - default: - t.Errorf("Unknown method: Int8x64.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x8Binary(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x8 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x8Slice(v0) - vec1 := simd.LoadInt16x8Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "MulHigh": - 
gotv = vec0.MulHigh(vec1) - case "MulLow": - gotv = vec0.MulLow(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "PairwiseAdd": - gotv = vec0.PairwiseAdd(vec1) - case "PairwiseSub": - gotv = vec0.PairwiseSub(vec1) - case "SaturatedAdd": - gotv = vec0.SaturatedAdd(vec1) - case "SaturatedPairwiseAdd": - gotv = vec0.SaturatedPairwiseAdd(vec1) - case "SaturatedPairwiseSub": - gotv = vec0.SaturatedPairwiseSub(vec1) - case "SaturatedSub": - gotv = vec0.SaturatedSub(vec1) - case "ShiftLeft": - gotv = vec0.ShiftLeft(vec1) - case "ShiftRight": - gotv = vec0.ShiftRight(vec1) - case "Sign": - gotv = vec0.Sign(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) - - default: - t.Errorf("Unknown method: Int16x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x8BinaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x8 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x8Slice(v0) - vec1 := simd.LoadInt16x8Slice(v1) - vec2 := simd.LoadInt16x8Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask16x8()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask16x8()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask16x8()) - case "MulHighMasked": - gotv = vec0.MulHighMasked(vec1, vec2.AsMask16x8()) - case "MulLowMasked": - gotv = vec0.MulLowMasked(vec1, vec2.AsMask16x8()) - case "SaturatedAddMasked": - gotv = vec0.SaturatedAddMasked(vec1, vec2.AsMask16x8()) - case "SaturatedSubMasked": - gotv = vec0.SaturatedSubMasked(vec1, vec2.AsMask16x8()) - case "ShiftLeftMasked": - gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask16x8()) - case "ShiftRightMasked": - gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask16x8()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask16x8()) - - default: - t.Errorf("Unknown method: Int16x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x8BinaryMaskedWiden(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int32, which string) { - t.Helper() - var gotv simd.Int32x4 - got := make([]int32, len(want)) - vec0 := simd.LoadInt16x8Slice(v0) - vec1 := simd.LoadInt16x8Slice(v1) - vec2 := simd.LoadInt16x8Slice(v2) - switch which { - case "PairDotProdMasked": - gotv = vec0.PairDotProdMasked(vec1, vec2.AsMask16x8()) - - default: - t.Errorf("Unknown method: Int16x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x8BinaryWiden(t *testing.T, v0 []int16, v1 []int16, want []int32, which string) { - t.Helper() - var gotv simd.Int32x4 - got := make([]int32, len(want)) - vec0 := simd.LoadInt16x8Slice(v0) - vec1 := simd.LoadInt16x8Slice(v1) - switch which { - case "PairDotProd": - gotv = vec0.PairDotProd(vec1) - - default: - t.Errorf("Unknown method: Int16x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x8Compare(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x8 - got := make([]int16, len(want)) - vec0 
:= simd.LoadInt16x8Slice(v0) - vec1 := simd.LoadInt16x8Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt16x8() - case "Greater": - gotv = vec0.Greater(vec1).AsInt16x8() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt16x8() - case "Less": - gotv = vec0.Less(vec1).AsInt16x8() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt16x8() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt16x8() - - default: - t.Errorf("Unknown method: Int16x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x8Mask16x8Int16x8(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x8 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x8Slice(v0) - vec1 := simd.LoadInt16x8Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask16x8()) - - default: - t.Errorf("Unknown method: Int16x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x8MaskedCompare(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x8 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x8Slice(v0) - vec1 := simd.LoadInt16x8Slice(v1) - vec2 := simd.LoadInt16x8Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask16x8()).AsInt16x8() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask16x8()).AsInt16x8() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask16x8()).AsInt16x8() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask16x8()).AsInt16x8() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask16x8()).AsInt16x8() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask16x8()).AsInt16x8() - - default: - t.Errorf("Unknown method: Int16x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x8Ternary(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x8 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x8Slice(v0) - vec1 := simd.LoadInt16x8Slice(v1) - vec2 := simd.LoadInt16x8Slice(v2) - switch which { - case "ShiftLeftAndFillUpperFrom": - gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) - case "ShiftRightAndFillUpperFrom": - gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) - - default: - t.Errorf("Unknown method: Int16x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x8TernaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, v3 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x8 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x8Slice(v0) - vec1 := simd.LoadInt16x8Slice(v1) - vec2 := simd.LoadInt16x8Slice(v2) - vec3 := simd.LoadInt16x8Slice(v3) - switch which { - case "ShiftLeftAndFillUpperFromMasked": - gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask16x8()) - case "ShiftRightAndFillUpperFromMasked": - gotv = 
vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask16x8()) - - default: - t.Errorf("Unknown method: Int16x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x8Unary(t *testing.T, v0 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x8 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x8Slice(v0) - switch which { - case "Absolute": - gotv = vec0.Absolute() - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Int16x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x8UnaryMasked(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x8 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x8Slice(v0) - vec1 := simd.LoadInt16x8Slice(v1) - switch which { - case "AbsoluteMasked": - gotv = vec0.AbsoluteMasked(vec1.AsMask16x8()) - case "PopCountMasked": - gotv = vec0.PopCountMasked(vec1.AsMask16x8()) - - default: - t.Errorf("Unknown method: Int16x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x16Binary(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x16 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x16Slice(v0) - vec1 := simd.LoadInt16x16Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "MulHigh": - gotv = vec0.MulHigh(vec1) - case "MulLow": - gotv = vec0.MulLow(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "PairwiseAdd": - gotv = vec0.PairwiseAdd(vec1) - case "PairwiseSub": - gotv = vec0.PairwiseSub(vec1) - case "SaturatedAdd": - gotv = vec0.SaturatedAdd(vec1) - case "SaturatedPairwiseAdd": - gotv = vec0.SaturatedPairwiseAdd(vec1) - case "SaturatedPairwiseSub": - gotv = vec0.SaturatedPairwiseSub(vec1) - case "SaturatedSub": - gotv = vec0.SaturatedSub(vec1) - case "ShiftLeft": - gotv = vec0.ShiftLeft(vec1) - case "ShiftRight": - gotv = vec0.ShiftRight(vec1) - case "Sign": - gotv = vec0.Sign(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) - - default: - t.Errorf("Unknown method: Int16x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x16BinaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x16 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x16Slice(v0) - vec1 := simd.LoadInt16x16Slice(v1) - vec2 := simd.LoadInt16x16Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask16x16()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask16x16()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask16x16()) - case "MulHighMasked": - gotv = vec0.MulHighMasked(vec1, vec2.AsMask16x16()) - case "MulLowMasked": - gotv = vec0.MulLowMasked(vec1, vec2.AsMask16x16()) - case "SaturatedAddMasked": - gotv = 
vec0.SaturatedAddMasked(vec1, vec2.AsMask16x16()) - case "SaturatedSubMasked": - gotv = vec0.SaturatedSubMasked(vec1, vec2.AsMask16x16()) - case "ShiftLeftMasked": - gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask16x16()) - case "ShiftRightMasked": - gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask16x16()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask16x16()) - - default: - t.Errorf("Unknown method: Int16x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x16BinaryMaskedWiden(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int32, which string) { - t.Helper() - var gotv simd.Int32x8 - got := make([]int32, len(want)) - vec0 := simd.LoadInt16x16Slice(v0) - vec1 := simd.LoadInt16x16Slice(v1) - vec2 := simd.LoadInt16x16Slice(v2) - switch which { - case "PairDotProdMasked": - gotv = vec0.PairDotProdMasked(vec1, vec2.AsMask16x16()) - - default: - t.Errorf("Unknown method: Int16x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x16BinaryWiden(t *testing.T, v0 []int16, v1 []int16, want []int32, which string) { - t.Helper() - var gotv simd.Int32x8 - got := make([]int32, len(want)) - vec0 := simd.LoadInt16x16Slice(v0) - vec1 := simd.LoadInt16x16Slice(v1) - switch which { - case "PairDotProd": - gotv = vec0.PairDotProd(vec1) - - default: - t.Errorf("Unknown method: Int16x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x16Compare(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x16 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x16Slice(v0) - vec1 := simd.LoadInt16x16Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt16x16() - case "Greater": - gotv = vec0.Greater(vec1).AsInt16x16() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt16x16() - case "Less": - gotv = vec0.Less(vec1).AsInt16x16() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt16x16() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt16x16() - - default: - t.Errorf("Unknown method: Int16x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x16Mask16x16Int16x16(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x16 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x16Slice(v0) - vec1 := simd.LoadInt16x16Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask16x16()) - - default: - t.Errorf("Unknown method: Int16x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x16MaskedCompare(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x16 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x16Slice(v0) - vec1 := simd.LoadInt16x16Slice(v1) - vec2 := simd.LoadInt16x16Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, 
vec2.AsMask16x16()).AsInt16x16() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask16x16()).AsInt16x16() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask16x16()).AsInt16x16() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask16x16()).AsInt16x16() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask16x16()).AsInt16x16() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask16x16()).AsInt16x16() - - default: - t.Errorf("Unknown method: Int16x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x16Ternary(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x16 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x16Slice(v0) - vec1 := simd.LoadInt16x16Slice(v1) - vec2 := simd.LoadInt16x16Slice(v2) - switch which { - case "ShiftLeftAndFillUpperFrom": - gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) - case "ShiftRightAndFillUpperFrom": - gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) - - default: - t.Errorf("Unknown method: Int16x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x16TernaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, v3 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x16 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x16Slice(v0) - vec1 := simd.LoadInt16x16Slice(v1) - vec2 := simd.LoadInt16x16Slice(v2) - vec3 := simd.LoadInt16x16Slice(v3) - switch which { - case "ShiftLeftAndFillUpperFromMasked": - gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask16x16()) - case "ShiftRightAndFillUpperFromMasked": - gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask16x16()) - - default: - t.Errorf("Unknown method: Int16x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x16Unary(t *testing.T, v0 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x16 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x16Slice(v0) - switch which { - case "Absolute": - gotv = vec0.Absolute() - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Int16x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x16UnaryMasked(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x16 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x16Slice(v0) - vec1 := simd.LoadInt16x16Slice(v1) - switch which { - case "AbsoluteMasked": - gotv = vec0.AbsoluteMasked(vec1.AsMask16x16()) - case "PopCountMasked": - gotv = vec0.PopCountMasked(vec1.AsMask16x16()) - - default: - t.Errorf("Unknown method: Int16x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x32Binary(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { - t.Helper() - var gotv 
simd.Int16x32 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x32Slice(v0) - vec1 := simd.LoadInt16x32Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "MulHigh": - gotv = vec0.MulHigh(vec1) - case "MulLow": - gotv = vec0.MulLow(vec1) - case "SaturatedAdd": - gotv = vec0.SaturatedAdd(vec1) - case "SaturatedSub": - gotv = vec0.SaturatedSub(vec1) - case "ShiftLeft": - gotv = vec0.ShiftLeft(vec1) - case "ShiftRight": - gotv = vec0.ShiftRight(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - - default: - t.Errorf("Unknown method: Int16x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x32BinaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x32 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x32Slice(v0) - vec1 := simd.LoadInt16x32Slice(v1) - vec2 := simd.LoadInt16x32Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask16x32()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask16x32()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask16x32()) - case "MulHighMasked": - gotv = vec0.MulHighMasked(vec1, vec2.AsMask16x32()) - case "MulLowMasked": - gotv = vec0.MulLowMasked(vec1, vec2.AsMask16x32()) - case "SaturatedAddMasked": - gotv = vec0.SaturatedAddMasked(vec1, vec2.AsMask16x32()) - case "SaturatedSubMasked": - gotv = vec0.SaturatedSubMasked(vec1, vec2.AsMask16x32()) - case "ShiftLeftMasked": - gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask16x32()) - case "ShiftRightMasked": - gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask16x32()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask16x32()) - - default: - t.Errorf("Unknown method: Int16x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x32BinaryMaskedWiden(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int32, which string) { - t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadInt16x32Slice(v0) - vec1 := simd.LoadInt16x32Slice(v1) - vec2 := simd.LoadInt16x32Slice(v2) - switch which { - case "PairDotProdMasked": - gotv = vec0.PairDotProdMasked(vec1, vec2.AsMask16x32()) - - default: - t.Errorf("Unknown method: Int16x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x32BinaryWiden(t *testing.T, v0 []int16, v1 []int16, want []int32, which string) { - t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadInt16x32Slice(v0) - vec1 := simd.LoadInt16x32Slice(v1) - switch which { - case "PairDotProd": - gotv = vec0.PairDotProd(vec1) - - default: - t.Errorf("Unknown method: Int16x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x32Compare(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x32 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x32Slice(v0) - vec1 := 
simd.LoadInt16x32Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt16x32() - case "Greater": - gotv = vec0.Greater(vec1).AsInt16x32() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt16x32() - case "Less": - gotv = vec0.Less(vec1).AsInt16x32() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt16x32() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt16x32() - - default: - t.Errorf("Unknown method: Int16x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x32Mask16x32Int16x32(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x32 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x32Slice(v0) - vec1 := simd.LoadInt16x32Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask16x32()) - - default: - t.Errorf("Unknown method: Int16x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x32MaskedCompare(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x32 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x32Slice(v0) - vec1 := simd.LoadInt16x32Slice(v1) - vec2 := simd.LoadInt16x32Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask16x32()).AsInt16x32() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask16x32()).AsInt16x32() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask16x32()).AsInt16x32() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask16x32()).AsInt16x32() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask16x32()).AsInt16x32() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask16x32()).AsInt16x32() - - default: - t.Errorf("Unknown method: Int16x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x32Ternary(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x32 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x32Slice(v0) - vec1 := simd.LoadInt16x32Slice(v1) - vec2 := simd.LoadInt16x32Slice(v2) - switch which { - case "ShiftLeftAndFillUpperFrom": - gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) - case "ShiftRightAndFillUpperFrom": - gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) - - default: - t.Errorf("Unknown method: Int16x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x32TernaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, v3 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x32 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x32Slice(v0) - vec1 := simd.LoadInt16x32Slice(v1) - vec2 := simd.LoadInt16x32Slice(v2) - vec3 := simd.LoadInt16x32Slice(v3) - switch which { - case "ShiftLeftAndFillUpperFromMasked": - gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask16x32()) - case "ShiftRightAndFillUpperFromMasked": - gotv = 
vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask16x32()) - - default: - t.Errorf("Unknown method: Int16x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x32Unary(t *testing.T, v0 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x32 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x32Slice(v0) - switch which { - case "Absolute": - gotv = vec0.Absolute() - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Int16x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x32UnaryMasked(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x32 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x32Slice(v0) - vec1 := simd.LoadInt16x32Slice(v1) - switch which { - case "AbsoluteMasked": - gotv = vec0.AbsoluteMasked(vec1.AsMask16x32()) - case "PopCountMasked": - gotv = vec0.PopCountMasked(vec1.AsMask16x32()) - - default: - t.Errorf("Unknown method: Int16x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x4Binary(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x4 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x4Slice(v0) - vec1 := simd.LoadInt32x4Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "MulLow": - gotv = vec0.MulLow(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "PairwiseAdd": - gotv = vec0.PairwiseAdd(vec1) - case "PairwiseSub": - gotv = vec0.PairwiseSub(vec1) - case "RotateLeft": - gotv = vec0.RotateLeft(vec1) - case "RotateRight": - gotv = vec0.RotateRight(vec1) - case "ShiftLeft": - gotv = vec0.ShiftLeft(vec1) - case "ShiftRight": - gotv = vec0.ShiftRight(vec1) - case "Sign": - gotv = vec0.Sign(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) - - default: - t.Errorf("Unknown method: Int32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x4BinaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x4 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x4Slice(v0) - vec1 := simd.LoadInt32x4Slice(v1) - vec2 := simd.LoadInt32x4Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask32x4()) - case "AndMasked": - gotv = vec0.AndMasked(vec1, vec2.AsMask32x4()) - case "AndNotMasked": - gotv = vec0.AndNotMasked(vec1, vec2.AsMask32x4()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask32x4()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask32x4()) - case "MulLowMasked": - gotv = vec0.MulLowMasked(vec1, vec2.AsMask32x4()) - case "OrMasked": - gotv = vec0.OrMasked(vec1, vec2.AsMask32x4()) - case "RotateLeftMasked": - gotv = vec0.RotateLeftMasked(vec1, vec2.AsMask32x4()) - case 
"RotateRightMasked": - gotv = vec0.RotateRightMasked(vec1, vec2.AsMask32x4()) - case "ShiftLeftMasked": - gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask32x4()) - case "ShiftRightMasked": - gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask32x4()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask32x4()) - case "XorMasked": - gotv = vec0.XorMasked(vec1, vec2.AsMask32x4()) - - default: - t.Errorf("Unknown method: Int32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x4BinaryWiden(t *testing.T, v0 []int32, v1 []int32, want []int64, which string) { - t.Helper() - var gotv simd.Int64x2 - got := make([]int64, len(want)) - vec0 := simd.LoadInt32x4Slice(v0) - vec1 := simd.LoadInt32x4Slice(v1) - switch which { - case "MulEvenWiden": - gotv = vec0.MulEvenWiden(vec1) - - default: - t.Errorf("Unknown method: Int32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x4Compare(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x4 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x4Slice(v0) - vec1 := simd.LoadInt32x4Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt32x4() - case "Greater": - gotv = vec0.Greater(vec1).AsInt32x4() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt32x4() - case "Less": - gotv = vec0.Less(vec1).AsInt32x4() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt32x4() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt32x4() - - default: - t.Errorf("Unknown method: Int32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x4Mask32x4Int32x4(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x4 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x4Slice(v0) - vec1 := simd.LoadInt32x4Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask32x4()) - - default: - t.Errorf("Unknown method: Int32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x4MaskedCompare(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x4 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x4Slice(v0) - vec1 := simd.LoadInt32x4Slice(v1) - vec2 := simd.LoadInt32x4Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask32x4()).AsInt32x4() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask32x4()).AsInt32x4() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask32x4()).AsInt32x4() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask32x4()).AsInt32x4() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask32x4()).AsInt32x4() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask32x4()).AsInt32x4() - - default: - t.Errorf("Unknown method: Int32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d 
incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x4Ternary(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x4 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x4Slice(v0) - vec1 := simd.LoadInt32x4Slice(v1) - vec2 := simd.LoadInt32x4Slice(v2) - switch which { - case "ShiftLeftAndFillUpperFrom": - gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) - case "ShiftRightAndFillUpperFrom": - gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) - - default: - t.Errorf("Unknown method: Int32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x4TernaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, v3 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x4 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x4Slice(v0) - vec1 := simd.LoadInt32x4Slice(v1) - vec2 := simd.LoadInt32x4Slice(v2) - vec3 := simd.LoadInt32x4Slice(v3) - switch which { - case "ShiftLeftAndFillUpperFromMasked": - gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask32x4()) - case "ShiftRightAndFillUpperFromMasked": - gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask32x4()) - - default: - t.Errorf("Unknown method: Int32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x4Unary(t *testing.T, v0 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x4 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x4Slice(v0) - switch which { - case "Absolute": - gotv = vec0.Absolute() - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Int32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x4UnaryMasked(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x4 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x4Slice(v0) - vec1 := simd.LoadInt32x4Slice(v1) - switch which { - case "AbsoluteMasked": - gotv = vec0.AbsoluteMasked(vec1.AsMask32x4()) - case "PopCountMasked": - gotv = vec0.PopCountMasked(vec1.AsMask32x4()) - - default: - t.Errorf("Unknown method: Int32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x8Binary(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x8 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x8Slice(v0) - vec1 := simd.LoadInt32x8Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "MulLow": - gotv = vec0.MulLow(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "PairwiseAdd": - gotv = vec0.PairwiseAdd(vec1) - case "PairwiseSub": - gotv = vec0.PairwiseSub(vec1) - case "RotateLeft": - gotv = vec0.RotateLeft(vec1) - case "RotateRight": - gotv = vec0.RotateRight(vec1) - case "ShiftLeft": - gotv = vec0.ShiftLeft(vec1) - case 
"ShiftRight": - gotv = vec0.ShiftRight(vec1) - case "Sign": - gotv = vec0.Sign(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) - - default: - t.Errorf("Unknown method: Int32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x8BinaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x8 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x8Slice(v0) - vec1 := simd.LoadInt32x8Slice(v1) - vec2 := simd.LoadInt32x8Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask32x8()) - case "AndMasked": - gotv = vec0.AndMasked(vec1, vec2.AsMask32x8()) - case "AndNotMasked": - gotv = vec0.AndNotMasked(vec1, vec2.AsMask32x8()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask32x8()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask32x8()) - case "MulLowMasked": - gotv = vec0.MulLowMasked(vec1, vec2.AsMask32x8()) - case "OrMasked": - gotv = vec0.OrMasked(vec1, vec2.AsMask32x8()) - case "RotateLeftMasked": - gotv = vec0.RotateLeftMasked(vec1, vec2.AsMask32x8()) - case "RotateRightMasked": - gotv = vec0.RotateRightMasked(vec1, vec2.AsMask32x8()) - case "ShiftLeftMasked": - gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask32x8()) - case "ShiftRightMasked": - gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask32x8()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask32x8()) - case "XorMasked": - gotv = vec0.XorMasked(vec1, vec2.AsMask32x8()) - - default: - t.Errorf("Unknown method: Int32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x8BinaryWiden(t *testing.T, v0 []int32, v1 []int32, want []int64, which string) { - t.Helper() - var gotv simd.Int64x4 - got := make([]int64, len(want)) - vec0 := simd.LoadInt32x8Slice(v0) - vec1 := simd.LoadInt32x8Slice(v1) - switch which { - case "MulEvenWiden": - gotv = vec0.MulEvenWiden(vec1) - - default: - t.Errorf("Unknown method: Int32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x8Compare(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x8 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x8Slice(v0) - vec1 := simd.LoadInt32x8Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt32x8() - case "Greater": - gotv = vec0.Greater(vec1).AsInt32x8() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt32x8() - case "Less": - gotv = vec0.Less(vec1).AsInt32x8() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt32x8() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt32x8() - - default: - t.Errorf("Unknown method: Int32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x8Mask32x8Int32x8(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x8 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x8Slice(v0) - vec1 := simd.LoadInt32x8Slice(v1) - switch which { - case "Compress": - gotv = 
vec0.Compress(vec1.AsMask32x8()) - - default: - t.Errorf("Unknown method: Int32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x8MaskedCompare(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x8 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x8Slice(v0) - vec1 := simd.LoadInt32x8Slice(v1) - vec2 := simd.LoadInt32x8Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask32x8()).AsInt32x8() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask32x8()).AsInt32x8() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask32x8()).AsInt32x8() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask32x8()).AsInt32x8() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask32x8()).AsInt32x8() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask32x8()).AsInt32x8() - - default: - t.Errorf("Unknown method: Int32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x8Ternary(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x8 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x8Slice(v0) - vec1 := simd.LoadInt32x8Slice(v1) - vec2 := simd.LoadInt32x8Slice(v2) - switch which { - case "ShiftLeftAndFillUpperFrom": - gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) - case "ShiftRightAndFillUpperFrom": - gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) - - default: - t.Errorf("Unknown method: Int32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x8TernaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, v3 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x8 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x8Slice(v0) - vec1 := simd.LoadInt32x8Slice(v1) - vec2 := simd.LoadInt32x8Slice(v2) - vec3 := simd.LoadInt32x8Slice(v3) - switch which { - case "ShiftLeftAndFillUpperFromMasked": - gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask32x8()) - case "ShiftRightAndFillUpperFromMasked": - gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask32x8()) - - default: - t.Errorf("Unknown method: Int32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x8Unary(t *testing.T, v0 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x8 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x8Slice(v0) - switch which { - case "Absolute": - gotv = vec0.Absolute() - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Int32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x8UnaryMasked(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x8 - got := make([]int32, 
len(want)) - vec0 := simd.LoadInt32x8Slice(v0) - vec1 := simd.LoadInt32x8Slice(v1) - switch which { - case "AbsoluteMasked": - gotv = vec0.AbsoluteMasked(vec1.AsMask32x8()) - case "PopCountMasked": - gotv = vec0.PopCountMasked(vec1.AsMask32x8()) - - default: - t.Errorf("Unknown method: Int32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x16Binary(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x16Slice(v0) - vec1 := simd.LoadInt32x16Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "MulLow": - gotv = vec0.MulLow(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "RotateLeft": - gotv = vec0.RotateLeft(vec1) - case "RotateRight": - gotv = vec0.RotateRight(vec1) - case "ShiftLeft": - gotv = vec0.ShiftLeft(vec1) - case "ShiftRight": - gotv = vec0.ShiftRight(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) - - default: - t.Errorf("Unknown method: Int32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x16BinaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x16Slice(v0) - vec1 := simd.LoadInt32x16Slice(v1) - vec2 := simd.LoadInt32x16Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask32x16()) - case "AndMasked": - gotv = vec0.AndMasked(vec1, vec2.AsMask32x16()) - case "AndNotMasked": - gotv = vec0.AndNotMasked(vec1, vec2.AsMask32x16()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask32x16()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask32x16()) - case "MulLowMasked": - gotv = vec0.MulLowMasked(vec1, vec2.AsMask32x16()) - case "OrMasked": - gotv = vec0.OrMasked(vec1, vec2.AsMask32x16()) - case "RotateLeftMasked": - gotv = vec0.RotateLeftMasked(vec1, vec2.AsMask32x16()) - case "RotateRightMasked": - gotv = vec0.RotateRightMasked(vec1, vec2.AsMask32x16()) - case "ShiftLeftMasked": - gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask32x16()) - case "ShiftRightMasked": - gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask32x16()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask32x16()) - case "XorMasked": - gotv = vec0.XorMasked(vec1, vec2.AsMask32x16()) - - default: - t.Errorf("Unknown method: Int32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x16Compare(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x16Slice(v0) - vec1 := simd.LoadInt32x16Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt32x16() - case "Greater": - gotv = vec0.Greater(vec1).AsInt32x16() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt32x16() - case "Less": - gotv = vec0.Less(vec1).AsInt32x16() - case "LessEqual": - 
gotv = vec0.LessEqual(vec1).AsInt32x16() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt32x16() - - default: - t.Errorf("Unknown method: Int32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x16Mask32x16Int32x16(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x16Slice(v0) - vec1 := simd.LoadInt32x16Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask32x16()) - - default: - t.Errorf("Unknown method: Int32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x16MaskedCompare(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x16Slice(v0) - vec1 := simd.LoadInt32x16Slice(v1) - vec2 := simd.LoadInt32x16Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask32x16()).AsInt32x16() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask32x16()).AsInt32x16() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() - - default: - t.Errorf("Unknown method: Int32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x16Ternary(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x16Slice(v0) - vec1 := simd.LoadInt32x16Slice(v1) - vec2 := simd.LoadInt32x16Slice(v2) - switch which { - case "ShiftLeftAndFillUpperFrom": - gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) - case "ShiftRightAndFillUpperFrom": - gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) - - default: - t.Errorf("Unknown method: Int32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x16TernaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, v3 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x16Slice(v0) - vec1 := simd.LoadInt32x16Slice(v1) - vec2 := simd.LoadInt32x16Slice(v2) - vec3 := simd.LoadInt32x16Slice(v3) - switch which { - case "ShiftLeftAndFillUpperFromMasked": - gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask32x16()) - case "ShiftRightAndFillUpperFromMasked": - gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask32x16()) - - default: - t.Errorf("Unknown method: Int32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func 
testInt32x16Unary(t *testing.T, v0 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x16Slice(v0) - switch which { - case "Absolute": - gotv = vec0.Absolute() - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Int32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x16UnaryMasked(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x16Slice(v0) - vec1 := simd.LoadInt32x16Slice(v1) - switch which { - case "AbsoluteMasked": - gotv = vec0.AbsoluteMasked(vec1.AsMask32x16()) - case "PopCountMasked": - gotv = vec0.PopCountMasked(vec1.AsMask32x16()) - - default: - t.Errorf("Unknown method: Int32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x2Binary(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x2 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x2Slice(v0) - vec1 := simd.LoadInt64x2Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "MulEvenWiden": - gotv = vec0.MulEvenWiden(vec1) - case "MulLow": - gotv = vec0.MulLow(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "RotateLeft": - gotv = vec0.RotateLeft(vec1) - case "RotateRight": - gotv = vec0.RotateRight(vec1) - case "ShiftLeft": - gotv = vec0.ShiftLeft(vec1) - case "ShiftRight": - gotv = vec0.ShiftRight(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) - - default: - t.Errorf("Unknown method: Int64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x2BinaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x2 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x2Slice(v0) - vec1 := simd.LoadInt64x2Slice(v1) - vec2 := simd.LoadInt64x2Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask64x2()) - case "AndMasked": - gotv = vec0.AndMasked(vec1, vec2.AsMask64x2()) - case "AndNotMasked": - gotv = vec0.AndNotMasked(vec1, vec2.AsMask64x2()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask64x2()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask64x2()) - case "MulEvenWidenMasked": - gotv = vec0.MulEvenWidenMasked(vec1, vec2.AsMask64x2()) - case "MulLowMasked": - gotv = vec0.MulLowMasked(vec1, vec2.AsMask64x2()) - case "OrMasked": - gotv = vec0.OrMasked(vec1, vec2.AsMask64x2()) - case "RotateLeftMasked": - gotv = vec0.RotateLeftMasked(vec1, vec2.AsMask64x2()) - case "RotateRightMasked": - gotv = vec0.RotateRightMasked(vec1, vec2.AsMask64x2()) - case "ShiftLeftMasked": - gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask64x2()) - case "ShiftRightMasked": - gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask64x2()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask64x2()) - case 
"XorMasked": - gotv = vec0.XorMasked(vec1, vec2.AsMask64x2()) - - default: - t.Errorf("Unknown method: Int64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x2Compare(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x2 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x2Slice(v0) - vec1 := simd.LoadInt64x2Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt64x2() - case "Greater": - gotv = vec0.Greater(vec1).AsInt64x2() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt64x2() - case "Less": - gotv = vec0.Less(vec1).AsInt64x2() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt64x2() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt64x2() - - default: - t.Errorf("Unknown method: Int64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x2Mask64x2Int64x2(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x2 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x2Slice(v0) - vec1 := simd.LoadInt64x2Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask64x2()) - - default: - t.Errorf("Unknown method: Int64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x2MaskedCompare(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x2 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x2Slice(v0) - vec1 := simd.LoadInt64x2Slice(v1) - vec2 := simd.LoadInt64x2Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask64x2()).AsInt64x2() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask64x2()).AsInt64x2() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask64x2()).AsInt64x2() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask64x2()).AsInt64x2() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask64x2()).AsInt64x2() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask64x2()).AsInt64x2() - - default: - t.Errorf("Unknown method: Int64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x2Ternary(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x2 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x2Slice(v0) - vec1 := simd.LoadInt64x2Slice(v1) - vec2 := simd.LoadInt64x2Slice(v2) - switch which { - case "ShiftLeftAndFillUpperFrom": - gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) - case "ShiftRightAndFillUpperFrom": - gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) - - default: - t.Errorf("Unknown method: Int64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x2TernaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, v3 []int64, 
want []int64, which string) { - t.Helper() - var gotv simd.Int64x2 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x2Slice(v0) - vec1 := simd.LoadInt64x2Slice(v1) - vec2 := simd.LoadInt64x2Slice(v2) - vec3 := simd.LoadInt64x2Slice(v3) - switch which { - case "ShiftLeftAndFillUpperFromMasked": - gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask64x2()) - case "ShiftRightAndFillUpperFromMasked": - gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask64x2()) - - default: - t.Errorf("Unknown method: Int64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x2Unary(t *testing.T, v0 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x2 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x2Slice(v0) - switch which { - case "Absolute": - gotv = vec0.Absolute() - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Int64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x2UnaryMasked(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x2 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x2Slice(v0) - vec1 := simd.LoadInt64x2Slice(v1) - switch which { - case "AbsoluteMasked": - gotv = vec0.AbsoluteMasked(vec1.AsMask64x2()) - case "PopCountMasked": - gotv = vec0.PopCountMasked(vec1.AsMask64x2()) - - default: - t.Errorf("Unknown method: Int64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x4Binary(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x4 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x4Slice(v0) - vec1 := simd.LoadInt64x4Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "MulEvenWiden": - gotv = vec0.MulEvenWiden(vec1) - case "MulLow": - gotv = vec0.MulLow(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "RotateLeft": - gotv = vec0.RotateLeft(vec1) - case "RotateRight": - gotv = vec0.RotateRight(vec1) - case "ShiftLeft": - gotv = vec0.ShiftLeft(vec1) - case "ShiftRight": - gotv = vec0.ShiftRight(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) - - default: - t.Errorf("Unknown method: Int64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x4BinaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x4 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x4Slice(v0) - vec1 := simd.LoadInt64x4Slice(v1) - vec2 := simd.LoadInt64x4Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask64x4()) - case "AndMasked": - gotv = vec0.AndMasked(vec1, vec2.AsMask64x4()) - case "AndNotMasked": - gotv = vec0.AndNotMasked(vec1, vec2.AsMask64x4()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, 
vec2.AsMask64x4()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask64x4()) - case "MulEvenWidenMasked": - gotv = vec0.MulEvenWidenMasked(vec1, vec2.AsMask64x4()) - case "MulLowMasked": - gotv = vec0.MulLowMasked(vec1, vec2.AsMask64x4()) - case "OrMasked": - gotv = vec0.OrMasked(vec1, vec2.AsMask64x4()) - case "RotateLeftMasked": - gotv = vec0.RotateLeftMasked(vec1, vec2.AsMask64x4()) - case "RotateRightMasked": - gotv = vec0.RotateRightMasked(vec1, vec2.AsMask64x4()) - case "ShiftLeftMasked": - gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask64x4()) - case "ShiftRightMasked": - gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask64x4()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask64x4()) - case "XorMasked": - gotv = vec0.XorMasked(vec1, vec2.AsMask64x4()) - - default: - t.Errorf("Unknown method: Int64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x4Compare(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x4 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x4Slice(v0) - vec1 := simd.LoadInt64x4Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt64x4() - case "Greater": - gotv = vec0.Greater(vec1).AsInt64x4() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt64x4() - case "Less": - gotv = vec0.Less(vec1).AsInt64x4() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt64x4() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt64x4() - - default: - t.Errorf("Unknown method: Int64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x4Mask64x4Int64x4(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x4 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x4Slice(v0) - vec1 := simd.LoadInt64x4Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask64x4()) - - default: - t.Errorf("Unknown method: Int64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x4MaskedCompare(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x4 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x4Slice(v0) - vec1 := simd.LoadInt64x4Slice(v1) - vec2 := simd.LoadInt64x4Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask64x4()).AsInt64x4() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask64x4()).AsInt64x4() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask64x4()).AsInt64x4() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask64x4()).AsInt64x4() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask64x4()).AsInt64x4() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask64x4()).AsInt64x4() - - default: - t.Errorf("Unknown method: Int64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x4Ternary(t *testing.T, v0 []int64, v1 []int64, v2 []int64, 
want []int64, which string) { - t.Helper() - var gotv simd.Int64x4 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x4Slice(v0) - vec1 := simd.LoadInt64x4Slice(v1) - vec2 := simd.LoadInt64x4Slice(v2) - switch which { - case "ShiftLeftAndFillUpperFrom": - gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) - case "ShiftRightAndFillUpperFrom": - gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) - - default: - t.Errorf("Unknown method: Int64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x4TernaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, v3 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x4 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x4Slice(v0) - vec1 := simd.LoadInt64x4Slice(v1) - vec2 := simd.LoadInt64x4Slice(v2) - vec3 := simd.LoadInt64x4Slice(v3) - switch which { - case "ShiftLeftAndFillUpperFromMasked": - gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask64x4()) - case "ShiftRightAndFillUpperFromMasked": - gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask64x4()) - - default: - t.Errorf("Unknown method: Int64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x4Unary(t *testing.T, v0 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x4 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x4Slice(v0) - switch which { - case "Absolute": - gotv = vec0.Absolute() - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Int64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x4UnaryMasked(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x4 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x4Slice(v0) - vec1 := simd.LoadInt64x4Slice(v1) - switch which { - case "AbsoluteMasked": - gotv = vec0.AbsoluteMasked(vec1.AsMask64x4()) - case "PopCountMasked": - gotv = vec0.PopCountMasked(vec1.AsMask64x4()) - - default: - t.Errorf("Unknown method: Int64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x8Binary(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x8 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x8Slice(v0) - vec1 := simd.LoadInt64x8Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "MulEvenWiden": - gotv = vec0.MulEvenWiden(vec1) - case "MulLow": - gotv = vec0.MulLow(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "RotateLeft": - gotv = vec0.RotateLeft(vec1) - case "RotateRight": - gotv = vec0.RotateRight(vec1) - case "ShiftLeft": - gotv = vec0.ShiftLeft(vec1) - case "ShiftRight": - gotv = vec0.ShiftRight(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) - - default: - t.Errorf("Unknown method: Int64x8.%s", which) - } - 
gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x8BinaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x8 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x8Slice(v0) - vec1 := simd.LoadInt64x8Slice(v1) - vec2 := simd.LoadInt64x8Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask64x8()) - case "AndMasked": - gotv = vec0.AndMasked(vec1, vec2.AsMask64x8()) - case "AndNotMasked": - gotv = vec0.AndNotMasked(vec1, vec2.AsMask64x8()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask64x8()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask64x8()) - case "MulEvenWidenMasked": - gotv = vec0.MulEvenWidenMasked(vec1, vec2.AsMask64x8()) - case "MulLowMasked": - gotv = vec0.MulLowMasked(vec1, vec2.AsMask64x8()) - case "OrMasked": - gotv = vec0.OrMasked(vec1, vec2.AsMask64x8()) - case "RotateLeftMasked": - gotv = vec0.RotateLeftMasked(vec1, vec2.AsMask64x8()) - case "RotateRightMasked": - gotv = vec0.RotateRightMasked(vec1, vec2.AsMask64x8()) - case "ShiftLeftMasked": - gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask64x8()) - case "ShiftRightMasked": - gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask64x8()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask64x8()) - case "XorMasked": - gotv = vec0.XorMasked(vec1, vec2.AsMask64x8()) - - default: - t.Errorf("Unknown method: Int64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x8Compare(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x8 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x8Slice(v0) - vec1 := simd.LoadInt64x8Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt64x8() - case "Greater": - gotv = vec0.Greater(vec1).AsInt64x8() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt64x8() - case "Less": - gotv = vec0.Less(vec1).AsInt64x8() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt64x8() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt64x8() - - default: - t.Errorf("Unknown method: Int64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x8Mask64x8Int64x8(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x8 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x8Slice(v0) - vec1 := simd.LoadInt64x8Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask64x8()) - - default: - t.Errorf("Unknown method: Int64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x8MaskedCompare(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x8 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x8Slice(v0) - vec1 := simd.LoadInt64x8Slice(v1) - vec2 := simd.LoadInt64x8Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask64x8()).AsInt64x8() - case "GreaterEqualMasked": 
- gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask64x8()).AsInt64x8() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask64x8()).AsInt64x8() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask64x8()).AsInt64x8() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask64x8()).AsInt64x8() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask64x8()).AsInt64x8() - - default: - t.Errorf("Unknown method: Int64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x8Ternary(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x8 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x8Slice(v0) - vec1 := simd.LoadInt64x8Slice(v1) - vec2 := simd.LoadInt64x8Slice(v2) - switch which { - case "ShiftLeftAndFillUpperFrom": - gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) - case "ShiftRightAndFillUpperFrom": - gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) - - default: - t.Errorf("Unknown method: Int64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x8TernaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, v3 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x8 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x8Slice(v0) - vec1 := simd.LoadInt64x8Slice(v1) - vec2 := simd.LoadInt64x8Slice(v2) - vec3 := simd.LoadInt64x8Slice(v3) - switch which { - case "ShiftLeftAndFillUpperFromMasked": - gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask64x8()) - case "ShiftRightAndFillUpperFromMasked": - gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask64x8()) - - default: - t.Errorf("Unknown method: Int64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x8Unary(t *testing.T, v0 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x8 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x8Slice(v0) - switch which { - case "Absolute": - gotv = vec0.Absolute() - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Int64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x8UnaryMasked(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x8 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x8Slice(v0) - vec1 := simd.LoadInt64x8Slice(v1) - switch which { - case "AbsoluteMasked": - gotv = vec0.AbsoluteMasked(vec1.AsMask64x8()) - case "PopCountMasked": - gotv = vec0.PopCountMasked(vec1.AsMask64x8()) - - default: - t.Errorf("Unknown method: Int64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x16Binary(t *testing.T, v0 []uint8, v1 []uint8, want []uint8, which string) { - t.Helper() - var gotv simd.Uint8x16 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x16Slice(v0) - vec1 := 
simd.LoadUint8x16Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) - case "Average": - gotv = vec0.Average(vec1) - case "GaloisFieldMul": - gotv = vec0.GaloisFieldMul(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "SaturatedAdd": - gotv = vec0.SaturatedAdd(vec1) - case "SaturatedSub": - gotv = vec0.SaturatedSub(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) - - default: - t.Errorf("Unknown method: Uint8x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x16BinaryMasked(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []uint8, which string) { - t.Helper() - var gotv simd.Uint8x16 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x16Slice(v0) - vec1 := simd.LoadUint8x16Slice(v1) - vec2 := simd.LoadInt8x16Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask8x16()) - case "AverageMasked": - gotv = vec0.AverageMasked(vec1, vec2.AsMask8x16()) - case "GaloisFieldMulMasked": - gotv = vec0.GaloisFieldMulMasked(vec1, vec2.AsMask8x16()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask8x16()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask8x16()) - case "SaturatedAddMasked": - gotv = vec0.SaturatedAddMasked(vec1, vec2.AsMask8x16()) - case "SaturatedSubMasked": - gotv = vec0.SaturatedSubMasked(vec1, vec2.AsMask8x16()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask8x16()) - - default: - t.Errorf("Unknown method: Uint8x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x16Compare(t *testing.T, v0 []uint8, v1 []uint8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x16 - got := make([]int8, len(want)) - vec0 := simd.LoadUint8x16Slice(v0) - vec1 := simd.LoadUint8x16Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt8x16() - case "Greater": - gotv = vec0.Greater(vec1).AsInt8x16() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt8x16() - case "Less": - gotv = vec0.Less(vec1).AsInt8x16() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt8x16() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt8x16() - - default: - t.Errorf("Unknown method: Uint8x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x16Int8x16Int16x8(t *testing.T, v0 []uint8, v1 []int8, want []int16, which string) { - t.Helper() - var gotv simd.Int16x8 - got := make([]int16, len(want)) - vec0 := simd.LoadUint8x16Slice(v0) - vec1 := simd.LoadInt8x16Slice(v1) - switch which { - case "SaturatedUnsignedSignedPairDotProd": - gotv = vec0.SaturatedUnsignedSignedPairDotProd(vec1) - - default: - t.Errorf("Unknown method: Uint8x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x16Int8x16Mask16x8Int16x8(t *testing.T, v0 []uint8, v1 []int8, v2 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x8 - got := 
make([]int16, len(want)) - vec0 := simd.LoadUint8x16Slice(v0) - vec1 := simd.LoadInt8x16Slice(v1) - vec2 := simd.LoadInt16x8Slice(v2) - switch which { - case "SaturatedUnsignedSignedPairDotProdMasked": - gotv = vec0.SaturatedUnsignedSignedPairDotProdMasked(vec1, vec2.AsMask16x8()) - - default: - t.Errorf("Unknown method: Uint8x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x16Mask8x16Uint8x16(t *testing.T, v0 []uint8, v1 []int8, want []uint8, which string) { - t.Helper() - var gotv simd.Uint8x16 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x16Slice(v0) - vec1 := simd.LoadInt8x16Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask8x16()) - - default: - t.Errorf("Unknown method: Uint8x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x16MaskedCompare(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x16 - got := make([]int8, len(want)) - vec0 := simd.LoadUint8x16Slice(v0) - vec1 := simd.LoadUint8x16Slice(v1) - vec2 := simd.LoadInt8x16Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask8x16()).AsInt8x16() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask8x16()).AsInt8x16() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask8x16()).AsInt8x16() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask8x16()).AsInt8x16() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask8x16()).AsInt8x16() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask8x16()).AsInt8x16() - - default: - t.Errorf("Unknown method: Uint8x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x16Unary(t *testing.T, v0 []uint8, want []uint8, which string) { - t.Helper() - var gotv simd.Uint8x16 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x16Slice(v0) - switch which { - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Uint8x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x16UnaryMasked(t *testing.T, v0 []uint8, v1 []int8, want []uint8, which string) { - t.Helper() - var gotv simd.Uint8x16 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x16Slice(v0) - vec1 := simd.LoadInt8x16Slice(v1) - switch which { - case "PopCountMasked": - gotv = vec0.PopCountMasked(vec1.AsMask8x16()) - - default: - t.Errorf("Unknown method: Uint8x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x32Binary(t *testing.T, v0 []uint8, v1 []uint8, want []uint8, which string) { - t.Helper() - var gotv simd.Uint8x32 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x32Slice(v0) - vec1 := simd.LoadUint8x32Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = 
vec0.AndNot(vec1) - case "Average": - gotv = vec0.Average(vec1) - case "GaloisFieldMul": - gotv = vec0.GaloisFieldMul(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "SaturatedAdd": - gotv = vec0.SaturatedAdd(vec1) - case "SaturatedSub": - gotv = vec0.SaturatedSub(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) - - default: - t.Errorf("Unknown method: Uint8x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x32BinaryMasked(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []uint8, which string) { - t.Helper() - var gotv simd.Uint8x32 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x32Slice(v0) - vec1 := simd.LoadUint8x32Slice(v1) - vec2 := simd.LoadInt8x32Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask8x32()) - case "AverageMasked": - gotv = vec0.AverageMasked(vec1, vec2.AsMask8x32()) - case "GaloisFieldMulMasked": - gotv = vec0.GaloisFieldMulMasked(vec1, vec2.AsMask8x32()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask8x32()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask8x32()) - case "SaturatedAddMasked": - gotv = vec0.SaturatedAddMasked(vec1, vec2.AsMask8x32()) - case "SaturatedSubMasked": - gotv = vec0.SaturatedSubMasked(vec1, vec2.AsMask8x32()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask8x32()) - - default: - t.Errorf("Unknown method: Uint8x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x32Compare(t *testing.T, v0 []uint8, v1 []uint8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x32 - got := make([]int8, len(want)) - vec0 := simd.LoadUint8x32Slice(v0) - vec1 := simd.LoadUint8x32Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt8x32() - case "Greater": - gotv = vec0.Greater(vec1).AsInt8x32() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt8x32() - case "Less": - gotv = vec0.Less(vec1).AsInt8x32() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt8x32() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt8x32() - - default: - t.Errorf("Unknown method: Uint8x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x32Int8x32Int16x16(t *testing.T, v0 []uint8, v1 []int8, want []int16, which string) { - t.Helper() - var gotv simd.Int16x16 - got := make([]int16, len(want)) - vec0 := simd.LoadUint8x32Slice(v0) - vec1 := simd.LoadInt8x32Slice(v1) - switch which { - case "SaturatedUnsignedSignedPairDotProd": - gotv = vec0.SaturatedUnsignedSignedPairDotProd(vec1) - - default: - t.Errorf("Unknown method: Uint8x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x32Int8x32Mask16x16Int16x16(t *testing.T, v0 []uint8, v1 []int8, v2 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x16 - got := make([]int16, len(want)) - vec0 := simd.LoadUint8x32Slice(v0) - vec1 := simd.LoadInt8x32Slice(v1) - vec2 := simd.LoadInt16x16Slice(v2) - switch 
which { - case "SaturatedUnsignedSignedPairDotProdMasked": - gotv = vec0.SaturatedUnsignedSignedPairDotProdMasked(vec1, vec2.AsMask16x16()) - - default: - t.Errorf("Unknown method: Uint8x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x32Mask8x32Uint8x32(t *testing.T, v0 []uint8, v1 []int8, want []uint8, which string) { - t.Helper() - var gotv simd.Uint8x32 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x32Slice(v0) - vec1 := simd.LoadInt8x32Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask8x32()) - - default: - t.Errorf("Unknown method: Uint8x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x32MaskedCompare(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x32 - got := make([]int8, len(want)) - vec0 := simd.LoadUint8x32Slice(v0) - vec1 := simd.LoadUint8x32Slice(v1) - vec2 := simd.LoadInt8x32Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask8x32()).AsInt8x32() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask8x32()).AsInt8x32() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask8x32()).AsInt8x32() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask8x32()).AsInt8x32() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask8x32()).AsInt8x32() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask8x32()).AsInt8x32() - - default: - t.Errorf("Unknown method: Uint8x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x32Unary(t *testing.T, v0 []uint8, want []uint8, which string) { - t.Helper() - var gotv simd.Uint8x32 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x32Slice(v0) - switch which { - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Uint8x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x32UnaryMasked(t *testing.T, v0 []uint8, v1 []int8, want []uint8, which string) { - t.Helper() - var gotv simd.Uint8x32 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x32Slice(v0) - vec1 := simd.LoadInt8x32Slice(v1) - switch which { - case "PopCountMasked": - gotv = vec0.PopCountMasked(vec1.AsMask8x32()) - - default: - t.Errorf("Unknown method: Uint8x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x64Binary(t *testing.T, v0 []uint8, v1 []uint8, want []uint8, which string) { - t.Helper() - var gotv simd.Uint8x64 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x64Slice(v0) - vec1 := simd.LoadUint8x64Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "Average": - gotv = vec0.Average(vec1) - case "GaloisFieldMul": - gotv = vec0.GaloisFieldMul(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "SaturatedAdd": - gotv = 
vec0.SaturatedAdd(vec1) - case "SaturatedSub": - gotv = vec0.SaturatedSub(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - - default: - t.Errorf("Unknown method: Uint8x64.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x64BinaryMasked(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []uint8, which string) { - t.Helper() - var gotv simd.Uint8x64 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x64Slice(v0) - vec1 := simd.LoadUint8x64Slice(v1) - vec2 := simd.LoadInt8x64Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask8x64()) - case "AverageMasked": - gotv = vec0.AverageMasked(vec1, vec2.AsMask8x64()) - case "GaloisFieldMulMasked": - gotv = vec0.GaloisFieldMulMasked(vec1, vec2.AsMask8x64()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask8x64()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask8x64()) - case "SaturatedAddMasked": - gotv = vec0.SaturatedAddMasked(vec1, vec2.AsMask8x64()) - case "SaturatedSubMasked": - gotv = vec0.SaturatedSubMasked(vec1, vec2.AsMask8x64()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask8x64()) - - default: - t.Errorf("Unknown method: Uint8x64.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x64Compare(t *testing.T, v0 []uint8, v1 []uint8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x64 - got := make([]int8, len(want)) - vec0 := simd.LoadUint8x64Slice(v0) - vec1 := simd.LoadUint8x64Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt8x64() - case "Greater": - gotv = vec0.Greater(vec1).AsInt8x64() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt8x64() - case "Less": - gotv = vec0.Less(vec1).AsInt8x64() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt8x64() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt8x64() - - default: - t.Errorf("Unknown method: Uint8x64.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x64Int8x64Int16x32(t *testing.T, v0 []uint8, v1 []int8, want []int16, which string) { - t.Helper() - var gotv simd.Int16x32 - got := make([]int16, len(want)) - vec0 := simd.LoadUint8x64Slice(v0) - vec1 := simd.LoadInt8x64Slice(v1) - switch which { - case "SaturatedUnsignedSignedPairDotProd": - gotv = vec0.SaturatedUnsignedSignedPairDotProd(vec1) - - default: - t.Errorf("Unknown method: Uint8x64.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x64Int8x64Mask16x32Int16x32(t *testing.T, v0 []uint8, v1 []int8, v2 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x32 - got := make([]int16, len(want)) - vec0 := simd.LoadUint8x64Slice(v0) - vec1 := simd.LoadInt8x64Slice(v1) - vec2 := simd.LoadInt16x32Slice(v2) - switch which { - case "SaturatedUnsignedSignedPairDotProdMasked": - gotv = vec0.SaturatedUnsignedSignedPairDotProdMasked(vec1, vec2.AsMask16x32()) - - default: - t.Errorf("Unknown method: Uint8x64.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at 
%d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x64Mask8x64Uint8x64(t *testing.T, v0 []uint8, v1 []int8, want []uint8, which string) { - t.Helper() - var gotv simd.Uint8x64 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x64Slice(v0) - vec1 := simd.LoadInt8x64Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask8x64()) - - default: - t.Errorf("Unknown method: Uint8x64.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x64MaskedCompare(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x64 - got := make([]int8, len(want)) - vec0 := simd.LoadUint8x64Slice(v0) - vec1 := simd.LoadUint8x64Slice(v1) - vec2 := simd.LoadInt8x64Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask8x64()).AsInt8x64() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask8x64()).AsInt8x64() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask8x64()).AsInt8x64() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask8x64()).AsInt8x64() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask8x64()).AsInt8x64() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask8x64()).AsInt8x64() - - default: - t.Errorf("Unknown method: Uint8x64.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x64Unary(t *testing.T, v0 []uint8, want []uint8, which string) { - t.Helper() - var gotv simd.Uint8x64 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x64Slice(v0) - switch which { - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Uint8x64.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x64UnaryMasked(t *testing.T, v0 []uint8, v1 []int8, want []uint8, which string) { - t.Helper() - var gotv simd.Uint8x64 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x64Slice(v0) - vec1 := simd.LoadInt8x64Slice(v1) - switch which { - case "PopCountMasked": - gotv = vec0.PopCountMasked(vec1.AsMask8x64()) - - default: - t.Errorf("Unknown method: Uint8x64.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x8Binary(t *testing.T, v0 []uint16, v1 []uint16, want []uint16, which string) { - t.Helper() - var gotv simd.Uint16x8 - got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x8Slice(v0) - vec1 := simd.LoadUint16x8Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) - case "Average": - gotv = vec0.Average(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "MulHigh": - gotv = vec0.MulHigh(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "PairwiseAdd": - gotv = vec0.PairwiseAdd(vec1) - case "PairwiseSub": - gotv = vec0.PairwiseSub(vec1) - case "SaturatedAdd": - gotv = vec0.SaturatedAdd(vec1) - case "SaturatedSub": - gotv = vec0.SaturatedSub(vec1) - case 
"ShiftLeft": - gotv = vec0.ShiftLeft(vec1) - case "ShiftRight": - gotv = vec0.ShiftRight(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) - - default: - t.Errorf("Unknown method: Uint16x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x8BinaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []uint16, which string) { - t.Helper() - var gotv simd.Uint16x8 - got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x8Slice(v0) - vec1 := simd.LoadUint16x8Slice(v1) - vec2 := simd.LoadInt16x8Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask16x8()) - case "AverageMasked": - gotv = vec0.AverageMasked(vec1, vec2.AsMask16x8()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask16x8()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask16x8()) - case "MulHighMasked": - gotv = vec0.MulHighMasked(vec1, vec2.AsMask16x8()) - case "SaturatedAddMasked": - gotv = vec0.SaturatedAddMasked(vec1, vec2.AsMask16x8()) - case "SaturatedSubMasked": - gotv = vec0.SaturatedSubMasked(vec1, vec2.AsMask16x8()) - case "ShiftLeftMasked": - gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask16x8()) - case "ShiftRightMasked": - gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask16x8()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask16x8()) - - default: - t.Errorf("Unknown method: Uint16x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x8Compare(t *testing.T, v0 []uint16, v1 []uint16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x8 - got := make([]int16, len(want)) - vec0 := simd.LoadUint16x8Slice(v0) - vec1 := simd.LoadUint16x8Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt16x8() - case "Greater": - gotv = vec0.Greater(vec1).AsInt16x8() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt16x8() - case "Less": - gotv = vec0.Less(vec1).AsInt16x8() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt16x8() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt16x8() - - default: - t.Errorf("Unknown method: Uint16x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x8Mask16x8Uint16x8(t *testing.T, v0 []uint16, v1 []int16, want []uint16, which string) { - t.Helper() - var gotv simd.Uint16x8 - got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x8Slice(v0) - vec1 := simd.LoadInt16x8Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask16x8()) - - default: - t.Errorf("Unknown method: Uint16x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x8MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x8 - got := make([]int16, len(want)) - vec0 := simd.LoadUint16x8Slice(v0) - vec1 := simd.LoadUint16x8Slice(v1) - vec2 := simd.LoadInt16x8Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask16x8()).AsInt16x8() - case "GreaterEqualMasked": - gotv = 
vec0.GreaterEqualMasked(vec1, vec2.AsMask16x8()).AsInt16x8() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask16x8()).AsInt16x8() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask16x8()).AsInt16x8() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask16x8()).AsInt16x8() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask16x8()).AsInt16x8() - - default: - t.Errorf("Unknown method: Uint16x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x8Ternary(t *testing.T, v0 []uint16, v1 []uint16, v2 []uint16, want []uint16, which string) { - t.Helper() - var gotv simd.Uint16x8 - got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x8Slice(v0) - vec1 := simd.LoadUint16x8Slice(v1) - vec2 := simd.LoadUint16x8Slice(v2) - switch which { - case "ShiftLeftAndFillUpperFrom": - gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) - case "ShiftRightAndFillUpperFrom": - gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) - - default: - t.Errorf("Unknown method: Uint16x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x8TernaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []uint16, v3 []int16, want []uint16, which string) { - t.Helper() - var gotv simd.Uint16x8 - got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x8Slice(v0) - vec1 := simd.LoadUint16x8Slice(v1) - vec2 := simd.LoadUint16x8Slice(v2) - vec3 := simd.LoadInt16x8Slice(v3) - switch which { - case "ShiftLeftAndFillUpperFromMasked": - gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask16x8()) - case "ShiftRightAndFillUpperFromMasked": - gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask16x8()) - - default: - t.Errorf("Unknown method: Uint16x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x8Unary(t *testing.T, v0 []uint16, want []uint16, which string) { - t.Helper() - var gotv simd.Uint16x8 - got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x8Slice(v0) - switch which { - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Uint16x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x8UnaryMasked(t *testing.T, v0 []uint16, v1 []int16, want []uint16, which string) { - t.Helper() - var gotv simd.Uint16x8 - got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x8Slice(v0) - vec1 := simd.LoadInt16x8Slice(v1) - switch which { - case "PopCountMasked": - gotv = vec0.PopCountMasked(vec1.AsMask16x8()) - - default: - t.Errorf("Unknown method: Uint16x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x16Binary(t *testing.T, v0 []uint16, v1 []uint16, want []uint16, which string) { - t.Helper() - var gotv simd.Uint16x16 - got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x16Slice(v0) - vec1 := simd.LoadUint16x16Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - 
case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) - case "Average": - gotv = vec0.Average(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "MulHigh": - gotv = vec0.MulHigh(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "PairwiseAdd": - gotv = vec0.PairwiseAdd(vec1) - case "PairwiseSub": - gotv = vec0.PairwiseSub(vec1) - case "SaturatedAdd": - gotv = vec0.SaturatedAdd(vec1) - case "SaturatedSub": - gotv = vec0.SaturatedSub(vec1) - case "ShiftLeft": - gotv = vec0.ShiftLeft(vec1) - case "ShiftRight": - gotv = vec0.ShiftRight(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) - - default: - t.Errorf("Unknown method: Uint16x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x16BinaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []uint16, which string) { - t.Helper() - var gotv simd.Uint16x16 - got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x16Slice(v0) - vec1 := simd.LoadUint16x16Slice(v1) - vec2 := simd.LoadInt16x16Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask16x16()) - case "AverageMasked": - gotv = vec0.AverageMasked(vec1, vec2.AsMask16x16()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask16x16()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask16x16()) - case "MulHighMasked": - gotv = vec0.MulHighMasked(vec1, vec2.AsMask16x16()) - case "SaturatedAddMasked": - gotv = vec0.SaturatedAddMasked(vec1, vec2.AsMask16x16()) - case "SaturatedSubMasked": - gotv = vec0.SaturatedSubMasked(vec1, vec2.AsMask16x16()) - case "ShiftLeftMasked": - gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask16x16()) - case "ShiftRightMasked": - gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask16x16()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask16x16()) - - default: - t.Errorf("Unknown method: Uint16x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x16Compare(t *testing.T, v0 []uint16, v1 []uint16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x16 - got := make([]int16, len(want)) - vec0 := simd.LoadUint16x16Slice(v0) - vec1 := simd.LoadUint16x16Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt16x16() - case "Greater": - gotv = vec0.Greater(vec1).AsInt16x16() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt16x16() - case "Less": - gotv = vec0.Less(vec1).AsInt16x16() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt16x16() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt16x16() - - default: - t.Errorf("Unknown method: Uint16x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x16Mask16x16Uint16x16(t *testing.T, v0 []uint16, v1 []int16, want []uint16, which string) { - t.Helper() - var gotv simd.Uint16x16 - got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x16Slice(v0) - vec1 := simd.LoadInt16x16Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask16x16()) - - default: - t.Errorf("Unknown method: Uint16x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if 
got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x16MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x16 - got := make([]int16, len(want)) - vec0 := simd.LoadUint16x16Slice(v0) - vec1 := simd.LoadUint16x16Slice(v1) - vec2 := simd.LoadInt16x16Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask16x16()).AsInt16x16() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask16x16()).AsInt16x16() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask16x16()).AsInt16x16() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask16x16()).AsInt16x16() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask16x16()).AsInt16x16() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask16x16()).AsInt16x16() - - default: - t.Errorf("Unknown method: Uint16x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x16Ternary(t *testing.T, v0 []uint16, v1 []uint16, v2 []uint16, want []uint16, which string) { - t.Helper() - var gotv simd.Uint16x16 - got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x16Slice(v0) - vec1 := simd.LoadUint16x16Slice(v1) - vec2 := simd.LoadUint16x16Slice(v2) - switch which { - case "ShiftLeftAndFillUpperFrom": - gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) - case "ShiftRightAndFillUpperFrom": - gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) - - default: - t.Errorf("Unknown method: Uint16x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x16TernaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []uint16, v3 []int16, want []uint16, which string) { - t.Helper() - var gotv simd.Uint16x16 - got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x16Slice(v0) - vec1 := simd.LoadUint16x16Slice(v1) - vec2 := simd.LoadUint16x16Slice(v2) - vec3 := simd.LoadInt16x16Slice(v3) - switch which { - case "ShiftLeftAndFillUpperFromMasked": - gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask16x16()) - case "ShiftRightAndFillUpperFromMasked": - gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask16x16()) - - default: - t.Errorf("Unknown method: Uint16x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x16Unary(t *testing.T, v0 []uint16, want []uint16, which string) { - t.Helper() - var gotv simd.Uint16x16 - got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x16Slice(v0) - switch which { - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Uint16x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x16UnaryMasked(t *testing.T, v0 []uint16, v1 []int16, want []uint16, which string) { - t.Helper() - var gotv simd.Uint16x16 - got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x16Slice(v0) - vec1 := simd.LoadInt16x16Slice(v1) - switch which { - case "PopCountMasked": 
- gotv = vec0.PopCountMasked(vec1.AsMask16x16()) - - default: - t.Errorf("Unknown method: Uint16x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x32Binary(t *testing.T, v0 []uint16, v1 []uint16, want []uint16, which string) { - t.Helper() - var gotv simd.Uint16x32 - got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x32Slice(v0) - vec1 := simd.LoadUint16x32Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "Average": - gotv = vec0.Average(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "MulHigh": - gotv = vec0.MulHigh(vec1) - case "SaturatedAdd": - gotv = vec0.SaturatedAdd(vec1) - case "SaturatedSub": - gotv = vec0.SaturatedSub(vec1) - case "ShiftLeft": - gotv = vec0.ShiftLeft(vec1) - case "ShiftRight": - gotv = vec0.ShiftRight(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - - default: - t.Errorf("Unknown method: Uint16x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x32BinaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []uint16, which string) { - t.Helper() - var gotv simd.Uint16x32 - got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x32Slice(v0) - vec1 := simd.LoadUint16x32Slice(v1) - vec2 := simd.LoadInt16x32Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask16x32()) - case "AverageMasked": - gotv = vec0.AverageMasked(vec1, vec2.AsMask16x32()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask16x32()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask16x32()) - case "MulHighMasked": - gotv = vec0.MulHighMasked(vec1, vec2.AsMask16x32()) - case "SaturatedAddMasked": - gotv = vec0.SaturatedAddMasked(vec1, vec2.AsMask16x32()) - case "SaturatedSubMasked": - gotv = vec0.SaturatedSubMasked(vec1, vec2.AsMask16x32()) - case "ShiftLeftMasked": - gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask16x32()) - case "ShiftRightMasked": - gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask16x32()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask16x32()) - - default: - t.Errorf("Unknown method: Uint16x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x32Compare(t *testing.T, v0 []uint16, v1 []uint16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x32 - got := make([]int16, len(want)) - vec0 := simd.LoadUint16x32Slice(v0) - vec1 := simd.LoadUint16x32Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt16x32() - case "Greater": - gotv = vec0.Greater(vec1).AsInt16x32() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt16x32() - case "Less": - gotv = vec0.Less(vec1).AsInt16x32() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt16x32() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt16x32() - - default: - t.Errorf("Unknown method: Uint16x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x32Mask16x32Uint16x32(t *testing.T, v0 []uint16, v1 []int16, want []uint16, which string) { - t.Helper() - var gotv 
simd.Uint16x32 - got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x32Slice(v0) - vec1 := simd.LoadInt16x32Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask16x32()) - - default: - t.Errorf("Unknown method: Uint16x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x32MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x32 - got := make([]int16, len(want)) - vec0 := simd.LoadUint16x32Slice(v0) - vec1 := simd.LoadUint16x32Slice(v1) - vec2 := simd.LoadInt16x32Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask16x32()).AsInt16x32() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask16x32()).AsInt16x32() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask16x32()).AsInt16x32() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask16x32()).AsInt16x32() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask16x32()).AsInt16x32() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask16x32()).AsInt16x32() - - default: - t.Errorf("Unknown method: Uint16x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x32Ternary(t *testing.T, v0 []uint16, v1 []uint16, v2 []uint16, want []uint16, which string) { - t.Helper() - var gotv simd.Uint16x32 - got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x32Slice(v0) - vec1 := simd.LoadUint16x32Slice(v1) - vec2 := simd.LoadUint16x32Slice(v2) - switch which { - case "ShiftLeftAndFillUpperFrom": - gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) - case "ShiftRightAndFillUpperFrom": - gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) - - default: - t.Errorf("Unknown method: Uint16x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x32TernaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []uint16, v3 []int16, want []uint16, which string) { - t.Helper() - var gotv simd.Uint16x32 - got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x32Slice(v0) - vec1 := simd.LoadUint16x32Slice(v1) - vec2 := simd.LoadUint16x32Slice(v2) - vec3 := simd.LoadInt16x32Slice(v3) - switch which { - case "ShiftLeftAndFillUpperFromMasked": - gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask16x32()) - case "ShiftRightAndFillUpperFromMasked": - gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask16x32()) - - default: - t.Errorf("Unknown method: Uint16x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x32Unary(t *testing.T, v0 []uint16, want []uint16, which string) { - t.Helper() - var gotv simd.Uint16x32 - got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x32Slice(v0) - switch which { - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Uint16x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got 
%v", i, want[i], got[i]) - } - } -} - -func testUint16x32UnaryMasked(t *testing.T, v0 []uint16, v1 []int16, want []uint16, which string) { - t.Helper() - var gotv simd.Uint16x32 - got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x32Slice(v0) - vec1 := simd.LoadInt16x32Slice(v1) - switch which { - case "PopCountMasked": - gotv = vec0.PopCountMasked(vec1.AsMask16x32()) - - default: - t.Errorf("Unknown method: Uint16x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x4Binary(t *testing.T, v0 []uint32, v1 []uint32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x4 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x4Slice(v0) - vec1 := simd.LoadUint32x4Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "PairwiseAdd": - gotv = vec0.PairwiseAdd(vec1) - case "PairwiseSub": - gotv = vec0.PairwiseSub(vec1) - case "RotateLeft": - gotv = vec0.RotateLeft(vec1) - case "RotateRight": - gotv = vec0.RotateRight(vec1) - case "ShiftLeft": - gotv = vec0.ShiftLeft(vec1) - case "ShiftRight": - gotv = vec0.ShiftRight(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) - - default: - t.Errorf("Unknown method: Uint32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x4BinaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x4 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x4Slice(v0) - vec1 := simd.LoadUint32x4Slice(v1) - vec2 := simd.LoadInt32x4Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask32x4()) - case "AndMasked": - gotv = vec0.AndMasked(vec1, vec2.AsMask32x4()) - case "AndNotMasked": - gotv = vec0.AndNotMasked(vec1, vec2.AsMask32x4()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask32x4()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask32x4()) - case "OrMasked": - gotv = vec0.OrMasked(vec1, vec2.AsMask32x4()) - case "RotateLeftMasked": - gotv = vec0.RotateLeftMasked(vec1, vec2.AsMask32x4()) - case "RotateRightMasked": - gotv = vec0.RotateRightMasked(vec1, vec2.AsMask32x4()) - case "ShiftLeftMasked": - gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask32x4()) - case "ShiftRightMasked": - gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask32x4()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask32x4()) - case "XorMasked": - gotv = vec0.XorMasked(vec1, vec2.AsMask32x4()) - - default: - t.Errorf("Unknown method: Uint32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x4BinaryWiden(t *testing.T, v0 []uint32, v1 []uint32, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x2 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint32x4Slice(v0) - vec1 := simd.LoadUint32x4Slice(v1) - switch which { - case "MulEvenWiden": - gotv = vec0.MulEvenWiden(vec1) - - default: - t.Errorf("Unknown method: Uint32x4.%s", which) - } - 
gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x4Compare(t *testing.T, v0 []uint32, v1 []uint32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x4 - got := make([]int32, len(want)) - vec0 := simd.LoadUint32x4Slice(v0) - vec1 := simd.LoadUint32x4Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt32x4() - case "Greater": - gotv = vec0.Greater(vec1).AsInt32x4() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt32x4() - case "Less": - gotv = vec0.Less(vec1).AsInt32x4() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt32x4() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt32x4() - - default: - t.Errorf("Unknown method: Uint32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x4Mask32x4Uint32x4(t *testing.T, v0 []uint32, v1 []int32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x4 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x4Slice(v0) - vec1 := simd.LoadInt32x4Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask32x4()) - - default: - t.Errorf("Unknown method: Uint32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x4MaskedCompare(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x4 - got := make([]int32, len(want)) - vec0 := simd.LoadUint32x4Slice(v0) - vec1 := simd.LoadUint32x4Slice(v1) - vec2 := simd.LoadInt32x4Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask32x4()).AsInt32x4() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask32x4()).AsInt32x4() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask32x4()).AsInt32x4() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask32x4()).AsInt32x4() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask32x4()).AsInt32x4() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask32x4()).AsInt32x4() - - default: - t.Errorf("Unknown method: Uint32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x4Ternary(t *testing.T, v0 []uint32, v1 []uint32, v2 []uint32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x4 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x4Slice(v0) - vec1 := simd.LoadUint32x4Slice(v1) - vec2 := simd.LoadUint32x4Slice(v2) - switch which { - case "ShiftLeftAndFillUpperFrom": - gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) - case "ShiftRightAndFillUpperFrom": - gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) - - default: - t.Errorf("Unknown method: Uint32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x4TernaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []uint32, v3 []int32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x4 - got := make([]uint32, 
len(want)) - vec0 := simd.LoadUint32x4Slice(v0) - vec1 := simd.LoadUint32x4Slice(v1) - vec2 := simd.LoadUint32x4Slice(v2) - vec3 := simd.LoadInt32x4Slice(v3) - switch which { - case "ShiftLeftAndFillUpperFromMasked": - gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask32x4()) - case "ShiftRightAndFillUpperFromMasked": - gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask32x4()) - - default: - t.Errorf("Unknown method: Uint32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x4Unary(t *testing.T, v0 []uint32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x4 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x4Slice(v0) - switch which { - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Uint32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x4UnaryMasked(t *testing.T, v0 []uint32, v1 []int32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x4 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x4Slice(v0) - vec1 := simd.LoadInt32x4Slice(v1) - switch which { - case "PopCountMasked": - gotv = vec0.PopCountMasked(vec1.AsMask32x4()) - - default: - t.Errorf("Unknown method: Uint32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x8Binary(t *testing.T, v0 []uint32, v1 []uint32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x8 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x8Slice(v0) - vec1 := simd.LoadUint32x8Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "PairwiseAdd": - gotv = vec0.PairwiseAdd(vec1) - case "PairwiseSub": - gotv = vec0.PairwiseSub(vec1) - case "RotateLeft": - gotv = vec0.RotateLeft(vec1) - case "RotateRight": - gotv = vec0.RotateRight(vec1) - case "ShiftLeft": - gotv = vec0.ShiftLeft(vec1) - case "ShiftRight": - gotv = vec0.ShiftRight(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) - - default: - t.Errorf("Unknown method: Uint32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x8BinaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x8 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x8Slice(v0) - vec1 := simd.LoadUint32x8Slice(v1) - vec2 := simd.LoadInt32x8Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask32x8()) - case "AndMasked": - gotv = vec0.AndMasked(vec1, vec2.AsMask32x8()) - case "AndNotMasked": - gotv = vec0.AndNotMasked(vec1, vec2.AsMask32x8()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask32x8()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask32x8()) - case "OrMasked": - gotv = vec0.OrMasked(vec1, vec2.AsMask32x8()) - case 
"RotateLeftMasked": - gotv = vec0.RotateLeftMasked(vec1, vec2.AsMask32x8()) - case "RotateRightMasked": - gotv = vec0.RotateRightMasked(vec1, vec2.AsMask32x8()) - case "ShiftLeftMasked": - gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask32x8()) - case "ShiftRightMasked": - gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask32x8()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask32x8()) - case "XorMasked": - gotv = vec0.XorMasked(vec1, vec2.AsMask32x8()) - - default: - t.Errorf("Unknown method: Uint32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x8BinaryWiden(t *testing.T, v0 []uint32, v1 []uint32, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x4 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint32x8Slice(v0) - vec1 := simd.LoadUint32x8Slice(v1) - switch which { - case "MulEvenWiden": - gotv = vec0.MulEvenWiden(vec1) - - default: - t.Errorf("Unknown method: Uint32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x8Compare(t *testing.T, v0 []uint32, v1 []uint32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x8 - got := make([]int32, len(want)) - vec0 := simd.LoadUint32x8Slice(v0) - vec1 := simd.LoadUint32x8Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt32x8() - case "Greater": - gotv = vec0.Greater(vec1).AsInt32x8() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt32x8() - case "Less": - gotv = vec0.Less(vec1).AsInt32x8() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt32x8() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt32x8() - - default: - t.Errorf("Unknown method: Uint32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x8Mask32x8Uint32x8(t *testing.T, v0 []uint32, v1 []int32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x8 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x8Slice(v0) - vec1 := simd.LoadInt32x8Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask32x8()) - - default: - t.Errorf("Unknown method: Uint32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x8MaskedCompare(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x8 - got := make([]int32, len(want)) - vec0 := simd.LoadUint32x8Slice(v0) - vec1 := simd.LoadUint32x8Slice(v1) - vec2 := simd.LoadInt32x8Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask32x8()).AsInt32x8() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask32x8()).AsInt32x8() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask32x8()).AsInt32x8() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask32x8()).AsInt32x8() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask32x8()).AsInt32x8() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask32x8()).AsInt32x8() - - default: - t.Errorf("Unknown method: Uint32x8.%s", 
which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x8Ternary(t *testing.T, v0 []uint32, v1 []uint32, v2 []uint32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x8 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x8Slice(v0) - vec1 := simd.LoadUint32x8Slice(v1) - vec2 := simd.LoadUint32x8Slice(v2) - switch which { - case "ShiftLeftAndFillUpperFrom": - gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) - case "ShiftRightAndFillUpperFrom": - gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) - - default: - t.Errorf("Unknown method: Uint32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x8TernaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []uint32, v3 []int32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x8 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x8Slice(v0) - vec1 := simd.LoadUint32x8Slice(v1) - vec2 := simd.LoadUint32x8Slice(v2) - vec3 := simd.LoadInt32x8Slice(v3) - switch which { - case "ShiftLeftAndFillUpperFromMasked": - gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask32x8()) - case "ShiftRightAndFillUpperFromMasked": - gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask32x8()) - - default: - t.Errorf("Unknown method: Uint32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x8Unary(t *testing.T, v0 []uint32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x8 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x8Slice(v0) - switch which { - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Uint32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x8UnaryMasked(t *testing.T, v0 []uint32, v1 []int32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x8 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x8Slice(v0) - vec1 := simd.LoadInt32x8Slice(v1) - switch which { - case "PopCountMasked": - gotv = vec0.PopCountMasked(vec1.AsMask32x8()) - - default: - t.Errorf("Unknown method: Uint32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x16Binary(t *testing.T, v0 []uint32, v1 []uint32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x16 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x16Slice(v0) - vec1 := simd.LoadUint32x16Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "RotateLeft": - gotv = vec0.RotateLeft(vec1) - case "RotateRight": - gotv = vec0.RotateRight(vec1) - case "ShiftLeft": - gotv = vec0.ShiftLeft(vec1) - case "ShiftRight": - gotv = vec0.ShiftRight(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - case "Xor": - gotv = 
vec0.Xor(vec1) - - default: - t.Errorf("Unknown method: Uint32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x16BinaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x16 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x16Slice(v0) - vec1 := simd.LoadUint32x16Slice(v1) - vec2 := simd.LoadInt32x16Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask32x16()) - case "AndMasked": - gotv = vec0.AndMasked(vec1, vec2.AsMask32x16()) - case "AndNotMasked": - gotv = vec0.AndNotMasked(vec1, vec2.AsMask32x16()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask32x16()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask32x16()) - case "OrMasked": - gotv = vec0.OrMasked(vec1, vec2.AsMask32x16()) - case "RotateLeftMasked": - gotv = vec0.RotateLeftMasked(vec1, vec2.AsMask32x16()) - case "RotateRightMasked": - gotv = vec0.RotateRightMasked(vec1, vec2.AsMask32x16()) - case "ShiftLeftMasked": - gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask32x16()) - case "ShiftRightMasked": - gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask32x16()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask32x16()) - case "XorMasked": - gotv = vec0.XorMasked(vec1, vec2.AsMask32x16()) - - default: - t.Errorf("Unknown method: Uint32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x16Compare(t *testing.T, v0 []uint32, v1 []uint32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadUint32x16Slice(v0) - vec1 := simd.LoadUint32x16Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt32x16() - case "Greater": - gotv = vec0.Greater(vec1).AsInt32x16() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt32x16() - case "Less": - gotv = vec0.Less(vec1).AsInt32x16() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt32x16() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt32x16() - - default: - t.Errorf("Unknown method: Uint32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x16Mask32x16Uint32x16(t *testing.T, v0 []uint32, v1 []int32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x16 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x16Slice(v0) - vec1 := simd.LoadInt32x16Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask32x16()) - - default: - t.Errorf("Unknown method: Uint32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x16MaskedCompare(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadUint32x16Slice(v0) - vec1 := simd.LoadUint32x16Slice(v1) - vec2 := simd.LoadInt32x16Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() - case "GreaterEqualMasked": - 
gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask32x16()).AsInt32x16() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask32x16()).AsInt32x16() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() - - default: - t.Errorf("Unknown method: Uint32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x16Ternary(t *testing.T, v0 []uint32, v1 []uint32, v2 []uint32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x16 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x16Slice(v0) - vec1 := simd.LoadUint32x16Slice(v1) - vec2 := simd.LoadUint32x16Slice(v2) - switch which { - case "ShiftLeftAndFillUpperFrom": - gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) - case "ShiftRightAndFillUpperFrom": - gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) - - default: - t.Errorf("Unknown method: Uint32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x16TernaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []uint32, v3 []int32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x16 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x16Slice(v0) - vec1 := simd.LoadUint32x16Slice(v1) - vec2 := simd.LoadUint32x16Slice(v2) - vec3 := simd.LoadInt32x16Slice(v3) - switch which { - case "ShiftLeftAndFillUpperFromMasked": - gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask32x16()) - case "ShiftRightAndFillUpperFromMasked": - gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask32x16()) - - default: - t.Errorf("Unknown method: Uint32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x16Unary(t *testing.T, v0 []uint32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x16 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x16Slice(v0) - switch which { - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Uint32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x16UnaryMasked(t *testing.T, v0 []uint32, v1 []int32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x16 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x16Slice(v0) - vec1 := simd.LoadInt32x16Slice(v1) - switch which { - case "PopCountMasked": - gotv = vec0.PopCountMasked(vec1.AsMask32x16()) - - default: - t.Errorf("Unknown method: Uint32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x2Binary(t *testing.T, v0 []uint64, v1 []uint64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x2 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x2Slice(v0) - vec1 := simd.LoadUint64x2Slice(v1) - switch which { - 
case "Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "MulEvenWiden": - gotv = vec0.MulEvenWiden(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "RotateLeft": - gotv = vec0.RotateLeft(vec1) - case "RotateRight": - gotv = vec0.RotateRight(vec1) - case "ShiftLeft": - gotv = vec0.ShiftLeft(vec1) - case "ShiftRight": - gotv = vec0.ShiftRight(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) - - default: - t.Errorf("Unknown method: Uint64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x2BinaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x2 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x2Slice(v0) - vec1 := simd.LoadUint64x2Slice(v1) - vec2 := simd.LoadInt64x2Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask64x2()) - case "AndMasked": - gotv = vec0.AndMasked(vec1, vec2.AsMask64x2()) - case "AndNotMasked": - gotv = vec0.AndNotMasked(vec1, vec2.AsMask64x2()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask64x2()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask64x2()) - case "MulEvenWidenMasked": - gotv = vec0.MulEvenWidenMasked(vec1, vec2.AsMask64x2()) - case "OrMasked": - gotv = vec0.OrMasked(vec1, vec2.AsMask64x2()) - case "RotateLeftMasked": - gotv = vec0.RotateLeftMasked(vec1, vec2.AsMask64x2()) - case "RotateRightMasked": - gotv = vec0.RotateRightMasked(vec1, vec2.AsMask64x2()) - case "ShiftLeftMasked": - gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask64x2()) - case "ShiftRightMasked": - gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask64x2()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask64x2()) - case "XorMasked": - gotv = vec0.XorMasked(vec1, vec2.AsMask64x2()) - - default: - t.Errorf("Unknown method: Uint64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x2Compare(t *testing.T, v0 []uint64, v1 []uint64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x2 - got := make([]int64, len(want)) - vec0 := simd.LoadUint64x2Slice(v0) - vec1 := simd.LoadUint64x2Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt64x2() - case "Greater": - gotv = vec0.Greater(vec1).AsInt64x2() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt64x2() - case "Less": - gotv = vec0.Less(vec1).AsInt64x2() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt64x2() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt64x2() - - default: - t.Errorf("Unknown method: Uint64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x2Mask64x2Uint64x2(t *testing.T, v0 []uint64, v1 []int64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x2 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x2Slice(v0) - vec1 := simd.LoadInt64x2Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask64x2()) - - default: - t.Errorf("Unknown method: Uint64x2.%s", which) - 
} - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x2MaskedCompare(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x2 - got := make([]int64, len(want)) - vec0 := simd.LoadUint64x2Slice(v0) - vec1 := simd.LoadUint64x2Slice(v1) - vec2 := simd.LoadInt64x2Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask64x2()).AsInt64x2() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask64x2()).AsInt64x2() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask64x2()).AsInt64x2() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask64x2()).AsInt64x2() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask64x2()).AsInt64x2() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask64x2()).AsInt64x2() - - default: - t.Errorf("Unknown method: Uint64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x2Ternary(t *testing.T, v0 []uint64, v1 []uint64, v2 []uint64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x2 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x2Slice(v0) - vec1 := simd.LoadUint64x2Slice(v1) - vec2 := simd.LoadUint64x2Slice(v2) - switch which { - case "ShiftLeftAndFillUpperFrom": - gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) - case "ShiftRightAndFillUpperFrom": - gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) - - default: - t.Errorf("Unknown method: Uint64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x2TernaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []uint64, v3 []int64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x2 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x2Slice(v0) - vec1 := simd.LoadUint64x2Slice(v1) - vec2 := simd.LoadUint64x2Slice(v2) - vec3 := simd.LoadInt64x2Slice(v3) - switch which { - case "ShiftLeftAndFillUpperFromMasked": - gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask64x2()) - case "ShiftRightAndFillUpperFromMasked": - gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask64x2()) - - default: - t.Errorf("Unknown method: Uint64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x2Unary(t *testing.T, v0 []uint64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x2 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x2Slice(v0) - switch which { - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Uint64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x2UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x2 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x2Slice(v0) - vec1 := simd.LoadInt64x2Slice(v1) - switch which { - case 
"PopCountMasked": - gotv = vec0.PopCountMasked(vec1.AsMask64x2()) - - default: - t.Errorf("Unknown method: Uint64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x4Binary(t *testing.T, v0 []uint64, v1 []uint64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x4 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x4Slice(v0) - vec1 := simd.LoadUint64x4Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "MulEvenWiden": - gotv = vec0.MulEvenWiden(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "RotateLeft": - gotv = vec0.RotateLeft(vec1) - case "RotateRight": - gotv = vec0.RotateRight(vec1) - case "ShiftLeft": - gotv = vec0.ShiftLeft(vec1) - case "ShiftRight": - gotv = vec0.ShiftRight(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) - - default: - t.Errorf("Unknown method: Uint64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x4BinaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x4 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x4Slice(v0) - vec1 := simd.LoadUint64x4Slice(v1) - vec2 := simd.LoadInt64x4Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask64x4()) - case "AndMasked": - gotv = vec0.AndMasked(vec1, vec2.AsMask64x4()) - case "AndNotMasked": - gotv = vec0.AndNotMasked(vec1, vec2.AsMask64x4()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask64x4()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask64x4()) - case "MulEvenWidenMasked": - gotv = vec0.MulEvenWidenMasked(vec1, vec2.AsMask64x4()) - case "OrMasked": - gotv = vec0.OrMasked(vec1, vec2.AsMask64x4()) - case "RotateLeftMasked": - gotv = vec0.RotateLeftMasked(vec1, vec2.AsMask64x4()) - case "RotateRightMasked": - gotv = vec0.RotateRightMasked(vec1, vec2.AsMask64x4()) - case "ShiftLeftMasked": - gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask64x4()) - case "ShiftRightMasked": - gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask64x4()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask64x4()) - case "XorMasked": - gotv = vec0.XorMasked(vec1, vec2.AsMask64x4()) - - default: - t.Errorf("Unknown method: Uint64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x4Compare(t *testing.T, v0 []uint64, v1 []uint64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x4 - got := make([]int64, len(want)) - vec0 := simd.LoadUint64x4Slice(v0) - vec1 := simd.LoadUint64x4Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt64x4() - case "Greater": - gotv = vec0.Greater(vec1).AsInt64x4() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt64x4() - case "Less": - gotv = vec0.Less(vec1).AsInt64x4() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt64x4() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt64x4() - - default: - t.Errorf("Unknown method: Uint64x4.%s", which) - } 
- gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x4Mask64x4Uint64x4(t *testing.T, v0 []uint64, v1 []int64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x4 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x4Slice(v0) - vec1 := simd.LoadInt64x4Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask64x4()) - - default: - t.Errorf("Unknown method: Uint64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x4MaskedCompare(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x4 - got := make([]int64, len(want)) - vec0 := simd.LoadUint64x4Slice(v0) - vec1 := simd.LoadUint64x4Slice(v1) - vec2 := simd.LoadInt64x4Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask64x4()).AsInt64x4() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask64x4()).AsInt64x4() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask64x4()).AsInt64x4() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask64x4()).AsInt64x4() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask64x4()).AsInt64x4() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask64x4()).AsInt64x4() - - default: - t.Errorf("Unknown method: Uint64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x4Ternary(t *testing.T, v0 []uint64, v1 []uint64, v2 []uint64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x4 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x4Slice(v0) - vec1 := simd.LoadUint64x4Slice(v1) - vec2 := simd.LoadUint64x4Slice(v2) - switch which { - case "ShiftLeftAndFillUpperFrom": - gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) - case "ShiftRightAndFillUpperFrom": - gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) - - default: - t.Errorf("Unknown method: Uint64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x4TernaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []uint64, v3 []int64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x4 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x4Slice(v0) - vec1 := simd.LoadUint64x4Slice(v1) - vec2 := simd.LoadUint64x4Slice(v2) - vec3 := simd.LoadInt64x4Slice(v3) - switch which { - case "ShiftLeftAndFillUpperFromMasked": - gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask64x4()) - case "ShiftRightAndFillUpperFromMasked": - gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask64x4()) - - default: - t.Errorf("Unknown method: Uint64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x4Unary(t *testing.T, v0 []uint64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x4 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x4Slice(v0) - 
switch which { - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Uint64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x4UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x4 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x4Slice(v0) - vec1 := simd.LoadInt64x4Slice(v1) - switch which { - case "PopCountMasked": - gotv = vec0.PopCountMasked(vec1.AsMask64x4()) - - default: - t.Errorf("Unknown method: Uint64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x8Binary(t *testing.T, v0 []uint64, v1 []uint64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x8 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x8Slice(v0) - vec1 := simd.LoadUint64x8Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "MulEvenWiden": - gotv = vec0.MulEvenWiden(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "RotateLeft": - gotv = vec0.RotateLeft(vec1) - case "RotateRight": - gotv = vec0.RotateRight(vec1) - case "ShiftLeft": - gotv = vec0.ShiftLeft(vec1) - case "ShiftRight": - gotv = vec0.ShiftRight(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) - - default: - t.Errorf("Unknown method: Uint64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x8BinaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x8 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x8Slice(v0) - vec1 := simd.LoadUint64x8Slice(v1) - vec2 := simd.LoadInt64x8Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask64x8()) - case "AndMasked": - gotv = vec0.AndMasked(vec1, vec2.AsMask64x8()) - case "AndNotMasked": - gotv = vec0.AndNotMasked(vec1, vec2.AsMask64x8()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask64x8()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask64x8()) - case "MulEvenWidenMasked": - gotv = vec0.MulEvenWidenMasked(vec1, vec2.AsMask64x8()) - case "OrMasked": - gotv = vec0.OrMasked(vec1, vec2.AsMask64x8()) - case "RotateLeftMasked": - gotv = vec0.RotateLeftMasked(vec1, vec2.AsMask64x8()) - case "RotateRightMasked": - gotv = vec0.RotateRightMasked(vec1, vec2.AsMask64x8()) - case "ShiftLeftMasked": - gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask64x8()) - case "ShiftRightMasked": - gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask64x8()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask64x8()) - case "XorMasked": - gotv = vec0.XorMasked(vec1, vec2.AsMask64x8()) - - default: - t.Errorf("Unknown method: Uint64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x8Compare(t *testing.T, v0 []uint64, v1 []uint64, want []int64, which string) { - t.Helper() - var 
gotv simd.Int64x8 - got := make([]int64, len(want)) - vec0 := simd.LoadUint64x8Slice(v0) - vec1 := simd.LoadUint64x8Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt64x8() - case "Greater": - gotv = vec0.Greater(vec1).AsInt64x8() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt64x8() - case "Less": - gotv = vec0.Less(vec1).AsInt64x8() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt64x8() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt64x8() - - default: - t.Errorf("Unknown method: Uint64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x8Mask64x8Uint64x8(t *testing.T, v0 []uint64, v1 []int64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x8 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x8Slice(v0) - vec1 := simd.LoadInt64x8Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask64x8()) - - default: - t.Errorf("Unknown method: Uint64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x8MaskedCompare(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x8 - got := make([]int64, len(want)) - vec0 := simd.LoadUint64x8Slice(v0) - vec1 := simd.LoadUint64x8Slice(v1) - vec2 := simd.LoadInt64x8Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask64x8()).AsInt64x8() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask64x8()).AsInt64x8() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask64x8()).AsInt64x8() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask64x8()).AsInt64x8() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask64x8()).AsInt64x8() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask64x8()).AsInt64x8() - - default: - t.Errorf("Unknown method: Uint64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x8Ternary(t *testing.T, v0 []uint64, v1 []uint64, v2 []uint64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x8 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x8Slice(v0) - vec1 := simd.LoadUint64x8Slice(v1) - vec2 := simd.LoadUint64x8Slice(v2) - switch which { - case "ShiftLeftAndFillUpperFrom": - gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) - case "ShiftRightAndFillUpperFrom": - gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) - - default: - t.Errorf("Unknown method: Uint64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x8TernaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []uint64, v3 []int64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x8 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x8Slice(v0) - vec1 := simd.LoadUint64x8Slice(v1) - vec2 := simd.LoadUint64x8Slice(v2) - vec3 := simd.LoadInt64x8Slice(v3) - switch which { - case "ShiftLeftAndFillUpperFromMasked": - gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, 
vec3.AsMask64x8()) - case "ShiftRightAndFillUpperFromMasked": - gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask64x8()) - - default: - t.Errorf("Unknown method: Uint64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x8Unary(t *testing.T, v0 []uint64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x8 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x8Slice(v0) - switch which { - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Uint64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x8UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x8 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x8Slice(v0) - vec1 := simd.LoadInt64x8Slice(v1) - switch which { - case "PopCountMasked": - gotv = vec0.PopCountMasked(vec1.AsMask64x8()) - - default: - t.Errorf("Unknown method: Uint64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -/* The operations below cannot be tested via wrappers, please test them directly */ - -// CeilWithPrecision -// CeilWithPrecisionMasked -// DiffWithCeilWithPrecision -// DiffWithCeilWithPrecisionMasked -// DiffWithFloorWithPrecision -// DiffWithFloorWithPrecisionMasked -// DiffWithRoundWithPrecision -// DiffWithRoundWithPrecisionMasked -// DiffWithTruncWithPrecision -// DiffWithTruncWithPrecisionMasked -// FloorWithPrecision -// FloorWithPrecisionMasked -// GaloisFieldAffineTransform -// GaloisFieldAffineTransformInverse -// GaloisFieldAffineTransformInverseMasked -// GaloisFieldAffineTransformMasked -// Get128 -// GetElem -// PairDotProdAccumulate -// PairDotProdAccumulateMasked -// Permute -// Permute2 -// Permute2Masked -// PermuteMasked -// RotateAllLeft -// RotateAllLeftMasked -// RotateAllRight -// RotateAllRightMasked -// RoundWithPrecision -// RoundWithPrecisionMasked -// SaturatedPairDotProdAccumulate -// SaturatedPairDotProdAccumulateMasked -// SaturatedUnsignedSignedQuadDotProdAccumulate -// SaturatedUnsignedSignedQuadDotProdAccumulateMasked -// Set128 -// SetElem -// ShiftAllLeft -// ShiftAllLeftAndFillUpperFrom -// ShiftAllLeftAndFillUpperFromMasked -// ShiftAllLeftMasked -// ShiftAllRight -// ShiftAllRightAndFillUpperFrom -// ShiftAllRightAndFillUpperFromMasked -// ShiftAllRightMasked -// TruncWithPrecision -// TruncWithPrecisionMasked -// UnsignedSignedQuadDotProdAccumulate -// UnsignedSignedQuadDotProdAccumulateMasked diff --git a/src/simd/simulation_helpers_test.go b/src/simd/simulation_helpers_test.go new file mode 100644 index 00000000000000..1def39cd92bdf9 --- /dev/null +++ b/src/simd/simulation_helpers_test.go @@ -0,0 +1,204 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build goexperiment.simd && amd64 + +package simd_test + +import "math" + +func less[T number](x, y T) bool { + return x < y +} +func lessEqual[T number](x, y T) bool { + return x <= y +} +func greater[T number](x, y T) bool { + return x > y +} +func greaterEqual[T number](x, y T) bool { + return x >= y +} +func equal[T number](x, y T) bool { + return x == y +} +func notEqual[T number](x, y T) bool { + return x != y +} + +func abs[T number](x T) T { + // TODO this will need a non-standard FP-equality test. + if x == 0 { // true if x is -0. + return x // this is not a negative zero + } + if x < 0 { + return -x + } + return x +} + +func ceil[T float](x T) T { + return T(math.Ceil(float64(x))) +} +func floor[T float](x T) T { + return T(math.Floor(float64(x))) +} +func not[T integer](x T) T { + return ^x +} +func round[T float](x T) T { + return T(math.RoundToEven(float64(x))) +} +func sqrt[T float](x T) T { + return T(math.Sqrt(float64(x))) +} +func trunc[T float](x T) T { + return T(math.Trunc(float64(x))) +} + +func add[T number](x, y T) T { + return x + y +} + +func sub[T number](x, y T) T { + return x - y +} + +func max_[T number](x, y T) T { // "max" lands in infinite recursion + return max(x, y) +} + +func min_[T number](x, y T) T { // "min" lands in infinite recursion + return min(x, y) +} + +// Also mulLow for integers +func mul[T number](x, y T) T { + return x * y +} + +func div[T number](x, y T) T { + return x / y +} + +func and[T integer](x, y T) T { + return x & y +} + +func andNotI[T integer](x, y T) T { + return x & ^y // order corrected to match expectations +} + +func orI[T integer](x, y T) T { + return x | y +} + +func xorI[T integer](x, y T) T { + return x ^ y +} + +func ima[T integer](x, y, z T) T { + return x*y + z +} + +func fma[T float](x, y, z T) T { + return T(math.FMA(float64(x), float64(y), float64(z))) +} + +func addSlice[T number](x, y []T) []T { + return map2[T](add)(x, y) +} + +func subSlice[T number](x, y []T) []T { + return map2[T](sub)(x, y) +} + +func maxSlice[T number](x, y []T) []T { + return map2[T](max_)(x, y) +} + +func minSlice[T number](x, y []T) []T { + return map2[T](min_)(x, y) +} + +// mulLow for integers +func mulSlice[T number](x, y []T) []T { + return map2[T](mul)(x, y) +} + +func divSlice[T number](x, y []T) []T { + return map2[T](div)(x, y) +} + +func andSlice[T integer](x, y []T) []T { + return map2[T](and)(x, y) +} + +func andNotSlice[T integer](x, y []T) []T { + return map2[T](andNotI)(x, y) +} + +func orSlice[T integer](x, y []T) []T { + return map2[T](orI)(x, y) +} + +func xorSlice[T integer](x, y []T) []T { + return map2[T](xorI)(x, y) +} + +func lessSlice[T number](x, y []T) []int64 { + return mapCompare[T](less)(x, y) +} + +func lessEqualSlice[T number](x, y []T) []int64 { + return mapCompare[T](lessEqual)(x, y) +} + +func greaterSlice[T number](x, y []T) []int64 { + return mapCompare[T](greater)(x, y) +} + +func greaterEqualSlice[T number](x, y []T) []int64 { + return mapCompare[T](greaterEqual)(x, y) +} + +func equalSlice[T number](x, y []T) []int64 { + return mapCompare[T](equal)(x, y) +} + +func notEqualSlice[T number](x, y []T) []int64 { + return mapCompare[T](notEqual)(x, y) +} + +func ceilSlice[T float](x []T) []T { + return map1[T](ceil)(x) +} + +func floorSlice[T float](x []T) []T { + return map1[T](floor)(x) +} + +func notSlice[T integer](x []T) []T { + return map1[T](not)(x) +} + +func roundSlice[T float](x []T) []T { + return map1[T](round)(x) +} + +func sqrtSlice[T float](x []T) []T { + return map1[T](sqrt)(x) +} + 
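// Editorial sketch, not part of the patch hunk above or below: the slice helpers
// in this new file lean on small generic combinators (map1, map2, map3,
// mapCompare) that are defined elsewhere in the simd_test package and do not
// appear in this hunk, as do the number/integer/float constraints they use.
// The functions below only illustrate the shape inferred from the call sites;
// the sketch* names, the bodies, and the -1-for-true mask convention in
// sketchMapCompare are assumptions, not the actual implementation (map3 would
// be the three-argument analogue of sketchMap2).

func sketchMap1[T number](f func(T) T) func([]T) []T {
	return func(x []T) []T {
		r := make([]T, len(x))
		for i := range x {
			r[i] = f(x[i]) // apply f element-wise
		}
		return r
	}
}

func sketchMap2[T number](f func(T, T) T) func([]T, []T) []T {
	return func(x, y []T) []T {
		r := make([]T, len(x))
		for i := range x {
			r[i] = f(x[i], y[i]) // combine corresponding elements
		}
		return r
	}
}

func sketchMapCompare[T number](f func(T, T) bool) func([]T, []T) []int64 {
	return func(x, y []T) []int64 {
		r := make([]int64, len(x))
		for i := range x {
			if f(x[i], y[i]) {
				r[i] = -1 // assumed all-ones mask element for "true"
			}
		}
		return r
	}
}

// Under these assumptions, a helper such as addSlice above is simply the
// element-wise combinator applied to add, e.g. sketchMap2[int32](add)(x, y).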
+func truncSlice[T float](x []T) []T { + return map1[T](trunc)(x) +} + +func imaSlice[T integer](x, y, z []T) []T { + return map3[T](ima)(x, y, z) +} + +func fmaSlice[T float](x, y, z []T) []T { + return map3[T](fma)(x, y, z) +} diff --git a/src/simd/slice_amd64.go b/src/simd/slice_amd64.go index 10050e6b9f82a2..62564e44a2185a 100644 --- a/src/simd/slice_amd64.go +++ b/src/simd/slice_amd64.go @@ -1,10 +1,7 @@ -// Code generated by 'go run genslice.go -o slice_amd64.go'; DO NOT EDIT. +// Code generated by 'go run genfiles.go'; DO NOT EDIT. //go:build goexperiment.simd -// The build condition == if the experiment is not on, cmd/api TestCheck will see this and complain -// see also go/doc/comment, where "simd" is inserted to the package list of the experiment is not on. - package simd // LoadInt8x16Slice loads an Int8x16 from a slice of at least 16 int8s diff --git a/src/simd/slicepart_test.go b/src/simd/slicepart_test.go index 8f10ea630b726f..6e047248790711 100644 --- a/src/simd/slicepart_test.go +++ b/src/simd/slicepart_test.go @@ -12,17 +12,10 @@ import ( ) func TestSlicePartInt8x16(t *testing.T) { - a := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} - b := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} - for i := 16; i >= 0; i-- { - u := simd.LoadInt8x16SlicePart(a[:i]) - c := make([]int8, 32, 32) + Do(t, 16, func(a, c []int8) { + u := simd.LoadInt8x16SlicePart(a) u.StoreSlice(c) - checkInt8Slices(t, c, b) - if i > 0 { - b[i-1] = 0 - } - } + }) } func TestSlicePartInt8x32(t *testing.T) { @@ -34,7 +27,7 @@ func TestSlicePartInt8x32(t *testing.T) { u := simd.LoadInt8x32SlicePart(a[:i]) c := make([]int8, 32, 32) u.StoreSlice(c) - checkInt8Slices(t, c, b) + checkSlices(t, c, b) if i > 0 { b[i-1] = 0 } @@ -48,7 +41,7 @@ func TestSlicePartUint8x16(t *testing.T) { u := simd.LoadUint8x16SlicePart(a[:i]) c := make([]uint8, 32, 32) u.StoreSlice(c) - checkUint8Slices(t, c, b) + checkSlices(t, c, b) if i > 0 { b[i-1] = 0 } @@ -64,7 +57,7 @@ func TestSlicePartUint8x32(t *testing.T) { u := simd.LoadUint8x32SlicePart(a[:i]) c := make([]uint8, 32, 32) u.StoreSlice(c) - checkUint8Slices(t, c, b) + checkSlices(t, c, b) if i > 0 { b[i-1] = 0 } @@ -78,7 +71,7 @@ func TestSlicePartInt16x8(t *testing.T) { u := simd.LoadInt16x8SlicePart(a[:i]) c := make([]int16, 16, 16) u.StoreSlice(c) - checkInt16Slices(t, c, b) + checkSlices(t, c, b) if i > 0 { b[i-1] = 0 } @@ -92,7 +85,7 @@ func TestSlicePartInt16x16(t *testing.T) { u := simd.LoadInt16x16SlicePart(a[:i]) c := make([]int16, 16, 16) u.StoreSlice(c) - checkInt16Slices(t, c, b) + checkSlices(t, c, b) if i > 0 { b[i-1] = 0 } @@ -106,7 +99,7 @@ func TestSlicesPartStoreInt8x16(t *testing.T) { v := simd.LoadInt8x16Slice(a) c := make([]int8, 32, 32) v.StoreSlicePart(c[:i]) - checkInt8Slices(t, c, b) + checkSlices(t, c, b) if i > 0 { b[i-1] = 0 } @@ -120,7 +113,7 @@ func TestSlicesPartStoreInt16x8(t *testing.T) { v := simd.LoadInt16x8Slice(a) c := make([]int16, 32, 32) v.StoreSlicePart(c[:i]) - checkInt16Slices(t, c, b) + checkSlices(t, c, b) if i > 0 { b[i-1] = 0 } @@ -134,7 +127,7 @@ func TestSlicesPartStoreInt16x16(t *testing.T) { v := simd.LoadInt16x16Slice(a) c := make([]int16, 32, 32) v.StoreSlicePart(c[:i]) - checkInt16Slices(t, c, b) + checkSlices(t, c, b) if i > 0 { b[i-1] = 0 } @@ -148,7 +141,7 @@ func TestSlicesPartStoreUint8x16(t *testing.T) { v := simd.LoadUint8x16Slice(a) c := make([]uint8, 32, 32) v.StoreSlicePart(c[:i]) - checkUint8Slices(t, c, b) + checkSlices(t, c, b) if i > 0 { b[i-1] = 0 } @@ -162,7 +155,7 @@ func 
TestSlicesPartStoreUint16x16(t *testing.T) { v := simd.LoadUint16x16Slice(a) c := make([]uint16, 32, 32) v.StoreSlicePart(c[:i]) - checkUint16Slices(t, c, b) + checkSlices(t, c, b) if i > 0 { b[i-1] = 0 } @@ -178,7 +171,7 @@ func TestSlicesPartStoreUint8x32(t *testing.T) { v := simd.LoadUint8x32Slice(a) c := make([]uint8, 32, 32) v.StoreSlicePart(c[:i]) - checkUint8Slices(t, c, b) + checkSlices(t, c, b) if i > 0 { b[i-1] = 0 } diff --git a/src/simd/ternary_helpers_test.go b/src/simd/ternary_helpers_test.go new file mode 100644 index 00000000000000..5a7503860f0a25 --- /dev/null +++ b/src/simd/ternary_helpers_test.go @@ -0,0 +1,494 @@ +// Code generated by 'go run genfiles.go'; DO NOT EDIT. + +//go:build goexperiment.simd + +// This file contains functions testing ternary simd methods. +// Each function in this file is specialized for a +// particular simd type x. + +package simd_test + +import ( + "simd" + "testing" +) + +// testInt8x16Ternary tests the simd ternary method f against the expected behavior generated by want +func testInt8x16Ternary(t *testing.T, f func(_, _, _ simd.Int8x16) simd.Int8x16, want func(_, _, _ []int8) []int8) { + n := 16 + t.Helper() + forSliceTriple(t, int8s, n, func(x, y, z []int8) bool { + t.Helper() + a := simd.LoadInt8x16Slice(x) + b := simd.LoadInt8x16Slice(y) + c := simd.LoadInt8x16Slice(z) + g := make([]int8, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testUint8x16Ternary tests the simd ternary method f against the expected behavior generated by want +func testUint8x16Ternary(t *testing.T, f func(_, _, _ simd.Uint8x16) simd.Uint8x16, want func(_, _, _ []uint8) []uint8) { + n := 16 + t.Helper() + forSliceTriple(t, uint8s, n, func(x, y, z []uint8) bool { + t.Helper() + a := simd.LoadUint8x16Slice(x) + b := simd.LoadUint8x16Slice(y) + c := simd.LoadUint8x16Slice(z) + g := make([]uint8, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testInt16x8Ternary tests the simd ternary method f against the expected behavior generated by want +func testInt16x8Ternary(t *testing.T, f func(_, _, _ simd.Int16x8) simd.Int16x8, want func(_, _, _ []int16) []int16) { + n := 8 + t.Helper() + forSliceTriple(t, int16s, n, func(x, y, z []int16) bool { + t.Helper() + a := simd.LoadInt16x8Slice(x) + b := simd.LoadInt16x8Slice(y) + c := simd.LoadInt16x8Slice(z) + g := make([]int16, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testUint16x8Ternary tests the simd ternary method f against the expected behavior generated by want +func testUint16x8Ternary(t *testing.T, f func(_, _, _ simd.Uint16x8) simd.Uint16x8, want func(_, _, _ []uint16) []uint16) { + n := 8 + t.Helper() + forSliceTriple(t, uint16s, n, func(x, y, z []uint16) bool { + t.Helper() + a := simd.LoadUint16x8Slice(x) + b := simd.LoadUint16x8Slice(y) + c := simd.LoadUint16x8Slice(z) + g := make([]uint16, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testInt32x4Ternary tests the simd ternary method f against the expected behavior generated by want +func testInt32x4Ternary(t *testing.T, f 
func(_, _, _ simd.Int32x4) simd.Int32x4, want func(_, _, _ []int32) []int32) { + n := 4 + t.Helper() + forSliceTriple(t, int32s, n, func(x, y, z []int32) bool { + t.Helper() + a := simd.LoadInt32x4Slice(x) + b := simd.LoadInt32x4Slice(y) + c := simd.LoadInt32x4Slice(z) + g := make([]int32, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testUint32x4Ternary tests the simd ternary method f against the expected behavior generated by want +func testUint32x4Ternary(t *testing.T, f func(_, _, _ simd.Uint32x4) simd.Uint32x4, want func(_, _, _ []uint32) []uint32) { + n := 4 + t.Helper() + forSliceTriple(t, uint32s, n, func(x, y, z []uint32) bool { + t.Helper() + a := simd.LoadUint32x4Slice(x) + b := simd.LoadUint32x4Slice(y) + c := simd.LoadUint32x4Slice(z) + g := make([]uint32, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testInt64x2Ternary tests the simd ternary method f against the expected behavior generated by want +func testInt64x2Ternary(t *testing.T, f func(_, _, _ simd.Int64x2) simd.Int64x2, want func(_, _, _ []int64) []int64) { + n := 2 + t.Helper() + forSliceTriple(t, int64s, n, func(x, y, z []int64) bool { + t.Helper() + a := simd.LoadInt64x2Slice(x) + b := simd.LoadInt64x2Slice(y) + c := simd.LoadInt64x2Slice(z) + g := make([]int64, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testUint64x2Ternary tests the simd ternary method f against the expected behavior generated by want +func testUint64x2Ternary(t *testing.T, f func(_, _, _ simd.Uint64x2) simd.Uint64x2, want func(_, _, _ []uint64) []uint64) { + n := 2 + t.Helper() + forSliceTriple(t, uint64s, n, func(x, y, z []uint64) bool { + t.Helper() + a := simd.LoadUint64x2Slice(x) + b := simd.LoadUint64x2Slice(y) + c := simd.LoadUint64x2Slice(z) + g := make([]uint64, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testFloat32x4Ternary tests the simd ternary method f against the expected behavior generated by want +func testFloat32x4Ternary(t *testing.T, f func(_, _, _ simd.Float32x4) simd.Float32x4, want func(_, _, _ []float32) []float32) { + n := 4 + t.Helper() + forSliceTriple(t, float32s, n, func(x, y, z []float32) bool { + t.Helper() + a := simd.LoadFloat32x4Slice(x) + b := simd.LoadFloat32x4Slice(y) + c := simd.LoadFloat32x4Slice(z) + g := make([]float32, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testFloat64x2Ternary tests the simd ternary method f against the expected behavior generated by want +func testFloat64x2Ternary(t *testing.T, f func(_, _, _ simd.Float64x2) simd.Float64x2, want func(_, _, _ []float64) []float64) { + n := 2 + t.Helper() + forSliceTriple(t, float64s, n, func(x, y, z []float64) bool { + t.Helper() + a := simd.LoadFloat64x2Slice(x) + b := simd.LoadFloat64x2Slice(y) + c := simd.LoadFloat64x2Slice(z) + g := make([]float64, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); 
t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testInt8x32Ternary tests the simd ternary method f against the expected behavior generated by want +func testInt8x32Ternary(t *testing.T, f func(_, _, _ simd.Int8x32) simd.Int8x32, want func(_, _, _ []int8) []int8) { + n := 32 + t.Helper() + forSliceTriple(t, int8s, n, func(x, y, z []int8) bool { + t.Helper() + a := simd.LoadInt8x32Slice(x) + b := simd.LoadInt8x32Slice(y) + c := simd.LoadInt8x32Slice(z) + g := make([]int8, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testUint8x32Ternary tests the simd ternary method f against the expected behavior generated by want +func testUint8x32Ternary(t *testing.T, f func(_, _, _ simd.Uint8x32) simd.Uint8x32, want func(_, _, _ []uint8) []uint8) { + n := 32 + t.Helper() + forSliceTriple(t, uint8s, n, func(x, y, z []uint8) bool { + t.Helper() + a := simd.LoadUint8x32Slice(x) + b := simd.LoadUint8x32Slice(y) + c := simd.LoadUint8x32Slice(z) + g := make([]uint8, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testInt16x16Ternary tests the simd ternary method f against the expected behavior generated by want +func testInt16x16Ternary(t *testing.T, f func(_, _, _ simd.Int16x16) simd.Int16x16, want func(_, _, _ []int16) []int16) { + n := 16 + t.Helper() + forSliceTriple(t, int16s, n, func(x, y, z []int16) bool { + t.Helper() + a := simd.LoadInt16x16Slice(x) + b := simd.LoadInt16x16Slice(y) + c := simd.LoadInt16x16Slice(z) + g := make([]int16, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testUint16x16Ternary tests the simd ternary method f against the expected behavior generated by want +func testUint16x16Ternary(t *testing.T, f func(_, _, _ simd.Uint16x16) simd.Uint16x16, want func(_, _, _ []uint16) []uint16) { + n := 16 + t.Helper() + forSliceTriple(t, uint16s, n, func(x, y, z []uint16) bool { + t.Helper() + a := simd.LoadUint16x16Slice(x) + b := simd.LoadUint16x16Slice(y) + c := simd.LoadUint16x16Slice(z) + g := make([]uint16, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testInt32x8Ternary tests the simd ternary method f against the expected behavior generated by want +func testInt32x8Ternary(t *testing.T, f func(_, _, _ simd.Int32x8) simd.Int32x8, want func(_, _, _ []int32) []int32) { + n := 8 + t.Helper() + forSliceTriple(t, int32s, n, func(x, y, z []int32) bool { + t.Helper() + a := simd.LoadInt32x8Slice(x) + b := simd.LoadInt32x8Slice(y) + c := simd.LoadInt32x8Slice(z) + g := make([]int32, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testUint32x8Ternary tests the simd ternary method f against the expected behavior generated by want +func testUint32x8Ternary(t *testing.T, f func(_, _, _ simd.Uint32x8) simd.Uint32x8, want func(_, _, _ []uint32) []uint32) { + n := 8 + t.Helper() + forSliceTriple(t, uint32s, n, func(x, y, z []uint32) bool { + t.Helper() + a := simd.LoadUint32x8Slice(x) + b := 
simd.LoadUint32x8Slice(y) + c := simd.LoadUint32x8Slice(z) + g := make([]uint32, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testInt64x4Ternary tests the simd ternary method f against the expected behavior generated by want +func testInt64x4Ternary(t *testing.T, f func(_, _, _ simd.Int64x4) simd.Int64x4, want func(_, _, _ []int64) []int64) { + n := 4 + t.Helper() + forSliceTriple(t, int64s, n, func(x, y, z []int64) bool { + t.Helper() + a := simd.LoadInt64x4Slice(x) + b := simd.LoadInt64x4Slice(y) + c := simd.LoadInt64x4Slice(z) + g := make([]int64, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testUint64x4Ternary tests the simd ternary method f against the expected behavior generated by want +func testUint64x4Ternary(t *testing.T, f func(_, _, _ simd.Uint64x4) simd.Uint64x4, want func(_, _, _ []uint64) []uint64) { + n := 4 + t.Helper() + forSliceTriple(t, uint64s, n, func(x, y, z []uint64) bool { + t.Helper() + a := simd.LoadUint64x4Slice(x) + b := simd.LoadUint64x4Slice(y) + c := simd.LoadUint64x4Slice(z) + g := make([]uint64, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testFloat32x8Ternary tests the simd ternary method f against the expected behavior generated by want +func testFloat32x8Ternary(t *testing.T, f func(_, _, _ simd.Float32x8) simd.Float32x8, want func(_, _, _ []float32) []float32) { + n := 8 + t.Helper() + forSliceTriple(t, float32s, n, func(x, y, z []float32) bool { + t.Helper() + a := simd.LoadFloat32x8Slice(x) + b := simd.LoadFloat32x8Slice(y) + c := simd.LoadFloat32x8Slice(z) + g := make([]float32, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testFloat64x4Ternary tests the simd ternary method f against the expected behavior generated by want +func testFloat64x4Ternary(t *testing.T, f func(_, _, _ simd.Float64x4) simd.Float64x4, want func(_, _, _ []float64) []float64) { + n := 4 + t.Helper() + forSliceTriple(t, float64s, n, func(x, y, z []float64) bool { + t.Helper() + a := simd.LoadFloat64x4Slice(x) + b := simd.LoadFloat64x4Slice(y) + c := simd.LoadFloat64x4Slice(z) + g := make([]float64, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testInt8x64Ternary tests the simd ternary method f against the expected behavior generated by want +func testInt8x64Ternary(t *testing.T, f func(_, _, _ simd.Int8x64) simd.Int8x64, want func(_, _, _ []int8) []int8) { + n := 64 + t.Helper() + forSliceTriple(t, int8s, n, func(x, y, z []int8) bool { + t.Helper() + a := simd.LoadInt8x64Slice(x) + b := simd.LoadInt8x64Slice(y) + c := simd.LoadInt8x64Slice(z) + g := make([]int8, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testUint8x64Ternary tests the simd ternary method f against the expected behavior generated by want +func testUint8x64Ternary(t *testing.T, f func(_, _, _ 
simd.Uint8x64) simd.Uint8x64, want func(_, _, _ []uint8) []uint8) { + n := 64 + t.Helper() + forSliceTriple(t, uint8s, n, func(x, y, z []uint8) bool { + t.Helper() + a := simd.LoadUint8x64Slice(x) + b := simd.LoadUint8x64Slice(y) + c := simd.LoadUint8x64Slice(z) + g := make([]uint8, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testInt16x32Ternary tests the simd ternary method f against the expected behavior generated by want +func testInt16x32Ternary(t *testing.T, f func(_, _, _ simd.Int16x32) simd.Int16x32, want func(_, _, _ []int16) []int16) { + n := 32 + t.Helper() + forSliceTriple(t, int16s, n, func(x, y, z []int16) bool { + t.Helper() + a := simd.LoadInt16x32Slice(x) + b := simd.LoadInt16x32Slice(y) + c := simd.LoadInt16x32Slice(z) + g := make([]int16, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testUint16x32Ternary tests the simd ternary method f against the expected behavior generated by want +func testUint16x32Ternary(t *testing.T, f func(_, _, _ simd.Uint16x32) simd.Uint16x32, want func(_, _, _ []uint16) []uint16) { + n := 32 + t.Helper() + forSliceTriple(t, uint16s, n, func(x, y, z []uint16) bool { + t.Helper() + a := simd.LoadUint16x32Slice(x) + b := simd.LoadUint16x32Slice(y) + c := simd.LoadUint16x32Slice(z) + g := make([]uint16, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testInt32x16Ternary tests the simd ternary method f against the expected behavior generated by want +func testInt32x16Ternary(t *testing.T, f func(_, _, _ simd.Int32x16) simd.Int32x16, want func(_, _, _ []int32) []int32) { + n := 16 + t.Helper() + forSliceTriple(t, int32s, n, func(x, y, z []int32) bool { + t.Helper() + a := simd.LoadInt32x16Slice(x) + b := simd.LoadInt32x16Slice(y) + c := simd.LoadInt32x16Slice(z) + g := make([]int32, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testUint32x16Ternary tests the simd ternary method f against the expected behavior generated by want +func testUint32x16Ternary(t *testing.T, f func(_, _, _ simd.Uint32x16) simd.Uint32x16, want func(_, _, _ []uint32) []uint32) { + n := 16 + t.Helper() + forSliceTriple(t, uint32s, n, func(x, y, z []uint32) bool { + t.Helper() + a := simd.LoadUint32x16Slice(x) + b := simd.LoadUint32x16Slice(y) + c := simd.LoadUint32x16Slice(z) + g := make([]uint32, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testInt64x8Ternary tests the simd ternary method f against the expected behavior generated by want +func testInt64x8Ternary(t *testing.T, f func(_, _, _ simd.Int64x8) simd.Int64x8, want func(_, _, _ []int64) []int64) { + n := 8 + t.Helper() + forSliceTriple(t, int64s, n, func(x, y, z []int64) bool { + t.Helper() + a := simd.LoadInt64x8Slice(x) + b := simd.LoadInt64x8Slice(y) + c := simd.LoadInt64x8Slice(z) + g := make([]int64, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", 
y); t.Logf("z=%v", z) }) + }) +} + +// testUint64x8Ternary tests the simd ternary method f against the expected behavior generated by want +func testUint64x8Ternary(t *testing.T, f func(_, _, _ simd.Uint64x8) simd.Uint64x8, want func(_, _, _ []uint64) []uint64) { + n := 8 + t.Helper() + forSliceTriple(t, uint64s, n, func(x, y, z []uint64) bool { + t.Helper() + a := simd.LoadUint64x8Slice(x) + b := simd.LoadUint64x8Slice(y) + c := simd.LoadUint64x8Slice(z) + g := make([]uint64, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testFloat32x16Ternary tests the simd ternary method f against the expected behavior generated by want +func testFloat32x16Ternary(t *testing.T, f func(_, _, _ simd.Float32x16) simd.Float32x16, want func(_, _, _ []float32) []float32) { + n := 16 + t.Helper() + forSliceTriple(t, float32s, n, func(x, y, z []float32) bool { + t.Helper() + a := simd.LoadFloat32x16Slice(x) + b := simd.LoadFloat32x16Slice(y) + c := simd.LoadFloat32x16Slice(z) + g := make([]float32, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testFloat64x8Ternary tests the simd ternary method f against the expected behavior generated by want +func testFloat64x8Ternary(t *testing.T, f func(_, _, _ simd.Float64x8) simd.Float64x8, want func(_, _, _ []float64) []float64) { + n := 8 + t.Helper() + forSliceTriple(t, float64s, n, func(x, y, z []float64) bool { + t.Helper() + a := simd.LoadFloat64x8Slice(x) + b := simd.LoadFloat64x8Slice(y) + c := simd.LoadFloat64x8Slice(z) + g := make([]float64, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} diff --git a/src/simd/ternary_test.go b/src/simd/ternary_test.go new file mode 100644 index 00000000000000..afca850d6147e8 --- /dev/null +++ b/src/simd/ternary_test.go @@ -0,0 +1,23 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build goexperiment.simd && amd64 + +package simd_test + +import ( + "simd" + "testing" +) + +func TestFMA(t *testing.T) { + if simd.HasAVX512() { + testFloat32x4Ternary(t, simd.Float32x4.FusedMultiplyAdd, fmaSlice[float32]) + testFloat32x8Ternary(t, simd.Float32x8.FusedMultiplyAdd, fmaSlice[float32]) + testFloat32x16Ternary(t, simd.Float32x16.FusedMultiplyAdd, fmaSlice[float32]) + testFloat64x2Ternary(t, simd.Float64x2.FusedMultiplyAdd, fmaSlice[float64]) + testFloat64x4Ternary(t, simd.Float64x4.FusedMultiplyAdd, fmaSlice[float64]) + testFloat64x8Ternary(t, simd.Float64x8.FusedMultiplyAdd, fmaSlice[float64]) + } +} diff --git a/src/simd/unary_helpers_test.go b/src/simd/unary_helpers_test.go new file mode 100644 index 00000000000000..2ee39b9a222915 --- /dev/null +++ b/src/simd/unary_helpers_test.go @@ -0,0 +1,434 @@ +// Code generated by 'go run genfiles.go'; DO NOT EDIT. + +//go:build goexperiment.simd + +// This file contains functions testing unary simd methods. +// Each function in this file is specialized for a +// particular simd type x. 
+ +package simd_test + +import ( + "simd" + "testing" +) + +// testInt8x16Unary tests the simd unary method f against the expected behavior generated by want +func testInt8x16Unary(t *testing.T, f func(_ simd.Int8x16) simd.Int8x16, want func(_ []int8) []int8) { + n := 16 + t.Helper() + forSlice(t, int8s, n, func(x []int8) bool { + t.Helper() + a := simd.LoadInt8x16Slice(x) + g := make([]int8, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint8x16Unary tests the simd unary method f against the expected behavior generated by want +func testUint8x16Unary(t *testing.T, f func(_ simd.Uint8x16) simd.Uint8x16, want func(_ []uint8) []uint8) { + n := 16 + t.Helper() + forSlice(t, uint8s, n, func(x []uint8) bool { + t.Helper() + a := simd.LoadUint8x16Slice(x) + g := make([]uint8, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt16x8Unary tests the simd unary method f against the expected behavior generated by want +func testInt16x8Unary(t *testing.T, f func(_ simd.Int16x8) simd.Int16x8, want func(_ []int16) []int16) { + n := 8 + t.Helper() + forSlice(t, int16s, n, func(x []int16) bool { + t.Helper() + a := simd.LoadInt16x8Slice(x) + g := make([]int16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint16x8Unary tests the simd unary method f against the expected behavior generated by want +func testUint16x8Unary(t *testing.T, f func(_ simd.Uint16x8) simd.Uint16x8, want func(_ []uint16) []uint16) { + n := 8 + t.Helper() + forSlice(t, uint16s, n, func(x []uint16) bool { + t.Helper() + a := simd.LoadUint16x8Slice(x) + g := make([]uint16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt32x4Unary tests the simd unary method f against the expected behavior generated by want +func testInt32x4Unary(t *testing.T, f func(_ simd.Int32x4) simd.Int32x4, want func(_ []int32) []int32) { + n := 4 + t.Helper() + forSlice(t, int32s, n, func(x []int32) bool { + t.Helper() + a := simd.LoadInt32x4Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint32x4Unary tests the simd unary method f against the expected behavior generated by want +func testUint32x4Unary(t *testing.T, f func(_ simd.Uint32x4) simd.Uint32x4, want func(_ []uint32) []uint32) { + n := 4 + t.Helper() + forSlice(t, uint32s, n, func(x []uint32) bool { + t.Helper() + a := simd.LoadUint32x4Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt64x2Unary tests the simd unary method f against the expected behavior generated by want +func testInt64x2Unary(t *testing.T, f func(_ simd.Int64x2) simd.Int64x2, want func(_ []int64) []int64) { + n := 2 + t.Helper() + forSlice(t, int64s, n, func(x []int64) bool { + t.Helper() + a := simd.LoadInt64x2Slice(x) + g := make([]int64, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint64x2Unary tests the simd unary method f against the expected behavior generated by want +func testUint64x2Unary(t *testing.T, f func(_ simd.Uint64x2) simd.Uint64x2, want 
func(_ []uint64) []uint64) { + n := 2 + t.Helper() + forSlice(t, uint64s, n, func(x []uint64) bool { + t.Helper() + a := simd.LoadUint64x2Slice(x) + g := make([]uint64, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat32x4Unary tests the simd unary method f against the expected behavior generated by want +func testFloat32x4Unary(t *testing.T, f func(_ simd.Float32x4) simd.Float32x4, want func(_ []float32) []float32) { + n := 4 + t.Helper() + forSlice(t, float32s, n, func(x []float32) bool { + t.Helper() + a := simd.LoadFloat32x4Slice(x) + g := make([]float32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat64x2Unary tests the simd unary method f against the expected behavior generated by want +func testFloat64x2Unary(t *testing.T, f func(_ simd.Float64x2) simd.Float64x2, want func(_ []float64) []float64) { + n := 2 + t.Helper() + forSlice(t, float64s, n, func(x []float64) bool { + t.Helper() + a := simd.LoadFloat64x2Slice(x) + g := make([]float64, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt8x32Unary tests the simd unary method f against the expected behavior generated by want +func testInt8x32Unary(t *testing.T, f func(_ simd.Int8x32) simd.Int8x32, want func(_ []int8) []int8) { + n := 32 + t.Helper() + forSlice(t, int8s, n, func(x []int8) bool { + t.Helper() + a := simd.LoadInt8x32Slice(x) + g := make([]int8, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint8x32Unary tests the simd unary method f against the expected behavior generated by want +func testUint8x32Unary(t *testing.T, f func(_ simd.Uint8x32) simd.Uint8x32, want func(_ []uint8) []uint8) { + n := 32 + t.Helper() + forSlice(t, uint8s, n, func(x []uint8) bool { + t.Helper() + a := simd.LoadUint8x32Slice(x) + g := make([]uint8, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt16x16Unary tests the simd unary method f against the expected behavior generated by want +func testInt16x16Unary(t *testing.T, f func(_ simd.Int16x16) simd.Int16x16, want func(_ []int16) []int16) { + n := 16 + t.Helper() + forSlice(t, int16s, n, func(x []int16) bool { + t.Helper() + a := simd.LoadInt16x16Slice(x) + g := make([]int16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint16x16Unary tests the simd unary method f against the expected behavior generated by want +func testUint16x16Unary(t *testing.T, f func(_ simd.Uint16x16) simd.Uint16x16, want func(_ []uint16) []uint16) { + n := 16 + t.Helper() + forSlice(t, uint16s, n, func(x []uint16) bool { + t.Helper() + a := simd.LoadUint16x16Slice(x) + g := make([]uint16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt32x8Unary tests the simd unary method f against the expected behavior generated by want +func testInt32x8Unary(t *testing.T, f func(_ simd.Int32x8) simd.Int32x8, want func(_ []int32) []int32) { + n := 8 + t.Helper() + forSlice(t, int32s, n, func(x []int32) bool { + t.Helper() + a := simd.LoadInt32x8Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := 
want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint32x8Unary tests the simd unary method f against the expected behavior generated by want +func testUint32x8Unary(t *testing.T, f func(_ simd.Uint32x8) simd.Uint32x8, want func(_ []uint32) []uint32) { + n := 8 + t.Helper() + forSlice(t, uint32s, n, func(x []uint32) bool { + t.Helper() + a := simd.LoadUint32x8Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt64x4Unary tests the simd unary method f against the expected behavior generated by want +func testInt64x4Unary(t *testing.T, f func(_ simd.Int64x4) simd.Int64x4, want func(_ []int64) []int64) { + n := 4 + t.Helper() + forSlice(t, int64s, n, func(x []int64) bool { + t.Helper() + a := simd.LoadInt64x4Slice(x) + g := make([]int64, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint64x4Unary tests the simd unary method f against the expected behavior generated by want +func testUint64x4Unary(t *testing.T, f func(_ simd.Uint64x4) simd.Uint64x4, want func(_ []uint64) []uint64) { + n := 4 + t.Helper() + forSlice(t, uint64s, n, func(x []uint64) bool { + t.Helper() + a := simd.LoadUint64x4Slice(x) + g := make([]uint64, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat32x8Unary tests the simd unary method f against the expected behavior generated by want +func testFloat32x8Unary(t *testing.T, f func(_ simd.Float32x8) simd.Float32x8, want func(_ []float32) []float32) { + n := 8 + t.Helper() + forSlice(t, float32s, n, func(x []float32) bool { + t.Helper() + a := simd.LoadFloat32x8Slice(x) + g := make([]float32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat64x4Unary tests the simd unary method f against the expected behavior generated by want +func testFloat64x4Unary(t *testing.T, f func(_ simd.Float64x4) simd.Float64x4, want func(_ []float64) []float64) { + n := 4 + t.Helper() + forSlice(t, float64s, n, func(x []float64) bool { + t.Helper() + a := simd.LoadFloat64x4Slice(x) + g := make([]float64, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt8x64Unary tests the simd unary method f against the expected behavior generated by want +func testInt8x64Unary(t *testing.T, f func(_ simd.Int8x64) simd.Int8x64, want func(_ []int8) []int8) { + n := 64 + t.Helper() + forSlice(t, int8s, n, func(x []int8) bool { + t.Helper() + a := simd.LoadInt8x64Slice(x) + g := make([]int8, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint8x64Unary tests the simd unary method f against the expected behavior generated by want +func testUint8x64Unary(t *testing.T, f func(_ simd.Uint8x64) simd.Uint8x64, want func(_ []uint8) []uint8) { + n := 64 + t.Helper() + forSlice(t, uint8s, n, func(x []uint8) bool { + t.Helper() + a := simd.LoadUint8x64Slice(x) + g := make([]uint8, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt16x32Unary tests the simd unary method f against the expected behavior generated by want 
+func testInt16x32Unary(t *testing.T, f func(_ simd.Int16x32) simd.Int16x32, want func(_ []int16) []int16) { + n := 32 + t.Helper() + forSlice(t, int16s, n, func(x []int16) bool { + t.Helper() + a := simd.LoadInt16x32Slice(x) + g := make([]int16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint16x32Unary tests the simd unary method f against the expected behavior generated by want +func testUint16x32Unary(t *testing.T, f func(_ simd.Uint16x32) simd.Uint16x32, want func(_ []uint16) []uint16) { + n := 32 + t.Helper() + forSlice(t, uint16s, n, func(x []uint16) bool { + t.Helper() + a := simd.LoadUint16x32Slice(x) + g := make([]uint16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt32x16Unary tests the simd unary method f against the expected behavior generated by want +func testInt32x16Unary(t *testing.T, f func(_ simd.Int32x16) simd.Int32x16, want func(_ []int32) []int32) { + n := 16 + t.Helper() + forSlice(t, int32s, n, func(x []int32) bool { + t.Helper() + a := simd.LoadInt32x16Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint32x16Unary tests the simd unary method f against the expected behavior generated by want +func testUint32x16Unary(t *testing.T, f func(_ simd.Uint32x16) simd.Uint32x16, want func(_ []uint32) []uint32) { + n := 16 + t.Helper() + forSlice(t, uint32s, n, func(x []uint32) bool { + t.Helper() + a := simd.LoadUint32x16Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt64x8Unary tests the simd unary method f against the expected behavior generated by want +func testInt64x8Unary(t *testing.T, f func(_ simd.Int64x8) simd.Int64x8, want func(_ []int64) []int64) { + n := 8 + t.Helper() + forSlice(t, int64s, n, func(x []int64) bool { + t.Helper() + a := simd.LoadInt64x8Slice(x) + g := make([]int64, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint64x8Unary tests the simd unary method f against the expected behavior generated by want +func testUint64x8Unary(t *testing.T, f func(_ simd.Uint64x8) simd.Uint64x8, want func(_ []uint64) []uint64) { + n := 8 + t.Helper() + forSlice(t, uint64s, n, func(x []uint64) bool { + t.Helper() + a := simd.LoadUint64x8Slice(x) + g := make([]uint64, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat32x16Unary tests the simd unary method f against the expected behavior generated by want +func testFloat32x16Unary(t *testing.T, f func(_ simd.Float32x16) simd.Float32x16, want func(_ []float32) []float32) { + n := 16 + t.Helper() + forSlice(t, float32s, n, func(x []float32) bool { + t.Helper() + a := simd.LoadFloat32x16Slice(x) + g := make([]float32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat64x8Unary tests the simd unary method f against the expected behavior generated by want +func testFloat64x8Unary(t *testing.T, f func(_ simd.Float64x8) simd.Float64x8, want func(_ []float64) []float64) { + n := 8 + t.Helper() + forSlice(t, float64s, n, func(x []float64) 
bool { + t.Helper() + a := simd.LoadFloat64x8Slice(x) + g := make([]float64, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} diff --git a/src/simd/unary_test.go b/src/simd/unary_test.go new file mode 100644 index 00000000000000..be6a0909bec937 --- /dev/null +++ b/src/simd/unary_test.go @@ -0,0 +1,84 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build goexperiment.simd && amd64 + +package simd_test + +import ( + "simd" + "testing" +) + +func TestCeil(t *testing.T) { + testFloat32x4Unary(t, simd.Float32x4.Ceil, ceilSlice[float32]) + testFloat32x8Unary(t, simd.Float32x8.Ceil, ceilSlice[float32]) + testFloat64x2Unary(t, simd.Float64x2.Ceil, ceilSlice[float64]) + testFloat64x4Unary(t, simd.Float64x4.Ceil, ceilSlice[float64]) + if simd.HasAVX512() { + // testFloat32x16Unary(t, simd.Float32x16.Ceil, ceilSlice[float32]) // missing + // testFloat64x8Unary(t, simd.Float64x8.Ceil, ceilSlice[float64]) // missing + } +} + +func TestFloor(t *testing.T) { + testFloat32x4Unary(t, simd.Float32x4.Floor, floorSlice[float32]) + testFloat32x8Unary(t, simd.Float32x8.Floor, floorSlice[float32]) + testFloat64x2Unary(t, simd.Float64x2.Floor, floorSlice[float64]) + testFloat64x4Unary(t, simd.Float64x4.Floor, floorSlice[float64]) + if simd.HasAVX512() { + // testFloat32x16Unary(t, simd.Float32x16.Floor, floorSlice[float32]) // missing + // testFloat64x8Unary(t, simd.Float64x8.Floor, floorSlice[float64]) // missing + } +} + +func TestTrunc(t *testing.T) { + testFloat32x4Unary(t, simd.Float32x4.Trunc, truncSlice[float32]) + testFloat32x8Unary(t, simd.Float32x8.Trunc, truncSlice[float32]) + testFloat64x2Unary(t, simd.Float64x2.Trunc, truncSlice[float64]) + testFloat64x4Unary(t, simd.Float64x4.Trunc, truncSlice[float64]) + if simd.HasAVX512() { + // testFloat32x16Unary(t, simd.Float32x16.Trunc, truncSlice[float32]) // missing + // testFloat64x8Unary(t, simd.Float64x8.Trunc, truncSlice[float64]) // missing + } +} + +func TestRound(t *testing.T) { + testFloat32x4Unary(t, simd.Float32x4.Round, roundSlice[float32]) + testFloat32x8Unary(t, simd.Float32x8.Round, roundSlice[float32]) + testFloat64x2Unary(t, simd.Float64x2.Round, roundSlice[float64]) + testFloat64x4Unary(t, simd.Float64x4.Round, roundSlice[float64]) + if simd.HasAVX512() { + // testFloat32x16Unary(t, simd.Float32x16.Round, roundSlice[float32]) // missing + // testFloat64x8Unary(t, simd.Float64x8.Round, roundSlice[float64]) // missing + } +} + +func TestSqrt(t *testing.T) { + testFloat32x4Unary(t, simd.Float32x4.Sqrt, sqrtSlice[float32]) + testFloat32x8Unary(t, simd.Float32x8.Sqrt, sqrtSlice[float32]) + testFloat64x2Unary(t, simd.Float64x2.Sqrt, sqrtSlice[float64]) + testFloat64x4Unary(t, simd.Float64x4.Sqrt, sqrtSlice[float64]) + if simd.HasAVX512() { + testFloat32x16Unary(t, simd.Float32x16.Sqrt, sqrtSlice[float32]) + testFloat64x8Unary(t, simd.Float64x8.Sqrt, sqrtSlice[float64]) + } +} + +func TestAbsolute(t *testing.T) { + testInt8x16Unary(t, simd.Int8x16.Absolute, map1[int8](abs)) + testInt8x32Unary(t, simd.Int8x32.Absolute, map1[int8](abs)) + testInt16x8Unary(t, simd.Int16x8.Absolute, map1[int16](abs)) + testInt16x16Unary(t, simd.Int16x16.Absolute, map1[int16](abs)) + testInt32x4Unary(t, simd.Int32x4.Absolute, map1[int32](abs)) + testInt32x8Unary(t, simd.Int32x8.Absolute, map1[int32](abs)) + if simd.HasAVX512() { + testInt8x64Unary(t, 
simd.Int8x64.Absolute, map1[int8](abs)) + testInt16x32Unary(t, simd.Int16x32.Absolute, map1[int16](abs)) + testInt32x16Unary(t, simd.Int32x16.Absolute, map1[int32](abs)) + testInt64x2Unary(t, simd.Int64x2.Absolute, map1[int64](abs)) + testInt64x4Unary(t, simd.Int64x4.Absolute, map1[int64](abs)) + testInt64x8Unary(t, simd.Int64x8.Absolute, map1[int64](abs)) + } +} From a0b87a7478bb131efbbe9bb2ba6451d1b16ed0bf Mon Sep 17 00:00:00 2001 From: David Chase Date: Wed, 16 Jul 2025 13:29:14 -0400 Subject: [PATCH 096/139] [dev.simd] cmd/compile: changes for AVX2 SIMD masked load/store This is "glue" changes and hand work for the AVX2 masked loads/stores. Does not include generated function/method declarations or intrinsic registration. Change-Id: Ic95f90b117d0c471f174407ce3f729f1f517b23c Reviewed-on: https://go-review.googlesource.com/c/go/+/689295 Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/ssa.go | 18 ++ src/cmd/compile/internal/ssa/_gen/AMD64.rules | 13 +- src/cmd/compile/internal/ssa/_gen/AMD64Ops.go | 17 ++ .../compile/internal/ssa/_gen/genericOps.go | 8 + src/cmd/compile/internal/ssa/func.go | 13 ++ src/cmd/compile/internal/ssa/opGen.go | 162 ++++++++++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 158 +++++++++++++++++ src/cmd/compile/internal/ssagen/intrinsics.go | 13 ++ src/cmd/compile/internal/ssagen/ssa.go | 5 + 9 files changed, 404 insertions(+), 3 deletions(-) diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 7338c16cdad9b0..efa7895e97d35e 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -1476,6 +1476,24 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.To.Reg = v.Args[0].Reg() ssagen.AddAux(&p.To, v) + case ssa.OpAMD64VPMASK32load128, ssa.OpAMD64VPMASK64load128, ssa.OpAMD64VPMASK32load256, ssa.OpAMD64VPMASK64load256: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + p.AddRestSourceReg(simdReg(v.Args[1])) // masking simd reg + + case ssa.OpAMD64VPMASK32store128, ssa.OpAMD64VPMASK64store128, ssa.OpAMD64VPMASK32store256, ssa.OpAMD64VPMASK64store256: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = simdReg(v.Args[2]) + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.To, v) + p.AddRestSourceReg(simdReg(v.Args[1])) // masking simd reg + case ssa.OpAMD64VPMOVMToVec8x16, ssa.OpAMD64VPMOVMToVec8x32, ssa.OpAMD64VPMOVMToVec8x64, diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64.rules b/src/cmd/compile/internal/ssa/_gen/AMD64.rules index 5a21c95df9e4f8..0136e41af76831 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/AMD64.rules @@ -1715,17 +1715,24 @@ (StoreMask64x8 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec64x8ToM val) mem) (Load ptr mem) && t.Size() == 16 => (VMOVDQUload128 ptr mem) - (Store {t} ptr val mem) && t.Size() == 16 => (VMOVDQUstore128 ptr val mem) (Load ptr mem) && t.Size() == 32 => (VMOVDQUload256 ptr mem) - (Store {t} ptr val mem) && t.Size() == 32 => (VMOVDQUstore256 ptr val mem) (Load ptr mem) && t.Size() == 64 => (VMOVDQUload512 ptr mem) - (Store {t} ptr val mem) && t.Size() == 64 => (VMOVDQUstore512 ptr val mem) +(LoadMasked32 ptr mask mem) && t.Size() == 16 => (VPMASK32load128 ptr mask mem) +(LoadMasked32 ptr mask mem) && t.Size() == 32 => (VPMASK32load256 ptr mask mem) +(LoadMasked64 ptr mask mem) && t.Size() 
== 16 => (VPMASK64load128 ptr mask mem) +(LoadMasked64 ptr mask mem) && t.Size() == 32 => (VPMASK64load256 ptr mask mem) + +(StoreMasked32 {t} ptr mask val mem) && t.Size() == 16 => (VPMASK32store128 ptr mask val mem) +(StoreMasked32 {t} ptr mask val mem) && t.Size() == 32 => (VPMASK32store256 ptr mask val mem) +(StoreMasked64 {t} ptr mask val mem) && t.Size() == 16 => (VPMASK64store128 ptr mask val mem) +(StoreMasked64 {t} ptr mask val mem) && t.Size() == 32 => (VPMASK64store256 ptr mask val mem) + (ZeroSIMD ) && t.Size() == 16 => (Zero128 ) (ZeroSIMD ) && t.Size() == 32 => (Zero256 ) (ZeroSIMD ) && t.Size() == 64 => (Zero512 ) diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go index cd4b5b2a06a8f0..66c37a495fbeb5 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go @@ -202,6 +202,12 @@ func init() { fpstore = regInfo{inputs: []regMask{gpspsb, fp, 0}} fpstoreidx = regInfo{inputs: []regMask{gpspsb, gpsp, fp, 0}} + // masked loads/stores, vector register or mask register + vloadv = regInfo{inputs: []regMask{gpspsb, v, 0}, outputs: vonly} + vstorev = regInfo{inputs: []regMask{gpspsb, v, v, 0}} + // vloadk = regInfo{inputs: []regMask{gpspsb, mask, 0}, outputs: vonly} + // vstorek = regInfo{inputs: []regMask{gpspsb, mask, v, 0}} + v01 = regInfo{inputs: nil, outputs: vonly} v11 = regInfo{inputs: vonly, outputs: vonly} v21 = regInfo{inputs: []regMask{v, v}, outputs: vonly} @@ -1279,6 +1285,17 @@ func init() { {name: "VMOVDQUload512", argLength: 2, reg: fpload, asm: "VMOVDQU64", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0+auxint+aux, arg1 = mem {name: "VMOVDQUstore512", argLength: 3, reg: fpstore, asm: "VMOVDQU64", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // store, *(arg0+auxint+aux) = arg1, arg2 = mem + // AVX2 32 and 64-bit element masked moves. 
+ {name: "VPMASK32load128", argLength: 3, reg: vloadv, asm: "VPMASKMOVD", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0+auxint+aux, arg1=integer mask, arg2 = mem + {name: "VPMASK32store128", argLength: 4, reg: vstorev, asm: "VPMASKMOVD", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // store, *(arg0+auxint+aux) = arg2, arg1=integer mask, arg3 = mem + {name: "VPMASK64load128", argLength: 3, reg: vloadv, asm: "VPMASKMOVQ", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0+auxint+aux, arg1=integer mask, arg2 = mem + {name: "VPMASK64store128", argLength: 4, reg: vstorev, asm: "VPMASKMOVQ", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // store, *(arg0+auxint+aux) = arg2, arg1=integer mask, arg3 = mem + + {name: "VPMASK32load256", argLength: 3, reg: vloadv, asm: "VPMASKMOVD", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0+auxint+aux, arg1=integer mask, arg2 = mem + {name: "VPMASK32store256", argLength: 4, reg: vstorev, asm: "VPMASKMOVD", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // store, *(arg0+auxint+aux) = arg2, arg1=integer mask, arg3 = mem + {name: "VPMASK64load256", argLength: 3, reg: vloadv, asm: "VPMASKMOVQ", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0+auxint+aux, arg1=integer mask, arg2 = mem + {name: "VPMASK64store256", argLength: 4, reg: vstorev, asm: "VPMASKMOVQ", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // store, *(arg0+auxint+aux) = arg2, arg1=integer mask, arg3 = mem + {name: "VPMOVMToVec8x16", argLength: 1, reg: kv, asm: "VPMOVM2B"}, {name: "VPMOVMToVec8x32", argLength: 1, reg: kv, asm: "VPMOVM2B"}, {name: "VPMOVMToVec8x64", argLength: 1, reg: kw, asm: "VPMOVM2B"}, diff --git a/src/cmd/compile/internal/ssa/_gen/genericOps.go b/src/cmd/compile/internal/ssa/_gen/genericOps.go index 716fe9b881843a..c1383199c4cebc 100644 --- a/src/cmd/compile/internal/ssa/_gen/genericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/genericOps.go @@ -372,6 +372,14 @@ var genericOps = []opData{ {name: "Load", argLength: 2}, // Load from arg0. arg1=memory {name: "Dereference", argLength: 2}, // Load from arg0. arg1=memory. Helper op for arg/result passing, result is an otherwise not-SSA-able "value". {name: "Store", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory. + + // masked memory operations. + // TODO add 16 and 8 + {name: "LoadMasked32", argLength: 3}, // Load from arg0, arg1 = mask of 32-bits, arg2 = memory + {name: "LoadMasked64", argLength: 3}, // Load from arg0, arg1 = mask of 64-bits, arg2 = memory + {name: "StoreMasked32", argLength: 4, typ: "Mem", aux: "Typ"}, // Store arg2 to arg0, arg1=mask of 32-bits, arg3 = memory + {name: "StoreMasked64", argLength: 4, typ: "Mem", aux: "Typ"}, // Store arg2 to arg0, arg1=mask of 64-bits, arg3 = memory + // Normally we require that the source and destination of Move do not overlap. // There is an exception when we know all the loads will happen before all // the stores. In that case, overlap is ok. See diff --git a/src/cmd/compile/internal/ssa/func.go b/src/cmd/compile/internal/ssa/func.go index 5736f0b8126484..213089a44b8233 100644 --- a/src/cmd/compile/internal/ssa/func.go +++ b/src/cmd/compile/internal/ssa/func.go @@ -631,6 +631,19 @@ func (b *Block) NewValue4(pos src.XPos, op Op, t *types.Type, arg0, arg1, arg2, return v } +// NewValue4A returns a new value in the block with four arguments and zero aux values. 
+func (b *Block) NewValue4A(pos src.XPos, op Op, t *types.Type, aux Aux, arg0, arg1, arg2, arg3 *Value) *Value { + v := b.Func.newValue(op, t, b, pos) + v.AuxInt = 0 + v.Aux = aux + v.Args = []*Value{arg0, arg1, arg2, arg3} + arg0.Uses++ + arg1.Uses++ + arg2.Uses++ + arg3.Uses++ + return v +} + // NewValue4I returns a new value in the block with four arguments and auxint value. func (b *Block) NewValue4I(pos src.XPos, op Op, t *types.Type, auxint int64, arg0, arg1, arg2, arg3 *Value) *Value { v := b.Func.newValue(op, t, b, pos) diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 9db3dbaf572198..8cc3e45902b584 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1169,6 +1169,14 @@ const ( OpAMD64VMOVDQUstore256 OpAMD64VMOVDQUload512 OpAMD64VMOVDQUstore512 + OpAMD64VPMASK32load128 + OpAMD64VPMASK32store128 + OpAMD64VPMASK64load128 + OpAMD64VPMASK64store128 + OpAMD64VPMASK32load256 + OpAMD64VPMASK32store256 + OpAMD64VPMASK64load256 + OpAMD64VPMASK64store256 OpAMD64VPMOVMToVec8x16 OpAMD64VPMOVMToVec8x32 OpAMD64VPMOVMToVec8x64 @@ -4246,6 +4254,10 @@ const ( OpLoad OpDereference OpStore + OpLoadMasked32 + OpLoadMasked64 + OpStoreMasked32 + OpStoreMasked64 OpMove OpZero OpStoreWB @@ -18481,6 +18493,134 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMASK32load128", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AVPMASKMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMASK32store128", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AVPMASKMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + }, + }, + { + name: "VPMASK64load128", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AVPMASKMOVQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMASK64store128", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AVPMASKMOVQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + }, + }, + { + name: "VPMASK32load256", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AVPMASKMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMASK32store256", 
+ auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AVPMASKMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + }, + }, + { + name: "VPMASK64load256", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AVPMASKMOVQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMASK64store256", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AVPMASKMOVQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + }, + }, { name: "VPMOVMToVec8x16", argLen: 1, @@ -59969,6 +60109,28 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "LoadMasked32", + argLen: 3, + generic: true, + }, + { + name: "LoadMasked64", + argLen: 3, + generic: true, + }, + { + name: "StoreMasked32", + auxType: auxTyp, + argLen: 4, + generic: true, + }, + { + name: "StoreMasked64", + auxType: auxTyp, + argLen: 4, + generic: true, + }, { name: "Move", auxType: auxTypSize, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index ecd4a21f43d8a8..d9560c55c22d4b 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -2462,6 +2462,10 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpLoadMask8x32(v) case OpLoadMask8x64: return rewriteValueAMD64_OpLoadMask8x64(v) + case OpLoadMasked32: + return rewriteValueAMD64_OpLoadMasked32(v) + case OpLoadMasked64: + return rewriteValueAMD64_OpLoadMasked64(v) case OpLocalAddr: return rewriteValueAMD64_OpLocalAddr(v) case OpLsh16x16: @@ -5208,6 +5212,10 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpStoreMask8x32(v) case OpStoreMask8x64: return rewriteValueAMD64_OpStoreMask8x64(v) + case OpStoreMasked32: + return rewriteValueAMD64_OpStoreMasked32(v) + case OpStoreMasked64: + return rewriteValueAMD64_OpStoreMasked64(v) case OpSub16: v.Op = OpAMD64SUBL return true @@ -40555,6 +40563,78 @@ func rewriteValueAMD64_OpLoadMask8x64(v *Value) bool { return true } } +func rewriteValueAMD64_OpLoadMasked32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (LoadMasked32 ptr mask mem) + // cond: t.Size() == 16 + // result: (VPMASK32load128 ptr mask mem) + for { + t := v.Type + ptr := v_0 + mask := v_1 + mem := v_2 + if !(t.Size() == 16) { + break + } + v.reset(OpAMD64VPMASK32load128) + v.AddArg3(ptr, mask, mem) + return true + } + // match: (LoadMasked32 ptr mask mem) + // cond: t.Size() == 32 + // result: (VPMASK32load256 ptr mask mem) + for { + t := v.Type + ptr := v_0 + mask := v_1 + mem := v_2 + if !(t.Size() == 32) { + break + } + v.reset(OpAMD64VPMASK32load256) + v.AddArg3(ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpLoadMasked64(v *Value) bool { + 
v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (LoadMasked64 ptr mask mem) + // cond: t.Size() == 16 + // result: (VPMASK64load128 ptr mask mem) + for { + t := v.Type + ptr := v_0 + mask := v_1 + mem := v_2 + if !(t.Size() == 16) { + break + } + v.reset(OpAMD64VPMASK64load128) + v.AddArg3(ptr, mask, mem) + return true + } + // match: (LoadMasked64 ptr mask mem) + // cond: t.Size() == 32 + // result: (VPMASK64load256 ptr mask mem) + for { + t := v.Type + ptr := v_0 + mask := v_1 + mem := v_2 + if !(t.Size() == 32) { + break + } + v.reset(OpAMD64VPMASK64load256) + v.AddArg3(ptr, mask, mem) + return true + } + return false +} func rewriteValueAMD64_OpLocalAddr(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -53517,6 +53597,84 @@ func rewriteValueAMD64_OpStoreMask8x64(v *Value) bool { return true } } +func rewriteValueAMD64_OpStoreMasked32(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (StoreMasked32 {t} ptr mask val mem) + // cond: t.Size() == 16 + // result: (VPMASK32store128 ptr mask val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + mask := v_1 + val := v_2 + mem := v_3 + if !(t.Size() == 16) { + break + } + v.reset(OpAMD64VPMASK32store128) + v.AddArg4(ptr, mask, val, mem) + return true + } + // match: (StoreMasked32 {t} ptr mask val mem) + // cond: t.Size() == 32 + // result: (VPMASK32store256 ptr mask val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + mask := v_1 + val := v_2 + mem := v_3 + if !(t.Size() == 32) { + break + } + v.reset(OpAMD64VPMASK32store256) + v.AddArg4(ptr, mask, val, mem) + return true + } + return false +} +func rewriteValueAMD64_OpStoreMasked64(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (StoreMasked64 {t} ptr mask val mem) + // cond: t.Size() == 16 + // result: (VPMASK64store128 ptr mask val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + mask := v_1 + val := v_2 + mem := v_3 + if !(t.Size() == 16) { + break + } + v.reset(OpAMD64VPMASK64store128) + v.AddArg4(ptr, mask, val, mem) + return true + } + // match: (StoreMasked64 {t} ptr mask val mem) + // cond: t.Size() == 32 + // result: (VPMASK64store256 ptr mask val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + mask := v_1 + val := v_2 + mem := v_3 + if !(t.Size() == 32) { + break + } + v.reset(OpAMD64VPMASK64store256) + v.AddArg4(ptr, mask, val, mem) + return true + } + return false +} func rewriteValueAMD64_OpSubMaskedFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] diff --git a/src/cmd/compile/internal/ssagen/intrinsics.go b/src/cmd/compile/internal/ssagen/intrinsics.go index 0284729a525502..7326ae2485247b 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics.go +++ b/src/cmd/compile/internal/ssagen/intrinsics.go @@ -1808,6 +1808,19 @@ func simdStoreMask(elemBits, lanes int) func(s *state, n *ir.CallExpr, args []*s } } +func simdMaskedLoad(op ssa.Op) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue3(op, n.Type(), args[0], args[1], s.mem()) + } +} + +func simdMaskedStore(op ssa.Op) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + s.vars[memVar] = s.newValue4A(op, types.TypeMem, args[0].Type, args[1], args[2], args[0], s.mem()) + return nil + } +} + // findIntrinsic returns a function which builds the SSA equivalent of the // function identified by 
the symbol sym. If sym is not an intrinsic call, returns nil. func findIntrinsic(sym *types.Sym) intrinsicBuilder { diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index e9121c9ee23358..3b406c0d6fc577 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -1270,6 +1270,11 @@ func (s *state) newValue4(op ssa.Op, t *types.Type, arg0, arg1, arg2, arg3 *ssa. return s.curBlock.NewValue4(s.peekPos(), op, t, arg0, arg1, arg2, arg3) } +// newValue4A adds a new value with four arguments and an aux value to the current block. +func (s *state) newValue4A(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value { + return s.curBlock.NewValue4A(s.peekPos(), op, t, aux, arg0, arg1, arg2, arg3) +} + // newValue4I adds a new value with four arguments and an auxint value to the current block. func (s *state) newValue4I(op ssa.Op, t *types.Type, aux int64, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value { return s.curBlock.NewValue4I(s.peekPos(), op, t, aux, arg0, arg1, arg2, arg3) From acc1492b7d679914b485da0dd65d3faf202f4efa Mon Sep 17 00:00:00 2001 From: David Chase Date: Mon, 21 Jul 2025 14:30:55 -0400 Subject: [PATCH 097/139] [dev.simd] cmd/compile: Generated code for AVX2 SIMD masked load/store This adds to the change in the earlier dev.simd CL. Generated by arch/internal/simdgen CL 689276 . Also includes one test for "it at least works once". Change-Id: I44a268cfc3bea06c5522ac2cfa04fe13a833e1dd Reviewed-on: https://go-review.googlesource.com/c/go/+/689335 Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI --- .../compile/internal/ssagen/simdintrinsics.go | 24 +++ src/simd/simd_test.go | 10 -- src/simd/slicepart_amd64.go | 71 +++++++++ src/simd/slicepart_test.go | 40 +++++ src/simd/types_amd64.go | 144 ++++++++++++++++++ 5 files changed, 279 insertions(+), 10 deletions(-) diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 8b3b08f886f197..cf2e7fc67643f1 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -2132,6 +2132,30 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x4.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadUint64x8", simdLoad(), sys.AMD64) addF(simdPackage, "Uint64x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMaskedFloat32x4", simdMaskedLoad(ssa.OpLoadMasked32), sys.AMD64) + addF(simdPackage, "Float32x4.StoreMasked", simdMaskedStore(ssa.OpStoreMasked32), sys.AMD64) + addF(simdPackage, "LoadMaskedFloat32x8", simdMaskedLoad(ssa.OpLoadMasked32), sys.AMD64) + addF(simdPackage, "Float32x8.StoreMasked", simdMaskedStore(ssa.OpStoreMasked32), sys.AMD64) + addF(simdPackage, "LoadMaskedFloat64x2", simdMaskedLoad(ssa.OpLoadMasked64), sys.AMD64) + addF(simdPackage, "Float64x2.StoreMasked", simdMaskedStore(ssa.OpStoreMasked64), sys.AMD64) + addF(simdPackage, "LoadMaskedFloat64x4", simdMaskedLoad(ssa.OpLoadMasked64), sys.AMD64) + addF(simdPackage, "Float64x4.StoreMasked", simdMaskedStore(ssa.OpStoreMasked64), sys.AMD64) + addF(simdPackage, "LoadMaskedInt32x4", simdMaskedLoad(ssa.OpLoadMasked32), sys.AMD64) + addF(simdPackage, "Int32x4.StoreMasked", simdMaskedStore(ssa.OpStoreMasked32), sys.AMD64) + addF(simdPackage, "LoadMaskedInt32x8", simdMaskedLoad(ssa.OpLoadMasked32), sys.AMD64) + addF(simdPackage, "Int32x8.StoreMasked", simdMaskedStore(ssa.OpStoreMasked32), sys.AMD64) + addF(simdPackage, "LoadMaskedInt64x2", simdMaskedLoad(ssa.OpLoadMasked64), sys.AMD64) + addF(simdPackage, "Int64x2.StoreMasked", simdMaskedStore(ssa.OpStoreMasked64), sys.AMD64) + addF(simdPackage, "LoadMaskedInt64x4", simdMaskedLoad(ssa.OpLoadMasked64), sys.AMD64) + addF(simdPackage, "Int64x4.StoreMasked", simdMaskedStore(ssa.OpStoreMasked64), sys.AMD64) + addF(simdPackage, "LoadMaskedUint32x4", simdMaskedLoad(ssa.OpLoadMasked32), sys.AMD64) + addF(simdPackage, "Uint32x4.StoreMasked", simdMaskedStore(ssa.OpStoreMasked32), sys.AMD64) + addF(simdPackage, "LoadMaskedUint32x8", simdMaskedLoad(ssa.OpLoadMasked32), sys.AMD64) + addF(simdPackage, "Uint32x8.StoreMasked", simdMaskedStore(ssa.OpStoreMasked32), sys.AMD64) + addF(simdPackage, "LoadMaskedUint64x2", simdMaskedLoad(ssa.OpLoadMasked64), sys.AMD64) + addF(simdPackage, "Uint64x2.StoreMasked", simdMaskedStore(ssa.OpStoreMasked64), sys.AMD64) + addF(simdPackage, "LoadMaskedUint64x4", simdMaskedLoad(ssa.OpLoadMasked64), sys.AMD64) + addF(simdPackage, "Uint64x4.StoreMasked", simdMaskedStore(ssa.OpStoreMasked64), sys.AMD64) addF(simdPackage, "Mask8x16.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x16.AsMask8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask8x16.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index 06af3458b5280c..541a33d34ad3fa 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -206,16 +206,6 @@ func TestPairDotProdAccumulate(t *testing.T) { } } -// checkInt8Slices ensures that b and a are equal, to the end of b. -// also serves to use the slices, to prevent accidental optimization. 
-func checkInt8Slices(t *testing.T, a, b []int8) { - for i := range b { - if a[i] != b[i] { - t.Errorf("a and b differ at index %d, a=%d, b=%d", i, a[i], b[i]) - } - } -} - func TestSlicesInt8(t *testing.T) { a := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} diff --git a/src/simd/slicepart_amd64.go b/src/simd/slicepart_amd64.go index 7f5247cd8c25b8..920cdb8ccd9771 100644 --- a/src/simd/slicepart_amd64.go +++ b/src/simd/slicepart_amd64.go @@ -37,6 +37,10 @@ func int64atP32(p *int32) *int64 { return (*int64)(unsafe.Pointer(p)) } +func int32atP64(p *int64) *int32 { + return (*int32)(unsafe.Pointer(p)) +} + /* unsigned versions of integer slice part loads */ // LoadUint8x16SlicePart loads a Uint8x16 from the slice s. @@ -385,3 +389,70 @@ func (x Int16x8) StoreSlicePart(s []int16) { } return } + +var vecMask64 = [16]int64{ + -1, -1, -1, -1, + -1, -1, -1, -1, + 0, 0, 0, 0, + 0, 0, 0, 0, +} + +// paInt32x4 is an unchecked cast from a slice to an +// pointer-to-array type, for used in a masked +// load/store. In practice, the slice will be too +// short, so this has to be unsafe, and its only +// use must be with an instruction with masked +// load/store effect (including faults). +func paInt32x4(s []int32) *[4]int32 { + return (*[4]int32)(unsafe.Pointer(&s[0])) +} + +/* 32 and 64-bit slice-part loads for AVX2 (128 and 256 bit) */ + +func LoadInt32x4SlicePart(s []int32) Int32x4 { + l := len(s) + if l >= 4 { + return LoadInt32x4Slice(s) + } + if l == 0 { + var x Int32x4 + return x + } + p := int32atP64(&vecMask64[0]) + mask := unsafe.Slice(p, 32)[16-l:] + return LoadMaskedInt32x4(paInt32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) +} + +func (x Int32x4) StoreSlicePart(s []int32) { + l := len(s) + if l >= 4 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + p := int32atP64(&vecMask64[0]) + mask := unsafe.Slice(p, 32)[16-l:] + x.StoreMasked(paInt32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) +} + +// func LoadInt32x8SlicePart(s []int32) Int32x8 { +// } + +// func LoadInt64x2SlicePart(s []int64) Int64x2 { +// } + +// func LoadInt64x4SlicePart(s []int64) Int64x4 { +// } + +// func (x Int32x8) StoreSlicePart(s []int32) { +// } + +// func (x Int64x4) StoreSlicePart(s []int64) { +// } + +// func (x Int64x8) StoreSlicePart(s []int64) { +// } + +// Handle float32, float64, uint32, and uint64 with ugly casts. diff --git a/src/simd/slicepart_test.go b/src/simd/slicepart_test.go index 6e047248790711..cd282be7b1c2af 100644 --- a/src/simd/slicepart_test.go +++ b/src/simd/slicepart_test.go @@ -177,3 +177,43 @@ func TestSlicesPartStoreUint8x32(t *testing.T) { } } } + +func TestSlicePartInt32(t *testing.T) { + L := 4 + c := []int32{1, 2, 3, 4, 5, -1, -1, -1, -1} + a := c[:L+1] + for i := range a { + // Test the load first + // e is a partial slice. 
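+		// As i advances, e shrinks from L+1 elements down to 1, so the loop
+		// covers the full-width path (len(e) >= 4) as well as every masked
+		// partial length; only the zero-length case is left out here.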
+ e := a[i:] + v := simd.LoadInt32x4SlicePart(e) + // d contains what a ought to contain + d := make([]int32, L) + for j := 0; j < len(e) && j < len(d); j++ { + d[j] = e[j] + } + + b := make([]int32, L) + v.StoreSlice(b) + // test the load + checkSlices(t, d, b) + + // Test the store + f := make([]int32, L+1) + for i := range f { + f[i] = 99 + } + + v.StoreSlicePart(f[:len(e)]) + if len(e) < len(b) { + checkSlices(t, f, b[:len(e)]) + } else { + checkSlices(t, f, b) + } + for i := len(e); i < len(f); i++ { + if f[i] != 99 { + t.Errorf("StoreSlicePart altered f[%d], expected 99, saw %d", i, f[i]) + } + } + } +} diff --git a/src/simd/types_amd64.go b/src/simd/types_amd64.go index 998a8f9fe1df0b..c1676ff34e27a0 100644 --- a/src/simd/types_amd64.go +++ b/src/simd/types_amd64.go @@ -28,6 +28,18 @@ func LoadFloat32x4(y *[4]float32) Float32x4 //go:noescape func (x Float32x4) Store(y *[4]float32) +// LoadMaskedFloat32x4 loads a Float32x4 from an array, +// at those elements enabled by mask +// +//go:noescape +func LoadMaskedFloat32x4(y *[4]float32, mask Mask32x4) Float32x4 + +// StoreMasked stores a Float32x4 to an array, +// at those elements enabled by mask +// +//go:noescape +func (x Float32x4) StoreMasked(y *[4]float32, mask Mask32x4) + // Float64x2 is a 128-bit SIMD vector of 2 float64 type Float64x2 struct { float64x2 v128 @@ -47,6 +59,18 @@ func LoadFloat64x2(y *[2]float64) Float64x2 //go:noescape func (x Float64x2) Store(y *[2]float64) +// LoadMaskedFloat64x2 loads a Float64x2 from an array, +// at those elements enabled by mask +// +//go:noescape +func LoadMaskedFloat64x2(y *[2]float64, mask Mask64x2) Float64x2 + +// StoreMasked stores a Float64x2 to an array, +// at those elements enabled by mask +// +//go:noescape +func (x Float64x2) StoreMasked(y *[2]float64, mask Mask64x2) + // Int8x16 is a 128-bit SIMD vector of 16 int8 type Int8x16 struct { int8x16 v128 @@ -104,6 +128,18 @@ func LoadInt32x4(y *[4]int32) Int32x4 //go:noescape func (x Int32x4) Store(y *[4]int32) +// LoadMaskedInt32x4 loads a Int32x4 from an array, +// at those elements enabled by mask +// +//go:noescape +func LoadMaskedInt32x4(y *[4]int32, mask Mask32x4) Int32x4 + +// StoreMasked stores a Int32x4 to an array, +// at those elements enabled by mask +// +//go:noescape +func (x Int32x4) StoreMasked(y *[4]int32, mask Mask32x4) + // Int64x2 is a 128-bit SIMD vector of 2 int64 type Int64x2 struct { int64x2 v128 @@ -123,6 +159,18 @@ func LoadInt64x2(y *[2]int64) Int64x2 //go:noescape func (x Int64x2) Store(y *[2]int64) +// LoadMaskedInt64x2 loads a Int64x2 from an array, +// at those elements enabled by mask +// +//go:noescape +func LoadMaskedInt64x2(y *[2]int64, mask Mask64x2) Int64x2 + +// StoreMasked stores a Int64x2 to an array, +// at those elements enabled by mask +// +//go:noescape +func (x Int64x2) StoreMasked(y *[2]int64, mask Mask64x2) + // Uint8x16 is a 128-bit SIMD vector of 16 uint8 type Uint8x16 struct { uint8x16 v128 @@ -180,6 +228,18 @@ func LoadUint32x4(y *[4]uint32) Uint32x4 //go:noescape func (x Uint32x4) Store(y *[4]uint32) +// LoadMaskedUint32x4 loads a Uint32x4 from an array, +// at those elements enabled by mask +// +//go:noescape +func LoadMaskedUint32x4(y *[4]uint32, mask Mask32x4) Uint32x4 + +// StoreMasked stores a Uint32x4 to an array, +// at those elements enabled by mask +// +//go:noescape +func (x Uint32x4) StoreMasked(y *[4]uint32, mask Mask32x4) + // Uint64x2 is a 128-bit SIMD vector of 2 uint64 type Uint64x2 struct { uint64x2 v128 @@ -199,6 +259,18 @@ func LoadUint64x2(y *[2]uint64) Uint64x2 
//go:noescape func (x Uint64x2) Store(y *[2]uint64) +// LoadMaskedUint64x2 loads a Uint64x2 from an array, +// at those elements enabled by mask +// +//go:noescape +func LoadMaskedUint64x2(y *[2]uint64, mask Mask64x2) Uint64x2 + +// StoreMasked stores a Uint64x2 to an array, +// at those elements enabled by mask +// +//go:noescape +func (x Uint64x2) StoreMasked(y *[2]uint64, mask Mask64x2) + // Mask8x16 is a 128-bit SIMD vector of 16 int8 type Mask8x16 struct { int8x16 v128 @@ -311,6 +383,18 @@ func LoadFloat32x8(y *[8]float32) Float32x8 //go:noescape func (x Float32x8) Store(y *[8]float32) +// LoadMaskedFloat32x8 loads a Float32x8 from an array, +// at those elements enabled by mask +// +//go:noescape +func LoadMaskedFloat32x8(y *[8]float32, mask Mask32x8) Float32x8 + +// StoreMasked stores a Float32x8 to an array, +// at those elements enabled by mask +// +//go:noescape +func (x Float32x8) StoreMasked(y *[8]float32, mask Mask32x8) + // Float64x4 is a 256-bit SIMD vector of 4 float64 type Float64x4 struct { float64x4 v256 @@ -330,6 +414,18 @@ func LoadFloat64x4(y *[4]float64) Float64x4 //go:noescape func (x Float64x4) Store(y *[4]float64) +// LoadMaskedFloat64x4 loads a Float64x4 from an array, +// at those elements enabled by mask +// +//go:noescape +func LoadMaskedFloat64x4(y *[4]float64, mask Mask64x4) Float64x4 + +// StoreMasked stores a Float64x4 to an array, +// at those elements enabled by mask +// +//go:noescape +func (x Float64x4) StoreMasked(y *[4]float64, mask Mask64x4) + // Int8x32 is a 256-bit SIMD vector of 32 int8 type Int8x32 struct { int8x32 v256 @@ -387,6 +483,18 @@ func LoadInt32x8(y *[8]int32) Int32x8 //go:noescape func (x Int32x8) Store(y *[8]int32) +// LoadMaskedInt32x8 loads a Int32x8 from an array, +// at those elements enabled by mask +// +//go:noescape +func LoadMaskedInt32x8(y *[8]int32, mask Mask32x8) Int32x8 + +// StoreMasked stores a Int32x8 to an array, +// at those elements enabled by mask +// +//go:noescape +func (x Int32x8) StoreMasked(y *[8]int32, mask Mask32x8) + // Int64x4 is a 256-bit SIMD vector of 4 int64 type Int64x4 struct { int64x4 v256 @@ -406,6 +514,18 @@ func LoadInt64x4(y *[4]int64) Int64x4 //go:noescape func (x Int64x4) Store(y *[4]int64) +// LoadMaskedInt64x4 loads a Int64x4 from an array, +// at those elements enabled by mask +// +//go:noescape +func LoadMaskedInt64x4(y *[4]int64, mask Mask64x4) Int64x4 + +// StoreMasked stores a Int64x4 to an array, +// at those elements enabled by mask +// +//go:noescape +func (x Int64x4) StoreMasked(y *[4]int64, mask Mask64x4) + // Uint8x32 is a 256-bit SIMD vector of 32 uint8 type Uint8x32 struct { uint8x32 v256 @@ -463,6 +583,18 @@ func LoadUint32x8(y *[8]uint32) Uint32x8 //go:noescape func (x Uint32x8) Store(y *[8]uint32) +// LoadMaskedUint32x8 loads a Uint32x8 from an array, +// at those elements enabled by mask +// +//go:noescape +func LoadMaskedUint32x8(y *[8]uint32, mask Mask32x8) Uint32x8 + +// StoreMasked stores a Uint32x8 to an array, +// at those elements enabled by mask +// +//go:noescape +func (x Uint32x8) StoreMasked(y *[8]uint32, mask Mask32x8) + // Uint64x4 is a 256-bit SIMD vector of 4 uint64 type Uint64x4 struct { uint64x4 v256 @@ -482,6 +614,18 @@ func LoadUint64x4(y *[4]uint64) Uint64x4 //go:noescape func (x Uint64x4) Store(y *[4]uint64) +// LoadMaskedUint64x4 loads a Uint64x4 from an array, +// at those elements enabled by mask +// +//go:noescape +func LoadMaskedUint64x4(y *[4]uint64, mask Mask64x4) Uint64x4 + +// StoreMasked stores a Uint64x4 to an array, +// at those elements 
enabled by mask +// +//go:noescape +func (x Uint64x4) StoreMasked(y *[4]uint64, mask Mask64x4) + // Mask8x32 is a 256-bit SIMD vector of 32 int8 type Mask8x32 struct { int8x32 v256 From 761894d4a5d737fb2a00404d4de850f13c368ccd Mon Sep 17 00:00:00 2001 From: David Chase Date: Mon, 21 Jul 2025 17:31:17 -0400 Subject: [PATCH 098/139] [dev.simd] simd: add partial slice load/store for 32/64-bits on AVX2 These all use int-vector-masked loads and stores. Partial set of tests (for all NxK shapes, thought not all types). Change-Id: I8f493aaa9228647e08ea5badb06dcfe716d6925d Reviewed-on: https://go-review.googlesource.com/c/go/+/689336 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao --- src/simd/slicepart_amd64.go | 310 ++++++++++++++++++++++++++++++++++-- src/simd/slicepart_test.go | 124 +++++++++++++++ 2 files changed, 422 insertions(+), 12 deletions(-) diff --git a/src/simd/slicepart_amd64.go b/src/simd/slicepart_amd64.go index 920cdb8ccd9771..00025775be114b 100644 --- a/src/simd/slicepart_amd64.go +++ b/src/simd/slicepart_amd64.go @@ -407,8 +407,23 @@ func paInt32x4(s []int32) *[4]int32 { return (*[4]int32)(unsafe.Pointer(&s[0])) } +func paInt32x8(s []int32) *[8]int32 { + return (*[8]int32)(unsafe.Pointer(&s[0])) +} + +func paInt64x2(s []int64) *[2]int64 { + return (*[2]int64)(unsafe.Pointer(&s[0])) +} + +func paInt64x4(s []int64) *[4]int64 { + return (*[4]int64)(unsafe.Pointer(&s[0])) +} + /* 32 and 64-bit slice-part loads for AVX2 (128 and 256 bit) */ +// LoadInt32x4SlicePart loads a Int32x4 from the slice s. +// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. +// If s has 4 or more elements, the function is equivalent to LoadInt32x4Slice. func LoadInt32x4SlicePart(s []int32) Int32x4 { l := len(s) if l >= 4 { @@ -423,6 +438,9 @@ func LoadInt32x4SlicePart(s []int32) Int32x4 { return LoadMaskedInt32x4(paInt32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) } +// StoreSlicePart stores the elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 4 or more elements, the method is equivalent to x.StoreSlice. func (x Int32x4) StoreSlicePart(s []int32) { l := len(s) if l >= 4 { @@ -437,22 +455,290 @@ func (x Int32x4) StoreSlicePart(s []int32) { x.StoreMasked(paInt32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) } -// func LoadInt32x8SlicePart(s []int32) Int32x8 { -// } +// LoadInt32x8SlicePart loads a Int32x8 from the slice s. +// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. +// If s has 8 or more elements, the function is equivalent to LoadInt32x8Slice. +func LoadInt32x8SlicePart(s []int32) Int32x8 { + l := len(s) + if l >= 8 { + return LoadInt32x8Slice(s) + } + if l == 0 { + var x Int32x8 + return x + } + p := int32atP64(&vecMask64[0]) + mask := unsafe.Slice(p, 32)[16-l:] + return LoadMaskedInt32x8(paInt32x8(s), LoadInt32x8Slice(mask).AsMask32x8()) +} -// func LoadInt64x2SlicePart(s []int64) Int64x2 { -// } +// LoadInt64x2SlicePart loads a Int64x2 from the slice s. +// If s has fewer than 2 elements, the remaining elements of the vector are filled with zeroes. +// If s has 2 or more elements, the function is equivalent to LoadInt64x2Slice. 
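+//
+// The masked path here (and in the other SlicePart functions) builds its lane
+// mask by slicing vecMask64, which holds eight -1 words followed by eight
+// zero words: vecMask64[8-l:] (or the int32 view at [16-l:]) starts with
+// exactly l all-ones lanes, and the VPMASKMOVQ/VPMASKMOVD forms underneath
+// enable a lane when its sign bit is set. For example, with l == 1:
+//
+//	mask := vecMask64[8-1:] // -1, 0, 0, ...: only lane 0 is loaded or stored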
+func LoadInt64x2SlicePart(s []int64) Int64x2 { + l := len(s) + if l >= 2 { + return LoadInt64x2Slice(s) + } + if l == 0 { + var x Int64x2 + return x + } -// func LoadInt64x4SlicePart(s []int64) Int64x4 { -// } + mask := vecMask64[8-l:] + return LoadMaskedInt64x2(paInt64x2(s), LoadInt64x2Slice(mask).AsMask64x2()) +} -// func (x Int32x8) StoreSlicePart(s []int32) { -// } +// LoadInt64x4SlicePart loads a Int64x4 from the slice s. +// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. +// If s has 4 or more elements, the function is equivalent to LoadInt64x4Slice. +func LoadInt64x4SlicePart(s []int64) Int64x4 { + l := len(s) + if l >= 4 { + return LoadInt64x4Slice(s) + } + if l == 0 { + var x Int64x4 + return x + } -// func (x Int64x4) StoreSlicePart(s []int64) { -// } + mask := vecMask64[8-l:] + return LoadMaskedInt64x4(paInt64x4(s), LoadInt64x4Slice(mask).AsMask64x4()) +} -// func (x Int64x8) StoreSlicePart(s []int64) { -// } +// StoreSlicePart stores the elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 8 or more elements, the method is equivalent to x.StoreSlice. +func (x Int32x8) StoreSlicePart(s []int32) { + l := len(s) + if l >= 8 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + p := int32atP64(&vecMask64[0]) + mask := unsafe.Slice(p, 32)[16-l:] + x.StoreMasked(paInt32x8(s), LoadInt32x8Slice(mask).AsMask32x8()) +} + +// StoreSlicePart stores the elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 2 or more elements, the method is equivalent to x.StoreSlice. +func (x Int64x2) StoreSlicePart(s []int64) { + l := len(s) + if l >= 2 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask64[8-l:] + x.StoreMasked(paInt64x2(s), LoadInt64x2Slice(mask).AsMask64x2()) +} + +// StoreSlicePart stores the elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 4 or more elements, the method is equivalent to x.StoreSlice. +func (x Int64x4) StoreSlicePart(s []int64) { + l := len(s) + if l >= 4 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask64[8-l:] + x.StoreMasked(paInt64x4(s), LoadInt64x4Slice(mask).AsMask64x4()) +} // Handle float32, float64, uint32, and uint64 with ugly casts. + +// LoadUint32x4SlicePart loads a Uint32x4 from the slice s. +// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. +// If s has 4 or more elements, the function is equivalent to LoadUint32x4Slice. +func LoadUint32x4SlicePart(s []uint32) Uint32x4 { + if len(s) == 0 { + var zero Uint32x4 + return zero + } + t := unsafe.Slice((*int32)(unsafe.Pointer(&s[0])), len(s)) + return LoadInt32x4SlicePart(t).AsUint32x4() +} + +// StoreSlicePart stores the elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 4 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint32x4) StoreSlicePart(s []uint32) { + if len(s) == 0 { + return + } + t := unsafe.Slice((*int32)(unsafe.Pointer(&s[0])), len(s)) + x.AsInt32x4().StoreSlicePart(t) +} + +// LoadUint32x8SlicePart loads a Uint32x8 from the slice s. +// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. +// If s has 8 or more elements, the function is equivalent to LoadUint32x8Slice. 
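+//
+// Like the other unsigned variants, this reinterprets the slice as the
+// corresponding signed-integer slice (identical size and layout) and defers
+// to that implementation; masked loads and stores only copy bits, so no
+// values change.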
+func LoadUint32x8SlicePart(s []uint32) Uint32x8 { + if len(s) == 0 { + var zero Uint32x8 + return zero + } + t := unsafe.Slice((*int32)(unsafe.Pointer(&s[0])), len(s)) + return LoadInt32x8SlicePart(t).AsUint32x8() +} + +// StoreSlicePart stores the elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 8 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint32x8) StoreSlicePart(s []uint32) { + if len(s) == 0 { + return + } + t := unsafe.Slice((*int32)(unsafe.Pointer(&s[0])), len(s)) + x.AsInt32x8().StoreSlicePart(t) +} + +// LoadUint64x2SlicePart loads a Uint64x2 from the slice s. +// If s has fewer than 2 elements, the remaining elements of the vector are filled with zeroes. +// If s has 2 or more elements, the function is equivalent to LoadUint64x2Slice. +func LoadUint64x2SlicePart(s []uint64) Uint64x2 { + if len(s) == 0 { + var zero Uint64x2 + return zero + } + t := unsafe.Slice((*int64)(unsafe.Pointer(&s[0])), len(s)) + return LoadInt64x2SlicePart(t).AsUint64x2() +} + +// StoreSlicePart stores the elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 2 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint64x2) StoreSlicePart(s []uint64) { + if len(s) == 0 { + return + } + t := unsafe.Slice((*int64)(unsafe.Pointer(&s[0])), len(s)) + x.AsInt64x2().StoreSlicePart(t) +} + +// LoadUint64x4SlicePart loads a Uint64x4 from the slice s. +// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. +// If s has 4 or more elements, the function is equivalent to LoadUint64x4Slice. +func LoadUint64x4SlicePart(s []uint64) Uint64x4 { + if len(s) == 0 { + var zero Uint64x4 + return zero + } + t := unsafe.Slice((*int64)(unsafe.Pointer(&s[0])), len(s)) + return LoadInt64x4SlicePart(t).AsUint64x4() +} + +// StoreSlicePart stores the elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 4 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint64x4) StoreSlicePart(s []uint64) { + if len(s) == 0 { + return + } + t := unsafe.Slice((*int64)(unsafe.Pointer(&s[0])), len(s)) + x.AsInt64x4().StoreSlicePart(t) +} + +// Float32xK and Float64xK + +// LoadFloat32x4SlicePart loads a Float32x4 from the slice s. +// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. +// If s has 4 or more elements, the function is equivalent to LoadFloat32x4Slice. +func LoadFloat32x4SlicePart(s []float32) Float32x4 { + if len(s) == 0 { + var zero Float32x4 + return zero + } + t := unsafe.Slice((*int32)(unsafe.Pointer(&s[0])), len(s)) + return LoadInt32x4SlicePart(t).AsFloat32x4() +} + +// StoreSlicePart stores the elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 4 or more elements, the method is equivalent to x.StoreSlice. +func (x Float32x4) StoreSlicePart(s []float32) { + if len(s) == 0 { + return + } + t := unsafe.Slice((*int32)(unsafe.Pointer(&s[0])), len(s)) + x.AsInt32x4().StoreSlicePart(t) +} + +// LoadFloat32x8SlicePart loads a Float32x8 from the slice s. +// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. +// If s has 8 or more elements, the function is equivalent to LoadFloat32x8Slice. 
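+//
+// The float variants use the same trick as the unsigned ones, reinterpreting
+// the slice as the same-width integer slice; since only bits are moved, NaN
+// payloads and signed zeros round-trip unchanged. The len(s) == 0 checks in
+// these wrappers are what make the &s[0] reinterpretation safe: indexing an
+// empty slice would panic before the masked operation ever saw a zero mask.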
+func LoadFloat32x8SlicePart(s []float32) Float32x8 { + if len(s) == 0 { + var zero Float32x8 + return zero + } + t := unsafe.Slice((*int32)(unsafe.Pointer(&s[0])), len(s)) + return LoadInt32x8SlicePart(t).AsFloat32x8() +} + +// StoreSlicePart stores the elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 8 or more elements, the method is equivalent to x.StoreSlice. +func (x Float32x8) StoreSlicePart(s []float32) { + if len(s) == 0 { + return + } + t := unsafe.Slice((*int32)(unsafe.Pointer(&s[0])), len(s)) + x.AsInt32x8().StoreSlicePart(t) +} + +// LoadFloat64x2SlicePart loads a Float64x2 from the slice s. +// If s has fewer than 2 elements, the remaining elements of the vector are filled with zeroes. +// If s has 2 or more elements, the function is equivalent to LoadFloat64x2Slice. +func LoadFloat64x2SlicePart(s []float64) Float64x2 { + if len(s) == 0 { + var zero Float64x2 + return zero + } + t := unsafe.Slice((*int64)(unsafe.Pointer(&s[0])), len(s)) + return LoadInt64x2SlicePart(t).AsFloat64x2() +} + +// StoreSlicePart stores the elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 2 or more elements, the method is equivalent to x.StoreSlice. +func (x Float64x2) StoreSlicePart(s []float64) { + if len(s) == 0 { + return + } + t := unsafe.Slice((*int64)(unsafe.Pointer(&s[0])), len(s)) + x.AsInt64x2().StoreSlicePart(t) +} + +// LoadFloat64x4SlicePart loads a Float64x4 from the slice s. +// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. +// If s has 4 or more elements, the function is equivalent to LoadFloat64x4Slice. +func LoadFloat64x4SlicePart(s []float64) Float64x4 { + if len(s) == 0 { + var zero Float64x4 + return zero + } + t := unsafe.Slice((*int64)(unsafe.Pointer(&s[0])), len(s)) + return LoadInt64x4SlicePart(t).AsFloat64x4() +} + +// StoreSlicePart stores the elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 4 or more elements, the method is equivalent to x.StoreSlice. +func (x Float64x4) StoreSlicePart(s []float64) { + if len(s) == 0 { + return + } + t := unsafe.Slice((*int64)(unsafe.Pointer(&s[0])), len(s)) + x.AsInt64x4().StoreSlicePart(t) +} diff --git a/src/simd/slicepart_test.go b/src/simd/slicepart_test.go index cd282be7b1c2af..cfdb7581d9e9e7 100644 --- a/src/simd/slicepart_test.go +++ b/src/simd/slicepart_test.go @@ -179,6 +179,7 @@ func TestSlicesPartStoreUint8x32(t *testing.T) { } func TestSlicePartInt32(t *testing.T) { + // 32x4 L := 4 c := []int32{1, 2, 3, 4, 5, -1, -1, -1, -1} a := c[:L+1] @@ -217,3 +218,126 @@ func TestSlicePartInt32(t *testing.T) { } } } + +func TestSlicePartUint64(t *testing.T) { + // 64x4 + L := 4 + c := []uint64{1, 2, 3, 4, 5, 86, 86, 86, 86} + a := c[:L+1] + for i := range a { + // Test the load first + // e is a partial slice. 
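+		// e shrinks from L+1 elements down to 1 as i advances. f below is
+		// pre-filled with the sentinel 99 so that a StoreSlicePart writing
+		// past len(e) elements is caught by the trailing check.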
+ e := a[i:] + v := simd.LoadUint64x4SlicePart(e) + // d contains what a ought to contain + d := make([]uint64, L) + for j := 0; j < len(e) && j < len(d); j++ { + d[j] = e[j] + } + + b := make([]uint64, L) + v.StoreSlice(b) + // test the load + checkSlices(t, d, b) + + // Test the store + f := make([]uint64, L+1) + for i := range f { + f[i] = 99 + } + + v.StoreSlicePart(f[:len(e)]) + if len(e) < len(b) { + checkSlices(t, f, b[:len(e)]) + } else { + checkSlices(t, f, b) + } + for i := len(e); i < len(f); i++ { + if f[i] != 99 { + t.Errorf("StoreSlicePart altered f[%d], expected 99, saw %d", i, f[i]) + } + } + } +} + +func TestSlicePartFloat64(t *testing.T) { + // 64x2 + L := 2 + c := []float64{1, 2, 3, 86, 86, 86, 86} + a := c[:L+1] + for i := range a { + // Test the load first + // e is a partial slice. + e := a[i:] + v := simd.LoadFloat64x2SlicePart(e) + // d contains what a ought to contain + d := make([]float64, L) + for j := 0; j < len(e) && j < len(d); j++ { + d[j] = e[j] + } + + b := make([]float64, L) + v.StoreSlice(b) + // test the load + checkSlices(t, d, b) + + // Test the store + f := make([]float64, L+1) + for i := range f { + f[i] = 99 + } + + v.StoreSlicePart(f[:len(e)]) + if len(e) < len(b) { + checkSlices(t, f, b[:len(e)]) + } else { + checkSlices(t, f, b) + } + for i := len(e); i < len(f); i++ { + if f[i] != 99 { + t.Errorf("StoreSlicePart altered f[%d], expected 99, saw %v", i, f[i]) + } + } + } +} + +func TestSlicePartFloat32(t *testing.T) { + // 32x8 + L := 8 + c := []float32{1, 2, 3, 4, 5, 6, 7, 8, 86, 86, 86, 86} + a := c[:L+1] + for i := range a { + // Test the load first + // e is a partial slice. + e := a[i:] + v := simd.LoadFloat32x8SlicePart(e) + // d contains what a ought to contain + d := make([]float32, L) + for j := 0; j < len(e) && j < len(d); j++ { + d[j] = e[j] + } + + b := make([]float32, L) + v.StoreSlice(b) + // test the load + checkSlices(t, d, b) + + // Test the store + f := make([]float32, L+1) + for i := range f { + f[i] = 99 + } + + v.StoreSlicePart(f[:len(e)]) + if len(e) < len(b) { + checkSlices(t, f, b[:len(e)]) + } else { + checkSlices(t, f, b) + } + for i := len(e); i < len(f); i++ { + if f[i] != 99 { + t.Errorf("StoreSlicePart altered f[%d], expected 99, saw %v", i, f[i]) + } + } + } +} From e62e377ed6d34cc4b085347b3abfa0566e7946c8 Mon Sep 17 00:00:00 2001 From: David Chase Date: Fri, 1 Aug 2025 16:17:32 -0400 Subject: [PATCH 099/139] [dev.simd] cmd/compile, simd: generated code from repaired simdgen sort generated by simdgen CL 689655 (which names a different CL, because it was submitted before realizing that git had lost a from a stack, somehow) Change-Id: Iab2868e848c221de98995ba0c632f97e2ee97670 Reviewed-on: https://go-review.googlesource.com/c/go/+/692336 Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI --- .../compile/internal/ssa/_gen/simdAMD64ops.go | 1756 +- .../internal/ssa/_gen/simdgenericOps.go | 3348 +-- src/cmd/compile/internal/ssa/opGen.go | 23732 ++++++++-------- 3 files changed, 14418 insertions(+), 14418 deletions(-) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 5a51e4400ad65f..3ab0eb527f89b6 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -3,992 +3,992 @@ package main func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vfpkv, w11, w21, w2k, wkw, w2kw, w2kk, w31, w3kw, wgpw, wgp, wfpw, wfpkw regInfo) []opData { return []opData{ - {name: 
"VADDPS512", argLength: 2, reg: w21, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VADDPSMasked512", argLength: 3, reg: w2kw, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VRCP14PS512", argLength: 1, reg: w11, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRCP14PSMasked512", argLength: 2, reg: wkw, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRSQRT14PS512", argLength: 1, reg: w11, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRSQRT14PSMasked512", argLength: 2, reg: wkw, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VCOMPRESSPSMasked512", argLength: 2, reg: wkw, asm: "VCOMPRESSPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VDIVPS512", argLength: 2, reg: w21, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VDIVPSMasked512", argLength: 3, reg: w2kw, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VFMADD213PS512", argLength: 3, reg: w31, asm: "VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADD213PSMasked512", argLength: 4, reg: w3kw, asm: "VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADDSUB213PS512", argLength: 3, reg: w31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADDSUB213PSMasked512", argLength: 4, reg: w3kw, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUBADD213PS512", argLength: 3, reg: w31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUBADD213PSMasked512", argLength: 4, reg: w3kw, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VMAXPS512", argLength: 2, reg: w21, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMAXPSMasked512", argLength: 3, reg: w2kw, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMINPS512", argLength: 2, reg: w21, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMINPSMasked512", argLength: 3, reg: w2kw, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMULPS512", argLength: 2, reg: w21, asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VSCALEFPS512", argLength: 2, reg: w21, asm: "VSCALEFPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSCALEFPSMasked512", argLength: 3, reg: w2kw, asm: "VSCALEFPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VMULPSMasked512", argLength: 3, reg: w2kw, asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VSQRTPS512", argLength: 1, reg: w11, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSQRTPSMasked512", argLength: 2, reg: wkw, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSUBPS512", argLength: 2, reg: w21, asm: "VSUBPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSUBPSMasked512", argLength: 3, reg: w2kw, asm: "VSUBPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VADDPD128", argLength: 2, reg: v21, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VADDPD256", argLength: 2, reg: v21, 
asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VADDPD512", argLength: 2, reg: w21, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VADDPDMasked128", argLength: 3, reg: w2kw, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VADDPDMasked256", argLength: 3, reg: w2kw, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VADDPDMasked512", argLength: 3, reg: w2kw, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VADDPS128", argLength: 2, reg: v21, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VADDPSMasked128", argLength: 3, reg: w2kw, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VADDSUBPS128", argLength: 2, reg: v21, asm: "VADDSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRCPPS128", argLength: 1, reg: v11, asm: "VRCPPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRCP14PSMasked128", argLength: 2, reg: wkw, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRSQRTPS128", argLength: 1, reg: v11, asm: "VRSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRSQRT14PSMasked128", argLength: 2, reg: wkw, asm: "VRSQRT14PS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VCOMPRESSPSMasked128", argLength: 2, reg: wkw, asm: "VCOMPRESSPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VDIVPS128", argLength: 2, reg: v21, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VDIVPSMasked128", argLength: 3, reg: w2kw, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VFMADD213PS128", argLength: 3, reg: w31, asm: "VFMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADD213PSMasked128", argLength: 4, reg: w3kw, asm: "VFMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADDSUB213PS128", argLength: 3, reg: w31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADDSUB213PSMasked128", argLength: 4, reg: w3kw, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUBADD213PS128", argLength: 3, reg: w31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUBADD213PSMasked128", argLength: 4, reg: w3kw, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VMAXPS128", argLength: 2, reg: v21, asm: "VMAXPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMAXPSMasked128", argLength: 3, reg: w2kw, asm: "VMAXPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMINPS128", argLength: 2, reg: v21, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMINPSMasked128", argLength: 3, reg: w2kw, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMULPS128", argLength: 2, reg: v21, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VSCALEFPS128", argLength: 2, reg: w21, asm: "VSCALEFPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSCALEFPSMasked128", argLength: 3, reg: w2kw, asm: "VSCALEFPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VMULPSMasked128", argLength: 3, reg: w2kw, asm: "VMULPS", commutative: true, typ: 
"Vec128", resultInArg0: false}, - {name: "VHADDPS128", argLength: 2, reg: v21, asm: "VHADDPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VHSUBPS128", argLength: 2, reg: v21, asm: "VHSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSQRTPS128", argLength: 1, reg: v11, asm: "VSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSQRTPSMasked128", argLength: 2, reg: wkw, asm: "VSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSUBPS128", argLength: 2, reg: v21, asm: "VSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSUBPSMasked128", argLength: 3, reg: w2kw, asm: "VSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VADDPS256", argLength: 2, reg: v21, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VADDPS512", argLength: 2, reg: w21, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VADDPSMasked128", argLength: 3, reg: w2kw, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VADDPSMasked256", argLength: 3, reg: w2kw, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VADDPSMasked512", argLength: 3, reg: w2kw, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VADDSUBPD128", argLength: 2, reg: v21, asm: "VADDSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VADDSUBPD256", argLength: 2, reg: v21, asm: "VADDSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VADDSUBPS128", argLength: 2, reg: v21, asm: "VADDSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VADDSUBPS256", argLength: 2, reg: v21, asm: "VADDSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRCPPS256", argLength: 1, reg: v11, asm: "VRCPPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRCP14PSMasked256", argLength: 2, reg: wkw, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRSQRTPS256", argLength: 1, reg: v11, asm: "VRSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRSQRT14PSMasked256", argLength: 2, reg: wkw, asm: "VRSQRT14PS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VCOMPRESSPDMasked128", argLength: 2, reg: wkw, asm: "VCOMPRESSPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VCOMPRESSPDMasked256", argLength: 2, reg: wkw, asm: "VCOMPRESSPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VCOMPRESSPDMasked512", argLength: 2, reg: wkw, asm: "VCOMPRESSPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VCOMPRESSPSMasked128", argLength: 2, reg: wkw, asm: "VCOMPRESSPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VCOMPRESSPSMasked256", argLength: 2, reg: wkw, asm: "VCOMPRESSPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VCOMPRESSPSMasked512", argLength: 2, reg: wkw, asm: "VCOMPRESSPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VDIVPD128", argLength: 2, reg: v21, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VDIVPD256", argLength: 2, reg: v21, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VDIVPD512", argLength: 2, reg: w21, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VDIVPDMasked128", argLength: 3, 
reg: w2kw, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VDIVPDMasked256", argLength: 3, reg: w2kw, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VDIVPDMasked512", argLength: 3, reg: w2kw, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VDIVPS128", argLength: 2, reg: v21, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPS256", argLength: 2, reg: v21, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VDIVPS512", argLength: 2, reg: w21, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VDIVPSMasked128", argLength: 3, reg: w2kw, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPSMasked256", argLength: 3, reg: w2kw, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VDIVPSMasked512", argLength: 3, reg: w2kw, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VFMADD213PD128", argLength: 3, reg: w31, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADD213PD256", argLength: 3, reg: w31, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADD213PD512", argLength: 3, reg: w31, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADD213PDMasked128", argLength: 4, reg: w3kw, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADD213PDMasked256", argLength: 4, reg: w3kw, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADD213PDMasked512", argLength: 4, reg: w3kw, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADD213PS128", argLength: 3, reg: w31, asm: "VFMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMADD213PS256", argLength: 3, reg: w31, asm: "VFMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADD213PS512", argLength: 3, reg: w31, asm: "VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADD213PSMasked128", argLength: 4, reg: w3kw, asm: "VFMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMADD213PSMasked256", argLength: 4, reg: w3kw, asm: "VFMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADD213PSMasked512", argLength: 4, reg: w3kw, asm: "VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB213PD128", argLength: 3, reg: w31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADDSUB213PD256", argLength: 3, reg: w31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB213PD512", argLength: 3, reg: w31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB213PDMasked128", argLength: 4, reg: w3kw, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADDSUB213PDMasked256", argLength: 4, reg: w3kw, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB213PDMasked512", argLength: 4, reg: w3kw, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB213PS128", argLength: 3, reg: w31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", 
resultInArg0: true}, {name: "VFMADDSUB213PS256", argLength: 3, reg: w31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB213PS512", argLength: 3, reg: w31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB213PSMasked128", argLength: 4, reg: w3kw, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMADDSUB213PSMasked256", argLength: 4, reg: w3kw, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB213PSMasked512", argLength: 4, reg: w3kw, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD213PD128", argLength: 3, reg: w31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUBADD213PD256", argLength: 3, reg: w31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUBADD213PD512", argLength: 3, reg: w31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD213PDMasked128", argLength: 4, reg: w3kw, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUBADD213PDMasked256", argLength: 4, reg: w3kw, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUBADD213PDMasked512", argLength: 4, reg: w3kw, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD213PS128", argLength: 3, reg: w31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMSUBADD213PS256", argLength: 3, reg: w31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUBADD213PS512", argLength: 3, reg: w31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD213PSMasked128", argLength: 4, reg: w3kw, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMSUBADD213PSMasked256", argLength: 4, reg: w3kw, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VMAXPS256", argLength: 2, reg: v21, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMAXPSMasked256", argLength: 3, reg: w2kw, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMINPS256", argLength: 2, reg: v21, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMINPSMasked256", argLength: 3, reg: w2kw, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMULPS256", argLength: 2, reg: v21, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VSCALEFPS256", argLength: 2, reg: w21, asm: "VSCALEFPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSCALEFPSMasked256", argLength: 3, reg: w2kw, asm: "VSCALEFPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VMULPSMasked256", argLength: 3, reg: w2kw, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VFMSUBADD213PSMasked512", argLength: 4, reg: w3kw, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VGF2P8MULB128", argLength: 2, reg: w21, asm: "VGF2P8MULB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8MULB256", argLength: 2, reg: w21, asm: "VGF2P8MULB", commutative: false, typ: "Vec256", 
resultInArg0: false}, + {name: "VGF2P8MULB512", argLength: 2, reg: w21, asm: "VGF2P8MULB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VGF2P8MULBMasked128", argLength: 3, reg: w2kw, asm: "VGF2P8MULB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8MULBMasked256", argLength: 3, reg: w2kw, asm: "VGF2P8MULB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8MULBMasked512", argLength: 3, reg: w2kw, asm: "VGF2P8MULB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VHADDPD128", argLength: 2, reg: v21, asm: "VHADDPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VHADDPD256", argLength: 2, reg: v21, asm: "VHADDPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VHADDPS128", argLength: 2, reg: v21, asm: "VHADDPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VHADDPS256", argLength: 2, reg: v21, asm: "VHADDPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VHSUBPD128", argLength: 2, reg: v21, asm: "VHSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VHSUBPD256", argLength: 2, reg: v21, asm: "VHSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VHSUBPS128", argLength: 2, reg: v21, asm: "VHSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VHSUBPS256", argLength: 2, reg: v21, asm: "VHSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSQRTPS256", argLength: 1, reg: v11, asm: "VSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSQRTPSMasked256", argLength: 2, reg: wkw, asm: "VSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSUBPS256", argLength: 2, reg: v21, asm: "VSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSUBPSMasked256", argLength: 3, reg: w2kw, asm: "VSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VADDPD128", argLength: 2, reg: v21, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VADDPDMasked128", argLength: 3, reg: w2kw, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VADDSUBPD128", argLength: 2, reg: v21, asm: "VADDSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRCP14PD128", argLength: 1, reg: w11, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRCP14PDMasked128", argLength: 2, reg: wkw, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRSQRT14PD128", argLength: 1, reg: w11, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRSQRT14PDMasked128", argLength: 2, reg: wkw, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VCOMPRESSPDMasked128", argLength: 2, reg: wkw, asm: "VCOMPRESSPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VDIVPD128", argLength: 2, reg: v21, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VDIVPDMasked128", argLength: 3, reg: w2kw, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VFMADD213PD128", argLength: 3, reg: w31, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADD213PDMasked128", argLength: 4, reg: w3kw, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADDSUB213PD128", argLength: 3, reg: 
w31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADDSUB213PDMasked128", argLength: 4, reg: w3kw, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUBADD213PD128", argLength: 3, reg: w31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUBADD213PDMasked128", argLength: 4, reg: w3kw, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VMAXPD128", argLength: 2, reg: v21, asm: "VMAXPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMAXPDMasked128", argLength: 3, reg: w2kw, asm: "VMAXPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMINPD128", argLength: 2, reg: v21, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMINPDMasked128", argLength: 3, reg: w2kw, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMULPD128", argLength: 2, reg: v21, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VSCALEFPD128", argLength: 2, reg: w21, asm: "VSCALEFPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSCALEFPDMasked128", argLength: 3, reg: w2kw, asm: "VSCALEFPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VMULPDMasked128", argLength: 3, reg: w2kw, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VHADDPD128", argLength: 2, reg: v21, asm: "VHADDPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VHSUBPD128", argLength: 2, reg: v21, asm: "VHSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSQRTPD128", argLength: 1, reg: v11, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSQRTPDMasked128", argLength: 2, reg: wkw, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSUBPD128", argLength: 2, reg: v21, asm: "VSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSUBPDMasked128", argLength: 3, reg: w2kw, asm: "VSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VADDPD256", argLength: 2, reg: v21, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VADDPDMasked256", argLength: 3, reg: w2kw, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VADDSUBPD256", argLength: 2, reg: v21, asm: "VADDSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRCP14PD256", argLength: 1, reg: w11, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRCP14PDMasked256", argLength: 2, reg: wkw, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRSQRT14PD256", argLength: 1, reg: w11, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRSQRT14PDMasked256", argLength: 2, reg: wkw, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VCOMPRESSPDMasked256", argLength: 2, reg: wkw, asm: "VCOMPRESSPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VDIVPD256", argLength: 2, reg: v21, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VDIVPDMasked256", argLength: 3, reg: w2kw, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VFMADD213PD256", argLength: 3, reg: w31, asm: "VFMADD213PD", commutative: false, typ: 
"Vec256", resultInArg0: true}, - {name: "VFMADD213PDMasked256", argLength: 4, reg: w3kw, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADDSUB213PD256", argLength: 3, reg: w31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADDSUB213PDMasked256", argLength: 4, reg: w3kw, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUBADD213PD256", argLength: 3, reg: w31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUBADD213PDMasked256", argLength: 4, reg: w3kw, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VMAXPD256", argLength: 2, reg: v21, asm: "VMAXPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMAXPDMasked256", argLength: 3, reg: w2kw, asm: "VMAXPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMINPD256", argLength: 2, reg: v21, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMINPDMasked256", argLength: 3, reg: w2kw, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMULPD256", argLength: 2, reg: v21, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VSCALEFPD256", argLength: 2, reg: w21, asm: "VSCALEFPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSCALEFPDMasked256", argLength: 3, reg: w2kw, asm: "VSCALEFPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VMULPDMasked256", argLength: 3, reg: w2kw, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VHADDPD256", argLength: 2, reg: v21, asm: "VHADDPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VHSUBPD256", argLength: 2, reg: v21, asm: "VHSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSQRTPD256", argLength: 1, reg: v11, asm: "VSQRTPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSQRTPDMasked256", argLength: 2, reg: wkw, asm: "VSQRTPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSUBPD256", argLength: 2, reg: v21, asm: "VSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSUBPDMasked256", argLength: 3, reg: w2kw, asm: "VSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VADDPD512", argLength: 2, reg: w21, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VADDPDMasked512", argLength: 3, reg: w2kw, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VRCP14PD512", argLength: 1, reg: w11, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRCP14PDMasked512", argLength: 2, reg: wkw, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRSQRT14PD512", argLength: 1, reg: w11, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRSQRT14PDMasked512", argLength: 2, reg: wkw, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VCOMPRESSPDMasked512", argLength: 2, reg: wkw, asm: "VCOMPRESSPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VDIVPD512", argLength: 2, reg: w21, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VDIVPDMasked512", argLength: 3, reg: w2kw, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: 
"VFMADD213PD512", argLength: 3, reg: w31, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADD213PDMasked512", argLength: 4, reg: w3kw, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADDSUB213PD512", argLength: 3, reg: w31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADDSUB213PDMasked512", argLength: 4, reg: w3kw, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUBADD213PD512", argLength: 3, reg: w31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUBADD213PDMasked512", argLength: 4, reg: w3kw, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VMAXPD512", argLength: 2, reg: w21, asm: "VMAXPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMAXPDMasked128", argLength: 3, reg: w2kw, asm: "VMAXPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMAXPDMasked256", argLength: 3, reg: w2kw, asm: "VMAXPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMAXPDMasked512", argLength: 3, reg: w2kw, asm: "VMAXPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMAXPS128", argLength: 2, reg: v21, asm: "VMAXPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMAXPS256", argLength: 2, reg: v21, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMAXPS512", argLength: 2, reg: w21, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMAXPSMasked128", argLength: 3, reg: w2kw, asm: "VMAXPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMAXPSMasked256", argLength: 3, reg: w2kw, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMAXPSMasked512", argLength: 3, reg: w2kw, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMINPD128", argLength: 2, reg: v21, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMINPD256", argLength: 2, reg: v21, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMINPD512", argLength: 2, reg: w21, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMINPDMasked128", argLength: 3, reg: w2kw, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMINPDMasked256", argLength: 3, reg: w2kw, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMINPDMasked512", argLength: 3, reg: w2kw, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMINPS128", argLength: 2, reg: v21, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMINPS256", argLength: 2, reg: v21, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMINPS512", argLength: 2, reg: w21, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMINPSMasked128", argLength: 3, reg: w2kw, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMINPSMasked256", argLength: 3, reg: w2kw, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMINPSMasked512", argLength: 3, reg: w2kw, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMULPD128", argLength: 2, reg: v21, asm: "VMULPD", commutative: true, typ: "Vec128", 
resultInArg0: false}, + {name: "VMULPD256", argLength: 2, reg: v21, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMULPD512", argLength: 2, reg: w21, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VSCALEFPD512", argLength: 2, reg: w21, asm: "VSCALEFPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSCALEFPDMasked512", argLength: 3, reg: w2kw, asm: "VSCALEFPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VMULPDMasked128", argLength: 3, reg: w2kw, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMULPDMasked256", argLength: 3, reg: w2kw, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMULPDMasked512", argLength: 3, reg: w2kw, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VSQRTPD512", argLength: 1, reg: w11, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSQRTPDMasked512", argLength: 2, reg: wkw, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSUBPD512", argLength: 2, reg: w21, asm: "VSUBPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSUBPDMasked512", argLength: 3, reg: w2kw, asm: "VSUBPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPABSW256", argLength: 1, reg: v11, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPABSWMasked256", argLength: 2, reg: wkw, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPADDW256", argLength: 2, reg: v21, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPADDWMasked256", argLength: 3, reg: w2kw, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPCOMPRESSWMasked256", argLength: 2, reg: wkw, asm: "VPCOMPRESSW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPEQW256", argLength: 2, reg: v21, asm: "VPCMPEQW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPGTW256", argLength: 2, reg: v21, asm: "VPCMPGTW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXSW256", argLength: 2, reg: v21, asm: "VPMAXSW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXSWMasked256", argLength: 3, reg: w2kw, asm: "VPMAXSW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINSW256", argLength: 2, reg: v21, asm: "VPMINSW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINSWMasked256", argLength: 3, reg: w2kw, asm: "VPMINSW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULHW256", argLength: 2, reg: v21, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULHWMasked256", argLength: 3, reg: w2kw, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULLW256", argLength: 2, reg: v21, asm: "VPMULLW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULLWMasked256", argLength: 3, reg: w2kw, asm: "VPMULLW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMADDWD256", argLength: 2, reg: v21, asm: "VPMADDWD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMADDWDMasked256", argLength: 3, reg: w2kw, asm: "VPMADDWD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPHADDW256", argLength: 2, reg: v21, asm: "VPHADDW", commutative: false, typ: 
"Vec256", resultInArg0: false}, - {name: "VPHSUBW256", argLength: 2, reg: v21, asm: "VPHSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTW256", argLength: 1, reg: w11, asm: "VPOPCNTW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTWMasked256", argLength: 2, reg: wkw, asm: "VPOPCNTW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPADDSW256", argLength: 2, reg: v21, asm: "VPADDSW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPADDSWMasked256", argLength: 3, reg: w2kw, asm: "VPADDSW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPHADDSW256", argLength: 2, reg: v21, asm: "VPHADDSW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPHSUBSW256", argLength: 2, reg: v21, asm: "VPHSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBSW256", argLength: 2, reg: v21, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBSWMasked256", argLength: 3, reg: w2kw, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSLLW256", argLength: 2, reg: vfpv, asm: "VPSLLW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSLLWMasked256", argLength: 3, reg: wfpkw, asm: "VPSLLW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAW256", argLength: 2, reg: vfpv, asm: "VPSRAW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAWMasked256", argLength: 3, reg: wfpkw, asm: "VPSRAW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSLLVW256", argLength: 2, reg: w21, asm: "VPSLLVW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDVW256", argLength: 3, reg: w31, asm: "VPSHLDVW", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSHLDVWMasked256", argLength: 4, reg: w3kw, asm: "VPSHLDVW", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSLLVWMasked256", argLength: 3, reg: w2kw, asm: "VPSLLVW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAVW256", argLength: 2, reg: w21, asm: "VPSRAVW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDVW256", argLength: 3, reg: w31, asm: "VPSHRDVW", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSHRDVWMasked256", argLength: 4, reg: w3kw, asm: "VPSHRDVW", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSRAVWMasked256", argLength: 3, reg: w2kw, asm: "VPSRAVW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSIGNW256", argLength: 2, reg: v21, asm: "VPSIGNW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBW256", argLength: 2, reg: v21, asm: "VPSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBWMasked256", argLength: 3, reg: w2kw, asm: "VPSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VMULPS128", argLength: 2, reg: v21, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMULPS256", argLength: 2, reg: v21, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMULPS512", argLength: 2, reg: w21, asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMULPSMasked128", argLength: 3, reg: w2kw, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMULPSMasked256", argLength: 3, reg: w2kw, asm: "VMULPS", commutative: true, typ: 
"Vec256", resultInArg0: false}, + {name: "VMULPSMasked512", argLength: 3, reg: w2kw, asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPABSB128", argLength: 1, reg: v11, asm: "VPABSB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSB256", argLength: 1, reg: v11, asm: "VPABSB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSB512", argLength: 1, reg: w11, asm: "VPABSB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPABSBMasked128", argLength: 2, reg: wkw, asm: "VPABSB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSBMasked256", argLength: 2, reg: wkw, asm: "VPABSB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSBMasked512", argLength: 2, reg: wkw, asm: "VPABSB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPABSD128", argLength: 1, reg: v11, asm: "VPABSD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSD256", argLength: 1, reg: v11, asm: "VPABSD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSD512", argLength: 1, reg: w11, asm: "VPABSD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPABSDMasked128", argLength: 2, reg: wkw, asm: "VPABSD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSDMasked256", argLength: 2, reg: wkw, asm: "VPABSD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSDMasked512", argLength: 2, reg: wkw, asm: "VPABSD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPABSQ128", argLength: 1, reg: w11, asm: "VPABSQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSQ256", argLength: 1, reg: w11, asm: "VPABSQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSQ512", argLength: 1, reg: w11, asm: "VPABSQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPABSQMasked128", argLength: 2, reg: wkw, asm: "VPABSQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSQMasked256", argLength: 2, reg: wkw, asm: "VPABSQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSQMasked512", argLength: 2, reg: wkw, asm: "VPABSQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPABSW128", argLength: 1, reg: v11, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSW256", argLength: 1, reg: v11, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPABSW512", argLength: 1, reg: w11, asm: "VPABSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPABSWMasked128", argLength: 2, reg: wkw, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSWMasked256", argLength: 2, reg: wkw, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPABSWMasked512", argLength: 2, reg: wkw, asm: "VPABSW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDW512", argLength: 2, reg: w21, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPADDWMasked512", argLength: 3, reg: w2kw, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPCOMPRESSWMasked512", argLength: 2, reg: wkw, asm: "VPCOMPRESSW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPCMPEQW512", argLength: 2, reg: w2k, asm: "VPCMPEQW", commutative: true, typ: "Mask", resultInArg0: false}, - 
{name: "VPCMPGTW512", argLength: 2, reg: w2k, asm: "VPCMPGTW", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPMAXSW512", argLength: 2, reg: w21, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXSWMasked512", argLength: 3, reg: w2kw, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSW512", argLength: 2, reg: w21, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSWMasked512", argLength: 3, reg: w2kw, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULHW512", argLength: 2, reg: w21, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULHWMasked512", argLength: 3, reg: w2kw, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULLW512", argLength: 2, reg: w21, asm: "VPMULLW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULLWMasked512", argLength: 3, reg: w2kw, asm: "VPMULLW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMADDWD512", argLength: 2, reg: w21, asm: "VPMADDWD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPMADDWDMasked512", argLength: 3, reg: w2kw, asm: "VPMADDWD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPOPCNTW512", argLength: 1, reg: w11, asm: "VPOPCNTW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPOPCNTWMasked512", argLength: 2, reg: wkw, asm: "VPOPCNTW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDB128", argLength: 2, reg: v21, asm: "VPADDB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDB256", argLength: 2, reg: v21, asm: "VPADDB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDB512", argLength: 2, reg: w21, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDBMasked128", argLength: 3, reg: w2kw, asm: "VPADDB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDBMasked256", argLength: 3, reg: w2kw, asm: "VPADDB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDBMasked512", argLength: 3, reg: w2kw, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDD128", argLength: 2, reg: v21, asm: "VPADDD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDD256", argLength: 2, reg: v21, asm: "VPADDD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDD512", argLength: 2, reg: w21, asm: "VPADDD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDDMasked128", argLength: 3, reg: w2kw, asm: "VPADDD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDDMasked256", argLength: 3, reg: w2kw, asm: "VPADDD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDDMasked512", argLength: 3, reg: w2kw, asm: "VPADDD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDQ128", argLength: 2, reg: v21, asm: "VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDQ256", argLength: 2, reg: v21, asm: "VPADDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDQ512", argLength: 2, reg: w21, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDQMasked128", argLength: 3, reg: w2kw, asm: "VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDQMasked256", 
argLength: 3, reg: w2kw, asm: "VPADDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDQMasked512", argLength: 3, reg: w2kw, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDSB128", argLength: 2, reg: v21, asm: "VPADDSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDSB256", argLength: 2, reg: v21, asm: "VPADDSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDSB512", argLength: 2, reg: w21, asm: "VPADDSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDSBMasked128", argLength: 3, reg: w2kw, asm: "VPADDSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDSBMasked256", argLength: 3, reg: w2kw, asm: "VPADDSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDSBMasked512", argLength: 3, reg: w2kw, asm: "VPADDSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDSW128", argLength: 2, reg: v21, asm: "VPADDSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDSW256", argLength: 2, reg: v21, asm: "VPADDSW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPADDSW512", argLength: 2, reg: w21, asm: "VPADDSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDSWMasked128", argLength: 3, reg: w2kw, asm: "VPADDSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDSWMasked256", argLength: 3, reg: w2kw, asm: "VPADDSW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPADDSWMasked512", argLength: 3, reg: w2kw, asm: "VPADDSW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBSW512", argLength: 2, reg: w21, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBSWMasked512", argLength: 3, reg: w2kw, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSLLW512", argLength: 2, reg: wfpw, asm: "VPSLLW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSLLWMasked512", argLength: 3, reg: wfpkw, asm: "VPSLLW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAW512", argLength: 2, reg: wfpw, asm: "VPSRAW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAWMasked512", argLength: 3, reg: wfpkw, asm: "VPSRAW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSLLVW512", argLength: 2, reg: w21, asm: "VPSLLVW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDVW512", argLength: 3, reg: w31, asm: "VPSHLDVW", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSHLDVWMasked512", argLength: 4, reg: w3kw, asm: "VPSHLDVW", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSLLVWMasked512", argLength: 3, reg: w2kw, asm: "VPSLLVW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAVW512", argLength: 2, reg: w21, asm: "VPSRAVW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDVW512", argLength: 3, reg: w31, asm: "VPSHRDVW", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSHRDVWMasked512", argLength: 4, reg: w3kw, asm: "VPSHRDVW", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSRAVWMasked512", argLength: 3, reg: w2kw, asm: "VPSRAVW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBW512", argLength: 2, reg: w21, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: 
"VPSUBWMasked512", argLength: 3, reg: w2kw, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPABSW128", argLength: 1, reg: v11, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPABSWMasked128", argLength: 2, reg: wkw, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDW128", argLength: 2, reg: v21, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDW256", argLength: 2, reg: v21, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDW512", argLength: 2, reg: w21, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPADDWMasked128", argLength: 3, reg: w2kw, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPCOMPRESSWMasked128", argLength: 2, reg: wkw, asm: "VPCOMPRESSW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPEQW128", argLength: 2, reg: v21, asm: "VPCMPEQW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPGTW128", argLength: 2, reg: v21, asm: "VPCMPGTW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXSW128", argLength: 2, reg: v21, asm: "VPMAXSW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXSWMasked128", argLength: 3, reg: w2kw, asm: "VPMAXSW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINSW128", argLength: 2, reg: v21, asm: "VPMINSW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINSWMasked128", argLength: 3, reg: w2kw, asm: "VPMINSW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULHW128", argLength: 2, reg: v21, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULHWMasked128", argLength: 3, reg: w2kw, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULLW128", argLength: 2, reg: v21, asm: "VPMULLW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULLWMasked128", argLength: 3, reg: w2kw, asm: "VPMULLW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMADDWD128", argLength: 2, reg: v21, asm: "VPMADDWD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMADDWDMasked128", argLength: 3, reg: w2kw, asm: "VPMADDWD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPHADDW128", argLength: 2, reg: v21, asm: "VPHADDW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPHSUBW128", argLength: 2, reg: v21, asm: "VPHSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTW128", argLength: 1, reg: w11, asm: "VPOPCNTW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTWMasked128", argLength: 2, reg: wkw, asm: "VPOPCNTW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPADDSW128", argLength: 2, reg: v21, asm: "VPADDSW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPADDSWMasked128", argLength: 3, reg: w2kw, asm: "VPADDSW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPHADDSW128", argLength: 2, reg: v21, asm: "VPHADDSW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPHSUBSW128", argLength: 2, reg: v21, asm: "VPHSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBSW128", argLength: 2, reg: v21, asm: "VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: 
"VPSUBSWMasked128", argLength: 3, reg: w2kw, asm: "VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSLLW128", argLength: 2, reg: vfpv, asm: "VPSLLW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSLLWMasked128", argLength: 3, reg: wfpkw, asm: "VPSLLW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAW128", argLength: 2, reg: vfpv, asm: "VPSRAW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAWMasked128", argLength: 3, reg: wfpkw, asm: "VPSRAW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSLLVW128", argLength: 2, reg: w21, asm: "VPSLLVW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDVW128", argLength: 3, reg: w31, asm: "VPSHLDVW", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSHLDVWMasked128", argLength: 4, reg: w3kw, asm: "VPSHLDVW", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSLLVWMasked128", argLength: 3, reg: w2kw, asm: "VPSLLVW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAVW128", argLength: 2, reg: w21, asm: "VPSRAVW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDVW128", argLength: 3, reg: w31, asm: "VPSHRDVW", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSHRDVWMasked128", argLength: 4, reg: w3kw, asm: "VPSHRDVW", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSRAVWMasked128", argLength: 3, reg: w2kw, asm: "VPSRAVW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSIGNW128", argLength: 2, reg: v21, asm: "VPSIGNW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBW128", argLength: 2, reg: v21, asm: "VPSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBWMasked128", argLength: 3, reg: w2kw, asm: "VPSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPABSD512", argLength: 1, reg: w11, asm: "VPABSD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPABSDMasked512", argLength: 2, reg: wkw, asm: "VPABSD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDD512", argLength: 2, reg: w21, asm: "VPADDD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPADDDMasked512", argLength: 3, reg: w2kw, asm: "VPADDD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDWMasked256", argLength: 3, reg: w2kw, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDWMasked512", argLength: 3, reg: w2kw, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPAND128", argLength: 2, reg: v21, asm: "VPAND", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPAND256", argLength: 2, reg: v21, asm: "VPAND", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPANDD512", argLength: 2, reg: w21, asm: "VPANDD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDDMasked128", argLength: 3, reg: w2kw, asm: "VPANDD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPANDDMasked256", argLength: 3, reg: w2kw, asm: "VPANDD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPANDDMasked512", argLength: 3, reg: w2kw, asm: "VPANDD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDN128", argLength: 2, reg: v21, asm: "VPANDN", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPANDN256", 
argLength: 2, reg: v21, asm: "VPANDN", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPANDND512", argLength: 2, reg: w21, asm: "VPANDND", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPANDNDMasked128", argLength: 3, reg: w2kw, asm: "VPANDND", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPANDNDMasked256", argLength: 3, reg: w2kw, asm: "VPANDND", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPANDNDMasked512", argLength: 3, reg: w2kw, asm: "VPANDND", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPCOMPRESSDMasked512", argLength: 2, reg: wkw, asm: "VPCOMPRESSD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPANDNQ512", argLength: 2, reg: w21, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPANDNQMasked128", argLength: 3, reg: w2kw, asm: "VPANDNQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPANDNQMasked256", argLength: 3, reg: w2kw, asm: "VPANDNQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPANDNQMasked512", argLength: 3, reg: w2kw, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPANDQ512", argLength: 2, reg: w21, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDQMasked128", argLength: 3, reg: w2kw, asm: "VPANDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPANDQMasked256", argLength: 3, reg: w2kw, asm: "VPANDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPANDQMasked512", argLength: 3, reg: w2kw, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPAVGB128", argLength: 2, reg: v21, asm: "VPAVGB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPAVGB256", argLength: 2, reg: v21, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPAVGB512", argLength: 2, reg: w21, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPAVGBMasked128", argLength: 3, reg: w2kw, asm: "VPAVGB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPAVGBMasked256", argLength: 3, reg: w2kw, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPAVGBMasked512", argLength: 3, reg: w2kw, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPAVGW128", argLength: 2, reg: v21, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPAVGW256", argLength: 2, reg: v21, asm: "VPAVGW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPAVGW512", argLength: 2, reg: w21, asm: "VPAVGW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPAVGWMasked128", argLength: 3, reg: w2kw, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPAVGWMasked256", argLength: 3, reg: w2kw, asm: "VPAVGW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPAVGWMasked512", argLength: 3, reg: w2kw, asm: "VPAVGW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPEQB128", argLength: 2, reg: v21, asm: "VPCMPEQB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPEQB256", argLength: 2, reg: v21, asm: "VPCMPEQB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPEQB512", argLength: 2, reg: w2k, asm: "VPCMPEQB", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPEQD128", 
argLength: 2, reg: v21, asm: "VPCMPEQD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPEQD256", argLength: 2, reg: v21, asm: "VPCMPEQD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPCMPEQD512", argLength: 2, reg: w2k, asm: "VPCMPEQD", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPEQQ128", argLength: 2, reg: v21, asm: "VPCMPEQQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPEQQ256", argLength: 2, reg: v21, asm: "VPCMPEQQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPEQQ512", argLength: 2, reg: w2k, asm: "VPCMPEQQ", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPEQW128", argLength: 2, reg: v21, asm: "VPCMPEQW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPEQW256", argLength: 2, reg: v21, asm: "VPCMPEQW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPEQW512", argLength: 2, reg: w2k, asm: "VPCMPEQW", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTB128", argLength: 2, reg: v21, asm: "VPCMPGTB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPGTB256", argLength: 2, reg: v21, asm: "VPCMPGTB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPGTB512", argLength: 2, reg: w2k, asm: "VPCMPGTB", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTD128", argLength: 2, reg: v21, asm: "VPCMPGTD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPGTD256", argLength: 2, reg: v21, asm: "VPCMPGTD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPGTD512", argLength: 2, reg: w2k, asm: "VPCMPGTD", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPMAXSD512", argLength: 2, reg: w21, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXSDMasked512", argLength: 3, reg: w2kw, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSD512", argLength: 2, reg: w21, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSDMasked512", argLength: 3, reg: w2kw, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULLD512", argLength: 2, reg: w21, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULLDMasked512", argLength: 3, reg: w2kw, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPORD512", argLength: 2, reg: w21, asm: "VPORD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPORDMasked512", argLength: 3, reg: w2kw, asm: "VPORD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPDPWSSD512", argLength: 3, reg: w31, asm: "VPDPWSSD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPDPWSSDMasked512", argLength: 4, reg: w3kw, asm: "VPDPWSSD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPOPCNTD512", argLength: 1, reg: w11, asm: "VPOPCNTD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPOPCNTDMasked512", argLength: 2, reg: wkw, asm: "VPOPCNTD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPROLVD512", argLength: 2, reg: w21, asm: "VPROLVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPROLVDMasked512", argLength: 3, reg: w2kw, asm: "VPROLVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORVD512", argLength: 2, reg: w21, asm: 
"VPRORVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORVDMasked512", argLength: 3, reg: w2kw, asm: "VPRORVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPDPWSSDS512", argLength: 3, reg: w31, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPDPWSSDSMasked512", argLength: 4, reg: w3kw, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPCMPGTQ128", argLength: 2, reg: v21, asm: "VPCMPGTQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPGTQ256", argLength: 2, reg: v21, asm: "VPCMPGTQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPGTQ512", argLength: 2, reg: w2k, asm: "VPCMPGTQ", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTW128", argLength: 2, reg: v21, asm: "VPCMPGTW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPGTW256", argLength: 2, reg: v21, asm: "VPCMPGTW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPGTW512", argLength: 2, reg: w2k, asm: "VPCMPGTW", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCOMPRESSBMasked128", argLength: 2, reg: wkw, asm: "VPCOMPRESSB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCOMPRESSBMasked256", argLength: 2, reg: wkw, asm: "VPCOMPRESSB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCOMPRESSBMasked512", argLength: 2, reg: wkw, asm: "VPCOMPRESSB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPCOMPRESSDMasked128", argLength: 2, reg: wkw, asm: "VPCOMPRESSD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCOMPRESSDMasked256", argLength: 2, reg: wkw, asm: "VPCOMPRESSD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCOMPRESSDMasked512", argLength: 2, reg: wkw, asm: "VPCOMPRESSD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPCOMPRESSQMasked128", argLength: 2, reg: wkw, asm: "VPCOMPRESSQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCOMPRESSQMasked256", argLength: 2, reg: wkw, asm: "VPCOMPRESSQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCOMPRESSQMasked512", argLength: 2, reg: wkw, asm: "VPCOMPRESSQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPCOMPRESSWMasked128", argLength: 2, reg: wkw, asm: "VPCOMPRESSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCOMPRESSWMasked256", argLength: 2, reg: wkw, asm: "VPCOMPRESSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCOMPRESSWMasked512", argLength: 2, reg: wkw, asm: "VPCOMPRESSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPDPBUSD128", argLength: 3, reg: v31, asm: "VPDPBUSD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPDPBUSD256", argLength: 3, reg: v31, asm: "VPDPBUSD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPDPBUSD512", argLength: 3, reg: w31, asm: "VPDPBUSD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPDPBUSDMasked128", argLength: 4, reg: w3kw, asm: "VPDPBUSD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPDPBUSDMasked256", argLength: 4, reg: w3kw, asm: "VPDPBUSD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPDPBUSDMasked512", argLength: 4, reg: w3kw, asm: "VPDPBUSD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: 
"VPDPBUSDS128", argLength: 3, reg: v31, asm: "VPDPBUSDS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPDPBUSDS256", argLength: 3, reg: v31, asm: "VPDPBUSDS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPDPBUSDS512", argLength: 3, reg: w31, asm: "VPDPBUSDS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPDPBUSDSMasked128", argLength: 4, reg: w3kw, asm: "VPDPBUSDS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPDPBUSDSMasked256", argLength: 4, reg: w3kw, asm: "VPDPBUSDS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPDPBUSDSMasked512", argLength: 4, reg: w3kw, asm: "VPDPBUSDS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSLLD512", argLength: 2, reg: wfpw, asm: "VPSLLD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSLLDMasked512", argLength: 3, reg: wfpkw, asm: "VPSLLD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAD512", argLength: 2, reg: wfpw, asm: "VPSRAD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRADMasked512", argLength: 3, reg: wfpkw, asm: "VPSRAD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSLLVD512", argLength: 2, reg: w21, asm: "VPSLLVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDVD512", argLength: 3, reg: w31, asm: "VPSHLDVD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSHLDVDMasked512", argLength: 4, reg: w3kw, asm: "VPSHLDVD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSLLVDMasked512", argLength: 3, reg: w2kw, asm: "VPSLLVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAVD512", argLength: 2, reg: w21, asm: "VPSRAVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDVD512", argLength: 3, reg: w31, asm: "VPSHRDVD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSHRDVDMasked512", argLength: 4, reg: w3kw, asm: "VPSHRDVD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSRAVDMasked512", argLength: 3, reg: w2kw, asm: "VPSRAVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBD512", argLength: 2, reg: w21, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBDMasked512", argLength: 3, reg: w2kw, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPDPBUSD512", argLength: 3, reg: w31, asm: "VPDPBUSD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPDPBUSDMasked512", argLength: 4, reg: w3kw, asm: "VPDPBUSD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPXORD512", argLength: 2, reg: w21, asm: "VPXORD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPXORDMasked512", argLength: 3, reg: w2kw, asm: "VPXORD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPABSD128", argLength: 1, reg: v11, asm: "VPABSD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPABSDMasked128", argLength: 2, reg: wkw, asm: "VPABSD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPADDD128", argLength: 2, reg: v21, asm: "VPADDD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPADDDMasked128", argLength: 3, reg: w2kw, asm: "VPADDD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPANDDMasked128", argLength: 3, reg: w2kw, asm: "VPANDD", commutative: true, typ: "Vec128", 
resultInArg0: false}, - {name: "VPANDNDMasked128", argLength: 3, reg: w2kw, asm: "VPANDND", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCOMPRESSDMasked128", argLength: 2, reg: wkw, asm: "VPCOMPRESSD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPEQD128", argLength: 2, reg: v21, asm: "VPCMPEQD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPGTD128", argLength: 2, reg: v21, asm: "VPCMPGTD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXSD128", argLength: 2, reg: v21, asm: "VPMAXSD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXSDMasked128", argLength: 3, reg: w2kw, asm: "VPMAXSD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINSD128", argLength: 2, reg: v21, asm: "VPMINSD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINSDMasked128", argLength: 3, reg: w2kw, asm: "VPMINSD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULDQ128", argLength: 2, reg: v21, asm: "VPMULDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULLD128", argLength: 2, reg: v21, asm: "VPMULLD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULLDMasked128", argLength: 3, reg: w2kw, asm: "VPMULLD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPORDMasked128", argLength: 3, reg: w2kw, asm: "VPORD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPDPWSSD128", argLength: 3, reg: v31, asm: "VPDPWSSD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPDPWSSD256", argLength: 3, reg: v31, asm: "VPDPWSSD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPDPWSSD512", argLength: 3, reg: w31, asm: "VPDPWSSD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPDPWSSDMasked128", argLength: 4, reg: w3kw, asm: "VPDPWSSD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPHADDD128", argLength: 2, reg: v21, asm: "VPHADDD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPHSUBD128", argLength: 2, reg: v21, asm: "VPHSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTD128", argLength: 1, reg: w11, asm: "VPOPCNTD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTDMasked128", argLength: 2, reg: wkw, asm: "VPOPCNTD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPROLVD128", argLength: 2, reg: w21, asm: "VPROLVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPROLVDMasked128", argLength: 3, reg: w2kw, asm: "VPROLVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORVD128", argLength: 2, reg: w21, asm: "VPRORVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORVDMasked128", argLength: 3, reg: w2kw, asm: "VPRORVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPDPWSSDMasked256", argLength: 4, reg: w3kw, asm: "VPDPWSSD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPDPWSSDMasked512", argLength: 4, reg: w3kw, asm: "VPDPWSSD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPDPWSSDS128", argLength: 3, reg: v31, asm: "VPDPWSSDS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPDPWSSDS256", argLength: 3, reg: v31, asm: "VPDPWSSDS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPDPWSSDS512", argLength: 3, reg: w31, asm: "VPDPWSSDS", commutative: 
false, typ: "Vec512", resultInArg0: true}, {name: "VPDPWSSDSMasked128", argLength: 4, reg: w3kw, asm: "VPDPWSSDS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPDPBUSDS128", argLength: 3, reg: v31, asm: "VPDPBUSDS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPDPBUSDSMasked128", argLength: 4, reg: w3kw, asm: "VPDPBUSDS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSLLD128", argLength: 2, reg: vfpv, asm: "VPSLLD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSLLDMasked128", argLength: 3, reg: wfpkw, asm: "VPSLLD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAD128", argLength: 2, reg: vfpv, asm: "VPSRAD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRADMasked128", argLength: 3, reg: wfpkw, asm: "VPSRAD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSLLVD128", argLength: 2, reg: v21, asm: "VPSLLVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDVD128", argLength: 3, reg: w31, asm: "VPSHLDVD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSHLDVDMasked128", argLength: 4, reg: w3kw, asm: "VPSHLDVD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSLLVDMasked128", argLength: 3, reg: w2kw, asm: "VPSLLVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAVD128", argLength: 2, reg: v21, asm: "VPSRAVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDVD128", argLength: 3, reg: w31, asm: "VPSHRDVD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSHRDVDMasked128", argLength: 4, reg: w3kw, asm: "VPSHRDVD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSRAVDMasked128", argLength: 3, reg: w2kw, asm: "VPSRAVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSIGND128", argLength: 2, reg: v21, asm: "VPSIGND", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBD128", argLength: 2, reg: v21, asm: "VPSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBDMasked128", argLength: 3, reg: w2kw, asm: "VPSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPDPBUSD128", argLength: 3, reg: v31, asm: "VPDPBUSD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPDPBUSDMasked128", argLength: 4, reg: w3kw, asm: "VPDPBUSD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPXORDMasked128", argLength: 3, reg: w2kw, asm: "VPXORD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPABSD256", argLength: 1, reg: v11, asm: "VPABSD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPABSDMasked256", argLength: 2, reg: wkw, asm: "VPABSD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPADDD256", argLength: 2, reg: v21, asm: "VPADDD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPADDDMasked256", argLength: 3, reg: w2kw, asm: "VPADDD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPANDDMasked256", argLength: 3, reg: w2kw, asm: "VPANDD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPANDNDMasked256", argLength: 3, reg: w2kw, asm: "VPANDND", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCOMPRESSDMasked256", argLength: 2, reg: wkw, asm: "VPCOMPRESSD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPEQD256", argLength: 2, reg: 
v21, asm: "VPCMPEQD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPGTD256", argLength: 2, reg: v21, asm: "VPCMPGTD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXSD256", argLength: 2, reg: v21, asm: "VPMAXSD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXSDMasked256", argLength: 3, reg: w2kw, asm: "VPMAXSD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINSD256", argLength: 2, reg: v21, asm: "VPMINSD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINSDMasked256", argLength: 3, reg: w2kw, asm: "VPMINSD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULDQ256", argLength: 2, reg: v21, asm: "VPMULDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULLD256", argLength: 2, reg: v21, asm: "VPMULLD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULLDMasked256", argLength: 3, reg: w2kw, asm: "VPMULLD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPORDMasked256", argLength: 3, reg: w2kw, asm: "VPORD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPDPWSSD256", argLength: 3, reg: v31, asm: "VPDPWSSD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPDPWSSDMasked256", argLength: 4, reg: w3kw, asm: "VPDPWSSD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPDPWSSDSMasked256", argLength: 4, reg: w3kw, asm: "VPDPWSSDS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPDPWSSDSMasked512", argLength: 4, reg: w3kw, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMB128", argLength: 2, reg: w21, asm: "VPERMB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPERMB256", argLength: 2, reg: w21, asm: "VPERMB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPERMB512", argLength: 2, reg: w21, asm: "VPERMB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPERMBMasked128", argLength: 3, reg: w2kw, asm: "VPERMB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPERMBMasked256", argLength: 3, reg: w2kw, asm: "VPERMB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPERMBMasked512", argLength: 3, reg: w2kw, asm: "VPERMB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPERMD256", argLength: 2, reg: v21, asm: "VPERMD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPERMD512", argLength: 2, reg: w21, asm: "VPERMD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPERMDMasked256", argLength: 3, reg: w2kw, asm: "VPERMD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPERMDMasked512", argLength: 3, reg: w2kw, asm: "VPERMD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPERMI2B128", argLength: 3, reg: w31, asm: "VPERMI2B", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPERMI2B256", argLength: 3, reg: w31, asm: "VPERMI2B", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMI2B512", argLength: 3, reg: w31, asm: "VPERMI2B", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMI2BMasked128", argLength: 4, reg: w3kw, asm: "VPERMI2B", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPERMI2BMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2B", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMI2BMasked512", 
argLength: 4, reg: w3kw, asm: "VPERMI2B", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMI2D128", argLength: 3, reg: w31, asm: "VPERMI2D", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPERMI2D256", argLength: 3, reg: w31, asm: "VPERMI2D", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMI2D512", argLength: 3, reg: w31, asm: "VPERMI2D", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMI2DMasked128", argLength: 4, reg: w3kw, asm: "VPERMI2D", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPERMI2DMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2D", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMI2DMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2D", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMI2PD128", argLength: 3, reg: w31, asm: "VPERMI2PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPERMI2PD256", argLength: 3, reg: w31, asm: "VPERMI2PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMI2PD512", argLength: 3, reg: w31, asm: "VPERMI2PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMI2PDMasked128", argLength: 4, reg: w3kw, asm: "VPERMI2PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPERMI2PDMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMI2PDMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMI2PS128", argLength: 3, reg: w31, asm: "VPERMI2PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPERMI2PS256", argLength: 3, reg: w31, asm: "VPERMI2PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMI2PS512", argLength: 3, reg: w31, asm: "VPERMI2PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMI2PSMasked128", argLength: 4, reg: w3kw, asm: "VPERMI2PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPERMI2PSMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMI2PSMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMI2Q128", argLength: 3, reg: w31, asm: "VPERMI2Q", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPERMI2Q256", argLength: 3, reg: w31, asm: "VPERMI2Q", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMI2Q512", argLength: 3, reg: w31, asm: "VPERMI2Q", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMI2QMasked128", argLength: 4, reg: w3kw, asm: "VPERMI2Q", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPERMI2QMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2Q", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMI2QMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2Q", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMI2W128", argLength: 3, reg: w31, asm: "VPERMI2W", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPERMI2W256", argLength: 3, reg: w31, asm: "VPERMI2W", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMI2W512", argLength: 3, reg: w31, asm: "VPERMI2W", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMI2WMasked128", argLength: 4, reg: w3kw, asm: "VPERMI2W", 
commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPERMI2WMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2W", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMI2WMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2W", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMPD256", argLength: 2, reg: w21, asm: "VPERMPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPERMPD512", argLength: 2, reg: w21, asm: "VPERMPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPERMPDMasked256", argLength: 3, reg: w2kw, asm: "VPERMPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPERMPDMasked512", argLength: 3, reg: w2kw, asm: "VPERMPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPERMPS256", argLength: 2, reg: v21, asm: "VPERMPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPERMPS512", argLength: 2, reg: w21, asm: "VPERMPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPERMPSMasked256", argLength: 3, reg: w2kw, asm: "VPERMPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPERMPSMasked512", argLength: 3, reg: w2kw, asm: "VPERMPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPERMQ256", argLength: 2, reg: w21, asm: "VPERMQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPERMQ512", argLength: 2, reg: w21, asm: "VPERMQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPERMQMasked256", argLength: 3, reg: w2kw, asm: "VPERMQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPERMQMasked512", argLength: 3, reg: w2kw, asm: "VPERMQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPERMW128", argLength: 2, reg: w21, asm: "VPERMW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPERMW256", argLength: 2, reg: w21, asm: "VPERMW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPERMW512", argLength: 2, reg: w21, asm: "VPERMW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPERMWMasked128", argLength: 3, reg: w2kw, asm: "VPERMW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPERMWMasked256", argLength: 3, reg: w2kw, asm: "VPERMW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPERMWMasked512", argLength: 3, reg: w2kw, asm: "VPERMW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPHADDD128", argLength: 2, reg: v21, asm: "VPHADDD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPHADDD256", argLength: 2, reg: v21, asm: "VPHADDD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPHADDSW128", argLength: 2, reg: v21, asm: "VPHADDSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPHADDSW256", argLength: 2, reg: v21, asm: "VPHADDSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPHADDW128", argLength: 2, reg: v21, asm: "VPHADDW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPHADDW256", argLength: 2, reg: v21, asm: "VPHADDW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPHSUBD128", argLength: 2, reg: v21, asm: "VPHSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPHSUBD256", argLength: 2, reg: v21, asm: "VPHSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTD256", argLength: 1, reg: w11, asm: "VPOPCNTD", commutative: 
false, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTDMasked256", argLength: 2, reg: wkw, asm: "VPOPCNTD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPROLVD256", argLength: 2, reg: w21, asm: "VPROLVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPROLVDMasked256", argLength: 3, reg: w2kw, asm: "VPROLVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORVD256", argLength: 2, reg: w21, asm: "VPRORVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORVDMasked256", argLength: 3, reg: w2kw, asm: "VPRORVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPDPWSSDS256", argLength: 3, reg: v31, asm: "VPDPWSSDS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPDPWSSDSMasked256", argLength: 4, reg: w3kw, asm: "VPDPWSSDS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPDPBUSDS256", argLength: 3, reg: v31, asm: "VPDPBUSDS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPDPBUSDSMasked256", argLength: 4, reg: w3kw, asm: "VPDPBUSDS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSLLD256", argLength: 2, reg: vfpv, asm: "VPSLLD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSLLDMasked256", argLength: 3, reg: wfpkw, asm: "VPSLLD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAD256", argLength: 2, reg: vfpv, asm: "VPSRAD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRADMasked256", argLength: 3, reg: wfpkw, asm: "VPSRAD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSLLVD256", argLength: 2, reg: v21, asm: "VPSLLVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDVD256", argLength: 3, reg: w31, asm: "VPSHLDVD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSHLDVDMasked256", argLength: 4, reg: w3kw, asm: "VPSHLDVD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSLLVDMasked256", argLength: 3, reg: w2kw, asm: "VPSLLVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAVD256", argLength: 2, reg: v21, asm: "VPSRAVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDVD256", argLength: 3, reg: w31, asm: "VPSHRDVD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSHRDVDMasked256", argLength: 4, reg: w3kw, asm: "VPSHRDVD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSRAVDMasked256", argLength: 3, reg: w2kw, asm: "VPSRAVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSIGND256", argLength: 2, reg: v21, asm: "VPSIGND", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBD256", argLength: 2, reg: v21, asm: "VPSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBDMasked256", argLength: 3, reg: w2kw, asm: "VPSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPDPBUSD256", argLength: 3, reg: v31, asm: "VPDPBUSD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPDPBUSDMasked256", argLength: 4, reg: w3kw, asm: "VPDPBUSD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPXORDMasked256", argLength: 3, reg: w2kw, asm: "VPXORD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPABSQ128", argLength: 1, reg: w11, asm: "VPABSQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPABSQMasked128", argLength: 2, reg: 
wkw, asm: "VPABSQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPADDQ128", argLength: 2, reg: v21, asm: "VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPADDQMasked128", argLength: 3, reg: w2kw, asm: "VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPANDQMasked128", argLength: 3, reg: w2kw, asm: "VPANDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPANDNQMasked128", argLength: 3, reg: w2kw, asm: "VPANDNQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCOMPRESSQMasked128", argLength: 2, reg: wkw, asm: "VPCOMPRESSQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPEQQ128", argLength: 2, reg: v21, asm: "VPCMPEQQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPGTQ128", argLength: 2, reg: v21, asm: "VPCMPGTQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPHSUBSW128", argLength: 2, reg: v21, asm: "VPHSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPHSUBSW256", argLength: 2, reg: v21, asm: "VPHSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPHSUBW128", argLength: 2, reg: v21, asm: "VPHSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPHSUBW256", argLength: 2, reg: v21, asm: "VPHSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMADDUBSW128", argLength: 2, reg: v21, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMADDUBSW256", argLength: 2, reg: v21, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMADDUBSW512", argLength: 2, reg: w21, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMADDUBSWMasked128", argLength: 3, reg: w2kw, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMADDUBSWMasked256", argLength: 3, reg: w2kw, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMADDUBSWMasked512", argLength: 3, reg: w2kw, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMADDWD128", argLength: 2, reg: v21, asm: "VPMADDWD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMADDWD256", argLength: 2, reg: v21, asm: "VPMADDWD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMADDWD512", argLength: 2, reg: w21, asm: "VPMADDWD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMADDWDMasked128", argLength: 3, reg: w2kw, asm: "VPMADDWD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMADDWDMasked256", argLength: 3, reg: w2kw, asm: "VPMADDWD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMADDWDMasked512", argLength: 3, reg: w2kw, asm: "VPMADDWD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSB128", argLength: 2, reg: v21, asm: "VPMAXSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSB256", argLength: 2, reg: v21, asm: "VPMAXSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSB512", argLength: 2, reg: w21, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSBMasked128", argLength: 3, reg: w2kw, asm: "VPMAXSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSBMasked256", argLength: 3, reg: w2kw, asm: "VPMAXSB", commutative: true, typ: "Vec256", 
resultInArg0: false}, + {name: "VPMAXSBMasked512", argLength: 3, reg: w2kw, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSD128", argLength: 2, reg: v21, asm: "VPMAXSD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSD256", argLength: 2, reg: v21, asm: "VPMAXSD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSD512", argLength: 2, reg: w21, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSDMasked128", argLength: 3, reg: w2kw, asm: "VPMAXSD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSDMasked256", argLength: 3, reg: w2kw, asm: "VPMAXSD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSDMasked512", argLength: 3, reg: w2kw, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXSQ128", argLength: 2, reg: w21, asm: "VPMAXSQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSQ256", argLength: 2, reg: w21, asm: "VPMAXSQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSQ512", argLength: 2, reg: w21, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXSQMasked128", argLength: 3, reg: w2kw, asm: "VPMAXSQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSQMasked256", argLength: 3, reg: w2kw, asm: "VPMAXSQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSQMasked512", argLength: 3, reg: w2kw, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSW128", argLength: 2, reg: v21, asm: "VPMAXSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSW256", argLength: 2, reg: v21, asm: "VPMAXSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSW512", argLength: 2, reg: w21, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSWMasked128", argLength: 3, reg: w2kw, asm: "VPMAXSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSWMasked256", argLength: 3, reg: w2kw, asm: "VPMAXSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSWMasked512", argLength: 3, reg: w2kw, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUB128", argLength: 2, reg: v21, asm: "VPMAXUB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUB256", argLength: 2, reg: v21, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUB512", argLength: 2, reg: w21, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUBMasked128", argLength: 3, reg: w2kw, asm: "VPMAXUB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUBMasked256", argLength: 3, reg: w2kw, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUBMasked512", argLength: 3, reg: w2kw, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUD128", argLength: 2, reg: v21, asm: "VPMAXUD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUD256", argLength: 2, reg: v21, asm: "VPMAXUD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUD512", argLength: 2, reg: w21, asm: "VPMAXUD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUDMasked128", argLength: 3, reg: w2kw, asm: "VPMAXUD", commutative: true, typ: "Vec128", 
resultInArg0: false}, + {name: "VPMAXUDMasked256", argLength: 3, reg: w2kw, asm: "VPMAXUD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUDMasked512", argLength: 3, reg: w2kw, asm: "VPMAXUD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUQ128", argLength: 2, reg: w21, asm: "VPMAXUQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUQ256", argLength: 2, reg: w21, asm: "VPMAXUQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUQ512", argLength: 2, reg: w21, asm: "VPMAXUQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUQMasked128", argLength: 3, reg: w2kw, asm: "VPMAXUQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUQMasked256", argLength: 3, reg: w2kw, asm: "VPMAXUQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUQMasked512", argLength: 3, reg: w2kw, asm: "VPMAXUQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUW128", argLength: 2, reg: v21, asm: "VPMAXUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUW256", argLength: 2, reg: v21, asm: "VPMAXUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUW512", argLength: 2, reg: w21, asm: "VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUWMasked128", argLength: 3, reg: w2kw, asm: "VPMAXUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUWMasked256", argLength: 3, reg: w2kw, asm: "VPMAXUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUWMasked512", argLength: 3, reg: w2kw, asm: "VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSB128", argLength: 2, reg: v21, asm: "VPMINSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSB256", argLength: 2, reg: v21, asm: "VPMINSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSB512", argLength: 2, reg: w21, asm: "VPMINSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSBMasked128", argLength: 3, reg: w2kw, asm: "VPMINSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSBMasked256", argLength: 3, reg: w2kw, asm: "VPMINSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSBMasked512", argLength: 3, reg: w2kw, asm: "VPMINSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSD128", argLength: 2, reg: v21, asm: "VPMINSD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSD256", argLength: 2, reg: v21, asm: "VPMINSD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSD512", argLength: 2, reg: w21, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSDMasked128", argLength: 3, reg: w2kw, asm: "VPMINSD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSDMasked256", argLength: 3, reg: w2kw, asm: "VPMINSD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSDMasked512", argLength: 3, reg: w2kw, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINSQ128", argLength: 2, reg: w21, asm: "VPMINSQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSQ256", argLength: 2, reg: w21, asm: "VPMINSQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSQ512", argLength: 2, reg: w21, asm: "VPMINSQ", commutative: true, typ: "Vec512", 
resultInArg0: false}, {name: "VPMINSQMasked128", argLength: 3, reg: w2kw, asm: "VPMINSQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSQMasked256", argLength: 3, reg: w2kw, asm: "VPMINSQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSQMasked512", argLength: 3, reg: w2kw, asm: "VPMINSQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSW128", argLength: 2, reg: v21, asm: "VPMINSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSW256", argLength: 2, reg: v21, asm: "VPMINSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSW512", argLength: 2, reg: w21, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSWMasked128", argLength: 3, reg: w2kw, asm: "VPMINSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSWMasked256", argLength: 3, reg: w2kw, asm: "VPMINSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSWMasked512", argLength: 3, reg: w2kw, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUB128", argLength: 2, reg: v21, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUB256", argLength: 2, reg: v21, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUB512", argLength: 2, reg: w21, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUBMasked128", argLength: 3, reg: w2kw, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUBMasked256", argLength: 3, reg: w2kw, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUBMasked512", argLength: 3, reg: w2kw, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUD128", argLength: 2, reg: v21, asm: "VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUD256", argLength: 2, reg: v21, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUD512", argLength: 2, reg: w21, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUDMasked128", argLength: 3, reg: w2kw, asm: "VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUDMasked256", argLength: 3, reg: w2kw, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUDMasked512", argLength: 3, reg: w2kw, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUQ128", argLength: 2, reg: w21, asm: "VPMINUQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUQ256", argLength: 2, reg: w21, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUQ512", argLength: 2, reg: w21, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUQMasked128", argLength: 3, reg: w2kw, asm: "VPMINUQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUQMasked256", argLength: 3, reg: w2kw, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUQMasked512", argLength: 3, reg: w2kw, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUW128", argLength: 2, reg: v21, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUW256", argLength: 2, reg: v21, asm: "VPMINUW", commutative: true, typ: "Vec256", 
resultInArg0: false}, + {name: "VPMINUW512", argLength: 2, reg: w21, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUWMasked128", argLength: 3, reg: w2kw, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUWMasked256", argLength: 3, reg: w2kw, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUWMasked512", argLength: 3, reg: w2kw, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULDQ128", argLength: 2, reg: v21, asm: "VPMULDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULDQ256", argLength: 2, reg: v21, asm: "VPMULDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULDQ512", argLength: 2, reg: w21, asm: "VPMULDQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULDQMasked128", argLength: 3, reg: w2kw, asm: "VPMULDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULDQMasked256", argLength: 3, reg: w2kw, asm: "VPMULDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULDQMasked512", argLength: 3, reg: w2kw, asm: "VPMULDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULHUW128", argLength: 2, reg: v21, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULHUW256", argLength: 2, reg: v21, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULHUW512", argLength: 2, reg: w21, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULHUWMasked128", argLength: 3, reg: w2kw, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULHUWMasked256", argLength: 3, reg: w2kw, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULHUWMasked512", argLength: 3, reg: w2kw, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULHW128", argLength: 2, reg: v21, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULHW256", argLength: 2, reg: v21, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULHW512", argLength: 2, reg: w21, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULHWMasked128", argLength: 3, reg: w2kw, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULHWMasked256", argLength: 3, reg: w2kw, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULHWMasked512", argLength: 3, reg: w2kw, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLD128", argLength: 2, reg: v21, asm: "VPMULLD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLD256", argLength: 2, reg: v21, asm: "VPMULLD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLD512", argLength: 2, reg: w21, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLDMasked128", argLength: 3, reg: w2kw, asm: "VPMULLD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLDMasked256", argLength: 3, reg: w2kw, asm: "VPMULLD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLDMasked512", argLength: 3, reg: w2kw, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULLQ128", argLength: 2, reg: w21, asm: "VPMULLQ", commutative: true, typ: 
"Vec128", resultInArg0: false}, + {name: "VPMULLQ256", argLength: 2, reg: w21, asm: "VPMULLQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLQ512", argLength: 2, reg: w21, asm: "VPMULLQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULLQMasked128", argLength: 3, reg: w2kw, asm: "VPMULLQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPORQMasked128", argLength: 3, reg: w2kw, asm: "VPORQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLQMasked256", argLength: 3, reg: w2kw, asm: "VPMULLQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLQMasked512", argLength: 3, reg: w2kw, asm: "VPMULLQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLW128", argLength: 2, reg: v21, asm: "VPMULLW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLW256", argLength: 2, reg: v21, asm: "VPMULLW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLW512", argLength: 2, reg: w21, asm: "VPMULLW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLWMasked128", argLength: 3, reg: w2kw, asm: "VPMULLW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLWMasked256", argLength: 3, reg: w2kw, asm: "VPMULLW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLWMasked512", argLength: 3, reg: w2kw, asm: "VPMULLW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULUDQ128", argLength: 2, reg: v21, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULUDQ256", argLength: 2, reg: v21, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULUDQ512", argLength: 2, reg: w21, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULUDQMasked128", argLength: 3, reg: w2kw, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULUDQMasked256", argLength: 3, reg: w2kw, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULUDQMasked512", argLength: 3, reg: w2kw, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTB128", argLength: 1, reg: w11, asm: "VPOPCNTB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTB256", argLength: 1, reg: w11, asm: "VPOPCNTB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTB512", argLength: 1, reg: w11, asm: "VPOPCNTB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTBMasked128", argLength: 2, reg: wkw, asm: "VPOPCNTB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTBMasked256", argLength: 2, reg: wkw, asm: "VPOPCNTB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTBMasked512", argLength: 2, reg: wkw, asm: "VPOPCNTB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTD128", argLength: 1, reg: w11, asm: "VPOPCNTD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTD256", argLength: 1, reg: w11, asm: "VPOPCNTD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTD512", argLength: 1, reg: w11, asm: "VPOPCNTD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTDMasked128", argLength: 2, reg: wkw, asm: "VPOPCNTD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTDMasked256", argLength: 2, reg: wkw, asm: 
"VPOPCNTD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTDMasked512", argLength: 2, reg: wkw, asm: "VPOPCNTD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPOPCNTQ128", argLength: 1, reg: w11, asm: "VPOPCNTQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTQ256", argLength: 1, reg: w11, asm: "VPOPCNTQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTQ512", argLength: 1, reg: w11, asm: "VPOPCNTQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPOPCNTQMasked128", argLength: 2, reg: wkw, asm: "VPOPCNTQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTQMasked256", argLength: 2, reg: wkw, asm: "VPOPCNTQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTQMasked512", argLength: 2, reg: wkw, asm: "VPOPCNTQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTW128", argLength: 1, reg: w11, asm: "VPOPCNTW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTW256", argLength: 1, reg: w11, asm: "VPOPCNTW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTW512", argLength: 1, reg: w11, asm: "VPOPCNTW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTWMasked128", argLength: 2, reg: wkw, asm: "VPOPCNTW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTWMasked256", argLength: 2, reg: wkw, asm: "VPOPCNTW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTWMasked512", argLength: 2, reg: wkw, asm: "VPOPCNTW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPOR128", argLength: 2, reg: v21, asm: "VPOR", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPOR256", argLength: 2, reg: v21, asm: "VPOR", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPORD512", argLength: 2, reg: w21, asm: "VPORD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPORDMasked128", argLength: 3, reg: w2kw, asm: "VPORD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPORDMasked256", argLength: 3, reg: w2kw, asm: "VPORD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPORDMasked512", argLength: 3, reg: w2kw, asm: "VPORD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPORQ512", argLength: 2, reg: w21, asm: "VPORQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPORQMasked128", argLength: 3, reg: w2kw, asm: "VPORQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPORQMasked256", argLength: 3, reg: w2kw, asm: "VPORQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPORQMasked512", argLength: 3, reg: w2kw, asm: "VPORQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPROLVD128", argLength: 2, reg: w21, asm: "VPROLVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLVD256", argLength: 2, reg: w21, asm: "VPROLVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLVD512", argLength: 2, reg: w21, asm: "VPROLVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLVDMasked128", argLength: 3, reg: w2kw, asm: "VPROLVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLVDMasked256", argLength: 3, reg: w2kw, asm: "VPROLVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLVDMasked512", argLength: 3, reg: w2kw, asm: 
"VPROLVD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPROLVQ128", argLength: 2, reg: w21, asm: "VPROLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLVQ256", argLength: 2, reg: w21, asm: "VPROLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLVQ512", argLength: 2, reg: w21, asm: "VPROLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPROLVQMasked128", argLength: 3, reg: w2kw, asm: "VPROLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLVQMasked256", argLength: 3, reg: w2kw, asm: "VPROLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLVQMasked512", argLength: 3, reg: w2kw, asm: "VPROLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORVD128", argLength: 2, reg: w21, asm: "VPRORVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORVD256", argLength: 2, reg: w21, asm: "VPRORVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORVD512", argLength: 2, reg: w21, asm: "VPRORVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORVDMasked128", argLength: 3, reg: w2kw, asm: "VPRORVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORVDMasked256", argLength: 3, reg: w2kw, asm: "VPRORVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORVDMasked512", argLength: 3, reg: w2kw, asm: "VPRORVD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPRORVQ128", argLength: 2, reg: w21, asm: "VPRORVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORVQ256", argLength: 2, reg: w21, asm: "VPRORVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORVQ512", argLength: 2, reg: w21, asm: "VPRORVQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPRORVQMasked128", argLength: 3, reg: w2kw, asm: "VPRORVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSLLQ128", argLength: 2, reg: vfpv, asm: "VPSLLQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSLLQMasked128", argLength: 3, reg: wfpkw, asm: "VPSLLQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAQ128", argLength: 2, reg: wfpw, asm: "VPSRAQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAQMasked128", argLength: 3, reg: wfpkw, asm: "VPSRAQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSLLVQ128", argLength: 2, reg: v21, asm: "VPSLLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORVQMasked256", argLength: 3, reg: w2kw, asm: "VPRORVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORVQMasked512", argLength: 3, reg: w2kw, asm: "VPRORVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDVD128", argLength: 3, reg: w31, asm: "VPSHLDVD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHLDVD256", argLength: 3, reg: w31, asm: "VPSHLDVD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHLDVD512", argLength: 3, reg: w31, asm: "VPSHLDVD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHLDVDMasked128", argLength: 4, reg: w3kw, asm: "VPSHLDVD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHLDVDMasked256", argLength: 4, reg: w3kw, asm: "VPSHLDVD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHLDVDMasked512", argLength: 
4, reg: w3kw, asm: "VPSHLDVD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPSHLDVQ128", argLength: 3, reg: w31, asm: "VPSHLDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHLDVQ256", argLength: 3, reg: w31, asm: "VPSHLDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHLDVQ512", argLength: 3, reg: w31, asm: "VPSHLDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPSHLDVQMasked128", argLength: 4, reg: w3kw, asm: "VPSHLDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSLLVQMasked128", argLength: 3, reg: w2kw, asm: "VPSLLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAVQ128", argLength: 2, reg: w21, asm: "VPSRAVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDVQMasked256", argLength: 4, reg: w3kw, asm: "VPSHLDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHLDVQMasked512", argLength: 4, reg: w3kw, asm: "VPSHLDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHLDVW128", argLength: 3, reg: w31, asm: "VPSHLDVW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHLDVW256", argLength: 3, reg: w31, asm: "VPSHLDVW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHLDVW512", argLength: 3, reg: w31, asm: "VPSHLDVW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHLDVWMasked128", argLength: 4, reg: w3kw, asm: "VPSHLDVW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHLDVWMasked256", argLength: 4, reg: w3kw, asm: "VPSHLDVW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHLDVWMasked512", argLength: 4, reg: w3kw, asm: "VPSHLDVW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHRDVD128", argLength: 3, reg: w31, asm: "VPSHRDVD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHRDVD256", argLength: 3, reg: w31, asm: "VPSHRDVD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHRDVD512", argLength: 3, reg: w31, asm: "VPSHRDVD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHRDVDMasked128", argLength: 4, reg: w3kw, asm: "VPSHRDVD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHRDVDMasked256", argLength: 4, reg: w3kw, asm: "VPSHRDVD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHRDVDMasked512", argLength: 4, reg: w3kw, asm: "VPSHRDVD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPSHRDVQ128", argLength: 3, reg: w31, asm: "VPSHRDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHRDVQ256", argLength: 3, reg: w31, asm: "VPSHRDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHRDVQ512", argLength: 3, reg: w31, asm: "VPSHRDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPSHRDVQMasked128", argLength: 4, reg: w3kw, asm: "VPSHRDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSRAVQMasked128", argLength: 3, reg: w2kw, asm: "VPSRAVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBQ128", argLength: 2, reg: v21, asm: "VPSUBQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBQMasked128", argLength: 3, reg: w2kw, asm: "VPSUBQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPXORQMasked128", argLength: 3, reg: w2kw, asm: "VPXORQ", commutative: true, typ: "Vec128", resultInArg0: false}, - 
{name: "VPABSQ256", argLength: 1, reg: w11, asm: "VPABSQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPABSQMasked256", argLength: 2, reg: wkw, asm: "VPABSQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPADDQ256", argLength: 2, reg: v21, asm: "VPADDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPADDQMasked256", argLength: 3, reg: w2kw, asm: "VPADDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPANDQMasked256", argLength: 3, reg: w2kw, asm: "VPANDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPANDNQMasked256", argLength: 3, reg: w2kw, asm: "VPANDNQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCOMPRESSQMasked256", argLength: 2, reg: wkw, asm: "VPCOMPRESSQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPEQQ256", argLength: 2, reg: v21, asm: "VPCMPEQQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPGTQ256", argLength: 2, reg: v21, asm: "VPCMPGTQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXSQ256", argLength: 2, reg: w21, asm: "VPMAXSQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXSQMasked256", argLength: 3, reg: w2kw, asm: "VPMAXSQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINSQ256", argLength: 2, reg: w21, asm: "VPMINSQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINSQMasked256", argLength: 3, reg: w2kw, asm: "VPMINSQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULDQMasked256", argLength: 3, reg: w2kw, asm: "VPMULDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULLQ256", argLength: 2, reg: w21, asm: "VPMULLQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULLQMasked256", argLength: 3, reg: w2kw, asm: "VPMULLQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPORQMasked256", argLength: 3, reg: w2kw, asm: "VPORQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTQ256", argLength: 1, reg: w11, asm: "VPOPCNTQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTQMasked256", argLength: 2, reg: wkw, asm: "VPOPCNTQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPROLVQ256", argLength: 2, reg: w21, asm: "VPROLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPROLVQMasked256", argLength: 3, reg: w2kw, asm: "VPROLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORVQ256", argLength: 2, reg: w21, asm: "VPRORVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORVQMasked256", argLength: 3, reg: w2kw, asm: "VPRORVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDVQMasked256", argLength: 4, reg: w3kw, asm: "VPSHRDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHRDVQMasked512", argLength: 4, reg: w3kw, asm: "VPSHRDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHRDVW128", argLength: 3, reg: w31, asm: "VPSHRDVW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHRDVW256", argLength: 3, reg: w31, asm: "VPSHRDVW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHRDVW512", argLength: 3, reg: w31, asm: "VPSHRDVW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHRDVWMasked128", argLength: 4, reg: w3kw, asm: "VPSHRDVW", commutative: false, typ: 
"Vec128", resultInArg0: true}, + {name: "VPSHRDVWMasked256", argLength: 4, reg: w3kw, asm: "VPSHRDVW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHRDVWMasked512", argLength: 4, reg: w3kw, asm: "VPSHRDVW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSIGNB128", argLength: 2, reg: v21, asm: "VPSIGNB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSIGNB256", argLength: 2, reg: v21, asm: "VPSIGNB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSIGND128", argLength: 2, reg: v21, asm: "VPSIGND", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSIGND256", argLength: 2, reg: v21, asm: "VPSIGND", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSIGNW128", argLength: 2, reg: v21, asm: "VPSIGNW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSIGNW256", argLength: 2, reg: v21, asm: "VPSIGNW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLD128", argLength: 2, reg: vfpv, asm: "VPSLLD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLD256", argLength: 2, reg: vfpv, asm: "VPSLLD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLD512", argLength: 2, reg: wfpw, asm: "VPSLLD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLDMasked128", argLength: 3, reg: wfpkw, asm: "VPSLLD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLDMasked256", argLength: 3, reg: wfpkw, asm: "VPSLLD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLDMasked512", argLength: 3, reg: wfpkw, asm: "VPSLLD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLQ128", argLength: 2, reg: vfpv, asm: "VPSLLQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSLLQ256", argLength: 2, reg: vfpv, asm: "VPSLLQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLQ512", argLength: 2, reg: wfpw, asm: "VPSLLQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLQMasked128", argLength: 3, reg: wfpkw, asm: "VPSLLQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSLLQMasked256", argLength: 3, reg: wfpkw, asm: "VPSLLQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAQ256", argLength: 2, reg: wfpw, asm: "VPSRAQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAQMasked256", argLength: 3, reg: wfpkw, asm: "VPSRAQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLQMasked512", argLength: 3, reg: wfpkw, asm: "VPSLLQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLVD128", argLength: 2, reg: v21, asm: "VPSLLVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLVD256", argLength: 2, reg: v21, asm: "VPSLLVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLVD512", argLength: 2, reg: w21, asm: "VPSLLVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLVDMasked128", argLength: 3, reg: w2kw, asm: "VPSLLVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLVDMasked256", argLength: 3, reg: w2kw, asm: "VPSLLVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLVDMasked512", argLength: 3, reg: w2kw, asm: "VPSLLVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLVQ128", argLength: 2, reg: v21, asm: "VPSLLVQ", commutative: false, typ: 
"Vec128", resultInArg0: false}, {name: "VPSLLVQ256", argLength: 2, reg: v21, asm: "VPSLLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDVQ256", argLength: 3, reg: w31, asm: "VPSHLDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSHLDVQMasked256", argLength: 4, reg: w3kw, asm: "VPSHLDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSLLVQ512", argLength: 2, reg: w21, asm: "VPSLLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLVQMasked128", argLength: 3, reg: w2kw, asm: "VPSLLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSLLVQMasked256", argLength: 3, reg: w2kw, asm: "VPSLLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAVQ256", argLength: 2, reg: w21, asm: "VPSRAVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDVQ256", argLength: 3, reg: w31, asm: "VPSHRDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSHRDVQMasked256", argLength: 4, reg: w3kw, asm: "VPSHRDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSRAVQMasked256", argLength: 3, reg: w2kw, asm: "VPSRAVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBQ256", argLength: 2, reg: v21, asm: "VPSUBQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBQMasked256", argLength: 3, reg: w2kw, asm: "VPSUBQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPXORQMasked256", argLength: 3, reg: w2kw, asm: "VPXORQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPABSQ512", argLength: 1, reg: w11, asm: "VPABSQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPABSQMasked512", argLength: 2, reg: wkw, asm: "VPABSQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDQ512", argLength: 2, reg: w21, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPADDQMasked512", argLength: 3, reg: w2kw, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPANDQ512", argLength: 2, reg: w21, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPANDQMasked512", argLength: 3, reg: w2kw, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPANDNQ512", argLength: 2, reg: w21, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPANDNQMasked512", argLength: 3, reg: w2kw, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPCOMPRESSQMasked512", argLength: 2, reg: wkw, asm: "VPCOMPRESSQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPCMPEQQ512", argLength: 2, reg: w2k, asm: "VPCMPEQQ", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPGTQ512", argLength: 2, reg: w2k, asm: "VPCMPGTQ", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPMAXSQ512", argLength: 2, reg: w21, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXSQMasked512", argLength: 3, reg: w2kw, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSQ512", argLength: 2, reg: w21, asm: "VPMINSQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSQMasked512", argLength: 3, reg: w2kw, asm: "VPMINSQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULDQ512", argLength: 2, reg: w21, asm: "VPMULDQ", commutative: true, typ: 
"Vec512", resultInArg0: false}, - {name: "VPMULDQMasked512", argLength: 3, reg: w2kw, asm: "VPMULDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULLQ512", argLength: 2, reg: w21, asm: "VPMULLQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULLQMasked512", argLength: 3, reg: w2kw, asm: "VPMULLQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPORQ512", argLength: 2, reg: w21, asm: "VPORQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPORQMasked512", argLength: 3, reg: w2kw, asm: "VPORQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPOPCNTQ512", argLength: 1, reg: w11, asm: "VPOPCNTQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPOPCNTQMasked512", argLength: 2, reg: wkw, asm: "VPOPCNTQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPROLVQ512", argLength: 2, reg: w21, asm: "VPROLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPROLVQMasked512", argLength: 3, reg: w2kw, asm: "VPROLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORVQ512", argLength: 2, reg: w21, asm: "VPRORVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORVQMasked512", argLength: 3, reg: w2kw, asm: "VPRORVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSLLQ512", argLength: 2, reg: wfpw, asm: "VPSLLQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSLLQMasked512", argLength: 3, reg: wfpkw, asm: "VPSLLQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLVQMasked512", argLength: 3, reg: w2kw, asm: "VPSLLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLVW128", argLength: 2, reg: w21, asm: "VPSLLVW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLVW256", argLength: 2, reg: w21, asm: "VPSLLVW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLVW512", argLength: 2, reg: w21, asm: "VPSLLVW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLVWMasked128", argLength: 3, reg: w2kw, asm: "VPSLLVW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLVWMasked256", argLength: 3, reg: w2kw, asm: "VPSLLVW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLVWMasked512", argLength: 3, reg: w2kw, asm: "VPSLLVW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLW128", argLength: 2, reg: vfpv, asm: "VPSLLW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLW256", argLength: 2, reg: vfpv, asm: "VPSLLW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLW512", argLength: 2, reg: wfpw, asm: "VPSLLW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLWMasked128", argLength: 3, reg: wfpkw, asm: "VPSLLW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLWMasked256", argLength: 3, reg: wfpkw, asm: "VPSLLW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLWMasked512", argLength: 3, reg: wfpkw, asm: "VPSLLW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAD128", argLength: 2, reg: vfpv, asm: "VPSRAD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAD256", argLength: 2, reg: vfpv, asm: "VPSRAD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAD512", argLength: 2, reg: wfpw, asm: "VPSRAD", commutative: false, 
typ: "Vec512", resultInArg0: false}, + {name: "VPSRADMasked128", argLength: 3, reg: wfpkw, asm: "VPSRAD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRADMasked256", argLength: 3, reg: wfpkw, asm: "VPSRAD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRADMasked512", argLength: 3, reg: wfpkw, asm: "VPSRAD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAQ128", argLength: 2, reg: wfpw, asm: "VPSRAQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAQ256", argLength: 2, reg: wfpw, asm: "VPSRAQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRAQ512", argLength: 2, reg: wfpw, asm: "VPSRAQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAQMasked128", argLength: 3, reg: wfpkw, asm: "VPSRAQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAQMasked256", argLength: 3, reg: wfpkw, asm: "VPSRAQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRAQMasked512", argLength: 3, reg: wfpkw, asm: "VPSRAQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSLLVQ512", argLength: 2, reg: w21, asm: "VPSLLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDVQ512", argLength: 3, reg: w31, asm: "VPSHLDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSHLDVQMasked512", argLength: 4, reg: w3kw, asm: "VPSHLDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSLLVQMasked512", argLength: 3, reg: w2kw, asm: "VPSLLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAVD128", argLength: 2, reg: v21, asm: "VPSRAVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAVD256", argLength: 2, reg: v21, asm: "VPSRAVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAVD512", argLength: 2, reg: w21, asm: "VPSRAVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAVDMasked128", argLength: 3, reg: w2kw, asm: "VPSRAVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAVDMasked256", argLength: 3, reg: w2kw, asm: "VPSRAVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAVDMasked512", argLength: 3, reg: w2kw, asm: "VPSRAVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAVQ128", argLength: 2, reg: w21, asm: "VPSRAVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAVQ256", argLength: 2, reg: w21, asm: "VPSRAVQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRAVQ512", argLength: 2, reg: w21, asm: "VPSRAVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDVQ512", argLength: 3, reg: w31, asm: "VPSHRDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSHRDVQMasked512", argLength: 4, reg: w3kw, asm: "VPSHRDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRAVQMasked128", argLength: 3, reg: w2kw, asm: "VPSRAVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAVQMasked256", argLength: 3, reg: w2kw, asm: "VPSRAVQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRAVQMasked512", argLength: 3, reg: w2kw, asm: "VPSRAVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBQ512", argLength: 2, reg: w21, asm: "VPSUBQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBQMasked512", argLength: 3, reg: w2kw, asm: "VPSUBQ", 
commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPXORQ512", argLength: 2, reg: w21, asm: "VPXORQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPXORQMasked512", argLength: 3, reg: w2kw, asm: "VPXORQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPABSB128", argLength: 1, reg: v11, asm: "VPABSB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPABSBMasked128", argLength: 2, reg: wkw, asm: "VPABSB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPADDB128", argLength: 2, reg: v21, asm: "VPADDB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPADDBMasked128", argLength: 3, reg: w2kw, asm: "VPADDB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPAND128", argLength: 2, reg: v21, asm: "VPAND", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPANDN128", argLength: 2, reg: v21, asm: "VPANDN", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCOMPRESSBMasked128", argLength: 2, reg: wkw, asm: "VPCOMPRESSB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPEQB128", argLength: 2, reg: v21, asm: "VPCMPEQB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPGTB128", argLength: 2, reg: v21, asm: "VPCMPGTB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXSB128", argLength: 2, reg: v21, asm: "VPMAXSB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXSBMasked128", argLength: 3, reg: w2kw, asm: "VPMAXSB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINSB128", argLength: 2, reg: v21, asm: "VPMINSB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINSBMasked128", argLength: 3, reg: w2kw, asm: "VPMINSB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPOR128", argLength: 2, reg: v21, asm: "VPOR", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTB128", argLength: 1, reg: w11, asm: "VPOPCNTB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTBMasked128", argLength: 2, reg: wkw, asm: "VPOPCNTB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPADDSB128", argLength: 2, reg: v21, asm: "VPADDSB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPADDSBMasked128", argLength: 3, reg: w2kw, asm: "VPADDSB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBSB128", argLength: 2, reg: v21, asm: "VPSUBSB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBSBMasked128", argLength: 3, reg: w2kw, asm: "VPSUBSB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSIGNB128", argLength: 2, reg: v21, asm: "VPSIGNB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBB128", argLength: 2, reg: v21, asm: "VPSUBB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBBMasked128", argLength: 3, reg: w2kw, asm: "VPSUBB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPXOR128", argLength: 2, reg: v21, asm: "VPXOR", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPABSB256", argLength: 1, reg: v11, asm: "VPABSB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPABSBMasked256", argLength: 2, reg: wkw, asm: "VPABSB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPADDB256", argLength: 2, reg: v21, asm: "VPADDB", commutative: true, typ: "Vec256", 
resultInArg0: false}, - {name: "VPADDBMasked256", argLength: 3, reg: w2kw, asm: "VPADDB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPAND256", argLength: 2, reg: v21, asm: "VPAND", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPANDN256", argLength: 2, reg: v21, asm: "VPANDN", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCOMPRESSBMasked256", argLength: 2, reg: wkw, asm: "VPCOMPRESSB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPEQB256", argLength: 2, reg: v21, asm: "VPCMPEQB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPGTB256", argLength: 2, reg: v21, asm: "VPCMPGTB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXSB256", argLength: 2, reg: v21, asm: "VPMAXSB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXSBMasked256", argLength: 3, reg: w2kw, asm: "VPMAXSB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINSB256", argLength: 2, reg: v21, asm: "VPMINSB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINSBMasked256", argLength: 3, reg: w2kw, asm: "VPMINSB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPOR256", argLength: 2, reg: v21, asm: "VPOR", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTB256", argLength: 1, reg: w11, asm: "VPOPCNTB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTBMasked256", argLength: 2, reg: wkw, asm: "VPOPCNTB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPADDSB256", argLength: 2, reg: v21, asm: "VPADDSB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPADDSBMasked256", argLength: 3, reg: w2kw, asm: "VPADDSB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBSB256", argLength: 2, reg: v21, asm: "VPSUBSB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBSBMasked256", argLength: 3, reg: w2kw, asm: "VPSUBSB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSIGNB256", argLength: 2, reg: v21, asm: "VPSIGNB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBB256", argLength: 2, reg: v21, asm: "VPSUBB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBBMasked256", argLength: 3, reg: w2kw, asm: "VPSUBB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPXOR256", argLength: 2, reg: v21, asm: "VPXOR", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPABSB512", argLength: 1, reg: w11, asm: "VPABSB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPABSBMasked512", argLength: 2, reg: wkw, asm: "VPABSB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDB512", argLength: 2, reg: w21, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPADDBMasked512", argLength: 3, reg: w2kw, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPCOMPRESSBMasked512", argLength: 2, reg: wkw, asm: "VPCOMPRESSB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPCMPEQB512", argLength: 2, reg: w2k, asm: "VPCMPEQB", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPGTB512", argLength: 2, reg: w2k, asm: "VPCMPGTB", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPMAXSB512", argLength: 2, reg: w21, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false}, 
- {name: "VPMAXSBMasked512", argLength: 3, reg: w2kw, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSB512", argLength: 2, reg: w21, asm: "VPMINSB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSBMasked512", argLength: 3, reg: w2kw, asm: "VPMINSB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPOPCNTB512", argLength: 1, reg: w11, asm: "VPOPCNTB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPOPCNTBMasked512", argLength: 2, reg: wkw, asm: "VPOPCNTB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDSB512", argLength: 2, reg: w21, asm: "VPADDSB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPADDSBMasked512", argLength: 3, reg: w2kw, asm: "VPADDSB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBSB512", argLength: 2, reg: w21, asm: "VPSUBSB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBSBMasked512", argLength: 3, reg: w2kw, asm: "VPSUBSB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBB512", argLength: 2, reg: w21, asm: "VPSUBB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBBMasked512", argLength: 3, reg: w2kw, asm: "VPSUBB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPAVGW256", argLength: 2, reg: v21, asm: "VPAVGW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPAVGWMasked256", argLength: 3, reg: w2kw, asm: "VPAVGW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUW256", argLength: 2, reg: v21, asm: "VPMAXUW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUWMasked256", argLength: 3, reg: w2kw, asm: "VPMAXUW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUW256", argLength: 2, reg: v21, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUWMasked256", argLength: 3, reg: w2kw, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULHUW256", argLength: 2, reg: v21, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULHUWMasked256", argLength: 3, reg: w2kw, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPERMW256", argLength: 2, reg: w21, asm: "VPERMW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPERMI2W256", argLength: 3, reg: w31, asm: "VPERMI2W", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPERMI2WMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2W", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPERMWMasked256", argLength: 3, reg: w2kw, asm: "VPERMW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLW256", argLength: 2, reg: vfpv, asm: "VPSRLW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLWMasked256", argLength: 3, reg: wfpkw, asm: "VPSRLW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLVW256", argLength: 2, reg: w21, asm: "VPSRLVW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLVWMasked256", argLength: 3, reg: w2kw, asm: "VPSRLVW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPAVGW512", argLength: 2, reg: w21, asm: "VPAVGW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPAVGWMasked512", argLength: 3, reg: w2kw, asm: "VPAVGW", commutative: true, typ: "Vec512", resultInArg0: 
false}, - {name: "VPMAXUW512", argLength: 2, reg: w21, asm: "VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXUWMasked512", argLength: 3, reg: w2kw, asm: "VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUW512", argLength: 2, reg: w21, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUWMasked512", argLength: 3, reg: w2kw, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULHUW512", argLength: 2, reg: w21, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULHUWMasked512", argLength: 3, reg: w2kw, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPERMW512", argLength: 2, reg: w21, asm: "VPERMW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPERMI2W512", argLength: 3, reg: w31, asm: "VPERMI2W", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPERMI2WMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2W", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPERMWMasked512", argLength: 3, reg: w2kw, asm: "VPERMW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLW512", argLength: 2, reg: wfpw, asm: "VPSRLW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLWMasked512", argLength: 3, reg: wfpkw, asm: "VPSRLW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLVW512", argLength: 2, reg: w21, asm: "VPSRLVW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLVWMasked512", argLength: 3, reg: w2kw, asm: "VPSRLVW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPAVGW128", argLength: 2, reg: v21, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPAVGWMasked128", argLength: 3, reg: w2kw, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUW128", argLength: 2, reg: v21, asm: "VPMAXUW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUWMasked128", argLength: 3, reg: w2kw, asm: "VPMAXUW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUW128", argLength: 2, reg: v21, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUWMasked128", argLength: 3, reg: w2kw, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULHUW128", argLength: 2, reg: v21, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULHUWMasked128", argLength: 3, reg: w2kw, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPERMW128", argLength: 2, reg: w21, asm: "VPERMW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPERMI2W128", argLength: 3, reg: w31, asm: "VPERMI2W", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPERMI2WMasked128", argLength: 4, reg: w3kw, asm: "VPERMI2W", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPERMWMasked128", argLength: 3, reg: w2kw, asm: "VPERMW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLW128", argLength: 2, reg: vfpv, asm: "VPSRLW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLWMasked128", argLength: 3, reg: wfpkw, asm: "VPSRLW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLVW128", argLength: 2, reg: w21, asm: "VPSRLVW", commutative: false, typ: "Vec128", 
resultInArg0: false}, - {name: "VPSRLVWMasked128", argLength: 3, reg: w2kw, asm: "VPSRLVW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUD512", argLength: 2, reg: w21, asm: "VPMAXUD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXUDMasked512", argLength: 3, reg: w2kw, asm: "VPMAXUD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUD512", argLength: 2, reg: w21, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUDMasked512", argLength: 3, reg: w2kw, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPERMD512", argLength: 2, reg: w21, asm: "VPERMD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPERMPS512", argLength: 2, reg: w21, asm: "VPERMPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPERMI2PS512", argLength: 3, reg: w31, asm: "VPERMI2PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPERMI2D512", argLength: 3, reg: w31, asm: "VPERMI2D", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPERMI2PSMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPERMI2DMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2D", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPERMPSMasked512", argLength: 3, reg: w2kw, asm: "VPERMPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPERMDMasked512", argLength: 3, reg: w2kw, asm: "VPERMD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLD512", argLength: 2, reg: wfpw, asm: "VPSRLD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLDMasked512", argLength: 3, reg: wfpkw, asm: "VPSRLD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLVD512", argLength: 2, reg: w21, asm: "VPSRLVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLVDMasked512", argLength: 3, reg: w2kw, asm: "VPSRLVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXUD128", argLength: 2, reg: v21, asm: "VPMAXUD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUDMasked128", argLength: 3, reg: w2kw, asm: "VPMAXUD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUD128", argLength: 2, reg: v21, asm: "VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUDMasked128", argLength: 3, reg: w2kw, asm: "VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULUDQ128", argLength: 2, reg: v21, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPERMI2PS128", argLength: 3, reg: w31, asm: "VPERMI2PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPERMI2D128", argLength: 3, reg: w31, asm: "VPERMI2D", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPERMI2DMasked128", argLength: 4, reg: w3kw, asm: "VPERMI2D", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPERMI2PSMasked128", argLength: 4, reg: w3kw, asm: "VPERMI2PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSRLD128", argLength: 2, reg: vfpv, asm: "VPSRLD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLDMasked128", argLength: 3, reg: wfpkw, asm: "VPSRLD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLVD128", argLength: 2, reg: v21, asm: "VPSRLVD", 
commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLVDMasked128", argLength: 3, reg: w2kw, asm: "VPSRLVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUD256", argLength: 2, reg: v21, asm: "VPMAXUD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUDMasked256", argLength: 3, reg: w2kw, asm: "VPMAXUD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUD256", argLength: 2, reg: v21, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUDMasked256", argLength: 3, reg: w2kw, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULUDQ256", argLength: 2, reg: v21, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPERMD256", argLength: 2, reg: v21, asm: "VPERMD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPERMPS256", argLength: 2, reg: v21, asm: "VPERMPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPERMI2PS256", argLength: 3, reg: w31, asm: "VPERMI2PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPERMI2D256", argLength: 3, reg: w31, asm: "VPERMI2D", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPERMI2PSMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPERMI2DMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2D", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPERMPSMasked256", argLength: 3, reg: w2kw, asm: "VPERMPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPERMDMasked256", argLength: 3, reg: w2kw, asm: "VPERMD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAVW128", argLength: 2, reg: w21, asm: "VPSRAVW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAVW256", argLength: 2, reg: w21, asm: "VPSRAVW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAVW512", argLength: 2, reg: w21, asm: "VPSRAVW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAVWMasked128", argLength: 3, reg: w2kw, asm: "VPSRAVW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAVWMasked256", argLength: 3, reg: w2kw, asm: "VPSRAVW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAVWMasked512", argLength: 3, reg: w2kw, asm: "VPSRAVW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAW128", argLength: 2, reg: vfpv, asm: "VPSRAW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAW256", argLength: 2, reg: vfpv, asm: "VPSRAW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAW512", argLength: 2, reg: wfpw, asm: "VPSRAW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAWMasked128", argLength: 3, reg: wfpkw, asm: "VPSRAW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAWMasked256", argLength: 3, reg: wfpkw, asm: "VPSRAW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAWMasked512", argLength: 3, reg: wfpkw, asm: "VPSRAW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLD128", argLength: 2, reg: vfpv, asm: "VPSRLD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRLD256", argLength: 2, reg: vfpv, asm: "VPSRLD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLD512", argLength: 2, reg: wfpw, asm: 
"VPSRLD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLDMasked128", argLength: 3, reg: wfpkw, asm: "VPSRLD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRLDMasked256", argLength: 3, reg: wfpkw, asm: "VPSRLD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLVD256", argLength: 2, reg: v21, asm: "VPSRLVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLVDMasked256", argLength: 3, reg: w2kw, asm: "VPSRLVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUQ128", argLength: 2, reg: w21, asm: "VPMAXUQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUQMasked128", argLength: 3, reg: w2kw, asm: "VPMAXUQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUQ128", argLength: 2, reg: w21, asm: "VPMINUQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUQMasked128", argLength: 3, reg: w2kw, asm: "VPMINUQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULUDQMasked128", argLength: 3, reg: w2kw, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPERMI2PD128", argLength: 3, reg: w31, asm: "VPERMI2PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPERMI2Q128", argLength: 3, reg: w31, asm: "VPERMI2Q", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPERMI2PDMasked128", argLength: 4, reg: w3kw, asm: "VPERMI2PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPERMI2QMasked128", argLength: 4, reg: w3kw, asm: "VPERMI2Q", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRLDMasked512", argLength: 3, reg: wfpkw, asm: "VPSRLD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRLQ128", argLength: 2, reg: vfpv, asm: "VPSRLQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLQMasked128", argLength: 3, reg: wfpkw, asm: "VPSRLQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLVQ128", argLength: 2, reg: v21, asm: "VPSRLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLVQMasked128", argLength: 3, reg: w2kw, asm: "VPSRLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUQ256", argLength: 2, reg: w21, asm: "VPMAXUQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUQMasked256", argLength: 3, reg: w2kw, asm: "VPMAXUQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUQ256", argLength: 2, reg: w21, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUQMasked256", argLength: 3, reg: w2kw, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULUDQMasked256", argLength: 3, reg: w2kw, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPERMPD256", argLength: 2, reg: w21, asm: "VPERMPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPERMQ256", argLength: 2, reg: w21, asm: "VPERMQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPERMI2PD256", argLength: 3, reg: w31, asm: "VPERMI2PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPERMI2Q256", argLength: 3, reg: w31, asm: "VPERMI2Q", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPERMI2PDMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: 
"VPERMI2QMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2Q", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPERMQMasked256", argLength: 3, reg: w2kw, asm: "VPERMQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPERMPDMasked256", argLength: 3, reg: w2kw, asm: "VPERMPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLQ256", argLength: 2, reg: vfpv, asm: "VPSRLQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLQMasked256", argLength: 3, reg: wfpkw, asm: "VPSRLQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLVQ256", argLength: 2, reg: v21, asm: "VPSRLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLVQMasked256", argLength: 3, reg: w2kw, asm: "VPSRLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUQ512", argLength: 2, reg: w21, asm: "VPMAXUQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXUQMasked512", argLength: 3, reg: w2kw, asm: "VPMAXUQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUQ512", argLength: 2, reg: w21, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUQMasked512", argLength: 3, reg: w2kw, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULUDQ512", argLength: 2, reg: w21, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULUDQMasked512", argLength: 3, reg: w2kw, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPERMPD512", argLength: 2, reg: w21, asm: "VPERMPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPERMQ512", argLength: 2, reg: w21, asm: "VPERMQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPERMI2Q512", argLength: 3, reg: w31, asm: "VPERMI2Q", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPERMI2PD512", argLength: 3, reg: w31, asm: "VPERMI2PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPERMI2QMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2Q", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPERMI2PDMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPERMPDMasked512", argLength: 3, reg: w2kw, asm: "VPERMPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPERMQMasked512", argLength: 3, reg: w2kw, asm: "VPERMQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRLQ512", argLength: 2, reg: wfpw, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLQMasked128", argLength: 3, reg: wfpkw, asm: "VPSRLQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLQMasked256", argLength: 3, reg: wfpkw, asm: "VPSRLQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLQMasked512", argLength: 3, reg: wfpkw, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLVD128", argLength: 2, reg: v21, asm: "VPSRLVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLVD256", argLength: 2, reg: v21, asm: "VPSRLVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLVD512", argLength: 2, reg: w21, asm: "VPSRLVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLVDMasked128", argLength: 3, reg: w2kw, asm: "VPSRLVD", commutative: false, typ: "Vec128", 
resultInArg0: false}, + {name: "VPSRLVDMasked256", argLength: 3, reg: w2kw, asm: "VPSRLVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLVDMasked512", argLength: 3, reg: w2kw, asm: "VPSRLVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLVQ128", argLength: 2, reg: v21, asm: "VPSRLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLVQ256", argLength: 2, reg: v21, asm: "VPSRLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLVQ512", argLength: 2, reg: w21, asm: "VPSRLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLVQMasked128", argLength: 3, reg: w2kw, asm: "VPSRLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLVQMasked256", argLength: 3, reg: w2kw, asm: "VPSRLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLVQMasked512", argLength: 3, reg: w2kw, asm: "VPSRLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPAVGB128", argLength: 2, reg: v21, asm: "VPAVGB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPAVGBMasked128", argLength: 3, reg: w2kw, asm: "VPAVGB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VGF2P8MULB128", argLength: 2, reg: w21, asm: "VGF2P8MULB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VGF2P8MULBMasked128", argLength: 3, reg: w2kw, asm: "VGF2P8MULB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUB128", argLength: 2, reg: v21, asm: "VPMAXUB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUBMasked128", argLength: 3, reg: w2kw, asm: "VPMAXUB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUB128", argLength: 2, reg: v21, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUBMasked128", argLength: 3, reg: w2kw, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPERMB128", argLength: 2, reg: w21, asm: "VPERMB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPERMI2B128", argLength: 3, reg: w31, asm: "VPERMI2B", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPERMI2BMasked128", argLength: 4, reg: w3kw, asm: "VPERMI2B", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPERMBMasked128", argLength: 3, reg: w2kw, asm: "VPERMB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMADDUBSW128", argLength: 2, reg: v21, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMADDUBSWMasked128", argLength: 3, reg: w2kw, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPAVGB256", argLength: 2, reg: v21, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPAVGBMasked256", argLength: 3, reg: w2kw, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VGF2P8MULB256", argLength: 2, reg: w21, asm: "VGF2P8MULB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VGF2P8MULBMasked256", argLength: 3, reg: w2kw, asm: "VGF2P8MULB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUB256", argLength: 2, reg: v21, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUBMasked256", argLength: 3, reg: w2kw, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUB256", argLength: 2, reg: v21, asm: 
"VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUBMasked256", argLength: 3, reg: w2kw, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPERMB256", argLength: 2, reg: w21, asm: "VPERMB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPERMI2B256", argLength: 3, reg: w31, asm: "VPERMI2B", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPERMI2BMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2B", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPERMBMasked256", argLength: 3, reg: w2kw, asm: "VPERMB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMADDUBSW256", argLength: 2, reg: v21, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMADDUBSWMasked256", argLength: 3, reg: w2kw, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPAVGB512", argLength: 2, reg: w21, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPAVGBMasked512", argLength: 3, reg: w2kw, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VGF2P8MULB512", argLength: 2, reg: w21, asm: "VGF2P8MULB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VGF2P8MULBMasked512", argLength: 3, reg: w2kw, asm: "VGF2P8MULB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXUB512", argLength: 2, reg: w21, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXUBMasked512", argLength: 3, reg: w2kw, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUB512", argLength: 2, reg: w21, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUBMasked512", argLength: 3, reg: w2kw, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPERMB512", argLength: 2, reg: w21, asm: "VPERMB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPERMI2B512", argLength: 3, reg: w31, asm: "VPERMI2B", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPERMI2BMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2B", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPERMBMasked512", argLength: 3, reg: w2kw, asm: "VPERMB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPMADDUBSW512", argLength: 2, reg: w21, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPMADDUBSWMasked512", argLength: 3, reg: w2kw, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRNDSCALEPS512", argLength: 1, reg: w11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRNDSCALEPSMasked512", argLength: 2, reg: wkw, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VREDUCEPS512", argLength: 1, reg: w11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VREDUCEPSMasked512", argLength: 2, reg: wkw, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VCMPPS512", argLength: 2, reg: w2k, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VCMPPSMasked512", argLength: 3, reg: w2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPSRLVW128", argLength: 2, 
reg: w21, asm: "VPSRLVW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLVW256", argLength: 2, reg: w21, asm: "VPSRLVW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLVW512", argLength: 2, reg: w21, asm: "VPSRLVW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLVWMasked128", argLength: 3, reg: w2kw, asm: "VPSRLVW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLVWMasked256", argLength: 3, reg: w2kw, asm: "VPSRLVW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLVWMasked512", argLength: 3, reg: w2kw, asm: "VPSRLVW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLW128", argLength: 2, reg: vfpv, asm: "VPSRLW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLW256", argLength: 2, reg: vfpv, asm: "VPSRLW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLW512", argLength: 2, reg: wfpw, asm: "VPSRLW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLWMasked128", argLength: 3, reg: wfpkw, asm: "VPSRLW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLWMasked256", argLength: 3, reg: wfpkw, asm: "VPSRLW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLWMasked512", argLength: 3, reg: wfpkw, asm: "VPSRLW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBB128", argLength: 2, reg: v21, asm: "VPSUBB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBB256", argLength: 2, reg: v21, asm: "VPSUBB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBB512", argLength: 2, reg: w21, asm: "VPSUBB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBBMasked128", argLength: 3, reg: w2kw, asm: "VPSUBB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBBMasked256", argLength: 3, reg: w2kw, asm: "VPSUBB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBBMasked512", argLength: 3, reg: w2kw, asm: "VPSUBB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBD128", argLength: 2, reg: v21, asm: "VPSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBD256", argLength: 2, reg: v21, asm: "VPSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBD512", argLength: 2, reg: w21, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBDMasked128", argLength: 3, reg: w2kw, asm: "VPSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBDMasked256", argLength: 3, reg: w2kw, asm: "VPSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBDMasked512", argLength: 3, reg: w2kw, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBQ128", argLength: 2, reg: v21, asm: "VPSUBQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBQ256", argLength: 2, reg: v21, asm: "VPSUBQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBQ512", argLength: 2, reg: w21, asm: "VPSUBQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBQMasked128", argLength: 3, reg: w2kw, asm: "VPSUBQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBQMasked256", argLength: 3, reg: w2kw, asm: "VPSUBQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBQMasked512", argLength: 3, reg: w2kw, 
asm: "VPSUBQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBSB128", argLength: 2, reg: v21, asm: "VPSUBSB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBSB256", argLength: 2, reg: v21, asm: "VPSUBSB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBSB512", argLength: 2, reg: w21, asm: "VPSUBSB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBSBMasked128", argLength: 3, reg: w2kw, asm: "VPSUBSB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBSBMasked256", argLength: 3, reg: w2kw, asm: "VPSUBSB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBSBMasked512", argLength: 3, reg: w2kw, asm: "VPSUBSB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBSW128", argLength: 2, reg: v21, asm: "VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBSW256", argLength: 2, reg: v21, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBSW512", argLength: 2, reg: w21, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBSWMasked128", argLength: 3, reg: w2kw, asm: "VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBSWMasked256", argLength: 3, reg: w2kw, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBSWMasked512", argLength: 3, reg: w2kw, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBW128", argLength: 2, reg: v21, asm: "VPSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBW256", argLength: 2, reg: v21, asm: "VPSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBW512", argLength: 2, reg: w21, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBWMasked128", argLength: 3, reg: w2kw, asm: "VPSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBWMasked256", argLength: 3, reg: w2kw, asm: "VPSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBWMasked512", argLength: 3, reg: w2kw, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPXOR128", argLength: 2, reg: v21, asm: "VPXOR", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPXOR256", argLength: 2, reg: v21, asm: "VPXOR", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPXORD512", argLength: 2, reg: w21, asm: "VPXORD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPXORDMasked128", argLength: 3, reg: w2kw, asm: "VPXORD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPXORDMasked256", argLength: 3, reg: w2kw, asm: "VPXORD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPXORDMasked512", argLength: 3, reg: w2kw, asm: "VPXORD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPXORQ512", argLength: 2, reg: w21, asm: "VPXORQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPXORQMasked128", argLength: 3, reg: w2kw, asm: "VPXORQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPXORQMasked256", argLength: 3, reg: w2kw, asm: "VPXORQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPXORQMasked512", argLength: 3, reg: w2kw, asm: "VPXORQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VRCP14PD128", argLength: 1, reg: w11, asm: 
"VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRCP14PD256", argLength: 1, reg: w11, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRCP14PD512", argLength: 1, reg: w11, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRCP14PDMasked128", argLength: 2, reg: wkw, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRCP14PDMasked256", argLength: 2, reg: wkw, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRCP14PDMasked512", argLength: 2, reg: wkw, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRCP14PS512", argLength: 1, reg: w11, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRCP14PSMasked128", argLength: 2, reg: wkw, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRCP14PSMasked256", argLength: 2, reg: wkw, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRCP14PSMasked512", argLength: 2, reg: wkw, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRCPPS128", argLength: 1, reg: v11, asm: "VRCPPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRCPPS256", argLength: 1, reg: v11, asm: "VRCPPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRSQRT14PD128", argLength: 1, reg: w11, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRSQRT14PD256", argLength: 1, reg: w11, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRSQRT14PD512", argLength: 1, reg: w11, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRSQRT14PDMasked128", argLength: 2, reg: wkw, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRSQRT14PDMasked256", argLength: 2, reg: wkw, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRSQRT14PDMasked512", argLength: 2, reg: wkw, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRSQRT14PS512", argLength: 1, reg: w11, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRSQRT14PSMasked128", argLength: 2, reg: wkw, asm: "VRSQRT14PS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRSQRT14PSMasked256", argLength: 2, reg: wkw, asm: "VRSQRT14PS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRSQRT14PSMasked512", argLength: 2, reg: wkw, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRSQRTPS128", argLength: 1, reg: v11, asm: "VRSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRSQRTPS256", argLength: 1, reg: v11, asm: "VRSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSCALEFPD128", argLength: 2, reg: w21, asm: "VSCALEFPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSCALEFPD256", argLength: 2, reg: w21, asm: "VSCALEFPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSCALEFPD512", argLength: 2, reg: w21, asm: "VSCALEFPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSCALEFPDMasked128", argLength: 3, reg: w2kw, asm: "VSCALEFPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSCALEFPDMasked256", argLength: 3, reg: w2kw, asm: 
"VSCALEFPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSCALEFPDMasked512", argLength: 3, reg: w2kw, asm: "VSCALEFPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSCALEFPS128", argLength: 2, reg: w21, asm: "VSCALEFPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSCALEFPS256", argLength: 2, reg: w21, asm: "VSCALEFPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSCALEFPS512", argLength: 2, reg: w21, asm: "VSCALEFPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSCALEFPSMasked128", argLength: 3, reg: w2kw, asm: "VSCALEFPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSCALEFPSMasked256", argLength: 3, reg: w2kw, asm: "VSCALEFPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSCALEFPSMasked512", argLength: 3, reg: w2kw, asm: "VSCALEFPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSQRTPD128", argLength: 1, reg: v11, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSQRTPD256", argLength: 1, reg: v11, asm: "VSQRTPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSQRTPD512", argLength: 1, reg: w11, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSQRTPDMasked128", argLength: 2, reg: wkw, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSQRTPDMasked256", argLength: 2, reg: wkw, asm: "VSQRTPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSQRTPDMasked512", argLength: 2, reg: wkw, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSQRTPS128", argLength: 1, reg: v11, asm: "VSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSQRTPS256", argLength: 1, reg: v11, asm: "VSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSQRTPS512", argLength: 1, reg: w11, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSQRTPSMasked128", argLength: 2, reg: wkw, asm: "VSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSQRTPSMasked256", argLength: 2, reg: wkw, asm: "VSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSQRTPSMasked512", argLength: 2, reg: wkw, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSUBPD128", argLength: 2, reg: v21, asm: "VSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSUBPD256", argLength: 2, reg: v21, asm: "VSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSUBPD512", argLength: 2, reg: w21, asm: "VSUBPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSUBPDMasked128", argLength: 3, reg: w2kw, asm: "VSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSUBPDMasked256", argLength: 3, reg: w2kw, asm: "VSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSUBPDMasked512", argLength: 3, reg: w2kw, asm: "VSUBPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSUBPS128", argLength: 2, reg: v21, asm: "VSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSUBPS256", argLength: 2, reg: v21, asm: "VSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSUBPS512", argLength: 2, reg: w21, asm: "VSUBPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSUBPSMasked128", 
argLength: 3, reg: w2kw, asm: "VSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSUBPSMasked256", argLength: 3, reg: w2kw, asm: "VSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSUBPSMasked512", argLength: 3, reg: w2kw, asm: "VSUBPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VROUNDPS128", argLength: 1, reg: v11, asm: "VROUNDPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRNDSCALEPS128", argLength: 1, reg: w11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRNDSCALEPSMasked128", argLength: 2, reg: wkw, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VREDUCEPS128", argLength: 1, reg: w11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VREDUCEPSMasked128", argLength: 2, reg: wkw, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VDPPS128", argLength: 2, reg: v21, asm: "VDPPS", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VCMPPS128", argLength: 2, reg: v21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VCMPPSMasked128", argLength: 3, reg: w2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VROUNDPS256", argLength: 1, reg: v11, asm: "VROUNDPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRNDSCALEPS256", argLength: 1, reg: w11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRNDSCALEPSMasked256", argLength: 2, reg: wkw, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VREDUCEPS256", argLength: 1, reg: w11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VREDUCEPSMasked256", argLength: 2, reg: wkw, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VDPPS256", argLength: 2, reg: v21, asm: "VDPPS", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VCMPPS256", argLength: 2, reg: v21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VCMPPSMasked256", argLength: 3, reg: w2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VEXTRACTF128128", argLength: 1, reg: v11, asm: "VEXTRACTF128", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VINSERTF128256", argLength: 2, reg: v21, asm: "VINSERTF128", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VROUNDPD128", argLength: 1, reg: v11, asm: "VROUNDPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VROUNDPD256", argLength: 1, reg: v11, asm: "VROUNDPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRNDSCALEPS128", argLength: 1, reg: w11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRNDSCALEPS256", argLength: 1, reg: w11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRNDSCALEPS512", argLength: 1, reg: w11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRNDSCALEPD128", argLength: 1, reg: 
w11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRNDSCALEPD256", argLength: 1, reg: w11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRNDSCALEPD512", argLength: 1, reg: w11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRNDSCALEPSMasked128", argLength: 2, reg: wkw, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRNDSCALEPSMasked256", argLength: 2, reg: wkw, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRNDSCALEPSMasked512", argLength: 2, reg: wkw, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRNDSCALEPDMasked128", argLength: 2, reg: wkw, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRNDSCALEPDMasked256", argLength: 2, reg: wkw, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRNDSCALEPDMasked512", argLength: 2, reg: wkw, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VREDUCEPS128", argLength: 1, reg: w11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VREDUCEPS256", argLength: 1, reg: w11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPS512", argLength: 1, reg: w11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VREDUCEPD128", argLength: 1, reg: w11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VREDUCEPD256", argLength: 1, reg: w11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPD512", argLength: 1, reg: w11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VREDUCEPSMasked128", argLength: 2, reg: wkw, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VREDUCEPSMasked256", argLength: 2, reg: wkw, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPSMasked512", argLength: 2, reg: wkw, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VREDUCEPDMasked128", argLength: 2, reg: wkw, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VREDUCEPDMasked256", argLength: 2, reg: wkw, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPDMasked512", argLength: 2, reg: wkw, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VDPPS128", argLength: 2, reg: v21, asm: "VDPPS", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VDPPS256", argLength: 2, reg: v21, asm: "VDPPS", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VDPPD128", argLength: 2, reg: v21, asm: "VDPPD", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VCMPPS128", argLength: 2, reg: v21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VCMPPS256", argLength: 2, reg: v21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec256", 
resultInArg0: false}, + {name: "VCMPPS512", argLength: 2, reg: w2k, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VCMPPD128", argLength: 2, reg: v21, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VCMPPDMasked128", argLength: 3, reg: w2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VROUNDPD256", argLength: 1, reg: v11, asm: "VROUNDPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRNDSCALEPD256", argLength: 1, reg: w11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRNDSCALEPDMasked256", argLength: 2, reg: wkw, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VREDUCEPD256", argLength: 1, reg: w11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VREDUCEPDMasked256", argLength: 2, reg: wkw, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VCMPPD256", argLength: 2, reg: v21, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VCMPPDMasked256", argLength: 3, reg: w2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VRNDSCALEPD512", argLength: 1, reg: w11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRNDSCALEPDMasked512", argLength: 2, reg: wkw, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VREDUCEPD512", argLength: 1, reg: w11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VREDUCEPDMasked512", argLength: 2, reg: wkw, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VCMPPD512", argLength: 2, reg: w2k, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPSMasked128", argLength: 3, reg: w2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPSMasked256", argLength: 3, reg: w2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPSMasked512", argLength: 3, reg: w2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPDMasked128", argLength: 3, reg: w2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPDMasked256", argLength: 3, reg: w2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VCMPPDMasked512", argLength: 3, reg: w2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPBMasked128", argLength: 3, reg: w2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPBMasked256", argLength: 3, reg: w2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPBMasked512", argLength: 3, reg: w2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPWMasked128", argLength: 3, reg: w2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPWMasked256", argLength: 3, reg: w2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, 
- {name: "VPCMPW256", argLength: 2, reg: w2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPSHLDW256", argLength: 2, reg: w21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDWMasked256", argLength: 3, reg: w2kw, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDW256", argLength: 2, reg: w21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDWMasked256", argLength: 3, reg: w2kw, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPWMasked512", argLength: 3, reg: w2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPW512", argLength: 2, reg: w2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPSHLDW512", argLength: 2, reg: w21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDWMasked512", argLength: 3, reg: w2kw, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDW512", argLength: 2, reg: w21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDWMasked512", argLength: 3, reg: w2kw, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPCMPWMasked128", argLength: 3, reg: w2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPEXTRW128", argLength: 1, reg: wgp, asm: "VPEXTRW", aux: "Int8", commutative: false, typ: "int16", resultInArg0: false}, - {name: "VPCMPW128", argLength: 2, reg: w2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPINSRW128", argLength: 2, reg: vgpv, asm: "VPINSRW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDW128", argLength: 2, reg: w21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDWMasked128", argLength: 3, reg: w2kw, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDW128", argLength: 2, reg: w21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDWMasked128", argLength: 3, reg: w2kw, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPDMasked512", argLength: 3, reg: w2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPD512", argLength: 2, reg: w2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPROLD512", argLength: 1, reg: w11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPROLDMasked512", argLength: 2, reg: wkw, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORD512", argLength: 1, reg: w11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORDMasked512", argLength: 2, reg: wkw, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDD512", argLength: 2, reg: w21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDDMasked512", argLength: 3, reg: w2kw, asm: 
"VPSHLDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDD512", argLength: 2, reg: w21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDDMasked512", argLength: 3, reg: w2kw, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPCMPDMasked128", argLength: 3, reg: w2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPEXTRD128", argLength: 1, reg: vgp, asm: "VPEXTRD", aux: "Int8", commutative: false, typ: "int32", resultInArg0: false}, - {name: "VPCMPD128", argLength: 2, reg: w2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPROLD128", argLength: 1, reg: w11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPROLDMasked128", argLength: 2, reg: wkw, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORD128", argLength: 1, reg: w11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORDMasked128", argLength: 2, reg: wkw, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPINSRD128", argLength: 2, reg: vgpv, asm: "VPINSRD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDD128", argLength: 2, reg: w21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDDMasked128", argLength: 3, reg: w2kw, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDD128", argLength: 2, reg: w21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDDMasked128", argLength: 3, reg: w2kw, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPDMasked256", argLength: 3, reg: w2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPD256", argLength: 2, reg: w2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPROLD256", argLength: 1, reg: w11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPROLDMasked256", argLength: 2, reg: wkw, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORD256", argLength: 1, reg: w11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORDMasked256", argLength: 2, reg: wkw, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDD256", argLength: 2, reg: w21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDDMasked256", argLength: 3, reg: w2kw, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDD256", argLength: 2, reg: w21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDDMasked256", argLength: 3, reg: w2kw, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPDMasked512", argLength: 3, reg: w2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPQMasked128", argLength: 3, reg: w2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", 
resultInArg0: false}, - {name: "VPEXTRQ128", argLength: 1, reg: vgp, asm: "VPEXTRQ", aux: "Int8", commutative: false, typ: "int64", resultInArg0: false}, - {name: "VPCMPQ128", argLength: 2, reg: w2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPROLQ128", argLength: 1, reg: w11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPROLQMasked128", argLength: 2, reg: wkw, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORQ128", argLength: 1, reg: w11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORQMasked128", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPINSRQ128", argLength: 2, reg: vgpv, asm: "VPINSRQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDQ128", argLength: 2, reg: w21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDQMasked128", argLength: 3, reg: w2kw, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDQ128", argLength: 2, reg: w21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDQMasked128", argLength: 3, reg: w2kw, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPQMasked256", argLength: 3, reg: w2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPQ256", argLength: 2, reg: w2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPROLQ256", argLength: 1, reg: w11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPROLQMasked256", argLength: 2, reg: wkw, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORQ256", argLength: 1, reg: w11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORQMasked256", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDQ256", argLength: 2, reg: w21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDQMasked256", argLength: 3, reg: w2kw, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDQ256", argLength: 2, reg: w21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDQMasked256", argLength: 3, reg: w2kw, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPQMasked512", argLength: 3, reg: w2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPQ512", argLength: 2, reg: w2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPROLQ512", argLength: 1, reg: w11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPROLQMasked512", argLength: 2, reg: wkw, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORQ512", argLength: 1, reg: w11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORQMasked512", argLength: 2, reg: 
wkw, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDQ512", argLength: 2, reg: w21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDQMasked512", argLength: 3, reg: w2kw, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDQ512", argLength: 2, reg: w21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDQMasked512", argLength: 3, reg: w2kw, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPCMPBMasked128", argLength: 3, reg: w2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPEXTRB128", argLength: 1, reg: wgp, asm: "VPEXTRB", aux: "Int8", commutative: false, typ: "int8", resultInArg0: false}, - {name: "VPCMPB128", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPINSRB128", argLength: 2, reg: vgpv, asm: "VPINSRB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPBMasked256", argLength: 3, reg: w2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VEXTRACTI128128", argLength: 1, reg: v11, asm: "VEXTRACTI128", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPB256", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VINSERTI128256", argLength: 2, reg: v21, asm: "VINSERTI128", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPBMasked512", argLength: 3, reg: w2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPB512", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUBMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUBMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUBMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUWMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUWMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUW256", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUWMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUW512", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUWMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUW128", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUDMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUD512", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "Int8", commutative: 
false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUDMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUD128", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUDMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUD256", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUDMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUQMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQ128", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUQMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQ256", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUQMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQ512", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUBMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VGF2P8AFFINEQB128", argLength: 2, reg: w21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VGF2P8AFFINEINVQB128", argLength: 2, reg: w21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VGF2P8AFFINEINVQBMasked128", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VGF2P8AFFINEQBMasked128", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPUB128", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUBMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VGF2P8AFFINEQB256", argLength: 2, reg: w21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VGF2P8AFFINEINVQB256", argLength: 2, reg: w21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VGF2P8AFFINEINVQBMasked256", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VGF2P8AFFINEQBMasked256", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPUB256", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUBMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VGF2P8AFFINEQB512", argLength: 2, reg: w21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + 
{name: "VGF2P8AFFINEINVQB128", argLength: 2, reg: w21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQB256", argLength: 2, reg: w21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VGF2P8AFFINEINVQB512", argLength: 2, reg: w21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQBMasked128", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQBMasked256", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VGF2P8AFFINEINVQBMasked512", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VGF2P8AFFINEQBMasked128", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8AFFINEQBMasked256", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VGF2P8AFFINEQBMasked512", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VEXTRACTF128128", argLength: 1, reg: v11, asm: "VEXTRACTF128", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VEXTRACTI128128", argLength: 1, reg: v11, asm: "VEXTRACTI128", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPEXTRB128", argLength: 1, reg: wgp, asm: "VPEXTRB", aux: "Int8", commutative: false, typ: "int8", resultInArg0: false}, + {name: "VPEXTRW128", argLength: 1, reg: wgp, asm: "VPEXTRW", aux: "Int8", commutative: false, typ: "int16", resultInArg0: false}, + {name: "VPEXTRD128", argLength: 1, reg: vgp, asm: "VPEXTRD", aux: "Int8", commutative: false, typ: "int32", resultInArg0: false}, + {name: "VPEXTRQ128", argLength: 1, reg: vgp, asm: "VPEXTRQ", aux: "Int8", commutative: false, typ: "int64", resultInArg0: false}, + {name: "VPCMPUB128", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUB256", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUB512", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUW128", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUW256", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUW512", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUD128", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUD256", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUD512", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQ128", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQ256", argLength: 2, 
reg: w2k, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQ512", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPB128", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPB256", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPB512", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPW128", argLength: 2, reg: w2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPW256", argLength: 2, reg: w2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPW512", argLength: 2, reg: w2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPD128", argLength: 2, reg: w2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPD256", argLength: 2, reg: w2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPD512", argLength: 2, reg: w2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQ128", argLength: 2, reg: w2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQ256", argLength: 2, reg: w2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQ512", argLength: 2, reg: w2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPROLD128", argLength: 1, reg: w11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLD256", argLength: 1, reg: w11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLD512", argLength: 1, reg: w11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLQ128", argLength: 1, reg: w11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLQ256", argLength: 1, reg: w11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLQ512", argLength: 1, reg: w11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLDMasked128", argLength: 2, reg: wkw, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLDMasked256", argLength: 2, reg: wkw, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLDMasked512", argLength: 2, reg: wkw, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLQMasked128", argLength: 2, reg: wkw, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLQMasked256", argLength: 2, reg: wkw, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLQMasked512", argLength: 2, reg: wkw, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORD128", argLength: 1, reg: w11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORD256", argLength: 1, reg: 
w11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORD512", argLength: 1, reg: w11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORQ128", argLength: 1, reg: w11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORQ256", argLength: 1, reg: w11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORQ512", argLength: 1, reg: w11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORDMasked128", argLength: 2, reg: wkw, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORDMasked256", argLength: 2, reg: wkw, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORDMasked512", argLength: 2, reg: wkw, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORQMasked128", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORQMasked256", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORQMasked512", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VINSERTF128256", argLength: 2, reg: v21, asm: "VINSERTF128", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VINSERTI128256", argLength: 2, reg: v21, asm: "VINSERTI128", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPINSRB128", argLength: 2, reg: vgpv, asm: "VPINSRB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPINSRW128", argLength: 2, reg: vgpv, asm: "VPINSRW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPINSRD128", argLength: 2, reg: vgpv, asm: "VPINSRD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPINSRQ128", argLength: 2, reg: vgpv, asm: "VPINSRQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDW128", argLength: 2, reg: w21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDW256", argLength: 2, reg: w21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDW512", argLength: 2, reg: w21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDD128", argLength: 2, reg: w21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDD256", argLength: 2, reg: w21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDD512", argLength: 2, reg: w21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDQ128", argLength: 2, reg: w21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDQ256", argLength: 2, reg: w21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDQ512", argLength: 2, reg: w21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDWMasked128", argLength: 3, reg: w2kw, asm: "VPSHLDW", aux: "Int8", commutative: false, 
typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDWMasked256", argLength: 3, reg: w2kw, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDWMasked512", argLength: 3, reg: w2kw, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDDMasked128", argLength: 3, reg: w2kw, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDDMasked256", argLength: 3, reg: w2kw, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDDMasked512", argLength: 3, reg: w2kw, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDQMasked128", argLength: 3, reg: w2kw, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDQMasked256", argLength: 3, reg: w2kw, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDQMasked512", argLength: 3, reg: w2kw, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDW128", argLength: 2, reg: w21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDW256", argLength: 2, reg: w21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDW512", argLength: 2, reg: w21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDD128", argLength: 2, reg: w21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDD256", argLength: 2, reg: w21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDD512", argLength: 2, reg: w21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDQ128", argLength: 2, reg: w21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDQ256", argLength: 2, reg: w21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDQ512", argLength: 2, reg: w21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDWMasked128", argLength: 3, reg: w2kw, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDWMasked256", argLength: 3, reg: w2kw, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDWMasked512", argLength: 3, reg: w2kw, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDDMasked128", argLength: 3, reg: w2kw, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDDMasked256", argLength: 3, reg: w2kw, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDDMasked512", argLength: 3, reg: w2kw, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDQMasked128", argLength: 3, reg: w2kw, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDQMasked256", argLength: 3, reg: w2kw, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDQMasked512", argLength: 3, reg: w2kw, asm: "VPSHRDQ", aux: 
"Int8", commutative: false, typ: "Vec512", resultInArg0: false}, } } diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 7b016b517d2253..654c1ee17180ee 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -3,1796 +3,1796 @@ package main func simdGenericOps() []opData { return []opData{ + {name: "AbsoluteInt8x16", argLength: 1, commutative: false}, + {name: "AbsoluteInt8x32", argLength: 1, commutative: false}, + {name: "AbsoluteInt8x64", argLength: 1, commutative: false}, + {name: "AbsoluteInt16x8", argLength: 1, commutative: false}, + {name: "AbsoluteInt16x16", argLength: 1, commutative: false}, + {name: "AbsoluteInt16x32", argLength: 1, commutative: false}, + {name: "AbsoluteInt32x4", argLength: 1, commutative: false}, + {name: "AbsoluteInt32x8", argLength: 1, commutative: false}, + {name: "AbsoluteInt32x16", argLength: 1, commutative: false}, + {name: "AbsoluteInt64x2", argLength: 1, commutative: false}, + {name: "AbsoluteInt64x4", argLength: 1, commutative: false}, + {name: "AbsoluteInt64x8", argLength: 1, commutative: false}, + {name: "AbsoluteMaskedInt8x16", argLength: 2, commutative: false}, + {name: "AbsoluteMaskedInt8x32", argLength: 2, commutative: false}, + {name: "AbsoluteMaskedInt8x64", argLength: 2, commutative: false}, + {name: "AbsoluteMaskedInt16x8", argLength: 2, commutative: false}, + {name: "AbsoluteMaskedInt16x16", argLength: 2, commutative: false}, + {name: "AbsoluteMaskedInt16x32", argLength: 2, commutative: false}, + {name: "AbsoluteMaskedInt32x4", argLength: 2, commutative: false}, + {name: "AbsoluteMaskedInt32x8", argLength: 2, commutative: false}, + {name: "AbsoluteMaskedInt32x16", argLength: 2, commutative: false}, + {name: "AbsoluteMaskedInt64x2", argLength: 2, commutative: false}, + {name: "AbsoluteMaskedInt64x4", argLength: 2, commutative: false}, + {name: "AbsoluteMaskedInt64x8", argLength: 2, commutative: false}, + {name: "AddFloat32x4", argLength: 2, commutative: true}, + {name: "AddFloat32x8", argLength: 2, commutative: true}, {name: "AddFloat32x16", argLength: 2, commutative: true}, + {name: "AddFloat64x2", argLength: 2, commutative: true}, + {name: "AddFloat64x4", argLength: 2, commutative: true}, + {name: "AddFloat64x8", argLength: 2, commutative: true}, + {name: "AddInt8x16", argLength: 2, commutative: true}, + {name: "AddInt8x32", argLength: 2, commutative: true}, + {name: "AddInt8x64", argLength: 2, commutative: true}, + {name: "AddInt16x8", argLength: 2, commutative: true}, + {name: "AddInt16x16", argLength: 2, commutative: true}, + {name: "AddInt16x32", argLength: 2, commutative: true}, + {name: "AddInt32x4", argLength: 2, commutative: true}, + {name: "AddInt32x8", argLength: 2, commutative: true}, + {name: "AddInt32x16", argLength: 2, commutative: true}, + {name: "AddInt64x2", argLength: 2, commutative: true}, + {name: "AddInt64x4", argLength: 2, commutative: true}, + {name: "AddInt64x8", argLength: 2, commutative: true}, + {name: "AddMaskedFloat32x4", argLength: 3, commutative: true}, + {name: "AddMaskedFloat32x8", argLength: 3, commutative: true}, {name: "AddMaskedFloat32x16", argLength: 3, commutative: true}, + {name: "AddMaskedFloat64x2", argLength: 3, commutative: true}, + {name: "AddMaskedFloat64x4", argLength: 3, commutative: true}, + {name: "AddMaskedFloat64x8", argLength: 3, commutative: true}, + {name: "AddMaskedInt8x16", argLength: 3, commutative: true}, + {name: "AddMaskedInt8x32", argLength: 
3, commutative: true}, + {name: "AddMaskedInt8x64", argLength: 3, commutative: true}, + {name: "AddMaskedInt16x8", argLength: 3, commutative: true}, + {name: "AddMaskedInt16x16", argLength: 3, commutative: true}, + {name: "AddMaskedInt16x32", argLength: 3, commutative: true}, + {name: "AddMaskedInt32x4", argLength: 3, commutative: true}, + {name: "AddMaskedInt32x8", argLength: 3, commutative: true}, + {name: "AddMaskedInt32x16", argLength: 3, commutative: true}, + {name: "AddMaskedInt64x2", argLength: 3, commutative: true}, + {name: "AddMaskedInt64x4", argLength: 3, commutative: true}, + {name: "AddMaskedInt64x8", argLength: 3, commutative: true}, + {name: "AddMaskedUint8x16", argLength: 3, commutative: true}, + {name: "AddMaskedUint8x32", argLength: 3, commutative: true}, + {name: "AddMaskedUint8x64", argLength: 3, commutative: true}, + {name: "AddMaskedUint16x8", argLength: 3, commutative: true}, + {name: "AddMaskedUint16x16", argLength: 3, commutative: true}, + {name: "AddMaskedUint16x32", argLength: 3, commutative: true}, + {name: "AddMaskedUint32x4", argLength: 3, commutative: true}, + {name: "AddMaskedUint32x8", argLength: 3, commutative: true}, + {name: "AddMaskedUint32x16", argLength: 3, commutative: true}, + {name: "AddMaskedUint64x2", argLength: 3, commutative: true}, + {name: "AddMaskedUint64x4", argLength: 3, commutative: true}, + {name: "AddMaskedUint64x8", argLength: 3, commutative: true}, + {name: "AddSubFloat32x4", argLength: 2, commutative: false}, + {name: "AddSubFloat32x8", argLength: 2, commutative: false}, + {name: "AddSubFloat64x2", argLength: 2, commutative: false}, + {name: "AddSubFloat64x4", argLength: 2, commutative: false}, + {name: "AddUint8x16", argLength: 2, commutative: true}, + {name: "AddUint8x32", argLength: 2, commutative: true}, + {name: "AddUint8x64", argLength: 2, commutative: true}, + {name: "AddUint16x8", argLength: 2, commutative: true}, + {name: "AddUint16x16", argLength: 2, commutative: true}, + {name: "AddUint16x32", argLength: 2, commutative: true}, + {name: "AddUint32x4", argLength: 2, commutative: true}, + {name: "AddUint32x8", argLength: 2, commutative: true}, + {name: "AddUint32x16", argLength: 2, commutative: true}, + {name: "AddUint64x2", argLength: 2, commutative: true}, + {name: "AddUint64x4", argLength: 2, commutative: true}, + {name: "AddUint64x8", argLength: 2, commutative: true}, + {name: "AndInt8x16", argLength: 2, commutative: true}, + {name: "AndInt8x32", argLength: 2, commutative: true}, + {name: "AndInt16x8", argLength: 2, commutative: true}, + {name: "AndInt16x16", argLength: 2, commutative: true}, + {name: "AndInt32x4", argLength: 2, commutative: true}, + {name: "AndInt32x8", argLength: 2, commutative: true}, + {name: "AndInt32x16", argLength: 2, commutative: true}, + {name: "AndInt64x2", argLength: 2, commutative: true}, + {name: "AndInt64x4", argLength: 2, commutative: true}, + {name: "AndInt64x8", argLength: 2, commutative: true}, + {name: "AndMaskedInt32x4", argLength: 3, commutative: true}, + {name: "AndMaskedInt32x8", argLength: 3, commutative: true}, + {name: "AndMaskedInt32x16", argLength: 3, commutative: true}, + {name: "AndMaskedInt64x2", argLength: 3, commutative: true}, + {name: "AndMaskedInt64x4", argLength: 3, commutative: true}, + {name: "AndMaskedInt64x8", argLength: 3, commutative: true}, + {name: "AndMaskedUint32x4", argLength: 3, commutative: true}, + {name: "AndMaskedUint32x8", argLength: 3, commutative: true}, + {name: "AndMaskedUint32x16", argLength: 3, commutative: true}, + {name: "AndMaskedUint64x2", 
argLength: 3, commutative: true}, + {name: "AndMaskedUint64x4", argLength: 3, commutative: true}, + {name: "AndMaskedUint64x8", argLength: 3, commutative: true}, + {name: "AndNotInt8x16", argLength: 2, commutative: false}, + {name: "AndNotInt8x32", argLength: 2, commutative: false}, + {name: "AndNotInt16x8", argLength: 2, commutative: false}, + {name: "AndNotInt16x16", argLength: 2, commutative: false}, + {name: "AndNotInt32x4", argLength: 2, commutative: false}, + {name: "AndNotInt32x8", argLength: 2, commutative: false}, + {name: "AndNotInt32x16", argLength: 2, commutative: false}, + {name: "AndNotInt64x2", argLength: 2, commutative: false}, + {name: "AndNotInt64x4", argLength: 2, commutative: false}, + {name: "AndNotInt64x8", argLength: 2, commutative: false}, + {name: "AndNotMaskedInt32x4", argLength: 3, commutative: false}, + {name: "AndNotMaskedInt32x8", argLength: 3, commutative: false}, + {name: "AndNotMaskedInt32x16", argLength: 3, commutative: false}, + {name: "AndNotMaskedInt64x2", argLength: 3, commutative: false}, + {name: "AndNotMaskedInt64x4", argLength: 3, commutative: false}, + {name: "AndNotMaskedInt64x8", argLength: 3, commutative: false}, + {name: "AndNotMaskedUint32x4", argLength: 3, commutative: false}, + {name: "AndNotMaskedUint32x8", argLength: 3, commutative: false}, + {name: "AndNotMaskedUint32x16", argLength: 3, commutative: false}, + {name: "AndNotMaskedUint64x2", argLength: 3, commutative: false}, + {name: "AndNotMaskedUint64x4", argLength: 3, commutative: false}, + {name: "AndNotMaskedUint64x8", argLength: 3, commutative: false}, + {name: "AndNotUint8x16", argLength: 2, commutative: false}, + {name: "AndNotUint8x32", argLength: 2, commutative: false}, + {name: "AndNotUint16x8", argLength: 2, commutative: false}, + {name: "AndNotUint16x16", argLength: 2, commutative: false}, + {name: "AndNotUint32x4", argLength: 2, commutative: false}, + {name: "AndNotUint32x8", argLength: 2, commutative: false}, + {name: "AndNotUint32x16", argLength: 2, commutative: false}, + {name: "AndNotUint64x2", argLength: 2, commutative: false}, + {name: "AndNotUint64x4", argLength: 2, commutative: false}, + {name: "AndNotUint64x8", argLength: 2, commutative: false}, + {name: "AndUint8x16", argLength: 2, commutative: true}, + {name: "AndUint8x32", argLength: 2, commutative: true}, + {name: "AndUint16x8", argLength: 2, commutative: true}, + {name: "AndUint16x16", argLength: 2, commutative: true}, + {name: "AndUint32x4", argLength: 2, commutative: true}, + {name: "AndUint32x8", argLength: 2, commutative: true}, + {name: "AndUint32x16", argLength: 2, commutative: true}, + {name: "AndUint64x2", argLength: 2, commutative: true}, + {name: "AndUint64x4", argLength: 2, commutative: true}, + {name: "AndUint64x8", argLength: 2, commutative: true}, + {name: "ApproximateReciprocalFloat32x4", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalFloat32x8", argLength: 1, commutative: false}, {name: "ApproximateReciprocalFloat32x16", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalFloat64x2", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalFloat64x4", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalFloat64x8", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalMaskedFloat32x4", argLength: 2, commutative: false}, + {name: "ApproximateReciprocalMaskedFloat32x8", argLength: 2, commutative: false}, {name: "ApproximateReciprocalMaskedFloat32x16", argLength: 2, commutative: false}, + {name: 
"ApproximateReciprocalMaskedFloat64x2", argLength: 2, commutative: false}, + {name: "ApproximateReciprocalMaskedFloat64x4", argLength: 2, commutative: false}, + {name: "ApproximateReciprocalMaskedFloat64x8", argLength: 2, commutative: false}, + {name: "ApproximateReciprocalOfSqrtFloat32x4", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalOfSqrtFloat32x8", argLength: 1, commutative: false}, {name: "ApproximateReciprocalOfSqrtFloat32x16", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalOfSqrtFloat64x2", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalOfSqrtFloat64x4", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalOfSqrtFloat64x8", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalOfSqrtMaskedFloat32x4", argLength: 2, commutative: false}, + {name: "ApproximateReciprocalOfSqrtMaskedFloat32x8", argLength: 2, commutative: false}, {name: "ApproximateReciprocalOfSqrtMaskedFloat32x16", argLength: 2, commutative: false}, + {name: "ApproximateReciprocalOfSqrtMaskedFloat64x2", argLength: 2, commutative: false}, + {name: "ApproximateReciprocalOfSqrtMaskedFloat64x4", argLength: 2, commutative: false}, + {name: "ApproximateReciprocalOfSqrtMaskedFloat64x8", argLength: 2, commutative: false}, + {name: "AverageMaskedUint8x16", argLength: 3, commutative: true}, + {name: "AverageMaskedUint8x32", argLength: 3, commutative: true}, + {name: "AverageMaskedUint8x64", argLength: 3, commutative: true}, + {name: "AverageMaskedUint16x8", argLength: 3, commutative: true}, + {name: "AverageMaskedUint16x16", argLength: 3, commutative: true}, + {name: "AverageMaskedUint16x32", argLength: 3, commutative: true}, + {name: "AverageUint8x16", argLength: 2, commutative: true}, + {name: "AverageUint8x32", argLength: 2, commutative: true}, + {name: "AverageUint8x64", argLength: 2, commutative: true}, + {name: "AverageUint16x8", argLength: 2, commutative: true}, + {name: "AverageUint16x16", argLength: 2, commutative: true}, + {name: "AverageUint16x32", argLength: 2, commutative: true}, + {name: "CeilFloat32x4", argLength: 1, commutative: false}, + {name: "CeilFloat32x8", argLength: 1, commutative: false}, + {name: "CeilFloat64x2", argLength: 1, commutative: false}, + {name: "CeilFloat64x4", argLength: 1, commutative: false}, + {name: "CompressFloat32x4", argLength: 2, commutative: false}, + {name: "CompressFloat32x8", argLength: 2, commutative: false}, {name: "CompressFloat32x16", argLength: 2, commutative: false}, + {name: "CompressFloat64x2", argLength: 2, commutative: false}, + {name: "CompressFloat64x4", argLength: 2, commutative: false}, + {name: "CompressFloat64x8", argLength: 2, commutative: false}, + {name: "CompressInt8x16", argLength: 2, commutative: false}, + {name: "CompressInt8x32", argLength: 2, commutative: false}, + {name: "CompressInt8x64", argLength: 2, commutative: false}, + {name: "CompressInt16x8", argLength: 2, commutative: false}, + {name: "CompressInt16x16", argLength: 2, commutative: false}, + {name: "CompressInt16x32", argLength: 2, commutative: false}, + {name: "CompressInt32x4", argLength: 2, commutative: false}, + {name: "CompressInt32x8", argLength: 2, commutative: false}, + {name: "CompressInt32x16", argLength: 2, commutative: false}, + {name: "CompressInt64x2", argLength: 2, commutative: false}, + {name: "CompressInt64x4", argLength: 2, commutative: false}, + {name: "CompressInt64x8", argLength: 2, commutative: false}, + {name: "CompressUint8x16", argLength: 2, commutative: false}, + {name: "CompressUint8x32", 
argLength: 2, commutative: false}, + {name: "CompressUint8x64", argLength: 2, commutative: false}, + {name: "CompressUint16x8", argLength: 2, commutative: false}, + {name: "CompressUint16x16", argLength: 2, commutative: false}, + {name: "CompressUint16x32", argLength: 2, commutative: false}, + {name: "CompressUint32x4", argLength: 2, commutative: false}, + {name: "CompressUint32x8", argLength: 2, commutative: false}, + {name: "CompressUint32x16", argLength: 2, commutative: false}, + {name: "CompressUint64x2", argLength: 2, commutative: false}, + {name: "CompressUint64x4", argLength: 2, commutative: false}, + {name: "CompressUint64x8", argLength: 2, commutative: false}, + {name: "DivFloat32x4", argLength: 2, commutative: false}, + {name: "DivFloat32x8", argLength: 2, commutative: false}, {name: "DivFloat32x16", argLength: 2, commutative: false}, + {name: "DivFloat64x2", argLength: 2, commutative: false}, + {name: "DivFloat64x4", argLength: 2, commutative: false}, + {name: "DivFloat64x8", argLength: 2, commutative: false}, + {name: "DivMaskedFloat32x4", argLength: 3, commutative: false}, + {name: "DivMaskedFloat32x8", argLength: 3, commutative: false}, {name: "DivMaskedFloat32x16", argLength: 3, commutative: false}, + {name: "DivMaskedFloat64x2", argLength: 3, commutative: false}, + {name: "DivMaskedFloat64x4", argLength: 3, commutative: false}, + {name: "DivMaskedFloat64x8", argLength: 3, commutative: false}, + {name: "DotProdBroadcastFloat32x4", argLength: 2, commutative: true}, + {name: "DotProdBroadcastFloat32x8", argLength: 2, commutative: true}, + {name: "DotProdBroadcastFloat64x2", argLength: 2, commutative: true}, + {name: "EqualFloat32x4", argLength: 2, commutative: true}, + {name: "EqualFloat32x8", argLength: 2, commutative: true}, {name: "EqualFloat32x16", argLength: 2, commutative: true}, + {name: "EqualFloat64x2", argLength: 2, commutative: true}, + {name: "EqualFloat64x4", argLength: 2, commutative: true}, + {name: "EqualFloat64x8", argLength: 2, commutative: true}, + {name: "EqualInt8x16", argLength: 2, commutative: true}, + {name: "EqualInt8x32", argLength: 2, commutative: true}, + {name: "EqualInt8x64", argLength: 2, commutative: true}, + {name: "EqualInt16x8", argLength: 2, commutative: true}, + {name: "EqualInt16x16", argLength: 2, commutative: true}, + {name: "EqualInt16x32", argLength: 2, commutative: true}, + {name: "EqualInt32x4", argLength: 2, commutative: true}, + {name: "EqualInt32x8", argLength: 2, commutative: true}, + {name: "EqualInt32x16", argLength: 2, commutative: true}, + {name: "EqualInt64x2", argLength: 2, commutative: true}, + {name: "EqualInt64x4", argLength: 2, commutative: true}, + {name: "EqualInt64x8", argLength: 2, commutative: true}, + {name: "EqualMaskedFloat32x4", argLength: 3, commutative: true}, + {name: "EqualMaskedFloat32x8", argLength: 3, commutative: true}, {name: "EqualMaskedFloat32x16", argLength: 3, commutative: true}, - {name: "FusedMultiplyAddFloat32x16", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddMaskedFloat32x16", argLength: 4, commutative: false}, - {name: "FusedMultiplyAddSubFloat32x16", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSubMaskedFloat32x16", argLength: 4, commutative: false}, - {name: "FusedMultiplySubAddFloat32x16", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAddMaskedFloat32x16", argLength: 4, commutative: false}, - {name: "GreaterFloat32x16", argLength: 2, commutative: false}, - {name: "GreaterEqualFloat32x16", argLength: 2, commutative: false}, - {name: 
"GreaterEqualMaskedFloat32x16", argLength: 3, commutative: false}, - {name: "GreaterMaskedFloat32x16", argLength: 3, commutative: false}, - {name: "IsNanFloat32x16", argLength: 2, commutative: true}, - {name: "IsNanMaskedFloat32x16", argLength: 3, commutative: true}, - {name: "LessFloat32x16", argLength: 2, commutative: false}, - {name: "LessEqualFloat32x16", argLength: 2, commutative: false}, - {name: "LessEqualMaskedFloat32x16", argLength: 3, commutative: false}, - {name: "LessMaskedFloat32x16", argLength: 3, commutative: false}, - {name: "MaxFloat32x16", argLength: 2, commutative: true}, - {name: "MaxMaskedFloat32x16", argLength: 3, commutative: true}, - {name: "MinFloat32x16", argLength: 2, commutative: true}, - {name: "MinMaskedFloat32x16", argLength: 3, commutative: true}, - {name: "MulFloat32x16", argLength: 2, commutative: true}, - {name: "MulByPowOf2Float32x16", argLength: 2, commutative: false}, - {name: "MulByPowOf2MaskedFloat32x16", argLength: 3, commutative: false}, - {name: "MulMaskedFloat32x16", argLength: 3, commutative: true}, - {name: "NotEqualFloat32x16", argLength: 2, commutative: true}, - {name: "NotEqualMaskedFloat32x16", argLength: 3, commutative: true}, - {name: "SqrtFloat32x16", argLength: 1, commutative: false}, - {name: "SqrtMaskedFloat32x16", argLength: 2, commutative: false}, - {name: "SubFloat32x16", argLength: 2, commutative: false}, - {name: "SubMaskedFloat32x16", argLength: 3, commutative: false}, - {name: "AddFloat32x4", argLength: 2, commutative: true}, - {name: "AddMaskedFloat32x4", argLength: 3, commutative: true}, - {name: "AddSubFloat32x4", argLength: 2, commutative: false}, - {name: "ApproximateReciprocalFloat32x4", argLength: 1, commutative: false}, - {name: "ApproximateReciprocalMaskedFloat32x4", argLength: 2, commutative: false}, - {name: "ApproximateReciprocalOfSqrtFloat32x4", argLength: 1, commutative: false}, - {name: "ApproximateReciprocalOfSqrtMaskedFloat32x4", argLength: 2, commutative: false}, - {name: "CeilFloat32x4", argLength: 1, commutative: false}, - {name: "CompressFloat32x4", argLength: 2, commutative: false}, - {name: "DivFloat32x4", argLength: 2, commutative: false}, - {name: "DivMaskedFloat32x4", argLength: 3, commutative: false}, - {name: "DotProdBroadcastFloat32x4", argLength: 2, commutative: true}, - {name: "EqualFloat32x4", argLength: 2, commutative: true}, - {name: "EqualMaskedFloat32x4", argLength: 3, commutative: true}, + {name: "EqualMaskedFloat64x2", argLength: 3, commutative: true}, + {name: "EqualMaskedFloat64x4", argLength: 3, commutative: true}, + {name: "EqualMaskedFloat64x8", argLength: 3, commutative: true}, + {name: "EqualMaskedInt8x16", argLength: 3, commutative: true}, + {name: "EqualMaskedInt8x32", argLength: 3, commutative: true}, + {name: "EqualMaskedInt8x64", argLength: 3, commutative: true}, + {name: "EqualMaskedInt16x8", argLength: 3, commutative: true}, + {name: "EqualMaskedInt16x16", argLength: 3, commutative: true}, + {name: "EqualMaskedInt16x32", argLength: 3, commutative: true}, + {name: "EqualMaskedInt32x4", argLength: 3, commutative: true}, + {name: "EqualMaskedInt32x8", argLength: 3, commutative: true}, + {name: "EqualMaskedInt32x16", argLength: 3, commutative: true}, + {name: "EqualMaskedInt64x2", argLength: 3, commutative: true}, + {name: "EqualMaskedInt64x4", argLength: 3, commutative: true}, + {name: "EqualMaskedInt64x8", argLength: 3, commutative: true}, + {name: "EqualMaskedUint8x16", argLength: 3, commutative: true}, + {name: "EqualMaskedUint8x32", argLength: 3, commutative: true}, + {name: 
"EqualMaskedUint8x64", argLength: 3, commutative: true}, + {name: "EqualMaskedUint16x8", argLength: 3, commutative: true}, + {name: "EqualMaskedUint16x16", argLength: 3, commutative: true}, + {name: "EqualMaskedUint16x32", argLength: 3, commutative: true}, + {name: "EqualMaskedUint32x4", argLength: 3, commutative: true}, + {name: "EqualMaskedUint32x8", argLength: 3, commutative: true}, + {name: "EqualMaskedUint32x16", argLength: 3, commutative: true}, + {name: "EqualMaskedUint64x2", argLength: 3, commutative: true}, + {name: "EqualMaskedUint64x4", argLength: 3, commutative: true}, + {name: "EqualMaskedUint64x8", argLength: 3, commutative: true}, + {name: "EqualUint8x16", argLength: 2, commutative: true}, + {name: "EqualUint8x32", argLength: 2, commutative: true}, + {name: "EqualUint8x64", argLength: 2, commutative: true}, + {name: "EqualUint16x8", argLength: 2, commutative: true}, + {name: "EqualUint16x16", argLength: 2, commutative: true}, + {name: "EqualUint16x32", argLength: 2, commutative: true}, + {name: "EqualUint32x4", argLength: 2, commutative: true}, + {name: "EqualUint32x8", argLength: 2, commutative: true}, + {name: "EqualUint32x16", argLength: 2, commutative: true}, + {name: "EqualUint64x2", argLength: 2, commutative: true}, + {name: "EqualUint64x4", argLength: 2, commutative: true}, + {name: "EqualUint64x8", argLength: 2, commutative: true}, {name: "FloorFloat32x4", argLength: 1, commutative: false}, + {name: "FloorFloat32x8", argLength: 1, commutative: false}, + {name: "FloorFloat64x2", argLength: 1, commutative: false}, + {name: "FloorFloat64x4", argLength: 1, commutative: false}, {name: "FusedMultiplyAddFloat32x4", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddFloat32x8", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddFloat32x16", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddFloat64x2", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddFloat64x4", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddFloat64x8", argLength: 3, commutative: false}, {name: "FusedMultiplyAddMaskedFloat32x4", argLength: 4, commutative: false}, + {name: "FusedMultiplyAddMaskedFloat32x8", argLength: 4, commutative: false}, + {name: "FusedMultiplyAddMaskedFloat32x16", argLength: 4, commutative: false}, + {name: "FusedMultiplyAddMaskedFloat64x2", argLength: 4, commutative: false}, + {name: "FusedMultiplyAddMaskedFloat64x4", argLength: 4, commutative: false}, + {name: "FusedMultiplyAddMaskedFloat64x8", argLength: 4, commutative: false}, {name: "FusedMultiplyAddSubFloat32x4", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSubFloat32x8", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSubFloat32x16", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSubFloat64x2", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSubFloat64x4", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSubFloat64x8", argLength: 3, commutative: false}, {name: "FusedMultiplyAddSubMaskedFloat32x4", argLength: 4, commutative: false}, + {name: "FusedMultiplyAddSubMaskedFloat32x8", argLength: 4, commutative: false}, + {name: "FusedMultiplyAddSubMaskedFloat32x16", argLength: 4, commutative: false}, + {name: "FusedMultiplyAddSubMaskedFloat64x2", argLength: 4, commutative: false}, + {name: "FusedMultiplyAddSubMaskedFloat64x4", argLength: 4, commutative: false}, + {name: "FusedMultiplyAddSubMaskedFloat64x8", argLength: 4, commutative: false}, {name: "FusedMultiplySubAddFloat32x4", argLength: 3, commutative: false}, + 
{name: "FusedMultiplySubAddFloat32x8", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAddFloat32x16", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAddFloat64x2", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAddFloat64x4", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAddFloat64x8", argLength: 3, commutative: false}, {name: "FusedMultiplySubAddMaskedFloat32x4", argLength: 4, commutative: false}, - {name: "GreaterFloat32x4", argLength: 2, commutative: false}, + {name: "FusedMultiplySubAddMaskedFloat32x8", argLength: 4, commutative: false}, + {name: "FusedMultiplySubAddMaskedFloat32x16", argLength: 4, commutative: false}, + {name: "FusedMultiplySubAddMaskedFloat64x2", argLength: 4, commutative: false}, + {name: "FusedMultiplySubAddMaskedFloat64x4", argLength: 4, commutative: false}, + {name: "FusedMultiplySubAddMaskedFloat64x8", argLength: 4, commutative: false}, + {name: "GaloisFieldMulMaskedUint8x16", argLength: 3, commutative: false}, + {name: "GaloisFieldMulMaskedUint8x32", argLength: 3, commutative: false}, + {name: "GaloisFieldMulMaskedUint8x64", argLength: 3, commutative: false}, + {name: "GaloisFieldMulUint8x16", argLength: 2, commutative: false}, + {name: "GaloisFieldMulUint8x32", argLength: 2, commutative: false}, + {name: "GaloisFieldMulUint8x64", argLength: 2, commutative: false}, {name: "GreaterEqualFloat32x4", argLength: 2, commutative: false}, + {name: "GreaterEqualFloat32x8", argLength: 2, commutative: false}, + {name: "GreaterEqualFloat32x16", argLength: 2, commutative: false}, + {name: "GreaterEqualFloat64x2", argLength: 2, commutative: false}, + {name: "GreaterEqualFloat64x4", argLength: 2, commutative: false}, + {name: "GreaterEqualFloat64x8", argLength: 2, commutative: false}, + {name: "GreaterEqualInt8x16", argLength: 2, commutative: false}, + {name: "GreaterEqualInt8x32", argLength: 2, commutative: false}, + {name: "GreaterEqualInt8x64", argLength: 2, commutative: false}, + {name: "GreaterEqualInt16x8", argLength: 2, commutative: false}, + {name: "GreaterEqualInt16x16", argLength: 2, commutative: false}, + {name: "GreaterEqualInt16x32", argLength: 2, commutative: false}, + {name: "GreaterEqualInt32x4", argLength: 2, commutative: false}, + {name: "GreaterEqualInt32x8", argLength: 2, commutative: false}, + {name: "GreaterEqualInt32x16", argLength: 2, commutative: false}, + {name: "GreaterEqualInt64x2", argLength: 2, commutative: false}, + {name: "GreaterEqualInt64x4", argLength: 2, commutative: false}, + {name: "GreaterEqualInt64x8", argLength: 2, commutative: false}, {name: "GreaterEqualMaskedFloat32x4", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedFloat32x8", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedFloat32x16", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedFloat64x2", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedFloat64x4", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedFloat64x8", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedInt8x16", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedInt8x32", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedInt8x64", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedInt16x8", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedInt16x16", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedInt16x32", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedInt32x4", argLength: 3, commutative: false}, 
+ {name: "GreaterEqualMaskedInt32x8", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedInt32x16", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedInt64x2", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedInt64x4", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedInt64x8", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedUint8x16", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedUint8x32", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedUint8x64", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedUint16x8", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedUint16x16", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedUint16x32", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedUint32x4", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedUint32x8", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedUint32x16", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedUint64x2", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedUint64x4", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedUint64x8", argLength: 3, commutative: false}, + {name: "GreaterEqualUint8x16", argLength: 2, commutative: false}, + {name: "GreaterEqualUint8x32", argLength: 2, commutative: false}, + {name: "GreaterEqualUint8x64", argLength: 2, commutative: false}, + {name: "GreaterEqualUint16x8", argLength: 2, commutative: false}, + {name: "GreaterEqualUint16x16", argLength: 2, commutative: false}, + {name: "GreaterEqualUint16x32", argLength: 2, commutative: false}, + {name: "GreaterEqualUint32x4", argLength: 2, commutative: false}, + {name: "GreaterEqualUint32x8", argLength: 2, commutative: false}, + {name: "GreaterEqualUint32x16", argLength: 2, commutative: false}, + {name: "GreaterEqualUint64x2", argLength: 2, commutative: false}, + {name: "GreaterEqualUint64x4", argLength: 2, commutative: false}, + {name: "GreaterEqualUint64x8", argLength: 2, commutative: false}, + {name: "GreaterFloat32x4", argLength: 2, commutative: false}, + {name: "GreaterFloat32x8", argLength: 2, commutative: false}, + {name: "GreaterFloat32x16", argLength: 2, commutative: false}, + {name: "GreaterFloat64x2", argLength: 2, commutative: false}, + {name: "GreaterFloat64x4", argLength: 2, commutative: false}, + {name: "GreaterFloat64x8", argLength: 2, commutative: false}, + {name: "GreaterInt8x16", argLength: 2, commutative: false}, + {name: "GreaterInt8x32", argLength: 2, commutative: false}, + {name: "GreaterInt8x64", argLength: 2, commutative: false}, + {name: "GreaterInt16x8", argLength: 2, commutative: false}, + {name: "GreaterInt16x16", argLength: 2, commutative: false}, + {name: "GreaterInt16x32", argLength: 2, commutative: false}, + {name: "GreaterInt32x4", argLength: 2, commutative: false}, + {name: "GreaterInt32x8", argLength: 2, commutative: false}, + {name: "GreaterInt32x16", argLength: 2, commutative: false}, + {name: "GreaterInt64x2", argLength: 2, commutative: false}, + {name: "GreaterInt64x4", argLength: 2, commutative: false}, + {name: "GreaterInt64x8", argLength: 2, commutative: false}, {name: "GreaterMaskedFloat32x4", argLength: 3, commutative: false}, + {name: "GreaterMaskedFloat32x8", argLength: 3, commutative: false}, + {name: "GreaterMaskedFloat32x16", argLength: 3, commutative: false}, + {name: "GreaterMaskedFloat64x2", argLength: 3, commutative: false}, + {name: "GreaterMaskedFloat64x4", argLength: 3, commutative: 
false}, + {name: "GreaterMaskedFloat64x8", argLength: 3, commutative: false}, + {name: "GreaterMaskedInt8x16", argLength: 3, commutative: false}, + {name: "GreaterMaskedInt8x32", argLength: 3, commutative: false}, + {name: "GreaterMaskedInt8x64", argLength: 3, commutative: false}, + {name: "GreaterMaskedInt16x8", argLength: 3, commutative: false}, + {name: "GreaterMaskedInt16x16", argLength: 3, commutative: false}, + {name: "GreaterMaskedInt16x32", argLength: 3, commutative: false}, + {name: "GreaterMaskedInt32x4", argLength: 3, commutative: false}, + {name: "GreaterMaskedInt32x8", argLength: 3, commutative: false}, + {name: "GreaterMaskedInt32x16", argLength: 3, commutative: false}, + {name: "GreaterMaskedInt64x2", argLength: 3, commutative: false}, + {name: "GreaterMaskedInt64x4", argLength: 3, commutative: false}, + {name: "GreaterMaskedInt64x8", argLength: 3, commutative: false}, + {name: "GreaterMaskedUint8x16", argLength: 3, commutative: false}, + {name: "GreaterMaskedUint8x32", argLength: 3, commutative: false}, + {name: "GreaterMaskedUint8x64", argLength: 3, commutative: false}, + {name: "GreaterMaskedUint16x8", argLength: 3, commutative: false}, + {name: "GreaterMaskedUint16x16", argLength: 3, commutative: false}, + {name: "GreaterMaskedUint16x32", argLength: 3, commutative: false}, + {name: "GreaterMaskedUint32x4", argLength: 3, commutative: false}, + {name: "GreaterMaskedUint32x8", argLength: 3, commutative: false}, + {name: "GreaterMaskedUint32x16", argLength: 3, commutative: false}, + {name: "GreaterMaskedUint64x2", argLength: 3, commutative: false}, + {name: "GreaterMaskedUint64x4", argLength: 3, commutative: false}, + {name: "GreaterMaskedUint64x8", argLength: 3, commutative: false}, + {name: "GreaterUint8x16", argLength: 2, commutative: false}, + {name: "GreaterUint8x32", argLength: 2, commutative: false}, + {name: "GreaterUint8x64", argLength: 2, commutative: false}, + {name: "GreaterUint16x8", argLength: 2, commutative: false}, + {name: "GreaterUint16x16", argLength: 2, commutative: false}, + {name: "GreaterUint16x32", argLength: 2, commutative: false}, + {name: "GreaterUint32x4", argLength: 2, commutative: false}, + {name: "GreaterUint32x8", argLength: 2, commutative: false}, + {name: "GreaterUint32x16", argLength: 2, commutative: false}, + {name: "GreaterUint64x2", argLength: 2, commutative: false}, + {name: "GreaterUint64x4", argLength: 2, commutative: false}, + {name: "GreaterUint64x8", argLength: 2, commutative: false}, {name: "IsNanFloat32x4", argLength: 2, commutative: true}, + {name: "IsNanFloat32x8", argLength: 2, commutative: true}, + {name: "IsNanFloat32x16", argLength: 2, commutative: true}, + {name: "IsNanFloat64x2", argLength: 2, commutative: true}, + {name: "IsNanFloat64x4", argLength: 2, commutative: true}, + {name: "IsNanFloat64x8", argLength: 2, commutative: true}, {name: "IsNanMaskedFloat32x4", argLength: 3, commutative: true}, - {name: "LessFloat32x4", argLength: 2, commutative: false}, + {name: "IsNanMaskedFloat32x8", argLength: 3, commutative: true}, + {name: "IsNanMaskedFloat32x16", argLength: 3, commutative: true}, + {name: "IsNanMaskedFloat64x2", argLength: 3, commutative: true}, + {name: "IsNanMaskedFloat64x4", argLength: 3, commutative: true}, + {name: "IsNanMaskedFloat64x8", argLength: 3, commutative: true}, {name: "LessEqualFloat32x4", argLength: 2, commutative: false}, - {name: "LessEqualMaskedFloat32x4", argLength: 3, commutative: false}, - {name: "LessMaskedFloat32x4", argLength: 3, commutative: false}, - {name: "MaxFloat32x4", argLength: 
2, commutative: true}, - {name: "MaxMaskedFloat32x4", argLength: 3, commutative: true}, - {name: "MinFloat32x4", argLength: 2, commutative: true}, - {name: "MinMaskedFloat32x4", argLength: 3, commutative: true}, - {name: "MulFloat32x4", argLength: 2, commutative: true}, - {name: "MulByPowOf2Float32x4", argLength: 2, commutative: false}, - {name: "MulByPowOf2MaskedFloat32x4", argLength: 3, commutative: false}, - {name: "MulMaskedFloat32x4", argLength: 3, commutative: true}, - {name: "NotEqualFloat32x4", argLength: 2, commutative: true}, - {name: "NotEqualMaskedFloat32x4", argLength: 3, commutative: true}, - {name: "PairwiseAddFloat32x4", argLength: 2, commutative: false}, - {name: "PairwiseSubFloat32x4", argLength: 2, commutative: false}, - {name: "RoundFloat32x4", argLength: 1, commutative: false}, - {name: "SqrtFloat32x4", argLength: 1, commutative: false}, - {name: "SqrtMaskedFloat32x4", argLength: 2, commutative: false}, - {name: "SubFloat32x4", argLength: 2, commutative: false}, - {name: "SubMaskedFloat32x4", argLength: 3, commutative: false}, - {name: "TruncFloat32x4", argLength: 1, commutative: false}, - {name: "AddFloat32x8", argLength: 2, commutative: true}, - {name: "AddMaskedFloat32x8", argLength: 3, commutative: true}, - {name: "AddSubFloat32x8", argLength: 2, commutative: false}, - {name: "ApproximateReciprocalFloat32x8", argLength: 1, commutative: false}, - {name: "ApproximateReciprocalMaskedFloat32x8", argLength: 2, commutative: false}, - {name: "ApproximateReciprocalOfSqrtFloat32x8", argLength: 1, commutative: false}, - {name: "ApproximateReciprocalOfSqrtMaskedFloat32x8", argLength: 2, commutative: false}, - {name: "CeilFloat32x8", argLength: 1, commutative: false}, - {name: "CompressFloat32x8", argLength: 2, commutative: false}, - {name: "DivFloat32x8", argLength: 2, commutative: false}, - {name: "DivMaskedFloat32x8", argLength: 3, commutative: false}, - {name: "DotProdBroadcastFloat32x8", argLength: 2, commutative: true}, - {name: "EqualFloat32x8", argLength: 2, commutative: true}, - {name: "EqualMaskedFloat32x8", argLength: 3, commutative: true}, - {name: "FloorFloat32x8", argLength: 1, commutative: false}, - {name: "FusedMultiplyAddFloat32x8", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddMaskedFloat32x8", argLength: 4, commutative: false}, - {name: "FusedMultiplyAddSubFloat32x8", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSubMaskedFloat32x8", argLength: 4, commutative: false}, - {name: "FusedMultiplySubAddFloat32x8", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAddMaskedFloat32x8", argLength: 4, commutative: false}, - {name: "GreaterFloat32x8", argLength: 2, commutative: false}, - {name: "GreaterEqualFloat32x8", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedFloat32x8", argLength: 3, commutative: false}, - {name: "GreaterMaskedFloat32x8", argLength: 3, commutative: false}, - {name: "IsNanFloat32x8", argLength: 2, commutative: true}, - {name: "IsNanMaskedFloat32x8", argLength: 3, commutative: true}, - {name: "LessFloat32x8", argLength: 2, commutative: false}, {name: "LessEqualFloat32x8", argLength: 2, commutative: false}, + {name: "LessEqualFloat32x16", argLength: 2, commutative: false}, + {name: "LessEqualFloat64x2", argLength: 2, commutative: false}, + {name: "LessEqualFloat64x4", argLength: 2, commutative: false}, + {name: "LessEqualFloat64x8", argLength: 2, commutative: false}, + {name: "LessEqualInt8x16", argLength: 2, commutative: false}, + {name: "LessEqualInt8x32", argLength: 2, commutative: false}, 
+ {name: "LessEqualInt8x64", argLength: 2, commutative: false}, + {name: "LessEqualInt16x8", argLength: 2, commutative: false}, + {name: "LessEqualInt16x16", argLength: 2, commutative: false}, + {name: "LessEqualInt16x32", argLength: 2, commutative: false}, + {name: "LessEqualInt32x4", argLength: 2, commutative: false}, + {name: "LessEqualInt32x8", argLength: 2, commutative: false}, + {name: "LessEqualInt32x16", argLength: 2, commutative: false}, + {name: "LessEqualInt64x2", argLength: 2, commutative: false}, + {name: "LessEqualInt64x4", argLength: 2, commutative: false}, + {name: "LessEqualInt64x8", argLength: 2, commutative: false}, + {name: "LessEqualMaskedFloat32x4", argLength: 3, commutative: false}, {name: "LessEqualMaskedFloat32x8", argLength: 3, commutative: false}, + {name: "LessEqualMaskedFloat32x16", argLength: 3, commutative: false}, + {name: "LessEqualMaskedFloat64x2", argLength: 3, commutative: false}, + {name: "LessEqualMaskedFloat64x4", argLength: 3, commutative: false}, + {name: "LessEqualMaskedFloat64x8", argLength: 3, commutative: false}, + {name: "LessEqualMaskedInt8x16", argLength: 3, commutative: false}, + {name: "LessEqualMaskedInt8x32", argLength: 3, commutative: false}, + {name: "LessEqualMaskedInt8x64", argLength: 3, commutative: false}, + {name: "LessEqualMaskedInt16x8", argLength: 3, commutative: false}, + {name: "LessEqualMaskedInt16x16", argLength: 3, commutative: false}, + {name: "LessEqualMaskedInt16x32", argLength: 3, commutative: false}, + {name: "LessEqualMaskedInt32x4", argLength: 3, commutative: false}, + {name: "LessEqualMaskedInt32x8", argLength: 3, commutative: false}, + {name: "LessEqualMaskedInt32x16", argLength: 3, commutative: false}, + {name: "LessEqualMaskedInt64x2", argLength: 3, commutative: false}, + {name: "LessEqualMaskedInt64x4", argLength: 3, commutative: false}, + {name: "LessEqualMaskedInt64x8", argLength: 3, commutative: false}, + {name: "LessEqualMaskedUint8x16", argLength: 3, commutative: false}, + {name: "LessEqualMaskedUint8x32", argLength: 3, commutative: false}, + {name: "LessEqualMaskedUint8x64", argLength: 3, commutative: false}, + {name: "LessEqualMaskedUint16x8", argLength: 3, commutative: false}, + {name: "LessEqualMaskedUint16x16", argLength: 3, commutative: false}, + {name: "LessEqualMaskedUint16x32", argLength: 3, commutative: false}, + {name: "LessEqualMaskedUint32x4", argLength: 3, commutative: false}, + {name: "LessEqualMaskedUint32x8", argLength: 3, commutative: false}, + {name: "LessEqualMaskedUint32x16", argLength: 3, commutative: false}, + {name: "LessEqualMaskedUint64x2", argLength: 3, commutative: false}, + {name: "LessEqualMaskedUint64x4", argLength: 3, commutative: false}, + {name: "LessEqualMaskedUint64x8", argLength: 3, commutative: false}, + {name: "LessEqualUint8x16", argLength: 2, commutative: false}, + {name: "LessEqualUint8x32", argLength: 2, commutative: false}, + {name: "LessEqualUint8x64", argLength: 2, commutative: false}, + {name: "LessEqualUint16x8", argLength: 2, commutative: false}, + {name: "LessEqualUint16x16", argLength: 2, commutative: false}, + {name: "LessEqualUint16x32", argLength: 2, commutative: false}, + {name: "LessEqualUint32x4", argLength: 2, commutative: false}, + {name: "LessEqualUint32x8", argLength: 2, commutative: false}, + {name: "LessEqualUint32x16", argLength: 2, commutative: false}, + {name: "LessEqualUint64x2", argLength: 2, commutative: false}, + {name: "LessEqualUint64x4", argLength: 2, commutative: false}, + {name: "LessEqualUint64x8", argLength: 2, commutative: false}, 
+ {name: "LessFloat32x4", argLength: 2, commutative: false}, + {name: "LessFloat32x8", argLength: 2, commutative: false}, + {name: "LessFloat32x16", argLength: 2, commutative: false}, + {name: "LessFloat64x2", argLength: 2, commutative: false}, + {name: "LessFloat64x4", argLength: 2, commutative: false}, + {name: "LessFloat64x8", argLength: 2, commutative: false}, + {name: "LessInt8x16", argLength: 2, commutative: false}, + {name: "LessInt8x32", argLength: 2, commutative: false}, + {name: "LessInt8x64", argLength: 2, commutative: false}, + {name: "LessInt16x8", argLength: 2, commutative: false}, + {name: "LessInt16x16", argLength: 2, commutative: false}, + {name: "LessInt16x32", argLength: 2, commutative: false}, + {name: "LessInt32x4", argLength: 2, commutative: false}, + {name: "LessInt32x8", argLength: 2, commutative: false}, + {name: "LessInt32x16", argLength: 2, commutative: false}, + {name: "LessInt64x2", argLength: 2, commutative: false}, + {name: "LessInt64x4", argLength: 2, commutative: false}, + {name: "LessInt64x8", argLength: 2, commutative: false}, + {name: "LessMaskedFloat32x4", argLength: 3, commutative: false}, {name: "LessMaskedFloat32x8", argLength: 3, commutative: false}, + {name: "LessMaskedFloat32x16", argLength: 3, commutative: false}, + {name: "LessMaskedFloat64x2", argLength: 3, commutative: false}, + {name: "LessMaskedFloat64x4", argLength: 3, commutative: false}, + {name: "LessMaskedFloat64x8", argLength: 3, commutative: false}, + {name: "LessMaskedInt8x16", argLength: 3, commutative: false}, + {name: "LessMaskedInt8x32", argLength: 3, commutative: false}, + {name: "LessMaskedInt8x64", argLength: 3, commutative: false}, + {name: "LessMaskedInt16x8", argLength: 3, commutative: false}, + {name: "LessMaskedInt16x16", argLength: 3, commutative: false}, + {name: "LessMaskedInt16x32", argLength: 3, commutative: false}, + {name: "LessMaskedInt32x4", argLength: 3, commutative: false}, + {name: "LessMaskedInt32x8", argLength: 3, commutative: false}, + {name: "LessMaskedInt32x16", argLength: 3, commutative: false}, + {name: "LessMaskedInt64x2", argLength: 3, commutative: false}, + {name: "LessMaskedInt64x4", argLength: 3, commutative: false}, + {name: "LessMaskedInt64x8", argLength: 3, commutative: false}, + {name: "LessMaskedUint8x16", argLength: 3, commutative: false}, + {name: "LessMaskedUint8x32", argLength: 3, commutative: false}, + {name: "LessMaskedUint8x64", argLength: 3, commutative: false}, + {name: "LessMaskedUint16x8", argLength: 3, commutative: false}, + {name: "LessMaskedUint16x16", argLength: 3, commutative: false}, + {name: "LessMaskedUint16x32", argLength: 3, commutative: false}, + {name: "LessMaskedUint32x4", argLength: 3, commutative: false}, + {name: "LessMaskedUint32x8", argLength: 3, commutative: false}, + {name: "LessMaskedUint32x16", argLength: 3, commutative: false}, + {name: "LessMaskedUint64x2", argLength: 3, commutative: false}, + {name: "LessMaskedUint64x4", argLength: 3, commutative: false}, + {name: "LessMaskedUint64x8", argLength: 3, commutative: false}, + {name: "LessUint8x16", argLength: 2, commutative: false}, + {name: "LessUint8x32", argLength: 2, commutative: false}, + {name: "LessUint8x64", argLength: 2, commutative: false}, + {name: "LessUint16x8", argLength: 2, commutative: false}, + {name: "LessUint16x16", argLength: 2, commutative: false}, + {name: "LessUint16x32", argLength: 2, commutative: false}, + {name: "LessUint32x4", argLength: 2, commutative: false}, + {name: "LessUint32x8", argLength: 2, commutative: false}, + {name: 
"LessUint32x16", argLength: 2, commutative: false}, + {name: "LessUint64x2", argLength: 2, commutative: false}, + {name: "LessUint64x4", argLength: 2, commutative: false}, + {name: "LessUint64x8", argLength: 2, commutative: false}, + {name: "MaxFloat32x4", argLength: 2, commutative: true}, {name: "MaxFloat32x8", argLength: 2, commutative: true}, + {name: "MaxFloat32x16", argLength: 2, commutative: true}, + {name: "MaxFloat64x2", argLength: 2, commutative: true}, + {name: "MaxFloat64x4", argLength: 2, commutative: true}, + {name: "MaxFloat64x8", argLength: 2, commutative: true}, + {name: "MaxInt8x16", argLength: 2, commutative: true}, + {name: "MaxInt8x32", argLength: 2, commutative: true}, + {name: "MaxInt8x64", argLength: 2, commutative: true}, + {name: "MaxInt16x8", argLength: 2, commutative: true}, + {name: "MaxInt16x16", argLength: 2, commutative: true}, + {name: "MaxInt16x32", argLength: 2, commutative: true}, + {name: "MaxInt32x4", argLength: 2, commutative: true}, + {name: "MaxInt32x8", argLength: 2, commutative: true}, + {name: "MaxInt32x16", argLength: 2, commutative: true}, + {name: "MaxInt64x2", argLength: 2, commutative: true}, + {name: "MaxInt64x4", argLength: 2, commutative: true}, + {name: "MaxInt64x8", argLength: 2, commutative: true}, + {name: "MaxMaskedFloat32x4", argLength: 3, commutative: true}, {name: "MaxMaskedFloat32x8", argLength: 3, commutative: true}, + {name: "MaxMaskedFloat32x16", argLength: 3, commutative: true}, + {name: "MaxMaskedFloat64x2", argLength: 3, commutative: true}, + {name: "MaxMaskedFloat64x4", argLength: 3, commutative: true}, + {name: "MaxMaskedFloat64x8", argLength: 3, commutative: true}, + {name: "MaxMaskedInt8x16", argLength: 3, commutative: true}, + {name: "MaxMaskedInt8x32", argLength: 3, commutative: true}, + {name: "MaxMaskedInt8x64", argLength: 3, commutative: true}, + {name: "MaxMaskedInt16x8", argLength: 3, commutative: true}, + {name: "MaxMaskedInt16x16", argLength: 3, commutative: true}, + {name: "MaxMaskedInt16x32", argLength: 3, commutative: true}, + {name: "MaxMaskedInt32x4", argLength: 3, commutative: true}, + {name: "MaxMaskedInt32x8", argLength: 3, commutative: true}, + {name: "MaxMaskedInt32x16", argLength: 3, commutative: true}, + {name: "MaxMaskedInt64x2", argLength: 3, commutative: true}, + {name: "MaxMaskedInt64x4", argLength: 3, commutative: true}, + {name: "MaxMaskedInt64x8", argLength: 3, commutative: true}, + {name: "MaxMaskedUint8x16", argLength: 3, commutative: true}, + {name: "MaxMaskedUint8x32", argLength: 3, commutative: true}, + {name: "MaxMaskedUint8x64", argLength: 3, commutative: true}, + {name: "MaxMaskedUint16x8", argLength: 3, commutative: true}, + {name: "MaxMaskedUint16x16", argLength: 3, commutative: true}, + {name: "MaxMaskedUint16x32", argLength: 3, commutative: true}, + {name: "MaxMaskedUint32x4", argLength: 3, commutative: true}, + {name: "MaxMaskedUint32x8", argLength: 3, commutative: true}, + {name: "MaxMaskedUint32x16", argLength: 3, commutative: true}, + {name: "MaxMaskedUint64x2", argLength: 3, commutative: true}, + {name: "MaxMaskedUint64x4", argLength: 3, commutative: true}, + {name: "MaxMaskedUint64x8", argLength: 3, commutative: true}, + {name: "MaxUint8x16", argLength: 2, commutative: true}, + {name: "MaxUint8x32", argLength: 2, commutative: true}, + {name: "MaxUint8x64", argLength: 2, commutative: true}, + {name: "MaxUint16x8", argLength: 2, commutative: true}, + {name: "MaxUint16x16", argLength: 2, commutative: true}, + {name: "MaxUint16x32", argLength: 2, commutative: true}, + {name: 
"MaxUint32x4", argLength: 2, commutative: true}, + {name: "MaxUint32x8", argLength: 2, commutative: true}, + {name: "MaxUint32x16", argLength: 2, commutative: true}, + {name: "MaxUint64x2", argLength: 2, commutative: true}, + {name: "MaxUint64x4", argLength: 2, commutative: true}, + {name: "MaxUint64x8", argLength: 2, commutative: true}, + {name: "MinFloat32x4", argLength: 2, commutative: true}, {name: "MinFloat32x8", argLength: 2, commutative: true}, + {name: "MinFloat32x16", argLength: 2, commutative: true}, + {name: "MinFloat64x2", argLength: 2, commutative: true}, + {name: "MinFloat64x4", argLength: 2, commutative: true}, + {name: "MinFloat64x8", argLength: 2, commutative: true}, + {name: "MinInt8x16", argLength: 2, commutative: true}, + {name: "MinInt8x32", argLength: 2, commutative: true}, + {name: "MinInt8x64", argLength: 2, commutative: true}, + {name: "MinInt16x8", argLength: 2, commutative: true}, + {name: "MinInt16x16", argLength: 2, commutative: true}, + {name: "MinInt16x32", argLength: 2, commutative: true}, + {name: "MinInt32x4", argLength: 2, commutative: true}, + {name: "MinInt32x8", argLength: 2, commutative: true}, + {name: "MinInt32x16", argLength: 2, commutative: true}, + {name: "MinInt64x2", argLength: 2, commutative: true}, + {name: "MinInt64x4", argLength: 2, commutative: true}, + {name: "MinInt64x8", argLength: 2, commutative: true}, + {name: "MinMaskedFloat32x4", argLength: 3, commutative: true}, {name: "MinMaskedFloat32x8", argLength: 3, commutative: true}, - {name: "MulFloat32x8", argLength: 2, commutative: true}, - {name: "MulByPowOf2Float32x8", argLength: 2, commutative: false}, - {name: "MulByPowOf2MaskedFloat32x8", argLength: 3, commutative: false}, - {name: "MulMaskedFloat32x8", argLength: 3, commutative: true}, - {name: "NotEqualFloat32x8", argLength: 2, commutative: true}, - {name: "NotEqualMaskedFloat32x8", argLength: 3, commutative: true}, - {name: "PairwiseAddFloat32x8", argLength: 2, commutative: false}, - {name: "PairwiseSubFloat32x8", argLength: 2, commutative: false}, - {name: "RoundFloat32x8", argLength: 1, commutative: false}, - {name: "SqrtFloat32x8", argLength: 1, commutative: false}, - {name: "SqrtMaskedFloat32x8", argLength: 2, commutative: false}, - {name: "SubFloat32x8", argLength: 2, commutative: false}, - {name: "SubMaskedFloat32x8", argLength: 3, commutative: false}, - {name: "TruncFloat32x8", argLength: 1, commutative: false}, - {name: "AddFloat64x2", argLength: 2, commutative: true}, - {name: "AddMaskedFloat64x2", argLength: 3, commutative: true}, - {name: "AddSubFloat64x2", argLength: 2, commutative: false}, - {name: "ApproximateReciprocalFloat64x2", argLength: 1, commutative: false}, - {name: "ApproximateReciprocalMaskedFloat64x2", argLength: 2, commutative: false}, - {name: "ApproximateReciprocalOfSqrtFloat64x2", argLength: 1, commutative: false}, - {name: "ApproximateReciprocalOfSqrtMaskedFloat64x2", argLength: 2, commutative: false}, - {name: "CeilFloat64x2", argLength: 1, commutative: false}, - {name: "CompressFloat64x2", argLength: 2, commutative: false}, - {name: "DivFloat64x2", argLength: 2, commutative: false}, - {name: "DivMaskedFloat64x2", argLength: 3, commutative: false}, - {name: "DotProdBroadcastFloat64x2", argLength: 2, commutative: true}, - {name: "EqualFloat64x2", argLength: 2, commutative: true}, - {name: "EqualMaskedFloat64x2", argLength: 3, commutative: true}, - {name: "FloorFloat64x2", argLength: 1, commutative: false}, - {name: "FusedMultiplyAddFloat64x2", argLength: 3, commutative: false}, - {name: 
"FusedMultiplyAddMaskedFloat64x2", argLength: 4, commutative: false}, - {name: "FusedMultiplyAddSubFloat64x2", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSubMaskedFloat64x2", argLength: 4, commutative: false}, - {name: "FusedMultiplySubAddFloat64x2", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAddMaskedFloat64x2", argLength: 4, commutative: false}, - {name: "GreaterFloat64x2", argLength: 2, commutative: false}, - {name: "GreaterEqualFloat64x2", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedFloat64x2", argLength: 3, commutative: false}, - {name: "GreaterMaskedFloat64x2", argLength: 3, commutative: false}, - {name: "IsNanFloat64x2", argLength: 2, commutative: true}, - {name: "IsNanMaskedFloat64x2", argLength: 3, commutative: true}, - {name: "LessFloat64x2", argLength: 2, commutative: false}, - {name: "LessEqualFloat64x2", argLength: 2, commutative: false}, - {name: "LessEqualMaskedFloat64x2", argLength: 3, commutative: false}, - {name: "LessMaskedFloat64x2", argLength: 3, commutative: false}, - {name: "MaxFloat64x2", argLength: 2, commutative: true}, - {name: "MaxMaskedFloat64x2", argLength: 3, commutative: true}, - {name: "MinFloat64x2", argLength: 2, commutative: true}, + {name: "MinMaskedFloat32x16", argLength: 3, commutative: true}, {name: "MinMaskedFloat64x2", argLength: 3, commutative: true}, - {name: "MulFloat64x2", argLength: 2, commutative: true}, - {name: "MulByPowOf2Float64x2", argLength: 2, commutative: false}, - {name: "MulByPowOf2MaskedFloat64x2", argLength: 3, commutative: false}, - {name: "MulMaskedFloat64x2", argLength: 3, commutative: true}, - {name: "NotEqualFloat64x2", argLength: 2, commutative: true}, - {name: "NotEqualMaskedFloat64x2", argLength: 3, commutative: true}, - {name: "PairwiseAddFloat64x2", argLength: 2, commutative: false}, - {name: "PairwiseSubFloat64x2", argLength: 2, commutative: false}, - {name: "RoundFloat64x2", argLength: 1, commutative: false}, - {name: "SqrtFloat64x2", argLength: 1, commutative: false}, - {name: "SqrtMaskedFloat64x2", argLength: 2, commutative: false}, - {name: "SubFloat64x2", argLength: 2, commutative: false}, - {name: "SubMaskedFloat64x2", argLength: 3, commutative: false}, - {name: "TruncFloat64x2", argLength: 1, commutative: false}, - {name: "AddFloat64x4", argLength: 2, commutative: true}, - {name: "AddMaskedFloat64x4", argLength: 3, commutative: true}, - {name: "AddSubFloat64x4", argLength: 2, commutative: false}, - {name: "ApproximateReciprocalFloat64x4", argLength: 1, commutative: false}, - {name: "ApproximateReciprocalMaskedFloat64x4", argLength: 2, commutative: false}, - {name: "ApproximateReciprocalOfSqrtFloat64x4", argLength: 1, commutative: false}, - {name: "ApproximateReciprocalOfSqrtMaskedFloat64x4", argLength: 2, commutative: false}, - {name: "CeilFloat64x4", argLength: 1, commutative: false}, - {name: "CompressFloat64x4", argLength: 2, commutative: false}, - {name: "DivFloat64x4", argLength: 2, commutative: false}, - {name: "DivMaskedFloat64x4", argLength: 3, commutative: false}, - {name: "EqualFloat64x4", argLength: 2, commutative: true}, - {name: "EqualMaskedFloat64x4", argLength: 3, commutative: true}, - {name: "FloorFloat64x4", argLength: 1, commutative: false}, - {name: "FusedMultiplyAddFloat64x4", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddMaskedFloat64x4", argLength: 4, commutative: false}, - {name: "FusedMultiplyAddSubFloat64x4", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSubMaskedFloat64x4", argLength: 4, commutative: 
false}, - {name: "FusedMultiplySubAddFloat64x4", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAddMaskedFloat64x4", argLength: 4, commutative: false}, - {name: "GreaterFloat64x4", argLength: 2, commutative: false}, - {name: "GreaterEqualFloat64x4", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedFloat64x4", argLength: 3, commutative: false}, - {name: "GreaterMaskedFloat64x4", argLength: 3, commutative: false}, - {name: "IsNanFloat64x4", argLength: 2, commutative: true}, - {name: "IsNanMaskedFloat64x4", argLength: 3, commutative: true}, - {name: "LessFloat64x4", argLength: 2, commutative: false}, - {name: "LessEqualFloat64x4", argLength: 2, commutative: false}, - {name: "LessEqualMaskedFloat64x4", argLength: 3, commutative: false}, - {name: "LessMaskedFloat64x4", argLength: 3, commutative: false}, - {name: "MaxFloat64x4", argLength: 2, commutative: true}, - {name: "MaxMaskedFloat64x4", argLength: 3, commutative: true}, - {name: "MinFloat64x4", argLength: 2, commutative: true}, {name: "MinMaskedFloat64x4", argLength: 3, commutative: true}, - {name: "MulFloat64x4", argLength: 2, commutative: true}, - {name: "MulByPowOf2Float64x4", argLength: 2, commutative: false}, - {name: "MulByPowOf2MaskedFloat64x4", argLength: 3, commutative: false}, - {name: "MulMaskedFloat64x4", argLength: 3, commutative: true}, - {name: "NotEqualFloat64x4", argLength: 2, commutative: true}, - {name: "NotEqualMaskedFloat64x4", argLength: 3, commutative: true}, - {name: "PairwiseAddFloat64x4", argLength: 2, commutative: false}, - {name: "PairwiseSubFloat64x4", argLength: 2, commutative: false}, - {name: "RoundFloat64x4", argLength: 1, commutative: false}, - {name: "SqrtFloat64x4", argLength: 1, commutative: false}, - {name: "SqrtMaskedFloat64x4", argLength: 2, commutative: false}, - {name: "SubFloat64x4", argLength: 2, commutative: false}, - {name: "SubMaskedFloat64x4", argLength: 3, commutative: false}, - {name: "TruncFloat64x4", argLength: 1, commutative: false}, - {name: "AddFloat64x8", argLength: 2, commutative: true}, - {name: "AddMaskedFloat64x8", argLength: 3, commutative: true}, - {name: "ApproximateReciprocalFloat64x8", argLength: 1, commutative: false}, - {name: "ApproximateReciprocalMaskedFloat64x8", argLength: 2, commutative: false}, - {name: "ApproximateReciprocalOfSqrtFloat64x8", argLength: 1, commutative: false}, - {name: "ApproximateReciprocalOfSqrtMaskedFloat64x8", argLength: 2, commutative: false}, - {name: "CompressFloat64x8", argLength: 2, commutative: false}, - {name: "DivFloat64x8", argLength: 2, commutative: false}, - {name: "DivMaskedFloat64x8", argLength: 3, commutative: false}, - {name: "EqualFloat64x8", argLength: 2, commutative: true}, - {name: "EqualMaskedFloat64x8", argLength: 3, commutative: true}, - {name: "FusedMultiplyAddFloat64x8", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddMaskedFloat64x8", argLength: 4, commutative: false}, - {name: "FusedMultiplyAddSubFloat64x8", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSubMaskedFloat64x8", argLength: 4, commutative: false}, - {name: "FusedMultiplySubAddFloat64x8", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAddMaskedFloat64x8", argLength: 4, commutative: false}, - {name: "GreaterFloat64x8", argLength: 2, commutative: false}, - {name: "GreaterEqualFloat64x8", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedFloat64x8", argLength: 3, commutative: false}, - {name: "GreaterMaskedFloat64x8", argLength: 3, commutative: false}, - {name: "IsNanFloat64x8", 
argLength: 2, commutative: true}, - {name: "IsNanMaskedFloat64x8", argLength: 3, commutative: true}, - {name: "LessFloat64x8", argLength: 2, commutative: false}, - {name: "LessEqualFloat64x8", argLength: 2, commutative: false}, - {name: "LessEqualMaskedFloat64x8", argLength: 3, commutative: false}, - {name: "LessMaskedFloat64x8", argLength: 3, commutative: false}, - {name: "MaxFloat64x8", argLength: 2, commutative: true}, - {name: "MaxMaskedFloat64x8", argLength: 3, commutative: true}, - {name: "MinFloat64x8", argLength: 2, commutative: true}, {name: "MinMaskedFloat64x8", argLength: 3, commutative: true}, - {name: "MulFloat64x8", argLength: 2, commutative: true}, + {name: "MinMaskedInt8x16", argLength: 3, commutative: true}, + {name: "MinMaskedInt8x32", argLength: 3, commutative: true}, + {name: "MinMaskedInt8x64", argLength: 3, commutative: true}, + {name: "MinMaskedInt16x8", argLength: 3, commutative: true}, + {name: "MinMaskedInt16x16", argLength: 3, commutative: true}, + {name: "MinMaskedInt16x32", argLength: 3, commutative: true}, + {name: "MinMaskedInt32x4", argLength: 3, commutative: true}, + {name: "MinMaskedInt32x8", argLength: 3, commutative: true}, + {name: "MinMaskedInt32x16", argLength: 3, commutative: true}, + {name: "MinMaskedInt64x2", argLength: 3, commutative: true}, + {name: "MinMaskedInt64x4", argLength: 3, commutative: true}, + {name: "MinMaskedInt64x8", argLength: 3, commutative: true}, + {name: "MinMaskedUint8x16", argLength: 3, commutative: true}, + {name: "MinMaskedUint8x32", argLength: 3, commutative: true}, + {name: "MinMaskedUint8x64", argLength: 3, commutative: true}, + {name: "MinMaskedUint16x8", argLength: 3, commutative: true}, + {name: "MinMaskedUint16x16", argLength: 3, commutative: true}, + {name: "MinMaskedUint16x32", argLength: 3, commutative: true}, + {name: "MinMaskedUint32x4", argLength: 3, commutative: true}, + {name: "MinMaskedUint32x8", argLength: 3, commutative: true}, + {name: "MinMaskedUint32x16", argLength: 3, commutative: true}, + {name: "MinMaskedUint64x2", argLength: 3, commutative: true}, + {name: "MinMaskedUint64x4", argLength: 3, commutative: true}, + {name: "MinMaskedUint64x8", argLength: 3, commutative: true}, + {name: "MinUint8x16", argLength: 2, commutative: true}, + {name: "MinUint8x32", argLength: 2, commutative: true}, + {name: "MinUint8x64", argLength: 2, commutative: true}, + {name: "MinUint16x8", argLength: 2, commutative: true}, + {name: "MinUint16x16", argLength: 2, commutative: true}, + {name: "MinUint16x32", argLength: 2, commutative: true}, + {name: "MinUint32x4", argLength: 2, commutative: true}, + {name: "MinUint32x8", argLength: 2, commutative: true}, + {name: "MinUint32x16", argLength: 2, commutative: true}, + {name: "MinUint64x2", argLength: 2, commutative: true}, + {name: "MinUint64x4", argLength: 2, commutative: true}, + {name: "MinUint64x8", argLength: 2, commutative: true}, + {name: "MulByPowOf2Float32x4", argLength: 2, commutative: false}, + {name: "MulByPowOf2Float32x8", argLength: 2, commutative: false}, + {name: "MulByPowOf2Float32x16", argLength: 2, commutative: false}, + {name: "MulByPowOf2Float64x2", argLength: 2, commutative: false}, + {name: "MulByPowOf2Float64x4", argLength: 2, commutative: false}, {name: "MulByPowOf2Float64x8", argLength: 2, commutative: false}, + {name: "MulByPowOf2MaskedFloat32x4", argLength: 3, commutative: false}, + {name: "MulByPowOf2MaskedFloat32x8", argLength: 3, commutative: false}, + {name: "MulByPowOf2MaskedFloat32x16", argLength: 3, commutative: false}, + {name: 
"MulByPowOf2MaskedFloat64x2", argLength: 3, commutative: false}, + {name: "MulByPowOf2MaskedFloat64x4", argLength: 3, commutative: false}, {name: "MulByPowOf2MaskedFloat64x8", argLength: 3, commutative: false}, - {name: "MulMaskedFloat64x8", argLength: 3, commutative: true}, - {name: "NotEqualFloat64x8", argLength: 2, commutative: true}, - {name: "NotEqualMaskedFloat64x8", argLength: 3, commutative: true}, - {name: "SqrtFloat64x8", argLength: 1, commutative: false}, - {name: "SqrtMaskedFloat64x8", argLength: 2, commutative: false}, - {name: "SubFloat64x8", argLength: 2, commutative: false}, - {name: "SubMaskedFloat64x8", argLength: 3, commutative: false}, - {name: "AbsoluteInt16x16", argLength: 1, commutative: false}, - {name: "AbsoluteMaskedInt16x16", argLength: 2, commutative: false}, - {name: "AddInt16x16", argLength: 2, commutative: true}, - {name: "AddMaskedInt16x16", argLength: 3, commutative: true}, - {name: "AndInt16x16", argLength: 2, commutative: true}, - {name: "AndNotInt16x16", argLength: 2, commutative: false}, - {name: "CompressInt16x16", argLength: 2, commutative: false}, - {name: "EqualInt16x16", argLength: 2, commutative: true}, - {name: "EqualMaskedInt16x16", argLength: 3, commutative: true}, - {name: "GreaterInt16x16", argLength: 2, commutative: false}, - {name: "GreaterEqualInt16x16", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedInt16x16", argLength: 3, commutative: false}, - {name: "GreaterMaskedInt16x16", argLength: 3, commutative: false}, - {name: "LessInt16x16", argLength: 2, commutative: false}, - {name: "LessEqualInt16x16", argLength: 2, commutative: false}, - {name: "LessEqualMaskedInt16x16", argLength: 3, commutative: false}, - {name: "LessMaskedInt16x16", argLength: 3, commutative: false}, - {name: "MaxInt16x16", argLength: 2, commutative: true}, - {name: "MaxMaskedInt16x16", argLength: 3, commutative: true}, - {name: "MinInt16x16", argLength: 2, commutative: true}, - {name: "MinMaskedInt16x16", argLength: 3, commutative: true}, + {name: "MulEvenWidenInt32x4", argLength: 2, commutative: true}, + {name: "MulEvenWidenInt32x8", argLength: 2, commutative: true}, + {name: "MulEvenWidenInt64x2", argLength: 2, commutative: true}, + {name: "MulEvenWidenInt64x4", argLength: 2, commutative: true}, + {name: "MulEvenWidenInt64x8", argLength: 2, commutative: true}, + {name: "MulEvenWidenMaskedInt64x2", argLength: 3, commutative: true}, + {name: "MulEvenWidenMaskedInt64x4", argLength: 3, commutative: true}, + {name: "MulEvenWidenMaskedInt64x8", argLength: 3, commutative: true}, + {name: "MulEvenWidenMaskedUint64x2", argLength: 3, commutative: true}, + {name: "MulEvenWidenMaskedUint64x4", argLength: 3, commutative: true}, + {name: "MulEvenWidenMaskedUint64x8", argLength: 3, commutative: true}, + {name: "MulEvenWidenUint32x4", argLength: 2, commutative: true}, + {name: "MulEvenWidenUint32x8", argLength: 2, commutative: true}, + {name: "MulEvenWidenUint64x2", argLength: 2, commutative: true}, + {name: "MulEvenWidenUint64x4", argLength: 2, commutative: true}, + {name: "MulEvenWidenUint64x8", argLength: 2, commutative: true}, + {name: "MulFloat32x4", argLength: 2, commutative: true}, + {name: "MulFloat32x8", argLength: 2, commutative: true}, + {name: "MulFloat32x16", argLength: 2, commutative: true}, + {name: "MulFloat64x2", argLength: 2, commutative: true}, + {name: "MulFloat64x4", argLength: 2, commutative: true}, + {name: "MulFloat64x8", argLength: 2, commutative: true}, + {name: "MulHighInt16x8", argLength: 2, commutative: true}, {name: "MulHighInt16x16", 
argLength: 2, commutative: true}, + {name: "MulHighInt16x32", argLength: 2, commutative: true}, + {name: "MulHighMaskedInt16x8", argLength: 3, commutative: true}, {name: "MulHighMaskedInt16x16", argLength: 3, commutative: true}, + {name: "MulHighMaskedInt16x32", argLength: 3, commutative: true}, + {name: "MulHighMaskedUint16x8", argLength: 3, commutative: true}, + {name: "MulHighMaskedUint16x16", argLength: 3, commutative: true}, + {name: "MulHighMaskedUint16x32", argLength: 3, commutative: true}, + {name: "MulHighUint16x8", argLength: 2, commutative: true}, + {name: "MulHighUint16x16", argLength: 2, commutative: true}, + {name: "MulHighUint16x32", argLength: 2, commutative: true}, + {name: "MulLowInt16x8", argLength: 2, commutative: true}, {name: "MulLowInt16x16", argLength: 2, commutative: true}, + {name: "MulLowInt16x32", argLength: 2, commutative: true}, + {name: "MulLowInt32x4", argLength: 2, commutative: true}, + {name: "MulLowInt32x8", argLength: 2, commutative: true}, + {name: "MulLowInt32x16", argLength: 2, commutative: true}, + {name: "MulLowInt64x2", argLength: 2, commutative: true}, + {name: "MulLowInt64x4", argLength: 2, commutative: true}, + {name: "MulLowInt64x8", argLength: 2, commutative: true}, + {name: "MulLowMaskedInt16x8", argLength: 3, commutative: true}, {name: "MulLowMaskedInt16x16", argLength: 3, commutative: true}, + {name: "MulLowMaskedInt16x32", argLength: 3, commutative: true}, + {name: "MulLowMaskedInt32x4", argLength: 3, commutative: true}, + {name: "MulLowMaskedInt32x8", argLength: 3, commutative: true}, + {name: "MulLowMaskedInt32x16", argLength: 3, commutative: true}, + {name: "MulLowMaskedInt64x2", argLength: 3, commutative: true}, + {name: "MulLowMaskedInt64x4", argLength: 3, commutative: true}, + {name: "MulLowMaskedInt64x8", argLength: 3, commutative: true}, + {name: "MulMaskedFloat32x4", argLength: 3, commutative: true}, + {name: "MulMaskedFloat32x8", argLength: 3, commutative: true}, + {name: "MulMaskedFloat32x16", argLength: 3, commutative: true}, + {name: "MulMaskedFloat64x2", argLength: 3, commutative: true}, + {name: "MulMaskedFloat64x4", argLength: 3, commutative: true}, + {name: "MulMaskedFloat64x8", argLength: 3, commutative: true}, + {name: "NotEqualFloat32x4", argLength: 2, commutative: true}, + {name: "NotEqualFloat32x8", argLength: 2, commutative: true}, + {name: "NotEqualFloat32x16", argLength: 2, commutative: true}, + {name: "NotEqualFloat64x2", argLength: 2, commutative: true}, + {name: "NotEqualFloat64x4", argLength: 2, commutative: true}, + {name: "NotEqualFloat64x8", argLength: 2, commutative: true}, + {name: "NotEqualInt8x16", argLength: 2, commutative: true}, + {name: "NotEqualInt8x32", argLength: 2, commutative: true}, + {name: "NotEqualInt8x64", argLength: 2, commutative: true}, + {name: "NotEqualInt16x8", argLength: 2, commutative: true}, {name: "NotEqualInt16x16", argLength: 2, commutative: true}, - {name: "NotEqualMaskedInt16x16", argLength: 3, commutative: true}, - {name: "OrInt16x16", argLength: 2, commutative: true}, - {name: "PairDotProdInt16x16", argLength: 2, commutative: false}, - {name: "PairDotProdMaskedInt16x16", argLength: 3, commutative: false}, - {name: "PairwiseAddInt16x16", argLength: 2, commutative: false}, - {name: "PairwiseSubInt16x16", argLength: 2, commutative: false}, - {name: "PopCountInt16x16", argLength: 1, commutative: false}, - {name: "PopCountMaskedInt16x16", argLength: 2, commutative: false}, - {name: "SaturatedAddInt16x16", argLength: 2, commutative: true}, - {name: "SaturatedAddMaskedInt16x16", 
argLength: 3, commutative: true}, - {name: "SaturatedPairwiseAddInt16x16", argLength: 2, commutative: false}, - {name: "SaturatedPairwiseSubInt16x16", argLength: 2, commutative: false}, - {name: "SaturatedSubInt16x16", argLength: 2, commutative: false}, - {name: "SaturatedSubMaskedInt16x16", argLength: 3, commutative: false}, - {name: "ShiftAllLeftInt16x16", argLength: 2, commutative: false}, - {name: "ShiftAllLeftMaskedInt16x16", argLength: 3, commutative: false}, - {name: "ShiftAllRightInt16x16", argLength: 2, commutative: false}, - {name: "ShiftAllRightMaskedInt16x16", argLength: 3, commutative: false}, - {name: "ShiftLeftInt16x16", argLength: 2, commutative: false}, - {name: "ShiftLeftAndFillUpperFromInt16x16", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedInt16x16", argLength: 4, commutative: false}, - {name: "ShiftLeftMaskedInt16x16", argLength: 3, commutative: false}, - {name: "ShiftRightInt16x16", argLength: 2, commutative: false}, - {name: "ShiftRightAndFillUpperFromInt16x16", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedInt16x16", argLength: 4, commutative: false}, - {name: "ShiftRightMaskedInt16x16", argLength: 3, commutative: false}, - {name: "SignInt16x16", argLength: 2, commutative: false}, - {name: "SubInt16x16", argLength: 2, commutative: false}, - {name: "SubMaskedInt16x16", argLength: 3, commutative: false}, - {name: "XorInt16x16", argLength: 2, commutative: true}, - {name: "AbsoluteInt16x32", argLength: 1, commutative: false}, - {name: "AbsoluteMaskedInt16x32", argLength: 2, commutative: false}, - {name: "AddInt16x32", argLength: 2, commutative: true}, - {name: "AddMaskedInt16x32", argLength: 3, commutative: true}, - {name: "CompressInt16x32", argLength: 2, commutative: false}, - {name: "EqualInt16x32", argLength: 2, commutative: true}, - {name: "EqualMaskedInt16x32", argLength: 3, commutative: true}, - {name: "GreaterInt16x32", argLength: 2, commutative: false}, - {name: "GreaterEqualInt16x32", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedInt16x32", argLength: 3, commutative: false}, - {name: "GreaterMaskedInt16x32", argLength: 3, commutative: false}, - {name: "LessInt16x32", argLength: 2, commutative: false}, - {name: "LessEqualInt16x32", argLength: 2, commutative: false}, - {name: "LessEqualMaskedInt16x32", argLength: 3, commutative: false}, - {name: "LessMaskedInt16x32", argLength: 3, commutative: false}, - {name: "MaxInt16x32", argLength: 2, commutative: true}, - {name: "MaxMaskedInt16x32", argLength: 3, commutative: true}, - {name: "MinInt16x32", argLength: 2, commutative: true}, - {name: "MinMaskedInt16x32", argLength: 3, commutative: true}, - {name: "MulHighInt16x32", argLength: 2, commutative: true}, - {name: "MulHighMaskedInt16x32", argLength: 3, commutative: true}, - {name: "MulLowInt16x32", argLength: 2, commutative: true}, - {name: "MulLowMaskedInt16x32", argLength: 3, commutative: true}, {name: "NotEqualInt16x32", argLength: 2, commutative: true}, - {name: "NotEqualMaskedInt16x32", argLength: 3, commutative: true}, - {name: "PairDotProdInt16x32", argLength: 2, commutative: false}, - {name: "PairDotProdMaskedInt16x32", argLength: 3, commutative: false}, - {name: "PopCountInt16x32", argLength: 1, commutative: false}, - {name: "PopCountMaskedInt16x32", argLength: 2, commutative: false}, - {name: "SaturatedAddInt16x32", argLength: 2, commutative: true}, - {name: "SaturatedAddMaskedInt16x32", argLength: 3, commutative: true}, - {name: "SaturatedSubInt16x32", argLength: 2, 
commutative: false}, - {name: "SaturatedSubMaskedInt16x32", argLength: 3, commutative: false}, - {name: "ShiftAllLeftInt16x32", argLength: 2, commutative: false}, - {name: "ShiftAllLeftMaskedInt16x32", argLength: 3, commutative: false}, - {name: "ShiftAllRightInt16x32", argLength: 2, commutative: false}, - {name: "ShiftAllRightMaskedInt16x32", argLength: 3, commutative: false}, - {name: "ShiftLeftInt16x32", argLength: 2, commutative: false}, - {name: "ShiftLeftAndFillUpperFromInt16x32", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedInt16x32", argLength: 4, commutative: false}, - {name: "ShiftLeftMaskedInt16x32", argLength: 3, commutative: false}, - {name: "ShiftRightInt16x32", argLength: 2, commutative: false}, - {name: "ShiftRightAndFillUpperFromInt16x32", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedInt16x32", argLength: 4, commutative: false}, - {name: "ShiftRightMaskedInt16x32", argLength: 3, commutative: false}, - {name: "SubInt16x32", argLength: 2, commutative: false}, - {name: "SubMaskedInt16x32", argLength: 3, commutative: false}, - {name: "AbsoluteInt16x8", argLength: 1, commutative: false}, - {name: "AbsoluteMaskedInt16x8", argLength: 2, commutative: false}, - {name: "AddInt16x8", argLength: 2, commutative: true}, - {name: "AddMaskedInt16x8", argLength: 3, commutative: true}, - {name: "AndInt16x8", argLength: 2, commutative: true}, - {name: "AndNotInt16x8", argLength: 2, commutative: false}, - {name: "CompressInt16x8", argLength: 2, commutative: false}, - {name: "EqualInt16x8", argLength: 2, commutative: true}, - {name: "EqualMaskedInt16x8", argLength: 3, commutative: true}, - {name: "GreaterInt16x8", argLength: 2, commutative: false}, - {name: "GreaterEqualInt16x8", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedInt16x8", argLength: 3, commutative: false}, - {name: "GreaterMaskedInt16x8", argLength: 3, commutative: false}, - {name: "LessInt16x8", argLength: 2, commutative: false}, - {name: "LessEqualInt16x8", argLength: 2, commutative: false}, - {name: "LessEqualMaskedInt16x8", argLength: 3, commutative: false}, - {name: "LessMaskedInt16x8", argLength: 3, commutative: false}, - {name: "MaxInt16x8", argLength: 2, commutative: true}, - {name: "MaxMaskedInt16x8", argLength: 3, commutative: true}, - {name: "MinInt16x8", argLength: 2, commutative: true}, - {name: "MinMaskedInt16x8", argLength: 3, commutative: true}, - {name: "MulHighInt16x8", argLength: 2, commutative: true}, - {name: "MulHighMaskedInt16x8", argLength: 3, commutative: true}, - {name: "MulLowInt16x8", argLength: 2, commutative: true}, - {name: "MulLowMaskedInt16x8", argLength: 3, commutative: true}, - {name: "NotEqualInt16x8", argLength: 2, commutative: true}, - {name: "NotEqualMaskedInt16x8", argLength: 3, commutative: true}, - {name: "OrInt16x8", argLength: 2, commutative: true}, - {name: "PairDotProdInt16x8", argLength: 2, commutative: false}, - {name: "PairDotProdMaskedInt16x8", argLength: 3, commutative: false}, - {name: "PairwiseAddInt16x8", argLength: 2, commutative: false}, - {name: "PairwiseSubInt16x8", argLength: 2, commutative: false}, - {name: "PopCountInt16x8", argLength: 1, commutative: false}, - {name: "PopCountMaskedInt16x8", argLength: 2, commutative: false}, - {name: "SaturatedAddInt16x8", argLength: 2, commutative: true}, - {name: "SaturatedAddMaskedInt16x8", argLength: 3, commutative: true}, - {name: "SaturatedPairwiseAddInt16x8", argLength: 2, commutative: false}, - {name: "SaturatedPairwiseSubInt16x8", argLength: 2, 
commutative: false}, - {name: "SaturatedSubInt16x8", argLength: 2, commutative: false}, - {name: "SaturatedSubMaskedInt16x8", argLength: 3, commutative: false}, - {name: "ShiftAllLeftInt16x8", argLength: 2, commutative: false}, - {name: "ShiftAllLeftMaskedInt16x8", argLength: 3, commutative: false}, - {name: "ShiftAllRightInt16x8", argLength: 2, commutative: false}, - {name: "ShiftAllRightMaskedInt16x8", argLength: 3, commutative: false}, - {name: "ShiftLeftInt16x8", argLength: 2, commutative: false}, - {name: "ShiftLeftAndFillUpperFromInt16x8", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedInt16x8", argLength: 4, commutative: false}, - {name: "ShiftLeftMaskedInt16x8", argLength: 3, commutative: false}, - {name: "ShiftRightInt16x8", argLength: 2, commutative: false}, - {name: "ShiftRightAndFillUpperFromInt16x8", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedInt16x8", argLength: 4, commutative: false}, - {name: "ShiftRightMaskedInt16x8", argLength: 3, commutative: false}, - {name: "SignInt16x8", argLength: 2, commutative: false}, - {name: "SubInt16x8", argLength: 2, commutative: false}, - {name: "SubMaskedInt16x8", argLength: 3, commutative: false}, - {name: "XorInt16x8", argLength: 2, commutative: true}, - {name: "AbsoluteInt32x16", argLength: 1, commutative: false}, - {name: "AbsoluteMaskedInt32x16", argLength: 2, commutative: false}, - {name: "AddInt32x16", argLength: 2, commutative: true}, - {name: "AddMaskedInt32x16", argLength: 3, commutative: true}, - {name: "AndInt32x16", argLength: 2, commutative: true}, - {name: "AndMaskedInt32x16", argLength: 3, commutative: true}, - {name: "AndNotInt32x16", argLength: 2, commutative: false}, - {name: "AndNotMaskedInt32x16", argLength: 3, commutative: false}, - {name: "CompressInt32x16", argLength: 2, commutative: false}, - {name: "EqualInt32x16", argLength: 2, commutative: true}, - {name: "EqualMaskedInt32x16", argLength: 3, commutative: true}, - {name: "GreaterInt32x16", argLength: 2, commutative: false}, - {name: "GreaterEqualInt32x16", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedInt32x16", argLength: 3, commutative: false}, - {name: "GreaterMaskedInt32x16", argLength: 3, commutative: false}, - {name: "LessInt32x16", argLength: 2, commutative: false}, - {name: "LessEqualInt32x16", argLength: 2, commutative: false}, - {name: "LessEqualMaskedInt32x16", argLength: 3, commutative: false}, - {name: "LessMaskedInt32x16", argLength: 3, commutative: false}, - {name: "MaxInt32x16", argLength: 2, commutative: true}, - {name: "MaxMaskedInt32x16", argLength: 3, commutative: true}, - {name: "MinInt32x16", argLength: 2, commutative: true}, - {name: "MinMaskedInt32x16", argLength: 3, commutative: true}, - {name: "MulLowInt32x16", argLength: 2, commutative: true}, - {name: "MulLowMaskedInt32x16", argLength: 3, commutative: true}, + {name: "NotEqualInt32x4", argLength: 2, commutative: true}, + {name: "NotEqualInt32x8", argLength: 2, commutative: true}, {name: "NotEqualInt32x16", argLength: 2, commutative: true}, + {name: "NotEqualInt64x2", argLength: 2, commutative: true}, + {name: "NotEqualInt64x4", argLength: 2, commutative: true}, + {name: "NotEqualInt64x8", argLength: 2, commutative: true}, + {name: "NotEqualMaskedFloat32x4", argLength: 3, commutative: true}, + {name: "NotEqualMaskedFloat32x8", argLength: 3, commutative: true}, + {name: "NotEqualMaskedFloat32x16", argLength: 3, commutative: true}, + {name: "NotEqualMaskedFloat64x2", argLength: 3, commutative: true}, + {name: 
"NotEqualMaskedFloat64x4", argLength: 3, commutative: true}, + {name: "NotEqualMaskedFloat64x8", argLength: 3, commutative: true}, + {name: "NotEqualMaskedInt8x16", argLength: 3, commutative: true}, + {name: "NotEqualMaskedInt8x32", argLength: 3, commutative: true}, + {name: "NotEqualMaskedInt8x64", argLength: 3, commutative: true}, + {name: "NotEqualMaskedInt16x8", argLength: 3, commutative: true}, + {name: "NotEqualMaskedInt16x16", argLength: 3, commutative: true}, + {name: "NotEqualMaskedInt16x32", argLength: 3, commutative: true}, + {name: "NotEqualMaskedInt32x4", argLength: 3, commutative: true}, + {name: "NotEqualMaskedInt32x8", argLength: 3, commutative: true}, {name: "NotEqualMaskedInt32x16", argLength: 3, commutative: true}, + {name: "NotEqualMaskedInt64x2", argLength: 3, commutative: true}, + {name: "NotEqualMaskedInt64x4", argLength: 3, commutative: true}, + {name: "NotEqualMaskedInt64x8", argLength: 3, commutative: true}, + {name: "NotEqualMaskedUint8x16", argLength: 3, commutative: true}, + {name: "NotEqualMaskedUint8x32", argLength: 3, commutative: true}, + {name: "NotEqualMaskedUint8x64", argLength: 3, commutative: true}, + {name: "NotEqualMaskedUint16x8", argLength: 3, commutative: true}, + {name: "NotEqualMaskedUint16x16", argLength: 3, commutative: true}, + {name: "NotEqualMaskedUint16x32", argLength: 3, commutative: true}, + {name: "NotEqualMaskedUint32x4", argLength: 3, commutative: true}, + {name: "NotEqualMaskedUint32x8", argLength: 3, commutative: true}, + {name: "NotEqualMaskedUint32x16", argLength: 3, commutative: true}, + {name: "NotEqualMaskedUint64x2", argLength: 3, commutative: true}, + {name: "NotEqualMaskedUint64x4", argLength: 3, commutative: true}, + {name: "NotEqualMaskedUint64x8", argLength: 3, commutative: true}, + {name: "NotEqualUint8x16", argLength: 2, commutative: true}, + {name: "NotEqualUint8x32", argLength: 2, commutative: true}, + {name: "NotEqualUint8x64", argLength: 2, commutative: true}, + {name: "NotEqualUint16x8", argLength: 2, commutative: true}, + {name: "NotEqualUint16x16", argLength: 2, commutative: true}, + {name: "NotEqualUint16x32", argLength: 2, commutative: true}, + {name: "NotEqualUint32x4", argLength: 2, commutative: true}, + {name: "NotEqualUint32x8", argLength: 2, commutative: true}, + {name: "NotEqualUint32x16", argLength: 2, commutative: true}, + {name: "NotEqualUint64x2", argLength: 2, commutative: true}, + {name: "NotEqualUint64x4", argLength: 2, commutative: true}, + {name: "NotEqualUint64x8", argLength: 2, commutative: true}, + {name: "OrInt8x16", argLength: 2, commutative: true}, + {name: "OrInt8x32", argLength: 2, commutative: true}, + {name: "OrInt16x8", argLength: 2, commutative: true}, + {name: "OrInt16x16", argLength: 2, commutative: true}, + {name: "OrInt32x4", argLength: 2, commutative: true}, + {name: "OrInt32x8", argLength: 2, commutative: true}, {name: "OrInt32x16", argLength: 2, commutative: true}, + {name: "OrInt64x2", argLength: 2, commutative: true}, + {name: "OrInt64x4", argLength: 2, commutative: true}, + {name: "OrInt64x8", argLength: 2, commutative: true}, + {name: "OrMaskedInt32x4", argLength: 3, commutative: true}, + {name: "OrMaskedInt32x8", argLength: 3, commutative: true}, {name: "OrMaskedInt32x16", argLength: 3, commutative: true}, + {name: "OrMaskedInt64x2", argLength: 3, commutative: true}, + {name: "OrMaskedInt64x4", argLength: 3, commutative: true}, + {name: "OrMaskedInt64x8", argLength: 3, commutative: true}, + {name: "OrMaskedUint32x4", argLength: 3, commutative: true}, + {name: 
"OrMaskedUint32x8", argLength: 3, commutative: true}, + {name: "OrMaskedUint32x16", argLength: 3, commutative: true}, + {name: "OrMaskedUint64x2", argLength: 3, commutative: true}, + {name: "OrMaskedUint64x4", argLength: 3, commutative: true}, + {name: "OrMaskedUint64x8", argLength: 3, commutative: true}, + {name: "OrUint8x16", argLength: 2, commutative: true}, + {name: "OrUint8x32", argLength: 2, commutative: true}, + {name: "OrUint16x8", argLength: 2, commutative: true}, + {name: "OrUint16x16", argLength: 2, commutative: true}, + {name: "OrUint32x4", argLength: 2, commutative: true}, + {name: "OrUint32x8", argLength: 2, commutative: true}, + {name: "OrUint32x16", argLength: 2, commutative: true}, + {name: "OrUint64x2", argLength: 2, commutative: true}, + {name: "OrUint64x4", argLength: 2, commutative: true}, + {name: "OrUint64x8", argLength: 2, commutative: true}, + {name: "PairDotProdAccumulateInt32x4", argLength: 3, commutative: false}, + {name: "PairDotProdAccumulateInt32x8", argLength: 3, commutative: false}, {name: "PairDotProdAccumulateInt32x16", argLength: 3, commutative: false}, + {name: "PairDotProdAccumulateMaskedInt32x4", argLength: 4, commutative: false}, + {name: "PairDotProdAccumulateMaskedInt32x8", argLength: 4, commutative: false}, {name: "PairDotProdAccumulateMaskedInt32x16", argLength: 4, commutative: false}, - {name: "PopCountInt32x16", argLength: 1, commutative: false}, - {name: "PopCountMaskedInt32x16", argLength: 2, commutative: false}, - {name: "RotateLeftInt32x16", argLength: 2, commutative: false}, - {name: "RotateLeftMaskedInt32x16", argLength: 3, commutative: false}, - {name: "RotateRightInt32x16", argLength: 2, commutative: false}, - {name: "RotateRightMaskedInt32x16", argLength: 3, commutative: false}, - {name: "SaturatedPairDotProdAccumulateInt32x16", argLength: 3, commutative: false}, - {name: "SaturatedPairDotProdAccumulateMaskedInt32x16", argLength: 4, commutative: false}, - {name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x16", argLength: 3, commutative: false}, - {name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16", argLength: 4, commutative: false}, - {name: "ShiftAllLeftInt32x16", argLength: 2, commutative: false}, - {name: "ShiftAllLeftMaskedInt32x16", argLength: 3, commutative: false}, - {name: "ShiftAllRightInt32x16", argLength: 2, commutative: false}, - {name: "ShiftAllRightMaskedInt32x16", argLength: 3, commutative: false}, - {name: "ShiftLeftInt32x16", argLength: 2, commutative: false}, - {name: "ShiftLeftAndFillUpperFromInt32x16", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedInt32x16", argLength: 4, commutative: false}, - {name: "ShiftLeftMaskedInt32x16", argLength: 3, commutative: false}, - {name: "ShiftRightInt32x16", argLength: 2, commutative: false}, - {name: "ShiftRightAndFillUpperFromInt32x16", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedInt32x16", argLength: 4, commutative: false}, - {name: "ShiftRightMaskedInt32x16", argLength: 3, commutative: false}, - {name: "SubInt32x16", argLength: 2, commutative: false}, - {name: "SubMaskedInt32x16", argLength: 3, commutative: false}, - {name: "UnsignedSignedQuadDotProdAccumulateInt32x16", argLength: 3, commutative: false}, - {name: "UnsignedSignedQuadDotProdAccumulateMaskedInt32x16", argLength: 4, commutative: false}, - {name: "XorInt32x16", argLength: 2, commutative: true}, - {name: "XorMaskedInt32x16", argLength: 3, commutative: true}, - {name: "AbsoluteInt32x4", argLength: 1, commutative: false}, - {name: 
"AbsoluteMaskedInt32x4", argLength: 2, commutative: false}, - {name: "AddInt32x4", argLength: 2, commutative: true}, - {name: "AddMaskedInt32x4", argLength: 3, commutative: true}, - {name: "AndInt32x4", argLength: 2, commutative: true}, - {name: "AndMaskedInt32x4", argLength: 3, commutative: true}, - {name: "AndNotInt32x4", argLength: 2, commutative: false}, - {name: "AndNotMaskedInt32x4", argLength: 3, commutative: false}, - {name: "CompressInt32x4", argLength: 2, commutative: false}, - {name: "EqualInt32x4", argLength: 2, commutative: true}, - {name: "EqualMaskedInt32x4", argLength: 3, commutative: true}, - {name: "GreaterInt32x4", argLength: 2, commutative: false}, - {name: "GreaterEqualInt32x4", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedInt32x4", argLength: 3, commutative: false}, - {name: "GreaterMaskedInt32x4", argLength: 3, commutative: false}, - {name: "LessInt32x4", argLength: 2, commutative: false}, - {name: "LessEqualInt32x4", argLength: 2, commutative: false}, - {name: "LessEqualMaskedInt32x4", argLength: 3, commutative: false}, - {name: "LessMaskedInt32x4", argLength: 3, commutative: false}, - {name: "MaxInt32x4", argLength: 2, commutative: true}, - {name: "MaxMaskedInt32x4", argLength: 3, commutative: true}, - {name: "MinInt32x4", argLength: 2, commutative: true}, - {name: "MinMaskedInt32x4", argLength: 3, commutative: true}, - {name: "MulEvenWidenInt32x4", argLength: 2, commutative: true}, - {name: "MulLowInt32x4", argLength: 2, commutative: true}, - {name: "MulLowMaskedInt32x4", argLength: 3, commutative: true}, - {name: "NotEqualInt32x4", argLength: 2, commutative: true}, - {name: "NotEqualMaskedInt32x4", argLength: 3, commutative: true}, - {name: "OrInt32x4", argLength: 2, commutative: true}, - {name: "OrMaskedInt32x4", argLength: 3, commutative: true}, - {name: "PairDotProdAccumulateInt32x4", argLength: 3, commutative: false}, - {name: "PairDotProdAccumulateMaskedInt32x4", argLength: 4, commutative: false}, + {name: "PairDotProdInt16x8", argLength: 2, commutative: false}, + {name: "PairDotProdInt16x16", argLength: 2, commutative: false}, + {name: "PairDotProdInt16x32", argLength: 2, commutative: false}, + {name: "PairDotProdMaskedInt16x8", argLength: 3, commutative: false}, + {name: "PairDotProdMaskedInt16x16", argLength: 3, commutative: false}, + {name: "PairDotProdMaskedInt16x32", argLength: 3, commutative: false}, + {name: "PairwiseAddFloat32x4", argLength: 2, commutative: false}, + {name: "PairwiseAddFloat32x8", argLength: 2, commutative: false}, + {name: "PairwiseAddFloat64x2", argLength: 2, commutative: false}, + {name: "PairwiseAddFloat64x4", argLength: 2, commutative: false}, + {name: "PairwiseAddInt16x8", argLength: 2, commutative: false}, + {name: "PairwiseAddInt16x16", argLength: 2, commutative: false}, {name: "PairwiseAddInt32x4", argLength: 2, commutative: false}, + {name: "PairwiseAddInt32x8", argLength: 2, commutative: false}, + {name: "PairwiseAddUint16x8", argLength: 2, commutative: false}, + {name: "PairwiseAddUint16x16", argLength: 2, commutative: false}, + {name: "PairwiseAddUint32x4", argLength: 2, commutative: false}, + {name: "PairwiseAddUint32x8", argLength: 2, commutative: false}, + {name: "PairwiseSubFloat32x4", argLength: 2, commutative: false}, + {name: "PairwiseSubFloat32x8", argLength: 2, commutative: false}, + {name: "PairwiseSubFloat64x2", argLength: 2, commutative: false}, + {name: "PairwiseSubFloat64x4", argLength: 2, commutative: false}, + {name: "PairwiseSubInt16x8", argLength: 2, commutative: false}, + {name: 
"PairwiseSubInt16x16", argLength: 2, commutative: false}, {name: "PairwiseSubInt32x4", argLength: 2, commutative: false}, + {name: "PairwiseSubInt32x8", argLength: 2, commutative: false}, + {name: "PairwiseSubUint16x8", argLength: 2, commutative: false}, + {name: "PairwiseSubUint16x16", argLength: 2, commutative: false}, + {name: "PairwiseSubUint32x4", argLength: 2, commutative: false}, + {name: "PairwiseSubUint32x8", argLength: 2, commutative: false}, + {name: "Permute2Float32x4", argLength: 3, commutative: false}, + {name: "Permute2Float32x8", argLength: 3, commutative: false}, + {name: "Permute2Float32x16", argLength: 3, commutative: false}, + {name: "Permute2Float64x2", argLength: 3, commutative: false}, + {name: "Permute2Float64x4", argLength: 3, commutative: false}, + {name: "Permute2Float64x8", argLength: 3, commutative: false}, + {name: "Permute2Int8x16", argLength: 3, commutative: false}, + {name: "Permute2Int8x32", argLength: 3, commutative: false}, + {name: "Permute2Int8x64", argLength: 3, commutative: false}, + {name: "Permute2Int16x8", argLength: 3, commutative: false}, + {name: "Permute2Int16x16", argLength: 3, commutative: false}, + {name: "Permute2Int16x32", argLength: 3, commutative: false}, + {name: "Permute2Int32x4", argLength: 3, commutative: false}, + {name: "Permute2Int32x8", argLength: 3, commutative: false}, + {name: "Permute2Int32x16", argLength: 3, commutative: false}, + {name: "Permute2Int64x2", argLength: 3, commutative: false}, + {name: "Permute2Int64x4", argLength: 3, commutative: false}, + {name: "Permute2Int64x8", argLength: 3, commutative: false}, + {name: "Permute2MaskedFloat32x4", argLength: 4, commutative: false}, + {name: "Permute2MaskedFloat32x8", argLength: 4, commutative: false}, + {name: "Permute2MaskedFloat32x16", argLength: 4, commutative: false}, + {name: "Permute2MaskedFloat64x2", argLength: 4, commutative: false}, + {name: "Permute2MaskedFloat64x4", argLength: 4, commutative: false}, + {name: "Permute2MaskedFloat64x8", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt8x16", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt8x32", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt8x64", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt16x8", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt16x16", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt16x32", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt32x4", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt32x8", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt32x16", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt64x2", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt64x4", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt64x8", argLength: 4, commutative: false}, + {name: "Permute2MaskedUint8x16", argLength: 4, commutative: false}, + {name: "Permute2MaskedUint8x32", argLength: 4, commutative: false}, + {name: "Permute2MaskedUint8x64", argLength: 4, commutative: false}, + {name: "Permute2MaskedUint16x8", argLength: 4, commutative: false}, + {name: "Permute2MaskedUint16x16", argLength: 4, commutative: false}, + {name: "Permute2MaskedUint16x32", argLength: 4, commutative: false}, + {name: "Permute2MaskedUint32x4", argLength: 4, commutative: false}, + {name: "Permute2MaskedUint32x8", argLength: 4, commutative: false}, + {name: "Permute2MaskedUint32x16", argLength: 4, commutative: false}, + {name: "Permute2MaskedUint64x2", argLength: 4, 
commutative: false}, + {name: "Permute2MaskedUint64x4", argLength: 4, commutative: false}, + {name: "Permute2MaskedUint64x8", argLength: 4, commutative: false}, + {name: "Permute2Uint8x16", argLength: 3, commutative: false}, + {name: "Permute2Uint8x32", argLength: 3, commutative: false}, + {name: "Permute2Uint8x64", argLength: 3, commutative: false}, + {name: "Permute2Uint16x8", argLength: 3, commutative: false}, + {name: "Permute2Uint16x16", argLength: 3, commutative: false}, + {name: "Permute2Uint16x32", argLength: 3, commutative: false}, + {name: "Permute2Uint32x4", argLength: 3, commutative: false}, + {name: "Permute2Uint32x8", argLength: 3, commutative: false}, + {name: "Permute2Uint32x16", argLength: 3, commutative: false}, + {name: "Permute2Uint64x2", argLength: 3, commutative: false}, + {name: "Permute2Uint64x4", argLength: 3, commutative: false}, + {name: "Permute2Uint64x8", argLength: 3, commutative: false}, + {name: "PermuteFloat32x8", argLength: 2, commutative: false}, + {name: "PermuteFloat32x16", argLength: 2, commutative: false}, + {name: "PermuteFloat64x4", argLength: 2, commutative: false}, + {name: "PermuteFloat64x8", argLength: 2, commutative: false}, + {name: "PermuteInt8x16", argLength: 2, commutative: false}, + {name: "PermuteInt8x32", argLength: 2, commutative: false}, + {name: "PermuteInt8x64", argLength: 2, commutative: false}, + {name: "PermuteInt16x8", argLength: 2, commutative: false}, + {name: "PermuteInt16x16", argLength: 2, commutative: false}, + {name: "PermuteInt16x32", argLength: 2, commutative: false}, + {name: "PermuteInt32x8", argLength: 2, commutative: false}, + {name: "PermuteInt32x16", argLength: 2, commutative: false}, + {name: "PermuteInt64x4", argLength: 2, commutative: false}, + {name: "PermuteInt64x8", argLength: 2, commutative: false}, + {name: "PermuteMaskedFloat32x8", argLength: 3, commutative: false}, + {name: "PermuteMaskedFloat32x16", argLength: 3, commutative: false}, + {name: "PermuteMaskedFloat64x4", argLength: 3, commutative: false}, + {name: "PermuteMaskedFloat64x8", argLength: 3, commutative: false}, + {name: "PermuteMaskedInt8x16", argLength: 3, commutative: false}, + {name: "PermuteMaskedInt8x32", argLength: 3, commutative: false}, + {name: "PermuteMaskedInt8x64", argLength: 3, commutative: false}, + {name: "PermuteMaskedInt16x8", argLength: 3, commutative: false}, + {name: "PermuteMaskedInt16x16", argLength: 3, commutative: false}, + {name: "PermuteMaskedInt16x32", argLength: 3, commutative: false}, + {name: "PermuteMaskedInt32x8", argLength: 3, commutative: false}, + {name: "PermuteMaskedInt32x16", argLength: 3, commutative: false}, + {name: "PermuteMaskedInt64x4", argLength: 3, commutative: false}, + {name: "PermuteMaskedInt64x8", argLength: 3, commutative: false}, + {name: "PermuteMaskedUint8x16", argLength: 3, commutative: false}, + {name: "PermuteMaskedUint8x32", argLength: 3, commutative: false}, + {name: "PermuteMaskedUint8x64", argLength: 3, commutative: false}, + {name: "PermuteMaskedUint16x8", argLength: 3, commutative: false}, + {name: "PermuteMaskedUint16x16", argLength: 3, commutative: false}, + {name: "PermuteMaskedUint16x32", argLength: 3, commutative: false}, + {name: "PermuteMaskedUint32x8", argLength: 3, commutative: false}, + {name: "PermuteMaskedUint32x16", argLength: 3, commutative: false}, + {name: "PermuteMaskedUint64x4", argLength: 3, commutative: false}, + {name: "PermuteMaskedUint64x8", argLength: 3, commutative: false}, + {name: "PermuteUint8x16", argLength: 2, commutative: false}, + {name: 
"PermuteUint8x32", argLength: 2, commutative: false}, + {name: "PermuteUint8x64", argLength: 2, commutative: false}, + {name: "PermuteUint16x8", argLength: 2, commutative: false}, + {name: "PermuteUint16x16", argLength: 2, commutative: false}, + {name: "PermuteUint16x32", argLength: 2, commutative: false}, + {name: "PermuteUint32x8", argLength: 2, commutative: false}, + {name: "PermuteUint32x16", argLength: 2, commutative: false}, + {name: "PermuteUint64x4", argLength: 2, commutative: false}, + {name: "PermuteUint64x8", argLength: 2, commutative: false}, + {name: "PopCountInt8x16", argLength: 1, commutative: false}, + {name: "PopCountInt8x32", argLength: 1, commutative: false}, + {name: "PopCountInt8x64", argLength: 1, commutative: false}, + {name: "PopCountInt16x8", argLength: 1, commutative: false}, + {name: "PopCountInt16x16", argLength: 1, commutative: false}, + {name: "PopCountInt16x32", argLength: 1, commutative: false}, {name: "PopCountInt32x4", argLength: 1, commutative: false}, + {name: "PopCountInt32x8", argLength: 1, commutative: false}, + {name: "PopCountInt32x16", argLength: 1, commutative: false}, + {name: "PopCountInt64x2", argLength: 1, commutative: false}, + {name: "PopCountInt64x4", argLength: 1, commutative: false}, + {name: "PopCountInt64x8", argLength: 1, commutative: false}, + {name: "PopCountMaskedInt8x16", argLength: 2, commutative: false}, + {name: "PopCountMaskedInt8x32", argLength: 2, commutative: false}, + {name: "PopCountMaskedInt8x64", argLength: 2, commutative: false}, + {name: "PopCountMaskedInt16x8", argLength: 2, commutative: false}, + {name: "PopCountMaskedInt16x16", argLength: 2, commutative: false}, + {name: "PopCountMaskedInt16x32", argLength: 2, commutative: false}, {name: "PopCountMaskedInt32x4", argLength: 2, commutative: false}, + {name: "PopCountMaskedInt32x8", argLength: 2, commutative: false}, + {name: "PopCountMaskedInt32x16", argLength: 2, commutative: false}, + {name: "PopCountMaskedInt64x2", argLength: 2, commutative: false}, + {name: "PopCountMaskedInt64x4", argLength: 2, commutative: false}, + {name: "PopCountMaskedInt64x8", argLength: 2, commutative: false}, + {name: "PopCountMaskedUint8x16", argLength: 2, commutative: false}, + {name: "PopCountMaskedUint8x32", argLength: 2, commutative: false}, + {name: "PopCountMaskedUint8x64", argLength: 2, commutative: false}, + {name: "PopCountMaskedUint16x8", argLength: 2, commutative: false}, + {name: "PopCountMaskedUint16x16", argLength: 2, commutative: false}, + {name: "PopCountMaskedUint16x32", argLength: 2, commutative: false}, + {name: "PopCountMaskedUint32x4", argLength: 2, commutative: false}, + {name: "PopCountMaskedUint32x8", argLength: 2, commutative: false}, + {name: "PopCountMaskedUint32x16", argLength: 2, commutative: false}, + {name: "PopCountMaskedUint64x2", argLength: 2, commutative: false}, + {name: "PopCountMaskedUint64x4", argLength: 2, commutative: false}, + {name: "PopCountMaskedUint64x8", argLength: 2, commutative: false}, + {name: "PopCountUint8x16", argLength: 1, commutative: false}, + {name: "PopCountUint8x32", argLength: 1, commutative: false}, + {name: "PopCountUint8x64", argLength: 1, commutative: false}, + {name: "PopCountUint16x8", argLength: 1, commutative: false}, + {name: "PopCountUint16x16", argLength: 1, commutative: false}, + {name: "PopCountUint16x32", argLength: 1, commutative: false}, + {name: "PopCountUint32x4", argLength: 1, commutative: false}, + {name: "PopCountUint32x8", argLength: 1, commutative: false}, + {name: "PopCountUint32x16", argLength: 1, 
commutative: false}, + {name: "PopCountUint64x2", argLength: 1, commutative: false}, + {name: "PopCountUint64x4", argLength: 1, commutative: false}, + {name: "PopCountUint64x8", argLength: 1, commutative: false}, {name: "RotateLeftInt32x4", argLength: 2, commutative: false}, + {name: "RotateLeftInt32x8", argLength: 2, commutative: false}, + {name: "RotateLeftInt32x16", argLength: 2, commutative: false}, + {name: "RotateLeftInt64x2", argLength: 2, commutative: false}, + {name: "RotateLeftInt64x4", argLength: 2, commutative: false}, + {name: "RotateLeftInt64x8", argLength: 2, commutative: false}, {name: "RotateLeftMaskedInt32x4", argLength: 3, commutative: false}, + {name: "RotateLeftMaskedInt32x8", argLength: 3, commutative: false}, + {name: "RotateLeftMaskedInt32x16", argLength: 3, commutative: false}, + {name: "RotateLeftMaskedInt64x2", argLength: 3, commutative: false}, + {name: "RotateLeftMaskedInt64x4", argLength: 3, commutative: false}, + {name: "RotateLeftMaskedInt64x8", argLength: 3, commutative: false}, + {name: "RotateLeftMaskedUint32x4", argLength: 3, commutative: false}, + {name: "RotateLeftMaskedUint32x8", argLength: 3, commutative: false}, + {name: "RotateLeftMaskedUint32x16", argLength: 3, commutative: false}, + {name: "RotateLeftMaskedUint64x2", argLength: 3, commutative: false}, + {name: "RotateLeftMaskedUint64x4", argLength: 3, commutative: false}, + {name: "RotateLeftMaskedUint64x8", argLength: 3, commutative: false}, + {name: "RotateLeftUint32x4", argLength: 2, commutative: false}, + {name: "RotateLeftUint32x8", argLength: 2, commutative: false}, + {name: "RotateLeftUint32x16", argLength: 2, commutative: false}, + {name: "RotateLeftUint64x2", argLength: 2, commutative: false}, + {name: "RotateLeftUint64x4", argLength: 2, commutative: false}, + {name: "RotateLeftUint64x8", argLength: 2, commutative: false}, {name: "RotateRightInt32x4", argLength: 2, commutative: false}, + {name: "RotateRightInt32x8", argLength: 2, commutative: false}, + {name: "RotateRightInt32x16", argLength: 2, commutative: false}, + {name: "RotateRightInt64x2", argLength: 2, commutative: false}, + {name: "RotateRightInt64x4", argLength: 2, commutative: false}, + {name: "RotateRightInt64x8", argLength: 2, commutative: false}, {name: "RotateRightMaskedInt32x4", argLength: 3, commutative: false}, - {name: "SaturatedPairDotProdAccumulateInt32x4", argLength: 3, commutative: false}, - {name: "SaturatedPairDotProdAccumulateMaskedInt32x4", argLength: 4, commutative: false}, - {name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x4", argLength: 3, commutative: false}, - {name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4", argLength: 4, commutative: false}, - {name: "ShiftAllLeftInt32x4", argLength: 2, commutative: false}, - {name: "ShiftAllLeftMaskedInt32x4", argLength: 3, commutative: false}, - {name: "ShiftAllRightInt32x4", argLength: 2, commutative: false}, - {name: "ShiftAllRightMaskedInt32x4", argLength: 3, commutative: false}, - {name: "ShiftLeftInt32x4", argLength: 2, commutative: false}, - {name: "ShiftLeftAndFillUpperFromInt32x4", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedInt32x4", argLength: 4, commutative: false}, - {name: "ShiftLeftMaskedInt32x4", argLength: 3, commutative: false}, - {name: "ShiftRightInt32x4", argLength: 2, commutative: false}, - {name: "ShiftRightAndFillUpperFromInt32x4", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedInt32x4", argLength: 4, commutative: false}, - {name: "ShiftRightMaskedInt32x4", 
argLength: 3, commutative: false}, - {name: "SignInt32x4", argLength: 2, commutative: false}, - {name: "SubInt32x4", argLength: 2, commutative: false}, - {name: "SubMaskedInt32x4", argLength: 3, commutative: false}, - {name: "UnsignedSignedQuadDotProdAccumulateInt32x4", argLength: 3, commutative: false}, - {name: "UnsignedSignedQuadDotProdAccumulateMaskedInt32x4", argLength: 4, commutative: false}, - {name: "XorInt32x4", argLength: 2, commutative: true}, - {name: "XorMaskedInt32x4", argLength: 3, commutative: true}, - {name: "AbsoluteInt32x8", argLength: 1, commutative: false}, - {name: "AbsoluteMaskedInt32x8", argLength: 2, commutative: false}, - {name: "AddInt32x8", argLength: 2, commutative: true}, - {name: "AddMaskedInt32x8", argLength: 3, commutative: true}, - {name: "AndInt32x8", argLength: 2, commutative: true}, - {name: "AndMaskedInt32x8", argLength: 3, commutative: true}, - {name: "AndNotInt32x8", argLength: 2, commutative: false}, - {name: "AndNotMaskedInt32x8", argLength: 3, commutative: false}, - {name: "CompressInt32x8", argLength: 2, commutative: false}, - {name: "EqualInt32x8", argLength: 2, commutative: true}, - {name: "EqualMaskedInt32x8", argLength: 3, commutative: true}, - {name: "GreaterInt32x8", argLength: 2, commutative: false}, - {name: "GreaterEqualInt32x8", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedInt32x8", argLength: 3, commutative: false}, - {name: "GreaterMaskedInt32x8", argLength: 3, commutative: false}, - {name: "LessInt32x8", argLength: 2, commutative: false}, - {name: "LessEqualInt32x8", argLength: 2, commutative: false}, - {name: "LessEqualMaskedInt32x8", argLength: 3, commutative: false}, - {name: "LessMaskedInt32x8", argLength: 3, commutative: false}, - {name: "MaxInt32x8", argLength: 2, commutative: true}, - {name: "MaxMaskedInt32x8", argLength: 3, commutative: true}, - {name: "MinInt32x8", argLength: 2, commutative: true}, - {name: "MinMaskedInt32x8", argLength: 3, commutative: true}, - {name: "MulEvenWidenInt32x8", argLength: 2, commutative: true}, - {name: "MulLowInt32x8", argLength: 2, commutative: true}, - {name: "MulLowMaskedInt32x8", argLength: 3, commutative: true}, - {name: "NotEqualInt32x8", argLength: 2, commutative: true}, - {name: "NotEqualMaskedInt32x8", argLength: 3, commutative: true}, - {name: "OrInt32x8", argLength: 2, commutative: true}, - {name: "OrMaskedInt32x8", argLength: 3, commutative: true}, - {name: "PairDotProdAccumulateInt32x8", argLength: 3, commutative: false}, - {name: "PairDotProdAccumulateMaskedInt32x8", argLength: 4, commutative: false}, - {name: "PairwiseAddInt32x8", argLength: 2, commutative: false}, - {name: "PairwiseSubInt32x8", argLength: 2, commutative: false}, - {name: "PopCountInt32x8", argLength: 1, commutative: false}, - {name: "PopCountMaskedInt32x8", argLength: 2, commutative: false}, - {name: "RotateLeftInt32x8", argLength: 2, commutative: false}, - {name: "RotateLeftMaskedInt32x8", argLength: 3, commutative: false}, - {name: "RotateRightInt32x8", argLength: 2, commutative: false}, {name: "RotateRightMaskedInt32x8", argLength: 3, commutative: false}, + {name: "RotateRightMaskedInt32x16", argLength: 3, commutative: false}, + {name: "RotateRightMaskedInt64x2", argLength: 3, commutative: false}, + {name: "RotateRightMaskedInt64x4", argLength: 3, commutative: false}, + {name: "RotateRightMaskedInt64x8", argLength: 3, commutative: false}, + {name: "RotateRightMaskedUint32x4", argLength: 3, commutative: false}, + {name: "RotateRightMaskedUint32x8", argLength: 3, commutative: false}, + {name: 
"RotateRightMaskedUint32x16", argLength: 3, commutative: false}, + {name: "RotateRightMaskedUint64x2", argLength: 3, commutative: false}, + {name: "RotateRightMaskedUint64x4", argLength: 3, commutative: false}, + {name: "RotateRightMaskedUint64x8", argLength: 3, commutative: false}, + {name: "RotateRightUint32x4", argLength: 2, commutative: false}, + {name: "RotateRightUint32x8", argLength: 2, commutative: false}, + {name: "RotateRightUint32x16", argLength: 2, commutative: false}, + {name: "RotateRightUint64x2", argLength: 2, commutative: false}, + {name: "RotateRightUint64x4", argLength: 2, commutative: false}, + {name: "RotateRightUint64x8", argLength: 2, commutative: false}, + {name: "RoundFloat32x4", argLength: 1, commutative: false}, + {name: "RoundFloat32x8", argLength: 1, commutative: false}, + {name: "RoundFloat64x2", argLength: 1, commutative: false}, + {name: "RoundFloat64x4", argLength: 1, commutative: false}, + {name: "SaturatedAddInt8x16", argLength: 2, commutative: true}, + {name: "SaturatedAddInt8x32", argLength: 2, commutative: true}, + {name: "SaturatedAddInt8x64", argLength: 2, commutative: true}, + {name: "SaturatedAddInt16x8", argLength: 2, commutative: true}, + {name: "SaturatedAddInt16x16", argLength: 2, commutative: true}, + {name: "SaturatedAddInt16x32", argLength: 2, commutative: true}, + {name: "SaturatedAddMaskedInt8x16", argLength: 3, commutative: true}, + {name: "SaturatedAddMaskedInt8x32", argLength: 3, commutative: true}, + {name: "SaturatedAddMaskedInt8x64", argLength: 3, commutative: true}, + {name: "SaturatedAddMaskedInt16x8", argLength: 3, commutative: true}, + {name: "SaturatedAddMaskedInt16x16", argLength: 3, commutative: true}, + {name: "SaturatedAddMaskedInt16x32", argLength: 3, commutative: true}, + {name: "SaturatedAddMaskedUint8x16", argLength: 3, commutative: true}, + {name: "SaturatedAddMaskedUint8x32", argLength: 3, commutative: true}, + {name: "SaturatedAddMaskedUint8x64", argLength: 3, commutative: true}, + {name: "SaturatedAddMaskedUint16x8", argLength: 3, commutative: true}, + {name: "SaturatedAddMaskedUint16x16", argLength: 3, commutative: true}, + {name: "SaturatedAddMaskedUint16x32", argLength: 3, commutative: true}, + {name: "SaturatedAddUint8x16", argLength: 2, commutative: true}, + {name: "SaturatedAddUint8x32", argLength: 2, commutative: true}, + {name: "SaturatedAddUint8x64", argLength: 2, commutative: true}, + {name: "SaturatedAddUint16x8", argLength: 2, commutative: true}, + {name: "SaturatedAddUint16x16", argLength: 2, commutative: true}, + {name: "SaturatedAddUint16x32", argLength: 2, commutative: true}, + {name: "SaturatedPairDotProdAccumulateInt32x4", argLength: 3, commutative: false}, {name: "SaturatedPairDotProdAccumulateInt32x8", argLength: 3, commutative: false}, + {name: "SaturatedPairDotProdAccumulateInt32x16", argLength: 3, commutative: false}, + {name: "SaturatedPairDotProdAccumulateMaskedInt32x4", argLength: 4, commutative: false}, {name: "SaturatedPairDotProdAccumulateMaskedInt32x8", argLength: 4, commutative: false}, + {name: "SaturatedPairDotProdAccumulateMaskedInt32x16", argLength: 4, commutative: false}, + {name: "SaturatedPairwiseAddInt16x8", argLength: 2, commutative: false}, + {name: "SaturatedPairwiseAddInt16x16", argLength: 2, commutative: false}, + {name: "SaturatedPairwiseSubInt16x8", argLength: 2, commutative: false}, + {name: "SaturatedPairwiseSubInt16x16", argLength: 2, commutative: false}, + {name: "SaturatedSubInt8x16", argLength: 2, commutative: false}, + {name: "SaturatedSubInt8x32", argLength: 2, 
commutative: false}, + {name: "SaturatedSubInt8x64", argLength: 2, commutative: false}, + {name: "SaturatedSubInt16x8", argLength: 2, commutative: false}, + {name: "SaturatedSubInt16x16", argLength: 2, commutative: false}, + {name: "SaturatedSubInt16x32", argLength: 2, commutative: false}, + {name: "SaturatedSubMaskedInt8x16", argLength: 3, commutative: false}, + {name: "SaturatedSubMaskedInt8x32", argLength: 3, commutative: false}, + {name: "SaturatedSubMaskedInt8x64", argLength: 3, commutative: false}, + {name: "SaturatedSubMaskedInt16x8", argLength: 3, commutative: false}, + {name: "SaturatedSubMaskedInt16x16", argLength: 3, commutative: false}, + {name: "SaturatedSubMaskedInt16x32", argLength: 3, commutative: false}, + {name: "SaturatedSubMaskedUint8x16", argLength: 3, commutative: false}, + {name: "SaturatedSubMaskedUint8x32", argLength: 3, commutative: false}, + {name: "SaturatedSubMaskedUint8x64", argLength: 3, commutative: false}, + {name: "SaturatedSubMaskedUint16x8", argLength: 3, commutative: false}, + {name: "SaturatedSubMaskedUint16x16", argLength: 3, commutative: false}, + {name: "SaturatedSubMaskedUint16x32", argLength: 3, commutative: false}, + {name: "SaturatedSubUint8x16", argLength: 2, commutative: false}, + {name: "SaturatedSubUint8x32", argLength: 2, commutative: false}, + {name: "SaturatedSubUint8x64", argLength: 2, commutative: false}, + {name: "SaturatedSubUint16x8", argLength: 2, commutative: false}, + {name: "SaturatedSubUint16x16", argLength: 2, commutative: false}, + {name: "SaturatedSubUint16x32", argLength: 2, commutative: false}, + {name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x16", argLength: 3, commutative: false}, + {name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x32", argLength: 3, commutative: false}, + {name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x64", argLength: 3, commutative: false}, + {name: "SaturatedUnsignedSignedPairDotProdUint8x16", argLength: 2, commutative: false}, + {name: "SaturatedUnsignedSignedPairDotProdUint8x32", argLength: 2, commutative: false}, + {name: "SaturatedUnsignedSignedPairDotProdUint8x64", argLength: 2, commutative: false}, + {name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x4", argLength: 3, commutative: false}, {name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x8", argLength: 3, commutative: false}, + {name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x16", argLength: 3, commutative: false}, + {name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4", argLength: 4, commutative: false}, {name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8", argLength: 4, commutative: false}, + {name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16", argLength: 4, commutative: false}, + {name: "ShiftAllLeftInt16x8", argLength: 2, commutative: false}, + {name: "ShiftAllLeftInt16x16", argLength: 2, commutative: false}, + {name: "ShiftAllLeftInt16x32", argLength: 2, commutative: false}, + {name: "ShiftAllLeftInt32x4", argLength: 2, commutative: false}, {name: "ShiftAllLeftInt32x8", argLength: 2, commutative: false}, + {name: "ShiftAllLeftInt32x16", argLength: 2, commutative: false}, + {name: "ShiftAllLeftInt64x2", argLength: 2, commutative: false}, + {name: "ShiftAllLeftInt64x4", argLength: 2, commutative: false}, + {name: "ShiftAllLeftInt64x8", argLength: 2, commutative: false}, + {name: "ShiftAllLeftMaskedInt16x8", argLength: 3, commutative: false}, + {name: "ShiftAllLeftMaskedInt16x16", argLength: 3, commutative: false}, + {name: "ShiftAllLeftMaskedInt16x32", 
argLength: 3, commutative: false}, + {name: "ShiftAllLeftMaskedInt32x4", argLength: 3, commutative: false}, {name: "ShiftAllLeftMaskedInt32x8", argLength: 3, commutative: false}, + {name: "ShiftAllLeftMaskedInt32x16", argLength: 3, commutative: false}, + {name: "ShiftAllLeftMaskedInt64x2", argLength: 3, commutative: false}, + {name: "ShiftAllLeftMaskedInt64x4", argLength: 3, commutative: false}, + {name: "ShiftAllLeftMaskedInt64x8", argLength: 3, commutative: false}, + {name: "ShiftAllLeftMaskedUint16x8", argLength: 3, commutative: false}, + {name: "ShiftAllLeftMaskedUint16x16", argLength: 3, commutative: false}, + {name: "ShiftAllLeftMaskedUint16x32", argLength: 3, commutative: false}, + {name: "ShiftAllLeftMaskedUint32x4", argLength: 3, commutative: false}, + {name: "ShiftAllLeftMaskedUint32x8", argLength: 3, commutative: false}, + {name: "ShiftAllLeftMaskedUint32x16", argLength: 3, commutative: false}, + {name: "ShiftAllLeftMaskedUint64x2", argLength: 3, commutative: false}, + {name: "ShiftAllLeftMaskedUint64x4", argLength: 3, commutative: false}, + {name: "ShiftAllLeftMaskedUint64x8", argLength: 3, commutative: false}, + {name: "ShiftAllLeftUint16x8", argLength: 2, commutative: false}, + {name: "ShiftAllLeftUint16x16", argLength: 2, commutative: false}, + {name: "ShiftAllLeftUint16x32", argLength: 2, commutative: false}, + {name: "ShiftAllLeftUint32x4", argLength: 2, commutative: false}, + {name: "ShiftAllLeftUint32x8", argLength: 2, commutative: false}, + {name: "ShiftAllLeftUint32x16", argLength: 2, commutative: false}, + {name: "ShiftAllLeftUint64x2", argLength: 2, commutative: false}, + {name: "ShiftAllLeftUint64x4", argLength: 2, commutative: false}, + {name: "ShiftAllLeftUint64x8", argLength: 2, commutative: false}, + {name: "ShiftAllRightInt16x8", argLength: 2, commutative: false}, + {name: "ShiftAllRightInt16x16", argLength: 2, commutative: false}, + {name: "ShiftAllRightInt16x32", argLength: 2, commutative: false}, + {name: "ShiftAllRightInt32x4", argLength: 2, commutative: false}, {name: "ShiftAllRightInt32x8", argLength: 2, commutative: false}, + {name: "ShiftAllRightInt32x16", argLength: 2, commutative: false}, + {name: "ShiftAllRightInt64x2", argLength: 2, commutative: false}, + {name: "ShiftAllRightInt64x4", argLength: 2, commutative: false}, + {name: "ShiftAllRightInt64x8", argLength: 2, commutative: false}, + {name: "ShiftAllRightMaskedInt16x8", argLength: 3, commutative: false}, + {name: "ShiftAllRightMaskedInt16x16", argLength: 3, commutative: false}, + {name: "ShiftAllRightMaskedInt16x32", argLength: 3, commutative: false}, + {name: "ShiftAllRightMaskedInt32x4", argLength: 3, commutative: false}, {name: "ShiftAllRightMaskedInt32x8", argLength: 3, commutative: false}, - {name: "ShiftLeftInt32x8", argLength: 2, commutative: false}, + {name: "ShiftAllRightMaskedInt32x16", argLength: 3, commutative: false}, + {name: "ShiftAllRightMaskedInt64x2", argLength: 3, commutative: false}, + {name: "ShiftAllRightMaskedInt64x4", argLength: 3, commutative: false}, + {name: "ShiftAllRightMaskedInt64x8", argLength: 3, commutative: false}, + {name: "ShiftAllRightMaskedUint16x8", argLength: 3, commutative: false}, + {name: "ShiftAllRightMaskedUint16x16", argLength: 3, commutative: false}, + {name: "ShiftAllRightMaskedUint16x32", argLength: 3, commutative: false}, + {name: "ShiftAllRightMaskedUint32x4", argLength: 3, commutative: false}, + {name: "ShiftAllRightMaskedUint32x8", argLength: 3, commutative: false}, + {name: "ShiftAllRightMaskedUint32x16", argLength: 3, commutative: false}, + 
{name: "ShiftAllRightMaskedUint64x2", argLength: 3, commutative: false}, + {name: "ShiftAllRightMaskedUint64x4", argLength: 3, commutative: false}, + {name: "ShiftAllRightMaskedUint64x8", argLength: 3, commutative: false}, + {name: "ShiftAllRightUint16x8", argLength: 2, commutative: false}, + {name: "ShiftAllRightUint16x16", argLength: 2, commutative: false}, + {name: "ShiftAllRightUint16x32", argLength: 2, commutative: false}, + {name: "ShiftAllRightUint32x4", argLength: 2, commutative: false}, + {name: "ShiftAllRightUint32x8", argLength: 2, commutative: false}, + {name: "ShiftAllRightUint32x16", argLength: 2, commutative: false}, + {name: "ShiftAllRightUint64x2", argLength: 2, commutative: false}, + {name: "ShiftAllRightUint64x4", argLength: 2, commutative: false}, + {name: "ShiftAllRightUint64x8", argLength: 2, commutative: false}, + {name: "ShiftLeftAndFillUpperFromInt16x8", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromInt16x16", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromInt16x32", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromInt32x4", argLength: 3, commutative: false}, {name: "ShiftLeftAndFillUpperFromInt32x8", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromInt32x16", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromInt64x2", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromInt64x4", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromInt64x8", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedInt16x8", argLength: 4, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedInt16x16", argLength: 4, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedInt16x32", argLength: 4, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedInt32x4", argLength: 4, commutative: false}, {name: "ShiftLeftAndFillUpperFromMaskedInt32x8", argLength: 4, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedInt32x16", argLength: 4, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedInt64x2", argLength: 4, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedInt64x4", argLength: 4, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedInt64x8", argLength: 4, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedUint16x8", argLength: 4, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedUint16x16", argLength: 4, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedUint16x32", argLength: 4, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedUint32x4", argLength: 4, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedUint32x8", argLength: 4, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedUint32x16", argLength: 4, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedUint64x2", argLength: 4, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedUint64x4", argLength: 4, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedUint64x8", argLength: 4, commutative: false}, + {name: "ShiftLeftAndFillUpperFromUint16x8", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromUint16x16", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromUint16x32", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromUint32x4", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromUint32x8", argLength: 3, commutative: false}, + {name: 
"ShiftLeftAndFillUpperFromUint32x16", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromUint64x2", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromUint64x4", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromUint64x8", argLength: 3, commutative: false}, + {name: "ShiftLeftInt16x8", argLength: 2, commutative: false}, + {name: "ShiftLeftInt16x16", argLength: 2, commutative: false}, + {name: "ShiftLeftInt16x32", argLength: 2, commutative: false}, + {name: "ShiftLeftInt32x4", argLength: 2, commutative: false}, + {name: "ShiftLeftInt32x8", argLength: 2, commutative: false}, + {name: "ShiftLeftInt32x16", argLength: 2, commutative: false}, + {name: "ShiftLeftInt64x2", argLength: 2, commutative: false}, + {name: "ShiftLeftInt64x4", argLength: 2, commutative: false}, + {name: "ShiftLeftInt64x8", argLength: 2, commutative: false}, + {name: "ShiftLeftMaskedInt16x8", argLength: 3, commutative: false}, + {name: "ShiftLeftMaskedInt16x16", argLength: 3, commutative: false}, + {name: "ShiftLeftMaskedInt16x32", argLength: 3, commutative: false}, + {name: "ShiftLeftMaskedInt32x4", argLength: 3, commutative: false}, {name: "ShiftLeftMaskedInt32x8", argLength: 3, commutative: false}, - {name: "ShiftRightInt32x8", argLength: 2, commutative: false}, - {name: "ShiftRightAndFillUpperFromInt32x8", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedInt32x8", argLength: 4, commutative: false}, - {name: "ShiftRightMaskedInt32x8", argLength: 3, commutative: false}, - {name: "SignInt32x8", argLength: 2, commutative: false}, - {name: "SubInt32x8", argLength: 2, commutative: false}, - {name: "SubMaskedInt32x8", argLength: 3, commutative: false}, - {name: "UnsignedSignedQuadDotProdAccumulateInt32x8", argLength: 3, commutative: false}, - {name: "UnsignedSignedQuadDotProdAccumulateMaskedInt32x8", argLength: 4, commutative: false}, - {name: "XorInt32x8", argLength: 2, commutative: true}, - {name: "XorMaskedInt32x8", argLength: 3, commutative: true}, - {name: "AbsoluteInt64x2", argLength: 1, commutative: false}, - {name: "AbsoluteMaskedInt64x2", argLength: 2, commutative: false}, - {name: "AddInt64x2", argLength: 2, commutative: true}, - {name: "AddMaskedInt64x2", argLength: 3, commutative: true}, - {name: "AndInt64x2", argLength: 2, commutative: true}, - {name: "AndMaskedInt64x2", argLength: 3, commutative: true}, - {name: "AndNotInt64x2", argLength: 2, commutative: false}, - {name: "AndNotMaskedInt64x2", argLength: 3, commutative: false}, - {name: "CompressInt64x2", argLength: 2, commutative: false}, - {name: "EqualInt64x2", argLength: 2, commutative: true}, - {name: "EqualMaskedInt64x2", argLength: 3, commutative: true}, - {name: "GreaterInt64x2", argLength: 2, commutative: false}, - {name: "GreaterEqualInt64x2", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedInt64x2", argLength: 3, commutative: false}, - {name: "GreaterMaskedInt64x2", argLength: 3, commutative: false}, - {name: "LessInt64x2", argLength: 2, commutative: false}, - {name: "LessEqualInt64x2", argLength: 2, commutative: false}, - {name: "LessEqualMaskedInt64x2", argLength: 3, commutative: false}, - {name: "LessMaskedInt64x2", argLength: 3, commutative: false}, - {name: "MaxInt64x2", argLength: 2, commutative: true}, - {name: "MaxMaskedInt64x2", argLength: 3, commutative: true}, - {name: "MinInt64x2", argLength: 2, commutative: true}, - {name: "MinMaskedInt64x2", argLength: 3, commutative: true}, - {name: "MulEvenWidenInt64x2", argLength: 2, commutative: 
true}, - {name: "MulEvenWidenMaskedInt64x2", argLength: 3, commutative: true}, - {name: "MulLowInt64x2", argLength: 2, commutative: true}, - {name: "MulLowMaskedInt64x2", argLength: 3, commutative: true}, - {name: "NotEqualInt64x2", argLength: 2, commutative: true}, - {name: "NotEqualMaskedInt64x2", argLength: 3, commutative: true}, - {name: "OrInt64x2", argLength: 2, commutative: true}, - {name: "OrMaskedInt64x2", argLength: 3, commutative: true}, - {name: "PopCountInt64x2", argLength: 1, commutative: false}, - {name: "PopCountMaskedInt64x2", argLength: 2, commutative: false}, - {name: "RotateLeftInt64x2", argLength: 2, commutative: false}, - {name: "RotateLeftMaskedInt64x2", argLength: 3, commutative: false}, - {name: "RotateRightInt64x2", argLength: 2, commutative: false}, - {name: "RotateRightMaskedInt64x2", argLength: 3, commutative: false}, - {name: "ShiftAllLeftInt64x2", argLength: 2, commutative: false}, - {name: "ShiftAllLeftMaskedInt64x2", argLength: 3, commutative: false}, - {name: "ShiftAllRightInt64x2", argLength: 2, commutative: false}, - {name: "ShiftAllRightMaskedInt64x2", argLength: 3, commutative: false}, - {name: "ShiftLeftInt64x2", argLength: 2, commutative: false}, - {name: "ShiftLeftAndFillUpperFromInt64x2", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedInt64x2", argLength: 4, commutative: false}, + {name: "ShiftLeftMaskedInt32x16", argLength: 3, commutative: false}, {name: "ShiftLeftMaskedInt64x2", argLength: 3, commutative: false}, - {name: "ShiftRightInt64x2", argLength: 2, commutative: false}, - {name: "ShiftRightAndFillUpperFromInt64x2", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedInt64x2", argLength: 4, commutative: false}, - {name: "ShiftRightMaskedInt64x2", argLength: 3, commutative: false}, - {name: "SubInt64x2", argLength: 2, commutative: false}, - {name: "SubMaskedInt64x2", argLength: 3, commutative: false}, - {name: "XorInt64x2", argLength: 2, commutative: true}, - {name: "XorMaskedInt64x2", argLength: 3, commutative: true}, - {name: "AbsoluteInt64x4", argLength: 1, commutative: false}, - {name: "AbsoluteMaskedInt64x4", argLength: 2, commutative: false}, - {name: "AddInt64x4", argLength: 2, commutative: true}, - {name: "AddMaskedInt64x4", argLength: 3, commutative: true}, - {name: "AndInt64x4", argLength: 2, commutative: true}, - {name: "AndMaskedInt64x4", argLength: 3, commutative: true}, - {name: "AndNotInt64x4", argLength: 2, commutative: false}, - {name: "AndNotMaskedInt64x4", argLength: 3, commutative: false}, - {name: "CompressInt64x4", argLength: 2, commutative: false}, - {name: "EqualInt64x4", argLength: 2, commutative: true}, - {name: "EqualMaskedInt64x4", argLength: 3, commutative: true}, - {name: "GreaterInt64x4", argLength: 2, commutative: false}, - {name: "GreaterEqualInt64x4", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedInt64x4", argLength: 3, commutative: false}, - {name: "GreaterMaskedInt64x4", argLength: 3, commutative: false}, - {name: "LessInt64x4", argLength: 2, commutative: false}, - {name: "LessEqualInt64x4", argLength: 2, commutative: false}, - {name: "LessEqualMaskedInt64x4", argLength: 3, commutative: false}, - {name: "LessMaskedInt64x4", argLength: 3, commutative: false}, - {name: "MaxInt64x4", argLength: 2, commutative: true}, - {name: "MaxMaskedInt64x4", argLength: 3, commutative: true}, - {name: "MinInt64x4", argLength: 2, commutative: true}, - {name: "MinMaskedInt64x4", argLength: 3, commutative: true}, - {name: "MulEvenWidenInt64x4", argLength: 
2, commutative: true}, - {name: "MulEvenWidenMaskedInt64x4", argLength: 3, commutative: true}, - {name: "MulLowInt64x4", argLength: 2, commutative: true}, - {name: "MulLowMaskedInt64x4", argLength: 3, commutative: true}, - {name: "NotEqualInt64x4", argLength: 2, commutative: true}, - {name: "NotEqualMaskedInt64x4", argLength: 3, commutative: true}, - {name: "OrInt64x4", argLength: 2, commutative: true}, - {name: "OrMaskedInt64x4", argLength: 3, commutative: true}, - {name: "PopCountInt64x4", argLength: 1, commutative: false}, - {name: "PopCountMaskedInt64x4", argLength: 2, commutative: false}, - {name: "RotateLeftInt64x4", argLength: 2, commutative: false}, - {name: "RotateLeftMaskedInt64x4", argLength: 3, commutative: false}, - {name: "RotateRightInt64x4", argLength: 2, commutative: false}, - {name: "RotateRightMaskedInt64x4", argLength: 3, commutative: false}, - {name: "ShiftAllLeftInt64x4", argLength: 2, commutative: false}, - {name: "ShiftAllLeftMaskedInt64x4", argLength: 3, commutative: false}, - {name: "ShiftAllRightInt64x4", argLength: 2, commutative: false}, - {name: "ShiftAllRightMaskedInt64x4", argLength: 3, commutative: false}, - {name: "ShiftLeftInt64x4", argLength: 2, commutative: false}, - {name: "ShiftLeftAndFillUpperFromInt64x4", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedInt64x4", argLength: 4, commutative: false}, {name: "ShiftLeftMaskedInt64x4", argLength: 3, commutative: false}, - {name: "ShiftRightInt64x4", argLength: 2, commutative: false}, + {name: "ShiftLeftMaskedInt64x8", argLength: 3, commutative: false}, + {name: "ShiftLeftMaskedUint16x8", argLength: 3, commutative: false}, + {name: "ShiftLeftMaskedUint16x16", argLength: 3, commutative: false}, + {name: "ShiftLeftMaskedUint16x32", argLength: 3, commutative: false}, + {name: "ShiftLeftMaskedUint32x4", argLength: 3, commutative: false}, + {name: "ShiftLeftMaskedUint32x8", argLength: 3, commutative: false}, + {name: "ShiftLeftMaskedUint32x16", argLength: 3, commutative: false}, + {name: "ShiftLeftMaskedUint64x2", argLength: 3, commutative: false}, + {name: "ShiftLeftMaskedUint64x4", argLength: 3, commutative: false}, + {name: "ShiftLeftMaskedUint64x8", argLength: 3, commutative: false}, + {name: "ShiftLeftUint16x8", argLength: 2, commutative: false}, + {name: "ShiftLeftUint16x16", argLength: 2, commutative: false}, + {name: "ShiftLeftUint16x32", argLength: 2, commutative: false}, + {name: "ShiftLeftUint32x4", argLength: 2, commutative: false}, + {name: "ShiftLeftUint32x8", argLength: 2, commutative: false}, + {name: "ShiftLeftUint32x16", argLength: 2, commutative: false}, + {name: "ShiftLeftUint64x2", argLength: 2, commutative: false}, + {name: "ShiftLeftUint64x4", argLength: 2, commutative: false}, + {name: "ShiftLeftUint64x8", argLength: 2, commutative: false}, + {name: "ShiftRightAndFillUpperFromInt16x8", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromInt16x16", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromInt16x32", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromInt32x4", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromInt32x8", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromInt32x16", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromInt64x2", argLength: 3, commutative: false}, {name: "ShiftRightAndFillUpperFromInt64x4", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromInt64x8", argLength: 3, commutative: false}, + 
{name: "ShiftRightAndFillUpperFromMaskedInt16x8", argLength: 4, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedInt16x16", argLength: 4, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedInt16x32", argLength: 4, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedInt32x4", argLength: 4, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedInt32x8", argLength: 4, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedInt32x16", argLength: 4, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedInt64x2", argLength: 4, commutative: false}, {name: "ShiftRightAndFillUpperFromMaskedInt64x4", argLength: 4, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedInt64x8", argLength: 4, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedUint16x8", argLength: 4, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedUint16x16", argLength: 4, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedUint16x32", argLength: 4, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedUint32x4", argLength: 4, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedUint32x8", argLength: 4, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedUint32x16", argLength: 4, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedUint64x2", argLength: 4, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedUint64x4", argLength: 4, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedUint64x8", argLength: 4, commutative: false}, + {name: "ShiftRightAndFillUpperFromUint16x8", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromUint16x16", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromUint16x32", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromUint32x4", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromUint32x8", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromUint32x16", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromUint64x2", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromUint64x4", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromUint64x8", argLength: 3, commutative: false}, + {name: "ShiftRightInt16x8", argLength: 2, commutative: false}, + {name: "ShiftRightInt16x16", argLength: 2, commutative: false}, + {name: "ShiftRightInt16x32", argLength: 2, commutative: false}, + {name: "ShiftRightInt32x4", argLength: 2, commutative: false}, + {name: "ShiftRightInt32x8", argLength: 2, commutative: false}, + {name: "ShiftRightInt32x16", argLength: 2, commutative: false}, + {name: "ShiftRightInt64x2", argLength: 2, commutative: false}, + {name: "ShiftRightInt64x4", argLength: 2, commutative: false}, + {name: "ShiftRightInt64x8", argLength: 2, commutative: false}, + {name: "ShiftRightMaskedInt16x8", argLength: 3, commutative: false}, + {name: "ShiftRightMaskedInt16x16", argLength: 3, commutative: false}, + {name: "ShiftRightMaskedInt16x32", argLength: 3, commutative: false}, + {name: "ShiftRightMaskedInt32x4", argLength: 3, commutative: false}, + {name: "ShiftRightMaskedInt32x8", argLength: 3, commutative: false}, + {name: "ShiftRightMaskedInt32x16", argLength: 3, commutative: false}, + {name: "ShiftRightMaskedInt64x2", argLength: 3, commutative: false}, {name: "ShiftRightMaskedInt64x4", argLength: 3, commutative: false}, + {name: "ShiftRightMaskedInt64x8", argLength: 3, commutative: false}, + {name: 
"ShiftRightMaskedUint16x8", argLength: 3, commutative: false}, + {name: "ShiftRightMaskedUint16x16", argLength: 3, commutative: false}, + {name: "ShiftRightMaskedUint16x32", argLength: 3, commutative: false}, + {name: "ShiftRightMaskedUint32x4", argLength: 3, commutative: false}, + {name: "ShiftRightMaskedUint32x8", argLength: 3, commutative: false}, + {name: "ShiftRightMaskedUint32x16", argLength: 3, commutative: false}, + {name: "ShiftRightMaskedUint64x2", argLength: 3, commutative: false}, + {name: "ShiftRightMaskedUint64x4", argLength: 3, commutative: false}, + {name: "ShiftRightMaskedUint64x8", argLength: 3, commutative: false}, + {name: "ShiftRightUint16x8", argLength: 2, commutative: false}, + {name: "ShiftRightUint16x16", argLength: 2, commutative: false}, + {name: "ShiftRightUint16x32", argLength: 2, commutative: false}, + {name: "ShiftRightUint32x4", argLength: 2, commutative: false}, + {name: "ShiftRightUint32x8", argLength: 2, commutative: false}, + {name: "ShiftRightUint32x16", argLength: 2, commutative: false}, + {name: "ShiftRightUint64x2", argLength: 2, commutative: false}, + {name: "ShiftRightUint64x4", argLength: 2, commutative: false}, + {name: "ShiftRightUint64x8", argLength: 2, commutative: false}, + {name: "SignInt8x16", argLength: 2, commutative: false}, + {name: "SignInt8x32", argLength: 2, commutative: false}, + {name: "SignInt16x8", argLength: 2, commutative: false}, + {name: "SignInt16x16", argLength: 2, commutative: false}, + {name: "SignInt32x4", argLength: 2, commutative: false}, + {name: "SignInt32x8", argLength: 2, commutative: false}, + {name: "SqrtFloat32x4", argLength: 1, commutative: false}, + {name: "SqrtFloat32x8", argLength: 1, commutative: false}, + {name: "SqrtFloat32x16", argLength: 1, commutative: false}, + {name: "SqrtFloat64x2", argLength: 1, commutative: false}, + {name: "SqrtFloat64x4", argLength: 1, commutative: false}, + {name: "SqrtFloat64x8", argLength: 1, commutative: false}, + {name: "SqrtMaskedFloat32x4", argLength: 2, commutative: false}, + {name: "SqrtMaskedFloat32x8", argLength: 2, commutative: false}, + {name: "SqrtMaskedFloat32x16", argLength: 2, commutative: false}, + {name: "SqrtMaskedFloat64x2", argLength: 2, commutative: false}, + {name: "SqrtMaskedFloat64x4", argLength: 2, commutative: false}, + {name: "SqrtMaskedFloat64x8", argLength: 2, commutative: false}, + {name: "SubFloat32x4", argLength: 2, commutative: false}, + {name: "SubFloat32x8", argLength: 2, commutative: false}, + {name: "SubFloat32x16", argLength: 2, commutative: false}, + {name: "SubFloat64x2", argLength: 2, commutative: false}, + {name: "SubFloat64x4", argLength: 2, commutative: false}, + {name: "SubFloat64x8", argLength: 2, commutative: false}, + {name: "SubInt8x16", argLength: 2, commutative: false}, + {name: "SubInt8x32", argLength: 2, commutative: false}, + {name: "SubInt8x64", argLength: 2, commutative: false}, + {name: "SubInt16x8", argLength: 2, commutative: false}, + {name: "SubInt16x16", argLength: 2, commutative: false}, + {name: "SubInt16x32", argLength: 2, commutative: false}, + {name: "SubInt32x4", argLength: 2, commutative: false}, + {name: "SubInt32x8", argLength: 2, commutative: false}, + {name: "SubInt32x16", argLength: 2, commutative: false}, + {name: "SubInt64x2", argLength: 2, commutative: false}, {name: "SubInt64x4", argLength: 2, commutative: false}, + {name: "SubInt64x8", argLength: 2, commutative: false}, + {name: "SubMaskedFloat32x4", argLength: 3, commutative: false}, + {name: "SubMaskedFloat32x8", argLength: 3, commutative: false}, 
+ {name: "SubMaskedFloat32x16", argLength: 3, commutative: false}, + {name: "SubMaskedFloat64x2", argLength: 3, commutative: false}, + {name: "SubMaskedFloat64x4", argLength: 3, commutative: false}, + {name: "SubMaskedFloat64x8", argLength: 3, commutative: false}, + {name: "SubMaskedInt8x16", argLength: 3, commutative: false}, + {name: "SubMaskedInt8x32", argLength: 3, commutative: false}, + {name: "SubMaskedInt8x64", argLength: 3, commutative: false}, + {name: "SubMaskedInt16x8", argLength: 3, commutative: false}, + {name: "SubMaskedInt16x16", argLength: 3, commutative: false}, + {name: "SubMaskedInt16x32", argLength: 3, commutative: false}, + {name: "SubMaskedInt32x4", argLength: 3, commutative: false}, + {name: "SubMaskedInt32x8", argLength: 3, commutative: false}, + {name: "SubMaskedInt32x16", argLength: 3, commutative: false}, + {name: "SubMaskedInt64x2", argLength: 3, commutative: false}, {name: "SubMaskedInt64x4", argLength: 3, commutative: false}, - {name: "XorInt64x4", argLength: 2, commutative: true}, - {name: "XorMaskedInt64x4", argLength: 3, commutative: true}, - {name: "AbsoluteInt64x8", argLength: 1, commutative: false}, - {name: "AbsoluteMaskedInt64x8", argLength: 2, commutative: false}, - {name: "AddInt64x8", argLength: 2, commutative: true}, - {name: "AddMaskedInt64x8", argLength: 3, commutative: true}, - {name: "AndInt64x8", argLength: 2, commutative: true}, - {name: "AndMaskedInt64x8", argLength: 3, commutative: true}, - {name: "AndNotInt64x8", argLength: 2, commutative: false}, - {name: "AndNotMaskedInt64x8", argLength: 3, commutative: false}, - {name: "CompressInt64x8", argLength: 2, commutative: false}, - {name: "EqualInt64x8", argLength: 2, commutative: true}, - {name: "EqualMaskedInt64x8", argLength: 3, commutative: true}, - {name: "GreaterInt64x8", argLength: 2, commutative: false}, - {name: "GreaterEqualInt64x8", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedInt64x8", argLength: 3, commutative: false}, - {name: "GreaterMaskedInt64x8", argLength: 3, commutative: false}, - {name: "LessInt64x8", argLength: 2, commutative: false}, - {name: "LessEqualInt64x8", argLength: 2, commutative: false}, - {name: "LessEqualMaskedInt64x8", argLength: 3, commutative: false}, - {name: "LessMaskedInt64x8", argLength: 3, commutative: false}, - {name: "MaxInt64x8", argLength: 2, commutative: true}, - {name: "MaxMaskedInt64x8", argLength: 3, commutative: true}, - {name: "MinInt64x8", argLength: 2, commutative: true}, - {name: "MinMaskedInt64x8", argLength: 3, commutative: true}, - {name: "MulEvenWidenInt64x8", argLength: 2, commutative: true}, - {name: "MulEvenWidenMaskedInt64x8", argLength: 3, commutative: true}, - {name: "MulLowInt64x8", argLength: 2, commutative: true}, - {name: "MulLowMaskedInt64x8", argLength: 3, commutative: true}, - {name: "NotEqualInt64x8", argLength: 2, commutative: true}, - {name: "NotEqualMaskedInt64x8", argLength: 3, commutative: true}, - {name: "OrInt64x8", argLength: 2, commutative: true}, - {name: "OrMaskedInt64x8", argLength: 3, commutative: true}, - {name: "PopCountInt64x8", argLength: 1, commutative: false}, - {name: "PopCountMaskedInt64x8", argLength: 2, commutative: false}, - {name: "RotateLeftInt64x8", argLength: 2, commutative: false}, - {name: "RotateLeftMaskedInt64x8", argLength: 3, commutative: false}, - {name: "RotateRightInt64x8", argLength: 2, commutative: false}, - {name: "RotateRightMaskedInt64x8", argLength: 3, commutative: false}, - {name: "ShiftAllLeftInt64x8", argLength: 2, commutative: false}, - {name: 
"ShiftAllLeftMaskedInt64x8", argLength: 3, commutative: false}, - {name: "ShiftAllRightInt64x8", argLength: 2, commutative: false}, - {name: "ShiftAllRightMaskedInt64x8", argLength: 3, commutative: false}, - {name: "ShiftLeftInt64x8", argLength: 2, commutative: false}, - {name: "ShiftLeftAndFillUpperFromInt64x8", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedInt64x8", argLength: 4, commutative: false}, - {name: "ShiftLeftMaskedInt64x8", argLength: 3, commutative: false}, - {name: "ShiftRightInt64x8", argLength: 2, commutative: false}, - {name: "ShiftRightAndFillUpperFromInt64x8", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedInt64x8", argLength: 4, commutative: false}, - {name: "ShiftRightMaskedInt64x8", argLength: 3, commutative: false}, - {name: "SubInt64x8", argLength: 2, commutative: false}, {name: "SubMaskedInt64x8", argLength: 3, commutative: false}, - {name: "XorInt64x8", argLength: 2, commutative: true}, - {name: "XorMaskedInt64x8", argLength: 3, commutative: true}, - {name: "AbsoluteInt8x16", argLength: 1, commutative: false}, - {name: "AbsoluteMaskedInt8x16", argLength: 2, commutative: false}, - {name: "AddInt8x16", argLength: 2, commutative: true}, - {name: "AddMaskedInt8x16", argLength: 3, commutative: true}, - {name: "AndInt8x16", argLength: 2, commutative: true}, - {name: "AndNotInt8x16", argLength: 2, commutative: false}, - {name: "CompressInt8x16", argLength: 2, commutative: false}, - {name: "EqualInt8x16", argLength: 2, commutative: true}, - {name: "EqualMaskedInt8x16", argLength: 3, commutative: true}, - {name: "GreaterInt8x16", argLength: 2, commutative: false}, - {name: "GreaterEqualInt8x16", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedInt8x16", argLength: 3, commutative: false}, - {name: "GreaterMaskedInt8x16", argLength: 3, commutative: false}, - {name: "LessInt8x16", argLength: 2, commutative: false}, - {name: "LessEqualInt8x16", argLength: 2, commutative: false}, - {name: "LessEqualMaskedInt8x16", argLength: 3, commutative: false}, - {name: "LessMaskedInt8x16", argLength: 3, commutative: false}, - {name: "MaxInt8x16", argLength: 2, commutative: true}, - {name: "MaxMaskedInt8x16", argLength: 3, commutative: true}, - {name: "MinInt8x16", argLength: 2, commutative: true}, - {name: "MinMaskedInt8x16", argLength: 3, commutative: true}, - {name: "NotEqualInt8x16", argLength: 2, commutative: true}, - {name: "NotEqualMaskedInt8x16", argLength: 3, commutative: true}, - {name: "OrInt8x16", argLength: 2, commutative: true}, - {name: "PopCountInt8x16", argLength: 1, commutative: false}, - {name: "PopCountMaskedInt8x16", argLength: 2, commutative: false}, - {name: "SaturatedAddInt8x16", argLength: 2, commutative: true}, - {name: "SaturatedAddMaskedInt8x16", argLength: 3, commutative: true}, - {name: "SaturatedSubInt8x16", argLength: 2, commutative: false}, - {name: "SaturatedSubMaskedInt8x16", argLength: 3, commutative: false}, - {name: "SignInt8x16", argLength: 2, commutative: false}, - {name: "SubInt8x16", argLength: 2, commutative: false}, - {name: "SubMaskedInt8x16", argLength: 3, commutative: false}, - {name: "XorInt8x16", argLength: 2, commutative: true}, - {name: "AbsoluteInt8x32", argLength: 1, commutative: false}, - {name: "AbsoluteMaskedInt8x32", argLength: 2, commutative: false}, - {name: "AddInt8x32", argLength: 2, commutative: true}, - {name: "AddMaskedInt8x32", argLength: 3, commutative: true}, - {name: "AndInt8x32", argLength: 2, commutative: true}, - {name: "AndNotInt8x32", argLength: 
2, commutative: false}, - {name: "CompressInt8x32", argLength: 2, commutative: false}, - {name: "EqualInt8x32", argLength: 2, commutative: true}, - {name: "EqualMaskedInt8x32", argLength: 3, commutative: true}, - {name: "GreaterInt8x32", argLength: 2, commutative: false}, - {name: "GreaterEqualInt8x32", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedInt8x32", argLength: 3, commutative: false}, - {name: "GreaterMaskedInt8x32", argLength: 3, commutative: false}, - {name: "LessInt8x32", argLength: 2, commutative: false}, - {name: "LessEqualInt8x32", argLength: 2, commutative: false}, - {name: "LessEqualMaskedInt8x32", argLength: 3, commutative: false}, - {name: "LessMaskedInt8x32", argLength: 3, commutative: false}, - {name: "MaxInt8x32", argLength: 2, commutative: true}, - {name: "MaxMaskedInt8x32", argLength: 3, commutative: true}, - {name: "MinInt8x32", argLength: 2, commutative: true}, - {name: "MinMaskedInt8x32", argLength: 3, commutative: true}, - {name: "NotEqualInt8x32", argLength: 2, commutative: true}, - {name: "NotEqualMaskedInt8x32", argLength: 3, commutative: true}, - {name: "OrInt8x32", argLength: 2, commutative: true}, - {name: "PopCountInt8x32", argLength: 1, commutative: false}, - {name: "PopCountMaskedInt8x32", argLength: 2, commutative: false}, - {name: "SaturatedAddInt8x32", argLength: 2, commutative: true}, - {name: "SaturatedAddMaskedInt8x32", argLength: 3, commutative: true}, - {name: "SaturatedSubInt8x32", argLength: 2, commutative: false}, - {name: "SaturatedSubMaskedInt8x32", argLength: 3, commutative: false}, - {name: "SignInt8x32", argLength: 2, commutative: false}, - {name: "SubInt8x32", argLength: 2, commutative: false}, - {name: "SubMaskedInt8x32", argLength: 3, commutative: false}, - {name: "XorInt8x32", argLength: 2, commutative: true}, - {name: "AbsoluteInt8x64", argLength: 1, commutative: false}, - {name: "AbsoluteMaskedInt8x64", argLength: 2, commutative: false}, - {name: "AddInt8x64", argLength: 2, commutative: true}, - {name: "AddMaskedInt8x64", argLength: 3, commutative: true}, - {name: "CompressInt8x64", argLength: 2, commutative: false}, - {name: "EqualInt8x64", argLength: 2, commutative: true}, - {name: "EqualMaskedInt8x64", argLength: 3, commutative: true}, - {name: "GreaterInt8x64", argLength: 2, commutative: false}, - {name: "GreaterEqualInt8x64", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedInt8x64", argLength: 3, commutative: false}, - {name: "GreaterMaskedInt8x64", argLength: 3, commutative: false}, - {name: "LessInt8x64", argLength: 2, commutative: false}, - {name: "LessEqualInt8x64", argLength: 2, commutative: false}, - {name: "LessEqualMaskedInt8x64", argLength: 3, commutative: false}, - {name: "LessMaskedInt8x64", argLength: 3, commutative: false}, - {name: "MaxInt8x64", argLength: 2, commutative: true}, - {name: "MaxMaskedInt8x64", argLength: 3, commutative: true}, - {name: "MinInt8x64", argLength: 2, commutative: true}, - {name: "MinMaskedInt8x64", argLength: 3, commutative: true}, - {name: "NotEqualInt8x64", argLength: 2, commutative: true}, - {name: "NotEqualMaskedInt8x64", argLength: 3, commutative: true}, - {name: "PopCountInt8x64", argLength: 1, commutative: false}, - {name: "PopCountMaskedInt8x64", argLength: 2, commutative: false}, - {name: "SaturatedAddInt8x64", argLength: 2, commutative: true}, - {name: "SaturatedAddMaskedInt8x64", argLength: 3, commutative: true}, - {name: "SaturatedSubInt8x64", argLength: 2, commutative: false}, - {name: "SaturatedSubMaskedInt8x64", argLength: 3, commutative: 
false}, - {name: "SubInt8x64", argLength: 2, commutative: false}, - {name: "SubMaskedInt8x64", argLength: 3, commutative: false}, - {name: "AddUint16x16", argLength: 2, commutative: true}, - {name: "AddMaskedUint16x16", argLength: 3, commutative: true}, - {name: "AndUint16x16", argLength: 2, commutative: true}, - {name: "AndNotUint16x16", argLength: 2, commutative: false}, - {name: "AverageUint16x16", argLength: 2, commutative: true}, - {name: "AverageMaskedUint16x16", argLength: 3, commutative: true}, - {name: "CompressUint16x16", argLength: 2, commutative: false}, - {name: "EqualUint16x16", argLength: 2, commutative: true}, - {name: "EqualMaskedUint16x16", argLength: 3, commutative: true}, - {name: "GreaterUint16x16", argLength: 2, commutative: false}, - {name: "GreaterEqualUint16x16", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedUint16x16", argLength: 3, commutative: false}, - {name: "GreaterMaskedUint16x16", argLength: 3, commutative: false}, - {name: "LessUint16x16", argLength: 2, commutative: false}, - {name: "LessEqualUint16x16", argLength: 2, commutative: false}, - {name: "LessEqualMaskedUint16x16", argLength: 3, commutative: false}, - {name: "LessMaskedUint16x16", argLength: 3, commutative: false}, - {name: "MaxUint16x16", argLength: 2, commutative: true}, - {name: "MaxMaskedUint16x16", argLength: 3, commutative: true}, - {name: "MinUint16x16", argLength: 2, commutative: true}, - {name: "MinMaskedUint16x16", argLength: 3, commutative: true}, - {name: "MulHighUint16x16", argLength: 2, commutative: true}, - {name: "MulHighMaskedUint16x16", argLength: 3, commutative: true}, - {name: "NotEqualUint16x16", argLength: 2, commutative: true}, - {name: "NotEqualMaskedUint16x16", argLength: 3, commutative: true}, - {name: "OrUint16x16", argLength: 2, commutative: true}, - {name: "PairwiseAddUint16x16", argLength: 2, commutative: false}, - {name: "PairwiseSubUint16x16", argLength: 2, commutative: false}, - {name: "PermuteInt16x16", argLength: 2, commutative: false}, - {name: "PermuteUint16x16", argLength: 2, commutative: false}, - {name: "Permute2Uint16x16", argLength: 3, commutative: false}, - {name: "Permute2Int16x16", argLength: 3, commutative: false}, - {name: "Permute2MaskedUint16x16", argLength: 4, commutative: false}, - {name: "Permute2MaskedInt16x16", argLength: 4, commutative: false}, - {name: "PermuteMaskedInt16x16", argLength: 3, commutative: false}, - {name: "PermuteMaskedUint16x16", argLength: 3, commutative: false}, - {name: "PopCountUint16x16", argLength: 1, commutative: false}, - {name: "PopCountMaskedUint16x16", argLength: 2, commutative: false}, - {name: "SaturatedAddUint16x16", argLength: 2, commutative: true}, - {name: "SaturatedAddMaskedUint16x16", argLength: 3, commutative: true}, - {name: "SaturatedSubUint16x16", argLength: 2, commutative: false}, - {name: "SaturatedSubMaskedUint16x16", argLength: 3, commutative: false}, - {name: "ShiftAllLeftUint16x16", argLength: 2, commutative: false}, - {name: "ShiftAllLeftMaskedUint16x16", argLength: 3, commutative: false}, - {name: "ShiftAllRightUint16x16", argLength: 2, commutative: false}, - {name: "ShiftAllRightMaskedUint16x16", argLength: 3, commutative: false}, - {name: "ShiftLeftUint16x16", argLength: 2, commutative: false}, - {name: "ShiftLeftAndFillUpperFromUint16x16", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedUint16x16", argLength: 4, commutative: false}, - {name: "ShiftLeftMaskedUint16x16", argLength: 3, commutative: false}, - {name: "ShiftRightUint16x16", argLength: 2, 
commutative: false}, - {name: "ShiftRightAndFillUpperFromUint16x16", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedUint16x16", argLength: 4, commutative: false}, - {name: "ShiftRightMaskedUint16x16", argLength: 3, commutative: false}, - {name: "SubUint16x16", argLength: 2, commutative: false}, + {name: "SubMaskedUint8x16", argLength: 3, commutative: false}, + {name: "SubMaskedUint8x32", argLength: 3, commutative: false}, + {name: "SubMaskedUint8x64", argLength: 3, commutative: false}, + {name: "SubMaskedUint16x8", argLength: 3, commutative: false}, {name: "SubMaskedUint16x16", argLength: 3, commutative: false}, - {name: "XorUint16x16", argLength: 2, commutative: true}, - {name: "AddUint16x32", argLength: 2, commutative: true}, - {name: "AddMaskedUint16x32", argLength: 3, commutative: true}, - {name: "AverageUint16x32", argLength: 2, commutative: true}, - {name: "AverageMaskedUint16x32", argLength: 3, commutative: true}, - {name: "CompressUint16x32", argLength: 2, commutative: false}, - {name: "EqualUint16x32", argLength: 2, commutative: true}, - {name: "EqualMaskedUint16x32", argLength: 3, commutative: true}, - {name: "GreaterUint16x32", argLength: 2, commutative: false}, - {name: "GreaterEqualUint16x32", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedUint16x32", argLength: 3, commutative: false}, - {name: "GreaterMaskedUint16x32", argLength: 3, commutative: false}, - {name: "LessUint16x32", argLength: 2, commutative: false}, - {name: "LessEqualUint16x32", argLength: 2, commutative: false}, - {name: "LessEqualMaskedUint16x32", argLength: 3, commutative: false}, - {name: "LessMaskedUint16x32", argLength: 3, commutative: false}, - {name: "MaxUint16x32", argLength: 2, commutative: true}, - {name: "MaxMaskedUint16x32", argLength: 3, commutative: true}, - {name: "MinUint16x32", argLength: 2, commutative: true}, - {name: "MinMaskedUint16x32", argLength: 3, commutative: true}, - {name: "MulHighUint16x32", argLength: 2, commutative: true}, - {name: "MulHighMaskedUint16x32", argLength: 3, commutative: true}, - {name: "NotEqualUint16x32", argLength: 2, commutative: true}, - {name: "NotEqualMaskedUint16x32", argLength: 3, commutative: true}, - {name: "PermuteUint16x32", argLength: 2, commutative: false}, - {name: "PermuteInt16x32", argLength: 2, commutative: false}, - {name: "Permute2Uint16x32", argLength: 3, commutative: false}, - {name: "Permute2Int16x32", argLength: 3, commutative: false}, - {name: "Permute2MaskedUint16x32", argLength: 4, commutative: false}, - {name: "Permute2MaskedInt16x32", argLength: 4, commutative: false}, - {name: "PermuteMaskedInt16x32", argLength: 3, commutative: false}, - {name: "PermuteMaskedUint16x32", argLength: 3, commutative: false}, - {name: "PopCountUint16x32", argLength: 1, commutative: false}, - {name: "PopCountMaskedUint16x32", argLength: 2, commutative: false}, - {name: "SaturatedAddUint16x32", argLength: 2, commutative: true}, - {name: "SaturatedAddMaskedUint16x32", argLength: 3, commutative: true}, - {name: "SaturatedSubUint16x32", argLength: 2, commutative: false}, - {name: "SaturatedSubMaskedUint16x32", argLength: 3, commutative: false}, - {name: "ShiftAllLeftUint16x32", argLength: 2, commutative: false}, - {name: "ShiftAllLeftMaskedUint16x32", argLength: 3, commutative: false}, - {name: "ShiftAllRightUint16x32", argLength: 2, commutative: false}, - {name: "ShiftAllRightMaskedUint16x32", argLength: 3, commutative: false}, - {name: "ShiftLeftUint16x32", argLength: 2, commutative: false}, - {name: 
"ShiftLeftAndFillUpperFromUint16x32", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedUint16x32", argLength: 4, commutative: false}, - {name: "ShiftLeftMaskedUint16x32", argLength: 3, commutative: false}, - {name: "ShiftRightUint16x32", argLength: 2, commutative: false}, - {name: "ShiftRightAndFillUpperFromUint16x32", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedUint16x32", argLength: 4, commutative: false}, - {name: "ShiftRightMaskedUint16x32", argLength: 3, commutative: false}, - {name: "SubUint16x32", argLength: 2, commutative: false}, {name: "SubMaskedUint16x32", argLength: 3, commutative: false}, - {name: "AddUint16x8", argLength: 2, commutative: true}, - {name: "AddMaskedUint16x8", argLength: 3, commutative: true}, - {name: "AndUint16x8", argLength: 2, commutative: true}, - {name: "AndNotUint16x8", argLength: 2, commutative: false}, - {name: "AverageUint16x8", argLength: 2, commutative: true}, - {name: "AverageMaskedUint16x8", argLength: 3, commutative: true}, - {name: "CompressUint16x8", argLength: 2, commutative: false}, - {name: "EqualUint16x8", argLength: 2, commutative: true}, - {name: "EqualMaskedUint16x8", argLength: 3, commutative: true}, - {name: "GreaterUint16x8", argLength: 2, commutative: false}, - {name: "GreaterEqualUint16x8", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedUint16x8", argLength: 3, commutative: false}, - {name: "GreaterMaskedUint16x8", argLength: 3, commutative: false}, - {name: "LessUint16x8", argLength: 2, commutative: false}, - {name: "LessEqualUint16x8", argLength: 2, commutative: false}, - {name: "LessEqualMaskedUint16x8", argLength: 3, commutative: false}, - {name: "LessMaskedUint16x8", argLength: 3, commutative: false}, - {name: "MaxUint16x8", argLength: 2, commutative: true}, - {name: "MaxMaskedUint16x8", argLength: 3, commutative: true}, - {name: "MinUint16x8", argLength: 2, commutative: true}, - {name: "MinMaskedUint16x8", argLength: 3, commutative: true}, - {name: "MulHighUint16x8", argLength: 2, commutative: true}, - {name: "MulHighMaskedUint16x8", argLength: 3, commutative: true}, - {name: "NotEqualUint16x8", argLength: 2, commutative: true}, - {name: "NotEqualMaskedUint16x8", argLength: 3, commutative: true}, - {name: "OrUint16x8", argLength: 2, commutative: true}, - {name: "PairwiseAddUint16x8", argLength: 2, commutative: false}, - {name: "PairwiseSubUint16x8", argLength: 2, commutative: false}, - {name: "PermuteInt16x8", argLength: 2, commutative: false}, - {name: "PermuteUint16x8", argLength: 2, commutative: false}, - {name: "Permute2Uint16x8", argLength: 3, commutative: false}, - {name: "Permute2Int16x8", argLength: 3, commutative: false}, - {name: "Permute2MaskedInt16x8", argLength: 4, commutative: false}, - {name: "Permute2MaskedUint16x8", argLength: 4, commutative: false}, - {name: "PermuteMaskedInt16x8", argLength: 3, commutative: false}, - {name: "PermuteMaskedUint16x8", argLength: 3, commutative: false}, - {name: "PopCountUint16x8", argLength: 1, commutative: false}, - {name: "PopCountMaskedUint16x8", argLength: 2, commutative: false}, - {name: "SaturatedAddUint16x8", argLength: 2, commutative: true}, - {name: "SaturatedAddMaskedUint16x8", argLength: 3, commutative: true}, - {name: "SaturatedSubUint16x8", argLength: 2, commutative: false}, - {name: "SaturatedSubMaskedUint16x8", argLength: 3, commutative: false}, - {name: "ShiftAllLeftUint16x8", argLength: 2, commutative: false}, - {name: "ShiftAllLeftMaskedUint16x8", argLength: 3, commutative: false}, - 
{name: "ShiftAllRightUint16x8", argLength: 2, commutative: false}, - {name: "ShiftAllRightMaskedUint16x8", argLength: 3, commutative: false}, - {name: "ShiftLeftUint16x8", argLength: 2, commutative: false}, - {name: "ShiftLeftAndFillUpperFromUint16x8", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedUint16x8", argLength: 4, commutative: false}, - {name: "ShiftLeftMaskedUint16x8", argLength: 3, commutative: false}, - {name: "ShiftRightUint16x8", argLength: 2, commutative: false}, - {name: "ShiftRightAndFillUpperFromUint16x8", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedUint16x8", argLength: 4, commutative: false}, - {name: "ShiftRightMaskedUint16x8", argLength: 3, commutative: false}, - {name: "SubUint16x8", argLength: 2, commutative: false}, - {name: "SubMaskedUint16x8", argLength: 3, commutative: false}, - {name: "XorUint16x8", argLength: 2, commutative: true}, - {name: "AddUint32x16", argLength: 2, commutative: true}, - {name: "AddMaskedUint32x16", argLength: 3, commutative: true}, - {name: "AndUint32x16", argLength: 2, commutative: true}, - {name: "AndMaskedUint32x16", argLength: 3, commutative: true}, - {name: "AndNotUint32x16", argLength: 2, commutative: false}, - {name: "AndNotMaskedUint32x16", argLength: 3, commutative: false}, - {name: "CompressUint32x16", argLength: 2, commutative: false}, - {name: "EqualUint32x16", argLength: 2, commutative: true}, - {name: "EqualMaskedUint32x16", argLength: 3, commutative: true}, - {name: "GreaterUint32x16", argLength: 2, commutative: false}, - {name: "GreaterEqualUint32x16", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedUint32x16", argLength: 3, commutative: false}, - {name: "GreaterMaskedUint32x16", argLength: 3, commutative: false}, - {name: "LessUint32x16", argLength: 2, commutative: false}, - {name: "LessEqualUint32x16", argLength: 2, commutative: false}, - {name: "LessEqualMaskedUint32x16", argLength: 3, commutative: false}, - {name: "LessMaskedUint32x16", argLength: 3, commutative: false}, - {name: "MaxUint32x16", argLength: 2, commutative: true}, - {name: "MaxMaskedUint32x16", argLength: 3, commutative: true}, - {name: "MinUint32x16", argLength: 2, commutative: true}, - {name: "MinMaskedUint32x16", argLength: 3, commutative: true}, - {name: "NotEqualUint32x16", argLength: 2, commutative: true}, - {name: "NotEqualMaskedUint32x16", argLength: 3, commutative: true}, - {name: "OrUint32x16", argLength: 2, commutative: true}, - {name: "OrMaskedUint32x16", argLength: 3, commutative: true}, - {name: "PermuteInt32x16", argLength: 2, commutative: false}, - {name: "PermuteFloat32x16", argLength: 2, commutative: false}, - {name: "PermuteUint32x16", argLength: 2, commutative: false}, - {name: "Permute2Uint32x16", argLength: 3, commutative: false}, - {name: "Permute2Float32x16", argLength: 3, commutative: false}, - {name: "Permute2Int32x16", argLength: 3, commutative: false}, - {name: "Permute2MaskedUint32x16", argLength: 4, commutative: false}, - {name: "Permute2MaskedInt32x16", argLength: 4, commutative: false}, - {name: "Permute2MaskedFloat32x16", argLength: 4, commutative: false}, - {name: "PermuteMaskedFloat32x16", argLength: 3, commutative: false}, - {name: "PermuteMaskedInt32x16", argLength: 3, commutative: false}, - {name: "PermuteMaskedUint32x16", argLength: 3, commutative: false}, - {name: "PopCountUint32x16", argLength: 1, commutative: false}, - {name: "PopCountMaskedUint32x16", argLength: 2, commutative: false}, - {name: "RotateLeftUint32x16", argLength: 2, 
commutative: false}, - {name: "RotateLeftMaskedUint32x16", argLength: 3, commutative: false}, - {name: "RotateRightUint32x16", argLength: 2, commutative: false}, - {name: "RotateRightMaskedUint32x16", argLength: 3, commutative: false}, - {name: "ShiftAllLeftUint32x16", argLength: 2, commutative: false}, - {name: "ShiftAllLeftMaskedUint32x16", argLength: 3, commutative: false}, - {name: "ShiftAllRightUint32x16", argLength: 2, commutative: false}, - {name: "ShiftAllRightMaskedUint32x16", argLength: 3, commutative: false}, - {name: "ShiftLeftUint32x16", argLength: 2, commutative: false}, - {name: "ShiftLeftAndFillUpperFromUint32x16", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedUint32x16", argLength: 4, commutative: false}, - {name: "ShiftLeftMaskedUint32x16", argLength: 3, commutative: false}, - {name: "ShiftRightUint32x16", argLength: 2, commutative: false}, - {name: "ShiftRightAndFillUpperFromUint32x16", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedUint32x16", argLength: 4, commutative: false}, - {name: "ShiftRightMaskedUint32x16", argLength: 3, commutative: false}, - {name: "SubUint32x16", argLength: 2, commutative: false}, - {name: "SubMaskedUint32x16", argLength: 3, commutative: false}, - {name: "XorUint32x16", argLength: 2, commutative: true}, - {name: "XorMaskedUint32x16", argLength: 3, commutative: true}, - {name: "AddUint32x4", argLength: 2, commutative: true}, - {name: "AddMaskedUint32x4", argLength: 3, commutative: true}, - {name: "AndUint32x4", argLength: 2, commutative: true}, - {name: "AndMaskedUint32x4", argLength: 3, commutative: true}, - {name: "AndNotUint32x4", argLength: 2, commutative: false}, - {name: "AndNotMaskedUint32x4", argLength: 3, commutative: false}, - {name: "CompressUint32x4", argLength: 2, commutative: false}, - {name: "EqualUint32x4", argLength: 2, commutative: true}, - {name: "EqualMaskedUint32x4", argLength: 3, commutative: true}, - {name: "GreaterUint32x4", argLength: 2, commutative: false}, - {name: "GreaterEqualUint32x4", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedUint32x4", argLength: 3, commutative: false}, - {name: "GreaterMaskedUint32x4", argLength: 3, commutative: false}, - {name: "LessUint32x4", argLength: 2, commutative: false}, - {name: "LessEqualUint32x4", argLength: 2, commutative: false}, - {name: "LessEqualMaskedUint32x4", argLength: 3, commutative: false}, - {name: "LessMaskedUint32x4", argLength: 3, commutative: false}, - {name: "MaxUint32x4", argLength: 2, commutative: true}, - {name: "MaxMaskedUint32x4", argLength: 3, commutative: true}, - {name: "MinUint32x4", argLength: 2, commutative: true}, - {name: "MinMaskedUint32x4", argLength: 3, commutative: true}, - {name: "MulEvenWidenUint32x4", argLength: 2, commutative: true}, - {name: "NotEqualUint32x4", argLength: 2, commutative: true}, - {name: "NotEqualMaskedUint32x4", argLength: 3, commutative: true}, - {name: "OrUint32x4", argLength: 2, commutative: true}, - {name: "OrMaskedUint32x4", argLength: 3, commutative: true}, - {name: "PairwiseAddUint32x4", argLength: 2, commutative: false}, - {name: "PairwiseSubUint32x4", argLength: 2, commutative: false}, - {name: "Permute2Float32x4", argLength: 3, commutative: false}, - {name: "Permute2Uint32x4", argLength: 3, commutative: false}, - {name: "Permute2Int32x4", argLength: 3, commutative: false}, - {name: "Permute2MaskedInt32x4", argLength: 4, commutative: false}, - {name: "Permute2MaskedUint32x4", argLength: 4, commutative: false}, - {name: 
"Permute2MaskedFloat32x4", argLength: 4, commutative: false}, - {name: "PopCountUint32x4", argLength: 1, commutative: false}, - {name: "PopCountMaskedUint32x4", argLength: 2, commutative: false}, - {name: "RotateLeftUint32x4", argLength: 2, commutative: false}, - {name: "RotateLeftMaskedUint32x4", argLength: 3, commutative: false}, - {name: "RotateRightUint32x4", argLength: 2, commutative: false}, - {name: "RotateRightMaskedUint32x4", argLength: 3, commutative: false}, - {name: "ShiftAllLeftUint32x4", argLength: 2, commutative: false}, - {name: "ShiftAllLeftMaskedUint32x4", argLength: 3, commutative: false}, - {name: "ShiftAllRightUint32x4", argLength: 2, commutative: false}, - {name: "ShiftAllRightMaskedUint32x4", argLength: 3, commutative: false}, - {name: "ShiftLeftUint32x4", argLength: 2, commutative: false}, - {name: "ShiftLeftAndFillUpperFromUint32x4", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedUint32x4", argLength: 4, commutative: false}, - {name: "ShiftLeftMaskedUint32x4", argLength: 3, commutative: false}, - {name: "ShiftRightUint32x4", argLength: 2, commutative: false}, - {name: "ShiftRightAndFillUpperFromUint32x4", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedUint32x4", argLength: 4, commutative: false}, - {name: "ShiftRightMaskedUint32x4", argLength: 3, commutative: false}, - {name: "SubUint32x4", argLength: 2, commutative: false}, - {name: "SubMaskedUint32x4", argLength: 3, commutative: false}, - {name: "XorUint32x4", argLength: 2, commutative: true}, - {name: "XorMaskedUint32x4", argLength: 3, commutative: true}, - {name: "AddUint32x8", argLength: 2, commutative: true}, - {name: "AddMaskedUint32x8", argLength: 3, commutative: true}, - {name: "AndUint32x8", argLength: 2, commutative: true}, - {name: "AndMaskedUint32x8", argLength: 3, commutative: true}, - {name: "AndNotUint32x8", argLength: 2, commutative: false}, - {name: "AndNotMaskedUint32x8", argLength: 3, commutative: false}, - {name: "CompressUint32x8", argLength: 2, commutative: false}, - {name: "EqualUint32x8", argLength: 2, commutative: true}, - {name: "EqualMaskedUint32x8", argLength: 3, commutative: true}, - {name: "GreaterUint32x8", argLength: 2, commutative: false}, - {name: "GreaterEqualUint32x8", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedUint32x8", argLength: 3, commutative: false}, - {name: "GreaterMaskedUint32x8", argLength: 3, commutative: false}, - {name: "LessUint32x8", argLength: 2, commutative: false}, - {name: "LessEqualUint32x8", argLength: 2, commutative: false}, - {name: "LessEqualMaskedUint32x8", argLength: 3, commutative: false}, - {name: "LessMaskedUint32x8", argLength: 3, commutative: false}, - {name: "MaxUint32x8", argLength: 2, commutative: true}, - {name: "MaxMaskedUint32x8", argLength: 3, commutative: true}, - {name: "MinUint32x8", argLength: 2, commutative: true}, - {name: "MinMaskedUint32x8", argLength: 3, commutative: true}, - {name: "MulEvenWidenUint32x8", argLength: 2, commutative: true}, - {name: "NotEqualUint32x8", argLength: 2, commutative: true}, - {name: "NotEqualMaskedUint32x8", argLength: 3, commutative: true}, - {name: "OrUint32x8", argLength: 2, commutative: true}, - {name: "OrMaskedUint32x8", argLength: 3, commutative: true}, - {name: "PairwiseAddUint32x8", argLength: 2, commutative: false}, - {name: "PairwiseSubUint32x8", argLength: 2, commutative: false}, - {name: "PermuteUint32x8", argLength: 2, commutative: false}, - {name: "PermuteFloat32x8", argLength: 2, commutative: false}, - {name: 
"PermuteInt32x8", argLength: 2, commutative: false}, - {name: "Permute2Int32x8", argLength: 3, commutative: false}, - {name: "Permute2Float32x8", argLength: 3, commutative: false}, - {name: "Permute2Uint32x8", argLength: 3, commutative: false}, - {name: "Permute2MaskedFloat32x8", argLength: 4, commutative: false}, - {name: "Permute2MaskedUint32x8", argLength: 4, commutative: false}, - {name: "Permute2MaskedInt32x8", argLength: 4, commutative: false}, - {name: "PermuteMaskedInt32x8", argLength: 3, commutative: false}, - {name: "PermuteMaskedUint32x8", argLength: 3, commutative: false}, - {name: "PermuteMaskedFloat32x8", argLength: 3, commutative: false}, - {name: "PopCountUint32x8", argLength: 1, commutative: false}, - {name: "PopCountMaskedUint32x8", argLength: 2, commutative: false}, - {name: "RotateLeftUint32x8", argLength: 2, commutative: false}, - {name: "RotateLeftMaskedUint32x8", argLength: 3, commutative: false}, - {name: "RotateRightUint32x8", argLength: 2, commutative: false}, - {name: "RotateRightMaskedUint32x8", argLength: 3, commutative: false}, - {name: "ShiftAllLeftUint32x8", argLength: 2, commutative: false}, - {name: "ShiftAllLeftMaskedUint32x8", argLength: 3, commutative: false}, - {name: "ShiftAllRightUint32x8", argLength: 2, commutative: false}, - {name: "ShiftAllRightMaskedUint32x8", argLength: 3, commutative: false}, - {name: "ShiftLeftUint32x8", argLength: 2, commutative: false}, - {name: "ShiftLeftAndFillUpperFromUint32x8", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedUint32x8", argLength: 4, commutative: false}, - {name: "ShiftLeftMaskedUint32x8", argLength: 3, commutative: false}, - {name: "ShiftRightUint32x8", argLength: 2, commutative: false}, - {name: "ShiftRightAndFillUpperFromUint32x8", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedUint32x8", argLength: 4, commutative: false}, - {name: "ShiftRightMaskedUint32x8", argLength: 3, commutative: false}, - {name: "SubUint32x8", argLength: 2, commutative: false}, - {name: "SubMaskedUint32x8", argLength: 3, commutative: false}, - {name: "XorUint32x8", argLength: 2, commutative: true}, - {name: "XorMaskedUint32x8", argLength: 3, commutative: true}, - {name: "AddUint64x2", argLength: 2, commutative: true}, - {name: "AddMaskedUint64x2", argLength: 3, commutative: true}, - {name: "AndUint64x2", argLength: 2, commutative: true}, - {name: "AndMaskedUint64x2", argLength: 3, commutative: true}, - {name: "AndNotUint64x2", argLength: 2, commutative: false}, - {name: "AndNotMaskedUint64x2", argLength: 3, commutative: false}, - {name: "CompressUint64x2", argLength: 2, commutative: false}, - {name: "EqualUint64x2", argLength: 2, commutative: true}, - {name: "EqualMaskedUint64x2", argLength: 3, commutative: true}, - {name: "GreaterUint64x2", argLength: 2, commutative: false}, - {name: "GreaterEqualUint64x2", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedUint64x2", argLength: 3, commutative: false}, - {name: "GreaterMaskedUint64x2", argLength: 3, commutative: false}, - {name: "LessUint64x2", argLength: 2, commutative: false}, - {name: "LessEqualUint64x2", argLength: 2, commutative: false}, - {name: "LessEqualMaskedUint64x2", argLength: 3, commutative: false}, - {name: "LessMaskedUint64x2", argLength: 3, commutative: false}, - {name: "MaxUint64x2", argLength: 2, commutative: true}, - {name: "MaxMaskedUint64x2", argLength: 3, commutative: true}, - {name: "MinUint64x2", argLength: 2, commutative: true}, - {name: "MinMaskedUint64x2", argLength: 3, commutative: 
true}, - {name: "MulEvenWidenUint64x2", argLength: 2, commutative: true}, - {name: "MulEvenWidenMaskedUint64x2", argLength: 3, commutative: true}, - {name: "NotEqualUint64x2", argLength: 2, commutative: true}, - {name: "NotEqualMaskedUint64x2", argLength: 3, commutative: true}, - {name: "OrUint64x2", argLength: 2, commutative: true}, - {name: "OrMaskedUint64x2", argLength: 3, commutative: true}, - {name: "Permute2Float64x2", argLength: 3, commutative: false}, - {name: "Permute2Uint64x2", argLength: 3, commutative: false}, - {name: "Permute2Int64x2", argLength: 3, commutative: false}, - {name: "Permute2MaskedInt64x2", argLength: 4, commutative: false}, - {name: "Permute2MaskedFloat64x2", argLength: 4, commutative: false}, - {name: "Permute2MaskedUint64x2", argLength: 4, commutative: false}, - {name: "PopCountUint64x2", argLength: 1, commutative: false}, - {name: "PopCountMaskedUint64x2", argLength: 2, commutative: false}, - {name: "RotateLeftUint64x2", argLength: 2, commutative: false}, - {name: "RotateLeftMaskedUint64x2", argLength: 3, commutative: false}, - {name: "RotateRightUint64x2", argLength: 2, commutative: false}, - {name: "RotateRightMaskedUint64x2", argLength: 3, commutative: false}, - {name: "ShiftAllLeftUint64x2", argLength: 2, commutative: false}, - {name: "ShiftAllLeftMaskedUint64x2", argLength: 3, commutative: false}, - {name: "ShiftAllRightUint64x2", argLength: 2, commutative: false}, - {name: "ShiftAllRightMaskedUint64x2", argLength: 3, commutative: false}, - {name: "ShiftLeftUint64x2", argLength: 2, commutative: false}, - {name: "ShiftLeftAndFillUpperFromUint64x2", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedUint64x2", argLength: 4, commutative: false}, - {name: "ShiftLeftMaskedUint64x2", argLength: 3, commutative: false}, - {name: "ShiftRightUint64x2", argLength: 2, commutative: false}, - {name: "ShiftRightAndFillUpperFromUint64x2", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedUint64x2", argLength: 4, commutative: false}, - {name: "ShiftRightMaskedUint64x2", argLength: 3, commutative: false}, - {name: "SubUint64x2", argLength: 2, commutative: false}, - {name: "SubMaskedUint64x2", argLength: 3, commutative: false}, - {name: "XorUint64x2", argLength: 2, commutative: true}, - {name: "XorMaskedUint64x2", argLength: 3, commutative: true}, - {name: "AddUint64x4", argLength: 2, commutative: true}, - {name: "AddMaskedUint64x4", argLength: 3, commutative: true}, - {name: "AndUint64x4", argLength: 2, commutative: true}, - {name: "AndMaskedUint64x4", argLength: 3, commutative: true}, - {name: "AndNotUint64x4", argLength: 2, commutative: false}, - {name: "AndNotMaskedUint64x4", argLength: 3, commutative: false}, - {name: "CompressUint64x4", argLength: 2, commutative: false}, - {name: "EqualUint64x4", argLength: 2, commutative: true}, - {name: "EqualMaskedUint64x4", argLength: 3, commutative: true}, - {name: "GreaterUint64x4", argLength: 2, commutative: false}, - {name: "GreaterEqualUint64x4", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedUint64x4", argLength: 3, commutative: false}, - {name: "GreaterMaskedUint64x4", argLength: 3, commutative: false}, - {name: "LessUint64x4", argLength: 2, commutative: false}, - {name: "LessEqualUint64x4", argLength: 2, commutative: false}, - {name: "LessEqualMaskedUint64x4", argLength: 3, commutative: false}, - {name: "LessMaskedUint64x4", argLength: 3, commutative: false}, - {name: "MaxUint64x4", argLength: 2, commutative: true}, - {name: "MaxMaskedUint64x4", 
argLength: 3, commutative: true}, - {name: "MinUint64x4", argLength: 2, commutative: true}, - {name: "MinMaskedUint64x4", argLength: 3, commutative: true}, - {name: "MulEvenWidenUint64x4", argLength: 2, commutative: true}, - {name: "MulEvenWidenMaskedUint64x4", argLength: 3, commutative: true}, - {name: "NotEqualUint64x4", argLength: 2, commutative: true}, - {name: "NotEqualMaskedUint64x4", argLength: 3, commutative: true}, - {name: "OrUint64x4", argLength: 2, commutative: true}, - {name: "OrMaskedUint64x4", argLength: 3, commutative: true}, - {name: "PermuteUint64x4", argLength: 2, commutative: false}, - {name: "PermuteInt64x4", argLength: 2, commutative: false}, - {name: "PermuteFloat64x4", argLength: 2, commutative: false}, - {name: "Permute2Uint64x4", argLength: 3, commutative: false}, - {name: "Permute2Int64x4", argLength: 3, commutative: false}, - {name: "Permute2Float64x4", argLength: 3, commutative: false}, - {name: "Permute2MaskedUint64x4", argLength: 4, commutative: false}, - {name: "Permute2MaskedFloat64x4", argLength: 4, commutative: false}, - {name: "Permute2MaskedInt64x4", argLength: 4, commutative: false}, - {name: "PermuteMaskedUint64x4", argLength: 3, commutative: false}, - {name: "PermuteMaskedFloat64x4", argLength: 3, commutative: false}, - {name: "PermuteMaskedInt64x4", argLength: 3, commutative: false}, - {name: "PopCountUint64x4", argLength: 1, commutative: false}, - {name: "PopCountMaskedUint64x4", argLength: 2, commutative: false}, - {name: "RotateLeftUint64x4", argLength: 2, commutative: false}, - {name: "RotateLeftMaskedUint64x4", argLength: 3, commutative: false}, - {name: "RotateRightUint64x4", argLength: 2, commutative: false}, - {name: "RotateRightMaskedUint64x4", argLength: 3, commutative: false}, - {name: "ShiftAllLeftUint64x4", argLength: 2, commutative: false}, - {name: "ShiftAllLeftMaskedUint64x4", argLength: 3, commutative: false}, - {name: "ShiftAllRightUint64x4", argLength: 2, commutative: false}, - {name: "ShiftAllRightMaskedUint64x4", argLength: 3, commutative: false}, - {name: "ShiftLeftUint64x4", argLength: 2, commutative: false}, - {name: "ShiftLeftAndFillUpperFromUint64x4", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedUint64x4", argLength: 4, commutative: false}, - {name: "ShiftLeftMaskedUint64x4", argLength: 3, commutative: false}, - {name: "ShiftRightUint64x4", argLength: 2, commutative: false}, - {name: "ShiftRightAndFillUpperFromUint64x4", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedUint64x4", argLength: 4, commutative: false}, - {name: "ShiftRightMaskedUint64x4", argLength: 3, commutative: false}, - {name: "SubUint64x4", argLength: 2, commutative: false}, - {name: "SubMaskedUint64x4", argLength: 3, commutative: false}, - {name: "XorUint64x4", argLength: 2, commutative: true}, - {name: "XorMaskedUint64x4", argLength: 3, commutative: true}, - {name: "AddUint64x8", argLength: 2, commutative: true}, - {name: "AddMaskedUint64x8", argLength: 3, commutative: true}, - {name: "AndUint64x8", argLength: 2, commutative: true}, - {name: "AndMaskedUint64x8", argLength: 3, commutative: true}, - {name: "AndNotUint64x8", argLength: 2, commutative: false}, - {name: "AndNotMaskedUint64x8", argLength: 3, commutative: false}, - {name: "CompressUint64x8", argLength: 2, commutative: false}, - {name: "EqualUint64x8", argLength: 2, commutative: true}, - {name: "EqualMaskedUint64x8", argLength: 3, commutative: true}, - {name: "GreaterUint64x8", argLength: 2, commutative: false}, - {name: 
"GreaterEqualUint64x8", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedUint64x8", argLength: 3, commutative: false}, - {name: "GreaterMaskedUint64x8", argLength: 3, commutative: false}, - {name: "LessUint64x8", argLength: 2, commutative: false}, - {name: "LessEqualUint64x8", argLength: 2, commutative: false}, - {name: "LessEqualMaskedUint64x8", argLength: 3, commutative: false}, - {name: "LessMaskedUint64x8", argLength: 3, commutative: false}, - {name: "MaxUint64x8", argLength: 2, commutative: true}, - {name: "MaxMaskedUint64x8", argLength: 3, commutative: true}, - {name: "MinUint64x8", argLength: 2, commutative: true}, - {name: "MinMaskedUint64x8", argLength: 3, commutative: true}, - {name: "MulEvenWidenUint64x8", argLength: 2, commutative: true}, - {name: "MulEvenWidenMaskedUint64x8", argLength: 3, commutative: true}, - {name: "NotEqualUint64x8", argLength: 2, commutative: true}, - {name: "NotEqualMaskedUint64x8", argLength: 3, commutative: true}, - {name: "OrUint64x8", argLength: 2, commutative: true}, - {name: "OrMaskedUint64x8", argLength: 3, commutative: true}, - {name: "PermuteUint64x8", argLength: 2, commutative: false}, - {name: "PermuteFloat64x8", argLength: 2, commutative: false}, - {name: "PermuteInt64x8", argLength: 2, commutative: false}, - {name: "Permute2Float64x8", argLength: 3, commutative: false}, - {name: "Permute2Uint64x8", argLength: 3, commutative: false}, - {name: "Permute2Int64x8", argLength: 3, commutative: false}, - {name: "Permute2MaskedFloat64x8", argLength: 4, commutative: false}, - {name: "Permute2MaskedUint64x8", argLength: 4, commutative: false}, - {name: "Permute2MaskedInt64x8", argLength: 4, commutative: false}, - {name: "PermuteMaskedInt64x8", argLength: 3, commutative: false}, - {name: "PermuteMaskedFloat64x8", argLength: 3, commutative: false}, - {name: "PermuteMaskedUint64x8", argLength: 3, commutative: false}, - {name: "PopCountUint64x8", argLength: 1, commutative: false}, - {name: "PopCountMaskedUint64x8", argLength: 2, commutative: false}, - {name: "RotateLeftUint64x8", argLength: 2, commutative: false}, - {name: "RotateLeftMaskedUint64x8", argLength: 3, commutative: false}, - {name: "RotateRightUint64x8", argLength: 2, commutative: false}, - {name: "RotateRightMaskedUint64x8", argLength: 3, commutative: false}, - {name: "ShiftAllLeftUint64x8", argLength: 2, commutative: false}, - {name: "ShiftAllLeftMaskedUint64x8", argLength: 3, commutative: false}, - {name: "ShiftAllRightUint64x8", argLength: 2, commutative: false}, - {name: "ShiftAllRightMaskedUint64x8", argLength: 3, commutative: false}, - {name: "ShiftLeftUint64x8", argLength: 2, commutative: false}, - {name: "ShiftLeftAndFillUpperFromUint64x8", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedUint64x8", argLength: 4, commutative: false}, - {name: "ShiftLeftMaskedUint64x8", argLength: 3, commutative: false}, - {name: "ShiftRightUint64x8", argLength: 2, commutative: false}, - {name: "ShiftRightAndFillUpperFromUint64x8", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedUint64x8", argLength: 4, commutative: false}, - {name: "ShiftRightMaskedUint64x8", argLength: 3, commutative: false}, - {name: "SubUint64x8", argLength: 2, commutative: false}, - {name: "SubMaskedUint64x8", argLength: 3, commutative: false}, - {name: "XorUint64x8", argLength: 2, commutative: true}, - {name: "XorMaskedUint64x8", argLength: 3, commutative: true}, - {name: "AddUint8x16", argLength: 2, commutative: true}, - {name: "AddMaskedUint8x16", argLength: 
3, commutative: true}, - {name: "AndUint8x16", argLength: 2, commutative: true}, - {name: "AndNotUint8x16", argLength: 2, commutative: false}, - {name: "AverageUint8x16", argLength: 2, commutative: true}, - {name: "AverageMaskedUint8x16", argLength: 3, commutative: true}, - {name: "CompressUint8x16", argLength: 2, commutative: false}, - {name: "EqualUint8x16", argLength: 2, commutative: true}, - {name: "EqualMaskedUint8x16", argLength: 3, commutative: true}, - {name: "GaloisFieldMulUint8x16", argLength: 2, commutative: false}, - {name: "GaloisFieldMulMaskedUint8x16", argLength: 3, commutative: false}, - {name: "GreaterUint8x16", argLength: 2, commutative: false}, - {name: "GreaterEqualUint8x16", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedUint8x16", argLength: 3, commutative: false}, - {name: "GreaterMaskedUint8x16", argLength: 3, commutative: false}, - {name: "LessUint8x16", argLength: 2, commutative: false}, - {name: "LessEqualUint8x16", argLength: 2, commutative: false}, - {name: "LessEqualMaskedUint8x16", argLength: 3, commutative: false}, - {name: "LessMaskedUint8x16", argLength: 3, commutative: false}, - {name: "MaxUint8x16", argLength: 2, commutative: true}, - {name: "MaxMaskedUint8x16", argLength: 3, commutative: true}, - {name: "MinUint8x16", argLength: 2, commutative: true}, - {name: "MinMaskedUint8x16", argLength: 3, commutative: true}, - {name: "NotEqualUint8x16", argLength: 2, commutative: true}, - {name: "NotEqualMaskedUint8x16", argLength: 3, commutative: true}, - {name: "OrUint8x16", argLength: 2, commutative: true}, - {name: "PermuteUint8x16", argLength: 2, commutative: false}, - {name: "PermuteInt8x16", argLength: 2, commutative: false}, - {name: "Permute2Uint8x16", argLength: 3, commutative: false}, - {name: "Permute2Int8x16", argLength: 3, commutative: false}, - {name: "Permute2MaskedInt8x16", argLength: 4, commutative: false}, - {name: "Permute2MaskedUint8x16", argLength: 4, commutative: false}, - {name: "PermuteMaskedUint8x16", argLength: 3, commutative: false}, - {name: "PermuteMaskedInt8x16", argLength: 3, commutative: false}, - {name: "PopCountUint8x16", argLength: 1, commutative: false}, - {name: "PopCountMaskedUint8x16", argLength: 2, commutative: false}, - {name: "SaturatedAddUint8x16", argLength: 2, commutative: true}, - {name: "SaturatedAddMaskedUint8x16", argLength: 3, commutative: true}, - {name: "SaturatedSubUint8x16", argLength: 2, commutative: false}, - {name: "SaturatedSubMaskedUint8x16", argLength: 3, commutative: false}, - {name: "SaturatedUnsignedSignedPairDotProdUint8x16", argLength: 2, commutative: false}, - {name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x16", argLength: 3, commutative: false}, - {name: "SubUint8x16", argLength: 2, commutative: false}, - {name: "SubMaskedUint8x16", argLength: 3, commutative: false}, - {name: "XorUint8x16", argLength: 2, commutative: true}, - {name: "AddUint8x32", argLength: 2, commutative: true}, - {name: "AddMaskedUint8x32", argLength: 3, commutative: true}, - {name: "AndUint8x32", argLength: 2, commutative: true}, - {name: "AndNotUint8x32", argLength: 2, commutative: false}, - {name: "AverageUint8x32", argLength: 2, commutative: true}, - {name: "AverageMaskedUint8x32", argLength: 3, commutative: true}, - {name: "CompressUint8x32", argLength: 2, commutative: false}, - {name: "EqualUint8x32", argLength: 2, commutative: true}, - {name: "EqualMaskedUint8x32", argLength: 3, commutative: true}, - {name: "GaloisFieldMulUint8x32", argLength: 2, commutative: false}, - {name: 
"GaloisFieldMulMaskedUint8x32", argLength: 3, commutative: false}, - {name: "GreaterUint8x32", argLength: 2, commutative: false}, - {name: "GreaterEqualUint8x32", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedUint8x32", argLength: 3, commutative: false}, - {name: "GreaterMaskedUint8x32", argLength: 3, commutative: false}, - {name: "LessUint8x32", argLength: 2, commutative: false}, - {name: "LessEqualUint8x32", argLength: 2, commutative: false}, - {name: "LessEqualMaskedUint8x32", argLength: 3, commutative: false}, - {name: "LessMaskedUint8x32", argLength: 3, commutative: false}, - {name: "MaxUint8x32", argLength: 2, commutative: true}, - {name: "MaxMaskedUint8x32", argLength: 3, commutative: true}, - {name: "MinUint8x32", argLength: 2, commutative: true}, - {name: "MinMaskedUint8x32", argLength: 3, commutative: true}, - {name: "NotEqualUint8x32", argLength: 2, commutative: true}, - {name: "NotEqualMaskedUint8x32", argLength: 3, commutative: true}, - {name: "OrUint8x32", argLength: 2, commutative: true}, - {name: "PermuteUint8x32", argLength: 2, commutative: false}, - {name: "PermuteInt8x32", argLength: 2, commutative: false}, - {name: "Permute2Int8x32", argLength: 3, commutative: false}, - {name: "Permute2Uint8x32", argLength: 3, commutative: false}, - {name: "Permute2MaskedUint8x32", argLength: 4, commutative: false}, - {name: "Permute2MaskedInt8x32", argLength: 4, commutative: false}, - {name: "PermuteMaskedUint8x32", argLength: 3, commutative: false}, - {name: "PermuteMaskedInt8x32", argLength: 3, commutative: false}, - {name: "PopCountUint8x32", argLength: 1, commutative: false}, - {name: "PopCountMaskedUint8x32", argLength: 2, commutative: false}, - {name: "SaturatedAddUint8x32", argLength: 2, commutative: true}, - {name: "SaturatedAddMaskedUint8x32", argLength: 3, commutative: true}, - {name: "SaturatedSubUint8x32", argLength: 2, commutative: false}, - {name: "SaturatedSubMaskedUint8x32", argLength: 3, commutative: false}, - {name: "SaturatedUnsignedSignedPairDotProdUint8x32", argLength: 2, commutative: false}, - {name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x32", argLength: 3, commutative: false}, - {name: "SubUint8x32", argLength: 2, commutative: false}, - {name: "SubMaskedUint8x32", argLength: 3, commutative: false}, - {name: "XorUint8x32", argLength: 2, commutative: true}, - {name: "AddUint8x64", argLength: 2, commutative: true}, - {name: "AddMaskedUint8x64", argLength: 3, commutative: true}, - {name: "AverageUint8x64", argLength: 2, commutative: true}, - {name: "AverageMaskedUint8x64", argLength: 3, commutative: true}, - {name: "CompressUint8x64", argLength: 2, commutative: false}, - {name: "EqualUint8x64", argLength: 2, commutative: true}, - {name: "EqualMaskedUint8x64", argLength: 3, commutative: true}, - {name: "GaloisFieldMulUint8x64", argLength: 2, commutative: false}, - {name: "GaloisFieldMulMaskedUint8x64", argLength: 3, commutative: false}, - {name: "GreaterUint8x64", argLength: 2, commutative: false}, - {name: "GreaterEqualUint8x64", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedUint8x64", argLength: 3, commutative: false}, - {name: "GreaterMaskedUint8x64", argLength: 3, commutative: false}, - {name: "LessUint8x64", argLength: 2, commutative: false}, - {name: "LessEqualUint8x64", argLength: 2, commutative: false}, - {name: "LessEqualMaskedUint8x64", argLength: 3, commutative: false}, - {name: "LessMaskedUint8x64", argLength: 3, commutative: false}, - {name: "MaxUint8x64", argLength: 2, commutative: true}, - {name: 
"MaxMaskedUint8x64", argLength: 3, commutative: true}, - {name: "MinUint8x64", argLength: 2, commutative: true}, - {name: "MinMaskedUint8x64", argLength: 3, commutative: true}, - {name: "NotEqualUint8x64", argLength: 2, commutative: true}, - {name: "NotEqualMaskedUint8x64", argLength: 3, commutative: true}, - {name: "PermuteInt8x64", argLength: 2, commutative: false}, - {name: "PermuteUint8x64", argLength: 2, commutative: false}, - {name: "Permute2Uint8x64", argLength: 3, commutative: false}, - {name: "Permute2Int8x64", argLength: 3, commutative: false}, - {name: "Permute2MaskedUint8x64", argLength: 4, commutative: false}, - {name: "Permute2MaskedInt8x64", argLength: 4, commutative: false}, - {name: "PermuteMaskedUint8x64", argLength: 3, commutative: false}, - {name: "PermuteMaskedInt8x64", argLength: 3, commutative: false}, - {name: "PopCountUint8x64", argLength: 1, commutative: false}, - {name: "PopCountMaskedUint8x64", argLength: 2, commutative: false}, - {name: "SaturatedAddUint8x64", argLength: 2, commutative: true}, - {name: "SaturatedAddMaskedUint8x64", argLength: 3, commutative: true}, - {name: "SaturatedSubUint8x64", argLength: 2, commutative: false}, - {name: "SaturatedSubMaskedUint8x64", argLength: 3, commutative: false}, - {name: "SaturatedUnsignedSignedPairDotProdUint8x64", argLength: 2, commutative: false}, - {name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x64", argLength: 3, commutative: false}, + {name: "SubMaskedUint32x4", argLength: 3, commutative: false}, + {name: "SubMaskedUint32x8", argLength: 3, commutative: false}, + {name: "SubMaskedUint32x16", argLength: 3, commutative: false}, + {name: "SubMaskedUint64x2", argLength: 3, commutative: false}, + {name: "SubMaskedUint64x4", argLength: 3, commutative: false}, + {name: "SubMaskedUint64x8", argLength: 3, commutative: false}, + {name: "SubUint8x16", argLength: 2, commutative: false}, + {name: "SubUint8x32", argLength: 2, commutative: false}, {name: "SubUint8x64", argLength: 2, commutative: false}, - {name: "SubMaskedUint8x64", argLength: 3, commutative: false}, - {name: "CeilWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "FloorWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "TruncWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncWithPrecisionMaskedFloat32x16", 
argLength: 2, commutative: false, aux: "Int8"}, + {name: "SubUint16x8", argLength: 2, commutative: false}, + {name: "SubUint16x16", argLength: 2, commutative: false}, + {name: "SubUint16x32", argLength: 2, commutative: false}, + {name: "SubUint32x4", argLength: 2, commutative: false}, + {name: "SubUint32x8", argLength: 2, commutative: false}, + {name: "SubUint32x16", argLength: 2, commutative: false}, + {name: "SubUint64x2", argLength: 2, commutative: false}, + {name: "SubUint64x4", argLength: 2, commutative: false}, + {name: "SubUint64x8", argLength: 2, commutative: false}, + {name: "TruncFloat32x4", argLength: 1, commutative: false}, + {name: "TruncFloat32x8", argLength: 1, commutative: false}, + {name: "TruncFloat64x2", argLength: 1, commutative: false}, + {name: "TruncFloat64x4", argLength: 1, commutative: false}, + {name: "UnsignedSignedQuadDotProdAccumulateInt32x4", argLength: 3, commutative: false}, + {name: "UnsignedSignedQuadDotProdAccumulateInt32x8", argLength: 3, commutative: false}, + {name: "UnsignedSignedQuadDotProdAccumulateInt32x16", argLength: 3, commutative: false}, + {name: "UnsignedSignedQuadDotProdAccumulateMaskedInt32x4", argLength: 4, commutative: false}, + {name: "UnsignedSignedQuadDotProdAccumulateMaskedInt32x8", argLength: 4, commutative: false}, + {name: "UnsignedSignedQuadDotProdAccumulateMaskedInt32x16", argLength: 4, commutative: false}, + {name: "XorInt8x16", argLength: 2, commutative: true}, + {name: "XorInt8x32", argLength: 2, commutative: true}, + {name: "XorInt16x8", argLength: 2, commutative: true}, + {name: "XorInt16x16", argLength: 2, commutative: true}, + {name: "XorInt32x4", argLength: 2, commutative: true}, + {name: "XorInt32x8", argLength: 2, commutative: true}, + {name: "XorInt32x16", argLength: 2, commutative: true}, + {name: "XorInt64x2", argLength: 2, commutative: true}, + {name: "XorInt64x4", argLength: 2, commutative: true}, + {name: "XorInt64x8", argLength: 2, commutative: true}, + {name: "XorMaskedInt32x4", argLength: 3, commutative: true}, + {name: "XorMaskedInt32x8", argLength: 3, commutative: true}, + {name: "XorMaskedInt32x16", argLength: 3, commutative: true}, + {name: "XorMaskedInt64x2", argLength: 3, commutative: true}, + {name: "XorMaskedInt64x4", argLength: 3, commutative: true}, + {name: "XorMaskedInt64x8", argLength: 3, commutative: true}, + {name: "XorMaskedUint32x4", argLength: 3, commutative: true}, + {name: "XorMaskedUint32x8", argLength: 3, commutative: true}, + {name: "XorMaskedUint32x16", argLength: 3, commutative: true}, + {name: "XorMaskedUint64x2", argLength: 3, commutative: true}, + {name: "XorMaskedUint64x4", argLength: 3, commutative: true}, + {name: "XorMaskedUint64x8", argLength: 3, commutative: true}, + {name: "XorUint8x16", argLength: 2, commutative: true}, + {name: "XorUint8x32", argLength: 2, commutative: true}, + {name: "XorUint16x8", argLength: 2, commutative: true}, + {name: "XorUint16x16", argLength: 2, commutative: true}, + {name: "XorUint32x4", argLength: 2, commutative: true}, + {name: "XorUint32x8", argLength: 2, commutative: true}, + {name: "XorUint32x16", argLength: 2, commutative: true}, + {name: "XorUint64x2", argLength: 2, commutative: true}, + {name: "XorUint64x4", argLength: 2, commutative: true}, + {name: "XorUint64x8", argLength: 2, commutative: true}, {name: "CeilWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilWithPrecisionFloat32x4", argLength: 1, 
commutative: false, aux: "Int8"}, - {name: "DiffWithCeilWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "FloorWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "TruncWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "CeilWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "FloorWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "Get128Float32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "Set128Float32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "TruncWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "CeilWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, {name: "CeilWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorWithPrecisionMaskedFloat64x2", 
argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "FloorWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "TruncWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "CeilWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "CeilWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "CeilWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "CeilWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "CeilWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "CeilWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithCeilWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithCeilWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithFloorWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorWithPrecisionMaskedFloat32x4", argLength: 2, commutative: 
false, aux: "Int8"}, + {name: "DiffWithFloorWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithFloorWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithRoundWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithRoundWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithTruncWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithTruncWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "FloorWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, {name: "FloorWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: 
"FloorWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "FloorWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "FloorWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "FloorWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "FloorWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformInverseMaskedUint8x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformInverseMaskedUint8x32", argLength: 3, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformInverseMaskedUint8x64", argLength: 3, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformInverseUint8x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformInverseUint8x32", argLength: 2, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformInverseUint8x64", argLength: 2, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformMaskedUint8x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformMaskedUint8x32", argLength: 3, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformMaskedUint8x64", argLength: 3, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformUint8x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformUint8x32", argLength: 2, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformUint8x64", argLength: 2, commutative: false, aux: "Int8"}, + {name: "Get128Float32x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "Get128Float64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "Get128Int8x32", argLength: 1, commutative: false, aux: "Int8"}, + {name: "Get128Int16x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "Get128Int32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "Get128Int64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "Get128Uint8x32", argLength: 1, commutative: false, aux: "Int8"}, + {name: "Get128Uint16x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "Get128Uint32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "Get128Uint64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "GetElemInt8x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "GetElemInt16x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "GetElemInt32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "GetElemInt64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "GetElemUint8x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "GetElemUint16x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "GetElemUint32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "GetElemUint64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftInt32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftInt32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftInt32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftInt64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftInt64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftInt64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: 
"RotateAllLeftMaskedInt32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftMaskedInt32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftMaskedInt32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftMaskedInt64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftMaskedInt64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftMaskedInt64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftMaskedUint32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftMaskedUint32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftMaskedUint32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftMaskedUint64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftMaskedUint64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftMaskedUint64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftUint32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftUint32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftUint32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftUint64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftUint64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftUint64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightInt32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightInt32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightInt32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightInt64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightInt64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightInt64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightMaskedInt32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RotateAllRightMaskedInt32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RotateAllRightMaskedInt32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RotateAllRightMaskedInt64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RotateAllRightMaskedInt64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RotateAllRightMaskedInt64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RotateAllRightMaskedUint32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RotateAllRightMaskedUint32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RotateAllRightMaskedUint32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RotateAllRightMaskedUint64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RotateAllRightMaskedUint64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RotateAllRightMaskedUint64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RotateAllRightUint32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightUint32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightUint32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightUint64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightUint64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightUint64x8", argLength: 
1, commutative: false, aux: "Int8"}, + {name: "RoundWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, {name: "RoundWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "Set128Float64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "TruncWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "CeilWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "FloorWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "RoundWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "RoundWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "TruncWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "Get128Int16x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "Set128Float32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "Set128Float64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "Set128Int8x32", argLength: 2, commutative: false, aux: "Int8"}, {name: "Set128Int16x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromInt16x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedInt16x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromInt16x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedInt16x16", argLength: 3, 
commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromInt16x32", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedInt16x32", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromInt16x32", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedInt16x32", argLength: 3, commutative: false, aux: "Int8"}, - {name: "GetElemInt16x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "Set128Int32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "Set128Int64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "Set128Uint8x32", argLength: 2, commutative: false, aux: "Int8"}, + {name: "Set128Uint16x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "Set128Uint32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "Set128Uint64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "SetElemInt8x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "SetElemInt16x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromInt16x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedInt16x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromInt16x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedInt16x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftInt32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftMaskedInt32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllRightInt32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllRightMaskedInt32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromInt32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedInt32x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromInt32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedInt32x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "GetElemInt32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftInt32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftMaskedInt32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllRightInt32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllRightMaskedInt32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "SetElemInt32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "SetElemInt64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "SetElemUint8x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "SetElemUint16x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "SetElemUint32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "SetElemUint64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromInt16x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromInt16x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromInt16x32", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromInt32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedInt32x4", 
argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromInt32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedInt32x4", argLength: 3, commutative: false, aux: "Int8"}, - {name: "Get128Int32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftInt32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftMaskedInt32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllRightInt32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllRightMaskedInt32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "Set128Int32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromInt32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedInt32x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromInt32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedInt32x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "GetElemInt64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftInt64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftMaskedInt64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllRightInt64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllRightMaskedInt64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "SetElemInt64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromInt32x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromInt64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedInt64x2", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromInt64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedInt64x2", argLength: 3, commutative: false, aux: "Int8"}, - {name: "Get128Int64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftInt64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftMaskedInt64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllRightInt64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllRightMaskedInt64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "Set128Int64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromInt64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedInt64x4", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromInt64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedInt64x4", argLength: 3, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftInt64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftMaskedInt64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllRightInt64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllRightMaskedInt64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromInt64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedInt16x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: 
"ShiftAllLeftAndFillUpperFromMaskedInt16x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedInt16x32", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedInt32x4", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedInt32x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedInt32x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedInt64x2", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedInt64x4", argLength: 3, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromMaskedInt64x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromInt64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedInt64x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "GetElemInt8x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "SetElemInt8x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "Get128Int8x32", argLength: 1, commutative: false, aux: "Int8"}, - {name: "Set128Int8x32", argLength: 2, commutative: false, aux: "Int8"}, - {name: "Get128Uint16x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "Set128Uint16x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromUint16x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedUint16x8", argLength: 3, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromMaskedUint16x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromUint16x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedUint16x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromUint16x32", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromMaskedUint16x32", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromUint16x32", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedUint16x32", argLength: 3, commutative: false, aux: "Int8"}, - {name: "GetElemUint16x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "SetElemUint16x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedUint32x4", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedUint32x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedUint32x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedUint64x2", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedUint64x4", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedUint64x8", argLength: 3, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromUint16x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedUint16x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromUint16x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedUint16x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: 
"RotateAllLeftUint32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftMaskedUint32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllRightUint32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllRightMaskedUint32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromUint16x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromUint16x32", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromUint32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromUint32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromUint32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedUint32x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromUint32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromUint64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromUint64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromUint64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromInt16x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromInt16x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromInt16x32", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromInt32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromInt32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromInt32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromInt64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromInt64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromInt64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedInt16x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedInt16x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedInt16x32", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedInt32x4", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedInt32x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedInt32x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedInt64x2", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedInt64x4", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedInt64x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedUint16x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedUint16x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedUint16x32", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedUint32x4", argLength: 3, commutative: false, aux: "Int8"}, + {name: 
"ShiftAllRightAndFillUpperFromMaskedUint32x8", argLength: 3, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromMaskedUint32x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "GetElemUint32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftUint32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftMaskedUint32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllRightUint32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllRightMaskedUint32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "SetElemUint32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromUint32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedUint32x4", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedUint64x2", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedUint64x4", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedUint64x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromUint16x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromUint16x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromUint16x32", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromUint32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedUint32x4", argLength: 3, commutative: false, aux: "Int8"}, - {name: "Get128Uint32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftUint32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftMaskedUint32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllRightUint32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllRightMaskedUint32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "Set128Uint32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromUint32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedUint32x8", argLength: 3, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromUint32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedUint32x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "GetElemUint64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftUint64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftMaskedUint64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllRightUint64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllRightMaskedUint64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "SetElemUint64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromUint64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedUint64x2", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromUint32x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromUint64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedUint64x2", argLength: 3, 
commutative: false, aux: "Int8"}, - {name: "Get128Uint64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftUint64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftMaskedUint64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllRightUint64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllRightMaskedUint64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "Set128Uint64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromUint64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedUint64x4", argLength: 3, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromUint64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedUint64x4", argLength: 3, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftUint64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftMaskedUint64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllRightUint64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllRightMaskedUint64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromUint64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedUint64x8", argLength: 3, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromUint64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedUint64x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformUint8x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformInverseUint8x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformInverseMaskedUint8x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformMaskedUint8x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "GetElemUint8x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "SetElemUint8x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformUint8x32", argLength: 2, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformInverseUint8x32", argLength: 2, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformInverseMaskedUint8x32", argLength: 3, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformMaskedUint8x32", argLength: 3, commutative: false, aux: "Int8"}, - {name: "Get128Uint8x32", argLength: 1, commutative: false, aux: "Int8"}, - {name: "Set128Uint8x32", argLength: 2, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformUint8x64", argLength: 2, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformInverseUint8x64", argLength: 2, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformInverseMaskedUint8x64", argLength: 3, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformMaskedUint8x64", argLength: 3, commutative: false, aux: "Int8"}, + {name: "TruncWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: 
"TruncWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "TruncWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "TruncWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "TruncWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "TruncWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "TruncWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, } } diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 8cc3e45902b584..89e0d853dcbb04 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1208,993 +1208,993 @@ const ( OpAMD64VZEROALL OpAMD64KMOVQload OpAMD64KMOVQstore - OpAMD64VADDPS512 - OpAMD64VADDPSMasked512 - OpAMD64VRCP14PS512 - OpAMD64VRCP14PSMasked512 - OpAMD64VRSQRT14PS512 - OpAMD64VRSQRT14PSMasked512 - OpAMD64VCOMPRESSPSMasked512 - OpAMD64VDIVPS512 - OpAMD64VDIVPSMasked512 - OpAMD64VFMADD213PS512 - OpAMD64VFMADD213PSMasked512 - OpAMD64VFMADDSUB213PS512 - OpAMD64VFMADDSUB213PSMasked512 - OpAMD64VFMSUBADD213PS512 - OpAMD64VFMSUBADD213PSMasked512 - OpAMD64VMAXPS512 - OpAMD64VMAXPSMasked512 - OpAMD64VMINPS512 - OpAMD64VMINPSMasked512 - OpAMD64VMULPS512 - OpAMD64VSCALEFPS512 - OpAMD64VSCALEFPSMasked512 - OpAMD64VMULPSMasked512 - OpAMD64VSQRTPS512 - OpAMD64VSQRTPSMasked512 - OpAMD64VSUBPS512 - OpAMD64VSUBPSMasked512 + OpAMD64VADDPD128 + OpAMD64VADDPD256 + OpAMD64VADDPD512 + OpAMD64VADDPDMasked128 + OpAMD64VADDPDMasked256 + OpAMD64VADDPDMasked512 OpAMD64VADDPS128 - OpAMD64VADDPSMasked128 - OpAMD64VADDSUBPS128 - OpAMD64VRCPPS128 - OpAMD64VRCP14PSMasked128 - OpAMD64VRSQRTPS128 - OpAMD64VRSQRT14PSMasked128 - OpAMD64VCOMPRESSPSMasked128 - OpAMD64VDIVPS128 - OpAMD64VDIVPSMasked128 - OpAMD64VFMADD213PS128 - OpAMD64VFMADD213PSMasked128 - OpAMD64VFMADDSUB213PS128 - OpAMD64VFMADDSUB213PSMasked128 - OpAMD64VFMSUBADD213PS128 - OpAMD64VFMSUBADD213PSMasked128 - OpAMD64VMAXPS128 - OpAMD64VMAXPSMasked128 - OpAMD64VMINPS128 - OpAMD64VMINPSMasked128 - OpAMD64VMULPS128 - OpAMD64VSCALEFPS128 - OpAMD64VSCALEFPSMasked128 - OpAMD64VMULPSMasked128 - OpAMD64VHADDPS128 - OpAMD64VHSUBPS128 - OpAMD64VSQRTPS128 - OpAMD64VSQRTPSMasked128 - OpAMD64VSUBPS128 - OpAMD64VSUBPSMasked128 OpAMD64VADDPS256 + OpAMD64VADDPS512 + OpAMD64VADDPSMasked128 OpAMD64VADDPSMasked256 + OpAMD64VADDPSMasked512 + OpAMD64VADDSUBPD128 + OpAMD64VADDSUBPD256 + OpAMD64VADDSUBPS128 OpAMD64VADDSUBPS256 - OpAMD64VRCPPS256 - OpAMD64VRCP14PSMasked256 - OpAMD64VRSQRTPS256 - OpAMD64VRSQRT14PSMasked256 + OpAMD64VCOMPRESSPDMasked128 + OpAMD64VCOMPRESSPDMasked256 + OpAMD64VCOMPRESSPDMasked512 + OpAMD64VCOMPRESSPSMasked128 OpAMD64VCOMPRESSPSMasked256 + OpAMD64VCOMPRESSPSMasked512 + OpAMD64VDIVPD128 + OpAMD64VDIVPD256 + OpAMD64VDIVPD512 + OpAMD64VDIVPDMasked128 + OpAMD64VDIVPDMasked256 + OpAMD64VDIVPDMasked512 + OpAMD64VDIVPS128 OpAMD64VDIVPS256 + OpAMD64VDIVPS512 + OpAMD64VDIVPSMasked128 OpAMD64VDIVPSMasked256 + OpAMD64VDIVPSMasked512 + OpAMD64VFMADD213PD128 + OpAMD64VFMADD213PD256 + OpAMD64VFMADD213PD512 + OpAMD64VFMADD213PDMasked128 + OpAMD64VFMADD213PDMasked256 + OpAMD64VFMADD213PDMasked512 + OpAMD64VFMADD213PS128 OpAMD64VFMADD213PS256 + OpAMD64VFMADD213PS512 + OpAMD64VFMADD213PSMasked128 
OpAMD64VFMADD213PSMasked256 + OpAMD64VFMADD213PSMasked512 + OpAMD64VFMADDSUB213PD128 + OpAMD64VFMADDSUB213PD256 + OpAMD64VFMADDSUB213PD512 + OpAMD64VFMADDSUB213PDMasked128 + OpAMD64VFMADDSUB213PDMasked256 + OpAMD64VFMADDSUB213PDMasked512 + OpAMD64VFMADDSUB213PS128 OpAMD64VFMADDSUB213PS256 + OpAMD64VFMADDSUB213PS512 + OpAMD64VFMADDSUB213PSMasked128 OpAMD64VFMADDSUB213PSMasked256 + OpAMD64VFMADDSUB213PSMasked512 + OpAMD64VFMSUBADD213PD128 + OpAMD64VFMSUBADD213PD256 + OpAMD64VFMSUBADD213PD512 + OpAMD64VFMSUBADD213PDMasked128 + OpAMD64VFMSUBADD213PDMasked256 + OpAMD64VFMSUBADD213PDMasked512 + OpAMD64VFMSUBADD213PS128 OpAMD64VFMSUBADD213PS256 + OpAMD64VFMSUBADD213PS512 + OpAMD64VFMSUBADD213PSMasked128 OpAMD64VFMSUBADD213PSMasked256 - OpAMD64VMAXPS256 - OpAMD64VMAXPSMasked256 - OpAMD64VMINPS256 - OpAMD64VMINPSMasked256 - OpAMD64VMULPS256 - OpAMD64VSCALEFPS256 - OpAMD64VSCALEFPSMasked256 - OpAMD64VMULPSMasked256 + OpAMD64VFMSUBADD213PSMasked512 + OpAMD64VGF2P8MULB128 + OpAMD64VGF2P8MULB256 + OpAMD64VGF2P8MULB512 + OpAMD64VGF2P8MULBMasked128 + OpAMD64VGF2P8MULBMasked256 + OpAMD64VGF2P8MULBMasked512 + OpAMD64VHADDPD128 + OpAMD64VHADDPD256 + OpAMD64VHADDPS128 OpAMD64VHADDPS256 + OpAMD64VHSUBPD128 + OpAMD64VHSUBPD256 + OpAMD64VHSUBPS128 OpAMD64VHSUBPS256 - OpAMD64VSQRTPS256 - OpAMD64VSQRTPSMasked256 - OpAMD64VSUBPS256 - OpAMD64VSUBPSMasked256 - OpAMD64VADDPD128 - OpAMD64VADDPDMasked128 - OpAMD64VADDSUBPD128 - OpAMD64VRCP14PD128 - OpAMD64VRCP14PDMasked128 - OpAMD64VRSQRT14PD128 - OpAMD64VRSQRT14PDMasked128 - OpAMD64VCOMPRESSPDMasked128 - OpAMD64VDIVPD128 - OpAMD64VDIVPDMasked128 - OpAMD64VFMADD213PD128 - OpAMD64VFMADD213PDMasked128 - OpAMD64VFMADDSUB213PD128 - OpAMD64VFMADDSUB213PDMasked128 - OpAMD64VFMSUBADD213PD128 - OpAMD64VFMSUBADD213PDMasked128 OpAMD64VMAXPD128 - OpAMD64VMAXPDMasked128 - OpAMD64VMINPD128 - OpAMD64VMINPDMasked128 - OpAMD64VMULPD128 - OpAMD64VSCALEFPD128 - OpAMD64VSCALEFPDMasked128 - OpAMD64VMULPDMasked128 - OpAMD64VHADDPD128 - OpAMD64VHSUBPD128 - OpAMD64VSQRTPD128 - OpAMD64VSQRTPDMasked128 - OpAMD64VSUBPD128 - OpAMD64VSUBPDMasked128 - OpAMD64VADDPD256 - OpAMD64VADDPDMasked256 - OpAMD64VADDSUBPD256 - OpAMD64VRCP14PD256 - OpAMD64VRCP14PDMasked256 - OpAMD64VRSQRT14PD256 - OpAMD64VRSQRT14PDMasked256 - OpAMD64VCOMPRESSPDMasked256 - OpAMD64VDIVPD256 - OpAMD64VDIVPDMasked256 - OpAMD64VFMADD213PD256 - OpAMD64VFMADD213PDMasked256 - OpAMD64VFMADDSUB213PD256 - OpAMD64VFMADDSUB213PDMasked256 - OpAMD64VFMSUBADD213PD256 - OpAMD64VFMSUBADD213PDMasked256 OpAMD64VMAXPD256 - OpAMD64VMAXPDMasked256 - OpAMD64VMINPD256 - OpAMD64VMINPDMasked256 - OpAMD64VMULPD256 - OpAMD64VSCALEFPD256 - OpAMD64VSCALEFPDMasked256 - OpAMD64VMULPDMasked256 - OpAMD64VHADDPD256 - OpAMD64VHSUBPD256 - OpAMD64VSQRTPD256 - OpAMD64VSQRTPDMasked256 - OpAMD64VSUBPD256 - OpAMD64VSUBPDMasked256 - OpAMD64VADDPD512 - OpAMD64VADDPDMasked512 - OpAMD64VRCP14PD512 - OpAMD64VRCP14PDMasked512 - OpAMD64VRSQRT14PD512 - OpAMD64VRSQRT14PDMasked512 - OpAMD64VCOMPRESSPDMasked512 - OpAMD64VDIVPD512 - OpAMD64VDIVPDMasked512 - OpAMD64VFMADD213PD512 - OpAMD64VFMADD213PDMasked512 - OpAMD64VFMADDSUB213PD512 - OpAMD64VFMADDSUB213PDMasked512 - OpAMD64VFMSUBADD213PD512 - OpAMD64VFMSUBADD213PDMasked512 OpAMD64VMAXPD512 + OpAMD64VMAXPDMasked128 + OpAMD64VMAXPDMasked256 OpAMD64VMAXPDMasked512 + OpAMD64VMAXPS128 + OpAMD64VMAXPS256 + OpAMD64VMAXPS512 + OpAMD64VMAXPSMasked128 + OpAMD64VMAXPSMasked256 + OpAMD64VMAXPSMasked512 + OpAMD64VMINPD128 + OpAMD64VMINPD256 OpAMD64VMINPD512 + OpAMD64VMINPDMasked128 + OpAMD64VMINPDMasked256 OpAMD64VMINPDMasked512 + 
OpAMD64VMINPS128 + OpAMD64VMINPS256 + OpAMD64VMINPS512 + OpAMD64VMINPSMasked128 + OpAMD64VMINPSMasked256 + OpAMD64VMINPSMasked512 + OpAMD64VMULPD128 + OpAMD64VMULPD256 OpAMD64VMULPD512 - OpAMD64VSCALEFPD512 - OpAMD64VSCALEFPDMasked512 + OpAMD64VMULPDMasked128 + OpAMD64VMULPDMasked256 OpAMD64VMULPDMasked512 - OpAMD64VSQRTPD512 - OpAMD64VSQRTPDMasked512 - OpAMD64VSUBPD512 - OpAMD64VSUBPDMasked512 + OpAMD64VMULPS128 + OpAMD64VMULPS256 + OpAMD64VMULPS512 + OpAMD64VMULPSMasked128 + OpAMD64VMULPSMasked256 + OpAMD64VMULPSMasked512 + OpAMD64VPABSB128 + OpAMD64VPABSB256 + OpAMD64VPABSB512 + OpAMD64VPABSBMasked128 + OpAMD64VPABSBMasked256 + OpAMD64VPABSBMasked512 + OpAMD64VPABSD128 + OpAMD64VPABSD256 + OpAMD64VPABSD512 + OpAMD64VPABSDMasked128 + OpAMD64VPABSDMasked256 + OpAMD64VPABSDMasked512 + OpAMD64VPABSQ128 + OpAMD64VPABSQ256 + OpAMD64VPABSQ512 + OpAMD64VPABSQMasked128 + OpAMD64VPABSQMasked256 + OpAMD64VPABSQMasked512 + OpAMD64VPABSW128 OpAMD64VPABSW256 - OpAMD64VPABSWMasked256 - OpAMD64VPADDW256 - OpAMD64VPADDWMasked256 - OpAMD64VPCOMPRESSWMasked256 - OpAMD64VPCMPEQW256 - OpAMD64VPCMPGTW256 - OpAMD64VPMAXSW256 - OpAMD64VPMAXSWMasked256 - OpAMD64VPMINSW256 - OpAMD64VPMINSWMasked256 - OpAMD64VPMULHW256 - OpAMD64VPMULHWMasked256 - OpAMD64VPMULLW256 - OpAMD64VPMULLWMasked256 - OpAMD64VPMADDWD256 - OpAMD64VPMADDWDMasked256 - OpAMD64VPHADDW256 - OpAMD64VPHSUBW256 - OpAMD64VPOPCNTW256 - OpAMD64VPOPCNTWMasked256 - OpAMD64VPADDSW256 - OpAMD64VPADDSWMasked256 - OpAMD64VPHADDSW256 - OpAMD64VPHSUBSW256 - OpAMD64VPSUBSW256 - OpAMD64VPSUBSWMasked256 - OpAMD64VPSLLW256 - OpAMD64VPSLLWMasked256 - OpAMD64VPSRAW256 - OpAMD64VPSRAWMasked256 - OpAMD64VPSLLVW256 - OpAMD64VPSHLDVW256 - OpAMD64VPSHLDVWMasked256 - OpAMD64VPSLLVWMasked256 - OpAMD64VPSRAVW256 - OpAMD64VPSHRDVW256 - OpAMD64VPSHRDVWMasked256 - OpAMD64VPSRAVWMasked256 - OpAMD64VPSIGNW256 - OpAMD64VPSUBW256 - OpAMD64VPSUBWMasked256 OpAMD64VPABSW512 + OpAMD64VPABSWMasked128 + OpAMD64VPABSWMasked256 OpAMD64VPABSWMasked512 - OpAMD64VPADDW512 - OpAMD64VPADDWMasked512 - OpAMD64VPCOMPRESSWMasked512 - OpAMD64VPCMPEQW512 - OpAMD64VPCMPGTW512 - OpAMD64VPMAXSW512 - OpAMD64VPMAXSWMasked512 - OpAMD64VPMINSW512 - OpAMD64VPMINSWMasked512 - OpAMD64VPMULHW512 - OpAMD64VPMULHWMasked512 - OpAMD64VPMULLW512 - OpAMD64VPMULLWMasked512 - OpAMD64VPMADDWD512 - OpAMD64VPMADDWDMasked512 - OpAMD64VPOPCNTW512 - OpAMD64VPOPCNTWMasked512 + OpAMD64VPADDB128 + OpAMD64VPADDB256 + OpAMD64VPADDB512 + OpAMD64VPADDBMasked128 + OpAMD64VPADDBMasked256 + OpAMD64VPADDBMasked512 + OpAMD64VPADDD128 + OpAMD64VPADDD256 + OpAMD64VPADDD512 + OpAMD64VPADDDMasked128 + OpAMD64VPADDDMasked256 + OpAMD64VPADDDMasked512 + OpAMD64VPADDQ128 + OpAMD64VPADDQ256 + OpAMD64VPADDQ512 + OpAMD64VPADDQMasked128 + OpAMD64VPADDQMasked256 + OpAMD64VPADDQMasked512 + OpAMD64VPADDSB128 + OpAMD64VPADDSB256 + OpAMD64VPADDSB512 + OpAMD64VPADDSBMasked128 + OpAMD64VPADDSBMasked256 + OpAMD64VPADDSBMasked512 + OpAMD64VPADDSW128 + OpAMD64VPADDSW256 OpAMD64VPADDSW512 + OpAMD64VPADDSWMasked128 + OpAMD64VPADDSWMasked256 OpAMD64VPADDSWMasked512 - OpAMD64VPSUBSW512 - OpAMD64VPSUBSWMasked512 - OpAMD64VPSLLW512 - OpAMD64VPSLLWMasked512 - OpAMD64VPSRAW512 - OpAMD64VPSRAWMasked512 - OpAMD64VPSLLVW512 - OpAMD64VPSHLDVW512 - OpAMD64VPSHLDVWMasked512 - OpAMD64VPSLLVWMasked512 - OpAMD64VPSRAVW512 - OpAMD64VPSHRDVW512 - OpAMD64VPSHRDVWMasked512 - OpAMD64VPSRAVWMasked512 - OpAMD64VPSUBW512 - OpAMD64VPSUBWMasked512 - OpAMD64VPABSW128 - OpAMD64VPABSWMasked128 OpAMD64VPADDW128 + OpAMD64VPADDW256 + OpAMD64VPADDW512 OpAMD64VPADDWMasked128 - 
OpAMD64VPCOMPRESSWMasked128 - OpAMD64VPCMPEQW128 - OpAMD64VPCMPGTW128 - OpAMD64VPMAXSW128 - OpAMD64VPMAXSWMasked128 - OpAMD64VPMINSW128 - OpAMD64VPMINSWMasked128 - OpAMD64VPMULHW128 - OpAMD64VPMULHWMasked128 - OpAMD64VPMULLW128 - OpAMD64VPMULLWMasked128 - OpAMD64VPMADDWD128 - OpAMD64VPMADDWDMasked128 - OpAMD64VPHADDW128 - OpAMD64VPHSUBW128 - OpAMD64VPOPCNTW128 - OpAMD64VPOPCNTWMasked128 - OpAMD64VPADDSW128 - OpAMD64VPADDSWMasked128 - OpAMD64VPHADDSW128 - OpAMD64VPHSUBSW128 - OpAMD64VPSUBSW128 - OpAMD64VPSUBSWMasked128 - OpAMD64VPSLLW128 - OpAMD64VPSLLWMasked128 - OpAMD64VPSRAW128 - OpAMD64VPSRAWMasked128 - OpAMD64VPSLLVW128 - OpAMD64VPSHLDVW128 - OpAMD64VPSHLDVWMasked128 - OpAMD64VPSLLVWMasked128 - OpAMD64VPSRAVW128 - OpAMD64VPSHRDVW128 - OpAMD64VPSHRDVWMasked128 - OpAMD64VPSRAVWMasked128 - OpAMD64VPSIGNW128 - OpAMD64VPSUBW128 - OpAMD64VPSUBWMasked128 - OpAMD64VPABSD512 - OpAMD64VPABSDMasked512 - OpAMD64VPADDD512 - OpAMD64VPADDDMasked512 + OpAMD64VPADDWMasked256 + OpAMD64VPADDWMasked512 + OpAMD64VPAND128 + OpAMD64VPAND256 OpAMD64VPANDD512 + OpAMD64VPANDDMasked128 + OpAMD64VPANDDMasked256 OpAMD64VPANDDMasked512 + OpAMD64VPANDN128 + OpAMD64VPANDN256 OpAMD64VPANDND512 + OpAMD64VPANDNDMasked128 + OpAMD64VPANDNDMasked256 OpAMD64VPANDNDMasked512 - OpAMD64VPCOMPRESSDMasked512 + OpAMD64VPANDNQ512 + OpAMD64VPANDNQMasked128 + OpAMD64VPANDNQMasked256 + OpAMD64VPANDNQMasked512 + OpAMD64VPANDQ512 + OpAMD64VPANDQMasked128 + OpAMD64VPANDQMasked256 + OpAMD64VPANDQMasked512 + OpAMD64VPAVGB128 + OpAMD64VPAVGB256 + OpAMD64VPAVGB512 + OpAMD64VPAVGBMasked128 + OpAMD64VPAVGBMasked256 + OpAMD64VPAVGBMasked512 + OpAMD64VPAVGW128 + OpAMD64VPAVGW256 + OpAMD64VPAVGW512 + OpAMD64VPAVGWMasked128 + OpAMD64VPAVGWMasked256 + OpAMD64VPAVGWMasked512 + OpAMD64VPCMPEQB128 + OpAMD64VPCMPEQB256 + OpAMD64VPCMPEQB512 + OpAMD64VPCMPEQD128 + OpAMD64VPCMPEQD256 OpAMD64VPCMPEQD512 + OpAMD64VPCMPEQQ128 + OpAMD64VPCMPEQQ256 + OpAMD64VPCMPEQQ512 + OpAMD64VPCMPEQW128 + OpAMD64VPCMPEQW256 + OpAMD64VPCMPEQW512 + OpAMD64VPCMPGTB128 + OpAMD64VPCMPGTB256 + OpAMD64VPCMPGTB512 + OpAMD64VPCMPGTD128 + OpAMD64VPCMPGTD256 OpAMD64VPCMPGTD512 - OpAMD64VPMAXSD512 - OpAMD64VPMAXSDMasked512 - OpAMD64VPMINSD512 - OpAMD64VPMINSDMasked512 - OpAMD64VPMULLD512 - OpAMD64VPMULLDMasked512 - OpAMD64VPORD512 - OpAMD64VPORDMasked512 - OpAMD64VPDPWSSD512 - OpAMD64VPDPWSSDMasked512 - OpAMD64VPOPCNTD512 - OpAMD64VPOPCNTDMasked512 - OpAMD64VPROLVD512 - OpAMD64VPROLVDMasked512 - OpAMD64VPRORVD512 - OpAMD64VPRORVDMasked512 - OpAMD64VPDPWSSDS512 - OpAMD64VPDPWSSDSMasked512 + OpAMD64VPCMPGTQ128 + OpAMD64VPCMPGTQ256 + OpAMD64VPCMPGTQ512 + OpAMD64VPCMPGTW128 + OpAMD64VPCMPGTW256 + OpAMD64VPCMPGTW512 + OpAMD64VPCOMPRESSBMasked128 + OpAMD64VPCOMPRESSBMasked256 + OpAMD64VPCOMPRESSBMasked512 + OpAMD64VPCOMPRESSDMasked128 + OpAMD64VPCOMPRESSDMasked256 + OpAMD64VPCOMPRESSDMasked512 + OpAMD64VPCOMPRESSQMasked128 + OpAMD64VPCOMPRESSQMasked256 + OpAMD64VPCOMPRESSQMasked512 + OpAMD64VPCOMPRESSWMasked128 + OpAMD64VPCOMPRESSWMasked256 + OpAMD64VPCOMPRESSWMasked512 + OpAMD64VPDPBUSD128 + OpAMD64VPDPBUSD256 + OpAMD64VPDPBUSD512 + OpAMD64VPDPBUSDMasked128 + OpAMD64VPDPBUSDMasked256 + OpAMD64VPDPBUSDMasked512 + OpAMD64VPDPBUSDS128 + OpAMD64VPDPBUSDS256 OpAMD64VPDPBUSDS512 + OpAMD64VPDPBUSDSMasked128 + OpAMD64VPDPBUSDSMasked256 OpAMD64VPDPBUSDSMasked512 - OpAMD64VPSLLD512 - OpAMD64VPSLLDMasked512 - OpAMD64VPSRAD512 - OpAMD64VPSRADMasked512 - OpAMD64VPSLLVD512 - OpAMD64VPSHLDVD512 - OpAMD64VPSHLDVDMasked512 - OpAMD64VPSLLVDMasked512 - OpAMD64VPSRAVD512 - OpAMD64VPSHRDVD512 - 
OpAMD64VPSHRDVDMasked512 - OpAMD64VPSRAVDMasked512 - OpAMD64VPSUBD512 - OpAMD64VPSUBDMasked512 - OpAMD64VPDPBUSD512 - OpAMD64VPDPBUSDMasked512 - OpAMD64VPXORD512 - OpAMD64VPXORDMasked512 - OpAMD64VPABSD128 - OpAMD64VPABSDMasked128 - OpAMD64VPADDD128 - OpAMD64VPADDDMasked128 - OpAMD64VPANDDMasked128 - OpAMD64VPANDNDMasked128 - OpAMD64VPCOMPRESSDMasked128 - OpAMD64VPCMPEQD128 - OpAMD64VPCMPGTD128 - OpAMD64VPMAXSD128 - OpAMD64VPMAXSDMasked128 - OpAMD64VPMINSD128 - OpAMD64VPMINSDMasked128 - OpAMD64VPMULDQ128 - OpAMD64VPMULLD128 - OpAMD64VPMULLDMasked128 - OpAMD64VPORDMasked128 OpAMD64VPDPWSSD128 + OpAMD64VPDPWSSD256 + OpAMD64VPDPWSSD512 OpAMD64VPDPWSSDMasked128 - OpAMD64VPHADDD128 - OpAMD64VPHSUBD128 - OpAMD64VPOPCNTD128 - OpAMD64VPOPCNTDMasked128 - OpAMD64VPROLVD128 - OpAMD64VPROLVDMasked128 - OpAMD64VPRORVD128 - OpAMD64VPRORVDMasked128 + OpAMD64VPDPWSSDMasked256 + OpAMD64VPDPWSSDMasked512 OpAMD64VPDPWSSDS128 + OpAMD64VPDPWSSDS256 + OpAMD64VPDPWSSDS512 OpAMD64VPDPWSSDSMasked128 - OpAMD64VPDPBUSDS128 - OpAMD64VPDPBUSDSMasked128 - OpAMD64VPSLLD128 - OpAMD64VPSLLDMasked128 - OpAMD64VPSRAD128 - OpAMD64VPSRADMasked128 - OpAMD64VPSLLVD128 - OpAMD64VPSHLDVD128 - OpAMD64VPSHLDVDMasked128 - OpAMD64VPSLLVDMasked128 - OpAMD64VPSRAVD128 - OpAMD64VPSHRDVD128 - OpAMD64VPSHRDVDMasked128 - OpAMD64VPSRAVDMasked128 - OpAMD64VPSIGND128 - OpAMD64VPSUBD128 - OpAMD64VPSUBDMasked128 - OpAMD64VPDPBUSD128 - OpAMD64VPDPBUSDMasked128 - OpAMD64VPXORDMasked128 - OpAMD64VPABSD256 - OpAMD64VPABSDMasked256 - OpAMD64VPADDD256 - OpAMD64VPADDDMasked256 - OpAMD64VPANDDMasked256 - OpAMD64VPANDNDMasked256 - OpAMD64VPCOMPRESSDMasked256 - OpAMD64VPCMPEQD256 - OpAMD64VPCMPGTD256 + OpAMD64VPDPWSSDSMasked256 + OpAMD64VPDPWSSDSMasked512 + OpAMD64VPERMB128 + OpAMD64VPERMB256 + OpAMD64VPERMB512 + OpAMD64VPERMBMasked128 + OpAMD64VPERMBMasked256 + OpAMD64VPERMBMasked512 + OpAMD64VPERMD256 + OpAMD64VPERMD512 + OpAMD64VPERMDMasked256 + OpAMD64VPERMDMasked512 + OpAMD64VPERMI2B128 + OpAMD64VPERMI2B256 + OpAMD64VPERMI2B512 + OpAMD64VPERMI2BMasked128 + OpAMD64VPERMI2BMasked256 + OpAMD64VPERMI2BMasked512 + OpAMD64VPERMI2D128 + OpAMD64VPERMI2D256 + OpAMD64VPERMI2D512 + OpAMD64VPERMI2DMasked128 + OpAMD64VPERMI2DMasked256 + OpAMD64VPERMI2DMasked512 + OpAMD64VPERMI2PD128 + OpAMD64VPERMI2PD256 + OpAMD64VPERMI2PD512 + OpAMD64VPERMI2PDMasked128 + OpAMD64VPERMI2PDMasked256 + OpAMD64VPERMI2PDMasked512 + OpAMD64VPERMI2PS128 + OpAMD64VPERMI2PS256 + OpAMD64VPERMI2PS512 + OpAMD64VPERMI2PSMasked128 + OpAMD64VPERMI2PSMasked256 + OpAMD64VPERMI2PSMasked512 + OpAMD64VPERMI2Q128 + OpAMD64VPERMI2Q256 + OpAMD64VPERMI2Q512 + OpAMD64VPERMI2QMasked128 + OpAMD64VPERMI2QMasked256 + OpAMD64VPERMI2QMasked512 + OpAMD64VPERMI2W128 + OpAMD64VPERMI2W256 + OpAMD64VPERMI2W512 + OpAMD64VPERMI2WMasked128 + OpAMD64VPERMI2WMasked256 + OpAMD64VPERMI2WMasked512 + OpAMD64VPERMPD256 + OpAMD64VPERMPD512 + OpAMD64VPERMPDMasked256 + OpAMD64VPERMPDMasked512 + OpAMD64VPERMPS256 + OpAMD64VPERMPS512 + OpAMD64VPERMPSMasked256 + OpAMD64VPERMPSMasked512 + OpAMD64VPERMQ256 + OpAMD64VPERMQ512 + OpAMD64VPERMQMasked256 + OpAMD64VPERMQMasked512 + OpAMD64VPERMW128 + OpAMD64VPERMW256 + OpAMD64VPERMW512 + OpAMD64VPERMWMasked128 + OpAMD64VPERMWMasked256 + OpAMD64VPERMWMasked512 + OpAMD64VPHADDD128 + OpAMD64VPHADDD256 + OpAMD64VPHADDSW128 + OpAMD64VPHADDSW256 + OpAMD64VPHADDW128 + OpAMD64VPHADDW256 + OpAMD64VPHSUBD128 + OpAMD64VPHSUBD256 + OpAMD64VPHSUBSW128 + OpAMD64VPHSUBSW256 + OpAMD64VPHSUBW128 + OpAMD64VPHSUBW256 + OpAMD64VPMADDUBSW128 + OpAMD64VPMADDUBSW256 + OpAMD64VPMADDUBSW512 + 
OpAMD64VPMADDUBSWMasked128 + OpAMD64VPMADDUBSWMasked256 + OpAMD64VPMADDUBSWMasked512 + OpAMD64VPMADDWD128 + OpAMD64VPMADDWD256 + OpAMD64VPMADDWD512 + OpAMD64VPMADDWDMasked128 + OpAMD64VPMADDWDMasked256 + OpAMD64VPMADDWDMasked512 + OpAMD64VPMAXSB128 + OpAMD64VPMAXSB256 + OpAMD64VPMAXSB512 + OpAMD64VPMAXSBMasked128 + OpAMD64VPMAXSBMasked256 + OpAMD64VPMAXSBMasked512 + OpAMD64VPMAXSD128 OpAMD64VPMAXSD256 + OpAMD64VPMAXSD512 + OpAMD64VPMAXSDMasked128 OpAMD64VPMAXSDMasked256 + OpAMD64VPMAXSDMasked512 + OpAMD64VPMAXSQ128 + OpAMD64VPMAXSQ256 + OpAMD64VPMAXSQ512 + OpAMD64VPMAXSQMasked128 + OpAMD64VPMAXSQMasked256 + OpAMD64VPMAXSQMasked512 + OpAMD64VPMAXSW128 + OpAMD64VPMAXSW256 + OpAMD64VPMAXSW512 + OpAMD64VPMAXSWMasked128 + OpAMD64VPMAXSWMasked256 + OpAMD64VPMAXSWMasked512 + OpAMD64VPMAXUB128 + OpAMD64VPMAXUB256 + OpAMD64VPMAXUB512 + OpAMD64VPMAXUBMasked128 + OpAMD64VPMAXUBMasked256 + OpAMD64VPMAXUBMasked512 + OpAMD64VPMAXUD128 + OpAMD64VPMAXUD256 + OpAMD64VPMAXUD512 + OpAMD64VPMAXUDMasked128 + OpAMD64VPMAXUDMasked256 + OpAMD64VPMAXUDMasked512 + OpAMD64VPMAXUQ128 + OpAMD64VPMAXUQ256 + OpAMD64VPMAXUQ512 + OpAMD64VPMAXUQMasked128 + OpAMD64VPMAXUQMasked256 + OpAMD64VPMAXUQMasked512 + OpAMD64VPMAXUW128 + OpAMD64VPMAXUW256 + OpAMD64VPMAXUW512 + OpAMD64VPMAXUWMasked128 + OpAMD64VPMAXUWMasked256 + OpAMD64VPMAXUWMasked512 + OpAMD64VPMINSB128 + OpAMD64VPMINSB256 + OpAMD64VPMINSB512 + OpAMD64VPMINSBMasked128 + OpAMD64VPMINSBMasked256 + OpAMD64VPMINSBMasked512 + OpAMD64VPMINSD128 OpAMD64VPMINSD256 + OpAMD64VPMINSD512 + OpAMD64VPMINSDMasked128 OpAMD64VPMINSDMasked256 + OpAMD64VPMINSDMasked512 + OpAMD64VPMINSQ128 + OpAMD64VPMINSQ256 + OpAMD64VPMINSQ512 + OpAMD64VPMINSQMasked128 + OpAMD64VPMINSQMasked256 + OpAMD64VPMINSQMasked512 + OpAMD64VPMINSW128 + OpAMD64VPMINSW256 + OpAMD64VPMINSW512 + OpAMD64VPMINSWMasked128 + OpAMD64VPMINSWMasked256 + OpAMD64VPMINSWMasked512 + OpAMD64VPMINUB128 + OpAMD64VPMINUB256 + OpAMD64VPMINUB512 + OpAMD64VPMINUBMasked128 + OpAMD64VPMINUBMasked256 + OpAMD64VPMINUBMasked512 + OpAMD64VPMINUD128 + OpAMD64VPMINUD256 + OpAMD64VPMINUD512 + OpAMD64VPMINUDMasked128 + OpAMD64VPMINUDMasked256 + OpAMD64VPMINUDMasked512 + OpAMD64VPMINUQ128 + OpAMD64VPMINUQ256 + OpAMD64VPMINUQ512 + OpAMD64VPMINUQMasked128 + OpAMD64VPMINUQMasked256 + OpAMD64VPMINUQMasked512 + OpAMD64VPMINUW128 + OpAMD64VPMINUW256 + OpAMD64VPMINUW512 + OpAMD64VPMINUWMasked128 + OpAMD64VPMINUWMasked256 + OpAMD64VPMINUWMasked512 + OpAMD64VPMULDQ128 OpAMD64VPMULDQ256 + OpAMD64VPMULDQ512 + OpAMD64VPMULDQMasked128 + OpAMD64VPMULDQMasked256 + OpAMD64VPMULDQMasked512 + OpAMD64VPMULHUW128 + OpAMD64VPMULHUW256 + OpAMD64VPMULHUW512 + OpAMD64VPMULHUWMasked128 + OpAMD64VPMULHUWMasked256 + OpAMD64VPMULHUWMasked512 + OpAMD64VPMULHW128 + OpAMD64VPMULHW256 + OpAMD64VPMULHW512 + OpAMD64VPMULHWMasked128 + OpAMD64VPMULHWMasked256 + OpAMD64VPMULHWMasked512 + OpAMD64VPMULLD128 OpAMD64VPMULLD256 + OpAMD64VPMULLD512 + OpAMD64VPMULLDMasked128 OpAMD64VPMULLDMasked256 - OpAMD64VPORDMasked256 - OpAMD64VPDPWSSD256 - OpAMD64VPDPWSSDMasked256 - OpAMD64VPHADDD256 - OpAMD64VPHSUBD256 + OpAMD64VPMULLDMasked512 + OpAMD64VPMULLQ128 + OpAMD64VPMULLQ256 + OpAMD64VPMULLQ512 + OpAMD64VPMULLQMasked128 + OpAMD64VPMULLQMasked256 + OpAMD64VPMULLQMasked512 + OpAMD64VPMULLW128 + OpAMD64VPMULLW256 + OpAMD64VPMULLW512 + OpAMD64VPMULLWMasked128 + OpAMD64VPMULLWMasked256 + OpAMD64VPMULLWMasked512 + OpAMD64VPMULUDQ128 + OpAMD64VPMULUDQ256 + OpAMD64VPMULUDQ512 + OpAMD64VPMULUDQMasked128 + OpAMD64VPMULUDQMasked256 + OpAMD64VPMULUDQMasked512 + OpAMD64VPOPCNTB128 + OpAMD64VPOPCNTB256 
+ OpAMD64VPOPCNTB512 + OpAMD64VPOPCNTBMasked128 + OpAMD64VPOPCNTBMasked256 + OpAMD64VPOPCNTBMasked512 + OpAMD64VPOPCNTD128 OpAMD64VPOPCNTD256 + OpAMD64VPOPCNTD512 + OpAMD64VPOPCNTDMasked128 OpAMD64VPOPCNTDMasked256 + OpAMD64VPOPCNTDMasked512 + OpAMD64VPOPCNTQ128 + OpAMD64VPOPCNTQ256 + OpAMD64VPOPCNTQ512 + OpAMD64VPOPCNTQMasked128 + OpAMD64VPOPCNTQMasked256 + OpAMD64VPOPCNTQMasked512 + OpAMD64VPOPCNTW128 + OpAMD64VPOPCNTW256 + OpAMD64VPOPCNTW512 + OpAMD64VPOPCNTWMasked128 + OpAMD64VPOPCNTWMasked256 + OpAMD64VPOPCNTWMasked512 + OpAMD64VPOR128 + OpAMD64VPOR256 + OpAMD64VPORD512 + OpAMD64VPORDMasked128 + OpAMD64VPORDMasked256 + OpAMD64VPORDMasked512 + OpAMD64VPORQ512 + OpAMD64VPORQMasked128 + OpAMD64VPORQMasked256 + OpAMD64VPORQMasked512 + OpAMD64VPROLVD128 OpAMD64VPROLVD256 + OpAMD64VPROLVD512 + OpAMD64VPROLVDMasked128 OpAMD64VPROLVDMasked256 + OpAMD64VPROLVDMasked512 + OpAMD64VPROLVQ128 + OpAMD64VPROLVQ256 + OpAMD64VPROLVQ512 + OpAMD64VPROLVQMasked128 + OpAMD64VPROLVQMasked256 + OpAMD64VPROLVQMasked512 + OpAMD64VPRORVD128 OpAMD64VPRORVD256 + OpAMD64VPRORVD512 + OpAMD64VPRORVDMasked128 OpAMD64VPRORVDMasked256 - OpAMD64VPDPWSSDS256 - OpAMD64VPDPWSSDSMasked256 - OpAMD64VPDPBUSDS256 - OpAMD64VPDPBUSDSMasked256 - OpAMD64VPSLLD256 - OpAMD64VPSLLDMasked256 - OpAMD64VPSRAD256 - OpAMD64VPSRADMasked256 - OpAMD64VPSLLVD256 + OpAMD64VPRORVDMasked512 + OpAMD64VPRORVQ128 + OpAMD64VPRORVQ256 + OpAMD64VPRORVQ512 + OpAMD64VPRORVQMasked128 + OpAMD64VPRORVQMasked256 + OpAMD64VPRORVQMasked512 + OpAMD64VPSHLDVD128 OpAMD64VPSHLDVD256 + OpAMD64VPSHLDVD512 + OpAMD64VPSHLDVDMasked128 OpAMD64VPSHLDVDMasked256 - OpAMD64VPSLLVDMasked256 - OpAMD64VPSRAVD256 + OpAMD64VPSHLDVDMasked512 + OpAMD64VPSHLDVQ128 + OpAMD64VPSHLDVQ256 + OpAMD64VPSHLDVQ512 + OpAMD64VPSHLDVQMasked128 + OpAMD64VPSHLDVQMasked256 + OpAMD64VPSHLDVQMasked512 + OpAMD64VPSHLDVW128 + OpAMD64VPSHLDVW256 + OpAMD64VPSHLDVW512 + OpAMD64VPSHLDVWMasked128 + OpAMD64VPSHLDVWMasked256 + OpAMD64VPSHLDVWMasked512 + OpAMD64VPSHRDVD128 OpAMD64VPSHRDVD256 + OpAMD64VPSHRDVD512 + OpAMD64VPSHRDVDMasked128 OpAMD64VPSHRDVDMasked256 - OpAMD64VPSRAVDMasked256 + OpAMD64VPSHRDVDMasked512 + OpAMD64VPSHRDVQ128 + OpAMD64VPSHRDVQ256 + OpAMD64VPSHRDVQ512 + OpAMD64VPSHRDVQMasked128 + OpAMD64VPSHRDVQMasked256 + OpAMD64VPSHRDVQMasked512 + OpAMD64VPSHRDVW128 + OpAMD64VPSHRDVW256 + OpAMD64VPSHRDVW512 + OpAMD64VPSHRDVWMasked128 + OpAMD64VPSHRDVWMasked256 + OpAMD64VPSHRDVWMasked512 + OpAMD64VPSIGNB128 + OpAMD64VPSIGNB256 + OpAMD64VPSIGND128 OpAMD64VPSIGND256 - OpAMD64VPSUBD256 - OpAMD64VPSUBDMasked256 - OpAMD64VPDPBUSD256 - OpAMD64VPDPBUSDMasked256 - OpAMD64VPXORDMasked256 - OpAMD64VPABSQ128 - OpAMD64VPABSQMasked128 - OpAMD64VPADDQ128 - OpAMD64VPADDQMasked128 - OpAMD64VPANDQMasked128 - OpAMD64VPANDNQMasked128 - OpAMD64VPCOMPRESSQMasked128 - OpAMD64VPCMPEQQ128 - OpAMD64VPCMPGTQ128 - OpAMD64VPMAXSQ128 - OpAMD64VPMAXSQMasked128 - OpAMD64VPMINSQ128 - OpAMD64VPMINSQMasked128 - OpAMD64VPMULDQMasked128 - OpAMD64VPMULLQ128 - OpAMD64VPMULLQMasked128 - OpAMD64VPORQMasked128 - OpAMD64VPOPCNTQ128 - OpAMD64VPOPCNTQMasked128 - OpAMD64VPROLVQ128 - OpAMD64VPROLVQMasked128 - OpAMD64VPRORVQ128 - OpAMD64VPRORVQMasked128 + OpAMD64VPSIGNW128 + OpAMD64VPSIGNW256 + OpAMD64VPSLLD128 + OpAMD64VPSLLD256 + OpAMD64VPSLLD512 + OpAMD64VPSLLDMasked128 + OpAMD64VPSLLDMasked256 + OpAMD64VPSLLDMasked512 OpAMD64VPSLLQ128 + OpAMD64VPSLLQ256 + OpAMD64VPSLLQ512 OpAMD64VPSLLQMasked128 - OpAMD64VPSRAQ128 - OpAMD64VPSRAQMasked128 + OpAMD64VPSLLQMasked256 + OpAMD64VPSLLQMasked512 + OpAMD64VPSLLVD128 + OpAMD64VPSLLVD256 + 
OpAMD64VPSLLVD512 + OpAMD64VPSLLVDMasked128 + OpAMD64VPSLLVDMasked256 + OpAMD64VPSLLVDMasked512 OpAMD64VPSLLVQ128 - OpAMD64VPSHLDVQ128 - OpAMD64VPSHLDVQMasked128 + OpAMD64VPSLLVQ256 + OpAMD64VPSLLVQ512 OpAMD64VPSLLVQMasked128 - OpAMD64VPSRAVQ128 - OpAMD64VPSHRDVQ128 - OpAMD64VPSHRDVQMasked128 - OpAMD64VPSRAVQMasked128 - OpAMD64VPSUBQ128 - OpAMD64VPSUBQMasked128 - OpAMD64VPXORQMasked128 - OpAMD64VPABSQ256 - OpAMD64VPABSQMasked256 - OpAMD64VPADDQ256 - OpAMD64VPADDQMasked256 - OpAMD64VPANDQMasked256 - OpAMD64VPANDNQMasked256 - OpAMD64VPCOMPRESSQMasked256 - OpAMD64VPCMPEQQ256 - OpAMD64VPCMPGTQ256 - OpAMD64VPMAXSQ256 - OpAMD64VPMAXSQMasked256 - OpAMD64VPMINSQ256 - OpAMD64VPMINSQMasked256 - OpAMD64VPMULDQMasked256 - OpAMD64VPMULLQ256 - OpAMD64VPMULLQMasked256 - OpAMD64VPORQMasked256 - OpAMD64VPOPCNTQ256 - OpAMD64VPOPCNTQMasked256 - OpAMD64VPROLVQ256 - OpAMD64VPROLVQMasked256 - OpAMD64VPRORVQ256 - OpAMD64VPRORVQMasked256 - OpAMD64VPSLLQ256 - OpAMD64VPSLLQMasked256 + OpAMD64VPSLLVQMasked256 + OpAMD64VPSLLVQMasked512 + OpAMD64VPSLLVW128 + OpAMD64VPSLLVW256 + OpAMD64VPSLLVW512 + OpAMD64VPSLLVWMasked128 + OpAMD64VPSLLVWMasked256 + OpAMD64VPSLLVWMasked512 + OpAMD64VPSLLW128 + OpAMD64VPSLLW256 + OpAMD64VPSLLW512 + OpAMD64VPSLLWMasked128 + OpAMD64VPSLLWMasked256 + OpAMD64VPSLLWMasked512 + OpAMD64VPSRAD128 + OpAMD64VPSRAD256 + OpAMD64VPSRAD512 + OpAMD64VPSRADMasked128 + OpAMD64VPSRADMasked256 + OpAMD64VPSRADMasked512 + OpAMD64VPSRAQ128 OpAMD64VPSRAQ256 + OpAMD64VPSRAQ512 + OpAMD64VPSRAQMasked128 OpAMD64VPSRAQMasked256 - OpAMD64VPSLLVQ256 - OpAMD64VPSHLDVQ256 - OpAMD64VPSHLDVQMasked256 - OpAMD64VPSLLVQMasked256 - OpAMD64VPSRAVQ256 - OpAMD64VPSHRDVQ256 - OpAMD64VPSHRDVQMasked256 - OpAMD64VPSRAVQMasked256 - OpAMD64VPSUBQ256 - OpAMD64VPSUBQMasked256 - OpAMD64VPXORQMasked256 - OpAMD64VPABSQ512 - OpAMD64VPABSQMasked512 - OpAMD64VPADDQ512 - OpAMD64VPADDQMasked512 - OpAMD64VPANDQ512 - OpAMD64VPANDQMasked512 - OpAMD64VPANDNQ512 - OpAMD64VPANDNQMasked512 - OpAMD64VPCOMPRESSQMasked512 - OpAMD64VPCMPEQQ512 - OpAMD64VPCMPGTQ512 - OpAMD64VPMAXSQ512 - OpAMD64VPMAXSQMasked512 - OpAMD64VPMINSQ512 - OpAMD64VPMINSQMasked512 - OpAMD64VPMULDQ512 - OpAMD64VPMULDQMasked512 - OpAMD64VPMULLQ512 - OpAMD64VPMULLQMasked512 - OpAMD64VPORQ512 - OpAMD64VPORQMasked512 - OpAMD64VPOPCNTQ512 - OpAMD64VPOPCNTQMasked512 - OpAMD64VPROLVQ512 - OpAMD64VPROLVQMasked512 - OpAMD64VPRORVQ512 - OpAMD64VPRORVQMasked512 - OpAMD64VPSLLQ512 - OpAMD64VPSLLQMasked512 - OpAMD64VPSRAQ512 OpAMD64VPSRAQMasked512 - OpAMD64VPSLLVQ512 - OpAMD64VPSHLDVQ512 - OpAMD64VPSHLDVQMasked512 - OpAMD64VPSLLVQMasked512 + OpAMD64VPSRAVD128 + OpAMD64VPSRAVD256 + OpAMD64VPSRAVD512 + OpAMD64VPSRAVDMasked128 + OpAMD64VPSRAVDMasked256 + OpAMD64VPSRAVDMasked512 + OpAMD64VPSRAVQ128 + OpAMD64VPSRAVQ256 OpAMD64VPSRAVQ512 - OpAMD64VPSHRDVQ512 - OpAMD64VPSHRDVQMasked512 + OpAMD64VPSRAVQMasked128 + OpAMD64VPSRAVQMasked256 OpAMD64VPSRAVQMasked512 - OpAMD64VPSUBQ512 - OpAMD64VPSUBQMasked512 - OpAMD64VPXORQ512 - OpAMD64VPXORQMasked512 - OpAMD64VPABSB128 - OpAMD64VPABSBMasked128 - OpAMD64VPADDB128 - OpAMD64VPADDBMasked128 - OpAMD64VPAND128 - OpAMD64VPANDN128 - OpAMD64VPCOMPRESSBMasked128 - OpAMD64VPCMPEQB128 - OpAMD64VPCMPGTB128 - OpAMD64VPMAXSB128 - OpAMD64VPMAXSBMasked128 - OpAMD64VPMINSB128 - OpAMD64VPMINSBMasked128 - OpAMD64VPOR128 - OpAMD64VPOPCNTB128 - OpAMD64VPOPCNTBMasked128 - OpAMD64VPADDSB128 - OpAMD64VPADDSBMasked128 - OpAMD64VPSUBSB128 - OpAMD64VPSUBSBMasked128 - OpAMD64VPSIGNB128 - OpAMD64VPSUBB128 - OpAMD64VPSUBBMasked128 - OpAMD64VPXOR128 - OpAMD64VPABSB256 - 
OpAMD64VPABSBMasked256 - OpAMD64VPADDB256 - OpAMD64VPADDBMasked256 - OpAMD64VPAND256 - OpAMD64VPANDN256 - OpAMD64VPCOMPRESSBMasked256 - OpAMD64VPCMPEQB256 - OpAMD64VPCMPGTB256 - OpAMD64VPMAXSB256 - OpAMD64VPMAXSBMasked256 - OpAMD64VPMINSB256 - OpAMD64VPMINSBMasked256 - OpAMD64VPOR256 - OpAMD64VPOPCNTB256 - OpAMD64VPOPCNTBMasked256 - OpAMD64VPADDSB256 - OpAMD64VPADDSBMasked256 - OpAMD64VPSUBSB256 - OpAMD64VPSUBSBMasked256 - OpAMD64VPSIGNB256 - OpAMD64VPSUBB256 - OpAMD64VPSUBBMasked256 - OpAMD64VPXOR256 - OpAMD64VPABSB512 - OpAMD64VPABSBMasked512 - OpAMD64VPADDB512 - OpAMD64VPADDBMasked512 - OpAMD64VPCOMPRESSBMasked512 - OpAMD64VPCMPEQB512 - OpAMD64VPCMPGTB512 - OpAMD64VPMAXSB512 - OpAMD64VPMAXSBMasked512 - OpAMD64VPMINSB512 - OpAMD64VPMINSBMasked512 - OpAMD64VPOPCNTB512 - OpAMD64VPOPCNTBMasked512 - OpAMD64VPADDSB512 - OpAMD64VPADDSBMasked512 - OpAMD64VPSUBSB512 - OpAMD64VPSUBSBMasked512 - OpAMD64VPSUBB512 - OpAMD64VPSUBBMasked512 - OpAMD64VPAVGW256 - OpAMD64VPAVGWMasked256 - OpAMD64VPMAXUW256 - OpAMD64VPMAXUWMasked256 - OpAMD64VPMINUW256 - OpAMD64VPMINUWMasked256 - OpAMD64VPMULHUW256 - OpAMD64VPMULHUWMasked256 - OpAMD64VPERMW256 - OpAMD64VPERMI2W256 - OpAMD64VPERMI2WMasked256 - OpAMD64VPERMWMasked256 - OpAMD64VPSRLW256 - OpAMD64VPSRLWMasked256 - OpAMD64VPSRLVW256 - OpAMD64VPSRLVWMasked256 - OpAMD64VPAVGW512 - OpAMD64VPAVGWMasked512 - OpAMD64VPMAXUW512 - OpAMD64VPMAXUWMasked512 - OpAMD64VPMINUW512 - OpAMD64VPMINUWMasked512 - OpAMD64VPMULHUW512 - OpAMD64VPMULHUWMasked512 - OpAMD64VPERMW512 - OpAMD64VPERMI2W512 - OpAMD64VPERMI2WMasked512 - OpAMD64VPERMWMasked512 - OpAMD64VPSRLW512 - OpAMD64VPSRLWMasked512 - OpAMD64VPSRLVW512 - OpAMD64VPSRLVWMasked512 - OpAMD64VPAVGW128 - OpAMD64VPAVGWMasked128 - OpAMD64VPMAXUW128 - OpAMD64VPMAXUWMasked128 - OpAMD64VPMINUW128 - OpAMD64VPMINUWMasked128 - OpAMD64VPMULHUW128 - OpAMD64VPMULHUWMasked128 - OpAMD64VPERMW128 - OpAMD64VPERMI2W128 - OpAMD64VPERMI2WMasked128 - OpAMD64VPERMWMasked128 - OpAMD64VPSRLW128 - OpAMD64VPSRLWMasked128 - OpAMD64VPSRLVW128 - OpAMD64VPSRLVWMasked128 - OpAMD64VPMAXUD512 - OpAMD64VPMAXUDMasked512 - OpAMD64VPMINUD512 - OpAMD64VPMINUDMasked512 - OpAMD64VPERMD512 - OpAMD64VPERMPS512 - OpAMD64VPERMI2PS512 - OpAMD64VPERMI2D512 - OpAMD64VPERMI2PSMasked512 - OpAMD64VPERMI2DMasked512 - OpAMD64VPERMPSMasked512 - OpAMD64VPERMDMasked512 - OpAMD64VPSRLD512 - OpAMD64VPSRLDMasked512 - OpAMD64VPSRLVD512 - OpAMD64VPSRLVDMasked512 - OpAMD64VPMAXUD128 - OpAMD64VPMAXUDMasked128 - OpAMD64VPMINUD128 - OpAMD64VPMINUDMasked128 - OpAMD64VPMULUDQ128 - OpAMD64VPERMI2PS128 - OpAMD64VPERMI2D128 - OpAMD64VPERMI2DMasked128 - OpAMD64VPERMI2PSMasked128 + OpAMD64VPSRAVW128 + OpAMD64VPSRAVW256 + OpAMD64VPSRAVW512 + OpAMD64VPSRAVWMasked128 + OpAMD64VPSRAVWMasked256 + OpAMD64VPSRAVWMasked512 + OpAMD64VPSRAW128 + OpAMD64VPSRAW256 + OpAMD64VPSRAW512 + OpAMD64VPSRAWMasked128 + OpAMD64VPSRAWMasked256 + OpAMD64VPSRAWMasked512 OpAMD64VPSRLD128 - OpAMD64VPSRLDMasked128 - OpAMD64VPSRLVD128 - OpAMD64VPSRLVDMasked128 - OpAMD64VPMAXUD256 - OpAMD64VPMAXUDMasked256 - OpAMD64VPMINUD256 - OpAMD64VPMINUDMasked256 - OpAMD64VPMULUDQ256 - OpAMD64VPERMD256 - OpAMD64VPERMPS256 - OpAMD64VPERMI2PS256 - OpAMD64VPERMI2D256 - OpAMD64VPERMI2PSMasked256 - OpAMD64VPERMI2DMasked256 - OpAMD64VPERMPSMasked256 - OpAMD64VPERMDMasked256 OpAMD64VPSRLD256 + OpAMD64VPSRLD512 + OpAMD64VPSRLDMasked128 OpAMD64VPSRLDMasked256 - OpAMD64VPSRLVD256 - OpAMD64VPSRLVDMasked256 - OpAMD64VPMAXUQ128 - OpAMD64VPMAXUQMasked128 - OpAMD64VPMINUQ128 - OpAMD64VPMINUQMasked128 - OpAMD64VPMULUDQMasked128 - OpAMD64VPERMI2PD128 - 
OpAMD64VPERMI2Q128 - OpAMD64VPERMI2PDMasked128 - OpAMD64VPERMI2QMasked128 + OpAMD64VPSRLDMasked512 OpAMD64VPSRLQ128 - OpAMD64VPSRLQMasked128 - OpAMD64VPSRLVQ128 - OpAMD64VPSRLVQMasked128 - OpAMD64VPMAXUQ256 - OpAMD64VPMAXUQMasked256 - OpAMD64VPMINUQ256 - OpAMD64VPMINUQMasked256 - OpAMD64VPMULUDQMasked256 - OpAMD64VPERMPD256 - OpAMD64VPERMQ256 - OpAMD64VPERMI2PD256 - OpAMD64VPERMI2Q256 - OpAMD64VPERMI2PDMasked256 - OpAMD64VPERMI2QMasked256 - OpAMD64VPERMQMasked256 - OpAMD64VPERMPDMasked256 OpAMD64VPSRLQ256 - OpAMD64VPSRLQMasked256 - OpAMD64VPSRLVQ256 - OpAMD64VPSRLVQMasked256 - OpAMD64VPMAXUQ512 - OpAMD64VPMAXUQMasked512 - OpAMD64VPMINUQ512 - OpAMD64VPMINUQMasked512 - OpAMD64VPMULUDQ512 - OpAMD64VPMULUDQMasked512 - OpAMD64VPERMPD512 - OpAMD64VPERMQ512 - OpAMD64VPERMI2Q512 - OpAMD64VPERMI2PD512 - OpAMD64VPERMI2QMasked512 - OpAMD64VPERMI2PDMasked512 - OpAMD64VPERMPDMasked512 - OpAMD64VPERMQMasked512 OpAMD64VPSRLQ512 + OpAMD64VPSRLQMasked128 + OpAMD64VPSRLQMasked256 OpAMD64VPSRLQMasked512 + OpAMD64VPSRLVD128 + OpAMD64VPSRLVD256 + OpAMD64VPSRLVD512 + OpAMD64VPSRLVDMasked128 + OpAMD64VPSRLVDMasked256 + OpAMD64VPSRLVDMasked512 + OpAMD64VPSRLVQ128 + OpAMD64VPSRLVQ256 OpAMD64VPSRLVQ512 + OpAMD64VPSRLVQMasked128 + OpAMD64VPSRLVQMasked256 OpAMD64VPSRLVQMasked512 - OpAMD64VPAVGB128 - OpAMD64VPAVGBMasked128 - OpAMD64VGF2P8MULB128 - OpAMD64VGF2P8MULBMasked128 - OpAMD64VPMAXUB128 - OpAMD64VPMAXUBMasked128 - OpAMD64VPMINUB128 - OpAMD64VPMINUBMasked128 - OpAMD64VPERMB128 - OpAMD64VPERMI2B128 - OpAMD64VPERMI2BMasked128 - OpAMD64VPERMBMasked128 - OpAMD64VPMADDUBSW128 - OpAMD64VPMADDUBSWMasked128 - OpAMD64VPAVGB256 - OpAMD64VPAVGBMasked256 - OpAMD64VGF2P8MULB256 - OpAMD64VGF2P8MULBMasked256 - OpAMD64VPMAXUB256 - OpAMD64VPMAXUBMasked256 - OpAMD64VPMINUB256 - OpAMD64VPMINUBMasked256 - OpAMD64VPERMB256 - OpAMD64VPERMI2B256 - OpAMD64VPERMI2BMasked256 - OpAMD64VPERMBMasked256 - OpAMD64VPMADDUBSW256 - OpAMD64VPMADDUBSWMasked256 - OpAMD64VPAVGB512 - OpAMD64VPAVGBMasked512 - OpAMD64VGF2P8MULB512 - OpAMD64VGF2P8MULBMasked512 - OpAMD64VPMAXUB512 - OpAMD64VPMAXUBMasked512 - OpAMD64VPMINUB512 - OpAMD64VPMINUBMasked512 - OpAMD64VPERMB512 - OpAMD64VPERMI2B512 - OpAMD64VPERMI2BMasked512 - OpAMD64VPERMBMasked512 - OpAMD64VPMADDUBSW512 - OpAMD64VPMADDUBSWMasked512 - OpAMD64VRNDSCALEPS512 - OpAMD64VRNDSCALEPSMasked512 - OpAMD64VREDUCEPS512 - OpAMD64VREDUCEPSMasked512 - OpAMD64VCMPPS512 - OpAMD64VCMPPSMasked512 + OpAMD64VPSRLVW128 + OpAMD64VPSRLVW256 + OpAMD64VPSRLVW512 + OpAMD64VPSRLVWMasked128 + OpAMD64VPSRLVWMasked256 + OpAMD64VPSRLVWMasked512 + OpAMD64VPSRLW128 + OpAMD64VPSRLW256 + OpAMD64VPSRLW512 + OpAMD64VPSRLWMasked128 + OpAMD64VPSRLWMasked256 + OpAMD64VPSRLWMasked512 + OpAMD64VPSUBB128 + OpAMD64VPSUBB256 + OpAMD64VPSUBB512 + OpAMD64VPSUBBMasked128 + OpAMD64VPSUBBMasked256 + OpAMD64VPSUBBMasked512 + OpAMD64VPSUBD128 + OpAMD64VPSUBD256 + OpAMD64VPSUBD512 + OpAMD64VPSUBDMasked128 + OpAMD64VPSUBDMasked256 + OpAMD64VPSUBDMasked512 + OpAMD64VPSUBQ128 + OpAMD64VPSUBQ256 + OpAMD64VPSUBQ512 + OpAMD64VPSUBQMasked128 + OpAMD64VPSUBQMasked256 + OpAMD64VPSUBQMasked512 + OpAMD64VPSUBSB128 + OpAMD64VPSUBSB256 + OpAMD64VPSUBSB512 + OpAMD64VPSUBSBMasked128 + OpAMD64VPSUBSBMasked256 + OpAMD64VPSUBSBMasked512 + OpAMD64VPSUBSW128 + OpAMD64VPSUBSW256 + OpAMD64VPSUBSW512 + OpAMD64VPSUBSWMasked128 + OpAMD64VPSUBSWMasked256 + OpAMD64VPSUBSWMasked512 + OpAMD64VPSUBW128 + OpAMD64VPSUBW256 + OpAMD64VPSUBW512 + OpAMD64VPSUBWMasked128 + OpAMD64VPSUBWMasked256 + OpAMD64VPSUBWMasked512 + OpAMD64VPXOR128 + OpAMD64VPXOR256 + OpAMD64VPXORD512 + 
OpAMD64VPXORDMasked128 + OpAMD64VPXORDMasked256 + OpAMD64VPXORDMasked512 + OpAMD64VPXORQ512 + OpAMD64VPXORQMasked128 + OpAMD64VPXORQMasked256 + OpAMD64VPXORQMasked512 + OpAMD64VRCP14PD128 + OpAMD64VRCP14PD256 + OpAMD64VRCP14PD512 + OpAMD64VRCP14PDMasked128 + OpAMD64VRCP14PDMasked256 + OpAMD64VRCP14PDMasked512 + OpAMD64VRCP14PS512 + OpAMD64VRCP14PSMasked128 + OpAMD64VRCP14PSMasked256 + OpAMD64VRCP14PSMasked512 + OpAMD64VRCPPS128 + OpAMD64VRCPPS256 + OpAMD64VRSQRT14PD128 + OpAMD64VRSQRT14PD256 + OpAMD64VRSQRT14PD512 + OpAMD64VRSQRT14PDMasked128 + OpAMD64VRSQRT14PDMasked256 + OpAMD64VRSQRT14PDMasked512 + OpAMD64VRSQRT14PS512 + OpAMD64VRSQRT14PSMasked128 + OpAMD64VRSQRT14PSMasked256 + OpAMD64VRSQRT14PSMasked512 + OpAMD64VRSQRTPS128 + OpAMD64VRSQRTPS256 + OpAMD64VSCALEFPD128 + OpAMD64VSCALEFPD256 + OpAMD64VSCALEFPD512 + OpAMD64VSCALEFPDMasked128 + OpAMD64VSCALEFPDMasked256 + OpAMD64VSCALEFPDMasked512 + OpAMD64VSCALEFPS128 + OpAMD64VSCALEFPS256 + OpAMD64VSCALEFPS512 + OpAMD64VSCALEFPSMasked128 + OpAMD64VSCALEFPSMasked256 + OpAMD64VSCALEFPSMasked512 + OpAMD64VSQRTPD128 + OpAMD64VSQRTPD256 + OpAMD64VSQRTPD512 + OpAMD64VSQRTPDMasked128 + OpAMD64VSQRTPDMasked256 + OpAMD64VSQRTPDMasked512 + OpAMD64VSQRTPS128 + OpAMD64VSQRTPS256 + OpAMD64VSQRTPS512 + OpAMD64VSQRTPSMasked128 + OpAMD64VSQRTPSMasked256 + OpAMD64VSQRTPSMasked512 + OpAMD64VSUBPD128 + OpAMD64VSUBPD256 + OpAMD64VSUBPD512 + OpAMD64VSUBPDMasked128 + OpAMD64VSUBPDMasked256 + OpAMD64VSUBPDMasked512 + OpAMD64VSUBPS128 + OpAMD64VSUBPS256 + OpAMD64VSUBPS512 + OpAMD64VSUBPSMasked128 + OpAMD64VSUBPSMasked256 + OpAMD64VSUBPSMasked512 OpAMD64VROUNDPS128 - OpAMD64VRNDSCALEPS128 - OpAMD64VRNDSCALEPSMasked128 - OpAMD64VREDUCEPS128 - OpAMD64VREDUCEPSMasked128 - OpAMD64VDPPS128 - OpAMD64VCMPPS128 - OpAMD64VCMPPSMasked128 OpAMD64VROUNDPS256 - OpAMD64VRNDSCALEPS256 - OpAMD64VRNDSCALEPSMasked256 - OpAMD64VREDUCEPS256 - OpAMD64VREDUCEPSMasked256 - OpAMD64VDPPS256 - OpAMD64VCMPPS256 - OpAMD64VCMPPSMasked256 - OpAMD64VEXTRACTF128128 - OpAMD64VINSERTF128256 OpAMD64VROUNDPD128 + OpAMD64VROUNDPD256 + OpAMD64VRNDSCALEPS128 + OpAMD64VRNDSCALEPS256 + OpAMD64VRNDSCALEPS512 OpAMD64VRNDSCALEPD128 + OpAMD64VRNDSCALEPD256 + OpAMD64VRNDSCALEPD512 + OpAMD64VRNDSCALEPSMasked128 + OpAMD64VRNDSCALEPSMasked256 + OpAMD64VRNDSCALEPSMasked512 OpAMD64VRNDSCALEPDMasked128 + OpAMD64VRNDSCALEPDMasked256 + OpAMD64VRNDSCALEPDMasked512 + OpAMD64VREDUCEPS128 + OpAMD64VREDUCEPS256 + OpAMD64VREDUCEPS512 OpAMD64VREDUCEPD128 + OpAMD64VREDUCEPD256 + OpAMD64VREDUCEPD512 + OpAMD64VREDUCEPSMasked128 + OpAMD64VREDUCEPSMasked256 + OpAMD64VREDUCEPSMasked512 OpAMD64VREDUCEPDMasked128 + OpAMD64VREDUCEPDMasked256 + OpAMD64VREDUCEPDMasked512 + OpAMD64VDPPS128 + OpAMD64VDPPS256 OpAMD64VDPPD128 + OpAMD64VCMPPS128 + OpAMD64VCMPPS256 + OpAMD64VCMPPS512 OpAMD64VCMPPD128 - OpAMD64VCMPPDMasked128 - OpAMD64VROUNDPD256 - OpAMD64VRNDSCALEPD256 - OpAMD64VRNDSCALEPDMasked256 - OpAMD64VREDUCEPD256 - OpAMD64VREDUCEPDMasked256 OpAMD64VCMPPD256 - OpAMD64VCMPPDMasked256 - OpAMD64VRNDSCALEPD512 - OpAMD64VRNDSCALEPDMasked512 - OpAMD64VREDUCEPD512 - OpAMD64VREDUCEPDMasked512 OpAMD64VCMPPD512 + OpAMD64VCMPPSMasked128 + OpAMD64VCMPPSMasked256 + OpAMD64VCMPPSMasked512 + OpAMD64VCMPPDMasked128 + OpAMD64VCMPPDMasked256 OpAMD64VCMPPDMasked512 + OpAMD64VPCMPBMasked128 + OpAMD64VPCMPBMasked256 + OpAMD64VPCMPBMasked512 + OpAMD64VPCMPWMasked128 OpAMD64VPCMPWMasked256 - OpAMD64VPCMPW256 - OpAMD64VPSHLDW256 - OpAMD64VPSHLDWMasked256 - OpAMD64VPSHRDW256 - OpAMD64VPSHRDWMasked256 OpAMD64VPCMPWMasked512 - OpAMD64VPCMPW512 - 
OpAMD64VPSHLDW512 - OpAMD64VPSHLDWMasked512 - OpAMD64VPSHRDW512 - OpAMD64VPSHRDWMasked512 - OpAMD64VPCMPWMasked128 - OpAMD64VPEXTRW128 - OpAMD64VPCMPW128 - OpAMD64VPINSRW128 - OpAMD64VPSHLDW128 - OpAMD64VPSHLDWMasked128 - OpAMD64VPSHRDW128 - OpAMD64VPSHRDWMasked128 - OpAMD64VPCMPDMasked512 - OpAMD64VPCMPD512 - OpAMD64VPROLD512 - OpAMD64VPROLDMasked512 - OpAMD64VPRORD512 - OpAMD64VPRORDMasked512 - OpAMD64VPSHLDD512 - OpAMD64VPSHLDDMasked512 - OpAMD64VPSHRDD512 - OpAMD64VPSHRDDMasked512 OpAMD64VPCMPDMasked128 - OpAMD64VPEXTRD128 - OpAMD64VPCMPD128 - OpAMD64VPROLD128 - OpAMD64VPROLDMasked128 - OpAMD64VPRORD128 - OpAMD64VPRORDMasked128 - OpAMD64VPINSRD128 - OpAMD64VPSHLDD128 - OpAMD64VPSHLDDMasked128 - OpAMD64VPSHRDD128 - OpAMD64VPSHRDDMasked128 OpAMD64VPCMPDMasked256 - OpAMD64VPCMPD256 - OpAMD64VPROLD256 - OpAMD64VPROLDMasked256 - OpAMD64VPRORD256 - OpAMD64VPRORDMasked256 - OpAMD64VPSHLDD256 - OpAMD64VPSHLDDMasked256 - OpAMD64VPSHRDD256 - OpAMD64VPSHRDDMasked256 + OpAMD64VPCMPDMasked512 OpAMD64VPCMPQMasked128 - OpAMD64VPEXTRQ128 - OpAMD64VPCMPQ128 - OpAMD64VPROLQ128 - OpAMD64VPROLQMasked128 - OpAMD64VPRORQ128 - OpAMD64VPRORQMasked128 - OpAMD64VPINSRQ128 - OpAMD64VPSHLDQ128 - OpAMD64VPSHLDQMasked128 - OpAMD64VPSHRDQ128 - OpAMD64VPSHRDQMasked128 OpAMD64VPCMPQMasked256 - OpAMD64VPCMPQ256 - OpAMD64VPROLQ256 - OpAMD64VPROLQMasked256 - OpAMD64VPRORQ256 - OpAMD64VPRORQMasked256 - OpAMD64VPSHLDQ256 - OpAMD64VPSHLDQMasked256 - OpAMD64VPSHRDQ256 - OpAMD64VPSHRDQMasked256 OpAMD64VPCMPQMasked512 - OpAMD64VPCMPQ512 - OpAMD64VPROLQ512 - OpAMD64VPROLQMasked512 - OpAMD64VPRORQ512 - OpAMD64VPRORQMasked512 - OpAMD64VPSHLDQ512 - OpAMD64VPSHLDQMasked512 - OpAMD64VPSHRDQ512 - OpAMD64VPSHRDQMasked512 - OpAMD64VPCMPBMasked128 - OpAMD64VPEXTRB128 - OpAMD64VPCMPB128 - OpAMD64VPINSRB128 - OpAMD64VPCMPBMasked256 - OpAMD64VEXTRACTI128128 - OpAMD64VPCMPB256 - OpAMD64VINSERTI128256 - OpAMD64VPCMPBMasked512 - OpAMD64VPCMPB512 + OpAMD64VPCMPUBMasked128 + OpAMD64VPCMPUBMasked256 + OpAMD64VPCMPUBMasked512 + OpAMD64VPCMPUWMasked128 OpAMD64VPCMPUWMasked256 - OpAMD64VPCMPUW256 OpAMD64VPCMPUWMasked512 - OpAMD64VPCMPUW512 - OpAMD64VPCMPUWMasked128 - OpAMD64VPCMPUW128 - OpAMD64VPCMPUDMasked512 - OpAMD64VPCMPUD512 OpAMD64VPCMPUDMasked128 - OpAMD64VPCMPUD128 OpAMD64VPCMPUDMasked256 - OpAMD64VPCMPUD256 + OpAMD64VPCMPUDMasked512 OpAMD64VPCMPUQMasked128 - OpAMD64VPCMPUQ128 OpAMD64VPCMPUQMasked256 - OpAMD64VPCMPUQ256 OpAMD64VPCMPUQMasked512 - OpAMD64VPCMPUQ512 - OpAMD64VPCMPUBMasked128 OpAMD64VGF2P8AFFINEQB128 - OpAMD64VGF2P8AFFINEINVQB128 - OpAMD64VGF2P8AFFINEINVQBMasked128 - OpAMD64VGF2P8AFFINEQBMasked128 - OpAMD64VPCMPUB128 - OpAMD64VPCMPUBMasked256 OpAMD64VGF2P8AFFINEQB256 - OpAMD64VGF2P8AFFINEINVQB256 - OpAMD64VGF2P8AFFINEINVQBMasked256 - OpAMD64VGF2P8AFFINEQBMasked256 - OpAMD64VPCMPUB256 - OpAMD64VPCMPUBMasked512 OpAMD64VGF2P8AFFINEQB512 + OpAMD64VGF2P8AFFINEINVQB128 + OpAMD64VGF2P8AFFINEINVQB256 OpAMD64VGF2P8AFFINEINVQB512 + OpAMD64VGF2P8AFFINEINVQBMasked128 + OpAMD64VGF2P8AFFINEINVQBMasked256 OpAMD64VGF2P8AFFINEINVQBMasked512 + OpAMD64VGF2P8AFFINEQBMasked128 + OpAMD64VGF2P8AFFINEQBMasked256 OpAMD64VGF2P8AFFINEQBMasked512 + OpAMD64VEXTRACTF128128 + OpAMD64VEXTRACTI128128 + OpAMD64VPEXTRB128 + OpAMD64VPEXTRW128 + OpAMD64VPEXTRD128 + OpAMD64VPEXTRQ128 + OpAMD64VPCMPUB128 + OpAMD64VPCMPUB256 OpAMD64VPCMPUB512 + OpAMD64VPCMPUW128 + OpAMD64VPCMPUW256 + OpAMD64VPCMPUW512 + OpAMD64VPCMPUD128 + OpAMD64VPCMPUD256 + OpAMD64VPCMPUD512 + OpAMD64VPCMPUQ128 + OpAMD64VPCMPUQ256 + OpAMD64VPCMPUQ512 + OpAMD64VPCMPB128 + OpAMD64VPCMPB256 + 
OpAMD64VPCMPB512 + OpAMD64VPCMPW128 + OpAMD64VPCMPW256 + OpAMD64VPCMPW512 + OpAMD64VPCMPD128 + OpAMD64VPCMPD256 + OpAMD64VPCMPD512 + OpAMD64VPCMPQ128 + OpAMD64VPCMPQ256 + OpAMD64VPCMPQ512 + OpAMD64VPROLD128 + OpAMD64VPROLD256 + OpAMD64VPROLD512 + OpAMD64VPROLQ128 + OpAMD64VPROLQ256 + OpAMD64VPROLQ512 + OpAMD64VPROLDMasked128 + OpAMD64VPROLDMasked256 + OpAMD64VPROLDMasked512 + OpAMD64VPROLQMasked128 + OpAMD64VPROLQMasked256 + OpAMD64VPROLQMasked512 + OpAMD64VPRORD128 + OpAMD64VPRORD256 + OpAMD64VPRORD512 + OpAMD64VPRORQ128 + OpAMD64VPRORQ256 + OpAMD64VPRORQ512 + OpAMD64VPRORDMasked128 + OpAMD64VPRORDMasked256 + OpAMD64VPRORDMasked512 + OpAMD64VPRORQMasked128 + OpAMD64VPRORQMasked256 + OpAMD64VPRORQMasked512 + OpAMD64VINSERTF128256 + OpAMD64VINSERTI128256 + OpAMD64VPINSRB128 + OpAMD64VPINSRW128 + OpAMD64VPINSRD128 + OpAMD64VPINSRQ128 + OpAMD64VPSHLDW128 + OpAMD64VPSHLDW256 + OpAMD64VPSHLDW512 + OpAMD64VPSHLDD128 + OpAMD64VPSHLDD256 + OpAMD64VPSHLDD512 + OpAMD64VPSHLDQ128 + OpAMD64VPSHLDQ256 + OpAMD64VPSHLDQ512 + OpAMD64VPSHLDWMasked128 + OpAMD64VPSHLDWMasked256 + OpAMD64VPSHLDWMasked512 + OpAMD64VPSHLDDMasked128 + OpAMD64VPSHLDDMasked256 + OpAMD64VPSHLDDMasked512 + OpAMD64VPSHLDQMasked128 + OpAMD64VPSHLDQMasked256 + OpAMD64VPSHLDQMasked512 + OpAMD64VPSHRDW128 + OpAMD64VPSHRDW256 + OpAMD64VPSHRDW512 + OpAMD64VPSHRDD128 + OpAMD64VPSHRDD256 + OpAMD64VPSHRDD512 + OpAMD64VPSHRDQ128 + OpAMD64VPSHRDQ256 + OpAMD64VPSHRDQ512 + OpAMD64VPSHRDWMasked128 + OpAMD64VPSHRDWMasked256 + OpAMD64VPSHRDWMasked512 + OpAMD64VPSHRDDMasked128 + OpAMD64VPSHRDDMasked256 + OpAMD64VPSHRDDMasked512 + OpAMD64VPSHRDQMasked128 + OpAMD64VPSHRDQMasked256 + OpAMD64VPSHRDQMasked512 OpARMADD OpARMADDconst @@ -4449,1797 +4449,1797 @@ const ( OpStoreMask64x2 OpStoreMask64x4 OpStoreMask64x8 - OpAddFloat32x16 - OpAddMaskedFloat32x16 - OpApproximateReciprocalFloat32x16 - OpApproximateReciprocalMaskedFloat32x16 - OpApproximateReciprocalOfSqrtFloat32x16 - OpApproximateReciprocalOfSqrtMaskedFloat32x16 - OpCompressFloat32x16 - OpDivFloat32x16 - OpDivMaskedFloat32x16 - OpEqualFloat32x16 - OpEqualMaskedFloat32x16 - OpFusedMultiplyAddFloat32x16 - OpFusedMultiplyAddMaskedFloat32x16 - OpFusedMultiplyAddSubFloat32x16 - OpFusedMultiplyAddSubMaskedFloat32x16 - OpFusedMultiplySubAddFloat32x16 - OpFusedMultiplySubAddMaskedFloat32x16 - OpGreaterFloat32x16 - OpGreaterEqualFloat32x16 - OpGreaterEqualMaskedFloat32x16 - OpGreaterMaskedFloat32x16 - OpIsNanFloat32x16 - OpIsNanMaskedFloat32x16 - OpLessFloat32x16 - OpLessEqualFloat32x16 - OpLessEqualMaskedFloat32x16 - OpLessMaskedFloat32x16 - OpMaxFloat32x16 - OpMaxMaskedFloat32x16 - OpMinFloat32x16 - OpMinMaskedFloat32x16 - OpMulFloat32x16 - OpMulByPowOf2Float32x16 - OpMulByPowOf2MaskedFloat32x16 - OpMulMaskedFloat32x16 - OpNotEqualFloat32x16 - OpNotEqualMaskedFloat32x16 - OpSqrtFloat32x16 - OpSqrtMaskedFloat32x16 - OpSubFloat32x16 - OpSubMaskedFloat32x16 + OpAbsoluteInt8x16 + OpAbsoluteInt8x32 + OpAbsoluteInt8x64 + OpAbsoluteInt16x8 + OpAbsoluteInt16x16 + OpAbsoluteInt16x32 + OpAbsoluteInt32x4 + OpAbsoluteInt32x8 + OpAbsoluteInt32x16 + OpAbsoluteInt64x2 + OpAbsoluteInt64x4 + OpAbsoluteInt64x8 + OpAbsoluteMaskedInt8x16 + OpAbsoluteMaskedInt8x32 + OpAbsoluteMaskedInt8x64 + OpAbsoluteMaskedInt16x8 + OpAbsoluteMaskedInt16x16 + OpAbsoluteMaskedInt16x32 + OpAbsoluteMaskedInt32x4 + OpAbsoluteMaskedInt32x8 + OpAbsoluteMaskedInt32x16 + OpAbsoluteMaskedInt64x2 + OpAbsoluteMaskedInt64x4 + OpAbsoluteMaskedInt64x8 OpAddFloat32x4 + OpAddFloat32x8 + OpAddFloat32x16 + OpAddFloat64x2 + OpAddFloat64x4 + OpAddFloat64x8 + 
OpAddInt8x16 + OpAddInt8x32 + OpAddInt8x64 + OpAddInt16x8 + OpAddInt16x16 + OpAddInt16x32 + OpAddInt32x4 + OpAddInt32x8 + OpAddInt32x16 + OpAddInt64x2 + OpAddInt64x4 + OpAddInt64x8 OpAddMaskedFloat32x4 + OpAddMaskedFloat32x8 + OpAddMaskedFloat32x16 + OpAddMaskedFloat64x2 + OpAddMaskedFloat64x4 + OpAddMaskedFloat64x8 + OpAddMaskedInt8x16 + OpAddMaskedInt8x32 + OpAddMaskedInt8x64 + OpAddMaskedInt16x8 + OpAddMaskedInt16x16 + OpAddMaskedInt16x32 + OpAddMaskedInt32x4 + OpAddMaskedInt32x8 + OpAddMaskedInt32x16 + OpAddMaskedInt64x2 + OpAddMaskedInt64x4 + OpAddMaskedInt64x8 + OpAddMaskedUint8x16 + OpAddMaskedUint8x32 + OpAddMaskedUint8x64 + OpAddMaskedUint16x8 + OpAddMaskedUint16x16 + OpAddMaskedUint16x32 + OpAddMaskedUint32x4 + OpAddMaskedUint32x8 + OpAddMaskedUint32x16 + OpAddMaskedUint64x2 + OpAddMaskedUint64x4 + OpAddMaskedUint64x8 OpAddSubFloat32x4 + OpAddSubFloat32x8 + OpAddSubFloat64x2 + OpAddSubFloat64x4 + OpAddUint8x16 + OpAddUint8x32 + OpAddUint8x64 + OpAddUint16x8 + OpAddUint16x16 + OpAddUint16x32 + OpAddUint32x4 + OpAddUint32x8 + OpAddUint32x16 + OpAddUint64x2 + OpAddUint64x4 + OpAddUint64x8 + OpAndInt8x16 + OpAndInt8x32 + OpAndInt16x8 + OpAndInt16x16 + OpAndInt32x4 + OpAndInt32x8 + OpAndInt32x16 + OpAndInt64x2 + OpAndInt64x4 + OpAndInt64x8 + OpAndMaskedInt32x4 + OpAndMaskedInt32x8 + OpAndMaskedInt32x16 + OpAndMaskedInt64x2 + OpAndMaskedInt64x4 + OpAndMaskedInt64x8 + OpAndMaskedUint32x4 + OpAndMaskedUint32x8 + OpAndMaskedUint32x16 + OpAndMaskedUint64x2 + OpAndMaskedUint64x4 + OpAndMaskedUint64x8 + OpAndNotInt8x16 + OpAndNotInt8x32 + OpAndNotInt16x8 + OpAndNotInt16x16 + OpAndNotInt32x4 + OpAndNotInt32x8 + OpAndNotInt32x16 + OpAndNotInt64x2 + OpAndNotInt64x4 + OpAndNotInt64x8 + OpAndNotMaskedInt32x4 + OpAndNotMaskedInt32x8 + OpAndNotMaskedInt32x16 + OpAndNotMaskedInt64x2 + OpAndNotMaskedInt64x4 + OpAndNotMaskedInt64x8 + OpAndNotMaskedUint32x4 + OpAndNotMaskedUint32x8 + OpAndNotMaskedUint32x16 + OpAndNotMaskedUint64x2 + OpAndNotMaskedUint64x4 + OpAndNotMaskedUint64x8 + OpAndNotUint8x16 + OpAndNotUint8x32 + OpAndNotUint16x8 + OpAndNotUint16x16 + OpAndNotUint32x4 + OpAndNotUint32x8 + OpAndNotUint32x16 + OpAndNotUint64x2 + OpAndNotUint64x4 + OpAndNotUint64x8 + OpAndUint8x16 + OpAndUint8x32 + OpAndUint16x8 + OpAndUint16x16 + OpAndUint32x4 + OpAndUint32x8 + OpAndUint32x16 + OpAndUint64x2 + OpAndUint64x4 + OpAndUint64x8 OpApproximateReciprocalFloat32x4 + OpApproximateReciprocalFloat32x8 + OpApproximateReciprocalFloat32x16 + OpApproximateReciprocalFloat64x2 + OpApproximateReciprocalFloat64x4 + OpApproximateReciprocalFloat64x8 OpApproximateReciprocalMaskedFloat32x4 + OpApproximateReciprocalMaskedFloat32x8 + OpApproximateReciprocalMaskedFloat32x16 + OpApproximateReciprocalMaskedFloat64x2 + OpApproximateReciprocalMaskedFloat64x4 + OpApproximateReciprocalMaskedFloat64x8 OpApproximateReciprocalOfSqrtFloat32x4 + OpApproximateReciprocalOfSqrtFloat32x8 + OpApproximateReciprocalOfSqrtFloat32x16 + OpApproximateReciprocalOfSqrtFloat64x2 + OpApproximateReciprocalOfSqrtFloat64x4 + OpApproximateReciprocalOfSqrtFloat64x8 OpApproximateReciprocalOfSqrtMaskedFloat32x4 + OpApproximateReciprocalOfSqrtMaskedFloat32x8 + OpApproximateReciprocalOfSqrtMaskedFloat32x16 + OpApproximateReciprocalOfSqrtMaskedFloat64x2 + OpApproximateReciprocalOfSqrtMaskedFloat64x4 + OpApproximateReciprocalOfSqrtMaskedFloat64x8 + OpAverageMaskedUint8x16 + OpAverageMaskedUint8x32 + OpAverageMaskedUint8x64 + OpAverageMaskedUint16x8 + OpAverageMaskedUint16x16 + OpAverageMaskedUint16x32 + OpAverageUint8x16 + OpAverageUint8x32 + OpAverageUint8x64 
+ OpAverageUint16x8 + OpAverageUint16x16 + OpAverageUint16x32 OpCeilFloat32x4 + OpCeilFloat32x8 + OpCeilFloat64x2 + OpCeilFloat64x4 OpCompressFloat32x4 + OpCompressFloat32x8 + OpCompressFloat32x16 + OpCompressFloat64x2 + OpCompressFloat64x4 + OpCompressFloat64x8 + OpCompressInt8x16 + OpCompressInt8x32 + OpCompressInt8x64 + OpCompressInt16x8 + OpCompressInt16x16 + OpCompressInt16x32 + OpCompressInt32x4 + OpCompressInt32x8 + OpCompressInt32x16 + OpCompressInt64x2 + OpCompressInt64x4 + OpCompressInt64x8 + OpCompressUint8x16 + OpCompressUint8x32 + OpCompressUint8x64 + OpCompressUint16x8 + OpCompressUint16x16 + OpCompressUint16x32 + OpCompressUint32x4 + OpCompressUint32x8 + OpCompressUint32x16 + OpCompressUint64x2 + OpCompressUint64x4 + OpCompressUint64x8 OpDivFloat32x4 + OpDivFloat32x8 + OpDivFloat32x16 + OpDivFloat64x2 + OpDivFloat64x4 + OpDivFloat64x8 OpDivMaskedFloat32x4 + OpDivMaskedFloat32x8 + OpDivMaskedFloat32x16 + OpDivMaskedFloat64x2 + OpDivMaskedFloat64x4 + OpDivMaskedFloat64x8 OpDotProdBroadcastFloat32x4 + OpDotProdBroadcastFloat32x8 + OpDotProdBroadcastFloat64x2 OpEqualFloat32x4 + OpEqualFloat32x8 + OpEqualFloat32x16 + OpEqualFloat64x2 + OpEqualFloat64x4 + OpEqualFloat64x8 + OpEqualInt8x16 + OpEqualInt8x32 + OpEqualInt8x64 + OpEqualInt16x8 + OpEqualInt16x16 + OpEqualInt16x32 + OpEqualInt32x4 + OpEqualInt32x8 + OpEqualInt32x16 + OpEqualInt64x2 + OpEqualInt64x4 + OpEqualInt64x8 OpEqualMaskedFloat32x4 - OpFloorFloat32x4 - OpFusedMultiplyAddFloat32x4 - OpFusedMultiplyAddMaskedFloat32x4 - OpFusedMultiplyAddSubFloat32x4 - OpFusedMultiplyAddSubMaskedFloat32x4 - OpFusedMultiplySubAddFloat32x4 - OpFusedMultiplySubAddMaskedFloat32x4 - OpGreaterFloat32x4 - OpGreaterEqualFloat32x4 - OpGreaterEqualMaskedFloat32x4 - OpGreaterMaskedFloat32x4 - OpIsNanFloat32x4 - OpIsNanMaskedFloat32x4 - OpLessFloat32x4 - OpLessEqualFloat32x4 - OpLessEqualMaskedFloat32x4 - OpLessMaskedFloat32x4 - OpMaxFloat32x4 - OpMaxMaskedFloat32x4 - OpMinFloat32x4 - OpMinMaskedFloat32x4 - OpMulFloat32x4 - OpMulByPowOf2Float32x4 - OpMulByPowOf2MaskedFloat32x4 - OpMulMaskedFloat32x4 - OpNotEqualFloat32x4 - OpNotEqualMaskedFloat32x4 - OpPairwiseAddFloat32x4 - OpPairwiseSubFloat32x4 - OpRoundFloat32x4 - OpSqrtFloat32x4 - OpSqrtMaskedFloat32x4 - OpSubFloat32x4 - OpSubMaskedFloat32x4 - OpTruncFloat32x4 - OpAddFloat32x8 - OpAddMaskedFloat32x8 - OpAddSubFloat32x8 - OpApproximateReciprocalFloat32x8 - OpApproximateReciprocalMaskedFloat32x8 - OpApproximateReciprocalOfSqrtFloat32x8 - OpApproximateReciprocalOfSqrtMaskedFloat32x8 - OpCeilFloat32x8 - OpCompressFloat32x8 - OpDivFloat32x8 - OpDivMaskedFloat32x8 - OpDotProdBroadcastFloat32x8 - OpEqualFloat32x8 OpEqualMaskedFloat32x8 + OpEqualMaskedFloat32x16 + OpEqualMaskedFloat64x2 + OpEqualMaskedFloat64x4 + OpEqualMaskedFloat64x8 + OpEqualMaskedInt8x16 + OpEqualMaskedInt8x32 + OpEqualMaskedInt8x64 + OpEqualMaskedInt16x8 + OpEqualMaskedInt16x16 + OpEqualMaskedInt16x32 + OpEqualMaskedInt32x4 + OpEqualMaskedInt32x8 + OpEqualMaskedInt32x16 + OpEqualMaskedInt64x2 + OpEqualMaskedInt64x4 + OpEqualMaskedInt64x8 + OpEqualMaskedUint8x16 + OpEqualMaskedUint8x32 + OpEqualMaskedUint8x64 + OpEqualMaskedUint16x8 + OpEqualMaskedUint16x16 + OpEqualMaskedUint16x32 + OpEqualMaskedUint32x4 + OpEqualMaskedUint32x8 + OpEqualMaskedUint32x16 + OpEqualMaskedUint64x2 + OpEqualMaskedUint64x4 + OpEqualMaskedUint64x8 + OpEqualUint8x16 + OpEqualUint8x32 + OpEqualUint8x64 + OpEqualUint16x8 + OpEqualUint16x16 + OpEqualUint16x32 + OpEqualUint32x4 + OpEqualUint32x8 + OpEqualUint32x16 + OpEqualUint64x2 + OpEqualUint64x4 + 
OpEqualUint64x8 + OpFloorFloat32x4 OpFloorFloat32x8 + OpFloorFloat64x2 + OpFloorFloat64x4 + OpFusedMultiplyAddFloat32x4 OpFusedMultiplyAddFloat32x8 + OpFusedMultiplyAddFloat32x16 + OpFusedMultiplyAddFloat64x2 + OpFusedMultiplyAddFloat64x4 + OpFusedMultiplyAddFloat64x8 + OpFusedMultiplyAddMaskedFloat32x4 OpFusedMultiplyAddMaskedFloat32x8 + OpFusedMultiplyAddMaskedFloat32x16 + OpFusedMultiplyAddMaskedFloat64x2 + OpFusedMultiplyAddMaskedFloat64x4 + OpFusedMultiplyAddMaskedFloat64x8 + OpFusedMultiplyAddSubFloat32x4 OpFusedMultiplyAddSubFloat32x8 + OpFusedMultiplyAddSubFloat32x16 + OpFusedMultiplyAddSubFloat64x2 + OpFusedMultiplyAddSubFloat64x4 + OpFusedMultiplyAddSubFloat64x8 + OpFusedMultiplyAddSubMaskedFloat32x4 OpFusedMultiplyAddSubMaskedFloat32x8 + OpFusedMultiplyAddSubMaskedFloat32x16 + OpFusedMultiplyAddSubMaskedFloat64x2 + OpFusedMultiplyAddSubMaskedFloat64x4 + OpFusedMultiplyAddSubMaskedFloat64x8 + OpFusedMultiplySubAddFloat32x4 OpFusedMultiplySubAddFloat32x8 + OpFusedMultiplySubAddFloat32x16 + OpFusedMultiplySubAddFloat64x2 + OpFusedMultiplySubAddFloat64x4 + OpFusedMultiplySubAddFloat64x8 + OpFusedMultiplySubAddMaskedFloat32x4 OpFusedMultiplySubAddMaskedFloat32x8 - OpGreaterFloat32x8 + OpFusedMultiplySubAddMaskedFloat32x16 + OpFusedMultiplySubAddMaskedFloat64x2 + OpFusedMultiplySubAddMaskedFloat64x4 + OpFusedMultiplySubAddMaskedFloat64x8 + OpGaloisFieldMulMaskedUint8x16 + OpGaloisFieldMulMaskedUint8x32 + OpGaloisFieldMulMaskedUint8x64 + OpGaloisFieldMulUint8x16 + OpGaloisFieldMulUint8x32 + OpGaloisFieldMulUint8x64 + OpGreaterEqualFloat32x4 OpGreaterEqualFloat32x8 + OpGreaterEqualFloat32x16 + OpGreaterEqualFloat64x2 + OpGreaterEqualFloat64x4 + OpGreaterEqualFloat64x8 + OpGreaterEqualInt8x16 + OpGreaterEqualInt8x32 + OpGreaterEqualInt8x64 + OpGreaterEqualInt16x8 + OpGreaterEqualInt16x16 + OpGreaterEqualInt16x32 + OpGreaterEqualInt32x4 + OpGreaterEqualInt32x8 + OpGreaterEqualInt32x16 + OpGreaterEqualInt64x2 + OpGreaterEqualInt64x4 + OpGreaterEqualInt64x8 + OpGreaterEqualMaskedFloat32x4 OpGreaterEqualMaskedFloat32x8 + OpGreaterEqualMaskedFloat32x16 + OpGreaterEqualMaskedFloat64x2 + OpGreaterEqualMaskedFloat64x4 + OpGreaterEqualMaskedFloat64x8 + OpGreaterEqualMaskedInt8x16 + OpGreaterEqualMaskedInt8x32 + OpGreaterEqualMaskedInt8x64 + OpGreaterEqualMaskedInt16x8 + OpGreaterEqualMaskedInt16x16 + OpGreaterEqualMaskedInt16x32 + OpGreaterEqualMaskedInt32x4 + OpGreaterEqualMaskedInt32x8 + OpGreaterEqualMaskedInt32x16 + OpGreaterEqualMaskedInt64x2 + OpGreaterEqualMaskedInt64x4 + OpGreaterEqualMaskedInt64x8 + OpGreaterEqualMaskedUint8x16 + OpGreaterEqualMaskedUint8x32 + OpGreaterEqualMaskedUint8x64 + OpGreaterEqualMaskedUint16x8 + OpGreaterEqualMaskedUint16x16 + OpGreaterEqualMaskedUint16x32 + OpGreaterEqualMaskedUint32x4 + OpGreaterEqualMaskedUint32x8 + OpGreaterEqualMaskedUint32x16 + OpGreaterEqualMaskedUint64x2 + OpGreaterEqualMaskedUint64x4 + OpGreaterEqualMaskedUint64x8 + OpGreaterEqualUint8x16 + OpGreaterEqualUint8x32 + OpGreaterEqualUint8x64 + OpGreaterEqualUint16x8 + OpGreaterEqualUint16x16 + OpGreaterEqualUint16x32 + OpGreaterEqualUint32x4 + OpGreaterEqualUint32x8 + OpGreaterEqualUint32x16 + OpGreaterEqualUint64x2 + OpGreaterEqualUint64x4 + OpGreaterEqualUint64x8 + OpGreaterFloat32x4 + OpGreaterFloat32x8 + OpGreaterFloat32x16 + OpGreaterFloat64x2 + OpGreaterFloat64x4 + OpGreaterFloat64x8 + OpGreaterInt8x16 + OpGreaterInt8x32 + OpGreaterInt8x64 + OpGreaterInt16x8 + OpGreaterInt16x16 + OpGreaterInt16x32 + OpGreaterInt32x4 + OpGreaterInt32x8 + OpGreaterInt32x16 + OpGreaterInt64x2 + 
OpGreaterInt64x4 + OpGreaterInt64x8 + OpGreaterMaskedFloat32x4 OpGreaterMaskedFloat32x8 + OpGreaterMaskedFloat32x16 + OpGreaterMaskedFloat64x2 + OpGreaterMaskedFloat64x4 + OpGreaterMaskedFloat64x8 + OpGreaterMaskedInt8x16 + OpGreaterMaskedInt8x32 + OpGreaterMaskedInt8x64 + OpGreaterMaskedInt16x8 + OpGreaterMaskedInt16x16 + OpGreaterMaskedInt16x32 + OpGreaterMaskedInt32x4 + OpGreaterMaskedInt32x8 + OpGreaterMaskedInt32x16 + OpGreaterMaskedInt64x2 + OpGreaterMaskedInt64x4 + OpGreaterMaskedInt64x8 + OpGreaterMaskedUint8x16 + OpGreaterMaskedUint8x32 + OpGreaterMaskedUint8x64 + OpGreaterMaskedUint16x8 + OpGreaterMaskedUint16x16 + OpGreaterMaskedUint16x32 + OpGreaterMaskedUint32x4 + OpGreaterMaskedUint32x8 + OpGreaterMaskedUint32x16 + OpGreaterMaskedUint64x2 + OpGreaterMaskedUint64x4 + OpGreaterMaskedUint64x8 + OpGreaterUint8x16 + OpGreaterUint8x32 + OpGreaterUint8x64 + OpGreaterUint16x8 + OpGreaterUint16x16 + OpGreaterUint16x32 + OpGreaterUint32x4 + OpGreaterUint32x8 + OpGreaterUint32x16 + OpGreaterUint64x2 + OpGreaterUint64x4 + OpGreaterUint64x8 + OpIsNanFloat32x4 OpIsNanFloat32x8 + OpIsNanFloat32x16 + OpIsNanFloat64x2 + OpIsNanFloat64x4 + OpIsNanFloat64x8 + OpIsNanMaskedFloat32x4 OpIsNanMaskedFloat32x8 - OpLessFloat32x8 + OpIsNanMaskedFloat32x16 + OpIsNanMaskedFloat64x2 + OpIsNanMaskedFloat64x4 + OpIsNanMaskedFloat64x8 + OpLessEqualFloat32x4 OpLessEqualFloat32x8 + OpLessEqualFloat32x16 + OpLessEqualFloat64x2 + OpLessEqualFloat64x4 + OpLessEqualFloat64x8 + OpLessEqualInt8x16 + OpLessEqualInt8x32 + OpLessEqualInt8x64 + OpLessEqualInt16x8 + OpLessEqualInt16x16 + OpLessEqualInt16x32 + OpLessEqualInt32x4 + OpLessEqualInt32x8 + OpLessEqualInt32x16 + OpLessEqualInt64x2 + OpLessEqualInt64x4 + OpLessEqualInt64x8 + OpLessEqualMaskedFloat32x4 OpLessEqualMaskedFloat32x8 + OpLessEqualMaskedFloat32x16 + OpLessEqualMaskedFloat64x2 + OpLessEqualMaskedFloat64x4 + OpLessEqualMaskedFloat64x8 + OpLessEqualMaskedInt8x16 + OpLessEqualMaskedInt8x32 + OpLessEqualMaskedInt8x64 + OpLessEqualMaskedInt16x8 + OpLessEqualMaskedInt16x16 + OpLessEqualMaskedInt16x32 + OpLessEqualMaskedInt32x4 + OpLessEqualMaskedInt32x8 + OpLessEqualMaskedInt32x16 + OpLessEqualMaskedInt64x2 + OpLessEqualMaskedInt64x4 + OpLessEqualMaskedInt64x8 + OpLessEqualMaskedUint8x16 + OpLessEqualMaskedUint8x32 + OpLessEqualMaskedUint8x64 + OpLessEqualMaskedUint16x8 + OpLessEqualMaskedUint16x16 + OpLessEqualMaskedUint16x32 + OpLessEqualMaskedUint32x4 + OpLessEqualMaskedUint32x8 + OpLessEqualMaskedUint32x16 + OpLessEqualMaskedUint64x2 + OpLessEqualMaskedUint64x4 + OpLessEqualMaskedUint64x8 + OpLessEqualUint8x16 + OpLessEqualUint8x32 + OpLessEqualUint8x64 + OpLessEqualUint16x8 + OpLessEqualUint16x16 + OpLessEqualUint16x32 + OpLessEqualUint32x4 + OpLessEqualUint32x8 + OpLessEqualUint32x16 + OpLessEqualUint64x2 + OpLessEqualUint64x4 + OpLessEqualUint64x8 + OpLessFloat32x4 + OpLessFloat32x8 + OpLessFloat32x16 + OpLessFloat64x2 + OpLessFloat64x4 + OpLessFloat64x8 + OpLessInt8x16 + OpLessInt8x32 + OpLessInt8x64 + OpLessInt16x8 + OpLessInt16x16 + OpLessInt16x32 + OpLessInt32x4 + OpLessInt32x8 + OpLessInt32x16 + OpLessInt64x2 + OpLessInt64x4 + OpLessInt64x8 + OpLessMaskedFloat32x4 OpLessMaskedFloat32x8 + OpLessMaskedFloat32x16 + OpLessMaskedFloat64x2 + OpLessMaskedFloat64x4 + OpLessMaskedFloat64x8 + OpLessMaskedInt8x16 + OpLessMaskedInt8x32 + OpLessMaskedInt8x64 + OpLessMaskedInt16x8 + OpLessMaskedInt16x16 + OpLessMaskedInt16x32 + OpLessMaskedInt32x4 + OpLessMaskedInt32x8 + OpLessMaskedInt32x16 + OpLessMaskedInt64x2 + OpLessMaskedInt64x4 + OpLessMaskedInt64x8 + 
OpLessMaskedUint8x16 + OpLessMaskedUint8x32 + OpLessMaskedUint8x64 + OpLessMaskedUint16x8 + OpLessMaskedUint16x16 + OpLessMaskedUint16x32 + OpLessMaskedUint32x4 + OpLessMaskedUint32x8 + OpLessMaskedUint32x16 + OpLessMaskedUint64x2 + OpLessMaskedUint64x4 + OpLessMaskedUint64x8 + OpLessUint8x16 + OpLessUint8x32 + OpLessUint8x64 + OpLessUint16x8 + OpLessUint16x16 + OpLessUint16x32 + OpLessUint32x4 + OpLessUint32x8 + OpLessUint32x16 + OpLessUint64x2 + OpLessUint64x4 + OpLessUint64x8 + OpMaxFloat32x4 OpMaxFloat32x8 + OpMaxFloat32x16 + OpMaxFloat64x2 + OpMaxFloat64x4 + OpMaxFloat64x8 + OpMaxInt8x16 + OpMaxInt8x32 + OpMaxInt8x64 + OpMaxInt16x8 + OpMaxInt16x16 + OpMaxInt16x32 + OpMaxInt32x4 + OpMaxInt32x8 + OpMaxInt32x16 + OpMaxInt64x2 + OpMaxInt64x4 + OpMaxInt64x8 + OpMaxMaskedFloat32x4 OpMaxMaskedFloat32x8 + OpMaxMaskedFloat32x16 + OpMaxMaskedFloat64x2 + OpMaxMaskedFloat64x4 + OpMaxMaskedFloat64x8 + OpMaxMaskedInt8x16 + OpMaxMaskedInt8x32 + OpMaxMaskedInt8x64 + OpMaxMaskedInt16x8 + OpMaxMaskedInt16x16 + OpMaxMaskedInt16x32 + OpMaxMaskedInt32x4 + OpMaxMaskedInt32x8 + OpMaxMaskedInt32x16 + OpMaxMaskedInt64x2 + OpMaxMaskedInt64x4 + OpMaxMaskedInt64x8 + OpMaxMaskedUint8x16 + OpMaxMaskedUint8x32 + OpMaxMaskedUint8x64 + OpMaxMaskedUint16x8 + OpMaxMaskedUint16x16 + OpMaxMaskedUint16x32 + OpMaxMaskedUint32x4 + OpMaxMaskedUint32x8 + OpMaxMaskedUint32x16 + OpMaxMaskedUint64x2 + OpMaxMaskedUint64x4 + OpMaxMaskedUint64x8 + OpMaxUint8x16 + OpMaxUint8x32 + OpMaxUint8x64 + OpMaxUint16x8 + OpMaxUint16x16 + OpMaxUint16x32 + OpMaxUint32x4 + OpMaxUint32x8 + OpMaxUint32x16 + OpMaxUint64x2 + OpMaxUint64x4 + OpMaxUint64x8 + OpMinFloat32x4 OpMinFloat32x8 + OpMinFloat32x16 + OpMinFloat64x2 + OpMinFloat64x4 + OpMinFloat64x8 + OpMinInt8x16 + OpMinInt8x32 + OpMinInt8x64 + OpMinInt16x8 + OpMinInt16x16 + OpMinInt16x32 + OpMinInt32x4 + OpMinInt32x8 + OpMinInt32x16 + OpMinInt64x2 + OpMinInt64x4 + OpMinInt64x8 + OpMinMaskedFloat32x4 OpMinMaskedFloat32x8 - OpMulFloat32x8 + OpMinMaskedFloat32x16 + OpMinMaskedFloat64x2 + OpMinMaskedFloat64x4 + OpMinMaskedFloat64x8 + OpMinMaskedInt8x16 + OpMinMaskedInt8x32 + OpMinMaskedInt8x64 + OpMinMaskedInt16x8 + OpMinMaskedInt16x16 + OpMinMaskedInt16x32 + OpMinMaskedInt32x4 + OpMinMaskedInt32x8 + OpMinMaskedInt32x16 + OpMinMaskedInt64x2 + OpMinMaskedInt64x4 + OpMinMaskedInt64x8 + OpMinMaskedUint8x16 + OpMinMaskedUint8x32 + OpMinMaskedUint8x64 + OpMinMaskedUint16x8 + OpMinMaskedUint16x16 + OpMinMaskedUint16x32 + OpMinMaskedUint32x4 + OpMinMaskedUint32x8 + OpMinMaskedUint32x16 + OpMinMaskedUint64x2 + OpMinMaskedUint64x4 + OpMinMaskedUint64x8 + OpMinUint8x16 + OpMinUint8x32 + OpMinUint8x64 + OpMinUint16x8 + OpMinUint16x16 + OpMinUint16x32 + OpMinUint32x4 + OpMinUint32x8 + OpMinUint32x16 + OpMinUint64x2 + OpMinUint64x4 + OpMinUint64x8 + OpMulByPowOf2Float32x4 OpMulByPowOf2Float32x8 + OpMulByPowOf2Float32x16 + OpMulByPowOf2Float64x2 + OpMulByPowOf2Float64x4 + OpMulByPowOf2Float64x8 + OpMulByPowOf2MaskedFloat32x4 OpMulByPowOf2MaskedFloat32x8 - OpMulMaskedFloat32x8 - OpNotEqualFloat32x8 - OpNotEqualMaskedFloat32x8 - OpPairwiseAddFloat32x8 - OpPairwiseSubFloat32x8 - OpRoundFloat32x8 - OpSqrtFloat32x8 - OpSqrtMaskedFloat32x8 - OpSubFloat32x8 - OpSubMaskedFloat32x8 - OpTruncFloat32x8 - OpAddFloat64x2 - OpAddMaskedFloat64x2 - OpAddSubFloat64x2 - OpApproximateReciprocalFloat64x2 - OpApproximateReciprocalMaskedFloat64x2 - OpApproximateReciprocalOfSqrtFloat64x2 - OpApproximateReciprocalOfSqrtMaskedFloat64x2 - OpCeilFloat64x2 - OpCompressFloat64x2 - OpDivFloat64x2 - OpDivMaskedFloat64x2 - 
OpDotProdBroadcastFloat64x2 - OpEqualFloat64x2 - OpEqualMaskedFloat64x2 - OpFloorFloat64x2 - OpFusedMultiplyAddFloat64x2 - OpFusedMultiplyAddMaskedFloat64x2 - OpFusedMultiplyAddSubFloat64x2 - OpFusedMultiplyAddSubMaskedFloat64x2 - OpFusedMultiplySubAddFloat64x2 - OpFusedMultiplySubAddMaskedFloat64x2 - OpGreaterFloat64x2 - OpGreaterEqualFloat64x2 - OpGreaterEqualMaskedFloat64x2 - OpGreaterMaskedFloat64x2 - OpIsNanFloat64x2 - OpIsNanMaskedFloat64x2 - OpLessFloat64x2 - OpLessEqualFloat64x2 - OpLessEqualMaskedFloat64x2 - OpLessMaskedFloat64x2 - OpMaxFloat64x2 - OpMaxMaskedFloat64x2 - OpMinFloat64x2 - OpMinMaskedFloat64x2 - OpMulFloat64x2 - OpMulByPowOf2Float64x2 + OpMulByPowOf2MaskedFloat32x16 OpMulByPowOf2MaskedFloat64x2 - OpMulMaskedFloat64x2 - OpNotEqualFloat64x2 - OpNotEqualMaskedFloat64x2 - OpPairwiseAddFloat64x2 - OpPairwiseSubFloat64x2 - OpRoundFloat64x2 - OpSqrtFloat64x2 - OpSqrtMaskedFloat64x2 - OpSubFloat64x2 - OpSubMaskedFloat64x2 - OpTruncFloat64x2 - OpAddFloat64x4 - OpAddMaskedFloat64x4 - OpAddSubFloat64x4 - OpApproximateReciprocalFloat64x4 - OpApproximateReciprocalMaskedFloat64x4 - OpApproximateReciprocalOfSqrtFloat64x4 - OpApproximateReciprocalOfSqrtMaskedFloat64x4 - OpCeilFloat64x4 - OpCompressFloat64x4 - OpDivFloat64x4 - OpDivMaskedFloat64x4 - OpEqualFloat64x4 - OpEqualMaskedFloat64x4 - OpFloorFloat64x4 - OpFusedMultiplyAddFloat64x4 - OpFusedMultiplyAddMaskedFloat64x4 - OpFusedMultiplyAddSubFloat64x4 - OpFusedMultiplyAddSubMaskedFloat64x4 - OpFusedMultiplySubAddFloat64x4 - OpFusedMultiplySubAddMaskedFloat64x4 - OpGreaterFloat64x4 - OpGreaterEqualFloat64x4 - OpGreaterEqualMaskedFloat64x4 - OpGreaterMaskedFloat64x4 - OpIsNanFloat64x4 - OpIsNanMaskedFloat64x4 - OpLessFloat64x4 - OpLessEqualFloat64x4 - OpLessEqualMaskedFloat64x4 - OpLessMaskedFloat64x4 - OpMaxFloat64x4 - OpMaxMaskedFloat64x4 - OpMinFloat64x4 - OpMinMaskedFloat64x4 - OpMulFloat64x4 - OpMulByPowOf2Float64x4 OpMulByPowOf2MaskedFloat64x4 - OpMulMaskedFloat64x4 - OpNotEqualFloat64x4 - OpNotEqualMaskedFloat64x4 - OpPairwiseAddFloat64x4 - OpPairwiseSubFloat64x4 - OpRoundFloat64x4 - OpSqrtFloat64x4 - OpSqrtMaskedFloat64x4 - OpSubFloat64x4 - OpSubMaskedFloat64x4 - OpTruncFloat64x4 - OpAddFloat64x8 - OpAddMaskedFloat64x8 - OpApproximateReciprocalFloat64x8 - OpApproximateReciprocalMaskedFloat64x8 - OpApproximateReciprocalOfSqrtFloat64x8 - OpApproximateReciprocalOfSqrtMaskedFloat64x8 - OpCompressFloat64x8 - OpDivFloat64x8 - OpDivMaskedFloat64x8 - OpEqualFloat64x8 - OpEqualMaskedFloat64x8 - OpFusedMultiplyAddFloat64x8 - OpFusedMultiplyAddMaskedFloat64x8 - OpFusedMultiplyAddSubFloat64x8 - OpFusedMultiplyAddSubMaskedFloat64x8 - OpFusedMultiplySubAddFloat64x8 - OpFusedMultiplySubAddMaskedFloat64x8 - OpGreaterFloat64x8 - OpGreaterEqualFloat64x8 - OpGreaterEqualMaskedFloat64x8 - OpGreaterMaskedFloat64x8 - OpIsNanFloat64x8 - OpIsNanMaskedFloat64x8 - OpLessFloat64x8 - OpLessEqualFloat64x8 - OpLessEqualMaskedFloat64x8 - OpLessMaskedFloat64x8 - OpMaxFloat64x8 - OpMaxMaskedFloat64x8 - OpMinFloat64x8 - OpMinMaskedFloat64x8 - OpMulFloat64x8 - OpMulByPowOf2Float64x8 OpMulByPowOf2MaskedFloat64x8 - OpMulMaskedFloat64x8 - OpNotEqualFloat64x8 - OpNotEqualMaskedFloat64x8 - OpSqrtFloat64x8 - OpSqrtMaskedFloat64x8 - OpSubFloat64x8 - OpSubMaskedFloat64x8 - OpAbsoluteInt16x16 - OpAbsoluteMaskedInt16x16 - OpAddInt16x16 - OpAddMaskedInt16x16 - OpAndInt16x16 - OpAndNotInt16x16 - OpCompressInt16x16 - OpEqualInt16x16 - OpEqualMaskedInt16x16 - OpGreaterInt16x16 - OpGreaterEqualInt16x16 - OpGreaterEqualMaskedInt16x16 - OpGreaterMaskedInt16x16 - 
OpLessInt16x16 - OpLessEqualInt16x16 - OpLessEqualMaskedInt16x16 - OpLessMaskedInt16x16 - OpMaxInt16x16 - OpMaxMaskedInt16x16 - OpMinInt16x16 - OpMinMaskedInt16x16 + OpMulEvenWidenInt32x4 + OpMulEvenWidenInt32x8 + OpMulEvenWidenInt64x2 + OpMulEvenWidenInt64x4 + OpMulEvenWidenInt64x8 + OpMulEvenWidenMaskedInt64x2 + OpMulEvenWidenMaskedInt64x4 + OpMulEvenWidenMaskedInt64x8 + OpMulEvenWidenMaskedUint64x2 + OpMulEvenWidenMaskedUint64x4 + OpMulEvenWidenMaskedUint64x8 + OpMulEvenWidenUint32x4 + OpMulEvenWidenUint32x8 + OpMulEvenWidenUint64x2 + OpMulEvenWidenUint64x4 + OpMulEvenWidenUint64x8 + OpMulFloat32x4 + OpMulFloat32x8 + OpMulFloat32x16 + OpMulFloat64x2 + OpMulFloat64x4 + OpMulFloat64x8 + OpMulHighInt16x8 OpMulHighInt16x16 + OpMulHighInt16x32 + OpMulHighMaskedInt16x8 OpMulHighMaskedInt16x16 + OpMulHighMaskedInt16x32 + OpMulHighMaskedUint16x8 + OpMulHighMaskedUint16x16 + OpMulHighMaskedUint16x32 + OpMulHighUint16x8 + OpMulHighUint16x16 + OpMulHighUint16x32 + OpMulLowInt16x8 OpMulLowInt16x16 + OpMulLowInt16x32 + OpMulLowInt32x4 + OpMulLowInt32x8 + OpMulLowInt32x16 + OpMulLowInt64x2 + OpMulLowInt64x4 + OpMulLowInt64x8 + OpMulLowMaskedInt16x8 OpMulLowMaskedInt16x16 + OpMulLowMaskedInt16x32 + OpMulLowMaskedInt32x4 + OpMulLowMaskedInt32x8 + OpMulLowMaskedInt32x16 + OpMulLowMaskedInt64x2 + OpMulLowMaskedInt64x4 + OpMulLowMaskedInt64x8 + OpMulMaskedFloat32x4 + OpMulMaskedFloat32x8 + OpMulMaskedFloat32x16 + OpMulMaskedFloat64x2 + OpMulMaskedFloat64x4 + OpMulMaskedFloat64x8 + OpNotEqualFloat32x4 + OpNotEqualFloat32x8 + OpNotEqualFloat32x16 + OpNotEqualFloat64x2 + OpNotEqualFloat64x4 + OpNotEqualFloat64x8 + OpNotEqualInt8x16 + OpNotEqualInt8x32 + OpNotEqualInt8x64 + OpNotEqualInt16x8 OpNotEqualInt16x16 + OpNotEqualInt16x32 + OpNotEqualInt32x4 + OpNotEqualInt32x8 + OpNotEqualInt32x16 + OpNotEqualInt64x2 + OpNotEqualInt64x4 + OpNotEqualInt64x8 + OpNotEqualMaskedFloat32x4 + OpNotEqualMaskedFloat32x8 + OpNotEqualMaskedFloat32x16 + OpNotEqualMaskedFloat64x2 + OpNotEqualMaskedFloat64x4 + OpNotEqualMaskedFloat64x8 + OpNotEqualMaskedInt8x16 + OpNotEqualMaskedInt8x32 + OpNotEqualMaskedInt8x64 + OpNotEqualMaskedInt16x8 OpNotEqualMaskedInt16x16 + OpNotEqualMaskedInt16x32 + OpNotEqualMaskedInt32x4 + OpNotEqualMaskedInt32x8 + OpNotEqualMaskedInt32x16 + OpNotEqualMaskedInt64x2 + OpNotEqualMaskedInt64x4 + OpNotEqualMaskedInt64x8 + OpNotEqualMaskedUint8x16 + OpNotEqualMaskedUint8x32 + OpNotEqualMaskedUint8x64 + OpNotEqualMaskedUint16x8 + OpNotEqualMaskedUint16x16 + OpNotEqualMaskedUint16x32 + OpNotEqualMaskedUint32x4 + OpNotEqualMaskedUint32x8 + OpNotEqualMaskedUint32x16 + OpNotEqualMaskedUint64x2 + OpNotEqualMaskedUint64x4 + OpNotEqualMaskedUint64x8 + OpNotEqualUint8x16 + OpNotEqualUint8x32 + OpNotEqualUint8x64 + OpNotEqualUint16x8 + OpNotEqualUint16x16 + OpNotEqualUint16x32 + OpNotEqualUint32x4 + OpNotEqualUint32x8 + OpNotEqualUint32x16 + OpNotEqualUint64x2 + OpNotEqualUint64x4 + OpNotEqualUint64x8 + OpOrInt8x16 + OpOrInt8x32 + OpOrInt16x8 OpOrInt16x16 + OpOrInt32x4 + OpOrInt32x8 + OpOrInt32x16 + OpOrInt64x2 + OpOrInt64x4 + OpOrInt64x8 + OpOrMaskedInt32x4 + OpOrMaskedInt32x8 + OpOrMaskedInt32x16 + OpOrMaskedInt64x2 + OpOrMaskedInt64x4 + OpOrMaskedInt64x8 + OpOrMaskedUint32x4 + OpOrMaskedUint32x8 + OpOrMaskedUint32x16 + OpOrMaskedUint64x2 + OpOrMaskedUint64x4 + OpOrMaskedUint64x8 + OpOrUint8x16 + OpOrUint8x32 + OpOrUint16x8 + OpOrUint16x16 + OpOrUint32x4 + OpOrUint32x8 + OpOrUint32x16 + OpOrUint64x2 + OpOrUint64x4 + OpOrUint64x8 + OpPairDotProdAccumulateInt32x4 + OpPairDotProdAccumulateInt32x8 + 
OpPairDotProdAccumulateInt32x16 + OpPairDotProdAccumulateMaskedInt32x4 + OpPairDotProdAccumulateMaskedInt32x8 + OpPairDotProdAccumulateMaskedInt32x16 + OpPairDotProdInt16x8 OpPairDotProdInt16x16 + OpPairDotProdInt16x32 + OpPairDotProdMaskedInt16x8 OpPairDotProdMaskedInt16x16 + OpPairDotProdMaskedInt16x32 + OpPairwiseAddFloat32x4 + OpPairwiseAddFloat32x8 + OpPairwiseAddFloat64x2 + OpPairwiseAddFloat64x4 + OpPairwiseAddInt16x8 OpPairwiseAddInt16x16 + OpPairwiseAddInt32x4 + OpPairwiseAddInt32x8 + OpPairwiseAddUint16x8 + OpPairwiseAddUint16x16 + OpPairwiseAddUint32x4 + OpPairwiseAddUint32x8 + OpPairwiseSubFloat32x4 + OpPairwiseSubFloat32x8 + OpPairwiseSubFloat64x2 + OpPairwiseSubFloat64x4 + OpPairwiseSubInt16x8 OpPairwiseSubInt16x16 + OpPairwiseSubInt32x4 + OpPairwiseSubInt32x8 + OpPairwiseSubUint16x8 + OpPairwiseSubUint16x16 + OpPairwiseSubUint32x4 + OpPairwiseSubUint32x8 + OpPermute2Float32x4 + OpPermute2Float32x8 + OpPermute2Float32x16 + OpPermute2Float64x2 + OpPermute2Float64x4 + OpPermute2Float64x8 + OpPermute2Int8x16 + OpPermute2Int8x32 + OpPermute2Int8x64 + OpPermute2Int16x8 + OpPermute2Int16x16 + OpPermute2Int16x32 + OpPermute2Int32x4 + OpPermute2Int32x8 + OpPermute2Int32x16 + OpPermute2Int64x2 + OpPermute2Int64x4 + OpPermute2Int64x8 + OpPermute2MaskedFloat32x4 + OpPermute2MaskedFloat32x8 + OpPermute2MaskedFloat32x16 + OpPermute2MaskedFloat64x2 + OpPermute2MaskedFloat64x4 + OpPermute2MaskedFloat64x8 + OpPermute2MaskedInt8x16 + OpPermute2MaskedInt8x32 + OpPermute2MaskedInt8x64 + OpPermute2MaskedInt16x8 + OpPermute2MaskedInt16x16 + OpPermute2MaskedInt16x32 + OpPermute2MaskedInt32x4 + OpPermute2MaskedInt32x8 + OpPermute2MaskedInt32x16 + OpPermute2MaskedInt64x2 + OpPermute2MaskedInt64x4 + OpPermute2MaskedInt64x8 + OpPermute2MaskedUint8x16 + OpPermute2MaskedUint8x32 + OpPermute2MaskedUint8x64 + OpPermute2MaskedUint16x8 + OpPermute2MaskedUint16x16 + OpPermute2MaskedUint16x32 + OpPermute2MaskedUint32x4 + OpPermute2MaskedUint32x8 + OpPermute2MaskedUint32x16 + OpPermute2MaskedUint64x2 + OpPermute2MaskedUint64x4 + OpPermute2MaskedUint64x8 + OpPermute2Uint8x16 + OpPermute2Uint8x32 + OpPermute2Uint8x64 + OpPermute2Uint16x8 + OpPermute2Uint16x16 + OpPermute2Uint16x32 + OpPermute2Uint32x4 + OpPermute2Uint32x8 + OpPermute2Uint32x16 + OpPermute2Uint64x2 + OpPermute2Uint64x4 + OpPermute2Uint64x8 + OpPermuteFloat32x8 + OpPermuteFloat32x16 + OpPermuteFloat64x4 + OpPermuteFloat64x8 + OpPermuteInt8x16 + OpPermuteInt8x32 + OpPermuteInt8x64 + OpPermuteInt16x8 + OpPermuteInt16x16 + OpPermuteInt16x32 + OpPermuteInt32x8 + OpPermuteInt32x16 + OpPermuteInt64x4 + OpPermuteInt64x8 + OpPermuteMaskedFloat32x8 + OpPermuteMaskedFloat32x16 + OpPermuteMaskedFloat64x4 + OpPermuteMaskedFloat64x8 + OpPermuteMaskedInt8x16 + OpPermuteMaskedInt8x32 + OpPermuteMaskedInt8x64 + OpPermuteMaskedInt16x8 + OpPermuteMaskedInt16x16 + OpPermuteMaskedInt16x32 + OpPermuteMaskedInt32x8 + OpPermuteMaskedInt32x16 + OpPermuteMaskedInt64x4 + OpPermuteMaskedInt64x8 + OpPermuteMaskedUint8x16 + OpPermuteMaskedUint8x32 + OpPermuteMaskedUint8x64 + OpPermuteMaskedUint16x8 + OpPermuteMaskedUint16x16 + OpPermuteMaskedUint16x32 + OpPermuteMaskedUint32x8 + OpPermuteMaskedUint32x16 + OpPermuteMaskedUint64x4 + OpPermuteMaskedUint64x8 + OpPermuteUint8x16 + OpPermuteUint8x32 + OpPermuteUint8x64 + OpPermuteUint16x8 + OpPermuteUint16x16 + OpPermuteUint16x32 + OpPermuteUint32x8 + OpPermuteUint32x16 + OpPermuteUint64x4 + OpPermuteUint64x8 + OpPopCountInt8x16 + OpPopCountInt8x32 + OpPopCountInt8x64 + OpPopCountInt16x8 OpPopCountInt16x16 - 
OpPopCountMaskedInt16x16 - OpSaturatedAddInt16x16 - OpSaturatedAddMaskedInt16x16 - OpSaturatedPairwiseAddInt16x16 - OpSaturatedPairwiseSubInt16x16 - OpSaturatedSubInt16x16 - OpSaturatedSubMaskedInt16x16 - OpShiftAllLeftInt16x16 - OpShiftAllLeftMaskedInt16x16 - OpShiftAllRightInt16x16 - OpShiftAllRightMaskedInt16x16 - OpShiftLeftInt16x16 - OpShiftLeftAndFillUpperFromInt16x16 - OpShiftLeftAndFillUpperFromMaskedInt16x16 - OpShiftLeftMaskedInt16x16 - OpShiftRightInt16x16 - OpShiftRightAndFillUpperFromInt16x16 - OpShiftRightAndFillUpperFromMaskedInt16x16 - OpShiftRightMaskedInt16x16 - OpSignInt16x16 - OpSubInt16x16 - OpSubMaskedInt16x16 - OpXorInt16x16 - OpAbsoluteInt16x32 - OpAbsoluteMaskedInt16x32 - OpAddInt16x32 - OpAddMaskedInt16x32 - OpCompressInt16x32 - OpEqualInt16x32 - OpEqualMaskedInt16x32 - OpGreaterInt16x32 - OpGreaterEqualInt16x32 - OpGreaterEqualMaskedInt16x32 - OpGreaterMaskedInt16x32 - OpLessInt16x32 - OpLessEqualInt16x32 - OpLessEqualMaskedInt16x32 - OpLessMaskedInt16x32 - OpMaxInt16x32 - OpMaxMaskedInt16x32 - OpMinInt16x32 - OpMinMaskedInt16x32 - OpMulHighInt16x32 - OpMulHighMaskedInt16x32 - OpMulLowInt16x32 - OpMulLowMaskedInt16x32 - OpNotEqualInt16x32 - OpNotEqualMaskedInt16x32 - OpPairDotProdInt16x32 - OpPairDotProdMaskedInt16x32 OpPopCountInt16x32 + OpPopCountInt32x4 + OpPopCountInt32x8 + OpPopCountInt32x16 + OpPopCountInt64x2 + OpPopCountInt64x4 + OpPopCountInt64x8 + OpPopCountMaskedInt8x16 + OpPopCountMaskedInt8x32 + OpPopCountMaskedInt8x64 + OpPopCountMaskedInt16x8 + OpPopCountMaskedInt16x16 OpPopCountMaskedInt16x32 - OpSaturatedAddInt16x32 - OpSaturatedAddMaskedInt16x32 - OpSaturatedSubInt16x32 - OpSaturatedSubMaskedInt16x32 - OpShiftAllLeftInt16x32 - OpShiftAllLeftMaskedInt16x32 - OpShiftAllRightInt16x32 - OpShiftAllRightMaskedInt16x32 - OpShiftLeftInt16x32 - OpShiftLeftAndFillUpperFromInt16x32 - OpShiftLeftAndFillUpperFromMaskedInt16x32 - OpShiftLeftMaskedInt16x32 - OpShiftRightInt16x32 - OpShiftRightAndFillUpperFromInt16x32 - OpShiftRightAndFillUpperFromMaskedInt16x32 - OpShiftRightMaskedInt16x32 - OpSubInt16x32 - OpSubMaskedInt16x32 - OpAbsoluteInt16x8 - OpAbsoluteMaskedInt16x8 - OpAddInt16x8 - OpAddMaskedInt16x8 - OpAndInt16x8 - OpAndNotInt16x8 - OpCompressInt16x8 - OpEqualInt16x8 - OpEqualMaskedInt16x8 - OpGreaterInt16x8 - OpGreaterEqualInt16x8 - OpGreaterEqualMaskedInt16x8 - OpGreaterMaskedInt16x8 - OpLessInt16x8 - OpLessEqualInt16x8 - OpLessEqualMaskedInt16x8 - OpLessMaskedInt16x8 - OpMaxInt16x8 - OpMaxMaskedInt16x8 - OpMinInt16x8 - OpMinMaskedInt16x8 - OpMulHighInt16x8 - OpMulHighMaskedInt16x8 - OpMulLowInt16x8 - OpMulLowMaskedInt16x8 - OpNotEqualInt16x8 - OpNotEqualMaskedInt16x8 - OpOrInt16x8 - OpPairDotProdInt16x8 - OpPairDotProdMaskedInt16x8 - OpPairwiseAddInt16x8 - OpPairwiseSubInt16x8 - OpPopCountInt16x8 - OpPopCountMaskedInt16x8 - OpSaturatedAddInt16x8 - OpSaturatedAddMaskedInt16x8 - OpSaturatedPairwiseAddInt16x8 - OpSaturatedPairwiseSubInt16x8 - OpSaturatedSubInt16x8 - OpSaturatedSubMaskedInt16x8 - OpShiftAllLeftInt16x8 - OpShiftAllLeftMaskedInt16x8 - OpShiftAllRightInt16x8 - OpShiftAllRightMaskedInt16x8 - OpShiftLeftInt16x8 - OpShiftLeftAndFillUpperFromInt16x8 - OpShiftLeftAndFillUpperFromMaskedInt16x8 - OpShiftLeftMaskedInt16x8 - OpShiftRightInt16x8 - OpShiftRightAndFillUpperFromInt16x8 - OpShiftRightAndFillUpperFromMaskedInt16x8 - OpShiftRightMaskedInt16x8 - OpSignInt16x8 - OpSubInt16x8 - OpSubMaskedInt16x8 - OpXorInt16x8 - OpAbsoluteInt32x16 - OpAbsoluteMaskedInt32x16 - OpAddInt32x16 - OpAddMaskedInt32x16 - OpAndInt32x16 - OpAndMaskedInt32x16 - 
OpAndNotInt32x16 - OpAndNotMaskedInt32x16 - OpCompressInt32x16 - OpEqualInt32x16 - OpEqualMaskedInt32x16 - OpGreaterInt32x16 - OpGreaterEqualInt32x16 - OpGreaterEqualMaskedInt32x16 - OpGreaterMaskedInt32x16 - OpLessInt32x16 - OpLessEqualInt32x16 - OpLessEqualMaskedInt32x16 - OpLessMaskedInt32x16 - OpMaxInt32x16 - OpMaxMaskedInt32x16 - OpMinInt32x16 - OpMinMaskedInt32x16 - OpMulLowInt32x16 - OpMulLowMaskedInt32x16 - OpNotEqualInt32x16 - OpNotEqualMaskedInt32x16 - OpOrInt32x16 - OpOrMaskedInt32x16 - OpPairDotProdAccumulateInt32x16 - OpPairDotProdAccumulateMaskedInt32x16 - OpPopCountInt32x16 + OpPopCountMaskedInt32x4 + OpPopCountMaskedInt32x8 OpPopCountMaskedInt32x16 + OpPopCountMaskedInt64x2 + OpPopCountMaskedInt64x4 + OpPopCountMaskedInt64x8 + OpPopCountMaskedUint8x16 + OpPopCountMaskedUint8x32 + OpPopCountMaskedUint8x64 + OpPopCountMaskedUint16x8 + OpPopCountMaskedUint16x16 + OpPopCountMaskedUint16x32 + OpPopCountMaskedUint32x4 + OpPopCountMaskedUint32x8 + OpPopCountMaskedUint32x16 + OpPopCountMaskedUint64x2 + OpPopCountMaskedUint64x4 + OpPopCountMaskedUint64x8 + OpPopCountUint8x16 + OpPopCountUint8x32 + OpPopCountUint8x64 + OpPopCountUint16x8 + OpPopCountUint16x16 + OpPopCountUint16x32 + OpPopCountUint32x4 + OpPopCountUint32x8 + OpPopCountUint32x16 + OpPopCountUint64x2 + OpPopCountUint64x4 + OpPopCountUint64x8 + OpRotateLeftInt32x4 + OpRotateLeftInt32x8 OpRotateLeftInt32x16 + OpRotateLeftInt64x2 + OpRotateLeftInt64x4 + OpRotateLeftInt64x8 + OpRotateLeftMaskedInt32x4 + OpRotateLeftMaskedInt32x8 OpRotateLeftMaskedInt32x16 + OpRotateLeftMaskedInt64x2 + OpRotateLeftMaskedInt64x4 + OpRotateLeftMaskedInt64x8 + OpRotateLeftMaskedUint32x4 + OpRotateLeftMaskedUint32x8 + OpRotateLeftMaskedUint32x16 + OpRotateLeftMaskedUint64x2 + OpRotateLeftMaskedUint64x4 + OpRotateLeftMaskedUint64x8 + OpRotateLeftUint32x4 + OpRotateLeftUint32x8 + OpRotateLeftUint32x16 + OpRotateLeftUint64x2 + OpRotateLeftUint64x4 + OpRotateLeftUint64x8 + OpRotateRightInt32x4 + OpRotateRightInt32x8 OpRotateRightInt32x16 + OpRotateRightInt64x2 + OpRotateRightInt64x4 + OpRotateRightInt64x8 + OpRotateRightMaskedInt32x4 + OpRotateRightMaskedInt32x8 OpRotateRightMaskedInt32x16 + OpRotateRightMaskedInt64x2 + OpRotateRightMaskedInt64x4 + OpRotateRightMaskedInt64x8 + OpRotateRightMaskedUint32x4 + OpRotateRightMaskedUint32x8 + OpRotateRightMaskedUint32x16 + OpRotateRightMaskedUint64x2 + OpRotateRightMaskedUint64x4 + OpRotateRightMaskedUint64x8 + OpRotateRightUint32x4 + OpRotateRightUint32x8 + OpRotateRightUint32x16 + OpRotateRightUint64x2 + OpRotateRightUint64x4 + OpRotateRightUint64x8 + OpRoundFloat32x4 + OpRoundFloat32x8 + OpRoundFloat64x2 + OpRoundFloat64x4 + OpSaturatedAddInt8x16 + OpSaturatedAddInt8x32 + OpSaturatedAddInt8x64 + OpSaturatedAddInt16x8 + OpSaturatedAddInt16x16 + OpSaturatedAddInt16x32 + OpSaturatedAddMaskedInt8x16 + OpSaturatedAddMaskedInt8x32 + OpSaturatedAddMaskedInt8x64 + OpSaturatedAddMaskedInt16x8 + OpSaturatedAddMaskedInt16x16 + OpSaturatedAddMaskedInt16x32 + OpSaturatedAddMaskedUint8x16 + OpSaturatedAddMaskedUint8x32 + OpSaturatedAddMaskedUint8x64 + OpSaturatedAddMaskedUint16x8 + OpSaturatedAddMaskedUint16x16 + OpSaturatedAddMaskedUint16x32 + OpSaturatedAddUint8x16 + OpSaturatedAddUint8x32 + OpSaturatedAddUint8x64 + OpSaturatedAddUint16x8 + OpSaturatedAddUint16x16 + OpSaturatedAddUint16x32 + OpSaturatedPairDotProdAccumulateInt32x4 + OpSaturatedPairDotProdAccumulateInt32x8 OpSaturatedPairDotProdAccumulateInt32x16 + OpSaturatedPairDotProdAccumulateMaskedInt32x4 + OpSaturatedPairDotProdAccumulateMaskedInt32x8 
OpSaturatedPairDotProdAccumulateMaskedInt32x16 + OpSaturatedPairwiseAddInt16x8 + OpSaturatedPairwiseAddInt16x16 + OpSaturatedPairwiseSubInt16x8 + OpSaturatedPairwiseSubInt16x16 + OpSaturatedSubInt8x16 + OpSaturatedSubInt8x32 + OpSaturatedSubInt8x64 + OpSaturatedSubInt16x8 + OpSaturatedSubInt16x16 + OpSaturatedSubInt16x32 + OpSaturatedSubMaskedInt8x16 + OpSaturatedSubMaskedInt8x32 + OpSaturatedSubMaskedInt8x64 + OpSaturatedSubMaskedInt16x8 + OpSaturatedSubMaskedInt16x16 + OpSaturatedSubMaskedInt16x32 + OpSaturatedSubMaskedUint8x16 + OpSaturatedSubMaskedUint8x32 + OpSaturatedSubMaskedUint8x64 + OpSaturatedSubMaskedUint16x8 + OpSaturatedSubMaskedUint16x16 + OpSaturatedSubMaskedUint16x32 + OpSaturatedSubUint8x16 + OpSaturatedSubUint8x32 + OpSaturatedSubUint8x64 + OpSaturatedSubUint16x8 + OpSaturatedSubUint16x16 + OpSaturatedSubUint16x32 + OpSaturatedUnsignedSignedPairDotProdMaskedUint8x16 + OpSaturatedUnsignedSignedPairDotProdMaskedUint8x32 + OpSaturatedUnsignedSignedPairDotProdMaskedUint8x64 + OpSaturatedUnsignedSignedPairDotProdUint8x16 + OpSaturatedUnsignedSignedPairDotProdUint8x32 + OpSaturatedUnsignedSignedPairDotProdUint8x64 + OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4 + OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8 OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16 + OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4 + OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8 OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16 + OpShiftAllLeftInt16x8 + OpShiftAllLeftInt16x16 + OpShiftAllLeftInt16x32 + OpShiftAllLeftInt32x4 + OpShiftAllLeftInt32x8 OpShiftAllLeftInt32x16 + OpShiftAllLeftInt64x2 + OpShiftAllLeftInt64x4 + OpShiftAllLeftInt64x8 + OpShiftAllLeftMaskedInt16x8 + OpShiftAllLeftMaskedInt16x16 + OpShiftAllLeftMaskedInt16x32 + OpShiftAllLeftMaskedInt32x4 + OpShiftAllLeftMaskedInt32x8 OpShiftAllLeftMaskedInt32x16 + OpShiftAllLeftMaskedInt64x2 + OpShiftAllLeftMaskedInt64x4 + OpShiftAllLeftMaskedInt64x8 + OpShiftAllLeftMaskedUint16x8 + OpShiftAllLeftMaskedUint16x16 + OpShiftAllLeftMaskedUint16x32 + OpShiftAllLeftMaskedUint32x4 + OpShiftAllLeftMaskedUint32x8 + OpShiftAllLeftMaskedUint32x16 + OpShiftAllLeftMaskedUint64x2 + OpShiftAllLeftMaskedUint64x4 + OpShiftAllLeftMaskedUint64x8 + OpShiftAllLeftUint16x8 + OpShiftAllLeftUint16x16 + OpShiftAllLeftUint16x32 + OpShiftAllLeftUint32x4 + OpShiftAllLeftUint32x8 + OpShiftAllLeftUint32x16 + OpShiftAllLeftUint64x2 + OpShiftAllLeftUint64x4 + OpShiftAllLeftUint64x8 + OpShiftAllRightInt16x8 + OpShiftAllRightInt16x16 + OpShiftAllRightInt16x32 + OpShiftAllRightInt32x4 + OpShiftAllRightInt32x8 OpShiftAllRightInt32x16 + OpShiftAllRightInt64x2 + OpShiftAllRightInt64x4 + OpShiftAllRightInt64x8 + OpShiftAllRightMaskedInt16x8 + OpShiftAllRightMaskedInt16x16 + OpShiftAllRightMaskedInt16x32 + OpShiftAllRightMaskedInt32x4 + OpShiftAllRightMaskedInt32x8 OpShiftAllRightMaskedInt32x16 - OpShiftLeftInt32x16 + OpShiftAllRightMaskedInt64x2 + OpShiftAllRightMaskedInt64x4 + OpShiftAllRightMaskedInt64x8 + OpShiftAllRightMaskedUint16x8 + OpShiftAllRightMaskedUint16x16 + OpShiftAllRightMaskedUint16x32 + OpShiftAllRightMaskedUint32x4 + OpShiftAllRightMaskedUint32x8 + OpShiftAllRightMaskedUint32x16 + OpShiftAllRightMaskedUint64x2 + OpShiftAllRightMaskedUint64x4 + OpShiftAllRightMaskedUint64x8 + OpShiftAllRightUint16x8 + OpShiftAllRightUint16x16 + OpShiftAllRightUint16x32 + OpShiftAllRightUint32x4 + OpShiftAllRightUint32x8 + OpShiftAllRightUint32x16 + OpShiftAllRightUint64x2 + OpShiftAllRightUint64x4 + OpShiftAllRightUint64x8 + 
OpShiftLeftAndFillUpperFromInt16x8 + OpShiftLeftAndFillUpperFromInt16x16 + OpShiftLeftAndFillUpperFromInt16x32 + OpShiftLeftAndFillUpperFromInt32x4 + OpShiftLeftAndFillUpperFromInt32x8 OpShiftLeftAndFillUpperFromInt32x16 + OpShiftLeftAndFillUpperFromInt64x2 + OpShiftLeftAndFillUpperFromInt64x4 + OpShiftLeftAndFillUpperFromInt64x8 + OpShiftLeftAndFillUpperFromMaskedInt16x8 + OpShiftLeftAndFillUpperFromMaskedInt16x16 + OpShiftLeftAndFillUpperFromMaskedInt16x32 + OpShiftLeftAndFillUpperFromMaskedInt32x4 + OpShiftLeftAndFillUpperFromMaskedInt32x8 OpShiftLeftAndFillUpperFromMaskedInt32x16 - OpShiftLeftMaskedInt32x16 - OpShiftRightInt32x16 - OpShiftRightAndFillUpperFromInt32x16 - OpShiftRightAndFillUpperFromMaskedInt32x16 - OpShiftRightMaskedInt32x16 - OpSubInt32x16 - OpSubMaskedInt32x16 - OpUnsignedSignedQuadDotProdAccumulateInt32x16 - OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x16 - OpXorInt32x16 - OpXorMaskedInt32x16 - OpAbsoluteInt32x4 - OpAbsoluteMaskedInt32x4 - OpAddInt32x4 - OpAddMaskedInt32x4 - OpAndInt32x4 - OpAndMaskedInt32x4 - OpAndNotInt32x4 - OpAndNotMaskedInt32x4 - OpCompressInt32x4 - OpEqualInt32x4 - OpEqualMaskedInt32x4 - OpGreaterInt32x4 - OpGreaterEqualInt32x4 - OpGreaterEqualMaskedInt32x4 - OpGreaterMaskedInt32x4 - OpLessInt32x4 - OpLessEqualInt32x4 - OpLessEqualMaskedInt32x4 - OpLessMaskedInt32x4 - OpMaxInt32x4 - OpMaxMaskedInt32x4 - OpMinInt32x4 - OpMinMaskedInt32x4 - OpMulEvenWidenInt32x4 - OpMulLowInt32x4 - OpMulLowMaskedInt32x4 - OpNotEqualInt32x4 - OpNotEqualMaskedInt32x4 - OpOrInt32x4 - OpOrMaskedInt32x4 - OpPairDotProdAccumulateInt32x4 - OpPairDotProdAccumulateMaskedInt32x4 - OpPairwiseAddInt32x4 - OpPairwiseSubInt32x4 - OpPopCountInt32x4 - OpPopCountMaskedInt32x4 - OpRotateLeftInt32x4 - OpRotateLeftMaskedInt32x4 - OpRotateRightInt32x4 - OpRotateRightMaskedInt32x4 - OpSaturatedPairDotProdAccumulateInt32x4 - OpSaturatedPairDotProdAccumulateMaskedInt32x4 - OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4 - OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4 - OpShiftAllLeftInt32x4 - OpShiftAllLeftMaskedInt32x4 - OpShiftAllRightInt32x4 - OpShiftAllRightMaskedInt32x4 + OpShiftLeftAndFillUpperFromMaskedInt64x2 + OpShiftLeftAndFillUpperFromMaskedInt64x4 + OpShiftLeftAndFillUpperFromMaskedInt64x8 + OpShiftLeftAndFillUpperFromMaskedUint16x8 + OpShiftLeftAndFillUpperFromMaskedUint16x16 + OpShiftLeftAndFillUpperFromMaskedUint16x32 + OpShiftLeftAndFillUpperFromMaskedUint32x4 + OpShiftLeftAndFillUpperFromMaskedUint32x8 + OpShiftLeftAndFillUpperFromMaskedUint32x16 + OpShiftLeftAndFillUpperFromMaskedUint64x2 + OpShiftLeftAndFillUpperFromMaskedUint64x4 + OpShiftLeftAndFillUpperFromMaskedUint64x8 + OpShiftLeftAndFillUpperFromUint16x8 + OpShiftLeftAndFillUpperFromUint16x16 + OpShiftLeftAndFillUpperFromUint16x32 + OpShiftLeftAndFillUpperFromUint32x4 + OpShiftLeftAndFillUpperFromUint32x8 + OpShiftLeftAndFillUpperFromUint32x16 + OpShiftLeftAndFillUpperFromUint64x2 + OpShiftLeftAndFillUpperFromUint64x4 + OpShiftLeftAndFillUpperFromUint64x8 + OpShiftLeftInt16x8 + OpShiftLeftInt16x16 + OpShiftLeftInt16x32 OpShiftLeftInt32x4 - OpShiftLeftAndFillUpperFromInt32x4 - OpShiftLeftAndFillUpperFromMaskedInt32x4 + OpShiftLeftInt32x8 + OpShiftLeftInt32x16 + OpShiftLeftInt64x2 + OpShiftLeftInt64x4 + OpShiftLeftInt64x8 + OpShiftLeftMaskedInt16x8 + OpShiftLeftMaskedInt16x16 + OpShiftLeftMaskedInt16x32 OpShiftLeftMaskedInt32x4 - OpShiftRightInt32x4 + OpShiftLeftMaskedInt32x8 + OpShiftLeftMaskedInt32x16 + OpShiftLeftMaskedInt64x2 + OpShiftLeftMaskedInt64x4 + OpShiftLeftMaskedInt64x8 + 
OpShiftLeftMaskedUint16x8 + OpShiftLeftMaskedUint16x16 + OpShiftLeftMaskedUint16x32 + OpShiftLeftMaskedUint32x4 + OpShiftLeftMaskedUint32x8 + OpShiftLeftMaskedUint32x16 + OpShiftLeftMaskedUint64x2 + OpShiftLeftMaskedUint64x4 + OpShiftLeftMaskedUint64x8 + OpShiftLeftUint16x8 + OpShiftLeftUint16x16 + OpShiftLeftUint16x32 + OpShiftLeftUint32x4 + OpShiftLeftUint32x8 + OpShiftLeftUint32x16 + OpShiftLeftUint64x2 + OpShiftLeftUint64x4 + OpShiftLeftUint64x8 + OpShiftRightAndFillUpperFromInt16x8 + OpShiftRightAndFillUpperFromInt16x16 + OpShiftRightAndFillUpperFromInt16x32 OpShiftRightAndFillUpperFromInt32x4 + OpShiftRightAndFillUpperFromInt32x8 + OpShiftRightAndFillUpperFromInt32x16 + OpShiftRightAndFillUpperFromInt64x2 + OpShiftRightAndFillUpperFromInt64x4 + OpShiftRightAndFillUpperFromInt64x8 + OpShiftRightAndFillUpperFromMaskedInt16x8 + OpShiftRightAndFillUpperFromMaskedInt16x16 + OpShiftRightAndFillUpperFromMaskedInt16x32 OpShiftRightAndFillUpperFromMaskedInt32x4 + OpShiftRightAndFillUpperFromMaskedInt32x8 + OpShiftRightAndFillUpperFromMaskedInt32x16 + OpShiftRightAndFillUpperFromMaskedInt64x2 + OpShiftRightAndFillUpperFromMaskedInt64x4 + OpShiftRightAndFillUpperFromMaskedInt64x8 + OpShiftRightAndFillUpperFromMaskedUint16x8 + OpShiftRightAndFillUpperFromMaskedUint16x16 + OpShiftRightAndFillUpperFromMaskedUint16x32 + OpShiftRightAndFillUpperFromMaskedUint32x4 + OpShiftRightAndFillUpperFromMaskedUint32x8 + OpShiftRightAndFillUpperFromMaskedUint32x16 + OpShiftRightAndFillUpperFromMaskedUint64x2 + OpShiftRightAndFillUpperFromMaskedUint64x4 + OpShiftRightAndFillUpperFromMaskedUint64x8 + OpShiftRightAndFillUpperFromUint16x8 + OpShiftRightAndFillUpperFromUint16x16 + OpShiftRightAndFillUpperFromUint16x32 + OpShiftRightAndFillUpperFromUint32x4 + OpShiftRightAndFillUpperFromUint32x8 + OpShiftRightAndFillUpperFromUint32x16 + OpShiftRightAndFillUpperFromUint64x2 + OpShiftRightAndFillUpperFromUint64x4 + OpShiftRightAndFillUpperFromUint64x8 + OpShiftRightInt16x8 + OpShiftRightInt16x16 + OpShiftRightInt16x32 + OpShiftRightInt32x4 + OpShiftRightInt32x8 + OpShiftRightInt32x16 + OpShiftRightInt64x2 + OpShiftRightInt64x4 + OpShiftRightInt64x8 + OpShiftRightMaskedInt16x8 + OpShiftRightMaskedInt16x16 + OpShiftRightMaskedInt16x32 OpShiftRightMaskedInt32x4 + OpShiftRightMaskedInt32x8 + OpShiftRightMaskedInt32x16 + OpShiftRightMaskedInt64x2 + OpShiftRightMaskedInt64x4 + OpShiftRightMaskedInt64x8 + OpShiftRightMaskedUint16x8 + OpShiftRightMaskedUint16x16 + OpShiftRightMaskedUint16x32 + OpShiftRightMaskedUint32x4 + OpShiftRightMaskedUint32x8 + OpShiftRightMaskedUint32x16 + OpShiftRightMaskedUint64x2 + OpShiftRightMaskedUint64x4 + OpShiftRightMaskedUint64x8 + OpShiftRightUint16x8 + OpShiftRightUint16x16 + OpShiftRightUint16x32 + OpShiftRightUint32x4 + OpShiftRightUint32x8 + OpShiftRightUint32x16 + OpShiftRightUint64x2 + OpShiftRightUint64x4 + OpShiftRightUint64x8 + OpSignInt8x16 + OpSignInt8x32 + OpSignInt16x8 + OpSignInt16x16 OpSignInt32x4 - OpSubInt32x4 - OpSubMaskedInt32x4 - OpUnsignedSignedQuadDotProdAccumulateInt32x4 - OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x4 - OpXorInt32x4 - OpXorMaskedInt32x4 - OpAbsoluteInt32x8 - OpAbsoluteMaskedInt32x8 - OpAddInt32x8 - OpAddMaskedInt32x8 - OpAndInt32x8 - OpAndMaskedInt32x8 - OpAndNotInt32x8 - OpAndNotMaskedInt32x8 - OpCompressInt32x8 - OpEqualInt32x8 - OpEqualMaskedInt32x8 - OpGreaterInt32x8 - OpGreaterEqualInt32x8 - OpGreaterEqualMaskedInt32x8 - OpGreaterMaskedInt32x8 - OpLessInt32x8 - OpLessEqualInt32x8 - OpLessEqualMaskedInt32x8 - OpLessMaskedInt32x8 - OpMaxInt32x8 
- OpMaxMaskedInt32x8 - OpMinInt32x8 - OpMinMaskedInt32x8 - OpMulEvenWidenInt32x8 - OpMulLowInt32x8 - OpMulLowMaskedInt32x8 - OpNotEqualInt32x8 - OpNotEqualMaskedInt32x8 - OpOrInt32x8 - OpOrMaskedInt32x8 - OpPairDotProdAccumulateInt32x8 - OpPairDotProdAccumulateMaskedInt32x8 - OpPairwiseAddInt32x8 - OpPairwiseSubInt32x8 - OpPopCountInt32x8 - OpPopCountMaskedInt32x8 - OpRotateLeftInt32x8 - OpRotateLeftMaskedInt32x8 - OpRotateRightInt32x8 - OpRotateRightMaskedInt32x8 - OpSaturatedPairDotProdAccumulateInt32x8 - OpSaturatedPairDotProdAccumulateMaskedInt32x8 - OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8 - OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8 - OpShiftAllLeftInt32x8 - OpShiftAllLeftMaskedInt32x8 - OpShiftAllRightInt32x8 - OpShiftAllRightMaskedInt32x8 - OpShiftLeftInt32x8 - OpShiftLeftAndFillUpperFromInt32x8 - OpShiftLeftAndFillUpperFromMaskedInt32x8 - OpShiftLeftMaskedInt32x8 - OpShiftRightInt32x8 - OpShiftRightAndFillUpperFromInt32x8 - OpShiftRightAndFillUpperFromMaskedInt32x8 - OpShiftRightMaskedInt32x8 OpSignInt32x8 + OpSqrtFloat32x4 + OpSqrtFloat32x8 + OpSqrtFloat32x16 + OpSqrtFloat64x2 + OpSqrtFloat64x4 + OpSqrtFloat64x8 + OpSqrtMaskedFloat32x4 + OpSqrtMaskedFloat32x8 + OpSqrtMaskedFloat32x16 + OpSqrtMaskedFloat64x2 + OpSqrtMaskedFloat64x4 + OpSqrtMaskedFloat64x8 + OpSubFloat32x4 + OpSubFloat32x8 + OpSubFloat32x16 + OpSubFloat64x2 + OpSubFloat64x4 + OpSubFloat64x8 + OpSubInt8x16 + OpSubInt8x32 + OpSubInt8x64 + OpSubInt16x8 + OpSubInt16x16 + OpSubInt16x32 + OpSubInt32x4 OpSubInt32x8 - OpSubMaskedInt32x8 - OpUnsignedSignedQuadDotProdAccumulateInt32x8 - OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x8 - OpXorInt32x8 - OpXorMaskedInt32x8 - OpAbsoluteInt64x2 - OpAbsoluteMaskedInt64x2 - OpAddInt64x2 - OpAddMaskedInt64x2 - OpAndInt64x2 - OpAndMaskedInt64x2 - OpAndNotInt64x2 - OpAndNotMaskedInt64x2 - OpCompressInt64x2 - OpEqualInt64x2 - OpEqualMaskedInt64x2 - OpGreaterInt64x2 - OpGreaterEqualInt64x2 - OpGreaterEqualMaskedInt64x2 - OpGreaterMaskedInt64x2 - OpLessInt64x2 - OpLessEqualInt64x2 - OpLessEqualMaskedInt64x2 - OpLessMaskedInt64x2 - OpMaxInt64x2 - OpMaxMaskedInt64x2 - OpMinInt64x2 - OpMinMaskedInt64x2 - OpMulEvenWidenInt64x2 - OpMulEvenWidenMaskedInt64x2 - OpMulLowInt64x2 - OpMulLowMaskedInt64x2 - OpNotEqualInt64x2 - OpNotEqualMaskedInt64x2 - OpOrInt64x2 - OpOrMaskedInt64x2 - OpPopCountInt64x2 - OpPopCountMaskedInt64x2 - OpRotateLeftInt64x2 - OpRotateLeftMaskedInt64x2 - OpRotateRightInt64x2 - OpRotateRightMaskedInt64x2 - OpShiftAllLeftInt64x2 - OpShiftAllLeftMaskedInt64x2 - OpShiftAllRightInt64x2 - OpShiftAllRightMaskedInt64x2 - OpShiftLeftInt64x2 - OpShiftLeftAndFillUpperFromInt64x2 - OpShiftLeftAndFillUpperFromMaskedInt64x2 - OpShiftLeftMaskedInt64x2 - OpShiftRightInt64x2 - OpShiftRightAndFillUpperFromInt64x2 - OpShiftRightAndFillUpperFromMaskedInt64x2 - OpShiftRightMaskedInt64x2 + OpSubInt32x16 OpSubInt64x2 - OpSubMaskedInt64x2 - OpXorInt64x2 - OpXorMaskedInt64x2 - OpAbsoluteInt64x4 - OpAbsoluteMaskedInt64x4 - OpAddInt64x4 - OpAddMaskedInt64x4 - OpAndInt64x4 - OpAndMaskedInt64x4 - OpAndNotInt64x4 - OpAndNotMaskedInt64x4 - OpCompressInt64x4 - OpEqualInt64x4 - OpEqualMaskedInt64x4 - OpGreaterInt64x4 - OpGreaterEqualInt64x4 - OpGreaterEqualMaskedInt64x4 - OpGreaterMaskedInt64x4 - OpLessInt64x4 - OpLessEqualInt64x4 - OpLessEqualMaskedInt64x4 - OpLessMaskedInt64x4 - OpMaxInt64x4 - OpMaxMaskedInt64x4 - OpMinInt64x4 - OpMinMaskedInt64x4 - OpMulEvenWidenInt64x4 - OpMulEvenWidenMaskedInt64x4 - OpMulLowInt64x4 - OpMulLowMaskedInt64x4 - OpNotEqualInt64x4 - 
OpNotEqualMaskedInt64x4 - OpOrInt64x4 - OpOrMaskedInt64x4 - OpPopCountInt64x4 - OpPopCountMaskedInt64x4 - OpRotateLeftInt64x4 - OpRotateLeftMaskedInt64x4 - OpRotateRightInt64x4 - OpRotateRightMaskedInt64x4 - OpShiftAllLeftInt64x4 - OpShiftAllLeftMaskedInt64x4 - OpShiftAllRightInt64x4 - OpShiftAllRightMaskedInt64x4 - OpShiftLeftInt64x4 - OpShiftLeftAndFillUpperFromInt64x4 - OpShiftLeftAndFillUpperFromMaskedInt64x4 - OpShiftLeftMaskedInt64x4 - OpShiftRightInt64x4 - OpShiftRightAndFillUpperFromInt64x4 - OpShiftRightAndFillUpperFromMaskedInt64x4 - OpShiftRightMaskedInt64x4 OpSubInt64x4 - OpSubMaskedInt64x4 - OpXorInt64x4 - OpXorMaskedInt64x4 - OpAbsoluteInt64x8 - OpAbsoluteMaskedInt64x8 - OpAddInt64x8 - OpAddMaskedInt64x8 - OpAndInt64x8 - OpAndMaskedInt64x8 - OpAndNotInt64x8 - OpAndNotMaskedInt64x8 - OpCompressInt64x8 - OpEqualInt64x8 - OpEqualMaskedInt64x8 - OpGreaterInt64x8 - OpGreaterEqualInt64x8 - OpGreaterEqualMaskedInt64x8 - OpGreaterMaskedInt64x8 - OpLessInt64x8 - OpLessEqualInt64x8 - OpLessEqualMaskedInt64x8 - OpLessMaskedInt64x8 - OpMaxInt64x8 - OpMaxMaskedInt64x8 - OpMinInt64x8 - OpMinMaskedInt64x8 - OpMulEvenWidenInt64x8 - OpMulEvenWidenMaskedInt64x8 - OpMulLowInt64x8 - OpMulLowMaskedInt64x8 - OpNotEqualInt64x8 - OpNotEqualMaskedInt64x8 - OpOrInt64x8 - OpOrMaskedInt64x8 - OpPopCountInt64x8 - OpPopCountMaskedInt64x8 - OpRotateLeftInt64x8 - OpRotateLeftMaskedInt64x8 - OpRotateRightInt64x8 - OpRotateRightMaskedInt64x8 - OpShiftAllLeftInt64x8 - OpShiftAllLeftMaskedInt64x8 - OpShiftAllRightInt64x8 - OpShiftAllRightMaskedInt64x8 - OpShiftLeftInt64x8 - OpShiftLeftAndFillUpperFromInt64x8 - OpShiftLeftAndFillUpperFromMaskedInt64x8 - OpShiftLeftMaskedInt64x8 - OpShiftRightInt64x8 - OpShiftRightAndFillUpperFromInt64x8 - OpShiftRightAndFillUpperFromMaskedInt64x8 - OpShiftRightMaskedInt64x8 OpSubInt64x8 - OpSubMaskedInt64x8 - OpXorInt64x8 - OpXorMaskedInt64x8 - OpAbsoluteInt8x16 - OpAbsoluteMaskedInt8x16 - OpAddInt8x16 - OpAddMaskedInt8x16 - OpAndInt8x16 - OpAndNotInt8x16 - OpCompressInt8x16 - OpEqualInt8x16 - OpEqualMaskedInt8x16 - OpGreaterInt8x16 - OpGreaterEqualInt8x16 - OpGreaterEqualMaskedInt8x16 - OpGreaterMaskedInt8x16 - OpLessInt8x16 - OpLessEqualInt8x16 - OpLessEqualMaskedInt8x16 - OpLessMaskedInt8x16 - OpMaxInt8x16 - OpMaxMaskedInt8x16 - OpMinInt8x16 - OpMinMaskedInt8x16 - OpNotEqualInt8x16 - OpNotEqualMaskedInt8x16 - OpOrInt8x16 - OpPopCountInt8x16 - OpPopCountMaskedInt8x16 - OpSaturatedAddInt8x16 - OpSaturatedAddMaskedInt8x16 - OpSaturatedSubInt8x16 - OpSaturatedSubMaskedInt8x16 - OpSignInt8x16 - OpSubInt8x16 + OpSubMaskedFloat32x4 + OpSubMaskedFloat32x8 + OpSubMaskedFloat32x16 + OpSubMaskedFloat64x2 + OpSubMaskedFloat64x4 + OpSubMaskedFloat64x8 OpSubMaskedInt8x16 - OpXorInt8x16 - OpAbsoluteInt8x32 - OpAbsoluteMaskedInt8x32 - OpAddInt8x32 - OpAddMaskedInt8x32 - OpAndInt8x32 - OpAndNotInt8x32 - OpCompressInt8x32 - OpEqualInt8x32 - OpEqualMaskedInt8x32 - OpGreaterInt8x32 - OpGreaterEqualInt8x32 - OpGreaterEqualMaskedInt8x32 - OpGreaterMaskedInt8x32 - OpLessInt8x32 - OpLessEqualInt8x32 - OpLessEqualMaskedInt8x32 - OpLessMaskedInt8x32 - OpMaxInt8x32 - OpMaxMaskedInt8x32 - OpMinInt8x32 - OpMinMaskedInt8x32 - OpNotEqualInt8x32 - OpNotEqualMaskedInt8x32 - OpOrInt8x32 - OpPopCountInt8x32 - OpPopCountMaskedInt8x32 - OpSaturatedAddInt8x32 - OpSaturatedAddMaskedInt8x32 - OpSaturatedSubInt8x32 - OpSaturatedSubMaskedInt8x32 - OpSignInt8x32 - OpSubInt8x32 OpSubMaskedInt8x32 - OpXorInt8x32 - OpAbsoluteInt8x64 - OpAbsoluteMaskedInt8x64 - OpAddInt8x64 - OpAddMaskedInt8x64 - OpCompressInt8x64 - 
OpEqualInt8x64 - OpEqualMaskedInt8x64 - OpGreaterInt8x64 - OpGreaterEqualInt8x64 - OpGreaterEqualMaskedInt8x64 - OpGreaterMaskedInt8x64 - OpLessInt8x64 - OpLessEqualInt8x64 - OpLessEqualMaskedInt8x64 - OpLessMaskedInt8x64 - OpMaxInt8x64 - OpMaxMaskedInt8x64 - OpMinInt8x64 - OpMinMaskedInt8x64 - OpNotEqualInt8x64 - OpNotEqualMaskedInt8x64 - OpPopCountInt8x64 - OpPopCountMaskedInt8x64 - OpSaturatedAddInt8x64 - OpSaturatedAddMaskedInt8x64 - OpSaturatedSubInt8x64 - OpSaturatedSubMaskedInt8x64 - OpSubInt8x64 OpSubMaskedInt8x64 - OpAddUint16x16 - OpAddMaskedUint16x16 - OpAndUint16x16 - OpAndNotUint16x16 - OpAverageUint16x16 - OpAverageMaskedUint16x16 - OpCompressUint16x16 - OpEqualUint16x16 - OpEqualMaskedUint16x16 - OpGreaterUint16x16 - OpGreaterEqualUint16x16 - OpGreaterEqualMaskedUint16x16 - OpGreaterMaskedUint16x16 - OpLessUint16x16 - OpLessEqualUint16x16 - OpLessEqualMaskedUint16x16 - OpLessMaskedUint16x16 - OpMaxUint16x16 - OpMaxMaskedUint16x16 - OpMinUint16x16 - OpMinMaskedUint16x16 - OpMulHighUint16x16 - OpMulHighMaskedUint16x16 - OpNotEqualUint16x16 - OpNotEqualMaskedUint16x16 - OpOrUint16x16 - OpPairwiseAddUint16x16 - OpPairwiseSubUint16x16 - OpPermuteInt16x16 - OpPermuteUint16x16 - OpPermute2Uint16x16 - OpPermute2Int16x16 - OpPermute2MaskedUint16x16 - OpPermute2MaskedInt16x16 - OpPermuteMaskedInt16x16 - OpPermuteMaskedUint16x16 - OpPopCountUint16x16 - OpPopCountMaskedUint16x16 - OpSaturatedAddUint16x16 - OpSaturatedAddMaskedUint16x16 - OpSaturatedSubUint16x16 - OpSaturatedSubMaskedUint16x16 - OpShiftAllLeftUint16x16 - OpShiftAllLeftMaskedUint16x16 - OpShiftAllRightUint16x16 - OpShiftAllRightMaskedUint16x16 - OpShiftLeftUint16x16 - OpShiftLeftAndFillUpperFromUint16x16 - OpShiftLeftAndFillUpperFromMaskedUint16x16 - OpShiftLeftMaskedUint16x16 - OpShiftRightUint16x16 - OpShiftRightAndFillUpperFromUint16x16 - OpShiftRightAndFillUpperFromMaskedUint16x16 - OpShiftRightMaskedUint16x16 - OpSubUint16x16 - OpSubMaskedUint16x16 - OpXorUint16x16 - OpAddUint16x32 - OpAddMaskedUint16x32 - OpAverageUint16x32 - OpAverageMaskedUint16x32 - OpCompressUint16x32 - OpEqualUint16x32 - OpEqualMaskedUint16x32 - OpGreaterUint16x32 - OpGreaterEqualUint16x32 - OpGreaterEqualMaskedUint16x32 - OpGreaterMaskedUint16x32 - OpLessUint16x32 - OpLessEqualUint16x32 - OpLessEqualMaskedUint16x32 - OpLessMaskedUint16x32 - OpMaxUint16x32 - OpMaxMaskedUint16x32 - OpMinUint16x32 - OpMinMaskedUint16x32 - OpMulHighUint16x32 - OpMulHighMaskedUint16x32 - OpNotEqualUint16x32 - OpNotEqualMaskedUint16x32 - OpPermuteUint16x32 - OpPermuteInt16x32 - OpPermute2Uint16x32 - OpPermute2Int16x32 - OpPermute2MaskedUint16x32 - OpPermute2MaskedInt16x32 - OpPermuteMaskedInt16x32 - OpPermuteMaskedUint16x32 - OpPopCountUint16x32 - OpPopCountMaskedUint16x32 - OpSaturatedAddUint16x32 - OpSaturatedAddMaskedUint16x32 - OpSaturatedSubUint16x32 - OpSaturatedSubMaskedUint16x32 - OpShiftAllLeftUint16x32 - OpShiftAllLeftMaskedUint16x32 - OpShiftAllRightUint16x32 - OpShiftAllRightMaskedUint16x32 - OpShiftLeftUint16x32 - OpShiftLeftAndFillUpperFromUint16x32 - OpShiftLeftAndFillUpperFromMaskedUint16x32 - OpShiftLeftMaskedUint16x32 - OpShiftRightUint16x32 - OpShiftRightAndFillUpperFromUint16x32 - OpShiftRightAndFillUpperFromMaskedUint16x32 - OpShiftRightMaskedUint16x32 - OpSubUint16x32 - OpSubMaskedUint16x32 - OpAddUint16x8 - OpAddMaskedUint16x8 - OpAndUint16x8 - OpAndNotUint16x8 - OpAverageUint16x8 - OpAverageMaskedUint16x8 - OpCompressUint16x8 - OpEqualUint16x8 - OpEqualMaskedUint16x8 - OpGreaterUint16x8 - OpGreaterEqualUint16x8 - OpGreaterEqualMaskedUint16x8 
- OpGreaterMaskedUint16x8 - OpLessUint16x8 - OpLessEqualUint16x8 - OpLessEqualMaskedUint16x8 - OpLessMaskedUint16x8 - OpMaxUint16x8 - OpMaxMaskedUint16x8 - OpMinUint16x8 - OpMinMaskedUint16x8 - OpMulHighUint16x8 - OpMulHighMaskedUint16x8 - OpNotEqualUint16x8 - OpNotEqualMaskedUint16x8 - OpOrUint16x8 - OpPairwiseAddUint16x8 - OpPairwiseSubUint16x8 - OpPermuteInt16x8 - OpPermuteUint16x8 - OpPermute2Uint16x8 - OpPermute2Int16x8 - OpPermute2MaskedInt16x8 - OpPermute2MaskedUint16x8 - OpPermuteMaskedInt16x8 - OpPermuteMaskedUint16x8 - OpPopCountUint16x8 - OpPopCountMaskedUint16x8 - OpSaturatedAddUint16x8 - OpSaturatedAddMaskedUint16x8 - OpSaturatedSubUint16x8 - OpSaturatedSubMaskedUint16x8 - OpShiftAllLeftUint16x8 - OpShiftAllLeftMaskedUint16x8 - OpShiftAllRightUint16x8 - OpShiftAllRightMaskedUint16x8 - OpShiftLeftUint16x8 - OpShiftLeftAndFillUpperFromUint16x8 - OpShiftLeftAndFillUpperFromMaskedUint16x8 - OpShiftLeftMaskedUint16x8 - OpShiftRightUint16x8 - OpShiftRightAndFillUpperFromUint16x8 - OpShiftRightAndFillUpperFromMaskedUint16x8 - OpShiftRightMaskedUint16x8 - OpSubUint16x8 - OpSubMaskedUint16x8 - OpXorUint16x8 - OpAddUint32x16 - OpAddMaskedUint32x16 - OpAndUint32x16 - OpAndMaskedUint32x16 - OpAndNotUint32x16 - OpAndNotMaskedUint32x16 - OpCompressUint32x16 - OpEqualUint32x16 - OpEqualMaskedUint32x16 - OpGreaterUint32x16 - OpGreaterEqualUint32x16 - OpGreaterEqualMaskedUint32x16 - OpGreaterMaskedUint32x16 - OpLessUint32x16 - OpLessEqualUint32x16 - OpLessEqualMaskedUint32x16 - OpLessMaskedUint32x16 - OpMaxUint32x16 - OpMaxMaskedUint32x16 - OpMinUint32x16 - OpMinMaskedUint32x16 - OpNotEqualUint32x16 - OpNotEqualMaskedUint32x16 - OpOrUint32x16 - OpOrMaskedUint32x16 - OpPermuteInt32x16 - OpPermuteFloat32x16 - OpPermuteUint32x16 - OpPermute2Uint32x16 - OpPermute2Float32x16 - OpPermute2Int32x16 - OpPermute2MaskedUint32x16 - OpPermute2MaskedInt32x16 - OpPermute2MaskedFloat32x16 - OpPermuteMaskedFloat32x16 - OpPermuteMaskedInt32x16 - OpPermuteMaskedUint32x16 - OpPopCountUint32x16 - OpPopCountMaskedUint32x16 - OpRotateLeftUint32x16 - OpRotateLeftMaskedUint32x16 - OpRotateRightUint32x16 - OpRotateRightMaskedUint32x16 - OpShiftAllLeftUint32x16 - OpShiftAllLeftMaskedUint32x16 - OpShiftAllRightUint32x16 - OpShiftAllRightMaskedUint32x16 - OpShiftLeftUint32x16 - OpShiftLeftAndFillUpperFromUint32x16 - OpShiftLeftAndFillUpperFromMaskedUint32x16 - OpShiftLeftMaskedUint32x16 - OpShiftRightUint32x16 - OpShiftRightAndFillUpperFromUint32x16 - OpShiftRightAndFillUpperFromMaskedUint32x16 - OpShiftRightMaskedUint32x16 - OpSubUint32x16 - OpSubMaskedUint32x16 - OpXorUint32x16 - OpXorMaskedUint32x16 - OpAddUint32x4 - OpAddMaskedUint32x4 - OpAndUint32x4 - OpAndMaskedUint32x4 - OpAndNotUint32x4 - OpAndNotMaskedUint32x4 - OpCompressUint32x4 - OpEqualUint32x4 - OpEqualMaskedUint32x4 - OpGreaterUint32x4 - OpGreaterEqualUint32x4 - OpGreaterEqualMaskedUint32x4 - OpGreaterMaskedUint32x4 - OpLessUint32x4 - OpLessEqualUint32x4 - OpLessEqualMaskedUint32x4 - OpLessMaskedUint32x4 - OpMaxUint32x4 - OpMaxMaskedUint32x4 - OpMinUint32x4 - OpMinMaskedUint32x4 - OpMulEvenWidenUint32x4 - OpNotEqualUint32x4 - OpNotEqualMaskedUint32x4 - OpOrUint32x4 - OpOrMaskedUint32x4 - OpPairwiseAddUint32x4 - OpPairwiseSubUint32x4 - OpPermute2Float32x4 - OpPermute2Uint32x4 - OpPermute2Int32x4 - OpPermute2MaskedInt32x4 - OpPermute2MaskedUint32x4 - OpPermute2MaskedFloat32x4 - OpPopCountUint32x4 - OpPopCountMaskedUint32x4 - OpRotateLeftUint32x4 - OpRotateLeftMaskedUint32x4 - OpRotateRightUint32x4 - OpRotateRightMaskedUint32x4 - OpShiftAllLeftUint32x4 - 
OpShiftAllLeftMaskedUint32x4 - OpShiftAllRightUint32x4 - OpShiftAllRightMaskedUint32x4 - OpShiftLeftUint32x4 - OpShiftLeftAndFillUpperFromUint32x4 - OpShiftLeftAndFillUpperFromMaskedUint32x4 - OpShiftLeftMaskedUint32x4 - OpShiftRightUint32x4 - OpShiftRightAndFillUpperFromUint32x4 - OpShiftRightAndFillUpperFromMaskedUint32x4 - OpShiftRightMaskedUint32x4 - OpSubUint32x4 - OpSubMaskedUint32x4 - OpXorUint32x4 - OpXorMaskedUint32x4 - OpAddUint32x8 - OpAddMaskedUint32x8 - OpAndUint32x8 - OpAndMaskedUint32x8 - OpAndNotUint32x8 - OpAndNotMaskedUint32x8 - OpCompressUint32x8 - OpEqualUint32x8 - OpEqualMaskedUint32x8 - OpGreaterUint32x8 - OpGreaterEqualUint32x8 - OpGreaterEqualMaskedUint32x8 - OpGreaterMaskedUint32x8 - OpLessUint32x8 - OpLessEqualUint32x8 - OpLessEqualMaskedUint32x8 - OpLessMaskedUint32x8 - OpMaxUint32x8 - OpMaxMaskedUint32x8 - OpMinUint32x8 - OpMinMaskedUint32x8 - OpMulEvenWidenUint32x8 - OpNotEqualUint32x8 - OpNotEqualMaskedUint32x8 - OpOrUint32x8 - OpOrMaskedUint32x8 - OpPairwiseAddUint32x8 - OpPairwiseSubUint32x8 - OpPermuteUint32x8 - OpPermuteFloat32x8 - OpPermuteInt32x8 - OpPermute2Int32x8 - OpPermute2Float32x8 - OpPermute2Uint32x8 - OpPermute2MaskedFloat32x8 - OpPermute2MaskedUint32x8 - OpPermute2MaskedInt32x8 - OpPermuteMaskedInt32x8 - OpPermuteMaskedUint32x8 - OpPermuteMaskedFloat32x8 - OpPopCountUint32x8 - OpPopCountMaskedUint32x8 - OpRotateLeftUint32x8 - OpRotateLeftMaskedUint32x8 - OpRotateRightUint32x8 - OpRotateRightMaskedUint32x8 - OpShiftAllLeftUint32x8 - OpShiftAllLeftMaskedUint32x8 - OpShiftAllRightUint32x8 - OpShiftAllRightMaskedUint32x8 - OpShiftLeftUint32x8 - OpShiftLeftAndFillUpperFromUint32x8 - OpShiftLeftAndFillUpperFromMaskedUint32x8 - OpShiftLeftMaskedUint32x8 - OpShiftRightUint32x8 - OpShiftRightAndFillUpperFromUint32x8 - OpShiftRightAndFillUpperFromMaskedUint32x8 - OpShiftRightMaskedUint32x8 - OpSubUint32x8 - OpSubMaskedUint32x8 - OpXorUint32x8 - OpXorMaskedUint32x8 - OpAddUint64x2 - OpAddMaskedUint64x2 - OpAndUint64x2 - OpAndMaskedUint64x2 - OpAndNotUint64x2 - OpAndNotMaskedUint64x2 - OpCompressUint64x2 - OpEqualUint64x2 - OpEqualMaskedUint64x2 - OpGreaterUint64x2 - OpGreaterEqualUint64x2 - OpGreaterEqualMaskedUint64x2 - OpGreaterMaskedUint64x2 - OpLessUint64x2 - OpLessEqualUint64x2 - OpLessEqualMaskedUint64x2 - OpLessMaskedUint64x2 - OpMaxUint64x2 - OpMaxMaskedUint64x2 - OpMinUint64x2 - OpMinMaskedUint64x2 - OpMulEvenWidenUint64x2 - OpMulEvenWidenMaskedUint64x2 - OpNotEqualUint64x2 - OpNotEqualMaskedUint64x2 - OpOrUint64x2 - OpOrMaskedUint64x2 - OpPermute2Float64x2 - OpPermute2Uint64x2 - OpPermute2Int64x2 - OpPermute2MaskedInt64x2 - OpPermute2MaskedFloat64x2 - OpPermute2MaskedUint64x2 - OpPopCountUint64x2 - OpPopCountMaskedUint64x2 - OpRotateLeftUint64x2 - OpRotateLeftMaskedUint64x2 - OpRotateRightUint64x2 - OpRotateRightMaskedUint64x2 - OpShiftAllLeftUint64x2 - OpShiftAllLeftMaskedUint64x2 - OpShiftAllRightUint64x2 - OpShiftAllRightMaskedUint64x2 - OpShiftLeftUint64x2 - OpShiftLeftAndFillUpperFromUint64x2 - OpShiftLeftAndFillUpperFromMaskedUint64x2 - OpShiftLeftMaskedUint64x2 - OpShiftRightUint64x2 - OpShiftRightAndFillUpperFromUint64x2 - OpShiftRightAndFillUpperFromMaskedUint64x2 - OpShiftRightMaskedUint64x2 - OpSubUint64x2 - OpSubMaskedUint64x2 - OpXorUint64x2 - OpXorMaskedUint64x2 - OpAddUint64x4 - OpAddMaskedUint64x4 - OpAndUint64x4 - OpAndMaskedUint64x4 - OpAndNotUint64x4 - OpAndNotMaskedUint64x4 - OpCompressUint64x4 - OpEqualUint64x4 - OpEqualMaskedUint64x4 - OpGreaterUint64x4 - OpGreaterEqualUint64x4 - OpGreaterEqualMaskedUint64x4 - 
OpGreaterMaskedUint64x4 - OpLessUint64x4 - OpLessEqualUint64x4 - OpLessEqualMaskedUint64x4 - OpLessMaskedUint64x4 - OpMaxUint64x4 - OpMaxMaskedUint64x4 - OpMinUint64x4 - OpMinMaskedUint64x4 - OpMulEvenWidenUint64x4 - OpMulEvenWidenMaskedUint64x4 - OpNotEqualUint64x4 - OpNotEqualMaskedUint64x4 - OpOrUint64x4 - OpOrMaskedUint64x4 - OpPermuteUint64x4 - OpPermuteInt64x4 - OpPermuteFloat64x4 - OpPermute2Uint64x4 - OpPermute2Int64x4 - OpPermute2Float64x4 - OpPermute2MaskedUint64x4 - OpPermute2MaskedFloat64x4 - OpPermute2MaskedInt64x4 - OpPermuteMaskedUint64x4 - OpPermuteMaskedFloat64x4 - OpPermuteMaskedInt64x4 - OpPopCountUint64x4 - OpPopCountMaskedUint64x4 - OpRotateLeftUint64x4 - OpRotateLeftMaskedUint64x4 - OpRotateRightUint64x4 - OpRotateRightMaskedUint64x4 - OpShiftAllLeftUint64x4 - OpShiftAllLeftMaskedUint64x4 - OpShiftAllRightUint64x4 - OpShiftAllRightMaskedUint64x4 - OpShiftLeftUint64x4 - OpShiftLeftAndFillUpperFromUint64x4 - OpShiftLeftAndFillUpperFromMaskedUint64x4 - OpShiftLeftMaskedUint64x4 - OpShiftRightUint64x4 - OpShiftRightAndFillUpperFromUint64x4 - OpShiftRightAndFillUpperFromMaskedUint64x4 - OpShiftRightMaskedUint64x4 - OpSubUint64x4 - OpSubMaskedUint64x4 - OpXorUint64x4 - OpXorMaskedUint64x4 - OpAddUint64x8 - OpAddMaskedUint64x8 - OpAndUint64x8 - OpAndMaskedUint64x8 - OpAndNotUint64x8 - OpAndNotMaskedUint64x8 - OpCompressUint64x8 - OpEqualUint64x8 - OpEqualMaskedUint64x8 - OpGreaterUint64x8 - OpGreaterEqualUint64x8 - OpGreaterEqualMaskedUint64x8 - OpGreaterMaskedUint64x8 - OpLessUint64x8 - OpLessEqualUint64x8 - OpLessEqualMaskedUint64x8 - OpLessMaskedUint64x8 - OpMaxUint64x8 - OpMaxMaskedUint64x8 - OpMinUint64x8 - OpMinMaskedUint64x8 - OpMulEvenWidenUint64x8 - OpMulEvenWidenMaskedUint64x8 - OpNotEqualUint64x8 - OpNotEqualMaskedUint64x8 - OpOrUint64x8 - OpOrMaskedUint64x8 - OpPermuteUint64x8 - OpPermuteFloat64x8 - OpPermuteInt64x8 - OpPermute2Float64x8 - OpPermute2Uint64x8 - OpPermute2Int64x8 - OpPermute2MaskedFloat64x8 - OpPermute2MaskedUint64x8 - OpPermute2MaskedInt64x8 - OpPermuteMaskedInt64x8 - OpPermuteMaskedFloat64x8 - OpPermuteMaskedUint64x8 - OpPopCountUint64x8 - OpPopCountMaskedUint64x8 - OpRotateLeftUint64x8 - OpRotateLeftMaskedUint64x8 - OpRotateRightUint64x8 - OpRotateRightMaskedUint64x8 - OpShiftAllLeftUint64x8 - OpShiftAllLeftMaskedUint64x8 - OpShiftAllRightUint64x8 - OpShiftAllRightMaskedUint64x8 - OpShiftLeftUint64x8 - OpShiftLeftAndFillUpperFromUint64x8 - OpShiftLeftAndFillUpperFromMaskedUint64x8 - OpShiftLeftMaskedUint64x8 - OpShiftRightUint64x8 - OpShiftRightAndFillUpperFromUint64x8 - OpShiftRightAndFillUpperFromMaskedUint64x8 - OpShiftRightMaskedUint64x8 - OpSubUint64x8 - OpSubMaskedUint64x8 - OpXorUint64x8 - OpXorMaskedUint64x8 - OpAddUint8x16 - OpAddMaskedUint8x16 - OpAndUint8x16 - OpAndNotUint8x16 - OpAverageUint8x16 - OpAverageMaskedUint8x16 - OpCompressUint8x16 - OpEqualUint8x16 - OpEqualMaskedUint8x16 - OpGaloisFieldMulUint8x16 - OpGaloisFieldMulMaskedUint8x16 - OpGreaterUint8x16 - OpGreaterEqualUint8x16 - OpGreaterEqualMaskedUint8x16 - OpGreaterMaskedUint8x16 - OpLessUint8x16 - OpLessEqualUint8x16 - OpLessEqualMaskedUint8x16 - OpLessMaskedUint8x16 - OpMaxUint8x16 - OpMaxMaskedUint8x16 - OpMinUint8x16 - OpMinMaskedUint8x16 - OpNotEqualUint8x16 - OpNotEqualMaskedUint8x16 - OpOrUint8x16 - OpPermuteUint8x16 - OpPermuteInt8x16 - OpPermute2Uint8x16 - OpPermute2Int8x16 - OpPermute2MaskedInt8x16 - OpPermute2MaskedUint8x16 - OpPermuteMaskedUint8x16 - OpPermuteMaskedInt8x16 - OpPopCountUint8x16 - OpPopCountMaskedUint8x16 - OpSaturatedAddUint8x16 - 
OpSaturatedAddMaskedUint8x16 - OpSaturatedSubUint8x16 - OpSaturatedSubMaskedUint8x16 - OpSaturatedUnsignedSignedPairDotProdUint8x16 - OpSaturatedUnsignedSignedPairDotProdMaskedUint8x16 - OpSubUint8x16 + OpSubMaskedInt16x8 + OpSubMaskedInt16x16 + OpSubMaskedInt16x32 + OpSubMaskedInt32x4 + OpSubMaskedInt32x8 + OpSubMaskedInt32x16 + OpSubMaskedInt64x2 + OpSubMaskedInt64x4 + OpSubMaskedInt64x8 OpSubMaskedUint8x16 - OpXorUint8x16 - OpAddUint8x32 - OpAddMaskedUint8x32 - OpAndUint8x32 - OpAndNotUint8x32 - OpAverageUint8x32 - OpAverageMaskedUint8x32 - OpCompressUint8x32 - OpEqualUint8x32 - OpEqualMaskedUint8x32 - OpGaloisFieldMulUint8x32 - OpGaloisFieldMulMaskedUint8x32 - OpGreaterUint8x32 - OpGreaterEqualUint8x32 - OpGreaterEqualMaskedUint8x32 - OpGreaterMaskedUint8x32 - OpLessUint8x32 - OpLessEqualUint8x32 - OpLessEqualMaskedUint8x32 - OpLessMaskedUint8x32 - OpMaxUint8x32 - OpMaxMaskedUint8x32 - OpMinUint8x32 - OpMinMaskedUint8x32 - OpNotEqualUint8x32 - OpNotEqualMaskedUint8x32 - OpOrUint8x32 - OpPermuteUint8x32 - OpPermuteInt8x32 - OpPermute2Int8x32 - OpPermute2Uint8x32 - OpPermute2MaskedUint8x32 - OpPermute2MaskedInt8x32 - OpPermuteMaskedUint8x32 - OpPermuteMaskedInt8x32 - OpPopCountUint8x32 - OpPopCountMaskedUint8x32 - OpSaturatedAddUint8x32 - OpSaturatedAddMaskedUint8x32 - OpSaturatedSubUint8x32 - OpSaturatedSubMaskedUint8x32 - OpSaturatedUnsignedSignedPairDotProdUint8x32 - OpSaturatedUnsignedSignedPairDotProdMaskedUint8x32 - OpSubUint8x32 - OpSubMaskedUint8x32 - OpXorUint8x32 - OpAddUint8x64 - OpAddMaskedUint8x64 - OpAverageUint8x64 - OpAverageMaskedUint8x64 - OpCompressUint8x64 - OpEqualUint8x64 - OpEqualMaskedUint8x64 - OpGaloisFieldMulUint8x64 - OpGaloisFieldMulMaskedUint8x64 - OpGreaterUint8x64 - OpGreaterEqualUint8x64 - OpGreaterEqualMaskedUint8x64 - OpGreaterMaskedUint8x64 - OpLessUint8x64 - OpLessEqualUint8x64 - OpLessEqualMaskedUint8x64 - OpLessMaskedUint8x64 - OpMaxUint8x64 - OpMaxMaskedUint8x64 - OpMinUint8x64 - OpMinMaskedUint8x64 - OpNotEqualUint8x64 - OpNotEqualMaskedUint8x64 - OpPermuteInt8x64 - OpPermuteUint8x64 - OpPermute2Uint8x64 - OpPermute2Int8x64 - OpPermute2MaskedUint8x64 - OpPermute2MaskedInt8x64 - OpPermuteMaskedUint8x64 - OpPermuteMaskedInt8x64 - OpPopCountUint8x64 - OpPopCountMaskedUint8x64 - OpSaturatedAddUint8x64 - OpSaturatedAddMaskedUint8x64 - OpSaturatedSubUint8x64 - OpSaturatedSubMaskedUint8x64 - OpSaturatedUnsignedSignedPairDotProdUint8x64 - OpSaturatedUnsignedSignedPairDotProdMaskedUint8x64 - OpSubUint8x64 + OpSubMaskedUint8x32 OpSubMaskedUint8x64 - OpCeilWithPrecisionFloat32x16 - OpCeilWithPrecisionMaskedFloat32x16 - OpDiffWithCeilWithPrecisionFloat32x16 - OpDiffWithCeilWithPrecisionMaskedFloat32x16 - OpDiffWithFloorWithPrecisionFloat32x16 - OpDiffWithFloorWithPrecisionMaskedFloat32x16 - OpDiffWithRoundWithPrecisionFloat32x16 - OpDiffWithRoundWithPrecisionMaskedFloat32x16 - OpDiffWithTruncWithPrecisionFloat32x16 - OpDiffWithTruncWithPrecisionMaskedFloat32x16 - OpFloorWithPrecisionFloat32x16 - OpFloorWithPrecisionMaskedFloat32x16 - OpRoundWithPrecisionFloat32x16 - OpRoundWithPrecisionMaskedFloat32x16 - OpTruncWithPrecisionFloat32x16 - OpTruncWithPrecisionMaskedFloat32x16 + OpSubMaskedUint16x8 + OpSubMaskedUint16x16 + OpSubMaskedUint16x32 + OpSubMaskedUint32x4 + OpSubMaskedUint32x8 + OpSubMaskedUint32x16 + OpSubMaskedUint64x2 + OpSubMaskedUint64x4 + OpSubMaskedUint64x8 + OpSubUint8x16 + OpSubUint8x32 + OpSubUint8x64 + OpSubUint16x8 + OpSubUint16x16 + OpSubUint16x32 + OpSubUint32x4 + OpSubUint32x8 + OpSubUint32x16 + OpSubUint64x2 + OpSubUint64x4 + 
OpSubUint64x8 + OpTruncFloat32x4 + OpTruncFloat32x8 + OpTruncFloat64x2 + OpTruncFloat64x4 + OpUnsignedSignedQuadDotProdAccumulateInt32x4 + OpUnsignedSignedQuadDotProdAccumulateInt32x8 + OpUnsignedSignedQuadDotProdAccumulateInt32x16 + OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x4 + OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x8 + OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x16 + OpXorInt8x16 + OpXorInt8x32 + OpXorInt16x8 + OpXorInt16x16 + OpXorInt32x4 + OpXorInt32x8 + OpXorInt32x16 + OpXorInt64x2 + OpXorInt64x4 + OpXorInt64x8 + OpXorMaskedInt32x4 + OpXorMaskedInt32x8 + OpXorMaskedInt32x16 + OpXorMaskedInt64x2 + OpXorMaskedInt64x4 + OpXorMaskedInt64x8 + OpXorMaskedUint32x4 + OpXorMaskedUint32x8 + OpXorMaskedUint32x16 + OpXorMaskedUint64x2 + OpXorMaskedUint64x4 + OpXorMaskedUint64x8 + OpXorUint8x16 + OpXorUint8x32 + OpXorUint16x8 + OpXorUint16x16 + OpXorUint32x4 + OpXorUint32x8 + OpXorUint32x16 + OpXorUint64x2 + OpXorUint64x4 + OpXorUint64x8 OpCeilWithPrecisionFloat32x4 - OpCeilWithPrecisionMaskedFloat32x4 - OpDiffWithCeilWithPrecisionFloat32x4 - OpDiffWithCeilWithPrecisionMaskedFloat32x4 - OpDiffWithFloorWithPrecisionFloat32x4 - OpDiffWithFloorWithPrecisionMaskedFloat32x4 - OpDiffWithRoundWithPrecisionFloat32x4 - OpDiffWithRoundWithPrecisionMaskedFloat32x4 - OpDiffWithTruncWithPrecisionFloat32x4 - OpDiffWithTruncWithPrecisionMaskedFloat32x4 - OpFloorWithPrecisionFloat32x4 - OpFloorWithPrecisionMaskedFloat32x4 - OpRoundWithPrecisionFloat32x4 - OpRoundWithPrecisionMaskedFloat32x4 - OpTruncWithPrecisionFloat32x4 - OpTruncWithPrecisionMaskedFloat32x4 OpCeilWithPrecisionFloat32x8 - OpCeilWithPrecisionMaskedFloat32x8 - OpDiffWithCeilWithPrecisionFloat32x8 - OpDiffWithCeilWithPrecisionMaskedFloat32x8 - OpDiffWithFloorWithPrecisionFloat32x8 - OpDiffWithFloorWithPrecisionMaskedFloat32x8 - OpDiffWithRoundWithPrecisionFloat32x8 - OpDiffWithRoundWithPrecisionMaskedFloat32x8 - OpDiffWithTruncWithPrecisionFloat32x8 - OpDiffWithTruncWithPrecisionMaskedFloat32x8 - OpFloorWithPrecisionFloat32x8 - OpFloorWithPrecisionMaskedFloat32x8 - OpGet128Float32x8 - OpRoundWithPrecisionFloat32x8 - OpRoundWithPrecisionMaskedFloat32x8 - OpSet128Float32x8 - OpTruncWithPrecisionFloat32x8 - OpTruncWithPrecisionMaskedFloat32x8 + OpCeilWithPrecisionFloat32x16 OpCeilWithPrecisionFloat64x2 - OpCeilWithPrecisionMaskedFloat64x2 - OpDiffWithCeilWithPrecisionFloat64x2 - OpDiffWithCeilWithPrecisionMaskedFloat64x2 - OpDiffWithFloorWithPrecisionFloat64x2 - OpDiffWithFloorWithPrecisionMaskedFloat64x2 - OpDiffWithRoundWithPrecisionFloat64x2 - OpDiffWithRoundWithPrecisionMaskedFloat64x2 - OpDiffWithTruncWithPrecisionFloat64x2 - OpDiffWithTruncWithPrecisionMaskedFloat64x2 - OpFloorWithPrecisionFloat64x2 - OpFloorWithPrecisionMaskedFloat64x2 - OpRoundWithPrecisionFloat64x2 - OpRoundWithPrecisionMaskedFloat64x2 - OpTruncWithPrecisionFloat64x2 - OpTruncWithPrecisionMaskedFloat64x2 OpCeilWithPrecisionFloat64x4 - OpCeilWithPrecisionMaskedFloat64x4 - OpDiffWithCeilWithPrecisionFloat64x4 - OpDiffWithCeilWithPrecisionMaskedFloat64x4 - OpDiffWithFloorWithPrecisionFloat64x4 - OpDiffWithFloorWithPrecisionMaskedFloat64x4 - OpDiffWithRoundWithPrecisionFloat64x4 - OpDiffWithRoundWithPrecisionMaskedFloat64x4 - OpDiffWithTruncWithPrecisionFloat64x4 - OpDiffWithTruncWithPrecisionMaskedFloat64x4 - OpFloorWithPrecisionFloat64x4 - OpFloorWithPrecisionMaskedFloat64x4 - OpGet128Float64x4 - OpRoundWithPrecisionFloat64x4 - OpRoundWithPrecisionMaskedFloat64x4 - OpSet128Float64x4 - OpTruncWithPrecisionFloat64x4 - OpTruncWithPrecisionMaskedFloat64x4 
OpCeilWithPrecisionFloat64x8 + OpCeilWithPrecisionMaskedFloat32x4 + OpCeilWithPrecisionMaskedFloat32x8 + OpCeilWithPrecisionMaskedFloat32x16 + OpCeilWithPrecisionMaskedFloat64x2 + OpCeilWithPrecisionMaskedFloat64x4 OpCeilWithPrecisionMaskedFloat64x8 + OpDiffWithCeilWithPrecisionFloat32x4 + OpDiffWithCeilWithPrecisionFloat32x8 + OpDiffWithCeilWithPrecisionFloat32x16 + OpDiffWithCeilWithPrecisionFloat64x2 + OpDiffWithCeilWithPrecisionFloat64x4 OpDiffWithCeilWithPrecisionFloat64x8 + OpDiffWithCeilWithPrecisionMaskedFloat32x4 + OpDiffWithCeilWithPrecisionMaskedFloat32x8 + OpDiffWithCeilWithPrecisionMaskedFloat32x16 + OpDiffWithCeilWithPrecisionMaskedFloat64x2 + OpDiffWithCeilWithPrecisionMaskedFloat64x4 OpDiffWithCeilWithPrecisionMaskedFloat64x8 + OpDiffWithFloorWithPrecisionFloat32x4 + OpDiffWithFloorWithPrecisionFloat32x8 + OpDiffWithFloorWithPrecisionFloat32x16 + OpDiffWithFloorWithPrecisionFloat64x2 + OpDiffWithFloorWithPrecisionFloat64x4 OpDiffWithFloorWithPrecisionFloat64x8 + OpDiffWithFloorWithPrecisionMaskedFloat32x4 + OpDiffWithFloorWithPrecisionMaskedFloat32x8 + OpDiffWithFloorWithPrecisionMaskedFloat32x16 + OpDiffWithFloorWithPrecisionMaskedFloat64x2 + OpDiffWithFloorWithPrecisionMaskedFloat64x4 OpDiffWithFloorWithPrecisionMaskedFloat64x8 + OpDiffWithRoundWithPrecisionFloat32x4 + OpDiffWithRoundWithPrecisionFloat32x8 + OpDiffWithRoundWithPrecisionFloat32x16 + OpDiffWithRoundWithPrecisionFloat64x2 + OpDiffWithRoundWithPrecisionFloat64x4 OpDiffWithRoundWithPrecisionFloat64x8 + OpDiffWithRoundWithPrecisionMaskedFloat32x4 + OpDiffWithRoundWithPrecisionMaskedFloat32x8 + OpDiffWithRoundWithPrecisionMaskedFloat32x16 + OpDiffWithRoundWithPrecisionMaskedFloat64x2 + OpDiffWithRoundWithPrecisionMaskedFloat64x4 OpDiffWithRoundWithPrecisionMaskedFloat64x8 + OpDiffWithTruncWithPrecisionFloat32x4 + OpDiffWithTruncWithPrecisionFloat32x8 + OpDiffWithTruncWithPrecisionFloat32x16 + OpDiffWithTruncWithPrecisionFloat64x2 + OpDiffWithTruncWithPrecisionFloat64x4 OpDiffWithTruncWithPrecisionFloat64x8 + OpDiffWithTruncWithPrecisionMaskedFloat32x4 + OpDiffWithTruncWithPrecisionMaskedFloat32x8 + OpDiffWithTruncWithPrecisionMaskedFloat32x16 + OpDiffWithTruncWithPrecisionMaskedFloat64x2 + OpDiffWithTruncWithPrecisionMaskedFloat64x4 OpDiffWithTruncWithPrecisionMaskedFloat64x8 + OpFloorWithPrecisionFloat32x4 + OpFloorWithPrecisionFloat32x8 + OpFloorWithPrecisionFloat32x16 + OpFloorWithPrecisionFloat64x2 + OpFloorWithPrecisionFloat64x4 OpFloorWithPrecisionFloat64x8 + OpFloorWithPrecisionMaskedFloat32x4 + OpFloorWithPrecisionMaskedFloat32x8 + OpFloorWithPrecisionMaskedFloat32x16 + OpFloorWithPrecisionMaskedFloat64x2 + OpFloorWithPrecisionMaskedFloat64x4 OpFloorWithPrecisionMaskedFloat64x8 - OpRoundWithPrecisionFloat64x8 - OpRoundWithPrecisionMaskedFloat64x8 - OpTruncWithPrecisionFloat64x8 - OpTruncWithPrecisionMaskedFloat64x8 + OpGaloisFieldAffineTransformInverseMaskedUint8x16 + OpGaloisFieldAffineTransformInverseMaskedUint8x32 + OpGaloisFieldAffineTransformInverseMaskedUint8x64 + OpGaloisFieldAffineTransformInverseUint8x16 + OpGaloisFieldAffineTransformInverseUint8x32 + OpGaloisFieldAffineTransformInverseUint8x64 + OpGaloisFieldAffineTransformMaskedUint8x16 + OpGaloisFieldAffineTransformMaskedUint8x32 + OpGaloisFieldAffineTransformMaskedUint8x64 + OpGaloisFieldAffineTransformUint8x16 + OpGaloisFieldAffineTransformUint8x32 + OpGaloisFieldAffineTransformUint8x64 + OpGet128Float32x8 + OpGet128Float64x4 + OpGet128Int8x32 OpGet128Int16x16 - OpSet128Int16x16 - OpShiftAllLeftAndFillUpperFromInt16x16 - 
OpShiftAllLeftAndFillUpperFromMaskedInt16x16 - OpShiftAllRightAndFillUpperFromInt16x16 - OpShiftAllRightAndFillUpperFromMaskedInt16x16 - OpShiftAllLeftAndFillUpperFromInt16x32 - OpShiftAllLeftAndFillUpperFromMaskedInt16x32 - OpShiftAllRightAndFillUpperFromInt16x32 - OpShiftAllRightAndFillUpperFromMaskedInt16x32 + OpGet128Int32x8 + OpGet128Int64x4 + OpGet128Uint8x32 + OpGet128Uint16x16 + OpGet128Uint32x8 + OpGet128Uint64x4 + OpGetElemInt8x16 OpGetElemInt16x8 - OpSetElemInt16x8 - OpShiftAllLeftAndFillUpperFromInt16x8 - OpShiftAllLeftAndFillUpperFromMaskedInt16x8 - OpShiftAllRightAndFillUpperFromInt16x8 - OpShiftAllRightAndFillUpperFromMaskedInt16x8 - OpRotateAllLeftInt32x16 - OpRotateAllLeftMaskedInt32x16 - OpRotateAllRightInt32x16 - OpRotateAllRightMaskedInt32x16 - OpShiftAllLeftAndFillUpperFromInt32x16 - OpShiftAllLeftAndFillUpperFromMaskedInt32x16 - OpShiftAllRightAndFillUpperFromInt32x16 - OpShiftAllRightAndFillUpperFromMaskedInt32x16 OpGetElemInt32x4 + OpGetElemInt64x2 + OpGetElemUint8x16 + OpGetElemUint16x8 + OpGetElemUint32x4 + OpGetElemUint64x2 OpRotateAllLeftInt32x4 - OpRotateAllLeftMaskedInt32x4 - OpRotateAllRightInt32x4 - OpRotateAllRightMaskedInt32x4 - OpSetElemInt32x4 - OpShiftAllLeftAndFillUpperFromInt32x4 - OpShiftAllLeftAndFillUpperFromMaskedInt32x4 - OpShiftAllRightAndFillUpperFromInt32x4 - OpShiftAllRightAndFillUpperFromMaskedInt32x4 - OpGet128Int32x8 OpRotateAllLeftInt32x8 - OpRotateAllLeftMaskedInt32x8 - OpRotateAllRightInt32x8 - OpRotateAllRightMaskedInt32x8 - OpSet128Int32x8 - OpShiftAllLeftAndFillUpperFromInt32x8 - OpShiftAllLeftAndFillUpperFromMaskedInt32x8 - OpShiftAllRightAndFillUpperFromInt32x8 - OpShiftAllRightAndFillUpperFromMaskedInt32x8 - OpGetElemInt64x2 + OpRotateAllLeftInt32x16 OpRotateAllLeftInt64x2 - OpRotateAllLeftMaskedInt64x2 - OpRotateAllRightInt64x2 - OpRotateAllRightMaskedInt64x2 - OpSetElemInt64x2 - OpShiftAllLeftAndFillUpperFromInt64x2 - OpShiftAllLeftAndFillUpperFromMaskedInt64x2 - OpShiftAllRightAndFillUpperFromInt64x2 - OpShiftAllRightAndFillUpperFromMaskedInt64x2 - OpGet128Int64x4 OpRotateAllLeftInt64x4 + OpRotateAllLeftInt64x8 + OpRotateAllLeftMaskedInt32x4 + OpRotateAllLeftMaskedInt32x8 + OpRotateAllLeftMaskedInt32x16 + OpRotateAllLeftMaskedInt64x2 OpRotateAllLeftMaskedInt64x4 + OpRotateAllLeftMaskedInt64x8 + OpRotateAllLeftMaskedUint32x4 + OpRotateAllLeftMaskedUint32x8 + OpRotateAllLeftMaskedUint32x16 + OpRotateAllLeftMaskedUint64x2 + OpRotateAllLeftMaskedUint64x4 + OpRotateAllLeftMaskedUint64x8 + OpRotateAllLeftUint32x4 + OpRotateAllLeftUint32x8 + OpRotateAllLeftUint32x16 + OpRotateAllLeftUint64x2 + OpRotateAllLeftUint64x4 + OpRotateAllLeftUint64x8 + OpRotateAllRightInt32x4 + OpRotateAllRightInt32x8 + OpRotateAllRightInt32x16 + OpRotateAllRightInt64x2 OpRotateAllRightInt64x4 + OpRotateAllRightInt64x8 + OpRotateAllRightMaskedInt32x4 + OpRotateAllRightMaskedInt32x8 + OpRotateAllRightMaskedInt32x16 + OpRotateAllRightMaskedInt64x2 OpRotateAllRightMaskedInt64x4 + OpRotateAllRightMaskedInt64x8 + OpRotateAllRightMaskedUint32x4 + OpRotateAllRightMaskedUint32x8 + OpRotateAllRightMaskedUint32x16 + OpRotateAllRightMaskedUint64x2 + OpRotateAllRightMaskedUint64x4 + OpRotateAllRightMaskedUint64x8 + OpRotateAllRightUint32x4 + OpRotateAllRightUint32x8 + OpRotateAllRightUint32x16 + OpRotateAllRightUint64x2 + OpRotateAllRightUint64x4 + OpRotateAllRightUint64x8 + OpRoundWithPrecisionFloat32x4 + OpRoundWithPrecisionFloat32x8 + OpRoundWithPrecisionFloat32x16 + OpRoundWithPrecisionFloat64x2 + OpRoundWithPrecisionFloat64x4 + OpRoundWithPrecisionFloat64x8 + 
OpRoundWithPrecisionMaskedFloat32x4 + OpRoundWithPrecisionMaskedFloat32x8 + OpRoundWithPrecisionMaskedFloat32x16 + OpRoundWithPrecisionMaskedFloat64x2 + OpRoundWithPrecisionMaskedFloat64x4 + OpRoundWithPrecisionMaskedFloat64x8 + OpSet128Float32x8 + OpSet128Float64x4 + OpSet128Int8x32 + OpSet128Int16x16 + OpSet128Int32x8 OpSet128Int64x4 + OpSet128Uint8x32 + OpSet128Uint16x16 + OpSet128Uint32x8 + OpSet128Uint64x4 + OpSetElemInt8x16 + OpSetElemInt16x8 + OpSetElemInt32x4 + OpSetElemInt64x2 + OpSetElemUint8x16 + OpSetElemUint16x8 + OpSetElemUint32x4 + OpSetElemUint64x2 + OpShiftAllLeftAndFillUpperFromInt16x8 + OpShiftAllLeftAndFillUpperFromInt16x16 + OpShiftAllLeftAndFillUpperFromInt16x32 + OpShiftAllLeftAndFillUpperFromInt32x4 + OpShiftAllLeftAndFillUpperFromInt32x8 + OpShiftAllLeftAndFillUpperFromInt32x16 + OpShiftAllLeftAndFillUpperFromInt64x2 OpShiftAllLeftAndFillUpperFromInt64x4 - OpShiftAllLeftAndFillUpperFromMaskedInt64x4 - OpShiftAllRightAndFillUpperFromInt64x4 - OpShiftAllRightAndFillUpperFromMaskedInt64x4 - OpRotateAllLeftInt64x8 - OpRotateAllLeftMaskedInt64x8 - OpRotateAllRightInt64x8 - OpRotateAllRightMaskedInt64x8 OpShiftAllLeftAndFillUpperFromInt64x8 + OpShiftAllLeftAndFillUpperFromMaskedInt16x8 + OpShiftAllLeftAndFillUpperFromMaskedInt16x16 + OpShiftAllLeftAndFillUpperFromMaskedInt16x32 + OpShiftAllLeftAndFillUpperFromMaskedInt32x4 + OpShiftAllLeftAndFillUpperFromMaskedInt32x8 + OpShiftAllLeftAndFillUpperFromMaskedInt32x16 + OpShiftAllLeftAndFillUpperFromMaskedInt64x2 + OpShiftAllLeftAndFillUpperFromMaskedInt64x4 OpShiftAllLeftAndFillUpperFromMaskedInt64x8 - OpShiftAllRightAndFillUpperFromInt64x8 - OpShiftAllRightAndFillUpperFromMaskedInt64x8 - OpGetElemInt8x16 - OpSetElemInt8x16 - OpGet128Int8x32 - OpSet128Int8x32 - OpGet128Uint16x16 - OpSet128Uint16x16 - OpShiftAllLeftAndFillUpperFromUint16x16 + OpShiftAllLeftAndFillUpperFromMaskedUint16x8 OpShiftAllLeftAndFillUpperFromMaskedUint16x16 - OpShiftAllRightAndFillUpperFromUint16x16 - OpShiftAllRightAndFillUpperFromMaskedUint16x16 - OpShiftAllLeftAndFillUpperFromUint16x32 OpShiftAllLeftAndFillUpperFromMaskedUint16x32 - OpShiftAllRightAndFillUpperFromUint16x32 - OpShiftAllRightAndFillUpperFromMaskedUint16x32 - OpGetElemUint16x8 - OpSetElemUint16x8 + OpShiftAllLeftAndFillUpperFromMaskedUint32x4 + OpShiftAllLeftAndFillUpperFromMaskedUint32x8 + OpShiftAllLeftAndFillUpperFromMaskedUint32x16 + OpShiftAllLeftAndFillUpperFromMaskedUint64x2 + OpShiftAllLeftAndFillUpperFromMaskedUint64x4 + OpShiftAllLeftAndFillUpperFromMaskedUint64x8 OpShiftAllLeftAndFillUpperFromUint16x8 - OpShiftAllLeftAndFillUpperFromMaskedUint16x8 - OpShiftAllRightAndFillUpperFromUint16x8 - OpShiftAllRightAndFillUpperFromMaskedUint16x8 - OpRotateAllLeftUint32x16 - OpRotateAllLeftMaskedUint32x16 - OpRotateAllRightUint32x16 - OpRotateAllRightMaskedUint32x16 + OpShiftAllLeftAndFillUpperFromUint16x16 + OpShiftAllLeftAndFillUpperFromUint16x32 + OpShiftAllLeftAndFillUpperFromUint32x4 + OpShiftAllLeftAndFillUpperFromUint32x8 OpShiftAllLeftAndFillUpperFromUint32x16 - OpShiftAllLeftAndFillUpperFromMaskedUint32x16 - OpShiftAllRightAndFillUpperFromUint32x16 + OpShiftAllLeftAndFillUpperFromUint64x2 + OpShiftAllLeftAndFillUpperFromUint64x4 + OpShiftAllLeftAndFillUpperFromUint64x8 + OpShiftAllRightAndFillUpperFromInt16x8 + OpShiftAllRightAndFillUpperFromInt16x16 + OpShiftAllRightAndFillUpperFromInt16x32 + OpShiftAllRightAndFillUpperFromInt32x4 + OpShiftAllRightAndFillUpperFromInt32x8 + OpShiftAllRightAndFillUpperFromInt32x16 + OpShiftAllRightAndFillUpperFromInt64x2 + 
OpShiftAllRightAndFillUpperFromInt64x4 + OpShiftAllRightAndFillUpperFromInt64x8 + OpShiftAllRightAndFillUpperFromMaskedInt16x8 + OpShiftAllRightAndFillUpperFromMaskedInt16x16 + OpShiftAllRightAndFillUpperFromMaskedInt16x32 + OpShiftAllRightAndFillUpperFromMaskedInt32x4 + OpShiftAllRightAndFillUpperFromMaskedInt32x8 + OpShiftAllRightAndFillUpperFromMaskedInt32x16 + OpShiftAllRightAndFillUpperFromMaskedInt64x2 + OpShiftAllRightAndFillUpperFromMaskedInt64x4 + OpShiftAllRightAndFillUpperFromMaskedInt64x8 + OpShiftAllRightAndFillUpperFromMaskedUint16x8 + OpShiftAllRightAndFillUpperFromMaskedUint16x16 + OpShiftAllRightAndFillUpperFromMaskedUint16x32 + OpShiftAllRightAndFillUpperFromMaskedUint32x4 + OpShiftAllRightAndFillUpperFromMaskedUint32x8 OpShiftAllRightAndFillUpperFromMaskedUint32x16 - OpGetElemUint32x4 - OpRotateAllLeftUint32x4 - OpRotateAllLeftMaskedUint32x4 - OpRotateAllRightUint32x4 - OpRotateAllRightMaskedUint32x4 - OpSetElemUint32x4 - OpShiftAllLeftAndFillUpperFromUint32x4 - OpShiftAllLeftAndFillUpperFromMaskedUint32x4 + OpShiftAllRightAndFillUpperFromMaskedUint64x2 + OpShiftAllRightAndFillUpperFromMaskedUint64x4 + OpShiftAllRightAndFillUpperFromMaskedUint64x8 + OpShiftAllRightAndFillUpperFromUint16x8 + OpShiftAllRightAndFillUpperFromUint16x16 + OpShiftAllRightAndFillUpperFromUint16x32 OpShiftAllRightAndFillUpperFromUint32x4 - OpShiftAllRightAndFillUpperFromMaskedUint32x4 - OpGet128Uint32x8 - OpRotateAllLeftUint32x8 - OpRotateAllLeftMaskedUint32x8 - OpRotateAllRightUint32x8 - OpRotateAllRightMaskedUint32x8 - OpSet128Uint32x8 - OpShiftAllLeftAndFillUpperFromUint32x8 - OpShiftAllLeftAndFillUpperFromMaskedUint32x8 OpShiftAllRightAndFillUpperFromUint32x8 - OpShiftAllRightAndFillUpperFromMaskedUint32x8 - OpGetElemUint64x2 - OpRotateAllLeftUint64x2 - OpRotateAllLeftMaskedUint64x2 - OpRotateAllRightUint64x2 - OpRotateAllRightMaskedUint64x2 - OpSetElemUint64x2 - OpShiftAllLeftAndFillUpperFromUint64x2 - OpShiftAllLeftAndFillUpperFromMaskedUint64x2 + OpShiftAllRightAndFillUpperFromUint32x16 OpShiftAllRightAndFillUpperFromUint64x2 - OpShiftAllRightAndFillUpperFromMaskedUint64x2 - OpGet128Uint64x4 - OpRotateAllLeftUint64x4 - OpRotateAllLeftMaskedUint64x4 - OpRotateAllRightUint64x4 - OpRotateAllRightMaskedUint64x4 - OpSet128Uint64x4 - OpShiftAllLeftAndFillUpperFromUint64x4 - OpShiftAllLeftAndFillUpperFromMaskedUint64x4 OpShiftAllRightAndFillUpperFromUint64x4 - OpShiftAllRightAndFillUpperFromMaskedUint64x4 - OpRotateAllLeftUint64x8 - OpRotateAllLeftMaskedUint64x8 - OpRotateAllRightUint64x8 - OpRotateAllRightMaskedUint64x8 - OpShiftAllLeftAndFillUpperFromUint64x8 - OpShiftAllLeftAndFillUpperFromMaskedUint64x8 OpShiftAllRightAndFillUpperFromUint64x8 - OpShiftAllRightAndFillUpperFromMaskedUint64x8 - OpGaloisFieldAffineTransformUint8x16 - OpGaloisFieldAffineTransformInverseUint8x16 - OpGaloisFieldAffineTransformInverseMaskedUint8x16 - OpGaloisFieldAffineTransformMaskedUint8x16 - OpGetElemUint8x16 - OpSetElemUint8x16 - OpGaloisFieldAffineTransformUint8x32 - OpGaloisFieldAffineTransformInverseUint8x32 - OpGaloisFieldAffineTransformInverseMaskedUint8x32 - OpGaloisFieldAffineTransformMaskedUint8x32 - OpGet128Uint8x32 - OpSet128Uint8x32 - OpGaloisFieldAffineTransformUint8x64 - OpGaloisFieldAffineTransformInverseUint8x64 - OpGaloisFieldAffineTransformInverseMaskedUint8x64 - OpGaloisFieldAffineTransformMaskedUint8x64 + OpTruncWithPrecisionFloat32x4 + OpTruncWithPrecisionFloat32x8 + OpTruncWithPrecisionFloat32x16 + OpTruncWithPrecisionFloat64x2 + OpTruncWithPrecisionFloat64x4 + OpTruncWithPrecisionFloat64x8 + 
OpTruncWithPrecisionMaskedFloat32x4 + OpTruncWithPrecisionMaskedFloat32x8 + OpTruncWithPrecisionMaskedFloat32x16 + OpTruncWithPrecisionMaskedFloat64x2 + OpTruncWithPrecisionMaskedFloat64x4 + OpTruncWithPrecisionMaskedFloat64x8 ) var opcodeTable = [...]opInfo{ @@ -19006,30 +19006,29 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPS512", + name: "VADDPD128", argLen: 2, commutative: true, - asm: x86.AVADDPS, + asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VADDPSMasked512", - argLen: 3, + name: "VADDPD256", + argLen: 2, commutative: true, - asm: x86.AVADDPS, + asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19037,12 +19036,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PS512", - argLen: 1, - asm: x86.AVRCP14PS, + name: "VADDPD512", + argLen: 2, + commutative: true, + asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -19050,13 +19051,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PSMasked512", - argLen: 2, - asm: x86.AVRCP14PS, + name: "VADDPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19064,26 +19067,31 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PS512", - argLen: 1, - asm: x86.AVRSQRT14PS, + name: "VADDPDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VRSQRT14PSMasked512", - argLen: 2, - asm: x86.AVRSQRT14PS, + name: "VADDPDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19091,13 +19099,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCOMPRESSPSMasked512", - argLen: 2, - asm: x86.AVCOMPRESSPS, + name: "VADDPS128", + argLen: 2, + commutative: true, + asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19105,9 +19114,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPS512", - argLen: 2, - asm: x86.AVDIVPS, + name: "VADDPS256", + argLen: 2, + commutative: true, + asm: x86.AVADDPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VADDPS512", + argLen: 2, + commutative: true, + asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -19119,9 +19144,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPSMasked512", - argLen: 3, - asm: x86.AVDIVPS, + name: "VADDPSMasked128", + argLen: 3, + commutative: true, + asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -19134,15 +19160,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADD213PS, + name: "VADDPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19150,16 +19176,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PSMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADD213PS, + name: "VADDPSMasked512", + argLen: 3, + commutative: true, + asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19167,15 +19192,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB213PS, + name: "VADDSUBPD128", + argLen: 2, + asm: x86.AVADDSUBPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19183,16 +19206,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PSMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADDSUB213PS, + name: "VADDSUBPD256", + argLen: 2, + asm: x86.AVADDSUBPD, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19200,15 +19220,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD213PS, + name: "VADDSUBPS128", + argLen: 2, + asm: x86.AVADDSUBPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19216,16 +19234,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PSMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUBADD213PS, + name: "VADDSUBPS256", + argLen: 2, + asm: x86.AVADDSUBPS, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VCOMPRESSPDMasked128", + argLen: 2, + asm: x86.AVCOMPRESSPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19233,30 +19262,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPS512", - argLen: 2, - commutative: true, - asm: x86.AVMAXPS, + name: "VCOMPRESSPDMasked256", + argLen: 2, + asm: x86.AVCOMPRESSPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VMAXPSMasked512", - argLen: 3, - commutative: true, - asm: x86.AVMAXPS, + name: "VCOMPRESSPDMasked512", + argLen: 2, + asm: x86.AVCOMPRESSPD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19264,30 +19290,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPS512", - argLen: 2, - commutative: true, - asm: x86.AVMINPS, + name: "VCOMPRESSPSMasked128", + argLen: 2, + asm: x86.AVCOMPRESSPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VMINPSMasked512", - argLen: 3, - commutative: true, - asm: x86.AVMINPS, + name: "VCOMPRESSPSMasked256", + argLen: 2, + asm: x86.AVCOMPRESSPS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19295,24 +19318,51 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPS512", - argLen: 2, - commutative: true, - asm: x86.AVMULPS, + name: "VCOMPRESSPSMasked512", + argLen: 2, + asm: x86.AVCOMPRESSPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VSCALEFPS512", + name: "VDIVPD128", argLen: 2, - asm: x86.AVSCALEFPS, + 
asm: x86.AVDIVPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VDIVPD256", + argLen: 2, + asm: x86.AVDIVPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VDIVPD512", + argLen: 2, + asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -19324,9 +19374,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPSMasked512", + name: "VDIVPDMasked128", argLen: 3, - asm: x86.AVSCALEFPS, + asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -19339,10 +19389,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPSMasked512", - argLen: 3, - commutative: true, - asm: x86.AVMULPS, + name: "VDIVPDMasked256", + argLen: 3, + asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -19355,26 +19404,28 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPS512", - argLen: 1, - asm: x86.AVSQRTPS, + name: "VDIVPDMasked512", + argLen: 3, + asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VSQRTPSMasked512", + name: "VDIVPS128", argLen: 2, - asm: x86.AVSQRTPS, + asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19382,23 +19433,67 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPS512", + name: "VDIVPS256", argLen: 2, - asm: x86.AVSUBPS, + asm: x86.AVDIVPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VDIVPS512", + argLen: 2, + asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 
X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VDIVPSMasked128", + argLen: 3, + asm: x86.AVDIVPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VDIVPSMasked256", + argLen: 3, + asm: x86.AVDIVPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VSUBPSMasked512", + name: "VDIVPSMasked512", argLen: 3, - asm: x86.AVSUBPS, + asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -19411,14 +19506,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPS128", - argLen: 2, - commutative: true, - asm: x86.AVADDPS, + name: "VFMADD213PD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19426,15 +19522,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPSMasked128", - argLen: 3, - commutative: true, - asm: x86.AVADDPS, + name: "VFMADD213PD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19442,13 +19538,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDSUBPS128", - argLen: 2, - asm: x86.AVADDSUBPS, + name: "VFMADD213PD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19456,12 +19554,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCPPS128", - argLen: 1, - asm: x86.AVRCPPS, + name: "VFMADD213PDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 
X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19469,13 +19571,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PSMasked128", - argLen: 2, - asm: x86.AVRCP14PS, + name: "VFMADD213PDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19483,12 +19588,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRTPS128", - argLen: 1, - asm: x86.AVRSQRTPS, + name: "VFMADD213PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19496,13 +19605,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PSMasked128", - argLen: 2, - asm: x86.AVRSQRT14PS, + name: "VFMADD213PS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD213PS, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19510,13 +19621,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCOMPRESSPSMasked128", - argLen: 2, - asm: x86.AVCOMPRESSPS, + name: "VFMADD213PS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD213PS, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19524,13 +19637,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPS128", - argLen: 2, - asm: x86.AVDIVPS, + name: "VFMADD213PS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD213PS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19538,14 +19653,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPSMasked128", - argLen: 3, - asm: x86.AVDIVPS, + name: "VFMADD213PSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADD213PS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19553,15 +19670,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PS128", - argLen: 3, + name: "VFMADD213PSMasked256", + argLen: 4, resultInArg0: true, asm: x86.AVFMADD213PS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19569,7 +19687,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PSMasked128", + name: "VFMADD213PSMasked512", argLen: 4, resultInArg0: true, asm: x86.AVFMADD213PS, @@ -19586,10 +19704,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PS128", + name: "VFMADDSUB213PD128", argLen: 3, resultInArg0: true, - asm: x86.AVFMADDSUB213PS, + asm: x86.AVFMADDSUB213PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19602,16 +19720,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PSMasked128", - argLen: 4, + name: "VFMADDSUB213PD256", + argLen: 3, resultInArg0: true, - asm: x86.AVFMADDSUB213PS, + asm: x86.AVFMADDSUB213PD, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19619,10 +19736,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PS128", + name: "VFMADDSUB213PD512", argLen: 3, resultInArg0: true, - asm: x86.AVFMSUBADD213PS, + asm: x86.AVFMADDSUB213PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19635,10 +19752,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PSMasked128", + name: "VFMADDSUB213PDMasked128", argLen: 4, resultInArg0: true, - asm: x86.AVFMSUBADD213PS, + asm: x86.AVFMADDSUB213PD, reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -19652,14 
+19769,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPS128", - argLen: 2, - commutative: true, - asm: x86.AVMAXPS, + name: "VFMADDSUB213PDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADDSUB213PD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19667,15 +19786,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPSMasked128", - argLen: 3, - commutative: true, - asm: x86.AVMAXPS, + name: "VFMADDSUB213PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADDSUB213PD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19683,14 +19803,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPS128", - argLen: 2, - commutative: true, - asm: x86.AVMINPS, + name: "VFMADDSUB213PS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADDSUB213PS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19698,15 +19819,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPSMasked128", - argLen: 3, - commutative: true, - asm: x86.AVMINPS, + name: "VFMADDSUB213PS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADDSUB213PS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19714,14 +19835,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPS128", - argLen: 2, - commutative: true, - asm: x86.AVMULPS, + name: "VFMADDSUB213PS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADDSUB213PS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19729,28 +19851,33 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPS128", - argLen: 2, - asm: x86.AVSCALEFPS, + name: "VFMADDSUB213PSMasked128", + argLen: 4, 
+ resultInArg0: true, + asm: x86.AVFMADDSUB213PS, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VSCALEFPSMasked128", - argLen: 3, - asm: x86.AVSCALEFPS, + name: "VFMADDSUB213PSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADDSUB213PS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19758,15 +19885,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPSMasked128", - argLen: 3, - commutative: true, - asm: x86.AVMULPS, + name: "VFMADDSUB213PSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADDSUB213PS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19774,13 +19902,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VHADDPS128", - argLen: 2, - asm: x86.AVHADDPS, + name: "VFMSUBADD213PD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19788,13 +19918,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VHSUBPS128", - argLen: 2, - asm: x86.AVHSUBPS, + name: "VFMSUBADD213PD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19802,12 +19934,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPS128", - argLen: 1, - asm: x86.AVSQRTPS, + name: "VFMSUBADD213PD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19815,13 +19950,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPSMasked128", - argLen: 2, - asm: x86.AVSQRTPS, + name: "VFMSUBADD213PDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19829,13 +19967,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPS128", - argLen: 2, - asm: x86.AVSUBPS, + name: "VFMSUBADD213PDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19843,14 +19984,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPSMasked128", - argLen: 3, - asm: x86.AVSUBPS, + name: "VFMSUBADD213PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19858,14 +20001,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPS256", - argLen: 2, - commutative: true, - asm: x86.AVADDPS, + name: "VFMSUBADD213PS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD213PS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19873,15 +20017,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPSMasked256", - argLen: 3, - commutative: true, - asm: x86.AVADDPS, + name: "VFMSUBADD213PS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD213PS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19889,13 +20033,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDSUBPS256", - argLen: 2, - asm: x86.AVADDSUBPS, + name: "VFMSUBADD213PS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD213PS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19903,12 +20049,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCPPS256", - argLen: 1, - asm: x86.AVRCPPS, + name: "VFMSUBADD213PSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD213PS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19916,13 +20066,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PSMasked256", - argLen: 2, - asm: x86.AVRCP14PS, + name: "VFMSUBADD213PSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD213PS, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19930,12 +20083,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRTPS256", - argLen: 1, - asm: x86.AVRSQRTPS, + name: "VFMSUBADD213PSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD213PS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19943,51 +20100,51 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PSMasked256", + name: "VGF2P8MULB128", argLen: 2, - asm: x86.AVRSQRT14PS, + asm: x86.AVGF2P8MULB, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VCOMPRESSPSMasked256", + name: "VGF2P8MULB256", argLen: 2, - asm: x86.AVCOMPRESSPS, + asm: x86.AVGF2P8MULB, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VDIVPS256", + name: "VGF2P8MULB512", argLen: 2, - asm: x86.AVDIVPS, + asm: x86.AVGF2P8MULB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VDIVPSMasked256", + name: "VGF2P8MULBMasked128", argLen: 3, - asm: x86.AVDIVPS, + asm: x86.AVGF2P8MULB, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -20000,15 +20157,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADD213PS, + name: "VGF2P8MULBMasked256", + argLen: 3, + asm: x86.AVGF2P8MULB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20016,16 +20172,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADD213PS, + name: "VGF2P8MULBMasked512", + argLen: 3, + asm: x86.AVGF2P8MULB, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20033,15 +20187,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB213PS, + name: "VHADDPD128", + argLen: 2, + asm: 
x86.AVHADDPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20049,16 +20201,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADDSUB213PS, + name: "VHADDPD256", + argLen: 2, + asm: x86.AVHADDPD, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20066,15 +20215,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD213PS, + name: "VHADDPS128", + argLen: 2, + asm: x86.AVHADDPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20082,16 +20229,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUBADD213PS, + name: "VHADDPS256", + argLen: 2, + asm: x86.AVHADDPS, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20099,10 +20243,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPS256", - argLen: 2, - commutative: true, - asm: x86.AVMAXPS, + name: "VHSUBPD128", + argLen: 2, + asm: x86.AVHSUBPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20114,15 +20257,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPSMasked256", - argLen: 3, - commutative: true, - asm: x86.AVMAXPS, + name: "VHSUBPD256", + argLen: 2, + asm: x86.AVHSUBPD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20130,10 +20271,23 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPS256", - argLen: 2, - commutative: true, - asm: x86.AVMINPS, + name: 
"VHSUBPS128", + argLen: 2, + asm: x86.AVHSUBPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VHSUBPS256", + argLen: 2, + asm: x86.AVHSUBPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20145,15 +20299,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPSMasked256", - argLen: 3, + name: "VMAXPD128", + argLen: 2, commutative: true, - asm: x86.AVMINPS, + asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20161,10 +20314,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPS256", + name: "VMAXPD256", argLen: 2, commutative: true, - asm: x86.AVMULPS, + asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20176,9 +20329,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPS256", - argLen: 2, - asm: x86.AVSCALEFPS, + name: "VMAXPD512", + argLen: 2, + commutative: true, + asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -20190,9 +20344,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPSMasked256", - argLen: 3, - asm: x86.AVSCALEFPS, + name: "VMAXPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -20205,10 +20360,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPSMasked256", + name: "VMAXPDMasked256", argLen: 3, commutative: true, - asm: x86.AVMULPS, + asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -20221,13 +20376,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VHADDPS256", - argLen: 2, - asm: x86.AVHADDPS, + name: "VMAXPDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20235,9 +20392,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VHSUBPS256", - argLen: 2, - asm: x86.AVHSUBPS, + name: "VMAXPS128", + argLen: 2, + commutative: true, + asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20249,12 +20407,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPS256", - argLen: 1, - asm: x86.AVSQRTPS, + name: "VMAXPS256", + argLen: 2, + commutative: true, + asm: 
x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20262,27 +20422,30 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPSMasked256", - argLen: 2, - asm: x86.AVSQRTPS, + name: "VMAXPS512", + argLen: 2, + commutative: true, + asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VSUBPS256", - argLen: 2, - asm: x86.AVSUBPS, + name: "VMAXPSMasked128", + argLen: 3, + commutative: true, + asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20290,9 +20453,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPSMasked256", - argLen: 3, - asm: x86.AVSUBPS, + name: "VMAXPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -20305,14 +20469,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPD128", - argLen: 2, + name: "VMAXPSMasked512", + argLen: 3, commutative: true, - asm: x86.AVADDPD, + asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20320,15 +20485,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPDMasked128", - argLen: 3, + name: "VMINPD128", + argLen: 2, commutative: true, - asm: x86.AVADDPD, + asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20336,9 +20500,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDSUBPD128", - argLen: 2, - asm: x86.AVADDSUBPD, + name: "VMINPD256", + 
argLen: 2, + commutative: true, + asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20350,12 +20515,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PD128", - argLen: 1, - asm: x86.AVRCP14PD, + name: "VMINPD512", + argLen: 2, + commutative: true, + asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -20363,13 +20530,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PDMasked128", - argLen: 2, - asm: x86.AVRCP14PD, + name: "VMINPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20377,26 +20546,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PD128", - argLen: 1, - asm: x86.AVRSQRT14PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VRSQRT14PDMasked128", - argLen: 2, - asm: x86.AVRSQRT14PD, + name: "VMINPDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20404,13 +20562,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCOMPRESSPDMasked128", - argLen: 2, - asm: x86.AVCOMPRESSPD, + name: "VMINPDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20418,9 +20578,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPD128", - argLen: 2, - asm: x86.AVDIVPD, + name: "VMINPS128", + argLen: 2, + commutative: true, + asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20432,14 +20593,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPDMasked128", - argLen: 3, - asm: x86.AVDIVPD, + name: "VMINPS256", + argLen: 2, + commutative: true, + asm: x86.AVMINPS, reg: regInfo{ inputs: 
[]inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20447,32 +20608,30 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADD213PD, + name: "VMINPS512", + argLen: 2, + commutative: true, + asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VFMADD213PDMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADD213PD, + name: "VMINPSMasked128", + argLen: 3, + commutative: true, + asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20480,15 +20639,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB213PD, + name: "VMINPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20496,16 +20655,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PDMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADDSUB213PD, + name: "VMINPSMasked512", + argLen: 3, + commutative: true, + asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20513,15 
+20671,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD213PD, + name: "VMULPD128", + argLen: 2, + commutative: true, + asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20529,16 +20686,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PDMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUBADD213PD, + name: "VMULPD256", + argLen: 2, + commutative: true, + asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20546,25 +20701,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPD128", + name: "VMULPD512", argLen: 2, commutative: true, - asm: x86.AVMAXPD, + asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VMAXPDMasked128", + name: "VMULPDMasked128", argLen: 3, commutative: true, - asm: x86.AVMAXPD, + asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -20577,14 +20732,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPD128", - argLen: 2, + name: "VMULPDMasked256", + argLen: 3, commutative: true, - asm: x86.AVMINPD, + asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20592,10 +20748,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPDMasked128", + name: "VMULPDMasked512", argLen: 3, commutative: true, - asm: x86.AVMINPD, + asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -20608,10 +20764,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPD128", + name: "VMULPS128", argLen: 2, commutative: true, - asm: x86.AVMULPD, + asm: x86.AVMULPS, 
reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20623,39 +20779,40 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPD128", - argLen: 2, - asm: x86.AVSCALEFPD, + name: "VMULPS256", + argLen: 2, + commutative: true, + asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VSCALEFPDMasked128", - argLen: 3, - asm: x86.AVSCALEFPD, + name: "VMULPS512", + argLen: 2, + commutative: true, + asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VMULPDMasked128", + name: "VMULPSMasked128", argLen: 3, commutative: true, - asm: x86.AVMULPD, + asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -20668,13 +20825,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VHADDPD128", - argLen: 2, - asm: x86.AVHADDPD, + name: "VMULPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20682,13 +20841,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VHSUBPD128", - argLen: 2, - asm: x86.AVHSUBPD, + name: "VMULPSMasked512", + argLen: 3, + commutative: true, + asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20696,9 +20857,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPD128", + 
name: "VPABSB128", argLen: 1, - asm: x86.AVSQRTPD, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20709,13 +20870,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPDMasked128", - argLen: 2, - asm: x86.AVSQRTPD, + name: "VPABSB256", + argLen: 1, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20723,28 +20883,26 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPD128", - argLen: 2, - asm: x86.AVSUBPD, + name: "VPABSB512", + argLen: 1, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VSUBPDMasked128", - argLen: 3, - asm: x86.AVSUBPD, + name: "VPABSBMasked128", + argLen: 2, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20752,14 +20910,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPD256", - argLen: 2, - commutative: true, - asm: x86.AVADDPD, + name: "VPABSBMasked256", + argLen: 2, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20767,15 +20924,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVADDPD, + name: "VPABSBMasked512", + argLen: 2, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20783,13 +20938,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDSUBPD256", - argLen: 2, - asm: x86.AVADDSUBPD, + name: "VPABSD128", + argLen: 1, + asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ 
-20797,26 +20951,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PD256", + name: "VPABSD256", argLen: 1, - asm: x86.AVRCP14PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VRCP14PDMasked256", - argLen: 2, - asm: x86.AVRCP14PD, + asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20824,9 +20964,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PD256", + name: "VPABSD512", argLen: 1, - asm: x86.AVRSQRT14PD, + asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -20837,9 +20977,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PDMasked256", + name: "VPABSDMasked128", argLen: 2, - asm: x86.AVRSQRT14PD, + asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -20851,9 +20991,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCOMPRESSPDMasked256", + name: "VPABSDMasked256", argLen: 2, - asm: x86.AVCOMPRESSPD, + asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -20865,28 +21005,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPD256", + name: "VPABSDMasked512", argLen: 2, - asm: x86.AVDIVPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VDIVPDMasked256", - argLen: 3, - asm: x86.AVDIVPD, + asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20894,65 +21019,52 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PD256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADD213PD, + name: "VPABSQ128", + argLen: 1, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: 
"VFMADD213PDMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADD213PD, + name: "VPABSQ256", + argLen: 1, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VFMADDSUB213PD256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB213PD, + name: "VPABSQ512", + argLen: 1, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VFMADDSUB213PDMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADDSUB213PD, + name: "VPABSQMasked128", + argLen: 2, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20960,15 +21072,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PD256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD213PD, + name: "VPABSQMasked256", + argLen: 2, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20976,16 +21086,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PDMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUBADD213PD, + name: "VPABSQMasked512", + argLen: 2, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 @@ -20993,14 +21100,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPD256", - argLen: 2, - commutative: true, - asm: x86.AVMAXPD, + name: "VPABSW128", + argLen: 1, + asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21008,15 +21113,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVMAXPD, + name: "VPABSW256", + argLen: 1, + asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21024,30 +21126,26 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPD256", - argLen: 2, - commutative: true, - asm: x86.AVMINPD, + name: "VPABSW512", + argLen: 1, + asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VMINPDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVMINPD, + name: "VPABSWMasked128", + argLen: 2, + asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21055,14 +21153,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPD256", - argLen: 2, - commutative: true, - asm: x86.AVMULPD, + name: "VPABSWMasked256", + argLen: 2, + asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21070,28 +21167,28 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPD256", + name: "VPABSWMasked512", argLen: 2, - asm: x86.AVSCALEFPD, + asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VSCALEFPDMasked256", - argLen: 3, - asm: x86.AVSCALEFPD, + name: "VPADDB128", + argLen: 2, + commutative: true, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21099,15 +21196,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPDMasked256", - argLen: 3, + name: "VPADDB256", + argLen: 2, commutative: true, - asm: x86.AVMULPD, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21115,27 +21211,30 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VHADDPD256", - argLen: 2, - asm: x86.AVHADDPD, + name: "VPADDB512", + argLen: 2, + commutative: true, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VHSUBPD256", - argLen: 2, - asm: x86.AVHSUBPD, + name: "VPADDBMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21143,12 +21242,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPD256", - argLen: 1, - asm: x86.AVSQRTPD, + name: "VPADDBMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21156,13 +21258,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPDMasked256", - argLen: 2, - asm: x86.AVSQRTPD, + name: "VPADDBMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21170,9 +21274,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPD256", - argLen: 2, - asm: x86.AVSUBPD, + name: "VPADDD128", + argLen: 2, + commutative: true, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21184,14 +21289,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPDMasked256", - argLen: 3, - asm: x86.AVSUBPD, + name: "VPADDD256", + argLen: 2, + commutative: true, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21199,10 +21304,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPD512", + name: "VPADDD512", argLen: 2, commutative: true, - asm: x86.AVADDPD, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -21214,10 +21319,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPDMasked512", + name: "VPADDDMasked128", argLen: 3, commutative: true, - asm: x86.AVADDPD, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -21230,26 +21335,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PD512", - argLen: 1, - asm: x86.AVRCP14PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VRCP14PDMasked512", - argLen: 2, - asm: x86.AVRCP14PD, + name: "VPADDDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21257,26 +21351,30 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PD512", - argLen: 1, - asm: x86.AVRSQRT14PD, + name: "VPADDDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VRSQRT14PDMasked512", - argLen: 2, - asm: x86.AVRSQRT14PD, + name: "VPADDQ128", + argLen: 2, + commutative: true, + asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21284,13 +21382,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCOMPRESSPDMasked512", - argLen: 2, - asm: x86.AVCOMPRESSPD, + name: "VPADDQ256", + argLen: 2, + commutative: true, + asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21298,9 +21397,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPD512", - argLen: 2, - asm: x86.AVDIVPD, + name: "VPADDQ512", + argLen: 2, + commutative: true, + asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -21312,9 +21412,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPDMasked512", - argLen: 3, - asm: x86.AVDIVPD, + name: "VPADDQMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -21327,15 +21428,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PD512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADD213PD, + name: "VPADDQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21343,16 +21444,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADD213PD, + name: "VPADDQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21360,15 +21460,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PD512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB213PD, + name: "VPADDSB128", + argLen: 2, + commutative: true, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21376,16 +21475,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADDSUB213PD, + name: "VPADDSB256", + argLen: 2, + commutative: true, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21393,32 +21490,30 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PD512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD213PD, + name: "VPADDSB512", + argLen: 2, + commutative: true, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VFMSUBADD213PDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUBADD213PD, + name: "VPADDSBMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21426,25 +21521,26 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPD512", - argLen: 2, + name: "VPADDSBMasked256", + argLen: 3, commutative: true, - asm: x86.AVMAXPD, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VMAXPDMasked512", + name: "VPADDSBMasked512", argLen: 3, commutative: true, - asm: x86.AVMAXPD, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -21457,30 +21553,29 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPD512", + name: "VPADDSW128", argLen: 2, commutative: true, - asm: x86.AVMINPD, + asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VMINPDMasked512", - argLen: 3, + name: "VPADDSW256", + argLen: 2, commutative: true, - asm: x86.AVMINPD, + asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21488,10 +21583,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPD512", + name: "VPADDSW512", argLen: 2, commutative: true, - asm: x86.AVMULPD, + asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -21503,23 +21598,26 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPD512", - argLen: 2, - asm: x86.AVSCALEFPD, + name: "VPADDSWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 
X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VSCALEFPDMasked512", - argLen: 3, - asm: x86.AVSCALEFPD, + name: "VPADDSWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -21532,10 +21630,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPDMasked512", + name: "VPADDSWMasked512", argLen: 3, commutative: true, - asm: x86.AVMULPD, + asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -21548,26 +21646,29 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPD512", - argLen: 1, - asm: x86.AVSQRTPD, + name: "VPADDW128", + argLen: 2, + commutative: true, + asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VSQRTPDMasked512", - argLen: 2, - asm: x86.AVSQRTPD, + name: "VPADDW256", + argLen: 2, + commutative: true, + asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21575,9 +21676,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPD512", - argLen: 2, - asm: x86.AVSUBPD, + name: "VPADDW512", + argLen: 2, + commutative: true, + asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -21589,9 +21691,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPDMasked512", - argLen: 3, - asm: x86.AVSUBPD, + name: "VPADDWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -21604,12 +21707,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSW256", - argLen: 1, - asm: x86.AVPABSW, + name: "VPADDWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21617,13 +21723,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSWMasked256", - argLen: 2, - asm: x86.AVPABSW, + name: "VPADDWMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21631,10 +21739,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDW256", + name: "VPAND128", argLen: 2, commutative: true, - asm: x86.AVPADDW, + asm: x86.AVPAND, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21646,29 +21754,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDWMasked256", - argLen: 3, + name: "VPAND256", + argLen: 2, commutative: true, - asm: x86.AVPADDW, + asm: x86.AVPAND, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPCOMPRESSWMasked256", - argLen: 2, - asm: x86.AVPCOMPRESSW, - reg: regInfo{ - inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21676,28 +21769,30 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQW256", + name: "VPANDD512", argLen: 2, commutative: true, - asm: x86.AVPCMPEQW, + asm: x86.AVPANDD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPCMPGTW256", - argLen: 2, - asm: x86.AVPCMPGTW, + name: "VPANDDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPANDD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21705,14 +21800,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSW256", - argLen: 2, + name: "VPANDDMasked256", + argLen: 3, commutative: true, - asm: x86.AVPMAXSW, + asm: x86.AVPANDD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21720,10 +21816,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSWMasked256", + name: "VPANDDMasked512", argLen: 3, commutative: true, - asm: x86.AVPMAXSW, + asm: x86.AVPANDD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -21736,10 +21832,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSW256", - argLen: 2, - commutative: true, - asm: x86.AVPMINSW, + name: "VPANDN128", + argLen: 2, + asm: x86.AVPANDN, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21751,15 +21846,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSWMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMINSW, + name: "VPANDN256", + argLen: 2, + asm: x86.AVPANDN, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21767,25 +21860,23 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHW256", - argLen: 2, - commutative: true, - asm: x86.AVPMULHW, + name: "VPANDND512", + argLen: 2, + asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPMULHWMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMULHW, + name: "VPANDNDMasked128", + argLen: 3, + asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -21798,14 +21889,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLW256", - argLen: 2, - commutative: true, - asm: x86.AVPMULLW, + name: "VPANDNDMasked256", + argLen: 3, + asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21813,10 +21904,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLWMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMULLW, + name: "VPANDNDMasked512", + argLen: 3, + asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -21829,23 +21919,23 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMADDWD256", + name: "VPANDNQ512", argLen: 2, - asm: 
x86.AVPMADDWD, + asm: x86.AVPANDNQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPMADDWDMasked256", + name: "VPANDNQMasked128", argLen: 3, - asm: x86.AVPMADDWD, + asm: x86.AVPANDNQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -21858,13 +21948,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHADDW256", - argLen: 2, - asm: x86.AVPHADDW, + name: "VPANDNQMasked256", + argLen: 3, + asm: x86.AVPANDNQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21872,13 +21963,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHSUBW256", - argLen: 2, - asm: x86.AVPHSUBW, + name: "VPANDNQMasked512", + argLen: 3, + asm: x86.AVPANDNQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21886,12 +21978,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTW256", - argLen: 1, - asm: x86.AVPOPCNTW, + name: "VPANDQ512", + argLen: 2, + commutative: true, + asm: x86.AVPANDQ, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -21899,13 +21993,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTWMasked256", - argLen: 2, - asm: x86.AVPOPCNTW, + name: "VPANDQMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPANDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21913,14 +22009,15 @@ var opcodeTable = 
[...]opInfo{ }, }, { - name: "VPADDSW256", - argLen: 2, + name: "VPANDQMasked256", + argLen: 3, commutative: true, - asm: x86.AVPADDSW, + asm: x86.AVPANDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21928,10 +22025,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSWMasked256", + name: "VPANDQMasked512", argLen: 3, commutative: true, - asm: x86.AVPADDSW, + asm: x86.AVPANDQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -21944,9 +22041,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHADDSW256", - argLen: 2, - asm: x86.AVPHADDSW, + name: "VPAVGB128", + argLen: 2, + commutative: true, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21958,9 +22056,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHSUBSW256", - argLen: 2, - asm: x86.AVPHSUBSW, + name: "VPAVGB256", + argLen: 2, + commutative: true, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21972,23 +22071,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSW256", - argLen: 2, - asm: x86.AVPSUBSW, + name: "VPAVGB512", + argLen: 2, + commutative: true, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSUBSWMasked256", - argLen: 3, - asm: x86.AVPSUBSW, + name: "VPAVGBMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -22001,13 +22102,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLW256", - argLen: 2, - asm: x86.AVPSLLW, + name: "VPAVGBMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22015,24 +22118,26 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLWMasked256", - argLen: 3, - asm: x86.AVPSLLW, + name: "VPAVGBMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ {2, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSRAW256", - argLen: 2, - asm: x86.AVPSRAW, + name: "VPAVGW128", + argLen: 2, + commutative: true, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22044,24 +22149,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAWMasked256", - argLen: 3, - asm: x86.AVPSRAW, + name: "VPAVGW256", + argLen: 2, + commutative: true, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSLLVW256", - argLen: 2, - asm: x86.AVPSLLVW, + name: "VPAVGW512", + argLen: 2, + commutative: true, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -22073,15 +22179,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVW256", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHLDVW, + name: "VPAVGWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22089,16 +22195,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVWMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHLDVW, + name: "VPAVGWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22106,9 +22211,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVWMasked256", - argLen: 3, 
- asm: x86.AVPSLLVW, + name: "VPAVGWMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -22121,29 +22227,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVW256", - argLen: 2, - asm: x86.AVPSRAVW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VPSHRDVW256", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHRDVW, + name: "VPCMPEQB128", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22151,16 +22242,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVWMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHRDVW, + name: "VPCMPEQB256", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQB, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22168,24 +22257,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVWMasked256", - argLen: 3, - asm: x86.AVPSRAVW, + name: "VPCMPEQB512", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQB, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSIGNW256", - argLen: 2, - asm: x86.AVPSIGNW, + name: "VPCMPEQD128", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22197,9 +22287,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBW256", - argLen: 2, - asm: x86.AVPSUBW, + name: "VPCMPEQD256", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22211,41 +22302,44 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBWMasked256", - argLen: 3, - asm: x86.AVPSUBW, + name: "VPCMPEQD512", + argLen: 2, + commutative: true, + 
asm: x86.AVPCMPEQD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPABSW512", - argLen: 1, - asm: x86.AVPABSW, + name: "VPCMPEQQ128", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPABSWMasked512", - argLen: 2, - asm: x86.AVPABSW, + name: "VPCMPEQQ256", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22253,30 +22347,29 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDW512", + name: "VPCMPEQQ512", argLen: 2, commutative: true, - asm: x86.AVPADDW, + asm: x86.AVPCMPEQQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPADDWMasked512", - argLen: 3, + name: "VPCMPEQW128", + argLen: 2, commutative: true, - asm: x86.AVPADDW, + asm: x86.AVPCMPEQW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22284,13 +22377,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCOMPRESSWMasked512", - argLen: 2, - asm: x86.AVPCOMPRESSW, + name: "VPCMPEQW256", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQW, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 
K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22313,75 +22407,69 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPGTW512", + name: "VPCMPGTB128", argLen: 2, - asm: x86.AVPCMPGTW, + asm: x86.AVPCMPGTB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPMAXSW512", - argLen: 2, - commutative: true, - asm: x86.AVPMAXSW, + name: "VPCMPGTB256", + argLen: 2, + asm: x86.AVPCMPGTB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPMAXSWMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMAXSW, + name: "VPCMPGTB512", + argLen: 2, + asm: x86.AVPCMPGTB, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPMINSW512", - argLen: 2, - commutative: true, - asm: x86.AVPMINSW, + name: "VPCMPGTD128", + argLen: 2, + asm: x86.AVPCMPGTD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPMINSWMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMINSW, + name: "VPCMPGTD256", + argLen: 2, + asm: x86.AVPCMPGTD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22389,30 +22477,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHW512", - argLen: 2, - commutative: true, - asm: x86.AVPMULHW, + name: "VPCMPGTD512", + argLen: 2, + asm: x86.AVPCMPGTD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPMULHWMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMULHW, + name: "VPCMPGTQ128", + argLen: 2, + asm: x86.AVPCMPGTQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22420,59 +22505,55 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLW512", - argLen: 2, - commutative: true, - asm: x86.AVPMULLW, + name: "VPCMPGTQ256", + argLen: 2, + asm: x86.AVPCMPGTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPMULLWMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMULLW, + name: "VPCMPGTQ512", + argLen: 2, + asm: x86.AVPCMPGTQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPMADDWD512", + name: "VPCMPGTW128", argLen: 2, - asm: x86.AVPMADDWD, + asm: x86.AVPCMPGTW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 
X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, - { - name: "VPMADDWDMasked512", - argLen: 3, - asm: x86.AVPMADDWD, + { + name: "VPCMPGTW256", + argLen: 2, + asm: x86.AVPCMPGTW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22480,22 +22561,23 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTW512", - argLen: 1, - asm: x86.AVPOPCNTW, + name: "VPCMPGTW512", + argLen: 2, + asm: x86.AVPCMPGTW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPOPCNTWMasked512", + name: "VPCOMPRESSBMasked128", argLen: 2, - asm: x86.AVPOPCNTW, + asm: x86.AVPCOMPRESSB, reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -22507,30 +22589,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSW512", - argLen: 2, - commutative: true, - asm: x86.AVPADDSW, + name: "VPCOMPRESSBMasked256", + argLen: 2, + asm: x86.AVPCOMPRESSB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPADDSWMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPADDSW, + name: "VPCOMPRESSBMasked512", + argLen: 2, + asm: x86.AVPCOMPRESSB, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22538,28 
+22617,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSW512", + name: "VPCOMPRESSDMasked128", argLen: 2, - asm: x86.AVPSUBSW, + asm: x86.AVPCOMPRESSD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSUBSWMasked512", - argLen: 3, - asm: x86.AVPSUBSW, + name: "VPCOMPRESSDMasked256", + argLen: 2, + asm: x86.AVPCOMPRESSD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22567,87 +22645,83 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLW512", + name: "VPCOMPRESSDMasked512", argLen: 2, - asm: x86.AVPSLLW, + asm: x86.AVPCOMPRESSD, reg: regInfo{ inputs: []inputInfo{ - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSLLWMasked512", - argLen: 3, - asm: x86.AVPSLLW, + name: "VPCOMPRESSQMasked128", + argLen: 2, + asm: x86.AVPCOMPRESSQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSRAW512", + name: "VPCOMPRESSQMasked256", argLen: 2, - asm: x86.AVPSRAW, + asm: x86.AVPCOMPRESSQ, reg: regInfo{ inputs: []inputInfo{ - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 
X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSRAWMasked512", - argLen: 3, - asm: x86.AVPSRAW, + name: "VPCOMPRESSQMasked512", + argLen: 2, + asm: x86.AVPCOMPRESSQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSLLVW512", + name: "VPCOMPRESSWMasked128", argLen: 2, - asm: x86.AVPSLLVW, + asm: x86.AVPCOMPRESSW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSHLDVW512", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHLDVW, + name: "VPCOMPRESSWMasked256", + argLen: 2, + asm: x86.AVPCOMPRESSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22655,16 +22729,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVWMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHLDVW, + name: "VPCOMPRESSWMasked512", + argLen: 2, + asm: x86.AVPCOMPRESSW, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22672,14 +22743,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVWMasked512", - argLen: 3, - asm: x86.AVPSLLVW, + name: "VPDPBUSD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPDPBUSD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22687,24 +22759,26 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVW512", - argLen: 2, - asm: x86.AVPSRAVW, + name: "VPDPBUSD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPDPBUSD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSHRDVW512", + name: "VPDPBUSD512", argLen: 3, resultInArg0: true, - asm: x86.AVPSHRDVW, + asm: x86.AVPDPBUSD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22717,10 +22791,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVWMasked512", + name: "VPDPBUSDMasked128", argLen: 4, resultInArg0: true, - asm: x86.AVPSHRDVW, + asm: x86.AVPDPBUSD, reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -22734,14 +22808,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVWMasked512", - argLen: 3, - asm: x86.AVPSRAVW, + name: "VPDPBUSDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPBUSD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22749,28 +22825,32 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBW512", - argLen: 2, - asm: x86.AVPSUBW, + name: "VPDPBUSDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPBUSD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSUBWMasked512", - argLen: 3, - asm: x86.AVPSUBW, + name: "VPDPBUSDS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPDPBUSDS, reg: regInfo{ inputs: 
[]inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22778,12 +22858,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSW128", - argLen: 1, - asm: x86.AVPABSW, + name: "VPDPBUSDS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPDPBUSDS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22791,13 +22874,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSWMasked128", - argLen: 2, - asm: x86.AVPABSW, + name: "VPDPBUSDS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPDPBUSDS, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22805,14 +22890,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDW128", - argLen: 2, - commutative: true, - asm: x86.AVPADDW, + name: "VPDPBUSDSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPBUSDS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22820,15 +22907,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDWMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPADDW, + name: "VPDPBUSDSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPBUSDS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22836,13 +22924,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCOMPRESSWMasked128", - argLen: 2, - asm: x86.AVPCOMPRESSW, + name: "VPDPBUSDSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPBUSDS, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22850,14 +22941,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQW128", - argLen: 2, - commutative: true, - asm: x86.AVPCMPEQW, + name: "VPDPWSSD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPDPWSSD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22865,13 +22957,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPGTW128", - argLen: 2, - asm: x86.AVPCMPGTW, + name: "VPDPWSSD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPDPWSSD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22879,14 +22973,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSW128", - argLen: 2, - commutative: true, - asm: x86.AVPMAXSW, + name: "VPDPWSSD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPDPWSSD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22894,15 +22989,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSWMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMAXSW, + name: "VPDPWSSDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPWSSD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22910,14 +23006,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSW128", - argLen: 2, - commutative: true, - asm: x86.AVPMINSW, + name: "VPDPWSSDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPWSSD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22925,15 +23023,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSWMasked128", - argLen: 3, - commutative: 
true, - asm: x86.AVPMINSW, + name: "VPDPWSSDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPWSSD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22941,14 +23040,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHW128", - argLen: 2, - commutative: true, - asm: x86.AVPMULHW, + name: "VPDPWSSDS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPDPWSSDS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22956,15 +23056,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHWMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMULHW, + name: "VPDPWSSDS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPDPWSSDS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22972,14 +23072,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLW128", - argLen: 2, - commutative: true, - asm: x86.AVPMULLW, + name: "VPDPWSSDS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPDPWSSDS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22987,15 +23088,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLWMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMULLW, + name: "VPDPWSSDSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPWSSDS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23003,13 +23105,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMADDWD128", - argLen: 2, - asm: x86.AVPMADDWD, + name: "VPDPWSSDSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPWSSDS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23017,14 +23122,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMADDWDMasked128", - argLen: 3, - asm: x86.AVPMADDWD, + name: "VPDPWSSDSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPWSSDS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23032,40 +23139,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHADDW128", + name: "VPERMB128", argLen: 2, - asm: x86.AVPHADDW, + asm: x86.AVPERMB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPHSUBW128", + name: "VPERMB256", argLen: 2, - asm: x86.AVPHSUBW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPOPCNTW128", - argLen: 1, - asm: x86.AVPOPCNTW, + asm: x86.AVPERMB, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -23073,28 +23167,28 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTWMasked128", + name: "VPERMB512", argLen: 2, - asm: x86.AVPOPCNTW, + asm: x86.AVPERMB, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPADDSW128", - argLen: 2, - commutative: true, - asm: x86.AVPADDSW, + name: "VPERMBMasked128", + argLen: 3, + asm: x86.AVPERMB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23102,10 +23196,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSWMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPADDSW, + name: "VPERMBMasked256", + argLen: 3, + asm: x86.AVPERMB, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -23118,13 +23211,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHADDSW128", - argLen: 2, - asm: x86.AVPHADDSW, + name: "VPERMBMasked512", + argLen: 3, + asm: x86.AVPERMB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23132,9 +23226,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHSUBSW128", + name: "VPERMD256", argLen: 2, - asm: x86.AVPHSUBSW, + asm: x86.AVPERMD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23146,23 +23240,23 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSW128", + name: "VPERMD512", argLen: 2, - asm: x86.AVPSUBSW, + asm: x86.AVPERMD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSUBSWMasked128", + name: "VPERMDMasked256", argLen: 3, - asm: x86.AVPSUBSW, + asm: x86.AVPERMD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -23175,13 +23269,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLW128", - argLen: 2, - asm: x86.AVPSLLW, + name: "VPERMDMasked512", + argLen: 3, + asm: x86.AVPERMD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
}, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23189,28 +23284,31 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLWMasked128", - argLen: 3, - asm: x86.AVPSLLW, + name: "VPERMI2B128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2B, reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSRAW128", - argLen: 2, - asm: x86.AVPSRAW, + name: "VPERMI2B256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2B, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23218,44 +23316,49 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAWMasked128", - argLen: 3, - asm: x86.AVPSRAW, + name: "VPERMI2B512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2B, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSLLVW128", - argLen: 2, - asm: x86.AVPSLLVW, + name: "VPERMI2BMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2B, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSHLDVW128", - argLen: 3, + name: 
"VPERMI2BMasked256", + argLen: 4, resultInArg0: true, - asm: x86.AVPSHLDVW, + asm: x86.AVPERMI2B, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23263,10 +23366,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVWMasked128", + name: "VPERMI2BMasked512", argLen: 4, resultInArg0: true, - asm: x86.AVPSHLDVW, + asm: x86.AVPERMI2B, reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -23280,14 +23383,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVWMasked128", - argLen: 3, - asm: x86.AVPSLLVW, + name: "VPERMI2D128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2D, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23295,24 +23399,26 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVW128", - argLen: 2, - asm: x86.AVPSRAVW, + name: "VPERMI2D256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2D, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSHRDVW128", + name: "VPERMI2D512", argLen: 3, resultInArg0: true, - asm: x86.AVPSHRDVW, + asm: x86.AVPERMI2D, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23325,10 +23431,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVWMasked128", + name: "VPERMI2DMasked128", argLen: 4, resultInArg0: true, - asm: x86.AVPSHRDVW, + asm: x86.AVPERMI2D, reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -23342,14 +23448,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVWMasked128", - argLen: 3, - asm: x86.AVPSRAVW, + name: "VPERMI2DMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2D, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, 
// K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23357,13 +23465,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSIGNW128", - argLen: 2, - asm: x86.AVPSIGNW, + name: "VPERMI2DMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2D, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23371,13 +23482,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBW128", - argLen: 2, - asm: x86.AVPSUBW, + name: "VPERMI2PD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23385,14 +23498,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBWMasked128", - argLen: 3, - asm: x86.AVPSUBW, + name: "VPERMI2PD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2PD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23400,26 +23514,32 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSD512", - argLen: 1, - asm: x86.AVPABSD, + name: "VPERMI2PD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2PD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPABSDMasked512", - argLen: 2, - asm: x86.AVPABSD, + name: "VPERMI2PDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2PD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23427,30 +23547,33 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDD512", - argLen: 2, - commutative: true, - asm: x86.AVPADDD, + name: "VPERMI2PDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2PD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPADDDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPADDD, + name: "VPERMI2PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2PD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23458,30 +23581,31 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDD512", - argLen: 2, - commutative: true, - asm: x86.AVPANDD, + name: "VPERMI2PS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2PS, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPANDDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPANDD, + name: "VPERMI2PS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2PS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: 
[]outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23489,28 +23613,32 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDND512", - argLen: 2, - asm: x86.AVPANDND, + name: "VPERMI2PS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2PS, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPANDNDMasked512", - argLen: 3, - asm: x86.AVPANDND, + name: "VPERMI2PSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2PS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23518,13 +23646,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCOMPRESSDMasked512", - argLen: 2, - asm: x86.AVPCOMPRESSD, + name: "VPERMI2PSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2PS, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23532,59 +23663,81 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQD512", - argLen: 2, - commutative: true, - asm: x86.AVPCMPEQD, + name: "VPERMI2PSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2Q128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2Q, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPGTD512", - argLen: 2, - asm: x86.AVPCMPGTD, + name: 
"VPERMI2Q256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2Q, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPMAXSD512", - argLen: 2, - commutative: true, - asm: x86.AVPMAXSD, + name: "VPERMI2Q512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2Q, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPMAXSDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMAXSD, + name: "VPERMI2QMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2Q, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23592,30 +23745,33 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSD512", - argLen: 2, - commutative: true, - asm: x86.AVPMINSD, + name: "VPERMI2QMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2Q, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPMINSDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMINSD, + name: "VPERMI2QMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2Q, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23623,30 +23779,31 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLD512", - argLen: 2, - commutative: true, - asm: x86.AVPMULLD, + name: "VPERMI2W128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2W, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPMULLDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMULLD, + name: "VPERMI2W256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2W, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23654,30 +23811,32 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORD512", - argLen: 2, - commutative: true, - asm: x86.AVPORD, + name: "VPERMI2W512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2W, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPORDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPORD, + name: "VPERMI2WMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2W, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23685,15 +23844,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPWSSD512", 
- argLen: 3, + name: "VPERMI2WMasked256", + argLen: 4, resultInArg0: true, - asm: x86.AVPDPWSSD, + asm: x86.AVPERMI2W, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23701,10 +23861,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPWSSDMasked512", + name: "VPERMI2WMasked512", argLen: 4, resultInArg0: true, - asm: x86.AVPDPWSSD, + asm: x86.AVPERMI2W, reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -23718,12 +23878,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTD512", - argLen: 1, - asm: x86.AVPOPCNTD, + name: "VPERMPD256", + argLen: 2, + asm: x86.AVPERMPD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -23731,37 +23892,38 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTDMasked512", + name: "VPERMPD512", argLen: 2, - asm: x86.AVPOPCNTD, + asm: x86.AVPERMPD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPROLVD512", - argLen: 2, - asm: x86.AVPROLVD, + name: "VPERMPDMasked256", + argLen: 3, + asm: x86.AVPERMPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPROLVDMasked512", + name: "VPERMPDMasked512", argLen: 3, - asm: x86.AVPROLVD, + asm: x86.AVPERMPD, reg: regInfo{ inputs: []inputInfo{ 
{2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -23774,44 +23936,42 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORVD512", + name: "VPERMPS256", argLen: 2, - asm: x86.AVPRORVD, + asm: x86.AVPERMPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPRORVDMasked512", - argLen: 3, - asm: x86.AVPRORVD, + name: "VPERMPS512", + argLen: 2, + asm: x86.AVPERMPS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPDPWSSDS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVPDPWSSDS, + name: "VPERMPSMasked256", + argLen: 3, + asm: x86.AVPERMPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23819,16 +23979,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPWSSDSMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVPDPWSSDS, + name: "VPERMPSMasked512", + argLen: 3, + asm: x86.AVPERMPS, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23836,75 +23994,71 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPBUSDS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVPDPBUSDS, + name: "VPERMQ256", + argLen: 2, + asm: x86.AVPERMQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPDPBUSDSMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVPDPBUSDS, + name: "VPERMQ512", + argLen: 2, + asm: x86.AVPERMQ, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSLLD512", - argLen: 2, - asm: x86.AVPSLLD, + name: "VPERMQMasked256", + argLen: 3, + asm: x86.AVPERMQ, reg: regInfo{ inputs: []inputInfo{ - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSLLDMasked512", + name: "VPERMQMasked512", argLen: 3, - asm: x86.AVPSLLD, + asm: x86.AVPERMQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSRAD512", + name: "VPERMW128", argLen: 2, - asm: x86.AVPSRAD, + asm: x86.AVPERMW, reg: regInfo{ inputs: []inputInfo{ - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 
}, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -23912,14 +24066,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRADMasked512", - argLen: 3, - asm: x86.AVPSRAD, + name: "VPERMW256", + argLen: 2, + asm: x86.AVPERMW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -23927,9 +24080,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVD512", + name: "VPERMW512", argLen: 2, - asm: x86.AVPSLLVD, + asm: x86.AVPERMW, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -23941,15 +24094,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVD512", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHLDVD, + name: "VPERMWMasked128", + argLen: 3, + asm: x86.AVPERMW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23957,16 +24109,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHLDVD, + name: "VPERMWMasked256", + argLen: 3, + asm: x86.AVPERMW, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23974,9 +24124,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVDMasked512", + name: "VPERMWMasked512", argLen: 3, - asm: x86.AVPSLLVD, + asm: x86.AVPERMW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -23989,29 +24139,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVD512", + name: "VPHADDD128", argLen: 2, - asm: x86.AVPSRAVD, + asm: x86.AVPHADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + 
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSHRDVD512", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHRDVD, + name: "VPHADDD256", + argLen: 2, + asm: x86.AVPHADDD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24019,16 +24167,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHRDVD, + name: "VPHADDSW128", + argLen: 2, + asm: x86.AVPHADDSW, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24036,14 +24181,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVDMasked512", - argLen: 3, - asm: x86.AVPSRAVD, + name: "VPHADDSW256", + argLen: 2, + asm: x86.AVPHADDSW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24051,28 +24195,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBD512", + name: "VPHADDW128", argLen: 2, - asm: x86.AVPSUBD, + asm: x86.AVPHADDW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSUBDMasked512", - argLen: 3, - asm: x86.AVPSUBD, + name: "VPHADDW256", + argLen: 2, + asm: x86.AVPHADDW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24080,15 +24223,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPBUSD512", - argLen: 3, - resultInArg0: true, - asm: x86.AVPDPBUSD, + name: "VPHSUBD128", + argLen: 2, + asm: x86.AVPHSUBD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24096,16 +24237,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPBUSDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVPDPBUSD, + name: "VPHSUBD256", + argLen: 2, + asm: x86.AVPHSUBD, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24113,30 +24251,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORD512", - argLen: 2, - commutative: true, - asm: x86.AVPXORD, + name: "VPHSUBSW128", + argLen: 2, + asm: x86.AVPHSUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPXORDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPXORD, + name: "VPHSUBSW256", + argLen: 2, + asm: x86.AVPHSUBSW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24144,12 +24279,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSD128", - argLen: 1, - asm: x86.AVPABSD, + name: "VPHSUBW128", + argLen: 2, + asm: x86.AVPHSUBW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24157,13 +24293,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSDMasked128", + name: "VPHSUBW256", argLen: 
2, - asm: x86.AVPABSD, + asm: x86.AVPHSUBW, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24171,10 +24307,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDD128", - argLen: 2, - commutative: true, - asm: x86.AVPADDD, + name: "VPMADDUBSW128", + argLen: 2, + asm: x86.AVPMADDUBSW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24186,15 +24321,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPADDD, + name: "VPMADDUBSW256", + argLen: 2, + asm: x86.AVPMADDUBSW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24202,10 +24335,23 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPANDD, + name: "VPMADDUBSW512", + argLen: 2, + asm: x86.AVPMADDUBSW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMADDUBSWMasked128", + argLen: 3, + asm: x86.AVPMADDUBSW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -24218,9 +24364,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNDMasked128", + name: "VPMADDUBSWMasked256", argLen: 3, - asm: x86.AVPANDND, + asm: x86.AVPMADDUBSW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -24233,13 +24379,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCOMPRESSDMasked128", - argLen: 2, - asm: x86.AVPCOMPRESSD, + name: "VPMADDUBSWMasked512", + argLen: 3, + asm: x86.AVPMADDUBSW, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24247,10 +24394,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQD128", - argLen: 2, - commutative: true, - asm: x86.AVPCMPEQD, + name: "VPMADDWD128", + argLen: 2, + asm: x86.AVPMADDWD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24262,9 +24408,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPGTD128", + name: "VPMADDWD256", argLen: 
2, - asm: x86.AVPCMPGTD, + asm: x86.AVPMADDWD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24276,25 +24422,23 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSD128", - argLen: 2, - commutative: true, - asm: x86.AVPMAXSD, + name: "VPMADDWD512", + argLen: 2, + asm: x86.AVPMADDWD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPMAXSDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMAXSD, + name: "VPMADDWDMasked128", + argLen: 3, + asm: x86.AVPMADDWD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -24307,14 +24451,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSD128", - argLen: 2, - commutative: true, - asm: x86.AVPMINSD, + name: "VPMADDWDMasked256", + argLen: 3, + asm: x86.AVPMADDWD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24322,10 +24466,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMINSD, + name: "VPMADDWDMasked512", + argLen: 3, + asm: x86.AVPMADDWD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -24338,10 +24481,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULDQ128", + name: "VPMAXSB128", argLen: 2, commutative: true, - asm: x86.AVPMULDQ, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24353,10 +24496,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLD128", + name: "VPMAXSB256", argLen: 2, commutative: true, - asm: x86.AVPMULLD, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24368,26 +24511,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLDMasked128", - argLen: 3, + name: "VPMAXSB512", + argLen: 2, commutative: true, - asm: x86.AVPMULLD, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, 
outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPORDMasked128", + name: "VPMAXSBMasked128", argLen: 3, commutative: true, - asm: x86.AVPORD, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -24400,15 +24542,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPWSSD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVPDPWSSD, + name: "VPMAXSBMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24416,16 +24558,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPWSSDMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVPDPWSSD, + name: "VPMAXSBMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24433,9 +24574,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHADDD128", - argLen: 2, - asm: x86.AVPHADDD, + name: "VPMAXSD128", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24447,9 +24589,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHSUBD128", - argLen: 2, - asm: x86.AVPHSUBD, + name: "VPMAXSD256", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24461,12 +24604,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTD128", - argLen: 1, - asm: x86.AVPOPCNTD, + name: "VPMAXSD512", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -24474,13 +24619,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTDMasked128", - argLen: 2, - asm: x86.AVPOPCNTD, + name: "VPMAXSDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24488,23 +24635,26 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLVD128", - argLen: 2, - asm: x86.AVPROLVD, + name: "VPMAXSDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPROLVDMasked128", - argLen: 3, - asm: x86.AVPROLVD, + name: "VPMAXSDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -24517,9 +24667,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORVD128", - argLen: 2, - asm: x86.AVPRORVD, + name: "VPMAXSQ128", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -24531,63 +24682,45 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORVDMasked128", - argLen: 3, - asm: x86.AVPRORVD, + name: "VPMAXSQ256", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPDPWSSDS128", - argLen: 3, - resultInArg0: true, - asm: x86.AVPDPWSSDS, + name: "VPMAXSQ512", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPDPWSSDSMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVPDPWSSDS, + name: "VPMAXSQMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPDPBUSDS128", - argLen: 3, - resultInArg0: true, - asm: x86.AVPDPBUSDS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24595,16 +24728,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPBUSDSMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVPDPBUSDS, + name: "VPMAXSQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24612,13 +24744,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLD128", - argLen: 2, - asm: x86.AVPSLLD, + name: "VPMAXSQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24626,24 +24760,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLDMasked128", - argLen: 3, - asm: x86.AVPSLLD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + name: "VPMAXSW128", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSRAD128", - argLen: 2, - asm: 
x86.AVPSRAD, + name: "VPMAXSW256", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24655,14 +24790,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRADMasked128", - argLen: 3, - asm: x86.AVPSRAD, + name: "VPMAXSW512", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -24670,13 +24805,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVD128", - argLen: 2, - asm: x86.AVPSLLVD, + name: "VPMAXSWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24684,15 +24821,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHLDVD, + name: "VPMAXSWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24700,16 +24837,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVDMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHLDVD, + name: "VPMAXSWMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSW, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24717,14 +24853,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVDMasked128", - argLen: 3, - asm: x86.AVPSLLVD, + name: "VPMAXUB128", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - 
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24732,9 +24868,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVD128", - argLen: 2, - asm: x86.AVPSRAVD, + name: "VPMAXUB256", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24746,32 +24883,30 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHRDVD, + name: "VPMAXUB512", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSHRDVDMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHRDVD, + name: "VPMAXUBMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24779,9 +24914,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVDMasked128", - argLen: 3, - asm: x86.AVPSRAVD, + name: "VPMAXUBMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -24794,13 +24930,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSIGND128", - argLen: 2, - asm: x86.AVPSIGND, + name: "VPMAXUBMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24808,9 +24946,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBD128", - argLen: 2, - asm: x86.AVPSUBD, + name: "VPMAXUD128", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 @@ -24822,14 +24961,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBDMasked128", - argLen: 3, - asm: x86.AVPSUBD, + name: "VPMAXUD256", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24837,32 +24976,30 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPBUSD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVPDPBUSD, + name: "VPMAXUD512", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPDPBUSDMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVPDPBUSD, + name: "VPMAXUDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24870,10 +25007,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORDMasked128", + name: "VPMAXUDMasked256", argLen: 3, commutative: true, - asm: x86.AVPXORD, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -24886,12 +25023,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSD256", - argLen: 1, - asm: x86.AVPABSD, + name: "VPMAXUDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24899,55 +25039,55 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSDMasked256", - argLen: 2, - asm: x86.AVPABSD, + name: "VPMAXUQ128", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPADDD256", + name: "VPMAXUQ256", argLen: 2, commutative: true, - asm: x86.AVPADDD, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPADDDMasked256", - argLen: 3, + name: "VPMAXUQ512", + argLen: 2, commutative: true, - asm: x86.AVPADDD, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPANDDMasked256", + name: "VPMAXUQMasked128", argLen: 3, commutative: true, - asm: x86.AVPANDD, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -24960,9 +25100,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNDMasked256", - argLen: 3, - asm: x86.AVPANDND, + name: "VPMAXUQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -24975,13 +25116,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCOMPRESSDMasked256", - argLen: 2, - asm: x86.AVPCOMPRESSD, + name: "VPMAXUQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24989,10 +25132,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQD256", + name: "VPMAXUW128", argLen: 2, commutative: true, - asm: x86.AVPCMPEQD, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: 
[]inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25004,9 +25147,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPGTD256", - argLen: 2, - asm: x86.AVPCMPGTD, + name: "VPMAXUW256", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25018,25 +25162,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSD256", + name: "VPMAXUW512", argLen: 2, commutative: true, - asm: x86.AVPMAXSD, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPMAXSDMasked256", + name: "VPMAXUWMasked128", argLen: 3, commutative: true, - asm: x86.AVPMAXSD, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -25049,14 +25193,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSD256", - argLen: 2, + name: "VPMAXUWMasked256", + argLen: 3, commutative: true, - asm: x86.AVPMINSD, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25064,10 +25209,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSDMasked256", + name: "VPMAXUWMasked512", argLen: 3, commutative: true, - asm: x86.AVPMINSD, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -25080,10 +25225,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULDQ256", + name: "VPMINSB128", argLen: 2, commutative: true, - asm: x86.AVPMULDQ, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25095,10 +25240,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLD256", + name: "VPMINSB256", argLen: 2, commutative: true, - asm: x86.AVPMULLD, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25110,26 +25255,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLDMasked256", - argLen: 3, + name: "VPMINSB512", + argLen: 2, commutative: true, - asm: x86.AVPMULLD, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 
X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPORDMasked256", + name: "VPMINSBMasked128", argLen: 3, commutative: true, - asm: x86.AVPORD, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -25142,15 +25286,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPWSSD256", - argLen: 3, - resultInArg0: true, - asm: x86.AVPDPWSSD, + name: "VPMINSBMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25158,16 +25302,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPWSSDMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPDPWSSD, + name: "VPMINSBMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25175,9 +25318,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHADDD256", - argLen: 2, - asm: x86.AVPHADDD, + name: "VPMINSD128", + argLen: 2, + commutative: true, + asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25189,9 +25333,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHSUBD256", - argLen: 2, - asm: x86.AVPHSUBD, + name: "VPMINSD256", + argLen: 2, + commutative: true, + asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25203,12 +25348,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTD256", - argLen: 1, - asm: x86.AVPOPCNTD, + name: "VPMINSD512", + argLen: 2, + commutative: true, + asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -25216,13 +25363,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTDMasked256", - argLen: 2, - asm: x86.AVPOPCNTD, + name: "VPMINSDMasked128", + argLen: 3, + commutative: true, + asm: 
x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25230,23 +25379,26 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLVD256", - argLen: 2, - asm: x86.AVPROLVD, + name: "VPMINSDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPROLVDMasked256", - argLen: 3, - asm: x86.AVPROLVD, + name: "VPMINSDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -25259,9 +25411,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORVD256", - argLen: 2, - asm: x86.AVPRORVD, + name: "VPMINSQ128", + argLen: 2, + commutative: true, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -25273,47 +25426,45 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORVDMasked256", - argLen: 3, - asm: x86.AVPRORVD, + name: "VPMINSQ256", + argLen: 2, + commutative: true, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPDPWSSDS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVPDPWSSDS, + name: "VPMINSQ512", + argLen: 2, + commutative: true, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 
X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPDPWSSDSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPDPWSSDS, + name: "VPMINSQMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25321,15 +25472,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPBUSDS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVPDPBUSDS, + name: "VPMINSQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25337,16 +25488,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPBUSDSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPDPBUSDS, + name: "VPMINSQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25354,9 +25504,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLD256", - argLen: 2, - asm: x86.AVPSLLD, + name: "VPMINSW128", + argLen: 2, + commutative: true, + asm: x86.AVPMINSW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25368,24 +25519,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLDMasked256", - argLen: 3, - asm: x86.AVPSLLD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VPSRAD256", - argLen: 2, - asm: x86.AVPSRAD, + name: "VPMINSW256", + argLen: 2, + commutative: true, + asm: x86.AVPMINSW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25397,44 +25534,30 @@ var 
opcodeTable = [...]opInfo{ }, }, { - name: "VPSRADMasked256", - argLen: 3, - asm: x86.AVPSRAD, + name: "VPMINSW512", + argLen: 2, + commutative: true, + asm: x86.AVPMINSW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VPSLLVD256", - argLen: 2, - asm: x86.AVPSLLVD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSHLDVD256", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHLDVD, + name: "VPMINSWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMINSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25442,16 +25565,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVDMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHLDVD, + name: "VPMINSWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMINSW, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25459,9 +25581,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVDMasked256", - argLen: 3, - asm: x86.AVPSLLVD, + name: "VPMINSWMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMINSW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -25474,9 +25597,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVD256", - argLen: 2, - asm: x86.AVPSRAVD, + name: "VPMINUB128", + argLen: 2, + commutative: true, + asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25488,15 +25612,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVD256", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHRDVD, + name: "VPMINUB256", + argLen: 2, + commutative: true, + asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25504,16 +25627,30 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVDMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHRDVD, + name: "VPMINUB512", + argLen: 2, + commutative: true, + asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMINUBMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMINUB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25521,9 +25658,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVDMasked256", - argLen: 3, - asm: x86.AVPSRAVD, + name: "VPMINUBMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -25536,13 +25674,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSIGND256", - argLen: 2, - asm: x86.AVPSIGND, + name: "VPMINUBMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25550,9 +25690,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBD256", - argLen: 2, - asm: x86.AVPSUBD, + name: "VPMINUD128", + argLen: 2, + commutative: true, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25564,14 +25705,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBDMasked256", - argLen: 3, - asm: x86.AVPSUBD, + name: "VPMINUD256", + argLen: 2, + commutative: true, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25579,32 +25720,30 @@ var opcodeTable = [...]opInfo{ }, }, { - name: 
"VPDPBUSD256", - argLen: 3, - resultInArg0: true, - asm: x86.AVPDPBUSD, + name: "VPMINUD512", + argLen: 2, + commutative: true, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPDPBUSDMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPDPBUSD, + name: "VPMINUDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25612,10 +25751,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORDMasked256", + name: "VPMINUDMasked256", argLen: 3, commutative: true, - asm: x86.AVPXORD, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -25628,68 +25767,71 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSQ128", - argLen: 1, - asm: x86.AVPABSQ, + name: "VPMINUDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPABSQMasked128", - argLen: 2, - asm: x86.AVPABSQ, + name: "VPMINUQ128", + argLen: 2, + commutative: true, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPADDQ128", + name: "VPMINUQ256", argLen: 2, commutative: true, - asm: 
x86.AVPADDQ, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPADDQMasked128", - argLen: 3, + name: "VPMINUQ512", + argLen: 2, commutative: true, - asm: x86.AVPADDQ, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPANDQMasked128", + name: "VPMINUQMasked128", argLen: 3, commutative: true, - asm: x86.AVPANDQ, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -25702,9 +25844,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNQMasked128", - argLen: 3, - asm: x86.AVPANDNQ, + name: "VPMINUQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -25717,13 +25860,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCOMPRESSQMasked128", - argLen: 2, - asm: x86.AVPCOMPRESSQ, + name: "VPMINUQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25731,10 +25876,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQQ128", + name: "VPMINUW128", argLen: 2, commutative: true, - asm: x86.AVPCMPEQQ, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25746,9 +25891,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPGTQ128", - argLen: 2, - asm: x86.AVPCMPGTQ, + name: "VPMINUW256", + argLen: 2, + commutative: true, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25760,10 +25906,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSQ128", + name: "VPMINUW512", argLen: 2, commutative: true, - asm: x86.AVPMAXSQ, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -25775,10 +25921,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSQMasked128", + name: "VPMINUWMasked128", argLen: 3, commutative: true, - asm: x86.AVPMAXSQ, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -25791,25 +25937,26 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSQ128", - argLen: 2, + name: "VPMINUWMasked256", + argLen: 3, commutative: true, - asm: x86.AVPMINSQ, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPMINSQMasked128", + name: "VPMINUWMasked512", argLen: 3, commutative: true, - asm: x86.AVPMINSQ, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -25822,15 +25969,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULDQMasked128", - argLen: 3, + name: "VPMULDQ128", + argLen: 2, commutative: true, asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25838,41 +25984,40 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQ128", + name: "VPMULDQ256", argLen: 2, commutative: true, - asm: x86.AVPMULLQ, + asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPMULLQMasked128", - argLen: 3, + name: "VPMULDQ512", + argLen: 2, commutative: true, - asm: x86.AVPMULLQ, + asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPORQMasked128", + name: "VPMULDQMasked128", argLen: 3, commutative: true, - asm: x86.AVPORQ, + asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -25885,26 +26030,31 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTQ128", - argLen: 1, - asm: x86.AVPOPCNTQ, + name: "VPMULDQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPOPCNTQMasked128", - argLen: 2, - asm: x86.AVPOPCNTQ, + name: "VPMULDQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25912,28 +26062,29 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLVQ128", - argLen: 2, - asm: x86.AVPROLVQ, + name: "VPMULHUW128", + argLen: 2, + commutative: true, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPROLVQMasked128", - argLen: 3, - asm: x86.AVPROLVQ, + name: "VPMULHUW256", + argLen: 2, + commutative: true, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ 
-25941,9 +26092,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORVQ128", - argLen: 2, - asm: x86.AVPRORVQ, + name: "VPMULHUW512", + argLen: 2, + commutative: true, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -25955,9 +26107,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORVQMasked128", - argLen: 3, - asm: x86.AVPRORVQ, + name: "VPMULHUWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -25970,13 +26123,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLQ128", - argLen: 2, - asm: x86.AVPSLLQ, + name: "VPMULHUWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25984,53 +26139,41 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLQMasked128", - argLen: 3, - asm: x86.AVPSLLQ, + name: "VPMULHUWMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VPSRAQ128", - argLen: 2, - asm: x86.AVPSRAQ, - reg: regInfo{ - inputs: []inputInfo{ - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSRAQMasked128", - argLen: 3, - asm: x86.AVPSRAQ, + name: "VPMULHW128", + argLen: 2, + commutative: true, + asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSLLVQ128", - argLen: 2, - asm: x86.AVPSLLVQ, + name: 
"VPMULHW256", + argLen: 2, + commutative: true, + asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26042,32 +26185,30 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVQ128", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHLDVQ, + name: "VPMULHW512", + argLen: 2, + commutative: true, + asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSHLDVQMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHLDVQ, + name: "VPMULHWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26075,9 +26216,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVQMasked128", - argLen: 3, - asm: x86.AVPSLLVQ, + name: "VPMULHWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -26090,29 +26232,30 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVQ128", - argLen: 2, - asm: x86.AVPSRAVQ, + name: "VPMULHWMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSHRDVQ128", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHRDVQ, + name: "VPMULLD128", + argLen: 2, + commutative: true, + asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ 
-26120,16 +26263,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVQMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHRDVQ, + name: "VPMULLD256", + argLen: 2, + commutative: true, + asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26137,28 +26278,30 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVQMasked128", - argLen: 3, - asm: x86.AVPSRAVQ, + name: "VPMULLD512", + argLen: 2, + commutative: true, + asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSUBQ128", - argLen: 2, - asm: x86.AVPSUBQ, + name: "VPMULLDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26166,9 +26309,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBQMasked128", - argLen: 3, - asm: x86.AVPSUBQ, + name: "VPMULLDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -26181,10 +26325,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORQMasked128", + name: "VPMULLDMasked512", argLen: 3, commutative: true, - asm: x86.AVPXORQ, + asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -26197,12 +26341,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSQ256", - argLen: 1, - asm: x86.AVPABSQ, + name: "VPMULLQ128", + argLen: 2, + commutative: true, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 
X26 X27 X28 X29 X30 X31 @@ -26210,39 +26356,40 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSQMasked256", - argLen: 2, - asm: x86.AVPABSQ, + name: "VPMULLQ256", + argLen: 2, + commutative: true, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPADDQ256", + name: "VPMULLQ512", argLen: 2, commutative: true, - asm: x86.AVPADDQ, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPADDQMasked256", + name: "VPMULLQMasked128", argLen: 3, commutative: true, - asm: x86.AVPADDQ, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -26255,10 +26402,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDQMasked256", + name: "VPMULLQMasked256", argLen: 3, commutative: true, - asm: x86.AVPANDQ, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -26271,9 +26418,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNQMasked256", - argLen: 3, - asm: x86.AVPANDNQ, + name: "VPMULLQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -26286,24 +26434,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCOMPRESSQMasked256", - argLen: 2, - asm: x86.AVPCOMPRESSQ, - reg: regInfo{ - inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPCMPEQQ256", + name: "VPMULLW128", argLen: 2, commutative: true, - asm: x86.AVPCMPEQQ, + asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26315,9 +26449,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPGTQ256", - argLen: 2, - asm: x86.AVPCMPGTQ, + name: "VPMULLW256", + argLen: 2, + commutative: true, + asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26329,10 +26464,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSQ256", + name: 
"VPMULLW512", argLen: 2, commutative: true, - asm: x86.AVPMAXSQ, + asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -26344,10 +26479,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSQMasked256", + name: "VPMULLWMasked128", argLen: 3, commutative: true, - asm: x86.AVPMAXSQ, + asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -26360,25 +26495,26 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSQ256", - argLen: 2, + name: "VPMULLWMasked256", + argLen: 3, commutative: true, - asm: x86.AVPMINSQ, + asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPMINSQMasked256", + name: "VPMULLWMasked512", argLen: 3, commutative: true, - asm: x86.AVPMINSQ, + asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -26391,15 +26527,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULDQMasked256", - argLen: 3, + name: "VPMULUDQ128", + argLen: 2, commutative: true, - asm: x86.AVPMULDQ, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26407,10 +26542,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQ256", + name: "VPMULUDQ256", argLen: 2, commutative: true, - asm: x86.AVPMULLQ, + asm: x86.AVPMULUDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMULUDQ512", + argLen: 2, + commutative: true, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -26422,10 +26572,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQMasked256", + name: "VPMULUDQMasked128", argLen: 3, commutative: true, - asm: x86.AVPMULLQ, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -26438,10 +26588,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORQMasked256", + name: "VPMULUDQMasked256", argLen: 3, commutative: true, - 
asm: x86.AVPORQ, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -26454,40 +26604,41 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTQ256", - argLen: 1, - asm: x86.AVPOPCNTQ, + name: "VPMULUDQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPOPCNTQMasked256", - argLen: 2, - asm: x86.AVPOPCNTQ, + name: "VPOPCNTB128", + argLen: 1, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPROLVQ256", - argLen: 2, - asm: x86.AVPROLVQ, + name: "VPOPCNTB256", + argLen: 1, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -26495,43 +26646,40 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLVQMasked256", - argLen: 3, - asm: x86.AVPROLVQ, + name: "VPOPCNTB512", + argLen: 1, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPRORVQ256", + name: "VPOPCNTBMasked128", argLen: 2, - asm: x86.AVPRORVQ, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPRORVQMasked256", - argLen: 3, - asm: x86.AVPRORVQ, + name: "VPOPCNTBMasked256", + argLen: 2, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26539,13 +26687,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLQ256", + name: "VPOPCNTBMasked512", argLen: 2, - asm: x86.AVPSLLQ, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26553,14 +26701,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLQMasked256", - argLen: 3, - asm: x86.AVPSLLQ, + name: "VPOPCNTD128", + argLen: 1, + asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -26568,12 +26714,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAQ256", - argLen: 2, - asm: x86.AVPSRAQ, + name: "VPOPCNTD256", + argLen: 1, + asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -26582,14 +26727,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAQMasked256", - argLen: 3, - asm: x86.AVPSRAQ, + name: "VPOPCNTD512", + argLen: 1, + asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -26597,13 +26740,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVQ256", + name: "VPOPCNTDMasked128", argLen: 2, - asm: x86.AVPSLLVQ, + asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - 
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26611,15 +26754,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVQ256", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHLDVQ, + name: "VPOPCNTDMasked256", + argLen: 2, + asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26627,16 +26768,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVQMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHLDVQ, + name: "VPOPCNTDMasked512", + argLen: 2, + asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26644,28 +26782,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVQMasked256", - argLen: 3, - asm: x86.AVPSLLVQ, + name: "VPOPCNTQ128", + argLen: 1, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSRAVQ256", - argLen: 2, - asm: x86.AVPSRAVQ, + name: "VPOPCNTQ256", + argLen: 1, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -26673,32 +26808,26 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVQ256", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHRDVQ, + name: "VPOPCNTQ512", + argLen: 1, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + 
{0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSHRDVQMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHRDVQ, + name: "VPOPCNTQMasked128", + argLen: 2, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26706,14 +26835,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVQMasked256", - argLen: 3, - asm: x86.AVPSRAVQ, + name: "VPOPCNTQMasked256", + argLen: 2, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26721,13 +26849,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBQ256", + name: "VPOPCNTQMasked512", argLen: 2, - asm: x86.AVPSUBQ, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26735,40 +26863,35 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBQMasked256", - argLen: 3, - asm: x86.AVPSUBQ, + name: "VPOPCNTW128", + argLen: 1, + asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPXORQMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPXORQ, + name: "VPOPCNTW256", + argLen: 1, + asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPABSQ512", + name: "VPOPCNTW512", argLen: 1, - asm: x86.AVPABSQ, + asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -26779,9 +26902,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSQMasked512", + name: "VPOPCNTWMasked128", argLen: 2, - asm: x86.AVPABSQ, + asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -26793,30 +26916,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDQ512", - argLen: 2, - commutative: true, - asm: x86.AVPADDQ, + name: "VPOPCNTWMasked256", + argLen: 2, + asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPADDQMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPADDQ, + name: "VPOPCNTWMasked512", + argLen: 2, + asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26824,30 +26944,29 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDQ512", + name: "VPOR128", argLen: 2, commutative: true, - asm: x86.AVPANDQ, + asm: x86.AVPOR, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPANDQMasked512", - argLen: 3, + name: "VPOR256", + argLen: 2, commutative: true, - asm: x86.AVPANDQ, + asm: x86.AVPOR, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26855,9 
+26974,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNQ512", - argLen: 2, - asm: x86.AVPANDNQ, + name: "VPORD512", + argLen: 2, + commutative: true, + asm: x86.AVPORD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -26869,9 +26989,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNQMasked512", - argLen: 3, - asm: x86.AVPANDNQ, + name: "VPORDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPORD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -26884,13 +27005,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCOMPRESSQMasked512", - argLen: 2, - asm: x86.AVPCOMPRESSQ, + name: "VPORDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPORD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26898,39 +27021,26 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQQ512", - argLen: 2, + name: "VPORDMasked512", + argLen: 3, commutative: true, - asm: x86.AVPCMPEQQ, + asm: x86.AVPORD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPCMPGTQ512", - argLen: 2, - asm: x86.AVPCMPGTQ, - reg: regInfo{ - inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPMAXSQ512", + name: "VPORQ512", argLen: 2, commutative: true, - asm: x86.AVPMAXSQ, + asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -26942,10 +27052,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSQMasked512", + name: "VPORQMasked128", argLen: 3, commutative: true, - asm: x86.AVPMAXSQ, + asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -26958,25 +27068,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSQ512", - argLen: 2, - commutative: true, - asm: x86.AVPMINSQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VPMINSQMasked512", + name: "VPORQMasked256", argLen: 3, commutative: true, - asm: x86.AVPMINSQ, + asm: 
x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -26989,25 +27084,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULDQ512", - argLen: 2, - commutative: true, - asm: x86.AVPMULDQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VPMULDQMasked512", + name: "VPORQMasked512", argLen: 3, commutative: true, - asm: x86.AVPMULDQ, + asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -27020,10 +27100,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQ512", - argLen: 2, - commutative: true, - asm: x86.AVPMULLQ, + name: "VPROLVD128", + argLen: 2, + asm: x86.AVPROLVD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -27035,26 +27114,23 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMULLQ, + name: "VPROLVD256", + argLen: 2, + asm: x86.AVPROLVD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPORQ512", - argLen: 2, - commutative: true, - asm: x86.AVPORQ, + name: "VPROLVD512", + argLen: 2, + asm: x86.AVPROLVD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -27066,10 +27142,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORQMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPORQ, + name: "VPROLVDMasked128", + argLen: 3, + asm: x86.AVPROLVD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -27082,26 +27157,29 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTQ512", - argLen: 1, - asm: x86.AVPOPCNTQ, + name: "VPROLVDMasked256", + argLen: 3, + asm: x86.AVPROLVD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 
X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPOPCNTQMasked512", - argLen: 2, - asm: x86.AVPOPCNTQ, + name: "VPROLVDMasked512", + argLen: 3, + asm: x86.AVPROLVD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27109,7 +27187,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLVQ512", + name: "VPROLVQ128", argLen: 2, asm: x86.AVPROLVQ, reg: regInfo{ @@ -27123,24 +27201,23 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLVQMasked512", - argLen: 3, + name: "VPROLVQ256", + argLen: 2, asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPRORVQ512", + name: "VPROLVQ512", argLen: 2, - asm: x86.AVPRORVQ, + asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -27152,9 +27229,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORVQMasked512", + name: "VPROLVQMasked128", argLen: 3, - asm: x86.AVPRORVQ, + asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -27167,42 +27244,43 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLQ512", - argLen: 2, - asm: x86.AVPSLLQ, + name: "VPROLVQMasked256", + argLen: 3, + asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSLLQMasked512", + name: "VPROLVQMasked512", argLen: 3, - asm: x86.AVPSLLQ, + asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 
X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSRAQ512", + name: "VPRORVD128", argLen: 2, - asm: x86.AVPSRAQ, + asm: x86.AVPRORVD, reg: regInfo{ inputs: []inputInfo{ - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -27210,14 +27288,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAQMasked512", - argLen: 3, - asm: x86.AVPSRAQ, + name: "VPRORVD256", + argLen: 2, + asm: x86.AVPRORVD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -27225,9 +27302,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVQ512", + name: "VPRORVD512", argLen: 2, - asm: x86.AVPSLLVQ, + asm: x86.AVPRORVD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -27239,15 +27316,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVQ512", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHLDVQ, + name: "VPRORVDMasked128", + argLen: 3, + asm: x86.AVPRORVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27255,16 +27331,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVQMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHLDVQ, + name: "VPRORVDMasked256", + argLen: 3, + asm: x86.AVPRORVD, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27272,9 +27346,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVQMasked512", + name: "VPRORVDMasked512", argLen: 3, - asm: x86.AVPSLLVQ, + asm: x86.AVPRORVD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -27287,9 +27361,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVQ512", + name: "VPRORVQ128", argLen: 2, - asm: x86.AVPSRAVQ, + asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -27301,42 +27375,37 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVQ512", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHRDVQ, + name: "VPRORVQ256", + argLen: 2, + asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSHRDVQMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHRDVQ, + name: "VPRORVQ512", + argLen: 2, + asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSRAVQMasked512", + name: "VPRORVQMasked128", argLen: 3, - asm: x86.AVPSRAVQ, + asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -27349,23 +27418,24 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBQ512", - argLen: 2, - asm: x86.AVPSUBQ, + name: "VPRORVQMasked256", + argLen: 3, + asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSUBQMasked512", + name: "VPRORVQMasked512", argLen: 3, - asm: x86.AVPSUBQ, + asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -27378,30 +27448,31 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORQ512", - argLen: 2, - commutative: true, - asm: x86.AVPXORQ, + name: "VPSHLDVD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHLDVD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPXORQMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPXORQ, + name: "VPSHLDVD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHLDVD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27409,12 +27480,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSB128", - argLen: 1, - asm: x86.AVPABSB, + name: "VPSHLDVD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHLDVD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27422,13 +27496,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSBMasked128", - argLen: 2, - asm: x86.AVPABSB, + name: "VPSHLDVDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27436,14 +27513,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDB128", - argLen: 2, - commutative: true, - asm: x86.AVPADDB, + name: "VPSHLDVDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27451,15 +27530,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDBMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPADDB, + name: "VPSHLDVDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27467,14 +27547,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAND128", - argLen: 2, - commutative: true, - asm: x86.AVPAND, + name: "VPSHLDVQ128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHLDVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27482,13 +27563,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDN128", - argLen: 2, - asm: x86.AVPANDN, + name: "VPSHLDVQ256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHLDVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27496,28 +27579,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCOMPRESSBMasked128", - argLen: 2, - asm: x86.AVPCOMPRESSB, - reg: regInfo{ - inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPCMPEQB128", - argLen: 2, - commutative: true, - asm: x86.AVPCMPEQB, + name: "VPSHLDVQ512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHLDVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27525,13 +27595,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPGTB128", - argLen: 2, - asm: x86.AVPCMPGTB, + name: "VPSHLDVQMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + 
{3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27539,14 +27612,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSB128", - argLen: 2, - commutative: true, - asm: x86.AVPMAXSB, + name: "VPSHLDVQMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27554,15 +27629,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSBMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMAXSB, + name: "VPSHLDVQMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27570,14 +27646,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSB128", - argLen: 2, - commutative: true, - asm: x86.AVPMINSB, + name: "VPSHLDVW128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHLDVW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27585,15 +27662,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSBMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMINSB, + name: "VPSHLDVW256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHLDVW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27601,14 +27678,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOR128", - argLen: 2, - commutative: true, - asm: x86.AVPOR, + name: "VPSHLDVW512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHLDVW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27616,26 +27694,33 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTB128", - argLen: 1, - asm: x86.AVPOPCNTB, + name: "VPSHLDVWMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPOPCNTBMasked128", - argLen: 2, - asm: x86.AVPOPCNTB, + name: "VPSHLDVWMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVW, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27643,14 +27728,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSB128", - argLen: 2, - commutative: true, - asm: x86.AVPADDSB, + name: "VPSHLDVWMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27658,15 +27745,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSBMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPADDSB, + name: "VPSHRDVD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27674,13 +27761,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSB128", - argLen: 2, - asm: x86.AVPSUBSB, + name: "VPSHRDVD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27688,14 +27777,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSBMasked128", - argLen: 3, - asm: x86.AVPSUBSB, + name: "VPSHRDVD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27703,13 +27793,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSIGNB128", - argLen: 2, - asm: x86.AVPSIGNB, + name: "VPSHRDVDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27717,13 +27810,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBB128", - argLen: 2, - asm: x86.AVPSUBB, + name: "VPSHRDVDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27731,14 +27827,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBBMasked128", - argLen: 3, - asm: x86.AVPSUBB, + name: "VPSHRDVDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27746,14 +27844,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXOR128", - argLen: 2, - commutative: true, - asm: x86.AVPXOR, + name: "VPSHRDVQ128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: 
[]outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27761,12 +27860,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSB256", - argLen: 1, - asm: x86.AVPABSB, + name: "VPSHRDVQ256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27774,28 +27876,32 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSBMasked256", - argLen: 2, - asm: x86.AVPABSB, + name: "VPSHRDVQ512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, - }, - { - name: "VPADDB256", - argLen: 2, - commutative: true, - asm: x86.AVPADDB, + }, + { + name: "VPSHRDVQMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27803,15 +27909,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDBMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPADDB, + name: "VPSHRDVQMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27819,14 +27926,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAND256", - argLen: 2, - commutative: true, - asm: x86.AVPAND, + name: "VPSHRDVQMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27834,13 +27943,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDN256", 
- argLen: 2, - asm: x86.AVPANDN, + name: "VPSHRDVW128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27848,13 +27959,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCOMPRESSBMasked256", - argLen: 2, - asm: x86.AVPCOMPRESSB, + name: "VPSHRDVW256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVW, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27862,14 +27975,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQB256", - argLen: 2, - commutative: true, - asm: x86.AVPCMPEQB, + name: "VPSHRDVW512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27877,13 +27991,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPGTB256", - argLen: 2, - asm: x86.AVPCMPGTB, + name: "VPSHRDVWMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27891,14 +28008,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSB256", - argLen: 2, - commutative: true, - asm: x86.AVPMAXSB, + name: "VPSHRDVWMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27906,15 +28025,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSBMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMAXSB, + name: "VPSHRDVWMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27922,10 +28042,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSB256", - argLen: 2, - commutative: true, - asm: x86.AVPMINSB, + name: "VPSIGNB128", + argLen: 2, + asm: x86.AVPSIGNB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27937,15 +28056,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSBMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMINSB, + name: "VPSIGNB256", + argLen: 2, + asm: x86.AVPSIGNB, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27953,10 +28070,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOR256", - argLen: 2, - commutative: true, - asm: x86.AVPOR, + name: "VPSIGND128", + argLen: 2, + asm: x86.AVPSIGND, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27968,26 +28084,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTB256", - argLen: 1, - asm: x86.AVPOPCNTB, + name: "VPSIGND256", + argLen: 2, + asm: x86.AVPSIGND, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPOPCNTBMasked256", + name: "VPSIGNW128", argLen: 2, - asm: x86.AVPOPCNTB, + asm: x86.AVPSIGNW, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27995,10 +28112,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSB256", - argLen: 2, - commutative: true, - asm: x86.AVPADDSB, + name: "VPSIGNW256", + argLen: 2, + asm: x86.AVPSIGNW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28010,15 +28126,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSBMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPADDSB, + name: "VPSLLD128", + argLen: 2, + asm: x86.AVPSLLD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28026,9 +28140,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSB256", + name: "VPSLLD256", argLen: 2, - asm: x86.AVPSUBSB, + asm: x86.AVPSLLD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28040,68 +28154,68 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSBMasked256", - argLen: 3, - asm: x86.AVPSUBSB, + name: "VPSLLD512", + argLen: 2, + asm: x86.AVPSLLD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSIGNB256", - argLen: 2, - asm: x86.AVPSIGNB, + name: "VPSLLDMasked128", + argLen: 3, + asm: x86.AVPSLLD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSUBB256", - argLen: 2, - asm: x86.AVPSUBB, + name: "VPSLLDMasked256", + argLen: 3, + asm: x86.AVPSLLD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSUBBMasked256", + name: "VPSLLDMasked512", argLen: 3, - asm: x86.AVPSUBB, + asm: x86.AVPSLLD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPXOR256", - argLen: 2, - commutative: true, - asm: x86.AVPXOR, + name: "VPSLLQ128", + argLen: 2, + asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28113,41 +28227,42 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSB512", - argLen: 1, - asm: x86.AVPABSB, + name: "VPSLLQ256", + argLen: 2, + asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPABSBMasked512", + name: "VPSLLQ512", argLen: 2, - asm: x86.AVPABSB, + asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPADDB512", - argLen: 2, - commutative: true, - asm: x86.AVPADDB, + name: "VPSLLQMasked128", + argLen: 3, + asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28155,69 +28270,67 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDBMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPADDB, + name: "VPSLLQMasked256", + argLen: 3, + asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 
X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPCOMPRESSBMasked512", - argLen: 2, - asm: x86.AVPCOMPRESSB, + name: "VPSLLQMasked512", + argLen: 3, + asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPCMPEQB512", - argLen: 2, - commutative: true, - asm: x86.AVPCMPEQB, + name: "VPSLLVD128", + argLen: 2, + asm: x86.AVPSLLVD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPGTB512", + name: "VPSLLVD256", argLen: 2, - asm: x86.AVPCMPGTB, + asm: x86.AVPSLLVD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPMAXSB512", - argLen: 2, - commutative: true, - asm: x86.AVPMAXSB, + name: "VPSLLVD512", + argLen: 2, + asm: x86.AVPSLLVD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28229,10 +28342,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSBMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMAXSB, + name: "VPSLLVDMasked128", + argLen: 3, + asm: x86.AVPSLLVD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -28245,25 +28357,24 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSB512", - argLen: 2, - commutative: true, - asm: x86.AVPMINSB, + name: "VPSLLVDMasked256", + argLen: 3, + asm: x86.AVPSLLVD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPMINSBMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMINSB, + name: "VPSLLVDMasked512", + argLen: 3, + asm: x86.AVPSLLVD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 
@@ -28276,26 +28387,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTB512", - argLen: 1, - asm: x86.AVPOPCNTB, + name: "VPSLLVQ128", + argLen: 2, + asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPOPCNTBMasked512", + name: "VPSLLVQ256", argLen: 2, - asm: x86.AVPOPCNTB, + asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28303,10 +28415,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSB512", - argLen: 2, - commutative: true, - asm: x86.AVPADDSB, + name: "VPSLLVQ512", + argLen: 2, + asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28318,10 +28429,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSBMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPADDSB, + name: "VPSLLVQMasked128", + argLen: 3, + asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -28334,23 +28444,24 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSB512", - argLen: 2, - asm: x86.AVPSUBSB, + name: "VPSLLVQMasked256", + argLen: 3, + asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSUBSBMasked512", + name: "VPSLLVQMasked512", argLen: 3, - asm: x86.AVPSUBSB, + asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -28363,9 +28474,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBB512", + name: "VPSLLVW128", argLen: 2, - asm: x86.AVPSUBB, + asm: x86.AVPSLLVW, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28377,40 +28488,37 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBBMasked512", - argLen: 3, - asm: x86.AVPSUBB, + name: "VPSLLVW256", + argLen: 2, + asm: 
x86.AVPSLLVW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPAVGW256", - argLen: 2, - commutative: true, - asm: x86.AVPAVGW, + name: "VPSLLVW512", + argLen: 2, + asm: x86.AVPSLLVW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPAVGWMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPAVGW, + name: "VPSLLVWMasked128", + argLen: 3, + asm: x86.AVPSLLVW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -28423,14 +28531,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUW256", - argLen: 2, - commutative: true, - asm: x86.AVPMAXUW, + name: "VPSLLVWMasked256", + argLen: 3, + asm: x86.AVPSLLVW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28438,10 +28546,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUWMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMAXUW, + name: "VPSLLVWMasked512", + argLen: 3, + asm: x86.AVPSLLVW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -28454,10 +28561,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUW256", - argLen: 2, - commutative: true, - asm: x86.AVPMINUW, + name: "VPSLLW128", + argLen: 2, + asm: x86.AVPSLLW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28469,15 +28575,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUWMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMINUW, + name: "VPSLLW256", + argLen: 2, + asm: x86.AVPSLLW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28485,44 +28589,43 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHUW256", - argLen: 2, - commutative: true, - asm: x86.AVPMULHUW, + name: "VPSLLW512", + argLen: 2, + asm: x86.AVPSLLW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPMULHUWMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMULHUW, + name: "VPSLLWMasked128", + argLen: 3, + asm: x86.AVPSLLW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPERMW256", - argLen: 2, - asm: x86.AVPERMW, + name: "VPSLLWMasked256", + argLen: 3, + asm: x86.AVPSLLW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28530,32 +28633,28 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2W256", - argLen: 3, - resultInArg0: true, - asm: x86.AVPERMI2W, + name: "VPSLLWMasked512", + argLen: 3, + asm: x86.AVPSLLW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPERMI2WMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPERMI2W, + name: "VPSRAD128", + argLen: 2, + asm: x86.AVPSRAD, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28563,14 +28662,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMWMasked256", - argLen: 3, - asm: x86.AVPERMW, + name: "VPSRAD256", + argLen: 2, + asm: x86.AVPSRAD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28578,23 +28676,23 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLW256", + name: "VPSRAD512", argLen: 2, - asm: x86.AVPSRLW, + asm: x86.AVPSRAD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSRLWMasked256", + name: "VPSRADMasked128", argLen: 3, - asm: x86.AVPSRLW, + asm: x86.AVPSRAD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -28607,13 +28705,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVW256", - argLen: 2, - asm: x86.AVPSRLVW, + name: "VPSRADMasked256", + argLen: 3, + asm: x86.AVPSRAD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28621,29 +28720,28 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVWMasked256", + name: "VPSRADMasked512", argLen: 3, - asm: x86.AVPSRLVW, + asm: x86.AVPSRAD, reg: regInfo{ inputs: 
[]inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPAVGW512", - argLen: 2, - commutative: true, - asm: x86.AVPAVGW, + name: "VPSRAQ128", + argLen: 2, + asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28651,30 +28749,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGWMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPAVGW, + name: "VPSRAQ256", + argLen: 2, + asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPMAXUW512", - argLen: 2, - commutative: true, - asm: x86.AVPMAXUW, + name: "VPSRAQ512", + argLen: 2, + asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28682,30 +28777,29 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUWMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMAXUW, + name: "VPSRAQMasked128", + argLen: 3, + asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPMINUW512", - argLen: 2, - commutative: true, - asm: x86.AVPMINUW, + name: "VPSRAQMasked256", + argLen: 3, + asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28713,46 +28807,42 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUWMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMINUW, + name: "VPSRAQMasked512", + argLen: 3, + asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPMULHUW512", - argLen: 2, - commutative: true, - asm: x86.AVPMULHUW, + name: "VPSRAVD128", + argLen: 2, + asm: x86.AVPSRAVD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPMULHUWMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMULHUW, + name: "VPSRAVD256", + argLen: 2, + asm: x86.AVPSRAVD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28760,9 +28850,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMW512", + name: "VPSRAVD512", argLen: 2, - asm: x86.AVPERMW, + asm: x86.AVPSRAVD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 
X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28774,15 +28864,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2W512", - argLen: 3, - resultInArg0: true, - asm: x86.AVPERMI2W, + name: "VPSRAVDMasked128", + argLen: 3, + asm: x86.AVPSRAVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28790,16 +28879,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2WMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVPERMI2W, + name: "VPSRAVDMasked256", + argLen: 3, + asm: x86.AVPSRAVD, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28807,9 +28894,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMWMasked512", + name: "VPSRAVDMasked512", argLen: 3, - asm: x86.AVPERMW, + asm: x86.AVPSRAVD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -28822,13 +28909,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLW512", + name: "VPSRAVQ128", argLen: 2, - asm: x86.AVPSRLW, + asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28836,14 +28923,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLWMasked512", - argLen: 3, - asm: x86.AVPSRLW, + name: "VPSRAVQ256", + argLen: 2, + asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28851,9 +28937,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVW512", + name: "VPSRAVQ512", argLen: 2, - asm: x86.AVPSRLVW, + asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28865,9 +28951,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVWMasked512", + name: "VPSRAVQMasked128", argLen: 3, - asm: x86.AVPSRLVW, + asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -28880,14 +28966,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGW128", - argLen: 2, - commutative: true, - asm: x86.AVPAVGW, + name: "VPSRAVQMasked256", + argLen: 3, + asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28895,10 +28981,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGWMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPAVGW, + name: "VPSRAVQMasked512", + argLen: 3, + asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -28911,56 +28996,51 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUW128", - argLen: 2, - commutative: true, - asm: x86.AVPMAXUW, + name: "VPSRAVW128", + argLen: 2, + asm: x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPMAXUWMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMAXUW, + name: "VPSRAVW256", + argLen: 2, + asm: x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPMINUW128", - argLen: 2, - commutative: true, - asm: x86.AVPMINUW, + name: "VPSRAVW512", + argLen: 2, + asm: x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 
X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPMINUWMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMINUW, + name: "VPSRAVWMasked128", + argLen: 3, + asm: x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -28973,14 +29053,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHUW128", - argLen: 2, - commutative: true, - asm: x86.AVPMULHUW, + name: "VPSRAVWMasked256", + argLen: 3, + asm: x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28988,10 +29068,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHUWMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMULHUW, + name: "VPSRAVWMasked512", + argLen: 3, + asm: x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -29004,29 +29083,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMW128", + name: "VPSRAW128", argLen: 2, - asm: x86.AVPERMW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VPERMI2W128", - argLen: 3, - resultInArg0: true, - asm: x86.AVPERMI2W, + asm: x86.AVPSRAW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29034,31 +29097,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2WMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVPERMI2W, + name: "VPSRAW256", + argLen: 2, + asm: x86.AVPSRAW, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPERMWMasked128", - argLen: 3, - asm: x86.AVPERMW, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29066,23 +29111,23 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLW128", + name: "VPSRAW512", argLen: 2, - asm: x86.AVPSRLW, + asm: x86.AVPSRAW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSRLWMasked128", + name: "VPSRAWMasked128", argLen: 3, - asm: x86.AVPSRLW, + asm: x86.AVPSRAW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -29095,13 +29140,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVW128", - argLen: 2, - asm: x86.AVPSRLVW, + name: "VPSRAWMasked256", + argLen: 3, + asm: x86.AVPSRAW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29109,45 +29155,42 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVWMasked128", + name: "VPSRAWMasked512", argLen: 3, - asm: x86.AVPSRLVW, + asm: x86.AVPSRAW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPMAXUD512", - argLen: 2, - commutative: true, - asm: x86.AVPMAXUD, + name: "VPSRLD128", + argLen: 2, + asm: x86.AVPSRLD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPMAXUDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMAXUD, + name: "VPSRLD256", + argLen: 2, + asm: x86.AVPSRLD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29155,14 +29198,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUD512", - argLen: 2, - commutative: true, - asm: x86.AVPMINUD, + name: "VPSRLD512", + argLen: 2, + asm: x86.AVPSRLD, reg: regInfo{ inputs: []inputInfo{ + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29170,29 +29212,29 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMINUD, + name: "VPSRLDMasked128", + argLen: 3, + asm: x86.AVPSRLD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPERMD512", - argLen: 2, - asm: x86.AVPERMD, + name: "VPSRLDMasked256", + argLen: 3, + asm: x86.AVPSRLD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29200,13 +29242,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMPS512", - argLen: 2, - asm: x86.AVPERMPS, + name: "VPSRLDMasked512", + argLen: 3, + asm: x86.AVPSRLD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29214,15 +29257,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2PS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVPERMI2PS, + name: "VPSRLQ128", + argLen: 2, + asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29230,15 +29271,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2D512", - argLen: 3, - resultInArg0: true, - asm: x86.AVPERMI2D, + name: "VPSRLQ256", + argLen: 2, + asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29246,95 +29285,89 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2PSMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVPERMI2PS, + name: "VPSRLQ512", + argLen: 2, + asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPERMI2DMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVPERMI2D, + name: "VPSRLQMasked128", + argLen: 3, + asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 
X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPERMPSMasked512", + name: "VPSRLQMasked256", argLen: 3, - asm: x86.AVPERMPS, + asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPERMDMasked512", + name: "VPSRLQMasked512", argLen: 3, - asm: x86.AVPERMD, + asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSRLD512", + name: "VPSRLVD128", argLen: 2, - asm: x86.AVPSRLD, + asm: x86.AVPSRLVD, reg: regInfo{ inputs: []inputInfo{ - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSRLDMasked512", - argLen: 3, - asm: x86.AVPSRLD, + name: "VPSRLVD256", + argLen: 2, + asm: x86.AVPSRLVD, reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -29353,7 +29386,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVDMasked512", + name: "VPSRLVDMasked128", argLen: 3, asm: x86.AVPSRLVD, reg: regInfo{ @@ -29368,14 +29401,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUD128", - argLen: 2, - commutative: true, - asm: x86.AVPMAXUD, + name: "VPSRLVDMasked256", + argLen: 3, + asm: x86.AVPSRLVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29383,10 +29416,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMAXUD, + name: "VPSRLVDMasked512", + argLen: 3, + asm: x86.AVPSRLVD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -29399,10 +29431,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUD128", - argLen: 2, - commutative: true, - asm: x86.AVPMINUD, + name: "VPSRLVQ128", + argLen: 2, + asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29414,26 +29445,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMINUD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPMULUDQ128", - argLen: 2, - commutative: true, - asm: x86.AVPMULUDQ, + name: "VPSRLVQ256", + argLen: 2, + asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29445,31 +29459,28 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2PS128", - argLen: 3, - resultInArg0: true, - asm: x86.AVPERMI2PS, + name: "VPSRLVQ512", + argLen: 2, + asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPERMI2D128", - argLen: 3, - resultInArg0: true, - asm: x86.AVPERMI2D, + name: "VPSRLVQMasked128", + argLen: 3, + asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29477,16 +29488,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2DMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVPERMI2D, + name: "VPSRLVQMasked256", + argLen: 3, + 
asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29494,16 +29503,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2PSMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVPERMI2PS, + name: "VPSRLVQMasked512", + argLen: 3, + asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29511,28 +29518,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLD128", + name: "VPSRLVW128", argLen: 2, - asm: x86.AVPSRLD, + asm: x86.AVPSRLVW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSRLDMasked128", - argLen: 3, - asm: x86.AVPSRLD, + name: "VPSRLVW256", + argLen: 2, + asm: x86.AVPSRLVW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29540,23 +29546,23 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVD128", + name: "VPSRLVW512", argLen: 2, - asm: x86.AVPSRLVD, + asm: x86.AVPSRLVW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSRLVDMasked128", + name: "VPSRLVWMasked128", argLen: 3, - asm: x86.AVPSRLVD, + asm: x86.AVPSRLVW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -29569,14 +29575,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUD256", - argLen: 2, - commutative: true, - asm: x86.AVPMAXUD, + name: "VPSRLVWMasked256", + argLen: 3, + asm: x86.AVPSRLVW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29584,10 +29590,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMAXUD, + name: "VPSRLVWMasked512", + argLen: 3, + asm: x86.AVPSRLVW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -29600,10 +29605,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUD256", - argLen: 2, - commutative: true, - asm: x86.AVPMINUD, + name: "VPSRLW128", + argLen: 2, + asm: x86.AVPSRLW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29615,15 +29619,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMINUD, + name: "VPSRLW256", + argLen: 2, + asm: x86.AVPSRLW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29631,58 +29633,72 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULUDQ256", - argLen: 2, - commutative: true, - asm: x86.AVPMULUDQ, + name: "VPSRLW512", + argLen: 2, + asm: x86.AVPSRLW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPERMD256", - argLen: 2, - asm: x86.AVPERMD, + name: "VPSRLWMasked128", + argLen: 3, + asm: x86.AVPSRLW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
+ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPERMPS256", - argLen: 2, - asm: x86.AVPERMPS, + name: "VPSRLWMasked256", + argLen: 3, + asm: x86.AVPSRLW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPERMI2PS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVPERMI2PS, + name: "VPSRLWMasked512", + argLen: 3, + asm: x86.AVPSRLW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSUBB128", + argLen: 2, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29690,15 +29706,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2D256", - argLen: 3, - resultInArg0: true, - asm: x86.AVPERMI2D, + name: "VPSUBB256", + argLen: 2, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29706,33 +29720,28 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2PSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPERMI2PS, + name: "VPSUBB512", + argLen: 2, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 
X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPERMI2DMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPERMI2D, + name: "VPSUBBMasked128", + argLen: 3, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29740,9 +29749,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMPSMasked256", + name: "VPSUBBMasked256", argLen: 3, - asm: x86.AVPERMPS, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -29755,9 +29764,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMDMasked256", + name: "VPSUBBMasked512", argLen: 3, - asm: x86.AVPERMD, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -29770,9 +29779,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLD256", + name: "VPSUBD128", argLen: 2, - asm: x86.AVPSRLD, + asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29784,38 +29793,37 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLDMasked256", - argLen: 3, - asm: x86.AVPSRLD, + name: "VPSUBD256", + argLen: 2, + asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSRLVD256", + name: "VPSUBD512", argLen: 2, - asm: x86.AVPSRLVD, + asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSRLVDMasked256", + name: "VPSUBDMasked128", argLen: 3, - asm: x86.AVPSRLVD, + asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -29828,25 +29836,24 @@ var opcodeTable = [...]opInfo{ }, }, { - name: 
"VPMAXUQ128", - argLen: 2, - commutative: true, - asm: x86.AVPMAXUQ, + name: "VPSUBDMasked256", + argLen: 3, + asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPMAXUQMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMAXUQ, + name: "VPSUBDMasked512", + argLen: 3, + asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -29859,30 +29866,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUQ128", - argLen: 2, - commutative: true, - asm: x86.AVPMINUQ, + name: "VPSUBQ128", + argLen: 2, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPMINUQMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMINUQ, + name: "VPSUBQ256", + argLen: 2, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29890,31 +29894,28 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULUDQMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMULUDQ, + name: "VPSUBQ512", + argLen: 2, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 
X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPERMI2PD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVPERMI2PD, + name: "VPSUBQMasked128", + argLen: 3, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29922,15 +29923,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2Q128", - argLen: 3, - resultInArg0: true, - asm: x86.AVPERMI2Q, + name: "VPSUBQMasked256", + argLen: 3, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29938,16 +29938,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2PDMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVPERMI2PD, + name: "VPSUBQMasked512", + argLen: 3, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29955,16 +29953,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2QMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVPERMI2Q, + name: "VPSUBSB128", + argLen: 2, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29972,9 +29967,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLQ128", + name: "VPSUBSB256", argLen: 2, - asm: x86.AVPSRLQ, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29986,14 +29981,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLQMasked128", - argLen: 3, - asm: x86.AVPSRLQ, + name: "VPSUBSB512", + argLen: 2, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 
X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -30001,13 +29995,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVQ128", - argLen: 2, - asm: x86.AVPSRLVQ, + name: "VPSUBSBMasked128", + argLen: 3, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30015,9 +30010,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVQMasked128", + name: "VPSUBSBMasked256", argLen: 3, - asm: x86.AVPSRLVQ, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -30030,30 +30025,28 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUQ256", - argLen: 2, - commutative: true, - asm: x86.AVPMAXUQ, + name: "VPSUBSBMasked512", + argLen: 3, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPMAXUQMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMAXUQ, + name: "VPSUBSW128", + argLen: 2, + asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30061,41 +30054,37 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUQ256", - argLen: 2, - commutative: true, - asm: x86.AVPMINUQ, + name: "VPSUBSW256", + argLen: 2, + asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPMINUQMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMINUQ, + name: "VPSUBSW512", + argLen: 2, + asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPMULUDQMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMULUDQ, + name: "VPSUBSWMasked128", + argLen: 3, + asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -30108,43 +30097,43 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMPD256", - argLen: 2, - asm: x86.AVPERMPD, + name: "VPSUBSWMasked256", + argLen: 3, + asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPERMQ256", - argLen: 2, - asm: x86.AVPERMQ, + name: "VPSUBSWMasked512", + argLen: 3, + asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPERMI2PD256", - argLen: 3, - resultInArg0: true, - asm: x86.AVPERMI2PD, + name: "VPSUBW128", + argLen: 2, + asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30152,15 +30141,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2Q256", - argLen: 3, - resultInArg0: true, - asm: x86.AVPERMI2Q, + name: "VPSUBW256", + argLen: 2, + asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30168,33 +30155,28 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2PDMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPERMI2PD, + name: "VPSUBW512", + argLen: 2, + asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPERMI2QMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPERMI2Q, + name: "VPSUBWMasked128", + argLen: 3, + asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30202,9 +30184,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMQMasked256", + name: "VPSUBWMasked256", argLen: 3, - asm: x86.AVPERMQ, + asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -30217,9 +30199,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMPDMasked256", + name: "VPSUBWMasked512", argLen: 3, - asm: x86.AVPERMPD, + asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -30232,9 +30214,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLQ256", - argLen: 2, - asm: x86.AVPSRLQ, + name: "VPXOR128", + argLen: 2, + commutative: true, + asm: x86.AVPXOR, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30246,38 +30229,40 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLQMasked256", - argLen: 3, - asm: x86.AVPSRLQ, + name: "VPXOR256", + argLen: 2, + commutative: true, + asm: x86.AVPXOR, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSRLVQ256", - argLen: 2, - asm: x86.AVPSRLVQ, + name: "VPXORD512", + argLen: 2, + commutative: true, + asm: x86.AVPXORD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSRLVQMasked256", - argLen: 3, - asm: x86.AVPSRLVQ, + name: "VPXORDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPXORD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -30290,25 +30275,26 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUQ512", - argLen: 2, + name: "VPXORDMasked256", + argLen: 3, commutative: true, - asm: x86.AVPMAXUQ, + asm: x86.AVPXORD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPMAXUQMasked512", + name: "VPXORDMasked512", argLen: 3, commutative: true, - asm: x86.AVPMAXUQ, + asm: x86.AVPXORD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -30321,10 +30307,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUQ512", + name: "VPXORQ512", argLen: 2, commutative: true, - asm: x86.AVPMINUQ, + asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -30336,10 +30322,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUQMasked512", + name: "VPXORQMasked128", argLen: 3, commutative: true, - asm: x86.AVPMINUQ, + asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -30352,25 +30338,26 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULUDQ512", - argLen: 2, + name: "VPXORQMasked256", + argLen: 3, commutative: true, 
- asm: x86.AVPMULUDQ, + asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPMULUDQMasked512", + name: "VPXORQMasked512", argLen: 3, commutative: true, - asm: x86.AVPMULUDQ, + asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -30383,13 +30370,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMPD512", - argLen: 2, - asm: x86.AVPERMPD, + name: "VRCP14PD128", + argLen: 1, + asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -30397,13 +30383,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMQ512", - argLen: 2, - asm: x86.AVPERMQ, + name: "VRCP14PD256", + argLen: 1, + asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -30411,31 +30396,26 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2Q512", - argLen: 3, - resultInArg0: true, - asm: x86.AVPERMI2Q, + name: "VRCP14PD512", + argLen: 1, + asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPERMI2PD512", - argLen: 3, - resultInArg0: true, - asm: x86.AVPERMI2PD, + name: "VRCP14PDMasked128", + argLen: 2, + asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30443,16 +30423,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2QMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVPERMI2Q, + name: "VRCP14PDMasked256", + argLen: 2, + asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30460,16 +30437,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2PDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVPERMI2PD, + name: "VRCP14PDMasked512", + argLen: 2, + asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30477,14 +30451,26 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMPDMasked512", - argLen: 3, - asm: x86.AVPERMPD, + name: "VRCP14PS512", + argLen: 1, + asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VRCP14PSMasked128", + argLen: 2, + asm: x86.AVRCP14PS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30492,14 +30478,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMQMasked512", - argLen: 3, - asm: x86.AVPERMQ, + name: "VRCP14PSMasked256", + argLen: 2, + asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30507,12 +30492,51 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLQ512", + name: "VRCP14PSMasked512", argLen: 2, - asm: x86.AVPSRLQ, + asm: x86.AVRCP14PS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, 
+ }, + { + name: "VRCPPS128", + argLen: 1, + asm: x86.AVRCPPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VRCPPS256", + argLen: 1, + asm: x86.AVRCPPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VRSQRT14PD128", + argLen: 1, + asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -30521,14 +30545,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLQMasked512", - argLen: 3, - asm: x86.AVPSRLQ, + name: "VRSQRT14PD256", + argLen: 1, + asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -30536,13 +30558,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVQ512", - argLen: 2, - asm: x86.AVPSRLVQ, + name: "VRSQRT14PD512", + argLen: 1, + asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -30550,14 +30571,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVQMasked512", - argLen: 3, - asm: x86.AVPSRLVQ, + name: "VRSQRT14PDMasked128", + argLen: 2, + asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30565,14 +30585,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGB128", - argLen: 2, - commutative: true, - asm: x86.AVPAVGB, + name: "VRSQRT14PDMasked256", + argLen: 2, + asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30580,15 
+30599,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGBMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPAVGB, + name: "VRSQRT14PDMasked512", + argLen: 2, + asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30596,13 +30613,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VGF2P8MULB128", - argLen: 2, - asm: x86.AVGF2P8MULB, + name: "VRSQRT14PS512", + argLen: 1, + asm: x86.AVRSQRT14PS, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -30610,14 +30626,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VGF2P8MULBMasked128", - argLen: 3, - asm: x86.AVGF2P8MULB, + name: "VRSQRT14PSMasked128", + argLen: 2, + asm: x86.AVRSQRT14PS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30625,14 +30640,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUB128", - argLen: 2, - commutative: true, - asm: x86.AVPMAXUB, + name: "VRSQRT14PSMasked256", + argLen: 2, + asm: x86.AVRSQRT14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30640,15 +30654,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUBMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMAXUB, + name: "VRSQRT14PSMasked512", + argLen: 2, + asm: x86.AVRSQRT14PS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30656,14 +30668,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUB128", - argLen: 2, - commutative: true, - asm: x86.AVPMINUB, + name: "VRSQRTPS128", + argLen: 1, + asm: x86.AVRSQRTPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30671,15 +30681,12 @@ var opcodeTable = [...]opInfo{ }, }, { - 
name: "VPMINUBMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMINUB, + name: "VRSQRTPS256", + argLen: 1, + asm: x86.AVRSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30687,9 +30694,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMB128", + name: "VSCALEFPD128", argLen: 2, - asm: x86.AVPERMB, + asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -30701,42 +30708,37 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2B128", - argLen: 3, - resultInArg0: true, - asm: x86.AVPERMI2B, + name: "VSCALEFPD256", + argLen: 2, + asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPERMI2BMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVPERMI2B, + name: "VSCALEFPD512", + argLen: 2, + asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPERMBMasked128", + name: "VSCALEFPDMasked128", argLen: 3, - asm: x86.AVPERMB, + asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -30749,13 +30751,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMADDUBSW128", - argLen: 2, - asm: x86.AVPMADDUBSW, + name: "VSCALEFPDMasked256", + argLen: 3, + asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30763,9 +30766,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMADDUBSWMasked128", + name: "VSCALEFPDMasked512", argLen: 3, - asm: x86.AVPMADDUBSW, + asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -30778,40 +30781,37 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGB256", - argLen: 2, - commutative: true, - asm: x86.AVPAVGB, + name: "VSCALEFPS128", + argLen: 2, + asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPAVGBMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPAVGB, + name: "VSCALEFPS256", + argLen: 2, + asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VGF2P8MULB256", + name: "VSCALEFPS512", argLen: 2, - asm: x86.AVGF2P8MULB, + asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -30823,9 +30823,39 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VGF2P8MULBMasked256", + name: "VSCALEFPSMasked128", argLen: 3, - asm: x86.AVGF2P8MULB, + asm: x86.AVSCALEFPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VSCALEFPSMasked256", + argLen: 3, + asm: x86.AVSCALEFPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VSCALEFPSMasked512", + argLen: 3, + asm: x86.AVSCALEFPS, reg: regInfo{ inputs: 
[]inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -30838,14 +30868,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUB256", - argLen: 2, - commutative: true, - asm: x86.AVPMAXUB, + name: "VSQRTPD128", + argLen: 1, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30853,15 +30881,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUBMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMAXUB, + name: "VSQRTPD256", + argLen: 1, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30869,30 +30894,26 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUB256", - argLen: 2, - commutative: true, - asm: x86.AVPMINUB, + name: "VSQRTPD512", + argLen: 1, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPMINUBMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMINUB, + name: "VSQRTPDMasked128", + argLen: 2, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30900,29 +30921,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMB256", + name: "VSQRTPDMasked256", argLen: 2, - asm: x86.AVPERMB, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPERMI2B256", - argLen: 3, - resultInArg0: true, - asm: x86.AVPERMI2B, + name: "VSQRTPDMasked512", + argLen: 2, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30930,16 +30949,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2BMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPERMI2B, + name: "VSQRTPS128", + argLen: 1, + asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30947,14 +30962,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMBMasked256", - argLen: 3, - asm: x86.AVPERMB, + name: "VSQRTPS256", + argLen: 1, + asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30962,28 +30975,26 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMADDUBSW256", - argLen: 2, - asm: x86.AVPMADDUBSW, + name: "VSQRTPS512", + argLen: 1, + asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPMADDUBSWMasked256", - argLen: 3, - asm: x86.AVPMADDUBSW, + name: "VSQRTPSMasked128", + argLen: 2, + asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30991,30 +31002,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGB512", - argLen: 2, - commutative: true, - asm: x86.AVPAVGB, + name: "VSQRTPSMasked256", + argLen: 2, + asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPAVGBMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPAVGB, + name: "VSQRTPSMasked512", + argLen: 2, + asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31022,28 +31030,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VGF2P8MULB512", + name: "VSUBPD128", argLen: 2, - asm: x86.AVGF2P8MULB, + asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VGF2P8MULBMasked512", - argLen: 3, - asm: x86.AVGF2P8MULB, + name: "VSUBPD256", + argLen: 2, + asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31051,10 +31058,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUB512", - argLen: 2, - commutative: true, - asm: x86.AVPMAXUB, + name: "VSUBPD512", + argLen: 2, + asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -31066,10 +31072,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUBMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMAXUB, + name: "VSUBPDMasked128", + argLen: 3, + asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -31082,25 +31087,24 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUB512", - argLen: 2, - commutative: true, - asm: x86.AVPMINUB, + name: "VSUBPDMasked256", + argLen: 3, + asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: 
[]outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPMINUBMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMINUB, + name: "VSUBPDMasked512", + argLen: 3, + asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -31113,29 +31117,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMB512", + name: "VSUBPS128", argLen: 2, - asm: x86.AVPERMB, + asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPERMI2B512", - argLen: 3, - resultInArg0: true, - asm: x86.AVPERMI2B, + name: "VSUBPS256", + argLen: 2, + asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31143,26 +31145,23 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2BMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVPERMI2B, + name: "VSUBPS512", + argLen: 2, + asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPERMBMasked512", + name: "VSUBPSMasked128", argLen: 3, - asm: x86.AVPERMB, + asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -31175,23 +31174,24 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMADDUBSW512", - argLen: 2, - asm: x86.AVPMADDUBSW, + name: "VSUBPSMasked256", + argLen: 3, + asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPMADDUBSWMasked512", + name: "VSUBPSMasked512", argLen: 3, - asm: x86.AVPMADDUBSW, + asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -31204,28 +31204,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRNDSCALEPS512", + name: "VROUNDPS128", auxType: auxInt8, argLen: 1, - asm: x86.AVRNDSCALEPS, + asm: x86.AVROUNDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VRNDSCALEPSMasked512", + name: "VROUNDPS256", auxType: auxInt8, - argLen: 2, - asm: x86.AVRNDSCALEPS, + argLen: 1, + asm: x86.AVROUNDPS, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31233,28 +31232,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPS512", + name: "VROUNDPD128", auxType: auxInt8, argLen: 1, - asm: x86.AVREDUCEPS, + asm: x86.AVROUNDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VREDUCEPSMasked512", + name: "VROUNDPD256", auxType: auxInt8, - argLen: 2, - asm: x86.AVREDUCEPS, + argLen: 1, + asm: x86.AVROUNDPD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31262,57 +31260,52 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPS512", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVCMPPS, + name: "VRNDSCALEPS128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VCMPPSMasked512", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVCMPPS, + name: "VRNDSCALEPS256", + auxType: auxInt8, + argLen: 1, + asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VROUNDPS128", + name: "VRNDSCALEPS512", auxType: auxInt8, argLen: 1, - asm: x86.AVROUNDPS, + asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VRNDSCALEPS128", + name: "VRNDSCALEPD128", auxType: auxInt8, argLen: 1, - asm: x86.AVRNDSCALEPS, + asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -31323,25 +31316,24 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRNDSCALEPSMasked128", + name: "VRNDSCALEPD256", auxType: auxInt8, - argLen: 2, - asm: x86.AVRNDSCALEPS, + argLen: 1, + asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VREDUCEPS128", + name: "VRNDSCALEPD512", auxType: auxInt8, argLen: 1, - asm: x86.AVREDUCEPS, + asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -31352,10 +31344,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPSMasked128", + name: "VRNDSCALEPSMasked128", auxType: auxInt8, argLen: 2, - asm: x86.AVREDUCEPS, + asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -31367,31 +31359,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDPPS128", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVDPPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VCMPPS128", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVCMPPS, + name: "VRNDSCALEPSMasked256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31399,30 +31374,29 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPSMasked128", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVCMPPS, + name: "VRNDSCALEPSMasked512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VROUNDPS256", + name: "VRNDSCALEPDMasked128", auxType: auxInt8, - argLen: 1, - asm: x86.AVROUNDPS, + argLen: 2, + asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31430,24 +31404,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRNDSCALEPS256", + name: "VRNDSCALEPDMasked256", auxType: auxInt8, - argLen: 1, - asm: x86.AVRNDSCALEPS, + argLen: 2, + asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VRNDSCALEPSMasked256", + name: "VRNDSCALEPDMasked512", auxType: auxInt8, argLen: 2, - asm: x86.AVRNDSCALEPS, + asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -31459,7 +31434,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPS256", + name: "VREDUCEPS128", auxType: auxInt8, argLen: 1, asm: x86.AVREDUCEPS, @@ -31473,92 +31448,84 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPSMasked256", + name: "VREDUCEPS256", auxType: auxInt8, - argLen: 2, + argLen: 1, asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, 
outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VDPPS256", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVDPPS, + name: "VREDUCEPS512", + auxType: auxInt8, + argLen: 1, + asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VCMPPS256", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVCMPPS, + name: "VREDUCEPD128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VCMPPSMasked256", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVCMPPS, + name: "VREDUCEPD256", + auxType: auxInt8, + argLen: 1, + asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VEXTRACTF128128", + name: "VREDUCEPD512", auxType: auxInt8, argLen: 1, - asm: x86.AVEXTRACTF128, + asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VINSERTF128256", + name: "VREDUCEPSMasked128", auxType: auxInt8, argLen: 2, - asm: x86.AVINSERTF128, + asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31566,13 +31533,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VROUNDPD128", + name: "VREDUCEPSMasked256", auxType: auxInt8, - argLen: 1, - asm: x86.AVROUNDPD, + argLen: 2, + asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31580,24 +31548,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRNDSCALEPD128", + name: "VREDUCEPSMasked512", auxType: auxInt8, - argLen: 1, - asm: x86.AVRNDSCALEPD, + argLen: 2, + asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VRNDSCALEPDMasked128", + name: "VREDUCEPDMasked128", auxType: auxInt8, argLen: 2, - asm: x86.AVRNDSCALEPD, + asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -31609,21 +31578,22 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPD128", + name: "VREDUCEPDMasked256", auxType: auxInt8, - argLen: 1, + argLen: 2, asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VREDUCEPDMasked128", + name: "VREDUCEPDMasked512", auxType: auxInt8, argLen: 2, asm: x86.AVREDUCEPD, @@ -31638,11 +31608,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDPPD128", + name: "VDPPS128", auxType: auxInt8, argLen: 2, commutative: true, - asm: x86.AVDPPD, + asm: x86.AVDPPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31654,11 +31624,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPD128", + name: "VDPPS256", auxType: auxInt8, argLen: 2, commutative: true, - asm: x86.AVCMPPD, + asm: x86.AVDPPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31670,30 +31640,31 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPDMasked128", + name: "VDPPD128", auxType: auxInt8, - argLen: 3, + argLen: 2, commutative: true, - asm: x86.AVCMPPD, + asm: x86.AVDPPD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VROUNDPD256", - auxType: auxInt8, - argLen: 1, - asm: x86.AVROUNDPD, + name: "VCMPPS128", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31701,57 +31672,63 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRNDSCALEPD256", - auxType: auxInt8, - argLen: 1, - asm: x86.AVRNDSCALEPD, + name: "VCMPPS256", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VRNDSCALEPDMasked256", - auxType: auxInt8, - argLen: 2, - asm: x86.AVRNDSCALEPD, + name: "VCMPPS512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VREDUCEPD256", - auxType: auxInt8, - argLen: 1, - asm: x86.AVREDUCEPD, + name: "VCMPPD128", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VREDUCEPDMasked256", - auxType: auxInt8, - argLen: 2, - asm: x86.AVREDUCEPD, + name: "VCMPPD256", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31759,7 +31736,7 @@ var opcodeTable = [...]opInfo{ }, }, { - 
name: "VCMPPD256", + name: "VCMPPD512", auxType: auxInt8, argLen: 2, commutative: true, @@ -31770,16 +31747,16 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VCMPPDMasked256", + name: "VCMPPSMasked128", auxType: auxInt8, argLen: 3, commutative: true, - asm: x86.AVCMPPD, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -31792,73 +31769,84 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRNDSCALEPD512", - auxType: auxInt8, - argLen: 1, - asm: x86.AVRNDSCALEPD, + name: "VCMPPSMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VRNDSCALEPDMasked512", - auxType: auxInt8, - argLen: 2, - asm: x86.AVRNDSCALEPD, + name: "VCMPPSMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VREDUCEPD512", - auxType: auxInt8, - argLen: 1, - asm: x86.AVREDUCEPD, + name: "VCMPPDMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VREDUCEPDMasked512", - auxType: auxInt8, - argLen: 2, - asm: x86.AVREDUCEPD, + name: "VCMPPDMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VCMPPD512", + name: "VCMPPDMasked512", auxType: auxInt8, - argLen: 2, 
+ argLen: 3, commutative: true, asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -31866,11 +31854,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPDMasked512", + name: "VPCMPBMasked128", auxType: auxInt8, argLen: 3, commutative: true, - asm: x86.AVCMPPD, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -31883,11 +31871,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPWMasked256", + name: "VPCMPBMasked256", auxType: auxInt8, argLen: 3, commutative: true, - asm: x86.AVPCMPW, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -31900,14 +31888,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPW256", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPW, + name: "VPCMPBMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -31915,25 +31905,28 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDW256", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPSHLDW, + name: "VPCMPWMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSHLDWMasked256", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHLDW, + name: "VPCMPWMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -31941,30 +31934,33 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSHRDW256", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPSHRDW, + name: "VPCMPWMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSHRDWMasked256", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHRDW, + name: "VPCMPDMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -31972,16 +31968,16 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPCMPWMasked512", + name: "VPCMPDMasked256", auxType: auxInt8, argLen: 3, commutative: true, - asm: x86.AVPCMPW, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -31994,14 +31990,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPW512", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPW, + name: "VPCMPDMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -32009,25 +32007,28 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDW512", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPSHLDW, + name: "VPCMPQMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSHLDWMasked512", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHLDW, + name: "VPCMPQMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -32035,30 +32036,33 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSHRDW512", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPSHRDW, + name: "VPCMPQMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSHRDWMasked512", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHRDW, + name: "VPCMPUBMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -32066,16 +32070,16 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPCMPWMasked128", + name: "VPCMPUBMasked256", auxType: auxInt8, argLen: 3, commutative: true, - asm: x86.AVPCMPW, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -32088,28 +32092,33 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPEXTRW128", - auxType: auxInt8, - argLen: 1, - asm: x86.AVPEXTRW, + name: "VPCMPUBMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPCMPW128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPW, + name: "VPCMPUWMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -32117,40 +32126,45 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPINSRW128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPINSRW, + name: "VPCMPUWMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSHLDW128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPSHLDW, + name: "VPCMPUWMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSHLDWMasked128", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHLDW, + name: "VPCMPUDMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -32158,30 +32172,33 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSHRDW128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPSHRDW, + name: "VPCMPUDMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSHRDWMasked128", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHRDW, + name: "VPCMPUDMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -32189,16 +32206,16 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPCMPDMasked512", + name: "VPCMPUQMasked128", auxType: auxInt8, argLen: 3, commutative: true, - asm: x86.AVPCMPD, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // 
K1 K2 K3 K4 K5 K6 K7 @@ -32211,14 +32228,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPD512", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPD, + name: "VPCMPUQMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -32226,42 +32245,46 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLD512", - auxType: auxInt8, - argLen: 1, - asm: x86.AVPROLD, + name: "VPCMPUQMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPROLDMasked512", + name: "VGF2P8AFFINEQB128", auxType: auxInt8, argLen: 2, - asm: x86.AVPROLD, + asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPRORD512", + name: "VGF2P8AFFINEQB256", auxType: auxInt8, - argLen: 1, - asm: x86.AVPRORD, + argLen: 2, + asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -32269,25 +32292,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORDMasked512", + name: "VGF2P8AFFINEQB512", auxType: auxInt8, argLen: 2, - asm: x86.AVPRORD, + asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSHLDD512", + name: "VGF2P8AFFINEINVQB128", auxType: auxInt8, argLen: 2, - asm: x86.AVPSHLDD, + asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -32299,26 +32322,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDDMasked512", + name: "VGF2P8AFFINEINVQB256", auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHLDD, + argLen: 2, + asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSHRDD512", + name: "VGF2P8AFFINEINVQB512", auxType: auxInt8, argLen: 2, - asm: x86.AVPSHRDD, + asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -32330,10 +32352,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDDMasked512", + name: "VGF2P8AFFINEINVQBMasked128", auxType: auxInt8, argLen: 3, - asm: x86.AVPSHRDD, + asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -32346,74 +32368,63 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPDMasked128", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPEXTRD128", - auxType: auxInt8, - argLen: 1, - asm: x86.AVPEXTRD, + name: "VGF2P8AFFINEINVQBMasked256", + auxType: auxInt8, + argLen: 3, + asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPD128", + name: "VGF2P8AFFINEINVQBMasked512", auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPD, + argLen: 3, + asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ inputs: 
[]inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPROLD128", + name: "VGF2P8AFFINEQBMasked128", auxType: auxInt8, - argLen: 1, - asm: x86.AVPROLD, + argLen: 3, + asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPROLDMasked128", + name: "VGF2P8AFFINEQBMasked256", auxType: auxInt8, - argLen: 2, - asm: x86.AVPROLD, + argLen: 3, + asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -32421,28 +32432,29 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORD128", + name: "VGF2P8AFFINEQBMasked512", auxType: auxInt8, - argLen: 1, - asm: x86.AVPRORD, + argLen: 3, + asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPRORDMasked128", + name: "VEXTRACTF128128", auxType: auxInt8, - argLen: 2, - asm: x86.AVPRORD, + argLen: 1, + asm: x86.AVEXTRACTF128, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -32450,13 +32462,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPINSRD128", + name: "VEXTRACTI128128", auxType: auxInt8, - argLen: 2, - asm: x86.AVPINSRD, + argLen: 1, + asm: x86.AVEXTRACTI128, reg: regInfo{ inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ @@ -32465,78 +32476,70 @@ var opcodeTable = 
[...]opInfo{ }, }, { - name: "VPSHLDD128", + name: "VPEXTRB128", auxType: auxInt8, - argLen: 2, - asm: x86.AVPSHLDD, + argLen: 1, + asm: x86.AVPEXTRB, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, { - name: "VPSHLDDMasked128", + name: "VPEXTRW128", auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHLDD, + argLen: 1, + asm: x86.AVPEXTRW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, { - name: "VPSHRDD128", + name: "VPEXTRD128", auxType: auxInt8, - argLen: 2, - asm: x86.AVPSHRDD, + argLen: 1, + asm: x86.AVPEXTRD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, { - name: "VPSHRDDMasked128", + name: "VPEXTRQ128", auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHRDD, + argLen: 1, + asm: x86.AVPEXTRQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, { - name: "VPCMPDMasked256", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPD, + name: "VPCMPUB128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -32544,10 +32547,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPD256", + name: "VPCMPUB256", auxType: auxInt8, argLen: 2, - asm: x86.AVPCMPD, + asm: 
x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -32559,136 +32562,134 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLD256", + name: "VPCMPUB512", auxType: auxInt8, - argLen: 1, - asm: x86.AVPROLD, + argLen: 2, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPROLDMasked256", + name: "VPCMPUW128", auxType: auxInt8, argLen: 2, - asm: x86.AVPROLD, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPRORD256", + name: "VPCMPUW256", auxType: auxInt8, - argLen: 1, - asm: x86.AVPRORD, + argLen: 2, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPRORDMasked256", + name: "VPCMPUW512", auxType: auxInt8, argLen: 2, - asm: x86.AVPRORD, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSHLDD256", + name: "VPCMPUD128", auxType: auxInt8, argLen: 2, - asm: x86.AVPSHLDD, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSHLDDMasked256", + name: "VPCMPUD256", 
auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHLDD, + argLen: 2, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSHRDD256", + name: "VPCMPUD512", auxType: auxInt8, argLen: 2, - asm: x86.AVPSHRDD, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSHRDDMasked256", + name: "VPCMPUQ128", auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHRDD, + argLen: 2, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPCMPQMasked128", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPQ, + name: "VPCMPUQ256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -32696,24 +32697,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPEXTRQ128", + name: "VPCMPUQ512", auxType: auxInt8, - argLen: 1, - asm: x86.AVPEXTRQ, + argLen: 2, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPCMPQ128", + name: "VPCMPB128", auxType: auxInt8, argLen: 2, - asm: x86.AVPCMPQ, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -32725,151 +32727,149 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLQ128", + name: 
"VPCMPB256", auxType: auxInt8, - argLen: 1, - asm: x86.AVPROLQ, + argLen: 2, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPROLQMasked128", + name: "VPCMPB512", auxType: auxInt8, argLen: 2, - asm: x86.AVPROLQ, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPRORQ128", + name: "VPCMPW128", auxType: auxInt8, - argLen: 1, - asm: x86.AVPRORQ, + argLen: 2, + asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPRORQMasked128", + name: "VPCMPW256", auxType: auxInt8, argLen: 2, - asm: x86.AVPRORQ, + asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPINSRQ128", + name: "VPCMPW512", auxType: auxInt8, argLen: 2, - asm: x86.AVPINSRQ, + asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSHLDQ128", + name: "VPCMPD128", auxType: auxInt8, argLen: 2, - asm: x86.AVPSHLDQ, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSHLDQMasked128", + name: "VPCMPD256", auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHLDQ, + argLen: 2, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSHRDQ128", + name: "VPCMPD512", auxType: auxInt8, argLen: 2, - asm: x86.AVPSHRDQ, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSHRDQMasked128", + name: "VPCMPQ128", auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHRDQ, + argLen: 2, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPCMPQMasked256", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPQ, + name: "VPCMPQ256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -32877,7 +32877,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPQ256", + name: "VPCMPQ512", auxType: auxInt8, argLen: 2, asm: x86.AVPCMPQ, @@ -32892,10 +32892,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLQ256", + name: "VPROLD128", auxType: auxInt8, argLen: 1, - asm: x86.AVPROLQ, + asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -32906,25 +32906,24 @@ var opcodeTable = [...]opInfo{ }, 
}, { - name: "VPROLQMasked256", + name: "VPROLD256", auxType: auxInt8, - argLen: 2, - asm: x86.AVPROLQ, + argLen: 1, + asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPRORQ256", + name: "VPROLD512", auxType: auxInt8, argLen: 1, - asm: x86.AVPRORQ, + asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -32935,29 +32934,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORQMasked256", + name: "VPROLQ128", auxType: auxInt8, - argLen: 2, - asm: x86.AVPRORQ, + argLen: 1, + asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSHLDQ256", + name: "VPROLQ256", auxType: auxInt8, - argLen: 2, - asm: x86.AVPSHLDQ, + argLen: 1, + asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -32965,46 +32962,43 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDQMasked256", + name: "VPROLQ512", auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHLDQ, + argLen: 1, + asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSHRDQ256", + name: "VPROLDMasked128", auxType: auxInt8, argLen: 2, - asm: x86.AVPSHRDQ, + asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 
X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSHRDQMasked256", + name: "VPROLDMasked256", auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHRDQ, + argLen: 2, + asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -33012,48 +33006,47 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPQMasked512", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPQ, + name: "VPROLDMasked512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPQ512", + name: "VPROLQMasked128", auxType: auxInt8, argLen: 2, - asm: x86.AVPCMPQ, + asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPROLQ512", + name: "VPROLQMasked256", auxType: auxInt8, - argLen: 1, + argLen: 2, asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -33073,10 +33066,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORQ512", + name: "VPRORD128", auxType: auxInt8, argLen: 1, - asm: x86.AVPRORQ, + asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -33087,29 +33080,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORQMasked512", + name: "VPRORD256", auxType: auxInt8, - argLen: 2, - asm: x86.AVPRORQ, + argLen: 1, + asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSHLDQ512", + name: "VPRORD512", auxType: auxInt8, - argLen: 2, - asm: x86.AVPSHLDQ, + argLen: 1, + asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -33117,30 +33108,41 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDQMasked512", + name: "VPRORQ128", auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHLDQ, + argLen: 1, + asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSHRDQ512", + name: "VPRORQ256", auxType: auxInt8, - argLen: 2, - asm: x86.AVPSHRDQ, + argLen: 1, + asm: x86.AVPRORQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPRORQ512", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -33148,15 +33150,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDQMasked512", + name: "VPRORDMasked128", auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHRDQ, + argLen: 2, + asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -33164,60 +33165,59 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPBMasked128", - auxType: auxInt8, - argLen: 3, - 
commutative: true, - asm: x86.AVPCMPB, + name: "VPRORDMasked256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPEXTRB128", + name: "VPRORDMasked512", auxType: auxInt8, - argLen: 1, - asm: x86.AVPEXTRB, + argLen: 2, + asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPB128", + name: "VPRORQMasked128", auxType: auxInt8, argLen: 2, - asm: x86.AVPCMPB, + asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPINSRB128", + name: "VPRORQMasked256", auxType: auxInt8, argLen: 2, - asm: x86.AVPINSRB, + asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -33225,30 +33225,29 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPBMasked256", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPB, + name: "VPRORQMasked512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VEXTRACTI128128", + name: "VINSERTF128256", auxType: auxInt8, - argLen: 1, - asm: x86.AVEXTRACTI128, + argLen: 2, + asm: x86.AVINSERTF128, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -33256,29 +33255,29 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPB256", + name: "VINSERTI128256", auxType: auxInt8, argLen: 2, - asm: x86.AVPCMPB, + asm: x86.AVINSERTI128, reg: regInfo{ 
inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VINSERTI128256", + name: "VPINSRB128", auxType: auxInt8, argLen: 2, - asm: x86.AVINSERTI128, + asm: x86.AVPINSRB, reg: regInfo{ inputs: []inputInfo{ + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -33286,203 +33285,190 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPBMasked512", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPB, + name: "VPINSRW128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPINSRW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPB512", + name: "VPINSRD128", auxType: auxInt8, argLen: 2, - asm: x86.AVPCMPB, + asm: x86.AVPINSRD, reg: regInfo{ inputs: []inputInfo{ + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUWMasked256", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPUW, + name: "VPINSRQ128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPINSRQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUW256", + name: "VPSHLDW128", auxType: auxInt8, argLen: 2, - asm: x86.AVPCMPUW, + asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 
X31 }, }, }, { - name: "VPCMPUWMasked512", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPUW, + name: "VPSHLDW256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPCMPUW512", + name: "VPSHLDW512", auxType: auxInt8, argLen: 2, - asm: x86.AVPCMPUW, + asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPCMPUWMasked128", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPUW, + name: "VPSHLDD128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPCMPUW128", + name: "VPSHLDD256", auxType: auxInt8, argLen: 2, - asm: x86.AVPCMPUW, + asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPCMPUDMasked512", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: 
x86.AVPCMPUD, + name: "VPSHLDD512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPCMPUD512", + name: "VPSHLDQ128", auxType: auxInt8, argLen: 2, - asm: x86.AVPCMPUD, + asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPCMPUDMasked128", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPUD, + name: "VPSHLDQ256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPCMPUD128", + name: "VPSHLDQ512", auxType: auxInt8, argLen: 2, - asm: x86.AVPCMPUD, + asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPCMPUDMasked256", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPUD, + name: "VPSHLDWMasked128", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHLDW, reg: regInfo{ 
inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -33490,31 +33476,31 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUD256", + name: "VPSHLDWMasked256", auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPUD, + argLen: 3, + asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUQMasked128", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPUQ, + name: "VPSHLDWMasked512", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -33522,31 +33508,31 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUQ128", + name: "VPSHLDDMasked128", auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPUQ, + argLen: 3, + asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUQMasked256", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPUQ, + name: "VPSHLDDMasked256", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -33554,31 +33540,31 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUQ256", + name: "VPSHLDDMasked512", auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPUQ, + argLen: 3, + asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUQMasked512", - auxType: auxInt8, - argLen: 3, - commutative: true, - 
asm: x86.AVPCMPUQ, + name: "VPSHLDQMasked128", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -33586,31 +33572,31 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUQ512", + name: "VPSHLDQMasked256", auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPUQ, + argLen: 3, + asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUBMasked128", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPUB, + name: "VPSHLDQMasked512", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -33618,15 +33604,15 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VGF2P8AFFINEQB128", + name: "VPSHRDW128", auxType: auxInt8, argLen: 2, - asm: x86.AVGF2P8AFFINEQB, + asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -33638,10 +33624,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VGF2P8AFFINEINVQB128", + name: "VPSHRDW256", auxType: auxInt8, argLen: 2, - asm: x86.AVGF2P8AFFINEINVQB, + asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -33653,74 +33639,70 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VGF2P8AFFINEINVQBMasked128", + name: "VPSHRDW512", auxType: auxInt8, - argLen: 3, - asm: x86.AVGF2P8AFFINEINVQB, + argLen: 2, + asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VGF2P8AFFINEQBMasked128", + name: "VPSHRDD128", auxType: auxInt8, - argLen: 3, - asm: x86.AVGF2P8AFFINEQB, + argLen: 2, + asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {2, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPCMPUB128", + name: "VPSHRDD256", auxType: auxInt8, argLen: 2, - asm: x86.AVPCMPUB, + asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPCMPUBMasked256", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPUB, + name: "VPSHRDD512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VGF2P8AFFINEQB256", + name: "VPSHRDQ128", auxType: auxInt8, argLen: 2, - asm: x86.AVGF2P8AFFINEQB, + asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -33732,10 +33714,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VGF2P8AFFINEINVQB256", + name: "VPSHRDQ256", auxType: auxInt8, argLen: 2, - asm: x86.AVGF2P8AFFINEINVQB, + asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -33747,10 +33729,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VGF2P8AFFINEINVQBMasked256", + name: "VPSHRDQ512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSHRDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHRDWMasked128", auxType: auxInt8, argLen: 3, - asm: x86.AVGF2P8AFFINEINVQB, + asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -33763,10 +33760,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VGF2P8AFFINEQBMasked256", + name: "VPSHRDWMasked256", auxType: auxInt8, argLen: 3, - asm: x86.AVGF2P8AFFINEQB, + asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -33779,26 +33776,26 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUB256", + name: "VPSHRDWMasked512", auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPUB, + argLen: 3, + asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUBMasked512", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPUB, + name: "VPSHRDDMasked128", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -33806,45 +33803,47 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VGF2P8AFFINEQB512", + name: "VPSHRDDMasked256", auxType: auxInt8, - argLen: 2, - asm: x86.AVGF2P8AFFINEQB, + argLen: 3, + asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VGF2P8AFFINEINVQB512", + name: "VPSHRDDMasked512", auxType: auxInt8, - argLen: 2, - asm: x86.AVGF2P8AFFINEINVQB, + argLen: 3, + asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VGF2P8AFFINEINVQBMasked512", + name: "VPSHRDQMasked128", auxType: auxInt8, argLen: 3, - asm: x86.AVGF2P8AFFINEINVQB, + asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -33857,10 +33856,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VGF2P8AFFINEQBMasked512", + name: "VPSHRDQMasked256", auxType: auxInt8, argLen: 3, - asm: x86.AVGF2P8AFFINEQB, + asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -33873,17 +33872,18 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUB512", + name: "VPSHRDQMasked512", auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPUB, + argLen: 3, + asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -60920,1599 +60920,2034 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AtomicOr8value", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "AtomicOr8value", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicStore8Variant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicStore32Variant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicStore64Variant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicAdd32Variant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicAdd64Variant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicExchange8Variant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicExchange32Variant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicExchange64Variant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicCompareAndSwap32Variant", + argLen: 4, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicCompareAndSwap64Variant", + argLen: 4, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicAnd64valueVariant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicOr64valueVariant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicAnd32valueVariant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicOr32valueVariant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicAnd8valueVariant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicOr8valueVariant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "PubBarrier", + argLen: 1, + hasSideEffects: true, + generic: true, + }, + { + name: "Clobber", + auxType: auxSymOff, + argLen: 0, + symEffect: SymNone, + generic: true, + }, + { + name: 
"ClobberReg", + argLen: 0, + generic: true, + }, + { + name: "PrefetchCache", + argLen: 2, + hasSideEffects: true, + generic: true, + }, + { + name: "PrefetchCacheStreamed", + argLen: 2, + hasSideEffects: true, + generic: true, + }, + { + name: "Add32x4", + argLen: 2, + generic: true, + }, + { + name: "ZeroSIMD", + argLen: 0, + generic: true, + }, + { + name: "LoadMask8x16", + argLen: 2, + generic: true, + }, + { + name: "LoadMask8x32", + argLen: 2, + generic: true, + }, + { + name: "LoadMask8x64", + argLen: 2, + generic: true, + }, + { + name: "LoadMask16x8", + argLen: 2, + generic: true, + }, + { + name: "LoadMask16x16", + argLen: 2, + generic: true, + }, + { + name: "LoadMask16x32", + argLen: 2, + generic: true, + }, + { + name: "LoadMask32x4", + argLen: 2, + generic: true, + }, + { + name: "LoadMask32x8", + argLen: 2, + generic: true, + }, + { + name: "LoadMask32x16", + argLen: 2, + generic: true, + }, + { + name: "LoadMask64x2", + argLen: 2, + generic: true, + }, + { + name: "LoadMask64x4", + argLen: 2, + generic: true, + }, + { + name: "LoadMask64x8", + argLen: 2, + generic: true, + }, + { + name: "StoreMask8x16", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "StoreMask8x32", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "StoreMask8x64", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "StoreMask16x8", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "StoreMask16x16", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "StoreMask16x32", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "StoreMask32x4", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "StoreMask32x8", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "StoreMask32x16", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "StoreMask64x2", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "StoreMask64x4", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "StoreMask64x8", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "AbsoluteInt8x16", + argLen: 1, + generic: true, + }, + { + name: "AbsoluteInt8x32", + argLen: 1, + generic: true, + }, + { + name: "AbsoluteInt8x64", + argLen: 1, + generic: true, + }, + { + name: "AbsoluteInt16x8", + argLen: 1, + generic: true, + }, + { + name: "AbsoluteInt16x16", + argLen: 1, + generic: true, + }, + { + name: "AbsoluteInt16x32", + argLen: 1, + generic: true, + }, + { + name: "AbsoluteInt32x4", + argLen: 1, + generic: true, + }, + { + name: "AbsoluteInt32x8", + argLen: 1, + generic: true, + }, + { + name: "AbsoluteInt32x16", + argLen: 1, + generic: true, + }, + { + name: "AbsoluteInt64x2", + argLen: 1, + generic: true, + }, + { + name: "AbsoluteInt64x4", + argLen: 1, + generic: true, + }, + { + name: "AbsoluteInt64x8", + argLen: 1, + generic: true, + }, + { + name: "AbsoluteMaskedInt8x16", + argLen: 2, + generic: true, + }, + { + name: "AbsoluteMaskedInt8x32", + argLen: 2, + generic: true, + }, + { + name: "AbsoluteMaskedInt8x64", + argLen: 2, + generic: true, + }, + { + name: "AbsoluteMaskedInt16x8", + argLen: 2, + generic: true, + }, + { + name: "AbsoluteMaskedInt16x16", + argLen: 2, + generic: true, + }, + { + name: "AbsoluteMaskedInt16x32", + argLen: 2, + generic: true, + }, + { + name: "AbsoluteMaskedInt32x4", + argLen: 2, + generic: true, + }, + { + name: "AbsoluteMaskedInt32x8", + argLen: 2, + generic: true, + }, + { + name: "AbsoluteMaskedInt32x16", + argLen: 2, + generic: true, + }, 
+ { + name: "AbsoluteMaskedInt64x2", + argLen: 2, + generic: true, }, { - name: "AtomicStore8Variant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "AbsoluteMaskedInt64x4", + argLen: 2, + generic: true, }, { - name: "AtomicStore32Variant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "AbsoluteMaskedInt64x8", + argLen: 2, + generic: true, }, { - name: "AtomicStore64Variant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "AddFloat32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicAdd32Variant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "AddFloat32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicAdd64Variant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "AddFloat32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicExchange8Variant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "AddFloat64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicExchange32Variant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "AddFloat64x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicExchange64Variant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "AddFloat64x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicCompareAndSwap32Variant", - argLen: 4, - hasSideEffects: true, - generic: true, + name: "AddInt8x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicCompareAndSwap64Variant", - argLen: 4, - hasSideEffects: true, - generic: true, + name: "AddInt8x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicAnd64valueVariant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "AddInt8x64", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicOr64valueVariant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "AddInt16x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicAnd32valueVariant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "AddInt16x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicOr32valueVariant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "AddInt16x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicAnd8valueVariant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "AddInt32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicOr8valueVariant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "AddInt32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "PubBarrier", - argLen: 1, - hasSideEffects: true, - generic: true, + name: "AddInt32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Clobber", - auxType: auxSymOff, - argLen: 0, - symEffect: SymNone, - generic: true, + name: "AddInt64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "ClobberReg", - argLen: 0, - generic: true, + name: "AddInt64x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "PrefetchCache", - argLen: 2, - hasSideEffects: true, - generic: true, + name: "AddInt64x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "PrefetchCacheStreamed", - argLen: 2, - hasSideEffects: true, - generic: true, + name: "AddMaskedFloat32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Add32x4", - argLen: 2, - generic: 
true, + name: "AddMaskedFloat32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ZeroSIMD", - argLen: 0, - generic: true, + name: "AddMaskedFloat32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LoadMask8x16", - argLen: 2, - generic: true, + name: "AddMaskedFloat64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LoadMask8x32", - argLen: 2, - generic: true, + name: "AddMaskedFloat64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LoadMask8x64", - argLen: 2, - generic: true, + name: "AddMaskedFloat64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LoadMask16x8", - argLen: 2, - generic: true, + name: "AddMaskedInt8x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LoadMask16x16", - argLen: 2, - generic: true, + name: "AddMaskedInt8x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LoadMask16x32", - argLen: 2, - generic: true, + name: "AddMaskedInt8x64", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LoadMask32x4", - argLen: 2, - generic: true, + name: "AddMaskedInt16x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LoadMask32x8", - argLen: 2, - generic: true, + name: "AddMaskedInt16x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LoadMask32x16", - argLen: 2, - generic: true, + name: "AddMaskedInt16x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LoadMask64x2", - argLen: 2, - generic: true, + name: "AddMaskedInt32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LoadMask64x4", - argLen: 2, - generic: true, + name: "AddMaskedInt32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LoadMask64x8", - argLen: 2, - generic: true, + name: "AddMaskedInt32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "StoreMask8x16", - auxType: auxTyp, - argLen: 3, - generic: true, + name: "AddMaskedInt64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "StoreMask8x32", - auxType: auxTyp, - argLen: 3, - generic: true, + name: "AddMaskedInt64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "StoreMask8x64", - auxType: auxTyp, - argLen: 3, - generic: true, + name: "AddMaskedInt64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "StoreMask16x8", - auxType: auxTyp, - argLen: 3, - generic: true, + name: "AddMaskedUint8x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "StoreMask16x16", - auxType: auxTyp, - argLen: 3, - generic: true, + name: "AddMaskedUint8x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "StoreMask16x32", - auxType: auxTyp, - argLen: 3, - generic: true, + name: "AddMaskedUint8x64", + argLen: 3, + commutative: true, + generic: true, }, { - name: "StoreMask32x4", - auxType: auxTyp, - argLen: 3, - generic: true, + name: "AddMaskedUint16x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "StoreMask32x8", - auxType: auxTyp, - argLen: 3, - generic: true, + name: "AddMaskedUint16x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "StoreMask32x16", - auxType: auxTyp, - argLen: 3, - generic: true, + name: "AddMaskedUint16x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "StoreMask64x2", - auxType: auxTyp, - argLen: 3, - generic: true, + name: "AddMaskedUint32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "StoreMask64x4", - auxType: auxTyp, - argLen: 3, - 
generic: true, + name: "AddMaskedUint32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "StoreMask64x8", - auxType: auxTyp, - argLen: 3, - generic: true, + name: "AddMaskedUint32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AddFloat32x16", - argLen: 2, + name: "AddMaskedUint64x2", + argLen: 3, commutative: true, generic: true, }, { - name: "AddMaskedFloat32x16", + name: "AddMaskedUint64x4", argLen: 3, commutative: true, generic: true, }, { - name: "ApproximateReciprocalFloat32x16", - argLen: 1, - generic: true, + name: "AddMaskedUint64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ApproximateReciprocalMaskedFloat32x16", + name: "AddSubFloat32x4", argLen: 2, generic: true, }, { - name: "ApproximateReciprocalOfSqrtFloat32x16", - argLen: 1, + name: "AddSubFloat32x8", + argLen: 2, generic: true, }, { - name: "ApproximateReciprocalOfSqrtMaskedFloat32x16", + name: "AddSubFloat64x2", argLen: 2, generic: true, }, { - name: "CompressFloat32x16", + name: "AddSubFloat64x4", argLen: 2, generic: true, }, { - name: "DivFloat32x16", - argLen: 2, - generic: true, + name: "AddUint8x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "DivMaskedFloat32x16", - argLen: 3, - generic: true, + name: "AddUint8x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "EqualFloat32x16", + name: "AddUint8x64", argLen: 2, commutative: true, generic: true, }, { - name: "EqualMaskedFloat32x16", - argLen: 3, + name: "AddUint16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "FusedMultiplyAddFloat32x16", - argLen: 3, - generic: true, + name: "AddUint16x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "FusedMultiplyAddMaskedFloat32x16", - argLen: 4, - generic: true, + name: "AddUint16x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "FusedMultiplyAddSubFloat32x16", - argLen: 3, - generic: true, + name: "AddUint32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "FusedMultiplyAddSubMaskedFloat32x16", - argLen: 4, - generic: true, + name: "AddUint32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "FusedMultiplySubAddFloat32x16", - argLen: 3, - generic: true, + name: "AddUint32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "FusedMultiplySubAddMaskedFloat32x16", - argLen: 4, - generic: true, + name: "AddUint64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "GreaterFloat32x16", - argLen: 2, - generic: true, + name: "AddUint64x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "GreaterEqualFloat32x16", - argLen: 2, - generic: true, + name: "AddUint64x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "GreaterEqualMaskedFloat32x16", - argLen: 3, - generic: true, + name: "AndInt8x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "GreaterMaskedFloat32x16", - argLen: 3, - generic: true, + name: "AndInt8x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "IsNanFloat32x16", + name: "AndInt16x8", argLen: 2, commutative: true, generic: true, }, { - name: "IsNanMaskedFloat32x16", - argLen: 3, + name: "AndInt16x16", + argLen: 2, commutative: true, generic: true, }, { - name: "LessFloat32x16", - argLen: 2, - generic: true, + name: "AndInt32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndInt32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndInt32x16", + argLen: 2, + commutative: 
true, + generic: true, + }, + { + name: "AndInt64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "LessEqualFloat32x16", - argLen: 2, - generic: true, + name: "AndInt64x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "LessEqualMaskedFloat32x16", - argLen: 3, - generic: true, + name: "AndInt64x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "LessMaskedFloat32x16", - argLen: 3, - generic: true, + name: "AndMaskedInt32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaxFloat32x16", - argLen: 2, + name: "AndMaskedInt32x8", + argLen: 3, commutative: true, generic: true, }, { - name: "MaxMaskedFloat32x16", + name: "AndMaskedInt32x16", argLen: 3, commutative: true, generic: true, }, { - name: "MinFloat32x16", - argLen: 2, + name: "AndMaskedInt64x2", + argLen: 3, commutative: true, generic: true, }, { - name: "MinMaskedFloat32x16", + name: "AndMaskedInt64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MulFloat32x16", - argLen: 2, + name: "AndMaskedInt64x8", + argLen: 3, commutative: true, generic: true, }, { - name: "MulByPowOf2Float32x16", - argLen: 2, - generic: true, + name: "AndMaskedUint32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MulByPowOf2MaskedFloat32x16", - argLen: 3, - generic: true, + name: "AndMaskedUint32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MulMaskedFloat32x16", + name: "AndMaskedUint32x16", argLen: 3, commutative: true, generic: true, }, { - name: "NotEqualFloat32x16", - argLen: 2, + name: "AndMaskedUint64x2", + argLen: 3, commutative: true, generic: true, }, { - name: "NotEqualMaskedFloat32x16", + name: "AndMaskedUint64x4", argLen: 3, commutative: true, generic: true, }, { - name: "SqrtFloat32x16", - argLen: 1, - generic: true, + name: "AndMaskedUint64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "SqrtMaskedFloat32x16", + name: "AndNotInt8x16", argLen: 2, generic: true, }, { - name: "SubFloat32x16", + name: "AndNotInt8x32", argLen: 2, generic: true, }, { - name: "SubMaskedFloat32x16", - argLen: 3, + name: "AndNotInt16x8", + argLen: 2, generic: true, }, { - name: "AddFloat32x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AddMaskedFloat32x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AddSubFloat32x4", + name: "AndNotInt16x16", argLen: 2, generic: true, }, { - name: "ApproximateReciprocalFloat32x4", - argLen: 1, + name: "AndNotInt32x4", + argLen: 2, generic: true, }, { - name: "ApproximateReciprocalMaskedFloat32x4", + name: "AndNotInt32x8", argLen: 2, generic: true, }, { - name: "ApproximateReciprocalOfSqrtFloat32x4", - argLen: 1, + name: "AndNotInt32x16", + argLen: 2, generic: true, }, { - name: "ApproximateReciprocalOfSqrtMaskedFloat32x4", + name: "AndNotInt64x2", argLen: 2, generic: true, }, { - name: "CeilFloat32x4", - argLen: 1, + name: "AndNotInt64x4", + argLen: 2, generic: true, }, { - name: "CompressFloat32x4", + name: "AndNotInt64x8", argLen: 2, generic: true, }, { - name: "DivFloat32x4", - argLen: 2, + name: "AndNotMaskedInt32x4", + argLen: 3, generic: true, }, { - name: "DivMaskedFloat32x4", + name: "AndNotMaskedInt32x8", argLen: 3, generic: true, }, { - name: "DotProdBroadcastFloat32x4", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotMaskedInt32x16", + argLen: 3, + generic: true, }, { - name: "EqualFloat32x4", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotMaskedInt64x2", + argLen: 3, + generic: true, }, { 
- name: "EqualMaskedFloat32x4", - argLen: 3, - commutative: true, - generic: true, + name: "AndNotMaskedInt64x4", + argLen: 3, + generic: true, }, { - name: "FloorFloat32x4", - argLen: 1, + name: "AndNotMaskedInt64x8", + argLen: 3, generic: true, }, { - name: "FusedMultiplyAddFloat32x4", + name: "AndNotMaskedUint32x4", argLen: 3, generic: true, }, { - name: "FusedMultiplyAddMaskedFloat32x4", - argLen: 4, + name: "AndNotMaskedUint32x8", + argLen: 3, generic: true, }, { - name: "FusedMultiplyAddSubFloat32x4", + name: "AndNotMaskedUint32x16", argLen: 3, generic: true, }, { - name: "FusedMultiplyAddSubMaskedFloat32x4", - argLen: 4, + name: "AndNotMaskedUint64x2", + argLen: 3, generic: true, }, { - name: "FusedMultiplySubAddFloat32x4", + name: "AndNotMaskedUint64x4", argLen: 3, generic: true, }, { - name: "FusedMultiplySubAddMaskedFloat32x4", - argLen: 4, + name: "AndNotMaskedUint64x8", + argLen: 3, generic: true, }, { - name: "GreaterFloat32x4", + name: "AndNotUint8x16", argLen: 2, generic: true, }, { - name: "GreaterEqualFloat32x4", + name: "AndNotUint8x32", argLen: 2, generic: true, }, { - name: "GreaterEqualMaskedFloat32x4", - argLen: 3, + name: "AndNotUint16x8", + argLen: 2, generic: true, }, { - name: "GreaterMaskedFloat32x4", - argLen: 3, + name: "AndNotUint16x16", + argLen: 2, generic: true, }, { - name: "IsNanFloat32x4", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotUint32x4", + argLen: 2, + generic: true, }, { - name: "IsNanMaskedFloat32x4", - argLen: 3, - commutative: true, - generic: true, + name: "AndNotUint32x8", + argLen: 2, + generic: true, }, { - name: "LessFloat32x4", + name: "AndNotUint32x16", argLen: 2, generic: true, }, { - name: "LessEqualFloat32x4", + name: "AndNotUint64x2", argLen: 2, generic: true, }, { - name: "LessEqualMaskedFloat32x4", - argLen: 3, + name: "AndNotUint64x4", + argLen: 2, generic: true, }, { - name: "LessMaskedFloat32x4", - argLen: 3, + name: "AndNotUint64x8", + argLen: 2, generic: true, }, { - name: "MaxFloat32x4", + name: "AndUint8x16", argLen: 2, commutative: true, generic: true, }, { - name: "MaxMaskedFloat32x4", - argLen: 3, + name: "AndUint8x32", + argLen: 2, commutative: true, generic: true, }, { - name: "MinFloat32x4", + name: "AndUint16x8", argLen: 2, commutative: true, generic: true, }, { - name: "MinMaskedFloat32x4", - argLen: 3, + name: "AndUint16x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MulFloat32x4", + name: "AndUint32x4", argLen: 2, commutative: true, generic: true, }, { - name: "MulByPowOf2Float32x4", - argLen: 2, - generic: true, - }, - { - name: "MulByPowOf2MaskedFloat32x4", - argLen: 3, - generic: true, - }, - { - name: "MulMaskedFloat32x4", - argLen: 3, + name: "AndUint32x8", + argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualFloat32x4", + name: "AndUint32x16", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualMaskedFloat32x4", - argLen: 3, + name: "AndUint64x2", + argLen: 2, commutative: true, generic: true, }, { - name: "PairwiseAddFloat32x4", - argLen: 2, - generic: true, + name: "AndUint64x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "PairwiseSubFloat32x4", - argLen: 2, - generic: true, + name: "AndUint64x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "RoundFloat32x4", + name: "ApproximateReciprocalFloat32x4", argLen: 1, generic: true, }, { - name: "SqrtFloat32x4", + name: "ApproximateReciprocalFloat32x8", argLen: 1, generic: true, }, { - name: "SqrtMaskedFloat32x4", - argLen: 2, + name: 
"ApproximateReciprocalFloat32x16", + argLen: 1, generic: true, }, { - name: "SubFloat32x4", - argLen: 2, + name: "ApproximateReciprocalFloat64x2", + argLen: 1, generic: true, }, { - name: "SubMaskedFloat32x4", - argLen: 3, + name: "ApproximateReciprocalFloat64x4", + argLen: 1, generic: true, }, { - name: "TruncFloat32x4", + name: "ApproximateReciprocalFloat64x8", argLen: 1, generic: true, }, { - name: "AddFloat32x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AddMaskedFloat32x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AddSubFloat32x8", + name: "ApproximateReciprocalMaskedFloat32x4", argLen: 2, generic: true, }, - { - name: "ApproximateReciprocalFloat32x8", - argLen: 1, - generic: true, - }, { name: "ApproximateReciprocalMaskedFloat32x8", argLen: 2, generic: true, }, { - name: "ApproximateReciprocalOfSqrtFloat32x8", - argLen: 1, - generic: true, - }, - { - name: "ApproximateReciprocalOfSqrtMaskedFloat32x8", + name: "ApproximateReciprocalMaskedFloat32x16", argLen: 2, generic: true, }, { - name: "CeilFloat32x8", - argLen: 1, + name: "ApproximateReciprocalMaskedFloat64x2", + argLen: 2, generic: true, }, { - name: "CompressFloat32x8", + name: "ApproximateReciprocalMaskedFloat64x4", argLen: 2, generic: true, }, { - name: "DivFloat32x8", + name: "ApproximateReciprocalMaskedFloat64x8", argLen: 2, generic: true, }, { - name: "DivMaskedFloat32x8", - argLen: 3, + name: "ApproximateReciprocalOfSqrtFloat32x4", + argLen: 1, generic: true, }, { - name: "DotProdBroadcastFloat32x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "EqualFloat32x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "EqualMaskedFloat32x8", - argLen: 3, - commutative: true, - generic: true, + name: "ApproximateReciprocalOfSqrtFloat32x8", + argLen: 1, + generic: true, }, { - name: "FloorFloat32x8", + name: "ApproximateReciprocalOfSqrtFloat32x16", argLen: 1, generic: true, }, { - name: "FusedMultiplyAddFloat32x8", - argLen: 3, + name: "ApproximateReciprocalOfSqrtFloat64x2", + argLen: 1, generic: true, }, { - name: "FusedMultiplyAddMaskedFloat32x8", - argLen: 4, + name: "ApproximateReciprocalOfSqrtFloat64x4", + argLen: 1, generic: true, }, { - name: "FusedMultiplyAddSubFloat32x8", - argLen: 3, + name: "ApproximateReciprocalOfSqrtFloat64x8", + argLen: 1, generic: true, }, { - name: "FusedMultiplyAddSubMaskedFloat32x8", - argLen: 4, + name: "ApproximateReciprocalOfSqrtMaskedFloat32x4", + argLen: 2, generic: true, }, { - name: "FusedMultiplySubAddFloat32x8", - argLen: 3, + name: "ApproximateReciprocalOfSqrtMaskedFloat32x8", + argLen: 2, generic: true, }, { - name: "FusedMultiplySubAddMaskedFloat32x8", - argLen: 4, + name: "ApproximateReciprocalOfSqrtMaskedFloat32x16", + argLen: 2, generic: true, }, { - name: "GreaterFloat32x8", + name: "ApproximateReciprocalOfSqrtMaskedFloat64x2", argLen: 2, generic: true, }, { - name: "GreaterEqualFloat32x8", + name: "ApproximateReciprocalOfSqrtMaskedFloat64x4", argLen: 2, generic: true, }, { - name: "GreaterEqualMaskedFloat32x8", - argLen: 3, + name: "ApproximateReciprocalOfSqrtMaskedFloat64x8", + argLen: 2, generic: true, }, { - name: "GreaterMaskedFloat32x8", - argLen: 3, - generic: true, + name: "AverageMaskedUint8x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "IsNanFloat32x8", - argLen: 2, + name: "AverageMaskedUint8x32", + argLen: 3, commutative: true, generic: true, }, { - name: "IsNanMaskedFloat32x8", + name: "AverageMaskedUint8x64", argLen: 3, commutative: true, generic: 
true, }, { - name: "LessFloat32x8", - argLen: 2, - generic: true, + name: "AverageMaskedUint16x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LessEqualFloat32x8", - argLen: 2, - generic: true, + name: "AverageMaskedUint16x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LessEqualMaskedFloat32x8", - argLen: 3, - generic: true, + name: "AverageMaskedUint16x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LessMaskedFloat32x8", - argLen: 3, - generic: true, + name: "AverageUint8x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaxFloat32x8", + name: "AverageUint8x32", argLen: 2, commutative: true, generic: true, }, { - name: "MaxMaskedFloat32x8", - argLen: 3, + name: "AverageUint8x64", + argLen: 2, commutative: true, generic: true, }, { - name: "MinFloat32x8", + name: "AverageUint16x8", argLen: 2, commutative: true, generic: true, }, { - name: "MinMaskedFloat32x8", - argLen: 3, + name: "AverageUint16x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MulFloat32x8", + name: "AverageUint16x32", argLen: 2, commutative: true, generic: true, }, { - name: "MulByPowOf2Float32x8", + name: "CeilFloat32x4", + argLen: 1, + generic: true, + }, + { + name: "CeilFloat32x8", + argLen: 1, + generic: true, + }, + { + name: "CeilFloat64x2", + argLen: 1, + generic: true, + }, + { + name: "CeilFloat64x4", + argLen: 1, + generic: true, + }, + { + name: "CompressFloat32x4", argLen: 2, generic: true, }, { - name: "MulByPowOf2MaskedFloat32x8", - argLen: 3, + name: "CompressFloat32x8", + argLen: 2, generic: true, }, { - name: "MulMaskedFloat32x8", - argLen: 3, - commutative: true, - generic: true, + name: "CompressFloat32x16", + argLen: 2, + generic: true, }, { - name: "NotEqualFloat32x8", - argLen: 2, - commutative: true, - generic: true, + name: "CompressFloat64x2", + argLen: 2, + generic: true, }, { - name: "NotEqualMaskedFloat32x8", - argLen: 3, - commutative: true, - generic: true, + name: "CompressFloat64x4", + argLen: 2, + generic: true, }, { - name: "PairwiseAddFloat32x8", + name: "CompressFloat64x8", argLen: 2, generic: true, }, { - name: "PairwiseSubFloat32x8", + name: "CompressInt8x16", argLen: 2, generic: true, }, { - name: "RoundFloat32x8", - argLen: 1, + name: "CompressInt8x32", + argLen: 2, generic: true, }, { - name: "SqrtFloat32x8", - argLen: 1, + name: "CompressInt8x64", + argLen: 2, generic: true, }, { - name: "SqrtMaskedFloat32x8", + name: "CompressInt16x8", argLen: 2, generic: true, }, { - name: "SubFloat32x8", + name: "CompressInt16x16", argLen: 2, generic: true, }, { - name: "SubMaskedFloat32x8", - argLen: 3, + name: "CompressInt16x32", + argLen: 2, generic: true, }, { - name: "TruncFloat32x8", - argLen: 1, + name: "CompressInt32x4", + argLen: 2, generic: true, }, { - name: "AddFloat64x2", - argLen: 2, - commutative: true, - generic: true, + name: "CompressInt32x8", + argLen: 2, + generic: true, }, { - name: "AddMaskedFloat64x2", - argLen: 3, - commutative: true, - generic: true, + name: "CompressInt32x16", + argLen: 2, + generic: true, }, { - name: "AddSubFloat64x2", + name: "CompressInt64x2", + argLen: 2, + generic: true, + }, + { + name: "CompressInt64x4", + argLen: 2, + generic: true, + }, + { + name: "CompressInt64x8", + argLen: 2, + generic: true, + }, + { + name: "CompressUint8x16", + argLen: 2, + generic: true, + }, + { + name: "CompressUint8x32", argLen: 2, generic: true, }, { - name: "ApproximateReciprocalFloat64x2", - argLen: 1, + name: "CompressUint8x64", + argLen: 2, generic: true, }, 
{ - name: "ApproximateReciprocalMaskedFloat64x2", + name: "CompressUint16x8", argLen: 2, generic: true, }, { - name: "ApproximateReciprocalOfSqrtFloat64x2", - argLen: 1, + name: "CompressUint16x16", + argLen: 2, generic: true, }, { - name: "ApproximateReciprocalOfSqrtMaskedFloat64x2", + name: "CompressUint16x32", argLen: 2, generic: true, }, { - name: "CeilFloat64x2", - argLen: 1, + name: "CompressUint32x4", + argLen: 2, generic: true, }, { - name: "CompressFloat64x2", + name: "CompressUint32x8", argLen: 2, generic: true, }, { - name: "DivFloat64x2", + name: "CompressUint32x16", argLen: 2, generic: true, }, { - name: "DivMaskedFloat64x2", - argLen: 3, + name: "CompressUint64x2", + argLen: 2, generic: true, }, { - name: "DotProdBroadcastFloat64x2", - argLen: 2, - commutative: true, - generic: true, + name: "CompressUint64x4", + argLen: 2, + generic: true, }, { - name: "EqualFloat64x2", - argLen: 2, - commutative: true, - generic: true, + name: "CompressUint64x8", + argLen: 2, + generic: true, }, { - name: "EqualMaskedFloat64x2", - argLen: 3, - commutative: true, - generic: true, + name: "DivFloat32x4", + argLen: 2, + generic: true, }, { - name: "FloorFloat64x2", - argLen: 1, + name: "DivFloat32x8", + argLen: 2, generic: true, }, { - name: "FusedMultiplyAddFloat64x2", - argLen: 3, + name: "DivFloat32x16", + argLen: 2, generic: true, }, { - name: "FusedMultiplyAddMaskedFloat64x2", - argLen: 4, + name: "DivFloat64x2", + argLen: 2, generic: true, }, { - name: "FusedMultiplyAddSubFloat64x2", - argLen: 3, + name: "DivFloat64x4", + argLen: 2, generic: true, }, { - name: "FusedMultiplyAddSubMaskedFloat64x2", - argLen: 4, + name: "DivFloat64x8", + argLen: 2, generic: true, }, { - name: "FusedMultiplySubAddFloat64x2", + name: "DivMaskedFloat32x4", argLen: 3, generic: true, }, { - name: "FusedMultiplySubAddMaskedFloat64x2", - argLen: 4, + name: "DivMaskedFloat32x8", + argLen: 3, generic: true, }, { - name: "GreaterFloat64x2", - argLen: 2, + name: "DivMaskedFloat32x16", + argLen: 3, generic: true, }, { - name: "GreaterEqualFloat64x2", - argLen: 2, + name: "DivMaskedFloat64x2", + argLen: 3, generic: true, }, { - name: "GreaterEqualMaskedFloat64x2", + name: "DivMaskedFloat64x4", argLen: 3, generic: true, }, { - name: "GreaterMaskedFloat64x2", + name: "DivMaskedFloat64x8", argLen: 3, generic: true, }, { - name: "IsNanFloat64x2", + name: "DotProdBroadcastFloat32x4", argLen: 2, commutative: true, generic: true, }, { - name: "IsNanMaskedFloat64x2", - argLen: 3, + name: "DotProdBroadcastFloat32x8", + argLen: 2, commutative: true, generic: true, }, { - name: "LessFloat64x2", - argLen: 2, - generic: true, - }, - { - name: "LessEqualFloat64x2", - argLen: 2, - generic: true, + name: "DotProdBroadcastFloat64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "LessEqualMaskedFloat64x2", - argLen: 3, - generic: true, + name: "EqualFloat32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "LessMaskedFloat64x2", - argLen: 3, - generic: true, + name: "EqualFloat32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaxFloat64x2", + name: "EqualFloat32x16", argLen: 2, commutative: true, generic: true, }, { - name: "MaxMaskedFloat64x2", - argLen: 3, + name: "EqualFloat64x2", + argLen: 2, commutative: true, generic: true, }, { - name: "MinFloat64x2", + name: "EqualFloat64x4", argLen: 2, commutative: true, generic: true, }, { - name: "MinMaskedFloat64x2", - argLen: 3, + name: "EqualFloat64x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MulFloat64x2", + 
name: "EqualInt8x16", argLen: 2, commutative: true, generic: true, }, { - name: "MulByPowOf2Float64x2", - argLen: 2, - generic: true, + name: "EqualInt8x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MulByPowOf2MaskedFloat64x2", - argLen: 3, - generic: true, + name: "EqualInt8x64", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MulMaskedFloat64x2", - argLen: 3, + name: "EqualInt16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualFloat64x2", + name: "EqualInt16x16", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualMaskedFloat64x2", - argLen: 3, + name: "EqualInt16x32", + argLen: 2, commutative: true, generic: true, }, { - name: "PairwiseAddFloat64x2", - argLen: 2, - generic: true, + name: "EqualInt32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "PairwiseSubFloat64x2", - argLen: 2, - generic: true, + name: "EqualInt32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "RoundFloat64x2", - argLen: 1, - generic: true, + name: "EqualInt32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SqrtFloat64x2", - argLen: 1, - generic: true, + name: "EqualInt64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SqrtMaskedFloat64x2", - argLen: 2, - generic: true, + name: "EqualInt64x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SubFloat64x2", - argLen: 2, - generic: true, + name: "EqualInt64x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SubMaskedFloat64x2", - argLen: 3, - generic: true, + name: "EqualMaskedFloat32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "TruncFloat64x2", - argLen: 1, - generic: true, + name: "EqualMaskedFloat32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AddFloat64x4", - argLen: 2, + name: "EqualMaskedFloat32x16", + argLen: 3, commutative: true, generic: true, }, { - name: "AddMaskedFloat64x4", + name: "EqualMaskedFloat64x2", argLen: 3, commutative: true, generic: true, }, { - name: "AddSubFloat64x4", - argLen: 2, - generic: true, + name: "EqualMaskedFloat64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ApproximateReciprocalFloat64x4", - argLen: 1, - generic: true, + name: "EqualMaskedFloat64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ApproximateReciprocalMaskedFloat64x4", - argLen: 2, - generic: true, + name: "EqualMaskedInt8x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ApproximateReciprocalOfSqrtFloat64x4", - argLen: 1, - generic: true, + name: "EqualMaskedInt8x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ApproximateReciprocalOfSqrtMaskedFloat64x4", - argLen: 2, - generic: true, + name: "EqualMaskedInt8x64", + argLen: 3, + commutative: true, + generic: true, }, { - name: "CeilFloat64x4", - argLen: 1, - generic: true, + name: "EqualMaskedInt16x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "CompressFloat64x4", - argLen: 2, - generic: true, + name: "EqualMaskedInt16x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "DivFloat64x4", - argLen: 2, - generic: true, + name: "EqualMaskedInt16x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "DivMaskedFloat64x4", - argLen: 3, - generic: true, + name: "EqualMaskedInt32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "EqualFloat64x4", - argLen: 2, + name: "EqualMaskedInt32x8", + argLen: 3, commutative: true, generic: 
true, }, { - name: "EqualMaskedFloat64x4", + name: "EqualMaskedInt32x16", argLen: 3, commutative: true, generic: true, }, { - name: "FloorFloat64x4", - argLen: 1, - generic: true, + name: "EqualMaskedInt64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "FusedMultiplyAddFloat64x4", - argLen: 3, - generic: true, + name: "EqualMaskedInt64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "FusedMultiplyAddMaskedFloat64x4", - argLen: 4, - generic: true, + name: "EqualMaskedInt64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "FusedMultiplyAddSubFloat64x4", - argLen: 3, - generic: true, + name: "EqualMaskedUint8x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "FusedMultiplyAddSubMaskedFloat64x4", - argLen: 4, - generic: true, + name: "EqualMaskedUint8x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "FusedMultiplySubAddFloat64x4", - argLen: 3, - generic: true, + name: "EqualMaskedUint8x64", + argLen: 3, + commutative: true, + generic: true, }, { - name: "FusedMultiplySubAddMaskedFloat64x4", - argLen: 4, - generic: true, + name: "EqualMaskedUint16x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "GreaterFloat64x4", - argLen: 2, - generic: true, + name: "EqualMaskedUint16x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "GreaterEqualFloat64x4", - argLen: 2, - generic: true, + name: "EqualMaskedUint16x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "GreaterEqualMaskedFloat64x4", - argLen: 3, - generic: true, + name: "EqualMaskedUint32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "GreaterMaskedFloat64x4", - argLen: 3, - generic: true, + name: "EqualMaskedUint32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "IsNanFloat64x4", - argLen: 2, + name: "EqualMaskedUint32x16", + argLen: 3, commutative: true, generic: true, }, { - name: "IsNanMaskedFloat64x4", + name: "EqualMaskedUint64x2", argLen: 3, commutative: true, generic: true, }, { - name: "LessFloat64x4", - argLen: 2, - generic: true, + name: "EqualMaskedUint64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LessEqualFloat64x4", - argLen: 2, - generic: true, + name: "EqualMaskedUint64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LessEqualMaskedFloat64x4", - argLen: 3, - generic: true, + name: "EqualUint8x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "LessMaskedFloat64x4", - argLen: 3, - generic: true, + name: "EqualUint8x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaxFloat64x4", + name: "EqualUint8x64", argLen: 2, commutative: true, generic: true, }, { - name: "MaxMaskedFloat64x4", - argLen: 3, + name: "EqualUint16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MinFloat64x4", + name: "EqualUint16x16", argLen: 2, commutative: true, generic: true, }, { - name: "MinMaskedFloat64x4", - argLen: 3, + name: "EqualUint16x32", + argLen: 2, commutative: true, generic: true, }, { - name: "MulFloat64x4", + name: "EqualUint32x4", argLen: 2, commutative: true, generic: true, }, { - name: "MulByPowOf2Float64x4", - argLen: 2, - generic: true, + name: "EqualUint32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MulByPowOf2MaskedFloat64x4", - argLen: 3, - generic: true, + name: "EqualUint32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MulMaskedFloat64x4", - argLen: 3, + name: "EqualUint64x2", + argLen: 2, 
commutative: true, generic: true, }, { - name: "NotEqualFloat64x4", + name: "EqualUint64x4", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualMaskedFloat64x4", - argLen: 3, + name: "EqualUint64x8", + argLen: 2, commutative: true, generic: true, }, { - name: "PairwiseAddFloat64x4", - argLen: 2, + name: "FloorFloat32x4", + argLen: 1, generic: true, }, { - name: "PairwiseSubFloat64x4", - argLen: 2, + name: "FloorFloat32x8", + argLen: 1, generic: true, }, { - name: "RoundFloat64x4", + name: "FloorFloat64x2", argLen: 1, generic: true, }, { - name: "SqrtFloat64x4", + name: "FloorFloat64x4", argLen: 1, generic: true, }, { - name: "SqrtMaskedFloat64x4", - argLen: 2, + name: "FusedMultiplyAddFloat32x4", + argLen: 3, generic: true, }, { - name: "SubFloat64x4", - argLen: 2, + name: "FusedMultiplyAddFloat32x8", + argLen: 3, generic: true, }, { - name: "SubMaskedFloat64x4", + name: "FusedMultiplyAddFloat32x16", argLen: 3, generic: true, }, { - name: "TruncFloat64x4", - argLen: 1, + name: "FusedMultiplyAddFloat64x2", + argLen: 3, generic: true, }, { - name: "AddFloat64x8", - argLen: 2, - commutative: true, - generic: true, + name: "FusedMultiplyAddFloat64x4", + argLen: 3, + generic: true, }, { - name: "AddMaskedFloat64x8", - argLen: 3, - commutative: true, - generic: true, + name: "FusedMultiplyAddFloat64x8", + argLen: 3, + generic: true, }, { - name: "ApproximateReciprocalFloat64x8", - argLen: 1, + name: "FusedMultiplyAddMaskedFloat32x4", + argLen: 4, generic: true, }, { - name: "ApproximateReciprocalMaskedFloat64x8", - argLen: 2, + name: "FusedMultiplyAddMaskedFloat32x8", + argLen: 4, generic: true, }, { - name: "ApproximateReciprocalOfSqrtFloat64x8", - argLen: 1, + name: "FusedMultiplyAddMaskedFloat32x16", + argLen: 4, generic: true, }, { - name: "ApproximateReciprocalOfSqrtMaskedFloat64x8", - argLen: 2, + name: "FusedMultiplyAddMaskedFloat64x2", + argLen: 4, generic: true, }, { - name: "CompressFloat64x8", - argLen: 2, + name: "FusedMultiplyAddMaskedFloat64x4", + argLen: 4, generic: true, }, { - name: "DivFloat64x8", - argLen: 2, + name: "FusedMultiplyAddMaskedFloat64x8", + argLen: 4, generic: true, }, { - name: "DivMaskedFloat64x8", + name: "FusedMultiplyAddSubFloat32x4", argLen: 3, generic: true, }, { - name: "EqualFloat64x8", - argLen: 2, - commutative: true, - generic: true, + name: "FusedMultiplyAddSubFloat32x8", + argLen: 3, + generic: true, }, { - name: "EqualMaskedFloat64x8", - argLen: 3, - commutative: true, - generic: true, + name: "FusedMultiplyAddSubFloat32x16", + argLen: 3, + generic: true, }, { - name: "FusedMultiplyAddFloat64x8", + name: "FusedMultiplyAddSubFloat64x2", argLen: 3, generic: true, }, { - name: "FusedMultiplyAddMaskedFloat64x8", - argLen: 4, + name: "FusedMultiplyAddSubFloat64x4", + argLen: 3, generic: true, }, { @@ -62521,2958 +62956,2882 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "FusedMultiplyAddSubMaskedFloat64x8", + name: "FusedMultiplyAddSubMaskedFloat32x4", argLen: 4, generic: true, }, { - name: "FusedMultiplySubAddFloat64x8", - argLen: 3, + name: "FusedMultiplyAddSubMaskedFloat32x8", + argLen: 4, generic: true, }, { - name: "FusedMultiplySubAddMaskedFloat64x8", + name: "FusedMultiplyAddSubMaskedFloat32x16", argLen: 4, generic: true, }, { - name: "GreaterFloat64x8", - argLen: 2, + name: "FusedMultiplyAddSubMaskedFloat64x2", + argLen: 4, generic: true, }, { - name: "GreaterEqualFloat64x8", - argLen: 2, + name: "FusedMultiplyAddSubMaskedFloat64x4", + argLen: 4, generic: true, }, { - name: "GreaterEqualMaskedFloat64x8", - argLen: 
3, + name: "FusedMultiplyAddSubMaskedFloat64x8", + argLen: 4, generic: true, }, { - name: "GreaterMaskedFloat64x8", + name: "FusedMultiplySubAddFloat32x4", argLen: 3, generic: true, }, { - name: "IsNanFloat64x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "IsNanMaskedFloat64x8", - argLen: 3, - commutative: true, - generic: true, + name: "FusedMultiplySubAddFloat32x8", + argLen: 3, + generic: true, }, { - name: "LessFloat64x8", - argLen: 2, + name: "FusedMultiplySubAddFloat32x16", + argLen: 3, generic: true, }, { - name: "LessEqualFloat64x8", - argLen: 2, + name: "FusedMultiplySubAddFloat64x2", + argLen: 3, generic: true, }, { - name: "LessEqualMaskedFloat64x8", + name: "FusedMultiplySubAddFloat64x4", argLen: 3, generic: true, }, { - name: "LessMaskedFloat64x8", + name: "FusedMultiplySubAddFloat64x8", argLen: 3, generic: true, }, { - name: "MaxFloat64x8", - argLen: 2, - commutative: true, - generic: true, + name: "FusedMultiplySubAddMaskedFloat32x4", + argLen: 4, + generic: true, }, { - name: "MaxMaskedFloat64x8", - argLen: 3, - commutative: true, - generic: true, + name: "FusedMultiplySubAddMaskedFloat32x8", + argLen: 4, + generic: true, }, { - name: "MinFloat64x8", - argLen: 2, - commutative: true, - generic: true, + name: "FusedMultiplySubAddMaskedFloat32x16", + argLen: 4, + generic: true, }, { - name: "MinMaskedFloat64x8", - argLen: 3, - commutative: true, - generic: true, + name: "FusedMultiplySubAddMaskedFloat64x2", + argLen: 4, + generic: true, }, { - name: "MulFloat64x8", - argLen: 2, - commutative: true, - generic: true, + name: "FusedMultiplySubAddMaskedFloat64x4", + argLen: 4, + generic: true, }, { - name: "MulByPowOf2Float64x8", - argLen: 2, + name: "FusedMultiplySubAddMaskedFloat64x8", + argLen: 4, generic: true, }, { - name: "MulByPowOf2MaskedFloat64x8", + name: "GaloisFieldMulMaskedUint8x16", argLen: 3, generic: true, }, { - name: "MulMaskedFloat64x8", - argLen: 3, - commutative: true, - generic: true, + name: "GaloisFieldMulMaskedUint8x32", + argLen: 3, + generic: true, }, { - name: "NotEqualFloat64x8", - argLen: 2, - commutative: true, - generic: true, + name: "GaloisFieldMulMaskedUint8x64", + argLen: 3, + generic: true, }, { - name: "NotEqualMaskedFloat64x8", - argLen: 3, - commutative: true, - generic: true, + name: "GaloisFieldMulUint8x16", + argLen: 2, + generic: true, }, { - name: "SqrtFloat64x8", - argLen: 1, + name: "GaloisFieldMulUint8x32", + argLen: 2, generic: true, }, { - name: "SqrtMaskedFloat64x8", + name: "GaloisFieldMulUint8x64", argLen: 2, generic: true, }, { - name: "SubFloat64x8", + name: "GreaterEqualFloat32x4", argLen: 2, generic: true, }, { - name: "SubMaskedFloat64x8", - argLen: 3, + name: "GreaterEqualFloat32x8", + argLen: 2, generic: true, }, { - name: "AbsoluteInt16x16", - argLen: 1, + name: "GreaterEqualFloat32x16", + argLen: 2, generic: true, }, { - name: "AbsoluteMaskedInt16x16", + name: "GreaterEqualFloat64x2", argLen: 2, generic: true, }, { - name: "AddInt16x16", - argLen: 2, - commutative: true, - generic: true, + name: "GreaterEqualFloat64x4", + argLen: 2, + generic: true, }, { - name: "AddMaskedInt16x16", - argLen: 3, - commutative: true, - generic: true, + name: "GreaterEqualFloat64x8", + argLen: 2, + generic: true, }, { - name: "AndInt16x16", - argLen: 2, - commutative: true, - generic: true, + name: "GreaterEqualInt8x16", + argLen: 2, + generic: true, }, { - name: "AndNotInt16x16", + name: "GreaterEqualInt8x32", argLen: 2, generic: true, }, { - name: "CompressInt16x16", + name: "GreaterEqualInt8x64", argLen: 2, 
generic: true, }, { - name: "EqualInt16x16", - argLen: 2, - commutative: true, - generic: true, + name: "GreaterEqualInt16x8", + argLen: 2, + generic: true, }, { - name: "EqualMaskedInt16x16", - argLen: 3, - commutative: true, - generic: true, + name: "GreaterEqualInt16x16", + argLen: 2, + generic: true, }, { - name: "GreaterInt16x16", + name: "GreaterEqualInt16x32", argLen: 2, generic: true, }, { - name: "GreaterEqualInt16x16", + name: "GreaterEqualInt32x4", argLen: 2, generic: true, }, { - name: "GreaterEqualMaskedInt16x16", - argLen: 3, + name: "GreaterEqualInt32x8", + argLen: 2, generic: true, }, { - name: "GreaterMaskedInt16x16", - argLen: 3, + name: "GreaterEqualInt32x16", + argLen: 2, generic: true, }, { - name: "LessInt16x16", + name: "GreaterEqualInt64x2", argLen: 2, generic: true, }, { - name: "LessEqualInt16x16", + name: "GreaterEqualInt64x4", argLen: 2, generic: true, }, { - name: "LessEqualMaskedInt16x16", - argLen: 3, + name: "GreaterEqualInt64x8", + argLen: 2, generic: true, }, { - name: "LessMaskedInt16x16", + name: "GreaterEqualMaskedFloat32x4", argLen: 3, generic: true, }, { - name: "MaxInt16x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "MaxMaskedInt16x16", - argLen: 3, - commutative: true, - generic: true, + name: "GreaterEqualMaskedFloat32x8", + argLen: 3, + generic: true, }, { - name: "MinInt16x16", - argLen: 2, - commutative: true, - generic: true, + name: "GreaterEqualMaskedFloat32x16", + argLen: 3, + generic: true, }, { - name: "MinMaskedInt16x16", - argLen: 3, - commutative: true, - generic: true, + name: "GreaterEqualMaskedFloat64x2", + argLen: 3, + generic: true, }, { - name: "MulHighInt16x16", - argLen: 2, - commutative: true, - generic: true, + name: "GreaterEqualMaskedFloat64x4", + argLen: 3, + generic: true, }, { - name: "MulHighMaskedInt16x16", - argLen: 3, - commutative: true, - generic: true, + name: "GreaterEqualMaskedFloat64x8", + argLen: 3, + generic: true, }, { - name: "MulLowInt16x16", - argLen: 2, - commutative: true, - generic: true, + name: "GreaterEqualMaskedInt8x16", + argLen: 3, + generic: true, }, { - name: "MulLowMaskedInt16x16", - argLen: 3, - commutative: true, - generic: true, + name: "GreaterEqualMaskedInt8x32", + argLen: 3, + generic: true, }, { - name: "NotEqualInt16x16", - argLen: 2, - commutative: true, - generic: true, + name: "GreaterEqualMaskedInt8x64", + argLen: 3, + generic: true, }, { - name: "NotEqualMaskedInt16x16", - argLen: 3, - commutative: true, - generic: true, + name: "GreaterEqualMaskedInt16x8", + argLen: 3, + generic: true, }, { - name: "OrInt16x16", - argLen: 2, - commutative: true, - generic: true, + name: "GreaterEqualMaskedInt16x16", + argLen: 3, + generic: true, }, { - name: "PairDotProdInt16x16", - argLen: 2, + name: "GreaterEqualMaskedInt16x32", + argLen: 3, generic: true, }, { - name: "PairDotProdMaskedInt16x16", + name: "GreaterEqualMaskedInt32x4", argLen: 3, generic: true, }, { - name: "PairwiseAddInt16x16", - argLen: 2, + name: "GreaterEqualMaskedInt32x8", + argLen: 3, generic: true, }, { - name: "PairwiseSubInt16x16", - argLen: 2, + name: "GreaterEqualMaskedInt32x16", + argLen: 3, generic: true, }, { - name: "PopCountInt16x16", - argLen: 1, + name: "GreaterEqualMaskedInt64x2", + argLen: 3, generic: true, }, { - name: "PopCountMaskedInt16x16", - argLen: 2, + name: "GreaterEqualMaskedInt64x4", + argLen: 3, generic: true, }, { - name: "SaturatedAddInt16x16", - argLen: 2, - commutative: true, - generic: true, + name: "GreaterEqualMaskedInt64x8", + argLen: 3, + generic: true, }, { - 
name: "SaturatedAddMaskedInt16x16", - argLen: 3, - commutative: true, - generic: true, + name: "GreaterEqualMaskedUint8x16", + argLen: 3, + generic: true, }, { - name: "SaturatedPairwiseAddInt16x16", - argLen: 2, + name: "GreaterEqualMaskedUint8x32", + argLen: 3, generic: true, }, { - name: "SaturatedPairwiseSubInt16x16", - argLen: 2, + name: "GreaterEqualMaskedUint8x64", + argLen: 3, generic: true, }, { - name: "SaturatedSubInt16x16", - argLen: 2, + name: "GreaterEqualMaskedUint16x8", + argLen: 3, generic: true, }, { - name: "SaturatedSubMaskedInt16x16", + name: "GreaterEqualMaskedUint16x16", argLen: 3, generic: true, }, { - name: "ShiftAllLeftInt16x16", - argLen: 2, + name: "GreaterEqualMaskedUint16x32", + argLen: 3, generic: true, }, { - name: "ShiftAllLeftMaskedInt16x16", + name: "GreaterEqualMaskedUint32x4", argLen: 3, generic: true, }, { - name: "ShiftAllRightInt16x16", - argLen: 2, + name: "GreaterEqualMaskedUint32x8", + argLen: 3, generic: true, }, { - name: "ShiftAllRightMaskedInt16x16", + name: "GreaterEqualMaskedUint32x16", argLen: 3, generic: true, }, { - name: "ShiftLeftInt16x16", - argLen: 2, + name: "GreaterEqualMaskedUint64x2", + argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromInt16x16", + name: "GreaterEqualMaskedUint64x4", argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedInt16x16", - argLen: 4, + name: "GreaterEqualMaskedUint64x8", + argLen: 3, generic: true, }, { - name: "ShiftLeftMaskedInt16x16", - argLen: 3, + name: "GreaterEqualUint8x16", + argLen: 2, generic: true, }, { - name: "ShiftRightInt16x16", + name: "GreaterEqualUint8x32", argLen: 2, generic: true, }, { - name: "ShiftRightAndFillUpperFromInt16x16", - argLen: 3, + name: "GreaterEqualUint8x64", + argLen: 2, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedInt16x16", - argLen: 4, + name: "GreaterEqualUint16x8", + argLen: 2, generic: true, }, { - name: "ShiftRightMaskedInt16x16", - argLen: 3, + name: "GreaterEqualUint16x16", + argLen: 2, generic: true, }, { - name: "SignInt16x16", + name: "GreaterEqualUint16x32", argLen: 2, generic: true, }, { - name: "SubInt16x16", + name: "GreaterEqualUint32x4", argLen: 2, generic: true, }, { - name: "SubMaskedInt16x16", - argLen: 3, + name: "GreaterEqualUint32x8", + argLen: 2, generic: true, }, { - name: "XorInt16x16", - argLen: 2, - commutative: true, - generic: true, + name: "GreaterEqualUint32x16", + argLen: 2, + generic: true, }, { - name: "AbsoluteInt16x32", - argLen: 1, + name: "GreaterEqualUint64x2", + argLen: 2, generic: true, }, { - name: "AbsoluteMaskedInt16x32", + name: "GreaterEqualUint64x4", argLen: 2, generic: true, }, { - name: "AddInt16x32", - argLen: 2, - commutative: true, - generic: true, + name: "GreaterEqualUint64x8", + argLen: 2, + generic: true, }, { - name: "AddMaskedInt16x32", - argLen: 3, - commutative: true, - generic: true, + name: "GreaterFloat32x4", + argLen: 2, + generic: true, }, { - name: "CompressInt16x32", + name: "GreaterFloat32x8", argLen: 2, generic: true, }, { - name: "EqualInt16x32", - argLen: 2, - commutative: true, - generic: true, + name: "GreaterFloat32x16", + argLen: 2, + generic: true, }, { - name: "EqualMaskedInt16x32", - argLen: 3, - commutative: true, - generic: true, + name: "GreaterFloat64x2", + argLen: 2, + generic: true, }, { - name: "GreaterInt16x32", + name: "GreaterFloat64x4", argLen: 2, generic: true, }, { - name: "GreaterEqualInt16x32", + name: "GreaterFloat64x8", argLen: 2, generic: true, }, { - name: "GreaterEqualMaskedInt16x32", - argLen: 3, + name: "GreaterInt8x16", 
+ argLen: 2, generic: true, }, { - name: "GreaterMaskedInt16x32", - argLen: 3, + name: "GreaterInt8x32", + argLen: 2, generic: true, }, { - name: "LessInt16x32", + name: "GreaterInt8x64", argLen: 2, generic: true, }, { - name: "LessEqualInt16x32", + name: "GreaterInt16x8", argLen: 2, generic: true, }, { - name: "LessEqualMaskedInt16x32", - argLen: 3, + name: "GreaterInt16x16", + argLen: 2, generic: true, }, { - name: "LessMaskedInt16x32", - argLen: 3, + name: "GreaterInt16x32", + argLen: 2, generic: true, }, { - name: "MaxInt16x32", - argLen: 2, - commutative: true, - generic: true, + name: "GreaterInt32x4", + argLen: 2, + generic: true, }, { - name: "MaxMaskedInt16x32", - argLen: 3, - commutative: true, - generic: true, + name: "GreaterInt32x8", + argLen: 2, + generic: true, }, { - name: "MinInt16x32", - argLen: 2, - commutative: true, - generic: true, + name: "GreaterInt32x16", + argLen: 2, + generic: true, }, { - name: "MinMaskedInt16x32", - argLen: 3, - commutative: true, - generic: true, + name: "GreaterInt64x2", + argLen: 2, + generic: true, }, { - name: "MulHighInt16x32", - argLen: 2, - commutative: true, - generic: true, + name: "GreaterInt64x4", + argLen: 2, + generic: true, }, { - name: "MulHighMaskedInt16x32", - argLen: 3, - commutative: true, - generic: true, + name: "GreaterInt64x8", + argLen: 2, + generic: true, }, { - name: "MulLowInt16x32", - argLen: 2, - commutative: true, - generic: true, + name: "GreaterMaskedFloat32x4", + argLen: 3, + generic: true, }, { - name: "MulLowMaskedInt16x32", - argLen: 3, - commutative: true, - generic: true, + name: "GreaterMaskedFloat32x8", + argLen: 3, + generic: true, }, { - name: "NotEqualInt16x32", - argLen: 2, - commutative: true, - generic: true, + name: "GreaterMaskedFloat32x16", + argLen: 3, + generic: true, }, { - name: "NotEqualMaskedInt16x32", - argLen: 3, - commutative: true, - generic: true, + name: "GreaterMaskedFloat64x2", + argLen: 3, + generic: true, }, { - name: "PairDotProdInt16x32", - argLen: 2, + name: "GreaterMaskedFloat64x4", + argLen: 3, generic: true, }, { - name: "PairDotProdMaskedInt16x32", + name: "GreaterMaskedFloat64x8", argLen: 3, generic: true, }, { - name: "PopCountInt16x32", - argLen: 1, + name: "GreaterMaskedInt8x16", + argLen: 3, generic: true, }, { - name: "PopCountMaskedInt16x32", - argLen: 2, + name: "GreaterMaskedInt8x32", + argLen: 3, generic: true, }, { - name: "SaturatedAddInt16x32", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "SaturatedAddMaskedInt16x32", - argLen: 3, - commutative: true, - generic: true, + name: "GreaterMaskedInt8x64", + argLen: 3, + generic: true, }, { - name: "SaturatedSubInt16x32", - argLen: 2, + name: "GreaterMaskedInt16x8", + argLen: 3, generic: true, }, { - name: "SaturatedSubMaskedInt16x32", + name: "GreaterMaskedInt16x16", argLen: 3, generic: true, }, { - name: "ShiftAllLeftInt16x32", - argLen: 2, + name: "GreaterMaskedInt16x32", + argLen: 3, generic: true, }, { - name: "ShiftAllLeftMaskedInt16x32", + name: "GreaterMaskedInt32x4", argLen: 3, generic: true, }, { - name: "ShiftAllRightInt16x32", - argLen: 2, + name: "GreaterMaskedInt32x8", + argLen: 3, generic: true, }, { - name: "ShiftAllRightMaskedInt16x32", + name: "GreaterMaskedInt32x16", argLen: 3, generic: true, }, { - name: "ShiftLeftInt16x32", - argLen: 2, + name: "GreaterMaskedInt64x2", + argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromInt16x32", + name: "GreaterMaskedInt64x4", argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedInt16x32", - argLen: 4, + 
name: "GreaterMaskedInt64x8", + argLen: 3, generic: true, }, { - name: "ShiftLeftMaskedInt16x32", + name: "GreaterMaskedUint8x16", argLen: 3, generic: true, }, { - name: "ShiftRightInt16x32", - argLen: 2, + name: "GreaterMaskedUint8x32", + argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromInt16x32", + name: "GreaterMaskedUint8x64", argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedInt16x32", - argLen: 4, + name: "GreaterMaskedUint16x8", + argLen: 3, generic: true, }, { - name: "ShiftRightMaskedInt16x32", + name: "GreaterMaskedUint16x16", argLen: 3, generic: true, }, { - name: "SubInt16x32", - argLen: 2, + name: "GreaterMaskedUint16x32", + argLen: 3, generic: true, }, { - name: "SubMaskedInt16x32", + name: "GreaterMaskedUint32x4", argLen: 3, generic: true, }, { - name: "AbsoluteInt16x8", - argLen: 1, + name: "GreaterMaskedUint32x8", + argLen: 3, generic: true, }, { - name: "AbsoluteMaskedInt16x8", - argLen: 2, + name: "GreaterMaskedUint32x16", + argLen: 3, generic: true, }, { - name: "AddInt16x8", - argLen: 2, - commutative: true, - generic: true, + name: "GreaterMaskedUint64x2", + argLen: 3, + generic: true, }, { - name: "AddMaskedInt16x8", - argLen: 3, - commutative: true, - generic: true, + name: "GreaterMaskedUint64x4", + argLen: 3, + generic: true, }, { - name: "AndInt16x8", - argLen: 2, - commutative: true, - generic: true, + name: "GreaterMaskedUint64x8", + argLen: 3, + generic: true, }, { - name: "AndNotInt16x8", + name: "GreaterUint8x16", argLen: 2, generic: true, }, { - name: "CompressInt16x8", + name: "GreaterUint8x32", argLen: 2, generic: true, }, { - name: "EqualInt16x8", - argLen: 2, - commutative: true, - generic: true, + name: "GreaterUint8x64", + argLen: 2, + generic: true, }, { - name: "EqualMaskedInt16x8", - argLen: 3, - commutative: true, - generic: true, + name: "GreaterUint16x8", + argLen: 2, + generic: true, }, { - name: "GreaterInt16x8", + name: "GreaterUint16x16", argLen: 2, generic: true, }, { - name: "GreaterEqualInt16x8", + name: "GreaterUint16x32", argLen: 2, generic: true, }, { - name: "GreaterEqualMaskedInt16x8", - argLen: 3, + name: "GreaterUint32x4", + argLen: 2, generic: true, }, { - name: "GreaterMaskedInt16x8", - argLen: 3, + name: "GreaterUint32x8", + argLen: 2, generic: true, }, { - name: "LessInt16x8", + name: "GreaterUint32x16", argLen: 2, generic: true, }, { - name: "LessEqualInt16x8", + name: "GreaterUint64x2", argLen: 2, generic: true, }, { - name: "LessEqualMaskedInt16x8", - argLen: 3, + name: "GreaterUint64x4", + argLen: 2, generic: true, }, { - name: "LessMaskedInt16x8", - argLen: 3, + name: "GreaterUint64x8", + argLen: 2, generic: true, }, { - name: "MaxInt16x8", + name: "IsNanFloat32x4", argLen: 2, commutative: true, generic: true, }, { - name: "MaxMaskedInt16x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MinInt16x8", + name: "IsNanFloat32x8", argLen: 2, commutative: true, generic: true, }, { - name: "MinMaskedInt16x8", - argLen: 3, + name: "IsNanFloat32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MulHighInt16x8", + name: "IsNanFloat64x2", argLen: 2, commutative: true, generic: true, }, { - name: "MulHighMaskedInt16x8", - argLen: 3, + name: "IsNanFloat64x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MulLowInt16x8", + name: "IsNanFloat64x8", argLen: 2, commutative: true, generic: true, }, { - name: "MulLowMaskedInt16x8", + name: "IsNanMaskedFloat32x4", argLen: 3, commutative: true, generic: true, }, { - name: "NotEqualInt16x8", - argLen: 2, 
+ name: "IsNanMaskedFloat32x8", + argLen: 3, commutative: true, generic: true, }, { - name: "NotEqualMaskedInt16x8", + name: "IsNanMaskedFloat32x16", argLen: 3, commutative: true, generic: true, }, { - name: "OrInt16x8", - argLen: 2, + name: "IsNanMaskedFloat64x2", + argLen: 3, commutative: true, generic: true, }, { - name: "PairDotProdInt16x8", - argLen: 2, - generic: true, - }, - { - name: "PairDotProdMaskedInt16x8", - argLen: 3, - generic: true, - }, - { - name: "PairwiseAddInt16x8", - argLen: 2, - generic: true, - }, - { - name: "PairwiseSubInt16x8", - argLen: 2, - generic: true, - }, - { - name: "PopCountInt16x8", - argLen: 1, - generic: true, - }, - { - name: "PopCountMaskedInt16x8", - argLen: 2, - generic: true, - }, - { - name: "SaturatedAddInt16x8", - argLen: 2, + name: "IsNanMaskedFloat64x4", + argLen: 3, commutative: true, generic: true, }, { - name: "SaturatedAddMaskedInt16x8", + name: "IsNanMaskedFloat64x8", argLen: 3, commutative: true, generic: true, }, { - name: "SaturatedPairwiseAddInt16x8", + name: "LessEqualFloat32x4", argLen: 2, generic: true, }, { - name: "SaturatedPairwiseSubInt16x8", + name: "LessEqualFloat32x8", argLen: 2, generic: true, }, { - name: "SaturatedSubInt16x8", + name: "LessEqualFloat32x16", argLen: 2, generic: true, }, { - name: "SaturatedSubMaskedInt16x8", - argLen: 3, - generic: true, - }, - { - name: "ShiftAllLeftInt16x8", + name: "LessEqualFloat64x2", argLen: 2, generic: true, }, { - name: "ShiftAllLeftMaskedInt16x8", - argLen: 3, - generic: true, - }, - { - name: "ShiftAllRightInt16x8", + name: "LessEqualFloat64x4", argLen: 2, generic: true, }, { - name: "ShiftAllRightMaskedInt16x8", - argLen: 3, - generic: true, - }, - { - name: "ShiftLeftInt16x8", + name: "LessEqualFloat64x8", argLen: 2, generic: true, }, { - name: "ShiftLeftAndFillUpperFromInt16x8", - argLen: 3, - generic: true, - }, - { - name: "ShiftLeftAndFillUpperFromMaskedInt16x8", - argLen: 4, - generic: true, - }, - { - name: "ShiftLeftMaskedInt16x8", - argLen: 3, - generic: true, - }, - { - name: "ShiftRightInt16x8", + name: "LessEqualInt8x16", argLen: 2, generic: true, }, { - name: "ShiftRightAndFillUpperFromInt16x8", - argLen: 3, - generic: true, - }, - { - name: "ShiftRightAndFillUpperFromMaskedInt16x8", - argLen: 4, - generic: true, - }, - { - name: "ShiftRightMaskedInt16x8", - argLen: 3, + name: "LessEqualInt8x32", + argLen: 2, generic: true, }, { - name: "SignInt16x8", + name: "LessEqualInt8x64", argLen: 2, generic: true, }, { - name: "SubInt16x8", + name: "LessEqualInt16x8", argLen: 2, generic: true, }, { - name: "SubMaskedInt16x8", - argLen: 3, + name: "LessEqualInt16x16", + argLen: 2, generic: true, }, { - name: "XorInt16x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AbsoluteInt32x16", - argLen: 1, + name: "LessEqualInt16x32", + argLen: 2, generic: true, }, { - name: "AbsoluteMaskedInt32x16", + name: "LessEqualInt32x4", argLen: 2, generic: true, }, { - name: "AddInt32x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AddMaskedInt32x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AndInt32x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndMaskedInt32x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AndNotInt32x16", + name: "LessEqualInt32x8", argLen: 2, generic: true, }, { - name: "AndNotMaskedInt32x16", - argLen: 3, + name: "LessEqualInt32x16", + argLen: 2, generic: true, }, { - name: "CompressInt32x16", + name: "LessEqualInt64x2", argLen: 2, generic: 
true, }, { - name: "EqualInt32x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "EqualMaskedInt32x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "GreaterInt32x16", + name: "LessEqualInt64x4", argLen: 2, generic: true, }, { - name: "GreaterEqualInt32x16", + name: "LessEqualInt64x8", argLen: 2, generic: true, }, { - name: "GreaterEqualMaskedInt32x16", + name: "LessEqualMaskedFloat32x4", argLen: 3, generic: true, }, { - name: "GreaterMaskedInt32x16", + name: "LessEqualMaskedFloat32x8", argLen: 3, generic: true, }, { - name: "LessInt32x16", - argLen: 2, + name: "LessEqualMaskedFloat32x16", + argLen: 3, generic: true, }, { - name: "LessEqualInt32x16", - argLen: 2, + name: "LessEqualMaskedFloat64x2", + argLen: 3, generic: true, }, { - name: "LessEqualMaskedInt32x16", + name: "LessEqualMaskedFloat64x4", argLen: 3, generic: true, }, { - name: "LessMaskedInt32x16", + name: "LessEqualMaskedFloat64x8", argLen: 3, generic: true, }, { - name: "MaxInt32x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "MaxMaskedInt32x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MinInt32x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "MinMaskedInt32x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MulLowInt32x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "MulLowMaskedInt32x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "NotEqualInt32x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "NotEqualMaskedInt32x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "OrInt32x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "OrMaskedInt32x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "PairDotProdAccumulateInt32x16", + name: "LessEqualMaskedInt8x16", argLen: 3, generic: true, }, { - name: "PairDotProdAccumulateMaskedInt32x16", - argLen: 4, + name: "LessEqualMaskedInt8x32", + argLen: 3, generic: true, }, { - name: "PopCountInt32x16", - argLen: 1, + name: "LessEqualMaskedInt8x64", + argLen: 3, generic: true, }, { - name: "PopCountMaskedInt32x16", - argLen: 2, + name: "LessEqualMaskedInt16x8", + argLen: 3, generic: true, }, { - name: "RotateLeftInt32x16", - argLen: 2, + name: "LessEqualMaskedInt16x16", + argLen: 3, generic: true, }, { - name: "RotateLeftMaskedInt32x16", + name: "LessEqualMaskedInt16x32", argLen: 3, generic: true, }, { - name: "RotateRightInt32x16", - argLen: 2, + name: "LessEqualMaskedInt32x4", + argLen: 3, generic: true, }, { - name: "RotateRightMaskedInt32x16", + name: "LessEqualMaskedInt32x8", argLen: 3, generic: true, }, { - name: "SaturatedPairDotProdAccumulateInt32x16", + name: "LessEqualMaskedInt32x16", argLen: 3, generic: true, }, { - name: "SaturatedPairDotProdAccumulateMaskedInt32x16", - argLen: 4, + name: "LessEqualMaskedInt64x2", + argLen: 3, generic: true, }, { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x16", + name: "LessEqualMaskedInt64x4", argLen: 3, generic: true, }, { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16", - argLen: 4, + name: "LessEqualMaskedInt64x8", + argLen: 3, generic: true, }, { - name: "ShiftAllLeftInt32x16", - argLen: 2, + name: "LessEqualMaskedUint8x16", + argLen: 3, generic: true, }, { - name: "ShiftAllLeftMaskedInt32x16", + name: "LessEqualMaskedUint8x32", argLen: 3, generic: true, }, { - name: "ShiftAllRightInt32x16", - argLen: 2, + name: 
"LessEqualMaskedUint8x64", + argLen: 3, generic: true, }, { - name: "ShiftAllRightMaskedInt32x16", + name: "LessEqualMaskedUint16x8", argLen: 3, generic: true, }, { - name: "ShiftLeftInt32x16", - argLen: 2, + name: "LessEqualMaskedUint16x16", + argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromInt32x16", + name: "LessEqualMaskedUint16x32", argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedInt32x16", - argLen: 4, + name: "LessEqualMaskedUint32x4", + argLen: 3, generic: true, }, { - name: "ShiftLeftMaskedInt32x16", + name: "LessEqualMaskedUint32x8", argLen: 3, generic: true, }, { - name: "ShiftRightInt32x16", - argLen: 2, + name: "LessEqualMaskedUint32x16", + argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromInt32x16", + name: "LessEqualMaskedUint64x2", argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedInt32x16", - argLen: 4, + name: "LessEqualMaskedUint64x4", + argLen: 3, generic: true, }, { - name: "ShiftRightMaskedInt32x16", + name: "LessEqualMaskedUint64x8", argLen: 3, generic: true, }, { - name: "SubInt32x16", + name: "LessEqualUint8x16", argLen: 2, generic: true, }, { - name: "SubMaskedInt32x16", - argLen: 3, + name: "LessEqualUint8x32", + argLen: 2, generic: true, }, { - name: "UnsignedSignedQuadDotProdAccumulateInt32x16", - argLen: 3, + name: "LessEqualUint8x64", + argLen: 2, generic: true, }, { - name: "UnsignedSignedQuadDotProdAccumulateMaskedInt32x16", - argLen: 4, + name: "LessEqualUint16x8", + argLen: 2, generic: true, }, { - name: "XorInt32x16", - argLen: 2, - commutative: true, - generic: true, + name: "LessEqualUint16x16", + argLen: 2, + generic: true, }, { - name: "XorMaskedInt32x16", - argLen: 3, - commutative: true, - generic: true, + name: "LessEqualUint16x32", + argLen: 2, + generic: true, }, { - name: "AbsoluteInt32x4", - argLen: 1, + name: "LessEqualUint32x4", + argLen: 2, generic: true, }, { - name: "AbsoluteMaskedInt32x4", + name: "LessEqualUint32x8", argLen: 2, generic: true, }, { - name: "AddInt32x4", - argLen: 2, - commutative: true, - generic: true, + name: "LessEqualUint32x16", + argLen: 2, + generic: true, }, { - name: "AddMaskedInt32x4", - argLen: 3, - commutative: true, - generic: true, + name: "LessEqualUint64x2", + argLen: 2, + generic: true, }, { - name: "AndInt32x4", - argLen: 2, - commutative: true, - generic: true, + name: "LessEqualUint64x4", + argLen: 2, + generic: true, }, { - name: "AndMaskedInt32x4", - argLen: 3, - commutative: true, - generic: true, + name: "LessEqualUint64x8", + argLen: 2, + generic: true, }, { - name: "AndNotInt32x4", + name: "LessFloat32x4", argLen: 2, generic: true, }, { - name: "AndNotMaskedInt32x4", - argLen: 3, + name: "LessFloat32x8", + argLen: 2, generic: true, }, { - name: "CompressInt32x4", + name: "LessFloat32x16", argLen: 2, generic: true, }, { - name: "EqualInt32x4", - argLen: 2, - commutative: true, - generic: true, + name: "LessFloat64x2", + argLen: 2, + generic: true, }, { - name: "EqualMaskedInt32x4", - argLen: 3, - commutative: true, - generic: true, + name: "LessFloat64x4", + argLen: 2, + generic: true, }, { - name: "GreaterInt32x4", + name: "LessFloat64x8", argLen: 2, generic: true, }, { - name: "GreaterEqualInt32x4", + name: "LessInt8x16", argLen: 2, generic: true, }, { - name: "GreaterEqualMaskedInt32x4", - argLen: 3, + name: "LessInt8x32", + argLen: 2, generic: true, }, { - name: "GreaterMaskedInt32x4", - argLen: 3, + name: "LessInt8x64", + argLen: 2, generic: true, }, { - name: "LessInt32x4", + name: "LessInt16x8", argLen: 2, 
generic: true, }, { - name: "LessEqualInt32x4", + name: "LessInt16x16", argLen: 2, generic: true, }, { - name: "LessEqualMaskedInt32x4", - argLen: 3, + name: "LessInt16x32", + argLen: 2, generic: true, }, { - name: "LessMaskedInt32x4", - argLen: 3, + name: "LessInt32x4", + argLen: 2, generic: true, }, { - name: "MaxInt32x4", - argLen: 2, - commutative: true, - generic: true, + name: "LessInt32x8", + argLen: 2, + generic: true, }, { - name: "MaxMaskedInt32x4", - argLen: 3, - commutative: true, - generic: true, + name: "LessInt32x16", + argLen: 2, + generic: true, }, { - name: "MinInt32x4", - argLen: 2, - commutative: true, - generic: true, + name: "LessInt64x2", + argLen: 2, + generic: true, }, { - name: "MinMaskedInt32x4", - argLen: 3, - commutative: true, - generic: true, + name: "LessInt64x4", + argLen: 2, + generic: true, }, { - name: "MulEvenWidenInt32x4", - argLen: 2, - commutative: true, - generic: true, + name: "LessInt64x8", + argLen: 2, + generic: true, }, { - name: "MulLowInt32x4", - argLen: 2, - commutative: true, - generic: true, + name: "LessMaskedFloat32x4", + argLen: 3, + generic: true, }, { - name: "MulLowMaskedInt32x4", - argLen: 3, - commutative: true, - generic: true, + name: "LessMaskedFloat32x8", + argLen: 3, + generic: true, }, { - name: "NotEqualInt32x4", - argLen: 2, - commutative: true, - generic: true, + name: "LessMaskedFloat32x16", + argLen: 3, + generic: true, }, { - name: "NotEqualMaskedInt32x4", - argLen: 3, - commutative: true, - generic: true, + name: "LessMaskedFloat64x2", + argLen: 3, + generic: true, }, { - name: "OrInt32x4", - argLen: 2, - commutative: true, - generic: true, + name: "LessMaskedFloat64x4", + argLen: 3, + generic: true, }, { - name: "OrMaskedInt32x4", - argLen: 3, - commutative: true, - generic: true, + name: "LessMaskedFloat64x8", + argLen: 3, + generic: true, }, { - name: "PairDotProdAccumulateInt32x4", + name: "LessMaskedInt8x16", argLen: 3, generic: true, }, { - name: "PairDotProdAccumulateMaskedInt32x4", - argLen: 4, + name: "LessMaskedInt8x32", + argLen: 3, generic: true, }, { - name: "PairwiseAddInt32x4", - argLen: 2, + name: "LessMaskedInt8x64", + argLen: 3, generic: true, }, { - name: "PairwiseSubInt32x4", - argLen: 2, + name: "LessMaskedInt16x8", + argLen: 3, generic: true, }, { - name: "PopCountInt32x4", - argLen: 1, + name: "LessMaskedInt16x16", + argLen: 3, generic: true, }, { - name: "PopCountMaskedInt32x4", - argLen: 2, + name: "LessMaskedInt16x32", + argLen: 3, generic: true, }, { - name: "RotateLeftInt32x4", - argLen: 2, + name: "LessMaskedInt32x4", + argLen: 3, generic: true, }, { - name: "RotateLeftMaskedInt32x4", + name: "LessMaskedInt32x8", argLen: 3, generic: true, }, { - name: "RotateRightInt32x4", - argLen: 2, + name: "LessMaskedInt32x16", + argLen: 3, generic: true, }, { - name: "RotateRightMaskedInt32x4", + name: "LessMaskedInt64x2", argLen: 3, generic: true, }, { - name: "SaturatedPairDotProdAccumulateInt32x4", + name: "LessMaskedInt64x4", argLen: 3, generic: true, }, { - name: "SaturatedPairDotProdAccumulateMaskedInt32x4", - argLen: 4, + name: "LessMaskedInt64x8", + argLen: 3, generic: true, }, { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x4", + name: "LessMaskedUint8x16", argLen: 3, generic: true, }, { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4", - argLen: 4, + name: "LessMaskedUint8x32", + argLen: 3, generic: true, }, { - name: "ShiftAllLeftInt32x4", - argLen: 2, + name: "LessMaskedUint8x64", + argLen: 3, generic: true, }, { - name: "ShiftAllLeftMaskedInt32x4", + name: 
"LessMaskedUint16x8", argLen: 3, generic: true, }, { - name: "ShiftAllRightInt32x4", - argLen: 2, + name: "LessMaskedUint16x16", + argLen: 3, generic: true, }, { - name: "ShiftAllRightMaskedInt32x4", + name: "LessMaskedUint16x32", argLen: 3, generic: true, }, { - name: "ShiftLeftInt32x4", - argLen: 2, + name: "LessMaskedUint32x4", + argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromInt32x4", + name: "LessMaskedUint32x8", argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedInt32x4", - argLen: 4, + name: "LessMaskedUint32x16", + argLen: 3, generic: true, }, { - name: "ShiftLeftMaskedInt32x4", + name: "LessMaskedUint64x2", argLen: 3, generic: true, }, { - name: "ShiftRightInt32x4", - argLen: 2, + name: "LessMaskedUint64x4", + argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromInt32x4", + name: "LessMaskedUint64x8", argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedInt32x4", - argLen: 4, + name: "LessUint8x16", + argLen: 2, generic: true, }, { - name: "ShiftRightMaskedInt32x4", - argLen: 3, + name: "LessUint8x32", + argLen: 2, generic: true, }, { - name: "SignInt32x4", + name: "LessUint8x64", argLen: 2, generic: true, }, { - name: "SubInt32x4", + name: "LessUint16x8", argLen: 2, generic: true, }, { - name: "SubMaskedInt32x4", - argLen: 3, + name: "LessUint16x16", + argLen: 2, generic: true, }, { - name: "UnsignedSignedQuadDotProdAccumulateInt32x4", - argLen: 3, + name: "LessUint16x32", + argLen: 2, generic: true, }, { - name: "UnsignedSignedQuadDotProdAccumulateMaskedInt32x4", - argLen: 4, + name: "LessUint32x4", + argLen: 2, generic: true, }, { - name: "XorInt32x4", - argLen: 2, - commutative: true, - generic: true, + name: "LessUint32x8", + argLen: 2, + generic: true, }, { - name: "XorMaskedInt32x4", - argLen: 3, - commutative: true, - generic: true, + name: "LessUint32x16", + argLen: 2, + generic: true, }, { - name: "AbsoluteInt32x8", - argLen: 1, + name: "LessUint64x2", + argLen: 2, generic: true, }, { - name: "AbsoluteMaskedInt32x8", + name: "LessUint64x4", argLen: 2, generic: true, }, { - name: "AddInt32x8", + name: "LessUint64x8", + argLen: 2, + generic: true, + }, + { + name: "MaxFloat32x4", argLen: 2, commutative: true, generic: true, }, { - name: "AddMaskedInt32x8", - argLen: 3, + name: "MaxFloat32x8", + argLen: 2, commutative: true, generic: true, }, { - name: "AndInt32x8", + name: "MaxFloat32x16", argLen: 2, commutative: true, generic: true, }, { - name: "AndMaskedInt32x8", - argLen: 3, + name: "MaxFloat64x2", + argLen: 2, commutative: true, generic: true, }, { - name: "AndNotInt32x8", - argLen: 2, - generic: true, + name: "MaxFloat64x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AndNotMaskedInt32x8", - argLen: 3, - generic: true, + name: "MaxFloat64x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "CompressInt32x8", - argLen: 2, - generic: true, + name: "MaxInt8x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "EqualInt32x8", + name: "MaxInt8x32", argLen: 2, commutative: true, generic: true, }, { - name: "EqualMaskedInt32x8", - argLen: 3, + name: "MaxInt8x64", + argLen: 2, commutative: true, generic: true, }, { - name: "GreaterInt32x8", - argLen: 2, - generic: true, + name: "MaxInt16x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "GreaterEqualInt32x8", - argLen: 2, - generic: true, + name: "MaxInt16x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "GreaterEqualMaskedInt32x8", - argLen: 3, - generic: true, + 
name: "MaxInt16x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "GreaterMaskedInt32x8", - argLen: 3, - generic: true, + name: "MaxInt32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "LessInt32x8", - argLen: 2, - generic: true, + name: "MaxInt32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "LessEqualInt32x8", - argLen: 2, - generic: true, + name: "MaxInt32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "LessEqualMaskedInt32x8", - argLen: 3, - generic: true, + name: "MaxInt64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "LessMaskedInt32x8", - argLen: 3, - generic: true, + name: "MaxInt64x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaxInt32x8", + name: "MaxInt64x8", argLen: 2, commutative: true, generic: true, }, { - name: "MaxMaskedInt32x8", + name: "MaxMaskedFloat32x4", argLen: 3, commutative: true, generic: true, }, { - name: "MinInt32x8", - argLen: 2, + name: "MaxMaskedFloat32x8", + argLen: 3, commutative: true, generic: true, }, { - name: "MinMaskedInt32x8", + name: "MaxMaskedFloat32x16", argLen: 3, commutative: true, generic: true, }, { - name: "MulEvenWidenInt32x8", - argLen: 2, + name: "MaxMaskedFloat64x2", + argLen: 3, commutative: true, generic: true, }, { - name: "MulLowInt32x8", - argLen: 2, + name: "MaxMaskedFloat64x4", + argLen: 3, commutative: true, generic: true, }, { - name: "MulLowMaskedInt32x8", + name: "MaxMaskedFloat64x8", argLen: 3, commutative: true, generic: true, }, { - name: "NotEqualInt32x8", - argLen: 2, + name: "MaxMaskedInt8x16", + argLen: 3, commutative: true, generic: true, }, { - name: "NotEqualMaskedInt32x8", + name: "MaxMaskedInt8x32", argLen: 3, commutative: true, generic: true, }, { - name: "OrInt32x8", - argLen: 2, + name: "MaxMaskedInt8x64", + argLen: 3, commutative: true, generic: true, }, { - name: "OrMaskedInt32x8", + name: "MaxMaskedInt16x8", argLen: 3, commutative: true, generic: true, }, { - name: "PairDotProdAccumulateInt32x8", - argLen: 3, - generic: true, + name: "MaxMaskedInt16x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "PairDotProdAccumulateMaskedInt32x8", - argLen: 4, - generic: true, + name: "MaxMaskedInt16x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "PairwiseAddInt32x8", - argLen: 2, - generic: true, + name: "MaxMaskedInt32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "PairwiseSubInt32x8", - argLen: 2, - generic: true, + name: "MaxMaskedInt32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "PopCountInt32x8", - argLen: 1, - generic: true, + name: "MaxMaskedInt32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "PopCountMaskedInt32x8", - argLen: 2, - generic: true, + name: "MaxMaskedInt64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "RotateLeftInt32x8", - argLen: 2, - generic: true, + name: "MaxMaskedInt64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "RotateLeftMaskedInt32x8", - argLen: 3, - generic: true, + name: "MaxMaskedInt64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "RotateRightInt32x8", - argLen: 2, - generic: true, + name: "MaxMaskedUint8x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "RotateRightMaskedInt32x8", - argLen: 3, - generic: true, + name: "MaxMaskedUint8x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "SaturatedPairDotProdAccumulateInt32x8", - argLen: 3, - generic: 
true, + name: "MaxMaskedUint8x64", + argLen: 3, + commutative: true, + generic: true, }, { - name: "SaturatedPairDotProdAccumulateMaskedInt32x8", - argLen: 4, - generic: true, + name: "MaxMaskedUint16x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x8", - argLen: 3, - generic: true, + name: "MaxMaskedUint16x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8", - argLen: 4, - generic: true, + name: "MaxMaskedUint16x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftAllLeftInt32x8", - argLen: 2, - generic: true, + name: "MaxMaskedUint32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftAllLeftMaskedInt32x8", - argLen: 3, - generic: true, + name: "MaxMaskedUint32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftAllRightInt32x8", - argLen: 2, - generic: true, + name: "MaxMaskedUint32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftAllRightMaskedInt32x8", - argLen: 3, - generic: true, + name: "MaxMaskedUint64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftLeftInt32x8", - argLen: 2, - generic: true, + name: "MaxMaskedUint64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftLeftAndFillUpperFromInt32x8", - argLen: 3, - generic: true, + name: "MaxMaskedUint64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedInt32x8", - argLen: 4, - generic: true, + name: "MaxUint8x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "ShiftLeftMaskedInt32x8", - argLen: 3, - generic: true, + name: "MaxUint8x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "ShiftRightInt32x8", - argLen: 2, - generic: true, + name: "MaxUint8x64", + argLen: 2, + commutative: true, + generic: true, }, { - name: "ShiftRightAndFillUpperFromInt32x8", - argLen: 3, - generic: true, + name: "MaxUint16x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedInt32x8", - argLen: 4, - generic: true, + name: "MaxUint16x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "ShiftRightMaskedInt32x8", - argLen: 3, - generic: true, + name: "MaxUint16x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SignInt32x8", - argLen: 2, - generic: true, + name: "MaxUint32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SubInt32x8", - argLen: 2, - generic: true, + name: "MaxUint32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SubMaskedInt32x8", - argLen: 3, - generic: true, + name: "MaxUint32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "UnsignedSignedQuadDotProdAccumulateInt32x8", - argLen: 3, - generic: true, + name: "MaxUint64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "UnsignedSignedQuadDotProdAccumulateMaskedInt32x8", - argLen: 4, - generic: true, + name: "MaxUint64x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "XorInt32x8", + name: "MaxUint64x8", argLen: 2, commutative: true, generic: true, }, { - name: "XorMaskedInt32x8", - argLen: 3, + name: "MinFloat32x4", + argLen: 2, commutative: true, generic: true, }, { - name: "AbsoluteInt64x2", - argLen: 1, - generic: true, + name: "MinFloat32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AbsoluteMaskedInt64x2", - argLen: 2, 
- generic: true, + name: "MinFloat32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AddInt64x2", + name: "MinFloat64x2", argLen: 2, commutative: true, generic: true, }, { - name: "AddMaskedInt64x2", - argLen: 3, + name: "MinFloat64x4", + argLen: 2, commutative: true, generic: true, }, { - name: "AndInt64x2", + name: "MinFloat64x8", argLen: 2, commutative: true, generic: true, }, { - name: "AndMaskedInt64x2", - argLen: 3, + name: "MinInt8x16", + argLen: 2, commutative: true, generic: true, }, { - name: "AndNotInt64x2", - argLen: 2, - generic: true, + name: "MinInt8x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AndNotMaskedInt64x2", - argLen: 3, - generic: true, + name: "MinInt8x64", + argLen: 2, + commutative: true, + generic: true, }, { - name: "CompressInt64x2", - argLen: 2, - generic: true, + name: "MinInt16x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "EqualInt64x2", + name: "MinInt16x16", argLen: 2, commutative: true, generic: true, }, { - name: "EqualMaskedInt64x2", - argLen: 3, + name: "MinInt16x32", + argLen: 2, commutative: true, generic: true, }, { - name: "GreaterInt64x2", - argLen: 2, - generic: true, + name: "MinInt32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "GreaterEqualInt64x2", - argLen: 2, - generic: true, + name: "MinInt32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "GreaterEqualMaskedInt64x2", - argLen: 3, - generic: true, + name: "MinInt32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "GreaterMaskedInt64x2", - argLen: 3, - generic: true, + name: "MinInt64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "LessInt64x2", - argLen: 2, - generic: true, + name: "MinInt64x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "LessEqualInt64x2", - argLen: 2, - generic: true, + name: "MinInt64x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "LessEqualMaskedInt64x2", - argLen: 3, - generic: true, + name: "MinMaskedFloat32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LessMaskedInt64x2", - argLen: 3, - generic: true, + name: "MinMaskedFloat32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaxInt64x2", - argLen: 2, + name: "MinMaskedFloat32x16", + argLen: 3, commutative: true, generic: true, }, { - name: "MaxMaskedInt64x2", + name: "MinMaskedFloat64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MinInt64x2", - argLen: 2, + name: "MinMaskedFloat64x4", + argLen: 3, commutative: true, generic: true, }, { - name: "MinMaskedInt64x2", + name: "MinMaskedFloat64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MulEvenWidenInt64x2", - argLen: 2, + name: "MinMaskedInt8x16", + argLen: 3, commutative: true, generic: true, }, { - name: "MulEvenWidenMaskedInt64x2", + name: "MinMaskedInt8x32", argLen: 3, commutative: true, generic: true, }, { - name: "MulLowInt64x2", - argLen: 2, + name: "MinMaskedInt8x64", + argLen: 3, commutative: true, generic: true, }, { - name: "MulLowMaskedInt64x2", + name: "MinMaskedInt16x8", argLen: 3, commutative: true, generic: true, }, { - name: "NotEqualInt64x2", - argLen: 2, + name: "MinMaskedInt16x16", + argLen: 3, commutative: true, generic: true, }, { - name: "NotEqualMaskedInt64x2", + name: "MinMaskedInt16x32", argLen: 3, commutative: true, generic: true, }, { - name: "OrInt64x2", - argLen: 2, + name: "MinMaskedInt32x4", + argLen: 3, commutative: true, generic: true, }, { - name: 
"OrMaskedInt64x2", + name: "MinMaskedInt32x8", argLen: 3, commutative: true, generic: true, }, { - name: "PopCountInt64x2", - argLen: 1, - generic: true, + name: "MinMaskedInt32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "PopCountMaskedInt64x2", - argLen: 2, - generic: true, + name: "MinMaskedInt64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "RotateLeftInt64x2", - argLen: 2, - generic: true, + name: "MinMaskedInt64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "RotateLeftMaskedInt64x2", - argLen: 3, - generic: true, + name: "MinMaskedInt64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "RotateRightInt64x2", - argLen: 2, - generic: true, + name: "MinMaskedUint8x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "RotateRightMaskedInt64x2", - argLen: 3, - generic: true, + name: "MinMaskedUint8x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftAllLeftInt64x2", - argLen: 2, - generic: true, + name: "MinMaskedUint8x64", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftAllLeftMaskedInt64x2", - argLen: 3, - generic: true, + name: "MinMaskedUint16x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftAllRightInt64x2", - argLen: 2, - generic: true, + name: "MinMaskedUint16x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftAllRightMaskedInt64x2", - argLen: 3, - generic: true, + name: "MinMaskedUint16x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftLeftInt64x2", - argLen: 2, - generic: true, + name: "MinMaskedUint32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftLeftAndFillUpperFromInt64x2", - argLen: 3, - generic: true, + name: "MinMaskedUint32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedInt64x2", - argLen: 4, - generic: true, + name: "MinMaskedUint32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftLeftMaskedInt64x2", - argLen: 3, - generic: true, + name: "MinMaskedUint64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftRightInt64x2", - argLen: 2, - generic: true, + name: "MinMaskedUint64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftRightAndFillUpperFromInt64x2", - argLen: 3, - generic: true, + name: "MinMaskedUint64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedInt64x2", - argLen: 4, - generic: true, + name: "MinUint8x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "ShiftRightMaskedInt64x2", - argLen: 3, - generic: true, + name: "MinUint8x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SubInt64x2", - argLen: 2, - generic: true, + name: "MinUint8x64", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SubMaskedInt64x2", - argLen: 3, - generic: true, + name: "MinUint16x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "XorInt64x2", + name: "MinUint16x16", argLen: 2, commutative: true, generic: true, }, { - name: "XorMaskedInt64x2", - argLen: 3, + name: "MinUint16x32", + argLen: 2, commutative: true, generic: true, }, { - name: "AbsoluteInt64x4", - argLen: 1, - generic: true, + name: "MinUint32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AbsoluteMaskedInt64x4", - argLen: 2, - generic: true, + name: "MinUint32x8", + argLen: 2, + commutative: true, + generic: true, 
}, { - name: "AddInt64x4", + name: "MinUint32x16", argLen: 2, commutative: true, generic: true, }, { - name: "AddMaskedInt64x4", - argLen: 3, + name: "MinUint64x2", + argLen: 2, commutative: true, generic: true, }, { - name: "AndInt64x4", + name: "MinUint64x4", argLen: 2, commutative: true, generic: true, }, { - name: "AndMaskedInt64x4", - argLen: 3, + name: "MinUint64x8", + argLen: 2, commutative: true, generic: true, }, { - name: "AndNotInt64x4", + name: "MulByPowOf2Float32x4", argLen: 2, generic: true, }, { - name: "AndNotMaskedInt64x4", - argLen: 3, + name: "MulByPowOf2Float32x8", + argLen: 2, generic: true, }, { - name: "CompressInt64x4", + name: "MulByPowOf2Float32x16", argLen: 2, generic: true, }, { - name: "EqualInt64x4", - argLen: 2, - commutative: true, - generic: true, + name: "MulByPowOf2Float64x2", + argLen: 2, + generic: true, }, { - name: "EqualMaskedInt64x4", - argLen: 3, - commutative: true, - generic: true, + name: "MulByPowOf2Float64x4", + argLen: 2, + generic: true, }, { - name: "GreaterInt64x4", + name: "MulByPowOf2Float64x8", argLen: 2, generic: true, }, { - name: "GreaterEqualInt64x4", - argLen: 2, + name: "MulByPowOf2MaskedFloat32x4", + argLen: 3, generic: true, }, { - name: "GreaterEqualMaskedInt64x4", + name: "MulByPowOf2MaskedFloat32x8", argLen: 3, generic: true, }, { - name: "GreaterMaskedInt64x4", + name: "MulByPowOf2MaskedFloat32x16", argLen: 3, generic: true, }, { - name: "LessInt64x4", - argLen: 2, + name: "MulByPowOf2MaskedFloat64x2", + argLen: 3, generic: true, }, { - name: "LessEqualInt64x4", - argLen: 2, + name: "MulByPowOf2MaskedFloat64x4", + argLen: 3, + generic: true, + }, + { + name: "MulByPowOf2MaskedFloat64x8", + argLen: 3, generic: true, }, { - name: "LessEqualMaskedInt64x4", - argLen: 3, - generic: true, + name: "MulEvenWidenInt32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulEvenWidenInt32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulEvenWidenInt64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulEvenWidenInt64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulEvenWidenInt64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulEvenWidenMaskedInt64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LessMaskedInt64x4", - argLen: 3, - generic: true, + name: "MulEvenWidenMaskedInt64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaxInt64x4", - argLen: 2, + name: "MulEvenWidenMaskedInt64x8", + argLen: 3, commutative: true, generic: true, }, { - name: "MaxMaskedInt64x4", + name: "MulEvenWidenMaskedUint64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MinInt64x4", - argLen: 2, + name: "MulEvenWidenMaskedUint64x4", + argLen: 3, commutative: true, generic: true, }, { - name: "MinMaskedInt64x4", + name: "MulEvenWidenMaskedUint64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MulEvenWidenInt64x4", + name: "MulEvenWidenUint32x4", argLen: 2, commutative: true, generic: true, }, { - name: "MulEvenWidenMaskedInt64x4", - argLen: 3, + name: "MulEvenWidenUint32x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MulLowInt64x4", + name: "MulEvenWidenUint64x2", argLen: 2, commutative: true, generic: true, }, { - name: "MulLowMaskedInt64x4", - argLen: 3, + name: "MulEvenWidenUint64x4", + argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualInt64x4", + name: "MulEvenWidenUint64x8", argLen: 2, commutative: true, generic: true, }, { - 
name: "NotEqualMaskedInt64x4", - argLen: 3, + name: "MulFloat32x4", + argLen: 2, commutative: true, generic: true, }, { - name: "OrInt64x4", + name: "MulFloat32x8", argLen: 2, commutative: true, generic: true, }, { - name: "OrMaskedInt64x4", - argLen: 3, + name: "MulFloat32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "PopCountInt64x4", - argLen: 1, - generic: true, + name: "MulFloat64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "PopCountMaskedInt64x4", - argLen: 2, - generic: true, + name: "MulFloat64x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "RotateLeftInt64x4", - argLen: 2, - generic: true, + name: "MulFloat64x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "RotateLeftMaskedInt64x4", - argLen: 3, - generic: true, + name: "MulHighInt16x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "RotateRightInt64x4", - argLen: 2, - generic: true, + name: "MulHighInt16x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "RotateRightMaskedInt64x4", - argLen: 3, - generic: true, + name: "MulHighInt16x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "ShiftAllLeftInt64x4", - argLen: 2, - generic: true, + name: "MulHighMaskedInt16x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftAllLeftMaskedInt64x4", - argLen: 3, - generic: true, + name: "MulHighMaskedInt16x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftAllRightInt64x4", - argLen: 2, - generic: true, + name: "MulHighMaskedInt16x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftAllRightMaskedInt64x4", - argLen: 3, - generic: true, + name: "MulHighMaskedUint16x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftLeftInt64x4", - argLen: 2, - generic: true, + name: "MulHighMaskedUint16x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftLeftAndFillUpperFromInt64x4", - argLen: 3, - generic: true, + name: "MulHighMaskedUint16x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedInt64x4", - argLen: 4, - generic: true, + name: "MulHighUint16x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "ShiftLeftMaskedInt64x4", - argLen: 3, - generic: true, + name: "MulHighUint16x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "ShiftRightInt64x4", - argLen: 2, - generic: true, + name: "MulHighUint16x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "ShiftRightAndFillUpperFromInt64x4", - argLen: 3, - generic: true, + name: "MulLowInt16x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedInt64x4", - argLen: 4, - generic: true, + name: "MulLowInt16x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "ShiftRightMaskedInt64x4", - argLen: 3, - generic: true, + name: "MulLowInt16x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SubInt64x4", - argLen: 2, - generic: true, + name: "MulLowInt32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SubMaskedInt64x4", - argLen: 3, - generic: true, + name: "MulLowInt32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "XorInt64x4", + name: "MulLowInt32x16", argLen: 2, commutative: true, generic: true, }, { - name: "XorMaskedInt64x4", - argLen: 3, + name: "MulLowInt64x2", + argLen: 2, commutative: true, generic: true, }, { - name: "AbsoluteInt64x8", - 
argLen: 1, - generic: true, + name: "MulLowInt64x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AbsoluteMaskedInt64x8", - argLen: 2, - generic: true, + name: "MulLowInt64x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AddInt64x8", - argLen: 2, + name: "MulLowMaskedInt16x8", + argLen: 3, commutative: true, generic: true, }, { - name: "AddMaskedInt64x8", + name: "MulLowMaskedInt16x16", argLen: 3, commutative: true, generic: true, }, { - name: "AndInt64x8", - argLen: 2, + name: "MulLowMaskedInt16x32", + argLen: 3, commutative: true, generic: true, }, { - name: "AndMaskedInt64x8", + name: "MulLowMaskedInt32x4", argLen: 3, commutative: true, generic: true, }, { - name: "AndNotInt64x8", - argLen: 2, - generic: true, + name: "MulLowMaskedInt32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AndNotMaskedInt64x8", - argLen: 3, - generic: true, + name: "MulLowMaskedInt32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "CompressInt64x8", - argLen: 2, - generic: true, + name: "MulLowMaskedInt64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "EqualInt64x8", - argLen: 2, + name: "MulLowMaskedInt64x4", + argLen: 3, commutative: true, generic: true, }, { - name: "EqualMaskedInt64x8", + name: "MulLowMaskedInt64x8", argLen: 3, commutative: true, generic: true, }, { - name: "GreaterInt64x8", - argLen: 2, - generic: true, + name: "MulMaskedFloat32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "GreaterEqualInt64x8", - argLen: 2, - generic: true, + name: "MulMaskedFloat32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "GreaterEqualMaskedInt64x8", - argLen: 3, - generic: true, + name: "MulMaskedFloat32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "GreaterMaskedInt64x8", - argLen: 3, - generic: true, + name: "MulMaskedFloat64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LessInt64x8", - argLen: 2, - generic: true, + name: "MulMaskedFloat64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LessEqualInt64x8", - argLen: 2, - generic: true, + name: "MulMaskedFloat64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LessEqualMaskedInt64x8", - argLen: 3, - generic: true, + name: "NotEqualFloat32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "LessMaskedInt64x8", - argLen: 3, - generic: true, + name: "NotEqualFloat32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaxInt64x8", + name: "NotEqualFloat32x16", argLen: 2, commutative: true, generic: true, }, { - name: "MaxMaskedInt64x8", - argLen: 3, + name: "NotEqualFloat64x2", + argLen: 2, commutative: true, generic: true, }, { - name: "MinInt64x8", + name: "NotEqualFloat64x4", argLen: 2, commutative: true, generic: true, }, { - name: "MinMaskedInt64x8", - argLen: 3, + name: "NotEqualFloat64x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MulEvenWidenInt64x8", + name: "NotEqualInt8x16", argLen: 2, commutative: true, generic: true, }, { - name: "MulEvenWidenMaskedInt64x8", - argLen: 3, + name: "NotEqualInt8x32", + argLen: 2, commutative: true, generic: true, }, { - name: "MulLowInt64x8", + name: "NotEqualInt8x64", argLen: 2, commutative: true, generic: true, }, { - name: "MulLowMaskedInt64x8", - argLen: 3, + name: "NotEqualInt16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualInt64x8", + name: "NotEqualInt16x16", argLen: 2, commutative: true, generic: true, }, 
{ - name: "NotEqualMaskedInt64x8", - argLen: 3, + name: "NotEqualInt16x32", + argLen: 2, commutative: true, generic: true, }, { - name: "OrInt64x8", + name: "NotEqualInt32x4", argLen: 2, commutative: true, generic: true, }, { - name: "OrMaskedInt64x8", - argLen: 3, + name: "NotEqualInt32x8", + argLen: 2, commutative: true, generic: true, }, { - name: "PopCountInt64x8", - argLen: 1, - generic: true, + name: "NotEqualInt32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "PopCountMaskedInt64x8", - argLen: 2, - generic: true, + name: "NotEqualInt64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "RotateLeftInt64x8", - argLen: 2, - generic: true, + name: "NotEqualInt64x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "RotateLeftMaskedInt64x8", - argLen: 3, - generic: true, + name: "NotEqualInt64x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "RotateRightInt64x8", - argLen: 2, - generic: true, + name: "NotEqualMaskedFloat32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "RotateRightMaskedInt64x8", - argLen: 3, - generic: true, + name: "NotEqualMaskedFloat32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftAllLeftInt64x8", - argLen: 2, - generic: true, + name: "NotEqualMaskedFloat32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftAllLeftMaskedInt64x8", - argLen: 3, - generic: true, + name: "NotEqualMaskedFloat64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftAllRightInt64x8", - argLen: 2, - generic: true, + name: "NotEqualMaskedFloat64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftAllRightMaskedInt64x8", - argLen: 3, - generic: true, + name: "NotEqualMaskedFloat64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftLeftInt64x8", - argLen: 2, - generic: true, + name: "NotEqualMaskedInt8x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftLeftAndFillUpperFromInt64x8", - argLen: 3, - generic: true, + name: "NotEqualMaskedInt8x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedInt64x8", - argLen: 4, - generic: true, + name: "NotEqualMaskedInt8x64", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftLeftMaskedInt64x8", - argLen: 3, - generic: true, + name: "NotEqualMaskedInt16x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftRightInt64x8", - argLen: 2, - generic: true, + name: "NotEqualMaskedInt16x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftRightAndFillUpperFromInt64x8", - argLen: 3, - generic: true, + name: "NotEqualMaskedInt16x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedInt64x8", - argLen: 4, - generic: true, + name: "NotEqualMaskedInt32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftRightMaskedInt64x8", - argLen: 3, - generic: true, + name: "NotEqualMaskedInt32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "SubInt64x8", - argLen: 2, - generic: true, + name: "NotEqualMaskedInt32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "SubMaskedInt64x8", - argLen: 3, - generic: true, + name: "NotEqualMaskedInt64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "XorInt64x8", - argLen: 2, + name: "NotEqualMaskedInt64x4", + argLen: 3, commutative: true, generic: true, }, { - name: 
"XorMaskedInt64x8", + name: "NotEqualMaskedInt64x8", argLen: 3, commutative: true, generic: true, }, { - name: "AbsoluteInt8x16", - argLen: 1, - generic: true, + name: "NotEqualMaskedUint8x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AbsoluteMaskedInt8x16", - argLen: 2, - generic: true, + name: "NotEqualMaskedUint8x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AddInt8x16", - argLen: 2, + name: "NotEqualMaskedUint8x64", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "NotEqualMaskedUint16x8", + argLen: 3, commutative: true, generic: true, }, { - name: "AddMaskedInt8x16", + name: "NotEqualMaskedUint16x16", argLen: 3, commutative: true, generic: true, }, { - name: "AndInt8x16", - argLen: 2, + name: "NotEqualMaskedUint16x32", + argLen: 3, commutative: true, generic: true, }, { - name: "AndNotInt8x16", - argLen: 2, - generic: true, + name: "NotEqualMaskedUint32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "CompressInt8x16", - argLen: 2, - generic: true, + name: "NotEqualMaskedUint32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "EqualInt8x16", - argLen: 2, + name: "NotEqualMaskedUint32x16", + argLen: 3, commutative: true, generic: true, }, { - name: "EqualMaskedInt8x16", + name: "NotEqualMaskedUint64x2", argLen: 3, commutative: true, generic: true, }, { - name: "GreaterInt8x16", - argLen: 2, - generic: true, + name: "NotEqualMaskedUint64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "GreaterEqualInt8x16", - argLen: 2, - generic: true, + name: "NotEqualMaskedUint64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "GreaterEqualMaskedInt8x16", - argLen: 3, - generic: true, + name: "NotEqualUint8x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "GreaterMaskedInt8x16", - argLen: 3, - generic: true, + name: "NotEqualUint8x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "LessInt8x16", - argLen: 2, - generic: true, + name: "NotEqualUint8x64", + argLen: 2, + commutative: true, + generic: true, }, { - name: "LessEqualInt8x16", - argLen: 2, - generic: true, + name: "NotEqualUint16x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "LessEqualMaskedInt8x16", - argLen: 3, - generic: true, + name: "NotEqualUint16x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "LessMaskedInt8x16", - argLen: 3, - generic: true, + name: "NotEqualUint16x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaxInt8x16", + name: "NotEqualUint32x4", argLen: 2, commutative: true, generic: true, }, { - name: "MaxMaskedInt8x16", - argLen: 3, + name: "NotEqualUint32x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MinInt8x16", + name: "NotEqualUint32x16", argLen: 2, commutative: true, generic: true, }, { - name: "MinMaskedInt8x16", - argLen: 3, + name: "NotEqualUint64x2", + argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualInt8x16", + name: "NotEqualUint64x4", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualMaskedInt8x16", - argLen: 3, + name: "NotEqualUint64x8", + argLen: 2, commutative: true, generic: true, }, @@ -65483,578 +65842,508 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PopCountInt8x16", - argLen: 1, - generic: true, - }, - { - name: "PopCountMaskedInt8x16", - argLen: 2, - generic: true, - }, - { - name: "SaturatedAddInt8x16", + name: "OrInt8x32", argLen: 2, commutative: true, generic: true, }, { - name: 
"SaturatedAddMaskedInt8x16", - argLen: 3, + name: "OrInt16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "SaturatedSubInt8x16", - argLen: 2, - generic: true, - }, - { - name: "SaturatedSubMaskedInt8x16", - argLen: 3, - generic: true, - }, - { - name: "SignInt8x16", - argLen: 2, - generic: true, - }, - { - name: "SubInt8x16", - argLen: 2, - generic: true, - }, - { - name: "SubMaskedInt8x16", - argLen: 3, - generic: true, + name: "OrInt16x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "XorInt8x16", + name: "OrInt32x4", argLen: 2, commutative: true, generic: true, }, { - name: "AbsoluteInt8x32", - argLen: 1, - generic: true, + name: "OrInt32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AbsoluteMaskedInt8x32", - argLen: 2, - generic: true, + name: "OrInt32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AddInt8x32", + name: "OrInt64x2", argLen: 2, commutative: true, generic: true, }, { - name: "AddMaskedInt8x32", - argLen: 3, + name: "OrInt64x4", + argLen: 2, commutative: true, generic: true, }, { - name: "AndInt8x32", + name: "OrInt64x8", argLen: 2, commutative: true, generic: true, }, { - name: "AndNotInt8x32", - argLen: 2, - generic: true, + name: "OrMaskedInt32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "CompressInt8x32", - argLen: 2, - generic: true, + name: "OrMaskedInt32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "EqualInt8x32", - argLen: 2, + name: "OrMaskedInt32x16", + argLen: 3, commutative: true, generic: true, }, { - name: "EqualMaskedInt8x32", + name: "OrMaskedInt64x2", argLen: 3, commutative: true, generic: true, }, { - name: "GreaterInt8x32", - argLen: 2, - generic: true, + name: "OrMaskedInt64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "GreaterEqualInt8x32", - argLen: 2, - generic: true, + name: "OrMaskedInt64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "GreaterEqualMaskedInt8x32", - argLen: 3, - generic: true, + name: "OrMaskedUint32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "GreaterMaskedInt8x32", - argLen: 3, - generic: true, + name: "OrMaskedUint32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LessInt8x32", - argLen: 2, - generic: true, + name: "OrMaskedUint32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LessEqualInt8x32", - argLen: 2, - generic: true, + name: "OrMaskedUint64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LessEqualMaskedInt8x32", - argLen: 3, - generic: true, + name: "OrMaskedUint64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LessMaskedInt8x32", - argLen: 3, - generic: true, + name: "OrMaskedUint64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaxInt8x32", + name: "OrUint8x16", argLen: 2, commutative: true, generic: true, }, { - name: "MaxMaskedInt8x32", - argLen: 3, + name: "OrUint8x32", + argLen: 2, commutative: true, generic: true, }, { - name: "MinInt8x32", + name: "OrUint16x8", argLen: 2, commutative: true, generic: true, }, { - name: "MinMaskedInt8x32", - argLen: 3, + name: "OrUint16x16", + argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualInt8x32", + name: "OrUint32x4", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualMaskedInt8x32", - argLen: 3, + name: "OrUint32x8", + argLen: 2, commutative: true, generic: true, }, { - name: "OrInt8x32", + name: "OrUint32x16", argLen: 2, commutative: true, 
generic: true, }, { - name: "PopCountInt8x32", - argLen: 1, - generic: true, - }, - { - name: "PopCountMaskedInt8x32", - argLen: 2, - generic: true, + name: "OrUint64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SaturatedAddInt8x32", + name: "OrUint64x4", argLen: 2, commutative: true, generic: true, }, { - name: "SaturatedAddMaskedInt8x32", - argLen: 3, + name: "OrUint64x8", + argLen: 2, commutative: true, generic: true, }, { - name: "SaturatedSubInt8x32", - argLen: 2, + name: "PairDotProdAccumulateInt32x4", + argLen: 3, generic: true, }, { - name: "SaturatedSubMaskedInt8x32", + name: "PairDotProdAccumulateInt32x8", argLen: 3, generic: true, }, { - name: "SignInt8x32", - argLen: 2, + name: "PairDotProdAccumulateInt32x16", + argLen: 3, generic: true, }, { - name: "SubInt8x32", - argLen: 2, + name: "PairDotProdAccumulateMaskedInt32x4", + argLen: 4, generic: true, }, { - name: "SubMaskedInt8x32", - argLen: 3, + name: "PairDotProdAccumulateMaskedInt32x8", + argLen: 4, generic: true, }, { - name: "XorInt8x32", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AbsoluteInt8x64", - argLen: 1, + name: "PairDotProdAccumulateMaskedInt32x16", + argLen: 4, generic: true, }, { - name: "AbsoluteMaskedInt8x64", + name: "PairDotProdInt16x8", argLen: 2, generic: true, }, { - name: "AddInt8x64", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AddMaskedInt8x64", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "CompressInt8x64", + name: "PairDotProdInt16x16", argLen: 2, generic: true, }, { - name: "EqualInt8x64", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "EqualMaskedInt8x64", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "GreaterInt8x64", + name: "PairDotProdInt16x32", argLen: 2, generic: true, }, { - name: "GreaterEqualInt8x64", - argLen: 2, + name: "PairDotProdMaskedInt16x8", + argLen: 3, generic: true, }, { - name: "GreaterEqualMaskedInt8x64", + name: "PairDotProdMaskedInt16x16", argLen: 3, generic: true, }, { - name: "GreaterMaskedInt8x64", + name: "PairDotProdMaskedInt16x32", argLen: 3, generic: true, }, { - name: "LessInt8x64", + name: "PairwiseAddFloat32x4", argLen: 2, generic: true, }, { - name: "LessEqualInt8x64", + name: "PairwiseAddFloat32x8", argLen: 2, generic: true, }, { - name: "LessEqualMaskedInt8x64", - argLen: 3, + name: "PairwiseAddFloat64x2", + argLen: 2, generic: true, }, { - name: "LessMaskedInt8x64", - argLen: 3, + name: "PairwiseAddFloat64x4", + argLen: 2, generic: true, }, { - name: "MaxInt8x64", - argLen: 2, - commutative: true, - generic: true, + name: "PairwiseAddInt16x8", + argLen: 2, + generic: true, }, { - name: "MaxMaskedInt8x64", - argLen: 3, - commutative: true, - generic: true, + name: "PairwiseAddInt16x16", + argLen: 2, + generic: true, }, { - name: "MinInt8x64", - argLen: 2, - commutative: true, - generic: true, + name: "PairwiseAddInt32x4", + argLen: 2, + generic: true, }, { - name: "MinMaskedInt8x64", - argLen: 3, - commutative: true, - generic: true, + name: "PairwiseAddInt32x8", + argLen: 2, + generic: true, }, { - name: "NotEqualInt8x64", - argLen: 2, - commutative: true, - generic: true, + name: "PairwiseAddUint16x8", + argLen: 2, + generic: true, }, { - name: "NotEqualMaskedInt8x64", - argLen: 3, - commutative: true, - generic: true, + name: "PairwiseAddUint16x16", + argLen: 2, + generic: true, }, { - name: "PopCountInt8x64", - argLen: 1, + name: "PairwiseAddUint32x4", + argLen: 2, generic: true, }, { - name: "PopCountMaskedInt8x64", + 
name: "PairwiseAddUint32x8", argLen: 2, generic: true, }, { - name: "SaturatedAddInt8x64", - argLen: 2, - commutative: true, - generic: true, + name: "PairwiseSubFloat32x4", + argLen: 2, + generic: true, }, { - name: "SaturatedAddMaskedInt8x64", - argLen: 3, - commutative: true, - generic: true, + name: "PairwiseSubFloat32x8", + argLen: 2, + generic: true, }, { - name: "SaturatedSubInt8x64", + name: "PairwiseSubFloat64x2", argLen: 2, generic: true, }, { - name: "SaturatedSubMaskedInt8x64", - argLen: 3, + name: "PairwiseSubFloat64x4", + argLen: 2, generic: true, }, { - name: "SubInt8x64", + name: "PairwiseSubInt16x8", argLen: 2, generic: true, }, { - name: "SubMaskedInt8x64", - argLen: 3, + name: "PairwiseSubInt16x16", + argLen: 2, generic: true, }, { - name: "AddUint16x16", - argLen: 2, - commutative: true, - generic: true, + name: "PairwiseSubInt32x4", + argLen: 2, + generic: true, }, { - name: "AddMaskedUint16x16", - argLen: 3, - commutative: true, - generic: true, + name: "PairwiseSubInt32x8", + argLen: 2, + generic: true, }, { - name: "AndUint16x16", - argLen: 2, - commutative: true, - generic: true, + name: "PairwiseSubUint16x8", + argLen: 2, + generic: true, }, { - name: "AndNotUint16x16", + name: "PairwiseSubUint16x16", argLen: 2, generic: true, }, { - name: "AverageUint16x16", - argLen: 2, - commutative: true, - generic: true, + name: "PairwiseSubUint32x4", + argLen: 2, + generic: true, }, { - name: "AverageMaskedUint16x16", - argLen: 3, - commutative: true, - generic: true, + name: "PairwiseSubUint32x8", + argLen: 2, + generic: true, }, { - name: "CompressUint16x16", - argLen: 2, + name: "Permute2Float32x4", + argLen: 3, generic: true, }, { - name: "EqualUint16x16", - argLen: 2, - commutative: true, - generic: true, + name: "Permute2Float32x8", + argLen: 3, + generic: true, }, { - name: "EqualMaskedUint16x16", - argLen: 3, - commutative: true, - generic: true, + name: "Permute2Float32x16", + argLen: 3, + generic: true, }, { - name: "GreaterUint16x16", - argLen: 2, + name: "Permute2Float64x2", + argLen: 3, generic: true, }, { - name: "GreaterEqualUint16x16", - argLen: 2, + name: "Permute2Float64x4", + argLen: 3, generic: true, }, { - name: "GreaterEqualMaskedUint16x16", + name: "Permute2Float64x8", argLen: 3, generic: true, }, { - name: "GreaterMaskedUint16x16", + name: "Permute2Int8x16", argLen: 3, generic: true, }, { - name: "LessUint16x16", - argLen: 2, + name: "Permute2Int8x32", + argLen: 3, generic: true, }, { - name: "LessEqualUint16x16", - argLen: 2, + name: "Permute2Int8x64", + argLen: 3, + generic: true, + }, + { + name: "Permute2Int16x8", + argLen: 3, generic: true, }, { - name: "LessEqualMaskedUint16x16", + name: "Permute2Int16x16", argLen: 3, generic: true, }, { - name: "LessMaskedUint16x16", + name: "Permute2Int16x32", argLen: 3, generic: true, }, { - name: "MaxUint16x16", - argLen: 2, - commutative: true, - generic: true, + name: "Permute2Int32x4", + argLen: 3, + generic: true, }, { - name: "MaxMaskedUint16x16", - argLen: 3, - commutative: true, - generic: true, + name: "Permute2Int32x8", + argLen: 3, + generic: true, }, { - name: "MinUint16x16", - argLen: 2, - commutative: true, - generic: true, + name: "Permute2Int32x16", + argLen: 3, + generic: true, }, { - name: "MinMaskedUint16x16", - argLen: 3, - commutative: true, - generic: true, + name: "Permute2Int64x2", + argLen: 3, + generic: true, }, { - name: "MulHighUint16x16", - argLen: 2, - commutative: true, - generic: true, + name: "Permute2Int64x4", + argLen: 3, + generic: true, }, { - name: 
"MulHighMaskedUint16x16", - argLen: 3, - commutative: true, - generic: true, + name: "Permute2Int64x8", + argLen: 3, + generic: true, }, { - name: "NotEqualUint16x16", - argLen: 2, - commutative: true, - generic: true, + name: "Permute2MaskedFloat32x4", + argLen: 4, + generic: true, }, { - name: "NotEqualMaskedUint16x16", - argLen: 3, - commutative: true, - generic: true, + name: "Permute2MaskedFloat32x8", + argLen: 4, + generic: true, }, { - name: "OrUint16x16", - argLen: 2, - commutative: true, - generic: true, + name: "Permute2MaskedFloat32x16", + argLen: 4, + generic: true, }, { - name: "PairwiseAddUint16x16", - argLen: 2, + name: "Permute2MaskedFloat64x2", + argLen: 4, generic: true, }, { - name: "PairwiseSubUint16x16", - argLen: 2, + name: "Permute2MaskedFloat64x4", + argLen: 4, generic: true, }, { - name: "PermuteInt16x16", - argLen: 2, + name: "Permute2MaskedFloat64x8", + argLen: 4, generic: true, }, { - name: "PermuteUint16x16", - argLen: 2, + name: "Permute2MaskedInt8x16", + argLen: 4, generic: true, }, { - name: "Permute2Uint16x16", - argLen: 3, + name: "Permute2MaskedInt8x32", + argLen: 4, generic: true, }, { - name: "Permute2Int16x16", - argLen: 3, + name: "Permute2MaskedInt8x64", + argLen: 4, generic: true, }, { - name: "Permute2MaskedUint16x16", + name: "Permute2MaskedInt16x8", argLen: 4, generic: true, }, @@ -66064,3385 +66353,3090 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteMaskedInt16x16", - argLen: 3, + name: "Permute2MaskedInt16x32", + argLen: 4, generic: true, }, { - name: "PermuteMaskedUint16x16", - argLen: 3, + name: "Permute2MaskedInt32x4", + argLen: 4, generic: true, }, { - name: "PopCountUint16x16", - argLen: 1, + name: "Permute2MaskedInt32x8", + argLen: 4, generic: true, }, { - name: "PopCountMaskedUint16x16", - argLen: 2, + name: "Permute2MaskedInt32x16", + argLen: 4, generic: true, }, { - name: "SaturatedAddUint16x16", - argLen: 2, - commutative: true, - generic: true, + name: "Permute2MaskedInt64x2", + argLen: 4, + generic: true, }, { - name: "SaturatedAddMaskedUint16x16", - argLen: 3, - commutative: true, - generic: true, + name: "Permute2MaskedInt64x4", + argLen: 4, + generic: true, }, { - name: "SaturatedSubUint16x16", - argLen: 2, + name: "Permute2MaskedInt64x8", + argLen: 4, generic: true, }, { - name: "SaturatedSubMaskedUint16x16", - argLen: 3, + name: "Permute2MaskedUint8x16", + argLen: 4, generic: true, }, { - name: "ShiftAllLeftUint16x16", - argLen: 2, + name: "Permute2MaskedUint8x32", + argLen: 4, generic: true, }, { - name: "ShiftAllLeftMaskedUint16x16", - argLen: 3, + name: "Permute2MaskedUint8x64", + argLen: 4, generic: true, }, { - name: "ShiftAllRightUint16x16", - argLen: 2, + name: "Permute2MaskedUint16x8", + argLen: 4, generic: true, }, { - name: "ShiftAllRightMaskedUint16x16", - argLen: 3, + name: "Permute2MaskedUint16x16", + argLen: 4, generic: true, }, { - name: "ShiftLeftUint16x16", - argLen: 2, + name: "Permute2MaskedUint16x32", + argLen: 4, generic: true, }, { - name: "ShiftLeftAndFillUpperFromUint16x16", - argLen: 3, + name: "Permute2MaskedUint32x4", + argLen: 4, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedUint16x16", + name: "Permute2MaskedUint32x8", argLen: 4, generic: true, }, { - name: "ShiftLeftMaskedUint16x16", - argLen: 3, + name: "Permute2MaskedUint32x16", + argLen: 4, generic: true, }, { - name: "ShiftRightUint16x16", - argLen: 2, + name: "Permute2MaskedUint64x2", + argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint16x16", - argLen: 3, + name: 
"Permute2MaskedUint64x4", + argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedUint16x16", + name: "Permute2MaskedUint64x8", argLen: 4, generic: true, }, { - name: "ShiftRightMaskedUint16x16", + name: "Permute2Uint8x16", argLen: 3, generic: true, }, { - name: "SubUint16x16", - argLen: 2, + name: "Permute2Uint8x32", + argLen: 3, generic: true, }, { - name: "SubMaskedUint16x16", + name: "Permute2Uint8x64", argLen: 3, generic: true, }, { - name: "XorUint16x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AddUint16x32", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AddMaskedUint16x32", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AverageUint16x32", - argLen: 2, - commutative: true, - generic: true, + name: "Permute2Uint16x8", + argLen: 3, + generic: true, }, { - name: "AverageMaskedUint16x32", - argLen: 3, - commutative: true, - generic: true, + name: "Permute2Uint16x16", + argLen: 3, + generic: true, }, { - name: "CompressUint16x32", - argLen: 2, + name: "Permute2Uint16x32", + argLen: 3, generic: true, }, { - name: "EqualUint16x32", - argLen: 2, - commutative: true, - generic: true, + name: "Permute2Uint32x4", + argLen: 3, + generic: true, }, { - name: "EqualMaskedUint16x32", - argLen: 3, - commutative: true, - generic: true, + name: "Permute2Uint32x8", + argLen: 3, + generic: true, }, { - name: "GreaterUint16x32", - argLen: 2, + name: "Permute2Uint32x16", + argLen: 3, generic: true, }, { - name: "GreaterEqualUint16x32", - argLen: 2, + name: "Permute2Uint64x2", + argLen: 3, generic: true, }, { - name: "GreaterEqualMaskedUint16x32", + name: "Permute2Uint64x4", argLen: 3, generic: true, }, { - name: "GreaterMaskedUint16x32", + name: "Permute2Uint64x8", argLen: 3, generic: true, }, { - name: "LessUint16x32", + name: "PermuteFloat32x8", argLen: 2, generic: true, }, { - name: "LessEqualUint16x32", + name: "PermuteFloat32x16", argLen: 2, generic: true, }, { - name: "LessEqualMaskedUint16x32", - argLen: 3, + name: "PermuteFloat64x4", + argLen: 2, generic: true, }, { - name: "LessMaskedUint16x32", - argLen: 3, + name: "PermuteFloat64x8", + argLen: 2, generic: true, }, { - name: "MaxUint16x32", - argLen: 2, - commutative: true, - generic: true, + name: "PermuteInt8x16", + argLen: 2, + generic: true, }, { - name: "MaxMaskedUint16x32", - argLen: 3, - commutative: true, - generic: true, + name: "PermuteInt8x32", + argLen: 2, + generic: true, }, { - name: "MinUint16x32", - argLen: 2, - commutative: true, - generic: true, + name: "PermuteInt8x64", + argLen: 2, + generic: true, }, { - name: "MinMaskedUint16x32", - argLen: 3, - commutative: true, - generic: true, + name: "PermuteInt16x8", + argLen: 2, + generic: true, }, { - name: "MulHighUint16x32", - argLen: 2, - commutative: true, - generic: true, + name: "PermuteInt16x16", + argLen: 2, + generic: true, }, { - name: "MulHighMaskedUint16x32", - argLen: 3, - commutative: true, - generic: true, + name: "PermuteInt16x32", + argLen: 2, + generic: true, }, { - name: "NotEqualUint16x32", - argLen: 2, - commutative: true, - generic: true, + name: "PermuteInt32x8", + argLen: 2, + generic: true, }, { - name: "NotEqualMaskedUint16x32", - argLen: 3, - commutative: true, - generic: true, + name: "PermuteInt32x16", + argLen: 2, + generic: true, }, { - name: "PermuteUint16x32", + name: "PermuteInt64x4", argLen: 2, generic: true, }, { - name: "PermuteInt16x32", + name: "PermuteInt64x8", argLen: 2, generic: true, }, { - name: "Permute2Uint16x32", + name: "PermuteMaskedFloat32x8", 
argLen: 3, generic: true, }, { - name: "Permute2Int16x32", + name: "PermuteMaskedFloat32x16", argLen: 3, generic: true, }, { - name: "Permute2MaskedUint16x32", - argLen: 4, + name: "PermuteMaskedFloat64x4", + argLen: 3, generic: true, }, { - name: "Permute2MaskedInt16x32", - argLen: 4, + name: "PermuteMaskedFloat64x8", + argLen: 3, generic: true, }, { - name: "PermuteMaskedInt16x32", + name: "PermuteMaskedInt8x16", argLen: 3, generic: true, }, { - name: "PermuteMaskedUint16x32", + name: "PermuteMaskedInt8x32", argLen: 3, generic: true, }, { - name: "PopCountUint16x32", - argLen: 1, + name: "PermuteMaskedInt8x64", + argLen: 3, generic: true, }, { - name: "PopCountMaskedUint16x32", - argLen: 2, + name: "PermuteMaskedInt16x8", + argLen: 3, generic: true, }, { - name: "SaturatedAddUint16x32", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "SaturatedAddMaskedUint16x32", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "SaturatedSubUint16x32", - argLen: 2, + name: "PermuteMaskedInt16x16", + argLen: 3, generic: true, }, { - name: "SaturatedSubMaskedUint16x32", + name: "PermuteMaskedInt16x32", argLen: 3, generic: true, }, { - name: "ShiftAllLeftUint16x32", - argLen: 2, + name: "PermuteMaskedInt32x8", + argLen: 3, generic: true, }, { - name: "ShiftAllLeftMaskedUint16x32", + name: "PermuteMaskedInt32x16", argLen: 3, generic: true, }, { - name: "ShiftAllRightUint16x32", - argLen: 2, + name: "PermuteMaskedInt64x4", + argLen: 3, generic: true, }, { - name: "ShiftAllRightMaskedUint16x32", + name: "PermuteMaskedInt64x8", argLen: 3, generic: true, }, { - name: "ShiftLeftUint16x32", - argLen: 2, + name: "PermuteMaskedUint8x16", + argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromUint16x32", + name: "PermuteMaskedUint8x32", argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedUint16x32", - argLen: 4, + name: "PermuteMaskedUint8x64", + argLen: 3, generic: true, }, { - name: "ShiftLeftMaskedUint16x32", + name: "PermuteMaskedUint16x8", argLen: 3, generic: true, }, { - name: "ShiftRightUint16x32", - argLen: 2, + name: "PermuteMaskedUint16x16", + argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint16x32", + name: "PermuteMaskedUint16x32", argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedUint16x32", - argLen: 4, + name: "PermuteMaskedUint32x8", + argLen: 3, generic: true, }, { - name: "ShiftRightMaskedUint16x32", + name: "PermuteMaskedUint32x16", argLen: 3, generic: true, }, { - name: "SubUint16x32", - argLen: 2, + name: "PermuteMaskedUint64x4", + argLen: 3, generic: true, }, { - name: "SubMaskedUint16x32", + name: "PermuteMaskedUint64x8", argLen: 3, generic: true, }, { - name: "AddUint16x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AddMaskedUint16x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AndUint16x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndNotUint16x8", + name: "PermuteUint8x16", argLen: 2, generic: true, }, { - name: "AverageUint16x8", - argLen: 2, - commutative: true, - generic: true, + name: "PermuteUint8x32", + argLen: 2, + generic: true, }, { - name: "AverageMaskedUint16x8", - argLen: 3, - commutative: true, - generic: true, + name: "PermuteUint8x64", + argLen: 2, + generic: true, }, { - name: "CompressUint16x8", + name: "PermuteUint16x8", argLen: 2, generic: true, }, { - name: "EqualUint16x8", - argLen: 2, - commutative: true, - generic: true, + name: "PermuteUint16x16", + argLen: 2, + generic: true, }, 
{ - name: "EqualMaskedUint16x8", - argLen: 3, - commutative: true, - generic: true, + name: "PermuteUint16x32", + argLen: 2, + generic: true, }, { - name: "GreaterUint16x8", + name: "PermuteUint32x8", argLen: 2, generic: true, }, { - name: "GreaterEqualUint16x8", + name: "PermuteUint32x16", argLen: 2, generic: true, }, { - name: "GreaterEqualMaskedUint16x8", - argLen: 3, + name: "PermuteUint64x4", + argLen: 2, generic: true, }, { - name: "GreaterMaskedUint16x8", - argLen: 3, + name: "PermuteUint64x8", + argLen: 2, generic: true, }, { - name: "LessUint16x8", - argLen: 2, + name: "PopCountInt8x16", + argLen: 1, generic: true, }, { - name: "LessEqualUint16x8", - argLen: 2, + name: "PopCountInt8x32", + argLen: 1, generic: true, }, { - name: "LessEqualMaskedUint16x8", - argLen: 3, + name: "PopCountInt8x64", + argLen: 1, generic: true, }, { - name: "LessMaskedUint16x8", - argLen: 3, + name: "PopCountInt16x8", + argLen: 1, generic: true, }, { - name: "MaxUint16x8", - argLen: 2, - commutative: true, - generic: true, + name: "PopCountInt16x16", + argLen: 1, + generic: true, }, { - name: "MaxMaskedUint16x8", - argLen: 3, - commutative: true, - generic: true, + name: "PopCountInt16x32", + argLen: 1, + generic: true, }, { - name: "MinUint16x8", - argLen: 2, - commutative: true, - generic: true, + name: "PopCountInt32x4", + argLen: 1, + generic: true, }, { - name: "MinMaskedUint16x8", - argLen: 3, - commutative: true, - generic: true, + name: "PopCountInt32x8", + argLen: 1, + generic: true, }, { - name: "MulHighUint16x8", - argLen: 2, - commutative: true, - generic: true, + name: "PopCountInt32x16", + argLen: 1, + generic: true, }, { - name: "MulHighMaskedUint16x8", - argLen: 3, - commutative: true, - generic: true, + name: "PopCountInt64x2", + argLen: 1, + generic: true, }, { - name: "NotEqualUint16x8", - argLen: 2, - commutative: true, - generic: true, + name: "PopCountInt64x4", + argLen: 1, + generic: true, }, { - name: "NotEqualMaskedUint16x8", - argLen: 3, - commutative: true, - generic: true, + name: "PopCountInt64x8", + argLen: 1, + generic: true, }, { - name: "OrUint16x8", - argLen: 2, - commutative: true, - generic: true, + name: "PopCountMaskedInt8x16", + argLen: 2, + generic: true, }, { - name: "PairwiseAddUint16x8", + name: "PopCountMaskedInt8x32", argLen: 2, generic: true, }, { - name: "PairwiseSubUint16x8", + name: "PopCountMaskedInt8x64", argLen: 2, generic: true, }, { - name: "PermuteInt16x8", + name: "PopCountMaskedInt16x8", argLen: 2, generic: true, }, { - name: "PermuteUint16x8", + name: "PopCountMaskedInt16x16", argLen: 2, generic: true, }, { - name: "Permute2Uint16x8", - argLen: 3, + name: "PopCountMaskedInt16x32", + argLen: 2, generic: true, }, { - name: "Permute2Int16x8", - argLen: 3, + name: "PopCountMaskedInt32x4", + argLen: 2, generic: true, }, { - name: "Permute2MaskedInt16x8", - argLen: 4, + name: "PopCountMaskedInt32x8", + argLen: 2, generic: true, }, { - name: "Permute2MaskedUint16x8", - argLen: 4, + name: "PopCountMaskedInt32x16", + argLen: 2, generic: true, }, { - name: "PermuteMaskedInt16x8", - argLen: 3, + name: "PopCountMaskedInt64x2", + argLen: 2, generic: true, }, { - name: "PermuteMaskedUint16x8", - argLen: 3, + name: "PopCountMaskedInt64x4", + argLen: 2, generic: true, }, { - name: "PopCountUint16x8", - argLen: 1, + name: "PopCountMaskedInt64x8", + argLen: 2, generic: true, }, { - name: "PopCountMaskedUint16x8", + name: "PopCountMaskedUint8x16", argLen: 2, generic: true, }, { - name: "SaturatedAddUint16x8", - argLen: 2, - commutative: true, - generic: true, + 
name: "PopCountMaskedUint8x32", + argLen: 2, + generic: true, }, { - name: "SaturatedAddMaskedUint16x8", - argLen: 3, - commutative: true, - generic: true, + name: "PopCountMaskedUint8x64", + argLen: 2, + generic: true, }, { - name: "SaturatedSubUint16x8", + name: "PopCountMaskedUint16x8", argLen: 2, generic: true, }, { - name: "SaturatedSubMaskedUint16x8", - argLen: 3, + name: "PopCountMaskedUint16x16", + argLen: 2, generic: true, }, { - name: "ShiftAllLeftUint16x8", + name: "PopCountMaskedUint16x32", argLen: 2, generic: true, }, { - name: "ShiftAllLeftMaskedUint16x8", - argLen: 3, + name: "PopCountMaskedUint32x4", + argLen: 2, generic: true, }, { - name: "ShiftAllRightUint16x8", + name: "PopCountMaskedUint32x8", argLen: 2, generic: true, }, { - name: "ShiftAllRightMaskedUint16x8", - argLen: 3, + name: "PopCountMaskedUint32x16", + argLen: 2, generic: true, }, { - name: "ShiftLeftUint16x8", + name: "PopCountMaskedUint64x2", argLen: 2, generic: true, }, { - name: "ShiftLeftAndFillUpperFromUint16x8", - argLen: 3, + name: "PopCountMaskedUint64x4", + argLen: 2, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedUint16x8", - argLen: 4, + name: "PopCountMaskedUint64x8", + argLen: 2, generic: true, }, { - name: "ShiftLeftMaskedUint16x8", - argLen: 3, + name: "PopCountUint8x16", + argLen: 1, generic: true, }, { - name: "ShiftRightUint16x8", - argLen: 2, + name: "PopCountUint8x32", + argLen: 1, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint16x8", - argLen: 3, + name: "PopCountUint8x64", + argLen: 1, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedUint16x8", - argLen: 4, + name: "PopCountUint16x8", + argLen: 1, generic: true, }, { - name: "ShiftRightMaskedUint16x8", - argLen: 3, + name: "PopCountUint16x16", + argLen: 1, generic: true, }, { - name: "SubUint16x8", - argLen: 2, + name: "PopCountUint16x32", + argLen: 1, generic: true, }, { - name: "SubMaskedUint16x8", - argLen: 3, + name: "PopCountUint32x4", + argLen: 1, generic: true, }, { - name: "XorUint16x8", - argLen: 2, - commutative: true, - generic: true, + name: "PopCountUint32x8", + argLen: 1, + generic: true, }, { - name: "AddUint32x16", - argLen: 2, - commutative: true, - generic: true, + name: "PopCountUint32x16", + argLen: 1, + generic: true, }, { - name: "AddMaskedUint32x16", - argLen: 3, - commutative: true, - generic: true, + name: "PopCountUint64x2", + argLen: 1, + generic: true, }, { - name: "AndUint32x16", - argLen: 2, - commutative: true, - generic: true, + name: "PopCountUint64x4", + argLen: 1, + generic: true, }, { - name: "AndMaskedUint32x16", - argLen: 3, - commutative: true, - generic: true, + name: "PopCountUint64x8", + argLen: 1, + generic: true, }, { - name: "AndNotUint32x16", + name: "RotateLeftInt32x4", argLen: 2, generic: true, }, { - name: "AndNotMaskedUint32x16", - argLen: 3, + name: "RotateLeftInt32x8", + argLen: 2, generic: true, }, { - name: "CompressUint32x16", + name: "RotateLeftInt32x16", argLen: 2, generic: true, }, { - name: "EqualUint32x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "EqualMaskedUint32x16", - argLen: 3, - commutative: true, - generic: true, + name: "RotateLeftInt64x2", + argLen: 2, + generic: true, }, { - name: "GreaterUint32x16", + name: "RotateLeftInt64x4", argLen: 2, generic: true, }, { - name: "GreaterEqualUint32x16", + name: "RotateLeftInt64x8", argLen: 2, generic: true, }, { - name: "GreaterEqualMaskedUint32x16", + name: "RotateLeftMaskedInt32x4", argLen: 3, generic: true, }, { - name: "GreaterMaskedUint32x16", + name: 
"RotateLeftMaskedInt32x8", argLen: 3, generic: true, }, { - name: "LessUint32x16", - argLen: 2, + name: "RotateLeftMaskedInt32x16", + argLen: 3, generic: true, }, { - name: "LessEqualUint32x16", - argLen: 2, + name: "RotateLeftMaskedInt64x2", + argLen: 3, generic: true, }, { - name: "LessEqualMaskedUint32x16", + name: "RotateLeftMaskedInt64x4", argLen: 3, generic: true, }, { - name: "LessMaskedUint32x16", + name: "RotateLeftMaskedInt64x8", argLen: 3, generic: true, }, { - name: "MaxUint32x16", - argLen: 2, - commutative: true, - generic: true, + name: "RotateLeftMaskedUint32x4", + argLen: 3, + generic: true, }, { - name: "MaxMaskedUint32x16", - argLen: 3, - commutative: true, - generic: true, + name: "RotateLeftMaskedUint32x8", + argLen: 3, + generic: true, }, { - name: "MinUint32x16", - argLen: 2, - commutative: true, - generic: true, + name: "RotateLeftMaskedUint32x16", + argLen: 3, + generic: true, }, { - name: "MinMaskedUint32x16", - argLen: 3, - commutative: true, - generic: true, + name: "RotateLeftMaskedUint64x2", + argLen: 3, + generic: true, }, { - name: "NotEqualUint32x16", - argLen: 2, - commutative: true, - generic: true, + name: "RotateLeftMaskedUint64x4", + argLen: 3, + generic: true, }, { - name: "NotEqualMaskedUint32x16", - argLen: 3, - commutative: true, - generic: true, + name: "RotateLeftMaskedUint64x8", + argLen: 3, + generic: true, }, { - name: "OrUint32x16", - argLen: 2, - commutative: true, - generic: true, + name: "RotateLeftUint32x4", + argLen: 2, + generic: true, }, { - name: "OrMaskedUint32x16", - argLen: 3, - commutative: true, - generic: true, + name: "RotateLeftUint32x8", + argLen: 2, + generic: true, }, { - name: "PermuteInt32x16", + name: "RotateLeftUint32x16", argLen: 2, generic: true, }, { - name: "PermuteFloat32x16", + name: "RotateLeftUint64x2", argLen: 2, generic: true, }, { - name: "PermuteUint32x16", + name: "RotateLeftUint64x4", argLen: 2, generic: true, }, { - name: "Permute2Uint32x16", - argLen: 3, + name: "RotateLeftUint64x8", + argLen: 2, generic: true, }, { - name: "Permute2Float32x16", - argLen: 3, + name: "RotateRightInt32x4", + argLen: 2, generic: true, }, { - name: "Permute2Int32x16", - argLen: 3, + name: "RotateRightInt32x8", + argLen: 2, generic: true, }, { - name: "Permute2MaskedUint32x16", - argLen: 4, + name: "RotateRightInt32x16", + argLen: 2, generic: true, }, { - name: "Permute2MaskedInt32x16", - argLen: 4, + name: "RotateRightInt64x2", + argLen: 2, generic: true, }, { - name: "Permute2MaskedFloat32x16", - argLen: 4, + name: "RotateRightInt64x4", + argLen: 2, generic: true, }, { - name: "PermuteMaskedFloat32x16", - argLen: 3, + name: "RotateRightInt64x8", + argLen: 2, generic: true, }, { - name: "PermuteMaskedInt32x16", + name: "RotateRightMaskedInt32x4", argLen: 3, generic: true, }, { - name: "PermuteMaskedUint32x16", + name: "RotateRightMaskedInt32x8", argLen: 3, generic: true, }, { - name: "PopCountUint32x16", - argLen: 1, + name: "RotateRightMaskedInt32x16", + argLen: 3, generic: true, }, { - name: "PopCountMaskedUint32x16", - argLen: 2, + name: "RotateRightMaskedInt64x2", + argLen: 3, generic: true, }, { - name: "RotateLeftUint32x16", - argLen: 2, + name: "RotateRightMaskedInt64x4", + argLen: 3, generic: true, }, { - name: "RotateLeftMaskedUint32x16", + name: "RotateRightMaskedInt64x8", argLen: 3, generic: true, }, { - name: "RotateRightUint32x16", - argLen: 2, + name: "RotateRightMaskedUint32x4", + argLen: 3, generic: true, }, { - name: "RotateRightMaskedUint32x16", + name: "RotateRightMaskedUint32x8", argLen: 3, generic: true, 
}, { - name: "ShiftAllLeftUint32x16", - argLen: 2, + name: "RotateRightMaskedUint32x16", + argLen: 3, generic: true, }, { - name: "ShiftAllLeftMaskedUint32x16", + name: "RotateRightMaskedUint64x2", argLen: 3, generic: true, }, { - name: "ShiftAllRightUint32x16", - argLen: 2, + name: "RotateRightMaskedUint64x4", + argLen: 3, generic: true, }, { - name: "ShiftAllRightMaskedUint32x16", + name: "RotateRightMaskedUint64x8", argLen: 3, generic: true, }, { - name: "ShiftLeftUint32x16", + name: "RotateRightUint32x4", argLen: 2, generic: true, }, { - name: "ShiftLeftAndFillUpperFromUint32x16", - argLen: 3, + name: "RotateRightUint32x8", + argLen: 2, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedUint32x16", - argLen: 4, + name: "RotateRightUint32x16", + argLen: 2, generic: true, }, { - name: "ShiftLeftMaskedUint32x16", - argLen: 3, + name: "RotateRightUint64x2", + argLen: 2, generic: true, }, { - name: "ShiftRightUint32x16", + name: "RotateRightUint64x4", argLen: 2, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint32x16", - argLen: 3, + name: "RotateRightUint64x8", + argLen: 2, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedUint32x16", - argLen: 4, + name: "RoundFloat32x4", + argLen: 1, generic: true, }, { - name: "ShiftRightMaskedUint32x16", - argLen: 3, + name: "RoundFloat32x8", + argLen: 1, generic: true, }, { - name: "SubUint32x16", - argLen: 2, + name: "RoundFloat64x2", + argLen: 1, generic: true, }, { - name: "SubMaskedUint32x16", - argLen: 3, + name: "RoundFloat64x4", + argLen: 1, generic: true, }, { - name: "XorUint32x16", + name: "SaturatedAddInt8x16", argLen: 2, commutative: true, generic: true, }, { - name: "XorMaskedUint32x16", - argLen: 3, + name: "SaturatedAddInt8x32", + argLen: 2, commutative: true, generic: true, }, { - name: "AddUint32x4", + name: "SaturatedAddInt8x64", argLen: 2, commutative: true, generic: true, }, { - name: "AddMaskedUint32x4", - argLen: 3, + name: "SaturatedAddInt16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "AndUint32x4", + name: "SaturatedAddInt16x16", argLen: 2, commutative: true, generic: true, }, { - name: "AndMaskedUint32x4", - argLen: 3, + name: "SaturatedAddInt16x32", + argLen: 2, commutative: true, generic: true, }, { - name: "AndNotUint32x4", - argLen: 2, - generic: true, - }, - { - name: "AndNotMaskedUint32x4", - argLen: 3, - generic: true, - }, - { - name: "CompressUint32x4", - argLen: 2, - generic: true, - }, - { - name: "EqualUint32x4", - argLen: 2, + name: "SaturatedAddMaskedInt8x16", + argLen: 3, commutative: true, generic: true, }, { - name: "EqualMaskedUint32x4", + name: "SaturatedAddMaskedInt8x32", argLen: 3, commutative: true, generic: true, }, { - name: "GreaterUint32x4", - argLen: 2, - generic: true, - }, - { - name: "GreaterEqualUint32x4", - argLen: 2, - generic: true, - }, - { - name: "GreaterEqualMaskedUint32x4", - argLen: 3, - generic: true, + name: "SaturatedAddMaskedInt8x64", + argLen: 3, + commutative: true, + generic: true, }, { - name: "GreaterMaskedUint32x4", - argLen: 3, - generic: true, + name: "SaturatedAddMaskedInt16x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LessUint32x4", - argLen: 2, - generic: true, + name: "SaturatedAddMaskedInt16x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LessEqualUint32x4", - argLen: 2, - generic: true, + name: "SaturatedAddMaskedInt16x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LessEqualMaskedUint32x4", - argLen: 3, - generic: true, + name: 
"SaturatedAddMaskedUint8x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LessMaskedUint32x4", - argLen: 3, - generic: true, + name: "SaturatedAddMaskedUint8x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaxUint32x4", - argLen: 2, + name: "SaturatedAddMaskedUint8x64", + argLen: 3, commutative: true, generic: true, }, { - name: "MaxMaskedUint32x4", + name: "SaturatedAddMaskedUint16x8", argLen: 3, commutative: true, generic: true, }, { - name: "MinUint32x4", - argLen: 2, + name: "SaturatedAddMaskedUint16x16", + argLen: 3, commutative: true, generic: true, }, { - name: "MinMaskedUint32x4", + name: "SaturatedAddMaskedUint16x32", argLen: 3, commutative: true, generic: true, }, { - name: "MulEvenWidenUint32x4", + name: "SaturatedAddUint8x16", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualUint32x4", + name: "SaturatedAddUint8x32", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualMaskedUint32x4", - argLen: 3, + name: "SaturatedAddUint8x64", + argLen: 2, commutative: true, generic: true, }, { - name: "OrUint32x4", + name: "SaturatedAddUint16x8", argLen: 2, commutative: true, generic: true, }, { - name: "OrMaskedUint32x4", - argLen: 3, + name: "SaturatedAddUint16x16", + argLen: 2, commutative: true, generic: true, }, { - name: "PairwiseAddUint32x4", - argLen: 2, - generic: true, - }, - { - name: "PairwiseSubUint32x4", - argLen: 2, - generic: true, - }, - { - name: "Permute2Float32x4", - argLen: 3, - generic: true, + name: "SaturatedAddUint16x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Permute2Uint32x4", + name: "SaturatedPairDotProdAccumulateInt32x4", argLen: 3, generic: true, }, { - name: "Permute2Int32x4", + name: "SaturatedPairDotProdAccumulateInt32x8", argLen: 3, generic: true, }, { - name: "Permute2MaskedInt32x4", - argLen: 4, + name: "SaturatedPairDotProdAccumulateInt32x16", + argLen: 3, generic: true, }, { - name: "Permute2MaskedUint32x4", + name: "SaturatedPairDotProdAccumulateMaskedInt32x4", argLen: 4, generic: true, }, { - name: "Permute2MaskedFloat32x4", + name: "SaturatedPairDotProdAccumulateMaskedInt32x8", argLen: 4, generic: true, }, { - name: "PopCountUint32x4", - argLen: 1, + name: "SaturatedPairDotProdAccumulateMaskedInt32x16", + argLen: 4, generic: true, }, { - name: "PopCountMaskedUint32x4", + name: "SaturatedPairwiseAddInt16x8", argLen: 2, generic: true, }, { - name: "RotateLeftUint32x4", + name: "SaturatedPairwiseAddInt16x16", argLen: 2, generic: true, }, { - name: "RotateLeftMaskedUint32x4", - argLen: 3, + name: "SaturatedPairwiseSubInt16x8", + argLen: 2, generic: true, }, { - name: "RotateRightUint32x4", + name: "SaturatedPairwiseSubInt16x16", argLen: 2, generic: true, }, { - name: "RotateRightMaskedUint32x4", - argLen: 3, + name: "SaturatedSubInt8x16", + argLen: 2, generic: true, }, { - name: "ShiftAllLeftUint32x4", + name: "SaturatedSubInt8x32", argLen: 2, generic: true, }, { - name: "ShiftAllLeftMaskedUint32x4", - argLen: 3, + name: "SaturatedSubInt8x64", + argLen: 2, generic: true, }, { - name: "ShiftAllRightUint32x4", + name: "SaturatedSubInt16x8", argLen: 2, generic: true, }, { - name: "ShiftAllRightMaskedUint32x4", - argLen: 3, + name: "SaturatedSubInt16x16", + argLen: 2, generic: true, }, { - name: "ShiftLeftUint32x4", + name: "SaturatedSubInt16x32", argLen: 2, generic: true, }, { - name: "ShiftLeftAndFillUpperFromUint32x4", + name: "SaturatedSubMaskedInt8x16", argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedUint32x4", - argLen: 4, + 
name: "SaturatedSubMaskedInt8x32", + argLen: 3, generic: true, }, { - name: "ShiftLeftMaskedUint32x4", + name: "SaturatedSubMaskedInt8x64", argLen: 3, generic: true, }, { - name: "ShiftRightUint32x4", - argLen: 2, + name: "SaturatedSubMaskedInt16x8", + argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint32x4", + name: "SaturatedSubMaskedInt16x16", argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedUint32x4", - argLen: 4, + name: "SaturatedSubMaskedInt16x32", + argLen: 3, generic: true, }, { - name: "ShiftRightMaskedUint32x4", + name: "SaturatedSubMaskedUint8x16", argLen: 3, generic: true, }, { - name: "SubUint32x4", - argLen: 2, + name: "SaturatedSubMaskedUint8x32", + argLen: 3, generic: true, }, { - name: "SubMaskedUint32x4", + name: "SaturatedSubMaskedUint8x64", argLen: 3, generic: true, }, { - name: "XorUint32x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "XorMaskedUint32x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AddUint32x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AddMaskedUint32x8", - argLen: 3, - commutative: true, - generic: true, + name: "SaturatedSubMaskedUint16x8", + argLen: 3, + generic: true, }, { - name: "AndUint32x8", - argLen: 2, - commutative: true, - generic: true, + name: "SaturatedSubMaskedUint16x16", + argLen: 3, + generic: true, }, { - name: "AndMaskedUint32x8", - argLen: 3, - commutative: true, - generic: true, + name: "SaturatedSubMaskedUint16x32", + argLen: 3, + generic: true, }, { - name: "AndNotUint32x8", + name: "SaturatedSubUint8x16", argLen: 2, generic: true, }, { - name: "AndNotMaskedUint32x8", - argLen: 3, + name: "SaturatedSubUint8x32", + argLen: 2, generic: true, }, { - name: "CompressUint32x8", + name: "SaturatedSubUint8x64", argLen: 2, generic: true, }, { - name: "EqualUint32x8", - argLen: 2, - commutative: true, - generic: true, + name: "SaturatedSubUint16x8", + argLen: 2, + generic: true, }, { - name: "EqualMaskedUint32x8", - argLen: 3, - commutative: true, - generic: true, + name: "SaturatedSubUint16x16", + argLen: 2, + generic: true, }, { - name: "GreaterUint32x8", + name: "SaturatedSubUint16x32", argLen: 2, generic: true, }, { - name: "GreaterEqualUint32x8", - argLen: 2, + name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x16", + argLen: 3, generic: true, }, { - name: "GreaterEqualMaskedUint32x8", + name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x32", argLen: 3, generic: true, }, { - name: "GreaterMaskedUint32x8", + name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x64", argLen: 3, generic: true, }, { - name: "LessUint32x8", + name: "SaturatedUnsignedSignedPairDotProdUint8x16", argLen: 2, generic: true, }, { - name: "LessEqualUint32x8", + name: "SaturatedUnsignedSignedPairDotProdUint8x32", argLen: 2, generic: true, }, { - name: "LessEqualMaskedUint32x8", - argLen: 3, + name: "SaturatedUnsignedSignedPairDotProdUint8x64", + argLen: 2, generic: true, }, { - name: "LessMaskedUint32x8", + name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x4", argLen: 3, generic: true, }, { - name: "MaxUint32x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "MaxMaskedUint32x8", - argLen: 3, - commutative: true, - generic: true, + name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x8", + argLen: 3, + generic: true, }, { - name: "MinUint32x8", - argLen: 2, - commutative: true, - generic: true, + name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x16", + argLen: 3, + generic: true, }, { - name: 
"MinMaskedUint32x8", - argLen: 3, - commutative: true, - generic: true, + name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4", + argLen: 4, + generic: true, }, { - name: "MulEvenWidenUint32x8", - argLen: 2, - commutative: true, - generic: true, + name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8", + argLen: 4, + generic: true, }, { - name: "NotEqualUint32x8", - argLen: 2, - commutative: true, - generic: true, + name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16", + argLen: 4, + generic: true, }, { - name: "NotEqualMaskedUint32x8", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftAllLeftInt16x8", + argLen: 2, + generic: true, }, { - name: "OrUint32x8", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftAllLeftInt16x16", + argLen: 2, + generic: true, }, { - name: "OrMaskedUint32x8", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftAllLeftInt16x32", + argLen: 2, + generic: true, }, { - name: "PairwiseAddUint32x8", + name: "ShiftAllLeftInt32x4", argLen: 2, generic: true, }, { - name: "PairwiseSubUint32x8", + name: "ShiftAllLeftInt32x8", argLen: 2, generic: true, }, { - name: "PermuteUint32x8", + name: "ShiftAllLeftInt32x16", argLen: 2, generic: true, }, { - name: "PermuteFloat32x8", + name: "ShiftAllLeftInt64x2", argLen: 2, generic: true, }, { - name: "PermuteInt32x8", + name: "ShiftAllLeftInt64x4", argLen: 2, generic: true, }, { - name: "Permute2Int32x8", - argLen: 3, + name: "ShiftAllLeftInt64x8", + argLen: 2, generic: true, }, { - name: "Permute2Float32x8", + name: "ShiftAllLeftMaskedInt16x8", argLen: 3, generic: true, }, { - name: "Permute2Uint32x8", + name: "ShiftAllLeftMaskedInt16x16", argLen: 3, generic: true, }, { - name: "Permute2MaskedFloat32x8", - argLen: 4, + name: "ShiftAllLeftMaskedInt16x32", + argLen: 3, generic: true, }, { - name: "Permute2MaskedUint32x8", - argLen: 4, + name: "ShiftAllLeftMaskedInt32x4", + argLen: 3, generic: true, }, { - name: "Permute2MaskedInt32x8", - argLen: 4, + name: "ShiftAllLeftMaskedInt32x8", + argLen: 3, generic: true, }, { - name: "PermuteMaskedInt32x8", + name: "ShiftAllLeftMaskedInt32x16", argLen: 3, generic: true, }, { - name: "PermuteMaskedUint32x8", + name: "ShiftAllLeftMaskedInt64x2", argLen: 3, generic: true, }, { - name: "PermuteMaskedFloat32x8", + name: "ShiftAllLeftMaskedInt64x4", argLen: 3, generic: true, }, { - name: "PopCountUint32x8", - argLen: 1, + name: "ShiftAllLeftMaskedInt64x8", + argLen: 3, generic: true, }, { - name: "PopCountMaskedUint32x8", - argLen: 2, + name: "ShiftAllLeftMaskedUint16x8", + argLen: 3, generic: true, }, { - name: "RotateLeftUint32x8", - argLen: 2, + name: "ShiftAllLeftMaskedUint16x16", + argLen: 3, generic: true, }, { - name: "RotateLeftMaskedUint32x8", + name: "ShiftAllLeftMaskedUint16x32", argLen: 3, generic: true, }, { - name: "RotateRightUint32x8", - argLen: 2, + name: "ShiftAllLeftMaskedUint32x4", + argLen: 3, generic: true, }, { - name: "RotateRightMaskedUint32x8", + name: "ShiftAllLeftMaskedUint32x8", argLen: 3, generic: true, }, { - name: "ShiftAllLeftUint32x8", - argLen: 2, + name: "ShiftAllLeftMaskedUint32x16", + argLen: 3, generic: true, }, { - name: "ShiftAllLeftMaskedUint32x8", + name: "ShiftAllLeftMaskedUint64x2", argLen: 3, generic: true, }, { - name: "ShiftAllRightUint32x8", - argLen: 2, + name: "ShiftAllLeftMaskedUint64x4", + argLen: 3, generic: true, }, { - name: "ShiftAllRightMaskedUint32x8", + name: "ShiftAllLeftMaskedUint64x8", argLen: 3, generic: true, }, { - name: "ShiftLeftUint32x8", + name: 
"ShiftAllLeftUint16x8", argLen: 2, generic: true, }, { - name: "ShiftLeftAndFillUpperFromUint32x8", - argLen: 3, + name: "ShiftAllLeftUint16x16", + argLen: 2, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedUint32x8", - argLen: 4, + name: "ShiftAllLeftUint16x32", + argLen: 2, generic: true, }, { - name: "ShiftLeftMaskedUint32x8", - argLen: 3, + name: "ShiftAllLeftUint32x4", + argLen: 2, generic: true, }, { - name: "ShiftRightUint32x8", + name: "ShiftAllLeftUint32x8", argLen: 2, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint32x8", - argLen: 3, + name: "ShiftAllLeftUint32x16", + argLen: 2, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedUint32x8", - argLen: 4, + name: "ShiftAllLeftUint64x2", + argLen: 2, generic: true, }, { - name: "ShiftRightMaskedUint32x8", - argLen: 3, + name: "ShiftAllLeftUint64x4", + argLen: 2, generic: true, }, { - name: "SubUint32x8", + name: "ShiftAllLeftUint64x8", argLen: 2, generic: true, }, { - name: "SubMaskedUint32x8", - argLen: 3, + name: "ShiftAllRightInt16x8", + argLen: 2, generic: true, }, { - name: "XorUint32x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "XorMaskedUint32x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AddUint64x2", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AddMaskedUint64x2", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AndUint64x2", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndMaskedUint64x2", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftAllRightInt16x16", + argLen: 2, + generic: true, }, { - name: "AndNotUint64x2", + name: "ShiftAllRightInt16x32", argLen: 2, generic: true, }, { - name: "AndNotMaskedUint64x2", - argLen: 3, + name: "ShiftAllRightInt32x4", + argLen: 2, generic: true, }, { - name: "CompressUint64x2", + name: "ShiftAllRightInt32x8", argLen: 2, generic: true, }, { - name: "EqualUint64x2", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftAllRightInt32x16", + argLen: 2, + generic: true, }, { - name: "EqualMaskedUint64x2", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftAllRightInt64x2", + argLen: 2, + generic: true, }, { - name: "GreaterUint64x2", + name: "ShiftAllRightInt64x4", argLen: 2, generic: true, }, { - name: "GreaterEqualUint64x2", + name: "ShiftAllRightInt64x8", argLen: 2, generic: true, }, { - name: "GreaterEqualMaskedUint64x2", + name: "ShiftAllRightMaskedInt16x8", argLen: 3, generic: true, }, { - name: "GreaterMaskedUint64x2", + name: "ShiftAllRightMaskedInt16x16", argLen: 3, generic: true, }, { - name: "LessUint64x2", - argLen: 2, + name: "ShiftAllRightMaskedInt16x32", + argLen: 3, generic: true, }, { - name: "LessEqualUint64x2", - argLen: 2, + name: "ShiftAllRightMaskedInt32x4", + argLen: 3, generic: true, }, { - name: "LessEqualMaskedUint64x2", + name: "ShiftAllRightMaskedInt32x8", argLen: 3, generic: true, }, { - name: "LessMaskedUint64x2", + name: "ShiftAllRightMaskedInt32x16", argLen: 3, generic: true, }, { - name: "MaxUint64x2", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "MaxMaskedUint64x2", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MinUint64x2", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftAllRightMaskedInt64x2", + argLen: 3, + generic: true, }, { - name: "MinMaskedUint64x2", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftAllRightMaskedInt64x4", + argLen: 3, + generic: true, }, { - name: 
"MulEvenWidenUint64x2", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftAllRightMaskedInt64x8", + argLen: 3, + generic: true, }, { - name: "MulEvenWidenMaskedUint64x2", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftAllRightMaskedUint16x8", + argLen: 3, + generic: true, }, { - name: "NotEqualUint64x2", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftAllRightMaskedUint16x16", + argLen: 3, + generic: true, }, { - name: "NotEqualMaskedUint64x2", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftAllRightMaskedUint16x32", + argLen: 3, + generic: true, }, { - name: "OrUint64x2", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftAllRightMaskedUint32x4", + argLen: 3, + generic: true, }, { - name: "OrMaskedUint64x2", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftAllRightMaskedUint32x8", + argLen: 3, + generic: true, }, { - name: "Permute2Float64x2", + name: "ShiftAllRightMaskedUint32x16", argLen: 3, generic: true, }, { - name: "Permute2Uint64x2", + name: "ShiftAllRightMaskedUint64x2", argLen: 3, generic: true, }, { - name: "Permute2Int64x2", + name: "ShiftAllRightMaskedUint64x4", argLen: 3, generic: true, }, { - name: "Permute2MaskedInt64x2", - argLen: 4, + name: "ShiftAllRightMaskedUint64x8", + argLen: 3, generic: true, }, { - name: "Permute2MaskedFloat64x2", - argLen: 4, + name: "ShiftAllRightUint16x8", + argLen: 2, generic: true, }, { - name: "Permute2MaskedUint64x2", - argLen: 4, + name: "ShiftAllRightUint16x16", + argLen: 2, generic: true, }, { - name: "PopCountUint64x2", - argLen: 1, + name: "ShiftAllRightUint16x32", + argLen: 2, generic: true, }, { - name: "PopCountMaskedUint64x2", + name: "ShiftAllRightUint32x4", argLen: 2, generic: true, }, { - name: "RotateLeftUint64x2", + name: "ShiftAllRightUint32x8", argLen: 2, generic: true, }, { - name: "RotateLeftMaskedUint64x2", - argLen: 3, + name: "ShiftAllRightUint32x16", + argLen: 2, generic: true, }, { - name: "RotateRightUint64x2", + name: "ShiftAllRightUint64x2", argLen: 2, generic: true, }, { - name: "RotateRightMaskedUint64x2", - argLen: 3, + name: "ShiftAllRightUint64x4", + argLen: 2, generic: true, }, { - name: "ShiftAllLeftUint64x2", + name: "ShiftAllRightUint64x8", argLen: 2, generic: true, }, { - name: "ShiftAllLeftMaskedUint64x2", + name: "ShiftLeftAndFillUpperFromInt16x8", argLen: 3, generic: true, }, { - name: "ShiftAllRightUint64x2", - argLen: 2, + name: "ShiftLeftAndFillUpperFromInt16x16", + argLen: 3, generic: true, }, { - name: "ShiftAllRightMaskedUint64x2", + name: "ShiftLeftAndFillUpperFromInt16x32", argLen: 3, generic: true, }, { - name: "ShiftLeftUint64x2", - argLen: 2, + name: "ShiftLeftAndFillUpperFromInt32x4", + argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromUint64x2", + name: "ShiftLeftAndFillUpperFromInt32x8", argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedUint64x2", - argLen: 4, + name: "ShiftLeftAndFillUpperFromInt32x16", + argLen: 3, generic: true, }, { - name: "ShiftLeftMaskedUint64x2", + name: "ShiftLeftAndFillUpperFromInt64x2", argLen: 3, generic: true, }, { - name: "ShiftRightUint64x2", - argLen: 2, + name: "ShiftLeftAndFillUpperFromInt64x4", + argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint64x2", + name: "ShiftLeftAndFillUpperFromInt64x8", argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedUint64x2", + name: "ShiftLeftAndFillUpperFromMaskedInt16x8", argLen: 4, generic: true, }, { - name: "ShiftRightMaskedUint64x2", - argLen: 3, 
+ name: "ShiftLeftAndFillUpperFromMaskedInt16x16", + argLen: 4, generic: true, }, { - name: "SubUint64x2", - argLen: 2, + name: "ShiftLeftAndFillUpperFromMaskedInt16x32", + argLen: 4, generic: true, }, { - name: "SubMaskedUint64x2", - argLen: 3, + name: "ShiftLeftAndFillUpperFromMaskedInt32x4", + argLen: 4, generic: true, }, { - name: "XorUint64x2", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftLeftAndFillUpperFromMaskedInt32x8", + argLen: 4, + generic: true, }, { - name: "XorMaskedUint64x2", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftLeftAndFillUpperFromMaskedInt32x16", + argLen: 4, + generic: true, }, { - name: "AddUint64x4", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftLeftAndFillUpperFromMaskedInt64x2", + argLen: 4, + generic: true, }, { - name: "AddMaskedUint64x4", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftLeftAndFillUpperFromMaskedInt64x4", + argLen: 4, + generic: true, }, { - name: "AndUint64x4", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftLeftAndFillUpperFromMaskedInt64x8", + argLen: 4, + generic: true, }, { - name: "AndMaskedUint64x4", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftLeftAndFillUpperFromMaskedUint16x8", + argLen: 4, + generic: true, }, { - name: "AndNotUint64x4", - argLen: 2, + name: "ShiftLeftAndFillUpperFromMaskedUint16x16", + argLen: 4, generic: true, }, { - name: "AndNotMaskedUint64x4", - argLen: 3, + name: "ShiftLeftAndFillUpperFromMaskedUint16x32", + argLen: 4, generic: true, }, { - name: "CompressUint64x4", - argLen: 2, + name: "ShiftLeftAndFillUpperFromMaskedUint32x4", + argLen: 4, generic: true, }, { - name: "EqualUint64x4", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftLeftAndFillUpperFromMaskedUint32x8", + argLen: 4, + generic: true, }, { - name: "EqualMaskedUint64x4", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftLeftAndFillUpperFromMaskedUint32x16", + argLen: 4, + generic: true, }, { - name: "GreaterUint64x4", - argLen: 2, + name: "ShiftLeftAndFillUpperFromMaskedUint64x2", + argLen: 4, generic: true, }, { - name: "GreaterEqualUint64x4", - argLen: 2, + name: "ShiftLeftAndFillUpperFromMaskedUint64x4", + argLen: 4, generic: true, }, { - name: "GreaterEqualMaskedUint64x4", - argLen: 3, + name: "ShiftLeftAndFillUpperFromMaskedUint64x8", + argLen: 4, generic: true, }, { - name: "GreaterMaskedUint64x4", + name: "ShiftLeftAndFillUpperFromUint16x8", argLen: 3, generic: true, }, { - name: "LessUint64x4", - argLen: 2, + name: "ShiftLeftAndFillUpperFromUint16x16", + argLen: 3, generic: true, }, { - name: "LessEqualUint64x4", - argLen: 2, + name: "ShiftLeftAndFillUpperFromUint16x32", + argLen: 3, generic: true, }, { - name: "LessEqualMaskedUint64x4", + name: "ShiftLeftAndFillUpperFromUint32x4", argLen: 3, generic: true, }, { - name: "LessMaskedUint64x4", + name: "ShiftLeftAndFillUpperFromUint32x8", argLen: 3, generic: true, }, { - name: "MaxUint64x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "MaxMaskedUint64x4", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftLeftAndFillUpperFromUint32x16", + argLen: 3, + generic: true, }, { - name: "MinUint64x4", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftLeftAndFillUpperFromUint64x2", + argLen: 3, + generic: true, }, { - name: "MinMaskedUint64x4", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftLeftAndFillUpperFromUint64x4", + argLen: 3, + generic: true, }, { - name: "MulEvenWidenUint64x4", 
- argLen: 2, - commutative: true, - generic: true, + name: "ShiftLeftAndFillUpperFromUint64x8", + argLen: 3, + generic: true, }, { - name: "MulEvenWidenMaskedUint64x4", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftLeftInt16x8", + argLen: 2, + generic: true, }, { - name: "NotEqualUint64x4", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftLeftInt16x16", + argLen: 2, + generic: true, }, { - name: "NotEqualMaskedUint64x4", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftLeftInt16x32", + argLen: 2, + generic: true, }, { - name: "OrUint64x4", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftLeftInt32x4", + argLen: 2, + generic: true, }, { - name: "OrMaskedUint64x4", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftLeftInt32x8", + argLen: 2, + generic: true, }, { - name: "PermuteUint64x4", + name: "ShiftLeftInt32x16", argLen: 2, generic: true, }, { - name: "PermuteInt64x4", + name: "ShiftLeftInt64x2", argLen: 2, generic: true, }, { - name: "PermuteFloat64x4", + name: "ShiftLeftInt64x4", argLen: 2, generic: true, }, { - name: "Permute2Uint64x4", - argLen: 3, + name: "ShiftLeftInt64x8", + argLen: 2, generic: true, }, { - name: "Permute2Int64x4", + name: "ShiftLeftMaskedInt16x8", argLen: 3, generic: true, }, { - name: "Permute2Float64x4", + name: "ShiftLeftMaskedInt16x16", argLen: 3, generic: true, }, { - name: "Permute2MaskedUint64x4", - argLen: 4, + name: "ShiftLeftMaskedInt16x32", + argLen: 3, generic: true, }, { - name: "Permute2MaskedFloat64x4", - argLen: 4, + name: "ShiftLeftMaskedInt32x4", + argLen: 3, generic: true, }, { - name: "Permute2MaskedInt64x4", - argLen: 4, + name: "ShiftLeftMaskedInt32x8", + argLen: 3, generic: true, }, { - name: "PermuteMaskedUint64x4", + name: "ShiftLeftMaskedInt32x16", argLen: 3, generic: true, }, { - name: "PermuteMaskedFloat64x4", + name: "ShiftLeftMaskedInt64x2", argLen: 3, generic: true, }, { - name: "PermuteMaskedInt64x4", + name: "ShiftLeftMaskedInt64x4", argLen: 3, generic: true, }, { - name: "PopCountUint64x4", - argLen: 1, + name: "ShiftLeftMaskedInt64x8", + argLen: 3, generic: true, }, { - name: "PopCountMaskedUint64x4", - argLen: 2, + name: "ShiftLeftMaskedUint16x8", + argLen: 3, generic: true, }, { - name: "RotateLeftUint64x4", - argLen: 2, + name: "ShiftLeftMaskedUint16x16", + argLen: 3, generic: true, }, { - name: "RotateLeftMaskedUint64x4", + name: "ShiftLeftMaskedUint16x32", argLen: 3, generic: true, }, { - name: "RotateRightUint64x4", - argLen: 2, + name: "ShiftLeftMaskedUint32x4", + argLen: 3, generic: true, }, { - name: "RotateRightMaskedUint64x4", + name: "ShiftLeftMaskedUint32x8", argLen: 3, generic: true, }, { - name: "ShiftAllLeftUint64x4", - argLen: 2, + name: "ShiftLeftMaskedUint32x16", + argLen: 3, generic: true, }, { - name: "ShiftAllLeftMaskedUint64x4", + name: "ShiftLeftMaskedUint64x2", argLen: 3, generic: true, }, { - name: "ShiftAllRightUint64x4", - argLen: 2, + name: "ShiftLeftMaskedUint64x4", + argLen: 3, generic: true, }, { - name: "ShiftAllRightMaskedUint64x4", + name: "ShiftLeftMaskedUint64x8", argLen: 3, generic: true, }, { - name: "ShiftLeftUint64x4", + name: "ShiftLeftUint16x8", argLen: 2, generic: true, }, { - name: "ShiftLeftAndFillUpperFromUint64x4", - argLen: 3, + name: "ShiftLeftUint16x16", + argLen: 2, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedUint64x4", - argLen: 4, + name: "ShiftLeftUint16x32", + argLen: 2, generic: true, }, { - name: "ShiftLeftMaskedUint64x4", - argLen: 3, + name: "ShiftLeftUint32x4", + argLen: 
2, generic: true, }, { - name: "ShiftRightUint64x4", + name: "ShiftLeftUint32x8", argLen: 2, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint64x4", - argLen: 3, + name: "ShiftLeftUint32x16", + argLen: 2, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedUint64x4", - argLen: 4, + name: "ShiftLeftUint64x2", + argLen: 2, generic: true, }, { - name: "ShiftRightMaskedUint64x4", - argLen: 3, + name: "ShiftLeftUint64x4", + argLen: 2, generic: true, }, { - name: "SubUint64x4", + name: "ShiftLeftUint64x8", argLen: 2, generic: true, }, { - name: "SubMaskedUint64x4", + name: "ShiftRightAndFillUpperFromInt16x8", argLen: 3, generic: true, }, { - name: "XorUint64x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "XorMaskedUint64x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AddUint64x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AddMaskedUint64x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AndUint64x8", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightAndFillUpperFromInt16x16", + argLen: 3, + generic: true, }, { - name: "AndMaskedUint64x8", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftRightAndFillUpperFromInt16x32", + argLen: 3, + generic: true, }, { - name: "AndNotUint64x8", - argLen: 2, + name: "ShiftRightAndFillUpperFromInt32x4", + argLen: 3, generic: true, }, { - name: "AndNotMaskedUint64x8", + name: "ShiftRightAndFillUpperFromInt32x8", argLen: 3, generic: true, }, { - name: "CompressUint64x8", - argLen: 2, + name: "ShiftRightAndFillUpperFromInt32x16", + argLen: 3, generic: true, }, { - name: "EqualUint64x8", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightAndFillUpperFromInt64x2", + argLen: 3, + generic: true, }, { - name: "EqualMaskedUint64x8", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftRightAndFillUpperFromInt64x4", + argLen: 3, + generic: true, }, { - name: "GreaterUint64x8", - argLen: 2, + name: "ShiftRightAndFillUpperFromInt64x8", + argLen: 3, generic: true, }, { - name: "GreaterEqualUint64x8", - argLen: 2, + name: "ShiftRightAndFillUpperFromMaskedInt16x8", + argLen: 4, generic: true, }, { - name: "GreaterEqualMaskedUint64x8", - argLen: 3, + name: "ShiftRightAndFillUpperFromMaskedInt16x16", + argLen: 4, generic: true, }, { - name: "GreaterMaskedUint64x8", - argLen: 3, + name: "ShiftRightAndFillUpperFromMaskedInt16x32", + argLen: 4, generic: true, }, { - name: "LessUint64x8", - argLen: 2, + name: "ShiftRightAndFillUpperFromMaskedInt32x4", + argLen: 4, generic: true, }, { - name: "LessEqualUint64x8", - argLen: 2, + name: "ShiftRightAndFillUpperFromMaskedInt32x8", + argLen: 4, generic: true, }, { - name: "LessEqualMaskedUint64x8", - argLen: 3, + name: "ShiftRightAndFillUpperFromMaskedInt32x16", + argLen: 4, generic: true, }, { - name: "LessMaskedUint64x8", - argLen: 3, + name: "ShiftRightAndFillUpperFromMaskedInt64x2", + argLen: 4, generic: true, }, { - name: "MaxUint64x8", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightAndFillUpperFromMaskedInt64x4", + argLen: 4, + generic: true, }, { - name: "MaxMaskedUint64x8", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftRightAndFillUpperFromMaskedInt64x8", + argLen: 4, + generic: true, }, { - name: "MinUint64x8", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightAndFillUpperFromMaskedUint16x8", + argLen: 4, + generic: true, }, { - name: "MinMaskedUint64x8", - argLen: 3, - commutative: true, 
- generic: true, + name: "ShiftRightAndFillUpperFromMaskedUint16x16", + argLen: 4, + generic: true, }, { - name: "MulEvenWidenUint64x8", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightAndFillUpperFromMaskedUint16x32", + argLen: 4, + generic: true, }, { - name: "MulEvenWidenMaskedUint64x8", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftRightAndFillUpperFromMaskedUint32x4", + argLen: 4, + generic: true, }, { - name: "NotEqualUint64x8", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightAndFillUpperFromMaskedUint32x8", + argLen: 4, + generic: true, }, { - name: "NotEqualMaskedUint64x8", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftRightAndFillUpperFromMaskedUint32x16", + argLen: 4, + generic: true, }, { - name: "OrUint64x8", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightAndFillUpperFromMaskedUint64x2", + argLen: 4, + generic: true, }, { - name: "OrMaskedUint64x8", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftRightAndFillUpperFromMaskedUint64x4", + argLen: 4, + generic: true, }, { - name: "PermuteUint64x8", - argLen: 2, + name: "ShiftRightAndFillUpperFromMaskedUint64x8", + argLen: 4, generic: true, }, { - name: "PermuteFloat64x8", - argLen: 2, + name: "ShiftRightAndFillUpperFromUint16x8", + argLen: 3, generic: true, }, { - name: "PermuteInt64x8", - argLen: 2, + name: "ShiftRightAndFillUpperFromUint16x16", + argLen: 3, generic: true, }, { - name: "Permute2Float64x8", + name: "ShiftRightAndFillUpperFromUint16x32", argLen: 3, generic: true, }, { - name: "Permute2Uint64x8", + name: "ShiftRightAndFillUpperFromUint32x4", argLen: 3, generic: true, }, { - name: "Permute2Int64x8", + name: "ShiftRightAndFillUpperFromUint32x8", argLen: 3, generic: true, }, { - name: "Permute2MaskedFloat64x8", - argLen: 4, + name: "ShiftRightAndFillUpperFromUint32x16", + argLen: 3, generic: true, }, { - name: "Permute2MaskedUint64x8", - argLen: 4, + name: "ShiftRightAndFillUpperFromUint64x2", + argLen: 3, generic: true, }, { - name: "Permute2MaskedInt64x8", - argLen: 4, + name: "ShiftRightAndFillUpperFromUint64x4", + argLen: 3, generic: true, }, { - name: "PermuteMaskedInt64x8", + name: "ShiftRightAndFillUpperFromUint64x8", argLen: 3, generic: true, }, { - name: "PermuteMaskedFloat64x8", - argLen: 3, + name: "ShiftRightInt16x8", + argLen: 2, generic: true, }, { - name: "PermuteMaskedUint64x8", - argLen: 3, + name: "ShiftRightInt16x16", + argLen: 2, generic: true, }, { - name: "PopCountUint64x8", - argLen: 1, + name: "ShiftRightInt16x32", + argLen: 2, generic: true, }, { - name: "PopCountMaskedUint64x8", + name: "ShiftRightInt32x4", argLen: 2, generic: true, }, { - name: "RotateLeftUint64x8", + name: "ShiftRightInt32x8", argLen: 2, generic: true, }, { - name: "RotateLeftMaskedUint64x8", - argLen: 3, + name: "ShiftRightInt32x16", + argLen: 2, generic: true, }, { - name: "RotateRightUint64x8", + name: "ShiftRightInt64x2", argLen: 2, generic: true, }, { - name: "RotateRightMaskedUint64x8", - argLen: 3, + name: "ShiftRightInt64x4", + argLen: 2, generic: true, }, { - name: "ShiftAllLeftUint64x8", + name: "ShiftRightInt64x8", argLen: 2, generic: true, }, { - name: "ShiftAllLeftMaskedUint64x8", + name: "ShiftRightMaskedInt16x8", argLen: 3, generic: true, }, { - name: "ShiftAllRightUint64x8", - argLen: 2, + name: "ShiftRightMaskedInt16x16", + argLen: 3, generic: true, }, { - name: "ShiftAllRightMaskedUint64x8", + name: "ShiftRightMaskedInt16x32", argLen: 3, generic: true, }, { - name: "ShiftLeftUint64x8", - 
argLen: 2, + name: "ShiftRightMaskedInt32x4", + argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromUint64x8", + name: "ShiftRightMaskedInt32x8", argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedUint64x8", - argLen: 4, + name: "ShiftRightMaskedInt32x16", + argLen: 3, generic: true, }, { - name: "ShiftLeftMaskedUint64x8", + name: "ShiftRightMaskedInt64x2", argLen: 3, generic: true, }, { - name: "ShiftRightUint64x8", - argLen: 2, + name: "ShiftRightMaskedInt64x4", + argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint64x8", + name: "ShiftRightMaskedInt64x8", argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedUint64x8", - argLen: 4, + name: "ShiftRightMaskedUint16x8", + argLen: 3, generic: true, }, { - name: "ShiftRightMaskedUint64x8", + name: "ShiftRightMaskedUint16x16", argLen: 3, generic: true, }, { - name: "SubUint64x8", - argLen: 2, + name: "ShiftRightMaskedUint16x32", + argLen: 3, generic: true, }, { - name: "SubMaskedUint64x8", + name: "ShiftRightMaskedUint32x4", argLen: 3, generic: true, }, { - name: "XorUint64x8", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightMaskedUint32x8", + argLen: 3, + generic: true, }, { - name: "XorMaskedUint64x8", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftRightMaskedUint32x16", + argLen: 3, + generic: true, }, { - name: "AddUint8x16", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightMaskedUint64x2", + argLen: 3, + generic: true, }, { - name: "AddMaskedUint8x16", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftRightMaskedUint64x4", + argLen: 3, + generic: true, }, { - name: "AndUint8x16", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightMaskedUint64x8", + argLen: 3, + generic: true, }, { - name: "AndNotUint8x16", + name: "ShiftRightUint16x8", argLen: 2, generic: true, }, { - name: "AverageUint8x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AverageMaskedUint8x16", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftRightUint16x16", + argLen: 2, + generic: true, }, { - name: "CompressUint8x16", + name: "ShiftRightUint16x32", argLen: 2, generic: true, }, { - name: "EqualUint8x16", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightUint32x4", + argLen: 2, + generic: true, }, { - name: "EqualMaskedUint8x16", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftRightUint32x8", + argLen: 2, + generic: true, }, { - name: "GaloisFieldMulUint8x16", + name: "ShiftRightUint32x16", argLen: 2, generic: true, }, { - name: "GaloisFieldMulMaskedUint8x16", - argLen: 3, + name: "ShiftRightUint64x2", + argLen: 2, generic: true, }, { - name: "GreaterUint8x16", + name: "ShiftRightUint64x4", argLen: 2, generic: true, }, { - name: "GreaterEqualUint8x16", + name: "ShiftRightUint64x8", argLen: 2, generic: true, }, { - name: "GreaterEqualMaskedUint8x16", - argLen: 3, + name: "SignInt8x16", + argLen: 2, generic: true, }, { - name: "GreaterMaskedUint8x16", - argLen: 3, + name: "SignInt8x32", + argLen: 2, generic: true, }, { - name: "LessUint8x16", + name: "SignInt16x8", argLen: 2, generic: true, }, { - name: "LessEqualUint8x16", + name: "SignInt16x16", argLen: 2, generic: true, }, { - name: "LessEqualMaskedUint8x16", - argLen: 3, + name: "SignInt32x4", + argLen: 2, generic: true, }, { - name: "LessMaskedUint8x16", - argLen: 3, + name: "SignInt32x8", + argLen: 2, generic: true, }, { - name: "MaxUint8x16", - argLen: 2, - commutative: true, - 
generic: true, + name: "SqrtFloat32x4", + argLen: 1, + generic: true, }, { - name: "MaxMaskedUint8x16", - argLen: 3, - commutative: true, - generic: true, + name: "SqrtFloat32x8", + argLen: 1, + generic: true, }, { - name: "MinUint8x16", - argLen: 2, - commutative: true, - generic: true, + name: "SqrtFloat32x16", + argLen: 1, + generic: true, }, { - name: "MinMaskedUint8x16", - argLen: 3, - commutative: true, - generic: true, + name: "SqrtFloat64x2", + argLen: 1, + generic: true, }, { - name: "NotEqualUint8x16", - argLen: 2, - commutative: true, - generic: true, + name: "SqrtFloat64x4", + argLen: 1, + generic: true, }, { - name: "NotEqualMaskedUint8x16", - argLen: 3, - commutative: true, - generic: true, + name: "SqrtFloat64x8", + argLen: 1, + generic: true, }, { - name: "OrUint8x16", - argLen: 2, - commutative: true, - generic: true, + name: "SqrtMaskedFloat32x4", + argLen: 2, + generic: true, }, { - name: "PermuteUint8x16", + name: "SqrtMaskedFloat32x8", argLen: 2, generic: true, }, { - name: "PermuteInt8x16", + name: "SqrtMaskedFloat32x16", argLen: 2, generic: true, }, { - name: "Permute2Uint8x16", - argLen: 3, + name: "SqrtMaskedFloat64x2", + argLen: 2, generic: true, }, { - name: "Permute2Int8x16", - argLen: 3, + name: "SqrtMaskedFloat64x4", + argLen: 2, generic: true, }, { - name: "Permute2MaskedInt8x16", - argLen: 4, + name: "SqrtMaskedFloat64x8", + argLen: 2, generic: true, }, { - name: "Permute2MaskedUint8x16", - argLen: 4, + name: "SubFloat32x4", + argLen: 2, generic: true, }, { - name: "PermuteMaskedUint8x16", - argLen: 3, + name: "SubFloat32x8", + argLen: 2, generic: true, }, { - name: "PermuteMaskedInt8x16", - argLen: 3, + name: "SubFloat32x16", + argLen: 2, generic: true, }, { - name: "PopCountUint8x16", - argLen: 1, + name: "SubFloat64x2", + argLen: 2, generic: true, }, { - name: "PopCountMaskedUint8x16", + name: "SubFloat64x4", argLen: 2, generic: true, }, { - name: "SaturatedAddUint8x16", - argLen: 2, - commutative: true, - generic: true, + name: "SubFloat64x8", + argLen: 2, + generic: true, }, { - name: "SaturatedAddMaskedUint8x16", - argLen: 3, - commutative: true, - generic: true, + name: "SubInt8x16", + argLen: 2, + generic: true, }, { - name: "SaturatedSubUint8x16", + name: "SubInt8x32", argLen: 2, generic: true, }, { - name: "SaturatedSubMaskedUint8x16", - argLen: 3, + name: "SubInt8x64", + argLen: 2, generic: true, }, { - name: "SaturatedUnsignedSignedPairDotProdUint8x16", + name: "SubInt16x8", argLen: 2, generic: true, }, { - name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x16", - argLen: 3, + name: "SubInt16x16", + argLen: 2, generic: true, }, { - name: "SubUint8x16", + name: "SubInt16x32", argLen: 2, generic: true, }, { - name: "SubMaskedUint8x16", - argLen: 3, + name: "SubInt32x4", + argLen: 2, generic: true, }, { - name: "XorUint8x16", - argLen: 2, - commutative: true, - generic: true, + name: "SubInt32x8", + argLen: 2, + generic: true, }, { - name: "AddUint8x32", - argLen: 2, - commutative: true, - generic: true, + name: "SubInt32x16", + argLen: 2, + generic: true, }, { - name: "AddMaskedUint8x32", - argLen: 3, - commutative: true, - generic: true, + name: "SubInt64x2", + argLen: 2, + generic: true, }, { - name: "AndUint8x32", - argLen: 2, - commutative: true, - generic: true, + name: "SubInt64x4", + argLen: 2, + generic: true, }, { - name: "AndNotUint8x32", + name: "SubInt64x8", argLen: 2, generic: true, }, { - name: "AverageUint8x32", - argLen: 2, - commutative: true, - generic: true, + name: "SubMaskedFloat32x4", + argLen: 3, + generic: true, }, { - 
name: "AverageMaskedUint8x32", - argLen: 3, - commutative: true, - generic: true, + name: "SubMaskedFloat32x8", + argLen: 3, + generic: true, }, { - name: "CompressUint8x32", - argLen: 2, + name: "SubMaskedFloat32x16", + argLen: 3, generic: true, }, { - name: "EqualUint8x32", - argLen: 2, - commutative: true, - generic: true, + name: "SubMaskedFloat64x2", + argLen: 3, + generic: true, }, { - name: "EqualMaskedUint8x32", - argLen: 3, - commutative: true, - generic: true, + name: "SubMaskedFloat64x4", + argLen: 3, + generic: true, }, { - name: "GaloisFieldMulUint8x32", - argLen: 2, + name: "SubMaskedFloat64x8", + argLen: 3, generic: true, }, { - name: "GaloisFieldMulMaskedUint8x32", + name: "SubMaskedInt8x16", argLen: 3, generic: true, }, { - name: "GreaterUint8x32", - argLen: 2, + name: "SubMaskedInt8x32", + argLen: 3, generic: true, }, { - name: "GreaterEqualUint8x32", - argLen: 2, + name: "SubMaskedInt8x64", + argLen: 3, generic: true, }, { - name: "GreaterEqualMaskedUint8x32", + name: "SubMaskedInt16x8", argLen: 3, generic: true, }, { - name: "GreaterMaskedUint8x32", + name: "SubMaskedInt16x16", argLen: 3, generic: true, }, { - name: "LessUint8x32", - argLen: 2, + name: "SubMaskedInt16x32", + argLen: 3, generic: true, }, { - name: "LessEqualUint8x32", - argLen: 2, + name: "SubMaskedInt32x4", + argLen: 3, generic: true, }, { - name: "LessEqualMaskedUint8x32", + name: "SubMaskedInt32x8", argLen: 3, generic: true, }, { - name: "LessMaskedUint8x32", + name: "SubMaskedInt32x16", argLen: 3, generic: true, }, { - name: "MaxUint8x32", - argLen: 2, - commutative: true, - generic: true, + name: "SubMaskedInt64x2", + argLen: 3, + generic: true, }, { - name: "MaxMaskedUint8x32", - argLen: 3, - commutative: true, - generic: true, + name: "SubMaskedInt64x4", + argLen: 3, + generic: true, }, { - name: "MinUint8x32", - argLen: 2, - commutative: true, - generic: true, + name: "SubMaskedInt64x8", + argLen: 3, + generic: true, }, { - name: "MinMaskedUint8x32", - argLen: 3, - commutative: true, - generic: true, + name: "SubMaskedUint8x16", + argLen: 3, + generic: true, }, { - name: "NotEqualUint8x32", - argLen: 2, - commutative: true, - generic: true, + name: "SubMaskedUint8x32", + argLen: 3, + generic: true, }, { - name: "NotEqualMaskedUint8x32", - argLen: 3, - commutative: true, - generic: true, + name: "SubMaskedUint8x64", + argLen: 3, + generic: true, }, { - name: "OrUint8x32", - argLen: 2, - commutative: true, - generic: true, + name: "SubMaskedUint16x8", + argLen: 3, + generic: true, }, { - name: "PermuteUint8x32", - argLen: 2, + name: "SubMaskedUint16x16", + argLen: 3, generic: true, }, { - name: "PermuteInt8x32", - argLen: 2, + name: "SubMaskedUint16x32", + argLen: 3, generic: true, }, { - name: "Permute2Int8x32", + name: "SubMaskedUint32x4", argLen: 3, generic: true, }, { - name: "Permute2Uint8x32", + name: "SubMaskedUint32x8", argLen: 3, generic: true, }, { - name: "Permute2MaskedUint8x32", - argLen: 4, + name: "SubMaskedUint32x16", + argLen: 3, generic: true, }, { - name: "Permute2MaskedInt8x32", - argLen: 4, + name: "SubMaskedUint64x2", + argLen: 3, generic: true, }, { - name: "PermuteMaskedUint8x32", + name: "SubMaskedUint64x4", argLen: 3, generic: true, }, { - name: "PermuteMaskedInt8x32", + name: "SubMaskedUint64x8", argLen: 3, generic: true, }, { - name: "PopCountUint8x32", - argLen: 1, + name: "SubUint8x16", + argLen: 2, generic: true, }, { - name: "PopCountMaskedUint8x32", + name: "SubUint8x32", argLen: 2, generic: true, }, { - name: "SaturatedAddUint8x32", - argLen: 2, - commutative: 
true, - generic: true, + name: "SubUint8x64", + argLen: 2, + generic: true, }, { - name: "SaturatedAddMaskedUint8x32", - argLen: 3, - commutative: true, - generic: true, + name: "SubUint16x8", + argLen: 2, + generic: true, }, { - name: "SaturatedSubUint8x32", + name: "SubUint16x16", argLen: 2, generic: true, }, { - name: "SaturatedSubMaskedUint8x32", - argLen: 3, + name: "SubUint16x32", + argLen: 2, generic: true, }, { - name: "SaturatedUnsignedSignedPairDotProdUint8x32", + name: "SubUint32x4", argLen: 2, generic: true, }, { - name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x32", - argLen: 3, + name: "SubUint32x8", + argLen: 2, generic: true, }, { - name: "SubUint8x32", + name: "SubUint32x16", argLen: 2, generic: true, }, { - name: "SubMaskedUint8x32", - argLen: 3, + name: "SubUint64x2", + argLen: 2, generic: true, }, { - name: "XorUint8x32", - argLen: 2, - commutative: true, - generic: true, + name: "SubUint64x4", + argLen: 2, + generic: true, }, { - name: "AddUint8x64", - argLen: 2, - commutative: true, - generic: true, + name: "SubUint64x8", + argLen: 2, + generic: true, }, { - name: "AddMaskedUint8x64", - argLen: 3, - commutative: true, - generic: true, + name: "TruncFloat32x4", + argLen: 1, + generic: true, }, { - name: "AverageUint8x64", - argLen: 2, - commutative: true, - generic: true, + name: "TruncFloat32x8", + argLen: 1, + generic: true, }, { - name: "AverageMaskedUint8x64", - argLen: 3, - commutative: true, - generic: true, + name: "TruncFloat64x2", + argLen: 1, + generic: true, }, { - name: "CompressUint8x64", - argLen: 2, + name: "TruncFloat64x4", + argLen: 1, generic: true, }, { - name: "EqualUint8x64", - argLen: 2, - commutative: true, - generic: true, + name: "UnsignedSignedQuadDotProdAccumulateInt32x4", + argLen: 3, + generic: true, }, { - name: "EqualMaskedUint8x64", - argLen: 3, - commutative: true, - generic: true, + name: "UnsignedSignedQuadDotProdAccumulateInt32x8", + argLen: 3, + generic: true, }, { - name: "GaloisFieldMulUint8x64", - argLen: 2, + name: "UnsignedSignedQuadDotProdAccumulateInt32x16", + argLen: 3, generic: true, }, { - name: "GaloisFieldMulMaskedUint8x64", - argLen: 3, + name: "UnsignedSignedQuadDotProdAccumulateMaskedInt32x4", + argLen: 4, generic: true, }, { - name: "GreaterUint8x64", - argLen: 2, + name: "UnsignedSignedQuadDotProdAccumulateMaskedInt32x8", + argLen: 4, generic: true, }, { - name: "GreaterEqualUint8x64", - argLen: 2, + name: "UnsignedSignedQuadDotProdAccumulateMaskedInt32x16", + argLen: 4, generic: true, }, { - name: "GreaterEqualMaskedUint8x64", - argLen: 3, - generic: true, + name: "XorInt8x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "GreaterMaskedUint8x64", - argLen: 3, - generic: true, + name: "XorInt8x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "LessUint8x64", - argLen: 2, - generic: true, + name: "XorInt16x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "LessEqualUint8x64", - argLen: 2, - generic: true, + name: "XorInt16x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "LessEqualMaskedUint8x64", - argLen: 3, - generic: true, + name: "XorInt32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "LessMaskedUint8x64", - argLen: 3, - generic: true, + name: "XorInt32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaxUint8x64", + name: "XorInt32x16", argLen: 2, commutative: true, generic: true, }, { - name: "MaxMaskedUint8x64", - argLen: 3, + name: "XorInt64x2", + argLen: 2, commutative: true, generic: 
true, }, { - name: "MinUint8x64", + name: "XorInt64x4", argLen: 2, commutative: true, generic: true, }, { - name: "MinMaskedUint8x64", + name: "XorInt64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "XorMaskedInt32x4", argLen: 3, commutative: true, generic: true, }, { - name: "NotEqualUint8x64", - argLen: 2, + name: "XorMaskedInt32x8", + argLen: 3, commutative: true, generic: true, }, { - name: "NotEqualMaskedUint8x64", + name: "XorMaskedInt32x16", argLen: 3, commutative: true, generic: true, }, { - name: "PermuteInt8x64", - argLen: 2, - generic: true, + name: "XorMaskedInt64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "PermuteUint8x64", - argLen: 2, - generic: true, + name: "XorMaskedInt64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Permute2Uint8x64", - argLen: 3, - generic: true, + name: "XorMaskedInt64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Permute2Int8x64", - argLen: 3, - generic: true, + name: "XorMaskedUint32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Permute2MaskedUint8x64", - argLen: 4, - generic: true, + name: "XorMaskedUint32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Permute2MaskedInt8x64", - argLen: 4, - generic: true, + name: "XorMaskedUint32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "PermuteMaskedUint8x64", - argLen: 3, - generic: true, + name: "XorMaskedUint64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "PermuteMaskedInt8x64", - argLen: 3, - generic: true, + name: "XorMaskedUint64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "PopCountUint8x64", - argLen: 1, - generic: true, + name: "XorMaskedUint64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "PopCountMaskedUint8x64", - argLen: 2, - generic: true, + name: "XorUint8x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SaturatedAddUint8x64", + name: "XorUint8x32", argLen: 2, commutative: true, generic: true, }, { - name: "SaturatedAddMaskedUint8x64", - argLen: 3, + name: "XorUint16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "SaturatedSubUint8x64", - argLen: 2, - generic: true, + name: "XorUint16x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SaturatedSubMaskedUint8x64", - argLen: 3, - generic: true, + name: "XorUint32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SaturatedUnsignedSignedPairDotProdUint8x64", - argLen: 2, - generic: true, + name: "XorUint32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x64", - argLen: 3, - generic: true, + name: "XorUint32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SubUint8x64", - argLen: 2, - generic: true, + name: "XorUint64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SubMaskedUint8x64", - argLen: 3, - generic: true, + name: "XorUint64x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "CeilWithPrecisionFloat32x16", - auxType: auxInt8, - argLen: 1, - generic: true, + name: "XorUint64x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "CeilWithPrecisionMaskedFloat32x16", + name: "CeilWithPrecisionFloat32x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "DiffWithCeilWithPrecisionFloat32x16", + name: "CeilWithPrecisionFloat32x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: 
"DiffWithCeilWithPrecisionMaskedFloat32x16", + name: "CeilWithPrecisionFloat32x16", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "DiffWithFloorWithPrecisionFloat32x16", + name: "CeilWithPrecisionFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithFloorWithPrecisionMaskedFloat32x16", + name: "CeilWithPrecisionFloat64x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "DiffWithRoundWithPrecisionFloat32x16", + name: "CeilWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithRoundWithPrecisionMaskedFloat32x16", + name: "CeilWithPrecisionMaskedFloat32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithTruncWithPrecisionFloat32x16", + name: "CeilWithPrecisionMaskedFloat32x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithTruncWithPrecisionMaskedFloat32x16", + name: "CeilWithPrecisionMaskedFloat32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "FloorWithPrecisionFloat32x16", + name: "CeilWithPrecisionMaskedFloat64x2", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "FloorWithPrecisionMaskedFloat32x16", + name: "CeilWithPrecisionMaskedFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RoundWithPrecisionFloat32x16", + name: "CeilWithPrecisionMaskedFloat64x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "RoundWithPrecisionMaskedFloat32x16", + name: "DiffWithCeilWithPrecisionFloat32x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "TruncWithPrecisionFloat32x16", + name: "DiffWithCeilWithPrecisionFloat32x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "TruncWithPrecisionMaskedFloat32x16", + name: "DiffWithCeilWithPrecisionFloat32x16", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "CeilWithPrecisionFloat32x4", + name: "DiffWithCeilWithPrecisionFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "CeilWithPrecisionMaskedFloat32x4", + name: "DiffWithCeilWithPrecisionFloat64x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "DiffWithCeilWithPrecisionFloat32x4", + name: "DiffWithCeilWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, @@ -69454,111 +69448,111 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "DiffWithFloorWithPrecisionFloat32x4", + name: "DiffWithCeilWithPrecisionMaskedFloat32x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithFloorWithPrecisionMaskedFloat32x4", + name: "DiffWithCeilWithPrecisionMaskedFloat32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithRoundWithPrecisionFloat32x4", + name: "DiffWithCeilWithPrecisionMaskedFloat64x2", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithRoundWithPrecisionMaskedFloat32x4", + name: "DiffWithCeilWithPrecisionMaskedFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithTruncWithPrecisionFloat32x4", + name: "DiffWithCeilWithPrecisionMaskedFloat64x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithTruncWithPrecisionMaskedFloat32x4", + name: "DiffWithFloorWithPrecisionFloat32x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "FloorWithPrecisionFloat32x4", + name: "DiffWithFloorWithPrecisionFloat32x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "FloorWithPrecisionMaskedFloat32x4", + 
name: "DiffWithFloorWithPrecisionFloat32x16", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "RoundWithPrecisionFloat32x4", + name: "DiffWithFloorWithPrecisionFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RoundWithPrecisionMaskedFloat32x4", + name: "DiffWithFloorWithPrecisionFloat64x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "TruncWithPrecisionFloat32x4", + name: "DiffWithFloorWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "TruncWithPrecisionMaskedFloat32x4", + name: "DiffWithFloorWithPrecisionMaskedFloat32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "CeilWithPrecisionFloat32x8", + name: "DiffWithFloorWithPrecisionMaskedFloat32x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "CeilWithPrecisionMaskedFloat32x8", + name: "DiffWithFloorWithPrecisionMaskedFloat32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithCeilWithPrecisionFloat32x8", + name: "DiffWithFloorWithPrecisionMaskedFloat64x2", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithCeilWithPrecisionMaskedFloat32x8", + name: "DiffWithFloorWithPrecisionMaskedFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithFloorWithPrecisionFloat32x8", + name: "DiffWithFloorWithPrecisionMaskedFloat64x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithFloorWithPrecisionMaskedFloat32x8", + name: "DiffWithRoundWithPrecisionFloat32x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { @@ -69568,1353 +69562,1359 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "DiffWithRoundWithPrecisionMaskedFloat32x8", + name: "DiffWithRoundWithPrecisionFloat32x16", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "DiffWithTruncWithPrecisionFloat32x8", + name: "DiffWithRoundWithPrecisionFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithTruncWithPrecisionMaskedFloat32x8", + name: "DiffWithRoundWithPrecisionFloat64x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "FloorWithPrecisionFloat32x8", + name: "DiffWithRoundWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "FloorWithPrecisionMaskedFloat32x8", + name: "DiffWithRoundWithPrecisionMaskedFloat32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "Get128Float32x8", + name: "DiffWithRoundWithPrecisionMaskedFloat32x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "RoundWithPrecisionFloat32x8", + name: "DiffWithRoundWithPrecisionMaskedFloat32x16", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "RoundWithPrecisionMaskedFloat32x8", + name: "DiffWithRoundWithPrecisionMaskedFloat64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "Set128Float32x8", + name: "DiffWithRoundWithPrecisionMaskedFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "TruncWithPrecisionFloat32x8", + name: "DiffWithRoundWithPrecisionMaskedFloat64x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "TruncWithPrecisionMaskedFloat32x8", + name: "DiffWithTruncWithPrecisionFloat32x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "CeilWithPrecisionFloat64x2", + name: "DiffWithTruncWithPrecisionFloat32x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "CeilWithPrecisionMaskedFloat64x2", + name: 
"DiffWithTruncWithPrecisionFloat32x16", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "DiffWithCeilWithPrecisionFloat64x2", + name: "DiffWithTruncWithPrecisionFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithCeilWithPrecisionMaskedFloat64x2", + name: "DiffWithTruncWithPrecisionFloat64x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "DiffWithFloorWithPrecisionFloat64x2", + name: "DiffWithTruncWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithFloorWithPrecisionMaskedFloat64x2", + name: "DiffWithTruncWithPrecisionMaskedFloat32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithRoundWithPrecisionFloat64x2", + name: "DiffWithTruncWithPrecisionMaskedFloat32x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithRoundWithPrecisionMaskedFloat64x2", + name: "DiffWithTruncWithPrecisionMaskedFloat32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithTruncWithPrecisionFloat64x2", + name: "DiffWithTruncWithPrecisionMaskedFloat64x2", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithTruncWithPrecisionMaskedFloat64x2", + name: "DiffWithTruncWithPrecisionMaskedFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "FloorWithPrecisionFloat64x2", + name: "DiffWithTruncWithPrecisionMaskedFloat64x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "FloorWithPrecisionMaskedFloat64x2", + name: "FloorWithPrecisionFloat32x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "RoundWithPrecisionFloat64x2", + name: "FloorWithPrecisionFloat32x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RoundWithPrecisionMaskedFloat64x2", + name: "FloorWithPrecisionFloat32x16", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "TruncWithPrecisionFloat64x2", + name: "FloorWithPrecisionFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "TruncWithPrecisionMaskedFloat64x2", + name: "FloorWithPrecisionFloat64x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "CeilWithPrecisionFloat64x4", + name: "FloorWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "CeilWithPrecisionMaskedFloat64x4", + name: "FloorWithPrecisionMaskedFloat32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithCeilWithPrecisionFloat64x4", + name: "FloorWithPrecisionMaskedFloat32x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithCeilWithPrecisionMaskedFloat64x4", + name: "FloorWithPrecisionMaskedFloat32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithFloorWithPrecisionFloat64x4", + name: "FloorWithPrecisionMaskedFloat64x2", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithFloorWithPrecisionMaskedFloat64x4", + name: "FloorWithPrecisionMaskedFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithRoundWithPrecisionFloat64x4", + name: "FloorWithPrecisionMaskedFloat64x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithRoundWithPrecisionMaskedFloat64x4", + name: "GaloisFieldAffineTransformInverseMaskedUint8x16", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "DiffWithTruncWithPrecisionFloat64x4", + name: "GaloisFieldAffineTransformInverseMaskedUint8x32", auxType: auxInt8, - argLen: 1, + argLen: 3, 
generic: true, }, { - name: "DiffWithTruncWithPrecisionMaskedFloat64x4", + name: "GaloisFieldAffineTransformInverseMaskedUint8x64", + auxType: auxInt8, + argLen: 3, + generic: true, + }, + { + name: "GaloisFieldAffineTransformInverseUint8x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "FloorWithPrecisionFloat64x4", + name: "GaloisFieldAffineTransformInverseUint8x32", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "FloorWithPrecisionMaskedFloat64x4", + name: "GaloisFieldAffineTransformInverseUint8x64", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "Get128Float64x4", + name: "GaloisFieldAffineTransformMaskedUint8x16", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "RoundWithPrecisionFloat64x4", + name: "GaloisFieldAffineTransformMaskedUint8x32", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "RoundWithPrecisionMaskedFloat64x4", + name: "GaloisFieldAffineTransformMaskedUint8x64", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "Set128Float64x4", + name: "GaloisFieldAffineTransformUint8x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "TruncWithPrecisionFloat64x4", + name: "GaloisFieldAffineTransformUint8x32", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "TruncWithPrecisionMaskedFloat64x4", + name: "GaloisFieldAffineTransformUint8x64", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "CeilWithPrecisionFloat64x8", + name: "Get128Float32x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "CeilWithPrecisionMaskedFloat64x8", + name: "Get128Float64x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "DiffWithCeilWithPrecisionFloat64x8", + name: "Get128Int8x32", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithCeilWithPrecisionMaskedFloat64x8", + name: "Get128Int16x16", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "DiffWithFloorWithPrecisionFloat64x8", + name: "Get128Int32x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithFloorWithPrecisionMaskedFloat64x8", + name: "Get128Int64x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "DiffWithRoundWithPrecisionFloat64x8", + name: "Get128Uint8x32", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithRoundWithPrecisionMaskedFloat64x8", + name: "Get128Uint16x16", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "DiffWithTruncWithPrecisionFloat64x8", + name: "Get128Uint32x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithTruncWithPrecisionMaskedFloat64x8", + name: "Get128Uint64x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "FloorWithPrecisionFloat64x8", + name: "GetElemInt8x16", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "FloorWithPrecisionMaskedFloat64x8", + name: "GetElemInt16x8", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "RoundWithPrecisionFloat64x8", + name: "GetElemInt32x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RoundWithPrecisionMaskedFloat64x8", + name: "GetElemInt64x2", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "TruncWithPrecisionFloat64x8", + name: "GetElemUint8x16", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "TruncWithPrecisionMaskedFloat64x8", + name: "GetElemUint16x8", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "Get128Int16x16", + name: 
"GetElemUint32x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "Set128Int16x16", + name: "GetElemUint64x2", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromInt16x16", + name: "RotateAllLeftInt32x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedInt16x16", + name: "RotateAllLeftInt32x8", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt16x16", + name: "RotateAllLeftInt32x16", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedInt16x16", + name: "RotateAllLeftInt64x2", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromInt16x32", + name: "RotateAllLeftInt64x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedInt16x32", + name: "RotateAllLeftInt64x8", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt16x32", + name: "RotateAllLeftMaskedInt32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedInt16x32", + name: "RotateAllLeftMaskedInt32x8", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, { - name: "GetElemInt16x8", + name: "RotateAllLeftMaskedInt32x16", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "SetElemInt16x8", + name: "RotateAllLeftMaskedInt64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromInt16x8", + name: "RotateAllLeftMaskedInt64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedInt16x8", + name: "RotateAllLeftMaskedInt64x8", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt16x8", + name: "RotateAllLeftMaskedUint32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedInt16x8", + name: "RotateAllLeftMaskedUint32x8", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, { - name: "RotateAllLeftInt32x16", + name: "RotateAllLeftMaskedUint32x16", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "RotateAllLeftMaskedInt32x16", + name: "RotateAllLeftMaskedUint64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RotateAllRightInt32x16", + name: "RotateAllLeftMaskedUint64x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "RotateAllRightMaskedInt32x16", + name: "RotateAllLeftMaskedUint64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromInt32x16", + name: "RotateAllLeftUint32x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedInt32x16", + name: "RotateAllLeftUint32x8", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt32x16", + name: "RotateAllLeftUint32x16", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedInt32x16", + name: "RotateAllLeftUint64x2", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "GetElemInt32x4", + name: "RotateAllLeftUint64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RotateAllLeftInt32x4", + name: "RotateAllLeftUint64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: 
"RotateAllLeftMaskedInt32x4", + name: "RotateAllRightInt32x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "RotateAllRightInt32x4", + name: "RotateAllRightInt32x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RotateAllRightMaskedInt32x4", + name: "RotateAllRightInt32x16", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "SetElemInt32x4", + name: "RotateAllRightInt64x2", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromInt32x4", + name: "RotateAllRightInt64x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedInt32x4", + name: "RotateAllRightInt64x8", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt32x4", + name: "RotateAllRightMaskedInt32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedInt32x4", + name: "RotateAllRightMaskedInt32x8", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, { - name: "Get128Int32x8", + name: "RotateAllRightMaskedInt32x16", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "RotateAllLeftInt32x8", + name: "RotateAllRightMaskedInt64x2", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "RotateAllLeftMaskedInt32x8", + name: "RotateAllRightMaskedInt64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RotateAllRightInt32x8", + name: "RotateAllRightMaskedInt64x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "RotateAllRightMaskedInt32x8", + name: "RotateAllRightMaskedUint32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "Set128Int32x8", + name: "RotateAllRightMaskedUint32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromInt32x8", + name: "RotateAllRightMaskedUint32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedInt32x8", + name: "RotateAllRightMaskedUint64x2", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt32x8", + name: "RotateAllRightMaskedUint64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedInt32x8", + name: "RotateAllRightMaskedUint64x8", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, { - name: "GetElemInt64x2", + name: "RotateAllRightUint32x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RotateAllLeftInt64x2", + name: "RotateAllRightUint32x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RotateAllLeftMaskedInt64x2", + name: "RotateAllRightUint32x16", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "RotateAllRightInt64x2", + name: "RotateAllRightUint64x2", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RotateAllRightMaskedInt64x2", + name: "RotateAllRightUint64x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "SetElemInt64x2", + name: "RotateAllRightUint64x8", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromInt64x2", + name: "RoundWithPrecisionFloat32x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedInt64x2", + name: "RoundWithPrecisionFloat32x8", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt64x2", + name: 
"RoundWithPrecisionFloat32x16", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedInt64x2", + name: "RoundWithPrecisionFloat64x2", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "Get128Int64x4", + name: "RoundWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RotateAllLeftInt64x4", + name: "RoundWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RotateAllLeftMaskedInt64x4", + name: "RoundWithPrecisionMaskedFloat32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RotateAllRightInt64x4", + name: "RoundWithPrecisionMaskedFloat32x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "RotateAllRightMaskedInt64x4", + name: "RoundWithPrecisionMaskedFloat32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "Set128Int64x4", + name: "RoundWithPrecisionMaskedFloat64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromInt64x4", + name: "RoundWithPrecisionMaskedFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedInt64x4", + name: "RoundWithPrecisionMaskedFloat64x8", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt64x4", + name: "Set128Float32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedInt64x4", + name: "Set128Float64x4", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, { - name: "RotateAllLeftInt64x8", + name: "Set128Int8x32", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "RotateAllLeftMaskedInt64x8", + name: "Set128Int16x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RotateAllRightInt64x8", + name: "Set128Int32x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "RotateAllRightMaskedInt64x8", + name: "Set128Int64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromInt64x8", + name: "Set128Uint8x32", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedInt64x8", + name: "Set128Uint16x16", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt64x8", + name: "Set128Uint32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedInt64x8", + name: "Set128Uint64x4", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, { - name: "GetElemInt8x16", + name: "SetElemInt8x16", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "SetElemInt8x16", + name: "SetElemInt16x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "Get128Int8x32", + name: "SetElemInt32x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "Set128Int8x32", + name: "SetElemInt64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "Get128Uint16x16", + name: "SetElemUint8x16", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "Set128Uint16x16", + name: "SetElemUint16x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromUint16x16", + name: "SetElemUint32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedUint16x16", + name: "SetElemUint64x2", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, { - name: 
"ShiftAllRightAndFillUpperFromUint16x16", + name: "ShiftAllLeftAndFillUpperFromInt16x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedUint16x16", + name: "ShiftAllLeftAndFillUpperFromInt16x16", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromUint16x32", + name: "ShiftAllLeftAndFillUpperFromInt16x32", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedUint16x32", + name: "ShiftAllLeftAndFillUpperFromInt32x4", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromUint16x32", + name: "ShiftAllLeftAndFillUpperFromInt32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedUint16x32", + name: "ShiftAllLeftAndFillUpperFromInt32x16", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, { - name: "GetElemUint16x8", + name: "ShiftAllLeftAndFillUpperFromInt64x2", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "SetElemUint16x8", + name: "ShiftAllLeftAndFillUpperFromInt64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromUint16x8", + name: "ShiftAllLeftAndFillUpperFromInt64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedUint16x8", + name: "ShiftAllLeftAndFillUpperFromMaskedInt16x8", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromUint16x8", + name: "ShiftAllLeftAndFillUpperFromMaskedInt16x16", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedUint16x8", + name: "ShiftAllLeftAndFillUpperFromMaskedInt16x32", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "RotateAllLeftUint32x16", + name: "ShiftAllLeftAndFillUpperFromMaskedInt32x4", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "RotateAllLeftMaskedUint32x16", + name: "ShiftAllLeftAndFillUpperFromMaskedInt32x8", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "RotateAllRightUint32x16", + name: "ShiftAllLeftAndFillUpperFromMaskedInt32x16", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "RotateAllRightMaskedUint32x16", + name: "ShiftAllLeftAndFillUpperFromMaskedInt64x2", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromUint32x16", + name: "ShiftAllLeftAndFillUpperFromMaskedInt64x4", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedUint32x16", + name: "ShiftAllLeftAndFillUpperFromMaskedInt64x8", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromUint32x16", + name: "ShiftAllLeftAndFillUpperFromMaskedUint16x8", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedUint32x16", + name: "ShiftAllLeftAndFillUpperFromMaskedUint16x16", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "GetElemUint32x4", + name: "ShiftAllLeftAndFillUpperFromMaskedUint16x32", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "RotateAllLeftUint32x4", + name: "ShiftAllLeftAndFillUpperFromMaskedUint32x4", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "RotateAllLeftMaskedUint32x4", + name: "ShiftAllLeftAndFillUpperFromMaskedUint32x8", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: 
"RotateAllRightUint32x4", + name: "ShiftAllLeftAndFillUpperFromMaskedUint32x16", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "RotateAllRightMaskedUint32x4", + name: "ShiftAllLeftAndFillUpperFromMaskedUint64x2", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "SetElemUint32x4", + name: "ShiftAllLeftAndFillUpperFromMaskedUint64x4", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromUint32x4", + name: "ShiftAllLeftAndFillUpperFromMaskedUint64x8", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedUint32x4", + name: "ShiftAllLeftAndFillUpperFromUint16x8", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromUint32x4", + name: "ShiftAllLeftAndFillUpperFromUint16x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedUint32x4", + name: "ShiftAllLeftAndFillUpperFromUint16x32", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, { - name: "Get128Uint32x8", + name: "ShiftAllLeftAndFillUpperFromUint32x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "RotateAllLeftUint32x8", + name: "ShiftAllLeftAndFillUpperFromUint32x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "RotateAllLeftMaskedUint32x8", + name: "ShiftAllLeftAndFillUpperFromUint32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RotateAllRightUint32x8", + name: "ShiftAllLeftAndFillUpperFromUint64x2", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "RotateAllRightMaskedUint32x8", + name: "ShiftAllLeftAndFillUpperFromUint64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "Set128Uint32x8", + name: "ShiftAllLeftAndFillUpperFromUint64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromUint32x8", + name: "ShiftAllRightAndFillUpperFromInt16x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedUint32x8", + name: "ShiftAllRightAndFillUpperFromInt16x16", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromUint32x8", + name: "ShiftAllRightAndFillUpperFromInt16x32", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedUint32x8", + name: "ShiftAllRightAndFillUpperFromInt32x4", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, { - name: "GetElemUint64x2", + name: "ShiftAllRightAndFillUpperFromInt32x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "RotateAllLeftUint64x2", + name: "ShiftAllRightAndFillUpperFromInt32x16", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "RotateAllLeftMaskedUint64x2", + name: "ShiftAllRightAndFillUpperFromInt64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RotateAllRightUint64x2", + name: "ShiftAllRightAndFillUpperFromInt64x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "RotateAllRightMaskedUint64x2", + name: "ShiftAllRightAndFillUpperFromInt64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "SetElemUint64x2", + name: "ShiftAllRightAndFillUpperFromMaskedInt16x8", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromUint64x2", + name: "ShiftAllRightAndFillUpperFromMaskedInt16x16", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: 
true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedUint64x2", + name: "ShiftAllRightAndFillUpperFromMaskedInt16x32", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromUint64x2", + name: "ShiftAllRightAndFillUpperFromMaskedInt32x4", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedUint64x2", + name: "ShiftAllRightAndFillUpperFromMaskedInt32x8", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "Get128Uint64x4", + name: "ShiftAllRightAndFillUpperFromMaskedInt32x16", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "RotateAllLeftUint64x4", + name: "ShiftAllRightAndFillUpperFromMaskedInt64x2", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "RotateAllLeftMaskedUint64x4", + name: "ShiftAllRightAndFillUpperFromMaskedInt64x4", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "RotateAllRightUint64x4", + name: "ShiftAllRightAndFillUpperFromMaskedInt64x8", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "RotateAllRightMaskedUint64x4", + name: "ShiftAllRightAndFillUpperFromMaskedUint16x8", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "Set128Uint64x4", + name: "ShiftAllRightAndFillUpperFromMaskedUint16x16", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromUint64x4", + name: "ShiftAllRightAndFillUpperFromMaskedUint16x32", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedUint64x4", + name: "ShiftAllRightAndFillUpperFromMaskedUint32x4", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromUint64x4", + name: "ShiftAllRightAndFillUpperFromMaskedUint32x8", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedUint64x4", + name: "ShiftAllRightAndFillUpperFromMaskedUint32x16", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "RotateAllLeftUint64x8", + name: "ShiftAllRightAndFillUpperFromMaskedUint64x2", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "RotateAllLeftMaskedUint64x8", + name: "ShiftAllRightAndFillUpperFromMaskedUint64x4", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "RotateAllRightUint64x8", + name: "ShiftAllRightAndFillUpperFromMaskedUint64x8", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "RotateAllRightMaskedUint64x8", + name: "ShiftAllRightAndFillUpperFromUint16x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromUint64x8", + name: "ShiftAllRightAndFillUpperFromUint16x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedUint64x8", + name: "ShiftAllRightAndFillUpperFromUint16x32", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromUint64x8", + name: "ShiftAllRightAndFillUpperFromUint32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedUint64x8", + name: "ShiftAllRightAndFillUpperFromUint32x8", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, { - name: "GaloisFieldAffineTransformUint8x16", + name: "ShiftAllRightAndFillUpperFromUint32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "GaloisFieldAffineTransformInverseUint8x16", + name: "ShiftAllRightAndFillUpperFromUint64x2", auxType: auxInt8, 
 		argLen:  2,
 		generic: true,
 	},
 	{
-		name:    "GaloisFieldAffineTransformInverseMaskedUint8x16",
+		name:    "ShiftAllRightAndFillUpperFromUint64x4",
 		auxType: auxInt8,
-		argLen:  3,
+		argLen:  2,
 		generic: true,
 	},
 	{
-		name:    "GaloisFieldAffineTransformMaskedUint8x16",
+		name:    "ShiftAllRightAndFillUpperFromUint64x8",
 		auxType: auxInt8,
-		argLen:  3,
+		argLen:  2,
 		generic: true,
 	},
 	{
-		name:    "GetElemUint8x16",
+		name:    "TruncWithPrecisionFloat32x4",
 		auxType: auxInt8,
 		argLen:  1,
 		generic: true,
 	},
 	{
-		name:    "SetElemUint8x16",
+		name:    "TruncWithPrecisionFloat32x8",
 		auxType: auxInt8,
-		argLen:  2,
+		argLen:  1,
 		generic: true,
 	},
 	{
-		name:    "GaloisFieldAffineTransformUint8x32",
+		name:    "TruncWithPrecisionFloat32x16",
 		auxType: auxInt8,
-		argLen:  2,
+		argLen:  1,
 		generic: true,
 	},
 	{
-		name:    "GaloisFieldAffineTransformInverseUint8x32",
+		name:    "TruncWithPrecisionFloat64x2",
 		auxType: auxInt8,
-		argLen:  2,
+		argLen:  1,
 		generic: true,
 	},
 	{
-		name:    "GaloisFieldAffineTransformInverseMaskedUint8x32",
+		name:    "TruncWithPrecisionFloat64x4",
 		auxType: auxInt8,
-		argLen:  3,
+		argLen:  1,
 		generic: true,
 	},
 	{
-		name:    "GaloisFieldAffineTransformMaskedUint8x32",
+		name:    "TruncWithPrecisionFloat64x8",
 		auxType: auxInt8,
-		argLen:  3,
+		argLen:  1,
 		generic: true,
 	},
 	{
-		name:    "Get128Uint8x32",
+		name:    "TruncWithPrecisionMaskedFloat32x4",
 		auxType: auxInt8,
-		argLen:  1,
+		argLen:  2,
 		generic: true,
 	},
 	{
-		name:    "Set128Uint8x32",
+		name:    "TruncWithPrecisionMaskedFloat32x8",
 		auxType: auxInt8,
 		argLen:  2,
 		generic: true,
 	},
 	{
-		name:    "GaloisFieldAffineTransformUint8x64",
+		name:    "TruncWithPrecisionMaskedFloat32x16",
 		auxType: auxInt8,
 		argLen:  2,
 		generic: true,
 	},
 	{
-		name:    "GaloisFieldAffineTransformInverseUint8x64",
+		name:    "TruncWithPrecisionMaskedFloat64x2",
 		auxType: auxInt8,
 		argLen:  2,
 		generic: true,
 	},
 	{
-		name:    "GaloisFieldAffineTransformInverseMaskedUint8x64",
+		name:    "TruncWithPrecisionMaskedFloat64x4",
 		auxType: auxInt8,
-		argLen:  3,
+		argLen:  2,
 		generic: true,
 	},
 	{
-		name:    "GaloisFieldAffineTransformMaskedUint8x64",
+		name:    "TruncWithPrecisionMaskedFloat64x8",
 		auxType: auxInt8,
-		argLen:  3,
+		argLen:  2,
 		generic: true,
 	},
 }

From ec5c20ba5a8b056ab2958bfac9c2093afcbdb326 Mon Sep 17 00:00:00 2001
From: David Chase
Date: Tue, 22 Jul 2025 15:02:45 -0400
Subject: [PATCH 100/139] [dev.simd] cmd/compile: generated simd code to add
 some conversions

Generated by arch/internal/simdgen CL 689735

A small number of conversions for testing purposes

Change-Id: I4d52c643d08c02794c3fea9778bb1ecbb5507de4
Reviewed-on: https://go-review.googlesource.com/c/go/+/689716
Reviewed-by: Junyang Shao
LUCI-TryBot-Result: Go LUCI
---
 src/cmd/compile/internal/amd64/simdssa.go     |  18 ++
 .../compile/internal/ssa/_gen/simdAMD64.rules |  12 +
 .../compile/internal/ssa/_gen/simdAMD64ops.go |  12 +
 .../internal/ssa/_gen/simdgenericOps.go       |  12 +
 src/cmd/compile/internal/ssa/opGen.go         | 246 ++++++++++++++++++
 src/cmd/compile/internal/ssa/rewriteAMD64.go  | 126 +++++++++
 .../compile/internal/ssagen/simdintrinsics.go |  12 +
 src/simd/ops_amd64.go                         |  80 ++++++
 8 files changed, 518 insertions(+)

diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go
index f374cd25d0a561..d4126cef1e3eaf 100644
--- a/src/cmd/compile/internal/amd64/simdssa.go
+++ b/src/cmd/compile/internal/amd64/simdssa.go
@@ -36,6 +36,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool {
 		ssa.OpAMD64VRSQRT14PD128,
 		ssa.OpAMD64VRSQRT14PD256,
 		ssa.OpAMD64VRSQRT14PD512,
+		ssa.OpAMD64VCVTTPS2DQ128,
+		ssa.OpAMD64VCVTTPS2DQ256,
+		ssa.OpAMD64VCVTTPS2DQ512,
+		ssa.OpAMD64VCVTPS2UDQ128,
+
ssa.OpAMD64VCVTPS2UDQ256, + ssa.OpAMD64VCVTPS2UDQ512, ssa.OpAMD64VPOPCNTB128, ssa.OpAMD64VPOPCNTB256, ssa.OpAMD64VPOPCNTB512, @@ -628,6 +634,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPCOMPRESSQMasked128, ssa.OpAMD64VPCOMPRESSQMasked256, ssa.OpAMD64VPCOMPRESSQMasked512, + ssa.OpAMD64VCVTTPS2DQMasked128, + ssa.OpAMD64VCVTTPS2DQMasked256, + ssa.OpAMD64VCVTTPS2DQMasked512, + ssa.OpAMD64VCVTPS2UDQMasked128, + ssa.OpAMD64VCVTPS2UDQMasked256, + ssa.OpAMD64VCVTPS2UDQMasked512, ssa.OpAMD64VPOPCNTBMasked128, ssa.OpAMD64VPOPCNTBMasked256, ssa.OpAMD64VPOPCNTBMasked512, @@ -1124,6 +1136,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPCOMPRESSQMasked128, ssa.OpAMD64VPCOMPRESSQMasked256, ssa.OpAMD64VPCOMPRESSQMasked512, + ssa.OpAMD64VCVTTPS2DQMasked128, + ssa.OpAMD64VCVTTPS2DQMasked256, + ssa.OpAMD64VCVTTPS2DQMasked512, + ssa.OpAMD64VCVTPS2UDQMasked128, + ssa.OpAMD64VCVTPS2UDQMasked256, + ssa.OpAMD64VCVTPS2UDQMasked512, ssa.OpAMD64VREDUCEPSMasked128, ssa.OpAMD64VREDUCEPSMasked256, ssa.OpAMD64VREDUCEPSMasked512, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index fb153acf66e074..e5e3fb0d50e715 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -234,6 +234,18 @@ (CompressUint64x2 x mask) => (VPCOMPRESSQMasked128 x (VPMOVVec64x2ToM mask)) (CompressUint64x4 x mask) => (VPCOMPRESSQMasked256 x (VPMOVVec64x4ToM mask)) (CompressUint64x8 x mask) => (VPCOMPRESSQMasked512 x (VPMOVVec64x8ToM mask)) +(ConvertToInt32Float32x4 ...) => (VCVTTPS2DQ128 ...) +(ConvertToInt32Float32x8 ...) => (VCVTTPS2DQ256 ...) +(ConvertToInt32Float32x16 ...) => (VCVTTPS2DQ512 ...) +(ConvertToInt32MaskedFloat32x4 x mask) => (VCVTTPS2DQMasked128 x (VPMOVVec32x4ToM mask)) +(ConvertToInt32MaskedFloat32x8 x mask) => (VCVTTPS2DQMasked256 x (VPMOVVec32x8ToM mask)) +(ConvertToInt32MaskedFloat32x16 x mask) => (VCVTTPS2DQMasked512 x (VPMOVVec32x16ToM mask)) +(ConvertToUint32Float32x4 ...) => (VCVTPS2UDQ128 ...) +(ConvertToUint32Float32x8 ...) => (VCVTPS2UDQ256 ...) +(ConvertToUint32Float32x16 ...) => (VCVTPS2UDQ512 ...) 
+(ConvertToUint32MaskedFloat32x4 x mask) => (VCVTPS2UDQMasked128 x (VPMOVVec32x4ToM mask)) +(ConvertToUint32MaskedFloat32x8 x mask) => (VCVTPS2UDQMasked256 x (VPMOVVec32x8ToM mask)) +(ConvertToUint32MaskedFloat32x16 x mask) => (VCVTPS2UDQMasked512 x (VPMOVVec32x16ToM mask)) (DiffWithCeilWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+2] x) (DiffWithCeilWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+2] x) (DiffWithCeilWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+2] x) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 3ab0eb527f89b6..adb6dd968f581d 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -25,6 +25,18 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VCOMPRESSPSMasked128", argLength: 2, reg: wkw, asm: "VCOMPRESSPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VCOMPRESSPSMasked256", argLength: 2, reg: wkw, asm: "VCOMPRESSPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VCOMPRESSPSMasked512", argLength: 2, reg: wkw, asm: "VCOMPRESSPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VCVTPS2UDQ128", argLength: 1, reg: w11, asm: "VCVTPS2UDQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VCVTPS2UDQ256", argLength: 1, reg: w11, asm: "VCVTPS2UDQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VCVTPS2UDQ512", argLength: 1, reg: w11, asm: "VCVTPS2UDQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VCVTPS2UDQMasked128", argLength: 2, reg: wkw, asm: "VCVTPS2UDQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VCVTPS2UDQMasked256", argLength: 2, reg: wkw, asm: "VCVTPS2UDQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VCVTPS2UDQMasked512", argLength: 2, reg: wkw, asm: "VCVTPS2UDQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VCVTTPS2DQ128", argLength: 1, reg: v11, asm: "VCVTTPS2DQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VCVTTPS2DQ256", argLength: 1, reg: v11, asm: "VCVTTPS2DQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VCVTTPS2DQ512", argLength: 1, reg: w11, asm: "VCVTTPS2DQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VCVTTPS2DQMasked128", argLength: 2, reg: wkw, asm: "VCVTTPS2DQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VCVTTPS2DQMasked256", argLength: 2, reg: wkw, asm: "VCVTTPS2DQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VCVTTPS2DQMasked512", argLength: 2, reg: wkw, asm: "VCVTTPS2DQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VDIVPD128", argLength: 2, reg: v21, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPD256", argLength: 2, reg: v21, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPD512", argLength: 2, reg: w21, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 654c1ee17180ee..f1c1246d2400f3 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -225,6 +225,18 @@ func simdGenericOps() []opData { {name: "CompressUint64x2", argLength: 2, commutative: false}, {name: 
"CompressUint64x4", argLength: 2, commutative: false}, {name: "CompressUint64x8", argLength: 2, commutative: false}, + {name: "ConvertToInt32Float32x4", argLength: 1, commutative: false}, + {name: "ConvertToInt32Float32x8", argLength: 1, commutative: false}, + {name: "ConvertToInt32Float32x16", argLength: 1, commutative: false}, + {name: "ConvertToInt32MaskedFloat32x4", argLength: 2, commutative: false}, + {name: "ConvertToInt32MaskedFloat32x8", argLength: 2, commutative: false}, + {name: "ConvertToInt32MaskedFloat32x16", argLength: 2, commutative: false}, + {name: "ConvertToUint32Float32x4", argLength: 1, commutative: false}, + {name: "ConvertToUint32Float32x8", argLength: 1, commutative: false}, + {name: "ConvertToUint32Float32x16", argLength: 1, commutative: false}, + {name: "ConvertToUint32MaskedFloat32x4", argLength: 2, commutative: false}, + {name: "ConvertToUint32MaskedFloat32x8", argLength: 2, commutative: false}, + {name: "ConvertToUint32MaskedFloat32x16", argLength: 2, commutative: false}, {name: "DivFloat32x4", argLength: 2, commutative: false}, {name: "DivFloat32x8", argLength: 2, commutative: false}, {name: "DivFloat32x16", argLength: 2, commutative: false}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 89e0d853dcbb04..b9dc41e86074cc 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1230,6 +1230,18 @@ const ( OpAMD64VCOMPRESSPSMasked128 OpAMD64VCOMPRESSPSMasked256 OpAMD64VCOMPRESSPSMasked512 + OpAMD64VCVTPS2UDQ128 + OpAMD64VCVTPS2UDQ256 + OpAMD64VCVTPS2UDQ512 + OpAMD64VCVTPS2UDQMasked128 + OpAMD64VCVTPS2UDQMasked256 + OpAMD64VCVTPS2UDQMasked512 + OpAMD64VCVTTPS2DQ128 + OpAMD64VCVTTPS2DQ256 + OpAMD64VCVTTPS2DQ512 + OpAMD64VCVTTPS2DQMasked128 + OpAMD64VCVTTPS2DQMasked256 + OpAMD64VCVTTPS2DQMasked512 OpAMD64VDIVPD128 OpAMD64VDIVPD256 OpAMD64VDIVPD512 @@ -4671,6 +4683,18 @@ const ( OpCompressUint64x2 OpCompressUint64x4 OpCompressUint64x8 + OpConvertToInt32Float32x4 + OpConvertToInt32Float32x8 + OpConvertToInt32Float32x16 + OpConvertToInt32MaskedFloat32x4 + OpConvertToInt32MaskedFloat32x8 + OpConvertToInt32MaskedFloat32x16 + OpConvertToUint32Float32x4 + OpConvertToUint32Float32x8 + OpConvertToUint32Float32x16 + OpConvertToUint32MaskedFloat32x4 + OpConvertToUint32MaskedFloat32x8 + OpConvertToUint32MaskedFloat32x16 OpDivFloat32x4 OpDivFloat32x8 OpDivFloat32x16 @@ -19331,6 +19355,168 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VCVTPS2UDQ128", + argLen: 1, + asm: x86.AVCVTPS2UDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VCVTPS2UDQ256", + argLen: 1, + asm: x86.AVCVTPS2UDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VCVTPS2UDQ512", + argLen: 1, + asm: x86.AVCVTPS2UDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 
X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VCVTPS2UDQMasked128", + argLen: 2, + asm: x86.AVCVTPS2UDQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VCVTPS2UDQMasked256", + argLen: 2, + asm: x86.AVCVTPS2UDQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VCVTPS2UDQMasked512", + argLen: 2, + asm: x86.AVCVTPS2UDQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VCVTTPS2DQ128", + argLen: 1, + asm: x86.AVCVTTPS2DQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VCVTTPS2DQ256", + argLen: 1, + asm: x86.AVCVTTPS2DQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VCVTTPS2DQ512", + argLen: 1, + asm: x86.AVCVTTPS2DQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VCVTTPS2DQMasked128", + argLen: 2, + asm: x86.AVCVTTPS2DQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VCVTTPS2DQMasked256", + argLen: 2, + asm: x86.AVCVTTPS2DQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VCVTTPS2DQMasked512", + argLen: 2, + asm: x86.AVCVTTPS2DQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VDIVPD128", argLen: 2, @@ -62407,6 +62593,66 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "ConvertToInt32Float32x4", + argLen: 1, + generic: true, + }, + { + name: 
"ConvertToInt32Float32x8", + argLen: 1, + generic: true, + }, + { + name: "ConvertToInt32Float32x16", + argLen: 1, + generic: true, + }, + { + name: "ConvertToInt32MaskedFloat32x4", + argLen: 2, + generic: true, + }, + { + name: "ConvertToInt32MaskedFloat32x8", + argLen: 2, + generic: true, + }, + { + name: "ConvertToInt32MaskedFloat32x16", + argLen: 2, + generic: true, + }, + { + name: "ConvertToUint32Float32x4", + argLen: 1, + generic: true, + }, + { + name: "ConvertToUint32Float32x8", + argLen: 1, + generic: true, + }, + { + name: "ConvertToUint32Float32x16", + argLen: 1, + generic: true, + }, + { + name: "ConvertToUint32MaskedFloat32x4", + argLen: 2, + generic: true, + }, + { + name: "ConvertToUint32MaskedFloat32x8", + argLen: 2, + generic: true, + }, + { + name: "ConvertToUint32MaskedFloat32x16", + argLen: 2, + generic: true, + }, { name: "DivFloat32x4", argLen: 2, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index d9560c55c22d4b..11c7c20db261c0 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1267,6 +1267,36 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpConstBool(v) case OpConstNil: return rewriteValueAMD64_OpConstNil(v) + case OpConvertToInt32Float32x16: + v.Op = OpAMD64VCVTTPS2DQ512 + return true + case OpConvertToInt32Float32x4: + v.Op = OpAMD64VCVTTPS2DQ128 + return true + case OpConvertToInt32Float32x8: + v.Op = OpAMD64VCVTTPS2DQ256 + return true + case OpConvertToInt32MaskedFloat32x16: + return rewriteValueAMD64_OpConvertToInt32MaskedFloat32x16(v) + case OpConvertToInt32MaskedFloat32x4: + return rewriteValueAMD64_OpConvertToInt32MaskedFloat32x4(v) + case OpConvertToInt32MaskedFloat32x8: + return rewriteValueAMD64_OpConvertToInt32MaskedFloat32x8(v) + case OpConvertToUint32Float32x16: + v.Op = OpAMD64VCVTPS2UDQ512 + return true + case OpConvertToUint32Float32x4: + v.Op = OpAMD64VCVTPS2UDQ128 + return true + case OpConvertToUint32Float32x8: + v.Op = OpAMD64VCVTPS2UDQ256 + return true + case OpConvertToUint32MaskedFloat32x16: + return rewriteValueAMD64_OpConvertToUint32MaskedFloat32x16(v) + case OpConvertToUint32MaskedFloat32x4: + return rewriteValueAMD64_OpConvertToUint32MaskedFloat32x4(v) + case OpConvertToUint32MaskedFloat32x8: + return rewriteValueAMD64_OpConvertToUint32MaskedFloat32x8(v) case OpCtz16: return rewriteValueAMD64_OpCtz16(v) case OpCtz16NonZero: @@ -31928,6 +31958,102 @@ func rewriteValueAMD64_OpConstNil(v *Value) bool { return true } } +func rewriteValueAMD64_OpConvertToInt32MaskedFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ConvertToInt32MaskedFloat32x16 x mask) + // result: (VCVTTPS2DQMasked512 x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VCVTTPS2DQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpConvertToInt32MaskedFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ConvertToInt32MaskedFloat32x4 x mask) + // result: (VCVTTPS2DQMasked128 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VCVTTPS2DQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpConvertToInt32MaskedFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // 
match: (ConvertToInt32MaskedFloat32x8 x mask) + // result: (VCVTTPS2DQMasked256 x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VCVTTPS2DQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpConvertToUint32MaskedFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ConvertToUint32MaskedFloat32x16 x mask) + // result: (VCVTPS2UDQMasked512 x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VCVTPS2UDQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpConvertToUint32MaskedFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ConvertToUint32MaskedFloat32x4 x mask) + // result: (VCVTPS2UDQMasked128 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VCVTPS2UDQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpConvertToUint32MaskedFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ConvertToUint32MaskedFloat32x8 x mask) + // result: (VCVTPS2UDQMasked256 x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VCVTPS2UDQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} func rewriteValueAMD64_OpCtz16(v *Value) bool { v_0 := v.Args[0] b := v.Block diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index cf2e7fc67643f1..a8a2ff91420f04 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -245,6 +245,18 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.Compress", opLen2(ssa.OpCompressUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.Compress", opLen2(ssa.OpCompressUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.Compress", opLen2(ssa.OpCompressUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.ConvertToInt32", opLen1(ssa.OpConvertToInt32Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.ConvertToInt32", opLen1(ssa.OpConvertToInt32Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.ConvertToInt32", opLen1(ssa.OpConvertToInt32Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.ConvertToInt32Masked", opLen2(ssa.OpConvertToInt32MaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.ConvertToInt32Masked", opLen2(ssa.OpConvertToInt32MaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.ConvertToInt32Masked", opLen2(ssa.OpConvertToInt32MaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.ConvertToUint32", opLen1(ssa.OpConvertToUint32Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.ConvertToUint32", opLen1(ssa.OpConvertToUint32Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.ConvertToUint32", opLen1(ssa.OpConvertToUint32Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.ConvertToUint32Masked", opLen2(ssa.OpConvertToUint32MaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.ConvertToUint32Masked", opLen2(ssa.OpConvertToUint32MaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.ConvertToUint32Masked", opLen2(ssa.OpConvertToUint32MaskedFloat32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 318883ea19c63c..8d941360907c14 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -1446,6 +1446,86 @@ func (x Uint64x4) Compress(mask Mask64x4) Uint64x4 // Asm: VPCOMPRESSQ, CPU Feature: AVX512F func (x Uint64x8) Compress(mask Mask64x8) Uint64x8 +/* ConvertToInt32 */ + +// ConvertToInt32 converts element values to int32. +// +// Asm: VCVTTPS2DQ, CPU Feature: AVX +func (x Float32x4) ConvertToInt32() Int32x4 + +// ConvertToInt32 converts element values to int32. +// +// Asm: VCVTTPS2DQ, CPU Feature: AVX +func (x Float32x8) ConvertToInt32() Int32x8 + +// ConvertToInt32 converts element values to int32. +// +// Asm: VCVTTPS2DQ, CPU Feature: AVX512F +func (x Float32x16) ConvertToInt32() Int32x16 + +/* ConvertToInt32Masked */ + +// ConvertToInt32 converts element values to int32. +// +// This operation is applied selectively under a write mask. +// +// Asm: VCVTTPS2DQ, CPU Feature: AVX512F +func (x Float32x4) ConvertToInt32Masked(mask Mask32x4) Int32x4 + +// ConvertToInt32 converts element values to int32. +// +// This operation is applied selectively under a write mask. +// +// Asm: VCVTTPS2DQ, CPU Feature: AVX512F +func (x Float32x8) ConvertToInt32Masked(mask Mask32x8) Int32x8 + +// ConvertToInt32 converts element values to int32. 
+//
+// This operation is applied selectively under a write mask.
+//
+// Asm: VCVTTPS2DQ, CPU Feature: AVX512F
+func (x Float32x16) ConvertToInt32Masked(mask Mask32x16) Int32x16
+
+/* ConvertToUint32 */
+
+// ConvertToUint32 converts element values to uint32.
+//
+// Asm: VCVTPS2UDQ, CPU Feature: AVX512F
+func (x Float32x4) ConvertToUint32() Uint32x4
+
+// ConvertToUint32 converts element values to uint32.
+//
+// Asm: VCVTPS2UDQ, CPU Feature: AVX512F
+func (x Float32x8) ConvertToUint32() Uint32x8
+
+// ConvertToUint32 converts element values to uint32.
+//
+// Asm: VCVTPS2UDQ, CPU Feature: AVX512F
+func (x Float32x16) ConvertToUint32() Uint32x16
+
+/* ConvertToUint32Masked */
+
+// ConvertToUint32Masked converts element values to uint32.
+//
+// This operation is applied selectively under a write mask.
+//
+// Asm: VCVTPS2UDQ, CPU Feature: AVX512F
+func (x Float32x4) ConvertToUint32Masked(mask Mask32x4) Uint32x4
+
+// ConvertToUint32Masked converts element values to uint32.
+//
+// This operation is applied selectively under a write mask.
+//
+// Asm: VCVTPS2UDQ, CPU Feature: AVX512F
+func (x Float32x8) ConvertToUint32Masked(mask Mask32x8) Uint32x8
+
+// ConvertToUint32Masked converts element values to uint32.
+//
+// This operation is applied selectively under a write mask.
+//
+// Asm: VCVTPS2UDQ, CPU Feature: AVX512F
+func (x Float32x16) ConvertToUint32Masked(mask Mask32x16) Uint32x16
+
 /* DiffWithCeilWithPrecision */
 
 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision.

From a24ffe337946ff2142baa772c0be27f28c3cdf98 Mon Sep 17 00:00:00 2001
From: David Chase
Date: Tue, 22 Jul 2025 15:34:55 -0400
Subject: [PATCH 101/139] [dev.simd] simd: modify test generation to make it more flexible

This is to support conversions, which are not T -> T.
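For example, the existing generated helpers are all T -> T: a binary helper
takes f func(_, _ simd.Int32x4) simd.Int32x4 and checks it against want
func(_, _ []int32) []int32. A conversion helper instead pairs different input
and output shapes. As a rough sketch only (testFloat32x4ToInt32x4Unary,
forSlice, and float32s are illustrative names here, not the exact identifiers
the generator emits), such a helper could look like:

	// testFloat32x4ToInt32x4Unary tests a simd conversion method f against
	// the reference behavior generated by want. Unlike the T -> T helpers,
	// the input and output element types differ.
	func testFloat32x4ToInt32x4Unary(t *testing.T, f func(simd.Float32x4) simd.Int32x4, want func([]float32) []int32) {
		n := 4
		t.Helper()
		forSlice(t, float32s, n, func(x []float32) bool {
			t.Helper()
			a := simd.LoadFloat32x4Slice(x)
			g := make([]int32, n)
			f(a).StoreSlice(g)
			w := want(x)
			return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) })
		})
	}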
Change-Id: I323887b116eee8133770a899ed82363bba38a9c4 Reviewed-on: https://go-review.googlesource.com/c/go/+/689717 Reviewed-by: Cherry Mui LUCI-TryBot-Result: Go LUCI --- src/simd/binary_helpers_test.go | 252 ++++++++-------- src/simd/compare_helpers_test.go | 270 ++++++++--------- src/simd/comparemasked_helpers_test.go | 396 ++++++++++++------------- src/simd/genfiles.go | 87 +++++- src/simd/slice_amd64.go | 180 +++++------ src/simd/ternary_helpers_test.go | 288 +++++++++--------- src/simd/unary_helpers_test.go | 216 +++++++------- 7 files changed, 874 insertions(+), 815 deletions(-) diff --git a/src/simd/binary_helpers_test.go b/src/simd/binary_helpers_test.go index b5055980586d74..fbf31beb7c8e6b 100644 --- a/src/simd/binary_helpers_test.go +++ b/src/simd/binary_helpers_test.go @@ -28,90 +28,90 @@ func testInt8x16Binary(t *testing.T, f func(_, _ simd.Int8x16) simd.Int8x16, wan }) } -// testUint8x16Binary tests the simd binary method f against the expected behavior generated by want -func testUint8x16Binary(t *testing.T, f func(_, _ simd.Uint8x16) simd.Uint8x16, want func(_, _ []uint8) []uint8) { - n := 16 +// testInt16x8Binary tests the simd binary method f against the expected behavior generated by want +func testInt16x8Binary(t *testing.T, f func(_, _ simd.Int16x8) simd.Int16x8, want func(_, _ []int16) []int16) { + n := 8 t.Helper() - forSlicePair(t, uint8s, n, func(x, y []uint8) bool { + forSlicePair(t, int16s, n, func(x, y []int16) bool { t.Helper() - a := simd.LoadUint8x16Slice(x) - b := simd.LoadUint8x16Slice(y) - g := make([]uint8, n) + a := simd.LoadInt16x8Slice(x) + b := simd.LoadInt16x8Slice(y) + g := make([]int16, n) f(a, b).StoreSlice(g) w := want(x, y) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } -// testInt16x8Binary tests the simd binary method f against the expected behavior generated by want -func testInt16x8Binary(t *testing.T, f func(_, _ simd.Int16x8) simd.Int16x8, want func(_, _ []int16) []int16) { - n := 8 +// testInt32x4Binary tests the simd binary method f against the expected behavior generated by want +func testInt32x4Binary(t *testing.T, f func(_, _ simd.Int32x4) simd.Int32x4, want func(_, _ []int32) []int32) { + n := 4 t.Helper() - forSlicePair(t, int16s, n, func(x, y []int16) bool { + forSlicePair(t, int32s, n, func(x, y []int32) bool { t.Helper() - a := simd.LoadInt16x8Slice(x) - b := simd.LoadInt16x8Slice(y) - g := make([]int16, n) + a := simd.LoadInt32x4Slice(x) + b := simd.LoadInt32x4Slice(y) + g := make([]int32, n) f(a, b).StoreSlice(g) w := want(x, y) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } -// testUint16x8Binary tests the simd binary method f against the expected behavior generated by want -func testUint16x8Binary(t *testing.T, f func(_, _ simd.Uint16x8) simd.Uint16x8, want func(_, _ []uint16) []uint16) { - n := 8 +// testInt64x2Binary tests the simd binary method f against the expected behavior generated by want +func testInt64x2Binary(t *testing.T, f func(_, _ simd.Int64x2) simd.Int64x2, want func(_, _ []int64) []int64) { + n := 2 t.Helper() - forSlicePair(t, uint16s, n, func(x, y []uint16) bool { + forSlicePair(t, int64s, n, func(x, y []int64) bool { t.Helper() - a := simd.LoadUint16x8Slice(x) - b := simd.LoadUint16x8Slice(y) - g := make([]uint16, n) + a := simd.LoadInt64x2Slice(x) + b := simd.LoadInt64x2Slice(y) + g := make([]int64, n) f(a, b).StoreSlice(g) w := want(x, y) return checkSlicesLogInput(t, g, w, func() { t.Helper(); 
t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } -// testInt32x4Binary tests the simd binary method f against the expected behavior generated by want -func testInt32x4Binary(t *testing.T, f func(_, _ simd.Int32x4) simd.Int32x4, want func(_, _ []int32) []int32) { - n := 4 +// testUint8x16Binary tests the simd binary method f against the expected behavior generated by want +func testUint8x16Binary(t *testing.T, f func(_, _ simd.Uint8x16) simd.Uint8x16, want func(_, _ []uint8) []uint8) { + n := 16 t.Helper() - forSlicePair(t, int32s, n, func(x, y []int32) bool { + forSlicePair(t, uint8s, n, func(x, y []uint8) bool { t.Helper() - a := simd.LoadInt32x4Slice(x) - b := simd.LoadInt32x4Slice(y) - g := make([]int32, n) + a := simd.LoadUint8x16Slice(x) + b := simd.LoadUint8x16Slice(y) + g := make([]uint8, n) f(a, b).StoreSlice(g) w := want(x, y) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } -// testUint32x4Binary tests the simd binary method f against the expected behavior generated by want -func testUint32x4Binary(t *testing.T, f func(_, _ simd.Uint32x4) simd.Uint32x4, want func(_, _ []uint32) []uint32) { - n := 4 +// testUint16x8Binary tests the simd binary method f against the expected behavior generated by want +func testUint16x8Binary(t *testing.T, f func(_, _ simd.Uint16x8) simd.Uint16x8, want func(_, _ []uint16) []uint16) { + n := 8 t.Helper() - forSlicePair(t, uint32s, n, func(x, y []uint32) bool { + forSlicePair(t, uint16s, n, func(x, y []uint16) bool { t.Helper() - a := simd.LoadUint32x4Slice(x) - b := simd.LoadUint32x4Slice(y) - g := make([]uint32, n) + a := simd.LoadUint16x8Slice(x) + b := simd.LoadUint16x8Slice(y) + g := make([]uint16, n) f(a, b).StoreSlice(g) w := want(x, y) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } -// testInt64x2Binary tests the simd binary method f against the expected behavior generated by want -func testInt64x2Binary(t *testing.T, f func(_, _ simd.Int64x2) simd.Int64x2, want func(_, _ []int64) []int64) { - n := 2 +// testUint32x4Binary tests the simd binary method f against the expected behavior generated by want +func testUint32x4Binary(t *testing.T, f func(_, _ simd.Uint32x4) simd.Uint32x4, want func(_, _ []uint32) []uint32) { + n := 4 t.Helper() - forSlicePair(t, int64s, n, func(x, y []int64) bool { + forSlicePair(t, uint32s, n, func(x, y []uint32) bool { t.Helper() - a := simd.LoadInt64x2Slice(x) - b := simd.LoadInt64x2Slice(y) - g := make([]int64, n) + a := simd.LoadUint32x4Slice(x) + b := simd.LoadUint32x4Slice(y) + g := make([]uint32, n) f(a, b).StoreSlice(g) w := want(x, y) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) @@ -178,90 +178,90 @@ func testInt8x32Binary(t *testing.T, f func(_, _ simd.Int8x32) simd.Int8x32, wan }) } -// testUint8x32Binary tests the simd binary method f against the expected behavior generated by want -func testUint8x32Binary(t *testing.T, f func(_, _ simd.Uint8x32) simd.Uint8x32, want func(_, _ []uint8) []uint8) { - n := 32 +// testInt16x16Binary tests the simd binary method f against the expected behavior generated by want +func testInt16x16Binary(t *testing.T, f func(_, _ simd.Int16x16) simd.Int16x16, want func(_, _ []int16) []int16) { + n := 16 t.Helper() - forSlicePair(t, uint8s, n, func(x, y []uint8) bool { + forSlicePair(t, int16s, n, func(x, y []int16) bool { t.Helper() - a := simd.LoadUint8x32Slice(x) - b := simd.LoadUint8x32Slice(y) - g := make([]uint8, n) + a := 
simd.LoadInt16x16Slice(x) + b := simd.LoadInt16x16Slice(y) + g := make([]int16, n) f(a, b).StoreSlice(g) w := want(x, y) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } -// testInt16x16Binary tests the simd binary method f against the expected behavior generated by want -func testInt16x16Binary(t *testing.T, f func(_, _ simd.Int16x16) simd.Int16x16, want func(_, _ []int16) []int16) { - n := 16 +// testInt32x8Binary tests the simd binary method f against the expected behavior generated by want +func testInt32x8Binary(t *testing.T, f func(_, _ simd.Int32x8) simd.Int32x8, want func(_, _ []int32) []int32) { + n := 8 t.Helper() - forSlicePair(t, int16s, n, func(x, y []int16) bool { + forSlicePair(t, int32s, n, func(x, y []int32) bool { t.Helper() - a := simd.LoadInt16x16Slice(x) - b := simd.LoadInt16x16Slice(y) - g := make([]int16, n) + a := simd.LoadInt32x8Slice(x) + b := simd.LoadInt32x8Slice(y) + g := make([]int32, n) f(a, b).StoreSlice(g) w := want(x, y) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } -// testUint16x16Binary tests the simd binary method f against the expected behavior generated by want -func testUint16x16Binary(t *testing.T, f func(_, _ simd.Uint16x16) simd.Uint16x16, want func(_, _ []uint16) []uint16) { - n := 16 +// testInt64x4Binary tests the simd binary method f against the expected behavior generated by want +func testInt64x4Binary(t *testing.T, f func(_, _ simd.Int64x4) simd.Int64x4, want func(_, _ []int64) []int64) { + n := 4 t.Helper() - forSlicePair(t, uint16s, n, func(x, y []uint16) bool { + forSlicePair(t, int64s, n, func(x, y []int64) bool { t.Helper() - a := simd.LoadUint16x16Slice(x) - b := simd.LoadUint16x16Slice(y) - g := make([]uint16, n) + a := simd.LoadInt64x4Slice(x) + b := simd.LoadInt64x4Slice(y) + g := make([]int64, n) f(a, b).StoreSlice(g) w := want(x, y) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } -// testInt32x8Binary tests the simd binary method f against the expected behavior generated by want -func testInt32x8Binary(t *testing.T, f func(_, _ simd.Int32x8) simd.Int32x8, want func(_, _ []int32) []int32) { - n := 8 +// testUint8x32Binary tests the simd binary method f against the expected behavior generated by want +func testUint8x32Binary(t *testing.T, f func(_, _ simd.Uint8x32) simd.Uint8x32, want func(_, _ []uint8) []uint8) { + n := 32 t.Helper() - forSlicePair(t, int32s, n, func(x, y []int32) bool { + forSlicePair(t, uint8s, n, func(x, y []uint8) bool { t.Helper() - a := simd.LoadInt32x8Slice(x) - b := simd.LoadInt32x8Slice(y) - g := make([]int32, n) + a := simd.LoadUint8x32Slice(x) + b := simd.LoadUint8x32Slice(y) + g := make([]uint8, n) f(a, b).StoreSlice(g) w := want(x, y) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } -// testUint32x8Binary tests the simd binary method f against the expected behavior generated by want -func testUint32x8Binary(t *testing.T, f func(_, _ simd.Uint32x8) simd.Uint32x8, want func(_, _ []uint32) []uint32) { - n := 8 +// testUint16x16Binary tests the simd binary method f against the expected behavior generated by want +func testUint16x16Binary(t *testing.T, f func(_, _ simd.Uint16x16) simd.Uint16x16, want func(_, _ []uint16) []uint16) { + n := 16 t.Helper() - forSlicePair(t, uint32s, n, func(x, y []uint32) bool { + forSlicePair(t, uint16s, n, func(x, y []uint16) bool { t.Helper() - a := 
simd.LoadUint32x8Slice(x) - b := simd.LoadUint32x8Slice(y) - g := make([]uint32, n) + a := simd.LoadUint16x16Slice(x) + b := simd.LoadUint16x16Slice(y) + g := make([]uint16, n) f(a, b).StoreSlice(g) w := want(x, y) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } -// testInt64x4Binary tests the simd binary method f against the expected behavior generated by want -func testInt64x4Binary(t *testing.T, f func(_, _ simd.Int64x4) simd.Int64x4, want func(_, _ []int64) []int64) { - n := 4 +// testUint32x8Binary tests the simd binary method f against the expected behavior generated by want +func testUint32x8Binary(t *testing.T, f func(_, _ simd.Uint32x8) simd.Uint32x8, want func(_, _ []uint32) []uint32) { + n := 8 t.Helper() - forSlicePair(t, int64s, n, func(x, y []int64) bool { + forSlicePair(t, uint32s, n, func(x, y []uint32) bool { t.Helper() - a := simd.LoadInt64x4Slice(x) - b := simd.LoadInt64x4Slice(y) - g := make([]int64, n) + a := simd.LoadUint32x8Slice(x) + b := simd.LoadUint32x8Slice(y) + g := make([]uint32, n) f(a, b).StoreSlice(g) w := want(x, y) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) @@ -328,90 +328,90 @@ func testInt8x64Binary(t *testing.T, f func(_, _ simd.Int8x64) simd.Int8x64, wan }) } -// testUint8x64Binary tests the simd binary method f against the expected behavior generated by want -func testUint8x64Binary(t *testing.T, f func(_, _ simd.Uint8x64) simd.Uint8x64, want func(_, _ []uint8) []uint8) { - n := 64 +// testInt16x32Binary tests the simd binary method f against the expected behavior generated by want +func testInt16x32Binary(t *testing.T, f func(_, _ simd.Int16x32) simd.Int16x32, want func(_, _ []int16) []int16) { + n := 32 t.Helper() - forSlicePair(t, uint8s, n, func(x, y []uint8) bool { + forSlicePair(t, int16s, n, func(x, y []int16) bool { t.Helper() - a := simd.LoadUint8x64Slice(x) - b := simd.LoadUint8x64Slice(y) - g := make([]uint8, n) + a := simd.LoadInt16x32Slice(x) + b := simd.LoadInt16x32Slice(y) + g := make([]int16, n) f(a, b).StoreSlice(g) w := want(x, y) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } -// testInt16x32Binary tests the simd binary method f against the expected behavior generated by want -func testInt16x32Binary(t *testing.T, f func(_, _ simd.Int16x32) simd.Int16x32, want func(_, _ []int16) []int16) { - n := 32 +// testInt32x16Binary tests the simd binary method f against the expected behavior generated by want +func testInt32x16Binary(t *testing.T, f func(_, _ simd.Int32x16) simd.Int32x16, want func(_, _ []int32) []int32) { + n := 16 t.Helper() - forSlicePair(t, int16s, n, func(x, y []int16) bool { + forSlicePair(t, int32s, n, func(x, y []int32) bool { t.Helper() - a := simd.LoadInt16x32Slice(x) - b := simd.LoadInt16x32Slice(y) - g := make([]int16, n) + a := simd.LoadInt32x16Slice(x) + b := simd.LoadInt32x16Slice(y) + g := make([]int32, n) f(a, b).StoreSlice(g) w := want(x, y) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } -// testUint16x32Binary tests the simd binary method f against the expected behavior generated by want -func testUint16x32Binary(t *testing.T, f func(_, _ simd.Uint16x32) simd.Uint16x32, want func(_, _ []uint16) []uint16) { - n := 32 +// testInt64x8Binary tests the simd binary method f against the expected behavior generated by want +func testInt64x8Binary(t *testing.T, f func(_, _ simd.Int64x8) simd.Int64x8, 
want func(_, _ []int64) []int64) { + n := 8 t.Helper() - forSlicePair(t, uint16s, n, func(x, y []uint16) bool { + forSlicePair(t, int64s, n, func(x, y []int64) bool { t.Helper() - a := simd.LoadUint16x32Slice(x) - b := simd.LoadUint16x32Slice(y) - g := make([]uint16, n) + a := simd.LoadInt64x8Slice(x) + b := simd.LoadInt64x8Slice(y) + g := make([]int64, n) f(a, b).StoreSlice(g) w := want(x, y) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } -// testInt32x16Binary tests the simd binary method f against the expected behavior generated by want -func testInt32x16Binary(t *testing.T, f func(_, _ simd.Int32x16) simd.Int32x16, want func(_, _ []int32) []int32) { - n := 16 +// testUint8x64Binary tests the simd binary method f against the expected behavior generated by want +func testUint8x64Binary(t *testing.T, f func(_, _ simd.Uint8x64) simd.Uint8x64, want func(_, _ []uint8) []uint8) { + n := 64 t.Helper() - forSlicePair(t, int32s, n, func(x, y []int32) bool { + forSlicePair(t, uint8s, n, func(x, y []uint8) bool { t.Helper() - a := simd.LoadInt32x16Slice(x) - b := simd.LoadInt32x16Slice(y) - g := make([]int32, n) + a := simd.LoadUint8x64Slice(x) + b := simd.LoadUint8x64Slice(y) + g := make([]uint8, n) f(a, b).StoreSlice(g) w := want(x, y) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } -// testUint32x16Binary tests the simd binary method f against the expected behavior generated by want -func testUint32x16Binary(t *testing.T, f func(_, _ simd.Uint32x16) simd.Uint32x16, want func(_, _ []uint32) []uint32) { - n := 16 +// testUint16x32Binary tests the simd binary method f against the expected behavior generated by want +func testUint16x32Binary(t *testing.T, f func(_, _ simd.Uint16x32) simd.Uint16x32, want func(_, _ []uint16) []uint16) { + n := 32 t.Helper() - forSlicePair(t, uint32s, n, func(x, y []uint32) bool { + forSlicePair(t, uint16s, n, func(x, y []uint16) bool { t.Helper() - a := simd.LoadUint32x16Slice(x) - b := simd.LoadUint32x16Slice(y) - g := make([]uint32, n) + a := simd.LoadUint16x32Slice(x) + b := simd.LoadUint16x32Slice(y) + g := make([]uint16, n) f(a, b).StoreSlice(g) w := want(x, y) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } -// testInt64x8Binary tests the simd binary method f against the expected behavior generated by want -func testInt64x8Binary(t *testing.T, f func(_, _ simd.Int64x8) simd.Int64x8, want func(_, _ []int64) []int64) { - n := 8 +// testUint32x16Binary tests the simd binary method f against the expected behavior generated by want +func testUint32x16Binary(t *testing.T, f func(_, _ simd.Uint32x16) simd.Uint32x16, want func(_, _ []uint32) []uint32) { + n := 16 t.Helper() - forSlicePair(t, int64s, n, func(x, y []int64) bool { + forSlicePair(t, uint32s, n, func(x, y []uint32) bool { t.Helper() - a := simd.LoadInt64x8Slice(x) - b := simd.LoadInt64x8Slice(y) - g := make([]int64, n) + a := simd.LoadUint32x16Slice(x) + b := simd.LoadUint32x16Slice(y) + g := make([]uint32, n) f(a, b).StoreSlice(g) w := want(x, y) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) diff --git a/src/simd/compare_helpers_test.go b/src/simd/compare_helpers_test.go index 948386307ca6b7..e6d7c82c8fe69f 100644 --- a/src/simd/compare_helpers_test.go +++ b/src/simd/compare_helpers_test.go @@ -28,21 +28,6 @@ func testInt8x16Compare(t *testing.T, f func(_, _ simd.Int8x16) simd.Mask8x16, w }) } 
-// testUint8x16Compare tests the simd comparison method f against the expected behavior generated by want -func testUint8x16Compare(t *testing.T, f func(_, _ simd.Uint8x16) simd.Mask8x16, want func(_, _ []uint8) []int64) { - n := 16 - t.Helper() - forSlicePair(t, uint8s, n, func(x, y []uint8) bool { - t.Helper() - a := simd.LoadUint8x16Slice(x) - b := simd.LoadUint8x16Slice(y) - g := make([]int8, n) - f(a, b).AsInt8x16().StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - // testInt16x8Compare tests the simd comparison method f against the expected behavior generated by want func testInt16x8Compare(t *testing.T, f func(_, _ simd.Int16x8) simd.Mask16x8, want func(_, _ []int16) []int64) { n := 8 @@ -58,21 +43,6 @@ func testInt16x8Compare(t *testing.T, f func(_, _ simd.Int16x8) simd.Mask16x8, w }) } -// testUint16x8Compare tests the simd comparison method f against the expected behavior generated by want -func testUint16x8Compare(t *testing.T, f func(_, _ simd.Uint16x8) simd.Mask16x8, want func(_, _ []uint16) []int64) { - n := 8 - t.Helper() - forSlicePair(t, uint16s, n, func(x, y []uint16) bool { - t.Helper() - a := simd.LoadUint16x8Slice(x) - b := simd.LoadUint16x8Slice(y) - g := make([]int16, n) - f(a, b).AsInt16x8().StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - // testInt32x4Compare tests the simd comparison method f against the expected behavior generated by want func testInt32x4Compare(t *testing.T, f func(_, _ simd.Int32x4) simd.Mask32x4, want func(_, _ []int32) []int64) { n := 4 @@ -88,21 +58,6 @@ func testInt32x4Compare(t *testing.T, f func(_, _ simd.Int32x4) simd.Mask32x4, w }) } -// testUint32x4Compare tests the simd comparison method f against the expected behavior generated by want -func testUint32x4Compare(t *testing.T, f func(_, _ simd.Uint32x4) simd.Mask32x4, want func(_, _ []uint32) []int64) { - n := 4 - t.Helper() - forSlicePair(t, uint32s, n, func(x, y []uint32) bool { - t.Helper() - a := simd.LoadUint32x4Slice(x) - b := simd.LoadUint32x4Slice(y) - g := make([]int32, n) - f(a, b).AsInt32x4().StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - // testInt64x2Compare tests the simd comparison method f against the expected behavior generated by want func testInt64x2Compare(t *testing.T, f func(_, _ simd.Int64x2) simd.Mask64x2, want func(_, _ []int64) []int64) { n := 2 @@ -118,6 +73,51 @@ func testInt64x2Compare(t *testing.T, f func(_, _ simd.Int64x2) simd.Mask64x2, w }) } +// testUint8x16Compare tests the simd comparison method f against the expected behavior generated by want +func testUint8x16Compare(t *testing.T, f func(_, _ simd.Uint8x16) simd.Mask8x16, want func(_, _ []uint8) []int64) { + n := 16 + t.Helper() + forSlicePair(t, uint8s, n, func(x, y []uint8) bool { + t.Helper() + a := simd.LoadUint8x16Slice(x) + b := simd.LoadUint8x16Slice(y) + g := make([]int8, n) + f(a, b).AsInt8x16().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint16x8Compare tests the simd comparison method f against the expected behavior generated by want +func testUint16x8Compare(t *testing.T, f func(_, _ simd.Uint16x8) simd.Mask16x8, want func(_, _ []uint16) []int64) { + n := 8 + t.Helper() + forSlicePair(t, 
uint16s, n, func(x, y []uint16) bool { + t.Helper() + a := simd.LoadUint16x8Slice(x) + b := simd.LoadUint16x8Slice(y) + g := make([]int16, n) + f(a, b).AsInt16x8().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint32x4Compare tests the simd comparison method f against the expected behavior generated by want +func testUint32x4Compare(t *testing.T, f func(_, _ simd.Uint32x4) simd.Mask32x4, want func(_, _ []uint32) []int64) { + n := 4 + t.Helper() + forSlicePair(t, uint32s, n, func(x, y []uint32) bool { + t.Helper() + a := simd.LoadUint32x4Slice(x) + b := simd.LoadUint32x4Slice(y) + g := make([]int32, n) + f(a, b).AsInt32x4().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + // testUint64x2Compare tests the simd comparison method f against the expected behavior generated by want func testUint64x2Compare(t *testing.T, f func(_, _ simd.Uint64x2) simd.Mask64x2, want func(_, _ []uint64) []int64) { n := 2 @@ -178,21 +178,6 @@ func testInt8x32Compare(t *testing.T, f func(_, _ simd.Int8x32) simd.Mask8x32, w }) } -// testUint8x32Compare tests the simd comparison method f against the expected behavior generated by want -func testUint8x32Compare(t *testing.T, f func(_, _ simd.Uint8x32) simd.Mask8x32, want func(_, _ []uint8) []int64) { - n := 32 - t.Helper() - forSlicePair(t, uint8s, n, func(x, y []uint8) bool { - t.Helper() - a := simd.LoadUint8x32Slice(x) - b := simd.LoadUint8x32Slice(y) - g := make([]int8, n) - f(a, b).AsInt8x32().StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - // testInt16x16Compare tests the simd comparison method f against the expected behavior generated by want func testInt16x16Compare(t *testing.T, f func(_, _ simd.Int16x16) simd.Mask16x16, want func(_, _ []int16) []int64) { n := 16 @@ -208,21 +193,6 @@ func testInt16x16Compare(t *testing.T, f func(_, _ simd.Int16x16) simd.Mask16x16 }) } -// testUint16x16Compare tests the simd comparison method f against the expected behavior generated by want -func testUint16x16Compare(t *testing.T, f func(_, _ simd.Uint16x16) simd.Mask16x16, want func(_, _ []uint16) []int64) { - n := 16 - t.Helper() - forSlicePair(t, uint16s, n, func(x, y []uint16) bool { - t.Helper() - a := simd.LoadUint16x16Slice(x) - b := simd.LoadUint16x16Slice(y) - g := make([]int16, n) - f(a, b).AsInt16x16().StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - // testInt32x8Compare tests the simd comparison method f against the expected behavior generated by want func testInt32x8Compare(t *testing.T, f func(_, _ simd.Int32x8) simd.Mask32x8, want func(_, _ []int32) []int64) { n := 8 @@ -238,21 +208,6 @@ func testInt32x8Compare(t *testing.T, f func(_, _ simd.Int32x8) simd.Mask32x8, w }) } -// testUint32x8Compare tests the simd comparison method f against the expected behavior generated by want -func testUint32x8Compare(t *testing.T, f func(_, _ simd.Uint32x8) simd.Mask32x8, want func(_, _ []uint32) []int64) { - n := 8 - t.Helper() - forSlicePair(t, uint32s, n, func(x, y []uint32) bool { - t.Helper() - a := simd.LoadUint32x8Slice(x) - b := simd.LoadUint32x8Slice(y) - g := make([]int32, n) - f(a, b).AsInt32x8().StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, 
func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - // testInt64x4Compare tests the simd comparison method f against the expected behavior generated by want func testInt64x4Compare(t *testing.T, f func(_, _ simd.Int64x4) simd.Mask64x4, want func(_, _ []int64) []int64) { n := 4 @@ -268,6 +223,51 @@ func testInt64x4Compare(t *testing.T, f func(_, _ simd.Int64x4) simd.Mask64x4, w }) } +// testUint8x32Compare tests the simd comparison method f against the expected behavior generated by want +func testUint8x32Compare(t *testing.T, f func(_, _ simd.Uint8x32) simd.Mask8x32, want func(_, _ []uint8) []int64) { + n := 32 + t.Helper() + forSlicePair(t, uint8s, n, func(x, y []uint8) bool { + t.Helper() + a := simd.LoadUint8x32Slice(x) + b := simd.LoadUint8x32Slice(y) + g := make([]int8, n) + f(a, b).AsInt8x32().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint16x16Compare tests the simd comparison method f against the expected behavior generated by want +func testUint16x16Compare(t *testing.T, f func(_, _ simd.Uint16x16) simd.Mask16x16, want func(_, _ []uint16) []int64) { + n := 16 + t.Helper() + forSlicePair(t, uint16s, n, func(x, y []uint16) bool { + t.Helper() + a := simd.LoadUint16x16Slice(x) + b := simd.LoadUint16x16Slice(y) + g := make([]int16, n) + f(a, b).AsInt16x16().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint32x8Compare tests the simd comparison method f against the expected behavior generated by want +func testUint32x8Compare(t *testing.T, f func(_, _ simd.Uint32x8) simd.Mask32x8, want func(_, _ []uint32) []int64) { + n := 8 + t.Helper() + forSlicePair(t, uint32s, n, func(x, y []uint32) bool { + t.Helper() + a := simd.LoadUint32x8Slice(x) + b := simd.LoadUint32x8Slice(y) + g := make([]int32, n) + f(a, b).AsInt32x8().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + // testUint64x4Compare tests the simd comparison method f against the expected behavior generated by want func testUint64x4Compare(t *testing.T, f func(_, _ simd.Uint64x4) simd.Mask64x4, want func(_, _ []uint64) []int64) { n := 4 @@ -328,21 +328,6 @@ func testInt8x64Compare(t *testing.T, f func(_, _ simd.Int8x64) simd.Mask8x64, w }) } -// testUint8x64Compare tests the simd comparison method f against the expected behavior generated by want -func testUint8x64Compare(t *testing.T, f func(_, _ simd.Uint8x64) simd.Mask8x64, want func(_, _ []uint8) []int64) { - n := 64 - t.Helper() - forSlicePair(t, uint8s, n, func(x, y []uint8) bool { - t.Helper() - a := simd.LoadUint8x64Slice(x) - b := simd.LoadUint8x64Slice(y) - g := make([]int8, n) - f(a, b).AsInt8x64().StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - // testInt16x32Compare tests the simd comparison method f against the expected behavior generated by want func testInt16x32Compare(t *testing.T, f func(_, _ simd.Int16x32) simd.Mask16x32, want func(_, _ []int16) []int64) { n := 32 @@ -358,21 +343,6 @@ func testInt16x32Compare(t *testing.T, f func(_, _ simd.Int16x32) simd.Mask16x32 }) } -// testUint16x32Compare tests the simd comparison method f against the expected behavior generated by want -func testUint16x32Compare(t *testing.T, f func(_, _ 
simd.Uint16x32) simd.Mask16x32, want func(_, _ []uint16) []int64) { - n := 32 - t.Helper() - forSlicePair(t, uint16s, n, func(x, y []uint16) bool { - t.Helper() - a := simd.LoadUint16x32Slice(x) - b := simd.LoadUint16x32Slice(y) - g := make([]int16, n) - f(a, b).AsInt16x32().StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - // testInt32x16Compare tests the simd comparison method f against the expected behavior generated by want func testInt32x16Compare(t *testing.T, f func(_, _ simd.Int32x16) simd.Mask32x16, want func(_, _ []int32) []int64) { n := 16 @@ -388,21 +358,6 @@ func testInt32x16Compare(t *testing.T, f func(_, _ simd.Int32x16) simd.Mask32x16 }) } -// testUint32x16Compare tests the simd comparison method f against the expected behavior generated by want -func testUint32x16Compare(t *testing.T, f func(_, _ simd.Uint32x16) simd.Mask32x16, want func(_, _ []uint32) []int64) { - n := 16 - t.Helper() - forSlicePair(t, uint32s, n, func(x, y []uint32) bool { - t.Helper() - a := simd.LoadUint32x16Slice(x) - b := simd.LoadUint32x16Slice(y) - g := make([]int32, n) - f(a, b).AsInt32x16().StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - // testInt64x8Compare tests the simd comparison method f against the expected behavior generated by want func testInt64x8Compare(t *testing.T, f func(_, _ simd.Int64x8) simd.Mask64x8, want func(_, _ []int64) []int64) { n := 8 @@ -418,6 +373,51 @@ func testInt64x8Compare(t *testing.T, f func(_, _ simd.Int64x8) simd.Mask64x8, w }) } +// testUint8x64Compare tests the simd comparison method f against the expected behavior generated by want +func testUint8x64Compare(t *testing.T, f func(_, _ simd.Uint8x64) simd.Mask8x64, want func(_, _ []uint8) []int64) { + n := 64 + t.Helper() + forSlicePair(t, uint8s, n, func(x, y []uint8) bool { + t.Helper() + a := simd.LoadUint8x64Slice(x) + b := simd.LoadUint8x64Slice(y) + g := make([]int8, n) + f(a, b).AsInt8x64().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint16x32Compare tests the simd comparison method f against the expected behavior generated by want +func testUint16x32Compare(t *testing.T, f func(_, _ simd.Uint16x32) simd.Mask16x32, want func(_, _ []uint16) []int64) { + n := 32 + t.Helper() + forSlicePair(t, uint16s, n, func(x, y []uint16) bool { + t.Helper() + a := simd.LoadUint16x32Slice(x) + b := simd.LoadUint16x32Slice(y) + g := make([]int16, n) + f(a, b).AsInt16x32().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint32x16Compare tests the simd comparison method f against the expected behavior generated by want +func testUint32x16Compare(t *testing.T, f func(_, _ simd.Uint32x16) simd.Mask32x16, want func(_, _ []uint32) []int64) { + n := 16 + t.Helper() + forSlicePair(t, uint32s, n, func(x, y []uint32) bool { + t.Helper() + a := simd.LoadUint32x16Slice(x) + b := simd.LoadUint32x16Slice(y) + g := make([]int32, n) + f(a, b).AsInt32x16().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + // testUint64x8Compare tests the simd comparison method f against the expected behavior generated by want func testUint64x8Compare(t 
*testing.T, f func(_, _ simd.Uint64x8) simd.Mask64x8, want func(_, _ []uint64) []int64) { n := 8 diff --git a/src/simd/comparemasked_helpers_test.go b/src/simd/comparemasked_helpers_test.go index 5a70f92f265805..0baba27e544278 100644 --- a/src/simd/comparemasked_helpers_test.go +++ b/src/simd/comparemasked_helpers_test.go @@ -37,20 +37,20 @@ func testInt8x16CompareMasked(t *testing.T, }) } -// testUint8x16CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// testInt16x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want // The mask is applied to the output of want; anything not in the mask, is zeroed. -func testUint8x16CompareMasked(t *testing.T, - f func(_, _ simd.Uint8x16, m simd.Mask8x16) simd.Mask8x16, - want func(_, _ []uint8) []int64) { - n := 16 +func testInt16x8CompareMasked(t *testing.T, + f func(_, _ simd.Int16x8, m simd.Mask16x8) simd.Mask16x8, + want func(_, _ []int16) []int64) { + n := 8 t.Helper() - forSlicePairMasked(t, uint8s, n, func(x, y []uint8, m []bool) bool { + forSlicePairMasked(t, int16s, n, func(x, y []int16, m []bool) bool { t.Helper() - a := simd.LoadUint8x16Slice(x) - b := simd.LoadUint8x16Slice(y) - k := simd.LoadInt8x16Slice(toVect[int8](m)).AsMask8x16() - g := make([]int8, n) - f(a, b, k).AsInt8x16().StoreSlice(g) + a := simd.LoadInt16x8Slice(x) + b := simd.LoadInt16x8Slice(y) + k := simd.LoadInt16x8Slice(toVect[int16](m)).AsMask16x8() + g := make([]int16, n) + f(a, b, k).AsInt16x8().StoreSlice(g) w := want(x, y) for i := range m { if !m[i] { @@ -61,20 +61,20 @@ func testUint8x16CompareMasked(t *testing.T, }) } -// testInt16x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// testInt32x4CompareMasked tests the simd masked comparison method f against the expected behavior generated by want // The mask is applied to the output of want; anything not in the mask, is zeroed. -func testInt16x8CompareMasked(t *testing.T, - f func(_, _ simd.Int16x8, m simd.Mask16x8) simd.Mask16x8, - want func(_, _ []int16) []int64) { - n := 8 +func testInt32x4CompareMasked(t *testing.T, + f func(_, _ simd.Int32x4, m simd.Mask32x4) simd.Mask32x4, + want func(_, _ []int32) []int64) { + n := 4 t.Helper() - forSlicePairMasked(t, int16s, n, func(x, y []int16, m []bool) bool { + forSlicePairMasked(t, int32s, n, func(x, y []int32, m []bool) bool { t.Helper() - a := simd.LoadInt16x8Slice(x) - b := simd.LoadInt16x8Slice(y) - k := simd.LoadInt16x8Slice(toVect[int16](m)).AsMask16x8() - g := make([]int16, n) - f(a, b, k).AsInt16x8().StoreSlice(g) + a := simd.LoadInt32x4Slice(x) + b := simd.LoadInt32x4Slice(y) + k := simd.LoadInt32x4Slice(toVect[int32](m)).AsMask32x4() + g := make([]int32, n) + f(a, b, k).AsInt32x4().StoreSlice(g) w := want(x, y) for i := range m { if !m[i] { @@ -85,20 +85,20 @@ func testInt16x8CompareMasked(t *testing.T, }) } -// testUint16x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// testInt64x2CompareMasked tests the simd masked comparison method f against the expected behavior generated by want // The mask is applied to the output of want; anything not in the mask, is zeroed. 
-func testUint16x8CompareMasked(t *testing.T, - f func(_, _ simd.Uint16x8, m simd.Mask16x8) simd.Mask16x8, - want func(_, _ []uint16) []int64) { - n := 8 +func testInt64x2CompareMasked(t *testing.T, + f func(_, _ simd.Int64x2, m simd.Mask64x2) simd.Mask64x2, + want func(_, _ []int64) []int64) { + n := 2 t.Helper() - forSlicePairMasked(t, uint16s, n, func(x, y []uint16, m []bool) bool { + forSlicePairMasked(t, int64s, n, func(x, y []int64, m []bool) bool { t.Helper() - a := simd.LoadUint16x8Slice(x) - b := simd.LoadUint16x8Slice(y) - k := simd.LoadInt16x8Slice(toVect[int16](m)).AsMask16x8() - g := make([]int16, n) - f(a, b, k).AsInt16x8().StoreSlice(g) + a := simd.LoadInt64x2Slice(x) + b := simd.LoadInt64x2Slice(y) + k := simd.LoadInt64x2Slice(toVect[int64](m)).AsMask64x2() + g := make([]int64, n) + f(a, b, k).AsInt64x2().StoreSlice(g) w := want(x, y) for i := range m { if !m[i] { @@ -109,20 +109,20 @@ func testUint16x8CompareMasked(t *testing.T, }) } -// testInt32x4CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// testUint8x16CompareMasked tests the simd masked comparison method f against the expected behavior generated by want // The mask is applied to the output of want; anything not in the mask, is zeroed. -func testInt32x4CompareMasked(t *testing.T, - f func(_, _ simd.Int32x4, m simd.Mask32x4) simd.Mask32x4, - want func(_, _ []int32) []int64) { - n := 4 +func testUint8x16CompareMasked(t *testing.T, + f func(_, _ simd.Uint8x16, m simd.Mask8x16) simd.Mask8x16, + want func(_, _ []uint8) []int64) { + n := 16 t.Helper() - forSlicePairMasked(t, int32s, n, func(x, y []int32, m []bool) bool { + forSlicePairMasked(t, uint8s, n, func(x, y []uint8, m []bool) bool { t.Helper() - a := simd.LoadInt32x4Slice(x) - b := simd.LoadInt32x4Slice(y) - k := simd.LoadInt32x4Slice(toVect[int32](m)).AsMask32x4() - g := make([]int32, n) - f(a, b, k).AsInt32x4().StoreSlice(g) + a := simd.LoadUint8x16Slice(x) + b := simd.LoadUint8x16Slice(y) + k := simd.LoadInt8x16Slice(toVect[int8](m)).AsMask8x16() + g := make([]int8, n) + f(a, b, k).AsInt8x16().StoreSlice(g) w := want(x, y) for i := range m { if !m[i] { @@ -133,20 +133,20 @@ func testInt32x4CompareMasked(t *testing.T, }) } -// testUint32x4CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// testUint16x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want // The mask is applied to the output of want; anything not in the mask, is zeroed. 
-func testUint32x4CompareMasked(t *testing.T, - f func(_, _ simd.Uint32x4, m simd.Mask32x4) simd.Mask32x4, - want func(_, _ []uint32) []int64) { - n := 4 +func testUint16x8CompareMasked(t *testing.T, + f func(_, _ simd.Uint16x8, m simd.Mask16x8) simd.Mask16x8, + want func(_, _ []uint16) []int64) { + n := 8 t.Helper() - forSlicePairMasked(t, uint32s, n, func(x, y []uint32, m []bool) bool { + forSlicePairMasked(t, uint16s, n, func(x, y []uint16, m []bool) bool { t.Helper() - a := simd.LoadUint32x4Slice(x) - b := simd.LoadUint32x4Slice(y) - k := simd.LoadInt32x4Slice(toVect[int32](m)).AsMask32x4() - g := make([]int32, n) - f(a, b, k).AsInt32x4().StoreSlice(g) + a := simd.LoadUint16x8Slice(x) + b := simd.LoadUint16x8Slice(y) + k := simd.LoadInt16x8Slice(toVect[int16](m)).AsMask16x8() + g := make([]int16, n) + f(a, b, k).AsInt16x8().StoreSlice(g) w := want(x, y) for i := range m { if !m[i] { @@ -157,20 +157,20 @@ func testUint32x4CompareMasked(t *testing.T, }) } -// testInt64x2CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// testUint32x4CompareMasked tests the simd masked comparison method f against the expected behavior generated by want // The mask is applied to the output of want; anything not in the mask, is zeroed. -func testInt64x2CompareMasked(t *testing.T, - f func(_, _ simd.Int64x2, m simd.Mask64x2) simd.Mask64x2, - want func(_, _ []int64) []int64) { - n := 2 +func testUint32x4CompareMasked(t *testing.T, + f func(_, _ simd.Uint32x4, m simd.Mask32x4) simd.Mask32x4, + want func(_, _ []uint32) []int64) { + n := 4 t.Helper() - forSlicePairMasked(t, int64s, n, func(x, y []int64, m []bool) bool { + forSlicePairMasked(t, uint32s, n, func(x, y []uint32, m []bool) bool { t.Helper() - a := simd.LoadInt64x2Slice(x) - b := simd.LoadInt64x2Slice(y) - k := simd.LoadInt64x2Slice(toVect[int64](m)).AsMask64x2() - g := make([]int64, n) - f(a, b, k).AsInt64x2().StoreSlice(g) + a := simd.LoadUint32x4Slice(x) + b := simd.LoadUint32x4Slice(y) + k := simd.LoadInt32x4Slice(toVect[int32](m)).AsMask32x4() + g := make([]int32, n) + f(a, b, k).AsInt32x4().StoreSlice(g) w := want(x, y) for i := range m { if !m[i] { @@ -277,20 +277,20 @@ func testInt8x32CompareMasked(t *testing.T, }) } -// testUint8x32CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// testInt16x16CompareMasked tests the simd masked comparison method f against the expected behavior generated by want // The mask is applied to the output of want; anything not in the mask, is zeroed. 
-func testUint8x32CompareMasked(t *testing.T, - f func(_, _ simd.Uint8x32, m simd.Mask8x32) simd.Mask8x32, - want func(_, _ []uint8) []int64) { - n := 32 +func testInt16x16CompareMasked(t *testing.T, + f func(_, _ simd.Int16x16, m simd.Mask16x16) simd.Mask16x16, + want func(_, _ []int16) []int64) { + n := 16 t.Helper() - forSlicePairMasked(t, uint8s, n, func(x, y []uint8, m []bool) bool { + forSlicePairMasked(t, int16s, n, func(x, y []int16, m []bool) bool { t.Helper() - a := simd.LoadUint8x32Slice(x) - b := simd.LoadUint8x32Slice(y) - k := simd.LoadInt8x32Slice(toVect[int8](m)).AsMask8x32() - g := make([]int8, n) - f(a, b, k).AsInt8x32().StoreSlice(g) + a := simd.LoadInt16x16Slice(x) + b := simd.LoadInt16x16Slice(y) + k := simd.LoadInt16x16Slice(toVect[int16](m)).AsMask16x16() + g := make([]int16, n) + f(a, b, k).AsInt16x16().StoreSlice(g) w := want(x, y) for i := range m { if !m[i] { @@ -301,20 +301,20 @@ func testUint8x32CompareMasked(t *testing.T, }) } -// testInt16x16CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// testInt32x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want // The mask is applied to the output of want; anything not in the mask, is zeroed. -func testInt16x16CompareMasked(t *testing.T, - f func(_, _ simd.Int16x16, m simd.Mask16x16) simd.Mask16x16, - want func(_, _ []int16) []int64) { - n := 16 +func testInt32x8CompareMasked(t *testing.T, + f func(_, _ simd.Int32x8, m simd.Mask32x8) simd.Mask32x8, + want func(_, _ []int32) []int64) { + n := 8 t.Helper() - forSlicePairMasked(t, int16s, n, func(x, y []int16, m []bool) bool { + forSlicePairMasked(t, int32s, n, func(x, y []int32, m []bool) bool { t.Helper() - a := simd.LoadInt16x16Slice(x) - b := simd.LoadInt16x16Slice(y) - k := simd.LoadInt16x16Slice(toVect[int16](m)).AsMask16x16() - g := make([]int16, n) - f(a, b, k).AsInt16x16().StoreSlice(g) + a := simd.LoadInt32x8Slice(x) + b := simd.LoadInt32x8Slice(y) + k := simd.LoadInt32x8Slice(toVect[int32](m)).AsMask32x8() + g := make([]int32, n) + f(a, b, k).AsInt32x8().StoreSlice(g) w := want(x, y) for i := range m { if !m[i] { @@ -325,20 +325,20 @@ func testInt16x16CompareMasked(t *testing.T, }) } -// testUint16x16CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// testInt64x4CompareMasked tests the simd masked comparison method f against the expected behavior generated by want // The mask is applied to the output of want; anything not in the mask, is zeroed. 
-func testUint16x16CompareMasked(t *testing.T, - f func(_, _ simd.Uint16x16, m simd.Mask16x16) simd.Mask16x16, - want func(_, _ []uint16) []int64) { - n := 16 +func testInt64x4CompareMasked(t *testing.T, + f func(_, _ simd.Int64x4, m simd.Mask64x4) simd.Mask64x4, + want func(_, _ []int64) []int64) { + n := 4 t.Helper() - forSlicePairMasked(t, uint16s, n, func(x, y []uint16, m []bool) bool { + forSlicePairMasked(t, int64s, n, func(x, y []int64, m []bool) bool { t.Helper() - a := simd.LoadUint16x16Slice(x) - b := simd.LoadUint16x16Slice(y) - k := simd.LoadInt16x16Slice(toVect[int16](m)).AsMask16x16() - g := make([]int16, n) - f(a, b, k).AsInt16x16().StoreSlice(g) + a := simd.LoadInt64x4Slice(x) + b := simd.LoadInt64x4Slice(y) + k := simd.LoadInt64x4Slice(toVect[int64](m)).AsMask64x4() + g := make([]int64, n) + f(a, b, k).AsInt64x4().StoreSlice(g) w := want(x, y) for i := range m { if !m[i] { @@ -349,20 +349,20 @@ func testUint16x16CompareMasked(t *testing.T, }) } -// testInt32x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// testUint8x32CompareMasked tests the simd masked comparison method f against the expected behavior generated by want // The mask is applied to the output of want; anything not in the mask, is zeroed. -func testInt32x8CompareMasked(t *testing.T, - f func(_, _ simd.Int32x8, m simd.Mask32x8) simd.Mask32x8, - want func(_, _ []int32) []int64) { - n := 8 +func testUint8x32CompareMasked(t *testing.T, + f func(_, _ simd.Uint8x32, m simd.Mask8x32) simd.Mask8x32, + want func(_, _ []uint8) []int64) { + n := 32 t.Helper() - forSlicePairMasked(t, int32s, n, func(x, y []int32, m []bool) bool { + forSlicePairMasked(t, uint8s, n, func(x, y []uint8, m []bool) bool { t.Helper() - a := simd.LoadInt32x8Slice(x) - b := simd.LoadInt32x8Slice(y) - k := simd.LoadInt32x8Slice(toVect[int32](m)).AsMask32x8() - g := make([]int32, n) - f(a, b, k).AsInt32x8().StoreSlice(g) + a := simd.LoadUint8x32Slice(x) + b := simd.LoadUint8x32Slice(y) + k := simd.LoadInt8x32Slice(toVect[int8](m)).AsMask8x32() + g := make([]int8, n) + f(a, b, k).AsInt8x32().StoreSlice(g) w := want(x, y) for i := range m { if !m[i] { @@ -373,20 +373,20 @@ func testInt32x8CompareMasked(t *testing.T, }) } -// testUint32x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// testUint16x16CompareMasked tests the simd masked comparison method f against the expected behavior generated by want // The mask is applied to the output of want; anything not in the mask, is zeroed. 
-func testUint32x8CompareMasked(t *testing.T, - f func(_, _ simd.Uint32x8, m simd.Mask32x8) simd.Mask32x8, - want func(_, _ []uint32) []int64) { - n := 8 +func testUint16x16CompareMasked(t *testing.T, + f func(_, _ simd.Uint16x16, m simd.Mask16x16) simd.Mask16x16, + want func(_, _ []uint16) []int64) { + n := 16 t.Helper() - forSlicePairMasked(t, uint32s, n, func(x, y []uint32, m []bool) bool { + forSlicePairMasked(t, uint16s, n, func(x, y []uint16, m []bool) bool { t.Helper() - a := simd.LoadUint32x8Slice(x) - b := simd.LoadUint32x8Slice(y) - k := simd.LoadInt32x8Slice(toVect[int32](m)).AsMask32x8() - g := make([]int32, n) - f(a, b, k).AsInt32x8().StoreSlice(g) + a := simd.LoadUint16x16Slice(x) + b := simd.LoadUint16x16Slice(y) + k := simd.LoadInt16x16Slice(toVect[int16](m)).AsMask16x16() + g := make([]int16, n) + f(a, b, k).AsInt16x16().StoreSlice(g) w := want(x, y) for i := range m { if !m[i] { @@ -397,20 +397,20 @@ func testUint32x8CompareMasked(t *testing.T, }) } -// testInt64x4CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// testUint32x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want // The mask is applied to the output of want; anything not in the mask, is zeroed. -func testInt64x4CompareMasked(t *testing.T, - f func(_, _ simd.Int64x4, m simd.Mask64x4) simd.Mask64x4, - want func(_, _ []int64) []int64) { - n := 4 +func testUint32x8CompareMasked(t *testing.T, + f func(_, _ simd.Uint32x8, m simd.Mask32x8) simd.Mask32x8, + want func(_, _ []uint32) []int64) { + n := 8 t.Helper() - forSlicePairMasked(t, int64s, n, func(x, y []int64, m []bool) bool { + forSlicePairMasked(t, uint32s, n, func(x, y []uint32, m []bool) bool { t.Helper() - a := simd.LoadInt64x4Slice(x) - b := simd.LoadInt64x4Slice(y) - k := simd.LoadInt64x4Slice(toVect[int64](m)).AsMask64x4() - g := make([]int64, n) - f(a, b, k).AsInt64x4().StoreSlice(g) + a := simd.LoadUint32x8Slice(x) + b := simd.LoadUint32x8Slice(y) + k := simd.LoadInt32x8Slice(toVect[int32](m)).AsMask32x8() + g := make([]int32, n) + f(a, b, k).AsInt32x8().StoreSlice(g) w := want(x, y) for i := range m { if !m[i] { @@ -517,20 +517,20 @@ func testInt8x64CompareMasked(t *testing.T, }) } -// testUint8x64CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// testInt16x32CompareMasked tests the simd masked comparison method f against the expected behavior generated by want // The mask is applied to the output of want; anything not in the mask, is zeroed. 
-func testUint8x64CompareMasked(t *testing.T, - f func(_, _ simd.Uint8x64, m simd.Mask8x64) simd.Mask8x64, - want func(_, _ []uint8) []int64) { - n := 64 +func testInt16x32CompareMasked(t *testing.T, + f func(_, _ simd.Int16x32, m simd.Mask16x32) simd.Mask16x32, + want func(_, _ []int16) []int64) { + n := 32 t.Helper() - forSlicePairMasked(t, uint8s, n, func(x, y []uint8, m []bool) bool { + forSlicePairMasked(t, int16s, n, func(x, y []int16, m []bool) bool { t.Helper() - a := simd.LoadUint8x64Slice(x) - b := simd.LoadUint8x64Slice(y) - k := simd.LoadInt8x64Slice(toVect[int8](m)).AsMask8x64() - g := make([]int8, n) - f(a, b, k).AsInt8x64().StoreSlice(g) + a := simd.LoadInt16x32Slice(x) + b := simd.LoadInt16x32Slice(y) + k := simd.LoadInt16x32Slice(toVect[int16](m)).AsMask16x32() + g := make([]int16, n) + f(a, b, k).AsInt16x32().StoreSlice(g) w := want(x, y) for i := range m { if !m[i] { @@ -541,20 +541,20 @@ func testUint8x64CompareMasked(t *testing.T, }) } -// testInt16x32CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// testInt32x16CompareMasked tests the simd masked comparison method f against the expected behavior generated by want // The mask is applied to the output of want; anything not in the mask, is zeroed. -func testInt16x32CompareMasked(t *testing.T, - f func(_, _ simd.Int16x32, m simd.Mask16x32) simd.Mask16x32, - want func(_, _ []int16) []int64) { - n := 32 +func testInt32x16CompareMasked(t *testing.T, + f func(_, _ simd.Int32x16, m simd.Mask32x16) simd.Mask32x16, + want func(_, _ []int32) []int64) { + n := 16 t.Helper() - forSlicePairMasked(t, int16s, n, func(x, y []int16, m []bool) bool { + forSlicePairMasked(t, int32s, n, func(x, y []int32, m []bool) bool { t.Helper() - a := simd.LoadInt16x32Slice(x) - b := simd.LoadInt16x32Slice(y) - k := simd.LoadInt16x32Slice(toVect[int16](m)).AsMask16x32() - g := make([]int16, n) - f(a, b, k).AsInt16x32().StoreSlice(g) + a := simd.LoadInt32x16Slice(x) + b := simd.LoadInt32x16Slice(y) + k := simd.LoadInt32x16Slice(toVect[int32](m)).AsMask32x16() + g := make([]int32, n) + f(a, b, k).AsInt32x16().StoreSlice(g) w := want(x, y) for i := range m { if !m[i] { @@ -565,20 +565,20 @@ func testInt16x32CompareMasked(t *testing.T, }) } -// testUint16x32CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// testInt64x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want // The mask is applied to the output of want; anything not in the mask, is zeroed. 
-func testUint16x32CompareMasked(t *testing.T, - f func(_, _ simd.Uint16x32, m simd.Mask16x32) simd.Mask16x32, - want func(_, _ []uint16) []int64) { - n := 32 +func testInt64x8CompareMasked(t *testing.T, + f func(_, _ simd.Int64x8, m simd.Mask64x8) simd.Mask64x8, + want func(_, _ []int64) []int64) { + n := 8 t.Helper() - forSlicePairMasked(t, uint16s, n, func(x, y []uint16, m []bool) bool { + forSlicePairMasked(t, int64s, n, func(x, y []int64, m []bool) bool { t.Helper() - a := simd.LoadUint16x32Slice(x) - b := simd.LoadUint16x32Slice(y) - k := simd.LoadInt16x32Slice(toVect[int16](m)).AsMask16x32() - g := make([]int16, n) - f(a, b, k).AsInt16x32().StoreSlice(g) + a := simd.LoadInt64x8Slice(x) + b := simd.LoadInt64x8Slice(y) + k := simd.LoadInt64x8Slice(toVect[int64](m)).AsMask64x8() + g := make([]int64, n) + f(a, b, k).AsInt64x8().StoreSlice(g) w := want(x, y) for i := range m { if !m[i] { @@ -589,20 +589,20 @@ func testUint16x32CompareMasked(t *testing.T, }) } -// testInt32x16CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// testUint8x64CompareMasked tests the simd masked comparison method f against the expected behavior generated by want // The mask is applied to the output of want; anything not in the mask, is zeroed. -func testInt32x16CompareMasked(t *testing.T, - f func(_, _ simd.Int32x16, m simd.Mask32x16) simd.Mask32x16, - want func(_, _ []int32) []int64) { - n := 16 +func testUint8x64CompareMasked(t *testing.T, + f func(_, _ simd.Uint8x64, m simd.Mask8x64) simd.Mask8x64, + want func(_, _ []uint8) []int64) { + n := 64 t.Helper() - forSlicePairMasked(t, int32s, n, func(x, y []int32, m []bool) bool { + forSlicePairMasked(t, uint8s, n, func(x, y []uint8, m []bool) bool { t.Helper() - a := simd.LoadInt32x16Slice(x) - b := simd.LoadInt32x16Slice(y) - k := simd.LoadInt32x16Slice(toVect[int32](m)).AsMask32x16() - g := make([]int32, n) - f(a, b, k).AsInt32x16().StoreSlice(g) + a := simd.LoadUint8x64Slice(x) + b := simd.LoadUint8x64Slice(y) + k := simd.LoadInt8x64Slice(toVect[int8](m)).AsMask8x64() + g := make([]int8, n) + f(a, b, k).AsInt8x64().StoreSlice(g) w := want(x, y) for i := range m { if !m[i] { @@ -613,20 +613,20 @@ func testInt32x16CompareMasked(t *testing.T, }) } -// testUint32x16CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// testUint16x32CompareMasked tests the simd masked comparison method f against the expected behavior generated by want // The mask is applied to the output of want; anything not in the mask, is zeroed. 
-func testUint32x16CompareMasked(t *testing.T, - f func(_, _ simd.Uint32x16, m simd.Mask32x16) simd.Mask32x16, - want func(_, _ []uint32) []int64) { - n := 16 +func testUint16x32CompareMasked(t *testing.T, + f func(_, _ simd.Uint16x32, m simd.Mask16x32) simd.Mask16x32, + want func(_, _ []uint16) []int64) { + n := 32 t.Helper() - forSlicePairMasked(t, uint32s, n, func(x, y []uint32, m []bool) bool { + forSlicePairMasked(t, uint16s, n, func(x, y []uint16, m []bool) bool { t.Helper() - a := simd.LoadUint32x16Slice(x) - b := simd.LoadUint32x16Slice(y) - k := simd.LoadInt32x16Slice(toVect[int32](m)).AsMask32x16() - g := make([]int32, n) - f(a, b, k).AsInt32x16().StoreSlice(g) + a := simd.LoadUint16x32Slice(x) + b := simd.LoadUint16x32Slice(y) + k := simd.LoadInt16x32Slice(toVect[int16](m)).AsMask16x32() + g := make([]int16, n) + f(a, b, k).AsInt16x32().StoreSlice(g) w := want(x, y) for i := range m { if !m[i] { @@ -637,20 +637,20 @@ func testUint32x16CompareMasked(t *testing.T, }) } -// testInt64x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// testUint32x16CompareMasked tests the simd masked comparison method f against the expected behavior generated by want // The mask is applied to the output of want; anything not in the mask, is zeroed. -func testInt64x8CompareMasked(t *testing.T, - f func(_, _ simd.Int64x8, m simd.Mask64x8) simd.Mask64x8, - want func(_, _ []int64) []int64) { - n := 8 +func testUint32x16CompareMasked(t *testing.T, + f func(_, _ simd.Uint32x16, m simd.Mask32x16) simd.Mask32x16, + want func(_, _ []uint32) []int64) { + n := 16 t.Helper() - forSlicePairMasked(t, int64s, n, func(x, y []int64, m []bool) bool { + forSlicePairMasked(t, uint32s, n, func(x, y []uint32, m []bool) bool { t.Helper() - a := simd.LoadInt64x8Slice(x) - b := simd.LoadInt64x8Slice(y) - k := simd.LoadInt64x8Slice(toVect[int64](m)).AsMask64x8() - g := make([]int64, n) - f(a, b, k).AsInt64x8().StoreSlice(g) + a := simd.LoadUint32x16Slice(x) + b := simd.LoadUint32x16Slice(y) + k := simd.LoadInt32x16Slice(toVect[int32](m)).AsMask32x16() + g := make([]int32, n) + f(a, b, k).AsInt32x16().StoreSlice(g) w := want(x, y) for i := range m { if !m[i] { diff --git a/src/simd/genfiles.go b/src/simd/genfiles.go index 8dac158fe43956..7106db2d315fd2 100644 --- a/src/simd/genfiles.go +++ b/src/simd/genfiles.go @@ -20,6 +20,34 @@ import ( "text/template" ) +// shapes describes a combination of vector widths and various element types +type shapes struct { + vecs []int // Vector bit width for this shape. 
+ ints []int // Int element bit width(s) for this shape + uints []int // Unsigned int element bit width(s) for this shape + floats []int // Float element bit width(s) for this shape +} + +// shapeAndTemplate is a template and the set of shapes on which it will be expanded +type shapeAndTemplate struct { + s *shapes + t *template.Template +} + +var allShapes = &shapes{ + vecs: []int{128, 256, 512}, + ints: []int{8, 16, 32, 64}, + uints: []int{8, 16, 32, 64}, + floats: []int{32, 64}, +} + +// these are the shapes that are currently converted to int32 +// (not all conversions are available, yet) +var toInt32Shapes = &shapes{ + vecs: []int{128, 256, 512}, + floats: []int{32}, +} + func oneTemplate(t *template.Template, baseType string, width, count int, out io.Writer) { b := width * count if b < 128 || b > 512 { @@ -34,12 +62,12 @@ func oneTemplate(t *template.Template, baseType string, width, count int, out io aOrAn = "an" } t.Execute(out, struct { - Vec string - AOrAn string - Width int - Count int - WxC string - Type string + Vec string // the type of the vector, e.g. Float32x4 + AOrAn string // for documentation, the article "a" or "an" + Width int // the bit width of the element type, e.g. 32 + Count int // the number of elements, e.g. 4 + WxC string // the width-by-type string, e.g., "32x4" + Type string // the element type, e.g. "float32" }{ Vec: vType, AOrAn: aOrAn, @@ -50,14 +78,21 @@ func oneTemplate(t *template.Template, baseType string, width, count int, out io }) } -func forTemplates(t *template.Template, out io.Writer) { - vecs := []int{128, 256, 512} - ints := []int{8, 16, 32, 64} - floats := []int{32, 64} +// forTemplates expands the template sat.t for each shape +// in sat.s, writing to out. +func (sat shapeAndTemplate) forTemplates(out io.Writer) { + t, s := sat.t, sat.s + vecs := s.vecs + ints := s.ints + uints := s.uints + floats := s.floats for _, v := range vecs { for _, w := range ints { c := v / w oneTemplate(t, "int", w, c, out) + } + for _, w := range uints { + c := v / w oneTemplate(t, "uint", w, c, out) } for _, w := range floats { @@ -114,8 +149,14 @@ func curryTestPrologue(t string) func(s string, out io.Writer) { // x.Store((*[16]uint8)(s[:16])) // } -func templateOf(name, temp string) *template.Template { - return template.Must(template.New(name).Parse(temp)) +func templateOf(name, temp string) shapeAndTemplate { + return shapeAndTemplate{s: allShapes, + t: template.Must(template.New(name).Parse(temp))} +} + +func shapedTemplateOf(s *shapes, name, temp string) shapeAndTemplate { + return shapeAndTemplate{s: s, + t: template.Must(template.New(name).Parse(temp))} } var sliceTemplate = templateOf("slice", ` @@ -146,6 +187,22 @@ func test{{.Vec}}Unary(t *testing.T, f func(_ simd.{{.Vec}}) simd.{{.Vec}}, want } `) +var unaryTemplateToInt32 = shapedTemplateOf(toInt32Shapes, "unary_int32_helpers", ` +// test{{.Vec}}Unary tests the simd unary method f against the expected behavior generated by want +func test{{.Vec}}UnaryToInt32(t *testing.T, f func(x simd.{{.Vec}}) simd.Int32x{{.Count}}, want func(x []{{.Type}}) []int32) { + n := {{.Count}} + t.Helper() + forSlice(t, {{.Type}}s, n, func(x []{{.Type}}) bool { + t.Helper() + a := simd.Load{{.Vec}}Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() {t.Helper(); t.Logf("x=%v", x)}) + }) +} +`) + var binaryTemplate = templateOf("binary_helpers", ` // test{{.Vec}}Binary tests the simd binary method f against the expected behavior generated by want func 
test{{.Vec}}Binary(t *testing.T, f func(_, _ simd.{{.Vec}}) simd.{{.Vec}}, want func(_, _ []{{.Type}}) []{{.Type}}) { @@ -254,7 +311,7 @@ func main() { } } -func one(filename string, prologue func(s string, out io.Writer), t *template.Template) { +func one(filename string, prologue func(s string, out io.Writer), sats ...shapeAndTemplate) { if filename == "" { return } @@ -273,7 +330,9 @@ func one(filename string, prologue func(s string, out io.Writer), t *template.Te out := new(bytes.Buffer) prologue("go run genfiles.go", out) - forTemplates(t, out) + for _, sat := range sats { + sat.forTemplates(out) + } b, err := format.Source(out.Bytes()) if err != nil { diff --git a/src/simd/slice_amd64.go b/src/simd/slice_amd64.go index 62564e44a2185a..ad7bce8964ddcd 100644 --- a/src/simd/slice_amd64.go +++ b/src/simd/slice_amd64.go @@ -14,16 +14,6 @@ func (x Int8x16) StoreSlice(s []int8) { x.Store((*[16]int8)(s)) } -// LoadUint8x16Slice loads an Uint8x16 from a slice of at least 16 uint8s -func LoadUint8x16Slice(s []uint8) Uint8x16 { - return LoadUint8x16((*[16]uint8)(s)) -} - -// StoreSlice stores x into a slice of at least 16 uint8s -func (x Uint8x16) StoreSlice(s []uint8) { - x.Store((*[16]uint8)(s)) -} - // LoadInt16x8Slice loads an Int16x8 from a slice of at least 8 int16s func LoadInt16x8Slice(s []int16) Int16x8 { return LoadInt16x8((*[8]int16)(s)) @@ -34,16 +24,6 @@ func (x Int16x8) StoreSlice(s []int16) { x.Store((*[8]int16)(s)) } -// LoadUint16x8Slice loads an Uint16x8 from a slice of at least 8 uint16s -func LoadUint16x8Slice(s []uint16) Uint16x8 { - return LoadUint16x8((*[8]uint16)(s)) -} - -// StoreSlice stores x into a slice of at least 8 uint16s -func (x Uint16x8) StoreSlice(s []uint16) { - x.Store((*[8]uint16)(s)) -} - // LoadInt32x4Slice loads an Int32x4 from a slice of at least 4 int32s func LoadInt32x4Slice(s []int32) Int32x4 { return LoadInt32x4((*[4]int32)(s)) @@ -54,16 +34,6 @@ func (x Int32x4) StoreSlice(s []int32) { x.Store((*[4]int32)(s)) } -// LoadUint32x4Slice loads an Uint32x4 from a slice of at least 4 uint32s -func LoadUint32x4Slice(s []uint32) Uint32x4 { - return LoadUint32x4((*[4]uint32)(s)) -} - -// StoreSlice stores x into a slice of at least 4 uint32s -func (x Uint32x4) StoreSlice(s []uint32) { - x.Store((*[4]uint32)(s)) -} - // LoadInt64x2Slice loads an Int64x2 from a slice of at least 2 int64s func LoadInt64x2Slice(s []int64) Int64x2 { return LoadInt64x2((*[2]int64)(s)) @@ -74,6 +44,36 @@ func (x Int64x2) StoreSlice(s []int64) { x.Store((*[2]int64)(s)) } +// LoadUint8x16Slice loads an Uint8x16 from a slice of at least 16 uint8s +func LoadUint8x16Slice(s []uint8) Uint8x16 { + return LoadUint8x16((*[16]uint8)(s)) +} + +// StoreSlice stores x into a slice of at least 16 uint8s +func (x Uint8x16) StoreSlice(s []uint8) { + x.Store((*[16]uint8)(s)) +} + +// LoadUint16x8Slice loads an Uint16x8 from a slice of at least 8 uint16s +func LoadUint16x8Slice(s []uint16) Uint16x8 { + return LoadUint16x8((*[8]uint16)(s)) +} + +// StoreSlice stores x into a slice of at least 8 uint16s +func (x Uint16x8) StoreSlice(s []uint16) { + x.Store((*[8]uint16)(s)) +} + +// LoadUint32x4Slice loads an Uint32x4 from a slice of at least 4 uint32s +func LoadUint32x4Slice(s []uint32) Uint32x4 { + return LoadUint32x4((*[4]uint32)(s)) +} + +// StoreSlice stores x into a slice of at least 4 uint32s +func (x Uint32x4) StoreSlice(s []uint32) { + x.Store((*[4]uint32)(s)) +} + // LoadUint64x2Slice loads an Uint64x2 from a slice of at least 2 uint64s func LoadUint64x2Slice(s []uint64) Uint64x2 { return 
LoadUint64x2((*[2]uint64)(s)) @@ -114,16 +114,6 @@ func (x Int8x32) StoreSlice(s []int8) { x.Store((*[32]int8)(s)) } -// LoadUint8x32Slice loads an Uint8x32 from a slice of at least 32 uint8s -func LoadUint8x32Slice(s []uint8) Uint8x32 { - return LoadUint8x32((*[32]uint8)(s)) -} - -// StoreSlice stores x into a slice of at least 32 uint8s -func (x Uint8x32) StoreSlice(s []uint8) { - x.Store((*[32]uint8)(s)) -} - // LoadInt16x16Slice loads an Int16x16 from a slice of at least 16 int16s func LoadInt16x16Slice(s []int16) Int16x16 { return LoadInt16x16((*[16]int16)(s)) @@ -134,16 +124,6 @@ func (x Int16x16) StoreSlice(s []int16) { x.Store((*[16]int16)(s)) } -// LoadUint16x16Slice loads an Uint16x16 from a slice of at least 16 uint16s -func LoadUint16x16Slice(s []uint16) Uint16x16 { - return LoadUint16x16((*[16]uint16)(s)) -} - -// StoreSlice stores x into a slice of at least 16 uint16s -func (x Uint16x16) StoreSlice(s []uint16) { - x.Store((*[16]uint16)(s)) -} - // LoadInt32x8Slice loads an Int32x8 from a slice of at least 8 int32s func LoadInt32x8Slice(s []int32) Int32x8 { return LoadInt32x8((*[8]int32)(s)) @@ -154,16 +134,6 @@ func (x Int32x8) StoreSlice(s []int32) { x.Store((*[8]int32)(s)) } -// LoadUint32x8Slice loads an Uint32x8 from a slice of at least 8 uint32s -func LoadUint32x8Slice(s []uint32) Uint32x8 { - return LoadUint32x8((*[8]uint32)(s)) -} - -// StoreSlice stores x into a slice of at least 8 uint32s -func (x Uint32x8) StoreSlice(s []uint32) { - x.Store((*[8]uint32)(s)) -} - // LoadInt64x4Slice loads an Int64x4 from a slice of at least 4 int64s func LoadInt64x4Slice(s []int64) Int64x4 { return LoadInt64x4((*[4]int64)(s)) @@ -174,6 +144,36 @@ func (x Int64x4) StoreSlice(s []int64) { x.Store((*[4]int64)(s)) } +// LoadUint8x32Slice loads an Uint8x32 from a slice of at least 32 uint8s +func LoadUint8x32Slice(s []uint8) Uint8x32 { + return LoadUint8x32((*[32]uint8)(s)) +} + +// StoreSlice stores x into a slice of at least 32 uint8s +func (x Uint8x32) StoreSlice(s []uint8) { + x.Store((*[32]uint8)(s)) +} + +// LoadUint16x16Slice loads an Uint16x16 from a slice of at least 16 uint16s +func LoadUint16x16Slice(s []uint16) Uint16x16 { + return LoadUint16x16((*[16]uint16)(s)) +} + +// StoreSlice stores x into a slice of at least 16 uint16s +func (x Uint16x16) StoreSlice(s []uint16) { + x.Store((*[16]uint16)(s)) +} + +// LoadUint32x8Slice loads an Uint32x8 from a slice of at least 8 uint32s +func LoadUint32x8Slice(s []uint32) Uint32x8 { + return LoadUint32x8((*[8]uint32)(s)) +} + +// StoreSlice stores x into a slice of at least 8 uint32s +func (x Uint32x8) StoreSlice(s []uint32) { + x.Store((*[8]uint32)(s)) +} + // LoadUint64x4Slice loads an Uint64x4 from a slice of at least 4 uint64s func LoadUint64x4Slice(s []uint64) Uint64x4 { return LoadUint64x4((*[4]uint64)(s)) @@ -214,16 +214,6 @@ func (x Int8x64) StoreSlice(s []int8) { x.Store((*[64]int8)(s)) } -// LoadUint8x64Slice loads an Uint8x64 from a slice of at least 64 uint8s -func LoadUint8x64Slice(s []uint8) Uint8x64 { - return LoadUint8x64((*[64]uint8)(s)) -} - -// StoreSlice stores x into a slice of at least 64 uint8s -func (x Uint8x64) StoreSlice(s []uint8) { - x.Store((*[64]uint8)(s)) -} - // LoadInt16x32Slice loads an Int16x32 from a slice of at least 32 int16s func LoadInt16x32Slice(s []int16) Int16x32 { return LoadInt16x32((*[32]int16)(s)) @@ -234,16 +224,6 @@ func (x Int16x32) StoreSlice(s []int16) { x.Store((*[32]int16)(s)) } -// LoadUint16x32Slice loads an Uint16x32 from a slice of at least 32 uint16s -func LoadUint16x32Slice(s 
[]uint16) Uint16x32 { - return LoadUint16x32((*[32]uint16)(s)) -} - -// StoreSlice stores x into a slice of at least 32 uint16s -func (x Uint16x32) StoreSlice(s []uint16) { - x.Store((*[32]uint16)(s)) -} - // LoadInt32x16Slice loads an Int32x16 from a slice of at least 16 int32s func LoadInt32x16Slice(s []int32) Int32x16 { return LoadInt32x16((*[16]int32)(s)) @@ -254,16 +234,6 @@ func (x Int32x16) StoreSlice(s []int32) { x.Store((*[16]int32)(s)) } -// LoadUint32x16Slice loads an Uint32x16 from a slice of at least 16 uint32s -func LoadUint32x16Slice(s []uint32) Uint32x16 { - return LoadUint32x16((*[16]uint32)(s)) -} - -// StoreSlice stores x into a slice of at least 16 uint32s -func (x Uint32x16) StoreSlice(s []uint32) { - x.Store((*[16]uint32)(s)) -} - // LoadInt64x8Slice loads an Int64x8 from a slice of at least 8 int64s func LoadInt64x8Slice(s []int64) Int64x8 { return LoadInt64x8((*[8]int64)(s)) @@ -274,6 +244,36 @@ func (x Int64x8) StoreSlice(s []int64) { x.Store((*[8]int64)(s)) } +// LoadUint8x64Slice loads an Uint8x64 from a slice of at least 64 uint8s +func LoadUint8x64Slice(s []uint8) Uint8x64 { + return LoadUint8x64((*[64]uint8)(s)) +} + +// StoreSlice stores x into a slice of at least 64 uint8s +func (x Uint8x64) StoreSlice(s []uint8) { + x.Store((*[64]uint8)(s)) +} + +// LoadUint16x32Slice loads an Uint16x32 from a slice of at least 32 uint16s +func LoadUint16x32Slice(s []uint16) Uint16x32 { + return LoadUint16x32((*[32]uint16)(s)) +} + +// StoreSlice stores x into a slice of at least 32 uint16s +func (x Uint16x32) StoreSlice(s []uint16) { + x.Store((*[32]uint16)(s)) +} + +// LoadUint32x16Slice loads an Uint32x16 from a slice of at least 16 uint32s +func LoadUint32x16Slice(s []uint32) Uint32x16 { + return LoadUint32x16((*[16]uint32)(s)) +} + +// StoreSlice stores x into a slice of at least 16 uint32s +func (x Uint32x16) StoreSlice(s []uint32) { + x.Store((*[16]uint32)(s)) +} + // LoadUint64x8Slice loads an Uint64x8 from a slice of at least 8 uint64s func LoadUint64x8Slice(s []uint64) Uint64x8 { return LoadUint64x8((*[8]uint64)(s)) diff --git a/src/simd/ternary_helpers_test.go b/src/simd/ternary_helpers_test.go index 5a7503860f0a25..e48ec2409c133c 100644 --- a/src/simd/ternary_helpers_test.go +++ b/src/simd/ternary_helpers_test.go @@ -29,22 +29,6 @@ func testInt8x16Ternary(t *testing.T, f func(_, _, _ simd.Int8x16) simd.Int8x16, }) } -// testUint8x16Ternary tests the simd ternary method f against the expected behavior generated by want -func testUint8x16Ternary(t *testing.T, f func(_, _, _ simd.Uint8x16) simd.Uint8x16, want func(_, _, _ []uint8) []uint8) { - n := 16 - t.Helper() - forSliceTriple(t, uint8s, n, func(x, y, z []uint8) bool { - t.Helper() - a := simd.LoadUint8x16Slice(x) - b := simd.LoadUint8x16Slice(y) - c := simd.LoadUint8x16Slice(z) - g := make([]uint8, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) - }) -} - // testInt16x8Ternary tests the simd ternary method f against the expected behavior generated by want func testInt16x8Ternary(t *testing.T, f func(_, _, _ simd.Int16x8) simd.Int16x8, want func(_, _, _ []int16) []int16) { n := 8 @@ -61,22 +45,6 @@ func testInt16x8Ternary(t *testing.T, f func(_, _, _ simd.Int16x8) simd.Int16x8, }) } -// testUint16x8Ternary tests the simd ternary method f against the expected behavior generated by want -func testUint16x8Ternary(t *testing.T, f func(_, _, _ simd.Uint16x8) simd.Uint16x8, want func(_, _, _ 
[]uint16) []uint16) { - n := 8 - t.Helper() - forSliceTriple(t, uint16s, n, func(x, y, z []uint16) bool { - t.Helper() - a := simd.LoadUint16x8Slice(x) - b := simd.LoadUint16x8Slice(y) - c := simd.LoadUint16x8Slice(z) - g := make([]uint16, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) - }) -} - // testInt32x4Ternary tests the simd ternary method f against the expected behavior generated by want func testInt32x4Ternary(t *testing.T, f func(_, _, _ simd.Int32x4) simd.Int32x4, want func(_, _, _ []int32) []int32) { n := 4 @@ -93,22 +61,6 @@ func testInt32x4Ternary(t *testing.T, f func(_, _, _ simd.Int32x4) simd.Int32x4, }) } -// testUint32x4Ternary tests the simd ternary method f against the expected behavior generated by want -func testUint32x4Ternary(t *testing.T, f func(_, _, _ simd.Uint32x4) simd.Uint32x4, want func(_, _, _ []uint32) []uint32) { - n := 4 - t.Helper() - forSliceTriple(t, uint32s, n, func(x, y, z []uint32) bool { - t.Helper() - a := simd.LoadUint32x4Slice(x) - b := simd.LoadUint32x4Slice(y) - c := simd.LoadUint32x4Slice(z) - g := make([]uint32, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) - }) -} - // testInt64x2Ternary tests the simd ternary method f against the expected behavior generated by want func testInt64x2Ternary(t *testing.T, f func(_, _, _ simd.Int64x2) simd.Int64x2, want func(_, _, _ []int64) []int64) { n := 2 @@ -125,6 +77,54 @@ func testInt64x2Ternary(t *testing.T, f func(_, _, _ simd.Int64x2) simd.Int64x2, }) } +// testUint8x16Ternary tests the simd ternary method f against the expected behavior generated by want +func testUint8x16Ternary(t *testing.T, f func(_, _, _ simd.Uint8x16) simd.Uint8x16, want func(_, _, _ []uint8) []uint8) { + n := 16 + t.Helper() + forSliceTriple(t, uint8s, n, func(x, y, z []uint8) bool { + t.Helper() + a := simd.LoadUint8x16Slice(x) + b := simd.LoadUint8x16Slice(y) + c := simd.LoadUint8x16Slice(z) + g := make([]uint8, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testUint16x8Ternary tests the simd ternary method f against the expected behavior generated by want +func testUint16x8Ternary(t *testing.T, f func(_, _, _ simd.Uint16x8) simd.Uint16x8, want func(_, _, _ []uint16) []uint16) { + n := 8 + t.Helper() + forSliceTriple(t, uint16s, n, func(x, y, z []uint16) bool { + t.Helper() + a := simd.LoadUint16x8Slice(x) + b := simd.LoadUint16x8Slice(y) + c := simd.LoadUint16x8Slice(z) + g := make([]uint16, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testUint32x4Ternary tests the simd ternary method f against the expected behavior generated by want +func testUint32x4Ternary(t *testing.T, f func(_, _, _ simd.Uint32x4) simd.Uint32x4, want func(_, _, _ []uint32) []uint32) { + n := 4 + t.Helper() + forSliceTriple(t, uint32s, n, func(x, y, z []uint32) bool { + t.Helper() + a := simd.LoadUint32x4Slice(x) + b := simd.LoadUint32x4Slice(y) + c := simd.LoadUint32x4Slice(z) + g := make([]uint32, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); 
t.Logf("z=%v", z) }) + }) +} + // testUint64x2Ternary tests the simd ternary method f against the expected behavior generated by want func testUint64x2Ternary(t *testing.T, f func(_, _, _ simd.Uint64x2) simd.Uint64x2, want func(_, _, _ []uint64) []uint64) { n := 2 @@ -189,22 +189,6 @@ func testInt8x32Ternary(t *testing.T, f func(_, _, _ simd.Int8x32) simd.Int8x32, }) } -// testUint8x32Ternary tests the simd ternary method f against the expected behavior generated by want -func testUint8x32Ternary(t *testing.T, f func(_, _, _ simd.Uint8x32) simd.Uint8x32, want func(_, _, _ []uint8) []uint8) { - n := 32 - t.Helper() - forSliceTriple(t, uint8s, n, func(x, y, z []uint8) bool { - t.Helper() - a := simd.LoadUint8x32Slice(x) - b := simd.LoadUint8x32Slice(y) - c := simd.LoadUint8x32Slice(z) - g := make([]uint8, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) - }) -} - // testInt16x16Ternary tests the simd ternary method f against the expected behavior generated by want func testInt16x16Ternary(t *testing.T, f func(_, _, _ simd.Int16x16) simd.Int16x16, want func(_, _, _ []int16) []int16) { n := 16 @@ -221,22 +205,6 @@ func testInt16x16Ternary(t *testing.T, f func(_, _, _ simd.Int16x16) simd.Int16x }) } -// testUint16x16Ternary tests the simd ternary method f against the expected behavior generated by want -func testUint16x16Ternary(t *testing.T, f func(_, _, _ simd.Uint16x16) simd.Uint16x16, want func(_, _, _ []uint16) []uint16) { - n := 16 - t.Helper() - forSliceTriple(t, uint16s, n, func(x, y, z []uint16) bool { - t.Helper() - a := simd.LoadUint16x16Slice(x) - b := simd.LoadUint16x16Slice(y) - c := simd.LoadUint16x16Slice(z) - g := make([]uint16, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) - }) -} - // testInt32x8Ternary tests the simd ternary method f against the expected behavior generated by want func testInt32x8Ternary(t *testing.T, f func(_, _, _ simd.Int32x8) simd.Int32x8, want func(_, _, _ []int32) []int32) { n := 8 @@ -253,22 +221,6 @@ func testInt32x8Ternary(t *testing.T, f func(_, _, _ simd.Int32x8) simd.Int32x8, }) } -// testUint32x8Ternary tests the simd ternary method f against the expected behavior generated by want -func testUint32x8Ternary(t *testing.T, f func(_, _, _ simd.Uint32x8) simd.Uint32x8, want func(_, _, _ []uint32) []uint32) { - n := 8 - t.Helper() - forSliceTriple(t, uint32s, n, func(x, y, z []uint32) bool { - t.Helper() - a := simd.LoadUint32x8Slice(x) - b := simd.LoadUint32x8Slice(y) - c := simd.LoadUint32x8Slice(z) - g := make([]uint32, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) - }) -} - // testInt64x4Ternary tests the simd ternary method f against the expected behavior generated by want func testInt64x4Ternary(t *testing.T, f func(_, _, _ simd.Int64x4) simd.Int64x4, want func(_, _, _ []int64) []int64) { n := 4 @@ -285,6 +237,54 @@ func testInt64x4Ternary(t *testing.T, f func(_, _, _ simd.Int64x4) simd.Int64x4, }) } +// testUint8x32Ternary tests the simd ternary method f against the expected behavior generated by want +func testUint8x32Ternary(t *testing.T, f func(_, _, _ simd.Uint8x32) simd.Uint8x32, want func(_, _, _ []uint8) []uint8) { + n := 32 + t.Helper() + forSliceTriple(t, uint8s, n, func(x, 
y, z []uint8) bool { + t.Helper() + a := simd.LoadUint8x32Slice(x) + b := simd.LoadUint8x32Slice(y) + c := simd.LoadUint8x32Slice(z) + g := make([]uint8, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testUint16x16Ternary tests the simd ternary method f against the expected behavior generated by want +func testUint16x16Ternary(t *testing.T, f func(_, _, _ simd.Uint16x16) simd.Uint16x16, want func(_, _, _ []uint16) []uint16) { + n := 16 + t.Helper() + forSliceTriple(t, uint16s, n, func(x, y, z []uint16) bool { + t.Helper() + a := simd.LoadUint16x16Slice(x) + b := simd.LoadUint16x16Slice(y) + c := simd.LoadUint16x16Slice(z) + g := make([]uint16, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testUint32x8Ternary tests the simd ternary method f against the expected behavior generated by want +func testUint32x8Ternary(t *testing.T, f func(_, _, _ simd.Uint32x8) simd.Uint32x8, want func(_, _, _ []uint32) []uint32) { + n := 8 + t.Helper() + forSliceTriple(t, uint32s, n, func(x, y, z []uint32) bool { + t.Helper() + a := simd.LoadUint32x8Slice(x) + b := simd.LoadUint32x8Slice(y) + c := simd.LoadUint32x8Slice(z) + g := make([]uint32, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + // testUint64x4Ternary tests the simd ternary method f against the expected behavior generated by want func testUint64x4Ternary(t *testing.T, f func(_, _, _ simd.Uint64x4) simd.Uint64x4, want func(_, _, _ []uint64) []uint64) { n := 4 @@ -349,22 +349,6 @@ func testInt8x64Ternary(t *testing.T, f func(_, _, _ simd.Int8x64) simd.Int8x64, }) } -// testUint8x64Ternary tests the simd ternary method f against the expected behavior generated by want -func testUint8x64Ternary(t *testing.T, f func(_, _, _ simd.Uint8x64) simd.Uint8x64, want func(_, _, _ []uint8) []uint8) { - n := 64 - t.Helper() - forSliceTriple(t, uint8s, n, func(x, y, z []uint8) bool { - t.Helper() - a := simd.LoadUint8x64Slice(x) - b := simd.LoadUint8x64Slice(y) - c := simd.LoadUint8x64Slice(z) - g := make([]uint8, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) - }) -} - // testInt16x32Ternary tests the simd ternary method f against the expected behavior generated by want func testInt16x32Ternary(t *testing.T, f func(_, _, _ simd.Int16x32) simd.Int16x32, want func(_, _, _ []int16) []int16) { n := 32 @@ -381,22 +365,6 @@ func testInt16x32Ternary(t *testing.T, f func(_, _, _ simd.Int16x32) simd.Int16x }) } -// testUint16x32Ternary tests the simd ternary method f against the expected behavior generated by want -func testUint16x32Ternary(t *testing.T, f func(_, _, _ simd.Uint16x32) simd.Uint16x32, want func(_, _, _ []uint16) []uint16) { - n := 32 - t.Helper() - forSliceTriple(t, uint16s, n, func(x, y, z []uint16) bool { - t.Helper() - a := simd.LoadUint16x32Slice(x) - b := simd.LoadUint16x32Slice(y) - c := simd.LoadUint16x32Slice(z) - g := make([]uint16, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) - }) -} - // testInt32x16Ternary 
tests the simd ternary method f against the expected behavior generated by want func testInt32x16Ternary(t *testing.T, f func(_, _, _ simd.Int32x16) simd.Int32x16, want func(_, _, _ []int32) []int32) { n := 16 @@ -413,22 +381,6 @@ func testInt32x16Ternary(t *testing.T, f func(_, _, _ simd.Int32x16) simd.Int32x }) } -// testUint32x16Ternary tests the simd ternary method f against the expected behavior generated by want -func testUint32x16Ternary(t *testing.T, f func(_, _, _ simd.Uint32x16) simd.Uint32x16, want func(_, _, _ []uint32) []uint32) { - n := 16 - t.Helper() - forSliceTriple(t, uint32s, n, func(x, y, z []uint32) bool { - t.Helper() - a := simd.LoadUint32x16Slice(x) - b := simd.LoadUint32x16Slice(y) - c := simd.LoadUint32x16Slice(z) - g := make([]uint32, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) - }) -} - // testInt64x8Ternary tests the simd ternary method f against the expected behavior generated by want func testInt64x8Ternary(t *testing.T, f func(_, _, _ simd.Int64x8) simd.Int64x8, want func(_, _, _ []int64) []int64) { n := 8 @@ -445,6 +397,54 @@ func testInt64x8Ternary(t *testing.T, f func(_, _, _ simd.Int64x8) simd.Int64x8, }) } +// testUint8x64Ternary tests the simd ternary method f against the expected behavior generated by want +func testUint8x64Ternary(t *testing.T, f func(_, _, _ simd.Uint8x64) simd.Uint8x64, want func(_, _, _ []uint8) []uint8) { + n := 64 + t.Helper() + forSliceTriple(t, uint8s, n, func(x, y, z []uint8) bool { + t.Helper() + a := simd.LoadUint8x64Slice(x) + b := simd.LoadUint8x64Slice(y) + c := simd.LoadUint8x64Slice(z) + g := make([]uint8, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testUint16x32Ternary tests the simd ternary method f against the expected behavior generated by want +func testUint16x32Ternary(t *testing.T, f func(_, _, _ simd.Uint16x32) simd.Uint16x32, want func(_, _, _ []uint16) []uint16) { + n := 32 + t.Helper() + forSliceTriple(t, uint16s, n, func(x, y, z []uint16) bool { + t.Helper() + a := simd.LoadUint16x32Slice(x) + b := simd.LoadUint16x32Slice(y) + c := simd.LoadUint16x32Slice(z) + g := make([]uint16, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testUint32x16Ternary tests the simd ternary method f against the expected behavior generated by want +func testUint32x16Ternary(t *testing.T, f func(_, _, _ simd.Uint32x16) simd.Uint32x16, want func(_, _, _ []uint32) []uint32) { + n := 16 + t.Helper() + forSliceTriple(t, uint32s, n, func(x, y, z []uint32) bool { + t.Helper() + a := simd.LoadUint32x16Slice(x) + b := simd.LoadUint32x16Slice(y) + c := simd.LoadUint32x16Slice(z) + g := make([]uint32, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + // testUint64x8Ternary tests the simd ternary method f against the expected behavior generated by want func testUint64x8Ternary(t *testing.T, f func(_, _, _ simd.Uint64x8) simd.Uint64x8, want func(_, _, _ []uint64) []uint64) { n := 8 diff --git a/src/simd/unary_helpers_test.go b/src/simd/unary_helpers_test.go index 2ee39b9a222915..cdc5151a216826 100644 --- 
a/src/simd/unary_helpers_test.go +++ b/src/simd/unary_helpers_test.go @@ -27,84 +27,84 @@ func testInt8x16Unary(t *testing.T, f func(_ simd.Int8x16) simd.Int8x16, want fu }) } -// testUint8x16Unary tests the simd unary method f against the expected behavior generated by want -func testUint8x16Unary(t *testing.T, f func(_ simd.Uint8x16) simd.Uint8x16, want func(_ []uint8) []uint8) { - n := 16 +// testInt16x8Unary tests the simd unary method f against the expected behavior generated by want +func testInt16x8Unary(t *testing.T, f func(_ simd.Int16x8) simd.Int16x8, want func(_ []int16) []int16) { + n := 8 t.Helper() - forSlice(t, uint8s, n, func(x []uint8) bool { + forSlice(t, int16s, n, func(x []int16) bool { t.Helper() - a := simd.LoadUint8x16Slice(x) - g := make([]uint8, n) + a := simd.LoadInt16x8Slice(x) + g := make([]int16, n) f(a).StoreSlice(g) w := want(x) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) }) } -// testInt16x8Unary tests the simd unary method f against the expected behavior generated by want -func testInt16x8Unary(t *testing.T, f func(_ simd.Int16x8) simd.Int16x8, want func(_ []int16) []int16) { - n := 8 +// testInt32x4Unary tests the simd unary method f against the expected behavior generated by want +func testInt32x4Unary(t *testing.T, f func(_ simd.Int32x4) simd.Int32x4, want func(_ []int32) []int32) { + n := 4 t.Helper() - forSlice(t, int16s, n, func(x []int16) bool { + forSlice(t, int32s, n, func(x []int32) bool { t.Helper() - a := simd.LoadInt16x8Slice(x) - g := make([]int16, n) + a := simd.LoadInt32x4Slice(x) + g := make([]int32, n) f(a).StoreSlice(g) w := want(x) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) }) } -// testUint16x8Unary tests the simd unary method f against the expected behavior generated by want -func testUint16x8Unary(t *testing.T, f func(_ simd.Uint16x8) simd.Uint16x8, want func(_ []uint16) []uint16) { - n := 8 +// testInt64x2Unary tests the simd unary method f against the expected behavior generated by want +func testInt64x2Unary(t *testing.T, f func(_ simd.Int64x2) simd.Int64x2, want func(_ []int64) []int64) { + n := 2 t.Helper() - forSlice(t, uint16s, n, func(x []uint16) bool { + forSlice(t, int64s, n, func(x []int64) bool { t.Helper() - a := simd.LoadUint16x8Slice(x) - g := make([]uint16, n) + a := simd.LoadInt64x2Slice(x) + g := make([]int64, n) f(a).StoreSlice(g) w := want(x) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) }) } -// testInt32x4Unary tests the simd unary method f against the expected behavior generated by want -func testInt32x4Unary(t *testing.T, f func(_ simd.Int32x4) simd.Int32x4, want func(_ []int32) []int32) { - n := 4 +// testUint8x16Unary tests the simd unary method f against the expected behavior generated by want +func testUint8x16Unary(t *testing.T, f func(_ simd.Uint8x16) simd.Uint8x16, want func(_ []uint8) []uint8) { + n := 16 t.Helper() - forSlice(t, int32s, n, func(x []int32) bool { + forSlice(t, uint8s, n, func(x []uint8) bool { t.Helper() - a := simd.LoadInt32x4Slice(x) - g := make([]int32, n) + a := simd.LoadUint8x16Slice(x) + g := make([]uint8, n) f(a).StoreSlice(g) w := want(x) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) }) } -// testUint32x4Unary tests the simd unary method f against the expected behavior generated by want -func testUint32x4Unary(t *testing.T, f func(_ simd.Uint32x4) simd.Uint32x4, want func(_ []uint32) []uint32) { - n := 4 +// testUint16x8Unary tests the simd unary 
method f against the expected behavior generated by want +func testUint16x8Unary(t *testing.T, f func(_ simd.Uint16x8) simd.Uint16x8, want func(_ []uint16) []uint16) { + n := 8 t.Helper() - forSlice(t, uint32s, n, func(x []uint32) bool { + forSlice(t, uint16s, n, func(x []uint16) bool { t.Helper() - a := simd.LoadUint32x4Slice(x) - g := make([]uint32, n) + a := simd.LoadUint16x8Slice(x) + g := make([]uint16, n) f(a).StoreSlice(g) w := want(x) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) }) } -// testInt64x2Unary tests the simd unary method f against the expected behavior generated by want -func testInt64x2Unary(t *testing.T, f func(_ simd.Int64x2) simd.Int64x2, want func(_ []int64) []int64) { - n := 2 +// testUint32x4Unary tests the simd unary method f against the expected behavior generated by want +func testUint32x4Unary(t *testing.T, f func(_ simd.Uint32x4) simd.Uint32x4, want func(_ []uint32) []uint32) { + n := 4 t.Helper() - forSlice(t, int64s, n, func(x []int64) bool { + forSlice(t, uint32s, n, func(x []uint32) bool { t.Helper() - a := simd.LoadInt64x2Slice(x) - g := make([]int64, n) + a := simd.LoadUint32x4Slice(x) + g := make([]uint32, n) f(a).StoreSlice(g) w := want(x) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) @@ -167,84 +167,84 @@ func testInt8x32Unary(t *testing.T, f func(_ simd.Int8x32) simd.Int8x32, want fu }) } -// testUint8x32Unary tests the simd unary method f against the expected behavior generated by want -func testUint8x32Unary(t *testing.T, f func(_ simd.Uint8x32) simd.Uint8x32, want func(_ []uint8) []uint8) { - n := 32 +// testInt16x16Unary tests the simd unary method f against the expected behavior generated by want +func testInt16x16Unary(t *testing.T, f func(_ simd.Int16x16) simd.Int16x16, want func(_ []int16) []int16) { + n := 16 t.Helper() - forSlice(t, uint8s, n, func(x []uint8) bool { + forSlice(t, int16s, n, func(x []int16) bool { t.Helper() - a := simd.LoadUint8x32Slice(x) - g := make([]uint8, n) + a := simd.LoadInt16x16Slice(x) + g := make([]int16, n) f(a).StoreSlice(g) w := want(x) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) }) } -// testInt16x16Unary tests the simd unary method f against the expected behavior generated by want -func testInt16x16Unary(t *testing.T, f func(_ simd.Int16x16) simd.Int16x16, want func(_ []int16) []int16) { - n := 16 +// testInt32x8Unary tests the simd unary method f against the expected behavior generated by want +func testInt32x8Unary(t *testing.T, f func(_ simd.Int32x8) simd.Int32x8, want func(_ []int32) []int32) { + n := 8 t.Helper() - forSlice(t, int16s, n, func(x []int16) bool { + forSlice(t, int32s, n, func(x []int32) bool { t.Helper() - a := simd.LoadInt16x16Slice(x) - g := make([]int16, n) + a := simd.LoadInt32x8Slice(x) + g := make([]int32, n) f(a).StoreSlice(g) w := want(x) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) }) } -// testUint16x16Unary tests the simd unary method f against the expected behavior generated by want -func testUint16x16Unary(t *testing.T, f func(_ simd.Uint16x16) simd.Uint16x16, want func(_ []uint16) []uint16) { - n := 16 +// testInt64x4Unary tests the simd unary method f against the expected behavior generated by want +func testInt64x4Unary(t *testing.T, f func(_ simd.Int64x4) simd.Int64x4, want func(_ []int64) []int64) { + n := 4 t.Helper() - forSlice(t, uint16s, n, func(x []uint16) bool { + forSlice(t, int64s, n, func(x []int64) bool { t.Helper() - a := 
simd.LoadUint16x16Slice(x) - g := make([]uint16, n) + a := simd.LoadInt64x4Slice(x) + g := make([]int64, n) f(a).StoreSlice(g) w := want(x) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) }) } -// testInt32x8Unary tests the simd unary method f against the expected behavior generated by want -func testInt32x8Unary(t *testing.T, f func(_ simd.Int32x8) simd.Int32x8, want func(_ []int32) []int32) { - n := 8 +// testUint8x32Unary tests the simd unary method f against the expected behavior generated by want +func testUint8x32Unary(t *testing.T, f func(_ simd.Uint8x32) simd.Uint8x32, want func(_ []uint8) []uint8) { + n := 32 t.Helper() - forSlice(t, int32s, n, func(x []int32) bool { + forSlice(t, uint8s, n, func(x []uint8) bool { t.Helper() - a := simd.LoadInt32x8Slice(x) - g := make([]int32, n) + a := simd.LoadUint8x32Slice(x) + g := make([]uint8, n) f(a).StoreSlice(g) w := want(x) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) }) } -// testUint32x8Unary tests the simd unary method f against the expected behavior generated by want -func testUint32x8Unary(t *testing.T, f func(_ simd.Uint32x8) simd.Uint32x8, want func(_ []uint32) []uint32) { - n := 8 +// testUint16x16Unary tests the simd unary method f against the expected behavior generated by want +func testUint16x16Unary(t *testing.T, f func(_ simd.Uint16x16) simd.Uint16x16, want func(_ []uint16) []uint16) { + n := 16 t.Helper() - forSlice(t, uint32s, n, func(x []uint32) bool { + forSlice(t, uint16s, n, func(x []uint16) bool { t.Helper() - a := simd.LoadUint32x8Slice(x) - g := make([]uint32, n) + a := simd.LoadUint16x16Slice(x) + g := make([]uint16, n) f(a).StoreSlice(g) w := want(x) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) }) } -// testInt64x4Unary tests the simd unary method f against the expected behavior generated by want -func testInt64x4Unary(t *testing.T, f func(_ simd.Int64x4) simd.Int64x4, want func(_ []int64) []int64) { - n := 4 +// testUint32x8Unary tests the simd unary method f against the expected behavior generated by want +func testUint32x8Unary(t *testing.T, f func(_ simd.Uint32x8) simd.Uint32x8, want func(_ []uint32) []uint32) { + n := 8 t.Helper() - forSlice(t, int64s, n, func(x []int64) bool { + forSlice(t, uint32s, n, func(x []uint32) bool { t.Helper() - a := simd.LoadInt64x4Slice(x) - g := make([]int64, n) + a := simd.LoadUint32x8Slice(x) + g := make([]uint32, n) f(a).StoreSlice(g) w := want(x) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) @@ -307,84 +307,84 @@ func testInt8x64Unary(t *testing.T, f func(_ simd.Int8x64) simd.Int8x64, want fu }) } -// testUint8x64Unary tests the simd unary method f against the expected behavior generated by want -func testUint8x64Unary(t *testing.T, f func(_ simd.Uint8x64) simd.Uint8x64, want func(_ []uint8) []uint8) { - n := 64 +// testInt16x32Unary tests the simd unary method f against the expected behavior generated by want +func testInt16x32Unary(t *testing.T, f func(_ simd.Int16x32) simd.Int16x32, want func(_ []int16) []int16) { + n := 32 t.Helper() - forSlice(t, uint8s, n, func(x []uint8) bool { + forSlice(t, int16s, n, func(x []int16) bool { t.Helper() - a := simd.LoadUint8x64Slice(x) - g := make([]uint8, n) + a := simd.LoadInt16x32Slice(x) + g := make([]int16, n) f(a).StoreSlice(g) w := want(x) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) }) } -// testInt16x32Unary tests the simd unary method f against the expected behavior 
generated by want -func testInt16x32Unary(t *testing.T, f func(_ simd.Int16x32) simd.Int16x32, want func(_ []int16) []int16) { - n := 32 +// testInt32x16Unary tests the simd unary method f against the expected behavior generated by want +func testInt32x16Unary(t *testing.T, f func(_ simd.Int32x16) simd.Int32x16, want func(_ []int32) []int32) { + n := 16 t.Helper() - forSlice(t, int16s, n, func(x []int16) bool { + forSlice(t, int32s, n, func(x []int32) bool { t.Helper() - a := simd.LoadInt16x32Slice(x) - g := make([]int16, n) + a := simd.LoadInt32x16Slice(x) + g := make([]int32, n) f(a).StoreSlice(g) w := want(x) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) }) } -// testUint16x32Unary tests the simd unary method f against the expected behavior generated by want -func testUint16x32Unary(t *testing.T, f func(_ simd.Uint16x32) simd.Uint16x32, want func(_ []uint16) []uint16) { - n := 32 +// testInt64x8Unary tests the simd unary method f against the expected behavior generated by want +func testInt64x8Unary(t *testing.T, f func(_ simd.Int64x8) simd.Int64x8, want func(_ []int64) []int64) { + n := 8 t.Helper() - forSlice(t, uint16s, n, func(x []uint16) bool { + forSlice(t, int64s, n, func(x []int64) bool { t.Helper() - a := simd.LoadUint16x32Slice(x) - g := make([]uint16, n) + a := simd.LoadInt64x8Slice(x) + g := make([]int64, n) f(a).StoreSlice(g) w := want(x) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) }) } -// testInt32x16Unary tests the simd unary method f against the expected behavior generated by want -func testInt32x16Unary(t *testing.T, f func(_ simd.Int32x16) simd.Int32x16, want func(_ []int32) []int32) { - n := 16 +// testUint8x64Unary tests the simd unary method f against the expected behavior generated by want +func testUint8x64Unary(t *testing.T, f func(_ simd.Uint8x64) simd.Uint8x64, want func(_ []uint8) []uint8) { + n := 64 t.Helper() - forSlice(t, int32s, n, func(x []int32) bool { + forSlice(t, uint8s, n, func(x []uint8) bool { t.Helper() - a := simd.LoadInt32x16Slice(x) - g := make([]int32, n) + a := simd.LoadUint8x64Slice(x) + g := make([]uint8, n) f(a).StoreSlice(g) w := want(x) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) }) } -// testUint32x16Unary tests the simd unary method f against the expected behavior generated by want -func testUint32x16Unary(t *testing.T, f func(_ simd.Uint32x16) simd.Uint32x16, want func(_ []uint32) []uint32) { - n := 16 +// testUint16x32Unary tests the simd unary method f against the expected behavior generated by want +func testUint16x32Unary(t *testing.T, f func(_ simd.Uint16x32) simd.Uint16x32, want func(_ []uint16) []uint16) { + n := 32 t.Helper() - forSlice(t, uint32s, n, func(x []uint32) bool { + forSlice(t, uint16s, n, func(x []uint16) bool { t.Helper() - a := simd.LoadUint32x16Slice(x) - g := make([]uint32, n) + a := simd.LoadUint16x32Slice(x) + g := make([]uint16, n) f(a).StoreSlice(g) w := want(x) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) }) } -// testInt64x8Unary tests the simd unary method f against the expected behavior generated by want -func testInt64x8Unary(t *testing.T, f func(_ simd.Int64x8) simd.Int64x8, want func(_ []int64) []int64) { - n := 8 +// testUint32x16Unary tests the simd unary method f against the expected behavior generated by want +func testUint32x16Unary(t *testing.T, f func(_ simd.Uint32x16) simd.Uint32x16, want func(_ []uint32) []uint32) { + n := 16 t.Helper() - forSlice(t, int64s, 
n, func(x []int64) bool { + forSlice(t, uint32s, n, func(x []uint32) bool { t.Helper() - a := simd.LoadInt64x8Slice(x) - g := make([]int64, n) + a := simd.LoadUint32x16Slice(x) + g := make([]uint32, n) f(a).StoreSlice(g) w := want(x) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) From 09ff25e3508287970940645b97e4d88e92bb5407 Mon Sep 17 00:00:00 2001 From: David Chase Date: Tue, 22 Jul 2025 16:39:42 -0400 Subject: [PATCH 102/139] [dev.simd] simd: add tests for simd conversions to Int32/Uint32. Change-Id: I71a6c6708e19d210f1fbdc72379f8215356ff02e Reviewed-on: https://go-review.googlesource.com/c/go/+/689718 Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI --- src/simd/genfiles.go | 22 ++++++-- src/simd/simulation_helpers_test.go | 28 ++++++++++ src/simd/unary_helpers_test.go | 84 +++++++++++++++++++++++++++++ src/simd/unary_test.go | 14 +++++ 4 files changed, 145 insertions(+), 3 deletions(-) diff --git a/src/simd/genfiles.go b/src/simd/genfiles.go index 7106db2d315fd2..76f16392e67103 100644 --- a/src/simd/genfiles.go +++ b/src/simd/genfiles.go @@ -43,7 +43,7 @@ var allShapes = &shapes{ // these are the shapes that are currently converted to int32 // (not all conversions are available, yet) -var toInt32Shapes = &shapes{ +var convert32Shapes = &shapes{ vecs: []int{128, 256, 512}, floats: []int{32}, } @@ -187,7 +187,7 @@ func test{{.Vec}}Unary(t *testing.T, f func(_ simd.{{.Vec}}) simd.{{.Vec}}, want } `) -var unaryTemplateToInt32 = shapedTemplateOf(toInt32Shapes, "unary_int32_helpers", ` +var unaryTemplateToInt32 = shapedTemplateOf(convert32Shapes, "unary_int32_helpers", ` // test{{.Vec}}Unary tests the simd unary method f against the expected behavior generated by want func test{{.Vec}}UnaryToInt32(t *testing.T, f func(x simd.{{.Vec}}) simd.Int32x{{.Count}}, want func(x []{{.Type}}) []int32) { n := {{.Count}} @@ -203,6 +203,22 @@ func test{{.Vec}}UnaryToInt32(t *testing.T, f func(x simd.{{.Vec}}) simd.Int32x{ } `) +var unaryTemplateToUint32 = shapedTemplateOf(convert32Shapes, "unary_uint32_helpers", ` +// test{{.Vec}}Unary tests the simd unary method f against the expected behavior generated by want +func test{{.Vec}}UnaryToUint32(t *testing.T, f func(x simd.{{.Vec}}) simd.Uint32x{{.Count}}, want func(x []{{.Type}}) []uint32) { + n := {{.Count}} + t.Helper() + forSlice(t, {{.Type}}s, n, func(x []{{.Type}}) bool { + t.Helper() + a := simd.Load{{.Vec}}Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() {t.Helper(); t.Logf("x=%v", x)}) + }) +} +`) + var binaryTemplate = templateOf("binary_helpers", ` // test{{.Vec}}Binary tests the simd binary method f against the expected behavior generated by want func test{{.Vec}}Binary(t *testing.T, f func(_, _ simd.{{.Vec}}) simd.{{.Vec}}, want func(_, _ []{{.Type}}) []{{.Type}}) { @@ -295,7 +311,7 @@ func main() { one(*sl, prologue, sliceTemplate) } if *uh != "" { - one(*uh, curryTestPrologue("unary simd methods"), unaryTemplate) + one(*uh, curryTestPrologue("unary simd methods"), unaryTemplate, unaryTemplateToInt32, unaryTemplateToUint32) } if *bh != "" { one(*bh, curryTestPrologue("binary simd methods"), binaryTemplate) diff --git a/src/simd/simulation_helpers_test.go b/src/simd/simulation_helpers_test.go index 1def39cd92bdf9..ec3d7952490de0 100644 --- a/src/simd/simulation_helpers_test.go +++ b/src/simd/simulation_helpers_test.go @@ -106,6 +106,26 @@ func fma[T float](x, y, z T) T { return T(math.FMA(float64(x), float64(y), float64(z))) } +func toInt32[T 
number](x T) int32 { + return int32(x) +} + +func toUint32[T number](x T) uint32 { + switch y := (any(x)).(type) { + case float32: + if y < 0 || y > float32(math.MaxUint32) || y != y { + return math.MaxUint32 + } + case float64: + if y < 0 || y > float64(math.MaxUint32) || y != y { + return math.MaxUint32 + } + } + return uint32(x) +} + +// Slice versions of all these elementwise operations + func addSlice[T number](x, y []T) []T { return map2[T](add)(x, y) } @@ -202,3 +222,11 @@ func imaSlice[T integer](x, y, z []T) []T { func fmaSlice[T float](x, y, z []T) []T { return map3[T](fma)(x, y, z) } + +func toInt32Slice[T number](x []T) []int32 { + return map1[T](toInt32)(x) +} + +func toUint32Slice[T number](x []T) []uint32 { + return map1[T](toUint32)(x) +} diff --git a/src/simd/unary_helpers_test.go b/src/simd/unary_helpers_test.go index cdc5151a216826..4e0f09428e76bb 100644 --- a/src/simd/unary_helpers_test.go +++ b/src/simd/unary_helpers_test.go @@ -432,3 +432,87 @@ func testFloat64x8Unary(t *testing.T, f func(_ simd.Float64x8) simd.Float64x8, w return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) }) } + +// testFloat32x4Unary tests the simd unary method f against the expected behavior generated by want +func testFloat32x4UnaryToInt32(t *testing.T, f func(x simd.Float32x4) simd.Int32x4, want func(x []float32) []int32) { + n := 4 + t.Helper() + forSlice(t, float32s, n, func(x []float32) bool { + t.Helper() + a := simd.LoadFloat32x4Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat32x8Unary tests the simd unary method f against the expected behavior generated by want +func testFloat32x8UnaryToInt32(t *testing.T, f func(x simd.Float32x8) simd.Int32x8, want func(x []float32) []int32) { + n := 8 + t.Helper() + forSlice(t, float32s, n, func(x []float32) bool { + t.Helper() + a := simd.LoadFloat32x8Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat32x16Unary tests the simd unary method f against the expected behavior generated by want +func testFloat32x16UnaryToInt32(t *testing.T, f func(x simd.Float32x16) simd.Int32x16, want func(x []float32) []int32) { + n := 16 + t.Helper() + forSlice(t, float32s, n, func(x []float32) bool { + t.Helper() + a := simd.LoadFloat32x16Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat32x4Unary tests the simd unary method f against the expected behavior generated by want +func testFloat32x4UnaryToUint32(t *testing.T, f func(x simd.Float32x4) simd.Uint32x4, want func(x []float32) []uint32) { + n := 4 + t.Helper() + forSlice(t, float32s, n, func(x []float32) bool { + t.Helper() + a := simd.LoadFloat32x4Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat32x8Unary tests the simd unary method f against the expected behavior generated by want +func testFloat32x8UnaryToUint32(t *testing.T, f func(x simd.Float32x8) simd.Uint32x8, want func(x []float32) []uint32) { + n := 8 + t.Helper() + forSlice(t, float32s, n, func(x []float32) bool { + t.Helper() + a := simd.LoadFloat32x8Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return 
checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat32x16Unary tests the simd unary method f against the expected behavior generated by want +func testFloat32x16UnaryToUint32(t *testing.T, f func(x simd.Float32x16) simd.Uint32x16, want func(x []float32) []uint32) { + n := 16 + t.Helper() + forSlice(t, float32s, n, func(x []float32) bool { + t.Helper() + a := simd.LoadFloat32x16Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} diff --git a/src/simd/unary_test.go b/src/simd/unary_test.go index be6a0909bec937..6565df30965742 100644 --- a/src/simd/unary_test.go +++ b/src/simd/unary_test.go @@ -82,3 +82,17 @@ func TestAbsolute(t *testing.T) { testInt64x8Unary(t, simd.Int64x8.Absolute, map1[int64](abs)) } } + +func TestToInt32(t *testing.T) { + testFloat32x4UnaryToInt32(t, simd.Float32x4.ConvertToInt32, toInt32Slice[float32]) + testFloat32x8UnaryToInt32(t, simd.Float32x8.ConvertToInt32, toInt32Slice[float32]) +} + +func TestToUint32(t *testing.T) { + if !simd.HasAVX512() { + t.Skip("Needs AVX512") + } + testFloat32x4UnaryToUint32(t, simd.Float32x4.ConvertToUint32, toUint32Slice[float32]) + testFloat32x8UnaryToUint32(t, simd.Float32x8.ConvertToUint32, toUint32Slice[float32]) + testFloat32x16UnaryToUint32(t, simd.Float32x16.ConvertToUint32, toUint32Slice[float32]) +} From 08bec02907cf59c3fd60e5c5e31b2d6c30b462b7 Mon Sep 17 00:00:00 2001 From: David Chase Date: Wed, 23 Jul 2025 13:47:08 -0400 Subject: [PATCH 103/139] [dev.simd] cmd/compile: add register-to-mask moves, other simd glue This includes code generated by simdgen CL 689955, here because of git-facilitated pilot error (the generated file should have been in the next CL but that is related to this one, so, oh well). 
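Concretely, the Cvt*toMask ops added below take an ordinary integer whose bits select lanes and materialize it as a SIMD mask, lowering to a KMOVB/KMOVW/KMOVD/KMOVQ register-to-mask move followed by the existing VPMOVMToVec* ops. As a scalar model of the intended semantics (not the exported simd API, which lives in types_amd64.go): bit i of the integer becomes lane i of the mask, all-ones when set and zero when clear.

	// maskBitsToLanes models Cvt16toMask8x16 and friends element-wise:
	// lane i is -1 (all bits set) when bit i of bits is set, else 0.
	func maskBitsToLanes(bits uint64, lanes int) []int64 {
		out := make([]int64, lanes)
		for i := 0; i < lanes; i++ {
			if bits&(1<<uint(i)) != 0 {
				out[i] = -1
			}
		}
		return out
	}
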
Change-Id: Ibfea3f1cd93ca9cd12970edf15a013471677a6ba Reviewed-on: https://go-review.googlesource.com/c/go/+/689936 Reviewed-by: Cherry Mui Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/ssa.go | 8 + src/cmd/compile/internal/ssa/_gen/AMD64.rules | 47 +++- src/cmd/compile/internal/ssa/_gen/AMD64Ops.go | 7 + .../compile/internal/ssa/_gen/genericOps.go | 14 + src/cmd/compile/internal/ssa/opGen.go | 128 +++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 264 +++++++++++++++--- src/cmd/compile/internal/ssagen/intrinsics.go | 32 ++- .../compile/internal/ssagen/simdintrinsics.go | 12 + src/simd/types_amd64.go | 48 ++++ 9 files changed, 505 insertions(+), 55 deletions(-) diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index efa7895e97d35e..5b2df50b13adaa 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -1530,6 +1530,14 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() + case ssa.OpAMD64KMOVQ, ssa.OpAMD64KMOVD, ssa.OpAMD64KMOVW, ssa.OpAMD64KMOVB: + // See also ssa.OpAMD64KMOVQload + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + default: if !ssaGenSIMDValue(s, v) { v.Fatalf("genValue not implemented: %s", v.LongString()) diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64.rules b/src/cmd/compile/internal/ssa/_gen/AMD64.rules index 0136e41af76831..1195c0de7f2032 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/AMD64.rules @@ -1682,21 +1682,23 @@ (Select0 a:(ADD(Q|L)constflags [c] x)) && a.Uses == 1 => (ADD(Q|L)const [c] x) // XXX SIMD -(LoadMask8x16 ptr mem) => (VPMOVMToVec8x16 (KMOVQload ptr mem)) -(LoadMask8x32 ptr mem) => (VPMOVMToVec8x32 (KMOVQload ptr mem)) -(LoadMask8x64 ptr mem) => (VPMOVMToVec8x64 (KMOVQload ptr mem)) -(LoadMask16x8 ptr mem) => (VPMOVMToVec16x8 (KMOVQload ptr mem)) -(LoadMask16x16 ptr mem) => (VPMOVMToVec16x16 (KMOVQload ptr mem)) -(LoadMask16x32 ptr mem) => (VPMOVMToVec16x32 (KMOVQload ptr mem)) +// Mask loads +(LoadMask8x16 ptr mem) => (VPMOVMToVec8x16 (KMOVQload ptr mem)) +(LoadMask8x32 ptr mem) => (VPMOVMToVec8x32 (KMOVQload ptr mem)) +(LoadMask8x64 ptr mem) => (VPMOVMToVec8x64 (KMOVQload ptr mem)) -(LoadMask32x4 ptr mem) => (VPMOVMToVec32x4 (KMOVQload ptr mem)) -(LoadMask32x8 ptr mem) => (VPMOVMToVec32x8 (KMOVQload ptr mem)) -(LoadMask32x16 ptr mem) => (VPMOVMToVec32x16 (KMOVQload ptr mem)) +(LoadMask16x8 ptr mem) => (VPMOVMToVec16x8 (KMOVQload ptr mem)) +(LoadMask16x16 ptr mem) => (VPMOVMToVec16x16 (KMOVQload ptr mem)) +(LoadMask16x32 ptr mem) => (VPMOVMToVec16x32 (KMOVQload ptr mem)) -(LoadMask64x2 ptr mem) => (VPMOVMToVec64x2 (KMOVQload ptr mem)) -(LoadMask64x4 ptr mem) => (VPMOVMToVec64x4 (KMOVQload ptr mem)) -(LoadMask64x8 ptr mem) => (VPMOVMToVec64x8 (KMOVQload ptr mem)) +(LoadMask32x4 ptr mem) => (VPMOVMToVec32x4 (KMOVQload ptr mem)) +(LoadMask32x8 ptr mem) => (VPMOVMToVec32x8 (KMOVQload ptr mem)) +(LoadMask32x16 ptr mem) => (VPMOVMToVec32x16 (KMOVQload ptr mem)) + +(LoadMask64x2 ptr mem) => (VPMOVMToVec64x2 (KMOVQload ptr mem)) +(LoadMask64x4 ptr mem) => (VPMOVMToVec64x4 (KMOVQload ptr mem)) +(LoadMask64x8 ptr mem) => (VPMOVMToVec64x8 (KMOVQload ptr mem)) (StoreMask8x16 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec8x16ToM val) mem) (StoreMask8x32 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec8x32ToM val) mem) @@ -1714,6 +1716,26 @@ (StoreMask64x4 
{t} ptr val mem) => (KMOVQstore ptr (VPMOVVec64x4ToM val) mem) (StoreMask64x8 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec64x8ToM val) mem) +// TODO is this correct? Should we just do it all from 64-bits? + +// Mask conversions (from integers) +(Cvt16toMask8x16 x) => (VPMOVMToVec8x16 (KMOVW x)) +(Cvt32toMask8x32 x) => (VPMOVMToVec8x32 (KMOVD x)) +(Cvt64toMask8x64 x) => (VPMOVMToVec8x64 (KMOVQ x)) + +(Cvt8toMask16x8 x) => (VPMOVMToVec16x8 (KMOVB x)) +(Cvt16toMask16x16 x) => (VPMOVMToVec16x16 (KMOVW x)) +(Cvt32toMask16x32 x) => (VPMOVMToVec16x32 (KMOVD x)) + +(Cvt8toMask32x4 x) => (VPMOVMToVec32x4 (KMOVB x)) +(Cvt8toMask32x8 x) => (VPMOVMToVec32x8 (KMOVB x)) +(Cvt16toMask32x16 x) => (VPMOVMToVec32x16 (KMOVW x)) + +(Cvt8toMask64x2 x) => (VPMOVMToVec64x2 (KMOVB x)) +(Cvt8toMask64x4 x) => (VPMOVMToVec64x4 (KMOVB x)) +(Cvt8toMask64x8 x) => (VPMOVMToVec64x8 (KMOVB x)) + +// SIMD vector loads and stores (Load ptr mem) && t.Size() == 16 => (VMOVDQUload128 ptr mem) (Store {t} ptr val mem) && t.Size() == 16 => (VMOVDQUstore128 ptr val mem) @@ -1723,6 +1745,7 @@ (Load ptr mem) && t.Size() == 64 => (VMOVDQUload512 ptr mem) (Store {t} ptr val mem) && t.Size() == 64 => (VMOVDQUstore512 ptr val mem) +// SIMD vector integer-vector-masked loads and stores. (LoadMasked32 ptr mask mem) && t.Size() == 16 => (VPMASK32load128 ptr mask mem) (LoadMasked32 ptr mask mem) && t.Size() == 32 => (VPMASK32load256 ptr mask mem) (LoadMasked64 ptr mask mem) && t.Size() == 16 => (VPMASK64load128 ptr mask mem) diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go index 66c37a495fbeb5..8ab0b8235117c1 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go @@ -242,6 +242,7 @@ func init() { kload = regInfo{inputs: []regMask{gpspsb, 0}, outputs: maskonly} kstore = regInfo{inputs: []regMask{gpspsb, mask, 0}} + gpk = regInfo{inputs: gponly, outputs: maskonly} prefreg = regInfo{inputs: []regMask{gpspsbg}} ) @@ -1337,6 +1338,12 @@ func init() { {name: "KMOVQload", argLength: 2, reg: kload, asm: "KMOVQ", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, {name: "KMOVQstore", argLength: 3, reg: kstore, asm: "KMOVQ", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, + + // Move GP directly to mask register + {name: "KMOVQ", argLength: 1, reg: gpk, asm: "KMOVQ"}, + {name: "KMOVD", argLength: 1, reg: gpk, asm: "KMOVD"}, + {name: "KMOVW", argLength: 1, reg: gpk, asm: "KMOVW"}, + {name: "KMOVB", argLength: 1, reg: gpk, asm: "KMOVB"}, } var AMD64blocks = []blockData{ diff --git a/src/cmd/compile/internal/ssa/_gen/genericOps.go b/src/cmd/compile/internal/ssa/_gen/genericOps.go index c1383199c4cebc..e714e347e2b07b 100644 --- a/src/cmd/compile/internal/ssa/_gen/genericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/genericOps.go @@ -699,6 +699,20 @@ var genericOps = []opData{ {name: "StoreMask64x2", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory. {name: "StoreMask64x4", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory. {name: "StoreMask64x8", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory. 
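The Cvt*toMask ops added just below convert an integer bitmap into a per-lane mask: bit i of the integer selects lane i, and the AMD64 rules above lower them through KMOVB/KMOVW/KMOVD/KMOVQ according to the lane count. A minimal scalar model of that semantics, for illustration only (the helper name maskFromBits is hypothetical and not part of the patch):

	// maskFromBits models CvtNtoMaskMxL: lane i of the result is set
	// exactly when bit i of bits is set; higher bits are ignored.
	func maskFromBits(bits uint64, lanes int) []bool {
		m := make([]bool, lanes)
		for i := 0; i < lanes; i++ {
			m[i] = bits&(1<<uint(i)) != 0
		}
		return m
	}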
+ + // Convert integers to masks + {name: "Cvt16toMask8x16", argLength: 1}, // arg0 = integer mask value + {name: "Cvt32toMask8x32", argLength: 1}, // arg0 = integer mask value + {name: "Cvt64toMask8x64", argLength: 1}, // arg0 = integer mask value + {name: "Cvt8toMask16x8", argLength: 1}, // arg0 = integer mask value + {name: "Cvt16toMask16x16", argLength: 1}, // arg0 = integer mask value + {name: "Cvt32toMask16x32", argLength: 1}, // arg0 = integer mask value + {name: "Cvt8toMask32x4", argLength: 1}, // arg0 = integer mask value + {name: "Cvt8toMask32x8", argLength: 1}, // arg0 = integer mask value + {name: "Cvt16toMask32x16", argLength: 1}, // arg0 = integer mask value + {name: "Cvt8toMask64x2", argLength: 1}, // arg0 = integer mask value + {name: "Cvt8toMask64x4", argLength: 1}, // arg0 = integer mask value + {name: "Cvt8toMask64x8", argLength: 1}, // arg0 = integer mask value } // kind controls successors implicit exit diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index b9dc41e86074cc..61ce06203ab2ac 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1208,6 +1208,10 @@ const ( OpAMD64VZEROALL OpAMD64KMOVQload OpAMD64KMOVQstore + OpAMD64KMOVQ + OpAMD64KMOVD + OpAMD64KMOVW + OpAMD64KMOVB OpAMD64VADDPD128 OpAMD64VADDPD256 OpAMD64VADDPD512 @@ -4461,6 +4465,18 @@ const ( OpStoreMask64x2 OpStoreMask64x4 OpStoreMask64x8 + OpCvt16toMask8x16 + OpCvt32toMask8x32 + OpCvt64toMask8x64 + OpCvt8toMask16x8 + OpCvt16toMask16x16 + OpCvt32toMask16x32 + OpCvt8toMask32x4 + OpCvt8toMask32x8 + OpCvt16toMask32x16 + OpCvt8toMask64x2 + OpCvt8toMask64x4 + OpCvt8toMask64x8 OpAbsoluteInt8x16 OpAbsoluteInt8x32 OpAbsoluteInt8x64 @@ -19029,6 +19045,58 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "KMOVQ", + argLen: 1, + asm: x86.AKMOVQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "KMOVD", + argLen: 1, + asm: x86.AKMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "KMOVW", + argLen: 1, + asm: x86.AKMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "KMOVB", + argLen: 1, + asm: x86.AKMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, { name: "VADDPD128", argLen: 2, @@ -61379,6 +61447,66 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "Cvt16toMask8x16", + argLen: 1, + generic: true, + }, + { + name: "Cvt32toMask8x32", + argLen: 1, + generic: true, + }, + { + name: "Cvt64toMask8x64", + argLen: 1, + generic: true, + }, + { + name: "Cvt8toMask16x8", + argLen: 1, + generic: true, + }, + { + name: "Cvt16toMask16x16", + argLen: 1, + generic: true, + }, + { + name: "Cvt32toMask16x32", + argLen: 1, + generic: true, + }, + { + name: "Cvt8toMask32x4", + argLen: 1, + generic: true, + }, + { + name: "Cvt8toMask32x8", + argLen: 1, + generic: true, + }, + { + name: "Cvt16toMask32x16", + argLen: 1, + generic: true, + }, + { + name: "Cvt8toMask64x2", + argLen: 1, + 
generic: true, + }, + { + name: "Cvt8toMask64x4", + argLen: 1, + generic: true, + }, + { + name: "Cvt8toMask64x8", + argLen: 1, + generic: true, + }, { name: "AbsoluteInt8x16", argLen: 1, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 11c7c20db261c0..d79c856ae8d20e 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1313,6 +1313,12 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpCtz8(v) case OpCtz8NonZero: return rewriteValueAMD64_OpCtz8NonZero(v) + case OpCvt16toMask16x16: + return rewriteValueAMD64_OpCvt16toMask16x16(v) + case OpCvt16toMask32x16: + return rewriteValueAMD64_OpCvt16toMask32x16(v) + case OpCvt16toMask8x16: + return rewriteValueAMD64_OpCvt16toMask8x16(v) case OpCvt32Fto32: v.Op = OpAMD64CVTTSS2SL return true @@ -1328,6 +1334,10 @@ func rewriteValueAMD64(v *Value) bool { case OpCvt32to64F: v.Op = OpAMD64CVTSL2SD return true + case OpCvt32toMask16x32: + return rewriteValueAMD64_OpCvt32toMask16x32(v) + case OpCvt32toMask8x32: + return rewriteValueAMD64_OpCvt32toMask8x32(v) case OpCvt64Fto32: v.Op = OpAMD64CVTTSD2SL return true @@ -1343,6 +1353,20 @@ func rewriteValueAMD64(v *Value) bool { case OpCvt64to64F: v.Op = OpAMD64CVTSQ2SD return true + case OpCvt64toMask8x64: + return rewriteValueAMD64_OpCvt64toMask8x64(v) + case OpCvt8toMask16x8: + return rewriteValueAMD64_OpCvt8toMask16x8(v) + case OpCvt8toMask32x4: + return rewriteValueAMD64_OpCvt8toMask32x4(v) + case OpCvt8toMask32x8: + return rewriteValueAMD64_OpCvt8toMask32x8(v) + case OpCvt8toMask64x2: + return rewriteValueAMD64_OpCvt8toMask64x2(v) + case OpCvt8toMask64x4: + return rewriteValueAMD64_OpCvt8toMask64x4(v) + case OpCvt8toMask64x8: + return rewriteValueAMD64_OpCvt8toMask64x8(v) case OpCvtBoolToUint8: v.Op = OpCopy return true @@ -32276,6 +32300,186 @@ func rewriteValueAMD64_OpCtz8NonZero(v *Value) bool { } return false } +func rewriteValueAMD64_OpCvt16toMask16x16(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (Cvt16toMask16x16 x) + // result: (VPMOVMToVec16x16 (KMOVW x)) + for { + x := v_0 + v.reset(OpAMD64VPMOVMToVec16x16) + v.Type = types.TypeVec256 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVW, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpCvt16toMask32x16(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (Cvt16toMask32x16 x) + // result: (VPMOVMToVec32x16 (KMOVW x)) + for { + x := v_0 + v.reset(OpAMD64VPMOVMToVec32x16) + v.Type = types.TypeVec512 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVW, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpCvt16toMask8x16(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (Cvt16toMask8x16 x) + // result: (VPMOVMToVec8x16 (KMOVW x)) + for { + x := v_0 + v.reset(OpAMD64VPMOVMToVec8x16) + v.Type = types.TypeVec128 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVW, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpCvt32toMask16x32(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (Cvt32toMask16x32 x) + // result: (VPMOVMToVec16x32 (KMOVD x)) + for { + x := v_0 + v.reset(OpAMD64VPMOVMToVec16x32) + v.Type = types.TypeVec512 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVD, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpCvt32toMask8x32(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (Cvt32toMask8x32 x) + // result: 
(VPMOVMToVec8x32 (KMOVD x)) + for { + x := v_0 + v.reset(OpAMD64VPMOVMToVec8x32) + v.Type = types.TypeVec256 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVD, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpCvt64toMask8x64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (Cvt64toMask8x64 x) + // result: (VPMOVMToVec8x64 (KMOVQ x)) + for { + x := v_0 + v.reset(OpAMD64VPMOVMToVec8x64) + v.Type = types.TypeVec512 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQ, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpCvt8toMask16x8(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (Cvt8toMask16x8 x) + // result: (VPMOVMToVec16x8 (KMOVB x)) + for { + x := v_0 + v.reset(OpAMD64VPMOVMToVec16x8) + v.Type = types.TypeVec128 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVB, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpCvt8toMask32x4(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (Cvt8toMask32x4 x) + // result: (VPMOVMToVec32x4 (KMOVB x)) + for { + x := v_0 + v.reset(OpAMD64VPMOVMToVec32x4) + v.Type = types.TypeVec128 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVB, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpCvt8toMask32x8(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (Cvt8toMask32x8 x) + // result: (VPMOVMToVec32x8 (KMOVB x)) + for { + x := v_0 + v.reset(OpAMD64VPMOVMToVec32x8) + v.Type = types.TypeVec256 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVB, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpCvt8toMask64x2(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (Cvt8toMask64x2 x) + // result: (VPMOVMToVec64x2 (KMOVB x)) + for { + x := v_0 + v.reset(OpAMD64VPMOVMToVec64x2) + v.Type = types.TypeVec128 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVB, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpCvt8toMask64x4(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (Cvt8toMask64x4 x) + // result: (VPMOVMToVec64x4 (KMOVB x)) + for { + x := v_0 + v.reset(OpAMD64VPMOVMToVec64x4) + v.Type = types.TypeVec256 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVB, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpCvt8toMask64x8(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (Cvt8toMask64x8 x) + // result: (VPMOVMToVec64x8 (KMOVB x)) + for { + x := v_0 + v.reset(OpAMD64VPMOVMToVec64x8) + v.Type = types.TypeVec512 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVB, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat32x16(v *Value) bool { v_0 := v.Args[0] // match: (DiffWithCeilWithPrecisionFloat32x16 [a] x) @@ -40478,14 +40682,13 @@ func rewriteValueAMD64_OpLoadMask16x16(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (LoadMask16x16 ptr mem) - // result: (VPMOVMToVec16x16 (KMOVQload ptr mem)) + // result: (VPMOVMToVec16x16 (KMOVQload ptr mem)) for { - t := v.Type ptr := v_0 mem := v_1 v.reset(OpAMD64VPMOVMToVec16x16) v.Type = types.TypeVec256 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask) v0.AddArg2(ptr, mem) v.AddArg(v0) return true @@ -40496,14 +40699,13 @@ func rewriteValueAMD64_OpLoadMask16x32(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (LoadMask16x32 ptr mem) - // result: (VPMOVMToVec16x32 (KMOVQload ptr mem)) + // 
result: (VPMOVMToVec16x32 (KMOVQload ptr mem)) for { - t := v.Type ptr := v_0 mem := v_1 v.reset(OpAMD64VPMOVMToVec16x32) v.Type = types.TypeVec512 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask) v0.AddArg2(ptr, mem) v.AddArg(v0) return true @@ -40514,14 +40716,13 @@ func rewriteValueAMD64_OpLoadMask16x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (LoadMask16x8 ptr mem) - // result: (VPMOVMToVec16x8 (KMOVQload ptr mem)) + // result: (VPMOVMToVec16x8 (KMOVQload ptr mem)) for { - t := v.Type ptr := v_0 mem := v_1 v.reset(OpAMD64VPMOVMToVec16x8) v.Type = types.TypeVec128 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask) v0.AddArg2(ptr, mem) v.AddArg(v0) return true @@ -40532,14 +40733,13 @@ func rewriteValueAMD64_OpLoadMask32x16(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (LoadMask32x16 ptr mem) - // result: (VPMOVMToVec32x16 (KMOVQload ptr mem)) + // result: (VPMOVMToVec32x16 (KMOVQload ptr mem)) for { - t := v.Type ptr := v_0 mem := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v.Type = types.TypeVec512 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask) v0.AddArg2(ptr, mem) v.AddArg(v0) return true @@ -40550,14 +40750,13 @@ func rewriteValueAMD64_OpLoadMask32x4(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (LoadMask32x4 ptr mem) - // result: (VPMOVMToVec32x4 (KMOVQload ptr mem)) + // result: (VPMOVMToVec32x4 (KMOVQload ptr mem)) for { - t := v.Type ptr := v_0 mem := v_1 v.reset(OpAMD64VPMOVMToVec32x4) v.Type = types.TypeVec128 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask) v0.AddArg2(ptr, mem) v.AddArg(v0) return true @@ -40568,14 +40767,13 @@ func rewriteValueAMD64_OpLoadMask32x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (LoadMask32x8 ptr mem) - // result: (VPMOVMToVec32x8 (KMOVQload ptr mem)) + // result: (VPMOVMToVec32x8 (KMOVQload ptr mem)) for { - t := v.Type ptr := v_0 mem := v_1 v.reset(OpAMD64VPMOVMToVec32x8) v.Type = types.TypeVec256 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask) v0.AddArg2(ptr, mem) v.AddArg(v0) return true @@ -40586,14 +40784,13 @@ func rewriteValueAMD64_OpLoadMask64x2(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (LoadMask64x2 ptr mem) - // result: (VPMOVMToVec64x2 (KMOVQload ptr mem)) + // result: (VPMOVMToVec64x2 (KMOVQload ptr mem)) for { - t := v.Type ptr := v_0 mem := v_1 v.reset(OpAMD64VPMOVMToVec64x2) v.Type = types.TypeVec128 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask) v0.AddArg2(ptr, mem) v.AddArg(v0) return true @@ -40604,14 +40801,13 @@ func rewriteValueAMD64_OpLoadMask64x4(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (LoadMask64x4 ptr mem) - // result: (VPMOVMToVec64x4 (KMOVQload ptr mem)) + // result: (VPMOVMToVec64x4 (KMOVQload ptr mem)) for { - t := v.Type ptr := v_0 mem := v_1 v.reset(OpAMD64VPMOVMToVec64x4) v.Type = types.TypeVec256 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask) v0.AddArg2(ptr, mem) v.AddArg(v0) return true @@ -40622,14 +40818,13 @@ func rewriteValueAMD64_OpLoadMask64x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (LoadMask64x8 ptr mem) - // result: (VPMOVMToVec64x8 (KMOVQload ptr mem)) + // result: (VPMOVMToVec64x8 (KMOVQload ptr 
mem)) for { - t := v.Type ptr := v_0 mem := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v.Type = types.TypeVec512 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask) v0.AddArg2(ptr, mem) v.AddArg(v0) return true @@ -40640,14 +40835,13 @@ func rewriteValueAMD64_OpLoadMask8x16(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (LoadMask8x16 ptr mem) - // result: (VPMOVMToVec8x16 (KMOVQload ptr mem)) + // result: (VPMOVMToVec8x16 (KMOVQload ptr mem)) for { - t := v.Type ptr := v_0 mem := v_1 v.reset(OpAMD64VPMOVMToVec8x16) v.Type = types.TypeVec128 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask) v0.AddArg2(ptr, mem) v.AddArg(v0) return true @@ -40658,14 +40852,13 @@ func rewriteValueAMD64_OpLoadMask8x32(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (LoadMask8x32 ptr mem) - // result: (VPMOVMToVec8x32 (KMOVQload ptr mem)) + // result: (VPMOVMToVec8x32 (KMOVQload ptr mem)) for { - t := v.Type ptr := v_0 mem := v_1 v.reset(OpAMD64VPMOVMToVec8x32) v.Type = types.TypeVec256 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask) v0.AddArg2(ptr, mem) v.AddArg(v0) return true @@ -40676,14 +40869,13 @@ func rewriteValueAMD64_OpLoadMask8x64(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (LoadMask8x64 ptr mem) - // result: (VPMOVMToVec8x64 (KMOVQload ptr mem)) + // result: (VPMOVMToVec8x64 (KMOVQload ptr mem)) for { - t := v.Type ptr := v_0 mem := v_1 v.reset(OpAMD64VPMOVMToVec8x64) v.Type = types.TypeVec512 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask) v0.AddArg2(ptr, mem) v.AddArg(v0) return true diff --git a/src/cmd/compile/internal/ssagen/intrinsics.go b/src/cmd/compile/internal/ssagen/intrinsics.go index 7326ae2485247b..d7b25f2ab1c830 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics.go +++ b/src/cmd/compile/internal/ssagen/intrinsics.go @@ -1775,15 +1775,23 @@ func simdStore() func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { } } +var loadMaskOpcodes = map[int]map[int]ssa.Op{ + 8: {16: ssa.OpLoadMask8x16, 32: ssa.OpLoadMask8x32, 64: ssa.OpLoadMask8x64}, + 16: {8: ssa.OpLoadMask16x8, 16: ssa.OpLoadMask16x16, 32: ssa.OpLoadMask16x32}, + 32: {4: ssa.OpLoadMask32x4, 8: ssa.OpLoadMask32x8, 16: ssa.OpLoadMask32x16}, + 64: {2: ssa.OpLoadMask64x2, 4: ssa.OpLoadMask64x4, 8: ssa.OpLoadMask64x8}, +} + +var cvtMaskOpcodes = map[int]map[int]ssa.Op{ + 8: {16: ssa.OpCvt16toMask8x16, 32: ssa.OpCvt32toMask8x32, 64: ssa.OpCvt64toMask8x64}, + 16: {8: ssa.OpCvt8toMask16x8, 16: ssa.OpCvt16toMask16x16, 32: ssa.OpCvt32toMask16x32}, + 32: {4: ssa.OpCvt8toMask32x4, 8: ssa.OpCvt8toMask32x8, 16: ssa.OpCvt16toMask32x16}, + 64: {2: ssa.OpCvt8toMask64x2, 4: ssa.OpCvt8toMask64x4, 8: ssa.OpCvt8toMask64x8}, +} + func simdLoadMask(elemBits, lanes int) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { - opCodes := map[int]map[int]ssa.Op{ - 8: {16: ssa.OpLoadMask8x16, 32: ssa.OpLoadMask8x32, 64: ssa.OpLoadMask8x64}, - 16: {8: ssa.OpLoadMask16x8, 16: ssa.OpLoadMask16x16, 32: ssa.OpLoadMask16x32}, - 32: {4: ssa.OpLoadMask32x4, 8: ssa.OpLoadMask32x8, 16: ssa.OpLoadMask32x16}, - 64: {2: ssa.OpLoadMask64x2, 4: ssa.OpLoadMask64x4, 8: ssa.OpLoadMask64x8}, - } - op := opCodes[elemBits][lanes] + op := loadMaskOpcodes[elemBits][lanes] if op == 0 { panic(fmt.Sprintf("Unknown mask shape: 
Mask%dx%d", elemBits, lanes)) } @@ -1808,6 +1816,16 @@ func simdStoreMask(elemBits, lanes int) func(s *state, n *ir.CallExpr, args []*s } } +func simdCvtMask(elemBits, lanes int) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + op := cvtMaskOpcodes[elemBits][lanes] + if op == 0 { + panic(fmt.Sprintf("Unknown mask shape: Mask%dx%d", elemBits, lanes)) + } + return s.newValue1(op, types.TypeMask, args[0]) + } +} + func simdMaskedLoad(op ssa.Op) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue3(op, n.Type(), args[0], args[1], s.mem()) diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index a8a2ff91420f04..dddfab5b71ab7c 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -2174,70 +2174,82 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Mask8x16.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "LoadMask8x16FromBits", simdLoadMask(8, 16), sys.AMD64) addF(simdPackage, "Mask8x16.StoreToBits", simdStoreMask(8, 16), sys.AMD64) + addF(simdPackage, "Mask8x16FromBits", simdCvtMask(8, 16), sys.AMD64) addF(simdPackage, "Mask8x32.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x32.AsMask8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask8x32.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Mask8x32.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "LoadMask8x32FromBits", simdLoadMask(8, 32), sys.AMD64) addF(simdPackage, "Mask8x32.StoreToBits", simdStoreMask(8, 32), sys.AMD64) + addF(simdPackage, "Mask8x32FromBits", simdCvtMask(8, 32), sys.AMD64) addF(simdPackage, "Mask8x64.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x64.AsMask8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask8x64.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask8x64.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "LoadMask8x64FromBits", simdLoadMask(8, 64), sys.AMD64) addF(simdPackage, "Mask8x64.StoreToBits", simdStoreMask(8, 64), sys.AMD64) + addF(simdPackage, "Mask8x64FromBits", simdCvtMask(8, 64), sys.AMD64) addF(simdPackage, "Mask16x8.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x8.AsMask16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask16x8.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Mask16x8.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "LoadMask16x8FromBits", simdLoadMask(16, 8), sys.AMD64) addF(simdPackage, "Mask16x8.StoreToBits", simdStoreMask(16, 8), sys.AMD64) + addF(simdPackage, "Mask16x8FromBits", simdCvtMask(16, 8), sys.AMD64) addF(simdPackage, "Mask16x16.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, 
"Int16x16.AsMask16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask16x16.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Mask16x16.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "LoadMask16x16FromBits", simdLoadMask(16, 16), sys.AMD64) addF(simdPackage, "Mask16x16.StoreToBits", simdStoreMask(16, 16), sys.AMD64) + addF(simdPackage, "Mask16x16FromBits", simdCvtMask(16, 16), sys.AMD64) addF(simdPackage, "Mask16x32.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x32.AsMask16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask16x32.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask16x32.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "LoadMask16x32FromBits", simdLoadMask(16, 32), sys.AMD64) addF(simdPackage, "Mask16x32.StoreToBits", simdStoreMask(16, 32), sys.AMD64) + addF(simdPackage, "Mask16x32FromBits", simdCvtMask(16, 32), sys.AMD64) addF(simdPackage, "Mask32x4.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x4.AsMask32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask32x4.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Mask32x4.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "LoadMask32x4FromBits", simdLoadMask(32, 4), sys.AMD64) addF(simdPackage, "Mask32x4.StoreToBits", simdStoreMask(32, 4), sys.AMD64) + addF(simdPackage, "Mask32x4FromBits", simdCvtMask(32, 4), sys.AMD64) addF(simdPackage, "Mask32x8.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x8.AsMask32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask32x8.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Mask32x8.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "LoadMask32x8FromBits", simdLoadMask(32, 8), sys.AMD64) addF(simdPackage, "Mask32x8.StoreToBits", simdStoreMask(32, 8), sys.AMD64) + addF(simdPackage, "Mask32x8FromBits", simdCvtMask(32, 8), sys.AMD64) addF(simdPackage, "Mask32x16.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x16.AsMask32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask32x16.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask32x16.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "LoadMask32x16FromBits", simdLoadMask(32, 16), sys.AMD64) addF(simdPackage, "Mask32x16.StoreToBits", simdStoreMask(32, 16), sys.AMD64) + addF(simdPackage, "Mask32x16FromBits", simdCvtMask(32, 16), sys.AMD64) addF(simdPackage, "Mask64x2.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x2.AsMask64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask64x2.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) 
addF(simdPackage, "Mask64x2.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "LoadMask64x2FromBits", simdLoadMask(64, 2), sys.AMD64) addF(simdPackage, "Mask64x2.StoreToBits", simdStoreMask(64, 2), sys.AMD64) + addF(simdPackage, "Mask64x2FromBits", simdCvtMask(64, 2), sys.AMD64) addF(simdPackage, "Mask64x4.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x4.AsMask64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask64x4.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Mask64x4.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "LoadMask64x4FromBits", simdLoadMask(64, 4), sys.AMD64) addF(simdPackage, "Mask64x4.StoreToBits", simdStoreMask(64, 4), sys.AMD64) + addF(simdPackage, "Mask64x4FromBits", simdCvtMask(64, 4), sys.AMD64) addF(simdPackage, "Mask64x8.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x8.AsMask64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask64x8.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask64x8.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "LoadMask64x8FromBits", simdLoadMask(64, 8), sys.AMD64) addF(simdPackage, "Mask64x8.StoreToBits", simdStoreMask(64, 8), sys.AMD64) + addF(simdPackage, "Mask64x8FromBits", simdCvtMask(64, 8), sys.AMD64) } diff --git a/src/simd/types_amd64.go b/src/simd/types_amd64.go index c1676ff34e27a0..252da021e2e0cc 100644 --- a/src/simd/types_amd64.go +++ b/src/simd/types_amd64.go @@ -293,6 +293,10 @@ func LoadMask8x16FromBits(y *uint64) Mask8x16 //go:noescape func (x Mask8x16) StoreToBits(y *uint64) +// Mask8x16FromBits constructs a Mask8x16 from a bitmap value, where 1 means set for the indexed element, 0 means unset. +// Only the lower 16 bits of y are used. +func Mask8x16FromBits(y uint16) Mask8x16 + // Mask16x8 is a 128-bit SIMD vector of 8 int16 type Mask16x8 struct { int16x8 v128 @@ -315,6 +319,10 @@ func LoadMask16x8FromBits(y *uint64) Mask16x8 //go:noescape func (x Mask16x8) StoreToBits(y *uint64) +// Mask16x8FromBits constructs a Mask16x8 from a bitmap value, where 1 means set for the indexed element, 0 means unset. +// Only the lower 8 bits of y are used. +func Mask16x8FromBits(y uint8) Mask16x8 + // Mask32x4 is a 128-bit SIMD vector of 4 int32 type Mask32x4 struct { int32x4 v128 @@ -337,6 +345,10 @@ func LoadMask32x4FromBits(y *uint64) Mask32x4 //go:noescape func (x Mask32x4) StoreToBits(y *uint64) +// Mask32x4FromBits constructs a Mask32x4 from a bitmap value, where 1 means set for the indexed element, 0 means unset. +// Only the lower 4 bits of y are used. +func Mask32x4FromBits(y uint8) Mask32x4 + // Mask64x2 is a 128-bit SIMD vector of 2 int64 type Mask64x2 struct { int64x2 v128 @@ -359,6 +371,10 @@ func LoadMask64x2FromBits(y *uint64) Mask64x2 //go:noescape func (x Mask64x2) StoreToBits(y *uint64) +// Mask64x2FromBits constructs a Mask64x2 from a bitmap value, where 1 means set for the indexed element, 0 means unset. +// Only the lower 2 bits of y are used. 
+func Mask64x2FromBits(y uint8) Mask64x2 + // v256 is a tag type that tells the compiler that this is really 256-bit SIMD type v256 struct { _256 struct{} @@ -648,6 +664,10 @@ func LoadMask8x32FromBits(y *uint64) Mask8x32 //go:noescape func (x Mask8x32) StoreToBits(y *uint64) +// Mask8x32FromBits constructs a Mask8x32 from a bitmap value, where 1 means set for the indexed element, 0 means unset. +// Only the lower 32 bits of y are used. +func Mask8x32FromBits(y uint32) Mask8x32 + // Mask16x16 is a 256-bit SIMD vector of 16 int16 type Mask16x16 struct { int16x16 v256 @@ -670,6 +690,10 @@ func LoadMask16x16FromBits(y *uint64) Mask16x16 //go:noescape func (x Mask16x16) StoreToBits(y *uint64) +// Mask16x16FromBits constructs a Mask16x16 from a bitmap value, where 1 means set for the indexed element, 0 means unset. +// Only the lower 16 bits of y are used. +func Mask16x16FromBits(y uint16) Mask16x16 + // Mask32x8 is a 256-bit SIMD vector of 8 int32 type Mask32x8 struct { int32x8 v256 @@ -692,6 +716,10 @@ func LoadMask32x8FromBits(y *uint64) Mask32x8 //go:noescape func (x Mask32x8) StoreToBits(y *uint64) +// Mask32x8FromBits constructs a Mask32x8 from a bitmap value, where 1 means set for the indexed element, 0 means unset. +// Only the lower 8 bits of y are used. +func Mask32x8FromBits(y uint8) Mask32x8 + // Mask64x4 is a 256-bit SIMD vector of 4 int64 type Mask64x4 struct { int64x4 v256 @@ -714,6 +742,10 @@ func LoadMask64x4FromBits(y *uint64) Mask64x4 //go:noescape func (x Mask64x4) StoreToBits(y *uint64) +// Mask64x4FromBits constructs a Mask64x4 from a bitmap value, where 1 means set for the indexed element, 0 means unset. +// Only the lower 4 bits of y are used. +func Mask64x4FromBits(y uint8) Mask64x4 + // v512 is a tag type that tells the compiler that this is really 512-bit SIMD type v512 struct { _512 struct{} @@ -931,6 +963,10 @@ func LoadMask8x64FromBits(y *uint64) Mask8x64 //go:noescape func (x Mask8x64) StoreToBits(y *uint64) +// Mask8x64FromBits constructs a Mask8x64 from a bitmap value, where 1 means set for the indexed element, 0 means unset. +// Only the lower 64 bits of y are used. +func Mask8x64FromBits(y uint64) Mask8x64 + // Mask16x32 is a 512-bit SIMD vector of 32 int16 type Mask16x32 struct { int16x32 v512 @@ -953,6 +989,10 @@ func LoadMask16x32FromBits(y *uint64) Mask16x32 //go:noescape func (x Mask16x32) StoreToBits(y *uint64) +// Mask16x32FromBits constructs a Mask16x32 from a bitmap value, where 1 means set for the indexed element, 0 means unset. +// Only the lower 32 bits of y are used. +func Mask16x32FromBits(y uint32) Mask16x32 + // Mask32x16 is a 512-bit SIMD vector of 16 int32 type Mask32x16 struct { int32x16 v512 @@ -975,6 +1015,10 @@ func LoadMask32x16FromBits(y *uint64) Mask32x16 //go:noescape func (x Mask32x16) StoreToBits(y *uint64) +// Mask32x16FromBits constructs a Mask32x16 from a bitmap value, where 1 means set for the indexed element, 0 means unset. +// Only the lower 16 bits of y are used. +func Mask32x16FromBits(y uint16) Mask32x16 + // Mask64x8 is a 512-bit SIMD vector of 8 int64 type Mask64x8 struct { int64x8 v512 @@ -996,3 +1040,7 @@ func LoadMask64x8FromBits(y *uint64) Mask64x8 // //go:noescape func (x Mask64x8) StoreToBits(y *uint64) + +// Mask64x8FromBits constructs a Mask64x8 from a bitmap value, where 1 means set for the indexed element, 0 means unset. +// Only the lower 8 bits of y are used. 
+func Mask64x8FromBits(y uint8) Mask64x8 From f39711a03d8c957bfae0af36d85ca8e6a74c6dff Mon Sep 17 00:00:00 2001 From: David Chase Date: Wed, 23 Jul 2025 14:11:35 -0400 Subject: [PATCH 104/139] [dev.simd] cmd/compile: test for int-to-mask conversion Change-Id: If341cb2c25dc535cdebe6f539db3cab8917d5afe Reviewed-on: https://go-review.googlesource.com/c/go/+/689937 Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI --- src/simd/simd_test.go | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index 541a33d34ad3fa..72180a304691b2 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -428,3 +428,19 @@ func TestBitMaskStore(t *testing.T) { t.Errorf("Result incorrect: want %b, got %b", want, got) } } + +func TestBitMaskFromBits(t *testing.T) { + if !simd.HasAVX512() { + t.Skip("Test requires HasAVX512, not available on this hardware") + return + } + results := [2]int64{} + want := [2]int64{0, 6} + m := simd.Mask64x2FromBits(0b10) + simd.LoadInt64x2Slice([]int64{1, 2}).AddMasked(simd.LoadInt64x2Slice([]int64{3, 4}), m).Store(&results) + for i := range 2 { + if results[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], results[i]) + } + } +} From 1ac5f3533f9dccb0f2fd9f21f833a76e68378ea7 Mon Sep 17 00:00:00 2001 From: David Chase Date: Wed, 23 Jul 2025 21:04:38 -0400 Subject: [PATCH 105/139] [dev.simd] cmd/compile: opcodes and rules and code generation to enable AVX512 masked loads/stores Change-Id: I9e05fc5031420f60a2e6bac7b9f86365f0f4c0f1 Reviewed-on: https://go-review.googlesource.com/c/go/+/690335 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao --- src/cmd/compile/internal/amd64/ssa.go | 19 ++ src/cmd/compile/internal/ssa/_gen/AMD64.rules | 12 ++ src/cmd/compile/internal/ssa/_gen/AMD64Ops.go | 16 +- .../compile/internal/ssa/_gen/genericOps.go | 4 + src/cmd/compile/internal/ssa/opGen.go | 162 ++++++++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 182 ++++++++++++++++++ 6 files changed, 392 insertions(+), 3 deletions(-) diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 5b2df50b13adaa..9e772a71693040 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -1494,6 +1494,25 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { ssagen.AddAux(&p.To, v) p.AddRestSourceReg(simdReg(v.Args[1])) // masking simd reg + case ssa.OpAMD64VPMASK64load512, ssa.OpAMD64VPMASK32load512, ssa.OpAMD64VPMASK16load512, ssa.OpAMD64VPMASK8load512: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + p.AddRestSourceReg(v.Args[1].Reg()) // simd mask reg + x86.ParseSuffix(p, "Z") // must be zero if not in mask + + case ssa.OpAMD64VPMASK64store512, ssa.OpAMD64VPMASK32store512, ssa.OpAMD64VPMASK16store512, ssa.OpAMD64VPMASK8store512: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = simdReg(v.Args[2]) + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.To, v) + p.AddRestSourceReg(v.Args[1].Reg()) // simd mask reg + case ssa.OpAMD64VPMOVMToVec8x16, ssa.OpAMD64VPMOVMToVec8x32, ssa.OpAMD64VPMOVMToVec8x64, diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64.rules b/src/cmd/compile/internal/ssa/_gen/AMD64.rules index 1195c0de7f2032..5dafc4b563b537 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/AMD64.rules @@ -1756,6 
+1756,18 @@ (StoreMasked64 {t} ptr mask val mem) && t.Size() == 16 => (VPMASK64store128 ptr mask val mem) (StoreMasked64 {t} ptr mask val mem) && t.Size() == 32 => (VPMASK64store256 ptr mask val mem) +// SIMD vector K-masked loads and stores + +(LoadMasked64 ptr mask mem) && t.Size() == 64 => (VPMASK64load512 ptr (VPMOVVec64x8ToM mask) mem) +(LoadMasked32 ptr mask mem) && t.Size() == 64 => (VPMASK32load512 ptr (VPMOVVec32x16ToM mask) mem) +(LoadMasked16 ptr mask mem) && t.Size() == 64 => (VPMASK16load512 ptr (VPMOVVec16x32ToM mask) mem) +(LoadMasked8 ptr mask mem) && t.Size() == 64 => (VPMASK8load512 ptr (VPMOVVec8x64ToM mask) mem) + +(StoreMasked64 {t} ptr mask val mem) && t.Size() == 64 => (VPMASK64store512 ptr (VPMOVVec64x8ToM mask) val mem) +(StoreMasked32 {t} ptr mask val mem) && t.Size() == 64 => (VPMASK32store512 ptr (VPMOVVec32x16ToM mask) val mem) +(StoreMasked16 {t} ptr mask val mem) && t.Size() == 64 => (VPMASK16store512 ptr (VPMOVVec16x32ToM mask) val mem) +(StoreMasked8 {t} ptr mask val mem) && t.Size() == 64 => (VPMASK8store512 ptr (VPMOVVec8x64ToM mask) val mem) + (ZeroSIMD ) && t.Size() == 16 => (Zero128 ) (ZeroSIMD ) && t.Size() == 32 => (Zero256 ) (ZeroSIMD ) && t.Size() == 64 => (Zero512 ) diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go index 8ab0b8235117c1..402f50bfc2c58b 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go @@ -205,8 +205,8 @@ func init() { // masked loads/stores, vector register or mask register vloadv = regInfo{inputs: []regMask{gpspsb, v, 0}, outputs: vonly} vstorev = regInfo{inputs: []regMask{gpspsb, v, v, 0}} - // vloadk = regInfo{inputs: []regMask{gpspsb, mask, 0}, outputs: vonly} - // vstorek = regInfo{inputs: []regMask{gpspsb, mask, v, 0}} + vloadk = regInfo{inputs: []regMask{gpspsb, mask, 0}, outputs: vonly} + vstorek = regInfo{inputs: []regMask{gpspsb, mask, v, 0}} v01 = regInfo{inputs: nil, outputs: vonly} v11 = regInfo{inputs: vonly, outputs: vonly} @@ -1286,7 +1286,7 @@ func init() { {name: "VMOVDQUload512", argLength: 2, reg: fpload, asm: "VMOVDQU64", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0+auxint+aux, arg1 = mem {name: "VMOVDQUstore512", argLength: 3, reg: fpstore, asm: "VMOVDQU64", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // store, *(arg0+auxint+aux) = arg1, arg2 = mem - // AVX2 32 and 64-bit element masked moves. + // AVX2 32 and 64-bit element int-vector masked moves. 
{name: "VPMASK32load128", argLength: 3, reg: vloadv, asm: "VPMASKMOVD", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0+auxint+aux, arg1=integer mask, arg2 = mem {name: "VPMASK32store128", argLength: 4, reg: vstorev, asm: "VPMASKMOVD", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // store, *(arg0+auxint+aux) = arg2, arg1=integer mask, arg3 = mem {name: "VPMASK64load128", argLength: 3, reg: vloadv, asm: "VPMASKMOVQ", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0+auxint+aux, arg1=integer mask, arg2 = mem @@ -1297,6 +1297,16 @@ func init() { {name: "VPMASK64load256", argLength: 3, reg: vloadv, asm: "VPMASKMOVQ", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0+auxint+aux, arg1=integer mask, arg2 = mem {name: "VPMASK64store256", argLength: 4, reg: vstorev, asm: "VPMASKMOVQ", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // store, *(arg0+auxint+aux) = arg2, arg1=integer mask, arg3 = mem + // AVX512 8-64-bit element mask-register masked moves + {name: "VPMASK8load512", argLength: 3, reg: vloadk, asm: "VMOVDQU8", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0+auxint+aux, arg1=k mask, arg2 = mem + {name: "VPMASK8store512", argLength: 4, reg: vstorek, asm: "VMOVDQU8", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // store, *(arg0+auxint+aux) = arg2, arg1=k mask, arg3 = mem + {name: "VPMASK16load512", argLength: 3, reg: vloadk, asm: "VMOVDQU16", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0+auxint+aux, arg1=k mask, arg2 = mem + {name: "VPMASK16store512", argLength: 4, reg: vstorek, asm: "VMOVDQU16", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // store, *(arg0+auxint+aux) = arg2, arg1=k mask, arg3 = mem + {name: "VPMASK32load512", argLength: 3, reg: vloadk, asm: "VMOVDQU32", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0+auxint+aux, arg1=k mask, arg2 = mem + {name: "VPMASK32store512", argLength: 4, reg: vstorek, asm: "VMOVDQU32", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // store, *(arg0+auxint+aux) = arg2, arg1=k mask, arg3 = mem + {name: "VPMASK64load512", argLength: 3, reg: vloadk, asm: "VMOVDQU64", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0+auxint+aux, arg1=k mask, arg2 = mem + {name: "VPMASK64store512", argLength: 4, reg: vstorek, asm: "VMOVDQU64", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // store, *(arg0+auxint+aux) = arg2, arg1=k mask, arg3 = mem + {name: "VPMOVMToVec8x16", argLength: 1, reg: kv, asm: "VPMOVM2B"}, {name: "VPMOVMToVec8x32", argLength: 1, reg: kv, asm: "VPMOVM2B"}, {name: "VPMOVMToVec8x64", argLength: 1, reg: kw, asm: "VPMOVM2B"}, diff --git a/src/cmd/compile/internal/ssa/_gen/genericOps.go b/src/cmd/compile/internal/ssa/_gen/genericOps.go index e714e347e2b07b..34514abc92fdef 100644 --- a/src/cmd/compile/internal/ssa/_gen/genericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/genericOps.go @@ -375,8 +375,12 @@ var genericOps = []opData{ // masked memory operations. 
// TODO add 16 and 8 + {name: "LoadMasked8", argLength: 3}, // Load from arg0, arg1 = mask of 8-bits, arg2 = memory + {name: "LoadMasked16", argLength: 3}, // Load from arg0, arg1 = mask of 16-bits, arg2 = memory {name: "LoadMasked32", argLength: 3}, // Load from arg0, arg1 = mask of 32-bits, arg2 = memory {name: "LoadMasked64", argLength: 3}, // Load from arg0, arg1 = mask of 64-bits, arg2 = memory + {name: "StoreMasked8", argLength: 4, typ: "Mem", aux: "Typ"}, // Store arg2 to arg0, arg1=mask of 8-bits, arg3 = memory + {name: "StoreMasked16", argLength: 4, typ: "Mem", aux: "Typ"}, // Store arg2 to arg0, arg1=mask of 16-bits, arg3 = memory {name: "StoreMasked32", argLength: 4, typ: "Mem", aux: "Typ"}, // Store arg2 to arg0, arg1=mask of 32-bits, arg3 = memory {name: "StoreMasked64", argLength: 4, typ: "Mem", aux: "Typ"}, // Store arg2 to arg0, arg1=mask of 64-bits, arg3 = memory diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 61ce06203ab2ac..ed0203b6390dc1 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1177,6 +1177,14 @@ const ( OpAMD64VPMASK32store256 OpAMD64VPMASK64load256 OpAMD64VPMASK64store256 + OpAMD64VPMASK8load512 + OpAMD64VPMASK8store512 + OpAMD64VPMASK16load512 + OpAMD64VPMASK16store512 + OpAMD64VPMASK32load512 + OpAMD64VPMASK32store512 + OpAMD64VPMASK64load512 + OpAMD64VPMASK64store512 OpAMD64VPMOVMToVec8x16 OpAMD64VPMOVMToVec8x32 OpAMD64VPMOVMToVec8x64 @@ -4270,8 +4278,12 @@ const ( OpLoad OpDereference OpStore + OpLoadMasked8 + OpLoadMasked16 OpLoadMasked32 OpLoadMasked64 + OpStoreMasked8 + OpStoreMasked16 OpStoreMasked32 OpStoreMasked64 OpMove @@ -18661,6 +18673,134 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMASK8load512", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AVMOVDQU8, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMASK8store512", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AVMOVDQU8, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + }, + }, + { + name: "VPMASK16load512", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AVMOVDQU16, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMASK16store512", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AVMOVDQU16, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + }, + }, + { + name: "VPMASK32load512", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AVMOVDQU32, + reg: regInfo{ + inputs: 
[]inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMASK32store512", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AVMOVDQU32, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + }, + }, + { + name: "VPMASK64load512", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AVMOVDQU64, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMASK64store512", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AVMOVDQU64, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + }, + }, { name: "VPMOVMToVec8x16", argLen: 1, @@ -60363,6 +60503,16 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "LoadMasked8", + argLen: 3, + generic: true, + }, + { + name: "LoadMasked16", + argLen: 3, + generic: true, + }, { name: "LoadMasked32", argLen: 3, @@ -60373,6 +60523,18 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "StoreMasked8", + auxType: auxTyp, + argLen: 4, + generic: true, + }, + { + name: "StoreMasked16", + auxType: auxTyp, + argLen: 4, + generic: true, + }, { name: "StoreMasked32", auxType: auxTyp, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index d79c856ae8d20e..986f256887521b 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -2516,10 +2516,14 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpLoadMask8x32(v) case OpLoadMask8x64: return rewriteValueAMD64_OpLoadMask8x64(v) + case OpLoadMasked16: + return rewriteValueAMD64_OpLoadMasked16(v) case OpLoadMasked32: return rewriteValueAMD64_OpLoadMasked32(v) case OpLoadMasked64: return rewriteValueAMD64_OpLoadMasked64(v) + case OpLoadMasked8: + return rewriteValueAMD64_OpLoadMasked8(v) case OpLocalAddr: return rewriteValueAMD64_OpLocalAddr(v) case OpLsh16x16: @@ -5266,10 +5270,14 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpStoreMask8x32(v) case OpStoreMask8x64: return rewriteValueAMD64_OpStoreMask8x64(v) + case OpStoreMasked16: + return rewriteValueAMD64_OpStoreMasked16(v) case OpStoreMasked32: return rewriteValueAMD64_OpStoreMasked32(v) case OpStoreMasked64: return rewriteValueAMD64_OpStoreMasked64(v) + case OpStoreMasked8: + return rewriteValueAMD64_OpStoreMasked8(v) case OpSub16: v.Op = OpAMD64SUBL return true @@ -40881,10 +40889,35 @@ func rewriteValueAMD64_OpLoadMask8x64(v *Value) bool { return true } } +func rewriteValueAMD64_OpLoadMasked16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (LoadMasked16 ptr 
mask mem) + // cond: t.Size() == 64 + // result: (VPMASK16load512 ptr (VPMOVVec16x32ToM mask) mem) + for { + t := v.Type + ptr := v_0 + mask := v_1 + mem := v_2 + if !(t.Size() == 64) { + break + } + v.reset(OpAMD64VPMASK16load512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(ptr, v0, mem) + return true + } + return false +} func rewriteValueAMD64_OpLoadMasked32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] + b := v.Block // match: (LoadMasked32 ptr mask mem) // cond: t.Size() == 16 // result: (VPMASK32load128 ptr mask mem) @@ -40915,12 +40948,30 @@ func rewriteValueAMD64_OpLoadMasked32(v *Value) bool { v.AddArg3(ptr, mask, mem) return true } + // match: (LoadMasked32 ptr mask mem) + // cond: t.Size() == 64 + // result: (VPMASK32load512 ptr (VPMOVVec32x16ToM mask) mem) + for { + t := v.Type + ptr := v_0 + mask := v_1 + mem := v_2 + if !(t.Size() == 64) { + break + } + v.reset(OpAMD64VPMASK32load512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(ptr, v0, mem) + return true + } return false } func rewriteValueAMD64_OpLoadMasked64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] + b := v.Block // match: (LoadMasked64 ptr mask mem) // cond: t.Size() == 16 // result: (VPMASK64load128 ptr mask mem) @@ -40951,6 +41002,47 @@ func rewriteValueAMD64_OpLoadMasked64(v *Value) bool { v.AddArg3(ptr, mask, mem) return true } + // match: (LoadMasked64 ptr mask mem) + // cond: t.Size() == 64 + // result: (VPMASK64load512 ptr (VPMOVVec64x8ToM mask) mem) + for { + t := v.Type + ptr := v_0 + mask := v_1 + mem := v_2 + if !(t.Size() == 64) { + break + } + v.reset(OpAMD64VPMASK64load512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(ptr, v0, mem) + return true + } + return false +} +func rewriteValueAMD64_OpLoadMasked8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (LoadMasked8 ptr mask mem) + // cond: t.Size() == 64 + // result: (VPMASK8load512 ptr (VPMOVVec8x64ToM mask) mem) + for { + t := v.Type + ptr := v_0 + mask := v_1 + mem := v_2 + if !(t.Size() == 64) { + break + } + v.reset(OpAMD64VPMASK8load512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(ptr, v0, mem) + return true + } return false } func rewriteValueAMD64_OpLocalAddr(v *Value) bool { @@ -53915,11 +54007,38 @@ func rewriteValueAMD64_OpStoreMask8x64(v *Value) bool { return true } } +func rewriteValueAMD64_OpStoreMasked16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (StoreMasked16 {t} ptr mask val mem) + // cond: t.Size() == 64 + // result: (VPMASK16store512 ptr (VPMOVVec16x32ToM mask) val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + mask := v_1 + val := v_2 + mem := v_3 + if !(t.Size() == 64) { + break + } + v.reset(OpAMD64VPMASK16store512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(ptr, v0, val, mem) + return true + } + return false +} func rewriteValueAMD64_OpStoreMasked32(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] + b := v.Block // match: (StoreMasked32 {t} ptr mask val mem) // cond: t.Size() == 16 // result: (VPMASK32store128 ptr mask val mem) @@ -53952,6 +54071,24 @@ func rewriteValueAMD64_OpStoreMasked32(v *Value) bool { v.AddArg4(ptr, mask, val, mem) return true } + // 
match: (StoreMasked32 {t} ptr mask val mem) + // cond: t.Size() == 64 + // result: (VPMASK32store512 ptr (VPMOVVec32x16ToM mask) val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + mask := v_1 + val := v_2 + mem := v_3 + if !(t.Size() == 64) { + break + } + v.reset(OpAMD64VPMASK32store512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(ptr, v0, val, mem) + return true + } return false } func rewriteValueAMD64_OpStoreMasked64(v *Value) bool { @@ -53959,6 +54096,7 @@ func rewriteValueAMD64_OpStoreMasked64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] + b := v.Block // match: (StoreMasked64 {t} ptr mask val mem) // cond: t.Size() == 16 // result: (VPMASK64store128 ptr mask val mem) @@ -53991,6 +54129,50 @@ func rewriteValueAMD64_OpStoreMasked64(v *Value) bool { v.AddArg4(ptr, mask, val, mem) return true } + // match: (StoreMasked64 {t} ptr mask val mem) + // cond: t.Size() == 64 + // result: (VPMASK64store512 ptr (VPMOVVec64x8ToM mask) val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + mask := v_1 + val := v_2 + mem := v_3 + if !(t.Size() == 64) { + break + } + v.reset(OpAMD64VPMASK64store512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(ptr, v0, val, mem) + return true + } + return false +} +func rewriteValueAMD64_OpStoreMasked8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (StoreMasked8 {t} ptr mask val mem) + // cond: t.Size() == 64 + // result: (VPMASK8store512 ptr (VPMOVVec8x64ToM mask) val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + mask := v_1 + val := v_2 + mem := v_3 + if !(t.Size() == 64) { + break + } + v.reset(OpAMD64VPMASK8store512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(ptr, v0, val, mem) + return true + } return false } func rewriteValueAMD64_OpSubMaskedFloat32x16(v *Value) bool { From c25e5c86b2da8117b2d5c934b368ecbcf8e2efd5 Mon Sep 17 00:00:00 2001 From: David Chase Date: Thu, 24 Jul 2025 10:31:46 -0400 Subject: [PATCH 106/139] [dev.simd] cmd/compile: generated code for K-mask-register slice load/stores plus slice-part load, store and test for a single type. Generated by arch/internal/simdgen CL 690315 Change-Id: I58052728b544c4a772a2870ac68f3c832813e1ea Reviewed-on: https://go-review.googlesource.com/c/go/+/690336 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao --- .../compile/internal/ssagen/simdintrinsics.go | 28 +++ src/simd/slicepart_amd64.go | 45 ++++ src/simd/slicepart_test.go | 47 ++++ src/simd/types_amd64.go | 232 ++++++++++++++++++ 4 files changed, 352 insertions(+) diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index dddfab5b71ab7c..a30144cbd10ece 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -2148,26 +2148,54 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
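The masked load/store intrinsics registered here are what the partial-slice helpers later in this patch are built on. A usage sketch of that user-facing API (a fragment; requires AVX-512 and assumes a slice shorter than one full vector):

	// A short slice is widened into a full Int64x8 with zeroed upper
	// lanes, and StoreSlicePart writes back only len(s) lanes.
	s := []int64{1, 2, 3}
	v := simd.LoadInt64x8SlicePart(s) // lanes 0..2 hold 1,2,3; lanes 3..7 are 0
	v.StoreSlicePart(s)               // touches only the first 3 elements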
addF(simdPackage, "Float32x4.StoreMasked", simdMaskedStore(ssa.OpStoreMasked32), sys.AMD64) addF(simdPackage, "LoadMaskedFloat32x8", simdMaskedLoad(ssa.OpLoadMasked32), sys.AMD64) addF(simdPackage, "Float32x8.StoreMasked", simdMaskedStore(ssa.OpStoreMasked32), sys.AMD64) + addF(simdPackage, "LoadMaskedFloat32x16", simdMaskedLoad(ssa.OpLoadMasked32), sys.AMD64) + addF(simdPackage, "Float32x16.StoreMasked", simdMaskedStore(ssa.OpStoreMasked32), sys.AMD64) addF(simdPackage, "LoadMaskedFloat64x2", simdMaskedLoad(ssa.OpLoadMasked64), sys.AMD64) addF(simdPackage, "Float64x2.StoreMasked", simdMaskedStore(ssa.OpStoreMasked64), sys.AMD64) addF(simdPackage, "LoadMaskedFloat64x4", simdMaskedLoad(ssa.OpLoadMasked64), sys.AMD64) addF(simdPackage, "Float64x4.StoreMasked", simdMaskedStore(ssa.OpStoreMasked64), sys.AMD64) + addF(simdPackage, "LoadMaskedFloat64x8", simdMaskedLoad(ssa.OpLoadMasked64), sys.AMD64) + addF(simdPackage, "Float64x8.StoreMasked", simdMaskedStore(ssa.OpStoreMasked64), sys.AMD64) + addF(simdPackage, "LoadMaskedInt8x64", simdMaskedLoad(ssa.OpLoadMasked8), sys.AMD64) + addF(simdPackage, "Int8x64.StoreMasked", simdMaskedStore(ssa.OpStoreMasked8), sys.AMD64) + addF(simdPackage, "LoadMaskedInt16x32", simdMaskedLoad(ssa.OpLoadMasked16), sys.AMD64) + addF(simdPackage, "Int16x32.StoreMasked", simdMaskedStore(ssa.OpStoreMasked16), sys.AMD64) addF(simdPackage, "LoadMaskedInt32x4", simdMaskedLoad(ssa.OpLoadMasked32), sys.AMD64) addF(simdPackage, "Int32x4.StoreMasked", simdMaskedStore(ssa.OpStoreMasked32), sys.AMD64) addF(simdPackage, "LoadMaskedInt32x8", simdMaskedLoad(ssa.OpLoadMasked32), sys.AMD64) addF(simdPackage, "Int32x8.StoreMasked", simdMaskedStore(ssa.OpStoreMasked32), sys.AMD64) + addF(simdPackage, "LoadMaskedInt32x16", simdMaskedLoad(ssa.OpLoadMasked32), sys.AMD64) + addF(simdPackage, "Int32x16.StoreMasked", simdMaskedStore(ssa.OpStoreMasked32), sys.AMD64) addF(simdPackage, "LoadMaskedInt64x2", simdMaskedLoad(ssa.OpLoadMasked64), sys.AMD64) addF(simdPackage, "Int64x2.StoreMasked", simdMaskedStore(ssa.OpStoreMasked64), sys.AMD64) addF(simdPackage, "LoadMaskedInt64x4", simdMaskedLoad(ssa.OpLoadMasked64), sys.AMD64) addF(simdPackage, "Int64x4.StoreMasked", simdMaskedStore(ssa.OpStoreMasked64), sys.AMD64) + addF(simdPackage, "LoadMaskedInt64x8", simdMaskedLoad(ssa.OpLoadMasked64), sys.AMD64) + addF(simdPackage, "Int64x8.StoreMasked", simdMaskedStore(ssa.OpStoreMasked64), sys.AMD64) + addF(simdPackage, "LoadMaskedUint8x64", simdMaskedLoad(ssa.OpLoadMasked8), sys.AMD64) + addF(simdPackage, "Uint8x64.StoreMasked", simdMaskedStore(ssa.OpStoreMasked8), sys.AMD64) + addF(simdPackage, "LoadMaskedUint16x32", simdMaskedLoad(ssa.OpLoadMasked16), sys.AMD64) + addF(simdPackage, "Uint16x32.StoreMasked", simdMaskedStore(ssa.OpStoreMasked16), sys.AMD64) addF(simdPackage, "LoadMaskedUint32x4", simdMaskedLoad(ssa.OpLoadMasked32), sys.AMD64) addF(simdPackage, "Uint32x4.StoreMasked", simdMaskedStore(ssa.OpStoreMasked32), sys.AMD64) addF(simdPackage, "LoadMaskedUint32x8", simdMaskedLoad(ssa.OpLoadMasked32), sys.AMD64) addF(simdPackage, "Uint32x8.StoreMasked", simdMaskedStore(ssa.OpStoreMasked32), sys.AMD64) + addF(simdPackage, "LoadMaskedUint32x16", simdMaskedLoad(ssa.OpLoadMasked32), sys.AMD64) + addF(simdPackage, "Uint32x16.StoreMasked", simdMaskedStore(ssa.OpStoreMasked32), sys.AMD64) addF(simdPackage, "LoadMaskedUint64x2", simdMaskedLoad(ssa.OpLoadMasked64), sys.AMD64) addF(simdPackage, "Uint64x2.StoreMasked", simdMaskedStore(ssa.OpStoreMasked64), sys.AMD64) addF(simdPackage, "LoadMaskedUint64x4", 
simdMaskedLoad(ssa.OpLoadMasked64), sys.AMD64) addF(simdPackage, "Uint64x4.StoreMasked", simdMaskedStore(ssa.OpStoreMasked64), sys.AMD64) + addF(simdPackage, "LoadMaskedUint64x8", simdMaskedLoad(ssa.OpLoadMasked64), sys.AMD64) + addF(simdPackage, "Uint64x8.StoreMasked", simdMaskedStore(ssa.OpStoreMasked64), sys.AMD64) + addF(simdPackage, "LoadMaskedMask8x64", simdMaskedLoad(ssa.OpLoadMasked8), sys.AMD64) + addF(simdPackage, "Mask8x64.StoreMasked", simdMaskedStore(ssa.OpStoreMasked8), sys.AMD64) + addF(simdPackage, "LoadMaskedMask16x32", simdMaskedLoad(ssa.OpLoadMasked16), sys.AMD64) + addF(simdPackage, "Mask16x32.StoreMasked", simdMaskedStore(ssa.OpStoreMasked16), sys.AMD64) + addF(simdPackage, "LoadMaskedMask32x16", simdMaskedLoad(ssa.OpLoadMasked32), sys.AMD64) + addF(simdPackage, "Mask32x16.StoreMasked", simdMaskedStore(ssa.OpStoreMasked32), sys.AMD64) + addF(simdPackage, "LoadMaskedMask64x8", simdMaskedLoad(ssa.OpLoadMasked64), sys.AMD64) + addF(simdPackage, "Mask64x8.StoreMasked", simdMaskedStore(ssa.OpStoreMasked64), sys.AMD64) addF(simdPackage, "Mask8x16.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x16.AsMask8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask8x16.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) diff --git a/src/simd/slicepart_amd64.go b/src/simd/slicepart_amd64.go index 00025775be114b..3fcfc6255ba4ff 100644 --- a/src/simd/slicepart_amd64.go +++ b/src/simd/slicepart_amd64.go @@ -419,6 +419,24 @@ func paInt64x4(s []int64) *[4]int64 { return (*[4]int64)(unsafe.Pointer(&s[0])) } +// For 512-bit masked loads/stores + +func paInt64x8(s []int64) *[8]int64 { + return (*[8]int64)(unsafe.Pointer(&s[0])) +} + +func paInt32x16(s []int32) *[16]int32 { + return (*[16]int32)(unsafe.Pointer(&s[0])) +} + +func paInt16x32(s []int16) *[32]int16 { + return (*[32]int16)(unsafe.Pointer(&s[0])) +} + +func paInt8x64(s []int8) *[64]int8 { + return (*[64]int8)(unsafe.Pointer(&s[0])) +} + /* 32 and 64-bit slice-part loads for AVX2 (128 and 256 bit) */ // LoadInt32x4SlicePart loads a Int32x4 from the slice s. @@ -742,3 +760,30 @@ func (x Float64x4) StoreSlicePart(s []float64) { t := unsafe.Slice((*int64)(unsafe.Pointer(&s[0])), len(s)) x.AsInt64x4().StoreSlicePart(t) } + +func LoadInt64x8SlicePart(s []int64) Int64x8 { + l := len(s) + if l >= 8 { + return LoadInt64x8Slice(s) + } + if l == 0 { + var x Int64x8 + return x + } + + mask := Mask64x8FromBits(0xff >> (8 - l)) + return LoadMaskedInt64x8(paInt64x8(s), mask) +} + +func (x Int64x8) StoreSlicePart(s []int64) { + l := len(s) + if l >= 8 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := Mask64x8FromBits(0xff >> (8 - l)) + x.StoreMasked(paInt64x8(s), mask) +} diff --git a/src/simd/slicepart_test.go b/src/simd/slicepart_test.go index cfdb7581d9e9e7..c9492bea1ba2dc 100644 --- a/src/simd/slicepart_test.go +++ b/src/simd/slicepart_test.go @@ -341,3 +341,50 @@ func TestSlicePartFloat32(t *testing.T) { } } } + +// 512-bit load + +func TestSlicePartInt64(t *testing.T) { + if !simd.HasAVX512() { + t.Skip("Test requires HasAVX512, not available on this hardware") + return + } + + L := 8 + c := []int64{1, 2, 3, 4, 5, 6, 7, 8, 86, 86, 86, 86} + a := c[:L+1] + for i := range a { + // Test the load first + // e is a partial slice. 
+ e := a[i:] + v := simd.LoadInt64x8SlicePart(e) + // d contains what a ought to contain + d := make([]int64, L) + for j := 0; j < len(e) && j < len(d); j++ { + d[j] = e[j] + } + + b := make([]int64, L) + v.StoreSlice(b) + // test the load + checkSlicesLogInput(t, b, d, func() { t.Helper(); t.Logf("Len(e)=%d", len(e)) }) + + // Test the store + f := make([]int64, L+1) + for i := range f { + f[i] = 99 + } + + v.StoreSlicePart(f[:len(e)]) + if len(e) < len(b) { + checkSlices(t, f, b[:len(e)]) + } else { + checkSlices(t, f, b) + } + for i := len(e); i < len(f); i++ { + if f[i] != 99 { + t.Errorf("StoreSlicePart altered f[%d], expected 99, saw %v", i, f[i]) + } + } + } +} diff --git a/src/simd/types_amd64.go b/src/simd/types_amd64.go index 252da021e2e0cc..ac8cf3c210adde 100644 --- a/src/simd/types_amd64.go +++ b/src/simd/types_amd64.go @@ -31,12 +31,16 @@ func (x Float32x4) Store(y *[4]float32) // LoadMaskedFloat32x4 loads a Float32x4 from an array, // at those elements enabled by mask // +// Asm: VMASKMOVD, CPU Feature: AVX2 +// //go:noescape func LoadMaskedFloat32x4(y *[4]float32, mask Mask32x4) Float32x4 // StoreMasked stores a Float32x4 to an array, // at those elements enabled by mask // +// Asm: VMASKMOVD, CPU Feature: AVX2 +// //go:noescape func (x Float32x4) StoreMasked(y *[4]float32, mask Mask32x4) @@ -62,12 +66,16 @@ func (x Float64x2) Store(y *[2]float64) // LoadMaskedFloat64x2 loads a Float64x2 from an array, // at those elements enabled by mask // +// Asm: VMASKMOVQ, CPU Feature: AVX2 +// //go:noescape func LoadMaskedFloat64x2(y *[2]float64, mask Mask64x2) Float64x2 // StoreMasked stores a Float64x2 to an array, // at those elements enabled by mask // +// Asm: VMASKMOVQ, CPU Feature: AVX2 +// //go:noescape func (x Float64x2) StoreMasked(y *[2]float64, mask Mask64x2) @@ -131,12 +139,16 @@ func (x Int32x4) Store(y *[4]int32) // LoadMaskedInt32x4 loads a Int32x4 from an array, // at those elements enabled by mask // +// Asm: VMASKMOVD, CPU Feature: AVX2 +// //go:noescape func LoadMaskedInt32x4(y *[4]int32, mask Mask32x4) Int32x4 // StoreMasked stores a Int32x4 to an array, // at those elements enabled by mask // +// Asm: VMASKMOVD, CPU Feature: AVX2 +// //go:noescape func (x Int32x4) StoreMasked(y *[4]int32, mask Mask32x4) @@ -162,12 +174,16 @@ func (x Int64x2) Store(y *[2]int64) // LoadMaskedInt64x2 loads a Int64x2 from an array, // at those elements enabled by mask // +// Asm: VMASKMOVQ, CPU Feature: AVX2 +// //go:noescape func LoadMaskedInt64x2(y *[2]int64, mask Mask64x2) Int64x2 // StoreMasked stores a Int64x2 to an array, // at those elements enabled by mask // +// Asm: VMASKMOVQ, CPU Feature: AVX2 +// //go:noescape func (x Int64x2) StoreMasked(y *[2]int64, mask Mask64x2) @@ -231,12 +247,16 @@ func (x Uint32x4) Store(y *[4]uint32) // LoadMaskedUint32x4 loads a Uint32x4 from an array, // at those elements enabled by mask // +// Asm: VMASKMOVD, CPU Feature: AVX2 +// //go:noescape func LoadMaskedUint32x4(y *[4]uint32, mask Mask32x4) Uint32x4 // StoreMasked stores a Uint32x4 to an array, // at those elements enabled by mask // +// Asm: VMASKMOVD, CPU Feature: AVX2 +// //go:noescape func (x Uint32x4) StoreMasked(y *[4]uint32, mask Mask32x4) @@ -262,12 +282,16 @@ func (x Uint64x2) Store(y *[2]uint64) // LoadMaskedUint64x2 loads a Uint64x2 from an array, // at those elements enabled by mask // +// Asm: VMASKMOVQ, CPU Feature: AVX2 +// //go:noescape func LoadMaskedUint64x2(y *[2]uint64, mask Mask64x2) Uint64x2 // StoreMasked stores a Uint64x2 to an array, // at those elements enabled by 
mask // +// Asm: VMASKMOVQ, CPU Feature: AVX2 +// //go:noescape func (x Uint64x2) StoreMasked(y *[2]uint64, mask Mask64x2) @@ -295,6 +319,8 @@ func (x Mask8x16) StoreToBits(y *uint64) // Mask8x16FromBits constructs a Mask8x16 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 16 bits of y are used. +// +// Asm: KMOVB, CPU Feature: AVX512" func Mask8x16FromBits(y uint16) Mask8x16 // Mask16x8 is a 128-bit SIMD vector of 8 int16 @@ -321,6 +347,8 @@ func (x Mask16x8) StoreToBits(y *uint64) // Mask16x8FromBits constructs a Mask16x8 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 8 bits of y are used. +// +// Asm: KMOVW, CPU Feature: AVX512" func Mask16x8FromBits(y uint8) Mask16x8 // Mask32x4 is a 128-bit SIMD vector of 4 int32 @@ -347,6 +375,8 @@ func (x Mask32x4) StoreToBits(y *uint64) // Mask32x4FromBits constructs a Mask32x4 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 4 bits of y are used. +// +// Asm: KMOVD, CPU Feature: AVX512" func Mask32x4FromBits(y uint8) Mask32x4 // Mask64x2 is a 128-bit SIMD vector of 2 int64 @@ -373,6 +403,8 @@ func (x Mask64x2) StoreToBits(y *uint64) // Mask64x2FromBits constructs a Mask64x2 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 2 bits of y are used. +// +// Asm: KMOVQ, CPU Feature: AVX512" func Mask64x2FromBits(y uint8) Mask64x2 // v256 is a tag type that tells the compiler that this is really 256-bit SIMD @@ -402,12 +434,16 @@ func (x Float32x8) Store(y *[8]float32) // LoadMaskedFloat32x8 loads a Float32x8 from an array, // at those elements enabled by mask // +// Asm: VMASKMOVD, CPU Feature: AVX2 +// //go:noescape func LoadMaskedFloat32x8(y *[8]float32, mask Mask32x8) Float32x8 // StoreMasked stores a Float32x8 to an array, // at those elements enabled by mask // +// Asm: VMASKMOVD, CPU Feature: AVX2 +// //go:noescape func (x Float32x8) StoreMasked(y *[8]float32, mask Mask32x8) @@ -433,12 +469,16 @@ func (x Float64x4) Store(y *[4]float64) // LoadMaskedFloat64x4 loads a Float64x4 from an array, // at those elements enabled by mask // +// Asm: VMASKMOVQ, CPU Feature: AVX2 +// //go:noescape func LoadMaskedFloat64x4(y *[4]float64, mask Mask64x4) Float64x4 // StoreMasked stores a Float64x4 to an array, // at those elements enabled by mask // +// Asm: VMASKMOVQ, CPU Feature: AVX2 +// //go:noescape func (x Float64x4) StoreMasked(y *[4]float64, mask Mask64x4) @@ -502,12 +542,16 @@ func (x Int32x8) Store(y *[8]int32) // LoadMaskedInt32x8 loads a Int32x8 from an array, // at those elements enabled by mask // +// Asm: VMASKMOVD, CPU Feature: AVX2 +// //go:noescape func LoadMaskedInt32x8(y *[8]int32, mask Mask32x8) Int32x8 // StoreMasked stores a Int32x8 to an array, // at those elements enabled by mask // +// Asm: VMASKMOVD, CPU Feature: AVX2 +// //go:noescape func (x Int32x8) StoreMasked(y *[8]int32, mask Mask32x8) @@ -533,12 +577,16 @@ func (x Int64x4) Store(y *[4]int64) // LoadMaskedInt64x4 loads a Int64x4 from an array, // at those elements enabled by mask // +// Asm: VMASKMOVQ, CPU Feature: AVX2 +// //go:noescape func LoadMaskedInt64x4(y *[4]int64, mask Mask64x4) Int64x4 // StoreMasked stores a Int64x4 to an array, // at those elements enabled by mask // +// Asm: VMASKMOVQ, CPU Feature: AVX2 +// //go:noescape func (x Int64x4) StoreMasked(y *[4]int64, mask Mask64x4) @@ -602,12 +650,16 @@ func (x Uint32x8) Store(y *[8]uint32) // LoadMaskedUint32x8 loads a Uint32x8 
from an array, // at those elements enabled by mask // +// Asm: VMASKMOVD, CPU Feature: AVX2 +// //go:noescape func LoadMaskedUint32x8(y *[8]uint32, mask Mask32x8) Uint32x8 // StoreMasked stores a Uint32x8 to an array, // at those elements enabled by mask // +// Asm: VMASKMOVD, CPU Feature: AVX2 +// //go:noescape func (x Uint32x8) StoreMasked(y *[8]uint32, mask Mask32x8) @@ -633,12 +685,16 @@ func (x Uint64x4) Store(y *[4]uint64) // LoadMaskedUint64x4 loads a Uint64x4 from an array, // at those elements enabled by mask // +// Asm: VMASKMOVQ, CPU Feature: AVX2 +// //go:noescape func LoadMaskedUint64x4(y *[4]uint64, mask Mask64x4) Uint64x4 // StoreMasked stores a Uint64x4 to an array, // at those elements enabled by mask // +// Asm: VMASKMOVQ, CPU Feature: AVX2 +// //go:noescape func (x Uint64x4) StoreMasked(y *[4]uint64, mask Mask64x4) @@ -666,6 +722,8 @@ func (x Mask8x32) StoreToBits(y *uint64) // Mask8x32FromBits constructs a Mask8x32 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 32 bits of y are used. +// +// Asm: KMOVB, CPU Feature: AVX512" func Mask8x32FromBits(y uint32) Mask8x32 // Mask16x16 is a 256-bit SIMD vector of 16 int16 @@ -692,6 +750,8 @@ func (x Mask16x16) StoreToBits(y *uint64) // Mask16x16FromBits constructs a Mask16x16 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 16 bits of y are used. +// +// Asm: KMOVW, CPU Feature: AVX512" func Mask16x16FromBits(y uint16) Mask16x16 // Mask32x8 is a 256-bit SIMD vector of 8 int32 @@ -718,6 +778,8 @@ func (x Mask32x8) StoreToBits(y *uint64) // Mask32x8FromBits constructs a Mask32x8 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 8 bits of y are used. +// +// Asm: KMOVD, CPU Feature: AVX512" func Mask32x8FromBits(y uint8) Mask32x8 // Mask64x4 is a 256-bit SIMD vector of 4 int64 @@ -744,6 +806,8 @@ func (x Mask64x4) StoreToBits(y *uint64) // Mask64x4FromBits constructs a Mask64x4 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 4 bits of y are used. 
+// +// Asm: KMOVQ, CPU Feature: AVX512" func Mask64x4FromBits(y uint8) Mask64x4 // v512 is a tag type that tells the compiler that this is really 512-bit SIMD @@ -770,6 +834,22 @@ func LoadFloat32x16(y *[16]float32) Float32x16 //go:noescape func (x Float32x16) Store(y *[16]float32) +// LoadMaskedFloat32x16 loads a Float32x16 from an array, +// at those elements enabled by mask +// +// Asm: VMOVDQU32.Z, CPU Feature: AVX512 +// +//go:noescape +func LoadMaskedFloat32x16(y *[16]float32, mask Mask32x16) Float32x16 + +// StoreMasked stores a Float32x16 to an array, +// at those elements enabled by mask +// +// Asm: VMOVDQU32, CPU Feature: AVX512 +// +//go:noescape +func (x Float32x16) StoreMasked(y *[16]float32, mask Mask32x16) + // Float64x8 is a 512-bit SIMD vector of 8 float64 type Float64x8 struct { float64x8 v512 @@ -789,6 +869,22 @@ func LoadFloat64x8(y *[8]float64) Float64x8 //go:noescape func (x Float64x8) Store(y *[8]float64) +// LoadMaskedFloat64x8 loads a Float64x8 from an array, +// at those elements enabled by mask +// +// Asm: VMOVDQU64.Z, CPU Feature: AVX512 +// +//go:noescape +func LoadMaskedFloat64x8(y *[8]float64, mask Mask64x8) Float64x8 + +// StoreMasked stores a Float64x8 to an array, +// at those elements enabled by mask +// +// Asm: VMOVDQU64, CPU Feature: AVX512 +// +//go:noescape +func (x Float64x8) StoreMasked(y *[8]float64, mask Mask64x8) + // Int8x64 is a 512-bit SIMD vector of 64 int8 type Int8x64 struct { int8x64 v512 @@ -808,6 +904,22 @@ func LoadInt8x64(y *[64]int8) Int8x64 //go:noescape func (x Int8x64) Store(y *[64]int8) +// LoadMaskedInt8x64 loads a Int8x64 from an array, +// at those elements enabled by mask +// +// Asm: VMOVDQU8.Z, CPU Feature: AVX512 +// +//go:noescape +func LoadMaskedInt8x64(y *[64]int8, mask Mask8x64) Int8x64 + +// StoreMasked stores a Int8x64 to an array, +// at those elements enabled by mask +// +// Asm: VMOVDQU8, CPU Feature: AVX512 +// +//go:noescape +func (x Int8x64) StoreMasked(y *[64]int8, mask Mask8x64) + // Int16x32 is a 512-bit SIMD vector of 32 int16 type Int16x32 struct { int16x32 v512 @@ -827,6 +939,22 @@ func LoadInt16x32(y *[32]int16) Int16x32 //go:noescape func (x Int16x32) Store(y *[32]int16) +// LoadMaskedInt16x32 loads a Int16x32 from an array, +// at those elements enabled by mask +// +// Asm: VMOVDQU16.Z, CPU Feature: AVX512 +// +//go:noescape +func LoadMaskedInt16x32(y *[32]int16, mask Mask16x32) Int16x32 + +// StoreMasked stores a Int16x32 to an array, +// at those elements enabled by mask +// +// Asm: VMOVDQU16, CPU Feature: AVX512 +// +//go:noescape +func (x Int16x32) StoreMasked(y *[32]int16, mask Mask16x32) + // Int32x16 is a 512-bit SIMD vector of 16 int32 type Int32x16 struct { int32x16 v512 @@ -846,6 +974,22 @@ func LoadInt32x16(y *[16]int32) Int32x16 //go:noescape func (x Int32x16) Store(y *[16]int32) +// LoadMaskedInt32x16 loads a Int32x16 from an array, +// at those elements enabled by mask +// +// Asm: VMOVDQU32.Z, CPU Feature: AVX512 +// +//go:noescape +func LoadMaskedInt32x16(y *[16]int32, mask Mask32x16) Int32x16 + +// StoreMasked stores a Int32x16 to an array, +// at those elements enabled by mask +// +// Asm: VMOVDQU32, CPU Feature: AVX512 +// +//go:noescape +func (x Int32x16) StoreMasked(y *[16]int32, mask Mask32x16) + // Int64x8 is a 512-bit SIMD vector of 8 int64 type Int64x8 struct { int64x8 v512 @@ -865,6 +1009,22 @@ func LoadInt64x8(y *[8]int64) Int64x8 //go:noescape func (x Int64x8) Store(y *[8]int64) +// LoadMaskedInt64x8 loads a Int64x8 from an array, +// at those elements enabled by mask 
+// +// Asm: VMOVDQU64.Z, CPU Feature: AVX512 +// +//go:noescape +func LoadMaskedInt64x8(y *[8]int64, mask Mask64x8) Int64x8 + +// StoreMasked stores a Int64x8 to an array, +// at those elements enabled by mask +// +// Asm: VMOVDQU64, CPU Feature: AVX512 +// +//go:noescape +func (x Int64x8) StoreMasked(y *[8]int64, mask Mask64x8) + // Uint8x64 is a 512-bit SIMD vector of 64 uint8 type Uint8x64 struct { uint8x64 v512 @@ -884,6 +1044,22 @@ func LoadUint8x64(y *[64]uint8) Uint8x64 //go:noescape func (x Uint8x64) Store(y *[64]uint8) +// LoadMaskedUint8x64 loads a Uint8x64 from an array, +// at those elements enabled by mask +// +// Asm: VMOVDQU8.Z, CPU Feature: AVX512 +// +//go:noescape +func LoadMaskedUint8x64(y *[64]uint8, mask Mask8x64) Uint8x64 + +// StoreMasked stores a Uint8x64 to an array, +// at those elements enabled by mask +// +// Asm: VMOVDQU8, CPU Feature: AVX512 +// +//go:noescape +func (x Uint8x64) StoreMasked(y *[64]uint8, mask Mask8x64) + // Uint16x32 is a 512-bit SIMD vector of 32 uint16 type Uint16x32 struct { uint16x32 v512 @@ -903,6 +1079,22 @@ func LoadUint16x32(y *[32]uint16) Uint16x32 //go:noescape func (x Uint16x32) Store(y *[32]uint16) +// LoadMaskedUint16x32 loads a Uint16x32 from an array, +// at those elements enabled by mask +// +// Asm: VMOVDQU16.Z, CPU Feature: AVX512 +// +//go:noescape +func LoadMaskedUint16x32(y *[32]uint16, mask Mask16x32) Uint16x32 + +// StoreMasked stores a Uint16x32 to an array, +// at those elements enabled by mask +// +// Asm: VMOVDQU16, CPU Feature: AVX512 +// +//go:noescape +func (x Uint16x32) StoreMasked(y *[32]uint16, mask Mask16x32) + // Uint32x16 is a 512-bit SIMD vector of 16 uint32 type Uint32x16 struct { uint32x16 v512 @@ -922,6 +1114,22 @@ func LoadUint32x16(y *[16]uint32) Uint32x16 //go:noescape func (x Uint32x16) Store(y *[16]uint32) +// LoadMaskedUint32x16 loads a Uint32x16 from an array, +// at those elements enabled by mask +// +// Asm: VMOVDQU32.Z, CPU Feature: AVX512 +// +//go:noescape +func LoadMaskedUint32x16(y *[16]uint32, mask Mask32x16) Uint32x16 + +// StoreMasked stores a Uint32x16 to an array, +// at those elements enabled by mask +// +// Asm: VMOVDQU32, CPU Feature: AVX512 +// +//go:noescape +func (x Uint32x16) StoreMasked(y *[16]uint32, mask Mask32x16) + // Uint64x8 is a 512-bit SIMD vector of 8 uint64 type Uint64x8 struct { uint64x8 v512 @@ -941,6 +1149,22 @@ func LoadUint64x8(y *[8]uint64) Uint64x8 //go:noescape func (x Uint64x8) Store(y *[8]uint64) +// LoadMaskedUint64x8 loads a Uint64x8 from an array, +// at those elements enabled by mask +// +// Asm: VMOVDQU64.Z, CPU Feature: AVX512 +// +//go:noescape +func LoadMaskedUint64x8(y *[8]uint64, mask Mask64x8) Uint64x8 + +// StoreMasked stores a Uint64x8 to an array, +// at those elements enabled by mask +// +// Asm: VMOVDQU64, CPU Feature: AVX512 +// +//go:noescape +func (x Uint64x8) StoreMasked(y *[8]uint64, mask Mask64x8) + // Mask8x64 is a 512-bit SIMD vector of 64 int8 type Mask8x64 struct { int8x64 v512 @@ -965,6 +1189,8 @@ func (x Mask8x64) StoreToBits(y *uint64) // Mask8x64FromBits constructs a Mask8x64 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 64 bits of y are used. +// +// Asm: KMOVB, CPU Feature: AVX512" func Mask8x64FromBits(y uint64) Mask8x64 // Mask16x32 is a 512-bit SIMD vector of 32 int16 @@ -991,6 +1217,8 @@ func (x Mask16x32) StoreToBits(y *uint64) // Mask16x32FromBits constructs a Mask16x32 from a bitmap value, where 1 means set for the indexed element, 0 means unset. 
// Only the lower 32 bits of y are used. +// +// Asm: KMOVW, CPU Feature: AVX512" func Mask16x32FromBits(y uint32) Mask16x32 // Mask32x16 is a 512-bit SIMD vector of 16 int32 @@ -1017,6 +1245,8 @@ func (x Mask32x16) StoreToBits(y *uint64) // Mask32x16FromBits constructs a Mask32x16 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 16 bits of y are used. +// +// Asm: KMOVD, CPU Feature: AVX512" func Mask32x16FromBits(y uint16) Mask32x16 // Mask64x8 is a 512-bit SIMD vector of 8 int64 @@ -1043,4 +1273,6 @@ func (x Mask64x8) StoreToBits(y *uint64) // Mask64x8FromBits constructs a Mask64x8 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 8 bits of y are used. +// +// Asm: KMOVQ, CPU Feature: AVX512" func Mask64x8FromBits(y uint8) Mask64x8 From 2c25f3e846e840b47dda21fec88bb69f84cd3561 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Thu, 31 Jul 2025 23:45:09 +0000 Subject: [PATCH 107/139] [dev.simd] cmd/compile, simd: change Shift*AndFillUpperFrom to Shift*Concat This CL is generated by CL 692216. Change-Id: Ib7530142bcce2a23f90d48866271994c57561955 Reviewed-on: https://go-review.googlesource.com/c/go/+/692215 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- .../compile/internal/ssa/_gen/simdAMD64.rules | 288 +++---- .../internal/ssa/_gen/simdgenericOps.go | 288 +++---- src/cmd/compile/internal/ssa/opGen.go | 576 +++++++------- src/cmd/compile/internal/ssa/rewriteAMD64.go | 720 +++++++++--------- .../compile/internal/ssagen/simdintrinsics.go | 288 +++---- src/simd/ops_amd64.go | 592 +++++++------- 6 files changed, 1376 insertions(+), 1376 deletions(-) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index e5e3fb0d50e715..38b602f35b8624 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -1401,42 +1401,42 @@ (ShiftAllLeftUint64x2 ...) => (VPSLLQ128 ...) (ShiftAllLeftUint64x4 ...) => (VPSLLQ256 ...) (ShiftAllLeftUint64x8 ...) => (VPSLLQ512 ...) -(ShiftAllLeftAndFillUpperFromInt16x8 ...) => (VPSHLDW128 ...) -(ShiftAllLeftAndFillUpperFromInt16x16 ...) => (VPSHLDW256 ...) -(ShiftAllLeftAndFillUpperFromInt16x32 ...) => (VPSHLDW512 ...) -(ShiftAllLeftAndFillUpperFromInt32x4 ...) => (VPSHLDD128 ...) -(ShiftAllLeftAndFillUpperFromInt32x8 ...) => (VPSHLDD256 ...) -(ShiftAllLeftAndFillUpperFromInt32x16 ...) => (VPSHLDD512 ...) -(ShiftAllLeftAndFillUpperFromInt64x2 ...) => (VPSHLDQ128 ...) -(ShiftAllLeftAndFillUpperFromInt64x4 ...) => (VPSHLDQ256 ...) -(ShiftAllLeftAndFillUpperFromInt64x8 ...) => (VPSHLDQ512 ...) -(ShiftAllLeftAndFillUpperFromUint16x8 ...) => (VPSHLDW128 ...) -(ShiftAllLeftAndFillUpperFromUint16x16 ...) => (VPSHLDW256 ...) -(ShiftAllLeftAndFillUpperFromUint16x32 ...) => (VPSHLDW512 ...) -(ShiftAllLeftAndFillUpperFromUint32x4 ...) => (VPSHLDD128 ...) -(ShiftAllLeftAndFillUpperFromUint32x8 ...) => (VPSHLDD256 ...) -(ShiftAllLeftAndFillUpperFromUint32x16 ...) => (VPSHLDD512 ...) -(ShiftAllLeftAndFillUpperFromUint64x2 ...) => (VPSHLDQ128 ...) -(ShiftAllLeftAndFillUpperFromUint64x4 ...) => (VPSHLDQ256 ...) -(ShiftAllLeftAndFillUpperFromUint64x8 ...) => (VPSHLDQ512 ...) 
-(ShiftAllLeftAndFillUpperFromMaskedInt16x8 [a] x y mask) => (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) -(ShiftAllLeftAndFillUpperFromMaskedInt16x16 [a] x y mask) => (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) -(ShiftAllLeftAndFillUpperFromMaskedInt16x32 [a] x y mask) => (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) -(ShiftAllLeftAndFillUpperFromMaskedInt32x4 [a] x y mask) => (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) -(ShiftAllLeftAndFillUpperFromMaskedInt32x8 [a] x y mask) => (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) -(ShiftAllLeftAndFillUpperFromMaskedInt32x16 [a] x y mask) => (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) -(ShiftAllLeftAndFillUpperFromMaskedInt64x2 [a] x y mask) => (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) -(ShiftAllLeftAndFillUpperFromMaskedInt64x4 [a] x y mask) => (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) -(ShiftAllLeftAndFillUpperFromMaskedInt64x8 [a] x y mask) => (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) -(ShiftAllLeftAndFillUpperFromMaskedUint16x8 [a] x y mask) => (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) -(ShiftAllLeftAndFillUpperFromMaskedUint16x16 [a] x y mask) => (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) -(ShiftAllLeftAndFillUpperFromMaskedUint16x32 [a] x y mask) => (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) -(ShiftAllLeftAndFillUpperFromMaskedUint32x4 [a] x y mask) => (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) -(ShiftAllLeftAndFillUpperFromMaskedUint32x8 [a] x y mask) => (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) -(ShiftAllLeftAndFillUpperFromMaskedUint32x16 [a] x y mask) => (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) -(ShiftAllLeftAndFillUpperFromMaskedUint64x2 [a] x y mask) => (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) -(ShiftAllLeftAndFillUpperFromMaskedUint64x4 [a] x y mask) => (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) -(ShiftAllLeftAndFillUpperFromMaskedUint64x8 [a] x y mask) => (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) +(ShiftAllLeftConcatInt16x8 ...) => (VPSHLDW128 ...) +(ShiftAllLeftConcatInt16x16 ...) => (VPSHLDW256 ...) +(ShiftAllLeftConcatInt16x32 ...) => (VPSHLDW512 ...) +(ShiftAllLeftConcatInt32x4 ...) => (VPSHLDD128 ...) +(ShiftAllLeftConcatInt32x8 ...) => (VPSHLDD256 ...) +(ShiftAllLeftConcatInt32x16 ...) => (VPSHLDD512 ...) +(ShiftAllLeftConcatInt64x2 ...) => (VPSHLDQ128 ...) +(ShiftAllLeftConcatInt64x4 ...) => (VPSHLDQ256 ...) +(ShiftAllLeftConcatInt64x8 ...) => (VPSHLDQ512 ...) +(ShiftAllLeftConcatUint16x8 ...) => (VPSHLDW128 ...) +(ShiftAllLeftConcatUint16x16 ...) => (VPSHLDW256 ...) +(ShiftAllLeftConcatUint16x32 ...) => (VPSHLDW512 ...) +(ShiftAllLeftConcatUint32x4 ...) => (VPSHLDD128 ...) +(ShiftAllLeftConcatUint32x8 ...) => (VPSHLDD256 ...) +(ShiftAllLeftConcatUint32x16 ...) => (VPSHLDD512 ...) +(ShiftAllLeftConcatUint64x2 ...) => (VPSHLDQ128 ...) +(ShiftAllLeftConcatUint64x4 ...) => (VPSHLDQ256 ...) +(ShiftAllLeftConcatUint64x8 ...) => (VPSHLDQ512 ...) 
+(ShiftAllLeftConcatMaskedInt16x8 [a] x y mask) => (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) +(ShiftAllLeftConcatMaskedInt16x16 [a] x y mask) => (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) +(ShiftAllLeftConcatMaskedInt16x32 [a] x y mask) => (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) +(ShiftAllLeftConcatMaskedInt32x4 [a] x y mask) => (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) +(ShiftAllLeftConcatMaskedInt32x8 [a] x y mask) => (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) +(ShiftAllLeftConcatMaskedInt32x16 [a] x y mask) => (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) +(ShiftAllLeftConcatMaskedInt64x2 [a] x y mask) => (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) +(ShiftAllLeftConcatMaskedInt64x4 [a] x y mask) => (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) +(ShiftAllLeftConcatMaskedInt64x8 [a] x y mask) => (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) +(ShiftAllLeftConcatMaskedUint16x8 [a] x y mask) => (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) +(ShiftAllLeftConcatMaskedUint16x16 [a] x y mask) => (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) +(ShiftAllLeftConcatMaskedUint16x32 [a] x y mask) => (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) +(ShiftAllLeftConcatMaskedUint32x4 [a] x y mask) => (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) +(ShiftAllLeftConcatMaskedUint32x8 [a] x y mask) => (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) +(ShiftAllLeftConcatMaskedUint32x16 [a] x y mask) => (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) +(ShiftAllLeftConcatMaskedUint64x2 [a] x y mask) => (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) +(ShiftAllLeftConcatMaskedUint64x4 [a] x y mask) => (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) +(ShiftAllLeftConcatMaskedUint64x8 [a] x y mask) => (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) (ShiftAllLeftMaskedInt16x8 x y mask) => (VPSLLWMasked128 x y (VPMOVVec16x8ToM mask)) (ShiftAllLeftMaskedInt16x16 x y mask) => (VPSLLWMasked256 x y (VPMOVVec16x16ToM mask)) (ShiftAllLeftMaskedInt16x32 x y mask) => (VPSLLWMasked512 x y (VPMOVVec16x32ToM mask)) @@ -1473,42 +1473,42 @@ (ShiftAllRightUint64x2 ...) => (VPSRLQ128 ...) (ShiftAllRightUint64x4 ...) => (VPSRLQ256 ...) (ShiftAllRightUint64x8 ...) => (VPSRLQ512 ...) -(ShiftAllRightAndFillUpperFromInt16x8 ...) => (VPSHRDW128 ...) -(ShiftAllRightAndFillUpperFromInt16x16 ...) => (VPSHRDW256 ...) -(ShiftAllRightAndFillUpperFromInt16x32 ...) => (VPSHRDW512 ...) -(ShiftAllRightAndFillUpperFromInt32x4 ...) => (VPSHRDD128 ...) -(ShiftAllRightAndFillUpperFromInt32x8 ...) => (VPSHRDD256 ...) -(ShiftAllRightAndFillUpperFromInt32x16 ...) => (VPSHRDD512 ...) -(ShiftAllRightAndFillUpperFromInt64x2 ...) => (VPSHRDQ128 ...) -(ShiftAllRightAndFillUpperFromInt64x4 ...) => (VPSHRDQ256 ...) -(ShiftAllRightAndFillUpperFromInt64x8 ...) => (VPSHRDQ512 ...) -(ShiftAllRightAndFillUpperFromUint16x8 ...) => (VPSHRDW128 ...) -(ShiftAllRightAndFillUpperFromUint16x16 ...) => (VPSHRDW256 ...) -(ShiftAllRightAndFillUpperFromUint16x32 ...) => (VPSHRDW512 ...) -(ShiftAllRightAndFillUpperFromUint32x4 ...) => (VPSHRDD128 ...) -(ShiftAllRightAndFillUpperFromUint32x8 ...) => (VPSHRDD256 ...) -(ShiftAllRightAndFillUpperFromUint32x16 ...) => (VPSHRDD512 ...) -(ShiftAllRightAndFillUpperFromUint64x2 ...) => (VPSHRDQ128 ...) -(ShiftAllRightAndFillUpperFromUint64x4 ...) => (VPSHRDQ256 ...) -(ShiftAllRightAndFillUpperFromUint64x8 ...) => (VPSHRDQ512 ...) 
-(ShiftAllRightAndFillUpperFromMaskedInt16x8 [a] x y mask) => (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) -(ShiftAllRightAndFillUpperFromMaskedInt16x16 [a] x y mask) => (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) -(ShiftAllRightAndFillUpperFromMaskedInt16x32 [a] x y mask) => (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) -(ShiftAllRightAndFillUpperFromMaskedInt32x4 [a] x y mask) => (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) -(ShiftAllRightAndFillUpperFromMaskedInt32x8 [a] x y mask) => (VPSHRDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) -(ShiftAllRightAndFillUpperFromMaskedInt32x16 [a] x y mask) => (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) -(ShiftAllRightAndFillUpperFromMaskedInt64x2 [a] x y mask) => (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) -(ShiftAllRightAndFillUpperFromMaskedInt64x4 [a] x y mask) => (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) -(ShiftAllRightAndFillUpperFromMaskedInt64x8 [a] x y mask) => (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) -(ShiftAllRightAndFillUpperFromMaskedUint16x8 [a] x y mask) => (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) -(ShiftAllRightAndFillUpperFromMaskedUint16x16 [a] x y mask) => (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) -(ShiftAllRightAndFillUpperFromMaskedUint16x32 [a] x y mask) => (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) -(ShiftAllRightAndFillUpperFromMaskedUint32x4 [a] x y mask) => (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) -(ShiftAllRightAndFillUpperFromMaskedUint32x8 [a] x y mask) => (VPSHRDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) -(ShiftAllRightAndFillUpperFromMaskedUint32x16 [a] x y mask) => (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) -(ShiftAllRightAndFillUpperFromMaskedUint64x2 [a] x y mask) => (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) -(ShiftAllRightAndFillUpperFromMaskedUint64x4 [a] x y mask) => (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) -(ShiftAllRightAndFillUpperFromMaskedUint64x8 [a] x y mask) => (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) +(ShiftAllRightConcatInt16x8 ...) => (VPSHRDW128 ...) +(ShiftAllRightConcatInt16x16 ...) => (VPSHRDW256 ...) +(ShiftAllRightConcatInt16x32 ...) => (VPSHRDW512 ...) +(ShiftAllRightConcatInt32x4 ...) => (VPSHRDD128 ...) +(ShiftAllRightConcatInt32x8 ...) => (VPSHRDD256 ...) +(ShiftAllRightConcatInt32x16 ...) => (VPSHRDD512 ...) +(ShiftAllRightConcatInt64x2 ...) => (VPSHRDQ128 ...) +(ShiftAllRightConcatInt64x4 ...) => (VPSHRDQ256 ...) +(ShiftAllRightConcatInt64x8 ...) => (VPSHRDQ512 ...) +(ShiftAllRightConcatUint16x8 ...) => (VPSHRDW128 ...) +(ShiftAllRightConcatUint16x16 ...) => (VPSHRDW256 ...) +(ShiftAllRightConcatUint16x32 ...) => (VPSHRDW512 ...) +(ShiftAllRightConcatUint32x4 ...) => (VPSHRDD128 ...) +(ShiftAllRightConcatUint32x8 ...) => (VPSHRDD256 ...) +(ShiftAllRightConcatUint32x16 ...) => (VPSHRDD512 ...) +(ShiftAllRightConcatUint64x2 ...) => (VPSHRDQ128 ...) +(ShiftAllRightConcatUint64x4 ...) => (VPSHRDQ256 ...) +(ShiftAllRightConcatUint64x8 ...) => (VPSHRDQ512 ...) 
+(ShiftAllRightConcatMaskedInt16x8 [a] x y mask) => (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) +(ShiftAllRightConcatMaskedInt16x16 [a] x y mask) => (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) +(ShiftAllRightConcatMaskedInt16x32 [a] x y mask) => (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) +(ShiftAllRightConcatMaskedInt32x4 [a] x y mask) => (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) +(ShiftAllRightConcatMaskedInt32x8 [a] x y mask) => (VPSHRDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) +(ShiftAllRightConcatMaskedInt32x16 [a] x y mask) => (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) +(ShiftAllRightConcatMaskedInt64x2 [a] x y mask) => (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) +(ShiftAllRightConcatMaskedInt64x4 [a] x y mask) => (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) +(ShiftAllRightConcatMaskedInt64x8 [a] x y mask) => (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) +(ShiftAllRightConcatMaskedUint16x8 [a] x y mask) => (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) +(ShiftAllRightConcatMaskedUint16x16 [a] x y mask) => (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) +(ShiftAllRightConcatMaskedUint16x32 [a] x y mask) => (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) +(ShiftAllRightConcatMaskedUint32x4 [a] x y mask) => (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) +(ShiftAllRightConcatMaskedUint32x8 [a] x y mask) => (VPSHRDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) +(ShiftAllRightConcatMaskedUint32x16 [a] x y mask) => (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) +(ShiftAllRightConcatMaskedUint64x2 [a] x y mask) => (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) +(ShiftAllRightConcatMaskedUint64x4 [a] x y mask) => (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) +(ShiftAllRightConcatMaskedUint64x8 [a] x y mask) => (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) (ShiftAllRightMaskedInt16x8 x y mask) => (VPSRAWMasked128 x y (VPMOVVec16x8ToM mask)) (ShiftAllRightMaskedInt16x16 x y mask) => (VPSRAWMasked256 x y (VPMOVVec16x16ToM mask)) (ShiftAllRightMaskedInt16x32 x y mask) => (VPSRAWMasked512 x y (VPMOVVec16x32ToM mask)) @@ -1545,42 +1545,42 @@ (ShiftLeftUint64x2 ...) => (VPSLLVQ128 ...) (ShiftLeftUint64x4 ...) => (VPSLLVQ256 ...) (ShiftLeftUint64x8 ...) => (VPSLLVQ512 ...) -(ShiftLeftAndFillUpperFromInt16x8 ...) => (VPSHLDVW128 ...) -(ShiftLeftAndFillUpperFromInt16x16 ...) => (VPSHLDVW256 ...) -(ShiftLeftAndFillUpperFromInt16x32 ...) => (VPSHLDVW512 ...) -(ShiftLeftAndFillUpperFromInt32x4 ...) => (VPSHLDVD128 ...) -(ShiftLeftAndFillUpperFromInt32x8 ...) => (VPSHLDVD256 ...) -(ShiftLeftAndFillUpperFromInt32x16 ...) => (VPSHLDVD512 ...) -(ShiftLeftAndFillUpperFromInt64x2 ...) => (VPSHLDVQ128 ...) -(ShiftLeftAndFillUpperFromInt64x4 ...) => (VPSHLDVQ256 ...) -(ShiftLeftAndFillUpperFromInt64x8 ...) => (VPSHLDVQ512 ...) -(ShiftLeftAndFillUpperFromUint16x8 ...) => (VPSHLDVW128 ...) -(ShiftLeftAndFillUpperFromUint16x16 ...) => (VPSHLDVW256 ...) -(ShiftLeftAndFillUpperFromUint16x32 ...) => (VPSHLDVW512 ...) -(ShiftLeftAndFillUpperFromUint32x4 ...) => (VPSHLDVD128 ...) -(ShiftLeftAndFillUpperFromUint32x8 ...) => (VPSHLDVD256 ...) -(ShiftLeftAndFillUpperFromUint32x16 ...) => (VPSHLDVD512 ...) -(ShiftLeftAndFillUpperFromUint64x2 ...) => (VPSHLDVQ128 ...) -(ShiftLeftAndFillUpperFromUint64x4 ...) => (VPSHLDVQ256 ...) -(ShiftLeftAndFillUpperFromUint64x8 ...) => (VPSHLDVQ512 ...) 
-(ShiftLeftAndFillUpperFromMaskedInt16x8 x y z mask) => (VPSHLDVWMasked128 x y z (VPMOVVec16x8ToM mask)) -(ShiftLeftAndFillUpperFromMaskedInt16x16 x y z mask) => (VPSHLDVWMasked256 x y z (VPMOVVec16x16ToM mask)) -(ShiftLeftAndFillUpperFromMaskedInt16x32 x y z mask) => (VPSHLDVWMasked512 x y z (VPMOVVec16x32ToM mask)) -(ShiftLeftAndFillUpperFromMaskedInt32x4 x y z mask) => (VPSHLDVDMasked128 x y z (VPMOVVec32x4ToM mask)) -(ShiftLeftAndFillUpperFromMaskedInt32x8 x y z mask) => (VPSHLDVDMasked256 x y z (VPMOVVec32x8ToM mask)) -(ShiftLeftAndFillUpperFromMaskedInt32x16 x y z mask) => (VPSHLDVDMasked512 x y z (VPMOVVec32x16ToM mask)) -(ShiftLeftAndFillUpperFromMaskedInt64x2 x y z mask) => (VPSHLDVQMasked128 x y z (VPMOVVec64x2ToM mask)) -(ShiftLeftAndFillUpperFromMaskedInt64x4 x y z mask) => (VPSHLDVQMasked256 x y z (VPMOVVec64x4ToM mask)) -(ShiftLeftAndFillUpperFromMaskedInt64x8 x y z mask) => (VPSHLDVQMasked512 x y z (VPMOVVec64x8ToM mask)) -(ShiftLeftAndFillUpperFromMaskedUint16x8 x y z mask) => (VPSHLDVWMasked128 x y z (VPMOVVec16x8ToM mask)) -(ShiftLeftAndFillUpperFromMaskedUint16x16 x y z mask) => (VPSHLDVWMasked256 x y z (VPMOVVec16x16ToM mask)) -(ShiftLeftAndFillUpperFromMaskedUint16x32 x y z mask) => (VPSHLDVWMasked512 x y z (VPMOVVec16x32ToM mask)) -(ShiftLeftAndFillUpperFromMaskedUint32x4 x y z mask) => (VPSHLDVDMasked128 x y z (VPMOVVec32x4ToM mask)) -(ShiftLeftAndFillUpperFromMaskedUint32x8 x y z mask) => (VPSHLDVDMasked256 x y z (VPMOVVec32x8ToM mask)) -(ShiftLeftAndFillUpperFromMaskedUint32x16 x y z mask) => (VPSHLDVDMasked512 x y z (VPMOVVec32x16ToM mask)) -(ShiftLeftAndFillUpperFromMaskedUint64x2 x y z mask) => (VPSHLDVQMasked128 x y z (VPMOVVec64x2ToM mask)) -(ShiftLeftAndFillUpperFromMaskedUint64x4 x y z mask) => (VPSHLDVQMasked256 x y z (VPMOVVec64x4ToM mask)) -(ShiftLeftAndFillUpperFromMaskedUint64x8 x y z mask) => (VPSHLDVQMasked512 x y z (VPMOVVec64x8ToM mask)) +(ShiftLeftConcatInt16x8 ...) => (VPSHLDVW128 ...) +(ShiftLeftConcatInt16x16 ...) => (VPSHLDVW256 ...) +(ShiftLeftConcatInt16x32 ...) => (VPSHLDVW512 ...) +(ShiftLeftConcatInt32x4 ...) => (VPSHLDVD128 ...) +(ShiftLeftConcatInt32x8 ...) => (VPSHLDVD256 ...) +(ShiftLeftConcatInt32x16 ...) => (VPSHLDVD512 ...) +(ShiftLeftConcatInt64x2 ...) => (VPSHLDVQ128 ...) +(ShiftLeftConcatInt64x4 ...) => (VPSHLDVQ256 ...) +(ShiftLeftConcatInt64x8 ...) => (VPSHLDVQ512 ...) +(ShiftLeftConcatUint16x8 ...) => (VPSHLDVW128 ...) +(ShiftLeftConcatUint16x16 ...) => (VPSHLDVW256 ...) +(ShiftLeftConcatUint16x32 ...) => (VPSHLDVW512 ...) +(ShiftLeftConcatUint32x4 ...) => (VPSHLDVD128 ...) +(ShiftLeftConcatUint32x8 ...) => (VPSHLDVD256 ...) +(ShiftLeftConcatUint32x16 ...) => (VPSHLDVD512 ...) +(ShiftLeftConcatUint64x2 ...) => (VPSHLDVQ128 ...) +(ShiftLeftConcatUint64x4 ...) => (VPSHLDVQ256 ...) +(ShiftLeftConcatUint64x8 ...) => (VPSHLDVQ512 ...) 
+(ShiftLeftConcatMaskedInt16x8 x y z mask) => (VPSHLDVWMasked128 x y z (VPMOVVec16x8ToM mask)) +(ShiftLeftConcatMaskedInt16x16 x y z mask) => (VPSHLDVWMasked256 x y z (VPMOVVec16x16ToM mask)) +(ShiftLeftConcatMaskedInt16x32 x y z mask) => (VPSHLDVWMasked512 x y z (VPMOVVec16x32ToM mask)) +(ShiftLeftConcatMaskedInt32x4 x y z mask) => (VPSHLDVDMasked128 x y z (VPMOVVec32x4ToM mask)) +(ShiftLeftConcatMaskedInt32x8 x y z mask) => (VPSHLDVDMasked256 x y z (VPMOVVec32x8ToM mask)) +(ShiftLeftConcatMaskedInt32x16 x y z mask) => (VPSHLDVDMasked512 x y z (VPMOVVec32x16ToM mask)) +(ShiftLeftConcatMaskedInt64x2 x y z mask) => (VPSHLDVQMasked128 x y z (VPMOVVec64x2ToM mask)) +(ShiftLeftConcatMaskedInt64x4 x y z mask) => (VPSHLDVQMasked256 x y z (VPMOVVec64x4ToM mask)) +(ShiftLeftConcatMaskedInt64x8 x y z mask) => (VPSHLDVQMasked512 x y z (VPMOVVec64x8ToM mask)) +(ShiftLeftConcatMaskedUint16x8 x y z mask) => (VPSHLDVWMasked128 x y z (VPMOVVec16x8ToM mask)) +(ShiftLeftConcatMaskedUint16x16 x y z mask) => (VPSHLDVWMasked256 x y z (VPMOVVec16x16ToM mask)) +(ShiftLeftConcatMaskedUint16x32 x y z mask) => (VPSHLDVWMasked512 x y z (VPMOVVec16x32ToM mask)) +(ShiftLeftConcatMaskedUint32x4 x y z mask) => (VPSHLDVDMasked128 x y z (VPMOVVec32x4ToM mask)) +(ShiftLeftConcatMaskedUint32x8 x y z mask) => (VPSHLDVDMasked256 x y z (VPMOVVec32x8ToM mask)) +(ShiftLeftConcatMaskedUint32x16 x y z mask) => (VPSHLDVDMasked512 x y z (VPMOVVec32x16ToM mask)) +(ShiftLeftConcatMaskedUint64x2 x y z mask) => (VPSHLDVQMasked128 x y z (VPMOVVec64x2ToM mask)) +(ShiftLeftConcatMaskedUint64x4 x y z mask) => (VPSHLDVQMasked256 x y z (VPMOVVec64x4ToM mask)) +(ShiftLeftConcatMaskedUint64x8 x y z mask) => (VPSHLDVQMasked512 x y z (VPMOVVec64x8ToM mask)) (ShiftLeftMaskedInt16x8 x y mask) => (VPSLLVWMasked128 x y (VPMOVVec16x8ToM mask)) (ShiftLeftMaskedInt16x16 x y mask) => (VPSLLVWMasked256 x y (VPMOVVec16x16ToM mask)) (ShiftLeftMaskedInt16x32 x y mask) => (VPSLLVWMasked512 x y (VPMOVVec16x32ToM mask)) @@ -1617,42 +1617,42 @@ (ShiftRightUint64x2 ...) => (VPSRLVQ128 ...) (ShiftRightUint64x4 ...) => (VPSRLVQ256 ...) (ShiftRightUint64x8 ...) => (VPSRLVQ512 ...) -(ShiftRightAndFillUpperFromInt16x8 ...) => (VPSHRDVW128 ...) -(ShiftRightAndFillUpperFromInt16x16 ...) => (VPSHRDVW256 ...) -(ShiftRightAndFillUpperFromInt16x32 ...) => (VPSHRDVW512 ...) -(ShiftRightAndFillUpperFromInt32x4 ...) => (VPSHRDVD128 ...) -(ShiftRightAndFillUpperFromInt32x8 ...) => (VPSHRDVD256 ...) -(ShiftRightAndFillUpperFromInt32x16 ...) => (VPSHRDVD512 ...) -(ShiftRightAndFillUpperFromInt64x2 ...) => (VPSHRDVQ128 ...) -(ShiftRightAndFillUpperFromInt64x4 ...) => (VPSHRDVQ256 ...) -(ShiftRightAndFillUpperFromInt64x8 ...) => (VPSHRDVQ512 ...) -(ShiftRightAndFillUpperFromUint16x8 ...) => (VPSHRDVW128 ...) -(ShiftRightAndFillUpperFromUint16x16 ...) => (VPSHRDVW256 ...) -(ShiftRightAndFillUpperFromUint16x32 ...) => (VPSHRDVW512 ...) -(ShiftRightAndFillUpperFromUint32x4 ...) => (VPSHRDVD128 ...) -(ShiftRightAndFillUpperFromUint32x8 ...) => (VPSHRDVD256 ...) -(ShiftRightAndFillUpperFromUint32x16 ...) => (VPSHRDVD512 ...) -(ShiftRightAndFillUpperFromUint64x2 ...) => (VPSHRDVQ128 ...) -(ShiftRightAndFillUpperFromUint64x4 ...) => (VPSHRDVQ256 ...) -(ShiftRightAndFillUpperFromUint64x8 ...) => (VPSHRDVQ512 ...) 
-(ShiftRightAndFillUpperFromMaskedInt16x8 x y z mask) => (VPSHRDVWMasked128 x y z (VPMOVVec16x8ToM mask)) -(ShiftRightAndFillUpperFromMaskedInt16x16 x y z mask) => (VPSHRDVWMasked256 x y z (VPMOVVec16x16ToM mask)) -(ShiftRightAndFillUpperFromMaskedInt16x32 x y z mask) => (VPSHRDVWMasked512 x y z (VPMOVVec16x32ToM mask)) -(ShiftRightAndFillUpperFromMaskedInt32x4 x y z mask) => (VPSHRDVDMasked128 x y z (VPMOVVec32x4ToM mask)) -(ShiftRightAndFillUpperFromMaskedInt32x8 x y z mask) => (VPSHRDVDMasked256 x y z (VPMOVVec32x8ToM mask)) -(ShiftRightAndFillUpperFromMaskedInt32x16 x y z mask) => (VPSHRDVDMasked512 x y z (VPMOVVec32x16ToM mask)) -(ShiftRightAndFillUpperFromMaskedInt64x2 x y z mask) => (VPSHRDVQMasked128 x y z (VPMOVVec64x2ToM mask)) -(ShiftRightAndFillUpperFromMaskedInt64x4 x y z mask) => (VPSHRDVQMasked256 x y z (VPMOVVec64x4ToM mask)) -(ShiftRightAndFillUpperFromMaskedInt64x8 x y z mask) => (VPSHRDVQMasked512 x y z (VPMOVVec64x8ToM mask)) -(ShiftRightAndFillUpperFromMaskedUint16x8 x y z mask) => (VPSHRDVWMasked128 x y z (VPMOVVec16x8ToM mask)) -(ShiftRightAndFillUpperFromMaskedUint16x16 x y z mask) => (VPSHRDVWMasked256 x y z (VPMOVVec16x16ToM mask)) -(ShiftRightAndFillUpperFromMaskedUint16x32 x y z mask) => (VPSHRDVWMasked512 x y z (VPMOVVec16x32ToM mask)) -(ShiftRightAndFillUpperFromMaskedUint32x4 x y z mask) => (VPSHRDVDMasked128 x y z (VPMOVVec32x4ToM mask)) -(ShiftRightAndFillUpperFromMaskedUint32x8 x y z mask) => (VPSHRDVDMasked256 x y z (VPMOVVec32x8ToM mask)) -(ShiftRightAndFillUpperFromMaskedUint32x16 x y z mask) => (VPSHRDVDMasked512 x y z (VPMOVVec32x16ToM mask)) -(ShiftRightAndFillUpperFromMaskedUint64x2 x y z mask) => (VPSHRDVQMasked128 x y z (VPMOVVec64x2ToM mask)) -(ShiftRightAndFillUpperFromMaskedUint64x4 x y z mask) => (VPSHRDVQMasked256 x y z (VPMOVVec64x4ToM mask)) -(ShiftRightAndFillUpperFromMaskedUint64x8 x y z mask) => (VPSHRDVQMasked512 x y z (VPMOVVec64x8ToM mask)) +(ShiftRightConcatInt16x8 ...) => (VPSHRDVW128 ...) +(ShiftRightConcatInt16x16 ...) => (VPSHRDVW256 ...) +(ShiftRightConcatInt16x32 ...) => (VPSHRDVW512 ...) +(ShiftRightConcatInt32x4 ...) => (VPSHRDVD128 ...) +(ShiftRightConcatInt32x8 ...) => (VPSHRDVD256 ...) +(ShiftRightConcatInt32x16 ...) => (VPSHRDVD512 ...) +(ShiftRightConcatInt64x2 ...) => (VPSHRDVQ128 ...) +(ShiftRightConcatInt64x4 ...) => (VPSHRDVQ256 ...) +(ShiftRightConcatInt64x8 ...) => (VPSHRDVQ512 ...) +(ShiftRightConcatUint16x8 ...) => (VPSHRDVW128 ...) +(ShiftRightConcatUint16x16 ...) => (VPSHRDVW256 ...) +(ShiftRightConcatUint16x32 ...) => (VPSHRDVW512 ...) +(ShiftRightConcatUint32x4 ...) => (VPSHRDVD128 ...) +(ShiftRightConcatUint32x8 ...) => (VPSHRDVD256 ...) +(ShiftRightConcatUint32x16 ...) => (VPSHRDVD512 ...) +(ShiftRightConcatUint64x2 ...) => (VPSHRDVQ128 ...) +(ShiftRightConcatUint64x4 ...) => (VPSHRDVQ256 ...) +(ShiftRightConcatUint64x8 ...) => (VPSHRDVQ512 ...) 
+(ShiftRightConcatMaskedInt16x8 x y z mask) => (VPSHRDVWMasked128 x y z (VPMOVVec16x8ToM mask)) +(ShiftRightConcatMaskedInt16x16 x y z mask) => (VPSHRDVWMasked256 x y z (VPMOVVec16x16ToM mask)) +(ShiftRightConcatMaskedInt16x32 x y z mask) => (VPSHRDVWMasked512 x y z (VPMOVVec16x32ToM mask)) +(ShiftRightConcatMaskedInt32x4 x y z mask) => (VPSHRDVDMasked128 x y z (VPMOVVec32x4ToM mask)) +(ShiftRightConcatMaskedInt32x8 x y z mask) => (VPSHRDVDMasked256 x y z (VPMOVVec32x8ToM mask)) +(ShiftRightConcatMaskedInt32x16 x y z mask) => (VPSHRDVDMasked512 x y z (VPMOVVec32x16ToM mask)) +(ShiftRightConcatMaskedInt64x2 x y z mask) => (VPSHRDVQMasked128 x y z (VPMOVVec64x2ToM mask)) +(ShiftRightConcatMaskedInt64x4 x y z mask) => (VPSHRDVQMasked256 x y z (VPMOVVec64x4ToM mask)) +(ShiftRightConcatMaskedInt64x8 x y z mask) => (VPSHRDVQMasked512 x y z (VPMOVVec64x8ToM mask)) +(ShiftRightConcatMaskedUint16x8 x y z mask) => (VPSHRDVWMasked128 x y z (VPMOVVec16x8ToM mask)) +(ShiftRightConcatMaskedUint16x16 x y z mask) => (VPSHRDVWMasked256 x y z (VPMOVVec16x16ToM mask)) +(ShiftRightConcatMaskedUint16x32 x y z mask) => (VPSHRDVWMasked512 x y z (VPMOVVec16x32ToM mask)) +(ShiftRightConcatMaskedUint32x4 x y z mask) => (VPSHRDVDMasked128 x y z (VPMOVVec32x4ToM mask)) +(ShiftRightConcatMaskedUint32x8 x y z mask) => (VPSHRDVDMasked256 x y z (VPMOVVec32x8ToM mask)) +(ShiftRightConcatMaskedUint32x16 x y z mask) => (VPSHRDVDMasked512 x y z (VPMOVVec32x16ToM mask)) +(ShiftRightConcatMaskedUint64x2 x y z mask) => (VPSHRDVQMasked128 x y z (VPMOVVec64x2ToM mask)) +(ShiftRightConcatMaskedUint64x4 x y z mask) => (VPSHRDVQMasked256 x y z (VPMOVVec64x4ToM mask)) +(ShiftRightConcatMaskedUint64x8 x y z mask) => (VPSHRDVQMasked512 x y z (VPMOVVec64x8ToM mask)) (ShiftRightMaskedInt16x8 x y mask) => (VPSRAVWMasked128 x y (VPMOVVec16x8ToM mask)) (ShiftRightMaskedInt16x16 x y mask) => (VPSRAVWMasked256 x y (VPMOVVec16x16ToM mask)) (ShiftRightMaskedInt16x32 x y mask) => (VPSRAVWMasked512 x y (VPMOVVec16x32ToM mask)) diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index f1c1246d2400f3..d681620bc39a1c 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -1278,42 +1278,42 @@ func simdGenericOps() []opData { {name: "ShiftAllRightUint64x2", argLength: 2, commutative: false}, {name: "ShiftAllRightUint64x4", argLength: 2, commutative: false}, {name: "ShiftAllRightUint64x8", argLength: 2, commutative: false}, - {name: "ShiftLeftAndFillUpperFromInt16x8", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromInt16x16", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromInt16x32", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromInt32x4", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromInt32x8", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromInt32x16", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromInt64x2", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromInt64x4", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromInt64x8", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedInt16x8", argLength: 4, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedInt16x16", argLength: 4, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedInt16x32", argLength: 4, commutative: false}, - {name: 
"ShiftLeftAndFillUpperFromMaskedInt32x4", argLength: 4, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedInt32x8", argLength: 4, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedInt32x16", argLength: 4, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedInt64x2", argLength: 4, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedInt64x4", argLength: 4, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedInt64x8", argLength: 4, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedUint16x8", argLength: 4, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedUint16x16", argLength: 4, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedUint16x32", argLength: 4, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedUint32x4", argLength: 4, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedUint32x8", argLength: 4, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedUint32x16", argLength: 4, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedUint64x2", argLength: 4, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedUint64x4", argLength: 4, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedUint64x8", argLength: 4, commutative: false}, - {name: "ShiftLeftAndFillUpperFromUint16x8", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromUint16x16", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromUint16x32", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromUint32x4", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromUint32x8", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromUint32x16", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromUint64x2", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromUint64x4", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromUint64x8", argLength: 3, commutative: false}, + {name: "ShiftLeftConcatInt16x8", argLength: 3, commutative: false}, + {name: "ShiftLeftConcatInt16x16", argLength: 3, commutative: false}, + {name: "ShiftLeftConcatInt16x32", argLength: 3, commutative: false}, + {name: "ShiftLeftConcatInt32x4", argLength: 3, commutative: false}, + {name: "ShiftLeftConcatInt32x8", argLength: 3, commutative: false}, + {name: "ShiftLeftConcatInt32x16", argLength: 3, commutative: false}, + {name: "ShiftLeftConcatInt64x2", argLength: 3, commutative: false}, + {name: "ShiftLeftConcatInt64x4", argLength: 3, commutative: false}, + {name: "ShiftLeftConcatInt64x8", argLength: 3, commutative: false}, + {name: "ShiftLeftConcatMaskedInt16x8", argLength: 4, commutative: false}, + {name: "ShiftLeftConcatMaskedInt16x16", argLength: 4, commutative: false}, + {name: "ShiftLeftConcatMaskedInt16x32", argLength: 4, commutative: false}, + {name: "ShiftLeftConcatMaskedInt32x4", argLength: 4, commutative: false}, + {name: "ShiftLeftConcatMaskedInt32x8", argLength: 4, commutative: false}, + {name: "ShiftLeftConcatMaskedInt32x16", argLength: 4, commutative: false}, + {name: "ShiftLeftConcatMaskedInt64x2", argLength: 4, commutative: false}, + {name: "ShiftLeftConcatMaskedInt64x4", argLength: 4, commutative: false}, + {name: "ShiftLeftConcatMaskedInt64x8", argLength: 4, commutative: false}, + {name: "ShiftLeftConcatMaskedUint16x8", argLength: 4, commutative: false}, + {name: "ShiftLeftConcatMaskedUint16x16", argLength: 4, commutative: false}, + {name: "ShiftLeftConcatMaskedUint16x32", argLength: 
4, commutative: false}, + {name: "ShiftLeftConcatMaskedUint32x4", argLength: 4, commutative: false}, + {name: "ShiftLeftConcatMaskedUint32x8", argLength: 4, commutative: false}, + {name: "ShiftLeftConcatMaskedUint32x16", argLength: 4, commutative: false}, + {name: "ShiftLeftConcatMaskedUint64x2", argLength: 4, commutative: false}, + {name: "ShiftLeftConcatMaskedUint64x4", argLength: 4, commutative: false}, + {name: "ShiftLeftConcatMaskedUint64x8", argLength: 4, commutative: false}, + {name: "ShiftLeftConcatUint16x8", argLength: 3, commutative: false}, + {name: "ShiftLeftConcatUint16x16", argLength: 3, commutative: false}, + {name: "ShiftLeftConcatUint16x32", argLength: 3, commutative: false}, + {name: "ShiftLeftConcatUint32x4", argLength: 3, commutative: false}, + {name: "ShiftLeftConcatUint32x8", argLength: 3, commutative: false}, + {name: "ShiftLeftConcatUint32x16", argLength: 3, commutative: false}, + {name: "ShiftLeftConcatUint64x2", argLength: 3, commutative: false}, + {name: "ShiftLeftConcatUint64x4", argLength: 3, commutative: false}, + {name: "ShiftLeftConcatUint64x8", argLength: 3, commutative: false}, {name: "ShiftLeftInt16x8", argLength: 2, commutative: false}, {name: "ShiftLeftInt16x16", argLength: 2, commutative: false}, {name: "ShiftLeftInt16x32", argLength: 2, commutative: false}, @@ -1350,42 +1350,42 @@ func simdGenericOps() []opData { {name: "ShiftLeftUint64x2", argLength: 2, commutative: false}, {name: "ShiftLeftUint64x4", argLength: 2, commutative: false}, {name: "ShiftLeftUint64x8", argLength: 2, commutative: false}, - {name: "ShiftRightAndFillUpperFromInt16x8", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromInt16x16", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromInt16x32", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromInt32x4", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromInt32x8", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromInt32x16", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromInt64x2", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromInt64x4", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromInt64x8", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedInt16x8", argLength: 4, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedInt16x16", argLength: 4, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedInt16x32", argLength: 4, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedInt32x4", argLength: 4, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedInt32x8", argLength: 4, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedInt32x16", argLength: 4, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedInt64x2", argLength: 4, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedInt64x4", argLength: 4, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedInt64x8", argLength: 4, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedUint16x8", argLength: 4, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedUint16x16", argLength: 4, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedUint16x32", argLength: 4, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedUint32x4", argLength: 4, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedUint32x8", argLength: 4, commutative: false}, - {name: 
"ShiftRightAndFillUpperFromMaskedUint32x16", argLength: 4, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedUint64x2", argLength: 4, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedUint64x4", argLength: 4, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedUint64x8", argLength: 4, commutative: false}, - {name: "ShiftRightAndFillUpperFromUint16x8", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromUint16x16", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromUint16x32", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromUint32x4", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromUint32x8", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromUint32x16", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromUint64x2", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromUint64x4", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromUint64x8", argLength: 3, commutative: false}, + {name: "ShiftRightConcatInt16x8", argLength: 3, commutative: false}, + {name: "ShiftRightConcatInt16x16", argLength: 3, commutative: false}, + {name: "ShiftRightConcatInt16x32", argLength: 3, commutative: false}, + {name: "ShiftRightConcatInt32x4", argLength: 3, commutative: false}, + {name: "ShiftRightConcatInt32x8", argLength: 3, commutative: false}, + {name: "ShiftRightConcatInt32x16", argLength: 3, commutative: false}, + {name: "ShiftRightConcatInt64x2", argLength: 3, commutative: false}, + {name: "ShiftRightConcatInt64x4", argLength: 3, commutative: false}, + {name: "ShiftRightConcatInt64x8", argLength: 3, commutative: false}, + {name: "ShiftRightConcatMaskedInt16x8", argLength: 4, commutative: false}, + {name: "ShiftRightConcatMaskedInt16x16", argLength: 4, commutative: false}, + {name: "ShiftRightConcatMaskedInt16x32", argLength: 4, commutative: false}, + {name: "ShiftRightConcatMaskedInt32x4", argLength: 4, commutative: false}, + {name: "ShiftRightConcatMaskedInt32x8", argLength: 4, commutative: false}, + {name: "ShiftRightConcatMaskedInt32x16", argLength: 4, commutative: false}, + {name: "ShiftRightConcatMaskedInt64x2", argLength: 4, commutative: false}, + {name: "ShiftRightConcatMaskedInt64x4", argLength: 4, commutative: false}, + {name: "ShiftRightConcatMaskedInt64x8", argLength: 4, commutative: false}, + {name: "ShiftRightConcatMaskedUint16x8", argLength: 4, commutative: false}, + {name: "ShiftRightConcatMaskedUint16x16", argLength: 4, commutative: false}, + {name: "ShiftRightConcatMaskedUint16x32", argLength: 4, commutative: false}, + {name: "ShiftRightConcatMaskedUint32x4", argLength: 4, commutative: false}, + {name: "ShiftRightConcatMaskedUint32x8", argLength: 4, commutative: false}, + {name: "ShiftRightConcatMaskedUint32x16", argLength: 4, commutative: false}, + {name: "ShiftRightConcatMaskedUint64x2", argLength: 4, commutative: false}, + {name: "ShiftRightConcatMaskedUint64x4", argLength: 4, commutative: false}, + {name: "ShiftRightConcatMaskedUint64x8", argLength: 4, commutative: false}, + {name: "ShiftRightConcatUint16x8", argLength: 3, commutative: false}, + {name: "ShiftRightConcatUint16x16", argLength: 3, commutative: false}, + {name: "ShiftRightConcatUint16x32", argLength: 3, commutative: false}, + {name: "ShiftRightConcatUint32x4", argLength: 3, commutative: false}, + {name: "ShiftRightConcatUint32x8", argLength: 3, commutative: false}, + {name: "ShiftRightConcatUint32x16", argLength: 3, commutative: 
false}, + {name: "ShiftRightConcatUint64x2", argLength: 3, commutative: false}, + {name: "ShiftRightConcatUint64x4", argLength: 3, commutative: false}, + {name: "ShiftRightConcatUint64x8", argLength: 3, commutative: false}, {name: "ShiftRightInt16x8", argLength: 2, commutative: false}, {name: "ShiftRightInt16x16", argLength: 2, commutative: false}, {name: "ShiftRightInt16x32", argLength: 2, commutative: false}, @@ -1722,78 +1722,78 @@ func simdGenericOps() []opData { {name: "SetElemUint16x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "SetElemUint32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "SetElemUint64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromInt16x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromInt16x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromInt16x32", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromInt32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromInt32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromInt32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromInt64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromInt64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromInt64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedInt16x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedInt16x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedInt16x32", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedInt32x4", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedInt32x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedInt32x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedInt64x2", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedInt64x4", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedInt64x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedUint16x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedUint16x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedUint16x32", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedUint32x4", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedUint32x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedUint32x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedUint64x2", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedUint64x4", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedUint64x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromUint16x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: 
"ShiftAllLeftAndFillUpperFromUint16x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromUint16x32", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromUint32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromUint32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromUint32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromUint64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromUint64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromUint64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromInt16x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromInt16x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromInt16x32", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromInt32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromInt32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromInt32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromInt64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromInt64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromInt64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedInt16x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedInt16x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedInt16x32", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedInt32x4", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedInt32x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedInt32x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedInt64x2", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedInt64x4", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedInt64x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedUint16x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedUint16x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedUint16x32", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedUint32x4", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedUint32x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedUint32x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedUint64x2", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedUint64x4", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedUint64x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: 
"ShiftAllRightAndFillUpperFromUint16x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromUint16x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromUint16x32", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromUint32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromUint32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromUint32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromUint64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromUint64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromUint64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatInt16x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatInt16x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatInt16x32", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatInt32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatInt32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatInt32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatInt64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatInt64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatInt64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatMaskedInt16x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatMaskedInt16x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatMaskedInt16x32", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatMaskedInt32x4", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatMaskedInt32x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatMaskedInt32x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatMaskedInt64x2", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatMaskedInt64x4", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatMaskedInt64x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatMaskedUint16x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatMaskedUint16x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatMaskedUint16x32", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatMaskedUint32x4", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatMaskedUint32x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatMaskedUint32x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatMaskedUint64x2", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatMaskedUint64x4", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatMaskedUint64x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatUint16x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatUint16x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatUint16x32", argLength: 2, 
commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatUint32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatUint32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatUint32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatUint64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatUint64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatUint64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatInt16x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatInt16x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatInt16x32", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatInt32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatInt32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatInt32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatInt64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatInt64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatInt64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatMaskedInt16x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatMaskedInt16x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatMaskedInt16x32", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatMaskedInt32x4", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatMaskedInt32x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatMaskedInt32x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatMaskedInt64x2", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatMaskedInt64x4", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatMaskedInt64x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatMaskedUint16x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatMaskedUint16x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatMaskedUint16x32", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatMaskedUint32x4", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatMaskedUint32x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatMaskedUint32x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatMaskedUint64x2", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatMaskedUint64x4", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatMaskedUint64x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatUint16x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatUint16x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatUint16x32", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatUint32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatUint32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatUint32x16", argLength: 2, commutative: false, aux: 
"Int8"}, + {name: "ShiftAllRightConcatUint64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatUint64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatUint64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "TruncWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "TruncWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "TruncWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index ed0203b6390dc1..de4477bc91b2a7 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -5764,42 +5764,42 @@ const ( OpShiftAllRightUint64x2 OpShiftAllRightUint64x4 OpShiftAllRightUint64x8 - OpShiftLeftAndFillUpperFromInt16x8 - OpShiftLeftAndFillUpperFromInt16x16 - OpShiftLeftAndFillUpperFromInt16x32 - OpShiftLeftAndFillUpperFromInt32x4 - OpShiftLeftAndFillUpperFromInt32x8 - OpShiftLeftAndFillUpperFromInt32x16 - OpShiftLeftAndFillUpperFromInt64x2 - OpShiftLeftAndFillUpperFromInt64x4 - OpShiftLeftAndFillUpperFromInt64x8 - OpShiftLeftAndFillUpperFromMaskedInt16x8 - OpShiftLeftAndFillUpperFromMaskedInt16x16 - OpShiftLeftAndFillUpperFromMaskedInt16x32 - OpShiftLeftAndFillUpperFromMaskedInt32x4 - OpShiftLeftAndFillUpperFromMaskedInt32x8 - OpShiftLeftAndFillUpperFromMaskedInt32x16 - OpShiftLeftAndFillUpperFromMaskedInt64x2 - OpShiftLeftAndFillUpperFromMaskedInt64x4 - OpShiftLeftAndFillUpperFromMaskedInt64x8 - OpShiftLeftAndFillUpperFromMaskedUint16x8 - OpShiftLeftAndFillUpperFromMaskedUint16x16 - OpShiftLeftAndFillUpperFromMaskedUint16x32 - OpShiftLeftAndFillUpperFromMaskedUint32x4 - OpShiftLeftAndFillUpperFromMaskedUint32x8 - OpShiftLeftAndFillUpperFromMaskedUint32x16 - OpShiftLeftAndFillUpperFromMaskedUint64x2 - OpShiftLeftAndFillUpperFromMaskedUint64x4 - OpShiftLeftAndFillUpperFromMaskedUint64x8 - OpShiftLeftAndFillUpperFromUint16x8 - OpShiftLeftAndFillUpperFromUint16x16 - OpShiftLeftAndFillUpperFromUint16x32 - OpShiftLeftAndFillUpperFromUint32x4 - OpShiftLeftAndFillUpperFromUint32x8 - OpShiftLeftAndFillUpperFromUint32x16 - OpShiftLeftAndFillUpperFromUint64x2 - OpShiftLeftAndFillUpperFromUint64x4 - OpShiftLeftAndFillUpperFromUint64x8 + OpShiftLeftConcatInt16x8 + OpShiftLeftConcatInt16x16 + OpShiftLeftConcatInt16x32 + OpShiftLeftConcatInt32x4 + OpShiftLeftConcatInt32x8 + OpShiftLeftConcatInt32x16 + OpShiftLeftConcatInt64x2 + OpShiftLeftConcatInt64x4 + OpShiftLeftConcatInt64x8 + OpShiftLeftConcatMaskedInt16x8 + OpShiftLeftConcatMaskedInt16x16 + OpShiftLeftConcatMaskedInt16x32 + OpShiftLeftConcatMaskedInt32x4 + OpShiftLeftConcatMaskedInt32x8 + OpShiftLeftConcatMaskedInt32x16 + OpShiftLeftConcatMaskedInt64x2 + OpShiftLeftConcatMaskedInt64x4 + OpShiftLeftConcatMaskedInt64x8 + OpShiftLeftConcatMaskedUint16x8 + OpShiftLeftConcatMaskedUint16x16 + OpShiftLeftConcatMaskedUint16x32 + OpShiftLeftConcatMaskedUint32x4 + OpShiftLeftConcatMaskedUint32x8 + OpShiftLeftConcatMaskedUint32x16 + OpShiftLeftConcatMaskedUint64x2 + OpShiftLeftConcatMaskedUint64x4 + OpShiftLeftConcatMaskedUint64x8 + OpShiftLeftConcatUint16x8 + OpShiftLeftConcatUint16x16 + OpShiftLeftConcatUint16x32 + OpShiftLeftConcatUint32x4 + OpShiftLeftConcatUint32x8 + OpShiftLeftConcatUint32x16 + OpShiftLeftConcatUint64x2 + OpShiftLeftConcatUint64x4 + OpShiftLeftConcatUint64x8 OpShiftLeftInt16x8 OpShiftLeftInt16x16 OpShiftLeftInt16x32 @@ -5836,42 +5836,42 @@ const ( OpShiftLeftUint64x2 
OpShiftLeftUint64x4 OpShiftLeftUint64x8 - OpShiftRightAndFillUpperFromInt16x8 - OpShiftRightAndFillUpperFromInt16x16 - OpShiftRightAndFillUpperFromInt16x32 - OpShiftRightAndFillUpperFromInt32x4 - OpShiftRightAndFillUpperFromInt32x8 - OpShiftRightAndFillUpperFromInt32x16 - OpShiftRightAndFillUpperFromInt64x2 - OpShiftRightAndFillUpperFromInt64x4 - OpShiftRightAndFillUpperFromInt64x8 - OpShiftRightAndFillUpperFromMaskedInt16x8 - OpShiftRightAndFillUpperFromMaskedInt16x16 - OpShiftRightAndFillUpperFromMaskedInt16x32 - OpShiftRightAndFillUpperFromMaskedInt32x4 - OpShiftRightAndFillUpperFromMaskedInt32x8 - OpShiftRightAndFillUpperFromMaskedInt32x16 - OpShiftRightAndFillUpperFromMaskedInt64x2 - OpShiftRightAndFillUpperFromMaskedInt64x4 - OpShiftRightAndFillUpperFromMaskedInt64x8 - OpShiftRightAndFillUpperFromMaskedUint16x8 - OpShiftRightAndFillUpperFromMaskedUint16x16 - OpShiftRightAndFillUpperFromMaskedUint16x32 - OpShiftRightAndFillUpperFromMaskedUint32x4 - OpShiftRightAndFillUpperFromMaskedUint32x8 - OpShiftRightAndFillUpperFromMaskedUint32x16 - OpShiftRightAndFillUpperFromMaskedUint64x2 - OpShiftRightAndFillUpperFromMaskedUint64x4 - OpShiftRightAndFillUpperFromMaskedUint64x8 - OpShiftRightAndFillUpperFromUint16x8 - OpShiftRightAndFillUpperFromUint16x16 - OpShiftRightAndFillUpperFromUint16x32 - OpShiftRightAndFillUpperFromUint32x4 - OpShiftRightAndFillUpperFromUint32x8 - OpShiftRightAndFillUpperFromUint32x16 - OpShiftRightAndFillUpperFromUint64x2 - OpShiftRightAndFillUpperFromUint64x4 - OpShiftRightAndFillUpperFromUint64x8 + OpShiftRightConcatInt16x8 + OpShiftRightConcatInt16x16 + OpShiftRightConcatInt16x32 + OpShiftRightConcatInt32x4 + OpShiftRightConcatInt32x8 + OpShiftRightConcatInt32x16 + OpShiftRightConcatInt64x2 + OpShiftRightConcatInt64x4 + OpShiftRightConcatInt64x8 + OpShiftRightConcatMaskedInt16x8 + OpShiftRightConcatMaskedInt16x16 + OpShiftRightConcatMaskedInt16x32 + OpShiftRightConcatMaskedInt32x4 + OpShiftRightConcatMaskedInt32x8 + OpShiftRightConcatMaskedInt32x16 + OpShiftRightConcatMaskedInt64x2 + OpShiftRightConcatMaskedInt64x4 + OpShiftRightConcatMaskedInt64x8 + OpShiftRightConcatMaskedUint16x8 + OpShiftRightConcatMaskedUint16x16 + OpShiftRightConcatMaskedUint16x32 + OpShiftRightConcatMaskedUint32x4 + OpShiftRightConcatMaskedUint32x8 + OpShiftRightConcatMaskedUint32x16 + OpShiftRightConcatMaskedUint64x2 + OpShiftRightConcatMaskedUint64x4 + OpShiftRightConcatMaskedUint64x8 + OpShiftRightConcatUint16x8 + OpShiftRightConcatUint16x16 + OpShiftRightConcatUint16x32 + OpShiftRightConcatUint32x4 + OpShiftRightConcatUint32x8 + OpShiftRightConcatUint32x16 + OpShiftRightConcatUint64x2 + OpShiftRightConcatUint64x4 + OpShiftRightConcatUint64x8 OpShiftRightInt16x8 OpShiftRightInt16x16 OpShiftRightInt16x32 @@ -6208,78 +6208,78 @@ const ( OpSetElemUint16x8 OpSetElemUint32x4 OpSetElemUint64x2 - OpShiftAllLeftAndFillUpperFromInt16x8 - OpShiftAllLeftAndFillUpperFromInt16x16 - OpShiftAllLeftAndFillUpperFromInt16x32 - OpShiftAllLeftAndFillUpperFromInt32x4 - OpShiftAllLeftAndFillUpperFromInt32x8 - OpShiftAllLeftAndFillUpperFromInt32x16 - OpShiftAllLeftAndFillUpperFromInt64x2 - OpShiftAllLeftAndFillUpperFromInt64x4 - OpShiftAllLeftAndFillUpperFromInt64x8 - OpShiftAllLeftAndFillUpperFromMaskedInt16x8 - OpShiftAllLeftAndFillUpperFromMaskedInt16x16 - OpShiftAllLeftAndFillUpperFromMaskedInt16x32 - OpShiftAllLeftAndFillUpperFromMaskedInt32x4 - OpShiftAllLeftAndFillUpperFromMaskedInt32x8 - OpShiftAllLeftAndFillUpperFromMaskedInt32x16 - OpShiftAllLeftAndFillUpperFromMaskedInt64x2 - 
OpShiftAllLeftAndFillUpperFromMaskedInt64x4 - OpShiftAllLeftAndFillUpperFromMaskedInt64x8 - OpShiftAllLeftAndFillUpperFromMaskedUint16x8 - OpShiftAllLeftAndFillUpperFromMaskedUint16x16 - OpShiftAllLeftAndFillUpperFromMaskedUint16x32 - OpShiftAllLeftAndFillUpperFromMaskedUint32x4 - OpShiftAllLeftAndFillUpperFromMaskedUint32x8 - OpShiftAllLeftAndFillUpperFromMaskedUint32x16 - OpShiftAllLeftAndFillUpperFromMaskedUint64x2 - OpShiftAllLeftAndFillUpperFromMaskedUint64x4 - OpShiftAllLeftAndFillUpperFromMaskedUint64x8 - OpShiftAllLeftAndFillUpperFromUint16x8 - OpShiftAllLeftAndFillUpperFromUint16x16 - OpShiftAllLeftAndFillUpperFromUint16x32 - OpShiftAllLeftAndFillUpperFromUint32x4 - OpShiftAllLeftAndFillUpperFromUint32x8 - OpShiftAllLeftAndFillUpperFromUint32x16 - OpShiftAllLeftAndFillUpperFromUint64x2 - OpShiftAllLeftAndFillUpperFromUint64x4 - OpShiftAllLeftAndFillUpperFromUint64x8 - OpShiftAllRightAndFillUpperFromInt16x8 - OpShiftAllRightAndFillUpperFromInt16x16 - OpShiftAllRightAndFillUpperFromInt16x32 - OpShiftAllRightAndFillUpperFromInt32x4 - OpShiftAllRightAndFillUpperFromInt32x8 - OpShiftAllRightAndFillUpperFromInt32x16 - OpShiftAllRightAndFillUpperFromInt64x2 - OpShiftAllRightAndFillUpperFromInt64x4 - OpShiftAllRightAndFillUpperFromInt64x8 - OpShiftAllRightAndFillUpperFromMaskedInt16x8 - OpShiftAllRightAndFillUpperFromMaskedInt16x16 - OpShiftAllRightAndFillUpperFromMaskedInt16x32 - OpShiftAllRightAndFillUpperFromMaskedInt32x4 - OpShiftAllRightAndFillUpperFromMaskedInt32x8 - OpShiftAllRightAndFillUpperFromMaskedInt32x16 - OpShiftAllRightAndFillUpperFromMaskedInt64x2 - OpShiftAllRightAndFillUpperFromMaskedInt64x4 - OpShiftAllRightAndFillUpperFromMaskedInt64x8 - OpShiftAllRightAndFillUpperFromMaskedUint16x8 - OpShiftAllRightAndFillUpperFromMaskedUint16x16 - OpShiftAllRightAndFillUpperFromMaskedUint16x32 - OpShiftAllRightAndFillUpperFromMaskedUint32x4 - OpShiftAllRightAndFillUpperFromMaskedUint32x8 - OpShiftAllRightAndFillUpperFromMaskedUint32x16 - OpShiftAllRightAndFillUpperFromMaskedUint64x2 - OpShiftAllRightAndFillUpperFromMaskedUint64x4 - OpShiftAllRightAndFillUpperFromMaskedUint64x8 - OpShiftAllRightAndFillUpperFromUint16x8 - OpShiftAllRightAndFillUpperFromUint16x16 - OpShiftAllRightAndFillUpperFromUint16x32 - OpShiftAllRightAndFillUpperFromUint32x4 - OpShiftAllRightAndFillUpperFromUint32x8 - OpShiftAllRightAndFillUpperFromUint32x16 - OpShiftAllRightAndFillUpperFromUint64x2 - OpShiftAllRightAndFillUpperFromUint64x4 - OpShiftAllRightAndFillUpperFromUint64x8 + OpShiftAllLeftConcatInt16x8 + OpShiftAllLeftConcatInt16x16 + OpShiftAllLeftConcatInt16x32 + OpShiftAllLeftConcatInt32x4 + OpShiftAllLeftConcatInt32x8 + OpShiftAllLeftConcatInt32x16 + OpShiftAllLeftConcatInt64x2 + OpShiftAllLeftConcatInt64x4 + OpShiftAllLeftConcatInt64x8 + OpShiftAllLeftConcatMaskedInt16x8 + OpShiftAllLeftConcatMaskedInt16x16 + OpShiftAllLeftConcatMaskedInt16x32 + OpShiftAllLeftConcatMaskedInt32x4 + OpShiftAllLeftConcatMaskedInt32x8 + OpShiftAllLeftConcatMaskedInt32x16 + OpShiftAllLeftConcatMaskedInt64x2 + OpShiftAllLeftConcatMaskedInt64x4 + OpShiftAllLeftConcatMaskedInt64x8 + OpShiftAllLeftConcatMaskedUint16x8 + OpShiftAllLeftConcatMaskedUint16x16 + OpShiftAllLeftConcatMaskedUint16x32 + OpShiftAllLeftConcatMaskedUint32x4 + OpShiftAllLeftConcatMaskedUint32x8 + OpShiftAllLeftConcatMaskedUint32x16 + OpShiftAllLeftConcatMaskedUint64x2 + OpShiftAllLeftConcatMaskedUint64x4 + OpShiftAllLeftConcatMaskedUint64x8 + OpShiftAllLeftConcatUint16x8 + OpShiftAllLeftConcatUint16x16 + OpShiftAllLeftConcatUint16x32 + 
OpShiftAllLeftConcatUint32x4 + OpShiftAllLeftConcatUint32x8 + OpShiftAllLeftConcatUint32x16 + OpShiftAllLeftConcatUint64x2 + OpShiftAllLeftConcatUint64x4 + OpShiftAllLeftConcatUint64x8 + OpShiftAllRightConcatInt16x8 + OpShiftAllRightConcatInt16x16 + OpShiftAllRightConcatInt16x32 + OpShiftAllRightConcatInt32x4 + OpShiftAllRightConcatInt32x8 + OpShiftAllRightConcatInt32x16 + OpShiftAllRightConcatInt64x2 + OpShiftAllRightConcatInt64x4 + OpShiftAllRightConcatInt64x8 + OpShiftAllRightConcatMaskedInt16x8 + OpShiftAllRightConcatMaskedInt16x16 + OpShiftAllRightConcatMaskedInt16x32 + OpShiftAllRightConcatMaskedInt32x4 + OpShiftAllRightConcatMaskedInt32x8 + OpShiftAllRightConcatMaskedInt32x16 + OpShiftAllRightConcatMaskedInt64x2 + OpShiftAllRightConcatMaskedInt64x4 + OpShiftAllRightConcatMaskedInt64x8 + OpShiftAllRightConcatMaskedUint16x8 + OpShiftAllRightConcatMaskedUint16x16 + OpShiftAllRightConcatMaskedUint16x32 + OpShiftAllRightConcatMaskedUint32x4 + OpShiftAllRightConcatMaskedUint32x8 + OpShiftAllRightConcatMaskedUint32x16 + OpShiftAllRightConcatMaskedUint64x2 + OpShiftAllRightConcatMaskedUint64x4 + OpShiftAllRightConcatMaskedUint64x8 + OpShiftAllRightConcatUint16x8 + OpShiftAllRightConcatUint16x16 + OpShiftAllRightConcatUint16x32 + OpShiftAllRightConcatUint32x4 + OpShiftAllRightConcatUint32x8 + OpShiftAllRightConcatUint32x16 + OpShiftAllRightConcatUint64x2 + OpShiftAllRightConcatUint64x4 + OpShiftAllRightConcatUint64x8 OpTruncWithPrecisionFloat32x4 OpTruncWithPrecisionFloat32x8 OpTruncWithPrecisionFloat32x16 @@ -68518,182 +68518,182 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "ShiftLeftAndFillUpperFromInt16x8", + name: "ShiftLeftConcatInt16x8", argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromInt16x16", + name: "ShiftLeftConcatInt16x16", argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromInt16x32", + name: "ShiftLeftConcatInt16x32", argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromInt32x4", + name: "ShiftLeftConcatInt32x4", argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromInt32x8", + name: "ShiftLeftConcatInt32x8", argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromInt32x16", + name: "ShiftLeftConcatInt32x16", argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromInt64x2", + name: "ShiftLeftConcatInt64x2", argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromInt64x4", + name: "ShiftLeftConcatInt64x4", argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromInt64x8", + name: "ShiftLeftConcatInt64x8", argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedInt16x8", + name: "ShiftLeftConcatMaskedInt16x8", argLen: 4, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedInt16x16", + name: "ShiftLeftConcatMaskedInt16x16", argLen: 4, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedInt16x32", + name: "ShiftLeftConcatMaskedInt16x32", argLen: 4, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedInt32x4", + name: "ShiftLeftConcatMaskedInt32x4", argLen: 4, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedInt32x8", + name: "ShiftLeftConcatMaskedInt32x8", argLen: 4, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedInt32x16", + name: "ShiftLeftConcatMaskedInt32x16", argLen: 4, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedInt64x2", + name: "ShiftLeftConcatMaskedInt64x2", argLen: 4, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedInt64x4", + name: "ShiftLeftConcatMaskedInt64x4", argLen: 4, 
generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedInt64x8", + name: "ShiftLeftConcatMaskedInt64x8", argLen: 4, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedUint16x8", + name: "ShiftLeftConcatMaskedUint16x8", argLen: 4, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedUint16x16", + name: "ShiftLeftConcatMaskedUint16x16", argLen: 4, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedUint16x32", + name: "ShiftLeftConcatMaskedUint16x32", argLen: 4, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedUint32x4", + name: "ShiftLeftConcatMaskedUint32x4", argLen: 4, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedUint32x8", + name: "ShiftLeftConcatMaskedUint32x8", argLen: 4, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedUint32x16", + name: "ShiftLeftConcatMaskedUint32x16", argLen: 4, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedUint64x2", + name: "ShiftLeftConcatMaskedUint64x2", argLen: 4, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedUint64x4", + name: "ShiftLeftConcatMaskedUint64x4", argLen: 4, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedUint64x8", + name: "ShiftLeftConcatMaskedUint64x8", argLen: 4, generic: true, }, { - name: "ShiftLeftAndFillUpperFromUint16x8", + name: "ShiftLeftConcatUint16x8", argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromUint16x16", + name: "ShiftLeftConcatUint16x16", argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromUint16x32", + name: "ShiftLeftConcatUint16x32", argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromUint32x4", + name: "ShiftLeftConcatUint32x4", argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromUint32x8", + name: "ShiftLeftConcatUint32x8", argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromUint32x16", + name: "ShiftLeftConcatUint32x16", argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromUint64x2", + name: "ShiftLeftConcatUint64x2", argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromUint64x4", + name: "ShiftLeftConcatUint64x4", argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromUint64x8", + name: "ShiftLeftConcatUint64x8", argLen: 3, generic: true, }, @@ -68878,182 +68878,182 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "ShiftRightAndFillUpperFromInt16x8", + name: "ShiftRightConcatInt16x8", argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromInt16x16", + name: "ShiftRightConcatInt16x16", argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromInt16x32", + name: "ShiftRightConcatInt16x32", argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromInt32x4", + name: "ShiftRightConcatInt32x4", argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromInt32x8", + name: "ShiftRightConcatInt32x8", argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromInt32x16", + name: "ShiftRightConcatInt32x16", argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromInt64x2", + name: "ShiftRightConcatInt64x2", argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromInt64x4", + name: "ShiftRightConcatInt64x4", argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromInt64x8", + name: "ShiftRightConcatInt64x8", argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedInt16x8", + name: "ShiftRightConcatMaskedInt16x8", argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedInt16x16", + name: 
"ShiftRightConcatMaskedInt16x16", argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedInt16x32", + name: "ShiftRightConcatMaskedInt16x32", argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedInt32x4", + name: "ShiftRightConcatMaskedInt32x4", argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedInt32x8", + name: "ShiftRightConcatMaskedInt32x8", argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedInt32x16", + name: "ShiftRightConcatMaskedInt32x16", argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedInt64x2", + name: "ShiftRightConcatMaskedInt64x2", argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedInt64x4", + name: "ShiftRightConcatMaskedInt64x4", argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedInt64x8", + name: "ShiftRightConcatMaskedInt64x8", argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedUint16x8", + name: "ShiftRightConcatMaskedUint16x8", argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedUint16x16", + name: "ShiftRightConcatMaskedUint16x16", argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedUint16x32", + name: "ShiftRightConcatMaskedUint16x32", argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedUint32x4", + name: "ShiftRightConcatMaskedUint32x4", argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedUint32x8", + name: "ShiftRightConcatMaskedUint32x8", argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedUint32x16", + name: "ShiftRightConcatMaskedUint32x16", argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedUint64x2", + name: "ShiftRightConcatMaskedUint64x2", argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedUint64x4", + name: "ShiftRightConcatMaskedUint64x4", argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedUint64x8", + name: "ShiftRightConcatMaskedUint64x8", argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint16x8", + name: "ShiftRightConcatUint16x8", argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint16x16", + name: "ShiftRightConcatUint16x16", argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint16x32", + name: "ShiftRightConcatUint16x32", argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint32x4", + name: "ShiftRightConcatUint32x4", argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint32x8", + name: "ShiftRightConcatUint32x8", argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint32x16", + name: "ShiftRightConcatUint32x16", argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint64x2", + name: "ShiftRightConcatUint64x2", argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint64x4", + name: "ShiftRightConcatUint64x4", argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint64x8", + name: "ShiftRightConcatUint64x8", argLen: 3, generic: true, }, @@ -70950,433 +70950,433 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromInt16x8", + name: "ShiftAllLeftConcatInt16x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromInt16x16", + name: "ShiftAllLeftConcatInt16x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromInt16x32", + name: "ShiftAllLeftConcatInt16x32", auxType: auxInt8, argLen: 2, generic: 
true, }, { - name: "ShiftAllLeftAndFillUpperFromInt32x4", + name: "ShiftAllLeftConcatInt32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromInt32x8", + name: "ShiftAllLeftConcatInt32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromInt32x16", + name: "ShiftAllLeftConcatInt32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromInt64x2", + name: "ShiftAllLeftConcatInt64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromInt64x4", + name: "ShiftAllLeftConcatInt64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromInt64x8", + name: "ShiftAllLeftConcatInt64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedInt16x8", + name: "ShiftAllLeftConcatMaskedInt16x8", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedInt16x16", + name: "ShiftAllLeftConcatMaskedInt16x16", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedInt16x32", + name: "ShiftAllLeftConcatMaskedInt16x32", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedInt32x4", + name: "ShiftAllLeftConcatMaskedInt32x4", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedInt32x8", + name: "ShiftAllLeftConcatMaskedInt32x8", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedInt32x16", + name: "ShiftAllLeftConcatMaskedInt32x16", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedInt64x2", + name: "ShiftAllLeftConcatMaskedInt64x2", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedInt64x4", + name: "ShiftAllLeftConcatMaskedInt64x4", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedInt64x8", + name: "ShiftAllLeftConcatMaskedInt64x8", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedUint16x8", + name: "ShiftAllLeftConcatMaskedUint16x8", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedUint16x16", + name: "ShiftAllLeftConcatMaskedUint16x16", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedUint16x32", + name: "ShiftAllLeftConcatMaskedUint16x32", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedUint32x4", + name: "ShiftAllLeftConcatMaskedUint32x4", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedUint32x8", + name: "ShiftAllLeftConcatMaskedUint32x8", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedUint32x16", + name: "ShiftAllLeftConcatMaskedUint32x16", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedUint64x2", + name: "ShiftAllLeftConcatMaskedUint64x2", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedUint64x4", + name: "ShiftAllLeftConcatMaskedUint64x4", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedUint64x8", + name: "ShiftAllLeftConcatMaskedUint64x8", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromUint16x8", + name: "ShiftAllLeftConcatUint16x8", auxType: auxInt8, 
argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromUint16x16", + name: "ShiftAllLeftConcatUint16x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromUint16x32", + name: "ShiftAllLeftConcatUint16x32", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromUint32x4", + name: "ShiftAllLeftConcatUint32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromUint32x8", + name: "ShiftAllLeftConcatUint32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromUint32x16", + name: "ShiftAllLeftConcatUint32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromUint64x2", + name: "ShiftAllLeftConcatUint64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromUint64x4", + name: "ShiftAllLeftConcatUint64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromUint64x8", + name: "ShiftAllLeftConcatUint64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt16x8", + name: "ShiftAllRightConcatInt16x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt16x16", + name: "ShiftAllRightConcatInt16x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt16x32", + name: "ShiftAllRightConcatInt16x32", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt32x4", + name: "ShiftAllRightConcatInt32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt32x8", + name: "ShiftAllRightConcatInt32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt32x16", + name: "ShiftAllRightConcatInt32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt64x2", + name: "ShiftAllRightConcatInt64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt64x4", + name: "ShiftAllRightConcatInt64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt64x8", + name: "ShiftAllRightConcatInt64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedInt16x8", + name: "ShiftAllRightConcatMaskedInt16x8", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedInt16x16", + name: "ShiftAllRightConcatMaskedInt16x16", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedInt16x32", + name: "ShiftAllRightConcatMaskedInt16x32", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedInt32x4", + name: "ShiftAllRightConcatMaskedInt32x4", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedInt32x8", + name: "ShiftAllRightConcatMaskedInt32x8", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedInt32x16", + name: "ShiftAllRightConcatMaskedInt32x16", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedInt64x2", + name: "ShiftAllRightConcatMaskedInt64x2", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedInt64x4", + name: "ShiftAllRightConcatMaskedInt64x4", auxType: auxInt8, argLen: 3, generic: true, }, { - name: 
"ShiftAllRightAndFillUpperFromMaskedInt64x8", + name: "ShiftAllRightConcatMaskedInt64x8", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedUint16x8", + name: "ShiftAllRightConcatMaskedUint16x8", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedUint16x16", + name: "ShiftAllRightConcatMaskedUint16x16", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedUint16x32", + name: "ShiftAllRightConcatMaskedUint16x32", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedUint32x4", + name: "ShiftAllRightConcatMaskedUint32x4", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedUint32x8", + name: "ShiftAllRightConcatMaskedUint32x8", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedUint32x16", + name: "ShiftAllRightConcatMaskedUint32x16", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedUint64x2", + name: "ShiftAllRightConcatMaskedUint64x2", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedUint64x4", + name: "ShiftAllRightConcatMaskedUint64x4", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedUint64x8", + name: "ShiftAllRightConcatMaskedUint64x8", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromUint16x8", + name: "ShiftAllRightConcatUint16x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromUint16x16", + name: "ShiftAllRightConcatUint16x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromUint16x32", + name: "ShiftAllRightConcatUint16x32", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromUint32x4", + name: "ShiftAllRightConcatUint32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromUint32x8", + name: "ShiftAllRightConcatUint32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromUint32x16", + name: "ShiftAllRightConcatUint32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromUint64x2", + name: "ShiftAllRightConcatUint64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromUint64x4", + name: "ShiftAllRightConcatUint64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromUint64x8", + name: "ShiftAllRightConcatUint64x8", auxType: auxInt8, argLen: 2, generic: true, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 986f256887521b..e9a2fd70e4e774 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -4443,94 +4443,94 @@ func rewriteValueAMD64(v *Value) bool { case OpSetElemUint8x16: v.Op = OpAMD64VPINSRB128 return true - case OpShiftAllLeftAndFillUpperFromInt16x16: + case OpShiftAllLeftConcatInt16x16: v.Op = OpAMD64VPSHLDW256 return true - case OpShiftAllLeftAndFillUpperFromInt16x32: + case OpShiftAllLeftConcatInt16x32: v.Op = OpAMD64VPSHLDW512 return true - case OpShiftAllLeftAndFillUpperFromInt16x8: + case OpShiftAllLeftConcatInt16x8: v.Op = OpAMD64VPSHLDW128 return true - case OpShiftAllLeftAndFillUpperFromInt32x16: + case OpShiftAllLeftConcatInt32x16: v.Op = 
OpAMD64VPSHLDD512 return true - case OpShiftAllLeftAndFillUpperFromInt32x4: + case OpShiftAllLeftConcatInt32x4: v.Op = OpAMD64VPSHLDD128 return true - case OpShiftAllLeftAndFillUpperFromInt32x8: + case OpShiftAllLeftConcatInt32x8: v.Op = OpAMD64VPSHLDD256 return true - case OpShiftAllLeftAndFillUpperFromInt64x2: + case OpShiftAllLeftConcatInt64x2: v.Op = OpAMD64VPSHLDQ128 return true - case OpShiftAllLeftAndFillUpperFromInt64x4: + case OpShiftAllLeftConcatInt64x4: v.Op = OpAMD64VPSHLDQ256 return true - case OpShiftAllLeftAndFillUpperFromInt64x8: + case OpShiftAllLeftConcatInt64x8: v.Op = OpAMD64VPSHLDQ512 return true - case OpShiftAllLeftAndFillUpperFromMaskedInt16x16: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt16x16(v) - case OpShiftAllLeftAndFillUpperFromMaskedInt16x32: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt16x32(v) - case OpShiftAllLeftAndFillUpperFromMaskedInt16x8: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt16x8(v) - case OpShiftAllLeftAndFillUpperFromMaskedInt32x16: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt32x16(v) - case OpShiftAllLeftAndFillUpperFromMaskedInt32x4: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt32x4(v) - case OpShiftAllLeftAndFillUpperFromMaskedInt32x8: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt32x8(v) - case OpShiftAllLeftAndFillUpperFromMaskedInt64x2: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt64x2(v) - case OpShiftAllLeftAndFillUpperFromMaskedInt64x4: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt64x4(v) - case OpShiftAllLeftAndFillUpperFromMaskedInt64x8: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt64x8(v) - case OpShiftAllLeftAndFillUpperFromMaskedUint16x16: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint16x16(v) - case OpShiftAllLeftAndFillUpperFromMaskedUint16x32: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint16x32(v) - case OpShiftAllLeftAndFillUpperFromMaskedUint16x8: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint16x8(v) - case OpShiftAllLeftAndFillUpperFromMaskedUint32x16: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint32x16(v) - case OpShiftAllLeftAndFillUpperFromMaskedUint32x4: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint32x4(v) - case OpShiftAllLeftAndFillUpperFromMaskedUint32x8: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint32x8(v) - case OpShiftAllLeftAndFillUpperFromMaskedUint64x2: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint64x2(v) - case OpShiftAllLeftAndFillUpperFromMaskedUint64x4: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint64x4(v) - case OpShiftAllLeftAndFillUpperFromMaskedUint64x8: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint64x8(v) - case OpShiftAllLeftAndFillUpperFromUint16x16: + case OpShiftAllLeftConcatMaskedInt16x16: + return rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt16x16(v) + case OpShiftAllLeftConcatMaskedInt16x32: + return rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt16x32(v) + case OpShiftAllLeftConcatMaskedInt16x8: + return rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt16x8(v) + case OpShiftAllLeftConcatMaskedInt32x16: + return rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt32x16(v) + case OpShiftAllLeftConcatMaskedInt32x4: + return rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt32x4(v) + case OpShiftAllLeftConcatMaskedInt32x8: + 
return rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt32x8(v) + case OpShiftAllLeftConcatMaskedInt64x2: + return rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt64x2(v) + case OpShiftAllLeftConcatMaskedInt64x4: + return rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt64x4(v) + case OpShiftAllLeftConcatMaskedInt64x8: + return rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt64x8(v) + case OpShiftAllLeftConcatMaskedUint16x16: + return rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint16x16(v) + case OpShiftAllLeftConcatMaskedUint16x32: + return rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint16x32(v) + case OpShiftAllLeftConcatMaskedUint16x8: + return rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint16x8(v) + case OpShiftAllLeftConcatMaskedUint32x16: + return rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint32x16(v) + case OpShiftAllLeftConcatMaskedUint32x4: + return rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint32x4(v) + case OpShiftAllLeftConcatMaskedUint32x8: + return rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint32x8(v) + case OpShiftAllLeftConcatMaskedUint64x2: + return rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint64x2(v) + case OpShiftAllLeftConcatMaskedUint64x4: + return rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint64x4(v) + case OpShiftAllLeftConcatMaskedUint64x8: + return rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint64x8(v) + case OpShiftAllLeftConcatUint16x16: v.Op = OpAMD64VPSHLDW256 return true - case OpShiftAllLeftAndFillUpperFromUint16x32: + case OpShiftAllLeftConcatUint16x32: v.Op = OpAMD64VPSHLDW512 return true - case OpShiftAllLeftAndFillUpperFromUint16x8: + case OpShiftAllLeftConcatUint16x8: v.Op = OpAMD64VPSHLDW128 return true - case OpShiftAllLeftAndFillUpperFromUint32x16: + case OpShiftAllLeftConcatUint32x16: v.Op = OpAMD64VPSHLDD512 return true - case OpShiftAllLeftAndFillUpperFromUint32x4: + case OpShiftAllLeftConcatUint32x4: v.Op = OpAMD64VPSHLDD128 return true - case OpShiftAllLeftAndFillUpperFromUint32x8: + case OpShiftAllLeftConcatUint32x8: v.Op = OpAMD64VPSHLDD256 return true - case OpShiftAllLeftAndFillUpperFromUint64x2: + case OpShiftAllLeftConcatUint64x2: v.Op = OpAMD64VPSHLDQ128 return true - case OpShiftAllLeftAndFillUpperFromUint64x4: + case OpShiftAllLeftConcatUint64x4: v.Op = OpAMD64VPSHLDQ256 return true - case OpShiftAllLeftAndFillUpperFromUint64x8: + case OpShiftAllLeftConcatUint64x8: v.Op = OpAMD64VPSHLDQ512 return true case OpShiftAllLeftInt16x16: @@ -4623,94 +4623,94 @@ func rewriteValueAMD64(v *Value) bool { case OpShiftAllLeftUint64x8: v.Op = OpAMD64VPSLLQ512 return true - case OpShiftAllRightAndFillUpperFromInt16x16: + case OpShiftAllRightConcatInt16x16: v.Op = OpAMD64VPSHRDW256 return true - case OpShiftAllRightAndFillUpperFromInt16x32: + case OpShiftAllRightConcatInt16x32: v.Op = OpAMD64VPSHRDW512 return true - case OpShiftAllRightAndFillUpperFromInt16x8: + case OpShiftAllRightConcatInt16x8: v.Op = OpAMD64VPSHRDW128 return true - case OpShiftAllRightAndFillUpperFromInt32x16: + case OpShiftAllRightConcatInt32x16: v.Op = OpAMD64VPSHRDD512 return true - case OpShiftAllRightAndFillUpperFromInt32x4: + case OpShiftAllRightConcatInt32x4: v.Op = OpAMD64VPSHRDD128 return true - case OpShiftAllRightAndFillUpperFromInt32x8: + case OpShiftAllRightConcatInt32x8: v.Op = OpAMD64VPSHRDD256 return true - case OpShiftAllRightAndFillUpperFromInt64x2: + case OpShiftAllRightConcatInt64x2: v.Op = OpAMD64VPSHRDQ128 return true - case OpShiftAllRightAndFillUpperFromInt64x4: + case OpShiftAllRightConcatInt64x4: v.Op = OpAMD64VPSHRDQ256 return true - case 
OpShiftAllRightAndFillUpperFromInt64x8: + case OpShiftAllRightConcatInt64x8: v.Op = OpAMD64VPSHRDQ512 return true - case OpShiftAllRightAndFillUpperFromMaskedInt16x16: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt16x16(v) - case OpShiftAllRightAndFillUpperFromMaskedInt16x32: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt16x32(v) - case OpShiftAllRightAndFillUpperFromMaskedInt16x8: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt16x8(v) - case OpShiftAllRightAndFillUpperFromMaskedInt32x16: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt32x16(v) - case OpShiftAllRightAndFillUpperFromMaskedInt32x4: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt32x4(v) - case OpShiftAllRightAndFillUpperFromMaskedInt32x8: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt32x8(v) - case OpShiftAllRightAndFillUpperFromMaskedInt64x2: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt64x2(v) - case OpShiftAllRightAndFillUpperFromMaskedInt64x4: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt64x4(v) - case OpShiftAllRightAndFillUpperFromMaskedInt64x8: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt64x8(v) - case OpShiftAllRightAndFillUpperFromMaskedUint16x16: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint16x16(v) - case OpShiftAllRightAndFillUpperFromMaskedUint16x32: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint16x32(v) - case OpShiftAllRightAndFillUpperFromMaskedUint16x8: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint16x8(v) - case OpShiftAllRightAndFillUpperFromMaskedUint32x16: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint32x16(v) - case OpShiftAllRightAndFillUpperFromMaskedUint32x4: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint32x4(v) - case OpShiftAllRightAndFillUpperFromMaskedUint32x8: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint32x8(v) - case OpShiftAllRightAndFillUpperFromMaskedUint64x2: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint64x2(v) - case OpShiftAllRightAndFillUpperFromMaskedUint64x4: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint64x4(v) - case OpShiftAllRightAndFillUpperFromMaskedUint64x8: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint64x8(v) - case OpShiftAllRightAndFillUpperFromUint16x16: + case OpShiftAllRightConcatMaskedInt16x16: + return rewriteValueAMD64_OpShiftAllRightConcatMaskedInt16x16(v) + case OpShiftAllRightConcatMaskedInt16x32: + return rewriteValueAMD64_OpShiftAllRightConcatMaskedInt16x32(v) + case OpShiftAllRightConcatMaskedInt16x8: + return rewriteValueAMD64_OpShiftAllRightConcatMaskedInt16x8(v) + case OpShiftAllRightConcatMaskedInt32x16: + return rewriteValueAMD64_OpShiftAllRightConcatMaskedInt32x16(v) + case OpShiftAllRightConcatMaskedInt32x4: + return rewriteValueAMD64_OpShiftAllRightConcatMaskedInt32x4(v) + case OpShiftAllRightConcatMaskedInt32x8: + return rewriteValueAMD64_OpShiftAllRightConcatMaskedInt32x8(v) + case OpShiftAllRightConcatMaskedInt64x2: + return rewriteValueAMD64_OpShiftAllRightConcatMaskedInt64x2(v) + case OpShiftAllRightConcatMaskedInt64x4: + return rewriteValueAMD64_OpShiftAllRightConcatMaskedInt64x4(v) + case OpShiftAllRightConcatMaskedInt64x8: + return rewriteValueAMD64_OpShiftAllRightConcatMaskedInt64x8(v) + case OpShiftAllRightConcatMaskedUint16x16: + return 
rewriteValueAMD64_OpShiftAllRightConcatMaskedUint16x16(v) + case OpShiftAllRightConcatMaskedUint16x32: + return rewriteValueAMD64_OpShiftAllRightConcatMaskedUint16x32(v) + case OpShiftAllRightConcatMaskedUint16x8: + return rewriteValueAMD64_OpShiftAllRightConcatMaskedUint16x8(v) + case OpShiftAllRightConcatMaskedUint32x16: + return rewriteValueAMD64_OpShiftAllRightConcatMaskedUint32x16(v) + case OpShiftAllRightConcatMaskedUint32x4: + return rewriteValueAMD64_OpShiftAllRightConcatMaskedUint32x4(v) + case OpShiftAllRightConcatMaskedUint32x8: + return rewriteValueAMD64_OpShiftAllRightConcatMaskedUint32x8(v) + case OpShiftAllRightConcatMaskedUint64x2: + return rewriteValueAMD64_OpShiftAllRightConcatMaskedUint64x2(v) + case OpShiftAllRightConcatMaskedUint64x4: + return rewriteValueAMD64_OpShiftAllRightConcatMaskedUint64x4(v) + case OpShiftAllRightConcatMaskedUint64x8: + return rewriteValueAMD64_OpShiftAllRightConcatMaskedUint64x8(v) + case OpShiftAllRightConcatUint16x16: v.Op = OpAMD64VPSHRDW256 return true - case OpShiftAllRightAndFillUpperFromUint16x32: + case OpShiftAllRightConcatUint16x32: v.Op = OpAMD64VPSHRDW512 return true - case OpShiftAllRightAndFillUpperFromUint16x8: + case OpShiftAllRightConcatUint16x8: v.Op = OpAMD64VPSHRDW128 return true - case OpShiftAllRightAndFillUpperFromUint32x16: + case OpShiftAllRightConcatUint32x16: v.Op = OpAMD64VPSHRDD512 return true - case OpShiftAllRightAndFillUpperFromUint32x4: + case OpShiftAllRightConcatUint32x4: v.Op = OpAMD64VPSHRDD128 return true - case OpShiftAllRightAndFillUpperFromUint32x8: + case OpShiftAllRightConcatUint32x8: v.Op = OpAMD64VPSHRDD256 return true - case OpShiftAllRightAndFillUpperFromUint64x2: + case OpShiftAllRightConcatUint64x2: v.Op = OpAMD64VPSHRDQ128 return true - case OpShiftAllRightAndFillUpperFromUint64x4: + case OpShiftAllRightConcatUint64x4: v.Op = OpAMD64VPSHRDQ256 return true - case OpShiftAllRightAndFillUpperFromUint64x8: + case OpShiftAllRightConcatUint64x8: v.Op = OpAMD64VPSHRDQ512 return true case OpShiftAllRightInt16x16: @@ -4803,94 +4803,94 @@ func rewriteValueAMD64(v *Value) bool { case OpShiftAllRightUint64x8: v.Op = OpAMD64VPSRLQ512 return true - case OpShiftLeftAndFillUpperFromInt16x16: + case OpShiftLeftConcatInt16x16: v.Op = OpAMD64VPSHLDVW256 return true - case OpShiftLeftAndFillUpperFromInt16x32: + case OpShiftLeftConcatInt16x32: v.Op = OpAMD64VPSHLDVW512 return true - case OpShiftLeftAndFillUpperFromInt16x8: + case OpShiftLeftConcatInt16x8: v.Op = OpAMD64VPSHLDVW128 return true - case OpShiftLeftAndFillUpperFromInt32x16: + case OpShiftLeftConcatInt32x16: v.Op = OpAMD64VPSHLDVD512 return true - case OpShiftLeftAndFillUpperFromInt32x4: + case OpShiftLeftConcatInt32x4: v.Op = OpAMD64VPSHLDVD128 return true - case OpShiftLeftAndFillUpperFromInt32x8: + case OpShiftLeftConcatInt32x8: v.Op = OpAMD64VPSHLDVD256 return true - case OpShiftLeftAndFillUpperFromInt64x2: + case OpShiftLeftConcatInt64x2: v.Op = OpAMD64VPSHLDVQ128 return true - case OpShiftLeftAndFillUpperFromInt64x4: + case OpShiftLeftConcatInt64x4: v.Op = OpAMD64VPSHLDVQ256 return true - case OpShiftLeftAndFillUpperFromInt64x8: + case OpShiftLeftConcatInt64x8: v.Op = OpAMD64VPSHLDVQ512 return true - case OpShiftLeftAndFillUpperFromMaskedInt16x16: - return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt16x16(v) - case OpShiftLeftAndFillUpperFromMaskedInt16x32: - return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt16x32(v) - case OpShiftLeftAndFillUpperFromMaskedInt16x8: - return 
rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt16x8(v) - case OpShiftLeftAndFillUpperFromMaskedInt32x16: - return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt32x16(v) - case OpShiftLeftAndFillUpperFromMaskedInt32x4: - return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt32x4(v) - case OpShiftLeftAndFillUpperFromMaskedInt32x8: - return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt32x8(v) - case OpShiftLeftAndFillUpperFromMaskedInt64x2: - return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt64x2(v) - case OpShiftLeftAndFillUpperFromMaskedInt64x4: - return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt64x4(v) - case OpShiftLeftAndFillUpperFromMaskedInt64x8: - return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt64x8(v) - case OpShiftLeftAndFillUpperFromMaskedUint16x16: - return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint16x16(v) - case OpShiftLeftAndFillUpperFromMaskedUint16x32: - return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint16x32(v) - case OpShiftLeftAndFillUpperFromMaskedUint16x8: - return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint16x8(v) - case OpShiftLeftAndFillUpperFromMaskedUint32x16: - return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint32x16(v) - case OpShiftLeftAndFillUpperFromMaskedUint32x4: - return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint32x4(v) - case OpShiftLeftAndFillUpperFromMaskedUint32x8: - return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint32x8(v) - case OpShiftLeftAndFillUpperFromMaskedUint64x2: - return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint64x2(v) - case OpShiftLeftAndFillUpperFromMaskedUint64x4: - return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint64x4(v) - case OpShiftLeftAndFillUpperFromMaskedUint64x8: - return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint64x8(v) - case OpShiftLeftAndFillUpperFromUint16x16: + case OpShiftLeftConcatMaskedInt16x16: + return rewriteValueAMD64_OpShiftLeftConcatMaskedInt16x16(v) + case OpShiftLeftConcatMaskedInt16x32: + return rewriteValueAMD64_OpShiftLeftConcatMaskedInt16x32(v) + case OpShiftLeftConcatMaskedInt16x8: + return rewriteValueAMD64_OpShiftLeftConcatMaskedInt16x8(v) + case OpShiftLeftConcatMaskedInt32x16: + return rewriteValueAMD64_OpShiftLeftConcatMaskedInt32x16(v) + case OpShiftLeftConcatMaskedInt32x4: + return rewriteValueAMD64_OpShiftLeftConcatMaskedInt32x4(v) + case OpShiftLeftConcatMaskedInt32x8: + return rewriteValueAMD64_OpShiftLeftConcatMaskedInt32x8(v) + case OpShiftLeftConcatMaskedInt64x2: + return rewriteValueAMD64_OpShiftLeftConcatMaskedInt64x2(v) + case OpShiftLeftConcatMaskedInt64x4: + return rewriteValueAMD64_OpShiftLeftConcatMaskedInt64x4(v) + case OpShiftLeftConcatMaskedInt64x8: + return rewriteValueAMD64_OpShiftLeftConcatMaskedInt64x8(v) + case OpShiftLeftConcatMaskedUint16x16: + return rewriteValueAMD64_OpShiftLeftConcatMaskedUint16x16(v) + case OpShiftLeftConcatMaskedUint16x32: + return rewriteValueAMD64_OpShiftLeftConcatMaskedUint16x32(v) + case OpShiftLeftConcatMaskedUint16x8: + return rewriteValueAMD64_OpShiftLeftConcatMaskedUint16x8(v) + case OpShiftLeftConcatMaskedUint32x16: + return rewriteValueAMD64_OpShiftLeftConcatMaskedUint32x16(v) + case OpShiftLeftConcatMaskedUint32x4: + return rewriteValueAMD64_OpShiftLeftConcatMaskedUint32x4(v) + case OpShiftLeftConcatMaskedUint32x8: + return rewriteValueAMD64_OpShiftLeftConcatMaskedUint32x8(v) + case OpShiftLeftConcatMaskedUint64x2: + return 
rewriteValueAMD64_OpShiftLeftConcatMaskedUint64x2(v) + case OpShiftLeftConcatMaskedUint64x4: + return rewriteValueAMD64_OpShiftLeftConcatMaskedUint64x4(v) + case OpShiftLeftConcatMaskedUint64x8: + return rewriteValueAMD64_OpShiftLeftConcatMaskedUint64x8(v) + case OpShiftLeftConcatUint16x16: v.Op = OpAMD64VPSHLDVW256 return true - case OpShiftLeftAndFillUpperFromUint16x32: + case OpShiftLeftConcatUint16x32: v.Op = OpAMD64VPSHLDVW512 return true - case OpShiftLeftAndFillUpperFromUint16x8: + case OpShiftLeftConcatUint16x8: v.Op = OpAMD64VPSHLDVW128 return true - case OpShiftLeftAndFillUpperFromUint32x16: + case OpShiftLeftConcatUint32x16: v.Op = OpAMD64VPSHLDVD512 return true - case OpShiftLeftAndFillUpperFromUint32x4: + case OpShiftLeftConcatUint32x4: v.Op = OpAMD64VPSHLDVD128 return true - case OpShiftLeftAndFillUpperFromUint32x8: + case OpShiftLeftConcatUint32x8: v.Op = OpAMD64VPSHLDVD256 return true - case OpShiftLeftAndFillUpperFromUint64x2: + case OpShiftLeftConcatUint64x2: v.Op = OpAMD64VPSHLDVQ128 return true - case OpShiftLeftAndFillUpperFromUint64x4: + case OpShiftLeftConcatUint64x4: v.Op = OpAMD64VPSHLDVQ256 return true - case OpShiftLeftAndFillUpperFromUint64x8: + case OpShiftLeftConcatUint64x8: v.Op = OpAMD64VPSHLDVQ512 return true case OpShiftLeftInt16x16: @@ -4983,94 +4983,94 @@ func rewriteValueAMD64(v *Value) bool { case OpShiftLeftUint64x8: v.Op = OpAMD64VPSLLVQ512 return true - case OpShiftRightAndFillUpperFromInt16x16: + case OpShiftRightConcatInt16x16: v.Op = OpAMD64VPSHRDVW256 return true - case OpShiftRightAndFillUpperFromInt16x32: + case OpShiftRightConcatInt16x32: v.Op = OpAMD64VPSHRDVW512 return true - case OpShiftRightAndFillUpperFromInt16x8: + case OpShiftRightConcatInt16x8: v.Op = OpAMD64VPSHRDVW128 return true - case OpShiftRightAndFillUpperFromInt32x16: + case OpShiftRightConcatInt32x16: v.Op = OpAMD64VPSHRDVD512 return true - case OpShiftRightAndFillUpperFromInt32x4: + case OpShiftRightConcatInt32x4: v.Op = OpAMD64VPSHRDVD128 return true - case OpShiftRightAndFillUpperFromInt32x8: + case OpShiftRightConcatInt32x8: v.Op = OpAMD64VPSHRDVD256 return true - case OpShiftRightAndFillUpperFromInt64x2: + case OpShiftRightConcatInt64x2: v.Op = OpAMD64VPSHRDVQ128 return true - case OpShiftRightAndFillUpperFromInt64x4: + case OpShiftRightConcatInt64x4: v.Op = OpAMD64VPSHRDVQ256 return true - case OpShiftRightAndFillUpperFromInt64x8: + case OpShiftRightConcatInt64x8: v.Op = OpAMD64VPSHRDVQ512 return true - case OpShiftRightAndFillUpperFromMaskedInt16x16: - return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt16x16(v) - case OpShiftRightAndFillUpperFromMaskedInt16x32: - return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt16x32(v) - case OpShiftRightAndFillUpperFromMaskedInt16x8: - return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt16x8(v) - case OpShiftRightAndFillUpperFromMaskedInt32x16: - return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt32x16(v) - case OpShiftRightAndFillUpperFromMaskedInt32x4: - return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt32x4(v) - case OpShiftRightAndFillUpperFromMaskedInt32x8: - return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt32x8(v) - case OpShiftRightAndFillUpperFromMaskedInt64x2: - return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt64x2(v) - case OpShiftRightAndFillUpperFromMaskedInt64x4: - return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt64x4(v) - case OpShiftRightAndFillUpperFromMaskedInt64x8: - return 
rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt64x8(v) - case OpShiftRightAndFillUpperFromMaskedUint16x16: - return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint16x16(v) - case OpShiftRightAndFillUpperFromMaskedUint16x32: - return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint16x32(v) - case OpShiftRightAndFillUpperFromMaskedUint16x8: - return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint16x8(v) - case OpShiftRightAndFillUpperFromMaskedUint32x16: - return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint32x16(v) - case OpShiftRightAndFillUpperFromMaskedUint32x4: - return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint32x4(v) - case OpShiftRightAndFillUpperFromMaskedUint32x8: - return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint32x8(v) - case OpShiftRightAndFillUpperFromMaskedUint64x2: - return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint64x2(v) - case OpShiftRightAndFillUpperFromMaskedUint64x4: - return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint64x4(v) - case OpShiftRightAndFillUpperFromMaskedUint64x8: - return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint64x8(v) - case OpShiftRightAndFillUpperFromUint16x16: + case OpShiftRightConcatMaskedInt16x16: + return rewriteValueAMD64_OpShiftRightConcatMaskedInt16x16(v) + case OpShiftRightConcatMaskedInt16x32: + return rewriteValueAMD64_OpShiftRightConcatMaskedInt16x32(v) + case OpShiftRightConcatMaskedInt16x8: + return rewriteValueAMD64_OpShiftRightConcatMaskedInt16x8(v) + case OpShiftRightConcatMaskedInt32x16: + return rewriteValueAMD64_OpShiftRightConcatMaskedInt32x16(v) + case OpShiftRightConcatMaskedInt32x4: + return rewriteValueAMD64_OpShiftRightConcatMaskedInt32x4(v) + case OpShiftRightConcatMaskedInt32x8: + return rewriteValueAMD64_OpShiftRightConcatMaskedInt32x8(v) + case OpShiftRightConcatMaskedInt64x2: + return rewriteValueAMD64_OpShiftRightConcatMaskedInt64x2(v) + case OpShiftRightConcatMaskedInt64x4: + return rewriteValueAMD64_OpShiftRightConcatMaskedInt64x4(v) + case OpShiftRightConcatMaskedInt64x8: + return rewriteValueAMD64_OpShiftRightConcatMaskedInt64x8(v) + case OpShiftRightConcatMaskedUint16x16: + return rewriteValueAMD64_OpShiftRightConcatMaskedUint16x16(v) + case OpShiftRightConcatMaskedUint16x32: + return rewriteValueAMD64_OpShiftRightConcatMaskedUint16x32(v) + case OpShiftRightConcatMaskedUint16x8: + return rewriteValueAMD64_OpShiftRightConcatMaskedUint16x8(v) + case OpShiftRightConcatMaskedUint32x16: + return rewriteValueAMD64_OpShiftRightConcatMaskedUint32x16(v) + case OpShiftRightConcatMaskedUint32x4: + return rewriteValueAMD64_OpShiftRightConcatMaskedUint32x4(v) + case OpShiftRightConcatMaskedUint32x8: + return rewriteValueAMD64_OpShiftRightConcatMaskedUint32x8(v) + case OpShiftRightConcatMaskedUint64x2: + return rewriteValueAMD64_OpShiftRightConcatMaskedUint64x2(v) + case OpShiftRightConcatMaskedUint64x4: + return rewriteValueAMD64_OpShiftRightConcatMaskedUint64x4(v) + case OpShiftRightConcatMaskedUint64x8: + return rewriteValueAMD64_OpShiftRightConcatMaskedUint64x8(v) + case OpShiftRightConcatUint16x16: v.Op = OpAMD64VPSHRDVW256 return true - case OpShiftRightAndFillUpperFromUint16x32: + case OpShiftRightConcatUint16x32: v.Op = OpAMD64VPSHRDVW512 return true - case OpShiftRightAndFillUpperFromUint16x8: + case OpShiftRightConcatUint16x8: v.Op = OpAMD64VPSHRDVW128 return true - case OpShiftRightAndFillUpperFromUint32x16: + case OpShiftRightConcatUint32x16: v.Op = OpAMD64VPSHRDVD512 return true - case 
OpShiftRightAndFillUpperFromUint32x4: + case OpShiftRightConcatUint32x4: v.Op = OpAMD64VPSHRDVD128 return true - case OpShiftRightAndFillUpperFromUint32x8: + case OpShiftRightConcatUint32x8: v.Op = OpAMD64VPSHRDVD256 return true - case OpShiftRightAndFillUpperFromUint64x2: + case OpShiftRightConcatUint64x2: v.Op = OpAMD64VPSHRDVQ128 return true - case OpShiftRightAndFillUpperFromUint64x4: + case OpShiftRightConcatUint64x4: v.Op = OpAMD64VPSHRDVQ256 return true - case OpShiftRightAndFillUpperFromUint64x8: + case OpShiftRightConcatUint64x8: v.Op = OpAMD64VPSHRDVQ512 return true case OpShiftRightInt16x16: @@ -50752,12 +50752,12 @@ func rewriteValueAMD64_OpSelectN(v *Value) bool { } return false } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt16x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftAndFillUpperFromMaskedInt16x16 [a] x y mask) + // match: (ShiftAllLeftConcatMaskedInt16x16 [a] x y mask) // result: (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -50772,12 +50772,12 @@ func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt16x16(v *Value) bo return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt16x32(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftAndFillUpperFromMaskedInt16x32 [a] x y mask) + // match: (ShiftAllLeftConcatMaskedInt16x32 [a] x y mask) // result: (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -50792,12 +50792,12 @@ func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt16x32(v *Value) bo return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt16x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftAndFillUpperFromMaskedInt16x8 [a] x y mask) + // match: (ShiftAllLeftConcatMaskedInt16x8 [a] x y mask) // result: (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -50812,12 +50812,12 @@ func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt16x8(v *Value) boo return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt32x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftAndFillUpperFromMaskedInt32x16 [a] x y mask) + // match: (ShiftAllLeftConcatMaskedInt32x16 [a] x y mask) // result: (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -50832,12 +50832,12 @@ func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt32x16(v *Value) bo return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt32x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftAndFillUpperFromMaskedInt32x4 [a] x y mask) + // match: (ShiftAllLeftConcatMaskedInt32x4 [a] x y mask) // result: (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -50852,12 +50852,12 @@ func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt32x4(v *Value) boo 
return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt32x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftAndFillUpperFromMaskedInt32x8 [a] x y mask) + // match: (ShiftAllLeftConcatMaskedInt32x8 [a] x y mask) // result: (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -50872,12 +50872,12 @@ func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt32x8(v *Value) boo return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt64x2(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftAndFillUpperFromMaskedInt64x2 [a] x y mask) + // match: (ShiftAllLeftConcatMaskedInt64x2 [a] x y mask) // result: (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -50892,12 +50892,12 @@ func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt64x2(v *Value) boo return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt64x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftAndFillUpperFromMaskedInt64x4 [a] x y mask) + // match: (ShiftAllLeftConcatMaskedInt64x4 [a] x y mask) // result: (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -50912,12 +50912,12 @@ func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt64x4(v *Value) boo return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt64x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftAndFillUpperFromMaskedInt64x8 [a] x y mask) + // match: (ShiftAllLeftConcatMaskedInt64x8 [a] x y mask) // result: (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -50932,12 +50932,12 @@ func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt64x8(v *Value) boo return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint16x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftAndFillUpperFromMaskedUint16x16 [a] x y mask) + // match: (ShiftAllLeftConcatMaskedUint16x16 [a] x y mask) // result: (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -50952,12 +50952,12 @@ func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint16x16(v *Value) b return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint16x32(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftAndFillUpperFromMaskedUint16x32 [a] x y mask) + // match: (ShiftAllLeftConcatMaskedUint16x32 [a] x y mask) // result: (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -50972,12 +50972,12 @@ func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint16x32(v *Value) b return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint16x8(v *Value) bool { +func 
rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftAndFillUpperFromMaskedUint16x8 [a] x y mask) + // match: (ShiftAllLeftConcatMaskedUint16x8 [a] x y mask) // result: (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -50992,12 +50992,12 @@ func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint16x8(v *Value) bo return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint32x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftAndFillUpperFromMaskedUint32x16 [a] x y mask) + // match: (ShiftAllLeftConcatMaskedUint32x16 [a] x y mask) // result: (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -51012,12 +51012,12 @@ func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint32x16(v *Value) b return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint32x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftAndFillUpperFromMaskedUint32x4 [a] x y mask) + // match: (ShiftAllLeftConcatMaskedUint32x4 [a] x y mask) // result: (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -51032,12 +51032,12 @@ func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint32x4(v *Value) bo return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint32x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftAndFillUpperFromMaskedUint32x8 [a] x y mask) + // match: (ShiftAllLeftConcatMaskedUint32x8 [a] x y mask) // result: (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -51052,12 +51052,12 @@ func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint32x8(v *Value) bo return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint64x2(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftAndFillUpperFromMaskedUint64x2 [a] x y mask) + // match: (ShiftAllLeftConcatMaskedUint64x2 [a] x y mask) // result: (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -51072,12 +51072,12 @@ func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint64x2(v *Value) bo return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint64x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftAndFillUpperFromMaskedUint64x4 [a] x y mask) + // match: (ShiftAllLeftConcatMaskedUint64x4 [a] x y mask) // result: (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -51092,12 +51092,12 @@ func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint64x4(v *Value) bo return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint64x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] 
v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftAndFillUpperFromMaskedUint64x8 [a] x y mask) + // match: (ShiftAllLeftConcatMaskedUint64x8 [a] x y mask) // result: (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -51436,12 +51436,12 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt16x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightAndFillUpperFromMaskedInt16x16 [a] x y mask) + // match: (ShiftAllRightConcatMaskedInt16x16 [a] x y mask) // result: (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -51456,12 +51456,12 @@ func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt16x16(v *Value) b return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt16x32(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightAndFillUpperFromMaskedInt16x32 [a] x y mask) + // match: (ShiftAllRightConcatMaskedInt16x32 [a] x y mask) // result: (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -51476,12 +51476,12 @@ func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt16x32(v *Value) b return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt16x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightAndFillUpperFromMaskedInt16x8 [a] x y mask) + // match: (ShiftAllRightConcatMaskedInt16x8 [a] x y mask) // result: (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -51496,12 +51496,12 @@ func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt16x8(v *Value) bo return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt32x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightAndFillUpperFromMaskedInt32x16 [a] x y mask) + // match: (ShiftAllRightConcatMaskedInt32x16 [a] x y mask) // result: (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -51516,12 +51516,12 @@ func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt32x16(v *Value) b return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt32x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightAndFillUpperFromMaskedInt32x4 [a] x y mask) + // match: (ShiftAllRightConcatMaskedInt32x4 [a] x y mask) // result: (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -51536,12 +51536,12 @@ func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt32x4(v *Value) bo return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt32x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightAndFillUpperFromMaskedInt32x8 [a] x y mask) + // 
match: (ShiftAllRightConcatMaskedInt32x8 [a] x y mask) // result: (VPSHRDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -51556,12 +51556,12 @@ func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt32x8(v *Value) bo return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt64x2(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightAndFillUpperFromMaskedInt64x2 [a] x y mask) + // match: (ShiftAllRightConcatMaskedInt64x2 [a] x y mask) // result: (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -51576,12 +51576,12 @@ func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt64x2(v *Value) bo return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt64x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightAndFillUpperFromMaskedInt64x4 [a] x y mask) + // match: (ShiftAllRightConcatMaskedInt64x4 [a] x y mask) // result: (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -51596,12 +51596,12 @@ func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt64x4(v *Value) bo return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt64x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightAndFillUpperFromMaskedInt64x8 [a] x y mask) + // match: (ShiftAllRightConcatMaskedInt64x8 [a] x y mask) // result: (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -51616,12 +51616,12 @@ func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt64x8(v *Value) bo return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint16x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightAndFillUpperFromMaskedUint16x16 [a] x y mask) + // match: (ShiftAllRightConcatMaskedUint16x16 [a] x y mask) // result: (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -51636,12 +51636,12 @@ func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint16x16(v *Value) return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint16x32(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightAndFillUpperFromMaskedUint16x32 [a] x y mask) + // match: (ShiftAllRightConcatMaskedUint16x32 [a] x y mask) // result: (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -51656,12 +51656,12 @@ func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint16x32(v *Value) return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint16x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightAndFillUpperFromMaskedUint16x8 [a] x y mask) + // match: (ShiftAllRightConcatMaskedUint16x8 [a] x y mask) // result: (VPSHRDWMasked128 [a] x 
y (VPMOVVec16x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -51676,12 +51676,12 @@ func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint16x8(v *Value) b return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint32x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightAndFillUpperFromMaskedUint32x16 [a] x y mask) + // match: (ShiftAllRightConcatMaskedUint32x16 [a] x y mask) // result: (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -51696,12 +51696,12 @@ func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint32x16(v *Value) return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint32x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightAndFillUpperFromMaskedUint32x4 [a] x y mask) + // match: (ShiftAllRightConcatMaskedUint32x4 [a] x y mask) // result: (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -51716,12 +51716,12 @@ func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint32x4(v *Value) b return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint32x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightAndFillUpperFromMaskedUint32x8 [a] x y mask) + // match: (ShiftAllRightConcatMaskedUint32x8 [a] x y mask) // result: (VPSHRDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -51736,12 +51736,12 @@ func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint32x8(v *Value) b return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint64x2(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightAndFillUpperFromMaskedUint64x2 [a] x y mask) + // match: (ShiftAllRightConcatMaskedUint64x2 [a] x y mask) // result: (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -51756,12 +51756,12 @@ func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint64x2(v *Value) b return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint64x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightAndFillUpperFromMaskedUint64x4 [a] x y mask) + // match: (ShiftAllRightConcatMaskedUint64x4 [a] x y mask) // result: (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -51776,12 +51776,12 @@ func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint64x4(v *Value) b return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint64x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightAndFillUpperFromMaskedUint64x8 [a] x y mask) + // match: (ShiftAllRightConcatMaskedUint64x8 [a] x y mask) // result: (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -52120,13 +52120,13 
@@ func rewriteValueAMD64_OpShiftAllRightMaskedUint64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt16x16(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftConcatMaskedInt16x16(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftLeftAndFillUpperFromMaskedInt16x16 x y z mask) + // match: (ShiftLeftConcatMaskedInt16x16 x y z mask) // result: (VPSHLDVWMasked256 x y z (VPMOVVec16x16ToM mask)) for { x := v_0 @@ -52140,13 +52140,13 @@ func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt16x16(v *Value) bool return true } } -func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt16x32(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftConcatMaskedInt16x32(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftLeftAndFillUpperFromMaskedInt16x32 x y z mask) + // match: (ShiftLeftConcatMaskedInt16x32 x y z mask) // result: (VPSHLDVWMasked512 x y z (VPMOVVec16x32ToM mask)) for { x := v_0 @@ -52160,13 +52160,13 @@ func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt16x32(v *Value) bool return true } } -func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt16x8(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftConcatMaskedInt16x8(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftLeftAndFillUpperFromMaskedInt16x8 x y z mask) + // match: (ShiftLeftConcatMaskedInt16x8 x y z mask) // result: (VPSHLDVWMasked128 x y z (VPMOVVec16x8ToM mask)) for { x := v_0 @@ -52180,13 +52180,13 @@ func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt16x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt32x16(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftConcatMaskedInt32x16(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftLeftAndFillUpperFromMaskedInt32x16 x y z mask) + // match: (ShiftLeftConcatMaskedInt32x16 x y z mask) // result: (VPSHLDVDMasked512 x y z (VPMOVVec32x16ToM mask)) for { x := v_0 @@ -52200,13 +52200,13 @@ func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt32x16(v *Value) bool return true } } -func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt32x4(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftConcatMaskedInt32x4(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftLeftAndFillUpperFromMaskedInt32x4 x y z mask) + // match: (ShiftLeftConcatMaskedInt32x4 x y z mask) // result: (VPSHLDVDMasked128 x y z (VPMOVVec32x4ToM mask)) for { x := v_0 @@ -52220,13 +52220,13 @@ func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt32x8(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftConcatMaskedInt32x8(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftLeftAndFillUpperFromMaskedInt32x8 x y z mask) + // match: (ShiftLeftConcatMaskedInt32x8 x y z mask) // result: (VPSHLDVDMasked256 x y z (VPMOVVec32x8ToM mask)) for { x := v_0 @@ -52240,13 +52240,13 @@ func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt64x2(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftConcatMaskedInt64x2(v *Value) 
bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftLeftAndFillUpperFromMaskedInt64x2 x y z mask) + // match: (ShiftLeftConcatMaskedInt64x2 x y z mask) // result: (VPSHLDVQMasked128 x y z (VPMOVVec64x2ToM mask)) for { x := v_0 @@ -52260,13 +52260,13 @@ func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt64x4(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftConcatMaskedInt64x4(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftLeftAndFillUpperFromMaskedInt64x4 x y z mask) + // match: (ShiftLeftConcatMaskedInt64x4 x y z mask) // result: (VPSHLDVQMasked256 x y z (VPMOVVec64x4ToM mask)) for { x := v_0 @@ -52280,13 +52280,13 @@ func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt64x8(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftConcatMaskedInt64x8(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftLeftAndFillUpperFromMaskedInt64x8 x y z mask) + // match: (ShiftLeftConcatMaskedInt64x8 x y z mask) // result: (VPSHLDVQMasked512 x y z (VPMOVVec64x8ToM mask)) for { x := v_0 @@ -52300,13 +52300,13 @@ func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint16x16(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftConcatMaskedUint16x16(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftLeftAndFillUpperFromMaskedUint16x16 x y z mask) + // match: (ShiftLeftConcatMaskedUint16x16 x y z mask) // result: (VPSHLDVWMasked256 x y z (VPMOVVec16x16ToM mask)) for { x := v_0 @@ -52320,13 +52320,13 @@ func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint16x16(v *Value) bool return true } } -func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint16x32(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftConcatMaskedUint16x32(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftLeftAndFillUpperFromMaskedUint16x32 x y z mask) + // match: (ShiftLeftConcatMaskedUint16x32 x y z mask) // result: (VPSHLDVWMasked512 x y z (VPMOVVec16x32ToM mask)) for { x := v_0 @@ -52340,13 +52340,13 @@ func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint16x32(v *Value) bool return true } } -func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint16x8(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftConcatMaskedUint16x8(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftLeftAndFillUpperFromMaskedUint16x8 x y z mask) + // match: (ShiftLeftConcatMaskedUint16x8 x y z mask) // result: (VPSHLDVWMasked128 x y z (VPMOVVec16x8ToM mask)) for { x := v_0 @@ -52360,13 +52360,13 @@ func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint16x8(v *Value) bool return true } } -func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint32x16(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftConcatMaskedUint32x16(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftLeftAndFillUpperFromMaskedUint32x16 x y z mask) + // match: (ShiftLeftConcatMaskedUint32x16 x y z mask) // result: 
(VPSHLDVDMasked512 x y z (VPMOVVec32x16ToM mask)) for { x := v_0 @@ -52380,13 +52380,13 @@ func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint32x16(v *Value) bool return true } } -func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint32x4(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftConcatMaskedUint32x4(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftLeftAndFillUpperFromMaskedUint32x4 x y z mask) + // match: (ShiftLeftConcatMaskedUint32x4 x y z mask) // result: (VPSHLDVDMasked128 x y z (VPMOVVec32x4ToM mask)) for { x := v_0 @@ -52400,13 +52400,13 @@ func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint32x4(v *Value) bool return true } } -func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint32x8(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftConcatMaskedUint32x8(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftLeftAndFillUpperFromMaskedUint32x8 x y z mask) + // match: (ShiftLeftConcatMaskedUint32x8 x y z mask) // result: (VPSHLDVDMasked256 x y z (VPMOVVec32x8ToM mask)) for { x := v_0 @@ -52420,13 +52420,13 @@ func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint32x8(v *Value) bool return true } } -func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint64x2(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftConcatMaskedUint64x2(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftLeftAndFillUpperFromMaskedUint64x2 x y z mask) + // match: (ShiftLeftConcatMaskedUint64x2 x y z mask) // result: (VPSHLDVQMasked128 x y z (VPMOVVec64x2ToM mask)) for { x := v_0 @@ -52440,13 +52440,13 @@ func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint64x2(v *Value) bool return true } } -func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint64x4(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftConcatMaskedUint64x4(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftLeftAndFillUpperFromMaskedUint64x4 x y z mask) + // match: (ShiftLeftConcatMaskedUint64x4 x y z mask) // result: (VPSHLDVQMasked256 x y z (VPMOVVec64x4ToM mask)) for { x := v_0 @@ -52460,13 +52460,13 @@ func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint64x4(v *Value) bool return true } } -func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint64x8(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftConcatMaskedUint64x8(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftLeftAndFillUpperFromMaskedUint64x8 x y z mask) + // match: (ShiftLeftConcatMaskedUint64x8 x y z mask) // result: (VPSHLDVQMasked512 x y z (VPMOVVec64x8ToM mask)) for { x := v_0 @@ -52804,13 +52804,13 @@ func rewriteValueAMD64_OpShiftLeftMaskedUint64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt16x16(v *Value) bool { +func rewriteValueAMD64_OpShiftRightConcatMaskedInt16x16(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightAndFillUpperFromMaskedInt16x16 x y z mask) + // match: (ShiftRightConcatMaskedInt16x16 x y z mask) // result: (VPSHRDVWMasked256 x y z (VPMOVVec16x16ToM mask)) for { x := v_0 @@ -52824,13 +52824,13 @@ func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt16x16(v *Value) bool return true } } -func 
rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt16x32(v *Value) bool { +func rewriteValueAMD64_OpShiftRightConcatMaskedInt16x32(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightAndFillUpperFromMaskedInt16x32 x y z mask) + // match: (ShiftRightConcatMaskedInt16x32 x y z mask) // result: (VPSHRDVWMasked512 x y z (VPMOVVec16x32ToM mask)) for { x := v_0 @@ -52844,13 +52844,13 @@ func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt16x32(v *Value) bool return true } } -func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt16x8(v *Value) bool { +func rewriteValueAMD64_OpShiftRightConcatMaskedInt16x8(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightAndFillUpperFromMaskedInt16x8 x y z mask) + // match: (ShiftRightConcatMaskedInt16x8 x y z mask) // result: (VPSHRDVWMasked128 x y z (VPMOVVec16x8ToM mask)) for { x := v_0 @@ -52864,13 +52864,13 @@ func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt16x8(v *Value) bool return true } } -func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt32x16(v *Value) bool { +func rewriteValueAMD64_OpShiftRightConcatMaskedInt32x16(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightAndFillUpperFromMaskedInt32x16 x y z mask) + // match: (ShiftRightConcatMaskedInt32x16 x y z mask) // result: (VPSHRDVDMasked512 x y z (VPMOVVec32x16ToM mask)) for { x := v_0 @@ -52884,13 +52884,13 @@ func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt32x16(v *Value) bool return true } } -func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt32x4(v *Value) bool { +func rewriteValueAMD64_OpShiftRightConcatMaskedInt32x4(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightAndFillUpperFromMaskedInt32x4 x y z mask) + // match: (ShiftRightConcatMaskedInt32x4 x y z mask) // result: (VPSHRDVDMasked128 x y z (VPMOVVec32x4ToM mask)) for { x := v_0 @@ -52904,13 +52904,13 @@ func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt32x4(v *Value) bool return true } } -func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt32x8(v *Value) bool { +func rewriteValueAMD64_OpShiftRightConcatMaskedInt32x8(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightAndFillUpperFromMaskedInt32x8 x y z mask) + // match: (ShiftRightConcatMaskedInt32x8 x y z mask) // result: (VPSHRDVDMasked256 x y z (VPMOVVec32x8ToM mask)) for { x := v_0 @@ -52924,13 +52924,13 @@ func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt32x8(v *Value) bool return true } } -func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt64x2(v *Value) bool { +func rewriteValueAMD64_OpShiftRightConcatMaskedInt64x2(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightAndFillUpperFromMaskedInt64x2 x y z mask) + // match: (ShiftRightConcatMaskedInt64x2 x y z mask) // result: (VPSHRDVQMasked128 x y z (VPMOVVec64x2ToM mask)) for { x := v_0 @@ -52944,13 +52944,13 @@ func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt64x2(v *Value) bool return true } } -func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt64x4(v *Value) bool { +func rewriteValueAMD64_OpShiftRightConcatMaskedInt64x4(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] 
b := v.Block - // match: (ShiftRightAndFillUpperFromMaskedInt64x4 x y z mask) + // match: (ShiftRightConcatMaskedInt64x4 x y z mask) // result: (VPSHRDVQMasked256 x y z (VPMOVVec64x4ToM mask)) for { x := v_0 @@ -52964,13 +52964,13 @@ func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt64x4(v *Value) bool return true } } -func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt64x8(v *Value) bool { +func rewriteValueAMD64_OpShiftRightConcatMaskedInt64x8(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightAndFillUpperFromMaskedInt64x8 x y z mask) + // match: (ShiftRightConcatMaskedInt64x8 x y z mask) // result: (VPSHRDVQMasked512 x y z (VPMOVVec64x8ToM mask)) for { x := v_0 @@ -52984,13 +52984,13 @@ func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt64x8(v *Value) bool return true } } -func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint16x16(v *Value) bool { +func rewriteValueAMD64_OpShiftRightConcatMaskedUint16x16(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightAndFillUpperFromMaskedUint16x16 x y z mask) + // match: (ShiftRightConcatMaskedUint16x16 x y z mask) // result: (VPSHRDVWMasked256 x y z (VPMOVVec16x16ToM mask)) for { x := v_0 @@ -53004,13 +53004,13 @@ func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint16x16(v *Value) boo return true } } -func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint16x32(v *Value) bool { +func rewriteValueAMD64_OpShiftRightConcatMaskedUint16x32(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightAndFillUpperFromMaskedUint16x32 x y z mask) + // match: (ShiftRightConcatMaskedUint16x32 x y z mask) // result: (VPSHRDVWMasked512 x y z (VPMOVVec16x32ToM mask)) for { x := v_0 @@ -53024,13 +53024,13 @@ func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint16x32(v *Value) boo return true } } -func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint16x8(v *Value) bool { +func rewriteValueAMD64_OpShiftRightConcatMaskedUint16x8(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightAndFillUpperFromMaskedUint16x8 x y z mask) + // match: (ShiftRightConcatMaskedUint16x8 x y z mask) // result: (VPSHRDVWMasked128 x y z (VPMOVVec16x8ToM mask)) for { x := v_0 @@ -53044,13 +53044,13 @@ func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint16x8(v *Value) bool return true } } -func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint32x16(v *Value) bool { +func rewriteValueAMD64_OpShiftRightConcatMaskedUint32x16(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightAndFillUpperFromMaskedUint32x16 x y z mask) + // match: (ShiftRightConcatMaskedUint32x16 x y z mask) // result: (VPSHRDVDMasked512 x y z (VPMOVVec32x16ToM mask)) for { x := v_0 @@ -53064,13 +53064,13 @@ func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint32x16(v *Value) boo return true } } -func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint32x4(v *Value) bool { +func rewriteValueAMD64_OpShiftRightConcatMaskedUint32x4(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightAndFillUpperFromMaskedUint32x4 x y z mask) + // match: (ShiftRightConcatMaskedUint32x4 x y z mask) // result: (VPSHRDVDMasked128 x y z (VPMOVVec32x4ToM 
mask)) for { x := v_0 @@ -53084,13 +53084,13 @@ func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint32x4(v *Value) bool return true } } -func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint32x8(v *Value) bool { +func rewriteValueAMD64_OpShiftRightConcatMaskedUint32x8(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightAndFillUpperFromMaskedUint32x8 x y z mask) + // match: (ShiftRightConcatMaskedUint32x8 x y z mask) // result: (VPSHRDVDMasked256 x y z (VPMOVVec32x8ToM mask)) for { x := v_0 @@ -53104,13 +53104,13 @@ func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint32x8(v *Value) bool return true } } -func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint64x2(v *Value) bool { +func rewriteValueAMD64_OpShiftRightConcatMaskedUint64x2(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightAndFillUpperFromMaskedUint64x2 x y z mask) + // match: (ShiftRightConcatMaskedUint64x2 x y z mask) // result: (VPSHRDVQMasked128 x y z (VPMOVVec64x2ToM mask)) for { x := v_0 @@ -53124,13 +53124,13 @@ func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint64x2(v *Value) bool return true } } -func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint64x4(v *Value) bool { +func rewriteValueAMD64_OpShiftRightConcatMaskedUint64x4(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightAndFillUpperFromMaskedUint64x4 x y z mask) + // match: (ShiftRightConcatMaskedUint64x4 x y z mask) // result: (VPSHRDVQMasked256 x y z (VPMOVVec64x4ToM mask)) for { x := v_0 @@ -53144,13 +53144,13 @@ func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint64x4(v *Value) bool return true } } -func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint64x8(v *Value) bool { +func rewriteValueAMD64_OpShiftRightConcatMaskedUint64x8(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightAndFillUpperFromMaskedUint64x8 x y z mask) + // match: (ShiftRightConcatMaskedUint64x8 x y z mask) // result: (VPSHRDVQMasked512 x y z (VPMOVVec64x8ToM mask)) for { x := v_0 diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index a30144cbd10ece..d6c5b889ed3b8e 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -1412,42 +1412,42 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromInt16x8, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int16x16.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromInt16x16, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int16x32.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromInt16x32, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Int32x4.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromInt32x4, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int32x8.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromInt32x8, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int32x16.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromInt32x16, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Int64x2.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromInt64x2, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int64x4.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromInt64x4, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int64x8.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromInt64x8, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint16x8.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromUint16x8, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint16x16.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromUint16x16, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint16x32.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromUint16x32, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint32x4.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromUint32x4, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint32x8.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromUint32x8, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint32x16.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromUint32x16, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint64x2.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromUint64x2, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint64x4.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromUint64x4, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint64x8.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromUint64x8, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Int16x8.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedInt16x8, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int16x16.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedInt16x16, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int16x32.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedInt16x32, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Int32x4.ShiftAllLeftAndFillUpperFromMasked", 
opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedInt32x4, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int32x8.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedInt32x8, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int32x16.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedInt32x16, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Int64x2.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedInt64x2, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int64x4.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedInt64x4, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int64x8.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedInt64x8, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint16x8.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedUint16x8, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint16x16.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedUint16x16, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint16x32.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedUint16x32, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint32x4.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedUint32x4, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint32x8.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedUint32x8, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint32x16.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedUint32x16, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint64x2.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedUint64x2, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint64x4.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedUint64x4, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint64x8.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedUint64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftAllLeftConcat", opLen2Imm8(ssa.OpShiftAllLeftConcatInt16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftAllLeftConcat", opLen2Imm8(ssa.OpShiftAllLeftConcatInt16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftAllLeftConcat", opLen2Imm8(ssa.OpShiftAllLeftConcatInt16x32, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftAllLeftConcat", opLen2Imm8(ssa.OpShiftAllLeftConcatInt32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftAllLeftConcat", opLen2Imm8(ssa.OpShiftAllLeftConcatInt32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftAllLeftConcat", opLen2Imm8(ssa.OpShiftAllLeftConcatInt32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftAllLeftConcat", opLen2Imm8(ssa.OpShiftAllLeftConcatInt64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftAllLeftConcat", opLen2Imm8(ssa.OpShiftAllLeftConcatInt64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftAllLeftConcat", opLen2Imm8(ssa.OpShiftAllLeftConcatInt64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftAllLeftConcat", 
opLen2Imm8(ssa.OpShiftAllLeftConcatUint16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftAllLeftConcat", opLen2Imm8(ssa.OpShiftAllLeftConcatUint16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint16x32.ShiftAllLeftConcat", opLen2Imm8(ssa.OpShiftAllLeftConcatUint16x32, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint32x4.ShiftAllLeftConcat", opLen2Imm8(ssa.OpShiftAllLeftConcatUint32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftAllLeftConcat", opLen2Imm8(ssa.OpShiftAllLeftConcatUint32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftAllLeftConcat", opLen2Imm8(ssa.OpShiftAllLeftConcatUint32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftAllLeftConcat", opLen2Imm8(ssa.OpShiftAllLeftConcatUint64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftAllLeftConcat", opLen2Imm8(ssa.OpShiftAllLeftConcatUint64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint64x8.ShiftAllLeftConcat", opLen2Imm8(ssa.OpShiftAllLeftConcatUint64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedInt16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedInt16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedInt16x32, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedInt32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedInt32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedInt32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedInt64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedInt64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedInt64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedUint16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedUint16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint16x32.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedUint16x32, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint32x4.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedUint32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedUint32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedUint32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedUint64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedUint64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, 
"Uint64x8.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedUint64x8, types.TypeVec512, 0), sys.AMD64) addF(simdPackage, "Int16x8.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedInt16x32, types.TypeVec512), sys.AMD64) @@ -1484,42 +1484,42 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.ShiftAllRight", opLen2(ssa.OpShiftAllRightUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.ShiftAllRight", opLen2(ssa.OpShiftAllRightUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.ShiftAllRight", opLen2(ssa.OpShiftAllRightUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromInt16x8, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int16x16.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromInt16x16, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int16x32.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromInt16x32, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Int32x4.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromInt32x4, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int32x8.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromInt32x8, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int32x16.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromInt32x16, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Int64x2.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromInt64x2, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int64x4.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromInt64x4, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int64x8.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromInt64x8, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint16x8.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromUint16x8, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint16x16.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromUint16x16, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint16x32.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromUint16x32, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint32x4.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromUint32x4, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint32x8.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromUint32x8, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint32x16.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromUint32x16, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint64x2.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromUint64x2, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint64x4.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromUint64x4, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint64x8.ShiftAllRightAndFillUpperFrom", 
opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromUint64x8, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Int16x8.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedInt16x8, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int16x16.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedInt16x16, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int16x32.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedInt16x32, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Int32x4.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedInt32x4, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int32x8.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedInt32x8, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int32x16.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedInt32x16, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Int64x2.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedInt64x2, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int64x4.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedInt64x4, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int64x8.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedInt64x8, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint16x8.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedUint16x8, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint16x16.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedUint16x16, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint16x32.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedUint16x32, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint32x4.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedUint32x4, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint32x8.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedUint32x8, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint32x16.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedUint32x16, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint64x2.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedUint64x2, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint64x4.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedUint64x4, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint64x8.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedUint64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftAllRightConcat", opLen2Imm8(ssa.OpShiftAllRightConcatInt16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftAllRightConcat", opLen2Imm8(ssa.OpShiftAllRightConcatInt16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftAllRightConcat", opLen2Imm8(ssa.OpShiftAllRightConcatInt16x32, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftAllRightConcat", opLen2Imm8(ssa.OpShiftAllRightConcatInt32x4, types.TypeVec128, 0), sys.AMD64) + 
addF(simdPackage, "Int32x8.ShiftAllRightConcat", opLen2Imm8(ssa.OpShiftAllRightConcatInt32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftAllRightConcat", opLen2Imm8(ssa.OpShiftAllRightConcatInt32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftAllRightConcat", opLen2Imm8(ssa.OpShiftAllRightConcatInt64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftAllRightConcat", opLen2Imm8(ssa.OpShiftAllRightConcatInt64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftAllRightConcat", opLen2Imm8(ssa.OpShiftAllRightConcatInt64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftAllRightConcat", opLen2Imm8(ssa.OpShiftAllRightConcatUint16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftAllRightConcat", opLen2Imm8(ssa.OpShiftAllRightConcatUint16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint16x32.ShiftAllRightConcat", opLen2Imm8(ssa.OpShiftAllRightConcatUint16x32, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint32x4.ShiftAllRightConcat", opLen2Imm8(ssa.OpShiftAllRightConcatUint32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftAllRightConcat", opLen2Imm8(ssa.OpShiftAllRightConcatUint32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftAllRightConcat", opLen2Imm8(ssa.OpShiftAllRightConcatUint32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftAllRightConcat", opLen2Imm8(ssa.OpShiftAllRightConcatUint64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftAllRightConcat", opLen2Imm8(ssa.OpShiftAllRightConcatUint64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint64x8.ShiftAllRightConcat", opLen2Imm8(ssa.OpShiftAllRightConcatUint64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedInt16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedInt16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedInt16x32, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedInt32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedInt32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedInt32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedInt64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedInt64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedInt64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedUint16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedUint16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint16x32.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedUint16x32, types.TypeVec512, 0), sys.AMD64) + 
addF(simdPackage, "Uint32x4.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedUint32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedUint32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedUint32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedUint64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedUint64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint64x8.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedUint64x8, types.TypeVec512, 0), sys.AMD64) addF(simdPackage, "Int16x8.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedInt16x32, types.TypeVec512), sys.AMD64) @@ -1556,42 +1556,42 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.ShiftLeft", opLen2(ssa.OpShiftLeftUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.ShiftLeft", opLen2(ssa.OpShiftLeftUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.ShiftLeft", opLen2(ssa.OpShiftLeftUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, 
"Uint32x16.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftLeftConcat", opLen3(ssa.OpShiftLeftConcatInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftLeftConcat", opLen3(ssa.OpShiftLeftConcatInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftLeftConcat", opLen3(ssa.OpShiftLeftConcatInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, 
"Int32x4.ShiftLeftConcat", opLen3(ssa.OpShiftLeftConcatInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftLeftConcat", opLen3(ssa.OpShiftLeftConcatInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftLeftConcat", opLen3(ssa.OpShiftLeftConcatInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftLeftConcat", opLen3(ssa.OpShiftLeftConcatInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftLeftConcat", opLen3(ssa.OpShiftLeftConcatInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftLeftConcat", opLen3(ssa.OpShiftLeftConcatInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftLeftConcat", opLen3(ssa.OpShiftLeftConcatUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftLeftConcat", opLen3(ssa.OpShiftLeftConcatUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.ShiftLeftConcat", opLen3(ssa.OpShiftLeftConcatUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.ShiftLeftConcat", opLen3(ssa.OpShiftLeftConcatUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftLeftConcat", opLen3(ssa.OpShiftLeftConcatUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftLeftConcat", opLen3(ssa.OpShiftLeftConcatUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftLeftConcat", opLen3(ssa.OpShiftLeftConcatUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftLeftConcat", opLen3(ssa.OpShiftLeftConcatUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.ShiftLeftConcat", opLen3(ssa.OpShiftLeftConcatUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftLeftConcatMasked", 
opLen4(ssa.OpShiftLeftConcatMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedInt16x32, types.TypeVec512), sys.AMD64) @@ -1628,42 +1628,42 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.ShiftRight", opLen2(ssa.OpShiftRightUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.ShiftRight", opLen2(ssa.OpShiftRightUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.ShiftRight", opLen2(ssa.OpShiftRightUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromUint64x4, types.TypeVec256), 
sys.AMD64) - addF(simdPackage, "Uint64x8.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftRightConcat", opLen3(ssa.OpShiftRightConcatInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftRightConcat", opLen3(ssa.OpShiftRightConcatInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftRightConcat", opLen3(ssa.OpShiftRightConcatInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftRightConcat", opLen3(ssa.OpShiftRightConcatInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftRightConcat", opLen3(ssa.OpShiftRightConcatInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftRightConcat", opLen3(ssa.OpShiftRightConcatInt32x16, types.TypeVec512), sys.AMD64) + 
addF(simdPackage, "Int64x2.ShiftRightConcat", opLen3(ssa.OpShiftRightConcatInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftRightConcat", opLen3(ssa.OpShiftRightConcatInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftRightConcat", opLen3(ssa.OpShiftRightConcatInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftRightConcat", opLen3(ssa.OpShiftRightConcatUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftRightConcat", opLen3(ssa.OpShiftRightConcatUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.ShiftRightConcat", opLen3(ssa.OpShiftRightConcatUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.ShiftRightConcat", opLen3(ssa.OpShiftRightConcatUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftRightConcat", opLen3(ssa.OpShiftRightConcatUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftRightConcat", opLen3(ssa.OpShiftRightConcatUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftRightConcat", opLen3(ssa.OpShiftRightConcatUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftRightConcat", opLen3(ssa.OpShiftRightConcatUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.ShiftRightConcat", opLen3(ssa.OpShiftRightConcatUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftRightConcatMasked", 
opLen4(ssa.OpShiftRightConcatMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedInt16x32, types.TypeVec512), sys.AMD64) diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 8d941360907c14..f88410af43d705 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -9259,155 +9259,155 @@ func (x Uint64x4) ShiftAllLeft(y uint64) Uint64x4 // Asm: VPSLLQ, CPU Feature: AVX512F func (x Uint64x8) ShiftAllLeft(y uint64) Uint64x8 -/* ShiftAllLeftAndFillUpperFrom */ +/* ShiftAllLeftConcat */ -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 -func (x Int16x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int16x8) Int16x8 +func (x Int16x8) ShiftAllLeftConcat(shift uint8, y Int16x8) Int16x8 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 -func (x Int16x16) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int16x16) Int16x16 +func (x Int16x16) ShiftAllLeftConcat(shift uint8, y Int16x16) Int16x16 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 -func (x Int16x32) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int16x32) Int16x32 +func (x Int16x32) ShiftAllLeftConcat(shift uint8, y Int16x32) Int16x32 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
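To make the renamed API concrete, here is a hedged usage sketch. It assumes a GOEXPERIMENT=simd toolchain running on AVX-512 VBMI2 hardware; the LoadInt32x4 constructor and the Store method used below are assumptions about the package surface and may not match the final names.

package main

import (
	"fmt"
	"simd"
)

func main() {
	a := [4]int32{1, 2, 3, 4}
	b := [4]int32{-1, -1, -1, -1}
	x := simd.LoadInt32x4(&a) // assumed loader
	y := simd.LoadInt32x4(&b)
	// Shift every lane of x left by 4 bits; the 4 vacated low bits of each
	// lane are filled from the high bits of the corresponding lane of y.
	z := x.ShiftAllLeftConcat(4, y) // shift must be a constant
	var out [4]int32
	z.Store(&out) // assumed store method
	fmt.Println(out) // expected [31 47 63 79] per the semantics documented above
}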
// // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 -func (x Int32x4) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int32x4) Int32x4 +func (x Int32x4) ShiftAllLeftConcat(shift uint8, y Int32x4) Int32x4 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 -func (x Int32x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int32x8) Int32x8 +func (x Int32x8) ShiftAllLeftConcat(shift uint8, y Int32x8) Int32x8 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 -func (x Int32x16) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int32x16) Int32x16 +func (x Int32x16) ShiftAllLeftConcat(shift uint8, y Int32x16) Int32x16 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 -func (x Int64x2) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int64x2) Int64x2 +func (x Int64x2) ShiftAllLeftConcat(shift uint8, y Int64x2) Int64x2 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 -func (x Int64x4) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int64x4) Int64x4 +func (x Int64x4) ShiftAllLeftConcat(shift uint8, y Int64x4) Int64x4 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 -func (x Int64x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int64x8) Int64x8 +func (x Int64x8) ShiftAllLeftConcat(shift uint8, y Int64x8) Int64x8 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 -func (x Uint16x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint16x8) Uint16x8 +func (x Uint16x8) ShiftAllLeftConcat(shift uint8, y Uint16x8) Uint16x8 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 -func (x Uint16x16) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint16x16) Uint16x16 +func (x Uint16x16) ShiftAllLeftConcat(shift uint8, y Uint16x16) Uint16x16 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 -func (x Uint16x32) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint16x32) Uint16x32 +func (x Uint16x32) ShiftAllLeftConcat(shift uint8, y Uint16x32) Uint16x32 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 -func (x Uint32x4) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint32x4) Uint32x4 +func (x Uint32x4) ShiftAllLeftConcat(shift uint8, y Uint32x4) Uint32x4 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 -func (x Uint32x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint32x8) Uint32x8 +func (x Uint32x8) ShiftAllLeftConcat(shift uint8, y Uint32x8) Uint32x8 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 -func (x Uint32x16) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint32x16) Uint32x16 +func (x Uint32x16) ShiftAllLeftConcat(shift uint8, y Uint32x16) Uint32x16 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 -func (x Uint64x2) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint64x2) Uint64x2 +func (x Uint64x2) ShiftAllLeftConcat(shift uint8, y Uint64x2) Uint64x2 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 -func (x Uint64x4) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint64x4) Uint64x4 +func (x Uint64x4) ShiftAllLeftConcat(shift uint8, y Uint64x4) Uint64x4 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 -func (x Uint64x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint64x8) Uint64x8 +func (x Uint64x8) ShiftAllLeftConcat(shift uint8, y Uint64x8) Uint64x8 -/* ShiftAllLeftAndFillUpperFromMasked */ +/* ShiftAllLeftConcatMasked */ -// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -9415,9 +9415,9 @@ func (x Uint64x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint64x8) Uint64x8 // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 -func (x Int16x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x8, mask Mask16x8) Int16x8 +func (x Int16x8) ShiftAllLeftConcatMasked(shift uint8, y Int16x8, mask Mask16x8) Int16x8 -// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -9425,9 +9425,9 @@ func (x Int16x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x8, mask // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 -func (x Int16x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x16, mask Mask16x16) Int16x16 +func (x Int16x16) ShiftAllLeftConcatMasked(shift uint8, y Int16x16, mask Mask16x16) Int16x16 -// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -9435,9 +9435,9 @@ func (x Int16x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x16, ma // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 -func (x Int16x32) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x32, mask Mask16x32) Int16x32 +func (x Int16x32) ShiftAllLeftConcatMasked(shift uint8, y Int16x32, mask Mask16x32) Int16x32 -// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -9445,9 +9445,9 @@ func (x Int16x32) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x32, ma // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 -func (x Int32x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x4, mask Mask32x4) Int32x4 +func (x Int32x4) ShiftAllLeftConcatMasked(shift uint8, y Int32x4, mask Mask32x4) Int32x4 -// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -9455,9 +9455,9 @@ func (x Int32x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x4, mask // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 -func (x Int32x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x8, mask Mask32x8) Int32x8 +func (x Int32x8) ShiftAllLeftConcatMasked(shift uint8, y Int32x8, mask Mask32x8) Int32x8 -// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -9465,9 +9465,9 @@ func (x Int32x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x8, mask // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 -func (x Int32x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x16, mask Mask32x16) Int32x16 +func (x Int32x16) ShiftAllLeftConcatMasked(shift uint8, y Int32x16, mask Mask32x16) Int32x16 -// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -9475,9 +9475,9 @@ func (x Int32x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x16, ma // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 -func (x Int64x2) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x2, mask Mask64x2) Int64x2 +func (x Int64x2) ShiftAllLeftConcatMasked(shift uint8, y Int64x2, mask Mask64x2) Int64x2 -// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -9485,9 +9485,9 @@ func (x Int64x2) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x2, mask // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 -func (x Int64x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x4, mask Mask64x4) Int64x4 +func (x Int64x4) ShiftAllLeftConcatMasked(shift uint8, y Int64x4, mask Mask64x4) Int64x4 -// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -9495,9 +9495,9 @@ func (x Int64x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x4, mask // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 -func (x Int64x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x8, mask Mask64x8) Int64x8 +func (x Int64x8) ShiftAllLeftConcatMasked(shift uint8, y Int64x8, mask Mask64x8) Int64x8 -// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -9505,9 +9505,9 @@ func (x Int64x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x8, mask // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 -func (x Uint16x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x8, mask Mask16x8) Uint16x8 +func (x Uint16x8) ShiftAllLeftConcatMasked(shift uint8, y Uint16x8, mask Mask16x8) Uint16x8 -// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -9515,9 +9515,9 @@ func (x Uint16x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x8, ma // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 -func (x Uint16x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x16, mask Mask16x16) Uint16x16 +func (x Uint16x16) ShiftAllLeftConcatMasked(shift uint8, y Uint16x16, mask Mask16x16) Uint16x16 -// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -9525,9 +9525,9 @@ func (x Uint16x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x16, // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 -func (x Uint16x32) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x32, mask Mask16x32) Uint16x32 +func (x Uint16x32) ShiftAllLeftConcatMasked(shift uint8, y Uint16x32, mask Mask16x32) Uint16x32 -// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -9535,9 +9535,9 @@ func (x Uint16x32) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x32, // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 -func (x Uint32x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x4, mask Mask32x4) Uint32x4 +func (x Uint32x4) ShiftAllLeftConcatMasked(shift uint8, y Uint32x4, mask Mask32x4) Uint32x4 -// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -9545,9 +9545,9 @@ func (x Uint32x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x4, ma // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 -func (x Uint32x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x8, mask Mask32x8) Uint32x8 +func (x Uint32x8) ShiftAllLeftConcatMasked(shift uint8, y Uint32x8, mask Mask32x8) Uint32x8 -// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -9555,9 +9555,9 @@ func (x Uint32x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x8, ma // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 -func (x Uint32x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x16, mask Mask32x16) Uint32x16 +func (x Uint32x16) ShiftAllLeftConcatMasked(shift uint8, y Uint32x16, mask Mask32x16) Uint32x16 -// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -9565,9 +9565,9 @@ func (x Uint32x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x16, // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 -func (x Uint64x2) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint64x2, mask Mask64x2) Uint64x2 +func (x Uint64x2) ShiftAllLeftConcatMasked(shift uint8, y Uint64x2, mask Mask64x2) Uint64x2 -// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -9575,9 +9575,9 @@ func (x Uint64x2) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint64x2, ma // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 -func (x Uint64x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint64x4, mask Mask64x4) Uint64x4 +func (x Uint64x4) ShiftAllLeftConcatMasked(shift uint8, y Uint64x4, mask Mask64x4) Uint64x4 -// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -9585,7 +9585,7 @@ func (x Uint64x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint64x4, ma // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 -func (x Uint64x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint64x8, mask Mask64x8) Uint64x8 +func (x Uint64x8) ShiftAllLeftConcatMasked(shift uint8, y Uint64x8, mask Mask64x8) Uint64x8 /* ShiftAllLeftMasked */ @@ -9807,155 +9807,155 @@ func (x Uint64x4) ShiftAllRight(y uint64) Uint64x4 // Asm: VPSRLQ, CPU Feature: AVX512F func (x Uint64x8) ShiftAllRight(y uint64) Uint64x8 -/* ShiftAllRightAndFillUpperFrom */ +/* ShiftAllRightConcat */ -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 -func (x Int16x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Int16x8) Int16x8 +func (x Int16x8) ShiftAllRightConcat(shift uint8, y Int16x8) Int16x8 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 -func (x Int16x16) ShiftAllRightAndFillUpperFrom(shift uint8, y Int16x16) Int16x16 +func (x Int16x16) ShiftAllRightConcat(shift uint8, y Int16x16) Int16x16 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
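Editorial aside, not part of the patch: a minimal usage sketch of the masked immediate form under its new name. The import path and the wrapper function are assumptions made for illustration; only the method name, its signature, and the constant-shift requirement come from the doc comments above.

package example

import "simd" // assumed import path; this branch also builds the package as internal/simd

// funnelLeftMasked shifts each 32-bit lane of x left by 3 bits, fills the
// vacated low bits from the matching lane of y, and applies the result only
// under the write mask m, per the ShiftAllLeftConcatMasked doc comment.
func funnelLeftMasked(x, y simd.Int32x4, m simd.Mask32x4) simd.Int32x4 {
	// The shift count must be a constant; a non-constant value panics at run time.
	return x.ShiftAllLeftConcatMasked(3, y, m)
}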
// // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 -func (x Int16x32) ShiftAllRightAndFillUpperFrom(shift uint8, y Int16x32) Int16x32 +func (x Int16x32) ShiftAllRightConcat(shift uint8, y Int16x32) Int16x32 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 -func (x Int32x4) ShiftAllRightAndFillUpperFrom(shift uint8, y Int32x4) Int32x4 +func (x Int32x4) ShiftAllRightConcat(shift uint8, y Int32x4) Int32x4 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 -func (x Int32x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Int32x8) Int32x8 +func (x Int32x8) ShiftAllRightConcat(shift uint8, y Int32x8) Int32x8 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 -func (x Int32x16) ShiftAllRightAndFillUpperFrom(shift uint8, y Int32x16) Int32x16 +func (x Int32x16) ShiftAllRightConcat(shift uint8, y Int32x16) Int32x16 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 -func (x Int64x2) ShiftAllRightAndFillUpperFrom(shift uint8, y Int64x2) Int64x2 +func (x Int64x2) ShiftAllRightConcat(shift uint8, y Int64x2) Int64x2 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 -func (x Int64x4) ShiftAllRightAndFillUpperFrom(shift uint8, y Int64x4) Int64x4 +func (x Int64x4) ShiftAllRightConcat(shift uint8, y Int64x4) Int64x4 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 -func (x Int64x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Int64x8) Int64x8 +func (x Int64x8) ShiftAllRightConcat(shift uint8, y Int64x8) Int64x8 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 -func (x Uint16x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint16x8) Uint16x8 +func (x Uint16x8) ShiftAllRightConcat(shift uint8, y Uint16x8) Uint16x8 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 -func (x Uint16x16) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint16x16) Uint16x16 +func (x Uint16x16) ShiftAllRightConcat(shift uint8, y Uint16x16) Uint16x16 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 -func (x Uint16x32) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint16x32) Uint16x32 +func (x Uint16x32) ShiftAllRightConcat(shift uint8, y Uint16x32) Uint16x32 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 -func (x Uint32x4) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint32x4) Uint32x4 +func (x Uint32x4) ShiftAllRightConcat(shift uint8, y Uint32x4) Uint32x4 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 -func (x Uint32x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint32x8) Uint32x8 +func (x Uint32x8) ShiftAllRightConcat(shift uint8, y Uint32x8) Uint32x8 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 -func (x Uint32x16) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint32x16) Uint32x16 +func (x Uint32x16) ShiftAllRightConcat(shift uint8, y Uint32x16) Uint32x16 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 -func (x Uint64x2) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint64x2) Uint64x2 +func (x Uint64x2) ShiftAllRightConcat(shift uint8, y Uint64x2) Uint64x2 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 -func (x Uint64x4) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint64x4) Uint64x4 +func (x Uint64x4) ShiftAllRightConcat(shift uint8, y Uint64x4) Uint64x4 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 -func (x Uint64x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint64x8) Uint64x8 +func (x Uint64x8) ShiftAllRightConcat(shift uint8, y Uint64x8) Uint64x8 -/* ShiftAllRightAndFillUpperFromMasked */ +/* ShiftAllRightConcatMasked */ -// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -9963,9 +9963,9 @@ func (x Uint64x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint64x8) Uint64x // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 -func (x Int16x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x8, mask Mask16x8) Int16x8 +func (x Int16x8) ShiftAllRightConcatMasked(shift uint8, y Int16x8, mask Mask16x8) Int16x8 -// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -9973,9 +9973,9 @@ func (x Int16x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x8, mas // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 -func (x Int16x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x16, mask Mask16x16) Int16x16 +func (x Int16x16) ShiftAllRightConcatMasked(shift uint8, y Int16x16, mask Mask16x16) Int16x16 -// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -9983,9 +9983,9 @@ func (x Int16x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x16, m // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 -func (x Int16x32) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x32, mask Mask16x32) Int16x32 +func (x Int16x32) ShiftAllRightConcatMasked(shift uint8, y Int16x32, mask Mask16x32) Int16x32 -// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -9993,9 +9993,9 @@ func (x Int16x32) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x32, m // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
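Editorial aside, not part of the patch: one way to read the ShiftAllRightConcat doc comment is as a lane-wise "funnel" shift. The scalar sketch below is an interpretation of that wording for 32-bit lanes, not a verified restatement of the VPSHRD instruction semantics; other element widths follow the same pattern with their own width.

package example

// shiftAllRightConcat32 mirrors, per the doc comment, what ShiftAllRightConcat
// computes in each 32-bit lane: x is shifted right by s and the vacated upper
// bits are filled from the low bits of y.
func shiftAllRightConcat32(x, y uint32, s uint) uint32 {
	// s is expected in [0, 31]; the intrinsic only consumes the low bits of
	// the immediate. For s == 0 the Go shift y << 32 evaluates to 0, so the
	// result is just x.
	return (x >> s) | (y << (32 - s))
}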
// // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 -func (x Int32x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x4, mask Mask32x4) Int32x4 +func (x Int32x4) ShiftAllRightConcatMasked(shift uint8, y Int32x4, mask Mask32x4) Int32x4 -// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -10003,9 +10003,9 @@ func (x Int32x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x4, mas // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 -func (x Int32x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x8, mask Mask32x8) Int32x8 +func (x Int32x8) ShiftAllRightConcatMasked(shift uint8, y Int32x8, mask Mask32x8) Int32x8 -// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -10013,9 +10013,9 @@ func (x Int32x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x8, mas // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 -func (x Int32x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x16, mask Mask32x16) Int32x16 +func (x Int32x16) ShiftAllRightConcatMasked(shift uint8, y Int32x16, mask Mask32x16) Int32x16 -// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -10023,9 +10023,9 @@ func (x Int32x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x16, m // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 -func (x Int64x2) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x2, mask Mask64x2) Int64x2 +func (x Int64x2) ShiftAllRightConcatMasked(shift uint8, y Int64x2, mask Mask64x2) Int64x2 -// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -10033,9 +10033,9 @@ func (x Int64x2) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x2, mas // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 -func (x Int64x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x4, mask Mask64x4) Int64x4 +func (x Int64x4) ShiftAllRightConcatMasked(shift uint8, y Int64x4, mask Mask64x4) Int64x4 -// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -10043,9 +10043,9 @@ func (x Int64x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x4, mas // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 -func (x Int64x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x8, mask Mask64x8) Int64x8 +func (x Int64x8) ShiftAllRightConcatMasked(shift uint8, y Int64x8, mask Mask64x8) Int64x8 -// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -10053,9 +10053,9 @@ func (x Int64x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x8, mas // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 -func (x Uint16x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x8, mask Mask16x8) Uint16x8 +func (x Uint16x8) ShiftAllRightConcatMasked(shift uint8, y Uint16x8, mask Mask16x8) Uint16x8 -// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -10063,9 +10063,9 @@ func (x Uint16x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x8, m // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 -func (x Uint16x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x16, mask Mask16x16) Uint16x16 +func (x Uint16x16) ShiftAllRightConcatMasked(shift uint8, y Uint16x16, mask Mask16x16) Uint16x16 -// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -10073,9 +10073,9 @@ func (x Uint16x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x16, // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 -func (x Uint16x32) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x32, mask Mask16x32) Uint16x32 +func (x Uint16x32) ShiftAllRightConcatMasked(shift uint8, y Uint16x32, mask Mask16x32) Uint16x32 -// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -10083,9 +10083,9 @@ func (x Uint16x32) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x32, // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 -func (x Uint32x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x4, mask Mask32x4) Uint32x4 +func (x Uint32x4) ShiftAllRightConcatMasked(shift uint8, y Uint32x4, mask Mask32x4) Uint32x4 -// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -10093,9 +10093,9 @@ func (x Uint32x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x4, m // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 -func (x Uint32x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x8, mask Mask32x8) Uint32x8 +func (x Uint32x8) ShiftAllRightConcatMasked(shift uint8, y Uint32x8, mask Mask32x8) Uint32x8 -// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -10103,9 +10103,9 @@ func (x Uint32x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x8, m // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 -func (x Uint32x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x16, mask Mask32x16) Uint32x16 +func (x Uint32x16) ShiftAllRightConcatMasked(shift uint8, y Uint32x16, mask Mask32x16) Uint32x16 -// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -10113,9 +10113,9 @@ func (x Uint32x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x16, // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 -func (x Uint64x2) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint64x2, mask Mask64x2) Uint64x2 +func (x Uint64x2) ShiftAllRightConcatMasked(shift uint8, y Uint64x2, mask Mask64x2) Uint64x2 -// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -10123,9 +10123,9 @@ func (x Uint64x2) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint64x2, m // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 -func (x Uint64x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint64x4, mask Mask64x4) Uint64x4 +func (x Uint64x4) ShiftAllRightConcatMasked(shift uint8, y Uint64x4, mask Mask64x4) Uint64x4 -// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -10133,7 +10133,7 @@ func (x Uint64x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint64x4, m // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 -func (x Uint64x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint64x8, mask Mask64x8) Uint64x8 +func (x Uint64x8) ShiftAllRightConcatMasked(shift uint8, y Uint64x8, mask Mask64x8) Uint64x8 /* ShiftAllRightMasked */ @@ -10355,261 +10355,261 @@ func (x Uint64x4) ShiftLeft(y Uint64x4) Uint64x4 // Asm: VPSLLVQ, CPU Feature: AVX512F func (x Uint64x8) ShiftLeft(y Uint64x8) Uint64x8 -/* ShiftLeftAndFillUpperFrom */ +/* ShiftLeftConcat */ -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcat shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 -func (x Int16x8) ShiftLeftAndFillUpperFrom(y Int16x8, z Int16x8) Int16x8 +func (x Int16x8) ShiftLeftConcat(y Int16x8, z Int16x8) Int16x8 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcat shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
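Editorial aside, not part of the patch: a hedged note on how the immediate concat forms above can be used. Passing x itself as the fill operand turns the concatenated shift into a lane-wise rotate; this follows from the doc-comment semantics rather than being a separate intrinsic, and the helper name and import path are made up for illustration.

package example

import "simd" // assumed import path

// rotateRight5 rotates each 16-bit lane of x right by 5 bits by using x as
// its own fill source, i.e. (x >> 5) | (x << 11) per lane.
func rotateRight5(x simd.Uint16x32) simd.Uint16x32 {
	return x.ShiftAllRightConcat(5, x)
}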
// // Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 -func (x Int16x16) ShiftLeftAndFillUpperFrom(y Int16x16, z Int16x16) Int16x16 +func (x Int16x16) ShiftLeftConcat(y Int16x16, z Int16x16) Int16x16 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcat shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 -func (x Int16x32) ShiftLeftAndFillUpperFrom(y Int16x32, z Int16x32) Int16x32 +func (x Int16x32) ShiftLeftConcat(y Int16x32, z Int16x32) Int16x32 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcat shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 -func (x Int32x4) ShiftLeftAndFillUpperFrom(y Int32x4, z Int32x4) Int32x4 +func (x Int32x4) ShiftLeftConcat(y Int32x4, z Int32x4) Int32x4 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcat shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 -func (x Int32x8) ShiftLeftAndFillUpperFrom(y Int32x8, z Int32x8) Int32x8 +func (x Int32x8) ShiftLeftConcat(y Int32x8, z Int32x8) Int32x8 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcat shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 -func (x Int32x16) ShiftLeftAndFillUpperFrom(y Int32x16, z Int32x16) Int32x16 +func (x Int32x16) ShiftLeftConcat(y Int32x16, z Int32x16) Int32x16 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcat shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 -func (x Int64x2) ShiftLeftAndFillUpperFrom(y Int64x2, z Int64x2) Int64x2 +func (x Int64x2) ShiftLeftConcat(y Int64x2, z Int64x2) Int64x2 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcat shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
// // Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 -func (x Int64x4) ShiftLeftAndFillUpperFrom(y Int64x4, z Int64x4) Int64x4 +func (x Int64x4) ShiftLeftConcat(y Int64x4, z Int64x4) Int64x4 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcat shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 -func (x Int64x8) ShiftLeftAndFillUpperFrom(y Int64x8, z Int64x8) Int64x8 +func (x Int64x8) ShiftLeftConcat(y Int64x8, z Int64x8) Int64x8 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcat shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 -func (x Uint16x8) ShiftLeftAndFillUpperFrom(y Uint16x8, z Uint16x8) Uint16x8 +func (x Uint16x8) ShiftLeftConcat(y Uint16x8, z Uint16x8) Uint16x8 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcat shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 -func (x Uint16x16) ShiftLeftAndFillUpperFrom(y Uint16x16, z Uint16x16) Uint16x16 +func (x Uint16x16) ShiftLeftConcat(y Uint16x16, z Uint16x16) Uint16x16 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcat shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 -func (x Uint16x32) ShiftLeftAndFillUpperFrom(y Uint16x32, z Uint16x32) Uint16x32 +func (x Uint16x32) ShiftLeftConcat(y Uint16x32, z Uint16x32) Uint16x32 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcat shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 -func (x Uint32x4) ShiftLeftAndFillUpperFrom(y Uint32x4, z Uint32x4) Uint32x4 +func (x Uint32x4) ShiftLeftConcat(y Uint32x4, z Uint32x4) Uint32x4 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcat shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
// // Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 -func (x Uint32x8) ShiftLeftAndFillUpperFrom(y Uint32x8, z Uint32x8) Uint32x8 +func (x Uint32x8) ShiftLeftConcat(y Uint32x8, z Uint32x8) Uint32x8 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcat shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 -func (x Uint32x16) ShiftLeftAndFillUpperFrom(y Uint32x16, z Uint32x16) Uint32x16 +func (x Uint32x16) ShiftLeftConcat(y Uint32x16, z Uint32x16) Uint32x16 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcat shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 -func (x Uint64x2) ShiftLeftAndFillUpperFrom(y Uint64x2, z Uint64x2) Uint64x2 +func (x Uint64x2) ShiftLeftConcat(y Uint64x2, z Uint64x2) Uint64x2 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcat shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 -func (x Uint64x4) ShiftLeftAndFillUpperFrom(y Uint64x4, z Uint64x4) Uint64x4 +func (x Uint64x4) ShiftLeftConcat(y Uint64x4, z Uint64x4) Uint64x4 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcat shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 -func (x Uint64x8) ShiftLeftAndFillUpperFrom(y Uint64x8, z Uint64x8) Uint64x8 +func (x Uint64x8) ShiftLeftConcat(y Uint64x8, z Uint64x8) Uint64x8 -/* ShiftLeftAndFillUpperFromMasked */ +/* ShiftLeftConcatMasked */ -// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 -func (x Int16x8) ShiftLeftAndFillUpperFromMasked(y Int16x8, z Int16x8, mask Mask16x8) Int16x8 +func (x Int16x8) ShiftLeftConcatMasked(y Int16x8, z Int16x8, mask Mask16x8) Int16x8 -// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. 
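Editorial aside, not part of the patch: the variable-count forms take the per-lane shift counts as a vector rather than an immediate. A minimal sketch follows, with an assumed import path and helper name; the operand roles (y carries the counts, z supplies the fill bits) are taken from the ShiftLeftConcat doc comments.

package example

import "simd" // assumed import path

// funnelLeftVar shifts each 64-bit lane of x left by the count held in the
// matching lane of counts (only the low bits of each count are used) and
// fills the vacated low bits from the corresponding lane of z.
func funnelLeftVar(x, counts, z simd.Int64x4) simd.Int64x4 {
	return x.ShiftLeftConcat(counts, z)
}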
// // Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 -func (x Int16x16) ShiftLeftAndFillUpperFromMasked(y Int16x16, z Int16x16, mask Mask16x16) Int16x16 +func (x Int16x16) ShiftLeftConcatMasked(y Int16x16, z Int16x16, mask Mask16x16) Int16x16 -// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 -func (x Int16x32) ShiftLeftAndFillUpperFromMasked(y Int16x32, z Int16x32, mask Mask16x32) Int16x32 +func (x Int16x32) ShiftLeftConcatMasked(y Int16x32, z Int16x32, mask Mask16x32) Int16x32 -// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 -func (x Int32x4) ShiftLeftAndFillUpperFromMasked(y Int32x4, z Int32x4, mask Mask32x4) Int32x4 +func (x Int32x4) ShiftLeftConcatMasked(y Int32x4, z Int32x4, mask Mask32x4) Int32x4 -// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 -func (x Int32x8) ShiftLeftAndFillUpperFromMasked(y Int32x8, z Int32x8, mask Mask32x8) Int32x8 +func (x Int32x8) ShiftLeftConcatMasked(y Int32x8, z Int32x8, mask Mask32x8) Int32x8 -// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 -func (x Int32x16) ShiftLeftAndFillUpperFromMasked(y Int32x16, z Int32x16, mask Mask32x16) Int32x16 +func (x Int32x16) ShiftLeftConcatMasked(y Int32x16, z Int32x16, mask Mask32x16) Int32x16 -// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. 
// // Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 -func (x Int64x2) ShiftLeftAndFillUpperFromMasked(y Int64x2, z Int64x2, mask Mask64x2) Int64x2 +func (x Int64x2) ShiftLeftConcatMasked(y Int64x2, z Int64x2, mask Mask64x2) Int64x2 -// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 -func (x Int64x4) ShiftLeftAndFillUpperFromMasked(y Int64x4, z Int64x4, mask Mask64x4) Int64x4 +func (x Int64x4) ShiftLeftConcatMasked(y Int64x4, z Int64x4, mask Mask64x4) Int64x4 -// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 -func (x Int64x8) ShiftLeftAndFillUpperFromMasked(y Int64x8, z Int64x8, mask Mask64x8) Int64x8 +func (x Int64x8) ShiftLeftConcatMasked(y Int64x8, z Int64x8, mask Mask64x8) Int64x8 -// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 -func (x Uint16x8) ShiftLeftAndFillUpperFromMasked(y Uint16x8, z Uint16x8, mask Mask16x8) Uint16x8 +func (x Uint16x8) ShiftLeftConcatMasked(y Uint16x8, z Uint16x8, mask Mask16x8) Uint16x8 -// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 -func (x Uint16x16) ShiftLeftAndFillUpperFromMasked(y Uint16x16, z Uint16x16, mask Mask16x16) Uint16x16 +func (x Uint16x16) ShiftLeftConcatMasked(y Uint16x16, z Uint16x16, mask Mask16x16) Uint16x16 -// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. 
// // Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 -func (x Uint16x32) ShiftLeftAndFillUpperFromMasked(y Uint16x32, z Uint16x32, mask Mask16x32) Uint16x32 +func (x Uint16x32) ShiftLeftConcatMasked(y Uint16x32, z Uint16x32, mask Mask16x32) Uint16x32 -// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 -func (x Uint32x4) ShiftLeftAndFillUpperFromMasked(y Uint32x4, z Uint32x4, mask Mask32x4) Uint32x4 +func (x Uint32x4) ShiftLeftConcatMasked(y Uint32x4, z Uint32x4, mask Mask32x4) Uint32x4 -// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 -func (x Uint32x8) ShiftLeftAndFillUpperFromMasked(y Uint32x8, z Uint32x8, mask Mask32x8) Uint32x8 +func (x Uint32x8) ShiftLeftConcatMasked(y Uint32x8, z Uint32x8, mask Mask32x8) Uint32x8 -// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 -func (x Uint32x16) ShiftLeftAndFillUpperFromMasked(y Uint32x16, z Uint32x16, mask Mask32x16) Uint32x16 +func (x Uint32x16) ShiftLeftConcatMasked(y Uint32x16, z Uint32x16, mask Mask32x16) Uint32x16 -// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 -func (x Uint64x2) ShiftLeftAndFillUpperFromMasked(y Uint64x2, z Uint64x2, mask Mask64x2) Uint64x2 +func (x Uint64x2) ShiftLeftConcatMasked(y Uint64x2, z Uint64x2, mask Mask64x2) Uint64x2 -// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. 
// // Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 -func (x Uint64x4) ShiftLeftAndFillUpperFromMasked(y Uint64x4, z Uint64x4, mask Mask64x4) Uint64x4 +func (x Uint64x4) ShiftLeftConcatMasked(y Uint64x4, z Uint64x4, mask Mask64x4) Uint64x4 -// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 -func (x Uint64x8) ShiftLeftAndFillUpperFromMasked(y Uint64x8, z Uint64x8, mask Mask64x8) Uint64x8 +func (x Uint64x8) ShiftLeftConcatMasked(y Uint64x8, z Uint64x8, mask Mask64x8) Uint64x8 /* ShiftLeftMasked */ @@ -10831,261 +10831,261 @@ func (x Uint64x4) ShiftRight(y Uint64x4) Uint64x4 // Asm: VPSRLVQ, CPU Feature: AVX512F func (x Uint64x8) ShiftRight(y Uint64x8) Uint64x8 -/* ShiftRightAndFillUpperFrom */ +/* ShiftRightConcat */ -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 -func (x Int16x8) ShiftRightAndFillUpperFrom(y Int16x8, z Int16x8) Int16x8 +func (x Int16x8) ShiftRightConcat(y Int16x8, z Int16x8) Int16x8 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 -func (x Int16x16) ShiftRightAndFillUpperFrom(y Int16x16, z Int16x16) Int16x16 +func (x Int16x16) ShiftRightConcat(y Int16x16, z Int16x16) Int16x16 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 -func (x Int16x32) ShiftRightAndFillUpperFrom(y Int16x32, z Int16x32) Int16x32 +func (x Int16x32) ShiftRightConcat(y Int16x32, z Int16x32) Int16x32 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// // Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 -func (x Int32x4) ShiftRightAndFillUpperFrom(y Int32x4, z Int32x4) Int32x4 +func (x Int32x4) ShiftRightConcat(y Int32x4, z Int32x4) Int32x4 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 -func (x Int32x8) ShiftRightAndFillUpperFrom(y Int32x8, z Int32x8) Int32x8 +func (x Int32x8) ShiftRightConcat(y Int32x8, z Int32x8) Int32x8 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 -func (x Int32x16) ShiftRightAndFillUpperFrom(y Int32x16, z Int32x16) Int32x16 +func (x Int32x16) ShiftRightConcat(y Int32x16, z Int32x16) Int32x16 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 -func (x Int64x2) ShiftRightAndFillUpperFrom(y Int64x2, z Int64x2) Int64x2 +func (x Int64x2) ShiftRightConcat(y Int64x2, z Int64x2) Int64x2 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 -func (x Int64x4) ShiftRightAndFillUpperFrom(y Int64x4, z Int64x4) Int64x4 +func (x Int64x4) ShiftRightConcat(y Int64x4, z Int64x4) Int64x4 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 -func (x Int64x8) ShiftRightAndFillUpperFrom(y Int64x8, z Int64x8) Int64x8 +func (x Int64x8) ShiftRightConcat(y Int64x8, z Int64x8) Int64x8 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// // Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 -func (x Uint16x8) ShiftRightAndFillUpperFrom(y Uint16x8, z Uint16x8) Uint16x8 +func (x Uint16x8) ShiftRightConcat(y Uint16x8, z Uint16x8) Uint16x8 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 -func (x Uint16x16) ShiftRightAndFillUpperFrom(y Uint16x16, z Uint16x16) Uint16x16 +func (x Uint16x16) ShiftRightConcat(y Uint16x16, z Uint16x16) Uint16x16 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 -func (x Uint16x32) ShiftRightAndFillUpperFrom(y Uint16x32, z Uint16x32) Uint16x32 +func (x Uint16x32) ShiftRightConcat(y Uint16x32, z Uint16x32) Uint16x32 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 -func (x Uint32x4) ShiftRightAndFillUpperFrom(y Uint32x4, z Uint32x4) Uint32x4 +func (x Uint32x4) ShiftRightConcat(y Uint32x4, z Uint32x4) Uint32x4 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 -func (x Uint32x8) ShiftRightAndFillUpperFrom(y Uint32x8, z Uint32x8) Uint32x8 +func (x Uint32x8) ShiftRightConcat(y Uint32x8, z Uint32x8) Uint32x8 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 -func (x Uint32x16) ShiftRightAndFillUpperFrom(y Uint32x16, z Uint32x16) Uint32x16 +func (x Uint32x16) ShiftRightConcat(y Uint32x16, z Uint32x16) Uint32x16 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// // Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 -func (x Uint64x2) ShiftRightAndFillUpperFrom(y Uint64x2, z Uint64x2) Uint64x2 +func (x Uint64x2) ShiftRightConcat(y Uint64x2, z Uint64x2) Uint64x2 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 -func (x Uint64x4) ShiftRightAndFillUpperFrom(y Uint64x4, z Uint64x4) Uint64x4 +func (x Uint64x4) ShiftRightConcat(y Uint64x4, z Uint64x4) Uint64x4 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 -func (x Uint64x8) ShiftRightAndFillUpperFrom(y Uint64x8, z Uint64x8) Uint64x8 +func (x Uint64x8) ShiftRightConcat(y Uint64x8, z Uint64x8) Uint64x8 -/* ShiftRightAndFillUpperFromMasked */ +/* ShiftRightConcatMasked */ -// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 -func (x Int16x8) ShiftRightAndFillUpperFromMasked(y Int16x8, z Int16x8, mask Mask16x8) Int16x8 +func (x Int16x8) ShiftRightConcatMasked(y Int16x8, z Int16x8, mask Mask16x8) Int16x8 -// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 -func (x Int16x16) ShiftRightAndFillUpperFromMasked(y Int16x16, z Int16x16, mask Mask16x16) Int16x16 +func (x Int16x16) ShiftRightConcatMasked(y Int16x16, z Int16x16, mask Mask16x16) Int16x16 -// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. 
// // Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 -func (x Int16x32) ShiftRightAndFillUpperFromMasked(y Int16x32, z Int16x32, mask Mask16x32) Int16x32 +func (x Int16x32) ShiftRightConcatMasked(y Int16x32, z Int16x32, mask Mask16x32) Int16x32 -// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 -func (x Int32x4) ShiftRightAndFillUpperFromMasked(y Int32x4, z Int32x4, mask Mask32x4) Int32x4 +func (x Int32x4) ShiftRightConcatMasked(y Int32x4, z Int32x4, mask Mask32x4) Int32x4 -// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 -func (x Int32x8) ShiftRightAndFillUpperFromMasked(y Int32x8, z Int32x8, mask Mask32x8) Int32x8 +func (x Int32x8) ShiftRightConcatMasked(y Int32x8, z Int32x8, mask Mask32x8) Int32x8 -// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 -func (x Int32x16) ShiftRightAndFillUpperFromMasked(y Int32x16, z Int32x16, mask Mask32x16) Int32x16 +func (x Int32x16) ShiftRightConcatMasked(y Int32x16, z Int32x16, mask Mask32x16) Int32x16 -// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 -func (x Int64x2) ShiftRightAndFillUpperFromMasked(y Int64x2, z Int64x2, mask Mask64x2) Int64x2 +func (x Int64x2) ShiftRightConcatMasked(y Int64x2, z Int64x2, mask Mask64x2) Int64x2 -// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. 
// // Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 -func (x Int64x4) ShiftRightAndFillUpperFromMasked(y Int64x4, z Int64x4, mask Mask64x4) Int64x4 +func (x Int64x4) ShiftRightConcatMasked(y Int64x4, z Int64x4, mask Mask64x4) Int64x4 -// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 -func (x Int64x8) ShiftRightAndFillUpperFromMasked(y Int64x8, z Int64x8, mask Mask64x8) Int64x8 +func (x Int64x8) ShiftRightConcatMasked(y Int64x8, z Int64x8, mask Mask64x8) Int64x8 -// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 -func (x Uint16x8) ShiftRightAndFillUpperFromMasked(y Uint16x8, z Uint16x8, mask Mask16x8) Uint16x8 +func (x Uint16x8) ShiftRightConcatMasked(y Uint16x8, z Uint16x8, mask Mask16x8) Uint16x8 -// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 -func (x Uint16x16) ShiftRightAndFillUpperFromMasked(y Uint16x16, z Uint16x16, mask Mask16x16) Uint16x16 +func (x Uint16x16) ShiftRightConcatMasked(y Uint16x16, z Uint16x16, mask Mask16x16) Uint16x16 -// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 -func (x Uint16x32) ShiftRightAndFillUpperFromMasked(y Uint16x32, z Uint16x32, mask Mask16x32) Uint16x32 +func (x Uint16x32) ShiftRightConcatMasked(y Uint16x32, z Uint16x32, mask Mask16x32) Uint16x32 -// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. 
// // Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 -func (x Uint32x4) ShiftRightAndFillUpperFromMasked(y Uint32x4, z Uint32x4, mask Mask32x4) Uint32x4 +func (x Uint32x4) ShiftRightConcatMasked(y Uint32x4, z Uint32x4, mask Mask32x4) Uint32x4 -// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 -func (x Uint32x8) ShiftRightAndFillUpperFromMasked(y Uint32x8, z Uint32x8, mask Mask32x8) Uint32x8 +func (x Uint32x8) ShiftRightConcatMasked(y Uint32x8, z Uint32x8, mask Mask32x8) Uint32x8 -// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 -func (x Uint32x16) ShiftRightAndFillUpperFromMasked(y Uint32x16, z Uint32x16, mask Mask32x16) Uint32x16 +func (x Uint32x16) ShiftRightConcatMasked(y Uint32x16, z Uint32x16, mask Mask32x16) Uint32x16 -// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 -func (x Uint64x2) ShiftRightAndFillUpperFromMasked(y Uint64x2, z Uint64x2, mask Mask64x2) Uint64x2 +func (x Uint64x2) ShiftRightConcatMasked(y Uint64x2, z Uint64x2, mask Mask64x2) Uint64x2 -// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 -func (x Uint64x4) ShiftRightAndFillUpperFromMasked(y Uint64x4, z Uint64x4, mask Mask64x4) Uint64x4 +func (x Uint64x4) ShiftRightConcatMasked(y Uint64x4, z Uint64x4, mask Mask64x4) Uint64x4 -// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. 
// // Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 -func (x Uint64x8) ShiftRightAndFillUpperFromMasked(y Uint64x8, z Uint64x8, mask Mask64x8) Uint64x8 +func (x Uint64x8) ShiftRightConcatMasked(y Uint64x8, z Uint64x8, mask Mask64x8) Uint64x8 /* ShiftRightMasked */ From c2d775d40168e44d1e2ad5dc88f42dba6c83c76e Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Thu, 31 Jul 2025 23:51:50 +0000 Subject: [PATCH 108/139] [dev.simd] cmd/compile, simd: change PairDotProdAccumulate to AddDotProd This CL is generated by CL 692219. Change-Id: I50fa919f1edc5c6505bc6d3238f65b37fc7628b5 Reviewed-on: https://go-review.googlesource.com/c/go/+/692156 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/amd64/simdssa.go | 28 +- .../compile/internal/ssa/_gen/simdAMD64.rules | 24 +- .../internal/ssa/_gen/simdgenericOps.go | 24 +- src/cmd/compile/internal/ssa/opGen.go | 144 ++++----- src/cmd/compile/internal/ssa/rewriteAMD64.go | 300 +++++++++--------- .../compile/internal/ssagen/simdintrinsics.go | 24 +- src/simd/ops_amd64.go | 160 +++++----- src/simd/simd_test.go | 2 +- 8 files changed, 353 insertions(+), 353 deletions(-) diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index d4126cef1e3eaf..15ffbf66fa7cbb 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -813,7 +813,10 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPCMPUQMasked512: p = simdV2kkImm8(s, v) - case ssa.OpAMD64VFMADD213PS128, + case ssa.OpAMD64VPDPWSSD128, + ssa.OpAMD64VPDPWSSD256, + ssa.OpAMD64VPDPWSSD512, + ssa.OpAMD64VFMADD213PS128, ssa.OpAMD64VFMADD213PS256, ssa.OpAMD64VFMADD213PS512, ssa.OpAMD64VFMADD213PD128, @@ -831,9 +834,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VFMSUBADD213PD128, ssa.OpAMD64VFMSUBADD213PD256, ssa.OpAMD64VFMSUBADD213PD512, - ssa.OpAMD64VPDPWSSD128, - ssa.OpAMD64VPDPWSSD256, - ssa.OpAMD64VPDPWSSD512, ssa.OpAMD64VPERMI2B128, ssa.OpAMD64VPERMI2B256, ssa.OpAMD64VPERMI2B512, @@ -881,7 +881,10 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPDPBUSD512: p = simdV31ResultInArg0(s, v) - case ssa.OpAMD64VFMADD213PSMasked128, + case ssa.OpAMD64VPDPWSSDMasked128, + ssa.OpAMD64VPDPWSSDMasked256, + ssa.OpAMD64VPDPWSSDMasked512, + ssa.OpAMD64VFMADD213PSMasked128, ssa.OpAMD64VFMADD213PSMasked256, ssa.OpAMD64VFMADD213PSMasked512, ssa.OpAMD64VFMADD213PDMasked128, @@ -899,9 +902,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VFMSUBADD213PDMasked128, ssa.OpAMD64VFMSUBADD213PDMasked256, ssa.OpAMD64VFMSUBADD213PDMasked512, - ssa.OpAMD64VPDPWSSDMasked128, - ssa.OpAMD64VPDPWSSDMasked256, - ssa.OpAMD64VPDPWSSDMasked512, ssa.OpAMD64VPERMI2BMasked128, ssa.OpAMD64VPERMI2BMasked256, ssa.OpAMD64VPERMI2BMasked512, @@ -1064,6 +1064,9 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPABSQMasked128, ssa.OpAMD64VPABSQMasked256, ssa.OpAMD64VPABSQMasked512, + ssa.OpAMD64VPDPWSSDMasked128, + ssa.OpAMD64VPDPWSSDMasked256, + ssa.OpAMD64VPDPWSSDMasked512, ssa.OpAMD64VADDPSMasked128, ssa.OpAMD64VADDPSMasked256, ssa.OpAMD64VADDPSMasked512, @@ -1280,9 +1283,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPORQMasked128, ssa.OpAMD64VPORQMasked256, ssa.OpAMD64VPORQMasked512, - ssa.OpAMD64VPDPWSSDMasked128, - ssa.OpAMD64VPDPWSSDMasked256, - ssa.OpAMD64VPDPWSSDMasked512, ssa.OpAMD64VPMADDWDMasked128, ssa.OpAMD64VPMADDWDMasked256, ssa.OpAMD64VPMADDWDMasked512, @@ -1354,15 
+1354,15 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPRORVQMasked128, ssa.OpAMD64VPRORVQMasked256, ssa.OpAMD64VPRORVQMasked512, + ssa.OpAMD64VPDPWSSDSMasked128, + ssa.OpAMD64VPDPWSSDSMasked256, + ssa.OpAMD64VPDPWSSDSMasked512, ssa.OpAMD64VPADDSBMasked128, ssa.OpAMD64VPADDSBMasked256, ssa.OpAMD64VPADDSBMasked512, ssa.OpAMD64VPADDSWMasked128, ssa.OpAMD64VPADDSWMasked256, ssa.OpAMD64VPADDSWMasked512, - ssa.OpAMD64VPDPWSSDSMasked128, - ssa.OpAMD64VPDPWSSDSMasked256, - ssa.OpAMD64VPDPWSSDSMasked512, ssa.OpAMD64VPSUBSBMasked128, ssa.OpAMD64VPSUBSBMasked256, ssa.OpAMD64VPSUBSBMasked512, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 38b602f35b8624..7b7cbb9dc76791 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -54,6 +54,12 @@ (AddUint64x2 ...) => (VPADDQ128 ...) (AddUint64x4 ...) => (VPADDQ256 ...) (AddUint64x8 ...) => (VPADDQ512 ...) +(AddDotProdInt32x4 ...) => (VPDPWSSD128 ...) +(AddDotProdInt32x8 ...) => (VPDPWSSD256 ...) +(AddDotProdInt32x16 ...) => (VPDPWSSD512 ...) +(AddDotProdMaskedInt32x4 x y z mask) => (VPDPWSSDMasked128 x y z (VPMOVVec32x4ToM mask)) +(AddDotProdMaskedInt32x8 x y z mask) => (VPDPWSSDMasked256 x y z (VPMOVVec32x8ToM mask)) +(AddDotProdMaskedInt32x16 x y z mask) => (VPDPWSSDMasked512 x y z (VPMOVVec32x16ToM mask)) (AddMaskedFloat32x4 x y mask) => (VADDPSMasked128 x y (VPMOVVec32x4ToM mask)) (AddMaskedFloat32x8 x y mask) => (VADDPSMasked256 x y (VPMOVVec32x8ToM mask)) (AddMaskedFloat32x16 x y mask) => (VADDPSMasked512 x y (VPMOVVec32x16ToM mask)) @@ -994,12 +1000,6 @@ (PairDotProdInt16x8 ...) => (VPMADDWD128 ...) (PairDotProdInt16x16 ...) => (VPMADDWD256 ...) (PairDotProdInt16x32 ...) => (VPMADDWD512 ...) -(PairDotProdAccumulateInt32x4 ...) => (VPDPWSSD128 ...) -(PairDotProdAccumulateInt32x8 ...) => (VPDPWSSD256 ...) -(PairDotProdAccumulateInt32x16 ...) => (VPDPWSSD512 ...) -(PairDotProdAccumulateMaskedInt32x4 x y z mask) => (VPDPWSSDMasked128 x y z (VPMOVVec32x4ToM mask)) -(PairDotProdAccumulateMaskedInt32x8 x y z mask) => (VPDPWSSDMasked256 x y z (VPMOVVec32x8ToM mask)) -(PairDotProdAccumulateMaskedInt32x16 x y z mask) => (VPDPWSSDMasked512 x y z (VPMOVVec32x16ToM mask)) (PairDotProdMaskedInt16x8 x y mask) => (VPMADDWDMasked128 x y (VPMOVVec16x8ToM mask)) (PairDotProdMaskedInt16x16 x y mask) => (VPMADDWDMasked256 x y (VPMOVVec16x16ToM mask)) (PairDotProdMaskedInt16x32 x y mask) => (VPMADDWDMasked512 x y (VPMOVVec16x32ToM mask)) @@ -1307,6 +1307,12 @@ (SaturatedAddUint16x8 ...) => (VPADDSW128 ...) (SaturatedAddUint16x16 ...) => (VPADDSW256 ...) (SaturatedAddUint16x32 ...) => (VPADDSW512 ...) +(SaturatedAddDotProdInt32x4 ...) => (VPDPWSSDS128 ...) +(SaturatedAddDotProdInt32x8 ...) => (VPDPWSSDS256 ...) +(SaturatedAddDotProdInt32x16 ...) => (VPDPWSSDS512 ...) 
+(SaturatedAddDotProdMaskedInt32x4 x y z mask) => (VPDPWSSDSMasked128 x y z (VPMOVVec32x4ToM mask)) +(SaturatedAddDotProdMaskedInt32x8 x y z mask) => (VPDPWSSDSMasked256 x y z (VPMOVVec32x8ToM mask)) +(SaturatedAddDotProdMaskedInt32x16 x y z mask) => (VPDPWSSDSMasked512 x y z (VPMOVVec32x16ToM mask)) (SaturatedAddMaskedInt8x16 x y mask) => (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) (SaturatedAddMaskedInt8x32 x y mask) => (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) (SaturatedAddMaskedInt8x64 x y mask) => (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) @@ -1319,12 +1325,6 @@ (SaturatedAddMaskedUint16x8 x y mask) => (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) (SaturatedAddMaskedUint16x16 x y mask) => (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) (SaturatedAddMaskedUint16x32 x y mask) => (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) -(SaturatedPairDotProdAccumulateInt32x4 ...) => (VPDPWSSDS128 ...) -(SaturatedPairDotProdAccumulateInt32x8 ...) => (VPDPWSSDS256 ...) -(SaturatedPairDotProdAccumulateInt32x16 ...) => (VPDPWSSDS512 ...) -(SaturatedPairDotProdAccumulateMaskedInt32x4 x y z mask) => (VPDPWSSDSMasked128 x y z (VPMOVVec32x4ToM mask)) -(SaturatedPairDotProdAccumulateMaskedInt32x8 x y z mask) => (VPDPWSSDSMasked256 x y z (VPMOVVec32x8ToM mask)) -(SaturatedPairDotProdAccumulateMaskedInt32x16 x y z mask) => (VPDPWSSDSMasked512 x y z (VPMOVVec32x16ToM mask)) (SaturatedPairwiseAddInt16x8 ...) => (VPHADDSW128 ...) (SaturatedPairwiseAddInt16x16 ...) => (VPHADDSW256 ...) (SaturatedPairwiseSubInt16x8 ...) => (VPHSUBSW128 ...) diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index d681620bc39a1c..6853c3b0919836 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -27,6 +27,12 @@ func simdGenericOps() []opData { {name: "AbsoluteMaskedInt64x2", argLength: 2, commutative: false}, {name: "AbsoluteMaskedInt64x4", argLength: 2, commutative: false}, {name: "AbsoluteMaskedInt64x8", argLength: 2, commutative: false}, + {name: "AddDotProdInt32x4", argLength: 3, commutative: false}, + {name: "AddDotProdInt32x8", argLength: 3, commutative: false}, + {name: "AddDotProdInt32x16", argLength: 3, commutative: false}, + {name: "AddDotProdMaskedInt32x4", argLength: 4, commutative: false}, + {name: "AddDotProdMaskedInt32x8", argLength: 4, commutative: false}, + {name: "AddDotProdMaskedInt32x16", argLength: 4, commutative: false}, {name: "AddFloat32x4", argLength: 2, commutative: true}, {name: "AddFloat32x8", argLength: 2, commutative: true}, {name: "AddFloat32x16", argLength: 2, commutative: true}, @@ -892,12 +898,6 @@ func simdGenericOps() []opData { {name: "OrUint64x2", argLength: 2, commutative: true}, {name: "OrUint64x4", argLength: 2, commutative: true}, {name: "OrUint64x8", argLength: 2, commutative: true}, - {name: "PairDotProdAccumulateInt32x4", argLength: 3, commutative: false}, - {name: "PairDotProdAccumulateInt32x8", argLength: 3, commutative: false}, - {name: "PairDotProdAccumulateInt32x16", argLength: 3, commutative: false}, - {name: "PairDotProdAccumulateMaskedInt32x4", argLength: 4, commutative: false}, - {name: "PairDotProdAccumulateMaskedInt32x8", argLength: 4, commutative: false}, - {name: "PairDotProdAccumulateMaskedInt32x16", argLength: 4, commutative: false}, {name: "PairDotProdInt16x8", argLength: 2, commutative: false}, {name: "PairDotProdInt16x16", argLength: 2, commutative: false}, {name: "PairDotProdInt16x32", argLength: 2, commutative: false}, 
@@ -1136,6 +1136,12 @@ func simdGenericOps() []opData { {name: "RoundFloat32x8", argLength: 1, commutative: false}, {name: "RoundFloat64x2", argLength: 1, commutative: false}, {name: "RoundFloat64x4", argLength: 1, commutative: false}, + {name: "SaturatedAddDotProdInt32x4", argLength: 3, commutative: false}, + {name: "SaturatedAddDotProdInt32x8", argLength: 3, commutative: false}, + {name: "SaturatedAddDotProdInt32x16", argLength: 3, commutative: false}, + {name: "SaturatedAddDotProdMaskedInt32x4", argLength: 4, commutative: false}, + {name: "SaturatedAddDotProdMaskedInt32x8", argLength: 4, commutative: false}, + {name: "SaturatedAddDotProdMaskedInt32x16", argLength: 4, commutative: false}, {name: "SaturatedAddInt8x16", argLength: 2, commutative: true}, {name: "SaturatedAddInt8x32", argLength: 2, commutative: true}, {name: "SaturatedAddInt8x64", argLength: 2, commutative: true}, @@ -1160,12 +1166,6 @@ func simdGenericOps() []opData { {name: "SaturatedAddUint16x8", argLength: 2, commutative: true}, {name: "SaturatedAddUint16x16", argLength: 2, commutative: true}, {name: "SaturatedAddUint16x32", argLength: 2, commutative: true}, - {name: "SaturatedPairDotProdAccumulateInt32x4", argLength: 3, commutative: false}, - {name: "SaturatedPairDotProdAccumulateInt32x8", argLength: 3, commutative: false}, - {name: "SaturatedPairDotProdAccumulateInt32x16", argLength: 3, commutative: false}, - {name: "SaturatedPairDotProdAccumulateMaskedInt32x4", argLength: 4, commutative: false}, - {name: "SaturatedPairDotProdAccumulateMaskedInt32x8", argLength: 4, commutative: false}, - {name: "SaturatedPairDotProdAccumulateMaskedInt32x16", argLength: 4, commutative: false}, {name: "SaturatedPairwiseAddInt16x8", argLength: 2, commutative: false}, {name: "SaturatedPairwiseAddInt16x16", argLength: 2, commutative: false}, {name: "SaturatedPairwiseSubInt16x8", argLength: 2, commutative: false}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index de4477bc91b2a7..7427137b221c22 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -4513,6 +4513,12 @@ const ( OpAbsoluteMaskedInt64x2 OpAbsoluteMaskedInt64x4 OpAbsoluteMaskedInt64x8 + OpAddDotProdInt32x4 + OpAddDotProdInt32x8 + OpAddDotProdInt32x16 + OpAddDotProdMaskedInt32x4 + OpAddDotProdMaskedInt32x8 + OpAddDotProdMaskedInt32x16 OpAddFloat32x4 OpAddFloat32x8 OpAddFloat32x16 @@ -5378,12 +5384,6 @@ const ( OpOrUint64x2 OpOrUint64x4 OpOrUint64x8 - OpPairDotProdAccumulateInt32x4 - OpPairDotProdAccumulateInt32x8 - OpPairDotProdAccumulateInt32x16 - OpPairDotProdAccumulateMaskedInt32x4 - OpPairDotProdAccumulateMaskedInt32x8 - OpPairDotProdAccumulateMaskedInt32x16 OpPairDotProdInt16x8 OpPairDotProdInt16x16 OpPairDotProdInt16x32 @@ -5622,6 +5622,12 @@ const ( OpRoundFloat32x8 OpRoundFloat64x2 OpRoundFloat64x4 + OpSaturatedAddDotProdInt32x4 + OpSaturatedAddDotProdInt32x8 + OpSaturatedAddDotProdInt32x16 + OpSaturatedAddDotProdMaskedInt32x4 + OpSaturatedAddDotProdMaskedInt32x8 + OpSaturatedAddDotProdMaskedInt32x16 OpSaturatedAddInt8x16 OpSaturatedAddInt8x32 OpSaturatedAddInt8x64 @@ -5646,12 +5652,6 @@ const ( OpSaturatedAddUint16x8 OpSaturatedAddUint16x16 OpSaturatedAddUint16x32 - OpSaturatedPairDotProdAccumulateInt32x4 - OpSaturatedPairDotProdAccumulateInt32x8 - OpSaturatedPairDotProdAccumulateInt32x16 - OpSaturatedPairDotProdAccumulateMaskedInt32x4 - OpSaturatedPairDotProdAccumulateMaskedInt32x8 - OpSaturatedPairDotProdAccumulateMaskedInt32x16 OpSaturatedPairwiseAddInt16x8 
OpSaturatedPairwiseAddInt16x16 OpSaturatedPairwiseSubInt16x8 @@ -61789,6 +61789,36 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "AddDotProdInt32x4", + argLen: 3, + generic: true, + }, + { + name: "AddDotProdInt32x8", + argLen: 3, + generic: true, + }, + { + name: "AddDotProdInt32x16", + argLen: 3, + generic: true, + }, + { + name: "AddDotProdMaskedInt32x4", + argLen: 4, + generic: true, + }, + { + name: "AddDotProdMaskedInt32x8", + argLen: 4, + generic: true, + }, + { + name: "AddDotProdMaskedInt32x16", + argLen: 4, + generic: true, + }, { name: "AddFloat32x4", argLen: 2, @@ -66563,36 +66593,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "PairDotProdAccumulateInt32x4", - argLen: 3, - generic: true, - }, - { - name: "PairDotProdAccumulateInt32x8", - argLen: 3, - generic: true, - }, - { - name: "PairDotProdAccumulateInt32x16", - argLen: 3, - generic: true, - }, - { - name: "PairDotProdAccumulateMaskedInt32x4", - argLen: 4, - generic: true, - }, - { - name: "PairDotProdAccumulateMaskedInt32x8", - argLen: 4, - generic: true, - }, - { - name: "PairDotProdAccumulateMaskedInt32x16", - argLen: 4, - generic: true, - }, { name: "PairDotProdInt16x8", argLen: 2, @@ -67783,6 +67783,36 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "SaturatedAddDotProdInt32x4", + argLen: 3, + generic: true, + }, + { + name: "SaturatedAddDotProdInt32x8", + argLen: 3, + generic: true, + }, + { + name: "SaturatedAddDotProdInt32x16", + argLen: 3, + generic: true, + }, + { + name: "SaturatedAddDotProdMaskedInt32x4", + argLen: 4, + generic: true, + }, + { + name: "SaturatedAddDotProdMaskedInt32x8", + argLen: 4, + generic: true, + }, + { + name: "SaturatedAddDotProdMaskedInt32x16", + argLen: 4, + generic: true, + }, { name: "SaturatedAddInt8x16", argLen: 2, @@ -67927,36 +67957,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "SaturatedPairDotProdAccumulateInt32x4", - argLen: 3, - generic: true, - }, - { - name: "SaturatedPairDotProdAccumulateInt32x8", - argLen: 3, - generic: true, - }, - { - name: "SaturatedPairDotProdAccumulateInt32x16", - argLen: 3, - generic: true, - }, - { - name: "SaturatedPairDotProdAccumulateMaskedInt32x4", - argLen: 4, - generic: true, - }, - { - name: "SaturatedPairDotProdAccumulateMaskedInt32x8", - argLen: 4, - generic: true, - }, - { - name: "SaturatedPairDotProdAccumulateMaskedInt32x16", - argLen: 4, - generic: true, - }, { name: "SaturatedPairwiseAddInt16x8", argLen: 2, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index e9a2fd70e4e774..5abb50ab713e69 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -631,6 +631,21 @@ func rewriteValueAMD64(v *Value) bool { case OpAdd8: v.Op = OpAMD64ADDL return true + case OpAddDotProdInt32x16: + v.Op = OpAMD64VPDPWSSD512 + return true + case OpAddDotProdInt32x4: + v.Op = OpAMD64VPDPWSSD128 + return true + case OpAddDotProdInt32x8: + v.Op = OpAMD64VPDPWSSD256 + return true + case OpAddDotProdMaskedInt32x16: + return rewriteValueAMD64_OpAddDotProdMaskedInt32x16(v) + case OpAddDotProdMaskedInt32x4: + return rewriteValueAMD64_OpAddDotProdMaskedInt32x4(v) + case OpAddDotProdMaskedInt32x8: + return rewriteValueAMD64_OpAddDotProdMaskedInt32x8(v) case OpAddFloat32x16: v.Op = OpAMD64VADDPS512 return true @@ -3340,21 +3355,6 @@ func rewriteValueAMD64(v *Value) bool { case OpOrUint8x32: v.Op = OpAMD64VPOR256 return true - 
case OpPairDotProdAccumulateInt32x16: - v.Op = OpAMD64VPDPWSSD512 - return true - case OpPairDotProdAccumulateInt32x4: - v.Op = OpAMD64VPDPWSSD128 - return true - case OpPairDotProdAccumulateInt32x8: - v.Op = OpAMD64VPDPWSSD256 - return true - case OpPairDotProdAccumulateMaskedInt32x16: - return rewriteValueAMD64_OpPairDotProdAccumulateMaskedInt32x16(v) - case OpPairDotProdAccumulateMaskedInt32x4: - return rewriteValueAMD64_OpPairDotProdAccumulateMaskedInt32x4(v) - case OpPairDotProdAccumulateMaskedInt32x8: - return rewriteValueAMD64_OpPairDotProdAccumulateMaskedInt32x8(v) case OpPairDotProdInt16x16: v.Op = OpAMD64VPMADDWD256 return true @@ -4206,6 +4206,21 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpRsh8x64(v) case OpRsh8x8: return rewriteValueAMD64_OpRsh8x8(v) + case OpSaturatedAddDotProdInt32x16: + v.Op = OpAMD64VPDPWSSDS512 + return true + case OpSaturatedAddDotProdInt32x4: + v.Op = OpAMD64VPDPWSSDS128 + return true + case OpSaturatedAddDotProdInt32x8: + v.Op = OpAMD64VPDPWSSDS256 + return true + case OpSaturatedAddDotProdMaskedInt32x16: + return rewriteValueAMD64_OpSaturatedAddDotProdMaskedInt32x16(v) + case OpSaturatedAddDotProdMaskedInt32x4: + return rewriteValueAMD64_OpSaturatedAddDotProdMaskedInt32x4(v) + case OpSaturatedAddDotProdMaskedInt32x8: + return rewriteValueAMD64_OpSaturatedAddDotProdMaskedInt32x8(v) case OpSaturatedAddInt16x16: v.Op = OpAMD64VPADDSW256 return true @@ -4266,21 +4281,6 @@ func rewriteValueAMD64(v *Value) bool { case OpSaturatedAddUint8x64: v.Op = OpAMD64VPADDSB512 return true - case OpSaturatedPairDotProdAccumulateInt32x16: - v.Op = OpAMD64VPDPWSSDS512 - return true - case OpSaturatedPairDotProdAccumulateInt32x4: - v.Op = OpAMD64VPDPWSSDS128 - return true - case OpSaturatedPairDotProdAccumulateInt32x8: - v.Op = OpAMD64VPDPWSSDS256 - return true - case OpSaturatedPairDotProdAccumulateMaskedInt32x16: - return rewriteValueAMD64_OpSaturatedPairDotProdAccumulateMaskedInt32x16(v) - case OpSaturatedPairDotProdAccumulateMaskedInt32x4: - return rewriteValueAMD64_OpSaturatedPairDotProdAccumulateMaskedInt32x4(v) - case OpSaturatedPairDotProdAccumulateMaskedInt32x8: - return rewriteValueAMD64_OpSaturatedPairDotProdAccumulateMaskedInt32x8(v) case OpSaturatedPairwiseAddInt16x16: v.Op = OpAMD64VPHADDSW256 return true @@ -28514,6 +28514,66 @@ func rewriteValueAMD64_OpAbsoluteMaskedInt8x64(v *Value) bool { return true } } +func rewriteValueAMD64_OpAddDotProdMaskedInt32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AddDotProdMaskedInt32x16 x y z mask) + // result: (VPDPWSSDMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPWSSDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpAddDotProdMaskedInt32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AddDotProdMaskedInt32x4 x y z mask) + // result: (VPDPWSSDMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPWSSDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpAddDotProdMaskedInt32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + 
// match: (AddDotProdMaskedInt32x8 x y z mask) + // result: (VPDPWSSDMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPWSSDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} func rewriteValueAMD64_OpAddMaskedFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -45669,66 +45729,6 @@ func rewriteValueAMD64_OpOrMaskedUint64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpPairDotProdAccumulateMaskedInt32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (PairDotProdAccumulateMaskedInt32x16 x y z mask) - // result: (VPDPWSSDMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPWSSDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpPairDotProdAccumulateMaskedInt32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (PairDotProdAccumulateMaskedInt32x4 x y z mask) - // result: (VPDPWSSDMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPWSSDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpPairDotProdAccumulateMaskedInt32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (PairDotProdAccumulateMaskedInt32x8 x y z mask) - // result: (VPDPWSSDMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPWSSDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} func rewriteValueAMD64_OpPairDotProdMaskedInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -49721,6 +49721,66 @@ func rewriteValueAMD64_OpRsh8x8(v *Value) bool { } return false } +func rewriteValueAMD64_OpSaturatedAddDotProdMaskedInt32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SaturatedAddDotProdMaskedInt32x16 x y z mask) + // result: (VPDPWSSDSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPWSSDSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpSaturatedAddDotProdMaskedInt32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SaturatedAddDotProdMaskedInt32x4 x y z mask) + // result: (VPDPWSSDSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPWSSDSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpSaturatedAddDotProdMaskedInt32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SaturatedAddDotProdMaskedInt32x8 x y z mask) + // result: (VPDPWSSDSMasked256 x y z 
(VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPWSSDSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} func rewriteValueAMD64_OpSaturatedAddMaskedInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -49937,66 +49997,6 @@ func rewriteValueAMD64_OpSaturatedAddMaskedUint8x64(v *Value) bool { return true } } -func rewriteValueAMD64_OpSaturatedPairDotProdAccumulateMaskedInt32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedPairDotProdAccumulateMaskedInt32x16 x y z mask) - // result: (VPDPWSSDSMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPWSSDSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedPairDotProdAccumulateMaskedInt32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedPairDotProdAccumulateMaskedInt32x4 x y z mask) - // result: (VPDPWSSDSMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPWSSDSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedPairDotProdAccumulateMaskedInt32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedPairDotProdAccumulateMaskedInt32x8 x y z mask) - // result: (VPDPWSSDSMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPWSSDSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} func rewriteValueAMD64_OpSaturatedSubMaskedInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index d6c5b889ed3b8e..12c388ca91301f 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -65,6 +65,12 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.Add", opLen2(ssa.OpAddUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.Add", opLen2(ssa.OpAddUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.Add", opLen2(ssa.OpAddUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.AddDotProd", opLen3(ssa.OpAddDotProdInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.AddDotProd", opLen3(ssa.OpAddDotProdInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.AddDotProd", opLen3(ssa.OpAddDotProdInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.AddDotProdMasked", opLen4(ssa.OpAddDotProdMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.AddDotProdMasked", opLen4(ssa.OpAddDotProdMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.AddDotProdMasked", opLen4(ssa.OpAddDotProdMaskedInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.AddMasked", opLen3(ssa.OpAddMaskedFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.AddMasked", opLen3(ssa.OpAddMaskedFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.AddMasked", opLen3(ssa.OpAddMaskedFloat32x16, types.TypeVec512), sys.AMD64) @@ -1005,12 +1011,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Int16x8.PairDotProd", opLen2(ssa.OpPairDotProdInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.PairDotProd", opLen2(ssa.OpPairDotProdInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.PairDotProd", opLen2(ssa.OpPairDotProdInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.PairDotProdAccumulate", opLen3_31(ssa.OpPairDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.PairDotProdAccumulate", opLen3_31(ssa.OpPairDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.PairDotProdAccumulate", opLen3_31(ssa.OpPairDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.PairDotProdAccumulateMasked", opLen4_31(ssa.OpPairDotProdAccumulateMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.PairDotProdAccumulateMasked", opLen4_31(ssa.OpPairDotProdAccumulateMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.PairDotProdAccumulateMasked", opLen4_31(ssa.OpPairDotProdAccumulateMaskedInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.PairDotProdMasked", opLen3(ssa.OpPairDotProdMaskedInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.PairDotProdMasked", opLen3(ssa.OpPairDotProdMaskedInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.PairDotProdMasked", opLen3(ssa.OpPairDotProdMaskedInt16x32, types.TypeVec512), sys.AMD64) @@ -1318,6 +1318,12 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint16x8.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x16.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x32.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.SaturatedAddDotProd", opLen3(ssa.OpSaturatedAddDotProdInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.SaturatedAddDotProd", opLen3(ssa.OpSaturatedAddDotProdInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.SaturatedAddDotProd", opLen3(ssa.OpSaturatedAddDotProdInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.SaturatedAddDotProdMasked", opLen4(ssa.OpSaturatedAddDotProdMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.SaturatedAddDotProdMasked", opLen4(ssa.OpSaturatedAddDotProdMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.SaturatedAddDotProdMasked", opLen4(ssa.OpSaturatedAddDotProdMaskedInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x64.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedInt8x64, types.TypeVec512), sys.AMD64) @@ -1330,12 +1336,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint16x8.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x16.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x32.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.SaturatedPairDotProdAccumulate", opLen3_31(ssa.OpSaturatedPairDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.SaturatedPairDotProdAccumulate", opLen3_31(ssa.OpSaturatedPairDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.SaturatedPairDotProdAccumulate", opLen3_31(ssa.OpSaturatedPairDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.SaturatedPairDotProdAccumulateMasked", opLen4_31(ssa.OpSaturatedPairDotProdAccumulateMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.SaturatedPairDotProdAccumulateMasked", opLen4_31(ssa.OpSaturatedPairDotProdAccumulateMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.SaturatedPairDotProdAccumulateMasked", opLen4_31(ssa.OpSaturatedPairDotProdAccumulateMaskedInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.SaturatedPairwiseAdd", opLen2(ssa.OpSaturatedPairwiseAddInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.SaturatedPairwiseAdd", opLen2(ssa.OpSaturatedPairwiseAddInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x8.SaturatedPairwiseSub", opLen2(ssa.OpSaturatedPairwiseSubInt16x8, types.TypeVec128), sys.AMD64) diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index f88410af43d705..ea0c598157110d 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -304,6 +304,46 @@ func (x Uint64x4) Add(y Uint64x4) Uint64x4 // Asm: VPADDQ, CPU Feature: AVX512F func (x Uint64x8) Add(y Uint64x8) Uint64x8 +/* AddDotProd */ + +// AddDotProd 
performs dot products on pairs of elements of y and z and then adds x. +// +// Asm: VPDPWSSD, CPU Feature: AVXVNNI +func (x Int32x4) AddDotProd(y Int16x8, z Int16x8) Int32x4 + +// AddDotProd performs dot products on pairs of elements of y and z and then adds x. +// +// Asm: VPDPWSSD, CPU Feature: AVXVNNI +func (x Int32x8) AddDotProd(y Int16x16, z Int16x16) Int32x8 + +// AddDotProd performs dot products on pairs of elements of y and z and then adds x. +// +// Asm: VPDPWSSD, CPU Feature: AVX512VNNI +func (x Int32x16) AddDotProd(y Int16x32, z Int16x32) Int32x16 + +/* AddDotProdMasked */ + +// AddDotProdMasked performs dot products on pairs of elements of y and z and then adds x. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPDPWSSD, CPU Feature: AVX512VNNI +func (x Int32x4) AddDotProdMasked(y Int16x8, z Int16x8, mask Mask32x4) Int32x4 + +// AddDotProdMasked performs dot products on pairs of elements of y and z and then adds x. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPDPWSSD, CPU Feature: AVX512VNNI +func (x Int32x8) AddDotProdMasked(y Int16x16, z Int16x16, mask Mask32x8) Int32x8 + +// AddDotProdMasked performs dot products on pairs of elements of y and z and then adds x. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPDPWSSD, CPU Feature: AVX512VNNI +func (x Int32x16) AddDotProdMasked(y Int16x32, z Int16x32, mask Mask32x16) Int32x16 + /* AddMasked */ // AddMasked adds corresponding elements of two vectors. @@ -6339,46 +6379,6 @@ func (x Int16x16) PairDotProd(y Int16x16) Int32x8 // Asm: VPMADDWD, CPU Feature: AVX512BW func (x Int16x32) PairDotProd(y Int16x32) Int32x16 -/* PairDotProdAccumulate */ - -// PairDotProdAccumulate performs dot products on pairs of elements of x and y and then adds z. -// -// Asm: VPDPWSSD, CPU Feature: AVXVNNI -func (x Int16x8) PairDotProdAccumulate(y Int16x8, z Int32x4) Int32x4 - -// PairDotProdAccumulate performs dot products on pairs of elements of x and y and then adds z. -// -// Asm: VPDPWSSD, CPU Feature: AVXVNNI -func (x Int16x16) PairDotProdAccumulate(y Int16x16, z Int32x8) Int32x8 - -// PairDotProdAccumulate performs dot products on pairs of elements of x and y and then adds z. -// -// Asm: VPDPWSSD, CPU Feature: AVX512VNNI -func (x Int16x32) PairDotProdAccumulate(y Int16x32, z Int32x16) Int32x16 - -/* PairDotProdAccumulateMasked */ - -// PairDotProdAccumulateMasked performs dot products on pairs of elements of x and y and then adds z. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPDPWSSD, CPU Feature: AVX512VNNI -func (x Int16x8) PairDotProdAccumulateMasked(y Int16x8, z Int32x4, mask Mask32x4) Int32x4 - -// PairDotProdAccumulateMasked performs dot products on pairs of elements of x and y and then adds z. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPDPWSSD, CPU Feature: AVX512VNNI -func (x Int16x16) PairDotProdAccumulateMasked(y Int16x16, z Int32x8, mask Mask32x8) Int32x8 - -// PairDotProdAccumulateMasked performs dot products on pairs of elements of x and y and then adds z. -// -// This operation is applied selectively under a write mask. 
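// Illustration, not part of this CL: AddDotProd is the former PairDotProdAccumulate
// with the accumulator moved to the receiver, i.e. acc.AddDotProd(y, z) instead of
// y.PairDotProdAccumulate(z, acc). A scalar sketch of one 32-bit result lane of
// VPDPWSSD; the helper name is illustrative and does not exist in the simd package.
func addDotProdLane(acc int32, y0, y1, z0, z1 int16) int32 {
	// Multiply adjacent signed words, widen each product to 32 bits,
	// and add both products to the accumulator.
	return acc + int32(y0)*int32(z0) + int32(y1)*int32(z1)
}

// The simd_test.go hunk later in this CL shows the same change at a call site:
// z = x.PairDotProdAccumulate(x, z) becomes z = z.AddDotProd(x, x).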
-// -// Asm: VPDPWSSD, CPU Feature: AVX512VNNI -func (x Int16x32) PairDotProdAccumulateMasked(y Int16x32, z Int32x16, mask Mask32x16) Int32x16 - /* PairDotProdMasked */ // PairDotProdMasked multiplies the elements and add the pairs together, @@ -8649,6 +8649,46 @@ func (x Uint16x16) SaturatedAdd(y Uint16x16) Uint16x16 // Asm: VPADDSW, CPU Feature: AVX512BW func (x Uint16x32) SaturatedAdd(y Uint16x32) Uint16x32 +/* SaturatedAddDotProd */ + +// SaturatedAddDotProd performs dot products on pairs of elements of y and z and then adds x. +// +// Asm: VPDPWSSDS, CPU Feature: AVXVNNI +func (x Int32x4) SaturatedAddDotProd(y Int16x8, z Int16x8) Int32x4 + +// SaturatedAddDotProd performs dot products on pairs of elements of y and z and then adds x. +// +// Asm: VPDPWSSDS, CPU Feature: AVXVNNI +func (x Int32x8) SaturatedAddDotProd(y Int16x16, z Int16x16) Int32x8 + +// SaturatedAddDotProd performs dot products on pairs of elements of y and z and then adds x. +// +// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI +func (x Int32x16) SaturatedAddDotProd(y Int16x32, z Int16x32) Int32x16 + +/* SaturatedAddDotProdMasked */ + +// SaturatedAddDotProdMasked performs dot products on pairs of elements of y and z and then adds x. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI +func (x Int32x4) SaturatedAddDotProdMasked(y Int16x8, z Int16x8, mask Mask32x4) Int32x4 + +// SaturatedAddDotProdMasked performs dot products on pairs of elements of y and z and then adds x. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI +func (x Int32x8) SaturatedAddDotProdMasked(y Int16x16, z Int16x16, mask Mask32x8) Int32x8 + +// SaturatedAddDotProdMasked performs dot products on pairs of elements of y and z and then adds x. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI +func (x Int32x16) SaturatedAddDotProdMasked(y Int16x32, z Int16x32, mask Mask32x16) Int32x16 + /* SaturatedAddMasked */ // SaturatedAddMasked adds corresponding elements of two vectors with saturation. @@ -8735,46 +8775,6 @@ func (x Uint16x16) SaturatedAddMasked(y Uint16x16, mask Mask16x16) Uint16x16 // Asm: VPADDSW, CPU Feature: AVX512BW func (x Uint16x32) SaturatedAddMasked(y Uint16x32, mask Mask16x32) Uint16x32 -/* SaturatedPairDotProdAccumulate */ - -// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of x and y and then adds z. -// -// Asm: VPDPWSSDS, CPU Feature: AVXVNNI -func (x Int16x8) SaturatedPairDotProdAccumulate(y Int16x8, z Int32x4) Int32x4 - -// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of x and y and then adds z. -// -// Asm: VPDPWSSDS, CPU Feature: AVXVNNI -func (x Int16x16) SaturatedPairDotProdAccumulate(y Int16x16, z Int32x8) Int32x8 - -// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of x and y and then adds z. -// -// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI -func (x Int16x32) SaturatedPairDotProdAccumulate(y Int16x32, z Int32x16) Int32x16 - -/* SaturatedPairDotProdAccumulateMasked */ - -// SaturatedPairDotProdAccumulateMasked performs dot products on pairs of elements of x and y and then adds z. -// -// This operation is applied selectively under a write mask. 
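// Illustration, not part of this CL: SaturatedAddDotProd is the saturating form
// (VPDPWSSDS); the accumulation clamps to the int32 range instead of wrapping.
// A scalar sketch of one result lane, assuming the sum is saturated once at the
// end; the helper name is illustrative and does not exist in the simd package.
func saturatedAddDotProdLane(acc int32, y0, y1, z0, z1 int16) int32 {
	const maxI32, minI32 = 1<<31 - 1, -(1 << 31)
	sum := int64(acc) + int64(y0)*int64(z0) + int64(y1)*int64(z1)
	if sum > maxI32 {
		sum = maxI32
	}
	if sum < minI32 {
		sum = minI32
	}
	return int32(sum)
}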
-// -// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI -func (x Int16x8) SaturatedPairDotProdAccumulateMasked(y Int16x8, z Int32x4, mask Mask32x4) Int32x4 - -// SaturatedPairDotProdAccumulateMasked performs dot products on pairs of elements of x and y and then adds z. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI -func (x Int16x16) SaturatedPairDotProdAccumulateMasked(y Int16x16, z Int32x8, mask Mask32x8) Int32x8 - -// SaturatedPairDotProdAccumulateMasked performs dot products on pairs of elements of x and y and then adds z. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI -func (x Int16x32) SaturatedPairDotProdAccumulateMasked(y Int16x32, z Int32x16, mask Mask32x16) Int32x16 - /* SaturatedPairwiseAdd */ // SaturatedPairwiseAdd horizontally adds adjacent pairs of elements with saturation. diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index 72180a304691b2..2326addea94b8e 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -197,7 +197,7 @@ func TestPairDotProdAccumulate(t *testing.T) { z := simd.LoadInt32x4Slice([]int32{3, 3, 3, 3}) want := []int32{11, 11, 11, 11} got := make([]int32, 4) - z = x.PairDotProdAccumulate(x, z) + z = z.AddDotProd(x, x) z.StoreSlice(got) for i := range 4 { if got[i] != want[i] { From 3f92aa1ecae1f935731cffefcfe3a400e284ab82 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Fri, 1 Aug 2025 19:13:13 +0000 Subject: [PATCH 109/139] [dev.simd] cmd/compile, simd: make bitwise logic ops available to all u?int vectors This CL is generated by CL 692555. Change-Id: I24e6de83e0408576f385a1c8e861b08c583f9098 Reviewed-on: https://go-review.googlesource.com/c/go/+/692356 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- .../compile/internal/ssa/_gen/simdAMD64.rules | 16 +++ .../internal/ssa/_gen/simdgenericOps.go | 16 +++ src/cmd/compile/internal/ssa/opGen.go | 108 ++++++++++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 48 ++++++++ .../compile/internal/ssagen/simdintrinsics.go | 16 +++ src/simd/binary_test.go | 8 +- src/simd/ops_amd64.go | 80 +++++++++++++ 7 files changed, 288 insertions(+), 4 deletions(-) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 7b7cbb9dc76791..1d54cfcdbddebb 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -96,8 +96,10 @@ (AddSubFloat64x4 ...) => (VADDSUBPD256 ...) (AndInt8x16 ...) => (VPAND128 ...) (AndInt8x32 ...) => (VPAND256 ...) +(AndInt8x64 ...) => (VPANDD512 ...) (AndInt16x8 ...) => (VPAND128 ...) (AndInt16x16 ...) => (VPAND256 ...) +(AndInt16x32 ...) => (VPANDD512 ...) (AndInt32x4 ...) => (VPAND128 ...) (AndInt32x8 ...) => (VPAND256 ...) (AndInt32x16 ...) => (VPANDD512 ...) @@ -106,8 +108,10 @@ (AndInt64x8 ...) => (VPANDQ512 ...) (AndUint8x16 ...) => (VPAND128 ...) (AndUint8x32 ...) => (VPAND256 ...) +(AndUint8x64 ...) => (VPANDD512 ...) (AndUint16x8 ...) => (VPAND128 ...) (AndUint16x16 ...) => (VPAND256 ...) +(AndUint16x32 ...) => (VPANDD512 ...) (AndUint32x4 ...) => (VPAND128 ...) (AndUint32x8 ...) => (VPAND256 ...) (AndUint32x16 ...) => (VPANDD512 ...) @@ -128,8 +132,10 @@ (AndMaskedUint64x8 x y mask) => (VPANDQMasked512 x y (VPMOVVec64x8ToM mask)) (AndNotInt8x16 ...) => (VPANDN128 ...) (AndNotInt8x32 ...) => (VPANDN256 ...) +(AndNotInt8x64 ...) => (VPANDND512 ...) (AndNotInt16x8 ...) => (VPANDN128 ...) (AndNotInt16x16 ...) 
=> (VPANDN256 ...) +(AndNotInt16x32 ...) => (VPANDND512 ...) (AndNotInt32x4 ...) => (VPANDN128 ...) (AndNotInt32x8 ...) => (VPANDN256 ...) (AndNotInt32x16 ...) => (VPANDND512 ...) @@ -138,8 +144,10 @@ (AndNotInt64x8 ...) => (VPANDNQ512 ...) (AndNotUint8x16 ...) => (VPANDN128 ...) (AndNotUint8x32 ...) => (VPANDN256 ...) +(AndNotUint8x64 ...) => (VPANDND512 ...) (AndNotUint16x8 ...) => (VPANDN128 ...) (AndNotUint16x16 ...) => (VPANDN256 ...) +(AndNotUint16x32 ...) => (VPANDND512 ...) (AndNotUint32x4 ...) => (VPANDN128 ...) (AndNotUint32x8 ...) => (VPANDN256 ...) (AndNotUint32x16 ...) => (VPANDND512 ...) @@ -967,8 +975,10 @@ (NotEqualMaskedUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [4] x y (VPMOVVec64x8ToM mask))) (OrInt8x16 ...) => (VPOR128 ...) (OrInt8x32 ...) => (VPOR256 ...) +(OrInt8x64 ...) => (VPORD512 ...) (OrInt16x8 ...) => (VPOR128 ...) (OrInt16x16 ...) => (VPOR256 ...) +(OrInt16x32 ...) => (VPORD512 ...) (OrInt32x4 ...) => (VPOR128 ...) (OrInt32x8 ...) => (VPOR256 ...) (OrInt32x16 ...) => (VPORD512 ...) @@ -977,8 +987,10 @@ (OrInt64x8 ...) => (VPORQ512 ...) (OrUint8x16 ...) => (VPOR128 ...) (OrUint8x32 ...) => (VPOR256 ...) +(OrUint8x64 ...) => (VPORD512 ...) (OrUint16x8 ...) => (VPOR128 ...) (OrUint16x16 ...) => (VPOR256 ...) +(OrUint16x32 ...) => (VPORD512 ...) (OrUint32x4 ...) => (VPOR128 ...) (OrUint32x8 ...) => (VPOR256 ...) (OrUint32x16 ...) => (VPORD512 ...) @@ -1773,8 +1785,10 @@ (UnsignedSignedQuadDotProdAccumulateMaskedInt32x16 x y z mask) => (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) (XorInt8x16 ...) => (VPXOR128 ...) (XorInt8x32 ...) => (VPXOR256 ...) +(XorInt8x64 ...) => (VPXORD512 ...) (XorInt16x8 ...) => (VPXOR128 ...) (XorInt16x16 ...) => (VPXOR256 ...) +(XorInt16x32 ...) => (VPXORD512 ...) (XorInt32x4 ...) => (VPXOR128 ...) (XorInt32x8 ...) => (VPXOR256 ...) (XorInt32x16 ...) => (VPXORD512 ...) @@ -1783,8 +1797,10 @@ (XorInt64x8 ...) => (VPXORQ512 ...) (XorUint8x16 ...) => (VPXOR128 ...) (XorUint8x32 ...) => (VPXOR256 ...) +(XorUint8x64 ...) => (VPXORD512 ...) (XorUint16x8 ...) => (VPXOR128 ...) (XorUint16x16 ...) => (VPXOR256 ...) +(XorUint16x32 ...) => (VPXORD512 ...) (XorUint32x4 ...) => (VPXOR128 ...) (XorUint32x8 ...) => (VPXOR256 ...) (XorUint32x16 ...) => (VPXORD512 ...) 
diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 6853c3b0919836..492a994e9363cf 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -99,8 +99,10 @@ func simdGenericOps() []opData { {name: "AddUint64x8", argLength: 2, commutative: true}, {name: "AndInt8x16", argLength: 2, commutative: true}, {name: "AndInt8x32", argLength: 2, commutative: true}, + {name: "AndInt8x64", argLength: 2, commutative: true}, {name: "AndInt16x8", argLength: 2, commutative: true}, {name: "AndInt16x16", argLength: 2, commutative: true}, + {name: "AndInt16x32", argLength: 2, commutative: true}, {name: "AndInt32x4", argLength: 2, commutative: true}, {name: "AndInt32x8", argLength: 2, commutative: true}, {name: "AndInt32x16", argLength: 2, commutative: true}, @@ -121,8 +123,10 @@ func simdGenericOps() []opData { {name: "AndMaskedUint64x8", argLength: 3, commutative: true}, {name: "AndNotInt8x16", argLength: 2, commutative: false}, {name: "AndNotInt8x32", argLength: 2, commutative: false}, + {name: "AndNotInt8x64", argLength: 2, commutative: false}, {name: "AndNotInt16x8", argLength: 2, commutative: false}, {name: "AndNotInt16x16", argLength: 2, commutative: false}, + {name: "AndNotInt16x32", argLength: 2, commutative: false}, {name: "AndNotInt32x4", argLength: 2, commutative: false}, {name: "AndNotInt32x8", argLength: 2, commutative: false}, {name: "AndNotInt32x16", argLength: 2, commutative: false}, @@ -143,8 +147,10 @@ func simdGenericOps() []opData { {name: "AndNotMaskedUint64x8", argLength: 3, commutative: false}, {name: "AndNotUint8x16", argLength: 2, commutative: false}, {name: "AndNotUint8x32", argLength: 2, commutative: false}, + {name: "AndNotUint8x64", argLength: 2, commutative: false}, {name: "AndNotUint16x8", argLength: 2, commutative: false}, {name: "AndNotUint16x16", argLength: 2, commutative: false}, + {name: "AndNotUint16x32", argLength: 2, commutative: false}, {name: "AndNotUint32x4", argLength: 2, commutative: false}, {name: "AndNotUint32x8", argLength: 2, commutative: false}, {name: "AndNotUint32x16", argLength: 2, commutative: false}, @@ -153,8 +159,10 @@ func simdGenericOps() []opData { {name: "AndNotUint64x8", argLength: 2, commutative: false}, {name: "AndUint8x16", argLength: 2, commutative: true}, {name: "AndUint8x32", argLength: 2, commutative: true}, + {name: "AndUint8x64", argLength: 2, commutative: true}, {name: "AndUint16x8", argLength: 2, commutative: true}, {name: "AndUint16x16", argLength: 2, commutative: true}, + {name: "AndUint16x32", argLength: 2, commutative: true}, {name: "AndUint32x4", argLength: 2, commutative: true}, {name: "AndUint32x8", argLength: 2, commutative: true}, {name: "AndUint32x16", argLength: 2, commutative: true}, @@ -868,8 +876,10 @@ func simdGenericOps() []opData { {name: "NotEqualUint64x8", argLength: 2, commutative: true}, {name: "OrInt8x16", argLength: 2, commutative: true}, {name: "OrInt8x32", argLength: 2, commutative: true}, + {name: "OrInt8x64", argLength: 2, commutative: true}, {name: "OrInt16x8", argLength: 2, commutative: true}, {name: "OrInt16x16", argLength: 2, commutative: true}, + {name: "OrInt16x32", argLength: 2, commutative: true}, {name: "OrInt32x4", argLength: 2, commutative: true}, {name: "OrInt32x8", argLength: 2, commutative: true}, {name: "OrInt32x16", argLength: 2, commutative: true}, @@ -890,8 +900,10 @@ func simdGenericOps() []opData { {name: "OrMaskedUint64x8", argLength: 3, commutative: 
true}, {name: "OrUint8x16", argLength: 2, commutative: true}, {name: "OrUint8x32", argLength: 2, commutative: true}, + {name: "OrUint8x64", argLength: 2, commutative: true}, {name: "OrUint16x8", argLength: 2, commutative: true}, {name: "OrUint16x16", argLength: 2, commutative: true}, + {name: "OrUint16x32", argLength: 2, commutative: true}, {name: "OrUint32x4", argLength: 2, commutative: true}, {name: "OrUint32x8", argLength: 2, commutative: true}, {name: "OrUint32x16", argLength: 2, commutative: true}, @@ -1512,8 +1524,10 @@ func simdGenericOps() []opData { {name: "UnsignedSignedQuadDotProdAccumulateMaskedInt32x16", argLength: 4, commutative: false}, {name: "XorInt8x16", argLength: 2, commutative: true}, {name: "XorInt8x32", argLength: 2, commutative: true}, + {name: "XorInt8x64", argLength: 2, commutative: true}, {name: "XorInt16x8", argLength: 2, commutative: true}, {name: "XorInt16x16", argLength: 2, commutative: true}, + {name: "XorInt16x32", argLength: 2, commutative: true}, {name: "XorInt32x4", argLength: 2, commutative: true}, {name: "XorInt32x8", argLength: 2, commutative: true}, {name: "XorInt32x16", argLength: 2, commutative: true}, @@ -1534,8 +1548,10 @@ func simdGenericOps() []opData { {name: "XorMaskedUint64x8", argLength: 3, commutative: true}, {name: "XorUint8x16", argLength: 2, commutative: true}, {name: "XorUint8x32", argLength: 2, commutative: true}, + {name: "XorUint8x64", argLength: 2, commutative: true}, {name: "XorUint16x8", argLength: 2, commutative: true}, {name: "XorUint16x16", argLength: 2, commutative: true}, + {name: "XorUint16x32", argLength: 2, commutative: true}, {name: "XorUint32x4", argLength: 2, commutative: true}, {name: "XorUint32x8", argLength: 2, commutative: true}, {name: "XorUint32x16", argLength: 2, commutative: true}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 7427137b221c22..e8a5354c00130b 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -4585,8 +4585,10 @@ const ( OpAddUint64x8 OpAndInt8x16 OpAndInt8x32 + OpAndInt8x64 OpAndInt16x8 OpAndInt16x16 + OpAndInt16x32 OpAndInt32x4 OpAndInt32x8 OpAndInt32x16 @@ -4607,8 +4609,10 @@ const ( OpAndMaskedUint64x8 OpAndNotInt8x16 OpAndNotInt8x32 + OpAndNotInt8x64 OpAndNotInt16x8 OpAndNotInt16x16 + OpAndNotInt16x32 OpAndNotInt32x4 OpAndNotInt32x8 OpAndNotInt32x16 @@ -4629,8 +4633,10 @@ const ( OpAndNotMaskedUint64x8 OpAndNotUint8x16 OpAndNotUint8x32 + OpAndNotUint8x64 OpAndNotUint16x8 OpAndNotUint16x16 + OpAndNotUint16x32 OpAndNotUint32x4 OpAndNotUint32x8 OpAndNotUint32x16 @@ -4639,8 +4645,10 @@ const ( OpAndNotUint64x8 OpAndUint8x16 OpAndUint8x32 + OpAndUint8x64 OpAndUint16x8 OpAndUint16x16 + OpAndUint16x32 OpAndUint32x4 OpAndUint32x8 OpAndUint32x16 @@ -5354,8 +5362,10 @@ const ( OpNotEqualUint64x8 OpOrInt8x16 OpOrInt8x32 + OpOrInt8x64 OpOrInt16x8 OpOrInt16x16 + OpOrInt16x32 OpOrInt32x4 OpOrInt32x8 OpOrInt32x16 @@ -5376,8 +5386,10 @@ const ( OpOrMaskedUint64x8 OpOrUint8x16 OpOrUint8x32 + OpOrUint8x64 OpOrUint16x8 OpOrUint16x16 + OpOrUint16x32 OpOrUint32x4 OpOrUint32x8 OpOrUint32x16 @@ -5998,8 +6010,10 @@ const ( OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x16 OpXorInt8x16 OpXorInt8x32 + OpXorInt8x64 OpXorInt16x8 OpXorInt16x16 + OpXorInt16x32 OpXorInt32x4 OpXorInt32x8 OpXorInt32x16 @@ -6020,8 +6034,10 @@ const ( OpXorMaskedUint64x8 OpXorUint8x16 OpXorUint8x32 + OpXorUint8x64 OpXorUint16x8 OpXorUint16x16 + OpXorUint16x32 OpXorUint32x4 OpXorUint32x8 OpXorUint32x16 @@ -62211,6 +62227,12 @@ var opcodeTable = 
[...]opInfo{ commutative: true, generic: true, }, + { + name: "AndInt8x64", + argLen: 2, + commutative: true, + generic: true, + }, { name: "AndInt16x8", argLen: 2, @@ -62223,6 +62245,12 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "AndInt16x32", + argLen: 2, + commutative: true, + generic: true, + }, { name: "AndInt32x4", argLen: 2, @@ -62341,6 +62369,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "AndNotInt8x64", + argLen: 2, + generic: true, + }, { name: "AndNotInt16x8", argLen: 2, @@ -62351,6 +62384,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "AndNotInt16x32", + argLen: 2, + generic: true, + }, { name: "AndNotInt32x4", argLen: 2, @@ -62451,6 +62489,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "AndNotUint8x64", + argLen: 2, + generic: true, + }, { name: "AndNotUint16x8", argLen: 2, @@ -62461,6 +62504,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "AndNotUint16x32", + argLen: 2, + generic: true, + }, { name: "AndNotUint32x4", argLen: 2, @@ -62503,6 +62551,12 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "AndUint8x64", + argLen: 2, + commutative: true, + generic: true, + }, { name: "AndUint16x8", argLen: 2, @@ -62515,6 +62569,12 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "AndUint16x32", + argLen: 2, + commutative: true, + generic: true, + }, { name: "AndUint32x4", argLen: 2, @@ -66413,6 +66473,12 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "OrInt8x64", + argLen: 2, + commutative: true, + generic: true, + }, { name: "OrInt16x8", argLen: 2, @@ -66425,6 +66491,12 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "OrInt16x32", + argLen: 2, + commutative: true, + generic: true, + }, { name: "OrInt32x4", argLen: 2, @@ -66545,6 +66617,12 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "OrUint8x64", + argLen: 2, + commutative: true, + generic: true, + }, { name: "OrUint16x8", argLen: 2, @@ -66557,6 +66635,12 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "OrUint16x32", + argLen: 2, + commutative: true, + generic: true, + }, { name: "OrUint32x4", argLen: 2, @@ -69689,6 +69773,12 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "XorInt8x64", + argLen: 2, + commutative: true, + generic: true, + }, { name: "XorInt16x8", argLen: 2, @@ -69701,6 +69791,12 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "XorInt16x32", + argLen: 2, + commutative: true, + generic: true, + }, { name: "XorInt32x4", argLen: 2, @@ -69821,6 +69917,12 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "XorUint8x64", + argLen: 2, + commutative: true, + generic: true, + }, { name: "XorUint16x8", argLen: 2, @@ -69833,6 +69935,12 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "XorUint16x32", + argLen: 2, + commutative: true, + generic: true, + }, { name: "XorUint32x4", argLen: 2, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 5abb50ab713e69..82f13b43c6ee82 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -831,6 +831,9 @@ func rewriteValueAMD64(v *Value) bool { case OpAndInt16x16: v.Op = 
OpAMD64VPAND256 return true + case OpAndInt16x32: + v.Op = OpAMD64VPANDD512 + return true case OpAndInt16x8: v.Op = OpAMD64VPAND128 return true @@ -858,6 +861,9 @@ func rewriteValueAMD64(v *Value) bool { case OpAndInt8x32: v.Op = OpAMD64VPAND256 return true + case OpAndInt8x64: + v.Op = OpAMD64VPANDD512 + return true case OpAndMaskedInt32x16: return rewriteValueAMD64_OpAndMaskedInt32x16(v) case OpAndMaskedInt32x4: @@ -885,6 +891,9 @@ func rewriteValueAMD64(v *Value) bool { case OpAndNotInt16x16: v.Op = OpAMD64VPANDN256 return true + case OpAndNotInt16x32: + v.Op = OpAMD64VPANDND512 + return true case OpAndNotInt16x8: v.Op = OpAMD64VPANDN128 return true @@ -912,6 +921,9 @@ func rewriteValueAMD64(v *Value) bool { case OpAndNotInt8x32: v.Op = OpAMD64VPANDN256 return true + case OpAndNotInt8x64: + v.Op = OpAMD64VPANDND512 + return true case OpAndNotMaskedInt32x16: return rewriteValueAMD64_OpAndNotMaskedInt32x16(v) case OpAndNotMaskedInt32x4: @@ -939,6 +951,9 @@ func rewriteValueAMD64(v *Value) bool { case OpAndNotUint16x16: v.Op = OpAMD64VPANDN256 return true + case OpAndNotUint16x32: + v.Op = OpAMD64VPANDND512 + return true case OpAndNotUint16x8: v.Op = OpAMD64VPANDN128 return true @@ -966,9 +981,15 @@ func rewriteValueAMD64(v *Value) bool { case OpAndNotUint8x32: v.Op = OpAMD64VPANDN256 return true + case OpAndNotUint8x64: + v.Op = OpAMD64VPANDND512 + return true case OpAndUint16x16: v.Op = OpAMD64VPAND256 return true + case OpAndUint16x32: + v.Op = OpAMD64VPANDD512 + return true case OpAndUint16x8: v.Op = OpAMD64VPAND128 return true @@ -996,6 +1017,9 @@ func rewriteValueAMD64(v *Value) bool { case OpAndUint8x32: v.Op = OpAMD64VPAND256 return true + case OpAndUint8x64: + v.Op = OpAMD64VPANDD512 + return true case OpApproximateReciprocalFloat32x16: v.Op = OpAMD64VRCP14PS512 return true @@ -3274,6 +3298,9 @@ func rewriteValueAMD64(v *Value) bool { case OpOrInt16x16: v.Op = OpAMD64VPOR256 return true + case OpOrInt16x32: + v.Op = OpAMD64VPORD512 + return true case OpOrInt16x8: v.Op = OpAMD64VPOR128 return true @@ -3301,6 +3328,9 @@ func rewriteValueAMD64(v *Value) bool { case OpOrInt8x32: v.Op = OpAMD64VPOR256 return true + case OpOrInt8x64: + v.Op = OpAMD64VPORD512 + return true case OpOrMaskedInt32x16: return rewriteValueAMD64_OpOrMaskedInt32x16(v) case OpOrMaskedInt32x4: @@ -3328,6 +3358,9 @@ func rewriteValueAMD64(v *Value) bool { case OpOrUint16x16: v.Op = OpAMD64VPOR256 return true + case OpOrUint16x32: + v.Op = OpAMD64VPORD512 + return true case OpOrUint16x8: v.Op = OpAMD64VPOR128 return true @@ -3355,6 +3388,9 @@ func rewriteValueAMD64(v *Value) bool { case OpOrUint8x32: v.Op = OpAMD64VPOR256 return true + case OpOrUint8x64: + v.Op = OpAMD64VPORD512 + return true case OpPairDotProdInt16x16: v.Op = OpAMD64VPMADDWD256 return true @@ -5537,6 +5573,9 @@ func rewriteValueAMD64(v *Value) bool { case OpXorInt16x16: v.Op = OpAMD64VPXOR256 return true + case OpXorInt16x32: + v.Op = OpAMD64VPXORD512 + return true case OpXorInt16x8: v.Op = OpAMD64VPXOR128 return true @@ -5564,6 +5603,9 @@ func rewriteValueAMD64(v *Value) bool { case OpXorInt8x32: v.Op = OpAMD64VPXOR256 return true + case OpXorInt8x64: + v.Op = OpAMD64VPXORD512 + return true case OpXorMaskedInt32x16: return rewriteValueAMD64_OpXorMaskedInt32x16(v) case OpXorMaskedInt32x4: @@ -5591,6 +5633,9 @@ func rewriteValueAMD64(v *Value) bool { case OpXorUint16x16: v.Op = OpAMD64VPXOR256 return true + case OpXorUint16x32: + v.Op = OpAMD64VPXORD512 + return true case OpXorUint16x8: v.Op = OpAMD64VPXOR128 return true @@ -5618,6 +5663,9 @@ func 
rewriteValueAMD64(v *Value) bool { case OpXorUint8x32: v.Op = OpAMD64VPXOR256 return true + case OpXorUint8x64: + v.Op = OpAMD64VPXORD512 + return true case OpZero: return rewriteValueAMD64_OpZero(v) case OpZeroExt16to32: diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 12c388ca91301f..7a7367ee1e7503 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -107,8 +107,10 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float64x4.AddSub", opLen2(ssa.OpAddSubFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x16.And", opLen2(ssa.OpAndInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.And", opLen2(ssa.OpAndInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.And", opLen2(ssa.OpAndInt8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.And", opLen2(ssa.OpAndInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.And", opLen2(ssa.OpAndInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.And", opLen2(ssa.OpAndInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x16.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) @@ -117,8 +119,10 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Int64x8.And", opLen2(ssa.OpAndInt64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x16.And", opLen2(ssa.OpAndUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x32.And", opLen2(ssa.OpAndUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.And", opLen2(ssa.OpAndUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint16x8.And", opLen2(ssa.OpAndUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x16.And", opLen2(ssa.OpAndUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.And", opLen2(ssa.OpAndUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint32x4.And", opLen2(ssa.OpAndUint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x8.And", opLen2(ssa.OpAndUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x16.And", opLen2(ssa.OpAndUint32x16, types.TypeVec512), sys.AMD64) @@ -139,8 +143,10 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x8.AndMasked", opLen3(ssa.OpAndMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.AndNot", opLen2_21(ssa.OpAndNotInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.AndNot", opLen2_21(ssa.OpAndNotInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.AndNot", opLen2_21(ssa.OpAndNotInt8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.AndNot", opLen2_21(ssa.OpAndNotInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.AndNot", opLen2_21(ssa.OpAndNotInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.AndNot", opLen2_21(ssa.OpAndNotInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.AndNot", opLen2_21(ssa.OpAndNotInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.AndNot", opLen2_21(ssa.OpAndNotInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x16.AndNot", opLen2_21(ssa.OpAndNotInt32x16, types.TypeVec512), sys.AMD64) @@ -149,8 +155,10 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Int64x8.AndNot", opLen2_21(ssa.OpAndNotInt64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x16.AndNot", opLen2_21(ssa.OpAndNotUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x32.AndNot", opLen2_21(ssa.OpAndNotUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.AndNot", opLen2_21(ssa.OpAndNotUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint16x8.AndNot", opLen2_21(ssa.OpAndNotUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x16.AndNot", opLen2_21(ssa.OpAndNotUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.AndNot", opLen2_21(ssa.OpAndNotUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint32x4.AndNot", opLen2_21(ssa.OpAndNotUint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x8.AndNot", opLen2_21(ssa.OpAndNotUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x16.AndNot", opLen2_21(ssa.OpAndNotUint32x16, types.TypeVec512), sys.AMD64) @@ -978,8 +986,10 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x8.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.Or", opLen2(ssa.OpOrInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Or", opLen2(ssa.OpOrInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.Or", opLen2(ssa.OpOrInt8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.Or", opLen2(ssa.OpOrInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.Or", opLen2(ssa.OpOrInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.Or", opLen2(ssa.OpOrInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x16.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) @@ -988,8 +998,10 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Int64x8.Or", opLen2(ssa.OpOrInt64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x16.Or", opLen2(ssa.OpOrUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x32.Or", opLen2(ssa.OpOrUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.Or", opLen2(ssa.OpOrUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint16x8.Or", opLen2(ssa.OpOrUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x16.Or", opLen2(ssa.OpOrUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.Or", opLen2(ssa.OpOrUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint32x4.Or", opLen2(ssa.OpOrUint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x8.Or", opLen2(ssa.OpOrUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x16.Or", opLen2(ssa.OpOrUint32x16, types.TypeVec512), sys.AMD64) @@ -1784,8 +1796,10 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Int8x64.UnsignedSignedQuadDotProdAccumulateMasked", opLen4_31(ssa.OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.Xor", opLen2(ssa.OpXorInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Xor", opLen2(ssa.OpXorInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.Xor", opLen2(ssa.OpXorInt8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.Xor", opLen2(ssa.OpXorInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.Xor", opLen2(ssa.OpXorInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.Xor", opLen2(ssa.OpXorInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.Xor", opLen2(ssa.OpXorInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.Xor", opLen2(ssa.OpXorInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x16.Xor", opLen2(ssa.OpXorInt32x16, types.TypeVec512), sys.AMD64) @@ -1794,8 +1808,10 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Int64x8.Xor", opLen2(ssa.OpXorInt64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x16.Xor", opLen2(ssa.OpXorUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x32.Xor", opLen2(ssa.OpXorUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.Xor", opLen2(ssa.OpXorUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint16x8.Xor", opLen2(ssa.OpXorUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x16.Xor", opLen2(ssa.OpXorUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.Xor", opLen2(ssa.OpXorUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint32x4.Xor", opLen2(ssa.OpXorUint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x8.Xor", opLen2(ssa.OpXorUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x16.Xor", opLen2(ssa.OpXorUint32x16, types.TypeVec512), sys.AMD64) diff --git a/src/simd/binary_test.go b/src/simd/binary_test.go index 4221e741449f15..b7daf736f4e264 100644 --- a/src/simd/binary_test.go +++ b/src/simd/binary_test.go @@ -230,12 +230,12 @@ func TestAndNot(t *testing.T) { testUint8x32Binary(t, simd.Uint8x32.AndNot, andNotSlice[uint8]) if simd.HasAVX512() { - // testInt8x64Binary(t, simd.Int8x64.AndNot, andNotSlice[int8]) // missing - // testInt16x32Binary(t, simd.Int16x32.AndNot, andNotSlice[int16]) // missing + testInt8x64Binary(t, simd.Int8x64.AndNot, andNotSlice[int8]) + testInt16x32Binary(t, simd.Int16x32.AndNot, andNotSlice[int16]) testInt32x16Binary(t, simd.Int32x16.AndNot, andNotSlice[int32]) testInt64x8Binary(t, simd.Int64x8.AndNot, andNotSlice[int64]) - // testUint8x64Binary(t, simd.Uint8x64.AndNot, andNotSlice[uint8]) // missing - // testUint16x32Binary(t, simd.Uint16x32.AndNot, andNotSlice[uint16]) // missing + testUint8x64Binary(t, simd.Uint8x64.AndNot, andNotSlice[uint8]) + testUint16x32Binary(t, simd.Uint16x32.AndNot, andNotSlice[uint16]) testUint32x16Binary(t, simd.Uint32x16.AndNot, andNotSlice[uint32]) testUint64x8Binary(t, simd.Uint64x8.AndNot, andNotSlice[uint64]) } diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index ea0c598157110d..5776350fe9f136 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -590,6 +590,11 @@ func (x Int8x16) And(y Int8x16) Int8x16 // Asm: VPAND, CPU Feature: AVX2 func (x Int8x32) And(y Int8x32) Int8x32 +// And performs a bitwise AND operation between two vectors. +// +// Asm: VPANDD, CPU Feature: AVX512F +func (x Int8x64) And(y Int8x64) Int8x64 + // And performs a bitwise AND operation between two vectors. // // Asm: VPAND, CPU Feature: AVX @@ -600,6 +605,11 @@ func (x Int16x8) And(y Int16x8) Int16x8 // Asm: VPAND, CPU Feature: AVX2 func (x Int16x16) And(y Int16x16) Int16x16 +// And performs a bitwise AND operation between two vectors. +// +// Asm: VPANDD, CPU Feature: AVX512F +func (x Int16x32) And(y Int16x32) Int16x32 + // And performs a bitwise AND operation between two vectors. // // Asm: VPAND, CPU Feature: AVX @@ -640,6 +650,11 @@ func (x Uint8x16) And(y Uint8x16) Uint8x16 // Asm: VPAND, CPU Feature: AVX2 func (x Uint8x32) And(y Uint8x32) Uint8x32 +// And performs a bitwise AND operation between two vectors. +// +// Asm: VPANDD, CPU Feature: AVX512F +func (x Uint8x64) And(y Uint8x64) Uint8x64 + // And performs a bitwise AND operation between two vectors. 
// // Asm: VPAND, CPU Feature: AVX @@ -650,6 +665,11 @@ func (x Uint16x8) And(y Uint16x8) Uint16x8 // Asm: VPAND, CPU Feature: AVX2 func (x Uint16x16) And(y Uint16x16) Uint16x16 +// And performs a bitwise AND operation between two vectors. +// +// Asm: VPANDD, CPU Feature: AVX512F +func (x Uint16x32) And(y Uint16x32) Uint16x32 + // And performs a bitwise AND operation between two vectors. // // Asm: VPAND, CPU Feature: AVX @@ -778,6 +798,11 @@ func (x Int8x16) AndNot(y Int8x16) Int8x16 // Asm: VPANDN, CPU Feature: AVX2 func (x Int8x32) AndNot(y Int8x32) Int8x32 +// AndNot performs a bitwise x &^ y. +// +// Asm: VPANDND, CPU Feature: AVX512F +func (x Int8x64) AndNot(y Int8x64) Int8x64 + // AndNot performs a bitwise x &^ y. // // Asm: VPANDN, CPU Feature: AVX @@ -788,6 +813,11 @@ func (x Int16x8) AndNot(y Int16x8) Int16x8 // Asm: VPANDN, CPU Feature: AVX2 func (x Int16x16) AndNot(y Int16x16) Int16x16 +// AndNot performs a bitwise x &^ y. +// +// Asm: VPANDND, CPU Feature: AVX512F +func (x Int16x32) AndNot(y Int16x32) Int16x32 + // AndNot performs a bitwise x &^ y. // // Asm: VPANDN, CPU Feature: AVX @@ -828,6 +858,11 @@ func (x Uint8x16) AndNot(y Uint8x16) Uint8x16 // Asm: VPANDN, CPU Feature: AVX2 func (x Uint8x32) AndNot(y Uint8x32) Uint8x32 +// AndNot performs a bitwise x &^ y. +// +// Asm: VPANDND, CPU Feature: AVX512F +func (x Uint8x64) AndNot(y Uint8x64) Uint8x64 + // AndNot performs a bitwise x &^ y. // // Asm: VPANDN, CPU Feature: AVX @@ -838,6 +873,11 @@ func (x Uint16x8) AndNot(y Uint16x8) Uint16x8 // Asm: VPANDN, CPU Feature: AVX2 func (x Uint16x16) AndNot(y Uint16x16) Uint16x16 +// AndNot performs a bitwise x &^ y. +// +// Asm: VPANDND, CPU Feature: AVX512F +func (x Uint16x32) AndNot(y Uint16x32) Uint16x32 + // AndNot performs a bitwise x &^ y. // // Asm: VPANDN, CPU Feature: AVX @@ -6183,6 +6223,11 @@ func (x Int8x16) Or(y Int8x16) Int8x16 // Asm: VPOR, CPU Feature: AVX2 func (x Int8x32) Or(y Int8x32) Int8x32 +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPORD, CPU Feature: AVX512F +func (x Int8x64) Or(y Int8x64) Int8x64 + // Or performs a bitwise OR operation between two vectors. // // Asm: VPOR, CPU Feature: AVX @@ -6193,6 +6238,11 @@ func (x Int16x8) Or(y Int16x8) Int16x8 // Asm: VPOR, CPU Feature: AVX2 func (x Int16x16) Or(y Int16x16) Int16x16 +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPORD, CPU Feature: AVX512F +func (x Int16x32) Or(y Int16x32) Int16x32 + // Or performs a bitwise OR operation between two vectors. // // Asm: VPOR, CPU Feature: AVX @@ -6233,6 +6283,11 @@ func (x Uint8x16) Or(y Uint8x16) Uint8x16 // Asm: VPOR, CPU Feature: AVX2 func (x Uint8x32) Or(y Uint8x32) Uint8x32 +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPORD, CPU Feature: AVX512F +func (x Uint8x64) Or(y Uint8x64) Uint8x64 + // Or performs a bitwise OR operation between two vectors. // // Asm: VPOR, CPU Feature: AVX @@ -6243,6 +6298,11 @@ func (x Uint16x8) Or(y Uint16x8) Uint16x8 // Asm: VPOR, CPU Feature: AVX2 func (x Uint16x16) Or(y Uint16x16) Uint16x16 +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPORD, CPU Feature: AVX512F +func (x Uint16x32) Or(y Uint16x32) Uint16x32 + // Or performs a bitwise OR operation between two vectors. // // Asm: VPOR, CPU Feature: AVX @@ -11867,6 +11927,11 @@ func (x Int8x16) Xor(y Int8x16) Int8x16 // Asm: VPXOR, CPU Feature: AVX2 func (x Int8x32) Xor(y Int8x32) Int8x32 +// Xor performs a bitwise XOR operation between two vectors. 
+// +// Asm: VPXORD, CPU Feature: AVX512F +func (x Int8x64) Xor(y Int8x64) Int8x64 + // Xor performs a bitwise XOR operation between two vectors. // // Asm: VPXOR, CPU Feature: AVX @@ -11877,6 +11942,11 @@ func (x Int16x8) Xor(y Int16x8) Int16x8 // Asm: VPXOR, CPU Feature: AVX2 func (x Int16x16) Xor(y Int16x16) Int16x16 +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXORD, CPU Feature: AVX512F +func (x Int16x32) Xor(y Int16x32) Int16x32 + // Xor performs a bitwise XOR operation between two vectors. // // Asm: VPXOR, CPU Feature: AVX @@ -11917,6 +11987,11 @@ func (x Uint8x16) Xor(y Uint8x16) Uint8x16 // Asm: VPXOR, CPU Feature: AVX2 func (x Uint8x32) Xor(y Uint8x32) Uint8x32 +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXORD, CPU Feature: AVX512F +func (x Uint8x64) Xor(y Uint8x64) Uint8x64 + // Xor performs a bitwise XOR operation between two vectors. // // Asm: VPXOR, CPU Feature: AVX @@ -11927,6 +12002,11 @@ func (x Uint16x8) Xor(y Uint16x8) Uint16x8 // Asm: VPXOR, CPU Feature: AVX2 func (x Uint16x16) Xor(y Uint16x16) Uint16x16 +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXORD, CPU Feature: AVX512F +func (x Uint16x32) Xor(y Uint16x32) Uint16x32 + // Xor performs a bitwise XOR operation between two vectors. // // Asm: VPXOR, CPU Feature: AVX From d375b95357fdf8cdfec722b3672dcc425acf10ad Mon Sep 17 00:00:00 2001 From: David Chase Date: Fri, 25 Jul 2025 15:18:11 -0400 Subject: [PATCH 110/139] [dev.simd] simd: move lots of slice functions and methods to generated code Lots of handwritten/stenciled code is now untouched by human hands For certain combinations of operation-arity and type, there is an option to use a flaky version of a test helper, that only requires "close enough". For example: testFloat32x4TernaryFlaky(t, simd.Float32x4.FusedMultiplyAdd, fmaSlice[float32], 0.001) Some of the quirkier operations have their behavior captured in their test-simulation, for example, ceilResidue regards infinities as integers (therefore their residue is zero). 
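A minimal sketch of the relaxed "close enough" comparison this enables, under the assumption that the tolerance is applied as a relative error per element; the helper name closeEnough below is illustrative and not part of this CL. The tolerance corresponds to the extra float64 argument (0.0 for an exact match) now threaded through checkSlicesLogInput in the generated helpers:

	// closeEnough is a hypothetical per-element check: with tol == 0 it
	// demands exact equality, otherwise it accepts a relative error of
	// up to tol against the expected value.
	func closeEnough(got, want, tol float64) bool {
		d := got - want
		if d < 0 {
			d = -d
		}
		m := want
		if m < 0 {
			m = -m
		}
		return d <= tol*m
	}
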
Change-Id: I8242914e5ab399edbe226da8586988441cffa83f Reviewed-on: https://go-review.googlesource.com/c/go/+/690575 LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui --- src/simd/binary_helpers_test.go | 60 +- src/simd/compare_helpers_test.go | 60 +- src/simd/comparemasked_helpers_test.go | 60 +- src/simd/genfiles.go | 238 +++++++- src/simd/helpers_test.go | 32 +- src/simd/simulation_helpers_test.go | 20 +- src/simd/slice_amd64.go | 808 +++++++++++++++++++++++++ src/simd/slicepart_amd64.go | 506 +--------------- src/simd/slicepart_test.go | 2 +- src/simd/ternary_helpers_test.go | 111 +++- src/simd/ternary_test.go | 6 +- src/simd/unary_helpers_test.go | 162 +++-- src/simd/unary_test.go | 18 + src/simd/unsafe_helpers.go | 217 +++++++ 14 files changed, 1624 insertions(+), 676 deletions(-) create mode 100644 src/simd/unsafe_helpers.go diff --git a/src/simd/binary_helpers_test.go b/src/simd/binary_helpers_test.go index fbf31beb7c8e6b..82cf784bcac72e 100644 --- a/src/simd/binary_helpers_test.go +++ b/src/simd/binary_helpers_test.go @@ -24,7 +24,7 @@ func testInt8x16Binary(t *testing.T, f func(_, _ simd.Int8x16) simd.Int8x16, wan g := make([]int8, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -39,7 +39,7 @@ func testInt16x8Binary(t *testing.T, f func(_, _ simd.Int16x8) simd.Int16x8, wan g := make([]int16, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -54,7 +54,7 @@ func testInt32x4Binary(t *testing.T, f func(_, _ simd.Int32x4) simd.Int32x4, wan g := make([]int32, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -69,7 +69,7 @@ func testInt64x2Binary(t *testing.T, f func(_, _ simd.Int64x2) simd.Int64x2, wan g := make([]int64, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -84,7 +84,7 @@ func testUint8x16Binary(t *testing.T, f func(_, _ simd.Uint8x16) simd.Uint8x16, g := make([]uint8, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -99,7 +99,7 @@ func testUint16x8Binary(t *testing.T, f func(_, _ simd.Uint16x8) simd.Uint16x8, g := make([]uint16, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -114,7 +114,7 @@ func testUint32x4Binary(t *testing.T, f func(_, _ simd.Uint32x4) simd.Uint32x4, g := make([]uint32, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { 
t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -129,7 +129,7 @@ func testUint64x2Binary(t *testing.T, f func(_, _ simd.Uint64x2) simd.Uint64x2, g := make([]uint64, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -144,7 +144,7 @@ func testFloat32x4Binary(t *testing.T, f func(_, _ simd.Float32x4) simd.Float32x g := make([]float32, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -159,7 +159,7 @@ func testFloat64x2Binary(t *testing.T, f func(_, _ simd.Float64x2) simd.Float64x g := make([]float64, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -174,7 +174,7 @@ func testInt8x32Binary(t *testing.T, f func(_, _ simd.Int8x32) simd.Int8x32, wan g := make([]int8, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -189,7 +189,7 @@ func testInt16x16Binary(t *testing.T, f func(_, _ simd.Int16x16) simd.Int16x16, g := make([]int16, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -204,7 +204,7 @@ func testInt32x8Binary(t *testing.T, f func(_, _ simd.Int32x8) simd.Int32x8, wan g := make([]int32, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -219,7 +219,7 @@ func testInt64x4Binary(t *testing.T, f func(_, _ simd.Int64x4) simd.Int64x4, wan g := make([]int64, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -234,7 +234,7 @@ func testUint8x32Binary(t *testing.T, f func(_, _ simd.Uint8x32) simd.Uint8x32, g := make([]uint8, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -249,7 +249,7 @@ func testUint16x16Binary(t *testing.T, f func(_, _ simd.Uint16x16) simd.Uint16x1 g := make([]uint16, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -264,7 +264,7 @@ func testUint32x8Binary(t *testing.T, f func(_, _ simd.Uint32x8) simd.Uint32x8, g := make([]uint32, n) f(a, b).StoreSlice(g) w := want(x, y) - return 
checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -279,7 +279,7 @@ func testUint64x4Binary(t *testing.T, f func(_, _ simd.Uint64x4) simd.Uint64x4, g := make([]uint64, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -294,7 +294,7 @@ func testFloat32x8Binary(t *testing.T, f func(_, _ simd.Float32x8) simd.Float32x g := make([]float32, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -309,7 +309,7 @@ func testFloat64x4Binary(t *testing.T, f func(_, _ simd.Float64x4) simd.Float64x g := make([]float64, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -324,7 +324,7 @@ func testInt8x64Binary(t *testing.T, f func(_, _ simd.Int8x64) simd.Int8x64, wan g := make([]int8, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -339,7 +339,7 @@ func testInt16x32Binary(t *testing.T, f func(_, _ simd.Int16x32) simd.Int16x32, g := make([]int16, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -354,7 +354,7 @@ func testInt32x16Binary(t *testing.T, f func(_, _ simd.Int32x16) simd.Int32x16, g := make([]int32, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -369,7 +369,7 @@ func testInt64x8Binary(t *testing.T, f func(_, _ simd.Int64x8) simd.Int64x8, wan g := make([]int64, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -384,7 +384,7 @@ func testUint8x64Binary(t *testing.T, f func(_, _ simd.Uint8x64) simd.Uint8x64, g := make([]uint8, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -399,7 +399,7 @@ func testUint16x32Binary(t *testing.T, f func(_, _ simd.Uint16x32) simd.Uint16x3 g := make([]uint16, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -414,7 +414,7 @@ func 
testUint32x16Binary(t *testing.T, f func(_, _ simd.Uint32x16) simd.Uint32x1 g := make([]uint32, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -429,7 +429,7 @@ func testUint64x8Binary(t *testing.T, f func(_, _ simd.Uint64x8) simd.Uint64x8, g := make([]uint64, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -444,7 +444,7 @@ func testFloat32x16Binary(t *testing.T, f func(_, _ simd.Float32x16) simd.Float3 g := make([]float32, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -459,6 +459,6 @@ func testFloat64x8Binary(t *testing.T, f func(_, _ simd.Float64x8) simd.Float64x g := make([]float64, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } diff --git a/src/simd/compare_helpers_test.go b/src/simd/compare_helpers_test.go index e6d7c82c8fe69f..aef703c66a06d1 100644 --- a/src/simd/compare_helpers_test.go +++ b/src/simd/compare_helpers_test.go @@ -24,7 +24,7 @@ func testInt8x16Compare(t *testing.T, f func(_, _ simd.Int8x16) simd.Mask8x16, w g := make([]int8, n) f(a, b).AsInt8x16().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -39,7 +39,7 @@ func testInt16x8Compare(t *testing.T, f func(_, _ simd.Int16x8) simd.Mask16x8, w g := make([]int16, n) f(a, b).AsInt16x8().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -54,7 +54,7 @@ func testInt32x4Compare(t *testing.T, f func(_, _ simd.Int32x4) simd.Mask32x4, w g := make([]int32, n) f(a, b).AsInt32x4().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -69,7 +69,7 @@ func testInt64x2Compare(t *testing.T, f func(_, _ simd.Int64x2) simd.Mask64x2, w g := make([]int64, n) f(a, b).AsInt64x2().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -84,7 +84,7 @@ func testUint8x16Compare(t *testing.T, f func(_, _ simd.Uint8x16) simd.Mask8x16, g := make([]int8, n) f(a, b).AsInt8x16().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { 
t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -99,7 +99,7 @@ func testUint16x8Compare(t *testing.T, f func(_, _ simd.Uint16x8) simd.Mask16x8, g := make([]int16, n) f(a, b).AsInt16x8().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -114,7 +114,7 @@ func testUint32x4Compare(t *testing.T, f func(_, _ simd.Uint32x4) simd.Mask32x4, g := make([]int32, n) f(a, b).AsInt32x4().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -129,7 +129,7 @@ func testUint64x2Compare(t *testing.T, f func(_, _ simd.Uint64x2) simd.Mask64x2, g := make([]int64, n) f(a, b).AsInt64x2().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -144,7 +144,7 @@ func testFloat32x4Compare(t *testing.T, f func(_, _ simd.Float32x4) simd.Mask32x g := make([]int32, n) f(a, b).AsInt32x4().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -159,7 +159,7 @@ func testFloat64x2Compare(t *testing.T, f func(_, _ simd.Float64x2) simd.Mask64x g := make([]int64, n) f(a, b).AsInt64x2().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -174,7 +174,7 @@ func testInt8x32Compare(t *testing.T, f func(_, _ simd.Int8x32) simd.Mask8x32, w g := make([]int8, n) f(a, b).AsInt8x32().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -189,7 +189,7 @@ func testInt16x16Compare(t *testing.T, f func(_, _ simd.Int16x16) simd.Mask16x16 g := make([]int16, n) f(a, b).AsInt16x16().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -204,7 +204,7 @@ func testInt32x8Compare(t *testing.T, f func(_, _ simd.Int32x8) simd.Mask32x8, w g := make([]int32, n) f(a, b).AsInt32x8().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -219,7 +219,7 @@ func testInt64x4Compare(t *testing.T, f func(_, _ simd.Int64x4) simd.Mask64x4, w g := make([]int64, n) f(a, b).AsInt64x4().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); 
t.Logf("y=%v", y) }) }) } @@ -234,7 +234,7 @@ func testUint8x32Compare(t *testing.T, f func(_, _ simd.Uint8x32) simd.Mask8x32, g := make([]int8, n) f(a, b).AsInt8x32().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -249,7 +249,7 @@ func testUint16x16Compare(t *testing.T, f func(_, _ simd.Uint16x16) simd.Mask16x g := make([]int16, n) f(a, b).AsInt16x16().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -264,7 +264,7 @@ func testUint32x8Compare(t *testing.T, f func(_, _ simd.Uint32x8) simd.Mask32x8, g := make([]int32, n) f(a, b).AsInt32x8().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -279,7 +279,7 @@ func testUint64x4Compare(t *testing.T, f func(_, _ simd.Uint64x4) simd.Mask64x4, g := make([]int64, n) f(a, b).AsInt64x4().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -294,7 +294,7 @@ func testFloat32x8Compare(t *testing.T, f func(_, _ simd.Float32x8) simd.Mask32x g := make([]int32, n) f(a, b).AsInt32x8().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -309,7 +309,7 @@ func testFloat64x4Compare(t *testing.T, f func(_, _ simd.Float64x4) simd.Mask64x g := make([]int64, n) f(a, b).AsInt64x4().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -324,7 +324,7 @@ func testInt8x64Compare(t *testing.T, f func(_, _ simd.Int8x64) simd.Mask8x64, w g := make([]int8, n) f(a, b).AsInt8x64().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -339,7 +339,7 @@ func testInt16x32Compare(t *testing.T, f func(_, _ simd.Int16x32) simd.Mask16x32 g := make([]int16, n) f(a, b).AsInt16x32().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -354,7 +354,7 @@ func testInt32x16Compare(t *testing.T, f func(_, _ simd.Int32x16) simd.Mask32x16 g := make([]int32, n) f(a, b).AsInt32x16().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ 
-369,7 +369,7 @@ func testInt64x8Compare(t *testing.T, f func(_, _ simd.Int64x8) simd.Mask64x8, w g := make([]int64, n) f(a, b).AsInt64x8().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -384,7 +384,7 @@ func testUint8x64Compare(t *testing.T, f func(_, _ simd.Uint8x64) simd.Mask8x64, g := make([]int8, n) f(a, b).AsInt8x64().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -399,7 +399,7 @@ func testUint16x32Compare(t *testing.T, f func(_, _ simd.Uint16x32) simd.Mask16x g := make([]int16, n) f(a, b).AsInt16x32().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -414,7 +414,7 @@ func testUint32x16Compare(t *testing.T, f func(_, _ simd.Uint32x16) simd.Mask32x g := make([]int32, n) f(a, b).AsInt32x16().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -429,7 +429,7 @@ func testUint64x8Compare(t *testing.T, f func(_, _ simd.Uint64x8) simd.Mask64x8, g := make([]int64, n) f(a, b).AsInt64x8().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -444,7 +444,7 @@ func testFloat32x16Compare(t *testing.T, f func(_, _ simd.Float32x16) simd.Mask3 g := make([]int32, n) f(a, b).AsInt32x16().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -459,6 +459,6 @@ func testFloat64x8Compare(t *testing.T, f func(_, _ simd.Float64x8) simd.Mask64x g := make([]int64, n) f(a, b).AsInt64x8().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } diff --git a/src/simd/comparemasked_helpers_test.go b/src/simd/comparemasked_helpers_test.go index 0baba27e544278..542145c11e173d 100644 --- a/src/simd/comparemasked_helpers_test.go +++ b/src/simd/comparemasked_helpers_test.go @@ -33,7 +33,7 @@ func testInt8x16CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -57,7 +57,7 @@ func testInt16x8CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { 
t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -81,7 +81,7 @@ func testInt32x4CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -105,7 +105,7 @@ func testInt64x2CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -129,7 +129,7 @@ func testUint8x16CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -153,7 +153,7 @@ func testUint16x8CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -177,7 +177,7 @@ func testUint32x4CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -201,7 +201,7 @@ func testUint64x2CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -225,7 +225,7 @@ func testFloat32x4CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -249,7 +249,7 @@ func testFloat64x2CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -273,7 +273,7 @@ func testInt8x32CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -297,7 +297,7 @@ func testInt16x16CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -321,7 +321,7 @@ func testInt32x8CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); 
t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -345,7 +345,7 @@ func testInt64x4CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -369,7 +369,7 @@ func testUint8x32CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -393,7 +393,7 @@ func testUint16x16CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -417,7 +417,7 @@ func testUint32x8CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -441,7 +441,7 @@ func testUint64x4CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -465,7 +465,7 @@ func testFloat32x8CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -489,7 +489,7 @@ func testFloat64x4CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -513,7 +513,7 @@ func testInt8x64CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -537,7 +537,7 @@ func testInt16x32CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -561,7 +561,7 @@ func testInt32x16CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -585,7 +585,7 @@ 
func testInt64x8CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -609,7 +609,7 @@ func testUint8x64CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -633,7 +633,7 @@ func testUint16x32CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -657,7 +657,7 @@ func testUint32x16CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -681,7 +681,7 @@ func testUint64x8CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -705,7 +705,7 @@ func testFloat32x16CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -729,6 +729,6 @@ func testFloat64x8CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } diff --git a/src/simd/genfiles.go b/src/simd/genfiles.go index 76f16392e67103..269659a65350e5 100644 --- a/src/simd/genfiles.go +++ b/src/simd/genfiles.go @@ -10,6 +10,7 @@ package main // slice operations and tests import ( + "bufio" "bytes" "flag" "fmt" @@ -44,6 +45,37 @@ var allShapes = &shapes{ // these are the shapes that are currently converted to int32 // (not all conversions are available, yet) var convert32Shapes = &shapes{ + + vecs: []int{128, 256, 512}, + floats: []int{32}, +} + +var avx512MaskedLoadShapes = &shapes{ + vecs: []int{512}, + ints: []int{8, 16, 32, 64}, + uints: []int{8, 16, 32, 64}, + floats: []int{32, 64}, +} + +var avx2MaskedLoadShapes = &shapes{ + vecs: []int{128, 256}, + ints: []int{32, 64}, + uints: []int{32, 64}, + floats: []int{32, 64}, +} + +var avx2SmallLoadPunShapes = &shapes{ + // ints are done by hand, these are type-punned to int. 
+ vecs: []int{128, 256}, + uints: []int{8, 16}, +} + +var unaryFlaky = &shapes{ + vecs: []int{128, 256, 512}, + floats: []int{32, 64}, +} + +var ternaryFlaky = &shapes{ vecs: []int{128, 256, 512}, floats: []int{32}, } @@ -61,6 +93,7 @@ func oneTemplate(t *template.Template, baseType string, width, count int, out io if strings.Contains("aeiou", baseType[:1]) { aOrAn = "an" } + oxFF := fmt.Sprintf("0x%x", uint64((1<= {{.Count}} { + return Load{{.Vec}}Slice(s) + } + if l == 0 { + var x {{.Vec}} + return x + } + + mask := Mask{{.WxC}}FromBits({{.OxFF}} >> ({{.Count}} - l)) + return LoadMasked{{.Vec}}(pa{{.Vec}}(s), mask) +} + +// StoreSlicePart stores the {{.Count}} elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has {{.Count}} or more elements, the method is equivalent to x.StoreSlice. +func (x {{.Vec}}) StoreSlicePart(s []{{.Type}}) { + l := len(s) + if l >= {{.Count}} { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := Mask{{.WxC}}FromBits({{.OxFF}} >> ({{.Count}} - l)) + x.StoreMasked(pa{{.Vec}}(s), mask) +} +`) + +var avx2MaskedLoadSlicePartTemplate = shapedTemplateOf(avx2MaskedLoadShapes, "avx 2 load slice part", ` +// Load{{.Vec}}SlicePart loads a {{.Vec}} from the slice s. +// If s has fewer than {{.Count}} elements, the remaining elements of the vector are filled with zeroes. +// If s has {{.Count}} or more elements, the function is equivalent to Load{{.Vec}}Slice. +func Load{{.Vec}}SlicePart(s []{{.Type}}) {{.Vec}} { + l := len(s) + if l >= {{.Count}} { + return Load{{.Vec}}Slice(s) + } + if l == 0 { + var x {{.Vec}} + return x + } + mask := vecMask{{.Width}}[len(vecMask{{.Width}})/2-l:] + return LoadMasked{{.Vec}}(pa{{.Vec}}(s), LoadInt{{.WxC}}Slice(mask).AsMask{{.WxC}}()) +} + +// StoreSlicePart stores the {{.Count}} elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has {{.Count}} or more elements, the method is equivalent to x.StoreSlice. +func (x {{.Vec}}) StoreSlicePart(s []{{.Type}}) { + l := len(s) + if l >= {{.Count}} { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask{{.Width}}[len(vecMask{{.Width}})/2-l:] + x.StoreMasked(pa{{.Vec}}(s), LoadInt{{.WxC}}Slice(mask).AsMask{{.WxC}}()) +} +`) + +var avx2SmallLoadSlicePartTemplate = shapedTemplateOf(avx2SmallLoadPunShapes, "avx 2 small load slice part", ` +// Load{{.Vec}}SlicePart loads a {{.Vec}} from the slice s. +// If s has fewer than {{.Count}} elements, the remaining elements of the vector are filled with zeroes. +// If s has {{.Count}} or more elements, the function is equivalent to Load{{.Vec}}Slice. +func Load{{.Vec}}SlicePart(s []{{.Type}}) {{.Vec}} { + if len(s) == 0 { + var zero {{.Vec}} + return zero + } + t := unsafe.Slice((*int{{.Width}})(unsafe.Pointer(&s[0])), len(s)) + return LoadInt{{.WxC}}SlicePart(t).As{{.Vec}}() +} + +// StoreSlicePart stores the {{.Count}} elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has {{.Count}} or more elements, the method is equivalent to x.StoreSlice. +func (x {{.Vec}}) StoreSlicePart(s []{{.Type}}) { + if len(s) == 0 { + return + } + t := unsafe.Slice((*int{{.Width}})(unsafe.Pointer(&s[0])), len(s)) + x.AsInt{{.WxC}}().StoreSlicePart(t) +} +`) + +var unsafePATemplate = templateOf("unsafe PA helper", ` +// pa{{.Vec}} returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. 
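(Note: the cast this template generates is safe only because every caller pairs it with a masked load or store; masked-off lanes of an AVX2/AVX-512 masked move are not accessed and cannot fault, as the older hand-written paInt32x4 comment further down also states, so the out-of-bounds tail of the pointed-to array is never touched. A small illustration, with the 3-element slice length chosen here only for the example and paInt32x4 as one instance the template produces:

	// s has only 3 elements, but the *[4]int32 view is never read past them,
	// because the mask m enables lanes 0..2 only.
	v := LoadMaskedInt32x4(paInt32x4(s), m)
)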
+func pa{{.Vec}}(s []{{.Type}}) *[{{.Count}}]{{.Type}} { + return (*[{{.Count}}]{{.Type}})(unsafe.Pointer(&s[0])) +} +`) + func main() { sl := flag.String("sl", "slice_amd64.go", "file name for slice operations") + ush := flag.String("ush", "unsafe_helpers.go", "file name for unsafe helpers") bh := flag.String("bh", "binary_helpers_test.go", "file name for binary test helpers") uh := flag.String("uh", "unary_helpers_test.go", "file name for unary test helpers") th := flag.String("th", "ternary_helpers_test.go", "file name for ternary test helpers") @@ -308,16 +487,19 @@ func main() { flag.Parse() if *sl != "" { - one(*sl, prologue, sliceTemplate) + one(*sl, prologue, sliceTemplate, avx512MaskedLoadSlicePartTemplate, avx2MaskedLoadSlicePartTemplate, avx2SmallLoadSlicePartTemplate) + } + if *ush != "" { + one(*ush, unsafePrologue, unsafePATemplate) } if *uh != "" { - one(*uh, curryTestPrologue("unary simd methods"), unaryTemplate, unaryTemplateToInt32, unaryTemplateToUint32) + one(*uh, curryTestPrologue("unary simd methods"), unaryTemplate, unaryTemplateToInt32, unaryTemplateToUint32, unaryFlakyTemplate) } if *bh != "" { one(*bh, curryTestPrologue("binary simd methods"), binaryTemplate) } if *th != "" { - one(*th, curryTestPrologue("ternary simd methods"), ternaryTemplate) + one(*th, curryTestPrologue("ternary simd methods"), ternaryTemplate, ternaryFlakyTemplate) } if *ch != "" { one(*ch, curryTestPrologue("simd methods that compare two operands"), compareTemplate) @@ -327,6 +509,18 @@ func main() { } } +// numberLines takes a slice of bytes, and returns a string where each line +// is numbered, starting from 1. +func numberLines(data []byte) string { + var buf bytes.Buffer + r := bytes.NewReader(data) + s := bufio.NewScanner(r) + for i := 1; s.Scan(); i++ { + fmt.Fprintf(&buf, "%d: %s\n", i, s.Text()) + } + return buf.String() +} + func one(filename string, prologue func(s string, out io.Writer), sats ...shapeAndTemplate) { if filename == "" { return @@ -352,7 +546,9 @@ func one(filename string, prologue func(s string, out io.Writer), sats ...shapeA b, err := format.Source(out.Bytes()) if err != nil { - fmt.Fprintf(os.Stderr, "There was a problem formatting the generated code for %s, %v", filename, err) + fmt.Fprintf(os.Stderr, "There was a problem formatting the generated code for %s, %v\n", filename, err) + fmt.Fprintf(os.Stderr, "%s\n", numberLines(out.Bytes())) + fmt.Fprintf(os.Stderr, "There was a problem formatting the generated code for %s, %v\n", filename, err) os.Exit(1) } else { ofile.Write(b) diff --git a/src/simd/helpers_test.go b/src/simd/helpers_test.go index 14490a84b2a9e0..6c681abe98c6d7 100644 --- a/src/simd/helpers_test.go +++ b/src/simd/helpers_test.go @@ -29,14 +29,14 @@ type number interface { func checkSlices[T number](t *testing.T, got, want []T) bool { t.Helper() - return checkSlicesLogInput[T](t, got, want, nil) + return checkSlicesLogInput[T](t, got, want, 0.0, nil) } // checkSlices compares two slices for equality, // reporting a test error if there is a problem, // and also consumes the two slices so that a // test/benchmark won't be dead-code eliminated. 
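(Note: the signature change below threads a flakiness tolerance through the slice comparison; a result is accepted when its relative error against the expected value is below flakiness, falling back to an absolute bound when the expected value is zero. Concrete numbers, chosen here purely for illustration:

	// with flakiness = 1e-6:
	//   got = 1.0000005, want = 1.0 -> |got-want|/|want| = 5e-7 < 1e-6  => accepted
	//   got = 1.001,     want = 1.0 -> 1e-3 >= 1e-6                     => reported as a mismatch
	//   got = 1e-7,      want = 0   -> |got| < 1e-6                     => accepted (absolute bound when want == 0)
)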
-func checkSlicesLogInput[T number](t *testing.T, got, want []T, logInput func()) bool { +func checkSlicesLogInput[T number](t *testing.T, got, want []T, flakiness float64, logInput func()) bool { t.Helper() var z T for i := range want { @@ -49,11 +49,32 @@ func checkSlicesLogInput[T number](t *testing.T, got, want []T, logInput func()) if math.IsNaN(float64(x)) && math.IsNaN(float64(y)) { continue } + if flakiness > 0 { + if y == 0 { + if math.Abs(float64(x)) < flakiness { + continue + } + } else { + if math.Abs(float64((x-y)/y)) < flakiness { + continue + } + } + } case float64: y := ib.(float64) if math.IsNaN(x) && math.IsNaN(y) { continue } + if flakiness > 0 { + if y == 0 { + if math.Abs(x) < flakiness { + continue + } + } else if math.Abs((x-y)/y) < flakiness { + continue + } + } + default: } @@ -227,13 +248,16 @@ const ( ) var zero = 0.0 +var nzero = -zero +var inf = 1 / zero +var ninf = -1 / zero var nan = math.NaN() // N controls how large the test vectors are const N = 144 -var float32s = nOf(N, []float32{1, float32(nan), float32(zero), 2, float32(nan), float32(zero), 3, float32(-zero), float32(1 / zero), float32(-1 / zero), 1 / 2, 1 / 4, 1 / 8, 1 / 1000, 1 / 1000000, 1, -1, 0, 2, -2, 3, -3, math.MaxFloat32, 1 / math.MaxFloat32, 10, -10, 100, 20, -20, 300, -300, -4000, -80, -160, -3200, -64, -4, -8, -16, -32, -64}) -var float64s = nOf(N, []float64{nan, zero, -zero, 1 / zero, -1 / zero, 1 / 1000, 1 / 1000000, 1, -1, 0, 2, -2, 3, -3, math.MaxFloat64, 1 / math.MaxFloat64, 10, -10, 100, 20, -20, 300, -300, -4000, -80, -16, -32, -64}) +var float32s = nOf(N, []float32{float32(inf), float32(ninf), 1, float32(nan), float32(zero), 2, float32(nan), float32(zero), 3, float32(-zero), float32(1.0 / zero), float32(-1.0 / zero), 1.0 / 2, 1.0 / 4, 1.0 / 8, 1.0 / 1000, 1.0 / 1000000, 1, -1, 0, 2, -2, 3, -3, math.MaxFloat32, 1 / math.MaxFloat32, 10, -10, 100, 20, -20, 300, -300, -4000, -80, -160, -3200, -64, -4, -8, -16, -32, -64}) +var float64s = nOf(N, []float64{inf, ninf, nan, zero, -zero, 1 / zero, -1 / zero, 0.0001, 0.0000001, 1, -1, 0, 2, -2, 3, -3, math.MaxFloat64, 1.0 / math.MaxFloat64, 10, -10, 100, 20, -20, 300, -300, -4000, -80, -16, -32, -64}) var int32s = nOf(N, []int32{1, -1, 0, 2, 4, 8, 1024, 0xffffff, -0xffffff, 0x55555, 0x77777, 0xccccc, -0x55555, -0x77777, -0xccccc, -4, -8, -16, -32, -64}) var uint32s = nOf(N, []uint32{1, 0, 2, 4, 8, 1024, 0xffffff, ^uint32(0xffffff), 0x55555, 0x77777, 0xccccc, ^uint32(0x55555), ^uint32(0x77777), ^uint32(0xccccc)}) diff --git a/src/simd/simulation_helpers_test.go b/src/simd/simulation_helpers_test.go index ec3d7952490de0..8677216d9fa2c4 100644 --- a/src/simd/simulation_helpers_test.go +++ b/src/simd/simulation_helpers_test.go @@ -6,7 +6,9 @@ package simd_test -import "math" +import ( + "math" +) func less[T number](x, y T) bool { return x < y @@ -124,6 +126,22 @@ func toUint32[T number](x T) uint32 { return uint32(x) } +func ceilResidueForPrecision[T float](i int) func(T) T { + f := 1.0 + for i > 0 { + f *= 2 + i-- + } + return func(x T) T { + y := float64(x) + if math.IsInf(float64(x*T(f)), 0) { + return 0 + } + // TODO sort out the rounding issues when T === float32 + return T(y - math.Ceil(y*f)/f) + } +} + // Slice versions of all these elementwise operations func addSlice[T number](x, y []T) []T { diff --git a/src/simd/slice_amd64.go b/src/simd/slice_amd64.go index ad7bce8964ddcd..bd1d4f153089de 100644 --- a/src/simd/slice_amd64.go +++ b/src/simd/slice_amd64.go @@ -4,6 +4,8 @@ package simd +import "unsafe" + // LoadInt8x16Slice loads an 
Int8x16 from a slice of at least 16 int8s func LoadInt8x16Slice(s []int8) Int8x16 { return LoadInt8x16((*[16]int8)(s)) @@ -303,3 +305,809 @@ func LoadFloat64x8Slice(s []float64) Float64x8 { func (x Float64x8) StoreSlice(s []float64) { x.Store((*[8]float64)(s)) } + +// LoadInt8x64SlicePart loads a Int8x64 from the slice s. +// If s has fewer than 64 elements, the remaining elements of the vector are filled with zeroes. +// If s has 64 or more elements, the function is equivalent to LoadInt8x64Slice. +func LoadInt8x64SlicePart(s []int8) Int8x64 { + l := len(s) + if l >= 64 { + return LoadInt8x64Slice(s) + } + if l == 0 { + var x Int8x64 + return x + } + + mask := Mask8x64FromBits(0xffffffffffffffff >> (64 - l)) + return LoadMaskedInt8x64(paInt8x64(s), mask) +} + +// StoreSlicePart stores the 64 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 64 or more elements, the method is equivalent to x.StoreSlice. +func (x Int8x64) StoreSlicePart(s []int8) { + l := len(s) + if l >= 64 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := Mask8x64FromBits(0xffffffffffffffff >> (64 - l)) + x.StoreMasked(paInt8x64(s), mask) +} + +// LoadInt16x32SlicePart loads a Int16x32 from the slice s. +// If s has fewer than 32 elements, the remaining elements of the vector are filled with zeroes. +// If s has 32 or more elements, the function is equivalent to LoadInt16x32Slice. +func LoadInt16x32SlicePart(s []int16) Int16x32 { + l := len(s) + if l >= 32 { + return LoadInt16x32Slice(s) + } + if l == 0 { + var x Int16x32 + return x + } + + mask := Mask16x32FromBits(0xffffffff >> (32 - l)) + return LoadMaskedInt16x32(paInt16x32(s), mask) +} + +// StoreSlicePart stores the 32 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 32 or more elements, the method is equivalent to x.StoreSlice. +func (x Int16x32) StoreSlicePart(s []int16) { + l := len(s) + if l >= 32 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := Mask16x32FromBits(0xffffffff >> (32 - l)) + x.StoreMasked(paInt16x32(s), mask) +} + +// LoadInt32x16SlicePart loads a Int32x16 from the slice s. +// If s has fewer than 16 elements, the remaining elements of the vector are filled with zeroes. +// If s has 16 or more elements, the function is equivalent to LoadInt32x16Slice. +func LoadInt32x16SlicePart(s []int32) Int32x16 { + l := len(s) + if l >= 16 { + return LoadInt32x16Slice(s) + } + if l == 0 { + var x Int32x16 + return x + } + + mask := Mask32x16FromBits(0xffff >> (16 - l)) + return LoadMaskedInt32x16(paInt32x16(s), mask) +} + +// StoreSlicePart stores the 16 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 16 or more elements, the method is equivalent to x.StoreSlice. +func (x Int32x16) StoreSlicePart(s []int32) { + l := len(s) + if l >= 16 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := Mask32x16FromBits(0xffff >> (16 - l)) + x.StoreMasked(paInt32x16(s), mask) +} + +// LoadInt64x8SlicePart loads a Int64x8 from the slice s. +// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. +// If s has 8 or more elements, the function is equivalent to LoadInt64x8Slice. 
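(Note: each of these 512-bit helpers builds its mask the same way, a constant with one bit per lane shifted right so that only the first len(s) bits stay set. Worked through for the Int32x16 case above, with the length 5 chosen here only as an example:

	// 16-lane vector, l = len(s) = 5:
	//   0xffff >> (16 - 5) = 0xffff >> 11 = 0b11111
	// so the Mask32x16 enables lanes 0..4, and the masked load/store never
	// touches memory beyond the 5 elements the slice actually owns.
)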
+func LoadInt64x8SlicePart(s []int64) Int64x8 { + l := len(s) + if l >= 8 { + return LoadInt64x8Slice(s) + } + if l == 0 { + var x Int64x8 + return x + } + + mask := Mask64x8FromBits(0xff >> (8 - l)) + return LoadMaskedInt64x8(paInt64x8(s), mask) +} + +// StoreSlicePart stores the 8 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 8 or more elements, the method is equivalent to x.StoreSlice. +func (x Int64x8) StoreSlicePart(s []int64) { + l := len(s) + if l >= 8 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := Mask64x8FromBits(0xff >> (8 - l)) + x.StoreMasked(paInt64x8(s), mask) +} + +// LoadUint8x64SlicePart loads a Uint8x64 from the slice s. +// If s has fewer than 64 elements, the remaining elements of the vector are filled with zeroes. +// If s has 64 or more elements, the function is equivalent to LoadUint8x64Slice. +func LoadUint8x64SlicePart(s []uint8) Uint8x64 { + l := len(s) + if l >= 64 { + return LoadUint8x64Slice(s) + } + if l == 0 { + var x Uint8x64 + return x + } + + mask := Mask8x64FromBits(0xffffffffffffffff >> (64 - l)) + return LoadMaskedUint8x64(paUint8x64(s), mask) +} + +// StoreSlicePart stores the 64 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 64 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint8x64) StoreSlicePart(s []uint8) { + l := len(s) + if l >= 64 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := Mask8x64FromBits(0xffffffffffffffff >> (64 - l)) + x.StoreMasked(paUint8x64(s), mask) +} + +// LoadUint16x32SlicePart loads a Uint16x32 from the slice s. +// If s has fewer than 32 elements, the remaining elements of the vector are filled with zeroes. +// If s has 32 or more elements, the function is equivalent to LoadUint16x32Slice. +func LoadUint16x32SlicePart(s []uint16) Uint16x32 { + l := len(s) + if l >= 32 { + return LoadUint16x32Slice(s) + } + if l == 0 { + var x Uint16x32 + return x + } + + mask := Mask16x32FromBits(0xffffffff >> (32 - l)) + return LoadMaskedUint16x32(paUint16x32(s), mask) +} + +// StoreSlicePart stores the 32 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 32 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint16x32) StoreSlicePart(s []uint16) { + l := len(s) + if l >= 32 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := Mask16x32FromBits(0xffffffff >> (32 - l)) + x.StoreMasked(paUint16x32(s), mask) +} + +// LoadUint32x16SlicePart loads a Uint32x16 from the slice s. +// If s has fewer than 16 elements, the remaining elements of the vector are filled with zeroes. +// If s has 16 or more elements, the function is equivalent to LoadUint32x16Slice. +func LoadUint32x16SlicePart(s []uint32) Uint32x16 { + l := len(s) + if l >= 16 { + return LoadUint32x16Slice(s) + } + if l == 0 { + var x Uint32x16 + return x + } + + mask := Mask32x16FromBits(0xffff >> (16 - l)) + return LoadMaskedUint32x16(paUint32x16(s), mask) +} + +// StoreSlicePart stores the 16 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 16 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint32x16) StoreSlicePart(s []uint32) { + l := len(s) + if l >= 16 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := Mask32x16FromBits(0xffff >> (16 - l)) + x.StoreMasked(paUint32x16(s), mask) +} + +// LoadUint64x8SlicePart loads a Uint64x8 from the slice s. 
+// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. +// If s has 8 or more elements, the function is equivalent to LoadUint64x8Slice. +func LoadUint64x8SlicePart(s []uint64) Uint64x8 { + l := len(s) + if l >= 8 { + return LoadUint64x8Slice(s) + } + if l == 0 { + var x Uint64x8 + return x + } + + mask := Mask64x8FromBits(0xff >> (8 - l)) + return LoadMaskedUint64x8(paUint64x8(s), mask) +} + +// StoreSlicePart stores the 8 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 8 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint64x8) StoreSlicePart(s []uint64) { + l := len(s) + if l >= 8 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := Mask64x8FromBits(0xff >> (8 - l)) + x.StoreMasked(paUint64x8(s), mask) +} + +// LoadFloat32x16SlicePart loads a Float32x16 from the slice s. +// If s has fewer than 16 elements, the remaining elements of the vector are filled with zeroes. +// If s has 16 or more elements, the function is equivalent to LoadFloat32x16Slice. +func LoadFloat32x16SlicePart(s []float32) Float32x16 { + l := len(s) + if l >= 16 { + return LoadFloat32x16Slice(s) + } + if l == 0 { + var x Float32x16 + return x + } + + mask := Mask32x16FromBits(0xffff >> (16 - l)) + return LoadMaskedFloat32x16(paFloat32x16(s), mask) +} + +// StoreSlicePart stores the 16 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 16 or more elements, the method is equivalent to x.StoreSlice. +func (x Float32x16) StoreSlicePart(s []float32) { + l := len(s) + if l >= 16 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := Mask32x16FromBits(0xffff >> (16 - l)) + x.StoreMasked(paFloat32x16(s), mask) +} + +// LoadFloat64x8SlicePart loads a Float64x8 from the slice s. +// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. +// If s has 8 or more elements, the function is equivalent to LoadFloat64x8Slice. +func LoadFloat64x8SlicePart(s []float64) Float64x8 { + l := len(s) + if l >= 8 { + return LoadFloat64x8Slice(s) + } + if l == 0 { + var x Float64x8 + return x + } + + mask := Mask64x8FromBits(0xff >> (8 - l)) + return LoadMaskedFloat64x8(paFloat64x8(s), mask) +} + +// StoreSlicePart stores the 8 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 8 or more elements, the method is equivalent to x.StoreSlice. +func (x Float64x8) StoreSlicePart(s []float64) { + l := len(s) + if l >= 8 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := Mask64x8FromBits(0xff >> (8 - l)) + x.StoreMasked(paFloat64x8(s), mask) +} + +// LoadInt32x4SlicePart loads a Int32x4 from the slice s. +// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. +// If s has 4 or more elements, the function is equivalent to LoadInt32x4Slice. +func LoadInt32x4SlicePart(s []int32) Int32x4 { + l := len(s) + if l >= 4 { + return LoadInt32x4Slice(s) + } + if l == 0 { + var x Int32x4 + return x + } + mask := vecMask32[len(vecMask32)/2-l:] + return LoadMaskedInt32x4(paInt32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) +} + +// StoreSlicePart stores the 4 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 4 or more elements, the method is equivalent to x.StoreSlice. 
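(Note: on AVX2 there is no mask-register type, so the 128- and 256-bit helpers instead slice into vecMask32/vecMask64, declared in slicepart_amd64.go further down as a run of -1 words followed by a run of 0 words, and load that prefix as a vector mask. Worked through for the load just above, with a length picked here for illustration:

	// Int32x4, l = len(s) = 3, len(vecMask32) = 32:
	//   vecMask32[32/2-3:] = vecMask32[13:] = -1, -1, -1, 0, 0, ...
	// LoadInt32x4Slice reads the first four of those values, giving lanes
	// {-1, -1, -1, 0}, and AsMask32x4 turns that into a mask for lanes 0..2.
)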
+func (x Int32x4) StoreSlicePart(s []int32) { + l := len(s) + if l >= 4 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask32[len(vecMask32)/2-l:] + x.StoreMasked(paInt32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) +} + +// LoadInt64x2SlicePart loads a Int64x2 from the slice s. +// If s has fewer than 2 elements, the remaining elements of the vector are filled with zeroes. +// If s has 2 or more elements, the function is equivalent to LoadInt64x2Slice. +func LoadInt64x2SlicePart(s []int64) Int64x2 { + l := len(s) + if l >= 2 { + return LoadInt64x2Slice(s) + } + if l == 0 { + var x Int64x2 + return x + } + mask := vecMask64[len(vecMask64)/2-l:] + return LoadMaskedInt64x2(paInt64x2(s), LoadInt64x2Slice(mask).AsMask64x2()) +} + +// StoreSlicePart stores the 2 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 2 or more elements, the method is equivalent to x.StoreSlice. +func (x Int64x2) StoreSlicePart(s []int64) { + l := len(s) + if l >= 2 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask64[len(vecMask64)/2-l:] + x.StoreMasked(paInt64x2(s), LoadInt64x2Slice(mask).AsMask64x2()) +} + +// LoadUint32x4SlicePart loads a Uint32x4 from the slice s. +// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. +// If s has 4 or more elements, the function is equivalent to LoadUint32x4Slice. +func LoadUint32x4SlicePart(s []uint32) Uint32x4 { + l := len(s) + if l >= 4 { + return LoadUint32x4Slice(s) + } + if l == 0 { + var x Uint32x4 + return x + } + mask := vecMask32[len(vecMask32)/2-l:] + return LoadMaskedUint32x4(paUint32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) +} + +// StoreSlicePart stores the 4 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 4 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint32x4) StoreSlicePart(s []uint32) { + l := len(s) + if l >= 4 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask32[len(vecMask32)/2-l:] + x.StoreMasked(paUint32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) +} + +// LoadUint64x2SlicePart loads a Uint64x2 from the slice s. +// If s has fewer than 2 elements, the remaining elements of the vector are filled with zeroes. +// If s has 2 or more elements, the function is equivalent to LoadUint64x2Slice. +func LoadUint64x2SlicePart(s []uint64) Uint64x2 { + l := len(s) + if l >= 2 { + return LoadUint64x2Slice(s) + } + if l == 0 { + var x Uint64x2 + return x + } + mask := vecMask64[len(vecMask64)/2-l:] + return LoadMaskedUint64x2(paUint64x2(s), LoadInt64x2Slice(mask).AsMask64x2()) +} + +// StoreSlicePart stores the 2 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 2 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint64x2) StoreSlicePart(s []uint64) { + l := len(s) + if l >= 2 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask64[len(vecMask64)/2-l:] + x.StoreMasked(paUint64x2(s), LoadInt64x2Slice(mask).AsMask64x2()) +} + +// LoadFloat32x4SlicePart loads a Float32x4 from the slice s. +// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. +// If s has 4 or more elements, the function is equivalent to LoadFloat32x4Slice. 
+func LoadFloat32x4SlicePart(s []float32) Float32x4 { + l := len(s) + if l >= 4 { + return LoadFloat32x4Slice(s) + } + if l == 0 { + var x Float32x4 + return x + } + mask := vecMask32[len(vecMask32)/2-l:] + return LoadMaskedFloat32x4(paFloat32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) +} + +// StoreSlicePart stores the 4 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 4 or more elements, the method is equivalent to x.StoreSlice. +func (x Float32x4) StoreSlicePart(s []float32) { + l := len(s) + if l >= 4 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask32[len(vecMask32)/2-l:] + x.StoreMasked(paFloat32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) +} + +// LoadFloat64x2SlicePart loads a Float64x2 from the slice s. +// If s has fewer than 2 elements, the remaining elements of the vector are filled with zeroes. +// If s has 2 or more elements, the function is equivalent to LoadFloat64x2Slice. +func LoadFloat64x2SlicePart(s []float64) Float64x2 { + l := len(s) + if l >= 2 { + return LoadFloat64x2Slice(s) + } + if l == 0 { + var x Float64x2 + return x + } + mask := vecMask64[len(vecMask64)/2-l:] + return LoadMaskedFloat64x2(paFloat64x2(s), LoadInt64x2Slice(mask).AsMask64x2()) +} + +// StoreSlicePart stores the 2 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 2 or more elements, the method is equivalent to x.StoreSlice. +func (x Float64x2) StoreSlicePart(s []float64) { + l := len(s) + if l >= 2 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask64[len(vecMask64)/2-l:] + x.StoreMasked(paFloat64x2(s), LoadInt64x2Slice(mask).AsMask64x2()) +} + +// LoadInt32x8SlicePart loads a Int32x8 from the slice s. +// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. +// If s has 8 or more elements, the function is equivalent to LoadInt32x8Slice. +func LoadInt32x8SlicePart(s []int32) Int32x8 { + l := len(s) + if l >= 8 { + return LoadInt32x8Slice(s) + } + if l == 0 { + var x Int32x8 + return x + } + mask := vecMask32[len(vecMask32)/2-l:] + return LoadMaskedInt32x8(paInt32x8(s), LoadInt32x8Slice(mask).AsMask32x8()) +} + +// StoreSlicePart stores the 8 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 8 or more elements, the method is equivalent to x.StoreSlice. +func (x Int32x8) StoreSlicePart(s []int32) { + l := len(s) + if l >= 8 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask32[len(vecMask32)/2-l:] + x.StoreMasked(paInt32x8(s), LoadInt32x8Slice(mask).AsMask32x8()) +} + +// LoadInt64x4SlicePart loads a Int64x4 from the slice s. +// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. +// If s has 4 or more elements, the function is equivalent to LoadInt64x4Slice. +func LoadInt64x4SlicePart(s []int64) Int64x4 { + l := len(s) + if l >= 4 { + return LoadInt64x4Slice(s) + } + if l == 0 { + var x Int64x4 + return x + } + mask := vecMask64[len(vecMask64)/2-l:] + return LoadMaskedInt64x4(paInt64x4(s), LoadInt64x4Slice(mask).AsMask64x4()) +} + +// StoreSlicePart stores the 4 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 4 or more elements, the method is equivalent to x.StoreSlice. 
+func (x Int64x4) StoreSlicePart(s []int64) { + l := len(s) + if l >= 4 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask64[len(vecMask64)/2-l:] + x.StoreMasked(paInt64x4(s), LoadInt64x4Slice(mask).AsMask64x4()) +} + +// LoadUint32x8SlicePart loads a Uint32x8 from the slice s. +// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. +// If s has 8 or more elements, the function is equivalent to LoadUint32x8Slice. +func LoadUint32x8SlicePart(s []uint32) Uint32x8 { + l := len(s) + if l >= 8 { + return LoadUint32x8Slice(s) + } + if l == 0 { + var x Uint32x8 + return x + } + mask := vecMask32[len(vecMask32)/2-l:] + return LoadMaskedUint32x8(paUint32x8(s), LoadInt32x8Slice(mask).AsMask32x8()) +} + +// StoreSlicePart stores the 8 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 8 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint32x8) StoreSlicePart(s []uint32) { + l := len(s) + if l >= 8 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask32[len(vecMask32)/2-l:] + x.StoreMasked(paUint32x8(s), LoadInt32x8Slice(mask).AsMask32x8()) +} + +// LoadUint64x4SlicePart loads a Uint64x4 from the slice s. +// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. +// If s has 4 or more elements, the function is equivalent to LoadUint64x4Slice. +func LoadUint64x4SlicePart(s []uint64) Uint64x4 { + l := len(s) + if l >= 4 { + return LoadUint64x4Slice(s) + } + if l == 0 { + var x Uint64x4 + return x + } + mask := vecMask64[len(vecMask64)/2-l:] + return LoadMaskedUint64x4(paUint64x4(s), LoadInt64x4Slice(mask).AsMask64x4()) +} + +// StoreSlicePart stores the 4 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 4 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint64x4) StoreSlicePart(s []uint64) { + l := len(s) + if l >= 4 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask64[len(vecMask64)/2-l:] + x.StoreMasked(paUint64x4(s), LoadInt64x4Slice(mask).AsMask64x4()) +} + +// LoadFloat32x8SlicePart loads a Float32x8 from the slice s. +// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. +// If s has 8 or more elements, the function is equivalent to LoadFloat32x8Slice. +func LoadFloat32x8SlicePart(s []float32) Float32x8 { + l := len(s) + if l >= 8 { + return LoadFloat32x8Slice(s) + } + if l == 0 { + var x Float32x8 + return x + } + mask := vecMask32[len(vecMask32)/2-l:] + return LoadMaskedFloat32x8(paFloat32x8(s), LoadInt32x8Slice(mask).AsMask32x8()) +} + +// StoreSlicePart stores the 8 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 8 or more elements, the method is equivalent to x.StoreSlice. +func (x Float32x8) StoreSlicePart(s []float32) { + l := len(s) + if l >= 8 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask32[len(vecMask32)/2-l:] + x.StoreMasked(paFloat32x8(s), LoadInt32x8Slice(mask).AsMask32x8()) +} + +// LoadFloat64x4SlicePart loads a Float64x4 from the slice s. +// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. +// If s has 4 or more elements, the function is equivalent to LoadFloat64x4Slice. 
+func LoadFloat64x4SlicePart(s []float64) Float64x4 { + l := len(s) + if l >= 4 { + return LoadFloat64x4Slice(s) + } + if l == 0 { + var x Float64x4 + return x + } + mask := vecMask64[len(vecMask64)/2-l:] + return LoadMaskedFloat64x4(paFloat64x4(s), LoadInt64x4Slice(mask).AsMask64x4()) +} + +// StoreSlicePart stores the 4 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 4 or more elements, the method is equivalent to x.StoreSlice. +func (x Float64x4) StoreSlicePart(s []float64) { + l := len(s) + if l >= 4 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask64[len(vecMask64)/2-l:] + x.StoreMasked(paFloat64x4(s), LoadInt64x4Slice(mask).AsMask64x4()) +} + +// LoadUint8x16SlicePart loads a Uint8x16 from the slice s. +// If s has fewer than 16 elements, the remaining elements of the vector are filled with zeroes. +// If s has 16 or more elements, the function is equivalent to LoadUint8x16Slice. +func LoadUint8x16SlicePart(s []uint8) Uint8x16 { + if len(s) == 0 { + var zero Uint8x16 + return zero + } + t := unsafe.Slice((*int8)(unsafe.Pointer(&s[0])), len(s)) + return LoadInt8x16SlicePart(t).AsUint8x16() +} + +// StoreSlicePart stores the 16 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 16 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint8x16) StoreSlicePart(s []uint8) { + if len(s) == 0 { + return + } + t := unsafe.Slice((*int8)(unsafe.Pointer(&s[0])), len(s)) + x.AsInt8x16().StoreSlicePart(t) +} + +// LoadUint16x8SlicePart loads a Uint16x8 from the slice s. +// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. +// If s has 8 or more elements, the function is equivalent to LoadUint16x8Slice. +func LoadUint16x8SlicePart(s []uint16) Uint16x8 { + if len(s) == 0 { + var zero Uint16x8 + return zero + } + t := unsafe.Slice((*int16)(unsafe.Pointer(&s[0])), len(s)) + return LoadInt16x8SlicePart(t).AsUint16x8() +} + +// StoreSlicePart stores the 8 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 8 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint16x8) StoreSlicePart(s []uint16) { + if len(s) == 0 { + return + } + t := unsafe.Slice((*int16)(unsafe.Pointer(&s[0])), len(s)) + x.AsInt16x8().StoreSlicePart(t) +} + +// LoadUint8x32SlicePart loads a Uint8x32 from the slice s. +// If s has fewer than 32 elements, the remaining elements of the vector are filled with zeroes. +// If s has 32 or more elements, the function is equivalent to LoadUint8x32Slice. +func LoadUint8x32SlicePart(s []uint8) Uint8x32 { + if len(s) == 0 { + var zero Uint8x32 + return zero + } + t := unsafe.Slice((*int8)(unsafe.Pointer(&s[0])), len(s)) + return LoadInt8x32SlicePart(t).AsUint8x32() +} + +// StoreSlicePart stores the 32 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 32 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint8x32) StoreSlicePart(s []uint8) { + if len(s) == 0 { + return + } + t := unsafe.Slice((*int8)(unsafe.Pointer(&s[0])), len(s)) + x.AsInt8x32().StoreSlicePart(t) +} + +// LoadUint16x16SlicePart loads a Uint16x16 from the slice s. +// If s has fewer than 16 elements, the remaining elements of the vector are filled with zeroes. +// If s has 16 or more elements, the function is equivalent to LoadUint16x16Slice. 
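(Note: taken together, the Slice and SlicePart pairs let code walk a slice of arbitrary length without a scalar tail loop. A usage sketch, not part of this CL; it assumes the package is importable as "simd" under GOEXPERIMENT=simd and that Float64x4 has the elementwise Add method exercised elsewhere in these tests:

	func sumFloat64(s []float64) float64 {
		var acc simd.Float64x4
		for ; len(s) >= 4; s = s[4:] {
			acc = acc.Add(simd.LoadFloat64x4Slice(s))
		}
		// Remaining 0-3 elements: the missing lanes load as zero,
		// so they do not perturb the sum.
		acc = acc.Add(simd.LoadFloat64x4SlicePart(s))
		var t [4]float64
		acc.StoreSlice(t[:])
		return t[0] + t[1] + t[2] + t[3]
	}
)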
+func LoadUint16x16SlicePart(s []uint16) Uint16x16 { + if len(s) == 0 { + var zero Uint16x16 + return zero + } + t := unsafe.Slice((*int16)(unsafe.Pointer(&s[0])), len(s)) + return LoadInt16x16SlicePart(t).AsUint16x16() +} + +// StoreSlicePart stores the 16 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 16 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint16x16) StoreSlicePart(s []uint16) { + if len(s) == 0 { + return + } + t := unsafe.Slice((*int16)(unsafe.Pointer(&s[0])), len(s)) + x.AsInt16x16().StoreSlicePart(t) +} diff --git a/src/simd/slicepart_amd64.go b/src/simd/slicepart_amd64.go index 3fcfc6255ba4ff..6d0b5a41f298bc 100644 --- a/src/simd/slicepart_amd64.go +++ b/src/simd/slicepart_amd64.go @@ -11,7 +11,7 @@ import "unsafe" // Implementation of all the {Int,Uint}{8,16} load and store slice part // functions and methods for 128-bit and 256-bit vectors. -/* pointer-punning functions. */ +/* pointer-punning functions for chunked slice part loads. */ func int16atP8(p *int8) *int16 { return (*int16)(unsafe.Pointer(p)) @@ -41,100 +41,24 @@ func int32atP64(p *int64) *int32 { return (*int32)(unsafe.Pointer(p)) } -/* unsigned versions of integer slice part loads */ +/* These two masks are used by generated code */ -// LoadUint8x16SlicePart loads a Uint8x16 from the slice s. -// If s has fewer than 16 elements, the remaining elements of the vector are filled with zeroes. -// If s has 16 or more elements, the function is equivalent to LoadUint8x16Slice. -func LoadUint8x16SlicePart(s []uint8) Uint8x16 { - if len(s) == 0 { - var zero Uint8x16 - return zero - } - t := unsafe.Slice((*int8)(unsafe.Pointer(&s[0])), len(s)) - return LoadInt8x16SlicePart(t).AsUint8x16() -} - -// LoadUint16x8SlicePart loads a Uint16x8 from the slice s. -// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. -// If s has 8 or more elements, the function is equivalent to LoadUint16x8Slice. -func LoadUint16x8SlicePart(s []uint16) Uint16x8 { - if len(s) == 0 { - var zero Uint16x8 - return zero - } - t := unsafe.Slice((*int16)(unsafe.Pointer(&s[0])), len(s)) - return LoadInt16x8SlicePart(t).AsUint16x8() -} - -// LoadUint8x32SlicePart loads a Uint8x32 from the slice s. -// If s has fewer than 32 elements, the remaining elements of the vector are filled with zeroes. -// If s has 32 or more elements, the function is equivalent to LoadUint8x32Slice. -func LoadUint8x32SlicePart(s []uint8) Uint8x32 { - if len(s) == 0 { - var zero Uint8x32 - return zero - } - t := unsafe.Slice((*int8)(unsafe.Pointer(&s[0])), len(s)) - return LoadInt8x32SlicePart(t).AsUint8x32() -} - -// LoadUint16x16SlicePart loads a Uint16x16 from the slice s. -// If s has fewer than 16 elements, the remaining elements of the vector are filled with zeroes. -// If s has 16 or more elements, the function is equivalent to LoadUint16x16Slice. -func LoadUint16x16SlicePart(s []uint16) Uint16x16 { - if len(s) == 0 { - var zero Uint16x16 - return zero - } - t := unsafe.Slice((*int16)(unsafe.Pointer(&s[0])), len(s)) - return LoadInt16x16SlicePart(t).AsUint16x16() -} - -/* unsigned versions of integer slice part stores*/ - -// StoreSlicePart stores the elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 16 or more elements, the method is equivalent to x.StoreSlice. 
-func (x Uint8x16) StoreSlicePart(s []uint8) { - if len(s) == 0 { - return - } - t := unsafe.Slice((*int8)(unsafe.Pointer(&s[0])), len(s)) - x.AsInt8x16().StoreSlicePart(t) -} - -// StoreSlicePart stores the elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 8 or more elements, the method is equivalent to x.StoreSlice. -func (x Uint16x8) StoreSlicePart(s []uint16) { - if len(s) == 0 { - return - } - t := unsafe.Slice((*int16)(unsafe.Pointer(&s[0])), len(s)) - x.AsInt16x8().StoreSlicePart(t) -} - -// StoreSlicePart stores the elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 32 or more elements, the method is equivalent to x.StoreSlice. -func (x Uint8x32) StoreSlicePart(s []uint8) { - if len(s) == 0 { - return - } - t := unsafe.Slice((*int8)(unsafe.Pointer(&s[0])), len(s)) - x.AsInt8x32().StoreSlicePart(t) +var vecMask64 = [16]int64{ + -1, -1, -1, -1, + -1, -1, -1, -1, + 0, 0, 0, 0, + 0, 0, 0, 0, } -// StoreSlicePart stores the elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 16 or more elements, the method is equivalent to x.StoreSlice. -func (x Uint16x16) StoreSlicePart(s []uint16) { - if len(s) == 0 { - return - } - t := unsafe.Slice((*int16)(unsafe.Pointer(&s[0])), len(s)) - x.AsInt16x16().StoreSlicePart(t) +var vecMask32 = [32]int32{ + -1, -1, -1, -1, + -1, -1, -1, -1, + -1, -1, -1, -1, + -1, -1, -1, -1, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, } /* 256-bit int vector loads and stores made from 128-bit parts */ @@ -389,401 +313,3 @@ func (x Int16x8) StoreSlicePart(s []int16) { } return } - -var vecMask64 = [16]int64{ - -1, -1, -1, -1, - -1, -1, -1, -1, - 0, 0, 0, 0, - 0, 0, 0, 0, -} - -// paInt32x4 is an unchecked cast from a slice to an -// pointer-to-array type, for used in a masked -// load/store. In practice, the slice will be too -// short, so this has to be unsafe, and its only -// use must be with an instruction with masked -// load/store effect (including faults). -func paInt32x4(s []int32) *[4]int32 { - return (*[4]int32)(unsafe.Pointer(&s[0])) -} - -func paInt32x8(s []int32) *[8]int32 { - return (*[8]int32)(unsafe.Pointer(&s[0])) -} - -func paInt64x2(s []int64) *[2]int64 { - return (*[2]int64)(unsafe.Pointer(&s[0])) -} - -func paInt64x4(s []int64) *[4]int64 { - return (*[4]int64)(unsafe.Pointer(&s[0])) -} - -// For 512-bit masked loads/stores - -func paInt64x8(s []int64) *[8]int64 { - return (*[8]int64)(unsafe.Pointer(&s[0])) -} - -func paInt32x16(s []int32) *[16]int32 { - return (*[16]int32)(unsafe.Pointer(&s[0])) -} - -func paInt16x32(s []int16) *[32]int16 { - return (*[32]int16)(unsafe.Pointer(&s[0])) -} - -func paInt8x64(s []int8) *[64]int8 { - return (*[64]int8)(unsafe.Pointer(&s[0])) -} - -/* 32 and 64-bit slice-part loads for AVX2 (128 and 256 bit) */ - -// LoadInt32x4SlicePart loads a Int32x4 from the slice s. -// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. -// If s has 4 or more elements, the function is equivalent to LoadInt32x4Slice. -func LoadInt32x4SlicePart(s []int32) Int32x4 { - l := len(s) - if l >= 4 { - return LoadInt32x4Slice(s) - } - if l == 0 { - var x Int32x4 - return x - } - p := int32atP64(&vecMask64[0]) - mask := unsafe.Slice(p, 32)[16-l:] - return LoadMaskedInt32x4(paInt32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) -} - -// StoreSlicePart stores the elements of x into the slice s. -// It stores as many elements as will fit in s. 
-// If s has 4 or more elements, the method is equivalent to x.StoreSlice. -func (x Int32x4) StoreSlicePart(s []int32) { - l := len(s) - if l >= 4 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - p := int32atP64(&vecMask64[0]) - mask := unsafe.Slice(p, 32)[16-l:] - x.StoreMasked(paInt32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) -} - -// LoadInt32x8SlicePart loads a Int32x8 from the slice s. -// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. -// If s has 8 or more elements, the function is equivalent to LoadInt32x8Slice. -func LoadInt32x8SlicePart(s []int32) Int32x8 { - l := len(s) - if l >= 8 { - return LoadInt32x8Slice(s) - } - if l == 0 { - var x Int32x8 - return x - } - p := int32atP64(&vecMask64[0]) - mask := unsafe.Slice(p, 32)[16-l:] - return LoadMaskedInt32x8(paInt32x8(s), LoadInt32x8Slice(mask).AsMask32x8()) -} - -// LoadInt64x2SlicePart loads a Int64x2 from the slice s. -// If s has fewer than 2 elements, the remaining elements of the vector are filled with zeroes. -// If s has 2 or more elements, the function is equivalent to LoadInt64x2Slice. -func LoadInt64x2SlicePart(s []int64) Int64x2 { - l := len(s) - if l >= 2 { - return LoadInt64x2Slice(s) - } - if l == 0 { - var x Int64x2 - return x - } - - mask := vecMask64[8-l:] - return LoadMaskedInt64x2(paInt64x2(s), LoadInt64x2Slice(mask).AsMask64x2()) -} - -// LoadInt64x4SlicePart loads a Int64x4 from the slice s. -// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. -// If s has 4 or more elements, the function is equivalent to LoadInt64x4Slice. -func LoadInt64x4SlicePart(s []int64) Int64x4 { - l := len(s) - if l >= 4 { - return LoadInt64x4Slice(s) - } - if l == 0 { - var x Int64x4 - return x - } - - mask := vecMask64[8-l:] - return LoadMaskedInt64x4(paInt64x4(s), LoadInt64x4Slice(mask).AsMask64x4()) -} - -// StoreSlicePart stores the elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 8 or more elements, the method is equivalent to x.StoreSlice. -func (x Int32x8) StoreSlicePart(s []int32) { - l := len(s) - if l >= 8 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - p := int32atP64(&vecMask64[0]) - mask := unsafe.Slice(p, 32)[16-l:] - x.StoreMasked(paInt32x8(s), LoadInt32x8Slice(mask).AsMask32x8()) -} - -// StoreSlicePart stores the elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 2 or more elements, the method is equivalent to x.StoreSlice. -func (x Int64x2) StoreSlicePart(s []int64) { - l := len(s) - if l >= 2 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := vecMask64[8-l:] - x.StoreMasked(paInt64x2(s), LoadInt64x2Slice(mask).AsMask64x2()) -} - -// StoreSlicePart stores the elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 4 or more elements, the method is equivalent to x.StoreSlice. -func (x Int64x4) StoreSlicePart(s []int64) { - l := len(s) - if l >= 4 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := vecMask64[8-l:] - x.StoreMasked(paInt64x4(s), LoadInt64x4Slice(mask).AsMask64x4()) -} - -// Handle float32, float64, uint32, and uint64 with ugly casts. - -// LoadUint32x4SlicePart loads a Uint32x4 from the slice s. -// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. -// If s has 4 or more elements, the function is equivalent to LoadUint32x4Slice. 
-func LoadUint32x4SlicePart(s []uint32) Uint32x4 { - if len(s) == 0 { - var zero Uint32x4 - return zero - } - t := unsafe.Slice((*int32)(unsafe.Pointer(&s[0])), len(s)) - return LoadInt32x4SlicePart(t).AsUint32x4() -} - -// StoreSlicePart stores the elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 4 or more elements, the method is equivalent to x.StoreSlice. -func (x Uint32x4) StoreSlicePart(s []uint32) { - if len(s) == 0 { - return - } - t := unsafe.Slice((*int32)(unsafe.Pointer(&s[0])), len(s)) - x.AsInt32x4().StoreSlicePart(t) -} - -// LoadUint32x8SlicePart loads a Uint32x8 from the slice s. -// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. -// If s has 8 or more elements, the function is equivalent to LoadUint32x8Slice. -func LoadUint32x8SlicePart(s []uint32) Uint32x8 { - if len(s) == 0 { - var zero Uint32x8 - return zero - } - t := unsafe.Slice((*int32)(unsafe.Pointer(&s[0])), len(s)) - return LoadInt32x8SlicePart(t).AsUint32x8() -} - -// StoreSlicePart stores the elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 8 or more elements, the method is equivalent to x.StoreSlice. -func (x Uint32x8) StoreSlicePart(s []uint32) { - if len(s) == 0 { - return - } - t := unsafe.Slice((*int32)(unsafe.Pointer(&s[0])), len(s)) - x.AsInt32x8().StoreSlicePart(t) -} - -// LoadUint64x2SlicePart loads a Uint64x2 from the slice s. -// If s has fewer than 2 elements, the remaining elements of the vector are filled with zeroes. -// If s has 2 or more elements, the function is equivalent to LoadUint64x2Slice. -func LoadUint64x2SlicePart(s []uint64) Uint64x2 { - if len(s) == 0 { - var zero Uint64x2 - return zero - } - t := unsafe.Slice((*int64)(unsafe.Pointer(&s[0])), len(s)) - return LoadInt64x2SlicePart(t).AsUint64x2() -} - -// StoreSlicePart stores the elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 2 or more elements, the method is equivalent to x.StoreSlice. -func (x Uint64x2) StoreSlicePart(s []uint64) { - if len(s) == 0 { - return - } - t := unsafe.Slice((*int64)(unsafe.Pointer(&s[0])), len(s)) - x.AsInt64x2().StoreSlicePart(t) -} - -// LoadUint64x4SlicePart loads a Uint64x4 from the slice s. -// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. -// If s has 4 or more elements, the function is equivalent to LoadUint64x4Slice. -func LoadUint64x4SlicePart(s []uint64) Uint64x4 { - if len(s) == 0 { - var zero Uint64x4 - return zero - } - t := unsafe.Slice((*int64)(unsafe.Pointer(&s[0])), len(s)) - return LoadInt64x4SlicePart(t).AsUint64x4() -} - -// StoreSlicePart stores the elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 4 or more elements, the method is equivalent to x.StoreSlice. -func (x Uint64x4) StoreSlicePart(s []uint64) { - if len(s) == 0 { - return - } - t := unsafe.Slice((*int64)(unsafe.Pointer(&s[0])), len(s)) - x.AsInt64x4().StoreSlicePart(t) -} - -// Float32xK and Float64xK - -// LoadFloat32x4SlicePart loads a Float32x4 from the slice s. -// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. -// If s has 4 or more elements, the function is equivalent to LoadFloat32x4Slice. 
-func LoadFloat32x4SlicePart(s []float32) Float32x4 { - if len(s) == 0 { - var zero Float32x4 - return zero - } - t := unsafe.Slice((*int32)(unsafe.Pointer(&s[0])), len(s)) - return LoadInt32x4SlicePart(t).AsFloat32x4() -} - -// StoreSlicePart stores the elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 4 or more elements, the method is equivalent to x.StoreSlice. -func (x Float32x4) StoreSlicePart(s []float32) { - if len(s) == 0 { - return - } - t := unsafe.Slice((*int32)(unsafe.Pointer(&s[0])), len(s)) - x.AsInt32x4().StoreSlicePart(t) -} - -// LoadFloat32x8SlicePart loads a Float32x8 from the slice s. -// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. -// If s has 8 or more elements, the function is equivalent to LoadFloat32x8Slice. -func LoadFloat32x8SlicePart(s []float32) Float32x8 { - if len(s) == 0 { - var zero Float32x8 - return zero - } - t := unsafe.Slice((*int32)(unsafe.Pointer(&s[0])), len(s)) - return LoadInt32x8SlicePart(t).AsFloat32x8() -} - -// StoreSlicePart stores the elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 8 or more elements, the method is equivalent to x.StoreSlice. -func (x Float32x8) StoreSlicePart(s []float32) { - if len(s) == 0 { - return - } - t := unsafe.Slice((*int32)(unsafe.Pointer(&s[0])), len(s)) - x.AsInt32x8().StoreSlicePart(t) -} - -// LoadFloat64x2SlicePart loads a Float64x2 from the slice s. -// If s has fewer than 2 elements, the remaining elements of the vector are filled with zeroes. -// If s has 2 or more elements, the function is equivalent to LoadFloat64x2Slice. -func LoadFloat64x2SlicePart(s []float64) Float64x2 { - if len(s) == 0 { - var zero Float64x2 - return zero - } - t := unsafe.Slice((*int64)(unsafe.Pointer(&s[0])), len(s)) - return LoadInt64x2SlicePart(t).AsFloat64x2() -} - -// StoreSlicePart stores the elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 2 or more elements, the method is equivalent to x.StoreSlice. -func (x Float64x2) StoreSlicePart(s []float64) { - if len(s) == 0 { - return - } - t := unsafe.Slice((*int64)(unsafe.Pointer(&s[0])), len(s)) - x.AsInt64x2().StoreSlicePart(t) -} - -// LoadFloat64x4SlicePart loads a Float64x4 from the slice s. -// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. -// If s has 4 or more elements, the function is equivalent to LoadFloat64x4Slice. -func LoadFloat64x4SlicePart(s []float64) Float64x4 { - if len(s) == 0 { - var zero Float64x4 - return zero - } - t := unsafe.Slice((*int64)(unsafe.Pointer(&s[0])), len(s)) - return LoadInt64x4SlicePart(t).AsFloat64x4() -} - -// StoreSlicePart stores the elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 4 or more elements, the method is equivalent to x.StoreSlice. 
-func (x Float64x4) StoreSlicePart(s []float64) { - if len(s) == 0 { - return - } - t := unsafe.Slice((*int64)(unsafe.Pointer(&s[0])), len(s)) - x.AsInt64x4().StoreSlicePart(t) -} - -func LoadInt64x8SlicePart(s []int64) Int64x8 { - l := len(s) - if l >= 8 { - return LoadInt64x8Slice(s) - } - if l == 0 { - var x Int64x8 - return x - } - - mask := Mask64x8FromBits(0xff >> (8 - l)) - return LoadMaskedInt64x8(paInt64x8(s), mask) -} - -func (x Int64x8) StoreSlicePart(s []int64) { - l := len(s) - if l >= 8 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := Mask64x8FromBits(0xff >> (8 - l)) - x.StoreMasked(paInt64x8(s), mask) -} diff --git a/src/simd/slicepart_test.go b/src/simd/slicepart_test.go index c9492bea1ba2dc..07869e954b3e5b 100644 --- a/src/simd/slicepart_test.go +++ b/src/simd/slicepart_test.go @@ -367,7 +367,7 @@ func TestSlicePartInt64(t *testing.T) { b := make([]int64, L) v.StoreSlice(b) // test the load - checkSlicesLogInput(t, b, d, func() { t.Helper(); t.Logf("Len(e)=%d", len(e)) }) + checkSlicesLogInput(t, b, d, 0.0, func() { t.Helper(); t.Logf("Len(e)=%d", len(e)) }) // Test the store f := make([]int64, L+1) diff --git a/src/simd/ternary_helpers_test.go b/src/simd/ternary_helpers_test.go index e48ec2409c133c..401270c7bdcc8d 100644 --- a/src/simd/ternary_helpers_test.go +++ b/src/simd/ternary_helpers_test.go @@ -25,7 +25,7 @@ func testInt8x16Ternary(t *testing.T, f func(_, _, _ simd.Int8x16) simd.Int8x16, g := make([]int8, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -41,7 +41,7 @@ func testInt16x8Ternary(t *testing.T, f func(_, _, _ simd.Int16x8) simd.Int16x8, g := make([]int16, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -57,7 +57,7 @@ func testInt32x4Ternary(t *testing.T, f func(_, _, _ simd.Int32x4) simd.Int32x4, g := make([]int32, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -73,7 +73,7 @@ func testInt64x2Ternary(t *testing.T, f func(_, _, _ simd.Int64x2) simd.Int64x2, g := make([]int64, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -89,7 +89,7 @@ func testUint8x16Ternary(t *testing.T, f func(_, _, _ simd.Uint8x16) simd.Uint8x g := make([]uint8, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -105,7 +105,7 @@ func testUint16x8Ternary(t *testing.T, f func(_, _, _ simd.Uint16x8) simd.Uint16 g := make([]uint16, n) f(a, b, 
c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -121,7 +121,7 @@ func testUint32x4Ternary(t *testing.T, f func(_, _, _ simd.Uint32x4) simd.Uint32 g := make([]uint32, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -137,7 +137,7 @@ func testUint64x2Ternary(t *testing.T, f func(_, _, _ simd.Uint64x2) simd.Uint64 g := make([]uint64, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -153,7 +153,7 @@ func testFloat32x4Ternary(t *testing.T, f func(_, _, _ simd.Float32x4) simd.Floa g := make([]float32, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -169,7 +169,7 @@ func testFloat64x2Ternary(t *testing.T, f func(_, _, _ simd.Float64x2) simd.Floa g := make([]float64, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -185,7 +185,7 @@ func testInt8x32Ternary(t *testing.T, f func(_, _, _ simd.Int8x32) simd.Int8x32, g := make([]int8, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -201,7 +201,7 @@ func testInt16x16Ternary(t *testing.T, f func(_, _, _ simd.Int16x16) simd.Int16x g := make([]int16, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -217,7 +217,7 @@ func testInt32x8Ternary(t *testing.T, f func(_, _, _ simd.Int32x8) simd.Int32x8, g := make([]int32, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -233,7 +233,7 @@ func testInt64x4Ternary(t *testing.T, f func(_, _, _ simd.Int64x4) simd.Int64x4, g := make([]int64, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); 
t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -249,7 +249,7 @@ func testUint8x32Ternary(t *testing.T, f func(_, _, _ simd.Uint8x32) simd.Uint8x g := make([]uint8, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -265,7 +265,7 @@ func testUint16x16Ternary(t *testing.T, f func(_, _, _ simd.Uint16x16) simd.Uint g := make([]uint16, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -281,7 +281,7 @@ func testUint32x8Ternary(t *testing.T, f func(_, _, _ simd.Uint32x8) simd.Uint32 g := make([]uint32, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -297,7 +297,7 @@ func testUint64x4Ternary(t *testing.T, f func(_, _, _ simd.Uint64x4) simd.Uint64 g := make([]uint64, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -313,7 +313,7 @@ func testFloat32x8Ternary(t *testing.T, f func(_, _, _ simd.Float32x8) simd.Floa g := make([]float32, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -329,7 +329,7 @@ func testFloat64x4Ternary(t *testing.T, f func(_, _, _ simd.Float64x4) simd.Floa g := make([]float64, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -345,7 +345,7 @@ func testInt8x64Ternary(t *testing.T, f func(_, _, _ simd.Int8x64) simd.Int8x64, g := make([]int8, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -361,7 +361,7 @@ func testInt16x32Ternary(t *testing.T, f func(_, _, _ simd.Int16x32) simd.Int16x g := make([]int16, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -377,7 +377,7 @@ func testInt32x16Ternary(t *testing.T, f func(_, _, _ simd.Int32x16) simd.Int32x g := make([]int32, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, 
g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -393,7 +393,7 @@ func testInt64x8Ternary(t *testing.T, f func(_, _, _ simd.Int64x8) simd.Int64x8, g := make([]int64, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -409,7 +409,7 @@ func testUint8x64Ternary(t *testing.T, f func(_, _, _ simd.Uint8x64) simd.Uint8x g := make([]uint8, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -425,7 +425,7 @@ func testUint16x32Ternary(t *testing.T, f func(_, _, _ simd.Uint16x32) simd.Uint g := make([]uint16, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -441,7 +441,7 @@ func testUint32x16Ternary(t *testing.T, f func(_, _, _ simd.Uint32x16) simd.Uint g := make([]uint32, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -457,7 +457,7 @@ func testUint64x8Ternary(t *testing.T, f func(_, _, _ simd.Uint64x8) simd.Uint64 g := make([]uint64, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -473,7 +473,7 @@ func testFloat32x16Ternary(t *testing.T, f func(_, _, _ simd.Float32x16) simd.Fl g := make([]float32, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -489,6 +489,57 @@ func testFloat64x8Ternary(t *testing.T, f func(_, _, _ simd.Float64x8) simd.Floa g := make([]float64, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testFloat32x4TernaryFlaky tests the simd ternary method f against the expected behavior generated by want, +// but using a flakiness parameter because we haven't exactly figured out how simd floating point works +func testFloat32x4TernaryFlaky(t *testing.T, f func(x, y, z simd.Float32x4) simd.Float32x4, want func(x, y, z []float32) []float32, flakiness float64) { + n := 4 + t.Helper() + forSliceTriple(t, float32s, n, func(x, y, 
z []float32) bool { + t.Helper() + a := simd.LoadFloat32x4Slice(x) + b := simd.LoadFloat32x4Slice(y) + c := simd.LoadFloat32x4Slice(z) + g := make([]float32, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, flakiness, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testFloat32x8TernaryFlaky tests the simd ternary method f against the expected behavior generated by want, +// but using a flakiness parameter because we haven't exactly figured out how simd floating point works +func testFloat32x8TernaryFlaky(t *testing.T, f func(x, y, z simd.Float32x8) simd.Float32x8, want func(x, y, z []float32) []float32, flakiness float64) { + n := 8 + t.Helper() + forSliceTriple(t, float32s, n, func(x, y, z []float32) bool { + t.Helper() + a := simd.LoadFloat32x8Slice(x) + b := simd.LoadFloat32x8Slice(y) + c := simd.LoadFloat32x8Slice(z) + g := make([]float32, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, flakiness, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testFloat32x16TernaryFlaky tests the simd ternary method f against the expected behavior generated by want, +// but using a flakiness parameter because we haven't exactly figured out how simd floating point works +func testFloat32x16TernaryFlaky(t *testing.T, f func(x, y, z simd.Float32x16) simd.Float32x16, want func(x, y, z []float32) []float32, flakiness float64) { + n := 16 + t.Helper() + forSliceTriple(t, float32s, n, func(x, y, z []float32) bool { + t.Helper() + a := simd.LoadFloat32x16Slice(x) + b := simd.LoadFloat32x16Slice(y) + c := simd.LoadFloat32x16Slice(z) + g := make([]float32, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, flakiness, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } diff --git a/src/simd/ternary_test.go b/src/simd/ternary_test.go index afca850d6147e8..9ce0ff7676eaa3 100644 --- a/src/simd/ternary_test.go +++ b/src/simd/ternary_test.go @@ -13,9 +13,9 @@ import ( func TestFMA(t *testing.T) { if simd.HasAVX512() { - testFloat32x4Ternary(t, simd.Float32x4.FusedMultiplyAdd, fmaSlice[float32]) - testFloat32x8Ternary(t, simd.Float32x8.FusedMultiplyAdd, fmaSlice[float32]) - testFloat32x16Ternary(t, simd.Float32x16.FusedMultiplyAdd, fmaSlice[float32]) + testFloat32x4TernaryFlaky(t, simd.Float32x4.FusedMultiplyAdd, fmaSlice[float32], 0.001) + testFloat32x8TernaryFlaky(t, simd.Float32x8.FusedMultiplyAdd, fmaSlice[float32], 0.001) + testFloat32x16TernaryFlaky(t, simd.Float32x16.FusedMultiplyAdd, fmaSlice[float32], 0.001) testFloat64x2Ternary(t, simd.Float64x2.FusedMultiplyAdd, fmaSlice[float64]) testFloat64x4Ternary(t, simd.Float64x4.FusedMultiplyAdd, fmaSlice[float64]) testFloat64x8Ternary(t, simd.Float64x8.FusedMultiplyAdd, fmaSlice[float64]) diff --git a/src/simd/unary_helpers_test.go b/src/simd/unary_helpers_test.go index 4e0f09428e76bb..f5b9e3b676b21e 100644 --- a/src/simd/unary_helpers_test.go +++ b/src/simd/unary_helpers_test.go @@ -23,7 +23,7 @@ func testInt8x16Unary(t *testing.T, f func(_ simd.Int8x16) simd.Int8x16, want fu g := make([]int8, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -37,7 +37,7 @@ func testInt16x8Unary(t *testing.T, f func(_ simd.Int16x8) simd.Int16x8, want fu g := make([]int16, n) 
f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -51,7 +51,7 @@ func testInt32x4Unary(t *testing.T, f func(_ simd.Int32x4) simd.Int32x4, want fu g := make([]int32, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -65,7 +65,7 @@ func testInt64x2Unary(t *testing.T, f func(_ simd.Int64x2) simd.Int64x2, want fu g := make([]int64, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -79,7 +79,7 @@ func testUint8x16Unary(t *testing.T, f func(_ simd.Uint8x16) simd.Uint8x16, want g := make([]uint8, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -93,7 +93,7 @@ func testUint16x8Unary(t *testing.T, f func(_ simd.Uint16x8) simd.Uint16x8, want g := make([]uint16, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -107,7 +107,7 @@ func testUint32x4Unary(t *testing.T, f func(_ simd.Uint32x4) simd.Uint32x4, want g := make([]uint32, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -121,7 +121,7 @@ func testUint64x2Unary(t *testing.T, f func(_ simd.Uint64x2) simd.Uint64x2, want g := make([]uint64, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -135,7 +135,7 @@ func testFloat32x4Unary(t *testing.T, f func(_ simd.Float32x4) simd.Float32x4, w g := make([]float32, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -149,7 +149,7 @@ func testFloat64x2Unary(t *testing.T, f func(_ simd.Float64x2) simd.Float64x2, w g := make([]float64, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -163,7 +163,7 @@ func testInt8x32Unary(t *testing.T, f func(_ simd.Int8x32) simd.Int8x32, want fu g := make([]int8, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -177,7 +177,7 @@ func testInt16x16Unary(t *testing.T, f func(_ simd.Int16x16) simd.Int16x16, want g := make([]int16, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -191,7 +191,7 @@ func testInt32x8Unary(t *testing.T, f func(_ simd.Int32x8) simd.Int32x8, want fu g 
:= make([]int32, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -205,7 +205,7 @@ func testInt64x4Unary(t *testing.T, f func(_ simd.Int64x4) simd.Int64x4, want fu g := make([]int64, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -219,7 +219,7 @@ func testUint8x32Unary(t *testing.T, f func(_ simd.Uint8x32) simd.Uint8x32, want g := make([]uint8, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -233,7 +233,7 @@ func testUint16x16Unary(t *testing.T, f func(_ simd.Uint16x16) simd.Uint16x16, w g := make([]uint16, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -247,7 +247,7 @@ func testUint32x8Unary(t *testing.T, f func(_ simd.Uint32x8) simd.Uint32x8, want g := make([]uint32, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -261,7 +261,7 @@ func testUint64x4Unary(t *testing.T, f func(_ simd.Uint64x4) simd.Uint64x4, want g := make([]uint64, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -275,7 +275,7 @@ func testFloat32x8Unary(t *testing.T, f func(_ simd.Float32x8) simd.Float32x8, w g := make([]float32, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -289,7 +289,7 @@ func testFloat64x4Unary(t *testing.T, f func(_ simd.Float64x4) simd.Float64x4, w g := make([]float64, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -303,7 +303,7 @@ func testInt8x64Unary(t *testing.T, f func(_ simd.Int8x64) simd.Int8x64, want fu g := make([]int8, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -317,7 +317,7 @@ func testInt16x32Unary(t *testing.T, f func(_ simd.Int16x32) simd.Int16x32, want g := make([]int16, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -331,7 +331,7 @@ func testInt32x16Unary(t *testing.T, f func(_ simd.Int32x16) simd.Int32x16, want g := make([]int32, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -345,7 +345,7 @@ func testInt64x8Unary(t *testing.T, f func(_ 
simd.Int64x8) simd.Int64x8, want fu g := make([]int64, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -359,7 +359,7 @@ func testUint8x64Unary(t *testing.T, f func(_ simd.Uint8x64) simd.Uint8x64, want g := make([]uint8, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -373,7 +373,7 @@ func testUint16x32Unary(t *testing.T, f func(_ simd.Uint16x32) simd.Uint16x32, w g := make([]uint16, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -387,7 +387,7 @@ func testUint32x16Unary(t *testing.T, f func(_ simd.Uint32x16) simd.Uint32x16, w g := make([]uint32, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -401,7 +401,7 @@ func testUint64x8Unary(t *testing.T, f func(_ simd.Uint64x8) simd.Uint64x8, want g := make([]uint64, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -415,7 +415,7 @@ func testFloat32x16Unary(t *testing.T, f func(_ simd.Float32x16) simd.Float32x16 g := make([]float32, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -429,7 +429,7 @@ func testFloat64x8Unary(t *testing.T, f func(_ simd.Float64x8) simd.Float64x8, w g := make([]float64, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -443,7 +443,7 @@ func testFloat32x4UnaryToInt32(t *testing.T, f func(x simd.Float32x4) simd.Int32 g := make([]int32, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -457,7 +457,7 @@ func testFloat32x8UnaryToInt32(t *testing.T, f func(x simd.Float32x8) simd.Int32 g := make([]int32, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -471,7 +471,7 @@ func testFloat32x16UnaryToInt32(t *testing.T, f func(x simd.Float32x16) simd.Int g := make([]int32, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -485,7 +485,7 @@ func testFloat32x4UnaryToUint32(t *testing.T, f func(x simd.Float32x4) simd.Uint g := make([]uint32, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -499,7 +499,7 @@ func 
testFloat32x8UnaryToUint32(t *testing.T, f func(x simd.Float32x8) simd.Uint g := make([]uint32, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -513,6 +513,96 @@ func testFloat32x16UnaryToUint32(t *testing.T, f func(x simd.Float32x16) simd.Ui g := make([]uint32, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat32x4UnaryFlaky tests the simd unary method f against the expected behavior generated by want, +// but using a flakiness parameter because we haven't exactly figured out how simd floating point works +func testFloat32x4UnaryFlaky(t *testing.T, f func(x simd.Float32x4) simd.Float32x4, want func(x []float32) []float32, flakiness float64) { + n := 4 + t.Helper() + forSlice(t, float32s, n, func(x []float32) bool { + t.Helper() + a := simd.LoadFloat32x4Slice(x) + g := make([]float32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, flakiness, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat64x2UnaryFlaky tests the simd unary method f against the expected behavior generated by want, +// but using a flakiness parameter because we haven't exactly figured out how simd floating point works +func testFloat64x2UnaryFlaky(t *testing.T, f func(x simd.Float64x2) simd.Float64x2, want func(x []float64) []float64, flakiness float64) { + n := 2 + t.Helper() + forSlice(t, float64s, n, func(x []float64) bool { + t.Helper() + a := simd.LoadFloat64x2Slice(x) + g := make([]float64, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, flakiness, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat32x8UnaryFlaky tests the simd unary method f against the expected behavior generated by want, +// but using a flakiness parameter because we haven't exactly figured out how simd floating point works +func testFloat32x8UnaryFlaky(t *testing.T, f func(x simd.Float32x8) simd.Float32x8, want func(x []float32) []float32, flakiness float64) { + n := 8 + t.Helper() + forSlice(t, float32s, n, func(x []float32) bool { + t.Helper() + a := simd.LoadFloat32x8Slice(x) + g := make([]float32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, flakiness, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat64x4UnaryFlaky tests the simd unary method f against the expected behavior generated by want, +// but using a flakiness parameter because we haven't exactly figured out how simd floating point works +func testFloat64x4UnaryFlaky(t *testing.T, f func(x simd.Float64x4) simd.Float64x4, want func(x []float64) []float64, flakiness float64) { + n := 4 + t.Helper() + forSlice(t, float64s, n, func(x []float64) bool { + t.Helper() + a := simd.LoadFloat64x4Slice(x) + g := make([]float64, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, flakiness, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat32x16UnaryFlaky tests the simd unary method f against the expected behavior generated by want, +// but using a flakiness parameter because we haven't exactly figured out how simd floating point works +func testFloat32x16UnaryFlaky(t *testing.T, f func(x simd.Float32x16) simd.Float32x16, want func(x []float32) []float32, flakiness float64) { + n := 16 + 
t.Helper() + forSlice(t, float32s, n, func(x []float32) bool { + t.Helper() + a := simd.LoadFloat32x16Slice(x) + g := make([]float32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, flakiness, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat64x8UnaryFlaky tests the simd unary method f against the expected behavior generated by want, +// but using a flakiness parameter because we haven't exactly figured out how simd floating point works +func testFloat64x8UnaryFlaky(t *testing.T, f func(x simd.Float64x8) simd.Float64x8, want func(x []float64) []float64, flakiness float64) { + n := 8 + t.Helper() + forSlice(t, float64s, n, func(x []float64) bool { + t.Helper() + a := simd.LoadFloat64x8Slice(x) + g := make([]float64, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, flakiness, func() { t.Helper(); t.Logf("x=%v", x) }) }) } diff --git a/src/simd/unary_test.go b/src/simd/unary_test.go index 6565df30965742..4263b81cd734ab 100644 --- a/src/simd/unary_test.go +++ b/src/simd/unary_test.go @@ -7,6 +7,7 @@ package simd_test import ( + "math" "simd" "testing" ) @@ -88,6 +89,23 @@ func TestToInt32(t *testing.T) { testFloat32x8UnaryToInt32(t, simd.Float32x8.ConvertToInt32, toInt32Slice[float32]) } +func TestDiffWithCeilWithPrecision(t *testing.T) { + if !simd.HasAVX512() { + t.Skip("Needs AVX512") + } + testFloat64x8UnaryFlaky(t, + func(x simd.Float64x8) simd.Float64x8 { return x.DiffWithCeilWithPrecision(0) }, + map1(ceilResidueForPrecision[float64](0)), + 0.001) + testFloat64x8UnaryFlaky(t, + func(x simd.Float64x8) simd.Float64x8 { return x.DiffWithCeilWithPrecision(1) }, + map1(ceilResidueForPrecision[float64](1)), + 0.001) + testFloat64x8Unary(t, + func(x simd.Float64x8) simd.Float64x8 { return x.Sub(x.CeilWithPrecision(0)) }, + map1[float64](func(x float64) float64 { return x - math.Ceil(x) })) +} + func TestToUint32(t *testing.T) { if !simd.HasAVX512() { t.Skip("Needs AVX512") diff --git a/src/simd/unsafe_helpers.go b/src/simd/unsafe_helpers.go new file mode 100644 index 00000000000000..c6ea50d551b123 --- /dev/null +++ b/src/simd/unsafe_helpers.go @@ -0,0 +1,217 @@ +// Code generated by 'go run genfiles.go'; DO NOT EDIT. + +//go:build goexperiment.simd + +package simd + +import "unsafe" + +// paInt8x16 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paInt8x16(s []int8) *[16]int8 { + return (*[16]int8)(unsafe.Pointer(&s[0])) +} + +// paInt16x8 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paInt16x8(s []int16) *[8]int16 { + return (*[8]int16)(unsafe.Pointer(&s[0])) +} + +// paInt32x4 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paInt32x4(s []int32) *[4]int32 { + return (*[4]int32)(unsafe.Pointer(&s[0])) +} + +// paInt64x2 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paInt64x2(s []int64) *[2]int64 { + return (*[2]int64)(unsafe.Pointer(&s[0])) +} + +// paUint8x16 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. 
+func paUint8x16(s []uint8) *[16]uint8 { + return (*[16]uint8)(unsafe.Pointer(&s[0])) +} + +// paUint16x8 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paUint16x8(s []uint16) *[8]uint16 { + return (*[8]uint16)(unsafe.Pointer(&s[0])) +} + +// paUint32x4 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paUint32x4(s []uint32) *[4]uint32 { + return (*[4]uint32)(unsafe.Pointer(&s[0])) +} + +// paUint64x2 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paUint64x2(s []uint64) *[2]uint64 { + return (*[2]uint64)(unsafe.Pointer(&s[0])) +} + +// paFloat32x4 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paFloat32x4(s []float32) *[4]float32 { + return (*[4]float32)(unsafe.Pointer(&s[0])) +} + +// paFloat64x2 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paFloat64x2(s []float64) *[2]float64 { + return (*[2]float64)(unsafe.Pointer(&s[0])) +} + +// paInt8x32 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paInt8x32(s []int8) *[32]int8 { + return (*[32]int8)(unsafe.Pointer(&s[0])) +} + +// paInt16x16 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paInt16x16(s []int16) *[16]int16 { + return (*[16]int16)(unsafe.Pointer(&s[0])) +} + +// paInt32x8 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paInt32x8(s []int32) *[8]int32 { + return (*[8]int32)(unsafe.Pointer(&s[0])) +} + +// paInt64x4 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paInt64x4(s []int64) *[4]int64 { + return (*[4]int64)(unsafe.Pointer(&s[0])) +} + +// paUint8x32 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paUint8x32(s []uint8) *[32]uint8 { + return (*[32]uint8)(unsafe.Pointer(&s[0])) +} + +// paUint16x16 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paUint16x16(s []uint16) *[16]uint16 { + return (*[16]uint16)(unsafe.Pointer(&s[0])) +} + +// paUint32x8 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paUint32x8(s []uint32) *[8]uint32 { + return (*[8]uint32)(unsafe.Pointer(&s[0])) +} + +// paUint64x4 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. 
+func paUint64x4(s []uint64) *[4]uint64 { + return (*[4]uint64)(unsafe.Pointer(&s[0])) +} + +// paFloat32x8 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paFloat32x8(s []float32) *[8]float32 { + return (*[8]float32)(unsafe.Pointer(&s[0])) +} + +// paFloat64x4 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paFloat64x4(s []float64) *[4]float64 { + return (*[4]float64)(unsafe.Pointer(&s[0])) +} + +// paInt8x64 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paInt8x64(s []int8) *[64]int8 { + return (*[64]int8)(unsafe.Pointer(&s[0])) +} + +// paInt16x32 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paInt16x32(s []int16) *[32]int16 { + return (*[32]int16)(unsafe.Pointer(&s[0])) +} + +// paInt32x16 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paInt32x16(s []int32) *[16]int32 { + return (*[16]int32)(unsafe.Pointer(&s[0])) +} + +// paInt64x8 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paInt64x8(s []int64) *[8]int64 { + return (*[8]int64)(unsafe.Pointer(&s[0])) +} + +// paUint8x64 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paUint8x64(s []uint8) *[64]uint8 { + return (*[64]uint8)(unsafe.Pointer(&s[0])) +} + +// paUint16x32 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paUint16x32(s []uint16) *[32]uint16 { + return (*[32]uint16)(unsafe.Pointer(&s[0])) +} + +// paUint32x16 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paUint32x16(s []uint32) *[16]uint32 { + return (*[16]uint32)(unsafe.Pointer(&s[0])) +} + +// paUint64x8 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paUint64x8(s []uint64) *[8]uint64 { + return (*[8]uint64)(unsafe.Pointer(&s[0])) +} + +// paFloat32x16 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paFloat32x16(s []float32) *[16]float32 { + return (*[16]float32)(unsafe.Pointer(&s[0])) +} + +// paFloat64x8 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. 
+func paFloat64x8(s []float64) *[8]float64 { + return (*[8]float64)(unsafe.Pointer(&s[0])) +} From 6b9b59e144a0db697b0e22920ff0b7e0b51c0945 Mon Sep 17 00:00:00 2001 From: David Chase Date: Fri, 1 Aug 2025 15:58:29 -0400 Subject: [PATCH 111/139] [dev.simd] simd, cmd/compile: rename some methods generated by simdgen CL 692556 these are the "easy" ones SaturatedOp -> OpSaturated PairwiseOp -> OpPairs OpWithPrecision -> OpScaled DiffWithOpWithPrecision -> OpScaledResidue Change-Id: I036bf89c0690bcf9922c376d62cef48392942af3 Reviewed-on: https://go-review.googlesource.com/c/go/+/692357 Reviewed-by: Junyang Shao Reviewed-by: Cherry Mui LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/simdssa.go | 202 +- .../compile/internal/ssa/_gen/simdAMD64.rules | 404 +- .../internal/ssa/_gen/simdgenericOps.go | 404 +- src/cmd/compile/internal/ssa/opGen.go | 1866 ++++---- src/cmd/compile/internal/ssa/rewriteAMD64.go | 3750 ++++++++--------- .../compile/internal/ssagen/simdintrinsics.go | 404 +- src/simd/binary_test.go | 50 +- src/simd/ops_amd64.go | 2496 ++++++----- src/simd/unary_test.go | 8 +- 9 files changed, 4790 insertions(+), 4794 deletions(-) diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 15ffbf66fa7cbb..76ef42576d32c3 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -80,6 +80,22 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPADDQ128, ssa.OpAMD64VPADDQ256, ssa.OpAMD64VPADDQ512, + ssa.OpAMD64VHADDPS128, + ssa.OpAMD64VHADDPS256, + ssa.OpAMD64VHADDPD128, + ssa.OpAMD64VHADDPD256, + ssa.OpAMD64VPHADDW128, + ssa.OpAMD64VPHADDW256, + ssa.OpAMD64VPHADDD128, + ssa.OpAMD64VPHADDD256, + ssa.OpAMD64VPHADDSW128, + ssa.OpAMD64VPHADDSW256, + ssa.OpAMD64VPADDSB128, + ssa.OpAMD64VPADDSB256, + ssa.OpAMD64VPADDSB512, + ssa.OpAMD64VPADDSW128, + ssa.OpAMD64VPADDSW256, + ssa.OpAMD64VPADDSW512, ssa.OpAMD64VADDSUBPS128, ssa.OpAMD64VADDSUBPS256, ssa.OpAMD64VADDSUBPD128, @@ -189,12 +205,15 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VMULPD128, ssa.OpAMD64VMULPD256, ssa.OpAMD64VMULPD512, - ssa.OpAMD64VSCALEFPS128, - ssa.OpAMD64VSCALEFPS256, - ssa.OpAMD64VSCALEFPS512, - ssa.OpAMD64VSCALEFPD128, - ssa.OpAMD64VSCALEFPD256, - ssa.OpAMD64VSCALEFPD512, + ssa.OpAMD64VPMULLW128, + ssa.OpAMD64VPMULLW256, + ssa.OpAMD64VPMULLW512, + ssa.OpAMD64VPMULLD128, + ssa.OpAMD64VPMULLD256, + ssa.OpAMD64VPMULLD512, + ssa.OpAMD64VPMULLQ128, + ssa.OpAMD64VPMULLQ256, + ssa.OpAMD64VPMULLQ512, ssa.OpAMD64VPMULDQ128, ssa.OpAMD64VPMULDQ256, ssa.OpAMD64VPMULDQ512, @@ -207,15 +226,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMULHUW128, ssa.OpAMD64VPMULHUW256, ssa.OpAMD64VPMULHUW512, - ssa.OpAMD64VPMULLW128, - ssa.OpAMD64VPMULLW256, - ssa.OpAMD64VPMULLW512, - ssa.OpAMD64VPMULLD128, - ssa.OpAMD64VPMULLD256, - ssa.OpAMD64VPMULLD512, - ssa.OpAMD64VPMULLQ128, - ssa.OpAMD64VPMULLQ256, - ssa.OpAMD64VPMULLQ512, ssa.OpAMD64VPOR128, ssa.OpAMD64VPOR256, ssa.OpAMD64VPORD512, @@ -223,22 +233,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMADDWD128, ssa.OpAMD64VPMADDWD256, ssa.OpAMD64VPMADDWD512, - ssa.OpAMD64VHADDPS128, - ssa.OpAMD64VHADDPS256, - ssa.OpAMD64VHADDPD128, - ssa.OpAMD64VHADDPD256, - ssa.OpAMD64VPHADDW128, - ssa.OpAMD64VPHADDW256, - ssa.OpAMD64VPHADDD128, - ssa.OpAMD64VPHADDD256, - ssa.OpAMD64VHSUBPS128, - ssa.OpAMD64VHSUBPS256, - ssa.OpAMD64VHSUBPD128, - ssa.OpAMD64VHSUBPD256, - ssa.OpAMD64VPHSUBW128, - 
ssa.OpAMD64VPHSUBW256, - ssa.OpAMD64VPHSUBD128, - ssa.OpAMD64VPHSUBD256, ssa.OpAMD64VPERMB128, ssa.OpAMD64VPERMB256, ssa.OpAMD64VPERMB512, @@ -265,25 +259,15 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPRORVQ128, ssa.OpAMD64VPRORVQ256, ssa.OpAMD64VPRORVQ512, - ssa.OpAMD64VPADDSB128, - ssa.OpAMD64VPADDSB256, - ssa.OpAMD64VPADDSB512, - ssa.OpAMD64VPADDSW128, - ssa.OpAMD64VPADDSW256, - ssa.OpAMD64VPADDSW512, - ssa.OpAMD64VPHADDSW128, - ssa.OpAMD64VPHADDSW256, - ssa.OpAMD64VPHSUBSW128, - ssa.OpAMD64VPHSUBSW256, - ssa.OpAMD64VPSUBSB128, - ssa.OpAMD64VPSUBSB256, - ssa.OpAMD64VPSUBSB512, - ssa.OpAMD64VPSUBSW128, - ssa.OpAMD64VPSUBSW256, - ssa.OpAMD64VPSUBSW512, ssa.OpAMD64VPMADDUBSW128, ssa.OpAMD64VPMADDUBSW256, ssa.OpAMD64VPMADDUBSW512, + ssa.OpAMD64VSCALEFPS128, + ssa.OpAMD64VSCALEFPS256, + ssa.OpAMD64VSCALEFPS512, + ssa.OpAMD64VSCALEFPD128, + ssa.OpAMD64VSCALEFPD256, + ssa.OpAMD64VSCALEFPD512, ssa.OpAMD64VPSLLVW128, ssa.OpAMD64VPSLLVW256, ssa.OpAMD64VPSLLVW512, @@ -335,6 +319,22 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSUBQ128, ssa.OpAMD64VPSUBQ256, ssa.OpAMD64VPSUBQ512, + ssa.OpAMD64VHSUBPS128, + ssa.OpAMD64VHSUBPS256, + ssa.OpAMD64VHSUBPD128, + ssa.OpAMD64VHSUBPD256, + ssa.OpAMD64VPHSUBW128, + ssa.OpAMD64VPHSUBW256, + ssa.OpAMD64VPHSUBD128, + ssa.OpAMD64VPHSUBD256, + ssa.OpAMD64VPHSUBSW128, + ssa.OpAMD64VPHSUBSW256, + ssa.OpAMD64VPSUBSB128, + ssa.OpAMD64VPSUBSB256, + ssa.OpAMD64VPSUBSB512, + ssa.OpAMD64VPSUBSW128, + ssa.OpAMD64VPSUBSW256, + ssa.OpAMD64VPSUBSW512, ssa.OpAMD64VPXOR128, ssa.OpAMD64VPXOR256, ssa.OpAMD64VPXORD512, @@ -369,6 +369,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPADDQMasked128, ssa.OpAMD64VPADDQMasked256, ssa.OpAMD64VPADDQMasked512, + ssa.OpAMD64VPADDSBMasked128, + ssa.OpAMD64VPADDSBMasked256, + ssa.OpAMD64VPADDSBMasked512, + ssa.OpAMD64VPADDSWMasked128, + ssa.OpAMD64VPADDSWMasked256, + ssa.OpAMD64VPADDSWMasked512, ssa.OpAMD64VPANDDMasked128, ssa.OpAMD64VPANDDMasked256, ssa.OpAMD64VPANDDMasked512, @@ -456,12 +462,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMINUQMasked128, ssa.OpAMD64VPMINUQMasked256, ssa.OpAMD64VPMINUQMasked512, - ssa.OpAMD64VSCALEFPSMasked128, - ssa.OpAMD64VSCALEFPSMasked256, - ssa.OpAMD64VSCALEFPSMasked512, - ssa.OpAMD64VSCALEFPDMasked128, - ssa.OpAMD64VSCALEFPDMasked256, - ssa.OpAMD64VSCALEFPDMasked512, ssa.OpAMD64VPMULDQMasked128, ssa.OpAMD64VPMULDQMasked256, ssa.OpAMD64VPMULDQMasked512, @@ -474,6 +474,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMULHUWMasked128, ssa.OpAMD64VPMULHUWMasked256, ssa.OpAMD64VPMULHUWMasked512, + ssa.OpAMD64VMULPSMasked128, + ssa.OpAMD64VMULPSMasked256, + ssa.OpAMD64VMULPSMasked512, + ssa.OpAMD64VMULPDMasked128, + ssa.OpAMD64VMULPDMasked256, + ssa.OpAMD64VMULPDMasked512, ssa.OpAMD64VPMULLWMasked128, ssa.OpAMD64VPMULLWMasked256, ssa.OpAMD64VPMULLWMasked512, @@ -483,12 +489,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMULLQMasked128, ssa.OpAMD64VPMULLQMasked256, ssa.OpAMD64VPMULLQMasked512, - ssa.OpAMD64VMULPSMasked128, - ssa.OpAMD64VMULPSMasked256, - ssa.OpAMD64VMULPSMasked512, - ssa.OpAMD64VMULPDMasked128, - ssa.OpAMD64VMULPDMasked256, - ssa.OpAMD64VMULPDMasked512, ssa.OpAMD64VPORDMasked128, ssa.OpAMD64VPORDMasked256, ssa.OpAMD64VPORDMasked512, @@ -524,21 +524,15 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPRORVQMasked128, ssa.OpAMD64VPRORVQMasked256, ssa.OpAMD64VPRORVQMasked512, - 
ssa.OpAMD64VPADDSBMasked128, - ssa.OpAMD64VPADDSBMasked256, - ssa.OpAMD64VPADDSBMasked512, - ssa.OpAMD64VPADDSWMasked128, - ssa.OpAMD64VPADDSWMasked256, - ssa.OpAMD64VPADDSWMasked512, - ssa.OpAMD64VPSUBSBMasked128, - ssa.OpAMD64VPSUBSBMasked256, - ssa.OpAMD64VPSUBSBMasked512, - ssa.OpAMD64VPSUBSWMasked128, - ssa.OpAMD64VPSUBSWMasked256, - ssa.OpAMD64VPSUBSWMasked512, ssa.OpAMD64VPMADDUBSWMasked128, ssa.OpAMD64VPMADDUBSWMasked256, ssa.OpAMD64VPMADDUBSWMasked512, + ssa.OpAMD64VSCALEFPSMasked128, + ssa.OpAMD64VSCALEFPSMasked256, + ssa.OpAMD64VSCALEFPSMasked512, + ssa.OpAMD64VSCALEFPDMasked128, + ssa.OpAMD64VSCALEFPDMasked256, + ssa.OpAMD64VSCALEFPDMasked512, ssa.OpAMD64VPSLLVWMasked128, ssa.OpAMD64VPSLLVWMasked256, ssa.OpAMD64VPSLLVWMasked512, @@ -584,6 +578,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSUBQMasked128, ssa.OpAMD64VPSUBQMasked256, ssa.OpAMD64VPSUBQMasked512, + ssa.OpAMD64VPSUBSBMasked128, + ssa.OpAMD64VPSUBSBMasked256, + ssa.OpAMD64VPSUBSBMasked512, + ssa.OpAMD64VPSUBSWMasked128, + ssa.OpAMD64VPSUBSWMasked256, + ssa.OpAMD64VPSUBSWMasked512, ssa.OpAMD64VPXORDMasked128, ssa.OpAMD64VPXORDMasked256, ssa.OpAMD64VPXORDMasked512, @@ -1085,6 +1085,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPADDQMasked128, ssa.OpAMD64VPADDQMasked256, ssa.OpAMD64VPADDQMasked512, + ssa.OpAMD64VPADDSBMasked128, + ssa.OpAMD64VPADDSBMasked256, + ssa.OpAMD64VPADDSBMasked512, + ssa.OpAMD64VPADDSWMasked128, + ssa.OpAMD64VPADDSWMasked256, + ssa.OpAMD64VPADDSWMasked512, ssa.OpAMD64VPANDDMasked128, ssa.OpAMD64VPANDDMasked256, ssa.OpAMD64VPANDDMasked512, @@ -1121,6 +1127,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VRNDSCALEPDMasked128, ssa.OpAMD64VRNDSCALEPDMasked256, ssa.OpAMD64VRNDSCALEPDMasked512, + ssa.OpAMD64VREDUCEPSMasked128, + ssa.OpAMD64VREDUCEPSMasked256, + ssa.OpAMD64VREDUCEPSMasked512, + ssa.OpAMD64VREDUCEPDMasked128, + ssa.OpAMD64VREDUCEPDMasked256, + ssa.OpAMD64VREDUCEPDMasked512, ssa.OpAMD64VCOMPRESSPSMasked128, ssa.OpAMD64VCOMPRESSPSMasked256, ssa.OpAMD64VCOMPRESSPSMasked512, @@ -1145,12 +1157,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VCVTPS2UDQMasked128, ssa.OpAMD64VCVTPS2UDQMasked256, ssa.OpAMD64VCVTPS2UDQMasked512, - ssa.OpAMD64VREDUCEPSMasked128, - ssa.OpAMD64VREDUCEPSMasked256, - ssa.OpAMD64VREDUCEPSMasked512, - ssa.OpAMD64VREDUCEPDMasked128, - ssa.OpAMD64VREDUCEPDMasked256, - ssa.OpAMD64VREDUCEPDMasked512, ssa.OpAMD64VDIVPSMasked128, ssa.OpAMD64VDIVPSMasked256, ssa.OpAMD64VDIVPSMasked512, @@ -1244,12 +1250,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMINUQMasked128, ssa.OpAMD64VPMINUQMasked256, ssa.OpAMD64VPMINUQMasked512, - ssa.OpAMD64VSCALEFPSMasked128, - ssa.OpAMD64VSCALEFPSMasked256, - ssa.OpAMD64VSCALEFPSMasked512, - ssa.OpAMD64VSCALEFPDMasked128, - ssa.OpAMD64VSCALEFPDMasked256, - ssa.OpAMD64VSCALEFPDMasked512, ssa.OpAMD64VPMULDQMasked128, ssa.OpAMD64VPMULDQMasked256, ssa.OpAMD64VPMULDQMasked512, @@ -1262,6 +1262,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMULHUWMasked128, ssa.OpAMD64VPMULHUWMasked256, ssa.OpAMD64VPMULHUWMasked512, + ssa.OpAMD64VMULPSMasked128, + ssa.OpAMD64VMULPSMasked256, + ssa.OpAMD64VMULPSMasked512, + ssa.OpAMD64VMULPDMasked128, + ssa.OpAMD64VMULPDMasked256, + ssa.OpAMD64VMULPDMasked512, ssa.OpAMD64VPMULLWMasked128, ssa.OpAMD64VPMULLWMasked256, ssa.OpAMD64VPMULLWMasked512, @@ -1271,12 +1277,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { 
ssa.OpAMD64VPMULLQMasked128, ssa.OpAMD64VPMULLQMasked256, ssa.OpAMD64VPMULLQMasked512, - ssa.OpAMD64VMULPSMasked128, - ssa.OpAMD64VMULPSMasked256, - ssa.OpAMD64VMULPSMasked512, - ssa.OpAMD64VMULPDMasked128, - ssa.OpAMD64VMULPDMasked256, - ssa.OpAMD64VMULPDMasked512, ssa.OpAMD64VPORDMasked128, ssa.OpAMD64VPORDMasked256, ssa.OpAMD64VPORDMasked512, @@ -1357,24 +1357,18 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPDPWSSDSMasked128, ssa.OpAMD64VPDPWSSDSMasked256, ssa.OpAMD64VPDPWSSDSMasked512, - ssa.OpAMD64VPADDSBMasked128, - ssa.OpAMD64VPADDSBMasked256, - ssa.OpAMD64VPADDSBMasked512, - ssa.OpAMD64VPADDSWMasked128, - ssa.OpAMD64VPADDSWMasked256, - ssa.OpAMD64VPADDSWMasked512, - ssa.OpAMD64VPSUBSBMasked128, - ssa.OpAMD64VPSUBSBMasked256, - ssa.OpAMD64VPSUBSBMasked512, - ssa.OpAMD64VPSUBSWMasked128, - ssa.OpAMD64VPSUBSWMasked256, - ssa.OpAMD64VPSUBSWMasked512, ssa.OpAMD64VPMADDUBSWMasked128, ssa.OpAMD64VPMADDUBSWMasked256, ssa.OpAMD64VPMADDUBSWMasked512, ssa.OpAMD64VPDPBUSDSMasked128, ssa.OpAMD64VPDPBUSDSMasked256, ssa.OpAMD64VPDPBUSDSMasked512, + ssa.OpAMD64VSCALEFPSMasked128, + ssa.OpAMD64VSCALEFPSMasked256, + ssa.OpAMD64VSCALEFPSMasked512, + ssa.OpAMD64VSCALEFPDMasked128, + ssa.OpAMD64VSCALEFPDMasked256, + ssa.OpAMD64VSCALEFPDMasked512, ssa.OpAMD64VPSHLDWMasked128, ssa.OpAMD64VPSHLDWMasked256, ssa.OpAMD64VPSHLDWMasked512, @@ -1489,6 +1483,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSUBQMasked128, ssa.OpAMD64VPSUBQMasked256, ssa.OpAMD64VPSUBQMasked512, + ssa.OpAMD64VPSUBSBMasked128, + ssa.OpAMD64VPSUBSBMasked256, + ssa.OpAMD64VPSUBSBMasked512, + ssa.OpAMD64VPSUBSWMasked128, + ssa.OpAMD64VPSUBSWMasked256, + ssa.OpAMD64VPSUBSWMasked512, ssa.OpAMD64VPDPBUSDMasked128, ssa.OpAMD64VPDPBUSDMasked256, ssa.OpAMD64VPDPBUSDMasked512, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 1d54cfcdbddebb..060f220c7de758 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -90,6 +90,44 @@ (AddMaskedUint64x2 x y mask) => (VPADDQMasked128 x y (VPMOVVec64x2ToM mask)) (AddMaskedUint64x4 x y mask) => (VPADDQMasked256 x y (VPMOVVec64x4ToM mask)) (AddMaskedUint64x8 x y mask) => (VPADDQMasked512 x y (VPMOVVec64x8ToM mask)) +(AddPairsFloat32x4 ...) => (VHADDPS128 ...) +(AddPairsFloat32x8 ...) => (VHADDPS256 ...) +(AddPairsFloat64x2 ...) => (VHADDPD128 ...) +(AddPairsFloat64x4 ...) => (VHADDPD256 ...) +(AddPairsInt16x8 ...) => (VPHADDW128 ...) +(AddPairsInt16x16 ...) => (VPHADDW256 ...) +(AddPairsInt32x4 ...) => (VPHADDD128 ...) +(AddPairsInt32x8 ...) => (VPHADDD256 ...) +(AddPairsUint16x8 ...) => (VPHADDW128 ...) +(AddPairsUint16x16 ...) => (VPHADDW256 ...) +(AddPairsUint32x4 ...) => (VPHADDD128 ...) +(AddPairsUint32x8 ...) => (VPHADDD256 ...) +(AddPairsSaturatedInt16x8 ...) => (VPHADDSW128 ...) +(AddPairsSaturatedInt16x16 ...) => (VPHADDSW256 ...) +(AddSaturatedInt8x16 ...) => (VPADDSB128 ...) +(AddSaturatedInt8x32 ...) => (VPADDSB256 ...) +(AddSaturatedInt8x64 ...) => (VPADDSB512 ...) +(AddSaturatedInt16x8 ...) => (VPADDSW128 ...) +(AddSaturatedInt16x16 ...) => (VPADDSW256 ...) +(AddSaturatedInt16x32 ...) => (VPADDSW512 ...) +(AddSaturatedUint8x16 ...) => (VPADDSB128 ...) +(AddSaturatedUint8x32 ...) => (VPADDSB256 ...) +(AddSaturatedUint8x64 ...) => (VPADDSB512 ...) +(AddSaturatedUint16x8 ...) => (VPADDSW128 ...) +(AddSaturatedUint16x16 ...) => (VPADDSW256 ...) +(AddSaturatedUint16x32 ...) => (VPADDSW512 ...) 
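
[Editor's note, not part of the patch.] For readers following how these .rules lines take effect: rulegen compiles each line into a case of the architecture's rewrite pass. Below is a hand-written sketch of roughly what the generated code in rewriteAMD64.go looks like for one of the masked rules that follow; the exact temporaries, the omitted matching loop, and the types.TypeMask mask type are assumptions, and the generated file is the authority.

// Inside package ssa (sketch of generated rewrite code).
func rewriteValueAMD64_OpAddSaturatedMaskedInt8x16(v *Value) bool {
	v_2 := v.Args[2] // mask
	v_1 := v.Args[1] // y
	v_0 := v.Args[0] // x
	b := v.Block
	// match:  (AddSaturatedMaskedInt8x16 x y mask)
	// result: (VPADDSBMasked128 x y (VPMOVVec8x16ToM <types.TypeMask> mask))
	v.reset(OpAMD64VPADDSBMasked128)
	v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask)
	v0.AddArg(v_2)
	v.AddArg3(v_0, v_1, v0)
	return true
}
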
+(AddSaturatedMaskedInt8x16 x y mask) => (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) +(AddSaturatedMaskedInt8x32 x y mask) => (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) +(AddSaturatedMaskedInt8x64 x y mask) => (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) +(AddSaturatedMaskedInt16x8 x y mask) => (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) +(AddSaturatedMaskedInt16x16 x y mask) => (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) +(AddSaturatedMaskedInt16x32 x y mask) => (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) +(AddSaturatedMaskedUint8x16 x y mask) => (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) +(AddSaturatedMaskedUint8x32 x y mask) => (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) +(AddSaturatedMaskedUint8x64 x y mask) => (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) +(AddSaturatedMaskedUint16x8 x y mask) => (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) +(AddSaturatedMaskedUint16x16 x y mask) => (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) +(AddSaturatedMaskedUint16x32 x y mask) => (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) (AddSubFloat32x4 ...) => (VADDSUBPS128 ...) (AddSubFloat32x8 ...) => (VADDSUBPS256 ...) (AddSubFloat64x2 ...) => (VADDSUBPD128 ...) @@ -206,18 +244,30 @@ (CeilFloat32x8 x) => (VROUNDPS256 [2] x) (CeilFloat64x2 x) => (VROUNDPD128 [2] x) (CeilFloat64x4 x) => (VROUNDPD256 [2] x) -(CeilWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+2] x) -(CeilWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+2] x) -(CeilWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+2] x) -(CeilWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+2] x) -(CeilWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+2] x) -(CeilWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+2] x) -(CeilWithPrecisionMaskedFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) -(CeilWithPrecisionMaskedFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) -(CeilWithPrecisionMaskedFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) -(CeilWithPrecisionMaskedFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) -(CeilWithPrecisionMaskedFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) -(CeilWithPrecisionMaskedFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) +(CeilScaledFloat32x4 [a] x) => (VRNDSCALEPS128 [a+2] x) +(CeilScaledFloat32x8 [a] x) => (VRNDSCALEPS256 [a+2] x) +(CeilScaledFloat32x16 [a] x) => (VRNDSCALEPS512 [a+2] x) +(CeilScaledFloat64x2 [a] x) => (VRNDSCALEPD128 [a+2] x) +(CeilScaledFloat64x4 [a] x) => (VRNDSCALEPD256 [a+2] x) +(CeilScaledFloat64x8 [a] x) => (VRNDSCALEPD512 [a+2] x) +(CeilScaledMaskedFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) +(CeilScaledMaskedFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) +(CeilScaledMaskedFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) +(CeilScaledMaskedFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) +(CeilScaledMaskedFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) +(CeilScaledMaskedFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) +(CeilScaledResidueFloat32x4 [a] x) => (VREDUCEPS128 [a+2] x) +(CeilScaledResidueFloat32x8 [a] x) => (VREDUCEPS256 [a+2] x) +(CeilScaledResidueFloat32x16 [a] x) => (VREDUCEPS512 [a+2] x) +(CeilScaledResidueFloat64x2 [a] x) => (VREDUCEPD128 [a+2] x) +(CeilScaledResidueFloat64x4 [a] 
x) => (VREDUCEPD256 [a+2] x) +(CeilScaledResidueFloat64x8 [a] x) => (VREDUCEPD512 [a+2] x) +(CeilScaledResidueMaskedFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) +(CeilScaledResidueMaskedFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) +(CeilScaledResidueMaskedFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) +(CeilScaledResidueMaskedFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) +(CeilScaledResidueMaskedFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) +(CeilScaledResidueMaskedFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) (CompressFloat32x4 x mask) => (VCOMPRESSPSMasked128 x (VPMOVVec32x4ToM mask)) (CompressFloat32x8 x mask) => (VCOMPRESSPSMasked256 x (VPMOVVec32x8ToM mask)) (CompressFloat32x16 x mask) => (VCOMPRESSPSMasked512 x (VPMOVVec32x16ToM mask)) @@ -260,54 +310,6 @@ (ConvertToUint32MaskedFloat32x4 x mask) => (VCVTPS2UDQMasked128 x (VPMOVVec32x4ToM mask)) (ConvertToUint32MaskedFloat32x8 x mask) => (VCVTPS2UDQMasked256 x (VPMOVVec32x8ToM mask)) (ConvertToUint32MaskedFloat32x16 x mask) => (VCVTPS2UDQMasked512 x (VPMOVVec32x16ToM mask)) -(DiffWithCeilWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+2] x) -(DiffWithCeilWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+2] x) -(DiffWithCeilWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+2] x) -(DiffWithCeilWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+2] x) -(DiffWithCeilWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+2] x) -(DiffWithCeilWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+2] x) -(DiffWithCeilWithPrecisionMaskedFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) -(DiffWithCeilWithPrecisionMaskedFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) -(DiffWithCeilWithPrecisionMaskedFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) -(DiffWithCeilWithPrecisionMaskedFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) -(DiffWithCeilWithPrecisionMaskedFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) -(DiffWithCeilWithPrecisionMaskedFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) -(DiffWithFloorWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+1] x) -(DiffWithFloorWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+1] x) -(DiffWithFloorWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+1] x) -(DiffWithFloorWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+1] x) -(DiffWithFloorWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+1] x) -(DiffWithFloorWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+1] x) -(DiffWithFloorWithPrecisionMaskedFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) -(DiffWithFloorWithPrecisionMaskedFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) -(DiffWithFloorWithPrecisionMaskedFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) -(DiffWithFloorWithPrecisionMaskedFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) -(DiffWithFloorWithPrecisionMaskedFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) -(DiffWithFloorWithPrecisionMaskedFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) -(DiffWithRoundWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+0] x) -(DiffWithRoundWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+0] x) 
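
[Editor's note, not part of the patch.] The *ScaledResidue rules added above replace the DiffWith*WithPrecision rules being removed here; both lower to VREDUCEP{S,D}, which returns what is left of x after rounding it to the requested number of binary fraction digits. A scalar-equivalent sketch for one float64 lane in ceil mode, ignoring the instruction's special handling of NaN and infinities (the package and helper names are illustrative only):

package simdsemantics

import "math"

// ceilScaledResidue mirrors one lane of VREDUCEPD with the ceil rounding mode:
// it rounds x up to a multiple of 2**-prec and returns the leftover x - rounded.
func ceilScaledResidue(x float64, prec uint) float64 {
	scale := math.Ldexp(1, int(prec))     // 2**prec
	rounded := math.Ceil(x*scale) / scale // CeilScaled(x, prec)
	return x - rounded
}

For example, ceilScaledResidue(2.7, 0) is about -0.3: the residue can be negative, which is why these ops are distinct from a plain fractional part.
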
-(DiffWithRoundWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+0] x) -(DiffWithRoundWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+0] x) -(DiffWithRoundWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+0] x) -(DiffWithRoundWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+0] x) -(DiffWithRoundWithPrecisionMaskedFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) -(DiffWithRoundWithPrecisionMaskedFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) -(DiffWithRoundWithPrecisionMaskedFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) -(DiffWithRoundWithPrecisionMaskedFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) -(DiffWithRoundWithPrecisionMaskedFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) -(DiffWithRoundWithPrecisionMaskedFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) -(DiffWithTruncWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+3] x) -(DiffWithTruncWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+3] x) -(DiffWithTruncWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+3] x) -(DiffWithTruncWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+3] x) -(DiffWithTruncWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+3] x) -(DiffWithTruncWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+3] x) -(DiffWithTruncWithPrecisionMaskedFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) -(DiffWithTruncWithPrecisionMaskedFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) -(DiffWithTruncWithPrecisionMaskedFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) -(DiffWithTruncWithPrecisionMaskedFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) -(DiffWithTruncWithPrecisionMaskedFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) -(DiffWithTruncWithPrecisionMaskedFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) (DivFloat32x4 ...) => (VDIVPS128 ...) (DivFloat32x8 ...) => (VDIVPS256 ...) (DivFloat32x16 ...) => (VDIVPS512 ...) 
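
[Editor's note, not part of the patch.] The constant folded into the aux value in these rules is the VRNDSCALE/VREDUCE immediate byte: its upper four bits carry the number of binary fraction digits to keep (the scale), and its low two bits pick the rounding direction, which is why the four Scaled flavors differ only by +0, +1, +2, +3 (VROUNDP* shares the same low-two-bit mode encoding). A small sketch of the packing; the helper is hypothetical, and where the fraction-bit count gets shifted into the upper nibble of a is not shown in this hunk:

// Rounding-control field shared by VROUNDP*, VRNDSCALEP* and VREDUCEP*.
const (
	rcNearest = 0 // Round* / RoundScaled*  (round to nearest even)
	rcFloor   = 1 // Floor* / FloorScaled*  (round toward -inf)
	rcCeil    = 2 // Ceil*  / CeilScaled*   (round toward +inf)
	rcTrunc   = 3 // Trunc* / TruncScaled*  (round toward zero)
)

// rndscaleImm packs a VRNDSCALE/VREDUCE immediate: fraction bits in the high
// nibble, rounding mode in the low two bits.
func rndscaleImm(fractionBits, mode uint8) uint8 {
	return fractionBits<<4 | mode&3
}

Assuming the aux value a already arrives with the fraction-bit count in its upper nibble, writing [a+2] in the CeilScaled rules is equivalent to or'ing in the ceil mode: rndscaleImm(0, rcCeil) is 2 (round up to an integer), rndscaleImm(1, rcFloor) is 0x11 (round down to a multiple of 0.5).
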
@@ -387,18 +389,30 @@ (FloorFloat32x8 x) => (VROUNDPS256 [1] x) (FloorFloat64x2 x) => (VROUNDPD128 [1] x) (FloorFloat64x4 x) => (VROUNDPD256 [1] x) -(FloorWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+1] x) -(FloorWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+1] x) -(FloorWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+1] x) -(FloorWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+1] x) -(FloorWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+1] x) -(FloorWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+1] x) -(FloorWithPrecisionMaskedFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) -(FloorWithPrecisionMaskedFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) -(FloorWithPrecisionMaskedFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) -(FloorWithPrecisionMaskedFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) -(FloorWithPrecisionMaskedFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) -(FloorWithPrecisionMaskedFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) +(FloorScaledFloat32x4 [a] x) => (VRNDSCALEPS128 [a+1] x) +(FloorScaledFloat32x8 [a] x) => (VRNDSCALEPS256 [a+1] x) +(FloorScaledFloat32x16 [a] x) => (VRNDSCALEPS512 [a+1] x) +(FloorScaledFloat64x2 [a] x) => (VRNDSCALEPD128 [a+1] x) +(FloorScaledFloat64x4 [a] x) => (VRNDSCALEPD256 [a+1] x) +(FloorScaledFloat64x8 [a] x) => (VRNDSCALEPD512 [a+1] x) +(FloorScaledMaskedFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) +(FloorScaledMaskedFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) +(FloorScaledMaskedFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) +(FloorScaledMaskedFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) +(FloorScaledMaskedFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) +(FloorScaledMaskedFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) +(FloorScaledResidueFloat32x4 [a] x) => (VREDUCEPS128 [a+1] x) +(FloorScaledResidueFloat32x8 [a] x) => (VREDUCEPS256 [a+1] x) +(FloorScaledResidueFloat32x16 [a] x) => (VREDUCEPS512 [a+1] x) +(FloorScaledResidueFloat64x2 [a] x) => (VREDUCEPD128 [a+1] x) +(FloorScaledResidueFloat64x4 [a] x) => (VREDUCEPD256 [a+1] x) +(FloorScaledResidueFloat64x8 [a] x) => (VREDUCEPD512 [a+1] x) +(FloorScaledResidueMaskedFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) +(FloorScaledResidueMaskedFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) +(FloorScaledResidueMaskedFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) +(FloorScaledResidueMaskedFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) +(FloorScaledResidueMaskedFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) +(FloorScaledResidueMaskedFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) (FusedMultiplyAddFloat32x4 ...) => (VFMADD213PS128 ...) (FusedMultiplyAddFloat32x8 ...) => (VFMADD213PS256 ...) (FusedMultiplyAddFloat32x16 ...) => (VFMADD213PS512 ...) @@ -849,18 +863,15 @@ (MulFloat64x2 ...) => (VMULPD128 ...) (MulFloat64x4 ...) => (VMULPD256 ...) (MulFloat64x8 ...) => (VMULPD512 ...) -(MulByPowOf2Float32x4 ...) => (VSCALEFPS128 ...) -(MulByPowOf2Float32x8 ...) => (VSCALEFPS256 ...) -(MulByPowOf2Float32x16 ...) 
=> (VSCALEFPS512 ...) -(MulByPowOf2Float64x2 ...) => (VSCALEFPD128 ...) -(MulByPowOf2Float64x4 ...) => (VSCALEFPD256 ...) -(MulByPowOf2Float64x8 ...) => (VSCALEFPD512 ...) -(MulByPowOf2MaskedFloat32x4 x y mask) => (VSCALEFPSMasked128 x y (VPMOVVec32x4ToM mask)) -(MulByPowOf2MaskedFloat32x8 x y mask) => (VSCALEFPSMasked256 x y (VPMOVVec32x8ToM mask)) -(MulByPowOf2MaskedFloat32x16 x y mask) => (VSCALEFPSMasked512 x y (VPMOVVec32x16ToM mask)) -(MulByPowOf2MaskedFloat64x2 x y mask) => (VSCALEFPDMasked128 x y (VPMOVVec64x2ToM mask)) -(MulByPowOf2MaskedFloat64x4 x y mask) => (VSCALEFPDMasked256 x y (VPMOVVec64x4ToM mask)) -(MulByPowOf2MaskedFloat64x8 x y mask) => (VSCALEFPDMasked512 x y (VPMOVVec64x8ToM mask)) +(MulInt16x8 ...) => (VPMULLW128 ...) +(MulInt16x16 ...) => (VPMULLW256 ...) +(MulInt16x32 ...) => (VPMULLW512 ...) +(MulInt32x4 ...) => (VPMULLD128 ...) +(MulInt32x8 ...) => (VPMULLD256 ...) +(MulInt32x16 ...) => (VPMULLD512 ...) +(MulInt64x2 ...) => (VPMULLQ128 ...) +(MulInt64x4 ...) => (VPMULLQ256 ...) +(MulInt64x8 ...) => (VPMULLQ512 ...) (MulEvenWidenInt32x4 ...) => (VPMULDQ128 ...) (MulEvenWidenInt32x8 ...) => (VPMULDQ256 ...) (MulEvenWidenInt64x2 ...) => (VPMULDQ128 ...) @@ -889,30 +900,21 @@ (MulHighMaskedUint16x8 x y mask) => (VPMULHUWMasked128 x y (VPMOVVec16x8ToM mask)) (MulHighMaskedUint16x16 x y mask) => (VPMULHUWMasked256 x y (VPMOVVec16x16ToM mask)) (MulHighMaskedUint16x32 x y mask) => (VPMULHUWMasked512 x y (VPMOVVec16x32ToM mask)) -(MulLowInt16x8 ...) => (VPMULLW128 ...) -(MulLowInt16x16 ...) => (VPMULLW256 ...) -(MulLowInt16x32 ...) => (VPMULLW512 ...) -(MulLowInt32x4 ...) => (VPMULLD128 ...) -(MulLowInt32x8 ...) => (VPMULLD256 ...) -(MulLowInt32x16 ...) => (VPMULLD512 ...) -(MulLowInt64x2 ...) => (VPMULLQ128 ...) -(MulLowInt64x4 ...) => (VPMULLQ256 ...) -(MulLowInt64x8 ...) => (VPMULLQ512 ...) 
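
[Editor's note, not part of the patch.] The MulLow* rules removed here are replaced by the plain Mul* forms added just above; the lowering to VPMULL{W,D,Q} is unchanged, so integer Mul keeps only the low half of each lane's product, with the separate MulHigh ops covering the upper half for 16-bit lanes. A scalar-equivalent sketch for one int16 lane (the package and helper names are illustrative only):

package simdsemantics

// mulLowInt16 mirrors one int16 lane of VPMULLW: the full 32-bit product is
// formed and only its low 16 bits are kept.
func mulLowInt16(x, y int16) int16 {
	return int16(int32(x) * int32(y))
}

// Example: mulLowInt16(300, 300) == 24464, the low 16 bits of 90000.
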
-(MulLowMaskedInt16x8 x y mask) => (VPMULLWMasked128 x y (VPMOVVec16x8ToM mask)) -(MulLowMaskedInt16x16 x y mask) => (VPMULLWMasked256 x y (VPMOVVec16x16ToM mask)) -(MulLowMaskedInt16x32 x y mask) => (VPMULLWMasked512 x y (VPMOVVec16x32ToM mask)) -(MulLowMaskedInt32x4 x y mask) => (VPMULLDMasked128 x y (VPMOVVec32x4ToM mask)) -(MulLowMaskedInt32x8 x y mask) => (VPMULLDMasked256 x y (VPMOVVec32x8ToM mask)) -(MulLowMaskedInt32x16 x y mask) => (VPMULLDMasked512 x y (VPMOVVec32x16ToM mask)) -(MulLowMaskedInt64x2 x y mask) => (VPMULLQMasked128 x y (VPMOVVec64x2ToM mask)) -(MulLowMaskedInt64x4 x y mask) => (VPMULLQMasked256 x y (VPMOVVec64x4ToM mask)) -(MulLowMaskedInt64x8 x y mask) => (VPMULLQMasked512 x y (VPMOVVec64x8ToM mask)) (MulMaskedFloat32x4 x y mask) => (VMULPSMasked128 x y (VPMOVVec32x4ToM mask)) (MulMaskedFloat32x8 x y mask) => (VMULPSMasked256 x y (VPMOVVec32x8ToM mask)) (MulMaskedFloat32x16 x y mask) => (VMULPSMasked512 x y (VPMOVVec32x16ToM mask)) (MulMaskedFloat64x2 x y mask) => (VMULPDMasked128 x y (VPMOVVec64x2ToM mask)) (MulMaskedFloat64x4 x y mask) => (VMULPDMasked256 x y (VPMOVVec64x4ToM mask)) (MulMaskedFloat64x8 x y mask) => (VMULPDMasked512 x y (VPMOVVec64x8ToM mask)) +(MulMaskedInt16x8 x y mask) => (VPMULLWMasked128 x y (VPMOVVec16x8ToM mask)) +(MulMaskedInt16x16 x y mask) => (VPMULLWMasked256 x y (VPMOVVec16x16ToM mask)) +(MulMaskedInt16x32 x y mask) => (VPMULLWMasked512 x y (VPMOVVec16x32ToM mask)) +(MulMaskedInt32x4 x y mask) => (VPMULLDMasked128 x y (VPMOVVec32x4ToM mask)) +(MulMaskedInt32x8 x y mask) => (VPMULLDMasked256 x y (VPMOVVec32x8ToM mask)) +(MulMaskedInt32x16 x y mask) => (VPMULLDMasked512 x y (VPMOVVec32x16ToM mask)) +(MulMaskedInt64x2 x y mask) => (VPMULLQMasked128 x y (VPMOVVec64x2ToM mask)) +(MulMaskedInt64x4 x y mask) => (VPMULLQMasked256 x y (VPMOVVec64x4ToM mask)) +(MulMaskedInt64x8 x y mask) => (VPMULLQMasked512 x y (VPMOVVec64x8ToM mask)) (NotEqualFloat32x4 x y) => (VCMPPS128 [4] x y) (NotEqualFloat32x8 x y) => (VCMPPS256 [4] x y) (NotEqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [4] x y)) @@ -1015,30 +1017,6 @@ (PairDotProdMaskedInt16x8 x y mask) => (VPMADDWDMasked128 x y (VPMOVVec16x8ToM mask)) (PairDotProdMaskedInt16x16 x y mask) => (VPMADDWDMasked256 x y (VPMOVVec16x16ToM mask)) (PairDotProdMaskedInt16x32 x y mask) => (VPMADDWDMasked512 x y (VPMOVVec16x32ToM mask)) -(PairwiseAddFloat32x4 ...) => (VHADDPS128 ...) -(PairwiseAddFloat32x8 ...) => (VHADDPS256 ...) -(PairwiseAddFloat64x2 ...) => (VHADDPD128 ...) -(PairwiseAddFloat64x4 ...) => (VHADDPD256 ...) -(PairwiseAddInt16x8 ...) => (VPHADDW128 ...) -(PairwiseAddInt16x16 ...) => (VPHADDW256 ...) -(PairwiseAddInt32x4 ...) => (VPHADDD128 ...) -(PairwiseAddInt32x8 ...) => (VPHADDD256 ...) -(PairwiseAddUint16x8 ...) => (VPHADDW128 ...) -(PairwiseAddUint16x16 ...) => (VPHADDW256 ...) -(PairwiseAddUint32x4 ...) => (VPHADDD128 ...) -(PairwiseAddUint32x8 ...) => (VPHADDD256 ...) -(PairwiseSubFloat32x4 ...) => (VHSUBPS128 ...) -(PairwiseSubFloat32x8 ...) => (VHSUBPS256 ...) -(PairwiseSubFloat64x2 ...) => (VHSUBPD128 ...) -(PairwiseSubFloat64x4 ...) => (VHSUBPD256 ...) -(PairwiseSubInt16x8 ...) => (VPHSUBW128 ...) -(PairwiseSubInt16x16 ...) => (VPHSUBW256 ...) -(PairwiseSubInt32x4 ...) => (VPHSUBD128 ...) -(PairwiseSubInt32x8 ...) => (VPHSUBD256 ...) -(PairwiseSubUint16x8 ...) => (VPHSUBW128 ...) -(PairwiseSubUint16x16 ...) => (VPHSUBW256 ...) -(PairwiseSubUint32x4 ...) => (VPHSUBD128 ...) -(PairwiseSubUint32x8 ...) => (VPHSUBD256 ...) (PermuteFloat32x8 ...) => (VPERMPS256 ...) 
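
[Editor's note, not part of the patch.] The Pairwise* rules removed here come back earlier in this file as AddPairs*/SubPairs*; the lowering to the horizontal HADD/HSUB family is unchanged. What VHADDPS does on one 128-bit group, as a scalar-equivalent sketch (taking the first operand as the destination-register operand):

package simdsemantics

// addPairsFloat32x4 mirrors VHADDPS on one 128-bit group: adjacent pairs of
// the first operand are summed into the low half of the result, adjacent
// pairs of the second operand into the high half.
func addPairsFloat32x4(x, y [4]float32) [4]float32 {
	return [4]float32{x[0] + x[1], x[2] + x[3], y[0] + y[1], y[2] + y[3]}
}
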
(PermuteFloat32x16 ...) => (VPERMPS512 ...) (PermuteFloat64x4 ...) => (VPERMPD256 ...) @@ -1295,76 +1273,36 @@ (RoundFloat32x8 x) => (VROUNDPS256 [0] x) (RoundFloat64x2 x) => (VROUNDPD128 [0] x) (RoundFloat64x4 x) => (VROUNDPD256 [0] x) -(RoundWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+0] x) -(RoundWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+0] x) -(RoundWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+0] x) -(RoundWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+0] x) -(RoundWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+0] x) -(RoundWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+0] x) -(RoundWithPrecisionMaskedFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) -(RoundWithPrecisionMaskedFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) -(RoundWithPrecisionMaskedFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) -(RoundWithPrecisionMaskedFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) -(RoundWithPrecisionMaskedFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) -(RoundWithPrecisionMaskedFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) -(SaturatedAddInt8x16 ...) => (VPADDSB128 ...) -(SaturatedAddInt8x32 ...) => (VPADDSB256 ...) -(SaturatedAddInt8x64 ...) => (VPADDSB512 ...) -(SaturatedAddInt16x8 ...) => (VPADDSW128 ...) -(SaturatedAddInt16x16 ...) => (VPADDSW256 ...) -(SaturatedAddInt16x32 ...) => (VPADDSW512 ...) -(SaturatedAddUint8x16 ...) => (VPADDSB128 ...) -(SaturatedAddUint8x32 ...) => (VPADDSB256 ...) -(SaturatedAddUint8x64 ...) => (VPADDSB512 ...) -(SaturatedAddUint16x8 ...) => (VPADDSW128 ...) -(SaturatedAddUint16x16 ...) => (VPADDSW256 ...) -(SaturatedAddUint16x32 ...) => (VPADDSW512 ...) 
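
[Editor's note, not part of the patch.] The SaturatedAdd* rules removed here (and the SaturatedSub* ones further down) were re-added earlier under the AddSaturated*/SubSaturated* names; the instructions are the same, so results still clamp at the lane's limits instead of wrapping. One signed int8 lane of VPADDSB, as a scalar sketch (the package and helper names are illustrative only):

package simdsemantics

// addSaturatedInt8 mirrors one lane of VPADDSB: the sum is clamped to the
// int8 range rather than wrapping modulo 256.
func addSaturatedInt8(x, y int8) int8 {
	s := int16(x) + int16(y)
	switch {
	case s > 127:
		return 127
	case s < -128:
		return -128
	}
	return int8(s)
}

// Example: addSaturatedInt8(120, 20) == 127, where a wrapping add would give -116.
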
+(RoundScaledFloat32x4 [a] x) => (VRNDSCALEPS128 [a+0] x) +(RoundScaledFloat32x8 [a] x) => (VRNDSCALEPS256 [a+0] x) +(RoundScaledFloat32x16 [a] x) => (VRNDSCALEPS512 [a+0] x) +(RoundScaledFloat64x2 [a] x) => (VRNDSCALEPD128 [a+0] x) +(RoundScaledFloat64x4 [a] x) => (VRNDSCALEPD256 [a+0] x) +(RoundScaledFloat64x8 [a] x) => (VRNDSCALEPD512 [a+0] x) +(RoundScaledMaskedFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) +(RoundScaledMaskedFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) +(RoundScaledMaskedFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) +(RoundScaledMaskedFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) +(RoundScaledMaskedFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) +(RoundScaledMaskedFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) +(RoundScaledResidueFloat32x4 [a] x) => (VREDUCEPS128 [a+0] x) +(RoundScaledResidueFloat32x8 [a] x) => (VREDUCEPS256 [a+0] x) +(RoundScaledResidueFloat32x16 [a] x) => (VREDUCEPS512 [a+0] x) +(RoundScaledResidueFloat64x2 [a] x) => (VREDUCEPD128 [a+0] x) +(RoundScaledResidueFloat64x4 [a] x) => (VREDUCEPD256 [a+0] x) +(RoundScaledResidueFloat64x8 [a] x) => (VREDUCEPD512 [a+0] x) +(RoundScaledResidueMaskedFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) +(RoundScaledResidueMaskedFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) +(RoundScaledResidueMaskedFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) +(RoundScaledResidueMaskedFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) +(RoundScaledResidueMaskedFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) +(RoundScaledResidueMaskedFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) (SaturatedAddDotProdInt32x4 ...) => (VPDPWSSDS128 ...) (SaturatedAddDotProdInt32x8 ...) => (VPDPWSSDS256 ...) (SaturatedAddDotProdInt32x16 ...) => (VPDPWSSDS512 ...) (SaturatedAddDotProdMaskedInt32x4 x y z mask) => (VPDPWSSDSMasked128 x y z (VPMOVVec32x4ToM mask)) (SaturatedAddDotProdMaskedInt32x8 x y z mask) => (VPDPWSSDSMasked256 x y z (VPMOVVec32x8ToM mask)) (SaturatedAddDotProdMaskedInt32x16 x y z mask) => (VPDPWSSDSMasked512 x y z (VPMOVVec32x16ToM mask)) -(SaturatedAddMaskedInt8x16 x y mask) => (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) -(SaturatedAddMaskedInt8x32 x y mask) => (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) -(SaturatedAddMaskedInt8x64 x y mask) => (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) -(SaturatedAddMaskedInt16x8 x y mask) => (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) -(SaturatedAddMaskedInt16x16 x y mask) => (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) -(SaturatedAddMaskedInt16x32 x y mask) => (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) -(SaturatedAddMaskedUint8x16 x y mask) => (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) -(SaturatedAddMaskedUint8x32 x y mask) => (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) -(SaturatedAddMaskedUint8x64 x y mask) => (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) -(SaturatedAddMaskedUint16x8 x y mask) => (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) -(SaturatedAddMaskedUint16x16 x y mask) => (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) -(SaturatedAddMaskedUint16x32 x y mask) => (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) -(SaturatedPairwiseAddInt16x8 ...) => (VPHADDSW128 ...) -(SaturatedPairwiseAddInt16x16 ...) 
=> (VPHADDSW256 ...) -(SaturatedPairwiseSubInt16x8 ...) => (VPHSUBSW128 ...) -(SaturatedPairwiseSubInt16x16 ...) => (VPHSUBSW256 ...) -(SaturatedSubInt8x16 ...) => (VPSUBSB128 ...) -(SaturatedSubInt8x32 ...) => (VPSUBSB256 ...) -(SaturatedSubInt8x64 ...) => (VPSUBSB512 ...) -(SaturatedSubInt16x8 ...) => (VPSUBSW128 ...) -(SaturatedSubInt16x16 ...) => (VPSUBSW256 ...) -(SaturatedSubInt16x32 ...) => (VPSUBSW512 ...) -(SaturatedSubUint8x16 ...) => (VPSUBSB128 ...) -(SaturatedSubUint8x32 ...) => (VPSUBSB256 ...) -(SaturatedSubUint8x64 ...) => (VPSUBSB512 ...) -(SaturatedSubUint16x8 ...) => (VPSUBSW128 ...) -(SaturatedSubUint16x16 ...) => (VPSUBSW256 ...) -(SaturatedSubUint16x32 ...) => (VPSUBSW512 ...) -(SaturatedSubMaskedInt8x16 x y mask) => (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) -(SaturatedSubMaskedInt8x32 x y mask) => (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) -(SaturatedSubMaskedInt8x64 x y mask) => (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) -(SaturatedSubMaskedInt16x8 x y mask) => (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) -(SaturatedSubMaskedInt16x16 x y mask) => (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) -(SaturatedSubMaskedInt16x32 x y mask) => (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) -(SaturatedSubMaskedUint8x16 x y mask) => (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) -(SaturatedSubMaskedUint8x32 x y mask) => (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) -(SaturatedSubMaskedUint8x64 x y mask) => (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) -(SaturatedSubMaskedUint16x8 x y mask) => (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) -(SaturatedSubMaskedUint16x16 x y mask) => (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) -(SaturatedSubMaskedUint16x32 x y mask) => (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) (SaturatedUnsignedSignedPairDotProdUint8x16 ...) => (VPMADDUBSW128 ...) (SaturatedUnsignedSignedPairDotProdUint8x32 ...) => (VPMADDUBSW256 ...) (SaturatedUnsignedSignedPairDotProdUint8x64 ...) => (VPMADDUBSW512 ...) @@ -1377,6 +1315,18 @@ (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4 x y z mask) => (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8 x y z mask) => (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16 x y z mask) => (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) +(ScaleFloat32x4 ...) => (VSCALEFPS128 ...) +(ScaleFloat32x8 ...) => (VSCALEFPS256 ...) +(ScaleFloat32x16 ...) => (VSCALEFPS512 ...) +(ScaleFloat64x2 ...) => (VSCALEFPD128 ...) +(ScaleFloat64x4 ...) => (VSCALEFPD256 ...) +(ScaleFloat64x8 ...) => (VSCALEFPD512 ...) +(ScaleMaskedFloat32x4 x y mask) => (VSCALEFPSMasked128 x y (VPMOVVec32x4ToM mask)) +(ScaleMaskedFloat32x8 x y mask) => (VSCALEFPSMasked256 x y (VPMOVVec32x8ToM mask)) +(ScaleMaskedFloat32x16 x y mask) => (VSCALEFPSMasked512 x y (VPMOVVec32x16ToM mask)) +(ScaleMaskedFloat64x2 x y mask) => (VSCALEFPDMasked128 x y (VPMOVVec64x2ToM mask)) +(ScaleMaskedFloat64x4 x y mask) => (VSCALEFPDMasked256 x y (VPMOVVec64x4ToM mask)) +(ScaleMaskedFloat64x8 x y mask) => (VSCALEFPDMasked512 x y (VPMOVVec64x8ToM mask)) (Set128Float32x8 ...) => (VINSERTF128256 ...) (Set128Float64x4 ...) => (VINSERTF128256 ...) (Set128Int8x32 ...) => (VINSERTI128256 ...) 
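
[Editor's note, not part of the patch.] MulByPowOf2* is renamed Scale* (the rules added just above); the lowering to VSCALEFP{S,D} is unchanged. Per lane the instruction computes x * 2**floor(y), a vector ldexp with a floating-point exponent argument. A scalar-equivalent sketch, ignoring the instruction's NaN/infinity special cases (the package and helper names are illustrative only):

package simdsemantics

import "math"

// scaleFloat64 mirrors one float64 lane of VSCALEFPD: x is multiplied by two
// raised to the floor of y.
func scaleFloat64(x, y float64) float64 {
	return math.Ldexp(x, int(math.Floor(y)))
}

// Examples: scaleFloat64(3, 2) == 12; scaleFloat64(1.5, -1) == 0.75.
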
@@ -1761,22 +1711,72 @@ (SubMaskedUint64x2 x y mask) => (VPSUBQMasked128 x y (VPMOVVec64x2ToM mask)) (SubMaskedUint64x4 x y mask) => (VPSUBQMasked256 x y (VPMOVVec64x4ToM mask)) (SubMaskedUint64x8 x y mask) => (VPSUBQMasked512 x y (VPMOVVec64x8ToM mask)) +(SubPairsFloat32x4 ...) => (VHSUBPS128 ...) +(SubPairsFloat32x8 ...) => (VHSUBPS256 ...) +(SubPairsFloat64x2 ...) => (VHSUBPD128 ...) +(SubPairsFloat64x4 ...) => (VHSUBPD256 ...) +(SubPairsInt16x8 ...) => (VPHSUBW128 ...) +(SubPairsInt16x16 ...) => (VPHSUBW256 ...) +(SubPairsInt32x4 ...) => (VPHSUBD128 ...) +(SubPairsInt32x8 ...) => (VPHSUBD256 ...) +(SubPairsUint16x8 ...) => (VPHSUBW128 ...) +(SubPairsUint16x16 ...) => (VPHSUBW256 ...) +(SubPairsUint32x4 ...) => (VPHSUBD128 ...) +(SubPairsUint32x8 ...) => (VPHSUBD256 ...) +(SubPairsSaturatedInt16x8 ...) => (VPHSUBSW128 ...) +(SubPairsSaturatedInt16x16 ...) => (VPHSUBSW256 ...) +(SubSaturatedInt8x16 ...) => (VPSUBSB128 ...) +(SubSaturatedInt8x32 ...) => (VPSUBSB256 ...) +(SubSaturatedInt8x64 ...) => (VPSUBSB512 ...) +(SubSaturatedInt16x8 ...) => (VPSUBSW128 ...) +(SubSaturatedInt16x16 ...) => (VPSUBSW256 ...) +(SubSaturatedInt16x32 ...) => (VPSUBSW512 ...) +(SubSaturatedUint8x16 ...) => (VPSUBSB128 ...) +(SubSaturatedUint8x32 ...) => (VPSUBSB256 ...) +(SubSaturatedUint8x64 ...) => (VPSUBSB512 ...) +(SubSaturatedUint16x8 ...) => (VPSUBSW128 ...) +(SubSaturatedUint16x16 ...) => (VPSUBSW256 ...) +(SubSaturatedUint16x32 ...) => (VPSUBSW512 ...) +(SubSaturatedMaskedInt8x16 x y mask) => (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) +(SubSaturatedMaskedInt8x32 x y mask) => (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) +(SubSaturatedMaskedInt8x64 x y mask) => (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) +(SubSaturatedMaskedInt16x8 x y mask) => (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) +(SubSaturatedMaskedInt16x16 x y mask) => (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) +(SubSaturatedMaskedInt16x32 x y mask) => (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) +(SubSaturatedMaskedUint8x16 x y mask) => (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) +(SubSaturatedMaskedUint8x32 x y mask) => (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) +(SubSaturatedMaskedUint8x64 x y mask) => (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) +(SubSaturatedMaskedUint16x8 x y mask) => (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) +(SubSaturatedMaskedUint16x16 x y mask) => (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) +(SubSaturatedMaskedUint16x32 x y mask) => (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) (TruncFloat32x4 x) => (VROUNDPS128 [3] x) (TruncFloat32x8 x) => (VROUNDPS256 [3] x) (TruncFloat64x2 x) => (VROUNDPD128 [3] x) (TruncFloat64x4 x) => (VROUNDPD256 [3] x) -(TruncWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+3] x) -(TruncWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+3] x) -(TruncWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+3] x) -(TruncWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+3] x) -(TruncWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+3] x) -(TruncWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+3] x) -(TruncWithPrecisionMaskedFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) -(TruncWithPrecisionMaskedFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) -(TruncWithPrecisionMaskedFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) -(TruncWithPrecisionMaskedFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) -(TruncWithPrecisionMaskedFloat64x4 [a] x mask) 
=> (VRNDSCALEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) -(TruncWithPrecisionMaskedFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) +(TruncScaledFloat32x4 [a] x) => (VRNDSCALEPS128 [a+3] x) +(TruncScaledFloat32x8 [a] x) => (VRNDSCALEPS256 [a+3] x) +(TruncScaledFloat32x16 [a] x) => (VRNDSCALEPS512 [a+3] x) +(TruncScaledFloat64x2 [a] x) => (VRNDSCALEPD128 [a+3] x) +(TruncScaledFloat64x4 [a] x) => (VRNDSCALEPD256 [a+3] x) +(TruncScaledFloat64x8 [a] x) => (VRNDSCALEPD512 [a+3] x) +(TruncScaledMaskedFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) +(TruncScaledMaskedFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) +(TruncScaledMaskedFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) +(TruncScaledMaskedFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) +(TruncScaledMaskedFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) +(TruncScaledMaskedFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) +(TruncScaledResidueFloat32x4 [a] x) => (VREDUCEPS128 [a+3] x) +(TruncScaledResidueFloat32x8 [a] x) => (VREDUCEPS256 [a+3] x) +(TruncScaledResidueFloat32x16 [a] x) => (VREDUCEPS512 [a+3] x) +(TruncScaledResidueFloat64x2 [a] x) => (VREDUCEPD128 [a+3] x) +(TruncScaledResidueFloat64x4 [a] x) => (VREDUCEPD256 [a+3] x) +(TruncScaledResidueFloat64x8 [a] x) => (VREDUCEPD512 [a+3] x) +(TruncScaledResidueMaskedFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) +(TruncScaledResidueMaskedFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) +(TruncScaledResidueMaskedFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) +(TruncScaledResidueMaskedFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) +(TruncScaledResidueMaskedFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) +(TruncScaledResidueMaskedFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) (UnsignedSignedQuadDotProdAccumulateInt32x4 ...) => (VPDPBUSD128 ...) (UnsignedSignedQuadDotProdAccumulateInt32x8 ...) => (VPDPBUSD256 ...) (UnsignedSignedQuadDotProdAccumulateInt32x16 ...) => (VPDPBUSD512 ...) 
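
[Editor's note, not part of the patch.] The simdgenericOps.go hunks that follow apply the same renames to the machine-independent op table. Each opData entry only declares an op's shape, which the rules above then lower; the annotated sketch below shows how three entries from the table are meant to be read (the variable name is illustrative, and the field meanings follow the _gen framework's conventions):

var exampleOps = []opData{
	// One vector input; the rounding/scale immediate travels in the Int8 aux field.
	{name: "CeilScaledFloat32x4", argLength: 1, aux: "Int8"},
	// Two vector inputs; commutative lets the compiler canonicalize argument order.
	{name: "AddSaturatedInt8x16", argLength: 2, commutative: true},
	// Two vector inputs plus a mask input, matching the masked rules above.
	{name: "AddSaturatedMaskedInt8x16", argLength: 3, commutative: true},
}
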
diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 492a994e9363cf..ea52254413f792 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -81,6 +81,44 @@ func simdGenericOps() []opData { {name: "AddMaskedUint64x2", argLength: 3, commutative: true}, {name: "AddMaskedUint64x4", argLength: 3, commutative: true}, {name: "AddMaskedUint64x8", argLength: 3, commutative: true}, + {name: "AddPairsFloat32x4", argLength: 2, commutative: false}, + {name: "AddPairsFloat32x8", argLength: 2, commutative: false}, + {name: "AddPairsFloat64x2", argLength: 2, commutative: false}, + {name: "AddPairsFloat64x4", argLength: 2, commutative: false}, + {name: "AddPairsInt16x8", argLength: 2, commutative: false}, + {name: "AddPairsInt16x16", argLength: 2, commutative: false}, + {name: "AddPairsInt32x4", argLength: 2, commutative: false}, + {name: "AddPairsInt32x8", argLength: 2, commutative: false}, + {name: "AddPairsSaturatedInt16x8", argLength: 2, commutative: false}, + {name: "AddPairsSaturatedInt16x16", argLength: 2, commutative: false}, + {name: "AddPairsUint16x8", argLength: 2, commutative: false}, + {name: "AddPairsUint16x16", argLength: 2, commutative: false}, + {name: "AddPairsUint32x4", argLength: 2, commutative: false}, + {name: "AddPairsUint32x8", argLength: 2, commutative: false}, + {name: "AddSaturatedInt8x16", argLength: 2, commutative: true}, + {name: "AddSaturatedInt8x32", argLength: 2, commutative: true}, + {name: "AddSaturatedInt8x64", argLength: 2, commutative: true}, + {name: "AddSaturatedInt16x8", argLength: 2, commutative: true}, + {name: "AddSaturatedInt16x16", argLength: 2, commutative: true}, + {name: "AddSaturatedInt16x32", argLength: 2, commutative: true}, + {name: "AddSaturatedMaskedInt8x16", argLength: 3, commutative: true}, + {name: "AddSaturatedMaskedInt8x32", argLength: 3, commutative: true}, + {name: "AddSaturatedMaskedInt8x64", argLength: 3, commutative: true}, + {name: "AddSaturatedMaskedInt16x8", argLength: 3, commutative: true}, + {name: "AddSaturatedMaskedInt16x16", argLength: 3, commutative: true}, + {name: "AddSaturatedMaskedInt16x32", argLength: 3, commutative: true}, + {name: "AddSaturatedMaskedUint8x16", argLength: 3, commutative: true}, + {name: "AddSaturatedMaskedUint8x32", argLength: 3, commutative: true}, + {name: "AddSaturatedMaskedUint8x64", argLength: 3, commutative: true}, + {name: "AddSaturatedMaskedUint16x8", argLength: 3, commutative: true}, + {name: "AddSaturatedMaskedUint16x16", argLength: 3, commutative: true}, + {name: "AddSaturatedMaskedUint16x32", argLength: 3, commutative: true}, + {name: "AddSaturatedUint8x16", argLength: 2, commutative: true}, + {name: "AddSaturatedUint8x32", argLength: 2, commutative: true}, + {name: "AddSaturatedUint8x64", argLength: 2, commutative: true}, + {name: "AddSaturatedUint16x8", argLength: 2, commutative: true}, + {name: "AddSaturatedUint16x16", argLength: 2, commutative: true}, + {name: "AddSaturatedUint16x32", argLength: 2, commutative: true}, {name: "AddSubFloat32x4", argLength: 2, commutative: false}, {name: "AddSubFloat32x8", argLength: 2, commutative: false}, {name: "AddSubFloat64x2", argLength: 2, commutative: false}, @@ -744,18 +782,6 @@ func simdGenericOps() []opData { {name: "MinUint64x2", argLength: 2, commutative: true}, {name: "MinUint64x4", argLength: 2, commutative: true}, {name: "MinUint64x8", argLength: 2, commutative: true}, - {name: "MulByPowOf2Float32x4", 
argLength: 2, commutative: false}, - {name: "MulByPowOf2Float32x8", argLength: 2, commutative: false}, - {name: "MulByPowOf2Float32x16", argLength: 2, commutative: false}, - {name: "MulByPowOf2Float64x2", argLength: 2, commutative: false}, - {name: "MulByPowOf2Float64x4", argLength: 2, commutative: false}, - {name: "MulByPowOf2Float64x8", argLength: 2, commutative: false}, - {name: "MulByPowOf2MaskedFloat32x4", argLength: 3, commutative: false}, - {name: "MulByPowOf2MaskedFloat32x8", argLength: 3, commutative: false}, - {name: "MulByPowOf2MaskedFloat32x16", argLength: 3, commutative: false}, - {name: "MulByPowOf2MaskedFloat64x2", argLength: 3, commutative: false}, - {name: "MulByPowOf2MaskedFloat64x4", argLength: 3, commutative: false}, - {name: "MulByPowOf2MaskedFloat64x8", argLength: 3, commutative: false}, {name: "MulEvenWidenInt32x4", argLength: 2, commutative: true}, {name: "MulEvenWidenInt32x8", argLength: 2, commutative: true}, {name: "MulEvenWidenInt64x2", argLength: 2, commutative: true}, @@ -790,30 +816,30 @@ func simdGenericOps() []opData { {name: "MulHighUint16x8", argLength: 2, commutative: true}, {name: "MulHighUint16x16", argLength: 2, commutative: true}, {name: "MulHighUint16x32", argLength: 2, commutative: true}, - {name: "MulLowInt16x8", argLength: 2, commutative: true}, - {name: "MulLowInt16x16", argLength: 2, commutative: true}, - {name: "MulLowInt16x32", argLength: 2, commutative: true}, - {name: "MulLowInt32x4", argLength: 2, commutative: true}, - {name: "MulLowInt32x8", argLength: 2, commutative: true}, - {name: "MulLowInt32x16", argLength: 2, commutative: true}, - {name: "MulLowInt64x2", argLength: 2, commutative: true}, - {name: "MulLowInt64x4", argLength: 2, commutative: true}, - {name: "MulLowInt64x8", argLength: 2, commutative: true}, - {name: "MulLowMaskedInt16x8", argLength: 3, commutative: true}, - {name: "MulLowMaskedInt16x16", argLength: 3, commutative: true}, - {name: "MulLowMaskedInt16x32", argLength: 3, commutative: true}, - {name: "MulLowMaskedInt32x4", argLength: 3, commutative: true}, - {name: "MulLowMaskedInt32x8", argLength: 3, commutative: true}, - {name: "MulLowMaskedInt32x16", argLength: 3, commutative: true}, - {name: "MulLowMaskedInt64x2", argLength: 3, commutative: true}, - {name: "MulLowMaskedInt64x4", argLength: 3, commutative: true}, - {name: "MulLowMaskedInt64x8", argLength: 3, commutative: true}, + {name: "MulInt16x8", argLength: 2, commutative: true}, + {name: "MulInt16x16", argLength: 2, commutative: true}, + {name: "MulInt16x32", argLength: 2, commutative: true}, + {name: "MulInt32x4", argLength: 2, commutative: true}, + {name: "MulInt32x8", argLength: 2, commutative: true}, + {name: "MulInt32x16", argLength: 2, commutative: true}, + {name: "MulInt64x2", argLength: 2, commutative: true}, + {name: "MulInt64x4", argLength: 2, commutative: true}, + {name: "MulInt64x8", argLength: 2, commutative: true}, {name: "MulMaskedFloat32x4", argLength: 3, commutative: true}, {name: "MulMaskedFloat32x8", argLength: 3, commutative: true}, {name: "MulMaskedFloat32x16", argLength: 3, commutative: true}, {name: "MulMaskedFloat64x2", argLength: 3, commutative: true}, {name: "MulMaskedFloat64x4", argLength: 3, commutative: true}, {name: "MulMaskedFloat64x8", argLength: 3, commutative: true}, + {name: "MulMaskedInt16x8", argLength: 3, commutative: true}, + {name: "MulMaskedInt16x16", argLength: 3, commutative: true}, + {name: "MulMaskedInt16x32", argLength: 3, commutative: true}, + {name: "MulMaskedInt32x4", argLength: 3, commutative: true}, + {name: 
"MulMaskedInt32x8", argLength: 3, commutative: true}, + {name: "MulMaskedInt32x16", argLength: 3, commutative: true}, + {name: "MulMaskedInt64x2", argLength: 3, commutative: true}, + {name: "MulMaskedInt64x4", argLength: 3, commutative: true}, + {name: "MulMaskedInt64x8", argLength: 3, commutative: true}, {name: "NotEqualFloat32x4", argLength: 2, commutative: true}, {name: "NotEqualFloat32x8", argLength: 2, commutative: true}, {name: "NotEqualFloat32x16", argLength: 2, commutative: true}, @@ -916,30 +942,6 @@ func simdGenericOps() []opData { {name: "PairDotProdMaskedInt16x8", argLength: 3, commutative: false}, {name: "PairDotProdMaskedInt16x16", argLength: 3, commutative: false}, {name: "PairDotProdMaskedInt16x32", argLength: 3, commutative: false}, - {name: "PairwiseAddFloat32x4", argLength: 2, commutative: false}, - {name: "PairwiseAddFloat32x8", argLength: 2, commutative: false}, - {name: "PairwiseAddFloat64x2", argLength: 2, commutative: false}, - {name: "PairwiseAddFloat64x4", argLength: 2, commutative: false}, - {name: "PairwiseAddInt16x8", argLength: 2, commutative: false}, - {name: "PairwiseAddInt16x16", argLength: 2, commutative: false}, - {name: "PairwiseAddInt32x4", argLength: 2, commutative: false}, - {name: "PairwiseAddInt32x8", argLength: 2, commutative: false}, - {name: "PairwiseAddUint16x8", argLength: 2, commutative: false}, - {name: "PairwiseAddUint16x16", argLength: 2, commutative: false}, - {name: "PairwiseAddUint32x4", argLength: 2, commutative: false}, - {name: "PairwiseAddUint32x8", argLength: 2, commutative: false}, - {name: "PairwiseSubFloat32x4", argLength: 2, commutative: false}, - {name: "PairwiseSubFloat32x8", argLength: 2, commutative: false}, - {name: "PairwiseSubFloat64x2", argLength: 2, commutative: false}, - {name: "PairwiseSubFloat64x4", argLength: 2, commutative: false}, - {name: "PairwiseSubInt16x8", argLength: 2, commutative: false}, - {name: "PairwiseSubInt16x16", argLength: 2, commutative: false}, - {name: "PairwiseSubInt32x4", argLength: 2, commutative: false}, - {name: "PairwiseSubInt32x8", argLength: 2, commutative: false}, - {name: "PairwiseSubUint16x8", argLength: 2, commutative: false}, - {name: "PairwiseSubUint16x16", argLength: 2, commutative: false}, - {name: "PairwiseSubUint32x4", argLength: 2, commutative: false}, - {name: "PairwiseSubUint32x8", argLength: 2, commutative: false}, {name: "Permute2Float32x4", argLength: 3, commutative: false}, {name: "Permute2Float32x8", argLength: 3, commutative: false}, {name: "Permute2Float32x16", argLength: 3, commutative: false}, @@ -1154,58 +1156,6 @@ func simdGenericOps() []opData { {name: "SaturatedAddDotProdMaskedInt32x4", argLength: 4, commutative: false}, {name: "SaturatedAddDotProdMaskedInt32x8", argLength: 4, commutative: false}, {name: "SaturatedAddDotProdMaskedInt32x16", argLength: 4, commutative: false}, - {name: "SaturatedAddInt8x16", argLength: 2, commutative: true}, - {name: "SaturatedAddInt8x32", argLength: 2, commutative: true}, - {name: "SaturatedAddInt8x64", argLength: 2, commutative: true}, - {name: "SaturatedAddInt16x8", argLength: 2, commutative: true}, - {name: "SaturatedAddInt16x16", argLength: 2, commutative: true}, - {name: "SaturatedAddInt16x32", argLength: 2, commutative: true}, - {name: "SaturatedAddMaskedInt8x16", argLength: 3, commutative: true}, - {name: "SaturatedAddMaskedInt8x32", argLength: 3, commutative: true}, - {name: "SaturatedAddMaskedInt8x64", argLength: 3, commutative: true}, - {name: "SaturatedAddMaskedInt16x8", argLength: 3, commutative: true}, - {name: 
"SaturatedAddMaskedInt16x16", argLength: 3, commutative: true}, - {name: "SaturatedAddMaskedInt16x32", argLength: 3, commutative: true}, - {name: "SaturatedAddMaskedUint8x16", argLength: 3, commutative: true}, - {name: "SaturatedAddMaskedUint8x32", argLength: 3, commutative: true}, - {name: "SaturatedAddMaskedUint8x64", argLength: 3, commutative: true}, - {name: "SaturatedAddMaskedUint16x8", argLength: 3, commutative: true}, - {name: "SaturatedAddMaskedUint16x16", argLength: 3, commutative: true}, - {name: "SaturatedAddMaskedUint16x32", argLength: 3, commutative: true}, - {name: "SaturatedAddUint8x16", argLength: 2, commutative: true}, - {name: "SaturatedAddUint8x32", argLength: 2, commutative: true}, - {name: "SaturatedAddUint8x64", argLength: 2, commutative: true}, - {name: "SaturatedAddUint16x8", argLength: 2, commutative: true}, - {name: "SaturatedAddUint16x16", argLength: 2, commutative: true}, - {name: "SaturatedAddUint16x32", argLength: 2, commutative: true}, - {name: "SaturatedPairwiseAddInt16x8", argLength: 2, commutative: false}, - {name: "SaturatedPairwiseAddInt16x16", argLength: 2, commutative: false}, - {name: "SaturatedPairwiseSubInt16x8", argLength: 2, commutative: false}, - {name: "SaturatedPairwiseSubInt16x16", argLength: 2, commutative: false}, - {name: "SaturatedSubInt8x16", argLength: 2, commutative: false}, - {name: "SaturatedSubInt8x32", argLength: 2, commutative: false}, - {name: "SaturatedSubInt8x64", argLength: 2, commutative: false}, - {name: "SaturatedSubInt16x8", argLength: 2, commutative: false}, - {name: "SaturatedSubInt16x16", argLength: 2, commutative: false}, - {name: "SaturatedSubInt16x32", argLength: 2, commutative: false}, - {name: "SaturatedSubMaskedInt8x16", argLength: 3, commutative: false}, - {name: "SaturatedSubMaskedInt8x32", argLength: 3, commutative: false}, - {name: "SaturatedSubMaskedInt8x64", argLength: 3, commutative: false}, - {name: "SaturatedSubMaskedInt16x8", argLength: 3, commutative: false}, - {name: "SaturatedSubMaskedInt16x16", argLength: 3, commutative: false}, - {name: "SaturatedSubMaskedInt16x32", argLength: 3, commutative: false}, - {name: "SaturatedSubMaskedUint8x16", argLength: 3, commutative: false}, - {name: "SaturatedSubMaskedUint8x32", argLength: 3, commutative: false}, - {name: "SaturatedSubMaskedUint8x64", argLength: 3, commutative: false}, - {name: "SaturatedSubMaskedUint16x8", argLength: 3, commutative: false}, - {name: "SaturatedSubMaskedUint16x16", argLength: 3, commutative: false}, - {name: "SaturatedSubMaskedUint16x32", argLength: 3, commutative: false}, - {name: "SaturatedSubUint8x16", argLength: 2, commutative: false}, - {name: "SaturatedSubUint8x32", argLength: 2, commutative: false}, - {name: "SaturatedSubUint8x64", argLength: 2, commutative: false}, - {name: "SaturatedSubUint16x8", argLength: 2, commutative: false}, - {name: "SaturatedSubUint16x16", argLength: 2, commutative: false}, - {name: "SaturatedSubUint16x32", argLength: 2, commutative: false}, {name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x16", argLength: 3, commutative: false}, {name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x32", argLength: 3, commutative: false}, {name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x64", argLength: 3, commutative: false}, @@ -1218,6 +1168,18 @@ func simdGenericOps() []opData { {name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4", argLength: 4, commutative: false}, {name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8", argLength: 4, commutative: false}, {name: 
"SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16", argLength: 4, commutative: false}, + {name: "ScaleFloat32x4", argLength: 2, commutative: false}, + {name: "ScaleFloat32x8", argLength: 2, commutative: false}, + {name: "ScaleFloat32x16", argLength: 2, commutative: false}, + {name: "ScaleFloat64x2", argLength: 2, commutative: false}, + {name: "ScaleFloat64x4", argLength: 2, commutative: false}, + {name: "ScaleFloat64x8", argLength: 2, commutative: false}, + {name: "ScaleMaskedFloat32x4", argLength: 3, commutative: false}, + {name: "ScaleMaskedFloat32x8", argLength: 3, commutative: false}, + {name: "ScaleMaskedFloat32x16", argLength: 3, commutative: false}, + {name: "ScaleMaskedFloat64x2", argLength: 3, commutative: false}, + {name: "ScaleMaskedFloat64x4", argLength: 3, commutative: false}, + {name: "ScaleMaskedFloat64x8", argLength: 3, commutative: false}, {name: "ShiftAllLeftInt16x8", argLength: 2, commutative: false}, {name: "ShiftAllLeftInt16x16", argLength: 2, commutative: false}, {name: "ShiftAllLeftInt16x32", argLength: 2, commutative: false}, @@ -1500,6 +1462,44 @@ func simdGenericOps() []opData { {name: "SubMaskedUint64x2", argLength: 3, commutative: false}, {name: "SubMaskedUint64x4", argLength: 3, commutative: false}, {name: "SubMaskedUint64x8", argLength: 3, commutative: false}, + {name: "SubPairsFloat32x4", argLength: 2, commutative: false}, + {name: "SubPairsFloat32x8", argLength: 2, commutative: false}, + {name: "SubPairsFloat64x2", argLength: 2, commutative: false}, + {name: "SubPairsFloat64x4", argLength: 2, commutative: false}, + {name: "SubPairsInt16x8", argLength: 2, commutative: false}, + {name: "SubPairsInt16x16", argLength: 2, commutative: false}, + {name: "SubPairsInt32x4", argLength: 2, commutative: false}, + {name: "SubPairsInt32x8", argLength: 2, commutative: false}, + {name: "SubPairsSaturatedInt16x8", argLength: 2, commutative: false}, + {name: "SubPairsSaturatedInt16x16", argLength: 2, commutative: false}, + {name: "SubPairsUint16x8", argLength: 2, commutative: false}, + {name: "SubPairsUint16x16", argLength: 2, commutative: false}, + {name: "SubPairsUint32x4", argLength: 2, commutative: false}, + {name: "SubPairsUint32x8", argLength: 2, commutative: false}, + {name: "SubSaturatedInt8x16", argLength: 2, commutative: false}, + {name: "SubSaturatedInt8x32", argLength: 2, commutative: false}, + {name: "SubSaturatedInt8x64", argLength: 2, commutative: false}, + {name: "SubSaturatedInt16x8", argLength: 2, commutative: false}, + {name: "SubSaturatedInt16x16", argLength: 2, commutative: false}, + {name: "SubSaturatedInt16x32", argLength: 2, commutative: false}, + {name: "SubSaturatedMaskedInt8x16", argLength: 3, commutative: false}, + {name: "SubSaturatedMaskedInt8x32", argLength: 3, commutative: false}, + {name: "SubSaturatedMaskedInt8x64", argLength: 3, commutative: false}, + {name: "SubSaturatedMaskedInt16x8", argLength: 3, commutative: false}, + {name: "SubSaturatedMaskedInt16x16", argLength: 3, commutative: false}, + {name: "SubSaturatedMaskedInt16x32", argLength: 3, commutative: false}, + {name: "SubSaturatedMaskedUint8x16", argLength: 3, commutative: false}, + {name: "SubSaturatedMaskedUint8x32", argLength: 3, commutative: false}, + {name: "SubSaturatedMaskedUint8x64", argLength: 3, commutative: false}, + {name: "SubSaturatedMaskedUint16x8", argLength: 3, commutative: false}, + {name: "SubSaturatedMaskedUint16x16", argLength: 3, commutative: false}, + {name: "SubSaturatedMaskedUint16x32", argLength: 3, commutative: false}, + {name: 
"SubSaturatedUint8x16", argLength: 2, commutative: false}, + {name: "SubSaturatedUint8x32", argLength: 2, commutative: false}, + {name: "SubSaturatedUint8x64", argLength: 2, commutative: false}, + {name: "SubSaturatedUint16x8", argLength: 2, commutative: false}, + {name: "SubSaturatedUint16x16", argLength: 2, commutative: false}, + {name: "SubSaturatedUint16x32", argLength: 2, commutative: false}, {name: "SubUint8x16", argLength: 2, commutative: false}, {name: "SubUint8x32", argLength: 2, commutative: false}, {name: "SubUint8x64", argLength: 2, commutative: false}, @@ -1558,78 +1558,54 @@ func simdGenericOps() []opData { {name: "XorUint64x2", argLength: 2, commutative: true}, {name: "XorUint64x4", argLength: 2, commutative: true}, {name: "XorUint64x8", argLength: 2, commutative: true}, - {name: "CeilWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "CeilWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "CeilWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "CeilWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "CeilWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "CeilWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: 
"DiffWithFloorWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "FloorWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: 
"FloorWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "FloorWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "FloorWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "FloorWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "FloorWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "FloorWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "CeilScaledFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilScaledFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilScaledFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilScaledFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilScaledFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilScaledFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilScaledMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "CeilScaledMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "CeilScaledMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "CeilScaledMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "CeilScaledMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "CeilScaledMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "CeilScaledResidueFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilScaledResidueFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilScaledResidueFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilScaledResidueFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilScaledResidueFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilScaledResidueFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilScaledResidueMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "CeilScaledResidueMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "CeilScaledResidueMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "CeilScaledResidueMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "CeilScaledResidueMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "CeilScaledResidueMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "FloorScaledFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorScaledFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorScaledFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorScaledFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorScaledFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorScaledFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorScaledMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "FloorScaledMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "FloorScaledMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "FloorScaledMaskedFloat64x2", argLength: 2, commutative: false, aux: 
"Int8"}, + {name: "FloorScaledMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "FloorScaledMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "FloorScaledResidueFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorScaledResidueFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorScaledResidueFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorScaledResidueFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorScaledResidueFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorScaledResidueFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorScaledResidueMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "FloorScaledResidueMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "FloorScaledResidueMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "FloorScaledResidueMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "FloorScaledResidueMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "FloorScaledResidueMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "GaloisFieldAffineTransformInverseMaskedUint8x16", argLength: 3, commutative: false, aux: "Int8"}, {name: "GaloisFieldAffineTransformInverseMaskedUint8x32", argLength: 3, commutative: false, aux: "Int8"}, {name: "GaloisFieldAffineTransformInverseMaskedUint8x64", argLength: 3, commutative: false, aux: "Int8"}, @@ -1708,18 +1684,30 @@ func simdGenericOps() []opData { {name: "RotateAllRightUint64x2", argLength: 1, commutative: false, aux: "Int8"}, {name: "RotateAllRightUint64x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "RotateAllRightUint64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundScaledFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundScaledFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundScaledFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundScaledFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundScaledFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundScaledFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundScaledMaskedFloat32x4", argLength: 2, commutative: false, aux: 
"Int8"}, + {name: "RoundScaledMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundScaledMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundScaledMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundScaledMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundScaledMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundScaledResidueFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundScaledResidueFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundScaledResidueFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundScaledResidueFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundScaledResidueFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundScaledResidueFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundScaledResidueMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundScaledResidueMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundScaledResidueMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundScaledResidueMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundScaledResidueMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundScaledResidueMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "Set128Float32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "Set128Float64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "Set128Int8x32", argLength: 2, commutative: false, aux: "Int8"}, @@ -1810,17 +1798,29 @@ func simdGenericOps() []opData { {name: "ShiftAllRightConcatUint64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllRightConcatUint64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllRightConcatUint64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "TruncWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "TruncWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "TruncWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "TruncWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "TruncWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "TruncWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "TruncScaledFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncScaledFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncScaledFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncScaledFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncScaledFloat64x4", argLength: 1, commutative: 
false, aux: "Int8"}, + {name: "TruncScaledFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncScaledMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "TruncScaledMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "TruncScaledMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "TruncScaledMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "TruncScaledMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "TruncScaledMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "TruncScaledResidueFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncScaledResidueFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncScaledResidueFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncScaledResidueFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncScaledResidueFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncScaledResidueFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncScaledResidueMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "TruncScaledResidueMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "TruncScaledResidueMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "TruncScaledResidueMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "TruncScaledResidueMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "TruncScaledResidueMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, } } diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index e8a5354c00130b..6dcbec2573b882 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -4567,6 +4567,44 @@ const ( OpAddMaskedUint64x2 OpAddMaskedUint64x4 OpAddMaskedUint64x8 + OpAddPairsFloat32x4 + OpAddPairsFloat32x8 + OpAddPairsFloat64x2 + OpAddPairsFloat64x4 + OpAddPairsInt16x8 + OpAddPairsInt16x16 + OpAddPairsInt32x4 + OpAddPairsInt32x8 + OpAddPairsSaturatedInt16x8 + OpAddPairsSaturatedInt16x16 + OpAddPairsUint16x8 + OpAddPairsUint16x16 + OpAddPairsUint32x4 + OpAddPairsUint32x8 + OpAddSaturatedInt8x16 + OpAddSaturatedInt8x32 + OpAddSaturatedInt8x64 + OpAddSaturatedInt16x8 + OpAddSaturatedInt16x16 + OpAddSaturatedInt16x32 + OpAddSaturatedMaskedInt8x16 + OpAddSaturatedMaskedInt8x32 + OpAddSaturatedMaskedInt8x64 + OpAddSaturatedMaskedInt16x8 + OpAddSaturatedMaskedInt16x16 + OpAddSaturatedMaskedInt16x32 + OpAddSaturatedMaskedUint8x16 + OpAddSaturatedMaskedUint8x32 + OpAddSaturatedMaskedUint8x64 + OpAddSaturatedMaskedUint16x8 + OpAddSaturatedMaskedUint16x16 + OpAddSaturatedMaskedUint16x32 + OpAddSaturatedUint8x16 + OpAddSaturatedUint8x32 + OpAddSaturatedUint8x64 + OpAddSaturatedUint16x8 + OpAddSaturatedUint16x16 + OpAddSaturatedUint16x32 OpAddSubFloat32x4 OpAddSubFloat32x8 OpAddSubFloat64x2 @@ -5230,18 +5268,6 @@ const ( OpMinUint64x2 OpMinUint64x4 OpMinUint64x8 - OpMulByPowOf2Float32x4 - OpMulByPowOf2Float32x8 - OpMulByPowOf2Float32x16 - OpMulByPowOf2Float64x2 - OpMulByPowOf2Float64x4 - OpMulByPowOf2Float64x8 - OpMulByPowOf2MaskedFloat32x4 - OpMulByPowOf2MaskedFloat32x8 - OpMulByPowOf2MaskedFloat32x16 - OpMulByPowOf2MaskedFloat64x2 - OpMulByPowOf2MaskedFloat64x4 - OpMulByPowOf2MaskedFloat64x8 OpMulEvenWidenInt32x4 OpMulEvenWidenInt32x8 OpMulEvenWidenInt64x2 @@ 
-5276,30 +5302,30 @@ const ( OpMulHighUint16x8 OpMulHighUint16x16 OpMulHighUint16x32 - OpMulLowInt16x8 - OpMulLowInt16x16 - OpMulLowInt16x32 - OpMulLowInt32x4 - OpMulLowInt32x8 - OpMulLowInt32x16 - OpMulLowInt64x2 - OpMulLowInt64x4 - OpMulLowInt64x8 - OpMulLowMaskedInt16x8 - OpMulLowMaskedInt16x16 - OpMulLowMaskedInt16x32 - OpMulLowMaskedInt32x4 - OpMulLowMaskedInt32x8 - OpMulLowMaskedInt32x16 - OpMulLowMaskedInt64x2 - OpMulLowMaskedInt64x4 - OpMulLowMaskedInt64x8 + OpMulInt16x8 + OpMulInt16x16 + OpMulInt16x32 + OpMulInt32x4 + OpMulInt32x8 + OpMulInt32x16 + OpMulInt64x2 + OpMulInt64x4 + OpMulInt64x8 OpMulMaskedFloat32x4 OpMulMaskedFloat32x8 OpMulMaskedFloat32x16 OpMulMaskedFloat64x2 OpMulMaskedFloat64x4 OpMulMaskedFloat64x8 + OpMulMaskedInt16x8 + OpMulMaskedInt16x16 + OpMulMaskedInt16x32 + OpMulMaskedInt32x4 + OpMulMaskedInt32x8 + OpMulMaskedInt32x16 + OpMulMaskedInt64x2 + OpMulMaskedInt64x4 + OpMulMaskedInt64x8 OpNotEqualFloat32x4 OpNotEqualFloat32x8 OpNotEqualFloat32x16 @@ -5402,30 +5428,6 @@ const ( OpPairDotProdMaskedInt16x8 OpPairDotProdMaskedInt16x16 OpPairDotProdMaskedInt16x32 - OpPairwiseAddFloat32x4 - OpPairwiseAddFloat32x8 - OpPairwiseAddFloat64x2 - OpPairwiseAddFloat64x4 - OpPairwiseAddInt16x8 - OpPairwiseAddInt16x16 - OpPairwiseAddInt32x4 - OpPairwiseAddInt32x8 - OpPairwiseAddUint16x8 - OpPairwiseAddUint16x16 - OpPairwiseAddUint32x4 - OpPairwiseAddUint32x8 - OpPairwiseSubFloat32x4 - OpPairwiseSubFloat32x8 - OpPairwiseSubFloat64x2 - OpPairwiseSubFloat64x4 - OpPairwiseSubInt16x8 - OpPairwiseSubInt16x16 - OpPairwiseSubInt32x4 - OpPairwiseSubInt32x8 - OpPairwiseSubUint16x8 - OpPairwiseSubUint16x16 - OpPairwiseSubUint32x4 - OpPairwiseSubUint32x8 OpPermute2Float32x4 OpPermute2Float32x8 OpPermute2Float32x16 @@ -5640,58 +5642,6 @@ const ( OpSaturatedAddDotProdMaskedInt32x4 OpSaturatedAddDotProdMaskedInt32x8 OpSaturatedAddDotProdMaskedInt32x16 - OpSaturatedAddInt8x16 - OpSaturatedAddInt8x32 - OpSaturatedAddInt8x64 - OpSaturatedAddInt16x8 - OpSaturatedAddInt16x16 - OpSaturatedAddInt16x32 - OpSaturatedAddMaskedInt8x16 - OpSaturatedAddMaskedInt8x32 - OpSaturatedAddMaskedInt8x64 - OpSaturatedAddMaskedInt16x8 - OpSaturatedAddMaskedInt16x16 - OpSaturatedAddMaskedInt16x32 - OpSaturatedAddMaskedUint8x16 - OpSaturatedAddMaskedUint8x32 - OpSaturatedAddMaskedUint8x64 - OpSaturatedAddMaskedUint16x8 - OpSaturatedAddMaskedUint16x16 - OpSaturatedAddMaskedUint16x32 - OpSaturatedAddUint8x16 - OpSaturatedAddUint8x32 - OpSaturatedAddUint8x64 - OpSaturatedAddUint16x8 - OpSaturatedAddUint16x16 - OpSaturatedAddUint16x32 - OpSaturatedPairwiseAddInt16x8 - OpSaturatedPairwiseAddInt16x16 - OpSaturatedPairwiseSubInt16x8 - OpSaturatedPairwiseSubInt16x16 - OpSaturatedSubInt8x16 - OpSaturatedSubInt8x32 - OpSaturatedSubInt8x64 - OpSaturatedSubInt16x8 - OpSaturatedSubInt16x16 - OpSaturatedSubInt16x32 - OpSaturatedSubMaskedInt8x16 - OpSaturatedSubMaskedInt8x32 - OpSaturatedSubMaskedInt8x64 - OpSaturatedSubMaskedInt16x8 - OpSaturatedSubMaskedInt16x16 - OpSaturatedSubMaskedInt16x32 - OpSaturatedSubMaskedUint8x16 - OpSaturatedSubMaskedUint8x32 - OpSaturatedSubMaskedUint8x64 - OpSaturatedSubMaskedUint16x8 - OpSaturatedSubMaskedUint16x16 - OpSaturatedSubMaskedUint16x32 - OpSaturatedSubUint8x16 - OpSaturatedSubUint8x32 - OpSaturatedSubUint8x64 - OpSaturatedSubUint16x8 - OpSaturatedSubUint16x16 - OpSaturatedSubUint16x32 OpSaturatedUnsignedSignedPairDotProdMaskedUint8x16 OpSaturatedUnsignedSignedPairDotProdMaskedUint8x32 OpSaturatedUnsignedSignedPairDotProdMaskedUint8x64 @@ -5704,6 +5654,18 @@ const ( 
OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4 OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8 OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16 + OpScaleFloat32x4 + OpScaleFloat32x8 + OpScaleFloat32x16 + OpScaleFloat64x2 + OpScaleFloat64x4 + OpScaleFloat64x8 + OpScaleMaskedFloat32x4 + OpScaleMaskedFloat32x8 + OpScaleMaskedFloat32x16 + OpScaleMaskedFloat64x2 + OpScaleMaskedFloat64x4 + OpScaleMaskedFloat64x8 OpShiftAllLeftInt16x8 OpShiftAllLeftInt16x16 OpShiftAllLeftInt16x32 @@ -5986,6 +5948,44 @@ const ( OpSubMaskedUint64x2 OpSubMaskedUint64x4 OpSubMaskedUint64x8 + OpSubPairsFloat32x4 + OpSubPairsFloat32x8 + OpSubPairsFloat64x2 + OpSubPairsFloat64x4 + OpSubPairsInt16x8 + OpSubPairsInt16x16 + OpSubPairsInt32x4 + OpSubPairsInt32x8 + OpSubPairsSaturatedInt16x8 + OpSubPairsSaturatedInt16x16 + OpSubPairsUint16x8 + OpSubPairsUint16x16 + OpSubPairsUint32x4 + OpSubPairsUint32x8 + OpSubSaturatedInt8x16 + OpSubSaturatedInt8x32 + OpSubSaturatedInt8x64 + OpSubSaturatedInt16x8 + OpSubSaturatedInt16x16 + OpSubSaturatedInt16x32 + OpSubSaturatedMaskedInt8x16 + OpSubSaturatedMaskedInt8x32 + OpSubSaturatedMaskedInt8x64 + OpSubSaturatedMaskedInt16x8 + OpSubSaturatedMaskedInt16x16 + OpSubSaturatedMaskedInt16x32 + OpSubSaturatedMaskedUint8x16 + OpSubSaturatedMaskedUint8x32 + OpSubSaturatedMaskedUint8x64 + OpSubSaturatedMaskedUint16x8 + OpSubSaturatedMaskedUint16x16 + OpSubSaturatedMaskedUint16x32 + OpSubSaturatedUint8x16 + OpSubSaturatedUint8x32 + OpSubSaturatedUint8x64 + OpSubSaturatedUint16x8 + OpSubSaturatedUint16x16 + OpSubSaturatedUint16x32 OpSubUint8x16 OpSubUint8x32 OpSubUint8x64 @@ -6044,78 +6044,54 @@ const ( OpXorUint64x2 OpXorUint64x4 OpXorUint64x8 - OpCeilWithPrecisionFloat32x4 - OpCeilWithPrecisionFloat32x8 - OpCeilWithPrecisionFloat32x16 - OpCeilWithPrecisionFloat64x2 - OpCeilWithPrecisionFloat64x4 - OpCeilWithPrecisionFloat64x8 - OpCeilWithPrecisionMaskedFloat32x4 - OpCeilWithPrecisionMaskedFloat32x8 - OpCeilWithPrecisionMaskedFloat32x16 - OpCeilWithPrecisionMaskedFloat64x2 - OpCeilWithPrecisionMaskedFloat64x4 - OpCeilWithPrecisionMaskedFloat64x8 - OpDiffWithCeilWithPrecisionFloat32x4 - OpDiffWithCeilWithPrecisionFloat32x8 - OpDiffWithCeilWithPrecisionFloat32x16 - OpDiffWithCeilWithPrecisionFloat64x2 - OpDiffWithCeilWithPrecisionFloat64x4 - OpDiffWithCeilWithPrecisionFloat64x8 - OpDiffWithCeilWithPrecisionMaskedFloat32x4 - OpDiffWithCeilWithPrecisionMaskedFloat32x8 - OpDiffWithCeilWithPrecisionMaskedFloat32x16 - OpDiffWithCeilWithPrecisionMaskedFloat64x2 - OpDiffWithCeilWithPrecisionMaskedFloat64x4 - OpDiffWithCeilWithPrecisionMaskedFloat64x8 - OpDiffWithFloorWithPrecisionFloat32x4 - OpDiffWithFloorWithPrecisionFloat32x8 - OpDiffWithFloorWithPrecisionFloat32x16 - OpDiffWithFloorWithPrecisionFloat64x2 - OpDiffWithFloorWithPrecisionFloat64x4 - OpDiffWithFloorWithPrecisionFloat64x8 - OpDiffWithFloorWithPrecisionMaskedFloat32x4 - OpDiffWithFloorWithPrecisionMaskedFloat32x8 - OpDiffWithFloorWithPrecisionMaskedFloat32x16 - OpDiffWithFloorWithPrecisionMaskedFloat64x2 - OpDiffWithFloorWithPrecisionMaskedFloat64x4 - OpDiffWithFloorWithPrecisionMaskedFloat64x8 - OpDiffWithRoundWithPrecisionFloat32x4 - OpDiffWithRoundWithPrecisionFloat32x8 - OpDiffWithRoundWithPrecisionFloat32x16 - OpDiffWithRoundWithPrecisionFloat64x2 - OpDiffWithRoundWithPrecisionFloat64x4 - OpDiffWithRoundWithPrecisionFloat64x8 - OpDiffWithRoundWithPrecisionMaskedFloat32x4 - OpDiffWithRoundWithPrecisionMaskedFloat32x8 - OpDiffWithRoundWithPrecisionMaskedFloat32x16 - 
OpDiffWithRoundWithPrecisionMaskedFloat64x2 - OpDiffWithRoundWithPrecisionMaskedFloat64x4 - OpDiffWithRoundWithPrecisionMaskedFloat64x8 - OpDiffWithTruncWithPrecisionFloat32x4 - OpDiffWithTruncWithPrecisionFloat32x8 - OpDiffWithTruncWithPrecisionFloat32x16 - OpDiffWithTruncWithPrecisionFloat64x2 - OpDiffWithTruncWithPrecisionFloat64x4 - OpDiffWithTruncWithPrecisionFloat64x8 - OpDiffWithTruncWithPrecisionMaskedFloat32x4 - OpDiffWithTruncWithPrecisionMaskedFloat32x8 - OpDiffWithTruncWithPrecisionMaskedFloat32x16 - OpDiffWithTruncWithPrecisionMaskedFloat64x2 - OpDiffWithTruncWithPrecisionMaskedFloat64x4 - OpDiffWithTruncWithPrecisionMaskedFloat64x8 - OpFloorWithPrecisionFloat32x4 - OpFloorWithPrecisionFloat32x8 - OpFloorWithPrecisionFloat32x16 - OpFloorWithPrecisionFloat64x2 - OpFloorWithPrecisionFloat64x4 - OpFloorWithPrecisionFloat64x8 - OpFloorWithPrecisionMaskedFloat32x4 - OpFloorWithPrecisionMaskedFloat32x8 - OpFloorWithPrecisionMaskedFloat32x16 - OpFloorWithPrecisionMaskedFloat64x2 - OpFloorWithPrecisionMaskedFloat64x4 - OpFloorWithPrecisionMaskedFloat64x8 + OpCeilScaledFloat32x4 + OpCeilScaledFloat32x8 + OpCeilScaledFloat32x16 + OpCeilScaledFloat64x2 + OpCeilScaledFloat64x4 + OpCeilScaledFloat64x8 + OpCeilScaledMaskedFloat32x4 + OpCeilScaledMaskedFloat32x8 + OpCeilScaledMaskedFloat32x16 + OpCeilScaledMaskedFloat64x2 + OpCeilScaledMaskedFloat64x4 + OpCeilScaledMaskedFloat64x8 + OpCeilScaledResidueFloat32x4 + OpCeilScaledResidueFloat32x8 + OpCeilScaledResidueFloat32x16 + OpCeilScaledResidueFloat64x2 + OpCeilScaledResidueFloat64x4 + OpCeilScaledResidueFloat64x8 + OpCeilScaledResidueMaskedFloat32x4 + OpCeilScaledResidueMaskedFloat32x8 + OpCeilScaledResidueMaskedFloat32x16 + OpCeilScaledResidueMaskedFloat64x2 + OpCeilScaledResidueMaskedFloat64x4 + OpCeilScaledResidueMaskedFloat64x8 + OpFloorScaledFloat32x4 + OpFloorScaledFloat32x8 + OpFloorScaledFloat32x16 + OpFloorScaledFloat64x2 + OpFloorScaledFloat64x4 + OpFloorScaledFloat64x8 + OpFloorScaledMaskedFloat32x4 + OpFloorScaledMaskedFloat32x8 + OpFloorScaledMaskedFloat32x16 + OpFloorScaledMaskedFloat64x2 + OpFloorScaledMaskedFloat64x4 + OpFloorScaledMaskedFloat64x8 + OpFloorScaledResidueFloat32x4 + OpFloorScaledResidueFloat32x8 + OpFloorScaledResidueFloat32x16 + OpFloorScaledResidueFloat64x2 + OpFloorScaledResidueFloat64x4 + OpFloorScaledResidueFloat64x8 + OpFloorScaledResidueMaskedFloat32x4 + OpFloorScaledResidueMaskedFloat32x8 + OpFloorScaledResidueMaskedFloat32x16 + OpFloorScaledResidueMaskedFloat64x2 + OpFloorScaledResidueMaskedFloat64x4 + OpFloorScaledResidueMaskedFloat64x8 OpGaloisFieldAffineTransformInverseMaskedUint8x16 OpGaloisFieldAffineTransformInverseMaskedUint8x32 OpGaloisFieldAffineTransformInverseMaskedUint8x64 @@ -6194,18 +6170,30 @@ const ( OpRotateAllRightUint64x2 OpRotateAllRightUint64x4 OpRotateAllRightUint64x8 - OpRoundWithPrecisionFloat32x4 - OpRoundWithPrecisionFloat32x8 - OpRoundWithPrecisionFloat32x16 - OpRoundWithPrecisionFloat64x2 - OpRoundWithPrecisionFloat64x4 - OpRoundWithPrecisionFloat64x8 - OpRoundWithPrecisionMaskedFloat32x4 - OpRoundWithPrecisionMaskedFloat32x8 - OpRoundWithPrecisionMaskedFloat32x16 - OpRoundWithPrecisionMaskedFloat64x2 - OpRoundWithPrecisionMaskedFloat64x4 - OpRoundWithPrecisionMaskedFloat64x8 + OpRoundScaledFloat32x4 + OpRoundScaledFloat32x8 + OpRoundScaledFloat32x16 + OpRoundScaledFloat64x2 + OpRoundScaledFloat64x4 + OpRoundScaledFloat64x8 + OpRoundScaledMaskedFloat32x4 + OpRoundScaledMaskedFloat32x8 + OpRoundScaledMaskedFloat32x16 + OpRoundScaledMaskedFloat64x2 + 
OpRoundScaledMaskedFloat64x4 + OpRoundScaledMaskedFloat64x8 + OpRoundScaledResidueFloat32x4 + OpRoundScaledResidueFloat32x8 + OpRoundScaledResidueFloat32x16 + OpRoundScaledResidueFloat64x2 + OpRoundScaledResidueFloat64x4 + OpRoundScaledResidueFloat64x8 + OpRoundScaledResidueMaskedFloat32x4 + OpRoundScaledResidueMaskedFloat32x8 + OpRoundScaledResidueMaskedFloat32x16 + OpRoundScaledResidueMaskedFloat64x2 + OpRoundScaledResidueMaskedFloat64x4 + OpRoundScaledResidueMaskedFloat64x8 OpSet128Float32x8 OpSet128Float64x4 OpSet128Int8x32 @@ -6296,18 +6284,30 @@ const ( OpShiftAllRightConcatUint64x2 OpShiftAllRightConcatUint64x4 OpShiftAllRightConcatUint64x8 - OpTruncWithPrecisionFloat32x4 - OpTruncWithPrecisionFloat32x8 - OpTruncWithPrecisionFloat32x16 - OpTruncWithPrecisionFloat64x2 - OpTruncWithPrecisionFloat64x4 - OpTruncWithPrecisionFloat64x8 - OpTruncWithPrecisionMaskedFloat32x4 - OpTruncWithPrecisionMaskedFloat32x8 - OpTruncWithPrecisionMaskedFloat32x16 - OpTruncWithPrecisionMaskedFloat64x2 - OpTruncWithPrecisionMaskedFloat64x4 - OpTruncWithPrecisionMaskedFloat64x8 + OpTruncScaledFloat32x4 + OpTruncScaledFloat32x8 + OpTruncScaledFloat32x16 + OpTruncScaledFloat64x2 + OpTruncScaledFloat64x4 + OpTruncScaledFloat64x8 + OpTruncScaledMaskedFloat32x4 + OpTruncScaledMaskedFloat32x8 + OpTruncScaledMaskedFloat32x16 + OpTruncScaledMaskedFloat64x2 + OpTruncScaledMaskedFloat64x4 + OpTruncScaledMaskedFloat64x8 + OpTruncScaledResidueFloat32x4 + OpTruncScaledResidueFloat32x8 + OpTruncScaledResidueFloat32x16 + OpTruncScaledResidueFloat64x2 + OpTruncScaledResidueFloat64x4 + OpTruncScaledResidueFloat64x8 + OpTruncScaledResidueMaskedFloat32x4 + OpTruncScaledResidueMaskedFloat32x8 + OpTruncScaledResidueMaskedFloat32x16 + OpTruncScaledResidueMaskedFloat64x2 + OpTruncScaledResidueMaskedFloat64x4 + OpTruncScaledResidueMaskedFloat64x8 ) var opcodeTable = [...]opInfo{ @@ -62123,6 +62123,220 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "AddPairsFloat32x4", + argLen: 2, + generic: true, + }, + { + name: "AddPairsFloat32x8", + argLen: 2, + generic: true, + }, + { + name: "AddPairsFloat64x2", + argLen: 2, + generic: true, + }, + { + name: "AddPairsFloat64x4", + argLen: 2, + generic: true, + }, + { + name: "AddPairsInt16x8", + argLen: 2, + generic: true, + }, + { + name: "AddPairsInt16x16", + argLen: 2, + generic: true, + }, + { + name: "AddPairsInt32x4", + argLen: 2, + generic: true, + }, + { + name: "AddPairsInt32x8", + argLen: 2, + generic: true, + }, + { + name: "AddPairsSaturatedInt16x8", + argLen: 2, + generic: true, + }, + { + name: "AddPairsSaturatedInt16x16", + argLen: 2, + generic: true, + }, + { + name: "AddPairsUint16x8", + argLen: 2, + generic: true, + }, + { + name: "AddPairsUint16x16", + argLen: 2, + generic: true, + }, + { + name: "AddPairsUint32x4", + argLen: 2, + generic: true, + }, + { + name: "AddPairsUint32x8", + argLen: 2, + generic: true, + }, + { + name: "AddSaturatedInt8x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AddSaturatedInt8x32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AddSaturatedInt8x64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AddSaturatedInt16x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AddSaturatedInt16x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AddSaturatedInt16x32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AddSaturatedMaskedInt8x16", + argLen: 3, + commutative: true, + 
generic: true, + }, + { + name: "AddSaturatedMaskedInt8x32", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "AddSaturatedMaskedInt8x64", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "AddSaturatedMaskedInt16x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "AddSaturatedMaskedInt16x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "AddSaturatedMaskedInt16x32", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "AddSaturatedMaskedUint8x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "AddSaturatedMaskedUint8x32", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "AddSaturatedMaskedUint8x64", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "AddSaturatedMaskedUint16x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "AddSaturatedMaskedUint16x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "AddSaturatedMaskedUint16x32", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "AddSaturatedUint8x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AddSaturatedUint8x32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AddSaturatedUint8x64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AddSaturatedUint16x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AddSaturatedUint16x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AddSaturatedUint16x32", + argLen: 2, + commutative: true, + generic: true, + }, { name: "AddSubFloat32x4", argLen: 2, @@ -65693,66 +65907,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "MulByPowOf2Float32x4", - argLen: 2, - generic: true, - }, - { - name: "MulByPowOf2Float32x8", - argLen: 2, - generic: true, - }, - { - name: "MulByPowOf2Float32x16", - argLen: 2, - generic: true, - }, - { - name: "MulByPowOf2Float64x2", - argLen: 2, - generic: true, - }, - { - name: "MulByPowOf2Float64x4", - argLen: 2, - generic: true, - }, - { - name: "MulByPowOf2Float64x8", - argLen: 2, - generic: true, - }, - { - name: "MulByPowOf2MaskedFloat32x4", - argLen: 3, - generic: true, - }, - { - name: "MulByPowOf2MaskedFloat32x8", - argLen: 3, - generic: true, - }, - { - name: "MulByPowOf2MaskedFloat32x16", - argLen: 3, - generic: true, - }, - { - name: "MulByPowOf2MaskedFloat64x2", - argLen: 3, - generic: true, - }, - { - name: "MulByPowOf2MaskedFloat64x4", - argLen: 3, - generic: true, - }, - { - name: "MulByPowOf2MaskedFloat64x8", - argLen: 3, - generic: true, - }, { name: "MulEvenWidenInt32x4", argLen: 2, @@ -65958,113 +66112,59 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MulLowInt16x8", + name: "MulInt16x8", argLen: 2, commutative: true, generic: true, }, { - name: "MulLowInt16x16", + name: "MulInt16x16", argLen: 2, commutative: true, generic: true, }, { - name: "MulLowInt16x32", + name: "MulInt16x32", argLen: 2, commutative: true, generic: true, }, { - name: "MulLowInt32x4", + name: "MulInt32x4", argLen: 2, commutative: true, generic: true, }, { - name: "MulLowInt32x8", + name: "MulInt32x8", argLen: 2, commutative: true, generic: true, }, { - name: "MulLowInt32x16", + name: "MulInt32x16", argLen: 2, commutative: true, generic: true, }, { - name: "MulLowInt64x2", + name: "MulInt64x2", argLen: 2, commutative: true, generic: true, }, { - name: "MulLowInt64x4", + name: "MulInt64x4", argLen: 2, commutative: true, 
generic: true, }, { - name: "MulLowInt64x8", + name: "MulInt64x8", argLen: 2, commutative: true, generic: true, }, - { - name: "MulLowMaskedInt16x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MulLowMaskedInt16x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MulLowMaskedInt16x32", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MulLowMaskedInt32x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MulLowMaskedInt32x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MulLowMaskedInt32x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MulLowMaskedInt64x2", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MulLowMaskedInt64x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MulLowMaskedInt64x8", - argLen: 3, - commutative: true, - generic: true, - }, { name: "MulMaskedFloat32x4", argLen: 3, @@ -66101,6 +66201,60 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MulMaskedInt16x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MulMaskedInt16x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MulMaskedInt16x32", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MulMaskedInt32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MulMaskedInt32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MulMaskedInt32x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MulMaskedInt64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MulMaskedInt64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MulMaskedInt64x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "NotEqualFloat32x4", argLen: 2, @@ -66707,126 +66861,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "PairwiseAddFloat32x4", - argLen: 2, - generic: true, - }, - { - name: "PairwiseAddFloat32x8", - argLen: 2, - generic: true, - }, - { - name: "PairwiseAddFloat64x2", - argLen: 2, - generic: true, - }, - { - name: "PairwiseAddFloat64x4", - argLen: 2, - generic: true, - }, - { - name: "PairwiseAddInt16x8", - argLen: 2, - generic: true, - }, - { - name: "PairwiseAddInt16x16", - argLen: 2, - generic: true, - }, - { - name: "PairwiseAddInt32x4", - argLen: 2, - generic: true, - }, - { - name: "PairwiseAddInt32x8", - argLen: 2, - generic: true, - }, - { - name: "PairwiseAddUint16x8", - argLen: 2, - generic: true, - }, - { - name: "PairwiseAddUint16x16", - argLen: 2, - generic: true, - }, - { - name: "PairwiseAddUint32x4", - argLen: 2, - generic: true, - }, - { - name: "PairwiseAddUint32x8", - argLen: 2, - generic: true, - }, - { - name: "PairwiseSubFloat32x4", - argLen: 2, - generic: true, - }, - { - name: "PairwiseSubFloat32x8", - argLen: 2, - generic: true, - }, - { - name: "PairwiseSubFloat64x2", - argLen: 2, - generic: true, - }, - { - name: "PairwiseSubFloat64x4", - argLen: 2, - generic: true, - }, - { - name: "PairwiseSubInt16x8", - argLen: 2, - generic: true, - }, - { - name: "PairwiseSubInt16x16", - argLen: 2, - generic: true, - }, - { - name: "PairwiseSubInt32x4", - argLen: 2, - generic: true, - }, - { - name: "PairwiseSubInt32x8", - argLen: 2, - generic: true, - }, - { - name: "PairwiseSubUint16x8", - argLen: 2, - generic: true, - }, - { - name: "PairwiseSubUint16x16", - argLen: 2, - generic: true, - }, - { - name: 
"PairwiseSubUint32x4", - argLen: 2, - generic: true, - }, - { - name: "PairwiseSubUint32x8", - argLen: 2, - generic: true, - }, { name: "Permute2Float32x4", argLen: 3, @@ -67898,349 +67932,125 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "SaturatedAddInt8x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "SaturatedAddInt8x32", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "SaturatedAddInt8x64", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "SaturatedAddInt16x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "SaturatedAddInt16x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "SaturatedAddInt16x32", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "SaturatedAddMaskedInt8x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "SaturatedAddMaskedInt8x32", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "SaturatedAddMaskedInt8x64", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "SaturatedAddMaskedInt16x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "SaturatedAddMaskedInt16x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "SaturatedAddMaskedInt16x32", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "SaturatedAddMaskedUint8x16", - argLen: 3, - commutative: true, - generic: true, + name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x16", + argLen: 3, + generic: true, }, { - name: "SaturatedAddMaskedUint8x32", - argLen: 3, - commutative: true, - generic: true, + name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x32", + argLen: 3, + generic: true, }, { - name: "SaturatedAddMaskedUint8x64", - argLen: 3, - commutative: true, - generic: true, + name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x64", + argLen: 3, + generic: true, }, { - name: "SaturatedAddMaskedUint16x8", - argLen: 3, - commutative: true, - generic: true, + name: "SaturatedUnsignedSignedPairDotProdUint8x16", + argLen: 2, + generic: true, }, { - name: "SaturatedAddMaskedUint16x16", - argLen: 3, - commutative: true, - generic: true, + name: "SaturatedUnsignedSignedPairDotProdUint8x32", + argLen: 2, + generic: true, }, { - name: "SaturatedAddMaskedUint16x32", - argLen: 3, - commutative: true, - generic: true, + name: "SaturatedUnsignedSignedPairDotProdUint8x64", + argLen: 2, + generic: true, }, { - name: "SaturatedAddUint8x16", - argLen: 2, - commutative: true, - generic: true, + name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x4", + argLen: 3, + generic: true, }, { - name: "SaturatedAddUint8x32", - argLen: 2, - commutative: true, - generic: true, + name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x8", + argLen: 3, + generic: true, }, { - name: "SaturatedAddUint8x64", - argLen: 2, - commutative: true, - generic: true, + name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x16", + argLen: 3, + generic: true, }, { - name: "SaturatedAddUint16x8", - argLen: 2, - commutative: true, - generic: true, + name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4", + argLen: 4, + generic: true, }, { - name: "SaturatedAddUint16x16", - argLen: 2, - commutative: true, - generic: true, + name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8", + argLen: 4, + generic: true, }, { - name: "SaturatedAddUint16x32", - argLen: 2, - commutative: true, - generic: true, + name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16", + argLen: 4, + 
generic: true, }, { - name: "SaturatedPairwiseAddInt16x8", + name: "ScaleFloat32x4", argLen: 2, generic: true, }, { - name: "SaturatedPairwiseAddInt16x16", + name: "ScaleFloat32x8", argLen: 2, generic: true, }, { - name: "SaturatedPairwiseSubInt16x8", + name: "ScaleFloat32x16", argLen: 2, generic: true, }, { - name: "SaturatedPairwiseSubInt16x16", + name: "ScaleFloat64x2", argLen: 2, generic: true, }, { - name: "SaturatedSubInt8x16", + name: "ScaleFloat64x4", argLen: 2, generic: true, }, { - name: "SaturatedSubInt8x32", + name: "ScaleFloat64x8", argLen: 2, generic: true, }, { - name: "SaturatedSubInt8x64", - argLen: 2, - generic: true, - }, - { - name: "SaturatedSubInt16x8", - argLen: 2, - generic: true, - }, - { - name: "SaturatedSubInt16x16", - argLen: 2, - generic: true, - }, - { - name: "SaturatedSubInt16x32", - argLen: 2, - generic: true, - }, - { - name: "SaturatedSubMaskedInt8x16", + name: "ScaleMaskedFloat32x4", argLen: 3, generic: true, }, { - name: "SaturatedSubMaskedInt8x32", + name: "ScaleMaskedFloat32x8", argLen: 3, generic: true, }, { - name: "SaturatedSubMaskedInt8x64", + name: "ScaleMaskedFloat32x16", argLen: 3, generic: true, }, { - name: "SaturatedSubMaskedInt16x8", + name: "ScaleMaskedFloat64x2", argLen: 3, generic: true, }, { - name: "SaturatedSubMaskedInt16x16", + name: "ScaleMaskedFloat64x4", argLen: 3, generic: true, }, { - name: "SaturatedSubMaskedInt16x32", + name: "ScaleMaskedFloat64x8", argLen: 3, generic: true, }, - { - name: "SaturatedSubMaskedUint8x16", - argLen: 3, - generic: true, - }, - { - name: "SaturatedSubMaskedUint8x32", - argLen: 3, - generic: true, - }, - { - name: "SaturatedSubMaskedUint8x64", - argLen: 3, - generic: true, - }, - { - name: "SaturatedSubMaskedUint16x8", - argLen: 3, - generic: true, - }, - { - name: "SaturatedSubMaskedUint16x16", - argLen: 3, - generic: true, - }, - { - name: "SaturatedSubMaskedUint16x32", - argLen: 3, - generic: true, - }, - { - name: "SaturatedSubUint8x16", - argLen: 2, - generic: true, - }, - { - name: "SaturatedSubUint8x32", - argLen: 2, - generic: true, - }, - { - name: "SaturatedSubUint8x64", - argLen: 2, - generic: true, - }, - { - name: "SaturatedSubUint16x8", - argLen: 2, - generic: true, - }, - { - name: "SaturatedSubUint16x16", - argLen: 2, - generic: true, - }, - { - name: "SaturatedSubUint16x32", - argLen: 2, - generic: true, - }, - { - name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x16", - argLen: 3, - generic: true, - }, - { - name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x32", - argLen: 3, - generic: true, - }, - { - name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x64", - argLen: 3, - generic: true, - }, - { - name: "SaturatedUnsignedSignedPairDotProdUint8x16", - argLen: 2, - generic: true, - }, - { - name: "SaturatedUnsignedSignedPairDotProdUint8x32", - argLen: 2, - generic: true, - }, - { - name: "SaturatedUnsignedSignedPairDotProdUint8x64", - argLen: 2, - generic: true, - }, - { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x4", - argLen: 3, - generic: true, - }, - { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x8", - argLen: 3, - generic: true, - }, - { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x16", - argLen: 3, - generic: true, - }, - { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4", - argLen: 4, - generic: true, - }, - { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8", - argLen: 4, - generic: true, - }, - { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16", - argLen: 4, - generic: 
true, - }, { name: "ShiftAllLeftInt16x8", argLen: 2, @@ -69651,6 +69461,196 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "SubPairsFloat32x4", + argLen: 2, + generic: true, + }, + { + name: "SubPairsFloat32x8", + argLen: 2, + generic: true, + }, + { + name: "SubPairsFloat64x2", + argLen: 2, + generic: true, + }, + { + name: "SubPairsFloat64x4", + argLen: 2, + generic: true, + }, + { + name: "SubPairsInt16x8", + argLen: 2, + generic: true, + }, + { + name: "SubPairsInt16x16", + argLen: 2, + generic: true, + }, + { + name: "SubPairsInt32x4", + argLen: 2, + generic: true, + }, + { + name: "SubPairsInt32x8", + argLen: 2, + generic: true, + }, + { + name: "SubPairsSaturatedInt16x8", + argLen: 2, + generic: true, + }, + { + name: "SubPairsSaturatedInt16x16", + argLen: 2, + generic: true, + }, + { + name: "SubPairsUint16x8", + argLen: 2, + generic: true, + }, + { + name: "SubPairsUint16x16", + argLen: 2, + generic: true, + }, + { + name: "SubPairsUint32x4", + argLen: 2, + generic: true, + }, + { + name: "SubPairsUint32x8", + argLen: 2, + generic: true, + }, + { + name: "SubSaturatedInt8x16", + argLen: 2, + generic: true, + }, + { + name: "SubSaturatedInt8x32", + argLen: 2, + generic: true, + }, + { + name: "SubSaturatedInt8x64", + argLen: 2, + generic: true, + }, + { + name: "SubSaturatedInt16x8", + argLen: 2, + generic: true, + }, + { + name: "SubSaturatedInt16x16", + argLen: 2, + generic: true, + }, + { + name: "SubSaturatedInt16x32", + argLen: 2, + generic: true, + }, + { + name: "SubSaturatedMaskedInt8x16", + argLen: 3, + generic: true, + }, + { + name: "SubSaturatedMaskedInt8x32", + argLen: 3, + generic: true, + }, + { + name: "SubSaturatedMaskedInt8x64", + argLen: 3, + generic: true, + }, + { + name: "SubSaturatedMaskedInt16x8", + argLen: 3, + generic: true, + }, + { + name: "SubSaturatedMaskedInt16x16", + argLen: 3, + generic: true, + }, + { + name: "SubSaturatedMaskedInt16x32", + argLen: 3, + generic: true, + }, + { + name: "SubSaturatedMaskedUint8x16", + argLen: 3, + generic: true, + }, + { + name: "SubSaturatedMaskedUint8x32", + argLen: 3, + generic: true, + }, + { + name: "SubSaturatedMaskedUint8x64", + argLen: 3, + generic: true, + }, + { + name: "SubSaturatedMaskedUint16x8", + argLen: 3, + generic: true, + }, + { + name: "SubSaturatedMaskedUint16x16", + argLen: 3, + generic: true, + }, + { + name: "SubSaturatedMaskedUint16x32", + argLen: 3, + generic: true, + }, + { + name: "SubSaturatedUint8x16", + argLen: 2, + generic: true, + }, + { + name: "SubSaturatedUint8x32", + argLen: 2, + generic: true, + }, + { + name: "SubSaturatedUint8x64", + argLen: 2, + generic: true, + }, + { + name: "SubSaturatedUint16x8", + argLen: 2, + generic: true, + }, + { + name: "SubSaturatedUint16x16", + argLen: 2, + generic: true, + }, + { + name: "SubSaturatedUint16x32", + argLen: 2, + generic: true, + }, { name: "SubUint8x16", argLen: 2, @@ -69978,433 +69978,289 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "CeilWithPrecisionFloat32x4", - auxType: auxInt8, - argLen: 1, - generic: true, - }, - { - name: "CeilWithPrecisionFloat32x8", - auxType: auxInt8, - argLen: 1, - generic: true, - }, - { - name: "CeilWithPrecisionFloat32x16", - auxType: auxInt8, - argLen: 1, - generic: true, - }, - { - name: "CeilWithPrecisionFloat64x2", - auxType: auxInt8, - argLen: 1, - generic: true, - }, - { - name: "CeilWithPrecisionFloat64x4", - auxType: auxInt8, - argLen: 1, - generic: true, - }, - { - name: "CeilWithPrecisionFloat64x8", - auxType: auxInt8, - argLen: 1, - generic: true, 
- }, - { - name: "CeilWithPrecisionMaskedFloat32x4", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - name: "CeilWithPrecisionMaskedFloat32x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - name: "CeilWithPrecisionMaskedFloat32x16", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - name: "CeilWithPrecisionMaskedFloat64x2", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - name: "CeilWithPrecisionMaskedFloat64x4", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - name: "CeilWithPrecisionMaskedFloat64x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - name: "DiffWithCeilWithPrecisionFloat32x4", - auxType: auxInt8, - argLen: 1, - generic: true, - }, - { - name: "DiffWithCeilWithPrecisionFloat32x8", - auxType: auxInt8, - argLen: 1, - generic: true, - }, - { - name: "DiffWithCeilWithPrecisionFloat32x16", - auxType: auxInt8, - argLen: 1, - generic: true, - }, - { - name: "DiffWithCeilWithPrecisionFloat64x2", - auxType: auxInt8, - argLen: 1, - generic: true, - }, - { - name: "DiffWithCeilWithPrecisionFloat64x4", - auxType: auxInt8, - argLen: 1, - generic: true, - }, - { - name: "DiffWithCeilWithPrecisionFloat64x8", - auxType: auxInt8, - argLen: 1, - generic: true, - }, - { - name: "DiffWithCeilWithPrecisionMaskedFloat32x4", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - name: "DiffWithCeilWithPrecisionMaskedFloat32x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - name: "DiffWithCeilWithPrecisionMaskedFloat32x16", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - name: "DiffWithCeilWithPrecisionMaskedFloat64x2", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - name: "DiffWithCeilWithPrecisionMaskedFloat64x4", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - name: "DiffWithCeilWithPrecisionMaskedFloat64x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - name: "DiffWithFloorWithPrecisionFloat32x4", + name: "CeilScaledFloat32x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithFloorWithPrecisionFloat32x8", + name: "CeilScaledFloat32x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithFloorWithPrecisionFloat32x16", + name: "CeilScaledFloat32x16", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithFloorWithPrecisionFloat64x2", + name: "CeilScaledFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithFloorWithPrecisionFloat64x4", + name: "CeilScaledFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithFloorWithPrecisionFloat64x8", + name: "CeilScaledFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithFloorWithPrecisionMaskedFloat32x4", + name: "CeilScaledMaskedFloat32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithFloorWithPrecisionMaskedFloat32x8", + name: "CeilScaledMaskedFloat32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithFloorWithPrecisionMaskedFloat32x16", + name: "CeilScaledMaskedFloat32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithFloorWithPrecisionMaskedFloat64x2", + name: "CeilScaledMaskedFloat64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithFloorWithPrecisionMaskedFloat64x4", + name: "CeilScaledMaskedFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithFloorWithPrecisionMaskedFloat64x8", + name: "CeilScaledMaskedFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: 
"DiffWithRoundWithPrecisionFloat32x4", + name: "CeilScaledResidueFloat32x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithRoundWithPrecisionFloat32x8", + name: "CeilScaledResidueFloat32x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithRoundWithPrecisionFloat32x16", + name: "CeilScaledResidueFloat32x16", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithRoundWithPrecisionFloat64x2", + name: "CeilScaledResidueFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithRoundWithPrecisionFloat64x4", + name: "CeilScaledResidueFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithRoundWithPrecisionFloat64x8", + name: "CeilScaledResidueFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithRoundWithPrecisionMaskedFloat32x4", + name: "CeilScaledResidueMaskedFloat32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithRoundWithPrecisionMaskedFloat32x8", + name: "CeilScaledResidueMaskedFloat32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithRoundWithPrecisionMaskedFloat32x16", + name: "CeilScaledResidueMaskedFloat32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithRoundWithPrecisionMaskedFloat64x2", + name: "CeilScaledResidueMaskedFloat64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithRoundWithPrecisionMaskedFloat64x4", + name: "CeilScaledResidueMaskedFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithRoundWithPrecisionMaskedFloat64x8", + name: "CeilScaledResidueMaskedFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithTruncWithPrecisionFloat32x4", + name: "FloorScaledFloat32x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithTruncWithPrecisionFloat32x8", + name: "FloorScaledFloat32x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithTruncWithPrecisionFloat32x16", + name: "FloorScaledFloat32x16", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithTruncWithPrecisionFloat64x2", + name: "FloorScaledFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithTruncWithPrecisionFloat64x4", + name: "FloorScaledFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithTruncWithPrecisionFloat64x8", + name: "FloorScaledFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithTruncWithPrecisionMaskedFloat32x4", + name: "FloorScaledMaskedFloat32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithTruncWithPrecisionMaskedFloat32x8", + name: "FloorScaledMaskedFloat32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithTruncWithPrecisionMaskedFloat32x16", + name: "FloorScaledMaskedFloat32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithTruncWithPrecisionMaskedFloat64x2", + name: "FloorScaledMaskedFloat64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithTruncWithPrecisionMaskedFloat64x4", + name: "FloorScaledMaskedFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithTruncWithPrecisionMaskedFloat64x8", + name: "FloorScaledMaskedFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "FloorWithPrecisionFloat32x4", + name: "FloorScaledResidueFloat32x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "FloorWithPrecisionFloat32x8", + name: "FloorScaledResidueFloat32x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: 
"FloorWithPrecisionFloat32x16", + name: "FloorScaledResidueFloat32x16", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "FloorWithPrecisionFloat64x2", + name: "FloorScaledResidueFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "FloorWithPrecisionFloat64x4", + name: "FloorScaledResidueFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "FloorWithPrecisionFloat64x8", + name: "FloorScaledResidueFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "FloorWithPrecisionMaskedFloat32x4", + name: "FloorScaledResidueMaskedFloat32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "FloorWithPrecisionMaskedFloat32x8", + name: "FloorScaledResidueMaskedFloat32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "FloorWithPrecisionMaskedFloat32x16", + name: "FloorScaledResidueMaskedFloat32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "FloorWithPrecisionMaskedFloat64x2", + name: "FloorScaledResidueMaskedFloat64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "FloorWithPrecisionMaskedFloat64x4", + name: "FloorScaledResidueMaskedFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "FloorWithPrecisionMaskedFloat64x8", + name: "FloorScaledResidueMaskedFloat64x8", auxType: auxInt8, argLen: 2, generic: true, @@ -70878,73 +70734,145 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "RoundWithPrecisionFloat32x4", + name: "RoundScaledFloat32x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RoundWithPrecisionFloat32x8", + name: "RoundScaledFloat32x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RoundWithPrecisionFloat32x16", + name: "RoundScaledFloat32x16", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RoundWithPrecisionFloat64x2", + name: "RoundScaledFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RoundWithPrecisionFloat64x4", + name: "RoundScaledFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RoundWithPrecisionFloat64x8", + name: "RoundScaledFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RoundWithPrecisionMaskedFloat32x4", + name: "RoundScaledMaskedFloat32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RoundWithPrecisionMaskedFloat32x8", + name: "RoundScaledMaskedFloat32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RoundWithPrecisionMaskedFloat32x16", + name: "RoundScaledMaskedFloat32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RoundWithPrecisionMaskedFloat64x2", + name: "RoundScaledMaskedFloat64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RoundWithPrecisionMaskedFloat64x4", + name: "RoundScaledMaskedFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RoundWithPrecisionMaskedFloat64x8", + name: "RoundScaledMaskedFloat64x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "RoundScaledResidueFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "RoundScaledResidueFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "RoundScaledResidueFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "RoundScaledResidueFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "RoundScaledResidueFloat64x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "RoundScaledResidueFloat64x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: 
"RoundScaledResidueMaskedFloat32x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "RoundScaledResidueMaskedFloat32x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "RoundScaledResidueMaskedFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "RoundScaledResidueMaskedFloat64x2", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "RoundScaledResidueMaskedFloat64x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "RoundScaledResidueMaskedFloat64x8", auxType: auxInt8, argLen: 2, generic: true, @@ -71490,73 +71418,145 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "TruncWithPrecisionFloat32x4", + name: "TruncScaledFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "TruncScaledFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "TruncScaledFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "TruncScaledFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "TruncScaledFloat64x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "TruncScaledFloat64x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "TruncScaledMaskedFloat32x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "TruncScaledMaskedFloat32x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "TruncScaledMaskedFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "TruncScaledMaskedFloat64x2", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "TruncScaledMaskedFloat64x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "TruncScaledMaskedFloat64x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "TruncScaledResidueFloat32x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "TruncWithPrecisionFloat32x8", + name: "TruncScaledResidueFloat32x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "TruncWithPrecisionFloat32x16", + name: "TruncScaledResidueFloat32x16", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "TruncWithPrecisionFloat64x2", + name: "TruncScaledResidueFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "TruncWithPrecisionFloat64x4", + name: "TruncScaledResidueFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "TruncWithPrecisionFloat64x8", + name: "TruncScaledResidueFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "TruncWithPrecisionMaskedFloat32x4", + name: "TruncScaledResidueMaskedFloat32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "TruncWithPrecisionMaskedFloat32x8", + name: "TruncScaledResidueMaskedFloat32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "TruncWithPrecisionMaskedFloat32x16", + name: "TruncScaledResidueMaskedFloat32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "TruncWithPrecisionMaskedFloat64x2", + name: "TruncScaledResidueMaskedFloat64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "TruncWithPrecisionMaskedFloat64x4", + name: "TruncScaledResidueMaskedFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "TruncWithPrecisionMaskedFloat64x8", + name: "TruncScaledResidueMaskedFloat64x8", auxType: auxInt8, argLen: 2, generic: true, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 82f13b43c6ee82..a3a7ba7ed65aac 100644 
--- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -760,9 +760,111 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAddMaskedUint8x32(v) case OpAddMaskedUint8x64: return rewriteValueAMD64_OpAddMaskedUint8x64(v) + case OpAddPairsFloat32x4: + v.Op = OpAMD64VHADDPS128 + return true + case OpAddPairsFloat32x8: + v.Op = OpAMD64VHADDPS256 + return true + case OpAddPairsFloat64x2: + v.Op = OpAMD64VHADDPD128 + return true + case OpAddPairsFloat64x4: + v.Op = OpAMD64VHADDPD256 + return true + case OpAddPairsInt16x16: + v.Op = OpAMD64VPHADDW256 + return true + case OpAddPairsInt16x8: + v.Op = OpAMD64VPHADDW128 + return true + case OpAddPairsInt32x4: + v.Op = OpAMD64VPHADDD128 + return true + case OpAddPairsInt32x8: + v.Op = OpAMD64VPHADDD256 + return true + case OpAddPairsSaturatedInt16x16: + v.Op = OpAMD64VPHADDSW256 + return true + case OpAddPairsSaturatedInt16x8: + v.Op = OpAMD64VPHADDSW128 + return true + case OpAddPairsUint16x16: + v.Op = OpAMD64VPHADDW256 + return true + case OpAddPairsUint16x8: + v.Op = OpAMD64VPHADDW128 + return true + case OpAddPairsUint32x4: + v.Op = OpAMD64VPHADDD128 + return true + case OpAddPairsUint32x8: + v.Op = OpAMD64VPHADDD256 + return true case OpAddPtr: v.Op = OpAMD64ADDQ return true + case OpAddSaturatedInt16x16: + v.Op = OpAMD64VPADDSW256 + return true + case OpAddSaturatedInt16x32: + v.Op = OpAMD64VPADDSW512 + return true + case OpAddSaturatedInt16x8: + v.Op = OpAMD64VPADDSW128 + return true + case OpAddSaturatedInt8x16: + v.Op = OpAMD64VPADDSB128 + return true + case OpAddSaturatedInt8x32: + v.Op = OpAMD64VPADDSB256 + return true + case OpAddSaturatedInt8x64: + v.Op = OpAMD64VPADDSB512 + return true + case OpAddSaturatedMaskedInt16x16: + return rewriteValueAMD64_OpAddSaturatedMaskedInt16x16(v) + case OpAddSaturatedMaskedInt16x32: + return rewriteValueAMD64_OpAddSaturatedMaskedInt16x32(v) + case OpAddSaturatedMaskedInt16x8: + return rewriteValueAMD64_OpAddSaturatedMaskedInt16x8(v) + case OpAddSaturatedMaskedInt8x16: + return rewriteValueAMD64_OpAddSaturatedMaskedInt8x16(v) + case OpAddSaturatedMaskedInt8x32: + return rewriteValueAMD64_OpAddSaturatedMaskedInt8x32(v) + case OpAddSaturatedMaskedInt8x64: + return rewriteValueAMD64_OpAddSaturatedMaskedInt8x64(v) + case OpAddSaturatedMaskedUint16x16: + return rewriteValueAMD64_OpAddSaturatedMaskedUint16x16(v) + case OpAddSaturatedMaskedUint16x32: + return rewriteValueAMD64_OpAddSaturatedMaskedUint16x32(v) + case OpAddSaturatedMaskedUint16x8: + return rewriteValueAMD64_OpAddSaturatedMaskedUint16x8(v) + case OpAddSaturatedMaskedUint8x16: + return rewriteValueAMD64_OpAddSaturatedMaskedUint8x16(v) + case OpAddSaturatedMaskedUint8x32: + return rewriteValueAMD64_OpAddSaturatedMaskedUint8x32(v) + case OpAddSaturatedMaskedUint8x64: + return rewriteValueAMD64_OpAddSaturatedMaskedUint8x64(v) + case OpAddSaturatedUint16x16: + v.Op = OpAMD64VPADDSW256 + return true + case OpAddSaturatedUint16x32: + v.Op = OpAMD64VPADDSW512 + return true + case OpAddSaturatedUint16x8: + v.Op = OpAMD64VPADDSW128 + return true + case OpAddSaturatedUint8x16: + v.Op = OpAMD64VPADDSB128 + return true + case OpAddSaturatedUint8x32: + v.Op = OpAMD64VPADDSB256 + return true + case OpAddSaturatedUint8x64: + v.Op = OpAMD64VPADDSB512 + return true case OpAddSubFloat32x4: v.Op = OpAMD64VADDSUBPS128 return true @@ -1185,30 +1287,54 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpCeilFloat64x2(v) case OpCeilFloat64x4: return rewriteValueAMD64_OpCeilFloat64x4(v) - 
case OpCeilWithPrecisionFloat32x16: - return rewriteValueAMD64_OpCeilWithPrecisionFloat32x16(v) - case OpCeilWithPrecisionFloat32x4: - return rewriteValueAMD64_OpCeilWithPrecisionFloat32x4(v) - case OpCeilWithPrecisionFloat32x8: - return rewriteValueAMD64_OpCeilWithPrecisionFloat32x8(v) - case OpCeilWithPrecisionFloat64x2: - return rewriteValueAMD64_OpCeilWithPrecisionFloat64x2(v) - case OpCeilWithPrecisionFloat64x4: - return rewriteValueAMD64_OpCeilWithPrecisionFloat64x4(v) - case OpCeilWithPrecisionFloat64x8: - return rewriteValueAMD64_OpCeilWithPrecisionFloat64x8(v) - case OpCeilWithPrecisionMaskedFloat32x16: - return rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat32x16(v) - case OpCeilWithPrecisionMaskedFloat32x4: - return rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat32x4(v) - case OpCeilWithPrecisionMaskedFloat32x8: - return rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat32x8(v) - case OpCeilWithPrecisionMaskedFloat64x2: - return rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat64x2(v) - case OpCeilWithPrecisionMaskedFloat64x4: - return rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat64x4(v) - case OpCeilWithPrecisionMaskedFloat64x8: - return rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat64x8(v) + case OpCeilScaledFloat32x16: + return rewriteValueAMD64_OpCeilScaledFloat32x16(v) + case OpCeilScaledFloat32x4: + return rewriteValueAMD64_OpCeilScaledFloat32x4(v) + case OpCeilScaledFloat32x8: + return rewriteValueAMD64_OpCeilScaledFloat32x8(v) + case OpCeilScaledFloat64x2: + return rewriteValueAMD64_OpCeilScaledFloat64x2(v) + case OpCeilScaledFloat64x4: + return rewriteValueAMD64_OpCeilScaledFloat64x4(v) + case OpCeilScaledFloat64x8: + return rewriteValueAMD64_OpCeilScaledFloat64x8(v) + case OpCeilScaledMaskedFloat32x16: + return rewriteValueAMD64_OpCeilScaledMaskedFloat32x16(v) + case OpCeilScaledMaskedFloat32x4: + return rewriteValueAMD64_OpCeilScaledMaskedFloat32x4(v) + case OpCeilScaledMaskedFloat32x8: + return rewriteValueAMD64_OpCeilScaledMaskedFloat32x8(v) + case OpCeilScaledMaskedFloat64x2: + return rewriteValueAMD64_OpCeilScaledMaskedFloat64x2(v) + case OpCeilScaledMaskedFloat64x4: + return rewriteValueAMD64_OpCeilScaledMaskedFloat64x4(v) + case OpCeilScaledMaskedFloat64x8: + return rewriteValueAMD64_OpCeilScaledMaskedFloat64x8(v) + case OpCeilScaledResidueFloat32x16: + return rewriteValueAMD64_OpCeilScaledResidueFloat32x16(v) + case OpCeilScaledResidueFloat32x4: + return rewriteValueAMD64_OpCeilScaledResidueFloat32x4(v) + case OpCeilScaledResidueFloat32x8: + return rewriteValueAMD64_OpCeilScaledResidueFloat32x8(v) + case OpCeilScaledResidueFloat64x2: + return rewriteValueAMD64_OpCeilScaledResidueFloat64x2(v) + case OpCeilScaledResidueFloat64x4: + return rewriteValueAMD64_OpCeilScaledResidueFloat64x4(v) + case OpCeilScaledResidueFloat64x8: + return rewriteValueAMD64_OpCeilScaledResidueFloat64x8(v) + case OpCeilScaledResidueMaskedFloat32x16: + return rewriteValueAMD64_OpCeilScaledResidueMaskedFloat32x16(v) + case OpCeilScaledResidueMaskedFloat32x4: + return rewriteValueAMD64_OpCeilScaledResidueMaskedFloat32x4(v) + case OpCeilScaledResidueMaskedFloat32x8: + return rewriteValueAMD64_OpCeilScaledResidueMaskedFloat32x8(v) + case OpCeilScaledResidueMaskedFloat64x2: + return rewriteValueAMD64_OpCeilScaledResidueMaskedFloat64x2(v) + case OpCeilScaledResidueMaskedFloat64x4: + return rewriteValueAMD64_OpCeilScaledResidueMaskedFloat64x4(v) + case OpCeilScaledResidueMaskedFloat64x8: + return rewriteValueAMD64_OpCeilScaledResidueMaskedFloat64x8(v) case OpClosureCall: v.Op = 
OpAMD64CALLclosure return true @@ -1409,102 +1535,6 @@ func rewriteValueAMD64(v *Value) bool { case OpCvtBoolToUint8: v.Op = OpCopy return true - case OpDiffWithCeilWithPrecisionFloat32x16: - return rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat32x16(v) - case OpDiffWithCeilWithPrecisionFloat32x4: - return rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat32x4(v) - case OpDiffWithCeilWithPrecisionFloat32x8: - return rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat32x8(v) - case OpDiffWithCeilWithPrecisionFloat64x2: - return rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat64x2(v) - case OpDiffWithCeilWithPrecisionFloat64x4: - return rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat64x4(v) - case OpDiffWithCeilWithPrecisionFloat64x8: - return rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat64x8(v) - case OpDiffWithCeilWithPrecisionMaskedFloat32x16: - return rewriteValueAMD64_OpDiffWithCeilWithPrecisionMaskedFloat32x16(v) - case OpDiffWithCeilWithPrecisionMaskedFloat32x4: - return rewriteValueAMD64_OpDiffWithCeilWithPrecisionMaskedFloat32x4(v) - case OpDiffWithCeilWithPrecisionMaskedFloat32x8: - return rewriteValueAMD64_OpDiffWithCeilWithPrecisionMaskedFloat32x8(v) - case OpDiffWithCeilWithPrecisionMaskedFloat64x2: - return rewriteValueAMD64_OpDiffWithCeilWithPrecisionMaskedFloat64x2(v) - case OpDiffWithCeilWithPrecisionMaskedFloat64x4: - return rewriteValueAMD64_OpDiffWithCeilWithPrecisionMaskedFloat64x4(v) - case OpDiffWithCeilWithPrecisionMaskedFloat64x8: - return rewriteValueAMD64_OpDiffWithCeilWithPrecisionMaskedFloat64x8(v) - case OpDiffWithFloorWithPrecisionFloat32x16: - return rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat32x16(v) - case OpDiffWithFloorWithPrecisionFloat32x4: - return rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat32x4(v) - case OpDiffWithFloorWithPrecisionFloat32x8: - return rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat32x8(v) - case OpDiffWithFloorWithPrecisionFloat64x2: - return rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat64x2(v) - case OpDiffWithFloorWithPrecisionFloat64x4: - return rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat64x4(v) - case OpDiffWithFloorWithPrecisionFloat64x8: - return rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat64x8(v) - case OpDiffWithFloorWithPrecisionMaskedFloat32x16: - return rewriteValueAMD64_OpDiffWithFloorWithPrecisionMaskedFloat32x16(v) - case OpDiffWithFloorWithPrecisionMaskedFloat32x4: - return rewriteValueAMD64_OpDiffWithFloorWithPrecisionMaskedFloat32x4(v) - case OpDiffWithFloorWithPrecisionMaskedFloat32x8: - return rewriteValueAMD64_OpDiffWithFloorWithPrecisionMaskedFloat32x8(v) - case OpDiffWithFloorWithPrecisionMaskedFloat64x2: - return rewriteValueAMD64_OpDiffWithFloorWithPrecisionMaskedFloat64x2(v) - case OpDiffWithFloorWithPrecisionMaskedFloat64x4: - return rewriteValueAMD64_OpDiffWithFloorWithPrecisionMaskedFloat64x4(v) - case OpDiffWithFloorWithPrecisionMaskedFloat64x8: - return rewriteValueAMD64_OpDiffWithFloorWithPrecisionMaskedFloat64x8(v) - case OpDiffWithRoundWithPrecisionFloat32x16: - return rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat32x16(v) - case OpDiffWithRoundWithPrecisionFloat32x4: - return rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat32x4(v) - case OpDiffWithRoundWithPrecisionFloat32x8: - return rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat32x8(v) - case OpDiffWithRoundWithPrecisionFloat64x2: - return rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat64x2(v) - case OpDiffWithRoundWithPrecisionFloat64x4: - return 
rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat64x4(v) - case OpDiffWithRoundWithPrecisionFloat64x8: - return rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat64x8(v) - case OpDiffWithRoundWithPrecisionMaskedFloat32x16: - return rewriteValueAMD64_OpDiffWithRoundWithPrecisionMaskedFloat32x16(v) - case OpDiffWithRoundWithPrecisionMaskedFloat32x4: - return rewriteValueAMD64_OpDiffWithRoundWithPrecisionMaskedFloat32x4(v) - case OpDiffWithRoundWithPrecisionMaskedFloat32x8: - return rewriteValueAMD64_OpDiffWithRoundWithPrecisionMaskedFloat32x8(v) - case OpDiffWithRoundWithPrecisionMaskedFloat64x2: - return rewriteValueAMD64_OpDiffWithRoundWithPrecisionMaskedFloat64x2(v) - case OpDiffWithRoundWithPrecisionMaskedFloat64x4: - return rewriteValueAMD64_OpDiffWithRoundWithPrecisionMaskedFloat64x4(v) - case OpDiffWithRoundWithPrecisionMaskedFloat64x8: - return rewriteValueAMD64_OpDiffWithRoundWithPrecisionMaskedFloat64x8(v) - case OpDiffWithTruncWithPrecisionFloat32x16: - return rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat32x16(v) - case OpDiffWithTruncWithPrecisionFloat32x4: - return rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat32x4(v) - case OpDiffWithTruncWithPrecisionFloat32x8: - return rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat32x8(v) - case OpDiffWithTruncWithPrecisionFloat64x2: - return rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat64x2(v) - case OpDiffWithTruncWithPrecisionFloat64x4: - return rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat64x4(v) - case OpDiffWithTruncWithPrecisionFloat64x8: - return rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat64x8(v) - case OpDiffWithTruncWithPrecisionMaskedFloat32x16: - return rewriteValueAMD64_OpDiffWithTruncWithPrecisionMaskedFloat32x16(v) - case OpDiffWithTruncWithPrecisionMaskedFloat32x4: - return rewriteValueAMD64_OpDiffWithTruncWithPrecisionMaskedFloat32x4(v) - case OpDiffWithTruncWithPrecisionMaskedFloat32x8: - return rewriteValueAMD64_OpDiffWithTruncWithPrecisionMaskedFloat32x8(v) - case OpDiffWithTruncWithPrecisionMaskedFloat64x2: - return rewriteValueAMD64_OpDiffWithTruncWithPrecisionMaskedFloat64x2(v) - case OpDiffWithTruncWithPrecisionMaskedFloat64x4: - return rewriteValueAMD64_OpDiffWithTruncWithPrecisionMaskedFloat64x4(v) - case OpDiffWithTruncWithPrecisionMaskedFloat64x8: - return rewriteValueAMD64_OpDiffWithTruncWithPrecisionMaskedFloat64x8(v) case OpDiv128u: v.Op = OpAMD64DIVQU2 return true @@ -1730,30 +1760,54 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpFloorFloat64x2(v) case OpFloorFloat64x4: return rewriteValueAMD64_OpFloorFloat64x4(v) - case OpFloorWithPrecisionFloat32x16: - return rewriteValueAMD64_OpFloorWithPrecisionFloat32x16(v) - case OpFloorWithPrecisionFloat32x4: - return rewriteValueAMD64_OpFloorWithPrecisionFloat32x4(v) - case OpFloorWithPrecisionFloat32x8: - return rewriteValueAMD64_OpFloorWithPrecisionFloat32x8(v) - case OpFloorWithPrecisionFloat64x2: - return rewriteValueAMD64_OpFloorWithPrecisionFloat64x2(v) - case OpFloorWithPrecisionFloat64x4: - return rewriteValueAMD64_OpFloorWithPrecisionFloat64x4(v) - case OpFloorWithPrecisionFloat64x8: - return rewriteValueAMD64_OpFloorWithPrecisionFloat64x8(v) - case OpFloorWithPrecisionMaskedFloat32x16: - return rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat32x16(v) - case OpFloorWithPrecisionMaskedFloat32x4: - return rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat32x4(v) - case OpFloorWithPrecisionMaskedFloat32x8: - return rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat32x8(v) - case 
OpFloorWithPrecisionMaskedFloat64x2: - return rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat64x2(v) - case OpFloorWithPrecisionMaskedFloat64x4: - return rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat64x4(v) - case OpFloorWithPrecisionMaskedFloat64x8: - return rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat64x8(v) + case OpFloorScaledFloat32x16: + return rewriteValueAMD64_OpFloorScaledFloat32x16(v) + case OpFloorScaledFloat32x4: + return rewriteValueAMD64_OpFloorScaledFloat32x4(v) + case OpFloorScaledFloat32x8: + return rewriteValueAMD64_OpFloorScaledFloat32x8(v) + case OpFloorScaledFloat64x2: + return rewriteValueAMD64_OpFloorScaledFloat64x2(v) + case OpFloorScaledFloat64x4: + return rewriteValueAMD64_OpFloorScaledFloat64x4(v) + case OpFloorScaledFloat64x8: + return rewriteValueAMD64_OpFloorScaledFloat64x8(v) + case OpFloorScaledMaskedFloat32x16: + return rewriteValueAMD64_OpFloorScaledMaskedFloat32x16(v) + case OpFloorScaledMaskedFloat32x4: + return rewriteValueAMD64_OpFloorScaledMaskedFloat32x4(v) + case OpFloorScaledMaskedFloat32x8: + return rewriteValueAMD64_OpFloorScaledMaskedFloat32x8(v) + case OpFloorScaledMaskedFloat64x2: + return rewriteValueAMD64_OpFloorScaledMaskedFloat64x2(v) + case OpFloorScaledMaskedFloat64x4: + return rewriteValueAMD64_OpFloorScaledMaskedFloat64x4(v) + case OpFloorScaledMaskedFloat64x8: + return rewriteValueAMD64_OpFloorScaledMaskedFloat64x8(v) + case OpFloorScaledResidueFloat32x16: + return rewriteValueAMD64_OpFloorScaledResidueFloat32x16(v) + case OpFloorScaledResidueFloat32x4: + return rewriteValueAMD64_OpFloorScaledResidueFloat32x4(v) + case OpFloorScaledResidueFloat32x8: + return rewriteValueAMD64_OpFloorScaledResidueFloat32x8(v) + case OpFloorScaledResidueFloat64x2: + return rewriteValueAMD64_OpFloorScaledResidueFloat64x2(v) + case OpFloorScaledResidueFloat64x4: + return rewriteValueAMD64_OpFloorScaledResidueFloat64x4(v) + case OpFloorScaledResidueFloat64x8: + return rewriteValueAMD64_OpFloorScaledResidueFloat64x8(v) + case OpFloorScaledResidueMaskedFloat32x16: + return rewriteValueAMD64_OpFloorScaledResidueMaskedFloat32x16(v) + case OpFloorScaledResidueMaskedFloat32x4: + return rewriteValueAMD64_OpFloorScaledResidueMaskedFloat32x4(v) + case OpFloorScaledResidueMaskedFloat32x8: + return rewriteValueAMD64_OpFloorScaledResidueMaskedFloat32x8(v) + case OpFloorScaledResidueMaskedFloat64x2: + return rewriteValueAMD64_OpFloorScaledResidueMaskedFloat64x2(v) + case OpFloorScaledResidueMaskedFloat64x4: + return rewriteValueAMD64_OpFloorScaledResidueMaskedFloat64x4(v) + case OpFloorScaledResidueMaskedFloat64x8: + return rewriteValueAMD64_OpFloorScaledResidueMaskedFloat64x8(v) case OpFusedMultiplyAddFloat32x16: v.Op = OpAMD64VFMADD213PS512 return true @@ -2944,36 +2998,6 @@ func rewriteValueAMD64(v *Value) bool { case OpMul8: v.Op = OpAMD64MULL return true - case OpMulByPowOf2Float32x16: - v.Op = OpAMD64VSCALEFPS512 - return true - case OpMulByPowOf2Float32x4: - v.Op = OpAMD64VSCALEFPS128 - return true - case OpMulByPowOf2Float32x8: - v.Op = OpAMD64VSCALEFPS256 - return true - case OpMulByPowOf2Float64x2: - v.Op = OpAMD64VSCALEFPD128 - return true - case OpMulByPowOf2Float64x4: - v.Op = OpAMD64VSCALEFPD256 - return true - case OpMulByPowOf2Float64x8: - v.Op = OpAMD64VSCALEFPD512 - return true - case OpMulByPowOf2MaskedFloat32x16: - return rewriteValueAMD64_OpMulByPowOf2MaskedFloat32x16(v) - case OpMulByPowOf2MaskedFloat32x4: - return rewriteValueAMD64_OpMulByPowOf2MaskedFloat32x4(v) - case OpMulByPowOf2MaskedFloat32x8: - return 
rewriteValueAMD64_OpMulByPowOf2MaskedFloat32x8(v) - case OpMulByPowOf2MaskedFloat64x2: - return rewriteValueAMD64_OpMulByPowOf2MaskedFloat64x2(v) - case OpMulByPowOf2MaskedFloat64x4: - return rewriteValueAMD64_OpMulByPowOf2MaskedFloat64x4(v) - case OpMulByPowOf2MaskedFloat64x8: - return rewriteValueAMD64_OpMulByPowOf2MaskedFloat64x8(v) case OpMulEvenWidenInt32x4: v.Op = OpAMD64VPMULDQ128 return true @@ -3064,51 +3088,33 @@ func rewriteValueAMD64(v *Value) bool { case OpMulHighUint16x8: v.Op = OpAMD64VPMULHUW128 return true - case OpMulLowInt16x16: + case OpMulInt16x16: v.Op = OpAMD64VPMULLW256 return true - case OpMulLowInt16x32: + case OpMulInt16x32: v.Op = OpAMD64VPMULLW512 return true - case OpMulLowInt16x8: + case OpMulInt16x8: v.Op = OpAMD64VPMULLW128 return true - case OpMulLowInt32x16: + case OpMulInt32x16: v.Op = OpAMD64VPMULLD512 return true - case OpMulLowInt32x4: + case OpMulInt32x4: v.Op = OpAMD64VPMULLD128 return true - case OpMulLowInt32x8: + case OpMulInt32x8: v.Op = OpAMD64VPMULLD256 return true - case OpMulLowInt64x2: + case OpMulInt64x2: v.Op = OpAMD64VPMULLQ128 return true - case OpMulLowInt64x4: + case OpMulInt64x4: v.Op = OpAMD64VPMULLQ256 return true - case OpMulLowInt64x8: + case OpMulInt64x8: v.Op = OpAMD64VPMULLQ512 return true - case OpMulLowMaskedInt16x16: - return rewriteValueAMD64_OpMulLowMaskedInt16x16(v) - case OpMulLowMaskedInt16x32: - return rewriteValueAMD64_OpMulLowMaskedInt16x32(v) - case OpMulLowMaskedInt16x8: - return rewriteValueAMD64_OpMulLowMaskedInt16x8(v) - case OpMulLowMaskedInt32x16: - return rewriteValueAMD64_OpMulLowMaskedInt32x16(v) - case OpMulLowMaskedInt32x4: - return rewriteValueAMD64_OpMulLowMaskedInt32x4(v) - case OpMulLowMaskedInt32x8: - return rewriteValueAMD64_OpMulLowMaskedInt32x8(v) - case OpMulLowMaskedInt64x2: - return rewriteValueAMD64_OpMulLowMaskedInt64x2(v) - case OpMulLowMaskedInt64x4: - return rewriteValueAMD64_OpMulLowMaskedInt64x4(v) - case OpMulLowMaskedInt64x8: - return rewriteValueAMD64_OpMulLowMaskedInt64x8(v) case OpMulMaskedFloat32x16: return rewriteValueAMD64_OpMulMaskedFloat32x16(v) case OpMulMaskedFloat32x4: @@ -3121,6 +3127,24 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMulMaskedFloat64x4(v) case OpMulMaskedFloat64x8: return rewriteValueAMD64_OpMulMaskedFloat64x8(v) + case OpMulMaskedInt16x16: + return rewriteValueAMD64_OpMulMaskedInt16x16(v) + case OpMulMaskedInt16x32: + return rewriteValueAMD64_OpMulMaskedInt16x32(v) + case OpMulMaskedInt16x8: + return rewriteValueAMD64_OpMulMaskedInt16x8(v) + case OpMulMaskedInt32x16: + return rewriteValueAMD64_OpMulMaskedInt32x16(v) + case OpMulMaskedInt32x4: + return rewriteValueAMD64_OpMulMaskedInt32x4(v) + case OpMulMaskedInt32x8: + return rewriteValueAMD64_OpMulMaskedInt32x8(v) + case OpMulMaskedInt64x2: + return rewriteValueAMD64_OpMulMaskedInt64x2(v) + case OpMulMaskedInt64x4: + return rewriteValueAMD64_OpMulMaskedInt64x4(v) + case OpMulMaskedInt64x8: + return rewriteValueAMD64_OpMulMaskedInt64x8(v) case OpNeg16: v.Op = OpAMD64NEGL return true @@ -3406,78 +3430,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpPairDotProdMaskedInt16x32(v) case OpPairDotProdMaskedInt16x8: return rewriteValueAMD64_OpPairDotProdMaskedInt16x8(v) - case OpPairwiseAddFloat32x4: - v.Op = OpAMD64VHADDPS128 - return true - case OpPairwiseAddFloat32x8: - v.Op = OpAMD64VHADDPS256 - return true - case OpPairwiseAddFloat64x2: - v.Op = OpAMD64VHADDPD128 - return true - case OpPairwiseAddFloat64x4: - v.Op = OpAMD64VHADDPD256 - return true - case 
OpPairwiseAddInt16x16: - v.Op = OpAMD64VPHADDW256 - return true - case OpPairwiseAddInt16x8: - v.Op = OpAMD64VPHADDW128 - return true - case OpPairwiseAddInt32x4: - v.Op = OpAMD64VPHADDD128 - return true - case OpPairwiseAddInt32x8: - v.Op = OpAMD64VPHADDD256 - return true - case OpPairwiseAddUint16x16: - v.Op = OpAMD64VPHADDW256 - return true - case OpPairwiseAddUint16x8: - v.Op = OpAMD64VPHADDW128 - return true - case OpPairwiseAddUint32x4: - v.Op = OpAMD64VPHADDD128 - return true - case OpPairwiseAddUint32x8: - v.Op = OpAMD64VPHADDD256 - return true - case OpPairwiseSubFloat32x4: - v.Op = OpAMD64VHSUBPS128 - return true - case OpPairwiseSubFloat32x8: - v.Op = OpAMD64VHSUBPS256 - return true - case OpPairwiseSubFloat64x2: - v.Op = OpAMD64VHSUBPD128 - return true - case OpPairwiseSubFloat64x4: - v.Op = OpAMD64VHSUBPD256 - return true - case OpPairwiseSubInt16x16: - v.Op = OpAMD64VPHSUBW256 - return true - case OpPairwiseSubInt16x8: - v.Op = OpAMD64VPHSUBW128 - return true - case OpPairwiseSubInt32x4: - v.Op = OpAMD64VPHSUBD128 - return true - case OpPairwiseSubInt32x8: - v.Op = OpAMD64VPHSUBD256 - return true - case OpPairwiseSubUint16x16: - v.Op = OpAMD64VPHSUBW256 - return true - case OpPairwiseSubUint16x8: - v.Op = OpAMD64VPHSUBW128 - return true - case OpPairwiseSubUint32x4: - v.Op = OpAMD64VPHSUBD128 - return true - case OpPairwiseSubUint32x8: - v.Op = OpAMD64VPHSUBD256 - return true case OpPanicBounds: return rewriteValueAMD64_OpPanicBounds(v) case OpPermute2Float32x16: @@ -4152,32 +4104,56 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpRoundFloat64x2(v) case OpRoundFloat64x4: return rewriteValueAMD64_OpRoundFloat64x4(v) + case OpRoundScaledFloat32x16: + return rewriteValueAMD64_OpRoundScaledFloat32x16(v) + case OpRoundScaledFloat32x4: + return rewriteValueAMD64_OpRoundScaledFloat32x4(v) + case OpRoundScaledFloat32x8: + return rewriteValueAMD64_OpRoundScaledFloat32x8(v) + case OpRoundScaledFloat64x2: + return rewriteValueAMD64_OpRoundScaledFloat64x2(v) + case OpRoundScaledFloat64x4: + return rewriteValueAMD64_OpRoundScaledFloat64x4(v) + case OpRoundScaledFloat64x8: + return rewriteValueAMD64_OpRoundScaledFloat64x8(v) + case OpRoundScaledMaskedFloat32x16: + return rewriteValueAMD64_OpRoundScaledMaskedFloat32x16(v) + case OpRoundScaledMaskedFloat32x4: + return rewriteValueAMD64_OpRoundScaledMaskedFloat32x4(v) + case OpRoundScaledMaskedFloat32x8: + return rewriteValueAMD64_OpRoundScaledMaskedFloat32x8(v) + case OpRoundScaledMaskedFloat64x2: + return rewriteValueAMD64_OpRoundScaledMaskedFloat64x2(v) + case OpRoundScaledMaskedFloat64x4: + return rewriteValueAMD64_OpRoundScaledMaskedFloat64x4(v) + case OpRoundScaledMaskedFloat64x8: + return rewriteValueAMD64_OpRoundScaledMaskedFloat64x8(v) + case OpRoundScaledResidueFloat32x16: + return rewriteValueAMD64_OpRoundScaledResidueFloat32x16(v) + case OpRoundScaledResidueFloat32x4: + return rewriteValueAMD64_OpRoundScaledResidueFloat32x4(v) + case OpRoundScaledResidueFloat32x8: + return rewriteValueAMD64_OpRoundScaledResidueFloat32x8(v) + case OpRoundScaledResidueFloat64x2: + return rewriteValueAMD64_OpRoundScaledResidueFloat64x2(v) + case OpRoundScaledResidueFloat64x4: + return rewriteValueAMD64_OpRoundScaledResidueFloat64x4(v) + case OpRoundScaledResidueFloat64x8: + return rewriteValueAMD64_OpRoundScaledResidueFloat64x8(v) + case OpRoundScaledResidueMaskedFloat32x16: + return rewriteValueAMD64_OpRoundScaledResidueMaskedFloat32x16(v) + case OpRoundScaledResidueMaskedFloat32x4: + return 
rewriteValueAMD64_OpRoundScaledResidueMaskedFloat32x4(v) + case OpRoundScaledResidueMaskedFloat32x8: + return rewriteValueAMD64_OpRoundScaledResidueMaskedFloat32x8(v) + case OpRoundScaledResidueMaskedFloat64x2: + return rewriteValueAMD64_OpRoundScaledResidueMaskedFloat64x2(v) + case OpRoundScaledResidueMaskedFloat64x4: + return rewriteValueAMD64_OpRoundScaledResidueMaskedFloat64x4(v) + case OpRoundScaledResidueMaskedFloat64x8: + return rewriteValueAMD64_OpRoundScaledResidueMaskedFloat64x8(v) case OpRoundToEven: return rewriteValueAMD64_OpRoundToEven(v) - case OpRoundWithPrecisionFloat32x16: - return rewriteValueAMD64_OpRoundWithPrecisionFloat32x16(v) - case OpRoundWithPrecisionFloat32x4: - return rewriteValueAMD64_OpRoundWithPrecisionFloat32x4(v) - case OpRoundWithPrecisionFloat32x8: - return rewriteValueAMD64_OpRoundWithPrecisionFloat32x8(v) - case OpRoundWithPrecisionFloat64x2: - return rewriteValueAMD64_OpRoundWithPrecisionFloat64x2(v) - case OpRoundWithPrecisionFloat64x4: - return rewriteValueAMD64_OpRoundWithPrecisionFloat64x4(v) - case OpRoundWithPrecisionFloat64x8: - return rewriteValueAMD64_OpRoundWithPrecisionFloat64x8(v) - case OpRoundWithPrecisionMaskedFloat32x16: - return rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat32x16(v) - case OpRoundWithPrecisionMaskedFloat32x4: - return rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat32x4(v) - case OpRoundWithPrecisionMaskedFloat32x8: - return rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat32x8(v) - case OpRoundWithPrecisionMaskedFloat64x2: - return rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat64x2(v) - case OpRoundWithPrecisionMaskedFloat64x4: - return rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat64x4(v) - case OpRoundWithPrecisionMaskedFloat64x8: - return rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat64x8(v) case OpRsh16Ux16: return rewriteValueAMD64_OpRsh16Ux16(v) case OpRsh16Ux32: @@ -4257,138 +4233,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpSaturatedAddDotProdMaskedInt32x4(v) case OpSaturatedAddDotProdMaskedInt32x8: return rewriteValueAMD64_OpSaturatedAddDotProdMaskedInt32x8(v) - case OpSaturatedAddInt16x16: - v.Op = OpAMD64VPADDSW256 - return true - case OpSaturatedAddInt16x32: - v.Op = OpAMD64VPADDSW512 - return true - case OpSaturatedAddInt16x8: - v.Op = OpAMD64VPADDSW128 - return true - case OpSaturatedAddInt8x16: - v.Op = OpAMD64VPADDSB128 - return true - case OpSaturatedAddInt8x32: - v.Op = OpAMD64VPADDSB256 - return true - case OpSaturatedAddInt8x64: - v.Op = OpAMD64VPADDSB512 - return true - case OpSaturatedAddMaskedInt16x16: - return rewriteValueAMD64_OpSaturatedAddMaskedInt16x16(v) - case OpSaturatedAddMaskedInt16x32: - return rewriteValueAMD64_OpSaturatedAddMaskedInt16x32(v) - case OpSaturatedAddMaskedInt16x8: - return rewriteValueAMD64_OpSaturatedAddMaskedInt16x8(v) - case OpSaturatedAddMaskedInt8x16: - return rewriteValueAMD64_OpSaturatedAddMaskedInt8x16(v) - case OpSaturatedAddMaskedInt8x32: - return rewriteValueAMD64_OpSaturatedAddMaskedInt8x32(v) - case OpSaturatedAddMaskedInt8x64: - return rewriteValueAMD64_OpSaturatedAddMaskedInt8x64(v) - case OpSaturatedAddMaskedUint16x16: - return rewriteValueAMD64_OpSaturatedAddMaskedUint16x16(v) - case OpSaturatedAddMaskedUint16x32: - return rewriteValueAMD64_OpSaturatedAddMaskedUint16x32(v) - case OpSaturatedAddMaskedUint16x8: - return rewriteValueAMD64_OpSaturatedAddMaskedUint16x8(v) - case OpSaturatedAddMaskedUint8x16: - return rewriteValueAMD64_OpSaturatedAddMaskedUint8x16(v) - case OpSaturatedAddMaskedUint8x32: - return 
rewriteValueAMD64_OpSaturatedAddMaskedUint8x32(v) - case OpSaturatedAddMaskedUint8x64: - return rewriteValueAMD64_OpSaturatedAddMaskedUint8x64(v) - case OpSaturatedAddUint16x16: - v.Op = OpAMD64VPADDSW256 - return true - case OpSaturatedAddUint16x32: - v.Op = OpAMD64VPADDSW512 - return true - case OpSaturatedAddUint16x8: - v.Op = OpAMD64VPADDSW128 - return true - case OpSaturatedAddUint8x16: - v.Op = OpAMD64VPADDSB128 - return true - case OpSaturatedAddUint8x32: - v.Op = OpAMD64VPADDSB256 - return true - case OpSaturatedAddUint8x64: - v.Op = OpAMD64VPADDSB512 - return true - case OpSaturatedPairwiseAddInt16x16: - v.Op = OpAMD64VPHADDSW256 - return true - case OpSaturatedPairwiseAddInt16x8: - v.Op = OpAMD64VPHADDSW128 - return true - case OpSaturatedPairwiseSubInt16x16: - v.Op = OpAMD64VPHSUBSW256 - return true - case OpSaturatedPairwiseSubInt16x8: - v.Op = OpAMD64VPHSUBSW128 - return true - case OpSaturatedSubInt16x16: - v.Op = OpAMD64VPSUBSW256 - return true - case OpSaturatedSubInt16x32: - v.Op = OpAMD64VPSUBSW512 - return true - case OpSaturatedSubInt16x8: - v.Op = OpAMD64VPSUBSW128 - return true - case OpSaturatedSubInt8x16: - v.Op = OpAMD64VPSUBSB128 - return true - case OpSaturatedSubInt8x32: - v.Op = OpAMD64VPSUBSB256 - return true - case OpSaturatedSubInt8x64: - v.Op = OpAMD64VPSUBSB512 - return true - case OpSaturatedSubMaskedInt16x16: - return rewriteValueAMD64_OpSaturatedSubMaskedInt16x16(v) - case OpSaturatedSubMaskedInt16x32: - return rewriteValueAMD64_OpSaturatedSubMaskedInt16x32(v) - case OpSaturatedSubMaskedInt16x8: - return rewriteValueAMD64_OpSaturatedSubMaskedInt16x8(v) - case OpSaturatedSubMaskedInt8x16: - return rewriteValueAMD64_OpSaturatedSubMaskedInt8x16(v) - case OpSaturatedSubMaskedInt8x32: - return rewriteValueAMD64_OpSaturatedSubMaskedInt8x32(v) - case OpSaturatedSubMaskedInt8x64: - return rewriteValueAMD64_OpSaturatedSubMaskedInt8x64(v) - case OpSaturatedSubMaskedUint16x16: - return rewriteValueAMD64_OpSaturatedSubMaskedUint16x16(v) - case OpSaturatedSubMaskedUint16x32: - return rewriteValueAMD64_OpSaturatedSubMaskedUint16x32(v) - case OpSaturatedSubMaskedUint16x8: - return rewriteValueAMD64_OpSaturatedSubMaskedUint16x8(v) - case OpSaturatedSubMaskedUint8x16: - return rewriteValueAMD64_OpSaturatedSubMaskedUint8x16(v) - case OpSaturatedSubMaskedUint8x32: - return rewriteValueAMD64_OpSaturatedSubMaskedUint8x32(v) - case OpSaturatedSubMaskedUint8x64: - return rewriteValueAMD64_OpSaturatedSubMaskedUint8x64(v) - case OpSaturatedSubUint16x16: - v.Op = OpAMD64VPSUBSW256 - return true - case OpSaturatedSubUint16x32: - v.Op = OpAMD64VPSUBSW512 - return true - case OpSaturatedSubUint16x8: - v.Op = OpAMD64VPSUBSW128 - return true - case OpSaturatedSubUint8x16: - v.Op = OpAMD64VPSUBSB128 - return true - case OpSaturatedSubUint8x32: - v.Op = OpAMD64VPSUBSB256 - return true - case OpSaturatedSubUint8x64: - v.Op = OpAMD64VPSUBSB512 - return true case OpSaturatedUnsignedSignedPairDotProdMaskedUint8x16: return rewriteValueAMD64_OpSaturatedUnsignedSignedPairDotProdMaskedUint8x16(v) case OpSaturatedUnsignedSignedPairDotProdMaskedUint8x32: @@ -4419,6 +4263,36 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4(v) case OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8: return rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8(v) + case OpScaleFloat32x16: + v.Op = OpAMD64VSCALEFPS512 + return true + case OpScaleFloat32x4: + v.Op = OpAMD64VSCALEFPS128 + return true + case 
OpScaleFloat32x8: + v.Op = OpAMD64VSCALEFPS256 + return true + case OpScaleFloat64x2: + v.Op = OpAMD64VSCALEFPD128 + return true + case OpScaleFloat64x4: + v.Op = OpAMD64VSCALEFPD256 + return true + case OpScaleFloat64x8: + v.Op = OpAMD64VSCALEFPD512 + return true + case OpScaleMaskedFloat32x16: + return rewriteValueAMD64_OpScaleMaskedFloat32x16(v) + case OpScaleMaskedFloat32x4: + return rewriteValueAMD64_OpScaleMaskedFloat32x4(v) + case OpScaleMaskedFloat32x8: + return rewriteValueAMD64_OpScaleMaskedFloat32x8(v) + case OpScaleMaskedFloat64x2: + return rewriteValueAMD64_OpScaleMaskedFloat64x2(v) + case OpScaleMaskedFloat64x4: + return rewriteValueAMD64_OpScaleMaskedFloat64x4(v) + case OpScaleMaskedFloat64x8: + return rewriteValueAMD64_OpScaleMaskedFloat64x8(v) case OpSelect0: return rewriteValueAMD64_OpSelect0(v) case OpSelect1: @@ -5446,9 +5320,111 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpSubMaskedUint8x32(v) case OpSubMaskedUint8x64: return rewriteValueAMD64_OpSubMaskedUint8x64(v) + case OpSubPairsFloat32x4: + v.Op = OpAMD64VHSUBPS128 + return true + case OpSubPairsFloat32x8: + v.Op = OpAMD64VHSUBPS256 + return true + case OpSubPairsFloat64x2: + v.Op = OpAMD64VHSUBPD128 + return true + case OpSubPairsFloat64x4: + v.Op = OpAMD64VHSUBPD256 + return true + case OpSubPairsInt16x16: + v.Op = OpAMD64VPHSUBW256 + return true + case OpSubPairsInt16x8: + v.Op = OpAMD64VPHSUBW128 + return true + case OpSubPairsInt32x4: + v.Op = OpAMD64VPHSUBD128 + return true + case OpSubPairsInt32x8: + v.Op = OpAMD64VPHSUBD256 + return true + case OpSubPairsSaturatedInt16x16: + v.Op = OpAMD64VPHSUBSW256 + return true + case OpSubPairsSaturatedInt16x8: + v.Op = OpAMD64VPHSUBSW128 + return true + case OpSubPairsUint16x16: + v.Op = OpAMD64VPHSUBW256 + return true + case OpSubPairsUint16x8: + v.Op = OpAMD64VPHSUBW128 + return true + case OpSubPairsUint32x4: + v.Op = OpAMD64VPHSUBD128 + return true + case OpSubPairsUint32x8: + v.Op = OpAMD64VPHSUBD256 + return true case OpSubPtr: v.Op = OpAMD64SUBQ return true + case OpSubSaturatedInt16x16: + v.Op = OpAMD64VPSUBSW256 + return true + case OpSubSaturatedInt16x32: + v.Op = OpAMD64VPSUBSW512 + return true + case OpSubSaturatedInt16x8: + v.Op = OpAMD64VPSUBSW128 + return true + case OpSubSaturatedInt8x16: + v.Op = OpAMD64VPSUBSB128 + return true + case OpSubSaturatedInt8x32: + v.Op = OpAMD64VPSUBSB256 + return true + case OpSubSaturatedInt8x64: + v.Op = OpAMD64VPSUBSB512 + return true + case OpSubSaturatedMaskedInt16x16: + return rewriteValueAMD64_OpSubSaturatedMaskedInt16x16(v) + case OpSubSaturatedMaskedInt16x32: + return rewriteValueAMD64_OpSubSaturatedMaskedInt16x32(v) + case OpSubSaturatedMaskedInt16x8: + return rewriteValueAMD64_OpSubSaturatedMaskedInt16x8(v) + case OpSubSaturatedMaskedInt8x16: + return rewriteValueAMD64_OpSubSaturatedMaskedInt8x16(v) + case OpSubSaturatedMaskedInt8x32: + return rewriteValueAMD64_OpSubSaturatedMaskedInt8x32(v) + case OpSubSaturatedMaskedInt8x64: + return rewriteValueAMD64_OpSubSaturatedMaskedInt8x64(v) + case OpSubSaturatedMaskedUint16x16: + return rewriteValueAMD64_OpSubSaturatedMaskedUint16x16(v) + case OpSubSaturatedMaskedUint16x32: + return rewriteValueAMD64_OpSubSaturatedMaskedUint16x32(v) + case OpSubSaturatedMaskedUint16x8: + return rewriteValueAMD64_OpSubSaturatedMaskedUint16x8(v) + case OpSubSaturatedMaskedUint8x16: + return rewriteValueAMD64_OpSubSaturatedMaskedUint8x16(v) + case OpSubSaturatedMaskedUint8x32: + return rewriteValueAMD64_OpSubSaturatedMaskedUint8x32(v) + case 
OpSubSaturatedMaskedUint8x64: + return rewriteValueAMD64_OpSubSaturatedMaskedUint8x64(v) + case OpSubSaturatedUint16x16: + v.Op = OpAMD64VPSUBSW256 + return true + case OpSubSaturatedUint16x32: + v.Op = OpAMD64VPSUBSW512 + return true + case OpSubSaturatedUint16x8: + v.Op = OpAMD64VPSUBSW128 + return true + case OpSubSaturatedUint8x16: + v.Op = OpAMD64VPSUBSB128 + return true + case OpSubSaturatedUint8x32: + v.Op = OpAMD64VPSUBSB256 + return true + case OpSubSaturatedUint8x64: + v.Op = OpAMD64VPSUBSB512 + return true case OpSubUint16x16: v.Op = OpAMD64VPSUBW256 return true @@ -5516,30 +5492,54 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpTruncFloat64x2(v) case OpTruncFloat64x4: return rewriteValueAMD64_OpTruncFloat64x4(v) - case OpTruncWithPrecisionFloat32x16: - return rewriteValueAMD64_OpTruncWithPrecisionFloat32x16(v) - case OpTruncWithPrecisionFloat32x4: - return rewriteValueAMD64_OpTruncWithPrecisionFloat32x4(v) - case OpTruncWithPrecisionFloat32x8: - return rewriteValueAMD64_OpTruncWithPrecisionFloat32x8(v) - case OpTruncWithPrecisionFloat64x2: - return rewriteValueAMD64_OpTruncWithPrecisionFloat64x2(v) - case OpTruncWithPrecisionFloat64x4: - return rewriteValueAMD64_OpTruncWithPrecisionFloat64x4(v) - case OpTruncWithPrecisionFloat64x8: - return rewriteValueAMD64_OpTruncWithPrecisionFloat64x8(v) - case OpTruncWithPrecisionMaskedFloat32x16: - return rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat32x16(v) - case OpTruncWithPrecisionMaskedFloat32x4: - return rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat32x4(v) - case OpTruncWithPrecisionMaskedFloat32x8: - return rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat32x8(v) - case OpTruncWithPrecisionMaskedFloat64x2: - return rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat64x2(v) - case OpTruncWithPrecisionMaskedFloat64x4: - return rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat64x4(v) - case OpTruncWithPrecisionMaskedFloat64x8: - return rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat64x8(v) + case OpTruncScaledFloat32x16: + return rewriteValueAMD64_OpTruncScaledFloat32x16(v) + case OpTruncScaledFloat32x4: + return rewriteValueAMD64_OpTruncScaledFloat32x4(v) + case OpTruncScaledFloat32x8: + return rewriteValueAMD64_OpTruncScaledFloat32x8(v) + case OpTruncScaledFloat64x2: + return rewriteValueAMD64_OpTruncScaledFloat64x2(v) + case OpTruncScaledFloat64x4: + return rewriteValueAMD64_OpTruncScaledFloat64x4(v) + case OpTruncScaledFloat64x8: + return rewriteValueAMD64_OpTruncScaledFloat64x8(v) + case OpTruncScaledMaskedFloat32x16: + return rewriteValueAMD64_OpTruncScaledMaskedFloat32x16(v) + case OpTruncScaledMaskedFloat32x4: + return rewriteValueAMD64_OpTruncScaledMaskedFloat32x4(v) + case OpTruncScaledMaskedFloat32x8: + return rewriteValueAMD64_OpTruncScaledMaskedFloat32x8(v) + case OpTruncScaledMaskedFloat64x2: + return rewriteValueAMD64_OpTruncScaledMaskedFloat64x2(v) + case OpTruncScaledMaskedFloat64x4: + return rewriteValueAMD64_OpTruncScaledMaskedFloat64x4(v) + case OpTruncScaledMaskedFloat64x8: + return rewriteValueAMD64_OpTruncScaledMaskedFloat64x8(v) + case OpTruncScaledResidueFloat32x16: + return rewriteValueAMD64_OpTruncScaledResidueFloat32x16(v) + case OpTruncScaledResidueFloat32x4: + return rewriteValueAMD64_OpTruncScaledResidueFloat32x4(v) + case OpTruncScaledResidueFloat32x8: + return rewriteValueAMD64_OpTruncScaledResidueFloat32x8(v) + case OpTruncScaledResidueFloat64x2: + return rewriteValueAMD64_OpTruncScaledResidueFloat64x2(v) + case OpTruncScaledResidueFloat64x4: + return 
rewriteValueAMD64_OpTruncScaledResidueFloat64x4(v) + case OpTruncScaledResidueFloat64x8: + return rewriteValueAMD64_OpTruncScaledResidueFloat64x8(v) + case OpTruncScaledResidueMaskedFloat32x16: + return rewriteValueAMD64_OpTruncScaledResidueMaskedFloat32x16(v) + case OpTruncScaledResidueMaskedFloat32x4: + return rewriteValueAMD64_OpTruncScaledResidueMaskedFloat32x4(v) + case OpTruncScaledResidueMaskedFloat32x8: + return rewriteValueAMD64_OpTruncScaledResidueMaskedFloat32x8(v) + case OpTruncScaledResidueMaskedFloat64x2: + return rewriteValueAMD64_OpTruncScaledResidueMaskedFloat64x2(v) + case OpTruncScaledResidueMaskedFloat64x4: + return rewriteValueAMD64_OpTruncScaledResidueMaskedFloat64x4(v) + case OpTruncScaledResidueMaskedFloat64x8: + return rewriteValueAMD64_OpTruncScaledResidueMaskedFloat64x8(v) case OpUnsignedSignedQuadDotProdAccumulateInt32x16: v.Op = OpAMD64VPDPBUSD512 return true @@ -29162,6 +29162,222 @@ func rewriteValueAMD64_OpAddMaskedUint8x64(v *Value) bool { return true } } +func rewriteValueAMD64_OpAddSaturatedMaskedInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AddSaturatedMaskedInt16x16 x y mask) + // result: (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpAddSaturatedMaskedInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AddSaturatedMaskedInt16x32 x y mask) + // result: (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpAddSaturatedMaskedInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AddSaturatedMaskedInt16x8 x y mask) + // result: (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpAddSaturatedMaskedInt8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AddSaturatedMaskedInt8x16 x y mask) + // result: (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpAddSaturatedMaskedInt8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AddSaturatedMaskedInt8x32 x y mask) + // result: (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpAddSaturatedMaskedInt8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AddSaturatedMaskedInt8x64 x y mask) + // result: (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) + for { + x := 
v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpAddSaturatedMaskedUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AddSaturatedMaskedUint16x16 x y mask) + // result: (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpAddSaturatedMaskedUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AddSaturatedMaskedUint16x32 x y mask) + // result: (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpAddSaturatedMaskedUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AddSaturatedMaskedUint16x8 x y mask) + // result: (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpAddSaturatedMaskedUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AddSaturatedMaskedUint8x16 x y mask) + // result: (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpAddSaturatedMaskedUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AddSaturatedMaskedUint8x32 x y mask) + // result: (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpAddSaturatedMaskedUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AddSaturatedMaskedUint8x64 x y mask) + // result: (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} func rewriteValueAMD64_OpAddr(v *Value) bool { v_0 := v.Args[0] // match: (Addr {sym} base) @@ -30521,9 +30737,9 @@ func rewriteValueAMD64_OpCeilFloat64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpCeilWithPrecisionFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpCeilScaledFloat32x16(v *Value) bool { v_0 := v.Args[0] - // match: (CeilWithPrecisionFloat32x16 [a] x) + // match: (CeilScaledFloat32x16 [a] x) // result: (VRNDSCALEPS512 [a+2] x) for { a := auxIntToInt8(v.AuxInt) @@ -30534,9 +30750,9 @@ func rewriteValueAMD64_OpCeilWithPrecisionFloat32x16(v *Value) bool { return true } 
} -func rewriteValueAMD64_OpCeilWithPrecisionFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpCeilScaledFloat32x4(v *Value) bool { v_0 := v.Args[0] - // match: (CeilWithPrecisionFloat32x4 [a] x) + // match: (CeilScaledFloat32x4 [a] x) // result: (VRNDSCALEPS128 [a+2] x) for { a := auxIntToInt8(v.AuxInt) @@ -30547,9 +30763,9 @@ func rewriteValueAMD64_OpCeilWithPrecisionFloat32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpCeilWithPrecisionFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpCeilScaledFloat32x8(v *Value) bool { v_0 := v.Args[0] - // match: (CeilWithPrecisionFloat32x8 [a] x) + // match: (CeilScaledFloat32x8 [a] x) // result: (VRNDSCALEPS256 [a+2] x) for { a := auxIntToInt8(v.AuxInt) @@ -30560,9 +30776,9 @@ func rewriteValueAMD64_OpCeilWithPrecisionFloat32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpCeilWithPrecisionFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpCeilScaledFloat64x2(v *Value) bool { v_0 := v.Args[0] - // match: (CeilWithPrecisionFloat64x2 [a] x) + // match: (CeilScaledFloat64x2 [a] x) // result: (VRNDSCALEPD128 [a+2] x) for { a := auxIntToInt8(v.AuxInt) @@ -30573,9 +30789,9 @@ func rewriteValueAMD64_OpCeilWithPrecisionFloat64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpCeilWithPrecisionFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpCeilScaledFloat64x4(v *Value) bool { v_0 := v.Args[0] - // match: (CeilWithPrecisionFloat64x4 [a] x) + // match: (CeilScaledFloat64x4 [a] x) // result: (VRNDSCALEPD256 [a+2] x) for { a := auxIntToInt8(v.AuxInt) @@ -30586,9 +30802,9 @@ func rewriteValueAMD64_OpCeilWithPrecisionFloat64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpCeilWithPrecisionFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpCeilScaledFloat64x8(v *Value) bool { v_0 := v.Args[0] - // match: (CeilWithPrecisionFloat64x8 [a] x) + // match: (CeilScaledFloat64x8 [a] x) // result: (VRNDSCALEPD512 [a+2] x) for { a := auxIntToInt8(v.AuxInt) @@ -30599,11 +30815,11 @@ func rewriteValueAMD64_OpCeilWithPrecisionFloat64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpCeilScaledMaskedFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (CeilWithPrecisionMaskedFloat32x16 [a] x mask) + // match: (CeilScaledMaskedFloat32x16 [a] x mask) // result: (VRNDSCALEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -30617,11 +30833,11 @@ func rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpCeilScaledMaskedFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (CeilWithPrecisionMaskedFloat32x4 [a] x mask) + // match: (CeilScaledMaskedFloat32x4 [a] x mask) // result: (VRNDSCALEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -30635,11 +30851,11 @@ func rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpCeilScaledMaskedFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (CeilWithPrecisionMaskedFloat32x8 [a] x mask) + // match: (CeilScaledMaskedFloat32x8 [a] x mask) // result: (VRNDSCALEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -30653,11 +30869,11 @@ func 
rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpCeilScaledMaskedFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (CeilWithPrecisionMaskedFloat64x2 [a] x mask) + // match: (CeilScaledMaskedFloat64x2 [a] x mask) // result: (VRNDSCALEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -30671,11 +30887,11 @@ func rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpCeilScaledMaskedFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (CeilWithPrecisionMaskedFloat64x4 [a] x mask) + // match: (CeilScaledMaskedFloat64x4 [a] x mask) // result: (VRNDSCALEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -30689,11 +30905,11 @@ func rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpCeilScaledMaskedFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (CeilWithPrecisionMaskedFloat64x8 [a] x mask) + // match: (CeilScaledMaskedFloat64x8 [a] x mask) // result: (VRNDSCALEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -30707,6 +30923,192 @@ func rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat64x8(v *Value) bool { return true } } +func rewriteValueAMD64_OpCeilScaledResidueFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (CeilScaledResidueFloat32x16 [a] x) + // result: (VREDUCEPS512 [a+2] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS512) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpCeilScaledResidueFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (CeilScaledResidueFloat32x4 [a] x) + // result: (VREDUCEPS128 [a+2] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS128) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpCeilScaledResidueFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (CeilScaledResidueFloat32x8 [a] x) + // result: (VREDUCEPS256 [a+2] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS256) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpCeilScaledResidueFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (CeilScaledResidueFloat64x2 [a] x) + // result: (VREDUCEPD128 [a+2] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD128) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpCeilScaledResidueFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (CeilScaledResidueFloat64x4 [a] x) + // result: (VREDUCEPD256 [a+2] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD256) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpCeilScaledResidueFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (CeilScaledResidueFloat64x8 [a] x) + // result: (VREDUCEPD512 [a+2] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD512) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) + return true + 
} +} +func rewriteValueAMD64_OpCeilScaledResidueMaskedFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CeilScaledResidueMaskedFloat32x16 [a] x mask) + // result: (VREDUCEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCeilScaledResidueMaskedFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CeilScaledResidueMaskedFloat32x4 [a] x mask) + // result: (VREDUCEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCeilScaledResidueMaskedFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CeilScaledResidueMaskedFloat32x8 [a] x mask) + // result: (VREDUCEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCeilScaledResidueMaskedFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CeilScaledResidueMaskedFloat64x2 [a] x mask) + // result: (VREDUCEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCeilScaledResidueMaskedFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CeilScaledResidueMaskedFloat64x4 [a] x mask) + // result: (VREDUCEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCeilScaledResidueMaskedFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CeilScaledResidueMaskedFloat64x8 [a] x mask) + // result: (VREDUCEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} func rewriteValueAMD64_OpCompressFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -32596,750 +32998,6 @@ func rewriteValueAMD64_OpCvt8toMask64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithCeilWithPrecisionFloat32x16 [a] x) - // result: (VREDUCEPS512 [a+2] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS512) - v.AuxInt = 
int8ToAuxInt(a + 2) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithCeilWithPrecisionFloat32x4 [a] x) - // result: (VREDUCEPS128 [a+2] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS128) - v.AuxInt = int8ToAuxInt(a + 2) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithCeilWithPrecisionFloat32x8 [a] x) - // result: (VREDUCEPS256 [a+2] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS256) - v.AuxInt = int8ToAuxInt(a + 2) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithCeilWithPrecisionFloat64x2 [a] x) - // result: (VREDUCEPD128 [a+2] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD128) - v.AuxInt = int8ToAuxInt(a + 2) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithCeilWithPrecisionFloat64x4 [a] x) - // result: (VREDUCEPD256 [a+2] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD256) - v.AuxInt = int8ToAuxInt(a + 2) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithCeilWithPrecisionFloat64x8 [a] x) - // result: (VREDUCEPD512 [a+2] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD512) - v.AuxInt = int8ToAuxInt(a + 2) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithCeilWithPrecisionMaskedFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DiffWithCeilWithPrecisionMaskedFloat32x16 [a] x mask) - // result: (VREDUCEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 2) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpDiffWithCeilWithPrecisionMaskedFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DiffWithCeilWithPrecisionMaskedFloat32x4 [a] x mask) - // result: (VREDUCEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 2) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpDiffWithCeilWithPrecisionMaskedFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DiffWithCeilWithPrecisionMaskedFloat32x8 [a] x mask) - // result: (VREDUCEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 2) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpDiffWithCeilWithPrecisionMaskedFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DiffWithCeilWithPrecisionMaskedFloat64x2 [a] x mask) - // result: (VREDUCEPDMasked128 
[a+2] x (VPMOVVec64x2ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 2) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpDiffWithCeilWithPrecisionMaskedFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DiffWithCeilWithPrecisionMaskedFloat64x4 [a] x mask) - // result: (VREDUCEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 2) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpDiffWithCeilWithPrecisionMaskedFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DiffWithCeilWithPrecisionMaskedFloat64x8 [a] x mask) - // result: (VREDUCEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 2) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithFloorWithPrecisionFloat32x16 [a] x) - // result: (VREDUCEPS512 [a+1] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS512) - v.AuxInt = int8ToAuxInt(a + 1) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithFloorWithPrecisionFloat32x4 [a] x) - // result: (VREDUCEPS128 [a+1] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS128) - v.AuxInt = int8ToAuxInt(a + 1) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithFloorWithPrecisionFloat32x8 [a] x) - // result: (VREDUCEPS256 [a+1] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS256) - v.AuxInt = int8ToAuxInt(a + 1) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithFloorWithPrecisionFloat64x2 [a] x) - // result: (VREDUCEPD128 [a+1] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD128) - v.AuxInt = int8ToAuxInt(a + 1) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithFloorWithPrecisionFloat64x4 [a] x) - // result: (VREDUCEPD256 [a+1] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD256) - v.AuxInt = int8ToAuxInt(a + 1) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithFloorWithPrecisionFloat64x8 [a] x) - // result: (VREDUCEPD512 [a+1] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD512) - v.AuxInt = int8ToAuxInt(a + 1) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithFloorWithPrecisionMaskedFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: 
(DiffWithFloorWithPrecisionMaskedFloat32x16 [a] x mask) - // result: (VREDUCEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 1) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpDiffWithFloorWithPrecisionMaskedFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DiffWithFloorWithPrecisionMaskedFloat32x4 [a] x mask) - // result: (VREDUCEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 1) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpDiffWithFloorWithPrecisionMaskedFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DiffWithFloorWithPrecisionMaskedFloat32x8 [a] x mask) - // result: (VREDUCEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 1) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpDiffWithFloorWithPrecisionMaskedFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DiffWithFloorWithPrecisionMaskedFloat64x2 [a] x mask) - // result: (VREDUCEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 1) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpDiffWithFloorWithPrecisionMaskedFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DiffWithFloorWithPrecisionMaskedFloat64x4 [a] x mask) - // result: (VREDUCEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 1) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpDiffWithFloorWithPrecisionMaskedFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DiffWithFloorWithPrecisionMaskedFloat64x8 [a] x mask) - // result: (VREDUCEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 1) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithRoundWithPrecisionFloat32x16 [a] x) - // result: (VREDUCEPS512 [a+0] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS512) - v.AuxInt = int8ToAuxInt(a + 0) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithRoundWithPrecisionFloat32x4 [a] x) - // result: (VREDUCEPS128 [a+0] x) - 
for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS128) - v.AuxInt = int8ToAuxInt(a + 0) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithRoundWithPrecisionFloat32x8 [a] x) - // result: (VREDUCEPS256 [a+0] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS256) - v.AuxInt = int8ToAuxInt(a + 0) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithRoundWithPrecisionFloat64x2 [a] x) - // result: (VREDUCEPD128 [a+0] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD128) - v.AuxInt = int8ToAuxInt(a + 0) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithRoundWithPrecisionFloat64x4 [a] x) - // result: (VREDUCEPD256 [a+0] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD256) - v.AuxInt = int8ToAuxInt(a + 0) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithRoundWithPrecisionFloat64x8 [a] x) - // result: (VREDUCEPD512 [a+0] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD512) - v.AuxInt = int8ToAuxInt(a + 0) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithRoundWithPrecisionMaskedFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DiffWithRoundWithPrecisionMaskedFloat32x16 [a] x mask) - // result: (VREDUCEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 0) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpDiffWithRoundWithPrecisionMaskedFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DiffWithRoundWithPrecisionMaskedFloat32x4 [a] x mask) - // result: (VREDUCEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 0) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpDiffWithRoundWithPrecisionMaskedFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DiffWithRoundWithPrecisionMaskedFloat32x8 [a] x mask) - // result: (VREDUCEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 0) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpDiffWithRoundWithPrecisionMaskedFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DiffWithRoundWithPrecisionMaskedFloat64x2 [a] x mask) - // result: (VREDUCEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 0) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, 
types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpDiffWithRoundWithPrecisionMaskedFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DiffWithRoundWithPrecisionMaskedFloat64x4 [a] x mask) - // result: (VREDUCEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 0) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpDiffWithRoundWithPrecisionMaskedFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DiffWithRoundWithPrecisionMaskedFloat64x8 [a] x mask) - // result: (VREDUCEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 0) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithTruncWithPrecisionFloat32x16 [a] x) - // result: (VREDUCEPS512 [a+3] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS512) - v.AuxInt = int8ToAuxInt(a + 3) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithTruncWithPrecisionFloat32x4 [a] x) - // result: (VREDUCEPS128 [a+3] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS128) - v.AuxInt = int8ToAuxInt(a + 3) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithTruncWithPrecisionFloat32x8 [a] x) - // result: (VREDUCEPS256 [a+3] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS256) - v.AuxInt = int8ToAuxInt(a + 3) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithTruncWithPrecisionFloat64x2 [a] x) - // result: (VREDUCEPD128 [a+3] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD128) - v.AuxInt = int8ToAuxInt(a + 3) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithTruncWithPrecisionFloat64x4 [a] x) - // result: (VREDUCEPD256 [a+3] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD256) - v.AuxInt = int8ToAuxInt(a + 3) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithTruncWithPrecisionFloat64x8 [a] x) - // result: (VREDUCEPD512 [a+3] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD512) - v.AuxInt = int8ToAuxInt(a + 3) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithTruncWithPrecisionMaskedFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DiffWithTruncWithPrecisionMaskedFloat32x16 [a] x mask) - // result: (VREDUCEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked512) - v.AuxInt = 
int8ToAuxInt(a + 3) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpDiffWithTruncWithPrecisionMaskedFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DiffWithTruncWithPrecisionMaskedFloat32x4 [a] x mask) - // result: (VREDUCEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 3) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpDiffWithTruncWithPrecisionMaskedFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DiffWithTruncWithPrecisionMaskedFloat32x8 [a] x mask) - // result: (VREDUCEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 3) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpDiffWithTruncWithPrecisionMaskedFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DiffWithTruncWithPrecisionMaskedFloat64x2 [a] x mask) - // result: (VREDUCEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 3) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpDiffWithTruncWithPrecisionMaskedFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DiffWithTruncWithPrecisionMaskedFloat64x4 [a] x mask) - // result: (VREDUCEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 3) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpDiffWithTruncWithPrecisionMaskedFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DiffWithTruncWithPrecisionMaskedFloat64x8 [a] x mask) - // result: (VREDUCEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 3) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} func rewriteValueAMD64_OpDiv16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -34731,9 +34389,9 @@ func rewriteValueAMD64_OpFloorFloat64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpFloorWithPrecisionFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpFloorScaledFloat32x16(v *Value) bool { v_0 := v.Args[0] - // match: (FloorWithPrecisionFloat32x16 [a] x) + // match: (FloorScaledFloat32x16 [a] x) // result: (VRNDSCALEPS512 [a+1] x) for { a := auxIntToInt8(v.AuxInt) @@ -34744,9 +34402,9 @@ func rewriteValueAMD64_OpFloorWithPrecisionFloat32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpFloorWithPrecisionFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpFloorScaledFloat32x4(v *Value) bool { v_0 := v.Args[0] - // match: 
(FloorWithPrecisionFloat32x4 [a] x) + // match: (FloorScaledFloat32x4 [a] x) // result: (VRNDSCALEPS128 [a+1] x) for { a := auxIntToInt8(v.AuxInt) @@ -34757,9 +34415,9 @@ func rewriteValueAMD64_OpFloorWithPrecisionFloat32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpFloorWithPrecisionFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpFloorScaledFloat32x8(v *Value) bool { v_0 := v.Args[0] - // match: (FloorWithPrecisionFloat32x8 [a] x) + // match: (FloorScaledFloat32x8 [a] x) // result: (VRNDSCALEPS256 [a+1] x) for { a := auxIntToInt8(v.AuxInt) @@ -34770,9 +34428,9 @@ func rewriteValueAMD64_OpFloorWithPrecisionFloat32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpFloorWithPrecisionFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpFloorScaledFloat64x2(v *Value) bool { v_0 := v.Args[0] - // match: (FloorWithPrecisionFloat64x2 [a] x) + // match: (FloorScaledFloat64x2 [a] x) // result: (VRNDSCALEPD128 [a+1] x) for { a := auxIntToInt8(v.AuxInt) @@ -34783,9 +34441,9 @@ func rewriteValueAMD64_OpFloorWithPrecisionFloat64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpFloorWithPrecisionFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpFloorScaledFloat64x4(v *Value) bool { v_0 := v.Args[0] - // match: (FloorWithPrecisionFloat64x4 [a] x) + // match: (FloorScaledFloat64x4 [a] x) // result: (VRNDSCALEPD256 [a+1] x) for { a := auxIntToInt8(v.AuxInt) @@ -34796,9 +34454,9 @@ func rewriteValueAMD64_OpFloorWithPrecisionFloat64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpFloorWithPrecisionFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpFloorScaledFloat64x8(v *Value) bool { v_0 := v.Args[0] - // match: (FloorWithPrecisionFloat64x8 [a] x) + // match: (FloorScaledFloat64x8 [a] x) // result: (VRNDSCALEPD512 [a+1] x) for { a := auxIntToInt8(v.AuxInt) @@ -34809,11 +34467,11 @@ func rewriteValueAMD64_OpFloorWithPrecisionFloat64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpFloorScaledMaskedFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (FloorWithPrecisionMaskedFloat32x16 [a] x mask) + // match: (FloorScaledMaskedFloat32x16 [a] x mask) // result: (VRNDSCALEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -34827,11 +34485,11 @@ func rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpFloorScaledMaskedFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (FloorWithPrecisionMaskedFloat32x4 [a] x mask) + // match: (FloorScaledMaskedFloat32x4 [a] x mask) // result: (VRNDSCALEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -34845,11 +34503,11 @@ func rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpFloorScaledMaskedFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (FloorWithPrecisionMaskedFloat32x8 [a] x mask) + // match: (FloorScaledMaskedFloat32x8 [a] x mask) // result: (VRNDSCALEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -34863,11 +34521,11 @@ func rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat32x8(v *Value) bool { return true } } -func 
rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpFloorScaledMaskedFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (FloorWithPrecisionMaskedFloat64x2 [a] x mask) + // match: (FloorScaledMaskedFloat64x2 [a] x mask) // result: (VRNDSCALEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -34881,11 +34539,11 @@ func rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpFloorScaledMaskedFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (FloorWithPrecisionMaskedFloat64x4 [a] x mask) + // match: (FloorScaledMaskedFloat64x4 [a] x mask) // result: (VRNDSCALEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -34899,11 +34557,11 @@ func rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpFloorScaledMaskedFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (FloorWithPrecisionMaskedFloat64x8 [a] x mask) + // match: (FloorScaledMaskedFloat64x8 [a] x mask) // result: (VRNDSCALEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -34917,6 +34575,192 @@ func rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat64x8(v *Value) bool { return true } } +func rewriteValueAMD64_OpFloorScaledResidueFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorScaledResidueFloat32x16 [a] x) + // result: (VREDUCEPS512 [a+1] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS512) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpFloorScaledResidueFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorScaledResidueFloat32x4 [a] x) + // result: (VREDUCEPS128 [a+1] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS128) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpFloorScaledResidueFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorScaledResidueFloat32x8 [a] x) + // result: (VREDUCEPS256 [a+1] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS256) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpFloorScaledResidueFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorScaledResidueFloat64x2 [a] x) + // result: (VREDUCEPD128 [a+1] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD128) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpFloorScaledResidueFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorScaledResidueFloat64x4 [a] x) + // result: (VREDUCEPD256 [a+1] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD256) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpFloorScaledResidueFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorScaledResidueFloat64x8 [a] x) + // result: (VREDUCEPD512 [a+1] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD512) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) + return true + } +} +func 
rewriteValueAMD64_OpFloorScaledResidueMaskedFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (FloorScaledResidueMaskedFloat32x16 [a] x mask) + // result: (VREDUCEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpFloorScaledResidueMaskedFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (FloorScaledResidueMaskedFloat32x4 [a] x mask) + // result: (VREDUCEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpFloorScaledResidueMaskedFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (FloorScaledResidueMaskedFloat32x8 [a] x mask) + // result: (VREDUCEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpFloorScaledResidueMaskedFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (FloorScaledResidueMaskedFloat64x2 [a] x mask) + // result: (VREDUCEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpFloorScaledResidueMaskedFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (FloorScaledResidueMaskedFloat64x4 [a] x mask) + // result: (VREDUCEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpFloorScaledResidueMaskedFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (FloorScaledResidueMaskedFloat64x8 [a] x mask) + // result: (VREDUCEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} func rewriteValueAMD64_OpFusedMultiplyAddMaskedFloat32x16(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] @@ -43583,114 +43427,6 @@ func rewriteValueAMD64_OpMove(v *Value) bool { } return false } -func rewriteValueAMD64_OpMulByPowOf2MaskedFloat32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulByPowOf2MaskedFloat32x16 x y mask) - // result: (VSCALEFPSMasked512 x y (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 
- mask := v_2 - v.reset(OpAMD64VSCALEFPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMulByPowOf2MaskedFloat32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulByPowOf2MaskedFloat32x4 x y mask) - // result: (VSCALEFPSMasked128 x y (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VSCALEFPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMulByPowOf2MaskedFloat32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulByPowOf2MaskedFloat32x8 x y mask) - // result: (VSCALEFPSMasked256 x y (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VSCALEFPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMulByPowOf2MaskedFloat64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulByPowOf2MaskedFloat64x2 x y mask) - // result: (VSCALEFPDMasked128 x y (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VSCALEFPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMulByPowOf2MaskedFloat64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulByPowOf2MaskedFloat64x4 x y mask) - // result: (VSCALEFPDMasked256 x y (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VSCALEFPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMulByPowOf2MaskedFloat64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulByPowOf2MaskedFloat64x8 x y mask) - // result: (VSCALEFPDMasked512 x y (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VSCALEFPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} func rewriteValueAMD64_OpMulEvenWidenMaskedInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -43907,270 +43643,270 @@ func rewriteValueAMD64_OpMulHighMaskedUint16x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMulLowMaskedInt16x16(v *Value) bool { +func rewriteValueAMD64_OpMulMaskedFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulLowMaskedInt16x16 x y mask) - // result: (VPMULLWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (MulMaskedFloat32x16 x y mask) + // result: (VMULPSMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULLWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v.reset(OpAMD64VMULPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulLowMaskedInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMulMaskedFloat32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 
:= v.Args[0] b := v.Block - // match: (MulLowMaskedInt16x32 x y mask) - // result: (VPMULLWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (MulMaskedFloat32x4 x y mask) + // result: (VMULPSMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULLWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v.reset(OpAMD64VMULPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulLowMaskedInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMulMaskedFloat32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulLowMaskedInt16x8 x y mask) - // result: (VPMULLWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (MulMaskedFloat32x8 x y mask) + // result: (VMULPSMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULLWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v.reset(OpAMD64VMULPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulLowMaskedInt32x16(v *Value) bool { +func rewriteValueAMD64_OpMulMaskedFloat64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulLowMaskedInt32x16 x y mask) - // result: (VPMULLDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (MulMaskedFloat64x2 x y mask) + // result: (VMULPDMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULLDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v.reset(OpAMD64VMULPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulLowMaskedInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMulMaskedFloat64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulLowMaskedInt32x4 x y mask) - // result: (VPMULLDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (MulMaskedFloat64x4 x y mask) + // result: (VMULPDMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULLDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VMULPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulLowMaskedInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMulMaskedFloat64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulLowMaskedInt32x8 x y mask) - // result: (VPMULLDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (MulMaskedFloat64x8 x y mask) + // result: (VMULPDMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULLDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VMULPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulLowMaskedInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMulMaskedInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulLowMaskedInt64x2 x y mask) - // result: (VPMULLQMasked128 x y (VPMOVVec64x2ToM mask)) + // 
match: (MulMaskedInt16x16 x y mask) + // result: (VPMULLWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULLQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPMULLWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulLowMaskedInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMulMaskedInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulLowMaskedInt64x4 x y mask) - // result: (VPMULLQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (MulMaskedInt16x32 x y mask) + // result: (VPMULLWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULLQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPMULLWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulLowMaskedInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMulMaskedInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulLowMaskedInt64x8 x y mask) - // result: (VPMULLQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (MulMaskedInt16x8 x y mask) + // result: (VPMULLWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULLQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v.reset(OpAMD64VPMULLWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulMaskedFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMulMaskedInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulMaskedFloat32x16 x y mask) - // result: (VMULPSMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (MulMaskedInt32x16 x y mask) + // result: (VPMULLDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VMULPSMasked512) + v.reset(OpAMD64VPMULLDMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulMaskedFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMulMaskedInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulMaskedFloat32x4 x y mask) - // result: (VMULPSMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (MulMaskedInt32x4 x y mask) + // result: (VPMULLDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VMULPSMasked128) + v.reset(OpAMD64VPMULLDMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulMaskedFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMulMaskedInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulMaskedFloat32x8 x y mask) - // result: (VMULPSMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (MulMaskedInt32x8 x y mask) + // result: (VPMULLDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VMULPSMasked256) + v.reset(OpAMD64VPMULLDMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) 
v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulMaskedFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMulMaskedInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulMaskedFloat64x2 x y mask) - // result: (VMULPDMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (MulMaskedInt64x2 x y mask) + // result: (VPMULLQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VMULPDMasked128) + v.reset(OpAMD64VPMULLQMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulMaskedFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMulMaskedInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulMaskedFloat64x4 x y mask) - // result: (VMULPDMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (MulMaskedInt64x4 x y mask) + // result: (VPMULLQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VMULPDMasked256) + v.reset(OpAMD64VPMULLQMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulMaskedFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMulMaskedInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulMaskedFloat64x8 x y mask) - // result: (VMULPDMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (MulMaskedInt64x8 x y mask) + // result: (VPMULLQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VMULPDMasked512) + v.reset(OpAMD64VPMULLQMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -48243,21 +47979,9 @@ func rewriteValueAMD64_OpRoundFloat64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundToEven(v *Value) bool { - v_0 := v.Args[0] - // match: (RoundToEven x) - // result: (ROUNDSD [0] x) - for { - x := v_0 - v.reset(OpAMD64ROUNDSD) - v.AuxInt = int8ToAuxInt(0) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRoundWithPrecisionFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpRoundScaledFloat32x16(v *Value) bool { v_0 := v.Args[0] - // match: (RoundWithPrecisionFloat32x16 [a] x) + // match: (RoundScaledFloat32x16 [a] x) // result: (VRNDSCALEPS512 [a+0] x) for { a := auxIntToInt8(v.AuxInt) @@ -48268,9 +47992,9 @@ func rewriteValueAMD64_OpRoundWithPrecisionFloat32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundWithPrecisionFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpRoundScaledFloat32x4(v *Value) bool { v_0 := v.Args[0] - // match: (RoundWithPrecisionFloat32x4 [a] x) + // match: (RoundScaledFloat32x4 [a] x) // result: (VRNDSCALEPS128 [a+0] x) for { a := auxIntToInt8(v.AuxInt) @@ -48281,9 +48005,9 @@ func rewriteValueAMD64_OpRoundWithPrecisionFloat32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundWithPrecisionFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpRoundScaledFloat32x8(v *Value) bool { v_0 := v.Args[0] - // match: (RoundWithPrecisionFloat32x8 [a] x) + // match: (RoundScaledFloat32x8 [a] x) // result: (VRNDSCALEPS256 [a+0] x) for { a := auxIntToInt8(v.AuxInt) @@ -48294,9 +48018,9 @@ func rewriteValueAMD64_OpRoundWithPrecisionFloat32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundWithPrecisionFloat64x2(v *Value) bool { +func 
rewriteValueAMD64_OpRoundScaledFloat64x2(v *Value) bool { v_0 := v.Args[0] - // match: (RoundWithPrecisionFloat64x2 [a] x) + // match: (RoundScaledFloat64x2 [a] x) // result: (VRNDSCALEPD128 [a+0] x) for { a := auxIntToInt8(v.AuxInt) @@ -48307,9 +48031,9 @@ func rewriteValueAMD64_OpRoundWithPrecisionFloat64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundWithPrecisionFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpRoundScaledFloat64x4(v *Value) bool { v_0 := v.Args[0] - // match: (RoundWithPrecisionFloat64x4 [a] x) + // match: (RoundScaledFloat64x4 [a] x) // result: (VRNDSCALEPD256 [a+0] x) for { a := auxIntToInt8(v.AuxInt) @@ -48320,9 +48044,9 @@ func rewriteValueAMD64_OpRoundWithPrecisionFloat64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundWithPrecisionFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpRoundScaledFloat64x8(v *Value) bool { v_0 := v.Args[0] - // match: (RoundWithPrecisionFloat64x8 [a] x) + // match: (RoundScaledFloat64x8 [a] x) // result: (VRNDSCALEPD512 [a+0] x) for { a := auxIntToInt8(v.AuxInt) @@ -48333,11 +48057,11 @@ func rewriteValueAMD64_OpRoundWithPrecisionFloat64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpRoundScaledMaskedFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RoundWithPrecisionMaskedFloat32x16 [a] x mask) + // match: (RoundScaledMaskedFloat32x16 [a] x mask) // result: (VRNDSCALEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -48351,11 +48075,11 @@ func rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpRoundScaledMaskedFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RoundWithPrecisionMaskedFloat32x4 [a] x mask) + // match: (RoundScaledMaskedFloat32x4 [a] x mask) // result: (VRNDSCALEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -48369,11 +48093,11 @@ func rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpRoundScaledMaskedFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RoundWithPrecisionMaskedFloat32x8 [a] x mask) + // match: (RoundScaledMaskedFloat32x8 [a] x mask) // result: (VRNDSCALEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -48387,11 +48111,11 @@ func rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpRoundScaledMaskedFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RoundWithPrecisionMaskedFloat64x2 [a] x mask) + // match: (RoundScaledMaskedFloat64x2 [a] x mask) // result: (VRNDSCALEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -48405,11 +48129,11 @@ func rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpRoundScaledMaskedFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RoundWithPrecisionMaskedFloat64x4 [a] x mask) + // match: 
(RoundScaledMaskedFloat64x4 [a] x mask) // result: (VRNDSCALEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -48423,11 +48147,11 @@ func rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpRoundScaledMaskedFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RoundWithPrecisionMaskedFloat64x8 [a] x mask) + // match: (RoundScaledMaskedFloat64x8 [a] x mask) // result: (VRNDSCALEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -48441,6 +48165,204 @@ func rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat64x8(v *Value) bool { return true } } +func rewriteValueAMD64_OpRoundScaledResidueFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (RoundScaledResidueFloat32x16 [a] x) + // result: (VREDUCEPS512 [a+0] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS512) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRoundScaledResidueFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (RoundScaledResidueFloat32x4 [a] x) + // result: (VREDUCEPS128 [a+0] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS128) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRoundScaledResidueFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (RoundScaledResidueFloat32x8 [a] x) + // result: (VREDUCEPS256 [a+0] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS256) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRoundScaledResidueFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (RoundScaledResidueFloat64x2 [a] x) + // result: (VREDUCEPD128 [a+0] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD128) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRoundScaledResidueFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (RoundScaledResidueFloat64x4 [a] x) + // result: (VREDUCEPD256 [a+0] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD256) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRoundScaledResidueFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (RoundScaledResidueFloat64x8 [a] x) + // result: (VREDUCEPD512 [a+0] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD512) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRoundScaledResidueMaskedFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (RoundScaledResidueMaskedFloat32x16 [a] x mask) + // result: (VREDUCEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpRoundScaledResidueMaskedFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (RoundScaledResidueMaskedFloat32x4 [a] x mask) + // result: (VREDUCEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + 
v.reset(OpAMD64VREDUCEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpRoundScaledResidueMaskedFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (RoundScaledResidueMaskedFloat32x8 [a] x mask) + // result: (VREDUCEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpRoundScaledResidueMaskedFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (RoundScaledResidueMaskedFloat64x2 [a] x mask) + // result: (VREDUCEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpRoundScaledResidueMaskedFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (RoundScaledResidueMaskedFloat64x4 [a] x mask) + // result: (VREDUCEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpRoundScaledResidueMaskedFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (RoundScaledResidueMaskedFloat64x8 [a] x mask) + // result: (VREDUCEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpRoundToEven(v *Value) bool { + v_0 := v.Args[0] + // match: (RoundToEven x) + // result: (ROUNDSD [0] x) + for { + x := v_0 + v.reset(OpAMD64ROUNDSD) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} func rewriteValueAMD64_OpRsh16Ux16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -49829,552 +49751,228 @@ func rewriteValueAMD64_OpSaturatedAddDotProdMaskedInt32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpSaturatedAddMaskedInt16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedAddMaskedInt16x16 x y mask) - // result: (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddMaskedInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedAddMaskedInt16x32 x y mask) - // result: (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - 
v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddMaskedInt16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedAddMaskedInt16x8 x y mask) - // result: (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddMaskedInt8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedAddMaskedInt8x16 x y mask) - // result: (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddMaskedInt8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedAddMaskedInt8x32 x y mask) - // result: (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddMaskedInt8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedAddMaskedInt8x64 x y mask) - // result: (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddMaskedUint16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedAddMaskedUint16x16 x y mask) - // result: (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddMaskedUint16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedAddMaskedUint16x32 x y mask) - // result: (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddMaskedUint16x8(v *Value) bool { +func rewriteValueAMD64_OpSaturatedUnsignedSignedPairDotProdMaskedUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (SaturatedAddMaskedUint16x8 x y mask) - // result: (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (SaturatedUnsignedSignedPairDotProdMaskedUint8x16 x y mask) + // result: (VPMADDUBSWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDSWMasked128) + v.reset(OpAMD64VPMADDUBSWMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func 
rewriteValueAMD64_OpSaturatedAddMaskedUint8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedAddMaskedUint8x16 x y mask) - // result: (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddMaskedUint8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedAddMaskedUint8x32 x y mask) - // result: (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddMaskedUint8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedAddMaskedUint8x64 x y mask) - // result: (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedSubMaskedInt16x16(v *Value) bool { +func rewriteValueAMD64_OpSaturatedUnsignedSignedPairDotProdMaskedUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (SaturatedSubMaskedInt16x16 x y mask) - // result: (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (SaturatedUnsignedSignedPairDotProdMaskedUint8x32 x y mask) + // result: (VPMADDUBSWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBSWMasked256) + v.reset(OpAMD64VPMADDUBSWMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpSaturatedSubMaskedInt16x32(v *Value) bool { +func rewriteValueAMD64_OpSaturatedUnsignedSignedPairDotProdMaskedUint8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (SaturatedSubMaskedInt16x32 x y mask) - // result: (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (SaturatedUnsignedSignedPairDotProdMaskedUint8x64 x y mask) + // result: (VPMADDUBSWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBSWMasked512) + v.reset(OpAMD64VPMADDUBSWMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpSaturatedSubMaskedInt16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedSubMaskedInt16x8 x y mask) - // result: (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedSubMaskedInt8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedSubMaskedInt8x16 x y mask) - // result: (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSBMasked128) - v0 
:= b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedSubMaskedInt8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedSubMaskedInt8x32 x y mask) - // result: (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedSubMaskedInt8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedSubMaskedInt8x64 x y mask) - // result: (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedSubMaskedUint16x16(v *Value) bool { +func rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (SaturatedSubMaskedUint16x16 x y mask) - // result: (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16 x y z mask) + // result: (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpSaturatedSubMaskedUint16x32(v *Value) bool { +func rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (SaturatedSubMaskedUint16x32 x y mask) - // result: (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4 x y z mask) + // result: (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpSaturatedSubMaskedUint16x8(v *Value) bool { +func rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (SaturatedSubMaskedUint16x8 x y mask) - // result: (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8 x y z mask) + // result: (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) 
- v.AddArg3(x, y, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpSaturatedSubMaskedUint8x16(v *Value) bool { +func rewriteValueAMD64_OpScaleMaskedFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (SaturatedSubMaskedUint8x16 x y mask) - // result: (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) + // match: (ScaleMaskedFloat32x16 x y mask) + // result: (VSCALEFPSMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBSBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v.reset(OpAMD64VSCALEFPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpSaturatedSubMaskedUint8x32(v *Value) bool { +func rewriteValueAMD64_OpScaleMaskedFloat32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (SaturatedSubMaskedUint8x32 x y mask) - // result: (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) + // match: (ScaleMaskedFloat32x4 x y mask) + // result: (VSCALEFPSMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBSBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v.reset(OpAMD64VSCALEFPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpSaturatedSubMaskedUint8x64(v *Value) bool { +func rewriteValueAMD64_OpScaleMaskedFloat32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (SaturatedSubMaskedUint8x64 x y mask) - // result: (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) + // match: (ScaleMaskedFloat32x8 x y mask) + // result: (VSCALEFPSMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBSBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v.reset(OpAMD64VSCALEFPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpSaturatedUnsignedSignedPairDotProdMaskedUint8x16(v *Value) bool { +func rewriteValueAMD64_OpScaleMaskedFloat64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (SaturatedUnsignedSignedPairDotProdMaskedUint8x16 x y mask) - // result: (VPMADDUBSWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (ScaleMaskedFloat64x2 x y mask) + // result: (VSCALEFPDMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMADDUBSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v.reset(OpAMD64VSCALEFPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpSaturatedUnsignedSignedPairDotProdMaskedUint8x32(v *Value) bool { +func rewriteValueAMD64_OpScaleMaskedFloat64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (SaturatedUnsignedSignedPairDotProdMaskedUint8x32 x y mask) - // result: (VPMADDUBSWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (ScaleMaskedFloat64x4 x y mask) + // result: (VSCALEFPDMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMADDUBSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + 
v.reset(OpAMD64VSCALEFPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpSaturatedUnsignedSignedPairDotProdMaskedUint8x64(v *Value) bool { +func rewriteValueAMD64_OpScaleMaskedFloat64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (SaturatedUnsignedSignedPairDotProdMaskedUint8x64 x y mask) - // result: (VPMADDUBSWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (ScaleMaskedFloat64x8 x y mask) + // result: (VSCALEFPDMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMADDUBSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v.reset(OpAMD64VSCALEFPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16 x y z mask) - // result: (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4 x y z mask) - // result: (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8 x y z mask) - // result: (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} func rewriteValueAMD64_OpSelect0(v *Value) bool { v_0 := v.Args[0] b := v.Block @@ -54763,6 +54361,222 @@ func rewriteValueAMD64_OpSubMaskedUint8x64(v *Value) bool { return true } } +func rewriteValueAMD64_OpSubSaturatedMaskedInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SubSaturatedMaskedInt16x16 x y mask) + // result: (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpSubSaturatedMaskedInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SubSaturatedMaskedInt16x32 x y mask) + // result: (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + 
v.reset(OpAMD64VPSUBSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpSubSaturatedMaskedInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SubSaturatedMaskedInt16x8 x y mask) + // result: (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpSubSaturatedMaskedInt8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SubSaturatedMaskedInt8x16 x y mask) + // result: (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpSubSaturatedMaskedInt8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SubSaturatedMaskedInt8x32 x y mask) + // result: (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpSubSaturatedMaskedInt8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SubSaturatedMaskedInt8x64 x y mask) + // result: (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpSubSaturatedMaskedUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SubSaturatedMaskedUint16x16 x y mask) + // result: (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpSubSaturatedMaskedUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SubSaturatedMaskedUint16x32 x y mask) + // result: (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpSubSaturatedMaskedUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SubSaturatedMaskedUint16x8 x y mask) + // result: (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpSubSaturatedMaskedUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SubSaturatedMaskedUint8x16 x y 
mask) + // result: (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpSubSaturatedMaskedUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SubSaturatedMaskedUint8x32 x y mask) + // result: (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpSubSaturatedMaskedUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SubSaturatedMaskedUint8x64 x y mask) + // result: (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} func rewriteValueAMD64_OpTrunc(v *Value) bool { v_0 := v.Args[0] // match: (Trunc x) @@ -54823,9 +54637,9 @@ func rewriteValueAMD64_OpTruncFloat64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpTruncWithPrecisionFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpTruncScaledFloat32x16(v *Value) bool { v_0 := v.Args[0] - // match: (TruncWithPrecisionFloat32x16 [a] x) + // match: (TruncScaledFloat32x16 [a] x) // result: (VRNDSCALEPS512 [a+3] x) for { a := auxIntToInt8(v.AuxInt) @@ -54836,9 +54650,9 @@ func rewriteValueAMD64_OpTruncWithPrecisionFloat32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpTruncWithPrecisionFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpTruncScaledFloat32x4(v *Value) bool { v_0 := v.Args[0] - // match: (TruncWithPrecisionFloat32x4 [a] x) + // match: (TruncScaledFloat32x4 [a] x) // result: (VRNDSCALEPS128 [a+3] x) for { a := auxIntToInt8(v.AuxInt) @@ -54849,9 +54663,9 @@ func rewriteValueAMD64_OpTruncWithPrecisionFloat32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpTruncWithPrecisionFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpTruncScaledFloat32x8(v *Value) bool { v_0 := v.Args[0] - // match: (TruncWithPrecisionFloat32x8 [a] x) + // match: (TruncScaledFloat32x8 [a] x) // result: (VRNDSCALEPS256 [a+3] x) for { a := auxIntToInt8(v.AuxInt) @@ -54862,9 +54676,9 @@ func rewriteValueAMD64_OpTruncWithPrecisionFloat32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpTruncWithPrecisionFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpTruncScaledFloat64x2(v *Value) bool { v_0 := v.Args[0] - // match: (TruncWithPrecisionFloat64x2 [a] x) + // match: (TruncScaledFloat64x2 [a] x) // result: (VRNDSCALEPD128 [a+3] x) for { a := auxIntToInt8(v.AuxInt) @@ -54875,9 +54689,9 @@ func rewriteValueAMD64_OpTruncWithPrecisionFloat64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpTruncWithPrecisionFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpTruncScaledFloat64x4(v *Value) bool { v_0 := v.Args[0] - // match: (TruncWithPrecisionFloat64x4 [a] x) + // match: (TruncScaledFloat64x4 [a] x) // result: (VRNDSCALEPD256 [a+3] x) for { a := auxIntToInt8(v.AuxInt) @@ -54888,9 +54702,9 @@ func rewriteValueAMD64_OpTruncWithPrecisionFloat64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpTruncWithPrecisionFloat64x8(v *Value) bool { 
+func rewriteValueAMD64_OpTruncScaledFloat64x8(v *Value) bool { v_0 := v.Args[0] - // match: (TruncWithPrecisionFloat64x8 [a] x) + // match: (TruncScaledFloat64x8 [a] x) // result: (VRNDSCALEPD512 [a+3] x) for { a := auxIntToInt8(v.AuxInt) @@ -54901,11 +54715,11 @@ func rewriteValueAMD64_OpTruncWithPrecisionFloat64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpTruncScaledMaskedFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (TruncWithPrecisionMaskedFloat32x16 [a] x mask) + // match: (TruncScaledMaskedFloat32x16 [a] x mask) // result: (VRNDSCALEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -54919,11 +54733,11 @@ func rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpTruncScaledMaskedFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (TruncWithPrecisionMaskedFloat32x4 [a] x mask) + // match: (TruncScaledMaskedFloat32x4 [a] x mask) // result: (VRNDSCALEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -54937,11 +54751,11 @@ func rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpTruncScaledMaskedFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (TruncWithPrecisionMaskedFloat32x8 [a] x mask) + // match: (TruncScaledMaskedFloat32x8 [a] x mask) // result: (VRNDSCALEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -54955,11 +54769,11 @@ func rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpTruncScaledMaskedFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (TruncWithPrecisionMaskedFloat64x2 [a] x mask) + // match: (TruncScaledMaskedFloat64x2 [a] x mask) // result: (VRNDSCALEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -54973,11 +54787,11 @@ func rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpTruncScaledMaskedFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (TruncWithPrecisionMaskedFloat64x4 [a] x mask) + // match: (TruncScaledMaskedFloat64x4 [a] x mask) // result: (VRNDSCALEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -54991,11 +54805,11 @@ func rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpTruncScaledMaskedFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (TruncWithPrecisionMaskedFloat64x8 [a] x mask) + // match: (TruncScaledMaskedFloat64x8 [a] x mask) // result: (VRNDSCALEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -55009,6 +54823,192 @@ func rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat64x8(v *Value) bool { return true } } +func rewriteValueAMD64_OpTruncScaledResidueFloat32x16(v *Value) 
bool { + v_0 := v.Args[0] + // match: (TruncScaledResidueFloat32x16 [a] x) + // result: (VREDUCEPS512 [a+3] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS512) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpTruncScaledResidueFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (TruncScaledResidueFloat32x4 [a] x) + // result: (VREDUCEPS128 [a+3] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS128) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpTruncScaledResidueFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (TruncScaledResidueFloat32x8 [a] x) + // result: (VREDUCEPS256 [a+3] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS256) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpTruncScaledResidueFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (TruncScaledResidueFloat64x2 [a] x) + // result: (VREDUCEPD128 [a+3] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD128) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpTruncScaledResidueFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (TruncScaledResidueFloat64x4 [a] x) + // result: (VREDUCEPD256 [a+3] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD256) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpTruncScaledResidueFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (TruncScaledResidueFloat64x8 [a] x) + // result: (VREDUCEPD512 [a+3] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD512) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpTruncScaledResidueMaskedFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (TruncScaledResidueMaskedFloat32x16 [a] x mask) + // result: (VREDUCEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpTruncScaledResidueMaskedFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (TruncScaledResidueMaskedFloat32x4 [a] x mask) + // result: (VREDUCEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpTruncScaledResidueMaskedFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (TruncScaledResidueMaskedFloat32x8 [a] x mask) + // result: (VREDUCEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpTruncScaledResidueMaskedFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: 
(TruncScaledResidueMaskedFloat64x2 [a] x mask) + // result: (VREDUCEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpTruncScaledResidueMaskedFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (TruncScaledResidueMaskedFloat64x4 [a] x mask) + // result: (VREDUCEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpTruncScaledResidueMaskedFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (TruncScaledResidueMaskedFloat64x8 [a] x mask) + // result: (VREDUCEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} func rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x16(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 7a7367ee1e7503..511974ffa1bf34 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -101,6 +101,44 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.AddMasked", opLen3(ssa.OpAddMaskedUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.AddMasked", opLen3(ssa.OpAddMaskedUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.AddMasked", opLen3(ssa.OpAddMaskedUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.AddPairs", opLen2(ssa.OpAddPairsFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.AddPairs", opLen2(ssa.OpAddPairsFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x2.AddPairs", opLen2(ssa.OpAddPairsFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.AddPairs", opLen2(ssa.OpAddPairsFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x8.AddPairs", opLen2(ssa.OpAddPairsInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.AddPairs", opLen2(ssa.OpAddPairsInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x4.AddPairs", opLen2(ssa.OpAddPairsInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.AddPairs", opLen2(ssa.OpAddPairsInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x8.AddPairs", opLen2(ssa.OpAddPairsUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.AddPairs", opLen2(ssa.OpAddPairsUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x4.AddPairs", opLen2(ssa.OpAddPairsUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.AddPairs", opLen2(ssa.OpAddPairsUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x8.AddPairsSaturated", opLen2(ssa.OpAddPairsSaturatedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.AddPairsSaturated", opLen2(ssa.OpAddPairsSaturatedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x16.AddSaturated", opLen2(ssa.OpAddSaturatedInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.AddSaturated", opLen2(ssa.OpAddSaturatedInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.AddSaturated", opLen2(ssa.OpAddSaturatedInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.AddSaturated", opLen2(ssa.OpAddSaturatedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.AddSaturated", opLen2(ssa.OpAddSaturatedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.AddSaturated", opLen2(ssa.OpAddSaturatedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.AddSaturated", opLen2(ssa.OpAddSaturatedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.AddSaturated", opLen2(ssa.OpAddSaturatedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.AddSaturated", opLen2(ssa.OpAddSaturatedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.AddSaturated", opLen2(ssa.OpAddSaturatedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.AddSaturated", opLen2(ssa.OpAddSaturatedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.AddSaturated", opLen2(ssa.OpAddSaturatedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.AddSaturatedMasked", opLen3(ssa.OpAddSaturatedMaskedInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.AddSaturatedMasked", opLen3(ssa.OpAddSaturatedMaskedInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.AddSaturatedMasked", opLen3(ssa.OpAddSaturatedMaskedInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.AddSaturatedMasked", opLen3(ssa.OpAddSaturatedMaskedInt16x8, 
types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.AddSaturatedMasked", opLen3(ssa.OpAddSaturatedMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.AddSaturatedMasked", opLen3(ssa.OpAddSaturatedMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.AddSaturatedMasked", opLen3(ssa.OpAddSaturatedMaskedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.AddSaturatedMasked", opLen3(ssa.OpAddSaturatedMaskedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.AddSaturatedMasked", opLen3(ssa.OpAddSaturatedMaskedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.AddSaturatedMasked", opLen3(ssa.OpAddSaturatedMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.AddSaturatedMasked", opLen3(ssa.OpAddSaturatedMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.AddSaturatedMasked", opLen3(ssa.OpAddSaturatedMaskedUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.AddSub", opLen2(ssa.OpAddSubFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.AddSub", opLen2(ssa.OpAddSubFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.AddSub", opLen2(ssa.OpAddSubFloat64x2, types.TypeVec128), sys.AMD64) @@ -217,18 +255,30 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float32x8.Ceil", opLen1(ssa.OpCeilFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.Ceil", opLen1(ssa.OpCeilFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.Ceil", opLen1(ssa.OpCeilFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x4.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.CeilWithPrecisionMasked", opLen2Imm8(ssa.OpCeilWithPrecisionMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.CeilWithPrecisionMasked", opLen2Imm8(ssa.OpCeilWithPrecisionMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.CeilWithPrecisionMasked", opLen2Imm8(ssa.OpCeilWithPrecisionMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.CeilWithPrecisionMasked", opLen2Imm8(ssa.OpCeilWithPrecisionMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.CeilWithPrecisionMasked", opLen2Imm8(ssa.OpCeilWithPrecisionMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.CeilWithPrecisionMasked", opLen2Imm8(ssa.OpCeilWithPrecisionMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.CeilScaled", opLen1Imm8(ssa.OpCeilScaledFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.CeilScaled", opLen1Imm8(ssa.OpCeilScaledFloat32x8, types.TypeVec256, 4), sys.AMD64) + 
addF(simdPackage, "Float32x16.CeilScaled", opLen1Imm8(ssa.OpCeilScaledFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.CeilScaled", opLen1Imm8(ssa.OpCeilScaledFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.CeilScaled", opLen1Imm8(ssa.OpCeilScaledFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.CeilScaled", opLen1Imm8(ssa.OpCeilScaledFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.CeilScaledMasked", opLen2Imm8(ssa.OpCeilScaledMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.CeilScaledMasked", opLen2Imm8(ssa.OpCeilScaledMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.CeilScaledMasked", opLen2Imm8(ssa.OpCeilScaledMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.CeilScaledMasked", opLen2Imm8(ssa.OpCeilScaledMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.CeilScaledMasked", opLen2Imm8(ssa.OpCeilScaledMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.CeilScaledMasked", opLen2Imm8(ssa.OpCeilScaledMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.CeilScaledResidue", opLen1Imm8(ssa.OpCeilScaledResidueFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.CeilScaledResidue", opLen1Imm8(ssa.OpCeilScaledResidueFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.CeilScaledResidue", opLen1Imm8(ssa.OpCeilScaledResidueFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.CeilScaledResidue", opLen1Imm8(ssa.OpCeilScaledResidueFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.CeilScaledResidue", opLen1Imm8(ssa.OpCeilScaledResidueFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.CeilScaledResidue", opLen1Imm8(ssa.OpCeilScaledResidueFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.CeilScaledResidueMasked", opLen2Imm8(ssa.OpCeilScaledResidueMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.CeilScaledResidueMasked", opLen2Imm8(ssa.OpCeilScaledResidueMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.CeilScaledResidueMasked", opLen2Imm8(ssa.OpCeilScaledResidueMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.CeilScaledResidueMasked", opLen2Imm8(ssa.OpCeilScaledResidueMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.CeilScaledResidueMasked", opLen2Imm8(ssa.OpCeilScaledResidueMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.CeilScaledResidueMasked", opLen2Imm8(ssa.OpCeilScaledResidueMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.Compress", opLen2(ssa.OpCompressFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Compress", opLen2(ssa.OpCompressFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.Compress", opLen2(ssa.OpCompressFloat32x16, types.TypeVec512), sys.AMD64) @@ -271,54 +321,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float32x4.ConvertToUint32Masked", opLen2(ssa.OpConvertToUint32MaskedFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.ConvertToUint32Masked", opLen2(ssa.OpConvertToUint32MaskedFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.ConvertToUint32Masked", opLen2(ssa.OpConvertToUint32MaskedFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.DiffWithCeilWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithCeilWithPrecisionMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.DiffWithCeilWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithCeilWithPrecisionMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithCeilWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithCeilWithPrecisionMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.DiffWithCeilWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithCeilWithPrecisionMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.DiffWithCeilWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithCeilWithPrecisionMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.DiffWithCeilWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithCeilWithPrecisionMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.DiffWithFloorWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithFloorWithPrecisionMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.DiffWithFloorWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithFloorWithPrecisionMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithFloorWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithFloorWithPrecisionMaskedFloat32x16, types.TypeVec512, 4), 
sys.AMD64) - addF(simdPackage, "Float64x2.DiffWithFloorWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithFloorWithPrecisionMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.DiffWithFloorWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithFloorWithPrecisionMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.DiffWithFloorWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithFloorWithPrecisionMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.DiffWithRoundWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithRoundWithPrecisionMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.DiffWithRoundWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithRoundWithPrecisionMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithRoundWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithRoundWithPrecisionMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.DiffWithRoundWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithRoundWithPrecisionMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.DiffWithRoundWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithRoundWithPrecisionMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.DiffWithRoundWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithRoundWithPrecisionMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.DiffWithTruncWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithTruncWithPrecisionMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.DiffWithTruncWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithTruncWithPrecisionMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, 
"Float32x16.DiffWithTruncWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithTruncWithPrecisionMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.DiffWithTruncWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithTruncWithPrecisionMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.DiffWithTruncWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithTruncWithPrecisionMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.DiffWithTruncWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithTruncWithPrecisionMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.Div", opLen2(ssa.OpDivFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Div", opLen2(ssa.OpDivFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.Div", opLen2(ssa.OpDivFloat32x16, types.TypeVec512), sys.AMD64) @@ -398,18 +400,30 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float32x8.Floor", opLen1(ssa.OpFloorFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.Floor", opLen1(ssa.OpFloorFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.Floor", opLen1(ssa.OpFloorFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x4.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.FloorWithPrecisionMasked", opLen2Imm8(ssa.OpFloorWithPrecisionMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.FloorWithPrecisionMasked", opLen2Imm8(ssa.OpFloorWithPrecisionMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.FloorWithPrecisionMasked", opLen2Imm8(ssa.OpFloorWithPrecisionMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.FloorWithPrecisionMasked", opLen2Imm8(ssa.OpFloorWithPrecisionMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.FloorWithPrecisionMasked", opLen2Imm8(ssa.OpFloorWithPrecisionMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.FloorWithPrecisionMasked", opLen2Imm8(ssa.OpFloorWithPrecisionMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.FloorScaled", opLen1Imm8(ssa.OpFloorScaledFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.FloorScaled", opLen1Imm8(ssa.OpFloorScaledFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.FloorScaled", opLen1Imm8(ssa.OpFloorScaledFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.FloorScaled", opLen1Imm8(ssa.OpFloorScaledFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.FloorScaled", opLen1Imm8(ssa.OpFloorScaledFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, 
"Float64x8.FloorScaled", opLen1Imm8(ssa.OpFloorScaledFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.FloorScaledMasked", opLen2Imm8(ssa.OpFloorScaledMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.FloorScaledMasked", opLen2Imm8(ssa.OpFloorScaledMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.FloorScaledMasked", opLen2Imm8(ssa.OpFloorScaledMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.FloorScaledMasked", opLen2Imm8(ssa.OpFloorScaledMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.FloorScaledMasked", opLen2Imm8(ssa.OpFloorScaledMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.FloorScaledMasked", opLen2Imm8(ssa.OpFloorScaledMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.FloorScaledResidue", opLen1Imm8(ssa.OpFloorScaledResidueFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.FloorScaledResidue", opLen1Imm8(ssa.OpFloorScaledResidueFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.FloorScaledResidue", opLen1Imm8(ssa.OpFloorScaledResidueFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.FloorScaledResidue", opLen1Imm8(ssa.OpFloorScaledResidueFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.FloorScaledResidue", opLen1Imm8(ssa.OpFloorScaledResidueFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.FloorScaledResidue", opLen1Imm8(ssa.OpFloorScaledResidueFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.FloorScaledResidueMasked", opLen2Imm8(ssa.OpFloorScaledResidueMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.FloorScaledResidueMasked", opLen2Imm8(ssa.OpFloorScaledResidueMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.FloorScaledResidueMasked", opLen2Imm8(ssa.OpFloorScaledResidueMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.FloorScaledResidueMasked", opLen2Imm8(ssa.OpFloorScaledResidueMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.FloorScaledResidueMasked", opLen2Imm8(ssa.OpFloorScaledResidueMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.FloorScaledResidueMasked", opLen2Imm8(ssa.OpFloorScaledResidueMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.FusedMultiplyAdd", opLen3(ssa.OpFusedMultiplyAddFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.FusedMultiplyAdd", opLen3(ssa.OpFusedMultiplyAddFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.FusedMultiplyAdd", opLen3(ssa.OpFusedMultiplyAddFloat32x16, types.TypeVec512), sys.AMD64) @@ -860,18 +874,15 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float64x2.Mul", opLen2(ssa.OpMulFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.Mul", opLen2(ssa.OpMulFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.Mul", opLen2(ssa.OpMulFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MulByPowOf2Masked", opLen3(ssa.OpMulByPowOf2MaskedFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MulByPowOf2Masked", opLen3(ssa.OpMulByPowOf2MaskedFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MulByPowOf2Masked", opLen3(ssa.OpMulByPowOf2MaskedFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MulByPowOf2Masked", opLen3(ssa.OpMulByPowOf2MaskedFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MulByPowOf2Masked", opLen3(ssa.OpMulByPowOf2MaskedFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MulByPowOf2Masked", opLen3(ssa.OpMulByPowOf2MaskedFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.Mul", opLen2(ssa.OpMulInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.Mul", opLen2(ssa.OpMulInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.Mul", opLen2(ssa.OpMulInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.Mul", opLen2(ssa.OpMulInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.Mul", opLen2(ssa.OpMulInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.Mul", opLen2(ssa.OpMulInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.Mul", opLen2(ssa.OpMulInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.Mul", opLen2(ssa.OpMulInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.Mul", opLen2(ssa.OpMulInt64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int64x2.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt64x2, types.TypeVec128), sys.AMD64) @@ -900,30 +911,21 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint16x8.MulHighMasked", opLen3(ssa.OpMulHighMaskedUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x16.MulHighMasked", opLen3(ssa.OpMulHighMaskedUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x32.MulHighMasked", opLen3(ssa.OpMulHighMaskedUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.MulLow", opLen2(ssa.OpMulLowInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.MulLow", opLen2(ssa.OpMulLowInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.MulLow", opLen2(ssa.OpMulLowInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.MulLow", opLen2(ssa.OpMulLowInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.MulLow", opLen2(ssa.OpMulLowInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.MulLow", opLen2(ssa.OpMulLowInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.MulLow", opLen2(ssa.OpMulLowInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.MulLow", opLen2(ssa.OpMulLowInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.MulLow", opLen2(ssa.OpMulLowInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.MulLowMasked", opLen3(ssa.OpMulLowMaskedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.MulLowMasked", opLen3(ssa.OpMulLowMaskedInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.MulLowMasked", opLen3(ssa.OpMulLowMaskedInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.MulLowMasked", opLen3(ssa.OpMulLowMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.MulLowMasked", opLen3(ssa.OpMulLowMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.MulLowMasked", opLen3(ssa.OpMulLowMaskedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.MulLowMasked", opLen3(ssa.OpMulLowMaskedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.MulLowMasked", opLen3(ssa.OpMulLowMaskedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.MulLowMasked", opLen3(ssa.OpMulLowMaskedInt64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.MulMasked", opLen3(ssa.OpMulMaskedFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.MulMasked", opLen3(ssa.OpMulMaskedFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.MulMasked", opLen3(ssa.OpMulMaskedFloat32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.MulMasked", opLen3(ssa.OpMulMaskedFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.MulMasked", opLen3(ssa.OpMulMaskedFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.MulMasked", opLen3(ssa.OpMulMaskedFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.MulMasked", opLen3(ssa.OpMulMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.MulMasked", opLen3(ssa.OpMulMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.MulMasked", opLen3(ssa.OpMulMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MulMasked", opLen3(ssa.OpMulMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MulMasked", opLen3(ssa.OpMulMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MulMasked", opLen3(ssa.OpMulMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MulMasked", opLen3(ssa.OpMulMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, 
"Int64x4.MulMasked", opLen3(ssa.OpMulMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MulMasked", opLen3(ssa.OpMulMaskedInt64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.NotEqual", opLen2(ssa.OpNotEqualFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.NotEqual", opLen2(ssa.OpNotEqualFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.NotEqual", opLen2(ssa.OpNotEqualFloat32x16, types.TypeVec512), sys.AMD64) @@ -1026,30 +1028,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Int16x8.PairDotProdMasked", opLen3(ssa.OpPairDotProdMaskedInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.PairDotProdMasked", opLen3(ssa.OpPairDotProdMaskedInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.PairDotProdMasked", opLen3(ssa.OpPairDotProdMaskedInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.PairwiseAdd", opLen2(ssa.OpPairwiseAddFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.PairwiseAdd", opLen2(ssa.OpPairwiseAddFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x2.PairwiseAdd", opLen2(ssa.OpPairwiseAddFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.PairwiseAdd", opLen2(ssa.OpPairwiseAddFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x8.PairwiseAdd", opLen2(ssa.OpPairwiseAddInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.PairwiseAdd", opLen2(ssa.OpPairwiseAddInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x4.PairwiseAdd", opLen2(ssa.OpPairwiseAddInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.PairwiseAdd", opLen2(ssa.OpPairwiseAddInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x8.PairwiseAdd", opLen2(ssa.OpPairwiseAddUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.PairwiseAdd", opLen2(ssa.OpPairwiseAddUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x4.PairwiseAdd", opLen2(ssa.OpPairwiseAddUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.PairwiseAdd", opLen2(ssa.OpPairwiseAddUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x4.PairwiseSub", opLen2(ssa.OpPairwiseSubFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.PairwiseSub", opLen2(ssa.OpPairwiseSubFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x2.PairwiseSub", opLen2(ssa.OpPairwiseSubFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.PairwiseSub", opLen2(ssa.OpPairwiseSubFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x8.PairwiseSub", opLen2(ssa.OpPairwiseSubInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.PairwiseSub", opLen2(ssa.OpPairwiseSubInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x4.PairwiseSub", opLen2(ssa.OpPairwiseSubInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.PairwiseSub", opLen2(ssa.OpPairwiseSubInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x8.PairwiseSub", opLen2(ssa.OpPairwiseSubUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.PairwiseSub", opLen2(ssa.OpPairwiseSubUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x4.PairwiseSub", opLen2(ssa.OpPairwiseSubUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.PairwiseSub", opLen2(ssa.OpPairwiseSubUint32x8, types.TypeVec256), sys.AMD64) 
addF(simdPackage, "Int8x16.Permute", opLen2_21(ssa.OpPermuteInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x16.Permute", opLen2_21(ssa.OpPermuteUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Permute", opLen2_21(ssa.OpPermuteInt8x32, types.TypeVec256), sys.AMD64) @@ -1306,76 +1284,36 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float32x8.Round", opLen1(ssa.OpRoundFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.Round", opLen1(ssa.OpRoundFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.Round", opLen1(ssa.OpRoundFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x4.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.RoundWithPrecisionMasked", opLen2Imm8(ssa.OpRoundWithPrecisionMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.RoundWithPrecisionMasked", opLen2Imm8(ssa.OpRoundWithPrecisionMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.RoundWithPrecisionMasked", opLen2Imm8(ssa.OpRoundWithPrecisionMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.RoundWithPrecisionMasked", opLen2Imm8(ssa.OpRoundWithPrecisionMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.RoundWithPrecisionMasked", opLen2Imm8(ssa.OpRoundWithPrecisionMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.RoundWithPrecisionMasked", opLen2Imm8(ssa.OpRoundWithPrecisionMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Int8x16.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint16x16, types.TypeVec256), sys.AMD64) - 
addF(simdPackage, "Uint16x32.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.RoundScaled", opLen1Imm8(ssa.OpRoundScaledFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.RoundScaled", opLen1Imm8(ssa.OpRoundScaledFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.RoundScaled", opLen1Imm8(ssa.OpRoundScaledFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.RoundScaled", opLen1Imm8(ssa.OpRoundScaledFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.RoundScaled", opLen1Imm8(ssa.OpRoundScaledFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.RoundScaled", opLen1Imm8(ssa.OpRoundScaledFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.RoundScaledMasked", opLen2Imm8(ssa.OpRoundScaledMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.RoundScaledMasked", opLen2Imm8(ssa.OpRoundScaledMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.RoundScaledMasked", opLen2Imm8(ssa.OpRoundScaledMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.RoundScaledMasked", opLen2Imm8(ssa.OpRoundScaledMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.RoundScaledMasked", opLen2Imm8(ssa.OpRoundScaledMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.RoundScaledMasked", opLen2Imm8(ssa.OpRoundScaledMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.RoundScaledResidue", opLen1Imm8(ssa.OpRoundScaledResidueFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.RoundScaledResidue", opLen1Imm8(ssa.OpRoundScaledResidueFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.RoundScaledResidue", opLen1Imm8(ssa.OpRoundScaledResidueFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.RoundScaledResidue", opLen1Imm8(ssa.OpRoundScaledResidueFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.RoundScaledResidue", opLen1Imm8(ssa.OpRoundScaledResidueFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.RoundScaledResidue", opLen1Imm8(ssa.OpRoundScaledResidueFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.RoundScaledResidueMasked", opLen2Imm8(ssa.OpRoundScaledResidueMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.RoundScaledResidueMasked", opLen2Imm8(ssa.OpRoundScaledResidueMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.RoundScaledResidueMasked", opLen2Imm8(ssa.OpRoundScaledResidueMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.RoundScaledResidueMasked", opLen2Imm8(ssa.OpRoundScaledResidueMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.RoundScaledResidueMasked", opLen2Imm8(ssa.OpRoundScaledResidueMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.RoundScaledResidueMasked", opLen2Imm8(ssa.OpRoundScaledResidueMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Int32x4.SaturatedAddDotProd", opLen3(ssa.OpSaturatedAddDotProdInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.SaturatedAddDotProd", opLen3(ssa.OpSaturatedAddDotProdInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x16.SaturatedAddDotProd", 
opLen3(ssa.OpSaturatedAddDotProdInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.SaturatedAddDotProdMasked", opLen4(ssa.OpSaturatedAddDotProdMaskedInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.SaturatedAddDotProdMasked", opLen4(ssa.OpSaturatedAddDotProdMaskedInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x16.SaturatedAddDotProdMasked", opLen4(ssa.OpSaturatedAddDotProdMaskedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.SaturatedPairwiseAdd", opLen2(ssa.OpSaturatedPairwiseAddInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.SaturatedPairwiseAdd", opLen2(ssa.OpSaturatedPairwiseAddInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x8.SaturatedPairwiseSub", opLen2(ssa.OpSaturatedPairwiseSubInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.SaturatedPairwiseSub", opLen2(ssa.OpSaturatedPairwiseSubInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x16.SaturatedSub", opLen2(ssa.OpSaturatedSubInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.SaturatedSub", opLen2(ssa.OpSaturatedSubInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.SaturatedSub", opLen2(ssa.OpSaturatedSubInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.SaturatedSub", opLen2(ssa.OpSaturatedSubInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.SaturatedSub", opLen2(ssa.OpSaturatedSubInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.SaturatedSub", opLen2(ssa.OpSaturatedSubInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.SaturatedSub", opLen2(ssa.OpSaturatedSubUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.SaturatedSub", opLen2(ssa.OpSaturatedSubUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.SaturatedSub", opLen2(ssa.OpSaturatedSubUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.SaturatedSub", opLen2(ssa.OpSaturatedSubUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, 
"Uint16x16.SaturatedSub", opLen2(ssa.OpSaturatedSubUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.SaturatedSub", opLen2(ssa.OpSaturatedSubUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.SaturatedSubMasked", opLen3(ssa.OpSaturatedSubMaskedInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.SaturatedSubMasked", opLen3(ssa.OpSaturatedSubMaskedInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.SaturatedSubMasked", opLen3(ssa.OpSaturatedSubMaskedInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.SaturatedSubMasked", opLen3(ssa.OpSaturatedSubMaskedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.SaturatedSubMasked", opLen3(ssa.OpSaturatedSubMaskedInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.SaturatedSubMasked", opLen3(ssa.OpSaturatedSubMaskedInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.SaturatedSubMasked", opLen3(ssa.OpSaturatedSubMaskedUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.SaturatedSubMasked", opLen3(ssa.OpSaturatedSubMaskedUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.SaturatedSubMasked", opLen3(ssa.OpSaturatedSubMaskedUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.SaturatedSubMasked", opLen3(ssa.OpSaturatedSubMaskedUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.SaturatedSubMasked", opLen3(ssa.OpSaturatedSubMaskedUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.SaturatedSubMasked", opLen3(ssa.OpSaturatedSubMaskedUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x16.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x32.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x64.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint8x64, types.TypeVec512), sys.AMD64) @@ -1388,6 +1326,18 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Int8x16.SaturatedUnsignedSignedQuadDotProdAccumulateMasked", opLen4_31(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.SaturatedUnsignedSignedQuadDotProdAccumulateMasked", opLen4_31(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x64.SaturatedUnsignedSignedQuadDotProdAccumulateMasked", opLen4_31(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.Scale", opLen2(ssa.OpScaleFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.Scale", opLen2(ssa.OpScaleFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.Scale", opLen2(ssa.OpScaleFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.Scale", opLen2(ssa.OpScaleFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.Scale", opLen2(ssa.OpScaleFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.Scale", opLen2(ssa.OpScaleFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.ScaleMasked", opLen3(ssa.OpScaleMaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.ScaleMasked", opLen3(ssa.OpScaleMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.ScaleMasked", opLen3(ssa.OpScaleMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.ScaleMasked", opLen3(ssa.OpScaleMaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.ScaleMasked", opLen3(ssa.OpScaleMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.ScaleMasked", opLen3(ssa.OpScaleMaskedFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x8.Set128", opLen2Imm8(ssa.OpSet128Float32x8, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Float64x4.Set128", opLen2Imm8(ssa.OpSet128Float64x4, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Int8x32.Set128", opLen2Imm8(ssa.OpSet128Int8x32, types.TypeVec256, 0), sys.AMD64) @@ -1772,22 +1722,72 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.SubMasked", opLen3(ssa.OpSubMaskedUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.SubMasked", opLen3(ssa.OpSubMaskedUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.SubMasked", opLen3(ssa.OpSubMaskedUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.SubPairs", opLen2(ssa.OpSubPairsFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.SubPairs", opLen2(ssa.OpSubPairsFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x2.SubPairs", opLen2(ssa.OpSubPairsFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.SubPairs", opLen2(ssa.OpSubPairsFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x8.SubPairs", opLen2(ssa.OpSubPairsInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.SubPairs", opLen2(ssa.OpSubPairsInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x4.SubPairs", opLen2(ssa.OpSubPairsInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.SubPairs", opLen2(ssa.OpSubPairsInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x8.SubPairs", opLen2(ssa.OpSubPairsUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.SubPairs", opLen2(ssa.OpSubPairsUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x4.SubPairs", opLen2(ssa.OpSubPairsUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.SubPairs", opLen2(ssa.OpSubPairsUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x8.SubPairsSaturated", opLen2(ssa.OpSubPairsSaturatedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.SubPairsSaturated", opLen2(ssa.OpSubPairsSaturatedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x16.SubSaturated", opLen2(ssa.OpSubSaturatedInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.SubSaturated", opLen2(ssa.OpSubSaturatedInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.SubSaturated", opLen2(ssa.OpSubSaturatedInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.SubSaturated", opLen2(ssa.OpSubSaturatedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.SubSaturated", opLen2(ssa.OpSubSaturatedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.SubSaturated", opLen2(ssa.OpSubSaturatedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.SubSaturated", opLen2(ssa.OpSubSaturatedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.SubSaturated", opLen2(ssa.OpSubSaturatedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.SubSaturated", opLen2(ssa.OpSubSaturatedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.SubSaturated", opLen2(ssa.OpSubSaturatedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.SubSaturated", opLen2(ssa.OpSubSaturatedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.SubSaturated", opLen2(ssa.OpSubSaturatedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.SubSaturatedMasked", opLen3(ssa.OpSubSaturatedMaskedInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.SubSaturatedMasked", opLen3(ssa.OpSubSaturatedMaskedInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.SubSaturatedMasked", opLen3(ssa.OpSubSaturatedMaskedInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.SubSaturatedMasked", opLen3(ssa.OpSubSaturatedMaskedInt16x8, 
types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.SubSaturatedMasked", opLen3(ssa.OpSubSaturatedMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.SubSaturatedMasked", opLen3(ssa.OpSubSaturatedMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.SubSaturatedMasked", opLen3(ssa.OpSubSaturatedMaskedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.SubSaturatedMasked", opLen3(ssa.OpSubSaturatedMaskedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.SubSaturatedMasked", opLen3(ssa.OpSubSaturatedMaskedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.SubSaturatedMasked", opLen3(ssa.OpSubSaturatedMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.SubSaturatedMasked", opLen3(ssa.OpSubSaturatedMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.SubSaturatedMasked", opLen3(ssa.OpSubSaturatedMaskedUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Trunc", opLen1(ssa.OpTruncFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Trunc", opLen1(ssa.OpTruncFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.Trunc", opLen1(ssa.OpTruncFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.Trunc", opLen1(ssa.OpTruncFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x4.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.TruncWithPrecisionMasked", opLen2Imm8(ssa.OpTruncWithPrecisionMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.TruncWithPrecisionMasked", opLen2Imm8(ssa.OpTruncWithPrecisionMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.TruncWithPrecisionMasked", opLen2Imm8(ssa.OpTruncWithPrecisionMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.TruncWithPrecisionMasked", opLen2Imm8(ssa.OpTruncWithPrecisionMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.TruncWithPrecisionMasked", opLen2Imm8(ssa.OpTruncWithPrecisionMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.TruncWithPrecisionMasked", opLen2Imm8(ssa.OpTruncWithPrecisionMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.TruncScaled", opLen1Imm8(ssa.OpTruncScaledFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.TruncScaled", opLen1Imm8(ssa.OpTruncScaledFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.TruncScaled", opLen1Imm8(ssa.OpTruncScaledFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.TruncScaled", opLen1Imm8(ssa.OpTruncScaledFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, 
"Float64x4.TruncScaled", opLen1Imm8(ssa.OpTruncScaledFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.TruncScaled", opLen1Imm8(ssa.OpTruncScaledFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.TruncScaledMasked", opLen2Imm8(ssa.OpTruncScaledMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.TruncScaledMasked", opLen2Imm8(ssa.OpTruncScaledMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.TruncScaledMasked", opLen2Imm8(ssa.OpTruncScaledMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.TruncScaledMasked", opLen2Imm8(ssa.OpTruncScaledMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.TruncScaledMasked", opLen2Imm8(ssa.OpTruncScaledMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.TruncScaledMasked", opLen2Imm8(ssa.OpTruncScaledMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.TruncScaledResidue", opLen1Imm8(ssa.OpTruncScaledResidueFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.TruncScaledResidue", opLen1Imm8(ssa.OpTruncScaledResidueFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.TruncScaledResidue", opLen1Imm8(ssa.OpTruncScaledResidueFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.TruncScaledResidue", opLen1Imm8(ssa.OpTruncScaledResidueFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.TruncScaledResidue", opLen1Imm8(ssa.OpTruncScaledResidueFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.TruncScaledResidue", opLen1Imm8(ssa.OpTruncScaledResidueFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.TruncScaledResidueMasked", opLen2Imm8(ssa.OpTruncScaledResidueMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.TruncScaledResidueMasked", opLen2Imm8(ssa.OpTruncScaledResidueMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.TruncScaledResidueMasked", opLen2Imm8(ssa.OpTruncScaledResidueMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.TruncScaledResidueMasked", opLen2Imm8(ssa.OpTruncScaledResidueMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.TruncScaledResidueMasked", opLen2Imm8(ssa.OpTruncScaledResidueMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.TruncScaledResidueMasked", opLen2Imm8(ssa.OpTruncScaledResidueMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Int8x16.UnsignedSignedQuadDotProdAccumulate", opLen3_31(ssa.OpUnsignedSignedQuadDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.UnsignedSignedQuadDotProdAccumulate", opLen3_31(ssa.OpUnsignedSignedQuadDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x64.UnsignedSignedQuadDotProdAccumulate", opLen3_31(ssa.OpUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) diff --git a/src/simd/binary_test.go b/src/simd/binary_test.go index b7daf736f4e264..c82bc070e12322 100644 --- a/src/simd/binary_test.go +++ b/src/simd/binary_test.go @@ -309,42 +309,42 @@ func TestMul(t *testing.T) { testFloat64x2Binary(t, simd.Float64x2.Mul, mulSlice[float64]) testFloat64x4Binary(t, simd.Float64x4.Mul, mulSlice[float64]) - testInt16x16Binary(t, simd.Int16x16.MulLow, mulSlice[int16]) - testInt16x8Binary(t, simd.Int16x8.MulLow, 
mulSlice[int16]) - testInt32x4Binary(t, simd.Int32x4.MulLow, mulSlice[int32]) - testInt32x8Binary(t, simd.Int32x8.MulLow, mulSlice[int32]) + testInt16x16Binary(t, simd.Int16x16.Mul, mulSlice[int16]) + testInt16x8Binary(t, simd.Int16x8.Mul, mulSlice[int16]) + testInt32x4Binary(t, simd.Int32x4.Mul, mulSlice[int32]) + testInt32x8Binary(t, simd.Int32x8.Mul, mulSlice[int32]) - // testInt8x16Binary(t, simd.Int8x16.MulLow, mulSlice[int8]) // nope - // testInt8x32Binary(t, simd.Int8x32.MulLow, mulSlice[int8]) + // testInt8x16Binary(t, simd.Int8x16.Mul, mulSlice[int8]) // nope + // testInt8x32Binary(t, simd.Int8x32.Mul, mulSlice[int8]) - // TODO we should be able to do these, there's no difference between signed/unsigned mulLow - // testUint16x16Binary(t, simd.Uint16x16.MulLow, mulSlice[uint16]) - // testUint16x8Binary(t, simd.Uint16x8.MulLow, mulSlice[uint16]) - // testUint32x4Binary(t, simd.Uint32x4.MulLow, mulSlice[uint32]) - // testUint32x8Binary(t, simd.Uint32x8.MulLow, mulSlice[uint32]) - // testUint64x2Binary(t, simd.Uint64x2.MulLow, mulSlice[uint64]) - // testUint64x4Binary(t, simd.Uint64x4.MulLow, mulSlice[uint64]) + // TODO we should be able to do these, there's no difference between signed/unsigned Mul + // testUint16x16Binary(t, simd.Uint16x16.Mul, mulSlice[uint16]) + // testUint16x8Binary(t, simd.Uint16x8.Mul, mulSlice[uint16]) + // testUint32x4Binary(t, simd.Uint32x4.Mul, mulSlice[uint32]) + // testUint32x8Binary(t, simd.Uint32x8.Mul, mulSlice[uint32]) + // testUint64x2Binary(t, simd.Uint64x2.Mul, mulSlice[uint64]) + // testUint64x4Binary(t, simd.Uint64x4.Mul, mulSlice[uint64]) - // testUint8x16Binary(t, simd.Uint8x16.MulLow, mulSlice[uint8]) // nope - // testUint8x32Binary(t, simd.Uint8x32.MulLow, mulSlice[uint8]) + // testUint8x16Binary(t, simd.Uint8x16.Mul, mulSlice[uint8]) // nope + // testUint8x32Binary(t, simd.Uint8x32.Mul, mulSlice[uint8]) if simd.HasAVX512() { - testInt64x2Binary(t, simd.Int64x2.MulLow, mulSlice[int64]) // avx512 only - testInt64x4Binary(t, simd.Int64x4.MulLow, mulSlice[int64]) + testInt64x2Binary(t, simd.Int64x2.Mul, mulSlice[int64]) // avx512 only + testInt64x4Binary(t, simd.Int64x4.Mul, mulSlice[int64]) testFloat32x16Binary(t, simd.Float32x16.Mul, mulSlice[float32]) testFloat64x8Binary(t, simd.Float64x8.Mul, mulSlice[float64]) - // testInt8x64Binary(t, simd.Int8x64.MulLow, mulSlice[int8]) // nope - testInt16x32Binary(t, simd.Int16x32.MulLow, mulSlice[int16]) - testInt32x16Binary(t, simd.Int32x16.MulLow, mulSlice[int32]) - testInt64x8Binary(t, simd.Int64x8.MulLow, mulSlice[int64]) - // testUint8x64Binary(t, simd.Uint8x64.MulLow, mulSlice[uint8]) // nope + // testInt8x64Binary(t, simd.Int8x64.Mul, mulSlice[int8]) // nope + testInt16x32Binary(t, simd.Int16x32.Mul, mulSlice[int16]) + testInt32x16Binary(t, simd.Int32x16.Mul, mulSlice[int32]) + testInt64x8Binary(t, simd.Int64x8.Mul, mulSlice[int64]) + // testUint8x64Binary(t, simd.Uint8x64.Mul, mulSlice[uint8]) // nope // TODO signed should do the job - // testUint16x32Binary(t, simd.Uint16x32.MulLow, mulSlice[uint16]) - // testUint32x16Binary(t, simd.Uint32x16.MulLow, mulSlice[uint32]) - // testUint64x8Binary(t, simd.Uint64x8.MulLow, mulSlice[uint64]) + // testUint16x32Binary(t, simd.Uint16x32.Mul, mulSlice[uint16]) + // testUint32x16Binary(t, simd.Uint32x16.Mul, mulSlice[uint32]) + // testUint64x8Binary(t, simd.Uint64x8.Mul, mulSlice[uint64]) } } diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 5776350fe9f136..dc42e73a53a2dc 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ 
-556,6 +556,242 @@ func (x Uint64x4) AddMasked(y Uint64x4, mask Mask64x4) Uint64x4 // Asm: VPADDQ, CPU Feature: AVX512F func (x Uint64x8) AddMasked(y Uint64x8, mask Mask64x8) Uint64x8 +/* AddPairs */ + +// AddPairs horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VHADDPS, CPU Feature: AVX +func (x Float32x4) AddPairs(y Float32x4) Float32x4 + +// AddPairs horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VHADDPS, CPU Feature: AVX +func (x Float32x8) AddPairs(y Float32x8) Float32x8 + +// AddPairs horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VHADDPD, CPU Feature: AVX +func (x Float64x2) AddPairs(y Float64x2) Float64x2 + +// AddPairs horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VHADDPD, CPU Feature: AVX +func (x Float64x4) AddPairs(y Float64x4) Float64x4 + +// AddPairs horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VPHADDW, CPU Feature: AVX +func (x Int16x8) AddPairs(y Int16x8) Int16x8 + +// AddPairs horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VPHADDW, CPU Feature: AVX2 +func (x Int16x16) AddPairs(y Int16x16) Int16x16 + +// AddPairs horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VPHADDD, CPU Feature: AVX +func (x Int32x4) AddPairs(y Int32x4) Int32x4 + +// AddPairs horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VPHADDD, CPU Feature: AVX2 +func (x Int32x8) AddPairs(y Int32x8) Int32x8 + +// AddPairs horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VPHADDW, CPU Feature: AVX +func (x Uint16x8) AddPairs(y Uint16x8) Uint16x8 + +// AddPairs horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VPHADDW, CPU Feature: AVX2 +func (x Uint16x16) AddPairs(y Uint16x16) Uint16x16 + +// AddPairs horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VPHADDD, CPU Feature: AVX +func (x Uint32x4) AddPairs(y Uint32x4) Uint32x4 + +// AddPairs horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VPHADDD, CPU Feature: AVX2 +func (x Uint32x8) AddPairs(y Uint32x8) Uint32x8 + +/* AddPairsSaturated */ + +// AddPairsSaturated horizontally adds adjacent pairs of elements with saturation. 
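
(Illustrative sketch, not from this patch: the AddPairs lane order described in the doc comments above is easier to see as a scalar reference. The helper name below is made up, and it only mirrors the 4-lane shape; on the underlying AVX instructions the wider shapes appear to repeat this pattern within each 128-bit half, which the comments elide.)

package sketch

// addPairs4 mirrors the documented AddPairs result for a 4-lane vector:
// adjacent pairs of the argument y land in the low lanes, adjacent pairs
// of the receiver x land in the high lanes.
func addPairs4(x, y [4]int32) [4]int32 {
	return [4]int32{
		y[0] + y[1],
		y[2] + y[3],
		x[0] + x[1],
		x[2] + x[3],
	}
}
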
+// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VPHADDSW, CPU Feature: AVX +func (x Int16x8) AddPairsSaturated(y Int16x8) Int16x8 + +// AddPairsSaturated horizontally adds adjacent pairs of elements with saturation. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VPHADDSW, CPU Feature: AVX2 +func (x Int16x16) AddPairsSaturated(y Int16x16) Int16x16 + +/* AddSaturated */ + +// AddSaturated adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSB, CPU Feature: AVX +func (x Int8x16) AddSaturated(y Int8x16) Int8x16 + +// AddSaturated adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSB, CPU Feature: AVX2 +func (x Int8x32) AddSaturated(y Int8x32) Int8x32 + +// AddSaturated adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSB, CPU Feature: AVX512BW +func (x Int8x64) AddSaturated(y Int8x64) Int8x64 + +// AddSaturated adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSW, CPU Feature: AVX +func (x Int16x8) AddSaturated(y Int16x8) Int16x8 + +// AddSaturated adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSW, CPU Feature: AVX2 +func (x Int16x16) AddSaturated(y Int16x16) Int16x16 + +// AddSaturated adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSW, CPU Feature: AVX512BW +func (x Int16x32) AddSaturated(y Int16x32) Int16x32 + +// AddSaturated adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSB, CPU Feature: AVX +func (x Uint8x16) AddSaturated(y Uint8x16) Uint8x16 + +// AddSaturated adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSB, CPU Feature: AVX2 +func (x Uint8x32) AddSaturated(y Uint8x32) Uint8x32 + +// AddSaturated adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSB, CPU Feature: AVX512BW +func (x Uint8x64) AddSaturated(y Uint8x64) Uint8x64 + +// AddSaturated adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSW, CPU Feature: AVX +func (x Uint16x8) AddSaturated(y Uint16x8) Uint16x8 + +// AddSaturated adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSW, CPU Feature: AVX2 +func (x Uint16x16) AddSaturated(y Uint16x16) Uint16x16 + +// AddSaturated adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSW, CPU Feature: AVX512BW +func (x Uint16x32) AddSaturated(y Uint16x32) Uint16x32 + +/* AddSaturatedMasked */ + +// AddSaturatedMasked adds corresponding elements of two vectors with saturation. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPADDSB, CPU Feature: AVX512BW +func (x Int8x16) AddSaturatedMasked(y Int8x16, mask Mask8x16) Int8x16 + +// AddSaturatedMasked adds corresponding elements of two vectors with saturation. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPADDSB, CPU Feature: AVX512BW +func (x Int8x32) AddSaturatedMasked(y Int8x32, mask Mask8x32) Int8x32 + +// AddSaturatedMasked adds corresponding elements of two vectors with saturation. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPADDSB, CPU Feature: AVX512BW +func (x Int8x64) AddSaturatedMasked(y Int8x64, mask Mask8x64) Int8x64 + +// AddSaturatedMasked adds corresponding elements of two vectors with saturation. 
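
(Illustrative sketch, not from this patch: "with saturation" above means sums are clamped to the lane type's range rather than wrapping, and the masked variants apply the op only in lanes selected by the mask. The helper names are made up, and the sketch assumes unselected lanes come back zeroed; the comments here do not pin down merge-versus-zero behaviour, so treat that part as an assumption.)

package sketch

import "math"

// addSaturatedInt16 clamps the widened sum back into int16 range, which is
// the per-lane behaviour AddSaturated describes.
func addSaturatedInt16(a, b int16) int16 {
	s := int32(a) + int32(b)
	if s > math.MaxInt16 {
		return math.MaxInt16
	}
	if s < math.MinInt16 {
		return math.MinInt16
	}
	return int16(s)
}

// addSaturatedMasked8 sketches the masked form for an 8-lane vector; lanes
// with a false mask bit are left at zero (assumption, see note above).
func addSaturatedMasked8(x, y [8]int16, mask [8]bool) [8]int16 {
	var r [8]int16
	for i := range r {
		if mask[i] {
			r[i] = addSaturatedInt16(x[i], y[i])
		}
	}
	return r
}
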
+// +// This operation is applied selectively under a write mask. +// +// Asm: VPADDSW, CPU Feature: AVX512BW +func (x Int16x8) AddSaturatedMasked(y Int16x8, mask Mask16x8) Int16x8 + +// AddSaturatedMasked adds corresponding elements of two vectors with saturation. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPADDSW, CPU Feature: AVX512BW +func (x Int16x16) AddSaturatedMasked(y Int16x16, mask Mask16x16) Int16x16 + +// AddSaturatedMasked adds corresponding elements of two vectors with saturation. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPADDSW, CPU Feature: AVX512BW +func (x Int16x32) AddSaturatedMasked(y Int16x32, mask Mask16x32) Int16x32 + +// AddSaturatedMasked adds corresponding elements of two vectors with saturation. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPADDSB, CPU Feature: AVX512BW +func (x Uint8x16) AddSaturatedMasked(y Uint8x16, mask Mask8x16) Uint8x16 + +// AddSaturatedMasked adds corresponding elements of two vectors with saturation. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPADDSB, CPU Feature: AVX512BW +func (x Uint8x32) AddSaturatedMasked(y Uint8x32, mask Mask8x32) Uint8x32 + +// AddSaturatedMasked adds corresponding elements of two vectors with saturation. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPADDSB, CPU Feature: AVX512BW +func (x Uint8x64) AddSaturatedMasked(y Uint8x64, mask Mask8x64) Uint8x64 + +// AddSaturatedMasked adds corresponding elements of two vectors with saturation. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPADDSW, CPU Feature: AVX512BW +func (x Uint16x8) AddSaturatedMasked(y Uint16x8, mask Mask16x8) Uint16x8 + +// AddSaturatedMasked adds corresponding elements of two vectors with saturation. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPADDSW, CPU Feature: AVX512BW +func (x Uint16x16) AddSaturatedMasked(y Uint16x16, mask Mask16x16) Uint16x16 + +// AddSaturatedMasked adds corresponding elements of two vectors with saturation. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPADDSW, CPU Feature: AVX512BW +func (x Uint16x32) AddSaturatedMasked(y Uint16x32, mask Mask16x32) Uint16x32 + /* AddSub */ // AddSub subtracts even elements and adds odd elements of two vectors. @@ -1244,105 +1480,205 @@ func (x Float64x2) Ceil() Float64x2 // Asm: VROUNDPD, CPU Feature: AVX func (x Float64x4) Ceil() Float64x4 -/* CeilWithPrecision */ +/* CeilScaled */ -// CeilWithPrecision rounds elements up with specified precision. +// CeilScaled rounds elements up with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x4) CeilWithPrecision(prec uint8) Float32x4 +func (x Float32x4) CeilScaled(prec uint8) Float32x4 -// CeilWithPrecision rounds elements up with specified precision. +// CeilScaled rounds elements up with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x8) CeilWithPrecision(prec uint8) Float32x8 +func (x Float32x8) CeilScaled(prec uint8) Float32x8 -// CeilWithPrecision rounds elements up with specified precision. +// CeilScaled rounds elements up with specified precision. 
// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x16) CeilWithPrecision(prec uint8) Float32x16 +func (x Float32x16) CeilScaled(prec uint8) Float32x16 -// CeilWithPrecision rounds elements up with specified precision. +// CeilScaled rounds elements up with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x2) CeilWithPrecision(prec uint8) Float64x2 +func (x Float64x2) CeilScaled(prec uint8) Float64x2 -// CeilWithPrecision rounds elements up with specified precision. +// CeilScaled rounds elements up with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x4) CeilWithPrecision(prec uint8) Float64x4 +func (x Float64x4) CeilScaled(prec uint8) Float64x4 -// CeilWithPrecision rounds elements up with specified precision. +// CeilScaled rounds elements up with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x8) CeilWithPrecision(prec uint8) Float64x8 +func (x Float64x8) CeilScaled(prec uint8) Float64x8 -/* CeilWithPrecisionMasked */ +/* CeilScaledMasked */ -// CeilWithPrecisionMasked rounds elements up with specified precision. +// CeilScaledMasked rounds elements up with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x4) CeilWithPrecisionMasked(prec uint8, mask Mask32x4) Float32x4 +func (x Float32x4) CeilScaledMasked(prec uint8, mask Mask32x4) Float32x4 -// CeilWithPrecisionMasked rounds elements up with specified precision. +// CeilScaledMasked rounds elements up with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x8) CeilWithPrecisionMasked(prec uint8, mask Mask32x8) Float32x8 +func (x Float32x8) CeilScaledMasked(prec uint8, mask Mask32x8) Float32x8 -// CeilWithPrecisionMasked rounds elements up with specified precision. +// CeilScaledMasked rounds elements up with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x16) CeilWithPrecisionMasked(prec uint8, mask Mask32x16) Float32x16 +func (x Float32x16) CeilScaledMasked(prec uint8, mask Mask32x16) Float32x16 -// CeilWithPrecisionMasked rounds elements up with specified precision. +// CeilScaledMasked rounds elements up with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x2) CeilWithPrecisionMasked(prec uint8, mask Mask64x2) Float64x2 +func (x Float64x2) CeilScaledMasked(prec uint8, mask Mask64x2) Float64x2 -// CeilWithPrecisionMasked rounds elements up with specified precision. +// CeilScaledMasked rounds elements up with specified precision. 
// // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x4) CeilWithPrecisionMasked(prec uint8, mask Mask64x4) Float64x4 +func (x Float64x4) CeilScaledMasked(prec uint8, mask Mask64x4) Float64x4 -// CeilWithPrecisionMasked rounds elements up with specified precision. +// CeilScaledMasked rounds elements up with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x8) CeilWithPrecisionMasked(prec uint8, mask Mask64x8) Float64x8 +func (x Float64x8) CeilScaledMasked(prec uint8, mask Mask64x8) Float64x8 + +/* CeilScaledResidue */ + +// CeilScaledResidue computes the difference after ceiling with specified precision. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPS, CPU Feature: AVX512DQ +func (x Float32x4) CeilScaledResidue(prec uint8) Float32x4 + +// CeilScaledResidue computes the difference after ceiling with specified precision. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPS, CPU Feature: AVX512DQ +func (x Float32x8) CeilScaledResidue(prec uint8) Float32x8 + +// CeilScaledResidue computes the difference after ceiling with specified precision. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPS, CPU Feature: AVX512DQ +func (x Float32x16) CeilScaledResidue(prec uint8) Float32x16 + +// CeilScaledResidue computes the difference after ceiling with specified precision. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPD, CPU Feature: AVX512DQ +func (x Float64x2) CeilScaledResidue(prec uint8) Float64x2 + +// CeilScaledResidue computes the difference after ceiling with specified precision. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPD, CPU Feature: AVX512DQ +func (x Float64x4) CeilScaledResidue(prec uint8) Float64x4 + +// CeilScaledResidue computes the difference after ceiling with specified precision. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPD, CPU Feature: AVX512DQ +func (x Float64x8) CeilScaledResidue(prec uint8) Float64x8 + +/* CeilScaledResidueMasked */ + +// CeilScaledResidueMasked computes the difference after ceiling with specified precision. +// +// This operation is applied selectively under a write mask. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPS, CPU Feature: AVX512DQ +func (x Float32x4) CeilScaledResidueMasked(prec uint8, mask Mask32x4) Float32x4 + +// CeilScaledResidueMasked computes the difference after ceiling with specified precision. +// +// This operation is applied selectively under a write mask. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPS, CPU Feature: AVX512DQ +func (x Float32x8) CeilScaledResidueMasked(prec uint8, mask Mask32x8) Float32x8 + +// CeilScaledResidueMasked computes the difference after ceiling with specified precision. +// +// This operation is applied selectively under a write mask. 
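
(Illustrative sketch, not from this patch: in the new names above, "Scaled" replaces the old "WithPrecision" suffix, and prec selects a granularity of 2^-prec. The helpers below are made up and reflect one reading of the VRNDSCALE/VREDUCE descriptions, so the exact rounding details are an assumption; the point is the relationship x == CeilScaled(x, prec) + CeilScaledResidue(x, prec), up to floating-point rounding. For example, prec == 2 ceils to multiples of 0.25.)

package sketch

import "math"

// ceilScaled rounds x up to a multiple of 2^-prec (assumed reading of the
// VRNDSCALEP* behaviour behind CeilScaled).
func ceilScaled(x float64, prec uint8) float64 {
	scale := math.Ldexp(1, int(prec)) // 2^prec
	return math.Ceil(x*scale) / scale
}

// ceilScaledResidue is the difference left over after that ceiling,
// matching the "difference after ceiling" wording for CeilScaledResidue.
func ceilScaledResidue(x float64, prec uint8) float64 {
	return x - ceilScaled(x, prec)
}
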
+// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPS, CPU Feature: AVX512DQ +func (x Float32x16) CeilScaledResidueMasked(prec uint8, mask Mask32x16) Float32x16 + +// CeilScaledResidueMasked computes the difference after ceiling with specified precision. +// +// This operation is applied selectively under a write mask. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPD, CPU Feature: AVX512DQ +func (x Float64x2) CeilScaledResidueMasked(prec uint8, mask Mask64x2) Float64x2 + +// CeilScaledResidueMasked computes the difference after ceiling with specified precision. +// +// This operation is applied selectively under a write mask. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPD, CPU Feature: AVX512DQ +func (x Float64x4) CeilScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4 + +// CeilScaledResidueMasked computes the difference after ceiling with specified precision. +// +// This operation is applied selectively under a write mask. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPD, CPU Feature: AVX512DQ +func (x Float64x8) CeilScaledResidueMasked(prec uint8, mask Mask64x8) Float64x8 /* Compress */ @@ -1606,429 +1942,29 @@ func (x Float32x8) ConvertToUint32Masked(mask Mask32x8) Uint32x8 // Asm: VCVTPS2UDQ, CPU Feature: AVX512F func (x Float32x16) ConvertToUint32Masked(mask Mask32x16) Uint32x16 -/* DiffWithCeilWithPrecision */ +/* Div */ -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// Div divides elements of two vectors. // -// Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x4) DiffWithCeilWithPrecision(prec uint8) Float32x4 +// Asm: VDIVPS, CPU Feature: AVX +func (x Float32x4) Div(y Float32x4) Float32x4 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// Div divides elements of two vectors. // -// Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x8) DiffWithCeilWithPrecision(prec uint8) Float32x8 +// Asm: VDIVPS, CPU Feature: AVX +func (x Float32x8) Div(y Float32x8) Float32x8 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// Div divides elements of two vectors. // -// Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x16) DiffWithCeilWithPrecision(prec uint8) Float32x16 +// Asm: VDIVPS, CPU Feature: AVX512F +func (x Float32x16) Div(y Float32x16) Float32x16 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// Div divides elements of two vectors. // -// Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x2) DiffWithCeilWithPrecision(prec uint8) Float64x2 +// Asm: VDIVPD, CPU Feature: AVX +func (x Float64x2) Div(y Float64x2) Float64x2 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. 
-// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x4) DiffWithCeilWithPrecision(prec uint8) Float64x4 - -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x8) DiffWithCeilWithPrecision(prec uint8) Float64x8 - -/* DiffWithCeilWithPrecisionMasked */ - -// DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x4) DiffWithCeilWithPrecisionMasked(prec uint8, mask Mask32x4) Float32x4 - -// DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x8) DiffWithCeilWithPrecisionMasked(prec uint8, mask Mask32x8) Float32x8 - -// DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x16) DiffWithCeilWithPrecisionMasked(prec uint8, mask Mask32x16) Float32x16 - -// DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x2) DiffWithCeilWithPrecisionMasked(prec uint8, mask Mask64x2) Float64x2 - -// DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x4) DiffWithCeilWithPrecisionMasked(prec uint8, mask Mask64x4) Float64x4 - -// DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x8) DiffWithCeilWithPrecisionMasked(prec uint8, mask Mask64x8) Float64x8 - -/* DiffWithFloorWithPrecision */ - -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x4) DiffWithFloorWithPrecision(prec uint8) Float32x4 - -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. 
-// -// Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x8) DiffWithFloorWithPrecision(prec uint8) Float32x8 - -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x16) DiffWithFloorWithPrecision(prec uint8) Float32x16 - -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x2) DiffWithFloorWithPrecision(prec uint8) Float64x2 - -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x4) DiffWithFloorWithPrecision(prec uint8) Float64x4 - -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x8) DiffWithFloorWithPrecision(prec uint8) Float64x8 - -/* DiffWithFloorWithPrecisionMasked */ - -// DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x4) DiffWithFloorWithPrecisionMasked(prec uint8, mask Mask32x4) Float32x4 - -// DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x8) DiffWithFloorWithPrecisionMasked(prec uint8, mask Mask32x8) Float32x8 - -// DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x16) DiffWithFloorWithPrecisionMasked(prec uint8, mask Mask32x16) Float32x16 - -// DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x2) DiffWithFloorWithPrecisionMasked(prec uint8, mask Mask64x2) Float64x2 - -// DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x4) DiffWithFloorWithPrecisionMasked(prec uint8, mask Mask64x4) Float64x4 - -// DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. -// -// This operation is applied selectively under a write mask. 
-// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x8) DiffWithFloorWithPrecisionMasked(prec uint8, mask Mask64x8) Float64x8 - -/* DiffWithRoundWithPrecision */ - -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x4) DiffWithRoundWithPrecision(prec uint8) Float32x4 - -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x8) DiffWithRoundWithPrecision(prec uint8) Float32x8 - -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x16) DiffWithRoundWithPrecision(prec uint8) Float32x16 - -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x2) DiffWithRoundWithPrecision(prec uint8) Float64x2 - -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x4) DiffWithRoundWithPrecision(prec uint8) Float64x4 - -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x8) DiffWithRoundWithPrecision(prec uint8) Float64x8 - -/* DiffWithRoundWithPrecisionMasked */ - -// DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x4) DiffWithRoundWithPrecisionMasked(prec uint8, mask Mask32x4) Float32x4 - -// DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x8) DiffWithRoundWithPrecisionMasked(prec uint8, mask Mask32x8) Float32x8 - -// DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x16) DiffWithRoundWithPrecisionMasked(prec uint8, mask Mask32x16) Float32x16 - -// DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. -// -// This operation is applied selectively under a write mask. 
-// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x2) DiffWithRoundWithPrecisionMasked(prec uint8, mask Mask64x2) Float64x2 - -// DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x4) DiffWithRoundWithPrecisionMasked(prec uint8, mask Mask64x4) Float64x4 - -// DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x8) DiffWithRoundWithPrecisionMasked(prec uint8, mask Mask64x8) Float64x8 - -/* DiffWithTruncWithPrecision */ - -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x4) DiffWithTruncWithPrecision(prec uint8) Float32x4 - -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x8) DiffWithTruncWithPrecision(prec uint8) Float32x8 - -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x16) DiffWithTruncWithPrecision(prec uint8) Float32x16 - -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x2) DiffWithTruncWithPrecision(prec uint8) Float64x2 - -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x4) DiffWithTruncWithPrecision(prec uint8) Float64x4 - -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x8) DiffWithTruncWithPrecision(prec uint8) Float64x8 - -/* DiffWithTruncWithPrecisionMasked */ - -// DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x4) DiffWithTruncWithPrecisionMasked(prec uint8, mask Mask32x4) Float32x4 - -// DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. -// -// This operation is applied selectively under a write mask. 
-// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x8) DiffWithTruncWithPrecisionMasked(prec uint8, mask Mask32x8) Float32x8 - -// DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x16) DiffWithTruncWithPrecisionMasked(prec uint8, mask Mask32x16) Float32x16 - -// DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x2) DiffWithTruncWithPrecisionMasked(prec uint8, mask Mask64x2) Float64x2 - -// DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x4) DiffWithTruncWithPrecisionMasked(prec uint8, mask Mask64x4) Float64x4 - -// DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x8) DiffWithTruncWithPrecisionMasked(prec uint8, mask Mask64x8) Float64x8 - -/* Div */ - -// Div divides elements of two vectors. -// -// Asm: VDIVPS, CPU Feature: AVX -func (x Float32x4) Div(y Float32x4) Float32x4 - -// Div divides elements of two vectors. -// -// Asm: VDIVPS, CPU Feature: AVX -func (x Float32x8) Div(y Float32x8) Float32x8 - -// Div divides elements of two vectors. -// -// Asm: VDIVPS, CPU Feature: AVX512F -func (x Float32x16) Div(y Float32x16) Float32x16 - -// Div divides elements of two vectors. -// -// Asm: VDIVPD, CPU Feature: AVX -func (x Float64x2) Div(y Float64x2) Float64x2 - -// Div divides elements of two vectors. +// Div divides elements of two vectors. // // Asm: VDIVPD, CPU Feature: AVX func (x Float64x4) Div(y Float64x4) Float64x4 @@ -2485,105 +2421,205 @@ func (x Float64x2) Floor() Float64x2 // Asm: VROUNDPD, CPU Feature: AVX func (x Float64x4) Floor() Float64x4 -/* FloorWithPrecision */ +/* FloorScaled */ + +// FloorScaled rounds elements down with specified precision. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512F +func (x Float32x4) FloorScaled(prec uint8) Float32x4 -// FloorWithPrecision rounds elements down with specified precision. +// FloorScaled rounds elements down with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x4) FloorWithPrecision(prec uint8) Float32x4 +func (x Float32x8) FloorScaled(prec uint8) Float32x8 + +// FloorScaled rounds elements down with specified precision. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. 
+// +// Asm: VRNDSCALEPS, CPU Feature: AVX512F +func (x Float32x16) FloorScaled(prec uint8) Float32x16 + +// FloorScaled rounds elements down with specified precision. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512F +func (x Float64x2) FloorScaled(prec uint8) Float64x2 + +// FloorScaled rounds elements down with specified precision. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512F +func (x Float64x4) FloorScaled(prec uint8) Float64x4 + +// FloorScaled rounds elements down with specified precision. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512F +func (x Float64x8) FloorScaled(prec uint8) Float64x8 + +/* FloorScaledMasked */ + +// FloorScaledMasked rounds elements down with specified precision. +// +// This operation is applied selectively under a write mask. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512F +func (x Float32x4) FloorScaledMasked(prec uint8, mask Mask32x4) Float32x4 + +// FloorScaledMasked rounds elements down with specified precision. +// +// This operation is applied selectively under a write mask. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512F +func (x Float32x8) FloorScaledMasked(prec uint8, mask Mask32x8) Float32x8 + +// FloorScaledMasked rounds elements down with specified precision. +// +// This operation is applied selectively under a write mask. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512F +func (x Float32x16) FloorScaledMasked(prec uint8, mask Mask32x16) Float32x16 + +// FloorScaledMasked rounds elements down with specified precision. +// +// This operation is applied selectively under a write mask. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512F +func (x Float64x2) FloorScaledMasked(prec uint8, mask Mask64x2) Float64x2 + +// FloorScaledMasked rounds elements down with specified precision. +// +// This operation is applied selectively under a write mask. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512F +func (x Float64x4) FloorScaledMasked(prec uint8, mask Mask64x4) Float64x4 + +// FloorScaledMasked rounds elements down with specified precision. +// +// This operation is applied selectively under a write mask. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512F +func (x Float64x8) FloorScaledMasked(prec uint8, mask Mask64x8) Float64x8 + +/* FloorScaledResidue */ + +// FloorScaledResidue computes the difference after flooring with specified precision. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPS, CPU Feature: AVX512DQ +func (x Float32x4) FloorScaledResidue(prec uint8) Float32x4 -// FloorWithPrecision rounds elements down with specified precision. +// FloorScaledResidue computes the difference after flooring with specified precision. 
// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x8) FloorWithPrecision(prec uint8) Float32x8 +// Asm: VREDUCEPS, CPU Feature: AVX512DQ +func (x Float32x8) FloorScaledResidue(prec uint8) Float32x8 -// FloorWithPrecision rounds elements down with specified precision. +// FloorScaledResidue computes the difference after flooring with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x16) FloorWithPrecision(prec uint8) Float32x16 +// Asm: VREDUCEPS, CPU Feature: AVX512DQ +func (x Float32x16) FloorScaledResidue(prec uint8) Float32x16 -// FloorWithPrecision rounds elements down with specified precision. +// FloorScaledResidue computes the difference after flooring with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x2) FloorWithPrecision(prec uint8) Float64x2 +// Asm: VREDUCEPD, CPU Feature: AVX512DQ +func (x Float64x2) FloorScaledResidue(prec uint8) Float64x2 -// FloorWithPrecision rounds elements down with specified precision. +// FloorScaledResidue computes the difference after flooring with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x4) FloorWithPrecision(prec uint8) Float64x4 +// Asm: VREDUCEPD, CPU Feature: AVX512DQ +func (x Float64x4) FloorScaledResidue(prec uint8) Float64x4 -// FloorWithPrecision rounds elements down with specified precision. +// FloorScaledResidue computes the difference after flooring with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x8) FloorWithPrecision(prec uint8) Float64x8 +// Asm: VREDUCEPD, CPU Feature: AVX512DQ +func (x Float64x8) FloorScaledResidue(prec uint8) Float64x8 -/* FloorWithPrecisionMasked */ +/* FloorScaledResidueMasked */ -// FloorWithPrecisionMasked rounds elements down with specified precision. +// FloorScaledResidueMasked computes the difference after flooring with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x4) FloorWithPrecisionMasked(prec uint8, mask Mask32x4) Float32x4 +// Asm: VREDUCEPS, CPU Feature: AVX512DQ +func (x Float32x4) FloorScaledResidueMasked(prec uint8, mask Mask32x4) Float32x4 -// FloorWithPrecisionMasked rounds elements down with specified precision. +// FloorScaledResidueMasked computes the difference after flooring with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x8) FloorWithPrecisionMasked(prec uint8, mask Mask32x8) Float32x8 +// Asm: VREDUCEPS, CPU Feature: AVX512DQ +func (x Float32x8) FloorScaledResidueMasked(prec uint8, mask Mask32x8) Float32x8 -// FloorWithPrecisionMasked rounds elements down with specified precision. +// FloorScaledResidueMasked computes the difference after flooring with specified precision. 
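As a sketch of how the floor variants above pair up (same assumptions as before: same package, illustrative constant precision), FloorScaled and FloorScaledResidue split a value into its rounded-down part and the leftover difference.

func floorParts(x Float32x8) (down, rem Float32x8) {
	const prec = 2                   // compile-time constant, as required
	down = x.FloorScaled(prec)       // x rounded down, keeping prec fractional bits
	rem = x.FloorScaledResidue(prec) // x minus that floored value, lane by lane
	return down, rem
}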
// // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x16) FloorWithPrecisionMasked(prec uint8, mask Mask32x16) Float32x16 +// Asm: VREDUCEPS, CPU Feature: AVX512DQ +func (x Float32x16) FloorScaledResidueMasked(prec uint8, mask Mask32x16) Float32x16 -// FloorWithPrecisionMasked rounds elements down with specified precision. +// FloorScaledResidueMasked computes the difference after flooring with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x2) FloorWithPrecisionMasked(prec uint8, mask Mask64x2) Float64x2 +// Asm: VREDUCEPD, CPU Feature: AVX512DQ +func (x Float64x2) FloorScaledResidueMasked(prec uint8, mask Mask64x2) Float64x2 -// FloorWithPrecisionMasked rounds elements down with specified precision. +// FloorScaledResidueMasked computes the difference after flooring with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x4) FloorWithPrecisionMasked(prec uint8, mask Mask64x4) Float64x4 +// Asm: VREDUCEPD, CPU Feature: AVX512DQ +func (x Float64x4) FloorScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4 -// FloorWithPrecisionMasked rounds elements down with specified precision. +// FloorScaledResidueMasked computes the difference after flooring with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x8) FloorWithPrecisionMasked(prec uint8, mask Mask64x8) Float64x8 +// Asm: VREDUCEPD, CPU Feature: AVX512DQ +func (x Float64x8) FloorScaledResidueMasked(prec uint8, mask Mask64x8) Float64x8 /* FusedMultiplyAdd */ @@ -5427,81 +5463,50 @@ func (x Float64x4) Mul(y Float64x4) Float64x4 // Asm: VMULPD, CPU Feature: AVX512F func (x Float64x8) Mul(y Float64x8) Float64x8 -/* MulByPowOf2 */ - -// MulByPowOf2 multiplies elements by a power of 2. -// -// Asm: VSCALEFPS, CPU Feature: AVX512F -func (x Float32x4) MulByPowOf2(y Float32x4) Float32x4 - -// MulByPowOf2 multiplies elements by a power of 2. -// -// Asm: VSCALEFPS, CPU Feature: AVX512F -func (x Float32x8) MulByPowOf2(y Float32x8) Float32x8 - -// MulByPowOf2 multiplies elements by a power of 2. -// -// Asm: VSCALEFPS, CPU Feature: AVX512F -func (x Float32x16) MulByPowOf2(y Float32x16) Float32x16 - -// MulByPowOf2 multiplies elements by a power of 2. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VSCALEFPD, CPU Feature: AVX512F -func (x Float64x2) MulByPowOf2(y Float64x2) Float64x2 +// Asm: VPMULLW, CPU Feature: AVX +func (x Int16x8) Mul(y Int16x8) Int16x8 -// MulByPowOf2 multiplies elements by a power of 2. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VSCALEFPD, CPU Feature: AVX512F -func (x Float64x4) MulByPowOf2(y Float64x4) Float64x4 +// Asm: VPMULLW, CPU Feature: AVX2 +func (x Int16x16) Mul(y Int16x16) Int16x16 -// MulByPowOf2 multiplies elements by a power of 2. +// Mul multiplies corresponding elements of two vectors. 
// -// Asm: VSCALEFPD, CPU Feature: AVX512F -func (x Float64x8) MulByPowOf2(y Float64x8) Float64x8 - -/* MulByPowOf2Masked */ +// Asm: VPMULLW, CPU Feature: AVX512BW +func (x Int16x32) Mul(y Int16x32) Int16x32 -// MulByPowOf2Masked multiplies elements by a power of 2. -// -// This operation is applied selectively under a write mask. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VSCALEFPS, CPU Feature: AVX512F -func (x Float32x4) MulByPowOf2Masked(y Float32x4, mask Mask32x4) Float32x4 +// Asm: VPMULLD, CPU Feature: AVX +func (x Int32x4) Mul(y Int32x4) Int32x4 -// MulByPowOf2Masked multiplies elements by a power of 2. -// -// This operation is applied selectively under a write mask. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VSCALEFPS, CPU Feature: AVX512F -func (x Float32x8) MulByPowOf2Masked(y Float32x8, mask Mask32x8) Float32x8 +// Asm: VPMULLD, CPU Feature: AVX2 +func (x Int32x8) Mul(y Int32x8) Int32x8 -// MulByPowOf2Masked multiplies elements by a power of 2. -// -// This operation is applied selectively under a write mask. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VSCALEFPS, CPU Feature: AVX512F -func (x Float32x16) MulByPowOf2Masked(y Float32x16, mask Mask32x16) Float32x16 +// Asm: VPMULLD, CPU Feature: AVX512F +func (x Int32x16) Mul(y Int32x16) Int32x16 -// MulByPowOf2Masked multiplies elements by a power of 2. -// -// This operation is applied selectively under a write mask. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VSCALEFPD, CPU Feature: AVX512F -func (x Float64x2) MulByPowOf2Masked(y Float64x2, mask Mask64x2) Float64x2 +// Asm: VPMULLQ, CPU Feature: AVX512DQ +func (x Int64x2) Mul(y Int64x2) Int64x2 -// MulByPowOf2Masked multiplies elements by a power of 2. -// -// This operation is applied selectively under a write mask. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VSCALEFPD, CPU Feature: AVX512F -func (x Float64x4) MulByPowOf2Masked(y Float64x4, mask Mask64x4) Float64x4 +// Asm: VPMULLQ, CPU Feature: AVX512DQ +func (x Int64x4) Mul(y Int64x4) Int64x4 -// MulByPowOf2Masked multiplies elements by a power of 2. -// -// This operation is applied selectively under a write mask. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VSCALEFPD, CPU Feature: AVX512F -func (x Float64x8) MulByPowOf2Masked(y Float64x8, mask Mask64x8) Float64x8 +// Asm: VPMULLQ, CPU Feature: AVX512DQ +func (x Int64x8) Mul(y Int64x8) Int64x8 /* MulEvenWiden */ @@ -5691,161 +5696,112 @@ func (x Uint16x16) MulHighMasked(y Uint16x16, mask Mask16x16) Uint16x16 // Asm: VPMULHUW, CPU Feature: AVX512BW func (x Uint16x32) MulHighMasked(y Uint16x32, mask Mask16x32) Uint16x32 -/* MulLow */ - -// MulLow multiplies elements and stores the low part of the result. -// -// Asm: VPMULLW, CPU Feature: AVX -func (x Int16x8) MulLow(y Int16x8) Int16x8 - -// MulLow multiplies elements and stores the low part of the result. -// -// Asm: VPMULLW, CPU Feature: AVX2 -func (x Int16x16) MulLow(y Int16x16) Int16x16 - -// MulLow multiplies elements and stores the low part of the result. -// -// Asm: VPMULLW, CPU Feature: AVX512BW -func (x Int16x32) MulLow(y Int16x32) Int16x32 - -// MulLow multiplies elements and stores the low part of the result. -// -// Asm: VPMULLD, CPU Feature: AVX -func (x Int32x4) MulLow(y Int32x4) Int32x4 - -// MulLow multiplies elements and stores the low part of the result. 
-// -// Asm: VPMULLD, CPU Feature: AVX2 -func (x Int32x8) MulLow(y Int32x8) Int32x8 - -// MulLow multiplies elements and stores the low part of the result. -// -// Asm: VPMULLD, CPU Feature: AVX512F -func (x Int32x16) MulLow(y Int32x16) Int32x16 - -// MulLow multiplies elements and stores the low part of the result. -// -// Asm: VPMULLQ, CPU Feature: AVX512DQ -func (x Int64x2) MulLow(y Int64x2) Int64x2 - -// MulLow multiplies elements and stores the low part of the result. -// -// Asm: VPMULLQ, CPU Feature: AVX512DQ -func (x Int64x4) MulLow(y Int64x4) Int64x4 - -// MulLow multiplies elements and stores the low part of the result. -// -// Asm: VPMULLQ, CPU Feature: AVX512DQ -func (x Int64x8) MulLow(y Int64x8) Int64x8 - -/* MulLowMasked */ +/* MulMasked */ -// MulLowMasked multiplies elements and stores the low part of the result. +// MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPMULLW, CPU Feature: AVX512BW -func (x Int16x8) MulLowMasked(y Int16x8, mask Mask16x8) Int16x8 +// Asm: VMULPS, CPU Feature: AVX512F +func (x Float32x4) MulMasked(y Float32x4, mask Mask32x4) Float32x4 -// MulLowMasked multiplies elements and stores the low part of the result. +// MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPMULLW, CPU Feature: AVX512BW -func (x Int16x16) MulLowMasked(y Int16x16, mask Mask16x16) Int16x16 +// Asm: VMULPS, CPU Feature: AVX512F +func (x Float32x8) MulMasked(y Float32x8, mask Mask32x8) Float32x8 -// MulLowMasked multiplies elements and stores the low part of the result. +// MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPMULLW, CPU Feature: AVX512BW -func (x Int16x32) MulLowMasked(y Int16x32, mask Mask16x32) Int16x32 +// Asm: VMULPS, CPU Feature: AVX512F +func (x Float32x16) MulMasked(y Float32x16, mask Mask32x16) Float32x16 -// MulLowMasked multiplies elements and stores the low part of the result. +// MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPMULLD, CPU Feature: AVX512F -func (x Int32x4) MulLowMasked(y Int32x4, mask Mask32x4) Int32x4 +// Asm: VMULPD, CPU Feature: AVX512F +func (x Float64x2) MulMasked(y Float64x2, mask Mask64x2) Float64x2 -// MulLowMasked multiplies elements and stores the low part of the result. +// MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPMULLD, CPU Feature: AVX512F -func (x Int32x8) MulLowMasked(y Int32x8, mask Mask32x8) Int32x8 +// Asm: VMULPD, CPU Feature: AVX512F +func (x Float64x4) MulMasked(y Float64x4, mask Mask64x4) Float64x4 -// MulLowMasked multiplies elements and stores the low part of the result. +// MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPMULLD, CPU Feature: AVX512F -func (x Int32x16) MulLowMasked(y Int32x16, mask Mask32x16) Int32x16 +// Asm: VMULPD, CPU Feature: AVX512F +func (x Float64x8) MulMasked(y Float64x8, mask Mask64x8) Float64x8 -// MulLowMasked multiplies elements and stores the low part of the result. +// MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. 
// -// Asm: VPMULLQ, CPU Feature: AVX512DQ -func (x Int64x2) MulLowMasked(y Int64x2, mask Mask64x2) Int64x2 +// Asm: VPMULLW, CPU Feature: AVX512BW +func (x Int16x8) MulMasked(y Int16x8, mask Mask16x8) Int16x8 -// MulLowMasked multiplies elements and stores the low part of the result. +// MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPMULLQ, CPU Feature: AVX512DQ -func (x Int64x4) MulLowMasked(y Int64x4, mask Mask64x4) Int64x4 +// Asm: VPMULLW, CPU Feature: AVX512BW +func (x Int16x16) MulMasked(y Int16x16, mask Mask16x16) Int16x16 -// MulLowMasked multiplies elements and stores the low part of the result. +// MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPMULLQ, CPU Feature: AVX512DQ -func (x Int64x8) MulLowMasked(y Int64x8, mask Mask64x8) Int64x8 - -/* MulMasked */ +// Asm: VPMULLW, CPU Feature: AVX512BW +func (x Int16x32) MulMasked(y Int16x32, mask Mask16x32) Int16x32 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VMULPS, CPU Feature: AVX512F -func (x Float32x4) MulMasked(y Float32x4, mask Mask32x4) Float32x4 +// Asm: VPMULLD, CPU Feature: AVX512F +func (x Int32x4) MulMasked(y Int32x4, mask Mask32x4) Int32x4 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VMULPS, CPU Feature: AVX512F -func (x Float32x8) MulMasked(y Float32x8, mask Mask32x8) Float32x8 +// Asm: VPMULLD, CPU Feature: AVX512F +func (x Int32x8) MulMasked(y Int32x8, mask Mask32x8) Int32x8 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VMULPS, CPU Feature: AVX512F -func (x Float32x16) MulMasked(y Float32x16, mask Mask32x16) Float32x16 +// Asm: VPMULLD, CPU Feature: AVX512F +func (x Int32x16) MulMasked(y Int32x16, mask Mask32x16) Int32x16 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VMULPD, CPU Feature: AVX512F -func (x Float64x2) MulMasked(y Float64x2, mask Mask64x2) Float64x2 +// Asm: VPMULLQ, CPU Feature: AVX512DQ +func (x Int64x2) MulMasked(y Int64x2, mask Mask64x2) Int64x2 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VMULPD, CPU Feature: AVX512F -func (x Float64x4) MulMasked(y Float64x4, mask Mask64x4) Float64x4 +// Asm: VPMULLQ, CPU Feature: AVX512DQ +func (x Int64x4) MulMasked(y Int64x4, mask Mask64x4) Int64x4 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VMULPD, CPU Feature: AVX512F -func (x Float64x8) MulMasked(y Float64x8, mask Mask64x8) Float64x8 +// Asm: VPMULLQ, CPU Feature: AVX512DQ +func (x Int64x8) MulMasked(y Int64x8, mask Mask64x8) Int64x8 /* NotEqual */ @@ -6402,216 +6358,68 @@ func (x Uint32x16) OrMasked(y Uint32x16, mask Mask32x16) Uint32x16 // // This operation is applied selectively under a write mask. // -// Asm: VPORQ, CPU Feature: AVX512F -func (x Uint64x2) OrMasked(y Uint64x2, mask Mask64x2) Uint64x2 - -// OrMasked performs a bitwise OR operation between two vectors. -// -// This operation is applied selectively under a write mask. 
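A brief sketch of the integer multiply methods after the rename from MulLow/MulLowMasked to Mul/MulMasked (same same-package assumption as above; the mask argument is passed in since mask construction is not part of this patch).

func mulBoth(x, y Int32x8, m Mask32x8) (Int32x8, Int32x8) {
	full := x.Mul(y)            // low 32 bits of each 32x32 product (VPMULLD)
	masked := x.MulMasked(y, m) // same multiply, applied only in lanes selected by m
	return full, masked
}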
-// -// Asm: VPORQ, CPU Feature: AVX512F -func (x Uint64x4) OrMasked(y Uint64x4, mask Mask64x4) Uint64x4 - -// OrMasked performs a bitwise OR operation between two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPORQ, CPU Feature: AVX512F -func (x Uint64x8) OrMasked(y Uint64x8, mask Mask64x8) Uint64x8 - -/* PairDotProd */ - -// PairDotProd multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. -// -// Asm: VPMADDWD, CPU Feature: AVX -func (x Int16x8) PairDotProd(y Int16x8) Int32x4 - -// PairDotProd multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. -// -// Asm: VPMADDWD, CPU Feature: AVX2 -func (x Int16x16) PairDotProd(y Int16x16) Int32x8 - -// PairDotProd multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. -// -// Asm: VPMADDWD, CPU Feature: AVX512BW -func (x Int16x32) PairDotProd(y Int16x32) Int32x16 - -/* PairDotProdMasked */ - -// PairDotProdMasked multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMADDWD, CPU Feature: AVX512BW -func (x Int16x8) PairDotProdMasked(y Int16x8, mask Mask16x8) Int32x4 - -// PairDotProdMasked multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMADDWD, CPU Feature: AVX512BW -func (x Int16x16) PairDotProdMasked(y Int16x16, mask Mask16x16) Int32x8 - -// PairDotProdMasked multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMADDWD, CPU Feature: AVX512BW -func (x Int16x32) PairDotProdMasked(y Int16x32, mask Mask16x32) Int32x16 - -/* PairwiseAdd */ - -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. -// -// Asm: VHADDPS, CPU Feature: AVX -func (x Float32x4) PairwiseAdd(y Float32x4) Float32x4 - -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. -// -// Asm: VHADDPS, CPU Feature: AVX -func (x Float32x8) PairwiseAdd(y Float32x8) Float32x8 - -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. -// -// Asm: VHADDPD, CPU Feature: AVX -func (x Float64x2) PairwiseAdd(y Float64x2) Float64x2 - -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. -// -// Asm: VHADDPD, CPU Feature: AVX -func (x Float64x4) PairwiseAdd(y Float64x4) Float64x4 - -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. 
-// -// Asm: VPHADDW, CPU Feature: AVX -func (x Int16x8) PairwiseAdd(y Int16x8) Int16x8 - -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. -// -// Asm: VPHADDW, CPU Feature: AVX2 -func (x Int16x16) PairwiseAdd(y Int16x16) Int16x16 - -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. -// -// Asm: VPHADDD, CPU Feature: AVX -func (x Int32x4) PairwiseAdd(y Int32x4) Int32x4 - -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. -// -// Asm: VPHADDD, CPU Feature: AVX2 -func (x Int32x8) PairwiseAdd(y Int32x8) Int32x8 - -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. -// -// Asm: VPHADDW, CPU Feature: AVX -func (x Uint16x8) PairwiseAdd(y Uint16x8) Uint16x8 - -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. -// -// Asm: VPHADDW, CPU Feature: AVX2 -func (x Uint16x16) PairwiseAdd(y Uint16x16) Uint16x16 +// Asm: VPORQ, CPU Feature: AVX512F +func (x Uint64x2) OrMasked(y Uint64x2, mask Mask64x2) Uint64x2 -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// OrMasked performs a bitwise OR operation between two vectors. // -// Asm: VPHADDD, CPU Feature: AVX -func (x Uint32x4) PairwiseAdd(y Uint32x4) Uint32x4 - -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// This operation is applied selectively under a write mask. // -// Asm: VPHADDD, CPU Feature: AVX2 -func (x Uint32x8) PairwiseAdd(y Uint32x8) Uint32x8 - -/* PairwiseSub */ +// Asm: VPORQ, CPU Feature: AVX512F +func (x Uint64x4) OrMasked(y Uint64x4, mask Mask64x4) Uint64x4 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// OrMasked performs a bitwise OR operation between two vectors. // -// Asm: VHSUBPS, CPU Feature: AVX -func (x Float32x4) PairwiseSub(y Float32x4) Float32x4 - -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// This operation is applied selectively under a write mask. // -// Asm: VHSUBPS, CPU Feature: AVX -func (x Float32x8) PairwiseSub(y Float32x8) Float32x8 +// Asm: VPORQ, CPU Feature: AVX512F +func (x Uint64x8) OrMasked(y Uint64x8, mask Mask64x8) Uint64x8 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. -// -// Asm: VHSUBPD, CPU Feature: AVX -func (x Float64x2) PairwiseSub(y Float64x2) Float64x2 +/* PairDotProd */ -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] 
and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// PairDotProd multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. // -// Asm: VHSUBPD, CPU Feature: AVX -func (x Float64x4) PairwiseSub(y Float64x4) Float64x4 +// Asm: VPMADDWD, CPU Feature: AVX +func (x Int16x8) PairDotProd(y Int16x8) Int32x4 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// PairDotProd multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. // -// Asm: VPHSUBW, CPU Feature: AVX -func (x Int16x8) PairwiseSub(y Int16x8) Int16x8 +// Asm: VPMADDWD, CPU Feature: AVX2 +func (x Int16x16) PairDotProd(y Int16x16) Int32x8 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// PairDotProd multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. // -// Asm: VPHSUBW, CPU Feature: AVX2 -func (x Int16x16) PairwiseSub(y Int16x16) Int16x16 +// Asm: VPMADDWD, CPU Feature: AVX512BW +func (x Int16x32) PairDotProd(y Int16x32) Int32x16 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. -// -// Asm: VPHSUBD, CPU Feature: AVX -func (x Int32x4) PairwiseSub(y Int32x4) Int32x4 +/* PairDotProdMasked */ -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// PairDotProdMasked multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. // -// Asm: VPHSUBD, CPU Feature: AVX2 -func (x Int32x8) PairwiseSub(y Int32x8) Int32x8 - -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// This operation is applied selectively under a write mask. // -// Asm: VPHSUBW, CPU Feature: AVX -func (x Uint16x8) PairwiseSub(y Uint16x8) Uint16x8 +// Asm: VPMADDWD, CPU Feature: AVX512BW +func (x Int16x8) PairDotProdMasked(y Int16x8, mask Mask16x8) Int32x4 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// PairDotProdMasked multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. // -// Asm: VPHSUBW, CPU Feature: AVX2 -func (x Uint16x16) PairwiseSub(y Uint16x16) Uint16x16 - -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// This operation is applied selectively under a write mask. 
// -// Asm: VPHSUBD, CPU Feature: AVX -func (x Uint32x4) PairwiseSub(y Uint32x4) Uint32x4 +// Asm: VPMADDWD, CPU Feature: AVX512BW +func (x Int16x16) PairDotProdMasked(y Int16x16, mask Mask16x16) Int32x8 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// PairDotProdMasked multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. // -// Asm: VPHSUBD, CPU Feature: AVX2 -func (x Uint32x8) PairwiseSub(y Uint32x8) Uint32x8 +// This operation is applied selectively under a write mask. +// +// Asm: VPMADDWD, CPU Feature: AVX512BW +func (x Int16x32) PairDotProdMasked(y Int16x32, mask Mask16x32) Int32x16 /* Permute */ @@ -8490,526 +8298,302 @@ func (x Int64x8) RotateRightMasked(y Int64x8, mask Mask64x8) Int64x8 // Asm: VPRORVD, CPU Feature: AVX512F func (x Uint32x4) RotateRightMasked(y Uint32x4, mask Mask32x4) Uint32x4 -// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPRORVD, CPU Feature: AVX512F -func (x Uint32x8) RotateRightMasked(y Uint32x8, mask Mask32x8) Uint32x8 - -// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPRORVD, CPU Feature: AVX512F -func (x Uint32x16) RotateRightMasked(y Uint32x16, mask Mask32x16) Uint32x16 - -// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPRORVQ, CPU Feature: AVX512F -func (x Uint64x2) RotateRightMasked(y Uint64x2, mask Mask64x2) Uint64x2 - -// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPRORVQ, CPU Feature: AVX512F -func (x Uint64x4) RotateRightMasked(y Uint64x4, mask Mask64x4) Uint64x4 - -// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPRORVQ, CPU Feature: AVX512F -func (x Uint64x8) RotateRightMasked(y Uint64x8, mask Mask64x8) Uint64x8 - -/* Round */ - -// Round rounds elements to the nearest integer. -// -// Asm: VROUNDPS, CPU Feature: AVX -func (x Float32x4) Round() Float32x4 - -// Round rounds elements to the nearest integer. -// -// Asm: VROUNDPS, CPU Feature: AVX -func (x Float32x8) Round() Float32x8 - -// Round rounds elements to the nearest integer. -// -// Asm: VROUNDPD, CPU Feature: AVX -func (x Float64x2) Round() Float64x2 - -// Round rounds elements to the nearest integer. -// -// Asm: VROUNDPD, CPU Feature: AVX -func (x Float64x4) Round() Float64x4 - -/* RoundWithPrecision */ - -// RoundWithPrecision rounds elements with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x4) RoundWithPrecision(prec uint8) Float32x4 - -// RoundWithPrecision rounds elements with specified precision. 
-// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x8) RoundWithPrecision(prec uint8) Float32x8 - -// RoundWithPrecision rounds elements with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x16) RoundWithPrecision(prec uint8) Float32x16 - -// RoundWithPrecision rounds elements with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x2) RoundWithPrecision(prec uint8) Float64x2 - -// RoundWithPrecision rounds elements with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x4) RoundWithPrecision(prec uint8) Float64x4 - -// RoundWithPrecision rounds elements with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x8) RoundWithPrecision(prec uint8) Float64x8 - -/* RoundWithPrecisionMasked */ - -// RoundWithPrecisionMasked rounds elements with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x4) RoundWithPrecisionMasked(prec uint8, mask Mask32x4) Float32x4 - -// RoundWithPrecisionMasked rounds elements with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x8) RoundWithPrecisionMasked(prec uint8, mask Mask32x8) Float32x8 - -// RoundWithPrecisionMasked rounds elements with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x16) RoundWithPrecisionMasked(prec uint8, mask Mask32x16) Float32x16 - -// RoundWithPrecisionMasked rounds elements with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x2) RoundWithPrecisionMasked(prec uint8, mask Mask64x2) Float64x2 - -// RoundWithPrecisionMasked rounds elements with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x4) RoundWithPrecisionMasked(prec uint8, mask Mask64x4) Float64x4 - -// RoundWithPrecisionMasked rounds elements with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. 
-// -// Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x8) RoundWithPrecisionMasked(prec uint8, mask Mask64x8) Float64x8 - -/* SaturatedAdd */ - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSB, CPU Feature: AVX -func (x Int8x16) SaturatedAdd(y Int8x16) Int8x16 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSB, CPU Feature: AVX2 -func (x Int8x32) SaturatedAdd(y Int8x32) Int8x32 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSB, CPU Feature: AVX512BW -func (x Int8x64) SaturatedAdd(y Int8x64) Int8x64 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSW, CPU Feature: AVX -func (x Int16x8) SaturatedAdd(y Int16x8) Int16x8 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSW, CPU Feature: AVX2 -func (x Int16x16) SaturatedAdd(y Int16x16) Int16x16 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSW, CPU Feature: AVX512BW -func (x Int16x32) SaturatedAdd(y Int16x32) Int16x32 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSB, CPU Feature: AVX -func (x Uint8x16) SaturatedAdd(y Uint8x16) Uint8x16 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSB, CPU Feature: AVX2 -func (x Uint8x32) SaturatedAdd(y Uint8x32) Uint8x32 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSB, CPU Feature: AVX512BW -func (x Uint8x64) SaturatedAdd(y Uint8x64) Uint8x64 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSW, CPU Feature: AVX -func (x Uint16x8) SaturatedAdd(y Uint16x8) Uint16x8 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSW, CPU Feature: AVX2 -func (x Uint16x16) SaturatedAdd(y Uint16x16) Uint16x16 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSW, CPU Feature: AVX512BW -func (x Uint16x32) SaturatedAdd(y Uint16x32) Uint16x32 - -/* SaturatedAddDotProd */ - -// SaturatedAddDotProd performs dot products on pairs of elements of y and z and then adds x. -// -// Asm: VPDPWSSDS, CPU Feature: AVXVNNI -func (x Int32x4) SaturatedAddDotProd(y Int16x8, z Int16x8) Int32x4 - -// SaturatedAddDotProd performs dot products on pairs of elements of y and z and then adds x. -// -// Asm: VPDPWSSDS, CPU Feature: AVXVNNI -func (x Int32x8) SaturatedAddDotProd(y Int16x16, z Int16x16) Int32x8 - -// SaturatedAddDotProd performs dot products on pairs of elements of y and z and then adds x. +// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI -func (x Int32x16) SaturatedAddDotProd(y Int16x32, z Int16x32) Int32x16 +// This operation is applied selectively under a write mask. +// +// Asm: VPRORVD, CPU Feature: AVX512F +func (x Uint32x8) RotateRightMasked(y Uint32x8, mask Mask32x8) Uint32x8 -/* SaturatedAddDotProdMasked */ +// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// This operation is applied selectively under a write mask. 
+// +// Asm: VPRORVD, CPU Feature: AVX512F +func (x Uint32x16) RotateRightMasked(y Uint32x16, mask Mask32x16) Uint32x16 -// SaturatedAddDotProdMasked performs dot products on pairs of elements of y and z and then adds x. +// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI -func (x Int32x4) SaturatedAddDotProdMasked(y Int16x8, z Int16x8, mask Mask32x4) Int32x4 +// Asm: VPRORVQ, CPU Feature: AVX512F +func (x Uint64x2) RotateRightMasked(y Uint64x2, mask Mask64x2) Uint64x2 -// SaturatedAddDotProdMasked performs dot products on pairs of elements of y and z and then adds x. +// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI -func (x Int32x8) SaturatedAddDotProdMasked(y Int16x16, z Int16x16, mask Mask32x8) Int32x8 +// Asm: VPRORVQ, CPU Feature: AVX512F +func (x Uint64x4) RotateRightMasked(y Uint64x4, mask Mask64x4) Uint64x4 -// SaturatedAddDotProdMasked performs dot products on pairs of elements of y and z and then adds x. +// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI -func (x Int32x16) SaturatedAddDotProdMasked(y Int16x32, z Int16x32, mask Mask32x16) Int32x16 +// Asm: VPRORVQ, CPU Feature: AVX512F +func (x Uint64x8) RotateRightMasked(y Uint64x8, mask Mask64x8) Uint64x8 -/* SaturatedAddMasked */ +/* Round */ -// SaturatedAddMasked adds corresponding elements of two vectors with saturation. +// Round rounds elements to the nearest integer. // -// This operation is applied selectively under a write mask. +// Asm: VROUNDPS, CPU Feature: AVX +func (x Float32x4) Round() Float32x4 + +// Round rounds elements to the nearest integer. // -// Asm: VPADDSB, CPU Feature: AVX512BW -func (x Int8x16) SaturatedAddMasked(y Int8x16, mask Mask8x16) Int8x16 +// Asm: VROUNDPS, CPU Feature: AVX +func (x Float32x8) Round() Float32x8 -// SaturatedAddMasked adds corresponding elements of two vectors with saturation. +// Round rounds elements to the nearest integer. // -// This operation is applied selectively under a write mask. +// Asm: VROUNDPD, CPU Feature: AVX +func (x Float64x2) Round() Float64x2 + +// Round rounds elements to the nearest integer. // -// Asm: VPADDSB, CPU Feature: AVX512BW -func (x Int8x32) SaturatedAddMasked(y Int8x32, mask Mask8x32) Int8x32 +// Asm: VROUNDPD, CPU Feature: AVX +func (x Float64x4) Round() Float64x4 -// SaturatedAddMasked adds corresponding elements of two vectors with saturation. +/* RoundScaled */ + +// RoundScaled rounds elements with specified precision. // -// This operation is applied selectively under a write mask. +// prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPADDSB, CPU Feature: AVX512BW -func (x Int8x64) SaturatedAddMasked(y Int8x64, mask Mask8x64) Int8x64 +// Asm: VRNDSCALEPS, CPU Feature: AVX512F +func (x Float32x4) RoundScaled(prec uint8) Float32x4 -// SaturatedAddMasked adds corresponding elements of two vectors with saturation. +// RoundScaled rounds elements with specified precision. // -// This operation is applied selectively under a write mask. 
+// prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPADDSW, CPU Feature: AVX512BW -func (x Int16x8) SaturatedAddMasked(y Int16x8, mask Mask16x8) Int16x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512F +func (x Float32x8) RoundScaled(prec uint8) Float32x8 -// SaturatedAddMasked adds corresponding elements of two vectors with saturation. +// RoundScaled rounds elements with specified precision. // -// This operation is applied selectively under a write mask. +// prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPADDSW, CPU Feature: AVX512BW -func (x Int16x16) SaturatedAddMasked(y Int16x16, mask Mask16x16) Int16x16 +// Asm: VRNDSCALEPS, CPU Feature: AVX512F +func (x Float32x16) RoundScaled(prec uint8) Float32x16 -// SaturatedAddMasked adds corresponding elements of two vectors with saturation. +// RoundScaled rounds elements with specified precision. // -// This operation is applied selectively under a write mask. +// prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPADDSW, CPU Feature: AVX512BW -func (x Int16x32) SaturatedAddMasked(y Int16x32, mask Mask16x32) Int16x32 +// Asm: VRNDSCALEPD, CPU Feature: AVX512F +func (x Float64x2) RoundScaled(prec uint8) Float64x2 -// SaturatedAddMasked adds corresponding elements of two vectors with saturation. +// RoundScaled rounds elements with specified precision. // -// This operation is applied selectively under a write mask. +// prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPADDSB, CPU Feature: AVX512BW -func (x Uint8x16) SaturatedAddMasked(y Uint8x16, mask Mask8x16) Uint8x16 +// Asm: VRNDSCALEPD, CPU Feature: AVX512F +func (x Float64x4) RoundScaled(prec uint8) Float64x4 -// SaturatedAddMasked adds corresponding elements of two vectors with saturation. +// RoundScaled rounds elements with specified precision. // -// This operation is applied selectively under a write mask. +// prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPADDSB, CPU Feature: AVX512BW -func (x Uint8x32) SaturatedAddMasked(y Uint8x32, mask Mask8x32) Uint8x32 +// Asm: VRNDSCALEPD, CPU Feature: AVX512F +func (x Float64x8) RoundScaled(prec uint8) Float64x8 + +/* RoundScaledMasked */ -// SaturatedAddMasked adds corresponding elements of two vectors with saturation. +// RoundScaledMasked rounds elements with specified precision. // // This operation is applied selectively under a write mask. // -// Asm: VPADDSB, CPU Feature: AVX512BW -func (x Uint8x64) SaturatedAddMasked(y Uint8x64, mask Mask8x64) Uint8x64 +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512F +func (x Float32x4) RoundScaledMasked(prec uint8, mask Mask32x4) Float32x4 -// SaturatedAddMasked adds corresponding elements of two vectors with saturation. +// RoundScaledMasked rounds elements with specified precision. // // This operation is applied selectively under a write mask. // -// Asm: VPADDSW, CPU Feature: AVX512BW -func (x Uint16x8) SaturatedAddMasked(y Uint16x8, mask Mask16x8) Uint16x8 +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512F +func (x Float32x8) RoundScaledMasked(prec uint8, mask Mask32x8) Float32x8 -// SaturatedAddMasked adds corresponding elements of two vectors with saturation. 
+// RoundScaledMasked rounds elements with specified precision. // // This operation is applied selectively under a write mask. // -// Asm: VPADDSW, CPU Feature: AVX512BW -func (x Uint16x16) SaturatedAddMasked(y Uint16x16, mask Mask16x16) Uint16x16 +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512F +func (x Float32x16) RoundScaledMasked(prec uint8, mask Mask32x16) Float32x16 -// SaturatedAddMasked adds corresponding elements of two vectors with saturation. +// RoundScaledMasked rounds elements with specified precision. // // This operation is applied selectively under a write mask. // -// Asm: VPADDSW, CPU Feature: AVX512BW -func (x Uint16x32) SaturatedAddMasked(y Uint16x32, mask Mask16x32) Uint16x32 - -/* SaturatedPairwiseAdd */ - -// SaturatedPairwiseAdd horizontally adds adjacent pairs of elements with saturation. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPHADDSW, CPU Feature: AVX -func (x Int16x8) SaturatedPairwiseAdd(y Int16x8) Int16x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512F +func (x Float64x2) RoundScaledMasked(prec uint8, mask Mask64x2) Float64x2 -// SaturatedPairwiseAdd horizontally adds adjacent pairs of elements with saturation. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// RoundScaledMasked rounds elements with specified precision. // -// Asm: VPHADDSW, CPU Feature: AVX2 -func (x Int16x16) SaturatedPairwiseAdd(y Int16x16) Int16x16 - -/* SaturatedPairwiseSub */ - -// SaturatedPairwiseSub horizontally subtracts adjacent pairs of elements with saturation. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// This operation is applied selectively under a write mask. // -// Asm: VPHSUBSW, CPU Feature: AVX -func (x Int16x8) SaturatedPairwiseSub(y Int16x8) Int16x8 +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512F +func (x Float64x4) RoundScaledMasked(prec uint8, mask Mask64x4) Float64x4 -// SaturatedPairwiseSub horizontally subtracts adjacent pairs of elements with saturation. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// RoundScaledMasked rounds elements with specified precision. // -// Asm: VPHSUBSW, CPU Feature: AVX2 -func (x Int16x16) SaturatedPairwiseSub(y Int16x16) Int16x16 +// This operation is applied selectively under a write mask. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512F +func (x Float64x8) RoundScaledMasked(prec uint8, mask Mask64x8) Float64x8 -/* SaturatedSub */ +/* RoundScaledResidue */ -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// RoundScaledResidue computes the difference after rounding with specified precision. // -// Asm: VPSUBSB, CPU Feature: AVX -func (x Int8x16) SaturatedSub(y Int8x16) Int8x16 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// prec is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VPSUBSB, CPU Feature: AVX2 -func (x Int8x32) SaturatedSub(y Int8x32) Int8x32 +// Asm: VREDUCEPS, CPU Feature: AVX512DQ +func (x Float32x4) RoundScaledResidue(prec uint8) Float32x4 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// RoundScaledResidue computes the difference after rounding with specified precision. // -// Asm: VPSUBSB, CPU Feature: AVX512BW -func (x Int8x64) SaturatedSub(y Int8x64) Int8x64 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSUBSW, CPU Feature: AVX -func (x Int16x8) SaturatedSub(y Int16x8) Int16x8 +// Asm: VREDUCEPS, CPU Feature: AVX512DQ +func (x Float32x8) RoundScaledResidue(prec uint8) Float32x8 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// RoundScaledResidue computes the difference after rounding with specified precision. // -// Asm: VPSUBSW, CPU Feature: AVX2 -func (x Int16x16) SaturatedSub(y Int16x16) Int16x16 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSUBSW, CPU Feature: AVX512BW -func (x Int16x32) SaturatedSub(y Int16x32) Int16x32 +// Asm: VREDUCEPS, CPU Feature: AVX512DQ +func (x Float32x16) RoundScaledResidue(prec uint8) Float32x16 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// RoundScaledResidue computes the difference after rounding with specified precision. // -// Asm: VPSUBSB, CPU Feature: AVX -func (x Uint8x16) SaturatedSub(y Uint8x16) Uint8x16 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSUBSB, CPU Feature: AVX2 -func (x Uint8x32) SaturatedSub(y Uint8x32) Uint8x32 +// Asm: VREDUCEPD, CPU Feature: AVX512DQ +func (x Float64x2) RoundScaledResidue(prec uint8) Float64x2 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// RoundScaledResidue computes the difference after rounding with specified precision. // -// Asm: VPSUBSB, CPU Feature: AVX512BW -func (x Uint8x64) SaturatedSub(y Uint8x64) Uint8x64 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSUBSW, CPU Feature: AVX -func (x Uint16x8) SaturatedSub(y Uint16x8) Uint16x8 +// Asm: VREDUCEPD, CPU Feature: AVX512DQ +func (x Float64x4) RoundScaledResidue(prec uint8) Float64x4 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// RoundScaledResidue computes the difference after rounding with specified precision. // -// Asm: VPSUBSW, CPU Feature: AVX2 -func (x Uint16x16) SaturatedSub(y Uint16x16) Uint16x16 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSUBSW, CPU Feature: AVX512BW -func (x Uint16x32) SaturatedSub(y Uint16x32) Uint16x32 +// Asm: VREDUCEPD, CPU Feature: AVX512DQ +func (x Float64x8) RoundScaledResidue(prec uint8) Float64x8 -/* SaturatedSubMasked */ +/* RoundScaledResidueMasked */ -// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. 
+// RoundScaledResidueMasked computes the difference after rounding with specified precision. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBSB, CPU Feature: AVX512BW -func (x Int8x16) SaturatedSubMasked(y Int8x16, mask Mask8x16) Int8x16 - -// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. -// -// This operation is applied selectively under a write mask. +// prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSUBSB, CPU Feature: AVX512BW -func (x Int8x32) SaturatedSubMasked(y Int8x32, mask Mask8x32) Int8x32 +// Asm: VREDUCEPS, CPU Feature: AVX512DQ +func (x Float32x4) RoundScaledResidueMasked(prec uint8, mask Mask32x4) Float32x4 -// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. +// RoundScaledResidueMasked computes the difference after rounding with specified precision. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBSB, CPU Feature: AVX512BW -func (x Int8x64) SaturatedSubMasked(y Int8x64, mask Mask8x64) Int8x64 +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPS, CPU Feature: AVX512DQ +func (x Float32x8) RoundScaledResidueMasked(prec uint8, mask Mask32x8) Float32x8 -// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. +// RoundScaledResidueMasked computes the difference after rounding with specified precision. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBSW, CPU Feature: AVX512BW -func (x Int16x8) SaturatedSubMasked(y Int16x8, mask Mask16x8) Int16x8 +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPS, CPU Feature: AVX512DQ +func (x Float32x16) RoundScaledResidueMasked(prec uint8, mask Mask32x16) Float32x16 -// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. +// RoundScaledResidueMasked computes the difference after rounding with specified precision. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBSW, CPU Feature: AVX512BW -func (x Int16x16) SaturatedSubMasked(y Int16x16, mask Mask16x16) Int16x16 +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPD, CPU Feature: AVX512DQ +func (x Float64x2) RoundScaledResidueMasked(prec uint8, mask Mask64x2) Float64x2 -// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. +// RoundScaledResidueMasked computes the difference after rounding with specified precision. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBSW, CPU Feature: AVX512BW -func (x Int16x32) SaturatedSubMasked(y Int16x32, mask Mask16x32) Int16x32 +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPD, CPU Feature: AVX512DQ +func (x Float64x4) RoundScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4 -// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. +// RoundScaledResidueMasked computes the difference after rounding with specified precision. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBSB, CPU Feature: AVX512BW -func (x Uint8x16) SaturatedSubMasked(y Uint8x16, mask Mask8x16) Uint8x16 - -// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. 
+// prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// This operation is applied selectively under a write mask. +// Asm: VREDUCEPD, CPU Feature: AVX512DQ +func (x Float64x8) RoundScaledResidueMasked(prec uint8, mask Mask64x8) Float64x8 + +/* SaturatedAddDotProd */ + +// SaturatedAddDotProd performs dot products on pairs of elements of y and z and then adds x. // -// Asm: VPSUBSB, CPU Feature: AVX512BW -func (x Uint8x32) SaturatedSubMasked(y Uint8x32, mask Mask8x32) Uint8x32 +// Asm: VPDPWSSDS, CPU Feature: AVXVNNI +func (x Int32x4) SaturatedAddDotProd(y Int16x8, z Int16x8) Int32x4 -// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. +// SaturatedAddDotProd performs dot products on pairs of elements of y and z and then adds x. // -// This operation is applied selectively under a write mask. +// Asm: VPDPWSSDS, CPU Feature: AVXVNNI +func (x Int32x8) SaturatedAddDotProd(y Int16x16, z Int16x16) Int32x8 + +// SaturatedAddDotProd performs dot products on pairs of elements of y and z and then adds x. // -// Asm: VPSUBSB, CPU Feature: AVX512BW -func (x Uint8x64) SaturatedSubMasked(y Uint8x64, mask Mask8x64) Uint8x64 +// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI +func (x Int32x16) SaturatedAddDotProd(y Int16x32, z Int16x32) Int32x16 -// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. +/* SaturatedAddDotProdMasked */ + +// SaturatedAddDotProdMasked performs dot products on pairs of elements of y and z and then adds x. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBSW, CPU Feature: AVX512BW -func (x Uint16x8) SaturatedSubMasked(y Uint16x8, mask Mask16x8) Uint16x8 +// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI +func (x Int32x4) SaturatedAddDotProdMasked(y Int16x8, z Int16x8, mask Mask32x4) Int32x4 -// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. +// SaturatedAddDotProdMasked performs dot products on pairs of elements of y and z and then adds x. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBSW, CPU Feature: AVX512BW -func (x Uint16x16) SaturatedSubMasked(y Uint16x16, mask Mask16x16) Uint16x16 +// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI +func (x Int32x8) SaturatedAddDotProdMasked(y Int16x16, z Int16x16, mask Mask32x8) Int32x8 -// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. +// SaturatedAddDotProdMasked performs dot products on pairs of elements of y and z and then adds x. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBSW, CPU Feature: AVX512BW -func (x Uint16x32) SaturatedSubMasked(y Uint16x32, mask Mask16x32) Uint16x32 +// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI +func (x Int32x16) SaturatedAddDotProdMasked(y Int16x32, z Int16x32, mask Mask32x16) Int32x16 /* SaturatedUnsignedSignedPairDotProd */ @@ -9066,36 +8650,112 @@ func (x Int8x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int3 // SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of x and y and then adds z. 
// -// Asm: VPDPBUSDS, CPU Feature: AVXVNNI -func (x Int8x32) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int32x8) Int32x8 +// Asm: VPDPBUSDS, CPU Feature: AVXVNNI +func (x Int8x32) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int32x8) Int32x8 + +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of x and y and then adds z. +// +// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI +func (x Int8x64) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16) Int32x16 + +/* SaturatedUnsignedSignedQuadDotProdAccumulateMasked */ + +// SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of x and y and then adds z. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI +func (x Int8x16) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int32x4, mask Mask32x4) Int32x4 + +// SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of x and y and then adds z. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI +func (x Int8x32) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int32x8, mask Mask32x8) Int32x8 + +// SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of x and y and then adds z. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI +func (x Int8x64) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int32x16, mask Mask32x16) Int32x16 + +/* Scale */ + +// Scale multiplies elements by a power of 2. +// +// Asm: VSCALEFPS, CPU Feature: AVX512F +func (x Float32x4) Scale(y Float32x4) Float32x4 + +// Scale multiplies elements by a power of 2. +// +// Asm: VSCALEFPS, CPU Feature: AVX512F +func (x Float32x8) Scale(y Float32x8) Float32x8 + +// Scale multiplies elements by a power of 2. +// +// Asm: VSCALEFPS, CPU Feature: AVX512F +func (x Float32x16) Scale(y Float32x16) Float32x16 + +// Scale multiplies elements by a power of 2. +// +// Asm: VSCALEFPD, CPU Feature: AVX512F +func (x Float64x2) Scale(y Float64x2) Float64x2 + +// Scale multiplies elements by a power of 2. +// +// Asm: VSCALEFPD, CPU Feature: AVX512F +func (x Float64x4) Scale(y Float64x4) Float64x4 + +// Scale multiplies elements by a power of 2. +// +// Asm: VSCALEFPD, CPU Feature: AVX512F +func (x Float64x8) Scale(y Float64x8) Float64x8 + +/* ScaleMasked */ + +// ScaleMasked multiplies elements by a power of 2. +// +// This operation is applied selectively under a write mask. +// +// Asm: VSCALEFPS, CPU Feature: AVX512F +func (x Float32x4) ScaleMasked(y Float32x4, mask Mask32x4) Float32x4 -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of x and y and then adds z. +// ScaleMasked multiplies elements by a power of 2. // -// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI -func (x Int8x64) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16) Int32x16 +// This operation is applied selectively under a write mask. +// +// Asm: VSCALEFPS, CPU Feature: AVX512F +func (x Float32x8) ScaleMasked(y Float32x8, mask Mask32x8) Float32x8 -/* SaturatedUnsignedSignedQuadDotProdAccumulateMasked */ +// ScaleMasked multiplies elements by a power of 2. +// +// This operation is applied selectively under a write mask. 
+// +// Asm: VSCALEFPS, CPU Feature: AVX512F +func (x Float32x16) ScaleMasked(y Float32x16, mask Mask32x16) Float32x16 -// SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of x and y and then adds z. +// ScaleMasked multiplies elements by a power of 2. // // This operation is applied selectively under a write mask. // -// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI -func (x Int8x16) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int32x4, mask Mask32x4) Int32x4 +// Asm: VSCALEFPD, CPU Feature: AVX512F +func (x Float64x2) ScaleMasked(y Float64x2, mask Mask64x2) Float64x2 -// SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of x and y and then adds z. +// ScaleMasked multiplies elements by a power of 2. // // This operation is applied selectively under a write mask. // -// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI -func (x Int8x32) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int32x8, mask Mask32x8) Int32x8 +// Asm: VSCALEFPD, CPU Feature: AVX512F +func (x Float64x4) ScaleMasked(y Float64x4, mask Mask64x4) Float64x4 -// SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of x and y and then adds z. +// ScaleMasked multiplies elements by a power of 2. // // This operation is applied selectively under a write mask. // -// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI -func (x Int8x64) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int32x16, mask Mask32x16) Int32x16 +// Asm: VSCALEFPD, CPU Feature: AVX512F +func (x Float64x8) ScaleMasked(y Float64x8, mask Mask64x8) Float64x8 /* Set128 */ @@ -11753,6 +11413,242 @@ func (x Uint64x4) SubMasked(y Uint64x4, mask Mask64x4) Uint64x4 // Asm: VPSUBQ, CPU Feature: AVX512F func (x Uint64x8) SubMasked(y Uint64x8, mask Mask64x8) Uint64x8 +/* SubPairs */ + +// SubPairs horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VHSUBPS, CPU Feature: AVX +func (x Float32x4) SubPairs(y Float32x4) Float32x4 + +// SubPairs horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VHSUBPS, CPU Feature: AVX +func (x Float32x8) SubPairs(y Float32x8) Float32x8 + +// SubPairs horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VHSUBPD, CPU Feature: AVX +func (x Float64x2) SubPairs(y Float64x2) Float64x2 + +// SubPairs horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VHSUBPD, CPU Feature: AVX +func (x Float64x4) SubPairs(y Float64x4) Float64x4 + +// SubPairs horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBW, CPU Feature: AVX +func (x Int16x8) SubPairs(y Int16x8) Int16x8 + +// SubPairs horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. 
+// +// Asm: VPHSUBW, CPU Feature: AVX2 +func (x Int16x16) SubPairs(y Int16x16) Int16x16 + +// SubPairs horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBD, CPU Feature: AVX +func (x Int32x4) SubPairs(y Int32x4) Int32x4 + +// SubPairs horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBD, CPU Feature: AVX2 +func (x Int32x8) SubPairs(y Int32x8) Int32x8 + +// SubPairs horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBW, CPU Feature: AVX +func (x Uint16x8) SubPairs(y Uint16x8) Uint16x8 + +// SubPairs horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBW, CPU Feature: AVX2 +func (x Uint16x16) SubPairs(y Uint16x16) Uint16x16 + +// SubPairs horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBD, CPU Feature: AVX +func (x Uint32x4) SubPairs(y Uint32x4) Uint32x4 + +// SubPairs horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBD, CPU Feature: AVX2 +func (x Uint32x8) SubPairs(y Uint32x8) Uint32x8 + +/* SubPairsSaturated */ + +// SubPairsSaturated horizontally subtracts adjacent pairs of elements with saturation. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBSW, CPU Feature: AVX +func (x Int16x8) SubPairsSaturated(y Int16x8) Int16x8 + +// SubPairsSaturated horizontally subtracts adjacent pairs of elements with saturation. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBSW, CPU Feature: AVX2 +func (x Int16x16) SubPairsSaturated(y Int16x16) Int16x16 + +/* SubSaturated */ + +// SubSaturated subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSB, CPU Feature: AVX +func (x Int8x16) SubSaturated(y Int8x16) Int8x16 + +// SubSaturated subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSB, CPU Feature: AVX2 +func (x Int8x32) SubSaturated(y Int8x32) Int8x32 + +// SubSaturated subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSB, CPU Feature: AVX512BW +func (x Int8x64) SubSaturated(y Int8x64) Int8x64 + +// SubSaturated subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSW, CPU Feature: AVX +func (x Int16x8) SubSaturated(y Int16x8) Int16x8 + +// SubSaturated subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSW, CPU Feature: AVX2 +func (x Int16x16) SubSaturated(y Int16x16) Int16x16 + +// SubSaturated subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSW, CPU Feature: AVX512BW +func (x Int16x32) SubSaturated(y Int16x32) Int16x32 + +// SubSaturated subtracts corresponding elements of two vectors with saturation. 
+// +// Asm: VPSUBSB, CPU Feature: AVX +func (x Uint8x16) SubSaturated(y Uint8x16) Uint8x16 + +// SubSaturated subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSB, CPU Feature: AVX2 +func (x Uint8x32) SubSaturated(y Uint8x32) Uint8x32 + +// SubSaturated subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSB, CPU Feature: AVX512BW +func (x Uint8x64) SubSaturated(y Uint8x64) Uint8x64 + +// SubSaturated subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSW, CPU Feature: AVX +func (x Uint16x8) SubSaturated(y Uint16x8) Uint16x8 + +// SubSaturated subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSW, CPU Feature: AVX2 +func (x Uint16x16) SubSaturated(y Uint16x16) Uint16x16 + +// SubSaturated subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSW, CPU Feature: AVX512BW +func (x Uint16x32) SubSaturated(y Uint16x32) Uint16x32 + +/* SubSaturatedMasked */ + +// SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPSUBSB, CPU Feature: AVX512BW +func (x Int8x16) SubSaturatedMasked(y Int8x16, mask Mask8x16) Int8x16 + +// SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPSUBSB, CPU Feature: AVX512BW +func (x Int8x32) SubSaturatedMasked(y Int8x32, mask Mask8x32) Int8x32 + +// SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPSUBSB, CPU Feature: AVX512BW +func (x Int8x64) SubSaturatedMasked(y Int8x64, mask Mask8x64) Int8x64 + +// SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPSUBSW, CPU Feature: AVX512BW +func (x Int16x8) SubSaturatedMasked(y Int16x8, mask Mask16x8) Int16x8 + +// SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPSUBSW, CPU Feature: AVX512BW +func (x Int16x16) SubSaturatedMasked(y Int16x16, mask Mask16x16) Int16x16 + +// SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPSUBSW, CPU Feature: AVX512BW +func (x Int16x32) SubSaturatedMasked(y Int16x32, mask Mask16x32) Int16x32 + +// SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPSUBSB, CPU Feature: AVX512BW +func (x Uint8x16) SubSaturatedMasked(y Uint8x16, mask Mask8x16) Uint8x16 + +// SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPSUBSB, CPU Feature: AVX512BW +func (x Uint8x32) SubSaturatedMasked(y Uint8x32, mask Mask8x32) Uint8x32 + +// SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. +// +// This operation is applied selectively under a write mask. 
+// +// Asm: VPSUBSB, CPU Feature: AVX512BW +func (x Uint8x64) SubSaturatedMasked(y Uint8x64, mask Mask8x64) Uint8x64 + +// SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPSUBSW, CPU Feature: AVX512BW +func (x Uint16x8) SubSaturatedMasked(y Uint16x8, mask Mask16x8) Uint16x8 + +// SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPSUBSW, CPU Feature: AVX512BW +func (x Uint16x16) SubSaturatedMasked(y Uint16x16, mask Mask16x16) Uint16x16 + +// SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPSUBSW, CPU Feature: AVX512BW +func (x Uint16x32) SubSaturatedMasked(y Uint16x32, mask Mask16x32) Uint16x32 + /* Trunc */ // Trunc truncates elements towards zero. @@ -11775,105 +11671,205 @@ func (x Float64x2) Trunc() Float64x2 // Asm: VROUNDPD, CPU Feature: AVX func (x Float64x4) Trunc() Float64x4 -/* TruncWithPrecision */ +/* TruncScaled */ -// TruncWithPrecision truncates elements with specified precision. +// TruncScaled truncates elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x4) TruncWithPrecision(prec uint8) Float32x4 +func (x Float32x4) TruncScaled(prec uint8) Float32x4 -// TruncWithPrecision truncates elements with specified precision. +// TruncScaled truncates elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x8) TruncWithPrecision(prec uint8) Float32x8 +func (x Float32x8) TruncScaled(prec uint8) Float32x8 -// TruncWithPrecision truncates elements with specified precision. +// TruncScaled truncates elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x16) TruncWithPrecision(prec uint8) Float32x16 +func (x Float32x16) TruncScaled(prec uint8) Float32x16 -// TruncWithPrecision truncates elements with specified precision. +// TruncScaled truncates elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x2) TruncWithPrecision(prec uint8) Float64x2 +func (x Float64x2) TruncScaled(prec uint8) Float64x2 -// TruncWithPrecision truncates elements with specified precision. +// TruncScaled truncates elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x4) TruncWithPrecision(prec uint8) Float64x4 +func (x Float64x4) TruncScaled(prec uint8) Float64x4 -// TruncWithPrecision truncates elements with specified precision. +// TruncScaled truncates elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x8) TruncWithPrecision(prec uint8) Float64x8 +func (x Float64x8) TruncScaled(prec uint8) Float64x8 -/* TruncWithPrecisionMasked */ +/* TruncScaledMasked */ -// TruncWithPrecisionMasked truncates elements with specified precision. +// TruncScaledMasked truncates elements with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x4) TruncWithPrecisionMasked(prec uint8, mask Mask32x4) Float32x4 +func (x Float32x4) TruncScaledMasked(prec uint8, mask Mask32x4) Float32x4 -// TruncWithPrecisionMasked truncates elements with specified precision. +// TruncScaledMasked truncates elements with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x8) TruncWithPrecisionMasked(prec uint8, mask Mask32x8) Float32x8 +func (x Float32x8) TruncScaledMasked(prec uint8, mask Mask32x8) Float32x8 -// TruncWithPrecisionMasked truncates elements with specified precision. +// TruncScaledMasked truncates elements with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x16) TruncWithPrecisionMasked(prec uint8, mask Mask32x16) Float32x16 +func (x Float32x16) TruncScaledMasked(prec uint8, mask Mask32x16) Float32x16 -// TruncWithPrecisionMasked truncates elements with specified precision. +// TruncScaledMasked truncates elements with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x2) TruncWithPrecisionMasked(prec uint8, mask Mask64x2) Float64x2 +func (x Float64x2) TruncScaledMasked(prec uint8, mask Mask64x2) Float64x2 -// TruncWithPrecisionMasked truncates elements with specified precision. +// TruncScaledMasked truncates elements with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x4) TruncWithPrecisionMasked(prec uint8, mask Mask64x4) Float64x4 +func (x Float64x4) TruncScaledMasked(prec uint8, mask Mask64x4) Float64x4 -// TruncWithPrecisionMasked truncates elements with specified precision. +// TruncScaledMasked truncates elements with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x8) TruncWithPrecisionMasked(prec uint8, mask Mask64x8) Float64x8 +func (x Float64x8) TruncScaledMasked(prec uint8, mask Mask64x8) Float64x8 + +/* TruncScaledResidue */ + +// TruncScaledResidue computes the difference after truncating with specified precision. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. 
+// +// Asm: VREDUCEPS, CPU Feature: AVX512DQ +func (x Float32x4) TruncScaledResidue(prec uint8) Float32x4 + +// TruncScaledResidue computes the difference after truncating with specified precision. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPS, CPU Feature: AVX512DQ +func (x Float32x8) TruncScaledResidue(prec uint8) Float32x8 + +// TruncScaledResidue computes the difference after truncating with specified precision. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPS, CPU Feature: AVX512DQ +func (x Float32x16) TruncScaledResidue(prec uint8) Float32x16 + +// TruncScaledResidue computes the difference after truncating with specified precision. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPD, CPU Feature: AVX512DQ +func (x Float64x2) TruncScaledResidue(prec uint8) Float64x2 + +// TruncScaledResidue computes the difference after truncating with specified precision. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPD, CPU Feature: AVX512DQ +func (x Float64x4) TruncScaledResidue(prec uint8) Float64x4 + +// TruncScaledResidue computes the difference after truncating with specified precision. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPD, CPU Feature: AVX512DQ +func (x Float64x8) TruncScaledResidue(prec uint8) Float64x8 + +/* TruncScaledResidueMasked */ + +// TruncScaledResidueMasked computes the difference after truncating with specified precision. +// +// This operation is applied selectively under a write mask. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPS, CPU Feature: AVX512DQ +func (x Float32x4) TruncScaledResidueMasked(prec uint8, mask Mask32x4) Float32x4 + +// TruncScaledResidueMasked computes the difference after truncating with specified precision. +// +// This operation is applied selectively under a write mask. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPS, CPU Feature: AVX512DQ +func (x Float32x8) TruncScaledResidueMasked(prec uint8, mask Mask32x8) Float32x8 + +// TruncScaledResidueMasked computes the difference after truncating with specified precision. +// +// This operation is applied selectively under a write mask. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPS, CPU Feature: AVX512DQ +func (x Float32x16) TruncScaledResidueMasked(prec uint8, mask Mask32x16) Float32x16 + +// TruncScaledResidueMasked computes the difference after truncating with specified precision. +// +// This operation is applied selectively under a write mask. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPD, CPU Feature: AVX512DQ +func (x Float64x2) TruncScaledResidueMasked(prec uint8, mask Mask64x2) Float64x2 + +// TruncScaledResidueMasked computes the difference after truncating with specified precision. +// +// This operation is applied selectively under a write mask. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. 
+// +// Asm: VREDUCEPD, CPU Feature: AVX512DQ +func (x Float64x4) TruncScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4 + +// TruncScaledResidueMasked computes the difference after truncating with specified precision. +// +// This operation is applied selectively under a write mask. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPD, CPU Feature: AVX512DQ +func (x Float64x8) TruncScaledResidueMasked(prec uint8, mask Mask64x8) Float64x8 /* UnsignedSignedQuadDotProdAccumulate */ diff --git a/src/simd/unary_test.go b/src/simd/unary_test.go index 4263b81cd734ab..c9fdfff0ffc61e 100644 --- a/src/simd/unary_test.go +++ b/src/simd/unary_test.go @@ -89,20 +89,20 @@ func TestToInt32(t *testing.T) { testFloat32x8UnaryToInt32(t, simd.Float32x8.ConvertToInt32, toInt32Slice[float32]) } -func TestDiffWithCeilWithPrecision(t *testing.T) { +func TestCeilScaledResidue(t *testing.T) { if !simd.HasAVX512() { t.Skip("Needs AVX512") } testFloat64x8UnaryFlaky(t, - func(x simd.Float64x8) simd.Float64x8 { return x.DiffWithCeilWithPrecision(0) }, + func(x simd.Float64x8) simd.Float64x8 { return x.CeilScaledResidue(0) }, map1(ceilResidueForPrecision[float64](0)), 0.001) testFloat64x8UnaryFlaky(t, - func(x simd.Float64x8) simd.Float64x8 { return x.DiffWithCeilWithPrecision(1) }, + func(x simd.Float64x8) simd.Float64x8 { return x.CeilScaledResidue(1) }, map1(ceilResidueForPrecision[float64](1)), 0.001) testFloat64x8Unary(t, - func(x simd.Float64x8) simd.Float64x8 { return x.Sub(x.CeilWithPrecision(0)) }, + func(x simd.Float64x8) simd.Float64x8 { return x.Sub(x.CeilScaled(0)) }, map1[float64](func(x float64) float64 { return x - math.Ceil(x) })) } From 82d056ddd7378ee23ab073c7a195d92cfc4a59d6 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Tue, 5 Aug 2025 04:28:44 +0000 Subject: [PATCH 112/139] [dev.simd] cmd/compile: add ShiftAll immediate variant This CL is generated by CL 693136. 
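A sketch of the intended effect (illustrative only; it assumes the experimental
"simd" import path and a ShiftAllLeft method taking a uint64 shift count, which
are not spelled out in this CL): a compile-time-constant count can now select
the immediate encoding, while a variable count keeps the register form.

    package shiftexample

    import "simd" // assumed import path under GOEXPERIMENT=simd

    // Constant count: matches (ShiftAllLeftInt16x8 x (MOVQconst [3])) in the
    // rules below, so it can lower to VPSLLW128const with an imm8 operand.
    func shiftConst(x simd.Int16x8) simd.Int16x8 {
            return x.ShiftAllLeft(3)
    }

    // Non-constant count: falls through to the existing register form VPSLLW128.
    func shiftVar(x simd.Int16x8, n uint64) simd.Int16x8 {
            return x.ShiftAllLeft(n)
    }
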
Change-Id: Ifd2278d3f927efa008a14cc5e592e7c14b7120ff Reviewed-on: https://go-review.googlesource.com/c/go/+/693157 LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui Reviewed-by: David Chase --- src/cmd/compile/internal/amd64/simdssa.go | 87 +- .../compile/internal/ssa/_gen/simdAMD64.rules | 144 +- .../compile/internal/ssa/_gen/simdAMD64ops.go | 54 + src/cmd/compile/internal/ssa/opGen.go | 837 ++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 1696 +++++++++++++++-- src/simd/simd_test.go | 18 + 6 files changed, 2687 insertions(+), 149 deletions(-) diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 76ef42576d32c3..bd6af6221d54c4 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -689,7 +689,34 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPRORD512, ssa.OpAMD64VPRORQ128, ssa.OpAMD64VPRORQ256, - ssa.OpAMD64VPRORQ512: + ssa.OpAMD64VPRORQ512, + ssa.OpAMD64VPSLLW128const, + ssa.OpAMD64VPSLLW256const, + ssa.OpAMD64VPSLLW512const, + ssa.OpAMD64VPSLLD128const, + ssa.OpAMD64VPSLLD256const, + ssa.OpAMD64VPSLLD512const, + ssa.OpAMD64VPSLLQ128const, + ssa.OpAMD64VPSLLQ256const, + ssa.OpAMD64VPSLLQ512const, + ssa.OpAMD64VPSRLW128const, + ssa.OpAMD64VPSRLW256const, + ssa.OpAMD64VPSRLW512const, + ssa.OpAMD64VPSRLD128const, + ssa.OpAMD64VPSRLD256const, + ssa.OpAMD64VPSRLD512const, + ssa.OpAMD64VPSRLQ128const, + ssa.OpAMD64VPSRLQ256const, + ssa.OpAMD64VPSRLQ512const, + ssa.OpAMD64VPSRAW128const, + ssa.OpAMD64VPSRAW256const, + ssa.OpAMD64VPSRAW512const, + ssa.OpAMD64VPSRAD128const, + ssa.OpAMD64VPSRAD256const, + ssa.OpAMD64VPSRAD512const, + ssa.OpAMD64VPSRAQ128const, + ssa.OpAMD64VPSRAQ256const, + ssa.OpAMD64VPSRAQ512const: p = simdV11Imm8(s, v) case ssa.OpAMD64VRNDSCALEPSMasked128, @@ -715,7 +742,34 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPRORDMasked512, ssa.OpAMD64VPRORQMasked128, ssa.OpAMD64VPRORQMasked256, - ssa.OpAMD64VPRORQMasked512: + ssa.OpAMD64VPRORQMasked512, + ssa.OpAMD64VPSLLWMasked128const, + ssa.OpAMD64VPSLLWMasked256const, + ssa.OpAMD64VPSLLWMasked512const, + ssa.OpAMD64VPSLLDMasked128const, + ssa.OpAMD64VPSLLDMasked256const, + ssa.OpAMD64VPSLLDMasked512const, + ssa.OpAMD64VPSLLQMasked128const, + ssa.OpAMD64VPSLLQMasked256const, + ssa.OpAMD64VPSLLQMasked512const, + ssa.OpAMD64VPSRLWMasked128const, + ssa.OpAMD64VPSRLWMasked256const, + ssa.OpAMD64VPSRLWMasked512const, + ssa.OpAMD64VPSRLDMasked128const, + ssa.OpAMD64VPSRLDMasked256const, + ssa.OpAMD64VPSRLDMasked512const, + ssa.OpAMD64VPSRLQMasked128const, + ssa.OpAMD64VPSRLQMasked256const, + ssa.OpAMD64VPSRLQMasked512const, + ssa.OpAMD64VPSRAWMasked128const, + ssa.OpAMD64VPSRAWMasked256const, + ssa.OpAMD64VPSRAWMasked512const, + ssa.OpAMD64VPSRADMasked128const, + ssa.OpAMD64VPSRADMasked256const, + ssa.OpAMD64VPSRADMasked512const, + ssa.OpAMD64VPSRAQMasked128const, + ssa.OpAMD64VPSRAQMasked256const, + ssa.OpAMD64VPSRAQMasked512const: p = simdVkvImm8(s, v) case ssa.OpAMD64VDPPS128, @@ -1497,7 +1551,34 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPXORDMasked512, ssa.OpAMD64VPXORQMasked128, ssa.OpAMD64VPXORQMasked256, - ssa.OpAMD64VPXORQMasked512: + ssa.OpAMD64VPXORQMasked512, + ssa.OpAMD64VPSLLWMasked128const, + ssa.OpAMD64VPSLLWMasked256const, + ssa.OpAMD64VPSLLWMasked512const, + ssa.OpAMD64VPSLLDMasked128const, + ssa.OpAMD64VPSLLDMasked256const, + ssa.OpAMD64VPSLLDMasked512const, + ssa.OpAMD64VPSLLQMasked128const, + 
ssa.OpAMD64VPSLLQMasked256const, + ssa.OpAMD64VPSLLQMasked512const, + ssa.OpAMD64VPSRLWMasked128const, + ssa.OpAMD64VPSRLWMasked256const, + ssa.OpAMD64VPSRLWMasked512const, + ssa.OpAMD64VPSRLDMasked128const, + ssa.OpAMD64VPSRLDMasked256const, + ssa.OpAMD64VPSRLDMasked512const, + ssa.OpAMD64VPSRLQMasked128const, + ssa.OpAMD64VPSRLQMasked256const, + ssa.OpAMD64VPSRLQMasked512const, + ssa.OpAMD64VPSRAWMasked128const, + ssa.OpAMD64VPSRAWMasked256const, + ssa.OpAMD64VPSRAWMasked512const, + ssa.OpAMD64VPSRADMasked128const, + ssa.OpAMD64VPSRADMasked256const, + ssa.OpAMD64VPSRADMasked512const, + ssa.OpAMD64VPSRAQMasked128const, + ssa.OpAMD64VPSRAQMasked256const, + ssa.OpAMD64VPSRAQMasked512const: x86.ParseSuffix(p, "Z") } diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 060f220c7de758..b8bd0d9b4cae7b 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -1345,24 +1345,42 @@ (SetElemUint16x8 ...) => (VPINSRW128 ...) (SetElemUint32x4 ...) => (VPINSRD128 ...) (SetElemUint64x2 ...) => (VPINSRQ128 ...) -(ShiftAllLeftInt16x8 ...) => (VPSLLW128 ...) -(ShiftAllLeftInt16x16 ...) => (VPSLLW256 ...) -(ShiftAllLeftInt16x32 ...) => (VPSLLW512 ...) -(ShiftAllLeftInt32x4 ...) => (VPSLLD128 ...) -(ShiftAllLeftInt32x8 ...) => (VPSLLD256 ...) -(ShiftAllLeftInt32x16 ...) => (VPSLLD512 ...) -(ShiftAllLeftInt64x2 ...) => (VPSLLQ128 ...) -(ShiftAllLeftInt64x4 ...) => (VPSLLQ256 ...) -(ShiftAllLeftInt64x8 ...) => (VPSLLQ512 ...) -(ShiftAllLeftUint16x8 ...) => (VPSLLW128 ...) -(ShiftAllLeftUint16x16 ...) => (VPSLLW256 ...) -(ShiftAllLeftUint16x32 ...) => (VPSLLW512 ...) -(ShiftAllLeftUint32x4 ...) => (VPSLLD128 ...) -(ShiftAllLeftUint32x8 ...) => (VPSLLD256 ...) -(ShiftAllLeftUint32x16 ...) => (VPSLLD512 ...) -(ShiftAllLeftUint64x2 ...) => (VPSLLQ128 ...) -(ShiftAllLeftUint64x4 ...) => (VPSLLQ256 ...) -(ShiftAllLeftUint64x8 ...) => (VPSLLQ512 ...) 
+(ShiftAllLeftInt16x8 x (MOVQconst [c])) => (VPSLLW128const [int8(c)] x) +(ShiftAllLeftInt16x8 x y) => (VPSLLW128 x y) +(ShiftAllLeftInt16x16 x (MOVQconst [c])) => (VPSLLW256const [int8(c)] x) +(ShiftAllLeftInt16x16 x y) => (VPSLLW256 x y) +(ShiftAllLeftInt16x32 x (MOVQconst [c])) => (VPSLLW512const [int8(c)] x) +(ShiftAllLeftInt16x32 x y) => (VPSLLW512 x y) +(ShiftAllLeftInt32x4 x (MOVQconst [c])) => (VPSLLD128const [int8(c)] x) +(ShiftAllLeftInt32x4 x y) => (VPSLLD128 x y) +(ShiftAllLeftInt32x8 x (MOVQconst [c])) => (VPSLLD256const [int8(c)] x) +(ShiftAllLeftInt32x8 x y) => (VPSLLD256 x y) +(ShiftAllLeftInt32x16 x (MOVQconst [c])) => (VPSLLD512const [int8(c)] x) +(ShiftAllLeftInt32x16 x y) => (VPSLLD512 x y) +(ShiftAllLeftInt64x2 x (MOVQconst [c])) => (VPSLLQ128const [int8(c)] x) +(ShiftAllLeftInt64x2 x y) => (VPSLLQ128 x y) +(ShiftAllLeftInt64x4 x (MOVQconst [c])) => (VPSLLQ256const [int8(c)] x) +(ShiftAllLeftInt64x4 x y) => (VPSLLQ256 x y) +(ShiftAllLeftInt64x8 x (MOVQconst [c])) => (VPSLLQ512const [int8(c)] x) +(ShiftAllLeftInt64x8 x y) => (VPSLLQ512 x y) +(ShiftAllLeftUint16x8 x (MOVQconst [c])) => (VPSLLW128const [int8(c)] x) +(ShiftAllLeftUint16x8 x y) => (VPSLLW128 x y) +(ShiftAllLeftUint16x16 x (MOVQconst [c])) => (VPSLLW256const [int8(c)] x) +(ShiftAllLeftUint16x16 x y) => (VPSLLW256 x y) +(ShiftAllLeftUint16x32 x (MOVQconst [c])) => (VPSLLW512const [int8(c)] x) +(ShiftAllLeftUint16x32 x y) => (VPSLLW512 x y) +(ShiftAllLeftUint32x4 x (MOVQconst [c])) => (VPSLLD128const [int8(c)] x) +(ShiftAllLeftUint32x4 x y) => (VPSLLD128 x y) +(ShiftAllLeftUint32x8 x (MOVQconst [c])) => (VPSLLD256const [int8(c)] x) +(ShiftAllLeftUint32x8 x y) => (VPSLLD256 x y) +(ShiftAllLeftUint32x16 x (MOVQconst [c])) => (VPSLLD512const [int8(c)] x) +(ShiftAllLeftUint32x16 x y) => (VPSLLD512 x y) +(ShiftAllLeftUint64x2 x (MOVQconst [c])) => (VPSLLQ128const [int8(c)] x) +(ShiftAllLeftUint64x2 x y) => (VPSLLQ128 x y) +(ShiftAllLeftUint64x4 x (MOVQconst [c])) => (VPSLLQ256const [int8(c)] x) +(ShiftAllLeftUint64x4 x y) => (VPSLLQ256 x y) +(ShiftAllLeftUint64x8 x (MOVQconst [c])) => (VPSLLQ512const [int8(c)] x) +(ShiftAllLeftUint64x8 x y) => (VPSLLQ512 x y) (ShiftAllLeftConcatInt16x8 ...) => (VPSHLDW128 ...) (ShiftAllLeftConcatInt16x16 ...) => (VPSHLDW256 ...) (ShiftAllLeftConcatInt16x32 ...) => (VPSHLDW512 ...) 
@@ -1399,42 +1417,78 @@ (ShiftAllLeftConcatMaskedUint64x2 [a] x y mask) => (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) (ShiftAllLeftConcatMaskedUint64x4 [a] x y mask) => (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) (ShiftAllLeftConcatMaskedUint64x8 [a] x y mask) => (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) +(ShiftAllLeftMaskedInt16x8 x (MOVQconst [c]) mask) => (VPSLLWMasked128const [int8(c)] x (VPMOVVec16x8ToM mask)) (ShiftAllLeftMaskedInt16x8 x y mask) => (VPSLLWMasked128 x y (VPMOVVec16x8ToM mask)) +(ShiftAllLeftMaskedInt16x16 x (MOVQconst [c]) mask) => (VPSLLWMasked256const [int8(c)] x (VPMOVVec16x16ToM mask)) (ShiftAllLeftMaskedInt16x16 x y mask) => (VPSLLWMasked256 x y (VPMOVVec16x16ToM mask)) +(ShiftAllLeftMaskedInt16x32 x (MOVQconst [c]) mask) => (VPSLLWMasked512const [int8(c)] x (VPMOVVec16x32ToM mask)) (ShiftAllLeftMaskedInt16x32 x y mask) => (VPSLLWMasked512 x y (VPMOVVec16x32ToM mask)) +(ShiftAllLeftMaskedInt32x4 x (MOVQconst [c]) mask) => (VPSLLDMasked128const [int8(c)] x (VPMOVVec32x4ToM mask)) (ShiftAllLeftMaskedInt32x4 x y mask) => (VPSLLDMasked128 x y (VPMOVVec32x4ToM mask)) +(ShiftAllLeftMaskedInt32x8 x (MOVQconst [c]) mask) => (VPSLLDMasked256const [int8(c)] x (VPMOVVec32x8ToM mask)) (ShiftAllLeftMaskedInt32x8 x y mask) => (VPSLLDMasked256 x y (VPMOVVec32x8ToM mask)) +(ShiftAllLeftMaskedInt32x16 x (MOVQconst [c]) mask) => (VPSLLDMasked512const [int8(c)] x (VPMOVVec32x16ToM mask)) (ShiftAllLeftMaskedInt32x16 x y mask) => (VPSLLDMasked512 x y (VPMOVVec32x16ToM mask)) +(ShiftAllLeftMaskedInt64x2 x (MOVQconst [c]) mask) => (VPSLLQMasked128const [int8(c)] x (VPMOVVec64x2ToM mask)) (ShiftAllLeftMaskedInt64x2 x y mask) => (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) +(ShiftAllLeftMaskedInt64x4 x (MOVQconst [c]) mask) => (VPSLLQMasked256const [int8(c)] x (VPMOVVec64x4ToM mask)) (ShiftAllLeftMaskedInt64x4 x y mask) => (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) +(ShiftAllLeftMaskedInt64x8 x (MOVQconst [c]) mask) => (VPSLLQMasked512const [int8(c)] x (VPMOVVec64x8ToM mask)) (ShiftAllLeftMaskedInt64x8 x y mask) => (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) +(ShiftAllLeftMaskedUint16x8 x (MOVQconst [c]) mask) => (VPSLLWMasked128const [int8(c)] x (VPMOVVec16x8ToM mask)) (ShiftAllLeftMaskedUint16x8 x y mask) => (VPSLLWMasked128 x y (VPMOVVec16x8ToM mask)) +(ShiftAllLeftMaskedUint16x16 x (MOVQconst [c]) mask) => (VPSLLWMasked256const [int8(c)] x (VPMOVVec16x16ToM mask)) (ShiftAllLeftMaskedUint16x16 x y mask) => (VPSLLWMasked256 x y (VPMOVVec16x16ToM mask)) +(ShiftAllLeftMaskedUint16x32 x (MOVQconst [c]) mask) => (VPSLLWMasked512const [int8(c)] x (VPMOVVec16x32ToM mask)) (ShiftAllLeftMaskedUint16x32 x y mask) => (VPSLLWMasked512 x y (VPMOVVec16x32ToM mask)) +(ShiftAllLeftMaskedUint32x4 x (MOVQconst [c]) mask) => (VPSLLDMasked128const [int8(c)] x (VPMOVVec32x4ToM mask)) (ShiftAllLeftMaskedUint32x4 x y mask) => (VPSLLDMasked128 x y (VPMOVVec32x4ToM mask)) +(ShiftAllLeftMaskedUint32x8 x (MOVQconst [c]) mask) => (VPSLLDMasked256const [int8(c)] x (VPMOVVec32x8ToM mask)) (ShiftAllLeftMaskedUint32x8 x y mask) => (VPSLLDMasked256 x y (VPMOVVec32x8ToM mask)) +(ShiftAllLeftMaskedUint32x16 x (MOVQconst [c]) mask) => (VPSLLDMasked512const [int8(c)] x (VPMOVVec32x16ToM mask)) (ShiftAllLeftMaskedUint32x16 x y mask) => (VPSLLDMasked512 x y (VPMOVVec32x16ToM mask)) +(ShiftAllLeftMaskedUint64x2 x (MOVQconst [c]) mask) => (VPSLLQMasked128const [int8(c)] x (VPMOVVec64x2ToM mask)) (ShiftAllLeftMaskedUint64x2 x y mask) => (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) 
+(ShiftAllLeftMaskedUint64x4 x (MOVQconst [c]) mask) => (VPSLLQMasked256const [int8(c)] x (VPMOVVec64x4ToM mask)) (ShiftAllLeftMaskedUint64x4 x y mask) => (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) +(ShiftAllLeftMaskedUint64x8 x (MOVQconst [c]) mask) => (VPSLLQMasked512const [int8(c)] x (VPMOVVec64x8ToM mask)) (ShiftAllLeftMaskedUint64x8 x y mask) => (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) -(ShiftAllRightInt16x8 ...) => (VPSRAW128 ...) -(ShiftAllRightInt16x16 ...) => (VPSRAW256 ...) -(ShiftAllRightInt16x32 ...) => (VPSRAW512 ...) -(ShiftAllRightInt32x4 ...) => (VPSRAD128 ...) -(ShiftAllRightInt32x8 ...) => (VPSRAD256 ...) -(ShiftAllRightInt32x16 ...) => (VPSRAD512 ...) -(ShiftAllRightInt64x2 ...) => (VPSRAQ128 ...) -(ShiftAllRightInt64x4 ...) => (VPSRAQ256 ...) -(ShiftAllRightInt64x8 ...) => (VPSRAQ512 ...) -(ShiftAllRightUint16x8 ...) => (VPSRLW128 ...) -(ShiftAllRightUint16x16 ...) => (VPSRLW256 ...) -(ShiftAllRightUint16x32 ...) => (VPSRLW512 ...) -(ShiftAllRightUint32x4 ...) => (VPSRLD128 ...) -(ShiftAllRightUint32x8 ...) => (VPSRLD256 ...) -(ShiftAllRightUint32x16 ...) => (VPSRLD512 ...) -(ShiftAllRightUint64x2 ...) => (VPSRLQ128 ...) -(ShiftAllRightUint64x4 ...) => (VPSRLQ256 ...) -(ShiftAllRightUint64x8 ...) => (VPSRLQ512 ...) +(ShiftAllRightInt16x8 x (MOVQconst [c])) => (VPSRAW128const [int8(c)] x) +(ShiftAllRightInt16x8 x y) => (VPSRAW128 x y) +(ShiftAllRightInt16x16 x (MOVQconst [c])) => (VPSRAW256const [int8(c)] x) +(ShiftAllRightInt16x16 x y) => (VPSRAW256 x y) +(ShiftAllRightInt16x32 x (MOVQconst [c])) => (VPSRAW512const [int8(c)] x) +(ShiftAllRightInt16x32 x y) => (VPSRAW512 x y) +(ShiftAllRightInt32x4 x (MOVQconst [c])) => (VPSRAD128const [int8(c)] x) +(ShiftAllRightInt32x4 x y) => (VPSRAD128 x y) +(ShiftAllRightInt32x8 x (MOVQconst [c])) => (VPSRAD256const [int8(c)] x) +(ShiftAllRightInt32x8 x y) => (VPSRAD256 x y) +(ShiftAllRightInt32x16 x (MOVQconst [c])) => (VPSRAD512const [int8(c)] x) +(ShiftAllRightInt32x16 x y) => (VPSRAD512 x y) +(ShiftAllRightInt64x2 x (MOVQconst [c])) => (VPSRAQ128const [int8(c)] x) +(ShiftAllRightInt64x2 x y) => (VPSRAQ128 x y) +(ShiftAllRightInt64x4 x (MOVQconst [c])) => (VPSRAQ256const [int8(c)] x) +(ShiftAllRightInt64x4 x y) => (VPSRAQ256 x y) +(ShiftAllRightInt64x8 x (MOVQconst [c])) => (VPSRAQ512const [int8(c)] x) +(ShiftAllRightInt64x8 x y) => (VPSRAQ512 x y) +(ShiftAllRightUint16x8 x (MOVQconst [c])) => (VPSRLW128const [int8(c)] x) +(ShiftAllRightUint16x8 x y) => (VPSRLW128 x y) +(ShiftAllRightUint16x16 x (MOVQconst [c])) => (VPSRLW256const [int8(c)] x) +(ShiftAllRightUint16x16 x y) => (VPSRLW256 x y) +(ShiftAllRightUint16x32 x (MOVQconst [c])) => (VPSRLW512const [int8(c)] x) +(ShiftAllRightUint16x32 x y) => (VPSRLW512 x y) +(ShiftAllRightUint32x4 x (MOVQconst [c])) => (VPSRLD128const [int8(c)] x) +(ShiftAllRightUint32x4 x y) => (VPSRLD128 x y) +(ShiftAllRightUint32x8 x (MOVQconst [c])) => (VPSRLD256const [int8(c)] x) +(ShiftAllRightUint32x8 x y) => (VPSRLD256 x y) +(ShiftAllRightUint32x16 x (MOVQconst [c])) => (VPSRLD512const [int8(c)] x) +(ShiftAllRightUint32x16 x y) => (VPSRLD512 x y) +(ShiftAllRightUint64x2 x (MOVQconst [c])) => (VPSRLQ128const [int8(c)] x) +(ShiftAllRightUint64x2 x y) => (VPSRLQ128 x y) +(ShiftAllRightUint64x4 x (MOVQconst [c])) => (VPSRLQ256const [int8(c)] x) +(ShiftAllRightUint64x4 x y) => (VPSRLQ256 x y) +(ShiftAllRightUint64x8 x (MOVQconst [c])) => (VPSRLQ512const [int8(c)] x) +(ShiftAllRightUint64x8 x y) => (VPSRLQ512 x y) (ShiftAllRightConcatInt16x8 ...) => (VPSHRDW128 ...) 
(ShiftAllRightConcatInt16x16 ...) => (VPSHRDW256 ...) (ShiftAllRightConcatInt16x32 ...) => (VPSHRDW512 ...) @@ -1471,23 +1525,41 @@ (ShiftAllRightConcatMaskedUint64x2 [a] x y mask) => (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) (ShiftAllRightConcatMaskedUint64x4 [a] x y mask) => (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) (ShiftAllRightConcatMaskedUint64x8 [a] x y mask) => (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) +(ShiftAllRightMaskedInt16x8 x (MOVQconst [c]) mask) => (VPSRAWMasked128const [int8(c)] x (VPMOVVec16x8ToM mask)) (ShiftAllRightMaskedInt16x8 x y mask) => (VPSRAWMasked128 x y (VPMOVVec16x8ToM mask)) +(ShiftAllRightMaskedInt16x16 x (MOVQconst [c]) mask) => (VPSRAWMasked256const [int8(c)] x (VPMOVVec16x16ToM mask)) (ShiftAllRightMaskedInt16x16 x y mask) => (VPSRAWMasked256 x y (VPMOVVec16x16ToM mask)) +(ShiftAllRightMaskedInt16x32 x (MOVQconst [c]) mask) => (VPSRAWMasked512const [int8(c)] x (VPMOVVec16x32ToM mask)) (ShiftAllRightMaskedInt16x32 x y mask) => (VPSRAWMasked512 x y (VPMOVVec16x32ToM mask)) +(ShiftAllRightMaskedInt32x4 x (MOVQconst [c]) mask) => (VPSRADMasked128const [int8(c)] x (VPMOVVec32x4ToM mask)) (ShiftAllRightMaskedInt32x4 x y mask) => (VPSRADMasked128 x y (VPMOVVec32x4ToM mask)) +(ShiftAllRightMaskedInt32x8 x (MOVQconst [c]) mask) => (VPSRADMasked256const [int8(c)] x (VPMOVVec32x8ToM mask)) (ShiftAllRightMaskedInt32x8 x y mask) => (VPSRADMasked256 x y (VPMOVVec32x8ToM mask)) +(ShiftAllRightMaskedInt32x16 x (MOVQconst [c]) mask) => (VPSRADMasked512const [int8(c)] x (VPMOVVec32x16ToM mask)) (ShiftAllRightMaskedInt32x16 x y mask) => (VPSRADMasked512 x y (VPMOVVec32x16ToM mask)) +(ShiftAllRightMaskedInt64x2 x (MOVQconst [c]) mask) => (VPSRAQMasked128const [int8(c)] x (VPMOVVec64x2ToM mask)) (ShiftAllRightMaskedInt64x2 x y mask) => (VPSRAQMasked128 x y (VPMOVVec64x2ToM mask)) +(ShiftAllRightMaskedInt64x4 x (MOVQconst [c]) mask) => (VPSRAQMasked256const [int8(c)] x (VPMOVVec64x4ToM mask)) (ShiftAllRightMaskedInt64x4 x y mask) => (VPSRAQMasked256 x y (VPMOVVec64x4ToM mask)) +(ShiftAllRightMaskedInt64x8 x (MOVQconst [c]) mask) => (VPSRAQMasked512const [int8(c)] x (VPMOVVec64x8ToM mask)) (ShiftAllRightMaskedInt64x8 x y mask) => (VPSRAQMasked512 x y (VPMOVVec64x8ToM mask)) +(ShiftAllRightMaskedUint16x8 x (MOVQconst [c]) mask) => (VPSRLWMasked128const [int8(c)] x (VPMOVVec16x8ToM mask)) (ShiftAllRightMaskedUint16x8 x y mask) => (VPSRLWMasked128 x y (VPMOVVec16x8ToM mask)) +(ShiftAllRightMaskedUint16x16 x (MOVQconst [c]) mask) => (VPSRLWMasked256const [int8(c)] x (VPMOVVec16x16ToM mask)) (ShiftAllRightMaskedUint16x16 x y mask) => (VPSRLWMasked256 x y (VPMOVVec16x16ToM mask)) +(ShiftAllRightMaskedUint16x32 x (MOVQconst [c]) mask) => (VPSRLWMasked512const [int8(c)] x (VPMOVVec16x32ToM mask)) (ShiftAllRightMaskedUint16x32 x y mask) => (VPSRLWMasked512 x y (VPMOVVec16x32ToM mask)) +(ShiftAllRightMaskedUint32x4 x (MOVQconst [c]) mask) => (VPSRLDMasked128const [int8(c)] x (VPMOVVec32x4ToM mask)) (ShiftAllRightMaskedUint32x4 x y mask) => (VPSRLDMasked128 x y (VPMOVVec32x4ToM mask)) +(ShiftAllRightMaskedUint32x8 x (MOVQconst [c]) mask) => (VPSRLDMasked256const [int8(c)] x (VPMOVVec32x8ToM mask)) (ShiftAllRightMaskedUint32x8 x y mask) => (VPSRLDMasked256 x y (VPMOVVec32x8ToM mask)) +(ShiftAllRightMaskedUint32x16 x (MOVQconst [c]) mask) => (VPSRLDMasked512const [int8(c)] x (VPMOVVec32x16ToM mask)) (ShiftAllRightMaskedUint32x16 x y mask) => (VPSRLDMasked512 x y (VPMOVVec32x16ToM mask)) +(ShiftAllRightMaskedUint64x2 x (MOVQconst [c]) mask) => (VPSRLQMasked128const 
[int8(c)] x (VPMOVVec64x2ToM mask)) (ShiftAllRightMaskedUint64x2 x y mask) => (VPSRLQMasked128 x y (VPMOVVec64x2ToM mask)) +(ShiftAllRightMaskedUint64x4 x (MOVQconst [c]) mask) => (VPSRLQMasked256const [int8(c)] x (VPMOVVec64x4ToM mask)) (ShiftAllRightMaskedUint64x4 x y mask) => (VPSRLQMasked256 x y (VPMOVVec64x4ToM mask)) +(ShiftAllRightMaskedUint64x8 x (MOVQconst [c]) mask) => (VPSRLQMasked512const [int8(c)] x (VPMOVVec64x8ToM mask)) (ShiftAllRightMaskedUint64x8 x y mask) => (VPSRLQMasked512 x y (VPMOVVec64x8ToM mask)) (ShiftLeftInt16x8 ...) => (VPSLLVW128 ...) (ShiftLeftInt16x16 ...) => (VPSLLVW256 ...) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index adb6dd968f581d..8b7a7791bc3cfc 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -1002,5 +1002,59 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPSHRDQMasked128", argLength: 3, reg: w2kw, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSHRDQMasked256", argLength: 3, reg: w2kw, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHRDQMasked512", argLength: 3, reg: w2kw, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLW128const", argLength: 1, reg: v11, asm: "VPSLLW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLW256const", argLength: 1, reg: v11, asm: "VPSLLW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLW512const", argLength: 1, reg: w11, asm: "VPSLLW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLD128const", argLength: 1, reg: v11, asm: "VPSLLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLD256const", argLength: 1, reg: v11, asm: "VPSLLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLD512const", argLength: 1, reg: w11, asm: "VPSLLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLQ128const", argLength: 1, reg: v11, asm: "VPSLLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLQ256const", argLength: 1, reg: v11, asm: "VPSLLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLQ512const", argLength: 1, reg: w11, asm: "VPSLLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLWMasked128const", argLength: 2, reg: wkw, asm: "VPSLLW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLWMasked256const", argLength: 2, reg: wkw, asm: "VPSLLW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLWMasked512const", argLength: 2, reg: wkw, asm: "VPSLLW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLDMasked128const", argLength: 2, reg: wkw, asm: "VPSLLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLDMasked256const", argLength: 2, reg: wkw, asm: "VPSLLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLDMasked512const", argLength: 2, reg: wkw, asm: "VPSLLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLQMasked128const", argLength: 2, reg: wkw, asm: "VPSLLQ", 
aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLQMasked256const", argLength: 2, reg: wkw, asm: "VPSLLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLQMasked512const", argLength: 2, reg: wkw, asm: "VPSLLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLW128const", argLength: 1, reg: v11, asm: "VPSRLW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLW256const", argLength: 1, reg: v11, asm: "VPSRLW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLW512const", argLength: 1, reg: w11, asm: "VPSRLW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLD128const", argLength: 1, reg: v11, asm: "VPSRLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLD256const", argLength: 1, reg: v11, asm: "VPSRLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLD512const", argLength: 1, reg: w11, asm: "VPSRLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLQ128const", argLength: 1, reg: v11, asm: "VPSRLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLQ256const", argLength: 1, reg: v11, asm: "VPSRLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLQ512const", argLength: 1, reg: w11, asm: "VPSRLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAW128const", argLength: 1, reg: v11, asm: "VPSRAW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAW256const", argLength: 1, reg: v11, asm: "VPSRAW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAW512const", argLength: 1, reg: w11, asm: "VPSRAW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAD128const", argLength: 1, reg: v11, asm: "VPSRAD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAD256const", argLength: 1, reg: v11, asm: "VPSRAD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAD512const", argLength: 1, reg: w11, asm: "VPSRAD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAQ128const", argLength: 1, reg: w11, asm: "VPSRAQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAQ256const", argLength: 1, reg: w11, asm: "VPSRAQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAQ512const", argLength: 1, reg: w11, asm: "VPSRAQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLWMasked128const", argLength: 2, reg: wkw, asm: "VPSRLW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLWMasked256const", argLength: 2, reg: wkw, asm: "VPSRLW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLWMasked512const", argLength: 2, reg: wkw, asm: "VPSRLW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLDMasked128const", argLength: 2, reg: wkw, asm: "VPSRLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLDMasked256const", argLength: 2, reg: wkw, asm: "VPSRLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLDMasked512const", 
argLength: 2, reg: wkw, asm: "VPSRLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLQMasked128const", argLength: 2, reg: wkw, asm: "VPSRLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLQMasked256const", argLength: 2, reg: wkw, asm: "VPSRLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLQMasked512const", argLength: 2, reg: wkw, asm: "VPSRLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAWMasked128const", argLength: 2, reg: wkw, asm: "VPSRAW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAWMasked256const", argLength: 2, reg: wkw, asm: "VPSRAW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAWMasked512const", argLength: 2, reg: wkw, asm: "VPSRAW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRADMasked128const", argLength: 2, reg: wkw, asm: "VPSRAD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRADMasked256const", argLength: 2, reg: wkw, asm: "VPSRAD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRADMasked512const", argLength: 2, reg: wkw, asm: "VPSRAD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAQMasked128const", argLength: 2, reg: wkw, asm: "VPSRAQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAQMasked256const", argLength: 2, reg: wkw, asm: "VPSRAQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAQMasked512const", argLength: 2, reg: wkw, asm: "VPSRAQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, } } diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index a69612f28a1aa3..15fcabbb8d3476 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -2221,6 +2221,60 @@ const ( OpAMD64VPSHRDQMasked128 OpAMD64VPSHRDQMasked256 OpAMD64VPSHRDQMasked512 + OpAMD64VPSLLW128const + OpAMD64VPSLLW256const + OpAMD64VPSLLW512const + OpAMD64VPSLLD128const + OpAMD64VPSLLD256const + OpAMD64VPSLLD512const + OpAMD64VPSLLQ128const + OpAMD64VPSLLQ256const + OpAMD64VPSLLQ512const + OpAMD64VPSLLWMasked128const + OpAMD64VPSLLWMasked256const + OpAMD64VPSLLWMasked512const + OpAMD64VPSLLDMasked128const + OpAMD64VPSLLDMasked256const + OpAMD64VPSLLDMasked512const + OpAMD64VPSLLQMasked128const + OpAMD64VPSLLQMasked256const + OpAMD64VPSLLQMasked512const + OpAMD64VPSRLW128const + OpAMD64VPSRLW256const + OpAMD64VPSRLW512const + OpAMD64VPSRLD128const + OpAMD64VPSRLD256const + OpAMD64VPSRLD512const + OpAMD64VPSRLQ128const + OpAMD64VPSRLQ256const + OpAMD64VPSRLQ512const + OpAMD64VPSRAW128const + OpAMD64VPSRAW256const + OpAMD64VPSRAW512const + OpAMD64VPSRAD128const + OpAMD64VPSRAD256const + OpAMD64VPSRAD512const + OpAMD64VPSRAQ128const + OpAMD64VPSRAQ256const + OpAMD64VPSRAQ512const + OpAMD64VPSRLWMasked128const + OpAMD64VPSRLWMasked256const + OpAMD64VPSRLWMasked512const + OpAMD64VPSRLDMasked128const + OpAMD64VPSRLDMasked256const + OpAMD64VPSRLDMasked512const + OpAMD64VPSRLQMasked128const + OpAMD64VPSRLQMasked256const + OpAMD64VPSRLQMasked512const + OpAMD64VPSRAWMasked128const + OpAMD64VPSRAWMasked256const + OpAMD64VPSRAWMasked512const + OpAMD64VPSRADMasked128const + OpAMD64VPSRADMasked256const + OpAMD64VPSRADMasked512const + OpAMD64VPSRAQMasked128const + 
OpAMD64VPSRAQMasked256const + OpAMD64VPSRAQMasked512const OpARMADD OpARMADDconst @@ -34317,6 +34371,789 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSLLW128const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSLLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSLLW256const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSLLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSLLW512const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSLLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSLLD128const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSLLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSLLD256const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSLLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSLLD512const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSLLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSLLQ128const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSLLQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSLLQ256const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSLLQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSLLQ512const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSLLQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSLLWMasked128const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSLLW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSLLWMasked256const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSLLW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSLLWMasked512const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSLLW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSLLDMasked128const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSLLD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSLLDMasked256const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSLLD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSLLDMasked512const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSLLD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSLLQMasked128const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSLLQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSLLQMasked256const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSLLQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSLLQMasked512const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSLLQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLW128const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSRLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLW256const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSRLW, + reg: regInfo{ + inputs: []inputInfo{ + 
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLW512const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSRLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLD128const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSRLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLD256const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSRLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLD512const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSRLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLQ128const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSRLQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLQ256const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSRLQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLQ512const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSRLQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRAW128const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSRAW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAW256const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSRAW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAW512const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSRAW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRAD128const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSRAD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAD256const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSRAD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAD512const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSRAD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRAQ128const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSRAQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRAQ256const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSRAQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRAQ512const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSRAQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLWMasked128const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSRLW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLWMasked256const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSRLW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLWMasked512const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSRLW, + reg: regInfo{ + 
inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLDMasked128const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSRLD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLDMasked256const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSRLD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLDMasked512const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSRLD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLQMasked128const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSRLQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLQMasked256const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSRLQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLQMasked512const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSRLQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAWMasked128const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSRAW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAWMasked256const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSRAW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAWMasked512const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSRAW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + 
}, + }, + { + name: "VPSRADMasked128const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSRAD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRADMasked256const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSRAD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRADMasked512const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSRAD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAQMasked128const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSRAQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAQMasked256const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSRAQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAQMasked512const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSRAQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "ADD", diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index f0b25d3c5d125f..2e564b0c307718 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -4451,32 +4451,23 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64VPSHLDQ512 return true case OpShiftAllLeftInt16x16: - v.Op = OpAMD64VPSLLW256 - return true + return rewriteValueAMD64_OpShiftAllLeftInt16x16(v) case OpShiftAllLeftInt16x32: - v.Op = OpAMD64VPSLLW512 - return true + return rewriteValueAMD64_OpShiftAllLeftInt16x32(v) case OpShiftAllLeftInt16x8: - v.Op = OpAMD64VPSLLW128 - return true + return rewriteValueAMD64_OpShiftAllLeftInt16x8(v) case OpShiftAllLeftInt32x16: - v.Op = OpAMD64VPSLLD512 - return true + return rewriteValueAMD64_OpShiftAllLeftInt32x16(v) case OpShiftAllLeftInt32x4: - v.Op = OpAMD64VPSLLD128 - return true + return rewriteValueAMD64_OpShiftAllLeftInt32x4(v) case OpShiftAllLeftInt32x8: - v.Op = OpAMD64VPSLLD256 - return true + return rewriteValueAMD64_OpShiftAllLeftInt32x8(v) case OpShiftAllLeftInt64x2: - v.Op = OpAMD64VPSLLQ128 - return true + return rewriteValueAMD64_OpShiftAllLeftInt64x2(v) case OpShiftAllLeftInt64x4: - v.Op = OpAMD64VPSLLQ256 - return true + return rewriteValueAMD64_OpShiftAllLeftInt64x4(v) case 
OpShiftAllLeftInt64x8: - v.Op = OpAMD64VPSLLQ512 - return true + return rewriteValueAMD64_OpShiftAllLeftInt64x8(v) case OpShiftAllLeftMaskedInt16x16: return rewriteValueAMD64_OpShiftAllLeftMaskedInt16x16(v) case OpShiftAllLeftMaskedInt16x32: @@ -4514,32 +4505,23 @@ func rewriteValueAMD64(v *Value) bool { case OpShiftAllLeftMaskedUint64x8: return rewriteValueAMD64_OpShiftAllLeftMaskedUint64x8(v) case OpShiftAllLeftUint16x16: - v.Op = OpAMD64VPSLLW256 - return true + return rewriteValueAMD64_OpShiftAllLeftUint16x16(v) case OpShiftAllLeftUint16x32: - v.Op = OpAMD64VPSLLW512 - return true + return rewriteValueAMD64_OpShiftAllLeftUint16x32(v) case OpShiftAllLeftUint16x8: - v.Op = OpAMD64VPSLLW128 - return true + return rewriteValueAMD64_OpShiftAllLeftUint16x8(v) case OpShiftAllLeftUint32x16: - v.Op = OpAMD64VPSLLD512 - return true + return rewriteValueAMD64_OpShiftAllLeftUint32x16(v) case OpShiftAllLeftUint32x4: - v.Op = OpAMD64VPSLLD128 - return true + return rewriteValueAMD64_OpShiftAllLeftUint32x4(v) case OpShiftAllLeftUint32x8: - v.Op = OpAMD64VPSLLD256 - return true + return rewriteValueAMD64_OpShiftAllLeftUint32x8(v) case OpShiftAllLeftUint64x2: - v.Op = OpAMD64VPSLLQ128 - return true + return rewriteValueAMD64_OpShiftAllLeftUint64x2(v) case OpShiftAllLeftUint64x4: - v.Op = OpAMD64VPSLLQ256 - return true + return rewriteValueAMD64_OpShiftAllLeftUint64x4(v) case OpShiftAllLeftUint64x8: - v.Op = OpAMD64VPSLLQ512 - return true + return rewriteValueAMD64_OpShiftAllLeftUint64x8(v) case OpShiftAllRightConcatInt16x16: v.Op = OpAMD64VPSHRDW256 return true @@ -4631,32 +4613,23 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64VPSHRDQ512 return true case OpShiftAllRightInt16x16: - v.Op = OpAMD64VPSRAW256 - return true + return rewriteValueAMD64_OpShiftAllRightInt16x16(v) case OpShiftAllRightInt16x32: - v.Op = OpAMD64VPSRAW512 - return true + return rewriteValueAMD64_OpShiftAllRightInt16x32(v) case OpShiftAllRightInt16x8: - v.Op = OpAMD64VPSRAW128 - return true + return rewriteValueAMD64_OpShiftAllRightInt16x8(v) case OpShiftAllRightInt32x16: - v.Op = OpAMD64VPSRAD512 - return true + return rewriteValueAMD64_OpShiftAllRightInt32x16(v) case OpShiftAllRightInt32x4: - v.Op = OpAMD64VPSRAD128 - return true + return rewriteValueAMD64_OpShiftAllRightInt32x4(v) case OpShiftAllRightInt32x8: - v.Op = OpAMD64VPSRAD256 - return true + return rewriteValueAMD64_OpShiftAllRightInt32x8(v) case OpShiftAllRightInt64x2: - v.Op = OpAMD64VPSRAQ128 - return true + return rewriteValueAMD64_OpShiftAllRightInt64x2(v) case OpShiftAllRightInt64x4: - v.Op = OpAMD64VPSRAQ256 - return true + return rewriteValueAMD64_OpShiftAllRightInt64x4(v) case OpShiftAllRightInt64x8: - v.Op = OpAMD64VPSRAQ512 - return true + return rewriteValueAMD64_OpShiftAllRightInt64x8(v) case OpShiftAllRightMaskedInt16x16: return rewriteValueAMD64_OpShiftAllRightMaskedInt16x16(v) case OpShiftAllRightMaskedInt16x32: @@ -4694,32 +4667,23 @@ func rewriteValueAMD64(v *Value) bool { case OpShiftAllRightMaskedUint64x8: return rewriteValueAMD64_OpShiftAllRightMaskedUint64x8(v) case OpShiftAllRightUint16x16: - v.Op = OpAMD64VPSRLW256 - return true + return rewriteValueAMD64_OpShiftAllRightUint16x16(v) case OpShiftAllRightUint16x32: - v.Op = OpAMD64VPSRLW512 - return true + return rewriteValueAMD64_OpShiftAllRightUint16x32(v) case OpShiftAllRightUint16x8: - v.Op = OpAMD64VPSRLW128 - return true + return rewriteValueAMD64_OpShiftAllRightUint16x8(v) case OpShiftAllRightUint32x16: - v.Op = OpAMD64VPSRLD512 - return true + return 
rewriteValueAMD64_OpShiftAllRightUint32x16(v) case OpShiftAllRightUint32x4: - v.Op = OpAMD64VPSRLD128 - return true + return rewriteValueAMD64_OpShiftAllRightUint32x4(v) case OpShiftAllRightUint32x8: - v.Op = OpAMD64VPSRLD256 - return true + return rewriteValueAMD64_OpShiftAllRightUint32x8(v) case OpShiftAllRightUint64x2: - v.Op = OpAMD64VPSRLQ128 - return true + return rewriteValueAMD64_OpShiftAllRightUint64x2(v) case OpShiftAllRightUint64x4: - v.Op = OpAMD64VPSRLQ256 - return true + return rewriteValueAMD64_OpShiftAllRightUint64x4(v) case OpShiftAllRightUint64x8: - v.Op = OpAMD64VPSRLQ512 - return true + return rewriteValueAMD64_OpShiftAllRightUint64x8(v) case OpShiftLeftConcatInt16x16: v.Op = OpAMD64VPSHLDVW256 return true @@ -50791,11 +50755,261 @@ func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint64x8(v *Value) bool { return true } } +func rewriteValueAMD64_OpShiftAllLeftInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftInt16x16 x (MOVQconst [c])) + // result: (VPSLLW256const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLW256const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllLeftInt16x16 x y) + // result: (VPSLLW256 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSLLW256) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftInt16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftInt16x32 x (MOVQconst [c])) + // result: (VPSLLW512const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLW512const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllLeftInt16x32 x y) + // result: (VPSLLW512 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSLLW512) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftInt16x8 x (MOVQconst [c])) + // result: (VPSLLW128const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLW128const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllLeftInt16x8 x y) + // result: (VPSLLW128 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSLLW128) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftInt32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftInt32x16 x (MOVQconst [c])) + // result: (VPSLLD512const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLD512const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllLeftInt32x16 x y) + // result: (VPSLLD512 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSLLD512) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftInt32x4 x (MOVQconst [c])) + // result: (VPSLLD128const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLD128const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllLeftInt32x4 x y) + // result: (VPSLLD128 x y) + 
for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSLLD128) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftInt32x8 x (MOVQconst [c])) + // result: (VPSLLD256const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLD256const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllLeftInt32x8 x y) + // result: (VPSLLD256 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSLLD256) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftInt64x2 x (MOVQconst [c])) + // result: (VPSLLQ128const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLQ128const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllLeftInt64x2 x y) + // result: (VPSLLQ128 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSLLQ128) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftInt64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftInt64x4 x (MOVQconst [c])) + // result: (VPSLLQ256const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLQ256const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllLeftInt64x4 x y) + // result: (VPSLLQ256 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSLLQ256) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftInt64x8 x (MOVQconst [c])) + // result: (VPSLLQ512const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLQ512const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllLeftInt64x8 x y) + // result: (VPSLLQ512 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSLLQ512) + v.AddArg2(x, y) + return true + } +} func rewriteValueAMD64_OpShiftAllLeftMaskedInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllLeftMaskedInt16x16 x (MOVQconst [c]) mask) + // result: (VPSLLWMasked256const [int8(c)] x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLWMasked256const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllLeftMaskedInt16x16 x y mask) // result: (VPSLLWMasked256 x y (VPMOVVec16x16ToM mask)) for { @@ -50814,6 +51028,22 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllLeftMaskedInt16x32 x (MOVQconst [c]) mask) + // result: (VPSLLWMasked512const [int8(c)] x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLWMasked512const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + 
v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllLeftMaskedInt16x32 x y mask) // result: (VPSLLWMasked512 x y (VPMOVVec16x32ToM mask)) for { @@ -50832,6 +51062,22 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllLeftMaskedInt16x8 x (MOVQconst [c]) mask) + // result: (VPSLLWMasked128const [int8(c)] x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLWMasked128const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllLeftMaskedInt16x8 x y mask) // result: (VPSLLWMasked128 x y (VPMOVVec16x8ToM mask)) for { @@ -50850,6 +51096,22 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllLeftMaskedInt32x16 x (MOVQconst [c]) mask) + // result: (VPSLLDMasked512const [int8(c)] x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLDMasked512const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllLeftMaskedInt32x16 x y mask) // result: (VPSLLDMasked512 x y (VPMOVVec32x16ToM mask)) for { @@ -50868,6 +51130,22 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllLeftMaskedInt32x4 x (MOVQconst [c]) mask) + // result: (VPSLLDMasked128const [int8(c)] x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLDMasked128const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllLeftMaskedInt32x4 x y mask) // result: (VPSLLDMasked128 x y (VPMOVVec32x4ToM mask)) for { @@ -50886,6 +51164,22 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllLeftMaskedInt32x8 x (MOVQconst [c]) mask) + // result: (VPSLLDMasked256const [int8(c)] x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLDMasked256const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllLeftMaskedInt32x8 x y mask) // result: (VPSLLDMasked256 x y (VPMOVVec32x8ToM mask)) for { @@ -50904,6 +51198,22 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllLeftMaskedInt64x2 x (MOVQconst [c]) mask) + // result: (VPSLLQMasked128const [int8(c)] x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLQMasked128const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllLeftMaskedInt64x2 x 
y mask) // result: (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) for { @@ -50922,6 +51232,22 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllLeftMaskedInt64x4 x (MOVQconst [c]) mask) + // result: (VPSLLQMasked256const [int8(c)] x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLQMasked256const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllLeftMaskedInt64x4 x y mask) // result: (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) for { @@ -50940,6 +51266,22 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllLeftMaskedInt64x8 x (MOVQconst [c]) mask) + // result: (VPSLLQMasked512const [int8(c)] x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLQMasked512const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllLeftMaskedInt64x8 x y mask) // result: (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) for { @@ -50958,6 +51300,22 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllLeftMaskedUint16x16 x (MOVQconst [c]) mask) + // result: (VPSLLWMasked256const [int8(c)] x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLWMasked256const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllLeftMaskedUint16x16 x y mask) // result: (VPSLLWMasked256 x y (VPMOVVec16x16ToM mask)) for { @@ -50976,6 +51334,22 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllLeftMaskedUint16x32 x (MOVQconst [c]) mask) + // result: (VPSLLWMasked512const [int8(c)] x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLWMasked512const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllLeftMaskedUint16x32 x y mask) // result: (VPSLLWMasked512 x y (VPMOVVec16x32ToM mask)) for { @@ -50994,6 +51368,22 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllLeftMaskedUint16x8 x (MOVQconst [c]) mask) + // result: (VPSLLWMasked128const [int8(c)] x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLWMasked128const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllLeftMaskedUint16x8 x y mask) // result: (VPSLLWMasked128 x y (VPMOVVec16x8ToM mask)) for { @@ 
-51012,6 +51402,22 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllLeftMaskedUint32x16 x (MOVQconst [c]) mask) + // result: (VPSLLDMasked512const [int8(c)] x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLDMasked512const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllLeftMaskedUint32x16 x y mask) // result: (VPSLLDMasked512 x y (VPMOVVec32x16ToM mask)) for { @@ -51030,6 +51436,22 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllLeftMaskedUint32x4 x (MOVQconst [c]) mask) + // result: (VPSLLDMasked128const [int8(c)] x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLDMasked128const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllLeftMaskedUint32x4 x y mask) // result: (VPSLLDMasked128 x y (VPMOVVec32x4ToM mask)) for { @@ -51048,6 +51470,22 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllLeftMaskedUint32x8 x (MOVQconst [c]) mask) + // result: (VPSLLDMasked256const [int8(c)] x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLDMasked256const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllLeftMaskedUint32x8 x y mask) // result: (VPSLLDMasked256 x y (VPMOVVec32x8ToM mask)) for { @@ -51066,6 +51504,22 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllLeftMaskedUint64x2 x (MOVQconst [c]) mask) + // result: (VPSLLQMasked128const [int8(c)] x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLQMasked128const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllLeftMaskedUint64x2 x y mask) // result: (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) for { @@ -51084,6 +51538,22 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllLeftMaskedUint64x4 x (MOVQconst [c]) mask) + // result: (VPSLLQMasked256const [int8(c)] x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLQMasked256const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllLeftMaskedUint64x4 x y mask) // result: (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) for { @@ -51102,6 +51572,22 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint64x8(v 
*Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllLeftMaskedUint64x8 x (MOVQconst [c]) mask) + // result: (VPSLLQMasked512const [int8(c)] x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLQMasked512const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllLeftMaskedUint64x8 x y mask) // result: (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) for { @@ -51115,68 +51601,302 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpShiftAllLeftUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllRightConcatMaskedInt16x16 [a] x y mask) - // result: (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) + // match: (ShiftAllLeftUint16x16 x (MOVQconst [c])) + // result: (VPSLLW256const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLW256const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllLeftUint16x16 x y) + // result: (VPSLLW256 x y) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHRDWMasked256) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPSLLW256) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpShiftAllLeftUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllRightConcatMaskedInt16x32 [a] x y mask) - // result: (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) + // match: (ShiftAllLeftUint16x32 x (MOVQconst [c])) + // result: (VPSLLW512const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLW512const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllLeftUint16x32 x y) + // result: (VPSLLW512 x y) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHRDWMasked512) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPSLLW512) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpShiftAllLeftUint16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllRightConcatMaskedInt16x8 [a] x y mask) - // result: (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) + // match: (ShiftAllLeftUint16x8 x (MOVQconst [c])) + // result: (VPSLLW128const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLW128const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllLeftUint16x8 x y) + // result: (VPSLLW128 x y) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHRDWMasked128) - v.AuxInt = int8ToAuxInt(a) - 
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPSLLW128) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpShiftAllLeftUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftUint32x16 x (MOVQconst [c])) + // result: (VPSLLD512const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLD512const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllLeftUint32x16 x y) + // result: (VPSLLD512 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSLLD512) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftUint32x4 x (MOVQconst [c])) + // result: (VPSLLD128const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLD128const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllLeftUint32x4 x y) + // result: (VPSLLD128 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSLLD128) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftUint32x8 x (MOVQconst [c])) + // result: (VPSLLD256const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLD256const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllLeftUint32x8 x y) + // result: (VPSLLD256 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSLLD256) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftUint64x2 x (MOVQconst [c])) + // result: (VPSLLQ128const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLQ128const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllLeftUint64x2 x y) + // result: (VPSLLQ128 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSLLQ128) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftUint64x4 x (MOVQconst [c])) + // result: (VPSLLQ256const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLQ256const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllLeftUint64x4 x y) + // result: (VPSLLQ256 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSLLQ256) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftUint64x8 x (MOVQconst [c])) + // result: (VPSLLQ512const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLQ512const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllLeftUint64x8 x y) + // result: (VPSLLQ512 x y) + for { + x := v_0 + y := 
v_1 + v.reset(OpAMD64VPSLLQ512) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllRightConcatMaskedInt16x16 [a] x y mask) + // result: (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHRDWMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllRightConcatMaskedInt16x32 [a] x y mask) + // result: (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHRDWMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllRightConcatMaskedInt16x8 [a] x y mask) + // result: (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHRDWMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block @@ -51475,11 +52195,261 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint64x8(v *Value) bool { return true } } +func rewriteValueAMD64_OpShiftAllRightInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightInt16x16 x (MOVQconst [c])) + // result: (VPSRAW256const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAW256const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllRightInt16x16 x y) + // result: (VPSRAW256 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSRAW256) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightInt16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightInt16x32 x (MOVQconst [c])) + // result: (VPSRAW512const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAW512const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllRightInt16x32 x y) + // result: (VPSRAW512 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSRAW512) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightInt16x8 x (MOVQconst [c])) + // result: (VPSRAW128const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAW128const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllRightInt16x8 x y) + // result: (VPSRAW128 x y) + for { + x 
:= v_0 + y := v_1 + v.reset(OpAMD64VPSRAW128) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightInt32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightInt32x16 x (MOVQconst [c])) + // result: (VPSRAD512const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAD512const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllRightInt32x16 x y) + // result: (VPSRAD512 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSRAD512) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightInt32x4 x (MOVQconst [c])) + // result: (VPSRAD128const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAD128const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllRightInt32x4 x y) + // result: (VPSRAD128 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSRAD128) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightInt32x8 x (MOVQconst [c])) + // result: (VPSRAD256const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAD256const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllRightInt32x8 x y) + // result: (VPSRAD256 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSRAD256) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightInt64x2 x (MOVQconst [c])) + // result: (VPSRAQ128const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAQ128const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllRightInt64x2 x y) + // result: (VPSRAQ128 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSRAQ128) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightInt64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightInt64x4 x (MOVQconst [c])) + // result: (VPSRAQ256const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAQ256const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllRightInt64x4 x y) + // result: (VPSRAQ256 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSRAQ256) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightInt64x8 x (MOVQconst [c])) + // result: (VPSRAQ512const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAQ512const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllRightInt64x8 x y) + // result: (VPSRAQ512 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSRAQ512) + v.AddArg2(x, y) + return true + } +} func rewriteValueAMD64_OpShiftAllRightMaskedInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] 
v_0 := v.Args[0] b := v.Block + // match: (ShiftAllRightMaskedInt16x16 x (MOVQconst [c]) mask) + // result: (VPSRAWMasked256const [int8(c)] x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRAWMasked256const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllRightMaskedInt16x16 x y mask) // result: (VPSRAWMasked256 x y (VPMOVVec16x16ToM mask)) for { @@ -51498,6 +52468,22 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllRightMaskedInt16x32 x (MOVQconst [c]) mask) + // result: (VPSRAWMasked512const [int8(c)] x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRAWMasked512const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllRightMaskedInt16x32 x y mask) // result: (VPSRAWMasked512 x y (VPMOVVec16x32ToM mask)) for { @@ -51516,6 +52502,22 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllRightMaskedInt16x8 x (MOVQconst [c]) mask) + // result: (VPSRAWMasked128const [int8(c)] x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRAWMasked128const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllRightMaskedInt16x8 x y mask) // result: (VPSRAWMasked128 x y (VPMOVVec16x8ToM mask)) for { @@ -51534,6 +52536,22 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllRightMaskedInt32x16 x (MOVQconst [c]) mask) + // result: (VPSRADMasked512const [int8(c)] x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRADMasked512const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllRightMaskedInt32x16 x y mask) // result: (VPSRADMasked512 x y (VPMOVVec32x16ToM mask)) for { @@ -51552,6 +52570,22 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllRightMaskedInt32x4 x (MOVQconst [c]) mask) + // result: (VPSRADMasked128const [int8(c)] x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRADMasked128const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllRightMaskedInt32x4 x y mask) // result: (VPSRADMasked128 x y (VPMOVVec32x4ToM mask)) for { @@ -51570,6 +52604,22 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: 
(ShiftAllRightMaskedInt32x8 x (MOVQconst [c]) mask) + // result: (VPSRADMasked256const [int8(c)] x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRADMasked256const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllRightMaskedInt32x8 x y mask) // result: (VPSRADMasked256 x y (VPMOVVec32x8ToM mask)) for { @@ -51588,6 +52638,22 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllRightMaskedInt64x2 x (MOVQconst [c]) mask) + // result: (VPSRAQMasked128const [int8(c)] x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRAQMasked128const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllRightMaskedInt64x2 x y mask) // result: (VPSRAQMasked128 x y (VPMOVVec64x2ToM mask)) for { @@ -51606,6 +52672,22 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllRightMaskedInt64x4 x (MOVQconst [c]) mask) + // result: (VPSRAQMasked256const [int8(c)] x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRAQMasked256const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllRightMaskedInt64x4 x y mask) // result: (VPSRAQMasked256 x y (VPMOVVec64x4ToM mask)) for { @@ -51624,6 +52706,22 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllRightMaskedInt64x8 x (MOVQconst [c]) mask) + // result: (VPSRAQMasked512const [int8(c)] x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRAQMasked512const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllRightMaskedInt64x8 x y mask) // result: (VPSRAQMasked512 x y (VPMOVVec64x8ToM mask)) for { @@ -51642,6 +52740,22 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllRightMaskedUint16x16 x (MOVQconst [c]) mask) + // result: (VPSRLWMasked256const [int8(c)] x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRLWMasked256const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllRightMaskedUint16x16 x y mask) // result: (VPSRLWMasked256 x y (VPMOVVec16x16ToM mask)) for { @@ -51660,6 +52774,22 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllRightMaskedUint16x32 x (MOVQconst [c]) mask) + // result: 
(VPSRLWMasked512const [int8(c)] x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRLWMasked512const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllRightMaskedUint16x32 x y mask) // result: (VPSRLWMasked512 x y (VPMOVVec16x32ToM mask)) for { @@ -51678,6 +52808,22 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllRightMaskedUint16x8 x (MOVQconst [c]) mask) + // result: (VPSRLWMasked128const [int8(c)] x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRLWMasked128const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllRightMaskedUint16x8 x y mask) // result: (VPSRLWMasked128 x y (VPMOVVec16x8ToM mask)) for { @@ -51696,6 +52842,22 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllRightMaskedUint32x16 x (MOVQconst [c]) mask) + // result: (VPSRLDMasked512const [int8(c)] x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRLDMasked512const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllRightMaskedUint32x16 x y mask) // result: (VPSRLDMasked512 x y (VPMOVVec32x16ToM mask)) for { @@ -51714,6 +52876,22 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllRightMaskedUint32x4 x (MOVQconst [c]) mask) + // result: (VPSRLDMasked128const [int8(c)] x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRLDMasked128const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllRightMaskedUint32x4 x y mask) // result: (VPSRLDMasked128 x y (VPMOVVec32x4ToM mask)) for { @@ -51732,6 +52910,22 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllRightMaskedUint32x8 x (MOVQconst [c]) mask) + // result: (VPSRLDMasked256const [int8(c)] x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRLDMasked256const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllRightMaskedUint32x8 x y mask) // result: (VPSRLDMasked256 x y (VPMOVVec32x8ToM mask)) for { @@ -51750,6 +52944,22 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllRightMaskedUint64x2 x (MOVQconst [c]) mask) + // result: (VPSRLQMasked128const [int8(c)] x (VPMOVVec64x2ToM mask)) + 
for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRLQMasked128const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllRightMaskedUint64x2 x y mask) // result: (VPSRLQMasked128 x y (VPMOVVec64x2ToM mask)) for { @@ -51768,6 +52978,22 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllRightMaskedUint64x4 x (MOVQconst [c]) mask) + // result: (VPSRLQMasked256const [int8(c)] x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRLQMasked256const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllRightMaskedUint64x4 x y mask) // result: (VPSRLQMasked256 x y (VPMOVVec64x4ToM mask)) for { @@ -51786,6 +53012,22 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllRightMaskedUint64x8 x (MOVQconst [c]) mask) + // result: (VPSRLQMasked512const [int8(c)] x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRLQMasked512const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllRightMaskedUint64x8 x y mask) // result: (VPSRLQMasked512 x y (VPMOVVec64x8ToM mask)) for { @@ -51799,6 +53041,240 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint64x8(v *Value) bool { return true } } +func rewriteValueAMD64_OpShiftAllRightUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightUint16x16 x (MOVQconst [c])) + // result: (VPSRLW256const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRLW256const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllRightUint16x16 x y) + // result: (VPSRLW256 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSRLW256) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightUint16x32 x (MOVQconst [c])) + // result: (VPSRLW512const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRLW512const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllRightUint16x32 x y) + // result: (VPSRLW512 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSRLW512) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightUint16x8 x (MOVQconst [c])) + // result: (VPSRLW128const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRLW128const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllRightUint16x8 x y) + // result: (VPSRLW128 x y) + for { + x := v_0 + y := v_1 + 
v.reset(OpAMD64VPSRLW128) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightUint32x16 x (MOVQconst [c])) + // result: (VPSRLD512const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRLD512const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllRightUint32x16 x y) + // result: (VPSRLD512 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSRLD512) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightUint32x4 x (MOVQconst [c])) + // result: (VPSRLD128const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRLD128const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllRightUint32x4 x y) + // result: (VPSRLD128 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSRLD128) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightUint32x8 x (MOVQconst [c])) + // result: (VPSRLD256const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRLD256const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllRightUint32x8 x y) + // result: (VPSRLD256 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSRLD256) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightUint64x2 x (MOVQconst [c])) + // result: (VPSRLQ128const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRLQ128const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllRightUint64x2 x y) + // result: (VPSRLQ128 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSRLQ128) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightUint64x4 x (MOVQconst [c])) + // result: (VPSRLQ256const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRLQ256const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllRightUint64x4 x y) + // result: (VPSRLQ256 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSRLQ256) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightUint64x8 x (MOVQconst [c])) + // result: (VPSRLQ512const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRLQ512const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllRightUint64x8 x y) + // result: (VPSRLQ512 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSRLQ512) + v.AddArg2(x, y) + return true + } +} func rewriteValueAMD64_OpShiftLeftConcatMaskedInt16x16(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] 
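The point of the MOVQconst rules above: when the shift count reaches the compiler as a constant, the shift lowers to the immediate form of the instruction (the ...const opcodes), while a variable count keeps the register form; the simd_test.go change below exercises both paths. As a rough scalar model of the ShiftAllLeft semantics these rules preserve (the helper below is only an illustration, not part of the simd package):

// shiftAllLeftRef models Int32x4.ShiftAllLeft(count): every lane is
// shifted left by the same count.
func shiftAllLeftRef(x [4]int32, count uint64) [4]int32 {
	var r [4]int32
	for i, v := range x {
		r[i] = v << count
	}
	return r
}
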
diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index 2326addea94b8e..1df27f875760aa 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -206,6 +206,24 @@ func TestPairDotProdAccumulate(t *testing.T) { } } +var testShiftAllVal uint64 = 3 + +func TestShiftAll(t *testing.T) { + got := make([]int32, 4) + simd.LoadInt32x4Slice([]int32{0b11, 0b11, 0b11, 0b11}).ShiftAllLeft(2).StoreSlice(got) + for _, v := range got { + if v != 0b1100 { + t.Errorf("expect 0b1100, got %b", v) + } + } + simd.LoadInt32x4Slice([]int32{0b11, 0b11, 0b11, 0b11}).ShiftAllLeft(testShiftAllVal).StoreSlice(got) + for _, v := range got { + if v != 0b11000 { + t.Errorf("expect 0b11000, got %b", v) + } + } +} + func TestSlicesInt8(t *testing.T) { a := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}

From 7ca34599ec4df8a21b7d4580f7e1c716c44f7e0f Mon Sep 17 00:00:00 2001
From: David Chase
Date: Mon, 4 Aug 2025 15:19:54 -0400
Subject: [PATCH 113/139] [dev.simd] simd, cmd/compile: generated files to add
 'blend' and 'blendMasked'

Generated by arch/internal/simdgen CL 693175

These methods are not public because of simdgen-induced name/signature
issues, and because their addition was motivated by the need for
emulation tools. The specific name/signature problems are:

1) One set of instructions has the "Masked" suffix (because of how that
   is incorporated into names) and the other set does not (though I
   suppose the operation could be renamed).

2) Because the AVX2 instruction is bytes-only, getting the signature
   right requires "OverwriteBase", but OverwriteBase also requires
   OverwriteClass, and "simdgen does not support [OverwriteClass] in
   inputs".

3) The default operation order is false, true, but we want this as an
   "x.Merged(y, mask)" that pairs with "x.Masked(mask)", where the true
   case is x and the false case is y/zero; the default ordering for
   VPBLENDVB and VPBLENDMB, however, is false->x and true->y.

4) VPBLENDVB only comes in byte width, which causes problems for
   floats.

All of this may get fixed in the future; for now it is just an
implementation detail.
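
For reference, the selection rule all of these blend variants implement,
as a scalar sketch (for VPBLENDVB the per-lane "true" test is the high
bit of the corresponding mask byte; the helper below is illustrative
only, not part of the package):

// blendRef models x.blend(y, mask): lanes whose mask is true take the
// value from y; all other lanes keep the value from x.
func blendRef(x, y [16]int8, mask [16]bool) [16]int8 {
	r := x
	for i := range r {
		if mask[i] {
			r[i] = y[i]
		}
	}
	return r
}

The Merged/Masked pairing we actually want flips that ordering, which is
part of why these methods stay unexported for now.
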
Change-Id: I61b655c7011e2c33f8644f704f886133c89d2f15 Reviewed-on: https://go-review.googlesource.com/c/go/+/693155 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao --- src/cmd/compile/internal/amd64/simdssa.go | 14 +- .../compile/internal/ssa/_gen/simdAMD64.rules | 6 + .../compile/internal/ssa/_gen/simdAMD64ops.go | 6 + .../internal/ssa/_gen/simdgenericOps.go | 6 + src/cmd/compile/internal/ssa/opGen.go | 132 ++++++++++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 86 ++++++++++++ .../compile/internal/ssagen/simdintrinsics.go | 6 + src/simd/ops_amd64.go | 48 +++++++ 8 files changed, 303 insertions(+), 1 deletion(-) diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index bd6af6221d54c4..e0571d2cc37a96 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -589,7 +589,11 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPXORDMasked512, ssa.OpAMD64VPXORQMasked128, ssa.OpAMD64VPXORQMasked256, - ssa.OpAMD64VPXORQMasked512: + ssa.OpAMD64VPXORQMasked512, + ssa.OpAMD64VPBLENDMBMasked512, + ssa.OpAMD64VPBLENDMWMasked512, + ssa.OpAMD64VPBLENDMDMasked512, + ssa.OpAMD64VPBLENDMQMasked512: p = simdV2kv(s, v) case ssa.OpAMD64VPABSBMasked128, @@ -660,6 +664,10 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VSQRTPDMasked512: p = simdVkv(s, v) + case ssa.OpAMD64VPBLENDVB128, + ssa.OpAMD64VPBLENDVB256: + p = simdV31(s, v) + case ssa.OpAMD64VROUNDPS128, ssa.OpAMD64VROUNDPS256, ssa.OpAMD64VROUNDPD128, @@ -1552,6 +1560,10 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPXORQMasked128, ssa.OpAMD64VPXORQMasked256, ssa.OpAMD64VPXORQMasked512, + ssa.OpAMD64VPBLENDMBMasked512, + ssa.OpAMD64VPBLENDMWMasked512, + ssa.OpAMD64VPBLENDMDMasked512, + ssa.OpAMD64VPBLENDMQMasked512, ssa.OpAMD64VPSLLWMasked128const, ssa.OpAMD64VPSLLWMasked256const, ssa.OpAMD64VPSLLWMasked512const, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index b8bd0d9b4cae7b..9a4c82c0afcd3e 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -1891,3 +1891,9 @@ (XorMaskedUint64x2 x y mask) => (VPXORQMasked128 x y (VPMOVVec64x2ToM mask)) (XorMaskedUint64x4 x y mask) => (VPXORQMasked256 x y (VPMOVVec64x4ToM mask)) (XorMaskedUint64x8 x y mask) => (VPXORQMasked512 x y (VPMOVVec64x8ToM mask)) +(blendInt8x16 ...) => (VPBLENDVB128 ...) +(blendInt8x32 ...) => (VPBLENDVB256 ...) 
+(blendMaskedInt8x64 x y mask) => (VPBLENDMBMasked512 x y (VPMOVVec8x64ToM mask)) +(blendMaskedInt16x32 x y mask) => (VPBLENDMWMasked512 x y (VPMOVVec16x32ToM mask)) +(blendMaskedInt32x16 x y mask) => (VPBLENDMDMasked512 x y (VPMOVVec32x16ToM mask)) +(blendMaskedInt64x8 x y mask) => (VPBLENDMQMasked512 x y (VPMOVVec64x8ToM mask)) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 8b7a7791bc3cfc..7860a0889eb65d 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -227,6 +227,12 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPAVGWMasked128", argLength: 3, reg: w2kw, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPAVGWMasked256", argLength: 3, reg: w2kw, asm: "VPAVGW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPAVGWMasked512", argLength: 3, reg: w2kw, asm: "VPAVGW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPBLENDMBMasked512", argLength: 3, reg: w2kw, asm: "VPBLENDMB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPBLENDMDMasked512", argLength: 3, reg: w2kw, asm: "VPBLENDMD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPBLENDMQMasked512", argLength: 3, reg: w2kw, asm: "VPBLENDMQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPBLENDMWMasked512", argLength: 3, reg: w2kw, asm: "VPBLENDMW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPBLENDVB128", argLength: 3, reg: v31, asm: "VPBLENDVB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPBLENDVB256", argLength: 3, reg: v31, asm: "VPBLENDVB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPEQB128", argLength: 2, reg: v21, asm: "VPCMPEQB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPCMPEQB256", argLength: 2, reg: v21, asm: "VPCMPEQB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPCMPEQB512", argLength: 2, reg: w2k, asm: "VPCMPEQB", commutative: true, typ: "Mask", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index ea52254413f792..bf85df5e6dadad 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -1558,6 +1558,12 @@ func simdGenericOps() []opData { {name: "XorUint64x2", argLength: 2, commutative: true}, {name: "XorUint64x4", argLength: 2, commutative: true}, {name: "XorUint64x8", argLength: 2, commutative: true}, + {name: "blendInt8x16", argLength: 3, commutative: false}, + {name: "blendInt8x32", argLength: 3, commutative: false}, + {name: "blendMaskedInt8x64", argLength: 3, commutative: false}, + {name: "blendMaskedInt16x32", argLength: 3, commutative: false}, + {name: "blendMaskedInt32x16", argLength: 3, commutative: false}, + {name: "blendMaskedInt64x8", argLength: 3, commutative: false}, {name: "CeilScaledFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "CeilScaledFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "CeilScaledFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 15fcabbb8d3476..9ce9220901cb7d 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ 
-1446,6 +1446,12 @@ const ( OpAMD64VPAVGWMasked128 OpAMD64VPAVGWMasked256 OpAMD64VPAVGWMasked512 + OpAMD64VPBLENDMBMasked512 + OpAMD64VPBLENDMDMasked512 + OpAMD64VPBLENDMQMasked512 + OpAMD64VPBLENDMWMasked512 + OpAMD64VPBLENDVB128 + OpAMD64VPBLENDVB256 OpAMD64VPCMPEQB128 OpAMD64VPCMPEQB256 OpAMD64VPCMPEQB512 @@ -6109,6 +6115,12 @@ const ( OpXorUint64x2 OpXorUint64x4 OpXorUint64x8 + OpblendInt8x16 + OpblendInt8x32 + OpblendMaskedInt8x64 + OpblendMaskedInt16x32 + OpblendMaskedInt32x16 + OpblendMaskedInt64x8 OpCeilScaledFloat32x4 OpCeilScaledFloat32x8 OpCeilScaledFloat32x16 @@ -22710,6 +22722,96 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPBLENDMBMasked512", + argLen: 3, + asm: x86.AVPBLENDMB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPBLENDMDMasked512", + argLen: 3, + asm: x86.AVPBLENDMD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPBLENDMQMasked512", + argLen: 3, + asm: x86.AVPBLENDMQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPBLENDMWMasked512", + argLen: 3, + asm: x86.AVPBLENDMW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPBLENDVB128", + argLen: 3, + asm: x86.AVPBLENDVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPBLENDVB256", + argLen: 3, + asm: x86.AVPBLENDVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPEQB128", argLen: 2, @@ -70897,6 +70999,36 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "blendInt8x16", + argLen: 3, + generic: true, + }, + { + name: "blendInt8x32", + argLen: 3, + generic: true, + }, + { + name: "blendMaskedInt8x64", + argLen: 3, + generic: true, + }, + { + name: "blendMaskedInt16x32", + 
argLen: 3, + generic: true, + }, + { + name: "blendMaskedInt32x16", + argLen: 3, + generic: true, + }, + { + name: "blendMaskedInt64x8", + argLen: 3, + generic: true, + }, { name: "CeilScaledFloat32x4", auxType: auxInt8, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 2e564b0c307718..e181798245882f 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -5659,6 +5659,20 @@ func rewriteValueAMD64(v *Value) bool { return true case OpZeroSIMD: return rewriteValueAMD64_OpZeroSIMD(v) + case OpblendInt8x16: + v.Op = OpAMD64VPBLENDVB128 + return true + case OpblendInt8x32: + v.Op = OpAMD64VPBLENDVB256 + return true + case OpblendMaskedInt16x32: + return rewriteValueAMD64_OpblendMaskedInt16x32(v) + case OpblendMaskedInt32x16: + return rewriteValueAMD64_OpblendMaskedInt32x16(v) + case OpblendMaskedInt64x8: + return rewriteValueAMD64_OpblendMaskedInt64x8(v) + case OpblendMaskedInt8x64: + return rewriteValueAMD64_OpblendMaskedInt8x64(v) } return false } @@ -57117,6 +57131,78 @@ func rewriteValueAMD64_OpZeroSIMD(v *Value) bool { } return false } +func rewriteValueAMD64_OpblendMaskedInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (blendMaskedInt16x32 x y mask) + // result: (VPBLENDMWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPBLENDMWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpblendMaskedInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (blendMaskedInt32x16 x y mask) + // result: (VPBLENDMDMasked512 x y (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPBLENDMDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpblendMaskedInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (blendMaskedInt64x8 x y mask) + // result: (VPBLENDMQMasked512 x y (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPBLENDMQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpblendMaskedInt8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (blendMaskedInt8x64 x y mask) + // result: (VPBLENDMBMasked512 x y (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPBLENDMBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} func rewriteBlockAMD64(b *Block) bool { typ := &b.Func.Config.Types switch b.Kind { diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 511974ffa1bf34..fb68846347d273 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -1830,6 +1830,12 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.XorMasked", opLen3(ssa.OpXorMaskedUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.XorMasked", opLen3(ssa.OpXorMaskedUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.XorMasked", opLen3(ssa.OpXorMaskedUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.blend", opLen3(ssa.OpblendInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.blend", opLen3(ssa.OpblendInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.blendMasked", opLen3(ssa.OpblendMaskedInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.blendMasked", opLen3(ssa.OpblendMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.blendMasked", opLen3(ssa.OpblendMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.blendMasked", opLen3(ssa.OpblendMaskedInt64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float32x4.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float32x4.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index dc42e73a53a2dc..61a708b56e0cfb 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -12119,6 +12119,54 @@ func (x Uint64x4) XorMasked(y Uint64x4, mask Mask64x4) Uint64x4 // Asm: VPXORQ, CPU Feature: AVX512F func (x Uint64x8) XorMasked(y Uint64x8, mask Mask64x8) Uint64x8 +/* blend */ + +// blend blends two vectors based on mask values, choosing either +// the first or the second based on whether the third is false or true +// +// Asm: VPBLENDVB, CPU Feature: AVX +func (x Int8x16) blend(y Int8x16, mask Int8x16) Int8x16 + +// blend blends two vectors based on mask values, choosing either +// the first or the second based on whether the third is false or true +// +// Asm: VPBLENDVB, CPU Feature: AVX2 +func (x Int8x32) blend(y Int8x32, mask Int8x32) Int8x32 + +/* blendMasked */ + +// blendMasked blends two vectors based on mask values, choosing either +// the first or the second based on whether the third is false or true +// +// This operation is applied selectively under a write mask. +// +// Asm: VPBLENDMB, CPU Feature: AVX512BW +func (x Int8x64) blendMasked(y Int8x64, mask Mask8x64) Int8x64 + +// blendMasked blends two vectors based on mask values, choosing either +// the first or the second based on whether the third is false or true +// +// This operation is applied selectively under a write mask. +// +// Asm: VPBLENDMW, CPU Feature: AVX512BW +func (x Int16x32) blendMasked(y Int16x32, mask Mask16x32) Int16x32 + +// blendMasked blends two vectors based on mask values, choosing either +// the first or the second based on whether the third is false or true +// +// This operation is applied selectively under a write mask. +// +// Asm: VPBLENDMD, CPU Feature: AVX512F +func (x Int32x16) blendMasked(y Int32x16, mask Mask32x16) Int32x16 + +// blendMasked blends two vectors based on mask values, choosing either +// the first or the second based on whether the third is false or true +// +// This operation is applied selectively under a write mask. 
+// +// Asm: VPBLENDMQ, CPU Feature: AVX512F +func (x Int64x8) blendMasked(y Int64x8, mask Mask64x8) Int64x8 + // Float64x2 converts from Float32x4 to Float64x2 func (from Float32x4) AsFloat64x2() (to Float64x2) From d3cf582f8ab21fb9ec88753c780bc26257db6ac4 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Tue, 5 Aug 2025 19:07:51 +0000 Subject: [PATCH 114/139] [dev.simd] cmd/compile, simd: (Set|Get)(Lo|Hi) This CL is generated by CL 693335. Change-Id: Ie9adda526573f979ec7e4f535033ba29236cc5cb Reviewed-on: https://go-review.googlesource.com/c/go/+/693355 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/amd64/simdssa.go | 4 + .../compile/internal/ssa/_gen/simdAMD64.rules | 100 +- .../compile/internal/ssa/_gen/simdAMD64ops.go | 12 +- .../internal/ssa/_gen/simdgenericOps.go | 100 +- src/cmd/compile/internal/ssa/opGen.go | 762 +++++++--- src/cmd/compile/internal/ssa/rewriteAMD64.go | 1260 ++++++++++++++++- .../compile/internal/ssagen/simdintrinsics.go | 100 +- src/simd/ops_amd64.go | 516 +++++-- src/simd/simd_test.go | 87 -- src/simd/slicepart_amd64.go | 20 +- 10 files changed, 2434 insertions(+), 527 deletions(-) diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index e0571d2cc37a96..7a0a0be58fa329 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -685,7 +685,9 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VREDUCEPD256, ssa.OpAMD64VREDUCEPD512, ssa.OpAMD64VEXTRACTF128128, + ssa.OpAMD64VEXTRACTF64X4256, ssa.OpAMD64VEXTRACTI128128, + ssa.OpAMD64VEXTRACTI64X4256, ssa.OpAMD64VPROLD128, ssa.OpAMD64VPROLD256, ssa.OpAMD64VPROLD512, @@ -794,7 +796,9 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VGF2P8AFFINEINVQB256, ssa.OpAMD64VGF2P8AFFINEINVQB512, ssa.OpAMD64VINSERTF128256, + ssa.OpAMD64VINSERTF64X4512, ssa.OpAMD64VINSERTI128256, + ssa.OpAMD64VINSERTI64X4512, ssa.OpAMD64VPSHLDW128, ssa.OpAMD64VPSHLDW256, ssa.OpAMD64VPSHLDW512, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 9a4c82c0afcd3e..316db1b8411068 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -467,16 +467,6 @@ (GaloisFieldMulMaskedUint8x16 x y mask) => (VGF2P8MULBMasked128 x y (VPMOVVec8x16ToM mask)) (GaloisFieldMulMaskedUint8x32 x y mask) => (VGF2P8MULBMasked256 x y (VPMOVVec8x32ToM mask)) (GaloisFieldMulMaskedUint8x64 x y mask) => (VGF2P8MULBMasked512 x y (VPMOVVec8x64ToM mask)) -(Get128Float32x8 ...) => (VEXTRACTF128128 ...) -(Get128Float64x4 ...) => (VEXTRACTF128128 ...) -(Get128Int8x32 ...) => (VEXTRACTI128128 ...) -(Get128Int16x16 ...) => (VEXTRACTI128128 ...) -(Get128Int32x8 ...) => (VEXTRACTI128128 ...) -(Get128Int64x4 ...) => (VEXTRACTI128128 ...) -(Get128Uint8x32 ...) => (VEXTRACTI128128 ...) -(Get128Uint16x16 ...) => (VEXTRACTI128128 ...) -(Get128Uint32x8 ...) => (VEXTRACTI128128 ...) -(Get128Uint64x4 ...) => (VEXTRACTI128128 ...) (GetElemInt8x16 ...) => (VPEXTRB128 ...) (GetElemInt16x8 ...) => (VPEXTRW128 ...) (GetElemInt32x4 ...) => (VPEXTRD128 ...) @@ -485,6 +475,46 @@ (GetElemUint16x8 ...) => (VPEXTRW128 ...) (GetElemUint32x4 ...) => (VPEXTRD128 ...) (GetElemUint64x2 ...) => (VPEXTRQ128 ...) 
+(GetHiFloat32x8 x) => (VEXTRACTF128128 [1] x) +(GetHiFloat32x16 x) => (VEXTRACTF64X4256 [1] x) +(GetHiFloat64x4 x) => (VEXTRACTF128128 [1] x) +(GetHiFloat64x8 x) => (VEXTRACTF64X4256 [1] x) +(GetHiInt8x32 x) => (VEXTRACTI128128 [1] x) +(GetHiInt8x64 x) => (VEXTRACTI64X4256 [1] x) +(GetHiInt16x16 x) => (VEXTRACTI128128 [1] x) +(GetHiInt16x32 x) => (VEXTRACTI64X4256 [1] x) +(GetHiInt32x8 x) => (VEXTRACTI128128 [1] x) +(GetHiInt32x16 x) => (VEXTRACTI64X4256 [1] x) +(GetHiInt64x4 x) => (VEXTRACTI128128 [1] x) +(GetHiInt64x8 x) => (VEXTRACTI64X4256 [1] x) +(GetHiUint8x32 x) => (VEXTRACTI128128 [1] x) +(GetHiUint8x64 x) => (VEXTRACTI64X4256 [1] x) +(GetHiUint16x16 x) => (VEXTRACTI128128 [1] x) +(GetHiUint16x32 x) => (VEXTRACTI64X4256 [1] x) +(GetHiUint32x8 x) => (VEXTRACTI128128 [1] x) +(GetHiUint32x16 x) => (VEXTRACTI64X4256 [1] x) +(GetHiUint64x4 x) => (VEXTRACTI128128 [1] x) +(GetHiUint64x8 x) => (VEXTRACTI64X4256 [1] x) +(GetLoFloat32x8 x) => (VEXTRACTF128128 [0] x) +(GetLoFloat32x16 x) => (VEXTRACTF64X4256 [0] x) +(GetLoFloat64x4 x) => (VEXTRACTF128128 [0] x) +(GetLoFloat64x8 x) => (VEXTRACTF64X4256 [0] x) +(GetLoInt8x32 x) => (VEXTRACTI128128 [0] x) +(GetLoInt8x64 x) => (VEXTRACTI64X4256 [0] x) +(GetLoInt16x16 x) => (VEXTRACTI128128 [0] x) +(GetLoInt16x32 x) => (VEXTRACTI64X4256 [0] x) +(GetLoInt32x8 x) => (VEXTRACTI128128 [0] x) +(GetLoInt32x16 x) => (VEXTRACTI64X4256 [0] x) +(GetLoInt64x4 x) => (VEXTRACTI128128 [0] x) +(GetLoInt64x8 x) => (VEXTRACTI64X4256 [0] x) +(GetLoUint8x32 x) => (VEXTRACTI128128 [0] x) +(GetLoUint8x64 x) => (VEXTRACTI64X4256 [0] x) +(GetLoUint16x16 x) => (VEXTRACTI128128 [0] x) +(GetLoUint16x32 x) => (VEXTRACTI64X4256 [0] x) +(GetLoUint32x8 x) => (VEXTRACTI128128 [0] x) +(GetLoUint32x16 x) => (VEXTRACTI64X4256 [0] x) +(GetLoUint64x4 x) => (VEXTRACTI128128 [0] x) +(GetLoUint64x8 x) => (VEXTRACTI64X4256 [0] x) (GreaterFloat32x4 x y) => (VCMPPS128 [14] x y) (GreaterFloat32x8 x y) => (VCMPPS256 [14] x y) (GreaterFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [14] x y)) @@ -1327,16 +1357,6 @@ (ScaleMaskedFloat64x2 x y mask) => (VSCALEFPDMasked128 x y (VPMOVVec64x2ToM mask)) (ScaleMaskedFloat64x4 x y mask) => (VSCALEFPDMasked256 x y (VPMOVVec64x4ToM mask)) (ScaleMaskedFloat64x8 x y mask) => (VSCALEFPDMasked512 x y (VPMOVVec64x8ToM mask)) -(Set128Float32x8 ...) => (VINSERTF128256 ...) -(Set128Float64x4 ...) => (VINSERTF128256 ...) -(Set128Int8x32 ...) => (VINSERTI128256 ...) -(Set128Int16x16 ...) => (VINSERTI128256 ...) -(Set128Int32x8 ...) => (VINSERTI128256 ...) -(Set128Int64x4 ...) => (VINSERTI128256 ...) -(Set128Uint8x32 ...) => (VINSERTI128256 ...) -(Set128Uint16x16 ...) => (VINSERTI128256 ...) -(Set128Uint32x8 ...) => (VINSERTI128256 ...) -(Set128Uint64x4 ...) => (VINSERTI128256 ...) (SetElemInt8x16 ...) => (VPINSRB128 ...) (SetElemInt16x8 ...) => (VPINSRW128 ...) (SetElemInt32x4 ...) => (VPINSRD128 ...) @@ -1345,6 +1365,46 @@ (SetElemUint16x8 ...) => (VPINSRW128 ...) (SetElemUint32x4 ...) => (VPINSRD128 ...) (SetElemUint64x2 ...) => (VPINSRQ128 ...) 
+(SetHiFloat32x8 x y) => (VINSERTF128256 [1] x y) +(SetHiFloat32x16 x y) => (VINSERTF64X4512 [1] x y) +(SetHiFloat64x4 x y) => (VINSERTF128256 [1] x y) +(SetHiFloat64x8 x y) => (VINSERTF64X4512 [1] x y) +(SetHiInt8x32 x y) => (VINSERTI128256 [1] x y) +(SetHiInt8x64 x y) => (VINSERTI64X4512 [1] x y) +(SetHiInt16x16 x y) => (VINSERTI128256 [1] x y) +(SetHiInt16x32 x y) => (VINSERTI64X4512 [1] x y) +(SetHiInt32x8 x y) => (VINSERTI128256 [1] x y) +(SetHiInt32x16 x y) => (VINSERTI64X4512 [1] x y) +(SetHiInt64x4 x y) => (VINSERTI128256 [1] x y) +(SetHiInt64x8 x y) => (VINSERTI64X4512 [1] x y) +(SetHiUint8x32 x y) => (VINSERTI128256 [1] x y) +(SetHiUint8x64 x y) => (VINSERTI64X4512 [1] x y) +(SetHiUint16x16 x y) => (VINSERTI128256 [1] x y) +(SetHiUint16x32 x y) => (VINSERTI64X4512 [1] x y) +(SetHiUint32x8 x y) => (VINSERTI128256 [1] x y) +(SetHiUint32x16 x y) => (VINSERTI64X4512 [1] x y) +(SetHiUint64x4 x y) => (VINSERTI128256 [1] x y) +(SetHiUint64x8 x y) => (VINSERTI64X4512 [1] x y) +(SetLoFloat32x8 x y) => (VINSERTF128256 [0] x y) +(SetLoFloat32x16 x y) => (VINSERTF64X4512 [0] x y) +(SetLoFloat64x4 x y) => (VINSERTF128256 [0] x y) +(SetLoFloat64x8 x y) => (VINSERTF64X4512 [0] x y) +(SetLoInt8x32 x y) => (VINSERTI128256 [0] x y) +(SetLoInt8x64 x y) => (VINSERTI64X4512 [0] x y) +(SetLoInt16x16 x y) => (VINSERTI128256 [0] x y) +(SetLoInt16x32 x y) => (VINSERTI64X4512 [0] x y) +(SetLoInt32x8 x y) => (VINSERTI128256 [0] x y) +(SetLoInt32x16 x y) => (VINSERTI64X4512 [0] x y) +(SetLoInt64x4 x y) => (VINSERTI128256 [0] x y) +(SetLoInt64x8 x y) => (VINSERTI64X4512 [0] x y) +(SetLoUint8x32 x y) => (VINSERTI128256 [0] x y) +(SetLoUint8x64 x y) => (VINSERTI64X4512 [0] x y) +(SetLoUint16x16 x y) => (VINSERTI128256 [0] x y) +(SetLoUint16x32 x y) => (VINSERTI64X4512 [0] x y) +(SetLoUint32x8 x y) => (VINSERTI128256 [0] x y) +(SetLoUint32x16 x y) => (VINSERTI64X4512 [0] x y) +(SetLoUint64x4 x y) => (VINSERTI128256 [0] x y) +(SetLoUint64x8 x y) => (VINSERTI64X4512 [0] x y) (ShiftAllLeftInt16x8 x (MOVQconst [c])) => (VPSLLW128const [int8(c)] x) (ShiftAllLeftInt16x8 x y) => (VPSLLW128 x y) (ShiftAllLeftInt16x16 x (MOVQconst [c])) => (VPSLLW256const [int8(c)] x) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 7860a0889eb65d..591f8a5bcafb58 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -912,12 +912,14 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VGF2P8AFFINEQBMasked128", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VGF2P8AFFINEQBMasked256", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VGF2P8AFFINEQBMasked512", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VEXTRACTF128128", argLength: 1, reg: v11, asm: "VEXTRACTF128", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VEXTRACTI128128", argLength: 1, reg: v11, asm: "VEXTRACTI128", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPEXTRB128", argLength: 1, reg: wgp, asm: "VPEXTRB", aux: "Int8", commutative: false, typ: "int8", resultInArg0: false}, {name: "VPEXTRW128", argLength: 1, reg: wgp, asm: "VPEXTRW", aux: "Int8", commutative: false, typ: "int16", resultInArg0: false}, 
{name: "VPEXTRD128", argLength: 1, reg: vgp, asm: "VPEXTRD", aux: "Int8", commutative: false, typ: "int32", resultInArg0: false}, {name: "VPEXTRQ128", argLength: 1, reg: vgp, asm: "VPEXTRQ", aux: "Int8", commutative: false, typ: "int64", resultInArg0: false}, + {name: "VEXTRACTF128128", argLength: 1, reg: v11, asm: "VEXTRACTF128", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VEXTRACTF64X4256", argLength: 1, reg: w11, asm: "VEXTRACTF64X4", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VEXTRACTI128128", argLength: 1, reg: v11, asm: "VEXTRACTI128", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VEXTRACTI64X4256", argLength: 1, reg: w11, asm: "VEXTRACTI64X4", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPUB128", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUB256", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUB512", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, @@ -966,12 +968,14 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPRORQMasked128", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPRORQMasked256", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPRORQMasked512", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VINSERTF128256", argLength: 2, reg: v21, asm: "VINSERTF128", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VINSERTI128256", argLength: 2, reg: v21, asm: "VINSERTI128", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPINSRB128", argLength: 2, reg: vgpv, asm: "VPINSRB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPINSRW128", argLength: 2, reg: vgpv, asm: "VPINSRW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPINSRD128", argLength: 2, reg: vgpv, asm: "VPINSRD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPINSRQ128", argLength: 2, reg: vgpv, asm: "VPINSRQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VINSERTF128256", argLength: 2, reg: v21, asm: "VINSERTF128", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VINSERTF64X4512", argLength: 2, reg: w21, asm: "VINSERTF64X4", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VINSERTI128256", argLength: 2, reg: v21, asm: "VINSERTI128", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VINSERTI64X4512", argLength: 2, reg: w21, asm: "VINSERTI64X4", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSHLDW128", argLength: 2, reg: w21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSHLDW256", argLength: 2, reg: w21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHLDW512", argLength: 2, reg: w21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, diff --git 
a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index bf85df5e6dadad..e132b058a4a0b6 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -410,6 +410,46 @@ func simdGenericOps() []opData { {name: "GaloisFieldMulUint8x16", argLength: 2, commutative: false}, {name: "GaloisFieldMulUint8x32", argLength: 2, commutative: false}, {name: "GaloisFieldMulUint8x64", argLength: 2, commutative: false}, + {name: "GetHiFloat32x8", argLength: 1, commutative: false}, + {name: "GetHiFloat32x16", argLength: 1, commutative: false}, + {name: "GetHiFloat64x4", argLength: 1, commutative: false}, + {name: "GetHiFloat64x8", argLength: 1, commutative: false}, + {name: "GetHiInt8x32", argLength: 1, commutative: false}, + {name: "GetHiInt8x64", argLength: 1, commutative: false}, + {name: "GetHiInt16x16", argLength: 1, commutative: false}, + {name: "GetHiInt16x32", argLength: 1, commutative: false}, + {name: "GetHiInt32x8", argLength: 1, commutative: false}, + {name: "GetHiInt32x16", argLength: 1, commutative: false}, + {name: "GetHiInt64x4", argLength: 1, commutative: false}, + {name: "GetHiInt64x8", argLength: 1, commutative: false}, + {name: "GetHiUint8x32", argLength: 1, commutative: false}, + {name: "GetHiUint8x64", argLength: 1, commutative: false}, + {name: "GetHiUint16x16", argLength: 1, commutative: false}, + {name: "GetHiUint16x32", argLength: 1, commutative: false}, + {name: "GetHiUint32x8", argLength: 1, commutative: false}, + {name: "GetHiUint32x16", argLength: 1, commutative: false}, + {name: "GetHiUint64x4", argLength: 1, commutative: false}, + {name: "GetHiUint64x8", argLength: 1, commutative: false}, + {name: "GetLoFloat32x8", argLength: 1, commutative: false}, + {name: "GetLoFloat32x16", argLength: 1, commutative: false}, + {name: "GetLoFloat64x4", argLength: 1, commutative: false}, + {name: "GetLoFloat64x8", argLength: 1, commutative: false}, + {name: "GetLoInt8x32", argLength: 1, commutative: false}, + {name: "GetLoInt8x64", argLength: 1, commutative: false}, + {name: "GetLoInt16x16", argLength: 1, commutative: false}, + {name: "GetLoInt16x32", argLength: 1, commutative: false}, + {name: "GetLoInt32x8", argLength: 1, commutative: false}, + {name: "GetLoInt32x16", argLength: 1, commutative: false}, + {name: "GetLoInt64x4", argLength: 1, commutative: false}, + {name: "GetLoInt64x8", argLength: 1, commutative: false}, + {name: "GetLoUint8x32", argLength: 1, commutative: false}, + {name: "GetLoUint8x64", argLength: 1, commutative: false}, + {name: "GetLoUint16x16", argLength: 1, commutative: false}, + {name: "GetLoUint16x32", argLength: 1, commutative: false}, + {name: "GetLoUint32x8", argLength: 1, commutative: false}, + {name: "GetLoUint32x16", argLength: 1, commutative: false}, + {name: "GetLoUint64x4", argLength: 1, commutative: false}, + {name: "GetLoUint64x8", argLength: 1, commutative: false}, {name: "GreaterEqualFloat32x4", argLength: 2, commutative: false}, {name: "GreaterEqualFloat32x8", argLength: 2, commutative: false}, {name: "GreaterEqualFloat32x16", argLength: 2, commutative: false}, @@ -1180,6 +1220,46 @@ func simdGenericOps() []opData { {name: "ScaleMaskedFloat64x2", argLength: 3, commutative: false}, {name: "ScaleMaskedFloat64x4", argLength: 3, commutative: false}, {name: "ScaleMaskedFloat64x8", argLength: 3, commutative: false}, + {name: "SetHiFloat32x8", argLength: 2, commutative: false}, + {name: "SetHiFloat32x16", argLength: 2, commutative: false}, + 
{name: "SetHiFloat64x4", argLength: 2, commutative: false}, + {name: "SetHiFloat64x8", argLength: 2, commutative: false}, + {name: "SetHiInt8x32", argLength: 2, commutative: false}, + {name: "SetHiInt8x64", argLength: 2, commutative: false}, + {name: "SetHiInt16x16", argLength: 2, commutative: false}, + {name: "SetHiInt16x32", argLength: 2, commutative: false}, + {name: "SetHiInt32x8", argLength: 2, commutative: false}, + {name: "SetHiInt32x16", argLength: 2, commutative: false}, + {name: "SetHiInt64x4", argLength: 2, commutative: false}, + {name: "SetHiInt64x8", argLength: 2, commutative: false}, + {name: "SetHiUint8x32", argLength: 2, commutative: false}, + {name: "SetHiUint8x64", argLength: 2, commutative: false}, + {name: "SetHiUint16x16", argLength: 2, commutative: false}, + {name: "SetHiUint16x32", argLength: 2, commutative: false}, + {name: "SetHiUint32x8", argLength: 2, commutative: false}, + {name: "SetHiUint32x16", argLength: 2, commutative: false}, + {name: "SetHiUint64x4", argLength: 2, commutative: false}, + {name: "SetHiUint64x8", argLength: 2, commutative: false}, + {name: "SetLoFloat32x8", argLength: 2, commutative: false}, + {name: "SetLoFloat32x16", argLength: 2, commutative: false}, + {name: "SetLoFloat64x4", argLength: 2, commutative: false}, + {name: "SetLoFloat64x8", argLength: 2, commutative: false}, + {name: "SetLoInt8x32", argLength: 2, commutative: false}, + {name: "SetLoInt8x64", argLength: 2, commutative: false}, + {name: "SetLoInt16x16", argLength: 2, commutative: false}, + {name: "SetLoInt16x32", argLength: 2, commutative: false}, + {name: "SetLoInt32x8", argLength: 2, commutative: false}, + {name: "SetLoInt32x16", argLength: 2, commutative: false}, + {name: "SetLoInt64x4", argLength: 2, commutative: false}, + {name: "SetLoInt64x8", argLength: 2, commutative: false}, + {name: "SetLoUint8x32", argLength: 2, commutative: false}, + {name: "SetLoUint8x64", argLength: 2, commutative: false}, + {name: "SetLoUint16x16", argLength: 2, commutative: false}, + {name: "SetLoUint16x32", argLength: 2, commutative: false}, + {name: "SetLoUint32x8", argLength: 2, commutative: false}, + {name: "SetLoUint32x16", argLength: 2, commutative: false}, + {name: "SetLoUint64x4", argLength: 2, commutative: false}, + {name: "SetLoUint64x8", argLength: 2, commutative: false}, {name: "ShiftAllLeftInt16x8", argLength: 2, commutative: false}, {name: "ShiftAllLeftInt16x16", argLength: 2, commutative: false}, {name: "ShiftAllLeftInt16x32", argLength: 2, commutative: false}, @@ -1624,16 +1704,6 @@ func simdGenericOps() []opData { {name: "GaloisFieldAffineTransformUint8x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "GaloisFieldAffineTransformUint8x32", argLength: 2, commutative: false, aux: "Int8"}, {name: "GaloisFieldAffineTransformUint8x64", argLength: 2, commutative: false, aux: "Int8"}, - {name: "Get128Float32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "Get128Float64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "Get128Int8x32", argLength: 1, commutative: false, aux: "Int8"}, - {name: "Get128Int16x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "Get128Int32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "Get128Int64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "Get128Uint8x32", argLength: 1, commutative: false, aux: "Int8"}, - {name: "Get128Uint16x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "Get128Uint32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "Get128Uint64x4", 
argLength: 1, commutative: false, aux: "Int8"}, {name: "GetElemInt8x16", argLength: 1, commutative: false, aux: "Int8"}, {name: "GetElemInt16x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "GetElemInt32x4", argLength: 1, commutative: false, aux: "Int8"}, @@ -1714,16 +1784,6 @@ func simdGenericOps() []opData { {name: "RoundScaledResidueMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "RoundScaledResidueMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "RoundScaledResidueMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "Set128Float32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "Set128Float64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "Set128Int8x32", argLength: 2, commutative: false, aux: "Int8"}, - {name: "Set128Int16x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "Set128Int32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "Set128Int64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "Set128Uint8x32", argLength: 2, commutative: false, aux: "Int8"}, - {name: "Set128Uint16x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "Set128Uint32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "Set128Uint64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "SetElemInt8x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "SetElemInt16x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "SetElemInt32x4", argLength: 2, commutative: false, aux: "Int8"}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 9ce9220901cb7d..b39311cd90f105 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -2131,12 +2131,14 @@ const ( OpAMD64VGF2P8AFFINEQBMasked128 OpAMD64VGF2P8AFFINEQBMasked256 OpAMD64VGF2P8AFFINEQBMasked512 - OpAMD64VEXTRACTF128128 - OpAMD64VEXTRACTI128128 OpAMD64VPEXTRB128 OpAMD64VPEXTRW128 OpAMD64VPEXTRD128 OpAMD64VPEXTRQ128 + OpAMD64VEXTRACTF128128 + OpAMD64VEXTRACTF64X4256 + OpAMD64VEXTRACTI128128 + OpAMD64VEXTRACTI64X4256 OpAMD64VPCMPUB128 OpAMD64VPCMPUB256 OpAMD64VPCMPUB512 @@ -2185,12 +2187,14 @@ const ( OpAMD64VPRORQMasked128 OpAMD64VPRORQMasked256 OpAMD64VPRORQMasked512 - OpAMD64VINSERTF128256 - OpAMD64VINSERTI128256 OpAMD64VPINSRB128 OpAMD64VPINSRW128 OpAMD64VPINSRD128 OpAMD64VPINSRQ128 + OpAMD64VINSERTF128256 + OpAMD64VINSERTF64X4512 + OpAMD64VINSERTI128256 + OpAMD64VINSERTI64X4512 OpAMD64VPSHLDW128 OpAMD64VPSHLDW256 OpAMD64VPSHLDW512 @@ -4967,6 +4971,46 @@ const ( OpGaloisFieldMulUint8x16 OpGaloisFieldMulUint8x32 OpGaloisFieldMulUint8x64 + OpGetHiFloat32x8 + OpGetHiFloat32x16 + OpGetHiFloat64x4 + OpGetHiFloat64x8 + OpGetHiInt8x32 + OpGetHiInt8x64 + OpGetHiInt16x16 + OpGetHiInt16x32 + OpGetHiInt32x8 + OpGetHiInt32x16 + OpGetHiInt64x4 + OpGetHiInt64x8 + OpGetHiUint8x32 + OpGetHiUint8x64 + OpGetHiUint16x16 + OpGetHiUint16x32 + OpGetHiUint32x8 + OpGetHiUint32x16 + OpGetHiUint64x4 + OpGetHiUint64x8 + OpGetLoFloat32x8 + OpGetLoFloat32x16 + OpGetLoFloat64x4 + OpGetLoFloat64x8 + OpGetLoInt8x32 + OpGetLoInt8x64 + OpGetLoInt16x16 + OpGetLoInt16x32 + OpGetLoInt32x8 + OpGetLoInt32x16 + OpGetLoInt64x4 + OpGetLoInt64x8 + OpGetLoUint8x32 + OpGetLoUint8x64 + OpGetLoUint16x16 + OpGetLoUint16x32 + OpGetLoUint32x8 + OpGetLoUint32x16 + OpGetLoUint64x4 + OpGetLoUint64x8 OpGreaterEqualFloat32x4 OpGreaterEqualFloat32x8 OpGreaterEqualFloat32x16 @@ -5737,6 +5781,46 @@ const ( OpScaleMaskedFloat64x2 OpScaleMaskedFloat64x4 
OpScaleMaskedFloat64x8 + OpSetHiFloat32x8 + OpSetHiFloat32x16 + OpSetHiFloat64x4 + OpSetHiFloat64x8 + OpSetHiInt8x32 + OpSetHiInt8x64 + OpSetHiInt16x16 + OpSetHiInt16x32 + OpSetHiInt32x8 + OpSetHiInt32x16 + OpSetHiInt64x4 + OpSetHiInt64x8 + OpSetHiUint8x32 + OpSetHiUint8x64 + OpSetHiUint16x16 + OpSetHiUint16x32 + OpSetHiUint32x8 + OpSetHiUint32x16 + OpSetHiUint64x4 + OpSetHiUint64x8 + OpSetLoFloat32x8 + OpSetLoFloat32x16 + OpSetLoFloat64x4 + OpSetLoFloat64x8 + OpSetLoInt8x32 + OpSetLoInt8x64 + OpSetLoInt16x16 + OpSetLoInt16x32 + OpSetLoInt32x8 + OpSetLoInt32x16 + OpSetLoInt64x4 + OpSetLoInt64x8 + OpSetLoUint8x32 + OpSetLoUint8x64 + OpSetLoUint16x16 + OpSetLoUint16x32 + OpSetLoUint32x8 + OpSetLoUint32x16 + OpSetLoUint64x4 + OpSetLoUint64x8 OpShiftAllLeftInt16x8 OpShiftAllLeftInt16x16 OpShiftAllLeftInt16x32 @@ -6181,16 +6265,6 @@ const ( OpGaloisFieldAffineTransformUint8x16 OpGaloisFieldAffineTransformUint8x32 OpGaloisFieldAffineTransformUint8x64 - OpGet128Float32x8 - OpGet128Float64x4 - OpGet128Int8x32 - OpGet128Int16x16 - OpGet128Int32x8 - OpGet128Int64x4 - OpGet128Uint8x32 - OpGet128Uint16x16 - OpGet128Uint32x8 - OpGet128Uint64x4 OpGetElemInt8x16 OpGetElemInt16x8 OpGetElemInt32x4 @@ -6271,16 +6345,6 @@ const ( OpRoundScaledResidueMaskedFloat64x2 OpRoundScaledResidueMaskedFloat64x4 OpRoundScaledResidueMaskedFloat64x8 - OpSet128Float32x8 - OpSet128Float64x4 - OpSet128Int8x32 - OpSet128Int16x16 - OpSet128Int32x8 - OpSet128Int64x4 - OpSet128Uint8x32 - OpSet128Uint16x16 - OpSet128Uint32x8 - OpSet128Uint64x4 OpSetElemInt8x16 OpSetElemInt16x8 OpSetElemInt32x4 @@ -33034,41 +33098,41 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VEXTRACTF128128", + name: "VPEXTRB128", auxType: auxInt8, argLen: 1, - asm: x86.AVEXTRACTF128, + asm: x86.AVPEXTRB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, { - name: "VEXTRACTI128128", + name: "VPEXTRW128", auxType: auxInt8, argLen: 1, - asm: x86.AVEXTRACTI128, + asm: x86.AVPEXTRW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, { - name: "VPEXTRB128", + name: "VPEXTRD128", auxType: auxInt8, argLen: 1, - asm: x86.AVPEXTRB, + asm: x86.AVPEXTRD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -33076,13 +33140,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPEXTRW128", + name: "VPEXTRQ128", auxType: auxInt8, argLen: 1, - asm: x86.AVPEXTRW, + asm: x86.AVPEXTRQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -33090,30 +33154,58 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPEXTRD128", + name: "VEXTRACTF128128", auxType: auxInt8, argLen: 1, - asm: x86.AVPEXTRD, + asm: x86.AVEXTRACTF128, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPEXTRQ128", + name: "VEXTRACTF64X4256", auxType: auxInt8, argLen: 1, - asm: x86.AVPEXTRQ, + asm: x86.AVEXTRACTF64X4, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VEXTRACTI128128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVEXTRACTI128, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VEXTRACTI64X4256", + auxType: auxInt8, + argLen: 1, + asm: x86.AVEXTRACTI64X4, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -33826,14 +33918,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VINSERTF128256", + name: "VPINSRB128", auxType: auxInt8, argLen: 2, - asm: x86.AVINSERTF128, + asm: x86.AVPINSRB, reg: regInfo{ inputs: []inputInfo{ + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -33841,14 +33933,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VINSERTI128256", + name: "VPINSRW128", auxType: auxInt8, argLen: 2, - asm: x86.AVINSERTI128, + asm: x86.AVPINSRW, reg: regInfo{ inputs: []inputInfo{ + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -33856,10 +33948,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPINSRB128", + name: "VPINSRD128", auxType: auxInt8, argLen: 2, - asm: x86.AVPINSRB, + asm: x86.AVPINSRD, reg: regInfo{ inputs: []inputInfo{ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -33871,10 +33963,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPINSRW128", + name: "VPINSRQ128", auxType: auxInt8, argLen: 2, - asm: x86.AVPINSRW, + asm: x86.AVPINSRQ, reg: regInfo{ inputs: []inputInfo{ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ 
-33886,14 +33978,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPINSRD128", + name: "VINSERTF128256", auxType: auxInt8, argLen: 2, - asm: x86.AVPINSRD, + asm: x86.AVINSERTF128, reg: regInfo{ inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -33901,20 +33993,50 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPINSRQ128", + name: "VINSERTF64X4512", auxType: auxInt8, argLen: 2, - asm: x86.AVPINSRQ, + asm: x86.AVINSERTF64X4, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VINSERTI128256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVINSERTI128, reg: regInfo{ inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, + { + name: "VINSERTI64X4512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVINSERTI64X4, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, { name: "VPSHLDW128", auxType: auxInt8, @@ -64937,6 +65059,206 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "GetHiFloat32x8", + argLen: 1, + generic: true, + }, + { + name: "GetHiFloat32x16", + argLen: 1, + generic: true, + }, + { + name: "GetHiFloat64x4", + argLen: 1, + generic: true, + }, + { + name: "GetHiFloat64x8", + argLen: 1, + generic: true, + }, + { + name: "GetHiInt8x32", + argLen: 1, + generic: true, + }, + { + name: "GetHiInt8x64", + argLen: 1, + generic: true, + }, + { + name: "GetHiInt16x16", + argLen: 1, + generic: true, + }, + { + name: "GetHiInt16x32", + argLen: 1, + generic: true, + }, + { + name: "GetHiInt32x8", + argLen: 1, + generic: true, + }, + { + name: "GetHiInt32x16", + argLen: 1, + generic: true, + }, + { + name: "GetHiInt64x4", + argLen: 1, + generic: true, + }, + { + name: "GetHiInt64x8", + argLen: 1, + generic: true, + }, + { + name: "GetHiUint8x32", + argLen: 1, + generic: true, + }, + { + name: "GetHiUint8x64", + argLen: 1, + generic: true, + }, + { + name: "GetHiUint16x16", + argLen: 1, + generic: true, + }, + { + name: "GetHiUint16x32", + argLen: 1, + generic: true, + }, + { + name: "GetHiUint32x8", + argLen: 1, + generic: true, + }, + { + name: "GetHiUint32x16", + argLen: 1, + generic: true, + }, + { + name: 
"GetHiUint64x4", + argLen: 1, + generic: true, + }, + { + name: "GetHiUint64x8", + argLen: 1, + generic: true, + }, + { + name: "GetLoFloat32x8", + argLen: 1, + generic: true, + }, + { + name: "GetLoFloat32x16", + argLen: 1, + generic: true, + }, + { + name: "GetLoFloat64x4", + argLen: 1, + generic: true, + }, + { + name: "GetLoFloat64x8", + argLen: 1, + generic: true, + }, + { + name: "GetLoInt8x32", + argLen: 1, + generic: true, + }, + { + name: "GetLoInt8x64", + argLen: 1, + generic: true, + }, + { + name: "GetLoInt16x16", + argLen: 1, + generic: true, + }, + { + name: "GetLoInt16x32", + argLen: 1, + generic: true, + }, + { + name: "GetLoInt32x8", + argLen: 1, + generic: true, + }, + { + name: "GetLoInt32x16", + argLen: 1, + generic: true, + }, + { + name: "GetLoInt64x4", + argLen: 1, + generic: true, + }, + { + name: "GetLoInt64x8", + argLen: 1, + generic: true, + }, + { + name: "GetLoUint8x32", + argLen: 1, + generic: true, + }, + { + name: "GetLoUint8x64", + argLen: 1, + generic: true, + }, + { + name: "GetLoUint16x16", + argLen: 1, + generic: true, + }, + { + name: "GetLoUint16x32", + argLen: 1, + generic: true, + }, + { + name: "GetLoUint32x8", + argLen: 1, + generic: true, + }, + { + name: "GetLoUint32x16", + argLen: 1, + generic: true, + }, + { + name: "GetLoUint64x4", + argLen: 1, + generic: true, + }, + { + name: "GetLoUint64x8", + argLen: 1, + generic: true, + }, { name: "GreaterEqualFloat32x4", argLen: 2, @@ -69073,6 +69395,206 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "SetHiFloat32x8", + argLen: 2, + generic: true, + }, + { + name: "SetHiFloat32x16", + argLen: 2, + generic: true, + }, + { + name: "SetHiFloat64x4", + argLen: 2, + generic: true, + }, + { + name: "SetHiFloat64x8", + argLen: 2, + generic: true, + }, + { + name: "SetHiInt8x32", + argLen: 2, + generic: true, + }, + { + name: "SetHiInt8x64", + argLen: 2, + generic: true, + }, + { + name: "SetHiInt16x16", + argLen: 2, + generic: true, + }, + { + name: "SetHiInt16x32", + argLen: 2, + generic: true, + }, + { + name: "SetHiInt32x8", + argLen: 2, + generic: true, + }, + { + name: "SetHiInt32x16", + argLen: 2, + generic: true, + }, + { + name: "SetHiInt64x4", + argLen: 2, + generic: true, + }, + { + name: "SetHiInt64x8", + argLen: 2, + generic: true, + }, + { + name: "SetHiUint8x32", + argLen: 2, + generic: true, + }, + { + name: "SetHiUint8x64", + argLen: 2, + generic: true, + }, + { + name: "SetHiUint16x16", + argLen: 2, + generic: true, + }, + { + name: "SetHiUint16x32", + argLen: 2, + generic: true, + }, + { + name: "SetHiUint32x8", + argLen: 2, + generic: true, + }, + { + name: "SetHiUint32x16", + argLen: 2, + generic: true, + }, + { + name: "SetHiUint64x4", + argLen: 2, + generic: true, + }, + { + name: "SetHiUint64x8", + argLen: 2, + generic: true, + }, + { + name: "SetLoFloat32x8", + argLen: 2, + generic: true, + }, + { + name: "SetLoFloat32x16", + argLen: 2, + generic: true, + }, + { + name: "SetLoFloat64x4", + argLen: 2, + generic: true, + }, + { + name: "SetLoFloat64x8", + argLen: 2, + generic: true, + }, + { + name: "SetLoInt8x32", + argLen: 2, + generic: true, + }, + { + name: "SetLoInt8x64", + argLen: 2, + generic: true, + }, + { + name: "SetLoInt16x16", + argLen: 2, + generic: true, + }, + { + name: "SetLoInt16x32", + argLen: 2, + generic: true, + }, + { + name: "SetLoInt32x8", + argLen: 2, + generic: true, + }, + { + name: "SetLoInt32x16", + argLen: 2, + generic: true, + }, + { + name: "SetLoInt64x4", + argLen: 2, + generic: true, + }, + { + name: "SetLoInt64x8", + argLen: 
2, + generic: true, + }, + { + name: "SetLoUint8x32", + argLen: 2, + generic: true, + }, + { + name: "SetLoUint8x64", + argLen: 2, + generic: true, + }, + { + name: "SetLoUint16x16", + argLen: 2, + generic: true, + }, + { + name: "SetLoUint16x32", + argLen: 2, + generic: true, + }, + { + name: "SetLoUint32x8", + argLen: 2, + generic: true, + }, + { + name: "SetLoUint32x16", + argLen: 2, + generic: true, + }, + { + name: "SetLoUint64x4", + argLen: 2, + generic: true, + }, + { + name: "SetLoUint64x8", + argLen: 2, + generic: true, + }, { name: "ShiftAllLeftInt16x8", argLen: 2, @@ -71389,66 +71911,6 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, - { - name: "Get128Float32x8", - auxType: auxInt8, - argLen: 1, - generic: true, - }, - { - name: "Get128Float64x4", - auxType: auxInt8, - argLen: 1, - generic: true, - }, - { - name: "Get128Int8x32", - auxType: auxInt8, - argLen: 1, - generic: true, - }, - { - name: "Get128Int16x16", - auxType: auxInt8, - argLen: 1, - generic: true, - }, - { - name: "Get128Int32x8", - auxType: auxInt8, - argLen: 1, - generic: true, - }, - { - name: "Get128Int64x4", - auxType: auxInt8, - argLen: 1, - generic: true, - }, - { - name: "Get128Uint8x32", - auxType: auxInt8, - argLen: 1, - generic: true, - }, - { - name: "Get128Uint16x16", - auxType: auxInt8, - argLen: 1, - generic: true, - }, - { - name: "Get128Uint32x8", - auxType: auxInt8, - argLen: 1, - generic: true, - }, - { - name: "Get128Uint64x4", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "GetElemInt8x16", auxType: auxInt8, @@ -71929,66 +72391,6 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, - { - name: "Set128Float32x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - name: "Set128Float64x4", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - name: "Set128Int8x32", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - name: "Set128Int16x16", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - name: "Set128Int32x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - name: "Set128Int64x4", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - name: "Set128Uint8x32", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - name: "Set128Uint16x16", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - name: "Set128Uint32x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - name: "Set128Uint64x4", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "SetElemInt8x16", auxType: auxInt8, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index e181798245882f..91fd3fb470f080 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1949,36 +1949,6 @@ func rewriteValueAMD64(v *Value) bool { case OpGaloisFieldMulUint8x64: v.Op = OpAMD64VGF2P8MULB512 return true - case OpGet128Float32x8: - v.Op = OpAMD64VEXTRACTF128128 - return true - case OpGet128Float64x4: - v.Op = OpAMD64VEXTRACTF128128 - return true - case OpGet128Int16x16: - v.Op = OpAMD64VEXTRACTI128128 - return true - case OpGet128Int32x8: - v.Op = OpAMD64VEXTRACTI128128 - return true - case OpGet128Int64x4: - v.Op = OpAMD64VEXTRACTI128128 - return true - case OpGet128Int8x32: - v.Op = OpAMD64VEXTRACTI128128 - return true - case OpGet128Uint16x16: - v.Op = OpAMD64VEXTRACTI128128 - return true - case OpGet128Uint32x8: - v.Op = OpAMD64VEXTRACTI128128 - return true - case OpGet128Uint64x4: - v.Op = OpAMD64VEXTRACTI128128 - 
return true - case OpGet128Uint8x32: - v.Op = OpAMD64VEXTRACTI128128 - return true case OpGetCallerPC: v.Op = OpAMD64LoweredGetCallerPC return true @@ -2014,6 +1984,86 @@ func rewriteValueAMD64(v *Value) bool { return true case OpGetG: return rewriteValueAMD64_OpGetG(v) + case OpGetHiFloat32x16: + return rewriteValueAMD64_OpGetHiFloat32x16(v) + case OpGetHiFloat32x8: + return rewriteValueAMD64_OpGetHiFloat32x8(v) + case OpGetHiFloat64x4: + return rewriteValueAMD64_OpGetHiFloat64x4(v) + case OpGetHiFloat64x8: + return rewriteValueAMD64_OpGetHiFloat64x8(v) + case OpGetHiInt16x16: + return rewriteValueAMD64_OpGetHiInt16x16(v) + case OpGetHiInt16x32: + return rewriteValueAMD64_OpGetHiInt16x32(v) + case OpGetHiInt32x16: + return rewriteValueAMD64_OpGetHiInt32x16(v) + case OpGetHiInt32x8: + return rewriteValueAMD64_OpGetHiInt32x8(v) + case OpGetHiInt64x4: + return rewriteValueAMD64_OpGetHiInt64x4(v) + case OpGetHiInt64x8: + return rewriteValueAMD64_OpGetHiInt64x8(v) + case OpGetHiInt8x32: + return rewriteValueAMD64_OpGetHiInt8x32(v) + case OpGetHiInt8x64: + return rewriteValueAMD64_OpGetHiInt8x64(v) + case OpGetHiUint16x16: + return rewriteValueAMD64_OpGetHiUint16x16(v) + case OpGetHiUint16x32: + return rewriteValueAMD64_OpGetHiUint16x32(v) + case OpGetHiUint32x16: + return rewriteValueAMD64_OpGetHiUint32x16(v) + case OpGetHiUint32x8: + return rewriteValueAMD64_OpGetHiUint32x8(v) + case OpGetHiUint64x4: + return rewriteValueAMD64_OpGetHiUint64x4(v) + case OpGetHiUint64x8: + return rewriteValueAMD64_OpGetHiUint64x8(v) + case OpGetHiUint8x32: + return rewriteValueAMD64_OpGetHiUint8x32(v) + case OpGetHiUint8x64: + return rewriteValueAMD64_OpGetHiUint8x64(v) + case OpGetLoFloat32x16: + return rewriteValueAMD64_OpGetLoFloat32x16(v) + case OpGetLoFloat32x8: + return rewriteValueAMD64_OpGetLoFloat32x8(v) + case OpGetLoFloat64x4: + return rewriteValueAMD64_OpGetLoFloat64x4(v) + case OpGetLoFloat64x8: + return rewriteValueAMD64_OpGetLoFloat64x8(v) + case OpGetLoInt16x16: + return rewriteValueAMD64_OpGetLoInt16x16(v) + case OpGetLoInt16x32: + return rewriteValueAMD64_OpGetLoInt16x32(v) + case OpGetLoInt32x16: + return rewriteValueAMD64_OpGetLoInt32x16(v) + case OpGetLoInt32x8: + return rewriteValueAMD64_OpGetLoInt32x8(v) + case OpGetLoInt64x4: + return rewriteValueAMD64_OpGetLoInt64x4(v) + case OpGetLoInt64x8: + return rewriteValueAMD64_OpGetLoInt64x8(v) + case OpGetLoInt8x32: + return rewriteValueAMD64_OpGetLoInt8x32(v) + case OpGetLoInt8x64: + return rewriteValueAMD64_OpGetLoInt8x64(v) + case OpGetLoUint16x16: + return rewriteValueAMD64_OpGetLoUint16x16(v) + case OpGetLoUint16x32: + return rewriteValueAMD64_OpGetLoUint16x32(v) + case OpGetLoUint32x16: + return rewriteValueAMD64_OpGetLoUint32x16(v) + case OpGetLoUint32x8: + return rewriteValueAMD64_OpGetLoUint32x8(v) + case OpGetLoUint64x4: + return rewriteValueAMD64_OpGetLoUint64x4(v) + case OpGetLoUint64x8: + return rewriteValueAMD64_OpGetLoUint64x8(v) + case OpGetLoUint8x32: + return rewriteValueAMD64_OpGetLoUint8x32(v) + case OpGetLoUint8x64: + return rewriteValueAMD64_OpGetLoUint8x64(v) case OpGreaterEqualFloat32x16: return rewriteValueAMD64_OpGreaterEqualFloat32x16(v) case OpGreaterEqualFloat32x4: @@ -4306,36 +4356,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpSelect1(v) case OpSelectN: return rewriteValueAMD64_OpSelectN(v) - case OpSet128Float32x8: - v.Op = OpAMD64VINSERTF128256 - return true - case OpSet128Float64x4: - v.Op = OpAMD64VINSERTF128256 - return true - case OpSet128Int16x16: - v.Op = OpAMD64VINSERTI128256 - 
return true - case OpSet128Int32x8: - v.Op = OpAMD64VINSERTI128256 - return true - case OpSet128Int64x4: - v.Op = OpAMD64VINSERTI128256 - return true - case OpSet128Int8x32: - v.Op = OpAMD64VINSERTI128256 - return true - case OpSet128Uint16x16: - v.Op = OpAMD64VINSERTI128256 - return true - case OpSet128Uint32x8: - v.Op = OpAMD64VINSERTI128256 - return true - case OpSet128Uint64x4: - v.Op = OpAMD64VINSERTI128256 - return true - case OpSet128Uint8x32: - v.Op = OpAMD64VINSERTI128256 - return true case OpSetElemInt16x8: v.Op = OpAMD64VPINSRW128 return true @@ -4360,6 +4380,86 @@ func rewriteValueAMD64(v *Value) bool { case OpSetElemUint8x16: v.Op = OpAMD64VPINSRB128 return true + case OpSetHiFloat32x16: + return rewriteValueAMD64_OpSetHiFloat32x16(v) + case OpSetHiFloat32x8: + return rewriteValueAMD64_OpSetHiFloat32x8(v) + case OpSetHiFloat64x4: + return rewriteValueAMD64_OpSetHiFloat64x4(v) + case OpSetHiFloat64x8: + return rewriteValueAMD64_OpSetHiFloat64x8(v) + case OpSetHiInt16x16: + return rewriteValueAMD64_OpSetHiInt16x16(v) + case OpSetHiInt16x32: + return rewriteValueAMD64_OpSetHiInt16x32(v) + case OpSetHiInt32x16: + return rewriteValueAMD64_OpSetHiInt32x16(v) + case OpSetHiInt32x8: + return rewriteValueAMD64_OpSetHiInt32x8(v) + case OpSetHiInt64x4: + return rewriteValueAMD64_OpSetHiInt64x4(v) + case OpSetHiInt64x8: + return rewriteValueAMD64_OpSetHiInt64x8(v) + case OpSetHiInt8x32: + return rewriteValueAMD64_OpSetHiInt8x32(v) + case OpSetHiInt8x64: + return rewriteValueAMD64_OpSetHiInt8x64(v) + case OpSetHiUint16x16: + return rewriteValueAMD64_OpSetHiUint16x16(v) + case OpSetHiUint16x32: + return rewriteValueAMD64_OpSetHiUint16x32(v) + case OpSetHiUint32x16: + return rewriteValueAMD64_OpSetHiUint32x16(v) + case OpSetHiUint32x8: + return rewriteValueAMD64_OpSetHiUint32x8(v) + case OpSetHiUint64x4: + return rewriteValueAMD64_OpSetHiUint64x4(v) + case OpSetHiUint64x8: + return rewriteValueAMD64_OpSetHiUint64x8(v) + case OpSetHiUint8x32: + return rewriteValueAMD64_OpSetHiUint8x32(v) + case OpSetHiUint8x64: + return rewriteValueAMD64_OpSetHiUint8x64(v) + case OpSetLoFloat32x16: + return rewriteValueAMD64_OpSetLoFloat32x16(v) + case OpSetLoFloat32x8: + return rewriteValueAMD64_OpSetLoFloat32x8(v) + case OpSetLoFloat64x4: + return rewriteValueAMD64_OpSetLoFloat64x4(v) + case OpSetLoFloat64x8: + return rewriteValueAMD64_OpSetLoFloat64x8(v) + case OpSetLoInt16x16: + return rewriteValueAMD64_OpSetLoInt16x16(v) + case OpSetLoInt16x32: + return rewriteValueAMD64_OpSetLoInt16x32(v) + case OpSetLoInt32x16: + return rewriteValueAMD64_OpSetLoInt32x16(v) + case OpSetLoInt32x8: + return rewriteValueAMD64_OpSetLoInt32x8(v) + case OpSetLoInt64x4: + return rewriteValueAMD64_OpSetLoInt64x4(v) + case OpSetLoInt64x8: + return rewriteValueAMD64_OpSetLoInt64x8(v) + case OpSetLoInt8x32: + return rewriteValueAMD64_OpSetLoInt8x32(v) + case OpSetLoInt8x64: + return rewriteValueAMD64_OpSetLoInt8x64(v) + case OpSetLoUint16x16: + return rewriteValueAMD64_OpSetLoUint16x16(v) + case OpSetLoUint16x32: + return rewriteValueAMD64_OpSetLoUint16x32(v) + case OpSetLoUint32x16: + return rewriteValueAMD64_OpSetLoUint32x16(v) + case OpSetLoUint32x8: + return rewriteValueAMD64_OpSetLoUint32x8(v) + case OpSetLoUint64x4: + return rewriteValueAMD64_OpSetLoUint64x4(v) + case OpSetLoUint64x8: + return rewriteValueAMD64_OpSetLoUint64x8(v) + case OpSetLoUint8x32: + return rewriteValueAMD64_OpSetLoUint8x32(v) + case OpSetLoUint8x64: + return rewriteValueAMD64_OpSetLoUint8x64(v) case OpShiftAllLeftConcatInt16x16: v.Op = 
OpAMD64VPSHLDW256 return true @@ -35376,6 +35476,486 @@ func rewriteValueAMD64_OpGetG(v *Value) bool { } return false } +func rewriteValueAMD64_OpGetHiFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (GetHiFloat32x16 x) + // result: (VEXTRACTF64X4256 [1] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTF64X4256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetHiFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (GetHiFloat32x8 x) + // result: (VEXTRACTF128128 [1] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTF128128) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetHiFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (GetHiFloat64x4 x) + // result: (VEXTRACTF128128 [1] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTF128128) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetHiFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (GetHiFloat64x8 x) + // result: (VEXTRACTF64X4256 [1] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTF64X4256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetHiInt16x16(v *Value) bool { + v_0 := v.Args[0] + // match: (GetHiInt16x16 x) + // result: (VEXTRACTI128128 [1] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetHiInt16x32(v *Value) bool { + v_0 := v.Args[0] + // match: (GetHiInt16x32 x) + // result: (VEXTRACTI64X4256 [1] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI64X4256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetHiInt32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (GetHiInt32x16 x) + // result: (VEXTRACTI64X4256 [1] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI64X4256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetHiInt32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (GetHiInt32x8 x) + // result: (VEXTRACTI128128 [1] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetHiInt64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (GetHiInt64x4 x) + // result: (VEXTRACTI128128 [1] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetHiInt64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (GetHiInt64x8 x) + // result: (VEXTRACTI64X4256 [1] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI64X4256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetHiInt8x32(v *Value) bool { + v_0 := v.Args[0] + // match: (GetHiInt8x32 x) + // result: (VEXTRACTI128128 [1] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetHiInt8x64(v *Value) bool { + v_0 := v.Args[0] + // match: (GetHiInt8x64 x) + // result: (VEXTRACTI64X4256 [1] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI64X4256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetHiUint16x16(v *Value) bool { + v_0 := v.Args[0] + // match: (GetHiUint16x16 x) + // result: (VEXTRACTI128128 [1] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func 
rewriteValueAMD64_OpGetHiUint16x32(v *Value) bool { + v_0 := v.Args[0] + // match: (GetHiUint16x32 x) + // result: (VEXTRACTI64X4256 [1] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI64X4256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetHiUint32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (GetHiUint32x16 x) + // result: (VEXTRACTI64X4256 [1] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI64X4256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetHiUint32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (GetHiUint32x8 x) + // result: (VEXTRACTI128128 [1] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetHiUint64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (GetHiUint64x4 x) + // result: (VEXTRACTI128128 [1] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetHiUint64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (GetHiUint64x8 x) + // result: (VEXTRACTI64X4256 [1] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI64X4256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetHiUint8x32(v *Value) bool { + v_0 := v.Args[0] + // match: (GetHiUint8x32 x) + // result: (VEXTRACTI128128 [1] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetHiUint8x64(v *Value) bool { + v_0 := v.Args[0] + // match: (GetHiUint8x64 x) + // result: (VEXTRACTI64X4256 [1] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI64X4256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetLoFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (GetLoFloat32x16 x) + // result: (VEXTRACTF64X4256 [0] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTF64X4256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetLoFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (GetLoFloat32x8 x) + // result: (VEXTRACTF128128 [0] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTF128128) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetLoFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (GetLoFloat64x4 x) + // result: (VEXTRACTF128128 [0] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTF128128) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetLoFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (GetLoFloat64x8 x) + // result: (VEXTRACTF64X4256 [0] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTF64X4256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetLoInt16x16(v *Value) bool { + v_0 := v.Args[0] + // match: (GetLoInt16x16 x) + // result: (VEXTRACTI128128 [0] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetLoInt16x32(v *Value) bool { + v_0 := v.Args[0] + // match: (GetLoInt16x32 x) + // result: (VEXTRACTI64X4256 [0] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI64X4256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetLoInt32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (GetLoInt32x16 x) + // result: (VEXTRACTI64X4256 [0] x) + for { 
+ x := v_0 + v.reset(OpAMD64VEXTRACTI64X4256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetLoInt32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (GetLoInt32x8 x) + // result: (VEXTRACTI128128 [0] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetLoInt64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (GetLoInt64x4 x) + // result: (VEXTRACTI128128 [0] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetLoInt64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (GetLoInt64x8 x) + // result: (VEXTRACTI64X4256 [0] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI64X4256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetLoInt8x32(v *Value) bool { + v_0 := v.Args[0] + // match: (GetLoInt8x32 x) + // result: (VEXTRACTI128128 [0] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetLoInt8x64(v *Value) bool { + v_0 := v.Args[0] + // match: (GetLoInt8x64 x) + // result: (VEXTRACTI64X4256 [0] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI64X4256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetLoUint16x16(v *Value) bool { + v_0 := v.Args[0] + // match: (GetLoUint16x16 x) + // result: (VEXTRACTI128128 [0] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetLoUint16x32(v *Value) bool { + v_0 := v.Args[0] + // match: (GetLoUint16x32 x) + // result: (VEXTRACTI64X4256 [0] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI64X4256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetLoUint32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (GetLoUint32x16 x) + // result: (VEXTRACTI64X4256 [0] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI64X4256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetLoUint32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (GetLoUint32x8 x) + // result: (VEXTRACTI128128 [0] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetLoUint64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (GetLoUint64x4 x) + // result: (VEXTRACTI128128 [0] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetLoUint64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (GetLoUint64x8 x) + // result: (VEXTRACTI64X4256 [0] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI64X4256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetLoUint8x32(v *Value) bool { + v_0 := v.Args[0] + // match: (GetLoUint8x32 x) + // result: (VEXTRACTI128128 [0] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetLoUint8x64(v *Value) bool { + v_0 := v.Args[0] + // match: (GetLoUint8x64 x) + // result: (VEXTRACTI64X4256 [0] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI64X4256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} func rewriteValueAMD64_OpGreaterEqualFloat32x16(v 
*Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -50409,6 +50989,566 @@ func rewriteValueAMD64_OpSelectN(v *Value) bool { } return false } +func rewriteValueAMD64_OpSetHiFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetHiFloat32x16 x y) + // result: (VINSERTF64X4512 [1] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTF64X4512) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetHiFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetHiFloat32x8 x y) + // result: (VINSERTF128256 [1] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTF128256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetHiFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetHiFloat64x4 x y) + // result: (VINSERTF128256 [1] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTF128256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetHiFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetHiFloat64x8 x y) + // result: (VINSERTF64X4512 [1] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTF64X4512) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetHiInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetHiInt16x16 x y) + // result: (VINSERTI128256 [1] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetHiInt16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetHiInt16x32 x y) + // result: (VINSERTI64X4512 [1] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetHiInt32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetHiInt32x16 x y) + // result: (VINSERTI64X4512 [1] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetHiInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetHiInt32x8 x y) + // result: (VINSERTI128256 [1] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetHiInt64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetHiInt64x4 x y) + // result: (VINSERTI128256 [1] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetHiInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetHiInt64x8 x y) + // result: (VINSERTI64X4512 [1] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetHiInt8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetHiInt8x32 x y) + // result: (VINSERTI128256 [1] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetHiInt8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetHiInt8x64 x y) + // result: 
(VINSERTI64X4512 [1] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetHiUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetHiUint16x16 x y) + // result: (VINSERTI128256 [1] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetHiUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetHiUint16x32 x y) + // result: (VINSERTI64X4512 [1] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetHiUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetHiUint32x16 x y) + // result: (VINSERTI64X4512 [1] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetHiUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetHiUint32x8 x y) + // result: (VINSERTI128256 [1] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetHiUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetHiUint64x4 x y) + // result: (VINSERTI128256 [1] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetHiUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetHiUint64x8 x y) + // result: (VINSERTI64X4512 [1] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetHiUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetHiUint8x32 x y) + // result: (VINSERTI128256 [1] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetHiUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetHiUint8x64 x y) + // result: (VINSERTI64X4512 [1] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetLoFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetLoFloat32x16 x y) + // result: (VINSERTF64X4512 [0] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTF64X4512) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetLoFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetLoFloat32x8 x y) + // result: (VINSERTF128256 [0] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTF128256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetLoFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetLoFloat64x4 x y) + // result: (VINSERTF128256 [0] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTF128256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetLoFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: 
(SetLoFloat64x8 x y) + // result: (VINSERTF64X4512 [0] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTF64X4512) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetLoInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetLoInt16x16 x y) + // result: (VINSERTI128256 [0] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetLoInt16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetLoInt16x32 x y) + // result: (VINSERTI64X4512 [0] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetLoInt32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetLoInt32x16 x y) + // result: (VINSERTI64X4512 [0] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetLoInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetLoInt32x8 x y) + // result: (VINSERTI128256 [0] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetLoInt64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetLoInt64x4 x y) + // result: (VINSERTI128256 [0] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetLoInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetLoInt64x8 x y) + // result: (VINSERTI64X4512 [0] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetLoInt8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetLoInt8x32 x y) + // result: (VINSERTI128256 [0] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetLoInt8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetLoInt8x64 x y) + // result: (VINSERTI64X4512 [0] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetLoUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetLoUint16x16 x y) + // result: (VINSERTI128256 [0] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetLoUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetLoUint16x32 x y) + // result: (VINSERTI64X4512 [0] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetLoUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetLoUint32x16 x y) + // result: (VINSERTI64X4512 [0] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetLoUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + 
// match: (SetLoUint32x8 x y) + // result: (VINSERTI128256 [0] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetLoUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetLoUint64x4 x y) + // result: (VINSERTI128256 [0] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetLoUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetLoUint64x8 x y) + // result: (VINSERTI64X4512 [0] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetLoUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetLoUint8x32 x y) + // result: (VINSERTI128256 [0] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetLoUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetLoUint8x64 x y) + // result: (VINSERTI64X4512 [0] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) + return true + } +} func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index fb68846347d273..873bb8e2de17c9 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -478,16 +478,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint8x16.GaloisFieldMulMasked", opLen3(ssa.OpGaloisFieldMulMaskedUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x32.GaloisFieldMulMasked", opLen3(ssa.OpGaloisFieldMulMaskedUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x64.GaloisFieldMulMasked", opLen3(ssa.OpGaloisFieldMulMaskedUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x8.Get128", opLen1Imm8(ssa.OpGet128Float32x8, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Float64x4.Get128", opLen1Imm8(ssa.OpGet128Float64x4, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int8x32.Get128", opLen1Imm8(ssa.OpGet128Int8x32, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int16x16.Get128", opLen1Imm8(ssa.OpGet128Int16x16, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int32x8.Get128", opLen1Imm8(ssa.OpGet128Int32x8, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int64x4.Get128", opLen1Imm8(ssa.OpGet128Int64x4, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint8x32.Get128", opLen1Imm8(ssa.OpGet128Uint8x32, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint16x16.Get128", opLen1Imm8(ssa.OpGet128Uint16x16, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint32x8.Get128", opLen1Imm8(ssa.OpGet128Uint32x8, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint64x4.Get128", opLen1Imm8(ssa.OpGet128Uint64x4, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Int8x16.GetElem", opLen1Imm8(ssa.OpGetElemInt8x16, types.Types[types.TINT8], 0), sys.AMD64) addF(simdPackage, "Int16x8.GetElem", opLen1Imm8(ssa.OpGetElemInt16x8, types.Types[types.TINT16], 0), sys.AMD64) addF(simdPackage, "Int32x4.GetElem", opLen1Imm8(ssa.OpGetElemInt32x4, types.Types[types.TINT32], 0), sys.AMD64) @@ -496,6 +486,46 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint16x8.GetElem", opLen1Imm8(ssa.OpGetElemUint16x8, types.Types[types.TUINT16], 0), sys.AMD64) addF(simdPackage, "Uint32x4.GetElem", opLen1Imm8(ssa.OpGetElemUint32x4, types.Types[types.TUINT32], 0), sys.AMD64) addF(simdPackage, "Uint64x2.GetElem", opLen1Imm8(ssa.OpGetElemUint64x2, types.Types[types.TUINT64], 0), sys.AMD64) + addF(simdPackage, "Float32x8.GetHi", opLen1(ssa.OpGetHiFloat32x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x16.GetHi", opLen1(ssa.OpGetHiFloat32x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.GetHi", opLen1(ssa.OpGetHiFloat64x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x8.GetHi", opLen1(ssa.OpGetHiFloat64x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.GetHi", opLen1(ssa.OpGetHiInt8x32, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x64.GetHi", opLen1(ssa.OpGetHiInt8x64, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.GetHi", opLen1(ssa.OpGetHiInt16x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x32.GetHi", opLen1(ssa.OpGetHiInt16x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.GetHi", opLen1(ssa.OpGetHiInt32x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x16.GetHi", opLen1(ssa.OpGetHiInt32x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.GetHi", opLen1(ssa.OpGetHiInt64x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x8.GetHi", opLen1(ssa.OpGetHiInt64x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.GetHi", opLen1(ssa.OpGetHiUint8x32, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x64.GetHi", opLen1(ssa.OpGetHiUint8x64, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.GetHi", opLen1(ssa.OpGetHiUint16x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x32.GetHi", opLen1(ssa.OpGetHiUint16x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.GetHi", opLen1(ssa.OpGetHiUint32x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x16.GetHi", opLen1(ssa.OpGetHiUint32x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.GetHi", opLen1(ssa.OpGetHiUint64x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x8.GetHi", opLen1(ssa.OpGetHiUint64x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.GetLo", opLen1(ssa.OpGetLoFloat32x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x16.GetLo", opLen1(ssa.OpGetLoFloat32x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.GetLo", opLen1(ssa.OpGetLoFloat64x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x8.GetLo", opLen1(ssa.OpGetLoFloat64x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.GetLo", opLen1(ssa.OpGetLoInt8x32, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x64.GetLo", opLen1(ssa.OpGetLoInt8x64, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.GetLo", opLen1(ssa.OpGetLoInt16x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x32.GetLo", opLen1(ssa.OpGetLoInt16x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.GetLo", opLen1(ssa.OpGetLoInt32x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x16.GetLo", opLen1(ssa.OpGetLoInt32x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.GetLo", opLen1(ssa.OpGetLoInt64x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x8.GetLo", opLen1(ssa.OpGetLoInt64x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.GetLo", opLen1(ssa.OpGetLoUint8x32, types.TypeVec128), sys.AMD64) + addF(simdPackage, 
"Uint8x64.GetLo", opLen1(ssa.OpGetLoUint8x64, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.GetLo", opLen1(ssa.OpGetLoUint16x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x32.GetLo", opLen1(ssa.OpGetLoUint16x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.GetLo", opLen1(ssa.OpGetLoUint32x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x16.GetLo", opLen1(ssa.OpGetLoUint32x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.GetLo", opLen1(ssa.OpGetLoUint64x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x8.GetLo", opLen1(ssa.OpGetLoUint64x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x16.Greater", opLen2(ssa.OpGreaterInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Greater", opLen2(ssa.OpGreaterInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x64.Greater", opLen2(ssa.OpGreaterInt8x64, types.TypeVec512), sys.AMD64) @@ -1338,16 +1368,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float64x2.ScaleMasked", opLen3(ssa.OpScaleMaskedFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.ScaleMasked", opLen3(ssa.OpScaleMaskedFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.ScaleMasked", opLen3(ssa.OpScaleMaskedFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x8.Set128", opLen2Imm8(ssa.OpSet128Float32x8, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Float64x4.Set128", opLen2Imm8(ssa.OpSet128Float64x4, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int8x32.Set128", opLen2Imm8(ssa.OpSet128Int8x32, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int16x16.Set128", opLen2Imm8(ssa.OpSet128Int16x16, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int32x8.Set128", opLen2Imm8(ssa.OpSet128Int32x8, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int64x4.Set128", opLen2Imm8(ssa.OpSet128Int64x4, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint8x32.Set128", opLen2Imm8(ssa.OpSet128Uint8x32, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint16x16.Set128", opLen2Imm8(ssa.OpSet128Uint16x16, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint32x8.Set128", opLen2Imm8(ssa.OpSet128Uint32x8, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint64x4.Set128", opLen2Imm8(ssa.OpSet128Uint64x4, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Int8x16.SetElem", opLen2Imm8(ssa.OpSetElemInt8x16, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Int16x8.SetElem", opLen2Imm8(ssa.OpSetElemInt16x8, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Int32x4.SetElem", opLen2Imm8(ssa.OpSetElemInt32x4, types.TypeVec128, 0), sys.AMD64) @@ -1356,6 +1376,46 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint16x8.SetElem", opLen2Imm8(ssa.OpSetElemUint16x8, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Uint32x4.SetElem", opLen2Imm8(ssa.OpSetElemUint32x4, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Uint64x2.SetElem", opLen2Imm8(ssa.OpSetElemUint64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Float32x8.SetHi", opLen2(ssa.OpSetHiFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.SetHi", opLen2(ssa.OpSetHiFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x4.SetHi", opLen2(ssa.OpSetHiFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.SetHi", opLen2(ssa.OpSetHiFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x32.SetHi", opLen2(ssa.OpSetHiInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.SetHi", opLen2(ssa.OpSetHiInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x16.SetHi", opLen2(ssa.OpSetHiInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.SetHi", opLen2(ssa.OpSetHiInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x8.SetHi", opLen2(ssa.OpSetHiInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.SetHi", opLen2(ssa.OpSetHiInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x4.SetHi", opLen2(ssa.OpSetHiInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.SetHi", opLen2(ssa.OpSetHiInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x32.SetHi", opLen2(ssa.OpSetHiUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.SetHi", opLen2(ssa.OpSetHiUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x16.SetHi", opLen2(ssa.OpSetHiUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.SetHi", opLen2(ssa.OpSetHiUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x8.SetHi", opLen2(ssa.OpSetHiUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.SetHi", opLen2(ssa.OpSetHiUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x4.SetHi", opLen2(ssa.OpSetHiUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.SetHi", opLen2(ssa.OpSetHiUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x8.SetLo", opLen2(ssa.OpSetLoFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.SetLo", opLen2(ssa.OpSetLoFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x4.SetLo", opLen2(ssa.OpSetLoFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.SetLo", opLen2(ssa.OpSetLoFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x32.SetLo", opLen2(ssa.OpSetLoInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.SetLo", opLen2(ssa.OpSetLoInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x16.SetLo", opLen2(ssa.OpSetLoInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.SetLo", opLen2(ssa.OpSetLoInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x8.SetLo", opLen2(ssa.OpSetLoInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.SetLo", opLen2(ssa.OpSetLoInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x4.SetLo", opLen2(ssa.OpSetLoInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.SetLo", opLen2(ssa.OpSetLoInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x32.SetLo", opLen2(ssa.OpSetLoUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.SetLo", opLen2(ssa.OpSetLoUint8x64, 
types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x16.SetLo", opLen2(ssa.OpSetLoUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.SetLo", opLen2(ssa.OpSetLoUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x8.SetLo", opLen2(ssa.OpSetLoUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.SetLo", opLen2(ssa.OpSetLoUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x4.SetLo", opLen2(ssa.OpSetLoUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.SetLo", opLen2(ssa.OpSetLoUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftInt16x32, types.TypeVec512), sys.AMD64) diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 61a708b56e0cfb..5eb8fea47691a2 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -3041,135 +3041,267 @@ func (x Uint8x32) GaloisFieldMulMasked(y Uint8x32, mask Mask8x32) Uint8x32 // Asm: VGF2P8MULB, CPU Feature: AVX512GFNI func (x Uint8x64) GaloisFieldMulMasked(y Uint8x64, mask Mask8x64) Uint8x64 -/* Get128 */ +/* GetElem */ -// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. +// GetElem retrieves a single constant-indexed element's value. // // index is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VEXTRACTF128, CPU Feature: AVX -func (x Float32x8) Get128(index uint8) Float32x4 +// Asm: VPEXTRB, CPU Feature: AVX512BW +func (x Int8x16) GetElem(index uint8) int8 -// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. +// GetElem retrieves a single constant-indexed element's value. // // index is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VEXTRACTF128, CPU Feature: AVX -func (x Float64x4) Get128(index uint8) Float64x2 +// Asm: VPEXTRW, CPU Feature: AVX512BW +func (x Int16x8) GetElem(index uint8) int16 -// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. +// GetElem retrieves a single constant-indexed element's value. // // index is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VEXTRACTI128, CPU Feature: AVX2 -func (x Int8x32) Get128(index uint8) Int8x16 +// Asm: VPEXTRD, CPU Feature: AVX +func (x Int32x4) GetElem(index uint8) int32 -// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. +// GetElem retrieves a single constant-indexed element's value. // // index is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VEXTRACTI128, CPU Feature: AVX2 -func (x Int16x16) Get128(index uint8) Int16x8 +// Asm: VPEXTRQ, CPU Feature: AVX +func (x Int64x2) GetElem(index uint8) int64 -// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. +// GetElem retrieves a single constant-indexed element's value. // // index is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VEXTRACTI128, CPU Feature: AVX2 -func (x Int32x8) Get128(index uint8) Int32x4 +// Asm: VPEXTRB, CPU Feature: AVX512BW +func (x Uint8x16) GetElem(index uint8) uint8 -// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. +// GetElem retrieves a single constant-indexed element's value. // // index is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VEXTRACTI128, CPU Feature: AVX2 -func (x Int64x4) Get128(index uint8) Int64x2 +// Asm: VPEXTRW, CPU Feature: AVX512BW +func (x Uint16x8) GetElem(index uint8) uint16 -// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. +// GetElem retrieves a single constant-indexed element's value. // // index is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VEXTRACTI128, CPU Feature: AVX2 -func (x Uint8x32) Get128(index uint8) Uint8x16 +// Asm: VPEXTRD, CPU Feature: AVX +func (x Uint32x4) GetElem(index uint8) uint32 -// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. +// GetElem retrieves a single constant-indexed element's value. // // index is expected to be a constant, non-constant value will trigger a runtime panic. // +// Asm: VPEXTRQ, CPU Feature: AVX +func (x Uint64x2) GetElem(index uint8) uint64 + +/* GetHi */ + +// GetHi returns the upper half of x. +// +// Asm: VEXTRACTF128, CPU Feature: AVX +func (x Float32x8) GetHi() Float32x4 + +// GetHi returns the upper half of x. +// +// Asm: VEXTRACTF64X4, CPU Feature: AVX512F +func (x Float32x16) GetHi() Float32x8 + +// GetHi returns the upper half of x. +// +// Asm: VEXTRACTF128, CPU Feature: AVX +func (x Float64x4) GetHi() Float64x2 + +// GetHi returns the upper half of x. +// +// Asm: VEXTRACTF64X4, CPU Feature: AVX512F +func (x Float64x8) GetHi() Float64x4 + +// GetHi returns the upper half of x. +// // Asm: VEXTRACTI128, CPU Feature: AVX2 -func (x Uint16x16) Get128(index uint8) Uint16x8 +func (x Int8x32) GetHi() Int8x16 -// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. +// GetHi returns the upper half of x. // -// index is expected to be a constant, non-constant value will trigger a runtime panic. +// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +func (x Int8x64) GetHi() Int8x32 + +// GetHi returns the upper half of x. // // Asm: VEXTRACTI128, CPU Feature: AVX2 -func (x Uint32x8) Get128(index uint8) Uint32x4 +func (x Int16x16) GetHi() Int16x8 -// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. +// GetHi returns the upper half of x. // -// index is expected to be a constant, non-constant value will trigger a runtime panic. +// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +func (x Int16x32) GetHi() Int16x16 + +// GetHi returns the upper half of x. // // Asm: VEXTRACTI128, CPU Feature: AVX2 -func (x Uint64x4) Get128(index uint8) Uint64x2 +func (x Int32x8) GetHi() Int32x4 -/* GetElem */ +// GetHi returns the upper half of x. +// +// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +func (x Int32x16) GetHi() Int32x8 -// GetElem retrieves a single constant-indexed element's value. +// GetHi returns the upper half of x. // -// index is expected to be a constant, non-constant value will trigger a runtime panic. +// Asm: VEXTRACTI128, CPU Feature: AVX2 +func (x Int64x4) GetHi() Int64x2 + +// GetHi returns the upper half of x. 
// -// Asm: VPEXTRB, CPU Feature: AVX512BW -func (x Int8x16) GetElem(index uint8) int8 +// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +func (x Int64x8) GetHi() Int64x4 -// GetElem retrieves a single constant-indexed element's value. +// GetHi returns the upper half of x. // -// index is expected to be a constant, non-constant value will trigger a runtime panic. +// Asm: VEXTRACTI128, CPU Feature: AVX2 +func (x Uint8x32) GetHi() Uint8x16 + +// GetHi returns the upper half of x. // -// Asm: VPEXTRW, CPU Feature: AVX512BW -func (x Int16x8) GetElem(index uint8) int16 +// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +func (x Uint8x64) GetHi() Uint8x32 -// GetElem retrieves a single constant-indexed element's value. +// GetHi returns the upper half of x. // -// index is expected to be a constant, non-constant value will trigger a runtime panic. +// Asm: VEXTRACTI128, CPU Feature: AVX2 +func (x Uint16x16) GetHi() Uint16x8 + +// GetHi returns the upper half of x. // -// Asm: VPEXTRD, CPU Feature: AVX -func (x Int32x4) GetElem(index uint8) int32 +// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +func (x Uint16x32) GetHi() Uint16x16 -// GetElem retrieves a single constant-indexed element's value. +// GetHi returns the upper half of x. // -// index is expected to be a constant, non-constant value will trigger a runtime panic. +// Asm: VEXTRACTI128, CPU Feature: AVX2 +func (x Uint32x8) GetHi() Uint32x4 + +// GetHi returns the upper half of x. // -// Asm: VPEXTRQ, CPU Feature: AVX -func (x Int64x2) GetElem(index uint8) int64 +// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +func (x Uint32x16) GetHi() Uint32x8 -// GetElem retrieves a single constant-indexed element's value. +// GetHi returns the upper half of x. // -// index is expected to be a constant, non-constant value will trigger a runtime panic. +// Asm: VEXTRACTI128, CPU Feature: AVX2 +func (x Uint64x4) GetHi() Uint64x2 + +// GetHi returns the upper half of x. // -// Asm: VPEXTRB, CPU Feature: AVX512BW -func (x Uint8x16) GetElem(index uint8) uint8 +// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +func (x Uint64x8) GetHi() Uint64x4 -// GetElem retrieves a single constant-indexed element's value. +/* GetLo */ + +// GetLo returns the lower half of x. // -// index is expected to be a constant, non-constant value will trigger a runtime panic. +// Asm: VEXTRACTF128, CPU Feature: AVX +func (x Float32x8) GetLo() Float32x4 + +// GetLo returns the lower half of x. // -// Asm: VPEXTRW, CPU Feature: AVX512BW -func (x Uint16x8) GetElem(index uint8) uint16 +// Asm: VEXTRACTF64X4, CPU Feature: AVX512F +func (x Float32x16) GetLo() Float32x8 -// GetElem retrieves a single constant-indexed element's value. +// GetLo returns the lower half of x. // -// index is expected to be a constant, non-constant value will trigger a runtime panic. +// Asm: VEXTRACTF128, CPU Feature: AVX +func (x Float64x4) GetLo() Float64x2 + +// GetLo returns the lower half of x. // -// Asm: VPEXTRD, CPU Feature: AVX -func (x Uint32x4) GetElem(index uint8) uint32 +// Asm: VEXTRACTF64X4, CPU Feature: AVX512F +func (x Float64x8) GetLo() Float64x4 -// GetElem retrieves a single constant-indexed element's value. +// GetLo returns the lower half of x. // -// index is expected to be a constant, non-constant value will trigger a runtime panic. +// Asm: VEXTRACTI128, CPU Feature: AVX2 +func (x Int8x32) GetLo() Int8x16 + +// GetLo returns the lower half of x. 
// -// Asm: VPEXTRQ, CPU Feature: AVX -func (x Uint64x2) GetElem(index uint8) uint64 +// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +func (x Int8x64) GetLo() Int8x32 + +// GetLo returns the lower half of x. +// +// Asm: VEXTRACTI128, CPU Feature: AVX2 +func (x Int16x16) GetLo() Int16x8 + +// GetLo returns the lower half of x. +// +// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +func (x Int16x32) GetLo() Int16x16 + +// GetLo returns the lower half of x. +// +// Asm: VEXTRACTI128, CPU Feature: AVX2 +func (x Int32x8) GetLo() Int32x4 + +// GetLo returns the lower half of x. +// +// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +func (x Int32x16) GetLo() Int32x8 + +// GetLo returns the lower half of x. +// +// Asm: VEXTRACTI128, CPU Feature: AVX2 +func (x Int64x4) GetLo() Int64x2 + +// GetLo returns the lower half of x. +// +// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +func (x Int64x8) GetLo() Int64x4 + +// GetLo returns the lower half of x. +// +// Asm: VEXTRACTI128, CPU Feature: AVX2 +func (x Uint8x32) GetLo() Uint8x16 + +// GetLo returns the lower half of x. +// +// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +func (x Uint8x64) GetLo() Uint8x32 + +// GetLo returns the lower half of x. +// +// Asm: VEXTRACTI128, CPU Feature: AVX2 +func (x Uint16x16) GetLo() Uint16x8 + +// GetLo returns the lower half of x. +// +// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +func (x Uint16x32) GetLo() Uint16x16 + +// GetLo returns the lower half of x. +// +// Asm: VEXTRACTI128, CPU Feature: AVX2 +func (x Uint32x8) GetLo() Uint32x4 + +// GetLo returns the lower half of x. +// +// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +func (x Uint32x16) GetLo() Uint32x8 + +// GetLo returns the lower half of x. +// +// Asm: VEXTRACTI128, CPU Feature: AVX2 +func (x Uint64x4) GetLo() Uint64x2 + +// GetLo returns the lower half of x. +// +// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +func (x Uint64x8) GetLo() Uint64x4 /* Greater */ @@ -8757,135 +8889,267 @@ func (x Float64x4) ScaleMasked(y Float64x4, mask Mask64x4) Float64x4 // Asm: VSCALEFPD, CPU Feature: AVX512F func (x Float64x8) ScaleMasked(y Float64x8, mask Mask64x8) Float64x8 -/* Set128 */ +/* SetElem */ -// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. +// SetElem sets a single constant-indexed element's value. // // index is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VINSERTF128, CPU Feature: AVX -func (x Float32x8) Set128(index uint8, y Float32x4) Float32x8 +// Asm: VPINSRB, CPU Feature: AVX +func (x Int8x16) SetElem(index uint8, y int8) Int8x16 -// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. +// SetElem sets a single constant-indexed element's value. // // index is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VINSERTF128, CPU Feature: AVX -func (x Float64x4) Set128(index uint8, y Float64x2) Float64x4 +// Asm: VPINSRW, CPU Feature: AVX +func (x Int16x8) SetElem(index uint8, y int16) Int16x8 -// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. +// SetElem sets a single constant-indexed element's value. // // index is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VINSERTI128, CPU Feature: AVX2 -func (x Int8x32) Set128(index uint8, y Int8x16) Int8x32 +// Asm: VPINSRD, CPU Feature: AVX +func (x Int32x4) SetElem(index uint8, y int32) Int32x4 -// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. +// SetElem sets a single constant-indexed element's value. // // index is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VINSERTI128, CPU Feature: AVX2 -func (x Int16x16) Set128(index uint8, y Int16x8) Int16x16 +// Asm: VPINSRQ, CPU Feature: AVX +func (x Int64x2) SetElem(index uint8, y int64) Int64x2 -// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. +// SetElem sets a single constant-indexed element's value. // // index is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VINSERTI128, CPU Feature: AVX2 -func (x Int32x8) Set128(index uint8, y Int32x4) Int32x8 +// Asm: VPINSRB, CPU Feature: AVX +func (x Uint8x16) SetElem(index uint8, y uint8) Uint8x16 -// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. +// SetElem sets a single constant-indexed element's value. // // index is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VINSERTI128, CPU Feature: AVX2 -func (x Int64x4) Set128(index uint8, y Int64x2) Int64x4 +// Asm: VPINSRW, CPU Feature: AVX +func (x Uint16x8) SetElem(index uint8, y uint16) Uint16x8 -// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. +// SetElem sets a single constant-indexed element's value. // // index is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VINSERTI128, CPU Feature: AVX2 -func (x Uint8x32) Set128(index uint8, y Uint8x16) Uint8x32 +// Asm: VPINSRD, CPU Feature: AVX +func (x Uint32x4) SetElem(index uint8, y uint32) Uint32x4 -// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. +// SetElem sets a single constant-indexed element's value. // // index is expected to be a constant, non-constant value will trigger a runtime panic. // +// Asm: VPINSRQ, CPU Feature: AVX +func (x Uint64x2) SetElem(index uint8, y uint64) Uint64x2 + +/* SetHi */ + +// SetHi returns x with its upper half set to y. +// +// Asm: VINSERTF128, CPU Feature: AVX +func (x Float32x8) SetHi(y Float32x4) Float32x8 + +// SetHi returns x with its upper half set to y. +// +// Asm: VINSERTF64X4, CPU Feature: AVX512F +func (x Float32x16) SetHi(y Float32x8) Float32x16 + +// SetHi returns x with its upper half set to y. +// +// Asm: VINSERTF128, CPU Feature: AVX +func (x Float64x4) SetHi(y Float64x2) Float64x4 + +// SetHi returns x with its upper half set to y. +// +// Asm: VINSERTF64X4, CPU Feature: AVX512F +func (x Float64x8) SetHi(y Float64x4) Float64x8 + +// SetHi returns x with its upper half set to y. 
+// // Asm: VINSERTI128, CPU Feature: AVX2 -func (x Uint16x16) Set128(index uint8, y Uint16x8) Uint16x16 +func (x Int8x32) SetHi(y Int8x16) Int8x32 -// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. +// SetHi returns x with its upper half set to y. // -// index is expected to be a constant, non-constant value will trigger a runtime panic. +// Asm: VINSERTI64X4, CPU Feature: AVX512F +func (x Int8x64) SetHi(y Int8x32) Int8x64 + +// SetHi returns x with its upper half set to y. // // Asm: VINSERTI128, CPU Feature: AVX2 -func (x Uint32x8) Set128(index uint8, y Uint32x4) Uint32x8 +func (x Int16x16) SetHi(y Int16x8) Int16x16 -// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. +// SetHi returns x with its upper half set to y. // -// index is expected to be a constant, non-constant value will trigger a runtime panic. +// Asm: VINSERTI64X4, CPU Feature: AVX512F +func (x Int16x32) SetHi(y Int16x16) Int16x32 + +// SetHi returns x with its upper half set to y. // // Asm: VINSERTI128, CPU Feature: AVX2 -func (x Uint64x4) Set128(index uint8, y Uint64x2) Uint64x4 +func (x Int32x8) SetHi(y Int32x4) Int32x8 -/* SetElem */ +// SetHi returns x with its upper half set to y. +// +// Asm: VINSERTI64X4, CPU Feature: AVX512F +func (x Int32x16) SetHi(y Int32x8) Int32x16 -// SetElem sets a single constant-indexed element's value. +// SetHi returns x with its upper half set to y. // -// index is expected to be a constant, non-constant value will trigger a runtime panic. +// Asm: VINSERTI128, CPU Feature: AVX2 +func (x Int64x4) SetHi(y Int64x2) Int64x4 + +// SetHi returns x with its upper half set to y. // -// Asm: VPINSRB, CPU Feature: AVX -func (x Int8x16) SetElem(index uint8, y int8) Int8x16 +// Asm: VINSERTI64X4, CPU Feature: AVX512F +func (x Int64x8) SetHi(y Int64x4) Int64x8 -// SetElem sets a single constant-indexed element's value. +// SetHi returns x with its upper half set to y. // -// index is expected to be a constant, non-constant value will trigger a runtime panic. +// Asm: VINSERTI128, CPU Feature: AVX2 +func (x Uint8x32) SetHi(y Uint8x16) Uint8x32 + +// SetHi returns x with its upper half set to y. // -// Asm: VPINSRW, CPU Feature: AVX -func (x Int16x8) SetElem(index uint8, y int16) Int16x8 +// Asm: VINSERTI64X4, CPU Feature: AVX512F +func (x Uint8x64) SetHi(y Uint8x32) Uint8x64 -// SetElem sets a single constant-indexed element's value. +// SetHi returns x with its upper half set to y. // -// index is expected to be a constant, non-constant value will trigger a runtime panic. +// Asm: VINSERTI128, CPU Feature: AVX2 +func (x Uint16x16) SetHi(y Uint16x8) Uint16x16 + +// SetHi returns x with its upper half set to y. // -// Asm: VPINSRD, CPU Feature: AVX -func (x Int32x4) SetElem(index uint8, y int32) Int32x4 +// Asm: VINSERTI64X4, CPU Feature: AVX512F +func (x Uint16x32) SetHi(y Uint16x16) Uint16x32 -// SetElem sets a single constant-indexed element's value. +// SetHi returns x with its upper half set to y. // -// index is expected to be a constant, non-constant value will trigger a runtime panic. +// Asm: VINSERTI128, CPU Feature: AVX2 +func (x Uint32x8) SetHi(y Uint32x4) Uint32x8 + +// SetHi returns x with its upper half set to y. 
// -// Asm: VPINSRQ, CPU Feature: AVX -func (x Int64x2) SetElem(index uint8, y int64) Int64x2 +// Asm: VINSERTI64X4, CPU Feature: AVX512F +func (x Uint32x16) SetHi(y Uint32x8) Uint32x16 -// SetElem sets a single constant-indexed element's value. +// SetHi returns x with its upper half set to y. // -// index is expected to be a constant, non-constant value will trigger a runtime panic. +// Asm: VINSERTI128, CPU Feature: AVX2 +func (x Uint64x4) SetHi(y Uint64x2) Uint64x4 + +// SetHi returns x with its upper half set to y. // -// Asm: VPINSRB, CPU Feature: AVX -func (x Uint8x16) SetElem(index uint8, y uint8) Uint8x16 +// Asm: VINSERTI64X4, CPU Feature: AVX512F +func (x Uint64x8) SetHi(y Uint64x4) Uint64x8 -// SetElem sets a single constant-indexed element's value. +/* SetLo */ + +// SetLo returns x with its lower half set to y. // -// index is expected to be a constant, non-constant value will trigger a runtime panic. +// Asm: VINSERTF128, CPU Feature: AVX +func (x Float32x8) SetLo(y Float32x4) Float32x8 + +// SetLo returns x with its lower half set to y. // -// Asm: VPINSRW, CPU Feature: AVX -func (x Uint16x8) SetElem(index uint8, y uint16) Uint16x8 +// Asm: VINSERTF64X4, CPU Feature: AVX512F +func (x Float32x16) SetLo(y Float32x8) Float32x16 -// SetElem sets a single constant-indexed element's value. +// SetLo returns x with its lower half set to y. // -// index is expected to be a constant, non-constant value will trigger a runtime panic. +// Asm: VINSERTF128, CPU Feature: AVX +func (x Float64x4) SetLo(y Float64x2) Float64x4 + +// SetLo returns x with its lower half set to y. // -// Asm: VPINSRD, CPU Feature: AVX -func (x Uint32x4) SetElem(index uint8, y uint32) Uint32x4 +// Asm: VINSERTF64X4, CPU Feature: AVX512F +func (x Float64x8) SetLo(y Float64x4) Float64x8 -// SetElem sets a single constant-indexed element's value. +// SetLo returns x with its lower half set to y. // -// index is expected to be a constant, non-constant value will trigger a runtime panic. +// Asm: VINSERTI128, CPU Feature: AVX2 +func (x Int8x32) SetLo(y Int8x16) Int8x32 + +// SetLo returns x with its lower half set to y. // -// Asm: VPINSRQ, CPU Feature: AVX -func (x Uint64x2) SetElem(index uint8, y uint64) Uint64x2 +// Asm: VINSERTI64X4, CPU Feature: AVX512F +func (x Int8x64) SetLo(y Int8x32) Int8x64 + +// SetLo returns x with its lower half set to y. +// +// Asm: VINSERTI128, CPU Feature: AVX2 +func (x Int16x16) SetLo(y Int16x8) Int16x16 + +// SetLo returns x with its lower half set to y. +// +// Asm: VINSERTI64X4, CPU Feature: AVX512F +func (x Int16x32) SetLo(y Int16x16) Int16x32 + +// SetLo returns x with its lower half set to y. +// +// Asm: VINSERTI128, CPU Feature: AVX2 +func (x Int32x8) SetLo(y Int32x4) Int32x8 + +// SetLo returns x with its lower half set to y. +// +// Asm: VINSERTI64X4, CPU Feature: AVX512F +func (x Int32x16) SetLo(y Int32x8) Int32x16 + +// SetLo returns x with its lower half set to y. +// +// Asm: VINSERTI128, CPU Feature: AVX2 +func (x Int64x4) SetLo(y Int64x2) Int64x4 + +// SetLo returns x with its lower half set to y. +// +// Asm: VINSERTI64X4, CPU Feature: AVX512F +func (x Int64x8) SetLo(y Int64x4) Int64x8 + +// SetLo returns x with its lower half set to y. +// +// Asm: VINSERTI128, CPU Feature: AVX2 +func (x Uint8x32) SetLo(y Uint8x16) Uint8x32 + +// SetLo returns x with its lower half set to y. +// +// Asm: VINSERTI64X4, CPU Feature: AVX512F +func (x Uint8x64) SetLo(y Uint8x32) Uint8x64 + +// SetLo returns x with its lower half set to y. 
+// +// Asm: VINSERTI128, CPU Feature: AVX2 +func (x Uint16x16) SetLo(y Uint16x8) Uint16x16 + +// SetLo returns x with its lower half set to y. +// +// Asm: VINSERTI64X4, CPU Feature: AVX512F +func (x Uint16x32) SetLo(y Uint16x16) Uint16x32 + +// SetLo returns x with its lower half set to y. +// +// Asm: VINSERTI128, CPU Feature: AVX2 +func (x Uint32x8) SetLo(y Uint32x4) Uint32x8 + +// SetLo returns x with its lower half set to y. +// +// Asm: VINSERTI64X4, CPU Feature: AVX512F +func (x Uint32x16) SetLo(y Uint32x8) Uint32x16 + +// SetLo returns x with its lower half set to y. +// +// Asm: VINSERTI128, CPU Feature: AVX2 +func (x Uint64x4) SetLo(y Uint64x2) Uint64x4 + +// SetLo returns x with its lower half set to y. +// +// Asm: VINSERTI64X4, CPU Feature: AVX512F +func (x Uint64x8) SetLo(y Uint64x4) Uint64x8 /* ShiftAllLeft */ diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index 1df27f875760aa..571834783887b4 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -257,93 +257,6 @@ func TestSlicesInt8GetElem(t *testing.T) { } -func TestSlicesInt8Set128(t *testing.T) { - a := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, - 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} - v := simd.LoadInt8x16Slice(a) // 1-16 - u := simd.LoadInt8x32Slice(a) // 1-32 - - w := u.Set128(1, v) // 1-16:1-16 - - b := make([]int8, 32, 32) - w.StoreSlice(b) - - checkSlices(t, a, b[:16]) - checkSlices(t, a, b[16:]) -} - -func TestSlicesInt8Get128(t *testing.T) { - a := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, - 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} - u := simd.LoadInt8x32Slice(a) // 1-32 - v := u.Get128(0) // 1-16 - w := u.Get128(1) // 17-32 - - b := make([]int8, 32, 32) - v.StoreSlice(b[:16]) - w.StoreSlice(b[16:]) - - checkSlices(t, a, b) -} - -func TestSlicesFloat32Set128(t *testing.T) { - a := []float32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, - 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} - v := simd.LoadFloat32x4Slice(a) // 1-4 - u := simd.LoadFloat32x8Slice(a) // 1-4 - - w := u.Set128(1, v) // 1-4:1-4 - - b := make([]float32, 8, 8) - w.StoreSlice(b) - - checkSlices(t, a, b[:4]) - checkSlices(t, a, b[4:]) -} - -func TestSlicesFloat32Get128(t *testing.T) { - a := []float32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, - 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} - u := simd.LoadFloat32x8Slice(a) // 1-8 - v := u.Get128(0) // 1-4 - w := u.Get128(1) // 5-8 - - b := make([]float32, 8, 8) - v.StoreSlice(b[:4]) - w.StoreSlice(b[4:]) - - checkSlices(t, a, b) -} - -func TestSlicesFloat64Set128(t *testing.T) { - a := []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, - 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} - v := simd.LoadFloat64x2Slice(a) // 1-2 - u := simd.LoadFloat64x4Slice(a) // 1-2 - - w := u.Set128(1, v) // 1-2:1-2 - - b := make([]float64, 4, 4) - w.StoreSlice(b) - - checkSlices(t, a, b[:2]) - checkSlices(t, a, b[2:]) -} - -func TestSlicesFloat64Get128(t *testing.T) { - a := []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, - 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} - u := simd.LoadFloat64x4Slice(a) // 1-4 - v := u.Get128(0) // 1-2 - w := u.Get128(1) // 3-4 - - b := make([]float64, 4, 4) - v.StoreSlice(b[:2]) - w.StoreSlice(b[2:]) - - checkSlices(t, a, b) -} - func TestSlicesInt8TooShortLoad(t *testing.T) { defer func() { if r := recover(); r != nil { diff --git 
a/src/simd/slicepart_amd64.go b/src/simd/slicepart_amd64.go index 6d0b5a41f298bc..206d3b98cb28f7 100644 --- a/src/simd/slicepart_amd64.go +++ b/src/simd/slicepart_amd64.go @@ -76,9 +76,9 @@ func LoadInt8x32SlicePart(s []int8) Int8x32 { return x } if l > 16 { - return x.Set128(0, LoadInt8x16Slice(s)).Set128(1, LoadInt8x16SlicePart(s[16:])) + return x.SetLo(LoadInt8x16Slice(s)).SetHi(LoadInt8x16SlicePart(s[16:])) } else { - return x.Set128(0, LoadInt8x16SlicePart(s)) + return x.SetLo(LoadInt8x16SlicePart(s)) } } @@ -95,9 +95,9 @@ func LoadInt16x16SlicePart(s []int16) Int16x16 { return x } if l > 8 { - return x.Set128(0, LoadInt16x8Slice(s)).Set128(1, LoadInt16x8SlicePart(s[8:])) + return x.SetLo(LoadInt16x8Slice(s)).SetHi(LoadInt16x8SlicePart(s[8:])) } else { - return x.Set128(0, LoadInt16x8SlicePart(s)) + return x.SetLo(LoadInt16x8SlicePart(s)) } } @@ -114,10 +114,10 @@ func (x Int8x32) StoreSlicePart(s []int8) { return } if l > 16 { - x.Get128(0).StoreSlice(s) - x.Get128(1).StoreSlicePart(s[16:]) + x.GetLo().StoreSlice(s) + x.GetHi().StoreSlicePart(s[16:]) } else { // fits in one - x.Get128(0).StoreSlicePart(s) + x.GetLo().StoreSlicePart(s) } } @@ -134,10 +134,10 @@ func (x Int16x16) StoreSlicePart(s []int16) { return } if l > 8 { - x.Get128(0).StoreSlice(s) - x.Get128(1).StoreSlicePart(s[8:]) + x.GetLo().StoreSlice(s) + x.GetHi().StoreSlicePart(s[8:]) } else { // fits in one - x.Get128(0).StoreSlicePart(s) + x.GetLo().StoreSlicePart(s) } } From 5b0ef7fcdc18bcec16b50e4ebc220f3ee3a9a4cb Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Tue, 5 Aug 2025 19:42:12 +0000 Subject: [PATCH 115/139] [dev.simd] cmd/compile, simd: add Expand This CL is generated by CL 693336. Change-Id: Ic1712d49fcad0544fa3c19b0249d8bc65b347104 Reviewed-on: https://go-review.googlesource.com/c/go/+/693375 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/simdssa.go | 36 ++ .../compile/internal/ssa/_gen/simdAMD64.rules | 30 + .../compile/internal/ssa/_gen/simdAMD64ops.go | 18 + .../internal/ssa/_gen/simdgenericOps.go | 30 + src/cmd/compile/internal/ssa/opGen.go | 450 +++++++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 540 ++++++++++++++++++ .../compile/internal/ssagen/simdintrinsics.go | 30 + src/simd/ops_amd64.go | 182 ++++++ src/simd/simd_test.go | 16 + 9 files changed, 1332 insertions(+) diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 7a0a0be58fa329..b778cd7994a7b3 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -644,6 +644,24 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VCVTPS2UDQMasked128, ssa.OpAMD64VCVTPS2UDQMasked256, ssa.OpAMD64VCVTPS2UDQMasked512, + ssa.OpAMD64VEXPANDPSMasked128, + ssa.OpAMD64VEXPANDPSMasked256, + ssa.OpAMD64VEXPANDPSMasked512, + ssa.OpAMD64VEXPANDPDMasked128, + ssa.OpAMD64VEXPANDPDMasked256, + ssa.OpAMD64VEXPANDPDMasked512, + ssa.OpAMD64VPEXPANDBMasked128, + ssa.OpAMD64VPEXPANDBMasked256, + ssa.OpAMD64VPEXPANDBMasked512, + ssa.OpAMD64VPEXPANDWMasked128, + ssa.OpAMD64VPEXPANDWMasked256, + ssa.OpAMD64VPEXPANDWMasked512, + ssa.OpAMD64VPEXPANDDMasked128, + ssa.OpAMD64VPEXPANDDMasked256, + ssa.OpAMD64VPEXPANDDMasked512, + ssa.OpAMD64VPEXPANDQMasked128, + ssa.OpAMD64VPEXPANDQMasked256, + ssa.OpAMD64VPEXPANDQMasked512, ssa.OpAMD64VPOPCNTBMasked128, ssa.OpAMD64VPOPCNTBMasked256, ssa.OpAMD64VPOPCNTBMasked512, @@ -1229,6 +1247,24 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { 
ssa.OpAMD64VDIVPDMasked128, ssa.OpAMD64VDIVPDMasked256, ssa.OpAMD64VDIVPDMasked512, + ssa.OpAMD64VEXPANDPSMasked128, + ssa.OpAMD64VEXPANDPSMasked256, + ssa.OpAMD64VEXPANDPSMasked512, + ssa.OpAMD64VEXPANDPDMasked128, + ssa.OpAMD64VEXPANDPDMasked256, + ssa.OpAMD64VEXPANDPDMasked512, + ssa.OpAMD64VPEXPANDBMasked128, + ssa.OpAMD64VPEXPANDBMasked256, + ssa.OpAMD64VPEXPANDBMasked512, + ssa.OpAMD64VPEXPANDWMasked128, + ssa.OpAMD64VPEXPANDWMasked256, + ssa.OpAMD64VPEXPANDWMasked512, + ssa.OpAMD64VPEXPANDDMasked128, + ssa.OpAMD64VPEXPANDDMasked256, + ssa.OpAMD64VPEXPANDDMasked512, + ssa.OpAMD64VPEXPANDQMasked128, + ssa.OpAMD64VPEXPANDQMasked256, + ssa.OpAMD64VPEXPANDQMasked512, ssa.OpAMD64VFMADD213PSMasked128, ssa.OpAMD64VFMADD213PSMasked256, ssa.OpAMD64VFMADD213PSMasked512, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 316db1b8411068..ae29a9117ea16c 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -385,6 +385,36 @@ (EqualMaskedUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [0] x y (VPMOVVec64x2ToM mask))) (EqualMaskedUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [0] x y (VPMOVVec64x4ToM mask))) (EqualMaskedUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [0] x y (VPMOVVec64x8ToM mask))) +(ExpandFloat32x4 x mask) => (VEXPANDPSMasked128 x (VPMOVVec32x4ToM mask)) +(ExpandFloat32x8 x mask) => (VEXPANDPSMasked256 x (VPMOVVec32x8ToM mask)) +(ExpandFloat32x16 x mask) => (VEXPANDPSMasked512 x (VPMOVVec32x16ToM mask)) +(ExpandFloat64x2 x mask) => (VEXPANDPDMasked128 x (VPMOVVec64x2ToM mask)) +(ExpandFloat64x4 x mask) => (VEXPANDPDMasked256 x (VPMOVVec64x4ToM mask)) +(ExpandFloat64x8 x mask) => (VEXPANDPDMasked512 x (VPMOVVec64x8ToM mask)) +(ExpandInt8x16 x mask) => (VPEXPANDBMasked128 x (VPMOVVec8x16ToM mask)) +(ExpandInt8x32 x mask) => (VPEXPANDBMasked256 x (VPMOVVec8x32ToM mask)) +(ExpandInt8x64 x mask) => (VPEXPANDBMasked512 x (VPMOVVec8x64ToM mask)) +(ExpandInt16x8 x mask) => (VPEXPANDWMasked128 x (VPMOVVec16x8ToM mask)) +(ExpandInt16x16 x mask) => (VPEXPANDWMasked256 x (VPMOVVec16x16ToM mask)) +(ExpandInt16x32 x mask) => (VPEXPANDWMasked512 x (VPMOVVec16x32ToM mask)) +(ExpandInt32x4 x mask) => (VPEXPANDDMasked128 x (VPMOVVec32x4ToM mask)) +(ExpandInt32x8 x mask) => (VPEXPANDDMasked256 x (VPMOVVec32x8ToM mask)) +(ExpandInt32x16 x mask) => (VPEXPANDDMasked512 x (VPMOVVec32x16ToM mask)) +(ExpandInt64x2 x mask) => (VPEXPANDQMasked128 x (VPMOVVec64x2ToM mask)) +(ExpandInt64x4 x mask) => (VPEXPANDQMasked256 x (VPMOVVec64x4ToM mask)) +(ExpandInt64x8 x mask) => (VPEXPANDQMasked512 x (VPMOVVec64x8ToM mask)) +(ExpandUint8x16 x mask) => (VPEXPANDBMasked128 x (VPMOVVec8x16ToM mask)) +(ExpandUint8x32 x mask) => (VPEXPANDBMasked256 x (VPMOVVec8x32ToM mask)) +(ExpandUint8x64 x mask) => (VPEXPANDBMasked512 x (VPMOVVec8x64ToM mask)) +(ExpandUint16x8 x mask) => (VPEXPANDWMasked128 x (VPMOVVec16x8ToM mask)) +(ExpandUint16x16 x mask) => (VPEXPANDWMasked256 x (VPMOVVec16x16ToM mask)) +(ExpandUint16x32 x mask) => (VPEXPANDWMasked512 x (VPMOVVec16x32ToM mask)) +(ExpandUint32x4 x mask) => (VPEXPANDDMasked128 x (VPMOVVec32x4ToM mask)) +(ExpandUint32x8 x mask) => (VPEXPANDDMasked256 x (VPMOVVec32x8ToM mask)) +(ExpandUint32x16 x mask) => (VPEXPANDDMasked512 x (VPMOVVec32x16ToM mask)) +(ExpandUint64x2 x mask) => (VPEXPANDQMasked128 x (VPMOVVec64x2ToM mask)) +(ExpandUint64x4 x mask) => (VPEXPANDQMasked256 x (VPMOVVec64x4ToM mask)) +(ExpandUint64x8 x mask) 
=> (VPEXPANDQMasked512 x (VPMOVVec64x8ToM mask)) (FloorFloat32x4 x) => (VROUNDPS128 [1] x) (FloorFloat32x8 x) => (VROUNDPS256 [1] x) (FloorFloat64x2 x) => (VROUNDPD128 [1] x) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 591f8a5bcafb58..ccda39f59d33d7 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -49,6 +49,12 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VDIVPSMasked128", argLength: 3, reg: w2kw, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPSMasked256", argLength: 3, reg: w2kw, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPSMasked512", argLength: 3, reg: w2kw, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VEXPANDPDMasked128", argLength: 2, reg: wkw, asm: "VEXPANDPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VEXPANDPDMasked256", argLength: 2, reg: wkw, asm: "VEXPANDPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VEXPANDPDMasked512", argLength: 2, reg: wkw, asm: "VEXPANDPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VEXPANDPSMasked128", argLength: 2, reg: wkw, asm: "VEXPANDPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VEXPANDPSMasked256", argLength: 2, reg: wkw, asm: "VEXPANDPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VEXPANDPSMasked512", argLength: 2, reg: wkw, asm: "VEXPANDPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VFMADD213PD128", argLength: 3, reg: w31, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMADD213PD256", argLength: 3, reg: w31, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VFMADD213PD512", argLength: 3, reg: w31, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, @@ -357,6 +363,18 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPERMWMasked128", argLength: 3, reg: w2kw, asm: "VPERMW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPERMWMasked256", argLength: 3, reg: w2kw, asm: "VPERMW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPERMWMasked512", argLength: 3, reg: w2kw, asm: "VPERMW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPEXPANDBMasked128", argLength: 2, reg: wkw, asm: "VPEXPANDB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPEXPANDBMasked256", argLength: 2, reg: wkw, asm: "VPEXPANDB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPEXPANDBMasked512", argLength: 2, reg: wkw, asm: "VPEXPANDB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPEXPANDDMasked128", argLength: 2, reg: wkw, asm: "VPEXPANDD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPEXPANDDMasked256", argLength: 2, reg: wkw, asm: "VPEXPANDD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPEXPANDDMasked512", argLength: 2, reg: wkw, asm: "VPEXPANDD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPEXPANDQMasked128", argLength: 2, reg: wkw, asm: "VPEXPANDQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPEXPANDQMasked256", argLength: 2, reg: wkw, asm: "VPEXPANDQ", commutative: false, typ: 
"Vec256", resultInArg0: false}, + {name: "VPEXPANDQMasked512", argLength: 2, reg: wkw, asm: "VPEXPANDQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPEXPANDWMasked128", argLength: 2, reg: wkw, asm: "VPEXPANDW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPEXPANDWMasked256", argLength: 2, reg: wkw, asm: "VPEXPANDW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPEXPANDWMasked512", argLength: 2, reg: wkw, asm: "VPEXPANDW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPHADDD128", argLength: 2, reg: v21, asm: "VPHADDD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPHADDD256", argLength: 2, reg: v21, asm: "VPHADDD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPHADDSW128", argLength: 2, reg: v21, asm: "VPHADDSW", commutative: false, typ: "Vec128", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index e132b058a4a0b6..d0a4a494b181c0 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -364,6 +364,36 @@ func simdGenericOps() []opData { {name: "EqualUint64x2", argLength: 2, commutative: true}, {name: "EqualUint64x4", argLength: 2, commutative: true}, {name: "EqualUint64x8", argLength: 2, commutative: true}, + {name: "ExpandFloat32x4", argLength: 2, commutative: false}, + {name: "ExpandFloat32x8", argLength: 2, commutative: false}, + {name: "ExpandFloat32x16", argLength: 2, commutative: false}, + {name: "ExpandFloat64x2", argLength: 2, commutative: false}, + {name: "ExpandFloat64x4", argLength: 2, commutative: false}, + {name: "ExpandFloat64x8", argLength: 2, commutative: false}, + {name: "ExpandInt8x16", argLength: 2, commutative: false}, + {name: "ExpandInt8x32", argLength: 2, commutative: false}, + {name: "ExpandInt8x64", argLength: 2, commutative: false}, + {name: "ExpandInt16x8", argLength: 2, commutative: false}, + {name: "ExpandInt16x16", argLength: 2, commutative: false}, + {name: "ExpandInt16x32", argLength: 2, commutative: false}, + {name: "ExpandInt32x4", argLength: 2, commutative: false}, + {name: "ExpandInt32x8", argLength: 2, commutative: false}, + {name: "ExpandInt32x16", argLength: 2, commutative: false}, + {name: "ExpandInt64x2", argLength: 2, commutative: false}, + {name: "ExpandInt64x4", argLength: 2, commutative: false}, + {name: "ExpandInt64x8", argLength: 2, commutative: false}, + {name: "ExpandUint8x16", argLength: 2, commutative: false}, + {name: "ExpandUint8x32", argLength: 2, commutative: false}, + {name: "ExpandUint8x64", argLength: 2, commutative: false}, + {name: "ExpandUint16x8", argLength: 2, commutative: false}, + {name: "ExpandUint16x16", argLength: 2, commutative: false}, + {name: "ExpandUint16x32", argLength: 2, commutative: false}, + {name: "ExpandUint32x4", argLength: 2, commutative: false}, + {name: "ExpandUint32x8", argLength: 2, commutative: false}, + {name: "ExpandUint32x16", argLength: 2, commutative: false}, + {name: "ExpandUint64x2", argLength: 2, commutative: false}, + {name: "ExpandUint64x4", argLength: 2, commutative: false}, + {name: "ExpandUint64x8", argLength: 2, commutative: false}, {name: "FloorFloat32x4", argLength: 1, commutative: false}, {name: "FloorFloat32x8", argLength: 1, commutative: false}, {name: "FloorFloat64x2", argLength: 1, commutative: false}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 
b39311cd90f105..2fafe10ea517b9 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1268,6 +1268,12 @@ const ( OpAMD64VDIVPSMasked128 OpAMD64VDIVPSMasked256 OpAMD64VDIVPSMasked512 + OpAMD64VEXPANDPDMasked128 + OpAMD64VEXPANDPDMasked256 + OpAMD64VEXPANDPDMasked512 + OpAMD64VEXPANDPSMasked128 + OpAMD64VEXPANDPSMasked256 + OpAMD64VEXPANDPSMasked512 OpAMD64VFMADD213PD128 OpAMD64VFMADD213PD256 OpAMD64VFMADD213PD512 @@ -1576,6 +1582,18 @@ const ( OpAMD64VPERMWMasked128 OpAMD64VPERMWMasked256 OpAMD64VPERMWMasked512 + OpAMD64VPEXPANDBMasked128 + OpAMD64VPEXPANDBMasked256 + OpAMD64VPEXPANDBMasked512 + OpAMD64VPEXPANDDMasked128 + OpAMD64VPEXPANDDMasked256 + OpAMD64VPEXPANDDMasked512 + OpAMD64VPEXPANDQMasked128 + OpAMD64VPEXPANDQMasked256 + OpAMD64VPEXPANDQMasked512 + OpAMD64VPEXPANDWMasked128 + OpAMD64VPEXPANDWMasked256 + OpAMD64VPEXPANDWMasked512 OpAMD64VPHADDD128 OpAMD64VPHADDD256 OpAMD64VPHADDSW128 @@ -4925,6 +4943,36 @@ const ( OpEqualUint64x2 OpEqualUint64x4 OpEqualUint64x8 + OpExpandFloat32x4 + OpExpandFloat32x8 + OpExpandFloat32x16 + OpExpandFloat64x2 + OpExpandFloat64x4 + OpExpandFloat64x8 + OpExpandInt8x16 + OpExpandInt8x32 + OpExpandInt8x64 + OpExpandInt16x8 + OpExpandInt16x16 + OpExpandInt16x32 + OpExpandInt32x4 + OpExpandInt32x8 + OpExpandInt32x16 + OpExpandInt64x2 + OpExpandInt64x4 + OpExpandInt64x8 + OpExpandUint8x16 + OpExpandUint8x32 + OpExpandUint8x64 + OpExpandUint16x8 + OpExpandUint16x16 + OpExpandUint16x32 + OpExpandUint32x4 + OpExpandUint32x8 + OpExpandUint32x16 + OpExpandUint64x2 + OpExpandUint64x4 + OpExpandUint64x8 OpFloorFloat32x4 OpFloorFloat32x8 OpFloorFloat64x2 @@ -20065,6 +20113,90 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VEXPANDPDMasked128", + argLen: 2, + asm: x86.AVEXPANDPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VEXPANDPDMasked256", + argLen: 2, + asm: x86.AVEXPANDPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VEXPANDPDMasked512", + argLen: 2, + asm: x86.AVEXPANDPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VEXPANDPSMasked128", + argLen: 2, + asm: x86.AVEXPANDPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VEXPANDPSMasked256", + argLen: 2, + asm: x86.AVEXPANDPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VEXPANDPSMasked512", + argLen: 2, + asm: x86.AVEXPANDPS, + reg: regInfo{ + inputs: 
[]inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VFMADD213PD128", argLen: 3, @@ -24788,6 +24920,174 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPEXPANDBMasked128", + argLen: 2, + asm: x86.AVPEXPANDB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPEXPANDBMasked256", + argLen: 2, + asm: x86.AVPEXPANDB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPEXPANDBMasked512", + argLen: 2, + asm: x86.AVPEXPANDB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPEXPANDDMasked128", + argLen: 2, + asm: x86.AVPEXPANDD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPEXPANDDMasked256", + argLen: 2, + asm: x86.AVPEXPANDD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPEXPANDDMasked512", + argLen: 2, + asm: x86.AVPEXPANDD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPEXPANDQMasked128", + argLen: 2, + asm: x86.AVPEXPANDQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPEXPANDQMasked256", + argLen: 2, + asm: x86.AVPEXPANDQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPEXPANDQMasked512", + argLen: 2, + asm: x86.AVPEXPANDQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPEXPANDWMasked128", + argLen: 2, + asm: 
x86.AVPEXPANDW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPEXPANDWMasked256", + argLen: 2, + asm: x86.AVPEXPANDW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPEXPANDWMasked512", + argLen: 2, + asm: x86.AVPEXPANDW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPHADDD128", argLen: 2, @@ -64829,6 +65129,156 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "ExpandFloat32x4", + argLen: 2, + generic: true, + }, + { + name: "ExpandFloat32x8", + argLen: 2, + generic: true, + }, + { + name: "ExpandFloat32x16", + argLen: 2, + generic: true, + }, + { + name: "ExpandFloat64x2", + argLen: 2, + generic: true, + }, + { + name: "ExpandFloat64x4", + argLen: 2, + generic: true, + }, + { + name: "ExpandFloat64x8", + argLen: 2, + generic: true, + }, + { + name: "ExpandInt8x16", + argLen: 2, + generic: true, + }, + { + name: "ExpandInt8x32", + argLen: 2, + generic: true, + }, + { + name: "ExpandInt8x64", + argLen: 2, + generic: true, + }, + { + name: "ExpandInt16x8", + argLen: 2, + generic: true, + }, + { + name: "ExpandInt16x16", + argLen: 2, + generic: true, + }, + { + name: "ExpandInt16x32", + argLen: 2, + generic: true, + }, + { + name: "ExpandInt32x4", + argLen: 2, + generic: true, + }, + { + name: "ExpandInt32x8", + argLen: 2, + generic: true, + }, + { + name: "ExpandInt32x16", + argLen: 2, + generic: true, + }, + { + name: "ExpandInt64x2", + argLen: 2, + generic: true, + }, + { + name: "ExpandInt64x4", + argLen: 2, + generic: true, + }, + { + name: "ExpandInt64x8", + argLen: 2, + generic: true, + }, + { + name: "ExpandUint8x16", + argLen: 2, + generic: true, + }, + { + name: "ExpandUint8x32", + argLen: 2, + generic: true, + }, + { + name: "ExpandUint8x64", + argLen: 2, + generic: true, + }, + { + name: "ExpandUint16x8", + argLen: 2, + generic: true, + }, + { + name: "ExpandUint16x16", + argLen: 2, + generic: true, + }, + { + name: "ExpandUint16x32", + argLen: 2, + generic: true, + }, + { + name: "ExpandUint32x4", + argLen: 2, + generic: true, + }, + { + name: "ExpandUint32x8", + argLen: 2, + generic: true, + }, + { + name: "ExpandUint32x16", + argLen: 2, + generic: true, + }, + { + name: "ExpandUint64x2", + argLen: 2, + generic: true, + }, + { + name: "ExpandUint64x4", + argLen: 2, + generic: true, + }, + { + name: "ExpandUint64x8", + argLen: 2, + generic: true, + }, { name: "FloorFloat32x4", argLen: 1, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 91fd3fb470f080..6b63b7024597fc 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1754,6 +1754,66 @@ func rewriteValueAMD64(v *Value) bool { return true case OpEqualUint8x64: return rewriteValueAMD64_OpEqualUint8x64(v) + case OpExpandFloat32x16: + return 
rewriteValueAMD64_OpExpandFloat32x16(v) + case OpExpandFloat32x4: + return rewriteValueAMD64_OpExpandFloat32x4(v) + case OpExpandFloat32x8: + return rewriteValueAMD64_OpExpandFloat32x8(v) + case OpExpandFloat64x2: + return rewriteValueAMD64_OpExpandFloat64x2(v) + case OpExpandFloat64x4: + return rewriteValueAMD64_OpExpandFloat64x4(v) + case OpExpandFloat64x8: + return rewriteValueAMD64_OpExpandFloat64x8(v) + case OpExpandInt16x16: + return rewriteValueAMD64_OpExpandInt16x16(v) + case OpExpandInt16x32: + return rewriteValueAMD64_OpExpandInt16x32(v) + case OpExpandInt16x8: + return rewriteValueAMD64_OpExpandInt16x8(v) + case OpExpandInt32x16: + return rewriteValueAMD64_OpExpandInt32x16(v) + case OpExpandInt32x4: + return rewriteValueAMD64_OpExpandInt32x4(v) + case OpExpandInt32x8: + return rewriteValueAMD64_OpExpandInt32x8(v) + case OpExpandInt64x2: + return rewriteValueAMD64_OpExpandInt64x2(v) + case OpExpandInt64x4: + return rewriteValueAMD64_OpExpandInt64x4(v) + case OpExpandInt64x8: + return rewriteValueAMD64_OpExpandInt64x8(v) + case OpExpandInt8x16: + return rewriteValueAMD64_OpExpandInt8x16(v) + case OpExpandInt8x32: + return rewriteValueAMD64_OpExpandInt8x32(v) + case OpExpandInt8x64: + return rewriteValueAMD64_OpExpandInt8x64(v) + case OpExpandUint16x16: + return rewriteValueAMD64_OpExpandUint16x16(v) + case OpExpandUint16x32: + return rewriteValueAMD64_OpExpandUint16x32(v) + case OpExpandUint16x8: + return rewriteValueAMD64_OpExpandUint16x8(v) + case OpExpandUint32x16: + return rewriteValueAMD64_OpExpandUint32x16(v) + case OpExpandUint32x4: + return rewriteValueAMD64_OpExpandUint32x4(v) + case OpExpandUint32x8: + return rewriteValueAMD64_OpExpandUint32x8(v) + case OpExpandUint64x2: + return rewriteValueAMD64_OpExpandUint64x2(v) + case OpExpandUint64x4: + return rewriteValueAMD64_OpExpandUint64x4(v) + case OpExpandUint64x8: + return rewriteValueAMD64_OpExpandUint64x8(v) + case OpExpandUint8x16: + return rewriteValueAMD64_OpExpandUint8x16(v) + case OpExpandUint8x32: + return rewriteValueAMD64_OpExpandUint8x32(v) + case OpExpandUint8x64: + return rewriteValueAMD64_OpExpandUint8x64(v) case OpFMA: return rewriteValueAMD64_OpFMA(v) case OpFloor: @@ -34479,6 +34539,486 @@ func rewriteValueAMD64_OpEqualUint8x64(v *Value) bool { return true } } +func rewriteValueAMD64_OpExpandFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandFloat32x16 x mask) + // result: (VEXPANDPSMasked512 x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VEXPANDPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandFloat32x4 x mask) + // result: (VEXPANDPSMasked128 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VEXPANDPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandFloat32x8 x mask) + // result: (VEXPANDPSMasked256 x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VEXPANDPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandFloat64x2(v *Value) bool 
{ + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandFloat64x2 x mask) + // result: (VEXPANDPDMasked128 x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VEXPANDPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandFloat64x4 x mask) + // result: (VEXPANDPDMasked256 x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VEXPANDPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandFloat64x8 x mask) + // result: (VEXPANDPDMasked512 x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VEXPANDPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandInt16x16 x mask) + // result: (VPEXPANDWMasked256 x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPEXPANDWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandInt16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandInt16x32 x mask) + // result: (VPEXPANDWMasked512 x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPEXPANDWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandInt16x8 x mask) + // result: (VPEXPANDWMasked128 x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPEXPANDWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandInt32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandInt32x16 x mask) + // result: (VPEXPANDDMasked512 x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPEXPANDDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandInt32x4 x mask) + // result: (VPEXPANDDMasked128 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPEXPANDDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandInt32x8 x mask) + // result: (VPEXPANDDMasked256 x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPEXPANDDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } 
+} +func rewriteValueAMD64_OpExpandInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandInt64x2 x mask) + // result: (VPEXPANDQMasked128 x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPEXPANDQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandInt64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandInt64x4 x mask) + // result: (VPEXPANDQMasked256 x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPEXPANDQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandInt64x8 x mask) + // result: (VPEXPANDQMasked512 x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPEXPANDQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandInt8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandInt8x16 x mask) + // result: (VPEXPANDBMasked128 x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPEXPANDBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandInt8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandInt8x32 x mask) + // result: (VPEXPANDBMasked256 x (VPMOVVec8x32ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPEXPANDBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandInt8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandInt8x64 x mask) + // result: (VPEXPANDBMasked512 x (VPMOVVec8x64ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPEXPANDBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandUint16x16 x mask) + // result: (VPEXPANDWMasked256 x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPEXPANDWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandUint16x32 x mask) + // result: (VPEXPANDWMasked512 x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPEXPANDWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandUint16x8 x mask) + // result: (VPEXPANDWMasked128 x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPEXPANDWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + 
v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandUint32x16 x mask) + // result: (VPEXPANDDMasked512 x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPEXPANDDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandUint32x4 x mask) + // result: (VPEXPANDDMasked128 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPEXPANDDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandUint32x8 x mask) + // result: (VPEXPANDDMasked256 x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPEXPANDDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandUint64x2 x mask) + // result: (VPEXPANDQMasked128 x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPEXPANDQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandUint64x4 x mask) + // result: (VPEXPANDQMasked256 x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPEXPANDQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandUint64x8 x mask) + // result: (VPEXPANDQMasked512 x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPEXPANDQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandUint8x16 x mask) + // result: (VPEXPANDBMasked128 x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPEXPANDBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandUint8x32 x mask) + // result: (VPEXPANDBMasked256 x (VPMOVVec8x32ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPEXPANDBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandUint8x64 x mask) + // result: (VPEXPANDBMasked512 x (VPMOVVec8x64ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPEXPANDBMasked512) + v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} func rewriteValueAMD64_OpFMA(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 873bb8e2de17c9..0f65b4500a1686 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -396,6 +396,36 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.EqualMasked", opLen3(ssa.OpEqualMaskedUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.EqualMasked", opLen3(ssa.OpEqualMaskedUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.EqualMasked", opLen3(ssa.OpEqualMaskedUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.Expand", opLen2(ssa.OpExpandFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.Expand", opLen2(ssa.OpExpandFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.Expand", opLen2(ssa.OpExpandFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.Expand", opLen2(ssa.OpExpandFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.Expand", opLen2(ssa.OpExpandFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.Expand", opLen2(ssa.OpExpandFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.Expand", opLen2(ssa.OpExpandInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.Expand", opLen2(ssa.OpExpandInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.Expand", opLen2(ssa.OpExpandInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.Expand", opLen2(ssa.OpExpandInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.Expand", opLen2(ssa.OpExpandInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.Expand", opLen2(ssa.OpExpandInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.Expand", opLen2(ssa.OpExpandInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.Expand", opLen2(ssa.OpExpandInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.Expand", opLen2(ssa.OpExpandInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.Expand", opLen2(ssa.OpExpandInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.Expand", opLen2(ssa.OpExpandInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.Expand", opLen2(ssa.OpExpandInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.Expand", opLen2(ssa.OpExpandUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.Expand", opLen2(ssa.OpExpandUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.Expand", opLen2(ssa.OpExpandUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.Expand", opLen2(ssa.OpExpandUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.Expand", opLen2(ssa.OpExpandUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.Expand", opLen2(ssa.OpExpandUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.Expand", opLen2(ssa.OpExpandUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.Expand", opLen2(ssa.OpExpandUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.Expand", opLen2(ssa.OpExpandUint32x16, types.TypeVec512), sys.AMD64) + 
addF(simdPackage, "Uint64x2.Expand", opLen2(ssa.OpExpandUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.Expand", opLen2(ssa.OpExpandUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.Expand", opLen2(ssa.OpExpandUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Floor", opLen1(ssa.OpFloorFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Floor", opLen1(ssa.OpFloorFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.Floor", opLen1(ssa.OpFloorFloat64x2, types.TypeVec128), sys.AMD64) diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 5eb8fea47691a2..2138271769db58 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -2399,6 +2399,188 @@ func (x Uint64x4) EqualMasked(y Uint64x4, mask Mask64x4) Mask64x4 // Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x8) EqualMasked(y Uint64x8, mask Mask64x8) Mask64x8 +/* Expand */ + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VEXPANDPS, CPU Feature: AVX512F +func (x Float32x4) Expand(mask Mask32x4) Float32x4 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VEXPANDPS, CPU Feature: AVX512F +func (x Float32x8) Expand(mask Mask32x8) Float32x8 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VEXPANDPS, CPU Feature: AVX512F +func (x Float32x16) Expand(mask Mask32x16) Float32x16 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VEXPANDPD, CPU Feature: AVX512F +func (x Float64x2) Expand(mask Mask64x2) Float64x2 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VEXPANDPD, CPU Feature: AVX512F +func (x Float64x4) Expand(mask Mask64x4) Float64x4 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VEXPANDPD, CPU Feature: AVX512F +func (x Float64x8) Expand(mask Mask64x8) Float64x8 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDB, CPU Feature: AVX512VBMI2 +func (x Int8x16) Expand(mask Mask8x16) Int8x16 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDB, CPU Feature: AVX512VBMI2 +func (x Int8x32) Expand(mask Mask8x32) Int8x32 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. 
+// +// Asm: VPEXPANDB, CPU Feature: AVX512VBMI2 +func (x Int8x64) Expand(mask Mask8x64) Int8x64 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDW, CPU Feature: AVX512VBMI2 +func (x Int16x8) Expand(mask Mask16x8) Int16x8 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDW, CPU Feature: AVX512VBMI2 +func (x Int16x16) Expand(mask Mask16x16) Int16x16 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDW, CPU Feature: AVX512VBMI2 +func (x Int16x32) Expand(mask Mask16x32) Int16x32 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDD, CPU Feature: AVX512F +func (x Int32x4) Expand(mask Mask32x4) Int32x4 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDD, CPU Feature: AVX512F +func (x Int32x8) Expand(mask Mask32x8) Int32x8 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDD, CPU Feature: AVX512F +func (x Int32x16) Expand(mask Mask32x16) Int32x16 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDQ, CPU Feature: AVX512F +func (x Int64x2) Expand(mask Mask64x2) Int64x2 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDQ, CPU Feature: AVX512F +func (x Int64x4) Expand(mask Mask64x4) Int64x4 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDQ, CPU Feature: AVX512F +func (x Int64x8) Expand(mask Mask64x8) Int64x8 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDB, CPU Feature: AVX512VBMI2 +func (x Uint8x16) Expand(mask Mask8x16) Uint8x16 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDB, CPU Feature: AVX512VBMI2 +func (x Uint8x32) Expand(mask Mask8x32) Uint8x32 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. 
+// +// Asm: VPEXPANDB, CPU Feature: AVX512VBMI2 +func (x Uint8x64) Expand(mask Mask8x64) Uint8x64 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDW, CPU Feature: AVX512VBMI2 +func (x Uint16x8) Expand(mask Mask16x8) Uint16x8 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDW, CPU Feature: AVX512VBMI2 +func (x Uint16x16) Expand(mask Mask16x16) Uint16x16 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDW, CPU Feature: AVX512VBMI2 +func (x Uint16x32) Expand(mask Mask16x32) Uint16x32 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDD, CPU Feature: AVX512F +func (x Uint32x4) Expand(mask Mask32x4) Uint32x4 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDD, CPU Feature: AVX512F +func (x Uint32x8) Expand(mask Mask32x8) Uint32x8 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDD, CPU Feature: AVX512F +func (x Uint32x16) Expand(mask Mask32x16) Uint32x16 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDQ, CPU Feature: AVX512F +func (x Uint64x2) Expand(mask Mask64x2) Uint64x2 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDQ, CPU Feature: AVX512F +func (x Uint64x4) Expand(mask Mask64x4) Uint64x4 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDQ, CPU Feature: AVX512F +func (x Uint64x8) Expand(mask Mask64x8) Uint64x8 + /* Floor */ // Floor rounds elements down to the nearest integer. 
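The Expand doc comments above describe the operation somewhat tersely; as a point of reference, the lane-level semantics are equivalent to the scalar sketch below. This is an illustration only, not part of the patch, and the helper name expandRef is invented for this note.

package main

import "fmt"

// expandRef models Expand on int32 lanes: the packed elements at the low
// end of src are distributed, in order, into the result lanes whose mask
// bit is set; every other result lane is left zero.
func expandRef(src []int32, mask []bool) []int32 {
	dst := make([]int32, len(src))
	next := 0 // index of the next packed element in src to place
	for i, m := range mask {
		if m {
			dst[i] = src[next]
			next++
		}
	}
	return dst
}

func main() {
	// Mirrors the TestExpand case in the next hunk: {3, 4, 0, 0} expanded
	// under mask lanes {0, 1, 0, 1} yields {0, 3, 0, 4}.
	fmt.Println(expandRef([]int32{3, 4, 0, 0}, []bool{false, true, false, true}))
}

For example, expandRef([]int32{3, 4, 0, 0}, []bool{false, true, false, true}) returns [0 3 0 4], which is exactly the case exercised by TestExpand in the simd_test.go hunk that follows.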
diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index 571834783887b4..9e9b45b5b8e422 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -187,6 +187,22 @@ func TestCompress(t *testing.T) { } } +func TestExpand(t *testing.T) { + if !simd.HasAVX512() { + t.Skip("Test requires HasAVX512, not available on this hardware") + return + } + v3400 := simd.LoadInt32x4Slice([]int32{3, 4, 0, 0}) + v0101 := simd.LoadInt32x4Slice([]int32{0, -1, 0, -1}) + v2400 := v3400.Expand(v0101.AsMask32x4()) + got := make([]int32, 4) + v2400.StoreSlice(got) + want := []int32{0, 3, 0, 4} + if !slices.Equal(got, want) { + t.Errorf("want and got differ, want=%v, got=%v", want, got) + } +} + func TestPairDotProdAccumulate(t *testing.T) { if !simd.HasAVX512GFNI() { // TODO: this function is actually VNNI, let's implement and call the right check. From b226bcc4a9ae71dd75effbd020220590a29a68a9 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Wed, 6 Aug 2025 19:03:52 +0000 Subject: [PATCH 116/139] [dev.simd] cmd/compile, simd: add value conversion ToBits for mask This CL is generated by CL 693598. Change-Id: I949d3b3b4e5670cb30f0fb9dc779f7359409b54c Reviewed-on: https://go-review.googlesource.com/c/go/+/693755 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/ssa.go | 3 +- src/cmd/compile/internal/ssa/_gen/AMD64.rules | 68 ++-- src/cmd/compile/internal/ssa/_gen/AMD64Ops.go | 13 +- .../compile/internal/ssa/_gen/genericOps.go | 14 + src/cmd/compile/internal/ssa/opGen.go | 144 +++++++- src/cmd/compile/internal/ssa/rewriteAMD64.go | 336 +++++++++++++++--- src/cmd/compile/internal/ssagen/intrinsics.go | 23 +- .../compile/internal/ssagen/simdintrinsics.go | 36 +- src/simd/simd_test.go | 10 + src/simd/types_amd64.go | 96 ++++- 10 files changed, 630 insertions(+), 113 deletions(-) diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 8847580e25422f..9a4203f7c6786f 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -1715,7 +1715,8 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() - case ssa.OpAMD64KMOVQ, ssa.OpAMD64KMOVD, ssa.OpAMD64KMOVW, ssa.OpAMD64KMOVB: + case ssa.OpAMD64KMOVQk, ssa.OpAMD64KMOVDk, ssa.OpAMD64KMOVWk, ssa.OpAMD64KMOVBk, + ssa.OpAMD64KMOVQi, ssa.OpAMD64KMOVDi, ssa.OpAMD64KMOVWi, ssa.OpAMD64KMOVBi: // See also ssa.OpAMD64KMOVQload p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64.rules b/src/cmd/compile/internal/ssa/_gen/AMD64.rules index dd9deef4afb216..8da4a031b47725 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/AMD64.rules @@ -1669,21 +1669,21 @@ // XXX SIMD // Mask loads -(LoadMask8x16 ptr mem) => (VPMOVMToVec8x16 (KMOVQload ptr mem)) -(LoadMask8x32 ptr mem) => (VPMOVMToVec8x32 (KMOVQload ptr mem)) -(LoadMask8x64 ptr mem) => (VPMOVMToVec8x64 (KMOVQload ptr mem)) +(LoadMask8x16 ptr mem) => (VPMOVMToVec8x16 (KMOVQload ptr mem)) +(LoadMask8x32 ptr mem) => (VPMOVMToVec8x32 (KMOVQload ptr mem)) +(LoadMask8x64 ptr mem) => (VPMOVMToVec8x64 (KMOVQload ptr mem)) -(LoadMask16x8 ptr mem) => (VPMOVMToVec16x8 (KMOVQload ptr mem)) -(LoadMask16x16 ptr mem) => (VPMOVMToVec16x16 (KMOVQload ptr mem)) -(LoadMask16x32 ptr mem) => (VPMOVMToVec16x32 (KMOVQload ptr mem)) +(LoadMask16x8 ptr mem) => (VPMOVMToVec16x8 (KMOVQload ptr mem)) +(LoadMask16x16 ptr mem) => (VPMOVMToVec16x16 (KMOVQload ptr mem)) +(LoadMask16x32 ptr mem) => 
(VPMOVMToVec16x32 (KMOVQload ptr mem)) -(LoadMask32x4 ptr mem) => (VPMOVMToVec32x4 (KMOVQload ptr mem)) -(LoadMask32x8 ptr mem) => (VPMOVMToVec32x8 (KMOVQload ptr mem)) -(LoadMask32x16 ptr mem) => (VPMOVMToVec32x16 (KMOVQload ptr mem)) +(LoadMask32x4 ptr mem) => (VPMOVMToVec32x4 (KMOVQload ptr mem)) +(LoadMask32x8 ptr mem) => (VPMOVMToVec32x8 (KMOVQload ptr mem)) +(LoadMask32x16 ptr mem) => (VPMOVMToVec32x16 (KMOVQload ptr mem)) -(LoadMask64x2 ptr mem) => (VPMOVMToVec64x2 (KMOVQload ptr mem)) -(LoadMask64x4 ptr mem) => (VPMOVMToVec64x4 (KMOVQload ptr mem)) -(LoadMask64x8 ptr mem) => (VPMOVMToVec64x8 (KMOVQload ptr mem)) +(LoadMask64x2 ptr mem) => (VPMOVMToVec64x2 (KMOVQload ptr mem)) +(LoadMask64x4 ptr mem) => (VPMOVMToVec64x4 (KMOVQload ptr mem)) +(LoadMask64x8 ptr mem) => (VPMOVMToVec64x8 (KMOVQload ptr mem)) (StoreMask8x16 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec8x16ToM val) mem) (StoreMask8x32 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec8x32ToM val) mem) @@ -1703,22 +1703,40 @@ // TODO is this correct? Should we just do it all from 64-bits? -// Mask conversions (from integers) -(Cvt16toMask8x16 x) => (VPMOVMToVec8x16 (KMOVW x)) -(Cvt32toMask8x32 x) => (VPMOVMToVec8x32 (KMOVD x)) -(Cvt64toMask8x64 x) => (VPMOVMToVec8x64 (KMOVQ x)) +// Mask conversions +// integers to masks +(Cvt16toMask8x16 x) => (VPMOVMToVec8x16 (KMOVWk x)) +(Cvt32toMask8x32 x) => (VPMOVMToVec8x32 (KMOVDk x)) +(Cvt64toMask8x64 x) => (VPMOVMToVec8x64 (KMOVQk x)) -(Cvt8toMask16x8 x) => (VPMOVMToVec16x8 (KMOVB x)) -(Cvt16toMask16x16 x) => (VPMOVMToVec16x16 (KMOVW x)) -(Cvt32toMask16x32 x) => (VPMOVMToVec16x32 (KMOVD x)) +(Cvt8toMask16x8 x) => (VPMOVMToVec16x8 (KMOVBk x)) +(Cvt16toMask16x16 x) => (VPMOVMToVec16x16 (KMOVWk x)) +(Cvt32toMask16x32 x) => (VPMOVMToVec16x32 (KMOVDk x)) -(Cvt8toMask32x4 x) => (VPMOVMToVec32x4 (KMOVB x)) -(Cvt8toMask32x8 x) => (VPMOVMToVec32x8 (KMOVB x)) -(Cvt16toMask32x16 x) => (VPMOVMToVec32x16 (KMOVW x)) +(Cvt8toMask32x4 x) => (VPMOVMToVec32x4 (KMOVBk x)) +(Cvt8toMask32x8 x) => (VPMOVMToVec32x8 (KMOVBk x)) +(Cvt16toMask32x16 x) => (VPMOVMToVec32x16 (KMOVWk x)) -(Cvt8toMask64x2 x) => (VPMOVMToVec64x2 (KMOVB x)) -(Cvt8toMask64x4 x) => (VPMOVMToVec64x4 (KMOVB x)) -(Cvt8toMask64x8 x) => (VPMOVMToVec64x8 (KMOVB x)) +(Cvt8toMask64x2 x) => (VPMOVMToVec64x2 (KMOVBk x)) +(Cvt8toMask64x4 x) => (VPMOVMToVec64x4 (KMOVBk x)) +(Cvt8toMask64x8 x) => (VPMOVMToVec64x8 (KMOVBk x)) + +// masks to integers +(CvtMask8x16to16 x) => (KMOVWi (VPMOVVec8x16ToM x)) +(CvtMask8x32to32 x) => (KMOVDi (VPMOVVec8x32ToM x)) +(CvtMask8x64to64 x) => (KMOVQi (VPMOVVec8x64ToM x)) + +(CvtMask16x8to8 x) => (KMOVBi (VPMOVVec16x8ToM x)) +(CvtMask16x16to16 x) => (KMOVWi (VPMOVVec16x16ToM x)) +(CvtMask16x32to32 x) => (KMOVDi (VPMOVVec16x32ToM x)) + +(CvtMask32x4to8 x) => (KMOVBi (VPMOVVec32x4ToM x)) +(CvtMask32x8to8 x) => (KMOVBi (VPMOVVec32x8ToM x)) +(CvtMask32x16to16 x) => (KMOVWi (VPMOVVec32x16ToM x)) + +(CvtMask64x2to8 x) => (KMOVBi (VPMOVVec64x2ToM x)) +(CvtMask64x4to8 x) => (KMOVBi (VPMOVVec64x4ToM x)) +(CvtMask64x8to8 x) => (KMOVBi (VPMOVVec64x8ToM x)) // SIMD vector loads and stores (Load ptr mem) && t.Size() == 16 => (VMOVDQUload128 ptr mem) diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go index bc30e6574fe950..fdc80c9a805ed6 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go @@ -242,6 +242,7 @@ func init() { kload = regInfo{inputs: []regMask{gpspsb, 0}, outputs: maskonly} kstore = regInfo{inputs: 
[]regMask{gpspsb, mask, 0}} gpk = regInfo{inputs: gponly, outputs: maskonly} + kgp = regInfo{inputs: maskonly, outputs: gponly} prefreg = regInfo{inputs: []regMask{gpspsbg}} ) @@ -1367,10 +1368,14 @@ func init() { {name: "KMOVQstore", argLength: 3, reg: kstore, asm: "KMOVQ", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // Move GP directly to mask register - {name: "KMOVQ", argLength: 1, reg: gpk, asm: "KMOVQ"}, - {name: "KMOVD", argLength: 1, reg: gpk, asm: "KMOVD"}, - {name: "KMOVW", argLength: 1, reg: gpk, asm: "KMOVW"}, - {name: "KMOVB", argLength: 1, reg: gpk, asm: "KMOVB"}, + {name: "KMOVQk", argLength: 1, reg: gpk, asm: "KMOVQ"}, + {name: "KMOVDk", argLength: 1, reg: gpk, asm: "KMOVD"}, + {name: "KMOVWk", argLength: 1, reg: gpk, asm: "KMOVW"}, + {name: "KMOVBk", argLength: 1, reg: gpk, asm: "KMOVB"}, + {name: "KMOVQi", argLength: 1, reg: kgp, asm: "KMOVQ"}, + {name: "KMOVDi", argLength: 1, reg: kgp, asm: "KMOVD"}, + {name: "KMOVWi", argLength: 1, reg: kgp, asm: "KMOVW"}, + {name: "KMOVBi", argLength: 1, reg: kgp, asm: "KMOVB"}, } var AMD64blocks = []blockData{ diff --git a/src/cmd/compile/internal/ssa/_gen/genericOps.go b/src/cmd/compile/internal/ssa/_gen/genericOps.go index 34514abc92fdef..26f3e758bdf8b4 100644 --- a/src/cmd/compile/internal/ssa/_gen/genericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/genericOps.go @@ -717,6 +717,20 @@ var genericOps = []opData{ {name: "Cvt8toMask64x2", argLength: 1}, // arg0 = integer mask value {name: "Cvt8toMask64x4", argLength: 1}, // arg0 = integer mask value {name: "Cvt8toMask64x8", argLength: 1}, // arg0 = integer mask value + + // Convert masks to integers + {name: "CvtMask8x16to16", argLength: 1}, // arg0 = mask + {name: "CvtMask8x32to32", argLength: 1}, // arg0 = mask + {name: "CvtMask8x64to64", argLength: 1}, // arg0 = mask + {name: "CvtMask16x8to8", argLength: 1}, // arg0 = mask + {name: "CvtMask16x16to16", argLength: 1}, // arg0 = mask + {name: "CvtMask16x32to32", argLength: 1}, // arg0 = mask + {name: "CvtMask32x4to8", argLength: 1}, // arg0 = mask + {name: "CvtMask32x8to8", argLength: 1}, // arg0 = mask + {name: "CvtMask32x16to16", argLength: 1}, // arg0 = mask + {name: "CvtMask64x2to8", argLength: 1}, // arg0 = mask + {name: "CvtMask64x4to8", argLength: 1}, // arg0 = mask + {name: "CvtMask64x8to8", argLength: 1}, // arg0 = mask } // kind controls successors implicit exit diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 2fafe10ea517b9..7c135ea692cee0 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1218,10 +1218,14 @@ const ( OpAMD64VZEROALL OpAMD64KMOVQload OpAMD64KMOVQstore - OpAMD64KMOVQ - OpAMD64KMOVD - OpAMD64KMOVW - OpAMD64KMOVB + OpAMD64KMOVQk + OpAMD64KMOVDk + OpAMD64KMOVWk + OpAMD64KMOVBk + OpAMD64KMOVQi + OpAMD64KMOVDi + OpAMD64KMOVWi + OpAMD64KMOVBi OpAMD64VADDPD128 OpAMD64VADDPD256 OpAMD64VADDPD512 @@ -4582,6 +4586,18 @@ const ( OpCvt8toMask64x2 OpCvt8toMask64x4 OpCvt8toMask64x8 + OpCvtMask8x16to16 + OpCvtMask8x32to32 + OpCvtMask8x64to64 + OpCvtMask16x8to8 + OpCvtMask16x16to16 + OpCvtMask16x32to32 + OpCvtMask32x4to8 + OpCvtMask32x8to8 + OpCvtMask32x16to16 + OpCvtMask64x2to8 + OpCvtMask64x4to8 + OpCvtMask64x8to8 OpAbsoluteInt8x16 OpAbsoluteInt8x32 OpAbsoluteInt8x64 @@ -19400,7 +19416,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "KMOVQ", + name: "KMOVQk", argLen: 1, asm: x86.AKMOVQ, reg: regInfo{ @@ -19413,7 +19429,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "KMOVD", + name: "KMOVDk", argLen: 1, 
asm: x86.AKMOVD, reg: regInfo{ @@ -19426,7 +19442,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "KMOVW", + name: "KMOVWk", argLen: 1, asm: x86.AKMOVW, reg: regInfo{ @@ -19439,7 +19455,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "KMOVB", + name: "KMOVBk", argLen: 1, asm: x86.AKMOVB, reg: regInfo{ @@ -19451,6 +19467,58 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "KMOVQi", + argLen: 1, + asm: x86.AKMOVQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "KMOVDi", + argLen: 1, + asm: x86.AKMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "KMOVWi", + argLen: 1, + asm: x86.AKMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "KMOVBi", + argLen: 1, + asm: x86.AKMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, { name: "VADDPD128", argLen: 2, @@ -63129,6 +63197,66 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "CvtMask8x16to16", + argLen: 1, + generic: true, + }, + { + name: "CvtMask8x32to32", + argLen: 1, + generic: true, + }, + { + name: "CvtMask8x64to64", + argLen: 1, + generic: true, + }, + { + name: "CvtMask16x8to8", + argLen: 1, + generic: true, + }, + { + name: "CvtMask16x16to16", + argLen: 1, + generic: true, + }, + { + name: "CvtMask16x32to32", + argLen: 1, + generic: true, + }, + { + name: "CvtMask32x4to8", + argLen: 1, + generic: true, + }, + { + name: "CvtMask32x8to8", + argLen: 1, + generic: true, + }, + { + name: "CvtMask32x16to16", + argLen: 1, + generic: true, + }, + { + name: "CvtMask64x2to8", + argLen: 1, + generic: true, + }, + { + name: "CvtMask64x4to8", + argLen: 1, + generic: true, + }, + { + name: "CvtMask64x8to8", + argLen: 1, + generic: true, + }, { name: "AbsoluteInt8x16", argLen: 1, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 6b63b7024597fc..eacb30768f8a9d 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1541,6 +1541,30 @@ func rewriteValueAMD64(v *Value) bool { case OpCvtBoolToUint8: v.Op = OpCopy return true + case OpCvtMask16x16to16: + return rewriteValueAMD64_OpCvtMask16x16to16(v) + case OpCvtMask16x32to32: + return rewriteValueAMD64_OpCvtMask16x32to32(v) + case OpCvtMask16x8to8: + return rewriteValueAMD64_OpCvtMask16x8to8(v) + case OpCvtMask32x16to16: + return rewriteValueAMD64_OpCvtMask32x16to16(v) + case OpCvtMask32x4to8: + return rewriteValueAMD64_OpCvtMask32x4to8(v) + case OpCvtMask32x8to8: + return rewriteValueAMD64_OpCvtMask32x8to8(v) + case OpCvtMask64x2to8: + return rewriteValueAMD64_OpCvtMask64x2to8(v) + case OpCvtMask64x4to8: + return rewriteValueAMD64_OpCvtMask64x4to8(v) + case OpCvtMask64x8to8: + return rewriteValueAMD64_OpCvtMask64x8to8(v) + case OpCvtMask8x16to16: + return rewriteValueAMD64_OpCvtMask8x16to16(v) + case OpCvtMask8x32to32: + return rewriteValueAMD64_OpCvtMask8x32to32(v) + case OpCvtMask8x64to64: + return 
rewriteValueAMD64_OpCvtMask8x64to64(v) case OpDiv128u: v.Op = OpAMD64DIVQU2 return true @@ -33047,12 +33071,13 @@ func rewriteValueAMD64_OpCvt16toMask16x16(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (Cvt16toMask16x16 x) - // result: (VPMOVMToVec16x16 (KMOVW x)) + // result: (VPMOVMToVec16x16 (KMOVWk x)) for { + t := v.Type x := v_0 v.reset(OpAMD64VPMOVMToVec16x16) v.Type = types.TypeVec256 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVW, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVWk, t) v0.AddArg(x) v.AddArg(v0) return true @@ -33062,12 +33087,13 @@ func rewriteValueAMD64_OpCvt16toMask32x16(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (Cvt16toMask32x16 x) - // result: (VPMOVMToVec32x16 (KMOVW x)) + // result: (VPMOVMToVec32x16 (KMOVWk x)) for { + t := v.Type x := v_0 v.reset(OpAMD64VPMOVMToVec32x16) v.Type = types.TypeVec512 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVW, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVWk, t) v0.AddArg(x) v.AddArg(v0) return true @@ -33077,12 +33103,13 @@ func rewriteValueAMD64_OpCvt16toMask8x16(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (Cvt16toMask8x16 x) - // result: (VPMOVMToVec8x16 (KMOVW x)) + // result: (VPMOVMToVec8x16 (KMOVWk x)) for { + t := v.Type x := v_0 v.reset(OpAMD64VPMOVMToVec8x16) v.Type = types.TypeVec128 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVW, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVWk, t) v0.AddArg(x) v.AddArg(v0) return true @@ -33092,12 +33119,13 @@ func rewriteValueAMD64_OpCvt32toMask16x32(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (Cvt32toMask16x32 x) - // result: (VPMOVMToVec16x32 (KMOVD x)) + // result: (VPMOVMToVec16x32 (KMOVDk x)) for { + t := v.Type x := v_0 v.reset(OpAMD64VPMOVMToVec16x32) v.Type = types.TypeVec512 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVD, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVDk, t) v0.AddArg(x) v.AddArg(v0) return true @@ -33107,12 +33135,13 @@ func rewriteValueAMD64_OpCvt32toMask8x32(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (Cvt32toMask8x32 x) - // result: (VPMOVMToVec8x32 (KMOVD x)) + // result: (VPMOVMToVec8x32 (KMOVDk x)) for { + t := v.Type x := v_0 v.reset(OpAMD64VPMOVMToVec8x32) v.Type = types.TypeVec256 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVD, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVDk, t) v0.AddArg(x) v.AddArg(v0) return true @@ -33122,12 +33151,13 @@ func rewriteValueAMD64_OpCvt64toMask8x64(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (Cvt64toMask8x64 x) - // result: (VPMOVMToVec8x64 (KMOVQ x)) + // result: (VPMOVMToVec8x64 (KMOVQk x)) for { + t := v.Type x := v_0 v.reset(OpAMD64VPMOVMToVec8x64) v.Type = types.TypeVec512 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQ, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQk, t) v0.AddArg(x) v.AddArg(v0) return true @@ -33137,12 +33167,13 @@ func rewriteValueAMD64_OpCvt8toMask16x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (Cvt8toMask16x8 x) - // result: (VPMOVMToVec16x8 (KMOVB x)) + // result: (VPMOVMToVec16x8 (KMOVBk x)) for { + t := v.Type x := v_0 v.reset(OpAMD64VPMOVMToVec16x8) v.Type = types.TypeVec128 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVB, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVBk, t) v0.AddArg(x) v.AddArg(v0) return true @@ -33152,12 +33183,13 @@ func rewriteValueAMD64_OpCvt8toMask32x4(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (Cvt8toMask32x4 x) - // result: (VPMOVMToVec32x4 (KMOVB x)) + // result: (VPMOVMToVec32x4 (KMOVBk x)) for { + t := v.Type x := v_0 
v.reset(OpAMD64VPMOVMToVec32x4) v.Type = types.TypeVec128 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVB, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVBk, t) v0.AddArg(x) v.AddArg(v0) return true @@ -33167,12 +33199,13 @@ func rewriteValueAMD64_OpCvt8toMask32x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (Cvt8toMask32x8 x) - // result: (VPMOVMToVec32x8 (KMOVB x)) + // result: (VPMOVMToVec32x8 (KMOVBk x)) for { + t := v.Type x := v_0 v.reset(OpAMD64VPMOVMToVec32x8) v.Type = types.TypeVec256 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVB, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVBk, t) v0.AddArg(x) v.AddArg(v0) return true @@ -33182,12 +33215,13 @@ func rewriteValueAMD64_OpCvt8toMask64x2(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (Cvt8toMask64x2 x) - // result: (VPMOVMToVec64x2 (KMOVB x)) + // result: (VPMOVMToVec64x2 (KMOVBk x)) for { + t := v.Type x := v_0 v.reset(OpAMD64VPMOVMToVec64x2) v.Type = types.TypeVec128 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVB, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVBk, t) v0.AddArg(x) v.AddArg(v0) return true @@ -33197,12 +33231,13 @@ func rewriteValueAMD64_OpCvt8toMask64x4(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (Cvt8toMask64x4 x) - // result: (VPMOVMToVec64x4 (KMOVB x)) + // result: (VPMOVMToVec64x4 (KMOVBk x)) for { + t := v.Type x := v_0 v.reset(OpAMD64VPMOVMToVec64x4) v.Type = types.TypeVec256 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVB, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVBk, t) v0.AddArg(x) v.AddArg(v0) return true @@ -33212,12 +33247,205 @@ func rewriteValueAMD64_OpCvt8toMask64x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (Cvt8toMask64x8 x) - // result: (VPMOVMToVec64x8 (KMOVB x)) + // result: (VPMOVMToVec64x8 (KMOVBk x)) for { + t := v.Type x := v_0 v.reset(OpAMD64VPMOVMToVec64x8) v.Type = types.TypeVec512 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVB, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVBk, t) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpCvtMask16x16to16(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (CvtMask16x16to16 x) + // result: (KMOVWi (VPMOVVec16x16ToM x)) + for { + t := v.Type + x := v_0 + v.reset(OpAMD64KMOVWi) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpCvtMask16x32to32(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (CvtMask16x32to32 x) + // result: (KMOVDi (VPMOVVec16x32ToM x)) + for { + t := v.Type + x := v_0 + v.reset(OpAMD64KMOVDi) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpCvtMask16x8to8(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (CvtMask16x8to8 x) + // result: (KMOVBi (VPMOVVec16x8ToM x)) + for { + t := v.Type + x := v_0 + v.reset(OpAMD64KMOVBi) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpCvtMask32x16to16(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (CvtMask32x16to16 x) + // result: (KMOVWi (VPMOVVec32x16ToM x)) + for { + t := v.Type + x := v_0 + v.reset(OpAMD64KMOVWi) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpCvtMask32x4to8(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // 
match: (CvtMask32x4to8 x) + // result: (KMOVBi (VPMOVVec32x4ToM x)) + for { + t := v.Type + x := v_0 + v.reset(OpAMD64KMOVBi) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpCvtMask32x8to8(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (CvtMask32x8to8 x) + // result: (KMOVBi (VPMOVVec32x8ToM x)) + for { + t := v.Type + x := v_0 + v.reset(OpAMD64KMOVBi) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpCvtMask64x2to8(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (CvtMask64x2to8 x) + // result: (KMOVBi (VPMOVVec64x2ToM x)) + for { + t := v.Type + x := v_0 + v.reset(OpAMD64KMOVBi) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpCvtMask64x4to8(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (CvtMask64x4to8 x) + // result: (KMOVBi (VPMOVVec64x4ToM x)) + for { + t := v.Type + x := v_0 + v.reset(OpAMD64KMOVBi) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpCvtMask64x8to8(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (CvtMask64x8to8 x) + // result: (KMOVBi (VPMOVVec64x8ToM x)) + for { + t := v.Type + x := v_0 + v.reset(OpAMD64KMOVBi) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpCvtMask8x16to16(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (CvtMask8x16to16 x) + // result: (KMOVWi (VPMOVVec8x16ToM x)) + for { + t := v.Type + x := v_0 + v.reset(OpAMD64KMOVWi) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpCvtMask8x32to32(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (CvtMask8x32to32 x) + // result: (KMOVDi (VPMOVVec8x32ToM x)) + for { + t := v.Type + x := v_0 + v.reset(OpAMD64KMOVDi) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpCvtMask8x64to64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (CvtMask8x64to64 x) + // result: (KMOVQi (VPMOVVec8x64ToM x)) + for { + t := v.Type + x := v_0 + v.reset(OpAMD64KMOVQi) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(x) v.AddArg(v0) return true @@ -41827,13 +42055,14 @@ func rewriteValueAMD64_OpLoadMask16x16(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (LoadMask16x16 ptr mem) - // result: (VPMOVMToVec16x16 (KMOVQload ptr mem)) + // result: (VPMOVMToVec16x16 (KMOVQload ptr mem)) for { + t := v.Type ptr := v_0 mem := v_1 v.reset(OpAMD64VPMOVMToVec16x16) v.Type = types.TypeVec256 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) v0.AddArg2(ptr, mem) v.AddArg(v0) return true @@ -41844,13 +42073,14 @@ func rewriteValueAMD64_OpLoadMask16x32(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (LoadMask16x32 ptr mem) - // result: (VPMOVMToVec16x32 (KMOVQload ptr mem)) + // result: (VPMOVMToVec16x32 (KMOVQload ptr mem)) for { + t := v.Type ptr := v_0 mem := v_1 
v.reset(OpAMD64VPMOVMToVec16x32) v.Type = types.TypeVec512 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) v0.AddArg2(ptr, mem) v.AddArg(v0) return true @@ -41861,13 +42091,14 @@ func rewriteValueAMD64_OpLoadMask16x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (LoadMask16x8 ptr mem) - // result: (VPMOVMToVec16x8 (KMOVQload ptr mem)) + // result: (VPMOVMToVec16x8 (KMOVQload ptr mem)) for { + t := v.Type ptr := v_0 mem := v_1 v.reset(OpAMD64VPMOVMToVec16x8) v.Type = types.TypeVec128 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) v0.AddArg2(ptr, mem) v.AddArg(v0) return true @@ -41878,13 +42109,14 @@ func rewriteValueAMD64_OpLoadMask32x16(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (LoadMask32x16 ptr mem) - // result: (VPMOVMToVec32x16 (KMOVQload ptr mem)) + // result: (VPMOVMToVec32x16 (KMOVQload ptr mem)) for { + t := v.Type ptr := v_0 mem := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v.Type = types.TypeVec512 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) v0.AddArg2(ptr, mem) v.AddArg(v0) return true @@ -41895,13 +42127,14 @@ func rewriteValueAMD64_OpLoadMask32x4(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (LoadMask32x4 ptr mem) - // result: (VPMOVMToVec32x4 (KMOVQload ptr mem)) + // result: (VPMOVMToVec32x4 (KMOVQload ptr mem)) for { + t := v.Type ptr := v_0 mem := v_1 v.reset(OpAMD64VPMOVMToVec32x4) v.Type = types.TypeVec128 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) v0.AddArg2(ptr, mem) v.AddArg(v0) return true @@ -41912,13 +42145,14 @@ func rewriteValueAMD64_OpLoadMask32x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (LoadMask32x8 ptr mem) - // result: (VPMOVMToVec32x8 (KMOVQload ptr mem)) + // result: (VPMOVMToVec32x8 (KMOVQload ptr mem)) for { + t := v.Type ptr := v_0 mem := v_1 v.reset(OpAMD64VPMOVMToVec32x8) v.Type = types.TypeVec256 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) v0.AddArg2(ptr, mem) v.AddArg(v0) return true @@ -41929,13 +42163,14 @@ func rewriteValueAMD64_OpLoadMask64x2(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (LoadMask64x2 ptr mem) - // result: (VPMOVMToVec64x2 (KMOVQload ptr mem)) + // result: (VPMOVMToVec64x2 (KMOVQload ptr mem)) for { + t := v.Type ptr := v_0 mem := v_1 v.reset(OpAMD64VPMOVMToVec64x2) v.Type = types.TypeVec128 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) v0.AddArg2(ptr, mem) v.AddArg(v0) return true @@ -41946,13 +42181,14 @@ func rewriteValueAMD64_OpLoadMask64x4(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (LoadMask64x4 ptr mem) - // result: (VPMOVMToVec64x4 (KMOVQload ptr mem)) + // result: (VPMOVMToVec64x4 (KMOVQload ptr mem)) for { + t := v.Type ptr := v_0 mem := v_1 v.reset(OpAMD64VPMOVMToVec64x4) v.Type = types.TypeVec256 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) v0.AddArg2(ptr, mem) v.AddArg(v0) return true @@ -41963,13 +42199,14 @@ func rewriteValueAMD64_OpLoadMask64x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (LoadMask64x8 ptr mem) - // result: (VPMOVMToVec64x8 (KMOVQload ptr mem)) + // result: (VPMOVMToVec64x8 (KMOVQload ptr mem)) for { + t := v.Type ptr := v_0 mem := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v.Type = 
types.TypeVec512 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) v0.AddArg2(ptr, mem) v.AddArg(v0) return true @@ -41980,13 +42217,14 @@ func rewriteValueAMD64_OpLoadMask8x16(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (LoadMask8x16 ptr mem) - // result: (VPMOVMToVec8x16 (KMOVQload ptr mem)) + // result: (VPMOVMToVec8x16 (KMOVQload ptr mem)) for { + t := v.Type ptr := v_0 mem := v_1 v.reset(OpAMD64VPMOVMToVec8x16) v.Type = types.TypeVec128 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) v0.AddArg2(ptr, mem) v.AddArg(v0) return true @@ -41997,13 +42235,14 @@ func rewriteValueAMD64_OpLoadMask8x32(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (LoadMask8x32 ptr mem) - // result: (VPMOVMToVec8x32 (KMOVQload ptr mem)) + // result: (VPMOVMToVec8x32 (KMOVQload ptr mem)) for { + t := v.Type ptr := v_0 mem := v_1 v.reset(OpAMD64VPMOVMToVec8x32) v.Type = types.TypeVec256 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) v0.AddArg2(ptr, mem) v.AddArg(v0) return true @@ -42014,13 +42253,14 @@ func rewriteValueAMD64_OpLoadMask8x64(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (LoadMask8x64 ptr mem) - // result: (VPMOVMToVec8x64 (KMOVQload ptr mem)) + // result: (VPMOVMToVec8x64 (KMOVQload ptr mem)) for { + t := v.Type ptr := v_0 mem := v_1 v.reset(OpAMD64VPMOVMToVec8x64) v.Type = types.TypeVec512 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) v0.AddArg2(ptr, mem) v.AddArg(v0) return true diff --git a/src/cmd/compile/internal/ssagen/intrinsics.go b/src/cmd/compile/internal/ssagen/intrinsics.go index eae754da4e80fc..45ccb9c9998947 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics.go +++ b/src/cmd/compile/internal/ssagen/intrinsics.go @@ -1782,13 +1782,20 @@ var loadMaskOpcodes = map[int]map[int]ssa.Op{ 64: {2: ssa.OpLoadMask64x2, 4: ssa.OpLoadMask64x4, 8: ssa.OpLoadMask64x8}, } -var cvtMaskOpcodes = map[int]map[int]ssa.Op{ +var cvtVToMaskOpcodes = map[int]map[int]ssa.Op{ 8: {16: ssa.OpCvt16toMask8x16, 32: ssa.OpCvt32toMask8x32, 64: ssa.OpCvt64toMask8x64}, 16: {8: ssa.OpCvt8toMask16x8, 16: ssa.OpCvt16toMask16x16, 32: ssa.OpCvt32toMask16x32}, 32: {4: ssa.OpCvt8toMask32x4, 8: ssa.OpCvt8toMask32x8, 16: ssa.OpCvt16toMask32x16}, 64: {2: ssa.OpCvt8toMask64x2, 4: ssa.OpCvt8toMask64x4, 8: ssa.OpCvt8toMask64x8}, } +var cvtMaskToVOpcodes = map[int]map[int]ssa.Op{ + 8: {16: ssa.OpCvtMask8x16to16, 32: ssa.OpCvtMask8x32to32, 64: ssa.OpCvtMask8x64to64}, + 16: {8: ssa.OpCvtMask16x8to8, 16: ssa.OpCvtMask16x16to16, 32: ssa.OpCvtMask16x32to32}, + 32: {4: ssa.OpCvtMask32x4to8, 8: ssa.OpCvtMask32x8to8, 16: ssa.OpCvtMask32x16to16}, + 64: {2: ssa.OpCvtMask64x2to8, 4: ssa.OpCvtMask64x4to8, 8: ssa.OpCvtMask64x8to8}, +} + func simdLoadMask(elemBits, lanes int) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { op := loadMaskOpcodes[elemBits][lanes] @@ -1816,9 +1823,9 @@ func simdStoreMask(elemBits, lanes int) func(s *state, n *ir.CallExpr, args []*s } } -func simdCvtMask(elemBits, lanes int) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { +func simdCvtVToMask(elemBits, lanes int) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { - op := cvtMaskOpcodes[elemBits][lanes] + op := 
cvtVToMaskOpcodes[elemBits][lanes] if op == 0 { panic(fmt.Sprintf("Unknown mask shape: Mask%dx%d", elemBits, lanes)) } @@ -1826,6 +1833,16 @@ func simdCvtMask(elemBits, lanes int) func(s *state, n *ir.CallExpr, args []*ssa } } +func simdCvtMaskToV(elemBits, lanes int) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + op := cvtMaskToVOpcodes[elemBits][lanes] + if op == 0 { + panic(fmt.Sprintf("Unknown mask shape: Mask%dx%d", elemBits, lanes)) + } + return s.newValue1(op, n.Type(), args[0]) + } +} + func simdMaskedLoad(op ssa.Op) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue3(op, n.Type(), args[0], args[1], s.mem()) diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 0f65b4500a1686..c7f97e03a0dd83 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -2314,82 +2314,94 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Mask8x16.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "LoadMask8x16FromBits", simdLoadMask(8, 16), sys.AMD64) addF(simdPackage, "Mask8x16.StoreToBits", simdStoreMask(8, 16), sys.AMD64) - addF(simdPackage, "Mask8x16FromBits", simdCvtMask(8, 16), sys.AMD64) + addF(simdPackage, "Mask8x16FromBits", simdCvtVToMask(8, 16), sys.AMD64) + addF(simdPackage, "Mask8x16.ToBits", simdCvtMaskToV(8, 16), sys.AMD64) addF(simdPackage, "Mask8x32.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x32.AsMask8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask8x32.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Mask8x32.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "LoadMask8x32FromBits", simdLoadMask(8, 32), sys.AMD64) addF(simdPackage, "Mask8x32.StoreToBits", simdStoreMask(8, 32), sys.AMD64) - addF(simdPackage, "Mask8x32FromBits", simdCvtMask(8, 32), sys.AMD64) + addF(simdPackage, "Mask8x32FromBits", simdCvtVToMask(8, 32), sys.AMD64) + addF(simdPackage, "Mask8x32.ToBits", simdCvtMaskToV(8, 32), sys.AMD64) addF(simdPackage, "Mask8x64.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x64.AsMask8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask8x64.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask8x64.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "LoadMask8x64FromBits", simdLoadMask(8, 64), sys.AMD64) addF(simdPackage, "Mask8x64.StoreToBits", simdStoreMask(8, 64), sys.AMD64) - addF(simdPackage, "Mask8x64FromBits", simdCvtMask(8, 64), sys.AMD64) + addF(simdPackage, "Mask8x64FromBits", simdCvtVToMask(8, 64), sys.AMD64) + addF(simdPackage, "Mask8x64.ToBits", simdCvtMaskToV(8, 64), sys.AMD64) addF(simdPackage, "Mask16x8.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x8.AsMask16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, 
"Mask16x8.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Mask16x8.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "LoadMask16x8FromBits", simdLoadMask(16, 8), sys.AMD64) addF(simdPackage, "Mask16x8.StoreToBits", simdStoreMask(16, 8), sys.AMD64) - addF(simdPackage, "Mask16x8FromBits", simdCvtMask(16, 8), sys.AMD64) + addF(simdPackage, "Mask16x8FromBits", simdCvtVToMask(16, 8), sys.AMD64) + addF(simdPackage, "Mask16x8.ToBits", simdCvtMaskToV(16, 8), sys.AMD64) addF(simdPackage, "Mask16x16.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x16.AsMask16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask16x16.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Mask16x16.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "LoadMask16x16FromBits", simdLoadMask(16, 16), sys.AMD64) addF(simdPackage, "Mask16x16.StoreToBits", simdStoreMask(16, 16), sys.AMD64) - addF(simdPackage, "Mask16x16FromBits", simdCvtMask(16, 16), sys.AMD64) + addF(simdPackage, "Mask16x16FromBits", simdCvtVToMask(16, 16), sys.AMD64) + addF(simdPackage, "Mask16x16.ToBits", simdCvtMaskToV(16, 16), sys.AMD64) addF(simdPackage, "Mask16x32.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x32.AsMask16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask16x32.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask16x32.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "LoadMask16x32FromBits", simdLoadMask(16, 32), sys.AMD64) addF(simdPackage, "Mask16x32.StoreToBits", simdStoreMask(16, 32), sys.AMD64) - addF(simdPackage, "Mask16x32FromBits", simdCvtMask(16, 32), sys.AMD64) + addF(simdPackage, "Mask16x32FromBits", simdCvtVToMask(16, 32), sys.AMD64) + addF(simdPackage, "Mask16x32.ToBits", simdCvtMaskToV(16, 32), sys.AMD64) addF(simdPackage, "Mask32x4.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x4.AsMask32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask32x4.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Mask32x4.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "LoadMask32x4FromBits", simdLoadMask(32, 4), sys.AMD64) addF(simdPackage, "Mask32x4.StoreToBits", simdStoreMask(32, 4), sys.AMD64) - addF(simdPackage, "Mask32x4FromBits", simdCvtMask(32, 4), sys.AMD64) + addF(simdPackage, "Mask32x4FromBits", simdCvtVToMask(32, 4), sys.AMD64) + addF(simdPackage, "Mask32x4.ToBits", simdCvtMaskToV(32, 4), sys.AMD64) addF(simdPackage, "Mask32x8.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x8.AsMask32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask32x8.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Mask32x8.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "LoadMask32x8FromBits", simdLoadMask(32, 8), sys.AMD64) addF(simdPackage, "Mask32x8.StoreToBits", simdStoreMask(32, 
8), sys.AMD64) - addF(simdPackage, "Mask32x8FromBits", simdCvtMask(32, 8), sys.AMD64) + addF(simdPackage, "Mask32x8FromBits", simdCvtVToMask(32, 8), sys.AMD64) + addF(simdPackage, "Mask32x8.ToBits", simdCvtMaskToV(32, 8), sys.AMD64) addF(simdPackage, "Mask32x16.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x16.AsMask32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask32x16.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask32x16.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "LoadMask32x16FromBits", simdLoadMask(32, 16), sys.AMD64) addF(simdPackage, "Mask32x16.StoreToBits", simdStoreMask(32, 16), sys.AMD64) - addF(simdPackage, "Mask32x16FromBits", simdCvtMask(32, 16), sys.AMD64) + addF(simdPackage, "Mask32x16FromBits", simdCvtVToMask(32, 16), sys.AMD64) + addF(simdPackage, "Mask32x16.ToBits", simdCvtMaskToV(32, 16), sys.AMD64) addF(simdPackage, "Mask64x2.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x2.AsMask64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask64x2.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Mask64x2.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "LoadMask64x2FromBits", simdLoadMask(64, 2), sys.AMD64) addF(simdPackage, "Mask64x2.StoreToBits", simdStoreMask(64, 2), sys.AMD64) - addF(simdPackage, "Mask64x2FromBits", simdCvtMask(64, 2), sys.AMD64) + addF(simdPackage, "Mask64x2FromBits", simdCvtVToMask(64, 2), sys.AMD64) + addF(simdPackage, "Mask64x2.ToBits", simdCvtMaskToV(64, 2), sys.AMD64) addF(simdPackage, "Mask64x4.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x4.AsMask64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask64x4.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Mask64x4.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "LoadMask64x4FromBits", simdLoadMask(64, 4), sys.AMD64) addF(simdPackage, "Mask64x4.StoreToBits", simdStoreMask(64, 4), sys.AMD64) - addF(simdPackage, "Mask64x4FromBits", simdCvtMask(64, 4), sys.AMD64) + addF(simdPackage, "Mask64x4FromBits", simdCvtVToMask(64, 4), sys.AMD64) + addF(simdPackage, "Mask64x4.ToBits", simdCvtMaskToV(64, 4), sys.AMD64) addF(simdPackage, "Mask64x8.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x8.AsMask64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask64x8.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask64x8.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "LoadMask64x8FromBits", simdLoadMask(64, 8), sys.AMD64) addF(simdPackage, "Mask64x8.StoreToBits", simdStoreMask(64, 8), sys.AMD64) - addF(simdPackage, "Mask64x8FromBits", simdCvtMask(64, 8), sys.AMD64) + addF(simdPackage, "Mask64x8FromBits", simdCvtVToMask(64, 8), sys.AMD64) + addF(simdPackage, "Mask64x8.ToBits", simdCvtMaskToV(64, 8), sys.AMD64) } diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index 
9e9b45b5b8e422..7776a8afdaa2e9 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -391,3 +391,13 @@ func TestBitMaskFromBits(t *testing.T) { } } } + +func TestBitMaskToBits(t *testing.T) { + if !simd.HasAVX512() { + t.Skip("Test requires HasAVX512, not available on this hardware") + return + } + if v := simd.LoadInt16x8Slice([]int16{-1, 0, -1, 0, 0, 0, 0, 0}).AsMask16x8().ToBits(); v != 0b101 { + t.Errorf("Want 0b101, got %b", v) + } +} diff --git a/src/simd/types_amd64.go b/src/simd/types_amd64.go index ac8cf3c210adde..f70a6a214b5f7a 100644 --- a/src/simd/types_amd64.go +++ b/src/simd/types_amd64.go @@ -320,9 +320,15 @@ func (x Mask8x16) StoreToBits(y *uint64) // Mask8x16FromBits constructs a Mask8x16 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 16 bits of y are used. // -// Asm: KMOVB, CPU Feature: AVX512" +// Asm: KMOVB, CPU Feature: AVX512 func Mask8x16FromBits(y uint16) Mask8x16 +// ToBits constructs a bitmap from a Mask8x16, where 1 means set for the indexed element, 0 means unset. +// Only the lower 16 bits of y are used. +// +// Asm: KMOVB, CPU Features: AVX512 +func (x Mask8x16) ToBits() uint16 + // Mask16x8 is a 128-bit SIMD vector of 8 int16 type Mask16x8 struct { int16x8 v128 @@ -348,9 +354,15 @@ func (x Mask16x8) StoreToBits(y *uint64) // Mask16x8FromBits constructs a Mask16x8 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 8 bits of y are used. // -// Asm: KMOVW, CPU Feature: AVX512" +// Asm: KMOVW, CPU Feature: AVX512 func Mask16x8FromBits(y uint8) Mask16x8 +// ToBits constructs a bitmap from a Mask16x8, where 1 means set for the indexed element, 0 means unset. +// Only the lower 8 bits of y are used. +// +// Asm: KMOVW, CPU Features: AVX512 +func (x Mask16x8) ToBits() uint8 + // Mask32x4 is a 128-bit SIMD vector of 4 int32 type Mask32x4 struct { int32x4 v128 @@ -376,9 +388,15 @@ func (x Mask32x4) StoreToBits(y *uint64) // Mask32x4FromBits constructs a Mask32x4 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 4 bits of y are used. // -// Asm: KMOVD, CPU Feature: AVX512" +// Asm: KMOVD, CPU Feature: AVX512 func Mask32x4FromBits(y uint8) Mask32x4 +// ToBits constructs a bitmap from a Mask32x4, where 1 means set for the indexed element, 0 means unset. +// Only the lower 4 bits of y are used. +// +// Asm: KMOVD, CPU Features: AVX512 +func (x Mask32x4) ToBits() uint8 + // Mask64x2 is a 128-bit SIMD vector of 2 int64 type Mask64x2 struct { int64x2 v128 @@ -404,9 +422,15 @@ func (x Mask64x2) StoreToBits(y *uint64) // Mask64x2FromBits constructs a Mask64x2 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 2 bits of y are used. // -// Asm: KMOVQ, CPU Feature: AVX512" +// Asm: KMOVQ, CPU Feature: AVX512 func Mask64x2FromBits(y uint8) Mask64x2 +// ToBits constructs a bitmap from a Mask64x2, where 1 means set for the indexed element, 0 means unset. +// Only the lower 2 bits of y are used. +// +// Asm: KMOVQ, CPU Features: AVX512 +func (x Mask64x2) ToBits() uint8 + // v256 is a tag type that tells the compiler that this is really 256-bit SIMD type v256 struct { _256 struct{} @@ -723,9 +747,15 @@ func (x Mask8x32) StoreToBits(y *uint64) // Mask8x32FromBits constructs a Mask8x32 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 32 bits of y are used. 
// -// Asm: KMOVB, CPU Feature: AVX512" +// Asm: KMOVB, CPU Feature: AVX512 func Mask8x32FromBits(y uint32) Mask8x32 +// ToBits constructs a bitmap from a Mask8x32, where 1 means set for the indexed element, 0 means unset. +// Only the lower 32 bits of y are used. +// +// Asm: KMOVB, CPU Features: AVX512 +func (x Mask8x32) ToBits() uint32 + // Mask16x16 is a 256-bit SIMD vector of 16 int16 type Mask16x16 struct { int16x16 v256 @@ -751,9 +781,15 @@ func (x Mask16x16) StoreToBits(y *uint64) // Mask16x16FromBits constructs a Mask16x16 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 16 bits of y are used. // -// Asm: KMOVW, CPU Feature: AVX512" +// Asm: KMOVW, CPU Feature: AVX512 func Mask16x16FromBits(y uint16) Mask16x16 +// ToBits constructs a bitmap from a Mask16x16, where 1 means set for the indexed element, 0 means unset. +// Only the lower 16 bits of y are used. +// +// Asm: KMOVW, CPU Features: AVX512 +func (x Mask16x16) ToBits() uint16 + // Mask32x8 is a 256-bit SIMD vector of 8 int32 type Mask32x8 struct { int32x8 v256 @@ -779,9 +815,15 @@ func (x Mask32x8) StoreToBits(y *uint64) // Mask32x8FromBits constructs a Mask32x8 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 8 bits of y are used. // -// Asm: KMOVD, CPU Feature: AVX512" +// Asm: KMOVD, CPU Feature: AVX512 func Mask32x8FromBits(y uint8) Mask32x8 +// ToBits constructs a bitmap from a Mask32x8, where 1 means set for the indexed element, 0 means unset. +// Only the lower 8 bits of y are used. +// +// Asm: KMOVD, CPU Features: AVX512 +func (x Mask32x8) ToBits() uint8 + // Mask64x4 is a 256-bit SIMD vector of 4 int64 type Mask64x4 struct { int64x4 v256 @@ -807,9 +849,15 @@ func (x Mask64x4) StoreToBits(y *uint64) // Mask64x4FromBits constructs a Mask64x4 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 4 bits of y are used. // -// Asm: KMOVQ, CPU Feature: AVX512" +// Asm: KMOVQ, CPU Feature: AVX512 func Mask64x4FromBits(y uint8) Mask64x4 +// ToBits constructs a bitmap from a Mask64x4, where 1 means set for the indexed element, 0 means unset. +// Only the lower 4 bits of y are used. +// +// Asm: KMOVQ, CPU Features: AVX512 +func (x Mask64x4) ToBits() uint8 + // v512 is a tag type that tells the compiler that this is really 512-bit SIMD type v512 struct { _512 struct{} @@ -1190,9 +1238,15 @@ func (x Mask8x64) StoreToBits(y *uint64) // Mask8x64FromBits constructs a Mask8x64 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 64 bits of y are used. // -// Asm: KMOVB, CPU Feature: AVX512" +// Asm: KMOVB, CPU Feature: AVX512 func Mask8x64FromBits(y uint64) Mask8x64 +// ToBits constructs a bitmap from a Mask8x64, where 1 means set for the indexed element, 0 means unset. +// Only the lower 64 bits of y are used. +// +// Asm: KMOVB, CPU Features: AVX512 +func (x Mask8x64) ToBits() uint64 + // Mask16x32 is a 512-bit SIMD vector of 32 int16 type Mask16x32 struct { int16x32 v512 @@ -1218,9 +1272,15 @@ func (x Mask16x32) StoreToBits(y *uint64) // Mask16x32FromBits constructs a Mask16x32 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 32 bits of y are used. // -// Asm: KMOVW, CPU Feature: AVX512" +// Asm: KMOVW, CPU Feature: AVX512 func Mask16x32FromBits(y uint32) Mask16x32 +// ToBits constructs a bitmap from a Mask16x32, where 1 means set for the indexed element, 0 means unset. 
+// Only the lower 32 bits of y are used. +// +// Asm: KMOVW, CPU Features: AVX512 +func (x Mask16x32) ToBits() uint32 + // Mask32x16 is a 512-bit SIMD vector of 16 int32 type Mask32x16 struct { int32x16 v512 @@ -1246,9 +1306,15 @@ func (x Mask32x16) StoreToBits(y *uint64) // Mask32x16FromBits constructs a Mask32x16 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 16 bits of y are used. // -// Asm: KMOVD, CPU Feature: AVX512" +// Asm: KMOVD, CPU Feature: AVX512 func Mask32x16FromBits(y uint16) Mask32x16 +// ToBits constructs a bitmap from a Mask32x16, where 1 means set for the indexed element, 0 means unset. +// Only the lower 16 bits of y are used. +// +// Asm: KMOVD, CPU Features: AVX512 +func (x Mask32x16) ToBits() uint16 + // Mask64x8 is a 512-bit SIMD vector of 8 int64 type Mask64x8 struct { int64x8 v512 @@ -1274,5 +1340,11 @@ func (x Mask64x8) StoreToBits(y *uint64) // Mask64x8FromBits constructs a Mask64x8 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 8 bits of y are used. // -// Asm: KMOVQ, CPU Feature: AVX512" +// Asm: KMOVQ, CPU Feature: AVX512 func Mask64x8FromBits(y uint8) Mask64x8 + +// ToBits constructs a bitmap from a Mask64x8, where 1 means set for the indexed element, 0 means unset. +// Only the lower 8 bits of y are used. +// +// Asm: KMOVQ, CPU Features: AVX512 +func (x Mask64x8) ToBits() uint8 From 8eb5f6020e707672a846f0f83011b87e48039550 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Thu, 7 Aug 2025 17:05:50 +0000 Subject: [PATCH 117/139] [dev.simd] cmd/compile, simd: API interface fixes - Absolute -> Abs - ApproximateReciprocal -> Reciprocal - Other derived apis also changed. - Round -> RoundToEven - Other derived apis also changed. - Drop DotProdBroadcast - Fused(Mul|Add)(Mul|Add)? -> remove the "Fused" - MulEvenWiden -> remove 64bit - MulLow -> Mul, add unit - PairDotProd -> DotProdPairs - make AddDotProdPairs machine ops only - peepholes will be in another CL at dev.simd. - PopCount -> OnesCount - Saturated* -> *Saturated - Fix (Add|Sub)Saturated uint mappings. - UnsignedSignedQuadDotProdAccumulate -> AddDotProdQuadruple - The "DotProdQuadruple" instruction does not exist, so no peepholes for this. This CL is generated by CL 694095. 
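
(Editorial illustration, not part of the patch: roughly how user code changes under this rename. The method names below are inferred from the new rewrite rules added in this CL, e.g. AbsInt16x8, AddSaturatedInt16x8, DotProdPairsInt16x8, and from the package's per-lane-type method convention; they assume the GOEXPERIMENT=simd-gated package and should be read as a sketch, not the final API.)

    package example

    import "simd"

    // Hypothetical before/after sketch of the renames listed above.
    // Names are inferred from the SSA op renames in this CL, not copied
    // verbatim from the generated simd API.
    func renamed(a, b simd.Int16x8) {
        _ = a.Abs()           // was a.Absolute()
        _ = a.AddSaturated(b) // was a.SaturatedAdd(b)
        _ = a.DotProdPairs(b) // was a.PairDotProd(b)
    }
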
Change-Id: If4110cc04ab96240cf56f2348d35ed2a719687de Reviewed-on: https://go-review.googlesource.com/c/go/+/694115 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/simdssa.go | 308 +- .../compile/internal/ssa/_gen/simdAMD64.rules | 493 ++- .../compile/internal/ssa/_gen/simdAMD64ops.go | 41 +- .../internal/ssa/_gen/simdgenericOps.go | 437 +- src/cmd/compile/internal/ssa/opGen.go | 2629 ++++++------ src/cmd/compile/internal/ssa/rewriteAMD64.go | 3513 ++++++++--------- .../compile/internal/ssagen/simdintrinsics.go | 437 +- src/simd/ops_amd64.go | 2387 ++++++----- src/simd/simd_test.go | 19 - src/simd/ternary_test.go | 12 +- src/simd/unary_test.go | 32 +- 11 files changed, 5064 insertions(+), 5244 deletions(-) diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index b778cd7994a7b3..274602c0a757ce 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -24,18 +24,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPABSQ128, ssa.OpAMD64VPABSQ256, ssa.OpAMD64VPABSQ512, - ssa.OpAMD64VRCPPS128, - ssa.OpAMD64VRCPPS256, - ssa.OpAMD64VRCP14PS512, - ssa.OpAMD64VRCP14PD128, - ssa.OpAMD64VRCP14PD256, - ssa.OpAMD64VRCP14PD512, - ssa.OpAMD64VRSQRTPS128, - ssa.OpAMD64VRSQRTPS256, - ssa.OpAMD64VRSQRT14PS512, - ssa.OpAMD64VRSQRT14PD128, - ssa.OpAMD64VRSQRT14PD256, - ssa.OpAMD64VRSQRT14PD512, ssa.OpAMD64VCVTTPS2DQ128, ssa.OpAMD64VCVTTPS2DQ256, ssa.OpAMD64VCVTTPS2DQ512, @@ -54,6 +42,18 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPOPCNTQ128, ssa.OpAMD64VPOPCNTQ256, ssa.OpAMD64VPOPCNTQ512, + ssa.OpAMD64VRCPPS128, + ssa.OpAMD64VRCPPS256, + ssa.OpAMD64VRCP14PS512, + ssa.OpAMD64VRCP14PD128, + ssa.OpAMD64VRCP14PD256, + ssa.OpAMD64VRCP14PD512, + ssa.OpAMD64VRSQRTPS128, + ssa.OpAMD64VRSQRTPS256, + ssa.OpAMD64VRSQRT14PS512, + ssa.OpAMD64VRSQRT14PD128, + ssa.OpAMD64VRSQRT14PD256, + ssa.OpAMD64VRSQRT14PD512, ssa.OpAMD64VSQRTPS128, ssa.OpAMD64VSQRTPS256, ssa.OpAMD64VSQRTPS512, @@ -96,6 +96,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPADDSW128, ssa.OpAMD64VPADDSW256, ssa.OpAMD64VPADDSW512, + ssa.OpAMD64VPADDUSB128, + ssa.OpAMD64VPADDUSB256, + ssa.OpAMD64VPADDUSB512, + ssa.OpAMD64VPADDUSW128, + ssa.OpAMD64VPADDUSW256, + ssa.OpAMD64VPADDUSW512, ssa.OpAMD64VADDSUBPS128, ssa.OpAMD64VADDSUBPS256, ssa.OpAMD64VADDSUBPD128, @@ -114,12 +120,24 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPAVGW128, ssa.OpAMD64VPAVGW256, ssa.OpAMD64VPAVGW512, + ssa.OpAMD64VPSIGNB128, + ssa.OpAMD64VPSIGNB256, + ssa.OpAMD64VPSIGNW128, + ssa.OpAMD64VPSIGNW256, + ssa.OpAMD64VPSIGND128, + ssa.OpAMD64VPSIGND256, ssa.OpAMD64VDIVPS128, ssa.OpAMD64VDIVPS256, ssa.OpAMD64VDIVPS512, ssa.OpAMD64VDIVPD128, ssa.OpAMD64VDIVPD256, ssa.OpAMD64VDIVPD512, + ssa.OpAMD64VPMADDWD128, + ssa.OpAMD64VPMADDWD256, + ssa.OpAMD64VPMADDWD512, + ssa.OpAMD64VPMADDUBSW128, + ssa.OpAMD64VPMADDUBSW256, + ssa.OpAMD64VPMADDUBSW512, ssa.OpAMD64VPCMPEQB128, ssa.OpAMD64VPCMPEQB256, ssa.OpAMD64VPCMPEQW128, @@ -216,23 +234,15 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMULLQ512, ssa.OpAMD64VPMULDQ128, ssa.OpAMD64VPMULDQ256, - ssa.OpAMD64VPMULDQ512, ssa.OpAMD64VPMULUDQ128, ssa.OpAMD64VPMULUDQ256, - ssa.OpAMD64VPMULUDQ512, - ssa.OpAMD64VPMULHW128, - ssa.OpAMD64VPMULHW256, - ssa.OpAMD64VPMULHW512, ssa.OpAMD64VPMULHUW128, ssa.OpAMD64VPMULHUW256, - ssa.OpAMD64VPMULHUW512, + ssa.OpAMD64VPMULHW512, ssa.OpAMD64VPOR128, 
ssa.OpAMD64VPOR256, ssa.OpAMD64VPORD512, ssa.OpAMD64VPORQ512, - ssa.OpAMD64VPMADDWD128, - ssa.OpAMD64VPMADDWD256, - ssa.OpAMD64VPMADDWD512, ssa.OpAMD64VPERMB128, ssa.OpAMD64VPERMB256, ssa.OpAMD64VPERMB512, @@ -259,9 +269,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPRORVQ128, ssa.OpAMD64VPRORVQ256, ssa.OpAMD64VPRORVQ512, - ssa.OpAMD64VPMADDUBSW128, - ssa.OpAMD64VPMADDUBSW256, - ssa.OpAMD64VPMADDUBSW512, ssa.OpAMD64VSCALEFPS128, ssa.OpAMD64VSCALEFPS256, ssa.OpAMD64VSCALEFPS512, @@ -295,12 +302,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSRLVQ128, ssa.OpAMD64VPSRLVQ256, ssa.OpAMD64VPSRLVQ512, - ssa.OpAMD64VPSIGNB128, - ssa.OpAMD64VPSIGNB256, - ssa.OpAMD64VPSIGNW128, - ssa.OpAMD64VPSIGNW256, - ssa.OpAMD64VPSIGND128, - ssa.OpAMD64VPSIGND256, ssa.OpAMD64VSUBPS128, ssa.OpAMD64VSUBPS256, ssa.OpAMD64VSUBPS512, @@ -335,6 +336,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSUBSW128, ssa.OpAMD64VPSUBSW256, ssa.OpAMD64VPSUBSW512, + ssa.OpAMD64VPSUBUSB128, + ssa.OpAMD64VPSUBUSB256, + ssa.OpAMD64VPSUBUSB512, + ssa.OpAMD64VPSUBUSW128, + ssa.OpAMD64VPSUBUSW256, + ssa.OpAMD64VPSUBUSW512, ssa.OpAMD64VPXOR128, ssa.OpAMD64VPXOR256, ssa.OpAMD64VPXORD512, @@ -375,6 +382,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPADDSWMasked128, ssa.OpAMD64VPADDSWMasked256, ssa.OpAMD64VPADDSWMasked512, + ssa.OpAMD64VPADDUSBMasked128, + ssa.OpAMD64VPADDUSBMasked256, + ssa.OpAMD64VPADDUSBMasked512, + ssa.OpAMD64VPADDUSWMasked128, + ssa.OpAMD64VPADDUSWMasked256, + ssa.OpAMD64VPADDUSWMasked512, ssa.OpAMD64VPANDDMasked128, ssa.OpAMD64VPANDDMasked256, ssa.OpAMD64VPANDDMasked512, @@ -399,6 +412,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VDIVPDMasked128, ssa.OpAMD64VDIVPDMasked256, ssa.OpAMD64VDIVPDMasked512, + ssa.OpAMD64VPMADDWDMasked128, + ssa.OpAMD64VPMADDWDMasked256, + ssa.OpAMD64VPMADDWDMasked512, + ssa.OpAMD64VPMADDUBSWMasked128, + ssa.OpAMD64VPMADDUBSWMasked256, + ssa.OpAMD64VPMADDUBSWMasked512, ssa.OpAMD64VGF2P8MULBMasked128, ssa.OpAMD64VGF2P8MULBMasked256, ssa.OpAMD64VGF2P8MULBMasked512, @@ -462,17 +481,8 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMINUQMasked128, ssa.OpAMD64VPMINUQMasked256, ssa.OpAMD64VPMINUQMasked512, - ssa.OpAMD64VPMULDQMasked128, - ssa.OpAMD64VPMULDQMasked256, - ssa.OpAMD64VPMULDQMasked512, - ssa.OpAMD64VPMULUDQMasked128, - ssa.OpAMD64VPMULUDQMasked256, - ssa.OpAMD64VPMULUDQMasked512, - ssa.OpAMD64VPMULHWMasked128, - ssa.OpAMD64VPMULHWMasked256, - ssa.OpAMD64VPMULHWMasked512, ssa.OpAMD64VPMULHUWMasked128, - ssa.OpAMD64VPMULHUWMasked256, + ssa.OpAMD64VPMULHWMasked256, ssa.OpAMD64VPMULHUWMasked512, ssa.OpAMD64VMULPSMasked128, ssa.OpAMD64VMULPSMasked256, @@ -495,9 +505,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPORQMasked128, ssa.OpAMD64VPORQMasked256, ssa.OpAMD64VPORQMasked512, - ssa.OpAMD64VPMADDWDMasked128, - ssa.OpAMD64VPMADDWDMasked256, - ssa.OpAMD64VPMADDWDMasked512, ssa.OpAMD64VPERMBMasked128, ssa.OpAMD64VPERMBMasked256, ssa.OpAMD64VPERMBMasked512, @@ -524,9 +531,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPRORVQMasked128, ssa.OpAMD64VPRORVQMasked256, ssa.OpAMD64VPRORVQMasked512, - ssa.OpAMD64VPMADDUBSWMasked128, - ssa.OpAMD64VPMADDUBSWMasked256, - ssa.OpAMD64VPMADDUBSWMasked512, ssa.OpAMD64VSCALEFPSMasked128, ssa.OpAMD64VSCALEFPSMasked256, ssa.OpAMD64VSCALEFPSMasked512, @@ -584,6 +588,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool 
{ ssa.OpAMD64VPSUBSWMasked128, ssa.OpAMD64VPSUBSWMasked256, ssa.OpAMD64VPSUBSWMasked512, + ssa.OpAMD64VPSUBUSBMasked128, + ssa.OpAMD64VPSUBUSBMasked256, + ssa.OpAMD64VPSUBUSBMasked512, + ssa.OpAMD64VPSUBUSWMasked128, + ssa.OpAMD64VPSUBUSWMasked256, + ssa.OpAMD64VPSUBUSWMasked512, ssa.OpAMD64VPXORDMasked128, ssa.OpAMD64VPXORDMasked256, ssa.OpAMD64VPXORDMasked512, @@ -608,18 +618,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPABSQMasked128, ssa.OpAMD64VPABSQMasked256, ssa.OpAMD64VPABSQMasked512, - ssa.OpAMD64VRCP14PSMasked128, - ssa.OpAMD64VRCP14PSMasked256, - ssa.OpAMD64VRCP14PSMasked512, - ssa.OpAMD64VRCP14PDMasked128, - ssa.OpAMD64VRCP14PDMasked256, - ssa.OpAMD64VRCP14PDMasked512, - ssa.OpAMD64VRSQRT14PSMasked128, - ssa.OpAMD64VRSQRT14PSMasked256, - ssa.OpAMD64VRSQRT14PSMasked512, - ssa.OpAMD64VRSQRT14PDMasked128, - ssa.OpAMD64VRSQRT14PDMasked256, - ssa.OpAMD64VRSQRT14PDMasked512, ssa.OpAMD64VCOMPRESSPSMasked128, ssa.OpAMD64VCOMPRESSPSMasked256, ssa.OpAMD64VCOMPRESSPSMasked512, @@ -674,6 +672,18 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPOPCNTQMasked128, ssa.OpAMD64VPOPCNTQMasked256, ssa.OpAMD64VPOPCNTQMasked512, + ssa.OpAMD64VRCP14PSMasked128, + ssa.OpAMD64VRCP14PSMasked256, + ssa.OpAMD64VRCP14PSMasked512, + ssa.OpAMD64VRCP14PDMasked128, + ssa.OpAMD64VRCP14PDMasked256, + ssa.OpAMD64VRCP14PDMasked512, + ssa.OpAMD64VRSQRT14PSMasked128, + ssa.OpAMD64VRSQRT14PSMasked256, + ssa.OpAMD64VRSQRT14PSMasked512, + ssa.OpAMD64VRSQRT14PDMasked128, + ssa.OpAMD64VRSQRT14PDMasked256, + ssa.OpAMD64VRSQRT14PDMasked512, ssa.OpAMD64VSQRTPSMasked128, ssa.OpAMD64VSQRTPSMasked256, ssa.OpAMD64VSQRTPSMasked512, @@ -800,10 +810,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSRAQMasked512const: p = simdVkvImm8(s, v) - case ssa.OpAMD64VDPPS128, - ssa.OpAMD64VDPPS256, - ssa.OpAMD64VDPPD128, - ssa.OpAMD64VCMPPS128, + case ssa.OpAMD64VCMPPS128, ssa.OpAMD64VCMPPS256, ssa.OpAMD64VCMPPD128, ssa.OpAMD64VCMPPD256, @@ -900,6 +907,15 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { case ssa.OpAMD64VPDPWSSD128, ssa.OpAMD64VPDPWSSD256, ssa.OpAMD64VPDPWSSD512, + ssa.OpAMD64VPDPWSSDS128, + ssa.OpAMD64VPDPWSSDS256, + ssa.OpAMD64VPDPWSSDS512, + ssa.OpAMD64VPDPBUSD128, + ssa.OpAMD64VPDPBUSD256, + ssa.OpAMD64VPDPBUSD512, + ssa.OpAMD64VPDPBUSDS128, + ssa.OpAMD64VPDPBUSDS256, + ssa.OpAMD64VPDPBUSDS512, ssa.OpAMD64VFMADD213PS128, ssa.OpAMD64VFMADD213PS256, ssa.OpAMD64VFMADD213PS512, @@ -936,12 +952,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPERMI2Q256, ssa.OpAMD64VPERMI2PD512, ssa.OpAMD64VPERMI2Q512, - ssa.OpAMD64VPDPWSSDS128, - ssa.OpAMD64VPDPWSSDS256, - ssa.OpAMD64VPDPWSSDS512, - ssa.OpAMD64VPDPBUSDS128, - ssa.OpAMD64VPDPBUSDS256, - ssa.OpAMD64VPDPBUSDS512, ssa.OpAMD64VPSHLDVW128, ssa.OpAMD64VPSHLDVW256, ssa.OpAMD64VPSHLDVW512, @@ -959,15 +969,21 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSHRDVD512, ssa.OpAMD64VPSHRDVQ128, ssa.OpAMD64VPSHRDVQ256, - ssa.OpAMD64VPSHRDVQ512, - ssa.OpAMD64VPDPBUSD128, - ssa.OpAMD64VPDPBUSD256, - ssa.OpAMD64VPDPBUSD512: + ssa.OpAMD64VPSHRDVQ512: p = simdV31ResultInArg0(s, v) case ssa.OpAMD64VPDPWSSDMasked128, ssa.OpAMD64VPDPWSSDMasked256, ssa.OpAMD64VPDPWSSDMasked512, + ssa.OpAMD64VPDPWSSDSMasked128, + ssa.OpAMD64VPDPWSSDSMasked256, + ssa.OpAMD64VPDPWSSDSMasked512, + ssa.OpAMD64VPDPBUSDMasked128, + ssa.OpAMD64VPDPBUSDMasked256, + ssa.OpAMD64VPDPBUSDMasked512, + ssa.OpAMD64VPDPBUSDSMasked128, + ssa.OpAMD64VPDPBUSDSMasked256, + 
ssa.OpAMD64VPDPBUSDSMasked512, ssa.OpAMD64VFMADD213PSMasked128, ssa.OpAMD64VFMADD213PSMasked256, ssa.OpAMD64VFMADD213PSMasked512, @@ -1004,12 +1020,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPERMI2QMasked256, ssa.OpAMD64VPERMI2PDMasked512, ssa.OpAMD64VPERMI2QMasked512, - ssa.OpAMD64VPDPWSSDSMasked128, - ssa.OpAMD64VPDPWSSDSMasked256, - ssa.OpAMD64VPDPWSSDSMasked512, - ssa.OpAMD64VPDPBUSDSMasked128, - ssa.OpAMD64VPDPBUSDSMasked256, - ssa.OpAMD64VPDPBUSDSMasked512, ssa.OpAMD64VPSHLDVWMasked128, ssa.OpAMD64VPSHLDVWMasked256, ssa.OpAMD64VPSHLDVWMasked512, @@ -1027,10 +1037,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSHRDVDMasked512, ssa.OpAMD64VPSHRDVQMasked128, ssa.OpAMD64VPSHRDVQMasked256, - ssa.OpAMD64VPSHRDVQMasked512, - ssa.OpAMD64VPDPBUSDMasked128, - ssa.OpAMD64VPDPBUSDMasked256, - ssa.OpAMD64VPDPBUSDMasked512: + ssa.OpAMD64VPSHRDVQMasked512: p = simdV3kvResultInArg0(s, v) case ssa.OpAMD64VPSLLW128, @@ -1151,6 +1158,15 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPDPWSSDMasked128, ssa.OpAMD64VPDPWSSDMasked256, ssa.OpAMD64VPDPWSSDMasked512, + ssa.OpAMD64VPDPWSSDSMasked128, + ssa.OpAMD64VPDPWSSDSMasked256, + ssa.OpAMD64VPDPWSSDSMasked512, + ssa.OpAMD64VPDPBUSDMasked128, + ssa.OpAMD64VPDPBUSDMasked256, + ssa.OpAMD64VPDPBUSDMasked512, + ssa.OpAMD64VPDPBUSDSMasked128, + ssa.OpAMD64VPDPBUSDSMasked256, + ssa.OpAMD64VPDPBUSDSMasked512, ssa.OpAMD64VADDPSMasked128, ssa.OpAMD64VADDPSMasked256, ssa.OpAMD64VADDPSMasked512, @@ -1175,6 +1191,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPADDSWMasked128, ssa.OpAMD64VPADDSWMasked256, ssa.OpAMD64VPADDSWMasked512, + ssa.OpAMD64VPADDUSBMasked128, + ssa.OpAMD64VPADDUSBMasked256, + ssa.OpAMD64VPADDUSBMasked512, + ssa.OpAMD64VPADDUSWMasked128, + ssa.OpAMD64VPADDUSWMasked256, + ssa.OpAMD64VPADDUSWMasked512, ssa.OpAMD64VPANDDMasked128, ssa.OpAMD64VPANDDMasked256, ssa.OpAMD64VPANDDMasked512, @@ -1187,18 +1209,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPANDNQMasked128, ssa.OpAMD64VPANDNQMasked256, ssa.OpAMD64VPANDNQMasked512, - ssa.OpAMD64VRCP14PSMasked128, - ssa.OpAMD64VRCP14PSMasked256, - ssa.OpAMD64VRCP14PSMasked512, - ssa.OpAMD64VRCP14PDMasked128, - ssa.OpAMD64VRCP14PDMasked256, - ssa.OpAMD64VRCP14PDMasked512, - ssa.OpAMD64VRSQRT14PSMasked128, - ssa.OpAMD64VRSQRT14PSMasked256, - ssa.OpAMD64VRSQRT14PSMasked512, - ssa.OpAMD64VRSQRT14PDMasked128, - ssa.OpAMD64VRSQRT14PDMasked256, - ssa.OpAMD64VRSQRT14PDMasked512, ssa.OpAMD64VPAVGBMasked128, ssa.OpAMD64VPAVGBMasked256, ssa.OpAMD64VPAVGBMasked512, @@ -1247,6 +1257,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VDIVPDMasked128, ssa.OpAMD64VDIVPDMasked256, ssa.OpAMD64VDIVPDMasked512, + ssa.OpAMD64VPMADDWDMasked128, + ssa.OpAMD64VPMADDWDMasked256, + ssa.OpAMD64VPMADDWDMasked512, + ssa.OpAMD64VPMADDUBSWMasked128, + ssa.OpAMD64VPMADDUBSWMasked256, + ssa.OpAMD64VPMADDUBSWMasked512, ssa.OpAMD64VEXPANDPSMasked128, ssa.OpAMD64VEXPANDPSMasked256, ssa.OpAMD64VEXPANDPSMasked512, @@ -1265,24 +1281,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPEXPANDQMasked128, ssa.OpAMD64VPEXPANDQMasked256, ssa.OpAMD64VPEXPANDQMasked512, - ssa.OpAMD64VFMADD213PSMasked128, - ssa.OpAMD64VFMADD213PSMasked256, - ssa.OpAMD64VFMADD213PSMasked512, - ssa.OpAMD64VFMADD213PDMasked128, - ssa.OpAMD64VFMADD213PDMasked256, - ssa.OpAMD64VFMADD213PDMasked512, - ssa.OpAMD64VFMADDSUB213PSMasked128, - ssa.OpAMD64VFMADDSUB213PSMasked256, - 
ssa.OpAMD64VFMADDSUB213PSMasked512, - ssa.OpAMD64VFMADDSUB213PDMasked128, - ssa.OpAMD64VFMADDSUB213PDMasked256, - ssa.OpAMD64VFMADDSUB213PDMasked512, - ssa.OpAMD64VFMSUBADD213PSMasked128, - ssa.OpAMD64VFMSUBADD213PSMasked256, - ssa.OpAMD64VFMSUBADD213PSMasked512, - ssa.OpAMD64VFMSUBADD213PDMasked128, - ssa.OpAMD64VFMSUBADD213PDMasked256, - ssa.OpAMD64VFMSUBADD213PDMasked512, ssa.OpAMD64VGF2P8AFFINEINVQBMasked128, ssa.OpAMD64VGF2P8AFFINEINVQBMasked256, ssa.OpAMD64VGF2P8AFFINEINVQBMasked512, @@ -1352,17 +1350,20 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMINUQMasked128, ssa.OpAMD64VPMINUQMasked256, ssa.OpAMD64VPMINUQMasked512, - ssa.OpAMD64VPMULDQMasked128, - ssa.OpAMD64VPMULDQMasked256, - ssa.OpAMD64VPMULDQMasked512, - ssa.OpAMD64VPMULUDQMasked128, - ssa.OpAMD64VPMULUDQMasked256, - ssa.OpAMD64VPMULUDQMasked512, - ssa.OpAMD64VPMULHWMasked128, - ssa.OpAMD64VPMULHWMasked256, - ssa.OpAMD64VPMULHWMasked512, + ssa.OpAMD64VFMADD213PSMasked128, + ssa.OpAMD64VFMADD213PSMasked256, + ssa.OpAMD64VFMADD213PSMasked512, + ssa.OpAMD64VFMADD213PDMasked128, + ssa.OpAMD64VFMADD213PDMasked256, + ssa.OpAMD64VFMADD213PDMasked512, + ssa.OpAMD64VFMADDSUB213PSMasked128, + ssa.OpAMD64VFMADDSUB213PSMasked256, + ssa.OpAMD64VFMADDSUB213PSMasked512, + ssa.OpAMD64VFMADDSUB213PDMasked128, + ssa.OpAMD64VFMADDSUB213PDMasked256, + ssa.OpAMD64VFMADDSUB213PDMasked512, ssa.OpAMD64VPMULHUWMasked128, - ssa.OpAMD64VPMULHUWMasked256, + ssa.OpAMD64VPMULHWMasked256, ssa.OpAMD64VPMULHUWMasked512, ssa.OpAMD64VMULPSMasked128, ssa.OpAMD64VMULPSMasked256, @@ -1379,15 +1380,30 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMULLQMasked128, ssa.OpAMD64VPMULLQMasked256, ssa.OpAMD64VPMULLQMasked512, + ssa.OpAMD64VFMSUBADD213PSMasked128, + ssa.OpAMD64VFMSUBADD213PSMasked256, + ssa.OpAMD64VFMSUBADD213PSMasked512, + ssa.OpAMD64VFMSUBADD213PDMasked128, + ssa.OpAMD64VFMSUBADD213PDMasked256, + ssa.OpAMD64VFMSUBADD213PDMasked512, + ssa.OpAMD64VPOPCNTBMasked128, + ssa.OpAMD64VPOPCNTBMasked256, + ssa.OpAMD64VPOPCNTBMasked512, + ssa.OpAMD64VPOPCNTWMasked128, + ssa.OpAMD64VPOPCNTWMasked256, + ssa.OpAMD64VPOPCNTWMasked512, + ssa.OpAMD64VPOPCNTDMasked128, + ssa.OpAMD64VPOPCNTDMasked256, + ssa.OpAMD64VPOPCNTDMasked512, + ssa.OpAMD64VPOPCNTQMasked128, + ssa.OpAMD64VPOPCNTQMasked256, + ssa.OpAMD64VPOPCNTQMasked512, ssa.OpAMD64VPORDMasked128, ssa.OpAMD64VPORDMasked256, ssa.OpAMD64VPORDMasked512, ssa.OpAMD64VPORQMasked128, ssa.OpAMD64VPORQMasked256, ssa.OpAMD64VPORQMasked512, - ssa.OpAMD64VPMADDWDMasked128, - ssa.OpAMD64VPMADDWDMasked256, - ssa.OpAMD64VPMADDWDMasked512, ssa.OpAMD64VPERMI2BMasked128, ssa.OpAMD64VPERMI2BMasked256, ssa.OpAMD64VPERMI2BMasked512, @@ -1420,18 +1436,18 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPERMQMasked256, ssa.OpAMD64VPERMPDMasked512, ssa.OpAMD64VPERMQMasked512, - ssa.OpAMD64VPOPCNTBMasked128, - ssa.OpAMD64VPOPCNTBMasked256, - ssa.OpAMD64VPOPCNTBMasked512, - ssa.OpAMD64VPOPCNTWMasked128, - ssa.OpAMD64VPOPCNTWMasked256, - ssa.OpAMD64VPOPCNTWMasked512, - ssa.OpAMD64VPOPCNTDMasked128, - ssa.OpAMD64VPOPCNTDMasked256, - ssa.OpAMD64VPOPCNTDMasked512, - ssa.OpAMD64VPOPCNTQMasked128, - ssa.OpAMD64VPOPCNTQMasked256, - ssa.OpAMD64VPOPCNTQMasked512, + ssa.OpAMD64VRCP14PSMasked128, + ssa.OpAMD64VRCP14PSMasked256, + ssa.OpAMD64VRCP14PSMasked512, + ssa.OpAMD64VRCP14PDMasked128, + ssa.OpAMD64VRCP14PDMasked256, + ssa.OpAMD64VRCP14PDMasked512, + ssa.OpAMD64VRSQRT14PSMasked128, + ssa.OpAMD64VRSQRT14PSMasked256, + ssa.OpAMD64VRSQRT14PSMasked512, + 
ssa.OpAMD64VRSQRT14PDMasked128, + ssa.OpAMD64VRSQRT14PDMasked256, + ssa.OpAMD64VRSQRT14PDMasked512, ssa.OpAMD64VPROLDMasked128, ssa.OpAMD64VPROLDMasked256, ssa.OpAMD64VPROLDMasked512, @@ -1456,15 +1472,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPRORVQMasked128, ssa.OpAMD64VPRORVQMasked256, ssa.OpAMD64VPRORVQMasked512, - ssa.OpAMD64VPDPWSSDSMasked128, - ssa.OpAMD64VPDPWSSDSMasked256, - ssa.OpAMD64VPDPWSSDSMasked512, - ssa.OpAMD64VPMADDUBSWMasked128, - ssa.OpAMD64VPMADDUBSWMasked256, - ssa.OpAMD64VPMADDUBSWMasked512, - ssa.OpAMD64VPDPBUSDSMasked128, - ssa.OpAMD64VPDPBUSDSMasked256, - ssa.OpAMD64VPDPBUSDSMasked512, ssa.OpAMD64VSCALEFPSMasked128, ssa.OpAMD64VSCALEFPSMasked256, ssa.OpAMD64VSCALEFPSMasked512, @@ -1591,9 +1598,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSUBSWMasked128, ssa.OpAMD64VPSUBSWMasked256, ssa.OpAMD64VPSUBSWMasked512, - ssa.OpAMD64VPDPBUSDMasked128, - ssa.OpAMD64VPDPBUSDMasked256, - ssa.OpAMD64VPDPBUSDMasked512, + ssa.OpAMD64VPSUBUSBMasked128, + ssa.OpAMD64VPSUBUSBMasked256, + ssa.OpAMD64VPSUBUSBMasked512, + ssa.OpAMD64VPSUBUSWMasked128, + ssa.OpAMD64VPSUBUSWMasked256, + ssa.OpAMD64VPSUBUSWMasked512, ssa.OpAMD64VPXORDMasked128, ssa.OpAMD64VPXORDMasked256, ssa.OpAMD64VPXORDMasked512, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index ae29a9117ea16c..e294836cd26cc2 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -1,29 +1,29 @@ // Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. -(AbsoluteInt8x16 ...) => (VPABSB128 ...) -(AbsoluteInt8x32 ...) => (VPABSB256 ...) -(AbsoluteInt8x64 ...) => (VPABSB512 ...) -(AbsoluteInt16x8 ...) => (VPABSW128 ...) -(AbsoluteInt16x16 ...) => (VPABSW256 ...) -(AbsoluteInt16x32 ...) => (VPABSW512 ...) -(AbsoluteInt32x4 ...) => (VPABSD128 ...) -(AbsoluteInt32x8 ...) => (VPABSD256 ...) -(AbsoluteInt32x16 ...) => (VPABSD512 ...) -(AbsoluteInt64x2 ...) => (VPABSQ128 ...) -(AbsoluteInt64x4 ...) => (VPABSQ256 ...) -(AbsoluteInt64x8 ...) => (VPABSQ512 ...) -(AbsoluteMaskedInt8x16 x mask) => (VPABSBMasked128 x (VPMOVVec8x16ToM mask)) -(AbsoluteMaskedInt8x32 x mask) => (VPABSBMasked256 x (VPMOVVec8x32ToM mask)) -(AbsoluteMaskedInt8x64 x mask) => (VPABSBMasked512 x (VPMOVVec8x64ToM mask)) -(AbsoluteMaskedInt16x8 x mask) => (VPABSWMasked128 x (VPMOVVec16x8ToM mask)) -(AbsoluteMaskedInt16x16 x mask) => (VPABSWMasked256 x (VPMOVVec16x16ToM mask)) -(AbsoluteMaskedInt16x32 x mask) => (VPABSWMasked512 x (VPMOVVec16x32ToM mask)) -(AbsoluteMaskedInt32x4 x mask) => (VPABSDMasked128 x (VPMOVVec32x4ToM mask)) -(AbsoluteMaskedInt32x8 x mask) => (VPABSDMasked256 x (VPMOVVec32x8ToM mask)) -(AbsoluteMaskedInt32x16 x mask) => (VPABSDMasked512 x (VPMOVVec32x16ToM mask)) -(AbsoluteMaskedInt64x2 x mask) => (VPABSQMasked128 x (VPMOVVec64x2ToM mask)) -(AbsoluteMaskedInt64x4 x mask) => (VPABSQMasked256 x (VPMOVVec64x4ToM mask)) -(AbsoluteMaskedInt64x8 x mask) => (VPABSQMasked512 x (VPMOVVec64x8ToM mask)) +(AbsInt8x16 ...) => (VPABSB128 ...) +(AbsInt8x32 ...) => (VPABSB256 ...) +(AbsInt8x64 ...) => (VPABSB512 ...) +(AbsInt16x8 ...) => (VPABSW128 ...) +(AbsInt16x16 ...) => (VPABSW256 ...) +(AbsInt16x32 ...) => (VPABSW512 ...) +(AbsInt32x4 ...) => (VPABSD128 ...) +(AbsInt32x8 ...) => (VPABSD256 ...) +(AbsInt32x16 ...) => (VPABSD512 ...) +(AbsInt64x2 ...) => (VPABSQ128 ...) 
+(AbsInt64x4 ...) => (VPABSQ256 ...) +(AbsInt64x8 ...) => (VPABSQ512 ...) +(AbsMaskedInt8x16 x mask) => (VPABSBMasked128 x (VPMOVVec8x16ToM mask)) +(AbsMaskedInt8x32 x mask) => (VPABSBMasked256 x (VPMOVVec8x32ToM mask)) +(AbsMaskedInt8x64 x mask) => (VPABSBMasked512 x (VPMOVVec8x64ToM mask)) +(AbsMaskedInt16x8 x mask) => (VPABSWMasked128 x (VPMOVVec16x8ToM mask)) +(AbsMaskedInt16x16 x mask) => (VPABSWMasked256 x (VPMOVVec16x16ToM mask)) +(AbsMaskedInt16x32 x mask) => (VPABSWMasked512 x (VPMOVVec16x32ToM mask)) +(AbsMaskedInt32x4 x mask) => (VPABSDMasked128 x (VPMOVVec32x4ToM mask)) +(AbsMaskedInt32x8 x mask) => (VPABSDMasked256 x (VPMOVVec32x8ToM mask)) +(AbsMaskedInt32x16 x mask) => (VPABSDMasked512 x (VPMOVVec32x16ToM mask)) +(AbsMaskedInt64x2 x mask) => (VPABSQMasked128 x (VPMOVVec64x2ToM mask)) +(AbsMaskedInt64x4 x mask) => (VPABSQMasked256 x (VPMOVVec64x4ToM mask)) +(AbsMaskedInt64x8 x mask) => (VPABSQMasked512 x (VPMOVVec64x8ToM mask)) (AddFloat32x4 ...) => (VADDPS128 ...) (AddFloat32x8 ...) => (VADDPS256 ...) (AddFloat32x16 ...) => (VADDPS512 ...) @@ -54,12 +54,24 @@ (AddUint64x2 ...) => (VPADDQ128 ...) (AddUint64x4 ...) => (VPADDQ256 ...) (AddUint64x8 ...) => (VPADDQ512 ...) -(AddDotProdInt32x4 ...) => (VPDPWSSD128 ...) -(AddDotProdInt32x8 ...) => (VPDPWSSD256 ...) -(AddDotProdInt32x16 ...) => (VPDPWSSD512 ...) -(AddDotProdMaskedInt32x4 x y z mask) => (VPDPWSSDMasked128 x y z (VPMOVVec32x4ToM mask)) -(AddDotProdMaskedInt32x8 x y z mask) => (VPDPWSSDMasked256 x y z (VPMOVVec32x8ToM mask)) -(AddDotProdMaskedInt32x16 x y z mask) => (VPDPWSSDMasked512 x y z (VPMOVVec32x16ToM mask)) +(AddDotProdPairsSaturatedInt32x4 ...) => (VPDPWSSDS128 ...) +(AddDotProdPairsSaturatedInt32x8 ...) => (VPDPWSSDS256 ...) +(AddDotProdPairsSaturatedInt32x16 ...) => (VPDPWSSDS512 ...) +(AddDotProdPairsSaturatedMaskedInt32x4 x y z mask) => (VPDPWSSDSMasked128 x y z (VPMOVVec32x4ToM mask)) +(AddDotProdPairsSaturatedMaskedInt32x8 x y z mask) => (VPDPWSSDSMasked256 x y z (VPMOVVec32x8ToM mask)) +(AddDotProdPairsSaturatedMaskedInt32x16 x y z mask) => (VPDPWSSDSMasked512 x y z (VPMOVVec32x16ToM mask)) +(AddDotProdQuadrupleInt32x4 ...) => (VPDPBUSD128 ...) +(AddDotProdQuadrupleInt32x8 ...) => (VPDPBUSD256 ...) +(AddDotProdQuadrupleInt32x16 ...) => (VPDPBUSD512 ...) +(AddDotProdQuadrupleMaskedInt32x4 x y z mask) => (VPDPBUSDMasked128 x y z (VPMOVVec32x4ToM mask)) +(AddDotProdQuadrupleMaskedInt32x8 x y z mask) => (VPDPBUSDMasked256 x y z (VPMOVVec32x8ToM mask)) +(AddDotProdQuadrupleMaskedInt32x16 x y z mask) => (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) +(AddDotProdQuadrupleSaturatedInt32x4 ...) => (VPDPBUSDS128 ...) +(AddDotProdQuadrupleSaturatedInt32x8 ...) => (VPDPBUSDS256 ...) +(AddDotProdQuadrupleSaturatedInt32x16 ...) => (VPDPBUSDS512 ...) +(AddDotProdQuadrupleSaturatedMaskedInt32x4 x y z mask) => (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) +(AddDotProdQuadrupleSaturatedMaskedInt32x8 x y z mask) => (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) +(AddDotProdQuadrupleSaturatedMaskedInt32x16 x y z mask) => (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) (AddMaskedFloat32x4 x y mask) => (VADDPSMasked128 x y (VPMOVVec32x4ToM mask)) (AddMaskedFloat32x8 x y mask) => (VADDPSMasked256 x y (VPMOVVec32x8ToM mask)) (AddMaskedFloat32x16 x y mask) => (VADDPSMasked512 x y (VPMOVVec32x16ToM mask)) @@ -110,24 +122,24 @@ (AddSaturatedInt16x8 ...) => (VPADDSW128 ...) (AddSaturatedInt16x16 ...) => (VPADDSW256 ...) (AddSaturatedInt16x32 ...) => (VPADDSW512 ...) -(AddSaturatedUint8x16 ...) => (VPADDSB128 ...) 
-(AddSaturatedUint8x32 ...) => (VPADDSB256 ...) -(AddSaturatedUint8x64 ...) => (VPADDSB512 ...) -(AddSaturatedUint16x8 ...) => (VPADDSW128 ...) -(AddSaturatedUint16x16 ...) => (VPADDSW256 ...) -(AddSaturatedUint16x32 ...) => (VPADDSW512 ...) +(AddSaturatedUint8x16 ...) => (VPADDUSB128 ...) +(AddSaturatedUint8x32 ...) => (VPADDUSB256 ...) +(AddSaturatedUint8x64 ...) => (VPADDUSB512 ...) +(AddSaturatedUint16x8 ...) => (VPADDUSW128 ...) +(AddSaturatedUint16x16 ...) => (VPADDUSW256 ...) +(AddSaturatedUint16x32 ...) => (VPADDUSW512 ...) (AddSaturatedMaskedInt8x16 x y mask) => (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) (AddSaturatedMaskedInt8x32 x y mask) => (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) (AddSaturatedMaskedInt8x64 x y mask) => (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) (AddSaturatedMaskedInt16x8 x y mask) => (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) (AddSaturatedMaskedInt16x16 x y mask) => (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) (AddSaturatedMaskedInt16x32 x y mask) => (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) -(AddSaturatedMaskedUint8x16 x y mask) => (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) -(AddSaturatedMaskedUint8x32 x y mask) => (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) -(AddSaturatedMaskedUint8x64 x y mask) => (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) -(AddSaturatedMaskedUint16x8 x y mask) => (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) -(AddSaturatedMaskedUint16x16 x y mask) => (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) -(AddSaturatedMaskedUint16x32 x y mask) => (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) +(AddSaturatedMaskedUint8x16 x y mask) => (VPADDUSBMasked128 x y (VPMOVVec8x16ToM mask)) +(AddSaturatedMaskedUint8x32 x y mask) => (VPADDUSBMasked256 x y (VPMOVVec8x32ToM mask)) +(AddSaturatedMaskedUint8x64 x y mask) => (VPADDUSBMasked512 x y (VPMOVVec8x64ToM mask)) +(AddSaturatedMaskedUint16x8 x y mask) => (VPADDUSWMasked128 x y (VPMOVVec16x8ToM mask)) +(AddSaturatedMaskedUint16x16 x y mask) => (VPADDUSWMasked256 x y (VPMOVVec16x16ToM mask)) +(AddSaturatedMaskedUint16x32 x y mask) => (VPADDUSWMasked512 x y (VPMOVVec16x32ToM mask)) (AddSubFloat32x4 ...) => (VADDSUBPS128 ...) (AddSubFloat32x8 ...) => (VADDSUBPS256 ...) (AddSubFloat64x2 ...) => (VADDSUBPD128 ...) @@ -204,30 +216,6 @@ (AndNotMaskedUint64x2 x y mask) => (VPANDNQMasked128 x y (VPMOVVec64x2ToM mask)) (AndNotMaskedUint64x4 x y mask) => (VPANDNQMasked256 x y (VPMOVVec64x4ToM mask)) (AndNotMaskedUint64x8 x y mask) => (VPANDNQMasked512 x y (VPMOVVec64x8ToM mask)) -(ApproximateReciprocalFloat32x4 ...) => (VRCPPS128 ...) -(ApproximateReciprocalFloat32x8 ...) => (VRCPPS256 ...) -(ApproximateReciprocalFloat32x16 ...) => (VRCP14PS512 ...) -(ApproximateReciprocalFloat64x2 ...) => (VRCP14PD128 ...) -(ApproximateReciprocalFloat64x4 ...) => (VRCP14PD256 ...) -(ApproximateReciprocalFloat64x8 ...) => (VRCP14PD512 ...) -(ApproximateReciprocalMaskedFloat32x4 x mask) => (VRCP14PSMasked128 x (VPMOVVec32x4ToM mask)) -(ApproximateReciprocalMaskedFloat32x8 x mask) => (VRCP14PSMasked256 x (VPMOVVec32x8ToM mask)) -(ApproximateReciprocalMaskedFloat32x16 x mask) => (VRCP14PSMasked512 x (VPMOVVec32x16ToM mask)) -(ApproximateReciprocalMaskedFloat64x2 x mask) => (VRCP14PDMasked128 x (VPMOVVec64x2ToM mask)) -(ApproximateReciprocalMaskedFloat64x4 x mask) => (VRCP14PDMasked256 x (VPMOVVec64x4ToM mask)) -(ApproximateReciprocalMaskedFloat64x8 x mask) => (VRCP14PDMasked512 x (VPMOVVec64x8ToM mask)) -(ApproximateReciprocalOfSqrtFloat32x4 ...) => (VRSQRTPS128 ...) 
-(ApproximateReciprocalOfSqrtFloat32x8 ...) => (VRSQRTPS256 ...) -(ApproximateReciprocalOfSqrtFloat32x16 ...) => (VRSQRT14PS512 ...) -(ApproximateReciprocalOfSqrtFloat64x2 ...) => (VRSQRT14PD128 ...) -(ApproximateReciprocalOfSqrtFloat64x4 ...) => (VRSQRT14PD256 ...) -(ApproximateReciprocalOfSqrtFloat64x8 ...) => (VRSQRT14PD512 ...) -(ApproximateReciprocalOfSqrtMaskedFloat32x4 x mask) => (VRSQRT14PSMasked128 x (VPMOVVec32x4ToM mask)) -(ApproximateReciprocalOfSqrtMaskedFloat32x8 x mask) => (VRSQRT14PSMasked256 x (VPMOVVec32x8ToM mask)) -(ApproximateReciprocalOfSqrtMaskedFloat32x16 x mask) => (VRSQRT14PSMasked512 x (VPMOVVec32x16ToM mask)) -(ApproximateReciprocalOfSqrtMaskedFloat64x2 x mask) => (VRSQRT14PDMasked128 x (VPMOVVec64x2ToM mask)) -(ApproximateReciprocalOfSqrtMaskedFloat64x4 x mask) => (VRSQRT14PDMasked256 x (VPMOVVec64x4ToM mask)) -(ApproximateReciprocalOfSqrtMaskedFloat64x8 x mask) => (VRSQRT14PDMasked512 x (VPMOVVec64x8ToM mask)) (AverageUint8x16 ...) => (VPAVGB128 ...) (AverageUint8x32 ...) => (VPAVGB256 ...) (AverageUint8x64 ...) => (VPAVGB512 ...) @@ -310,6 +298,12 @@ (ConvertToUint32MaskedFloat32x4 x mask) => (VCVTPS2UDQMasked128 x (VPMOVVec32x4ToM mask)) (ConvertToUint32MaskedFloat32x8 x mask) => (VCVTPS2UDQMasked256 x (VPMOVVec32x8ToM mask)) (ConvertToUint32MaskedFloat32x16 x mask) => (VCVTPS2UDQMasked512 x (VPMOVVec32x16ToM mask)) +(CopySignInt8x16 ...) => (VPSIGNB128 ...) +(CopySignInt8x32 ...) => (VPSIGNB256 ...) +(CopySignInt16x8 ...) => (VPSIGNW128 ...) +(CopySignInt16x16 ...) => (VPSIGNW256 ...) +(CopySignInt32x4 ...) => (VPSIGND128 ...) +(CopySignInt32x8 ...) => (VPSIGND256 ...) (DivFloat32x4 ...) => (VDIVPS128 ...) (DivFloat32x8 ...) => (VDIVPS256 ...) (DivFloat32x16 ...) => (VDIVPS512 ...) @@ -322,9 +316,18 @@ (DivMaskedFloat64x2 x y mask) => (VDIVPDMasked128 x y (VPMOVVec64x2ToM mask)) (DivMaskedFloat64x4 x y mask) => (VDIVPDMasked256 x y (VPMOVVec64x4ToM mask)) (DivMaskedFloat64x8 x y mask) => (VDIVPDMasked512 x y (VPMOVVec64x8ToM mask)) -(DotProdBroadcastFloat32x4 x y) => (VDPPS128 [127] x y) -(DotProdBroadcastFloat32x8 x y) => (VDPPS256 [127] x y) -(DotProdBroadcastFloat64x2 x y) => (VDPPD128 [127] x y) +(DotProdPairsInt16x8 ...) => (VPMADDWD128 ...) +(DotProdPairsInt16x16 ...) => (VPMADDWD256 ...) +(DotProdPairsInt16x32 ...) => (VPMADDWD512 ...) +(DotProdPairsMaskedInt16x8 x y mask) => (VPMADDWDMasked128 x y (VPMOVVec16x8ToM mask)) +(DotProdPairsMaskedInt16x16 x y mask) => (VPMADDWDMasked256 x y (VPMOVVec16x16ToM mask)) +(DotProdPairsMaskedInt16x32 x y mask) => (VPMADDWDMasked512 x y (VPMOVVec16x32ToM mask)) +(DotProdPairsSaturatedUint8x16 ...) => (VPMADDUBSW128 ...) +(DotProdPairsSaturatedUint8x32 ...) => (VPMADDUBSW256 ...) +(DotProdPairsSaturatedUint8x64 ...) => (VPMADDUBSW512 ...) 
+(DotProdPairsSaturatedMaskedUint8x16 x y mask) => (VPMADDUBSWMasked128 x y (VPMOVVec16x8ToM mask)) +(DotProdPairsSaturatedMaskedUint8x32 x y mask) => (VPMADDUBSWMasked256 x y (VPMOVVec16x16ToM mask)) +(DotProdPairsSaturatedMaskedUint8x64 x y mask) => (VPMADDUBSWMasked512 x y (VPMOVVec16x32ToM mask)) (EqualFloat32x4 x y) => (VCMPPS128 [0] x y) (EqualFloat32x8 x y) => (VCMPPS256 [0] x y) (EqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [0] x y)) @@ -443,42 +446,6 @@ (FloorScaledResidueMaskedFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) (FloorScaledResidueMaskedFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) (FloorScaledResidueMaskedFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) -(FusedMultiplyAddFloat32x4 ...) => (VFMADD213PS128 ...) -(FusedMultiplyAddFloat32x8 ...) => (VFMADD213PS256 ...) -(FusedMultiplyAddFloat32x16 ...) => (VFMADD213PS512 ...) -(FusedMultiplyAddFloat64x2 ...) => (VFMADD213PD128 ...) -(FusedMultiplyAddFloat64x4 ...) => (VFMADD213PD256 ...) -(FusedMultiplyAddFloat64x8 ...) => (VFMADD213PD512 ...) -(FusedMultiplyAddMaskedFloat32x4 x y z mask) => (VFMADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(FusedMultiplyAddMaskedFloat32x8 x y z mask) => (VFMADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) -(FusedMultiplyAddMaskedFloat32x16 x y z mask) => (VFMADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(FusedMultiplyAddMaskedFloat64x2 x y z mask) => (VFMADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) -(FusedMultiplyAddMaskedFloat64x4 x y z mask) => (VFMADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(FusedMultiplyAddMaskedFloat64x8 x y z mask) => (VFMADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(FusedMultiplyAddSubFloat32x4 ...) => (VFMADDSUB213PS128 ...) -(FusedMultiplyAddSubFloat32x8 ...) => (VFMADDSUB213PS256 ...) -(FusedMultiplyAddSubFloat32x16 ...) => (VFMADDSUB213PS512 ...) -(FusedMultiplyAddSubFloat64x2 ...) => (VFMADDSUB213PD128 ...) -(FusedMultiplyAddSubFloat64x4 ...) => (VFMADDSUB213PD256 ...) -(FusedMultiplyAddSubFloat64x8 ...) => (VFMADDSUB213PD512 ...) -(FusedMultiplyAddSubMaskedFloat32x4 x y z mask) => (VFMADDSUB213PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(FusedMultiplyAddSubMaskedFloat32x8 x y z mask) => (VFMADDSUB213PSMasked256 x y z (VPMOVVec32x8ToM mask)) -(FusedMultiplyAddSubMaskedFloat32x16 x y z mask) => (VFMADDSUB213PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(FusedMultiplyAddSubMaskedFloat64x2 x y z mask) => (VFMADDSUB213PDMasked128 x y z (VPMOVVec64x2ToM mask)) -(FusedMultiplyAddSubMaskedFloat64x4 x y z mask) => (VFMADDSUB213PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(FusedMultiplyAddSubMaskedFloat64x8 x y z mask) => (VFMADDSUB213PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(FusedMultiplySubAddFloat32x4 ...) => (VFMSUBADD213PS128 ...) -(FusedMultiplySubAddFloat32x8 ...) => (VFMSUBADD213PS256 ...) -(FusedMultiplySubAddFloat32x16 ...) => (VFMSUBADD213PS512 ...) -(FusedMultiplySubAddFloat64x2 ...) => (VFMSUBADD213PD128 ...) -(FusedMultiplySubAddFloat64x4 ...) => (VFMSUBADD213PD256 ...) -(FusedMultiplySubAddFloat64x8 ...) => (VFMSUBADD213PD512 ...) 
-(FusedMultiplySubAddMaskedFloat32x4 x y z mask) => (VFMSUBADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(FusedMultiplySubAddMaskedFloat32x8 x y z mask) => (VFMSUBADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) -(FusedMultiplySubAddMaskedFloat32x16 x y z mask) => (VFMSUBADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(FusedMultiplySubAddMaskedFloat64x2 x y z mask) => (VFMSUBADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) -(FusedMultiplySubAddMaskedFloat64x4 x y z mask) => (VFMSUBADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(FusedMultiplySubAddMaskedFloat64x8 x y z mask) => (VFMSUBADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) (GaloisFieldAffineTransformUint8x16 ...) => (VGF2P8AFFINEQB128 ...) (GaloisFieldAffineTransformUint8x32 ...) => (VGF2P8AFFINEQB256 ...) (GaloisFieldAffineTransformUint8x64 ...) => (VGF2P8AFFINEQB512 ...) @@ -932,34 +899,49 @@ (MulInt64x2 ...) => (VPMULLQ128 ...) (MulInt64x4 ...) => (VPMULLQ256 ...) (MulInt64x8 ...) => (VPMULLQ512 ...) +(MulUint16x8 ...) => (VPMULLW128 ...) +(MulUint16x16 ...) => (VPMULLW256 ...) +(MulUint16x32 ...) => (VPMULLW512 ...) +(MulUint32x4 ...) => (VPMULLD128 ...) +(MulUint32x8 ...) => (VPMULLD256 ...) +(MulUint32x16 ...) => (VPMULLD512 ...) +(MulUint64x2 ...) => (VPMULLQ128 ...) +(MulUint64x4 ...) => (VPMULLQ256 ...) +(MulUint64x8 ...) => (VPMULLQ512 ...) +(MulAddFloat32x4 ...) => (VFMADD213PS128 ...) +(MulAddFloat32x8 ...) => (VFMADD213PS256 ...) +(MulAddFloat32x16 ...) => (VFMADD213PS512 ...) +(MulAddFloat64x2 ...) => (VFMADD213PD128 ...) +(MulAddFloat64x4 ...) => (VFMADD213PD256 ...) +(MulAddFloat64x8 ...) => (VFMADD213PD512 ...) +(MulAddMaskedFloat32x4 x y z mask) => (VFMADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MulAddMaskedFloat32x8 x y z mask) => (VFMADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MulAddMaskedFloat32x16 x y z mask) => (VFMADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MulAddMaskedFloat64x2 x y z mask) => (VFMADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(MulAddMaskedFloat64x4 x y z mask) => (VFMADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(MulAddMaskedFloat64x8 x y z mask) => (VFMADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) +(MulAddSubFloat32x4 ...) => (VFMADDSUB213PS128 ...) +(MulAddSubFloat32x8 ...) => (VFMADDSUB213PS256 ...) +(MulAddSubFloat32x16 ...) => (VFMADDSUB213PS512 ...) +(MulAddSubFloat64x2 ...) => (VFMADDSUB213PD128 ...) +(MulAddSubFloat64x4 ...) => (VFMADDSUB213PD256 ...) +(MulAddSubFloat64x8 ...) => (VFMADDSUB213PD512 ...) +(MulAddSubMaskedFloat32x4 x y z mask) => (VFMADDSUB213PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MulAddSubMaskedFloat32x8 x y z mask) => (VFMADDSUB213PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MulAddSubMaskedFloat32x16 x y z mask) => (VFMADDSUB213PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MulAddSubMaskedFloat64x2 x y z mask) => (VFMADDSUB213PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(MulAddSubMaskedFloat64x4 x y z mask) => (VFMADDSUB213PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(MulAddSubMaskedFloat64x8 x y z mask) => (VFMADDSUB213PDMasked512 x y z (VPMOVVec64x8ToM mask)) (MulEvenWidenInt32x4 ...) => (VPMULDQ128 ...) (MulEvenWidenInt32x8 ...) => (VPMULDQ256 ...) -(MulEvenWidenInt64x2 ...) => (VPMULDQ128 ...) -(MulEvenWidenInt64x4 ...) => (VPMULDQ256 ...) -(MulEvenWidenInt64x8 ...) => (VPMULDQ512 ...) (MulEvenWidenUint32x4 ...) => (VPMULUDQ128 ...) (MulEvenWidenUint32x8 ...) => (VPMULUDQ256 ...) -(MulEvenWidenUint64x2 ...) => (VPMULUDQ128 ...) -(MulEvenWidenUint64x4 ...) => (VPMULUDQ256 ...) -(MulEvenWidenUint64x8 ...) => (VPMULUDQ512 ...) 
-(MulEvenWidenMaskedInt64x2 x y mask) => (VPMULDQMasked128 x y (VPMOVVec64x2ToM mask)) -(MulEvenWidenMaskedInt64x4 x y mask) => (VPMULDQMasked256 x y (VPMOVVec64x4ToM mask)) -(MulEvenWidenMaskedInt64x8 x y mask) => (VPMULDQMasked512 x y (VPMOVVec64x8ToM mask)) -(MulEvenWidenMaskedUint64x2 x y mask) => (VPMULUDQMasked128 x y (VPMOVVec64x2ToM mask)) -(MulEvenWidenMaskedUint64x4 x y mask) => (VPMULUDQMasked256 x y (VPMOVVec64x4ToM mask)) -(MulEvenWidenMaskedUint64x8 x y mask) => (VPMULUDQMasked512 x y (VPMOVVec64x8ToM mask)) -(MulHighInt16x8 ...) => (VPMULHW128 ...) -(MulHighInt16x16 ...) => (VPMULHW256 ...) +(MulHighInt16x8 ...) => (VPMULHUW128 ...) +(MulHighInt16x16 ...) => (VPMULHUW256 ...) (MulHighInt16x32 ...) => (VPMULHW512 ...) -(MulHighUint16x8 ...) => (VPMULHUW128 ...) -(MulHighUint16x16 ...) => (VPMULHUW256 ...) -(MulHighUint16x32 ...) => (VPMULHUW512 ...) -(MulHighMaskedInt16x8 x y mask) => (VPMULHWMasked128 x y (VPMOVVec16x8ToM mask)) +(MulHighMaskedInt16x8 x y mask) => (VPMULHUWMasked128 x y (VPMOVVec16x8ToM mask)) (MulHighMaskedInt16x16 x y mask) => (VPMULHWMasked256 x y (VPMOVVec16x16ToM mask)) -(MulHighMaskedInt16x32 x y mask) => (VPMULHWMasked512 x y (VPMOVVec16x32ToM mask)) -(MulHighMaskedUint16x8 x y mask) => (VPMULHUWMasked128 x y (VPMOVVec16x8ToM mask)) -(MulHighMaskedUint16x16 x y mask) => (VPMULHUWMasked256 x y (VPMOVVec16x16ToM mask)) -(MulHighMaskedUint16x32 x y mask) => (VPMULHUWMasked512 x y (VPMOVVec16x32ToM mask)) +(MulHighMaskedInt16x32 x y mask) => (VPMULHUWMasked512 x y (VPMOVVec16x32ToM mask)) (MulMaskedFloat32x4 x y mask) => (VMULPSMasked128 x y (VPMOVVec32x4ToM mask)) (MulMaskedFloat32x8 x y mask) => (VMULPSMasked256 x y (VPMOVVec32x8ToM mask)) (MulMaskedFloat32x16 x y mask) => (VMULPSMasked512 x y (VPMOVVec32x16ToM mask)) @@ -975,6 +957,27 @@ (MulMaskedInt64x2 x y mask) => (VPMULLQMasked128 x y (VPMOVVec64x2ToM mask)) (MulMaskedInt64x4 x y mask) => (VPMULLQMasked256 x y (VPMOVVec64x4ToM mask)) (MulMaskedInt64x8 x y mask) => (VPMULLQMasked512 x y (VPMOVVec64x8ToM mask)) +(MulMaskedUint16x8 x y mask) => (VPMULLWMasked128 x y (VPMOVVec16x8ToM mask)) +(MulMaskedUint16x16 x y mask) => (VPMULLWMasked256 x y (VPMOVVec16x16ToM mask)) +(MulMaskedUint16x32 x y mask) => (VPMULLWMasked512 x y (VPMOVVec16x32ToM mask)) +(MulMaskedUint32x4 x y mask) => (VPMULLDMasked128 x y (VPMOVVec32x4ToM mask)) +(MulMaskedUint32x8 x y mask) => (VPMULLDMasked256 x y (VPMOVVec32x8ToM mask)) +(MulMaskedUint32x16 x y mask) => (VPMULLDMasked512 x y (VPMOVVec32x16ToM mask)) +(MulMaskedUint64x2 x y mask) => (VPMULLQMasked128 x y (VPMOVVec64x2ToM mask)) +(MulMaskedUint64x4 x y mask) => (VPMULLQMasked256 x y (VPMOVVec64x4ToM mask)) +(MulMaskedUint64x8 x y mask) => (VPMULLQMasked512 x y (VPMOVVec64x8ToM mask)) +(MulSubAddFloat32x4 ...) => (VFMSUBADD213PS128 ...) +(MulSubAddFloat32x8 ...) => (VFMSUBADD213PS256 ...) +(MulSubAddFloat32x16 ...) => (VFMSUBADD213PS512 ...) +(MulSubAddFloat64x2 ...) => (VFMSUBADD213PD128 ...) +(MulSubAddFloat64x4 ...) => (VFMSUBADD213PD256 ...) +(MulSubAddFloat64x8 ...) => (VFMSUBADD213PD512 ...) 
+(MulSubAddMaskedFloat32x4 x y z mask) => (VFMSUBADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MulSubAddMaskedFloat32x8 x y z mask) => (VFMSUBADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MulSubAddMaskedFloat32x16 x y z mask) => (VFMSUBADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MulSubAddMaskedFloat64x2 x y z mask) => (VFMSUBADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(MulSubAddMaskedFloat64x4 x y z mask) => (VFMSUBADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(MulSubAddMaskedFloat64x8 x y z mask) => (VFMSUBADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) (NotEqualFloat32x4 x y) => (VCMPPS128 [4] x y) (NotEqualFloat32x8 x y) => (VCMPPS256 [4] x y) (NotEqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [4] x y)) @@ -1035,6 +1038,54 @@ (NotEqualMaskedUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [4] x y (VPMOVVec64x2ToM mask))) (NotEqualMaskedUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [4] x y (VPMOVVec64x4ToM mask))) (NotEqualMaskedUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [4] x y (VPMOVVec64x8ToM mask))) +(OnesCountInt8x16 ...) => (VPOPCNTB128 ...) +(OnesCountInt8x32 ...) => (VPOPCNTB256 ...) +(OnesCountInt8x64 ...) => (VPOPCNTB512 ...) +(OnesCountInt16x8 ...) => (VPOPCNTW128 ...) +(OnesCountInt16x16 ...) => (VPOPCNTW256 ...) +(OnesCountInt16x32 ...) => (VPOPCNTW512 ...) +(OnesCountInt32x4 ...) => (VPOPCNTD128 ...) +(OnesCountInt32x8 ...) => (VPOPCNTD256 ...) +(OnesCountInt32x16 ...) => (VPOPCNTD512 ...) +(OnesCountInt64x2 ...) => (VPOPCNTQ128 ...) +(OnesCountInt64x4 ...) => (VPOPCNTQ256 ...) +(OnesCountInt64x8 ...) => (VPOPCNTQ512 ...) +(OnesCountUint8x16 ...) => (VPOPCNTB128 ...) +(OnesCountUint8x32 ...) => (VPOPCNTB256 ...) +(OnesCountUint8x64 ...) => (VPOPCNTB512 ...) +(OnesCountUint16x8 ...) => (VPOPCNTW128 ...) +(OnesCountUint16x16 ...) => (VPOPCNTW256 ...) +(OnesCountUint16x32 ...) => (VPOPCNTW512 ...) +(OnesCountUint32x4 ...) => (VPOPCNTD128 ...) +(OnesCountUint32x8 ...) => (VPOPCNTD256 ...) +(OnesCountUint32x16 ...) => (VPOPCNTD512 ...) +(OnesCountUint64x2 ...) => (VPOPCNTQ128 ...) +(OnesCountUint64x4 ...) => (VPOPCNTQ256 ...) +(OnesCountUint64x8 ...) => (VPOPCNTQ512 ...) 
+(OnesCountMaskedInt8x16 x mask) => (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) +(OnesCountMaskedInt8x32 x mask) => (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) +(OnesCountMaskedInt8x64 x mask) => (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) +(OnesCountMaskedInt16x8 x mask) => (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) +(OnesCountMaskedInt16x16 x mask) => (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) +(OnesCountMaskedInt16x32 x mask) => (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) +(OnesCountMaskedInt32x4 x mask) => (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) +(OnesCountMaskedInt32x8 x mask) => (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) +(OnesCountMaskedInt32x16 x mask) => (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) +(OnesCountMaskedInt64x2 x mask) => (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) +(OnesCountMaskedInt64x4 x mask) => (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) +(OnesCountMaskedInt64x8 x mask) => (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) +(OnesCountMaskedUint8x16 x mask) => (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) +(OnesCountMaskedUint8x32 x mask) => (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) +(OnesCountMaskedUint8x64 x mask) => (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) +(OnesCountMaskedUint16x8 x mask) => (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) +(OnesCountMaskedUint16x16 x mask) => (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) +(OnesCountMaskedUint16x32 x mask) => (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) +(OnesCountMaskedUint32x4 x mask) => (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) +(OnesCountMaskedUint32x8 x mask) => (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) +(OnesCountMaskedUint32x16 x mask) => (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) +(OnesCountMaskedUint64x2 x mask) => (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) +(OnesCountMaskedUint64x4 x mask) => (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) +(OnesCountMaskedUint64x8 x mask) => (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) (OrInt8x16 ...) => (VPOR128 ...) (OrInt8x32 ...) => (VPOR256 ...) (OrInt8x64 ...) => (VPORD512 ...) @@ -1071,12 +1122,6 @@ (OrMaskedUint64x2 x y mask) => (VPORQMasked128 x y (VPMOVVec64x2ToM mask)) (OrMaskedUint64x4 x y mask) => (VPORQMasked256 x y (VPMOVVec64x4ToM mask)) (OrMaskedUint64x8 x y mask) => (VPORQMasked512 x y (VPMOVVec64x8ToM mask)) -(PairDotProdInt16x8 ...) => (VPMADDWD128 ...) -(PairDotProdInt16x16 ...) => (VPMADDWD256 ...) -(PairDotProdInt16x32 ...) => (VPMADDWD512 ...) -(PairDotProdMaskedInt16x8 x y mask) => (VPMADDWDMasked128 x y (VPMOVVec16x8ToM mask)) -(PairDotProdMaskedInt16x16 x y mask) => (VPMADDWDMasked256 x y (VPMOVVec16x16ToM mask)) -(PairDotProdMaskedInt16x32 x y mask) => (VPMADDWDMasked512 x y (VPMOVVec16x32ToM mask)) (PermuteFloat32x8 ...) => (VPERMPS256 ...) (PermuteFloat32x16 ...) => (VPERMPS512 ...) (PermuteFloat64x4 ...) => (VPERMPD256 ...) @@ -1185,54 +1230,30 @@ (PermuteMaskedUint32x16 x y mask) => (VPERMDMasked512 x y (VPMOVVec32x16ToM mask)) (PermuteMaskedUint64x4 x y mask) => (VPERMQMasked256 x y (VPMOVVec64x4ToM mask)) (PermuteMaskedUint64x8 x y mask) => (VPERMQMasked512 x y (VPMOVVec64x8ToM mask)) -(PopCountInt8x16 ...) => (VPOPCNTB128 ...) -(PopCountInt8x32 ...) => (VPOPCNTB256 ...) -(PopCountInt8x64 ...) => (VPOPCNTB512 ...) -(PopCountInt16x8 ...) => (VPOPCNTW128 ...) -(PopCountInt16x16 ...) => (VPOPCNTW256 ...) -(PopCountInt16x32 ...) => (VPOPCNTW512 ...) -(PopCountInt32x4 ...) => (VPOPCNTD128 ...) -(PopCountInt32x8 ...) => (VPOPCNTD256 ...) -(PopCountInt32x16 ...) => (VPOPCNTD512 ...) -(PopCountInt64x2 ...) 
=> (VPOPCNTQ128 ...) -(PopCountInt64x4 ...) => (VPOPCNTQ256 ...) -(PopCountInt64x8 ...) => (VPOPCNTQ512 ...) -(PopCountUint8x16 ...) => (VPOPCNTB128 ...) -(PopCountUint8x32 ...) => (VPOPCNTB256 ...) -(PopCountUint8x64 ...) => (VPOPCNTB512 ...) -(PopCountUint16x8 ...) => (VPOPCNTW128 ...) -(PopCountUint16x16 ...) => (VPOPCNTW256 ...) -(PopCountUint16x32 ...) => (VPOPCNTW512 ...) -(PopCountUint32x4 ...) => (VPOPCNTD128 ...) -(PopCountUint32x8 ...) => (VPOPCNTD256 ...) -(PopCountUint32x16 ...) => (VPOPCNTD512 ...) -(PopCountUint64x2 ...) => (VPOPCNTQ128 ...) -(PopCountUint64x4 ...) => (VPOPCNTQ256 ...) -(PopCountUint64x8 ...) => (VPOPCNTQ512 ...) -(PopCountMaskedInt8x16 x mask) => (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) -(PopCountMaskedInt8x32 x mask) => (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) -(PopCountMaskedInt8x64 x mask) => (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) -(PopCountMaskedInt16x8 x mask) => (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) -(PopCountMaskedInt16x16 x mask) => (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) -(PopCountMaskedInt16x32 x mask) => (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) -(PopCountMaskedInt32x4 x mask) => (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) -(PopCountMaskedInt32x8 x mask) => (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) -(PopCountMaskedInt32x16 x mask) => (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) -(PopCountMaskedInt64x2 x mask) => (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) -(PopCountMaskedInt64x4 x mask) => (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) -(PopCountMaskedInt64x8 x mask) => (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) -(PopCountMaskedUint8x16 x mask) => (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) -(PopCountMaskedUint8x32 x mask) => (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) -(PopCountMaskedUint8x64 x mask) => (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) -(PopCountMaskedUint16x8 x mask) => (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) -(PopCountMaskedUint16x16 x mask) => (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) -(PopCountMaskedUint16x32 x mask) => (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) -(PopCountMaskedUint32x4 x mask) => (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) -(PopCountMaskedUint32x8 x mask) => (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) -(PopCountMaskedUint32x16 x mask) => (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) -(PopCountMaskedUint64x2 x mask) => (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) -(PopCountMaskedUint64x4 x mask) => (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) -(PopCountMaskedUint64x8 x mask) => (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) +(ReciprocalFloat32x4 ...) => (VRCPPS128 ...) +(ReciprocalFloat32x8 ...) => (VRCPPS256 ...) +(ReciprocalFloat32x16 ...) => (VRCP14PS512 ...) +(ReciprocalFloat64x2 ...) => (VRCP14PD128 ...) +(ReciprocalFloat64x4 ...) => (VRCP14PD256 ...) +(ReciprocalFloat64x8 ...) => (VRCP14PD512 ...) +(ReciprocalMaskedFloat32x4 x mask) => (VRCP14PSMasked128 x (VPMOVVec32x4ToM mask)) +(ReciprocalMaskedFloat32x8 x mask) => (VRCP14PSMasked256 x (VPMOVVec32x8ToM mask)) +(ReciprocalMaskedFloat32x16 x mask) => (VRCP14PSMasked512 x (VPMOVVec32x16ToM mask)) +(ReciprocalMaskedFloat64x2 x mask) => (VRCP14PDMasked128 x (VPMOVVec64x2ToM mask)) +(ReciprocalMaskedFloat64x4 x mask) => (VRCP14PDMasked256 x (VPMOVVec64x4ToM mask)) +(ReciprocalMaskedFloat64x8 x mask) => (VRCP14PDMasked512 x (VPMOVVec64x8ToM mask)) +(ReciprocalSqrtFloat32x4 ...) => (VRSQRTPS128 ...) +(ReciprocalSqrtFloat32x8 ...) => (VRSQRTPS256 ...) +(ReciprocalSqrtFloat32x16 ...) => (VRSQRT14PS512 ...) 
+(ReciprocalSqrtFloat64x2 ...) => (VRSQRT14PD128 ...) +(ReciprocalSqrtFloat64x4 ...) => (VRSQRT14PD256 ...) +(ReciprocalSqrtFloat64x8 ...) => (VRSQRT14PD512 ...) +(ReciprocalSqrtMaskedFloat32x4 x mask) => (VRSQRT14PSMasked128 x (VPMOVVec32x4ToM mask)) +(ReciprocalSqrtMaskedFloat32x8 x mask) => (VRSQRT14PSMasked256 x (VPMOVVec32x8ToM mask)) +(ReciprocalSqrtMaskedFloat32x16 x mask) => (VRSQRT14PSMasked512 x (VPMOVVec32x16ToM mask)) +(ReciprocalSqrtMaskedFloat64x2 x mask) => (VRSQRT14PDMasked128 x (VPMOVVec64x2ToM mask)) +(ReciprocalSqrtMaskedFloat64x4 x mask) => (VRSQRT14PDMasked256 x (VPMOVVec64x4ToM mask)) +(ReciprocalSqrtMaskedFloat64x8 x mask) => (VRSQRT14PDMasked512 x (VPMOVVec64x8ToM mask)) (RotateAllLeftInt32x4 ...) => (VPROLD128 ...) (RotateAllLeftInt32x8 ...) => (VPROLD256 ...) (RotateAllLeftInt32x16 ...) => (VPROLD512 ...) @@ -1329,52 +1350,34 @@ (RotateRightMaskedUint64x2 x y mask) => (VPRORVQMasked128 x y (VPMOVVec64x2ToM mask)) (RotateRightMaskedUint64x4 x y mask) => (VPRORVQMasked256 x y (VPMOVVec64x4ToM mask)) (RotateRightMaskedUint64x8 x y mask) => (VPRORVQMasked512 x y (VPMOVVec64x8ToM mask)) -(RoundFloat32x4 x) => (VROUNDPS128 [0] x) -(RoundFloat32x8 x) => (VROUNDPS256 [0] x) -(RoundFloat64x2 x) => (VROUNDPD128 [0] x) -(RoundFloat64x4 x) => (VROUNDPD256 [0] x) -(RoundScaledFloat32x4 [a] x) => (VRNDSCALEPS128 [a+0] x) -(RoundScaledFloat32x8 [a] x) => (VRNDSCALEPS256 [a+0] x) -(RoundScaledFloat32x16 [a] x) => (VRNDSCALEPS512 [a+0] x) -(RoundScaledFloat64x2 [a] x) => (VRNDSCALEPD128 [a+0] x) -(RoundScaledFloat64x4 [a] x) => (VRNDSCALEPD256 [a+0] x) -(RoundScaledFloat64x8 [a] x) => (VRNDSCALEPD512 [a+0] x) -(RoundScaledMaskedFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) -(RoundScaledMaskedFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) -(RoundScaledMaskedFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) -(RoundScaledMaskedFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) -(RoundScaledMaskedFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) -(RoundScaledMaskedFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) -(RoundScaledResidueFloat32x4 [a] x) => (VREDUCEPS128 [a+0] x) -(RoundScaledResidueFloat32x8 [a] x) => (VREDUCEPS256 [a+0] x) -(RoundScaledResidueFloat32x16 [a] x) => (VREDUCEPS512 [a+0] x) -(RoundScaledResidueFloat64x2 [a] x) => (VREDUCEPD128 [a+0] x) -(RoundScaledResidueFloat64x4 [a] x) => (VREDUCEPD256 [a+0] x) -(RoundScaledResidueFloat64x8 [a] x) => (VREDUCEPD512 [a+0] x) -(RoundScaledResidueMaskedFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) -(RoundScaledResidueMaskedFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) -(RoundScaledResidueMaskedFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) -(RoundScaledResidueMaskedFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) -(RoundScaledResidueMaskedFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) -(RoundScaledResidueMaskedFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) -(SaturatedAddDotProdInt32x4 ...) => (VPDPWSSDS128 ...) -(SaturatedAddDotProdInt32x8 ...) => (VPDPWSSDS256 ...) -(SaturatedAddDotProdInt32x16 ...) => (VPDPWSSDS512 ...) 
-(SaturatedAddDotProdMaskedInt32x4 x y z mask) => (VPDPWSSDSMasked128 x y z (VPMOVVec32x4ToM mask)) -(SaturatedAddDotProdMaskedInt32x8 x y z mask) => (VPDPWSSDSMasked256 x y z (VPMOVVec32x8ToM mask)) -(SaturatedAddDotProdMaskedInt32x16 x y z mask) => (VPDPWSSDSMasked512 x y z (VPMOVVec32x16ToM mask)) -(SaturatedUnsignedSignedPairDotProdUint8x16 ...) => (VPMADDUBSW128 ...) -(SaturatedUnsignedSignedPairDotProdUint8x32 ...) => (VPMADDUBSW256 ...) -(SaturatedUnsignedSignedPairDotProdUint8x64 ...) => (VPMADDUBSW512 ...) -(SaturatedUnsignedSignedPairDotProdMaskedUint8x16 x y mask) => (VPMADDUBSWMasked128 x y (VPMOVVec16x8ToM mask)) -(SaturatedUnsignedSignedPairDotProdMaskedUint8x32 x y mask) => (VPMADDUBSWMasked256 x y (VPMOVVec16x16ToM mask)) -(SaturatedUnsignedSignedPairDotProdMaskedUint8x64 x y mask) => (VPMADDUBSWMasked512 x y (VPMOVVec16x32ToM mask)) -(SaturatedUnsignedSignedQuadDotProdAccumulateInt32x4 ...) => (VPDPBUSDS128 ...) -(SaturatedUnsignedSignedQuadDotProdAccumulateInt32x8 ...) => (VPDPBUSDS256 ...) -(SaturatedUnsignedSignedQuadDotProdAccumulateInt32x16 ...) => (VPDPBUSDS512 ...) -(SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4 x y z mask) => (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) -(SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8 x y z mask) => (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) -(SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16 x y z mask) => (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) +(RoundToEvenFloat32x4 x) => (VROUNDPS128 [0] x) +(RoundToEvenFloat32x8 x) => (VROUNDPS256 [0] x) +(RoundToEvenFloat64x2 x) => (VROUNDPD128 [0] x) +(RoundToEvenFloat64x4 x) => (VROUNDPD256 [0] x) +(RoundToEvenScaledFloat32x4 [a] x) => (VRNDSCALEPS128 [a+0] x) +(RoundToEvenScaledFloat32x8 [a] x) => (VRNDSCALEPS256 [a+0] x) +(RoundToEvenScaledFloat32x16 [a] x) => (VRNDSCALEPS512 [a+0] x) +(RoundToEvenScaledFloat64x2 [a] x) => (VRNDSCALEPD128 [a+0] x) +(RoundToEvenScaledFloat64x4 [a] x) => (VRNDSCALEPD256 [a+0] x) +(RoundToEvenScaledFloat64x8 [a] x) => (VRNDSCALEPD512 [a+0] x) +(RoundToEvenScaledMaskedFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) +(RoundToEvenScaledMaskedFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) +(RoundToEvenScaledMaskedFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) +(RoundToEvenScaledMaskedFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) +(RoundToEvenScaledMaskedFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) +(RoundToEvenScaledMaskedFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) +(RoundToEvenScaledResidueFloat32x4 [a] x) => (VREDUCEPS128 [a+0] x) +(RoundToEvenScaledResidueFloat32x8 [a] x) => (VREDUCEPS256 [a+0] x) +(RoundToEvenScaledResidueFloat32x16 [a] x) => (VREDUCEPS512 [a+0] x) +(RoundToEvenScaledResidueFloat64x2 [a] x) => (VREDUCEPD128 [a+0] x) +(RoundToEvenScaledResidueFloat64x4 [a] x) => (VREDUCEPD256 [a+0] x) +(RoundToEvenScaledResidueFloat64x8 [a] x) => (VREDUCEPD512 [a+0] x) +(RoundToEvenScaledResidueMaskedFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) +(RoundToEvenScaledResidueMaskedFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) +(RoundToEvenScaledResidueMaskedFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) +(RoundToEvenScaledResidueMaskedFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) 
+(RoundToEvenScaledResidueMaskedFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) +(RoundToEvenScaledResidueMaskedFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) (ScaleFloat32x4 ...) => (VSCALEFPS128 ...) (ScaleFloat32x8 ...) => (VSCALEFPS256 ...) (ScaleFloat32x16 ...) => (VSCALEFPS512 ...) @@ -1795,12 +1798,6 @@ (ShiftRightMaskedUint64x2 x y mask) => (VPSRLVQMasked128 x y (VPMOVVec64x2ToM mask)) (ShiftRightMaskedUint64x4 x y mask) => (VPSRLVQMasked256 x y (VPMOVVec64x4ToM mask)) (ShiftRightMaskedUint64x8 x y mask) => (VPSRLVQMasked512 x y (VPMOVVec64x8ToM mask)) -(SignInt8x16 ...) => (VPSIGNB128 ...) -(SignInt8x32 ...) => (VPSIGNB256 ...) -(SignInt16x8 ...) => (VPSIGNW128 ...) -(SignInt16x16 ...) => (VPSIGNW256 ...) -(SignInt32x4 ...) => (VPSIGND128 ...) -(SignInt32x8 ...) => (VPSIGND256 ...) (SqrtFloat32x4 ...) => (VSQRTPS128 ...) (SqrtFloat32x8 ...) => (VSQRTPS256 ...) (SqrtFloat32x16 ...) => (VSQRTPS512 ...) @@ -1893,24 +1890,24 @@ (SubSaturatedInt16x8 ...) => (VPSUBSW128 ...) (SubSaturatedInt16x16 ...) => (VPSUBSW256 ...) (SubSaturatedInt16x32 ...) => (VPSUBSW512 ...) -(SubSaturatedUint8x16 ...) => (VPSUBSB128 ...) -(SubSaturatedUint8x32 ...) => (VPSUBSB256 ...) -(SubSaturatedUint8x64 ...) => (VPSUBSB512 ...) -(SubSaturatedUint16x8 ...) => (VPSUBSW128 ...) -(SubSaturatedUint16x16 ...) => (VPSUBSW256 ...) -(SubSaturatedUint16x32 ...) => (VPSUBSW512 ...) +(SubSaturatedUint8x16 ...) => (VPSUBUSB128 ...) +(SubSaturatedUint8x32 ...) => (VPSUBUSB256 ...) +(SubSaturatedUint8x64 ...) => (VPSUBUSB512 ...) +(SubSaturatedUint16x8 ...) => (VPSUBUSW128 ...) +(SubSaturatedUint16x16 ...) => (VPSUBUSW256 ...) +(SubSaturatedUint16x32 ...) => (VPSUBUSW512 ...) (SubSaturatedMaskedInt8x16 x y mask) => (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) (SubSaturatedMaskedInt8x32 x y mask) => (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) (SubSaturatedMaskedInt8x64 x y mask) => (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) (SubSaturatedMaskedInt16x8 x y mask) => (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) (SubSaturatedMaskedInt16x16 x y mask) => (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) (SubSaturatedMaskedInt16x32 x y mask) => (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) -(SubSaturatedMaskedUint8x16 x y mask) => (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) -(SubSaturatedMaskedUint8x32 x y mask) => (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) -(SubSaturatedMaskedUint8x64 x y mask) => (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) -(SubSaturatedMaskedUint16x8 x y mask) => (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) -(SubSaturatedMaskedUint16x16 x y mask) => (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) -(SubSaturatedMaskedUint16x32 x y mask) => (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) +(SubSaturatedMaskedUint8x16 x y mask) => (VPSUBUSBMasked128 x y (VPMOVVec8x16ToM mask)) +(SubSaturatedMaskedUint8x32 x y mask) => (VPSUBUSBMasked256 x y (VPMOVVec8x32ToM mask)) +(SubSaturatedMaskedUint8x64 x y mask) => (VPSUBUSBMasked512 x y (VPMOVVec8x64ToM mask)) +(SubSaturatedMaskedUint16x8 x y mask) => (VPSUBUSWMasked128 x y (VPMOVVec16x8ToM mask)) +(SubSaturatedMaskedUint16x16 x y mask) => (VPSUBUSWMasked256 x y (VPMOVVec16x16ToM mask)) +(SubSaturatedMaskedUint16x32 x y mask) => (VPSUBUSWMasked512 x y (VPMOVVec16x32ToM mask)) (TruncFloat32x4 x) => (VROUNDPS128 [3] x) (TruncFloat32x8 x) => (VROUNDPS256 [3] x) (TruncFloat64x2 x) => (VROUNDPD128 [3] x) @@ -1939,12 +1936,6 @@ (TruncScaledResidueMaskedFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+3] x 
(VPMOVVec64x2ToM mask)) (TruncScaledResidueMaskedFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) (TruncScaledResidueMaskedFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) -(UnsignedSignedQuadDotProdAccumulateInt32x4 ...) => (VPDPBUSD128 ...) -(UnsignedSignedQuadDotProdAccumulateInt32x8 ...) => (VPDPBUSD256 ...) -(UnsignedSignedQuadDotProdAccumulateInt32x16 ...) => (VPDPBUSD512 ...) -(UnsignedSignedQuadDotProdAccumulateMaskedInt32x4 x y z mask) => (VPDPBUSDMasked128 x y z (VPMOVVec32x4ToM mask)) -(UnsignedSignedQuadDotProdAccumulateMaskedInt32x8 x y z mask) => (VPDPBUSDMasked256 x y z (VPMOVVec32x8ToM mask)) -(UnsignedSignedQuadDotProdAccumulateMaskedInt32x16 x y z mask) => (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) (XorInt8x16 ...) => (VPXOR128 ...) (XorInt8x32 ...) => (VPXOR256 ...) (XorInt8x64 ...) => (VPXORD512 ...) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index ccda39f59d33d7..665372f79d9aa7 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -195,6 +195,18 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPADDSWMasked128", argLength: 3, reg: w2kw, asm: "VPADDSW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPADDSWMasked256", argLength: 3, reg: w2kw, asm: "VPADDSW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPADDSWMasked512", argLength: 3, reg: w2kw, asm: "VPADDSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDUSB128", argLength: 2, reg: v21, asm: "VPADDUSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDUSB256", argLength: 2, reg: v21, asm: "VPADDUSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDUSB512", argLength: 2, reg: w21, asm: "VPADDUSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDUSBMasked128", argLength: 3, reg: w2kw, asm: "VPADDUSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDUSBMasked256", argLength: 3, reg: w2kw, asm: "VPADDUSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDUSBMasked512", argLength: 3, reg: w2kw, asm: "VPADDUSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDUSW128", argLength: 2, reg: v21, asm: "VPADDUSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDUSW256", argLength: 2, reg: v21, asm: "VPADDUSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDUSW512", argLength: 2, reg: w21, asm: "VPADDUSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDUSWMasked128", argLength: 3, reg: w2kw, asm: "VPADDUSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDUSWMasked256", argLength: 3, reg: w2kw, asm: "VPADDUSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDUSWMasked512", argLength: 3, reg: w2kw, asm: "VPADDUSW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPADDW128", argLength: 2, reg: v21, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPADDW256", argLength: 2, reg: v21, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPADDW512", argLength: 2, reg: w21, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -497,22 +509,12 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, 
v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMINUWMasked512", argLength: 3, reg: w2kw, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULDQ128", argLength: 2, reg: v21, asm: "VPMULDQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULDQ256", argLength: 2, reg: v21, asm: "VPMULDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULDQ512", argLength: 2, reg: w21, asm: "VPMULDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULDQMasked128", argLength: 3, reg: w2kw, asm: "VPMULDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULDQMasked256", argLength: 3, reg: w2kw, asm: "VPMULDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULDQMasked512", argLength: 3, reg: w2kw, asm: "VPMULDQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULHUW128", argLength: 2, reg: v21, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULHUW256", argLength: 2, reg: v21, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULHUW512", argLength: 2, reg: w21, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULHUWMasked128", argLength: 3, reg: w2kw, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULHUWMasked256", argLength: 3, reg: w2kw, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULHUWMasked512", argLength: 3, reg: w2kw, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULHW128", argLength: 2, reg: v21, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULHW256", argLength: 2, reg: v21, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULHW512", argLength: 2, reg: w21, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULHWMasked128", argLength: 3, reg: w2kw, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULHWMasked256", argLength: 3, reg: w2kw, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULHWMasked512", argLength: 3, reg: w2kw, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULLD128", argLength: 2, reg: v21, asm: "VPMULLD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULLD256", argLength: 2, reg: v21, asm: "VPMULLD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULLD512", argLength: 2, reg: w21, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -533,10 +535,6 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMULLWMasked512", argLength: 3, reg: w2kw, asm: "VPMULLW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULUDQ128", argLength: 2, reg: v21, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULUDQ256", argLength: 2, reg: v21, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULUDQ512", argLength: 2, reg: w21, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULUDQMasked128", argLength: 3, reg: w2kw, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULUDQMasked256", argLength: 3, reg: w2kw, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - 
{name: "VPMULUDQMasked512", argLength: 3, reg: w2kw, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPOPCNTB128", argLength: 1, reg: w11, asm: "VPOPCNTB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPOPCNTB256", argLength: 1, reg: w11, asm: "VPOPCNTB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPOPCNTB512", argLength: 1, reg: w11, asm: "VPOPCNTB", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -775,6 +773,18 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPSUBSWMasked128", argLength: 3, reg: w2kw, asm: "VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBSWMasked256", argLength: 3, reg: w2kw, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBSWMasked512", argLength: 3, reg: w2kw, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBUSB128", argLength: 2, reg: v21, asm: "VPSUBUSB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBUSB256", argLength: 2, reg: v21, asm: "VPSUBUSB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBUSB512", argLength: 2, reg: w21, asm: "VPSUBUSB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBUSBMasked128", argLength: 3, reg: w2kw, asm: "VPSUBUSB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBUSBMasked256", argLength: 3, reg: w2kw, asm: "VPSUBUSB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBUSBMasked512", argLength: 3, reg: w2kw, asm: "VPSUBUSB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBUSW128", argLength: 2, reg: v21, asm: "VPSUBUSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBUSW256", argLength: 2, reg: v21, asm: "VPSUBUSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBUSW512", argLength: 2, reg: w21, asm: "VPSUBUSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBUSWMasked128", argLength: 3, reg: w2kw, asm: "VPSUBUSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBUSWMasked256", argLength: 3, reg: w2kw, asm: "VPSUBUSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBUSWMasked512", argLength: 3, reg: w2kw, asm: "VPSUBUSW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSUBW128", argLength: 2, reg: v21, asm: "VPSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBW256", argLength: 2, reg: v21, asm: "VPSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBW512", argLength: 2, reg: w21, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -879,9 +889,6 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VREDUCEPDMasked128", argLength: 2, reg: wkw, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VREDUCEPDMasked256", argLength: 2, reg: wkw, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VREDUCEPDMasked512", argLength: 2, reg: wkw, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VDPPS128", argLength: 2, reg: v21, asm: "VDPPS", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VDPPS256", argLength: 2, reg: v21, asm: "VDPPS", aux: "Int8", commutative: 
true, typ: "Vec256", resultInArg0: false}, - {name: "VDPPD128", argLength: 2, reg: v21, asm: "VDPPD", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VCMPPS128", argLength: 2, reg: v21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VCMPPS256", argLength: 2, reg: v21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VCMPPS512", argLength: 2, reg: w2k, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index d0a4a494b181c0..45c62f95a7ff85 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -3,36 +3,48 @@ package main func simdGenericOps() []opData { return []opData{ - {name: "AbsoluteInt8x16", argLength: 1, commutative: false}, - {name: "AbsoluteInt8x32", argLength: 1, commutative: false}, - {name: "AbsoluteInt8x64", argLength: 1, commutative: false}, - {name: "AbsoluteInt16x8", argLength: 1, commutative: false}, - {name: "AbsoluteInt16x16", argLength: 1, commutative: false}, - {name: "AbsoluteInt16x32", argLength: 1, commutative: false}, - {name: "AbsoluteInt32x4", argLength: 1, commutative: false}, - {name: "AbsoluteInt32x8", argLength: 1, commutative: false}, - {name: "AbsoluteInt32x16", argLength: 1, commutative: false}, - {name: "AbsoluteInt64x2", argLength: 1, commutative: false}, - {name: "AbsoluteInt64x4", argLength: 1, commutative: false}, - {name: "AbsoluteInt64x8", argLength: 1, commutative: false}, - {name: "AbsoluteMaskedInt8x16", argLength: 2, commutative: false}, - {name: "AbsoluteMaskedInt8x32", argLength: 2, commutative: false}, - {name: "AbsoluteMaskedInt8x64", argLength: 2, commutative: false}, - {name: "AbsoluteMaskedInt16x8", argLength: 2, commutative: false}, - {name: "AbsoluteMaskedInt16x16", argLength: 2, commutative: false}, - {name: "AbsoluteMaskedInt16x32", argLength: 2, commutative: false}, - {name: "AbsoluteMaskedInt32x4", argLength: 2, commutative: false}, - {name: "AbsoluteMaskedInt32x8", argLength: 2, commutative: false}, - {name: "AbsoluteMaskedInt32x16", argLength: 2, commutative: false}, - {name: "AbsoluteMaskedInt64x2", argLength: 2, commutative: false}, - {name: "AbsoluteMaskedInt64x4", argLength: 2, commutative: false}, - {name: "AbsoluteMaskedInt64x8", argLength: 2, commutative: false}, - {name: "AddDotProdInt32x4", argLength: 3, commutative: false}, - {name: "AddDotProdInt32x8", argLength: 3, commutative: false}, - {name: "AddDotProdInt32x16", argLength: 3, commutative: false}, - {name: "AddDotProdMaskedInt32x4", argLength: 4, commutative: false}, - {name: "AddDotProdMaskedInt32x8", argLength: 4, commutative: false}, - {name: "AddDotProdMaskedInt32x16", argLength: 4, commutative: false}, + {name: "AbsInt8x16", argLength: 1, commutative: false}, + {name: "AbsInt8x32", argLength: 1, commutative: false}, + {name: "AbsInt8x64", argLength: 1, commutative: false}, + {name: "AbsInt16x8", argLength: 1, commutative: false}, + {name: "AbsInt16x16", argLength: 1, commutative: false}, + {name: "AbsInt16x32", argLength: 1, commutative: false}, + {name: "AbsInt32x4", argLength: 1, commutative: false}, + {name: "AbsInt32x8", argLength: 1, commutative: false}, + {name: "AbsInt32x16", argLength: 1, commutative: false}, + {name: "AbsInt64x2", argLength: 1, commutative: false}, + {name: "AbsInt64x4", argLength: 1, commutative: false}, 
+ {name: "AbsInt64x8", argLength: 1, commutative: false}, + {name: "AbsMaskedInt8x16", argLength: 2, commutative: false}, + {name: "AbsMaskedInt8x32", argLength: 2, commutative: false}, + {name: "AbsMaskedInt8x64", argLength: 2, commutative: false}, + {name: "AbsMaskedInt16x8", argLength: 2, commutative: false}, + {name: "AbsMaskedInt16x16", argLength: 2, commutative: false}, + {name: "AbsMaskedInt16x32", argLength: 2, commutative: false}, + {name: "AbsMaskedInt32x4", argLength: 2, commutative: false}, + {name: "AbsMaskedInt32x8", argLength: 2, commutative: false}, + {name: "AbsMaskedInt32x16", argLength: 2, commutative: false}, + {name: "AbsMaskedInt64x2", argLength: 2, commutative: false}, + {name: "AbsMaskedInt64x4", argLength: 2, commutative: false}, + {name: "AbsMaskedInt64x8", argLength: 2, commutative: false}, + {name: "AddDotProdPairsSaturatedInt32x4", argLength: 3, commutative: false}, + {name: "AddDotProdPairsSaturatedInt32x8", argLength: 3, commutative: false}, + {name: "AddDotProdPairsSaturatedInt32x16", argLength: 3, commutative: false}, + {name: "AddDotProdPairsSaturatedMaskedInt32x4", argLength: 4, commutative: false}, + {name: "AddDotProdPairsSaturatedMaskedInt32x8", argLength: 4, commutative: false}, + {name: "AddDotProdPairsSaturatedMaskedInt32x16", argLength: 4, commutative: false}, + {name: "AddDotProdQuadrupleInt32x4", argLength: 3, commutative: false}, + {name: "AddDotProdQuadrupleInt32x8", argLength: 3, commutative: false}, + {name: "AddDotProdQuadrupleInt32x16", argLength: 3, commutative: false}, + {name: "AddDotProdQuadrupleMaskedInt32x4", argLength: 4, commutative: false}, + {name: "AddDotProdQuadrupleMaskedInt32x8", argLength: 4, commutative: false}, + {name: "AddDotProdQuadrupleMaskedInt32x16", argLength: 4, commutative: false}, + {name: "AddDotProdQuadrupleSaturatedInt32x4", argLength: 3, commutative: false}, + {name: "AddDotProdQuadrupleSaturatedInt32x8", argLength: 3, commutative: false}, + {name: "AddDotProdQuadrupleSaturatedInt32x16", argLength: 3, commutative: false}, + {name: "AddDotProdQuadrupleSaturatedMaskedInt32x4", argLength: 4, commutative: false}, + {name: "AddDotProdQuadrupleSaturatedMaskedInt32x8", argLength: 4, commutative: false}, + {name: "AddDotProdQuadrupleSaturatedMaskedInt32x16", argLength: 4, commutative: false}, {name: "AddFloat32x4", argLength: 2, commutative: true}, {name: "AddFloat32x8", argLength: 2, commutative: true}, {name: "AddFloat32x16", argLength: 2, commutative: true}, @@ -207,30 +219,6 @@ func simdGenericOps() []opData { {name: "AndUint64x2", argLength: 2, commutative: true}, {name: "AndUint64x4", argLength: 2, commutative: true}, {name: "AndUint64x8", argLength: 2, commutative: true}, - {name: "ApproximateReciprocalFloat32x4", argLength: 1, commutative: false}, - {name: "ApproximateReciprocalFloat32x8", argLength: 1, commutative: false}, - {name: "ApproximateReciprocalFloat32x16", argLength: 1, commutative: false}, - {name: "ApproximateReciprocalFloat64x2", argLength: 1, commutative: false}, - {name: "ApproximateReciprocalFloat64x4", argLength: 1, commutative: false}, - {name: "ApproximateReciprocalFloat64x8", argLength: 1, commutative: false}, - {name: "ApproximateReciprocalMaskedFloat32x4", argLength: 2, commutative: false}, - {name: "ApproximateReciprocalMaskedFloat32x8", argLength: 2, commutative: false}, - {name: "ApproximateReciprocalMaskedFloat32x16", argLength: 2, commutative: false}, - {name: "ApproximateReciprocalMaskedFloat64x2", argLength: 2, commutative: false}, - {name: "ApproximateReciprocalMaskedFloat64x4", 
argLength: 2, commutative: false}, - {name: "ApproximateReciprocalMaskedFloat64x8", argLength: 2, commutative: false}, - {name: "ApproximateReciprocalOfSqrtFloat32x4", argLength: 1, commutative: false}, - {name: "ApproximateReciprocalOfSqrtFloat32x8", argLength: 1, commutative: false}, - {name: "ApproximateReciprocalOfSqrtFloat32x16", argLength: 1, commutative: false}, - {name: "ApproximateReciprocalOfSqrtFloat64x2", argLength: 1, commutative: false}, - {name: "ApproximateReciprocalOfSqrtFloat64x4", argLength: 1, commutative: false}, - {name: "ApproximateReciprocalOfSqrtFloat64x8", argLength: 1, commutative: false}, - {name: "ApproximateReciprocalOfSqrtMaskedFloat32x4", argLength: 2, commutative: false}, - {name: "ApproximateReciprocalOfSqrtMaskedFloat32x8", argLength: 2, commutative: false}, - {name: "ApproximateReciprocalOfSqrtMaskedFloat32x16", argLength: 2, commutative: false}, - {name: "ApproximateReciprocalOfSqrtMaskedFloat64x2", argLength: 2, commutative: false}, - {name: "ApproximateReciprocalOfSqrtMaskedFloat64x4", argLength: 2, commutative: false}, - {name: "ApproximateReciprocalOfSqrtMaskedFloat64x8", argLength: 2, commutative: false}, {name: "AverageMaskedUint8x16", argLength: 3, commutative: true}, {name: "AverageMaskedUint8x32", argLength: 3, commutative: true}, {name: "AverageMaskedUint8x64", argLength: 3, commutative: true}, @@ -289,6 +277,12 @@ func simdGenericOps() []opData { {name: "ConvertToUint32MaskedFloat32x4", argLength: 2, commutative: false}, {name: "ConvertToUint32MaskedFloat32x8", argLength: 2, commutative: false}, {name: "ConvertToUint32MaskedFloat32x16", argLength: 2, commutative: false}, + {name: "CopySignInt8x16", argLength: 2, commutative: false}, + {name: "CopySignInt8x32", argLength: 2, commutative: false}, + {name: "CopySignInt16x8", argLength: 2, commutative: false}, + {name: "CopySignInt16x16", argLength: 2, commutative: false}, + {name: "CopySignInt32x4", argLength: 2, commutative: false}, + {name: "CopySignInt32x8", argLength: 2, commutative: false}, {name: "DivFloat32x4", argLength: 2, commutative: false}, {name: "DivFloat32x8", argLength: 2, commutative: false}, {name: "DivFloat32x16", argLength: 2, commutative: false}, @@ -301,9 +295,18 @@ func simdGenericOps() []opData { {name: "DivMaskedFloat64x2", argLength: 3, commutative: false}, {name: "DivMaskedFloat64x4", argLength: 3, commutative: false}, {name: "DivMaskedFloat64x8", argLength: 3, commutative: false}, - {name: "DotProdBroadcastFloat32x4", argLength: 2, commutative: true}, - {name: "DotProdBroadcastFloat32x8", argLength: 2, commutative: true}, - {name: "DotProdBroadcastFloat64x2", argLength: 2, commutative: true}, + {name: "DotProdPairsInt16x8", argLength: 2, commutative: false}, + {name: "DotProdPairsInt16x16", argLength: 2, commutative: false}, + {name: "DotProdPairsInt16x32", argLength: 2, commutative: false}, + {name: "DotProdPairsMaskedInt16x8", argLength: 3, commutative: false}, + {name: "DotProdPairsMaskedInt16x16", argLength: 3, commutative: false}, + {name: "DotProdPairsMaskedInt16x32", argLength: 3, commutative: false}, + {name: "DotProdPairsSaturatedMaskedUint8x16", argLength: 3, commutative: false}, + {name: "DotProdPairsSaturatedMaskedUint8x32", argLength: 3, commutative: false}, + {name: "DotProdPairsSaturatedMaskedUint8x64", argLength: 3, commutative: false}, + {name: "DotProdPairsSaturatedUint8x16", argLength: 2, commutative: false}, + {name: "DotProdPairsSaturatedUint8x32", argLength: 2, commutative: false}, + {name: "DotProdPairsSaturatedUint8x64", argLength: 2, 
commutative: false}, {name: "EqualFloat32x4", argLength: 2, commutative: true}, {name: "EqualFloat32x8", argLength: 2, commutative: true}, {name: "EqualFloat32x16", argLength: 2, commutative: true}, @@ -398,42 +401,6 @@ func simdGenericOps() []opData { {name: "FloorFloat32x8", argLength: 1, commutative: false}, {name: "FloorFloat64x2", argLength: 1, commutative: false}, {name: "FloorFloat64x4", argLength: 1, commutative: false}, - {name: "FusedMultiplyAddFloat32x4", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddFloat32x8", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddFloat32x16", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddFloat64x2", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddFloat64x4", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddFloat64x8", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddMaskedFloat32x4", argLength: 4, commutative: false}, - {name: "FusedMultiplyAddMaskedFloat32x8", argLength: 4, commutative: false}, - {name: "FusedMultiplyAddMaskedFloat32x16", argLength: 4, commutative: false}, - {name: "FusedMultiplyAddMaskedFloat64x2", argLength: 4, commutative: false}, - {name: "FusedMultiplyAddMaskedFloat64x4", argLength: 4, commutative: false}, - {name: "FusedMultiplyAddMaskedFloat64x8", argLength: 4, commutative: false}, - {name: "FusedMultiplyAddSubFloat32x4", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSubFloat32x8", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSubFloat32x16", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSubFloat64x2", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSubFloat64x4", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSubFloat64x8", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSubMaskedFloat32x4", argLength: 4, commutative: false}, - {name: "FusedMultiplyAddSubMaskedFloat32x8", argLength: 4, commutative: false}, - {name: "FusedMultiplyAddSubMaskedFloat32x16", argLength: 4, commutative: false}, - {name: "FusedMultiplyAddSubMaskedFloat64x2", argLength: 4, commutative: false}, - {name: "FusedMultiplyAddSubMaskedFloat64x4", argLength: 4, commutative: false}, - {name: "FusedMultiplyAddSubMaskedFloat64x8", argLength: 4, commutative: false}, - {name: "FusedMultiplySubAddFloat32x4", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAddFloat32x8", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAddFloat32x16", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAddFloat64x2", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAddFloat64x4", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAddFloat64x8", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAddMaskedFloat32x4", argLength: 4, commutative: false}, - {name: "FusedMultiplySubAddMaskedFloat32x8", argLength: 4, commutative: false}, - {name: "FusedMultiplySubAddMaskedFloat32x16", argLength: 4, commutative: false}, - {name: "FusedMultiplySubAddMaskedFloat64x2", argLength: 4, commutative: false}, - {name: "FusedMultiplySubAddMaskedFloat64x4", argLength: 4, commutative: false}, - {name: "FusedMultiplySubAddMaskedFloat64x8", argLength: 4, commutative: false}, {name: "GaloisFieldMulMaskedUint8x16", argLength: 3, commutative: false}, {name: "GaloisFieldMulMaskedUint8x32", argLength: 3, commutative: false}, {name: "GaloisFieldMulMaskedUint8x64", argLength: 3, commutative: false}, @@ -852,22 +819,34 @@ func simdGenericOps() []opData { {name: "MinUint64x2", 
argLength: 2, commutative: true}, {name: "MinUint64x4", argLength: 2, commutative: true}, {name: "MinUint64x8", argLength: 2, commutative: true}, + {name: "MulAddFloat32x4", argLength: 3, commutative: false}, + {name: "MulAddFloat32x8", argLength: 3, commutative: false}, + {name: "MulAddFloat32x16", argLength: 3, commutative: false}, + {name: "MulAddFloat64x2", argLength: 3, commutative: false}, + {name: "MulAddFloat64x4", argLength: 3, commutative: false}, + {name: "MulAddFloat64x8", argLength: 3, commutative: false}, + {name: "MulAddMaskedFloat32x4", argLength: 4, commutative: false}, + {name: "MulAddMaskedFloat32x8", argLength: 4, commutative: false}, + {name: "MulAddMaskedFloat32x16", argLength: 4, commutative: false}, + {name: "MulAddMaskedFloat64x2", argLength: 4, commutative: false}, + {name: "MulAddMaskedFloat64x4", argLength: 4, commutative: false}, + {name: "MulAddMaskedFloat64x8", argLength: 4, commutative: false}, + {name: "MulAddSubFloat32x4", argLength: 3, commutative: false}, + {name: "MulAddSubFloat32x8", argLength: 3, commutative: false}, + {name: "MulAddSubFloat32x16", argLength: 3, commutative: false}, + {name: "MulAddSubFloat64x2", argLength: 3, commutative: false}, + {name: "MulAddSubFloat64x4", argLength: 3, commutative: false}, + {name: "MulAddSubFloat64x8", argLength: 3, commutative: false}, + {name: "MulAddSubMaskedFloat32x4", argLength: 4, commutative: false}, + {name: "MulAddSubMaskedFloat32x8", argLength: 4, commutative: false}, + {name: "MulAddSubMaskedFloat32x16", argLength: 4, commutative: false}, + {name: "MulAddSubMaskedFloat64x2", argLength: 4, commutative: false}, + {name: "MulAddSubMaskedFloat64x4", argLength: 4, commutative: false}, + {name: "MulAddSubMaskedFloat64x8", argLength: 4, commutative: false}, {name: "MulEvenWidenInt32x4", argLength: 2, commutative: true}, {name: "MulEvenWidenInt32x8", argLength: 2, commutative: true}, - {name: "MulEvenWidenInt64x2", argLength: 2, commutative: true}, - {name: "MulEvenWidenInt64x4", argLength: 2, commutative: true}, - {name: "MulEvenWidenInt64x8", argLength: 2, commutative: true}, - {name: "MulEvenWidenMaskedInt64x2", argLength: 3, commutative: true}, - {name: "MulEvenWidenMaskedInt64x4", argLength: 3, commutative: true}, - {name: "MulEvenWidenMaskedInt64x8", argLength: 3, commutative: true}, - {name: "MulEvenWidenMaskedUint64x2", argLength: 3, commutative: true}, - {name: "MulEvenWidenMaskedUint64x4", argLength: 3, commutative: true}, - {name: "MulEvenWidenMaskedUint64x8", argLength: 3, commutative: true}, {name: "MulEvenWidenUint32x4", argLength: 2, commutative: true}, {name: "MulEvenWidenUint32x8", argLength: 2, commutative: true}, - {name: "MulEvenWidenUint64x2", argLength: 2, commutative: true}, - {name: "MulEvenWidenUint64x4", argLength: 2, commutative: true}, - {name: "MulEvenWidenUint64x8", argLength: 2, commutative: true}, {name: "MulFloat32x4", argLength: 2, commutative: true}, {name: "MulFloat32x8", argLength: 2, commutative: true}, {name: "MulFloat32x16", argLength: 2, commutative: true}, @@ -880,12 +859,6 @@ func simdGenericOps() []opData { {name: "MulHighMaskedInt16x8", argLength: 3, commutative: true}, {name: "MulHighMaskedInt16x16", argLength: 3, commutative: true}, {name: "MulHighMaskedInt16x32", argLength: 3, commutative: true}, - {name: "MulHighMaskedUint16x8", argLength: 3, commutative: true}, - {name: "MulHighMaskedUint16x16", argLength: 3, commutative: true}, - {name: "MulHighMaskedUint16x32", argLength: 3, commutative: true}, - {name: "MulHighUint16x8", argLength: 2, commutative: true}, - 
{name: "MulHighUint16x16", argLength: 2, commutative: true}, - {name: "MulHighUint16x32", argLength: 2, commutative: true}, {name: "MulInt16x8", argLength: 2, commutative: true}, {name: "MulInt16x16", argLength: 2, commutative: true}, {name: "MulInt16x32", argLength: 2, commutative: true}, @@ -910,6 +883,36 @@ func simdGenericOps() []opData { {name: "MulMaskedInt64x2", argLength: 3, commutative: true}, {name: "MulMaskedInt64x4", argLength: 3, commutative: true}, {name: "MulMaskedInt64x8", argLength: 3, commutative: true}, + {name: "MulMaskedUint16x8", argLength: 3, commutative: true}, + {name: "MulMaskedUint16x16", argLength: 3, commutative: true}, + {name: "MulMaskedUint16x32", argLength: 3, commutative: true}, + {name: "MulMaskedUint32x4", argLength: 3, commutative: true}, + {name: "MulMaskedUint32x8", argLength: 3, commutative: true}, + {name: "MulMaskedUint32x16", argLength: 3, commutative: true}, + {name: "MulMaskedUint64x2", argLength: 3, commutative: true}, + {name: "MulMaskedUint64x4", argLength: 3, commutative: true}, + {name: "MulMaskedUint64x8", argLength: 3, commutative: true}, + {name: "MulSubAddFloat32x4", argLength: 3, commutative: false}, + {name: "MulSubAddFloat32x8", argLength: 3, commutative: false}, + {name: "MulSubAddFloat32x16", argLength: 3, commutative: false}, + {name: "MulSubAddFloat64x2", argLength: 3, commutative: false}, + {name: "MulSubAddFloat64x4", argLength: 3, commutative: false}, + {name: "MulSubAddFloat64x8", argLength: 3, commutative: false}, + {name: "MulSubAddMaskedFloat32x4", argLength: 4, commutative: false}, + {name: "MulSubAddMaskedFloat32x8", argLength: 4, commutative: false}, + {name: "MulSubAddMaskedFloat32x16", argLength: 4, commutative: false}, + {name: "MulSubAddMaskedFloat64x2", argLength: 4, commutative: false}, + {name: "MulSubAddMaskedFloat64x4", argLength: 4, commutative: false}, + {name: "MulSubAddMaskedFloat64x8", argLength: 4, commutative: false}, + {name: "MulUint16x8", argLength: 2, commutative: true}, + {name: "MulUint16x16", argLength: 2, commutative: true}, + {name: "MulUint16x32", argLength: 2, commutative: true}, + {name: "MulUint32x4", argLength: 2, commutative: true}, + {name: "MulUint32x8", argLength: 2, commutative: true}, + {name: "MulUint32x16", argLength: 2, commutative: true}, + {name: "MulUint64x2", argLength: 2, commutative: true}, + {name: "MulUint64x4", argLength: 2, commutative: true}, + {name: "MulUint64x8", argLength: 2, commutative: true}, {name: "NotEqualFloat32x4", argLength: 2, commutative: true}, {name: "NotEqualFloat32x8", argLength: 2, commutative: true}, {name: "NotEqualFloat32x16", argLength: 2, commutative: true}, @@ -970,6 +973,54 @@ func simdGenericOps() []opData { {name: "NotEqualUint64x2", argLength: 2, commutative: true}, {name: "NotEqualUint64x4", argLength: 2, commutative: true}, {name: "NotEqualUint64x8", argLength: 2, commutative: true}, + {name: "OnesCountInt8x16", argLength: 1, commutative: false}, + {name: "OnesCountInt8x32", argLength: 1, commutative: false}, + {name: "OnesCountInt8x64", argLength: 1, commutative: false}, + {name: "OnesCountInt16x8", argLength: 1, commutative: false}, + {name: "OnesCountInt16x16", argLength: 1, commutative: false}, + {name: "OnesCountInt16x32", argLength: 1, commutative: false}, + {name: "OnesCountInt32x4", argLength: 1, commutative: false}, + {name: "OnesCountInt32x8", argLength: 1, commutative: false}, + {name: "OnesCountInt32x16", argLength: 1, commutative: false}, + {name: "OnesCountInt64x2", argLength: 1, commutative: false}, + {name: 
"OnesCountInt64x4", argLength: 1, commutative: false}, + {name: "OnesCountInt64x8", argLength: 1, commutative: false}, + {name: "OnesCountMaskedInt8x16", argLength: 2, commutative: false}, + {name: "OnesCountMaskedInt8x32", argLength: 2, commutative: false}, + {name: "OnesCountMaskedInt8x64", argLength: 2, commutative: false}, + {name: "OnesCountMaskedInt16x8", argLength: 2, commutative: false}, + {name: "OnesCountMaskedInt16x16", argLength: 2, commutative: false}, + {name: "OnesCountMaskedInt16x32", argLength: 2, commutative: false}, + {name: "OnesCountMaskedInt32x4", argLength: 2, commutative: false}, + {name: "OnesCountMaskedInt32x8", argLength: 2, commutative: false}, + {name: "OnesCountMaskedInt32x16", argLength: 2, commutative: false}, + {name: "OnesCountMaskedInt64x2", argLength: 2, commutative: false}, + {name: "OnesCountMaskedInt64x4", argLength: 2, commutative: false}, + {name: "OnesCountMaskedInt64x8", argLength: 2, commutative: false}, + {name: "OnesCountMaskedUint8x16", argLength: 2, commutative: false}, + {name: "OnesCountMaskedUint8x32", argLength: 2, commutative: false}, + {name: "OnesCountMaskedUint8x64", argLength: 2, commutative: false}, + {name: "OnesCountMaskedUint16x8", argLength: 2, commutative: false}, + {name: "OnesCountMaskedUint16x16", argLength: 2, commutative: false}, + {name: "OnesCountMaskedUint16x32", argLength: 2, commutative: false}, + {name: "OnesCountMaskedUint32x4", argLength: 2, commutative: false}, + {name: "OnesCountMaskedUint32x8", argLength: 2, commutative: false}, + {name: "OnesCountMaskedUint32x16", argLength: 2, commutative: false}, + {name: "OnesCountMaskedUint64x2", argLength: 2, commutative: false}, + {name: "OnesCountMaskedUint64x4", argLength: 2, commutative: false}, + {name: "OnesCountMaskedUint64x8", argLength: 2, commutative: false}, + {name: "OnesCountUint8x16", argLength: 1, commutative: false}, + {name: "OnesCountUint8x32", argLength: 1, commutative: false}, + {name: "OnesCountUint8x64", argLength: 1, commutative: false}, + {name: "OnesCountUint16x8", argLength: 1, commutative: false}, + {name: "OnesCountUint16x16", argLength: 1, commutative: false}, + {name: "OnesCountUint16x32", argLength: 1, commutative: false}, + {name: "OnesCountUint32x4", argLength: 1, commutative: false}, + {name: "OnesCountUint32x8", argLength: 1, commutative: false}, + {name: "OnesCountUint32x16", argLength: 1, commutative: false}, + {name: "OnesCountUint64x2", argLength: 1, commutative: false}, + {name: "OnesCountUint64x4", argLength: 1, commutative: false}, + {name: "OnesCountUint64x8", argLength: 1, commutative: false}, {name: "OrInt8x16", argLength: 2, commutative: true}, {name: "OrInt8x32", argLength: 2, commutative: true}, {name: "OrInt8x64", argLength: 2, commutative: true}, @@ -1006,12 +1057,6 @@ func simdGenericOps() []opData { {name: "OrUint64x2", argLength: 2, commutative: true}, {name: "OrUint64x4", argLength: 2, commutative: true}, {name: "OrUint64x8", argLength: 2, commutative: true}, - {name: "PairDotProdInt16x8", argLength: 2, commutative: false}, - {name: "PairDotProdInt16x16", argLength: 2, commutative: false}, - {name: "PairDotProdInt16x32", argLength: 2, commutative: false}, - {name: "PairDotProdMaskedInt16x8", argLength: 3, commutative: false}, - {name: "PairDotProdMaskedInt16x16", argLength: 3, commutative: false}, - {name: "PairDotProdMaskedInt16x32", argLength: 3, commutative: false}, {name: "Permute2Float32x4", argLength: 3, commutative: false}, {name: "Permute2Float32x8", argLength: 3, commutative: false}, {name: 
"Permute2Float32x16", argLength: 3, commutative: false}, @@ -1120,54 +1165,30 @@ func simdGenericOps() []opData { {name: "PermuteUint32x16", argLength: 2, commutative: false}, {name: "PermuteUint64x4", argLength: 2, commutative: false}, {name: "PermuteUint64x8", argLength: 2, commutative: false}, - {name: "PopCountInt8x16", argLength: 1, commutative: false}, - {name: "PopCountInt8x32", argLength: 1, commutative: false}, - {name: "PopCountInt8x64", argLength: 1, commutative: false}, - {name: "PopCountInt16x8", argLength: 1, commutative: false}, - {name: "PopCountInt16x16", argLength: 1, commutative: false}, - {name: "PopCountInt16x32", argLength: 1, commutative: false}, - {name: "PopCountInt32x4", argLength: 1, commutative: false}, - {name: "PopCountInt32x8", argLength: 1, commutative: false}, - {name: "PopCountInt32x16", argLength: 1, commutative: false}, - {name: "PopCountInt64x2", argLength: 1, commutative: false}, - {name: "PopCountInt64x4", argLength: 1, commutative: false}, - {name: "PopCountInt64x8", argLength: 1, commutative: false}, - {name: "PopCountMaskedInt8x16", argLength: 2, commutative: false}, - {name: "PopCountMaskedInt8x32", argLength: 2, commutative: false}, - {name: "PopCountMaskedInt8x64", argLength: 2, commutative: false}, - {name: "PopCountMaskedInt16x8", argLength: 2, commutative: false}, - {name: "PopCountMaskedInt16x16", argLength: 2, commutative: false}, - {name: "PopCountMaskedInt16x32", argLength: 2, commutative: false}, - {name: "PopCountMaskedInt32x4", argLength: 2, commutative: false}, - {name: "PopCountMaskedInt32x8", argLength: 2, commutative: false}, - {name: "PopCountMaskedInt32x16", argLength: 2, commutative: false}, - {name: "PopCountMaskedInt64x2", argLength: 2, commutative: false}, - {name: "PopCountMaskedInt64x4", argLength: 2, commutative: false}, - {name: "PopCountMaskedInt64x8", argLength: 2, commutative: false}, - {name: "PopCountMaskedUint8x16", argLength: 2, commutative: false}, - {name: "PopCountMaskedUint8x32", argLength: 2, commutative: false}, - {name: "PopCountMaskedUint8x64", argLength: 2, commutative: false}, - {name: "PopCountMaskedUint16x8", argLength: 2, commutative: false}, - {name: "PopCountMaskedUint16x16", argLength: 2, commutative: false}, - {name: "PopCountMaskedUint16x32", argLength: 2, commutative: false}, - {name: "PopCountMaskedUint32x4", argLength: 2, commutative: false}, - {name: "PopCountMaskedUint32x8", argLength: 2, commutative: false}, - {name: "PopCountMaskedUint32x16", argLength: 2, commutative: false}, - {name: "PopCountMaskedUint64x2", argLength: 2, commutative: false}, - {name: "PopCountMaskedUint64x4", argLength: 2, commutative: false}, - {name: "PopCountMaskedUint64x8", argLength: 2, commutative: false}, - {name: "PopCountUint8x16", argLength: 1, commutative: false}, - {name: "PopCountUint8x32", argLength: 1, commutative: false}, - {name: "PopCountUint8x64", argLength: 1, commutative: false}, - {name: "PopCountUint16x8", argLength: 1, commutative: false}, - {name: "PopCountUint16x16", argLength: 1, commutative: false}, - {name: "PopCountUint16x32", argLength: 1, commutative: false}, - {name: "PopCountUint32x4", argLength: 1, commutative: false}, - {name: "PopCountUint32x8", argLength: 1, commutative: false}, - {name: "PopCountUint32x16", argLength: 1, commutative: false}, - {name: "PopCountUint64x2", argLength: 1, commutative: false}, - {name: "PopCountUint64x4", argLength: 1, commutative: false}, - {name: "PopCountUint64x8", argLength: 1, commutative: false}, + {name: "ReciprocalFloat32x4", argLength: 1, 
commutative: false}, + {name: "ReciprocalFloat32x8", argLength: 1, commutative: false}, + {name: "ReciprocalFloat32x16", argLength: 1, commutative: false}, + {name: "ReciprocalFloat64x2", argLength: 1, commutative: false}, + {name: "ReciprocalFloat64x4", argLength: 1, commutative: false}, + {name: "ReciprocalFloat64x8", argLength: 1, commutative: false}, + {name: "ReciprocalMaskedFloat32x4", argLength: 2, commutative: false}, + {name: "ReciprocalMaskedFloat32x8", argLength: 2, commutative: false}, + {name: "ReciprocalMaskedFloat32x16", argLength: 2, commutative: false}, + {name: "ReciprocalMaskedFloat64x2", argLength: 2, commutative: false}, + {name: "ReciprocalMaskedFloat64x4", argLength: 2, commutative: false}, + {name: "ReciprocalMaskedFloat64x8", argLength: 2, commutative: false}, + {name: "ReciprocalSqrtFloat32x4", argLength: 1, commutative: false}, + {name: "ReciprocalSqrtFloat32x8", argLength: 1, commutative: false}, + {name: "ReciprocalSqrtFloat32x16", argLength: 1, commutative: false}, + {name: "ReciprocalSqrtFloat64x2", argLength: 1, commutative: false}, + {name: "ReciprocalSqrtFloat64x4", argLength: 1, commutative: false}, + {name: "ReciprocalSqrtFloat64x8", argLength: 1, commutative: false}, + {name: "ReciprocalSqrtMaskedFloat32x4", argLength: 2, commutative: false}, + {name: "ReciprocalSqrtMaskedFloat32x8", argLength: 2, commutative: false}, + {name: "ReciprocalSqrtMaskedFloat32x16", argLength: 2, commutative: false}, + {name: "ReciprocalSqrtMaskedFloat64x2", argLength: 2, commutative: false}, + {name: "ReciprocalSqrtMaskedFloat64x4", argLength: 2, commutative: false}, + {name: "ReciprocalSqrtMaskedFloat64x8", argLength: 2, commutative: false}, {name: "RotateLeftInt32x4", argLength: 2, commutative: false}, {name: "RotateLeftInt32x8", argLength: 2, commutative: false}, {name: "RotateLeftInt32x16", argLength: 2, commutative: false}, @@ -1216,28 +1237,10 @@ func simdGenericOps() []opData { {name: "RotateRightUint64x2", argLength: 2, commutative: false}, {name: "RotateRightUint64x4", argLength: 2, commutative: false}, {name: "RotateRightUint64x8", argLength: 2, commutative: false}, - {name: "RoundFloat32x4", argLength: 1, commutative: false}, - {name: "RoundFloat32x8", argLength: 1, commutative: false}, - {name: "RoundFloat64x2", argLength: 1, commutative: false}, - {name: "RoundFloat64x4", argLength: 1, commutative: false}, - {name: "SaturatedAddDotProdInt32x4", argLength: 3, commutative: false}, - {name: "SaturatedAddDotProdInt32x8", argLength: 3, commutative: false}, - {name: "SaturatedAddDotProdInt32x16", argLength: 3, commutative: false}, - {name: "SaturatedAddDotProdMaskedInt32x4", argLength: 4, commutative: false}, - {name: "SaturatedAddDotProdMaskedInt32x8", argLength: 4, commutative: false}, - {name: "SaturatedAddDotProdMaskedInt32x16", argLength: 4, commutative: false}, - {name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x16", argLength: 3, commutative: false}, - {name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x32", argLength: 3, commutative: false}, - {name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x64", argLength: 3, commutative: false}, - {name: "SaturatedUnsignedSignedPairDotProdUint8x16", argLength: 2, commutative: false}, - {name: "SaturatedUnsignedSignedPairDotProdUint8x32", argLength: 2, commutative: false}, - {name: "SaturatedUnsignedSignedPairDotProdUint8x64", argLength: 2, commutative: false}, - {name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x4", argLength: 3, commutative: false}, - {name: 
"SaturatedUnsignedSignedQuadDotProdAccumulateInt32x8", argLength: 3, commutative: false}, - {name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x16", argLength: 3, commutative: false}, - {name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4", argLength: 4, commutative: false}, - {name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8", argLength: 4, commutative: false}, - {name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16", argLength: 4, commutative: false}, + {name: "RoundToEvenFloat32x4", argLength: 1, commutative: false}, + {name: "RoundToEvenFloat32x8", argLength: 1, commutative: false}, + {name: "RoundToEvenFloat64x2", argLength: 1, commutative: false}, + {name: "RoundToEvenFloat64x4", argLength: 1, commutative: false}, {name: "ScaleFloat32x4", argLength: 2, commutative: false}, {name: "ScaleFloat32x8", argLength: 2, commutative: false}, {name: "ScaleFloat32x16", argLength: 2, commutative: false}, @@ -1506,12 +1509,6 @@ func simdGenericOps() []opData { {name: "ShiftRightUint64x2", argLength: 2, commutative: false}, {name: "ShiftRightUint64x4", argLength: 2, commutative: false}, {name: "ShiftRightUint64x8", argLength: 2, commutative: false}, - {name: "SignInt8x16", argLength: 2, commutative: false}, - {name: "SignInt8x32", argLength: 2, commutative: false}, - {name: "SignInt16x8", argLength: 2, commutative: false}, - {name: "SignInt16x16", argLength: 2, commutative: false}, - {name: "SignInt32x4", argLength: 2, commutative: false}, - {name: "SignInt32x8", argLength: 2, commutative: false}, {name: "SqrtFloat32x4", argLength: 1, commutative: false}, {name: "SqrtFloat32x8", argLength: 1, commutative: false}, {name: "SqrtFloat32x16", argLength: 1, commutative: false}, @@ -1626,12 +1623,6 @@ func simdGenericOps() []opData { {name: "TruncFloat32x8", argLength: 1, commutative: false}, {name: "TruncFloat64x2", argLength: 1, commutative: false}, {name: "TruncFloat64x4", argLength: 1, commutative: false}, - {name: "UnsignedSignedQuadDotProdAccumulateInt32x4", argLength: 3, commutative: false}, - {name: "UnsignedSignedQuadDotProdAccumulateInt32x8", argLength: 3, commutative: false}, - {name: "UnsignedSignedQuadDotProdAccumulateInt32x16", argLength: 3, commutative: false}, - {name: "UnsignedSignedQuadDotProdAccumulateMaskedInt32x4", argLength: 4, commutative: false}, - {name: "UnsignedSignedQuadDotProdAccumulateMaskedInt32x8", argLength: 4, commutative: false}, - {name: "UnsignedSignedQuadDotProdAccumulateMaskedInt32x16", argLength: 4, commutative: false}, {name: "XorInt8x16", argLength: 2, commutative: true}, {name: "XorInt8x32", argLength: 2, commutative: true}, {name: "XorInt8x64", argLength: 2, commutative: true}, @@ -1790,30 +1781,30 @@ func simdGenericOps() []opData { {name: "RotateAllRightUint64x2", argLength: 1, commutative: false, aux: "Int8"}, {name: "RotateAllRightUint64x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "RotateAllRightUint64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundScaledFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundScaledFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundScaledFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundScaledFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundScaledFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundScaledFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundScaledMaskedFloat32x4", argLength: 2, commutative: 
false, aux: "Int8"}, - {name: "RoundScaledMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundScaledMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundScaledMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundScaledMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundScaledMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundScaledResidueFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundScaledResidueFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundScaledResidueFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundScaledResidueFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundScaledResidueFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundScaledResidueFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundScaledResidueMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundScaledResidueMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundScaledResidueMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundScaledResidueMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundScaledResidueMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundScaledResidueMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundToEvenScaledFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundToEvenScaledFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundToEvenScaledFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundToEvenScaledFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundToEvenScaledFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundToEvenScaledFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundToEvenScaledMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundToEvenScaledMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundToEvenScaledMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundToEvenScaledMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundToEvenScaledMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundToEvenScaledMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundToEvenScaledResidueFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundToEvenScaledResidueFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundToEvenScaledResidueFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundToEvenScaledResidueFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundToEvenScaledResidueFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundToEvenScaledResidueFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundToEvenScaledResidueMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundToEvenScaledResidueMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundToEvenScaledResidueMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundToEvenScaledResidueMaskedFloat64x2", 
argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundToEvenScaledResidueMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundToEvenScaledResidueMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "SetElemInt8x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "SetElemInt16x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "SetElemInt32x4", argLength: 2, commutative: false, aux: "Int8"}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 7c135ea692cee0..8bf850d78ed503 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1418,6 +1418,18 @@ const ( OpAMD64VPADDSWMasked128 OpAMD64VPADDSWMasked256 OpAMD64VPADDSWMasked512 + OpAMD64VPADDUSB128 + OpAMD64VPADDUSB256 + OpAMD64VPADDUSB512 + OpAMD64VPADDUSBMasked128 + OpAMD64VPADDUSBMasked256 + OpAMD64VPADDUSBMasked512 + OpAMD64VPADDUSW128 + OpAMD64VPADDUSW256 + OpAMD64VPADDUSW512 + OpAMD64VPADDUSWMasked128 + OpAMD64VPADDUSWMasked256 + OpAMD64VPADDUSWMasked512 OpAMD64VPADDW128 OpAMD64VPADDW256 OpAMD64VPADDW512 @@ -1720,22 +1732,12 @@ const ( OpAMD64VPMINUWMasked512 OpAMD64VPMULDQ128 OpAMD64VPMULDQ256 - OpAMD64VPMULDQ512 - OpAMD64VPMULDQMasked128 - OpAMD64VPMULDQMasked256 - OpAMD64VPMULDQMasked512 OpAMD64VPMULHUW128 OpAMD64VPMULHUW256 - OpAMD64VPMULHUW512 OpAMD64VPMULHUWMasked128 - OpAMD64VPMULHUWMasked256 OpAMD64VPMULHUWMasked512 - OpAMD64VPMULHW128 - OpAMD64VPMULHW256 OpAMD64VPMULHW512 - OpAMD64VPMULHWMasked128 OpAMD64VPMULHWMasked256 - OpAMD64VPMULHWMasked512 OpAMD64VPMULLD128 OpAMD64VPMULLD256 OpAMD64VPMULLD512 @@ -1756,10 +1758,6 @@ const ( OpAMD64VPMULLWMasked512 OpAMD64VPMULUDQ128 OpAMD64VPMULUDQ256 - OpAMD64VPMULUDQ512 - OpAMD64VPMULUDQMasked128 - OpAMD64VPMULUDQMasked256 - OpAMD64VPMULUDQMasked512 OpAMD64VPOPCNTB128 OpAMD64VPOPCNTB256 OpAMD64VPOPCNTB512 @@ -1998,6 +1996,18 @@ const ( OpAMD64VPSUBSWMasked128 OpAMD64VPSUBSWMasked256 OpAMD64VPSUBSWMasked512 + OpAMD64VPSUBUSB128 + OpAMD64VPSUBUSB256 + OpAMD64VPSUBUSB512 + OpAMD64VPSUBUSBMasked128 + OpAMD64VPSUBUSBMasked256 + OpAMD64VPSUBUSBMasked512 + OpAMD64VPSUBUSW128 + OpAMD64VPSUBUSW256 + OpAMD64VPSUBUSW512 + OpAMD64VPSUBUSWMasked128 + OpAMD64VPSUBUSWMasked256 + OpAMD64VPSUBUSWMasked512 OpAMD64VPSUBW128 OpAMD64VPSUBW256 OpAMD64VPSUBW512 @@ -2102,9 +2112,6 @@ const ( OpAMD64VREDUCEPDMasked128 OpAMD64VREDUCEPDMasked256 OpAMD64VREDUCEPDMasked512 - OpAMD64VDPPS128 - OpAMD64VDPPS256 - OpAMD64VDPPD128 OpAMD64VCMPPS128 OpAMD64VCMPPS256 OpAMD64VCMPPS512 @@ -4598,36 +4605,48 @@ const ( OpCvtMask64x2to8 OpCvtMask64x4to8 OpCvtMask64x8to8 - OpAbsoluteInt8x16 - OpAbsoluteInt8x32 - OpAbsoluteInt8x64 - OpAbsoluteInt16x8 - OpAbsoluteInt16x16 - OpAbsoluteInt16x32 - OpAbsoluteInt32x4 - OpAbsoluteInt32x8 - OpAbsoluteInt32x16 - OpAbsoluteInt64x2 - OpAbsoluteInt64x4 - OpAbsoluteInt64x8 - OpAbsoluteMaskedInt8x16 - OpAbsoluteMaskedInt8x32 - OpAbsoluteMaskedInt8x64 - OpAbsoluteMaskedInt16x8 - OpAbsoluteMaskedInt16x16 - OpAbsoluteMaskedInt16x32 - OpAbsoluteMaskedInt32x4 - OpAbsoluteMaskedInt32x8 - OpAbsoluteMaskedInt32x16 - OpAbsoluteMaskedInt64x2 - OpAbsoluteMaskedInt64x4 - OpAbsoluteMaskedInt64x8 - OpAddDotProdInt32x4 - OpAddDotProdInt32x8 - OpAddDotProdInt32x16 - OpAddDotProdMaskedInt32x4 - OpAddDotProdMaskedInt32x8 - OpAddDotProdMaskedInt32x16 + OpAbsInt8x16 + OpAbsInt8x32 + OpAbsInt8x64 + OpAbsInt16x8 + OpAbsInt16x16 + OpAbsInt16x32 + OpAbsInt32x4 + OpAbsInt32x8 + OpAbsInt32x16 + OpAbsInt64x2 + OpAbsInt64x4 + OpAbsInt64x8 + OpAbsMaskedInt8x16 + 
OpAbsMaskedInt8x32 + OpAbsMaskedInt8x64 + OpAbsMaskedInt16x8 + OpAbsMaskedInt16x16 + OpAbsMaskedInt16x32 + OpAbsMaskedInt32x4 + OpAbsMaskedInt32x8 + OpAbsMaskedInt32x16 + OpAbsMaskedInt64x2 + OpAbsMaskedInt64x4 + OpAbsMaskedInt64x8 + OpAddDotProdPairsSaturatedInt32x4 + OpAddDotProdPairsSaturatedInt32x8 + OpAddDotProdPairsSaturatedInt32x16 + OpAddDotProdPairsSaturatedMaskedInt32x4 + OpAddDotProdPairsSaturatedMaskedInt32x8 + OpAddDotProdPairsSaturatedMaskedInt32x16 + OpAddDotProdQuadrupleInt32x4 + OpAddDotProdQuadrupleInt32x8 + OpAddDotProdQuadrupleInt32x16 + OpAddDotProdQuadrupleMaskedInt32x4 + OpAddDotProdQuadrupleMaskedInt32x8 + OpAddDotProdQuadrupleMaskedInt32x16 + OpAddDotProdQuadrupleSaturatedInt32x4 + OpAddDotProdQuadrupleSaturatedInt32x8 + OpAddDotProdQuadrupleSaturatedInt32x16 + OpAddDotProdQuadrupleSaturatedMaskedInt32x4 + OpAddDotProdQuadrupleSaturatedMaskedInt32x8 + OpAddDotProdQuadrupleSaturatedMaskedInt32x16 OpAddFloat32x4 OpAddFloat32x8 OpAddFloat32x16 @@ -4802,30 +4821,6 @@ const ( OpAndUint64x2 OpAndUint64x4 OpAndUint64x8 - OpApproximateReciprocalFloat32x4 - OpApproximateReciprocalFloat32x8 - OpApproximateReciprocalFloat32x16 - OpApproximateReciprocalFloat64x2 - OpApproximateReciprocalFloat64x4 - OpApproximateReciprocalFloat64x8 - OpApproximateReciprocalMaskedFloat32x4 - OpApproximateReciprocalMaskedFloat32x8 - OpApproximateReciprocalMaskedFloat32x16 - OpApproximateReciprocalMaskedFloat64x2 - OpApproximateReciprocalMaskedFloat64x4 - OpApproximateReciprocalMaskedFloat64x8 - OpApproximateReciprocalOfSqrtFloat32x4 - OpApproximateReciprocalOfSqrtFloat32x8 - OpApproximateReciprocalOfSqrtFloat32x16 - OpApproximateReciprocalOfSqrtFloat64x2 - OpApproximateReciprocalOfSqrtFloat64x4 - OpApproximateReciprocalOfSqrtFloat64x8 - OpApproximateReciprocalOfSqrtMaskedFloat32x4 - OpApproximateReciprocalOfSqrtMaskedFloat32x8 - OpApproximateReciprocalOfSqrtMaskedFloat32x16 - OpApproximateReciprocalOfSqrtMaskedFloat64x2 - OpApproximateReciprocalOfSqrtMaskedFloat64x4 - OpApproximateReciprocalOfSqrtMaskedFloat64x8 OpAverageMaskedUint8x16 OpAverageMaskedUint8x32 OpAverageMaskedUint8x64 @@ -4884,6 +4879,12 @@ const ( OpConvertToUint32MaskedFloat32x4 OpConvertToUint32MaskedFloat32x8 OpConvertToUint32MaskedFloat32x16 + OpCopySignInt8x16 + OpCopySignInt8x32 + OpCopySignInt16x8 + OpCopySignInt16x16 + OpCopySignInt32x4 + OpCopySignInt32x8 OpDivFloat32x4 OpDivFloat32x8 OpDivFloat32x16 @@ -4896,9 +4897,18 @@ const ( OpDivMaskedFloat64x2 OpDivMaskedFloat64x4 OpDivMaskedFloat64x8 - OpDotProdBroadcastFloat32x4 - OpDotProdBroadcastFloat32x8 - OpDotProdBroadcastFloat64x2 + OpDotProdPairsInt16x8 + OpDotProdPairsInt16x16 + OpDotProdPairsInt16x32 + OpDotProdPairsMaskedInt16x8 + OpDotProdPairsMaskedInt16x16 + OpDotProdPairsMaskedInt16x32 + OpDotProdPairsSaturatedMaskedUint8x16 + OpDotProdPairsSaturatedMaskedUint8x32 + OpDotProdPairsSaturatedMaskedUint8x64 + OpDotProdPairsSaturatedUint8x16 + OpDotProdPairsSaturatedUint8x32 + OpDotProdPairsSaturatedUint8x64 OpEqualFloat32x4 OpEqualFloat32x8 OpEqualFloat32x16 @@ -4993,42 +5003,6 @@ const ( OpFloorFloat32x8 OpFloorFloat64x2 OpFloorFloat64x4 - OpFusedMultiplyAddFloat32x4 - OpFusedMultiplyAddFloat32x8 - OpFusedMultiplyAddFloat32x16 - OpFusedMultiplyAddFloat64x2 - OpFusedMultiplyAddFloat64x4 - OpFusedMultiplyAddFloat64x8 - OpFusedMultiplyAddMaskedFloat32x4 - OpFusedMultiplyAddMaskedFloat32x8 - OpFusedMultiplyAddMaskedFloat32x16 - OpFusedMultiplyAddMaskedFloat64x2 - OpFusedMultiplyAddMaskedFloat64x4 - OpFusedMultiplyAddMaskedFloat64x8 - OpFusedMultiplyAddSubFloat32x4 - 
OpFusedMultiplyAddSubFloat32x8 - OpFusedMultiplyAddSubFloat32x16 - OpFusedMultiplyAddSubFloat64x2 - OpFusedMultiplyAddSubFloat64x4 - OpFusedMultiplyAddSubFloat64x8 - OpFusedMultiplyAddSubMaskedFloat32x4 - OpFusedMultiplyAddSubMaskedFloat32x8 - OpFusedMultiplyAddSubMaskedFloat32x16 - OpFusedMultiplyAddSubMaskedFloat64x2 - OpFusedMultiplyAddSubMaskedFloat64x4 - OpFusedMultiplyAddSubMaskedFloat64x8 - OpFusedMultiplySubAddFloat32x4 - OpFusedMultiplySubAddFloat32x8 - OpFusedMultiplySubAddFloat32x16 - OpFusedMultiplySubAddFloat64x2 - OpFusedMultiplySubAddFloat64x4 - OpFusedMultiplySubAddFloat64x8 - OpFusedMultiplySubAddMaskedFloat32x4 - OpFusedMultiplySubAddMaskedFloat32x8 - OpFusedMultiplySubAddMaskedFloat32x16 - OpFusedMultiplySubAddMaskedFloat64x2 - OpFusedMultiplySubAddMaskedFloat64x4 - OpFusedMultiplySubAddMaskedFloat64x8 OpGaloisFieldMulMaskedUint8x16 OpGaloisFieldMulMaskedUint8x32 OpGaloisFieldMulMaskedUint8x64 @@ -5447,22 +5421,34 @@ const ( OpMinUint64x2 OpMinUint64x4 OpMinUint64x8 + OpMulAddFloat32x4 + OpMulAddFloat32x8 + OpMulAddFloat32x16 + OpMulAddFloat64x2 + OpMulAddFloat64x4 + OpMulAddFloat64x8 + OpMulAddMaskedFloat32x4 + OpMulAddMaskedFloat32x8 + OpMulAddMaskedFloat32x16 + OpMulAddMaskedFloat64x2 + OpMulAddMaskedFloat64x4 + OpMulAddMaskedFloat64x8 + OpMulAddSubFloat32x4 + OpMulAddSubFloat32x8 + OpMulAddSubFloat32x16 + OpMulAddSubFloat64x2 + OpMulAddSubFloat64x4 + OpMulAddSubFloat64x8 + OpMulAddSubMaskedFloat32x4 + OpMulAddSubMaskedFloat32x8 + OpMulAddSubMaskedFloat32x16 + OpMulAddSubMaskedFloat64x2 + OpMulAddSubMaskedFloat64x4 + OpMulAddSubMaskedFloat64x8 OpMulEvenWidenInt32x4 OpMulEvenWidenInt32x8 - OpMulEvenWidenInt64x2 - OpMulEvenWidenInt64x4 - OpMulEvenWidenInt64x8 - OpMulEvenWidenMaskedInt64x2 - OpMulEvenWidenMaskedInt64x4 - OpMulEvenWidenMaskedInt64x8 - OpMulEvenWidenMaskedUint64x2 - OpMulEvenWidenMaskedUint64x4 - OpMulEvenWidenMaskedUint64x8 OpMulEvenWidenUint32x4 OpMulEvenWidenUint32x8 - OpMulEvenWidenUint64x2 - OpMulEvenWidenUint64x4 - OpMulEvenWidenUint64x8 OpMulFloat32x4 OpMulFloat32x8 OpMulFloat32x16 @@ -5475,12 +5461,6 @@ const ( OpMulHighMaskedInt16x8 OpMulHighMaskedInt16x16 OpMulHighMaskedInt16x32 - OpMulHighMaskedUint16x8 - OpMulHighMaskedUint16x16 - OpMulHighMaskedUint16x32 - OpMulHighUint16x8 - OpMulHighUint16x16 - OpMulHighUint16x32 OpMulInt16x8 OpMulInt16x16 OpMulInt16x32 @@ -5505,6 +5485,36 @@ const ( OpMulMaskedInt64x2 OpMulMaskedInt64x4 OpMulMaskedInt64x8 + OpMulMaskedUint16x8 + OpMulMaskedUint16x16 + OpMulMaskedUint16x32 + OpMulMaskedUint32x4 + OpMulMaskedUint32x8 + OpMulMaskedUint32x16 + OpMulMaskedUint64x2 + OpMulMaskedUint64x4 + OpMulMaskedUint64x8 + OpMulSubAddFloat32x4 + OpMulSubAddFloat32x8 + OpMulSubAddFloat32x16 + OpMulSubAddFloat64x2 + OpMulSubAddFloat64x4 + OpMulSubAddFloat64x8 + OpMulSubAddMaskedFloat32x4 + OpMulSubAddMaskedFloat32x8 + OpMulSubAddMaskedFloat32x16 + OpMulSubAddMaskedFloat64x2 + OpMulSubAddMaskedFloat64x4 + OpMulSubAddMaskedFloat64x8 + OpMulUint16x8 + OpMulUint16x16 + OpMulUint16x32 + OpMulUint32x4 + OpMulUint32x8 + OpMulUint32x16 + OpMulUint64x2 + OpMulUint64x4 + OpMulUint64x8 OpNotEqualFloat32x4 OpNotEqualFloat32x8 OpNotEqualFloat32x16 @@ -5565,6 +5575,54 @@ const ( OpNotEqualUint64x2 OpNotEqualUint64x4 OpNotEqualUint64x8 + OpOnesCountInt8x16 + OpOnesCountInt8x32 + OpOnesCountInt8x64 + OpOnesCountInt16x8 + OpOnesCountInt16x16 + OpOnesCountInt16x32 + OpOnesCountInt32x4 + OpOnesCountInt32x8 + OpOnesCountInt32x16 + OpOnesCountInt64x2 + OpOnesCountInt64x4 + OpOnesCountInt64x8 + OpOnesCountMaskedInt8x16 + OpOnesCountMaskedInt8x32 + 
OpOnesCountMaskedInt8x64 + OpOnesCountMaskedInt16x8 + OpOnesCountMaskedInt16x16 + OpOnesCountMaskedInt16x32 + OpOnesCountMaskedInt32x4 + OpOnesCountMaskedInt32x8 + OpOnesCountMaskedInt32x16 + OpOnesCountMaskedInt64x2 + OpOnesCountMaskedInt64x4 + OpOnesCountMaskedInt64x8 + OpOnesCountMaskedUint8x16 + OpOnesCountMaskedUint8x32 + OpOnesCountMaskedUint8x64 + OpOnesCountMaskedUint16x8 + OpOnesCountMaskedUint16x16 + OpOnesCountMaskedUint16x32 + OpOnesCountMaskedUint32x4 + OpOnesCountMaskedUint32x8 + OpOnesCountMaskedUint32x16 + OpOnesCountMaskedUint64x2 + OpOnesCountMaskedUint64x4 + OpOnesCountMaskedUint64x8 + OpOnesCountUint8x16 + OpOnesCountUint8x32 + OpOnesCountUint8x64 + OpOnesCountUint16x8 + OpOnesCountUint16x16 + OpOnesCountUint16x32 + OpOnesCountUint32x4 + OpOnesCountUint32x8 + OpOnesCountUint32x16 + OpOnesCountUint64x2 + OpOnesCountUint64x4 + OpOnesCountUint64x8 OpOrInt8x16 OpOrInt8x32 OpOrInt8x64 @@ -5601,12 +5659,6 @@ const ( OpOrUint64x2 OpOrUint64x4 OpOrUint64x8 - OpPairDotProdInt16x8 - OpPairDotProdInt16x16 - OpPairDotProdInt16x32 - OpPairDotProdMaskedInt16x8 - OpPairDotProdMaskedInt16x16 - OpPairDotProdMaskedInt16x32 OpPermute2Float32x4 OpPermute2Float32x8 OpPermute2Float32x16 @@ -5715,54 +5767,30 @@ const ( OpPermuteUint32x16 OpPermuteUint64x4 OpPermuteUint64x8 - OpPopCountInt8x16 - OpPopCountInt8x32 - OpPopCountInt8x64 - OpPopCountInt16x8 - OpPopCountInt16x16 - OpPopCountInt16x32 - OpPopCountInt32x4 - OpPopCountInt32x8 - OpPopCountInt32x16 - OpPopCountInt64x2 - OpPopCountInt64x4 - OpPopCountInt64x8 - OpPopCountMaskedInt8x16 - OpPopCountMaskedInt8x32 - OpPopCountMaskedInt8x64 - OpPopCountMaskedInt16x8 - OpPopCountMaskedInt16x16 - OpPopCountMaskedInt16x32 - OpPopCountMaskedInt32x4 - OpPopCountMaskedInt32x8 - OpPopCountMaskedInt32x16 - OpPopCountMaskedInt64x2 - OpPopCountMaskedInt64x4 - OpPopCountMaskedInt64x8 - OpPopCountMaskedUint8x16 - OpPopCountMaskedUint8x32 - OpPopCountMaskedUint8x64 - OpPopCountMaskedUint16x8 - OpPopCountMaskedUint16x16 - OpPopCountMaskedUint16x32 - OpPopCountMaskedUint32x4 - OpPopCountMaskedUint32x8 - OpPopCountMaskedUint32x16 - OpPopCountMaskedUint64x2 - OpPopCountMaskedUint64x4 - OpPopCountMaskedUint64x8 - OpPopCountUint8x16 - OpPopCountUint8x32 - OpPopCountUint8x64 - OpPopCountUint16x8 - OpPopCountUint16x16 - OpPopCountUint16x32 - OpPopCountUint32x4 - OpPopCountUint32x8 - OpPopCountUint32x16 - OpPopCountUint64x2 - OpPopCountUint64x4 - OpPopCountUint64x8 + OpReciprocalFloat32x4 + OpReciprocalFloat32x8 + OpReciprocalFloat32x16 + OpReciprocalFloat64x2 + OpReciprocalFloat64x4 + OpReciprocalFloat64x8 + OpReciprocalMaskedFloat32x4 + OpReciprocalMaskedFloat32x8 + OpReciprocalMaskedFloat32x16 + OpReciprocalMaskedFloat64x2 + OpReciprocalMaskedFloat64x4 + OpReciprocalMaskedFloat64x8 + OpReciprocalSqrtFloat32x4 + OpReciprocalSqrtFloat32x8 + OpReciprocalSqrtFloat32x16 + OpReciprocalSqrtFloat64x2 + OpReciprocalSqrtFloat64x4 + OpReciprocalSqrtFloat64x8 + OpReciprocalSqrtMaskedFloat32x4 + OpReciprocalSqrtMaskedFloat32x8 + OpReciprocalSqrtMaskedFloat32x16 + OpReciprocalSqrtMaskedFloat64x2 + OpReciprocalSqrtMaskedFloat64x4 + OpReciprocalSqrtMaskedFloat64x8 OpRotateLeftInt32x4 OpRotateLeftInt32x8 OpRotateLeftInt32x16 @@ -5811,28 +5839,10 @@ const ( OpRotateRightUint64x2 OpRotateRightUint64x4 OpRotateRightUint64x8 - OpRoundFloat32x4 - OpRoundFloat32x8 - OpRoundFloat64x2 - OpRoundFloat64x4 - OpSaturatedAddDotProdInt32x4 - OpSaturatedAddDotProdInt32x8 - OpSaturatedAddDotProdInt32x16 - OpSaturatedAddDotProdMaskedInt32x4 - OpSaturatedAddDotProdMaskedInt32x8 - 
OpSaturatedAddDotProdMaskedInt32x16 - OpSaturatedUnsignedSignedPairDotProdMaskedUint8x16 - OpSaturatedUnsignedSignedPairDotProdMaskedUint8x32 - OpSaturatedUnsignedSignedPairDotProdMaskedUint8x64 - OpSaturatedUnsignedSignedPairDotProdUint8x16 - OpSaturatedUnsignedSignedPairDotProdUint8x32 - OpSaturatedUnsignedSignedPairDotProdUint8x64 - OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4 - OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8 - OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16 - OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4 - OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8 - OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16 + OpRoundToEvenFloat32x4 + OpRoundToEvenFloat32x8 + OpRoundToEvenFloat64x2 + OpRoundToEvenFloat64x4 OpScaleFloat32x4 OpScaleFloat32x8 OpScaleFloat32x16 @@ -6101,12 +6111,6 @@ const ( OpShiftRightUint64x2 OpShiftRightUint64x4 OpShiftRightUint64x8 - OpSignInt8x16 - OpSignInt8x32 - OpSignInt16x8 - OpSignInt16x16 - OpSignInt32x4 - OpSignInt32x8 OpSqrtFloat32x4 OpSqrtFloat32x8 OpSqrtFloat32x16 @@ -6221,12 +6225,6 @@ const ( OpTruncFloat32x8 OpTruncFloat64x2 OpTruncFloat64x4 - OpUnsignedSignedQuadDotProdAccumulateInt32x4 - OpUnsignedSignedQuadDotProdAccumulateInt32x8 - OpUnsignedSignedQuadDotProdAccumulateInt32x16 - OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x4 - OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x8 - OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x16 OpXorInt8x16 OpXorInt8x32 OpXorInt8x64 @@ -6385,30 +6383,30 @@ const ( OpRotateAllRightUint64x2 OpRotateAllRightUint64x4 OpRotateAllRightUint64x8 - OpRoundScaledFloat32x4 - OpRoundScaledFloat32x8 - OpRoundScaledFloat32x16 - OpRoundScaledFloat64x2 - OpRoundScaledFloat64x4 - OpRoundScaledFloat64x8 - OpRoundScaledMaskedFloat32x4 - OpRoundScaledMaskedFloat32x8 - OpRoundScaledMaskedFloat32x16 - OpRoundScaledMaskedFloat64x2 - OpRoundScaledMaskedFloat64x4 - OpRoundScaledMaskedFloat64x8 - OpRoundScaledResidueFloat32x4 - OpRoundScaledResidueFloat32x8 - OpRoundScaledResidueFloat32x16 - OpRoundScaledResidueFloat64x2 - OpRoundScaledResidueFloat64x4 - OpRoundScaledResidueFloat64x8 - OpRoundScaledResidueMaskedFloat32x4 - OpRoundScaledResidueMaskedFloat32x8 - OpRoundScaledResidueMaskedFloat32x16 - OpRoundScaledResidueMaskedFloat64x2 - OpRoundScaledResidueMaskedFloat64x4 - OpRoundScaledResidueMaskedFloat64x8 + OpRoundToEvenScaledFloat32x4 + OpRoundToEvenScaledFloat32x8 + OpRoundToEvenScaledFloat32x16 + OpRoundToEvenScaledFloat64x2 + OpRoundToEvenScaledFloat64x4 + OpRoundToEvenScaledFloat64x8 + OpRoundToEvenScaledMaskedFloat32x4 + OpRoundToEvenScaledMaskedFloat32x8 + OpRoundToEvenScaledMaskedFloat32x16 + OpRoundToEvenScaledMaskedFloat64x2 + OpRoundToEvenScaledMaskedFloat64x4 + OpRoundToEvenScaledMaskedFloat64x8 + OpRoundToEvenScaledResidueFloat32x4 + OpRoundToEvenScaledResidueFloat32x8 + OpRoundToEvenScaledResidueFloat32x16 + OpRoundToEvenScaledResidueFloat64x2 + OpRoundToEvenScaledResidueFloat64x4 + OpRoundToEvenScaledResidueFloat64x8 + OpRoundToEvenScaledResidueMaskedFloat32x4 + OpRoundToEvenScaledResidueMaskedFloat32x8 + OpRoundToEvenScaledResidueMaskedFloat32x16 + OpRoundToEvenScaledResidueMaskedFloat64x2 + OpRoundToEvenScaledResidueMaskedFloat64x4 + OpRoundToEvenScaledResidueMaskedFloat64x8 OpSetElemInt8x16 OpSetElemInt16x8 OpSetElemInt32x4 @@ -22405,6 +22403,192 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPADDUSB128", + argLen: 2, + commutative: true, + asm: x86.AVPADDUSB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPADDUSB256", + argLen: 2, + commutative: true, + asm: x86.AVPADDUSB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPADDUSB512", + argLen: 2, + commutative: true, + asm: x86.AVPADDUSB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPADDUSBMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPADDUSB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPADDUSBMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPADDUSB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPADDUSBMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPADDUSB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPADDUSW128", + argLen: 2, + commutative: true, + asm: x86.AVPADDUSW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPADDUSW256", + argLen: 2, + commutative: true, + asm: x86.AVPADDUSW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPADDUSW512", + argLen: 2, + commutative: true, + asm: x86.AVPADDUSW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPADDUSWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPADDUSW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPADDUSWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPADDUSW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPADDUSWMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPADDUSW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPADDW128", argLen: 2, @@ -27016,69 +27200,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPMULDQ512", - argLen: 2, - commutative: true, - asm: x86.AVPMULDQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VPMULDQMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMULDQ, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPMULDQMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMULDQ, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPMULDQMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMULDQ, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPMULHUW128", argLen: 2, @@ -27109,21 +27230,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPMULHUW512", - argLen: 2, - commutative: true, - asm: x86.AVPMULHUW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, { name: "VPMULHUWMasked128", argLen: 3, @@ -27140,22 +27246,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPMULHUWMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMULHUW, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPMULHUWMasked512", argLen: 3, @@ -27172,36 +27262,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPMULHW128", - argLen: 2, - commutative: true, - asm: x86.AVPMULHW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPMULHW256", - argLen: 2, - commutative: true, - asm: x86.AVPMULHW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPMULHW512", argLen: 2, @@ -27217,22 +27277,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPMULHWMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMULHW, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPMULHWMasked256", argLen: 3, @@ -27249,22 +27293,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPMULHWMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMULHW, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPMULLD128", argLen: 2, @@ -27574,69 +27602,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPMULUDQ512", - argLen: 2, - commutative: true, - asm: x86.AVPMULUDQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VPMULUDQMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMULUDQ, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPMULUDQMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMULUDQ, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPMULUDQMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMULUDQ, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPOPCNTB128", argLen: 1, @@ -31144,6 +31109,180 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSUBUSB128", + argLen: 2, + asm: x86.AVPSUBUSB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSUBUSB256", + argLen: 2, + asm: x86.AVPSUBUSB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSUBUSB512", + argLen: 2, + asm: x86.AVPSUBUSB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSUBUSBMasked128", + argLen: 3, + asm: x86.AVPSUBUSB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSUBUSBMasked256", + 
argLen: 3, + asm: x86.AVPSUBUSB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSUBUSBMasked512", + argLen: 3, + asm: x86.AVPSUBUSB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSUBUSW128", + argLen: 2, + asm: x86.AVPSUBUSW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSUBUSW256", + argLen: 2, + asm: x86.AVPSUBUSW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSUBUSW512", + argLen: 2, + asm: x86.AVPSUBUSW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSUBUSWMasked128", + argLen: 3, + asm: x86.AVPSUBUSW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSUBUSWMasked256", + argLen: 3, + asm: x86.AVPSUBUSW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSUBUSWMasked512", + argLen: 3, + asm: x86.AVPSUBUSW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSUBW128", argLen: 2, @@ -32625,54 +32764,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VDPPS128", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVDPPS, - reg: 
regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VDPPS256", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVDPPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VDPPD128", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVDPPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VCMPPS128", auxType: auxInt8, @@ -63258,152 +63349,212 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AbsoluteInt8x16", + name: "AbsInt8x16", argLen: 1, generic: true, }, { - name: "AbsoluteInt8x32", + name: "AbsInt8x32", argLen: 1, generic: true, }, { - name: "AbsoluteInt8x64", + name: "AbsInt8x64", argLen: 1, generic: true, }, { - name: "AbsoluteInt16x8", + name: "AbsInt16x8", argLen: 1, generic: true, }, { - name: "AbsoluteInt16x16", + name: "AbsInt16x16", argLen: 1, generic: true, }, { - name: "AbsoluteInt16x32", + name: "AbsInt16x32", argLen: 1, generic: true, }, { - name: "AbsoluteInt32x4", + name: "AbsInt32x4", argLen: 1, generic: true, }, { - name: "AbsoluteInt32x8", + name: "AbsInt32x8", argLen: 1, generic: true, }, { - name: "AbsoluteInt32x16", + name: "AbsInt32x16", argLen: 1, generic: true, }, { - name: "AbsoluteInt64x2", + name: "AbsInt64x2", argLen: 1, generic: true, }, { - name: "AbsoluteInt64x4", + name: "AbsInt64x4", argLen: 1, generic: true, }, { - name: "AbsoluteInt64x8", + name: "AbsInt64x8", argLen: 1, generic: true, }, { - name: "AbsoluteMaskedInt8x16", + name: "AbsMaskedInt8x16", argLen: 2, generic: true, }, { - name: "AbsoluteMaskedInt8x32", + name: "AbsMaskedInt8x32", argLen: 2, generic: true, }, { - name: "AbsoluteMaskedInt8x64", + name: "AbsMaskedInt8x64", argLen: 2, generic: true, }, { - name: "AbsoluteMaskedInt16x8", + name: "AbsMaskedInt16x8", argLen: 2, generic: true, }, { - name: "AbsoluteMaskedInt16x16", + name: "AbsMaskedInt16x16", argLen: 2, generic: true, }, { - name: "AbsoluteMaskedInt16x32", + name: "AbsMaskedInt16x32", argLen: 2, generic: true, }, { - name: "AbsoluteMaskedInt32x4", + name: "AbsMaskedInt32x4", argLen: 2, generic: true, }, { - name: "AbsoluteMaskedInt32x8", + name: "AbsMaskedInt32x8", argLen: 2, generic: true, }, { - name: "AbsoluteMaskedInt32x16", + name: "AbsMaskedInt32x16", argLen: 2, generic: true, }, { - name: "AbsoluteMaskedInt64x2", + name: "AbsMaskedInt64x2", argLen: 2, generic: true, }, { - name: "AbsoluteMaskedInt64x4", + name: "AbsMaskedInt64x4", argLen: 2, generic: true, }, { - name: "AbsoluteMaskedInt64x8", + name: "AbsMaskedInt64x8", argLen: 2, generic: true, }, { - name: "AddDotProdInt32x4", + name: "AddDotProdPairsSaturatedInt32x4", + argLen: 3, + generic: true, + }, + { + name: "AddDotProdPairsSaturatedInt32x8", + argLen: 3, + generic: true, + }, + { + name: "AddDotProdPairsSaturatedInt32x16", + argLen: 3, + generic: true, + }, + 
{ + name: "AddDotProdPairsSaturatedMaskedInt32x4", + argLen: 4, + generic: true, + }, + { + name: "AddDotProdPairsSaturatedMaskedInt32x8", + argLen: 4, + generic: true, + }, + { + name: "AddDotProdPairsSaturatedMaskedInt32x16", + argLen: 4, + generic: true, + }, + { + name: "AddDotProdQuadrupleInt32x4", + argLen: 3, + generic: true, + }, + { + name: "AddDotProdQuadrupleInt32x8", + argLen: 3, + generic: true, + }, + { + name: "AddDotProdQuadrupleInt32x16", + argLen: 3, + generic: true, + }, + { + name: "AddDotProdQuadrupleMaskedInt32x4", + argLen: 4, + generic: true, + }, + { + name: "AddDotProdQuadrupleMaskedInt32x8", + argLen: 4, + generic: true, + }, + { + name: "AddDotProdQuadrupleMaskedInt32x16", + argLen: 4, + generic: true, + }, + { + name: "AddDotProdQuadrupleSaturatedInt32x4", argLen: 3, generic: true, }, { - name: "AddDotProdInt32x8", + name: "AddDotProdQuadrupleSaturatedInt32x8", argLen: 3, generic: true, }, { - name: "AddDotProdInt32x16", + name: "AddDotProdQuadrupleSaturatedInt32x16", argLen: 3, generic: true, }, { - name: "AddDotProdMaskedInt32x4", + name: "AddDotProdQuadrupleSaturatedMaskedInt32x4", argLen: 4, generic: true, }, { - name: "AddDotProdMaskedInt32x8", + name: "AddDotProdQuadrupleSaturatedMaskedInt32x8", argLen: 4, generic: true, }, { - name: "AddDotProdMaskedInt32x16", + name: "AddDotProdQuadrupleSaturatedMaskedInt32x16", argLen: 4, generic: true, }, @@ -64397,126 +64548,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "ApproximateReciprocalFloat32x4", - argLen: 1, - generic: true, - }, - { - name: "ApproximateReciprocalFloat32x8", - argLen: 1, - generic: true, - }, - { - name: "ApproximateReciprocalFloat32x16", - argLen: 1, - generic: true, - }, - { - name: "ApproximateReciprocalFloat64x2", - argLen: 1, - generic: true, - }, - { - name: "ApproximateReciprocalFloat64x4", - argLen: 1, - generic: true, - }, - { - name: "ApproximateReciprocalFloat64x8", - argLen: 1, - generic: true, - }, - { - name: "ApproximateReciprocalMaskedFloat32x4", - argLen: 2, - generic: true, - }, - { - name: "ApproximateReciprocalMaskedFloat32x8", - argLen: 2, - generic: true, - }, - { - name: "ApproximateReciprocalMaskedFloat32x16", - argLen: 2, - generic: true, - }, - { - name: "ApproximateReciprocalMaskedFloat64x2", - argLen: 2, - generic: true, - }, - { - name: "ApproximateReciprocalMaskedFloat64x4", - argLen: 2, - generic: true, - }, - { - name: "ApproximateReciprocalMaskedFloat64x8", - argLen: 2, - generic: true, - }, - { - name: "ApproximateReciprocalOfSqrtFloat32x4", - argLen: 1, - generic: true, - }, - { - name: "ApproximateReciprocalOfSqrtFloat32x8", - argLen: 1, - generic: true, - }, - { - name: "ApproximateReciprocalOfSqrtFloat32x16", - argLen: 1, - generic: true, - }, - { - name: "ApproximateReciprocalOfSqrtFloat64x2", - argLen: 1, - generic: true, - }, - { - name: "ApproximateReciprocalOfSqrtFloat64x4", - argLen: 1, - generic: true, - }, - { - name: "ApproximateReciprocalOfSqrtFloat64x8", - argLen: 1, - generic: true, - }, - { - name: "ApproximateReciprocalOfSqrtMaskedFloat32x4", - argLen: 2, - generic: true, - }, - { - name: "ApproximateReciprocalOfSqrtMaskedFloat32x8", - argLen: 2, - generic: true, - }, - { - name: "ApproximateReciprocalOfSqrtMaskedFloat32x16", - argLen: 2, - generic: true, - }, - { - name: "ApproximateReciprocalOfSqrtMaskedFloat64x2", - argLen: 2, - generic: true, - }, - { - name: "ApproximateReciprocalOfSqrtMaskedFloat64x4", - argLen: 2, - generic: true, - }, - { - name: "ApproximateReciprocalOfSqrtMaskedFloat64x8", - 
argLen: 2, - generic: true, - }, { name: "AverageMaskedUint8x16", argLen: 3, @@ -64819,6 +64850,36 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "CopySignInt8x16", + argLen: 2, + generic: true, + }, + { + name: "CopySignInt8x32", + argLen: 2, + generic: true, + }, + { + name: "CopySignInt16x8", + argLen: 2, + generic: true, + }, + { + name: "CopySignInt16x16", + argLen: 2, + generic: true, + }, + { + name: "CopySignInt32x4", + argLen: 2, + generic: true, + }, + { + name: "CopySignInt32x8", + argLen: 2, + generic: true, + }, { name: "DivFloat32x4", argLen: 2, @@ -64880,22 +64941,64 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "DotProdBroadcastFloat32x4", - argLen: 2, - commutative: true, - generic: true, + name: "DotProdPairsInt16x8", + argLen: 2, + generic: true, }, { - name: "DotProdBroadcastFloat32x8", - argLen: 2, - commutative: true, - generic: true, + name: "DotProdPairsInt16x16", + argLen: 2, + generic: true, }, { - name: "DotProdBroadcastFloat64x2", - argLen: 2, - commutative: true, - generic: true, + name: "DotProdPairsInt16x32", + argLen: 2, + generic: true, + }, + { + name: "DotProdPairsMaskedInt16x8", + argLen: 3, + generic: true, + }, + { + name: "DotProdPairsMaskedInt16x16", + argLen: 3, + generic: true, + }, + { + name: "DotProdPairsMaskedInt16x32", + argLen: 3, + generic: true, + }, + { + name: "DotProdPairsSaturatedMaskedUint8x16", + argLen: 3, + generic: true, + }, + { + name: "DotProdPairsSaturatedMaskedUint8x32", + argLen: 3, + generic: true, + }, + { + name: "DotProdPairsSaturatedMaskedUint8x64", + argLen: 3, + generic: true, + }, + { + name: "DotProdPairsSaturatedUint8x16", + argLen: 2, + generic: true, + }, + { + name: "DotProdPairsSaturatedUint8x32", + argLen: 2, + generic: true, + }, + { + name: "DotProdPairsSaturatedUint8x64", + argLen: 2, + generic: true, }, { name: "EqualFloat32x4", @@ -65427,186 +65530,6 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, - { - name: "FusedMultiplyAddFloat32x4", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddFloat32x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddFloat32x16", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddFloat64x2", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddFloat64x4", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddFloat64x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddMaskedFloat32x4", - argLen: 4, - generic: true, - }, - { - name: "FusedMultiplyAddMaskedFloat32x8", - argLen: 4, - generic: true, - }, - { - name: "FusedMultiplyAddMaskedFloat32x16", - argLen: 4, - generic: true, - }, - { - name: "FusedMultiplyAddMaskedFloat64x2", - argLen: 4, - generic: true, - }, - { - name: "FusedMultiplyAddMaskedFloat64x4", - argLen: 4, - generic: true, - }, - { - name: "FusedMultiplyAddMaskedFloat64x8", - argLen: 4, - generic: true, - }, - { - name: "FusedMultiplyAddSubFloat32x4", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddSubFloat32x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddSubFloat32x16", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddSubFloat64x2", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddSubFloat64x4", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddSubFloat64x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddSubMaskedFloat32x4", - argLen: 4, - generic: true, - }, - { - name: "FusedMultiplyAddSubMaskedFloat32x8", - argLen: 4, - 
generic: true, - }, - { - name: "FusedMultiplyAddSubMaskedFloat32x16", - argLen: 4, - generic: true, - }, - { - name: "FusedMultiplyAddSubMaskedFloat64x2", - argLen: 4, - generic: true, - }, - { - name: "FusedMultiplyAddSubMaskedFloat64x4", - argLen: 4, - generic: true, - }, - { - name: "FusedMultiplyAddSubMaskedFloat64x8", - argLen: 4, - generic: true, - }, - { - name: "FusedMultiplySubAddFloat32x4", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySubAddFloat32x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySubAddFloat32x16", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySubAddFloat64x2", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySubAddFloat64x4", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySubAddFloat64x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySubAddMaskedFloat32x4", - argLen: 4, - generic: true, - }, - { - name: "FusedMultiplySubAddMaskedFloat32x8", - argLen: 4, - generic: true, - }, - { - name: "FusedMultiplySubAddMaskedFloat32x16", - argLen: 4, - generic: true, - }, - { - name: "FusedMultiplySubAddMaskedFloat64x2", - argLen: 4, - generic: true, - }, - { - name: "FusedMultiplySubAddMaskedFloat64x4", - argLen: 4, - generic: true, - }, - { - name: "FusedMultiplySubAddMaskedFloat64x8", - argLen: 4, - generic: true, - }, { name: "GaloisFieldMulMaskedUint8x16", argLen: 3, @@ -67829,6 +67752,126 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MulAddFloat32x4", + argLen: 3, + generic: true, + }, + { + name: "MulAddFloat32x8", + argLen: 3, + generic: true, + }, + { + name: "MulAddFloat32x16", + argLen: 3, + generic: true, + }, + { + name: "MulAddFloat64x2", + argLen: 3, + generic: true, + }, + { + name: "MulAddFloat64x4", + argLen: 3, + generic: true, + }, + { + name: "MulAddFloat64x8", + argLen: 3, + generic: true, + }, + { + name: "MulAddMaskedFloat32x4", + argLen: 4, + generic: true, + }, + { + name: "MulAddMaskedFloat32x8", + argLen: 4, + generic: true, + }, + { + name: "MulAddMaskedFloat32x16", + argLen: 4, + generic: true, + }, + { + name: "MulAddMaskedFloat64x2", + argLen: 4, + generic: true, + }, + { + name: "MulAddMaskedFloat64x4", + argLen: 4, + generic: true, + }, + { + name: "MulAddMaskedFloat64x8", + argLen: 4, + generic: true, + }, + { + name: "MulAddSubFloat32x4", + argLen: 3, + generic: true, + }, + { + name: "MulAddSubFloat32x8", + argLen: 3, + generic: true, + }, + { + name: "MulAddSubFloat32x16", + argLen: 3, + generic: true, + }, + { + name: "MulAddSubFloat64x2", + argLen: 3, + generic: true, + }, + { + name: "MulAddSubFloat64x4", + argLen: 3, + generic: true, + }, + { + name: "MulAddSubFloat64x8", + argLen: 3, + generic: true, + }, + { + name: "MulAddSubMaskedFloat32x4", + argLen: 4, + generic: true, + }, + { + name: "MulAddSubMaskedFloat32x8", + argLen: 4, + generic: true, + }, + { + name: "MulAddSubMaskedFloat32x16", + argLen: 4, + generic: true, + }, + { + name: "MulAddSubMaskedFloat64x2", + argLen: 4, + generic: true, + }, + { + name: "MulAddSubMaskedFloat64x4", + argLen: 4, + generic: true, + }, + { + name: "MulAddSubMaskedFloat64x8", + argLen: 4, + generic: true, + }, { name: "MulEvenWidenInt32x4", argLen: 2, @@ -67842,338 +67885,398 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MulEvenWidenInt64x2", + name: "MulEvenWidenUint32x4", argLen: 2, commutative: true, generic: true, }, { - name: "MulEvenWidenInt64x4", + name: "MulEvenWidenUint32x8", argLen: 2, commutative: true, generic: true, }, { - name: 
"MulEvenWidenInt64x8", + name: "MulFloat32x4", argLen: 2, commutative: true, generic: true, }, { - name: "MulEvenWidenMaskedInt64x2", - argLen: 3, + name: "MulFloat32x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MulEvenWidenMaskedInt64x4", - argLen: 3, + name: "MulFloat32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MulEvenWidenMaskedInt64x8", - argLen: 3, + name: "MulFloat64x2", + argLen: 2, commutative: true, generic: true, }, { - name: "MulEvenWidenMaskedUint64x2", - argLen: 3, + name: "MulFloat64x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MulEvenWidenMaskedUint64x4", - argLen: 3, + name: "MulFloat64x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MulEvenWidenMaskedUint64x8", - argLen: 3, + name: "MulHighInt16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MulEvenWidenUint32x4", + name: "MulHighInt16x16", argLen: 2, commutative: true, generic: true, }, { - name: "MulEvenWidenUint32x8", + name: "MulHighInt16x32", argLen: 2, commutative: true, generic: true, }, { - name: "MulEvenWidenUint64x2", - argLen: 2, + name: "MulHighMaskedInt16x8", + argLen: 3, commutative: true, generic: true, }, { - name: "MulEvenWidenUint64x4", - argLen: 2, + name: "MulHighMaskedInt16x16", + argLen: 3, commutative: true, generic: true, }, { - name: "MulEvenWidenUint64x8", - argLen: 2, + name: "MulHighMaskedInt16x32", + argLen: 3, commutative: true, generic: true, }, { - name: "MulFloat32x4", + name: "MulInt16x8", argLen: 2, commutative: true, generic: true, }, { - name: "MulFloat32x8", + name: "MulInt16x16", argLen: 2, commutative: true, generic: true, }, { - name: "MulFloat32x16", + name: "MulInt16x32", argLen: 2, commutative: true, generic: true, }, { - name: "MulFloat64x2", + name: "MulInt32x4", argLen: 2, commutative: true, generic: true, }, { - name: "MulFloat64x4", + name: "MulInt32x8", argLen: 2, commutative: true, generic: true, }, { - name: "MulFloat64x8", + name: "MulInt32x16", argLen: 2, commutative: true, generic: true, }, { - name: "MulHighInt16x8", + name: "MulInt64x2", argLen: 2, commutative: true, generic: true, }, { - name: "MulHighInt16x16", + name: "MulInt64x4", argLen: 2, commutative: true, generic: true, }, { - name: "MulHighInt16x32", + name: "MulInt64x8", argLen: 2, commutative: true, generic: true, }, { - name: "MulHighMaskedInt16x8", + name: "MulMaskedFloat32x4", argLen: 3, commutative: true, generic: true, }, { - name: "MulHighMaskedInt16x16", + name: "MulMaskedFloat32x8", argLen: 3, commutative: true, generic: true, }, { - name: "MulHighMaskedInt16x32", + name: "MulMaskedFloat32x16", argLen: 3, commutative: true, generic: true, }, { - name: "MulHighMaskedUint16x8", + name: "MulMaskedFloat64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MulHighMaskedUint16x16", + name: "MulMaskedFloat64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MulHighMaskedUint16x32", + name: "MulMaskedFloat64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MulHighUint16x8", - argLen: 2, + name: "MulMaskedInt16x8", + argLen: 3, commutative: true, generic: true, }, { - name: "MulHighUint16x16", - argLen: 2, + name: "MulMaskedInt16x16", + argLen: 3, commutative: true, generic: true, }, { - name: "MulHighUint16x32", - argLen: 2, + name: "MulMaskedInt16x32", + argLen: 3, commutative: true, generic: true, }, { - name: "MulInt16x8", - argLen: 2, + name: "MulMaskedInt32x4", + argLen: 3, commutative: true, generic: true, }, { - name: "MulInt16x16", - argLen: 2, + name: 
"MulMaskedInt32x8", + argLen: 3, commutative: true, generic: true, }, { - name: "MulInt16x32", - argLen: 2, + name: "MulMaskedInt32x16", + argLen: 3, commutative: true, generic: true, }, { - name: "MulInt32x4", - argLen: 2, + name: "MulMaskedInt64x2", + argLen: 3, commutative: true, generic: true, }, { - name: "MulInt32x8", - argLen: 2, + name: "MulMaskedInt64x4", + argLen: 3, commutative: true, generic: true, }, { - name: "MulInt32x16", - argLen: 2, + name: "MulMaskedInt64x8", + argLen: 3, commutative: true, generic: true, }, { - name: "MulInt64x2", - argLen: 2, + name: "MulMaskedUint16x8", + argLen: 3, commutative: true, generic: true, }, { - name: "MulInt64x4", - argLen: 2, + name: "MulMaskedUint16x16", + argLen: 3, commutative: true, generic: true, }, { - name: "MulInt64x8", - argLen: 2, + name: "MulMaskedUint16x32", + argLen: 3, commutative: true, generic: true, }, { - name: "MulMaskedFloat32x4", + name: "MulMaskedUint32x4", argLen: 3, commutative: true, generic: true, }, { - name: "MulMaskedFloat32x8", + name: "MulMaskedUint32x8", argLen: 3, commutative: true, generic: true, }, { - name: "MulMaskedFloat32x16", + name: "MulMaskedUint32x16", argLen: 3, commutative: true, generic: true, }, { - name: "MulMaskedFloat64x2", + name: "MulMaskedUint64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MulMaskedFloat64x4", + name: "MulMaskedUint64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MulMaskedFloat64x8", + name: "MulMaskedUint64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MulMaskedInt16x8", - argLen: 3, + name: "MulSubAddFloat32x4", + argLen: 3, + generic: true, + }, + { + name: "MulSubAddFloat32x8", + argLen: 3, + generic: true, + }, + { + name: "MulSubAddFloat32x16", + argLen: 3, + generic: true, + }, + { + name: "MulSubAddFloat64x2", + argLen: 3, + generic: true, + }, + { + name: "MulSubAddFloat64x4", + argLen: 3, + generic: true, + }, + { + name: "MulSubAddFloat64x8", + argLen: 3, + generic: true, + }, + { + name: "MulSubAddMaskedFloat32x4", + argLen: 4, + generic: true, + }, + { + name: "MulSubAddMaskedFloat32x8", + argLen: 4, + generic: true, + }, + { + name: "MulSubAddMaskedFloat32x16", + argLen: 4, + generic: true, + }, + { + name: "MulSubAddMaskedFloat64x2", + argLen: 4, + generic: true, + }, + { + name: "MulSubAddMaskedFloat64x4", + argLen: 4, + generic: true, + }, + { + name: "MulSubAddMaskedFloat64x8", + argLen: 4, + generic: true, + }, + { + name: "MulUint16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MulMaskedInt16x16", - argLen: 3, + name: "MulUint16x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MulMaskedInt16x32", - argLen: 3, + name: "MulUint16x32", + argLen: 2, commutative: true, generic: true, }, { - name: "MulMaskedInt32x4", - argLen: 3, + name: "MulUint32x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MulMaskedInt32x8", - argLen: 3, + name: "MulUint32x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MulMaskedInt32x16", - argLen: 3, + name: "MulUint32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MulMaskedInt64x2", - argLen: 3, + name: "MulUint64x2", + argLen: 2, commutative: true, generic: true, }, { - name: "MulMaskedInt64x4", - argLen: 3, + name: "MulUint64x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MulMaskedInt64x8", - argLen: 3, + name: "MulUint64x8", + argLen: 2, commutative: true, generic: true, }, @@ -68537,6 +68640,246 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + 
name: "OnesCountInt8x16", + argLen: 1, + generic: true, + }, + { + name: "OnesCountInt8x32", + argLen: 1, + generic: true, + }, + { + name: "OnesCountInt8x64", + argLen: 1, + generic: true, + }, + { + name: "OnesCountInt16x8", + argLen: 1, + generic: true, + }, + { + name: "OnesCountInt16x16", + argLen: 1, + generic: true, + }, + { + name: "OnesCountInt16x32", + argLen: 1, + generic: true, + }, + { + name: "OnesCountInt32x4", + argLen: 1, + generic: true, + }, + { + name: "OnesCountInt32x8", + argLen: 1, + generic: true, + }, + { + name: "OnesCountInt32x16", + argLen: 1, + generic: true, + }, + { + name: "OnesCountInt64x2", + argLen: 1, + generic: true, + }, + { + name: "OnesCountInt64x4", + argLen: 1, + generic: true, + }, + { + name: "OnesCountInt64x8", + argLen: 1, + generic: true, + }, + { + name: "OnesCountMaskedInt8x16", + argLen: 2, + generic: true, + }, + { + name: "OnesCountMaskedInt8x32", + argLen: 2, + generic: true, + }, + { + name: "OnesCountMaskedInt8x64", + argLen: 2, + generic: true, + }, + { + name: "OnesCountMaskedInt16x8", + argLen: 2, + generic: true, + }, + { + name: "OnesCountMaskedInt16x16", + argLen: 2, + generic: true, + }, + { + name: "OnesCountMaskedInt16x32", + argLen: 2, + generic: true, + }, + { + name: "OnesCountMaskedInt32x4", + argLen: 2, + generic: true, + }, + { + name: "OnesCountMaskedInt32x8", + argLen: 2, + generic: true, + }, + { + name: "OnesCountMaskedInt32x16", + argLen: 2, + generic: true, + }, + { + name: "OnesCountMaskedInt64x2", + argLen: 2, + generic: true, + }, + { + name: "OnesCountMaskedInt64x4", + argLen: 2, + generic: true, + }, + { + name: "OnesCountMaskedInt64x8", + argLen: 2, + generic: true, + }, + { + name: "OnesCountMaskedUint8x16", + argLen: 2, + generic: true, + }, + { + name: "OnesCountMaskedUint8x32", + argLen: 2, + generic: true, + }, + { + name: "OnesCountMaskedUint8x64", + argLen: 2, + generic: true, + }, + { + name: "OnesCountMaskedUint16x8", + argLen: 2, + generic: true, + }, + { + name: "OnesCountMaskedUint16x16", + argLen: 2, + generic: true, + }, + { + name: "OnesCountMaskedUint16x32", + argLen: 2, + generic: true, + }, + { + name: "OnesCountMaskedUint32x4", + argLen: 2, + generic: true, + }, + { + name: "OnesCountMaskedUint32x8", + argLen: 2, + generic: true, + }, + { + name: "OnesCountMaskedUint32x16", + argLen: 2, + generic: true, + }, + { + name: "OnesCountMaskedUint64x2", + argLen: 2, + generic: true, + }, + { + name: "OnesCountMaskedUint64x4", + argLen: 2, + generic: true, + }, + { + name: "OnesCountMaskedUint64x8", + argLen: 2, + generic: true, + }, + { + name: "OnesCountUint8x16", + argLen: 1, + generic: true, + }, + { + name: "OnesCountUint8x32", + argLen: 1, + generic: true, + }, + { + name: "OnesCountUint8x64", + argLen: 1, + generic: true, + }, + { + name: "OnesCountUint16x8", + argLen: 1, + generic: true, + }, + { + name: "OnesCountUint16x16", + argLen: 1, + generic: true, + }, + { + name: "OnesCountUint16x32", + argLen: 1, + generic: true, + }, + { + name: "OnesCountUint32x4", + argLen: 1, + generic: true, + }, + { + name: "OnesCountUint32x8", + argLen: 1, + generic: true, + }, + { + name: "OnesCountUint32x16", + argLen: 1, + generic: true, + }, + { + name: "OnesCountUint64x2", + argLen: 1, + generic: true, + }, + { + name: "OnesCountUint64x4", + argLen: 1, + generic: true, + }, + { + name: "OnesCountUint64x8", + argLen: 1, + generic: true, + }, { name: "OrInt8x16", argLen: 2, @@ -68753,36 +69096,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "PairDotProdInt16x8", - 
argLen: 2, - generic: true, - }, - { - name: "PairDotProdInt16x16", - argLen: 2, - generic: true, - }, - { - name: "PairDotProdInt16x32", - argLen: 2, - generic: true, - }, - { - name: "PairDotProdMaskedInt16x8", - argLen: 3, - generic: true, - }, - { - name: "PairDotProdMaskedInt16x16", - argLen: 3, - generic: true, - }, - { - name: "PairDotProdMaskedInt16x32", - argLen: 3, - generic: true, - }, { name: "Permute2Float32x4", argLen: 3, @@ -69324,243 +69637,123 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PopCountInt8x16", - argLen: 1, - generic: true, - }, - { - name: "PopCountInt8x32", - argLen: 1, - generic: true, - }, - { - name: "PopCountInt8x64", - argLen: 1, - generic: true, - }, - { - name: "PopCountInt16x8", - argLen: 1, - generic: true, - }, - { - name: "PopCountInt16x16", - argLen: 1, - generic: true, - }, - { - name: "PopCountInt16x32", + name: "ReciprocalFloat32x4", argLen: 1, generic: true, }, { - name: "PopCountInt32x4", + name: "ReciprocalFloat32x8", argLen: 1, generic: true, }, { - name: "PopCountInt32x8", + name: "ReciprocalFloat32x16", argLen: 1, generic: true, }, { - name: "PopCountInt32x16", + name: "ReciprocalFloat64x2", argLen: 1, generic: true, }, { - name: "PopCountInt64x2", + name: "ReciprocalFloat64x4", argLen: 1, generic: true, }, { - name: "PopCountInt64x4", + name: "ReciprocalFloat64x8", argLen: 1, generic: true, }, { - name: "PopCountInt64x8", - argLen: 1, - generic: true, - }, - { - name: "PopCountMaskedInt8x16", - argLen: 2, - generic: true, - }, - { - name: "PopCountMaskedInt8x32", - argLen: 2, - generic: true, - }, - { - name: "PopCountMaskedInt8x64", - argLen: 2, - generic: true, - }, - { - name: "PopCountMaskedInt16x8", - argLen: 2, - generic: true, - }, - { - name: "PopCountMaskedInt16x16", - argLen: 2, - generic: true, - }, - { - name: "PopCountMaskedInt16x32", - argLen: 2, - generic: true, - }, - { - name: "PopCountMaskedInt32x4", + name: "ReciprocalMaskedFloat32x4", argLen: 2, generic: true, }, { - name: "PopCountMaskedInt32x8", + name: "ReciprocalMaskedFloat32x8", argLen: 2, generic: true, }, { - name: "PopCountMaskedInt32x16", + name: "ReciprocalMaskedFloat32x16", argLen: 2, generic: true, }, { - name: "PopCountMaskedInt64x2", + name: "ReciprocalMaskedFloat64x2", argLen: 2, generic: true, }, { - name: "PopCountMaskedInt64x4", + name: "ReciprocalMaskedFloat64x4", argLen: 2, generic: true, }, { - name: "PopCountMaskedInt64x8", + name: "ReciprocalMaskedFloat64x8", argLen: 2, generic: true, }, { - name: "PopCountMaskedUint8x16", - argLen: 2, - generic: true, - }, - { - name: "PopCountMaskedUint8x32", - argLen: 2, - generic: true, - }, - { - name: "PopCountMaskedUint8x64", - argLen: 2, - generic: true, - }, - { - name: "PopCountMaskedUint16x8", - argLen: 2, - generic: true, - }, - { - name: "PopCountMaskedUint16x16", - argLen: 2, - generic: true, - }, - { - name: "PopCountMaskedUint16x32", - argLen: 2, - generic: true, - }, - { - name: "PopCountMaskedUint32x4", - argLen: 2, - generic: true, - }, - { - name: "PopCountMaskedUint32x8", - argLen: 2, - generic: true, - }, - { - name: "PopCountMaskedUint32x16", - argLen: 2, - generic: true, - }, - { - name: "PopCountMaskedUint64x2", - argLen: 2, - generic: true, - }, - { - name: "PopCountMaskedUint64x4", - argLen: 2, - generic: true, - }, - { - name: "PopCountMaskedUint64x8", - argLen: 2, - generic: true, - }, - { - name: "PopCountUint8x16", + name: "ReciprocalSqrtFloat32x4", argLen: 1, generic: true, }, { - name: "PopCountUint8x32", + name: "ReciprocalSqrtFloat32x8", argLen: 1, generic: 
true, }, { - name: "PopCountUint8x64", + name: "ReciprocalSqrtFloat32x16", argLen: 1, generic: true, }, { - name: "PopCountUint16x8", + name: "ReciprocalSqrtFloat64x2", argLen: 1, generic: true, }, { - name: "PopCountUint16x16", + name: "ReciprocalSqrtFloat64x4", argLen: 1, generic: true, }, { - name: "PopCountUint16x32", + name: "ReciprocalSqrtFloat64x8", argLen: 1, generic: true, }, { - name: "PopCountUint32x4", - argLen: 1, + name: "ReciprocalSqrtMaskedFloat32x4", + argLen: 2, generic: true, }, { - name: "PopCountUint32x8", - argLen: 1, + name: "ReciprocalSqrtMaskedFloat32x8", + argLen: 2, generic: true, }, { - name: "PopCountUint32x16", - argLen: 1, + name: "ReciprocalSqrtMaskedFloat32x16", + argLen: 2, generic: true, }, { - name: "PopCountUint64x2", - argLen: 1, + name: "ReciprocalSqrtMaskedFloat64x2", + argLen: 2, generic: true, }, { - name: "PopCountUint64x4", - argLen: 1, + name: "ReciprocalSqrtMaskedFloat64x4", + argLen: 2, generic: true, }, { - name: "PopCountUint64x8", - argLen: 1, + name: "ReciprocalSqrtMaskedFloat64x8", + argLen: 2, generic: true, }, { @@ -69804,115 +69997,25 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "RoundFloat32x4", + name: "RoundToEvenFloat32x4", argLen: 1, generic: true, }, { - name: "RoundFloat32x8", + name: "RoundToEvenFloat32x8", argLen: 1, generic: true, }, { - name: "RoundFloat64x2", + name: "RoundToEvenFloat64x2", argLen: 1, generic: true, }, { - name: "RoundFloat64x4", + name: "RoundToEvenFloat64x4", argLen: 1, generic: true, }, - { - name: "SaturatedAddDotProdInt32x4", - argLen: 3, - generic: true, - }, - { - name: "SaturatedAddDotProdInt32x8", - argLen: 3, - generic: true, - }, - { - name: "SaturatedAddDotProdInt32x16", - argLen: 3, - generic: true, - }, - { - name: "SaturatedAddDotProdMaskedInt32x4", - argLen: 4, - generic: true, - }, - { - name: "SaturatedAddDotProdMaskedInt32x8", - argLen: 4, - generic: true, - }, - { - name: "SaturatedAddDotProdMaskedInt32x16", - argLen: 4, - generic: true, - }, - { - name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x16", - argLen: 3, - generic: true, - }, - { - name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x32", - argLen: 3, - generic: true, - }, - { - name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x64", - argLen: 3, - generic: true, - }, - { - name: "SaturatedUnsignedSignedPairDotProdUint8x16", - argLen: 2, - generic: true, - }, - { - name: "SaturatedUnsignedSignedPairDotProdUint8x32", - argLen: 2, - generic: true, - }, - { - name: "SaturatedUnsignedSignedPairDotProdUint8x64", - argLen: 2, - generic: true, - }, - { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x4", - argLen: 3, - generic: true, - }, - { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x8", - argLen: 3, - generic: true, - }, - { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x16", - argLen: 3, - generic: true, - }, - { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4", - argLen: 4, - generic: true, - }, - { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8", - argLen: 4, - generic: true, - }, - { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16", - argLen: 4, - generic: true, - }, { name: "ScaleFloat32x4", argLen: 2, @@ -71253,36 +71356,6 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, - { - name: "SignInt8x16", - argLen: 2, - generic: true, - }, - { - name: "SignInt8x32", - argLen: 2, - generic: true, - }, - { - name: "SignInt16x8", - argLen: 2, - generic: true, - }, - { - name: "SignInt16x16", - argLen: 2, - 
generic: true, - }, - { - name: "SignInt32x4", - argLen: 2, - generic: true, - }, - { - name: "SignInt32x8", - argLen: 2, - generic: true, - }, { name: "SqrtFloat32x4", argLen: 1, @@ -71853,36 +71926,6 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, - { - name: "UnsignedSignedQuadDotProdAccumulateInt32x4", - argLen: 3, - generic: true, - }, - { - name: "UnsignedSignedQuadDotProdAccumulateInt32x8", - argLen: 3, - generic: true, - }, - { - name: "UnsignedSignedQuadDotProdAccumulateInt32x16", - argLen: 3, - generic: true, - }, - { - name: "UnsignedSignedQuadDotProdAccumulateMaskedInt32x4", - argLen: 4, - generic: true, - }, - { - name: "UnsignedSignedQuadDotProdAccumulateMaskedInt32x8", - argLen: 4, - generic: true, - }, - { - name: "UnsignedSignedQuadDotProdAccumulateMaskedInt32x16", - argLen: 4, - generic: true, - }, { name: "XorInt8x16", argLen: 2, @@ -72826,145 +72869,145 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "RoundScaledFloat32x4", + name: "RoundToEvenScaledFloat32x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RoundScaledFloat32x8", + name: "RoundToEvenScaledFloat32x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RoundScaledFloat32x16", + name: "RoundToEvenScaledFloat32x16", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RoundScaledFloat64x2", + name: "RoundToEvenScaledFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RoundScaledFloat64x4", + name: "RoundToEvenScaledFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RoundScaledFloat64x8", + name: "RoundToEvenScaledFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RoundScaledMaskedFloat32x4", + name: "RoundToEvenScaledMaskedFloat32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RoundScaledMaskedFloat32x8", + name: "RoundToEvenScaledMaskedFloat32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RoundScaledMaskedFloat32x16", + name: "RoundToEvenScaledMaskedFloat32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RoundScaledMaskedFloat64x2", + name: "RoundToEvenScaledMaskedFloat64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RoundScaledMaskedFloat64x4", + name: "RoundToEvenScaledMaskedFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RoundScaledMaskedFloat64x8", + name: "RoundToEvenScaledMaskedFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RoundScaledResidueFloat32x4", + name: "RoundToEvenScaledResidueFloat32x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RoundScaledResidueFloat32x8", + name: "RoundToEvenScaledResidueFloat32x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RoundScaledResidueFloat32x16", + name: "RoundToEvenScaledResidueFloat32x16", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RoundScaledResidueFloat64x2", + name: "RoundToEvenScaledResidueFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RoundScaledResidueFloat64x4", + name: "RoundToEvenScaledResidueFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RoundScaledResidueFloat64x8", + name: "RoundToEvenScaledResidueFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RoundScaledResidueMaskedFloat32x4", + name: "RoundToEvenScaledResidueMaskedFloat32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RoundScaledResidueMaskedFloat32x8", + name: "RoundToEvenScaledResidueMaskedFloat32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - 
name: "RoundScaledResidueMaskedFloat32x16", + name: "RoundToEvenScaledResidueMaskedFloat32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RoundScaledResidueMaskedFloat64x2", + name: "RoundToEvenScaledResidueMaskedFloat64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RoundScaledResidueMaskedFloat64x4", + name: "RoundToEvenScaledResidueMaskedFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RoundScaledResidueMaskedFloat64x8", + name: "RoundToEvenScaledResidueMaskedFloat64x8", auxType: auxInt8, argLen: 2, generic: true, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index eacb30768f8a9d..20d014361ee899 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -559,66 +559,66 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64XORQload(v) case OpAMD64XORQmodify: return rewriteValueAMD64_OpAMD64XORQmodify(v) - case OpAbsoluteInt16x16: + case OpAbsInt16x16: v.Op = OpAMD64VPABSW256 return true - case OpAbsoluteInt16x32: + case OpAbsInt16x32: v.Op = OpAMD64VPABSW512 return true - case OpAbsoluteInt16x8: + case OpAbsInt16x8: v.Op = OpAMD64VPABSW128 return true - case OpAbsoluteInt32x16: + case OpAbsInt32x16: v.Op = OpAMD64VPABSD512 return true - case OpAbsoluteInt32x4: + case OpAbsInt32x4: v.Op = OpAMD64VPABSD128 return true - case OpAbsoluteInt32x8: + case OpAbsInt32x8: v.Op = OpAMD64VPABSD256 return true - case OpAbsoluteInt64x2: + case OpAbsInt64x2: v.Op = OpAMD64VPABSQ128 return true - case OpAbsoluteInt64x4: + case OpAbsInt64x4: v.Op = OpAMD64VPABSQ256 return true - case OpAbsoluteInt64x8: + case OpAbsInt64x8: v.Op = OpAMD64VPABSQ512 return true - case OpAbsoluteInt8x16: + case OpAbsInt8x16: v.Op = OpAMD64VPABSB128 return true - case OpAbsoluteInt8x32: + case OpAbsInt8x32: v.Op = OpAMD64VPABSB256 return true - case OpAbsoluteInt8x64: + case OpAbsInt8x64: v.Op = OpAMD64VPABSB512 return true - case OpAbsoluteMaskedInt16x16: - return rewriteValueAMD64_OpAbsoluteMaskedInt16x16(v) - case OpAbsoluteMaskedInt16x32: - return rewriteValueAMD64_OpAbsoluteMaskedInt16x32(v) - case OpAbsoluteMaskedInt16x8: - return rewriteValueAMD64_OpAbsoluteMaskedInt16x8(v) - case OpAbsoluteMaskedInt32x16: - return rewriteValueAMD64_OpAbsoluteMaskedInt32x16(v) - case OpAbsoluteMaskedInt32x4: - return rewriteValueAMD64_OpAbsoluteMaskedInt32x4(v) - case OpAbsoluteMaskedInt32x8: - return rewriteValueAMD64_OpAbsoluteMaskedInt32x8(v) - case OpAbsoluteMaskedInt64x2: - return rewriteValueAMD64_OpAbsoluteMaskedInt64x2(v) - case OpAbsoluteMaskedInt64x4: - return rewriteValueAMD64_OpAbsoluteMaskedInt64x4(v) - case OpAbsoluteMaskedInt64x8: - return rewriteValueAMD64_OpAbsoluteMaskedInt64x8(v) - case OpAbsoluteMaskedInt8x16: - return rewriteValueAMD64_OpAbsoluteMaskedInt8x16(v) - case OpAbsoluteMaskedInt8x32: - return rewriteValueAMD64_OpAbsoluteMaskedInt8x32(v) - case OpAbsoluteMaskedInt8x64: - return rewriteValueAMD64_OpAbsoluteMaskedInt8x64(v) + case OpAbsMaskedInt16x16: + return rewriteValueAMD64_OpAbsMaskedInt16x16(v) + case OpAbsMaskedInt16x32: + return rewriteValueAMD64_OpAbsMaskedInt16x32(v) + case OpAbsMaskedInt16x8: + return rewriteValueAMD64_OpAbsMaskedInt16x8(v) + case OpAbsMaskedInt32x16: + return rewriteValueAMD64_OpAbsMaskedInt32x16(v) + case OpAbsMaskedInt32x4: + return rewriteValueAMD64_OpAbsMaskedInt32x4(v) + case OpAbsMaskedInt32x8: + return rewriteValueAMD64_OpAbsMaskedInt32x8(v) + case OpAbsMaskedInt64x2: + return 
rewriteValueAMD64_OpAbsMaskedInt64x2(v) + case OpAbsMaskedInt64x4: + return rewriteValueAMD64_OpAbsMaskedInt64x4(v) + case OpAbsMaskedInt64x8: + return rewriteValueAMD64_OpAbsMaskedInt64x8(v) + case OpAbsMaskedInt8x16: + return rewriteValueAMD64_OpAbsMaskedInt8x16(v) + case OpAbsMaskedInt8x32: + return rewriteValueAMD64_OpAbsMaskedInt8x32(v) + case OpAbsMaskedInt8x64: + return rewriteValueAMD64_OpAbsMaskedInt8x64(v) case OpAdd16: v.Op = OpAMD64ADDL return true @@ -637,21 +637,51 @@ func rewriteValueAMD64(v *Value) bool { case OpAdd8: v.Op = OpAMD64ADDL return true - case OpAddDotProdInt32x16: - v.Op = OpAMD64VPDPWSSD512 + case OpAddDotProdPairsSaturatedInt32x16: + v.Op = OpAMD64VPDPWSSDS512 + return true + case OpAddDotProdPairsSaturatedInt32x4: + v.Op = OpAMD64VPDPWSSDS128 + return true + case OpAddDotProdPairsSaturatedInt32x8: + v.Op = OpAMD64VPDPWSSDS256 + return true + case OpAddDotProdPairsSaturatedMaskedInt32x16: + return rewriteValueAMD64_OpAddDotProdPairsSaturatedMaskedInt32x16(v) + case OpAddDotProdPairsSaturatedMaskedInt32x4: + return rewriteValueAMD64_OpAddDotProdPairsSaturatedMaskedInt32x4(v) + case OpAddDotProdPairsSaturatedMaskedInt32x8: + return rewriteValueAMD64_OpAddDotProdPairsSaturatedMaskedInt32x8(v) + case OpAddDotProdQuadrupleInt32x16: + v.Op = OpAMD64VPDPBUSD512 + return true + case OpAddDotProdQuadrupleInt32x4: + v.Op = OpAMD64VPDPBUSD128 return true - case OpAddDotProdInt32x4: - v.Op = OpAMD64VPDPWSSD128 + case OpAddDotProdQuadrupleInt32x8: + v.Op = OpAMD64VPDPBUSD256 return true - case OpAddDotProdInt32x8: - v.Op = OpAMD64VPDPWSSD256 + case OpAddDotProdQuadrupleMaskedInt32x16: + return rewriteValueAMD64_OpAddDotProdQuadrupleMaskedInt32x16(v) + case OpAddDotProdQuadrupleMaskedInt32x4: + return rewriteValueAMD64_OpAddDotProdQuadrupleMaskedInt32x4(v) + case OpAddDotProdQuadrupleMaskedInt32x8: + return rewriteValueAMD64_OpAddDotProdQuadrupleMaskedInt32x8(v) + case OpAddDotProdQuadrupleSaturatedInt32x16: + v.Op = OpAMD64VPDPBUSDS512 return true - case OpAddDotProdMaskedInt32x16: - return rewriteValueAMD64_OpAddDotProdMaskedInt32x16(v) - case OpAddDotProdMaskedInt32x4: - return rewriteValueAMD64_OpAddDotProdMaskedInt32x4(v) - case OpAddDotProdMaskedInt32x8: - return rewriteValueAMD64_OpAddDotProdMaskedInt32x8(v) + case OpAddDotProdQuadrupleSaturatedInt32x4: + v.Op = OpAMD64VPDPBUSDS128 + return true + case OpAddDotProdQuadrupleSaturatedInt32x8: + v.Op = OpAMD64VPDPBUSDS256 + return true + case OpAddDotProdQuadrupleSaturatedMaskedInt32x16: + return rewriteValueAMD64_OpAddDotProdQuadrupleSaturatedMaskedInt32x16(v) + case OpAddDotProdQuadrupleSaturatedMaskedInt32x4: + return rewriteValueAMD64_OpAddDotProdQuadrupleSaturatedMaskedInt32x4(v) + case OpAddDotProdQuadrupleSaturatedMaskedInt32x8: + return rewriteValueAMD64_OpAddDotProdQuadrupleSaturatedMaskedInt32x8(v) case OpAddFloat32x16: v.Op = OpAMD64VADDPS512 return true @@ -854,22 +884,22 @@ func rewriteValueAMD64(v *Value) bool { case OpAddSaturatedMaskedUint8x64: return rewriteValueAMD64_OpAddSaturatedMaskedUint8x64(v) case OpAddSaturatedUint16x16: - v.Op = OpAMD64VPADDSW256 + v.Op = OpAMD64VPADDUSW256 return true case OpAddSaturatedUint16x32: - v.Op = OpAMD64VPADDSW512 + v.Op = OpAMD64VPADDUSW512 return true case OpAddSaturatedUint16x8: - v.Op = OpAMD64VPADDSW128 + v.Op = OpAMD64VPADDUSW128 return true case OpAddSaturatedUint8x16: - v.Op = OpAMD64VPADDSB128 + v.Op = OpAMD64VPADDUSB128 return true case OpAddSaturatedUint8x32: - v.Op = OpAMD64VPADDSB256 + v.Op = OpAMD64VPADDUSB256 return true case OpAddSaturatedUint8x64: - 
v.Op = OpAMD64VPADDSB512 + v.Op = OpAMD64VPADDUSB512 return true case OpAddSubFloat32x4: v.Op = OpAMD64VADDSUBPS128 @@ -1128,66 +1158,6 @@ func rewriteValueAMD64(v *Value) bool { case OpAndUint8x64: v.Op = OpAMD64VPANDD512 return true - case OpApproximateReciprocalFloat32x16: - v.Op = OpAMD64VRCP14PS512 - return true - case OpApproximateReciprocalFloat32x4: - v.Op = OpAMD64VRCPPS128 - return true - case OpApproximateReciprocalFloat32x8: - v.Op = OpAMD64VRCPPS256 - return true - case OpApproximateReciprocalFloat64x2: - v.Op = OpAMD64VRCP14PD128 - return true - case OpApproximateReciprocalFloat64x4: - v.Op = OpAMD64VRCP14PD256 - return true - case OpApproximateReciprocalFloat64x8: - v.Op = OpAMD64VRCP14PD512 - return true - case OpApproximateReciprocalMaskedFloat32x16: - return rewriteValueAMD64_OpApproximateReciprocalMaskedFloat32x16(v) - case OpApproximateReciprocalMaskedFloat32x4: - return rewriteValueAMD64_OpApproximateReciprocalMaskedFloat32x4(v) - case OpApproximateReciprocalMaskedFloat32x8: - return rewriteValueAMD64_OpApproximateReciprocalMaskedFloat32x8(v) - case OpApproximateReciprocalMaskedFloat64x2: - return rewriteValueAMD64_OpApproximateReciprocalMaskedFloat64x2(v) - case OpApproximateReciprocalMaskedFloat64x4: - return rewriteValueAMD64_OpApproximateReciprocalMaskedFloat64x4(v) - case OpApproximateReciprocalMaskedFloat64x8: - return rewriteValueAMD64_OpApproximateReciprocalMaskedFloat64x8(v) - case OpApproximateReciprocalOfSqrtFloat32x16: - v.Op = OpAMD64VRSQRT14PS512 - return true - case OpApproximateReciprocalOfSqrtFloat32x4: - v.Op = OpAMD64VRSQRTPS128 - return true - case OpApproximateReciprocalOfSqrtFloat32x8: - v.Op = OpAMD64VRSQRTPS256 - return true - case OpApproximateReciprocalOfSqrtFloat64x2: - v.Op = OpAMD64VRSQRT14PD128 - return true - case OpApproximateReciprocalOfSqrtFloat64x4: - v.Op = OpAMD64VRSQRT14PD256 - return true - case OpApproximateReciprocalOfSqrtFloat64x8: - v.Op = OpAMD64VRSQRT14PD512 - return true - case OpApproximateReciprocalOfSqrtMaskedFloat32x16: - return rewriteValueAMD64_OpApproximateReciprocalOfSqrtMaskedFloat32x16(v) - case OpApproximateReciprocalOfSqrtMaskedFloat32x4: - return rewriteValueAMD64_OpApproximateReciprocalOfSqrtMaskedFloat32x4(v) - case OpApproximateReciprocalOfSqrtMaskedFloat32x8: - return rewriteValueAMD64_OpApproximateReciprocalOfSqrtMaskedFloat32x8(v) - case OpApproximateReciprocalOfSqrtMaskedFloat64x2: - return rewriteValueAMD64_OpApproximateReciprocalOfSqrtMaskedFloat64x2(v) - case OpApproximateReciprocalOfSqrtMaskedFloat64x4: - return rewriteValueAMD64_OpApproximateReciprocalOfSqrtMaskedFloat64x4(v) - case OpApproximateReciprocalOfSqrtMaskedFloat64x8: - return rewriteValueAMD64_OpApproximateReciprocalOfSqrtMaskedFloat64x8(v) case OpAtomicAdd32: return rewriteValueAMD64_OpAtomicAdd32(v) case OpAtomicAdd64: @@ -1468,6 +1438,24 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpConvertToUint32MaskedFloat32x4(v) case OpConvertToUint32MaskedFloat32x8: return rewriteValueAMD64_OpConvertToUint32MaskedFloat32x8(v) + case OpCopySignInt16x16: + v.Op = OpAMD64VPSIGNW256 + return true + case OpCopySignInt16x8: + v.Op = OpAMD64VPSIGNW128 + return true + case OpCopySignInt32x4: + v.Op = OpAMD64VPSIGND128 + return true + case OpCopySignInt32x8: + v.Op = OpAMD64VPSIGND256 + return true + case OpCopySignInt8x16: + v.Op = OpAMD64VPSIGNB128 + return true + case OpCopySignInt8x32: + v.Op = OpAMD64VPSIGNB256 + return true case OpCtz16: return rewriteValueAMD64_OpCtz16(v) case OpCtz16NonZero: @@ -1620,12 +1608,36 @@ func 
rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpDivMaskedFloat64x4(v) case OpDivMaskedFloat64x8: return rewriteValueAMD64_OpDivMaskedFloat64x8(v) - case OpDotProdBroadcastFloat32x4: - return rewriteValueAMD64_OpDotProdBroadcastFloat32x4(v) - case OpDotProdBroadcastFloat32x8: - return rewriteValueAMD64_OpDotProdBroadcastFloat32x8(v) - case OpDotProdBroadcastFloat64x2: - return rewriteValueAMD64_OpDotProdBroadcastFloat64x2(v) + case OpDotProdPairsInt16x16: + v.Op = OpAMD64VPMADDWD256 + return true + case OpDotProdPairsInt16x32: + v.Op = OpAMD64VPMADDWD512 + return true + case OpDotProdPairsInt16x8: + v.Op = OpAMD64VPMADDWD128 + return true + case OpDotProdPairsMaskedInt16x16: + return rewriteValueAMD64_OpDotProdPairsMaskedInt16x16(v) + case OpDotProdPairsMaskedInt16x32: + return rewriteValueAMD64_OpDotProdPairsMaskedInt16x32(v) + case OpDotProdPairsMaskedInt16x8: + return rewriteValueAMD64_OpDotProdPairsMaskedInt16x8(v) + case OpDotProdPairsSaturatedMaskedUint8x16: + return rewriteValueAMD64_OpDotProdPairsSaturatedMaskedUint8x16(v) + case OpDotProdPairsSaturatedMaskedUint8x32: + return rewriteValueAMD64_OpDotProdPairsSaturatedMaskedUint8x32(v) + case OpDotProdPairsSaturatedMaskedUint8x64: + return rewriteValueAMD64_OpDotProdPairsSaturatedMaskedUint8x64(v) + case OpDotProdPairsSaturatedUint8x16: + v.Op = OpAMD64VPMADDUBSW128 + return true + case OpDotProdPairsSaturatedUint8x32: + v.Op = OpAMD64VPMADDUBSW256 + return true + case OpDotProdPairsSaturatedUint8x64: + v.Op = OpAMD64VPMADDUBSW512 + return true case OpEq16: return rewriteValueAMD64_OpEq16(v) case OpEq32: @@ -1898,96 +1910,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpFloorScaledResidueMaskedFloat64x4(v) case OpFloorScaledResidueMaskedFloat64x8: return rewriteValueAMD64_OpFloorScaledResidueMaskedFloat64x8(v) - case OpFusedMultiplyAddFloat32x16: - v.Op = OpAMD64VFMADD213PS512 - return true - case OpFusedMultiplyAddFloat32x4: - v.Op = OpAMD64VFMADD213PS128 - return true - case OpFusedMultiplyAddFloat32x8: - v.Op = OpAMD64VFMADD213PS256 - return true - case OpFusedMultiplyAddFloat64x2: - v.Op = OpAMD64VFMADD213PD128 - return true - case OpFusedMultiplyAddFloat64x4: - v.Op = OpAMD64VFMADD213PD256 - return true - case OpFusedMultiplyAddFloat64x8: - v.Op = OpAMD64VFMADD213PD512 - return true - case OpFusedMultiplyAddMaskedFloat32x16: - return rewriteValueAMD64_OpFusedMultiplyAddMaskedFloat32x16(v) - case OpFusedMultiplyAddMaskedFloat32x4: - return rewriteValueAMD64_OpFusedMultiplyAddMaskedFloat32x4(v) - case OpFusedMultiplyAddMaskedFloat32x8: - return rewriteValueAMD64_OpFusedMultiplyAddMaskedFloat32x8(v) - case OpFusedMultiplyAddMaskedFloat64x2: - return rewriteValueAMD64_OpFusedMultiplyAddMaskedFloat64x2(v) - case OpFusedMultiplyAddMaskedFloat64x4: - return rewriteValueAMD64_OpFusedMultiplyAddMaskedFloat64x4(v) - case OpFusedMultiplyAddMaskedFloat64x8: - return rewriteValueAMD64_OpFusedMultiplyAddMaskedFloat64x8(v) - case OpFusedMultiplyAddSubFloat32x16: - v.Op = OpAMD64VFMADDSUB213PS512 - return true - case OpFusedMultiplyAddSubFloat32x4: - v.Op = OpAMD64VFMADDSUB213PS128 - return true - case OpFusedMultiplyAddSubFloat32x8: - v.Op = OpAMD64VFMADDSUB213PS256 - return true - case OpFusedMultiplyAddSubFloat64x2: - v.Op = OpAMD64VFMADDSUB213PD128 - return true - case OpFusedMultiplyAddSubFloat64x4: - v.Op = OpAMD64VFMADDSUB213PD256 - return true - case OpFusedMultiplyAddSubFloat64x8: - v.Op = OpAMD64VFMADDSUB213PD512 - return true - case OpFusedMultiplyAddSubMaskedFloat32x16: - return 
rewriteValueAMD64_OpFusedMultiplyAddSubMaskedFloat32x16(v) - case OpFusedMultiplyAddSubMaskedFloat32x4: - return rewriteValueAMD64_OpFusedMultiplyAddSubMaskedFloat32x4(v) - case OpFusedMultiplyAddSubMaskedFloat32x8: - return rewriteValueAMD64_OpFusedMultiplyAddSubMaskedFloat32x8(v) - case OpFusedMultiplyAddSubMaskedFloat64x2: - return rewriteValueAMD64_OpFusedMultiplyAddSubMaskedFloat64x2(v) - case OpFusedMultiplyAddSubMaskedFloat64x4: - return rewriteValueAMD64_OpFusedMultiplyAddSubMaskedFloat64x4(v) - case OpFusedMultiplyAddSubMaskedFloat64x8: - return rewriteValueAMD64_OpFusedMultiplyAddSubMaskedFloat64x8(v) - case OpFusedMultiplySubAddFloat32x16: - v.Op = OpAMD64VFMSUBADD213PS512 - return true - case OpFusedMultiplySubAddFloat32x4: - v.Op = OpAMD64VFMSUBADD213PS128 - return true - case OpFusedMultiplySubAddFloat32x8: - v.Op = OpAMD64VFMSUBADD213PS256 - return true - case OpFusedMultiplySubAddFloat64x2: - v.Op = OpAMD64VFMSUBADD213PD128 - return true - case OpFusedMultiplySubAddFloat64x4: - v.Op = OpAMD64VFMSUBADD213PD256 - return true - case OpFusedMultiplySubAddFloat64x8: - v.Op = OpAMD64VFMSUBADD213PD512 - return true - case OpFusedMultiplySubAddMaskedFloat32x16: - return rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat32x16(v) - case OpFusedMultiplySubAddMaskedFloat32x4: - return rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat32x4(v) - case OpFusedMultiplySubAddMaskedFloat32x8: - return rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat32x8(v) - case OpFusedMultiplySubAddMaskedFloat64x2: - return rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat64x2(v) - case OpFusedMultiplySubAddMaskedFloat64x4: - return rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat64x4(v) - case OpFusedMultiplySubAddMaskedFloat64x8: - return rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat64x8(v) case OpGaloisFieldAffineTransformInverseMaskedUint8x16: return rewriteValueAMD64_OpGaloisFieldAffineTransformInverseMaskedUint8x16(v) case OpGaloisFieldAffineTransformInverseMaskedUint8x32: @@ -3138,48 +3060,78 @@ func rewriteValueAMD64(v *Value) bool { case OpMul8: v.Op = OpAMD64MULL return true - case OpMulEvenWidenInt32x4: - v.Op = OpAMD64VPMULDQ128 + case OpMulAddFloat32x16: + v.Op = OpAMD64VFMADD213PS512 return true - case OpMulEvenWidenInt32x8: - v.Op = OpAMD64VPMULDQ256 + case OpMulAddFloat32x4: + v.Op = OpAMD64VFMADD213PS128 + return true + case OpMulAddFloat32x8: + v.Op = OpAMD64VFMADD213PS256 + return true + case OpMulAddFloat64x2: + v.Op = OpAMD64VFMADD213PD128 + return true + case OpMulAddFloat64x4: + v.Op = OpAMD64VFMADD213PD256 return true - case OpMulEvenWidenInt64x2: + case OpMulAddFloat64x8: + v.Op = OpAMD64VFMADD213PD512 + return true + case OpMulAddMaskedFloat32x16: + return rewriteValueAMD64_OpMulAddMaskedFloat32x16(v) + case OpMulAddMaskedFloat32x4: + return rewriteValueAMD64_OpMulAddMaskedFloat32x4(v) + case OpMulAddMaskedFloat32x8: + return rewriteValueAMD64_OpMulAddMaskedFloat32x8(v) + case OpMulAddMaskedFloat64x2: + return rewriteValueAMD64_OpMulAddMaskedFloat64x2(v) + case OpMulAddMaskedFloat64x4: + return rewriteValueAMD64_OpMulAddMaskedFloat64x4(v) + case OpMulAddMaskedFloat64x8: + return rewriteValueAMD64_OpMulAddMaskedFloat64x8(v) + case OpMulAddSubFloat32x16: + v.Op = OpAMD64VFMADDSUB213PS512 + return true + case OpMulAddSubFloat32x4: + v.Op = OpAMD64VFMADDSUB213PS128 + return true + case OpMulAddSubFloat32x8: + v.Op = OpAMD64VFMADDSUB213PS256 + return true + case OpMulAddSubFloat64x2: + v.Op = OpAMD64VFMADDSUB213PD128 + return true + case OpMulAddSubFloat64x4: + v.Op = 
OpAMD64VFMADDSUB213PD256 + return true + case OpMulAddSubFloat64x8: + v.Op = OpAMD64VFMADDSUB213PD512 + return true + case OpMulAddSubMaskedFloat32x16: + return rewriteValueAMD64_OpMulAddSubMaskedFloat32x16(v) + case OpMulAddSubMaskedFloat32x4: + return rewriteValueAMD64_OpMulAddSubMaskedFloat32x4(v) + case OpMulAddSubMaskedFloat32x8: + return rewriteValueAMD64_OpMulAddSubMaskedFloat32x8(v) + case OpMulAddSubMaskedFloat64x2: + return rewriteValueAMD64_OpMulAddSubMaskedFloat64x2(v) + case OpMulAddSubMaskedFloat64x4: + return rewriteValueAMD64_OpMulAddSubMaskedFloat64x4(v) + case OpMulAddSubMaskedFloat64x8: + return rewriteValueAMD64_OpMulAddSubMaskedFloat64x8(v) + case OpMulEvenWidenInt32x4: v.Op = OpAMD64VPMULDQ128 return true - case OpMulEvenWidenInt64x4: + case OpMulEvenWidenInt32x8: v.Op = OpAMD64VPMULDQ256 return true - case OpMulEvenWidenInt64x8: - v.Op = OpAMD64VPMULDQ512 - return true - case OpMulEvenWidenMaskedInt64x2: - return rewriteValueAMD64_OpMulEvenWidenMaskedInt64x2(v) - case OpMulEvenWidenMaskedInt64x4: - return rewriteValueAMD64_OpMulEvenWidenMaskedInt64x4(v) - case OpMulEvenWidenMaskedInt64x8: - return rewriteValueAMD64_OpMulEvenWidenMaskedInt64x8(v) - case OpMulEvenWidenMaskedUint64x2: - return rewriteValueAMD64_OpMulEvenWidenMaskedUint64x2(v) - case OpMulEvenWidenMaskedUint64x4: - return rewriteValueAMD64_OpMulEvenWidenMaskedUint64x4(v) - case OpMulEvenWidenMaskedUint64x8: - return rewriteValueAMD64_OpMulEvenWidenMaskedUint64x8(v) case OpMulEvenWidenUint32x4: v.Op = OpAMD64VPMULUDQ128 return true case OpMulEvenWidenUint32x8: v.Op = OpAMD64VPMULUDQ256 return true - case OpMulEvenWidenUint64x2: - v.Op = OpAMD64VPMULUDQ128 - return true - case OpMulEvenWidenUint64x4: - v.Op = OpAMD64VPMULUDQ256 - return true - case OpMulEvenWidenUint64x8: - v.Op = OpAMD64VPMULUDQ512 - return true case OpMulFloat32x16: v.Op = OpAMD64VMULPS512 return true @@ -3199,13 +3151,13 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64VMULPD512 return true case OpMulHighInt16x16: - v.Op = OpAMD64VPMULHW256 + v.Op = OpAMD64VPMULHUW256 return true case OpMulHighInt16x32: v.Op = OpAMD64VPMULHW512 return true case OpMulHighInt16x8: - v.Op = OpAMD64VPMULHW128 + v.Op = OpAMD64VPMULHUW128 return true case OpMulHighMaskedInt16x16: return rewriteValueAMD64_OpMulHighMaskedInt16x16(v) @@ -3213,21 +3165,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMulHighMaskedInt16x32(v) case OpMulHighMaskedInt16x8: return rewriteValueAMD64_OpMulHighMaskedInt16x8(v) - case OpMulHighMaskedUint16x16: - return rewriteValueAMD64_OpMulHighMaskedUint16x16(v) - case OpMulHighMaskedUint16x32: - return rewriteValueAMD64_OpMulHighMaskedUint16x32(v) - case OpMulHighMaskedUint16x8: - return rewriteValueAMD64_OpMulHighMaskedUint16x8(v) - case OpMulHighUint16x16: - v.Op = OpAMD64VPMULHUW256 - return true - case OpMulHighUint16x32: - v.Op = OpAMD64VPMULHUW512 - return true - case OpMulHighUint16x8: - v.Op = OpAMD64VPMULHUW128 - return true case OpMulInt16x16: v.Op = OpAMD64VPMULLW256 return true @@ -3285,6 +3222,81 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMulMaskedInt64x4(v) case OpMulMaskedInt64x8: return rewriteValueAMD64_OpMulMaskedInt64x8(v) + case OpMulMaskedUint16x16: + return rewriteValueAMD64_OpMulMaskedUint16x16(v) + case OpMulMaskedUint16x32: + return rewriteValueAMD64_OpMulMaskedUint16x32(v) + case OpMulMaskedUint16x8: + return rewriteValueAMD64_OpMulMaskedUint16x8(v) + case OpMulMaskedUint32x16: + return rewriteValueAMD64_OpMulMaskedUint32x16(v) + case 
OpMulMaskedUint32x4: + return rewriteValueAMD64_OpMulMaskedUint32x4(v) + case OpMulMaskedUint32x8: + return rewriteValueAMD64_OpMulMaskedUint32x8(v) + case OpMulMaskedUint64x2: + return rewriteValueAMD64_OpMulMaskedUint64x2(v) + case OpMulMaskedUint64x4: + return rewriteValueAMD64_OpMulMaskedUint64x4(v) + case OpMulMaskedUint64x8: + return rewriteValueAMD64_OpMulMaskedUint64x8(v) + case OpMulSubAddFloat32x16: + v.Op = OpAMD64VFMSUBADD213PS512 + return true + case OpMulSubAddFloat32x4: + v.Op = OpAMD64VFMSUBADD213PS128 + return true + case OpMulSubAddFloat32x8: + v.Op = OpAMD64VFMSUBADD213PS256 + return true + case OpMulSubAddFloat64x2: + v.Op = OpAMD64VFMSUBADD213PD128 + return true + case OpMulSubAddFloat64x4: + v.Op = OpAMD64VFMSUBADD213PD256 + return true + case OpMulSubAddFloat64x8: + v.Op = OpAMD64VFMSUBADD213PD512 + return true + case OpMulSubAddMaskedFloat32x16: + return rewriteValueAMD64_OpMulSubAddMaskedFloat32x16(v) + case OpMulSubAddMaskedFloat32x4: + return rewriteValueAMD64_OpMulSubAddMaskedFloat32x4(v) + case OpMulSubAddMaskedFloat32x8: + return rewriteValueAMD64_OpMulSubAddMaskedFloat32x8(v) + case OpMulSubAddMaskedFloat64x2: + return rewriteValueAMD64_OpMulSubAddMaskedFloat64x2(v) + case OpMulSubAddMaskedFloat64x4: + return rewriteValueAMD64_OpMulSubAddMaskedFloat64x4(v) + case OpMulSubAddMaskedFloat64x8: + return rewriteValueAMD64_OpMulSubAddMaskedFloat64x8(v) + case OpMulUint16x16: + v.Op = OpAMD64VPMULLW256 + return true + case OpMulUint16x32: + v.Op = OpAMD64VPMULLW512 + return true + case OpMulUint16x8: + v.Op = OpAMD64VPMULLW128 + return true + case OpMulUint32x16: + v.Op = OpAMD64VPMULLD512 + return true + case OpMulUint32x4: + v.Op = OpAMD64VPMULLD128 + return true + case OpMulUint32x8: + v.Op = OpAMD64VPMULLD256 + return true + case OpMulUint64x2: + v.Op = OpAMD64VPMULLQ128 + return true + case OpMulUint64x4: + v.Op = OpAMD64VPMULLQ256 + return true + case OpMulUint64x8: + v.Op = OpAMD64VPMULLQ512 + return true case OpNeg16: v.Op = OpAMD64NEGL return true @@ -3444,6 +3456,126 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpNotEqualUint8x64(v) case OpOffPtr: return rewriteValueAMD64_OpOffPtr(v) + case OpOnesCountInt16x16: + v.Op = OpAMD64VPOPCNTW256 + return true + case OpOnesCountInt16x32: + v.Op = OpAMD64VPOPCNTW512 + return true + case OpOnesCountInt16x8: + v.Op = OpAMD64VPOPCNTW128 + return true + case OpOnesCountInt32x16: + v.Op = OpAMD64VPOPCNTD512 + return true + case OpOnesCountInt32x4: + v.Op = OpAMD64VPOPCNTD128 + return true + case OpOnesCountInt32x8: + v.Op = OpAMD64VPOPCNTD256 + return true + case OpOnesCountInt64x2: + v.Op = OpAMD64VPOPCNTQ128 + return true + case OpOnesCountInt64x4: + v.Op = OpAMD64VPOPCNTQ256 + return true + case OpOnesCountInt64x8: + v.Op = OpAMD64VPOPCNTQ512 + return true + case OpOnesCountInt8x16: + v.Op = OpAMD64VPOPCNTB128 + return true + case OpOnesCountInt8x32: + v.Op = OpAMD64VPOPCNTB256 + return true + case OpOnesCountInt8x64: + v.Op = OpAMD64VPOPCNTB512 + return true + case OpOnesCountMaskedInt16x16: + return rewriteValueAMD64_OpOnesCountMaskedInt16x16(v) + case OpOnesCountMaskedInt16x32: + return rewriteValueAMD64_OpOnesCountMaskedInt16x32(v) + case OpOnesCountMaskedInt16x8: + return rewriteValueAMD64_OpOnesCountMaskedInt16x8(v) + case OpOnesCountMaskedInt32x16: + return rewriteValueAMD64_OpOnesCountMaskedInt32x16(v) + case OpOnesCountMaskedInt32x4: + return rewriteValueAMD64_OpOnesCountMaskedInt32x4(v) + case OpOnesCountMaskedInt32x8: + return rewriteValueAMD64_OpOnesCountMaskedInt32x8(v) + case 
OpOnesCountMaskedInt64x2: + return rewriteValueAMD64_OpOnesCountMaskedInt64x2(v) + case OpOnesCountMaskedInt64x4: + return rewriteValueAMD64_OpOnesCountMaskedInt64x4(v) + case OpOnesCountMaskedInt64x8: + return rewriteValueAMD64_OpOnesCountMaskedInt64x8(v) + case OpOnesCountMaskedInt8x16: + return rewriteValueAMD64_OpOnesCountMaskedInt8x16(v) + case OpOnesCountMaskedInt8x32: + return rewriteValueAMD64_OpOnesCountMaskedInt8x32(v) + case OpOnesCountMaskedInt8x64: + return rewriteValueAMD64_OpOnesCountMaskedInt8x64(v) + case OpOnesCountMaskedUint16x16: + return rewriteValueAMD64_OpOnesCountMaskedUint16x16(v) + case OpOnesCountMaskedUint16x32: + return rewriteValueAMD64_OpOnesCountMaskedUint16x32(v) + case OpOnesCountMaskedUint16x8: + return rewriteValueAMD64_OpOnesCountMaskedUint16x8(v) + case OpOnesCountMaskedUint32x16: + return rewriteValueAMD64_OpOnesCountMaskedUint32x16(v) + case OpOnesCountMaskedUint32x4: + return rewriteValueAMD64_OpOnesCountMaskedUint32x4(v) + case OpOnesCountMaskedUint32x8: + return rewriteValueAMD64_OpOnesCountMaskedUint32x8(v) + case OpOnesCountMaskedUint64x2: + return rewriteValueAMD64_OpOnesCountMaskedUint64x2(v) + case OpOnesCountMaskedUint64x4: + return rewriteValueAMD64_OpOnesCountMaskedUint64x4(v) + case OpOnesCountMaskedUint64x8: + return rewriteValueAMD64_OpOnesCountMaskedUint64x8(v) + case OpOnesCountMaskedUint8x16: + return rewriteValueAMD64_OpOnesCountMaskedUint8x16(v) + case OpOnesCountMaskedUint8x32: + return rewriteValueAMD64_OpOnesCountMaskedUint8x32(v) + case OpOnesCountMaskedUint8x64: + return rewriteValueAMD64_OpOnesCountMaskedUint8x64(v) + case OpOnesCountUint16x16: + v.Op = OpAMD64VPOPCNTW256 + return true + case OpOnesCountUint16x32: + v.Op = OpAMD64VPOPCNTW512 + return true + case OpOnesCountUint16x8: + v.Op = OpAMD64VPOPCNTW128 + return true + case OpOnesCountUint32x16: + v.Op = OpAMD64VPOPCNTD512 + return true + case OpOnesCountUint32x4: + v.Op = OpAMD64VPOPCNTD128 + return true + case OpOnesCountUint32x8: + v.Op = OpAMD64VPOPCNTD256 + return true + case OpOnesCountUint64x2: + v.Op = OpAMD64VPOPCNTQ128 + return true + case OpOnesCountUint64x4: + v.Op = OpAMD64VPOPCNTQ256 + return true + case OpOnesCountUint64x8: + v.Op = OpAMD64VPOPCNTQ512 + return true + case OpOnesCountUint8x16: + v.Op = OpAMD64VPOPCNTB128 + return true + case OpOnesCountUint8x32: + v.Op = OpAMD64VPOPCNTB256 + return true + case OpOnesCountUint8x64: + v.Op = OpAMD64VPOPCNTB512 + return true case OpOr16: v.Op = OpAMD64ORL return true @@ -3555,21 +3687,6 @@ func rewriteValueAMD64(v *Value) bool { case OpOrUint8x64: v.Op = OpAMD64VPORD512 return true - case OpPairDotProdInt16x16: - v.Op = OpAMD64VPMADDWD256 - return true - case OpPairDotProdInt16x32: - v.Op = OpAMD64VPMADDWD512 - return true - case OpPairDotProdInt16x8: - v.Op = OpAMD64VPMADDWD128 - return true - case OpPairDotProdMaskedInt16x16: - return rewriteValueAMD64_OpPairDotProdMaskedInt16x16(v) - case OpPairDotProdMaskedInt16x32: - return rewriteValueAMD64_OpPairDotProdMaskedInt16x32(v) - case OpPairDotProdMaskedInt16x8: - return rewriteValueAMD64_OpPairDotProdMaskedInt16x8(v) case OpPanicBounds: v.Op = OpAMD64LoweredPanicBoundsRR return true @@ -3853,132 +3970,72 @@ func rewriteValueAMD64(v *Value) bool { return true case OpPopCount8: return rewriteValueAMD64_OpPopCount8(v) - case OpPopCountInt16x16: - v.Op = OpAMD64VPOPCNTW256 - return true - case OpPopCountInt16x32: - v.Op = OpAMD64VPOPCNTW512 - return true - case OpPopCountInt16x8: - v.Op = OpAMD64VPOPCNTW128 - return true - case OpPopCountInt32x16: - v.Op = 
OpAMD64VPOPCNTD512 - return true - case OpPopCountInt32x4: - v.Op = OpAMD64VPOPCNTD128 - return true - case OpPopCountInt32x8: - v.Op = OpAMD64VPOPCNTD256 - return true - case OpPopCountInt64x2: - v.Op = OpAMD64VPOPCNTQ128 - return true - case OpPopCountInt64x4: - v.Op = OpAMD64VPOPCNTQ256 - return true - case OpPopCountInt64x8: - v.Op = OpAMD64VPOPCNTQ512 - return true - case OpPopCountInt8x16: - v.Op = OpAMD64VPOPCNTB128 - return true - case OpPopCountInt8x32: - v.Op = OpAMD64VPOPCNTB256 - return true - case OpPopCountInt8x64: - v.Op = OpAMD64VPOPCNTB512 - return true - case OpPopCountMaskedInt16x16: - return rewriteValueAMD64_OpPopCountMaskedInt16x16(v) - case OpPopCountMaskedInt16x32: - return rewriteValueAMD64_OpPopCountMaskedInt16x32(v) - case OpPopCountMaskedInt16x8: - return rewriteValueAMD64_OpPopCountMaskedInt16x8(v) - case OpPopCountMaskedInt32x16: - return rewriteValueAMD64_OpPopCountMaskedInt32x16(v) - case OpPopCountMaskedInt32x4: - return rewriteValueAMD64_OpPopCountMaskedInt32x4(v) - case OpPopCountMaskedInt32x8: - return rewriteValueAMD64_OpPopCountMaskedInt32x8(v) - case OpPopCountMaskedInt64x2: - return rewriteValueAMD64_OpPopCountMaskedInt64x2(v) - case OpPopCountMaskedInt64x4: - return rewriteValueAMD64_OpPopCountMaskedInt64x4(v) - case OpPopCountMaskedInt64x8: - return rewriteValueAMD64_OpPopCountMaskedInt64x8(v) - case OpPopCountMaskedInt8x16: - return rewriteValueAMD64_OpPopCountMaskedInt8x16(v) - case OpPopCountMaskedInt8x32: - return rewriteValueAMD64_OpPopCountMaskedInt8x32(v) - case OpPopCountMaskedInt8x64: - return rewriteValueAMD64_OpPopCountMaskedInt8x64(v) - case OpPopCountMaskedUint16x16: - return rewriteValueAMD64_OpPopCountMaskedUint16x16(v) - case OpPopCountMaskedUint16x32: - return rewriteValueAMD64_OpPopCountMaskedUint16x32(v) - case OpPopCountMaskedUint16x8: - return rewriteValueAMD64_OpPopCountMaskedUint16x8(v) - case OpPopCountMaskedUint32x16: - return rewriteValueAMD64_OpPopCountMaskedUint32x16(v) - case OpPopCountMaskedUint32x4: - return rewriteValueAMD64_OpPopCountMaskedUint32x4(v) - case OpPopCountMaskedUint32x8: - return rewriteValueAMD64_OpPopCountMaskedUint32x8(v) - case OpPopCountMaskedUint64x2: - return rewriteValueAMD64_OpPopCountMaskedUint64x2(v) - case OpPopCountMaskedUint64x4: - return rewriteValueAMD64_OpPopCountMaskedUint64x4(v) - case OpPopCountMaskedUint64x8: - return rewriteValueAMD64_OpPopCountMaskedUint64x8(v) - case OpPopCountMaskedUint8x16: - return rewriteValueAMD64_OpPopCountMaskedUint8x16(v) - case OpPopCountMaskedUint8x32: - return rewriteValueAMD64_OpPopCountMaskedUint8x32(v) - case OpPopCountMaskedUint8x64: - return rewriteValueAMD64_OpPopCountMaskedUint8x64(v) - case OpPopCountUint16x16: - v.Op = OpAMD64VPOPCNTW256 + case OpPrefetchCache: + v.Op = OpAMD64PrefetchT0 return true - case OpPopCountUint16x32: - v.Op = OpAMD64VPOPCNTW512 + case OpPrefetchCacheStreamed: + v.Op = OpAMD64PrefetchNTA return true - case OpPopCountUint16x8: - v.Op = OpAMD64VPOPCNTW128 + case OpReciprocalFloat32x16: + v.Op = OpAMD64VRCP14PS512 return true - case OpPopCountUint32x16: - v.Op = OpAMD64VPOPCNTD512 + case OpReciprocalFloat32x4: + v.Op = OpAMD64VRCPPS128 return true - case OpPopCountUint32x4: - v.Op = OpAMD64VPOPCNTD128 + case OpReciprocalFloat32x8: + v.Op = OpAMD64VRCPPS256 return true - case OpPopCountUint32x8: - v.Op = OpAMD64VPOPCNTD256 + case OpReciprocalFloat64x2: + v.Op = OpAMD64VRCP14PD128 return true - case OpPopCountUint64x2: - v.Op = OpAMD64VPOPCNTQ128 + case OpReciprocalFloat64x4: + v.Op = OpAMD64VRCP14PD256 return true - case 
OpPopCountUint64x4: - v.Op = OpAMD64VPOPCNTQ256 + case OpReciprocalFloat64x8: + v.Op = OpAMD64VRCP14PD512 return true - case OpPopCountUint64x8: - v.Op = OpAMD64VPOPCNTQ512 + case OpReciprocalMaskedFloat32x16: + return rewriteValueAMD64_OpReciprocalMaskedFloat32x16(v) + case OpReciprocalMaskedFloat32x4: + return rewriteValueAMD64_OpReciprocalMaskedFloat32x4(v) + case OpReciprocalMaskedFloat32x8: + return rewriteValueAMD64_OpReciprocalMaskedFloat32x8(v) + case OpReciprocalMaskedFloat64x2: + return rewriteValueAMD64_OpReciprocalMaskedFloat64x2(v) + case OpReciprocalMaskedFloat64x4: + return rewriteValueAMD64_OpReciprocalMaskedFloat64x4(v) + case OpReciprocalMaskedFloat64x8: + return rewriteValueAMD64_OpReciprocalMaskedFloat64x8(v) + case OpReciprocalSqrtFloat32x16: + v.Op = OpAMD64VRSQRT14PS512 return true - case OpPopCountUint8x16: - v.Op = OpAMD64VPOPCNTB128 + case OpReciprocalSqrtFloat32x4: + v.Op = OpAMD64VRSQRTPS128 return true - case OpPopCountUint8x32: - v.Op = OpAMD64VPOPCNTB256 + case OpReciprocalSqrtFloat32x8: + v.Op = OpAMD64VRSQRTPS256 return true - case OpPopCountUint8x64: - v.Op = OpAMD64VPOPCNTB512 + case OpReciprocalSqrtFloat64x2: + v.Op = OpAMD64VRSQRT14PD128 return true - case OpPrefetchCache: - v.Op = OpAMD64PrefetchT0 + case OpReciprocalSqrtFloat64x4: + v.Op = OpAMD64VRSQRT14PD256 return true - case OpPrefetchCacheStreamed: - v.Op = OpAMD64PrefetchNTA + case OpReciprocalSqrtFloat64x8: + v.Op = OpAMD64VRSQRT14PD512 return true + case OpReciprocalSqrtMaskedFloat32x16: + return rewriteValueAMD64_OpReciprocalSqrtMaskedFloat32x16(v) + case OpReciprocalSqrtMaskedFloat32x4: + return rewriteValueAMD64_OpReciprocalSqrtMaskedFloat32x4(v) + case OpReciprocalSqrtMaskedFloat32x8: + return rewriteValueAMD64_OpReciprocalSqrtMaskedFloat32x8(v) + case OpReciprocalSqrtMaskedFloat64x2: + return rewriteValueAMD64_OpReciprocalSqrtMaskedFloat64x2(v) + case OpReciprocalSqrtMaskedFloat64x4: + return rewriteValueAMD64_OpReciprocalSqrtMaskedFloat64x4(v) + case OpReciprocalSqrtMaskedFloat64x8: + return rewriteValueAMD64_OpReciprocalSqrtMaskedFloat64x8(v) case OpRotateAllLeftInt32x16: v.Op = OpAMD64VPROLD512 return true @@ -4237,64 +4294,64 @@ func rewriteValueAMD64(v *Value) bool { case OpRound64F: v.Op = OpAMD64LoweredRound64F return true - case OpRoundFloat32x4: - return rewriteValueAMD64_OpRoundFloat32x4(v) - case OpRoundFloat32x8: - return rewriteValueAMD64_OpRoundFloat32x8(v) - case OpRoundFloat64x2: - return rewriteValueAMD64_OpRoundFloat64x2(v) - case OpRoundFloat64x4: - return rewriteValueAMD64_OpRoundFloat64x4(v) - case OpRoundScaledFloat32x16: - return rewriteValueAMD64_OpRoundScaledFloat32x16(v) - case OpRoundScaledFloat32x4: - return rewriteValueAMD64_OpRoundScaledFloat32x4(v) - case OpRoundScaledFloat32x8: - return rewriteValueAMD64_OpRoundScaledFloat32x8(v) - case OpRoundScaledFloat64x2: - return rewriteValueAMD64_OpRoundScaledFloat64x2(v) - case OpRoundScaledFloat64x4: - return rewriteValueAMD64_OpRoundScaledFloat64x4(v) - case OpRoundScaledFloat64x8: - return rewriteValueAMD64_OpRoundScaledFloat64x8(v) - case OpRoundScaledMaskedFloat32x16: - return rewriteValueAMD64_OpRoundScaledMaskedFloat32x16(v) - case OpRoundScaledMaskedFloat32x4: - return rewriteValueAMD64_OpRoundScaledMaskedFloat32x4(v) - case OpRoundScaledMaskedFloat32x8: - return rewriteValueAMD64_OpRoundScaledMaskedFloat32x8(v) - case OpRoundScaledMaskedFloat64x2: - return rewriteValueAMD64_OpRoundScaledMaskedFloat64x2(v) - case OpRoundScaledMaskedFloat64x4: - return rewriteValueAMD64_OpRoundScaledMaskedFloat64x4(v) - case 
OpRoundScaledMaskedFloat64x8: - return rewriteValueAMD64_OpRoundScaledMaskedFloat64x8(v) - case OpRoundScaledResidueFloat32x16: - return rewriteValueAMD64_OpRoundScaledResidueFloat32x16(v) - case OpRoundScaledResidueFloat32x4: - return rewriteValueAMD64_OpRoundScaledResidueFloat32x4(v) - case OpRoundScaledResidueFloat32x8: - return rewriteValueAMD64_OpRoundScaledResidueFloat32x8(v) - case OpRoundScaledResidueFloat64x2: - return rewriteValueAMD64_OpRoundScaledResidueFloat64x2(v) - case OpRoundScaledResidueFloat64x4: - return rewriteValueAMD64_OpRoundScaledResidueFloat64x4(v) - case OpRoundScaledResidueFloat64x8: - return rewriteValueAMD64_OpRoundScaledResidueFloat64x8(v) - case OpRoundScaledResidueMaskedFloat32x16: - return rewriteValueAMD64_OpRoundScaledResidueMaskedFloat32x16(v) - case OpRoundScaledResidueMaskedFloat32x4: - return rewriteValueAMD64_OpRoundScaledResidueMaskedFloat32x4(v) - case OpRoundScaledResidueMaskedFloat32x8: - return rewriteValueAMD64_OpRoundScaledResidueMaskedFloat32x8(v) - case OpRoundScaledResidueMaskedFloat64x2: - return rewriteValueAMD64_OpRoundScaledResidueMaskedFloat64x2(v) - case OpRoundScaledResidueMaskedFloat64x4: - return rewriteValueAMD64_OpRoundScaledResidueMaskedFloat64x4(v) - case OpRoundScaledResidueMaskedFloat64x8: - return rewriteValueAMD64_OpRoundScaledResidueMaskedFloat64x8(v) case OpRoundToEven: return rewriteValueAMD64_OpRoundToEven(v) + case OpRoundToEvenFloat32x4: + return rewriteValueAMD64_OpRoundToEvenFloat32x4(v) + case OpRoundToEvenFloat32x8: + return rewriteValueAMD64_OpRoundToEvenFloat32x8(v) + case OpRoundToEvenFloat64x2: + return rewriteValueAMD64_OpRoundToEvenFloat64x2(v) + case OpRoundToEvenFloat64x4: + return rewriteValueAMD64_OpRoundToEvenFloat64x4(v) + case OpRoundToEvenScaledFloat32x16: + return rewriteValueAMD64_OpRoundToEvenScaledFloat32x16(v) + case OpRoundToEvenScaledFloat32x4: + return rewriteValueAMD64_OpRoundToEvenScaledFloat32x4(v) + case OpRoundToEvenScaledFloat32x8: + return rewriteValueAMD64_OpRoundToEvenScaledFloat32x8(v) + case OpRoundToEvenScaledFloat64x2: + return rewriteValueAMD64_OpRoundToEvenScaledFloat64x2(v) + case OpRoundToEvenScaledFloat64x4: + return rewriteValueAMD64_OpRoundToEvenScaledFloat64x4(v) + case OpRoundToEvenScaledFloat64x8: + return rewriteValueAMD64_OpRoundToEvenScaledFloat64x8(v) + case OpRoundToEvenScaledMaskedFloat32x16: + return rewriteValueAMD64_OpRoundToEvenScaledMaskedFloat32x16(v) + case OpRoundToEvenScaledMaskedFloat32x4: + return rewriteValueAMD64_OpRoundToEvenScaledMaskedFloat32x4(v) + case OpRoundToEvenScaledMaskedFloat32x8: + return rewriteValueAMD64_OpRoundToEvenScaledMaskedFloat32x8(v) + case OpRoundToEvenScaledMaskedFloat64x2: + return rewriteValueAMD64_OpRoundToEvenScaledMaskedFloat64x2(v) + case OpRoundToEvenScaledMaskedFloat64x4: + return rewriteValueAMD64_OpRoundToEvenScaledMaskedFloat64x4(v) + case OpRoundToEvenScaledMaskedFloat64x8: + return rewriteValueAMD64_OpRoundToEvenScaledMaskedFloat64x8(v) + case OpRoundToEvenScaledResidueFloat32x16: + return rewriteValueAMD64_OpRoundToEvenScaledResidueFloat32x16(v) + case OpRoundToEvenScaledResidueFloat32x4: + return rewriteValueAMD64_OpRoundToEvenScaledResidueFloat32x4(v) + case OpRoundToEvenScaledResidueFloat32x8: + return rewriteValueAMD64_OpRoundToEvenScaledResidueFloat32x8(v) + case OpRoundToEvenScaledResidueFloat64x2: + return rewriteValueAMD64_OpRoundToEvenScaledResidueFloat64x2(v) + case OpRoundToEvenScaledResidueFloat64x4: + return rewriteValueAMD64_OpRoundToEvenScaledResidueFloat64x4(v) + case 
OpRoundToEvenScaledResidueFloat64x8: + return rewriteValueAMD64_OpRoundToEvenScaledResidueFloat64x8(v) + case OpRoundToEvenScaledResidueMaskedFloat32x16: + return rewriteValueAMD64_OpRoundToEvenScaledResidueMaskedFloat32x16(v) + case OpRoundToEvenScaledResidueMaskedFloat32x4: + return rewriteValueAMD64_OpRoundToEvenScaledResidueMaskedFloat32x4(v) + case OpRoundToEvenScaledResidueMaskedFloat32x8: + return rewriteValueAMD64_OpRoundToEvenScaledResidueMaskedFloat32x8(v) + case OpRoundToEvenScaledResidueMaskedFloat64x2: + return rewriteValueAMD64_OpRoundToEvenScaledResidueMaskedFloat64x2(v) + case OpRoundToEvenScaledResidueMaskedFloat64x4: + return rewriteValueAMD64_OpRoundToEvenScaledResidueMaskedFloat64x4(v) + case OpRoundToEvenScaledResidueMaskedFloat64x8: + return rewriteValueAMD64_OpRoundToEvenScaledResidueMaskedFloat64x8(v) case OpRsh16Ux16: return rewriteValueAMD64_OpRsh16Ux16(v) case OpRsh16Ux32: @@ -4359,51 +4416,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpRsh8x64(v) case OpRsh8x8: return rewriteValueAMD64_OpRsh8x8(v) - case OpSaturatedAddDotProdInt32x16: - v.Op = OpAMD64VPDPWSSDS512 - return true - case OpSaturatedAddDotProdInt32x4: - v.Op = OpAMD64VPDPWSSDS128 - return true - case OpSaturatedAddDotProdInt32x8: - v.Op = OpAMD64VPDPWSSDS256 - return true - case OpSaturatedAddDotProdMaskedInt32x16: - return rewriteValueAMD64_OpSaturatedAddDotProdMaskedInt32x16(v) - case OpSaturatedAddDotProdMaskedInt32x4: - return rewriteValueAMD64_OpSaturatedAddDotProdMaskedInt32x4(v) - case OpSaturatedAddDotProdMaskedInt32x8: - return rewriteValueAMD64_OpSaturatedAddDotProdMaskedInt32x8(v) - case OpSaturatedUnsignedSignedPairDotProdMaskedUint8x16: - return rewriteValueAMD64_OpSaturatedUnsignedSignedPairDotProdMaskedUint8x16(v) - case OpSaturatedUnsignedSignedPairDotProdMaskedUint8x32: - return rewriteValueAMD64_OpSaturatedUnsignedSignedPairDotProdMaskedUint8x32(v) - case OpSaturatedUnsignedSignedPairDotProdMaskedUint8x64: - return rewriteValueAMD64_OpSaturatedUnsignedSignedPairDotProdMaskedUint8x64(v) - case OpSaturatedUnsignedSignedPairDotProdUint8x16: - v.Op = OpAMD64VPMADDUBSW128 - return true - case OpSaturatedUnsignedSignedPairDotProdUint8x32: - v.Op = OpAMD64VPMADDUBSW256 - return true - case OpSaturatedUnsignedSignedPairDotProdUint8x64: - v.Op = OpAMD64VPMADDUBSW512 - return true - case OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16: - v.Op = OpAMD64VPDPBUSDS512 - return true - case OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4: - v.Op = OpAMD64VPDPBUSDS128 - return true - case OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8: - v.Op = OpAMD64VPDPBUSDS256 - return true - case OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16: - return rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16(v) - case OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4: - return rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4(v) - case OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8: - return rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8(v) case OpScaleFloat32x16: v.Op = OpAMD64VSCALEFPS512 return true @@ -5246,24 +5258,6 @@ func rewriteValueAMD64(v *Value) bool { case OpSignExt8to64: v.Op = OpAMD64MOVBQSX return true - case OpSignInt16x16: - v.Op = OpAMD64VPSIGNW256 - return true - case OpSignInt16x8: - v.Op = OpAMD64VPSIGNW128 - return true - case OpSignInt32x4: - v.Op = OpAMD64VPSIGND128 - return true - case OpSignInt32x8: - v.Op = 
OpAMD64VPSIGND256 - return true - case OpSignInt8x16: - v.Op = OpAMD64VPSIGNB128 - return true - case OpSignInt8x32: - v.Op = OpAMD64VPSIGNB256 - return true case OpSlicemask: return rewriteValueAMD64_OpSlicemask(v) case OpSpectreIndex: @@ -5563,22 +5557,22 @@ func rewriteValueAMD64(v *Value) bool { case OpSubSaturatedMaskedUint8x64: return rewriteValueAMD64_OpSubSaturatedMaskedUint8x64(v) case OpSubSaturatedUint16x16: - v.Op = OpAMD64VPSUBSW256 + v.Op = OpAMD64VPSUBUSW256 return true case OpSubSaturatedUint16x32: - v.Op = OpAMD64VPSUBSW512 + v.Op = OpAMD64VPSUBUSW512 return true case OpSubSaturatedUint16x8: - v.Op = OpAMD64VPSUBSW128 + v.Op = OpAMD64VPSUBUSW128 return true case OpSubSaturatedUint8x16: - v.Op = OpAMD64VPSUBSB128 + v.Op = OpAMD64VPSUBUSB128 return true case OpSubSaturatedUint8x32: - v.Op = OpAMD64VPSUBSB256 + v.Op = OpAMD64VPSUBUSB256 return true case OpSubSaturatedUint8x64: - v.Op = OpAMD64VPSUBSB512 + v.Op = OpAMD64VPSUBUSB512 return true case OpSubUint16x16: v.Op = OpAMD64VPSUBW256 @@ -5695,21 +5689,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpTruncScaledResidueMaskedFloat64x4(v) case OpTruncScaledResidueMaskedFloat64x8: return rewriteValueAMD64_OpTruncScaledResidueMaskedFloat64x8(v) - case OpUnsignedSignedQuadDotProdAccumulateInt32x16: - v.Op = OpAMD64VPDPBUSD512 - return true - case OpUnsignedSignedQuadDotProdAccumulateInt32x4: - v.Op = OpAMD64VPDPBUSD128 - return true - case OpUnsignedSignedQuadDotProdAccumulateInt32x8: - v.Op = OpAMD64VPDPBUSD256 - return true - case OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x16: - return rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x16(v) - case OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x4: - return rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x4(v) - case OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x8: - return rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x8(v) case OpWB: v.Op = OpAMD64LoweredWB return true @@ -28619,11 +28598,11 @@ func rewriteValueAMD64_OpAMD64XORQmodify(v *Value) bool { } return false } -func rewriteValueAMD64_OpAbsoluteMaskedInt16x16(v *Value) bool { +func rewriteValueAMD64_OpAbsMaskedInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AbsoluteMaskedInt16x16 x mask) + // match: (AbsMaskedInt16x16 x mask) // result: (VPABSWMasked256 x (VPMOVVec16x16ToM mask)) for { x := v_0 @@ -28635,11 +28614,11 @@ func rewriteValueAMD64_OpAbsoluteMaskedInt16x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpAbsoluteMaskedInt16x32(v *Value) bool { +func rewriteValueAMD64_OpAbsMaskedInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AbsoluteMaskedInt16x32 x mask) + // match: (AbsMaskedInt16x32 x mask) // result: (VPABSWMasked512 x (VPMOVVec16x32ToM mask)) for { x := v_0 @@ -28651,11 +28630,11 @@ func rewriteValueAMD64_OpAbsoluteMaskedInt16x32(v *Value) bool { return true } } -func rewriteValueAMD64_OpAbsoluteMaskedInt16x8(v *Value) bool { +func rewriteValueAMD64_OpAbsMaskedInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AbsoluteMaskedInt16x8 x mask) + // match: (AbsMaskedInt16x8 x mask) // result: (VPABSWMasked128 x (VPMOVVec16x8ToM mask)) for { x := v_0 @@ -28667,11 +28646,11 @@ func rewriteValueAMD64_OpAbsoluteMaskedInt16x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpAbsoluteMaskedInt32x16(v *Value) bool { +func rewriteValueAMD64_OpAbsMaskedInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := 
v.Args[0] b := v.Block - // match: (AbsoluteMaskedInt32x16 x mask) + // match: (AbsMaskedInt32x16 x mask) // result: (VPABSDMasked512 x (VPMOVVec32x16ToM mask)) for { x := v_0 @@ -28683,11 +28662,11 @@ func rewriteValueAMD64_OpAbsoluteMaskedInt32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpAbsoluteMaskedInt32x4(v *Value) bool { +func rewriteValueAMD64_OpAbsMaskedInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AbsoluteMaskedInt32x4 x mask) + // match: (AbsMaskedInt32x4 x mask) // result: (VPABSDMasked128 x (VPMOVVec32x4ToM mask)) for { x := v_0 @@ -28699,11 +28678,11 @@ func rewriteValueAMD64_OpAbsoluteMaskedInt32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpAbsoluteMaskedInt32x8(v *Value) bool { +func rewriteValueAMD64_OpAbsMaskedInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AbsoluteMaskedInt32x8 x mask) + // match: (AbsMaskedInt32x8 x mask) // result: (VPABSDMasked256 x (VPMOVVec32x8ToM mask)) for { x := v_0 @@ -28715,11 +28694,11 @@ func rewriteValueAMD64_OpAbsoluteMaskedInt32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpAbsoluteMaskedInt64x2(v *Value) bool { +func rewriteValueAMD64_OpAbsMaskedInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AbsoluteMaskedInt64x2 x mask) + // match: (AbsMaskedInt64x2 x mask) // result: (VPABSQMasked128 x (VPMOVVec64x2ToM mask)) for { x := v_0 @@ -28731,11 +28710,11 @@ func rewriteValueAMD64_OpAbsoluteMaskedInt64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpAbsoluteMaskedInt64x4(v *Value) bool { +func rewriteValueAMD64_OpAbsMaskedInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AbsoluteMaskedInt64x4 x mask) + // match: (AbsMaskedInt64x4 x mask) // result: (VPABSQMasked256 x (VPMOVVec64x4ToM mask)) for { x := v_0 @@ -28747,11 +28726,11 @@ func rewriteValueAMD64_OpAbsoluteMaskedInt64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpAbsoluteMaskedInt64x8(v *Value) bool { +func rewriteValueAMD64_OpAbsMaskedInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AbsoluteMaskedInt64x8 x mask) + // match: (AbsMaskedInt64x8 x mask) // result: (VPABSQMasked512 x (VPMOVVec64x8ToM mask)) for { x := v_0 @@ -28763,11 +28742,11 @@ func rewriteValueAMD64_OpAbsoluteMaskedInt64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpAbsoluteMaskedInt8x16(v *Value) bool { +func rewriteValueAMD64_OpAbsMaskedInt8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AbsoluteMaskedInt8x16 x mask) + // match: (AbsMaskedInt8x16 x mask) // result: (VPABSBMasked128 x (VPMOVVec8x16ToM mask)) for { x := v_0 @@ -28779,11 +28758,11 @@ func rewriteValueAMD64_OpAbsoluteMaskedInt8x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpAbsoluteMaskedInt8x32(v *Value) bool { +func rewriteValueAMD64_OpAbsMaskedInt8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AbsoluteMaskedInt8x32 x mask) + // match: (AbsMaskedInt8x32 x mask) // result: (VPABSBMasked256 x (VPMOVVec8x32ToM mask)) for { x := v_0 @@ -28795,11 +28774,11 @@ func rewriteValueAMD64_OpAbsoluteMaskedInt8x32(v *Value) bool { return true } } -func rewriteValueAMD64_OpAbsoluteMaskedInt8x64(v *Value) bool { +func rewriteValueAMD64_OpAbsMaskedInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AbsoluteMaskedInt8x64 x mask) + // match: (AbsMaskedInt8x64 x mask) // result: 
(VPABSBMasked512 x (VPMOVVec8x64ToM mask)) for { x := v_0 @@ -28811,60 +28790,180 @@ func rewriteValueAMD64_OpAbsoluteMaskedInt8x64(v *Value) bool { return true } } -func rewriteValueAMD64_OpAddDotProdMaskedInt32x16(v *Value) bool { +func rewriteValueAMD64_OpAddDotProdPairsSaturatedMaskedInt32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AddDotProdPairsSaturatedMaskedInt32x16 x y z mask) + // result: (VPDPWSSDSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPWSSDSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpAddDotProdPairsSaturatedMaskedInt32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AddDotProdPairsSaturatedMaskedInt32x4 x y z mask) + // result: (VPDPWSSDSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPWSSDSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpAddDotProdPairsSaturatedMaskedInt32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AddDotProdPairsSaturatedMaskedInt32x8 x y z mask) + // result: (VPDPWSSDSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPWSSDSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpAddDotProdQuadrupleMaskedInt32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AddDotProdQuadrupleMaskedInt32x16 x y z mask) + // result: (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpAddDotProdQuadrupleMaskedInt32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AddDotProdQuadrupleMaskedInt32x4 x y z mask) + // result: (VPDPBUSDMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpAddDotProdQuadrupleMaskedInt32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AddDotProdQuadrupleMaskedInt32x8 x y z mask) + // result: (VPDPBUSDMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpAddDotProdQuadrupleSaturatedMaskedInt32x16(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AddDotProdMaskedInt32x16 x y z mask) - // 
result: (VPDPWSSDMasked512 x y z (VPMOVVec32x16ToM mask)) + // match: (AddDotProdQuadrupleSaturatedMaskedInt32x16 x y z mask) + // result: (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 z := v_2 mask := v_3 - v.reset(OpAMD64VPDPWSSDMasked512) + v.reset(OpAMD64VPDPBUSDSMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpAddDotProdMaskedInt32x4(v *Value) bool { +func rewriteValueAMD64_OpAddDotProdQuadrupleSaturatedMaskedInt32x4(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AddDotProdMaskedInt32x4 x y z mask) - // result: (VPDPWSSDMasked128 x y z (VPMOVVec32x4ToM mask)) + // match: (AddDotProdQuadrupleSaturatedMaskedInt32x4 x y z mask) + // result: (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 z := v_2 mask := v_3 - v.reset(OpAMD64VPDPWSSDMasked128) + v.reset(OpAMD64VPDPBUSDSMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpAddDotProdMaskedInt32x8(v *Value) bool { +func rewriteValueAMD64_OpAddDotProdQuadrupleSaturatedMaskedInt32x8(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AddDotProdMaskedInt32x8 x y z mask) - // result: (VPDPWSSDMasked256 x y z (VPMOVVec32x8ToM mask)) + // match: (AddDotProdQuadrupleSaturatedMaskedInt32x8 x y z mask) + // result: (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 z := v_2 mask := v_3 - v.reset(OpAMD64VPDPWSSDMasked256) + v.reset(OpAMD64VPDPBUSDSMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(x, y, z, v0) @@ -29525,12 +29624,12 @@ func rewriteValueAMD64_OpAddSaturatedMaskedUint16x16(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (AddSaturatedMaskedUint16x16 x y mask) - // result: (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) + // result: (VPADDUSWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDSWMasked256) + v.reset(OpAMD64VPADDUSWMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -29543,12 +29642,12 @@ func rewriteValueAMD64_OpAddSaturatedMaskedUint16x32(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (AddSaturatedMaskedUint16x32 x y mask) - // result: (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) + // result: (VPADDUSWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDSWMasked512) + v.reset(OpAMD64VPADDUSWMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -29561,12 +29660,12 @@ func rewriteValueAMD64_OpAddSaturatedMaskedUint16x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (AddSaturatedMaskedUint16x8 x y mask) - // result: (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) + // result: (VPADDUSWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDSWMasked128) + v.reset(OpAMD64VPADDUSWMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -29579,12 +29678,12 @@ func rewriteValueAMD64_OpAddSaturatedMaskedUint8x16(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (AddSaturatedMaskedUint8x16 x y mask) - // result: (VPADDSBMasked128 x y 
(VPMOVVec8x16ToM mask)) + // result: (VPADDUSBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDSBMasked128) + v.reset(OpAMD64VPADDUSBMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -29597,12 +29696,12 @@ func rewriteValueAMD64_OpAddSaturatedMaskedUint8x32(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (AddSaturatedMaskedUint8x32 x y mask) - // result: (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) + // result: (VPADDUSBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDSBMasked256) + v.reset(OpAMD64VPADDUSBMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -29615,12 +29714,12 @@ func rewriteValueAMD64_OpAddSaturatedMaskedUint8x64(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (AddSaturatedMaskedUint8x64 x y mask) - // result: (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) + // result: (VPADDUSBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDSBMasked512) + v.reset(OpAMD64VPADDUSBMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -30072,198 +30171,6 @@ func rewriteValueAMD64_OpAndNotMaskedUint64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpApproximateReciprocalMaskedFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ApproximateReciprocalMaskedFloat32x16 x mask) - // result: (VRCP14PSMasked512 x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VRCP14PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpApproximateReciprocalMaskedFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ApproximateReciprocalMaskedFloat32x4 x mask) - // result: (VRCP14PSMasked128 x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VRCP14PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpApproximateReciprocalMaskedFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ApproximateReciprocalMaskedFloat32x8 x mask) - // result: (VRCP14PSMasked256 x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VRCP14PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpApproximateReciprocalMaskedFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ApproximateReciprocalMaskedFloat64x2 x mask) - // result: (VRCP14PDMasked128 x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VRCP14PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpApproximateReciprocalMaskedFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ApproximateReciprocalMaskedFloat64x4 x mask) - // result: (VRCP14PDMasked256 x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VRCP14PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, 
types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpApproximateReciprocalMaskedFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ApproximateReciprocalMaskedFloat64x8 x mask) - // result: (VRCP14PDMasked512 x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VRCP14PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpApproximateReciprocalOfSqrtMaskedFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ApproximateReciprocalOfSqrtMaskedFloat32x16 x mask) - // result: (VRSQRT14PSMasked512 x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VRSQRT14PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpApproximateReciprocalOfSqrtMaskedFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ApproximateReciprocalOfSqrtMaskedFloat32x4 x mask) - // result: (VRSQRT14PSMasked128 x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VRSQRT14PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpApproximateReciprocalOfSqrtMaskedFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ApproximateReciprocalOfSqrtMaskedFloat32x8 x mask) - // result: (VRSQRT14PSMasked256 x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VRSQRT14PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpApproximateReciprocalOfSqrtMaskedFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ApproximateReciprocalOfSqrtMaskedFloat64x2 x mask) - // result: (VRSQRT14PDMasked128 x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VRSQRT14PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpApproximateReciprocalOfSqrtMaskedFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ApproximateReciprocalOfSqrtMaskedFloat64x4 x mask) - // result: (VRSQRT14PDMasked256 x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VRSQRT14PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpApproximateReciprocalOfSqrtMaskedFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ApproximateReciprocalOfSqrtMaskedFloat64x8 x mask) - // result: (VRSQRT14PDMasked512 x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VRSQRT14PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} func rewriteValueAMD64_OpAtomicAdd32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -33709,45 +33616,111 @@ func rewriteValueAMD64_OpDivMaskedFloat64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpDotProdBroadcastFloat32x4(v *Value) bool { +func 
rewriteValueAMD64_OpDotProdPairsMaskedInt16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DotProdBroadcastFloat32x4 x y) - // result: (VDPPS128 [127] x y) + b := v.Block + // match: (DotProdPairsMaskedInt16x16 x y mask) + // result: (VPMADDWDMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VDPPS128) - v.AuxInt = int8ToAuxInt(127) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMADDWDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpDotProdBroadcastFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpDotProdPairsMaskedInt16x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DotProdBroadcastFloat32x8 x y) - // result: (VDPPS256 [127] x y) + b := v.Block + // match: (DotProdPairsMaskedInt16x32 x y mask) + // result: (VPMADDWDMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VDPPS256) - v.AuxInt = int8ToAuxInt(127) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMADDWDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpDotProdBroadcastFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpDotProdPairsMaskedInt16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DotProdBroadcastFloat64x2 x y) - // result: (VDPPD128 [127] x y) + b := v.Block + // match: (DotProdPairsMaskedInt16x8 x y mask) + // result: (VPMADDWDMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VDPPD128) - v.AuxInt = int8ToAuxInt(127) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMADDWDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpDotProdPairsSaturatedMaskedUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (DotProdPairsSaturatedMaskedUint8x16 x y mask) + // result: (VPMADDUBSWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMADDUBSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpDotProdPairsSaturatedMaskedUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (DotProdPairsSaturatedMaskedUint8x32 x y mask) + // result: (VPMADDUBSWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMADDUBSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpDotProdPairsSaturatedMaskedUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (DotProdPairsSaturatedMaskedUint8x64 x y mask) + // result: (VPMADDUBSWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMADDUBSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } @@ -35694,366 +35667,6 @@ func rewriteValueAMD64_OpFloorScaledResidueMaskedFloat64x8(v *Value) bool { return true } } -func 
rewriteValueAMD64_OpFusedMultiplyAddMaskedFloat32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (FusedMultiplyAddMaskedFloat32x16 x y z mask) - // result: (VFMADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD213PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpFusedMultiplyAddMaskedFloat32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (FusedMultiplyAddMaskedFloat32x4 x y z mask) - // result: (VFMADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD213PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpFusedMultiplyAddMaskedFloat32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (FusedMultiplyAddMaskedFloat32x8 x y z mask) - // result: (VFMADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD213PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpFusedMultiplyAddMaskedFloat64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (FusedMultiplyAddMaskedFloat64x2 x y z mask) - // result: (VFMADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD213PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpFusedMultiplyAddMaskedFloat64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (FusedMultiplyAddMaskedFloat64x4 x y z mask) - // result: (VFMADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD213PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpFusedMultiplyAddMaskedFloat64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (FusedMultiplyAddMaskedFloat64x8 x y z mask) - // result: (VFMADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD213PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpFusedMultiplyAddSubMaskedFloat32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (FusedMultiplyAddSubMaskedFloat32x16 x y z mask) - // result: (VFMADDSUB213PSMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB213PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - 
v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpFusedMultiplyAddSubMaskedFloat32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (FusedMultiplyAddSubMaskedFloat32x4 x y z mask) - // result: (VFMADDSUB213PSMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB213PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpFusedMultiplyAddSubMaskedFloat32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (FusedMultiplyAddSubMaskedFloat32x8 x y z mask) - // result: (VFMADDSUB213PSMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB213PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpFusedMultiplyAddSubMaskedFloat64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (FusedMultiplyAddSubMaskedFloat64x2 x y z mask) - // result: (VFMADDSUB213PDMasked128 x y z (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB213PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpFusedMultiplyAddSubMaskedFloat64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (FusedMultiplyAddSubMaskedFloat64x4 x y z mask) - // result: (VFMADDSUB213PDMasked256 x y z (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB213PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpFusedMultiplyAddSubMaskedFloat64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (FusedMultiplyAddSubMaskedFloat64x8 x y z mask) - // result: (VFMADDSUB213PDMasked512 x y z (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB213PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (FusedMultiplySubAddMaskedFloat32x16 x y z mask) - // result: (VFMSUBADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD213PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (FusedMultiplySubAddMaskedFloat32x4 x y z mask) - // result: (VFMSUBADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 
- mask := v_3 - v.reset(OpAMD64VFMSUBADD213PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (FusedMultiplySubAddMaskedFloat32x8 x y z mask) - // result: (VFMSUBADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD213PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (FusedMultiplySubAddMaskedFloat64x2 x y z mask) - // result: (VFMSUBADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD213PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (FusedMultiplySubAddMaskedFloat64x4 x y z mask) - // result: (VFMSUBADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD213PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (FusedMultiplySubAddMaskedFloat64x8 x y z mask) - // result: (VFMSUBADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD213PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} func rewriteValueAMD64_OpGaloisFieldAffineTransformInverseMaskedUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -44852,192 +44465,270 @@ func rewriteValueAMD64_OpMove(v *Value) bool { } return false } -func rewriteValueAMD64_OpMulEvenWidenMaskedInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMulAddMaskedFloat32x16(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulEvenWidenMaskedInt64x2 x y mask) - // result: (VPMULDQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (MulAddMaskedFloat32x16 x y z mask) + // result: (VFMADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULDQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD213PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMulEvenWidenMaskedInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMulAddMaskedFloat32x4(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulEvenWidenMaskedInt64x4 x y 
mask) - // result: (VPMULDQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (MulAddMaskedFloat32x4 x y z mask) + // result: (VFMADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULDQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD213PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMulEvenWidenMaskedInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMulAddMaskedFloat32x8(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulEvenWidenMaskedInt64x8 x y mask) - // result: (VPMULDQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (MulAddMaskedFloat32x8 x y z mask) + // result: (VFMADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULDQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD213PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMulEvenWidenMaskedUint64x2(v *Value) bool { +func rewriteValueAMD64_OpMulAddMaskedFloat64x2(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulEvenWidenMaskedUint64x2 x y mask) - // result: (VPMULUDQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (MulAddMaskedFloat64x2 x y z mask) + // result: (VFMADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULUDQMasked128) + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD213PDMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMulEvenWidenMaskedUint64x4(v *Value) bool { +func rewriteValueAMD64_OpMulAddMaskedFloat64x4(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulEvenWidenMaskedUint64x4 x y mask) - // result: (VPMULUDQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (MulAddMaskedFloat64x4 x y z mask) + // result: (VFMADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULUDQMasked256) + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD213PDMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMulEvenWidenMaskedUint64x8(v *Value) bool { +func rewriteValueAMD64_OpMulAddMaskedFloat64x8(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulEvenWidenMaskedUint64x8 x y mask) - // result: (VPMULUDQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (MulAddMaskedFloat64x8 x y z mask) + // result: (VFMADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULUDQMasked512) + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD213PDMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMulHighMaskedInt16x16(v *Value) bool { +func 
rewriteValueAMD64_OpMulAddSubMaskedFloat32x16(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulHighMaskedInt16x16 x y mask) - // result: (VPMULHWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (MulAddSubMaskedFloat32x16 x y z mask) + // result: (VFMADDSUB213PSMasked512 x y z (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULHWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB213PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMulHighMaskedInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMulAddSubMaskedFloat32x4(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulHighMaskedInt16x32 x y mask) - // result: (VPMULHWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (MulAddSubMaskedFloat32x4 x y z mask) + // result: (VFMADDSUB213PSMasked128 x y z (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULHWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB213PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMulHighMaskedInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMulAddSubMaskedFloat32x8(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulHighMaskedInt16x8 x y mask) - // result: (VPMULHWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (MulAddSubMaskedFloat32x8 x y z mask) + // result: (VFMADDSUB213PSMasked256 x y z (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULHWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB213PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMulHighMaskedUint16x16(v *Value) bool { +func rewriteValueAMD64_OpMulAddSubMaskedFloat64x2(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulHighMaskedUint16x16 x y mask) - // result: (VPMULHUWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (MulAddSubMaskedFloat64x2 x y z mask) + // result: (VFMADDSUB213PDMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB213PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMulAddSubMaskedFloat64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MulAddSubMaskedFloat64x4 x y z mask) + // result: (VFMADDSUB213PDMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB213PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMulAddSubMaskedFloat64x8(v *Value) bool { + 
v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MulAddSubMaskedFloat64x8 x y z mask) + // result: (VFMADDSUB213PDMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB213PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMulHighMaskedInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MulHighMaskedInt16x16 x y mask) + // result: (VPMULHWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULHUWMasked256) + v.reset(OpAMD64VPMULHWMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulHighMaskedUint16x32(v *Value) bool { +func rewriteValueAMD64_OpMulHighMaskedInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulHighMaskedUint16x32 x y mask) + // match: (MulHighMaskedInt16x32 x y mask) // result: (VPMULHUWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 @@ -45050,12 +44741,12 @@ func rewriteValueAMD64_OpMulHighMaskedUint16x32(v *Value) bool { return true } } -func rewriteValueAMD64_OpMulHighMaskedUint16x8(v *Value) bool { +func rewriteValueAMD64_OpMulHighMaskedInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulHighMaskedUint16x8 x y mask) + // match: (MulHighMaskedInt16x8 x y mask) // result: (VPMULHUWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 @@ -45338,6 +45029,288 @@ func rewriteValueAMD64_OpMulMaskedInt64x8(v *Value) bool { return true } } +func rewriteValueAMD64_OpMulMaskedUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MulMaskedUint16x16 x y mask) + // result: (VPMULLWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMulMaskedUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MulMaskedUint16x32 x y mask) + // result: (VPMULLWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMulMaskedUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MulMaskedUint16x8 x y mask) + // result: (VPMULLWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMulMaskedUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MulMaskedUint32x16 x y mask) + // result: (VPMULLDMasked512 x y (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) 
+ v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMulMaskedUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MulMaskedUint32x4 x y mask) + // result: (VPMULLDMasked128 x y (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMulMaskedUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MulMaskedUint32x8 x y mask) + // result: (VPMULLDMasked256 x y (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMulMaskedUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MulMaskedUint64x2 x y mask) + // result: (VPMULLQMasked128 x y (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMulMaskedUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MulMaskedUint64x4 x y mask) + // result: (VPMULLQMasked256 x y (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMulMaskedUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MulMaskedUint64x8 x y mask) + // result: (VPMULLQMasked512 x y (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMulSubAddMaskedFloat32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MulSubAddMaskedFloat32x16 x y z mask) + // result: (VFMSUBADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD213PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMulSubAddMaskedFloat32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MulSubAddMaskedFloat32x4 x y z mask) + // result: (VFMSUBADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD213PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMulSubAddMaskedFloat32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MulSubAddMaskedFloat32x8 x y z mask) + // result: (VFMSUBADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + 
y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD213PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMulSubAddMaskedFloat64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MulSubAddMaskedFloat64x2 x y z mask) + // result: (VFMSUBADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD213PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMulSubAddMaskedFloat64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MulSubAddMaskedFloat64x4 x y z mask) + // result: (VFMSUBADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD213PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMulSubAddMaskedFloat64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MulSubAddMaskedFloat64x8 x y z mask) + // result: (VFMSUBADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD213PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} func rewriteValueAMD64_OpNeg32F(v *Value) bool { v_0 := v.Args[0] b := v.Block @@ -46722,6 +46695,390 @@ func rewriteValueAMD64_OpOffPtr(v *Value) bool { return true } } +func rewriteValueAMD64_OpOnesCountMaskedInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OnesCountMaskedInt16x16 x mask) + // result: (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpOnesCountMaskedInt16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OnesCountMaskedInt16x32 x mask) + // result: (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpOnesCountMaskedInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OnesCountMaskedInt16x8 x mask) + // result: (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpOnesCountMaskedInt32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OnesCountMaskedInt32x16 x mask) + // result: (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return 
true + } +} +func rewriteValueAMD64_OpOnesCountMaskedInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OnesCountMaskedInt32x4 x mask) + // result: (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpOnesCountMaskedInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OnesCountMaskedInt32x8 x mask) + // result: (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpOnesCountMaskedInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OnesCountMaskedInt64x2 x mask) + // result: (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpOnesCountMaskedInt64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OnesCountMaskedInt64x4 x mask) + // result: (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpOnesCountMaskedInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OnesCountMaskedInt64x8 x mask) + // result: (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpOnesCountMaskedInt8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OnesCountMaskedInt8x16 x mask) + // result: (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpOnesCountMaskedInt8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OnesCountMaskedInt8x32 x mask) + // result: (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpOnesCountMaskedInt8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OnesCountMaskedInt8x64 x mask) + // result: (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpOnesCountMaskedUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OnesCountMaskedUint16x16 x mask) + // result: (VPOPCNTWMasked256 x (VPMOVVec16x16ToM 
mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpOnesCountMaskedUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OnesCountMaskedUint16x32 x mask) + // result: (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpOnesCountMaskedUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OnesCountMaskedUint16x8 x mask) + // result: (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpOnesCountMaskedUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OnesCountMaskedUint32x16 x mask) + // result: (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpOnesCountMaskedUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OnesCountMaskedUint32x4 x mask) + // result: (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpOnesCountMaskedUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OnesCountMaskedUint32x8 x mask) + // result: (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpOnesCountMaskedUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OnesCountMaskedUint64x2 x mask) + // result: (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpOnesCountMaskedUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OnesCountMaskedUint64x4 x mask) + // result: (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpOnesCountMaskedUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OnesCountMaskedUint64x8 x mask) + // result: (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func 
rewriteValueAMD64_OpOnesCountMaskedUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OnesCountMaskedUint8x16 x mask) + // result: (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpOnesCountMaskedUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OnesCountMaskedUint8x32 x mask) + // result: (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpOnesCountMaskedUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OnesCountMaskedUint8x64 x mask) + // result: (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} func rewriteValueAMD64_OpOrMaskedInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -46938,60 +47295,6 @@ func rewriteValueAMD64_OpOrMaskedUint64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpPairDotProdMaskedInt16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (PairDotProdMaskedInt16x16 x y mask) - // result: (VPMADDWDMasked256 x y (VPMOVVec16x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMADDWDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpPairDotProdMaskedInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (PairDotProdMaskedInt16x32 x y mask) - // result: (VPMADDWDMasked512 x y (VPMOVVec16x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMADDWDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpPairDotProdMaskedInt16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (PairDotProdMaskedInt16x8 x y mask) - // result: (VPMADDWDMasked128 x y (VPMOVVec16x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMADDWDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} func rewriteValueAMD64_OpPermute2MaskedFloat32x16(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] @@ -48054,390 +48357,198 @@ func rewriteValueAMD64_OpPopCount8(v *Value) bool { return true } } -func rewriteValueAMD64_OpPopCountMaskedInt16x16(v *Value) bool { +func rewriteValueAMD64_OpReciprocalMaskedFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (PopCountMaskedInt16x16 x mask) - // result: (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpPopCountMaskedInt16x32(v *Value) 
bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (PopCountMaskedInt16x32 x mask) - // result: (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpPopCountMaskedInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (PopCountMaskedInt16x8 x mask) - // result: (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpPopCountMaskedInt32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (PopCountMaskedInt32x16 x mask) - // result: (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) + // match: (ReciprocalMaskedFloat32x16 x mask) + // result: (VRCP14PSMasked512 x (VPMOVVec32x16ToM mask)) for { x := v_0 mask := v_1 - v.reset(OpAMD64VPOPCNTDMasked512) + v.reset(OpAMD64VRCP14PSMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpPopCountMaskedInt32x4(v *Value) bool { +func rewriteValueAMD64_OpReciprocalMaskedFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (PopCountMaskedInt32x4 x mask) - // result: (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) + // match: (ReciprocalMaskedFloat32x4 x mask) + // result: (VRCP14PSMasked128 x (VPMOVVec32x4ToM mask)) for { x := v_0 mask := v_1 - v.reset(OpAMD64VPOPCNTDMasked128) + v.reset(OpAMD64VRCP14PSMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpPopCountMaskedInt32x8(v *Value) bool { +func rewriteValueAMD64_OpReciprocalMaskedFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (PopCountMaskedInt32x8 x mask) - // result: (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) + // match: (ReciprocalMaskedFloat32x8 x mask) + // result: (VRCP14PSMasked256 x (VPMOVVec32x8ToM mask)) for { x := v_0 mask := v_1 - v.reset(OpAMD64VPOPCNTDMasked256) + v.reset(OpAMD64VRCP14PSMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpPopCountMaskedInt64x2(v *Value) bool { +func rewriteValueAMD64_OpReciprocalMaskedFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (PopCountMaskedInt64x2 x mask) - // result: (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) + // match: (ReciprocalMaskedFloat64x2 x mask) + // result: (VRCP14PDMasked128 x (VPMOVVec64x2ToM mask)) for { x := v_0 mask := v_1 - v.reset(OpAMD64VPOPCNTQMasked128) + v.reset(OpAMD64VRCP14PDMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpPopCountMaskedInt64x4(v *Value) bool { +func rewriteValueAMD64_OpReciprocalMaskedFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (PopCountMaskedInt64x4 x mask) - // result: (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) + // match: (ReciprocalMaskedFloat64x4 x mask) + // result: (VRCP14PDMasked256 x (VPMOVVec64x4ToM mask)) for { x := v_0 mask := v_1 - 
v.reset(OpAMD64VPOPCNTQMasked256) + v.reset(OpAMD64VRCP14PDMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpPopCountMaskedInt64x8(v *Value) bool { +func rewriteValueAMD64_OpReciprocalMaskedFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (PopCountMaskedInt64x8 x mask) - // result: (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) + // match: (ReciprocalMaskedFloat64x8 x mask) + // result: (VRCP14PDMasked512 x (VPMOVVec64x8ToM mask)) for { x := v_0 mask := v_1 - v.reset(OpAMD64VPOPCNTQMasked512) + v.reset(OpAMD64VRCP14PDMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpPopCountMaskedInt8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (PopCountMaskedInt8x16 x mask) - // result: (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpPopCountMaskedInt8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (PopCountMaskedInt8x32 x mask) - // result: (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpPopCountMaskedInt8x64(v *Value) bool { +func rewriteValueAMD64_OpReciprocalSqrtMaskedFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (PopCountMaskedInt8x64 x mask) - // result: (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpPopCountMaskedUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (PopCountMaskedUint16x16 x mask) - // result: (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpPopCountMaskedUint16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (PopCountMaskedUint16x32 x mask) - // result: (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpPopCountMaskedUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (PopCountMaskedUint16x8 x mask) - // result: (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpPopCountMaskedUint32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (PopCountMaskedUint32x16 x mask) - // result: (VPOPCNTDMasked512 x 
(VPMOVVec32x16ToM mask)) + // match: (ReciprocalSqrtMaskedFloat32x16 x mask) + // result: (VRSQRT14PSMasked512 x (VPMOVVec32x16ToM mask)) for { x := v_0 mask := v_1 - v.reset(OpAMD64VPOPCNTDMasked512) + v.reset(OpAMD64VRSQRT14PSMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpPopCountMaskedUint32x4(v *Value) bool { +func rewriteValueAMD64_OpReciprocalSqrtMaskedFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (PopCountMaskedUint32x4 x mask) - // result: (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) + // match: (ReciprocalSqrtMaskedFloat32x4 x mask) + // result: (VRSQRT14PSMasked128 x (VPMOVVec32x4ToM mask)) for { x := v_0 mask := v_1 - v.reset(OpAMD64VPOPCNTDMasked128) + v.reset(OpAMD64VRSQRT14PSMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpPopCountMaskedUint32x8(v *Value) bool { +func rewriteValueAMD64_OpReciprocalSqrtMaskedFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (PopCountMaskedUint32x8 x mask) - // result: (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) + // match: (ReciprocalSqrtMaskedFloat32x8 x mask) + // result: (VRSQRT14PSMasked256 x (VPMOVVec32x8ToM mask)) for { x := v_0 mask := v_1 - v.reset(OpAMD64VPOPCNTDMasked256) + v.reset(OpAMD64VRSQRT14PSMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpPopCountMaskedUint64x2(v *Value) bool { +func rewriteValueAMD64_OpReciprocalSqrtMaskedFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (PopCountMaskedUint64x2 x mask) - // result: (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) + // match: (ReciprocalSqrtMaskedFloat64x2 x mask) + // result: (VRSQRT14PDMasked128 x (VPMOVVec64x2ToM mask)) for { x := v_0 mask := v_1 - v.reset(OpAMD64VPOPCNTQMasked128) + v.reset(OpAMD64VRSQRT14PDMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpPopCountMaskedUint64x4(v *Value) bool { +func rewriteValueAMD64_OpReciprocalSqrtMaskedFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (PopCountMaskedUint64x4 x mask) - // result: (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) + // match: (ReciprocalSqrtMaskedFloat64x4 x mask) + // result: (VRSQRT14PDMasked256 x (VPMOVVec64x4ToM mask)) for { x := v_0 mask := v_1 - v.reset(OpAMD64VPOPCNTQMasked256) + v.reset(OpAMD64VRSQRT14PDMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpPopCountMaskedUint64x8(v *Value) bool { +func rewriteValueAMD64_OpReciprocalSqrtMaskedFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (PopCountMaskedUint64x8 x mask) - // result: (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) + // match: (ReciprocalSqrtMaskedFloat64x8 x mask) + // result: (VRSQRT14PDMasked512 x (VPMOVVec64x8ToM mask)) for { x := v_0 mask := v_1 - v.reset(OpAMD64VPOPCNTQMasked512) + v.reset(OpAMD64VRSQRT14PDMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpPopCountMaskedUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: 
(PopCountMaskedUint8x16 x mask) - // result: (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpPopCountMaskedUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (PopCountMaskedUint8x32 x mask) - // result: (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpPopCountMaskedUint8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (PopCountMaskedUint8x64 x mask) - // result: (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} func rewriteValueAMD64_OpRotateAllLeftMaskedInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -49302,9 +49413,21 @@ func rewriteValueAMD64_OpRotateRightMaskedUint64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpRoundToEven(v *Value) bool { + v_0 := v.Args[0] + // match: (RoundToEven x) + // result: (ROUNDSD [0] x) + for { + x := v_0 + v.reset(OpAMD64ROUNDSD) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRoundToEvenFloat32x4(v *Value) bool { v_0 := v.Args[0] - // match: (RoundFloat32x4 x) + // match: (RoundToEvenFloat32x4 x) // result: (VROUNDPS128 [0] x) for { x := v_0 @@ -49314,9 +49437,9 @@ func rewriteValueAMD64_OpRoundFloat32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenFloat32x8(v *Value) bool { v_0 := v.Args[0] - // match: (RoundFloat32x8 x) + // match: (RoundToEvenFloat32x8 x) // result: (VROUNDPS256 [0] x) for { x := v_0 @@ -49326,9 +49449,9 @@ func rewriteValueAMD64_OpRoundFloat32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenFloat64x2(v *Value) bool { v_0 := v.Args[0] - // match: (RoundFloat64x2 x) + // match: (RoundToEvenFloat64x2 x) // result: (VROUNDPD128 [0] x) for { x := v_0 @@ -49338,9 +49461,9 @@ func rewriteValueAMD64_OpRoundFloat64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenFloat64x4(v *Value) bool { v_0 := v.Args[0] - // match: (RoundFloat64x4 x) + // match: (RoundToEvenFloat64x4 x) // result: (VROUNDPD256 [0] x) for { x := v_0 @@ -49350,9 +49473,9 @@ func rewriteValueAMD64_OpRoundFloat64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundScaledFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenScaledFloat32x16(v *Value) bool { v_0 := v.Args[0] - // match: (RoundScaledFloat32x16 [a] x) + // match: (RoundToEvenScaledFloat32x16 [a] x) // result: (VRNDSCALEPS512 [a+0] x) for { a := auxIntToInt8(v.AuxInt) @@ -49363,9 +49486,9 @@ func rewriteValueAMD64_OpRoundScaledFloat32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundScaledFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenScaledFloat32x4(v *Value) bool { v_0 := v.Args[0] - // match: 
(RoundScaledFloat32x4 [a] x) + // match: (RoundToEvenScaledFloat32x4 [a] x) // result: (VRNDSCALEPS128 [a+0] x) for { a := auxIntToInt8(v.AuxInt) @@ -49376,9 +49499,9 @@ func rewriteValueAMD64_OpRoundScaledFloat32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundScaledFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenScaledFloat32x8(v *Value) bool { v_0 := v.Args[0] - // match: (RoundScaledFloat32x8 [a] x) + // match: (RoundToEvenScaledFloat32x8 [a] x) // result: (VRNDSCALEPS256 [a+0] x) for { a := auxIntToInt8(v.AuxInt) @@ -49389,9 +49512,9 @@ func rewriteValueAMD64_OpRoundScaledFloat32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundScaledFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenScaledFloat64x2(v *Value) bool { v_0 := v.Args[0] - // match: (RoundScaledFloat64x2 [a] x) + // match: (RoundToEvenScaledFloat64x2 [a] x) // result: (VRNDSCALEPD128 [a+0] x) for { a := auxIntToInt8(v.AuxInt) @@ -49402,9 +49525,9 @@ func rewriteValueAMD64_OpRoundScaledFloat64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundScaledFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenScaledFloat64x4(v *Value) bool { v_0 := v.Args[0] - // match: (RoundScaledFloat64x4 [a] x) + // match: (RoundToEvenScaledFloat64x4 [a] x) // result: (VRNDSCALEPD256 [a+0] x) for { a := auxIntToInt8(v.AuxInt) @@ -49415,9 +49538,9 @@ func rewriteValueAMD64_OpRoundScaledFloat64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundScaledFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenScaledFloat64x8(v *Value) bool { v_0 := v.Args[0] - // match: (RoundScaledFloat64x8 [a] x) + // match: (RoundToEvenScaledFloat64x8 [a] x) // result: (VRNDSCALEPD512 [a+0] x) for { a := auxIntToInt8(v.AuxInt) @@ -49428,11 +49551,11 @@ func rewriteValueAMD64_OpRoundScaledFloat64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundScaledMaskedFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenScaledMaskedFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RoundScaledMaskedFloat32x16 [a] x mask) + // match: (RoundToEvenScaledMaskedFloat32x16 [a] x mask) // result: (VRNDSCALEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -49446,11 +49569,11 @@ func rewriteValueAMD64_OpRoundScaledMaskedFloat32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundScaledMaskedFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenScaledMaskedFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RoundScaledMaskedFloat32x4 [a] x mask) + // match: (RoundToEvenScaledMaskedFloat32x4 [a] x mask) // result: (VRNDSCALEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -49464,11 +49587,11 @@ func rewriteValueAMD64_OpRoundScaledMaskedFloat32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundScaledMaskedFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenScaledMaskedFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RoundScaledMaskedFloat32x8 [a] x mask) + // match: (RoundToEvenScaledMaskedFloat32x8 [a] x mask) // result: (VRNDSCALEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -49482,11 +49605,11 @@ func rewriteValueAMD64_OpRoundScaledMaskedFloat32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundScaledMaskedFloat64x2(v *Value) bool { +func 
rewriteValueAMD64_OpRoundToEvenScaledMaskedFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RoundScaledMaskedFloat64x2 [a] x mask) + // match: (RoundToEvenScaledMaskedFloat64x2 [a] x mask) // result: (VRNDSCALEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -49500,11 +49623,11 @@ func rewriteValueAMD64_OpRoundScaledMaskedFloat64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundScaledMaskedFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenScaledMaskedFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RoundScaledMaskedFloat64x4 [a] x mask) + // match: (RoundToEvenScaledMaskedFloat64x4 [a] x mask) // result: (VRNDSCALEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -49518,11 +49641,11 @@ func rewriteValueAMD64_OpRoundScaledMaskedFloat64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundScaledMaskedFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenScaledMaskedFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RoundScaledMaskedFloat64x8 [a] x mask) + // match: (RoundToEvenScaledMaskedFloat64x8 [a] x mask) // result: (VRNDSCALEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -49536,9 +49659,9 @@ func rewriteValueAMD64_OpRoundScaledMaskedFloat64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundScaledResidueFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenScaledResidueFloat32x16(v *Value) bool { v_0 := v.Args[0] - // match: (RoundScaledResidueFloat32x16 [a] x) + // match: (RoundToEvenScaledResidueFloat32x16 [a] x) // result: (VREDUCEPS512 [a+0] x) for { a := auxIntToInt8(v.AuxInt) @@ -49549,9 +49672,9 @@ func rewriteValueAMD64_OpRoundScaledResidueFloat32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundScaledResidueFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenScaledResidueFloat32x4(v *Value) bool { v_0 := v.Args[0] - // match: (RoundScaledResidueFloat32x4 [a] x) + // match: (RoundToEvenScaledResidueFloat32x4 [a] x) // result: (VREDUCEPS128 [a+0] x) for { a := auxIntToInt8(v.AuxInt) @@ -49562,9 +49685,9 @@ func rewriteValueAMD64_OpRoundScaledResidueFloat32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundScaledResidueFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenScaledResidueFloat32x8(v *Value) bool { v_0 := v.Args[0] - // match: (RoundScaledResidueFloat32x8 [a] x) + // match: (RoundToEvenScaledResidueFloat32x8 [a] x) // result: (VREDUCEPS256 [a+0] x) for { a := auxIntToInt8(v.AuxInt) @@ -49575,9 +49698,9 @@ func rewriteValueAMD64_OpRoundScaledResidueFloat32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundScaledResidueFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenScaledResidueFloat64x2(v *Value) bool { v_0 := v.Args[0] - // match: (RoundScaledResidueFloat64x2 [a] x) + // match: (RoundToEvenScaledResidueFloat64x2 [a] x) // result: (VREDUCEPD128 [a+0] x) for { a := auxIntToInt8(v.AuxInt) @@ -49588,9 +49711,9 @@ func rewriteValueAMD64_OpRoundScaledResidueFloat64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundScaledResidueFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenScaledResidueFloat64x4(v *Value) bool { v_0 := v.Args[0] - // match: (RoundScaledResidueFloat64x4 [a] x) + // match: (RoundToEvenScaledResidueFloat64x4 [a] x) // result: (VREDUCEPD256 [a+0] x) for { a 
:= auxIntToInt8(v.AuxInt) @@ -49601,9 +49724,9 @@ func rewriteValueAMD64_OpRoundScaledResidueFloat64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundScaledResidueFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenScaledResidueFloat64x8(v *Value) bool { v_0 := v.Args[0] - // match: (RoundScaledResidueFloat64x8 [a] x) + // match: (RoundToEvenScaledResidueFloat64x8 [a] x) // result: (VREDUCEPD512 [a+0] x) for { a := auxIntToInt8(v.AuxInt) @@ -49614,11 +49737,11 @@ func rewriteValueAMD64_OpRoundScaledResidueFloat64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundScaledResidueMaskedFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenScaledResidueMaskedFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RoundScaledResidueMaskedFloat32x16 [a] x mask) + // match: (RoundToEvenScaledResidueMaskedFloat32x16 [a] x mask) // result: (VREDUCEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -49632,11 +49755,11 @@ func rewriteValueAMD64_OpRoundScaledResidueMaskedFloat32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundScaledResidueMaskedFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenScaledResidueMaskedFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RoundScaledResidueMaskedFloat32x4 [a] x mask) + // match: (RoundToEvenScaledResidueMaskedFloat32x4 [a] x mask) // result: (VREDUCEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -49650,11 +49773,11 @@ func rewriteValueAMD64_OpRoundScaledResidueMaskedFloat32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundScaledResidueMaskedFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenScaledResidueMaskedFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RoundScaledResidueMaskedFloat32x8 [a] x mask) + // match: (RoundToEvenScaledResidueMaskedFloat32x8 [a] x mask) // result: (VREDUCEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -49668,11 +49791,11 @@ func rewriteValueAMD64_OpRoundScaledResidueMaskedFloat32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundScaledResidueMaskedFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenScaledResidueMaskedFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RoundScaledResidueMaskedFloat64x2 [a] x mask) + // match: (RoundToEvenScaledResidueMaskedFloat64x2 [a] x mask) // result: (VREDUCEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -49686,11 +49809,11 @@ func rewriteValueAMD64_OpRoundScaledResidueMaskedFloat64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundScaledResidueMaskedFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenScaledResidueMaskedFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RoundScaledResidueMaskedFloat64x4 [a] x mask) + // match: (RoundToEvenScaledResidueMaskedFloat64x4 [a] x mask) // result: (VREDUCEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -49704,11 +49827,11 @@ func rewriteValueAMD64_OpRoundScaledResidueMaskedFloat64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundScaledResidueMaskedFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenScaledResidueMaskedFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: 
(RoundScaledResidueMaskedFloat64x8 [a] x mask) + // match: (RoundToEvenScaledResidueMaskedFloat64x8 [a] x mask) // result: (VREDUCEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -49722,18 +49845,6 @@ func rewriteValueAMD64_OpRoundScaledResidueMaskedFloat64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundToEven(v *Value) bool { - v_0 := v.Args[0] - // match: (RoundToEven x) - // result: (ROUNDSD [0] x) - for { - x := v_0 - v.reset(OpAMD64ROUNDSD) - v.AuxInt = int8ToAuxInt(0) - v.AddArg(x) - return true - } -} func rewriteValueAMD64_OpRsh16Ux16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -51062,180 +51173,6 @@ func rewriteValueAMD64_OpRsh8x8(v *Value) bool { } return false } -func rewriteValueAMD64_OpSaturatedAddDotProdMaskedInt32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedAddDotProdMaskedInt32x16 x y z mask) - // result: (VPDPWSSDSMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPWSSDSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddDotProdMaskedInt32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedAddDotProdMaskedInt32x4 x y z mask) - // result: (VPDPWSSDSMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPWSSDSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddDotProdMaskedInt32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedAddDotProdMaskedInt32x8 x y z mask) - // result: (VPDPWSSDSMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPWSSDSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedUnsignedSignedPairDotProdMaskedUint8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedUnsignedSignedPairDotProdMaskedUint8x16 x y mask) - // result: (VPMADDUBSWMasked128 x y (VPMOVVec16x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMADDUBSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedUnsignedSignedPairDotProdMaskedUint8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedUnsignedSignedPairDotProdMaskedUint8x32 x y mask) - // result: (VPMADDUBSWMasked256 x y (VPMOVVec16x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMADDUBSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedUnsignedSignedPairDotProdMaskedUint8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedUnsignedSignedPairDotProdMaskedUint8x64 x y mask) - // result: 
(VPMADDUBSWMasked512 x y (VPMOVVec16x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMADDUBSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16 x y z mask) - // result: (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4 x y z mask) - // result: (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8 x y z mask) - // result: (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} func rewriteValueAMD64_OpScaleMaskedFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -57918,12 +57855,12 @@ func rewriteValueAMD64_OpSubSaturatedMaskedUint16x16(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (SubSaturatedMaskedUint16x16 x y mask) - // result: (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) + // result: (VPSUBUSWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBSWMasked256) + v.reset(OpAMD64VPSUBUSWMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -57936,12 +57873,12 @@ func rewriteValueAMD64_OpSubSaturatedMaskedUint16x32(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (SubSaturatedMaskedUint16x32 x y mask) - // result: (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) + // result: (VPSUBUSWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBSWMasked512) + v.reset(OpAMD64VPSUBUSWMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -57954,12 +57891,12 @@ func rewriteValueAMD64_OpSubSaturatedMaskedUint16x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (SubSaturatedMaskedUint16x8 x y mask) - // result: (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) + // result: (VPSUBUSWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBSWMasked128) + v.reset(OpAMD64VPSUBUSWMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, 
v0) @@ -57972,12 +57909,12 @@ func rewriteValueAMD64_OpSubSaturatedMaskedUint8x16(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (SubSaturatedMaskedUint8x16 x y mask) - // result: (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) + // result: (VPSUBUSBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBSBMasked128) + v.reset(OpAMD64VPSUBUSBMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -57990,12 +57927,12 @@ func rewriteValueAMD64_OpSubSaturatedMaskedUint8x32(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (SubSaturatedMaskedUint8x32 x y mask) - // result: (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) + // result: (VPSUBUSBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBSBMasked256) + v.reset(OpAMD64VPSUBUSBMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -58008,12 +57945,12 @@ func rewriteValueAMD64_OpSubSaturatedMaskedUint8x64(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (SubSaturatedMaskedUint8x64 x y mask) - // result: (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) + // result: (VPSUBUSBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBSBMasked512) + v.reset(OpAMD64VPSUBUSBMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -58452,66 +58389,6 @@ func rewriteValueAMD64_OpTruncScaledResidueMaskedFloat64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (UnsignedSignedQuadDotProdAccumulateMaskedInt32x16 x y z mask) - // result: (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (UnsignedSignedQuadDotProdAccumulateMaskedInt32x4 x y z mask) - // result: (VPDPBUSDMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (UnsignedSignedQuadDotProdAccumulateMaskedInt32x8 x y z mask) - // result: (VPDPBUSDMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} func rewriteValueAMD64_OpXorMaskedInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index c7f97e03a0dd83..4be74d913666eb 100644 --- 
a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -11,30 +11,30 @@ import ( const simdPackage = "simd" func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies ...sys.ArchFamily)) { - addF(simdPackage, "Int8x16.Absolute", opLen1(ssa.OpAbsoluteInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.Absolute", opLen1(ssa.OpAbsoluteInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.Absolute", opLen1(ssa.OpAbsoluteInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.Absolute", opLen1(ssa.OpAbsoluteInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.Absolute", opLen1(ssa.OpAbsoluteInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.Absolute", opLen1(ssa.OpAbsoluteInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.Absolute", opLen1(ssa.OpAbsoluteInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.Absolute", opLen1(ssa.OpAbsoluteInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.Absolute", opLen1(ssa.OpAbsoluteInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.Absolute", opLen1(ssa.OpAbsoluteInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.Absolute", opLen1(ssa.OpAbsoluteInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.Absolute", opLen1(ssa.OpAbsoluteInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.AbsoluteMasked", opLen2(ssa.OpAbsoluteMaskedInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.AbsoluteMasked", opLen2(ssa.OpAbsoluteMaskedInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.AbsoluteMasked", opLen2(ssa.OpAbsoluteMaskedInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.AbsoluteMasked", opLen2(ssa.OpAbsoluteMaskedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.AbsoluteMasked", opLen2(ssa.OpAbsoluteMaskedInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.AbsoluteMasked", opLen2(ssa.OpAbsoluteMaskedInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.AbsoluteMasked", opLen2(ssa.OpAbsoluteMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.AbsoluteMasked", opLen2(ssa.OpAbsoluteMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.AbsoluteMasked", opLen2(ssa.OpAbsoluteMaskedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.AbsoluteMasked", opLen2(ssa.OpAbsoluteMaskedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.AbsoluteMasked", opLen2(ssa.OpAbsoluteMaskedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.AbsoluteMasked", opLen2(ssa.OpAbsoluteMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.Abs", opLen1(ssa.OpAbsInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.Abs", opLen1(ssa.OpAbsInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.Abs", opLen1(ssa.OpAbsInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.Abs", opLen1(ssa.OpAbsInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.Abs", opLen1(ssa.OpAbsInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.Abs", opLen1(ssa.OpAbsInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.Abs", opLen1(ssa.OpAbsInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.Abs", opLen1(ssa.OpAbsInt32x8, types.TypeVec256), sys.AMD64) + 
addF(simdPackage, "Int32x16.Abs", opLen1(ssa.OpAbsInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.Abs", opLen1(ssa.OpAbsInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.Abs", opLen1(ssa.OpAbsInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.Abs", opLen1(ssa.OpAbsInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.AbsMasked", opLen2(ssa.OpAbsMaskedInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.AbsMasked", opLen2(ssa.OpAbsMaskedInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.AbsMasked", opLen2(ssa.OpAbsMaskedInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.AbsMasked", opLen2(ssa.OpAbsMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.AbsMasked", opLen2(ssa.OpAbsMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.AbsMasked", opLen2(ssa.OpAbsMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.AbsMasked", opLen2(ssa.OpAbsMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.AbsMasked", opLen2(ssa.OpAbsMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.AbsMasked", opLen2(ssa.OpAbsMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.AbsMasked", opLen2(ssa.OpAbsMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.AbsMasked", opLen2(ssa.OpAbsMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.AbsMasked", opLen2(ssa.OpAbsMaskedInt64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Add", opLen2(ssa.OpAddFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Add", opLen2(ssa.OpAddFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.Add", opLen2(ssa.OpAddFloat32x16, types.TypeVec512), sys.AMD64) @@ -65,12 +65,24 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.Add", opLen2(ssa.OpAddUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.Add", opLen2(ssa.OpAddUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.Add", opLen2(ssa.OpAddUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.AddDotProd", opLen3(ssa.OpAddDotProdInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.AddDotProd", opLen3(ssa.OpAddDotProdInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.AddDotProd", opLen3(ssa.OpAddDotProdInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.AddDotProdMasked", opLen4(ssa.OpAddDotProdMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.AddDotProdMasked", opLen4(ssa.OpAddDotProdMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.AddDotProdMasked", opLen4(ssa.OpAddDotProdMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.AddDotProdPairsSaturated", opLen3(ssa.OpAddDotProdPairsSaturatedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.AddDotProdPairsSaturated", opLen3(ssa.OpAddDotProdPairsSaturatedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.AddDotProdPairsSaturated", opLen3(ssa.OpAddDotProdPairsSaturatedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.AddDotProdPairsSaturatedMasked", opLen4(ssa.OpAddDotProdPairsSaturatedMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.AddDotProdPairsSaturatedMasked", opLen4(ssa.OpAddDotProdPairsSaturatedMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.AddDotProdPairsSaturatedMasked", opLen4(ssa.OpAddDotProdPairsSaturatedMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.AddDotProdQuadruple", opLen3_31(ssa.OpAddDotProdQuadrupleInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.AddDotProdQuadruple", opLen3_31(ssa.OpAddDotProdQuadrupleInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.AddDotProdQuadruple", opLen3_31(ssa.OpAddDotProdQuadrupleInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.AddDotProdQuadrupleMasked", opLen4_31(ssa.OpAddDotProdQuadrupleMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.AddDotProdQuadrupleMasked", opLen4_31(ssa.OpAddDotProdQuadrupleMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.AddDotProdQuadrupleMasked", opLen4_31(ssa.OpAddDotProdQuadrupleMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.AddDotProdQuadrupleSaturated", opLen3_31(ssa.OpAddDotProdQuadrupleSaturatedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.AddDotProdQuadrupleSaturated", opLen3_31(ssa.OpAddDotProdQuadrupleSaturatedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.AddDotProdQuadrupleSaturated", opLen3_31(ssa.OpAddDotProdQuadrupleSaturatedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.AddDotProdQuadrupleSaturatedMasked", opLen4_31(ssa.OpAddDotProdQuadrupleSaturatedMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.AddDotProdQuadrupleSaturatedMasked", opLen4_31(ssa.OpAddDotProdQuadrupleSaturatedMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.AddDotProdQuadrupleSaturatedMasked", opLen4_31(ssa.OpAddDotProdQuadrupleSaturatedMaskedInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.AddMasked", opLen3(ssa.OpAddMaskedFloat32x4, types.TypeVec128), sys.AMD64) 
addF(simdPackage, "Float32x8.AddMasked", opLen3(ssa.OpAddMaskedFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.AddMasked", opLen3(ssa.OpAddMaskedFloat32x16, types.TypeVec512), sys.AMD64) @@ -215,30 +227,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.AndNotMasked", opLen3_21(ssa.OpAndNotMaskedUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.AndNotMasked", opLen3_21(ssa.OpAndNotMaskedUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.AndNotMasked", opLen3_21(ssa.OpAndNotMaskedUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.ApproximateReciprocalMasked", opLen2(ssa.OpApproximateReciprocalMaskedFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.ApproximateReciprocalMasked", opLen2(ssa.OpApproximateReciprocalMaskedFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.ApproximateReciprocalMasked", opLen2(ssa.OpApproximateReciprocalMaskedFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.ApproximateReciprocalMasked", opLen2(ssa.OpApproximateReciprocalMaskedFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.ApproximateReciprocalMasked", opLen2(ssa.OpApproximateReciprocalMaskedFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.ApproximateReciprocalMasked", opLen2(ssa.OpApproximateReciprocalMaskedFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.ApproximateReciprocalOfSqrtMasked", opLen2(ssa.OpApproximateReciprocalOfSqrtMaskedFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.ApproximateReciprocalOfSqrtMasked", opLen2(ssa.OpApproximateReciprocalOfSqrtMaskedFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.ApproximateReciprocalOfSqrtMasked", 
opLen2(ssa.OpApproximateReciprocalOfSqrtMaskedFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.ApproximateReciprocalOfSqrtMasked", opLen2(ssa.OpApproximateReciprocalOfSqrtMaskedFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.ApproximateReciprocalOfSqrtMasked", opLen2(ssa.OpApproximateReciprocalOfSqrtMaskedFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.ApproximateReciprocalOfSqrtMasked", opLen2(ssa.OpApproximateReciprocalOfSqrtMaskedFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x16.Average", opLen2(ssa.OpAverageUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x32.Average", opLen2(ssa.OpAverageUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x64.Average", opLen2(ssa.OpAverageUint8x64, types.TypeVec512), sys.AMD64) @@ -321,6 +309,12 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float32x4.ConvertToUint32Masked", opLen2(ssa.OpConvertToUint32MaskedFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.ConvertToUint32Masked", opLen2(ssa.OpConvertToUint32MaskedFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.ConvertToUint32Masked", opLen2(ssa.OpConvertToUint32MaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.CopySign", opLen2(ssa.OpCopySignInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.CopySign", opLen2(ssa.OpCopySignInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x8.CopySign", opLen2(ssa.OpCopySignInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.CopySign", opLen2(ssa.OpCopySignInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x4.CopySign", opLen2(ssa.OpCopySignInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.CopySign", opLen2(ssa.OpCopySignInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x4.Div", opLen2(ssa.OpDivFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Div", opLen2(ssa.OpDivFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.Div", opLen2(ssa.OpDivFloat32x16, types.TypeVec512), sys.AMD64) @@ -333,9 +327,18 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float64x2.DivMasked", opLen3(ssa.OpDivMaskedFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.DivMasked", opLen3(ssa.OpDivMaskedFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.DivMasked", opLen3(ssa.OpDivMaskedFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.DotProdBroadcast", opLen2(ssa.OpDotProdBroadcastFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.DotProdBroadcast", opLen2(ssa.OpDotProdBroadcastFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x2.DotProdBroadcast", opLen2(ssa.OpDotProdBroadcastFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.DotProdPairs", opLen2(ssa.OpDotProdPairsInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.DotProdPairs", opLen2(ssa.OpDotProdPairsInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.DotProdPairs", opLen2(ssa.OpDotProdPairsInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.DotProdPairsMasked", opLen3(ssa.OpDotProdPairsMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.DotProdPairsMasked", opLen3(ssa.OpDotProdPairsMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.DotProdPairsMasked", opLen3(ssa.OpDotProdPairsMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.DotProdPairsSaturated", opLen2(ssa.OpDotProdPairsSaturatedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.DotProdPairsSaturated", opLen2(ssa.OpDotProdPairsSaturatedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.DotProdPairsSaturated", opLen2(ssa.OpDotProdPairsSaturatedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.DotProdPairsSaturatedMasked", opLen3(ssa.OpDotProdPairsSaturatedMaskedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.DotProdPairsSaturatedMasked", opLen3(ssa.OpDotProdPairsSaturatedMaskedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.DotProdPairsSaturatedMasked", opLen3(ssa.OpDotProdPairsSaturatedMaskedUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.Equal", opLen2(ssa.OpEqualInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Equal", opLen2(ssa.OpEqualInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x64.Equal", opLen2(ssa.OpEqualInt8x64, types.TypeVec512), sys.AMD64) @@ -454,42 +457,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float64x2.FloorScaledResidueMasked", opLen2Imm8(ssa.OpFloorScaledResidueMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.FloorScaledResidueMasked", opLen2Imm8(ssa.OpFloorScaledResidueMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.FloorScaledResidueMasked", opLen2Imm8(ssa.OpFloorScaledResidueMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplyAdd", opLen3(ssa.OpFusedMultiplyAddFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplyAdd", opLen3(ssa.OpFusedMultiplyAddFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplyAdd", opLen3(ssa.OpFusedMultiplyAddFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.FusedMultiplyAdd", opLen3(ssa.OpFusedMultiplyAddFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplyAdd", opLen3(ssa.OpFusedMultiplyAddFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplyAdd", opLen3(ssa.OpFusedMultiplyAddFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplyAddMasked", opLen4(ssa.OpFusedMultiplyAddMaskedFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplyAddMasked", opLen4(ssa.OpFusedMultiplyAddMaskedFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplyAddMasked", opLen4(ssa.OpFusedMultiplyAddMaskedFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.FusedMultiplyAddMasked", opLen4(ssa.OpFusedMultiplyAddMaskedFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplyAddMasked", opLen4(ssa.OpFusedMultiplyAddMaskedFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplyAddMasked", opLen4(ssa.OpFusedMultiplyAddMaskedFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplyAddSub", opLen3(ssa.OpFusedMultiplyAddSubFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplyAddSub", opLen3(ssa.OpFusedMultiplyAddSubFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplyAddSub", opLen3(ssa.OpFusedMultiplyAddSubFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.FusedMultiplyAddSub", opLen3(ssa.OpFusedMultiplyAddSubFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplyAddSub", opLen3(ssa.OpFusedMultiplyAddSubFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplyAddSub", opLen3(ssa.OpFusedMultiplyAddSubFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplyAddSubMasked", opLen4(ssa.OpFusedMultiplyAddSubMaskedFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplyAddSubMasked", opLen4(ssa.OpFusedMultiplyAddSubMaskedFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplyAddSubMasked", opLen4(ssa.OpFusedMultiplyAddSubMaskedFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.FusedMultiplyAddSubMasked", opLen4(ssa.OpFusedMultiplyAddSubMaskedFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplyAddSubMasked", opLen4(ssa.OpFusedMultiplyAddSubMaskedFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplyAddSubMasked", opLen4(ssa.OpFusedMultiplyAddSubMaskedFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, 
"Float32x4.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplySubAddMasked", opLen4(ssa.OpFusedMultiplySubAddMaskedFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplySubAddMasked", opLen4(ssa.OpFusedMultiplySubAddMaskedFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplySubAddMasked", opLen4(ssa.OpFusedMultiplySubAddMaskedFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.FusedMultiplySubAddMasked", opLen4(ssa.OpFusedMultiplySubAddMaskedFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplySubAddMasked", opLen4(ssa.OpFusedMultiplySubAddMaskedFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplySubAddMasked", opLen4(ssa.OpFusedMultiplySubAddMaskedFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x16.GaloisFieldAffineTransform", opLen2Imm8_2I(ssa.OpGaloisFieldAffineTransformUint8x16, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Uint8x32.GaloisFieldAffineTransform", opLen2Imm8_2I(ssa.OpGaloisFieldAffineTransformUint8x32, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Uint8x64.GaloisFieldAffineTransform", opLen2Imm8_2I(ssa.OpGaloisFieldAffineTransformUint8x64, types.TypeVec512, 0), sys.AMD64) @@ -943,34 +910,49 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Int64x2.Mul", opLen2(ssa.OpMulInt64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int64x4.Mul", opLen2(ssa.OpMulInt64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int64x8.Mul", opLen2(ssa.OpMulInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.Mul", opLen2(ssa.OpMulUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.Mul", opLen2(ssa.OpMulUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.Mul", opLen2(ssa.OpMulUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.Mul", opLen2(ssa.OpMulUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.Mul", opLen2(ssa.OpMulUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.Mul", opLen2(ssa.OpMulUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.Mul", opLen2(ssa.OpMulUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.Mul", opLen2(ssa.OpMulUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.Mul", opLen2(ssa.OpMulUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MulAdd", opLen3(ssa.OpMulAddFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MulAdd", opLen3(ssa.OpMulAddFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MulAdd", opLen3(ssa.OpMulAddFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MulAdd", opLen3(ssa.OpMulAddFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MulAdd", opLen3(ssa.OpMulAddFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MulAdd", opLen3(ssa.OpMulAddFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MulAddMasked", opLen4(ssa.OpMulAddMaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MulAddMasked", opLen4(ssa.OpMulAddMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MulAddMasked", opLen4(ssa.OpMulAddMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MulAddMasked", opLen4(ssa.OpMulAddMaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MulAddMasked", opLen4(ssa.OpMulAddMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MulAddMasked", opLen4(ssa.OpMulAddMaskedFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MulAddSub", opLen3(ssa.OpMulAddSubFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MulAddSub", opLen3(ssa.OpMulAddSubFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MulAddSub", opLen3(ssa.OpMulAddSubFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MulAddSub", opLen3(ssa.OpMulAddSubFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MulAddSub", opLen3(ssa.OpMulAddSubFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MulAddSub", opLen3(ssa.OpMulAddSubFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MulAddSubMasked", opLen4(ssa.OpMulAddSubMaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MulAddSubMasked", opLen4(ssa.OpMulAddSubMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MulAddSubMasked", opLen4(ssa.OpMulAddSubMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MulAddSubMasked", opLen4(ssa.OpMulAddSubMaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MulAddSubMasked", 
opLen4(ssa.OpMulAddSubMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MulAddSubMasked", opLen4(ssa.OpMulAddSubMaskedFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x2.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint32x4.MulEvenWiden", opLen2(ssa.OpMulEvenWidenUint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x8.MulEvenWiden", opLen2(ssa.OpMulEvenWidenUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x2.MulEvenWiden", opLen2(ssa.OpMulEvenWidenUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.MulEvenWiden", opLen2(ssa.OpMulEvenWidenUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.MulEvenWiden", opLen2(ssa.OpMulEvenWidenUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.MulEvenWidenMasked", opLen3(ssa.OpMulEvenWidenMaskedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.MulEvenWidenMasked", opLen3(ssa.OpMulEvenWidenMaskedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.MulEvenWidenMasked", opLen3(ssa.OpMulEvenWidenMaskedInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.MulEvenWidenMasked", opLen3(ssa.OpMulEvenWidenMaskedUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.MulEvenWidenMasked", opLen3(ssa.OpMulEvenWidenMaskedUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.MulEvenWidenMasked", opLen3(ssa.OpMulEvenWidenMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.MulHigh", opLen2(ssa.OpMulHighInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.MulHigh", opLen2(ssa.OpMulHighInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.MulHigh", opLen2(ssa.OpMulHighInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.MulHigh", opLen2(ssa.OpMulHighUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.MulHigh", opLen2(ssa.OpMulHighUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.MulHigh", opLen2(ssa.OpMulHighUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.MulHighMasked", opLen3(ssa.OpMulHighMaskedInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.MulHighMasked", opLen3(ssa.OpMulHighMaskedInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.MulHighMasked", opLen3(ssa.OpMulHighMaskedInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.MulHighMasked", opLen3(ssa.OpMulHighMaskedUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.MulHighMasked", opLen3(ssa.OpMulHighMaskedUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.MulHighMasked", opLen3(ssa.OpMulHighMaskedUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.MulMasked", opLen3(ssa.OpMulMaskedFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.MulMasked", opLen3(ssa.OpMulMaskedFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.MulMasked", opLen3(ssa.OpMulMaskedFloat32x16, 
types.TypeVec512), sys.AMD64) @@ -986,6 +968,27 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Int64x2.MulMasked", opLen3(ssa.OpMulMaskedInt64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int64x4.MulMasked", opLen3(ssa.OpMulMaskedInt64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int64x8.MulMasked", opLen3(ssa.OpMulMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.MulMasked", opLen3(ssa.OpMulMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.MulMasked", opLen3(ssa.OpMulMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.MulMasked", opLen3(ssa.OpMulMaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.MulMasked", opLen3(ssa.OpMulMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.MulMasked", opLen3(ssa.OpMulMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.MulMasked", opLen3(ssa.OpMulMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.MulMasked", opLen3(ssa.OpMulMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.MulMasked", opLen3(ssa.OpMulMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.MulMasked", opLen3(ssa.OpMulMaskedUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MulSubAdd", opLen3(ssa.OpMulSubAddFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MulSubAdd", opLen3(ssa.OpMulSubAddFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MulSubAdd", opLen3(ssa.OpMulSubAddFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MulSubAdd", opLen3(ssa.OpMulSubAddFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MulSubAdd", opLen3(ssa.OpMulSubAddFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MulSubAdd", opLen3(ssa.OpMulSubAddFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MulSubAddMasked", opLen4(ssa.OpMulSubAddMaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MulSubAddMasked", opLen4(ssa.OpMulSubAddMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MulSubAddMasked", opLen4(ssa.OpMulSubAddMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MulSubAddMasked", opLen4(ssa.OpMulSubAddMaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MulSubAddMasked", opLen4(ssa.OpMulSubAddMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MulSubAddMasked", opLen4(ssa.OpMulSubAddMaskedFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.NotEqual", opLen2(ssa.OpNotEqualFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.NotEqual", opLen2(ssa.OpNotEqualFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.NotEqual", opLen2(ssa.OpNotEqualFloat32x16, types.TypeVec512), sys.AMD64) @@ -1046,6 +1049,54 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.OnesCount", opLen1(ssa.OpOnesCountInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.OnesCount", opLen1(ssa.OpOnesCountInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.OnesCount", opLen1(ssa.OpOnesCountInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.OnesCount", opLen1(ssa.OpOnesCountInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.OnesCount", opLen1(ssa.OpOnesCountInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.OnesCount", opLen1(ssa.OpOnesCountInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.OnesCount", opLen1(ssa.OpOnesCountInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.OnesCount", opLen1(ssa.OpOnesCountInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.OnesCount", opLen1(ssa.OpOnesCountInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.OnesCount", opLen1(ssa.OpOnesCountInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.OnesCount", opLen1(ssa.OpOnesCountInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.OnesCount", opLen1(ssa.OpOnesCountInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.OnesCount", opLen1(ssa.OpOnesCountUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.OnesCount", opLen1(ssa.OpOnesCountUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.OnesCount", opLen1(ssa.OpOnesCountUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.OnesCount", opLen1(ssa.OpOnesCountUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.OnesCount", opLen1(ssa.OpOnesCountUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.OnesCount", opLen1(ssa.OpOnesCountUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.OnesCount", opLen1(ssa.OpOnesCountUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.OnesCount", opLen1(ssa.OpOnesCountUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.OnesCount", opLen1(ssa.OpOnesCountUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.OnesCount", opLen1(ssa.OpOnesCountUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.OnesCount", opLen1(ssa.OpOnesCountUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.OnesCount", opLen1(ssa.OpOnesCountUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.OnesCountMasked", 
opLen2(ssa.OpOnesCountMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.Or", opLen2(ssa.OpOrInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Or", opLen2(ssa.OpOrInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x64.Or", opLen2(ssa.OpOrInt8x64, types.TypeVec512), sys.AMD64) @@ -1082,12 +1133,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.OrMasked", opLen3(ssa.OpOrMaskedUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.OrMasked", opLen3(ssa.OpOrMaskedUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.OrMasked", opLen3(ssa.OpOrMaskedUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.PairDotProd", opLen2(ssa.OpPairDotProdInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.PairDotProd", opLen2(ssa.OpPairDotProdInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.PairDotProd", opLen2(ssa.OpPairDotProdInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.PairDotProdMasked", opLen3(ssa.OpPairDotProdMaskedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.PairDotProdMasked", opLen3(ssa.OpPairDotProdMaskedInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.PairDotProdMasked", opLen3(ssa.OpPairDotProdMaskedInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.Permute", opLen2_21(ssa.OpPermuteInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x16.Permute", opLen2_21(ssa.OpPermuteUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Permute", opLen2_21(ssa.OpPermuteInt8x32, types.TypeVec256), sys.AMD64) @@ -1196,54 +1241,30 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float64x8.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int64x8.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedInt64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint64x8.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.PopCount", opLen1(ssa.OpPopCountInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.PopCount", opLen1(ssa.OpPopCountInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.PopCount", opLen1(ssa.OpPopCountInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.PopCount", opLen1(ssa.OpPopCountInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.PopCount", opLen1(ssa.OpPopCountInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.PopCount", opLen1(ssa.OpPopCountInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.PopCount", opLen1(ssa.OpPopCountInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.PopCount", opLen1(ssa.OpPopCountInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.PopCount", opLen1(ssa.OpPopCountInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.PopCount", opLen1(ssa.OpPopCountInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.PopCount", opLen1(ssa.OpPopCountInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.PopCount", opLen1(ssa.OpPopCountInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.PopCount", opLen1(ssa.OpPopCountUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.PopCount", opLen1(ssa.OpPopCountUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.PopCount", opLen1(ssa.OpPopCountUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.PopCount", opLen1(ssa.OpPopCountUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.PopCount", opLen1(ssa.OpPopCountUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.PopCount", opLen1(ssa.OpPopCountUint16x32, types.TypeVec512), sys.AMD64) - 
addF(simdPackage, "Uint32x4.PopCount", opLen1(ssa.OpPopCountUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.PopCount", opLen1(ssa.OpPopCountUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.PopCount", opLen1(ssa.OpPopCountUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.PopCount", opLen1(ssa.OpPopCountUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.PopCount", opLen1(ssa.OpPopCountUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.PopCount", opLen1(ssa.OpPopCountUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.PopCountMasked", opLen2(ssa.OpPopCountMaskedInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.PopCountMasked", opLen2(ssa.OpPopCountMaskedInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.PopCountMasked", opLen2(ssa.OpPopCountMaskedInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.PopCountMasked", opLen2(ssa.OpPopCountMaskedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.PopCountMasked", opLen2(ssa.OpPopCountMaskedInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.PopCountMasked", opLen2(ssa.OpPopCountMaskedInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.PopCountMasked", opLen2(ssa.OpPopCountMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.PopCountMasked", opLen2(ssa.OpPopCountMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.PopCountMasked", opLen2(ssa.OpPopCountMaskedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.PopCountMasked", opLen2(ssa.OpPopCountMaskedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.PopCountMasked", opLen2(ssa.OpPopCountMaskedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.PopCountMasked", opLen2(ssa.OpPopCountMaskedInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.PopCountMasked", opLen2(ssa.OpPopCountMaskedUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.PopCountMasked", opLen2(ssa.OpPopCountMaskedUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.PopCountMasked", opLen2(ssa.OpPopCountMaskedUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.PopCountMasked", opLen2(ssa.OpPopCountMaskedUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.PopCountMasked", opLen2(ssa.OpPopCountMaskedUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.PopCountMasked", opLen2(ssa.OpPopCountMaskedUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.PopCountMasked", opLen2(ssa.OpPopCountMaskedUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.PopCountMasked", opLen2(ssa.OpPopCountMaskedUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.PopCountMasked", opLen2(ssa.OpPopCountMaskedUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.PopCountMasked", opLen2(ssa.OpPopCountMaskedUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.PopCountMasked", opLen2(ssa.OpPopCountMaskedUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.PopCountMasked", opLen2(ssa.OpPopCountMaskedUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.Reciprocal", opLen1(ssa.OpReciprocalFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.Reciprocal", opLen1(ssa.OpReciprocalFloat32x8, types.TypeVec256), sys.AMD64) + 
addF(simdPackage, "Float32x16.Reciprocal", opLen1(ssa.OpReciprocalFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.Reciprocal", opLen1(ssa.OpReciprocalFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.Reciprocal", opLen1(ssa.OpReciprocalFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.Reciprocal", opLen1(ssa.OpReciprocalFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.ReciprocalMasked", opLen2(ssa.OpReciprocalMaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.ReciprocalMasked", opLen2(ssa.OpReciprocalMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.ReciprocalMasked", opLen2(ssa.OpReciprocalMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.ReciprocalMasked", opLen2(ssa.OpReciprocalMaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.ReciprocalMasked", opLen2(ssa.OpReciprocalMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.ReciprocalMasked", opLen2(ssa.OpReciprocalMaskedFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.ReciprocalSqrt", opLen1(ssa.OpReciprocalSqrtFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.ReciprocalSqrt", opLen1(ssa.OpReciprocalSqrtFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.ReciprocalSqrt", opLen1(ssa.OpReciprocalSqrtFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.ReciprocalSqrt", opLen1(ssa.OpReciprocalSqrtFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.ReciprocalSqrt", opLen1(ssa.OpReciprocalSqrtFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.ReciprocalSqrt", opLen1(ssa.OpReciprocalSqrtFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.ReciprocalSqrtMasked", opLen2(ssa.OpReciprocalSqrtMaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.ReciprocalSqrtMasked", opLen2(ssa.OpReciprocalSqrtMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.ReciprocalSqrtMasked", opLen2(ssa.OpReciprocalSqrtMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.ReciprocalSqrtMasked", opLen2(ssa.OpReciprocalSqrtMaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.ReciprocalSqrtMasked", opLen2(ssa.OpReciprocalSqrtMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.ReciprocalSqrtMasked", opLen2(ssa.OpReciprocalSqrtMaskedFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.RotateAllLeft", opLen1Imm8(ssa.OpRotateAllLeftInt32x4, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Int32x8.RotateAllLeft", opLen1Imm8(ssa.OpRotateAllLeftInt32x8, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Int32x16.RotateAllLeft", opLen1Imm8(ssa.OpRotateAllLeftInt32x16, types.TypeVec512, 0), sys.AMD64) @@ -1340,52 +1361,34 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.RotateRightMasked", opLen3(ssa.OpRotateRightMaskedUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.RotateRightMasked", opLen3(ssa.OpRotateRightMaskedUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.RotateRightMasked", opLen3(ssa.OpRotateRightMaskedUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.Round", opLen1(ssa.OpRoundFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.Round", opLen1(ssa.OpRoundFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x2.Round", opLen1(ssa.OpRoundFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.Round", opLen1(ssa.OpRoundFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x4.RoundScaled", opLen1Imm8(ssa.OpRoundScaledFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.RoundScaled", opLen1Imm8(ssa.OpRoundScaledFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.RoundScaled", opLen1Imm8(ssa.OpRoundScaledFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.RoundScaled", opLen1Imm8(ssa.OpRoundScaledFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.RoundScaled", opLen1Imm8(ssa.OpRoundScaledFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.RoundScaled", opLen1Imm8(ssa.OpRoundScaledFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.RoundScaledMasked", opLen2Imm8(ssa.OpRoundScaledMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.RoundScaledMasked", opLen2Imm8(ssa.OpRoundScaledMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.RoundScaledMasked", opLen2Imm8(ssa.OpRoundScaledMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.RoundScaledMasked", opLen2Imm8(ssa.OpRoundScaledMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.RoundScaledMasked", opLen2Imm8(ssa.OpRoundScaledMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.RoundScaledMasked", opLen2Imm8(ssa.OpRoundScaledMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.RoundScaledResidue", opLen1Imm8(ssa.OpRoundScaledResidueFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.RoundScaledResidue", opLen1Imm8(ssa.OpRoundScaledResidueFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.RoundScaledResidue", opLen1Imm8(ssa.OpRoundScaledResidueFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.RoundScaledResidue", opLen1Imm8(ssa.OpRoundScaledResidueFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.RoundScaledResidue", opLen1Imm8(ssa.OpRoundScaledResidueFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.RoundScaledResidue", opLen1Imm8(ssa.OpRoundScaledResidueFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.RoundScaledResidueMasked", opLen2Imm8(ssa.OpRoundScaledResidueMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.RoundScaledResidueMasked", opLen2Imm8(ssa.OpRoundScaledResidueMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.RoundScaledResidueMasked", opLen2Imm8(ssa.OpRoundScaledResidueMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.RoundScaledResidueMasked", opLen2Imm8(ssa.OpRoundScaledResidueMaskedFloat64x2, 
types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.RoundScaledResidueMasked", opLen2Imm8(ssa.OpRoundScaledResidueMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.RoundScaledResidueMasked", opLen2Imm8(ssa.OpRoundScaledResidueMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Int32x4.SaturatedAddDotProd", opLen3(ssa.OpSaturatedAddDotProdInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.SaturatedAddDotProd", opLen3(ssa.OpSaturatedAddDotProdInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.SaturatedAddDotProd", opLen3(ssa.OpSaturatedAddDotProdInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.SaturatedAddDotProdMasked", opLen4(ssa.OpSaturatedAddDotProdMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.SaturatedAddDotProdMasked", opLen4(ssa.OpSaturatedAddDotProdMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.SaturatedAddDotProdMasked", opLen4(ssa.OpSaturatedAddDotProdMaskedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.SaturatedUnsignedSignedPairDotProdMasked", opLen3(ssa.OpSaturatedUnsignedSignedPairDotProdMaskedUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.SaturatedUnsignedSignedPairDotProdMasked", opLen3(ssa.OpSaturatedUnsignedSignedPairDotProdMaskedUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.SaturatedUnsignedSignedPairDotProdMasked", opLen3(ssa.OpSaturatedUnsignedSignedPairDotProdMaskedUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3_31(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3_31(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3_31(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.SaturatedUnsignedSignedQuadDotProdAccumulateMasked", opLen4_31(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.SaturatedUnsignedSignedQuadDotProdAccumulateMasked", opLen4_31(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.SaturatedUnsignedSignedQuadDotProdAccumulateMasked", opLen4_31(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.RoundToEven", opLen1(ssa.OpRoundToEvenFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.RoundToEven", opLen1(ssa.OpRoundToEvenFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x2.RoundToEven", opLen1(ssa.OpRoundToEvenFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.RoundToEven", opLen1(ssa.OpRoundToEvenFloat64x4, 
types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x4.RoundToEvenScaled", opLen1Imm8(ssa.OpRoundToEvenScaledFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.RoundToEvenScaled", opLen1Imm8(ssa.OpRoundToEvenScaledFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.RoundToEvenScaled", opLen1Imm8(ssa.OpRoundToEvenScaledFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.RoundToEvenScaled", opLen1Imm8(ssa.OpRoundToEvenScaledFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.RoundToEvenScaled", opLen1Imm8(ssa.OpRoundToEvenScaledFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.RoundToEvenScaled", opLen1Imm8(ssa.OpRoundToEvenScaledFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.RoundToEvenScaledMasked", opLen2Imm8(ssa.OpRoundToEvenScaledMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.RoundToEvenScaledMasked", opLen2Imm8(ssa.OpRoundToEvenScaledMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.RoundToEvenScaledMasked", opLen2Imm8(ssa.OpRoundToEvenScaledMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.RoundToEvenScaledMasked", opLen2Imm8(ssa.OpRoundToEvenScaledMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.RoundToEvenScaledMasked", opLen2Imm8(ssa.OpRoundToEvenScaledMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.RoundToEvenScaledMasked", opLen2Imm8(ssa.OpRoundToEvenScaledMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.RoundToEvenScaledResidue", opLen1Imm8(ssa.OpRoundToEvenScaledResidueFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.RoundToEvenScaledResidue", opLen1Imm8(ssa.OpRoundToEvenScaledResidueFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.RoundToEvenScaledResidue", opLen1Imm8(ssa.OpRoundToEvenScaledResidueFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.RoundToEvenScaledResidue", opLen1Imm8(ssa.OpRoundToEvenScaledResidueFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.RoundToEvenScaledResidue", opLen1Imm8(ssa.OpRoundToEvenScaledResidueFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.RoundToEvenScaledResidue", opLen1Imm8(ssa.OpRoundToEvenScaledResidueFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.RoundToEvenScaledResidueMasked", opLen2Imm8(ssa.OpRoundToEvenScaledResidueMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.RoundToEvenScaledResidueMasked", opLen2Imm8(ssa.OpRoundToEvenScaledResidueMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.RoundToEvenScaledResidueMasked", opLen2Imm8(ssa.OpRoundToEvenScaledResidueMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.RoundToEvenScaledResidueMasked", opLen2Imm8(ssa.OpRoundToEvenScaledResidueMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.RoundToEvenScaledResidueMasked", opLen2Imm8(ssa.OpRoundToEvenScaledResidueMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.RoundToEvenScaledResidueMasked", opLen2Imm8(ssa.OpRoundToEvenScaledResidueMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.Scale", opLen2(ssa.OpScaleFloat32x4, types.TypeVec128), sys.AMD64) 
addF(simdPackage, "Float32x8.Scale", opLen2(ssa.OpScaleFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.Scale", opLen2(ssa.OpScaleFloat32x16, types.TypeVec512), sys.AMD64) @@ -1734,12 +1737,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.Sign", opLen2(ssa.OpSignInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.Sign", opLen2(ssa.OpSignInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x8.Sign", opLen2(ssa.OpSignInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.Sign", opLen2(ssa.OpSignInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x4.Sign", opLen2(ssa.OpSignInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.Sign", opLen2(ssa.OpSignInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x4.Sqrt", opLen1(ssa.OpSqrtFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Sqrt", opLen1(ssa.OpSqrtFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.Sqrt", opLen1(ssa.OpSqrtFloat32x16, types.TypeVec512), sys.AMD64) @@ -1878,12 +1875,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float64x2.TruncScaledResidueMasked", opLen2Imm8(ssa.OpTruncScaledResidueMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.TruncScaledResidueMasked", opLen2Imm8(ssa.OpTruncScaledResidueMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.TruncScaledResidueMasked", opLen2Imm8(ssa.OpTruncScaledResidueMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Int8x16.UnsignedSignedQuadDotProdAccumulate", opLen3_31(ssa.OpUnsignedSignedQuadDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.UnsignedSignedQuadDotProdAccumulate", opLen3_31(ssa.OpUnsignedSignedQuadDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.UnsignedSignedQuadDotProdAccumulate", opLen3_31(ssa.OpUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.UnsignedSignedQuadDotProdAccumulateMasked", opLen4_31(ssa.OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.UnsignedSignedQuadDotProdAccumulateMasked", opLen4_31(ssa.OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.UnsignedSignedQuadDotProdAccumulateMasked", opLen4_31(ssa.OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.Xor", opLen2(ssa.OpXorInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Xor", opLen2(ssa.OpXorInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x64.Xor", opLen2(ssa.OpXorInt8x64, types.TypeVec512), sys.AMD64) diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 2138271769db58..712ee70d5195be 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -4,153 +4,153 @@ package simd -/* Absolute */ +/* Abs */ -// Absolute computes the absolute value of each element. 
+// Abs computes the absolute value of each element. // // Asm: VPABSB, CPU Feature: AVX -func (x Int8x16) Absolute() Int8x16 +func (x Int8x16) Abs() Int8x16 -// Absolute computes the absolute value of each element. +// Abs computes the absolute value of each element. // // Asm: VPABSB, CPU Feature: AVX2 -func (x Int8x32) Absolute() Int8x32 +func (x Int8x32) Abs() Int8x32 -// Absolute computes the absolute value of each element. +// Abs computes the absolute value of each element. // // Asm: VPABSB, CPU Feature: AVX512BW -func (x Int8x64) Absolute() Int8x64 +func (x Int8x64) Abs() Int8x64 -// Absolute computes the absolute value of each element. +// Abs computes the absolute value of each element. // // Asm: VPABSW, CPU Feature: AVX -func (x Int16x8) Absolute() Int16x8 +func (x Int16x8) Abs() Int16x8 -// Absolute computes the absolute value of each element. +// Abs computes the absolute value of each element. // // Asm: VPABSW, CPU Feature: AVX2 -func (x Int16x16) Absolute() Int16x16 +func (x Int16x16) Abs() Int16x16 -// Absolute computes the absolute value of each element. +// Abs computes the absolute value of each element. // // Asm: VPABSW, CPU Feature: AVX512BW -func (x Int16x32) Absolute() Int16x32 +func (x Int16x32) Abs() Int16x32 -// Absolute computes the absolute value of each element. +// Abs computes the absolute value of each element. // // Asm: VPABSD, CPU Feature: AVX -func (x Int32x4) Absolute() Int32x4 +func (x Int32x4) Abs() Int32x4 -// Absolute computes the absolute value of each element. +// Abs computes the absolute value of each element. // // Asm: VPABSD, CPU Feature: AVX2 -func (x Int32x8) Absolute() Int32x8 +func (x Int32x8) Abs() Int32x8 -// Absolute computes the absolute value of each element. +// Abs computes the absolute value of each element. // // Asm: VPABSD, CPU Feature: AVX512F -func (x Int32x16) Absolute() Int32x16 +func (x Int32x16) Abs() Int32x16 -// Absolute computes the absolute value of each element. +// Abs computes the absolute value of each element. // // Asm: VPABSQ, CPU Feature: AVX512F -func (x Int64x2) Absolute() Int64x2 +func (x Int64x2) Abs() Int64x2 -// Absolute computes the absolute value of each element. +// Abs computes the absolute value of each element. // // Asm: VPABSQ, CPU Feature: AVX512F -func (x Int64x4) Absolute() Int64x4 +func (x Int64x4) Abs() Int64x4 -// Absolute computes the absolute value of each element. +// Abs computes the absolute value of each element. // // Asm: VPABSQ, CPU Feature: AVX512F -func (x Int64x8) Absolute() Int64x8 +func (x Int64x8) Abs() Int64x8 -/* AbsoluteMasked */ +/* AbsMasked */ -// AbsoluteMasked computes the absolute value of each element. +// AbsMasked computes the absolute value of each element. // // This operation is applied selectively under a write mask. // // Asm: VPABSB, CPU Feature: AVX512BW -func (x Int8x16) AbsoluteMasked(mask Mask8x16) Int8x16 +func (x Int8x16) AbsMasked(mask Mask8x16) Int8x16 -// AbsoluteMasked computes the absolute value of each element. +// AbsMasked computes the absolute value of each element. // // This operation is applied selectively under a write mask. // // Asm: VPABSB, CPU Feature: AVX512BW -func (x Int8x32) AbsoluteMasked(mask Mask8x32) Int8x32 +func (x Int8x32) AbsMasked(mask Mask8x32) Int8x32 -// AbsoluteMasked computes the absolute value of each element. +// AbsMasked computes the absolute value of each element. // // This operation is applied selectively under a write mask. 
// // Asm: VPABSB, CPU Feature: AVX512BW -func (x Int8x64) AbsoluteMasked(mask Mask8x64) Int8x64 +func (x Int8x64) AbsMasked(mask Mask8x64) Int8x64 -// AbsoluteMasked computes the absolute value of each element. +// AbsMasked computes the absolute value of each element. // // This operation is applied selectively under a write mask. // // Asm: VPABSW, CPU Feature: AVX512BW -func (x Int16x8) AbsoluteMasked(mask Mask16x8) Int16x8 +func (x Int16x8) AbsMasked(mask Mask16x8) Int16x8 -// AbsoluteMasked computes the absolute value of each element. +// AbsMasked computes the absolute value of each element. // // This operation is applied selectively under a write mask. // // Asm: VPABSW, CPU Feature: AVX512BW -func (x Int16x16) AbsoluteMasked(mask Mask16x16) Int16x16 +func (x Int16x16) AbsMasked(mask Mask16x16) Int16x16 -// AbsoluteMasked computes the absolute value of each element. +// AbsMasked computes the absolute value of each element. // // This operation is applied selectively under a write mask. // // Asm: VPABSW, CPU Feature: AVX512BW -func (x Int16x32) AbsoluteMasked(mask Mask16x32) Int16x32 +func (x Int16x32) AbsMasked(mask Mask16x32) Int16x32 -// AbsoluteMasked computes the absolute value of each element. +// AbsMasked computes the absolute value of each element. // // This operation is applied selectively under a write mask. // // Asm: VPABSD, CPU Feature: AVX512F -func (x Int32x4) AbsoluteMasked(mask Mask32x4) Int32x4 +func (x Int32x4) AbsMasked(mask Mask32x4) Int32x4 -// AbsoluteMasked computes the absolute value of each element. +// AbsMasked computes the absolute value of each element. // // This operation is applied selectively under a write mask. // // Asm: VPABSD, CPU Feature: AVX512F -func (x Int32x8) AbsoluteMasked(mask Mask32x8) Int32x8 +func (x Int32x8) AbsMasked(mask Mask32x8) Int32x8 -// AbsoluteMasked computes the absolute value of each element. +// AbsMasked computes the absolute value of each element. // // This operation is applied selectively under a write mask. // // Asm: VPABSD, CPU Feature: AVX512F -func (x Int32x16) AbsoluteMasked(mask Mask32x16) Int32x16 +func (x Int32x16) AbsMasked(mask Mask32x16) Int32x16 -// AbsoluteMasked computes the absolute value of each element. +// AbsMasked computes the absolute value of each element. // // This operation is applied selectively under a write mask. // // Asm: VPABSQ, CPU Feature: AVX512F -func (x Int64x2) AbsoluteMasked(mask Mask64x2) Int64x2 +func (x Int64x2) AbsMasked(mask Mask64x2) Int64x2 -// AbsoluteMasked computes the absolute value of each element. +// AbsMasked computes the absolute value of each element. // // This operation is applied selectively under a write mask. // // Asm: VPABSQ, CPU Feature: AVX512F -func (x Int64x4) AbsoluteMasked(mask Mask64x4) Int64x4 +func (x Int64x4) AbsMasked(mask Mask64x4) Int64x4 -// AbsoluteMasked computes the absolute value of each element. +// AbsMasked computes the absolute value of each element. // // This operation is applied selectively under a write mask. // // Asm: VPABSQ, CPU Feature: AVX512F -func (x Int64x8) AbsoluteMasked(mask Mask64x8) Int64x8 +func (x Int64x8) AbsMasked(mask Mask64x8) Int64x8 /* Add */ @@ -304,45 +304,125 @@ func (x Uint64x4) Add(y Uint64x4) Uint64x4 // Asm: VPADDQ, CPU Feature: AVX512F func (x Uint64x8) Add(y Uint64x8) Uint64x8 -/* AddDotProd */ +/* AddDotProdPairsSaturated */ -// AddDotProd performs dot products on pairs of elements of y and z and then adds x. 
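The Abs and AbsMasked entries above describe per-element behavior; here is a minimal scalar sketch of that behavior for the Int8x16 shape. The zeroing of unselected lanes and the wrap of the most negative value are assumptions of this sketch, not facts stated in the comments above.

package main

import "fmt"

// absMasked8 mirrors, lane by lane, what the Int8x16 Abs/AbsMasked doc
// comments above describe: |x[i]| where the mask lane is set. Unselected
// lanes are zeroed here purely as an illustrative assumption.
func absMasked8(x [16]int8, mask [16]bool) [16]int8 {
	var r [16]int8
	for i, v := range x {
		if !mask[i] {
			continue // assumed zeroing for unselected lanes
		}
		if v < 0 {
			v = -v // int8 overflow: the absolute value of -128 wraps back to -128
		}
		r[i] = v
	}
	return r
}

func main() {
	x := [16]int8{-3, 5, -128, 7}
	m := [16]bool{true, false, true, true}
	fmt.Println(absMasked8(x, m)) // [3 0 -128 7 0 0 ...]
}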
+// AddDotProdPairsSaturated performs dot products on pairs of elements of y and z and then adds x. // -// Asm: VPDPWSSD, CPU Feature: AVXVNNI -func (x Int32x4) AddDotProd(y Int16x8, z Int16x8) Int32x4 +// Asm: VPDPWSSDS, CPU Feature: AVXVNNI +func (x Int32x4) AddDotProdPairsSaturated(y Int16x8, z Int16x8) Int32x4 + +// AddDotProdPairsSaturated performs dot products on pairs of elements of y and z and then adds x. +// +// Asm: VPDPWSSDS, CPU Feature: AVXVNNI +func (x Int32x8) AddDotProdPairsSaturated(y Int16x16, z Int16x16) Int32x8 + +// AddDotProdPairsSaturated performs dot products on pairs of elements of y and z and then adds x. +// +// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI +func (x Int32x16) AddDotProdPairsSaturated(y Int16x32, z Int16x32) Int32x16 + +/* AddDotProdPairsSaturatedMasked */ + +// AddDotProdPairsSaturatedMasked performs dot products on pairs of elements of y and z and then adds x. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI +func (x Int32x4) AddDotProdPairsSaturatedMasked(y Int16x8, z Int16x8, mask Mask32x4) Int32x4 + +// AddDotProdPairsSaturatedMasked performs dot products on pairs of elements of y and z and then adds x. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI +func (x Int32x8) AddDotProdPairsSaturatedMasked(y Int16x16, z Int16x16, mask Mask32x8) Int32x8 + +// AddDotProdPairsSaturatedMasked performs dot products on pairs of elements of y and z and then adds x. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI +func (x Int32x16) AddDotProdPairsSaturatedMasked(y Int16x32, z Int16x32, mask Mask32x16) Int32x16 + +/* AddDotProdQuadruple */ + +// AddDotProdQuadruple performs dot products on groups of 4 elements of x and y and then adds z. +// +// Asm: VPDPBUSD, CPU Feature: AVXVNNI +func (x Int8x16) AddDotProdQuadruple(y Uint8x16, z Int32x4) Int32x4 + +// AddDotProdQuadruple performs dot products on groups of 4 elements of x and y and then adds z. +// +// Asm: VPDPBUSD, CPU Feature: AVXVNNI +func (x Int8x32) AddDotProdQuadruple(y Uint8x32, z Int32x8) Int32x8 + +// AddDotProdQuadruple performs dot products on groups of 4 elements of x and y and then adds z. +// +// Asm: VPDPBUSD, CPU Feature: AVX512VNNI +func (x Int8x64) AddDotProdQuadruple(y Uint8x64, z Int32x16) Int32x16 + +/* AddDotProdQuadrupleMasked */ + +// AddDotProdQuadrupleMasked performs dot products on groups of 4 elements of x and y and then adds z. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPDPBUSD, CPU Feature: AVX512VNNI +func (x Int8x16) AddDotProdQuadrupleMasked(y Uint8x16, z Int32x4, mask Mask32x4) Int32x4 + +// AddDotProdQuadrupleMasked performs dot products on groups of 4 elements of x and y and then adds z. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPDPBUSD, CPU Feature: AVX512VNNI +func (x Int8x32) AddDotProdQuadrupleMasked(y Uint8x32, z Int32x8, mask Mask32x8) Int32x8 + +// AddDotProdQuadrupleMasked performs dot products on groups of 4 elements of x and y and then adds z. +// +// This operation is applied selectively under a write mask. 
+// +// Asm: VPDPBUSD, CPU Feature: AVX512VNNI +func (x Int8x64) AddDotProdQuadrupleMasked(y Uint8x64, z Int32x16, mask Mask32x16) Int32x16 + +/* AddDotProdQuadrupleSaturated */ + +// AddDotProdQuadrupleSaturated performs dot products on groups of 4 elements of x and y and then adds z. +// +// Asm: VPDPBUSDS, CPU Feature: AVXVNNI +func (x Int8x16) AddDotProdQuadrupleSaturated(y Uint8x16, z Int32x4) Int32x4 -// AddDotProd performs dot products on pairs of elements of y and z and then adds x. +// AddDotProdQuadrupleSaturated performs dot products on groups of 4 elements of x and y and then adds z. // -// Asm: VPDPWSSD, CPU Feature: AVXVNNI -func (x Int32x8) AddDotProd(y Int16x16, z Int16x16) Int32x8 +// Asm: VPDPBUSDS, CPU Feature: AVXVNNI +func (x Int8x32) AddDotProdQuadrupleSaturated(y Uint8x32, z Int32x8) Int32x8 -// AddDotProd performs dot products on pairs of elements of y and z and then adds x. +// AddDotProdQuadrupleSaturated performs dot products on groups of 4 elements of x and y and then adds z. // -// Asm: VPDPWSSD, CPU Feature: AVX512VNNI -func (x Int32x16) AddDotProd(y Int16x32, z Int16x32) Int32x16 +// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI +func (x Int8x64) AddDotProdQuadrupleSaturated(y Uint8x64, z Int32x16) Int32x16 -/* AddDotProdMasked */ +/* AddDotProdQuadrupleSaturatedMasked */ -// AddDotProdMasked performs dot products on pairs of elements of y and z and then adds x. +// AddDotProdQuadrupleSaturatedMasked performs dot products on groups of 4 elements of x and y and then adds z. // // This operation is applied selectively under a write mask. // -// Asm: VPDPWSSD, CPU Feature: AVX512VNNI -func (x Int32x4) AddDotProdMasked(y Int16x8, z Int16x8, mask Mask32x4) Int32x4 +// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI +func (x Int8x16) AddDotProdQuadrupleSaturatedMasked(y Uint8x16, z Int32x4, mask Mask32x4) Int32x4 -// AddDotProdMasked performs dot products on pairs of elements of y and z and then adds x. +// AddDotProdQuadrupleSaturatedMasked performs dot products on groups of 4 elements of x and y and then adds z. // // This operation is applied selectively under a write mask. // -// Asm: VPDPWSSD, CPU Feature: AVX512VNNI -func (x Int32x8) AddDotProdMasked(y Int16x16, z Int16x16, mask Mask32x8) Int32x8 -// AddDotProdMasked performs dot products on pairs of elements of y and z and then adds x. +// AddDotProdQuadrupleSaturatedMasked performs dot products on groups of 4 elements of x and y and then adds z. // // This operation is applied selectively under a write mask. // -// Asm: VPDPWSSD, CPU Feature: AVX512VNNI -func (x Int32x16) AddDotProdMasked(y Int16x32, z Int16x32, mask Mask32x16) Int32x16 +// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI +func (x Int8x64) AddDotProdQuadrupleSaturatedMasked(y Uint8x64, z Int32x16, mask Mask32x16) Int32x16 /* AddMasked */ @@ -678,32 +758,32 @@ func (x Int16x32) AddSaturated(y Int16x32) Int16x32 // AddSaturated adds corresponding elements of two vectors with saturation. // -// Asm: VPADDSB, CPU Feature: AVX +// Asm: VPADDUSB, CPU Feature: AVX func (x Uint8x16) AddSaturated(y Uint8x16) Uint8x16 // AddSaturated adds corresponding elements of two vectors with saturation.
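A scalar sketch of the unmasked AddDotProdQuadruple described above, for the 128-bit shapes (x Int8x16, y Uint8x16, z Int32x4). Treating the per-group accumulation as plain int32 arithmetic with no intermediate saturation is an assumption of this sketch; the separate Saturated variant suggests the distinction matters.

// addDotProdQuadruple is a scalar model of the behavior documented above:
// for each 32-bit lane i, take the group of 4 bytes x[4i..4i+3] and
// y[4i..4i+3], form their dot product with each product widened to int32
// (a widening assumption; the doc comments do not spell it out), and add
// the sum to z[i].
func addDotProdQuadruple(x [16]int8, y [16]uint8, z [4]int32) [4]int32 {
	var r [4]int32
	for i := 0; i < 4; i++ {
		sum := z[i]
		for j := 0; j < 4; j++ {
			sum += int32(x[4*i+j]) * int32(y[4*i+j])
		}
		r[i] = sum
	}
	return r
}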
// -// Asm: VPADDSB, CPU Feature: AVX2 +// Asm: VPADDUSB, CPU Feature: AVX2 func (x Uint8x32) AddSaturated(y Uint8x32) Uint8x32 // AddSaturated adds corresponding elements of two vectors with saturation. // -// Asm: VPADDSB, CPU Feature: AVX512BW +// Asm: VPADDUSB, CPU Feature: AVX512BW func (x Uint8x64) AddSaturated(y Uint8x64) Uint8x64 // AddSaturated adds corresponding elements of two vectors with saturation. // -// Asm: VPADDSW, CPU Feature: AVX +// Asm: VPADDUSW, CPU Feature: AVX func (x Uint16x8) AddSaturated(y Uint16x8) Uint16x8 // AddSaturated adds corresponding elements of two vectors with saturation. // -// Asm: VPADDSW, CPU Feature: AVX2 +// Asm: VPADDUSW, CPU Feature: AVX2 func (x Uint16x16) AddSaturated(y Uint16x16) Uint16x16 // AddSaturated adds corresponding elements of two vectors with saturation. // -// Asm: VPADDSW, CPU Feature: AVX512BW +// Asm: VPADDUSW, CPU Feature: AVX512BW func (x Uint16x32) AddSaturated(y Uint16x32) Uint16x32 /* AddSaturatedMasked */ @@ -754,42 +834,42 @@ func (x Int16x32) AddSaturatedMasked(y Int16x32, mask Mask16x32) Int16x32 // // This operation is applied selectively under a write mask. // -// Asm: VPADDSB, CPU Feature: AVX512BW +// Asm: VPADDUSB, CPU Feature: AVX512BW func (x Uint8x16) AddSaturatedMasked(y Uint8x16, mask Mask8x16) Uint8x16 // AddSaturatedMasked adds corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPADDSB, CPU Feature: AVX512BW +// Asm: VPADDUSB, CPU Feature: AVX512BW func (x Uint8x32) AddSaturatedMasked(y Uint8x32, mask Mask8x32) Uint8x32 // AddSaturatedMasked adds corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPADDSB, CPU Feature: AVX512BW +// Asm: VPADDUSB, CPU Feature: AVX512BW func (x Uint8x64) AddSaturatedMasked(y Uint8x64, mask Mask8x64) Uint8x64 // AddSaturatedMasked adds corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPADDSW, CPU Feature: AVX512BW +// Asm: VPADDUSW, CPU Feature: AVX512BW func (x Uint16x8) AddSaturatedMasked(y Uint16x8, mask Mask16x8) Uint16x8 // AddSaturatedMasked adds corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPADDSW, CPU Feature: AVX512BW +// Asm: VPADDUSW, CPU Feature: AVX512BW func (x Uint16x16) AddSaturatedMasked(y Uint16x16, mask Mask16x16) Uint16x16 // AddSaturatedMasked adds corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPADDSW, CPU Feature: AVX512BW +// Asm: VPADDUSW, CPU Feature: AVX512BW func (x Uint16x32) AddSaturatedMasked(y Uint16x32, mask Mask16x32) Uint16x32 /* AddSub */ @@ -1230,158 +1310,6 @@ func (x Uint64x4) AndNotMasked(y Uint64x4, mask Mask64x4) Uint64x4 // Asm: VPANDNQ, CPU Feature: AVX512F func (x Uint64x8) AndNotMasked(y Uint64x8, mask Mask64x8) Uint64x8 -/* ApproximateReciprocal */ - -// ApproximateReciprocal computes an approximate reciprocal of each element. -// -// Asm: VRCPPS, CPU Feature: AVX -func (x Float32x4) ApproximateReciprocal() Float32x4 - -// ApproximateReciprocal computes an approximate reciprocal of each element. -// -// Asm: VRCPPS, CPU Feature: AVX -func (x Float32x8) ApproximateReciprocal() Float32x8 - -// ApproximateReciprocal computes an approximate reciprocal of each element. 
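The annotation fixes in this hunk (VPADDSB/VPADDSW becoming VPADDUSB/VPADDUSW on the unsigned receivers) come down to which clamp applies. A scalar sketch of the unsigned byte case that the AddSaturated doc comment describes:

// addSaturatedU8 models Uint8x16.AddSaturated as documented above:
// each lane is added and clamped to the uint8 range instead of wrapping.
func addSaturatedU8(x, y [16]uint8) [16]uint8 {
	var r [16]uint8
	for i := range x {
		s := uint16(x[i]) + uint16(y[i]) // widen so the carry is visible
		if s > 255 {
			s = 255 // saturate at the top of the unsigned range
		}
		r[i] = uint8(s)
	}
	return r
}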
-// -// Asm: VRCP14PS, CPU Feature: AVX512F -func (x Float32x16) ApproximateReciprocal() Float32x16 - -// ApproximateReciprocal computes an approximate reciprocal of each element. -// -// Asm: VRCP14PD, CPU Feature: AVX512F -func (x Float64x2) ApproximateReciprocal() Float64x2 - -// ApproximateReciprocal computes an approximate reciprocal of each element. -// -// Asm: VRCP14PD, CPU Feature: AVX512F -func (x Float64x4) ApproximateReciprocal() Float64x4 - -// ApproximateReciprocal computes an approximate reciprocal of each element. -// -// Asm: VRCP14PD, CPU Feature: AVX512F -func (x Float64x8) ApproximateReciprocal() Float64x8 - -/* ApproximateReciprocalMasked */ - -// ApproximateReciprocalMasked computes an approximate reciprocal of each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VRCP14PS, CPU Feature: AVX512F -func (x Float32x4) ApproximateReciprocalMasked(mask Mask32x4) Float32x4 - -// ApproximateReciprocalMasked computes an approximate reciprocal of each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VRCP14PS, CPU Feature: AVX512F -func (x Float32x8) ApproximateReciprocalMasked(mask Mask32x8) Float32x8 - -// ApproximateReciprocalMasked computes an approximate reciprocal of each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VRCP14PS, CPU Feature: AVX512F -func (x Float32x16) ApproximateReciprocalMasked(mask Mask32x16) Float32x16 - -// ApproximateReciprocalMasked computes an approximate reciprocal of each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VRCP14PD, CPU Feature: AVX512F -func (x Float64x2) ApproximateReciprocalMasked(mask Mask64x2) Float64x2 - -// ApproximateReciprocalMasked computes an approximate reciprocal of each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VRCP14PD, CPU Feature: AVX512F -func (x Float64x4) ApproximateReciprocalMasked(mask Mask64x4) Float64x4 - -// ApproximateReciprocalMasked computes an approximate reciprocal of each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VRCP14PD, CPU Feature: AVX512F -func (x Float64x8) ApproximateReciprocalMasked(mask Mask64x8) Float64x8 - -/* ApproximateReciprocalOfSqrt */ - -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. -// -// Asm: VRSQRTPS, CPU Feature: AVX -func (x Float32x4) ApproximateReciprocalOfSqrt() Float32x4 - -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. -// -// Asm: VRSQRTPS, CPU Feature: AVX -func (x Float32x8) ApproximateReciprocalOfSqrt() Float32x8 - -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. -// -// Asm: VRSQRT14PS, CPU Feature: AVX512F -func (x Float32x16) ApproximateReciprocalOfSqrt() Float32x16 - -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. -// -// Asm: VRSQRT14PD, CPU Feature: AVX512F -func (x Float64x2) ApproximateReciprocalOfSqrt() Float64x2 - -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. -// -// Asm: VRSQRT14PD, CPU Feature: AVX512F -func (x Float64x4) ApproximateReciprocalOfSqrt() Float64x4 - -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. 
-// -// Asm: VRSQRT14PD, CPU Feature: AVX512F -func (x Float64x8) ApproximateReciprocalOfSqrt() Float64x8 - -/* ApproximateReciprocalOfSqrtMasked */ - -// ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VRSQRT14PS, CPU Feature: AVX512F -func (x Float32x4) ApproximateReciprocalOfSqrtMasked(mask Mask32x4) Float32x4 - -// ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VRSQRT14PS, CPU Feature: AVX512F -func (x Float32x8) ApproximateReciprocalOfSqrtMasked(mask Mask32x8) Float32x8 - -// ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VRSQRT14PS, CPU Feature: AVX512F -func (x Float32x16) ApproximateReciprocalOfSqrtMasked(mask Mask32x16) Float32x16 - -// ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VRSQRT14PD, CPU Feature: AVX512F -func (x Float64x2) ApproximateReciprocalOfSqrtMasked(mask Mask64x2) Float64x2 - -// ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VRSQRT14PD, CPU Feature: AVX512F -func (x Float64x4) ApproximateReciprocalOfSqrtMasked(mask Mask64x4) Float64x4 - -// ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VRSQRT14PD, CPU Feature: AVX512F -func (x Float64x8) ApproximateReciprocalOfSqrtMasked(mask Mask64x8) Float64x8 - /* Average */ // Average computes the rounded average of corresponding elements. @@ -1942,6 +1870,44 @@ func (x Float32x8) ConvertToUint32Masked(mask Mask32x8) Uint32x8 // Asm: VCVTPS2UDQ, CPU Feature: AVX512F func (x Float32x16) ConvertToUint32Masked(mask Mask32x16) Uint32x16 +/* CopySign */ + +// CopySign returns the product of the first operand with -1, 0, or 1, +// whichever constant is nearest to the value of the second operand. +// +// Asm: VPSIGNB, CPU Feature: AVX +func (x Int8x16) CopySign(y Int8x16) Int8x16 + +// CopySign returns the product of the first operand with -1, 0, or 1, +// whichever constant is nearest to the value of the second operand. +// +// Asm: VPSIGNB, CPU Feature: AVX2 +func (x Int8x32) CopySign(y Int8x32) Int8x32 + +// CopySign returns the product of the first operand with -1, 0, or 1, +// whichever constant is nearest to the value of the second operand. +// +// Asm: VPSIGNW, CPU Feature: AVX +func (x Int16x8) CopySign(y Int16x8) Int16x8 + +// CopySign returns the product of the first operand with -1, 0, or 1, +// whichever constant is nearest to the value of the second operand. +// +// Asm: VPSIGNW, CPU Feature: AVX2 +func (x Int16x16) CopySign(y Int16x16) Int16x16 + +// CopySign returns the product of the first operand with -1, 0, or 1, +// whichever constant is nearest to the value of the second operand. +// +// Asm: VPSIGND, CPU Feature: AVX +func (x Int32x4) CopySign(y Int32x4) Int32x4 + +// CopySign returns the product of the first operand with -1, 0, or 1, +// whichever constant is nearest to the value of the second operand. 
+// +// Asm: VPSIGND, CPU Feature: AVX2 +func (x Int32x8) CopySign(y Int32x8) Int32x8 + /* Div */ // Div divides elements of two vectors. @@ -2018,22 +1984,97 @@ func (x Float64x4) DivMasked(y Float64x4, mask Mask64x4) Float64x4 // Asm: VDIVPD, CPU Feature: AVX512F func (x Float64x8) DivMasked(y Float64x8, mask Mask64x8) Float64x8 -/* DotProdBroadcast */ +/* DotProdPairs */ + +// DotProdPairs multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. +// +// Asm: VPMADDWD, CPU Feature: AVX +func (x Int16x8) DotProdPairs(y Int16x8) Int32x4 + +// DotProdPairs multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. +// +// Asm: VPMADDWD, CPU Feature: AVX2 +func (x Int16x16) DotProdPairs(y Int16x16) Int32x8 + +// DotProdPairs multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. +// +// Asm: VPMADDWD, CPU Feature: AVX512BW +func (x Int16x32) DotProdPairs(y Int16x32) Int32x16 + +/* DotProdPairsMasked */ + +// DotProdPairsMasked multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPMADDWD, CPU Feature: AVX512BW +func (x Int16x8) DotProdPairsMasked(y Int16x8, mask Mask16x8) Int32x4 + +// DotProdPairsMasked multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPMADDWD, CPU Feature: AVX512BW +func (x Int16x16) DotProdPairsMasked(y Int16x16, mask Mask16x16) Int32x8 + +// DotProdPairsMasked multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPMADDWD, CPU Feature: AVX512BW +func (x Int16x32) DotProdPairsMasked(y Int16x32, mask Mask16x32) Int32x16 + +/* DotProdPairsSaturated */ + +// DotProdPairsSaturated multiplies the elements and add the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. +// +// Asm: VPMADDUBSW, CPU Feature: AVX +func (x Uint8x16) DotProdPairsSaturated(y Int8x16) Int16x8 + +// DotProdPairsSaturated multiplies the elements and add the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. +// +// Asm: VPMADDUBSW, CPU Feature: AVX2 +func (x Uint8x32) DotProdPairsSaturated(y Int8x32) Int16x16 + +// DotProdPairsSaturated multiplies the elements and add the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. +// +// Asm: VPMADDUBSW, CPU Feature: AVX512BW +func (x Uint8x64) DotProdPairsSaturated(y Int8x64) Int16x32 + +/* DotProdPairsSaturatedMasked */ -// DotProdBroadcast multiplies all elements and broadcasts the sum. +// DotProdPairsSaturatedMasked multiplies the elements and add the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. +// +// This operation is applied selectively under a write mask. 
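The DotProdPairs entries above pack the pairing and widening into one sentence; here is a scalar sketch for the smallest shape (Int16x8 in, Int32x4 out). Widening each product to int32 before the pairwise add is this sketch's reading of "twice the input element size".

// dotProdPairs models Int16x8.DotProdPairs as documented above: adjacent
// pairs of widened products are summed, halving the lane count and
// doubling the lane width.
func dotProdPairs(x, y [8]int16) [4]int32 {
	var r [4]int32
	for i := 0; i < 4; i++ {
		r[i] = int32(x[2*i])*int32(y[2*i]) + int32(x[2*i+1])*int32(y[2*i+1])
	}
	return r
}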
// -// Asm: VDPPS, CPU Feature: AVX -func (x Float32x4) DotProdBroadcast(y Float32x4) Float32x4 +// Asm: VPMADDUBSW, CPU Feature: AVX512BW +func (x Uint8x16) DotProdPairsSaturatedMasked(y Int8x16, mask Mask16x8) Int16x8 -// DotProdBroadcast multiplies all elements and broadcasts the sum. +// DotProdPairsSaturatedMasked multiplies the elements and add the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. +// +// This operation is applied selectively under a write mask. // -// Asm: VDPPS, CPU Feature: AVX -func (x Float32x8) DotProdBroadcast(y Float32x8) Float32x8 +// Asm: VPMADDUBSW, CPU Feature: AVX512BW +func (x Uint8x32) DotProdPairsSaturatedMasked(y Int8x32, mask Mask16x16) Int16x16 -// DotProdBroadcast multiplies all elements and broadcasts the sum. +// DotProdPairsSaturatedMasked multiplies the elements and add the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. +// +// This operation is applied selectively under a write mask. // -// Asm: VDPPD, CPU Feature: AVX -func (x Float64x2) DotProdBroadcast(y Float64x2) Float64x2 +// Asm: VPMADDUBSW, CPU Feature: AVX512BW +func (x Uint8x64) DotProdPairsSaturatedMasked(y Int8x64, mask Mask16x32) Int16x32 /* Equal */ @@ -2803,235 +2844,7 @@ func (x Float64x4) FloorScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4 // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x8) FloorScaledResidueMasked(prec uint8, mask Mask64x8) Float64x8 -/* FusedMultiplyAdd */ - -// FusedMultiplyAdd performs (x * y) + z. -// -// Asm: VFMADD213PS, CPU Feature: AVX512F -func (x Float32x4) FusedMultiplyAdd(y Float32x4, z Float32x4) Float32x4 - -// FusedMultiplyAdd performs (x * y) + z. -// -// Asm: VFMADD213PS, CPU Feature: AVX512F -func (x Float32x8) FusedMultiplyAdd(y Float32x8, z Float32x8) Float32x8 - -// FusedMultiplyAdd performs (x * y) + z. -// -// Asm: VFMADD213PS, CPU Feature: AVX512F -func (x Float32x16) FusedMultiplyAdd(y Float32x16, z Float32x16) Float32x16 - -// FusedMultiplyAdd performs (x * y) + z. -// -// Asm: VFMADD213PD, CPU Feature: AVX512F -func (x Float64x2) FusedMultiplyAdd(y Float64x2, z Float64x2) Float64x2 - -// FusedMultiplyAdd performs (x * y) + z. -// -// Asm: VFMADD213PD, CPU Feature: AVX512F -func (x Float64x4) FusedMultiplyAdd(y Float64x4, z Float64x4) Float64x4 - -// FusedMultiplyAdd performs (x * y) + z. -// -// Asm: VFMADD213PD, CPU Feature: AVX512F -func (x Float64x8) FusedMultiplyAdd(y Float64x8, z Float64x8) Float64x8 - -/* FusedMultiplyAddMasked */ - -// FusedMultiplyAddMasked performs (x * y) + z. -// -// This operation is applied selectively under a write mask. -// -// Asm: VFMADD213PS, CPU Feature: AVX512F -func (x Float32x4) FusedMultiplyAddMasked(y Float32x4, z Float32x4, mask Mask32x4) Float32x4 - -// FusedMultiplyAddMasked performs (x * y) + z. -// -// This operation is applied selectively under a write mask. -// -// Asm: VFMADD213PS, CPU Feature: AVX512F -func (x Float32x8) FusedMultiplyAddMasked(y Float32x8, z Float32x8, mask Mask32x8) Float32x8 - -// FusedMultiplyAddMasked performs (x * y) + z. -// -// This operation is applied selectively under a write mask. -// -// Asm: VFMADD213PS, CPU Feature: AVX512F -func (x Float32x16) FusedMultiplyAddMasked(y Float32x16, z Float32x16, mask Mask32x16) Float32x16 - -// FusedMultiplyAddMasked performs (x * y) + z. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VFMADD213PD, CPU Feature: AVX512F -func (x Float64x2) FusedMultiplyAddMasked(y Float64x2, z Float64x2, mask Mask64x2) Float64x2 - -// FusedMultiplyAddMasked performs (x * y) + z. -// -// This operation is applied selectively under a write mask. -// -// Asm: VFMADD213PD, CPU Feature: AVX512F -func (x Float64x4) FusedMultiplyAddMasked(y Float64x4, z Float64x4, mask Mask64x4) Float64x4 - -// FusedMultiplyAddMasked performs (x * y) + z. -// -// This operation is applied selectively under a write mask. -// -// Asm: VFMADD213PD, CPU Feature: AVX512F -func (x Float64x8) FusedMultiplyAddMasked(y Float64x8, z Float64x8, mask Mask64x8) Float64x8 - -/* FusedMultiplyAddSub */ - -// FusedMultiplyAddSub performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. -// -// Asm: VFMADDSUB213PS, CPU Feature: AVX512F -func (x Float32x4) FusedMultiplyAddSub(y Float32x4, z Float32x4) Float32x4 - -// FusedMultiplyAddSub performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. -// -// Asm: VFMADDSUB213PS, CPU Feature: AVX512F -func (x Float32x8) FusedMultiplyAddSub(y Float32x8, z Float32x8) Float32x8 - -// FusedMultiplyAddSub performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. -// -// Asm: VFMADDSUB213PS, CPU Feature: AVX512F -func (x Float32x16) FusedMultiplyAddSub(y Float32x16, z Float32x16) Float32x16 - -// FusedMultiplyAddSub performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. -// -// Asm: VFMADDSUB213PD, CPU Feature: AVX512F -func (x Float64x2) FusedMultiplyAddSub(y Float64x2, z Float64x2) Float64x2 - -// FusedMultiplyAddSub performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. -// -// Asm: VFMADDSUB213PD, CPU Feature: AVX512F -func (x Float64x4) FusedMultiplyAddSub(y Float64x4, z Float64x4) Float64x4 - -// FusedMultiplyAddSub performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. -// -// Asm: VFMADDSUB213PD, CPU Feature: AVX512F -func (x Float64x8) FusedMultiplyAddSub(y Float64x8, z Float64x8) Float64x8 - -/* FusedMultiplyAddSubMasked */ - -// FusedMultiplyAddSubMasked performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VFMADDSUB213PS, CPU Feature: AVX512F -func (x Float32x4) FusedMultiplyAddSubMasked(y Float32x4, z Float32x4, mask Mask32x4) Float32x4 - -// FusedMultiplyAddSubMasked performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VFMADDSUB213PS, CPU Feature: AVX512F -func (x Float32x8) FusedMultiplyAddSubMasked(y Float32x8, z Float32x8, mask Mask32x8) Float32x8 - -// FusedMultiplyAddSubMasked performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VFMADDSUB213PS, CPU Feature: AVX512F -func (x Float32x16) FusedMultiplyAddSubMasked(y Float32x16, z Float32x16, mask Mask32x16) Float32x16 - -// FusedMultiplyAddSubMasked performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VFMADDSUB213PD, CPU Feature: AVX512F -func (x Float64x2) FusedMultiplyAddSubMasked(y Float64x2, z Float64x2, mask Mask64x2) Float64x2 - -// FusedMultiplyAddSubMasked performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VFMADDSUB213PD, CPU Feature: AVX512F -func (x Float64x4) FusedMultiplyAddSubMasked(y Float64x4, z Float64x4, mask Mask64x4) Float64x4 - -// FusedMultiplyAddSubMasked performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VFMADDSUB213PD, CPU Feature: AVX512F -func (x Float64x8) FusedMultiplyAddSubMasked(y Float64x8, z Float64x8, mask Mask64x8) Float64x8 - -/* FusedMultiplySubAdd */ - -// FusedMultiplySubAdd performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. -// -// Asm: VFMSUBADD213PS, CPU Feature: AVX512F -func (x Float32x4) FusedMultiplySubAdd(y Float32x4, z Float32x4) Float32x4 - -// FusedMultiplySubAdd performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. -// -// Asm: VFMSUBADD213PS, CPU Feature: AVX512F -func (x Float32x8) FusedMultiplySubAdd(y Float32x8, z Float32x8) Float32x8 - -// FusedMultiplySubAdd performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. -// -// Asm: VFMSUBADD213PS, CPU Feature: AVX512F -func (x Float32x16) FusedMultiplySubAdd(y Float32x16, z Float32x16) Float32x16 - -// FusedMultiplySubAdd performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. -// -// Asm: VFMSUBADD213PD, CPU Feature: AVX512F -func (x Float64x2) FusedMultiplySubAdd(y Float64x2, z Float64x2) Float64x2 - -// FusedMultiplySubAdd performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. -// -// Asm: VFMSUBADD213PD, CPU Feature: AVX512F -func (x Float64x4) FusedMultiplySubAdd(y Float64x4, z Float64x4) Float64x4 - -// FusedMultiplySubAdd performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. -// -// Asm: VFMSUBADD213PD, CPU Feature: AVX512F -func (x Float64x8) FusedMultiplySubAdd(y Float64x8, z Float64x8) Float64x8 - -/* FusedMultiplySubAddMasked */ - -// FusedMultiplySubAddMasked performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VFMSUBADD213PS, CPU Feature: AVX512F -func (x Float32x4) FusedMultiplySubAddMasked(y Float32x4, z Float32x4, mask Mask32x4) Float32x4 - -// FusedMultiplySubAddMasked performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VFMSUBADD213PS, CPU Feature: AVX512F -func (x Float32x8) FusedMultiplySubAddMasked(y Float32x8, z Float32x8, mask Mask32x8) Float32x8 - -// FusedMultiplySubAddMasked performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VFMSUBADD213PS, CPU Feature: AVX512F -func (x Float32x16) FusedMultiplySubAddMasked(y Float32x16, z Float32x16, mask Mask32x16) Float32x16 - -// FusedMultiplySubAddMasked performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VFMSUBADD213PD, CPU Feature: AVX512F -func (x Float64x2) FusedMultiplySubAddMasked(y Float64x2, z Float64x2, mask Mask64x2) Float64x2 - -// FusedMultiplySubAddMasked performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VFMSUBADD213PD, CPU Feature: AVX512F -func (x Float64x4) FusedMultiplySubAddMasked(y Float64x4, z Float64x4, mask Mask64x4) Float64x4 - -// FusedMultiplySubAddMasked performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VFMSUBADD213PD, CPU Feature: AVX512F -func (x Float64x8) FusedMultiplySubAddMasked(y Float64x8, z Float64x8, mask Mask64x8) Float64x8 - -/* GaloisFieldAffineTransform */ +/* GaloisFieldAffineTransform */ // GaloisFieldAffineTransform computes an affine transformation in GF(2^8): // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; @@ -5822,193 +5635,268 @@ func (x Int64x4) Mul(y Int64x4) Int64x4 // Asm: VPMULLQ, CPU Feature: AVX512DQ func (x Int64x8) Mul(y Int64x8) Int64x8 -/* MulEvenWiden */ +// Mul multiplies corresponding elements of two vectors. +// +// Asm: VPMULLW, CPU Feature: AVX +func (x Uint16x8) Mul(y Uint16x8) Uint16x8 -// MulEvenWiden multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VPMULDQ, CPU Feature: AVX -func (x Int32x4) MulEvenWiden(y Int32x4) Int64x2 +// Asm: VPMULLW, CPU Feature: AVX2 +func (x Uint16x16) Mul(y Uint16x16) Uint16x16 -// MulEvenWiden multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VPMULDQ, CPU Feature: AVX2 -func (x Int32x8) MulEvenWiden(y Int32x8) Int64x4 +// Asm: VPMULLW, CPU Feature: AVX512BW +func (x Uint16x32) Mul(y Uint16x32) Uint16x32 -// MulEvenWiden multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VPMULDQ, CPU Feature: AVX512F -func (x Int64x2) MulEvenWiden(y Int64x2) Int64x2 +// Asm: VPMULLD, CPU Feature: AVX +func (x Uint32x4) Mul(y Uint32x4) Uint32x4 -// MulEvenWiden multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VPMULDQ, CPU Feature: AVX512F -func (x Int64x4) MulEvenWiden(y Int64x4) Int64x4 +// Asm: VPMULLD, CPU Feature: AVX2 +func (x Uint32x8) Mul(y Uint32x8) Uint32x8 -// MulEvenWiden multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VPMULDQ, CPU Feature: AVX512F -func (x Int64x8) MulEvenWiden(y Int64x8) Int64x8 +// Asm: VPMULLD, CPU Feature: AVX512F +func (x Uint32x16) Mul(y Uint32x16) Uint32x16 -// MulEvenWiden multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VPMULUDQ, CPU Feature: AVX -func (x Uint32x4) MulEvenWiden(y Uint32x4) Uint64x2 +// Asm: VPMULLQ, CPU Feature: AVX512DQ +func (x Uint64x2) Mul(y Uint64x2) Uint64x2 -// MulEvenWiden multiplies even-indexed elements, widening the result. 
-// Result[i] = v1.Even[i] * v2.Even[i]. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VPMULUDQ, CPU Feature: AVX2 -func (x Uint32x8) MulEvenWiden(y Uint32x8) Uint64x4 +// Asm: VPMULLQ, CPU Feature: AVX512DQ +func (x Uint64x4) Mul(y Uint64x4) Uint64x4 -// MulEvenWiden multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. +// Mul multiplies corresponding elements of two vectors. +// +// Asm: VPMULLQ, CPU Feature: AVX512DQ +func (x Uint64x8) Mul(y Uint64x8) Uint64x8 + +/* MulAdd */ + +// MulAdd performs a fused (x * y) + z. // -// Asm: VPMULUDQ, CPU Feature: AVX512F -func (x Uint64x2) MulEvenWiden(y Uint64x2) Uint64x2 +// Asm: VFMADD213PS, CPU Feature: AVX512F +func (x Float32x4) MulAdd(y Float32x4, z Float32x4) Float32x4 -// MulEvenWiden multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. +// MulAdd performs a fused (x * y) + z. // -// Asm: VPMULUDQ, CPU Feature: AVX512F -func (x Uint64x4) MulEvenWiden(y Uint64x4) Uint64x4 +// Asm: VFMADD213PS, CPU Feature: AVX512F +func (x Float32x8) MulAdd(y Float32x8, z Float32x8) Float32x8 -// MulEvenWiden multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. +// MulAdd performs a fused (x * y) + z. +// +// Asm: VFMADD213PS, CPU Feature: AVX512F +func (x Float32x16) MulAdd(y Float32x16, z Float32x16) Float32x16 + +// MulAdd performs a fused (x * y) + z. // -// Asm: VPMULUDQ, CPU Feature: AVX512F -func (x Uint64x8) MulEvenWiden(y Uint64x8) Uint64x8 +// Asm: VFMADD213PD, CPU Feature: AVX512F +func (x Float64x2) MulAdd(y Float64x2, z Float64x2) Float64x2 -/* MulEvenWidenMasked */ +// MulAdd performs a fused (x * y) + z. +// +// Asm: VFMADD213PD, CPU Feature: AVX512F +func (x Float64x4) MulAdd(y Float64x4, z Float64x4) Float64x4 -// MulEvenWidenMasked multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. +// MulAdd performs a fused (x * y) + z. +// +// Asm: VFMADD213PD, CPU Feature: AVX512F +func (x Float64x8) MulAdd(y Float64x8, z Float64x8) Float64x8 + +/* MulAddMasked */ + +// MulAddMasked performs a fused (x * y) + z. // // This operation is applied selectively under a write mask. // -// Asm: VPMULDQ, CPU Feature: AVX512F -func (x Int64x2) MulEvenWidenMasked(y Int64x2, mask Mask64x2) Int64x2 +// Asm: VFMADD213PS, CPU Feature: AVX512F +func (x Float32x4) MulAddMasked(y Float32x4, z Float32x4, mask Mask32x4) Float32x4 -// MulEvenWidenMasked multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. +// MulAddMasked performs a fused (x * y) + z. // // This operation is applied selectively under a write mask. // -// Asm: VPMULDQ, CPU Feature: AVX512F -func (x Int64x4) MulEvenWidenMasked(y Int64x4, mask Mask64x4) Int64x4 +// Asm: VFMADD213PS, CPU Feature: AVX512F +func (x Float32x8) MulAddMasked(y Float32x8, z Float32x8, mask Mask32x8) Float32x8 -// MulEvenWidenMasked multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. +// MulAddMasked performs a fused (x * y) + z. // // This operation is applied selectively under a write mask. // -// Asm: VPMULDQ, CPU Feature: AVX512F -func (x Int64x8) MulEvenWidenMasked(y Int64x8, mask Mask64x8) Int64x8 +// Asm: VFMADD213PS, CPU Feature: AVX512F +func (x Float32x16) MulAddMasked(y Float32x16, z Float32x16, mask Mask32x16) Float32x16 -// MulEvenWidenMasked multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. 
+// MulAddMasked performs a fused (x * y) + z. // // This operation is applied selectively under a write mask. // -// Asm: VPMULUDQ, CPU Feature: AVX512F -func (x Uint64x2) MulEvenWidenMasked(y Uint64x2, mask Mask64x2) Uint64x2 +// Asm: VFMADD213PD, CPU Feature: AVX512F +func (x Float64x2) MulAddMasked(y Float64x2, z Float64x2, mask Mask64x2) Float64x2 -// MulEvenWidenMasked multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. +// MulAddMasked performs a fused (x * y) + z. // // This operation is applied selectively under a write mask. // -// Asm: VPMULUDQ, CPU Feature: AVX512F -func (x Uint64x4) MulEvenWidenMasked(y Uint64x4, mask Mask64x4) Uint64x4 +// Asm: VFMADD213PD, CPU Feature: AVX512F +func (x Float64x4) MulAddMasked(y Float64x4, z Float64x4, mask Mask64x4) Float64x4 -// MulEvenWidenMasked multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. +// MulAddMasked performs a fused (x * y) + z. // // This operation is applied selectively under a write mask. // -// Asm: VPMULUDQ, CPU Feature: AVX512F -func (x Uint64x8) MulEvenWidenMasked(y Uint64x8, mask Mask64x8) Uint64x8 +// Asm: VFMADD213PD, CPU Feature: AVX512F +func (x Float64x8) MulAddMasked(y Float64x8, z Float64x8, mask Mask64x8) Float64x8 -/* MulHigh */ +/* MulAddSub */ -// MulHigh multiplies elements and stores the high part of the result. +// MulAddSub performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // -// Asm: VPMULHW, CPU Feature: AVX -func (x Int16x8) MulHigh(y Int16x8) Int16x8 +// Asm: VFMADDSUB213PS, CPU Feature: AVX512F +func (x Float32x4) MulAddSub(y Float32x4, z Float32x4) Float32x4 -// MulHigh multiplies elements and stores the high part of the result. +// MulAddSub performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // -// Asm: VPMULHW, CPU Feature: AVX2 -func (x Int16x16) MulHigh(y Int16x16) Int16x16 +// Asm: VFMADDSUB213PS, CPU Feature: AVX512F +func (x Float32x8) MulAddSub(y Float32x8, z Float32x8) Float32x8 -// MulHigh multiplies elements and stores the high part of the result. +// MulAddSub performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // -// Asm: VPMULHW, CPU Feature: AVX512BW -func (x Int16x32) MulHigh(y Int16x32) Int16x32 +// Asm: VFMADDSUB213PS, CPU Feature: AVX512F +func (x Float32x16) MulAddSub(y Float32x16, z Float32x16) Float32x16 -// MulHigh multiplies elements and stores the high part of the result. +// MulAddSub performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // -// Asm: VPMULHUW, CPU Feature: AVX -func (x Uint16x8) MulHigh(y Uint16x8) Uint16x8 +// Asm: VFMADDSUB213PD, CPU Feature: AVX512F +func (x Float64x2) MulAddSub(y Float64x2, z Float64x2) Float64x2 -// MulHigh multiplies elements and stores the high part of the result. +// MulAddSub performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // -// Asm: VPMULHUW, CPU Feature: AVX2 -func (x Uint16x16) MulHigh(y Uint16x16) Uint16x16 +// Asm: VFMADDSUB213PD, CPU Feature: AVX512F +func (x Float64x4) MulAddSub(y Float64x4, z Float64x4) Float64x4 -// MulHigh multiplies elements and stores the high part of the result. +// MulAddSub performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. 
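Several entries above hinge on the word "fused": the multiply and add round once, not twice. A small self-contained example of what single rounding buys, using the standard library's math.FMA only as a scalar reference point for the per-lane MulAdd described above:

package main

import (
	"fmt"
	"math"
)

func main() {
	eps := math.Nextafter(1, 2) - 1 // 2^-52, one ulp at 1.0
	x, y, z := 1+eps, 1-eps, -1.0
	fused := math.FMA(x, y, z) // one rounding, as the MulAdd docs above state
	naive := x*y + z           // two roundings; the low-order term is lost
	fmt.Println(fused, naive)  // prints roughly -4.93e-32 and 0
}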
// -// Asm: VPMULHUW, CPU Feature: AVX512BW -func (x Uint16x32) MulHigh(y Uint16x32) Uint16x32 +// Asm: VFMADDSUB213PD, CPU Feature: AVX512F +func (x Float64x8) MulAddSub(y Float64x8, z Float64x8) Float64x8 -/* MulHighMasked */ +/* MulAddSubMasked */ -// MulHighMasked multiplies elements and stores the high part of the result. +// MulAddSubMasked performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMULHW, CPU Feature: AVX512BW -func (x Int16x8) MulHighMasked(y Int16x8, mask Mask16x8) Int16x8 +// Asm: VFMADDSUB213PS, CPU Feature: AVX512F +func (x Float32x4) MulAddSubMasked(y Float32x4, z Float32x4, mask Mask32x4) Float32x4 -// MulHighMasked multiplies elements and stores the high part of the result. +// MulAddSubMasked performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMULHW, CPU Feature: AVX512BW -func (x Int16x16) MulHighMasked(y Int16x16, mask Mask16x16) Int16x16 +// Asm: VFMADDSUB213PS, CPU Feature: AVX512F +func (x Float32x8) MulAddSubMasked(y Float32x8, z Float32x8, mask Mask32x8) Float32x8 -// MulHighMasked multiplies elements and stores the high part of the result. +// MulAddSubMasked performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMULHW, CPU Feature: AVX512BW -func (x Int16x32) MulHighMasked(y Int16x32, mask Mask16x32) Int16x32 +// Asm: VFMADDSUB213PS, CPU Feature: AVX512F +func (x Float32x16) MulAddSubMasked(y Float32x16, z Float32x16, mask Mask32x16) Float32x16 + +// MulAddSubMasked performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. +// +// This operation is applied selectively under a write mask. +// +// Asm: VFMADDSUB213PD, CPU Feature: AVX512F +func (x Float64x2) MulAddSubMasked(y Float64x2, z Float64x2, mask Mask64x2) Float64x2 + +// MulAddSubMasked performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. +// +// This operation is applied selectively under a write mask. +// +// Asm: VFMADDSUB213PD, CPU Feature: AVX512F +func (x Float64x4) MulAddSubMasked(y Float64x4, z Float64x4, mask Mask64x4) Float64x4 + +// MulAddSubMasked performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. +// +// This operation is applied selectively under a write mask. +// +// Asm: VFMADDSUB213PD, CPU Feature: AVX512F +func (x Float64x8) MulAddSubMasked(y Float64x8, z Float64x8, mask Mask64x8) Float64x8 + +/* MulEvenWiden */ + +// MulEvenWiden multiplies even-indexed elements, widening the result. +// Result[i] = v1.Even[i] * v2.Even[i]. +// +// Asm: VPMULDQ, CPU Feature: AVX +func (x Int32x4) MulEvenWiden(y Int32x4) Int64x2 + +// MulEvenWiden multiplies even-indexed elements, widening the result. +// Result[i] = v1.Even[i] * v2.Even[i]. +// +// Asm: VPMULDQ, CPU Feature: AVX2 +func (x Int32x8) MulEvenWiden(y Int32x8) Int64x4 + +// MulEvenWiden multiplies even-indexed elements, widening the result. +// Result[i] = v1.Even[i] * v2.Even[i]. +// +// Asm: VPMULUDQ, CPU Feature: AVX +func (x Uint32x4) MulEvenWiden(y Uint32x4) Uint64x2 + +// MulEvenWiden multiplies even-indexed elements, widening the result. +// Result[i] = v1.Even[i] * v2.Even[i]. 
+// +// Asm: VPMULUDQ, CPU Feature: AVX2 +func (x Uint32x8) MulEvenWiden(y Uint32x8) Uint64x4 + +/* MulHigh */ + +// MulHigh multiplies elements and stores the high part of the result. +// +// Asm: VPMULHUW, CPU Feature: AVX +func (x Int16x8) MulHigh(y Int16x8) Int16x8 + +// MulHigh multiplies elements and stores the high part of the result. +// +// Asm: VPMULHUW, CPU Feature: AVX2 +func (x Int16x16) MulHigh(y Int16x16) Int16x16 + +// MulHigh multiplies elements and stores the high part of the result. +// +// Asm: VPMULHW, CPU Feature: AVX512BW +func (x Int16x32) MulHigh(y Int16x32) Int16x32 + +/* MulHighMasked */ // MulHighMasked multiplies elements and stores the high part of the result. // // This operation is applied selectively under a write mask. // // Asm: VPMULHUW, CPU Feature: AVX512BW -func (x Uint16x8) MulHighMasked(y Uint16x8, mask Mask16x8) Uint16x8 +func (x Int16x8) MulHighMasked(y Int16x8, mask Mask16x8) Int16x8 // MulHighMasked multiplies elements and stores the high part of the result. // // This operation is applied selectively under a write mask. // -// Asm: VPMULHUW, CPU Feature: AVX512BW -func (x Uint16x16) MulHighMasked(y Uint16x16, mask Mask16x16) Uint16x16 +// Asm: VPMULHW, CPU Feature: AVX512BW +func (x Int16x16) MulHighMasked(y Int16x16, mask Mask16x16) Int16x16 // MulHighMasked multiplies elements and stores the high part of the result. // // This operation is applied selectively under a write mask. // // Asm: VPMULHUW, CPU Feature: AVX512BW -func (x Uint16x32) MulHighMasked(y Uint16x32, mask Mask16x32) Uint16x32 +func (x Int16x32) MulHighMasked(y Int16x32, mask Mask16x32) Int16x32 /* MulMasked */ @@ -6117,6 +6005,145 @@ func (x Int64x4) MulMasked(y Int64x4, mask Mask64x4) Int64x4 // Asm: VPMULLQ, CPU Feature: AVX512DQ func (x Int64x8) MulMasked(y Int64x8, mask Mask64x8) Int64x8 +// MulMasked multiplies corresponding elements of two vectors. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPMULLW, CPU Feature: AVX512BW +func (x Uint16x8) MulMasked(y Uint16x8, mask Mask16x8) Uint16x8 + +// MulMasked multiplies corresponding elements of two vectors. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPMULLW, CPU Feature: AVX512BW +func (x Uint16x16) MulMasked(y Uint16x16, mask Mask16x16) Uint16x16 + +// MulMasked multiplies corresponding elements of two vectors. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPMULLW, CPU Feature: AVX512BW +func (x Uint16x32) MulMasked(y Uint16x32, mask Mask16x32) Uint16x32 + +// MulMasked multiplies corresponding elements of two vectors. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPMULLD, CPU Feature: AVX512F +func (x Uint32x4) MulMasked(y Uint32x4, mask Mask32x4) Uint32x4 + +// MulMasked multiplies corresponding elements of two vectors. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPMULLD, CPU Feature: AVX512F +func (x Uint32x8) MulMasked(y Uint32x8, mask Mask32x8) Uint32x8 + +// MulMasked multiplies corresponding elements of two vectors. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPMULLD, CPU Feature: AVX512F +func (x Uint32x16) MulMasked(y Uint32x16, mask Mask32x16) Uint32x16 + +// MulMasked multiplies corresponding elements of two vectors. +// +// This operation is applied selectively under a write mask. 
+// +// Asm: VPMULLQ, CPU Feature: AVX512DQ +func (x Uint64x2) MulMasked(y Uint64x2, mask Mask64x2) Uint64x2 + +// MulMasked multiplies corresponding elements of two vectors. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPMULLQ, CPU Feature: AVX512DQ +func (x Uint64x4) MulMasked(y Uint64x4, mask Mask64x4) Uint64x4 + +// MulMasked multiplies corresponding elements of two vectors. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPMULLQ, CPU Feature: AVX512DQ +func (x Uint64x8) MulMasked(y Uint64x8, mask Mask64x8) Uint64x8 + +/* MulSubAdd */ + +// MulSubAdd performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. +// +// Asm: VFMSUBADD213PS, CPU Feature: AVX512F +func (x Float32x4) MulSubAdd(y Float32x4, z Float32x4) Float32x4 + +// MulSubAdd performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. +// +// Asm: VFMSUBADD213PS, CPU Feature: AVX512F +func (x Float32x8) MulSubAdd(y Float32x8, z Float32x8) Float32x8 + +// MulSubAdd performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. +// +// Asm: VFMSUBADD213PS, CPU Feature: AVX512F +func (x Float32x16) MulSubAdd(y Float32x16, z Float32x16) Float32x16 + +// MulSubAdd performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. +// +// Asm: VFMSUBADD213PD, CPU Feature: AVX512F +func (x Float64x2) MulSubAdd(y Float64x2, z Float64x2) Float64x2 + +// MulSubAdd performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. +// +// Asm: VFMSUBADD213PD, CPU Feature: AVX512F +func (x Float64x4) MulSubAdd(y Float64x4, z Float64x4) Float64x4 + +// MulSubAdd performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. +// +// Asm: VFMSUBADD213PD, CPU Feature: AVX512F +func (x Float64x8) MulSubAdd(y Float64x8, z Float64x8) Float64x8 + +/* MulSubAddMasked */ + +// MulSubAddMasked performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. +// +// This operation is applied selectively under a write mask. +// +// Asm: VFMSUBADD213PS, CPU Feature: AVX512F +func (x Float32x4) MulSubAddMasked(y Float32x4, z Float32x4, mask Mask32x4) Float32x4 + +// MulSubAddMasked performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. +// +// This operation is applied selectively under a write mask. +// +// Asm: VFMSUBADD213PS, CPU Feature: AVX512F +func (x Float32x8) MulSubAddMasked(y Float32x8, z Float32x8, mask Mask32x8) Float32x8 + +// MulSubAddMasked performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. +// +// This operation is applied selectively under a write mask. +// +// Asm: VFMSUBADD213PS, CPU Feature: AVX512F +func (x Float32x16) MulSubAddMasked(y Float32x16, z Float32x16, mask Mask32x16) Float32x16 + +// MulSubAddMasked performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. +// +// This operation is applied selectively under a write mask. +// +// Asm: VFMSUBADD213PD, CPU Feature: AVX512F +func (x Float64x2) MulSubAddMasked(y Float64x2, z Float64x2, mask Mask64x2) Float64x2 + +// MulSubAddMasked performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. +// +// This operation is applied selectively under a write mask. 
+// +// Asm: VFMSUBADD213PD, CPU Feature: AVX512F +func (x Float64x4) MulSubAddMasked(y Float64x4, z Float64x4, mask Mask64x4) Float64x4 + +// MulSubAddMasked performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. +// +// This operation is applied selectively under a write mask. +// +// Asm: VFMSUBADD213PD, CPU Feature: AVX512F +func (x Float64x8) MulSubAddMasked(y Float64x8, z Float64x8, mask Mask64x8) Float64x8 + /* NotEqual */ // NotEqual compares for inequality. @@ -6324,162 +6351,454 @@ func (x Int8x16) NotEqualMasked(y Int8x16, mask Mask8x16) Mask8x16 // // This operation is applied selectively under a write mask. // -// Asm: VPCMPB, CPU Feature: AVX512BW -func (x Int8x32) NotEqualMasked(y Int8x32, mask Mask8x32) Mask8x32 +// Asm: VPCMPB, CPU Feature: AVX512BW +func (x Int8x32) NotEqualMasked(y Int8x32, mask Mask8x32) Mask8x32 + +// NotEqualMasked compares for inequality. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPCMPB, CPU Feature: AVX512BW +func (x Int8x64) NotEqualMasked(y Int8x64, mask Mask8x64) Mask8x64 + +// NotEqualMasked compares for inequality. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPCMPW, CPU Feature: AVX512BW +func (x Int16x8) NotEqualMasked(y Int16x8, mask Mask16x8) Mask16x8 + +// NotEqualMasked compares for inequality. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPCMPW, CPU Feature: AVX512BW +func (x Int16x16) NotEqualMasked(y Int16x16, mask Mask16x16) Mask16x16 + +// NotEqualMasked compares for inequality. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPCMPW, CPU Feature: AVX512BW +func (x Int16x32) NotEqualMasked(y Int16x32, mask Mask16x32) Mask16x32 + +// NotEqualMasked compares for inequality. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPCMPD, CPU Feature: AVX512F +func (x Int32x4) NotEqualMasked(y Int32x4, mask Mask32x4) Mask32x4 + +// NotEqualMasked compares for inequality. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPCMPD, CPU Feature: AVX512F +func (x Int32x8) NotEqualMasked(y Int32x8, mask Mask32x8) Mask32x8 + +// NotEqualMasked compares for inequality. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPCMPD, CPU Feature: AVX512F +func (x Int32x16) NotEqualMasked(y Int32x16, mask Mask32x16) Mask32x16 + +// NotEqualMasked compares for inequality. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPCMPQ, CPU Feature: AVX512F +func (x Int64x2) NotEqualMasked(y Int64x2, mask Mask64x2) Mask64x2 + +// NotEqualMasked compares for inequality. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPCMPQ, CPU Feature: AVX512F +func (x Int64x4) NotEqualMasked(y Int64x4, mask Mask64x4) Mask64x4 + +// NotEqualMasked compares for inequality. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPCMPQ, CPU Feature: AVX512F +func (x Int64x8) NotEqualMasked(y Int64x8, mask Mask64x8) Mask64x8 + +// NotEqualMasked compares for inequality. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPCMPUB, CPU Feature: AVX512BW +func (x Uint8x16) NotEqualMasked(y Uint8x16, mask Mask8x16) Mask8x16 + +// NotEqualMasked compares for inequality. +// +// This operation is applied selectively under a write mask. 
+// +// Asm: VPCMPUB, CPU Feature: AVX512BW +func (x Uint8x32) NotEqualMasked(y Uint8x32, mask Mask8x32) Mask8x32 + +// NotEqualMasked compares for inequality. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPCMPUB, CPU Feature: AVX512BW +func (x Uint8x64) NotEqualMasked(y Uint8x64, mask Mask8x64) Mask8x64 + +// NotEqualMasked compares for inequality. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPCMPUW, CPU Feature: AVX512BW +func (x Uint16x8) NotEqualMasked(y Uint16x8, mask Mask16x8) Mask16x8 + +// NotEqualMasked compares for inequality. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPCMPUW, CPU Feature: AVX512BW +func (x Uint16x16) NotEqualMasked(y Uint16x16, mask Mask16x16) Mask16x16 + +// NotEqualMasked compares for inequality. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPCMPUW, CPU Feature: AVX512BW +func (x Uint16x32) NotEqualMasked(y Uint16x32, mask Mask16x32) Mask16x32 + +// NotEqualMasked compares for inequality. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPCMPUD, CPU Feature: AVX512F +func (x Uint32x4) NotEqualMasked(y Uint32x4, mask Mask32x4) Mask32x4 + +// NotEqualMasked compares for inequality. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPCMPUD, CPU Feature: AVX512F +func (x Uint32x8) NotEqualMasked(y Uint32x8, mask Mask32x8) Mask32x8 + +// NotEqualMasked compares for inequality. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPCMPUD, CPU Feature: AVX512F +func (x Uint32x16) NotEqualMasked(y Uint32x16, mask Mask32x16) Mask32x16 + +// NotEqualMasked compares for inequality. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPCMPUQ, CPU Feature: AVX512F +func (x Uint64x2) NotEqualMasked(y Uint64x2, mask Mask64x2) Mask64x2 + +// NotEqualMasked compares for inequality. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPCMPUQ, CPU Feature: AVX512F +func (x Uint64x4) NotEqualMasked(y Uint64x4, mask Mask64x4) Mask64x4 + +// NotEqualMasked compares for inequality. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPCMPUQ, CPU Feature: AVX512F +func (x Uint64x8) NotEqualMasked(y Uint64x8, mask Mask64x8) Mask64x8 + +/* OnesCount */ + +// OnesCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG +func (x Int8x16) OnesCount() Int8x16 + +// OnesCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG +func (x Int8x32) OnesCount() Int8x32 + +// OnesCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG +func (x Int8x64) OnesCount() Int8x64 + +// OnesCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG +func (x Int16x8) OnesCount() Int16x8 + +// OnesCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG +func (x Int16x16) OnesCount() Int16x16 + +// OnesCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG +func (x Int16x32) OnesCount() Int16x32 + +// OnesCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ +func (x Int32x4) OnesCount() Int32x4 + +// OnesCount counts the number of set bits in each element. 
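
A small hedged sketch of the masked comparisons documented above; the point to notice is that the result is itself a mask, so it can gate a later masked operation. It reuses OnesCountMasked, which is documented further below in this file; the preamble and helper name are assumed as in the earlier sketches.

func compareSketch(a, b simd.Int32x4, m simd.Mask32x4) simd.Int32x4 {
	ne := a.NotEqualMasked(b, m) // compare only the lanes selected by m; yields a Mask32x4
	return a.OnesCountMasked(ne) // feed that mask into another masked operation
}
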
+// +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ +func (x Int32x8) OnesCount() Int32x8 + +// OnesCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ +func (x Int32x16) OnesCount() Int32x16 + +// OnesCount counts the number of set bits in each element. +// +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ +func (x Int64x2) OnesCount() Int64x2 + +// OnesCount counts the number of set bits in each element. +// +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ +func (x Int64x4) OnesCount() Int64x4 + +// OnesCount counts the number of set bits in each element. +// +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ +func (x Int64x8) OnesCount() Int64x8 + +// OnesCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG +func (x Uint8x16) OnesCount() Uint8x16 + +// OnesCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG +func (x Uint8x32) OnesCount() Uint8x32 + +// OnesCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG +func (x Uint8x64) OnesCount() Uint8x64 + +// OnesCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG +func (x Uint16x8) OnesCount() Uint16x8 + +// OnesCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG +func (x Uint16x16) OnesCount() Uint16x16 + +// OnesCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG +func (x Uint16x32) OnesCount() Uint16x32 + +// OnesCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ +func (x Uint32x4) OnesCount() Uint32x4 + +// OnesCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ +func (x Uint32x8) OnesCount() Uint32x8 + +// OnesCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ +func (x Uint32x16) OnesCount() Uint32x16 + +// OnesCount counts the number of set bits in each element. +// +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ +func (x Uint64x2) OnesCount() Uint64x2 + +// OnesCount counts the number of set bits in each element. +// +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ +func (x Uint64x4) OnesCount() Uint64x4 + +// OnesCount counts the number of set bits in each element. +// +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ +func (x Uint64x8) OnesCount() Uint64x8 + +/* OnesCountMasked */ + +// OnesCountMasked counts the number of set bits in each element. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG +func (x Int8x16) OnesCountMasked(mask Mask8x16) Int8x16 + +// OnesCountMasked counts the number of set bits in each element. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG +func (x Int8x32) OnesCountMasked(mask Mask8x32) Int8x32 -// NotEqualMasked compares for inequality. +// OnesCountMasked counts the number of set bits in each element. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPB, CPU Feature: AVX512BW -func (x Int8x64) NotEqualMasked(y Int8x64, mask Mask8x64) Mask8x64 +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG +func (x Int8x64) OnesCountMasked(mask Mask8x64) Int8x64 -// NotEqualMasked compares for inequality. 
+// OnesCountMasked counts the number of set bits in each element. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPW, CPU Feature: AVX512BW -func (x Int16x8) NotEqualMasked(y Int16x8, mask Mask16x8) Mask16x8 +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG +func (x Int16x8) OnesCountMasked(mask Mask16x8) Int16x8 -// NotEqualMasked compares for inequality. +// OnesCountMasked counts the number of set bits in each element. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPW, CPU Feature: AVX512BW -func (x Int16x16) NotEqualMasked(y Int16x16, mask Mask16x16) Mask16x16 +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG +func (x Int16x16) OnesCountMasked(mask Mask16x16) Int16x16 -// NotEqualMasked compares for inequality. +// OnesCountMasked counts the number of set bits in each element. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPW, CPU Feature: AVX512BW -func (x Int16x32) NotEqualMasked(y Int16x32, mask Mask16x32) Mask16x32 +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG +func (x Int16x32) OnesCountMasked(mask Mask16x32) Int16x32 -// NotEqualMasked compares for inequality. +// OnesCountMasked counts the number of set bits in each element. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPD, CPU Feature: AVX512F -func (x Int32x4) NotEqualMasked(y Int32x4, mask Mask32x4) Mask32x4 +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ +func (x Int32x4) OnesCountMasked(mask Mask32x4) Int32x4 -// NotEqualMasked compares for inequality. +// OnesCountMasked counts the number of set bits in each element. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPD, CPU Feature: AVX512F -func (x Int32x8) NotEqualMasked(y Int32x8, mask Mask32x8) Mask32x8 +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ +func (x Int32x8) OnesCountMasked(mask Mask32x8) Int32x8 -// NotEqualMasked compares for inequality. +// OnesCountMasked counts the number of set bits in each element. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPD, CPU Feature: AVX512F -func (x Int32x16) NotEqualMasked(y Int32x16, mask Mask32x16) Mask32x16 +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ +func (x Int32x16) OnesCountMasked(mask Mask32x16) Int32x16 -// NotEqualMasked compares for inequality. +// OnesCountMasked counts the number of set bits in each element. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPQ, CPU Feature: AVX512F -func (x Int64x2) NotEqualMasked(y Int64x2, mask Mask64x2) Mask64x2 +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ +func (x Int64x2) OnesCountMasked(mask Mask64x2) Int64x2 -// NotEqualMasked compares for inequality. +// OnesCountMasked counts the number of set bits in each element. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPQ, CPU Feature: AVX512F -func (x Int64x4) NotEqualMasked(y Int64x4, mask Mask64x4) Mask64x4 +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ +func (x Int64x4) OnesCountMasked(mask Mask64x4) Int64x4 -// NotEqualMasked compares for inequality. +// OnesCountMasked counts the number of set bits in each element. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPQ, CPU Feature: AVX512F -func (x Int64x8) NotEqualMasked(y Int64x8, mask Mask64x8) Mask64x8 +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ +func (x Int64x8) OnesCountMasked(mask Mask64x8) Int64x8 -// NotEqualMasked compares for inequality. 
+// OnesCountMasked counts the number of set bits in each element. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUB, CPU Feature: AVX512BW -func (x Uint8x16) NotEqualMasked(y Uint8x16, mask Mask8x16) Mask8x16 +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG +func (x Uint8x16) OnesCountMasked(mask Mask8x16) Uint8x16 -// NotEqualMasked compares for inequality. +// OnesCountMasked counts the number of set bits in each element. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUB, CPU Feature: AVX512BW -func (x Uint8x32) NotEqualMasked(y Uint8x32, mask Mask8x32) Mask8x32 +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG +func (x Uint8x32) OnesCountMasked(mask Mask8x32) Uint8x32 -// NotEqualMasked compares for inequality. +// OnesCountMasked counts the number of set bits in each element. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUB, CPU Feature: AVX512BW -func (x Uint8x64) NotEqualMasked(y Uint8x64, mask Mask8x64) Mask8x64 +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG +func (x Uint8x64) OnesCountMasked(mask Mask8x64) Uint8x64 -// NotEqualMasked compares for inequality. +// OnesCountMasked counts the number of set bits in each element. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUW, CPU Feature: AVX512BW -func (x Uint16x8) NotEqualMasked(y Uint16x8, mask Mask16x8) Mask16x8 +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG +func (x Uint16x8) OnesCountMasked(mask Mask16x8) Uint16x8 -// NotEqualMasked compares for inequality. +// OnesCountMasked counts the number of set bits in each element. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUW, CPU Feature: AVX512BW -func (x Uint16x16) NotEqualMasked(y Uint16x16, mask Mask16x16) Mask16x16 +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG +func (x Uint16x16) OnesCountMasked(mask Mask16x16) Uint16x16 -// NotEqualMasked compares for inequality. +// OnesCountMasked counts the number of set bits in each element. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUW, CPU Feature: AVX512BW -func (x Uint16x32) NotEqualMasked(y Uint16x32, mask Mask16x32) Mask16x32 +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG +func (x Uint16x32) OnesCountMasked(mask Mask16x32) Uint16x32 -// NotEqualMasked compares for inequality. +// OnesCountMasked counts the number of set bits in each element. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUD, CPU Feature: AVX512F -func (x Uint32x4) NotEqualMasked(y Uint32x4, mask Mask32x4) Mask32x4 +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ +func (x Uint32x4) OnesCountMasked(mask Mask32x4) Uint32x4 -// NotEqualMasked compares for inequality. +// OnesCountMasked counts the number of set bits in each element. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUD, CPU Feature: AVX512F -func (x Uint32x8) NotEqualMasked(y Uint32x8, mask Mask32x8) Mask32x8 +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ +func (x Uint32x8) OnesCountMasked(mask Mask32x8) Uint32x8 -// NotEqualMasked compares for inequality. +// OnesCountMasked counts the number of set bits in each element. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUD, CPU Feature: AVX512F -func (x Uint32x16) NotEqualMasked(y Uint32x16, mask Mask32x16) Mask32x16 +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ +func (x Uint32x16) OnesCountMasked(mask Mask32x16) Uint32x16 -// NotEqualMasked compares for inequality. 
+// OnesCountMasked counts the number of set bits in each element. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUQ, CPU Feature: AVX512F -func (x Uint64x2) NotEqualMasked(y Uint64x2, mask Mask64x2) Mask64x2 +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ +func (x Uint64x2) OnesCountMasked(mask Mask64x2) Uint64x2 -// NotEqualMasked compares for inequality. +// OnesCountMasked counts the number of set bits in each element. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUQ, CPU Feature: AVX512F -func (x Uint64x4) NotEqualMasked(y Uint64x4, mask Mask64x4) Mask64x4 +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ +func (x Uint64x4) OnesCountMasked(mask Mask64x4) Uint64x4 -// NotEqualMasked compares for inequality. +// OnesCountMasked counts the number of set bits in each element. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUQ, CPU Feature: AVX512F -func (x Uint64x8) NotEqualMasked(y Uint64x8, mask Mask64x8) Mask64x8 +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ +func (x Uint64x8) OnesCountMasked(mask Mask64x8) Uint64x8 /* Or */ @@ -6689,52 +7008,6 @@ func (x Uint64x4) OrMasked(y Uint64x4, mask Mask64x4) Uint64x4 // Asm: VPORQ, CPU Feature: AVX512F func (x Uint64x8) OrMasked(y Uint64x8, mask Mask64x8) Uint64x8 -/* PairDotProd */ - -// PairDotProd multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. -// -// Asm: VPMADDWD, CPU Feature: AVX -func (x Int16x8) PairDotProd(y Int16x8) Int32x4 - -// PairDotProd multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. -// -// Asm: VPMADDWD, CPU Feature: AVX2 -func (x Int16x16) PairDotProd(y Int16x16) Int32x8 - -// PairDotProd multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. -// -// Asm: VPMADDWD, CPU Feature: AVX512BW -func (x Int16x32) PairDotProd(y Int16x32) Int32x16 - -/* PairDotProdMasked */ - -// PairDotProdMasked multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMADDWD, CPU Feature: AVX512BW -func (x Int16x8) PairDotProdMasked(y Int16x8, mask Mask16x8) Int32x4 - -// PairDotProdMasked multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMADDWD, CPU Feature: AVX512BW -func (x Int16x16) PairDotProdMasked(y Int16x16, mask Mask16x16) Int32x8 - -// PairDotProdMasked multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMADDWD, CPU Feature: AVX512BW -func (x Int16x32) PairDotProdMasked(y Int16x32, mask Mask16x32) Int32x16 - /* Permute */ // Permute performs a full permutation of vector x using indices: @@ -7599,365 +7872,225 @@ func (x Float32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Float32x16 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // -// This operation is applied selectively under a write mask. 
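
A hedged usage sketch for the population-count family above (same assumed preamble; the helper and its parameters are illustrative only).

func popcountSketch(v simd.Uint8x16, m simd.Mask8x16) (simd.Uint8x16, simd.Uint8x16) {
	all := v.OnesCount()         // per-byte count of set bits in every lane
	some := v.OnesCountMasked(m) // the same count, only in lanes selected by m
	return all, some
}
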
-// -// Asm: VPERMD, CPU Feature: AVX512F -func (x Int32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Int32x16 - -// PermuteMasked performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPERMD, CPU Feature: AVX512F -func (x Uint32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Uint32x16 - -// PermuteMasked performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPERMPD, CPU Feature: AVX512F -func (x Float64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Float64x4 - -// PermuteMasked performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPERMQ, CPU Feature: AVX512F -func (x Int64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Int64x4 - -// PermuteMasked performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPERMQ, CPU Feature: AVX512F -func (x Uint64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Uint64x4 - -// PermuteMasked performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPERMPD, CPU Feature: AVX512F -func (x Float64x8) PermuteMasked(indices Uint64x8, mask Mask64x8) Float64x8 - -// PermuteMasked performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPERMQ, CPU Feature: AVX512F -func (x Int64x8) PermuteMasked(indices Uint64x8, mask Mask64x8) Int64x8 - -// PermuteMasked performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPERMQ, CPU Feature: AVX512F -func (x Uint64x8) PermuteMasked(indices Uint64x8, mask Mask64x8) Uint64x8 - -/* PopCount */ - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTB, CPU Feature: AVX512BITALG -func (x Int8x16) PopCount() Int8x16 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTB, CPU Feature: AVX512BITALG -func (x Int8x32) PopCount() Int8x32 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTB, CPU Feature: AVX512BITALG -func (x Int8x64) PopCount() Int8x64 - -// PopCount counts the number of set bits in each element. 
-// -// Asm: VPOPCNTW, CPU Feature: AVX512BITALG -func (x Int16x8) PopCount() Int16x8 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTW, CPU Feature: AVX512BITALG -func (x Int16x16) PopCount() Int16x16 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTW, CPU Feature: AVX512BITALG -func (x Int16x32) PopCount() Int16x32 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ -func (x Int32x4) PopCount() Int32x4 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ -func (x Int32x8) PopCount() Int32x8 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ -func (x Int32x16) PopCount() Int32x16 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ -func (x Int64x2) PopCount() Int64x2 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ -func (x Int64x4) PopCount() Int64x4 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ -func (x Int64x8) PopCount() Int64x8 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTB, CPU Feature: AVX512BITALG -func (x Uint8x16) PopCount() Uint8x16 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTB, CPU Feature: AVX512BITALG -func (x Uint8x32) PopCount() Uint8x32 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTB, CPU Feature: AVX512BITALG -func (x Uint8x64) PopCount() Uint8x64 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTW, CPU Feature: AVX512BITALG -func (x Uint16x8) PopCount() Uint16x8 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTW, CPU Feature: AVX512BITALG -func (x Uint16x16) PopCount() Uint16x16 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTW, CPU Feature: AVX512BITALG -func (x Uint16x32) PopCount() Uint16x32 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ -func (x Uint32x4) PopCount() Uint32x4 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ -func (x Uint32x8) PopCount() Uint32x8 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ -func (x Uint32x16) PopCount() Uint32x16 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ -func (x Uint64x2) PopCount() Uint64x2 - -// PopCount counts the number of set bits in each element. +// This operation is applied selectively under a write mask. // -// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ -func (x Uint64x4) PopCount() Uint64x4 +// Asm: VPERMD, CPU Feature: AVX512F +func (x Int32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Int32x16 -// PopCount counts the number of set bits in each element. +// PermuteMasked performs a full permutation of vector x using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. 
// -// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ -func (x Uint64x8) PopCount() Uint64x8 - -/* PopCountMasked */ +// This operation is applied selectively under a write mask. +// +// Asm: VPERMD, CPU Feature: AVX512F +func (x Uint32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Uint32x16 -// PopCountMasked counts the number of set bits in each element. +// PermuteMasked performs a full permutation of vector x using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. // // This operation is applied selectively under a write mask. // -// Asm: VPOPCNTB, CPU Feature: AVX512BITALG -func (x Int8x16) PopCountMasked(mask Mask8x16) Int8x16 +// Asm: VPERMPD, CPU Feature: AVX512F +func (x Float64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Float64x4 -// PopCountMasked counts the number of set bits in each element. +// PermuteMasked performs a full permutation of vector x using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. // // This operation is applied selectively under a write mask. // -// Asm: VPOPCNTB, CPU Feature: AVX512BITALG -func (x Int8x32) PopCountMasked(mask Mask8x32) Int8x32 +// Asm: VPERMQ, CPU Feature: AVX512F +func (x Int64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Int64x4 -// PopCountMasked counts the number of set bits in each element. +// PermuteMasked performs a full permutation of vector x using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. // // This operation is applied selectively under a write mask. // -// Asm: VPOPCNTB, CPU Feature: AVX512BITALG -func (x Int8x64) PopCountMasked(mask Mask8x64) Int8x64 +// Asm: VPERMQ, CPU Feature: AVX512F +func (x Uint64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Uint64x4 -// PopCountMasked counts the number of set bits in each element. +// PermuteMasked performs a full permutation of vector x using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. // // This operation is applied selectively under a write mask. // -// Asm: VPOPCNTW, CPU Feature: AVX512BITALG -func (x Int16x8) PopCountMasked(mask Mask16x8) Int16x8 +// Asm: VPERMPD, CPU Feature: AVX512F +func (x Float64x8) PermuteMasked(indices Uint64x8, mask Mask64x8) Float64x8 -// PopCountMasked counts the number of set bits in each element. +// PermuteMasked performs a full permutation of vector x using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. // // This operation is applied selectively under a write mask. // -// Asm: VPOPCNTW, CPU Feature: AVX512BITALG -func (x Int16x16) PopCountMasked(mask Mask16x16) Int16x16 +// Asm: VPERMQ, CPU Feature: AVX512F +func (x Int64x8) PermuteMasked(indices Uint64x8, mask Mask64x8) Int64x8 -// PopCountMasked counts the number of set bits in each element. +// PermuteMasked performs a full permutation of vector x using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. // // This operation is applied selectively under a write mask. 
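
A hedged sketch of the masked permute documented above (same assumed preamble; indices and mask are supplied by the caller).

func permuteSketch(x simd.Int64x4, idx simd.Uint64x4, m simd.Mask64x4) simd.Int64x4 {
	// Builds {x[idx[0]], x[idx[1]], ...}, with the write restricted to lanes selected by m.
	return x.PermuteMasked(idx, m)
}
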
// -// Asm: VPOPCNTW, CPU Feature: AVX512BITALG -func (x Int16x32) PopCountMasked(mask Mask16x32) Int16x32 +// Asm: VPERMQ, CPU Feature: AVX512F +func (x Uint64x8) PermuteMasked(indices Uint64x8, mask Mask64x8) Uint64x8 + +/* Reciprocal */ -// PopCountMasked counts the number of set bits in each element. +// Reciprocal computes an approximate reciprocal of each element. // -// This operation is applied selectively under a write mask. +// Asm: VRCPPS, CPU Feature: AVX +func (x Float32x4) Reciprocal() Float32x4 + +// Reciprocal computes an approximate reciprocal of each element. // -// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ -func (x Int32x4) PopCountMasked(mask Mask32x4) Int32x4 +// Asm: VRCPPS, CPU Feature: AVX +func (x Float32x8) Reciprocal() Float32x8 -// PopCountMasked counts the number of set bits in each element. +// Reciprocal computes an approximate reciprocal of each element. // -// This operation is applied selectively under a write mask. +// Asm: VRCP14PS, CPU Feature: AVX512F +func (x Float32x16) Reciprocal() Float32x16 + +// Reciprocal computes an approximate reciprocal of each element. // -// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ -func (x Int32x8) PopCountMasked(mask Mask32x8) Int32x8 +// Asm: VRCP14PD, CPU Feature: AVX512F +func (x Float64x2) Reciprocal() Float64x2 -// PopCountMasked counts the number of set bits in each element. +// Reciprocal computes an approximate reciprocal of each element. // -// This operation is applied selectively under a write mask. +// Asm: VRCP14PD, CPU Feature: AVX512F +func (x Float64x4) Reciprocal() Float64x4 + +// Reciprocal computes an approximate reciprocal of each element. // -// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ -func (x Int32x16) PopCountMasked(mask Mask32x16) Int32x16 +// Asm: VRCP14PD, CPU Feature: AVX512F +func (x Float64x8) Reciprocal() Float64x8 + +/* ReciprocalMasked */ -// PopCountMasked counts the number of set bits in each element. +// ReciprocalMasked computes an approximate reciprocal of each element. // // This operation is applied selectively under a write mask. // -// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ -func (x Int64x2) PopCountMasked(mask Mask64x2) Int64x2 +// Asm: VRCP14PS, CPU Feature: AVX512F +func (x Float32x4) ReciprocalMasked(mask Mask32x4) Float32x4 -// PopCountMasked counts the number of set bits in each element. +// ReciprocalMasked computes an approximate reciprocal of each element. // // This operation is applied selectively under a write mask. // -// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ -func (x Int64x4) PopCountMasked(mask Mask64x4) Int64x4 +// Asm: VRCP14PS, CPU Feature: AVX512F +func (x Float32x8) ReciprocalMasked(mask Mask32x8) Float32x8 -// PopCountMasked counts the number of set bits in each element. +// ReciprocalMasked computes an approximate reciprocal of each element. // // This operation is applied selectively under a write mask. // -// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ -func (x Int64x8) PopCountMasked(mask Mask64x8) Int64x8 +// Asm: VRCP14PS, CPU Feature: AVX512F +func (x Float32x16) ReciprocalMasked(mask Mask32x16) Float32x16 -// PopCountMasked counts the number of set bits in each element. +// ReciprocalMasked computes an approximate reciprocal of each element. // // This operation is applied selectively under a write mask. 
// -// Asm: VPOPCNTB, CPU Feature: AVX512BITALG -func (x Uint8x16) PopCountMasked(mask Mask8x16) Uint8x16 +// Asm: VRCP14PD, CPU Feature: AVX512F +func (x Float64x2) ReciprocalMasked(mask Mask64x2) Float64x2 -// PopCountMasked counts the number of set bits in each element. +// ReciprocalMasked computes an approximate reciprocal of each element. // // This operation is applied selectively under a write mask. // -// Asm: VPOPCNTB, CPU Feature: AVX512BITALG -func (x Uint8x32) PopCountMasked(mask Mask8x32) Uint8x32 +// Asm: VRCP14PD, CPU Feature: AVX512F +func (x Float64x4) ReciprocalMasked(mask Mask64x4) Float64x4 -// PopCountMasked counts the number of set bits in each element. +// ReciprocalMasked computes an approximate reciprocal of each element. // // This operation is applied selectively under a write mask. // -// Asm: VPOPCNTB, CPU Feature: AVX512BITALG -func (x Uint8x64) PopCountMasked(mask Mask8x64) Uint8x64 +// Asm: VRCP14PD, CPU Feature: AVX512F +func (x Float64x8) ReciprocalMasked(mask Mask64x8) Float64x8 -// PopCountMasked counts the number of set bits in each element. +/* ReciprocalSqrt */ + +// ReciprocalSqrt computes an approximate reciprocal of the square root of each element. // -// This operation is applied selectively under a write mask. +// Asm: VRSQRTPS, CPU Feature: AVX +func (x Float32x4) ReciprocalSqrt() Float32x4 + +// ReciprocalSqrt computes an approximate reciprocal of the square root of each element. // -// Asm: VPOPCNTW, CPU Feature: AVX512BITALG -func (x Uint16x8) PopCountMasked(mask Mask16x8) Uint16x8 +// Asm: VRSQRTPS, CPU Feature: AVX +func (x Float32x8) ReciprocalSqrt() Float32x8 -// PopCountMasked counts the number of set bits in each element. +// ReciprocalSqrt computes an approximate reciprocal of the square root of each element. // -// This operation is applied selectively under a write mask. +// Asm: VRSQRT14PS, CPU Feature: AVX512F +func (x Float32x16) ReciprocalSqrt() Float32x16 + +// ReciprocalSqrt computes an approximate reciprocal of the square root of each element. // -// Asm: VPOPCNTW, CPU Feature: AVX512BITALG -func (x Uint16x16) PopCountMasked(mask Mask16x16) Uint16x16 +// Asm: VRSQRT14PD, CPU Feature: AVX512F +func (x Float64x2) ReciprocalSqrt() Float64x2 -// PopCountMasked counts the number of set bits in each element. +// ReciprocalSqrt computes an approximate reciprocal of the square root of each element. // -// This operation is applied selectively under a write mask. +// Asm: VRSQRT14PD, CPU Feature: AVX512F +func (x Float64x4) ReciprocalSqrt() Float64x4 + +// ReciprocalSqrt computes an approximate reciprocal of the square root of each element. // -// Asm: VPOPCNTW, CPU Feature: AVX512BITALG -func (x Uint16x32) PopCountMasked(mask Mask16x32) Uint16x32 +// Asm: VRSQRT14PD, CPU Feature: AVX512F +func (x Float64x8) ReciprocalSqrt() Float64x8 -// PopCountMasked counts the number of set bits in each element. +/* ReciprocalSqrtMasked */ + +// ReciprocalSqrtMasked computes an approximate reciprocal of the square root of each element. // // This operation is applied selectively under a write mask. // -// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ -func (x Uint32x4) PopCountMasked(mask Mask32x4) Uint32x4 +// Asm: VRSQRT14PS, CPU Feature: AVX512F +func (x Float32x4) ReciprocalSqrtMasked(mask Mask32x4) Float32x4 -// PopCountMasked counts the number of set bits in each element. +// ReciprocalSqrtMasked computes an approximate reciprocal of the square root of each element. // // This operation is applied selectively under a write mask. 
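
A hedged sketch of the reciprocal and reciprocal square root methods above; both are documented as approximations, so callers needing full precision would refine the results themselves (same assumed preamble).

func approxSketch(x simd.Float32x4, m simd.Mask32x4) (simd.Float32x4, simd.Float32x4) {
	r := x.Reciprocal()            // approximate 1/x in every lane
	s := x.ReciprocalSqrtMasked(m) // approximate 1/sqrt(x), only where m selects
	return r, s
}
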
// -// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ -func (x Uint32x8) PopCountMasked(mask Mask32x8) Uint32x8 +// Asm: VRSQRT14PS, CPU Feature: AVX512F +func (x Float32x8) ReciprocalSqrtMasked(mask Mask32x8) Float32x8 -// PopCountMasked counts the number of set bits in each element. +// ReciprocalSqrtMasked computes an approximate reciprocal of the square root of each element. // // This operation is applied selectively under a write mask. // -// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ -func (x Uint32x16) PopCountMasked(mask Mask32x16) Uint32x16 +// Asm: VRSQRT14PS, CPU Feature: AVX512F +func (x Float32x16) ReciprocalSqrtMasked(mask Mask32x16) Float32x16 -// PopCountMasked counts the number of set bits in each element. +// ReciprocalSqrtMasked computes an approximate reciprocal of the square root of each element. // // This operation is applied selectively under a write mask. // -// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ -func (x Uint64x2) PopCountMasked(mask Mask64x2) Uint64x2 +// Asm: VRSQRT14PD, CPU Feature: AVX512F +func (x Float64x2) ReciprocalSqrtMasked(mask Mask64x2) Float64x2 -// PopCountMasked counts the number of set bits in each element. +// ReciprocalSqrtMasked computes an approximate reciprocal of the square root of each element. // // This operation is applied selectively under a write mask. // -// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ -func (x Uint64x4) PopCountMasked(mask Mask64x4) Uint64x4 +// Asm: VRSQRT14PD, CPU Feature: AVX512F +func (x Float64x4) ReciprocalSqrtMasked(mask Mask64x4) Float64x4 -// PopCountMasked counts the number of set bits in each element. +// ReciprocalSqrtMasked computes an approximate reciprocal of the square root of each element. // // This operation is applied selectively under a write mask. // -// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ -func (x Uint64x8) PopCountMasked(mask Mask64x8) Uint64x8 +// Asm: VRSQRT14PD, CPU Feature: AVX512F +func (x Float64x8) ReciprocalSqrtMasked(mask Mask64x8) Float64x8 /* RotateAllLeft */ @@ -8647,353 +8780,227 @@ func (x Uint64x4) RotateRightMasked(y Uint64x4, mask Mask64x4) Uint64x4 // Asm: VPRORVQ, CPU Feature: AVX512F func (x Uint64x8) RotateRightMasked(y Uint64x8, mask Mask64x8) Uint64x8 -/* Round */ +/* RoundToEven */ -// Round rounds elements to the nearest integer. +// RoundToEven rounds elements to the nearest integer. // // Asm: VROUNDPS, CPU Feature: AVX -func (x Float32x4) Round() Float32x4 +func (x Float32x4) RoundToEven() Float32x4 -// Round rounds elements to the nearest integer. +// RoundToEven rounds elements to the nearest integer. // // Asm: VROUNDPS, CPU Feature: AVX -func (x Float32x8) Round() Float32x8 +func (x Float32x8) RoundToEven() Float32x8 -// Round rounds elements to the nearest integer. +// RoundToEven rounds elements to the nearest integer. // // Asm: VROUNDPD, CPU Feature: AVX -func (x Float64x2) Round() Float64x2 +func (x Float64x2) RoundToEven() Float64x2 -// Round rounds elements to the nearest integer. +// RoundToEven rounds elements to the nearest integer. // // Asm: VROUNDPD, CPU Feature: AVX -func (x Float64x4) Round() Float64x4 +func (x Float64x4) RoundToEven() Float64x4 -/* RoundScaled */ +/* RoundToEvenScaled */ -// RoundScaled rounds elements with specified precision. +// RoundToEvenScaled rounds elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x4) RoundScaled(prec uint8) Float32x4 +func (x Float32x4) RoundToEvenScaled(prec uint8) Float32x4 -// RoundScaled rounds elements with specified precision. +// RoundToEvenScaled rounds elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x8) RoundScaled(prec uint8) Float32x8 +func (x Float32x8) RoundToEvenScaled(prec uint8) Float32x8 -// RoundScaled rounds elements with specified precision. +// RoundToEvenScaled rounds elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x16) RoundScaled(prec uint8) Float32x16 +func (x Float32x16) RoundToEvenScaled(prec uint8) Float32x16 -// RoundScaled rounds elements with specified precision. +// RoundToEvenScaled rounds elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x2) RoundScaled(prec uint8) Float64x2 +func (x Float64x2) RoundToEvenScaled(prec uint8) Float64x2 -// RoundScaled rounds elements with specified precision. +// RoundToEvenScaled rounds elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x4) RoundScaled(prec uint8) Float64x4 +func (x Float64x4) RoundToEvenScaled(prec uint8) Float64x4 -// RoundScaled rounds elements with specified precision. +// RoundToEvenScaled rounds elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x8) RoundScaled(prec uint8) Float64x8 +func (x Float64x8) RoundToEvenScaled(prec uint8) Float64x8 -/* RoundScaledMasked */ +/* RoundToEvenScaledMasked */ -// RoundScaledMasked rounds elements with specified precision. +// RoundToEvenScaledMasked rounds elements with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x4) RoundScaledMasked(prec uint8, mask Mask32x4) Float32x4 +func (x Float32x4) RoundToEvenScaledMasked(prec uint8, mask Mask32x4) Float32x4 -// RoundScaledMasked rounds elements with specified precision. +// RoundToEvenScaledMasked rounds elements with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x8) RoundScaledMasked(prec uint8, mask Mask32x8) Float32x8 +func (x Float32x8) RoundToEvenScaledMasked(prec uint8, mask Mask32x8) Float32x8 -// RoundScaledMasked rounds elements with specified precision. +// RoundToEvenScaledMasked rounds elements with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x16) RoundScaledMasked(prec uint8, mask Mask32x16) Float32x16 +func (x Float32x16) RoundToEvenScaledMasked(prec uint8, mask Mask32x16) Float32x16 -// RoundScaledMasked rounds elements with specified precision. +// RoundToEvenScaledMasked rounds elements with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x2) RoundScaledMasked(prec uint8, mask Mask64x2) Float64x2 +func (x Float64x2) RoundToEvenScaledMasked(prec uint8, mask Mask64x2) Float64x2 -// RoundScaledMasked rounds elements with specified precision. +// RoundToEvenScaledMasked rounds elements with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x4) RoundScaledMasked(prec uint8, mask Mask64x4) Float64x4 +func (x Float64x4) RoundToEvenScaledMasked(prec uint8, mask Mask64x4) Float64x4 -// RoundScaledMasked rounds elements with specified precision. +// RoundToEvenScaledMasked rounds elements with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x8) RoundScaledMasked(prec uint8, mask Mask64x8) Float64x8 +func (x Float64x8) RoundToEvenScaledMasked(prec uint8, mask Mask64x8) Float64x8 -/* RoundScaledResidue */ +/* RoundToEvenScaledResidue */ -// RoundScaledResidue computes the difference after rounding with specified precision. +// RoundToEvenScaledResidue computes the difference after rounding with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x4) RoundScaledResidue(prec uint8) Float32x4 +func (x Float32x4) RoundToEvenScaledResidue(prec uint8) Float32x4 -// RoundScaledResidue computes the difference after rounding with specified precision. +// RoundToEvenScaledResidue computes the difference after rounding with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x8) RoundScaledResidue(prec uint8) Float32x8 +func (x Float32x8) RoundToEvenScaledResidue(prec uint8) Float32x8 -// RoundScaledResidue computes the difference after rounding with specified precision. +// RoundToEvenScaledResidue computes the difference after rounding with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x16) RoundScaledResidue(prec uint8) Float32x16 +func (x Float32x16) RoundToEvenScaledResidue(prec uint8) Float32x16 -// RoundScaledResidue computes the difference after rounding with specified precision. +// RoundToEvenScaledResidue computes the difference after rounding with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x2) RoundScaledResidue(prec uint8) Float64x2 +func (x Float64x2) RoundToEvenScaledResidue(prec uint8) Float64x2 -// RoundScaledResidue computes the difference after rounding with specified precision. +// RoundToEvenScaledResidue computes the difference after rounding with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x4) RoundScaledResidue(prec uint8) Float64x4 +func (x Float64x4) RoundToEvenScaledResidue(prec uint8) Float64x4 -// RoundScaledResidue computes the difference after rounding with specified precision. +// RoundToEvenScaledResidue computes the difference after rounding with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x8) RoundScaledResidue(prec uint8) Float64x8 +func (x Float64x8) RoundToEvenScaledResidue(prec uint8) Float64x8 -/* RoundScaledResidueMasked */ +/* RoundToEvenScaledResidueMasked */ -// RoundScaledResidueMasked computes the difference after rounding with specified precision. +// RoundToEvenScaledResidueMasked computes the difference after rounding with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x4) RoundScaledResidueMasked(prec uint8, mask Mask32x4) Float32x4 +func (x Float32x4) RoundToEvenScaledResidueMasked(prec uint8, mask Mask32x4) Float32x4 -// RoundScaledResidueMasked computes the difference after rounding with specified precision. +// RoundToEvenScaledResidueMasked computes the difference after rounding with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x8) RoundScaledResidueMasked(prec uint8, mask Mask32x8) Float32x8 +func (x Float32x8) RoundToEvenScaledResidueMasked(prec uint8, mask Mask32x8) Float32x8 -// RoundScaledResidueMasked computes the difference after rounding with specified precision. +// RoundToEvenScaledResidueMasked computes the difference after rounding with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x16) RoundScaledResidueMasked(prec uint8, mask Mask32x16) Float32x16 +func (x Float32x16) RoundToEvenScaledResidueMasked(prec uint8, mask Mask32x16) Float32x16 -// RoundScaledResidueMasked computes the difference after rounding with specified precision. +// RoundToEvenScaledResidueMasked computes the difference after rounding with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x2) RoundScaledResidueMasked(prec uint8, mask Mask64x2) Float64x2 +func (x Float64x2) RoundToEvenScaledResidueMasked(prec uint8, mask Mask64x2) Float64x2 -// RoundScaledResidueMasked computes the difference after rounding with specified precision. 
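
A hedged sketch of the RoundToEven family above; note the documented requirement that prec be a constant (same assumed preamble; the choice of 2 as the precision is arbitrary for the example).

func roundSketch(x simd.Float32x4) (simd.Float32x4, simd.Float32x4, simd.Float32x4) {
	r := x.RoundToEven()               // round to the nearest integer
	s := x.RoundToEvenScaled(2)        // round at the specified precision; prec must be a constant
	d := x.RoundToEvenScaledResidue(2) // the difference left over after rounding at that precision
	return r, s, d
}
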
+// RoundToEvenScaledResidueMasked computes the difference after rounding with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x4) RoundScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4 +func (x Float64x4) RoundToEvenScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4 -// RoundScaledResidueMasked computes the difference after rounding with specified precision. +// RoundToEvenScaledResidueMasked computes the difference after rounding with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x8) RoundScaledResidueMasked(prec uint8, mask Mask64x8) Float64x8 - -/* SaturatedAddDotProd */ - -// SaturatedAddDotProd performs dot products on pairs of elements of y and z and then adds x. -// -// Asm: VPDPWSSDS, CPU Feature: AVXVNNI -func (x Int32x4) SaturatedAddDotProd(y Int16x8, z Int16x8) Int32x4 - -// SaturatedAddDotProd performs dot products on pairs of elements of y and z and then adds x. -// -// Asm: VPDPWSSDS, CPU Feature: AVXVNNI -func (x Int32x8) SaturatedAddDotProd(y Int16x16, z Int16x16) Int32x8 - -// SaturatedAddDotProd performs dot products on pairs of elements of y and z and then adds x. -// -// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI -func (x Int32x16) SaturatedAddDotProd(y Int16x32, z Int16x32) Int32x16 - -/* SaturatedAddDotProdMasked */ - -// SaturatedAddDotProdMasked performs dot products on pairs of elements of y and z and then adds x. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI -func (x Int32x4) SaturatedAddDotProdMasked(y Int16x8, z Int16x8, mask Mask32x4) Int32x4 - -// SaturatedAddDotProdMasked performs dot products on pairs of elements of y and z and then adds x. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI -func (x Int32x8) SaturatedAddDotProdMasked(y Int16x16, z Int16x16, mask Mask32x8) Int32x8 - -// SaturatedAddDotProdMasked performs dot products on pairs of elements of y and z and then adds x. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI -func (x Int32x16) SaturatedAddDotProdMasked(y Int16x32, z Int16x32, mask Mask32x16) Int32x16 - -/* SaturatedUnsignedSignedPairDotProd */ - -// SaturatedUnsignedSignedPairDotProd multiplies the elements and add the pairs together with saturation, -// yielding a vector of half as many elements with twice the input element size. -// -// Asm: VPMADDUBSW, CPU Feature: AVX -func (x Uint8x16) SaturatedUnsignedSignedPairDotProd(y Int8x16) Int16x8 - -// SaturatedUnsignedSignedPairDotProd multiplies the elements and add the pairs together with saturation, -// yielding a vector of half as many elements with twice the input element size. -// -// Asm: VPMADDUBSW, CPU Feature: AVX2 -func (x Uint8x32) SaturatedUnsignedSignedPairDotProd(y Int8x32) Int16x16 - -// SaturatedUnsignedSignedPairDotProd multiplies the elements and add the pairs together with saturation, -// yielding a vector of half as many elements with twice the input element size. 
-// -// Asm: VPMADDUBSW, CPU Feature: AVX512BW -func (x Uint8x64) SaturatedUnsignedSignedPairDotProd(y Int8x64) Int16x32 - -/* SaturatedUnsignedSignedPairDotProdMasked */ - -// SaturatedUnsignedSignedPairDotProdMasked multiplies the elements and add the pairs together with saturation, -// yielding a vector of half as many elements with twice the input element size. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMADDUBSW, CPU Feature: AVX512BW -func (x Uint8x16) SaturatedUnsignedSignedPairDotProdMasked(y Int8x16, mask Mask16x8) Int16x8 - -// SaturatedUnsignedSignedPairDotProdMasked multiplies the elements and add the pairs together with saturation, -// yielding a vector of half as many elements with twice the input element size. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMADDUBSW, CPU Feature: AVX512BW -func (x Uint8x32) SaturatedUnsignedSignedPairDotProdMasked(y Int8x32, mask Mask16x16) Int16x16 - -// SaturatedUnsignedSignedPairDotProdMasked multiplies the elements and add the pairs together with saturation, -// yielding a vector of half as many elements with twice the input element size. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMADDUBSW, CPU Feature: AVX512BW -func (x Uint8x64) SaturatedUnsignedSignedPairDotProdMasked(y Int8x64, mask Mask16x32) Int16x32 - -/* SaturatedUnsignedSignedQuadDotProdAccumulate */ - -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of x and y and then adds z. -// -// Asm: VPDPBUSDS, CPU Feature: AVXVNNI -func (x Int8x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int32x4) Int32x4 - -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of x and y and then adds z. -// -// Asm: VPDPBUSDS, CPU Feature: AVXVNNI -func (x Int8x32) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int32x8) Int32x8 - -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of x and y and then adds z. -// -// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI -func (x Int8x64) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16) Int32x16 - -/* SaturatedUnsignedSignedQuadDotProdAccumulateMasked */ - -// SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of x and y and then adds z. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI -func (x Int8x16) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int32x4, mask Mask32x4) Int32x4 - -// SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of x and y and then adds z. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI -func (x Int8x32) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int32x8, mask Mask32x8) Int32x8 - -// SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of x and y and then adds z. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI -func (x Int8x64) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int32x16, mask Mask32x16) Int32x16 +func (x Float64x8) RoundToEvenScaledResidueMasked(prec uint8, mask Mask64x8) Float64x8 /* Scale */ @@ -11381,44 +11388,6 @@ func (x Uint64x4) ShiftRightMasked(y Uint64x4, mask Mask64x4) Uint64x4 // Asm: VPSRLVQ, CPU Feature: AVX512F func (x Uint64x8) ShiftRightMasked(y Uint64x8, mask Mask64x8) Uint64x8 -/* Sign */ - -// Sign returns the product of the first operand with -1, 0, or 1, -// whichever constant is nearest to the value of the second operand. -// -// Asm: VPSIGNB, CPU Feature: AVX -func (x Int8x16) Sign(y Int8x16) Int8x16 - -// Sign returns the product of the first operand with -1, 0, or 1, -// whichever constant is nearest to the value of the second operand. -// -// Asm: VPSIGNB, CPU Feature: AVX2 -func (x Int8x32) Sign(y Int8x32) Int8x32 - -// Sign returns the product of the first operand with -1, 0, or 1, -// whichever constant is nearest to the value of the second operand. -// -// Asm: VPSIGNW, CPU Feature: AVX -func (x Int16x8) Sign(y Int16x8) Int16x8 - -// Sign returns the product of the first operand with -1, 0, or 1, -// whichever constant is nearest to the value of the second operand. -// -// Asm: VPSIGNW, CPU Feature: AVX2 -func (x Int16x16) Sign(y Int16x16) Int16x16 - -// Sign returns the product of the first operand with -1, 0, or 1, -// whichever constant is nearest to the value of the second operand. -// -// Asm: VPSIGND, CPU Feature: AVX -func (x Int32x4) Sign(y Int32x4) Int32x4 - -// Sign returns the product of the first operand with -1, 0, or 1, -// whichever constant is nearest to the value of the second operand. -// -// Asm: VPSIGND, CPU Feature: AVX2 -func (x Int32x8) Sign(y Int32x8) Int32x8 - /* Sqrt */ // Sqrt computes the square root of each element. @@ -11981,32 +11950,32 @@ func (x Int16x32) SubSaturated(y Int16x32) Int16x32 // SubSaturated subtracts corresponding elements of two vectors with saturation. // -// Asm: VPSUBSB, CPU Feature: AVX +// Asm: VPSUBUSB, CPU Feature: AVX func (x Uint8x16) SubSaturated(y Uint8x16) Uint8x16 // SubSaturated subtracts corresponding elements of two vectors with saturation. // -// Asm: VPSUBSB, CPU Feature: AVX2 +// Asm: VPSUBUSB, CPU Feature: AVX2 func (x Uint8x32) SubSaturated(y Uint8x32) Uint8x32 // SubSaturated subtracts corresponding elements of two vectors with saturation. // -// Asm: VPSUBSB, CPU Feature: AVX512BW +// Asm: VPSUBUSB, CPU Feature: AVX512BW func (x Uint8x64) SubSaturated(y Uint8x64) Uint8x64 // SubSaturated subtracts corresponding elements of two vectors with saturation. // -// Asm: VPSUBSW, CPU Feature: AVX +// Asm: VPSUBUSW, CPU Feature: AVX func (x Uint16x8) SubSaturated(y Uint16x8) Uint16x8 // SubSaturated subtracts corresponding elements of two vectors with saturation. // -// Asm: VPSUBSW, CPU Feature: AVX2 +// Asm: VPSUBUSW, CPU Feature: AVX2 func (x Uint16x16) SubSaturated(y Uint16x16) Uint16x16 // SubSaturated subtracts corresponding elements of two vectors with saturation. // -// Asm: VPSUBSW, CPU Feature: AVX512BW +// Asm: VPSUBUSW, CPU Feature: AVX512BW func (x Uint16x32) SubSaturated(y Uint16x32) Uint16x32 /* SubSaturatedMasked */ @@ -12057,42 +12026,42 @@ func (x Int16x32) SubSaturatedMasked(y Int16x32, mask Mask16x32) Int16x32 // // This operation is applied selectively under a write mask. 
// -// Asm: VPSUBSB, CPU Feature: AVX512BW +// Asm: VPSUBUSB, CPU Feature: AVX512BW func (x Uint8x16) SubSaturatedMasked(y Uint8x16, mask Mask8x16) Uint8x16 // SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBSB, CPU Feature: AVX512BW +// Asm: VPSUBUSB, CPU Feature: AVX512BW func (x Uint8x32) SubSaturatedMasked(y Uint8x32, mask Mask8x32) Uint8x32 // SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBSB, CPU Feature: AVX512BW +// Asm: VPSUBUSB, CPU Feature: AVX512BW func (x Uint8x64) SubSaturatedMasked(y Uint8x64, mask Mask8x64) Uint8x64 // SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBSW, CPU Feature: AVX512BW +// Asm: VPSUBUSW, CPU Feature: AVX512BW func (x Uint16x8) SubSaturatedMasked(y Uint16x8, mask Mask16x8) Uint16x8 // SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBSW, CPU Feature: AVX512BW +// Asm: VPSUBUSW, CPU Feature: AVX512BW func (x Uint16x16) SubSaturatedMasked(y Uint16x16, mask Mask16x16) Uint16x16 // SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBSW, CPU Feature: AVX512BW +// Asm: VPSUBUSW, CPU Feature: AVX512BW func (x Uint16x32) SubSaturatedMasked(y Uint16x32, mask Mask16x32) Uint16x32 /* Trunc */ @@ -12317,46 +12286,6 @@ func (x Float64x4) TruncScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4 // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x8) TruncScaledResidueMasked(prec uint8, mask Mask64x8) Float64x8 -/* UnsignedSignedQuadDotProdAccumulate */ - -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of x and y and then adds z. -// -// Asm: VPDPBUSD, CPU Feature: AVXVNNI -func (x Int8x16) UnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int32x4) Int32x4 - -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of x and y and then adds z. -// -// Asm: VPDPBUSD, CPU Feature: AVXVNNI -func (x Int8x32) UnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int32x8) Int32x8 - -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of x and y and then adds z. -// -// Asm: VPDPBUSD, CPU Feature: AVX512VNNI -func (x Int8x64) UnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16) Int32x16 - -/* UnsignedSignedQuadDotProdAccumulateMasked */ - -// UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of x and y and then adds z. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPDPBUSD, CPU Feature: AVX512VNNI -func (x Int8x16) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int32x4, mask Mask32x4) Int32x4 - -// UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of x and y and then adds z. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPDPBUSD, CPU Feature: AVX512VNNI -func (x Int8x32) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int32x8, mask Mask32x8) Int32x8 - -// UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of x and y and then adds z. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPDPBUSD, CPU Feature: AVX512VNNI -func (x Int8x64) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int32x16, mask Mask32x16) Int32x16 - /* Xor */ // Xor performs a bitwise XOR operation between two vectors. diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index 7776a8afdaa2e9..4c3817599e3605 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -203,25 +203,6 @@ func TestExpand(t *testing.T) { } } -func TestPairDotProdAccumulate(t *testing.T) { - if !simd.HasAVX512GFNI() { - // TODO: this function is actually VNNI, let's implement and call the right check. - t.Skip("Test requires HasAVX512GFNI, not available on this hardware") - return - } - x := simd.LoadInt16x8Slice([]int16{2, 2, 2, 2, 2, 2, 2, 2}) - z := simd.LoadInt32x4Slice([]int32{3, 3, 3, 3}) - want := []int32{11, 11, 11, 11} - got := make([]int32, 4) - z = z.AddDotProd(x, x) - z.StoreSlice(got) - for i := range 4 { - if got[i] != want[i] { - t.Errorf("a and b differ at index %d, got=%d, want=%d", i, got[i], want[i]) - } - } -} - var testShiftAllVal uint64 = 3 func TestShiftAll(t *testing.T) { diff --git a/src/simd/ternary_test.go b/src/simd/ternary_test.go index 9ce0ff7676eaa3..2374635917d49b 100644 --- a/src/simd/ternary_test.go +++ b/src/simd/ternary_test.go @@ -13,11 +13,11 @@ import ( func TestFMA(t *testing.T) { if simd.HasAVX512() { - testFloat32x4TernaryFlaky(t, simd.Float32x4.FusedMultiplyAdd, fmaSlice[float32], 0.001) - testFloat32x8TernaryFlaky(t, simd.Float32x8.FusedMultiplyAdd, fmaSlice[float32], 0.001) - testFloat32x16TernaryFlaky(t, simd.Float32x16.FusedMultiplyAdd, fmaSlice[float32], 0.001) - testFloat64x2Ternary(t, simd.Float64x2.FusedMultiplyAdd, fmaSlice[float64]) - testFloat64x4Ternary(t, simd.Float64x4.FusedMultiplyAdd, fmaSlice[float64]) - testFloat64x8Ternary(t, simd.Float64x8.FusedMultiplyAdd, fmaSlice[float64]) + testFloat32x4TernaryFlaky(t, simd.Float32x4.MulAdd, fmaSlice[float32], 0.001) + testFloat32x8TernaryFlaky(t, simd.Float32x8.MulAdd, fmaSlice[float32], 0.001) + testFloat32x16TernaryFlaky(t, simd.Float32x16.MulAdd, fmaSlice[float32], 0.001) + testFloat64x2Ternary(t, simd.Float64x2.MulAdd, fmaSlice[float64]) + testFloat64x4Ternary(t, simd.Float64x4.MulAdd, fmaSlice[float64]) + testFloat64x8Ternary(t, simd.Float64x8.MulAdd, fmaSlice[float64]) } } diff --git a/src/simd/unary_test.go b/src/simd/unary_test.go index c9fdfff0ffc61e..5709ca73c7e7ad 100644 --- a/src/simd/unary_test.go +++ b/src/simd/unary_test.go @@ -46,10 +46,10 @@ func TestTrunc(t *testing.T) { } func TestRound(t *testing.T) { - testFloat32x4Unary(t, simd.Float32x4.Round, roundSlice[float32]) - testFloat32x8Unary(t, simd.Float32x8.Round, roundSlice[float32]) - testFloat64x2Unary(t, simd.Float64x2.Round, roundSlice[float64]) - testFloat64x4Unary(t, simd.Float64x4.Round, roundSlice[float64]) + testFloat32x4Unary(t, simd.Float32x4.RoundToEven, roundSlice[float32]) + testFloat32x8Unary(t, simd.Float32x8.RoundToEven, roundSlice[float32]) + testFloat64x2Unary(t, simd.Float64x2.RoundToEven, roundSlice[float64]) + testFloat64x4Unary(t, simd.Float64x4.RoundToEven, roundSlice[float64]) if simd.HasAVX512() { // testFloat32x16Unary(t, simd.Float32x16.Round, 
roundSlice[float32]) // missing // testFloat64x8Unary(t, simd.Float64x8.Round, roundSlice[float64]) // missing @@ -68,19 +68,19 @@ func TestSqrt(t *testing.T) { } func TestAbsolute(t *testing.T) { - testInt8x16Unary(t, simd.Int8x16.Absolute, map1[int8](abs)) - testInt8x32Unary(t, simd.Int8x32.Absolute, map1[int8](abs)) - testInt16x8Unary(t, simd.Int16x8.Absolute, map1[int16](abs)) - testInt16x16Unary(t, simd.Int16x16.Absolute, map1[int16](abs)) - testInt32x4Unary(t, simd.Int32x4.Absolute, map1[int32](abs)) - testInt32x8Unary(t, simd.Int32x8.Absolute, map1[int32](abs)) + testInt8x16Unary(t, simd.Int8x16.Abs, map1[int8](abs)) + testInt8x32Unary(t, simd.Int8x32.Abs, map1[int8](abs)) + testInt16x8Unary(t, simd.Int16x8.Abs, map1[int16](abs)) + testInt16x16Unary(t, simd.Int16x16.Abs, map1[int16](abs)) + testInt32x4Unary(t, simd.Int32x4.Abs, map1[int32](abs)) + testInt32x8Unary(t, simd.Int32x8.Abs, map1[int32](abs)) if simd.HasAVX512() { - testInt8x64Unary(t, simd.Int8x64.Absolute, map1[int8](abs)) - testInt16x32Unary(t, simd.Int16x32.Absolute, map1[int16](abs)) - testInt32x16Unary(t, simd.Int32x16.Absolute, map1[int32](abs)) - testInt64x2Unary(t, simd.Int64x2.Absolute, map1[int64](abs)) - testInt64x4Unary(t, simd.Int64x4.Absolute, map1[int64](abs)) - testInt64x8Unary(t, simd.Int64x8.Absolute, map1[int64](abs)) + testInt8x64Unary(t, simd.Int8x64.Abs, map1[int8](abs)) + testInt16x32Unary(t, simd.Int16x32.Abs, map1[int16](abs)) + testInt32x16Unary(t, simd.Int32x16.Abs, map1[int32](abs)) + testInt64x2Unary(t, simd.Int64x2.Abs, map1[int64](abs)) + testInt64x4Unary(t, simd.Int64x4.Abs, map1[int64](abs)) + testInt64x8Unary(t, simd.Int64x8.Abs, map1[int64](abs)) } } From 94d72355f662a1c8229db661cc068ea8e901641c Mon Sep 17 00:00:00 2001 From: David Chase Date: Wed, 30 Jul 2025 17:42:10 -0400 Subject: [PATCH 118/139] [dev.simd] simd: add emulations for bitwise ops and for mask/merge methods This CL adds the emulations under a "wrong name"; subsequent CLs will move the AVX512 versions of these operations out of the way, and then will rename these to their better names. 
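For reference, the intended semantics of the emulated Masked and Merge methods
can be modeled in scalar Go as in the sketch below. This is illustrative only;
maskedMerge is a hypothetical helper and is not part of this CL or of package
simd.

	// Scalar model of the emulated semantics, for illustration only.
	// Masked zeroes x where mask is false; Merge substitutes y where mask is false.
	// maskedMerge is a hypothetical helper, not part of this CL.
	func maskedMerge(x, y [4]float64, mask [4]bool) (masked, merged [4]float64) {
		for i := range x {
			if mask[i] {
				masked[i] = x[i]
				merged[i] = x[i]
			} else {
				masked[i] = 0
				merged[i] = y[i]
			}
		}
		return
	}

With x = {1, 2, 3, 4}, y = {4, 2, 3, 1}, and mask = {false, false, false, true}
(the inputs used by TestMergeFloat in this CL), masked is {0, 0, 0, 4} and
merged is {4, 2, 3, 4}, which matches the result the test expects.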
Change-Id: I49e7a73e4fea74fb7bd26cb8062014568d7999ca Reviewed-on: https://go-review.googlesource.com/c/go/+/692217 Reviewed-by: Junyang Shao Reviewed-by: Cherry Mui LUCI-TryBot-Result: Go LUCI --- src/simd/genfiles.go | 82 +++++++- src/simd/simd_test.go | 14 ++ src/simd/slice_amd64.go | 408 +++++++++++++++++++++++++++++++++++++++- 3 files changed, 488 insertions(+), 16 deletions(-) diff --git a/src/simd/genfiles.go b/src/simd/genfiles.go index 269659a65350e5..c7c6aae374265d 100644 --- a/src/simd/genfiles.go +++ b/src/simd/genfiles.go @@ -50,13 +50,20 @@ var convert32Shapes = &shapes{ floats: []int{32}, } -var avx512MaskedLoadShapes = &shapes{ +var avx512Shapes = &shapes{ vecs: []int{512}, ints: []int{8, 16, 32, 64}, uints: []int{8, 16, 32, 64}, floats: []int{32, 64}, } +var avx2Shapes = &shapes{ + vecs: []int{128, 256}, + ints: []int{8, 16, 32, 64}, + uints: []int{8, 16, 32, 64}, + floats: []int{32, 64}, +} + var avx2MaskedLoadShapes = &shapes{ vecs: []int{128, 256}, ints: []int{32, 64}, @@ -70,12 +77,12 @@ var avx2SmallLoadPunShapes = &shapes{ uints: []int{8, 16}, } -var unaryFlaky = &shapes{ +var unaryFlaky = &shapes{ // for tests that support flaky equality vecs: []int{128, 256, 512}, floats: []int{32, 64}, } -var ternaryFlaky = &shapes{ +var ternaryFlaky = &shapes{ // for tests that support flaky equality vecs: []int{128, 256, 512}, floats: []int{32}, } @@ -88,6 +95,7 @@ func oneTemplate(t *template.Template, baseType string, width, count int, out io BaseType := strings.ToUpper(baseType[:1]) + baseType[1:] eType := fmt.Sprintf("%s%d", baseType, width) wxc := fmt.Sprintf("%dx%d", width, count) + bxc := fmt.Sprintf("%dx%d", 8, count*(width/8)) vType := fmt.Sprintf("%s%s", BaseType, wxc) aOrAn := "a" if strings.Contains("aeiou", baseType[:1]) { @@ -100,6 +108,8 @@ func oneTemplate(t *template.Template, baseType string, width, count int, out io Width int // the bit width of the element type, e.g. 32 Count int // the number of elements, e.g. 4 WxC string // the width-by-type string, e.g., "32x4" + BxC string // as if bytes, in the proper count, e.g., "8x16" (W==8) + Base string // the capitalized Base Type of the vector, e.g., "Float" Type string // the element type, e.g. "float32" OxFF string // a mask for the lowest 'count' bits }{ @@ -108,6 +118,8 @@ func oneTemplate(t *template.Template, baseType string, width, count int, out io Width: width, Count: count, WxC: wxc, + BxC: bxc, + Base: BaseType, Type: eType, OxFF: oxFF, }) @@ -373,7 +385,7 @@ func test{{.Vec}}CompareMasked(t *testing.T, } `) -var avx512MaskedLoadSlicePartTemplate = shapedTemplateOf(avx512MaskedLoadShapes, "avx 512 load slice part", ` +var avx512MaskedLoadSlicePartTemplate = shapedTemplateOf(avx512Shapes, "avx 512 load slice part", ` // Load{{.Vec}}SlicePart loads a {{.Vec}} from the slice s. // If s has fewer than {{.Count}} elements, the remaining elements of the vector are filled with zeroes. // If s has {{.Count}} or more elements, the function is equivalent to Load{{.Vec}}Slice. @@ -386,7 +398,6 @@ func Load{{.Vec}}SlicePart(s []{{.Type}}) {{.Vec}} { var x {{.Vec}} return x } - mask := Mask{{.WxC}}FromBits({{.OxFF}} >> ({{.Count}} - l)) return LoadMasked{{.Vec}}(pa{{.Vec}}(s), mask) } @@ -476,6 +487,58 @@ func pa{{.Vec}}(s []{{.Type}}) *[{{.Count}}]{{.Type}} { } `) +var avx2MaskedTemplate = shapedTemplateOf(avx2Shapes, "avx2 .Masked methods", ` +// Masked returns x but with elements zeroed where mask is false. 
+func (x {{.Vec}}) Masked(mask Mask{{.WxC}}) {{.Vec}} { + im := mask.AsInt{{.WxC}}() +{{- if eq .Base "Int" }} + return im.And(x) +{{- else}} + return x.AsInt{{.WxC}}().And(im).As{{.Vec}}() +{{- end -}} +} + +// Merge returns x but with elements set to y where mask is false. +func (x {{.Vec}}) Merge(y {{.Vec}}, mask Mask{{.WxC}}) {{.Vec}} { +{{- if eq .BxC .WxC }} + im := mask.AsInt{{.BxC}}() +{{- else}} + im := mask.AsInt{{.WxC}}().AsInt{{.BxC}}() +{{- end -}} +{{- if and (eq .Base "Int") (eq .BxC .WxC) }} + return y.blend(x, im) +{{- else}} + ix := x.AsInt{{.BxC}}() + iy := y.AsInt{{.BxC}}() + return iy.blend(ix, im).As{{.Vec}}() +{{- end -}} +} +`) + +// TODO perhaps write these in ways that work better on AVX512 +var avx512MaskedTemplate = shapedTemplateOf(avx512Shapes, "avx512 .Masked methods", ` +// Masked returns x but with elements zeroed where mask is false. +func (x {{.Vec}}) Masked(mask Mask{{.WxC}}) {{.Vec}} { + im := mask.AsInt{{.WxC}}() +{{- if eq .Base "Int" }} + return im.And(x) +{{- else}} + return x.AsInt{{.WxC}}().And(im).As{{.Vec}}() +{{- end -}} +} + +// Merge returns x but with elements set to y where m is false. +func (x {{.Vec}}) Merge(y {{.Vec}}, mask Mask{{.WxC}}) {{.Vec}} { +{{- if eq .Base "Int" }} + return y.blendMasked(x, mask) +{{- else}} + ix := x.AsInt{{.WxC}}() + iy := y.AsInt{{.WxC}}() + return iy.blendMasked(ix, mask).As{{.Vec}}() +{{- end -}} +} +`) + func main() { sl := flag.String("sl", "slice_amd64.go", "file name for slice operations") ush := flag.String("ush", "unsafe_helpers.go", "file name for unsafe helpers") @@ -487,7 +550,14 @@ func main() { flag.Parse() if *sl != "" { - one(*sl, prologue, sliceTemplate, avx512MaskedLoadSlicePartTemplate, avx2MaskedLoadSlicePartTemplate, avx2SmallLoadSlicePartTemplate) + one(*sl, prologue, + sliceTemplate, + avx512MaskedLoadSlicePartTemplate, + avx2MaskedLoadSlicePartTemplate, + avx2SmallLoadSlicePartTemplate, + avx2MaskedTemplate, + avx512MaskedTemplate, + ) } if *ush != "" { one(*ush, unsafePrologue, unsafePATemplate) diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index 4c3817599e3605..2fef6417d2bca6 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -382,3 +382,17 @@ func TestBitMaskToBits(t *testing.T) { t.Errorf("Want 0b101, got %b", v) } } + +func TestMergeFloat(t *testing.T) { + a := simd.LoadFloat64x4Slice([]float64{1, 2, 3, 4}) + b := simd.LoadFloat64x4Slice([]float64{4, 2, 3, 1}) + g := a.Greater(b) + k := make([]int64, 4, 4) + g.AsInt64x4().StoreSlice(k) + checkSlices[int64](t, k, []int64{0, 0, 0, -1}) + c := a.Merge(b, g) + + s := make([]float64, 4, 4) + c.StoreSlice(s) + checkSlices[float64](t, s, []float64{4, 2, 3, 4}) +} diff --git a/src/simd/slice_amd64.go b/src/simd/slice_amd64.go index bd1d4f153089de..a43660cba4e9b9 100644 --- a/src/simd/slice_amd64.go +++ b/src/simd/slice_amd64.go @@ -318,7 +318,6 @@ func LoadInt8x64SlicePart(s []int8) Int8x64 { var x Int8x64 return x } - mask := Mask8x64FromBits(0xffffffffffffffff >> (64 - l)) return LoadMaskedInt8x64(paInt8x64(s), mask) } @@ -351,7 +350,6 @@ func LoadInt16x32SlicePart(s []int16) Int16x32 { var x Int16x32 return x } - mask := Mask16x32FromBits(0xffffffff >> (32 - l)) return LoadMaskedInt16x32(paInt16x32(s), mask) } @@ -384,7 +382,6 @@ func LoadInt32x16SlicePart(s []int32) Int32x16 { var x Int32x16 return x } - mask := Mask32x16FromBits(0xffff >> (16 - l)) return LoadMaskedInt32x16(paInt32x16(s), mask) } @@ -417,7 +414,6 @@ func LoadInt64x8SlicePart(s []int64) Int64x8 { var x Int64x8 return x } - mask := 
Mask64x8FromBits(0xff >> (8 - l)) return LoadMaskedInt64x8(paInt64x8(s), mask) } @@ -450,7 +446,6 @@ func LoadUint8x64SlicePart(s []uint8) Uint8x64 { var x Uint8x64 return x } - mask := Mask8x64FromBits(0xffffffffffffffff >> (64 - l)) return LoadMaskedUint8x64(paUint8x64(s), mask) } @@ -483,7 +478,6 @@ func LoadUint16x32SlicePart(s []uint16) Uint16x32 { var x Uint16x32 return x } - mask := Mask16x32FromBits(0xffffffff >> (32 - l)) return LoadMaskedUint16x32(paUint16x32(s), mask) } @@ -516,7 +510,6 @@ func LoadUint32x16SlicePart(s []uint32) Uint32x16 { var x Uint32x16 return x } - mask := Mask32x16FromBits(0xffff >> (16 - l)) return LoadMaskedUint32x16(paUint32x16(s), mask) } @@ -549,7 +542,6 @@ func LoadUint64x8SlicePart(s []uint64) Uint64x8 { var x Uint64x8 return x } - mask := Mask64x8FromBits(0xff >> (8 - l)) return LoadMaskedUint64x8(paUint64x8(s), mask) } @@ -582,7 +574,6 @@ func LoadFloat32x16SlicePart(s []float32) Float32x16 { var x Float32x16 return x } - mask := Mask32x16FromBits(0xffff >> (16 - l)) return LoadMaskedFloat32x16(paFloat32x16(s), mask) } @@ -615,7 +606,6 @@ func LoadFloat64x8SlicePart(s []float64) Float64x8 { var x Float64x8 return x } - mask := Mask64x8FromBits(0xff >> (8 - l)) return LoadMaskedFloat64x8(paFloat64x8(s), mask) } @@ -1111,3 +1101,401 @@ func (x Uint16x16) StoreSlicePart(s []uint16) { t := unsafe.Slice((*int16)(unsafe.Pointer(&s[0])), len(s)) x.AsInt16x16().StoreSlicePart(t) } + +// Masked returns x but with elements zeroed where mask is false. +func (x Int8x16) Masked(mask Mask8x16) Int8x16 { + im := mask.AsInt8x16() + return im.And(x) +} + +// Merge returns x but with elements set to y where mask is false. +func (x Int8x16) Merge(y Int8x16, mask Mask8x16) Int8x16 { + im := mask.AsInt8x16() + return y.blend(x, im) +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Int16x8) Masked(mask Mask16x8) Int16x8 { + im := mask.AsInt16x8() + return im.And(x) +} + +// Merge returns x but with elements set to y where mask is false. +func (x Int16x8) Merge(y Int16x8, mask Mask16x8) Int16x8 { + im := mask.AsInt16x8().AsInt8x16() + ix := x.AsInt8x16() + iy := y.AsInt8x16() + return iy.blend(ix, im).AsInt16x8() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Int32x4) Masked(mask Mask32x4) Int32x4 { + im := mask.AsInt32x4() + return im.And(x) +} + +// Merge returns x but with elements set to y where mask is false. +func (x Int32x4) Merge(y Int32x4, mask Mask32x4) Int32x4 { + im := mask.AsInt32x4().AsInt8x16() + ix := x.AsInt8x16() + iy := y.AsInt8x16() + return iy.blend(ix, im).AsInt32x4() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Int64x2) Masked(mask Mask64x2) Int64x2 { + im := mask.AsInt64x2() + return im.And(x) +} + +// Merge returns x but with elements set to y where mask is false. +func (x Int64x2) Merge(y Int64x2, mask Mask64x2) Int64x2 { + im := mask.AsInt64x2().AsInt8x16() + ix := x.AsInt8x16() + iy := y.AsInt8x16() + return iy.blend(ix, im).AsInt64x2() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Uint8x16) Masked(mask Mask8x16) Uint8x16 { + im := mask.AsInt8x16() + return x.AsInt8x16().And(im).AsUint8x16() +} + +// Merge returns x but with elements set to y where mask is false. +func (x Uint8x16) Merge(y Uint8x16, mask Mask8x16) Uint8x16 { + im := mask.AsInt8x16() + ix := x.AsInt8x16() + iy := y.AsInt8x16() + return iy.blend(ix, im).AsUint8x16() +} + +// Masked returns x but with elements zeroed where mask is false. 
+func (x Uint16x8) Masked(mask Mask16x8) Uint16x8 { + im := mask.AsInt16x8() + return x.AsInt16x8().And(im).AsUint16x8() +} + +// Merge returns x but with elements set to y where mask is false. +func (x Uint16x8) Merge(y Uint16x8, mask Mask16x8) Uint16x8 { + im := mask.AsInt16x8().AsInt8x16() + ix := x.AsInt8x16() + iy := y.AsInt8x16() + return iy.blend(ix, im).AsUint16x8() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Uint32x4) Masked(mask Mask32x4) Uint32x4 { + im := mask.AsInt32x4() + return x.AsInt32x4().And(im).AsUint32x4() +} + +// Merge returns x but with elements set to y where mask is false. +func (x Uint32x4) Merge(y Uint32x4, mask Mask32x4) Uint32x4 { + im := mask.AsInt32x4().AsInt8x16() + ix := x.AsInt8x16() + iy := y.AsInt8x16() + return iy.blend(ix, im).AsUint32x4() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Uint64x2) Masked(mask Mask64x2) Uint64x2 { + im := mask.AsInt64x2() + return x.AsInt64x2().And(im).AsUint64x2() +} + +// Merge returns x but with elements set to y where mask is false. +func (x Uint64x2) Merge(y Uint64x2, mask Mask64x2) Uint64x2 { + im := mask.AsInt64x2().AsInt8x16() + ix := x.AsInt8x16() + iy := y.AsInt8x16() + return iy.blend(ix, im).AsUint64x2() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Float32x4) Masked(mask Mask32x4) Float32x4 { + im := mask.AsInt32x4() + return x.AsInt32x4().And(im).AsFloat32x4() +} + +// Merge returns x but with elements set to y where mask is false. +func (x Float32x4) Merge(y Float32x4, mask Mask32x4) Float32x4 { + im := mask.AsInt32x4().AsInt8x16() + ix := x.AsInt8x16() + iy := y.AsInt8x16() + return iy.blend(ix, im).AsFloat32x4() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Float64x2) Masked(mask Mask64x2) Float64x2 { + im := mask.AsInt64x2() + return x.AsInt64x2().And(im).AsFloat64x2() +} + +// Merge returns x but with elements set to y where mask is false. +func (x Float64x2) Merge(y Float64x2, mask Mask64x2) Float64x2 { + im := mask.AsInt64x2().AsInt8x16() + ix := x.AsInt8x16() + iy := y.AsInt8x16() + return iy.blend(ix, im).AsFloat64x2() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Int8x32) Masked(mask Mask8x32) Int8x32 { + im := mask.AsInt8x32() + return im.And(x) +} + +// Merge returns x but with elements set to y where mask is false. +func (x Int8x32) Merge(y Int8x32, mask Mask8x32) Int8x32 { + im := mask.AsInt8x32() + return y.blend(x, im) +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Int16x16) Masked(mask Mask16x16) Int16x16 { + im := mask.AsInt16x16() + return im.And(x) +} + +// Merge returns x but with elements set to y where mask is false. +func (x Int16x16) Merge(y Int16x16, mask Mask16x16) Int16x16 { + im := mask.AsInt16x16().AsInt8x32() + ix := x.AsInt8x32() + iy := y.AsInt8x32() + return iy.blend(ix, im).AsInt16x16() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Int32x8) Masked(mask Mask32x8) Int32x8 { + im := mask.AsInt32x8() + return im.And(x) +} + +// Merge returns x but with elements set to y where mask is false. +func (x Int32x8) Merge(y Int32x8, mask Mask32x8) Int32x8 { + im := mask.AsInt32x8().AsInt8x32() + ix := x.AsInt8x32() + iy := y.AsInt8x32() + return iy.blend(ix, im).AsInt32x8() +} + +// Masked returns x but with elements zeroed where mask is false. 
+func (x Int64x4) Masked(mask Mask64x4) Int64x4 { + im := mask.AsInt64x4() + return im.And(x) +} + +// Merge returns x but with elements set to y where mask is false. +func (x Int64x4) Merge(y Int64x4, mask Mask64x4) Int64x4 { + im := mask.AsInt64x4().AsInt8x32() + ix := x.AsInt8x32() + iy := y.AsInt8x32() + return iy.blend(ix, im).AsInt64x4() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Uint8x32) Masked(mask Mask8x32) Uint8x32 { + im := mask.AsInt8x32() + return x.AsInt8x32().And(im).AsUint8x32() +} + +// Merge returns x but with elements set to y where mask is false. +func (x Uint8x32) Merge(y Uint8x32, mask Mask8x32) Uint8x32 { + im := mask.AsInt8x32() + ix := x.AsInt8x32() + iy := y.AsInt8x32() + return iy.blend(ix, im).AsUint8x32() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Uint16x16) Masked(mask Mask16x16) Uint16x16 { + im := mask.AsInt16x16() + return x.AsInt16x16().And(im).AsUint16x16() +} + +// Merge returns x but with elements set to y where mask is false. +func (x Uint16x16) Merge(y Uint16x16, mask Mask16x16) Uint16x16 { + im := mask.AsInt16x16().AsInt8x32() + ix := x.AsInt8x32() + iy := y.AsInt8x32() + return iy.blend(ix, im).AsUint16x16() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Uint32x8) Masked(mask Mask32x8) Uint32x8 { + im := mask.AsInt32x8() + return x.AsInt32x8().And(im).AsUint32x8() +} + +// Merge returns x but with elements set to y where mask is false. +func (x Uint32x8) Merge(y Uint32x8, mask Mask32x8) Uint32x8 { + im := mask.AsInt32x8().AsInt8x32() + ix := x.AsInt8x32() + iy := y.AsInt8x32() + return iy.blend(ix, im).AsUint32x8() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Uint64x4) Masked(mask Mask64x4) Uint64x4 { + im := mask.AsInt64x4() + return x.AsInt64x4().And(im).AsUint64x4() +} + +// Merge returns x but with elements set to y where mask is false. +func (x Uint64x4) Merge(y Uint64x4, mask Mask64x4) Uint64x4 { + im := mask.AsInt64x4().AsInt8x32() + ix := x.AsInt8x32() + iy := y.AsInt8x32() + return iy.blend(ix, im).AsUint64x4() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Float32x8) Masked(mask Mask32x8) Float32x8 { + im := mask.AsInt32x8() + return x.AsInt32x8().And(im).AsFloat32x8() +} + +// Merge returns x but with elements set to y where mask is false. +func (x Float32x8) Merge(y Float32x8, mask Mask32x8) Float32x8 { + im := mask.AsInt32x8().AsInt8x32() + ix := x.AsInt8x32() + iy := y.AsInt8x32() + return iy.blend(ix, im).AsFloat32x8() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Float64x4) Masked(mask Mask64x4) Float64x4 { + im := mask.AsInt64x4() + return x.AsInt64x4().And(im).AsFloat64x4() +} + +// Merge returns x but with elements set to y where mask is false. +func (x Float64x4) Merge(y Float64x4, mask Mask64x4) Float64x4 { + im := mask.AsInt64x4().AsInt8x32() + ix := x.AsInt8x32() + iy := y.AsInt8x32() + return iy.blend(ix, im).AsFloat64x4() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Int8x64) Masked(mask Mask8x64) Int8x64 { + im := mask.AsInt8x64() + return im.And(x) +} + +// Merge returns x but with elements set to y where m is false. +func (x Int8x64) Merge(y Int8x64, mask Mask8x64) Int8x64 { + return y.blendMasked(x, mask) +} + +// Masked returns x but with elements zeroed where mask is false. 
+func (x Int16x32) Masked(mask Mask16x32) Int16x32 { + im := mask.AsInt16x32() + return im.And(x) +} + +// Merge returns x but with elements set to y where m is false. +func (x Int16x32) Merge(y Int16x32, mask Mask16x32) Int16x32 { + return y.blendMasked(x, mask) +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Int32x16) Masked(mask Mask32x16) Int32x16 { + im := mask.AsInt32x16() + return im.And(x) +} + +// Merge returns x but with elements set to y where m is false. +func (x Int32x16) Merge(y Int32x16, mask Mask32x16) Int32x16 { + return y.blendMasked(x, mask) +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Int64x8) Masked(mask Mask64x8) Int64x8 { + im := mask.AsInt64x8() + return im.And(x) +} + +// Merge returns x but with elements set to y where m is false. +func (x Int64x8) Merge(y Int64x8, mask Mask64x8) Int64x8 { + return y.blendMasked(x, mask) +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Uint8x64) Masked(mask Mask8x64) Uint8x64 { + im := mask.AsInt8x64() + return x.AsInt8x64().And(im).AsUint8x64() +} + +// Merge returns x but with elements set to y where m is false. +func (x Uint8x64) Merge(y Uint8x64, mask Mask8x64) Uint8x64 { + ix := x.AsInt8x64() + iy := y.AsInt8x64() + return iy.blendMasked(ix, mask).AsUint8x64() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Uint16x32) Masked(mask Mask16x32) Uint16x32 { + im := mask.AsInt16x32() + return x.AsInt16x32().And(im).AsUint16x32() +} + +// Merge returns x but with elements set to y where m is false. +func (x Uint16x32) Merge(y Uint16x32, mask Mask16x32) Uint16x32 { + ix := x.AsInt16x32() + iy := y.AsInt16x32() + return iy.blendMasked(ix, mask).AsUint16x32() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Uint32x16) Masked(mask Mask32x16) Uint32x16 { + im := mask.AsInt32x16() + return x.AsInt32x16().And(im).AsUint32x16() +} + +// Merge returns x but with elements set to y where m is false. +func (x Uint32x16) Merge(y Uint32x16, mask Mask32x16) Uint32x16 { + ix := x.AsInt32x16() + iy := y.AsInt32x16() + return iy.blendMasked(ix, mask).AsUint32x16() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Uint64x8) Masked(mask Mask64x8) Uint64x8 { + im := mask.AsInt64x8() + return x.AsInt64x8().And(im).AsUint64x8() +} + +// Merge returns x but with elements set to y where m is false. +func (x Uint64x8) Merge(y Uint64x8, mask Mask64x8) Uint64x8 { + ix := x.AsInt64x8() + iy := y.AsInt64x8() + return iy.blendMasked(ix, mask).AsUint64x8() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Float32x16) Masked(mask Mask32x16) Float32x16 { + im := mask.AsInt32x16() + return x.AsInt32x16().And(im).AsFloat32x16() +} + +// Merge returns x but with elements set to y where m is false. +func (x Float32x16) Merge(y Float32x16, mask Mask32x16) Float32x16 { + ix := x.AsInt32x16() + iy := y.AsInt32x16() + return iy.blendMasked(ix, mask).AsFloat32x16() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Float64x8) Masked(mask Mask64x8) Float64x8 { + im := mask.AsInt64x8() + return x.AsInt64x8().And(im).AsFloat64x8() +} + +// Merge returns x but with elements set to y where m is false. 
+func (x Float64x8) Merge(y Float64x8, mask Mask64x8) Float64x8 { + ix := x.AsInt64x8() + iy := y.AsInt64x8() + return iy.blendMasked(ix, mask).AsFloat64x8() +} From 38b76bf2a3b4a2e1bd512f32907d7f2d3de3b71a Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Fri, 8 Aug 2025 17:31:45 +0000 Subject: [PATCH 119/139] [dev.simd] cmd/compile, simd: jump table for imm ops This CL fixes some errors in prog generation for imm operations, please see the changes in ssa.go for details. This CL also implements the jump table for non-const immediate arg. The current implementation exhaust 0-255, the bound-checked version will be in the next CL. This CL is partially generated by CL 694375. Change-Id: I75fe9900430b4fca5b39b0c0958a13b20b1104b7 Reviewed-on: https://go-review.googlesource.com/c/go/+/694395 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/ssa.go | 52 +- .../compile/internal/ssa/_gen/simdAMD64.rules | 144 +- .../compile/internal/ssa/_gen/simdAMD64ops.go | 460 ++--- .../internal/ssa/_gen/simdgenericOps.go | 488 ++--- src/cmd/compile/internal/ssa/check.go | 3 +- src/cmd/compile/internal/ssa/opGen.go | 948 ++++----- src/cmd/compile/internal/ssa/rewriteAMD64.go | 1800 ++++++++--------- src/cmd/compile/internal/ssagen/intrinsics.go | 102 +- src/simd/ops_amd64.go | 488 ++--- src/simd/simd_test.go | 16 + 10 files changed, 2258 insertions(+), 2243 deletions(-) diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 9a4203f7c6786f..d3fae7ce14c8e6 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -1837,11 +1837,7 @@ func simdVkv(s *ssagen.State, v *ssa.Value) *obj.Prog { // Example instruction: VROUNDPD $7, X2, X2 func simdV11Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) - imm := v.AuxInt - if imm < 0 || imm > 255 { - v.Fatalf("Invalid source selection immediate") - } - p.From.Offset = imm + p.From.Offset = int64(v.AuxUInt8()) p.From.Type = obj.TYPE_CONST p.AddRestSourceReg(simdReg(v.Args[0])) p.To.Type = obj.TYPE_REG @@ -1852,11 +1848,7 @@ func simdV11Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog { // Example instruction: VREDUCEPD $126, X1, K3, X31 func simdVkvImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) - imm := v.AuxInt - if imm < 0 || imm > 255 { - v.Fatalf("Invalid source selection immediate") - } - p.From.Offset = imm + p.From.Offset = int64(v.AuxUInt8()) p.From.Type = obj.TYPE_CONST p.AddRestSourceReg(simdReg(v.Args[0])) p.AddRestSourceReg(maskReg(v.Args[1])) @@ -1868,11 +1860,7 @@ func simdVkvImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { // Example instruction: VCMPPS $7, X2, X9, X2 func simdV21Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) - imm := v.AuxInt - if imm < 0 || imm > 255 { - v.Fatalf("Invalid source selection immediate") - } - p.From.Offset = imm + p.From.Offset = int64(v.AuxUInt8()) p.From.Type = obj.TYPE_CONST p.AddRestSourceReg(simdReg(v.Args[1])) p.AddRestSourceReg(simdReg(v.Args[0])) @@ -1884,11 +1872,7 @@ func simdV21Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog { // Example instruction: VPINSRB $3, DX, X0, X0 func simdVgpvImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) - imm := v.AuxInt - if imm < 0 || imm > 255 { - v.Fatalf("Invalid source selection immediate") - } - p.From.Offset = imm + p.From.Offset = int64(v.AuxUInt8()) p.From.Type = obj.TYPE_CONST p.AddRestSourceReg(v.Args[1].Reg()) p.AddRestSourceReg(simdReg(v.Args[0])) @@ -1900,11 +1884,7 @@ 
func simdVgpvImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { // Example instruction: VPCMPD $1, Z1, Z2, K1 func simdV2kImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) - imm := v.AuxInt - if imm < 0 || imm > 255 { - v.Fatalf("Invalid source selection immediate") - } - p.From.Offset = imm + p.From.Offset = int64(v.AuxUInt8()) p.From.Type = obj.TYPE_CONST p.AddRestSourceReg(simdReg(v.Args[1])) p.AddRestSourceReg(simdReg(v.Args[0])) @@ -1916,11 +1896,7 @@ func simdV2kImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { // Example instruction: VPCMPD $1, Z1, Z2, K2, K1 func simdV2kkImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) - imm := v.AuxInt - if imm < 0 || imm > 255 { - v.Fatalf("Invalid source selection immediate") - } - p.From.Offset = imm + p.From.Offset = int64(v.AuxUInt8()) p.From.Type = obj.TYPE_CONST p.AddRestSourceReg(simdReg(v.Args[1])) p.AddRestSourceReg(simdReg(v.Args[0])) @@ -1931,7 +1907,15 @@ func simdV2kkImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { } func simdV2kvImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { - return simdV2kkImm8(s, v) + p := s.Prog(v.Op.Asm()) + p.From.Offset = int64(v.AuxUInt8()) + p.From.Type = obj.TYPE_CONST + p.AddRestSourceReg(simdReg(v.Args[1])) + p.AddRestSourceReg(simdReg(v.Args[0])) + p.AddRestSourceReg(maskReg(v.Args[2])) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + return p } // Example instruction: VFMADD213PD Z2, Z1, Z0 @@ -1959,11 +1943,7 @@ func simdV3kvResultInArg0(s *ssagen.State, v *ssa.Value) *obj.Prog { func simdVgpImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) - imm := v.AuxInt - if imm < 0 || imm > 255 { - v.Fatalf("Invalid source selection immediate") - } - p.From.Offset = imm + p.From.Offset = int64(v.AuxUInt8()) p.From.Type = obj.TYPE_CONST p.AddRestSourceReg(simdReg(v.Args[0])) p.To.Type = obj.TYPE_REG diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index e294836cd26cc2..8ff638808abc3f 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -1438,41 +1438,41 @@ (SetLoUint32x16 x y) => (VINSERTI64X4512 [0] x y) (SetLoUint64x4 x y) => (VINSERTI128256 [0] x y) (SetLoUint64x8 x y) => (VINSERTI64X4512 [0] x y) -(ShiftAllLeftInt16x8 x (MOVQconst [c])) => (VPSLLW128const [int8(c)] x) +(ShiftAllLeftInt16x8 x (MOVQconst [c])) => (VPSLLW128const [uint8(c)] x) (ShiftAllLeftInt16x8 x y) => (VPSLLW128 x y) -(ShiftAllLeftInt16x16 x (MOVQconst [c])) => (VPSLLW256const [int8(c)] x) +(ShiftAllLeftInt16x16 x (MOVQconst [c])) => (VPSLLW256const [uint8(c)] x) (ShiftAllLeftInt16x16 x y) => (VPSLLW256 x y) -(ShiftAllLeftInt16x32 x (MOVQconst [c])) => (VPSLLW512const [int8(c)] x) +(ShiftAllLeftInt16x32 x (MOVQconst [c])) => (VPSLLW512const [uint8(c)] x) (ShiftAllLeftInt16x32 x y) => (VPSLLW512 x y) -(ShiftAllLeftInt32x4 x (MOVQconst [c])) => (VPSLLD128const [int8(c)] x) +(ShiftAllLeftInt32x4 x (MOVQconst [c])) => (VPSLLD128const [uint8(c)] x) (ShiftAllLeftInt32x4 x y) => (VPSLLD128 x y) -(ShiftAllLeftInt32x8 x (MOVQconst [c])) => (VPSLLD256const [int8(c)] x) +(ShiftAllLeftInt32x8 x (MOVQconst [c])) => (VPSLLD256const [uint8(c)] x) (ShiftAllLeftInt32x8 x y) => (VPSLLD256 x y) -(ShiftAllLeftInt32x16 x (MOVQconst [c])) => (VPSLLD512const [int8(c)] x) +(ShiftAllLeftInt32x16 x (MOVQconst [c])) => (VPSLLD512const [uint8(c)] x) (ShiftAllLeftInt32x16 x y) => (VPSLLD512 x y) -(ShiftAllLeftInt64x2 x (MOVQconst [c])) => (VPSLLQ128const [int8(c)] x) 
+(ShiftAllLeftInt64x2 x (MOVQconst [c])) => (VPSLLQ128const [uint8(c)] x) (ShiftAllLeftInt64x2 x y) => (VPSLLQ128 x y) -(ShiftAllLeftInt64x4 x (MOVQconst [c])) => (VPSLLQ256const [int8(c)] x) +(ShiftAllLeftInt64x4 x (MOVQconst [c])) => (VPSLLQ256const [uint8(c)] x) (ShiftAllLeftInt64x4 x y) => (VPSLLQ256 x y) -(ShiftAllLeftInt64x8 x (MOVQconst [c])) => (VPSLLQ512const [int8(c)] x) +(ShiftAllLeftInt64x8 x (MOVQconst [c])) => (VPSLLQ512const [uint8(c)] x) (ShiftAllLeftInt64x8 x y) => (VPSLLQ512 x y) -(ShiftAllLeftUint16x8 x (MOVQconst [c])) => (VPSLLW128const [int8(c)] x) +(ShiftAllLeftUint16x8 x (MOVQconst [c])) => (VPSLLW128const [uint8(c)] x) (ShiftAllLeftUint16x8 x y) => (VPSLLW128 x y) -(ShiftAllLeftUint16x16 x (MOVQconst [c])) => (VPSLLW256const [int8(c)] x) +(ShiftAllLeftUint16x16 x (MOVQconst [c])) => (VPSLLW256const [uint8(c)] x) (ShiftAllLeftUint16x16 x y) => (VPSLLW256 x y) -(ShiftAllLeftUint16x32 x (MOVQconst [c])) => (VPSLLW512const [int8(c)] x) +(ShiftAllLeftUint16x32 x (MOVQconst [c])) => (VPSLLW512const [uint8(c)] x) (ShiftAllLeftUint16x32 x y) => (VPSLLW512 x y) -(ShiftAllLeftUint32x4 x (MOVQconst [c])) => (VPSLLD128const [int8(c)] x) +(ShiftAllLeftUint32x4 x (MOVQconst [c])) => (VPSLLD128const [uint8(c)] x) (ShiftAllLeftUint32x4 x y) => (VPSLLD128 x y) -(ShiftAllLeftUint32x8 x (MOVQconst [c])) => (VPSLLD256const [int8(c)] x) +(ShiftAllLeftUint32x8 x (MOVQconst [c])) => (VPSLLD256const [uint8(c)] x) (ShiftAllLeftUint32x8 x y) => (VPSLLD256 x y) -(ShiftAllLeftUint32x16 x (MOVQconst [c])) => (VPSLLD512const [int8(c)] x) +(ShiftAllLeftUint32x16 x (MOVQconst [c])) => (VPSLLD512const [uint8(c)] x) (ShiftAllLeftUint32x16 x y) => (VPSLLD512 x y) -(ShiftAllLeftUint64x2 x (MOVQconst [c])) => (VPSLLQ128const [int8(c)] x) +(ShiftAllLeftUint64x2 x (MOVQconst [c])) => (VPSLLQ128const [uint8(c)] x) (ShiftAllLeftUint64x2 x y) => (VPSLLQ128 x y) -(ShiftAllLeftUint64x4 x (MOVQconst [c])) => (VPSLLQ256const [int8(c)] x) +(ShiftAllLeftUint64x4 x (MOVQconst [c])) => (VPSLLQ256const [uint8(c)] x) (ShiftAllLeftUint64x4 x y) => (VPSLLQ256 x y) -(ShiftAllLeftUint64x8 x (MOVQconst [c])) => (VPSLLQ512const [int8(c)] x) +(ShiftAllLeftUint64x8 x (MOVQconst [c])) => (VPSLLQ512const [uint8(c)] x) (ShiftAllLeftUint64x8 x y) => (VPSLLQ512 x y) (ShiftAllLeftConcatInt16x8 ...) => (VPSHLDW128 ...) (ShiftAllLeftConcatInt16x16 ...) => (VPSHLDW256 ...) 
@@ -1510,77 +1510,77 @@ (ShiftAllLeftConcatMaskedUint64x2 [a] x y mask) => (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) (ShiftAllLeftConcatMaskedUint64x4 [a] x y mask) => (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) (ShiftAllLeftConcatMaskedUint64x8 [a] x y mask) => (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) -(ShiftAllLeftMaskedInt16x8 x (MOVQconst [c]) mask) => (VPSLLWMasked128const [int8(c)] x (VPMOVVec16x8ToM mask)) +(ShiftAllLeftMaskedInt16x8 x (MOVQconst [c]) mask) => (VPSLLWMasked128const [uint8(c)] x (VPMOVVec16x8ToM mask)) (ShiftAllLeftMaskedInt16x8 x y mask) => (VPSLLWMasked128 x y (VPMOVVec16x8ToM mask)) -(ShiftAllLeftMaskedInt16x16 x (MOVQconst [c]) mask) => (VPSLLWMasked256const [int8(c)] x (VPMOVVec16x16ToM mask)) +(ShiftAllLeftMaskedInt16x16 x (MOVQconst [c]) mask) => (VPSLLWMasked256const [uint8(c)] x (VPMOVVec16x16ToM mask)) (ShiftAllLeftMaskedInt16x16 x y mask) => (VPSLLWMasked256 x y (VPMOVVec16x16ToM mask)) -(ShiftAllLeftMaskedInt16x32 x (MOVQconst [c]) mask) => (VPSLLWMasked512const [int8(c)] x (VPMOVVec16x32ToM mask)) +(ShiftAllLeftMaskedInt16x32 x (MOVQconst [c]) mask) => (VPSLLWMasked512const [uint8(c)] x (VPMOVVec16x32ToM mask)) (ShiftAllLeftMaskedInt16x32 x y mask) => (VPSLLWMasked512 x y (VPMOVVec16x32ToM mask)) -(ShiftAllLeftMaskedInt32x4 x (MOVQconst [c]) mask) => (VPSLLDMasked128const [int8(c)] x (VPMOVVec32x4ToM mask)) +(ShiftAllLeftMaskedInt32x4 x (MOVQconst [c]) mask) => (VPSLLDMasked128const [uint8(c)] x (VPMOVVec32x4ToM mask)) (ShiftAllLeftMaskedInt32x4 x y mask) => (VPSLLDMasked128 x y (VPMOVVec32x4ToM mask)) -(ShiftAllLeftMaskedInt32x8 x (MOVQconst [c]) mask) => (VPSLLDMasked256const [int8(c)] x (VPMOVVec32x8ToM mask)) +(ShiftAllLeftMaskedInt32x8 x (MOVQconst [c]) mask) => (VPSLLDMasked256const [uint8(c)] x (VPMOVVec32x8ToM mask)) (ShiftAllLeftMaskedInt32x8 x y mask) => (VPSLLDMasked256 x y (VPMOVVec32x8ToM mask)) -(ShiftAllLeftMaskedInt32x16 x (MOVQconst [c]) mask) => (VPSLLDMasked512const [int8(c)] x (VPMOVVec32x16ToM mask)) +(ShiftAllLeftMaskedInt32x16 x (MOVQconst [c]) mask) => (VPSLLDMasked512const [uint8(c)] x (VPMOVVec32x16ToM mask)) (ShiftAllLeftMaskedInt32x16 x y mask) => (VPSLLDMasked512 x y (VPMOVVec32x16ToM mask)) -(ShiftAllLeftMaskedInt64x2 x (MOVQconst [c]) mask) => (VPSLLQMasked128const [int8(c)] x (VPMOVVec64x2ToM mask)) +(ShiftAllLeftMaskedInt64x2 x (MOVQconst [c]) mask) => (VPSLLQMasked128const [uint8(c)] x (VPMOVVec64x2ToM mask)) (ShiftAllLeftMaskedInt64x2 x y mask) => (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) -(ShiftAllLeftMaskedInt64x4 x (MOVQconst [c]) mask) => (VPSLLQMasked256const [int8(c)] x (VPMOVVec64x4ToM mask)) +(ShiftAllLeftMaskedInt64x4 x (MOVQconst [c]) mask) => (VPSLLQMasked256const [uint8(c)] x (VPMOVVec64x4ToM mask)) (ShiftAllLeftMaskedInt64x4 x y mask) => (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) -(ShiftAllLeftMaskedInt64x8 x (MOVQconst [c]) mask) => (VPSLLQMasked512const [int8(c)] x (VPMOVVec64x8ToM mask)) +(ShiftAllLeftMaskedInt64x8 x (MOVQconst [c]) mask) => (VPSLLQMasked512const [uint8(c)] x (VPMOVVec64x8ToM mask)) (ShiftAllLeftMaskedInt64x8 x y mask) => (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) -(ShiftAllLeftMaskedUint16x8 x (MOVQconst [c]) mask) => (VPSLLWMasked128const [int8(c)] x (VPMOVVec16x8ToM mask)) +(ShiftAllLeftMaskedUint16x8 x (MOVQconst [c]) mask) => (VPSLLWMasked128const [uint8(c)] x (VPMOVVec16x8ToM mask)) (ShiftAllLeftMaskedUint16x8 x y mask) => (VPSLLWMasked128 x y (VPMOVVec16x8ToM mask)) -(ShiftAllLeftMaskedUint16x16 x (MOVQconst [c]) mask) => (VPSLLWMasked256const 
[int8(c)] x (VPMOVVec16x16ToM mask)) +(ShiftAllLeftMaskedUint16x16 x (MOVQconst [c]) mask) => (VPSLLWMasked256const [uint8(c)] x (VPMOVVec16x16ToM mask)) (ShiftAllLeftMaskedUint16x16 x y mask) => (VPSLLWMasked256 x y (VPMOVVec16x16ToM mask)) -(ShiftAllLeftMaskedUint16x32 x (MOVQconst [c]) mask) => (VPSLLWMasked512const [int8(c)] x (VPMOVVec16x32ToM mask)) +(ShiftAllLeftMaskedUint16x32 x (MOVQconst [c]) mask) => (VPSLLWMasked512const [uint8(c)] x (VPMOVVec16x32ToM mask)) (ShiftAllLeftMaskedUint16x32 x y mask) => (VPSLLWMasked512 x y (VPMOVVec16x32ToM mask)) -(ShiftAllLeftMaskedUint32x4 x (MOVQconst [c]) mask) => (VPSLLDMasked128const [int8(c)] x (VPMOVVec32x4ToM mask)) +(ShiftAllLeftMaskedUint32x4 x (MOVQconst [c]) mask) => (VPSLLDMasked128const [uint8(c)] x (VPMOVVec32x4ToM mask)) (ShiftAllLeftMaskedUint32x4 x y mask) => (VPSLLDMasked128 x y (VPMOVVec32x4ToM mask)) -(ShiftAllLeftMaskedUint32x8 x (MOVQconst [c]) mask) => (VPSLLDMasked256const [int8(c)] x (VPMOVVec32x8ToM mask)) +(ShiftAllLeftMaskedUint32x8 x (MOVQconst [c]) mask) => (VPSLLDMasked256const [uint8(c)] x (VPMOVVec32x8ToM mask)) (ShiftAllLeftMaskedUint32x8 x y mask) => (VPSLLDMasked256 x y (VPMOVVec32x8ToM mask)) -(ShiftAllLeftMaskedUint32x16 x (MOVQconst [c]) mask) => (VPSLLDMasked512const [int8(c)] x (VPMOVVec32x16ToM mask)) +(ShiftAllLeftMaskedUint32x16 x (MOVQconst [c]) mask) => (VPSLLDMasked512const [uint8(c)] x (VPMOVVec32x16ToM mask)) (ShiftAllLeftMaskedUint32x16 x y mask) => (VPSLLDMasked512 x y (VPMOVVec32x16ToM mask)) -(ShiftAllLeftMaskedUint64x2 x (MOVQconst [c]) mask) => (VPSLLQMasked128const [int8(c)] x (VPMOVVec64x2ToM mask)) +(ShiftAllLeftMaskedUint64x2 x (MOVQconst [c]) mask) => (VPSLLQMasked128const [uint8(c)] x (VPMOVVec64x2ToM mask)) (ShiftAllLeftMaskedUint64x2 x y mask) => (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) -(ShiftAllLeftMaskedUint64x4 x (MOVQconst [c]) mask) => (VPSLLQMasked256const [int8(c)] x (VPMOVVec64x4ToM mask)) +(ShiftAllLeftMaskedUint64x4 x (MOVQconst [c]) mask) => (VPSLLQMasked256const [uint8(c)] x (VPMOVVec64x4ToM mask)) (ShiftAllLeftMaskedUint64x4 x y mask) => (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) -(ShiftAllLeftMaskedUint64x8 x (MOVQconst [c]) mask) => (VPSLLQMasked512const [int8(c)] x (VPMOVVec64x8ToM mask)) +(ShiftAllLeftMaskedUint64x8 x (MOVQconst [c]) mask) => (VPSLLQMasked512const [uint8(c)] x (VPMOVVec64x8ToM mask)) (ShiftAllLeftMaskedUint64x8 x y mask) => (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) -(ShiftAllRightInt16x8 x (MOVQconst [c])) => (VPSRAW128const [int8(c)] x) +(ShiftAllRightInt16x8 x (MOVQconst [c])) => (VPSRAW128const [uint8(c)] x) (ShiftAllRightInt16x8 x y) => (VPSRAW128 x y) -(ShiftAllRightInt16x16 x (MOVQconst [c])) => (VPSRAW256const [int8(c)] x) +(ShiftAllRightInt16x16 x (MOVQconst [c])) => (VPSRAW256const [uint8(c)] x) (ShiftAllRightInt16x16 x y) => (VPSRAW256 x y) -(ShiftAllRightInt16x32 x (MOVQconst [c])) => (VPSRAW512const [int8(c)] x) +(ShiftAllRightInt16x32 x (MOVQconst [c])) => (VPSRAW512const [uint8(c)] x) (ShiftAllRightInt16x32 x y) => (VPSRAW512 x y) -(ShiftAllRightInt32x4 x (MOVQconst [c])) => (VPSRAD128const [int8(c)] x) +(ShiftAllRightInt32x4 x (MOVQconst [c])) => (VPSRAD128const [uint8(c)] x) (ShiftAllRightInt32x4 x y) => (VPSRAD128 x y) -(ShiftAllRightInt32x8 x (MOVQconst [c])) => (VPSRAD256const [int8(c)] x) +(ShiftAllRightInt32x8 x (MOVQconst [c])) => (VPSRAD256const [uint8(c)] x) (ShiftAllRightInt32x8 x y) => (VPSRAD256 x y) -(ShiftAllRightInt32x16 x (MOVQconst [c])) => (VPSRAD512const [int8(c)] x) +(ShiftAllRightInt32x16 x (MOVQconst 
[c])) => (VPSRAD512const [uint8(c)] x) (ShiftAllRightInt32x16 x y) => (VPSRAD512 x y) -(ShiftAllRightInt64x2 x (MOVQconst [c])) => (VPSRAQ128const [int8(c)] x) +(ShiftAllRightInt64x2 x (MOVQconst [c])) => (VPSRAQ128const [uint8(c)] x) (ShiftAllRightInt64x2 x y) => (VPSRAQ128 x y) -(ShiftAllRightInt64x4 x (MOVQconst [c])) => (VPSRAQ256const [int8(c)] x) +(ShiftAllRightInt64x4 x (MOVQconst [c])) => (VPSRAQ256const [uint8(c)] x) (ShiftAllRightInt64x4 x y) => (VPSRAQ256 x y) -(ShiftAllRightInt64x8 x (MOVQconst [c])) => (VPSRAQ512const [int8(c)] x) +(ShiftAllRightInt64x8 x (MOVQconst [c])) => (VPSRAQ512const [uint8(c)] x) (ShiftAllRightInt64x8 x y) => (VPSRAQ512 x y) -(ShiftAllRightUint16x8 x (MOVQconst [c])) => (VPSRLW128const [int8(c)] x) +(ShiftAllRightUint16x8 x (MOVQconst [c])) => (VPSRLW128const [uint8(c)] x) (ShiftAllRightUint16x8 x y) => (VPSRLW128 x y) -(ShiftAllRightUint16x16 x (MOVQconst [c])) => (VPSRLW256const [int8(c)] x) +(ShiftAllRightUint16x16 x (MOVQconst [c])) => (VPSRLW256const [uint8(c)] x) (ShiftAllRightUint16x16 x y) => (VPSRLW256 x y) -(ShiftAllRightUint16x32 x (MOVQconst [c])) => (VPSRLW512const [int8(c)] x) +(ShiftAllRightUint16x32 x (MOVQconst [c])) => (VPSRLW512const [uint8(c)] x) (ShiftAllRightUint16x32 x y) => (VPSRLW512 x y) -(ShiftAllRightUint32x4 x (MOVQconst [c])) => (VPSRLD128const [int8(c)] x) +(ShiftAllRightUint32x4 x (MOVQconst [c])) => (VPSRLD128const [uint8(c)] x) (ShiftAllRightUint32x4 x y) => (VPSRLD128 x y) -(ShiftAllRightUint32x8 x (MOVQconst [c])) => (VPSRLD256const [int8(c)] x) +(ShiftAllRightUint32x8 x (MOVQconst [c])) => (VPSRLD256const [uint8(c)] x) (ShiftAllRightUint32x8 x y) => (VPSRLD256 x y) -(ShiftAllRightUint32x16 x (MOVQconst [c])) => (VPSRLD512const [int8(c)] x) +(ShiftAllRightUint32x16 x (MOVQconst [c])) => (VPSRLD512const [uint8(c)] x) (ShiftAllRightUint32x16 x y) => (VPSRLD512 x y) -(ShiftAllRightUint64x2 x (MOVQconst [c])) => (VPSRLQ128const [int8(c)] x) +(ShiftAllRightUint64x2 x (MOVQconst [c])) => (VPSRLQ128const [uint8(c)] x) (ShiftAllRightUint64x2 x y) => (VPSRLQ128 x y) -(ShiftAllRightUint64x4 x (MOVQconst [c])) => (VPSRLQ256const [int8(c)] x) +(ShiftAllRightUint64x4 x (MOVQconst [c])) => (VPSRLQ256const [uint8(c)] x) (ShiftAllRightUint64x4 x y) => (VPSRLQ256 x y) -(ShiftAllRightUint64x8 x (MOVQconst [c])) => (VPSRLQ512const [int8(c)] x) +(ShiftAllRightUint64x8 x (MOVQconst [c])) => (VPSRLQ512const [uint8(c)] x) (ShiftAllRightUint64x8 x y) => (VPSRLQ512 x y) (ShiftAllRightConcatInt16x8 ...) => (VPSHRDW128 ...) (ShiftAllRightConcatInt16x16 ...) => (VPSHRDW256 ...) 
@@ -1618,41 +1618,41 @@ (ShiftAllRightConcatMaskedUint64x2 [a] x y mask) => (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) (ShiftAllRightConcatMaskedUint64x4 [a] x y mask) => (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) (ShiftAllRightConcatMaskedUint64x8 [a] x y mask) => (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) -(ShiftAllRightMaskedInt16x8 x (MOVQconst [c]) mask) => (VPSRAWMasked128const [int8(c)] x (VPMOVVec16x8ToM mask)) +(ShiftAllRightMaskedInt16x8 x (MOVQconst [c]) mask) => (VPSRAWMasked128const [uint8(c)] x (VPMOVVec16x8ToM mask)) (ShiftAllRightMaskedInt16x8 x y mask) => (VPSRAWMasked128 x y (VPMOVVec16x8ToM mask)) -(ShiftAllRightMaskedInt16x16 x (MOVQconst [c]) mask) => (VPSRAWMasked256const [int8(c)] x (VPMOVVec16x16ToM mask)) +(ShiftAllRightMaskedInt16x16 x (MOVQconst [c]) mask) => (VPSRAWMasked256const [uint8(c)] x (VPMOVVec16x16ToM mask)) (ShiftAllRightMaskedInt16x16 x y mask) => (VPSRAWMasked256 x y (VPMOVVec16x16ToM mask)) -(ShiftAllRightMaskedInt16x32 x (MOVQconst [c]) mask) => (VPSRAWMasked512const [int8(c)] x (VPMOVVec16x32ToM mask)) +(ShiftAllRightMaskedInt16x32 x (MOVQconst [c]) mask) => (VPSRAWMasked512const [uint8(c)] x (VPMOVVec16x32ToM mask)) (ShiftAllRightMaskedInt16x32 x y mask) => (VPSRAWMasked512 x y (VPMOVVec16x32ToM mask)) -(ShiftAllRightMaskedInt32x4 x (MOVQconst [c]) mask) => (VPSRADMasked128const [int8(c)] x (VPMOVVec32x4ToM mask)) +(ShiftAllRightMaskedInt32x4 x (MOVQconst [c]) mask) => (VPSRADMasked128const [uint8(c)] x (VPMOVVec32x4ToM mask)) (ShiftAllRightMaskedInt32x4 x y mask) => (VPSRADMasked128 x y (VPMOVVec32x4ToM mask)) -(ShiftAllRightMaskedInt32x8 x (MOVQconst [c]) mask) => (VPSRADMasked256const [int8(c)] x (VPMOVVec32x8ToM mask)) +(ShiftAllRightMaskedInt32x8 x (MOVQconst [c]) mask) => (VPSRADMasked256const [uint8(c)] x (VPMOVVec32x8ToM mask)) (ShiftAllRightMaskedInt32x8 x y mask) => (VPSRADMasked256 x y (VPMOVVec32x8ToM mask)) -(ShiftAllRightMaskedInt32x16 x (MOVQconst [c]) mask) => (VPSRADMasked512const [int8(c)] x (VPMOVVec32x16ToM mask)) +(ShiftAllRightMaskedInt32x16 x (MOVQconst [c]) mask) => (VPSRADMasked512const [uint8(c)] x (VPMOVVec32x16ToM mask)) (ShiftAllRightMaskedInt32x16 x y mask) => (VPSRADMasked512 x y (VPMOVVec32x16ToM mask)) -(ShiftAllRightMaskedInt64x2 x (MOVQconst [c]) mask) => (VPSRAQMasked128const [int8(c)] x (VPMOVVec64x2ToM mask)) +(ShiftAllRightMaskedInt64x2 x (MOVQconst [c]) mask) => (VPSRAQMasked128const [uint8(c)] x (VPMOVVec64x2ToM mask)) (ShiftAllRightMaskedInt64x2 x y mask) => (VPSRAQMasked128 x y (VPMOVVec64x2ToM mask)) -(ShiftAllRightMaskedInt64x4 x (MOVQconst [c]) mask) => (VPSRAQMasked256const [int8(c)] x (VPMOVVec64x4ToM mask)) +(ShiftAllRightMaskedInt64x4 x (MOVQconst [c]) mask) => (VPSRAQMasked256const [uint8(c)] x (VPMOVVec64x4ToM mask)) (ShiftAllRightMaskedInt64x4 x y mask) => (VPSRAQMasked256 x y (VPMOVVec64x4ToM mask)) -(ShiftAllRightMaskedInt64x8 x (MOVQconst [c]) mask) => (VPSRAQMasked512const [int8(c)] x (VPMOVVec64x8ToM mask)) +(ShiftAllRightMaskedInt64x8 x (MOVQconst [c]) mask) => (VPSRAQMasked512const [uint8(c)] x (VPMOVVec64x8ToM mask)) (ShiftAllRightMaskedInt64x8 x y mask) => (VPSRAQMasked512 x y (VPMOVVec64x8ToM mask)) -(ShiftAllRightMaskedUint16x8 x (MOVQconst [c]) mask) => (VPSRLWMasked128const [int8(c)] x (VPMOVVec16x8ToM mask)) +(ShiftAllRightMaskedUint16x8 x (MOVQconst [c]) mask) => (VPSRLWMasked128const [uint8(c)] x (VPMOVVec16x8ToM mask)) (ShiftAllRightMaskedUint16x8 x y mask) => (VPSRLWMasked128 x y (VPMOVVec16x8ToM mask)) -(ShiftAllRightMaskedUint16x16 x (MOVQconst [c]) 
mask) => (VPSRLWMasked256const [int8(c)] x (VPMOVVec16x16ToM mask)) +(ShiftAllRightMaskedUint16x16 x (MOVQconst [c]) mask) => (VPSRLWMasked256const [uint8(c)] x (VPMOVVec16x16ToM mask)) (ShiftAllRightMaskedUint16x16 x y mask) => (VPSRLWMasked256 x y (VPMOVVec16x16ToM mask)) -(ShiftAllRightMaskedUint16x32 x (MOVQconst [c]) mask) => (VPSRLWMasked512const [int8(c)] x (VPMOVVec16x32ToM mask)) +(ShiftAllRightMaskedUint16x32 x (MOVQconst [c]) mask) => (VPSRLWMasked512const [uint8(c)] x (VPMOVVec16x32ToM mask)) (ShiftAllRightMaskedUint16x32 x y mask) => (VPSRLWMasked512 x y (VPMOVVec16x32ToM mask)) -(ShiftAllRightMaskedUint32x4 x (MOVQconst [c]) mask) => (VPSRLDMasked128const [int8(c)] x (VPMOVVec32x4ToM mask)) +(ShiftAllRightMaskedUint32x4 x (MOVQconst [c]) mask) => (VPSRLDMasked128const [uint8(c)] x (VPMOVVec32x4ToM mask)) (ShiftAllRightMaskedUint32x4 x y mask) => (VPSRLDMasked128 x y (VPMOVVec32x4ToM mask)) -(ShiftAllRightMaskedUint32x8 x (MOVQconst [c]) mask) => (VPSRLDMasked256const [int8(c)] x (VPMOVVec32x8ToM mask)) +(ShiftAllRightMaskedUint32x8 x (MOVQconst [c]) mask) => (VPSRLDMasked256const [uint8(c)] x (VPMOVVec32x8ToM mask)) (ShiftAllRightMaskedUint32x8 x y mask) => (VPSRLDMasked256 x y (VPMOVVec32x8ToM mask)) -(ShiftAllRightMaskedUint32x16 x (MOVQconst [c]) mask) => (VPSRLDMasked512const [int8(c)] x (VPMOVVec32x16ToM mask)) +(ShiftAllRightMaskedUint32x16 x (MOVQconst [c]) mask) => (VPSRLDMasked512const [uint8(c)] x (VPMOVVec32x16ToM mask)) (ShiftAllRightMaskedUint32x16 x y mask) => (VPSRLDMasked512 x y (VPMOVVec32x16ToM mask)) -(ShiftAllRightMaskedUint64x2 x (MOVQconst [c]) mask) => (VPSRLQMasked128const [int8(c)] x (VPMOVVec64x2ToM mask)) +(ShiftAllRightMaskedUint64x2 x (MOVQconst [c]) mask) => (VPSRLQMasked128const [uint8(c)] x (VPMOVVec64x2ToM mask)) (ShiftAllRightMaskedUint64x2 x y mask) => (VPSRLQMasked128 x y (VPMOVVec64x2ToM mask)) -(ShiftAllRightMaskedUint64x4 x (MOVQconst [c]) mask) => (VPSRLQMasked256const [int8(c)] x (VPMOVVec64x4ToM mask)) +(ShiftAllRightMaskedUint64x4 x (MOVQconst [c]) mask) => (VPSRLQMasked256const [uint8(c)] x (VPMOVVec64x4ToM mask)) (ShiftAllRightMaskedUint64x4 x y mask) => (VPSRLQMasked256 x y (VPMOVVec64x4ToM mask)) -(ShiftAllRightMaskedUint64x8 x (MOVQconst [c]) mask) => (VPSRLQMasked512const [int8(c)] x (VPMOVVec64x8ToM mask)) +(ShiftAllRightMaskedUint64x8 x (MOVQconst [c]) mask) => (VPSRLQMasked512const [uint8(c)] x (VPMOVVec64x8ToM mask)) (ShiftAllRightMaskedUint64x8 x y mask) => (VPSRLQMasked512 x y (VPMOVVec64x8ToM mask)) (ShiftLeftInt16x8 ...) => (VPSLLVW128 ...) (ShiftLeftInt16x16 ...) => (VPSLLVW256 ...) 
diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 665372f79d9aa7..164ca7a344487d 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -861,235 +861,235 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VSUBPSMasked128", argLength: 3, reg: w2kw, asm: "VSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VSUBPSMasked256", argLength: 3, reg: w2kw, asm: "VSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VSUBPSMasked512", argLength: 3, reg: w2kw, asm: "VSUBPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VROUNDPS128", argLength: 1, reg: v11, asm: "VROUNDPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VROUNDPS256", argLength: 1, reg: v11, asm: "VROUNDPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VROUNDPD128", argLength: 1, reg: v11, asm: "VROUNDPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VROUNDPD256", argLength: 1, reg: v11, asm: "VROUNDPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRNDSCALEPS128", argLength: 1, reg: w11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRNDSCALEPS256", argLength: 1, reg: w11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRNDSCALEPS512", argLength: 1, reg: w11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRNDSCALEPD128", argLength: 1, reg: w11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRNDSCALEPD256", argLength: 1, reg: w11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRNDSCALEPD512", argLength: 1, reg: w11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRNDSCALEPSMasked128", argLength: 2, reg: wkw, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRNDSCALEPSMasked256", argLength: 2, reg: wkw, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRNDSCALEPSMasked512", argLength: 2, reg: wkw, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRNDSCALEPDMasked128", argLength: 2, reg: wkw, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRNDSCALEPDMasked256", argLength: 2, reg: wkw, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRNDSCALEPDMasked512", argLength: 2, reg: wkw, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VREDUCEPS128", argLength: 1, reg: w11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VREDUCEPS256", argLength: 1, reg: w11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VREDUCEPS512", argLength: 1, reg: w11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VREDUCEPD128", argLength: 1, reg: w11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: 
false}, - {name: "VREDUCEPD256", argLength: 1, reg: w11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VREDUCEPD512", argLength: 1, reg: w11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VREDUCEPSMasked128", argLength: 2, reg: wkw, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VREDUCEPSMasked256", argLength: 2, reg: wkw, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VREDUCEPSMasked512", argLength: 2, reg: wkw, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VREDUCEPDMasked128", argLength: 2, reg: wkw, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VREDUCEPDMasked256", argLength: 2, reg: wkw, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VREDUCEPDMasked512", argLength: 2, reg: wkw, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VCMPPS128", argLength: 2, reg: v21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VCMPPS256", argLength: 2, reg: v21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VCMPPS512", argLength: 2, reg: w2k, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VCMPPD128", argLength: 2, reg: v21, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VCMPPD256", argLength: 2, reg: v21, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VCMPPD512", argLength: 2, reg: w2k, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VCMPPSMasked128", argLength: 3, reg: w2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VCMPPSMasked256", argLength: 3, reg: w2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VCMPPSMasked512", argLength: 3, reg: w2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VCMPPDMasked128", argLength: 3, reg: w2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VCMPPDMasked256", argLength: 3, reg: w2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VCMPPDMasked512", argLength: 3, reg: w2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPBMasked128", argLength: 3, reg: w2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPBMasked256", argLength: 3, reg: w2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPBMasked512", argLength: 3, reg: w2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPWMasked128", argLength: 3, reg: w2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPWMasked256", argLength: 3, reg: w2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPWMasked512", argLength: 3, reg: w2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: 
"VPCMPDMasked128", argLength: 3, reg: w2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPDMasked256", argLength: 3, reg: w2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPDMasked512", argLength: 3, reg: w2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPQMasked128", argLength: 3, reg: w2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPQMasked256", argLength: 3, reg: w2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPQMasked512", argLength: 3, reg: w2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUBMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUBMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUBMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUWMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUWMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUWMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUDMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUDMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUDMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VGF2P8AFFINEQB128", argLength: 2, reg: w21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VGF2P8AFFINEQB256", argLength: 2, reg: w21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VGF2P8AFFINEQB512", argLength: 2, reg: w21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VGF2P8AFFINEINVQB128", argLength: 2, reg: w21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VGF2P8AFFINEINVQB256", argLength: 2, reg: w21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VGF2P8AFFINEINVQB512", argLength: 2, reg: w21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VGF2P8AFFINEINVQBMasked128", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VGF2P8AFFINEINVQBMasked256", argLength: 
3, reg: w2kw, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VGF2P8AFFINEINVQBMasked512", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VGF2P8AFFINEQBMasked128", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VGF2P8AFFINEQBMasked256", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VGF2P8AFFINEQBMasked512", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPEXTRB128", argLength: 1, reg: wgp, asm: "VPEXTRB", aux: "Int8", commutative: false, typ: "int8", resultInArg0: false}, - {name: "VPEXTRW128", argLength: 1, reg: wgp, asm: "VPEXTRW", aux: "Int8", commutative: false, typ: "int16", resultInArg0: false}, - {name: "VPEXTRD128", argLength: 1, reg: vgp, asm: "VPEXTRD", aux: "Int8", commutative: false, typ: "int32", resultInArg0: false}, - {name: "VPEXTRQ128", argLength: 1, reg: vgp, asm: "VPEXTRQ", aux: "Int8", commutative: false, typ: "int64", resultInArg0: false}, - {name: "VEXTRACTF128128", argLength: 1, reg: v11, asm: "VEXTRACTF128", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VEXTRACTF64X4256", argLength: 1, reg: w11, asm: "VEXTRACTF64X4", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VEXTRACTI128128", argLength: 1, reg: v11, asm: "VEXTRACTI128", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VEXTRACTI64X4256", argLength: 1, reg: w11, asm: "VEXTRACTI64X4", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPUB128", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUB256", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUB512", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUW128", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUW256", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUW512", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUD128", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUD256", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUD512", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQ128", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQ256", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQ512", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPB128", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: 
"VPCMPB256", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPB512", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPW128", argLength: 2, reg: w2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPW256", argLength: 2, reg: w2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPW512", argLength: 2, reg: w2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPD128", argLength: 2, reg: w2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPD256", argLength: 2, reg: w2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPD512", argLength: 2, reg: w2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPQ128", argLength: 2, reg: w2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPQ256", argLength: 2, reg: w2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPQ512", argLength: 2, reg: w2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPROLD128", argLength: 1, reg: w11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPROLD256", argLength: 1, reg: w11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPROLD512", argLength: 1, reg: w11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPROLQ128", argLength: 1, reg: w11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPROLQ256", argLength: 1, reg: w11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPROLQ512", argLength: 1, reg: w11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPROLDMasked128", argLength: 2, reg: wkw, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPROLDMasked256", argLength: 2, reg: wkw, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPROLDMasked512", argLength: 2, reg: wkw, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPROLQMasked128", argLength: 2, reg: wkw, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPROLQMasked256", argLength: 2, reg: wkw, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPROLQMasked512", argLength: 2, reg: wkw, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORD128", argLength: 1, reg: w11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORD256", argLength: 1, reg: w11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORD512", argLength: 1, reg: w11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORQ128", argLength: 1, reg: w11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: 
"VPRORQ256", argLength: 1, reg: w11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORQ512", argLength: 1, reg: w11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORDMasked128", argLength: 2, reg: wkw, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORDMasked256", argLength: 2, reg: wkw, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORDMasked512", argLength: 2, reg: wkw, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORQMasked128", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORQMasked256", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORQMasked512", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPINSRB128", argLength: 2, reg: vgpv, asm: "VPINSRB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPINSRW128", argLength: 2, reg: vgpv, asm: "VPINSRW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPINSRD128", argLength: 2, reg: vgpv, asm: "VPINSRD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPINSRQ128", argLength: 2, reg: vgpv, asm: "VPINSRQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VINSERTF128256", argLength: 2, reg: v21, asm: "VINSERTF128", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VINSERTF64X4512", argLength: 2, reg: w21, asm: "VINSERTF64X4", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VINSERTI128256", argLength: 2, reg: v21, asm: "VINSERTI128", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VINSERTI64X4512", argLength: 2, reg: w21, asm: "VINSERTI64X4", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDW128", argLength: 2, reg: w21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDW256", argLength: 2, reg: w21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDW512", argLength: 2, reg: w21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDD128", argLength: 2, reg: w21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDD256", argLength: 2, reg: w21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDD512", argLength: 2, reg: w21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDQ128", argLength: 2, reg: w21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDQ256", argLength: 2, reg: w21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDQ512", argLength: 2, reg: w21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDWMasked128", argLength: 3, reg: w2kw, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDWMasked256", 
argLength: 3, reg: w2kw, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDWMasked512", argLength: 3, reg: w2kw, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDDMasked128", argLength: 3, reg: w2kw, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDDMasked256", argLength: 3, reg: w2kw, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDDMasked512", argLength: 3, reg: w2kw, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDQMasked128", argLength: 3, reg: w2kw, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDQMasked256", argLength: 3, reg: w2kw, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDQMasked512", argLength: 3, reg: w2kw, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDW128", argLength: 2, reg: w21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDW256", argLength: 2, reg: w21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDW512", argLength: 2, reg: w21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDD128", argLength: 2, reg: w21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDD256", argLength: 2, reg: w21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDD512", argLength: 2, reg: w21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDQ128", argLength: 2, reg: w21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDQ256", argLength: 2, reg: w21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDQ512", argLength: 2, reg: w21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDWMasked128", argLength: 3, reg: w2kw, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDWMasked256", argLength: 3, reg: w2kw, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDWMasked512", argLength: 3, reg: w2kw, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDDMasked128", argLength: 3, reg: w2kw, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDDMasked256", argLength: 3, reg: w2kw, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDDMasked512", argLength: 3, reg: w2kw, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDQMasked128", argLength: 3, reg: w2kw, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDQMasked256", argLength: 3, reg: w2kw, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDQMasked512", argLength: 3, reg: w2kw, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - 
{name: "VPSLLW128const", argLength: 1, reg: v11, asm: "VPSLLW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSLLW256const", argLength: 1, reg: v11, asm: "VPSLLW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSLLW512const", argLength: 1, reg: w11, asm: "VPSLLW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSLLD128const", argLength: 1, reg: v11, asm: "VPSLLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSLLD256const", argLength: 1, reg: v11, asm: "VPSLLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSLLD512const", argLength: 1, reg: w11, asm: "VPSLLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSLLQ128const", argLength: 1, reg: v11, asm: "VPSLLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSLLQ256const", argLength: 1, reg: v11, asm: "VPSLLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSLLQ512const", argLength: 1, reg: w11, asm: "VPSLLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSLLWMasked128const", argLength: 2, reg: wkw, asm: "VPSLLW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSLLWMasked256const", argLength: 2, reg: wkw, asm: "VPSLLW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSLLWMasked512const", argLength: 2, reg: wkw, asm: "VPSLLW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSLLDMasked128const", argLength: 2, reg: wkw, asm: "VPSLLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSLLDMasked256const", argLength: 2, reg: wkw, asm: "VPSLLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSLLDMasked512const", argLength: 2, reg: wkw, asm: "VPSLLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSLLQMasked128const", argLength: 2, reg: wkw, asm: "VPSLLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSLLQMasked256const", argLength: 2, reg: wkw, asm: "VPSLLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSLLQMasked512const", argLength: 2, reg: wkw, asm: "VPSLLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLW128const", argLength: 1, reg: v11, asm: "VPSRLW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLW256const", argLength: 1, reg: v11, asm: "VPSRLW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLW512const", argLength: 1, reg: w11, asm: "VPSRLW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLD128const", argLength: 1, reg: v11, asm: "VPSRLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLD256const", argLength: 1, reg: v11, asm: "VPSRLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLD512const", argLength: 1, reg: w11, asm: "VPSRLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLQ128const", argLength: 1, reg: v11, asm: "VPSRLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLQ256const", argLength: 1, reg: v11, asm: "VPSRLQ", aux: "Int8", commutative: false, 
typ: "Vec256", resultInArg0: false}, - {name: "VPSRLQ512const", argLength: 1, reg: w11, asm: "VPSRLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAW128const", argLength: 1, reg: v11, asm: "VPSRAW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAW256const", argLength: 1, reg: v11, asm: "VPSRAW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAW512const", argLength: 1, reg: w11, asm: "VPSRAW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAD128const", argLength: 1, reg: v11, asm: "VPSRAD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAD256const", argLength: 1, reg: v11, asm: "VPSRAD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAD512const", argLength: 1, reg: w11, asm: "VPSRAD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAQ128const", argLength: 1, reg: w11, asm: "VPSRAQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAQ256const", argLength: 1, reg: w11, asm: "VPSRAQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAQ512const", argLength: 1, reg: w11, asm: "VPSRAQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLWMasked128const", argLength: 2, reg: wkw, asm: "VPSRLW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLWMasked256const", argLength: 2, reg: wkw, asm: "VPSRLW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLWMasked512const", argLength: 2, reg: wkw, asm: "VPSRLW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLDMasked128const", argLength: 2, reg: wkw, asm: "VPSRLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLDMasked256const", argLength: 2, reg: wkw, asm: "VPSRLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLDMasked512const", argLength: 2, reg: wkw, asm: "VPSRLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLQMasked128const", argLength: 2, reg: wkw, asm: "VPSRLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLQMasked256const", argLength: 2, reg: wkw, asm: "VPSRLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLQMasked512const", argLength: 2, reg: wkw, asm: "VPSRLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAWMasked128const", argLength: 2, reg: wkw, asm: "VPSRAW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAWMasked256const", argLength: 2, reg: wkw, asm: "VPSRAW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAWMasked512const", argLength: 2, reg: wkw, asm: "VPSRAW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRADMasked128const", argLength: 2, reg: wkw, asm: "VPSRAD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRADMasked256const", argLength: 2, reg: wkw, asm: "VPSRAD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRADMasked512const", argLength: 2, reg: wkw, asm: "VPSRAD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: 
"VPSRAQMasked128const", argLength: 2, reg: wkw, asm: "VPSRAQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAQMasked256const", argLength: 2, reg: wkw, asm: "VPSRAQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAQMasked512const", argLength: 2, reg: wkw, asm: "VPSRAQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VROUNDPS128", argLength: 1, reg: v11, asm: "VROUNDPS", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VROUNDPS256", argLength: 1, reg: v11, asm: "VROUNDPS", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VROUNDPD128", argLength: 1, reg: v11, asm: "VROUNDPD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VROUNDPD256", argLength: 1, reg: v11, asm: "VROUNDPD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRNDSCALEPS128", argLength: 1, reg: w11, asm: "VRNDSCALEPS", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRNDSCALEPS256", argLength: 1, reg: w11, asm: "VRNDSCALEPS", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRNDSCALEPS512", argLength: 1, reg: w11, asm: "VRNDSCALEPS", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRNDSCALEPD128", argLength: 1, reg: w11, asm: "VRNDSCALEPD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRNDSCALEPD256", argLength: 1, reg: w11, asm: "VRNDSCALEPD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRNDSCALEPD512", argLength: 1, reg: w11, asm: "VRNDSCALEPD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRNDSCALEPSMasked128", argLength: 2, reg: wkw, asm: "VRNDSCALEPS", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRNDSCALEPSMasked256", argLength: 2, reg: wkw, asm: "VRNDSCALEPS", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRNDSCALEPSMasked512", argLength: 2, reg: wkw, asm: "VRNDSCALEPS", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRNDSCALEPDMasked128", argLength: 2, reg: wkw, asm: "VRNDSCALEPD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRNDSCALEPDMasked256", argLength: 2, reg: wkw, asm: "VRNDSCALEPD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRNDSCALEPDMasked512", argLength: 2, reg: wkw, asm: "VRNDSCALEPD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VREDUCEPS128", argLength: 1, reg: w11, asm: "VREDUCEPS", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VREDUCEPS256", argLength: 1, reg: w11, asm: "VREDUCEPS", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPS512", argLength: 1, reg: w11, asm: "VREDUCEPS", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VREDUCEPD128", argLength: 1, reg: w11, asm: "VREDUCEPD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VREDUCEPD256", argLength: 1, reg: w11, asm: "VREDUCEPD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPD512", argLength: 1, reg: w11, asm: "VREDUCEPD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: 
"VREDUCEPSMasked128", argLength: 2, reg: wkw, asm: "VREDUCEPS", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VREDUCEPSMasked256", argLength: 2, reg: wkw, asm: "VREDUCEPS", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPSMasked512", argLength: 2, reg: wkw, asm: "VREDUCEPS", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VREDUCEPDMasked128", argLength: 2, reg: wkw, asm: "VREDUCEPD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VREDUCEPDMasked256", argLength: 2, reg: wkw, asm: "VREDUCEPD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPDMasked512", argLength: 2, reg: wkw, asm: "VREDUCEPD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VCMPPS128", argLength: 2, reg: v21, asm: "VCMPPS", aux: "UInt8", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VCMPPS256", argLength: 2, reg: v21, asm: "VCMPPS", aux: "UInt8", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VCMPPS512", argLength: 2, reg: w2k, asm: "VCMPPS", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPD128", argLength: 2, reg: v21, asm: "VCMPPD", aux: "UInt8", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VCMPPD256", argLength: 2, reg: v21, asm: "VCMPPD", aux: "UInt8", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VCMPPD512", argLength: 2, reg: w2k, asm: "VCMPPD", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPSMasked128", argLength: 3, reg: w2kk, asm: "VCMPPS", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPSMasked256", argLength: 3, reg: w2kk, asm: "VCMPPS", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPSMasked512", argLength: 3, reg: w2kk, asm: "VCMPPS", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPDMasked128", argLength: 3, reg: w2kk, asm: "VCMPPD", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPDMasked256", argLength: 3, reg: w2kk, asm: "VCMPPD", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPDMasked512", argLength: 3, reg: w2kk, asm: "VCMPPD", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPBMasked128", argLength: 3, reg: w2kk, asm: "VPCMPB", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPBMasked256", argLength: 3, reg: w2kk, asm: "VPCMPB", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPBMasked512", argLength: 3, reg: w2kk, asm: "VPCMPB", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPWMasked128", argLength: 3, reg: w2kk, asm: "VPCMPW", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPWMasked256", argLength: 3, reg: w2kk, asm: "VPCMPW", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPWMasked512", argLength: 3, reg: w2kk, asm: "VPCMPW", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPDMasked128", argLength: 3, reg: w2kk, asm: "VPCMPD", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPDMasked256", argLength: 3, reg: w2kk, asm: "VPCMPD", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: 
"VPCMPDMasked512", argLength: 3, reg: w2kk, asm: "VPCMPD", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQMasked128", argLength: 3, reg: w2kk, asm: "VPCMPQ", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQMasked256", argLength: 3, reg: w2kk, asm: "VPCMPQ", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQMasked512", argLength: 3, reg: w2kk, asm: "VPCMPQ", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUBMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUB", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUBMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUB", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUBMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUB", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUWMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUW", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUWMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUW", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUWMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUW", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUDMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUD", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUDMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUD", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUDMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUD", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUQ", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUQ", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUQ", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VGF2P8AFFINEQB128", argLength: 2, reg: w21, asm: "VGF2P8AFFINEQB", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8AFFINEQB256", argLength: 2, reg: w21, asm: "VGF2P8AFFINEQB", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8AFFINEQB512", argLength: 2, reg: w21, asm: "VGF2P8AFFINEQB", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQB128", argLength: 2, reg: w21, asm: "VGF2P8AFFINEINVQB", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQB256", argLength: 2, reg: w21, asm: "VGF2P8AFFINEINVQB", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQB512", argLength: 2, reg: w21, asm: "VGF2P8AFFINEINVQB", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQBMasked128", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEINVQB", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQBMasked256", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEINVQB", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQBMasked512", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEINVQB", aux: "UInt8", commutative: false, typ: 
"Vec512", resultInArg0: false}, + {name: "VGF2P8AFFINEQBMasked128", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8AFFINEQBMasked256", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8AFFINEQBMasked512", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPEXTRB128", argLength: 1, reg: wgp, asm: "VPEXTRB", aux: "UInt8", commutative: false, typ: "int8", resultInArg0: false}, + {name: "VPEXTRW128", argLength: 1, reg: wgp, asm: "VPEXTRW", aux: "UInt8", commutative: false, typ: "int16", resultInArg0: false}, + {name: "VPEXTRD128", argLength: 1, reg: vgp, asm: "VPEXTRD", aux: "UInt8", commutative: false, typ: "int32", resultInArg0: false}, + {name: "VPEXTRQ128", argLength: 1, reg: vgp, asm: "VPEXTRQ", aux: "UInt8", commutative: false, typ: "int64", resultInArg0: false}, + {name: "VEXTRACTF128128", argLength: 1, reg: v11, asm: "VEXTRACTF128", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VEXTRACTF64X4256", argLength: 1, reg: w11, asm: "VEXTRACTF64X4", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VEXTRACTI128128", argLength: 1, reg: v11, asm: "VEXTRACTI128", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VEXTRACTI64X4256", argLength: 1, reg: w11, asm: "VEXTRACTI64X4", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPUB128", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUB256", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUB512", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUW128", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUW256", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUW512", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUD128", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUD256", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUD512", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQ128", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQ256", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQ512", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPB128", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPB256", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPB512", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "UInt8", commutative: false, typ: 
"Mask", resultInArg0: false}, + {name: "VPCMPW128", argLength: 2, reg: w2k, asm: "VPCMPW", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPW256", argLength: 2, reg: w2k, asm: "VPCMPW", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPW512", argLength: 2, reg: w2k, asm: "VPCMPW", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPD128", argLength: 2, reg: w2k, asm: "VPCMPD", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPD256", argLength: 2, reg: w2k, asm: "VPCMPD", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPD512", argLength: 2, reg: w2k, asm: "VPCMPD", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQ128", argLength: 2, reg: w2k, asm: "VPCMPQ", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQ256", argLength: 2, reg: w2k, asm: "VPCMPQ", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQ512", argLength: 2, reg: w2k, asm: "VPCMPQ", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPROLD128", argLength: 1, reg: w11, asm: "VPROLD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLD256", argLength: 1, reg: w11, asm: "VPROLD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLD512", argLength: 1, reg: w11, asm: "VPROLD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLQ128", argLength: 1, reg: w11, asm: "VPROLQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLQ256", argLength: 1, reg: w11, asm: "VPROLQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLQ512", argLength: 1, reg: w11, asm: "VPROLQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLDMasked128", argLength: 2, reg: wkw, asm: "VPROLD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLDMasked256", argLength: 2, reg: wkw, asm: "VPROLD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLDMasked512", argLength: 2, reg: wkw, asm: "VPROLD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLQMasked128", argLength: 2, reg: wkw, asm: "VPROLQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLQMasked256", argLength: 2, reg: wkw, asm: "VPROLQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLQMasked512", argLength: 2, reg: wkw, asm: "VPROLQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORD128", argLength: 1, reg: w11, asm: "VPRORD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORD256", argLength: 1, reg: w11, asm: "VPRORD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORD512", argLength: 1, reg: w11, asm: "VPRORD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORQ128", argLength: 1, reg: w11, asm: "VPRORQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORQ256", argLength: 1, reg: w11, asm: "VPRORQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORQ512", argLength: 1, reg: w11, asm: "VPRORQ", aux: "UInt8", 
commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORDMasked128", argLength: 2, reg: wkw, asm: "VPRORD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORDMasked256", argLength: 2, reg: wkw, asm: "VPRORD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORDMasked512", argLength: 2, reg: wkw, asm: "VPRORD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORQMasked128", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORQMasked256", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORQMasked512", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPINSRB128", argLength: 2, reg: vgpv, asm: "VPINSRB", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPINSRW128", argLength: 2, reg: vgpv, asm: "VPINSRW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPINSRD128", argLength: 2, reg: vgpv, asm: "VPINSRD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPINSRQ128", argLength: 2, reg: vgpv, asm: "VPINSRQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VINSERTF128256", argLength: 2, reg: v21, asm: "VINSERTF128", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VINSERTF64X4512", argLength: 2, reg: w21, asm: "VINSERTF64X4", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VINSERTI128256", argLength: 2, reg: v21, asm: "VINSERTI128", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VINSERTI64X4512", argLength: 2, reg: w21, asm: "VINSERTI64X4", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDW128", argLength: 2, reg: w21, asm: "VPSHLDW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDW256", argLength: 2, reg: w21, asm: "VPSHLDW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDW512", argLength: 2, reg: w21, asm: "VPSHLDW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDD128", argLength: 2, reg: w21, asm: "VPSHLDD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDD256", argLength: 2, reg: w21, asm: "VPSHLDD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDD512", argLength: 2, reg: w21, asm: "VPSHLDD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDQ128", argLength: 2, reg: w21, asm: "VPSHLDQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDQ256", argLength: 2, reg: w21, asm: "VPSHLDQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDQ512", argLength: 2, reg: w21, asm: "VPSHLDQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDWMasked128", argLength: 3, reg: w2kw, asm: "VPSHLDW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDWMasked256", argLength: 3, reg: w2kw, asm: "VPSHLDW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDWMasked512", argLength: 3, reg: w2kw, asm: "VPSHLDW", 
aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDDMasked128", argLength: 3, reg: w2kw, asm: "VPSHLDD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDDMasked256", argLength: 3, reg: w2kw, asm: "VPSHLDD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDDMasked512", argLength: 3, reg: w2kw, asm: "VPSHLDD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDQMasked128", argLength: 3, reg: w2kw, asm: "VPSHLDQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDQMasked256", argLength: 3, reg: w2kw, asm: "VPSHLDQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDQMasked512", argLength: 3, reg: w2kw, asm: "VPSHLDQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDW128", argLength: 2, reg: w21, asm: "VPSHRDW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDW256", argLength: 2, reg: w21, asm: "VPSHRDW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDW512", argLength: 2, reg: w21, asm: "VPSHRDW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDD128", argLength: 2, reg: w21, asm: "VPSHRDD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDD256", argLength: 2, reg: w21, asm: "VPSHRDD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDD512", argLength: 2, reg: w21, asm: "VPSHRDD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDQ128", argLength: 2, reg: w21, asm: "VPSHRDQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDQ256", argLength: 2, reg: w21, asm: "VPSHRDQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDQ512", argLength: 2, reg: w21, asm: "VPSHRDQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDWMasked128", argLength: 3, reg: w2kw, asm: "VPSHRDW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDWMasked256", argLength: 3, reg: w2kw, asm: "VPSHRDW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDWMasked512", argLength: 3, reg: w2kw, asm: "VPSHRDW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDDMasked128", argLength: 3, reg: w2kw, asm: "VPSHRDD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDDMasked256", argLength: 3, reg: w2kw, asm: "VPSHRDD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDDMasked512", argLength: 3, reg: w2kw, asm: "VPSHRDD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDQMasked128", argLength: 3, reg: w2kw, asm: "VPSHRDQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDQMasked256", argLength: 3, reg: w2kw, asm: "VPSHRDQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDQMasked512", argLength: 3, reg: w2kw, asm: "VPSHRDQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLW128const", argLength: 1, reg: v11, asm: "VPSLLW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: 
"VPSLLW256const", argLength: 1, reg: v11, asm: "VPSLLW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLW512const", argLength: 1, reg: w11, asm: "VPSLLW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLD128const", argLength: 1, reg: v11, asm: "VPSLLD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLD256const", argLength: 1, reg: v11, asm: "VPSLLD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLD512const", argLength: 1, reg: w11, asm: "VPSLLD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLQ128const", argLength: 1, reg: v11, asm: "VPSLLQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLQ256const", argLength: 1, reg: v11, asm: "VPSLLQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLQ512const", argLength: 1, reg: w11, asm: "VPSLLQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLWMasked128const", argLength: 2, reg: wkw, asm: "VPSLLW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLWMasked256const", argLength: 2, reg: wkw, asm: "VPSLLW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLWMasked512const", argLength: 2, reg: wkw, asm: "VPSLLW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLDMasked128const", argLength: 2, reg: wkw, asm: "VPSLLD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLDMasked256const", argLength: 2, reg: wkw, asm: "VPSLLD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLDMasked512const", argLength: 2, reg: wkw, asm: "VPSLLD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLQMasked128const", argLength: 2, reg: wkw, asm: "VPSLLQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLQMasked256const", argLength: 2, reg: wkw, asm: "VPSLLQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLQMasked512const", argLength: 2, reg: wkw, asm: "VPSLLQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLW128const", argLength: 1, reg: v11, asm: "VPSRLW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLW256const", argLength: 1, reg: v11, asm: "VPSRLW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLW512const", argLength: 1, reg: w11, asm: "VPSRLW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLD128const", argLength: 1, reg: v11, asm: "VPSRLD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLD256const", argLength: 1, reg: v11, asm: "VPSRLD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLD512const", argLength: 1, reg: w11, asm: "VPSRLD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLQ128const", argLength: 1, reg: v11, asm: "VPSRLQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLQ256const", argLength: 1, reg: v11, asm: "VPSRLQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLQ512const", argLength: 1, reg: w11, asm: "VPSRLQ", aux: "UInt8", 
commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAW128const", argLength: 1, reg: v11, asm: "VPSRAW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAW256const", argLength: 1, reg: v11, asm: "VPSRAW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAW512const", argLength: 1, reg: w11, asm: "VPSRAW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAD128const", argLength: 1, reg: v11, asm: "VPSRAD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAD256const", argLength: 1, reg: v11, asm: "VPSRAD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAD512const", argLength: 1, reg: w11, asm: "VPSRAD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAQ128const", argLength: 1, reg: w11, asm: "VPSRAQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAQ256const", argLength: 1, reg: w11, asm: "VPSRAQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAQ512const", argLength: 1, reg: w11, asm: "VPSRAQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLWMasked128const", argLength: 2, reg: wkw, asm: "VPSRLW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLWMasked256const", argLength: 2, reg: wkw, asm: "VPSRLW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLWMasked512const", argLength: 2, reg: wkw, asm: "VPSRLW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLDMasked128const", argLength: 2, reg: wkw, asm: "VPSRLD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLDMasked256const", argLength: 2, reg: wkw, asm: "VPSRLD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLDMasked512const", argLength: 2, reg: wkw, asm: "VPSRLD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLQMasked128const", argLength: 2, reg: wkw, asm: "VPSRLQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLQMasked256const", argLength: 2, reg: wkw, asm: "VPSRLQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLQMasked512const", argLength: 2, reg: wkw, asm: "VPSRLQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAWMasked128const", argLength: 2, reg: wkw, asm: "VPSRAW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAWMasked256const", argLength: 2, reg: wkw, asm: "VPSRAW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAWMasked512const", argLength: 2, reg: wkw, asm: "VPSRAW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRADMasked128const", argLength: 2, reg: wkw, asm: "VPSRAD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRADMasked256const", argLength: 2, reg: wkw, asm: "VPSRAD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRADMasked512const", argLength: 2, reg: wkw, asm: "VPSRAD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAQMasked128const", argLength: 2, reg: wkw, asm: "VPSRAQ", aux: "UInt8", commutative: false, typ: 
"Vec128", resultInArg0: false}, + {name: "VPSRAQMasked256const", argLength: 2, reg: wkw, asm: "VPSRAQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAQMasked512const", argLength: 2, reg: wkw, asm: "VPSRAQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, } } diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 45c62f95a7ff85..416c53c44542b9 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -1665,249 +1665,249 @@ func simdGenericOps() []opData { {name: "blendMaskedInt16x32", argLength: 3, commutative: false}, {name: "blendMaskedInt32x16", argLength: 3, commutative: false}, {name: "blendMaskedInt64x8", argLength: 3, commutative: false}, - {name: "CeilScaledFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilScaledFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilScaledFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilScaledFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilScaledFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilScaledFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilScaledMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "CeilScaledMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "CeilScaledMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "CeilScaledMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "CeilScaledMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "CeilScaledMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "CeilScaledResidueFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilScaledResidueFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilScaledResidueFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilScaledResidueFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilScaledResidueFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilScaledResidueFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilScaledResidueMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "CeilScaledResidueMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "CeilScaledResidueMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "CeilScaledResidueMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "CeilScaledResidueMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "CeilScaledResidueMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "FloorScaledFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorScaledFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorScaledFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorScaledFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorScaledFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorScaledFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorScaledMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "FloorScaledMaskedFloat32x8", 
argLength: 2, commutative: false, aux: "Int8"}, - {name: "FloorScaledMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "FloorScaledMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "FloorScaledMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "FloorScaledMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "FloorScaledResidueFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorScaledResidueFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorScaledResidueFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorScaledResidueFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorScaledResidueFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorScaledResidueFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorScaledResidueMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "FloorScaledResidueMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "FloorScaledResidueMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "FloorScaledResidueMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "FloorScaledResidueMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "FloorScaledResidueMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformInverseMaskedUint8x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformInverseMaskedUint8x32", argLength: 3, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformInverseMaskedUint8x64", argLength: 3, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformInverseUint8x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformInverseUint8x32", argLength: 2, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformInverseUint8x64", argLength: 2, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformMaskedUint8x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformMaskedUint8x32", argLength: 3, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformMaskedUint8x64", argLength: 3, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformUint8x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformUint8x32", argLength: 2, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformUint8x64", argLength: 2, commutative: false, aux: "Int8"}, - {name: "GetElemInt8x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "GetElemInt16x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "GetElemInt32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "GetElemInt64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "GetElemUint8x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "GetElemUint16x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "GetElemUint32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "GetElemUint64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftInt32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftInt32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftInt32x16", argLength: 1, commutative: false, aux: 
"Int8"}, - {name: "RotateAllLeftInt64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftInt64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftInt64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftMaskedInt32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftMaskedInt32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftMaskedInt32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftMaskedInt64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftMaskedInt64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftMaskedInt64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftMaskedUint32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftMaskedUint32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftMaskedUint32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftMaskedUint64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftMaskedUint64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftMaskedUint64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftUint32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftUint32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftUint32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftUint64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftUint64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftUint64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllRightInt32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllRightInt32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllRightInt32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllRightInt64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllRightInt64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllRightInt64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllRightMaskedInt32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllRightMaskedInt32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllRightMaskedInt32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllRightMaskedInt64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllRightMaskedInt64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllRightMaskedInt64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllRightMaskedUint32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllRightMaskedUint32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllRightMaskedUint32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllRightMaskedUint64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllRightMaskedUint64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllRightMaskedUint64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllRightUint32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllRightUint32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: 
"RotateAllRightUint32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllRightUint64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllRightUint64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllRightUint64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundToEvenScaledFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundToEvenScaledFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundToEvenScaledFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundToEvenScaledFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundToEvenScaledFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundToEvenScaledFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundToEvenScaledMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundToEvenScaledMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundToEvenScaledMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundToEvenScaledMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundToEvenScaledMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundToEvenScaledMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundToEvenScaledResidueFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundToEvenScaledResidueFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundToEvenScaledResidueFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundToEvenScaledResidueFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundToEvenScaledResidueFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundToEvenScaledResidueFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundToEvenScaledResidueMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundToEvenScaledResidueMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundToEvenScaledResidueMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundToEvenScaledResidueMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundToEvenScaledResidueMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundToEvenScaledResidueMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "SetElemInt8x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "SetElemInt16x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "SetElemInt32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "SetElemInt64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "SetElemUint8x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "SetElemUint16x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "SetElemUint32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "SetElemUint64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatInt16x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatInt16x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatInt16x32", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatInt32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: 
"ShiftAllLeftConcatInt32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatInt32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatInt64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatInt64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatInt64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatMaskedInt16x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatMaskedInt16x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatMaskedInt16x32", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatMaskedInt32x4", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatMaskedInt32x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatMaskedInt32x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatMaskedInt64x2", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatMaskedInt64x4", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatMaskedInt64x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatMaskedUint16x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatMaskedUint16x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatMaskedUint16x32", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatMaskedUint32x4", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatMaskedUint32x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatMaskedUint32x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatMaskedUint64x2", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatMaskedUint64x4", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatMaskedUint64x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatUint16x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatUint16x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatUint16x32", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatUint32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatUint32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatUint32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatUint64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatUint64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatUint64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatInt16x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatInt16x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatInt16x32", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatInt32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatInt32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatInt32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatInt64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatInt64x4", argLength: 2, 
commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatInt64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatMaskedInt16x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatMaskedInt16x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatMaskedInt16x32", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatMaskedInt32x4", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatMaskedInt32x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatMaskedInt32x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatMaskedInt64x2", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatMaskedInt64x4", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatMaskedInt64x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatMaskedUint16x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatMaskedUint16x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatMaskedUint16x32", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatMaskedUint32x4", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatMaskedUint32x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatMaskedUint32x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatMaskedUint64x2", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatMaskedUint64x4", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatMaskedUint64x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatUint16x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatUint16x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatUint16x32", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatUint32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatUint32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatUint32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatUint64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatUint64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatUint64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "TruncScaledFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncScaledFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncScaledFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncScaledFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncScaledFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncScaledFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncScaledMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "TruncScaledMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "TruncScaledMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "TruncScaledMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "TruncScaledMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: 
"TruncScaledMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "TruncScaledResidueFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncScaledResidueFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncScaledResidueFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncScaledResidueFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncScaledResidueFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncScaledResidueFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncScaledResidueMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "TruncScaledResidueMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "TruncScaledResidueMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "TruncScaledResidueMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "TruncScaledResidueMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "TruncScaledResidueMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "CeilScaledFloat32x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "CeilScaledFloat32x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "CeilScaledFloat32x16", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "CeilScaledFloat64x2", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "CeilScaledFloat64x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "CeilScaledFloat64x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "CeilScaledMaskedFloat32x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "CeilScaledMaskedFloat32x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "CeilScaledMaskedFloat32x16", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "CeilScaledMaskedFloat64x2", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "CeilScaledMaskedFloat64x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "CeilScaledMaskedFloat64x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "CeilScaledResidueFloat32x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "CeilScaledResidueFloat32x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "CeilScaledResidueFloat32x16", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "CeilScaledResidueFloat64x2", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "CeilScaledResidueFloat64x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "CeilScaledResidueFloat64x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "CeilScaledResidueMaskedFloat32x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "CeilScaledResidueMaskedFloat32x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "CeilScaledResidueMaskedFloat32x16", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "CeilScaledResidueMaskedFloat64x2", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "CeilScaledResidueMaskedFloat64x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "CeilScaledResidueMaskedFloat64x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "FloorScaledFloat32x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "FloorScaledFloat32x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "FloorScaledFloat32x16", argLength: 1, commutative: false, aux: "UInt8"}, + {name: 
"FloorScaledFloat64x2", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "FloorScaledFloat64x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "FloorScaledFloat64x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "FloorScaledMaskedFloat32x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "FloorScaledMaskedFloat32x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "FloorScaledMaskedFloat32x16", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "FloorScaledMaskedFloat64x2", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "FloorScaledMaskedFloat64x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "FloorScaledMaskedFloat64x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "FloorScaledResidueFloat32x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "FloorScaledResidueFloat32x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "FloorScaledResidueFloat32x16", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "FloorScaledResidueFloat64x2", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "FloorScaledResidueFloat64x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "FloorScaledResidueFloat64x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "FloorScaledResidueMaskedFloat32x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "FloorScaledResidueMaskedFloat32x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "FloorScaledResidueMaskedFloat32x16", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "FloorScaledResidueMaskedFloat64x2", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "FloorScaledResidueMaskedFloat64x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "FloorScaledResidueMaskedFloat64x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "GaloisFieldAffineTransformInverseMaskedUint8x16", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "GaloisFieldAffineTransformInverseMaskedUint8x32", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "GaloisFieldAffineTransformInverseMaskedUint8x64", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "GaloisFieldAffineTransformInverseUint8x16", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "GaloisFieldAffineTransformInverseUint8x32", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "GaloisFieldAffineTransformInverseUint8x64", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "GaloisFieldAffineTransformMaskedUint8x16", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "GaloisFieldAffineTransformMaskedUint8x32", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "GaloisFieldAffineTransformMaskedUint8x64", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "GaloisFieldAffineTransformUint8x16", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "GaloisFieldAffineTransformUint8x32", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "GaloisFieldAffineTransformUint8x64", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "GetElemInt8x16", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "GetElemInt16x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "GetElemInt32x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "GetElemInt64x2", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "GetElemUint8x16", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "GetElemUint16x8", argLength: 1, commutative: false, aux: 
"UInt8"}, + {name: "GetElemUint32x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "GetElemUint64x2", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RotateAllLeftInt32x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RotateAllLeftInt32x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RotateAllLeftInt32x16", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RotateAllLeftInt64x2", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RotateAllLeftInt64x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RotateAllLeftInt64x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RotateAllLeftMaskedInt32x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RotateAllLeftMaskedInt32x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RotateAllLeftMaskedInt32x16", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RotateAllLeftMaskedInt64x2", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RotateAllLeftMaskedInt64x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RotateAllLeftMaskedInt64x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RotateAllLeftMaskedUint32x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RotateAllLeftMaskedUint32x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RotateAllLeftMaskedUint32x16", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RotateAllLeftMaskedUint64x2", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RotateAllLeftMaskedUint64x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RotateAllLeftMaskedUint64x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RotateAllLeftUint32x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RotateAllLeftUint32x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RotateAllLeftUint32x16", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RotateAllLeftUint64x2", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RotateAllLeftUint64x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RotateAllLeftUint64x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RotateAllRightInt32x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RotateAllRightInt32x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RotateAllRightInt32x16", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RotateAllRightInt64x2", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RotateAllRightInt64x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RotateAllRightInt64x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RotateAllRightMaskedInt32x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RotateAllRightMaskedInt32x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RotateAllRightMaskedInt32x16", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RotateAllRightMaskedInt64x2", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RotateAllRightMaskedInt64x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RotateAllRightMaskedInt64x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RotateAllRightMaskedUint32x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RotateAllRightMaskedUint32x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RotateAllRightMaskedUint32x16", argLength: 2, commutative: false, aux: "UInt8"}, + {name: 
"RotateAllRightMaskedUint64x2", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RotateAllRightMaskedUint64x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RotateAllRightMaskedUint64x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RotateAllRightUint32x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RotateAllRightUint32x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RotateAllRightUint32x16", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RotateAllRightUint64x2", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RotateAllRightUint64x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RotateAllRightUint64x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RoundToEvenScaledFloat32x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RoundToEvenScaledFloat32x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RoundToEvenScaledFloat32x16", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RoundToEvenScaledFloat64x2", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RoundToEvenScaledFloat64x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RoundToEvenScaledFloat64x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RoundToEvenScaledMaskedFloat32x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RoundToEvenScaledMaskedFloat32x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RoundToEvenScaledMaskedFloat32x16", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RoundToEvenScaledMaskedFloat64x2", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RoundToEvenScaledMaskedFloat64x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RoundToEvenScaledMaskedFloat64x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RoundToEvenScaledResidueFloat32x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RoundToEvenScaledResidueFloat32x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RoundToEvenScaledResidueFloat32x16", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RoundToEvenScaledResidueFloat64x2", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RoundToEvenScaledResidueFloat64x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RoundToEvenScaledResidueFloat64x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RoundToEvenScaledResidueMaskedFloat32x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RoundToEvenScaledResidueMaskedFloat32x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RoundToEvenScaledResidueMaskedFloat32x16", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RoundToEvenScaledResidueMaskedFloat64x2", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RoundToEvenScaledResidueMaskedFloat64x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RoundToEvenScaledResidueMaskedFloat64x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "SetElemInt8x16", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "SetElemInt16x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "SetElemInt32x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "SetElemInt64x2", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "SetElemUint8x16", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "SetElemUint16x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "SetElemUint32x4", argLength: 2, commutative: 
false, aux: "UInt8"}, + {name: "SetElemUint64x2", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatInt16x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatInt16x16", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatInt16x32", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatInt32x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatInt32x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatInt32x16", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatInt64x2", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatInt64x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatInt64x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatMaskedInt16x8", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatMaskedInt16x16", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatMaskedInt16x32", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatMaskedInt32x4", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatMaskedInt32x8", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatMaskedInt32x16", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatMaskedInt64x2", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatMaskedInt64x4", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatMaskedInt64x8", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatMaskedUint16x8", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatMaskedUint16x16", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatMaskedUint16x32", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatMaskedUint32x4", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatMaskedUint32x8", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatMaskedUint32x16", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatMaskedUint64x2", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatMaskedUint64x4", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatMaskedUint64x8", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatUint16x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatUint16x16", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatUint16x32", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatUint32x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatUint32x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatUint32x16", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatUint64x2", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatUint64x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatUint64x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatInt16x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatInt16x16", argLength: 2, commutative: false, aux: "UInt8"}, + {name: 
"ShiftAllRightConcatInt16x32", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatInt32x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatInt32x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatInt32x16", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatInt64x2", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatInt64x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatInt64x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatMaskedInt16x8", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatMaskedInt16x16", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatMaskedInt16x32", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatMaskedInt32x4", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatMaskedInt32x8", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatMaskedInt32x16", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatMaskedInt64x2", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatMaskedInt64x4", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatMaskedInt64x8", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatMaskedUint16x8", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatMaskedUint16x16", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatMaskedUint16x32", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatMaskedUint32x4", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatMaskedUint32x8", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatMaskedUint32x16", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatMaskedUint64x2", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatMaskedUint64x4", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatMaskedUint64x8", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatUint16x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatUint16x16", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatUint16x32", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatUint32x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatUint32x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatUint32x16", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatUint64x2", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatUint64x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatUint64x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "TruncScaledFloat32x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "TruncScaledFloat32x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "TruncScaledFloat32x16", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "TruncScaledFloat64x2", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "TruncScaledFloat64x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: 
"TruncScaledFloat64x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "TruncScaledMaskedFloat32x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "TruncScaledMaskedFloat32x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "TruncScaledMaskedFloat32x16", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "TruncScaledMaskedFloat64x2", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "TruncScaledMaskedFloat64x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "TruncScaledMaskedFloat64x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "TruncScaledResidueFloat32x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "TruncScaledResidueFloat32x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "TruncScaledResidueFloat32x16", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "TruncScaledResidueFloat64x2", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "TruncScaledResidueFloat64x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "TruncScaledResidueFloat64x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "TruncScaledResidueMaskedFloat32x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "TruncScaledResidueMaskedFloat32x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "TruncScaledResidueMaskedFloat32x16", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "TruncScaledResidueMaskedFloat64x2", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "TruncScaledResidueMaskedFloat64x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "TruncScaledResidueMaskedFloat64x8", argLength: 2, commutative: false, aux: "UInt8"}, } } diff --git a/src/cmd/compile/internal/ssa/check.go b/src/cmd/compile/internal/ssa/check.go index f33c9bc87b8f1b..6baa3cc311571b 100644 --- a/src/cmd/compile/internal/ssa/check.go +++ b/src/cmd/compile/internal/ssa/check.go @@ -150,7 +150,8 @@ func checkFunc(f *Func) { case auxInt128: // AuxInt must be zero, so leave canHaveAuxInt set to false. case auxUInt8: - if v.AuxInt != int64(uint8(v.AuxInt)) { + // Cast to int8 due to requirement of AuxInt, check its comment for details. 
+ if v.AuxInt != int64(int8(v.AuxInt)) { f.Fatalf("bad uint8 AuxInt value for %v", v) } canHaveAuxInt = true diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 8bf850d78ed503..d4e4f710a76c33 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -32362,7 +32362,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VROUNDPS128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVROUNDPS, reg: regInfo{ @@ -32376,7 +32376,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VROUNDPS256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVROUNDPS, reg: regInfo{ @@ -32390,7 +32390,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VROUNDPD128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVROUNDPD, reg: regInfo{ @@ -32404,7 +32404,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VROUNDPD256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVROUNDPD, reg: regInfo{ @@ -32418,7 +32418,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VRNDSCALEPS128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVRNDSCALEPS, reg: regInfo{ @@ -32432,7 +32432,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VRNDSCALEPS256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVRNDSCALEPS, reg: regInfo{ @@ -32446,7 +32446,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VRNDSCALEPS512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVRNDSCALEPS, reg: regInfo{ @@ -32460,7 +32460,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VRNDSCALEPD128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVRNDSCALEPD, reg: regInfo{ @@ -32474,7 +32474,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VRNDSCALEPD256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVRNDSCALEPD, reg: regInfo{ @@ -32488,7 +32488,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VRNDSCALEPD512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVRNDSCALEPD, reg: regInfo{ @@ -32502,7 +32502,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VRNDSCALEPSMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVRNDSCALEPS, reg: regInfo{ @@ -32517,7 +32517,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VRNDSCALEPSMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVRNDSCALEPS, reg: regInfo{ @@ -32532,7 +32532,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VRNDSCALEPSMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVRNDSCALEPS, reg: regInfo{ @@ -32547,7 +32547,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VRNDSCALEPDMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVRNDSCALEPD, reg: regInfo{ @@ -32562,7 +32562,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VRNDSCALEPDMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVRNDSCALEPD, reg: regInfo{ @@ -32577,7 +32577,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VRNDSCALEPDMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVRNDSCALEPD, reg: regInfo{ @@ -32592,7 +32592,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VREDUCEPS128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVREDUCEPS, reg: regInfo{ @@ -32606,7 +32606,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VREDUCEPS256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVREDUCEPS, reg: regInfo{ @@ -32620,7 +32620,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VREDUCEPS512", - auxType: auxInt8, + 
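[Editorial illustration.] The ssa/check.go hunk above tightens the auxUInt8 case: the stored value must round-trip through int8 rather than uint8, because AuxInt is an int64 that holds small aux values sign-extended, so an 8-bit immediate such as 0xFF is canonically stored as -1. The standalone sketch below shows that convention under assumption; encodeUInt8Aux, decodeUInt8Aux, and canonical are hypothetical names for this example only, not helpers from the compiler.

// A minimal sketch (assumed helper names, not the compiler's API) of the
// sign-extended AuxInt encoding that the auxUInt8 check above enforces.
package main

import "fmt"

// encodeUInt8Aux stores a uint8 immediate in canonical AuxInt form:
// sign-extended through int8, so 0xFF is stored as -1.
func encodeUInt8Aux(u uint8) int64 { return int64(int8(u)) }

// decodeUInt8Aux recovers the original 8-bit immediate.
func decodeUInt8Aux(a int64) uint8 { return uint8(a) }

// canonical mirrors the updated auxUInt8 test: the stored value must
// round-trip through int8.
func canonical(a int64) bool { return a == int64(int8(a)) }

func main() {
	for _, imm := range []uint8{0, 1, 0x0B, 0x7F, 0x80, 0xFF} {
		a := encodeUInt8Aux(imm)
		fmt.Printf("imm=%#02x auxInt=%d canonical=%v roundtrip=%#02x\n",
			imm, a, canonical(a), decodeUInt8Aux(a))
	}
	// A zero-extended encoding of 0xFF (stored as 255) would fail the check:
	fmt.Println("zero-extended 0xFF canonical?", canonical(255)) // false
}

Under this assumed encoding, the old test int64(uint8(v.AuxInt)) would reject the sign-extended form of values with the high bit set, which is why the opGen.go entries that switch to auxUInt8 below rely on the int8 round-trip instead.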
auxType: auxUInt8, argLen: 1, asm: x86.AVREDUCEPS, reg: regInfo{ @@ -32634,7 +32634,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VREDUCEPD128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVREDUCEPD, reg: regInfo{ @@ -32648,7 +32648,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VREDUCEPD256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVREDUCEPD, reg: regInfo{ @@ -32662,7 +32662,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VREDUCEPD512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVREDUCEPD, reg: regInfo{ @@ -32676,7 +32676,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VREDUCEPSMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVREDUCEPS, reg: regInfo{ @@ -32691,7 +32691,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VREDUCEPSMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVREDUCEPS, reg: regInfo{ @@ -32706,7 +32706,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VREDUCEPSMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVREDUCEPS, reg: regInfo{ @@ -32721,7 +32721,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VREDUCEPDMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVREDUCEPD, reg: regInfo{ @@ -32736,7 +32736,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VREDUCEPDMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVREDUCEPD, reg: regInfo{ @@ -32751,7 +32751,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VREDUCEPDMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVREDUCEPD, reg: regInfo{ @@ -32766,7 +32766,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VCMPPS128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, commutative: true, asm: x86.AVCMPPS, @@ -32782,7 +32782,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VCMPPS256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, commutative: true, asm: x86.AVCMPPS, @@ -32798,7 +32798,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VCMPPS512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, commutative: true, asm: x86.AVCMPPS, @@ -32814,7 +32814,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VCMPPD128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, commutative: true, asm: x86.AVCMPPD, @@ -32830,7 +32830,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VCMPPD256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, commutative: true, asm: x86.AVCMPPD, @@ -32846,7 +32846,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VCMPPD512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, commutative: true, asm: x86.AVCMPPD, @@ -32862,7 +32862,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VCMPPSMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVCMPPS, @@ -32879,7 +32879,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VCMPPSMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVCMPPS, @@ -32896,7 +32896,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VCMPPSMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVCMPPS, @@ -32913,7 +32913,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VCMPPDMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVCMPPD, @@ -32930,7 +32930,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VCMPPDMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVCMPPD, @@ -32947,7 +32947,7 @@ var opcodeTable = [...]opInfo{ 
}, { name: "VCMPPDMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVCMPPD, @@ -32964,7 +32964,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPBMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVPCMPB, @@ -32981,7 +32981,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPBMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVPCMPB, @@ -32998,7 +32998,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPBMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVPCMPB, @@ -33015,7 +33015,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPWMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVPCMPW, @@ -33032,7 +33032,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPWMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVPCMPW, @@ -33049,7 +33049,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPWMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVPCMPW, @@ -33066,7 +33066,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPDMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVPCMPD, @@ -33083,7 +33083,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPDMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVPCMPD, @@ -33100,7 +33100,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPDMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVPCMPD, @@ -33117,7 +33117,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPQMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVPCMPQ, @@ -33134,7 +33134,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPQMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVPCMPQ, @@ -33151,7 +33151,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPQMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVPCMPQ, @@ -33168,7 +33168,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPUBMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVPCMPUB, @@ -33185,7 +33185,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPUBMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVPCMPUB, @@ -33202,7 +33202,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPUBMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVPCMPUB, @@ -33219,7 +33219,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPUWMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVPCMPUW, @@ -33236,7 +33236,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPUWMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVPCMPUW, @@ -33253,7 +33253,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPUWMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVPCMPUW, @@ -33270,7 +33270,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPUDMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVPCMPUD, @@ -33287,7 +33287,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPUDMasked256", - auxType: auxInt8, + 
auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVPCMPUD, @@ -33304,7 +33304,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPUDMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVPCMPUD, @@ -33321,7 +33321,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPUQMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVPCMPUQ, @@ -33338,7 +33338,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPUQMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVPCMPUQ, @@ -33355,7 +33355,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPUQMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVPCMPUQ, @@ -33372,7 +33372,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VGF2P8AFFINEQB128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ @@ -33387,7 +33387,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VGF2P8AFFINEQB256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ @@ -33402,7 +33402,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VGF2P8AFFINEQB512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ @@ -33417,7 +33417,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VGF2P8AFFINEINVQB128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ @@ -33432,7 +33432,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VGF2P8AFFINEINVQB256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ @@ -33447,7 +33447,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VGF2P8AFFINEINVQB512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ @@ -33462,7 +33462,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VGF2P8AFFINEINVQBMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ @@ -33478,7 +33478,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VGF2P8AFFINEINVQBMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ @@ -33494,7 +33494,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VGF2P8AFFINEINVQBMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ @@ -33510,7 +33510,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VGF2P8AFFINEQBMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ @@ -33526,7 +33526,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VGF2P8AFFINEQBMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ @@ -33542,7 +33542,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VGF2P8AFFINEQBMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ @@ -33558,7 +33558,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPEXTRB128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPEXTRB, reg: regInfo{ @@ -33572,7 +33572,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPEXTRW128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPEXTRW, reg: regInfo{ @@ -33586,7 +33586,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPEXTRD128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPEXTRD, reg: regInfo{ @@ -33600,7 +33600,7 @@ var opcodeTable = [...]opInfo{ }, { name: 
"VPEXTRQ128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPEXTRQ, reg: regInfo{ @@ -33614,7 +33614,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VEXTRACTF128128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVEXTRACTF128, reg: regInfo{ @@ -33628,7 +33628,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VEXTRACTF64X4256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVEXTRACTF64X4, reg: regInfo{ @@ -33642,7 +33642,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VEXTRACTI128128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVEXTRACTI128, reg: regInfo{ @@ -33656,7 +33656,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VEXTRACTI64X4256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVEXTRACTI64X4, reg: regInfo{ @@ -33670,7 +33670,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPUB128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPCMPUB, reg: regInfo{ @@ -33685,7 +33685,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPUB256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPCMPUB, reg: regInfo{ @@ -33700,7 +33700,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPUB512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPCMPUB, reg: regInfo{ @@ -33715,7 +33715,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPUW128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPCMPUW, reg: regInfo{ @@ -33730,7 +33730,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPUW256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPCMPUW, reg: regInfo{ @@ -33745,7 +33745,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPUW512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPCMPUW, reg: regInfo{ @@ -33760,7 +33760,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPUD128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPCMPUD, reg: regInfo{ @@ -33775,7 +33775,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPUD256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPCMPUD, reg: regInfo{ @@ -33790,7 +33790,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPUD512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPCMPUD, reg: regInfo{ @@ -33805,7 +33805,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPUQ128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPCMPUQ, reg: regInfo{ @@ -33820,7 +33820,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPUQ256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPCMPUQ, reg: regInfo{ @@ -33835,7 +33835,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPUQ512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPCMPUQ, reg: regInfo{ @@ -33850,7 +33850,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPB128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPCMPB, reg: regInfo{ @@ -33865,7 +33865,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPB256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPCMPB, reg: regInfo{ @@ -33880,7 +33880,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPB512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPCMPB, reg: regInfo{ @@ -33895,7 +33895,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPW128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPCMPW, reg: regInfo{ @@ -33910,7 +33910,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPW256", - auxType: auxInt8, + auxType: auxUInt8, 
argLen: 2, asm: x86.AVPCMPW, reg: regInfo{ @@ -33925,7 +33925,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPW512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPCMPW, reg: regInfo{ @@ -33940,7 +33940,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPD128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPCMPD, reg: regInfo{ @@ -33955,7 +33955,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPD256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPCMPD, reg: regInfo{ @@ -33970,7 +33970,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPD512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPCMPD, reg: regInfo{ @@ -33985,7 +33985,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPQ128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPCMPQ, reg: regInfo{ @@ -34000,7 +34000,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPQ256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPCMPQ, reg: regInfo{ @@ -34015,7 +34015,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPQ512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPCMPQ, reg: regInfo{ @@ -34030,7 +34030,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPROLD128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPROLD, reg: regInfo{ @@ -34044,7 +34044,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPROLD256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPROLD, reg: regInfo{ @@ -34058,7 +34058,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPROLD512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPROLD, reg: regInfo{ @@ -34072,7 +34072,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPROLQ128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPROLQ, reg: regInfo{ @@ -34086,7 +34086,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPROLQ256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPROLQ, reg: regInfo{ @@ -34100,7 +34100,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPROLQ512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPROLQ, reg: regInfo{ @@ -34114,7 +34114,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPROLDMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPROLD, reg: regInfo{ @@ -34129,7 +34129,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPROLDMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPROLD, reg: regInfo{ @@ -34144,7 +34144,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPROLDMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPROLD, reg: regInfo{ @@ -34159,7 +34159,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPROLQMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPROLQ, reg: regInfo{ @@ -34174,7 +34174,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPROLQMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPROLQ, reg: regInfo{ @@ -34189,7 +34189,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPROLQMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPROLQ, reg: regInfo{ @@ -34204,7 +34204,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPRORD128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPRORD, reg: regInfo{ @@ -34218,7 +34218,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPRORD256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPRORD, reg: regInfo{ @@ -34232,7 +34232,7 @@ var opcodeTable = 
[...]opInfo{ }, { name: "VPRORD512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPRORD, reg: regInfo{ @@ -34246,7 +34246,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPRORQ128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPRORQ, reg: regInfo{ @@ -34260,7 +34260,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPRORQ256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPRORQ, reg: regInfo{ @@ -34274,7 +34274,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPRORQ512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPRORQ, reg: regInfo{ @@ -34288,7 +34288,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPRORDMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPRORD, reg: regInfo{ @@ -34303,7 +34303,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPRORDMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPRORD, reg: regInfo{ @@ -34318,7 +34318,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPRORDMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPRORD, reg: regInfo{ @@ -34333,7 +34333,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPRORQMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPRORQ, reg: regInfo{ @@ -34348,7 +34348,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPRORQMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPRORQ, reg: regInfo{ @@ -34363,7 +34363,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPRORQMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPRORQ, reg: regInfo{ @@ -34378,7 +34378,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPINSRB128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPINSRB, reg: regInfo{ @@ -34393,7 +34393,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPINSRW128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPINSRW, reg: regInfo{ @@ -34408,7 +34408,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPINSRD128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPINSRD, reg: regInfo{ @@ -34423,7 +34423,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPINSRQ128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPINSRQ, reg: regInfo{ @@ -34438,7 +34438,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VINSERTF128256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVINSERTF128, reg: regInfo{ @@ -34453,7 +34453,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VINSERTF64X4512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVINSERTF64X4, reg: regInfo{ @@ -34468,7 +34468,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VINSERTI128256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVINSERTI128, reg: regInfo{ @@ -34483,7 +34483,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VINSERTI64X4512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVINSERTI64X4, reg: regInfo{ @@ -34498,7 +34498,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHLDW128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSHLDW, reg: regInfo{ @@ -34513,7 +34513,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHLDW256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSHLDW, reg: regInfo{ @@ -34528,7 +34528,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHLDW512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSHLDW, reg: regInfo{ @@ -34543,7 +34543,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHLDD128", 
- auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSHLDD, reg: regInfo{ @@ -34558,7 +34558,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHLDD256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSHLDD, reg: regInfo{ @@ -34573,7 +34573,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHLDD512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSHLDD, reg: regInfo{ @@ -34588,7 +34588,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHLDQ128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSHLDQ, reg: regInfo{ @@ -34603,7 +34603,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHLDQ256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSHLDQ, reg: regInfo{ @@ -34618,7 +34618,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHLDQ512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSHLDQ, reg: regInfo{ @@ -34633,7 +34633,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHLDWMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, asm: x86.AVPSHLDW, reg: regInfo{ @@ -34649,7 +34649,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHLDWMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, asm: x86.AVPSHLDW, reg: regInfo{ @@ -34665,7 +34665,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHLDWMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, asm: x86.AVPSHLDW, reg: regInfo{ @@ -34681,7 +34681,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHLDDMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, asm: x86.AVPSHLDD, reg: regInfo{ @@ -34697,7 +34697,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHLDDMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, asm: x86.AVPSHLDD, reg: regInfo{ @@ -34713,7 +34713,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHLDDMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, asm: x86.AVPSHLDD, reg: regInfo{ @@ -34729,7 +34729,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHLDQMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, asm: x86.AVPSHLDQ, reg: regInfo{ @@ -34745,7 +34745,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHLDQMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, asm: x86.AVPSHLDQ, reg: regInfo{ @@ -34761,7 +34761,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHLDQMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, asm: x86.AVPSHLDQ, reg: regInfo{ @@ -34777,7 +34777,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHRDW128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSHRDW, reg: regInfo{ @@ -34792,7 +34792,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHRDW256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSHRDW, reg: regInfo{ @@ -34807,7 +34807,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHRDW512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSHRDW, reg: regInfo{ @@ -34822,7 +34822,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHRDD128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSHRDD, reg: regInfo{ @@ -34837,7 +34837,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHRDD256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSHRDD, reg: regInfo{ @@ -34852,7 +34852,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHRDD512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSHRDD, reg: regInfo{ @@ -34867,7 +34867,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHRDQ128", - auxType: auxInt8, + auxType: 
auxUInt8, argLen: 2, asm: x86.AVPSHRDQ, reg: regInfo{ @@ -34882,7 +34882,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHRDQ256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSHRDQ, reg: regInfo{ @@ -34897,7 +34897,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHRDQ512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSHRDQ, reg: regInfo{ @@ -34912,7 +34912,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHRDWMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, asm: x86.AVPSHRDW, reg: regInfo{ @@ -34928,7 +34928,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHRDWMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, asm: x86.AVPSHRDW, reg: regInfo{ @@ -34944,7 +34944,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHRDWMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, asm: x86.AVPSHRDW, reg: regInfo{ @@ -34960,7 +34960,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHRDDMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, asm: x86.AVPSHRDD, reg: regInfo{ @@ -34976,7 +34976,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHRDDMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, asm: x86.AVPSHRDD, reg: regInfo{ @@ -34992,7 +34992,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHRDDMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, asm: x86.AVPSHRDD, reg: regInfo{ @@ -35008,7 +35008,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHRDQMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, asm: x86.AVPSHRDQ, reg: regInfo{ @@ -35024,7 +35024,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHRDQMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, asm: x86.AVPSHRDQ, reg: regInfo{ @@ -35040,7 +35040,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHRDQMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, asm: x86.AVPSHRDQ, reg: regInfo{ @@ -35056,7 +35056,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSLLW128const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSLLW, reg: regInfo{ @@ -35070,7 +35070,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSLLW256const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSLLW, reg: regInfo{ @@ -35084,7 +35084,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSLLW512const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSLLW, reg: regInfo{ @@ -35098,7 +35098,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSLLD128const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSLLD, reg: regInfo{ @@ -35112,7 +35112,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSLLD256const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSLLD, reg: regInfo{ @@ -35126,7 +35126,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSLLD512const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSLLD, reg: regInfo{ @@ -35140,7 +35140,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSLLQ128const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSLLQ, reg: regInfo{ @@ -35154,7 +35154,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSLLQ256const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSLLQ, reg: regInfo{ @@ -35168,7 +35168,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSLLQ512const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSLLQ, reg: regInfo{ @@ -35182,7 +35182,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSLLWMasked128const", - auxType: auxInt8, + 
auxType: auxUInt8, argLen: 2, asm: x86.AVPSLLW, reg: regInfo{ @@ -35197,7 +35197,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSLLWMasked256const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSLLW, reg: regInfo{ @@ -35212,7 +35212,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSLLWMasked512const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSLLW, reg: regInfo{ @@ -35227,7 +35227,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSLLDMasked128const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSLLD, reg: regInfo{ @@ -35242,7 +35242,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSLLDMasked256const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSLLD, reg: regInfo{ @@ -35257,7 +35257,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSLLDMasked512const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSLLD, reg: regInfo{ @@ -35272,7 +35272,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSLLQMasked128const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSLLQ, reg: regInfo{ @@ -35287,7 +35287,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSLLQMasked256const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSLLQ, reg: regInfo{ @@ -35302,7 +35302,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSLLQMasked512const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSLLQ, reg: regInfo{ @@ -35317,7 +35317,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRLW128const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSRLW, reg: regInfo{ @@ -35331,7 +35331,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRLW256const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSRLW, reg: regInfo{ @@ -35345,7 +35345,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRLW512const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSRLW, reg: regInfo{ @@ -35359,7 +35359,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRLD128const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSRLD, reg: regInfo{ @@ -35373,7 +35373,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRLD256const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSRLD, reg: regInfo{ @@ -35387,7 +35387,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRLD512const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSRLD, reg: regInfo{ @@ -35401,7 +35401,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRLQ128const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSRLQ, reg: regInfo{ @@ -35415,7 +35415,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRLQ256const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSRLQ, reg: regInfo{ @@ -35429,7 +35429,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRLQ512const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSRLQ, reg: regInfo{ @@ -35443,7 +35443,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRAW128const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSRAW, reg: regInfo{ @@ -35457,7 +35457,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRAW256const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSRAW, reg: regInfo{ @@ -35471,7 +35471,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRAW512const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSRAW, reg: regInfo{ @@ -35485,7 +35485,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRAD128const", 
- auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSRAD, reg: regInfo{ @@ -35499,7 +35499,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRAD256const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSRAD, reg: regInfo{ @@ -35513,7 +35513,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRAD512const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSRAD, reg: regInfo{ @@ -35527,7 +35527,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRAQ128const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSRAQ, reg: regInfo{ @@ -35541,7 +35541,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRAQ256const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSRAQ, reg: regInfo{ @@ -35555,7 +35555,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRAQ512const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSRAQ, reg: regInfo{ @@ -35569,7 +35569,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRLWMasked128const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSRLW, reg: regInfo{ @@ -35584,7 +35584,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRLWMasked256const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSRLW, reg: regInfo{ @@ -35599,7 +35599,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRLWMasked512const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSRLW, reg: regInfo{ @@ -35614,7 +35614,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRLDMasked128const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSRLD, reg: regInfo{ @@ -35629,7 +35629,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRLDMasked256const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSRLD, reg: regInfo{ @@ -35644,7 +35644,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRLDMasked512const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSRLD, reg: regInfo{ @@ -35659,7 +35659,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRLQMasked128const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSRLQ, reg: regInfo{ @@ -35674,7 +35674,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRLQMasked256const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSRLQ, reg: regInfo{ @@ -35689,7 +35689,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRLQMasked512const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSRLQ, reg: regInfo{ @@ -35704,7 +35704,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRAWMasked128const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSRAW, reg: regInfo{ @@ -35719,7 +35719,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRAWMasked256const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSRAW, reg: regInfo{ @@ -35734,7 +35734,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRAWMasked512const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSRAW, reg: regInfo{ @@ -35749,7 +35749,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRADMasked128const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSRAD, reg: regInfo{ @@ -35764,7 +35764,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRADMasked256const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSRAD, reg: regInfo{ @@ -35779,7 +35779,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRADMasked512const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSRAD, reg: regInfo{ @@ -35794,7 +35794,7 
@@ var opcodeTable = [...]opInfo{ }, { name: "VPSRAQMasked128const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSRAQ, reg: regInfo{ @@ -35809,7 +35809,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRAQMasked256const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSRAQ, reg: regInfo{ @@ -35824,7 +35824,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRAQMasked512const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSRAQ, reg: regInfo{ @@ -72174,1465 +72174,1465 @@ var opcodeTable = [...]opInfo{ }, { name: "CeilScaledFloat32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "CeilScaledFloat32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "CeilScaledFloat32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "CeilScaledFloat64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "CeilScaledFloat64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "CeilScaledFloat64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "CeilScaledMaskedFloat32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "CeilScaledMaskedFloat32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "CeilScaledMaskedFloat32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "CeilScaledMaskedFloat64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "CeilScaledMaskedFloat64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "CeilScaledMaskedFloat64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "CeilScaledResidueFloat32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "CeilScaledResidueFloat32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "CeilScaledResidueFloat32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "CeilScaledResidueFloat64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "CeilScaledResidueFloat64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "CeilScaledResidueFloat64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "CeilScaledResidueMaskedFloat32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "CeilScaledResidueMaskedFloat32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "CeilScaledResidueMaskedFloat32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "CeilScaledResidueMaskedFloat64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "CeilScaledResidueMaskedFloat64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "CeilScaledResidueMaskedFloat64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "FloorScaledFloat32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "FloorScaledFloat32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "FloorScaledFloat32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "FloorScaledFloat64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, 
}, { name: "FloorScaledFloat64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "FloorScaledFloat64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "FloorScaledMaskedFloat32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "FloorScaledMaskedFloat32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "FloorScaledMaskedFloat32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "FloorScaledMaskedFloat64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "FloorScaledMaskedFloat64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "FloorScaledMaskedFloat64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "FloorScaledResidueFloat32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "FloorScaledResidueFloat32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "FloorScaledResidueFloat32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "FloorScaledResidueFloat64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "FloorScaledResidueFloat64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "FloorScaledResidueFloat64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "FloorScaledResidueMaskedFloat32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "FloorScaledResidueMaskedFloat32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "FloorScaledResidueMaskedFloat32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "FloorScaledResidueMaskedFloat64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "FloorScaledResidueMaskedFloat64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "FloorScaledResidueMaskedFloat64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "GaloisFieldAffineTransformInverseMaskedUint8x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "GaloisFieldAffineTransformInverseMaskedUint8x32", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "GaloisFieldAffineTransformInverseMaskedUint8x64", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "GaloisFieldAffineTransformInverseUint8x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "GaloisFieldAffineTransformInverseUint8x32", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "GaloisFieldAffineTransformInverseUint8x64", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "GaloisFieldAffineTransformMaskedUint8x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "GaloisFieldAffineTransformMaskedUint8x32", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "GaloisFieldAffineTransformMaskedUint8x64", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "GaloisFieldAffineTransformUint8x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "GaloisFieldAffineTransformUint8x32", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, 
}, { name: "GaloisFieldAffineTransformUint8x64", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "GetElemInt8x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "GetElemInt16x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "GetElemInt32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "GetElemInt64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "GetElemUint8x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "GetElemUint16x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "GetElemUint32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "GetElemUint64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RotateAllLeftInt32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RotateAllLeftInt32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RotateAllLeftInt32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RotateAllLeftInt64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RotateAllLeftInt64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RotateAllLeftInt64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RotateAllLeftMaskedInt32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RotateAllLeftMaskedInt32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RotateAllLeftMaskedInt32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RotateAllLeftMaskedInt64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RotateAllLeftMaskedInt64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RotateAllLeftMaskedInt64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RotateAllLeftMaskedUint32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RotateAllLeftMaskedUint32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RotateAllLeftMaskedUint32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RotateAllLeftMaskedUint64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RotateAllLeftMaskedUint64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RotateAllLeftMaskedUint64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RotateAllLeftUint32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RotateAllLeftUint32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RotateAllLeftUint32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RotateAllLeftUint64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RotateAllLeftUint64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RotateAllLeftUint64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RotateAllRightInt32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: 
"RotateAllRightInt32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RotateAllRightInt32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RotateAllRightInt64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RotateAllRightInt64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RotateAllRightInt64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RotateAllRightMaskedInt32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RotateAllRightMaskedInt32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RotateAllRightMaskedInt32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RotateAllRightMaskedInt64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RotateAllRightMaskedInt64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RotateAllRightMaskedInt64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RotateAllRightMaskedUint32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RotateAllRightMaskedUint32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RotateAllRightMaskedUint32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RotateAllRightMaskedUint64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RotateAllRightMaskedUint64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RotateAllRightMaskedUint64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RotateAllRightUint32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RotateAllRightUint32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RotateAllRightUint32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RotateAllRightUint64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RotateAllRightUint64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RotateAllRightUint64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RoundToEvenScaledFloat32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RoundToEvenScaledFloat32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RoundToEvenScaledFloat32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RoundToEvenScaledFloat64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RoundToEvenScaledFloat64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RoundToEvenScaledFloat64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RoundToEvenScaledMaskedFloat32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RoundToEvenScaledMaskedFloat32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RoundToEvenScaledMaskedFloat32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RoundToEvenScaledMaskedFloat64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, 
{ name: "RoundToEvenScaledMaskedFloat64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RoundToEvenScaledMaskedFloat64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RoundToEvenScaledResidueFloat32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RoundToEvenScaledResidueFloat32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RoundToEvenScaledResidueFloat32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RoundToEvenScaledResidueFloat64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RoundToEvenScaledResidueFloat64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RoundToEvenScaledResidueFloat64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RoundToEvenScaledResidueMaskedFloat32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RoundToEvenScaledResidueMaskedFloat32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RoundToEvenScaledResidueMaskedFloat32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RoundToEvenScaledResidueMaskedFloat64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RoundToEvenScaledResidueMaskedFloat64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RoundToEvenScaledResidueMaskedFloat64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "SetElemInt8x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "SetElemInt16x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "SetElemInt32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "SetElemInt64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "SetElemUint8x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "SetElemUint16x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "SetElemUint32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "SetElemUint64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllLeftConcatInt16x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllLeftConcatInt16x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllLeftConcatInt16x32", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllLeftConcatInt32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllLeftConcatInt32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllLeftConcatInt32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllLeftConcatInt64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllLeftConcatInt64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllLeftConcatInt64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllLeftConcatMaskedInt16x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllLeftConcatMaskedInt16x16", - auxType: 
auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllLeftConcatMaskedInt16x32", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllLeftConcatMaskedInt32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllLeftConcatMaskedInt32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllLeftConcatMaskedInt32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllLeftConcatMaskedInt64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllLeftConcatMaskedInt64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllLeftConcatMaskedInt64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllLeftConcatMaskedUint16x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllLeftConcatMaskedUint16x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllLeftConcatMaskedUint16x32", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllLeftConcatMaskedUint32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllLeftConcatMaskedUint32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllLeftConcatMaskedUint32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllLeftConcatMaskedUint64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllLeftConcatMaskedUint64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllLeftConcatMaskedUint64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllLeftConcatUint16x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllLeftConcatUint16x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllLeftConcatUint16x32", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllLeftConcatUint32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllLeftConcatUint32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllLeftConcatUint32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllLeftConcatUint64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllLeftConcatUint64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllLeftConcatUint64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllRightConcatInt16x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllRightConcatInt16x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllRightConcatInt16x32", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllRightConcatInt32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllRightConcatInt32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllRightConcatInt32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllRightConcatInt64x2", - 
auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllRightConcatInt64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllRightConcatInt64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllRightConcatMaskedInt16x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllRightConcatMaskedInt16x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllRightConcatMaskedInt16x32", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllRightConcatMaskedInt32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllRightConcatMaskedInt32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllRightConcatMaskedInt32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllRightConcatMaskedInt64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllRightConcatMaskedInt64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllRightConcatMaskedInt64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllRightConcatMaskedUint16x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllRightConcatMaskedUint16x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllRightConcatMaskedUint16x32", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllRightConcatMaskedUint32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllRightConcatMaskedUint32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllRightConcatMaskedUint32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllRightConcatMaskedUint64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllRightConcatMaskedUint64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllRightConcatMaskedUint64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllRightConcatUint16x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllRightConcatUint16x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllRightConcatUint16x32", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllRightConcatUint32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllRightConcatUint32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllRightConcatUint32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllRightConcatUint64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllRightConcatUint64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllRightConcatUint64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "TruncScaledFloat32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "TruncScaledFloat32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: 
"TruncScaledFloat32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "TruncScaledFloat64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "TruncScaledFloat64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "TruncScaledFloat64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "TruncScaledMaskedFloat32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "TruncScaledMaskedFloat32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "TruncScaledMaskedFloat32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "TruncScaledMaskedFloat64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "TruncScaledMaskedFloat64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "TruncScaledMaskedFloat64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "TruncScaledResidueFloat32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "TruncScaledResidueFloat32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "TruncScaledResidueFloat32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "TruncScaledResidueFloat64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "TruncScaledResidueFloat64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "TruncScaledResidueFloat64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "TruncScaledResidueMaskedFloat32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "TruncScaledResidueMaskedFloat32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "TruncScaledResidueMaskedFloat32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "TruncScaledResidueMaskedFloat64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "TruncScaledResidueMaskedFloat64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "TruncScaledResidueMaskedFloat64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 20d014361ee899..865b404d1419da 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -30852,7 +30852,7 @@ func rewriteValueAMD64_OpCeilFloat32x4(v *Value) bool { for { x := v_0 v.reset(OpAMD64VROUNDPS128) - v.AuxInt = int8ToAuxInt(2) + v.AuxInt = uint8ToAuxInt(2) v.AddArg(x) return true } @@ -30864,7 +30864,7 @@ func rewriteValueAMD64_OpCeilFloat32x8(v *Value) bool { for { x := v_0 v.reset(OpAMD64VROUNDPS256) - v.AuxInt = int8ToAuxInt(2) + v.AuxInt = uint8ToAuxInt(2) v.AddArg(x) return true } @@ -30876,7 +30876,7 @@ func rewriteValueAMD64_OpCeilFloat64x2(v *Value) bool { for { x := v_0 v.reset(OpAMD64VROUNDPD128) - v.AuxInt = int8ToAuxInt(2) + v.AuxInt = uint8ToAuxInt(2) v.AddArg(x) return true } @@ -30888,7 +30888,7 @@ func rewriteValueAMD64_OpCeilFloat64x4(v *Value) bool { for { x := v_0 v.reset(OpAMD64VROUNDPD256) - v.AuxInt = int8ToAuxInt(2) + v.AuxInt = uint8ToAuxInt(2) v.AddArg(x) return true } @@ -30898,10 +30898,10 @@ func rewriteValueAMD64_OpCeilScaledFloat32x16(v 
*Value) bool { // match: (CeilScaledFloat32x16 [a] x) // result: (VRNDSCALEPS512 [a+2] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VRNDSCALEPS512) - v.AuxInt = int8ToAuxInt(a + 2) + v.AuxInt = uint8ToAuxInt(a + 2) v.AddArg(x) return true } @@ -30911,10 +30911,10 @@ func rewriteValueAMD64_OpCeilScaledFloat32x4(v *Value) bool { // match: (CeilScaledFloat32x4 [a] x) // result: (VRNDSCALEPS128 [a+2] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VRNDSCALEPS128) - v.AuxInt = int8ToAuxInt(a + 2) + v.AuxInt = uint8ToAuxInt(a + 2) v.AddArg(x) return true } @@ -30924,10 +30924,10 @@ func rewriteValueAMD64_OpCeilScaledFloat32x8(v *Value) bool { // match: (CeilScaledFloat32x8 [a] x) // result: (VRNDSCALEPS256 [a+2] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VRNDSCALEPS256) - v.AuxInt = int8ToAuxInt(a + 2) + v.AuxInt = uint8ToAuxInt(a + 2) v.AddArg(x) return true } @@ -30937,10 +30937,10 @@ func rewriteValueAMD64_OpCeilScaledFloat64x2(v *Value) bool { // match: (CeilScaledFloat64x2 [a] x) // result: (VRNDSCALEPD128 [a+2] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VRNDSCALEPD128) - v.AuxInt = int8ToAuxInt(a + 2) + v.AuxInt = uint8ToAuxInt(a + 2) v.AddArg(x) return true } @@ -30950,10 +30950,10 @@ func rewriteValueAMD64_OpCeilScaledFloat64x4(v *Value) bool { // match: (CeilScaledFloat64x4 [a] x) // result: (VRNDSCALEPD256 [a+2] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VRNDSCALEPD256) - v.AuxInt = int8ToAuxInt(a + 2) + v.AuxInt = uint8ToAuxInt(a + 2) v.AddArg(x) return true } @@ -30963,10 +30963,10 @@ func rewriteValueAMD64_OpCeilScaledFloat64x8(v *Value) bool { // match: (CeilScaledFloat64x8 [a] x) // result: (VRNDSCALEPD512 [a+2] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VRNDSCALEPD512) - v.AuxInt = int8ToAuxInt(a + 2) + v.AuxInt = uint8ToAuxInt(a + 2) v.AddArg(x) return true } @@ -30978,11 +30978,11 @@ func rewriteValueAMD64_OpCeilScaledMaskedFloat32x16(v *Value) bool { // match: (CeilScaledMaskedFloat32x16 [a] x mask) // result: (VRNDSCALEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VRNDSCALEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 2) + v.AuxInt = uint8ToAuxInt(a + 2) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -30996,11 +30996,11 @@ func rewriteValueAMD64_OpCeilScaledMaskedFloat32x4(v *Value) bool { // match: (CeilScaledMaskedFloat32x4 [a] x mask) // result: (VRNDSCALEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VRNDSCALEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 2) + v.AuxInt = uint8ToAuxInt(a + 2) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -31014,11 +31014,11 @@ func rewriteValueAMD64_OpCeilScaledMaskedFloat32x8(v *Value) bool { // match: (CeilScaledMaskedFloat32x8 [a] x mask) // result: (VRNDSCALEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VRNDSCALEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 2) + v.AuxInt = uint8ToAuxInt(a + 2) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, 
types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -31032,11 +31032,11 @@ func rewriteValueAMD64_OpCeilScaledMaskedFloat64x2(v *Value) bool { // match: (CeilScaledMaskedFloat64x2 [a] x mask) // result: (VRNDSCALEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VRNDSCALEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 2) + v.AuxInt = uint8ToAuxInt(a + 2) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -31050,11 +31050,11 @@ func rewriteValueAMD64_OpCeilScaledMaskedFloat64x4(v *Value) bool { // match: (CeilScaledMaskedFloat64x4 [a] x mask) // result: (VRNDSCALEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VRNDSCALEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 2) + v.AuxInt = uint8ToAuxInt(a + 2) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -31068,11 +31068,11 @@ func rewriteValueAMD64_OpCeilScaledMaskedFloat64x8(v *Value) bool { // match: (CeilScaledMaskedFloat64x8 [a] x mask) // result: (VRNDSCALEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VRNDSCALEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 2) + v.AuxInt = uint8ToAuxInt(a + 2) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -31084,10 +31084,10 @@ func rewriteValueAMD64_OpCeilScaledResidueFloat32x16(v *Value) bool { // match: (CeilScaledResidueFloat32x16 [a] x) // result: (VREDUCEPS512 [a+2] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VREDUCEPS512) - v.AuxInt = int8ToAuxInt(a + 2) + v.AuxInt = uint8ToAuxInt(a + 2) v.AddArg(x) return true } @@ -31097,10 +31097,10 @@ func rewriteValueAMD64_OpCeilScaledResidueFloat32x4(v *Value) bool { // match: (CeilScaledResidueFloat32x4 [a] x) // result: (VREDUCEPS128 [a+2] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VREDUCEPS128) - v.AuxInt = int8ToAuxInt(a + 2) + v.AuxInt = uint8ToAuxInt(a + 2) v.AddArg(x) return true } @@ -31110,10 +31110,10 @@ func rewriteValueAMD64_OpCeilScaledResidueFloat32x8(v *Value) bool { // match: (CeilScaledResidueFloat32x8 [a] x) // result: (VREDUCEPS256 [a+2] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VREDUCEPS256) - v.AuxInt = int8ToAuxInt(a + 2) + v.AuxInt = uint8ToAuxInt(a + 2) v.AddArg(x) return true } @@ -31123,10 +31123,10 @@ func rewriteValueAMD64_OpCeilScaledResidueFloat64x2(v *Value) bool { // match: (CeilScaledResidueFloat64x2 [a] x) // result: (VREDUCEPD128 [a+2] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VREDUCEPD128) - v.AuxInt = int8ToAuxInt(a + 2) + v.AuxInt = uint8ToAuxInt(a + 2) v.AddArg(x) return true } @@ -31136,10 +31136,10 @@ func rewriteValueAMD64_OpCeilScaledResidueFloat64x4(v *Value) bool { // match: (CeilScaledResidueFloat64x4 [a] x) // result: (VREDUCEPD256 [a+2] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VREDUCEPD256) - v.AuxInt = int8ToAuxInt(a + 2) + v.AuxInt = uint8ToAuxInt(a + 2) v.AddArg(x) return true } @@ -31149,10 +31149,10 @@ func rewriteValueAMD64_OpCeilScaledResidueFloat64x8(v *Value) bool { // match: (CeilScaledResidueFloat64x8 [a] x) // 
result: (VREDUCEPD512 [a+2] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VREDUCEPD512) - v.AuxInt = int8ToAuxInt(a + 2) + v.AuxInt = uint8ToAuxInt(a + 2) v.AddArg(x) return true } @@ -31164,11 +31164,11 @@ func rewriteValueAMD64_OpCeilScaledResidueMaskedFloat32x16(v *Value) bool { // match: (CeilScaledResidueMaskedFloat32x16 [a] x mask) // result: (VREDUCEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VREDUCEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 2) + v.AuxInt = uint8ToAuxInt(a + 2) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -31182,11 +31182,11 @@ func rewriteValueAMD64_OpCeilScaledResidueMaskedFloat32x4(v *Value) bool { // match: (CeilScaledResidueMaskedFloat32x4 [a] x mask) // result: (VREDUCEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VREDUCEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 2) + v.AuxInt = uint8ToAuxInt(a + 2) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -31200,11 +31200,11 @@ func rewriteValueAMD64_OpCeilScaledResidueMaskedFloat32x8(v *Value) bool { // match: (CeilScaledResidueMaskedFloat32x8 [a] x mask) // result: (VREDUCEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VREDUCEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 2) + v.AuxInt = uint8ToAuxInt(a + 2) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -31218,11 +31218,11 @@ func rewriteValueAMD64_OpCeilScaledResidueMaskedFloat64x2(v *Value) bool { // match: (CeilScaledResidueMaskedFloat64x2 [a] x mask) // result: (VREDUCEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VREDUCEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 2) + v.AuxInt = uint8ToAuxInt(a + 2) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -31236,11 +31236,11 @@ func rewriteValueAMD64_OpCeilScaledResidueMaskedFloat64x4(v *Value) bool { // match: (CeilScaledResidueMaskedFloat64x4 [a] x mask) // result: (VREDUCEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VREDUCEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 2) + v.AuxInt = uint8ToAuxInt(a + 2) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -31254,11 +31254,11 @@ func rewriteValueAMD64_OpCeilScaledResidueMaskedFloat64x8(v *Value) bool { // match: (CeilScaledResidueMaskedFloat64x8 [a] x mask) // result: (VREDUCEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VREDUCEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 2) + v.AuxInt = uint8ToAuxInt(a + 2) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -33864,7 +33864,7 @@ func rewriteValueAMD64_OpEqualFloat32x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v0.AddArg2(x, y) v.AddArg(v0) 
return true @@ -33879,7 +33879,7 @@ func rewriteValueAMD64_OpEqualFloat32x4(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } @@ -33893,7 +33893,7 @@ func rewriteValueAMD64_OpEqualFloat32x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } @@ -33907,7 +33907,7 @@ func rewriteValueAMD64_OpEqualFloat64x2(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } @@ -33921,7 +33921,7 @@ func rewriteValueAMD64_OpEqualFloat64x4(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } @@ -33938,7 +33938,7 @@ func rewriteValueAMD64_OpEqualFloat64x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -34026,7 +34026,7 @@ func rewriteValueAMD64_OpEqualMaskedFloat32x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34048,7 +34048,7 @@ func rewriteValueAMD64_OpEqualMaskedFloat32x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34070,7 +34070,7 @@ func rewriteValueAMD64_OpEqualMaskedFloat32x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34092,7 +34092,7 @@ func rewriteValueAMD64_OpEqualMaskedFloat64x2(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34114,7 +34114,7 @@ func rewriteValueAMD64_OpEqualMaskedFloat64x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34136,7 +34136,7 @@ func rewriteValueAMD64_OpEqualMaskedFloat64x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34158,7 +34158,7 @@ func rewriteValueAMD64_OpEqualMaskedInt16x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34180,7 +34180,7 @@ func rewriteValueAMD64_OpEqualMaskedInt16x32(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34202,7 +34202,7 @@ func rewriteValueAMD64_OpEqualMaskedInt16x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34224,7 +34224,7 @@ func rewriteValueAMD64_OpEqualMaskedInt32x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34246,7 +34246,7 @@ func rewriteValueAMD64_OpEqualMaskedInt32x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34268,7 +34268,7 @@ func rewriteValueAMD64_OpEqualMaskedInt32x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34290,7 +34290,7 @@ func rewriteValueAMD64_OpEqualMaskedInt64x2(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34312,7 +34312,7 @@ func rewriteValueAMD64_OpEqualMaskedInt64x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34334,7 +34334,7 @@ func rewriteValueAMD64_OpEqualMaskedInt64x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34356,7 +34356,7 @@ func rewriteValueAMD64_OpEqualMaskedInt8x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34378,7 +34378,7 @@ func rewriteValueAMD64_OpEqualMaskedInt8x32(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, 
types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34400,7 +34400,7 @@ func rewriteValueAMD64_OpEqualMaskedInt8x64(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34422,7 +34422,7 @@ func rewriteValueAMD64_OpEqualMaskedUint16x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34444,7 +34444,7 @@ func rewriteValueAMD64_OpEqualMaskedUint16x32(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34466,7 +34466,7 @@ func rewriteValueAMD64_OpEqualMaskedUint16x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34488,7 +34488,7 @@ func rewriteValueAMD64_OpEqualMaskedUint32x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34510,7 +34510,7 @@ func rewriteValueAMD64_OpEqualMaskedUint32x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34532,7 +34532,7 @@ func rewriteValueAMD64_OpEqualMaskedUint32x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34554,7 +34554,7 @@ func rewriteValueAMD64_OpEqualMaskedUint64x2(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34576,7 +34576,7 @@ func rewriteValueAMD64_OpEqualMaskedUint64x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34598,7 +34598,7 @@ func rewriteValueAMD64_OpEqualMaskedUint64x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) 
v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34620,7 +34620,7 @@ func rewriteValueAMD64_OpEqualMaskedUint8x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34642,7 +34642,7 @@ func rewriteValueAMD64_OpEqualMaskedUint8x32(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34664,7 +34664,7 @@ func rewriteValueAMD64_OpEqualMaskedUint8x64(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -35254,7 +35254,7 @@ func rewriteValueAMD64_OpFloorFloat32x4(v *Value) bool { for { x := v_0 v.reset(OpAMD64VROUNDPS128) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } @@ -35266,7 +35266,7 @@ func rewriteValueAMD64_OpFloorFloat32x8(v *Value) bool { for { x := v_0 v.reset(OpAMD64VROUNDPS256) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } @@ -35278,7 +35278,7 @@ func rewriteValueAMD64_OpFloorFloat64x2(v *Value) bool { for { x := v_0 v.reset(OpAMD64VROUNDPD128) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } @@ -35290,7 +35290,7 @@ func rewriteValueAMD64_OpFloorFloat64x4(v *Value) bool { for { x := v_0 v.reset(OpAMD64VROUNDPD256) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } @@ -35300,10 +35300,10 @@ func rewriteValueAMD64_OpFloorScaledFloat32x16(v *Value) bool { // match: (FloorScaledFloat32x16 [a] x) // result: (VRNDSCALEPS512 [a+1] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VRNDSCALEPS512) - v.AuxInt = int8ToAuxInt(a + 1) + v.AuxInt = uint8ToAuxInt(a + 1) v.AddArg(x) return true } @@ -35313,10 +35313,10 @@ func rewriteValueAMD64_OpFloorScaledFloat32x4(v *Value) bool { // match: (FloorScaledFloat32x4 [a] x) // result: (VRNDSCALEPS128 [a+1] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VRNDSCALEPS128) - v.AuxInt = int8ToAuxInt(a + 1) + v.AuxInt = uint8ToAuxInt(a + 1) v.AddArg(x) return true } @@ -35326,10 +35326,10 @@ func rewriteValueAMD64_OpFloorScaledFloat32x8(v *Value) bool { // match: (FloorScaledFloat32x8 [a] x) // result: (VRNDSCALEPS256 [a+1] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VRNDSCALEPS256) - v.AuxInt = int8ToAuxInt(a + 1) + v.AuxInt = uint8ToAuxInt(a + 1) v.AddArg(x) return true } @@ -35339,10 +35339,10 @@ func rewriteValueAMD64_OpFloorScaledFloat64x2(v *Value) bool { // match: (FloorScaledFloat64x2 [a] x) // result: (VRNDSCALEPD128 [a+1] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VRNDSCALEPD128) - v.AuxInt = int8ToAuxInt(a + 1) + v.AuxInt = uint8ToAuxInt(a + 1) v.AddArg(x) return true } @@ -35352,10 +35352,10 @@ func rewriteValueAMD64_OpFloorScaledFloat64x4(v *Value) bool { // match: (FloorScaledFloat64x4 [a] x) // result: (VRNDSCALEPD256 [a+1] x) 
for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VRNDSCALEPD256) - v.AuxInt = int8ToAuxInt(a + 1) + v.AuxInt = uint8ToAuxInt(a + 1) v.AddArg(x) return true } @@ -35365,10 +35365,10 @@ func rewriteValueAMD64_OpFloorScaledFloat64x8(v *Value) bool { // match: (FloorScaledFloat64x8 [a] x) // result: (VRNDSCALEPD512 [a+1] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VRNDSCALEPD512) - v.AuxInt = int8ToAuxInt(a + 1) + v.AuxInt = uint8ToAuxInt(a + 1) v.AddArg(x) return true } @@ -35380,11 +35380,11 @@ func rewriteValueAMD64_OpFloorScaledMaskedFloat32x16(v *Value) bool { // match: (FloorScaledMaskedFloat32x16 [a] x mask) // result: (VRNDSCALEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VRNDSCALEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 1) + v.AuxInt = uint8ToAuxInt(a + 1) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -35398,11 +35398,11 @@ func rewriteValueAMD64_OpFloorScaledMaskedFloat32x4(v *Value) bool { // match: (FloorScaledMaskedFloat32x4 [a] x mask) // result: (VRNDSCALEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VRNDSCALEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 1) + v.AuxInt = uint8ToAuxInt(a + 1) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -35416,11 +35416,11 @@ func rewriteValueAMD64_OpFloorScaledMaskedFloat32x8(v *Value) bool { // match: (FloorScaledMaskedFloat32x8 [a] x mask) // result: (VRNDSCALEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VRNDSCALEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 1) + v.AuxInt = uint8ToAuxInt(a + 1) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -35434,11 +35434,11 @@ func rewriteValueAMD64_OpFloorScaledMaskedFloat64x2(v *Value) bool { // match: (FloorScaledMaskedFloat64x2 [a] x mask) // result: (VRNDSCALEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VRNDSCALEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 1) + v.AuxInt = uint8ToAuxInt(a + 1) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -35452,11 +35452,11 @@ func rewriteValueAMD64_OpFloorScaledMaskedFloat64x4(v *Value) bool { // match: (FloorScaledMaskedFloat64x4 [a] x mask) // result: (VRNDSCALEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VRNDSCALEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 1) + v.AuxInt = uint8ToAuxInt(a + 1) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -35470,11 +35470,11 @@ func rewriteValueAMD64_OpFloorScaledMaskedFloat64x8(v *Value) bool { // match: (FloorScaledMaskedFloat64x8 [a] x mask) // result: (VRNDSCALEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VRNDSCALEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 1) + v.AuxInt = uint8ToAuxInt(a + 1) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) 
v.AddArg2(x, v0) @@ -35486,10 +35486,10 @@ func rewriteValueAMD64_OpFloorScaledResidueFloat32x16(v *Value) bool { // match: (FloorScaledResidueFloat32x16 [a] x) // result: (VREDUCEPS512 [a+1] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VREDUCEPS512) - v.AuxInt = int8ToAuxInt(a + 1) + v.AuxInt = uint8ToAuxInt(a + 1) v.AddArg(x) return true } @@ -35499,10 +35499,10 @@ func rewriteValueAMD64_OpFloorScaledResidueFloat32x4(v *Value) bool { // match: (FloorScaledResidueFloat32x4 [a] x) // result: (VREDUCEPS128 [a+1] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VREDUCEPS128) - v.AuxInt = int8ToAuxInt(a + 1) + v.AuxInt = uint8ToAuxInt(a + 1) v.AddArg(x) return true } @@ -35512,10 +35512,10 @@ func rewriteValueAMD64_OpFloorScaledResidueFloat32x8(v *Value) bool { // match: (FloorScaledResidueFloat32x8 [a] x) // result: (VREDUCEPS256 [a+1] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VREDUCEPS256) - v.AuxInt = int8ToAuxInt(a + 1) + v.AuxInt = uint8ToAuxInt(a + 1) v.AddArg(x) return true } @@ -35525,10 +35525,10 @@ func rewriteValueAMD64_OpFloorScaledResidueFloat64x2(v *Value) bool { // match: (FloorScaledResidueFloat64x2 [a] x) // result: (VREDUCEPD128 [a+1] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VREDUCEPD128) - v.AuxInt = int8ToAuxInt(a + 1) + v.AuxInt = uint8ToAuxInt(a + 1) v.AddArg(x) return true } @@ -35538,10 +35538,10 @@ func rewriteValueAMD64_OpFloorScaledResidueFloat64x4(v *Value) bool { // match: (FloorScaledResidueFloat64x4 [a] x) // result: (VREDUCEPD256 [a+1] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VREDUCEPD256) - v.AuxInt = int8ToAuxInt(a + 1) + v.AuxInt = uint8ToAuxInt(a + 1) v.AddArg(x) return true } @@ -35551,10 +35551,10 @@ func rewriteValueAMD64_OpFloorScaledResidueFloat64x8(v *Value) bool { // match: (FloorScaledResidueFloat64x8 [a] x) // result: (VREDUCEPD512 [a+1] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VREDUCEPD512) - v.AuxInt = int8ToAuxInt(a + 1) + v.AuxInt = uint8ToAuxInt(a + 1) v.AddArg(x) return true } @@ -35566,11 +35566,11 @@ func rewriteValueAMD64_OpFloorScaledResidueMaskedFloat32x16(v *Value) bool { // match: (FloorScaledResidueMaskedFloat32x16 [a] x mask) // result: (VREDUCEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VREDUCEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 1) + v.AuxInt = uint8ToAuxInt(a + 1) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -35584,11 +35584,11 @@ func rewriteValueAMD64_OpFloorScaledResidueMaskedFloat32x4(v *Value) bool { // match: (FloorScaledResidueMaskedFloat32x4 [a] x mask) // result: (VREDUCEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VREDUCEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 1) + v.AuxInt = uint8ToAuxInt(a + 1) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -35602,11 +35602,11 @@ func rewriteValueAMD64_OpFloorScaledResidueMaskedFloat32x8(v *Value) bool { // match: (FloorScaledResidueMaskedFloat32x8 [a] x mask) // result: (VREDUCEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := 
auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VREDUCEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 1) + v.AuxInt = uint8ToAuxInt(a + 1) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -35620,11 +35620,11 @@ func rewriteValueAMD64_OpFloorScaledResidueMaskedFloat64x2(v *Value) bool { // match: (FloorScaledResidueMaskedFloat64x2 [a] x mask) // result: (VREDUCEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VREDUCEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 1) + v.AuxInt = uint8ToAuxInt(a + 1) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -35638,11 +35638,11 @@ func rewriteValueAMD64_OpFloorScaledResidueMaskedFloat64x4(v *Value) bool { // match: (FloorScaledResidueMaskedFloat64x4 [a] x mask) // result: (VREDUCEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VREDUCEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 1) + v.AuxInt = uint8ToAuxInt(a + 1) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -35656,11 +35656,11 @@ func rewriteValueAMD64_OpFloorScaledResidueMaskedFloat64x8(v *Value) bool { // match: (FloorScaledResidueMaskedFloat64x8 [a] x mask) // result: (VREDUCEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VREDUCEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 1) + v.AuxInt = uint8ToAuxInt(a + 1) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -35675,12 +35675,12 @@ func rewriteValueAMD64_OpGaloisFieldAffineTransformInverseMaskedUint8x16(v *Valu // match: (GaloisFieldAffineTransformInverseMaskedUint8x16 [a] x y mask) // result: (VGF2P8AFFINEINVQBMasked128 [a] x y (VPMOVVec8x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VGF2P8AFFINEINVQBMasked128) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -35695,12 +35695,12 @@ func rewriteValueAMD64_OpGaloisFieldAffineTransformInverseMaskedUint8x32(v *Valu // match: (GaloisFieldAffineTransformInverseMaskedUint8x32 [a] x y mask) // result: (VGF2P8AFFINEINVQBMasked256 [a] x y (VPMOVVec8x32ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VGF2P8AFFINEINVQBMasked256) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -35715,12 +35715,12 @@ func rewriteValueAMD64_OpGaloisFieldAffineTransformInverseMaskedUint8x64(v *Valu // match: (GaloisFieldAffineTransformInverseMaskedUint8x64 [a] x y mask) // result: (VGF2P8AFFINEINVQBMasked512 [a] x y (VPMOVVec8x64ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VGF2P8AFFINEINVQBMasked512) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -35735,12 +35735,12 @@ func rewriteValueAMD64_OpGaloisFieldAffineTransformMaskedUint8x16(v *Value) bool // match: 
(GaloisFieldAffineTransformMaskedUint8x16 [a] x y mask) // result: (VGF2P8AFFINEQBMasked128 [a] x y (VPMOVVec8x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VGF2P8AFFINEQBMasked128) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -35755,12 +35755,12 @@ func rewriteValueAMD64_OpGaloisFieldAffineTransformMaskedUint8x32(v *Value) bool // match: (GaloisFieldAffineTransformMaskedUint8x32 [a] x y mask) // result: (VGF2P8AFFINEQBMasked256 [a] x y (VPMOVVec8x32ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VGF2P8AFFINEQBMasked256) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -35775,12 +35775,12 @@ func rewriteValueAMD64_OpGaloisFieldAffineTransformMaskedUint8x64(v *Value) bool // match: (GaloisFieldAffineTransformMaskedUint8x64 [a] x y mask) // result: (VGF2P8AFFINEQBMasked512 [a] x y (VPMOVVec8x64ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VGF2P8AFFINEQBMasked512) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -35864,7 +35864,7 @@ func rewriteValueAMD64_OpGetHiFloat32x16(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTF64X4256) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } @@ -35876,7 +35876,7 @@ func rewriteValueAMD64_OpGetHiFloat32x8(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTF128128) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } @@ -35888,7 +35888,7 @@ func rewriteValueAMD64_OpGetHiFloat64x4(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTF128128) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } @@ -35900,7 +35900,7 @@ func rewriteValueAMD64_OpGetHiFloat64x8(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTF64X4256) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } @@ -35912,7 +35912,7 @@ func rewriteValueAMD64_OpGetHiInt16x16(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } @@ -35924,7 +35924,7 @@ func rewriteValueAMD64_OpGetHiInt16x32(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI64X4256) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } @@ -35936,7 +35936,7 @@ func rewriteValueAMD64_OpGetHiInt32x16(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI64X4256) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } @@ -35948,7 +35948,7 @@ func rewriteValueAMD64_OpGetHiInt32x8(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } @@ -35960,7 +35960,7 @@ func rewriteValueAMD64_OpGetHiInt64x4(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } @@ -35972,7 +35972,7 @@ func rewriteValueAMD64_OpGetHiInt64x8(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI64X4256) - v.AuxInt = 
int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } @@ -35984,7 +35984,7 @@ func rewriteValueAMD64_OpGetHiInt8x32(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } @@ -35996,7 +35996,7 @@ func rewriteValueAMD64_OpGetHiInt8x64(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI64X4256) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } @@ -36008,7 +36008,7 @@ func rewriteValueAMD64_OpGetHiUint16x16(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } @@ -36020,7 +36020,7 @@ func rewriteValueAMD64_OpGetHiUint16x32(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI64X4256) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } @@ -36032,7 +36032,7 @@ func rewriteValueAMD64_OpGetHiUint32x16(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI64X4256) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } @@ -36044,7 +36044,7 @@ func rewriteValueAMD64_OpGetHiUint32x8(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } @@ -36056,7 +36056,7 @@ func rewriteValueAMD64_OpGetHiUint64x4(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } @@ -36068,7 +36068,7 @@ func rewriteValueAMD64_OpGetHiUint64x8(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI64X4256) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } @@ -36080,7 +36080,7 @@ func rewriteValueAMD64_OpGetHiUint8x32(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } @@ -36092,7 +36092,7 @@ func rewriteValueAMD64_OpGetHiUint8x64(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI64X4256) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } @@ -36104,7 +36104,7 @@ func rewriteValueAMD64_OpGetLoFloat32x16(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTF64X4256) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } @@ -36116,7 +36116,7 @@ func rewriteValueAMD64_OpGetLoFloat32x8(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTF128128) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } @@ -36128,7 +36128,7 @@ func rewriteValueAMD64_OpGetLoFloat64x4(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTF128128) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } @@ -36140,7 +36140,7 @@ func rewriteValueAMD64_OpGetLoFloat64x8(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTF64X4256) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } @@ -36152,7 +36152,7 @@ func rewriteValueAMD64_OpGetLoInt16x16(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } @@ -36164,7 +36164,7 @@ func rewriteValueAMD64_OpGetLoInt16x32(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI64X4256) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } @@ -36176,7 +36176,7 @@ func rewriteValueAMD64_OpGetLoInt32x16(v *Value) 
bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI64X4256) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } @@ -36188,7 +36188,7 @@ func rewriteValueAMD64_OpGetLoInt32x8(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } @@ -36200,7 +36200,7 @@ func rewriteValueAMD64_OpGetLoInt64x4(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } @@ -36212,7 +36212,7 @@ func rewriteValueAMD64_OpGetLoInt64x8(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI64X4256) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } @@ -36224,7 +36224,7 @@ func rewriteValueAMD64_OpGetLoInt8x32(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } @@ -36236,7 +36236,7 @@ func rewriteValueAMD64_OpGetLoInt8x64(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI64X4256) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } @@ -36248,7 +36248,7 @@ func rewriteValueAMD64_OpGetLoUint16x16(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } @@ -36260,7 +36260,7 @@ func rewriteValueAMD64_OpGetLoUint16x32(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI64X4256) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } @@ -36272,7 +36272,7 @@ func rewriteValueAMD64_OpGetLoUint32x16(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI64X4256) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } @@ -36284,7 +36284,7 @@ func rewriteValueAMD64_OpGetLoUint32x8(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } @@ -36296,7 +36296,7 @@ func rewriteValueAMD64_OpGetLoUint64x4(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } @@ -36308,7 +36308,7 @@ func rewriteValueAMD64_OpGetLoUint64x8(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI64X4256) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } @@ -36320,7 +36320,7 @@ func rewriteValueAMD64_OpGetLoUint8x32(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } @@ -36332,7 +36332,7 @@ func rewriteValueAMD64_OpGetLoUint8x64(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI64X4256) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } @@ -36349,7 +36349,7 @@ func rewriteValueAMD64_OpGreaterEqualFloat32x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -36364,7 +36364,7 @@ func rewriteValueAMD64_OpGreaterEqualFloat32x4(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(13) + v.AuxInt = uint8ToAuxInt(13) v.AddArg2(x, y) return true } @@ -36378,7 +36378,7 @@ func rewriteValueAMD64_OpGreaterEqualFloat32x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPS256) - v.AuxInt 
= int8ToAuxInt(13) + v.AuxInt = uint8ToAuxInt(13) v.AddArg2(x, y) return true } @@ -36392,7 +36392,7 @@ func rewriteValueAMD64_OpGreaterEqualFloat64x2(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(13) + v.AuxInt = uint8ToAuxInt(13) v.AddArg2(x, y) return true } @@ -36406,7 +36406,7 @@ func rewriteValueAMD64_OpGreaterEqualFloat64x4(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(13) + v.AuxInt = uint8ToAuxInt(13) v.AddArg2(x, y) return true } @@ -36423,7 +36423,7 @@ func rewriteValueAMD64_OpGreaterEqualFloat64x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -36441,7 +36441,7 @@ func rewriteValueAMD64_OpGreaterEqualInt16x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -36459,7 +36459,7 @@ func rewriteValueAMD64_OpGreaterEqualInt16x32(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -36477,7 +36477,7 @@ func rewriteValueAMD64_OpGreaterEqualInt16x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -36495,7 +36495,7 @@ func rewriteValueAMD64_OpGreaterEqualInt32x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -36513,7 +36513,7 @@ func rewriteValueAMD64_OpGreaterEqualInt32x4(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -36531,7 +36531,7 @@ func rewriteValueAMD64_OpGreaterEqualInt32x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -36549,7 +36549,7 @@ func rewriteValueAMD64_OpGreaterEqualInt64x2(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -36567,7 +36567,7 @@ func rewriteValueAMD64_OpGreaterEqualInt64x4(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -36585,7 +36585,7 @@ func rewriteValueAMD64_OpGreaterEqualInt64x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -36603,7 +36603,7 @@ func rewriteValueAMD64_OpGreaterEqualInt8x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, 
typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -36621,7 +36621,7 @@ func rewriteValueAMD64_OpGreaterEqualInt8x32(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -36639,7 +36639,7 @@ func rewriteValueAMD64_OpGreaterEqualInt8x64(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -36659,7 +36659,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedFloat32x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -36681,7 +36681,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedFloat32x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -36703,7 +36703,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedFloat32x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -36725,7 +36725,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedFloat64x2(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -36747,7 +36747,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedFloat64x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -36769,7 +36769,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedFloat64x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -36791,7 +36791,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedInt16x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -36813,7 +36813,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedInt16x32(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) 
v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -36835,7 +36835,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedInt16x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -36857,7 +36857,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedInt32x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -36879,7 +36879,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedInt32x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -36901,7 +36901,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedInt32x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -36923,7 +36923,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedInt64x2(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -36945,7 +36945,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedInt64x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -36967,7 +36967,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedInt64x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -36989,7 +36989,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedInt8x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37011,7 +37011,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedInt8x32(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37033,7 +37033,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedInt8x64(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37055,7 +37055,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedUint16x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37077,7 +37077,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedUint16x32(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37099,7 +37099,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedUint16x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37121,7 +37121,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedUint32x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37143,7 +37143,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedUint32x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37165,7 +37165,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedUint32x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37187,7 +37187,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedUint64x2(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37209,7 +37209,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedUint64x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37231,7 +37231,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedUint64x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37253,7 +37253,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedUint8x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, 
typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37275,7 +37275,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedUint8x32(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37297,7 +37297,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedUint8x64(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37317,7 +37317,7 @@ func rewriteValueAMD64_OpGreaterEqualUint16x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -37335,7 +37335,7 @@ func rewriteValueAMD64_OpGreaterEqualUint16x32(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -37353,7 +37353,7 @@ func rewriteValueAMD64_OpGreaterEqualUint16x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -37371,7 +37371,7 @@ func rewriteValueAMD64_OpGreaterEqualUint32x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -37389,7 +37389,7 @@ func rewriteValueAMD64_OpGreaterEqualUint32x4(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -37407,7 +37407,7 @@ func rewriteValueAMD64_OpGreaterEqualUint32x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -37425,7 +37425,7 @@ func rewriteValueAMD64_OpGreaterEqualUint64x2(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -37443,7 +37443,7 @@ func rewriteValueAMD64_OpGreaterEqualUint64x4(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -37461,7 +37461,7 @@ func rewriteValueAMD64_OpGreaterEqualUint64x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -37479,7 +37479,7 @@ func 
rewriteValueAMD64_OpGreaterEqualUint8x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -37497,7 +37497,7 @@ func rewriteValueAMD64_OpGreaterEqualUint8x32(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -37515,7 +37515,7 @@ func rewriteValueAMD64_OpGreaterEqualUint8x64(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -37533,7 +37533,7 @@ func rewriteValueAMD64_OpGreaterFloat32x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -37548,7 +37548,7 @@ func rewriteValueAMD64_OpGreaterFloat32x4(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(14) + v.AuxInt = uint8ToAuxInt(14) v.AddArg2(x, y) return true } @@ -37562,7 +37562,7 @@ func rewriteValueAMD64_OpGreaterFloat32x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(14) + v.AuxInt = uint8ToAuxInt(14) v.AddArg2(x, y) return true } @@ -37576,7 +37576,7 @@ func rewriteValueAMD64_OpGreaterFloat64x2(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(14) + v.AuxInt = uint8ToAuxInt(14) v.AddArg2(x, y) return true } @@ -37590,7 +37590,7 @@ func rewriteValueAMD64_OpGreaterFloat64x4(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(14) + v.AuxInt = uint8ToAuxInt(14) v.AddArg2(x, y) return true } @@ -37607,7 +37607,7 @@ func rewriteValueAMD64_OpGreaterFloat64x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -37695,7 +37695,7 @@ func rewriteValueAMD64_OpGreaterMaskedFloat32x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37717,7 +37717,7 @@ func rewriteValueAMD64_OpGreaterMaskedFloat32x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37739,7 +37739,7 @@ func rewriteValueAMD64_OpGreaterMaskedFloat32x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37761,7 +37761,7 @@ func rewriteValueAMD64_OpGreaterMaskedFloat64x2(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = 
int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37783,7 +37783,7 @@ func rewriteValueAMD64_OpGreaterMaskedFloat64x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37805,7 +37805,7 @@ func rewriteValueAMD64_OpGreaterMaskedFloat64x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37827,7 +37827,7 @@ func rewriteValueAMD64_OpGreaterMaskedInt16x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37849,7 +37849,7 @@ func rewriteValueAMD64_OpGreaterMaskedInt16x32(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37871,7 +37871,7 @@ func rewriteValueAMD64_OpGreaterMaskedInt16x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37893,7 +37893,7 @@ func rewriteValueAMD64_OpGreaterMaskedInt32x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37915,7 +37915,7 @@ func rewriteValueAMD64_OpGreaterMaskedInt32x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37937,7 +37937,7 @@ func rewriteValueAMD64_OpGreaterMaskedInt32x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37959,7 +37959,7 @@ func rewriteValueAMD64_OpGreaterMaskedInt64x2(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37981,7 +37981,7 @@ func rewriteValueAMD64_OpGreaterMaskedInt64x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) - 
v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38003,7 +38003,7 @@ func rewriteValueAMD64_OpGreaterMaskedInt64x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38025,7 +38025,7 @@ func rewriteValueAMD64_OpGreaterMaskedInt8x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38047,7 +38047,7 @@ func rewriteValueAMD64_OpGreaterMaskedInt8x32(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38069,7 +38069,7 @@ func rewriteValueAMD64_OpGreaterMaskedInt8x64(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38091,7 +38091,7 @@ func rewriteValueAMD64_OpGreaterMaskedUint16x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38113,7 +38113,7 @@ func rewriteValueAMD64_OpGreaterMaskedUint16x32(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38135,7 +38135,7 @@ func rewriteValueAMD64_OpGreaterMaskedUint16x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38157,7 +38157,7 @@ func rewriteValueAMD64_OpGreaterMaskedUint32x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38179,7 +38179,7 @@ func rewriteValueAMD64_OpGreaterMaskedUint32x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38201,7 +38201,7 @@ func rewriteValueAMD64_OpGreaterMaskedUint32x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, 
OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38223,7 +38223,7 @@ func rewriteValueAMD64_OpGreaterMaskedUint64x2(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38245,7 +38245,7 @@ func rewriteValueAMD64_OpGreaterMaskedUint64x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38267,7 +38267,7 @@ func rewriteValueAMD64_OpGreaterMaskedUint64x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38289,7 +38289,7 @@ func rewriteValueAMD64_OpGreaterMaskedUint8x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38311,7 +38311,7 @@ func rewriteValueAMD64_OpGreaterMaskedUint8x32(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38333,7 +38333,7 @@ func rewriteValueAMD64_OpGreaterMaskedUint8x64(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38353,7 +38353,7 @@ func rewriteValueAMD64_OpGreaterUint16x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -38371,7 +38371,7 @@ func rewriteValueAMD64_OpGreaterUint16x32(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -38389,7 +38389,7 @@ func rewriteValueAMD64_OpGreaterUint16x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -38407,7 +38407,7 @@ func rewriteValueAMD64_OpGreaterUint32x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -38425,7 +38425,7 @@ func rewriteValueAMD64_OpGreaterUint32x4(v *Value) 
bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -38443,7 +38443,7 @@ func rewriteValueAMD64_OpGreaterUint32x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -38461,7 +38461,7 @@ func rewriteValueAMD64_OpGreaterUint64x2(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -38479,7 +38479,7 @@ func rewriteValueAMD64_OpGreaterUint64x4(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -38497,7 +38497,7 @@ func rewriteValueAMD64_OpGreaterUint64x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -38515,7 +38515,7 @@ func rewriteValueAMD64_OpGreaterUint8x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -38533,7 +38533,7 @@ func rewriteValueAMD64_OpGreaterUint8x32(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -38551,7 +38551,7 @@ func rewriteValueAMD64_OpGreaterUint8x64(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -38602,7 +38602,7 @@ func rewriteValueAMD64_OpIsNanFloat32x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) + v0.AuxInt = uint8ToAuxInt(3) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -38617,7 +38617,7 @@ func rewriteValueAMD64_OpIsNanFloat32x4(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(3) + v.AuxInt = uint8ToAuxInt(3) v.AddArg2(x, y) return true } @@ -38631,7 +38631,7 @@ func rewriteValueAMD64_OpIsNanFloat32x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(3) + v.AuxInt = uint8ToAuxInt(3) v.AddArg2(x, y) return true } @@ -38645,7 +38645,7 @@ func rewriteValueAMD64_OpIsNanFloat64x2(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(3) + v.AuxInt = uint8ToAuxInt(3) v.AddArg2(x, y) return true } @@ -38659,7 +38659,7 @@ func rewriteValueAMD64_OpIsNanFloat64x4(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(3) + v.AuxInt = uint8ToAuxInt(3) v.AddArg2(x, y) return true } @@ -38676,7 +38676,7 @@ func rewriteValueAMD64_OpIsNanFloat64x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) + v0.AuxInt = uint8ToAuxInt(3) 
v0.AddArg2(x, y) v.AddArg(v0) return true @@ -38696,7 +38696,7 @@ func rewriteValueAMD64_OpIsNanMaskedFloat32x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) + v0.AuxInt = uint8ToAuxInt(3) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38718,7 +38718,7 @@ func rewriteValueAMD64_OpIsNanMaskedFloat32x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) + v0.AuxInt = uint8ToAuxInt(3) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38740,7 +38740,7 @@ func rewriteValueAMD64_OpIsNanMaskedFloat32x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) + v0.AuxInt = uint8ToAuxInt(3) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38762,7 +38762,7 @@ func rewriteValueAMD64_OpIsNanMaskedFloat64x2(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) + v0.AuxInt = uint8ToAuxInt(3) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38784,7 +38784,7 @@ func rewriteValueAMD64_OpIsNanMaskedFloat64x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) + v0.AuxInt = uint8ToAuxInt(3) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38806,7 +38806,7 @@ func rewriteValueAMD64_OpIsNanMaskedFloat64x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) + v0.AuxInt = uint8ToAuxInt(3) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39176,7 +39176,7 @@ func rewriteValueAMD64_OpLessEqualFloat32x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -39191,7 +39191,7 @@ func rewriteValueAMD64_OpLessEqualFloat32x4(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(2) + v.AuxInt = uint8ToAuxInt(2) v.AddArg2(x, y) return true } @@ -39205,7 +39205,7 @@ func rewriteValueAMD64_OpLessEqualFloat32x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(2) + v.AuxInt = uint8ToAuxInt(2) v.AddArg2(x, y) return true } @@ -39219,7 +39219,7 @@ func rewriteValueAMD64_OpLessEqualFloat64x2(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(2) + v.AuxInt = uint8ToAuxInt(2) v.AddArg2(x, y) return true } @@ -39233,7 +39233,7 @@ func rewriteValueAMD64_OpLessEqualFloat64x4(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(2) + v.AuxInt = uint8ToAuxInt(2) v.AddArg2(x, y) return true } @@ -39250,7 +39250,7 @@ func rewriteValueAMD64_OpLessEqualFloat64x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = 
uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -39268,7 +39268,7 @@ func rewriteValueAMD64_OpLessEqualInt16x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -39286,7 +39286,7 @@ func rewriteValueAMD64_OpLessEqualInt16x32(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -39304,7 +39304,7 @@ func rewriteValueAMD64_OpLessEqualInt16x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -39322,7 +39322,7 @@ func rewriteValueAMD64_OpLessEqualInt32x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -39340,7 +39340,7 @@ func rewriteValueAMD64_OpLessEqualInt32x4(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -39358,7 +39358,7 @@ func rewriteValueAMD64_OpLessEqualInt32x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -39376,7 +39376,7 @@ func rewriteValueAMD64_OpLessEqualInt64x2(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -39394,7 +39394,7 @@ func rewriteValueAMD64_OpLessEqualInt64x4(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -39412,7 +39412,7 @@ func rewriteValueAMD64_OpLessEqualInt64x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -39430,7 +39430,7 @@ func rewriteValueAMD64_OpLessEqualInt8x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -39448,7 +39448,7 @@ func rewriteValueAMD64_OpLessEqualInt8x32(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -39466,7 +39466,7 @@ func rewriteValueAMD64_OpLessEqualInt8x64(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -39486,7 +39486,7 @@ func rewriteValueAMD64_OpLessEqualMaskedFloat32x16(v *Value) bool { mask := v_2 
v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39508,7 +39508,7 @@ func rewriteValueAMD64_OpLessEqualMaskedFloat32x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39530,7 +39530,7 @@ func rewriteValueAMD64_OpLessEqualMaskedFloat32x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39552,7 +39552,7 @@ func rewriteValueAMD64_OpLessEqualMaskedFloat64x2(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39574,7 +39574,7 @@ func rewriteValueAMD64_OpLessEqualMaskedFloat64x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39596,7 +39596,7 @@ func rewriteValueAMD64_OpLessEqualMaskedFloat64x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39618,7 +39618,7 @@ func rewriteValueAMD64_OpLessEqualMaskedInt16x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39640,7 +39640,7 @@ func rewriteValueAMD64_OpLessEqualMaskedInt16x32(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39662,7 +39662,7 @@ func rewriteValueAMD64_OpLessEqualMaskedInt16x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39684,7 +39684,7 @@ func rewriteValueAMD64_OpLessEqualMaskedInt32x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39706,7 +39706,7 @@ func rewriteValueAMD64_OpLessEqualMaskedInt32x4(v *Value) 
bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39728,7 +39728,7 @@ func rewriteValueAMD64_OpLessEqualMaskedInt32x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39750,7 +39750,7 @@ func rewriteValueAMD64_OpLessEqualMaskedInt64x2(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39772,7 +39772,7 @@ func rewriteValueAMD64_OpLessEqualMaskedInt64x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39794,7 +39794,7 @@ func rewriteValueAMD64_OpLessEqualMaskedInt64x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39816,7 +39816,7 @@ func rewriteValueAMD64_OpLessEqualMaskedInt8x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39838,7 +39838,7 @@ func rewriteValueAMD64_OpLessEqualMaskedInt8x32(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39860,7 +39860,7 @@ func rewriteValueAMD64_OpLessEqualMaskedInt8x64(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39882,7 +39882,7 @@ func rewriteValueAMD64_OpLessEqualMaskedUint16x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39904,7 +39904,7 @@ func rewriteValueAMD64_OpLessEqualMaskedUint16x32(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39926,7 +39926,7 @@ func rewriteValueAMD64_OpLessEqualMaskedUint16x8(v 
*Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39948,7 +39948,7 @@ func rewriteValueAMD64_OpLessEqualMaskedUint32x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39970,7 +39970,7 @@ func rewriteValueAMD64_OpLessEqualMaskedUint32x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39992,7 +39992,7 @@ func rewriteValueAMD64_OpLessEqualMaskedUint32x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -40014,7 +40014,7 @@ func rewriteValueAMD64_OpLessEqualMaskedUint64x2(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -40036,7 +40036,7 @@ func rewriteValueAMD64_OpLessEqualMaskedUint64x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -40058,7 +40058,7 @@ func rewriteValueAMD64_OpLessEqualMaskedUint64x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -40080,7 +40080,7 @@ func rewriteValueAMD64_OpLessEqualMaskedUint8x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -40102,7 +40102,7 @@ func rewriteValueAMD64_OpLessEqualMaskedUint8x32(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -40124,7 +40124,7 @@ func rewriteValueAMD64_OpLessEqualMaskedUint8x64(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -40144,7 +40144,7 @@ func 
rewriteValueAMD64_OpLessEqualUint16x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40162,7 +40162,7 @@ func rewriteValueAMD64_OpLessEqualUint16x32(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40180,7 +40180,7 @@ func rewriteValueAMD64_OpLessEqualUint16x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40198,7 +40198,7 @@ func rewriteValueAMD64_OpLessEqualUint32x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40216,7 +40216,7 @@ func rewriteValueAMD64_OpLessEqualUint32x4(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40234,7 +40234,7 @@ func rewriteValueAMD64_OpLessEqualUint32x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40252,7 +40252,7 @@ func rewriteValueAMD64_OpLessEqualUint64x2(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40270,7 +40270,7 @@ func rewriteValueAMD64_OpLessEqualUint64x4(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40288,7 +40288,7 @@ func rewriteValueAMD64_OpLessEqualUint64x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40306,7 +40306,7 @@ func rewriteValueAMD64_OpLessEqualUint8x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40324,7 +40324,7 @@ func rewriteValueAMD64_OpLessEqualUint8x32(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40342,7 +40342,7 @@ func rewriteValueAMD64_OpLessEqualUint8x64(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40360,7 +40360,7 @@ func rewriteValueAMD64_OpLessFloat32x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = 
int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40375,7 +40375,7 @@ func rewriteValueAMD64_OpLessFloat32x4(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } @@ -40389,7 +40389,7 @@ func rewriteValueAMD64_OpLessFloat32x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } @@ -40403,7 +40403,7 @@ func rewriteValueAMD64_OpLessFloat64x2(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } @@ -40417,7 +40417,7 @@ func rewriteValueAMD64_OpLessFloat64x4(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } @@ -40434,7 +40434,7 @@ func rewriteValueAMD64_OpLessFloat64x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40452,7 +40452,7 @@ func rewriteValueAMD64_OpLessInt16x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40470,7 +40470,7 @@ func rewriteValueAMD64_OpLessInt16x32(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40488,7 +40488,7 @@ func rewriteValueAMD64_OpLessInt16x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40506,7 +40506,7 @@ func rewriteValueAMD64_OpLessInt32x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40524,7 +40524,7 @@ func rewriteValueAMD64_OpLessInt32x4(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40542,7 +40542,7 @@ func rewriteValueAMD64_OpLessInt32x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40560,7 +40560,7 @@ func rewriteValueAMD64_OpLessInt64x2(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40578,7 +40578,7 @@ func rewriteValueAMD64_OpLessInt64x4(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40596,7 +40596,7 @@ func rewriteValueAMD64_OpLessInt64x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := 
b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40614,7 +40614,7 @@ func rewriteValueAMD64_OpLessInt8x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40632,7 +40632,7 @@ func rewriteValueAMD64_OpLessInt8x32(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40650,7 +40650,7 @@ func rewriteValueAMD64_OpLessInt8x64(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40670,7 +40670,7 @@ func rewriteValueAMD64_OpLessMaskedFloat32x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -40692,7 +40692,7 @@ func rewriteValueAMD64_OpLessMaskedFloat32x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -40714,7 +40714,7 @@ func rewriteValueAMD64_OpLessMaskedFloat32x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -40736,7 +40736,7 @@ func rewriteValueAMD64_OpLessMaskedFloat64x2(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -40758,7 +40758,7 @@ func rewriteValueAMD64_OpLessMaskedFloat64x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -40780,7 +40780,7 @@ func rewriteValueAMD64_OpLessMaskedFloat64x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -40802,7 +40802,7 @@ func rewriteValueAMD64_OpLessMaskedInt16x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -40824,7 +40824,7 @@ func rewriteValueAMD64_OpLessMaskedInt16x32(v *Value) bool { mask := v_2 
v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -40846,7 +40846,7 @@ func rewriteValueAMD64_OpLessMaskedInt16x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -40868,7 +40868,7 @@ func rewriteValueAMD64_OpLessMaskedInt32x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -40890,7 +40890,7 @@ func rewriteValueAMD64_OpLessMaskedInt32x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -40912,7 +40912,7 @@ func rewriteValueAMD64_OpLessMaskedInt32x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -40934,7 +40934,7 @@ func rewriteValueAMD64_OpLessMaskedInt64x2(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -40956,7 +40956,7 @@ func rewriteValueAMD64_OpLessMaskedInt64x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -40978,7 +40978,7 @@ func rewriteValueAMD64_OpLessMaskedInt64x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -41000,7 +41000,7 @@ func rewriteValueAMD64_OpLessMaskedInt8x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -41022,7 +41022,7 @@ func rewriteValueAMD64_OpLessMaskedInt8x32(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -41044,7 +41044,7 @@ func rewriteValueAMD64_OpLessMaskedInt8x64(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) v0 := 
b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -41066,7 +41066,7 @@ func rewriteValueAMD64_OpLessMaskedUint16x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -41088,7 +41088,7 @@ func rewriteValueAMD64_OpLessMaskedUint16x32(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -41110,7 +41110,7 @@ func rewriteValueAMD64_OpLessMaskedUint16x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -41132,7 +41132,7 @@ func rewriteValueAMD64_OpLessMaskedUint32x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -41154,7 +41154,7 @@ func rewriteValueAMD64_OpLessMaskedUint32x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -41176,7 +41176,7 @@ func rewriteValueAMD64_OpLessMaskedUint32x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -41198,7 +41198,7 @@ func rewriteValueAMD64_OpLessMaskedUint64x2(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -41220,7 +41220,7 @@ func rewriteValueAMD64_OpLessMaskedUint64x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -41242,7 +41242,7 @@ func rewriteValueAMD64_OpLessMaskedUint64x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -41264,7 +41264,7 @@ func rewriteValueAMD64_OpLessMaskedUint8x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, 
OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -41286,7 +41286,7 @@ func rewriteValueAMD64_OpLessMaskedUint8x32(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -41308,7 +41308,7 @@ func rewriteValueAMD64_OpLessMaskedUint8x64(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -41328,7 +41328,7 @@ func rewriteValueAMD64_OpLessUint16x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -41346,7 +41346,7 @@ func rewriteValueAMD64_OpLessUint16x32(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -41364,7 +41364,7 @@ func rewriteValueAMD64_OpLessUint16x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -41382,7 +41382,7 @@ func rewriteValueAMD64_OpLessUint32x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -41400,7 +41400,7 @@ func rewriteValueAMD64_OpLessUint32x4(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -41418,7 +41418,7 @@ func rewriteValueAMD64_OpLessUint32x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -41436,7 +41436,7 @@ func rewriteValueAMD64_OpLessUint64x2(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -41454,7 +41454,7 @@ func rewriteValueAMD64_OpLessUint64x4(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -41472,7 +41472,7 @@ func rewriteValueAMD64_OpLessUint64x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -41490,7 +41490,7 @@ func rewriteValueAMD64_OpLessUint8x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, 
OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -41508,7 +41508,7 @@ func rewriteValueAMD64_OpLessUint8x32(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -41526,7 +41526,7 @@ func rewriteValueAMD64_OpLessUint8x64(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -45493,7 +45493,7 @@ func rewriteValueAMD64_OpNotEqualFloat32x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -45508,7 +45508,7 @@ func rewriteValueAMD64_OpNotEqualFloat32x4(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(4) + v.AuxInt = uint8ToAuxInt(4) v.AddArg2(x, y) return true } @@ -45522,7 +45522,7 @@ func rewriteValueAMD64_OpNotEqualFloat32x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(4) + v.AuxInt = uint8ToAuxInt(4) v.AddArg2(x, y) return true } @@ -45536,7 +45536,7 @@ func rewriteValueAMD64_OpNotEqualFloat64x2(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(4) + v.AuxInt = uint8ToAuxInt(4) v.AddArg2(x, y) return true } @@ -45550,7 +45550,7 @@ func rewriteValueAMD64_OpNotEqualFloat64x4(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(4) + v.AuxInt = uint8ToAuxInt(4) v.AddArg2(x, y) return true } @@ -45567,7 +45567,7 @@ func rewriteValueAMD64_OpNotEqualFloat64x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -45585,7 +45585,7 @@ func rewriteValueAMD64_OpNotEqualInt16x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -45603,7 +45603,7 @@ func rewriteValueAMD64_OpNotEqualInt16x32(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -45621,7 +45621,7 @@ func rewriteValueAMD64_OpNotEqualInt16x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -45639,7 +45639,7 @@ func rewriteValueAMD64_OpNotEqualInt32x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -45657,7 +45657,7 @@ func rewriteValueAMD64_OpNotEqualInt32x4(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -45675,7 +45675,7 @@ func 
rewriteValueAMD64_OpNotEqualInt32x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -45693,7 +45693,7 @@ func rewriteValueAMD64_OpNotEqualInt64x2(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -45711,7 +45711,7 @@ func rewriteValueAMD64_OpNotEqualInt64x4(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -45729,7 +45729,7 @@ func rewriteValueAMD64_OpNotEqualInt64x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -45747,7 +45747,7 @@ func rewriteValueAMD64_OpNotEqualInt8x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -45765,7 +45765,7 @@ func rewriteValueAMD64_OpNotEqualInt8x32(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -45783,7 +45783,7 @@ func rewriteValueAMD64_OpNotEqualInt8x64(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -45803,7 +45803,7 @@ func rewriteValueAMD64_OpNotEqualMaskedFloat32x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -45825,7 +45825,7 @@ func rewriteValueAMD64_OpNotEqualMaskedFloat32x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -45847,7 +45847,7 @@ func rewriteValueAMD64_OpNotEqualMaskedFloat32x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -45869,7 +45869,7 @@ func rewriteValueAMD64_OpNotEqualMaskedFloat64x2(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -45891,7 +45891,7 @@ func rewriteValueAMD64_OpNotEqualMaskedFloat64x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - 
v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -45913,7 +45913,7 @@ func rewriteValueAMD64_OpNotEqualMaskedFloat64x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -45935,7 +45935,7 @@ func rewriteValueAMD64_OpNotEqualMaskedInt16x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -45957,7 +45957,7 @@ func rewriteValueAMD64_OpNotEqualMaskedInt16x32(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -45979,7 +45979,7 @@ func rewriteValueAMD64_OpNotEqualMaskedInt16x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -46001,7 +46001,7 @@ func rewriteValueAMD64_OpNotEqualMaskedInt32x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -46023,7 +46023,7 @@ func rewriteValueAMD64_OpNotEqualMaskedInt32x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -46045,7 +46045,7 @@ func rewriteValueAMD64_OpNotEqualMaskedInt32x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -46067,7 +46067,7 @@ func rewriteValueAMD64_OpNotEqualMaskedInt64x2(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -46089,7 +46089,7 @@ func rewriteValueAMD64_OpNotEqualMaskedInt64x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -46111,7 +46111,7 @@ func rewriteValueAMD64_OpNotEqualMaskedInt64x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) - 
v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -46133,7 +46133,7 @@ func rewriteValueAMD64_OpNotEqualMaskedInt8x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -46155,7 +46155,7 @@ func rewriteValueAMD64_OpNotEqualMaskedInt8x32(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -46177,7 +46177,7 @@ func rewriteValueAMD64_OpNotEqualMaskedInt8x64(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -46199,7 +46199,7 @@ func rewriteValueAMD64_OpNotEqualMaskedUint16x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -46221,7 +46221,7 @@ func rewriteValueAMD64_OpNotEqualMaskedUint16x32(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -46243,7 +46243,7 @@ func rewriteValueAMD64_OpNotEqualMaskedUint16x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -46265,7 +46265,7 @@ func rewriteValueAMD64_OpNotEqualMaskedUint32x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -46287,7 +46287,7 @@ func rewriteValueAMD64_OpNotEqualMaskedUint32x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -46309,7 +46309,7 @@ func rewriteValueAMD64_OpNotEqualMaskedUint32x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -46331,7 +46331,7 @@ func rewriteValueAMD64_OpNotEqualMaskedUint64x2(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, 
typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -46353,7 +46353,7 @@ func rewriteValueAMD64_OpNotEqualMaskedUint64x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -46375,7 +46375,7 @@ func rewriteValueAMD64_OpNotEqualMaskedUint64x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -46397,7 +46397,7 @@ func rewriteValueAMD64_OpNotEqualMaskedUint8x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -46419,7 +46419,7 @@ func rewriteValueAMD64_OpNotEqualMaskedUint8x32(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -46441,7 +46441,7 @@ func rewriteValueAMD64_OpNotEqualMaskedUint8x64(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -46461,7 +46461,7 @@ func rewriteValueAMD64_OpNotEqualUint16x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -46479,7 +46479,7 @@ func rewriteValueAMD64_OpNotEqualUint16x32(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -46497,7 +46497,7 @@ func rewriteValueAMD64_OpNotEqualUint16x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -46515,7 +46515,7 @@ func rewriteValueAMD64_OpNotEqualUint32x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -46533,7 +46533,7 @@ func rewriteValueAMD64_OpNotEqualUint32x4(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -46551,7 +46551,7 @@ func rewriteValueAMD64_OpNotEqualUint32x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt 
= int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -46569,7 +46569,7 @@ func rewriteValueAMD64_OpNotEqualUint64x2(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -46587,7 +46587,7 @@ func rewriteValueAMD64_OpNotEqualUint64x4(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -46605,7 +46605,7 @@ func rewriteValueAMD64_OpNotEqualUint64x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -46623,7 +46623,7 @@ func rewriteValueAMD64_OpNotEqualUint8x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -46641,7 +46641,7 @@ func rewriteValueAMD64_OpNotEqualUint8x32(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -46659,7 +46659,7 @@ func rewriteValueAMD64_OpNotEqualUint8x64(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -48556,11 +48556,11 @@ func rewriteValueAMD64_OpRotateAllLeftMaskedInt32x16(v *Value) bool { // match: (RotateAllLeftMaskedInt32x16 [a] x mask) // result: (VPROLDMasked512 [a] x (VPMOVVec32x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VPROLDMasked512) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -48574,11 +48574,11 @@ func rewriteValueAMD64_OpRotateAllLeftMaskedInt32x4(v *Value) bool { // match: (RotateAllLeftMaskedInt32x4 [a] x mask) // result: (VPROLDMasked128 [a] x (VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VPROLDMasked128) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -48592,11 +48592,11 @@ func rewriteValueAMD64_OpRotateAllLeftMaskedInt32x8(v *Value) bool { // match: (RotateAllLeftMaskedInt32x8 [a] x mask) // result: (VPROLDMasked256 [a] x (VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VPROLDMasked256) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -48610,11 +48610,11 @@ func rewriteValueAMD64_OpRotateAllLeftMaskedInt64x2(v *Value) bool { // match: (RotateAllLeftMaskedInt64x2 [a] x mask) // result: (VPROLQMasked128 [a] x (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VPROLQMasked128) - v.AuxInt 
= int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -48628,11 +48628,11 @@ func rewriteValueAMD64_OpRotateAllLeftMaskedInt64x4(v *Value) bool { // match: (RotateAllLeftMaskedInt64x4 [a] x mask) // result: (VPROLQMasked256 [a] x (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VPROLQMasked256) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -48646,11 +48646,11 @@ func rewriteValueAMD64_OpRotateAllLeftMaskedInt64x8(v *Value) bool { // match: (RotateAllLeftMaskedInt64x8 [a] x mask) // result: (VPROLQMasked512 [a] x (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VPROLQMasked512) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -48664,11 +48664,11 @@ func rewriteValueAMD64_OpRotateAllLeftMaskedUint32x16(v *Value) bool { // match: (RotateAllLeftMaskedUint32x16 [a] x mask) // result: (VPROLDMasked512 [a] x (VPMOVVec32x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VPROLDMasked512) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -48682,11 +48682,11 @@ func rewriteValueAMD64_OpRotateAllLeftMaskedUint32x4(v *Value) bool { // match: (RotateAllLeftMaskedUint32x4 [a] x mask) // result: (VPROLDMasked128 [a] x (VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VPROLDMasked128) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -48700,11 +48700,11 @@ func rewriteValueAMD64_OpRotateAllLeftMaskedUint32x8(v *Value) bool { // match: (RotateAllLeftMaskedUint32x8 [a] x mask) // result: (VPROLDMasked256 [a] x (VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VPROLDMasked256) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -48718,11 +48718,11 @@ func rewriteValueAMD64_OpRotateAllLeftMaskedUint64x2(v *Value) bool { // match: (RotateAllLeftMaskedUint64x2 [a] x mask) // result: (VPROLQMasked128 [a] x (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VPROLQMasked128) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -48736,11 +48736,11 @@ func rewriteValueAMD64_OpRotateAllLeftMaskedUint64x4(v *Value) bool { // match: (RotateAllLeftMaskedUint64x4 [a] x mask) // result: (VPROLQMasked256 [a] x (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VPROLQMasked256) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -48754,11 +48754,11 @@ func 
rewriteValueAMD64_OpRotateAllLeftMaskedUint64x8(v *Value) bool { // match: (RotateAllLeftMaskedUint64x8 [a] x mask) // result: (VPROLQMasked512 [a] x (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VPROLQMasked512) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -48772,11 +48772,11 @@ func rewriteValueAMD64_OpRotateAllRightMaskedInt32x16(v *Value) bool { // match: (RotateAllRightMaskedInt32x16 [a] x mask) // result: (VPRORDMasked512 [a] x (VPMOVVec32x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VPRORDMasked512) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -48790,11 +48790,11 @@ func rewriteValueAMD64_OpRotateAllRightMaskedInt32x4(v *Value) bool { // match: (RotateAllRightMaskedInt32x4 [a] x mask) // result: (VPRORDMasked128 [a] x (VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VPRORDMasked128) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -48808,11 +48808,11 @@ func rewriteValueAMD64_OpRotateAllRightMaskedInt32x8(v *Value) bool { // match: (RotateAllRightMaskedInt32x8 [a] x mask) // result: (VPRORDMasked256 [a] x (VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VPRORDMasked256) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -48826,11 +48826,11 @@ func rewriteValueAMD64_OpRotateAllRightMaskedInt64x2(v *Value) bool { // match: (RotateAllRightMaskedInt64x2 [a] x mask) // result: (VPRORQMasked128 [a] x (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VPRORQMasked128) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -48844,11 +48844,11 @@ func rewriteValueAMD64_OpRotateAllRightMaskedInt64x4(v *Value) bool { // match: (RotateAllRightMaskedInt64x4 [a] x mask) // result: (VPRORQMasked256 [a] x (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VPRORQMasked256) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -48862,11 +48862,11 @@ func rewriteValueAMD64_OpRotateAllRightMaskedInt64x8(v *Value) bool { // match: (RotateAllRightMaskedInt64x8 [a] x mask) // result: (VPRORQMasked512 [a] x (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VPRORQMasked512) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -48880,11 +48880,11 @@ func rewriteValueAMD64_OpRotateAllRightMaskedUint32x16(v *Value) bool { // match: (RotateAllRightMaskedUint32x16 [a] x mask) // result: (VPRORDMasked512 [a] x (VPMOVVec32x16ToM 
mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VPRORDMasked512) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -48898,11 +48898,11 @@ func rewriteValueAMD64_OpRotateAllRightMaskedUint32x4(v *Value) bool { // match: (RotateAllRightMaskedUint32x4 [a] x mask) // result: (VPRORDMasked128 [a] x (VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VPRORDMasked128) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -48916,11 +48916,11 @@ func rewriteValueAMD64_OpRotateAllRightMaskedUint32x8(v *Value) bool { // match: (RotateAllRightMaskedUint32x8 [a] x mask) // result: (VPRORDMasked256 [a] x (VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VPRORDMasked256) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -48934,11 +48934,11 @@ func rewriteValueAMD64_OpRotateAllRightMaskedUint64x2(v *Value) bool { // match: (RotateAllRightMaskedUint64x2 [a] x mask) // result: (VPRORQMasked128 [a] x (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VPRORQMasked128) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -48952,11 +48952,11 @@ func rewriteValueAMD64_OpRotateAllRightMaskedUint64x4(v *Value) bool { // match: (RotateAllRightMaskedUint64x4 [a] x mask) // result: (VPRORQMasked256 [a] x (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VPRORQMasked256) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -48970,11 +48970,11 @@ func rewriteValueAMD64_OpRotateAllRightMaskedUint64x8(v *Value) bool { // match: (RotateAllRightMaskedUint64x8 [a] x mask) // result: (VPRORQMasked512 [a] x (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VPRORQMasked512) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -49432,7 +49432,7 @@ func rewriteValueAMD64_OpRoundToEvenFloat32x4(v *Value) bool { for { x := v_0 v.reset(OpAMD64VROUNDPS128) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } @@ -49444,7 +49444,7 @@ func rewriteValueAMD64_OpRoundToEvenFloat32x8(v *Value) bool { for { x := v_0 v.reset(OpAMD64VROUNDPS256) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } @@ -49456,7 +49456,7 @@ func rewriteValueAMD64_OpRoundToEvenFloat64x2(v *Value) bool { for { x := v_0 v.reset(OpAMD64VROUNDPD128) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } @@ -49468,7 +49468,7 @@ func rewriteValueAMD64_OpRoundToEvenFloat64x4(v *Value) bool { for { x := v_0 v.reset(OpAMD64VROUNDPD256) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) 
return true } @@ -49478,10 +49478,10 @@ func rewriteValueAMD64_OpRoundToEvenScaledFloat32x16(v *Value) bool { // match: (RoundToEvenScaledFloat32x16 [a] x) // result: (VRNDSCALEPS512 [a+0] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VRNDSCALEPS512) - v.AuxInt = int8ToAuxInt(a + 0) + v.AuxInt = uint8ToAuxInt(a + 0) v.AddArg(x) return true } @@ -49491,10 +49491,10 @@ func rewriteValueAMD64_OpRoundToEvenScaledFloat32x4(v *Value) bool { // match: (RoundToEvenScaledFloat32x4 [a] x) // result: (VRNDSCALEPS128 [a+0] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VRNDSCALEPS128) - v.AuxInt = int8ToAuxInt(a + 0) + v.AuxInt = uint8ToAuxInt(a + 0) v.AddArg(x) return true } @@ -49504,10 +49504,10 @@ func rewriteValueAMD64_OpRoundToEvenScaledFloat32x8(v *Value) bool { // match: (RoundToEvenScaledFloat32x8 [a] x) // result: (VRNDSCALEPS256 [a+0] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VRNDSCALEPS256) - v.AuxInt = int8ToAuxInt(a + 0) + v.AuxInt = uint8ToAuxInt(a + 0) v.AddArg(x) return true } @@ -49517,10 +49517,10 @@ func rewriteValueAMD64_OpRoundToEvenScaledFloat64x2(v *Value) bool { // match: (RoundToEvenScaledFloat64x2 [a] x) // result: (VRNDSCALEPD128 [a+0] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VRNDSCALEPD128) - v.AuxInt = int8ToAuxInt(a + 0) + v.AuxInt = uint8ToAuxInt(a + 0) v.AddArg(x) return true } @@ -49530,10 +49530,10 @@ func rewriteValueAMD64_OpRoundToEvenScaledFloat64x4(v *Value) bool { // match: (RoundToEvenScaledFloat64x4 [a] x) // result: (VRNDSCALEPD256 [a+0] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VRNDSCALEPD256) - v.AuxInt = int8ToAuxInt(a + 0) + v.AuxInt = uint8ToAuxInt(a + 0) v.AddArg(x) return true } @@ -49543,10 +49543,10 @@ func rewriteValueAMD64_OpRoundToEvenScaledFloat64x8(v *Value) bool { // match: (RoundToEvenScaledFloat64x8 [a] x) // result: (VRNDSCALEPD512 [a+0] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VRNDSCALEPD512) - v.AuxInt = int8ToAuxInt(a + 0) + v.AuxInt = uint8ToAuxInt(a + 0) v.AddArg(x) return true } @@ -49558,11 +49558,11 @@ func rewriteValueAMD64_OpRoundToEvenScaledMaskedFloat32x16(v *Value) bool { // match: (RoundToEvenScaledMaskedFloat32x16 [a] x mask) // result: (VRNDSCALEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VRNDSCALEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 0) + v.AuxInt = uint8ToAuxInt(a + 0) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -49576,11 +49576,11 @@ func rewriteValueAMD64_OpRoundToEvenScaledMaskedFloat32x4(v *Value) bool { // match: (RoundToEvenScaledMaskedFloat32x4 [a] x mask) // result: (VRNDSCALEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VRNDSCALEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 0) + v.AuxInt = uint8ToAuxInt(a + 0) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -49594,11 +49594,11 @@ func rewriteValueAMD64_OpRoundToEvenScaledMaskedFloat32x8(v *Value) bool { // match: (RoundToEvenScaledMaskedFloat32x8 [a] x mask) // result: (VRNDSCALEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) for { - a := 
auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VRNDSCALEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 0) + v.AuxInt = uint8ToAuxInt(a + 0) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -49612,11 +49612,11 @@ func rewriteValueAMD64_OpRoundToEvenScaledMaskedFloat64x2(v *Value) bool { // match: (RoundToEvenScaledMaskedFloat64x2 [a] x mask) // result: (VRNDSCALEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VRNDSCALEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 0) + v.AuxInt = uint8ToAuxInt(a + 0) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -49630,11 +49630,11 @@ func rewriteValueAMD64_OpRoundToEvenScaledMaskedFloat64x4(v *Value) bool { // match: (RoundToEvenScaledMaskedFloat64x4 [a] x mask) // result: (VRNDSCALEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VRNDSCALEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 0) + v.AuxInt = uint8ToAuxInt(a + 0) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -49648,11 +49648,11 @@ func rewriteValueAMD64_OpRoundToEvenScaledMaskedFloat64x8(v *Value) bool { // match: (RoundToEvenScaledMaskedFloat64x8 [a] x mask) // result: (VRNDSCALEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VRNDSCALEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 0) + v.AuxInt = uint8ToAuxInt(a + 0) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -49664,10 +49664,10 @@ func rewriteValueAMD64_OpRoundToEvenScaledResidueFloat32x16(v *Value) bool { // match: (RoundToEvenScaledResidueFloat32x16 [a] x) // result: (VREDUCEPS512 [a+0] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VREDUCEPS512) - v.AuxInt = int8ToAuxInt(a + 0) + v.AuxInt = uint8ToAuxInt(a + 0) v.AddArg(x) return true } @@ -49677,10 +49677,10 @@ func rewriteValueAMD64_OpRoundToEvenScaledResidueFloat32x4(v *Value) bool { // match: (RoundToEvenScaledResidueFloat32x4 [a] x) // result: (VREDUCEPS128 [a+0] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VREDUCEPS128) - v.AuxInt = int8ToAuxInt(a + 0) + v.AuxInt = uint8ToAuxInt(a + 0) v.AddArg(x) return true } @@ -49690,10 +49690,10 @@ func rewriteValueAMD64_OpRoundToEvenScaledResidueFloat32x8(v *Value) bool { // match: (RoundToEvenScaledResidueFloat32x8 [a] x) // result: (VREDUCEPS256 [a+0] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VREDUCEPS256) - v.AuxInt = int8ToAuxInt(a + 0) + v.AuxInt = uint8ToAuxInt(a + 0) v.AddArg(x) return true } @@ -49703,10 +49703,10 @@ func rewriteValueAMD64_OpRoundToEvenScaledResidueFloat64x2(v *Value) bool { // match: (RoundToEvenScaledResidueFloat64x2 [a] x) // result: (VREDUCEPD128 [a+0] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VREDUCEPD128) - v.AuxInt = int8ToAuxInt(a + 0) + v.AuxInt = uint8ToAuxInt(a + 0) v.AddArg(x) return true } @@ -49716,10 +49716,10 @@ func rewriteValueAMD64_OpRoundToEvenScaledResidueFloat64x4(v *Value) bool { // match: (RoundToEvenScaledResidueFloat64x4 [a] x) // result: (VREDUCEPD256 [a+0] x) for { - 
a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VREDUCEPD256) - v.AuxInt = int8ToAuxInt(a + 0) + v.AuxInt = uint8ToAuxInt(a + 0) v.AddArg(x) return true } @@ -49729,10 +49729,10 @@ func rewriteValueAMD64_OpRoundToEvenScaledResidueFloat64x8(v *Value) bool { // match: (RoundToEvenScaledResidueFloat64x8 [a] x) // result: (VREDUCEPD512 [a+0] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VREDUCEPD512) - v.AuxInt = int8ToAuxInt(a + 0) + v.AuxInt = uint8ToAuxInt(a + 0) v.AddArg(x) return true } @@ -49744,11 +49744,11 @@ func rewriteValueAMD64_OpRoundToEvenScaledResidueMaskedFloat32x16(v *Value) bool // match: (RoundToEvenScaledResidueMaskedFloat32x16 [a] x mask) // result: (VREDUCEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VREDUCEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 0) + v.AuxInt = uint8ToAuxInt(a + 0) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -49762,11 +49762,11 @@ func rewriteValueAMD64_OpRoundToEvenScaledResidueMaskedFloat32x4(v *Value) bool // match: (RoundToEvenScaledResidueMaskedFloat32x4 [a] x mask) // result: (VREDUCEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VREDUCEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 0) + v.AuxInt = uint8ToAuxInt(a + 0) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -49780,11 +49780,11 @@ func rewriteValueAMD64_OpRoundToEvenScaledResidueMaskedFloat32x8(v *Value) bool // match: (RoundToEvenScaledResidueMaskedFloat32x8 [a] x mask) // result: (VREDUCEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VREDUCEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 0) + v.AuxInt = uint8ToAuxInt(a + 0) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -49798,11 +49798,11 @@ func rewriteValueAMD64_OpRoundToEvenScaledResidueMaskedFloat64x2(v *Value) bool // match: (RoundToEvenScaledResidueMaskedFloat64x2 [a] x mask) // result: (VREDUCEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VREDUCEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 0) + v.AuxInt = uint8ToAuxInt(a + 0) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -49816,11 +49816,11 @@ func rewriteValueAMD64_OpRoundToEvenScaledResidueMaskedFloat64x4(v *Value) bool // match: (RoundToEvenScaledResidueMaskedFloat64x4 [a] x mask) // result: (VREDUCEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VREDUCEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 0) + v.AuxInt = uint8ToAuxInt(a + 0) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -49834,11 +49834,11 @@ func rewriteValueAMD64_OpRoundToEvenScaledResidueMaskedFloat64x8(v *Value) bool // match: (RoundToEvenScaledResidueMaskedFloat64x8 [a] x mask) // result: (VREDUCEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VREDUCEPDMasked512) - v.AuxInt = 
int8ToAuxInt(a + 0) + v.AuxInt = uint8ToAuxInt(a + 0) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -51715,7 +51715,7 @@ func rewriteValueAMD64_OpSetHiFloat32x16(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTF64X4512) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } @@ -51729,7 +51729,7 @@ func rewriteValueAMD64_OpSetHiFloat32x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTF128256) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } @@ -51743,7 +51743,7 @@ func rewriteValueAMD64_OpSetHiFloat64x4(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTF128256) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } @@ -51757,7 +51757,7 @@ func rewriteValueAMD64_OpSetHiFloat64x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTF64X4512) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } @@ -51771,7 +51771,7 @@ func rewriteValueAMD64_OpSetHiInt16x16(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } @@ -51785,7 +51785,7 @@ func rewriteValueAMD64_OpSetHiInt16x32(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } @@ -51799,7 +51799,7 @@ func rewriteValueAMD64_OpSetHiInt32x16(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } @@ -51813,7 +51813,7 @@ func rewriteValueAMD64_OpSetHiInt32x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } @@ -51827,7 +51827,7 @@ func rewriteValueAMD64_OpSetHiInt64x4(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } @@ -51841,7 +51841,7 @@ func rewriteValueAMD64_OpSetHiInt64x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } @@ -51855,7 +51855,7 @@ func rewriteValueAMD64_OpSetHiInt8x32(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } @@ -51869,7 +51869,7 @@ func rewriteValueAMD64_OpSetHiInt8x64(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } @@ -51883,7 +51883,7 @@ func rewriteValueAMD64_OpSetHiUint16x16(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } @@ -51897,7 +51897,7 @@ func rewriteValueAMD64_OpSetHiUint16x32(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } @@ -51911,7 +51911,7 @@ func rewriteValueAMD64_OpSetHiUint32x16(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } @@ -51925,7 +51925,7 @@ func rewriteValueAMD64_OpSetHiUint32x8(v *Value) bool { x := v_0 y := v_1 
v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } @@ -51939,7 +51939,7 @@ func rewriteValueAMD64_OpSetHiUint64x4(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } @@ -51953,7 +51953,7 @@ func rewriteValueAMD64_OpSetHiUint64x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } @@ -51967,7 +51967,7 @@ func rewriteValueAMD64_OpSetHiUint8x32(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } @@ -51981,7 +51981,7 @@ func rewriteValueAMD64_OpSetHiUint8x64(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } @@ -51995,7 +51995,7 @@ func rewriteValueAMD64_OpSetLoFloat32x16(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTF64X4512) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } @@ -52009,7 +52009,7 @@ func rewriteValueAMD64_OpSetLoFloat32x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTF128256) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } @@ -52023,7 +52023,7 @@ func rewriteValueAMD64_OpSetLoFloat64x4(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTF128256) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } @@ -52037,7 +52037,7 @@ func rewriteValueAMD64_OpSetLoFloat64x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTF64X4512) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } @@ -52051,7 +52051,7 @@ func rewriteValueAMD64_OpSetLoInt16x16(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } @@ -52065,7 +52065,7 @@ func rewriteValueAMD64_OpSetLoInt16x32(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } @@ -52079,7 +52079,7 @@ func rewriteValueAMD64_OpSetLoInt32x16(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } @@ -52093,7 +52093,7 @@ func rewriteValueAMD64_OpSetLoInt32x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } @@ -52107,7 +52107,7 @@ func rewriteValueAMD64_OpSetLoInt64x4(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } @@ -52121,7 +52121,7 @@ func rewriteValueAMD64_OpSetLoInt64x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } @@ -52135,7 +52135,7 @@ func rewriteValueAMD64_OpSetLoInt8x32(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } @@ -52149,7 +52149,7 @@ func rewriteValueAMD64_OpSetLoInt8x64(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = 
int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } @@ -52163,7 +52163,7 @@ func rewriteValueAMD64_OpSetLoUint16x16(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } @@ -52177,7 +52177,7 @@ func rewriteValueAMD64_OpSetLoUint16x32(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } @@ -52191,7 +52191,7 @@ func rewriteValueAMD64_OpSetLoUint32x16(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } @@ -52205,7 +52205,7 @@ func rewriteValueAMD64_OpSetLoUint32x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } @@ -52219,7 +52219,7 @@ func rewriteValueAMD64_OpSetLoUint64x4(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } @@ -52233,7 +52233,7 @@ func rewriteValueAMD64_OpSetLoUint64x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } @@ -52247,7 +52247,7 @@ func rewriteValueAMD64_OpSetLoUint8x32(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } @@ -52261,7 +52261,7 @@ func rewriteValueAMD64_OpSetLoUint8x64(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } @@ -52274,12 +52274,12 @@ func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt16x16(v *Value) bool { // match: (ShiftAllLeftConcatMaskedInt16x16 [a] x y mask) // result: (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHLDWMasked256) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -52294,12 +52294,12 @@ func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt16x32(v *Value) bool { // match: (ShiftAllLeftConcatMaskedInt16x32 [a] x y mask) // result: (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHLDWMasked512) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -52314,12 +52314,12 @@ func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt16x8(v *Value) bool { // match: (ShiftAllLeftConcatMaskedInt16x8 [a] x y mask) // result: (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHLDWMasked128) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -52334,12 +52334,12 @@ func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt32x16(v *Value) bool { // match: (ShiftAllLeftConcatMaskedInt32x16 [a] x y mask) // result: (VPSHLDDMasked512 [a] x y 
(VPMOVVec32x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHLDDMasked512) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -52354,12 +52354,12 @@ func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt32x4(v *Value) bool { // match: (ShiftAllLeftConcatMaskedInt32x4 [a] x y mask) // result: (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHLDDMasked128) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -52374,12 +52374,12 @@ func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt32x8(v *Value) bool { // match: (ShiftAllLeftConcatMaskedInt32x8 [a] x y mask) // result: (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHLDDMasked256) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -52394,12 +52394,12 @@ func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt64x2(v *Value) bool { // match: (ShiftAllLeftConcatMaskedInt64x2 [a] x y mask) // result: (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHLDQMasked128) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -52414,12 +52414,12 @@ func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt64x4(v *Value) bool { // match: (ShiftAllLeftConcatMaskedInt64x4 [a] x y mask) // result: (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHLDQMasked256) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -52434,12 +52434,12 @@ func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt64x8(v *Value) bool { // match: (ShiftAllLeftConcatMaskedInt64x8 [a] x y mask) // result: (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHLDQMasked512) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -52454,12 +52454,12 @@ func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint16x16(v *Value) bool { // match: (ShiftAllLeftConcatMaskedUint16x16 [a] x y mask) // result: (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHLDWMasked256) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -52474,12 +52474,12 @@ func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint16x32(v *Value) bool { // match: (ShiftAllLeftConcatMaskedUint16x32 [a] x y mask) // result: 
(VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHLDWMasked512) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -52494,12 +52494,12 @@ func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint16x8(v *Value) bool { // match: (ShiftAllLeftConcatMaskedUint16x8 [a] x y mask) // result: (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHLDWMasked128) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -52514,12 +52514,12 @@ func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint32x16(v *Value) bool { // match: (ShiftAllLeftConcatMaskedUint32x16 [a] x y mask) // result: (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHLDDMasked512) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -52534,12 +52534,12 @@ func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint32x4(v *Value) bool { // match: (ShiftAllLeftConcatMaskedUint32x4 [a] x y mask) // result: (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHLDDMasked128) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -52554,12 +52554,12 @@ func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint32x8(v *Value) bool { // match: (ShiftAllLeftConcatMaskedUint32x8 [a] x y mask) // result: (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHLDDMasked256) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -52574,12 +52574,12 @@ func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint64x2(v *Value) bool { // match: (ShiftAllLeftConcatMaskedUint64x2 [a] x y mask) // result: (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHLDQMasked128) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -52594,12 +52594,12 @@ func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint64x4(v *Value) bool { // match: (ShiftAllLeftConcatMaskedUint64x4 [a] x y mask) // result: (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHLDQMasked256) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -52614,12 +52614,12 @@ func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint64x8(v *Value) bool { // match: (ShiftAllLeftConcatMaskedUint64x8 [a] 
x y mask) // result: (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHLDQMasked512) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -52630,7 +52630,7 @@ func rewriteValueAMD64_OpShiftAllLeftInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllLeftInt16x16 x (MOVQconst [c])) - // result: (VPSLLW256const [int8(c)] x) + // result: (VPSLLW256const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -52638,7 +52638,7 @@ func rewriteValueAMD64_OpShiftAllLeftInt16x16(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSLLW256const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -52656,7 +52656,7 @@ func rewriteValueAMD64_OpShiftAllLeftInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllLeftInt16x32 x (MOVQconst [c])) - // result: (VPSLLW512const [int8(c)] x) + // result: (VPSLLW512const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -52664,7 +52664,7 @@ func rewriteValueAMD64_OpShiftAllLeftInt16x32(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSLLW512const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -52682,7 +52682,7 @@ func rewriteValueAMD64_OpShiftAllLeftInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllLeftInt16x8 x (MOVQconst [c])) - // result: (VPSLLW128const [int8(c)] x) + // result: (VPSLLW128const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -52690,7 +52690,7 @@ func rewriteValueAMD64_OpShiftAllLeftInt16x8(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSLLW128const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -52708,7 +52708,7 @@ func rewriteValueAMD64_OpShiftAllLeftInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllLeftInt32x16 x (MOVQconst [c])) - // result: (VPSLLD512const [int8(c)] x) + // result: (VPSLLD512const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -52716,7 +52716,7 @@ func rewriteValueAMD64_OpShiftAllLeftInt32x16(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSLLD512const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -52734,7 +52734,7 @@ func rewriteValueAMD64_OpShiftAllLeftInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllLeftInt32x4 x (MOVQconst [c])) - // result: (VPSLLD128const [int8(c)] x) + // result: (VPSLLD128const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -52742,7 +52742,7 @@ func rewriteValueAMD64_OpShiftAllLeftInt32x4(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSLLD128const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -52760,7 +52760,7 @@ func rewriteValueAMD64_OpShiftAllLeftInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllLeftInt32x8 x (MOVQconst [c])) - // result: (VPSLLD256const [int8(c)] x) + // result: (VPSLLD256const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -52768,7 +52768,7 @@ func rewriteValueAMD64_OpShiftAllLeftInt32x8(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) 
v.reset(OpAMD64VPSLLD256const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -52786,7 +52786,7 @@ func rewriteValueAMD64_OpShiftAllLeftInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllLeftInt64x2 x (MOVQconst [c])) - // result: (VPSLLQ128const [int8(c)] x) + // result: (VPSLLQ128const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -52794,7 +52794,7 @@ func rewriteValueAMD64_OpShiftAllLeftInt64x2(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSLLQ128const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -52812,7 +52812,7 @@ func rewriteValueAMD64_OpShiftAllLeftInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllLeftInt64x4 x (MOVQconst [c])) - // result: (VPSLLQ256const [int8(c)] x) + // result: (VPSLLQ256const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -52820,7 +52820,7 @@ func rewriteValueAMD64_OpShiftAllLeftInt64x4(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSLLQ256const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -52838,7 +52838,7 @@ func rewriteValueAMD64_OpShiftAllLeftInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllLeftInt64x8 x (MOVQconst [c])) - // result: (VPSLLQ512const [int8(c)] x) + // result: (VPSLLQ512const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -52846,7 +52846,7 @@ func rewriteValueAMD64_OpShiftAllLeftInt64x8(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSLLQ512const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -52866,7 +52866,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt16x16(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllLeftMaskedInt16x16 x (MOVQconst [c]) mask) - // result: (VPSLLWMasked256const [int8(c)] x (VPMOVVec16x16ToM mask)) + // result: (VPSLLWMasked256const [uint8(c)] x (VPMOVVec16x16ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -52875,7 +52875,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt16x16(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSLLWMasked256const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -52900,7 +52900,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt16x32(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllLeftMaskedInt16x32 x (MOVQconst [c]) mask) - // result: (VPSLLWMasked512const [int8(c)] x (VPMOVVec16x32ToM mask)) + // result: (VPSLLWMasked512const [uint8(c)] x (VPMOVVec16x32ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -52909,7 +52909,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt16x32(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSLLWMasked512const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -52934,7 +52934,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt16x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllLeftMaskedInt16x8 x (MOVQconst [c]) mask) - // result: (VPSLLWMasked128const [int8(c)] x (VPMOVVec16x8ToM mask)) + // result: (VPSLLWMasked128const [uint8(c)] x (VPMOVVec16x8ToM mask)) for { x 
:= v_0 if v_1.Op != OpAMD64MOVQconst { @@ -52943,7 +52943,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt16x8(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSLLWMasked128const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -52968,7 +52968,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt32x16(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllLeftMaskedInt32x16 x (MOVQconst [c]) mask) - // result: (VPSLLDMasked512const [int8(c)] x (VPMOVVec32x16ToM mask)) + // result: (VPSLLDMasked512const [uint8(c)] x (VPMOVVec32x16ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -52977,7 +52977,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt32x16(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSLLDMasked512const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -53002,7 +53002,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt32x4(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllLeftMaskedInt32x4 x (MOVQconst [c]) mask) - // result: (VPSLLDMasked128const [int8(c)] x (VPMOVVec32x4ToM mask)) + // result: (VPSLLDMasked128const [uint8(c)] x (VPMOVVec32x4ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -53011,7 +53011,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt32x4(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSLLDMasked128const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -53036,7 +53036,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt32x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllLeftMaskedInt32x8 x (MOVQconst [c]) mask) - // result: (VPSLLDMasked256const [int8(c)] x (VPMOVVec32x8ToM mask)) + // result: (VPSLLDMasked256const [uint8(c)] x (VPMOVVec32x8ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -53045,7 +53045,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt32x8(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSLLDMasked256const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -53070,7 +53070,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt64x2(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllLeftMaskedInt64x2 x (MOVQconst [c]) mask) - // result: (VPSLLQMasked128const [int8(c)] x (VPMOVVec64x2ToM mask)) + // result: (VPSLLQMasked128const [uint8(c)] x (VPMOVVec64x2ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -53079,7 +53079,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt64x2(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSLLQMasked128const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -53104,7 +53104,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt64x4(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllLeftMaskedInt64x4 x (MOVQconst [c]) mask) - // result: (VPSLLQMasked256const [int8(c)] x (VPMOVVec64x4ToM mask)) + // result: (VPSLLQMasked256const [uint8(c)] x 
(VPMOVVec64x4ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -53113,7 +53113,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt64x4(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSLLQMasked256const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -53138,7 +53138,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt64x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllLeftMaskedInt64x8 x (MOVQconst [c]) mask) - // result: (VPSLLQMasked512const [int8(c)] x (VPMOVVec64x8ToM mask)) + // result: (VPSLLQMasked512const [uint8(c)] x (VPMOVVec64x8ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -53147,7 +53147,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt64x8(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSLLQMasked512const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -53172,7 +53172,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint16x16(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllLeftMaskedUint16x16 x (MOVQconst [c]) mask) - // result: (VPSLLWMasked256const [int8(c)] x (VPMOVVec16x16ToM mask)) + // result: (VPSLLWMasked256const [uint8(c)] x (VPMOVVec16x16ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -53181,7 +53181,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint16x16(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSLLWMasked256const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -53206,7 +53206,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint16x32(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllLeftMaskedUint16x32 x (MOVQconst [c]) mask) - // result: (VPSLLWMasked512const [int8(c)] x (VPMOVVec16x32ToM mask)) + // result: (VPSLLWMasked512const [uint8(c)] x (VPMOVVec16x32ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -53215,7 +53215,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint16x32(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSLLWMasked512const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -53240,7 +53240,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint16x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllLeftMaskedUint16x8 x (MOVQconst [c]) mask) - // result: (VPSLLWMasked128const [int8(c)] x (VPMOVVec16x8ToM mask)) + // result: (VPSLLWMasked128const [uint8(c)] x (VPMOVVec16x8ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -53249,7 +53249,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint16x8(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSLLWMasked128const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -53274,7 +53274,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint32x16(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllLeftMaskedUint32x16 x (MOVQconst [c]) mask) - // result: (VPSLLDMasked512const [int8(c)] x (VPMOVVec32x16ToM mask)) + 
// result: (VPSLLDMasked512const [uint8(c)] x (VPMOVVec32x16ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -53283,7 +53283,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint32x16(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSLLDMasked512const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -53308,7 +53308,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint32x4(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllLeftMaskedUint32x4 x (MOVQconst [c]) mask) - // result: (VPSLLDMasked128const [int8(c)] x (VPMOVVec32x4ToM mask)) + // result: (VPSLLDMasked128const [uint8(c)] x (VPMOVVec32x4ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -53317,7 +53317,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint32x4(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSLLDMasked128const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -53342,7 +53342,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint32x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllLeftMaskedUint32x8 x (MOVQconst [c]) mask) - // result: (VPSLLDMasked256const [int8(c)] x (VPMOVVec32x8ToM mask)) + // result: (VPSLLDMasked256const [uint8(c)] x (VPMOVVec32x8ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -53351,7 +53351,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint32x8(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSLLDMasked256const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -53376,7 +53376,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint64x2(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllLeftMaskedUint64x2 x (MOVQconst [c]) mask) - // result: (VPSLLQMasked128const [int8(c)] x (VPMOVVec64x2ToM mask)) + // result: (VPSLLQMasked128const [uint8(c)] x (VPMOVVec64x2ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -53385,7 +53385,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint64x2(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSLLQMasked128const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -53410,7 +53410,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint64x4(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllLeftMaskedUint64x4 x (MOVQconst [c]) mask) - // result: (VPSLLQMasked256const [int8(c)] x (VPMOVVec64x4ToM mask)) + // result: (VPSLLQMasked256const [uint8(c)] x (VPMOVVec64x4ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -53419,7 +53419,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint64x4(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSLLQMasked256const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -53444,7 +53444,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint64x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllLeftMaskedUint64x8 x (MOVQconst [c]) mask) - // result: (VPSLLQMasked512const 
[int8(c)] x (VPMOVVec64x8ToM mask)) + // result: (VPSLLQMasked512const [uint8(c)] x (VPMOVVec64x8ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -53453,7 +53453,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint64x8(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSLLQMasked512const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -53476,7 +53476,7 @@ func rewriteValueAMD64_OpShiftAllLeftUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllLeftUint16x16 x (MOVQconst [c])) - // result: (VPSLLW256const [int8(c)] x) + // result: (VPSLLW256const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -53484,7 +53484,7 @@ func rewriteValueAMD64_OpShiftAllLeftUint16x16(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSLLW256const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -53502,7 +53502,7 @@ func rewriteValueAMD64_OpShiftAllLeftUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllLeftUint16x32 x (MOVQconst [c])) - // result: (VPSLLW512const [int8(c)] x) + // result: (VPSLLW512const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -53510,7 +53510,7 @@ func rewriteValueAMD64_OpShiftAllLeftUint16x32(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSLLW512const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -53528,7 +53528,7 @@ func rewriteValueAMD64_OpShiftAllLeftUint16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllLeftUint16x8 x (MOVQconst [c])) - // result: (VPSLLW128const [int8(c)] x) + // result: (VPSLLW128const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -53536,7 +53536,7 @@ func rewriteValueAMD64_OpShiftAllLeftUint16x8(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSLLW128const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -53554,7 +53554,7 @@ func rewriteValueAMD64_OpShiftAllLeftUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllLeftUint32x16 x (MOVQconst [c])) - // result: (VPSLLD512const [int8(c)] x) + // result: (VPSLLD512const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -53562,7 +53562,7 @@ func rewriteValueAMD64_OpShiftAllLeftUint32x16(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSLLD512const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -53580,7 +53580,7 @@ func rewriteValueAMD64_OpShiftAllLeftUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllLeftUint32x4 x (MOVQconst [c])) - // result: (VPSLLD128const [int8(c)] x) + // result: (VPSLLD128const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -53588,7 +53588,7 @@ func rewriteValueAMD64_OpShiftAllLeftUint32x4(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSLLD128const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -53606,7 +53606,7 @@ func rewriteValueAMD64_OpShiftAllLeftUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllLeftUint32x8 x (MOVQconst [c])) - // result: (VPSLLD256const [int8(c)] x) + // result: (VPSLLD256const [uint8(c)] x) for { x := v_0 if v_1.Op 
!= OpAMD64MOVQconst { @@ -53614,7 +53614,7 @@ func rewriteValueAMD64_OpShiftAllLeftUint32x8(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSLLD256const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -53632,7 +53632,7 @@ func rewriteValueAMD64_OpShiftAllLeftUint64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllLeftUint64x2 x (MOVQconst [c])) - // result: (VPSLLQ128const [int8(c)] x) + // result: (VPSLLQ128const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -53640,7 +53640,7 @@ func rewriteValueAMD64_OpShiftAllLeftUint64x2(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSLLQ128const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -53658,7 +53658,7 @@ func rewriteValueAMD64_OpShiftAllLeftUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllLeftUint64x4 x (MOVQconst [c])) - // result: (VPSLLQ256const [int8(c)] x) + // result: (VPSLLQ256const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -53666,7 +53666,7 @@ func rewriteValueAMD64_OpShiftAllLeftUint64x4(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSLLQ256const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -53684,7 +53684,7 @@ func rewriteValueAMD64_OpShiftAllLeftUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllLeftUint64x8 x (MOVQconst [c])) - // result: (VPSLLQ512const [int8(c)] x) + // result: (VPSLLQ512const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -53692,7 +53692,7 @@ func rewriteValueAMD64_OpShiftAllLeftUint64x8(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSLLQ512const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -53714,12 +53714,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt16x16(v *Value) bool { // match: (ShiftAllRightConcatMaskedInt16x16 [a] x y mask) // result: (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHRDWMasked256) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -53734,12 +53734,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt16x32(v *Value) bool { // match: (ShiftAllRightConcatMaskedInt16x32 [a] x y mask) // result: (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHRDWMasked512) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -53754,12 +53754,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt16x8(v *Value) bool { // match: (ShiftAllRightConcatMaskedInt16x8 [a] x y mask) // result: (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHRDWMasked128) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -53774,12 +53774,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt32x16(v 
*Value) bool { // match: (ShiftAllRightConcatMaskedInt32x16 [a] x y mask) // result: (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHRDDMasked512) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -53794,12 +53794,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt32x4(v *Value) bool { // match: (ShiftAllRightConcatMaskedInt32x4 [a] x y mask) // result: (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHRDDMasked128) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -53814,12 +53814,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt32x8(v *Value) bool { // match: (ShiftAllRightConcatMaskedInt32x8 [a] x y mask) // result: (VPSHRDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHRDDMasked256) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -53834,12 +53834,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt64x2(v *Value) bool { // match: (ShiftAllRightConcatMaskedInt64x2 [a] x y mask) // result: (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHRDQMasked128) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -53854,12 +53854,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt64x4(v *Value) bool { // match: (ShiftAllRightConcatMaskedInt64x4 [a] x y mask) // result: (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHRDQMasked256) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -53874,12 +53874,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt64x8(v *Value) bool { // match: (ShiftAllRightConcatMaskedInt64x8 [a] x y mask) // result: (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHRDQMasked512) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -53894,12 +53894,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint16x16(v *Value) bool { // match: (ShiftAllRightConcatMaskedUint16x16 [a] x y mask) // result: (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHRDWMasked256) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -53914,12 +53914,12 @@ func 
rewriteValueAMD64_OpShiftAllRightConcatMaskedUint16x32(v *Value) bool { // match: (ShiftAllRightConcatMaskedUint16x32 [a] x y mask) // result: (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHRDWMasked512) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -53934,12 +53934,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint16x8(v *Value) bool { // match: (ShiftAllRightConcatMaskedUint16x8 [a] x y mask) // result: (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHRDWMasked128) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -53954,12 +53954,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint32x16(v *Value) bool { // match: (ShiftAllRightConcatMaskedUint32x16 [a] x y mask) // result: (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHRDDMasked512) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -53974,12 +53974,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint32x4(v *Value) bool { // match: (ShiftAllRightConcatMaskedUint32x4 [a] x y mask) // result: (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHRDDMasked128) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -53994,12 +53994,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint32x8(v *Value) bool { // match: (ShiftAllRightConcatMaskedUint32x8 [a] x y mask) // result: (VPSHRDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHRDDMasked256) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -54014,12 +54014,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint64x2(v *Value) bool { // match: (ShiftAllRightConcatMaskedUint64x2 [a] x y mask) // result: (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHRDQMasked128) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -54034,12 +54034,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint64x4(v *Value) bool { // match: (ShiftAllRightConcatMaskedUint64x4 [a] x y mask) // result: (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHRDQMasked256) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, 
y, v0) @@ -54054,12 +54054,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint64x8(v *Value) bool { // match: (ShiftAllRightConcatMaskedUint64x8 [a] x y mask) // result: (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHRDQMasked512) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -54070,7 +54070,7 @@ func rewriteValueAMD64_OpShiftAllRightInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllRightInt16x16 x (MOVQconst [c])) - // result: (VPSRAW256const [int8(c)] x) + // result: (VPSRAW256const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54078,7 +54078,7 @@ func rewriteValueAMD64_OpShiftAllRightInt16x16(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSRAW256const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -54096,7 +54096,7 @@ func rewriteValueAMD64_OpShiftAllRightInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllRightInt16x32 x (MOVQconst [c])) - // result: (VPSRAW512const [int8(c)] x) + // result: (VPSRAW512const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54104,7 +54104,7 @@ func rewriteValueAMD64_OpShiftAllRightInt16x32(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSRAW512const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -54122,7 +54122,7 @@ func rewriteValueAMD64_OpShiftAllRightInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllRightInt16x8 x (MOVQconst [c])) - // result: (VPSRAW128const [int8(c)] x) + // result: (VPSRAW128const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54130,7 +54130,7 @@ func rewriteValueAMD64_OpShiftAllRightInt16x8(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSRAW128const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -54148,7 +54148,7 @@ func rewriteValueAMD64_OpShiftAllRightInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllRightInt32x16 x (MOVQconst [c])) - // result: (VPSRAD512const [int8(c)] x) + // result: (VPSRAD512const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54156,7 +54156,7 @@ func rewriteValueAMD64_OpShiftAllRightInt32x16(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSRAD512const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -54174,7 +54174,7 @@ func rewriteValueAMD64_OpShiftAllRightInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllRightInt32x4 x (MOVQconst [c])) - // result: (VPSRAD128const [int8(c)] x) + // result: (VPSRAD128const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54182,7 +54182,7 @@ func rewriteValueAMD64_OpShiftAllRightInt32x4(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSRAD128const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -54200,7 +54200,7 @@ func rewriteValueAMD64_OpShiftAllRightInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllRightInt32x8 x (MOVQconst [c])) - // result: (VPSRAD256const [int8(c)] x) + // result: (VPSRAD256const [uint8(c)] x) for 
{ x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54208,7 +54208,7 @@ func rewriteValueAMD64_OpShiftAllRightInt32x8(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSRAD256const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -54226,7 +54226,7 @@ func rewriteValueAMD64_OpShiftAllRightInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllRightInt64x2 x (MOVQconst [c])) - // result: (VPSRAQ128const [int8(c)] x) + // result: (VPSRAQ128const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54234,7 +54234,7 @@ func rewriteValueAMD64_OpShiftAllRightInt64x2(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSRAQ128const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -54252,7 +54252,7 @@ func rewriteValueAMD64_OpShiftAllRightInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllRightInt64x4 x (MOVQconst [c])) - // result: (VPSRAQ256const [int8(c)] x) + // result: (VPSRAQ256const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54260,7 +54260,7 @@ func rewriteValueAMD64_OpShiftAllRightInt64x4(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSRAQ256const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -54278,7 +54278,7 @@ func rewriteValueAMD64_OpShiftAllRightInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllRightInt64x8 x (MOVQconst [c])) - // result: (VPSRAQ512const [int8(c)] x) + // result: (VPSRAQ512const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54286,7 +54286,7 @@ func rewriteValueAMD64_OpShiftAllRightInt64x8(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSRAQ512const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -54306,7 +54306,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt16x16(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllRightMaskedInt16x16 x (MOVQconst [c]) mask) - // result: (VPSRAWMasked256const [int8(c)] x (VPMOVVec16x16ToM mask)) + // result: (VPSRAWMasked256const [uint8(c)] x (VPMOVVec16x16ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54315,7 +54315,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt16x16(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSRAWMasked256const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -54340,7 +54340,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt16x32(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllRightMaskedInt16x32 x (MOVQconst [c]) mask) - // result: (VPSRAWMasked512const [int8(c)] x (VPMOVVec16x32ToM mask)) + // result: (VPSRAWMasked512const [uint8(c)] x (VPMOVVec16x32ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54349,7 +54349,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt16x32(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSRAWMasked512const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -54374,7 +54374,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt16x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: 
(ShiftAllRightMaskedInt16x8 x (MOVQconst [c]) mask) - // result: (VPSRAWMasked128const [int8(c)] x (VPMOVVec16x8ToM mask)) + // result: (VPSRAWMasked128const [uint8(c)] x (VPMOVVec16x8ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54383,7 +54383,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt16x8(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSRAWMasked128const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -54408,7 +54408,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt32x16(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllRightMaskedInt32x16 x (MOVQconst [c]) mask) - // result: (VPSRADMasked512const [int8(c)] x (VPMOVVec32x16ToM mask)) + // result: (VPSRADMasked512const [uint8(c)] x (VPMOVVec32x16ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54417,7 +54417,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt32x16(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSRADMasked512const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -54442,7 +54442,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt32x4(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllRightMaskedInt32x4 x (MOVQconst [c]) mask) - // result: (VPSRADMasked128const [int8(c)] x (VPMOVVec32x4ToM mask)) + // result: (VPSRADMasked128const [uint8(c)] x (VPMOVVec32x4ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54451,7 +54451,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt32x4(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSRADMasked128const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -54476,7 +54476,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt32x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllRightMaskedInt32x8 x (MOVQconst [c]) mask) - // result: (VPSRADMasked256const [int8(c)] x (VPMOVVec32x8ToM mask)) + // result: (VPSRADMasked256const [uint8(c)] x (VPMOVVec32x8ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54485,7 +54485,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt32x8(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSRADMasked256const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -54510,7 +54510,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt64x2(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllRightMaskedInt64x2 x (MOVQconst [c]) mask) - // result: (VPSRAQMasked128const [int8(c)] x (VPMOVVec64x2ToM mask)) + // result: (VPSRAQMasked128const [uint8(c)] x (VPMOVVec64x2ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54519,7 +54519,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt64x2(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSRAQMasked128const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -54544,7 +54544,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt64x4(v *Value) bool 
{ v_0 := v.Args[0] b := v.Block // match: (ShiftAllRightMaskedInt64x4 x (MOVQconst [c]) mask) - // result: (VPSRAQMasked256const [int8(c)] x (VPMOVVec64x4ToM mask)) + // result: (VPSRAQMasked256const [uint8(c)] x (VPMOVVec64x4ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54553,7 +54553,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt64x4(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSRAQMasked256const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -54578,7 +54578,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt64x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllRightMaskedInt64x8 x (MOVQconst [c]) mask) - // result: (VPSRAQMasked512const [int8(c)] x (VPMOVVec64x8ToM mask)) + // result: (VPSRAQMasked512const [uint8(c)] x (VPMOVVec64x8ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54587,7 +54587,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt64x8(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSRAQMasked512const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -54612,7 +54612,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint16x16(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllRightMaskedUint16x16 x (MOVQconst [c]) mask) - // result: (VPSRLWMasked256const [int8(c)] x (VPMOVVec16x16ToM mask)) + // result: (VPSRLWMasked256const [uint8(c)] x (VPMOVVec16x16ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54621,7 +54621,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint16x16(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSRLWMasked256const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -54646,7 +54646,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint16x32(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllRightMaskedUint16x32 x (MOVQconst [c]) mask) - // result: (VPSRLWMasked512const [int8(c)] x (VPMOVVec16x32ToM mask)) + // result: (VPSRLWMasked512const [uint8(c)] x (VPMOVVec16x32ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54655,7 +54655,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint16x32(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSRLWMasked512const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -54680,7 +54680,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint16x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllRightMaskedUint16x8 x (MOVQconst [c]) mask) - // result: (VPSRLWMasked128const [int8(c)] x (VPMOVVec16x8ToM mask)) + // result: (VPSRLWMasked128const [uint8(c)] x (VPMOVVec16x8ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54689,7 +54689,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint16x8(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSRLWMasked128const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -54714,7 +54714,7 @@ func 
rewriteValueAMD64_OpShiftAllRightMaskedUint32x16(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllRightMaskedUint32x16 x (MOVQconst [c]) mask) - // result: (VPSRLDMasked512const [int8(c)] x (VPMOVVec32x16ToM mask)) + // result: (VPSRLDMasked512const [uint8(c)] x (VPMOVVec32x16ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54723,7 +54723,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint32x16(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSRLDMasked512const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -54748,7 +54748,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint32x4(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllRightMaskedUint32x4 x (MOVQconst [c]) mask) - // result: (VPSRLDMasked128const [int8(c)] x (VPMOVVec32x4ToM mask)) + // result: (VPSRLDMasked128const [uint8(c)] x (VPMOVVec32x4ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54757,7 +54757,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint32x4(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSRLDMasked128const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -54782,7 +54782,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint32x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllRightMaskedUint32x8 x (MOVQconst [c]) mask) - // result: (VPSRLDMasked256const [int8(c)] x (VPMOVVec32x8ToM mask)) + // result: (VPSRLDMasked256const [uint8(c)] x (VPMOVVec32x8ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54791,7 +54791,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint32x8(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSRLDMasked256const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -54816,7 +54816,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint64x2(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllRightMaskedUint64x2 x (MOVQconst [c]) mask) - // result: (VPSRLQMasked128const [int8(c)] x (VPMOVVec64x2ToM mask)) + // result: (VPSRLQMasked128const [uint8(c)] x (VPMOVVec64x2ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54825,7 +54825,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint64x2(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSRLQMasked128const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -54850,7 +54850,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint64x4(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllRightMaskedUint64x4 x (MOVQconst [c]) mask) - // result: (VPSRLQMasked256const [int8(c)] x (VPMOVVec64x4ToM mask)) + // result: (VPSRLQMasked256const [uint8(c)] x (VPMOVVec64x4ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54859,7 +54859,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint64x4(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSRLQMasked256const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) 
v0.AddArg(mask) v.AddArg2(x, v0) @@ -54884,7 +54884,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint64x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllRightMaskedUint64x8 x (MOVQconst [c]) mask) - // result: (VPSRLQMasked512const [int8(c)] x (VPMOVVec64x8ToM mask)) + // result: (VPSRLQMasked512const [uint8(c)] x (VPMOVVec64x8ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54893,7 +54893,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint64x8(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSRLQMasked512const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -54916,7 +54916,7 @@ func rewriteValueAMD64_OpShiftAllRightUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllRightUint16x16 x (MOVQconst [c])) - // result: (VPSRLW256const [int8(c)] x) + // result: (VPSRLW256const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54924,7 +54924,7 @@ func rewriteValueAMD64_OpShiftAllRightUint16x16(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSRLW256const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -54942,7 +54942,7 @@ func rewriteValueAMD64_OpShiftAllRightUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllRightUint16x32 x (MOVQconst [c])) - // result: (VPSRLW512const [int8(c)] x) + // result: (VPSRLW512const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54950,7 +54950,7 @@ func rewriteValueAMD64_OpShiftAllRightUint16x32(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSRLW512const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -54968,7 +54968,7 @@ func rewriteValueAMD64_OpShiftAllRightUint16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllRightUint16x8 x (MOVQconst [c])) - // result: (VPSRLW128const [int8(c)] x) + // result: (VPSRLW128const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54976,7 +54976,7 @@ func rewriteValueAMD64_OpShiftAllRightUint16x8(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSRLW128const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -54994,7 +54994,7 @@ func rewriteValueAMD64_OpShiftAllRightUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllRightUint32x16 x (MOVQconst [c])) - // result: (VPSRLD512const [int8(c)] x) + // result: (VPSRLD512const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -55002,7 +55002,7 @@ func rewriteValueAMD64_OpShiftAllRightUint32x16(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSRLD512const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -55020,7 +55020,7 @@ func rewriteValueAMD64_OpShiftAllRightUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllRightUint32x4 x (MOVQconst [c])) - // result: (VPSRLD128const [int8(c)] x) + // result: (VPSRLD128const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -55028,7 +55028,7 @@ func rewriteValueAMD64_OpShiftAllRightUint32x4(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSRLD128const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -55046,7 
+55046,7 @@ func rewriteValueAMD64_OpShiftAllRightUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllRightUint32x8 x (MOVQconst [c])) - // result: (VPSRLD256const [int8(c)] x) + // result: (VPSRLD256const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -55054,7 +55054,7 @@ func rewriteValueAMD64_OpShiftAllRightUint32x8(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSRLD256const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -55072,7 +55072,7 @@ func rewriteValueAMD64_OpShiftAllRightUint64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllRightUint64x2 x (MOVQconst [c])) - // result: (VPSRLQ128const [int8(c)] x) + // result: (VPSRLQ128const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -55080,7 +55080,7 @@ func rewriteValueAMD64_OpShiftAllRightUint64x2(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSRLQ128const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -55098,7 +55098,7 @@ func rewriteValueAMD64_OpShiftAllRightUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllRightUint64x4 x (MOVQconst [c])) - // result: (VPSRLQ256const [int8(c)] x) + // result: (VPSRLQ256const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -55106,7 +55106,7 @@ func rewriteValueAMD64_OpShiftAllRightUint64x4(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSRLQ256const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -55124,7 +55124,7 @@ func rewriteValueAMD64_OpShiftAllRightUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllRightUint64x8 x (MOVQconst [c])) - // result: (VPSRLQ512const [int8(c)] x) + // result: (VPSRLQ512const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -55132,7 +55132,7 @@ func rewriteValueAMD64_OpShiftAllRightUint64x8(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSRLQ512const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -57976,7 +57976,7 @@ func rewriteValueAMD64_OpTruncFloat32x4(v *Value) bool { for { x := v_0 v.reset(OpAMD64VROUNDPS128) - v.AuxInt = int8ToAuxInt(3) + v.AuxInt = uint8ToAuxInt(3) v.AddArg(x) return true } @@ -57988,7 +57988,7 @@ func rewriteValueAMD64_OpTruncFloat32x8(v *Value) bool { for { x := v_0 v.reset(OpAMD64VROUNDPS256) - v.AuxInt = int8ToAuxInt(3) + v.AuxInt = uint8ToAuxInt(3) v.AddArg(x) return true } @@ -58000,7 +58000,7 @@ func rewriteValueAMD64_OpTruncFloat64x2(v *Value) bool { for { x := v_0 v.reset(OpAMD64VROUNDPD128) - v.AuxInt = int8ToAuxInt(3) + v.AuxInt = uint8ToAuxInt(3) v.AddArg(x) return true } @@ -58012,7 +58012,7 @@ func rewriteValueAMD64_OpTruncFloat64x4(v *Value) bool { for { x := v_0 v.reset(OpAMD64VROUNDPD256) - v.AuxInt = int8ToAuxInt(3) + v.AuxInt = uint8ToAuxInt(3) v.AddArg(x) return true } @@ -58022,10 +58022,10 @@ func rewriteValueAMD64_OpTruncScaledFloat32x16(v *Value) bool { // match: (TruncScaledFloat32x16 [a] x) // result: (VRNDSCALEPS512 [a+3] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VRNDSCALEPS512) - v.AuxInt = int8ToAuxInt(a + 3) + v.AuxInt = uint8ToAuxInt(a + 3) v.AddArg(x) return true } @@ -58035,10 +58035,10 @@ func rewriteValueAMD64_OpTruncScaledFloat32x4(v *Value) bool { // match: (TruncScaledFloat32x4 [a] x) // result: 
(VRNDSCALEPS128 [a+3] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VRNDSCALEPS128) - v.AuxInt = int8ToAuxInt(a + 3) + v.AuxInt = uint8ToAuxInt(a + 3) v.AddArg(x) return true } @@ -58048,10 +58048,10 @@ func rewriteValueAMD64_OpTruncScaledFloat32x8(v *Value) bool { // match: (TruncScaledFloat32x8 [a] x) // result: (VRNDSCALEPS256 [a+3] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VRNDSCALEPS256) - v.AuxInt = int8ToAuxInt(a + 3) + v.AuxInt = uint8ToAuxInt(a + 3) v.AddArg(x) return true } @@ -58061,10 +58061,10 @@ func rewriteValueAMD64_OpTruncScaledFloat64x2(v *Value) bool { // match: (TruncScaledFloat64x2 [a] x) // result: (VRNDSCALEPD128 [a+3] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VRNDSCALEPD128) - v.AuxInt = int8ToAuxInt(a + 3) + v.AuxInt = uint8ToAuxInt(a + 3) v.AddArg(x) return true } @@ -58074,10 +58074,10 @@ func rewriteValueAMD64_OpTruncScaledFloat64x4(v *Value) bool { // match: (TruncScaledFloat64x4 [a] x) // result: (VRNDSCALEPD256 [a+3] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VRNDSCALEPD256) - v.AuxInt = int8ToAuxInt(a + 3) + v.AuxInt = uint8ToAuxInt(a + 3) v.AddArg(x) return true } @@ -58087,10 +58087,10 @@ func rewriteValueAMD64_OpTruncScaledFloat64x8(v *Value) bool { // match: (TruncScaledFloat64x8 [a] x) // result: (VRNDSCALEPD512 [a+3] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VRNDSCALEPD512) - v.AuxInt = int8ToAuxInt(a + 3) + v.AuxInt = uint8ToAuxInt(a + 3) v.AddArg(x) return true } @@ -58102,11 +58102,11 @@ func rewriteValueAMD64_OpTruncScaledMaskedFloat32x16(v *Value) bool { // match: (TruncScaledMaskedFloat32x16 [a] x mask) // result: (VRNDSCALEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VRNDSCALEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 3) + v.AuxInt = uint8ToAuxInt(a + 3) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -58120,11 +58120,11 @@ func rewriteValueAMD64_OpTruncScaledMaskedFloat32x4(v *Value) bool { // match: (TruncScaledMaskedFloat32x4 [a] x mask) // result: (VRNDSCALEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VRNDSCALEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 3) + v.AuxInt = uint8ToAuxInt(a + 3) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -58138,11 +58138,11 @@ func rewriteValueAMD64_OpTruncScaledMaskedFloat32x8(v *Value) bool { // match: (TruncScaledMaskedFloat32x8 [a] x mask) // result: (VRNDSCALEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VRNDSCALEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 3) + v.AuxInt = uint8ToAuxInt(a + 3) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -58156,11 +58156,11 @@ func rewriteValueAMD64_OpTruncScaledMaskedFloat64x2(v *Value) bool { // match: (TruncScaledMaskedFloat64x2 [a] x mask) // result: (VRNDSCALEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VRNDSCALEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 3) + 
v.AuxInt = uint8ToAuxInt(a + 3) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -58174,11 +58174,11 @@ func rewriteValueAMD64_OpTruncScaledMaskedFloat64x4(v *Value) bool { // match: (TruncScaledMaskedFloat64x4 [a] x mask) // result: (VRNDSCALEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VRNDSCALEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 3) + v.AuxInt = uint8ToAuxInt(a + 3) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -58192,11 +58192,11 @@ func rewriteValueAMD64_OpTruncScaledMaskedFloat64x8(v *Value) bool { // match: (TruncScaledMaskedFloat64x8 [a] x mask) // result: (VRNDSCALEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VRNDSCALEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 3) + v.AuxInt = uint8ToAuxInt(a + 3) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -58208,10 +58208,10 @@ func rewriteValueAMD64_OpTruncScaledResidueFloat32x16(v *Value) bool { // match: (TruncScaledResidueFloat32x16 [a] x) // result: (VREDUCEPS512 [a+3] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VREDUCEPS512) - v.AuxInt = int8ToAuxInt(a + 3) + v.AuxInt = uint8ToAuxInt(a + 3) v.AddArg(x) return true } @@ -58221,10 +58221,10 @@ func rewriteValueAMD64_OpTruncScaledResidueFloat32x4(v *Value) bool { // match: (TruncScaledResidueFloat32x4 [a] x) // result: (VREDUCEPS128 [a+3] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VREDUCEPS128) - v.AuxInt = int8ToAuxInt(a + 3) + v.AuxInt = uint8ToAuxInt(a + 3) v.AddArg(x) return true } @@ -58234,10 +58234,10 @@ func rewriteValueAMD64_OpTruncScaledResidueFloat32x8(v *Value) bool { // match: (TruncScaledResidueFloat32x8 [a] x) // result: (VREDUCEPS256 [a+3] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VREDUCEPS256) - v.AuxInt = int8ToAuxInt(a + 3) + v.AuxInt = uint8ToAuxInt(a + 3) v.AddArg(x) return true } @@ -58247,10 +58247,10 @@ func rewriteValueAMD64_OpTruncScaledResidueFloat64x2(v *Value) bool { // match: (TruncScaledResidueFloat64x2 [a] x) // result: (VREDUCEPD128 [a+3] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VREDUCEPD128) - v.AuxInt = int8ToAuxInt(a + 3) + v.AuxInt = uint8ToAuxInt(a + 3) v.AddArg(x) return true } @@ -58260,10 +58260,10 @@ func rewriteValueAMD64_OpTruncScaledResidueFloat64x4(v *Value) bool { // match: (TruncScaledResidueFloat64x4 [a] x) // result: (VREDUCEPD256 [a+3] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VREDUCEPD256) - v.AuxInt = int8ToAuxInt(a + 3) + v.AuxInt = uint8ToAuxInt(a + 3) v.AddArg(x) return true } @@ -58273,10 +58273,10 @@ func rewriteValueAMD64_OpTruncScaledResidueFloat64x8(v *Value) bool { // match: (TruncScaledResidueFloat64x8 [a] x) // result: (VREDUCEPD512 [a+3] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VREDUCEPD512) - v.AuxInt = int8ToAuxInt(a + 3) + v.AuxInt = uint8ToAuxInt(a + 3) v.AddArg(x) return true } @@ -58288,11 +58288,11 @@ func rewriteValueAMD64_OpTruncScaledResidueMaskedFloat32x16(v *Value) bool { // match: (TruncScaledResidueMaskedFloat32x16 [a] x mask) // result: 
(VREDUCEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VREDUCEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 3) + v.AuxInt = uint8ToAuxInt(a + 3) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -58306,11 +58306,11 @@ func rewriteValueAMD64_OpTruncScaledResidueMaskedFloat32x4(v *Value) bool { // match: (TruncScaledResidueMaskedFloat32x4 [a] x mask) // result: (VREDUCEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VREDUCEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 3) + v.AuxInt = uint8ToAuxInt(a + 3) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -58324,11 +58324,11 @@ func rewriteValueAMD64_OpTruncScaledResidueMaskedFloat32x8(v *Value) bool { // match: (TruncScaledResidueMaskedFloat32x8 [a] x mask) // result: (VREDUCEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VREDUCEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 3) + v.AuxInt = uint8ToAuxInt(a + 3) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -58342,11 +58342,11 @@ func rewriteValueAMD64_OpTruncScaledResidueMaskedFloat64x2(v *Value) bool { // match: (TruncScaledResidueMaskedFloat64x2 [a] x mask) // result: (VREDUCEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VREDUCEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 3) + v.AuxInt = uint8ToAuxInt(a + 3) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -58360,11 +58360,11 @@ func rewriteValueAMD64_OpTruncScaledResidueMaskedFloat64x4(v *Value) bool { // match: (TruncScaledResidueMaskedFloat64x4 [a] x mask) // result: (VREDUCEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VREDUCEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 3) + v.AuxInt = uint8ToAuxInt(a + 3) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -58378,11 +58378,11 @@ func rewriteValueAMD64_OpTruncScaledResidueMaskedFloat64x8(v *Value) bool { // match: (TruncScaledResidueMaskedFloat64x8 [a] x mask) // result: (VREDUCEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VREDUCEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 3) + v.AuxInt = uint8ToAuxInt(a + 3) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) diff --git a/src/cmd/compile/internal/ssagen/intrinsics.go b/src/cmd/compile/internal/ssagen/intrinsics.go index 45ccb9c9998947..ee03075f524af8 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics.go +++ b/src/cmd/compile/internal/ssagen/intrinsics.go @@ -1670,12 +1670,42 @@ func opLen4_31(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []* } } -func plainPanicSimdImm(s *state) { - cmp := s.newValue0(ssa.OpConstBool, types.Types[types.TBOOL]) - cmp.AuxInt = 0 - // TODO: make this a standalone panic instead of reusing the overflow panic. - // Or maybe after we implement the switch table this will be obsolete anyway. 
- s.check(cmp, ir.Syms.Panicoverflow) +func immJumpTable(s *state, idx *ssa.Value, intrinsicCall *ir.CallExpr, genOp func(*state, int)) *ssa.Value { + // Make blocks we'll need. + bEnd := s.f.NewBlock(ssa.BlockPlain) + + t := types.Types[types.TUINT8] + if !idx.Type.IsKind(types.TUINT8) { + panic("immJumpTable expects uint8 value") + } + // We will exhaust 0-255, so no need to check the bounds. + + b := s.curBlock + b.Kind = ssa.BlockJumpTable + b.Pos = intrinsicCall.Pos() + if base.Flag.Cfg.SpectreIndex { + // Potential Spectre vulnerability hardening? + idx = s.newValue2(ssa.OpSpectreSliceIndex, t, idx, s.uintptrConstant(255)) + } + b.SetControl(idx) + targets := [256]*ssa.Block{} + for i := range 256 { + t := s.f.NewBlock(ssa.BlockPlain) + targets[i] = t + b.AddEdgeTo(t) + } + s.endBlock() + + for i, t := range targets { + s.startBlock(t) + genOp(s, i) + t.AddEdgeTo(bEnd) + s.endBlock() + } + + s.startBlock(bEnd) + ret := s.variable(intrinsicCall, intrinsicCall.Type()) + return ret } func opLen1Imm8(op ssa.Op, t *types.Type, offset int) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { @@ -1683,12 +1713,10 @@ func opLen1Imm8(op ssa.Op, t *types.Type, offset int) func(s *state, n *ir.CallE if args[1].Op == ssa.OpConst8 { return s.newValue1I(op, t, args[1].AuxInt< Date: Fri, 8 Aug 2025 16:49:17 -0400 Subject: [PATCH 120/139] [dev.simd] cmd/compile: keep track of multiple rule file names in ssa/_gen This was a long-standing "we need to fix this" for simd work, this fixes it. I expect that simd peephole rule files will be coming soon and there will be more errors and we will be happier to have this. Change-Id: Iefffc43e3e2110939f8d406f6e5da7e9e2d55bd9 Reviewed-on: https://go-review.googlesource.com/c/go/+/694455 Reviewed-by: Cherry Mui LUCI-TryBot-Result: Go LUCI --- .../compile/internal/ssa/_gen/multiscanner.go | 117 ++++++++++++++++++ src/cmd/compile/internal/ssa/_gen/rulegen.go | 18 +-- 2 files changed, 128 insertions(+), 7 deletions(-) create mode 100644 src/cmd/compile/internal/ssa/_gen/multiscanner.go diff --git a/src/cmd/compile/internal/ssa/_gen/multiscanner.go b/src/cmd/compile/internal/ssa/_gen/multiscanner.go new file mode 100644 index 00000000000000..1c7520cadef9b8 --- /dev/null +++ b/src/cmd/compile/internal/ssa/_gen/multiscanner.go @@ -0,0 +1,117 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "bufio" + "io" +) + +// NamedScanner is a simple struct to pair a name with a Scanner. +type NamedScanner struct { + Name string + Scanner *bufio.Scanner +} + +// NamedReader is a simple struct to pair a name with a Reader, +// which will be converted to a Scanner using bufio.NewScanner. +type NamedReader struct { + Name string + Reader io.Reader +} + +// MultiScanner scans over multiple bufio.Scanners as if they were a single stream. +// It also keeps track of the name of the current scanner and the line number. +type MultiScanner struct { + scanners []NamedScanner + scannerIdx int + line int + totalLine int + err error +} + +// NewMultiScanner creates a new MultiScanner from slice of NamedScanners. +func NewMultiScanner(scanners []NamedScanner) *MultiScanner { + return &MultiScanner{ + scanners: scanners, + scannerIdx: -1, // Start before the first scanner + } +} + +// MultiScannerFromReaders creates a new MultiScanner from a slice of NamedReaders. 
+func MultiScannerFromReaders(readers []NamedReader) *MultiScanner { + var scanners []NamedScanner + for _, r := range readers { + scanners = append(scanners, NamedScanner{ + Name: r.Name, + Scanner: bufio.NewScanner(r.Reader), + }) + } + return NewMultiScanner(scanners) +} + +// Scan advances the scanner to the next token, which will then be +// available through the Text method. It returns false when the scan stops, +// either by reaching the end of the input or an error. +// After Scan returns false, the Err method will return any error that +// occurred during scanning, except that if it was io.EOF, Err +// will return nil. +func (ms *MultiScanner) Scan() bool { + if ms.scannerIdx == -1 { + ms.scannerIdx = 0 + } + + for ms.scannerIdx < len(ms.scanners) { + current := ms.scanners[ms.scannerIdx] + if current.Scanner.Scan() { + ms.line++ + ms.totalLine++ + return true + } + if err := current.Scanner.Err(); err != nil { + ms.err = err + return false + } + // Move to the next scanner + ms.scannerIdx++ + ms.line = 0 + } + + return false +} + +// Text returns the most recent token generated by a call to Scan. +func (ms *MultiScanner) Text() string { + if ms.scannerIdx < 0 || ms.scannerIdx >= len(ms.scanners) { + return "" + } + return ms.scanners[ms.scannerIdx].Scanner.Text() +} + +// Err returns the first non-EOF error that was encountered by the MultiScanner. +func (ms *MultiScanner) Err() error { + return ms.err +} + +// Name returns the name of the current scanner. +func (ms *MultiScanner) Name() string { + if ms.scannerIdx < 0 { + return "" + } + if ms.scannerIdx >= len(ms.scanners) { + return "" + } + return ms.scanners[ms.scannerIdx].Name +} + +// Line returns the current line number within the current scanner. +func (ms *MultiScanner) Line() int { + return ms.line +} + +// TotalLine returns the total number of lines scanned across all scanners. +func (ms *MultiScanner) TotalLine() int { + return ms.totalLine +} diff --git a/src/cmd/compile/internal/ssa/_gen/rulegen.go b/src/cmd/compile/internal/ssa/_gen/rulegen.go index 57fd2b05943c78..d4ca1aef22279f 100644 --- a/src/cmd/compile/internal/ssa/_gen/rulegen.go +++ b/src/cmd/compile/internal/ssa/_gen/rulegen.go @@ -94,9 +94,11 @@ func genSplitLoadRules(arch arch) { genRulesSuffix(arch, "splitload") } func genLateLowerRules(arch arch) { genRulesSuffix(arch, "latelower") } func genRulesSuffix(arch arch, suff string) { + var readers []NamedReader // Open input file. var text io.Reader - text, err := os.Open(arch.name + suff + ".rules") + name := arch.name + suff + ".rules" + text, err := os.Open(name) if err != nil { if suff == "" { // All architectures must have a plain rules file. @@ -105,12 +107,14 @@ func genRulesSuffix(arch arch, suff string) { // Some architectures have bonus rules files that others don't share. That's fine. 
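// Editorial sketch, not part of this CL: a minimal standalone use of the
// MultiScanner added above, assuming it is exercised from within the same
// _gen package (exampleMultiScan and the sample rule text are hypothetical,
// and the usual fmt, log, and strings imports are assumed). It shows the
// property genRulesSuffix relies on: per-file names and line numbers survive
// concatenation, so a rule location can be reported as "simdAMD64.rules:1"
// rather than as an offset into the combined stream.
func exampleMultiScan() {
	readers := []NamedReader{
		{Name: "AMD64.rules", Reader: strings.NewReader("ruleA\nruleB\n")},
		{Name: "simdAMD64.rules", Reader: strings.NewReader("ruleC\n")},
	}
	ms := MultiScannerFromReaders(readers)
	for ms.Scan() {
		// Prints: AMD64.rules:1 ruleA, AMD64.rules:2 ruleB, simdAMD64.rules:1 ruleC
		fmt.Printf("%s:%d %s\n", ms.Name(), ms.Line(), ms.Text())
	}
	if err := ms.Err(); err != nil {
		log.Fatalf("scanner failed: %v\n", err)
	}
}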
return } + readers = append(readers, NamedReader{name, text}) // Check for file of SIMD rules to add if suff == "" { - simdtext, err := os.Open("simd" + arch.name + ".rules") + simdname := "simd" + arch.name + ".rules" + simdtext, err := os.Open(simdname) if err == nil { - text = io.MultiReader(text, simdtext) + readers = append(readers, NamedReader{simdname, simdtext}) } } @@ -119,12 +123,12 @@ func genRulesSuffix(arch arch, suff string) { oprules := map[string][]Rule{} // read rule file - scanner := bufio.NewScanner(text) + scanner := MultiScannerFromReaders(readers) rule := "" var lineno int var ruleLineno int // line number of "=>" for scanner.Scan() { - lineno++ + lineno = scanner.Line() line := scanner.Text() if i := strings.Index(line, "//"); i >= 0 { // Remove comments. Note that this isn't string safe, so @@ -151,7 +155,7 @@ func genRulesSuffix(arch arch, suff string) { break // continuing the line can't help, and it will only make errors worse } - loc := fmt.Sprintf("%s%s.rules:%d", arch.name, suff, ruleLineno) + loc := fmt.Sprintf("%s:%d", scanner.Name(), ruleLineno) for _, rule2 := range expandOr(rule) { r := Rule{Rule: rule2, Loc: loc} if rawop := strings.Split(rule2, " ")[0][1:]; isBlock(rawop, arch) { @@ -171,7 +175,7 @@ func genRulesSuffix(arch arch, suff string) { log.Fatalf("scanner failed: %v\n", err) } if balance(rule) != 0 { - log.Fatalf("%s.rules:%d: unbalanced rule: %v\n", arch.name, lineno, rule) + log.Fatalf("%s:%d: unbalanced rule: %v\n", scanner.Name(), lineno, rule) } // Order all the ops. From 2fd49d8f304a096482096edd1a3e9dc66c33df60 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Mon, 11 Aug 2025 17:20:48 +0000 Subject: [PATCH 121/139] [dev.simd] simd: imm doc improve This CL is generated by CL 694775. Change-Id: I3d551b1a7981c6c35c1ecf139a38b6e07323a861 Reviewed-on: https://go-review.googlesource.com/c/go/+/694795 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/simd/ops_amd64.go | 488 +++++++++++++++++++++--------------------- 1 file changed, 244 insertions(+), 244 deletions(-) diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 5475719e63166f..01d939c9ed42f2 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -1412,42 +1412,42 @@ func (x Float64x4) Ceil() Float64x4 // CeilScaled rounds elements up with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x4) CeilScaled(prec uint8) Float32x4 // CeilScaled rounds elements up with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x8) CeilScaled(prec uint8) Float32x8 // CeilScaled rounds elements up with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x16) CeilScaled(prec uint8) Float32x16 // CeilScaled rounds elements up with specified precision. 
// -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x2) CeilScaled(prec uint8) Float64x2 // CeilScaled rounds elements up with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x4) CeilScaled(prec uint8) Float64x4 // CeilScaled rounds elements up with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x8) CeilScaled(prec uint8) Float64x8 @@ -1458,7 +1458,7 @@ func (x Float64x8) CeilScaled(prec uint8) Float64x8 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x4) CeilScaledMasked(prec uint8, mask Mask32x4) Float32x4 @@ -1467,7 +1467,7 @@ func (x Float32x4) CeilScaledMasked(prec uint8, mask Mask32x4) Float32x4 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x8) CeilScaledMasked(prec uint8, mask Mask32x8) Float32x8 @@ -1476,7 +1476,7 @@ func (x Float32x8) CeilScaledMasked(prec uint8, mask Mask32x8) Float32x8 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x16) CeilScaledMasked(prec uint8, mask Mask32x16) Float32x16 @@ -1485,7 +1485,7 @@ func (x Float32x16) CeilScaledMasked(prec uint8, mask Mask32x16) Float32x16 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x2) CeilScaledMasked(prec uint8, mask Mask64x2) Float64x2 @@ -1494,7 +1494,7 @@ func (x Float64x2) CeilScaledMasked(prec uint8, mask Mask64x2) Float64x2 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. 
+// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x4) CeilScaledMasked(prec uint8, mask Mask64x4) Float64x4 @@ -1503,7 +1503,7 @@ func (x Float64x4) CeilScaledMasked(prec uint8, mask Mask64x4) Float64x4 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x8) CeilScaledMasked(prec uint8, mask Mask64x8) Float64x8 @@ -1512,42 +1512,42 @@ func (x Float64x8) CeilScaledMasked(prec uint8, mask Mask64x8) Float64x8 // CeilScaledResidue computes the difference after ceiling with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x4) CeilScaledResidue(prec uint8) Float32x4 // CeilScaledResidue computes the difference after ceiling with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x8) CeilScaledResidue(prec uint8) Float32x8 // CeilScaledResidue computes the difference after ceiling with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x16) CeilScaledResidue(prec uint8) Float32x16 // CeilScaledResidue computes the difference after ceiling with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x2) CeilScaledResidue(prec uint8) Float64x2 // CeilScaledResidue computes the difference after ceiling with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x4) CeilScaledResidue(prec uint8) Float64x4 // CeilScaledResidue computes the difference after ceiling with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x8) CeilScaledResidue(prec uint8) Float64x8 @@ -1558,7 +1558,7 @@ func (x Float64x8) CeilScaledResidue(prec uint8) Float64x8 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x4) CeilScaledResidueMasked(prec uint8, mask Mask32x4) Float32x4 @@ -1567,7 +1567,7 @@ func (x Float32x4) CeilScaledResidueMasked(prec uint8, mask Mask32x4) Float32x4 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x8) CeilScaledResidueMasked(prec uint8, mask Mask32x8) Float32x8 @@ -1576,7 +1576,7 @@ func (x Float32x8) CeilScaledResidueMasked(prec uint8, mask Mask32x8) Float32x8 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x16) CeilScaledResidueMasked(prec uint8, mask Mask32x16) Float32x16 @@ -1585,7 +1585,7 @@ func (x Float32x16) CeilScaledResidueMasked(prec uint8, mask Mask32x16) Float32x // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x2) CeilScaledResidueMasked(prec uint8, mask Mask64x2) Float64x2 @@ -1594,7 +1594,7 @@ func (x Float64x2) CeilScaledResidueMasked(prec uint8, mask Mask64x2) Float64x2 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x4) CeilScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4 @@ -1603,7 +1603,7 @@ func (x Float64x4) CeilScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x8) CeilScaledResidueMasked(prec uint8, mask Mask64x8) Float64x8 @@ -2648,42 +2648,42 @@ func (x Float64x4) Floor() Float64x4 // FloorScaled rounds elements down with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. 
+// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x4) FloorScaled(prec uint8) Float32x4 // FloorScaled rounds elements down with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x8) FloorScaled(prec uint8) Float32x8 // FloorScaled rounds elements down with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x16) FloorScaled(prec uint8) Float32x16 // FloorScaled rounds elements down with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x2) FloorScaled(prec uint8) Float64x2 // FloorScaled rounds elements down with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x4) FloorScaled(prec uint8) Float64x4 // FloorScaled rounds elements down with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x8) FloorScaled(prec uint8) Float64x8 @@ -2694,7 +2694,7 @@ func (x Float64x8) FloorScaled(prec uint8) Float64x8 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x4) FloorScaledMasked(prec uint8, mask Mask32x4) Float32x4 @@ -2703,7 +2703,7 @@ func (x Float32x4) FloorScaledMasked(prec uint8, mask Mask32x4) Float32x4 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x8) FloorScaledMasked(prec uint8, mask Mask32x8) Float32x8 @@ -2712,7 +2712,7 @@ func (x Float32x8) FloorScaledMasked(prec uint8, mask Mask32x8) Float32x8 // // This operation is applied selectively under a write mask. 
// -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x16) FloorScaledMasked(prec uint8, mask Mask32x16) Float32x16 @@ -2721,7 +2721,7 @@ func (x Float32x16) FloorScaledMasked(prec uint8, mask Mask32x16) Float32x16 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x2) FloorScaledMasked(prec uint8, mask Mask64x2) Float64x2 @@ -2730,7 +2730,7 @@ func (x Float64x2) FloorScaledMasked(prec uint8, mask Mask64x2) Float64x2 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x4) FloorScaledMasked(prec uint8, mask Mask64x4) Float64x4 @@ -2739,7 +2739,7 @@ func (x Float64x4) FloorScaledMasked(prec uint8, mask Mask64x4) Float64x4 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x8) FloorScaledMasked(prec uint8, mask Mask64x8) Float64x8 @@ -2748,42 +2748,42 @@ func (x Float64x8) FloorScaledMasked(prec uint8, mask Mask64x8) Float64x8 // FloorScaledResidue computes the difference after flooring with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x4) FloorScaledResidue(prec uint8) Float32x4 // FloorScaledResidue computes the difference after flooring with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x8) FloorScaledResidue(prec uint8) Float32x8 // FloorScaledResidue computes the difference after flooring with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x16) FloorScaledResidue(prec uint8) Float32x16 // FloorScaledResidue computes the difference after flooring with specified precision. 
// -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x2) FloorScaledResidue(prec uint8) Float64x2 // FloorScaledResidue computes the difference after flooring with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x4) FloorScaledResidue(prec uint8) Float64x4 // FloorScaledResidue computes the difference after flooring with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x8) FloorScaledResidue(prec uint8) Float64x8 @@ -2794,7 +2794,7 @@ func (x Float64x8) FloorScaledResidue(prec uint8) Float64x8 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x4) FloorScaledResidueMasked(prec uint8, mask Mask32x4) Float32x4 @@ -2803,7 +2803,7 @@ func (x Float32x4) FloorScaledResidueMasked(prec uint8, mask Mask32x4) Float32x4 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x8) FloorScaledResidueMasked(prec uint8, mask Mask32x8) Float32x8 @@ -2812,7 +2812,7 @@ func (x Float32x8) FloorScaledResidueMasked(prec uint8, mask Mask32x8) Float32x8 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x16) FloorScaledResidueMasked(prec uint8, mask Mask32x16) Float32x16 @@ -2821,7 +2821,7 @@ func (x Float32x16) FloorScaledResidueMasked(prec uint8, mask Mask32x16) Float32 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x2) FloorScaledResidueMasked(prec uint8, mask Mask64x2) Float64x2 @@ -2830,7 +2830,7 @@ func (x Float64x2) FloorScaledResidueMasked(prec uint8, mask Mask64x2) Float64x2 // // This operation is applied selectively under a write mask. 
// -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x4) FloorScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4 @@ -2839,7 +2839,7 @@ func (x Float64x4) FloorScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x8) FloorScaledResidueMasked(prec uint8, mask Mask64x8) Float64x8 @@ -2851,7 +2851,7 @@ func (x Float64x8) FloorScaledResidueMasked(prec uint8, mask Mask64x8) Float64x8 // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // -// b results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// b results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512GFNI func (x Uint8x16) GaloisFieldAffineTransform(y Uint64x2, b uint8) Uint8x16 @@ -2861,7 +2861,7 @@ func (x Uint8x16) GaloisFieldAffineTransform(y Uint64x2, b uint8) Uint8x16 // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // -// b results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// b results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512GFNI func (x Uint8x32) GaloisFieldAffineTransform(y Uint64x4, b uint8) Uint8x32 @@ -2871,7 +2871,7 @@ func (x Uint8x32) GaloisFieldAffineTransform(y Uint64x4, b uint8) Uint8x32 // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // -// b results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// b results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512GFNI func (x Uint8x64) GaloisFieldAffineTransform(y Uint64x8, b uint8) Uint8x64 @@ -2884,7 +2884,7 @@ func (x Uint8x64) GaloisFieldAffineTransform(y Uint64x8, b uint8) Uint8x64 // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // -// b results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// b results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512GFNI func (x Uint8x16) GaloisFieldAffineTransformInverse(y Uint64x2, b uint8) Uint8x16 @@ -2895,7 +2895,7 @@ func (x Uint8x16) GaloisFieldAffineTransformInverse(y Uint64x2, b uint8) Uint8x1 // b is an 8-bit vector. 
The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // -// b results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// b results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512GFNI func (x Uint8x32) GaloisFieldAffineTransformInverse(y Uint64x4, b uint8) Uint8x32 @@ -2906,7 +2906,7 @@ func (x Uint8x32) GaloisFieldAffineTransformInverse(y Uint64x4, b uint8) Uint8x3 // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // -// b results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// b results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512GFNI func (x Uint8x64) GaloisFieldAffineTransformInverse(y Uint64x8, b uint8) Uint8x64 @@ -2921,7 +2921,7 @@ func (x Uint8x64) GaloisFieldAffineTransformInverse(y Uint64x8, b uint8) Uint8x6 // // This operation is applied selectively under a write mask. // -// b results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// b results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512GFNI func (x Uint8x16) GaloisFieldAffineTransformInverseMasked(y Uint64x2, b uint8, mask Mask8x16) Uint8x16 @@ -2934,7 +2934,7 @@ func (x Uint8x16) GaloisFieldAffineTransformInverseMasked(y Uint64x2, b uint8, m // // This operation is applied selectively under a write mask. // -// b results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// b results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512GFNI func (x Uint8x32) GaloisFieldAffineTransformInverseMasked(y Uint64x4, b uint8, mask Mask8x32) Uint8x32 @@ -2947,7 +2947,7 @@ func (x Uint8x32) GaloisFieldAffineTransformInverseMasked(y Uint64x4, b uint8, m // // This operation is applied selectively under a write mask. // -// b results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// b results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512GFNI func (x Uint8x64) GaloisFieldAffineTransformInverseMasked(y Uint64x8, b uint8, mask Mask8x64) Uint8x64 @@ -2961,7 +2961,7 @@ func (x Uint8x64) GaloisFieldAffineTransformInverseMasked(y Uint64x8, b uint8, m // // This operation is applied selectively under a write mask. // -// b results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// b results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512GFNI func (x Uint8x16) GaloisFieldAffineTransformMasked(y Uint64x2, b uint8, mask Mask8x16) Uint8x16 @@ -2973,7 +2973,7 @@ func (x Uint8x16) GaloisFieldAffineTransformMasked(y Uint64x2, b uint8, mask Mas // // This operation is applied selectively under a write mask. 
// -// b results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// b results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512GFNI func (x Uint8x32) GaloisFieldAffineTransformMasked(y Uint64x4, b uint8, mask Mask8x32) Uint8x32 @@ -2985,7 +2985,7 @@ func (x Uint8x32) GaloisFieldAffineTransformMasked(y Uint64x4, b uint8, mask Mas // // This operation is applied selectively under a write mask. // -// b results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// b results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512GFNI func (x Uint8x64) GaloisFieldAffineTransformMasked(y Uint64x8, b uint8, mask Mask8x64) Uint8x64 @@ -3040,56 +3040,56 @@ func (x Uint8x64) GaloisFieldMulMasked(y Uint8x64, mask Mask8x64) Uint8x64 // GetElem retrieves a single constant-indexed element's value. // -// index results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// index results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPEXTRB, CPU Feature: AVX512BW func (x Int8x16) GetElem(index uint8) int8 // GetElem retrieves a single constant-indexed element's value. // -// index results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// index results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPEXTRW, CPU Feature: AVX512BW func (x Int16x8) GetElem(index uint8) int16 // GetElem retrieves a single constant-indexed element's value. // -// index results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// index results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPEXTRD, CPU Feature: AVX func (x Int32x4) GetElem(index uint8) int32 // GetElem retrieves a single constant-indexed element's value. // -// index results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// index results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPEXTRQ, CPU Feature: AVX func (x Int64x2) GetElem(index uint8) int64 // GetElem retrieves a single constant-indexed element's value. // -// index results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// index results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPEXTRB, CPU Feature: AVX512BW func (x Uint8x16) GetElem(index uint8) uint8 // GetElem retrieves a single constant-indexed element's value. // -// index results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// index results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPEXTRW, CPU Feature: AVX512BW func (x Uint16x8) GetElem(index uint8) uint16 // GetElem retrieves a single constant-indexed element's value. 
// -// index results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// index results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPEXTRD, CPU Feature: AVX func (x Uint32x4) GetElem(index uint8) uint32 // GetElem retrieves a single constant-indexed element's value. // -// index results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// index results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPEXTRQ, CPU Feature: AVX func (x Uint64x2) GetElem(index uint8) uint64 @@ -8096,84 +8096,84 @@ func (x Float64x8) ReciprocalSqrtMasked(mask Mask64x8) Float64x8 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPROLD, CPU Feature: AVX512F func (x Int32x4) RotateAllLeft(shift uint8) Int32x4 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPROLD, CPU Feature: AVX512F func (x Int32x8) RotateAllLeft(shift uint8) Int32x8 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPROLD, CPU Feature: AVX512F func (x Int32x16) RotateAllLeft(shift uint8) Int32x16 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPROLQ, CPU Feature: AVX512F func (x Int64x2) RotateAllLeft(shift uint8) Int64x2 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPROLQ, CPU Feature: AVX512F func (x Int64x4) RotateAllLeft(shift uint8) Int64x4 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// // Asm: VPROLQ, CPU Feature: AVX512F func (x Int64x8) RotateAllLeft(shift uint8) Int64x8 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPROLD, CPU Feature: AVX512F func (x Uint32x4) RotateAllLeft(shift uint8) Uint32x4 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPROLD, CPU Feature: AVX512F func (x Uint32x8) RotateAllLeft(shift uint8) Uint32x8 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPROLD, CPU Feature: AVX512F func (x Uint32x16) RotateAllLeft(shift uint8) Uint32x16 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPROLQ, CPU Feature: AVX512F func (x Uint64x2) RotateAllLeft(shift uint8) Uint64x2 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPROLQ, CPU Feature: AVX512F func (x Uint64x4) RotateAllLeft(shift uint8) Uint64x4 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPROLQ, CPU Feature: AVX512F func (x Uint64x8) RotateAllLeft(shift uint8) Uint64x8 @@ -8184,7 +8184,7 @@ func (x Uint64x8) RotateAllLeft(shift uint8) Uint64x8 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPROLD, CPU Feature: AVX512F func (x Int32x4) RotateAllLeftMasked(shift uint8, mask Mask32x4) Int32x4 @@ -8193,7 +8193,7 @@ func (x Int32x4) RotateAllLeftMasked(shift uint8, mask Mask32x4) Int32x4 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. 
+// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPROLD, CPU Feature: AVX512F func (x Int32x8) RotateAllLeftMasked(shift uint8, mask Mask32x8) Int32x8 @@ -8202,7 +8202,7 @@ func (x Int32x8) RotateAllLeftMasked(shift uint8, mask Mask32x8) Int32x8 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPROLD, CPU Feature: AVX512F func (x Int32x16) RotateAllLeftMasked(shift uint8, mask Mask32x16) Int32x16 @@ -8211,7 +8211,7 @@ func (x Int32x16) RotateAllLeftMasked(shift uint8, mask Mask32x16) Int32x16 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPROLQ, CPU Feature: AVX512F func (x Int64x2) RotateAllLeftMasked(shift uint8, mask Mask64x2) Int64x2 @@ -8220,7 +8220,7 @@ func (x Int64x2) RotateAllLeftMasked(shift uint8, mask Mask64x2) Int64x2 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPROLQ, CPU Feature: AVX512F func (x Int64x4) RotateAllLeftMasked(shift uint8, mask Mask64x4) Int64x4 @@ -8229,7 +8229,7 @@ func (x Int64x4) RotateAllLeftMasked(shift uint8, mask Mask64x4) Int64x4 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPROLQ, CPU Feature: AVX512F func (x Int64x8) RotateAllLeftMasked(shift uint8, mask Mask64x8) Int64x8 @@ -8238,7 +8238,7 @@ func (x Int64x8) RotateAllLeftMasked(shift uint8, mask Mask64x8) Int64x8 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPROLD, CPU Feature: AVX512F func (x Uint32x4) RotateAllLeftMasked(shift uint8, mask Mask32x4) Uint32x4 @@ -8247,7 +8247,7 @@ func (x Uint32x4) RotateAllLeftMasked(shift uint8, mask Mask32x4) Uint32x4 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPROLD, CPU Feature: AVX512F func (x Uint32x8) RotateAllLeftMasked(shift uint8, mask Mask32x8) Uint32x8 @@ -8256,7 +8256,7 @@ func (x Uint32x8) RotateAllLeftMasked(shift uint8, mask Mask32x8) Uint32x8 // // This operation is applied selectively under a write mask. 
// -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPROLD, CPU Feature: AVX512F func (x Uint32x16) RotateAllLeftMasked(shift uint8, mask Mask32x16) Uint32x16 @@ -8265,7 +8265,7 @@ func (x Uint32x16) RotateAllLeftMasked(shift uint8, mask Mask32x16) Uint32x16 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPROLQ, CPU Feature: AVX512F func (x Uint64x2) RotateAllLeftMasked(shift uint8, mask Mask64x2) Uint64x2 @@ -8274,7 +8274,7 @@ func (x Uint64x2) RotateAllLeftMasked(shift uint8, mask Mask64x2) Uint64x2 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPROLQ, CPU Feature: AVX512F func (x Uint64x4) RotateAllLeftMasked(shift uint8, mask Mask64x4) Uint64x4 @@ -8283,7 +8283,7 @@ func (x Uint64x4) RotateAllLeftMasked(shift uint8, mask Mask64x4) Uint64x4 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPROLQ, CPU Feature: AVX512F func (x Uint64x8) RotateAllLeftMasked(shift uint8, mask Mask64x8) Uint64x8 @@ -8292,84 +8292,84 @@ func (x Uint64x8) RotateAllLeftMasked(shift uint8, mask Mask64x8) Uint64x8 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPRORD, CPU Feature: AVX512F func (x Int32x4) RotateAllRight(shift uint8) Int32x4 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPRORD, CPU Feature: AVX512F func (x Int32x8) RotateAllRight(shift uint8) Int32x8 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPRORD, CPU Feature: AVX512F func (x Int32x16) RotateAllRight(shift uint8) Int32x16 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. 
// -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPRORQ, CPU Feature: AVX512F func (x Int64x2) RotateAllRight(shift uint8) Int64x2 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPRORQ, CPU Feature: AVX512F func (x Int64x4) RotateAllRight(shift uint8) Int64x4 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPRORQ, CPU Feature: AVX512F func (x Int64x8) RotateAllRight(shift uint8) Int64x8 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPRORD, CPU Feature: AVX512F func (x Uint32x4) RotateAllRight(shift uint8) Uint32x4 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPRORD, CPU Feature: AVX512F func (x Uint32x8) RotateAllRight(shift uint8) Uint32x8 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPRORD, CPU Feature: AVX512F func (x Uint32x16) RotateAllRight(shift uint8) Uint32x16 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPRORQ, CPU Feature: AVX512F func (x Uint64x2) RotateAllRight(shift uint8) Uint64x2 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
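A minimal usage sketch for the RotateAllRight form documented above, assuming the package is importable as "simd" (this patch series stages it under internal/simd) and using only the methods shown in these doc comments:

	package simddemo

	import "simd" // assumed import path; the series currently places the package at internal/simd

	// rotateRight3 rotates every 32-bit lane of x right by 3 bits. Because the
	// shift is a constant, the VPRORD immediate form noted above can be used;
	// a non-constant shift would instead go through the generated jump table.
	func rotateRight3(x simd.Int32x4) simd.Int32x4 {
		return x.RotateAllRight(3)
	}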
// // Asm: VPRORQ, CPU Feature: AVX512F func (x Uint64x4) RotateAllRight(shift uint8) Uint64x4 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPRORQ, CPU Feature: AVX512F func (x Uint64x8) RotateAllRight(shift uint8) Uint64x8 @@ -8380,7 +8380,7 @@ func (x Uint64x8) RotateAllRight(shift uint8) Uint64x8 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPRORD, CPU Feature: AVX512F func (x Int32x4) RotateAllRightMasked(shift uint8, mask Mask32x4) Int32x4 @@ -8389,7 +8389,7 @@ func (x Int32x4) RotateAllRightMasked(shift uint8, mask Mask32x4) Int32x4 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPRORD, CPU Feature: AVX512F func (x Int32x8) RotateAllRightMasked(shift uint8, mask Mask32x8) Int32x8 @@ -8398,7 +8398,7 @@ func (x Int32x8) RotateAllRightMasked(shift uint8, mask Mask32x8) Int32x8 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPRORD, CPU Feature: AVX512F func (x Int32x16) RotateAllRightMasked(shift uint8, mask Mask32x16) Int32x16 @@ -8407,7 +8407,7 @@ func (x Int32x16) RotateAllRightMasked(shift uint8, mask Mask32x16) Int32x16 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPRORQ, CPU Feature: AVX512F func (x Int64x2) RotateAllRightMasked(shift uint8, mask Mask64x2) Int64x2 @@ -8416,7 +8416,7 @@ func (x Int64x2) RotateAllRightMasked(shift uint8, mask Mask64x2) Int64x2 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPRORQ, CPU Feature: AVX512F func (x Int64x4) RotateAllRightMasked(shift uint8, mask Mask64x4) Int64x4 @@ -8425,7 +8425,7 @@ func (x Int64x4) RotateAllRightMasked(shift uint8, mask Mask64x4) Int64x4 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
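For the masked rotates, a similarly hedged sketch in the same hypothetical file; the mask is taken as a parameter because this hunk does not show how masks are constructed:

	// rotateRight3Masked rotates the lanes of x selected by m right by 3 bits;
	// unselected lanes follow the write-mask behavior described above.
	func rotateRight3Masked(x simd.Int32x4, m simd.Mask32x4) simd.Int32x4 {
		return x.RotateAllRightMasked(3, m)
	}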
// // Asm: VPRORQ, CPU Feature: AVX512F func (x Int64x8) RotateAllRightMasked(shift uint8, mask Mask64x8) Int64x8 @@ -8434,7 +8434,7 @@ func (x Int64x8) RotateAllRightMasked(shift uint8, mask Mask64x8) Int64x8 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPRORD, CPU Feature: AVX512F func (x Uint32x4) RotateAllRightMasked(shift uint8, mask Mask32x4) Uint32x4 @@ -8443,7 +8443,7 @@ func (x Uint32x4) RotateAllRightMasked(shift uint8, mask Mask32x4) Uint32x4 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPRORD, CPU Feature: AVX512F func (x Uint32x8) RotateAllRightMasked(shift uint8, mask Mask32x8) Uint32x8 @@ -8452,7 +8452,7 @@ func (x Uint32x8) RotateAllRightMasked(shift uint8, mask Mask32x8) Uint32x8 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPRORD, CPU Feature: AVX512F func (x Uint32x16) RotateAllRightMasked(shift uint8, mask Mask32x16) Uint32x16 @@ -8461,7 +8461,7 @@ func (x Uint32x16) RotateAllRightMasked(shift uint8, mask Mask32x16) Uint32x16 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPRORQ, CPU Feature: AVX512F func (x Uint64x2) RotateAllRightMasked(shift uint8, mask Mask64x2) Uint64x2 @@ -8470,7 +8470,7 @@ func (x Uint64x2) RotateAllRightMasked(shift uint8, mask Mask64x2) Uint64x2 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPRORQ, CPU Feature: AVX512F func (x Uint64x4) RotateAllRightMasked(shift uint8, mask Mask64x4) Uint64x4 @@ -8479,7 +8479,7 @@ func (x Uint64x4) RotateAllRightMasked(shift uint8, mask Mask64x4) Uint64x4 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPRORQ, CPU Feature: AVX512F func (x Uint64x8) RotateAllRightMasked(shift uint8, mask Mask64x8) Uint64x8 @@ -8806,42 +8806,42 @@ func (x Float64x4) RoundToEven() Float64x4 // RoundToEvenScaled rounds elements with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. 
+// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x4) RoundToEvenScaled(prec uint8) Float32x4 // RoundToEvenScaled rounds elements with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x8) RoundToEvenScaled(prec uint8) Float32x8 // RoundToEvenScaled rounds elements with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x16) RoundToEvenScaled(prec uint8) Float32x16 // RoundToEvenScaled rounds elements with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x2) RoundToEvenScaled(prec uint8) Float64x2 // RoundToEvenScaled rounds elements with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x4) RoundToEvenScaled(prec uint8) Float64x4 // RoundToEvenScaled rounds elements with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x8) RoundToEvenScaled(prec uint8) Float64x8 @@ -8852,7 +8852,7 @@ func (x Float64x8) RoundToEvenScaled(prec uint8) Float64x8 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x4) RoundToEvenScaledMasked(prec uint8, mask Mask32x4) Float32x4 @@ -8861,7 +8861,7 @@ func (x Float32x4) RoundToEvenScaledMasked(prec uint8, mask Mask32x4) Float32x4 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x8) RoundToEvenScaledMasked(prec uint8, mask Mask32x8) Float32x8 @@ -8870,7 +8870,7 @@ func (x Float32x8) RoundToEvenScaledMasked(prec uint8, mask Mask32x8) Float32x8 // // This operation is applied selectively under a write mask. 
// -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x16) RoundToEvenScaledMasked(prec uint8, mask Mask32x16) Float32x16 @@ -8879,7 +8879,7 @@ func (x Float32x16) RoundToEvenScaledMasked(prec uint8, mask Mask32x16) Float32x // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x2) RoundToEvenScaledMasked(prec uint8, mask Mask64x2) Float64x2 @@ -8888,7 +8888,7 @@ func (x Float64x2) RoundToEvenScaledMasked(prec uint8, mask Mask64x2) Float64x2 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x4) RoundToEvenScaledMasked(prec uint8, mask Mask64x4) Float64x4 @@ -8897,7 +8897,7 @@ func (x Float64x4) RoundToEvenScaledMasked(prec uint8, mask Mask64x4) Float64x4 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x8) RoundToEvenScaledMasked(prec uint8, mask Mask64x8) Float64x8 @@ -8906,42 +8906,42 @@ func (x Float64x8) RoundToEvenScaledMasked(prec uint8, mask Mask64x8) Float64x8 // RoundToEvenScaledResidue computes the difference after rounding with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x4) RoundToEvenScaledResidue(prec uint8) Float32x4 // RoundToEvenScaledResidue computes the difference after rounding with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x8) RoundToEvenScaledResidue(prec uint8) Float32x8 // RoundToEvenScaledResidue computes the difference after rounding with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x16) RoundToEvenScaledResidue(prec uint8) Float32x16 // RoundToEvenScaledResidue computes the difference after rounding with specified precision. 
// -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x2) RoundToEvenScaledResidue(prec uint8) Float64x2 // RoundToEvenScaledResidue computes the difference after rounding with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x4) RoundToEvenScaledResidue(prec uint8) Float64x4 // RoundToEvenScaledResidue computes the difference after rounding with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x8) RoundToEvenScaledResidue(prec uint8) Float64x8 @@ -8952,7 +8952,7 @@ func (x Float64x8) RoundToEvenScaledResidue(prec uint8) Float64x8 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x4) RoundToEvenScaledResidueMasked(prec uint8, mask Mask32x4) Float32x4 @@ -8961,7 +8961,7 @@ func (x Float32x4) RoundToEvenScaledResidueMasked(prec uint8, mask Mask32x4) Flo // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x8) RoundToEvenScaledResidueMasked(prec uint8, mask Mask32x8) Float32x8 @@ -8970,7 +8970,7 @@ func (x Float32x8) RoundToEvenScaledResidueMasked(prec uint8, mask Mask32x8) Flo // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x16) RoundToEvenScaledResidueMasked(prec uint8, mask Mask32x16) Float32x16 @@ -8979,7 +8979,7 @@ func (x Float32x16) RoundToEvenScaledResidueMasked(prec uint8, mask Mask32x16) F // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
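RoundToEvenScaled and RoundToEvenScaledResidue are natural to use as a pair: one keeps the rounded value, the other the part that rounding removed. A sketch under the same import assumption as above:

	// roundAndResidue2 rounds x to even at precision 2 and also returns the
	// residue (the difference after rounding), as documented above. The
	// constant precision lets the compiler use the VRNDSCALEPS / VREDUCEPS
	// immediate forms rather than the jump table.
	func roundAndResidue2(x simd.Float32x4) (rounded, residue simd.Float32x4) {
		return x.RoundToEvenScaled(2), x.RoundToEvenScaledResidue(2)
	}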
// // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x2) RoundToEvenScaledResidueMasked(prec uint8, mask Mask64x2) Float64x2 @@ -8988,7 +8988,7 @@ func (x Float64x2) RoundToEvenScaledResidueMasked(prec uint8, mask Mask64x2) Flo // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x4) RoundToEvenScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4 @@ -8997,7 +8997,7 @@ func (x Float64x4) RoundToEvenScaledResidueMasked(prec uint8, mask Mask64x4) Flo // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x8) RoundToEvenScaledResidueMasked(prec uint8, mask Mask64x8) Float64x8 @@ -9082,56 +9082,56 @@ func (x Float64x8) ScaleMasked(y Float64x8, mask Mask64x8) Float64x8 // SetElem sets a single constant-indexed element's value. // -// index results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// index results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPINSRB, CPU Feature: AVX func (x Int8x16) SetElem(index uint8, y int8) Int8x16 // SetElem sets a single constant-indexed element's value. // -// index results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// index results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPINSRW, CPU Feature: AVX func (x Int16x8) SetElem(index uint8, y int16) Int16x8 // SetElem sets a single constant-indexed element's value. // -// index results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// index results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPINSRD, CPU Feature: AVX func (x Int32x4) SetElem(index uint8, y int32) Int32x4 // SetElem sets a single constant-indexed element's value. // -// index results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// index results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPINSRQ, CPU Feature: AVX func (x Int64x2) SetElem(index uint8, y int64) Int64x2 // SetElem sets a single constant-indexed element's value. // -// index results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// index results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPINSRB, CPU Feature: AVX func (x Uint8x16) SetElem(index uint8, y uint8) Uint8x16 // SetElem sets a single constant-indexed element's value. // -// index results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. 
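A sketch of SetElem with a constant index, which is the case the comments above recommend:

	// withLane2 returns a copy of x whose element at index 2 is replaced by v.
	// The constant index maps to the VPINSRD immediate form; a non-constant
	// index would be handled through the jump table mentioned above.
	func withLane2(x simd.Int32x4, v int32) simd.Int32x4 {
		return x.SetElem(2, v)
	}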
+// index results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPINSRW, CPU Feature: AVX func (x Uint16x8) SetElem(index uint8, y uint16) Uint16x8 // SetElem sets a single constant-indexed element's value. // -// index results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// index results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPINSRD, CPU Feature: AVX func (x Uint32x4) SetElem(index uint8, y uint32) Uint32x4 // SetElem sets a single constant-indexed element's value. // -// index results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// index results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPINSRQ, CPU Feature: AVX func (x Uint64x2) SetElem(index uint8, y uint64) Uint64x2 @@ -9437,7 +9437,7 @@ func (x Uint64x8) ShiftAllLeft(y uint64) Uint64x8 // ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 func (x Int16x8) ShiftAllLeftConcat(shift uint8, y Int16x8) Int16x8 @@ -9445,7 +9445,7 @@ func (x Int16x8) ShiftAllLeftConcat(shift uint8, y Int16x8) Int16x8 // ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 func (x Int16x16) ShiftAllLeftConcat(shift uint8, y Int16x16) Int16x16 @@ -9453,7 +9453,7 @@ func (x Int16x16) ShiftAllLeftConcat(shift uint8, y Int16x16) Int16x16 // ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 func (x Int16x32) ShiftAllLeftConcat(shift uint8, y Int16x32) Int16x32 @@ -9461,7 +9461,7 @@ func (x Int16x32) ShiftAllLeftConcat(shift uint8, y Int16x32) Int16x32 // ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. 
+// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 func (x Int32x4) ShiftAllLeftConcat(shift uint8, y Int32x4) Int32x4 @@ -9469,7 +9469,7 @@ func (x Int32x4) ShiftAllLeftConcat(shift uint8, y Int32x4) Int32x4 // ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 func (x Int32x8) ShiftAllLeftConcat(shift uint8, y Int32x8) Int32x8 @@ -9477,7 +9477,7 @@ func (x Int32x8) ShiftAllLeftConcat(shift uint8, y Int32x8) Int32x8 // ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 func (x Int32x16) ShiftAllLeftConcat(shift uint8, y Int32x16) Int32x16 @@ -9485,7 +9485,7 @@ func (x Int32x16) ShiftAllLeftConcat(shift uint8, y Int32x16) Int32x16 // ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 func (x Int64x2) ShiftAllLeftConcat(shift uint8, y Int64x2) Int64x2 @@ -9493,7 +9493,7 @@ func (x Int64x2) ShiftAllLeftConcat(shift uint8, y Int64x2) Int64x2 // ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 func (x Int64x4) ShiftAllLeftConcat(shift uint8, y Int64x4) Int64x4 @@ -9501,7 +9501,7 @@ func (x Int64x4) ShiftAllLeftConcat(shift uint8, y Int64x4) Int64x4 // ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 func (x Int64x8) ShiftAllLeftConcat(shift uint8, y Int64x8) Int64x8 @@ -9509,7 +9509,7 @@ func (x Int64x8) ShiftAllLeftConcat(shift uint8, y Int64x8) Int64x8 // ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 func (x Uint16x8) ShiftAllLeftConcat(shift uint8, y Uint16x8) Uint16x8 @@ -9517,7 +9517,7 @@ func (x Uint16x8) ShiftAllLeftConcat(shift uint8, y Uint16x8) Uint16x8 // ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 func (x Uint16x16) ShiftAllLeftConcat(shift uint8, y Uint16x16) Uint16x16 @@ -9525,7 +9525,7 @@ func (x Uint16x16) ShiftAllLeftConcat(shift uint8, y Uint16x16) Uint16x16 // ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 func (x Uint16x32) ShiftAllLeftConcat(shift uint8, y Uint16x32) Uint16x32 @@ -9533,7 +9533,7 @@ func (x Uint16x32) ShiftAllLeftConcat(shift uint8, y Uint16x32) Uint16x32 // ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 func (x Uint32x4) ShiftAllLeftConcat(shift uint8, y Uint32x4) Uint32x4 @@ -9541,7 +9541,7 @@ func (x Uint32x4) ShiftAllLeftConcat(shift uint8, y Uint32x4) Uint32x4 // ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 func (x Uint32x8) ShiftAllLeftConcat(shift uint8, y Uint32x8) Uint32x8 @@ -9549,7 +9549,7 @@ func (x Uint32x8) ShiftAllLeftConcat(shift uint8, y Uint32x8) Uint32x8 // ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 func (x Uint32x16) ShiftAllLeftConcat(shift uint8, y Uint32x16) Uint32x16 @@ -9557,7 +9557,7 @@ func (x Uint32x16) ShiftAllLeftConcat(shift uint8, y Uint32x16) Uint32x16 // ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 func (x Uint64x2) ShiftAllLeftConcat(shift uint8, y Uint64x2) Uint64x2 @@ -9565,7 +9565,7 @@ func (x Uint64x2) ShiftAllLeftConcat(shift uint8, y Uint64x2) Uint64x2 // ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 func (x Uint64x4) ShiftAllLeftConcat(shift uint8, y Uint64x4) Uint64x4 @@ -9573,7 +9573,7 @@ func (x Uint64x4) ShiftAllLeftConcat(shift uint8, y Uint64x4) Uint64x4 // ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 func (x Uint64x8) ShiftAllLeftConcat(shift uint8, y Uint64x8) Uint64x8 @@ -9585,7 +9585,7 @@ func (x Uint64x8) ShiftAllLeftConcat(shift uint8, y Uint64x8) Uint64x8 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 func (x Int16x8) ShiftAllLeftConcatMasked(shift uint8, y Int16x8, mask Mask16x8) Int16x8 @@ -9595,7 +9595,7 @@ func (x Int16x8) ShiftAllLeftConcatMasked(shift uint8, y Int16x8, mask Mask16x8) // // This operation is applied selectively under a write mask. 
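ShiftAllLeftConcat is effectively a per-lane funnel shift. A sketch of the constant-shift case (the VPSHLDD form above), still assuming only the methods shown in this patch:

	// funnelLeft8 shifts each lane of x left by 8 bits and fills the vacated
	// low bits from the corresponding lane of y, as described above.
	func funnelLeft8(x, y simd.Uint32x4) simd.Uint32x4 {
		return x.ShiftAllLeftConcat(8, y)
	}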
// -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 func (x Int16x16) ShiftAllLeftConcatMasked(shift uint8, y Int16x16, mask Mask16x16) Int16x16 @@ -9605,7 +9605,7 @@ func (x Int16x16) ShiftAllLeftConcatMasked(shift uint8, y Int16x16, mask Mask16x // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 func (x Int16x32) ShiftAllLeftConcatMasked(shift uint8, y Int16x32, mask Mask16x32) Int16x32 @@ -9615,7 +9615,7 @@ func (x Int16x32) ShiftAllLeftConcatMasked(shift uint8, y Int16x32, mask Mask16x // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 func (x Int32x4) ShiftAllLeftConcatMasked(shift uint8, y Int32x4, mask Mask32x4) Int32x4 @@ -9625,7 +9625,7 @@ func (x Int32x4) ShiftAllLeftConcatMasked(shift uint8, y Int32x4, mask Mask32x4) // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 func (x Int32x8) ShiftAllLeftConcatMasked(shift uint8, y Int32x8, mask Mask32x8) Int32x8 @@ -9635,7 +9635,7 @@ func (x Int32x8) ShiftAllLeftConcatMasked(shift uint8, y Int32x8, mask Mask32x8) // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 func (x Int32x16) ShiftAllLeftConcatMasked(shift uint8, y Int32x16, mask Mask32x16) Int32x16 @@ -9645,7 +9645,7 @@ func (x Int32x16) ShiftAllLeftConcatMasked(shift uint8, y Int32x16, mask Mask32x // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 func (x Int64x2) ShiftAllLeftConcatMasked(shift uint8, y Int64x2, mask Mask64x2) Int64x2 @@ -9655,7 +9655,7 @@ func (x Int64x2) ShiftAllLeftConcatMasked(shift uint8, y Int64x2, mask Mask64x2) // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 func (x Int64x4) ShiftAllLeftConcatMasked(shift uint8, y Int64x4, mask Mask64x4) Int64x4 @@ -9665,7 +9665,7 @@ func (x Int64x4) ShiftAllLeftConcatMasked(shift uint8, y Int64x4, mask Mask64x4) // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 func (x Int64x8) ShiftAllLeftConcatMasked(shift uint8, y Int64x8, mask Mask64x8) Int64x8 @@ -9675,7 +9675,7 @@ func (x Int64x8) ShiftAllLeftConcatMasked(shift uint8, y Int64x8, mask Mask64x8) // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 func (x Uint16x8) ShiftAllLeftConcatMasked(shift uint8, y Uint16x8, mask Mask16x8) Uint16x8 @@ -9685,7 +9685,7 @@ func (x Uint16x8) ShiftAllLeftConcatMasked(shift uint8, y Uint16x8, mask Mask16x // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 func (x Uint16x16) ShiftAllLeftConcatMasked(shift uint8, y Uint16x16, mask Mask16x16) Uint16x16 @@ -9695,7 +9695,7 @@ func (x Uint16x16) ShiftAllLeftConcatMasked(shift uint8, y Uint16x16, mask Mask1 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 func (x Uint16x32) ShiftAllLeftConcatMasked(shift uint8, y Uint16x32, mask Mask16x32) Uint16x32 @@ -9705,7 +9705,7 @@ func (x Uint16x32) ShiftAllLeftConcatMasked(shift uint8, y Uint16x32, mask Mask1 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 func (x Uint32x4) ShiftAllLeftConcatMasked(shift uint8, y Uint32x4, mask Mask32x4) Uint32x4 @@ -9715,7 +9715,7 @@ func (x Uint32x4) ShiftAllLeftConcatMasked(shift uint8, y Uint32x4, mask Mask32x // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 func (x Uint32x8) ShiftAllLeftConcatMasked(shift uint8, y Uint32x8, mask Mask32x8) Uint32x8 @@ -9725,7 +9725,7 @@ func (x Uint32x8) ShiftAllLeftConcatMasked(shift uint8, y Uint32x8, mask Mask32x // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 func (x Uint32x16) ShiftAllLeftConcatMasked(shift uint8, y Uint32x16, mask Mask32x16) Uint32x16 @@ -9735,7 +9735,7 @@ func (x Uint32x16) ShiftAllLeftConcatMasked(shift uint8, y Uint32x16, mask Mask3 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 func (x Uint64x2) ShiftAllLeftConcatMasked(shift uint8, y Uint64x2, mask Mask64x2) Uint64x2 @@ -9745,7 +9745,7 @@ func (x Uint64x2) ShiftAllLeftConcatMasked(shift uint8, y Uint64x2, mask Mask64x // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 func (x Uint64x4) ShiftAllLeftConcatMasked(shift uint8, y Uint64x4, mask Mask64x4) Uint64x4 @@ -9755,7 +9755,7 @@ func (x Uint64x4) ShiftAllLeftConcatMasked(shift uint8, y Uint64x4, mask Mask64x // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 func (x Uint64x8) ShiftAllLeftConcatMasked(shift uint8, y Uint64x8, mask Mask64x8) Uint64x8 @@ -9985,7 +9985,7 @@ func (x Uint64x8) ShiftAllRight(y uint64) Uint64x8 // ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 func (x Int16x8) ShiftAllRightConcat(shift uint8, y Int16x8) Int16x8 @@ -9993,7 +9993,7 @@ func (x Int16x8) ShiftAllRightConcat(shift uint8, y Int16x8) Int16x8 // ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. 
+// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 func (x Int16x16) ShiftAllRightConcat(shift uint8, y Int16x16) Int16x16 @@ -10001,7 +10001,7 @@ func (x Int16x16) ShiftAllRightConcat(shift uint8, y Int16x16) Int16x16 // ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 func (x Int16x32) ShiftAllRightConcat(shift uint8, y Int16x32) Int16x32 @@ -10009,7 +10009,7 @@ func (x Int16x32) ShiftAllRightConcat(shift uint8, y Int16x32) Int16x32 // ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 func (x Int32x4) ShiftAllRightConcat(shift uint8, y Int32x4) Int32x4 @@ -10017,7 +10017,7 @@ func (x Int32x4) ShiftAllRightConcat(shift uint8, y Int32x4) Int32x4 // ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 func (x Int32x8) ShiftAllRightConcat(shift uint8, y Int32x8) Int32x8 @@ -10025,7 +10025,7 @@ func (x Int32x8) ShiftAllRightConcat(shift uint8, y Int32x8) Int32x8 // ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 func (x Int32x16) ShiftAllRightConcat(shift uint8, y Int32x16) Int32x16 @@ -10033,7 +10033,7 @@ func (x Int32x16) ShiftAllRightConcat(shift uint8, y Int32x16) Int32x16 // ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 func (x Int64x2) ShiftAllRightConcat(shift uint8, y Int64x2) Int64x2 @@ -10041,7 +10041,7 @@ func (x Int64x2) ShiftAllRightConcat(shift uint8, y Int64x2) Int64x2 // ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 func (x Int64x4) ShiftAllRightConcat(shift uint8, y Int64x4) Int64x4 @@ -10049,7 +10049,7 @@ func (x Int64x4) ShiftAllRightConcat(shift uint8, y Int64x4) Int64x4 // ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 func (x Int64x8) ShiftAllRightConcat(shift uint8, y Int64x8) Int64x8 @@ -10057,7 +10057,7 @@ func (x Int64x8) ShiftAllRightConcat(shift uint8, y Int64x8) Int64x8 // ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 func (x Uint16x8) ShiftAllRightConcat(shift uint8, y Uint16x8) Uint16x8 @@ -10065,7 +10065,7 @@ func (x Uint16x8) ShiftAllRightConcat(shift uint8, y Uint16x8) Uint16x8 // ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 func (x Uint16x16) ShiftAllRightConcat(shift uint8, y Uint16x16) Uint16x16 @@ -10073,7 +10073,7 @@ func (x Uint16x16) ShiftAllRightConcat(shift uint8, y Uint16x16) Uint16x16 // ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 func (x Uint16x32) ShiftAllRightConcat(shift uint8, y Uint16x32) Uint16x32 @@ -10081,7 +10081,7 @@ func (x Uint16x32) ShiftAllRightConcat(shift uint8, y Uint16x32) Uint16x32 // ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 func (x Uint32x4) ShiftAllRightConcat(shift uint8, y Uint32x4) Uint32x4 @@ -10089,7 +10089,7 @@ func (x Uint32x4) ShiftAllRightConcat(shift uint8, y Uint32x4) Uint32x4 // ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 func (x Uint32x8) ShiftAllRightConcat(shift uint8, y Uint32x8) Uint32x8 @@ -10097,7 +10097,7 @@ func (x Uint32x8) ShiftAllRightConcat(shift uint8, y Uint32x8) Uint32x8 // ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 func (x Uint32x16) ShiftAllRightConcat(shift uint8, y Uint32x16) Uint32x16 @@ -10105,7 +10105,7 @@ func (x Uint32x16) ShiftAllRightConcat(shift uint8, y Uint32x16) Uint32x16 // ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 func (x Uint64x2) ShiftAllRightConcat(shift uint8, y Uint64x2) Uint64x2 @@ -10113,7 +10113,7 @@ func (x Uint64x2) ShiftAllRightConcat(shift uint8, y Uint64x2) Uint64x2 // ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
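The right-shifting variant mirrors it, filling the vacated high bits from y; a sketch with the same assumptions:

	// funnelRight8 shifts each lane of x right by 8 bits and fills the vacated
	// high bits from the corresponding lane of y (VPSHRDD, constant shift).
	func funnelRight8(x, y simd.Uint32x4) simd.Uint32x4 {
		return x.ShiftAllRightConcat(8, y)
	}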
// // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 func (x Uint64x4) ShiftAllRightConcat(shift uint8, y Uint64x4) Uint64x4 @@ -10121,7 +10121,7 @@ func (x Uint64x4) ShiftAllRightConcat(shift uint8, y Uint64x4) Uint64x4 // ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 func (x Uint64x8) ShiftAllRightConcat(shift uint8, y Uint64x8) Uint64x8 @@ -10133,7 +10133,7 @@ func (x Uint64x8) ShiftAllRightConcat(shift uint8, y Uint64x8) Uint64x8 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 func (x Int16x8) ShiftAllRightConcatMasked(shift uint8, y Int16x8, mask Mask16x8) Int16x8 @@ -10143,7 +10143,7 @@ func (x Int16x8) ShiftAllRightConcatMasked(shift uint8, y Int16x8, mask Mask16x8 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 func (x Int16x16) ShiftAllRightConcatMasked(shift uint8, y Int16x16, mask Mask16x16) Int16x16 @@ -10153,7 +10153,7 @@ func (x Int16x16) ShiftAllRightConcatMasked(shift uint8, y Int16x16, mask Mask16 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 func (x Int16x32) ShiftAllRightConcatMasked(shift uint8, y Int16x32, mask Mask16x32) Int16x32 @@ -10163,7 +10163,7 @@ func (x Int16x32) ShiftAllRightConcatMasked(shift uint8, y Int16x32, mask Mask16 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 func (x Int32x4) ShiftAllRightConcatMasked(shift uint8, y Int32x4, mask Mask32x4) Int32x4 @@ -10173,7 +10173,7 @@ func (x Int32x4) ShiftAllRightConcatMasked(shift uint8, y Int32x4, mask Mask32x4 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 func (x Int32x8) ShiftAllRightConcatMasked(shift uint8, y Int32x8, mask Mask32x8) Int32x8 @@ -10183,7 +10183,7 @@ func (x Int32x8) ShiftAllRightConcatMasked(shift uint8, y Int32x8, mask Mask32x8 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 func (x Int32x16) ShiftAllRightConcatMasked(shift uint8, y Int32x16, mask Mask32x16) Int32x16 @@ -10193,7 +10193,7 @@ func (x Int32x16) ShiftAllRightConcatMasked(shift uint8, y Int32x16, mask Mask32 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 func (x Int64x2) ShiftAllRightConcatMasked(shift uint8, y Int64x2, mask Mask64x2) Int64x2 @@ -10203,7 +10203,7 @@ func (x Int64x2) ShiftAllRightConcatMasked(shift uint8, y Int64x2, mask Mask64x2 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 func (x Int64x4) ShiftAllRightConcatMasked(shift uint8, y Int64x4, mask Mask64x4) Int64x4 @@ -10213,7 +10213,7 @@ func (x Int64x4) ShiftAllRightConcatMasked(shift uint8, y Int64x4, mask Mask64x4 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 func (x Int64x8) ShiftAllRightConcatMasked(shift uint8, y Int64x8, mask Mask64x8) Int64x8 @@ -10223,7 +10223,7 @@ func (x Int64x8) ShiftAllRightConcatMasked(shift uint8, y Int64x8, mask Mask64x8 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 func (x Uint16x8) ShiftAllRightConcatMasked(shift uint8, y Uint16x8, mask Mask16x8) Uint16x8 @@ -10233,7 +10233,7 @@ func (x Uint16x8) ShiftAllRightConcatMasked(shift uint8, y Uint16x8, mask Mask16 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 func (x Uint16x16) ShiftAllRightConcatMasked(shift uint8, y Uint16x16, mask Mask16x16) Uint16x16 @@ -10243,7 +10243,7 @@ func (x Uint16x16) ShiftAllRightConcatMasked(shift uint8, y Uint16x16, mask Mask // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 func (x Uint16x32) ShiftAllRightConcatMasked(shift uint8, y Uint16x32, mask Mask16x32) Uint16x32 @@ -10253,7 +10253,7 @@ func (x Uint16x32) ShiftAllRightConcatMasked(shift uint8, y Uint16x32, mask Mask // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 func (x Uint32x4) ShiftAllRightConcatMasked(shift uint8, y Uint32x4, mask Mask32x4) Uint32x4 @@ -10263,7 +10263,7 @@ func (x Uint32x4) ShiftAllRightConcatMasked(shift uint8, y Uint32x4, mask Mask32 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 func (x Uint32x8) ShiftAllRightConcatMasked(shift uint8, y Uint32x8, mask Mask32x8) Uint32x8 @@ -10273,7 +10273,7 @@ func (x Uint32x8) ShiftAllRightConcatMasked(shift uint8, y Uint32x8, mask Mask32 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 func (x Uint32x16) ShiftAllRightConcatMasked(shift uint8, y Uint32x16, mask Mask32x16) Uint32x16 @@ -10283,7 +10283,7 @@ func (x Uint32x16) ShiftAllRightConcatMasked(shift uint8, y Uint32x16, mask Mask // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 func (x Uint64x2) ShiftAllRightConcatMasked(shift uint8, y Uint64x2, mask Mask64x2) Uint64x2 @@ -10293,7 +10293,7 @@ func (x Uint64x2) ShiftAllRightConcatMasked(shift uint8, y Uint64x2, mask Mask64 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 func (x Uint64x4) ShiftAllRightConcatMasked(shift uint8, y Uint64x4, mask Mask64x4) Uint64x4 @@ -10303,7 +10303,7 @@ func (x Uint64x4) ShiftAllRightConcatMasked(shift uint8, y Uint64x4, mask Mask64 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 func (x Uint64x8) ShiftAllRightConcatMasked(shift uint8, y Uint64x8, mask Mask64x8) Uint64x8 @@ -12090,42 +12090,42 @@ func (x Float64x4) Trunc() Float64x4 // TruncScaled truncates elements with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x4) TruncScaled(prec uint8) Float32x4 // TruncScaled truncates elements with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x8) TruncScaled(prec uint8) Float32x8 // TruncScaled truncates elements with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x16) TruncScaled(prec uint8) Float32x16 // TruncScaled truncates elements with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x2) TruncScaled(prec uint8) Float64x2 // TruncScaled truncates elements with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x4) TruncScaled(prec uint8) Float64x4 // TruncScaled truncates elements with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x8) TruncScaled(prec uint8) Float64x8 @@ -12136,7 +12136,7 @@ func (x Float64x8) TruncScaled(prec uint8) Float64x8 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. 
+// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x4) TruncScaledMasked(prec uint8, mask Mask32x4) Float32x4 @@ -12145,7 +12145,7 @@ func (x Float32x4) TruncScaledMasked(prec uint8, mask Mask32x4) Float32x4 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x8) TruncScaledMasked(prec uint8, mask Mask32x8) Float32x8 @@ -12154,7 +12154,7 @@ func (x Float32x8) TruncScaledMasked(prec uint8, mask Mask32x8) Float32x8 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x16) TruncScaledMasked(prec uint8, mask Mask32x16) Float32x16 @@ -12163,7 +12163,7 @@ func (x Float32x16) TruncScaledMasked(prec uint8, mask Mask32x16) Float32x16 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x2) TruncScaledMasked(prec uint8, mask Mask64x2) Float64x2 @@ -12172,7 +12172,7 @@ func (x Float64x2) TruncScaledMasked(prec uint8, mask Mask64x2) Float64x2 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x4) TruncScaledMasked(prec uint8, mask Mask64x4) Float64x4 @@ -12181,7 +12181,7 @@ func (x Float64x4) TruncScaledMasked(prec uint8, mask Mask64x4) Float64x4 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x8) TruncScaledMasked(prec uint8, mask Mask64x8) Float64x8 @@ -12190,42 +12190,42 @@ func (x Float64x8) TruncScaledMasked(prec uint8, mask Mask64x8) Float64x8 // TruncScaledResidue computes the difference after truncating with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x4) TruncScaledResidue(prec uint8) Float32x4 // TruncScaledResidue computes the difference after truncating with specified precision. 
// -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x8) TruncScaledResidue(prec uint8) Float32x8 // TruncScaledResidue computes the difference after truncating with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x16) TruncScaledResidue(prec uint8) Float32x16 // TruncScaledResidue computes the difference after truncating with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x2) TruncScaledResidue(prec uint8) Float64x2 // TruncScaledResidue computes the difference after truncating with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x4) TruncScaledResidue(prec uint8) Float64x4 // TruncScaledResidue computes the difference after truncating with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x8) TruncScaledResidue(prec uint8) Float64x8 @@ -12236,7 +12236,7 @@ func (x Float64x8) TruncScaledResidue(prec uint8) Float64x8 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x4) TruncScaledResidueMasked(prec uint8, mask Mask32x4) Float32x4 @@ -12245,7 +12245,7 @@ func (x Float32x4) TruncScaledResidueMasked(prec uint8, mask Mask32x4) Float32x4 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x8) TruncScaledResidueMasked(prec uint8, mask Mask32x8) Float32x8 @@ -12254,7 +12254,7 @@ func (x Float32x8) TruncScaledResidueMasked(prec uint8, mask Mask32x8) Float32x8 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. 
+// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x16) TruncScaledResidueMasked(prec uint8, mask Mask32x16) Float32x16 @@ -12263,7 +12263,7 @@ func (x Float32x16) TruncScaledResidueMasked(prec uint8, mask Mask32x16) Float32 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x2) TruncScaledResidueMasked(prec uint8, mask Mask64x2) Float64x2 @@ -12272,7 +12272,7 @@ func (x Float64x2) TruncScaledResidueMasked(prec uint8, mask Mask64x2) Float64x2 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x4) TruncScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4 @@ -12281,7 +12281,7 @@ func (x Float64x4) TruncScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x8) TruncScaledResidueMasked(prec uint8, mask Mask64x8) Float64x8 From 1755c2909d93182c7aac0ac2ef610a7a94740b02 Mon Sep 17 00:00:00 2001 From: Austin Clements Date: Mon, 11 Aug 2025 15:58:31 -0400 Subject: [PATCH 122/139] [dev.simd] cmd/compile, simd: update generated files This CL is generated by x/arch CL 694857. 
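For context, a minimal sketch (not part of the generated change) of how the signed and unsigned MulHigh methods touched by this CL are expected to differ in use; the Load*Slice and StoreSlice helpers are assumed names and may not match the final simd package API:

	// assumes: import "simd", built with GOEXPERIMENT=simd on amd64
	// Signed: VPMULHW keeps the high 16 bits of each signed 32-bit product.
	func mulHighS(dst, a, b []int16) {
		x := simd.LoadInt16x8Slice(a) // assumed constructor
		y := simd.LoadInt16x8Slice(b) // assumed constructor
		x.MulHigh(y).StoreSlice(dst)  // now lowered to VPMULHW (was VPMULHUW)
	}

	// Unsigned: VPMULHUW keeps the high 16 bits of each unsigned 32-bit product.
	func mulHighU(dst, a, b []uint16) {
		x := simd.LoadUint16x8Slice(a) // assumed constructor
		y := simd.LoadUint16x8Slice(b) // assumed constructor
		x.MulHigh(y).StoreSlice(dst)   // Uint16x8.MulHigh added by this CL
	}
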
Change-Id: I9745fa8c9b2e3f49bd2cff5ff6b5578c0c67bfa1 Reviewed-on: https://go-review.googlesource.com/c/go/+/694915 Reviewed-by: David Chase Auto-Submit: Austin Clements Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/simdssa.go | 15 +- .../compile/internal/ssa/_gen/simdAMD64.rules | 14 +- .../compile/internal/ssa/_gen/simdAMD64ops.go | 6 + .../internal/ssa/_gen/simdgenericOps.go | 6 + src/cmd/compile/internal/ssa/opGen.go | 141 ++++++++++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 77 +++++++++- .../compile/internal/ssagen/simdintrinsics.go | 6 + src/simd/ops_amd64.go | 44 +++++- 8 files changed, 294 insertions(+), 15 deletions(-) diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 274602c0a757ce..e6bbdc03def1e4 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -236,9 +236,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMULDQ256, ssa.OpAMD64VPMULUDQ128, ssa.OpAMD64VPMULUDQ256, + ssa.OpAMD64VPMULHW128, + ssa.OpAMD64VPMULHW256, + ssa.OpAMD64VPMULHW512, ssa.OpAMD64VPMULHUW128, ssa.OpAMD64VPMULHUW256, - ssa.OpAMD64VPMULHW512, + ssa.OpAMD64VPMULHUW512, ssa.OpAMD64VPOR128, ssa.OpAMD64VPOR256, ssa.OpAMD64VPORD512, @@ -481,8 +484,11 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMINUQMasked128, ssa.OpAMD64VPMINUQMasked256, ssa.OpAMD64VPMINUQMasked512, - ssa.OpAMD64VPMULHUWMasked128, + ssa.OpAMD64VPMULHWMasked128, ssa.OpAMD64VPMULHWMasked256, + ssa.OpAMD64VPMULHWMasked512, + ssa.OpAMD64VPMULHUWMasked128, + ssa.OpAMD64VPMULHUWMasked256, ssa.OpAMD64VPMULHUWMasked512, ssa.OpAMD64VMULPSMasked128, ssa.OpAMD64VMULPSMasked256, @@ -1362,8 +1368,11 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VFMADDSUB213PDMasked128, ssa.OpAMD64VFMADDSUB213PDMasked256, ssa.OpAMD64VFMADDSUB213PDMasked512, - ssa.OpAMD64VPMULHUWMasked128, + ssa.OpAMD64VPMULHWMasked128, ssa.OpAMD64VPMULHWMasked256, + ssa.OpAMD64VPMULHWMasked512, + ssa.OpAMD64VPMULHUWMasked128, + ssa.OpAMD64VPMULHUWMasked256, ssa.OpAMD64VPMULHUWMasked512, ssa.OpAMD64VMULPSMasked128, ssa.OpAMD64VMULPSMasked256, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 8ff638808abc3f..abfa10020dec49 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -936,12 +936,18 @@ (MulEvenWidenInt32x8 ...) => (VPMULDQ256 ...) (MulEvenWidenUint32x4 ...) => (VPMULUDQ128 ...) (MulEvenWidenUint32x8 ...) => (VPMULUDQ256 ...) -(MulHighInt16x8 ...) => (VPMULHUW128 ...) -(MulHighInt16x16 ...) => (VPMULHUW256 ...) +(MulHighInt16x8 ...) => (VPMULHW128 ...) +(MulHighInt16x16 ...) => (VPMULHW256 ...) (MulHighInt16x32 ...) => (VPMULHW512 ...) -(MulHighMaskedInt16x8 x y mask) => (VPMULHUWMasked128 x y (VPMOVVec16x8ToM mask)) +(MulHighUint16x8 ...) => (VPMULHUW128 ...) +(MulHighUint16x16 ...) => (VPMULHUW256 ...) +(MulHighUint16x32 ...) => (VPMULHUW512 ...) 
+(MulHighMaskedInt16x8 x y mask) => (VPMULHWMasked128 x y (VPMOVVec16x8ToM mask)) (MulHighMaskedInt16x16 x y mask) => (VPMULHWMasked256 x y (VPMOVVec16x16ToM mask)) -(MulHighMaskedInt16x32 x y mask) => (VPMULHUWMasked512 x y (VPMOVVec16x32ToM mask)) +(MulHighMaskedInt16x32 x y mask) => (VPMULHWMasked512 x y (VPMOVVec16x32ToM mask)) +(MulHighMaskedUint16x8 x y mask) => (VPMULHUWMasked128 x y (VPMOVVec16x8ToM mask)) +(MulHighMaskedUint16x16 x y mask) => (VPMULHUWMasked256 x y (VPMOVVec16x16ToM mask)) +(MulHighMaskedUint16x32 x y mask) => (VPMULHUWMasked512 x y (VPMOVVec16x32ToM mask)) (MulMaskedFloat32x4 x y mask) => (VMULPSMasked128 x y (VPMOVVec32x4ToM mask)) (MulMaskedFloat32x8 x y mask) => (VMULPSMasked256 x y (VPMOVVec32x8ToM mask)) (MulMaskedFloat32x16 x y mask) => (VMULPSMasked512 x y (VPMOVVec32x16ToM mask)) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 164ca7a344487d..386415ac41547b 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -511,10 +511,16 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMULDQ256", argLength: 2, reg: v21, asm: "VPMULDQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULHUW128", argLength: 2, reg: v21, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULHUW256", argLength: 2, reg: v21, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULHUW512", argLength: 2, reg: w21, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULHUWMasked128", argLength: 3, reg: w2kw, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULHUWMasked256", argLength: 3, reg: w2kw, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULHUWMasked512", argLength: 3, reg: w2kw, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULHW128", argLength: 2, reg: v21, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULHW256", argLength: 2, reg: v21, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULHW512", argLength: 2, reg: w21, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULHWMasked128", argLength: 3, reg: w2kw, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULHWMasked256", argLength: 3, reg: w2kw, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULHWMasked512", argLength: 3, reg: w2kw, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULLD128", argLength: 2, reg: v21, asm: "VPMULLD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULLD256", argLength: 2, reg: v21, asm: "VPMULLD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULLD512", argLength: 2, reg: w21, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 416c53c44542b9..2378f196453927 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -859,6 +859,12 @@ func simdGenericOps() []opData { {name: "MulHighMaskedInt16x8", argLength: 3, commutative: true}, {name: 
"MulHighMaskedInt16x16", argLength: 3, commutative: true}, {name: "MulHighMaskedInt16x32", argLength: 3, commutative: true}, + {name: "MulHighMaskedUint16x8", argLength: 3, commutative: true}, + {name: "MulHighMaskedUint16x16", argLength: 3, commutative: true}, + {name: "MulHighMaskedUint16x32", argLength: 3, commutative: true}, + {name: "MulHighUint16x8", argLength: 2, commutative: true}, + {name: "MulHighUint16x16", argLength: 2, commutative: true}, + {name: "MulHighUint16x32", argLength: 2, commutative: true}, {name: "MulInt16x8", argLength: 2, commutative: true}, {name: "MulInt16x16", argLength: 2, commutative: true}, {name: "MulInt16x32", argLength: 2, commutative: true}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index d4e4f710a76c33..77527c83b8c751 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1734,10 +1734,16 @@ const ( OpAMD64VPMULDQ256 OpAMD64VPMULHUW128 OpAMD64VPMULHUW256 + OpAMD64VPMULHUW512 OpAMD64VPMULHUWMasked128 + OpAMD64VPMULHUWMasked256 OpAMD64VPMULHUWMasked512 + OpAMD64VPMULHW128 + OpAMD64VPMULHW256 OpAMD64VPMULHW512 + OpAMD64VPMULHWMasked128 OpAMD64VPMULHWMasked256 + OpAMD64VPMULHWMasked512 OpAMD64VPMULLD128 OpAMD64VPMULLD256 OpAMD64VPMULLD512 @@ -5461,6 +5467,12 @@ const ( OpMulHighMaskedInt16x8 OpMulHighMaskedInt16x16 OpMulHighMaskedInt16x32 + OpMulHighMaskedUint16x8 + OpMulHighMaskedUint16x16 + OpMulHighMaskedUint16x32 + OpMulHighUint16x8 + OpMulHighUint16x16 + OpMulHighUint16x32 OpMulInt16x8 OpMulInt16x16 OpMulInt16x32 @@ -27230,6 +27242,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMULHUW512", + argLen: 2, + commutative: true, + asm: x86.AVPMULHUW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, { name: "VPMULHUWMasked128", argLen: 3, @@ -27246,6 +27273,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMULHUWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMULHUW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMULHUWMasked512", argLen: 3, @@ -27262,6 +27305,36 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMULHW128", + argLen: 2, + commutative: true, + asm: x86.AVPMULHW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMULHW256", + argLen: 2, + commutative: true, + asm: x86.AVPMULHW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMULHW512", argLen: 2, @@ -27277,6 +27350,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMULHWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMULHW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMULHWMasked256", argLen: 3, @@ -27293,6 +27382,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMULHWMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMULHW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMULLD128", argLen: 2, @@ -67968,6 +68073,42 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MulHighMaskedUint16x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MulHighMaskedUint16x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MulHighMaskedUint16x32", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MulHighUint16x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulHighUint16x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulHighUint16x32", + argLen: 2, + commutative: true, + generic: true, + }, { name: "MulInt16x8", argLen: 2, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 865b404d1419da..fbe8a448d8d14a 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -3151,13 +3151,13 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64VMULPD512 return true case OpMulHighInt16x16: - v.Op = OpAMD64VPMULHUW256 + v.Op = OpAMD64VPMULHW256 return true case OpMulHighInt16x32: v.Op = OpAMD64VPMULHW512 return true case OpMulHighInt16x8: - v.Op = OpAMD64VPMULHUW128 + v.Op = OpAMD64VPMULHW128 return true case OpMulHighMaskedInt16x16: return rewriteValueAMD64_OpMulHighMaskedInt16x16(v) @@ -3165,6 +3165,21 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMulHighMaskedInt16x32(v) case OpMulHighMaskedInt16x8: return rewriteValueAMD64_OpMulHighMaskedInt16x8(v) + case OpMulHighMaskedUint16x16: + return rewriteValueAMD64_OpMulHighMaskedUint16x16(v) + case OpMulHighMaskedUint16x32: + return rewriteValueAMD64_OpMulHighMaskedUint16x32(v) + case OpMulHighMaskedUint16x8: + return rewriteValueAMD64_OpMulHighMaskedUint16x8(v) + case OpMulHighUint16x16: + v.Op = OpAMD64VPMULHUW256 + return true + case OpMulHighUint16x32: + v.Op = OpAMD64VPMULHUW512 + return true + case OpMulHighUint16x8: + v.Op = OpAMD64VPMULHUW128 + return true case OpMulInt16x16: v.Op = OpAMD64VPMULLW256 return true @@ -44729,12 +44744,12 @@ func rewriteValueAMD64_OpMulHighMaskedInt16x32(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (MulHighMaskedInt16x32 x y mask) - // result: (VPMULHUWMasked512 x y (VPMOVVec16x32ToM mask)) + // result: (VPMULHWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 
y := v_1 mask := v_2 - v.reset(OpAMD64VPMULHUWMasked512) + v.reset(OpAMD64VPMULHWMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -44747,6 +44762,60 @@ func rewriteValueAMD64_OpMulHighMaskedInt16x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (MulHighMaskedInt16x8 x y mask) + // result: (VPMULHWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULHWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMulHighMaskedUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MulHighMaskedUint16x16 x y mask) + // result: (VPMULHUWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULHUWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMulHighMaskedUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MulHighMaskedUint16x32 x y mask) + // result: (VPMULHUWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULHUWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMulHighMaskedUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MulHighMaskedUint16x8 x y mask) // result: (VPMULHUWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 4be74d913666eb..02d68a57ccc542 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -950,9 +950,15 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Int16x8.MulHigh", opLen2(ssa.OpMulHighInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.MulHigh", opLen2(ssa.OpMulHighInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.MulHigh", opLen2(ssa.OpMulHighInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.MulHigh", opLen2(ssa.OpMulHighUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.MulHigh", opLen2(ssa.OpMulHighUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.MulHigh", opLen2(ssa.OpMulHighUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.MulHighMasked", opLen3(ssa.OpMulHighMaskedInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.MulHighMasked", opLen3(ssa.OpMulHighMaskedInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.MulHighMasked", opLen3(ssa.OpMulHighMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.MulHighMasked", opLen3(ssa.OpMulHighMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.MulHighMasked", opLen3(ssa.OpMulHighMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.MulHighMasked", opLen3(ssa.OpMulHighMaskedUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.MulMasked", opLen3(ssa.OpMulMaskedFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.MulMasked", opLen3(ssa.OpMulMaskedFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.MulMasked", opLen3(ssa.OpMulMaskedFloat32x16, types.TypeVec512), sys.AMD64) diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 01d939c9ed42f2..32830e8d204222 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -5862,12 +5862,12 @@ func (x Uint32x8) MulEvenWiden(y Uint32x8) Uint64x4 // MulHigh multiplies elements and stores the high part of the result. // -// Asm: VPMULHUW, CPU Feature: AVX +// Asm: VPMULHW, CPU Feature: AVX func (x Int16x8) MulHigh(y Int16x8) Int16x8 // MulHigh multiplies elements and stores the high part of the result. // -// Asm: VPMULHUW, CPU Feature: AVX2 +// Asm: VPMULHW, CPU Feature: AVX2 func (x Int16x16) MulHigh(y Int16x16) Int16x16 // MulHigh multiplies elements and stores the high part of the result. @@ -5875,13 +5875,28 @@ func (x Int16x16) MulHigh(y Int16x16) Int16x16 // Asm: VPMULHW, CPU Feature: AVX512BW func (x Int16x32) MulHigh(y Int16x32) Int16x32 +// MulHigh multiplies elements and stores the high part of the result. +// +// Asm: VPMULHUW, CPU Feature: AVX +func (x Uint16x8) MulHigh(y Uint16x8) Uint16x8 + +// MulHigh multiplies elements and stores the high part of the result. +// +// Asm: VPMULHUW, CPU Feature: AVX2 +func (x Uint16x16) MulHigh(y Uint16x16) Uint16x16 + +// MulHigh multiplies elements and stores the high part of the result. +// +// Asm: VPMULHUW, CPU Feature: AVX512BW +func (x Uint16x32) MulHigh(y Uint16x32) Uint16x32 + /* MulHighMasked */ // MulHighMasked multiplies elements and stores the high part of the result. // // This operation is applied selectively under a write mask. // -// Asm: VPMULHUW, CPU Feature: AVX512BW +// Asm: VPMULHW, CPU Feature: AVX512BW func (x Int16x8) MulHighMasked(y Int16x8, mask Mask16x8) Int16x8 // MulHighMasked multiplies elements and stores the high part of the result. @@ -5895,9 +5910,30 @@ func (x Int16x16) MulHighMasked(y Int16x16, mask Mask16x16) Int16x16 // // This operation is applied selectively under a write mask. 
// -// Asm: VPMULHUW, CPU Feature: AVX512BW +// Asm: VPMULHW, CPU Feature: AVX512BW func (x Int16x32) MulHighMasked(y Int16x32, mask Mask16x32) Int16x32 +// MulHighMasked multiplies elements and stores the high part of the result. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPMULHUW, CPU Feature: AVX512BW +func (x Uint16x8) MulHighMasked(y Uint16x8, mask Mask16x8) Uint16x8 + +// MulHighMasked multiplies elements and stores the high part of the result. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPMULHUW, CPU Feature: AVX512BW +func (x Uint16x16) MulHighMasked(y Uint16x16, mask Mask16x16) Uint16x16 + +// MulHighMasked multiplies elements and stores the high part of the result. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPMULHUW, CPU Feature: AVX512BW +func (x Uint16x32) MulHighMasked(y Uint16x32, mask Mask16x32) Uint16x32 + /* MulMasked */ // MulMasked multiplies corresponding elements of two vectors. From 667add4f1ccc61f11c0ac98ef5d3119a24ff3fff Mon Sep 17 00:00:00 2001 From: Austin Clements Date: Mon, 11 Aug 2025 16:02:00 -0400 Subject: [PATCH 123/139] [dev.simd] cmd/compile, simd: update generated files This CL is generated by x/arch CL 694859. Change-Id: I18bd076e26e93bc2fb0e761de26511138e95055f Reviewed-on: https://go-review.googlesource.com/c/go/+/694916 LUCI-TryBot-Result: Go LUCI Auto-Submit: Austin Clements Reviewed-by: David Chase Reviewed-by: Junyang Shao --- src/simd/ops_amd64.go | 2474 ++++++++++++++++++++--------------------- 1 file changed, 1237 insertions(+), 1237 deletions(-) diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 32830e8d204222..43f36de2b553c1 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -18,7 +18,7 @@ func (x Int8x32) Abs() Int8x32 // Abs computes the absolute value of each element. // -// Asm: VPABSB, CPU Feature: AVX512BW +// Asm: VPABSB, CPU Feature: AVX512 func (x Int8x64) Abs() Int8x64 // Abs computes the absolute value of each element. @@ -33,7 +33,7 @@ func (x Int16x16) Abs() Int16x16 // Abs computes the absolute value of each element. // -// Asm: VPABSW, CPU Feature: AVX512BW +// Asm: VPABSW, CPU Feature: AVX512 func (x Int16x32) Abs() Int16x32 // Abs computes the absolute value of each element. @@ -48,22 +48,22 @@ func (x Int32x8) Abs() Int32x8 // Abs computes the absolute value of each element. // -// Asm: VPABSD, CPU Feature: AVX512F +// Asm: VPABSD, CPU Feature: AVX512 func (x Int32x16) Abs() Int32x16 // Abs computes the absolute value of each element. // -// Asm: VPABSQ, CPU Feature: AVX512F +// Asm: VPABSQ, CPU Feature: AVX512 func (x Int64x2) Abs() Int64x2 // Abs computes the absolute value of each element. // -// Asm: VPABSQ, CPU Feature: AVX512F +// Asm: VPABSQ, CPU Feature: AVX512 func (x Int64x4) Abs() Int64x4 // Abs computes the absolute value of each element. // -// Asm: VPABSQ, CPU Feature: AVX512F +// Asm: VPABSQ, CPU Feature: AVX512 func (x Int64x8) Abs() Int64x8 /* AbsMasked */ @@ -72,84 +72,84 @@ func (x Int64x8) Abs() Int64x8 // // This operation is applied selectively under a write mask. // -// Asm: VPABSB, CPU Feature: AVX512BW +// Asm: VPABSB, CPU Feature: AVX512 func (x Int8x16) AbsMasked(mask Mask8x16) Int8x16 // AbsMasked computes the absolute value of each element. // // This operation is applied selectively under a write mask. 
// -// Asm: VPABSB, CPU Feature: AVX512BW +// Asm: VPABSB, CPU Feature: AVX512 func (x Int8x32) AbsMasked(mask Mask8x32) Int8x32 // AbsMasked computes the absolute value of each element. // // This operation is applied selectively under a write mask. // -// Asm: VPABSB, CPU Feature: AVX512BW +// Asm: VPABSB, CPU Feature: AVX512 func (x Int8x64) AbsMasked(mask Mask8x64) Int8x64 // AbsMasked computes the absolute value of each element. // // This operation is applied selectively under a write mask. // -// Asm: VPABSW, CPU Feature: AVX512BW +// Asm: VPABSW, CPU Feature: AVX512 func (x Int16x8) AbsMasked(mask Mask16x8) Int16x8 // AbsMasked computes the absolute value of each element. // // This operation is applied selectively under a write mask. // -// Asm: VPABSW, CPU Feature: AVX512BW +// Asm: VPABSW, CPU Feature: AVX512 func (x Int16x16) AbsMasked(mask Mask16x16) Int16x16 // AbsMasked computes the absolute value of each element. // // This operation is applied selectively under a write mask. // -// Asm: VPABSW, CPU Feature: AVX512BW +// Asm: VPABSW, CPU Feature: AVX512 func (x Int16x32) AbsMasked(mask Mask16x32) Int16x32 // AbsMasked computes the absolute value of each element. // // This operation is applied selectively under a write mask. // -// Asm: VPABSD, CPU Feature: AVX512F +// Asm: VPABSD, CPU Feature: AVX512 func (x Int32x4) AbsMasked(mask Mask32x4) Int32x4 // AbsMasked computes the absolute value of each element. // // This operation is applied selectively under a write mask. // -// Asm: VPABSD, CPU Feature: AVX512F +// Asm: VPABSD, CPU Feature: AVX512 func (x Int32x8) AbsMasked(mask Mask32x8) Int32x8 // AbsMasked computes the absolute value of each element. // // This operation is applied selectively under a write mask. // -// Asm: VPABSD, CPU Feature: AVX512F +// Asm: VPABSD, CPU Feature: AVX512 func (x Int32x16) AbsMasked(mask Mask32x16) Int32x16 // AbsMasked computes the absolute value of each element. // // This operation is applied selectively under a write mask. // -// Asm: VPABSQ, CPU Feature: AVX512F +// Asm: VPABSQ, CPU Feature: AVX512 func (x Int64x2) AbsMasked(mask Mask64x2) Int64x2 // AbsMasked computes the absolute value of each element. // // This operation is applied selectively under a write mask. // -// Asm: VPABSQ, CPU Feature: AVX512F +// Asm: VPABSQ, CPU Feature: AVX512 func (x Int64x4) AbsMasked(mask Mask64x4) Int64x4 // AbsMasked computes the absolute value of each element. // // This operation is applied selectively under a write mask. // -// Asm: VPABSQ, CPU Feature: AVX512F +// Asm: VPABSQ, CPU Feature: AVX512 func (x Int64x8) AbsMasked(mask Mask64x8) Int64x8 /* Add */ @@ -166,7 +166,7 @@ func (x Float32x8) Add(y Float32x8) Float32x8 // Add adds corresponding elements of two vectors. // -// Asm: VADDPS, CPU Feature: AVX512F +// Asm: VADDPS, CPU Feature: AVX512 func (x Float32x16) Add(y Float32x16) Float32x16 // Add adds corresponding elements of two vectors. @@ -181,7 +181,7 @@ func (x Float64x4) Add(y Float64x4) Float64x4 // Add adds corresponding elements of two vectors. // -// Asm: VADDPD, CPU Feature: AVX512F +// Asm: VADDPD, CPU Feature: AVX512 func (x Float64x8) Add(y Float64x8) Float64x8 // Add adds corresponding elements of two vectors. @@ -196,7 +196,7 @@ func (x Int8x32) Add(y Int8x32) Int8x32 // Add adds corresponding elements of two vectors. // -// Asm: VPADDB, CPU Feature: AVX512BW +// Asm: VPADDB, CPU Feature: AVX512 func (x Int8x64) Add(y Int8x64) Int8x64 // Add adds corresponding elements of two vectors. 
@@ -211,7 +211,7 @@ func (x Int16x16) Add(y Int16x16) Int16x16 // Add adds corresponding elements of two vectors. // -// Asm: VPADDW, CPU Feature: AVX512BW +// Asm: VPADDW, CPU Feature: AVX512 func (x Int16x32) Add(y Int16x32) Int16x32 // Add adds corresponding elements of two vectors. @@ -226,7 +226,7 @@ func (x Int32x8) Add(y Int32x8) Int32x8 // Add adds corresponding elements of two vectors. // -// Asm: VPADDD, CPU Feature: AVX512F +// Asm: VPADDD, CPU Feature: AVX512 func (x Int32x16) Add(y Int32x16) Int32x16 // Add adds corresponding elements of two vectors. @@ -241,7 +241,7 @@ func (x Int64x4) Add(y Int64x4) Int64x4 // Add adds corresponding elements of two vectors. // -// Asm: VPADDQ, CPU Feature: AVX512F +// Asm: VPADDQ, CPU Feature: AVX512 func (x Int64x8) Add(y Int64x8) Int64x8 // Add adds corresponding elements of two vectors. @@ -256,7 +256,7 @@ func (x Uint8x32) Add(y Uint8x32) Uint8x32 // Add adds corresponding elements of two vectors. // -// Asm: VPADDB, CPU Feature: AVX512BW +// Asm: VPADDB, CPU Feature: AVX512 func (x Uint8x64) Add(y Uint8x64) Uint8x64 // Add adds corresponding elements of two vectors. @@ -271,7 +271,7 @@ func (x Uint16x16) Add(y Uint16x16) Uint16x16 // Add adds corresponding elements of two vectors. // -// Asm: VPADDW, CPU Feature: AVX512BW +// Asm: VPADDW, CPU Feature: AVX512 func (x Uint16x32) Add(y Uint16x32) Uint16x32 // Add adds corresponding elements of two vectors. @@ -286,7 +286,7 @@ func (x Uint32x8) Add(y Uint32x8) Uint32x8 // Add adds corresponding elements of two vectors. // -// Asm: VPADDD, CPU Feature: AVX512F +// Asm: VPADDD, CPU Feature: AVX512 func (x Uint32x16) Add(y Uint32x16) Uint32x16 // Add adds corresponding elements of two vectors. @@ -301,7 +301,7 @@ func (x Uint64x4) Add(y Uint64x4) Uint64x4 // Add adds corresponding elements of two vectors. // -// Asm: VPADDQ, CPU Feature: AVX512F +// Asm: VPADDQ, CPU Feature: AVX512 func (x Uint64x8) Add(y Uint64x8) Uint64x8 /* AddDotProdPairsSaturated */ @@ -430,210 +430,210 @@ func (x Int8x64) AddDotProdQuadrupleSaturatedMasked(y Uint8x64, z Int32x16, mask // // This operation is applied selectively under a write mask. // -// Asm: VADDPS, CPU Feature: AVX512F +// Asm: VADDPS, CPU Feature: AVX512 func (x Float32x4) AddMasked(y Float32x4, mask Mask32x4) Float32x4 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VADDPS, CPU Feature: AVX512F +// Asm: VADDPS, CPU Feature: AVX512 func (x Float32x8) AddMasked(y Float32x8, mask Mask32x8) Float32x8 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VADDPS, CPU Feature: AVX512F +// Asm: VADDPS, CPU Feature: AVX512 func (x Float32x16) AddMasked(y Float32x16, mask Mask32x16) Float32x16 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VADDPD, CPU Feature: AVX512F +// Asm: VADDPD, CPU Feature: AVX512 func (x Float64x2) AddMasked(y Float64x2, mask Mask64x2) Float64x2 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VADDPD, CPU Feature: AVX512F +// Asm: VADDPD, CPU Feature: AVX512 func (x Float64x4) AddMasked(y Float64x4, mask Mask64x4) Float64x4 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. 
// -// Asm: VADDPD, CPU Feature: AVX512F +// Asm: VADDPD, CPU Feature: AVX512 func (x Float64x8) AddMasked(y Float64x8, mask Mask64x8) Float64x8 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPADDB, CPU Feature: AVX512BW +// Asm: VPADDB, CPU Feature: AVX512 func (x Int8x16) AddMasked(y Int8x16, mask Mask8x16) Int8x16 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPADDB, CPU Feature: AVX512BW +// Asm: VPADDB, CPU Feature: AVX512 func (x Int8x32) AddMasked(y Int8x32, mask Mask8x32) Int8x32 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPADDB, CPU Feature: AVX512BW +// Asm: VPADDB, CPU Feature: AVX512 func (x Int8x64) AddMasked(y Int8x64, mask Mask8x64) Int8x64 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPADDW, CPU Feature: AVX512BW +// Asm: VPADDW, CPU Feature: AVX512 func (x Int16x8) AddMasked(y Int16x8, mask Mask16x8) Int16x8 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPADDW, CPU Feature: AVX512BW +// Asm: VPADDW, CPU Feature: AVX512 func (x Int16x16) AddMasked(y Int16x16, mask Mask16x16) Int16x16 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPADDW, CPU Feature: AVX512BW +// Asm: VPADDW, CPU Feature: AVX512 func (x Int16x32) AddMasked(y Int16x32, mask Mask16x32) Int16x32 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPADDD, CPU Feature: AVX512F +// Asm: VPADDD, CPU Feature: AVX512 func (x Int32x4) AddMasked(y Int32x4, mask Mask32x4) Int32x4 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPADDD, CPU Feature: AVX512F +// Asm: VPADDD, CPU Feature: AVX512 func (x Int32x8) AddMasked(y Int32x8, mask Mask32x8) Int32x8 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPADDD, CPU Feature: AVX512F +// Asm: VPADDD, CPU Feature: AVX512 func (x Int32x16) AddMasked(y Int32x16, mask Mask32x16) Int32x16 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPADDQ, CPU Feature: AVX512F +// Asm: VPADDQ, CPU Feature: AVX512 func (x Int64x2) AddMasked(y Int64x2, mask Mask64x2) Int64x2 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPADDQ, CPU Feature: AVX512F +// Asm: VPADDQ, CPU Feature: AVX512 func (x Int64x4) AddMasked(y Int64x4, mask Mask64x4) Int64x4 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPADDQ, CPU Feature: AVX512F +// Asm: VPADDQ, CPU Feature: AVX512 func (x Int64x8) AddMasked(y Int64x8, mask Mask64x8) Int64x8 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. 
// -// Asm: VPADDB, CPU Feature: AVX512BW +// Asm: VPADDB, CPU Feature: AVX512 func (x Uint8x16) AddMasked(y Uint8x16, mask Mask8x16) Uint8x16 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPADDB, CPU Feature: AVX512BW +// Asm: VPADDB, CPU Feature: AVX512 func (x Uint8x32) AddMasked(y Uint8x32, mask Mask8x32) Uint8x32 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPADDB, CPU Feature: AVX512BW +// Asm: VPADDB, CPU Feature: AVX512 func (x Uint8x64) AddMasked(y Uint8x64, mask Mask8x64) Uint8x64 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPADDW, CPU Feature: AVX512BW +// Asm: VPADDW, CPU Feature: AVX512 func (x Uint16x8) AddMasked(y Uint16x8, mask Mask16x8) Uint16x8 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPADDW, CPU Feature: AVX512BW +// Asm: VPADDW, CPU Feature: AVX512 func (x Uint16x16) AddMasked(y Uint16x16, mask Mask16x16) Uint16x16 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPADDW, CPU Feature: AVX512BW +// Asm: VPADDW, CPU Feature: AVX512 func (x Uint16x32) AddMasked(y Uint16x32, mask Mask16x32) Uint16x32 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPADDD, CPU Feature: AVX512F +// Asm: VPADDD, CPU Feature: AVX512 func (x Uint32x4) AddMasked(y Uint32x4, mask Mask32x4) Uint32x4 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPADDD, CPU Feature: AVX512F +// Asm: VPADDD, CPU Feature: AVX512 func (x Uint32x8) AddMasked(y Uint32x8, mask Mask32x8) Uint32x8 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPADDD, CPU Feature: AVX512F +// Asm: VPADDD, CPU Feature: AVX512 func (x Uint32x16) AddMasked(y Uint32x16, mask Mask32x16) Uint32x16 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPADDQ, CPU Feature: AVX512F +// Asm: VPADDQ, CPU Feature: AVX512 func (x Uint64x2) AddMasked(y Uint64x2, mask Mask64x2) Uint64x2 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPADDQ, CPU Feature: AVX512F +// Asm: VPADDQ, CPU Feature: AVX512 func (x Uint64x4) AddMasked(y Uint64x4, mask Mask64x4) Uint64x4 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPADDQ, CPU Feature: AVX512F +// Asm: VPADDQ, CPU Feature: AVX512 func (x Uint64x8) AddMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* AddPairs */ @@ -738,7 +738,7 @@ func (x Int8x32) AddSaturated(y Int8x32) Int8x32 // AddSaturated adds corresponding elements of two vectors with saturation. // -// Asm: VPADDSB, CPU Feature: AVX512BW +// Asm: VPADDSB, CPU Feature: AVX512 func (x Int8x64) AddSaturated(y Int8x64) Int8x64 // AddSaturated adds corresponding elements of two vectors with saturation. 
@@ -753,7 +753,7 @@ func (x Int16x16) AddSaturated(y Int16x16) Int16x16 // AddSaturated adds corresponding elements of two vectors with saturation. // -// Asm: VPADDSW, CPU Feature: AVX512BW +// Asm: VPADDSW, CPU Feature: AVX512 func (x Int16x32) AddSaturated(y Int16x32) Int16x32 // AddSaturated adds corresponding elements of two vectors with saturation. @@ -768,7 +768,7 @@ func (x Uint8x32) AddSaturated(y Uint8x32) Uint8x32 // AddSaturated adds corresponding elements of two vectors with saturation. // -// Asm: VPADDUSB, CPU Feature: AVX512BW +// Asm: VPADDUSB, CPU Feature: AVX512 func (x Uint8x64) AddSaturated(y Uint8x64) Uint8x64 // AddSaturated adds corresponding elements of two vectors with saturation. @@ -783,7 +783,7 @@ func (x Uint16x16) AddSaturated(y Uint16x16) Uint16x16 // AddSaturated adds corresponding elements of two vectors with saturation. // -// Asm: VPADDUSW, CPU Feature: AVX512BW +// Asm: VPADDUSW, CPU Feature: AVX512 func (x Uint16x32) AddSaturated(y Uint16x32) Uint16x32 /* AddSaturatedMasked */ @@ -792,84 +792,84 @@ func (x Uint16x32) AddSaturated(y Uint16x32) Uint16x32 // // This operation is applied selectively under a write mask. // -// Asm: VPADDSB, CPU Feature: AVX512BW +// Asm: VPADDSB, CPU Feature: AVX512 func (x Int8x16) AddSaturatedMasked(y Int8x16, mask Mask8x16) Int8x16 // AddSaturatedMasked adds corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPADDSB, CPU Feature: AVX512BW +// Asm: VPADDSB, CPU Feature: AVX512 func (x Int8x32) AddSaturatedMasked(y Int8x32, mask Mask8x32) Int8x32 // AddSaturatedMasked adds corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPADDSB, CPU Feature: AVX512BW +// Asm: VPADDSB, CPU Feature: AVX512 func (x Int8x64) AddSaturatedMasked(y Int8x64, mask Mask8x64) Int8x64 // AddSaturatedMasked adds corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPADDSW, CPU Feature: AVX512BW +// Asm: VPADDSW, CPU Feature: AVX512 func (x Int16x8) AddSaturatedMasked(y Int16x8, mask Mask16x8) Int16x8 // AddSaturatedMasked adds corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPADDSW, CPU Feature: AVX512BW +// Asm: VPADDSW, CPU Feature: AVX512 func (x Int16x16) AddSaturatedMasked(y Int16x16, mask Mask16x16) Int16x16 // AddSaturatedMasked adds corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPADDSW, CPU Feature: AVX512BW +// Asm: VPADDSW, CPU Feature: AVX512 func (x Int16x32) AddSaturatedMasked(y Int16x32, mask Mask16x32) Int16x32 // AddSaturatedMasked adds corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPADDUSB, CPU Feature: AVX512BW +// Asm: VPADDUSB, CPU Feature: AVX512 func (x Uint8x16) AddSaturatedMasked(y Uint8x16, mask Mask8x16) Uint8x16 // AddSaturatedMasked adds corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPADDUSB, CPU Feature: AVX512BW +// Asm: VPADDUSB, CPU Feature: AVX512 func (x Uint8x32) AddSaturatedMasked(y Uint8x32, mask Mask8x32) Uint8x32 // AddSaturatedMasked adds corresponding elements of two vectors with saturation. 
// // This operation is applied selectively under a write mask. // -// Asm: VPADDUSB, CPU Feature: AVX512BW +// Asm: VPADDUSB, CPU Feature: AVX512 func (x Uint8x64) AddSaturatedMasked(y Uint8x64, mask Mask8x64) Uint8x64 // AddSaturatedMasked adds corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPADDUSW, CPU Feature: AVX512BW +// Asm: VPADDUSW, CPU Feature: AVX512 func (x Uint16x8) AddSaturatedMasked(y Uint16x8, mask Mask16x8) Uint16x8 // AddSaturatedMasked adds corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPADDUSW, CPU Feature: AVX512BW +// Asm: VPADDUSW, CPU Feature: AVX512 func (x Uint16x16) AddSaturatedMasked(y Uint16x16, mask Mask16x16) Uint16x16 // AddSaturatedMasked adds corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPADDUSW, CPU Feature: AVX512BW +// Asm: VPADDUSW, CPU Feature: AVX512 func (x Uint16x32) AddSaturatedMasked(y Uint16x32, mask Mask16x32) Uint16x32 /* AddSub */ @@ -908,7 +908,7 @@ func (x Int8x32) And(y Int8x32) Int8x32 // And performs a bitwise AND operation between two vectors. // -// Asm: VPANDD, CPU Feature: AVX512F +// Asm: VPANDD, CPU Feature: AVX512 func (x Int8x64) And(y Int8x64) Int8x64 // And performs a bitwise AND operation between two vectors. @@ -923,7 +923,7 @@ func (x Int16x16) And(y Int16x16) Int16x16 // And performs a bitwise AND operation between two vectors. // -// Asm: VPANDD, CPU Feature: AVX512F +// Asm: VPANDD, CPU Feature: AVX512 func (x Int16x32) And(y Int16x32) Int16x32 // And performs a bitwise AND operation between two vectors. @@ -938,7 +938,7 @@ func (x Int32x8) And(y Int32x8) Int32x8 // And performs a bitwise AND operation between two vectors. // -// Asm: VPANDD, CPU Feature: AVX512F +// Asm: VPANDD, CPU Feature: AVX512 func (x Int32x16) And(y Int32x16) Int32x16 // And performs a bitwise AND operation between two vectors. @@ -953,7 +953,7 @@ func (x Int64x4) And(y Int64x4) Int64x4 // And performs a bitwise AND operation between two vectors. // -// Asm: VPANDQ, CPU Feature: AVX512F +// Asm: VPANDQ, CPU Feature: AVX512 func (x Int64x8) And(y Int64x8) Int64x8 // And performs a bitwise AND operation between two vectors. @@ -968,7 +968,7 @@ func (x Uint8x32) And(y Uint8x32) Uint8x32 // And performs a bitwise AND operation between two vectors. // -// Asm: VPANDD, CPU Feature: AVX512F +// Asm: VPANDD, CPU Feature: AVX512 func (x Uint8x64) And(y Uint8x64) Uint8x64 // And performs a bitwise AND operation between two vectors. @@ -983,7 +983,7 @@ func (x Uint16x16) And(y Uint16x16) Uint16x16 // And performs a bitwise AND operation between two vectors. // -// Asm: VPANDD, CPU Feature: AVX512F +// Asm: VPANDD, CPU Feature: AVX512 func (x Uint16x32) And(y Uint16x32) Uint16x32 // And performs a bitwise AND operation between two vectors. @@ -998,7 +998,7 @@ func (x Uint32x8) And(y Uint32x8) Uint32x8 // And performs a bitwise AND operation between two vectors. // -// Asm: VPANDD, CPU Feature: AVX512F +// Asm: VPANDD, CPU Feature: AVX512 func (x Uint32x16) And(y Uint32x16) Uint32x16 // And performs a bitwise AND operation between two vectors. @@ -1013,7 +1013,7 @@ func (x Uint64x4) And(y Uint64x4) Uint64x4 // And performs a bitwise AND operation between two vectors. 
// -// Asm: VPANDQ, CPU Feature: AVX512F +// Asm: VPANDQ, CPU Feature: AVX512 func (x Uint64x8) And(y Uint64x8) Uint64x8 /* AndMasked */ @@ -1022,84 +1022,84 @@ func (x Uint64x8) And(y Uint64x8) Uint64x8 // // This operation is applied selectively under a write mask. // -// Asm: VPANDD, CPU Feature: AVX512F +// Asm: VPANDD, CPU Feature: AVX512 func (x Int32x4) AndMasked(y Int32x4, mask Mask32x4) Int32x4 // AndMasked performs a bitwise AND operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPANDD, CPU Feature: AVX512F +// Asm: VPANDD, CPU Feature: AVX512 func (x Int32x8) AndMasked(y Int32x8, mask Mask32x8) Int32x8 // AndMasked performs a bitwise AND operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPANDD, CPU Feature: AVX512F +// Asm: VPANDD, CPU Feature: AVX512 func (x Int32x16) AndMasked(y Int32x16, mask Mask32x16) Int32x16 // AndMasked performs a bitwise AND operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPANDQ, CPU Feature: AVX512F +// Asm: VPANDQ, CPU Feature: AVX512 func (x Int64x2) AndMasked(y Int64x2, mask Mask64x2) Int64x2 // AndMasked performs a bitwise AND operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPANDQ, CPU Feature: AVX512F +// Asm: VPANDQ, CPU Feature: AVX512 func (x Int64x4) AndMasked(y Int64x4, mask Mask64x4) Int64x4 // AndMasked performs a bitwise AND operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPANDQ, CPU Feature: AVX512F +// Asm: VPANDQ, CPU Feature: AVX512 func (x Int64x8) AndMasked(y Int64x8, mask Mask64x8) Int64x8 // AndMasked performs a bitwise AND operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPANDD, CPU Feature: AVX512F +// Asm: VPANDD, CPU Feature: AVX512 func (x Uint32x4) AndMasked(y Uint32x4, mask Mask32x4) Uint32x4 // AndMasked performs a bitwise AND operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPANDD, CPU Feature: AVX512F +// Asm: VPANDD, CPU Feature: AVX512 func (x Uint32x8) AndMasked(y Uint32x8, mask Mask32x8) Uint32x8 // AndMasked performs a bitwise AND operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPANDD, CPU Feature: AVX512F +// Asm: VPANDD, CPU Feature: AVX512 func (x Uint32x16) AndMasked(y Uint32x16, mask Mask32x16) Uint32x16 // AndMasked performs a bitwise AND operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPANDQ, CPU Feature: AVX512F +// Asm: VPANDQ, CPU Feature: AVX512 func (x Uint64x2) AndMasked(y Uint64x2, mask Mask64x2) Uint64x2 // AndMasked performs a bitwise AND operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPANDQ, CPU Feature: AVX512F +// Asm: VPANDQ, CPU Feature: AVX512 func (x Uint64x4) AndMasked(y Uint64x4, mask Mask64x4) Uint64x4 // AndMasked performs a bitwise AND operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPANDQ, CPU Feature: AVX512F +// Asm: VPANDQ, CPU Feature: AVX512 func (x Uint64x8) AndMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* AndNot */ @@ -1116,7 +1116,7 @@ func (x Int8x32) AndNot(y Int8x32) Int8x32 // AndNot performs a bitwise x &^ y. 
// -// Asm: VPANDND, CPU Feature: AVX512F +// Asm: VPANDND, CPU Feature: AVX512 func (x Int8x64) AndNot(y Int8x64) Int8x64 // AndNot performs a bitwise x &^ y. @@ -1131,7 +1131,7 @@ func (x Int16x16) AndNot(y Int16x16) Int16x16 // AndNot performs a bitwise x &^ y. // -// Asm: VPANDND, CPU Feature: AVX512F +// Asm: VPANDND, CPU Feature: AVX512 func (x Int16x32) AndNot(y Int16x32) Int16x32 // AndNot performs a bitwise x &^ y. @@ -1146,7 +1146,7 @@ func (x Int32x8) AndNot(y Int32x8) Int32x8 // AndNot performs a bitwise x &^ y. // -// Asm: VPANDND, CPU Feature: AVX512F +// Asm: VPANDND, CPU Feature: AVX512 func (x Int32x16) AndNot(y Int32x16) Int32x16 // AndNot performs a bitwise x &^ y. @@ -1161,7 +1161,7 @@ func (x Int64x4) AndNot(y Int64x4) Int64x4 // AndNot performs a bitwise x &^ y. // -// Asm: VPANDNQ, CPU Feature: AVX512F +// Asm: VPANDNQ, CPU Feature: AVX512 func (x Int64x8) AndNot(y Int64x8) Int64x8 // AndNot performs a bitwise x &^ y. @@ -1176,7 +1176,7 @@ func (x Uint8x32) AndNot(y Uint8x32) Uint8x32 // AndNot performs a bitwise x &^ y. // -// Asm: VPANDND, CPU Feature: AVX512F +// Asm: VPANDND, CPU Feature: AVX512 func (x Uint8x64) AndNot(y Uint8x64) Uint8x64 // AndNot performs a bitwise x &^ y. @@ -1191,7 +1191,7 @@ func (x Uint16x16) AndNot(y Uint16x16) Uint16x16 // AndNot performs a bitwise x &^ y. // -// Asm: VPANDND, CPU Feature: AVX512F +// Asm: VPANDND, CPU Feature: AVX512 func (x Uint16x32) AndNot(y Uint16x32) Uint16x32 // AndNot performs a bitwise x &^ y. @@ -1206,7 +1206,7 @@ func (x Uint32x8) AndNot(y Uint32x8) Uint32x8 // AndNot performs a bitwise x &^ y. // -// Asm: VPANDND, CPU Feature: AVX512F +// Asm: VPANDND, CPU Feature: AVX512 func (x Uint32x16) AndNot(y Uint32x16) Uint32x16 // AndNot performs a bitwise x &^ y. @@ -1221,7 +1221,7 @@ func (x Uint64x4) AndNot(y Uint64x4) Uint64x4 // AndNot performs a bitwise x &^ y. // -// Asm: VPANDNQ, CPU Feature: AVX512F +// Asm: VPANDNQ, CPU Feature: AVX512 func (x Uint64x8) AndNot(y Uint64x8) Uint64x8 /* AndNotMasked */ @@ -1230,84 +1230,84 @@ func (x Uint64x8) AndNot(y Uint64x8) Uint64x8 // // This operation is applied selectively under a write mask. // -// Asm: VPANDND, CPU Feature: AVX512F +// Asm: VPANDND, CPU Feature: AVX512 func (x Int32x4) AndNotMasked(y Int32x4, mask Mask32x4) Int32x4 // AndNotMasked performs a bitwise x &^ y. // // This operation is applied selectively under a write mask. // -// Asm: VPANDND, CPU Feature: AVX512F +// Asm: VPANDND, CPU Feature: AVX512 func (x Int32x8) AndNotMasked(y Int32x8, mask Mask32x8) Int32x8 // AndNotMasked performs a bitwise x &^ y. // // This operation is applied selectively under a write mask. // -// Asm: VPANDND, CPU Feature: AVX512F +// Asm: VPANDND, CPU Feature: AVX512 func (x Int32x16) AndNotMasked(y Int32x16, mask Mask32x16) Int32x16 // AndNotMasked performs a bitwise x &^ y. // // This operation is applied selectively under a write mask. // -// Asm: VPANDNQ, CPU Feature: AVX512F +// Asm: VPANDNQ, CPU Feature: AVX512 func (x Int64x2) AndNotMasked(y Int64x2, mask Mask64x2) Int64x2 // AndNotMasked performs a bitwise x &^ y. // // This operation is applied selectively under a write mask. // -// Asm: VPANDNQ, CPU Feature: AVX512F +// Asm: VPANDNQ, CPU Feature: AVX512 func (x Int64x4) AndNotMasked(y Int64x4, mask Mask64x4) Int64x4 // AndNotMasked performs a bitwise x &^ y. // // This operation is applied selectively under a write mask. 
// -// Asm: VPANDNQ, CPU Feature: AVX512F +// Asm: VPANDNQ, CPU Feature: AVX512 func (x Int64x8) AndNotMasked(y Int64x8, mask Mask64x8) Int64x8 // AndNotMasked performs a bitwise x &^ y. // // This operation is applied selectively under a write mask. // -// Asm: VPANDND, CPU Feature: AVX512F +// Asm: VPANDND, CPU Feature: AVX512 func (x Uint32x4) AndNotMasked(y Uint32x4, mask Mask32x4) Uint32x4 // AndNotMasked performs a bitwise x &^ y. // // This operation is applied selectively under a write mask. // -// Asm: VPANDND, CPU Feature: AVX512F +// Asm: VPANDND, CPU Feature: AVX512 func (x Uint32x8) AndNotMasked(y Uint32x8, mask Mask32x8) Uint32x8 // AndNotMasked performs a bitwise x &^ y. // // This operation is applied selectively under a write mask. // -// Asm: VPANDND, CPU Feature: AVX512F +// Asm: VPANDND, CPU Feature: AVX512 func (x Uint32x16) AndNotMasked(y Uint32x16, mask Mask32x16) Uint32x16 // AndNotMasked performs a bitwise x &^ y. // // This operation is applied selectively under a write mask. // -// Asm: VPANDNQ, CPU Feature: AVX512F +// Asm: VPANDNQ, CPU Feature: AVX512 func (x Uint64x2) AndNotMasked(y Uint64x2, mask Mask64x2) Uint64x2 // AndNotMasked performs a bitwise x &^ y. // // This operation is applied selectively under a write mask. // -// Asm: VPANDNQ, CPU Feature: AVX512F +// Asm: VPANDNQ, CPU Feature: AVX512 func (x Uint64x4) AndNotMasked(y Uint64x4, mask Mask64x4) Uint64x4 // AndNotMasked performs a bitwise x &^ y. // // This operation is applied selectively under a write mask. // -// Asm: VPANDNQ, CPU Feature: AVX512F +// Asm: VPANDNQ, CPU Feature: AVX512 func (x Uint64x8) AndNotMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* Average */ @@ -1324,7 +1324,7 @@ func (x Uint8x32) Average(y Uint8x32) Uint8x32 // Average computes the rounded average of corresponding elements. // -// Asm: VPAVGB, CPU Feature: AVX512BW +// Asm: VPAVGB, CPU Feature: AVX512 func (x Uint8x64) Average(y Uint8x64) Uint8x64 // Average computes the rounded average of corresponding elements. @@ -1339,7 +1339,7 @@ func (x Uint16x16) Average(y Uint16x16) Uint16x16 // Average computes the rounded average of corresponding elements. // -// Asm: VPAVGW, CPU Feature: AVX512BW +// Asm: VPAVGW, CPU Feature: AVX512 func (x Uint16x32) Average(y Uint16x32) Uint16x32 /* AverageMasked */ @@ -1348,42 +1348,42 @@ func (x Uint16x32) Average(y Uint16x32) Uint16x32 // // This operation is applied selectively under a write mask. // -// Asm: VPAVGB, CPU Feature: AVX512BW +// Asm: VPAVGB, CPU Feature: AVX512 func (x Uint8x16) AverageMasked(y Uint8x16, mask Mask8x16) Uint8x16 // AverageMasked computes the rounded average of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPAVGB, CPU Feature: AVX512BW +// Asm: VPAVGB, CPU Feature: AVX512 func (x Uint8x32) AverageMasked(y Uint8x32, mask Mask8x32) Uint8x32 // AverageMasked computes the rounded average of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPAVGB, CPU Feature: AVX512BW +// Asm: VPAVGB, CPU Feature: AVX512 func (x Uint8x64) AverageMasked(y Uint8x64, mask Mask8x64) Uint8x64 // AverageMasked computes the rounded average of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPAVGW, CPU Feature: AVX512BW +// Asm: VPAVGW, CPU Feature: AVX512 func (x Uint16x8) AverageMasked(y Uint16x8, mask Mask16x8) Uint16x8 // AverageMasked computes the rounded average of corresponding elements. 
// // This operation is applied selectively under a write mask. // -// Asm: VPAVGW, CPU Feature: AVX512BW +// Asm: VPAVGW, CPU Feature: AVX512 func (x Uint16x16) AverageMasked(y Uint16x16, mask Mask16x16) Uint16x16 // AverageMasked computes the rounded average of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPAVGW, CPU Feature: AVX512BW +// Asm: VPAVGW, CPU Feature: AVX512 func (x Uint16x32) AverageMasked(y Uint16x32, mask Mask16x32) Uint16x32 /* Ceil */ @@ -1414,42 +1414,42 @@ func (x Float64x4) Ceil() Float64x4 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F +// Asm: VRNDSCALEPS, CPU Feature: AVX512 func (x Float32x4) CeilScaled(prec uint8) Float32x4 // CeilScaled rounds elements up with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F +// Asm: VRNDSCALEPS, CPU Feature: AVX512 func (x Float32x8) CeilScaled(prec uint8) Float32x8 // CeilScaled rounds elements up with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F +// Asm: VRNDSCALEPS, CPU Feature: AVX512 func (x Float32x16) CeilScaled(prec uint8) Float32x16 // CeilScaled rounds elements up with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F +// Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x2) CeilScaled(prec uint8) Float64x2 // CeilScaled rounds elements up with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F +// Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x4) CeilScaled(prec uint8) Float64x4 // CeilScaled rounds elements up with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F +// Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x8) CeilScaled(prec uint8) Float64x8 /* CeilScaledMasked */ @@ -1460,7 +1460,7 @@ func (x Float64x8) CeilScaled(prec uint8) Float64x8 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F +// Asm: VRNDSCALEPS, CPU Feature: AVX512 func (x Float32x4) CeilScaledMasked(prec uint8, mask Mask32x4) Float32x4 // CeilScaledMasked rounds elements up with specified precision. @@ -1469,7 +1469,7 @@ func (x Float32x4) CeilScaledMasked(prec uint8, mask Mask32x4) Float32x4 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F +// Asm: VRNDSCALEPS, CPU Feature: AVX512 func (x Float32x8) CeilScaledMasked(prec uint8, mask Mask32x8) Float32x8 // CeilScaledMasked rounds elements up with specified precision. @@ -1478,7 +1478,7 @@ func (x Float32x8) CeilScaledMasked(prec uint8, mask Mask32x8) Float32x8 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// -// Asm: VRNDSCALEPS, CPU Feature: AVX512F +// Asm: VRNDSCALEPS, CPU Feature: AVX512 func (x Float32x16) CeilScaledMasked(prec uint8, mask Mask32x16) Float32x16 // CeilScaledMasked rounds elements up with specified precision. @@ -1487,7 +1487,7 @@ func (x Float32x16) CeilScaledMasked(prec uint8, mask Mask32x16) Float32x16 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F +// Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x2) CeilScaledMasked(prec uint8, mask Mask64x2) Float64x2 // CeilScaledMasked rounds elements up with specified precision. @@ -1496,7 +1496,7 @@ func (x Float64x2) CeilScaledMasked(prec uint8, mask Mask64x2) Float64x2 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F +// Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x4) CeilScaledMasked(prec uint8, mask Mask64x4) Float64x4 // CeilScaledMasked rounds elements up with specified precision. @@ -1505,7 +1505,7 @@ func (x Float64x4) CeilScaledMasked(prec uint8, mask Mask64x4) Float64x4 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F +// Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x8) CeilScaledMasked(prec uint8, mask Mask64x8) Float64x8 /* CeilScaledResidue */ @@ -1514,42 +1514,42 @@ func (x Float64x8) CeilScaledMasked(prec uint8, mask Mask64x8) Float64x8 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPS, CPU Feature: AVX512DQ +// Asm: VREDUCEPS, CPU Feature: AVX512 func (x Float32x4) CeilScaledResidue(prec uint8) Float32x4 // CeilScaledResidue computes the difference after ceiling with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPS, CPU Feature: AVX512DQ +// Asm: VREDUCEPS, CPU Feature: AVX512 func (x Float32x8) CeilScaledResidue(prec uint8) Float32x8 // CeilScaledResidue computes the difference after ceiling with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPS, CPU Feature: AVX512DQ +// Asm: VREDUCEPS, CPU Feature: AVX512 func (x Float32x16) CeilScaledResidue(prec uint8) Float32x16 // CeilScaledResidue computes the difference after ceiling with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPD, CPU Feature: AVX512DQ +// Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x2) CeilScaledResidue(prec uint8) Float64x2 // CeilScaledResidue computes the difference after ceiling with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPD, CPU Feature: AVX512DQ +// Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x4) CeilScaledResidue(prec uint8) Float64x4 // CeilScaledResidue computes the difference after ceiling with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ +// Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x8) CeilScaledResidue(prec uint8) Float64x8 /* CeilScaledResidueMasked */ @@ -1560,7 +1560,7 @@ func (x Float64x8) CeilScaledResidue(prec uint8) Float64x8 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPS, CPU Feature: AVX512DQ +// Asm: VREDUCEPS, CPU Feature: AVX512 func (x Float32x4) CeilScaledResidueMasked(prec uint8, mask Mask32x4) Float32x4 // CeilScaledResidueMasked computes the difference after ceiling with specified precision. @@ -1569,7 +1569,7 @@ func (x Float32x4) CeilScaledResidueMasked(prec uint8, mask Mask32x4) Float32x4 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPS, CPU Feature: AVX512DQ +// Asm: VREDUCEPS, CPU Feature: AVX512 func (x Float32x8) CeilScaledResidueMasked(prec uint8, mask Mask32x8) Float32x8 // CeilScaledResidueMasked computes the difference after ceiling with specified precision. @@ -1578,7 +1578,7 @@ func (x Float32x8) CeilScaledResidueMasked(prec uint8, mask Mask32x8) Float32x8 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPS, CPU Feature: AVX512DQ +// Asm: VREDUCEPS, CPU Feature: AVX512 func (x Float32x16) CeilScaledResidueMasked(prec uint8, mask Mask32x16) Float32x16 // CeilScaledResidueMasked computes the difference after ceiling with specified precision. @@ -1587,7 +1587,7 @@ func (x Float32x16) CeilScaledResidueMasked(prec uint8, mask Mask32x16) Float32x // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPD, CPU Feature: AVX512DQ +// Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x2) CeilScaledResidueMasked(prec uint8, mask Mask64x2) Float64x2 // CeilScaledResidueMasked computes the difference after ceiling with specified precision. @@ -1596,7 +1596,7 @@ func (x Float64x2) CeilScaledResidueMasked(prec uint8, mask Mask64x2) Float64x2 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPD, CPU Feature: AVX512DQ +// Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x4) CeilScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4 // CeilScaledResidueMasked computes the difference after ceiling with specified precision. @@ -1605,7 +1605,7 @@ func (x Float64x4) CeilScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPD, CPU Feature: AVX512DQ +// Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x8) CeilScaledResidueMasked(prec uint8, mask Mask64x8) Float64x8 /* Compress */ @@ -1613,37 +1613,37 @@ func (x Float64x8) CeilScaledResidueMasked(prec uint8, mask Mask64x8) Float64x8 // Compress performs a compression on vector x using mask by // selecting elements as indicated by mask, and pack them to lower indexed elements. // -// Asm: VCOMPRESSPS, CPU Feature: AVX512F +// Asm: VCOMPRESSPS, CPU Feature: AVX512 func (x Float32x4) Compress(mask Mask32x4) Float32x4 // Compress performs a compression on vector x using mask by // selecting elements as indicated by mask, and pack them to lower indexed elements. 
// -// Asm: VCOMPRESSPS, CPU Feature: AVX512F +// Asm: VCOMPRESSPS, CPU Feature: AVX512 func (x Float32x8) Compress(mask Mask32x8) Float32x8 // Compress performs a compression on vector x using mask by // selecting elements as indicated by mask, and pack them to lower indexed elements. // -// Asm: VCOMPRESSPS, CPU Feature: AVX512F +// Asm: VCOMPRESSPS, CPU Feature: AVX512 func (x Float32x16) Compress(mask Mask32x16) Float32x16 // Compress performs a compression on vector x using mask by // selecting elements as indicated by mask, and pack them to lower indexed elements. // -// Asm: VCOMPRESSPD, CPU Feature: AVX512F +// Asm: VCOMPRESSPD, CPU Feature: AVX512 func (x Float64x2) Compress(mask Mask64x2) Float64x2 // Compress performs a compression on vector x using mask by // selecting elements as indicated by mask, and pack them to lower indexed elements. // -// Asm: VCOMPRESSPD, CPU Feature: AVX512F +// Asm: VCOMPRESSPD, CPU Feature: AVX512 func (x Float64x4) Compress(mask Mask64x4) Float64x4 // Compress performs a compression on vector x using mask by // selecting elements as indicated by mask, and pack them to lower indexed elements. // -// Asm: VCOMPRESSPD, CPU Feature: AVX512F +// Asm: VCOMPRESSPD, CPU Feature: AVX512 func (x Float64x8) Compress(mask Mask64x8) Float64x8 // Compress performs a compression on vector x using mask by @@ -1685,37 +1685,37 @@ func (x Int16x32) Compress(mask Mask16x32) Int16x32 // Compress performs a compression on vector x using mask by // selecting elements as indicated by mask, and pack them to lower indexed elements. // -// Asm: VPCOMPRESSD, CPU Feature: AVX512F +// Asm: VPCOMPRESSD, CPU Feature: AVX512 func (x Int32x4) Compress(mask Mask32x4) Int32x4 // Compress performs a compression on vector x using mask by // selecting elements as indicated by mask, and pack them to lower indexed elements. // -// Asm: VPCOMPRESSD, CPU Feature: AVX512F +// Asm: VPCOMPRESSD, CPU Feature: AVX512 func (x Int32x8) Compress(mask Mask32x8) Int32x8 // Compress performs a compression on vector x using mask by // selecting elements as indicated by mask, and pack them to lower indexed elements. // -// Asm: VPCOMPRESSD, CPU Feature: AVX512F +// Asm: VPCOMPRESSD, CPU Feature: AVX512 func (x Int32x16) Compress(mask Mask32x16) Int32x16 // Compress performs a compression on vector x using mask by // selecting elements as indicated by mask, and pack them to lower indexed elements. // -// Asm: VPCOMPRESSQ, CPU Feature: AVX512F +// Asm: VPCOMPRESSQ, CPU Feature: AVX512 func (x Int64x2) Compress(mask Mask64x2) Int64x2 // Compress performs a compression on vector x using mask by // selecting elements as indicated by mask, and pack them to lower indexed elements. // -// Asm: VPCOMPRESSQ, CPU Feature: AVX512F +// Asm: VPCOMPRESSQ, CPU Feature: AVX512 func (x Int64x4) Compress(mask Mask64x4) Int64x4 // Compress performs a compression on vector x using mask by // selecting elements as indicated by mask, and pack them to lower indexed elements. // -// Asm: VPCOMPRESSQ, CPU Feature: AVX512F +// Asm: VPCOMPRESSQ, CPU Feature: AVX512 func (x Int64x8) Compress(mask Mask64x8) Int64x8 // Compress performs a compression on vector x using mask by @@ -1757,37 +1757,37 @@ func (x Uint16x32) Compress(mask Mask16x32) Uint16x32 // Compress performs a compression on vector x using mask by // selecting elements as indicated by mask, and pack them to lower indexed elements. 
// -// Asm: VPCOMPRESSD, CPU Feature: AVX512F +// Asm: VPCOMPRESSD, CPU Feature: AVX512 func (x Uint32x4) Compress(mask Mask32x4) Uint32x4 // Compress performs a compression on vector x using mask by // selecting elements as indicated by mask, and pack them to lower indexed elements. // -// Asm: VPCOMPRESSD, CPU Feature: AVX512F +// Asm: VPCOMPRESSD, CPU Feature: AVX512 func (x Uint32x8) Compress(mask Mask32x8) Uint32x8 // Compress performs a compression on vector x using mask by // selecting elements as indicated by mask, and pack them to lower indexed elements. // -// Asm: VPCOMPRESSD, CPU Feature: AVX512F +// Asm: VPCOMPRESSD, CPU Feature: AVX512 func (x Uint32x16) Compress(mask Mask32x16) Uint32x16 // Compress performs a compression on vector x using mask by // selecting elements as indicated by mask, and pack them to lower indexed elements. // -// Asm: VPCOMPRESSQ, CPU Feature: AVX512F +// Asm: VPCOMPRESSQ, CPU Feature: AVX512 func (x Uint64x2) Compress(mask Mask64x2) Uint64x2 // Compress performs a compression on vector x using mask by // selecting elements as indicated by mask, and pack them to lower indexed elements. // -// Asm: VPCOMPRESSQ, CPU Feature: AVX512F +// Asm: VPCOMPRESSQ, CPU Feature: AVX512 func (x Uint64x4) Compress(mask Mask64x4) Uint64x4 // Compress performs a compression on vector x using mask by // selecting elements as indicated by mask, and pack them to lower indexed elements. // -// Asm: VPCOMPRESSQ, CPU Feature: AVX512F +// Asm: VPCOMPRESSQ, CPU Feature: AVX512 func (x Uint64x8) Compress(mask Mask64x8) Uint64x8 /* ConvertToInt32 */ @@ -1804,7 +1804,7 @@ func (x Float32x8) ConvertToInt32() Int32x8 // ConvertToInt32 converts element values to int32. // -// Asm: VCVTTPS2DQ, CPU Feature: AVX512F +// Asm: VCVTTPS2DQ, CPU Feature: AVX512 func (x Float32x16) ConvertToInt32() Int32x16 /* ConvertToInt32Masked */ @@ -1813,38 +1813,38 @@ func (x Float32x16) ConvertToInt32() Int32x16 // // This operation is applied selectively under a write mask. // -// Asm: VCVTTPS2DQ, CPU Feature: AVX512F +// Asm: VCVTTPS2DQ, CPU Feature: AVX512 func (x Float32x4) ConvertToInt32Masked(mask Mask32x4) Int32x4 // ConvertToInt32 converts element values to int32. // // This operation is applied selectively under a write mask. // -// Asm: VCVTTPS2DQ, CPU Feature: AVX512F +// Asm: VCVTTPS2DQ, CPU Feature: AVX512 func (x Float32x8) ConvertToInt32Masked(mask Mask32x8) Int32x8 // ConvertToInt32 converts element values to int32. // // This operation is applied selectively under a write mask. // -// Asm: VCVTTPS2DQ, CPU Feature: AVX512F +// Asm: VCVTTPS2DQ, CPU Feature: AVX512 func (x Float32x16) ConvertToInt32Masked(mask Mask32x16) Int32x16 /* ConvertToUint32 */ // ConvertToUint32Masked converts element values to uint32. // -// Asm: VCVTPS2UDQ, CPU Feature: AVX512F +// Asm: VCVTPS2UDQ, CPU Feature: AVX512 func (x Float32x4) ConvertToUint32() Uint32x4 // ConvertToUint32Masked converts element values to uint32. // -// Asm: VCVTPS2UDQ, CPU Feature: AVX512F +// Asm: VCVTPS2UDQ, CPU Feature: AVX512 func (x Float32x8) ConvertToUint32() Uint32x8 // ConvertToUint32Masked converts element values to uint32. // -// Asm: VCVTPS2UDQ, CPU Feature: AVX512F +// Asm: VCVTPS2UDQ, CPU Feature: AVX512 func (x Float32x16) ConvertToUint32() Uint32x16 /* ConvertToUint32Masked */ @@ -1853,21 +1853,21 @@ func (x Float32x16) ConvertToUint32() Uint32x16 // // This operation is applied selectively under a write mask. 
// -// Asm: VCVTPS2UDQ, CPU Feature: AVX512F +// Asm: VCVTPS2UDQ, CPU Feature: AVX512 func (x Float32x4) ConvertToUint32Masked(mask Mask32x4) Uint32x4 // ConvertToUint32Masked converts element values to uint32. // // This operation is applied selectively under a write mask. // -// Asm: VCVTPS2UDQ, CPU Feature: AVX512F +// Asm: VCVTPS2UDQ, CPU Feature: AVX512 func (x Float32x8) ConvertToUint32Masked(mask Mask32x8) Uint32x8 // ConvertToUint32Masked converts element values to uint32. // // This operation is applied selectively under a write mask. // -// Asm: VCVTPS2UDQ, CPU Feature: AVX512F +// Asm: VCVTPS2UDQ, CPU Feature: AVX512 func (x Float32x16) ConvertToUint32Masked(mask Mask32x16) Uint32x16 /* CopySign */ @@ -1922,7 +1922,7 @@ func (x Float32x8) Div(y Float32x8) Float32x8 // Div divides elements of two vectors. // -// Asm: VDIVPS, CPU Feature: AVX512F +// Asm: VDIVPS, CPU Feature: AVX512 func (x Float32x16) Div(y Float32x16) Float32x16 // Div divides elements of two vectors. @@ -1937,7 +1937,7 @@ func (x Float64x4) Div(y Float64x4) Float64x4 // Div divides elements of two vectors. // -// Asm: VDIVPD, CPU Feature: AVX512F +// Asm: VDIVPD, CPU Feature: AVX512 func (x Float64x8) Div(y Float64x8) Float64x8 /* DivMasked */ @@ -1946,42 +1946,42 @@ func (x Float64x8) Div(y Float64x8) Float64x8 // // This operation is applied selectively under a write mask. // -// Asm: VDIVPS, CPU Feature: AVX512F +// Asm: VDIVPS, CPU Feature: AVX512 func (x Float32x4) DivMasked(y Float32x4, mask Mask32x4) Float32x4 // DivMasked divides elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VDIVPS, CPU Feature: AVX512F +// Asm: VDIVPS, CPU Feature: AVX512 func (x Float32x8) DivMasked(y Float32x8, mask Mask32x8) Float32x8 // DivMasked divides elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VDIVPS, CPU Feature: AVX512F +// Asm: VDIVPS, CPU Feature: AVX512 func (x Float32x16) DivMasked(y Float32x16, mask Mask32x16) Float32x16 // DivMasked divides elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VDIVPD, CPU Feature: AVX512F +// Asm: VDIVPD, CPU Feature: AVX512 func (x Float64x2) DivMasked(y Float64x2, mask Mask64x2) Float64x2 // DivMasked divides elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VDIVPD, CPU Feature: AVX512F +// Asm: VDIVPD, CPU Feature: AVX512 func (x Float64x4) DivMasked(y Float64x4, mask Mask64x4) Float64x4 // DivMasked divides elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VDIVPD, CPU Feature: AVX512F +// Asm: VDIVPD, CPU Feature: AVX512 func (x Float64x8) DivMasked(y Float64x8, mask Mask64x8) Float64x8 /* DotProdPairs */ @@ -2001,7 +2001,7 @@ func (x Int16x16) DotProdPairs(y Int16x16) Int32x8 // DotProdPairs multiplies the elements and add the pairs together, // yielding a vector of half as many elements with twice the input element size. // -// Asm: VPMADDWD, CPU Feature: AVX512BW +// Asm: VPMADDWD, CPU Feature: AVX512 func (x Int16x32) DotProdPairs(y Int16x32) Int32x16 /* DotProdPairsMasked */ @@ -2011,7 +2011,7 @@ func (x Int16x32) DotProdPairs(y Int16x32) Int32x16 // // This operation is applied selectively under a write mask. 
// -// Asm: VPMADDWD, CPU Feature: AVX512BW +// Asm: VPMADDWD, CPU Feature: AVX512 func (x Int16x8) DotProdPairsMasked(y Int16x8, mask Mask16x8) Int32x4 // DotProdPairsMasked multiplies the elements and add the pairs together, @@ -2019,7 +2019,7 @@ func (x Int16x8) DotProdPairsMasked(y Int16x8, mask Mask16x8) Int32x4 // // This operation is applied selectively under a write mask. // -// Asm: VPMADDWD, CPU Feature: AVX512BW +// Asm: VPMADDWD, CPU Feature: AVX512 func (x Int16x16) DotProdPairsMasked(y Int16x16, mask Mask16x16) Int32x8 // DotProdPairsMasked multiplies the elements and add the pairs together, @@ -2027,7 +2027,7 @@ func (x Int16x16) DotProdPairsMasked(y Int16x16, mask Mask16x16) Int32x8 // // This operation is applied selectively under a write mask. // -// Asm: VPMADDWD, CPU Feature: AVX512BW +// Asm: VPMADDWD, CPU Feature: AVX512 func (x Int16x32) DotProdPairsMasked(y Int16x32, mask Mask16x32) Int32x16 /* DotProdPairsSaturated */ @@ -2047,7 +2047,7 @@ func (x Uint8x32) DotProdPairsSaturated(y Int8x32) Int16x16 // DotProdPairsSaturated multiplies the elements and add the pairs together with saturation, // yielding a vector of half as many elements with twice the input element size. // -// Asm: VPMADDUBSW, CPU Feature: AVX512BW +// Asm: VPMADDUBSW, CPU Feature: AVX512 func (x Uint8x64) DotProdPairsSaturated(y Int8x64) Int16x32 /* DotProdPairsSaturatedMasked */ @@ -2057,7 +2057,7 @@ func (x Uint8x64) DotProdPairsSaturated(y Int8x64) Int16x32 // // This operation is applied selectively under a write mask. // -// Asm: VPMADDUBSW, CPU Feature: AVX512BW +// Asm: VPMADDUBSW, CPU Feature: AVX512 func (x Uint8x16) DotProdPairsSaturatedMasked(y Int8x16, mask Mask16x8) Int16x8 // DotProdPairsSaturatedMasked multiplies the elements and add the pairs together with saturation, @@ -2065,7 +2065,7 @@ func (x Uint8x16) DotProdPairsSaturatedMasked(y Int8x16, mask Mask16x8) Int16x8 // // This operation is applied selectively under a write mask. // -// Asm: VPMADDUBSW, CPU Feature: AVX512BW +// Asm: VPMADDUBSW, CPU Feature: AVX512 func (x Uint8x32) DotProdPairsSaturatedMasked(y Int8x32, mask Mask16x16) Int16x16 // DotProdPairsSaturatedMasked multiplies the elements and add the pairs together with saturation, @@ -2073,7 +2073,7 @@ func (x Uint8x32) DotProdPairsSaturatedMasked(y Int8x32, mask Mask16x16) Int16x1 // // This operation is applied selectively under a write mask. // -// Asm: VPMADDUBSW, CPU Feature: AVX512BW +// Asm: VPMADDUBSW, CPU Feature: AVX512 func (x Uint8x64) DotProdPairsSaturatedMasked(y Int8x64, mask Mask16x32) Int16x32 /* Equal */ @@ -2090,7 +2090,7 @@ func (x Int8x32) Equal(y Int8x32) Mask8x32 // Equal compares for equality. // -// Asm: VPCMPEQB, CPU Feature: AVX512BW +// Asm: VPCMPEQB, CPU Feature: AVX512 func (x Int8x64) Equal(y Int8x64) Mask8x64 // Equal compares for equality. @@ -2105,7 +2105,7 @@ func (x Int16x16) Equal(y Int16x16) Mask16x16 // Equal compares for equality. // -// Asm: VPCMPEQW, CPU Feature: AVX512BW +// Asm: VPCMPEQW, CPU Feature: AVX512 func (x Int16x32) Equal(y Int16x32) Mask16x32 // Equal compares for equality. @@ -2120,7 +2120,7 @@ func (x Int32x8) Equal(y Int32x8) Mask32x8 // Equal compares for equality. // -// Asm: VPCMPEQD, CPU Feature: AVX512F +// Asm: VPCMPEQD, CPU Feature: AVX512 func (x Int32x16) Equal(y Int32x16) Mask32x16 // Equal compares for equality. @@ -2135,7 +2135,7 @@ func (x Int64x4) Equal(y Int64x4) Mask64x4 // Equal compares for equality. 
// -// Asm: VPCMPEQQ, CPU Feature: AVX512F +// Asm: VPCMPEQQ, CPU Feature: AVX512 func (x Int64x8) Equal(y Int64x8) Mask64x8 // Equal compares for equality. @@ -2150,7 +2150,7 @@ func (x Uint8x32) Equal(y Uint8x32) Mask8x32 // Equal compares for equality. // -// Asm: VPCMPEQB, CPU Feature: AVX512BW +// Asm: VPCMPEQB, CPU Feature: AVX512 func (x Uint8x64) Equal(y Uint8x64) Mask8x64 // Equal compares for equality. @@ -2165,7 +2165,7 @@ func (x Uint16x16) Equal(y Uint16x16) Mask16x16 // Equal compares for equality. // -// Asm: VPCMPEQW, CPU Feature: AVX512BW +// Asm: VPCMPEQW, CPU Feature: AVX512 func (x Uint16x32) Equal(y Uint16x32) Mask16x32 // Equal compares for equality. @@ -2180,7 +2180,7 @@ func (x Uint32x8) Equal(y Uint32x8) Mask32x8 // Equal compares for equality. // -// Asm: VPCMPEQD, CPU Feature: AVX512F +// Asm: VPCMPEQD, CPU Feature: AVX512 func (x Uint32x16) Equal(y Uint32x16) Mask32x16 // Equal compares for equality. @@ -2195,7 +2195,7 @@ func (x Uint64x4) Equal(y Uint64x4) Mask64x4 // Equal compares for equality. // -// Asm: VPCMPEQQ, CPU Feature: AVX512F +// Asm: VPCMPEQQ, CPU Feature: AVX512 func (x Uint64x8) Equal(y Uint64x8) Mask64x8 // Equal compares for equality. @@ -2210,7 +2210,7 @@ func (x Float32x8) Equal(y Float32x8) Mask32x8 // Equal compares for equality. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x16) Equal(y Float32x16) Mask32x16 // Equal compares for equality. @@ -2225,7 +2225,7 @@ func (x Float64x4) Equal(y Float64x4) Mask64x4 // Equal compares for equality. // -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x8) Equal(y Float64x8) Mask64x8 /* EqualMasked */ @@ -2234,210 +2234,210 @@ func (x Float64x8) Equal(y Float64x8) Mask64x8 // // This operation is applied selectively under a write mask. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x4) EqualMasked(y Float32x4, mask Mask32x4) Mask32x4 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x8) EqualMasked(y Float32x8, mask Mask32x8) Mask32x8 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x16) EqualMasked(y Float32x16, mask Mask32x16) Mask32x16 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x2) EqualMasked(y Float64x2, mask Mask64x2) Mask64x2 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x4) EqualMasked(y Float64x4, mask Mask64x4) Mask64x4 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x8) EqualMasked(y Float64x8, mask Mask64x8) Mask64x8 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x16) EqualMasked(y Int8x16, mask Mask8x16) Mask8x16 // EqualMasked compares for equality. 
// // This operation is applied selectively under a write mask. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x32) EqualMasked(y Int8x32, mask Mask8x32) Mask8x32 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x64) EqualMasked(y Int8x64, mask Mask8x64) Mask8x64 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x8) EqualMasked(y Int16x8, mask Mask16x8) Mask16x8 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x16) EqualMasked(y Int16x16, mask Mask16x16) Mask16x16 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x32) EqualMasked(y Int16x32, mask Mask16x32) Mask16x32 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x4) EqualMasked(y Int32x4, mask Mask32x4) Mask32x4 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x8) EqualMasked(y Int32x8, mask Mask32x8) Mask32x8 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x16) EqualMasked(y Int32x16, mask Mask32x16) Mask32x16 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x2) EqualMasked(y Int64x2, mask Mask64x2) Mask64x2 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x4) EqualMasked(y Int64x4, mask Mask64x4) Mask64x4 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x8) EqualMasked(y Int64x8, mask Mask64x8) Mask64x8 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x16) EqualMasked(y Uint8x16, mask Mask8x16) Mask8x16 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x32) EqualMasked(y Uint8x32, mask Mask8x32) Mask8x32 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x64) EqualMasked(y Uint8x64, mask Mask8x64) Mask8x64 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. 
// -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x8) EqualMasked(y Uint16x8, mask Mask16x8) Mask16x8 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x16) EqualMasked(y Uint16x16, mask Mask16x16) Mask16x16 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x32) EqualMasked(y Uint16x32, mask Mask16x32) Mask16x32 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x4) EqualMasked(y Uint32x4, mask Mask32x4) Mask32x4 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x8) EqualMasked(y Uint32x8, mask Mask32x8) Mask32x8 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x16) EqualMasked(y Uint32x16, mask Mask32x16) Mask32x16 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x2) EqualMasked(y Uint64x2, mask Mask64x2) Mask64x2 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x4) EqualMasked(y Uint64x4, mask Mask64x4) Mask64x4 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x8) EqualMasked(y Uint64x8, mask Mask64x8) Mask64x8 /* Expand */ @@ -2445,37 +2445,37 @@ func (x Uint64x8) EqualMasked(y Uint64x8, mask Mask64x8) Mask64x8 // Expand performs an expansion on a vector x whose elements are packed to lower parts. // The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. // -// Asm: VEXPANDPS, CPU Feature: AVX512F +// Asm: VEXPANDPS, CPU Feature: AVX512 func (x Float32x4) Expand(mask Mask32x4) Float32x4 // Expand performs an expansion on a vector x whose elements are packed to lower parts. // The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. // -// Asm: VEXPANDPS, CPU Feature: AVX512F +// Asm: VEXPANDPS, CPU Feature: AVX512 func (x Float32x8) Expand(mask Mask32x8) Float32x8 // Expand performs an expansion on a vector x whose elements are packed to lower parts. // The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. // -// Asm: VEXPANDPS, CPU Feature: AVX512F +// Asm: VEXPANDPS, CPU Feature: AVX512 func (x Float32x16) Expand(mask Mask32x16) Float32x16 // Expand performs an expansion on a vector x whose elements are packed to lower parts. // The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. 
// -// Asm: VEXPANDPD, CPU Feature: AVX512F +// Asm: VEXPANDPD, CPU Feature: AVX512 func (x Float64x2) Expand(mask Mask64x2) Float64x2 // Expand performs an expansion on a vector x whose elements are packed to lower parts. // The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. // -// Asm: VEXPANDPD, CPU Feature: AVX512F +// Asm: VEXPANDPD, CPU Feature: AVX512 func (x Float64x4) Expand(mask Mask64x4) Float64x4 // Expand performs an expansion on a vector x whose elements are packed to lower parts. // The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. // -// Asm: VEXPANDPD, CPU Feature: AVX512F +// Asm: VEXPANDPD, CPU Feature: AVX512 func (x Float64x8) Expand(mask Mask64x8) Float64x8 // Expand performs an expansion on a vector x whose elements are packed to lower parts. @@ -2517,37 +2517,37 @@ func (x Int16x32) Expand(mask Mask16x32) Int16x32 // Expand performs an expansion on a vector x whose elements are packed to lower parts. // The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. // -// Asm: VPEXPANDD, CPU Feature: AVX512F +// Asm: VPEXPANDD, CPU Feature: AVX512 func (x Int32x4) Expand(mask Mask32x4) Int32x4 // Expand performs an expansion on a vector x whose elements are packed to lower parts. // The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. // -// Asm: VPEXPANDD, CPU Feature: AVX512F +// Asm: VPEXPANDD, CPU Feature: AVX512 func (x Int32x8) Expand(mask Mask32x8) Int32x8 // Expand performs an expansion on a vector x whose elements are packed to lower parts. // The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. // -// Asm: VPEXPANDD, CPU Feature: AVX512F +// Asm: VPEXPANDD, CPU Feature: AVX512 func (x Int32x16) Expand(mask Mask32x16) Int32x16 // Expand performs an expansion on a vector x whose elements are packed to lower parts. // The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. // -// Asm: VPEXPANDQ, CPU Feature: AVX512F +// Asm: VPEXPANDQ, CPU Feature: AVX512 func (x Int64x2) Expand(mask Mask64x2) Int64x2 // Expand performs an expansion on a vector x whose elements are packed to lower parts. // The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. // -// Asm: VPEXPANDQ, CPU Feature: AVX512F +// Asm: VPEXPANDQ, CPU Feature: AVX512 func (x Int64x4) Expand(mask Mask64x4) Int64x4 // Expand performs an expansion on a vector x whose elements are packed to lower parts. // The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. // -// Asm: VPEXPANDQ, CPU Feature: AVX512F +// Asm: VPEXPANDQ, CPU Feature: AVX512 func (x Int64x8) Expand(mask Mask64x8) Int64x8 // Expand performs an expansion on a vector x whose elements are packed to lower parts. @@ -2589,37 +2589,37 @@ func (x Uint16x32) Expand(mask Mask16x32) Uint16x32 // Expand performs an expansion on a vector x whose elements are packed to lower parts. // The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. // -// Asm: VPEXPANDD, CPU Feature: AVX512F +// Asm: VPEXPANDD, CPU Feature: AVX512 func (x Uint32x4) Expand(mask Mask32x4) Uint32x4 // Expand performs an expansion on a vector x whose elements are packed to lower parts. 
// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. // -// Asm: VPEXPANDD, CPU Feature: AVX512F +// Asm: VPEXPANDD, CPU Feature: AVX512 func (x Uint32x8) Expand(mask Mask32x8) Uint32x8 // Expand performs an expansion on a vector x whose elements are packed to lower parts. // The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. // -// Asm: VPEXPANDD, CPU Feature: AVX512F +// Asm: VPEXPANDD, CPU Feature: AVX512 func (x Uint32x16) Expand(mask Mask32x16) Uint32x16 // Expand performs an expansion on a vector x whose elements are packed to lower parts. // The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. // -// Asm: VPEXPANDQ, CPU Feature: AVX512F +// Asm: VPEXPANDQ, CPU Feature: AVX512 func (x Uint64x2) Expand(mask Mask64x2) Uint64x2 // Expand performs an expansion on a vector x whose elements are packed to lower parts. // The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. // -// Asm: VPEXPANDQ, CPU Feature: AVX512F +// Asm: VPEXPANDQ, CPU Feature: AVX512 func (x Uint64x4) Expand(mask Mask64x4) Uint64x4 // Expand performs an expansion on a vector x whose elements are packed to lower parts. // The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. // -// Asm: VPEXPANDQ, CPU Feature: AVX512F +// Asm: VPEXPANDQ, CPU Feature: AVX512 func (x Uint64x8) Expand(mask Mask64x8) Uint64x8 /* Floor */ @@ -2650,42 +2650,42 @@ func (x Float64x4) Floor() Float64x4 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F +// Asm: VRNDSCALEPS, CPU Feature: AVX512 func (x Float32x4) FloorScaled(prec uint8) Float32x4 // FloorScaled rounds elements down with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F +// Asm: VRNDSCALEPS, CPU Feature: AVX512 func (x Float32x8) FloorScaled(prec uint8) Float32x8 // FloorScaled rounds elements down with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F +// Asm: VRNDSCALEPS, CPU Feature: AVX512 func (x Float32x16) FloorScaled(prec uint8) Float32x16 // FloorScaled rounds elements down with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F +// Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x2) FloorScaled(prec uint8) Float64x2 // FloorScaled rounds elements down with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F +// Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x4) FloorScaled(prec uint8) Float64x4 // FloorScaled rounds elements down with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// -// Asm: VRNDSCALEPD, CPU Feature: AVX512F +// Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x8) FloorScaled(prec uint8) Float64x8 /* FloorScaledMasked */ @@ -2696,7 +2696,7 @@ func (x Float64x8) FloorScaled(prec uint8) Float64x8 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F +// Asm: VRNDSCALEPS, CPU Feature: AVX512 func (x Float32x4) FloorScaledMasked(prec uint8, mask Mask32x4) Float32x4 // FloorScaledMasked rounds elements down with specified precision. @@ -2705,7 +2705,7 @@ func (x Float32x4) FloorScaledMasked(prec uint8, mask Mask32x4) Float32x4 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F +// Asm: VRNDSCALEPS, CPU Feature: AVX512 func (x Float32x8) FloorScaledMasked(prec uint8, mask Mask32x8) Float32x8 // FloorScaledMasked rounds elements down with specified precision. @@ -2714,7 +2714,7 @@ func (x Float32x8) FloorScaledMasked(prec uint8, mask Mask32x8) Float32x8 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F +// Asm: VRNDSCALEPS, CPU Feature: AVX512 func (x Float32x16) FloorScaledMasked(prec uint8, mask Mask32x16) Float32x16 // FloorScaledMasked rounds elements down with specified precision. @@ -2723,7 +2723,7 @@ func (x Float32x16) FloorScaledMasked(prec uint8, mask Mask32x16) Float32x16 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F +// Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x2) FloorScaledMasked(prec uint8, mask Mask64x2) Float64x2 // FloorScaledMasked rounds elements down with specified precision. @@ -2732,7 +2732,7 @@ func (x Float64x2) FloorScaledMasked(prec uint8, mask Mask64x2) Float64x2 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F +// Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x4) FloorScaledMasked(prec uint8, mask Mask64x4) Float64x4 // FloorScaledMasked rounds elements down with specified precision. @@ -2741,7 +2741,7 @@ func (x Float64x4) FloorScaledMasked(prec uint8, mask Mask64x4) Float64x4 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F +// Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x8) FloorScaledMasked(prec uint8, mask Mask64x8) Float64x8 /* FloorScaledResidue */ @@ -2750,42 +2750,42 @@ func (x Float64x8) FloorScaledMasked(prec uint8, mask Mask64x8) Float64x8 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPS, CPU Feature: AVX512DQ +// Asm: VREDUCEPS, CPU Feature: AVX512 func (x Float32x4) FloorScaledResidue(prec uint8) Float32x4 // FloorScaledResidue computes the difference after flooring with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// -// Asm: VREDUCEPS, CPU Feature: AVX512DQ +// Asm: VREDUCEPS, CPU Feature: AVX512 func (x Float32x8) FloorScaledResidue(prec uint8) Float32x8 // FloorScaledResidue computes the difference after flooring with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPS, CPU Feature: AVX512DQ +// Asm: VREDUCEPS, CPU Feature: AVX512 func (x Float32x16) FloorScaledResidue(prec uint8) Float32x16 // FloorScaledResidue computes the difference after flooring with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPD, CPU Feature: AVX512DQ +// Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x2) FloorScaledResidue(prec uint8) Float64x2 // FloorScaledResidue computes the difference after flooring with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPD, CPU Feature: AVX512DQ +// Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x4) FloorScaledResidue(prec uint8) Float64x4 // FloorScaledResidue computes the difference after flooring with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPD, CPU Feature: AVX512DQ +// Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x8) FloorScaledResidue(prec uint8) Float64x8 /* FloorScaledResidueMasked */ @@ -2796,7 +2796,7 @@ func (x Float64x8) FloorScaledResidue(prec uint8) Float64x8 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPS, CPU Feature: AVX512DQ +// Asm: VREDUCEPS, CPU Feature: AVX512 func (x Float32x4) FloorScaledResidueMasked(prec uint8, mask Mask32x4) Float32x4 // FloorScaledResidueMasked computes the difference after flooring with specified precision. @@ -2805,7 +2805,7 @@ func (x Float32x4) FloorScaledResidueMasked(prec uint8, mask Mask32x4) Float32x4 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPS, CPU Feature: AVX512DQ +// Asm: VREDUCEPS, CPU Feature: AVX512 func (x Float32x8) FloorScaledResidueMasked(prec uint8, mask Mask32x8) Float32x8 // FloorScaledResidueMasked computes the difference after flooring with specified precision. @@ -2814,7 +2814,7 @@ func (x Float32x8) FloorScaledResidueMasked(prec uint8, mask Mask32x8) Float32x8 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPS, CPU Feature: AVX512DQ +// Asm: VREDUCEPS, CPU Feature: AVX512 func (x Float32x16) FloorScaledResidueMasked(prec uint8, mask Mask32x16) Float32x16 // FloorScaledResidueMasked computes the difference after flooring with specified precision. @@ -2823,7 +2823,7 @@ func (x Float32x16) FloorScaledResidueMasked(prec uint8, mask Mask32x16) Float32 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPD, CPU Feature: AVX512DQ +// Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x2) FloorScaledResidueMasked(prec uint8, mask Mask64x2) Float64x2 // FloorScaledResidueMasked computes the difference after flooring with specified precision. 
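The FloorScaled and FloorScaledResidue families split a value into a rounded-down part and the fraction that was dropped. A minimal sketch of how the two compose, assuming the package is imported as simd (the import path is not fixed by this patch) and assuming prec counts fractional bits in the immediate sense of VRNDSCALE/VREDUCE:

package example

import "simd" // assumed import path; the experiment may expose it elsewhere

func splitQuarters(x simd.Float32x8) (down, rest simd.Float32x8) {
	// prec = 2 is assumed to mean two fractional bits, i.e. a 0.25 step:
	// down holds the rounded-down values, rest holds what was dropped.
	const prec = 2
	down = x.FloorScaled(prec)
	rest = x.FloorScaledResidue(prec)
	return down, rest
}

Keeping prec a constant matters here: per the doc comments, a non-constant precision is lowered to a jump table instead of a single immediate-form instruction.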
@@ -2832,7 +2832,7 @@ func (x Float64x2) FloorScaledResidueMasked(prec uint8, mask Mask64x2) Float64x2 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPD, CPU Feature: AVX512DQ +// Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x4) FloorScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4 // FloorScaledResidueMasked computes the difference after flooring with specified precision. @@ -2841,7 +2841,7 @@ func (x Float64x4) FloorScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPD, CPU Feature: AVX512DQ +// Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x8) FloorScaledResidueMasked(prec uint8, mask Mask64x8) Float64x8 /* GaloisFieldAffineTransform */ @@ -3042,14 +3042,14 @@ func (x Uint8x64) GaloisFieldMulMasked(y Uint8x64, mask Mask8x64) Uint8x64 // // index results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPEXTRB, CPU Feature: AVX512BW +// Asm: VPEXTRB, CPU Feature: AVX512 func (x Int8x16) GetElem(index uint8) int8 // GetElem retrieves a single constant-indexed element's value. // // index results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPEXTRW, CPU Feature: AVX512BW +// Asm: VPEXTRW, CPU Feature: AVX512 func (x Int16x8) GetElem(index uint8) int16 // GetElem retrieves a single constant-indexed element's value. @@ -3070,14 +3070,14 @@ func (x Int64x2) GetElem(index uint8) int64 // // index results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPEXTRB, CPU Feature: AVX512BW +// Asm: VPEXTRB, CPU Feature: AVX512 func (x Uint8x16) GetElem(index uint8) uint8 // GetElem retrieves a single constant-indexed element's value. // // index results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPEXTRW, CPU Feature: AVX512BW +// Asm: VPEXTRW, CPU Feature: AVX512 func (x Uint16x8) GetElem(index uint8) uint16 // GetElem retrieves a single constant-indexed element's value. @@ -3103,7 +3103,7 @@ func (x Float32x8) GetHi() Float32x4 // GetHi returns the upper half of x. // -// Asm: VEXTRACTF64X4, CPU Feature: AVX512F +// Asm: VEXTRACTF64X4, CPU Feature: AVX512 func (x Float32x16) GetHi() Float32x8 // GetHi returns the upper half of x. @@ -3113,7 +3113,7 @@ func (x Float64x4) GetHi() Float64x2 // GetHi returns the upper half of x. // -// Asm: VEXTRACTF64X4, CPU Feature: AVX512F +// Asm: VEXTRACTF64X4, CPU Feature: AVX512 func (x Float64x8) GetHi() Float64x4 // GetHi returns the upper half of x. @@ -3123,7 +3123,7 @@ func (x Int8x32) GetHi() Int8x16 // GetHi returns the upper half of x. // -// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +// Asm: VEXTRACTI64X4, CPU Feature: AVX512 func (x Int8x64) GetHi() Int8x32 // GetHi returns the upper half of x. @@ -3133,7 +3133,7 @@ func (x Int16x16) GetHi() Int16x8 // GetHi returns the upper half of x. // -// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +// Asm: VEXTRACTI64X4, CPU Feature: AVX512 func (x Int16x32) GetHi() Int16x16 // GetHi returns the upper half of x. @@ -3143,7 +3143,7 @@ func (x Int32x8) GetHi() Int32x4 // GetHi returns the upper half of x. 
// -// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +// Asm: VEXTRACTI64X4, CPU Feature: AVX512 func (x Int32x16) GetHi() Int32x8 // GetHi returns the upper half of x. @@ -3153,7 +3153,7 @@ func (x Int64x4) GetHi() Int64x2 // GetHi returns the upper half of x. // -// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +// Asm: VEXTRACTI64X4, CPU Feature: AVX512 func (x Int64x8) GetHi() Int64x4 // GetHi returns the upper half of x. @@ -3163,7 +3163,7 @@ func (x Uint8x32) GetHi() Uint8x16 // GetHi returns the upper half of x. // -// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +// Asm: VEXTRACTI64X4, CPU Feature: AVX512 func (x Uint8x64) GetHi() Uint8x32 // GetHi returns the upper half of x. @@ -3173,7 +3173,7 @@ func (x Uint16x16) GetHi() Uint16x8 // GetHi returns the upper half of x. // -// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +// Asm: VEXTRACTI64X4, CPU Feature: AVX512 func (x Uint16x32) GetHi() Uint16x16 // GetHi returns the upper half of x. @@ -3183,7 +3183,7 @@ func (x Uint32x8) GetHi() Uint32x4 // GetHi returns the upper half of x. // -// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +// Asm: VEXTRACTI64X4, CPU Feature: AVX512 func (x Uint32x16) GetHi() Uint32x8 // GetHi returns the upper half of x. @@ -3193,7 +3193,7 @@ func (x Uint64x4) GetHi() Uint64x2 // GetHi returns the upper half of x. // -// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +// Asm: VEXTRACTI64X4, CPU Feature: AVX512 func (x Uint64x8) GetHi() Uint64x4 /* GetLo */ @@ -3205,7 +3205,7 @@ func (x Float32x8) GetLo() Float32x4 // GetLo returns the lower half of x. // -// Asm: VEXTRACTF64X4, CPU Feature: AVX512F +// Asm: VEXTRACTF64X4, CPU Feature: AVX512 func (x Float32x16) GetLo() Float32x8 // GetLo returns the lower half of x. @@ -3215,7 +3215,7 @@ func (x Float64x4) GetLo() Float64x2 // GetLo returns the lower half of x. // -// Asm: VEXTRACTF64X4, CPU Feature: AVX512F +// Asm: VEXTRACTF64X4, CPU Feature: AVX512 func (x Float64x8) GetLo() Float64x4 // GetLo returns the lower half of x. @@ -3225,7 +3225,7 @@ func (x Int8x32) GetLo() Int8x16 // GetLo returns the lower half of x. // -// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +// Asm: VEXTRACTI64X4, CPU Feature: AVX512 func (x Int8x64) GetLo() Int8x32 // GetLo returns the lower half of x. @@ -3235,7 +3235,7 @@ func (x Int16x16) GetLo() Int16x8 // GetLo returns the lower half of x. // -// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +// Asm: VEXTRACTI64X4, CPU Feature: AVX512 func (x Int16x32) GetLo() Int16x16 // GetLo returns the lower half of x. @@ -3245,7 +3245,7 @@ func (x Int32x8) GetLo() Int32x4 // GetLo returns the lower half of x. // -// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +// Asm: VEXTRACTI64X4, CPU Feature: AVX512 func (x Int32x16) GetLo() Int32x8 // GetLo returns the lower half of x. @@ -3255,7 +3255,7 @@ func (x Int64x4) GetLo() Int64x2 // GetLo returns the lower half of x. // -// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +// Asm: VEXTRACTI64X4, CPU Feature: AVX512 func (x Int64x8) GetLo() Int64x4 // GetLo returns the lower half of x. @@ -3265,7 +3265,7 @@ func (x Uint8x32) GetLo() Uint8x16 // GetLo returns the lower half of x. // -// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +// Asm: VEXTRACTI64X4, CPU Feature: AVX512 func (x Uint8x64) GetLo() Uint8x32 // GetLo returns the lower half of x. @@ -3275,7 +3275,7 @@ func (x Uint16x16) GetLo() Uint16x8 // GetLo returns the lower half of x. // -// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +// Asm: VEXTRACTI64X4, CPU Feature: AVX512 func (x Uint16x32) GetLo() Uint16x16 // GetLo returns the lower half of x. 
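GetElem, GetHi, and GetLo are the scalar and half-vector accessors. A short sketch using only the signatures shown above, with the same assumed simd import as in the earlier sketch:

func splitAndPeek(x simd.Float32x16, b simd.Int8x16) (lo, hi simd.Float32x8, first int8) {
	lo = x.GetLo()       // lower 256-bit half of the 512-bit vector
	hi = x.GetHi()       // upper 256-bit half
	first = b.GetElem(0) // constant index, so no jump table is generated
	return lo, hi, first
}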
@@ -3285,7 +3285,7 @@ func (x Uint32x8) GetLo() Uint32x4 // GetLo returns the lower half of x. // -// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +// Asm: VEXTRACTI64X4, CPU Feature: AVX512 func (x Uint32x16) GetLo() Uint32x8 // GetLo returns the lower half of x. @@ -3295,7 +3295,7 @@ func (x Uint64x4) GetLo() Uint64x2 // GetLo returns the lower half of x. // -// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +// Asm: VEXTRACTI64X4, CPU Feature: AVX512 func (x Uint64x8) GetLo() Uint64x4 /* Greater */ @@ -3312,7 +3312,7 @@ func (x Int8x32) Greater(y Int8x32) Mask8x32 // Greater compares for greater than. // -// Asm: VPCMPGTB, CPU Feature: AVX512BW +// Asm: VPCMPGTB, CPU Feature: AVX512 func (x Int8x64) Greater(y Int8x64) Mask8x64 // Greater compares for greater than. @@ -3327,7 +3327,7 @@ func (x Int16x16) Greater(y Int16x16) Mask16x16 // Greater compares for greater than. // -// Asm: VPCMPGTW, CPU Feature: AVX512BW +// Asm: VPCMPGTW, CPU Feature: AVX512 func (x Int16x32) Greater(y Int16x32) Mask16x32 // Greater compares for greater than. @@ -3342,7 +3342,7 @@ func (x Int32x8) Greater(y Int32x8) Mask32x8 // Greater compares for greater than. // -// Asm: VPCMPGTD, CPU Feature: AVX512F +// Asm: VPCMPGTD, CPU Feature: AVX512 func (x Int32x16) Greater(y Int32x16) Mask32x16 // Greater compares for greater than. @@ -3357,7 +3357,7 @@ func (x Int64x4) Greater(y Int64x4) Mask64x4 // Greater compares for greater than. // -// Asm: VPCMPGTQ, CPU Feature: AVX512F +// Asm: VPCMPGTQ, CPU Feature: AVX512 func (x Int64x8) Greater(y Int64x8) Mask64x8 // Greater compares for greater than. @@ -3372,7 +3372,7 @@ func (x Float32x8) Greater(y Float32x8) Mask32x8 // Greater compares for greater than. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x16) Greater(y Float32x16) Mask32x16 // Greater compares for greater than. @@ -3387,67 +3387,67 @@ func (x Float64x4) Greater(y Float64x4) Mask64x4 // Greater compares for greater than. // -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x8) Greater(y Float64x8) Mask64x8 // Greater compares for greater than. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x16) Greater(y Uint8x16) Mask8x16 // Greater compares for greater than. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x32) Greater(y Uint8x32) Mask8x32 // Greater compares for greater than. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x64) Greater(y Uint8x64) Mask8x64 // Greater compares for greater than. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x8) Greater(y Uint16x8) Mask16x8 // Greater compares for greater than. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x16) Greater(y Uint16x16) Mask16x16 // Greater compares for greater than. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x32) Greater(y Uint16x32) Mask16x32 // Greater compares for greater than. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x4) Greater(y Uint32x4) Mask32x4 // Greater compares for greater than. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x8) Greater(y Uint32x8) Mask32x8 // Greater compares for greater than. 
// -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x16) Greater(y Uint32x16) Mask32x16 // Greater compares for greater than. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x2) Greater(y Uint64x2) Mask64x2 // Greater compares for greater than. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x4) Greater(y Uint64x4) Mask64x4 // Greater compares for greater than. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x8) Greater(y Uint64x8) Mask64x8 /* GreaterEqual */ @@ -3464,7 +3464,7 @@ func (x Float32x8) GreaterEqual(y Float32x8) Mask32x8 // GreaterEqual compares for greater than or equal. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x16) GreaterEqual(y Float32x16) Mask32x16 // GreaterEqual compares for greater than or equal. @@ -3479,127 +3479,127 @@ func (x Float64x4) GreaterEqual(y Float64x4) Mask64x4 // GreaterEqual compares for greater than or equal. // -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x8) GreaterEqual(y Float64x8) Mask64x8 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x16) GreaterEqual(y Int8x16) Mask8x16 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x32) GreaterEqual(y Int8x32) Mask8x32 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x64) GreaterEqual(y Int8x64) Mask8x64 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x8) GreaterEqual(y Int16x8) Mask16x8 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x16) GreaterEqual(y Int16x16) Mask16x16 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x32) GreaterEqual(y Int16x32) Mask16x32 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x4) GreaterEqual(y Int32x4) Mask32x4 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x8) GreaterEqual(y Int32x8) Mask32x8 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x16) GreaterEqual(y Int32x16) Mask32x16 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x2) GreaterEqual(y Int64x2) Mask64x2 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x4) GreaterEqual(y Int64x4) Mask64x4 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x8) GreaterEqual(y Int64x8) Mask64x8 // GreaterEqual compares for greater than or equal. 
// -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x16) GreaterEqual(y Uint8x16) Mask8x16 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x32) GreaterEqual(y Uint8x32) Mask8x32 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x64) GreaterEqual(y Uint8x64) Mask8x64 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x8) GreaterEqual(y Uint16x8) Mask16x8 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x16) GreaterEqual(y Uint16x16) Mask16x16 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x32) GreaterEqual(y Uint16x32) Mask16x32 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x4) GreaterEqual(y Uint32x4) Mask32x4 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x8) GreaterEqual(y Uint32x8) Mask32x8 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x16) GreaterEqual(y Uint32x16) Mask32x16 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x2) GreaterEqual(y Uint64x2) Mask64x2 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x4) GreaterEqual(y Uint64x4) Mask64x4 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x8) GreaterEqual(y Uint64x8) Mask64x8 /* GreaterEqualMasked */ @@ -3608,210 +3608,210 @@ func (x Uint64x8) GreaterEqual(y Uint64x8) Mask64x8 // // This operation is applied selectively under a write mask. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x4) GreaterEqualMasked(y Float32x4, mask Mask32x4) Mask32x4 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x8) GreaterEqualMasked(y Float32x8, mask Mask32x8) Mask32x8 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x16) GreaterEqualMasked(y Float32x16, mask Mask32x16) Mask32x16 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x2) GreaterEqualMasked(y Float64x2, mask Mask64x2) Mask64x2 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. 
// -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x4) GreaterEqualMasked(y Float64x4, mask Mask64x4) Mask64x4 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x8) GreaterEqualMasked(y Float64x8, mask Mask64x8) Mask64x8 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x16) GreaterEqualMasked(y Int8x16, mask Mask8x16) Mask8x16 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x32) GreaterEqualMasked(y Int8x32, mask Mask8x32) Mask8x32 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x64) GreaterEqualMasked(y Int8x64, mask Mask8x64) Mask8x64 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x8) GreaterEqualMasked(y Int16x8, mask Mask16x8) Mask16x8 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x16) GreaterEqualMasked(y Int16x16, mask Mask16x16) Mask16x16 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x32) GreaterEqualMasked(y Int16x32, mask Mask16x32) Mask16x32 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x4) GreaterEqualMasked(y Int32x4, mask Mask32x4) Mask32x4 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x8) GreaterEqualMasked(y Int32x8, mask Mask32x8) Mask32x8 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x16) GreaterEqualMasked(y Int32x16, mask Mask32x16) Mask32x16 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x2) GreaterEqualMasked(y Int64x2, mask Mask64x2) Mask64x2 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x4) GreaterEqualMasked(y Int64x4, mask Mask64x4) Mask64x4 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. 
// -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x8) GreaterEqualMasked(y Int64x8, mask Mask64x8) Mask64x8 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x16) GreaterEqualMasked(y Uint8x16, mask Mask8x16) Mask8x16 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x32) GreaterEqualMasked(y Uint8x32, mask Mask8x32) Mask8x32 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x64) GreaterEqualMasked(y Uint8x64, mask Mask8x64) Mask8x64 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x8) GreaterEqualMasked(y Uint16x8, mask Mask16x8) Mask16x8 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x16) GreaterEqualMasked(y Uint16x16, mask Mask16x16) Mask16x16 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x32) GreaterEqualMasked(y Uint16x32, mask Mask16x32) Mask16x32 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x4) GreaterEqualMasked(y Uint32x4, mask Mask32x4) Mask32x4 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x8) GreaterEqualMasked(y Uint32x8, mask Mask32x8) Mask32x8 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x16) GreaterEqualMasked(y Uint32x16, mask Mask32x16) Mask32x16 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x2) GreaterEqualMasked(y Uint64x2, mask Mask64x2) Mask64x2 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x4) GreaterEqualMasked(y Uint64x4, mask Mask64x4) Mask64x4 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. 
// -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x8) GreaterEqualMasked(y Uint64x8, mask Mask64x8) Mask64x8 /* GreaterMasked */ @@ -3820,210 +3820,210 @@ func (x Uint64x8) GreaterEqualMasked(y Uint64x8, mask Mask64x8) Mask64x8 // // This operation is applied selectively under a write mask. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x4) GreaterMasked(y Float32x4, mask Mask32x4) Mask32x4 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x8) GreaterMasked(y Float32x8, mask Mask32x8) Mask32x8 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x16) GreaterMasked(y Float32x16, mask Mask32x16) Mask32x16 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x2) GreaterMasked(y Float64x2, mask Mask64x2) Mask64x2 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x4) GreaterMasked(y Float64x4, mask Mask64x4) Mask64x4 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x8) GreaterMasked(y Float64x8, mask Mask64x8) Mask64x8 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x16) GreaterMasked(y Int8x16, mask Mask8x16) Mask8x16 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x32) GreaterMasked(y Int8x32, mask Mask8x32) Mask8x32 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x64) GreaterMasked(y Int8x64, mask Mask8x64) Mask8x64 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x8) GreaterMasked(y Int16x8, mask Mask16x8) Mask16x8 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x16) GreaterMasked(y Int16x16, mask Mask16x16) Mask16x16 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x32) GreaterMasked(y Int16x32, mask Mask16x32) Mask16x32 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. 
// -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x4) GreaterMasked(y Int32x4, mask Mask32x4) Mask32x4 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x8) GreaterMasked(y Int32x8, mask Mask32x8) Mask32x8 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x16) GreaterMasked(y Int32x16, mask Mask32x16) Mask32x16 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x2) GreaterMasked(y Int64x2, mask Mask64x2) Mask64x2 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x4) GreaterMasked(y Int64x4, mask Mask64x4) Mask64x4 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x8) GreaterMasked(y Int64x8, mask Mask64x8) Mask64x8 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x16) GreaterMasked(y Uint8x16, mask Mask8x16) Mask8x16 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x32) GreaterMasked(y Uint8x32, mask Mask8x32) Mask8x32 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x64) GreaterMasked(y Uint8x64, mask Mask8x64) Mask8x64 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x8) GreaterMasked(y Uint16x8, mask Mask16x8) Mask16x8 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x16) GreaterMasked(y Uint16x16, mask Mask16x16) Mask16x16 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x32) GreaterMasked(y Uint16x32, mask Mask16x32) Mask16x32 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x4) GreaterMasked(y Uint32x4, mask Mask32x4) Mask32x4 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x8) GreaterMasked(y Uint32x8, mask Mask32x8) Mask32x8 // GreaterMasked compares for greater than. 
// // This operation is applied selectively under a write mask. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x16) GreaterMasked(y Uint32x16, mask Mask32x16) Mask32x16 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x2) GreaterMasked(y Uint64x2, mask Mask64x2) Mask64x2 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x4) GreaterMasked(y Uint64x4, mask Mask64x4) Mask64x4 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x8) GreaterMasked(y Uint64x8, mask Mask64x8) Mask64x8 /* IsNan */ @@ -4040,7 +4040,7 @@ func (x Float32x8) IsNan(y Float32x8) Mask32x8 // IsNan checks if elements are NaN. Use as x.IsNan(x). // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x16) IsNan(y Float32x16) Mask32x16 // IsNan checks if elements are NaN. Use as x.IsNan(x). @@ -4055,7 +4055,7 @@ func (x Float64x4) IsNan(y Float64x4) Mask64x4 // IsNan checks if elements are NaN. Use as x.IsNan(x). // -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x8) IsNan(y Float64x8) Mask64x8 /* IsNanMasked */ @@ -4064,42 +4064,42 @@ func (x Float64x8) IsNan(y Float64x8) Mask64x8 // // This operation is applied selectively under a write mask. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x4) IsNanMasked(y Float32x4, mask Mask32x4) Mask32x4 // IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // // This operation is applied selectively under a write mask. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x8) IsNanMasked(y Float32x8, mask Mask32x8) Mask32x8 // IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // // This operation is applied selectively under a write mask. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x16) IsNanMasked(y Float32x16, mask Mask32x16) Mask32x16 // IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // // This operation is applied selectively under a write mask. // -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x2) IsNanMasked(y Float64x2, mask Mask64x2) Mask64x2 // IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // // This operation is applied selectively under a write mask. // -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x4) IsNanMasked(y Float64x4, mask Mask64x4) Mask64x4 // IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // // This operation is applied selectively under a write mask. // -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x8) IsNanMasked(y Float64x8, mask Mask64x8) Mask64x8 /* Less */ @@ -4116,7 +4116,7 @@ func (x Float32x8) Less(y Float32x8) Mask32x8 // Less compares for less than. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x16) Less(y Float32x16) Mask32x16 // Less compares for less than. 
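All of the comparison families above return mask vectors rather than booleans, and each has a Masked variant that evaluates only the lanes enabled by an input mask. A small sketch combining them, again under the assumed simd import:

func compareLanes(x, y simd.Float32x16, m simd.Mask32x16) (gt, ge, nan simd.Mask32x16) {
	gt = x.GreaterMasked(y, m) // greater-than, only in lanes selected by m
	ge = x.GreaterEqual(y)     // greater-or-equal across all lanes
	nan = x.IsNan(x)           // NaN lanes, invoked as the doc comment suggests
	return gt, ge, nan
}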
@@ -4131,127 +4131,127 @@ func (x Float64x4) Less(y Float64x4) Mask64x4 // Less compares for less than. // -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x8) Less(y Float64x8) Mask64x8 // Less compares for less than. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x16) Less(y Int8x16) Mask8x16 // Less compares for less than. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x32) Less(y Int8x32) Mask8x32 // Less compares for less than. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x64) Less(y Int8x64) Mask8x64 // Less compares for less than. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x8) Less(y Int16x8) Mask16x8 // Less compares for less than. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x16) Less(y Int16x16) Mask16x16 // Less compares for less than. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x32) Less(y Int16x32) Mask16x32 // Less compares for less than. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x4) Less(y Int32x4) Mask32x4 // Less compares for less than. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x8) Less(y Int32x8) Mask32x8 // Less compares for less than. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x16) Less(y Int32x16) Mask32x16 // Less compares for less than. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x2) Less(y Int64x2) Mask64x2 // Less compares for less than. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x4) Less(y Int64x4) Mask64x4 // Less compares for less than. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x8) Less(y Int64x8) Mask64x8 // Less compares for less than. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x16) Less(y Uint8x16) Mask8x16 // Less compares for less than. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x32) Less(y Uint8x32) Mask8x32 // Less compares for less than. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x64) Less(y Uint8x64) Mask8x64 // Less compares for less than. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x8) Less(y Uint16x8) Mask16x8 // Less compares for less than. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x16) Less(y Uint16x16) Mask16x16 // Less compares for less than. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x32) Less(y Uint16x32) Mask16x32 // Less compares for less than. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x4) Less(y Uint32x4) Mask32x4 // Less compares for less than. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x8) Less(y Uint32x8) Mask32x8 // Less compares for less than. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x16) Less(y Uint32x16) Mask32x16 // Less compares for less than. 
// -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x2) Less(y Uint64x2) Mask64x2 // Less compares for less than. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x4) Less(y Uint64x4) Mask64x4 // Less compares for less than. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x8) Less(y Uint64x8) Mask64x8 /* LessEqual */ @@ -4268,7 +4268,7 @@ func (x Float32x8) LessEqual(y Float32x8) Mask32x8 // LessEqual compares for less than or equal. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x16) LessEqual(y Float32x16) Mask32x16 // LessEqual compares for less than or equal. @@ -4283,127 +4283,127 @@ func (x Float64x4) LessEqual(y Float64x4) Mask64x4 // LessEqual compares for less than or equal. // -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x8) LessEqual(y Float64x8) Mask64x8 // LessEqual compares for less than or equal. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x16) LessEqual(y Int8x16) Mask8x16 // LessEqual compares for less than or equal. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x32) LessEqual(y Int8x32) Mask8x32 // LessEqual compares for less than or equal. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x64) LessEqual(y Int8x64) Mask8x64 // LessEqual compares for less than or equal. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x8) LessEqual(y Int16x8) Mask16x8 // LessEqual compares for less than or equal. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x16) LessEqual(y Int16x16) Mask16x16 // LessEqual compares for less than or equal. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x32) LessEqual(y Int16x32) Mask16x32 // LessEqual compares for less than or equal. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x4) LessEqual(y Int32x4) Mask32x4 // LessEqual compares for less than or equal. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x8) LessEqual(y Int32x8) Mask32x8 // LessEqual compares for less than or equal. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x16) LessEqual(y Int32x16) Mask32x16 // LessEqual compares for less than or equal. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x2) LessEqual(y Int64x2) Mask64x2 // LessEqual compares for less than or equal. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x4) LessEqual(y Int64x4) Mask64x4 // LessEqual compares for less than or equal. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x8) LessEqual(y Int64x8) Mask64x8 // LessEqual compares for less than or equal. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x16) LessEqual(y Uint8x16) Mask8x16 // LessEqual compares for less than or equal. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x32) LessEqual(y Uint8x32) Mask8x32 // LessEqual compares for less than or equal. 
// -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x64) LessEqual(y Uint8x64) Mask8x64 // LessEqual compares for less than or equal. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x8) LessEqual(y Uint16x8) Mask16x8 // LessEqual compares for less than or equal. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x16) LessEqual(y Uint16x16) Mask16x16 // LessEqual compares for less than or equal. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x32) LessEqual(y Uint16x32) Mask16x32 // LessEqual compares for less than or equal. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x4) LessEqual(y Uint32x4) Mask32x4 // LessEqual compares for less than or equal. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x8) LessEqual(y Uint32x8) Mask32x8 // LessEqual compares for less than or equal. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x16) LessEqual(y Uint32x16) Mask32x16 // LessEqual compares for less than or equal. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x2) LessEqual(y Uint64x2) Mask64x2 // LessEqual compares for less than or equal. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x4) LessEqual(y Uint64x4) Mask64x4 // LessEqual compares for less than or equal. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x8) LessEqual(y Uint64x8) Mask64x8 /* LessEqualMasked */ @@ -4412,210 +4412,210 @@ func (x Uint64x8) LessEqual(y Uint64x8) Mask64x8 // // This operation is applied selectively under a write mask. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x4) LessEqualMasked(y Float32x4, mask Mask32x4) Mask32x4 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x8) LessEqualMasked(y Float32x8, mask Mask32x8) Mask32x8 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x16) LessEqualMasked(y Float32x16, mask Mask32x16) Mask32x16 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x2) LessEqualMasked(y Float64x2, mask Mask64x2) Mask64x2 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x4) LessEqualMasked(y Float64x4, mask Mask64x4) Mask64x4 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x8) LessEqualMasked(y Float64x8, mask Mask64x8) Mask64x8 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. 
// -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x16) LessEqualMasked(y Int8x16, mask Mask8x16) Mask8x16 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x32) LessEqualMasked(y Int8x32, mask Mask8x32) Mask8x32 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x64) LessEqualMasked(y Int8x64, mask Mask8x64) Mask8x64 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x8) LessEqualMasked(y Int16x8, mask Mask16x8) Mask16x8 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x16) LessEqualMasked(y Int16x16, mask Mask16x16) Mask16x16 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x32) LessEqualMasked(y Int16x32, mask Mask16x32) Mask16x32 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x4) LessEqualMasked(y Int32x4, mask Mask32x4) Mask32x4 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x8) LessEqualMasked(y Int32x8, mask Mask32x8) Mask32x8 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x16) LessEqualMasked(y Int32x16, mask Mask32x16) Mask32x16 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x2) LessEqualMasked(y Int64x2, mask Mask64x2) Mask64x2 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x4) LessEqualMasked(y Int64x4, mask Mask64x4) Mask64x4 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x8) LessEqualMasked(y Int64x8, mask Mask64x8) Mask64x8 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x16) LessEqualMasked(y Uint8x16, mask Mask8x16) Mask8x16 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. 
// -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x32) LessEqualMasked(y Uint8x32, mask Mask8x32) Mask8x32 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x64) LessEqualMasked(y Uint8x64, mask Mask8x64) Mask8x64 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x8) LessEqualMasked(y Uint16x8, mask Mask16x8) Mask16x8 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x16) LessEqualMasked(y Uint16x16, mask Mask16x16) Mask16x16 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x32) LessEqualMasked(y Uint16x32, mask Mask16x32) Mask16x32 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x4) LessEqualMasked(y Uint32x4, mask Mask32x4) Mask32x4 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x8) LessEqualMasked(y Uint32x8, mask Mask32x8) Mask32x8 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x16) LessEqualMasked(y Uint32x16, mask Mask32x16) Mask32x16 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x2) LessEqualMasked(y Uint64x2, mask Mask64x2) Mask64x2 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x4) LessEqualMasked(y Uint64x4, mask Mask64x4) Mask64x4 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x8) LessEqualMasked(y Uint64x8, mask Mask64x8) Mask64x8 /* LessMasked */ @@ -4624,210 +4624,210 @@ func (x Uint64x8) LessEqualMasked(y Uint64x8, mask Mask64x8) Mask64x8 // // This operation is applied selectively under a write mask. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x4) LessMasked(y Float32x4, mask Mask32x4) Mask32x4 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x8) LessMasked(y Float32x8, mask Mask32x8) Mask32x8 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. 
// -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x16) LessMasked(y Float32x16, mask Mask32x16) Mask32x16 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x2) LessMasked(y Float64x2, mask Mask64x2) Mask64x2 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x4) LessMasked(y Float64x4, mask Mask64x4) Mask64x4 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x8) LessMasked(y Float64x8, mask Mask64x8) Mask64x8 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x16) LessMasked(y Int8x16, mask Mask8x16) Mask8x16 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x32) LessMasked(y Int8x32, mask Mask8x32) Mask8x32 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x64) LessMasked(y Int8x64, mask Mask8x64) Mask8x64 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x8) LessMasked(y Int16x8, mask Mask16x8) Mask16x8 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x16) LessMasked(y Int16x16, mask Mask16x16) Mask16x16 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x32) LessMasked(y Int16x32, mask Mask16x32) Mask16x32 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x4) LessMasked(y Int32x4, mask Mask32x4) Mask32x4 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x8) LessMasked(y Int32x8, mask Mask32x8) Mask32x8 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x16) LessMasked(y Int32x16, mask Mask32x16) Mask32x16 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x2) LessMasked(y Int64x2, mask Mask64x2) Mask64x2 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. 
// -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x4) LessMasked(y Int64x4, mask Mask64x4) Mask64x4 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x8) LessMasked(y Int64x8, mask Mask64x8) Mask64x8 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x16) LessMasked(y Uint8x16, mask Mask8x16) Mask8x16 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x32) LessMasked(y Uint8x32, mask Mask8x32) Mask8x32 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x64) LessMasked(y Uint8x64, mask Mask8x64) Mask8x64 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x8) LessMasked(y Uint16x8, mask Mask16x8) Mask16x8 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x16) LessMasked(y Uint16x16, mask Mask16x16) Mask16x16 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x32) LessMasked(y Uint16x32, mask Mask16x32) Mask16x32 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x4) LessMasked(y Uint32x4, mask Mask32x4) Mask32x4 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x8) LessMasked(y Uint32x8, mask Mask32x8) Mask32x8 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x16) LessMasked(y Uint32x16, mask Mask32x16) Mask32x16 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x2) LessMasked(y Uint64x2, mask Mask64x2) Mask64x2 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x4) LessMasked(y Uint64x4, mask Mask64x4) Mask64x4 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x8) LessMasked(y Uint64x8, mask Mask64x8) Mask64x8 /* Max */ @@ -4844,7 +4844,7 @@ func (x Float32x8) Max(y Float32x8) Float32x8 // Max computes the maximum of corresponding elements. 
// -// Asm: VMAXPS, CPU Feature: AVX512F +// Asm: VMAXPS, CPU Feature: AVX512 func (x Float32x16) Max(y Float32x16) Float32x16 // Max computes the maximum of corresponding elements. @@ -4859,7 +4859,7 @@ func (x Float64x4) Max(y Float64x4) Float64x4 // Max computes the maximum of corresponding elements. // -// Asm: VMAXPD, CPU Feature: AVX512F +// Asm: VMAXPD, CPU Feature: AVX512 func (x Float64x8) Max(y Float64x8) Float64x8 // Max computes the maximum of corresponding elements. @@ -4874,7 +4874,7 @@ func (x Int8x32) Max(y Int8x32) Int8x32 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXSB, CPU Feature: AVX512BW +// Asm: VPMAXSB, CPU Feature: AVX512 func (x Int8x64) Max(y Int8x64) Int8x64 // Max computes the maximum of corresponding elements. @@ -4889,7 +4889,7 @@ func (x Int16x16) Max(y Int16x16) Int16x16 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXSW, CPU Feature: AVX512BW +// Asm: VPMAXSW, CPU Feature: AVX512 func (x Int16x32) Max(y Int16x32) Int16x32 // Max computes the maximum of corresponding elements. @@ -4904,22 +4904,22 @@ func (x Int32x8) Max(y Int32x8) Int32x8 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXSD, CPU Feature: AVX512F +// Asm: VPMAXSD, CPU Feature: AVX512 func (x Int32x16) Max(y Int32x16) Int32x16 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXSQ, CPU Feature: AVX512F +// Asm: VPMAXSQ, CPU Feature: AVX512 func (x Int64x2) Max(y Int64x2) Int64x2 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXSQ, CPU Feature: AVX512F +// Asm: VPMAXSQ, CPU Feature: AVX512 func (x Int64x4) Max(y Int64x4) Int64x4 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXSQ, CPU Feature: AVX512F +// Asm: VPMAXSQ, CPU Feature: AVX512 func (x Int64x8) Max(y Int64x8) Int64x8 // Max computes the maximum of corresponding elements. @@ -4934,7 +4934,7 @@ func (x Uint8x32) Max(y Uint8x32) Uint8x32 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXUB, CPU Feature: AVX512BW +// Asm: VPMAXUB, CPU Feature: AVX512 func (x Uint8x64) Max(y Uint8x64) Uint8x64 // Max computes the maximum of corresponding elements. @@ -4949,7 +4949,7 @@ func (x Uint16x16) Max(y Uint16x16) Uint16x16 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXUW, CPU Feature: AVX512BW +// Asm: VPMAXUW, CPU Feature: AVX512 func (x Uint16x32) Max(y Uint16x32) Uint16x32 // Max computes the maximum of corresponding elements. @@ -4964,22 +4964,22 @@ func (x Uint32x8) Max(y Uint32x8) Uint32x8 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXUD, CPU Feature: AVX512F +// Asm: VPMAXUD, CPU Feature: AVX512 func (x Uint32x16) Max(y Uint32x16) Uint32x16 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXUQ, CPU Feature: AVX512F +// Asm: VPMAXUQ, CPU Feature: AVX512 func (x Uint64x2) Max(y Uint64x2) Uint64x2 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXUQ, CPU Feature: AVX512F +// Asm: VPMAXUQ, CPU Feature: AVX512 func (x Uint64x4) Max(y Uint64x4) Uint64x4 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXUQ, CPU Feature: AVX512F +// Asm: VPMAXUQ, CPU Feature: AVX512 func (x Uint64x8) Max(y Uint64x8) Uint64x8 /* MaxMasked */ @@ -4988,210 +4988,210 @@ func (x Uint64x8) Max(y Uint64x8) Uint64x8 // // This operation is applied selectively under a write mask. 
// -// Asm: VMAXPS, CPU Feature: AVX512F +// Asm: VMAXPS, CPU Feature: AVX512 func (x Float32x4) MaxMasked(y Float32x4, mask Mask32x4) Float32x4 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VMAXPS, CPU Feature: AVX512F +// Asm: VMAXPS, CPU Feature: AVX512 func (x Float32x8) MaxMasked(y Float32x8, mask Mask32x8) Float32x8 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VMAXPS, CPU Feature: AVX512F +// Asm: VMAXPS, CPU Feature: AVX512 func (x Float32x16) MaxMasked(y Float32x16, mask Mask32x16) Float32x16 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VMAXPD, CPU Feature: AVX512F +// Asm: VMAXPD, CPU Feature: AVX512 func (x Float64x2) MaxMasked(y Float64x2, mask Mask64x2) Float64x2 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VMAXPD, CPU Feature: AVX512F +// Asm: VMAXPD, CPU Feature: AVX512 func (x Float64x4) MaxMasked(y Float64x4, mask Mask64x4) Float64x4 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VMAXPD, CPU Feature: AVX512F +// Asm: VMAXPD, CPU Feature: AVX512 func (x Float64x8) MaxMasked(y Float64x8, mask Mask64x8) Float64x8 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMAXSB, CPU Feature: AVX512BW +// Asm: VPMAXSB, CPU Feature: AVX512 func (x Int8x16) MaxMasked(y Int8x16, mask Mask8x16) Int8x16 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMAXSB, CPU Feature: AVX512BW +// Asm: VPMAXSB, CPU Feature: AVX512 func (x Int8x32) MaxMasked(y Int8x32, mask Mask8x32) Int8x32 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMAXSB, CPU Feature: AVX512BW +// Asm: VPMAXSB, CPU Feature: AVX512 func (x Int8x64) MaxMasked(y Int8x64, mask Mask8x64) Int8x64 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMAXSW, CPU Feature: AVX512BW +// Asm: VPMAXSW, CPU Feature: AVX512 func (x Int16x8) MaxMasked(y Int16x8, mask Mask16x8) Int16x8 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMAXSW, CPU Feature: AVX512BW +// Asm: VPMAXSW, CPU Feature: AVX512 func (x Int16x16) MaxMasked(y Int16x16, mask Mask16x16) Int16x16 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMAXSW, CPU Feature: AVX512BW +// Asm: VPMAXSW, CPU Feature: AVX512 func (x Int16x32) MaxMasked(y Int16x32, mask Mask16x32) Int16x32 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMAXSD, CPU Feature: AVX512F +// Asm: VPMAXSD, CPU Feature: AVX512 func (x Int32x4) MaxMasked(y Int32x4, mask Mask32x4) Int32x4 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. 
// -// Asm: VPMAXSD, CPU Feature: AVX512F +// Asm: VPMAXSD, CPU Feature: AVX512 func (x Int32x8) MaxMasked(y Int32x8, mask Mask32x8) Int32x8 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMAXSD, CPU Feature: AVX512F +// Asm: VPMAXSD, CPU Feature: AVX512 func (x Int32x16) MaxMasked(y Int32x16, mask Mask32x16) Int32x16 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMAXSQ, CPU Feature: AVX512F +// Asm: VPMAXSQ, CPU Feature: AVX512 func (x Int64x2) MaxMasked(y Int64x2, mask Mask64x2) Int64x2 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMAXSQ, CPU Feature: AVX512F +// Asm: VPMAXSQ, CPU Feature: AVX512 func (x Int64x4) MaxMasked(y Int64x4, mask Mask64x4) Int64x4 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMAXSQ, CPU Feature: AVX512F +// Asm: VPMAXSQ, CPU Feature: AVX512 func (x Int64x8) MaxMasked(y Int64x8, mask Mask64x8) Int64x8 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMAXUB, CPU Feature: AVX512BW +// Asm: VPMAXUB, CPU Feature: AVX512 func (x Uint8x16) MaxMasked(y Uint8x16, mask Mask8x16) Uint8x16 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMAXUB, CPU Feature: AVX512BW +// Asm: VPMAXUB, CPU Feature: AVX512 func (x Uint8x32) MaxMasked(y Uint8x32, mask Mask8x32) Uint8x32 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMAXUB, CPU Feature: AVX512BW +// Asm: VPMAXUB, CPU Feature: AVX512 func (x Uint8x64) MaxMasked(y Uint8x64, mask Mask8x64) Uint8x64 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMAXUW, CPU Feature: AVX512BW +// Asm: VPMAXUW, CPU Feature: AVX512 func (x Uint16x8) MaxMasked(y Uint16x8, mask Mask16x8) Uint16x8 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMAXUW, CPU Feature: AVX512BW +// Asm: VPMAXUW, CPU Feature: AVX512 func (x Uint16x16) MaxMasked(y Uint16x16, mask Mask16x16) Uint16x16 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMAXUW, CPU Feature: AVX512BW +// Asm: VPMAXUW, CPU Feature: AVX512 func (x Uint16x32) MaxMasked(y Uint16x32, mask Mask16x32) Uint16x32 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMAXUD, CPU Feature: AVX512F +// Asm: VPMAXUD, CPU Feature: AVX512 func (x Uint32x4) MaxMasked(y Uint32x4, mask Mask32x4) Uint32x4 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMAXUD, CPU Feature: AVX512F +// Asm: VPMAXUD, CPU Feature: AVX512 func (x Uint32x8) MaxMasked(y Uint32x8, mask Mask32x8) Uint32x8 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. 
// -// Asm: VPMAXUD, CPU Feature: AVX512F +// Asm: VPMAXUD, CPU Feature: AVX512 func (x Uint32x16) MaxMasked(y Uint32x16, mask Mask32x16) Uint32x16 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMAXUQ, CPU Feature: AVX512F +// Asm: VPMAXUQ, CPU Feature: AVX512 func (x Uint64x2) MaxMasked(y Uint64x2, mask Mask64x2) Uint64x2 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMAXUQ, CPU Feature: AVX512F +// Asm: VPMAXUQ, CPU Feature: AVX512 func (x Uint64x4) MaxMasked(y Uint64x4, mask Mask64x4) Uint64x4 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMAXUQ, CPU Feature: AVX512F +// Asm: VPMAXUQ, CPU Feature: AVX512 func (x Uint64x8) MaxMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* Min */ @@ -5208,7 +5208,7 @@ func (x Float32x8) Min(y Float32x8) Float32x8 // Min computes the minimum of corresponding elements. // -// Asm: VMINPS, CPU Feature: AVX512F +// Asm: VMINPS, CPU Feature: AVX512 func (x Float32x16) Min(y Float32x16) Float32x16 // Min computes the minimum of corresponding elements. @@ -5223,7 +5223,7 @@ func (x Float64x4) Min(y Float64x4) Float64x4 // Min computes the minimum of corresponding elements. // -// Asm: VMINPD, CPU Feature: AVX512F +// Asm: VMINPD, CPU Feature: AVX512 func (x Float64x8) Min(y Float64x8) Float64x8 // Min computes the minimum of corresponding elements. @@ -5238,7 +5238,7 @@ func (x Int8x32) Min(y Int8x32) Int8x32 // Min computes the minimum of corresponding elements. // -// Asm: VPMINSB, CPU Feature: AVX512BW +// Asm: VPMINSB, CPU Feature: AVX512 func (x Int8x64) Min(y Int8x64) Int8x64 // Min computes the minimum of corresponding elements. @@ -5253,7 +5253,7 @@ func (x Int16x16) Min(y Int16x16) Int16x16 // Min computes the minimum of corresponding elements. // -// Asm: VPMINSW, CPU Feature: AVX512BW +// Asm: VPMINSW, CPU Feature: AVX512 func (x Int16x32) Min(y Int16x32) Int16x32 // Min computes the minimum of corresponding elements. @@ -5268,22 +5268,22 @@ func (x Int32x8) Min(y Int32x8) Int32x8 // Min computes the minimum of corresponding elements. // -// Asm: VPMINSD, CPU Feature: AVX512F +// Asm: VPMINSD, CPU Feature: AVX512 func (x Int32x16) Min(y Int32x16) Int32x16 // Min computes the minimum of corresponding elements. // -// Asm: VPMINSQ, CPU Feature: AVX512F +// Asm: VPMINSQ, CPU Feature: AVX512 func (x Int64x2) Min(y Int64x2) Int64x2 // Min computes the minimum of corresponding elements. // -// Asm: VPMINSQ, CPU Feature: AVX512F +// Asm: VPMINSQ, CPU Feature: AVX512 func (x Int64x4) Min(y Int64x4) Int64x4 // Min computes the minimum of corresponding elements. // -// Asm: VPMINSQ, CPU Feature: AVX512F +// Asm: VPMINSQ, CPU Feature: AVX512 func (x Int64x8) Min(y Int64x8) Int64x8 // Min computes the minimum of corresponding elements. @@ -5298,7 +5298,7 @@ func (x Uint8x32) Min(y Uint8x32) Uint8x32 // Min computes the minimum of corresponding elements. // -// Asm: VPMINUB, CPU Feature: AVX512BW +// Asm: VPMINUB, CPU Feature: AVX512 func (x Uint8x64) Min(y Uint8x64) Uint8x64 // Min computes the minimum of corresponding elements. @@ -5313,7 +5313,7 @@ func (x Uint16x16) Min(y Uint16x16) Uint16x16 // Min computes the minimum of corresponding elements. 
// -// Asm: VPMINUW, CPU Feature: AVX512BW +// Asm: VPMINUW, CPU Feature: AVX512 func (x Uint16x32) Min(y Uint16x32) Uint16x32 // Min computes the minimum of corresponding elements. @@ -5328,22 +5328,22 @@ func (x Uint32x8) Min(y Uint32x8) Uint32x8 // Min computes the minimum of corresponding elements. // -// Asm: VPMINUD, CPU Feature: AVX512F +// Asm: VPMINUD, CPU Feature: AVX512 func (x Uint32x16) Min(y Uint32x16) Uint32x16 // Min computes the minimum of corresponding elements. // -// Asm: VPMINUQ, CPU Feature: AVX512F +// Asm: VPMINUQ, CPU Feature: AVX512 func (x Uint64x2) Min(y Uint64x2) Uint64x2 // Min computes the minimum of corresponding elements. // -// Asm: VPMINUQ, CPU Feature: AVX512F +// Asm: VPMINUQ, CPU Feature: AVX512 func (x Uint64x4) Min(y Uint64x4) Uint64x4 // Min computes the minimum of corresponding elements. // -// Asm: VPMINUQ, CPU Feature: AVX512F +// Asm: VPMINUQ, CPU Feature: AVX512 func (x Uint64x8) Min(y Uint64x8) Uint64x8 /* MinMasked */ @@ -5352,210 +5352,210 @@ func (x Uint64x8) Min(y Uint64x8) Uint64x8 // // This operation is applied selectively under a write mask. // -// Asm: VMINPS, CPU Feature: AVX512F +// Asm: VMINPS, CPU Feature: AVX512 func (x Float32x4) MinMasked(y Float32x4, mask Mask32x4) Float32x4 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VMINPS, CPU Feature: AVX512F +// Asm: VMINPS, CPU Feature: AVX512 func (x Float32x8) MinMasked(y Float32x8, mask Mask32x8) Float32x8 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VMINPS, CPU Feature: AVX512F +// Asm: VMINPS, CPU Feature: AVX512 func (x Float32x16) MinMasked(y Float32x16, mask Mask32x16) Float32x16 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VMINPD, CPU Feature: AVX512F +// Asm: VMINPD, CPU Feature: AVX512 func (x Float64x2) MinMasked(y Float64x2, mask Mask64x2) Float64x2 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VMINPD, CPU Feature: AVX512F +// Asm: VMINPD, CPU Feature: AVX512 func (x Float64x4) MinMasked(y Float64x4, mask Mask64x4) Float64x4 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VMINPD, CPU Feature: AVX512F +// Asm: VMINPD, CPU Feature: AVX512 func (x Float64x8) MinMasked(y Float64x8, mask Mask64x8) Float64x8 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMINSB, CPU Feature: AVX512BW +// Asm: VPMINSB, CPU Feature: AVX512 func (x Int8x16) MinMasked(y Int8x16, mask Mask8x16) Int8x16 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMINSB, CPU Feature: AVX512BW +// Asm: VPMINSB, CPU Feature: AVX512 func (x Int8x32) MinMasked(y Int8x32, mask Mask8x32) Int8x32 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMINSB, CPU Feature: AVX512BW +// Asm: VPMINSB, CPU Feature: AVX512 func (x Int8x64) MinMasked(y Int8x64, mask Mask8x64) Int8x64 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. 
// -// Asm: VPMINSW, CPU Feature: AVX512BW +// Asm: VPMINSW, CPU Feature: AVX512 func (x Int16x8) MinMasked(y Int16x8, mask Mask16x8) Int16x8 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMINSW, CPU Feature: AVX512BW +// Asm: VPMINSW, CPU Feature: AVX512 func (x Int16x16) MinMasked(y Int16x16, mask Mask16x16) Int16x16 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMINSW, CPU Feature: AVX512BW +// Asm: VPMINSW, CPU Feature: AVX512 func (x Int16x32) MinMasked(y Int16x32, mask Mask16x32) Int16x32 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMINSD, CPU Feature: AVX512F +// Asm: VPMINSD, CPU Feature: AVX512 func (x Int32x4) MinMasked(y Int32x4, mask Mask32x4) Int32x4 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMINSD, CPU Feature: AVX512F +// Asm: VPMINSD, CPU Feature: AVX512 func (x Int32x8) MinMasked(y Int32x8, mask Mask32x8) Int32x8 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMINSD, CPU Feature: AVX512F +// Asm: VPMINSD, CPU Feature: AVX512 func (x Int32x16) MinMasked(y Int32x16, mask Mask32x16) Int32x16 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMINSQ, CPU Feature: AVX512F +// Asm: VPMINSQ, CPU Feature: AVX512 func (x Int64x2) MinMasked(y Int64x2, mask Mask64x2) Int64x2 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMINSQ, CPU Feature: AVX512F +// Asm: VPMINSQ, CPU Feature: AVX512 func (x Int64x4) MinMasked(y Int64x4, mask Mask64x4) Int64x4 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMINSQ, CPU Feature: AVX512F +// Asm: VPMINSQ, CPU Feature: AVX512 func (x Int64x8) MinMasked(y Int64x8, mask Mask64x8) Int64x8 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMINUB, CPU Feature: AVX512BW +// Asm: VPMINUB, CPU Feature: AVX512 func (x Uint8x16) MinMasked(y Uint8x16, mask Mask8x16) Uint8x16 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMINUB, CPU Feature: AVX512BW +// Asm: VPMINUB, CPU Feature: AVX512 func (x Uint8x32) MinMasked(y Uint8x32, mask Mask8x32) Uint8x32 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMINUB, CPU Feature: AVX512BW +// Asm: VPMINUB, CPU Feature: AVX512 func (x Uint8x64) MinMasked(y Uint8x64, mask Mask8x64) Uint8x64 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMINUW, CPU Feature: AVX512BW +// Asm: VPMINUW, CPU Feature: AVX512 func (x Uint16x8) MinMasked(y Uint16x8, mask Mask16x8) Uint16x8 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. 
// -// Asm: VPMINUW, CPU Feature: AVX512BW +// Asm: VPMINUW, CPU Feature: AVX512 func (x Uint16x16) MinMasked(y Uint16x16, mask Mask16x16) Uint16x16 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMINUW, CPU Feature: AVX512BW +// Asm: VPMINUW, CPU Feature: AVX512 func (x Uint16x32) MinMasked(y Uint16x32, mask Mask16x32) Uint16x32 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMINUD, CPU Feature: AVX512F +// Asm: VPMINUD, CPU Feature: AVX512 func (x Uint32x4) MinMasked(y Uint32x4, mask Mask32x4) Uint32x4 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMINUD, CPU Feature: AVX512F +// Asm: VPMINUD, CPU Feature: AVX512 func (x Uint32x8) MinMasked(y Uint32x8, mask Mask32x8) Uint32x8 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMINUD, CPU Feature: AVX512F +// Asm: VPMINUD, CPU Feature: AVX512 func (x Uint32x16) MinMasked(y Uint32x16, mask Mask32x16) Uint32x16 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMINUQ, CPU Feature: AVX512F +// Asm: VPMINUQ, CPU Feature: AVX512 func (x Uint64x2) MinMasked(y Uint64x2, mask Mask64x2) Uint64x2 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMINUQ, CPU Feature: AVX512F +// Asm: VPMINUQ, CPU Feature: AVX512 func (x Uint64x4) MinMasked(y Uint64x4, mask Mask64x4) Uint64x4 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMINUQ, CPU Feature: AVX512F +// Asm: VPMINUQ, CPU Feature: AVX512 func (x Uint64x8) MinMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* Mul */ @@ -5572,7 +5572,7 @@ func (x Float32x8) Mul(y Float32x8) Float32x8 // Mul multiplies corresponding elements of two vectors. // -// Asm: VMULPS, CPU Feature: AVX512F +// Asm: VMULPS, CPU Feature: AVX512 func (x Float32x16) Mul(y Float32x16) Float32x16 // Mul multiplies corresponding elements of two vectors. @@ -5587,7 +5587,7 @@ func (x Float64x4) Mul(y Float64x4) Float64x4 // Mul multiplies corresponding elements of two vectors. // -// Asm: VMULPD, CPU Feature: AVX512F +// Asm: VMULPD, CPU Feature: AVX512 func (x Float64x8) Mul(y Float64x8) Float64x8 // Mul multiplies corresponding elements of two vectors. @@ -5602,7 +5602,7 @@ func (x Int16x16) Mul(y Int16x16) Int16x16 // Mul multiplies corresponding elements of two vectors. // -// Asm: VPMULLW, CPU Feature: AVX512BW +// Asm: VPMULLW, CPU Feature: AVX512 func (x Int16x32) Mul(y Int16x32) Int16x32 // Mul multiplies corresponding elements of two vectors. @@ -5617,22 +5617,22 @@ func (x Int32x8) Mul(y Int32x8) Int32x8 // Mul multiplies corresponding elements of two vectors. // -// Asm: VPMULLD, CPU Feature: AVX512F +// Asm: VPMULLD, CPU Feature: AVX512 func (x Int32x16) Mul(y Int32x16) Int32x16 // Mul multiplies corresponding elements of two vectors. // -// Asm: VPMULLQ, CPU Feature: AVX512DQ +// Asm: VPMULLQ, CPU Feature: AVX512 func (x Int64x2) Mul(y Int64x2) Int64x2 // Mul multiplies corresponding elements of two vectors. 
// -// Asm: VPMULLQ, CPU Feature: AVX512DQ +// Asm: VPMULLQ, CPU Feature: AVX512 func (x Int64x4) Mul(y Int64x4) Int64x4 // Mul multiplies corresponding elements of two vectors. // -// Asm: VPMULLQ, CPU Feature: AVX512DQ +// Asm: VPMULLQ, CPU Feature: AVX512 func (x Int64x8) Mul(y Int64x8) Int64x8 // Mul multiplies corresponding elements of two vectors. @@ -5647,7 +5647,7 @@ func (x Uint16x16) Mul(y Uint16x16) Uint16x16 // Mul multiplies corresponding elements of two vectors. // -// Asm: VPMULLW, CPU Feature: AVX512BW +// Asm: VPMULLW, CPU Feature: AVX512 func (x Uint16x32) Mul(y Uint16x32) Uint16x32 // Mul multiplies corresponding elements of two vectors. @@ -5662,54 +5662,54 @@ func (x Uint32x8) Mul(y Uint32x8) Uint32x8 // Mul multiplies corresponding elements of two vectors. // -// Asm: VPMULLD, CPU Feature: AVX512F +// Asm: VPMULLD, CPU Feature: AVX512 func (x Uint32x16) Mul(y Uint32x16) Uint32x16 // Mul multiplies corresponding elements of two vectors. // -// Asm: VPMULLQ, CPU Feature: AVX512DQ +// Asm: VPMULLQ, CPU Feature: AVX512 func (x Uint64x2) Mul(y Uint64x2) Uint64x2 // Mul multiplies corresponding elements of two vectors. // -// Asm: VPMULLQ, CPU Feature: AVX512DQ +// Asm: VPMULLQ, CPU Feature: AVX512 func (x Uint64x4) Mul(y Uint64x4) Uint64x4 // Mul multiplies corresponding elements of two vectors. // -// Asm: VPMULLQ, CPU Feature: AVX512DQ +// Asm: VPMULLQ, CPU Feature: AVX512 func (x Uint64x8) Mul(y Uint64x8) Uint64x8 /* MulAdd */ // MulAdd performs a fused (x * y) + z. // -// Asm: VFMADD213PS, CPU Feature: AVX512F +// Asm: VFMADD213PS, CPU Feature: AVX512 func (x Float32x4) MulAdd(y Float32x4, z Float32x4) Float32x4 // MulAdd performs a fused (x * y) + z. // -// Asm: VFMADD213PS, CPU Feature: AVX512F +// Asm: VFMADD213PS, CPU Feature: AVX512 func (x Float32x8) MulAdd(y Float32x8, z Float32x8) Float32x8 // MulAdd performs a fused (x * y) + z. // -// Asm: VFMADD213PS, CPU Feature: AVX512F +// Asm: VFMADD213PS, CPU Feature: AVX512 func (x Float32x16) MulAdd(y Float32x16, z Float32x16) Float32x16 // MulAdd performs a fused (x * y) + z. // -// Asm: VFMADD213PD, CPU Feature: AVX512F +// Asm: VFMADD213PD, CPU Feature: AVX512 func (x Float64x2) MulAdd(y Float64x2, z Float64x2) Float64x2 // MulAdd performs a fused (x * y) + z. // -// Asm: VFMADD213PD, CPU Feature: AVX512F +// Asm: VFMADD213PD, CPU Feature: AVX512 func (x Float64x4) MulAdd(y Float64x4, z Float64x4) Float64x4 // MulAdd performs a fused (x * y) + z. // -// Asm: VFMADD213PD, CPU Feature: AVX512F +// Asm: VFMADD213PD, CPU Feature: AVX512 func (x Float64x8) MulAdd(y Float64x8, z Float64x8) Float64x8 /* MulAddMasked */ @@ -5718,74 +5718,74 @@ func (x Float64x8) MulAdd(y Float64x8, z Float64x8) Float64x8 // // This operation is applied selectively under a write mask. // -// Asm: VFMADD213PS, CPU Feature: AVX512F +// Asm: VFMADD213PS, CPU Feature: AVX512 func (x Float32x4) MulAddMasked(y Float32x4, z Float32x4, mask Mask32x4) Float32x4 // MulAddMasked performs a fused (x * y) + z. // // This operation is applied selectively under a write mask. // -// Asm: VFMADD213PS, CPU Feature: AVX512F +// Asm: VFMADD213PS, CPU Feature: AVX512 func (x Float32x8) MulAddMasked(y Float32x8, z Float32x8, mask Mask32x8) Float32x8 // MulAddMasked performs a fused (x * y) + z. // // This operation is applied selectively under a write mask. 
// -// Asm: VFMADD213PS, CPU Feature: AVX512F +// Asm: VFMADD213PS, CPU Feature: AVX512 func (x Float32x16) MulAddMasked(y Float32x16, z Float32x16, mask Mask32x16) Float32x16 // MulAddMasked performs a fused (x * y) + z. // // This operation is applied selectively under a write mask. // -// Asm: VFMADD213PD, CPU Feature: AVX512F +// Asm: VFMADD213PD, CPU Feature: AVX512 func (x Float64x2) MulAddMasked(y Float64x2, z Float64x2, mask Mask64x2) Float64x2 // MulAddMasked performs a fused (x * y) + z. // // This operation is applied selectively under a write mask. // -// Asm: VFMADD213PD, CPU Feature: AVX512F +// Asm: VFMADD213PD, CPU Feature: AVX512 func (x Float64x4) MulAddMasked(y Float64x4, z Float64x4, mask Mask64x4) Float64x4 // MulAddMasked performs a fused (x * y) + z. // // This operation is applied selectively under a write mask. // -// Asm: VFMADD213PD, CPU Feature: AVX512F +// Asm: VFMADD213PD, CPU Feature: AVX512 func (x Float64x8) MulAddMasked(y Float64x8, z Float64x8, mask Mask64x8) Float64x8 /* MulAddSub */ // MulAddSub performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // -// Asm: VFMADDSUB213PS, CPU Feature: AVX512F +// Asm: VFMADDSUB213PS, CPU Feature: AVX512 func (x Float32x4) MulAddSub(y Float32x4, z Float32x4) Float32x4 // MulAddSub performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // -// Asm: VFMADDSUB213PS, CPU Feature: AVX512F +// Asm: VFMADDSUB213PS, CPU Feature: AVX512 func (x Float32x8) MulAddSub(y Float32x8, z Float32x8) Float32x8 // MulAddSub performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // -// Asm: VFMADDSUB213PS, CPU Feature: AVX512F +// Asm: VFMADDSUB213PS, CPU Feature: AVX512 func (x Float32x16) MulAddSub(y Float32x16, z Float32x16) Float32x16 // MulAddSub performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // -// Asm: VFMADDSUB213PD, CPU Feature: AVX512F +// Asm: VFMADDSUB213PD, CPU Feature: AVX512 func (x Float64x2) MulAddSub(y Float64x2, z Float64x2) Float64x2 // MulAddSub performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // -// Asm: VFMADDSUB213PD, CPU Feature: AVX512F +// Asm: VFMADDSUB213PD, CPU Feature: AVX512 func (x Float64x4) MulAddSub(y Float64x4, z Float64x4) Float64x4 // MulAddSub performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // -// Asm: VFMADDSUB213PD, CPU Feature: AVX512F +// Asm: VFMADDSUB213PD, CPU Feature: AVX512 func (x Float64x8) MulAddSub(y Float64x8, z Float64x8) Float64x8 /* MulAddSubMasked */ @@ -5794,42 +5794,42 @@ func (x Float64x8) MulAddSub(y Float64x8, z Float64x8) Float64x8 // // This operation is applied selectively under a write mask. // -// Asm: VFMADDSUB213PS, CPU Feature: AVX512F +// Asm: VFMADDSUB213PS, CPU Feature: AVX512 func (x Float32x4) MulAddSubMasked(y Float32x4, z Float32x4, mask Mask32x4) Float32x4 // MulAddSubMasked performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // // This operation is applied selectively under a write mask. // -// Asm: VFMADDSUB213PS, CPU Feature: AVX512F +// Asm: VFMADDSUB213PS, CPU Feature: AVX512 func (x Float32x8) MulAddSubMasked(y Float32x8, z Float32x8, mask Mask32x8) Float32x8 // MulAddSubMasked performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. 
// // This operation is applied selectively under a write mask. // -// Asm: VFMADDSUB213PS, CPU Feature: AVX512F +// Asm: VFMADDSUB213PS, CPU Feature: AVX512 func (x Float32x16) MulAddSubMasked(y Float32x16, z Float32x16, mask Mask32x16) Float32x16 // MulAddSubMasked performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // // This operation is applied selectively under a write mask. // -// Asm: VFMADDSUB213PD, CPU Feature: AVX512F +// Asm: VFMADDSUB213PD, CPU Feature: AVX512 func (x Float64x2) MulAddSubMasked(y Float64x2, z Float64x2, mask Mask64x2) Float64x2 // MulAddSubMasked performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // // This operation is applied selectively under a write mask. // -// Asm: VFMADDSUB213PD, CPU Feature: AVX512F +// Asm: VFMADDSUB213PD, CPU Feature: AVX512 func (x Float64x4) MulAddSubMasked(y Float64x4, z Float64x4, mask Mask64x4) Float64x4 // MulAddSubMasked performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // // This operation is applied selectively under a write mask. // -// Asm: VFMADDSUB213PD, CPU Feature: AVX512F +// Asm: VFMADDSUB213PD, CPU Feature: AVX512 func (x Float64x8) MulAddSubMasked(y Float64x8, z Float64x8, mask Mask64x8) Float64x8 /* MulEvenWiden */ @@ -5872,7 +5872,7 @@ func (x Int16x16) MulHigh(y Int16x16) Int16x16 // MulHigh multiplies elements and stores the high part of the result. // -// Asm: VPMULHW, CPU Feature: AVX512BW +// Asm: VPMULHW, CPU Feature: AVX512 func (x Int16x32) MulHigh(y Int16x32) Int16x32 // MulHigh multiplies elements and stores the high part of the result. @@ -5887,7 +5887,7 @@ func (x Uint16x16) MulHigh(y Uint16x16) Uint16x16 // MulHigh multiplies elements and stores the high part of the result. // -// Asm: VPMULHUW, CPU Feature: AVX512BW +// Asm: VPMULHUW, CPU Feature: AVX512 func (x Uint16x32) MulHigh(y Uint16x32) Uint16x32 /* MulHighMasked */ @@ -5896,42 +5896,42 @@ func (x Uint16x32) MulHigh(y Uint16x32) Uint16x32 // // This operation is applied selectively under a write mask. // -// Asm: VPMULHW, CPU Feature: AVX512BW +// Asm: VPMULHW, CPU Feature: AVX512 func (x Int16x8) MulHighMasked(y Int16x8, mask Mask16x8) Int16x8 // MulHighMasked multiplies elements and stores the high part of the result. // // This operation is applied selectively under a write mask. // -// Asm: VPMULHW, CPU Feature: AVX512BW +// Asm: VPMULHW, CPU Feature: AVX512 func (x Int16x16) MulHighMasked(y Int16x16, mask Mask16x16) Int16x16 // MulHighMasked multiplies elements and stores the high part of the result. // // This operation is applied selectively under a write mask. // -// Asm: VPMULHW, CPU Feature: AVX512BW +// Asm: VPMULHW, CPU Feature: AVX512 func (x Int16x32) MulHighMasked(y Int16x32, mask Mask16x32) Int16x32 // MulHighMasked multiplies elements and stores the high part of the result. // // This operation is applied selectively under a write mask. // -// Asm: VPMULHUW, CPU Feature: AVX512BW +// Asm: VPMULHUW, CPU Feature: AVX512 func (x Uint16x8) MulHighMasked(y Uint16x8, mask Mask16x8) Uint16x8 // MulHighMasked multiplies elements and stores the high part of the result. // // This operation is applied selectively under a write mask. // -// Asm: VPMULHUW, CPU Feature: AVX512BW +// Asm: VPMULHUW, CPU Feature: AVX512 func (x Uint16x16) MulHighMasked(y Uint16x16, mask Mask16x16) Uint16x16 // MulHighMasked multiplies elements and stores the high part of the result. 
// // This operation is applied selectively under a write mask. // -// Asm: VPMULHUW, CPU Feature: AVX512BW +// Asm: VPMULHUW, CPU Feature: AVX512 func (x Uint16x32) MulHighMasked(y Uint16x32, mask Mask16x32) Uint16x32 /* MulMasked */ @@ -5940,200 +5940,200 @@ func (x Uint16x32) MulHighMasked(y Uint16x32, mask Mask16x32) Uint16x32 // // This operation is applied selectively under a write mask. // -// Asm: VMULPS, CPU Feature: AVX512F +// Asm: VMULPS, CPU Feature: AVX512 func (x Float32x4) MulMasked(y Float32x4, mask Mask32x4) Float32x4 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VMULPS, CPU Feature: AVX512F +// Asm: VMULPS, CPU Feature: AVX512 func (x Float32x8) MulMasked(y Float32x8, mask Mask32x8) Float32x8 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VMULPS, CPU Feature: AVX512F +// Asm: VMULPS, CPU Feature: AVX512 func (x Float32x16) MulMasked(y Float32x16, mask Mask32x16) Float32x16 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VMULPD, CPU Feature: AVX512F +// Asm: VMULPD, CPU Feature: AVX512 func (x Float64x2) MulMasked(y Float64x2, mask Mask64x2) Float64x2 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VMULPD, CPU Feature: AVX512F +// Asm: VMULPD, CPU Feature: AVX512 func (x Float64x4) MulMasked(y Float64x4, mask Mask64x4) Float64x4 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VMULPD, CPU Feature: AVX512F +// Asm: VMULPD, CPU Feature: AVX512 func (x Float64x8) MulMasked(y Float64x8, mask Mask64x8) Float64x8 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPMULLW, CPU Feature: AVX512BW +// Asm: VPMULLW, CPU Feature: AVX512 func (x Int16x8) MulMasked(y Int16x8, mask Mask16x8) Int16x8 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPMULLW, CPU Feature: AVX512BW +// Asm: VPMULLW, CPU Feature: AVX512 func (x Int16x16) MulMasked(y Int16x16, mask Mask16x16) Int16x16 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPMULLW, CPU Feature: AVX512BW +// Asm: VPMULLW, CPU Feature: AVX512 func (x Int16x32) MulMasked(y Int16x32, mask Mask16x32) Int16x32 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPMULLD, CPU Feature: AVX512F +// Asm: VPMULLD, CPU Feature: AVX512 func (x Int32x4) MulMasked(y Int32x4, mask Mask32x4) Int32x4 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPMULLD, CPU Feature: AVX512F +// Asm: VPMULLD, CPU Feature: AVX512 func (x Int32x8) MulMasked(y Int32x8, mask Mask32x8) Int32x8 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. 
// -// Asm: VPMULLD, CPU Feature: AVX512F +// Asm: VPMULLD, CPU Feature: AVX512 func (x Int32x16) MulMasked(y Int32x16, mask Mask32x16) Int32x16 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPMULLQ, CPU Feature: AVX512DQ +// Asm: VPMULLQ, CPU Feature: AVX512 func (x Int64x2) MulMasked(y Int64x2, mask Mask64x2) Int64x2 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPMULLQ, CPU Feature: AVX512DQ +// Asm: VPMULLQ, CPU Feature: AVX512 func (x Int64x4) MulMasked(y Int64x4, mask Mask64x4) Int64x4 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPMULLQ, CPU Feature: AVX512DQ +// Asm: VPMULLQ, CPU Feature: AVX512 func (x Int64x8) MulMasked(y Int64x8, mask Mask64x8) Int64x8 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPMULLW, CPU Feature: AVX512BW +// Asm: VPMULLW, CPU Feature: AVX512 func (x Uint16x8) MulMasked(y Uint16x8, mask Mask16x8) Uint16x8 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPMULLW, CPU Feature: AVX512BW +// Asm: VPMULLW, CPU Feature: AVX512 func (x Uint16x16) MulMasked(y Uint16x16, mask Mask16x16) Uint16x16 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPMULLW, CPU Feature: AVX512BW +// Asm: VPMULLW, CPU Feature: AVX512 func (x Uint16x32) MulMasked(y Uint16x32, mask Mask16x32) Uint16x32 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPMULLD, CPU Feature: AVX512F +// Asm: VPMULLD, CPU Feature: AVX512 func (x Uint32x4) MulMasked(y Uint32x4, mask Mask32x4) Uint32x4 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPMULLD, CPU Feature: AVX512F +// Asm: VPMULLD, CPU Feature: AVX512 func (x Uint32x8) MulMasked(y Uint32x8, mask Mask32x8) Uint32x8 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPMULLD, CPU Feature: AVX512F +// Asm: VPMULLD, CPU Feature: AVX512 func (x Uint32x16) MulMasked(y Uint32x16, mask Mask32x16) Uint32x16 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPMULLQ, CPU Feature: AVX512DQ +// Asm: VPMULLQ, CPU Feature: AVX512 func (x Uint64x2) MulMasked(y Uint64x2, mask Mask64x2) Uint64x2 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPMULLQ, CPU Feature: AVX512DQ +// Asm: VPMULLQ, CPU Feature: AVX512 func (x Uint64x4) MulMasked(y Uint64x4, mask Mask64x4) Uint64x4 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPMULLQ, CPU Feature: AVX512DQ +// Asm: VPMULLQ, CPU Feature: AVX512 func (x Uint64x8) MulMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* MulSubAdd */ // MulSubAdd performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. 
// -// Asm: VFMSUBADD213PS, CPU Feature: AVX512F +// Asm: VFMSUBADD213PS, CPU Feature: AVX512 func (x Float32x4) MulSubAdd(y Float32x4, z Float32x4) Float32x4 // MulSubAdd performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // -// Asm: VFMSUBADD213PS, CPU Feature: AVX512F +// Asm: VFMSUBADD213PS, CPU Feature: AVX512 func (x Float32x8) MulSubAdd(y Float32x8, z Float32x8) Float32x8 // MulSubAdd performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // -// Asm: VFMSUBADD213PS, CPU Feature: AVX512F +// Asm: VFMSUBADD213PS, CPU Feature: AVX512 func (x Float32x16) MulSubAdd(y Float32x16, z Float32x16) Float32x16 // MulSubAdd performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // -// Asm: VFMSUBADD213PD, CPU Feature: AVX512F +// Asm: VFMSUBADD213PD, CPU Feature: AVX512 func (x Float64x2) MulSubAdd(y Float64x2, z Float64x2) Float64x2 // MulSubAdd performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // -// Asm: VFMSUBADD213PD, CPU Feature: AVX512F +// Asm: VFMSUBADD213PD, CPU Feature: AVX512 func (x Float64x4) MulSubAdd(y Float64x4, z Float64x4) Float64x4 // MulSubAdd performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // -// Asm: VFMSUBADD213PD, CPU Feature: AVX512F +// Asm: VFMSUBADD213PD, CPU Feature: AVX512 func (x Float64x8) MulSubAdd(y Float64x8, z Float64x8) Float64x8 /* MulSubAddMasked */ @@ -6142,42 +6142,42 @@ func (x Float64x8) MulSubAdd(y Float64x8, z Float64x8) Float64x8 // // This operation is applied selectively under a write mask. // -// Asm: VFMSUBADD213PS, CPU Feature: AVX512F +// Asm: VFMSUBADD213PS, CPU Feature: AVX512 func (x Float32x4) MulSubAddMasked(y Float32x4, z Float32x4, mask Mask32x4) Float32x4 // MulSubAddMasked performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // // This operation is applied selectively under a write mask. // -// Asm: VFMSUBADD213PS, CPU Feature: AVX512F +// Asm: VFMSUBADD213PS, CPU Feature: AVX512 func (x Float32x8) MulSubAddMasked(y Float32x8, z Float32x8, mask Mask32x8) Float32x8 // MulSubAddMasked performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // // This operation is applied selectively under a write mask. // -// Asm: VFMSUBADD213PS, CPU Feature: AVX512F +// Asm: VFMSUBADD213PS, CPU Feature: AVX512 func (x Float32x16) MulSubAddMasked(y Float32x16, z Float32x16, mask Mask32x16) Float32x16 // MulSubAddMasked performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // // This operation is applied selectively under a write mask. // -// Asm: VFMSUBADD213PD, CPU Feature: AVX512F +// Asm: VFMSUBADD213PD, CPU Feature: AVX512 func (x Float64x2) MulSubAddMasked(y Float64x2, z Float64x2, mask Mask64x2) Float64x2 // MulSubAddMasked performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // // This operation is applied selectively under a write mask. // -// Asm: VFMSUBADD213PD, CPU Feature: AVX512F +// Asm: VFMSUBADD213PD, CPU Feature: AVX512 func (x Float64x4) MulSubAddMasked(y Float64x4, z Float64x4, mask Mask64x4) Float64x4 // MulSubAddMasked performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // // This operation is applied selectively under a write mask. 
// -// Asm: VFMSUBADD213PD, CPU Feature: AVX512F +// Asm: VFMSUBADD213PD, CPU Feature: AVX512 func (x Float64x8) MulSubAddMasked(y Float64x8, z Float64x8, mask Mask64x8) Float64x8 /* NotEqual */ @@ -6194,7 +6194,7 @@ func (x Float32x8) NotEqual(y Float32x8) Mask32x8 // NotEqual compares for inequality. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x16) NotEqual(y Float32x16) Mask32x16 // NotEqual compares for inequality. @@ -6209,127 +6209,127 @@ func (x Float64x4) NotEqual(y Float64x4) Mask64x4 // NotEqual compares for inequality. // -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x8) NotEqual(y Float64x8) Mask64x8 // NotEqual compares for inequality. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x16) NotEqual(y Int8x16) Mask8x16 // NotEqual compares for inequality. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x32) NotEqual(y Int8x32) Mask8x32 // NotEqual compares for inequality. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x64) NotEqual(y Int8x64) Mask8x64 // NotEqual compares for inequality. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x8) NotEqual(y Int16x8) Mask16x8 // NotEqual compares for inequality. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x16) NotEqual(y Int16x16) Mask16x16 // NotEqual compares for inequality. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x32) NotEqual(y Int16x32) Mask16x32 // NotEqual compares for inequality. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x4) NotEqual(y Int32x4) Mask32x4 // NotEqual compares for inequality. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x8) NotEqual(y Int32x8) Mask32x8 // NotEqual compares for inequality. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x16) NotEqual(y Int32x16) Mask32x16 // NotEqual compares for inequality. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x2) NotEqual(y Int64x2) Mask64x2 // NotEqual compares for inequality. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x4) NotEqual(y Int64x4) Mask64x4 // NotEqual compares for inequality. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x8) NotEqual(y Int64x8) Mask64x8 // NotEqual compares for inequality. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x16) NotEqual(y Uint8x16) Mask8x16 // NotEqual compares for inequality. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x32) NotEqual(y Uint8x32) Mask8x32 // NotEqual compares for inequality. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x64) NotEqual(y Uint8x64) Mask8x64 // NotEqual compares for inequality. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x8) NotEqual(y Uint16x8) Mask16x8 // NotEqual compares for inequality. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x16) NotEqual(y Uint16x16) Mask16x16 // NotEqual compares for inequality. 
// -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x32) NotEqual(y Uint16x32) Mask16x32 // NotEqual compares for inequality. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x4) NotEqual(y Uint32x4) Mask32x4 // NotEqual compares for inequality. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x8) NotEqual(y Uint32x8) Mask32x8 // NotEqual compares for inequality. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x16) NotEqual(y Uint32x16) Mask32x16 // NotEqual compares for inequality. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x2) NotEqual(y Uint64x2) Mask64x2 // NotEqual compares for inequality. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x4) NotEqual(y Uint64x4) Mask64x4 // NotEqual compares for inequality. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x8) NotEqual(y Uint64x8) Mask64x8 /* NotEqualMasked */ @@ -6338,210 +6338,210 @@ func (x Uint64x8) NotEqual(y Uint64x8) Mask64x8 // // This operation is applied selectively under a write mask. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x4) NotEqualMasked(y Float32x4, mask Mask32x4) Mask32x4 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x8) NotEqualMasked(y Float32x8, mask Mask32x8) Mask32x8 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x16) NotEqualMasked(y Float32x16, mask Mask32x16) Mask32x16 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x2) NotEqualMasked(y Float64x2, mask Mask64x2) Mask64x2 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x4) NotEqualMasked(y Float64x4, mask Mask64x4) Mask64x4 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x8) NotEqualMasked(y Float64x8, mask Mask64x8) Mask64x8 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x16) NotEqualMasked(y Int8x16, mask Mask8x16) Mask8x16 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x32) NotEqualMasked(y Int8x32, mask Mask8x32) Mask8x32 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x64) NotEqualMasked(y Int8x64, mask Mask8x64) Mask8x64 // NotEqualMasked compares for inequality. 
// // This operation is applied selectively under a write mask. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x8) NotEqualMasked(y Int16x8, mask Mask16x8) Mask16x8 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x16) NotEqualMasked(y Int16x16, mask Mask16x16) Mask16x16 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x32) NotEqualMasked(y Int16x32, mask Mask16x32) Mask16x32 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x4) NotEqualMasked(y Int32x4, mask Mask32x4) Mask32x4 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x8) NotEqualMasked(y Int32x8, mask Mask32x8) Mask32x8 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x16) NotEqualMasked(y Int32x16, mask Mask32x16) Mask32x16 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x2) NotEqualMasked(y Int64x2, mask Mask64x2) Mask64x2 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x4) NotEqualMasked(y Int64x4, mask Mask64x4) Mask64x4 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x8) NotEqualMasked(y Int64x8, mask Mask64x8) Mask64x8 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x16) NotEqualMasked(y Uint8x16, mask Mask8x16) Mask8x16 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x32) NotEqualMasked(y Uint8x32, mask Mask8x32) Mask8x32 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x64) NotEqualMasked(y Uint8x64, mask Mask8x64) Mask8x64 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x8) NotEqualMasked(y Uint16x8, mask Mask16x8) Mask16x8 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. 
// -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x16) NotEqualMasked(y Uint16x16, mask Mask16x16) Mask16x16 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x32) NotEqualMasked(y Uint16x32, mask Mask16x32) Mask16x32 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x4) NotEqualMasked(y Uint32x4, mask Mask32x4) Mask32x4 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x8) NotEqualMasked(y Uint32x8, mask Mask32x8) Mask32x8 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x16) NotEqualMasked(y Uint32x16, mask Mask32x16) Mask32x16 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x2) NotEqualMasked(y Uint64x2, mask Mask64x2) Mask64x2 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x4) NotEqualMasked(y Uint64x4, mask Mask64x4) Mask64x4 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x8) NotEqualMasked(y Uint64x8, mask Mask64x8) Mask64x8 /* OnesCount */ @@ -6850,7 +6850,7 @@ func (x Int8x32) Or(y Int8x32) Int8x32 // Or performs a bitwise OR operation between two vectors. // -// Asm: VPORD, CPU Feature: AVX512F +// Asm: VPORD, CPU Feature: AVX512 func (x Int8x64) Or(y Int8x64) Int8x64 // Or performs a bitwise OR operation between two vectors. @@ -6865,7 +6865,7 @@ func (x Int16x16) Or(y Int16x16) Int16x16 // Or performs a bitwise OR operation between two vectors. // -// Asm: VPORD, CPU Feature: AVX512F +// Asm: VPORD, CPU Feature: AVX512 func (x Int16x32) Or(y Int16x32) Int16x32 // Or performs a bitwise OR operation between two vectors. @@ -6880,7 +6880,7 @@ func (x Int32x8) Or(y Int32x8) Int32x8 // Or performs a bitwise OR operation between two vectors. // -// Asm: VPORD, CPU Feature: AVX512F +// Asm: VPORD, CPU Feature: AVX512 func (x Int32x16) Or(y Int32x16) Int32x16 // Or performs a bitwise OR operation between two vectors. @@ -6895,7 +6895,7 @@ func (x Int64x4) Or(y Int64x4) Int64x4 // Or performs a bitwise OR operation between two vectors. // -// Asm: VPORQ, CPU Feature: AVX512F +// Asm: VPORQ, CPU Feature: AVX512 func (x Int64x8) Or(y Int64x8) Int64x8 // Or performs a bitwise OR operation between two vectors. @@ -6910,7 +6910,7 @@ func (x Uint8x32) Or(y Uint8x32) Uint8x32 // Or performs a bitwise OR operation between two vectors. // -// Asm: VPORD, CPU Feature: AVX512F +// Asm: VPORD, CPU Feature: AVX512 func (x Uint8x64) Or(y Uint8x64) Uint8x64 // Or performs a bitwise OR operation between two vectors. 
@@ -6925,7 +6925,7 @@ func (x Uint16x16) Or(y Uint16x16) Uint16x16 // Or performs a bitwise OR operation between two vectors. // -// Asm: VPORD, CPU Feature: AVX512F +// Asm: VPORD, CPU Feature: AVX512 func (x Uint16x32) Or(y Uint16x32) Uint16x32 // Or performs a bitwise OR operation between two vectors. @@ -6940,7 +6940,7 @@ func (x Uint32x8) Or(y Uint32x8) Uint32x8 // Or performs a bitwise OR operation between two vectors. // -// Asm: VPORD, CPU Feature: AVX512F +// Asm: VPORD, CPU Feature: AVX512 func (x Uint32x16) Or(y Uint32x16) Uint32x16 // Or performs a bitwise OR operation between two vectors. @@ -6955,7 +6955,7 @@ func (x Uint64x4) Or(y Uint64x4) Uint64x4 // Or performs a bitwise OR operation between two vectors. // -// Asm: VPORQ, CPU Feature: AVX512F +// Asm: VPORQ, CPU Feature: AVX512 func (x Uint64x8) Or(y Uint64x8) Uint64x8 /* OrMasked */ @@ -6964,84 +6964,84 @@ func (x Uint64x8) Or(y Uint64x8) Uint64x8 // // This operation is applied selectively under a write mask. // -// Asm: VPORD, CPU Feature: AVX512F +// Asm: VPORD, CPU Feature: AVX512 func (x Int32x4) OrMasked(y Int32x4, mask Mask32x4) Int32x4 // OrMasked performs a bitwise OR operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPORD, CPU Feature: AVX512F +// Asm: VPORD, CPU Feature: AVX512 func (x Int32x8) OrMasked(y Int32x8, mask Mask32x8) Int32x8 // OrMasked performs a bitwise OR operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPORD, CPU Feature: AVX512F +// Asm: VPORD, CPU Feature: AVX512 func (x Int32x16) OrMasked(y Int32x16, mask Mask32x16) Int32x16 // OrMasked performs a bitwise OR operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPORQ, CPU Feature: AVX512F +// Asm: VPORQ, CPU Feature: AVX512 func (x Int64x2) OrMasked(y Int64x2, mask Mask64x2) Int64x2 // OrMasked performs a bitwise OR operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPORQ, CPU Feature: AVX512F +// Asm: VPORQ, CPU Feature: AVX512 func (x Int64x4) OrMasked(y Int64x4, mask Mask64x4) Int64x4 // OrMasked performs a bitwise OR operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPORQ, CPU Feature: AVX512F +// Asm: VPORQ, CPU Feature: AVX512 func (x Int64x8) OrMasked(y Int64x8, mask Mask64x8) Int64x8 // OrMasked performs a bitwise OR operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPORD, CPU Feature: AVX512F +// Asm: VPORD, CPU Feature: AVX512 func (x Uint32x4) OrMasked(y Uint32x4, mask Mask32x4) Uint32x4 // OrMasked performs a bitwise OR operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPORD, CPU Feature: AVX512F +// Asm: VPORD, CPU Feature: AVX512 func (x Uint32x8) OrMasked(y Uint32x8, mask Mask32x8) Uint32x8 // OrMasked performs a bitwise OR operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPORD, CPU Feature: AVX512F +// Asm: VPORD, CPU Feature: AVX512 func (x Uint32x16) OrMasked(y Uint32x16, mask Mask32x16) Uint32x16 // OrMasked performs a bitwise OR operation between two vectors. // // This operation is applied selectively under a write mask. 
// -// Asm: VPORQ, CPU Feature: AVX512F +// Asm: VPORQ, CPU Feature: AVX512 func (x Uint64x2) OrMasked(y Uint64x2, mask Mask64x2) Uint64x2 // OrMasked performs a bitwise OR operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPORQ, CPU Feature: AVX512F +// Asm: VPORQ, CPU Feature: AVX512 func (x Uint64x4) OrMasked(y Uint64x4, mask Mask64x4) Uint64x4 // OrMasked performs a bitwise OR operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPORQ, CPU Feature: AVX512F +// Asm: VPORQ, CPU Feature: AVX512 func (x Uint64x8) OrMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* Permute */ @@ -7092,42 +7092,42 @@ func (x Uint8x64) Permute(indices Uint8x64) Uint8x64 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // -// Asm: VPERMW, CPU Feature: AVX512BW +// Asm: VPERMW, CPU Feature: AVX512 func (x Int16x8) Permute(indices Uint16x8) Int16x8 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // -// Asm: VPERMW, CPU Feature: AVX512BW +// Asm: VPERMW, CPU Feature: AVX512 func (x Uint16x8) Permute(indices Uint16x8) Uint16x8 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // -// Asm: VPERMW, CPU Feature: AVX512BW +// Asm: VPERMW, CPU Feature: AVX512 func (x Int16x16) Permute(indices Uint16x16) Int16x16 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // -// Asm: VPERMW, CPU Feature: AVX512BW +// Asm: VPERMW, CPU Feature: AVX512 func (x Uint16x16) Permute(indices Uint16x16) Uint16x16 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // -// Asm: VPERMW, CPU Feature: AVX512BW +// Asm: VPERMW, CPU Feature: AVX512 func (x Int16x32) Permute(indices Uint16x32) Int16x32 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // -// Asm: VPERMW, CPU Feature: AVX512BW +// Asm: VPERMW, CPU Feature: AVX512 func (x Uint16x32) Permute(indices Uint16x32) Uint16x32 // Permute performs a full permutation of vector x using indices: @@ -7155,63 +7155,63 @@ func (x Uint32x8) Permute(indices Uint32x8) Uint32x8 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // -// Asm: VPERMPS, CPU Feature: AVX512F +// Asm: VPERMPS, CPU Feature: AVX512 func (x Float32x16) Permute(indices Uint32x16) Float32x16 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. 
// -// Asm: VPERMD, CPU Feature: AVX512F +// Asm: VPERMD, CPU Feature: AVX512 func (x Int32x16) Permute(indices Uint32x16) Int32x16 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // -// Asm: VPERMD, CPU Feature: AVX512F +// Asm: VPERMD, CPU Feature: AVX512 func (x Uint32x16) Permute(indices Uint32x16) Uint32x16 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // -// Asm: VPERMPD, CPU Feature: AVX512F +// Asm: VPERMPD, CPU Feature: AVX512 func (x Float64x4) Permute(indices Uint64x4) Float64x4 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // -// Asm: VPERMQ, CPU Feature: AVX512F +// Asm: VPERMQ, CPU Feature: AVX512 func (x Int64x4) Permute(indices Uint64x4) Int64x4 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // -// Asm: VPERMQ, CPU Feature: AVX512F +// Asm: VPERMQ, CPU Feature: AVX512 func (x Uint64x4) Permute(indices Uint64x4) Uint64x4 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // -// Asm: VPERMPD, CPU Feature: AVX512F +// Asm: VPERMPD, CPU Feature: AVX512 func (x Float64x8) Permute(indices Uint64x8) Float64x8 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // -// Asm: VPERMQ, CPU Feature: AVX512F +// Asm: VPERMQ, CPU Feature: AVX512 func (x Int64x8) Permute(indices Uint64x8) Int64x8 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // -// Asm: VPERMQ, CPU Feature: AVX512F +// Asm: VPERMQ, CPU Feature: AVX512 func (x Uint64x8) Permute(indices Uint64x8) Uint64x8 /* Permute2 */ @@ -7269,7 +7269,7 @@ func (x Uint8x64) Permute2(y Uint8x64, indices Uint8x64) Uint8x64 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// Asm: VPERMI2W, CPU Feature: AVX512BW +// Asm: VPERMI2W, CPU Feature: AVX512 func (x Int16x8) Permute2(y Int16x8, indices Uint16x8) Int16x8 // Permute2 performs a full permutation of vector x, y using indices: @@ -7277,7 +7277,7 @@ func (x Int16x8) Permute2(y Int16x8, indices Uint16x8) Int16x8 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// Asm: VPERMI2W, CPU Feature: AVX512BW +// Asm: VPERMI2W, CPU Feature: AVX512 func (x Uint16x8) Permute2(y Uint16x8, indices Uint16x8) Uint16x8 // Permute2 performs a full permutation of vector x, y using indices: @@ -7285,7 +7285,7 @@ func (x Uint16x8) Permute2(y Uint16x8, indices Uint16x8) Uint16x8 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. 
// -// Asm: VPERMI2W, CPU Feature: AVX512BW +// Asm: VPERMI2W, CPU Feature: AVX512 func (x Int16x16) Permute2(y Int16x16, indices Uint16x16) Int16x16 // Permute2 performs a full permutation of vector x, y using indices: @@ -7293,7 +7293,7 @@ func (x Int16x16) Permute2(y Int16x16, indices Uint16x16) Int16x16 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// Asm: VPERMI2W, CPU Feature: AVX512BW +// Asm: VPERMI2W, CPU Feature: AVX512 func (x Uint16x16) Permute2(y Uint16x16, indices Uint16x16) Uint16x16 // Permute2 performs a full permutation of vector x, y using indices: @@ -7301,7 +7301,7 @@ func (x Uint16x16) Permute2(y Uint16x16, indices Uint16x16) Uint16x16 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// Asm: VPERMI2W, CPU Feature: AVX512BW +// Asm: VPERMI2W, CPU Feature: AVX512 func (x Int16x32) Permute2(y Int16x32, indices Uint16x32) Int16x32 // Permute2 performs a full permutation of vector x, y using indices: @@ -7309,7 +7309,7 @@ func (x Int16x32) Permute2(y Int16x32, indices Uint16x32) Int16x32 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// Asm: VPERMI2W, CPU Feature: AVX512BW +// Asm: VPERMI2W, CPU Feature: AVX512 func (x Uint16x32) Permute2(y Uint16x32, indices Uint16x32) Uint16x32 // Permute2 performs a full permutation of vector x, y using indices: @@ -7317,7 +7317,7 @@ func (x Uint16x32) Permute2(y Uint16x32, indices Uint16x32) Uint16x32 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// Asm: VPERMI2PS, CPU Feature: AVX512F +// Asm: VPERMI2PS, CPU Feature: AVX512 func (x Float32x4) Permute2(y Float32x4, indices Uint32x4) Float32x4 // Permute2 performs a full permutation of vector x, y using indices: @@ -7325,7 +7325,7 @@ func (x Float32x4) Permute2(y Float32x4, indices Uint32x4) Float32x4 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// Asm: VPERMI2D, CPU Feature: AVX512F +// Asm: VPERMI2D, CPU Feature: AVX512 func (x Int32x4) Permute2(y Int32x4, indices Uint32x4) Int32x4 // Permute2 performs a full permutation of vector x, y using indices: @@ -7333,7 +7333,7 @@ func (x Int32x4) Permute2(y Int32x4, indices Uint32x4) Int32x4 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// Asm: VPERMI2D, CPU Feature: AVX512F +// Asm: VPERMI2D, CPU Feature: AVX512 func (x Uint32x4) Permute2(y Uint32x4, indices Uint32x4) Uint32x4 // Permute2 performs a full permutation of vector x, y using indices: @@ -7341,7 +7341,7 @@ func (x Uint32x4) Permute2(y Uint32x4, indices Uint32x4) Uint32x4 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// Asm: VPERMI2PS, CPU Feature: AVX512F +// Asm: VPERMI2PS, CPU Feature: AVX512 func (x Float32x8) Permute2(y Float32x8, indices Uint32x8) Float32x8 // Permute2 performs a full permutation of vector x, y using indices: @@ -7349,7 +7349,7 @@ func (x Float32x8) Permute2(y Float32x8, indices Uint32x8) Float32x8 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. 
// -// Asm: VPERMI2D, CPU Feature: AVX512F +// Asm: VPERMI2D, CPU Feature: AVX512 func (x Int32x8) Permute2(y Int32x8, indices Uint32x8) Int32x8 // Permute2 performs a full permutation of vector x, y using indices: @@ -7357,7 +7357,7 @@ func (x Int32x8) Permute2(y Int32x8, indices Uint32x8) Int32x8 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// Asm: VPERMI2D, CPU Feature: AVX512F +// Asm: VPERMI2D, CPU Feature: AVX512 func (x Uint32x8) Permute2(y Uint32x8, indices Uint32x8) Uint32x8 // Permute2 performs a full permutation of vector x, y using indices: @@ -7365,7 +7365,7 @@ func (x Uint32x8) Permute2(y Uint32x8, indices Uint32x8) Uint32x8 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// Asm: VPERMI2PS, CPU Feature: AVX512F +// Asm: VPERMI2PS, CPU Feature: AVX512 func (x Float32x16) Permute2(y Float32x16, indices Uint32x16) Float32x16 // Permute2 performs a full permutation of vector x, y using indices: @@ -7373,7 +7373,7 @@ func (x Float32x16) Permute2(y Float32x16, indices Uint32x16) Float32x16 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// Asm: VPERMI2D, CPU Feature: AVX512F +// Asm: VPERMI2D, CPU Feature: AVX512 func (x Int32x16) Permute2(y Int32x16, indices Uint32x16) Int32x16 // Permute2 performs a full permutation of vector x, y using indices: @@ -7381,7 +7381,7 @@ func (x Int32x16) Permute2(y Int32x16, indices Uint32x16) Int32x16 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// Asm: VPERMI2D, CPU Feature: AVX512F +// Asm: VPERMI2D, CPU Feature: AVX512 func (x Uint32x16) Permute2(y Uint32x16, indices Uint32x16) Uint32x16 // Permute2 performs a full permutation of vector x, y using indices: @@ -7389,7 +7389,7 @@ func (x Uint32x16) Permute2(y Uint32x16, indices Uint32x16) Uint32x16 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// Asm: VPERMI2PD, CPU Feature: AVX512F +// Asm: VPERMI2PD, CPU Feature: AVX512 func (x Float64x2) Permute2(y Float64x2, indices Uint64x2) Float64x2 // Permute2 performs a full permutation of vector x, y using indices: @@ -7397,7 +7397,7 @@ func (x Float64x2) Permute2(y Float64x2, indices Uint64x2) Float64x2 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// Asm: VPERMI2Q, CPU Feature: AVX512F +// Asm: VPERMI2Q, CPU Feature: AVX512 func (x Int64x2) Permute2(y Int64x2, indices Uint64x2) Int64x2 // Permute2 performs a full permutation of vector x, y using indices: @@ -7405,7 +7405,7 @@ func (x Int64x2) Permute2(y Int64x2, indices Uint64x2) Int64x2 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// Asm: VPERMI2Q, CPU Feature: AVX512F +// Asm: VPERMI2Q, CPU Feature: AVX512 func (x Uint64x2) Permute2(y Uint64x2, indices Uint64x2) Uint64x2 // Permute2 performs a full permutation of vector x, y using indices: @@ -7413,7 +7413,7 @@ func (x Uint64x2) Permute2(y Uint64x2, indices Uint64x2) Uint64x2 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. 
// -// Asm: VPERMI2PD, CPU Feature: AVX512F +// Asm: VPERMI2PD, CPU Feature: AVX512 func (x Float64x4) Permute2(y Float64x4, indices Uint64x4) Float64x4 // Permute2 performs a full permutation of vector x, y using indices: @@ -7421,7 +7421,7 @@ func (x Float64x4) Permute2(y Float64x4, indices Uint64x4) Float64x4 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// Asm: VPERMI2Q, CPU Feature: AVX512F +// Asm: VPERMI2Q, CPU Feature: AVX512 func (x Int64x4) Permute2(y Int64x4, indices Uint64x4) Int64x4 // Permute2 performs a full permutation of vector x, y using indices: @@ -7429,7 +7429,7 @@ func (x Int64x4) Permute2(y Int64x4, indices Uint64x4) Int64x4 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// Asm: VPERMI2Q, CPU Feature: AVX512F +// Asm: VPERMI2Q, CPU Feature: AVX512 func (x Uint64x4) Permute2(y Uint64x4, indices Uint64x4) Uint64x4 // Permute2 performs a full permutation of vector x, y using indices: @@ -7437,7 +7437,7 @@ func (x Uint64x4) Permute2(y Uint64x4, indices Uint64x4) Uint64x4 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// Asm: VPERMI2PD, CPU Feature: AVX512F +// Asm: VPERMI2PD, CPU Feature: AVX512 func (x Float64x8) Permute2(y Float64x8, indices Uint64x8) Float64x8 // Permute2 performs a full permutation of vector x, y using indices: @@ -7445,7 +7445,7 @@ func (x Float64x8) Permute2(y Float64x8, indices Uint64x8) Float64x8 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// Asm: VPERMI2Q, CPU Feature: AVX512F +// Asm: VPERMI2Q, CPU Feature: AVX512 func (x Int64x8) Permute2(y Int64x8, indices Uint64x8) Int64x8 // Permute2 performs a full permutation of vector x, y using indices: @@ -7453,7 +7453,7 @@ func (x Int64x8) Permute2(y Int64x8, indices Uint64x8) Int64x8 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// Asm: VPERMI2Q, CPU Feature: AVX512F +// Asm: VPERMI2Q, CPU Feature: AVX512 func (x Uint64x8) Permute2(y Uint64x8, indices Uint64x8) Uint64x8 /* Permute2Masked */ @@ -7525,7 +7525,7 @@ func (x Uint8x64) Permute2Masked(y Uint8x64, indices Uint8x64, mask Mask8x64) Ui // // This operation is applied selectively under a write mask. // -// Asm: VPERMI2W, CPU Feature: AVX512BW +// Asm: VPERMI2W, CPU Feature: AVX512 func (x Int16x8) Permute2Masked(y Int16x8, indices Uint16x8, mask Mask16x8) Int16x8 // Permute2Masked performs a full permutation of vector x, y using indices: @@ -7535,7 +7535,7 @@ func (x Int16x8) Permute2Masked(y Int16x8, indices Uint16x8, mask Mask16x8) Int1 // // This operation is applied selectively under a write mask. // -// Asm: VPERMI2W, CPU Feature: AVX512BW +// Asm: VPERMI2W, CPU Feature: AVX512 func (x Uint16x8) Permute2Masked(y Uint16x8, indices Uint16x8, mask Mask16x8) Uint16x8 // Permute2Masked performs a full permutation of vector x, y using indices: @@ -7545,7 +7545,7 @@ func (x Uint16x8) Permute2Masked(y Uint16x8, indices Uint16x8, mask Mask16x8) Ui // // This operation is applied selectively under a write mask. 
// -// Asm: VPERMI2W, CPU Feature: AVX512BW +// Asm: VPERMI2W, CPU Feature: AVX512 func (x Int16x16) Permute2Masked(y Int16x16, indices Uint16x16, mask Mask16x16) Int16x16 // Permute2Masked performs a full permutation of vector x, y using indices: @@ -7555,7 +7555,7 @@ func (x Int16x16) Permute2Masked(y Int16x16, indices Uint16x16, mask Mask16x16) // // This operation is applied selectively under a write mask. // -// Asm: VPERMI2W, CPU Feature: AVX512BW +// Asm: VPERMI2W, CPU Feature: AVX512 func (x Uint16x16) Permute2Masked(y Uint16x16, indices Uint16x16, mask Mask16x16) Uint16x16 // Permute2Masked performs a full permutation of vector x, y using indices: @@ -7565,7 +7565,7 @@ func (x Uint16x16) Permute2Masked(y Uint16x16, indices Uint16x16, mask Mask16x16 // // This operation is applied selectively under a write mask. // -// Asm: VPERMI2W, CPU Feature: AVX512BW +// Asm: VPERMI2W, CPU Feature: AVX512 func (x Int16x32) Permute2Masked(y Int16x32, indices Uint16x32, mask Mask16x32) Int16x32 // Permute2Masked performs a full permutation of vector x, y using indices: @@ -7575,7 +7575,7 @@ func (x Int16x32) Permute2Masked(y Int16x32, indices Uint16x32, mask Mask16x32) // // This operation is applied selectively under a write mask. // -// Asm: VPERMI2W, CPU Feature: AVX512BW +// Asm: VPERMI2W, CPU Feature: AVX512 func (x Uint16x32) Permute2Masked(y Uint16x32, indices Uint16x32, mask Mask16x32) Uint16x32 // Permute2Masked performs a full permutation of vector x, y using indices: @@ -7585,7 +7585,7 @@ func (x Uint16x32) Permute2Masked(y Uint16x32, indices Uint16x32, mask Mask16x32 // // This operation is applied selectively under a write mask. // -// Asm: VPERMI2PS, CPU Feature: AVX512F +// Asm: VPERMI2PS, CPU Feature: AVX512 func (x Float32x4) Permute2Masked(y Float32x4, indices Uint32x4, mask Mask32x4) Float32x4 // Permute2Masked performs a full permutation of vector x, y using indices: @@ -7595,7 +7595,7 @@ func (x Float32x4) Permute2Masked(y Float32x4, indices Uint32x4, mask Mask32x4) // // This operation is applied selectively under a write mask. // -// Asm: VPERMI2D, CPU Feature: AVX512F +// Asm: VPERMI2D, CPU Feature: AVX512 func (x Int32x4) Permute2Masked(y Int32x4, indices Uint32x4, mask Mask32x4) Int32x4 // Permute2Masked performs a full permutation of vector x, y using indices: @@ -7605,7 +7605,7 @@ func (x Int32x4) Permute2Masked(y Int32x4, indices Uint32x4, mask Mask32x4) Int3 // // This operation is applied selectively under a write mask. // -// Asm: VPERMI2D, CPU Feature: AVX512F +// Asm: VPERMI2D, CPU Feature: AVX512 func (x Uint32x4) Permute2Masked(y Uint32x4, indices Uint32x4, mask Mask32x4) Uint32x4 // Permute2Masked performs a full permutation of vector x, y using indices: @@ -7615,7 +7615,7 @@ func (x Uint32x4) Permute2Masked(y Uint32x4, indices Uint32x4, mask Mask32x4) Ui // // This operation is applied selectively under a write mask. // -// Asm: VPERMI2PS, CPU Feature: AVX512F +// Asm: VPERMI2PS, CPU Feature: AVX512 func (x Float32x8) Permute2Masked(y Float32x8, indices Uint32x8, mask Mask32x8) Float32x8 // Permute2Masked performs a full permutation of vector x, y using indices: @@ -7625,7 +7625,7 @@ func (x Float32x8) Permute2Masked(y Float32x8, indices Uint32x8, mask Mask32x8) // // This operation is applied selectively under a write mask. 
// -// Asm: VPERMI2D, CPU Feature: AVX512F +// Asm: VPERMI2D, CPU Feature: AVX512 func (x Int32x8) Permute2Masked(y Int32x8, indices Uint32x8, mask Mask32x8) Int32x8 // Permute2Masked performs a full permutation of vector x, y using indices: @@ -7635,7 +7635,7 @@ func (x Int32x8) Permute2Masked(y Int32x8, indices Uint32x8, mask Mask32x8) Int3 // // This operation is applied selectively under a write mask. // -// Asm: VPERMI2D, CPU Feature: AVX512F +// Asm: VPERMI2D, CPU Feature: AVX512 func (x Uint32x8) Permute2Masked(y Uint32x8, indices Uint32x8, mask Mask32x8) Uint32x8 // Permute2Masked performs a full permutation of vector x, y using indices: @@ -7645,7 +7645,7 @@ func (x Uint32x8) Permute2Masked(y Uint32x8, indices Uint32x8, mask Mask32x8) Ui // // This operation is applied selectively under a write mask. // -// Asm: VPERMI2PS, CPU Feature: AVX512F +// Asm: VPERMI2PS, CPU Feature: AVX512 func (x Float32x16) Permute2Masked(y Float32x16, indices Uint32x16, mask Mask32x16) Float32x16 // Permute2Masked performs a full permutation of vector x, y using indices: @@ -7655,7 +7655,7 @@ func (x Float32x16) Permute2Masked(y Float32x16, indices Uint32x16, mask Mask32x // // This operation is applied selectively under a write mask. // -// Asm: VPERMI2D, CPU Feature: AVX512F +// Asm: VPERMI2D, CPU Feature: AVX512 func (x Int32x16) Permute2Masked(y Int32x16, indices Uint32x16, mask Mask32x16) Int32x16 // Permute2Masked performs a full permutation of vector x, y using indices: @@ -7665,7 +7665,7 @@ func (x Int32x16) Permute2Masked(y Int32x16, indices Uint32x16, mask Mask32x16) // // This operation is applied selectively under a write mask. // -// Asm: VPERMI2D, CPU Feature: AVX512F +// Asm: VPERMI2D, CPU Feature: AVX512 func (x Uint32x16) Permute2Masked(y Uint32x16, indices Uint32x16, mask Mask32x16) Uint32x16 // Permute2Masked performs a full permutation of vector x, y using indices: @@ -7675,7 +7675,7 @@ func (x Uint32x16) Permute2Masked(y Uint32x16, indices Uint32x16, mask Mask32x16 // // This operation is applied selectively under a write mask. // -// Asm: VPERMI2PD, CPU Feature: AVX512F +// Asm: VPERMI2PD, CPU Feature: AVX512 func (x Float64x2) Permute2Masked(y Float64x2, indices Uint64x2, mask Mask64x2) Float64x2 // Permute2Masked performs a full permutation of vector x, y using indices: @@ -7685,7 +7685,7 @@ func (x Float64x2) Permute2Masked(y Float64x2, indices Uint64x2, mask Mask64x2) // // This operation is applied selectively under a write mask. // -// Asm: VPERMI2Q, CPU Feature: AVX512F +// Asm: VPERMI2Q, CPU Feature: AVX512 func (x Int64x2) Permute2Masked(y Int64x2, indices Uint64x2, mask Mask64x2) Int64x2 // Permute2Masked performs a full permutation of vector x, y using indices: @@ -7695,7 +7695,7 @@ func (x Int64x2) Permute2Masked(y Int64x2, indices Uint64x2, mask Mask64x2) Int6 // // This operation is applied selectively under a write mask. // -// Asm: VPERMI2Q, CPU Feature: AVX512F +// Asm: VPERMI2Q, CPU Feature: AVX512 func (x Uint64x2) Permute2Masked(y Uint64x2, indices Uint64x2, mask Mask64x2) Uint64x2 // Permute2Masked performs a full permutation of vector x, y using indices: @@ -7705,7 +7705,7 @@ func (x Uint64x2) Permute2Masked(y Uint64x2, indices Uint64x2, mask Mask64x2) Ui // // This operation is applied selectively under a write mask. 
// -// Asm: VPERMI2PD, CPU Feature: AVX512F +// Asm: VPERMI2PD, CPU Feature: AVX512 func (x Float64x4) Permute2Masked(y Float64x4, indices Uint64x4, mask Mask64x4) Float64x4 // Permute2Masked performs a full permutation of vector x, y using indices: @@ -7715,7 +7715,7 @@ func (x Float64x4) Permute2Masked(y Float64x4, indices Uint64x4, mask Mask64x4) // // This operation is applied selectively under a write mask. // -// Asm: VPERMI2Q, CPU Feature: AVX512F +// Asm: VPERMI2Q, CPU Feature: AVX512 func (x Int64x4) Permute2Masked(y Int64x4, indices Uint64x4, mask Mask64x4) Int64x4 // Permute2Masked performs a full permutation of vector x, y using indices: @@ -7725,7 +7725,7 @@ func (x Int64x4) Permute2Masked(y Int64x4, indices Uint64x4, mask Mask64x4) Int6 // // This operation is applied selectively under a write mask. // -// Asm: VPERMI2Q, CPU Feature: AVX512F +// Asm: VPERMI2Q, CPU Feature: AVX512 func (x Uint64x4) Permute2Masked(y Uint64x4, indices Uint64x4, mask Mask64x4) Uint64x4 // Permute2Masked performs a full permutation of vector x, y using indices: @@ -7735,7 +7735,7 @@ func (x Uint64x4) Permute2Masked(y Uint64x4, indices Uint64x4, mask Mask64x4) Ui // // This operation is applied selectively under a write mask. // -// Asm: VPERMI2PD, CPU Feature: AVX512F +// Asm: VPERMI2PD, CPU Feature: AVX512 func (x Float64x8) Permute2Masked(y Float64x8, indices Uint64x8, mask Mask64x8) Float64x8 // Permute2Masked performs a full permutation of vector x, y using indices: @@ -7745,7 +7745,7 @@ func (x Float64x8) Permute2Masked(y Float64x8, indices Uint64x8, mask Mask64x8) // // This operation is applied selectively under a write mask. // -// Asm: VPERMI2Q, CPU Feature: AVX512F +// Asm: VPERMI2Q, CPU Feature: AVX512 func (x Int64x8) Permute2Masked(y Int64x8, indices Uint64x8, mask Mask64x8) Int64x8 // Permute2Masked performs a full permutation of vector x, y using indices: @@ -7755,7 +7755,7 @@ func (x Int64x8) Permute2Masked(y Int64x8, indices Uint64x8, mask Mask64x8) Int6 // // This operation is applied selectively under a write mask. // -// Asm: VPERMI2Q, CPU Feature: AVX512F +// Asm: VPERMI2Q, CPU Feature: AVX512 func (x Uint64x8) Permute2Masked(y Uint64x8, indices Uint64x8, mask Mask64x8) Uint64x8 /* PermuteMasked */ @@ -7820,7 +7820,7 @@ func (x Uint8x64) PermuteMasked(indices Uint8x64, mask Mask8x64) Uint8x64 // // This operation is applied selectively under a write mask. // -// Asm: VPERMW, CPU Feature: AVX512BW +// Asm: VPERMW, CPU Feature: AVX512 func (x Int16x8) PermuteMasked(indices Uint16x8, mask Mask16x8) Int16x8 // PermuteMasked performs a full permutation of vector x using indices: @@ -7829,7 +7829,7 @@ func (x Int16x8) PermuteMasked(indices Uint16x8, mask Mask16x8) Int16x8 // // This operation is applied selectively under a write mask. // -// Asm: VPERMW, CPU Feature: AVX512BW +// Asm: VPERMW, CPU Feature: AVX512 func (x Uint16x8) PermuteMasked(indices Uint16x8, mask Mask16x8) Uint16x8 // PermuteMasked performs a full permutation of vector x using indices: @@ -7838,7 +7838,7 @@ func (x Uint16x8) PermuteMasked(indices Uint16x8, mask Mask16x8) Uint16x8 // // This operation is applied selectively under a write mask. 
// -// Asm: VPERMW, CPU Feature: AVX512BW +// Asm: VPERMW, CPU Feature: AVX512 func (x Int16x16) PermuteMasked(indices Uint16x16, mask Mask16x16) Int16x16 // PermuteMasked performs a full permutation of vector x using indices: @@ -7847,7 +7847,7 @@ func (x Int16x16) PermuteMasked(indices Uint16x16, mask Mask16x16) Int16x16 // // This operation is applied selectively under a write mask. // -// Asm: VPERMW, CPU Feature: AVX512BW +// Asm: VPERMW, CPU Feature: AVX512 func (x Uint16x16) PermuteMasked(indices Uint16x16, mask Mask16x16) Uint16x16 // PermuteMasked performs a full permutation of vector x using indices: @@ -7856,7 +7856,7 @@ func (x Uint16x16) PermuteMasked(indices Uint16x16, mask Mask16x16) Uint16x16 // // This operation is applied selectively under a write mask. // -// Asm: VPERMW, CPU Feature: AVX512BW +// Asm: VPERMW, CPU Feature: AVX512 func (x Int16x32) PermuteMasked(indices Uint16x32, mask Mask16x32) Int16x32 // PermuteMasked performs a full permutation of vector x using indices: @@ -7865,7 +7865,7 @@ func (x Int16x32) PermuteMasked(indices Uint16x32, mask Mask16x32) Int16x32 // // This operation is applied selectively under a write mask. // -// Asm: VPERMW, CPU Feature: AVX512BW +// Asm: VPERMW, CPU Feature: AVX512 func (x Uint16x32) PermuteMasked(indices Uint16x32, mask Mask16x32) Uint16x32 // PermuteMasked performs a full permutation of vector x using indices: @@ -7874,7 +7874,7 @@ func (x Uint16x32) PermuteMasked(indices Uint16x32, mask Mask16x32) Uint16x32 // // This operation is applied selectively under a write mask. // -// Asm: VPERMPS, CPU Feature: AVX512F +// Asm: VPERMPS, CPU Feature: AVX512 func (x Float32x8) PermuteMasked(indices Uint32x8, mask Mask32x8) Float32x8 // PermuteMasked performs a full permutation of vector x using indices: @@ -7883,7 +7883,7 @@ func (x Float32x8) PermuteMasked(indices Uint32x8, mask Mask32x8) Float32x8 // // This operation is applied selectively under a write mask. // -// Asm: VPERMD, CPU Feature: AVX512F +// Asm: VPERMD, CPU Feature: AVX512 func (x Int32x8) PermuteMasked(indices Uint32x8, mask Mask32x8) Int32x8 // PermuteMasked performs a full permutation of vector x using indices: @@ -7892,7 +7892,7 @@ func (x Int32x8) PermuteMasked(indices Uint32x8, mask Mask32x8) Int32x8 // // This operation is applied selectively under a write mask. // -// Asm: VPERMD, CPU Feature: AVX512F +// Asm: VPERMD, CPU Feature: AVX512 func (x Uint32x8) PermuteMasked(indices Uint32x8, mask Mask32x8) Uint32x8 // PermuteMasked performs a full permutation of vector x using indices: @@ -7901,7 +7901,7 @@ func (x Uint32x8) PermuteMasked(indices Uint32x8, mask Mask32x8) Uint32x8 // // This operation is applied selectively under a write mask. // -// Asm: VPERMPS, CPU Feature: AVX512F +// Asm: VPERMPS, CPU Feature: AVX512 func (x Float32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Float32x16 // PermuteMasked performs a full permutation of vector x using indices: @@ -7910,7 +7910,7 @@ func (x Float32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Float32x16 // // This operation is applied selectively under a write mask. // -// Asm: VPERMD, CPU Feature: AVX512F +// Asm: VPERMD, CPU Feature: AVX512 func (x Int32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Int32x16 // PermuteMasked performs a full permutation of vector x using indices: @@ -7919,7 +7919,7 @@ func (x Int32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Int32x16 // // This operation is applied selectively under a write mask. 
// -// Asm: VPERMD, CPU Feature: AVX512F +// Asm: VPERMD, CPU Feature: AVX512 func (x Uint32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Uint32x16 // PermuteMasked performs a full permutation of vector x using indices: @@ -7928,7 +7928,7 @@ func (x Uint32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Uint32x16 // // This operation is applied selectively under a write mask. // -// Asm: VPERMPD, CPU Feature: AVX512F +// Asm: VPERMPD, CPU Feature: AVX512 func (x Float64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Float64x4 // PermuteMasked performs a full permutation of vector x using indices: @@ -7937,7 +7937,7 @@ func (x Float64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Float64x4 // // This operation is applied selectively under a write mask. // -// Asm: VPERMQ, CPU Feature: AVX512F +// Asm: VPERMQ, CPU Feature: AVX512 func (x Int64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Int64x4 // PermuteMasked performs a full permutation of vector x using indices: @@ -7946,7 +7946,7 @@ func (x Int64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Int64x4 // // This operation is applied selectively under a write mask. // -// Asm: VPERMQ, CPU Feature: AVX512F +// Asm: VPERMQ, CPU Feature: AVX512 func (x Uint64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Uint64x4 // PermuteMasked performs a full permutation of vector x using indices: @@ -7955,7 +7955,7 @@ func (x Uint64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Uint64x4 // // This operation is applied selectively under a write mask. // -// Asm: VPERMPD, CPU Feature: AVX512F +// Asm: VPERMPD, CPU Feature: AVX512 func (x Float64x8) PermuteMasked(indices Uint64x8, mask Mask64x8) Float64x8 // PermuteMasked performs a full permutation of vector x using indices: @@ -7964,7 +7964,7 @@ func (x Float64x8) PermuteMasked(indices Uint64x8, mask Mask64x8) Float64x8 // // This operation is applied selectively under a write mask. // -// Asm: VPERMQ, CPU Feature: AVX512F +// Asm: VPERMQ, CPU Feature: AVX512 func (x Int64x8) PermuteMasked(indices Uint64x8, mask Mask64x8) Int64x8 // PermuteMasked performs a full permutation of vector x using indices: @@ -7973,7 +7973,7 @@ func (x Int64x8) PermuteMasked(indices Uint64x8, mask Mask64x8) Int64x8 // // This operation is applied selectively under a write mask. // -// Asm: VPERMQ, CPU Feature: AVX512F +// Asm: VPERMQ, CPU Feature: AVX512 func (x Uint64x8) PermuteMasked(indices Uint64x8, mask Mask64x8) Uint64x8 /* Reciprocal */ @@ -7990,22 +7990,22 @@ func (x Float32x8) Reciprocal() Float32x8 // Reciprocal computes an approximate reciprocal of each element. // -// Asm: VRCP14PS, CPU Feature: AVX512F +// Asm: VRCP14PS, CPU Feature: AVX512 func (x Float32x16) Reciprocal() Float32x16 // Reciprocal computes an approximate reciprocal of each element. // -// Asm: VRCP14PD, CPU Feature: AVX512F +// Asm: VRCP14PD, CPU Feature: AVX512 func (x Float64x2) Reciprocal() Float64x2 // Reciprocal computes an approximate reciprocal of each element. // -// Asm: VRCP14PD, CPU Feature: AVX512F +// Asm: VRCP14PD, CPU Feature: AVX512 func (x Float64x4) Reciprocal() Float64x4 // Reciprocal computes an approximate reciprocal of each element. // -// Asm: VRCP14PD, CPU Feature: AVX512F +// Asm: VRCP14PD, CPU Feature: AVX512 func (x Float64x8) Reciprocal() Float64x8 /* ReciprocalMasked */ @@ -8014,42 +8014,42 @@ func (x Float64x8) Reciprocal() Float64x8 // // This operation is applied selectively under a write mask. 
// -// Asm: VRCP14PS, CPU Feature: AVX512F +// Asm: VRCP14PS, CPU Feature: AVX512 func (x Float32x4) ReciprocalMasked(mask Mask32x4) Float32x4 // ReciprocalMasked computes an approximate reciprocal of each element. // // This operation is applied selectively under a write mask. // -// Asm: VRCP14PS, CPU Feature: AVX512F +// Asm: VRCP14PS, CPU Feature: AVX512 func (x Float32x8) ReciprocalMasked(mask Mask32x8) Float32x8 // ReciprocalMasked computes an approximate reciprocal of each element. // // This operation is applied selectively under a write mask. // -// Asm: VRCP14PS, CPU Feature: AVX512F +// Asm: VRCP14PS, CPU Feature: AVX512 func (x Float32x16) ReciprocalMasked(mask Mask32x16) Float32x16 // ReciprocalMasked computes an approximate reciprocal of each element. // // This operation is applied selectively under a write mask. // -// Asm: VRCP14PD, CPU Feature: AVX512F +// Asm: VRCP14PD, CPU Feature: AVX512 func (x Float64x2) ReciprocalMasked(mask Mask64x2) Float64x2 // ReciprocalMasked computes an approximate reciprocal of each element. // // This operation is applied selectively under a write mask. // -// Asm: VRCP14PD, CPU Feature: AVX512F +// Asm: VRCP14PD, CPU Feature: AVX512 func (x Float64x4) ReciprocalMasked(mask Mask64x4) Float64x4 // ReciprocalMasked computes an approximate reciprocal of each element. // // This operation is applied selectively under a write mask. // -// Asm: VRCP14PD, CPU Feature: AVX512F +// Asm: VRCP14PD, CPU Feature: AVX512 func (x Float64x8) ReciprocalMasked(mask Mask64x8) Float64x8 /* ReciprocalSqrt */ @@ -8066,22 +8066,22 @@ func (x Float32x8) ReciprocalSqrt() Float32x8 // ReciprocalSqrt computes an approximate reciprocal of the square root of each element. // -// Asm: VRSQRT14PS, CPU Feature: AVX512F +// Asm: VRSQRT14PS, CPU Feature: AVX512 func (x Float32x16) ReciprocalSqrt() Float32x16 // ReciprocalSqrt computes an approximate reciprocal of the square root of each element. // -// Asm: VRSQRT14PD, CPU Feature: AVX512F +// Asm: VRSQRT14PD, CPU Feature: AVX512 func (x Float64x2) ReciprocalSqrt() Float64x2 // ReciprocalSqrt computes an approximate reciprocal of the square root of each element. // -// Asm: VRSQRT14PD, CPU Feature: AVX512F +// Asm: VRSQRT14PD, CPU Feature: AVX512 func (x Float64x4) ReciprocalSqrt() Float64x4 // ReciprocalSqrt computes an approximate reciprocal of the square root of each element. // -// Asm: VRSQRT14PD, CPU Feature: AVX512F +// Asm: VRSQRT14PD, CPU Feature: AVX512 func (x Float64x8) ReciprocalSqrt() Float64x8 /* ReciprocalSqrtMasked */ @@ -8090,42 +8090,42 @@ func (x Float64x8) ReciprocalSqrt() Float64x8 // // This operation is applied selectively under a write mask. // -// Asm: VRSQRT14PS, CPU Feature: AVX512F +// Asm: VRSQRT14PS, CPU Feature: AVX512 func (x Float32x4) ReciprocalSqrtMasked(mask Mask32x4) Float32x4 // ReciprocalSqrtMasked computes an approximate reciprocal of the square root of each element. // // This operation is applied selectively under a write mask. // -// Asm: VRSQRT14PS, CPU Feature: AVX512F +// Asm: VRSQRT14PS, CPU Feature: AVX512 func (x Float32x8) ReciprocalSqrtMasked(mask Mask32x8) Float32x8 // ReciprocalSqrtMasked computes an approximate reciprocal of the square root of each element. // // This operation is applied selectively under a write mask. // -// Asm: VRSQRT14PS, CPU Feature: AVX512F +// Asm: VRSQRT14PS, CPU Feature: AVX512 func (x Float32x16) ReciprocalSqrtMasked(mask Mask32x16) Float32x16 // ReciprocalSqrtMasked computes an approximate reciprocal of the square root of each element. 
// // This operation is applied selectively under a write mask. // -// Asm: VRSQRT14PD, CPU Feature: AVX512F +// Asm: VRSQRT14PD, CPU Feature: AVX512 func (x Float64x2) ReciprocalSqrtMasked(mask Mask64x2) Float64x2 // ReciprocalSqrtMasked computes an approximate reciprocal of the square root of each element. // // This operation is applied selectively under a write mask. // -// Asm: VRSQRT14PD, CPU Feature: AVX512F +// Asm: VRSQRT14PD, CPU Feature: AVX512 func (x Float64x4) ReciprocalSqrtMasked(mask Mask64x4) Float64x4 // ReciprocalSqrtMasked computes an approximate reciprocal of the square root of each element. // // This operation is applied selectively under a write mask. // -// Asm: VRSQRT14PD, CPU Feature: AVX512F +// Asm: VRSQRT14PD, CPU Feature: AVX512 func (x Float64x8) ReciprocalSqrtMasked(mask Mask64x8) Float64x8 /* RotateAllLeft */ @@ -8134,84 +8134,84 @@ func (x Float64x8) ReciprocalSqrtMasked(mask Mask64x8) Float64x8 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPROLD, CPU Feature: AVX512F +// Asm: VPROLD, CPU Feature: AVX512 func (x Int32x4) RotateAllLeft(shift uint8) Int32x4 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPROLD, CPU Feature: AVX512F +// Asm: VPROLD, CPU Feature: AVX512 func (x Int32x8) RotateAllLeft(shift uint8) Int32x8 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPROLD, CPU Feature: AVX512F +// Asm: VPROLD, CPU Feature: AVX512 func (x Int32x16) RotateAllLeft(shift uint8) Int32x16 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPROLQ, CPU Feature: AVX512F +// Asm: VPROLQ, CPU Feature: AVX512 func (x Int64x2) RotateAllLeft(shift uint8) Int64x2 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPROLQ, CPU Feature: AVX512F +// Asm: VPROLQ, CPU Feature: AVX512 func (x Int64x4) RotateAllLeft(shift uint8) Int64x4 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPROLQ, CPU Feature: AVX512F +// Asm: VPROLQ, CPU Feature: AVX512 func (x Int64x8) RotateAllLeft(shift uint8) Int64x8 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPROLD, CPU Feature: AVX512F +// Asm: VPROLD, CPU Feature: AVX512 func (x Uint32x4) RotateAllLeft(shift uint8) Uint32x4 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// -// Asm: VPROLD, CPU Feature: AVX512F +// Asm: VPROLD, CPU Feature: AVX512 func (x Uint32x8) RotateAllLeft(shift uint8) Uint32x8 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPROLD, CPU Feature: AVX512F +// Asm: VPROLD, CPU Feature: AVX512 func (x Uint32x16) RotateAllLeft(shift uint8) Uint32x16 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPROLQ, CPU Feature: AVX512F +// Asm: VPROLQ, CPU Feature: AVX512 func (x Uint64x2) RotateAllLeft(shift uint8) Uint64x2 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPROLQ, CPU Feature: AVX512F +// Asm: VPROLQ, CPU Feature: AVX512 func (x Uint64x4) RotateAllLeft(shift uint8) Uint64x4 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPROLQ, CPU Feature: AVX512F +// Asm: VPROLQ, CPU Feature: AVX512 func (x Uint64x8) RotateAllLeft(shift uint8) Uint64x8 /* RotateAllLeftMasked */ @@ -8222,7 +8222,7 @@ func (x Uint64x8) RotateAllLeft(shift uint8) Uint64x8 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPROLD, CPU Feature: AVX512F +// Asm: VPROLD, CPU Feature: AVX512 func (x Int32x4) RotateAllLeftMasked(shift uint8, mask Mask32x4) Int32x4 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. @@ -8231,7 +8231,7 @@ func (x Int32x4) RotateAllLeftMasked(shift uint8, mask Mask32x4) Int32x4 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPROLD, CPU Feature: AVX512F +// Asm: VPROLD, CPU Feature: AVX512 func (x Int32x8) RotateAllLeftMasked(shift uint8, mask Mask32x8) Int32x8 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. @@ -8240,7 +8240,7 @@ func (x Int32x8) RotateAllLeftMasked(shift uint8, mask Mask32x8) Int32x8 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPROLD, CPU Feature: AVX512F +// Asm: VPROLD, CPU Feature: AVX512 func (x Int32x16) RotateAllLeftMasked(shift uint8, mask Mask32x16) Int32x16 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. @@ -8249,7 +8249,7 @@ func (x Int32x16) RotateAllLeftMasked(shift uint8, mask Mask32x16) Int32x16 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPROLQ, CPU Feature: AVX512F +// Asm: VPROLQ, CPU Feature: AVX512 func (x Int64x2) RotateAllLeftMasked(shift uint8, mask Mask64x2) Int64x2 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. 
@@ -8258,7 +8258,7 @@ func (x Int64x2) RotateAllLeftMasked(shift uint8, mask Mask64x2) Int64x2 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPROLQ, CPU Feature: AVX512F +// Asm: VPROLQ, CPU Feature: AVX512 func (x Int64x4) RotateAllLeftMasked(shift uint8, mask Mask64x4) Int64x4 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. @@ -8267,7 +8267,7 @@ func (x Int64x4) RotateAllLeftMasked(shift uint8, mask Mask64x4) Int64x4 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPROLQ, CPU Feature: AVX512F +// Asm: VPROLQ, CPU Feature: AVX512 func (x Int64x8) RotateAllLeftMasked(shift uint8, mask Mask64x8) Int64x8 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. @@ -8276,7 +8276,7 @@ func (x Int64x8) RotateAllLeftMasked(shift uint8, mask Mask64x8) Int64x8 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPROLD, CPU Feature: AVX512F +// Asm: VPROLD, CPU Feature: AVX512 func (x Uint32x4) RotateAllLeftMasked(shift uint8, mask Mask32x4) Uint32x4 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. @@ -8285,7 +8285,7 @@ func (x Uint32x4) RotateAllLeftMasked(shift uint8, mask Mask32x4) Uint32x4 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPROLD, CPU Feature: AVX512F +// Asm: VPROLD, CPU Feature: AVX512 func (x Uint32x8) RotateAllLeftMasked(shift uint8, mask Mask32x8) Uint32x8 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. @@ -8294,7 +8294,7 @@ func (x Uint32x8) RotateAllLeftMasked(shift uint8, mask Mask32x8) Uint32x8 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPROLD, CPU Feature: AVX512F +// Asm: VPROLD, CPU Feature: AVX512 func (x Uint32x16) RotateAllLeftMasked(shift uint8, mask Mask32x16) Uint32x16 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. @@ -8303,7 +8303,7 @@ func (x Uint32x16) RotateAllLeftMasked(shift uint8, mask Mask32x16) Uint32x16 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPROLQ, CPU Feature: AVX512F +// Asm: VPROLQ, CPU Feature: AVX512 func (x Uint64x2) RotateAllLeftMasked(shift uint8, mask Mask64x2) Uint64x2 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. @@ -8312,7 +8312,7 @@ func (x Uint64x2) RotateAllLeftMasked(shift uint8, mask Mask64x2) Uint64x2 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPROLQ, CPU Feature: AVX512F +// Asm: VPROLQ, CPU Feature: AVX512 func (x Uint64x4) RotateAllLeftMasked(shift uint8, mask Mask64x4) Uint64x4 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. 
@@ -8321,7 +8321,7 @@ func (x Uint64x4) RotateAllLeftMasked(shift uint8, mask Mask64x4) Uint64x4 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPROLQ, CPU Feature: AVX512F +// Asm: VPROLQ, CPU Feature: AVX512 func (x Uint64x8) RotateAllLeftMasked(shift uint8, mask Mask64x8) Uint64x8 /* RotateAllRight */ @@ -8330,84 +8330,84 @@ func (x Uint64x8) RotateAllLeftMasked(shift uint8, mask Mask64x8) Uint64x8 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPRORD, CPU Feature: AVX512F +// Asm: VPRORD, CPU Feature: AVX512 func (x Int32x4) RotateAllRight(shift uint8) Int32x4 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPRORD, CPU Feature: AVX512F +// Asm: VPRORD, CPU Feature: AVX512 func (x Int32x8) RotateAllRight(shift uint8) Int32x8 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPRORD, CPU Feature: AVX512F +// Asm: VPRORD, CPU Feature: AVX512 func (x Int32x16) RotateAllRight(shift uint8) Int32x16 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPRORQ, CPU Feature: AVX512F +// Asm: VPRORQ, CPU Feature: AVX512 func (x Int64x2) RotateAllRight(shift uint8) Int64x2 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPRORQ, CPU Feature: AVX512F +// Asm: VPRORQ, CPU Feature: AVX512 func (x Int64x4) RotateAllRight(shift uint8) Int64x4 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPRORQ, CPU Feature: AVX512F +// Asm: VPRORQ, CPU Feature: AVX512 func (x Int64x8) RotateAllRight(shift uint8) Int64x8 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPRORD, CPU Feature: AVX512F +// Asm: VPRORD, CPU Feature: AVX512 func (x Uint32x4) RotateAllRight(shift uint8) Uint32x4 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPRORD, CPU Feature: AVX512F +// Asm: VPRORD, CPU Feature: AVX512 func (x Uint32x8) RotateAllRight(shift uint8) Uint32x8 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// -// Asm: VPRORD, CPU Feature: AVX512F +// Asm: VPRORD, CPU Feature: AVX512 func (x Uint32x16) RotateAllRight(shift uint8) Uint32x16 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPRORQ, CPU Feature: AVX512F +// Asm: VPRORQ, CPU Feature: AVX512 func (x Uint64x2) RotateAllRight(shift uint8) Uint64x2 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPRORQ, CPU Feature: AVX512F +// Asm: VPRORQ, CPU Feature: AVX512 func (x Uint64x4) RotateAllRight(shift uint8) Uint64x4 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPRORQ, CPU Feature: AVX512F +// Asm: VPRORQ, CPU Feature: AVX512 func (x Uint64x8) RotateAllRight(shift uint8) Uint64x8 /* RotateAllRightMasked */ @@ -8418,7 +8418,7 @@ func (x Uint64x8) RotateAllRight(shift uint8) Uint64x8 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPRORD, CPU Feature: AVX512F +// Asm: VPRORD, CPU Feature: AVX512 func (x Int32x4) RotateAllRightMasked(shift uint8, mask Mask32x4) Int32x4 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. @@ -8427,7 +8427,7 @@ func (x Int32x4) RotateAllRightMasked(shift uint8, mask Mask32x4) Int32x4 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPRORD, CPU Feature: AVX512F +// Asm: VPRORD, CPU Feature: AVX512 func (x Int32x8) RotateAllRightMasked(shift uint8, mask Mask32x8) Int32x8 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. @@ -8436,7 +8436,7 @@ func (x Int32x8) RotateAllRightMasked(shift uint8, mask Mask32x8) Int32x8 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPRORD, CPU Feature: AVX512F +// Asm: VPRORD, CPU Feature: AVX512 func (x Int32x16) RotateAllRightMasked(shift uint8, mask Mask32x16) Int32x16 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. @@ -8445,7 +8445,7 @@ func (x Int32x16) RotateAllRightMasked(shift uint8, mask Mask32x16) Int32x16 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPRORQ, CPU Feature: AVX512F +// Asm: VPRORQ, CPU Feature: AVX512 func (x Int64x2) RotateAllRightMasked(shift uint8, mask Mask64x2) Int64x2 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. @@ -8454,7 +8454,7 @@ func (x Int64x2) RotateAllRightMasked(shift uint8, mask Mask64x2) Int64x2 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// -// Asm: VPRORQ, CPU Feature: AVX512F +// Asm: VPRORQ, CPU Feature: AVX512 func (x Int64x4) RotateAllRightMasked(shift uint8, mask Mask64x4) Int64x4 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. @@ -8463,7 +8463,7 @@ func (x Int64x4) RotateAllRightMasked(shift uint8, mask Mask64x4) Int64x4 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPRORQ, CPU Feature: AVX512F +// Asm: VPRORQ, CPU Feature: AVX512 func (x Int64x8) RotateAllRightMasked(shift uint8, mask Mask64x8) Int64x8 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. @@ -8472,7 +8472,7 @@ func (x Int64x8) RotateAllRightMasked(shift uint8, mask Mask64x8) Int64x8 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPRORD, CPU Feature: AVX512F +// Asm: VPRORD, CPU Feature: AVX512 func (x Uint32x4) RotateAllRightMasked(shift uint8, mask Mask32x4) Uint32x4 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. @@ -8481,7 +8481,7 @@ func (x Uint32x4) RotateAllRightMasked(shift uint8, mask Mask32x4) Uint32x4 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPRORD, CPU Feature: AVX512F +// Asm: VPRORD, CPU Feature: AVX512 func (x Uint32x8) RotateAllRightMasked(shift uint8, mask Mask32x8) Uint32x8 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. @@ -8490,7 +8490,7 @@ func (x Uint32x8) RotateAllRightMasked(shift uint8, mask Mask32x8) Uint32x8 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPRORD, CPU Feature: AVX512F +// Asm: VPRORD, CPU Feature: AVX512 func (x Uint32x16) RotateAllRightMasked(shift uint8, mask Mask32x16) Uint32x16 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. @@ -8499,7 +8499,7 @@ func (x Uint32x16) RotateAllRightMasked(shift uint8, mask Mask32x16) Uint32x16 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPRORQ, CPU Feature: AVX512F +// Asm: VPRORQ, CPU Feature: AVX512 func (x Uint64x2) RotateAllRightMasked(shift uint8, mask Mask64x2) Uint64x2 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. @@ -8508,7 +8508,7 @@ func (x Uint64x2) RotateAllRightMasked(shift uint8, mask Mask64x2) Uint64x2 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPRORQ, CPU Feature: AVX512F +// Asm: VPRORQ, CPU Feature: AVX512 func (x Uint64x4) RotateAllRightMasked(shift uint8, mask Mask64x4) Uint64x4 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. @@ -8517,69 +8517,69 @@ func (x Uint64x4) RotateAllRightMasked(shift uint8, mask Mask64x4) Uint64x4 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// -// Asm: VPRORQ, CPU Feature: AVX512F +// Asm: VPRORQ, CPU Feature: AVX512 func (x Uint64x8) RotateAllRightMasked(shift uint8, mask Mask64x8) Uint64x8 /* RotateLeft */ // RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVD, CPU Feature: AVX512F +// Asm: VPROLVD, CPU Feature: AVX512 func (x Int32x4) RotateLeft(y Int32x4) Int32x4 // RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVD, CPU Feature: AVX512F +// Asm: VPROLVD, CPU Feature: AVX512 func (x Int32x8) RotateLeft(y Int32x8) Int32x8 // RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVD, CPU Feature: AVX512F +// Asm: VPROLVD, CPU Feature: AVX512 func (x Int32x16) RotateLeft(y Int32x16) Int32x16 // RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVQ, CPU Feature: AVX512F +// Asm: VPROLVQ, CPU Feature: AVX512 func (x Int64x2) RotateLeft(y Int64x2) Int64x2 // RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVQ, CPU Feature: AVX512F +// Asm: VPROLVQ, CPU Feature: AVX512 func (x Int64x4) RotateLeft(y Int64x4) Int64x4 // RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVQ, CPU Feature: AVX512F +// Asm: VPROLVQ, CPU Feature: AVX512 func (x Int64x8) RotateLeft(y Int64x8) Int64x8 // RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVD, CPU Feature: AVX512F +// Asm: VPROLVD, CPU Feature: AVX512 func (x Uint32x4) RotateLeft(y Uint32x4) Uint32x4 // RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVD, CPU Feature: AVX512F +// Asm: VPROLVD, CPU Feature: AVX512 func (x Uint32x8) RotateLeft(y Uint32x8) Uint32x8 // RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVD, CPU Feature: AVX512F +// Asm: VPROLVD, CPU Feature: AVX512 func (x Uint32x16) RotateLeft(y Uint32x16) Uint32x16 // RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVQ, CPU Feature: AVX512F +// Asm: VPROLVQ, CPU Feature: AVX512 func (x Uint64x2) RotateLeft(y Uint64x2) Uint64x2 // RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVQ, CPU Feature: AVX512F +// Asm: VPROLVQ, CPU Feature: AVX512 func (x Uint64x4) RotateLeft(y Uint64x4) Uint64x4 // RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVQ, CPU Feature: AVX512F +// Asm: VPROLVQ, CPU Feature: AVX512 func (x Uint64x8) RotateLeft(y Uint64x8) Uint64x8 /* RotateLeftMasked */ @@ -8588,146 +8588,146 @@ func (x Uint64x8) RotateLeft(y Uint64x8) Uint64x8 // // This operation is applied selectively under a write mask. // -// Asm: VPROLVD, CPU Feature: AVX512F +// Asm: VPROLVD, CPU Feature: AVX512 func (x Int32x4) RotateLeftMasked(y Int32x4, mask Mask32x4) Int32x4 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. 
// // This operation is applied selectively under a write mask. // -// Asm: VPROLVD, CPU Feature: AVX512F +// Asm: VPROLVD, CPU Feature: AVX512 func (x Int32x8) RotateLeftMasked(y Int32x8, mask Mask32x8) Int32x8 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPROLVD, CPU Feature: AVX512F +// Asm: VPROLVD, CPU Feature: AVX512 func (x Int32x16) RotateLeftMasked(y Int32x16, mask Mask32x16) Int32x16 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPROLVQ, CPU Feature: AVX512F +// Asm: VPROLVQ, CPU Feature: AVX512 func (x Int64x2) RotateLeftMasked(y Int64x2, mask Mask64x2) Int64x2 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPROLVQ, CPU Feature: AVX512F +// Asm: VPROLVQ, CPU Feature: AVX512 func (x Int64x4) RotateLeftMasked(y Int64x4, mask Mask64x4) Int64x4 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPROLVQ, CPU Feature: AVX512F +// Asm: VPROLVQ, CPU Feature: AVX512 func (x Int64x8) RotateLeftMasked(y Int64x8, mask Mask64x8) Int64x8 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPROLVD, CPU Feature: AVX512F +// Asm: VPROLVD, CPU Feature: AVX512 func (x Uint32x4) RotateLeftMasked(y Uint32x4, mask Mask32x4) Uint32x4 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPROLVD, CPU Feature: AVX512F +// Asm: VPROLVD, CPU Feature: AVX512 func (x Uint32x8) RotateLeftMasked(y Uint32x8, mask Mask32x8) Uint32x8 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPROLVD, CPU Feature: AVX512F +// Asm: VPROLVD, CPU Feature: AVX512 func (x Uint32x16) RotateLeftMasked(y Uint32x16, mask Mask32x16) Uint32x16 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPROLVQ, CPU Feature: AVX512F +// Asm: VPROLVQ, CPU Feature: AVX512 func (x Uint64x2) RotateLeftMasked(y Uint64x2, mask Mask64x2) Uint64x2 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPROLVQ, CPU Feature: AVX512F +// Asm: VPROLVQ, CPU Feature: AVX512 func (x Uint64x4) RotateLeftMasked(y Uint64x4, mask Mask64x4) Uint64x4 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // This operation is applied selectively under a write mask. 
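For contrast with the immediate forms, the variable-count rotate takes a second vector holding one count per lane. A hedged sketch, again assuming the package imports as "simd" and that both vectors are built elsewhere:

package simddemo

import "simd" // assumed import path

// rotlPerLane rotates each lane of v left by the count held in the
// matching lane of counts (the VPROLVD form documented above).
func rotlPerLane(v, counts simd.Uint32x8) simd.Uint32x8 {
	return v.RotateLeft(counts)
}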
// -// Asm: VPROLVQ, CPU Feature: AVX512F +// Asm: VPROLVQ, CPU Feature: AVX512 func (x Uint64x8) RotateLeftMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* RotateRight */ // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVD, CPU Feature: AVX512F +// Asm: VPRORVD, CPU Feature: AVX512 func (x Int32x4) RotateRight(y Int32x4) Int32x4 // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVD, CPU Feature: AVX512F +// Asm: VPRORVD, CPU Feature: AVX512 func (x Int32x8) RotateRight(y Int32x8) Int32x8 // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVD, CPU Feature: AVX512F +// Asm: VPRORVD, CPU Feature: AVX512 func (x Int32x16) RotateRight(y Int32x16) Int32x16 // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVQ, CPU Feature: AVX512F +// Asm: VPRORVQ, CPU Feature: AVX512 func (x Int64x2) RotateRight(y Int64x2) Int64x2 // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVQ, CPU Feature: AVX512F +// Asm: VPRORVQ, CPU Feature: AVX512 func (x Int64x4) RotateRight(y Int64x4) Int64x4 // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVQ, CPU Feature: AVX512F +// Asm: VPRORVQ, CPU Feature: AVX512 func (x Int64x8) RotateRight(y Int64x8) Int64x8 // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVD, CPU Feature: AVX512F +// Asm: VPRORVD, CPU Feature: AVX512 func (x Uint32x4) RotateRight(y Uint32x4) Uint32x4 // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVD, CPU Feature: AVX512F +// Asm: VPRORVD, CPU Feature: AVX512 func (x Uint32x8) RotateRight(y Uint32x8) Uint32x8 // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVD, CPU Feature: AVX512F +// Asm: VPRORVD, CPU Feature: AVX512 func (x Uint32x16) RotateRight(y Uint32x16) Uint32x16 // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVQ, CPU Feature: AVX512F +// Asm: VPRORVQ, CPU Feature: AVX512 func (x Uint64x2) RotateRight(y Uint64x2) Uint64x2 // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVQ, CPU Feature: AVX512F +// Asm: VPRORVQ, CPU Feature: AVX512 func (x Uint64x4) RotateRight(y Uint64x4) Uint64x4 // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVQ, CPU Feature: AVX512F +// Asm: VPRORVQ, CPU Feature: AVX512 func (x Uint64x8) RotateRight(y Uint64x8) Uint64x8 /* RotateRightMasked */ @@ -8736,84 +8736,84 @@ func (x Uint64x8) RotateRight(y Uint64x8) Uint64x8 // // This operation is applied selectively under a write mask. 
// -// Asm: VPRORVD, CPU Feature: AVX512F +// Asm: VPRORVD, CPU Feature: AVX512 func (x Int32x4) RotateRightMasked(y Int32x4, mask Mask32x4) Int32x4 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPRORVD, CPU Feature: AVX512F +// Asm: VPRORVD, CPU Feature: AVX512 func (x Int32x8) RotateRightMasked(y Int32x8, mask Mask32x8) Int32x8 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPRORVD, CPU Feature: AVX512F +// Asm: VPRORVD, CPU Feature: AVX512 func (x Int32x16) RotateRightMasked(y Int32x16, mask Mask32x16) Int32x16 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPRORVQ, CPU Feature: AVX512F +// Asm: VPRORVQ, CPU Feature: AVX512 func (x Int64x2) RotateRightMasked(y Int64x2, mask Mask64x2) Int64x2 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPRORVQ, CPU Feature: AVX512F +// Asm: VPRORVQ, CPU Feature: AVX512 func (x Int64x4) RotateRightMasked(y Int64x4, mask Mask64x4) Int64x4 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPRORVQ, CPU Feature: AVX512F +// Asm: VPRORVQ, CPU Feature: AVX512 func (x Int64x8) RotateRightMasked(y Int64x8, mask Mask64x8) Int64x8 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPRORVD, CPU Feature: AVX512F +// Asm: VPRORVD, CPU Feature: AVX512 func (x Uint32x4) RotateRightMasked(y Uint32x4, mask Mask32x4) Uint32x4 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPRORVD, CPU Feature: AVX512F +// Asm: VPRORVD, CPU Feature: AVX512 func (x Uint32x8) RotateRightMasked(y Uint32x8, mask Mask32x8) Uint32x8 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPRORVD, CPU Feature: AVX512F +// Asm: VPRORVD, CPU Feature: AVX512 func (x Uint32x16) RotateRightMasked(y Uint32x16, mask Mask32x16) Uint32x16 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPRORVQ, CPU Feature: AVX512F +// Asm: VPRORVQ, CPU Feature: AVX512 func (x Uint64x2) RotateRightMasked(y Uint64x2, mask Mask64x2) Uint64x2 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // This operation is applied selectively under a write mask. 
// -// Asm: VPRORVQ, CPU Feature: AVX512F +// Asm: VPRORVQ, CPU Feature: AVX512 func (x Uint64x4) RotateRightMasked(y Uint64x4, mask Mask64x4) Uint64x4 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPRORVQ, CPU Feature: AVX512F +// Asm: VPRORVQ, CPU Feature: AVX512 func (x Uint64x8) RotateRightMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* RoundToEven */ @@ -8844,42 +8844,42 @@ func (x Float64x4) RoundToEven() Float64x4 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F +// Asm: VRNDSCALEPS, CPU Feature: AVX512 func (x Float32x4) RoundToEvenScaled(prec uint8) Float32x4 // RoundToEvenScaled rounds elements with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F +// Asm: VRNDSCALEPS, CPU Feature: AVX512 func (x Float32x8) RoundToEvenScaled(prec uint8) Float32x8 // RoundToEvenScaled rounds elements with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F +// Asm: VRNDSCALEPS, CPU Feature: AVX512 func (x Float32x16) RoundToEvenScaled(prec uint8) Float32x16 // RoundToEvenScaled rounds elements with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F +// Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x2) RoundToEvenScaled(prec uint8) Float64x2 // RoundToEvenScaled rounds elements with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F +// Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x4) RoundToEvenScaled(prec uint8) Float64x4 // RoundToEvenScaled rounds elements with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F +// Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x8) RoundToEvenScaled(prec uint8) Float64x8 /* RoundToEvenScaledMasked */ @@ -8890,7 +8890,7 @@ func (x Float64x8) RoundToEvenScaled(prec uint8) Float64x8 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F +// Asm: VRNDSCALEPS, CPU Feature: AVX512 func (x Float32x4) RoundToEvenScaledMasked(prec uint8, mask Mask32x4) Float32x4 // RoundToEvenScaledMasked rounds elements with specified precision. @@ -8899,7 +8899,7 @@ func (x Float32x4) RoundToEvenScaledMasked(prec uint8, mask Mask32x4) Float32x4 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F +// Asm: VRNDSCALEPS, CPU Feature: AVX512 func (x Float32x8) RoundToEvenScaledMasked(prec uint8, mask Mask32x8) Float32x8 // RoundToEvenScaledMasked rounds elements with specified precision. 
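RoundToEvenScaled takes a precision argument rather than a count vector; the sketch below assumes the usual import and treats prec purely as the precision knob the doc comments describe, again kept constant to stay off the jump-table path.

package simddemo

import "simd" // assumed import path

// roundTo4 rounds each lane of v to even at the fixed precision 4
// (VRNDSCALEPS form); a constant prec avoids the jump-table translation.
func roundTo4(v simd.Float32x4) simd.Float32x4 {
	return v.RoundToEvenScaled(4)
}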
@@ -8908,7 +8908,7 @@ func (x Float32x8) RoundToEvenScaledMasked(prec uint8, mask Mask32x8) Float32x8 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F +// Asm: VRNDSCALEPS, CPU Feature: AVX512 func (x Float32x16) RoundToEvenScaledMasked(prec uint8, mask Mask32x16) Float32x16 // RoundToEvenScaledMasked rounds elements with specified precision. @@ -8917,7 +8917,7 @@ func (x Float32x16) RoundToEvenScaledMasked(prec uint8, mask Mask32x16) Float32x // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F +// Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x2) RoundToEvenScaledMasked(prec uint8, mask Mask64x2) Float64x2 // RoundToEvenScaledMasked rounds elements with specified precision. @@ -8926,7 +8926,7 @@ func (x Float64x2) RoundToEvenScaledMasked(prec uint8, mask Mask64x2) Float64x2 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F +// Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x4) RoundToEvenScaledMasked(prec uint8, mask Mask64x4) Float64x4 // RoundToEvenScaledMasked rounds elements with specified precision. @@ -8935,7 +8935,7 @@ func (x Float64x4) RoundToEvenScaledMasked(prec uint8, mask Mask64x4) Float64x4 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F +// Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x8) RoundToEvenScaledMasked(prec uint8, mask Mask64x8) Float64x8 /* RoundToEvenScaledResidue */ @@ -8944,42 +8944,42 @@ func (x Float64x8) RoundToEvenScaledMasked(prec uint8, mask Mask64x8) Float64x8 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPS, CPU Feature: AVX512DQ +// Asm: VREDUCEPS, CPU Feature: AVX512 func (x Float32x4) RoundToEvenScaledResidue(prec uint8) Float32x4 // RoundToEvenScaledResidue computes the difference after rounding with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPS, CPU Feature: AVX512DQ +// Asm: VREDUCEPS, CPU Feature: AVX512 func (x Float32x8) RoundToEvenScaledResidue(prec uint8) Float32x8 // RoundToEvenScaledResidue computes the difference after rounding with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPS, CPU Feature: AVX512DQ +// Asm: VREDUCEPS, CPU Feature: AVX512 func (x Float32x16) RoundToEvenScaledResidue(prec uint8) Float32x16 // RoundToEvenScaledResidue computes the difference after rounding with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPD, CPU Feature: AVX512DQ +// Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x2) RoundToEvenScaledResidue(prec uint8) Float64x2 // RoundToEvenScaledResidue computes the difference after rounding with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ +// Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x4) RoundToEvenScaledResidue(prec uint8) Float64x4 // RoundToEvenScaledResidue computes the difference after rounding with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPD, CPU Feature: AVX512DQ +// Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x8) RoundToEvenScaledResidue(prec uint8) Float64x8 /* RoundToEvenScaledResidueMasked */ @@ -8990,7 +8990,7 @@ func (x Float64x8) RoundToEvenScaledResidue(prec uint8) Float64x8 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPS, CPU Feature: AVX512DQ +// Asm: VREDUCEPS, CPU Feature: AVX512 func (x Float32x4) RoundToEvenScaledResidueMasked(prec uint8, mask Mask32x4) Float32x4 // RoundToEvenScaledResidueMasked computes the difference after rounding with specified precision. @@ -8999,7 +8999,7 @@ func (x Float32x4) RoundToEvenScaledResidueMasked(prec uint8, mask Mask32x4) Flo // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPS, CPU Feature: AVX512DQ +// Asm: VREDUCEPS, CPU Feature: AVX512 func (x Float32x8) RoundToEvenScaledResidueMasked(prec uint8, mask Mask32x8) Float32x8 // RoundToEvenScaledResidueMasked computes the difference after rounding with specified precision. @@ -9008,7 +9008,7 @@ func (x Float32x8) RoundToEvenScaledResidueMasked(prec uint8, mask Mask32x8) Flo // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPS, CPU Feature: AVX512DQ +// Asm: VREDUCEPS, CPU Feature: AVX512 func (x Float32x16) RoundToEvenScaledResidueMasked(prec uint8, mask Mask32x16) Float32x16 // RoundToEvenScaledResidueMasked computes the difference after rounding with specified precision. @@ -9017,7 +9017,7 @@ func (x Float32x16) RoundToEvenScaledResidueMasked(prec uint8, mask Mask32x16) F // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPD, CPU Feature: AVX512DQ +// Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x2) RoundToEvenScaledResidueMasked(prec uint8, mask Mask64x2) Float64x2 // RoundToEvenScaledResidueMasked computes the difference after rounding with specified precision. @@ -9026,7 +9026,7 @@ func (x Float64x2) RoundToEvenScaledResidueMasked(prec uint8, mask Mask64x2) Flo // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPD, CPU Feature: AVX512DQ +// Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x4) RoundToEvenScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4 // RoundToEvenScaledResidueMasked computes the difference after rounding with specified precision. @@ -9035,39 +9035,39 @@ func (x Float64x4) RoundToEvenScaledResidueMasked(prec uint8, mask Mask64x4) Flo // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPD, CPU Feature: AVX512DQ +// Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x8) RoundToEvenScaledResidueMasked(prec uint8, mask Mask64x8) Float64x8 /* Scale */ // Scale multiplies elements by a power of 2. 
// -// Asm: VSCALEFPS, CPU Feature: AVX512F +// Asm: VSCALEFPS, CPU Feature: AVX512 func (x Float32x4) Scale(y Float32x4) Float32x4 // Scale multiplies elements by a power of 2. // -// Asm: VSCALEFPS, CPU Feature: AVX512F +// Asm: VSCALEFPS, CPU Feature: AVX512 func (x Float32x8) Scale(y Float32x8) Float32x8 // Scale multiplies elements by a power of 2. // -// Asm: VSCALEFPS, CPU Feature: AVX512F +// Asm: VSCALEFPS, CPU Feature: AVX512 func (x Float32x16) Scale(y Float32x16) Float32x16 // Scale multiplies elements by a power of 2. // -// Asm: VSCALEFPD, CPU Feature: AVX512F +// Asm: VSCALEFPD, CPU Feature: AVX512 func (x Float64x2) Scale(y Float64x2) Float64x2 // Scale multiplies elements by a power of 2. // -// Asm: VSCALEFPD, CPU Feature: AVX512F +// Asm: VSCALEFPD, CPU Feature: AVX512 func (x Float64x4) Scale(y Float64x4) Float64x4 // Scale multiplies elements by a power of 2. // -// Asm: VSCALEFPD, CPU Feature: AVX512F +// Asm: VSCALEFPD, CPU Feature: AVX512 func (x Float64x8) Scale(y Float64x8) Float64x8 /* ScaleMasked */ @@ -9076,42 +9076,42 @@ func (x Float64x8) Scale(y Float64x8) Float64x8 // // This operation is applied selectively under a write mask. // -// Asm: VSCALEFPS, CPU Feature: AVX512F +// Asm: VSCALEFPS, CPU Feature: AVX512 func (x Float32x4) ScaleMasked(y Float32x4, mask Mask32x4) Float32x4 // ScaleMasked multiplies elements by a power of 2. // // This operation is applied selectively under a write mask. // -// Asm: VSCALEFPS, CPU Feature: AVX512F +// Asm: VSCALEFPS, CPU Feature: AVX512 func (x Float32x8) ScaleMasked(y Float32x8, mask Mask32x8) Float32x8 // ScaleMasked multiplies elements by a power of 2. // // This operation is applied selectively under a write mask. // -// Asm: VSCALEFPS, CPU Feature: AVX512F +// Asm: VSCALEFPS, CPU Feature: AVX512 func (x Float32x16) ScaleMasked(y Float32x16, mask Mask32x16) Float32x16 // ScaleMasked multiplies elements by a power of 2. // // This operation is applied selectively under a write mask. // -// Asm: VSCALEFPD, CPU Feature: AVX512F +// Asm: VSCALEFPD, CPU Feature: AVX512 func (x Float64x2) ScaleMasked(y Float64x2, mask Mask64x2) Float64x2 // ScaleMasked multiplies elements by a power of 2. // // This operation is applied selectively under a write mask. // -// Asm: VSCALEFPD, CPU Feature: AVX512F +// Asm: VSCALEFPD, CPU Feature: AVX512 func (x Float64x4) ScaleMasked(y Float64x4, mask Mask64x4) Float64x4 // ScaleMasked multiplies elements by a power of 2. // // This operation is applied selectively under a write mask. // -// Asm: VSCALEFPD, CPU Feature: AVX512F +// Asm: VSCALEFPD, CPU Feature: AVX512 func (x Float64x8) ScaleMasked(y Float64x8, mask Mask64x8) Float64x8 /* SetElem */ @@ -9181,7 +9181,7 @@ func (x Float32x8) SetHi(y Float32x4) Float32x8 // SetHi returns x with its upper half set to y. // -// Asm: VINSERTF64X4, CPU Feature: AVX512F +// Asm: VINSERTF64X4, CPU Feature: AVX512 func (x Float32x16) SetHi(y Float32x8) Float32x16 // SetHi returns x with its upper half set to y. @@ -9191,7 +9191,7 @@ func (x Float64x4) SetHi(y Float64x2) Float64x4 // SetHi returns x with its upper half set to y. // -// Asm: VINSERTF64X4, CPU Feature: AVX512F +// Asm: VINSERTF64X4, CPU Feature: AVX512 func (x Float64x8) SetHi(y Float64x4) Float64x8 // SetHi returns x with its upper half set to y. @@ -9201,7 +9201,7 @@ func (x Int8x32) SetHi(y Int8x16) Int8x32 // SetHi returns x with its upper half set to y. 
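Scale pairs each lane of x with a lane of y; the doc comment only says the result is a multiplication by a power of two, so the sketch below leans on the underlying VSCALEFPD instruction (which takes the per-lane exponent from y) and should be read as an assumption, not a statement of the package's contract.

package simddemo

import "simd" // assumed import path

// scaleByPow2 multiplies each lane of x by a power of two taken,
// per VSCALEFPD semantics, from the matching lane of e.
func scaleByPow2(x, e simd.Float64x4) simd.Float64x4 {
	return x.Scale(e)
}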
// -// Asm: VINSERTI64X4, CPU Feature: AVX512F +// Asm: VINSERTI64X4, CPU Feature: AVX512 func (x Int8x64) SetHi(y Int8x32) Int8x64 // SetHi returns x with its upper half set to y. @@ -9211,7 +9211,7 @@ func (x Int16x16) SetHi(y Int16x8) Int16x16 // SetHi returns x with its upper half set to y. // -// Asm: VINSERTI64X4, CPU Feature: AVX512F +// Asm: VINSERTI64X4, CPU Feature: AVX512 func (x Int16x32) SetHi(y Int16x16) Int16x32 // SetHi returns x with its upper half set to y. @@ -9221,7 +9221,7 @@ func (x Int32x8) SetHi(y Int32x4) Int32x8 // SetHi returns x with its upper half set to y. // -// Asm: VINSERTI64X4, CPU Feature: AVX512F +// Asm: VINSERTI64X4, CPU Feature: AVX512 func (x Int32x16) SetHi(y Int32x8) Int32x16 // SetHi returns x with its upper half set to y. @@ -9231,7 +9231,7 @@ func (x Int64x4) SetHi(y Int64x2) Int64x4 // SetHi returns x with its upper half set to y. // -// Asm: VINSERTI64X4, CPU Feature: AVX512F +// Asm: VINSERTI64X4, CPU Feature: AVX512 func (x Int64x8) SetHi(y Int64x4) Int64x8 // SetHi returns x with its upper half set to y. @@ -9241,7 +9241,7 @@ func (x Uint8x32) SetHi(y Uint8x16) Uint8x32 // SetHi returns x with its upper half set to y. // -// Asm: VINSERTI64X4, CPU Feature: AVX512F +// Asm: VINSERTI64X4, CPU Feature: AVX512 func (x Uint8x64) SetHi(y Uint8x32) Uint8x64 // SetHi returns x with its upper half set to y. @@ -9251,7 +9251,7 @@ func (x Uint16x16) SetHi(y Uint16x8) Uint16x16 // SetHi returns x with its upper half set to y. // -// Asm: VINSERTI64X4, CPU Feature: AVX512F +// Asm: VINSERTI64X4, CPU Feature: AVX512 func (x Uint16x32) SetHi(y Uint16x16) Uint16x32 // SetHi returns x with its upper half set to y. @@ -9261,7 +9261,7 @@ func (x Uint32x8) SetHi(y Uint32x4) Uint32x8 // SetHi returns x with its upper half set to y. // -// Asm: VINSERTI64X4, CPU Feature: AVX512F +// Asm: VINSERTI64X4, CPU Feature: AVX512 func (x Uint32x16) SetHi(y Uint32x8) Uint32x16 // SetHi returns x with its upper half set to y. @@ -9271,7 +9271,7 @@ func (x Uint64x4) SetHi(y Uint64x2) Uint64x4 // SetHi returns x with its upper half set to y. // -// Asm: VINSERTI64X4, CPU Feature: AVX512F +// Asm: VINSERTI64X4, CPU Feature: AVX512 func (x Uint64x8) SetHi(y Uint64x4) Uint64x8 /* SetLo */ @@ -9283,7 +9283,7 @@ func (x Float32x8) SetLo(y Float32x4) Float32x8 // SetLo returns x with its lower half set to y. // -// Asm: VINSERTF64X4, CPU Feature: AVX512F +// Asm: VINSERTF64X4, CPU Feature: AVX512 func (x Float32x16) SetLo(y Float32x8) Float32x16 // SetLo returns x with its lower half set to y. @@ -9293,7 +9293,7 @@ func (x Float64x4) SetLo(y Float64x2) Float64x4 // SetLo returns x with its lower half set to y. // -// Asm: VINSERTF64X4, CPU Feature: AVX512F +// Asm: VINSERTF64X4, CPU Feature: AVX512 func (x Float64x8) SetLo(y Float64x4) Float64x8 // SetLo returns x with its lower half set to y. @@ -9303,7 +9303,7 @@ func (x Int8x32) SetLo(y Int8x16) Int8x32 // SetLo returns x with its lower half set to y. // -// Asm: VINSERTI64X4, CPU Feature: AVX512F +// Asm: VINSERTI64X4, CPU Feature: AVX512 func (x Int8x64) SetLo(y Int8x32) Int8x64 // SetLo returns x with its lower half set to y. @@ -9313,7 +9313,7 @@ func (x Int16x16) SetLo(y Int16x8) Int16x16 // SetLo returns x with its lower half set to y. // -// Asm: VINSERTI64X4, CPU Feature: AVX512F +// Asm: VINSERTI64X4, CPU Feature: AVX512 func (x Int16x32) SetLo(y Int16x16) Int16x32 // SetLo returns x with its lower half set to y. 
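SetLo and SetHi each overwrite one 256-bit half of a 512-bit vector, so chaining them assembles a full vector from two halves. A sketch under the same import assumption:

package simddemo

import "simd" // assumed import path

// combine returns dst with its lower half replaced by lo and its upper
// half replaced by hi (the VINSERTI64X4 forms documented above).
func combine(dst simd.Int16x32, lo, hi simd.Int16x16) simd.Int16x32 {
	return dst.SetLo(lo).SetHi(hi)
}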
@@ -9323,7 +9323,7 @@ func (x Int32x8) SetLo(y Int32x4) Int32x8 // SetLo returns x with its lower half set to y. // -// Asm: VINSERTI64X4, CPU Feature: AVX512F +// Asm: VINSERTI64X4, CPU Feature: AVX512 func (x Int32x16) SetLo(y Int32x8) Int32x16 // SetLo returns x with its lower half set to y. @@ -9333,7 +9333,7 @@ func (x Int64x4) SetLo(y Int64x2) Int64x4 // SetLo returns x with its lower half set to y. // -// Asm: VINSERTI64X4, CPU Feature: AVX512F +// Asm: VINSERTI64X4, CPU Feature: AVX512 func (x Int64x8) SetLo(y Int64x4) Int64x8 // SetLo returns x with its lower half set to y. @@ -9343,7 +9343,7 @@ func (x Uint8x32) SetLo(y Uint8x16) Uint8x32 // SetLo returns x with its lower half set to y. // -// Asm: VINSERTI64X4, CPU Feature: AVX512F +// Asm: VINSERTI64X4, CPU Feature: AVX512 func (x Uint8x64) SetLo(y Uint8x32) Uint8x64 // SetLo returns x with its lower half set to y. @@ -9353,7 +9353,7 @@ func (x Uint16x16) SetLo(y Uint16x8) Uint16x16 // SetLo returns x with its lower half set to y. // -// Asm: VINSERTI64X4, CPU Feature: AVX512F +// Asm: VINSERTI64X4, CPU Feature: AVX512 func (x Uint16x32) SetLo(y Uint16x16) Uint16x32 // SetLo returns x with its lower half set to y. @@ -9363,7 +9363,7 @@ func (x Uint32x8) SetLo(y Uint32x4) Uint32x8 // SetLo returns x with its lower half set to y. // -// Asm: VINSERTI64X4, CPU Feature: AVX512F +// Asm: VINSERTI64X4, CPU Feature: AVX512 func (x Uint32x16) SetLo(y Uint32x8) Uint32x16 // SetLo returns x with its lower half set to y. @@ -9373,7 +9373,7 @@ func (x Uint64x4) SetLo(y Uint64x2) Uint64x4 // SetLo returns x with its lower half set to y. // -// Asm: VINSERTI64X4, CPU Feature: AVX512F +// Asm: VINSERTI64X4, CPU Feature: AVX512 func (x Uint64x8) SetLo(y Uint64x4) Uint64x8 /* ShiftAllLeft */ @@ -9390,7 +9390,7 @@ func (x Int16x16) ShiftAllLeft(y uint64) Int16x16 // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLW, CPU Feature: AVX512BW +// Asm: VPSLLW, CPU Feature: AVX512 func (x Int16x32) ShiftAllLeft(y uint64) Int16x32 // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. @@ -9405,7 +9405,7 @@ func (x Int32x8) ShiftAllLeft(y uint64) Int32x8 // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLD, CPU Feature: AVX512F +// Asm: VPSLLD, CPU Feature: AVX512 func (x Int32x16) ShiftAllLeft(y uint64) Int32x16 // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. @@ -9420,7 +9420,7 @@ func (x Int64x4) ShiftAllLeft(y uint64) Int64x4 // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLQ, CPU Feature: AVX512F +// Asm: VPSLLQ, CPU Feature: AVX512 func (x Int64x8) ShiftAllLeft(y uint64) Int64x8 // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. @@ -9435,7 +9435,7 @@ func (x Uint16x16) ShiftAllLeft(y uint64) Uint16x16 // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLW, CPU Feature: AVX512BW +// Asm: VPSLLW, CPU Feature: AVX512 func (x Uint16x32) ShiftAllLeft(y uint64) Uint16x32 // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. 
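ShiftAllLeft applies one count to every lane and takes it as a plain uint64. A minimal sketch, same import assumption as above:

package simddemo

import "simd" // assumed import path

// shiftAll shifts every 64-bit lane of v left by 7 bits; vacated low
// bits are zeroed (VPSLLQ form).
func shiftAll(v simd.Int64x8) simd.Int64x8 {
	return v.ShiftAllLeft(7)
}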
@@ -9450,7 +9450,7 @@ func (x Uint32x8) ShiftAllLeft(y uint64) Uint32x8 // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLD, CPU Feature: AVX512F +// Asm: VPSLLD, CPU Feature: AVX512 func (x Uint32x16) ShiftAllLeft(y uint64) Uint32x16 // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. @@ -9465,7 +9465,7 @@ func (x Uint64x4) ShiftAllLeft(y uint64) Uint64x4 // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLQ, CPU Feature: AVX512F +// Asm: VPSLLQ, CPU Feature: AVX512 func (x Uint64x8) ShiftAllLeft(y uint64) Uint64x8 /* ShiftAllLeftConcat */ @@ -9802,126 +9802,126 @@ func (x Uint64x8) ShiftAllLeftConcatMasked(shift uint8, y Uint64x8, mask Mask64x // // This operation is applied selectively under a write mask. // -// Asm: VPSLLW, CPU Feature: AVX512BW +// Asm: VPSLLW, CPU Feature: AVX512 func (x Int16x8) ShiftAllLeftMasked(y uint64, mask Mask16x8) Int16x8 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLW, CPU Feature: AVX512BW +// Asm: VPSLLW, CPU Feature: AVX512 func (x Int16x16) ShiftAllLeftMasked(y uint64, mask Mask16x16) Int16x16 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLW, CPU Feature: AVX512BW +// Asm: VPSLLW, CPU Feature: AVX512 func (x Int16x32) ShiftAllLeftMasked(y uint64, mask Mask16x32) Int16x32 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLD, CPU Feature: AVX512F +// Asm: VPSLLD, CPU Feature: AVX512 func (x Int32x4) ShiftAllLeftMasked(y uint64, mask Mask32x4) Int32x4 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLD, CPU Feature: AVX512F +// Asm: VPSLLD, CPU Feature: AVX512 func (x Int32x8) ShiftAllLeftMasked(y uint64, mask Mask32x8) Int32x8 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLD, CPU Feature: AVX512F +// Asm: VPSLLD, CPU Feature: AVX512 func (x Int32x16) ShiftAllLeftMasked(y uint64, mask Mask32x16) Int32x16 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLQ, CPU Feature: AVX512F +// Asm: VPSLLQ, CPU Feature: AVX512 func (x Int64x2) ShiftAllLeftMasked(y uint64, mask Mask64x2) Int64x2 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLQ, CPU Feature: AVX512F +// Asm: VPSLLQ, CPU Feature: AVX512 func (x Int64x4) ShiftAllLeftMasked(y uint64, mask Mask64x4) Int64x4 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. 
// // This operation is applied selectively under a write mask. // -// Asm: VPSLLQ, CPU Feature: AVX512F +// Asm: VPSLLQ, CPU Feature: AVX512 func (x Int64x8) ShiftAllLeftMasked(y uint64, mask Mask64x8) Int64x8 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLW, CPU Feature: AVX512BW +// Asm: VPSLLW, CPU Feature: AVX512 func (x Uint16x8) ShiftAllLeftMasked(y uint64, mask Mask16x8) Uint16x8 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLW, CPU Feature: AVX512BW +// Asm: VPSLLW, CPU Feature: AVX512 func (x Uint16x16) ShiftAllLeftMasked(y uint64, mask Mask16x16) Uint16x16 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLW, CPU Feature: AVX512BW +// Asm: VPSLLW, CPU Feature: AVX512 func (x Uint16x32) ShiftAllLeftMasked(y uint64, mask Mask16x32) Uint16x32 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLD, CPU Feature: AVX512F +// Asm: VPSLLD, CPU Feature: AVX512 func (x Uint32x4) ShiftAllLeftMasked(y uint64, mask Mask32x4) Uint32x4 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLD, CPU Feature: AVX512F +// Asm: VPSLLD, CPU Feature: AVX512 func (x Uint32x8) ShiftAllLeftMasked(y uint64, mask Mask32x8) Uint32x8 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLD, CPU Feature: AVX512F +// Asm: VPSLLD, CPU Feature: AVX512 func (x Uint32x16) ShiftAllLeftMasked(y uint64, mask Mask32x16) Uint32x16 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLQ, CPU Feature: AVX512F +// Asm: VPSLLQ, CPU Feature: AVX512 func (x Uint64x2) ShiftAllLeftMasked(y uint64, mask Mask64x2) Uint64x2 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLQ, CPU Feature: AVX512F +// Asm: VPSLLQ, CPU Feature: AVX512 func (x Uint64x4) ShiftAllLeftMasked(y uint64, mask Mask64x4) Uint64x4 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLQ, CPU Feature: AVX512F +// Asm: VPSLLQ, CPU Feature: AVX512 func (x Uint64x8) ShiftAllLeftMasked(y uint64, mask Mask64x8) Uint64x8 /* ShiftAllRight */ @@ -9938,7 +9938,7 @@ func (x Int16x16) ShiftAllRight(y uint64) Int16x16 // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. 
// -// Asm: VPSRAW, CPU Feature: AVX512BW +// Asm: VPSRAW, CPU Feature: AVX512 func (x Int16x32) ShiftAllRight(y uint64) Int16x32 // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. @@ -9953,22 +9953,22 @@ func (x Int32x8) ShiftAllRight(y uint64) Int32x8 // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAD, CPU Feature: AVX512F +// Asm: VPSRAD, CPU Feature: AVX512 func (x Int32x16) ShiftAllRight(y uint64) Int32x16 // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAQ, CPU Feature: AVX512F +// Asm: VPSRAQ, CPU Feature: AVX512 func (x Int64x2) ShiftAllRight(y uint64) Int64x2 // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAQ, CPU Feature: AVX512F +// Asm: VPSRAQ, CPU Feature: AVX512 func (x Int64x4) ShiftAllRight(y uint64) Int64x4 // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAQ, CPU Feature: AVX512F +// Asm: VPSRAQ, CPU Feature: AVX512 func (x Int64x8) ShiftAllRight(y uint64) Int64x8 // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. @@ -9983,7 +9983,7 @@ func (x Uint16x16) ShiftAllRight(y uint64) Uint16x16 // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSRLW, CPU Feature: AVX512BW +// Asm: VPSRLW, CPU Feature: AVX512 func (x Uint16x32) ShiftAllRight(y uint64) Uint16x32 // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. @@ -9998,7 +9998,7 @@ func (x Uint32x8) ShiftAllRight(y uint64) Uint32x8 // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSRLD, CPU Feature: AVX512F +// Asm: VPSRLD, CPU Feature: AVX512 func (x Uint32x16) ShiftAllRight(y uint64) Uint32x16 // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. @@ -10013,7 +10013,7 @@ func (x Uint64x4) ShiftAllRight(y uint64) Uint64x4 // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSRLQ, CPU Feature: AVX512F +// Asm: VPSRLQ, CPU Feature: AVX512 func (x Uint64x8) ShiftAllRight(y uint64) Uint64x8 /* ShiftAllRightConcat */ @@ -10350,143 +10350,143 @@ func (x Uint64x8) ShiftAllRightConcatMasked(shift uint8, y Uint64x8, mask Mask64 // // This operation is applied selectively under a write mask. // -// Asm: VPSRAW, CPU Feature: AVX512BW +// Asm: VPSRAW, CPU Feature: AVX512 func (x Int16x8) ShiftAllRightMasked(y uint64, mask Mask16x8) Int16x8 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // // This operation is applied selectively under a write mask. // -// Asm: VPSRAW, CPU Feature: AVX512BW +// Asm: VPSRAW, CPU Feature: AVX512 func (x Int16x16) ShiftAllRightMasked(y uint64, mask Mask16x16) Int16x16 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. 
// // This operation is applied selectively under a write mask. // -// Asm: VPSRAW, CPU Feature: AVX512BW +// Asm: VPSRAW, CPU Feature: AVX512 func (x Int16x32) ShiftAllRightMasked(y uint64, mask Mask16x32) Int16x32 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // // This operation is applied selectively under a write mask. // -// Asm: VPSRAD, CPU Feature: AVX512F +// Asm: VPSRAD, CPU Feature: AVX512 func (x Int32x4) ShiftAllRightMasked(y uint64, mask Mask32x4) Int32x4 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // // This operation is applied selectively under a write mask. // -// Asm: VPSRAD, CPU Feature: AVX512F +// Asm: VPSRAD, CPU Feature: AVX512 func (x Int32x8) ShiftAllRightMasked(y uint64, mask Mask32x8) Int32x8 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // // This operation is applied selectively under a write mask. // -// Asm: VPSRAD, CPU Feature: AVX512F +// Asm: VPSRAD, CPU Feature: AVX512 func (x Int32x16) ShiftAllRightMasked(y uint64, mask Mask32x16) Int32x16 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // // This operation is applied selectively under a write mask. // -// Asm: VPSRAQ, CPU Feature: AVX512F +// Asm: VPSRAQ, CPU Feature: AVX512 func (x Int64x2) ShiftAllRightMasked(y uint64, mask Mask64x2) Int64x2 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // // This operation is applied selectively under a write mask. // -// Asm: VPSRAQ, CPU Feature: AVX512F +// Asm: VPSRAQ, CPU Feature: AVX512 func (x Int64x4) ShiftAllRightMasked(y uint64, mask Mask64x4) Int64x4 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // // This operation is applied selectively under a write mask. // -// Asm: VPSRAQ, CPU Feature: AVX512F +// Asm: VPSRAQ, CPU Feature: AVX512 func (x Int64x8) ShiftAllRightMasked(y uint64, mask Mask64x8) Int64x8 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSRLW, CPU Feature: AVX512BW +// Asm: VPSRLW, CPU Feature: AVX512 func (x Uint16x8) ShiftAllRightMasked(y uint64, mask Mask16x8) Uint16x8 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSRLW, CPU Feature: AVX512BW +// Asm: VPSRLW, CPU Feature: AVX512 func (x Uint16x16) ShiftAllRightMasked(y uint64, mask Mask16x16) Uint16x16 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSRLW, CPU Feature: AVX512BW +// Asm: VPSRLW, CPU Feature: AVX512 func (x Uint16x32) ShiftAllRightMasked(y uint64, mask Mask16x32) Uint16x32 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // // This operation is applied selectively under a write mask. 
// -// Asm: VPSRLD, CPU Feature: AVX512F +// Asm: VPSRLD, CPU Feature: AVX512 func (x Uint32x4) ShiftAllRightMasked(y uint64, mask Mask32x4) Uint32x4 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSRLD, CPU Feature: AVX512F +// Asm: VPSRLD, CPU Feature: AVX512 func (x Uint32x8) ShiftAllRightMasked(y uint64, mask Mask32x8) Uint32x8 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSRLD, CPU Feature: AVX512F +// Asm: VPSRLD, CPU Feature: AVX512 func (x Uint32x16) ShiftAllRightMasked(y uint64, mask Mask32x16) Uint32x16 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSRLQ, CPU Feature: AVX512F +// Asm: VPSRLQ, CPU Feature: AVX512 func (x Uint64x2) ShiftAllRightMasked(y uint64, mask Mask64x2) Uint64x2 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSRLQ, CPU Feature: AVX512F +// Asm: VPSRLQ, CPU Feature: AVX512 func (x Uint64x4) ShiftAllRightMasked(y uint64, mask Mask64x4) Uint64x4 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSRLQ, CPU Feature: AVX512F +// Asm: VPSRLQ, CPU Feature: AVX512 func (x Uint64x8) ShiftAllRightMasked(y uint64, mask Mask64x8) Uint64x8 /* ShiftLeft */ // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVW, CPU Feature: AVX512BW +// Asm: VPSLLVW, CPU Feature: AVX512 func (x Int16x8) ShiftLeft(y Int16x8) Int16x8 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVW, CPU Feature: AVX512BW +// Asm: VPSLLVW, CPU Feature: AVX512 func (x Int16x16) ShiftLeft(y Int16x16) Int16x16 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVW, CPU Feature: AVX512BW +// Asm: VPSLLVW, CPU Feature: AVX512 func (x Int16x32) ShiftLeft(y Int16x32) Int16x32 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. @@ -10501,7 +10501,7 @@ func (x Int32x8) ShiftLeft(y Int32x8) Int32x8 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVD, CPU Feature: AVX512F +// Asm: VPSLLVD, CPU Feature: AVX512 func (x Int32x16) ShiftLeft(y Int32x16) Int32x16 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. @@ -10516,22 +10516,22 @@ func (x Int64x4) ShiftLeft(y Int64x4) Int64x4 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. 
// -// Asm: VPSLLVQ, CPU Feature: AVX512F +// Asm: VPSLLVQ, CPU Feature: AVX512 func (x Int64x8) ShiftLeft(y Int64x8) Int64x8 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVW, CPU Feature: AVX512BW +// Asm: VPSLLVW, CPU Feature: AVX512 func (x Uint16x8) ShiftLeft(y Uint16x8) Uint16x8 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVW, CPU Feature: AVX512BW +// Asm: VPSLLVW, CPU Feature: AVX512 func (x Uint16x16) ShiftLeft(y Uint16x16) Uint16x16 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVW, CPU Feature: AVX512BW +// Asm: VPSLLVW, CPU Feature: AVX512 func (x Uint16x32) ShiftLeft(y Uint16x32) Uint16x32 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. @@ -10546,7 +10546,7 @@ func (x Uint32x8) ShiftLeft(y Uint32x8) Uint32x8 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVD, CPU Feature: AVX512F +// Asm: VPSLLVD, CPU Feature: AVX512 func (x Uint32x16) ShiftLeft(y Uint32x16) Uint32x16 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. @@ -10561,7 +10561,7 @@ func (x Uint64x4) ShiftLeft(y Uint64x4) Uint64x4 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVQ, CPU Feature: AVX512F +// Asm: VPSLLVQ, CPU Feature: AVX512 func (x Uint64x8) ShiftLeft(y Uint64x8) Uint64x8 /* ShiftLeftConcat */ @@ -10826,143 +10826,143 @@ func (x Uint64x8) ShiftLeftConcatMasked(y Uint64x8, z Uint64x8, mask Mask64x8) U // // This operation is applied selectively under a write mask. // -// Asm: VPSLLVW, CPU Feature: AVX512BW +// Asm: VPSLLVW, CPU Feature: AVX512 func (x Int16x8) ShiftLeftMasked(y Int16x8, mask Mask16x8) Int16x8 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLVW, CPU Feature: AVX512BW +// Asm: VPSLLVW, CPU Feature: AVX512 func (x Int16x16) ShiftLeftMasked(y Int16x16, mask Mask16x16) Int16x16 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLVW, CPU Feature: AVX512BW +// Asm: VPSLLVW, CPU Feature: AVX512 func (x Int16x32) ShiftLeftMasked(y Int16x32, mask Mask16x32) Int16x32 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLVD, CPU Feature: AVX512F +// Asm: VPSLLVD, CPU Feature: AVX512 func (x Int32x4) ShiftLeftMasked(y Int32x4, mask Mask32x4) Int32x4 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. 
// // This operation is applied selectively under a write mask. // -// Asm: VPSLLVD, CPU Feature: AVX512F +// Asm: VPSLLVD, CPU Feature: AVX512 func (x Int32x8) ShiftLeftMasked(y Int32x8, mask Mask32x8) Int32x8 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLVD, CPU Feature: AVX512F +// Asm: VPSLLVD, CPU Feature: AVX512 func (x Int32x16) ShiftLeftMasked(y Int32x16, mask Mask32x16) Int32x16 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLVQ, CPU Feature: AVX512F +// Asm: VPSLLVQ, CPU Feature: AVX512 func (x Int64x2) ShiftLeftMasked(y Int64x2, mask Mask64x2) Int64x2 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLVQ, CPU Feature: AVX512F +// Asm: VPSLLVQ, CPU Feature: AVX512 func (x Int64x4) ShiftLeftMasked(y Int64x4, mask Mask64x4) Int64x4 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLVQ, CPU Feature: AVX512F +// Asm: VPSLLVQ, CPU Feature: AVX512 func (x Int64x8) ShiftLeftMasked(y Int64x8, mask Mask64x8) Int64x8 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLVW, CPU Feature: AVX512BW +// Asm: VPSLLVW, CPU Feature: AVX512 func (x Uint16x8) ShiftLeftMasked(y Uint16x8, mask Mask16x8) Uint16x8 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLVW, CPU Feature: AVX512BW +// Asm: VPSLLVW, CPU Feature: AVX512 func (x Uint16x16) ShiftLeftMasked(y Uint16x16, mask Mask16x16) Uint16x16 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLVW, CPU Feature: AVX512BW +// Asm: VPSLLVW, CPU Feature: AVX512 func (x Uint16x32) ShiftLeftMasked(y Uint16x32, mask Mask16x32) Uint16x32 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLVD, CPU Feature: AVX512F +// Asm: VPSLLVD, CPU Feature: AVX512 func (x Uint32x4) ShiftLeftMasked(y Uint32x4, mask Mask32x4) Uint32x4 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. 
// -// Asm: VPSLLVD, CPU Feature: AVX512F +// Asm: VPSLLVD, CPU Feature: AVX512 func (x Uint32x8) ShiftLeftMasked(y Uint32x8, mask Mask32x8) Uint32x8 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLVD, CPU Feature: AVX512F +// Asm: VPSLLVD, CPU Feature: AVX512 func (x Uint32x16) ShiftLeftMasked(y Uint32x16, mask Mask32x16) Uint32x16 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLVQ, CPU Feature: AVX512F +// Asm: VPSLLVQ, CPU Feature: AVX512 func (x Uint64x2) ShiftLeftMasked(y Uint64x2, mask Mask64x2) Uint64x2 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLVQ, CPU Feature: AVX512F +// Asm: VPSLLVQ, CPU Feature: AVX512 func (x Uint64x4) ShiftLeftMasked(y Uint64x4, mask Mask64x4) Uint64x4 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLVQ, CPU Feature: AVX512F +// Asm: VPSLLVQ, CPU Feature: AVX512 func (x Uint64x8) ShiftLeftMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* ShiftRight */ // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAVW, CPU Feature: AVX512BW +// Asm: VPSRAVW, CPU Feature: AVX512 func (x Int16x8) ShiftRight(y Int16x8) Int16x8 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAVW, CPU Feature: AVX512BW +// Asm: VPSRAVW, CPU Feature: AVX512 func (x Int16x16) ShiftRight(y Int16x16) Int16x16 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAVW, CPU Feature: AVX512BW +// Asm: VPSRAVW, CPU Feature: AVX512 func (x Int16x32) ShiftRight(y Int16x32) Int16x32 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. @@ -10977,37 +10977,37 @@ func (x Int32x8) ShiftRight(y Int32x8) Int32x8 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAVD, CPU Feature: AVX512F +// Asm: VPSRAVD, CPU Feature: AVX512 func (x Int32x16) ShiftRight(y Int32x16) Int32x16 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAVQ, CPU Feature: AVX512F +// Asm: VPSRAVQ, CPU Feature: AVX512 func (x Int64x2) ShiftRight(y Int64x2) Int64x2 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
// -// Asm: VPSRAVQ, CPU Feature: AVX512F +// Asm: VPSRAVQ, CPU Feature: AVX512 func (x Int64x4) ShiftRight(y Int64x4) Int64x4 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAVQ, CPU Feature: AVX512F +// Asm: VPSRAVQ, CPU Feature: AVX512 func (x Int64x8) ShiftRight(y Int64x8) Int64x8 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPSRLVW, CPU Feature: AVX512BW +// Asm: VPSRLVW, CPU Feature: AVX512 func (x Uint16x8) ShiftRight(y Uint16x8) Uint16x8 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPSRLVW, CPU Feature: AVX512BW +// Asm: VPSRLVW, CPU Feature: AVX512 func (x Uint16x16) ShiftRight(y Uint16x16) Uint16x16 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPSRLVW, CPU Feature: AVX512BW +// Asm: VPSRLVW, CPU Feature: AVX512 func (x Uint16x32) ShiftRight(y Uint16x32) Uint16x32 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. @@ -11022,7 +11022,7 @@ func (x Uint32x8) ShiftRight(y Uint32x8) Uint32x8 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPSRLVD, CPU Feature: AVX512F +// Asm: VPSRLVD, CPU Feature: AVX512 func (x Uint32x16) ShiftRight(y Uint32x16) Uint32x16 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. @@ -11037,7 +11037,7 @@ func (x Uint64x4) ShiftRight(y Uint64x4) Uint64x4 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPSRLVQ, CPU Feature: AVX512F +// Asm: VPSRLVQ, CPU Feature: AVX512 func (x Uint64x8) ShiftRight(y Uint64x8) Uint64x8 /* ShiftRightConcat */ @@ -11302,126 +11302,126 @@ func (x Uint64x8) ShiftRightConcatMasked(y Uint64x8, z Uint64x8, mask Mask64x8) // // This operation is applied selectively under a write mask. // -// Asm: VPSRAVW, CPU Feature: AVX512BW +// Asm: VPSRAVW, CPU Feature: AVX512 func (x Int16x8) ShiftRightMasked(y Int16x8, mask Mask16x8) Int16x8 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // This operation is applied selectively under a write mask. // -// Asm: VPSRAVW, CPU Feature: AVX512BW +// Asm: VPSRAVW, CPU Feature: AVX512 func (x Int16x16) ShiftRightMasked(y Int16x16, mask Mask16x16) Int16x16 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // This operation is applied selectively under a write mask. // -// Asm: VPSRAVW, CPU Feature: AVX512BW +// Asm: VPSRAVW, CPU Feature: AVX512 func (x Int16x32) ShiftRightMasked(y Int16x32, mask Mask16x32) Int16x32 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
// // This operation is applied selectively under a write mask. // -// Asm: VPSRAVD, CPU Feature: AVX512F +// Asm: VPSRAVD, CPU Feature: AVX512 func (x Int32x4) ShiftRightMasked(y Int32x4, mask Mask32x4) Int32x4 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // This operation is applied selectively under a write mask. // -// Asm: VPSRAVD, CPU Feature: AVX512F +// Asm: VPSRAVD, CPU Feature: AVX512 func (x Int32x8) ShiftRightMasked(y Int32x8, mask Mask32x8) Int32x8 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // This operation is applied selectively under a write mask. // -// Asm: VPSRAVD, CPU Feature: AVX512F +// Asm: VPSRAVD, CPU Feature: AVX512 func (x Int32x16) ShiftRightMasked(y Int32x16, mask Mask32x16) Int32x16 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // This operation is applied selectively under a write mask. // -// Asm: VPSRAVQ, CPU Feature: AVX512F +// Asm: VPSRAVQ, CPU Feature: AVX512 func (x Int64x2) ShiftRightMasked(y Int64x2, mask Mask64x2) Int64x2 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // This operation is applied selectively under a write mask. // -// Asm: VPSRAVQ, CPU Feature: AVX512F +// Asm: VPSRAVQ, CPU Feature: AVX512 func (x Int64x4) ShiftRightMasked(y Int64x4, mask Mask64x4) Int64x4 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // This operation is applied selectively under a write mask. // -// Asm: VPSRAVQ, CPU Feature: AVX512F +// Asm: VPSRAVQ, CPU Feature: AVX512 func (x Int64x8) ShiftRightMasked(y Int64x8, mask Mask64x8) Int64x8 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSRLVW, CPU Feature: AVX512BW +// Asm: VPSRLVW, CPU Feature: AVX512 func (x Uint16x8) ShiftRightMasked(y Uint16x8, mask Mask16x8) Uint16x8 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSRLVW, CPU Feature: AVX512BW +// Asm: VPSRLVW, CPU Feature: AVX512 func (x Uint16x16) ShiftRightMasked(y Uint16x16, mask Mask16x16) Uint16x16 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSRLVW, CPU Feature: AVX512BW +// Asm: VPSRLVW, CPU Feature: AVX512 func (x Uint16x32) ShiftRightMasked(y Uint16x32, mask Mask16x32) Uint16x32 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // This operation is applied selectively under a write mask. 
// -// Asm: VPSRLVD, CPU Feature: AVX512F +// Asm: VPSRLVD, CPU Feature: AVX512 func (x Uint32x4) ShiftRightMasked(y Uint32x4, mask Mask32x4) Uint32x4 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSRLVD, CPU Feature: AVX512F +// Asm: VPSRLVD, CPU Feature: AVX512 func (x Uint32x8) ShiftRightMasked(y Uint32x8, mask Mask32x8) Uint32x8 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSRLVD, CPU Feature: AVX512F +// Asm: VPSRLVD, CPU Feature: AVX512 func (x Uint32x16) ShiftRightMasked(y Uint32x16, mask Mask32x16) Uint32x16 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSRLVQ, CPU Feature: AVX512F +// Asm: VPSRLVQ, CPU Feature: AVX512 func (x Uint64x2) ShiftRightMasked(y Uint64x2, mask Mask64x2) Uint64x2 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSRLVQ, CPU Feature: AVX512F +// Asm: VPSRLVQ, CPU Feature: AVX512 func (x Uint64x4) ShiftRightMasked(y Uint64x4, mask Mask64x4) Uint64x4 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSRLVQ, CPU Feature: AVX512F +// Asm: VPSRLVQ, CPU Feature: AVX512 func (x Uint64x8) ShiftRightMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* Sqrt */ @@ -11438,7 +11438,7 @@ func (x Float32x8) Sqrt() Float32x8 // Sqrt computes the square root of each element. // -// Asm: VSQRTPS, CPU Feature: AVX512F +// Asm: VSQRTPS, CPU Feature: AVX512 func (x Float32x16) Sqrt() Float32x16 // Sqrt computes the square root of each element. @@ -11453,7 +11453,7 @@ func (x Float64x4) Sqrt() Float64x4 // Sqrt computes the square root of each element. // -// Asm: VSQRTPD, CPU Feature: AVX512F +// Asm: VSQRTPD, CPU Feature: AVX512 func (x Float64x8) Sqrt() Float64x8 /* SqrtMasked */ @@ -11462,42 +11462,42 @@ func (x Float64x8) Sqrt() Float64x8 // // This operation is applied selectively under a write mask. // -// Asm: VSQRTPS, CPU Feature: AVX512F +// Asm: VSQRTPS, CPU Feature: AVX512 func (x Float32x4) SqrtMasked(mask Mask32x4) Float32x4 // SqrtMasked computes the square root of each element. // // This operation is applied selectively under a write mask. // -// Asm: VSQRTPS, CPU Feature: AVX512F +// Asm: VSQRTPS, CPU Feature: AVX512 func (x Float32x8) SqrtMasked(mask Mask32x8) Float32x8 // SqrtMasked computes the square root of each element. // // This operation is applied selectively under a write mask. // -// Asm: VSQRTPS, CPU Feature: AVX512F +// Asm: VSQRTPS, CPU Feature: AVX512 func (x Float32x16) SqrtMasked(mask Mask32x16) Float32x16 // SqrtMasked computes the square root of each element. // // This operation is applied selectively under a write mask. 
// -// Asm: VSQRTPD, CPU Feature: AVX512F +// Asm: VSQRTPD, CPU Feature: AVX512 func (x Float64x2) SqrtMasked(mask Mask64x2) Float64x2 // SqrtMasked computes the square root of each element. // // This operation is applied selectively under a write mask. // -// Asm: VSQRTPD, CPU Feature: AVX512F +// Asm: VSQRTPD, CPU Feature: AVX512 func (x Float64x4) SqrtMasked(mask Mask64x4) Float64x4 // SqrtMasked computes the square root of each element. // // This operation is applied selectively under a write mask. // -// Asm: VSQRTPD, CPU Feature: AVX512F +// Asm: VSQRTPD, CPU Feature: AVX512 func (x Float64x8) SqrtMasked(mask Mask64x8) Float64x8 /* Sub */ @@ -11514,7 +11514,7 @@ func (x Float32x8) Sub(y Float32x8) Float32x8 // Sub subtracts corresponding elements of two vectors. // -// Asm: VSUBPS, CPU Feature: AVX512F +// Asm: VSUBPS, CPU Feature: AVX512 func (x Float32x16) Sub(y Float32x16) Float32x16 // Sub subtracts corresponding elements of two vectors. @@ -11529,7 +11529,7 @@ func (x Float64x4) Sub(y Float64x4) Float64x4 // Sub subtracts corresponding elements of two vectors. // -// Asm: VSUBPD, CPU Feature: AVX512F +// Asm: VSUBPD, CPU Feature: AVX512 func (x Float64x8) Sub(y Float64x8) Float64x8 // Sub subtracts corresponding elements of two vectors. @@ -11544,7 +11544,7 @@ func (x Int8x32) Sub(y Int8x32) Int8x32 // Sub subtracts corresponding elements of two vectors. // -// Asm: VPSUBB, CPU Feature: AVX512BW +// Asm: VPSUBB, CPU Feature: AVX512 func (x Int8x64) Sub(y Int8x64) Int8x64 // Sub subtracts corresponding elements of two vectors. @@ -11559,7 +11559,7 @@ func (x Int16x16) Sub(y Int16x16) Int16x16 // Sub subtracts corresponding elements of two vectors. // -// Asm: VPSUBW, CPU Feature: AVX512BW +// Asm: VPSUBW, CPU Feature: AVX512 func (x Int16x32) Sub(y Int16x32) Int16x32 // Sub subtracts corresponding elements of two vectors. @@ -11574,7 +11574,7 @@ func (x Int32x8) Sub(y Int32x8) Int32x8 // Sub subtracts corresponding elements of two vectors. // -// Asm: VPSUBD, CPU Feature: AVX512F +// Asm: VPSUBD, CPU Feature: AVX512 func (x Int32x16) Sub(y Int32x16) Int32x16 // Sub subtracts corresponding elements of two vectors. @@ -11589,7 +11589,7 @@ func (x Int64x4) Sub(y Int64x4) Int64x4 // Sub subtracts corresponding elements of two vectors. // -// Asm: VPSUBQ, CPU Feature: AVX512F +// Asm: VPSUBQ, CPU Feature: AVX512 func (x Int64x8) Sub(y Int64x8) Int64x8 // Sub subtracts corresponding elements of two vectors. @@ -11604,7 +11604,7 @@ func (x Uint8x32) Sub(y Uint8x32) Uint8x32 // Sub subtracts corresponding elements of two vectors. // -// Asm: VPSUBB, CPU Feature: AVX512BW +// Asm: VPSUBB, CPU Feature: AVX512 func (x Uint8x64) Sub(y Uint8x64) Uint8x64 // Sub subtracts corresponding elements of two vectors. @@ -11619,7 +11619,7 @@ func (x Uint16x16) Sub(y Uint16x16) Uint16x16 // Sub subtracts corresponding elements of two vectors. // -// Asm: VPSUBW, CPU Feature: AVX512BW +// Asm: VPSUBW, CPU Feature: AVX512 func (x Uint16x32) Sub(y Uint16x32) Uint16x32 // Sub subtracts corresponding elements of two vectors. @@ -11634,7 +11634,7 @@ func (x Uint32x8) Sub(y Uint32x8) Uint32x8 // Sub subtracts corresponding elements of two vectors. // -// Asm: VPSUBD, CPU Feature: AVX512F +// Asm: VPSUBD, CPU Feature: AVX512 func (x Uint32x16) Sub(y Uint32x16) Uint32x16 // Sub subtracts corresponding elements of two vectors. @@ -11649,7 +11649,7 @@ func (x Uint64x4) Sub(y Uint64x4) Uint64x4 // Sub subtracts corresponding elements of two vectors. 
// -// Asm: VPSUBQ, CPU Feature: AVX512F +// Asm: VPSUBQ, CPU Feature: AVX512 func (x Uint64x8) Sub(y Uint64x8) Uint64x8 /* SubMasked */ @@ -11658,210 +11658,210 @@ func (x Uint64x8) Sub(y Uint64x8) Uint64x8 // // This operation is applied selectively under a write mask. // -// Asm: VSUBPS, CPU Feature: AVX512F +// Asm: VSUBPS, CPU Feature: AVX512 func (x Float32x4) SubMasked(y Float32x4, mask Mask32x4) Float32x4 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VSUBPS, CPU Feature: AVX512F +// Asm: VSUBPS, CPU Feature: AVX512 func (x Float32x8) SubMasked(y Float32x8, mask Mask32x8) Float32x8 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VSUBPS, CPU Feature: AVX512F +// Asm: VSUBPS, CPU Feature: AVX512 func (x Float32x16) SubMasked(y Float32x16, mask Mask32x16) Float32x16 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VSUBPD, CPU Feature: AVX512F +// Asm: VSUBPD, CPU Feature: AVX512 func (x Float64x2) SubMasked(y Float64x2, mask Mask64x2) Float64x2 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VSUBPD, CPU Feature: AVX512F +// Asm: VSUBPD, CPU Feature: AVX512 func (x Float64x4) SubMasked(y Float64x4, mask Mask64x4) Float64x4 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VSUBPD, CPU Feature: AVX512F +// Asm: VSUBPD, CPU Feature: AVX512 func (x Float64x8) SubMasked(y Float64x8, mask Mask64x8) Float64x8 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBB, CPU Feature: AVX512BW +// Asm: VPSUBB, CPU Feature: AVX512 func (x Int8x16) SubMasked(y Int8x16, mask Mask8x16) Int8x16 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBB, CPU Feature: AVX512BW +// Asm: VPSUBB, CPU Feature: AVX512 func (x Int8x32) SubMasked(y Int8x32, mask Mask8x32) Int8x32 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBB, CPU Feature: AVX512BW +// Asm: VPSUBB, CPU Feature: AVX512 func (x Int8x64) SubMasked(y Int8x64, mask Mask8x64) Int8x64 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBW, CPU Feature: AVX512BW +// Asm: VPSUBW, CPU Feature: AVX512 func (x Int16x8) SubMasked(y Int16x8, mask Mask16x8) Int16x8 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBW, CPU Feature: AVX512BW +// Asm: VPSUBW, CPU Feature: AVX512 func (x Int16x16) SubMasked(y Int16x16, mask Mask16x16) Int16x16 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBW, CPU Feature: AVX512BW +// Asm: VPSUBW, CPU Feature: AVX512 func (x Int16x32) SubMasked(y Int16x32, mask Mask16x32) Int16x32 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. 
// -// Asm: VPSUBD, CPU Feature: AVX512F +// Asm: VPSUBD, CPU Feature: AVX512 func (x Int32x4) SubMasked(y Int32x4, mask Mask32x4) Int32x4 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBD, CPU Feature: AVX512F +// Asm: VPSUBD, CPU Feature: AVX512 func (x Int32x8) SubMasked(y Int32x8, mask Mask32x8) Int32x8 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBD, CPU Feature: AVX512F +// Asm: VPSUBD, CPU Feature: AVX512 func (x Int32x16) SubMasked(y Int32x16, mask Mask32x16) Int32x16 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBQ, CPU Feature: AVX512F +// Asm: VPSUBQ, CPU Feature: AVX512 func (x Int64x2) SubMasked(y Int64x2, mask Mask64x2) Int64x2 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBQ, CPU Feature: AVX512F +// Asm: VPSUBQ, CPU Feature: AVX512 func (x Int64x4) SubMasked(y Int64x4, mask Mask64x4) Int64x4 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBQ, CPU Feature: AVX512F +// Asm: VPSUBQ, CPU Feature: AVX512 func (x Int64x8) SubMasked(y Int64x8, mask Mask64x8) Int64x8 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBB, CPU Feature: AVX512BW +// Asm: VPSUBB, CPU Feature: AVX512 func (x Uint8x16) SubMasked(y Uint8x16, mask Mask8x16) Uint8x16 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBB, CPU Feature: AVX512BW +// Asm: VPSUBB, CPU Feature: AVX512 func (x Uint8x32) SubMasked(y Uint8x32, mask Mask8x32) Uint8x32 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBB, CPU Feature: AVX512BW +// Asm: VPSUBB, CPU Feature: AVX512 func (x Uint8x64) SubMasked(y Uint8x64, mask Mask8x64) Uint8x64 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBW, CPU Feature: AVX512BW +// Asm: VPSUBW, CPU Feature: AVX512 func (x Uint16x8) SubMasked(y Uint16x8, mask Mask16x8) Uint16x8 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBW, CPU Feature: AVX512BW +// Asm: VPSUBW, CPU Feature: AVX512 func (x Uint16x16) SubMasked(y Uint16x16, mask Mask16x16) Uint16x16 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBW, CPU Feature: AVX512BW +// Asm: VPSUBW, CPU Feature: AVX512 func (x Uint16x32) SubMasked(y Uint16x32, mask Mask16x32) Uint16x32 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBD, CPU Feature: AVX512F +// Asm: VPSUBD, CPU Feature: AVX512 func (x Uint32x4) SubMasked(y Uint32x4, mask Mask32x4) Uint32x4 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. 
// -// Asm: VPSUBD, CPU Feature: AVX512F +// Asm: VPSUBD, CPU Feature: AVX512 func (x Uint32x8) SubMasked(y Uint32x8, mask Mask32x8) Uint32x8 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBD, CPU Feature: AVX512F +// Asm: VPSUBD, CPU Feature: AVX512 func (x Uint32x16) SubMasked(y Uint32x16, mask Mask32x16) Uint32x16 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBQ, CPU Feature: AVX512F +// Asm: VPSUBQ, CPU Feature: AVX512 func (x Uint64x2) SubMasked(y Uint64x2, mask Mask64x2) Uint64x2 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBQ, CPU Feature: AVX512F +// Asm: VPSUBQ, CPU Feature: AVX512 func (x Uint64x4) SubMasked(y Uint64x4, mask Mask64x4) Uint64x4 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBQ, CPU Feature: AVX512F +// Asm: VPSUBQ, CPU Feature: AVX512 func (x Uint64x8) SubMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* SubPairs */ @@ -11966,7 +11966,7 @@ func (x Int8x32) SubSaturated(y Int8x32) Int8x32 // SubSaturated subtracts corresponding elements of two vectors with saturation. // -// Asm: VPSUBSB, CPU Feature: AVX512BW +// Asm: VPSUBSB, CPU Feature: AVX512 func (x Int8x64) SubSaturated(y Int8x64) Int8x64 // SubSaturated subtracts corresponding elements of two vectors with saturation. @@ -11981,7 +11981,7 @@ func (x Int16x16) SubSaturated(y Int16x16) Int16x16 // SubSaturated subtracts corresponding elements of two vectors with saturation. // -// Asm: VPSUBSW, CPU Feature: AVX512BW +// Asm: VPSUBSW, CPU Feature: AVX512 func (x Int16x32) SubSaturated(y Int16x32) Int16x32 // SubSaturated subtracts corresponding elements of two vectors with saturation. @@ -11996,7 +11996,7 @@ func (x Uint8x32) SubSaturated(y Uint8x32) Uint8x32 // SubSaturated subtracts corresponding elements of two vectors with saturation. // -// Asm: VPSUBUSB, CPU Feature: AVX512BW +// Asm: VPSUBUSB, CPU Feature: AVX512 func (x Uint8x64) SubSaturated(y Uint8x64) Uint8x64 // SubSaturated subtracts corresponding elements of two vectors with saturation. @@ -12011,7 +12011,7 @@ func (x Uint16x16) SubSaturated(y Uint16x16) Uint16x16 // SubSaturated subtracts corresponding elements of two vectors with saturation. // -// Asm: VPSUBUSW, CPU Feature: AVX512BW +// Asm: VPSUBUSW, CPU Feature: AVX512 func (x Uint16x32) SubSaturated(y Uint16x32) Uint16x32 /* SubSaturatedMasked */ @@ -12020,84 +12020,84 @@ func (x Uint16x32) SubSaturated(y Uint16x32) Uint16x32 // // This operation is applied selectively under a write mask. // -// Asm: VPSUBSB, CPU Feature: AVX512BW +// Asm: VPSUBSB, CPU Feature: AVX512 func (x Int8x16) SubSaturatedMasked(y Int8x16, mask Mask8x16) Int8x16 // SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBSB, CPU Feature: AVX512BW +// Asm: VPSUBSB, CPU Feature: AVX512 func (x Int8x32) SubSaturatedMasked(y Int8x32, mask Mask8x32) Int8x32 // SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. 
// -// Asm: VPSUBSB, CPU Feature: AVX512BW +// Asm: VPSUBSB, CPU Feature: AVX512 func (x Int8x64) SubSaturatedMasked(y Int8x64, mask Mask8x64) Int8x64 // SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBSW, CPU Feature: AVX512BW +// Asm: VPSUBSW, CPU Feature: AVX512 func (x Int16x8) SubSaturatedMasked(y Int16x8, mask Mask16x8) Int16x8 // SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBSW, CPU Feature: AVX512BW +// Asm: VPSUBSW, CPU Feature: AVX512 func (x Int16x16) SubSaturatedMasked(y Int16x16, mask Mask16x16) Int16x16 // SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBSW, CPU Feature: AVX512BW +// Asm: VPSUBSW, CPU Feature: AVX512 func (x Int16x32) SubSaturatedMasked(y Int16x32, mask Mask16x32) Int16x32 // SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBUSB, CPU Feature: AVX512BW +// Asm: VPSUBUSB, CPU Feature: AVX512 func (x Uint8x16) SubSaturatedMasked(y Uint8x16, mask Mask8x16) Uint8x16 // SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBUSB, CPU Feature: AVX512BW +// Asm: VPSUBUSB, CPU Feature: AVX512 func (x Uint8x32) SubSaturatedMasked(y Uint8x32, mask Mask8x32) Uint8x32 // SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBUSB, CPU Feature: AVX512BW +// Asm: VPSUBUSB, CPU Feature: AVX512 func (x Uint8x64) SubSaturatedMasked(y Uint8x64, mask Mask8x64) Uint8x64 // SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBUSW, CPU Feature: AVX512BW +// Asm: VPSUBUSW, CPU Feature: AVX512 func (x Uint16x8) SubSaturatedMasked(y Uint16x8, mask Mask16x8) Uint16x8 // SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBUSW, CPU Feature: AVX512BW +// Asm: VPSUBUSW, CPU Feature: AVX512 func (x Uint16x16) SubSaturatedMasked(y Uint16x16, mask Mask16x16) Uint16x16 // SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBUSW, CPU Feature: AVX512BW +// Asm: VPSUBUSW, CPU Feature: AVX512 func (x Uint16x32) SubSaturatedMasked(y Uint16x32, mask Mask16x32) Uint16x32 /* Trunc */ @@ -12128,42 +12128,42 @@ func (x Float64x4) Trunc() Float64x4 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F +// Asm: VRNDSCALEPS, CPU Feature: AVX512 func (x Float32x4) TruncScaled(prec uint8) Float32x4 // TruncScaled truncates elements with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// -// Asm: VRNDSCALEPS, CPU Feature: AVX512F +// Asm: VRNDSCALEPS, CPU Feature: AVX512 func (x Float32x8) TruncScaled(prec uint8) Float32x8 // TruncScaled truncates elements with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F +// Asm: VRNDSCALEPS, CPU Feature: AVX512 func (x Float32x16) TruncScaled(prec uint8) Float32x16 // TruncScaled truncates elements with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F +// Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x2) TruncScaled(prec uint8) Float64x2 // TruncScaled truncates elements with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F +// Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x4) TruncScaled(prec uint8) Float64x4 // TruncScaled truncates elements with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F +// Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x8) TruncScaled(prec uint8) Float64x8 /* TruncScaledMasked */ @@ -12174,7 +12174,7 @@ func (x Float64x8) TruncScaled(prec uint8) Float64x8 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F +// Asm: VRNDSCALEPS, CPU Feature: AVX512 func (x Float32x4) TruncScaledMasked(prec uint8, mask Mask32x4) Float32x4 // TruncScaledMasked truncates elements with specified precision. @@ -12183,7 +12183,7 @@ func (x Float32x4) TruncScaledMasked(prec uint8, mask Mask32x4) Float32x4 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F +// Asm: VRNDSCALEPS, CPU Feature: AVX512 func (x Float32x8) TruncScaledMasked(prec uint8, mask Mask32x8) Float32x8 // TruncScaledMasked truncates elements with specified precision. @@ -12192,7 +12192,7 @@ func (x Float32x8) TruncScaledMasked(prec uint8, mask Mask32x8) Float32x8 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F +// Asm: VRNDSCALEPS, CPU Feature: AVX512 func (x Float32x16) TruncScaledMasked(prec uint8, mask Mask32x16) Float32x16 // TruncScaledMasked truncates elements with specified precision. @@ -12201,7 +12201,7 @@ func (x Float32x16) TruncScaledMasked(prec uint8, mask Mask32x16) Float32x16 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F +// Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x2) TruncScaledMasked(prec uint8, mask Mask64x2) Float64x2 // TruncScaledMasked truncates elements with specified precision. @@ -12210,7 +12210,7 @@ func (x Float64x2) TruncScaledMasked(prec uint8, mask Mask64x2) Float64x2 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// -// Asm: VRNDSCALEPD, CPU Feature: AVX512F +// Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x4) TruncScaledMasked(prec uint8, mask Mask64x4) Float64x4 // TruncScaledMasked truncates elements with specified precision. @@ -12219,7 +12219,7 @@ func (x Float64x4) TruncScaledMasked(prec uint8, mask Mask64x4) Float64x4 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F +// Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x8) TruncScaledMasked(prec uint8, mask Mask64x8) Float64x8 /* TruncScaledResidue */ @@ -12228,42 +12228,42 @@ func (x Float64x8) TruncScaledMasked(prec uint8, mask Mask64x8) Float64x8 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPS, CPU Feature: AVX512DQ +// Asm: VREDUCEPS, CPU Feature: AVX512 func (x Float32x4) TruncScaledResidue(prec uint8) Float32x4 // TruncScaledResidue computes the difference after truncating with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPS, CPU Feature: AVX512DQ +// Asm: VREDUCEPS, CPU Feature: AVX512 func (x Float32x8) TruncScaledResidue(prec uint8) Float32x8 // TruncScaledResidue computes the difference after truncating with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPS, CPU Feature: AVX512DQ +// Asm: VREDUCEPS, CPU Feature: AVX512 func (x Float32x16) TruncScaledResidue(prec uint8) Float32x16 // TruncScaledResidue computes the difference after truncating with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPD, CPU Feature: AVX512DQ +// Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x2) TruncScaledResidue(prec uint8) Float64x2 // TruncScaledResidue computes the difference after truncating with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPD, CPU Feature: AVX512DQ +// Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x4) TruncScaledResidue(prec uint8) Float64x4 // TruncScaledResidue computes the difference after truncating with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPD, CPU Feature: AVX512DQ +// Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x8) TruncScaledResidue(prec uint8) Float64x8 /* TruncScaledResidueMasked */ @@ -12274,7 +12274,7 @@ func (x Float64x8) TruncScaledResidue(prec uint8) Float64x8 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPS, CPU Feature: AVX512DQ +// Asm: VREDUCEPS, CPU Feature: AVX512 func (x Float32x4) TruncScaledResidueMasked(prec uint8, mask Mask32x4) Float32x4 // TruncScaledResidueMasked computes the difference after truncating with specified precision. @@ -12283,7 +12283,7 @@ func (x Float32x4) TruncScaledResidueMasked(prec uint8, mask Mask32x4) Float32x4 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// -// Asm: VREDUCEPS, CPU Feature: AVX512DQ +// Asm: VREDUCEPS, CPU Feature: AVX512 func (x Float32x8) TruncScaledResidueMasked(prec uint8, mask Mask32x8) Float32x8 // TruncScaledResidueMasked computes the difference after truncating with specified precision. @@ -12292,7 +12292,7 @@ func (x Float32x8) TruncScaledResidueMasked(prec uint8, mask Mask32x8) Float32x8 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPS, CPU Feature: AVX512DQ +// Asm: VREDUCEPS, CPU Feature: AVX512 func (x Float32x16) TruncScaledResidueMasked(prec uint8, mask Mask32x16) Float32x16 // TruncScaledResidueMasked computes the difference after truncating with specified precision. @@ -12301,7 +12301,7 @@ func (x Float32x16) TruncScaledResidueMasked(prec uint8, mask Mask32x16) Float32 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPD, CPU Feature: AVX512DQ +// Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x2) TruncScaledResidueMasked(prec uint8, mask Mask64x2) Float64x2 // TruncScaledResidueMasked computes the difference after truncating with specified precision. @@ -12310,7 +12310,7 @@ func (x Float64x2) TruncScaledResidueMasked(prec uint8, mask Mask64x2) Float64x2 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPD, CPU Feature: AVX512DQ +// Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x4) TruncScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4 // TruncScaledResidueMasked computes the difference after truncating with specified precision. @@ -12319,7 +12319,7 @@ func (x Float64x4) TruncScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPD, CPU Feature: AVX512DQ +// Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x8) TruncScaledResidueMasked(prec uint8, mask Mask64x8) Float64x8 /* Xor */ @@ -12336,7 +12336,7 @@ func (x Int8x32) Xor(y Int8x32) Int8x32 // Xor performs a bitwise XOR operation between two vectors. // -// Asm: VPXORD, CPU Feature: AVX512F +// Asm: VPXORD, CPU Feature: AVX512 func (x Int8x64) Xor(y Int8x64) Int8x64 // Xor performs a bitwise XOR operation between two vectors. @@ -12351,7 +12351,7 @@ func (x Int16x16) Xor(y Int16x16) Int16x16 // Xor performs a bitwise XOR operation between two vectors. // -// Asm: VPXORD, CPU Feature: AVX512F +// Asm: VPXORD, CPU Feature: AVX512 func (x Int16x32) Xor(y Int16x32) Int16x32 // Xor performs a bitwise XOR operation between two vectors. @@ -12366,7 +12366,7 @@ func (x Int32x8) Xor(y Int32x8) Int32x8 // Xor performs a bitwise XOR operation between two vectors. // -// Asm: VPXORD, CPU Feature: AVX512F +// Asm: VPXORD, CPU Feature: AVX512 func (x Int32x16) Xor(y Int32x16) Int32x16 // Xor performs a bitwise XOR operation between two vectors. @@ -12381,7 +12381,7 @@ func (x Int64x4) Xor(y Int64x4) Int64x4 // Xor performs a bitwise XOR operation between two vectors. // -// Asm: VPXORQ, CPU Feature: AVX512F +// Asm: VPXORQ, CPU Feature: AVX512 func (x Int64x8) Xor(y Int64x8) Int64x8 // Xor performs a bitwise XOR operation between two vectors. @@ -12396,7 +12396,7 @@ func (x Uint8x32) Xor(y Uint8x32) Uint8x32 // Xor performs a bitwise XOR operation between two vectors. 
// -// Asm: VPXORD, CPU Feature: AVX512F +// Asm: VPXORD, CPU Feature: AVX512 func (x Uint8x64) Xor(y Uint8x64) Uint8x64 // Xor performs a bitwise XOR operation between two vectors. @@ -12411,7 +12411,7 @@ func (x Uint16x16) Xor(y Uint16x16) Uint16x16 // Xor performs a bitwise XOR operation between two vectors. // -// Asm: VPXORD, CPU Feature: AVX512F +// Asm: VPXORD, CPU Feature: AVX512 func (x Uint16x32) Xor(y Uint16x32) Uint16x32 // Xor performs a bitwise XOR operation between two vectors. @@ -12426,7 +12426,7 @@ func (x Uint32x8) Xor(y Uint32x8) Uint32x8 // Xor performs a bitwise XOR operation between two vectors. // -// Asm: VPXORD, CPU Feature: AVX512F +// Asm: VPXORD, CPU Feature: AVX512 func (x Uint32x16) Xor(y Uint32x16) Uint32x16 // Xor performs a bitwise XOR operation between two vectors. @@ -12441,7 +12441,7 @@ func (x Uint64x4) Xor(y Uint64x4) Uint64x4 // Xor performs a bitwise XOR operation between two vectors. // -// Asm: VPXORQ, CPU Feature: AVX512F +// Asm: VPXORQ, CPU Feature: AVX512 func (x Uint64x8) Xor(y Uint64x8) Uint64x8 /* XorMasked */ @@ -12450,84 +12450,84 @@ func (x Uint64x8) Xor(y Uint64x8) Uint64x8 // // This operation is applied selectively under a write mask. // -// Asm: VPXORD, CPU Feature: AVX512F +// Asm: VPXORD, CPU Feature: AVX512 func (x Int32x4) XorMasked(y Int32x4, mask Mask32x4) Int32x4 // XorMasked performs a bitwise XOR operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPXORD, CPU Feature: AVX512F +// Asm: VPXORD, CPU Feature: AVX512 func (x Int32x8) XorMasked(y Int32x8, mask Mask32x8) Int32x8 // XorMasked performs a bitwise XOR operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPXORD, CPU Feature: AVX512F +// Asm: VPXORD, CPU Feature: AVX512 func (x Int32x16) XorMasked(y Int32x16, mask Mask32x16) Int32x16 // XorMasked performs a bitwise XOR operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPXORQ, CPU Feature: AVX512F +// Asm: VPXORQ, CPU Feature: AVX512 func (x Int64x2) XorMasked(y Int64x2, mask Mask64x2) Int64x2 // XorMasked performs a bitwise XOR operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPXORQ, CPU Feature: AVX512F +// Asm: VPXORQ, CPU Feature: AVX512 func (x Int64x4) XorMasked(y Int64x4, mask Mask64x4) Int64x4 // XorMasked performs a bitwise XOR operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPXORQ, CPU Feature: AVX512F +// Asm: VPXORQ, CPU Feature: AVX512 func (x Int64x8) XorMasked(y Int64x8, mask Mask64x8) Int64x8 // XorMasked performs a bitwise XOR operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPXORD, CPU Feature: AVX512F +// Asm: VPXORD, CPU Feature: AVX512 func (x Uint32x4) XorMasked(y Uint32x4, mask Mask32x4) Uint32x4 // XorMasked performs a bitwise XOR operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPXORD, CPU Feature: AVX512F +// Asm: VPXORD, CPU Feature: AVX512 func (x Uint32x8) XorMasked(y Uint32x8, mask Mask32x8) Uint32x8 // XorMasked performs a bitwise XOR operation between two vectors. // // This operation is applied selectively under a write mask. 
// -// Asm: VPXORD, CPU Feature: AVX512F +// Asm: VPXORD, CPU Feature: AVX512 func (x Uint32x16) XorMasked(y Uint32x16, mask Mask32x16) Uint32x16 // XorMasked performs a bitwise XOR operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPXORQ, CPU Feature: AVX512F +// Asm: VPXORQ, CPU Feature: AVX512 func (x Uint64x2) XorMasked(y Uint64x2, mask Mask64x2) Uint64x2 // XorMasked performs a bitwise XOR operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPXORQ, CPU Feature: AVX512F +// Asm: VPXORQ, CPU Feature: AVX512 func (x Uint64x4) XorMasked(y Uint64x4, mask Mask64x4) Uint64x4 // XorMasked performs a bitwise XOR operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPXORQ, CPU Feature: AVX512F +// Asm: VPXORQ, CPU Feature: AVX512 func (x Uint64x8) XorMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* blend */ @@ -12551,7 +12551,7 @@ func (x Int8x32) blend(y Int8x32, mask Int8x32) Int8x32 // // This operation is applied selectively under a write mask. // -// Asm: VPBLENDMB, CPU Feature: AVX512BW +// Asm: VPBLENDMB, CPU Feature: AVX512 func (x Int8x64) blendMasked(y Int8x64, mask Mask8x64) Int8x64 // blendMasked blends two vectors based on mask values, choosing either @@ -12559,7 +12559,7 @@ func (x Int8x64) blendMasked(y Int8x64, mask Mask8x64) Int8x64 // // This operation is applied selectively under a write mask. // -// Asm: VPBLENDMW, CPU Feature: AVX512BW +// Asm: VPBLENDMW, CPU Feature: AVX512 func (x Int16x32) blendMasked(y Int16x32, mask Mask16x32) Int16x32 // blendMasked blends two vectors based on mask values, choosing either @@ -12567,7 +12567,7 @@ func (x Int16x32) blendMasked(y Int16x32, mask Mask16x32) Int16x32 // // This operation is applied selectively under a write mask. // -// Asm: VPBLENDMD, CPU Feature: AVX512F +// Asm: VPBLENDMD, CPU Feature: AVX512 func (x Int32x16) blendMasked(y Int32x16, mask Mask32x16) Int32x16 // blendMasked blends two vectors based on mask values, choosing either @@ -12575,7 +12575,7 @@ func (x Int32x16) blendMasked(y Int32x16, mask Mask32x16) Int32x16 // // This operation is applied selectively under a write mask. // -// Asm: VPBLENDMQ, CPU Feature: AVX512F +// Asm: VPBLENDMQ, CPU Feature: AVX512 func (x Int64x8) blendMasked(y Int64x8, mask Mask64x8) Int64x8 // Float64x2 converts from Float32x4 to Float64x2 From e33eb1a7a53a218f86847fc1af354bc54fa8cae4 Mon Sep 17 00:00:00 2001 From: Austin Clements Date: Mon, 11 Aug 2025 16:02:53 -0400 Subject: [PATCH 124/139] [dev.simd] cmd/compile, simd: update generated files This CL is generated by x/arch CL 694860 Change-Id: Ifa7c0e9749b1d9a20f31b70aafe563d7844ce6b0 Reviewed-on: https://go-review.googlesource.com/c/go/+/694917 Auto-Submit: Austin Clements Reviewed-by: David Chase Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 1 + src/cmd/compile/internal/ssa/_gen/simdgenericOps.go | 1 + src/cmd/compile/internal/ssagen/simdintrinsics.go | 1 + 3 files changed, 3 insertions(+) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 386415ac41547b..afea4c0a46e4db 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -1,4 +1,5 @@ // Code generated by x/arch/internal/simdgen using 'go run . 
-xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. + package main func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vfpkv, w11, w21, w2k, wkw, w2kw, w2kk, w31, w3kw, wgpw, wgp, wfpw, wfpkw regInfo) []opData { diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 2378f196453927..fea701e174f0bb 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -1,4 +1,5 @@ // Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. + package main func simdGenericOps() []opData { diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 02d68a57ccc542..e14e02a71e5444 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -1,4 +1,5 @@ // Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. + package ssagen import ( From 702ee2d51ed0522e3942d0dd2819e2c8cb8a10f2 Mon Sep 17 00:00:00 2001 From: Austin Clements Date: Mon, 11 Aug 2025 16:03:41 -0400 Subject: [PATCH 125/139] [dev.simd] cmd/compile, simd: update generated files This CL is generated by x/arch CL 694861 Change-Id: I2af1aaacbe9374d98b13be972713fc2cb1177927 Reviewed-on: https://go-review.googlesource.com/c/go/+/694918 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase Reviewed-by: Junyang Shao Auto-Submit: Austin Clements --- src/simd/cpu.go | 74 ++++++++++++++++++++++++++++++++++--------------- 1 file changed, 52 insertions(+), 22 deletions(-) diff --git a/src/simd/cpu.go b/src/simd/cpu.go index 7bc511652549c2..cbde9a8e1ff2eb 100644 --- a/src/simd/cpu.go +++ b/src/simd/cpu.go @@ -1,62 +1,92 @@ -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. +// Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. //go:build goexperiment.simd -// The build condition == if the experiment is not on, cmd/api TestCheck will see this and complain -// see also go/doc/comment, where "simd" is inserted to the package list of the experiment is not on. - package simd import "internal/cpu" -// HasAVX checks AVX CPU feature. +// HasAVX returns whether the CPU supports the AVX feature. +// +// HasAVX is defined on all GOARCHes, but will only return true on +// GOARCH amd64. func HasAVX() bool { return cpu.X86.HasAVX } -// HasAVXVNNI checks AVX CPU feature VNNI. -func HasAVXVNNI() bool { - return cpu.X86.HasAVXVNNI -} - -// HasAVX2 checks AVX2 CPU feature. +// HasAVX2 returns whether the CPU supports the AVX2 feature. +// +// HasAVX2 is defined on all GOARCHes, but will only return true on +// GOARCH amd64. func HasAVX2() bool { return cpu.X86.HasAVX2 } -// HasAVX512 checks AVX512 CPU feature F+CD+BW+DQ+VL. +// HasAVX512 returns whether the CPU supports the AVX512F+CD+BW+DQ+VL features. +// +// These five CPU features are bundled together, and no use of AVX-512 +// is allowed unless all of these features are supported together. +// Nearly every CPU that has shipped with any support for AVX-512 has +// supported all five of these features. 
+// +// HasAVX512 is defined on all GOARCHes, but will only return true on +// GOARCH amd64. func HasAVX512() bool { return cpu.X86.HasAVX512 } -// HasAVX512GFNI checks AVX512 CPU feature GFNI. +// HasAVX512BITALG returns whether the CPU supports the AVX512BITALG feature. +// +// HasAVX512BITALG is defined on all GOARCHes, but will only return true on +// GOARCH amd64. +func HasAVX512BITALG() bool { + return cpu.X86.HasAVX512BITALG +} + +// HasAVX512GFNI returns whether the CPU supports the AVX512GFNI feature. +// +// HasAVX512GFNI is defined on all GOARCHes, but will only return true on +// GOARCH amd64. func HasAVX512GFNI() bool { return cpu.X86.HasAVX512GFNI } -// HasAVX512VBMI checks AVX512 CPU feature VBMI +// HasAVX512VBMI returns whether the CPU supports the AVX512VBMI feature. +// +// HasAVX512VBMI is defined on all GOARCHes, but will only return true on +// GOARCH amd64. func HasAVX512VBMI() bool { return cpu.X86.HasAVX512VBMI } -// HasAVX512VBMI2 checks AVX512 CPU feature VBMI2 +// HasAVX512VBMI2 returns whether the CPU supports the AVX512VBMI2 feature. +// +// HasAVX512VBMI2 is defined on all GOARCHes, but will only return true on +// GOARCH amd64. func HasAVX512VBMI2() bool { return cpu.X86.HasAVX512VBMI2 } -// HasAVX512VNNI checks AVX512 CPU feature VNNI +// HasAVX512VNNI returns whether the CPU supports the AVX512VNNI feature. +// +// HasAVX512VNNI is defined on all GOARCHes, but will only return true on +// GOARCH amd64. func HasAVX512VNNI() bool { return cpu.X86.HasAVX512VNNI } -// HasAVX512VPOPCNTDQ checks AVX512 CPU feature VPOPCNTDQ +// HasAVX512VPOPCNTDQ returns whether the CPU supports the AVX512VPOPCNTDQ feature. +// +// HasAVX512VPOPCNTDQ is defined on all GOARCHes, but will only return true on +// GOARCH amd64. func HasAVX512VPOPCNTDQ() bool { return cpu.X86.HasAVX512VPOPCNTDQ } -// HasAVX512BITALG checks AVX512 CPU feature BITALG -func HasAVX512BITALG() bool { - return cpu.X86.HasAVX512BITALG +// HasAVXVNNI returns whether the CPU supports the AVXVNNI feature. +// +// HasAVXVNNI is defined on all GOARCHes, but will only return true on +// GOARCH amd64. 
+func HasAVXVNNI() bool { + return cpu.X86.HasAVXVNNI } From 08ab8e24a310944768717356e188a14c46c7447b Mon Sep 17 00:00:00 2001 From: David Chase Date: Tue, 12 Aug 2025 17:01:55 -0400 Subject: [PATCH 126/139] [dev.simd] cmd/compile: generated code from 'fix generated rules for shifts' this code is generated by simdgen CL 695455 Change-Id: I5afdc209a50b49d68e120130e0578e4666bf8749 Reviewed-on: https://go-review.googlesource.com/c/go/+/695475 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao --- .../compile/internal/ssa/_gen/simdAMD64.rules | 180 +- src/cmd/compile/internal/ssa/rewriteAMD64.go | 4130 +++++++---------- 2 files changed, 1777 insertions(+), 2533 deletions(-) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index abfa10020dec49..80cddaae79e2a1 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -1444,42 +1444,33 @@ (SetLoUint32x16 x y) => (VINSERTI64X4512 [0] x y) (SetLoUint64x4 x y) => (VINSERTI128256 [0] x y) (SetLoUint64x8 x y) => (VINSERTI64X4512 [0] x y) -(ShiftAllLeftInt16x8 x (MOVQconst [c])) => (VPSLLW128const [uint8(c)] x) -(ShiftAllLeftInt16x8 x y) => (VPSLLW128 x y) -(ShiftAllLeftInt16x16 x (MOVQconst [c])) => (VPSLLW256const [uint8(c)] x) -(ShiftAllLeftInt16x16 x y) => (VPSLLW256 x y) -(ShiftAllLeftInt16x32 x (MOVQconst [c])) => (VPSLLW512const [uint8(c)] x) -(ShiftAllLeftInt16x32 x y) => (VPSLLW512 x y) -(ShiftAllLeftInt32x4 x (MOVQconst [c])) => (VPSLLD128const [uint8(c)] x) -(ShiftAllLeftInt32x4 x y) => (VPSLLD128 x y) -(ShiftAllLeftInt32x8 x (MOVQconst [c])) => (VPSLLD256const [uint8(c)] x) -(ShiftAllLeftInt32x8 x y) => (VPSLLD256 x y) -(ShiftAllLeftInt32x16 x (MOVQconst [c])) => (VPSLLD512const [uint8(c)] x) -(ShiftAllLeftInt32x16 x y) => (VPSLLD512 x y) -(ShiftAllLeftInt64x2 x (MOVQconst [c])) => (VPSLLQ128const [uint8(c)] x) -(ShiftAllLeftInt64x2 x y) => (VPSLLQ128 x y) -(ShiftAllLeftInt64x4 x (MOVQconst [c])) => (VPSLLQ256const [uint8(c)] x) -(ShiftAllLeftInt64x4 x y) => (VPSLLQ256 x y) -(ShiftAllLeftInt64x8 x (MOVQconst [c])) => (VPSLLQ512const [uint8(c)] x) -(ShiftAllLeftInt64x8 x y) => (VPSLLQ512 x y) -(ShiftAllLeftUint16x8 x (MOVQconst [c])) => (VPSLLW128const [uint8(c)] x) -(ShiftAllLeftUint16x8 x y) => (VPSLLW128 x y) -(ShiftAllLeftUint16x16 x (MOVQconst [c])) => (VPSLLW256const [uint8(c)] x) -(ShiftAllLeftUint16x16 x y) => (VPSLLW256 x y) -(ShiftAllLeftUint16x32 x (MOVQconst [c])) => (VPSLLW512const [uint8(c)] x) -(ShiftAllLeftUint16x32 x y) => (VPSLLW512 x y) -(ShiftAllLeftUint32x4 x (MOVQconst [c])) => (VPSLLD128const [uint8(c)] x) -(ShiftAllLeftUint32x4 x y) => (VPSLLD128 x y) -(ShiftAllLeftUint32x8 x (MOVQconst [c])) => (VPSLLD256const [uint8(c)] x) -(ShiftAllLeftUint32x8 x y) => (VPSLLD256 x y) -(ShiftAllLeftUint32x16 x (MOVQconst [c])) => (VPSLLD512const [uint8(c)] x) -(ShiftAllLeftUint32x16 x y) => (VPSLLD512 x y) -(ShiftAllLeftUint64x2 x (MOVQconst [c])) => (VPSLLQ128const [uint8(c)] x) -(ShiftAllLeftUint64x2 x y) => (VPSLLQ128 x y) -(ShiftAllLeftUint64x4 x (MOVQconst [c])) => (VPSLLQ256const [uint8(c)] x) -(ShiftAllLeftUint64x4 x y) => (VPSLLQ256 x y) -(ShiftAllLeftUint64x8 x (MOVQconst [c])) => (VPSLLQ512const [uint8(c)] x) -(ShiftAllLeftUint64x8 x y) => (VPSLLQ512 x y) +(ShiftAllLeftInt16x8 ...) => (VPSLLW128 ...) +(VPSLLW128 x (MOVQconst [c])) => (VPSLLW128const [uint8(c)] x) +(ShiftAllLeftInt16x16 ...) => (VPSLLW256 ...) 
+(VPSLLW256 x (MOVQconst [c])) => (VPSLLW256const [uint8(c)] x) +(ShiftAllLeftInt16x32 ...) => (VPSLLW512 ...) +(VPSLLW512 x (MOVQconst [c])) => (VPSLLW512const [uint8(c)] x) +(ShiftAllLeftInt32x4 ...) => (VPSLLD128 ...) +(VPSLLD128 x (MOVQconst [c])) => (VPSLLD128const [uint8(c)] x) +(ShiftAllLeftInt32x8 ...) => (VPSLLD256 ...) +(VPSLLD256 x (MOVQconst [c])) => (VPSLLD256const [uint8(c)] x) +(ShiftAllLeftInt32x16 ...) => (VPSLLD512 ...) +(VPSLLD512 x (MOVQconst [c])) => (VPSLLD512const [uint8(c)] x) +(ShiftAllLeftInt64x2 ...) => (VPSLLQ128 ...) +(VPSLLQ128 x (MOVQconst [c])) => (VPSLLQ128const [uint8(c)] x) +(ShiftAllLeftInt64x4 ...) => (VPSLLQ256 ...) +(VPSLLQ256 x (MOVQconst [c])) => (VPSLLQ256const [uint8(c)] x) +(ShiftAllLeftInt64x8 ...) => (VPSLLQ512 ...) +(VPSLLQ512 x (MOVQconst [c])) => (VPSLLQ512const [uint8(c)] x) +(ShiftAllLeftUint16x8 ...) => (VPSLLW128 ...) +(ShiftAllLeftUint16x16 ...) => (VPSLLW256 ...) +(ShiftAllLeftUint16x32 ...) => (VPSLLW512 ...) +(ShiftAllLeftUint32x4 ...) => (VPSLLD128 ...) +(ShiftAllLeftUint32x8 ...) => (VPSLLD256 ...) +(ShiftAllLeftUint32x16 ...) => (VPSLLD512 ...) +(ShiftAllLeftUint64x2 ...) => (VPSLLQ128 ...) +(ShiftAllLeftUint64x4 ...) => (VPSLLQ256 ...) +(ShiftAllLeftUint64x8 ...) => (VPSLLQ512 ...) (ShiftAllLeftConcatInt16x8 ...) => (VPSHLDW128 ...) (ShiftAllLeftConcatInt16x16 ...) => (VPSHLDW256 ...) (ShiftAllLeftConcatInt16x32 ...) => (VPSHLDW512 ...) @@ -1516,78 +1507,60 @@ (ShiftAllLeftConcatMaskedUint64x2 [a] x y mask) => (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) (ShiftAllLeftConcatMaskedUint64x4 [a] x y mask) => (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) (ShiftAllLeftConcatMaskedUint64x8 [a] x y mask) => (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) -(ShiftAllLeftMaskedInt16x8 x (MOVQconst [c]) mask) => (VPSLLWMasked128const [uint8(c)] x (VPMOVVec16x8ToM mask)) +(VPSLLWMasked128 x (MOVQconst [c]) mask) => (VPSLLWMasked128const [uint8(c)] x mask) (ShiftAllLeftMaskedInt16x8 x y mask) => (VPSLLWMasked128 x y (VPMOVVec16x8ToM mask)) -(ShiftAllLeftMaskedInt16x16 x (MOVQconst [c]) mask) => (VPSLLWMasked256const [uint8(c)] x (VPMOVVec16x16ToM mask)) +(VPSLLWMasked256 x (MOVQconst [c]) mask) => (VPSLLWMasked256const [uint8(c)] x mask) (ShiftAllLeftMaskedInt16x16 x y mask) => (VPSLLWMasked256 x y (VPMOVVec16x16ToM mask)) -(ShiftAllLeftMaskedInt16x32 x (MOVQconst [c]) mask) => (VPSLLWMasked512const [uint8(c)] x (VPMOVVec16x32ToM mask)) +(VPSLLWMasked512 x (MOVQconst [c]) mask) => (VPSLLWMasked512const [uint8(c)] x mask) (ShiftAllLeftMaskedInt16x32 x y mask) => (VPSLLWMasked512 x y (VPMOVVec16x32ToM mask)) -(ShiftAllLeftMaskedInt32x4 x (MOVQconst [c]) mask) => (VPSLLDMasked128const [uint8(c)] x (VPMOVVec32x4ToM mask)) +(VPSLLDMasked128 x (MOVQconst [c]) mask) => (VPSLLDMasked128const [uint8(c)] x mask) (ShiftAllLeftMaskedInt32x4 x y mask) => (VPSLLDMasked128 x y (VPMOVVec32x4ToM mask)) -(ShiftAllLeftMaskedInt32x8 x (MOVQconst [c]) mask) => (VPSLLDMasked256const [uint8(c)] x (VPMOVVec32x8ToM mask)) +(VPSLLDMasked256 x (MOVQconst [c]) mask) => (VPSLLDMasked256const [uint8(c)] x mask) (ShiftAllLeftMaskedInt32x8 x y mask) => (VPSLLDMasked256 x y (VPMOVVec32x8ToM mask)) -(ShiftAllLeftMaskedInt32x16 x (MOVQconst [c]) mask) => (VPSLLDMasked512const [uint8(c)] x (VPMOVVec32x16ToM mask)) +(VPSLLDMasked512 x (MOVQconst [c]) mask) => (VPSLLDMasked512const [uint8(c)] x mask) (ShiftAllLeftMaskedInt32x16 x y mask) => (VPSLLDMasked512 x y (VPMOVVec32x16ToM mask)) -(ShiftAllLeftMaskedInt64x2 x (MOVQconst [c]) mask) => (VPSLLQMasked128const 
[uint8(c)] x (VPMOVVec64x2ToM mask)) +(VPSLLQMasked128 x (MOVQconst [c]) mask) => (VPSLLQMasked128const [uint8(c)] x mask) (ShiftAllLeftMaskedInt64x2 x y mask) => (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) -(ShiftAllLeftMaskedInt64x4 x (MOVQconst [c]) mask) => (VPSLLQMasked256const [uint8(c)] x (VPMOVVec64x4ToM mask)) +(VPSLLQMasked256 x (MOVQconst [c]) mask) => (VPSLLQMasked256const [uint8(c)] x mask) (ShiftAllLeftMaskedInt64x4 x y mask) => (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) -(ShiftAllLeftMaskedInt64x8 x (MOVQconst [c]) mask) => (VPSLLQMasked512const [uint8(c)] x (VPMOVVec64x8ToM mask)) +(VPSLLQMasked512 x (MOVQconst [c]) mask) => (VPSLLQMasked512const [uint8(c)] x mask) (ShiftAllLeftMaskedInt64x8 x y mask) => (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) -(ShiftAllLeftMaskedUint16x8 x (MOVQconst [c]) mask) => (VPSLLWMasked128const [uint8(c)] x (VPMOVVec16x8ToM mask)) (ShiftAllLeftMaskedUint16x8 x y mask) => (VPSLLWMasked128 x y (VPMOVVec16x8ToM mask)) -(ShiftAllLeftMaskedUint16x16 x (MOVQconst [c]) mask) => (VPSLLWMasked256const [uint8(c)] x (VPMOVVec16x16ToM mask)) (ShiftAllLeftMaskedUint16x16 x y mask) => (VPSLLWMasked256 x y (VPMOVVec16x16ToM mask)) -(ShiftAllLeftMaskedUint16x32 x (MOVQconst [c]) mask) => (VPSLLWMasked512const [uint8(c)] x (VPMOVVec16x32ToM mask)) (ShiftAllLeftMaskedUint16x32 x y mask) => (VPSLLWMasked512 x y (VPMOVVec16x32ToM mask)) -(ShiftAllLeftMaskedUint32x4 x (MOVQconst [c]) mask) => (VPSLLDMasked128const [uint8(c)] x (VPMOVVec32x4ToM mask)) (ShiftAllLeftMaskedUint32x4 x y mask) => (VPSLLDMasked128 x y (VPMOVVec32x4ToM mask)) -(ShiftAllLeftMaskedUint32x8 x (MOVQconst [c]) mask) => (VPSLLDMasked256const [uint8(c)] x (VPMOVVec32x8ToM mask)) (ShiftAllLeftMaskedUint32x8 x y mask) => (VPSLLDMasked256 x y (VPMOVVec32x8ToM mask)) -(ShiftAllLeftMaskedUint32x16 x (MOVQconst [c]) mask) => (VPSLLDMasked512const [uint8(c)] x (VPMOVVec32x16ToM mask)) (ShiftAllLeftMaskedUint32x16 x y mask) => (VPSLLDMasked512 x y (VPMOVVec32x16ToM mask)) -(ShiftAllLeftMaskedUint64x2 x (MOVQconst [c]) mask) => (VPSLLQMasked128const [uint8(c)] x (VPMOVVec64x2ToM mask)) (ShiftAllLeftMaskedUint64x2 x y mask) => (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) -(ShiftAllLeftMaskedUint64x4 x (MOVQconst [c]) mask) => (VPSLLQMasked256const [uint8(c)] x (VPMOVVec64x4ToM mask)) (ShiftAllLeftMaskedUint64x4 x y mask) => (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) -(ShiftAllLeftMaskedUint64x8 x (MOVQconst [c]) mask) => (VPSLLQMasked512const [uint8(c)] x (VPMOVVec64x8ToM mask)) (ShiftAllLeftMaskedUint64x8 x y mask) => (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) -(ShiftAllRightInt16x8 x (MOVQconst [c])) => (VPSRAW128const [uint8(c)] x) -(ShiftAllRightInt16x8 x y) => (VPSRAW128 x y) -(ShiftAllRightInt16x16 x (MOVQconst [c])) => (VPSRAW256const [uint8(c)] x) -(ShiftAllRightInt16x16 x y) => (VPSRAW256 x y) -(ShiftAllRightInt16x32 x (MOVQconst [c])) => (VPSRAW512const [uint8(c)] x) -(ShiftAllRightInt16x32 x y) => (VPSRAW512 x y) -(ShiftAllRightInt32x4 x (MOVQconst [c])) => (VPSRAD128const [uint8(c)] x) -(ShiftAllRightInt32x4 x y) => (VPSRAD128 x y) -(ShiftAllRightInt32x8 x (MOVQconst [c])) => (VPSRAD256const [uint8(c)] x) -(ShiftAllRightInt32x8 x y) => (VPSRAD256 x y) -(ShiftAllRightInt32x16 x (MOVQconst [c])) => (VPSRAD512const [uint8(c)] x) -(ShiftAllRightInt32x16 x y) => (VPSRAD512 x y) -(ShiftAllRightInt64x2 x (MOVQconst [c])) => (VPSRAQ128const [uint8(c)] x) -(ShiftAllRightInt64x2 x y) => (VPSRAQ128 x y) -(ShiftAllRightInt64x4 x (MOVQconst [c])) => (VPSRAQ256const [uint8(c)] x) 
-(ShiftAllRightInt64x4 x y) => (VPSRAQ256 x y) -(ShiftAllRightInt64x8 x (MOVQconst [c])) => (VPSRAQ512const [uint8(c)] x) -(ShiftAllRightInt64x8 x y) => (VPSRAQ512 x y) -(ShiftAllRightUint16x8 x (MOVQconst [c])) => (VPSRLW128const [uint8(c)] x) -(ShiftAllRightUint16x8 x y) => (VPSRLW128 x y) -(ShiftAllRightUint16x16 x (MOVQconst [c])) => (VPSRLW256const [uint8(c)] x) -(ShiftAllRightUint16x16 x y) => (VPSRLW256 x y) -(ShiftAllRightUint16x32 x (MOVQconst [c])) => (VPSRLW512const [uint8(c)] x) -(ShiftAllRightUint16x32 x y) => (VPSRLW512 x y) -(ShiftAllRightUint32x4 x (MOVQconst [c])) => (VPSRLD128const [uint8(c)] x) -(ShiftAllRightUint32x4 x y) => (VPSRLD128 x y) -(ShiftAllRightUint32x8 x (MOVQconst [c])) => (VPSRLD256const [uint8(c)] x) -(ShiftAllRightUint32x8 x y) => (VPSRLD256 x y) -(ShiftAllRightUint32x16 x (MOVQconst [c])) => (VPSRLD512const [uint8(c)] x) -(ShiftAllRightUint32x16 x y) => (VPSRLD512 x y) -(ShiftAllRightUint64x2 x (MOVQconst [c])) => (VPSRLQ128const [uint8(c)] x) -(ShiftAllRightUint64x2 x y) => (VPSRLQ128 x y) -(ShiftAllRightUint64x4 x (MOVQconst [c])) => (VPSRLQ256const [uint8(c)] x) -(ShiftAllRightUint64x4 x y) => (VPSRLQ256 x y) -(ShiftAllRightUint64x8 x (MOVQconst [c])) => (VPSRLQ512const [uint8(c)] x) -(ShiftAllRightUint64x8 x y) => (VPSRLQ512 x y) +(ShiftAllRightInt16x8 ...) => (VPSRAW128 ...) +(VPSRAW128 x (MOVQconst [c])) => (VPSRAW128const [uint8(c)] x) +(ShiftAllRightInt16x16 ...) => (VPSRAW256 ...) +(VPSRAW256 x (MOVQconst [c])) => (VPSRAW256const [uint8(c)] x) +(ShiftAllRightInt16x32 ...) => (VPSRAW512 ...) +(VPSRAW512 x (MOVQconst [c])) => (VPSRAW512const [uint8(c)] x) +(ShiftAllRightInt32x4 ...) => (VPSRAD128 ...) +(VPSRAD128 x (MOVQconst [c])) => (VPSRAD128const [uint8(c)] x) +(ShiftAllRightInt32x8 ...) => (VPSRAD256 ...) +(VPSRAD256 x (MOVQconst [c])) => (VPSRAD256const [uint8(c)] x) +(ShiftAllRightInt32x16 ...) => (VPSRAD512 ...) +(VPSRAD512 x (MOVQconst [c])) => (VPSRAD512const [uint8(c)] x) +(ShiftAllRightInt64x2 ...) => (VPSRAQ128 ...) +(VPSRAQ128 x (MOVQconst [c])) => (VPSRAQ128const [uint8(c)] x) +(ShiftAllRightInt64x4 ...) => (VPSRAQ256 ...) +(VPSRAQ256 x (MOVQconst [c])) => (VPSRAQ256const [uint8(c)] x) +(ShiftAllRightInt64x8 ...) => (VPSRAQ512 ...) +(VPSRAQ512 x (MOVQconst [c])) => (VPSRAQ512const [uint8(c)] x) +(ShiftAllRightUint16x8 ...) => (VPSRLW128 ...) +(ShiftAllRightUint16x16 ...) => (VPSRLW256 ...) +(ShiftAllRightUint16x32 ...) => (VPSRLW512 ...) +(ShiftAllRightUint32x4 ...) => (VPSRLD128 ...) +(ShiftAllRightUint32x8 ...) => (VPSRLD256 ...) +(ShiftAllRightUint32x16 ...) => (VPSRLD512 ...) +(ShiftAllRightUint64x2 ...) => (VPSRLQ128 ...) +(ShiftAllRightUint64x4 ...) => (VPSRLQ256 ...) +(ShiftAllRightUint64x8 ...) => (VPSRLQ512 ...) (ShiftAllRightConcatInt16x8 ...) => (VPSHRDW128 ...) (ShiftAllRightConcatInt16x16 ...) => (VPSHRDW256 ...) (ShiftAllRightConcatInt16x32 ...) => (VPSHRDW512 ...) 
@@ -1624,41 +1597,32 @@ (ShiftAllRightConcatMaskedUint64x2 [a] x y mask) => (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) (ShiftAllRightConcatMaskedUint64x4 [a] x y mask) => (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) (ShiftAllRightConcatMaskedUint64x8 [a] x y mask) => (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) -(ShiftAllRightMaskedInt16x8 x (MOVQconst [c]) mask) => (VPSRAWMasked128const [uint8(c)] x (VPMOVVec16x8ToM mask)) +(VPSRAWMasked128 x (MOVQconst [c]) mask) => (VPSRAWMasked128const [uint8(c)] x mask) (ShiftAllRightMaskedInt16x8 x y mask) => (VPSRAWMasked128 x y (VPMOVVec16x8ToM mask)) -(ShiftAllRightMaskedInt16x16 x (MOVQconst [c]) mask) => (VPSRAWMasked256const [uint8(c)] x (VPMOVVec16x16ToM mask)) +(VPSRAWMasked256 x (MOVQconst [c]) mask) => (VPSRAWMasked256const [uint8(c)] x mask) (ShiftAllRightMaskedInt16x16 x y mask) => (VPSRAWMasked256 x y (VPMOVVec16x16ToM mask)) -(ShiftAllRightMaskedInt16x32 x (MOVQconst [c]) mask) => (VPSRAWMasked512const [uint8(c)] x (VPMOVVec16x32ToM mask)) +(VPSRAWMasked512 x (MOVQconst [c]) mask) => (VPSRAWMasked512const [uint8(c)] x mask) (ShiftAllRightMaskedInt16x32 x y mask) => (VPSRAWMasked512 x y (VPMOVVec16x32ToM mask)) -(ShiftAllRightMaskedInt32x4 x (MOVQconst [c]) mask) => (VPSRADMasked128const [uint8(c)] x (VPMOVVec32x4ToM mask)) +(VPSRADMasked128 x (MOVQconst [c]) mask) => (VPSRADMasked128const [uint8(c)] x mask) (ShiftAllRightMaskedInt32x4 x y mask) => (VPSRADMasked128 x y (VPMOVVec32x4ToM mask)) -(ShiftAllRightMaskedInt32x8 x (MOVQconst [c]) mask) => (VPSRADMasked256const [uint8(c)] x (VPMOVVec32x8ToM mask)) +(VPSRADMasked256 x (MOVQconst [c]) mask) => (VPSRADMasked256const [uint8(c)] x mask) (ShiftAllRightMaskedInt32x8 x y mask) => (VPSRADMasked256 x y (VPMOVVec32x8ToM mask)) -(ShiftAllRightMaskedInt32x16 x (MOVQconst [c]) mask) => (VPSRADMasked512const [uint8(c)] x (VPMOVVec32x16ToM mask)) +(VPSRADMasked512 x (MOVQconst [c]) mask) => (VPSRADMasked512const [uint8(c)] x mask) (ShiftAllRightMaskedInt32x16 x y mask) => (VPSRADMasked512 x y (VPMOVVec32x16ToM mask)) -(ShiftAllRightMaskedInt64x2 x (MOVQconst [c]) mask) => (VPSRAQMasked128const [uint8(c)] x (VPMOVVec64x2ToM mask)) +(VPSRAQMasked128 x (MOVQconst [c]) mask) => (VPSRAQMasked128const [uint8(c)] x mask) (ShiftAllRightMaskedInt64x2 x y mask) => (VPSRAQMasked128 x y (VPMOVVec64x2ToM mask)) -(ShiftAllRightMaskedInt64x4 x (MOVQconst [c]) mask) => (VPSRAQMasked256const [uint8(c)] x (VPMOVVec64x4ToM mask)) +(VPSRAQMasked256 x (MOVQconst [c]) mask) => (VPSRAQMasked256const [uint8(c)] x mask) (ShiftAllRightMaskedInt64x4 x y mask) => (VPSRAQMasked256 x y (VPMOVVec64x4ToM mask)) -(ShiftAllRightMaskedInt64x8 x (MOVQconst [c]) mask) => (VPSRAQMasked512const [uint8(c)] x (VPMOVVec64x8ToM mask)) +(VPSRAQMasked512 x (MOVQconst [c]) mask) => (VPSRAQMasked512const [uint8(c)] x mask) (ShiftAllRightMaskedInt64x8 x y mask) => (VPSRAQMasked512 x y (VPMOVVec64x8ToM mask)) -(ShiftAllRightMaskedUint16x8 x (MOVQconst [c]) mask) => (VPSRLWMasked128const [uint8(c)] x (VPMOVVec16x8ToM mask)) (ShiftAllRightMaskedUint16x8 x y mask) => (VPSRLWMasked128 x y (VPMOVVec16x8ToM mask)) -(ShiftAllRightMaskedUint16x16 x (MOVQconst [c]) mask) => (VPSRLWMasked256const [uint8(c)] x (VPMOVVec16x16ToM mask)) (ShiftAllRightMaskedUint16x16 x y mask) => (VPSRLWMasked256 x y (VPMOVVec16x16ToM mask)) -(ShiftAllRightMaskedUint16x32 x (MOVQconst [c]) mask) => (VPSRLWMasked512const [uint8(c)] x (VPMOVVec16x32ToM mask)) (ShiftAllRightMaskedUint16x32 x y mask) => (VPSRLWMasked512 x y (VPMOVVec16x32ToM mask)) 
-(ShiftAllRightMaskedUint32x4 x (MOVQconst [c]) mask) => (VPSRLDMasked128const [uint8(c)] x (VPMOVVec32x4ToM mask)) (ShiftAllRightMaskedUint32x4 x y mask) => (VPSRLDMasked128 x y (VPMOVVec32x4ToM mask)) -(ShiftAllRightMaskedUint32x8 x (MOVQconst [c]) mask) => (VPSRLDMasked256const [uint8(c)] x (VPMOVVec32x8ToM mask)) (ShiftAllRightMaskedUint32x8 x y mask) => (VPSRLDMasked256 x y (VPMOVVec32x8ToM mask)) -(ShiftAllRightMaskedUint32x16 x (MOVQconst [c]) mask) => (VPSRLDMasked512const [uint8(c)] x (VPMOVVec32x16ToM mask)) (ShiftAllRightMaskedUint32x16 x y mask) => (VPSRLDMasked512 x y (VPMOVVec32x16ToM mask)) -(ShiftAllRightMaskedUint64x2 x (MOVQconst [c]) mask) => (VPSRLQMasked128const [uint8(c)] x (VPMOVVec64x2ToM mask)) (ShiftAllRightMaskedUint64x2 x y mask) => (VPSRLQMasked128 x y (VPMOVVec64x2ToM mask)) -(ShiftAllRightMaskedUint64x4 x (MOVQconst [c]) mask) => (VPSRLQMasked256const [uint8(c)] x (VPMOVVec64x4ToM mask)) (ShiftAllRightMaskedUint64x4 x y mask) => (VPSRLQMasked256 x y (VPMOVVec64x4ToM mask)) -(ShiftAllRightMaskedUint64x8 x (MOVQconst [c]) mask) => (VPSRLQMasked512const [uint8(c)] x (VPMOVVec64x8ToM mask)) (ShiftAllRightMaskedUint64x8 x y mask) => (VPSRLQMasked512 x y (VPMOVVec64x8ToM mask)) (ShiftLeftInt16x8 ...) => (VPSLLVW128 ...) (ShiftLeftInt16x16 ...) => (VPSLLVW256 ...) diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index fbe8a448d8d14a..c5367adefec432 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -531,6 +531,78 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VPMOVVec8x32ToM(v) case OpAMD64VPMOVVec8x64ToM: return rewriteValueAMD64_OpAMD64VPMOVVec8x64ToM(v) + case OpAMD64VPSLLD128: + return rewriteValueAMD64_OpAMD64VPSLLD128(v) + case OpAMD64VPSLLD256: + return rewriteValueAMD64_OpAMD64VPSLLD256(v) + case OpAMD64VPSLLD512: + return rewriteValueAMD64_OpAMD64VPSLLD512(v) + case OpAMD64VPSLLDMasked128: + return rewriteValueAMD64_OpAMD64VPSLLDMasked128(v) + case OpAMD64VPSLLDMasked256: + return rewriteValueAMD64_OpAMD64VPSLLDMasked256(v) + case OpAMD64VPSLLDMasked512: + return rewriteValueAMD64_OpAMD64VPSLLDMasked512(v) + case OpAMD64VPSLLQ128: + return rewriteValueAMD64_OpAMD64VPSLLQ128(v) + case OpAMD64VPSLLQ256: + return rewriteValueAMD64_OpAMD64VPSLLQ256(v) + case OpAMD64VPSLLQ512: + return rewriteValueAMD64_OpAMD64VPSLLQ512(v) + case OpAMD64VPSLLQMasked128: + return rewriteValueAMD64_OpAMD64VPSLLQMasked128(v) + case OpAMD64VPSLLQMasked256: + return rewriteValueAMD64_OpAMD64VPSLLQMasked256(v) + case OpAMD64VPSLLQMasked512: + return rewriteValueAMD64_OpAMD64VPSLLQMasked512(v) + case OpAMD64VPSLLW128: + return rewriteValueAMD64_OpAMD64VPSLLW128(v) + case OpAMD64VPSLLW256: + return rewriteValueAMD64_OpAMD64VPSLLW256(v) + case OpAMD64VPSLLW512: + return rewriteValueAMD64_OpAMD64VPSLLW512(v) + case OpAMD64VPSLLWMasked128: + return rewriteValueAMD64_OpAMD64VPSLLWMasked128(v) + case OpAMD64VPSLLWMasked256: + return rewriteValueAMD64_OpAMD64VPSLLWMasked256(v) + case OpAMD64VPSLLWMasked512: + return rewriteValueAMD64_OpAMD64VPSLLWMasked512(v) + case OpAMD64VPSRAD128: + return rewriteValueAMD64_OpAMD64VPSRAD128(v) + case OpAMD64VPSRAD256: + return rewriteValueAMD64_OpAMD64VPSRAD256(v) + case OpAMD64VPSRAD512: + return rewriteValueAMD64_OpAMD64VPSRAD512(v) + case OpAMD64VPSRADMasked128: + return rewriteValueAMD64_OpAMD64VPSRADMasked128(v) + case OpAMD64VPSRADMasked256: + return rewriteValueAMD64_OpAMD64VPSRADMasked256(v) + case 
OpAMD64VPSRADMasked512: + return rewriteValueAMD64_OpAMD64VPSRADMasked512(v) + case OpAMD64VPSRAQ128: + return rewriteValueAMD64_OpAMD64VPSRAQ128(v) + case OpAMD64VPSRAQ256: + return rewriteValueAMD64_OpAMD64VPSRAQ256(v) + case OpAMD64VPSRAQ512: + return rewriteValueAMD64_OpAMD64VPSRAQ512(v) + case OpAMD64VPSRAQMasked128: + return rewriteValueAMD64_OpAMD64VPSRAQMasked128(v) + case OpAMD64VPSRAQMasked256: + return rewriteValueAMD64_OpAMD64VPSRAQMasked256(v) + case OpAMD64VPSRAQMasked512: + return rewriteValueAMD64_OpAMD64VPSRAQMasked512(v) + case OpAMD64VPSRAW128: + return rewriteValueAMD64_OpAMD64VPSRAW128(v) + case OpAMD64VPSRAW256: + return rewriteValueAMD64_OpAMD64VPSRAW256(v) + case OpAMD64VPSRAW512: + return rewriteValueAMD64_OpAMD64VPSRAW512(v) + case OpAMD64VPSRAWMasked128: + return rewriteValueAMD64_OpAMD64VPSRAWMasked128(v) + case OpAMD64VPSRAWMasked256: + return rewriteValueAMD64_OpAMD64VPSRAWMasked256(v) + case OpAMD64VPSRAWMasked512: + return rewriteValueAMD64_OpAMD64VPSRAWMasked512(v) case OpAMD64XADDLlock: return rewriteValueAMD64_OpAMD64XADDLlock(v) case OpAMD64XADDQlock: @@ -4662,23 +4734,32 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64VPSHLDQ512 return true case OpShiftAllLeftInt16x16: - return rewriteValueAMD64_OpShiftAllLeftInt16x16(v) + v.Op = OpAMD64VPSLLW256 + return true case OpShiftAllLeftInt16x32: - return rewriteValueAMD64_OpShiftAllLeftInt16x32(v) + v.Op = OpAMD64VPSLLW512 + return true case OpShiftAllLeftInt16x8: - return rewriteValueAMD64_OpShiftAllLeftInt16x8(v) + v.Op = OpAMD64VPSLLW128 + return true case OpShiftAllLeftInt32x16: - return rewriteValueAMD64_OpShiftAllLeftInt32x16(v) + v.Op = OpAMD64VPSLLD512 + return true case OpShiftAllLeftInt32x4: - return rewriteValueAMD64_OpShiftAllLeftInt32x4(v) + v.Op = OpAMD64VPSLLD128 + return true case OpShiftAllLeftInt32x8: - return rewriteValueAMD64_OpShiftAllLeftInt32x8(v) + v.Op = OpAMD64VPSLLD256 + return true case OpShiftAllLeftInt64x2: - return rewriteValueAMD64_OpShiftAllLeftInt64x2(v) + v.Op = OpAMD64VPSLLQ128 + return true case OpShiftAllLeftInt64x4: - return rewriteValueAMD64_OpShiftAllLeftInt64x4(v) + v.Op = OpAMD64VPSLLQ256 + return true case OpShiftAllLeftInt64x8: - return rewriteValueAMD64_OpShiftAllLeftInt64x8(v) + v.Op = OpAMD64VPSLLQ512 + return true case OpShiftAllLeftMaskedInt16x16: return rewriteValueAMD64_OpShiftAllLeftMaskedInt16x16(v) case OpShiftAllLeftMaskedInt16x32: @@ -4716,23 +4797,32 @@ func rewriteValueAMD64(v *Value) bool { case OpShiftAllLeftMaskedUint64x8: return rewriteValueAMD64_OpShiftAllLeftMaskedUint64x8(v) case OpShiftAllLeftUint16x16: - return rewriteValueAMD64_OpShiftAllLeftUint16x16(v) + v.Op = OpAMD64VPSLLW256 + return true case OpShiftAllLeftUint16x32: - return rewriteValueAMD64_OpShiftAllLeftUint16x32(v) + v.Op = OpAMD64VPSLLW512 + return true case OpShiftAllLeftUint16x8: - return rewriteValueAMD64_OpShiftAllLeftUint16x8(v) + v.Op = OpAMD64VPSLLW128 + return true case OpShiftAllLeftUint32x16: - return rewriteValueAMD64_OpShiftAllLeftUint32x16(v) + v.Op = OpAMD64VPSLLD512 + return true case OpShiftAllLeftUint32x4: - return rewriteValueAMD64_OpShiftAllLeftUint32x4(v) + v.Op = OpAMD64VPSLLD128 + return true case OpShiftAllLeftUint32x8: - return rewriteValueAMD64_OpShiftAllLeftUint32x8(v) + v.Op = OpAMD64VPSLLD256 + return true case OpShiftAllLeftUint64x2: - return rewriteValueAMD64_OpShiftAllLeftUint64x2(v) + v.Op = OpAMD64VPSLLQ128 + return true case OpShiftAllLeftUint64x4: - return rewriteValueAMD64_OpShiftAllLeftUint64x4(v) + v.Op = OpAMD64VPSLLQ256 + return 
true case OpShiftAllLeftUint64x8: - return rewriteValueAMD64_OpShiftAllLeftUint64x8(v) + v.Op = OpAMD64VPSLLQ512 + return true case OpShiftAllRightConcatInt16x16: v.Op = OpAMD64VPSHRDW256 return true @@ -4824,23 +4914,32 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64VPSHRDQ512 return true case OpShiftAllRightInt16x16: - return rewriteValueAMD64_OpShiftAllRightInt16x16(v) + v.Op = OpAMD64VPSRAW256 + return true case OpShiftAllRightInt16x32: - return rewriteValueAMD64_OpShiftAllRightInt16x32(v) + v.Op = OpAMD64VPSRAW512 + return true case OpShiftAllRightInt16x8: - return rewriteValueAMD64_OpShiftAllRightInt16x8(v) + v.Op = OpAMD64VPSRAW128 + return true case OpShiftAllRightInt32x16: - return rewriteValueAMD64_OpShiftAllRightInt32x16(v) + v.Op = OpAMD64VPSRAD512 + return true case OpShiftAllRightInt32x4: - return rewriteValueAMD64_OpShiftAllRightInt32x4(v) + v.Op = OpAMD64VPSRAD128 + return true case OpShiftAllRightInt32x8: - return rewriteValueAMD64_OpShiftAllRightInt32x8(v) + v.Op = OpAMD64VPSRAD256 + return true case OpShiftAllRightInt64x2: - return rewriteValueAMD64_OpShiftAllRightInt64x2(v) + v.Op = OpAMD64VPSRAQ128 + return true case OpShiftAllRightInt64x4: - return rewriteValueAMD64_OpShiftAllRightInt64x4(v) + v.Op = OpAMD64VPSRAQ256 + return true case OpShiftAllRightInt64x8: - return rewriteValueAMD64_OpShiftAllRightInt64x8(v) + v.Op = OpAMD64VPSRAQ512 + return true case OpShiftAllRightMaskedInt16x16: return rewriteValueAMD64_OpShiftAllRightMaskedInt16x16(v) case OpShiftAllRightMaskedInt16x32: @@ -4878,23 +4977,32 @@ func rewriteValueAMD64(v *Value) bool { case OpShiftAllRightMaskedUint64x8: return rewriteValueAMD64_OpShiftAllRightMaskedUint64x8(v) case OpShiftAllRightUint16x16: - return rewriteValueAMD64_OpShiftAllRightUint16x16(v) + v.Op = OpAMD64VPSRLW256 + return true case OpShiftAllRightUint16x32: - return rewriteValueAMD64_OpShiftAllRightUint16x32(v) + v.Op = OpAMD64VPSRLW512 + return true case OpShiftAllRightUint16x8: - return rewriteValueAMD64_OpShiftAllRightUint16x8(v) + v.Op = OpAMD64VPSRLW128 + return true case OpShiftAllRightUint32x16: - return rewriteValueAMD64_OpShiftAllRightUint32x16(v) + v.Op = OpAMD64VPSRLD512 + return true case OpShiftAllRightUint32x4: - return rewriteValueAMD64_OpShiftAllRightUint32x4(v) + v.Op = OpAMD64VPSRLD128 + return true case OpShiftAllRightUint32x8: - return rewriteValueAMD64_OpShiftAllRightUint32x8(v) + v.Op = OpAMD64VPSRLD256 + return true case OpShiftAllRightUint64x2: - return rewriteValueAMD64_OpShiftAllRightUint64x2(v) + v.Op = OpAMD64VPSRLQ128 + return true case OpShiftAllRightUint64x4: - return rewriteValueAMD64_OpShiftAllRightUint64x4(v) + v.Op = OpAMD64VPSRLQ256 + return true case OpShiftAllRightUint64x8: - return rewriteValueAMD64_OpShiftAllRightUint64x8(v) + v.Op = OpAMD64VPSRLQ512 + return true case OpShiftLeftConcatInt16x16: v.Op = OpAMD64VPSHLDVW256 return true @@ -27713,416 +27821,1100 @@ func rewriteValueAMD64_OpAMD64VPMOVVec8x64ToM(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64XADDLlock(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAMD64VPSLLD128(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem) - // cond: is32Bit(int64(off1)+int64(off2)) - // result: (XADDLlock [off1+off2] {sym} val ptr mem) + // match: (VPSLLD128 x (MOVQconst [c])) + // result: (VPSLLD128const [uint8(c)] x) for { - off1 := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - val := v_0 - if v_1.Op != OpAMD64ADDQconst { - break - } - off2 := 
auxIntToInt32(v_1.AuxInt) - ptr := v_1.Args[0] - mem := v_2 - if !(is32Bit(int64(off1) + int64(off2))) { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - v.reset(OpAMD64XADDLlock) - v.AuxInt = int32ToAuxInt(off1 + off2) - v.Aux = symToAux(sym) - v.AddArg3(val, ptr, mem) + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLD128const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) return true } return false } -func rewriteValueAMD64_OpAMD64XADDQlock(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAMD64VPSLLD256(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem) - // cond: is32Bit(int64(off1)+int64(off2)) - // result: (XADDQlock [off1+off2] {sym} val ptr mem) + // match: (VPSLLD256 x (MOVQconst [c])) + // result: (VPSLLD256const [uint8(c)] x) for { - off1 := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - val := v_0 - if v_1.Op != OpAMD64ADDQconst { - break - } - off2 := auxIntToInt32(v_1.AuxInt) - ptr := v_1.Args[0] - mem := v_2 - if !(is32Bit(int64(off1) + int64(off2))) { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - v.reset(OpAMD64XADDQlock) - v.AuxInt = int32ToAuxInt(off1 + off2) - v.Aux = symToAux(sym) - v.AddArg3(val, ptr, mem) + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLD256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) return true } return false } -func rewriteValueAMD64_OpAMD64XCHGL(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAMD64VPSLLD512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem) - // cond: is32Bit(int64(off1)+int64(off2)) - // result: (XCHGL [off1+off2] {sym} val ptr mem) + // match: (VPSLLD512 x (MOVQconst [c])) + // result: (VPSLLD512const [uint8(c)] x) for { - off1 := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - val := v_0 - if v_1.Op != OpAMD64ADDQconst { - break - } - off2 := auxIntToInt32(v_1.AuxInt) - ptr := v_1.Args[0] - mem := v_2 - if !(is32Bit(int64(off1) + int64(off2))) { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - v.reset(OpAMD64XCHGL) - v.AuxInt = int32ToAuxInt(off1 + off2) - v.Aux = symToAux(sym) - v.AddArg3(val, ptr, mem) + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLD512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) return true } - // match: (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) - // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB - // result: (XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLDMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLDMasked128 x (MOVQconst [c]) mask) + // result: (VPSLLDMasked128const [uint8(c)] x mask) for { - off1 := auxIntToInt32(v.AuxInt) - sym1 := auxToSym(v.Aux) - val := v_0 - if v_1.Op != OpAMD64LEAQ { - break - } - off2 := auxIntToInt32(v_1.AuxInt) - sym2 := auxToSym(v_1.Aux) - ptr := v_1.Args[0] - mem := v_2 - if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - v.reset(OpAMD64XCHGL) - v.AuxInt = int32ToAuxInt(off1 + off2) - v.Aux = symToAux(mergeSym(sym1, sym2)) - v.AddArg3(val, ptr, mem) + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLDMasked128const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) return true } return false } -func rewriteValueAMD64_OpAMD64XCHGQ(v *Value) bool { +func 
rewriteValueAMD64_OpAMD64VPSLLDMasked256(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem) - // cond: is32Bit(int64(off1)+int64(off2)) - // result: (XCHGQ [off1+off2] {sym} val ptr mem) + // match: (VPSLLDMasked256 x (MOVQconst [c]) mask) + // result: (VPSLLDMasked256const [uint8(c)] x mask) for { - off1 := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - val := v_0 - if v_1.Op != OpAMD64ADDQconst { - break - } - off2 := auxIntToInt32(v_1.AuxInt) - ptr := v_1.Args[0] - mem := v_2 - if !(is32Bit(int64(off1) + int64(off2))) { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - v.reset(OpAMD64XCHGQ) - v.AuxInt = int32ToAuxInt(off1 + off2) - v.Aux = symToAux(sym) - v.AddArg3(val, ptr, mem) + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLDMasked256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) return true } - // match: (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) - // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB - // result: (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLDMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLDMasked512 x (MOVQconst [c]) mask) + // result: (VPSLLDMasked512const [uint8(c)] x mask) for { - off1 := auxIntToInt32(v.AuxInt) - sym1 := auxToSym(v.Aux) - val := v_0 - if v_1.Op != OpAMD64LEAQ { - break - } - off2 := auxIntToInt32(v_1.AuxInt) - sym2 := auxToSym(v_1.Aux) - ptr := v_1.Args[0] - mem := v_2 - if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - v.reset(OpAMD64XCHGQ) - v.AuxInt = int32ToAuxInt(off1 + off2) - v.Aux = symToAux(mergeSym(sym1, sym2)) - v.AddArg3(val, ptr, mem) + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLDMasked512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) return true } return false } -func rewriteValueAMD64_OpAMD64XORL(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSLLQ128(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (XORL (SHLL (MOVLconst [1]) y) x) - // result: (BTCL x y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpAMD64SHLL { - continue - } - y := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0.AuxInt) != 1 { - continue - } - x := v_1 - v.reset(OpAMD64BTCL) - v.AddArg2(x, y) - return true - } - break - } - // match: (XORL x (MOVLconst [c])) - // result: (XORLconst [c] x) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - if v_1.Op != OpAMD64MOVLconst { - continue - } - c := auxIntToInt32(v_1.AuxInt) - v.reset(OpAMD64XORLconst) - v.AuxInt = int32ToAuxInt(c) - v.AddArg(x) - return true - } - break - } - // match: (XORL x x) - // result: (MOVLconst [0]) + // match: (VPSLLQ128 x (MOVQconst [c])) + // result: (VPSLLQ128const [uint8(c)] x) for { x := v_0 - if x != v_1 { + if v_1.Op != OpAMD64MOVQconst { break } - v.reset(OpAMD64MOVLconst) - v.AuxInt = int32ToAuxInt(0) + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLQ128const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) return true } - // match: (XORL x l:(MOVLload [off] {sym} ptr mem)) - // cond: canMergeLoadClobber(v, l, x) && clobber(l) - // result: (XORLload x [off] {sym} ptr mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, 
v_1, v_0 { - x := v_0 - l := v_1 - if l.Op != OpAMD64MOVLload { - continue - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && clobber(l)) { - continue - } - v.reset(OpAMD64XORLload) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - break - } - // match: (XORL x (ADDLconst [-1] x)) - // cond: buildcfg.GOAMD64 >= 3 - // result: (BLSMSKL x) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLQ256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLQ256 x (MOVQconst [c])) + // result: (VPSLLQ256const [uint8(c)] x) for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - if v_1.Op != OpAMD64ADDLconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) { - continue - } - v.reset(OpAMD64BLSMSKL) - v.AddArg(x) - return true + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break } - break + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLQ256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) + return true } return false } -func rewriteValueAMD64_OpAMD64XORLconst(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSLLQ512(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (XORLconst [1] (SETNE x)) - // result: (SETEQ x) + // match: (VPSLLQ512 x (MOVQconst [c])) + // result: (VPSLLQ512const [uint8(c)] x) for { - if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETNE { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - x := v_0.Args[0] - v.reset(OpAMD64SETEQ) + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLQ512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } - // match: (XORLconst [1] (SETEQ x)) - // result: (SETNE x) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLQMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLQMasked128 x (MOVQconst [c]) mask) + // result: (VPSLLQMasked128const [uint8(c)] x mask) for { - if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETEQ { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - x := v_0.Args[0] - v.reset(OpAMD64SETNE) - v.AddArg(x) + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLQMasked128const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) return true } - // match: (XORLconst [1] (SETL x)) - // result: (SETGE x) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLQMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLQMasked256 x (MOVQconst [c]) mask) + // result: (VPSLLQMasked256const [uint8(c)] x mask) for { - if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETL { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - x := v_0.Args[0] - v.reset(OpAMD64SETGE) - v.AddArg(x) + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLQMasked256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) return true } - // match: (XORLconst [1] (SETGE x)) - // result: (SETL x) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLQMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLQMasked512 x (MOVQconst [c]) mask) + // result: (VPSLLQMasked512const [uint8(c)] x mask) for { - if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETGE { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - x := v_0.Args[0] - v.reset(OpAMD64SETL) - v.AddArg(x) + c := auxIntToInt64(v_1.AuxInt) + mask := 
v_2 + v.reset(OpAMD64VPSLLQMasked512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) return true } - // match: (XORLconst [1] (SETLE x)) - // result: (SETG x) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLW128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLW128 x (MOVQconst [c])) + // result: (VPSLLW128const [uint8(c)] x) for { - if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETLE { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - x := v_0.Args[0] - v.reset(OpAMD64SETG) + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLW128const) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } - // match: (XORLconst [1] (SETG x)) - // result: (SETLE x) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLW256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLW256 x (MOVQconst [c])) + // result: (VPSLLW256const [uint8(c)] x) for { - if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETG { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - x := v_0.Args[0] - v.reset(OpAMD64SETLE) + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLW256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } - // match: (XORLconst [1] (SETB x)) - // result: (SETAE x) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLW512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLW512 x (MOVQconst [c])) + // result: (VPSLLW512const [uint8(c)] x) for { - if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETB { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - x := v_0.Args[0] - v.reset(OpAMD64SETAE) + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLW512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } - // match: (XORLconst [1] (SETAE x)) - // result: (SETB x) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLWMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLWMasked128 x (MOVQconst [c]) mask) + // result: (VPSLLWMasked128const [uint8(c)] x mask) for { - if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETAE { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - x := v_0.Args[0] - v.reset(OpAMD64SETB) - v.AddArg(x) + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLWMasked128const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) return true } - // match: (XORLconst [1] (SETBE x)) - // result: (SETA x) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLWMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLWMasked256 x (MOVQconst [c]) mask) + // result: (VPSLLWMasked256const [uint8(c)] x mask) for { - if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETBE { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - x := v_0.Args[0] - v.reset(OpAMD64SETA) - v.AddArg(x) + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLWMasked256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) return true } - // match: (XORLconst [1] (SETA x)) - // result: (SETBE x) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLWMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLWMasked512 x (MOVQconst [c]) mask) + // result: (VPSLLWMasked512const [uint8(c)] x mask) for { - if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETA { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - x := v_0.Args[0] - v.reset(OpAMD64SETBE) - v.AddArg(x) + c := 
auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLWMasked512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) return true } - // match: (XORLconst [c] (XORLconst [d] x)) - // result: (XORLconst [c ^ d] x) + return false +} +func rewriteValueAMD64_OpAMD64VPSRAD128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAD128 x (MOVQconst [c])) + // result: (VPSRAD128const [uint8(c)] x) for { - c := auxIntToInt32(v.AuxInt) - if v_0.Op != OpAMD64XORLconst { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - d := auxIntToInt32(v_0.AuxInt) - x := v_0.Args[0] - v.reset(OpAMD64XORLconst) - v.AuxInt = int32ToAuxInt(c ^ d) + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAD128const) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } - // match: (XORLconst [c] x) - // cond: c==0 - // result: x + return false +} +func rewriteValueAMD64_OpAMD64VPSRAD256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAD256 x (MOVQconst [c])) + // result: (VPSRAD256const [uint8(c)] x) for { - c := auxIntToInt32(v.AuxInt) x := v_0 - if !(c == 0) { + if v_1.Op != OpAMD64MOVQconst { break } - v.copyOf(x) + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAD256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) return true } - // match: (XORLconst [c] (MOVLconst [d])) - // result: (MOVLconst [c^d]) + return false +} +func rewriteValueAMD64_OpAMD64VPSRAD512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAD512 x (MOVQconst [c])) + // result: (VPSRAD512const [uint8(c)] x) for { - c := auxIntToInt32(v.AuxInt) - if v_0.Op != OpAMD64MOVLconst { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - d := auxIntToInt32(v_0.AuxInt) - v.reset(OpAMD64MOVLconst) - v.AuxInt = int32ToAuxInt(c ^ d) + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAD512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) return true } return false } -func rewriteValueAMD64_OpAMD64XORLconstmodify(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSRADMasked128(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (XORLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) - // cond: ValAndOff(valoff1).canAdd32(off2) - // result: (XORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) + // match: (VPSRADMasked128 x (MOVQconst [c]) mask) + // result: (VPSRADMasked128const [uint8(c)] x mask) for { - valoff1 := auxIntToValAndOff(v.AuxInt) - sym := auxToSym(v.Aux) + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRADMasked128const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRADMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRADMasked256 x (MOVQconst [c]) mask) + // result: (VPSRADMasked256const [uint8(c)] x mask) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRADMasked256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRADMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRADMasked512 x (MOVQconst [c]) mask) + // result: (VPSRADMasked512const [uint8(c)] x mask) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask 
:= v_2 + v.reset(OpAMD64VPSRADMasked512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAQ128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAQ128 x (MOVQconst [c])) + // result: (VPSRAQ128const [uint8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAQ128const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAQ256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAQ256 x (MOVQconst [c])) + // result: (VPSRAQ256const [uint8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAQ256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAQ512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAQ512 x (MOVQconst [c])) + // result: (VPSRAQ512const [uint8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAQ512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAQMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAQMasked128 x (MOVQconst [c]) mask) + // result: (VPSRAQMasked128const [uint8(c)] x mask) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRAQMasked128const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAQMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAQMasked256 x (MOVQconst [c]) mask) + // result: (VPSRAQMasked256const [uint8(c)] x mask) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRAQMasked256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAQMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAQMasked512 x (MOVQconst [c]) mask) + // result: (VPSRAQMasked512const [uint8(c)] x mask) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRAQMasked512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAW128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAW128 x (MOVQconst [c])) + // result: (VPSRAW128const [uint8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAW128const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAW256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAW256 x (MOVQconst [c])) + // result: (VPSRAW256const [uint8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAW256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) + return true + } + return false +} 
+func rewriteValueAMD64_OpAMD64VPSRAW512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAW512 x (MOVQconst [c])) + // result: (VPSRAW512const [uint8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAW512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAWMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAWMasked128 x (MOVQconst [c]) mask) + // result: (VPSRAWMasked128const [uint8(c)] x mask) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRAWMasked128const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAWMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAWMasked256 x (MOVQconst [c]) mask) + // result: (VPSRAWMasked256const [uint8(c)] x mask) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRAWMasked256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAWMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAWMasked512 x (MOVQconst [c]) mask) + // result: (VPSRAWMasked512const [uint8(c)] x mask) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRAWMasked512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64XADDLlock(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (XADDLlock [off1+off2] {sym} val ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + val := v_0 + if v_1.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + ptr := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64XADDLlock) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(val, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64XADDQlock(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (XADDQlock [off1+off2] {sym} val ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + val := v_0 + if v_1.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + ptr := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64XADDQlock) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(val, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64XCHGL(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (XCHGL [off1+off2] {sym} val ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + val := v_0 + if 
v_1.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + ptr := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64XCHGL) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(val, ptr, mem) + return true + } + // match: (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB + // result: (XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + val := v_0 + if v_1.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + ptr := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) { + break + } + v.reset(OpAMD64XCHGL) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(val, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64XCHGQ(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (XCHGQ [off1+off2] {sym} val ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + val := v_0 + if v_1.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + ptr := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64XCHGQ) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(val, ptr, mem) + return true + } + // match: (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB + // result: (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + val := v_0 + if v_1.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + ptr := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) { + break + } + v.reset(OpAMD64XCHGQ) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(val, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64XORL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XORL (SHLL (MOVLconst [1]) y) x) + // result: (BTCL x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAMD64SHLL { + continue + } + y := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0.AuxInt) != 1 { + continue + } + x := v_1 + v.reset(OpAMD64BTCL) + v.AddArg2(x, y) + return true + } + break + } + // match: (XORL x (MOVLconst [c])) + // result: (XORLconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64MOVLconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpAMD64XORLconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + // match: (XORL x x) + // result: (MOVLconst [0]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (XORL x l:(MOVLload [off] {sym} ptr mem)) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) + // result: (XORLload x [off] {sym} ptr mem) + for { + for _i0 := 0; _i0 <= 
1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64MOVLload { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + continue + } + v.reset(OpAMD64XORLload) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + // match: (XORL x (ADDLconst [-1] x)) + // cond: buildcfg.GOAMD64 >= 3 + // result: (BLSMSKL x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64ADDLconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) { + continue + } + v.reset(OpAMD64BLSMSKL) + v.AddArg(x) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64XORLconst(v *Value) bool { + v_0 := v.Args[0] + // match: (XORLconst [1] (SETNE x)) + // result: (SETEQ x) + for { + if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETNE { + break + } + x := v_0.Args[0] + v.reset(OpAMD64SETEQ) + v.AddArg(x) + return true + } + // match: (XORLconst [1] (SETEQ x)) + // result: (SETNE x) + for { + if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETEQ { + break + } + x := v_0.Args[0] + v.reset(OpAMD64SETNE) + v.AddArg(x) + return true + } + // match: (XORLconst [1] (SETL x)) + // result: (SETGE x) + for { + if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETL { + break + } + x := v_0.Args[0] + v.reset(OpAMD64SETGE) + v.AddArg(x) + return true + } + // match: (XORLconst [1] (SETGE x)) + // result: (SETL x) + for { + if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETGE { + break + } + x := v_0.Args[0] + v.reset(OpAMD64SETL) + v.AddArg(x) + return true + } + // match: (XORLconst [1] (SETLE x)) + // result: (SETG x) + for { + if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETLE { + break + } + x := v_0.Args[0] + v.reset(OpAMD64SETG) + v.AddArg(x) + return true + } + // match: (XORLconst [1] (SETG x)) + // result: (SETLE x) + for { + if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETG { + break + } + x := v_0.Args[0] + v.reset(OpAMD64SETLE) + v.AddArg(x) + return true + } + // match: (XORLconst [1] (SETB x)) + // result: (SETAE x) + for { + if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETB { + break + } + x := v_0.Args[0] + v.reset(OpAMD64SETAE) + v.AddArg(x) + return true + } + // match: (XORLconst [1] (SETAE x)) + // result: (SETB x) + for { + if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETAE { + break + } + x := v_0.Args[0] + v.reset(OpAMD64SETB) + v.AddArg(x) + return true + } + // match: (XORLconst [1] (SETBE x)) + // result: (SETA x) + for { + if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETBE { + break + } + x := v_0.Args[0] + v.reset(OpAMD64SETA) + v.AddArg(x) + return true + } + // match: (XORLconst [1] (SETA x)) + // result: (SETBE x) + for { + if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETA { + break + } + x := v_0.Args[0] + v.reset(OpAMD64SETBE) + v.AddArg(x) + return true + } + // match: (XORLconst [c] (XORLconst [d] x)) + // result: (XORLconst [c ^ d] x) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64XORLconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpAMD64XORLconst) + v.AuxInt = int32ToAuxInt(c ^ d) + v.AddArg(x) + return true + } + // match: (XORLconst [c] x) + // cond: c==0 + // result: x + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(c == 0) { + break + } + v.copyOf(x) + return true + } + // match: (XORLconst 
[c] (MOVLconst [d])) + // result: (MOVLconst [c^d]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64MOVLconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(c ^ d) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64XORLconstmodify(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XORLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) + // cond: ValAndOff(valoff1).canAdd32(off2) + // result: (XORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) + for { + valoff1 := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } @@ -51779,2188 +52571,1252 @@ func rewriteValueAMD64_OpSetHiFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (SetHiFloat32x16 x y) - // result: (VINSERTF64X4512 [1] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTF64X4512) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetHiFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetHiFloat32x8 x y) - // result: (VINSERTF128256 [1] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTF128256) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetHiFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetHiFloat64x4 x y) - // result: (VINSERTF128256 [1] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTF128256) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetHiFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetHiFloat64x8 x y) - // result: (VINSERTF64X4512 [1] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTF64X4512) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetHiInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetHiInt16x16 x y) - // result: (VINSERTI128256 [1] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetHiInt16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetHiInt16x32 x y) - // result: (VINSERTI64X4512 [1] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetHiInt32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetHiInt32x16 x y) - // result: (VINSERTI64X4512 [1] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetHiInt32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetHiInt32x8 x y) - // result: (VINSERTI128256 [1] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetHiInt64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetHiInt64x4 x y) - // result: (VINSERTI128256 [1] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetHiInt64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetHiInt64x8 x y) - // result: (VINSERTI64X4512 
[1] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetHiInt8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetHiInt8x32 x y) - // result: (VINSERTI128256 [1] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetHiInt8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetHiInt8x64 x y) - // result: (VINSERTI64X4512 [1] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetHiUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetHiUint16x16 x y) - // result: (VINSERTI128256 [1] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetHiUint16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetHiUint16x32 x y) - // result: (VINSERTI64X4512 [1] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetHiUint32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetHiUint32x16 x y) - // result: (VINSERTI64X4512 [1] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetHiUint32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetHiUint32x8 x y) - // result: (VINSERTI128256 [1] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetHiUint64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetHiUint64x4 x y) - // result: (VINSERTI128256 [1] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetHiUint64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetHiUint64x8 x y) - // result: (VINSERTI64X4512 [1] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetHiUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetHiUint8x32 x y) - // result: (VINSERTI128256 [1] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetHiUint8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetHiUint8x64 x y) - // result: (VINSERTI64X4512 [1] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetLoFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetLoFloat32x16 x y) - // result: (VINSERTF64X4512 [0] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTF64X4512) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetLoFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetLoFloat32x8 x 
y) - // result: (VINSERTF128256 [0] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTF128256) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetLoFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetLoFloat64x4 x y) - // result: (VINSERTF128256 [0] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTF128256) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetLoFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetLoFloat64x8 x y) - // result: (VINSERTF64X4512 [0] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTF64X4512) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetLoInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetLoInt16x16 x y) - // result: (VINSERTI128256 [0] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetLoInt16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetLoInt16x32 x y) - // result: (VINSERTI64X4512 [0] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetLoInt32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetLoInt32x16 x y) - // result: (VINSERTI64X4512 [0] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetLoInt32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetLoInt32x8 x y) - // result: (VINSERTI128256 [0] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetLoInt64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetLoInt64x4 x y) - // result: (VINSERTI128256 [0] x y) + // result: (VINSERTF64X4512 [1] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = uint8ToAuxInt(0) + v.reset(OpAMD64VINSERTF64X4512) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpSetLoInt64x8(v *Value) bool { +func rewriteValueAMD64_OpSetHiFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetLoInt64x8 x y) - // result: (VINSERTI64X4512 [0] x y) + // match: (SetHiFloat32x8 x y) + // result: (VINSERTF128256 [1] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = uint8ToAuxInt(0) + v.reset(OpAMD64VINSERTF128256) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpSetLoInt8x32(v *Value) bool { +func rewriteValueAMD64_OpSetHiFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetLoInt8x32 x y) - // result: (VINSERTI128256 [0] x y) + // match: (SetHiFloat64x4 x y) + // result: (VINSERTF128256 [1] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = uint8ToAuxInt(0) + v.reset(OpAMD64VINSERTF128256) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpSetLoInt8x64(v *Value) bool { +func rewriteValueAMD64_OpSetHiFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetLoInt8x64 x y) - // result: (VINSERTI64X4512 [0] x y) + // match: (SetHiFloat64x8 x y) + // result: 
(VINSERTF64X4512 [1] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = uint8ToAuxInt(0) + v.reset(OpAMD64VINSERTF64X4512) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpSetLoUint16x16(v *Value) bool { +func rewriteValueAMD64_OpSetHiInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetLoUint16x16 x y) - // result: (VINSERTI128256 [0] x y) + // match: (SetHiInt16x16 x y) + // result: (VINSERTI128256 [1] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI128256) - v.AuxInt = uint8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpSetLoUint16x32(v *Value) bool { +func rewriteValueAMD64_OpSetHiInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetLoUint16x32 x y) - // result: (VINSERTI64X4512 [0] x y) + // match: (SetHiInt16x32 x y) + // result: (VINSERTI64X4512 [1] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = uint8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpSetLoUint32x16(v *Value) bool { +func rewriteValueAMD64_OpSetHiInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetLoUint32x16 x y) - // result: (VINSERTI64X4512 [0] x y) + // match: (SetHiInt32x16 x y) + // result: (VINSERTI64X4512 [1] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = uint8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpSetLoUint32x8(v *Value) bool { +func rewriteValueAMD64_OpSetHiInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetLoUint32x8 x y) - // result: (VINSERTI128256 [0] x y) + // match: (SetHiInt32x8 x y) + // result: (VINSERTI128256 [1] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI128256) - v.AuxInt = uint8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpSetLoUint64x4(v *Value) bool { +func rewriteValueAMD64_OpSetHiInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetLoUint64x4 x y) - // result: (VINSERTI128256 [0] x y) + // match: (SetHiInt64x4 x y) + // result: (VINSERTI128256 [1] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI128256) - v.AuxInt = uint8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpSetLoUint64x8(v *Value) bool { +func rewriteValueAMD64_OpSetHiInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetLoUint64x8 x y) - // result: (VINSERTI64X4512 [0] x y) + // match: (SetHiInt64x8 x y) + // result: (VINSERTI64X4512 [1] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = uint8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpSetLoUint8x32(v *Value) bool { +func rewriteValueAMD64_OpSetHiInt8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetLoUint8x32 x y) - // result: (VINSERTI128256 [0] x y) + // match: (SetHiInt8x32 x y) + // result: (VINSERTI128256 [1] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI128256) - v.AuxInt = uint8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpSetLoUint8x64(v *Value) bool { +func rewriteValueAMD64_OpSetHiInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetLoUint8x64 x y) - // result: (VINSERTI64X4512 [0] x y) + // match: (SetHiInt8x64 x y) + // result: (VINSERTI64X4512 [1] x y) for 
{ x := v_0 y := v_1 v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = uint8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllLeftConcatMaskedInt16x16 [a] x y mask) - // result: (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDWMasked256) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllLeftConcatMaskedInt16x32 [a] x y mask) - // result: (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDWMasked512) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllLeftConcatMaskedInt16x8 [a] x y mask) - // result: (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDWMasked128) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetHiUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllLeftConcatMaskedInt32x16 [a] x y mask) - // result: (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) + // match: (SetHiUint16x16 x y) + // result: (VINSERTI128256 [1] x y) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDDMasked512) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = uint8ToAuxInt(1) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetHiUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllLeftConcatMaskedInt32x4 [a] x y mask) - // result: (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) + // match: (SetHiUint16x32 x y) + // result: (VINSERTI64X4512 [1] x y) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDDMasked128) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = uint8ToAuxInt(1) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetHiUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllLeftConcatMaskedInt32x8 [a] x y mask) - // result: (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM 
mask)) + // match: (SetHiUint32x16 x y) + // result: (VINSERTI64X4512 [1] x y) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDDMasked256) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = uint8ToAuxInt(1) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetHiUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllLeftConcatMaskedInt64x2 [a] x y mask) - // result: (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) + // match: (SetHiUint32x8 x y) + // result: (VINSERTI128256 [1] x y) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDQMasked128) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = uint8ToAuxInt(1) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt64x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetHiUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllLeftConcatMaskedInt64x4 [a] x y mask) - // result: (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) + // match: (SetHiUint64x4 x y) + // result: (VINSERTI128256 [1] x y) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDQMasked256) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = uint8ToAuxInt(1) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetHiUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllLeftConcatMaskedInt64x8 [a] x y mask) - // result: (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) + // match: (SetHiUint64x8 x y) + // result: (VINSERTI64X4512 [1] x y) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDQMasked512) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = uint8ToAuxInt(1) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetHiUint8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllLeftConcatMaskedUint16x16 [a] x y mask) - // result: (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) + // match: (SetHiUint8x32 x y) + // result: (VINSERTI128256 [1] x y) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDWMasked256) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = uint8ToAuxInt(1) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetHiUint8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // 
match: (ShiftAllLeftConcatMaskedUint16x32 [a] x y mask) - // result: (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) + // match: (SetHiUint8x64 x y) + // result: (VINSERTI64X4512 [1] x y) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDWMasked512) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = uint8ToAuxInt(1) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetLoFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllLeftConcatMaskedUint16x8 [a] x y mask) - // result: (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) + // match: (SetLoFloat32x16 x y) + // result: (VINSERTF64X4512 [0] x y) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDWMasked128) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTF64X4512) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetLoFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllLeftConcatMaskedUint32x16 [a] x y mask) - // result: (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) + // match: (SetLoFloat32x8 x y) + // result: (VINSERTF128256 [0] x y) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDDMasked512) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTF128256) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetLoFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllLeftConcatMaskedUint32x4 [a] x y mask) - // result: (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) + // match: (SetLoFloat64x4 x y) + // result: (VINSERTF128256 [0] x y) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDDMasked128) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTF128256) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetLoFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllLeftConcatMaskedUint32x8 [a] x y mask) - // result: (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) + // match: (SetLoFloat64x8 x y) + // result: (VINSERTF64X4512 [0] x y) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDDMasked256) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTF64X4512) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint64x2(v *Value) bool { - 
v_2 := v.Args[2] +func rewriteValueAMD64_OpSetLoInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllLeftConcatMaskedUint64x2 [a] x y mask) - // result: (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) + // match: (SetLoInt16x16 x y) + // result: (VINSERTI128256 [0] x y) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDQMasked128) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint64x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetLoInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllLeftConcatMaskedUint64x4 [a] x y mask) - // result: (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) + // match: (SetLoInt16x32 x y) + // result: (VINSERTI64X4512 [0] x y) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDQMasked256) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetLoInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllLeftConcatMaskedUint64x8 [a] x y mask) - // result: (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) + // match: (SetLoInt32x16 x y) + // result: (VINSERTI64X4512 [0] x y) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDQMasked512) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllLeftInt16x16(v *Value) bool { +func rewriteValueAMD64_OpSetLoInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftInt16x16 x (MOVQconst [c])) - // result: (VPSLLW256const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLW256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllLeftInt16x16 x y) - // result: (VPSLLW256 x y) + // match: (SetLoInt32x8 x y) + // result: (VINSERTI128256 [0] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPSLLW256) + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllLeftInt16x32(v *Value) bool { +func rewriteValueAMD64_OpSetLoInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftInt16x32 x (MOVQconst [c])) - // result: (VPSLLW512const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLW512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllLeftInt16x32 x y) - // result: (VPSLLW512 x y) + // match: (SetLoInt64x4 x y) + // result: (VINSERTI128256 [0] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPSLLW512) + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = 
uint8ToAuxInt(0) v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllLeftInt16x8(v *Value) bool { +func rewriteValueAMD64_OpSetLoInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftInt16x8 x (MOVQconst [c])) - // result: (VPSLLW128const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLW128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllLeftInt16x8 x y) - // result: (VPSLLW128 x y) + // match: (SetLoInt64x8 x y) + // result: (VINSERTI64X4512 [0] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPSLLW128) + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllLeftInt32x16(v *Value) bool { +func rewriteValueAMD64_OpSetLoInt8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftInt32x16 x (MOVQconst [c])) - // result: (VPSLLD512const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLD512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllLeftInt32x16 x y) - // result: (VPSLLD512 x y) + // match: (SetLoInt8x32 x y) + // result: (VINSERTI128256 [0] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPSLLD512) + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } -} -func rewriteValueAMD64_OpShiftAllLeftInt32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllLeftInt32x4 x (MOVQconst [c])) - // result: (VPSLLD128const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLD128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllLeftInt32x4 x y) - // result: (VPSLLD128 x y) +} +func rewriteValueAMD64_OpSetLoInt8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetLoInt8x64 x y) + // result: (VINSERTI64X4512 [0] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPSLLD128) + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllLeftInt32x8(v *Value) bool { +func rewriteValueAMD64_OpSetLoUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftInt32x8 x (MOVQconst [c])) - // result: (VPSLLD256const [uint8(c)] x) + // match: (SetLoUint16x16 x y) + // result: (VINSERTI128256 [0] x y) for { x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLD256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg2(x, y) return true } - // match: (ShiftAllLeftInt32x8 x y) - // result: (VPSLLD256 x y) +} +func rewriteValueAMD64_OpSetLoUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetLoUint16x32 x y) + // result: (VINSERTI64X4512 [0] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPSLLD256) + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllLeftInt64x2(v *Value) bool { +func rewriteValueAMD64_OpSetLoUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftInt64x2 x (MOVQconst [c])) - // result: (VPSLLQ128const [uint8(c)] x) + // match: 
(SetLoUint32x16 x y) + // result: (VINSERTI64X4512 [0] x y) for { x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLQ128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) + y := v_1 + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg2(x, y) return true } - // match: (ShiftAllLeftInt64x2 x y) - // result: (VPSLLQ128 x y) +} +func rewriteValueAMD64_OpSetLoUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetLoUint32x8 x y) + // result: (VINSERTI128256 [0] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPSLLQ128) + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllLeftInt64x4(v *Value) bool { +func rewriteValueAMD64_OpSetLoUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftInt64x4 x (MOVQconst [c])) - // result: (VPSLLQ256const [uint8(c)] x) + // match: (SetLoUint64x4 x y) + // result: (VINSERTI128256 [0] x y) for { x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLQ256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg2(x, y) return true } - // match: (ShiftAllLeftInt64x4 x y) - // result: (VPSLLQ256 x y) +} +func rewriteValueAMD64_OpSetLoUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetLoUint64x8 x y) + // result: (VINSERTI64X4512 [0] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPSLLQ256) + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllLeftInt64x8(v *Value) bool { +func rewriteValueAMD64_OpSetLoUint8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftInt64x8 x (MOVQconst [c])) - // result: (VPSLLQ512const [uint8(c)] x) + // match: (SetLoUint8x32 x y) + // result: (VINSERTI128256 [0] x y) for { x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLQ512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg2(x, y) return true } - // match: (ShiftAllLeftInt64x8 x y) - // result: (VPSLLQ512 x y) +} +func rewriteValueAMD64_OpSetLoUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetLoUint8x64 x y) + // result: (VINSERTI64X4512 [0] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPSLLQ512) + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllLeftMaskedInt16x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftMaskedInt16x16 x (MOVQconst [c]) mask) - // result: (VPSLLWMasked256const [uint8(c)] x (VPMOVVec16x16ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSLLWMasked256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } - // match: (ShiftAllLeftMaskedInt16x16 x y mask) - // result: (VPSLLWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (ShiftAllLeftConcatMaskedInt16x16 [a] x y mask) + // result: 
(VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSLLWMasked256) + v.reset(OpAMD64VPSHLDWMasked256) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftMaskedInt16x32(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftMaskedInt16x32 x (MOVQconst [c]) mask) - // result: (VPSLLWMasked512const [uint8(c)] x (VPMOVVec16x32ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSLLWMasked512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } - // match: (ShiftAllLeftMaskedInt16x32 x y mask) - // result: (VPSLLWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (ShiftAllLeftConcatMaskedInt16x32 [a] x y mask) + // result: (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSLLWMasked512) + v.reset(OpAMD64VPSHLDWMasked512) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftMaskedInt16x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftMaskedInt16x8 x (MOVQconst [c]) mask) - // result: (VPSLLWMasked128const [uint8(c)] x (VPMOVVec16x8ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSLLWMasked128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } - // match: (ShiftAllLeftMaskedInt16x8 x y mask) - // result: (VPSLLWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (ShiftAllLeftConcatMaskedInt16x8 [a] x y mask) + // result: (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSLLWMasked128) + v.reset(OpAMD64VPSHLDWMasked128) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftMaskedInt32x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftMaskedInt32x16 x (MOVQconst [c]) mask) - // result: (VPSLLDMasked512const [uint8(c)] x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSLLDMasked512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } - // match: (ShiftAllLeftMaskedInt32x16 x y mask) - // result: (VPSLLDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (ShiftAllLeftConcatMaskedInt32x16 [a] x y mask) + // result: (VPSHLDDMasked512 [a] x y 
(VPMOVVec32x16ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSLLDMasked512) + v.reset(OpAMD64VPSHLDDMasked512) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftMaskedInt32x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftMaskedInt32x4 x (MOVQconst [c]) mask) - // result: (VPSLLDMasked128const [uint8(c)] x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSLLDMasked128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } - // match: (ShiftAllLeftMaskedInt32x4 x y mask) - // result: (VPSLLDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (ShiftAllLeftConcatMaskedInt32x4 [a] x y mask) + // result: (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSLLDMasked128) + v.reset(OpAMD64VPSHLDDMasked128) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftMaskedInt32x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftMaskedInt32x8 x (MOVQconst [c]) mask) - // result: (VPSLLDMasked256const [uint8(c)] x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSLLDMasked256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } - // match: (ShiftAllLeftMaskedInt32x8 x y mask) - // result: (VPSLLDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (ShiftAllLeftConcatMaskedInt32x8 [a] x y mask) + // result: (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSLLDMasked256) + v.reset(OpAMD64VPSHLDDMasked256) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftMaskedInt64x2(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftMaskedInt64x2 x (MOVQconst [c]) mask) - // result: (VPSLLQMasked128const [uint8(c)] x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSLLQMasked128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } - // match: (ShiftAllLeftMaskedInt64x2 x y mask) - // result: (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (ShiftAllLeftConcatMaskedInt64x2 [a] x y mask) + // result: (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) for { + a := 
auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSLLQMasked128) + v.reset(OpAMD64VPSHLDQMasked128) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftMaskedInt64x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftMaskedInt64x4 x (MOVQconst [c]) mask) - // result: (VPSLLQMasked256const [uint8(c)] x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSLLQMasked256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } - // match: (ShiftAllLeftMaskedInt64x4 x y mask) - // result: (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (ShiftAllLeftConcatMaskedInt64x4 [a] x y mask) + // result: (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSLLQMasked256) + v.reset(OpAMD64VPSHLDQMasked256) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftMaskedInt64x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftMaskedInt64x8 x (MOVQconst [c]) mask) - // result: (VPSLLQMasked512const [uint8(c)] x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSLLQMasked512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } - // match: (ShiftAllLeftMaskedInt64x8 x y mask) - // result: (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (ShiftAllLeftConcatMaskedInt64x8 [a] x y mask) + // result: (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSLLQMasked512) + v.reset(OpAMD64VPSHLDQMasked512) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftMaskedUint16x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftMaskedUint16x16 x (MOVQconst [c]) mask) - // result: (VPSLLWMasked256const [uint8(c)] x (VPMOVVec16x16ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSLLWMasked256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } - // match: (ShiftAllLeftMaskedUint16x16 x y mask) - // result: (VPSLLWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (ShiftAllLeftConcatMaskedUint16x16 [a] x y mask) + // result: (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 
mask := v_2 - v.reset(OpAMD64VPSLLWMasked256) + v.reset(OpAMD64VPSHLDWMasked256) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftMaskedUint16x32(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftMaskedUint16x32 x (MOVQconst [c]) mask) - // result: (VPSLLWMasked512const [uint8(c)] x (VPMOVVec16x32ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSLLWMasked512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } - // match: (ShiftAllLeftMaskedUint16x32 x y mask) - // result: (VPSLLWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (ShiftAllLeftConcatMaskedUint16x32 [a] x y mask) + // result: (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSLLWMasked512) + v.reset(OpAMD64VPSHLDWMasked512) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftMaskedUint16x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftMaskedUint16x8 x (MOVQconst [c]) mask) - // result: (VPSLLWMasked128const [uint8(c)] x (VPMOVVec16x8ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSLLWMasked128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } - // match: (ShiftAllLeftMaskedUint16x8 x y mask) - // result: (VPSLLWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (ShiftAllLeftConcatMaskedUint16x8 [a] x y mask) + // result: (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSLLWMasked128) + v.reset(OpAMD64VPSHLDWMasked128) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftMaskedUint32x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftMaskedUint32x16 x (MOVQconst [c]) mask) - // result: (VPSLLDMasked512const [uint8(c)] x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSLLDMasked512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } - // match: (ShiftAllLeftMaskedUint32x16 x y mask) - // result: (VPSLLDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (ShiftAllLeftConcatMaskedUint32x16 [a] x y mask) + // result: (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - 
v.reset(OpAMD64VPSLLDMasked512) + v.reset(OpAMD64VPSHLDDMasked512) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftMaskedUint32x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftMaskedUint32x4 x (MOVQconst [c]) mask) - // result: (VPSLLDMasked128const [uint8(c)] x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSLLDMasked128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } - // match: (ShiftAllLeftMaskedUint32x4 x y mask) - // result: (VPSLLDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (ShiftAllLeftConcatMaskedUint32x4 [a] x y mask) + // result: (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSLLDMasked128) + v.reset(OpAMD64VPSHLDDMasked128) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftMaskedUint32x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftMaskedUint32x8 x (MOVQconst [c]) mask) - // result: (VPSLLDMasked256const [uint8(c)] x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSLLDMasked256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } - // match: (ShiftAllLeftMaskedUint32x8 x y mask) - // result: (VPSLLDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (ShiftAllLeftConcatMaskedUint32x8 [a] x y mask) + // result: (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSLLDMasked256) + v.reset(OpAMD64VPSHLDDMasked256) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftMaskedUint64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllLeftMaskedUint64x2 x (MOVQconst [c]) mask) - // result: (VPSLLQMasked128const [uint8(c)] x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSLLQMasked128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } - // match: (ShiftAllLeftMaskedUint64x2 x y mask) - // result: (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpShiftAllLeftMaskedUint64x4(v *Value) bool { +func 
rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftMaskedUint64x4 x (MOVQconst [c]) mask) - // result: (VPSLLQMasked256const [uint8(c)] x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSLLQMasked256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } - // match: (ShiftAllLeftMaskedUint64x4 x y mask) - // result: (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (ShiftAllLeftConcatMaskedUint64x2 [a] x y mask) + // result: (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSLLQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPSHLDQMasked128) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftMaskedUint64x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftMaskedUint64x8 x (MOVQconst [c]) mask) - // result: (VPSLLQMasked512const [uint8(c)] x (VPMOVVec64x8ToM mask)) + // match: (ShiftAllLeftConcatMaskedUint64x4 [a] x y mask) + // result: (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) + y := v_1 mask := v_2 - v.reset(OpAMD64VPSLLQMasked512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v.reset(OpAMD64VPSHLDQMasked256) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg2(x, v0) + v.AddArg3(x, y, v0) return true } - // match: (ShiftAllLeftMaskedUint64x8 x y mask) - // result: (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) +} +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftConcatMaskedUint64x8 [a] x y mask) + // result: (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSLLQMasked512) + v.reset(OpAMD64VPSHLDQMasked512) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftUint16x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftMaskedInt16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftUint16x16 x (MOVQconst [c])) - // result: (VPSLLW256const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLW256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllLeftUint16x16 x y) - // result: (VPSLLW256 x y) + b := v.Block + // match: (ShiftAllLeftMaskedInt16x16 x y mask) + // result: (VPSLLWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPSLLW256) - v.AddArg2(x, y) + mask := v_2 + 
v.reset(OpAMD64VPSLLWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftUint16x32(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftMaskedInt16x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftUint16x32 x (MOVQconst [c])) - // result: (VPSLLW512const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLW512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllLeftUint16x32 x y) - // result: (VPSLLW512 x y) + b := v.Block + // match: (ShiftAllLeftMaskedInt16x32 x y mask) + // result: (VPSLLWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPSLLW512) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSLLWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftUint16x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftMaskedInt16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftUint16x8 x (MOVQconst [c])) - // result: (VPSLLW128const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLW128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllLeftUint16x8 x y) - // result: (VPSLLW128 x y) + b := v.Block + // match: (ShiftAllLeftMaskedInt16x8 x y mask) + // result: (VPSLLWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPSLLW128) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSLLWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftUint32x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftMaskedInt32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftUint32x16 x (MOVQconst [c])) - // result: (VPSLLD512const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLD512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllLeftUint32x16 x y) - // result: (VPSLLD512 x y) + b := v.Block + // match: (ShiftAllLeftMaskedInt32x16 x y mask) + // result: (VPSLLDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPSLLD512) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSLLDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftUint32x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftMaskedInt32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftUint32x4 x (MOVQconst [c])) - // result: (VPSLLD128const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLD128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllLeftUint32x4 x y) - // result: (VPSLLD128 x y) + b := v.Block + // match: (ShiftAllLeftMaskedInt32x4 x y mask) + // result: (VPSLLDMasked128 x 
y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPSLLD128) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSLLDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftUint32x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftMaskedInt32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftUint32x8 x (MOVQconst [c])) - // result: (VPSLLD256const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLD256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllLeftUint32x8 x y) - // result: (VPSLLD256 x y) + b := v.Block + // match: (ShiftAllLeftMaskedInt32x8 x y mask) + // result: (VPSLLDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPSLLD256) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSLLDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftUint64x2(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftMaskedInt64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftUint64x2 x (MOVQconst [c])) - // result: (VPSLLQ128const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLQ128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllLeftUint64x2 x y) - // result: (VPSLLQ128 x y) + b := v.Block + // match: (ShiftAllLeftMaskedInt64x2 x y mask) + // result: (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPSLLQ128) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSLLQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftUint64x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftMaskedInt64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftUint64x4 x (MOVQconst [c])) - // result: (VPSLLQ256const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLQ256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllLeftUint64x4 x y) - // result: (VPSLLQ256 x y) + b := v.Block + // match: (ShiftAllLeftMaskedInt64x4 x y mask) + // result: (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPSLLQ256) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSLLQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftUint64x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftMaskedInt64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftUint64x8 x (MOVQconst [c])) - // result: (VPSLLQ512const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLQ512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllLeftUint64x8 x y) - // result: (VPSLLQ512 x 
y) + b := v.Block + // match: (ShiftAllLeftMaskedInt64x8 x y mask) + // result: (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPSLLQ512) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSLLQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt16x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftMaskedUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightConcatMaskedInt16x16 [a] x y mask) - // result: (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) + // match: (ShiftAllLeftMaskedUint16x16 x y mask) + // result: (VPSLLWMasked256 x y (VPMOVVec16x16ToM mask)) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSHRDWMasked256) - v.AuxInt = uint8ToAuxInt(a) + v.reset(OpAMD64VPSLLWMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt16x32(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftMaskedUint16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightConcatMaskedInt16x32 [a] x y mask) - // result: (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) + // match: (ShiftAllLeftMaskedUint16x32 x y mask) + // result: (VPSLLWMasked512 x y (VPMOVVec16x32ToM mask)) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSHRDWMasked512) - v.AuxInt = uint8ToAuxInt(a) + v.reset(OpAMD64VPSLLWMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt16x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftMaskedUint16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightConcatMaskedInt16x8 [a] x y mask) - // result: (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) + // match: (ShiftAllLeftMaskedUint16x8 x y mask) + // result: (VPSLLWMasked128 x y (VPMOVVec16x8ToM mask)) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSHRDWMasked128) - v.AuxInt = uint8ToAuxInt(a) + v.reset(OpAMD64VPSLLWMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt32x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftMaskedUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightConcatMaskedInt32x16 [a] x y mask) - // result: (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) + // match: (ShiftAllLeftMaskedUint32x16 x y mask) + // result: (VPSLLDMasked512 x y (VPMOVVec32x16ToM mask)) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSHRDDMasked512) - v.AuxInt = uint8ToAuxInt(a) + v.reset(OpAMD64VPSLLDMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt32x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftMaskedUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightConcatMaskedInt32x4 [a] x y mask) - 
// result: (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) + // match: (ShiftAllLeftMaskedUint32x4 x y mask) + // result: (VPSLLDMasked128 x y (VPMOVVec32x4ToM mask)) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSHRDDMasked128) - v.AuxInt = uint8ToAuxInt(a) + v.reset(OpAMD64VPSLLDMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt32x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftMaskedUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightConcatMaskedInt32x8 [a] x y mask) - // result: (VPSHRDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) + // match: (ShiftAllLeftMaskedUint32x8 x y mask) + // result: (VPSLLDMasked256 x y (VPMOVVec32x8ToM mask)) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSHRDDMasked256) - v.AuxInt = uint8ToAuxInt(a) + v.reset(OpAMD64VPSLLDMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt64x2(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftMaskedUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightConcatMaskedInt64x2 [a] x y mask) - // result: (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) + // match: (ShiftAllLeftMaskedUint64x2 x y mask) + // result: (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSHRDQMasked128) - v.AuxInt = uint8ToAuxInt(a) + v.reset(OpAMD64VPSLLQMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt64x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftMaskedUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightConcatMaskedInt64x4 [a] x y mask) - // result: (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) + // match: (ShiftAllLeftMaskedUint64x4 x y mask) + // result: (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSHRDQMasked256) - v.AuxInt = uint8ToAuxInt(a) + v.reset(OpAMD64VPSLLQMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt64x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftMaskedUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightConcatMaskedInt64x8 [a] x y mask) - // result: (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) + // match: (ShiftAllLeftMaskedUint64x8 x y mask) + // result: (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSHRDQMasked512) - v.AuxInt = uint8ToAuxInt(a) + v.reset(OpAMD64VPSLLQMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint16x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] 
b := v.Block - // match: (ShiftAllRightConcatMaskedUint16x16 [a] x y mask) + // match: (ShiftAllRightConcatMaskedInt16x16 [a] x y mask) // result: (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) for { a := auxIntToUint8(v.AuxInt) @@ -53975,12 +53831,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint16x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint16x32(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightConcatMaskedUint16x32 [a] x y mask) + // match: (ShiftAllRightConcatMaskedInt16x32 [a] x y mask) // result: (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) for { a := auxIntToUint8(v.AuxInt) @@ -53995,12 +53851,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint16x32(v *Value) bool { return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint16x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightConcatMaskedUint16x8 [a] x y mask) + // match: (ShiftAllRightConcatMaskedInt16x8 [a] x y mask) // result: (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) for { a := auxIntToUint8(v.AuxInt) @@ -54015,12 +53871,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint16x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint32x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightConcatMaskedUint32x16 [a] x y mask) + // match: (ShiftAllRightConcatMaskedInt32x16 [a] x y mask) // result: (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) for { a := auxIntToUint8(v.AuxInt) @@ -54035,12 +53891,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint32x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightConcatMaskedUint32x4 [a] x y mask) + // match: (ShiftAllRightConcatMaskedInt32x4 [a] x y mask) // result: (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) for { a := auxIntToUint8(v.AuxInt) @@ -54055,12 +53911,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint32x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightConcatMaskedUint32x8 [a] x y mask) + // match: (ShiftAllRightConcatMaskedInt32x8 [a] x y mask) // result: (VPSHRDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) for { a := auxIntToUint8(v.AuxInt) @@ -54075,12 +53931,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint64x2(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightConcatMaskedUint64x2 [a] x y mask) + // match: (ShiftAllRightConcatMaskedInt64x2 [a] x y mask) // result: (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) for { a := 
auxIntToUint8(v.AuxInt) @@ -54095,12 +53951,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint64x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightConcatMaskedUint64x4 [a] x y mask) + // match: (ShiftAllRightConcatMaskedInt64x4 [a] x y mask) // result: (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) for { a := auxIntToUint8(v.AuxInt) @@ -54115,12 +53971,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint64x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightConcatMaskedUint64x8 [a] x y mask) + // match: (ShiftAllRightConcatMaskedInt64x8 [a] x y mask) // result: (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) for { a := auxIntToUint8(v.AuxInt) @@ -54135,261 +53991,191 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpShiftAllRightInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightInt16x16 x (MOVQconst [c])) - // result: (VPSRAW256const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRAW256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllRightInt16x16 x y) - // result: (VPSRAW256 x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSRAW256) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightInt16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightInt16x32 x (MOVQconst [c])) - // result: (VPSRAW512const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRAW512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllRightInt16x32 x y) - // result: (VPSRAW512 x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSRAW512) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightInt16x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllRightInt16x8 x (MOVQconst [c])) - // result: (VPSRAW128const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRAW128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllRightInt16x8 x y) - // result: (VPSRAW128 x y) + b := v.Block + // match: (ShiftAllRightConcatMaskedUint16x16 [a] x y mask) + // result: (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSRAW128) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSHRDWMasked256) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightInt32x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint16x32(v *Value) bool { + v_2 := v.Args[2] v_1 := 
v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllRightInt32x16 x (MOVQconst [c])) - // result: (VPSRAD512const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRAD512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllRightInt32x16 x y) - // result: (VPSRAD512 x y) + b := v.Block + // match: (ShiftAllRightConcatMaskedUint16x32 [a] x y mask) + // result: (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSRAD512) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSHRDWMasked512) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightInt32x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllRightInt32x4 x (MOVQconst [c])) - // result: (VPSRAD128const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRAD128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllRightInt32x4 x y) - // result: (VPSRAD128 x y) + b := v.Block + // match: (ShiftAllRightConcatMaskedUint16x8 [a] x y mask) + // result: (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSRAD128) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSHRDWMasked128) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightInt32x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllRightInt32x8 x (MOVQconst [c])) - // result: (VPSRAD256const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRAD256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllRightInt32x8 x y) - // result: (VPSRAD256 x y) + b := v.Block + // match: (ShiftAllRightConcatMaskedUint32x16 [a] x y mask) + // result: (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSRAD256) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSHRDDMasked512) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightInt64x2(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllRightInt64x2 x (MOVQconst [c])) - // result: (VPSRAQ128const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRAQ128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllRightInt64x2 x y) - // result: (VPSRAQ128 x y) + b := v.Block + // match: (ShiftAllRightConcatMaskedUint32x4 [a] x y mask) + // result: (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) for { + a := 
auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSRAQ128) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSHRDDMasked128) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightInt64x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllRightInt64x4 x (MOVQconst [c])) - // result: (VPSRAQ256const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRAQ256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllRightInt64x4 x y) - // result: (VPSRAQ256 x y) + b := v.Block + // match: (ShiftAllRightConcatMaskedUint32x8 [a] x y mask) + // result: (VPSHRDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSRAQ256) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSHRDDMasked256) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightInt64x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllRightInt64x8 x (MOVQconst [c])) - // result: (VPSRAQ512const [uint8(c)] x) + b := v.Block + // match: (ShiftAllRightConcatMaskedUint64x2 [a] x y mask) + // result: (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRAQ512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHRDQMasked128) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (ShiftAllRightInt64x8 x y) - // result: (VPSRAQ512 x y) +} +func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllRightConcatMaskedUint64x4 [a] x y mask) + // result: (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSRAQ512) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSHRDQMasked256) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightMaskedInt16x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightMaskedInt16x16 x (MOVQconst [c]) mask) - // result: (VPSRAWMasked256const [uint8(c)] x (VPMOVVec16x16ToM mask)) + // match: (ShiftAllRightConcatMaskedUint64x8 [a] x y mask) + // result: (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) + y := v_1 mask := v_2 - v.reset(OpAMD64VPSRAWMasked256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + 
v.reset(OpAMD64VPSHRDQMasked512) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg2(x, v0) + v.AddArg3(x, y, v0) return true } +} +func rewriteValueAMD64_OpShiftAllRightMaskedInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block // match: (ShiftAllRightMaskedInt16x16 x y mask) // result: (VPSRAWMasked256 x y (VPMOVVec16x16ToM mask)) for { @@ -54408,22 +54194,6 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightMaskedInt16x32 x (MOVQconst [c]) mask) - // result: (VPSRAWMasked512const [uint8(c)] x (VPMOVVec16x32ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSRAWMasked512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } // match: (ShiftAllRightMaskedInt16x32 x y mask) // result: (VPSRAWMasked512 x y (VPMOVVec16x32ToM mask)) for { @@ -54442,22 +54212,6 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightMaskedInt16x8 x (MOVQconst [c]) mask) - // result: (VPSRAWMasked128const [uint8(c)] x (VPMOVVec16x8ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSRAWMasked128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } // match: (ShiftAllRightMaskedInt16x8 x y mask) // result: (VPSRAWMasked128 x y (VPMOVVec16x8ToM mask)) for { @@ -54476,22 +54230,6 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightMaskedInt32x16 x (MOVQconst [c]) mask) - // result: (VPSRADMasked512const [uint8(c)] x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSRADMasked512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } // match: (ShiftAllRightMaskedInt32x16 x y mask) // result: (VPSRADMasked512 x y (VPMOVVec32x16ToM mask)) for { @@ -54510,22 +54248,6 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightMaskedInt32x4 x (MOVQconst [c]) mask) - // result: (VPSRADMasked128const [uint8(c)] x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSRADMasked128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } // match: (ShiftAllRightMaskedInt32x4 x y mask) // result: (VPSRADMasked128 x y (VPMOVVec32x4ToM mask)) for { @@ -54544,22 +54266,6 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightMaskedInt32x8 x (MOVQconst [c]) mask) - // result: (VPSRADMasked256const [uint8(c)] x (VPMOVVec32x8ToM mask)) - for { - x := v_0 
- if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSRADMasked256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } // match: (ShiftAllRightMaskedInt32x8 x y mask) // result: (VPSRADMasked256 x y (VPMOVVec32x8ToM mask)) for { @@ -54578,22 +54284,6 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightMaskedInt64x2 x (MOVQconst [c]) mask) - // result: (VPSRAQMasked128const [uint8(c)] x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSRAQMasked128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } // match: (ShiftAllRightMaskedInt64x2 x y mask) // result: (VPSRAQMasked128 x y (VPMOVVec64x2ToM mask)) for { @@ -54612,22 +54302,6 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightMaskedInt64x4 x (MOVQconst [c]) mask) - // result: (VPSRAQMasked256const [uint8(c)] x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSRAQMasked256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } // match: (ShiftAllRightMaskedInt64x4 x y mask) // result: (VPSRAQMasked256 x y (VPMOVVec64x4ToM mask)) for { @@ -54646,22 +54320,6 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightMaskedInt64x8 x (MOVQconst [c]) mask) - // result: (VPSRAQMasked512const [uint8(c)] x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSRAQMasked512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } // match: (ShiftAllRightMaskedInt64x8 x y mask) // result: (VPSRAQMasked512 x y (VPMOVVec64x8ToM mask)) for { @@ -54680,22 +54338,6 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightMaskedUint16x16 x (MOVQconst [c]) mask) - // result: (VPSRLWMasked256const [uint8(c)] x (VPMOVVec16x16ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSRLWMasked256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } // match: (ShiftAllRightMaskedUint16x16 x y mask) // result: (VPSRLWMasked256 x y (VPMOVVec16x16ToM mask)) for { @@ -54714,22 +54356,6 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightMaskedUint16x32 x (MOVQconst [c]) mask) - // result: (VPSRLWMasked512const [uint8(c)] x (VPMOVVec16x32ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := 
auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSRLWMasked512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } // match: (ShiftAllRightMaskedUint16x32 x y mask) // result: (VPSRLWMasked512 x y (VPMOVVec16x32ToM mask)) for { @@ -54748,22 +54374,6 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightMaskedUint16x8 x (MOVQconst [c]) mask) - // result: (VPSRLWMasked128const [uint8(c)] x (VPMOVVec16x8ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSRLWMasked128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } // match: (ShiftAllRightMaskedUint16x8 x y mask) // result: (VPSRLWMasked128 x y (VPMOVVec16x8ToM mask)) for { @@ -54782,22 +54392,6 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightMaskedUint32x16 x (MOVQconst [c]) mask) - // result: (VPSRLDMasked512const [uint8(c)] x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSRLDMasked512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } // match: (ShiftAllRightMaskedUint32x16 x y mask) // result: (VPSRLDMasked512 x y (VPMOVVec32x16ToM mask)) for { @@ -54816,22 +54410,6 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightMaskedUint32x4 x (MOVQconst [c]) mask) - // result: (VPSRLDMasked128const [uint8(c)] x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSRLDMasked128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } // match: (ShiftAllRightMaskedUint32x4 x y mask) // result: (VPSRLDMasked128 x y (VPMOVVec32x4ToM mask)) for { @@ -54850,22 +54428,6 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightMaskedUint32x8 x (MOVQconst [c]) mask) - // result: (VPSRLDMasked256const [uint8(c)] x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSRLDMasked256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } // match: (ShiftAllRightMaskedUint32x8 x y mask) // result: (VPSRLDMasked256 x y (VPMOVVec32x8ToM mask)) for { @@ -54884,22 +54446,6 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightMaskedUint64x2 x (MOVQconst [c]) mask) - // result: (VPSRLQMasked128const [uint8(c)] x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - 
v.reset(OpAMD64VPSRLQMasked128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } // match: (ShiftAllRightMaskedUint64x2 x y mask) // result: (VPSRLQMasked128 x y (VPMOVVec64x2ToM mask)) for { @@ -54918,22 +54464,6 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightMaskedUint64x4 x (MOVQconst [c]) mask) - // result: (VPSRLQMasked256const [uint8(c)] x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSRLQMasked256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } // match: (ShiftAllRightMaskedUint64x4 x y mask) // result: (VPSRLQMasked256 x y (VPMOVVec64x4ToM mask)) for { @@ -54952,22 +54482,6 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightMaskedUint64x8 x (MOVQconst [c]) mask) - // result: (VPSRLQMasked512const [uint8(c)] x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSRLQMasked512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } // match: (ShiftAllRightMaskedUint64x8 x y mask) // result: (VPSRLQMasked512 x y (VPMOVVec64x8ToM mask)) for { @@ -54981,240 +54495,6 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpShiftAllRightUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightUint16x16 x (MOVQconst [c])) - // result: (VPSRLW256const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRLW256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllRightUint16x16 x y) - // result: (VPSRLW256 x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSRLW256) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightUint16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightUint16x32 x (MOVQconst [c])) - // result: (VPSRLW512const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRLW512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllRightUint16x32 x y) - // result: (VPSRLW512 x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSRLW512) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightUint16x8 x (MOVQconst [c])) - // result: (VPSRLW128const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRLW128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllRightUint16x8 x y) - // result: (VPSRLW128 x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSRLW128) - v.AddArg2(x, y) - return true - } -} -func 
rewriteValueAMD64_OpShiftAllRightUint32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightUint32x16 x (MOVQconst [c])) - // result: (VPSRLD512const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRLD512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllRightUint32x16 x y) - // result: (VPSRLD512 x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSRLD512) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightUint32x4 x (MOVQconst [c])) - // result: (VPSRLD128const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRLD128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllRightUint32x4 x y) - // result: (VPSRLD128 x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSRLD128) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightUint32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightUint32x8 x (MOVQconst [c])) - // result: (VPSRLD256const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRLD256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllRightUint32x8 x y) - // result: (VPSRLD256 x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSRLD256) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightUint64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightUint64x2 x (MOVQconst [c])) - // result: (VPSRLQ128const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRLQ128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllRightUint64x2 x y) - // result: (VPSRLQ128 x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSRLQ128) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightUint64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightUint64x4 x (MOVQconst [c])) - // result: (VPSRLQ256const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRLQ256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllRightUint64x4 x y) - // result: (VPSRLQ256 x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSRLQ256) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightUint64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightUint64x8 x (MOVQconst [c])) - // result: (VPSRLQ512const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRLQ512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllRightUint64x8 x y) - // result: (VPSRLQ512 x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSRLQ512) - v.AddArg2(x, y) - return true - } -} func rewriteValueAMD64_OpShiftLeftConcatMaskedInt16x16(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] From d5dea86993e1bc07bb9a49d2930655050da006d7 Mon Sep 
17 00:00:00 2001 From: David Chase Date: Thu, 7 Aug 2025 16:44:50 -0400 Subject: [PATCH 127/139] [dev.simd] cmd/compile: fix isIntrinsic for methods; fix fp <-> gp moves also includes a handy debugging hook for the inliner. Change-Id: I23d0619506219d21db78c6c801612ff058562142 Reviewed-on: https://go-review.googlesource.com/c/go/+/694118 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao --- src/cmd/compile/internal/amd64/ssa.go | 84 +++++++++++++------ src/cmd/compile/internal/inline/inl.go | 36 +++++++- src/cmd/compile/internal/ssagen/intrinsics.go | 7 ++ 3 files changed, 97 insertions(+), 30 deletions(-) diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index d3fae7ce14c8e6..38815929d2520f 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -43,6 +43,10 @@ func ssaMarkMoves(s *ssagen.State, b *ssa.Block) { } } +func isFPReg(r int16) bool { + return x86.REG_X0 <= r && r <= x86.REG_Z31 +} + // loadByType returns the load instruction of the given type. func loadByType(t *types.Type) obj.As { // Avoid partial register write @@ -88,31 +92,33 @@ func storeByType(t *types.Type) obj.As { } // moveByType returns the reg->reg move instruction of the given type. -func moveByType(t *types.Type) obj.As { - if t.IsFloat() { +func moveByType(from, to *ssa.Value) obj.As { + toT := to.Type + fromR, toR := from.Reg(), to.Reg() + if isFPReg(fromR) && isFPReg(toR) && toT.IsFloat() { // Moving the whole sse2 register is faster // than moving just the correct low portion of it. // There is no xmm->xmm move with 1 byte opcode, // so use movups, which has 2 byte opcode. return x86.AMOVUPS - } else if t.IsSIMD() { - return simdMov(t.Size()) - } else { - switch t.Size() { - case 1: - // Avoids partial register write - return x86.AMOVL - case 2: - return x86.AMOVL - case 4: - return x86.AMOVL - case 8: - return x86.AMOVQ - case 16: - return x86.AMOVUPS // int128s are in SSE registers - default: - panic(fmt.Sprintf("bad int register width %d:%v", t.Size(), t)) - } + } + if toT.IsSIMD() { + return simdMov(toT.Size()) + } + switch toT.Size() { + case 1: + // Avoids partial register write + return x86.AMOVL + case 2: + return x86.AMOVL + case 4: + return x86.AMOVL + case 8: + return x86.AMOVQ + case 16: + return x86.AMOVUPS // int128s are in SSE registers + default: + panic(fmt.Sprintf("bad int register width %d:%v", toT.Size(), toT)) } } @@ -648,7 +654,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { // But this requires a way for regalloc to know that SRC might be // clobbered by this instruction. 
t := v.RegTmp() - opregreg(s, moveByType(v.Type), t, v.Args[1].Reg()) + opregreg(s, moveByType(v.Args[1], v), t, v.Args[1].Reg()) p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG @@ -820,13 +826,37 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.From.Offset = v.AuxInt p.To.Type = obj.TYPE_REG p.To.Reg = x + case ssa.OpAMD64MOVSSconst, ssa.OpAMD64MOVSDconst: x := v.Reg() - p := s.Prog(v.Op.Asm()) - p.From.Type = obj.TYPE_FCONST - p.From.Val = math.Float64frombits(uint64(v.AuxInt)) - p.To.Type = obj.TYPE_REG - p.To.Reg = x + a := v.Op.Asm() + if x < x86.REG_X0 { // not an FP register + if v.AuxInt == 0 && v.Aux == nil { + opregreg(s, x86.AXORL, x, x) + break + } + c := v.AuxInt + switch v.Type.Size() { + case 4: + a = x86.AMOVL + c = int64(math.Float32bits(float32(math.Float64frombits(uint64(v.AuxInt))))) + case 8: + a = x86.AMOVQ + default: + panic(fmt.Sprintf("unexpected type width for float const into non-float register, %v", v)) + } + p := s.Prog(a) + p.From.Type = obj.TYPE_CONST + p.From.Offset = c + p.To.Type = obj.TYPE_REG + p.To.Reg = x + } else { + p := s.Prog(a) + p.From.Type = obj.TYPE_FCONST + p.From.Val = math.Float64frombits(uint64(v.AuxInt)) + p.To.Type = obj.TYPE_REG + p.To.Reg = x + } case ssa.OpAMD64MOVQload, ssa.OpAMD64MOVLload, ssa.OpAMD64MOVWload, ssa.OpAMD64MOVBload, ssa.OpAMD64MOVOload, ssa.OpAMD64MOVSSload, ssa.OpAMD64MOVSDload, ssa.OpAMD64MOVBQSXload, ssa.OpAMD64MOVWQSXload, ssa.OpAMD64MOVLQSXload, ssa.OpAMD64MOVBEQload, ssa.OpAMD64MOVBELload: @@ -1134,7 +1164,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { y = simdOrMaskReg(v) } if x != y { - opregreg(s, moveByType(v.Type), y, x) + opregreg(s, moveByType(v.Args[0], v), y, x) } case ssa.OpLoadReg: if v.Type.IsFlags() { diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go index c06f76fe9ff029..1ba8350803052e 100644 --- a/src/cmd/compile/internal/inline/inl.go +++ b/src/cmd/compile/internal/inline/inl.go @@ -202,6 +202,7 @@ func inlineBudget(fn *ir.Func, profile *pgoir.Profile, relaxed bool, verbose boo // be very liberal here, if the closure is only called once, the budget is large budget = max(budget, inlineClosureCalledOnceCost) } + return budget } @@ -263,6 +264,7 @@ func CanInline(fn *ir.Func, profile *pgoir.Profile) { visitor := hairyVisitor{ curFunc: fn, + debug: isDebugFn(fn), isBigFunc: IsBigFunc(fn), budget: budget, maxBudget: budget, @@ -407,6 +409,7 @@ type hairyVisitor struct { // This is needed to access the current caller in the doNode function. curFunc *ir.Func isBigFunc bool + debug bool budget int32 maxBudget int32 reason string @@ -416,6 +419,16 @@ type hairyVisitor struct { profile *pgoir.Profile } +func isDebugFn(fn *ir.Func) bool { + // if n := fn.Nname; n != nil && n.Sym().Pkg.Path == "0" { + // if n.Sym().Name == "BroadcastInt64x4" { + // fmt.Printf("isDebugFn '%s' DOT '%s'\n", n.Sym().Pkg.Path, n.Sym().Name) + // return true + // } + // } + return false +} + func (v *hairyVisitor) tooHairy(fn *ir.Func) bool { v.do = v.doNode // cache closure if ir.DoChildren(fn, v.do) { @@ -434,6 +447,9 @@ func (v *hairyVisitor) doNode(n ir.Node) bool { if n == nil { return false } + if v.debug { + fmt.Printf("%v: doNode %v budget is %d\n", ir.Line(n), n.Op(), v.budget) + } opSwitch: switch n.Op() { // Call is okay if inlinable and we have the budget for the body. 
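(For reference, a minimal sketch of how the debugging hook added above is meant to be used during development; the package path and function name below are placeholders, not part of the patch. Editing isDebugFn to match a single function makes the hairy visitor print its per-node budget accounting for just that function, and the "too expensive" cutoff is skipped while the hook is active so the full cost is reported.)

	// Illustrative only: point the hook at one function under investigation.
	// "simd" and "BroadcastInt64x4" are stand-ins; substitute the package path
	// and symbol name of the function whose inlining cost you want traced.
	func isDebugFn(fn *ir.Func) bool {
		if n := fn.Nname; n != nil && n.Sym().Pkg.Path == "simd" {
			return n.Sym().Name == "BroadcastInt64x4"
		}
		return false
	}
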
@@ -551,12 +567,19 @@ opSwitch: } if cheap { + if v.debug { + if ir.IsIntrinsicCall(n) { + fmt.Printf("%v: cheap call is also intrinsic, %v\n", ir.Line(n), n) + } + } break // treat like any other node, that is, cost of 1 } if ir.IsIntrinsicCall(n) { - // Treat like any other node. - break + if v.debug { + fmt.Printf("%v: intrinsic call, %v\n", ir.Line(n), n) + } + break // Treat like any other node. } if callee := inlCallee(v.curFunc, n.Fun, v.profile, false); callee != nil && typecheck.HaveInlineBody(callee) { @@ -583,6 +606,10 @@ opSwitch: } } + if v.debug { + fmt.Printf("%v: costly OCALLFUNC %v\n", ir.Line(n), n) + } + // Call cost for non-leaf inlining. v.budget -= extraCost @@ -592,6 +619,9 @@ opSwitch: // Things that are too hairy, irrespective of the budget case ir.OCALL, ir.OCALLINTER: // Call cost for non-leaf inlining. + if v.debug { + fmt.Printf("%v: costly OCALL %v\n", ir.Line(n), n) + } v.budget -= v.extraCallCost case ir.OPANIC: @@ -743,7 +773,7 @@ opSwitch: v.budget-- // When debugging, don't stop early, to get full cost of inlining this function - if v.budget < 0 && base.Flag.LowerM < 2 && !logopt.Enabled() { + if v.budget < 0 && base.Flag.LowerM < 2 && !logopt.Enabled() && !v.debug { v.reason = "too expensive" return true } diff --git a/src/cmd/compile/internal/ssagen/intrinsics.go b/src/cmd/compile/internal/ssagen/intrinsics.go index ee03075f524af8..f5b5b9bb7cd828 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics.go +++ b/src/cmd/compile/internal/ssagen/intrinsics.go @@ -1913,6 +1913,13 @@ func IsIntrinsicCall(n *ir.CallExpr) bool { } name, ok := n.Fun.(*ir.Name) if !ok { + if n.Fun.Op() == ir.OMETHEXPR { + if meth := ir.MethodExprName(n.Fun); meth != nil { + if fn := meth.Func; fn != nil { + return IsIntrinsicSym(fn.Sym()) + } + } + } return false } return IsIntrinsicSym(name.Sym()) From e001300cf21bad54afb5052e9ff823f8c1cbd407 Mon Sep 17 00:00:00 2001 From: David Chase Date: Wed, 13 Aug 2025 12:44:01 -0400 Subject: [PATCH 128/139] [dev.simd] cmd/compile: fix LoadReg so it is aware of register target SIMD code generation created interesting new type/register combintations. Change-Id: I9c9a73bf51f6cb54551db1fdc88f9dd1eef7ab26 Reviewed-on: https://go-review.googlesource.com/c/go/+/695895 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao --- src/cmd/compile/internal/amd64/ssa.go | 44 ++++++++++++++++++++++----- 1 file changed, 37 insertions(+), 7 deletions(-) diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 38815929d2520f..8d4e602beda46d 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -47,8 +47,8 @@ func isFPReg(r int16) bool { return x86.REG_X0 <= r && r <= x86.REG_Z31 } -// loadByType returns the load instruction of the given type. -func loadByType(t *types.Type) obj.As { +// loadByTypeAndReg returns the load instruction of the given type/register. +func loadByTypeAndReg(t *types.Type, r int16) obj.As { // Avoid partial register write if !t.IsFloat() { switch t.Size() { @@ -59,7 +59,37 @@ func loadByType(t *types.Type) obj.As { } } // Otherwise, there's no difference between load and store opcodes. - return storeByType(t) + return storeByTypeAndReg(t, r) +} + +// storeByTypeAndReg returns the store instruction of the given type/register. 
+func storeByTypeAndReg(t *types.Type, r int16) obj.As { + width := t.Size() + if t.IsSIMD() { + return simdMov(width) + } + if isFPReg(r) { + switch width { + case 4: + return x86.AMOVSS + case 8: + return x86.AMOVSD + } + } else { + switch width { + case 1: + return x86.AMOVB + case 2: + return x86.AMOVW + case 4: + return x86.AMOVL + case 8: + return x86.AMOVQ + case 16: + return x86.AMOVUPS + } + } + panic(fmt.Sprintf("bad store type %v", t)) } // storeByType returns the store instruction of the given type. @@ -1171,10 +1201,10 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { v.Fatalf("load flags not implemented: %v", v.LongString()) return } - p := s.Prog(loadByType(v.Type)) + r := v.Reg() + p := s.Prog(loadByTypeAndReg(v.Type, r)) ssagen.AddrAuto(&p.From, v.Args[0]) p.To.Type = obj.TYPE_REG - r := v.Reg() if v.Type.IsSIMD() { r = simdOrMaskReg(v) } @@ -1206,7 +1236,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { // Pass the spill/unspill information along to the assembler, offset by size of return PC pushed on stack. addr := ssagen.SpillSlotAddr(ap, x86.REG_SP, v.Block.Func.Config.PtrSize) s.FuncInfo().AddSpill( - obj.RegSpill{Reg: ap.Reg, Addr: addr, Unspill: loadByType(ap.Type), Spill: storeByType(ap.Type)}) + obj.RegSpill{Reg: ap.Reg, Addr: addr, Unspill: loadByTypeAndReg(ap.Type, ap.Reg), Spill: storeByType(ap.Type)}) } v.Block.Func.RegArgs = nil ssagen.CheckArgReg(v) @@ -2090,7 +2120,7 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) { } func loadRegResult(s *ssagen.State, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog { - p := s.Prog(loadByType(t)) + p := s.Prog(loadByTypeAndReg(t, reg)) p.From.Type = obj.TYPE_MEM p.From.Name = obj.NAME_AUTO p.From.Sym = n.Linksym() From ddb689c7bb681023491109c7d9673f389d6e06ee Mon Sep 17 00:00:00 2001 From: David Chase Date: Tue, 5 Aug 2025 17:34:05 -0400 Subject: [PATCH 129/139] [dev.simd] simd, cmd/compile: generated code for Broadcast Generated by simdgen CL 693599 This turned out to require some additional work in other places, including filling in missing methods (use OverwriteBase to get FP versions). Also includes a test. 
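A scalar reference model of the operation these ops implement may be useful
when reviewing the generated tables: every destination lane receives a copy
of the source vector's lowest element. The sketch below is illustrative
only; it is not part of the generated simd API, and the function name and
fixed 4-lane shape are assumptions made for the example. Masked variants
additionally gate which destination lanes are written.

	// broadcast4 models a 4-lane broadcast such as VPBROADCASTD:
	// each result lane is a copy of src's element 0.
	func broadcast4(src [4]int32) [4]int32 {
		var dst [4]int32
		for i := range dst {
			dst[i] = src[0]
		}
		return dst
	}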
Change-Id: I2efe8967837834745f9cae661d4d4dcbb5390b6f Reviewed-on: https://go-review.googlesource.com/c/go/+/693758 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao --- src/cmd/compile/internal/amd64/simdssa.go | 59 +- .../compile/internal/ssa/_gen/simdAMD64.rules | 62 ++ .../compile/internal/ssa/_gen/simdAMD64ops.go | 38 +- .../internal/ssa/_gen/simdgenericOps.go | 62 ++ src/cmd/compile/internal/ssa/opGen.go | 887 +++++++++++++++++- src/cmd/compile/internal/ssa/rewriteAMD64.go | 636 +++++++++++++ .../compile/internal/ssagen/simdintrinsics.go | 62 ++ src/simd/genfiles.go | 79 +- src/simd/ops_amd64.go | 446 +++++++++ src/simd/simd_test.go | 12 + src/simd/slice_amd64.go | 270 ++++++ 11 files changed, 2575 insertions(+), 38 deletions(-) diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index e6bbdc03def1e4..73a947a88af24a 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -24,6 +24,23 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPABSQ128, ssa.OpAMD64VPABSQ256, ssa.OpAMD64VPABSQ512, + ssa.OpAMD64VBROADCASTSS128, + ssa.OpAMD64VPBROADCASTQ128, + ssa.OpAMD64VPBROADCASTB128, + ssa.OpAMD64VPBROADCASTW128, + ssa.OpAMD64VPBROADCASTD128, + ssa.OpAMD64VBROADCASTSS256, + ssa.OpAMD64VBROADCASTSD256, + ssa.OpAMD64VPBROADCASTB256, + ssa.OpAMD64VPBROADCASTW256, + ssa.OpAMD64VPBROADCASTD256, + ssa.OpAMD64VPBROADCASTQ256, + ssa.OpAMD64VBROADCASTSS512, + ssa.OpAMD64VBROADCASTSD512, + ssa.OpAMD64VPBROADCASTB512, + ssa.OpAMD64VPBROADCASTW512, + ssa.OpAMD64VPBROADCASTD512, + ssa.OpAMD64VPBROADCASTQ512, ssa.OpAMD64VCVTTPS2DQ128, ssa.OpAMD64VCVTTPS2DQ256, ssa.OpAMD64VCVTTPS2DQ512, @@ -624,6 +641,23 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPABSQMasked128, ssa.OpAMD64VPABSQMasked256, ssa.OpAMD64VPABSQMasked512, + ssa.OpAMD64VBROADCASTSSMasked128, + ssa.OpAMD64VPBROADCASTQMasked128, + ssa.OpAMD64VPBROADCASTBMasked128, + ssa.OpAMD64VPBROADCASTWMasked128, + ssa.OpAMD64VPBROADCASTDMasked128, + ssa.OpAMD64VBROADCASTSSMasked256, + ssa.OpAMD64VBROADCASTSDMasked256, + ssa.OpAMD64VPBROADCASTBMasked256, + ssa.OpAMD64VPBROADCASTWMasked256, + ssa.OpAMD64VPBROADCASTDMasked256, + ssa.OpAMD64VPBROADCASTQMasked256, + ssa.OpAMD64VBROADCASTSSMasked512, + ssa.OpAMD64VBROADCASTSDMasked512, + ssa.OpAMD64VPBROADCASTBMasked512, + ssa.OpAMD64VPBROADCASTWMasked512, + ssa.OpAMD64VPBROADCASTDMasked512, + ssa.OpAMD64VPBROADCASTQMasked512, ssa.OpAMD64VCOMPRESSPSMasked128, ssa.OpAMD64VCOMPRESSPSMasked256, ssa.OpAMD64VCOMPRESSPSMasked512, @@ -1104,10 +1138,10 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSRLQMasked512: p = simdVfpkv(s, v) - case ssa.OpAMD64VPINSRB128, - ssa.OpAMD64VPINSRW128, - ssa.OpAMD64VPINSRD128, - ssa.OpAMD64VPINSRQ128: + case ssa.OpAMD64VPINSRD128, + ssa.OpAMD64VPINSRQ128, + ssa.OpAMD64VPINSRB128, + ssa.OpAMD64VPINSRW128: p = simdVgpvImm8(s, v) case ssa.OpAMD64VPEXTRB128, @@ -1221,6 +1255,23 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPAVGWMasked128, ssa.OpAMD64VPAVGWMasked256, ssa.OpAMD64VPAVGWMasked512, + ssa.OpAMD64VBROADCASTSSMasked128, + ssa.OpAMD64VPBROADCASTQMasked128, + ssa.OpAMD64VPBROADCASTBMasked128, + ssa.OpAMD64VPBROADCASTWMasked128, + ssa.OpAMD64VPBROADCASTDMasked128, + ssa.OpAMD64VBROADCASTSSMasked256, + ssa.OpAMD64VBROADCASTSDMasked256, + ssa.OpAMD64VPBROADCASTBMasked256, + ssa.OpAMD64VPBROADCASTWMasked256, + ssa.OpAMD64VPBROADCASTDMasked256, + ssa.OpAMD64VPBROADCASTQMasked256, + 
ssa.OpAMD64VBROADCASTSSMasked512, + ssa.OpAMD64VBROADCASTSDMasked512, + ssa.OpAMD64VPBROADCASTBMasked512, + ssa.OpAMD64VPBROADCASTWMasked512, + ssa.OpAMD64VPBROADCASTDMasked512, + ssa.OpAMD64VPBROADCASTQMasked512, ssa.OpAMD64VRNDSCALEPSMasked128, ssa.OpAMD64VRNDSCALEPSMasked256, ssa.OpAMD64VRNDSCALEPSMasked512, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 80cddaae79e2a1..e7c5a1a97d372d 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -228,6 +228,66 @@ (AverageMaskedUint16x8 x y mask) => (VPAVGWMasked128 x y (VPMOVVec16x8ToM mask)) (AverageMaskedUint16x16 x y mask) => (VPAVGWMasked256 x y (VPMOVVec16x16ToM mask)) (AverageMaskedUint16x32 x y mask) => (VPAVGWMasked512 x y (VPMOVVec16x32ToM mask)) +(Broadcast128Float32x4 ...) => (VBROADCASTSS128 ...) +(Broadcast128Float64x2 ...) => (VPBROADCASTQ128 ...) +(Broadcast128Int8x16 ...) => (VPBROADCASTB128 ...) +(Broadcast128Int16x8 ...) => (VPBROADCASTW128 ...) +(Broadcast128Int32x4 ...) => (VPBROADCASTD128 ...) +(Broadcast128Int64x2 ...) => (VPBROADCASTQ128 ...) +(Broadcast128Uint8x16 ...) => (VPBROADCASTB128 ...) +(Broadcast128Uint16x8 ...) => (VPBROADCASTW128 ...) +(Broadcast128Uint32x4 ...) => (VPBROADCASTD128 ...) +(Broadcast128Uint64x2 ...) => (VPBROADCASTQ128 ...) +(Broadcast128MaskedFloat32x4 x mask) => (VBROADCASTSSMasked128 x (VPMOVVec32x4ToM mask)) +(Broadcast128MaskedFloat64x2 x mask) => (VPBROADCASTQMasked128 x (VPMOVVec64x2ToM mask)) +(Broadcast128MaskedInt8x16 x mask) => (VPBROADCASTBMasked128 x (VPMOVVec8x16ToM mask)) +(Broadcast128MaskedInt16x8 x mask) => (VPBROADCASTWMasked128 x (VPMOVVec16x8ToM mask)) +(Broadcast128MaskedInt32x4 x mask) => (VPBROADCASTDMasked128 x (VPMOVVec32x4ToM mask)) +(Broadcast128MaskedInt64x2 x mask) => (VPBROADCASTQMasked128 x (VPMOVVec64x2ToM mask)) +(Broadcast128MaskedUint8x16 x mask) => (VPBROADCASTBMasked128 x (VPMOVVec8x16ToM mask)) +(Broadcast128MaskedUint16x8 x mask) => (VPBROADCASTWMasked128 x (VPMOVVec16x8ToM mask)) +(Broadcast128MaskedUint32x4 x mask) => (VPBROADCASTDMasked128 x (VPMOVVec32x4ToM mask)) +(Broadcast128MaskedUint64x2 x mask) => (VPBROADCASTQMasked128 x (VPMOVVec64x2ToM mask)) +(Broadcast256Float32x4 ...) => (VBROADCASTSS256 ...) +(Broadcast256Float64x2 ...) => (VBROADCASTSD256 ...) +(Broadcast256Int8x16 ...) => (VPBROADCASTB256 ...) +(Broadcast256Int16x8 ...) => (VPBROADCASTW256 ...) +(Broadcast256Int32x4 ...) => (VPBROADCASTD256 ...) +(Broadcast256Int64x2 ...) => (VPBROADCASTQ256 ...) +(Broadcast256Uint8x16 ...) => (VPBROADCASTB256 ...) +(Broadcast256Uint16x8 ...) => (VPBROADCASTW256 ...) +(Broadcast256Uint32x4 ...) => (VPBROADCASTD256 ...) +(Broadcast256Uint64x2 ...) => (VPBROADCASTQ256 ...) 
+(Broadcast256MaskedFloat32x4 x mask) => (VBROADCASTSSMasked256 x (VPMOVVec32x4ToM mask)) +(Broadcast256MaskedFloat64x2 x mask) => (VBROADCASTSDMasked256 x (VPMOVVec64x2ToM mask)) +(Broadcast256MaskedInt8x16 x mask) => (VPBROADCASTBMasked256 x (VPMOVVec8x16ToM mask)) +(Broadcast256MaskedInt16x8 x mask) => (VPBROADCASTWMasked256 x (VPMOVVec16x8ToM mask)) +(Broadcast256MaskedInt32x4 x mask) => (VPBROADCASTDMasked256 x (VPMOVVec32x4ToM mask)) +(Broadcast256MaskedInt64x2 x mask) => (VPBROADCASTQMasked256 x (VPMOVVec64x2ToM mask)) +(Broadcast256MaskedUint8x16 x mask) => (VPBROADCASTBMasked256 x (VPMOVVec8x16ToM mask)) +(Broadcast256MaskedUint16x8 x mask) => (VPBROADCASTWMasked256 x (VPMOVVec16x8ToM mask)) +(Broadcast256MaskedUint32x4 x mask) => (VPBROADCASTDMasked256 x (VPMOVVec32x4ToM mask)) +(Broadcast256MaskedUint64x2 x mask) => (VPBROADCASTQMasked256 x (VPMOVVec64x2ToM mask)) +(Broadcast512Float32x4 ...) => (VBROADCASTSS512 ...) +(Broadcast512Float64x2 ...) => (VBROADCASTSD512 ...) +(Broadcast512Int8x16 ...) => (VPBROADCASTB512 ...) +(Broadcast512Int16x8 ...) => (VPBROADCASTW512 ...) +(Broadcast512Int32x4 ...) => (VPBROADCASTD512 ...) +(Broadcast512Int64x2 ...) => (VPBROADCASTQ512 ...) +(Broadcast512Uint8x16 ...) => (VPBROADCASTB512 ...) +(Broadcast512Uint16x8 ...) => (VPBROADCASTW512 ...) +(Broadcast512Uint32x4 ...) => (VPBROADCASTD512 ...) +(Broadcast512Uint64x2 ...) => (VPBROADCASTQ512 ...) +(Broadcast512MaskedFloat32x4 x mask) => (VBROADCASTSSMasked512 x (VPMOVVec32x4ToM mask)) +(Broadcast512MaskedFloat64x2 x mask) => (VBROADCASTSDMasked512 x (VPMOVVec64x2ToM mask)) +(Broadcast512MaskedInt8x16 x mask) => (VPBROADCASTBMasked512 x (VPMOVVec8x16ToM mask)) +(Broadcast512MaskedInt16x8 x mask) => (VPBROADCASTWMasked512 x (VPMOVVec16x8ToM mask)) +(Broadcast512MaskedInt32x4 x mask) => (VPBROADCASTDMasked512 x (VPMOVVec32x4ToM mask)) +(Broadcast512MaskedInt64x2 x mask) => (VPBROADCASTQMasked512 x (VPMOVVec64x2ToM mask)) +(Broadcast512MaskedUint8x16 x mask) => (VPBROADCASTBMasked512 x (VPMOVVec8x16ToM mask)) +(Broadcast512MaskedUint16x8 x mask) => (VPBROADCASTWMasked512 x (VPMOVVec16x8ToM mask)) +(Broadcast512MaskedUint32x4 x mask) => (VPBROADCASTDMasked512 x (VPMOVVec32x4ToM mask)) +(Broadcast512MaskedUint64x2 x mask) => (VPBROADCASTQMasked512 x (VPMOVVec64x2ToM mask)) (CeilFloat32x4 x) => (VROUNDPS128 [2] x) (CeilFloat32x8 x) => (VROUNDPS256 [2] x) (CeilFloat64x2 x) => (VROUNDPD128 [2] x) @@ -1396,6 +1456,8 @@ (ScaleMaskedFloat64x2 x y mask) => (VSCALEFPDMasked128 x y (VPMOVVec64x2ToM mask)) (ScaleMaskedFloat64x4 x y mask) => (VSCALEFPDMasked256 x y (VPMOVVec64x4ToM mask)) (ScaleMaskedFloat64x8 x y mask) => (VSCALEFPDMasked512 x y (VPMOVVec64x8ToM mask)) +(SetElemFloat32x4 ...) => (VPINSRD128 ...) +(SetElemFloat64x2 ...) => (VPINSRQ128 ...) (SetElemInt8x16 ...) => (VPINSRB128 ...) (SetElemInt16x8 ...) => (VPINSRW128 ...) (SetElemInt32x4 ...) => (VPINSRD128 ...) 
diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index afea4c0a46e4db..5d388a4531ba4b 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -20,6 +20,16 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VADDSUBPD256", argLength: 2, reg: v21, asm: "VADDSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VADDSUBPS128", argLength: 2, reg: v21, asm: "VADDSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VADDSUBPS256", argLength: 2, reg: v21, asm: "VADDSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VBROADCASTSD256", argLength: 1, reg: v11, asm: "VBROADCASTSD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VBROADCASTSD512", argLength: 1, reg: w11, asm: "VBROADCASTSD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VBROADCASTSDMasked256", argLength: 2, reg: wkw, asm: "VBROADCASTSD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VBROADCASTSDMasked512", argLength: 2, reg: wkw, asm: "VBROADCASTSD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VBROADCASTSS128", argLength: 1, reg: v11, asm: "VBROADCASTSS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VBROADCASTSS256", argLength: 1, reg: v11, asm: "VBROADCASTSS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VBROADCASTSS512", argLength: 1, reg: w11, asm: "VBROADCASTSS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VBROADCASTSSMasked128", argLength: 2, reg: wkw, asm: "VBROADCASTSS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VBROADCASTSSMasked256", argLength: 2, reg: wkw, asm: "VBROADCASTSS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VBROADCASTSSMasked512", argLength: 2, reg: wkw, asm: "VBROADCASTSS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VCOMPRESSPDMasked128", argLength: 2, reg: wkw, asm: "VCOMPRESSPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VCOMPRESSPDMasked256", argLength: 2, reg: wkw, asm: "VCOMPRESSPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VCOMPRESSPDMasked512", argLength: 2, reg: wkw, asm: "VCOMPRESSPD", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -252,6 +262,30 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPBLENDMWMasked512", argLength: 3, reg: w2kw, asm: "VPBLENDMW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPBLENDVB128", argLength: 3, reg: v31, asm: "VPBLENDVB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPBLENDVB256", argLength: 3, reg: v31, asm: "VPBLENDVB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPBROADCASTB128", argLength: 1, reg: v11, asm: "VPBROADCASTB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPBROADCASTB256", argLength: 1, reg: v11, asm: "VPBROADCASTB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPBROADCASTB512", argLength: 1, reg: w11, asm: "VPBROADCASTB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPBROADCASTBMasked128", argLength: 2, reg: wkw, asm: "VPBROADCASTB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPBROADCASTBMasked256", argLength: 2, reg: wkw, asm: "VPBROADCASTB", 
commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPBROADCASTBMasked512", argLength: 2, reg: wkw, asm: "VPBROADCASTB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPBROADCASTD128", argLength: 1, reg: v11, asm: "VPBROADCASTD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPBROADCASTD256", argLength: 1, reg: v11, asm: "VPBROADCASTD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPBROADCASTD512", argLength: 1, reg: w11, asm: "VPBROADCASTD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPBROADCASTDMasked128", argLength: 2, reg: wkw, asm: "VPBROADCASTD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPBROADCASTDMasked256", argLength: 2, reg: wkw, asm: "VPBROADCASTD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPBROADCASTDMasked512", argLength: 2, reg: wkw, asm: "VPBROADCASTD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPBROADCASTQ128", argLength: 1, reg: v11, asm: "VPBROADCASTQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPBROADCASTQ256", argLength: 1, reg: v11, asm: "VPBROADCASTQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPBROADCASTQ512", argLength: 1, reg: w11, asm: "VPBROADCASTQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPBROADCASTQMasked128", argLength: 2, reg: wkw, asm: "VPBROADCASTQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPBROADCASTQMasked256", argLength: 2, reg: wkw, asm: "VPBROADCASTQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPBROADCASTQMasked512", argLength: 2, reg: wkw, asm: "VPBROADCASTQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPBROADCASTW128", argLength: 1, reg: v11, asm: "VPBROADCASTW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPBROADCASTW256", argLength: 1, reg: v11, asm: "VPBROADCASTW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPBROADCASTW512", argLength: 1, reg: w11, asm: "VPBROADCASTW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPBROADCASTWMasked128", argLength: 2, reg: wkw, asm: "VPBROADCASTW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPBROADCASTWMasked256", argLength: 2, reg: wkw, asm: "VPBROADCASTW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPBROADCASTWMasked512", argLength: 2, reg: wkw, asm: "VPBROADCASTW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPCMPEQB128", argLength: 2, reg: v21, asm: "VPCMPEQB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPCMPEQB256", argLength: 2, reg: v21, asm: "VPCMPEQB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPCMPEQB512", argLength: 2, reg: w2k, asm: "VPCMPEQB", commutative: true, typ: "Mask", resultInArg0: false}, @@ -1000,10 +1034,10 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPRORQMasked128", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPRORQMasked256", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPRORQMasked512", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPINSRB128", argLength: 2, reg: vgpv, asm: "VPINSRB", aux: "UInt8", commutative: false, 
typ: "Vec128", resultInArg0: false}, - {name: "VPINSRW128", argLength: 2, reg: vgpv, asm: "VPINSRW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPINSRD128", argLength: 2, reg: vgpv, asm: "VPINSRD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPINSRQ128", argLength: 2, reg: vgpv, asm: "VPINSRQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPINSRB128", argLength: 2, reg: vgpv, asm: "VPINSRB", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPINSRW128", argLength: 2, reg: vgpv, asm: "VPINSRW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VINSERTF128256", argLength: 2, reg: v21, asm: "VINSERTF128", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VINSERTF64X4512", argLength: 2, reg: w21, asm: "VINSERTF64X4", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VINSERTI128256", argLength: 2, reg: v21, asm: "VINSERTI128", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index fea701e174f0bb..f120dcddd0c0c9 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -232,6 +232,66 @@ func simdGenericOps() []opData { {name: "AverageUint16x8", argLength: 2, commutative: true}, {name: "AverageUint16x16", argLength: 2, commutative: true}, {name: "AverageUint16x32", argLength: 2, commutative: true}, + {name: "Broadcast128Float32x4", argLength: 1, commutative: false}, + {name: "Broadcast128Float64x2", argLength: 1, commutative: false}, + {name: "Broadcast128Int8x16", argLength: 1, commutative: false}, + {name: "Broadcast128Int16x8", argLength: 1, commutative: false}, + {name: "Broadcast128Int32x4", argLength: 1, commutative: false}, + {name: "Broadcast128Int64x2", argLength: 1, commutative: false}, + {name: "Broadcast128MaskedFloat32x4", argLength: 2, commutative: false}, + {name: "Broadcast128MaskedFloat64x2", argLength: 2, commutative: false}, + {name: "Broadcast128MaskedInt8x16", argLength: 2, commutative: false}, + {name: "Broadcast128MaskedInt16x8", argLength: 2, commutative: false}, + {name: "Broadcast128MaskedInt32x4", argLength: 2, commutative: false}, + {name: "Broadcast128MaskedInt64x2", argLength: 2, commutative: false}, + {name: "Broadcast128MaskedUint8x16", argLength: 2, commutative: false}, + {name: "Broadcast128MaskedUint16x8", argLength: 2, commutative: false}, + {name: "Broadcast128MaskedUint32x4", argLength: 2, commutative: false}, + {name: "Broadcast128MaskedUint64x2", argLength: 2, commutative: false}, + {name: "Broadcast128Uint8x16", argLength: 1, commutative: false}, + {name: "Broadcast128Uint16x8", argLength: 1, commutative: false}, + {name: "Broadcast128Uint32x4", argLength: 1, commutative: false}, + {name: "Broadcast128Uint64x2", argLength: 1, commutative: false}, + {name: "Broadcast256Float32x4", argLength: 1, commutative: false}, + {name: "Broadcast256Float64x2", argLength: 1, commutative: false}, + {name: "Broadcast256Int8x16", argLength: 1, commutative: false}, + {name: "Broadcast256Int16x8", argLength: 1, commutative: false}, + {name: "Broadcast256Int32x4", argLength: 1, commutative: false}, + {name: "Broadcast256Int64x2", argLength: 1, commutative: false}, + {name: "Broadcast256MaskedFloat32x4", argLength: 2, commutative: false}, + {name: 
"Broadcast256MaskedFloat64x2", argLength: 2, commutative: false}, + {name: "Broadcast256MaskedInt8x16", argLength: 2, commutative: false}, + {name: "Broadcast256MaskedInt16x8", argLength: 2, commutative: false}, + {name: "Broadcast256MaskedInt32x4", argLength: 2, commutative: false}, + {name: "Broadcast256MaskedInt64x2", argLength: 2, commutative: false}, + {name: "Broadcast256MaskedUint8x16", argLength: 2, commutative: false}, + {name: "Broadcast256MaskedUint16x8", argLength: 2, commutative: false}, + {name: "Broadcast256MaskedUint32x4", argLength: 2, commutative: false}, + {name: "Broadcast256MaskedUint64x2", argLength: 2, commutative: false}, + {name: "Broadcast256Uint8x16", argLength: 1, commutative: false}, + {name: "Broadcast256Uint16x8", argLength: 1, commutative: false}, + {name: "Broadcast256Uint32x4", argLength: 1, commutative: false}, + {name: "Broadcast256Uint64x2", argLength: 1, commutative: false}, + {name: "Broadcast512Float32x4", argLength: 1, commutative: false}, + {name: "Broadcast512Float64x2", argLength: 1, commutative: false}, + {name: "Broadcast512Int8x16", argLength: 1, commutative: false}, + {name: "Broadcast512Int16x8", argLength: 1, commutative: false}, + {name: "Broadcast512Int32x4", argLength: 1, commutative: false}, + {name: "Broadcast512Int64x2", argLength: 1, commutative: false}, + {name: "Broadcast512MaskedFloat32x4", argLength: 2, commutative: false}, + {name: "Broadcast512MaskedFloat64x2", argLength: 2, commutative: false}, + {name: "Broadcast512MaskedInt8x16", argLength: 2, commutative: false}, + {name: "Broadcast512MaskedInt16x8", argLength: 2, commutative: false}, + {name: "Broadcast512MaskedInt32x4", argLength: 2, commutative: false}, + {name: "Broadcast512MaskedInt64x2", argLength: 2, commutative: false}, + {name: "Broadcast512MaskedUint8x16", argLength: 2, commutative: false}, + {name: "Broadcast512MaskedUint16x8", argLength: 2, commutative: false}, + {name: "Broadcast512MaskedUint32x4", argLength: 2, commutative: false}, + {name: "Broadcast512MaskedUint64x2", argLength: 2, commutative: false}, + {name: "Broadcast512Uint8x16", argLength: 1, commutative: false}, + {name: "Broadcast512Uint16x8", argLength: 1, commutative: false}, + {name: "Broadcast512Uint32x4", argLength: 1, commutative: false}, + {name: "Broadcast512Uint64x2", argLength: 1, commutative: false}, {name: "CeilFloat32x4", argLength: 1, commutative: false}, {name: "CeilFloat32x8", argLength: 1, commutative: false}, {name: "CeilFloat64x2", argLength: 1, commutative: false}, @@ -1812,6 +1872,8 @@ func simdGenericOps() []opData { {name: "RoundToEvenScaledResidueMaskedFloat64x2", argLength: 2, commutative: false, aux: "UInt8"}, {name: "RoundToEvenScaledResidueMaskedFloat64x4", argLength: 2, commutative: false, aux: "UInt8"}, {name: "RoundToEvenScaledResidueMaskedFloat64x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "SetElemFloat32x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "SetElemFloat64x2", argLength: 2, commutative: false, aux: "UInt8"}, {name: "SetElemInt8x16", argLength: 2, commutative: false, aux: "UInt8"}, {name: "SetElemInt16x8", argLength: 2, commutative: false, aux: "UInt8"}, {name: "SetElemInt32x4", argLength: 2, commutative: false, aux: "UInt8"}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 77527c83b8c751..6e0ffd15408587 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1242,6 +1242,16 @@ const ( OpAMD64VADDSUBPD256 OpAMD64VADDSUBPS128 
OpAMD64VADDSUBPS256 + OpAMD64VBROADCASTSD256 + OpAMD64VBROADCASTSD512 + OpAMD64VBROADCASTSDMasked256 + OpAMD64VBROADCASTSDMasked512 + OpAMD64VBROADCASTSS128 + OpAMD64VBROADCASTSS256 + OpAMD64VBROADCASTSS512 + OpAMD64VBROADCASTSSMasked128 + OpAMD64VBROADCASTSSMasked256 + OpAMD64VBROADCASTSSMasked512 OpAMD64VCOMPRESSPDMasked128 OpAMD64VCOMPRESSPDMasked256 OpAMD64VCOMPRESSPDMasked512 @@ -1474,6 +1484,30 @@ const ( OpAMD64VPBLENDMWMasked512 OpAMD64VPBLENDVB128 OpAMD64VPBLENDVB256 + OpAMD64VPBROADCASTB128 + OpAMD64VPBROADCASTB256 + OpAMD64VPBROADCASTB512 + OpAMD64VPBROADCASTBMasked128 + OpAMD64VPBROADCASTBMasked256 + OpAMD64VPBROADCASTBMasked512 + OpAMD64VPBROADCASTD128 + OpAMD64VPBROADCASTD256 + OpAMD64VPBROADCASTD512 + OpAMD64VPBROADCASTDMasked128 + OpAMD64VPBROADCASTDMasked256 + OpAMD64VPBROADCASTDMasked512 + OpAMD64VPBROADCASTQ128 + OpAMD64VPBROADCASTQ256 + OpAMD64VPBROADCASTQ512 + OpAMD64VPBROADCASTQMasked128 + OpAMD64VPBROADCASTQMasked256 + OpAMD64VPBROADCASTQMasked512 + OpAMD64VPBROADCASTW128 + OpAMD64VPBROADCASTW256 + OpAMD64VPBROADCASTW512 + OpAMD64VPBROADCASTWMasked128 + OpAMD64VPBROADCASTWMasked256 + OpAMD64VPBROADCASTWMasked512 OpAMD64VPCMPEQB128 OpAMD64VPCMPEQB256 OpAMD64VPCMPEQB512 @@ -2222,10 +2256,10 @@ const ( OpAMD64VPRORQMasked128 OpAMD64VPRORQMasked256 OpAMD64VPRORQMasked512 - OpAMD64VPINSRB128 - OpAMD64VPINSRW128 OpAMD64VPINSRD128 OpAMD64VPINSRQ128 + OpAMD64VPINSRB128 + OpAMD64VPINSRW128 OpAMD64VINSERTF128256 OpAMD64VINSERTF64X4512 OpAMD64VINSERTI128256 @@ -4839,6 +4873,66 @@ const ( OpAverageUint16x8 OpAverageUint16x16 OpAverageUint16x32 + OpBroadcast128Float32x4 + OpBroadcast128Float64x2 + OpBroadcast128Int8x16 + OpBroadcast128Int16x8 + OpBroadcast128Int32x4 + OpBroadcast128Int64x2 + OpBroadcast128MaskedFloat32x4 + OpBroadcast128MaskedFloat64x2 + OpBroadcast128MaskedInt8x16 + OpBroadcast128MaskedInt16x8 + OpBroadcast128MaskedInt32x4 + OpBroadcast128MaskedInt64x2 + OpBroadcast128MaskedUint8x16 + OpBroadcast128MaskedUint16x8 + OpBroadcast128MaskedUint32x4 + OpBroadcast128MaskedUint64x2 + OpBroadcast128Uint8x16 + OpBroadcast128Uint16x8 + OpBroadcast128Uint32x4 + OpBroadcast128Uint64x2 + OpBroadcast256Float32x4 + OpBroadcast256Float64x2 + OpBroadcast256Int8x16 + OpBroadcast256Int16x8 + OpBroadcast256Int32x4 + OpBroadcast256Int64x2 + OpBroadcast256MaskedFloat32x4 + OpBroadcast256MaskedFloat64x2 + OpBroadcast256MaskedInt8x16 + OpBroadcast256MaskedInt16x8 + OpBroadcast256MaskedInt32x4 + OpBroadcast256MaskedInt64x2 + OpBroadcast256MaskedUint8x16 + OpBroadcast256MaskedUint16x8 + OpBroadcast256MaskedUint32x4 + OpBroadcast256MaskedUint64x2 + OpBroadcast256Uint8x16 + OpBroadcast256Uint16x8 + OpBroadcast256Uint32x4 + OpBroadcast256Uint64x2 + OpBroadcast512Float32x4 + OpBroadcast512Float64x2 + OpBroadcast512Int8x16 + OpBroadcast512Int16x8 + OpBroadcast512Int32x4 + OpBroadcast512Int64x2 + OpBroadcast512MaskedFloat32x4 + OpBroadcast512MaskedFloat64x2 + OpBroadcast512MaskedInt8x16 + OpBroadcast512MaskedInt16x8 + OpBroadcast512MaskedInt32x4 + OpBroadcast512MaskedInt64x2 + OpBroadcast512MaskedUint8x16 + OpBroadcast512MaskedUint16x8 + OpBroadcast512MaskedUint32x4 + OpBroadcast512MaskedUint64x2 + OpBroadcast512Uint8x16 + OpBroadcast512Uint16x8 + OpBroadcast512Uint32x4 + OpBroadcast512Uint64x2 OpCeilFloat32x4 OpCeilFloat32x8 OpCeilFloat64x2 @@ -6419,6 +6513,8 @@ const ( OpRoundToEvenScaledResidueMaskedFloat64x2 OpRoundToEvenScaledResidueMaskedFloat64x4 OpRoundToEvenScaledResidueMaskedFloat64x8 + OpSetElemFloat32x4 + OpSetElemFloat64x2 OpSetElemInt8x16 OpSetElemInt16x8 OpSetElemInt32x4 @@ 
-19771,6 +19867,141 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VBROADCASTSD256", + argLen: 1, + asm: x86.AVBROADCASTSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VBROADCASTSD512", + argLen: 1, + asm: x86.AVBROADCASTSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VBROADCASTSDMasked256", + argLen: 2, + asm: x86.AVBROADCASTSD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VBROADCASTSDMasked512", + argLen: 2, + asm: x86.AVBROADCASTSD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VBROADCASTSS128", + argLen: 1, + asm: x86.AVBROADCASTSS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VBROADCASTSS256", + argLen: 1, + asm: x86.AVBROADCASTSS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VBROADCASTSS512", + argLen: 1, + asm: x86.AVBROADCASTSS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VBROADCASTSSMasked128", + argLen: 2, + asm: x86.AVBROADCASTSS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VBROADCASTSSMasked256", + argLen: 2, + asm: x86.AVBROADCASTSS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VBROADCASTSSMasked512", + argLen: 2, + asm: x86.AVBROADCASTSS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VCOMPRESSPDMasked128", argLen: 2, @@ -23272,6 +23503,330 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPBROADCASTB128", + argLen: 1, + asm: x86.AVPBROADCASTB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPBROADCASTB256", + argLen: 1, + asm: x86.AVPBROADCASTB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPBROADCASTB512", + argLen: 1, + asm: x86.AVPBROADCASTB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPBROADCASTBMasked128", + argLen: 2, + asm: x86.AVPBROADCASTB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPBROADCASTBMasked256", + argLen: 2, + asm: x86.AVPBROADCASTB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPBROADCASTBMasked512", + argLen: 2, + asm: x86.AVPBROADCASTB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPBROADCASTD128", + argLen: 1, + asm: x86.AVPBROADCASTD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPBROADCASTD256", + argLen: 1, + asm: x86.AVPBROADCASTD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPBROADCASTD512", + argLen: 1, + asm: x86.AVPBROADCASTD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPBROADCASTDMasked128", + argLen: 2, + asm: x86.AVPBROADCASTD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPBROADCASTDMasked256", + argLen: 2, + asm: x86.AVPBROADCASTD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPBROADCASTDMasked512", + argLen: 2, + asm: x86.AVPBROADCASTD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPBROADCASTQ128", + argLen: 1, + asm: x86.AVPBROADCASTQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPBROADCASTQ256", + argLen: 1, + asm: x86.AVPBROADCASTQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPBROADCASTQ512", + argLen: 1, + asm: x86.AVPBROADCASTQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPBROADCASTQMasked128", + argLen: 2, + asm: x86.AVPBROADCASTQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPBROADCASTQMasked256", + argLen: 2, + asm: x86.AVPBROADCASTQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPBROADCASTQMasked512", + argLen: 2, + asm: x86.AVPBROADCASTQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPBROADCASTW128", + argLen: 1, + asm: x86.AVPBROADCASTW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPBROADCASTW256", + argLen: 1, + asm: x86.AVPBROADCASTW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPBROADCASTW512", + argLen: 1, + asm: x86.AVPBROADCASTW, + 
reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPBROADCASTWMasked128", + argLen: 2, + asm: x86.AVPBROADCASTW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPBROADCASTWMasked256", + argLen: 2, + asm: x86.AVPBROADCASTW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPBROADCASTWMasked512", + argLen: 2, + asm: x86.AVPBROADCASTW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPEQB128", argLen: 2, @@ -34482,10 +35037,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPINSRB128", + name: "VPINSRD128", auxType: auxUInt8, argLen: 2, - asm: x86.AVPINSRB, + asm: x86.AVPINSRD, reg: regInfo{ inputs: []inputInfo{ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -34497,10 +35052,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPINSRW128", + name: "VPINSRQ128", auxType: auxUInt8, argLen: 2, - asm: x86.AVPINSRW, + asm: x86.AVPINSRQ, reg: regInfo{ inputs: []inputInfo{ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -34512,10 +35067,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPINSRD128", + name: "VPINSRB128", auxType: auxUInt8, argLen: 2, - asm: x86.AVPINSRD, + asm: x86.AVPINSRB, reg: regInfo{ inputs: []inputInfo{ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -34527,10 +35082,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPINSRQ128", + name: "VPINSRW128", auxType: auxUInt8, argLen: 2, - asm: x86.AVPINSRQ, + asm: x86.AVPINSRW, reg: regInfo{ inputs: []inputInfo{ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -64725,6 +65280,306 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "Broadcast128Float32x4", + argLen: 1, + generic: true, + }, + { + name: "Broadcast128Float64x2", + argLen: 1, + generic: true, + }, + { + name: "Broadcast128Int8x16", + argLen: 1, + generic: true, + }, + { + name: "Broadcast128Int16x8", + argLen: 1, + generic: true, + }, + { + name: "Broadcast128Int32x4", + argLen: 1, + generic: true, + }, + { + name: "Broadcast128Int64x2", + argLen: 1, + generic: true, + }, + { + name: "Broadcast128MaskedFloat32x4", + argLen: 2, + generic: true, + }, + { + name: "Broadcast128MaskedFloat64x2", + argLen: 2, + generic: true, + }, + { + name: "Broadcast128MaskedInt8x16", + argLen: 2, + generic: true, + }, + { + name: "Broadcast128MaskedInt16x8", + argLen: 2, + generic: true, + }, + { + name: "Broadcast128MaskedInt32x4", + argLen: 2, + generic: true, + }, + { + name: "Broadcast128MaskedInt64x2", + argLen: 2, + generic: true, + }, + { 
+ name: "Broadcast128MaskedUint8x16", + argLen: 2, + generic: true, + }, + { + name: "Broadcast128MaskedUint16x8", + argLen: 2, + generic: true, + }, + { + name: "Broadcast128MaskedUint32x4", + argLen: 2, + generic: true, + }, + { + name: "Broadcast128MaskedUint64x2", + argLen: 2, + generic: true, + }, + { + name: "Broadcast128Uint8x16", + argLen: 1, + generic: true, + }, + { + name: "Broadcast128Uint16x8", + argLen: 1, + generic: true, + }, + { + name: "Broadcast128Uint32x4", + argLen: 1, + generic: true, + }, + { + name: "Broadcast128Uint64x2", + argLen: 1, + generic: true, + }, + { + name: "Broadcast256Float32x4", + argLen: 1, + generic: true, + }, + { + name: "Broadcast256Float64x2", + argLen: 1, + generic: true, + }, + { + name: "Broadcast256Int8x16", + argLen: 1, + generic: true, + }, + { + name: "Broadcast256Int16x8", + argLen: 1, + generic: true, + }, + { + name: "Broadcast256Int32x4", + argLen: 1, + generic: true, + }, + { + name: "Broadcast256Int64x2", + argLen: 1, + generic: true, + }, + { + name: "Broadcast256MaskedFloat32x4", + argLen: 2, + generic: true, + }, + { + name: "Broadcast256MaskedFloat64x2", + argLen: 2, + generic: true, + }, + { + name: "Broadcast256MaskedInt8x16", + argLen: 2, + generic: true, + }, + { + name: "Broadcast256MaskedInt16x8", + argLen: 2, + generic: true, + }, + { + name: "Broadcast256MaskedInt32x4", + argLen: 2, + generic: true, + }, + { + name: "Broadcast256MaskedInt64x2", + argLen: 2, + generic: true, + }, + { + name: "Broadcast256MaskedUint8x16", + argLen: 2, + generic: true, + }, + { + name: "Broadcast256MaskedUint16x8", + argLen: 2, + generic: true, + }, + { + name: "Broadcast256MaskedUint32x4", + argLen: 2, + generic: true, + }, + { + name: "Broadcast256MaskedUint64x2", + argLen: 2, + generic: true, + }, + { + name: "Broadcast256Uint8x16", + argLen: 1, + generic: true, + }, + { + name: "Broadcast256Uint16x8", + argLen: 1, + generic: true, + }, + { + name: "Broadcast256Uint32x4", + argLen: 1, + generic: true, + }, + { + name: "Broadcast256Uint64x2", + argLen: 1, + generic: true, + }, + { + name: "Broadcast512Float32x4", + argLen: 1, + generic: true, + }, + { + name: "Broadcast512Float64x2", + argLen: 1, + generic: true, + }, + { + name: "Broadcast512Int8x16", + argLen: 1, + generic: true, + }, + { + name: "Broadcast512Int16x8", + argLen: 1, + generic: true, + }, + { + name: "Broadcast512Int32x4", + argLen: 1, + generic: true, + }, + { + name: "Broadcast512Int64x2", + argLen: 1, + generic: true, + }, + { + name: "Broadcast512MaskedFloat32x4", + argLen: 2, + generic: true, + }, + { + name: "Broadcast512MaskedFloat64x2", + argLen: 2, + generic: true, + }, + { + name: "Broadcast512MaskedInt8x16", + argLen: 2, + generic: true, + }, + { + name: "Broadcast512MaskedInt16x8", + argLen: 2, + generic: true, + }, + { + name: "Broadcast512MaskedInt32x4", + argLen: 2, + generic: true, + }, + { + name: "Broadcast512MaskedInt64x2", + argLen: 2, + generic: true, + }, + { + name: "Broadcast512MaskedUint8x16", + argLen: 2, + generic: true, + }, + { + name: "Broadcast512MaskedUint16x8", + argLen: 2, + generic: true, + }, + { + name: "Broadcast512MaskedUint32x4", + argLen: 2, + generic: true, + }, + { + name: "Broadcast512MaskedUint64x2", + argLen: 2, + generic: true, + }, + { + name: "Broadcast512Uint8x16", + argLen: 1, + generic: true, + }, + { + name: "Broadcast512Uint16x8", + argLen: 1, + generic: true, + }, + { + name: "Broadcast512Uint32x4", + argLen: 1, + generic: true, + }, + { + name: "Broadcast512Uint64x2", + argLen: 1, + generic: true, + }, { name: 
"CeilFloat32x4", argLen: 1, @@ -73153,6 +74008,18 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "SetElemFloat32x4", + auxType: auxUInt8, + argLen: 2, + generic: true, + }, + { + name: "SetElemFloat64x2", + auxType: auxUInt8, + argLen: 2, + generic: true, + }, { name: "SetElemInt8x16", auxType: auxUInt8, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index c5367adefec432..0bdc0e63b7b536 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1317,6 +1317,156 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpBitLen64(v) case OpBitLen8: return rewriteValueAMD64_OpBitLen8(v) + case OpBroadcast128Float32x4: + v.Op = OpAMD64VBROADCASTSS128 + return true + case OpBroadcast128Float64x2: + v.Op = OpAMD64VPBROADCASTQ128 + return true + case OpBroadcast128Int16x8: + v.Op = OpAMD64VPBROADCASTW128 + return true + case OpBroadcast128Int32x4: + v.Op = OpAMD64VPBROADCASTD128 + return true + case OpBroadcast128Int64x2: + v.Op = OpAMD64VPBROADCASTQ128 + return true + case OpBroadcast128Int8x16: + v.Op = OpAMD64VPBROADCASTB128 + return true + case OpBroadcast128MaskedFloat32x4: + return rewriteValueAMD64_OpBroadcast128MaskedFloat32x4(v) + case OpBroadcast128MaskedFloat64x2: + return rewriteValueAMD64_OpBroadcast128MaskedFloat64x2(v) + case OpBroadcast128MaskedInt16x8: + return rewriteValueAMD64_OpBroadcast128MaskedInt16x8(v) + case OpBroadcast128MaskedInt32x4: + return rewriteValueAMD64_OpBroadcast128MaskedInt32x4(v) + case OpBroadcast128MaskedInt64x2: + return rewriteValueAMD64_OpBroadcast128MaskedInt64x2(v) + case OpBroadcast128MaskedInt8x16: + return rewriteValueAMD64_OpBroadcast128MaskedInt8x16(v) + case OpBroadcast128MaskedUint16x8: + return rewriteValueAMD64_OpBroadcast128MaskedUint16x8(v) + case OpBroadcast128MaskedUint32x4: + return rewriteValueAMD64_OpBroadcast128MaskedUint32x4(v) + case OpBroadcast128MaskedUint64x2: + return rewriteValueAMD64_OpBroadcast128MaskedUint64x2(v) + case OpBroadcast128MaskedUint8x16: + return rewriteValueAMD64_OpBroadcast128MaskedUint8x16(v) + case OpBroadcast128Uint16x8: + v.Op = OpAMD64VPBROADCASTW128 + return true + case OpBroadcast128Uint32x4: + v.Op = OpAMD64VPBROADCASTD128 + return true + case OpBroadcast128Uint64x2: + v.Op = OpAMD64VPBROADCASTQ128 + return true + case OpBroadcast128Uint8x16: + v.Op = OpAMD64VPBROADCASTB128 + return true + case OpBroadcast256Float32x4: + v.Op = OpAMD64VBROADCASTSS256 + return true + case OpBroadcast256Float64x2: + v.Op = OpAMD64VBROADCASTSD256 + return true + case OpBroadcast256Int16x8: + v.Op = OpAMD64VPBROADCASTW256 + return true + case OpBroadcast256Int32x4: + v.Op = OpAMD64VPBROADCASTD256 + return true + case OpBroadcast256Int64x2: + v.Op = OpAMD64VPBROADCASTQ256 + return true + case OpBroadcast256Int8x16: + v.Op = OpAMD64VPBROADCASTB256 + return true + case OpBroadcast256MaskedFloat32x4: + return rewriteValueAMD64_OpBroadcast256MaskedFloat32x4(v) + case OpBroadcast256MaskedFloat64x2: + return rewriteValueAMD64_OpBroadcast256MaskedFloat64x2(v) + case OpBroadcast256MaskedInt16x8: + return rewriteValueAMD64_OpBroadcast256MaskedInt16x8(v) + case OpBroadcast256MaskedInt32x4: + return rewriteValueAMD64_OpBroadcast256MaskedInt32x4(v) + case OpBroadcast256MaskedInt64x2: + return rewriteValueAMD64_OpBroadcast256MaskedInt64x2(v) + case OpBroadcast256MaskedInt8x16: + return rewriteValueAMD64_OpBroadcast256MaskedInt8x16(v) + case OpBroadcast256MaskedUint16x8: + return 
rewriteValueAMD64_OpBroadcast256MaskedUint16x8(v) + case OpBroadcast256MaskedUint32x4: + return rewriteValueAMD64_OpBroadcast256MaskedUint32x4(v) + case OpBroadcast256MaskedUint64x2: + return rewriteValueAMD64_OpBroadcast256MaskedUint64x2(v) + case OpBroadcast256MaskedUint8x16: + return rewriteValueAMD64_OpBroadcast256MaskedUint8x16(v) + case OpBroadcast256Uint16x8: + v.Op = OpAMD64VPBROADCASTW256 + return true + case OpBroadcast256Uint32x4: + v.Op = OpAMD64VPBROADCASTD256 + return true + case OpBroadcast256Uint64x2: + v.Op = OpAMD64VPBROADCASTQ256 + return true + case OpBroadcast256Uint8x16: + v.Op = OpAMD64VPBROADCASTB256 + return true + case OpBroadcast512Float32x4: + v.Op = OpAMD64VBROADCASTSS512 + return true + case OpBroadcast512Float64x2: + v.Op = OpAMD64VBROADCASTSD512 + return true + case OpBroadcast512Int16x8: + v.Op = OpAMD64VPBROADCASTW512 + return true + case OpBroadcast512Int32x4: + v.Op = OpAMD64VPBROADCASTD512 + return true + case OpBroadcast512Int64x2: + v.Op = OpAMD64VPBROADCASTQ512 + return true + case OpBroadcast512Int8x16: + v.Op = OpAMD64VPBROADCASTB512 + return true + case OpBroadcast512MaskedFloat32x4: + return rewriteValueAMD64_OpBroadcast512MaskedFloat32x4(v) + case OpBroadcast512MaskedFloat64x2: + return rewriteValueAMD64_OpBroadcast512MaskedFloat64x2(v) + case OpBroadcast512MaskedInt16x8: + return rewriteValueAMD64_OpBroadcast512MaskedInt16x8(v) + case OpBroadcast512MaskedInt32x4: + return rewriteValueAMD64_OpBroadcast512MaskedInt32x4(v) + case OpBroadcast512MaskedInt64x2: + return rewriteValueAMD64_OpBroadcast512MaskedInt64x2(v) + case OpBroadcast512MaskedInt8x16: + return rewriteValueAMD64_OpBroadcast512MaskedInt8x16(v) + case OpBroadcast512MaskedUint16x8: + return rewriteValueAMD64_OpBroadcast512MaskedUint16x8(v) + case OpBroadcast512MaskedUint32x4: + return rewriteValueAMD64_OpBroadcast512MaskedUint32x4(v) + case OpBroadcast512MaskedUint64x2: + return rewriteValueAMD64_OpBroadcast512MaskedUint64x2(v) + case OpBroadcast512MaskedUint8x16: + return rewriteValueAMD64_OpBroadcast512MaskedUint8x16(v) + case OpBroadcast512Uint16x8: + v.Op = OpAMD64VPBROADCASTW512 + return true + case OpBroadcast512Uint32x4: + v.Op = OpAMD64VPBROADCASTD512 + return true + case OpBroadcast512Uint64x2: + v.Op = OpAMD64VPBROADCASTQ512 + return true + case OpBroadcast512Uint8x16: + v.Op = OpAMD64VPBROADCASTB512 + return true case OpBswap16: return rewriteValueAMD64_OpBswap16(v) case OpBswap32: @@ -4539,6 +4689,12 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpSelect1(v) case OpSelectN: return rewriteValueAMD64_OpSelectN(v) + case OpSetElemFloat32x4: + v.Op = OpAMD64VPINSRD128 + return true + case OpSetElemFloat64x2: + v.Op = OpAMD64VPINSRQ128 + return true case OpSetElemInt16x8: v.Op = OpAMD64VPINSRW128 return true @@ -31628,6 +31784,486 @@ func rewriteValueAMD64_OpBitLen8(v *Value) bool { } return false } +func rewriteValueAMD64_OpBroadcast128MaskedFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast128MaskedFloat32x4 x mask) + // result: (VBROADCASTSSMasked128 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VBROADCASTSSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast128MaskedFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast128MaskedFloat64x2 x mask) + // result: (VPBROADCASTQMasked128 x (VPMOVVec64x2ToM mask)) + 
for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPBROADCASTQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast128MaskedInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast128MaskedInt16x8 x mask) + // result: (VPBROADCASTWMasked128 x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPBROADCASTWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast128MaskedInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast128MaskedInt32x4 x mask) + // result: (VPBROADCASTDMasked128 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPBROADCASTDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast128MaskedInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast128MaskedInt64x2 x mask) + // result: (VPBROADCASTQMasked128 x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPBROADCASTQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast128MaskedInt8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast128MaskedInt8x16 x mask) + // result: (VPBROADCASTBMasked128 x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPBROADCASTBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast128MaskedUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast128MaskedUint16x8 x mask) + // result: (VPBROADCASTWMasked128 x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPBROADCASTWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast128MaskedUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast128MaskedUint32x4 x mask) + // result: (VPBROADCASTDMasked128 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPBROADCASTDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast128MaskedUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast128MaskedUint64x2 x mask) + // result: (VPBROADCASTQMasked128 x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPBROADCASTQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast128MaskedUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast128MaskedUint8x16 x mask) + // result: (VPBROADCASTBMasked128 x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPBROADCASTBMasked128) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast256MaskedFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast256MaskedFloat32x4 x mask) + // result: (VBROADCASTSSMasked256 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VBROADCASTSSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast256MaskedFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast256MaskedFloat64x2 x mask) + // result: (VBROADCASTSDMasked256 x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VBROADCASTSDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast256MaskedInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast256MaskedInt16x8 x mask) + // result: (VPBROADCASTWMasked256 x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPBROADCASTWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast256MaskedInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast256MaskedInt32x4 x mask) + // result: (VPBROADCASTDMasked256 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPBROADCASTDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast256MaskedInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast256MaskedInt64x2 x mask) + // result: (VPBROADCASTQMasked256 x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPBROADCASTQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast256MaskedInt8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast256MaskedInt8x16 x mask) + // result: (VPBROADCASTBMasked256 x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPBROADCASTBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast256MaskedUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast256MaskedUint16x8 x mask) + // result: (VPBROADCASTWMasked256 x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPBROADCASTWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast256MaskedUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast256MaskedUint32x4 x mask) + // result: (VPBROADCASTDMasked256 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPBROADCASTDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func 
rewriteValueAMD64_OpBroadcast256MaskedUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast256MaskedUint64x2 x mask) + // result: (VPBROADCASTQMasked256 x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPBROADCASTQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast256MaskedUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast256MaskedUint8x16 x mask) + // result: (VPBROADCASTBMasked256 x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPBROADCASTBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast512MaskedFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast512MaskedFloat32x4 x mask) + // result: (VBROADCASTSSMasked512 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VBROADCASTSSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast512MaskedFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast512MaskedFloat64x2 x mask) + // result: (VBROADCASTSDMasked512 x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VBROADCASTSDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast512MaskedInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast512MaskedInt16x8 x mask) + // result: (VPBROADCASTWMasked512 x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPBROADCASTWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast512MaskedInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast512MaskedInt32x4 x mask) + // result: (VPBROADCASTDMasked512 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPBROADCASTDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast512MaskedInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast512MaskedInt64x2 x mask) + // result: (VPBROADCASTQMasked512 x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPBROADCASTQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast512MaskedInt8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast512MaskedInt8x16 x mask) + // result: (VPBROADCASTBMasked512 x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPBROADCASTBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast512MaskedUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := 
v.Block + // match: (Broadcast512MaskedUint16x8 x mask) + // result: (VPBROADCASTWMasked512 x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPBROADCASTWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast512MaskedUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast512MaskedUint32x4 x mask) + // result: (VPBROADCASTDMasked512 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPBROADCASTDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast512MaskedUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast512MaskedUint64x2 x mask) + // result: (VPBROADCASTQMasked512 x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPBROADCASTQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast512MaskedUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast512MaskedUint8x16 x mask) + // result: (VPBROADCASTBMasked512 x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPBROADCASTBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} func rewriteValueAMD64_OpBswap16(v *Value) bool { v_0 := v.Args[0] // match: (Bswap16 x) diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index e14e02a71e5444..7a95a4450d4ce2 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -240,6 +240,66 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint16x8.AverageMasked", opLen3(ssa.OpAverageMaskedUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x16.AverageMasked", opLen3(ssa.OpAverageMaskedUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x32.AverageMasked", opLen3(ssa.OpAverageMaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.Broadcast128", opLen1(ssa.OpBroadcast128Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.Broadcast128", opLen1(ssa.OpBroadcast128Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.Broadcast128", opLen1(ssa.OpBroadcast128Int8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.Broadcast128", opLen1(ssa.OpBroadcast128Int16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.Broadcast128", opLen1(ssa.OpBroadcast128Int32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.Broadcast128", opLen1(ssa.OpBroadcast128Int64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.Broadcast128", opLen1(ssa.OpBroadcast128Uint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.Broadcast128", opLen1(ssa.OpBroadcast128Uint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.Broadcast128", opLen1(ssa.OpBroadcast128Uint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.Broadcast128", opLen1(ssa.OpBroadcast128Uint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.Broadcast128Masked", opLen2(ssa.OpBroadcast128MaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.Broadcast128Masked", opLen2(ssa.OpBroadcast128MaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.Broadcast128Masked", opLen2(ssa.OpBroadcast128MaskedInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.Broadcast128Masked", opLen2(ssa.OpBroadcast128MaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.Broadcast128Masked", opLen2(ssa.OpBroadcast128MaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.Broadcast128Masked", opLen2(ssa.OpBroadcast128MaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.Broadcast128Masked", opLen2(ssa.OpBroadcast128MaskedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.Broadcast128Masked", opLen2(ssa.OpBroadcast128MaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.Broadcast128Masked", opLen2(ssa.OpBroadcast128MaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.Broadcast128Masked", opLen2(ssa.OpBroadcast128MaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.Broadcast256", opLen1(ssa.OpBroadcast256Float32x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x2.Broadcast256", opLen1(ssa.OpBroadcast256Float64x2, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x16.Broadcast256", opLen1(ssa.OpBroadcast256Int8x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x8.Broadcast256", opLen1(ssa.OpBroadcast256Int16x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x4.Broadcast256", opLen1(ssa.OpBroadcast256Int32x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x2.Broadcast256", opLen1(ssa.OpBroadcast256Int64x2, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x16.Broadcast256", opLen1(ssa.OpBroadcast256Uint8x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x8.Broadcast256", opLen1(ssa.OpBroadcast256Uint16x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, 
"Uint32x4.Broadcast256", opLen1(ssa.OpBroadcast256Uint32x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x2.Broadcast256", opLen1(ssa.OpBroadcast256Uint64x2, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x4.Broadcast256Masked", opLen2(ssa.OpBroadcast256MaskedFloat32x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x2.Broadcast256Masked", opLen2(ssa.OpBroadcast256MaskedFloat64x2, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x16.Broadcast256Masked", opLen2(ssa.OpBroadcast256MaskedInt8x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x8.Broadcast256Masked", opLen2(ssa.OpBroadcast256MaskedInt16x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x4.Broadcast256Masked", opLen2(ssa.OpBroadcast256MaskedInt32x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x2.Broadcast256Masked", opLen2(ssa.OpBroadcast256MaskedInt64x2, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x16.Broadcast256Masked", opLen2(ssa.OpBroadcast256MaskedUint8x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x8.Broadcast256Masked", opLen2(ssa.OpBroadcast256MaskedUint16x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x4.Broadcast256Masked", opLen2(ssa.OpBroadcast256MaskedUint32x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x2.Broadcast256Masked", opLen2(ssa.OpBroadcast256MaskedUint64x2, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x4.Broadcast512", opLen1(ssa.OpBroadcast512Float32x4, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.Broadcast512", opLen1(ssa.OpBroadcast512Float64x2, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.Broadcast512", opLen1(ssa.OpBroadcast512Int8x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.Broadcast512", opLen1(ssa.OpBroadcast512Int16x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.Broadcast512", opLen1(ssa.OpBroadcast512Int32x4, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.Broadcast512", opLen1(ssa.OpBroadcast512Int64x2, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.Broadcast512", opLen1(ssa.OpBroadcast512Uint8x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.Broadcast512", opLen1(ssa.OpBroadcast512Uint16x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.Broadcast512", opLen1(ssa.OpBroadcast512Uint32x4, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.Broadcast512", opLen1(ssa.OpBroadcast512Uint64x2, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.Broadcast512Masked", opLen2(ssa.OpBroadcast512MaskedFloat32x4, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.Broadcast512Masked", opLen2(ssa.OpBroadcast512MaskedFloat64x2, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.Broadcast512Masked", opLen2(ssa.OpBroadcast512MaskedInt8x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.Broadcast512Masked", opLen2(ssa.OpBroadcast512MaskedInt16x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.Broadcast512Masked", opLen2(ssa.OpBroadcast512MaskedInt32x4, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.Broadcast512Masked", opLen2(ssa.OpBroadcast512MaskedInt64x2, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.Broadcast512Masked", opLen2(ssa.OpBroadcast512MaskedUint8x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.Broadcast512Masked", opLen2(ssa.OpBroadcast512MaskedUint16x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.Broadcast512Masked", 
opLen2(ssa.OpBroadcast512MaskedUint32x4, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.Broadcast512Masked", opLen2(ssa.OpBroadcast512MaskedUint64x2, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Ceil", opLen1(ssa.OpCeilFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Ceil", opLen1(ssa.OpCeilFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.Ceil", opLen1(ssa.OpCeilFloat64x2, types.TypeVec128), sys.AMD64) @@ -1408,6 +1468,8 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float64x2.ScaleMasked", opLen3(ssa.OpScaleMaskedFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.ScaleMasked", opLen3(ssa.OpScaleMaskedFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.ScaleMasked", opLen3(ssa.OpScaleMaskedFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.SetElem", opLen2Imm8(ssa.OpSetElemFloat32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Float64x2.SetElem", opLen2Imm8(ssa.OpSetElemFloat64x2, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Int8x16.SetElem", opLen2Imm8(ssa.OpSetElemInt8x16, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Int16x8.SetElem", opLen2Imm8(ssa.OpSetElemInt16x8, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Int32x4.SetElem", opLen2Imm8(ssa.OpSetElemInt32x4, types.TypeVec128, 0), sys.AMD64) diff --git a/src/simd/genfiles.go b/src/simd/genfiles.go index c7c6aae374265d..8b36da71ab95a8 100644 --- a/src/simd/genfiles.go +++ b/src/simd/genfiles.go @@ -87,6 +87,23 @@ var ternaryFlaky = &shapes{ // for tests that support flaky equality floats: []int{32}, } +type templateData struct { + Vec string // the type of the vector, e.g. Float32x4 + AOrAn string // for documentation, the article "a" or "an" + Width int // the bit width of the element type, e.g. 32 + Vwidth int // the width of the vector type, e.g. 128 + Count int // the number of elements, e.g. 4 + WxC string // the width-by-type string, e.g., "32x4" + BxC string // as if bytes, in the proper count, e.g., "8x16" (W==8) + Base string // the capitalized Base Type of the vector, e.g., "Float" + Type string // the element type, e.g. 
"float32" + OxFF string // a mask for the lowest 'count' bits +} + +func (t templateData) As128BitVec() string { + return fmt.Sprintf("%s%dx%d", t.Base, t.Width, 128/t.Width) +} + func oneTemplate(t *template.Template, baseType string, width, count int, out io.Writer) { b := width * count if b < 128 || b > 512 { @@ -102,26 +119,17 @@ func oneTemplate(t *template.Template, baseType string, width, count int, out io aOrAn = "an" } oxFF := fmt.Sprintf("0x%x", uint64((1< Date: Fri, 1 Aug 2025 09:23:45 -0400 Subject: [PATCH 130/139] [dev.simd] simd: add emulations for missing AVX2 comparisons this also removes AVX512 versions of the operations that would use the same names, but not run on AVX2-only includes files generated by simdgen CL 692355 Change-Id: Iff29042245b7688133fed49a03e681e85235b8a8 Reviewed-on: https://go-review.googlesource.com/c/go/+/692335 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao --- src/cmd/compile/internal/amd64/simdssa.go | 16 - .../compile/internal/ssa/_gen/simdAMD64.rules | 72 - .../compile/internal/ssa/_gen/simdAMD64ops.go | 16 - .../internal/ssa/_gen/simdgenericOps.go | 72 - src/cmd/compile/internal/ssa/opGen.go | 704 -------- src/cmd/compile/internal/ssa/rewriteAMD64.go | 1440 ----------------- .../compile/internal/ssagen/simdintrinsics.go | 72 - src/simd/compare_test.go | 166 +- src/simd/genfiles.go | 136 ++ src/simd/ops_amd64.go | 360 ----- src/simd/slice_amd64.go | 636 ++++++++ 11 files changed, 859 insertions(+), 2831 deletions(-) diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 73a947a88af24a..3ec8b484fb8a6d 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -886,29 +886,13 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { case ssa.OpAMD64VCMPPS512, ssa.OpAMD64VCMPPD512, - ssa.OpAMD64VPCMPUB128, - ssa.OpAMD64VPCMPUB256, ssa.OpAMD64VPCMPUB512, - ssa.OpAMD64VPCMPUW128, - ssa.OpAMD64VPCMPUW256, ssa.OpAMD64VPCMPUW512, - ssa.OpAMD64VPCMPUD128, - ssa.OpAMD64VPCMPUD256, ssa.OpAMD64VPCMPUD512, - ssa.OpAMD64VPCMPUQ128, - ssa.OpAMD64VPCMPUQ256, ssa.OpAMD64VPCMPUQ512, - ssa.OpAMD64VPCMPB128, - ssa.OpAMD64VPCMPB256, ssa.OpAMD64VPCMPB512, - ssa.OpAMD64VPCMPW128, - ssa.OpAMD64VPCMPW256, ssa.OpAMD64VPCMPW512, - ssa.OpAMD64VPCMPD128, - ssa.OpAMD64VPCMPD256, ssa.OpAMD64VPCMPD512, - ssa.OpAMD64VPCMPQ128, - ssa.OpAMD64VPCMPQ256, ssa.OpAMD64VPCMPQ512: p = simdV2kImm8(s, v) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index e7c5a1a97d372d..9670f035ba880b 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -590,17 +590,9 @@ (GreaterInt64x2 ...) => (VPCMPGTQ128 ...) (GreaterInt64x4 ...) => (VPCMPGTQ256 ...) 
(GreaterInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPGTQ512 x y)) -(GreaterUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [14] x y)) -(GreaterUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [14] x y)) (GreaterUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [14] x y)) -(GreaterUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [14] x y)) -(GreaterUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [14] x y)) (GreaterUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [14] x y)) -(GreaterUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [14] x y)) -(GreaterUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [14] x y)) (GreaterUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [14] x y)) -(GreaterUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [14] x y)) -(GreaterUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [14] x y)) (GreaterUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [14] x y)) (GreaterEqualFloat32x4 x y) => (VCMPPS128 [13] x y) (GreaterEqualFloat32x8 x y) => (VCMPPS256 [13] x y) @@ -608,29 +600,13 @@ (GreaterEqualFloat64x2 x y) => (VCMPPD128 [13] x y) (GreaterEqualFloat64x4 x y) => (VCMPPD256 [13] x y) (GreaterEqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [13] x y)) -(GreaterEqualInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [13] x y)) -(GreaterEqualInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [13] x y)) (GreaterEqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [13] x y)) -(GreaterEqualInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [13] x y)) -(GreaterEqualInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [13] x y)) (GreaterEqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [13] x y)) -(GreaterEqualInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [13] x y)) -(GreaterEqualInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [13] x y)) (GreaterEqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [13] x y)) -(GreaterEqualInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [13] x y)) -(GreaterEqualInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [13] x y)) (GreaterEqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [13] x y)) -(GreaterEqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [13] x y)) -(GreaterEqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [13] x y)) (GreaterEqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [13] x y)) -(GreaterEqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [13] x y)) -(GreaterEqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [13] x y)) (GreaterEqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [13] x y)) -(GreaterEqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [13] x y)) -(GreaterEqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [13] x y)) (GreaterEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [13] x y)) -(GreaterEqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [13] x y)) -(GreaterEqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [13] x y)) (GreaterEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [13] x y)) (GreaterEqualMaskedFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [13] x y (VPMOVVec32x4ToM mask))) (GreaterEqualMaskedFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [13] x y (VPMOVVec32x8ToM mask))) @@ -710,29 +686,13 @@ (LessFloat64x2 x y) => (VCMPPD128 [1] x y) (LessFloat64x4 x y) => (VCMPPD256 [1] x y) (LessFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [1] x y)) -(LessInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [1] x y)) -(LessInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [1] x y)) (LessInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [1] x y)) -(LessInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [1] x y)) -(LessInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [1] x y)) (LessInt16x32 x y) => 
(VPMOVMToVec16x32 (VPCMPW512 [1] x y)) -(LessInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [1] x y)) -(LessInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [1] x y)) (LessInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [1] x y)) -(LessInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [1] x y)) -(LessInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [1] x y)) (LessInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [1] x y)) -(LessUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [1] x y)) -(LessUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [1] x y)) (LessUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [1] x y)) -(LessUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [1] x y)) -(LessUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [1] x y)) (LessUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [1] x y)) -(LessUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [1] x y)) -(LessUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [1] x y)) (LessUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [1] x y)) -(LessUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [1] x y)) -(LessUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [1] x y)) (LessUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [1] x y)) (LessEqualFloat32x4 x y) => (VCMPPS128 [2] x y) (LessEqualFloat32x8 x y) => (VCMPPS256 [2] x y) @@ -740,29 +700,13 @@ (LessEqualFloat64x2 x y) => (VCMPPD128 [2] x y) (LessEqualFloat64x4 x y) => (VCMPPD256 [2] x y) (LessEqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [2] x y)) -(LessEqualInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [2] x y)) -(LessEqualInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [2] x y)) (LessEqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [2] x y)) -(LessEqualInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [2] x y)) -(LessEqualInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [2] x y)) (LessEqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [2] x y)) -(LessEqualInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [2] x y)) -(LessEqualInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [2] x y)) (LessEqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [2] x y)) -(LessEqualInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [2] x y)) -(LessEqualInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [2] x y)) (LessEqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [2] x y)) -(LessEqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [2] x y)) -(LessEqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [2] x y)) (LessEqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [2] x y)) -(LessEqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [2] x y)) -(LessEqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [2] x y)) (LessEqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [2] x y)) -(LessEqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [2] x y)) -(LessEqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [2] x y)) (LessEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [2] x y)) -(LessEqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [2] x y)) -(LessEqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [2] x y)) (LessEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [2] x y)) (LessEqualMaskedFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [2] x y (VPMOVVec32x4ToM mask))) (LessEqualMaskedFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [2] x y (VPMOVVec32x8ToM mask))) @@ -1050,29 +994,13 @@ (NotEqualFloat64x2 x y) => (VCMPPD128 [4] x y) (NotEqualFloat64x4 x y) => (VCMPPD256 [4] x y) (NotEqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [4] x y)) -(NotEqualInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [4] x y)) -(NotEqualInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [4] x y)) (NotEqualInt8x64 x 
y) => (VPMOVMToVec8x64 (VPCMPB512 [4] x y)) -(NotEqualInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [4] x y)) -(NotEqualInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [4] x y)) (NotEqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [4] x y)) -(NotEqualInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [4] x y)) -(NotEqualInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [4] x y)) (NotEqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [4] x y)) -(NotEqualInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [4] x y)) -(NotEqualInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [4] x y)) (NotEqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [4] x y)) -(NotEqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [4] x y)) -(NotEqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [4] x y)) (NotEqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [4] x y)) -(NotEqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [4] x y)) -(NotEqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [4] x y)) (NotEqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [4] x y)) -(NotEqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [4] x y)) -(NotEqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [4] x y)) (NotEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [4] x y)) -(NotEqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [4] x y)) -(NotEqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [4] x y)) (NotEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [4] x y)) (NotEqualMaskedFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [4] x y (VPMOVVec32x4ToM mask))) (NotEqualMaskedFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [4] x y (VPMOVVec32x8ToM mask))) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 5d388a4531ba4b..61abaa5e9781dc 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -986,29 +986,13 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VEXTRACTF64X4256", argLength: 1, reg: w11, asm: "VEXTRACTF64X4", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VEXTRACTI128128", argLength: 1, reg: v11, asm: "VEXTRACTI128", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VEXTRACTI64X4256", argLength: 1, reg: w11, asm: "VEXTRACTI64X4", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPUB128", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUB256", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUB512", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUW128", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUW256", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUW512", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUD128", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUD256", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUD512", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: 
"UInt8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQ128", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQ256", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUQ512", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPB128", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPB256", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPB512", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPW128", argLength: 2, reg: w2k, asm: "VPCMPW", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPW256", argLength: 2, reg: w2k, asm: "VPCMPW", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPW512", argLength: 2, reg: w2k, asm: "VPCMPW", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPD128", argLength: 2, reg: w2k, asm: "VPCMPD", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPD256", argLength: 2, reg: w2k, asm: "VPCMPD", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPD512", argLength: 2, reg: w2k, asm: "VPCMPD", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPQ128", argLength: 2, reg: w2k, asm: "VPCMPQ", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPQ256", argLength: 2, reg: w2k, asm: "VPCMPQ", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPQ512", argLength: 2, reg: w2k, asm: "VPCMPQ", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPROLD128", argLength: 1, reg: w11, asm: "VPROLD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPROLD256", argLength: 1, reg: w11, asm: "VPROLD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index f120dcddd0c0c9..4f2b1a91215891 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -514,17 +514,9 @@ func simdGenericOps() []opData { {name: "GreaterEqualFloat64x2", argLength: 2, commutative: false}, {name: "GreaterEqualFloat64x4", argLength: 2, commutative: false}, {name: "GreaterEqualFloat64x8", argLength: 2, commutative: false}, - {name: "GreaterEqualInt8x16", argLength: 2, commutative: false}, - {name: "GreaterEqualInt8x32", argLength: 2, commutative: false}, {name: "GreaterEqualInt8x64", argLength: 2, commutative: false}, - {name: "GreaterEqualInt16x8", argLength: 2, commutative: false}, - {name: "GreaterEqualInt16x16", argLength: 2, commutative: false}, {name: "GreaterEqualInt16x32", argLength: 2, commutative: false}, - {name: "GreaterEqualInt32x4", argLength: 2, commutative: false}, - {name: "GreaterEqualInt32x8", argLength: 2, commutative: false}, {name: "GreaterEqualInt32x16", argLength: 2, commutative: false}, - {name: "GreaterEqualInt64x2", argLength: 2, commutative: false}, - {name: "GreaterEqualInt64x4", argLength: 2, commutative: false}, {name: 
"GreaterEqualInt64x8", argLength: 2, commutative: false}, {name: "GreaterEqualMaskedFloat32x4", argLength: 3, commutative: false}, {name: "GreaterEqualMaskedFloat32x8", argLength: 3, commutative: false}, @@ -556,17 +548,9 @@ func simdGenericOps() []opData { {name: "GreaterEqualMaskedUint64x2", argLength: 3, commutative: false}, {name: "GreaterEqualMaskedUint64x4", argLength: 3, commutative: false}, {name: "GreaterEqualMaskedUint64x8", argLength: 3, commutative: false}, - {name: "GreaterEqualUint8x16", argLength: 2, commutative: false}, - {name: "GreaterEqualUint8x32", argLength: 2, commutative: false}, {name: "GreaterEqualUint8x64", argLength: 2, commutative: false}, - {name: "GreaterEqualUint16x8", argLength: 2, commutative: false}, - {name: "GreaterEqualUint16x16", argLength: 2, commutative: false}, {name: "GreaterEqualUint16x32", argLength: 2, commutative: false}, - {name: "GreaterEqualUint32x4", argLength: 2, commutative: false}, - {name: "GreaterEqualUint32x8", argLength: 2, commutative: false}, {name: "GreaterEqualUint32x16", argLength: 2, commutative: false}, - {name: "GreaterEqualUint64x2", argLength: 2, commutative: false}, - {name: "GreaterEqualUint64x4", argLength: 2, commutative: false}, {name: "GreaterEqualUint64x8", argLength: 2, commutative: false}, {name: "GreaterFloat32x4", argLength: 2, commutative: false}, {name: "GreaterFloat32x8", argLength: 2, commutative: false}, @@ -616,17 +600,9 @@ func simdGenericOps() []opData { {name: "GreaterMaskedUint64x2", argLength: 3, commutative: false}, {name: "GreaterMaskedUint64x4", argLength: 3, commutative: false}, {name: "GreaterMaskedUint64x8", argLength: 3, commutative: false}, - {name: "GreaterUint8x16", argLength: 2, commutative: false}, - {name: "GreaterUint8x32", argLength: 2, commutative: false}, {name: "GreaterUint8x64", argLength: 2, commutative: false}, - {name: "GreaterUint16x8", argLength: 2, commutative: false}, - {name: "GreaterUint16x16", argLength: 2, commutative: false}, {name: "GreaterUint16x32", argLength: 2, commutative: false}, - {name: "GreaterUint32x4", argLength: 2, commutative: false}, - {name: "GreaterUint32x8", argLength: 2, commutative: false}, {name: "GreaterUint32x16", argLength: 2, commutative: false}, - {name: "GreaterUint64x2", argLength: 2, commutative: false}, - {name: "GreaterUint64x4", argLength: 2, commutative: false}, {name: "GreaterUint64x8", argLength: 2, commutative: false}, {name: "IsNanFloat32x4", argLength: 2, commutative: true}, {name: "IsNanFloat32x8", argLength: 2, commutative: true}, @@ -646,17 +622,9 @@ func simdGenericOps() []opData { {name: "LessEqualFloat64x2", argLength: 2, commutative: false}, {name: "LessEqualFloat64x4", argLength: 2, commutative: false}, {name: "LessEqualFloat64x8", argLength: 2, commutative: false}, - {name: "LessEqualInt8x16", argLength: 2, commutative: false}, - {name: "LessEqualInt8x32", argLength: 2, commutative: false}, {name: "LessEqualInt8x64", argLength: 2, commutative: false}, - {name: "LessEqualInt16x8", argLength: 2, commutative: false}, - {name: "LessEqualInt16x16", argLength: 2, commutative: false}, {name: "LessEqualInt16x32", argLength: 2, commutative: false}, - {name: "LessEqualInt32x4", argLength: 2, commutative: false}, - {name: "LessEqualInt32x8", argLength: 2, commutative: false}, {name: "LessEqualInt32x16", argLength: 2, commutative: false}, - {name: "LessEqualInt64x2", argLength: 2, commutative: false}, - {name: "LessEqualInt64x4", argLength: 2, commutative: false}, {name: "LessEqualInt64x8", argLength: 2, commutative: false}, {name: 
"LessEqualMaskedFloat32x4", argLength: 3, commutative: false}, {name: "LessEqualMaskedFloat32x8", argLength: 3, commutative: false}, @@ -688,17 +656,9 @@ func simdGenericOps() []opData { {name: "LessEqualMaskedUint64x2", argLength: 3, commutative: false}, {name: "LessEqualMaskedUint64x4", argLength: 3, commutative: false}, {name: "LessEqualMaskedUint64x8", argLength: 3, commutative: false}, - {name: "LessEqualUint8x16", argLength: 2, commutative: false}, - {name: "LessEqualUint8x32", argLength: 2, commutative: false}, {name: "LessEqualUint8x64", argLength: 2, commutative: false}, - {name: "LessEqualUint16x8", argLength: 2, commutative: false}, - {name: "LessEqualUint16x16", argLength: 2, commutative: false}, {name: "LessEqualUint16x32", argLength: 2, commutative: false}, - {name: "LessEqualUint32x4", argLength: 2, commutative: false}, - {name: "LessEqualUint32x8", argLength: 2, commutative: false}, {name: "LessEqualUint32x16", argLength: 2, commutative: false}, - {name: "LessEqualUint64x2", argLength: 2, commutative: false}, - {name: "LessEqualUint64x4", argLength: 2, commutative: false}, {name: "LessEqualUint64x8", argLength: 2, commutative: false}, {name: "LessFloat32x4", argLength: 2, commutative: false}, {name: "LessFloat32x8", argLength: 2, commutative: false}, @@ -706,17 +666,9 @@ func simdGenericOps() []opData { {name: "LessFloat64x2", argLength: 2, commutative: false}, {name: "LessFloat64x4", argLength: 2, commutative: false}, {name: "LessFloat64x8", argLength: 2, commutative: false}, - {name: "LessInt8x16", argLength: 2, commutative: false}, - {name: "LessInt8x32", argLength: 2, commutative: false}, {name: "LessInt8x64", argLength: 2, commutative: false}, - {name: "LessInt16x8", argLength: 2, commutative: false}, - {name: "LessInt16x16", argLength: 2, commutative: false}, {name: "LessInt16x32", argLength: 2, commutative: false}, - {name: "LessInt32x4", argLength: 2, commutative: false}, - {name: "LessInt32x8", argLength: 2, commutative: false}, {name: "LessInt32x16", argLength: 2, commutative: false}, - {name: "LessInt64x2", argLength: 2, commutative: false}, - {name: "LessInt64x4", argLength: 2, commutative: false}, {name: "LessInt64x8", argLength: 2, commutative: false}, {name: "LessMaskedFloat32x4", argLength: 3, commutative: false}, {name: "LessMaskedFloat32x8", argLength: 3, commutative: false}, @@ -748,17 +700,9 @@ func simdGenericOps() []opData { {name: "LessMaskedUint64x2", argLength: 3, commutative: false}, {name: "LessMaskedUint64x4", argLength: 3, commutative: false}, {name: "LessMaskedUint64x8", argLength: 3, commutative: false}, - {name: "LessUint8x16", argLength: 2, commutative: false}, - {name: "LessUint8x32", argLength: 2, commutative: false}, {name: "LessUint8x64", argLength: 2, commutative: false}, - {name: "LessUint16x8", argLength: 2, commutative: false}, - {name: "LessUint16x16", argLength: 2, commutative: false}, {name: "LessUint16x32", argLength: 2, commutative: false}, - {name: "LessUint32x4", argLength: 2, commutative: false}, - {name: "LessUint32x8", argLength: 2, commutative: false}, {name: "LessUint32x16", argLength: 2, commutative: false}, - {name: "LessUint64x2", argLength: 2, commutative: false}, - {name: "LessUint64x4", argLength: 2, commutative: false}, {name: "LessUint64x8", argLength: 2, commutative: false}, {name: "MaxFloat32x4", argLength: 2, commutative: true}, {name: "MaxFloat32x8", argLength: 2, commutative: true}, @@ -986,17 +930,9 @@ func simdGenericOps() []opData { {name: "NotEqualFloat64x2", argLength: 2, commutative: true}, {name: 
"NotEqualFloat64x4", argLength: 2, commutative: true}, {name: "NotEqualFloat64x8", argLength: 2, commutative: true}, - {name: "NotEqualInt8x16", argLength: 2, commutative: true}, - {name: "NotEqualInt8x32", argLength: 2, commutative: true}, {name: "NotEqualInt8x64", argLength: 2, commutative: true}, - {name: "NotEqualInt16x8", argLength: 2, commutative: true}, - {name: "NotEqualInt16x16", argLength: 2, commutative: true}, {name: "NotEqualInt16x32", argLength: 2, commutative: true}, - {name: "NotEqualInt32x4", argLength: 2, commutative: true}, - {name: "NotEqualInt32x8", argLength: 2, commutative: true}, {name: "NotEqualInt32x16", argLength: 2, commutative: true}, - {name: "NotEqualInt64x2", argLength: 2, commutative: true}, - {name: "NotEqualInt64x4", argLength: 2, commutative: true}, {name: "NotEqualInt64x8", argLength: 2, commutative: true}, {name: "NotEqualMaskedFloat32x4", argLength: 3, commutative: true}, {name: "NotEqualMaskedFloat32x8", argLength: 3, commutative: true}, @@ -1028,17 +964,9 @@ func simdGenericOps() []opData { {name: "NotEqualMaskedUint64x2", argLength: 3, commutative: true}, {name: "NotEqualMaskedUint64x4", argLength: 3, commutative: true}, {name: "NotEqualMaskedUint64x8", argLength: 3, commutative: true}, - {name: "NotEqualUint8x16", argLength: 2, commutative: true}, - {name: "NotEqualUint8x32", argLength: 2, commutative: true}, {name: "NotEqualUint8x64", argLength: 2, commutative: true}, - {name: "NotEqualUint16x8", argLength: 2, commutative: true}, - {name: "NotEqualUint16x16", argLength: 2, commutative: true}, {name: "NotEqualUint16x32", argLength: 2, commutative: true}, - {name: "NotEqualUint32x4", argLength: 2, commutative: true}, - {name: "NotEqualUint32x8", argLength: 2, commutative: true}, {name: "NotEqualUint32x16", argLength: 2, commutative: true}, - {name: "NotEqualUint64x2", argLength: 2, commutative: true}, - {name: "NotEqualUint64x4", argLength: 2, commutative: true}, {name: "NotEqualUint64x8", argLength: 2, commutative: true}, {name: "OnesCountInt8x16", argLength: 1, commutative: false}, {name: "OnesCountInt8x32", argLength: 1, commutative: false}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 6e0ffd15408587..7bcbf1b6157e19 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -2208,29 +2208,13 @@ const ( OpAMD64VEXTRACTF64X4256 OpAMD64VEXTRACTI128128 OpAMD64VEXTRACTI64X4256 - OpAMD64VPCMPUB128 - OpAMD64VPCMPUB256 OpAMD64VPCMPUB512 - OpAMD64VPCMPUW128 - OpAMD64VPCMPUW256 OpAMD64VPCMPUW512 - OpAMD64VPCMPUD128 - OpAMD64VPCMPUD256 OpAMD64VPCMPUD512 - OpAMD64VPCMPUQ128 - OpAMD64VPCMPUQ256 OpAMD64VPCMPUQ512 - OpAMD64VPCMPB128 - OpAMD64VPCMPB256 OpAMD64VPCMPB512 - OpAMD64VPCMPW128 - OpAMD64VPCMPW256 OpAMD64VPCMPW512 - OpAMD64VPCMPD128 - OpAMD64VPCMPD256 OpAMD64VPCMPD512 - OpAMD64VPCMPQ128 - OpAMD64VPCMPQ256 OpAMD64VPCMPQ512 OpAMD64VPROLD128 OpAMD64VPROLD256 @@ -5155,17 +5139,9 @@ const ( OpGreaterEqualFloat64x2 OpGreaterEqualFloat64x4 OpGreaterEqualFloat64x8 - OpGreaterEqualInt8x16 - OpGreaterEqualInt8x32 OpGreaterEqualInt8x64 - OpGreaterEqualInt16x8 - OpGreaterEqualInt16x16 OpGreaterEqualInt16x32 - OpGreaterEqualInt32x4 - OpGreaterEqualInt32x8 OpGreaterEqualInt32x16 - OpGreaterEqualInt64x2 - OpGreaterEqualInt64x4 OpGreaterEqualInt64x8 OpGreaterEqualMaskedFloat32x4 OpGreaterEqualMaskedFloat32x8 @@ -5197,17 +5173,9 @@ const ( OpGreaterEqualMaskedUint64x2 OpGreaterEqualMaskedUint64x4 OpGreaterEqualMaskedUint64x8 - OpGreaterEqualUint8x16 - OpGreaterEqualUint8x32 
OpGreaterEqualUint8x64 - OpGreaterEqualUint16x8 - OpGreaterEqualUint16x16 OpGreaterEqualUint16x32 - OpGreaterEqualUint32x4 - OpGreaterEqualUint32x8 OpGreaterEqualUint32x16 - OpGreaterEqualUint64x2 - OpGreaterEqualUint64x4 OpGreaterEqualUint64x8 OpGreaterFloat32x4 OpGreaterFloat32x8 @@ -5257,17 +5225,9 @@ const ( OpGreaterMaskedUint64x2 OpGreaterMaskedUint64x4 OpGreaterMaskedUint64x8 - OpGreaterUint8x16 - OpGreaterUint8x32 OpGreaterUint8x64 - OpGreaterUint16x8 - OpGreaterUint16x16 OpGreaterUint16x32 - OpGreaterUint32x4 - OpGreaterUint32x8 OpGreaterUint32x16 - OpGreaterUint64x2 - OpGreaterUint64x4 OpGreaterUint64x8 OpIsNanFloat32x4 OpIsNanFloat32x8 @@ -5287,17 +5247,9 @@ const ( OpLessEqualFloat64x2 OpLessEqualFloat64x4 OpLessEqualFloat64x8 - OpLessEqualInt8x16 - OpLessEqualInt8x32 OpLessEqualInt8x64 - OpLessEqualInt16x8 - OpLessEqualInt16x16 OpLessEqualInt16x32 - OpLessEqualInt32x4 - OpLessEqualInt32x8 OpLessEqualInt32x16 - OpLessEqualInt64x2 - OpLessEqualInt64x4 OpLessEqualInt64x8 OpLessEqualMaskedFloat32x4 OpLessEqualMaskedFloat32x8 @@ -5329,17 +5281,9 @@ const ( OpLessEqualMaskedUint64x2 OpLessEqualMaskedUint64x4 OpLessEqualMaskedUint64x8 - OpLessEqualUint8x16 - OpLessEqualUint8x32 OpLessEqualUint8x64 - OpLessEqualUint16x8 - OpLessEqualUint16x16 OpLessEqualUint16x32 - OpLessEqualUint32x4 - OpLessEqualUint32x8 OpLessEqualUint32x16 - OpLessEqualUint64x2 - OpLessEqualUint64x4 OpLessEqualUint64x8 OpLessFloat32x4 OpLessFloat32x8 @@ -5347,17 +5291,9 @@ const ( OpLessFloat64x2 OpLessFloat64x4 OpLessFloat64x8 - OpLessInt8x16 - OpLessInt8x32 OpLessInt8x64 - OpLessInt16x8 - OpLessInt16x16 OpLessInt16x32 - OpLessInt32x4 - OpLessInt32x8 OpLessInt32x16 - OpLessInt64x2 - OpLessInt64x4 OpLessInt64x8 OpLessMaskedFloat32x4 OpLessMaskedFloat32x8 @@ -5389,17 +5325,9 @@ const ( OpLessMaskedUint64x2 OpLessMaskedUint64x4 OpLessMaskedUint64x8 - OpLessUint8x16 - OpLessUint8x32 OpLessUint8x64 - OpLessUint16x8 - OpLessUint16x16 OpLessUint16x32 - OpLessUint32x4 - OpLessUint32x8 OpLessUint32x16 - OpLessUint64x2 - OpLessUint64x4 OpLessUint64x8 OpMaxFloat32x4 OpMaxFloat32x8 @@ -5627,17 +5555,9 @@ const ( OpNotEqualFloat64x2 OpNotEqualFloat64x4 OpNotEqualFloat64x8 - OpNotEqualInt8x16 - OpNotEqualInt8x32 OpNotEqualInt8x64 - OpNotEqualInt16x8 - OpNotEqualInt16x16 OpNotEqualInt16x32 - OpNotEqualInt32x4 - OpNotEqualInt32x8 OpNotEqualInt32x16 - OpNotEqualInt64x2 - OpNotEqualInt64x4 OpNotEqualInt64x8 OpNotEqualMaskedFloat32x4 OpNotEqualMaskedFloat32x8 @@ -5669,17 +5589,9 @@ const ( OpNotEqualMaskedUint64x2 OpNotEqualMaskedUint64x4 OpNotEqualMaskedUint64x8 - OpNotEqualUint8x16 - OpNotEqualUint8x32 OpNotEqualUint8x64 - OpNotEqualUint16x8 - OpNotEqualUint16x16 OpNotEqualUint16x32 - OpNotEqualUint32x4 - OpNotEqualUint32x8 OpNotEqualUint32x16 - OpNotEqualUint64x2 - OpNotEqualUint64x4 OpNotEqualUint64x8 OpOnesCountInt8x16 OpOnesCountInt8x32 @@ -34328,36 +34240,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPUB128", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVPCMPUB, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPCMPUB256", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVPCMPUB, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - 
outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VPCMPUB512", auxType: auxUInt8, @@ -34373,36 +34255,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPUW128", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVPCMPUW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPCMPUW256", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVPCMPUW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VPCMPUW512", auxType: auxUInt8, @@ -34418,36 +34270,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPUD128", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVPCMPUD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPCMPUD256", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVPCMPUD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VPCMPUD512", auxType: auxUInt8, @@ -34463,36 +34285,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPUQ128", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVPCMPUQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPCMPUQ256", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVPCMPUQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VPCMPUQ512", auxType: auxUInt8, @@ -34508,36 +34300,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPB128", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVPCMPB, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPCMPB256", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVPCMPB, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VPCMPB512", auxType: auxUInt8, @@ -34553,36 +34315,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPW128", - auxType: auxUInt8, - argLen: 2, - asm: 
x86.AVPCMPW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPCMPW256", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVPCMPW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VPCMPW512", auxType: auxUInt8, @@ -34598,36 +34330,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPD128", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVPCMPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPCMPD256", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVPCMPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VPCMPD512", auxType: auxUInt8, @@ -34643,36 +34345,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPQ128", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVPCMPQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPCMPQ256", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVPCMPQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VPCMPQ512", auxType: auxUInt8, @@ -66750,61 +66422,21 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, - { - name: "GreaterEqualInt8x16", - argLen: 2, - generic: true, - }, - { - name: "GreaterEqualInt8x32", - argLen: 2, - generic: true, - }, { name: "GreaterEqualInt8x64", argLen: 2, generic: true, }, - { - name: "GreaterEqualInt16x8", - argLen: 2, - generic: true, - }, - { - name: "GreaterEqualInt16x16", - argLen: 2, - generic: true, - }, { name: "GreaterEqualInt16x32", argLen: 2, generic: true, }, - { - name: "GreaterEqualInt32x4", - argLen: 2, - generic: true, - }, - { - name: "GreaterEqualInt32x8", - argLen: 2, - generic: true, - }, { name: "GreaterEqualInt32x16", argLen: 2, generic: true, }, - { - name: "GreaterEqualInt64x2", - argLen: 2, - generic: true, - }, - { - name: "GreaterEqualInt64x4", - argLen: 2, - generic: true, - }, { name: "GreaterEqualInt64x8", argLen: 2, @@ -66960,61 +66592,21 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "GreaterEqualUint8x16", - argLen: 2, - generic: true, - }, - { - name: "GreaterEqualUint8x32", - argLen: 2, - generic: true, - }, { name: "GreaterEqualUint8x64", argLen: 2, generic: true, }, - { - name: "GreaterEqualUint16x8", - argLen: 2, - generic: true, - 
}, - { - name: "GreaterEqualUint16x16", - argLen: 2, - generic: true, - }, { name: "GreaterEqualUint16x32", argLen: 2, generic: true, }, - { - name: "GreaterEqualUint32x4", - argLen: 2, - generic: true, - }, - { - name: "GreaterEqualUint32x8", - argLen: 2, - generic: true, - }, { name: "GreaterEqualUint32x16", argLen: 2, generic: true, }, - { - name: "GreaterEqualUint64x2", - argLen: 2, - generic: true, - }, - { - name: "GreaterEqualUint64x4", - argLen: 2, - generic: true, - }, { name: "GreaterEqualUint64x8", argLen: 2, @@ -67260,61 +66852,21 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "GreaterUint8x16", - argLen: 2, - generic: true, - }, - { - name: "GreaterUint8x32", - argLen: 2, - generic: true, - }, { name: "GreaterUint8x64", argLen: 2, generic: true, }, - { - name: "GreaterUint16x8", - argLen: 2, - generic: true, - }, - { - name: "GreaterUint16x16", - argLen: 2, - generic: true, - }, { name: "GreaterUint16x32", argLen: 2, generic: true, }, - { - name: "GreaterUint32x4", - argLen: 2, - generic: true, - }, - { - name: "GreaterUint32x8", - argLen: 2, - generic: true, - }, { name: "GreaterUint32x16", argLen: 2, generic: true, }, - { - name: "GreaterUint64x2", - argLen: 2, - generic: true, - }, - { - name: "GreaterUint64x4", - argLen: 2, - generic: true, - }, { name: "GreaterUint64x8", argLen: 2, @@ -67422,61 +66974,21 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, - { - name: "LessEqualInt8x16", - argLen: 2, - generic: true, - }, - { - name: "LessEqualInt8x32", - argLen: 2, - generic: true, - }, { name: "LessEqualInt8x64", argLen: 2, generic: true, }, - { - name: "LessEqualInt16x8", - argLen: 2, - generic: true, - }, - { - name: "LessEqualInt16x16", - argLen: 2, - generic: true, - }, { name: "LessEqualInt16x32", argLen: 2, generic: true, }, - { - name: "LessEqualInt32x4", - argLen: 2, - generic: true, - }, - { - name: "LessEqualInt32x8", - argLen: 2, - generic: true, - }, { name: "LessEqualInt32x16", argLen: 2, generic: true, }, - { - name: "LessEqualInt64x2", - argLen: 2, - generic: true, - }, - { - name: "LessEqualInt64x4", - argLen: 2, - generic: true, - }, { name: "LessEqualInt64x8", argLen: 2, @@ -67632,61 +67144,21 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "LessEqualUint8x16", - argLen: 2, - generic: true, - }, - { - name: "LessEqualUint8x32", - argLen: 2, - generic: true, - }, { name: "LessEqualUint8x64", argLen: 2, generic: true, }, - { - name: "LessEqualUint16x8", - argLen: 2, - generic: true, - }, - { - name: "LessEqualUint16x16", - argLen: 2, - generic: true, - }, { name: "LessEqualUint16x32", argLen: 2, generic: true, }, - { - name: "LessEqualUint32x4", - argLen: 2, - generic: true, - }, - { - name: "LessEqualUint32x8", - argLen: 2, - generic: true, - }, { name: "LessEqualUint32x16", argLen: 2, generic: true, }, - { - name: "LessEqualUint64x2", - argLen: 2, - generic: true, - }, - { - name: "LessEqualUint64x4", - argLen: 2, - generic: true, - }, { name: "LessEqualUint64x8", argLen: 2, @@ -67722,61 +67194,21 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, - { - name: "LessInt8x16", - argLen: 2, - generic: true, - }, - { - name: "LessInt8x32", - argLen: 2, - generic: true, - }, { name: "LessInt8x64", argLen: 2, generic: true, }, - { - name: "LessInt16x8", - argLen: 2, - generic: true, - }, - { - name: "LessInt16x16", - argLen: 2, - generic: true, - }, { name: "LessInt16x32", argLen: 2, generic: true, }, - { - name: "LessInt32x4", - argLen: 2, - generic: true, - }, - { - name: 
"LessInt32x8", - argLen: 2, - generic: true, - }, { name: "LessInt32x16", argLen: 2, generic: true, }, - { - name: "LessInt64x2", - argLen: 2, - generic: true, - }, - { - name: "LessInt64x4", - argLen: 2, - generic: true, - }, { name: "LessInt64x8", argLen: 2, @@ -67932,61 +67364,21 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "LessUint8x16", - argLen: 2, - generic: true, - }, - { - name: "LessUint8x32", - argLen: 2, - generic: true, - }, { name: "LessUint8x64", argLen: 2, generic: true, }, - { - name: "LessUint16x8", - argLen: 2, - generic: true, - }, - { - name: "LessUint16x16", - argLen: 2, - generic: true, - }, { name: "LessUint16x32", argLen: 2, generic: true, }, - { - name: "LessUint32x4", - argLen: 2, - generic: true, - }, - { - name: "LessUint32x8", - argLen: 2, - generic: true, - }, { name: "LessUint32x16", argLen: 2, generic: true, }, - { - name: "LessUint64x2", - argLen: 2, - generic: true, - }, - { - name: "LessUint64x4", - argLen: 2, - generic: true, - }, { name: "LessUint64x8", argLen: 2, @@ -69312,72 +68704,24 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "NotEqualInt8x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "NotEqualInt8x32", - argLen: 2, - commutative: true, - generic: true, - }, { name: "NotEqualInt8x64", argLen: 2, commutative: true, generic: true, }, - { - name: "NotEqualInt16x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "NotEqualInt16x16", - argLen: 2, - commutative: true, - generic: true, - }, { name: "NotEqualInt16x32", argLen: 2, commutative: true, generic: true, }, - { - name: "NotEqualInt32x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "NotEqualInt32x8", - argLen: 2, - commutative: true, - generic: true, - }, { name: "NotEqualInt32x16", argLen: 2, commutative: true, generic: true, }, - { - name: "NotEqualInt64x2", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "NotEqualInt64x4", - argLen: 2, - commutative: true, - generic: true, - }, { name: "NotEqualInt64x8", argLen: 2, @@ -69564,72 +68908,24 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "NotEqualUint8x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "NotEqualUint8x32", - argLen: 2, - commutative: true, - generic: true, - }, { name: "NotEqualUint8x64", argLen: 2, commutative: true, generic: true, }, - { - name: "NotEqualUint16x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "NotEqualUint16x16", - argLen: 2, - commutative: true, - generic: true, - }, { name: "NotEqualUint16x32", argLen: 2, commutative: true, generic: true, }, - { - name: "NotEqualUint32x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "NotEqualUint32x8", - argLen: 2, - commutative: true, - generic: true, - }, { name: "NotEqualUint32x16", argLen: 2, commutative: true, generic: true, }, - { - name: "NotEqualUint64x2", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "NotEqualUint64x4", - argLen: 2, - commutative: true, - generic: true, - }, { name: "NotEqualUint64x8", argLen: 2, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 0bdc0e63b7b536..0e2e2311f0f3c1 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -2304,28 +2304,12 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpGreaterEqualFloat64x4(v) case 
OpGreaterEqualFloat64x8: return rewriteValueAMD64_OpGreaterEqualFloat64x8(v) - case OpGreaterEqualInt16x16: - return rewriteValueAMD64_OpGreaterEqualInt16x16(v) case OpGreaterEqualInt16x32: return rewriteValueAMD64_OpGreaterEqualInt16x32(v) - case OpGreaterEqualInt16x8: - return rewriteValueAMD64_OpGreaterEqualInt16x8(v) case OpGreaterEqualInt32x16: return rewriteValueAMD64_OpGreaterEqualInt32x16(v) - case OpGreaterEqualInt32x4: - return rewriteValueAMD64_OpGreaterEqualInt32x4(v) - case OpGreaterEqualInt32x8: - return rewriteValueAMD64_OpGreaterEqualInt32x8(v) - case OpGreaterEqualInt64x2: - return rewriteValueAMD64_OpGreaterEqualInt64x2(v) - case OpGreaterEqualInt64x4: - return rewriteValueAMD64_OpGreaterEqualInt64x4(v) case OpGreaterEqualInt64x8: return rewriteValueAMD64_OpGreaterEqualInt64x8(v) - case OpGreaterEqualInt8x16: - return rewriteValueAMD64_OpGreaterEqualInt8x16(v) - case OpGreaterEqualInt8x32: - return rewriteValueAMD64_OpGreaterEqualInt8x32(v) case OpGreaterEqualInt8x64: return rewriteValueAMD64_OpGreaterEqualInt8x64(v) case OpGreaterEqualMaskedFloat32x16: @@ -2388,28 +2372,12 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpGreaterEqualMaskedUint8x32(v) case OpGreaterEqualMaskedUint8x64: return rewriteValueAMD64_OpGreaterEqualMaskedUint8x64(v) - case OpGreaterEqualUint16x16: - return rewriteValueAMD64_OpGreaterEqualUint16x16(v) case OpGreaterEqualUint16x32: return rewriteValueAMD64_OpGreaterEqualUint16x32(v) - case OpGreaterEqualUint16x8: - return rewriteValueAMD64_OpGreaterEqualUint16x8(v) case OpGreaterEqualUint32x16: return rewriteValueAMD64_OpGreaterEqualUint32x16(v) - case OpGreaterEqualUint32x4: - return rewriteValueAMD64_OpGreaterEqualUint32x4(v) - case OpGreaterEqualUint32x8: - return rewriteValueAMD64_OpGreaterEqualUint32x8(v) - case OpGreaterEqualUint64x2: - return rewriteValueAMD64_OpGreaterEqualUint64x2(v) - case OpGreaterEqualUint64x4: - return rewriteValueAMD64_OpGreaterEqualUint64x4(v) case OpGreaterEqualUint64x8: return rewriteValueAMD64_OpGreaterEqualUint64x8(v) - case OpGreaterEqualUint8x16: - return rewriteValueAMD64_OpGreaterEqualUint8x16(v) - case OpGreaterEqualUint8x32: - return rewriteValueAMD64_OpGreaterEqualUint8x32(v) case OpGreaterEqualUint8x64: return rewriteValueAMD64_OpGreaterEqualUint8x64(v) case OpGreaterFloat32x16: @@ -2516,28 +2484,12 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpGreaterMaskedUint8x32(v) case OpGreaterMaskedUint8x64: return rewriteValueAMD64_OpGreaterMaskedUint8x64(v) - case OpGreaterUint16x16: - return rewriteValueAMD64_OpGreaterUint16x16(v) case OpGreaterUint16x32: return rewriteValueAMD64_OpGreaterUint16x32(v) - case OpGreaterUint16x8: - return rewriteValueAMD64_OpGreaterUint16x8(v) case OpGreaterUint32x16: return rewriteValueAMD64_OpGreaterUint32x16(v) - case OpGreaterUint32x4: - return rewriteValueAMD64_OpGreaterUint32x4(v) - case OpGreaterUint32x8: - return rewriteValueAMD64_OpGreaterUint32x8(v) - case OpGreaterUint64x2: - return rewriteValueAMD64_OpGreaterUint64x2(v) - case OpGreaterUint64x4: - return rewriteValueAMD64_OpGreaterUint64x4(v) case OpGreaterUint64x8: return rewriteValueAMD64_OpGreaterUint64x8(v) - case OpGreaterUint8x16: - return rewriteValueAMD64_OpGreaterUint8x16(v) - case OpGreaterUint8x32: - return rewriteValueAMD64_OpGreaterUint8x32(v) case OpGreaterUint8x64: return rewriteValueAMD64_OpGreaterUint8x64(v) case OpHasCPUFeature: @@ -2639,28 +2591,12 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpLessEqualFloat64x4(v) case 
OpLessEqualFloat64x8: return rewriteValueAMD64_OpLessEqualFloat64x8(v) - case OpLessEqualInt16x16: - return rewriteValueAMD64_OpLessEqualInt16x16(v) case OpLessEqualInt16x32: return rewriteValueAMD64_OpLessEqualInt16x32(v) - case OpLessEqualInt16x8: - return rewriteValueAMD64_OpLessEqualInt16x8(v) case OpLessEqualInt32x16: return rewriteValueAMD64_OpLessEqualInt32x16(v) - case OpLessEqualInt32x4: - return rewriteValueAMD64_OpLessEqualInt32x4(v) - case OpLessEqualInt32x8: - return rewriteValueAMD64_OpLessEqualInt32x8(v) - case OpLessEqualInt64x2: - return rewriteValueAMD64_OpLessEqualInt64x2(v) - case OpLessEqualInt64x4: - return rewriteValueAMD64_OpLessEqualInt64x4(v) case OpLessEqualInt64x8: return rewriteValueAMD64_OpLessEqualInt64x8(v) - case OpLessEqualInt8x16: - return rewriteValueAMD64_OpLessEqualInt8x16(v) - case OpLessEqualInt8x32: - return rewriteValueAMD64_OpLessEqualInt8x32(v) case OpLessEqualInt8x64: return rewriteValueAMD64_OpLessEqualInt8x64(v) case OpLessEqualMaskedFloat32x16: @@ -2723,28 +2659,12 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpLessEqualMaskedUint8x32(v) case OpLessEqualMaskedUint8x64: return rewriteValueAMD64_OpLessEqualMaskedUint8x64(v) - case OpLessEqualUint16x16: - return rewriteValueAMD64_OpLessEqualUint16x16(v) case OpLessEqualUint16x32: return rewriteValueAMD64_OpLessEqualUint16x32(v) - case OpLessEqualUint16x8: - return rewriteValueAMD64_OpLessEqualUint16x8(v) case OpLessEqualUint32x16: return rewriteValueAMD64_OpLessEqualUint32x16(v) - case OpLessEqualUint32x4: - return rewriteValueAMD64_OpLessEqualUint32x4(v) - case OpLessEqualUint32x8: - return rewriteValueAMD64_OpLessEqualUint32x8(v) - case OpLessEqualUint64x2: - return rewriteValueAMD64_OpLessEqualUint64x2(v) - case OpLessEqualUint64x4: - return rewriteValueAMD64_OpLessEqualUint64x4(v) case OpLessEqualUint64x8: return rewriteValueAMD64_OpLessEqualUint64x8(v) - case OpLessEqualUint8x16: - return rewriteValueAMD64_OpLessEqualUint8x16(v) - case OpLessEqualUint8x32: - return rewriteValueAMD64_OpLessEqualUint8x32(v) case OpLessEqualUint8x64: return rewriteValueAMD64_OpLessEqualUint8x64(v) case OpLessFloat32x16: @@ -2759,28 +2679,12 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpLessFloat64x4(v) case OpLessFloat64x8: return rewriteValueAMD64_OpLessFloat64x8(v) - case OpLessInt16x16: - return rewriteValueAMD64_OpLessInt16x16(v) case OpLessInt16x32: return rewriteValueAMD64_OpLessInt16x32(v) - case OpLessInt16x8: - return rewriteValueAMD64_OpLessInt16x8(v) case OpLessInt32x16: return rewriteValueAMD64_OpLessInt32x16(v) - case OpLessInt32x4: - return rewriteValueAMD64_OpLessInt32x4(v) - case OpLessInt32x8: - return rewriteValueAMD64_OpLessInt32x8(v) - case OpLessInt64x2: - return rewriteValueAMD64_OpLessInt64x2(v) - case OpLessInt64x4: - return rewriteValueAMD64_OpLessInt64x4(v) case OpLessInt64x8: return rewriteValueAMD64_OpLessInt64x8(v) - case OpLessInt8x16: - return rewriteValueAMD64_OpLessInt8x16(v) - case OpLessInt8x32: - return rewriteValueAMD64_OpLessInt8x32(v) case OpLessInt8x64: return rewriteValueAMD64_OpLessInt8x64(v) case OpLessMaskedFloat32x16: @@ -2843,28 +2747,12 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpLessMaskedUint8x32(v) case OpLessMaskedUint8x64: return rewriteValueAMD64_OpLessMaskedUint8x64(v) - case OpLessUint16x16: - return rewriteValueAMD64_OpLessUint16x16(v) case OpLessUint16x32: return rewriteValueAMD64_OpLessUint16x32(v) - case OpLessUint16x8: - return rewriteValueAMD64_OpLessUint16x8(v) case 
OpLessUint32x16: return rewriteValueAMD64_OpLessUint32x16(v) - case OpLessUint32x4: - return rewriteValueAMD64_OpLessUint32x4(v) - case OpLessUint32x8: - return rewriteValueAMD64_OpLessUint32x8(v) - case OpLessUint64x2: - return rewriteValueAMD64_OpLessUint64x2(v) - case OpLessUint64x4: - return rewriteValueAMD64_OpLessUint64x4(v) case OpLessUint64x8: return rewriteValueAMD64_OpLessUint64x8(v) - case OpLessUint8x16: - return rewriteValueAMD64_OpLessUint8x16(v) - case OpLessUint8x32: - return rewriteValueAMD64_OpLessUint8x32(v) case OpLessUint8x64: return rewriteValueAMD64_OpLessUint8x64(v) case OpLoad: @@ -3583,28 +3471,12 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpNotEqualFloat64x4(v) case OpNotEqualFloat64x8: return rewriteValueAMD64_OpNotEqualFloat64x8(v) - case OpNotEqualInt16x16: - return rewriteValueAMD64_OpNotEqualInt16x16(v) case OpNotEqualInt16x32: return rewriteValueAMD64_OpNotEqualInt16x32(v) - case OpNotEqualInt16x8: - return rewriteValueAMD64_OpNotEqualInt16x8(v) case OpNotEqualInt32x16: return rewriteValueAMD64_OpNotEqualInt32x16(v) - case OpNotEqualInt32x4: - return rewriteValueAMD64_OpNotEqualInt32x4(v) - case OpNotEqualInt32x8: - return rewriteValueAMD64_OpNotEqualInt32x8(v) - case OpNotEqualInt64x2: - return rewriteValueAMD64_OpNotEqualInt64x2(v) - case OpNotEqualInt64x4: - return rewriteValueAMD64_OpNotEqualInt64x4(v) case OpNotEqualInt64x8: return rewriteValueAMD64_OpNotEqualInt64x8(v) - case OpNotEqualInt8x16: - return rewriteValueAMD64_OpNotEqualInt8x16(v) - case OpNotEqualInt8x32: - return rewriteValueAMD64_OpNotEqualInt8x32(v) case OpNotEqualInt8x64: return rewriteValueAMD64_OpNotEqualInt8x64(v) case OpNotEqualMaskedFloat32x16: @@ -3667,28 +3539,12 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpNotEqualMaskedUint8x32(v) case OpNotEqualMaskedUint8x64: return rewriteValueAMD64_OpNotEqualMaskedUint8x64(v) - case OpNotEqualUint16x16: - return rewriteValueAMD64_OpNotEqualUint16x16(v) case OpNotEqualUint16x32: return rewriteValueAMD64_OpNotEqualUint16x32(v) - case OpNotEqualUint16x8: - return rewriteValueAMD64_OpNotEqualUint16x8(v) case OpNotEqualUint32x16: return rewriteValueAMD64_OpNotEqualUint32x16(v) - case OpNotEqualUint32x4: - return rewriteValueAMD64_OpNotEqualUint32x4(v) - case OpNotEqualUint32x8: - return rewriteValueAMD64_OpNotEqualUint32x8(v) - case OpNotEqualUint64x2: - return rewriteValueAMD64_OpNotEqualUint64x2(v) - case OpNotEqualUint64x4: - return rewriteValueAMD64_OpNotEqualUint64x4(v) case OpNotEqualUint64x8: return rewriteValueAMD64_OpNotEqualUint64x8(v) - case OpNotEqualUint8x16: - return rewriteValueAMD64_OpNotEqualUint8x16(v) - case OpNotEqualUint8x32: - return rewriteValueAMD64_OpNotEqualUint8x32(v) case OpNotEqualUint8x64: return rewriteValueAMD64_OpNotEqualUint8x64(v) case OpOffPtr: @@ -37872,24 +37728,6 @@ func rewriteValueAMD64_OpGreaterEqualFloat64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpGreaterEqualInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualInt16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPW256 [13] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpGreaterEqualInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -37908,24 +37746,6 @@ func rewriteValueAMD64_OpGreaterEqualInt16x32(v *Value) 
bool { return true } } -func rewriteValueAMD64_OpGreaterEqualInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualInt16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPW128 [13] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpGreaterEqualInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -37944,78 +37764,6 @@ func rewriteValueAMD64_OpGreaterEqualInt32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpGreaterEqualInt32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualInt32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPD128 [13] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualInt32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualInt32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPD256 [13] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualInt64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualInt64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPQ128 [13] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualInt64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualInt64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPQ256 [13] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpGreaterEqualInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -38034,42 +37782,6 @@ func rewriteValueAMD64_OpGreaterEqualInt64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpGreaterEqualInt8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualInt8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPB128 [13] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualInt8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualInt8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPB256 [13] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func 
rewriteValueAMD64_OpGreaterEqualInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -38748,24 +38460,6 @@ func rewriteValueAMD64_OpGreaterEqualMaskedUint8x64(v *Value) bool { return true } } -func rewriteValueAMD64_OpGreaterEqualUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualUint16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPUW256 [13] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpGreaterEqualUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -38784,24 +38478,6 @@ func rewriteValueAMD64_OpGreaterEqualUint16x32(v *Value) bool { return true } } -func rewriteValueAMD64_OpGreaterEqualUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualUint16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPUW128 [13] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpGreaterEqualUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -38820,78 +38496,6 @@ func rewriteValueAMD64_OpGreaterEqualUint32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpGreaterEqualUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualUint32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPUD128 [13] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualUint32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualUint32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPUD256 [13] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualUint64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualUint64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPUQ128 [13] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualUint64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualUint64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPUQ256 [13] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpGreaterEqualUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -38910,42 +38514,6 @@ func rewriteValueAMD64_OpGreaterEqualUint64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpGreaterEqualUint8x16(v *Value) bool { - 
v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualUint8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPUB128 [13] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualUint8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPUB256 [13] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpGreaterEqualUint8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -39784,24 +39352,6 @@ func rewriteValueAMD64_OpGreaterMaskedUint8x64(v *Value) bool { return true } } -func rewriteValueAMD64_OpGreaterUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPUW256 [14] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpGreaterUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -39820,24 +39370,6 @@ func rewriteValueAMD64_OpGreaterUint16x32(v *Value) bool { return true } } -func rewriteValueAMD64_OpGreaterUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPUW128 [14] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpGreaterUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -39856,78 +39388,6 @@ func rewriteValueAMD64_OpGreaterUint32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpGreaterUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPUD128 [14] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterUint32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPUD256 [14] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterUint64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPUQ128 [14] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - 
v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterUint64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPUQ256 [14] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpGreaterUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -39946,42 +39406,6 @@ func rewriteValueAMD64_OpGreaterUint64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpGreaterUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPUB128 [14] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPUB256 [14] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpGreaterUint8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -40699,24 +40123,6 @@ func rewriteValueAMD64_OpLessEqualFloat64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpLessEqualInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPW256 [2] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpLessEqualInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -40735,24 +40141,6 @@ func rewriteValueAMD64_OpLessEqualInt16x32(v *Value) bool { return true } } -func rewriteValueAMD64_OpLessEqualInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPW128 [2] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpLessEqualInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -40771,78 +40159,6 @@ func rewriteValueAMD64_OpLessEqualInt32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpLessEqualInt32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPD128 [2] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualInt32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block 
- typ := &b.Func.Config.Types - // match: (LessEqualInt32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPD256 [2] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualInt64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPQ128 [2] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualInt64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPQ256 [2] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpLessEqualInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -40861,42 +40177,6 @@ func rewriteValueAMD64_OpLessEqualInt64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpLessEqualInt8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPB128 [2] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualInt8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPB256 [2] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpLessEqualInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -41575,24 +40855,6 @@ func rewriteValueAMD64_OpLessEqualMaskedUint8x64(v *Value) bool { return true } } -func rewriteValueAMD64_OpLessEqualUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPUW256 [2] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpLessEqualUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -41611,24 +40873,6 @@ func rewriteValueAMD64_OpLessEqualUint16x32(v *Value) bool { return true } } -func rewriteValueAMD64_OpLessEqualUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPUW128 [2] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func 
rewriteValueAMD64_OpLessEqualUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -41647,78 +40891,6 @@ func rewriteValueAMD64_OpLessEqualUint32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpLessEqualUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPUD128 [2] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualUint32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPUD256 [2] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualUint64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPUQ128 [2] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualUint64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPUQ256 [2] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpLessEqualUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -41737,42 +40909,6 @@ func rewriteValueAMD64_OpLessEqualUint64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpLessEqualUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPUB128 [2] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPUB256 [2] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpLessEqualUint8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -41883,24 +41019,6 @@ func rewriteValueAMD64_OpLessFloat64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpLessInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessInt16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPW256 [1] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) - 
v0.AuxInt = uint8ToAuxInt(1) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpLessInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -41919,24 +41037,6 @@ func rewriteValueAMD64_OpLessInt16x32(v *Value) bool { return true } } -func rewriteValueAMD64_OpLessInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessInt16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPW128 [1] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpLessInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -41955,78 +41055,6 @@ func rewriteValueAMD64_OpLessInt32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpLessInt32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessInt32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPD128 [1] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessInt32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessInt32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPD256 [1] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessInt64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessInt64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPQ128 [1] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessInt64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessInt64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPQ256 [1] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpLessInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -42045,42 +41073,6 @@ func rewriteValueAMD64_OpLessInt64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpLessInt8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessInt8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPB128 [1] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessInt8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessInt8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPB256 [1] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - 
v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpLessInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -42759,24 +41751,6 @@ func rewriteValueAMD64_OpLessMaskedUint8x64(v *Value) bool { return true } } -func rewriteValueAMD64_OpLessUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessUint16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPUW256 [1] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpLessUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -42795,24 +41769,6 @@ func rewriteValueAMD64_OpLessUint16x32(v *Value) bool { return true } } -func rewriteValueAMD64_OpLessUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessUint16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPUW128 [1] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpLessUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -42831,78 +41787,6 @@ func rewriteValueAMD64_OpLessUint32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpLessUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessUint32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPUD128 [1] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessUint32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessUint32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPUD256 [1] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessUint64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessUint64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPUQ128 [1] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessUint64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessUint64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPUQ256 [1] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpLessUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -42921,42 +41805,6 @@ func rewriteValueAMD64_OpLessUint64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpLessUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessUint8x16 x y) - // 
result: (VPMOVMToVec8x16 (VPCMPUB128 [1] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessUint8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPUB256 [1] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpLessUint8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -47070,24 +45918,6 @@ func rewriteValueAMD64_OpNotEqualFloat64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpNotEqualInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPW256 [4] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpNotEqualInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -47106,24 +45936,6 @@ func rewriteValueAMD64_OpNotEqualInt16x32(v *Value) bool { return true } } -func rewriteValueAMD64_OpNotEqualInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPW128 [4] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpNotEqualInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -47142,78 +45954,6 @@ func rewriteValueAMD64_OpNotEqualInt32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpNotEqualInt32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPD128 [4] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNotEqualInt32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPD256 [4] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNotEqualInt64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPQ128 [4] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNotEqualInt64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := 
&b.Func.Config.Types - // match: (NotEqualInt64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPQ256 [4] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpNotEqualInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -47232,42 +45972,6 @@ func rewriteValueAMD64_OpNotEqualInt64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpNotEqualInt8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPB128 [4] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNotEqualInt8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPB256 [4] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpNotEqualInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -47946,24 +46650,6 @@ func rewriteValueAMD64_OpNotEqualMaskedUint8x64(v *Value) bool { return true } } -func rewriteValueAMD64_OpNotEqualUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPUW256 [4] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpNotEqualUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -47982,24 +46668,6 @@ func rewriteValueAMD64_OpNotEqualUint16x32(v *Value) bool { return true } } -func rewriteValueAMD64_OpNotEqualUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPUW128 [4] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpNotEqualUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -48018,78 +46686,6 @@ func rewriteValueAMD64_OpNotEqualUint32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpNotEqualUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPUD128 [4] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNotEqualUint32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPUD256 [4] x y)) - for { - x := v_0 - y := v_1 - 
v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNotEqualUint64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPUQ128 [4] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNotEqualUint64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPUQ256 [4] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpNotEqualUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -48108,42 +46704,6 @@ func rewriteValueAMD64_OpNotEqualUint64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpNotEqualUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPUB128 [4] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNotEqualUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPUB256 [4] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpNotEqualUint8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 7a95a4450d4ce2..682a37e91ba274 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -602,17 +602,9 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float64x2.Greater", opLen2(ssa.OpGreaterFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.Greater", opLen2(ssa.OpGreaterFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.Greater", opLen2(ssa.OpGreaterFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.Greater", opLen2(ssa.OpGreaterUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.Greater", opLen2(ssa.OpGreaterUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x64.Greater", opLen2(ssa.OpGreaterUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.Greater", opLen2(ssa.OpGreaterUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.Greater", opLen2(ssa.OpGreaterUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x32.Greater", opLen2(ssa.OpGreaterUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.Greater", opLen2(ssa.OpGreaterUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.Greater", opLen2(ssa.OpGreaterUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x16.Greater", opLen2(ssa.OpGreaterUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.Greater", opLen2(ssa.OpGreaterUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.Greater", opLen2(ssa.OpGreaterUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.Greater", opLen2(ssa.OpGreaterUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.GreaterEqual", opLen2(ssa.OpGreaterEqualFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.GreaterEqual", opLen2(ssa.OpGreaterEqualFloat32x8, types.TypeVec256), sys.AMD64) @@ -620,29 +612,13 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float64x2.GreaterEqual", opLen2(ssa.OpGreaterEqualFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.GreaterEqual", opLen2(ssa.OpGreaterEqualFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.GreaterEqual", opLen2(ssa.OpGreaterEqualFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.GreaterEqual", opLen2(ssa.OpGreaterEqualInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.GreaterEqual", opLen2(ssa.OpGreaterEqualInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x64.GreaterEqual", opLen2(ssa.OpGreaterEqualInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.GreaterEqual", opLen2(ssa.OpGreaterEqualInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.GreaterEqual", opLen2(ssa.OpGreaterEqualInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.GreaterEqual", opLen2(ssa.OpGreaterEqualInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.GreaterEqual", opLen2(ssa.OpGreaterEqualInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.GreaterEqual", opLen2(ssa.OpGreaterEqualInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x16.GreaterEqual", opLen2(ssa.OpGreaterEqualInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.GreaterEqual", opLen2(ssa.OpGreaterEqualInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.GreaterEqual", opLen2(ssa.OpGreaterEqualInt64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int64x8.GreaterEqual", opLen2(ssa.OpGreaterEqualInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.GreaterEqual", opLen2(ssa.OpGreaterEqualUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.GreaterEqual", opLen2(ssa.OpGreaterEqualUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x64.GreaterEqual", opLen2(ssa.OpGreaterEqualUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.GreaterEqual", opLen2(ssa.OpGreaterEqualUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.GreaterEqual", opLen2(ssa.OpGreaterEqualUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x32.GreaterEqual", opLen2(ssa.OpGreaterEqualUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.GreaterEqual", opLen2(ssa.OpGreaterEqualUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.GreaterEqual", opLen2(ssa.OpGreaterEqualUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x16.GreaterEqual", opLen2(ssa.OpGreaterEqualUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.GreaterEqual", opLen2(ssa.OpGreaterEqualUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.GreaterEqual", opLen2(ssa.OpGreaterEqualUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.GreaterEqual", opLen2(ssa.OpGreaterEqualUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedFloat32x8, types.TypeVec256), sys.AMD64) @@ -722,29 +698,13 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float64x2.Less", opLen2(ssa.OpLessFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.Less", opLen2(ssa.OpLessFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.Less", opLen2(ssa.OpLessFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.Less", opLen2(ssa.OpLessInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.Less", opLen2(ssa.OpLessInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x64.Less", opLen2(ssa.OpLessInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.Less", opLen2(ssa.OpLessInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.Less", opLen2(ssa.OpLessInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.Less", opLen2(ssa.OpLessInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.Less", opLen2(ssa.OpLessInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.Less", opLen2(ssa.OpLessInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x16.Less", opLen2(ssa.OpLessInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.Less", opLen2(ssa.OpLessInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.Less", opLen2(ssa.OpLessInt64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int64x8.Less", opLen2(ssa.OpLessInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.Less", opLen2(ssa.OpLessUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.Less", opLen2(ssa.OpLessUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x64.Less", opLen2(ssa.OpLessUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.Less", opLen2(ssa.OpLessUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.Less", opLen2(ssa.OpLessUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x32.Less", opLen2(ssa.OpLessUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.Less", opLen2(ssa.OpLessUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.Less", opLen2(ssa.OpLessUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x16.Less", opLen2(ssa.OpLessUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.Less", opLen2(ssa.OpLessUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.Less", opLen2(ssa.OpLessUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.Less", opLen2(ssa.OpLessUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.LessEqual", opLen2(ssa.OpLessEqualFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.LessEqual", opLen2(ssa.OpLessEqualFloat32x8, types.TypeVec256), sys.AMD64) @@ -752,29 +712,13 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float64x2.LessEqual", opLen2(ssa.OpLessEqualFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.LessEqual", opLen2(ssa.OpLessEqualFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.LessEqual", opLen2(ssa.OpLessEqualFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.LessEqual", opLen2(ssa.OpLessEqualInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.LessEqual", opLen2(ssa.OpLessEqualInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x64.LessEqual", opLen2(ssa.OpLessEqualInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.LessEqual", opLen2(ssa.OpLessEqualInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.LessEqual", opLen2(ssa.OpLessEqualInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.LessEqual", opLen2(ssa.OpLessEqualInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.LessEqual", opLen2(ssa.OpLessEqualInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.LessEqual", opLen2(ssa.OpLessEqualInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x16.LessEqual", opLen2(ssa.OpLessEqualInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.LessEqual", opLen2(ssa.OpLessEqualInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.LessEqual", opLen2(ssa.OpLessEqualInt64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int64x8.LessEqual", opLen2(ssa.OpLessEqualInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.LessEqual", opLen2(ssa.OpLessEqualUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.LessEqual", opLen2(ssa.OpLessEqualUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x64.LessEqual", opLen2(ssa.OpLessEqualUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.LessEqual", opLen2(ssa.OpLessEqualUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.LessEqual", opLen2(ssa.OpLessEqualUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x32.LessEqual", opLen2(ssa.OpLessEqualUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.LessEqual", opLen2(ssa.OpLessEqualUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.LessEqual", opLen2(ssa.OpLessEqualUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x16.LessEqual", opLen2(ssa.OpLessEqualUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.LessEqual", opLen2(ssa.OpLessEqualUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.LessEqual", opLen2(ssa.OpLessEqualUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.LessEqual", opLen2(ssa.OpLessEqualUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedFloat32x8, types.TypeVec256), sys.AMD64) @@ -1062,29 +1006,13 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float64x2.NotEqual", opLen2(ssa.OpNotEqualFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.NotEqual", opLen2(ssa.OpNotEqualFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.NotEqual", opLen2(ssa.OpNotEqualFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.NotEqual", opLen2(ssa.OpNotEqualInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.NotEqual", opLen2(ssa.OpNotEqualInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x64.NotEqual", opLen2(ssa.OpNotEqualInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.NotEqual", opLen2(ssa.OpNotEqualInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.NotEqual", opLen2(ssa.OpNotEqualInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.NotEqual", opLen2(ssa.OpNotEqualInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.NotEqual", opLen2(ssa.OpNotEqualInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.NotEqual", opLen2(ssa.OpNotEqualInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x16.NotEqual", opLen2(ssa.OpNotEqualInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.NotEqual", opLen2(ssa.OpNotEqualInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.NotEqual", opLen2(ssa.OpNotEqualInt64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int64x8.NotEqual", opLen2(ssa.OpNotEqualInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.NotEqual", opLen2(ssa.OpNotEqualUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.NotEqual", opLen2(ssa.OpNotEqualUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x64.NotEqual", opLen2(ssa.OpNotEqualUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.NotEqual", opLen2(ssa.OpNotEqualUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.NotEqual", opLen2(ssa.OpNotEqualUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x32.NotEqual", opLen2(ssa.OpNotEqualUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.NotEqual", opLen2(ssa.OpNotEqualUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.NotEqual", opLen2(ssa.OpNotEqualUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x16.NotEqual", opLen2(ssa.OpNotEqualUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.NotEqual", opLen2(ssa.OpNotEqualUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.NotEqual", opLen2(ssa.OpNotEqualUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.NotEqual", opLen2(ssa.OpNotEqualUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedFloat32x8, types.TypeVec256), sys.AMD64) diff --git a/src/simd/compare_test.go b/src/simd/compare_test.go index 19b1f3886d7d1b..7fd20cf5d79f2a 100644 --- a/src/simd/compare_test.go +++ b/src/simd/compare_test.go @@ -59,17 +59,32 @@ func TestLess(t *testing.T) { testFloat64x2Compare(t, simd.Float64x2.Less, lessSlice[float64]) testFloat64x4Compare(t, simd.Float64x4.Less, lessSlice[float64]) - if comparisonFixed { - testInt16x16Compare(t, simd.Int16x16.Less, lessSlice[int16]) - testInt16x8Compare(t, simd.Int16x8.Less, lessSlice[int16]) - testInt32x4Compare(t, simd.Int32x4.Less, lessSlice[int32]) - testInt32x8Compare(t, 
simd.Int32x8.Less, lessSlice[int32]) - testInt64x2Compare(t, simd.Int64x2.Less, lessSlice[int64]) - testInt64x4Compare(t, simd.Int64x4.Less, lessSlice[int64]) - testInt8x16Compare(t, simd.Int8x16.Less, lessSlice[int8]) - testInt8x32Compare(t, simd.Int8x32.Less, lessSlice[int8]) - - } + testInt16x16Compare(t, simd.Int16x16.Less, lessSlice[int16]) + testInt16x8Compare(t, simd.Int16x8.Less, lessSlice[int16]) + testInt32x4Compare(t, simd.Int32x4.Less, lessSlice[int32]) + testInt32x8Compare(t, simd.Int32x8.Less, lessSlice[int32]) + testInt64x2Compare(t, simd.Int64x2.Less, lessSlice[int64]) + testInt64x4Compare(t, simd.Int64x4.Less, lessSlice[int64]) + testInt8x16Compare(t, simd.Int8x16.Less, lessSlice[int8]) + testInt8x32Compare(t, simd.Int8x32.Less, lessSlice[int8]) + + testInt16x16Compare(t, simd.Int16x16.Less, lessSlice[int16]) + testInt16x8Compare(t, simd.Int16x8.Less, lessSlice[int16]) + testInt32x4Compare(t, simd.Int32x4.Less, lessSlice[int32]) + testInt32x8Compare(t, simd.Int32x8.Less, lessSlice[int32]) + testInt64x2Compare(t, simd.Int64x2.Less, lessSlice[int64]) + testInt64x4Compare(t, simd.Int64x4.Less, lessSlice[int64]) + testInt8x16Compare(t, simd.Int8x16.Less, lessSlice[int8]) + testInt8x32Compare(t, simd.Int8x32.Less, lessSlice[int8]) + + testUint16x16Compare(t, simd.Uint16x16.Less, lessSlice[uint16]) + testUint16x8Compare(t, simd.Uint16x8.Less, lessSlice[uint16]) + testUint32x4Compare(t, simd.Uint32x4.Less, lessSlice[uint32]) + testUint32x8Compare(t, simd.Uint32x8.Less, lessSlice[uint32]) + testUint64x2Compare(t, simd.Uint64x2.Less, lessSlice[uint64]) + testUint64x4Compare(t, simd.Uint64x4.Less, lessSlice[uint64]) + testUint8x16Compare(t, simd.Uint8x16.Less, lessSlice[uint8]) + testUint8x32Compare(t, simd.Uint8x32.Less, lessSlice[uint8]) if simd.HasAVX512() { testUint16x16Compare(t, simd.Uint16x16.Less, lessSlice[uint16]) @@ -100,28 +115,25 @@ func TestLessEqual(t *testing.T) { testFloat64x2Compare(t, simd.Float64x2.LessEqual, lessEqualSlice[float64]) testFloat64x4Compare(t, simd.Float64x4.LessEqual, lessEqualSlice[float64]) - if comparisonFixed { - testInt16x16Compare(t, simd.Int16x16.LessEqual, lessEqualSlice[int16]) - testInt16x8Compare(t, simd.Int16x8.LessEqual, lessEqualSlice[int16]) - testInt32x4Compare(t, simd.Int32x4.LessEqual, lessEqualSlice[int32]) - testInt32x8Compare(t, simd.Int32x8.LessEqual, lessEqualSlice[int32]) - testInt64x2Compare(t, simd.Int64x2.LessEqual, lessEqualSlice[int64]) - testInt64x4Compare(t, simd.Int64x4.LessEqual, lessEqualSlice[int64]) - testInt8x16Compare(t, simd.Int8x16.LessEqual, lessEqualSlice[int8]) - testInt8x32Compare(t, simd.Int8x32.LessEqual, lessEqualSlice[int8]) - - } + testInt16x16Compare(t, simd.Int16x16.LessEqual, lessEqualSlice[int16]) + testInt16x8Compare(t, simd.Int16x8.LessEqual, lessEqualSlice[int16]) + testInt32x4Compare(t, simd.Int32x4.LessEqual, lessEqualSlice[int32]) + testInt32x8Compare(t, simd.Int32x8.LessEqual, lessEqualSlice[int32]) + testInt64x2Compare(t, simd.Int64x2.LessEqual, lessEqualSlice[int64]) + testInt64x4Compare(t, simd.Int64x4.LessEqual, lessEqualSlice[int64]) + testInt8x16Compare(t, simd.Int8x16.LessEqual, lessEqualSlice[int8]) + testInt8x32Compare(t, simd.Int8x32.LessEqual, lessEqualSlice[int8]) + + testUint16x16Compare(t, simd.Uint16x16.LessEqual, lessEqualSlice[uint16]) + testUint16x8Compare(t, simd.Uint16x8.LessEqual, lessEqualSlice[uint16]) + testUint32x4Compare(t, simd.Uint32x4.LessEqual, lessEqualSlice[uint32]) + testUint32x8Compare(t, simd.Uint32x8.LessEqual, lessEqualSlice[uint32]) + 
testUint64x2Compare(t, simd.Uint64x2.LessEqual, lessEqualSlice[uint64]) + testUint64x4Compare(t, simd.Uint64x4.LessEqual, lessEqualSlice[uint64]) + testUint8x16Compare(t, simd.Uint8x16.LessEqual, lessEqualSlice[uint8]) + testUint8x32Compare(t, simd.Uint8x32.LessEqual, lessEqualSlice[uint8]) if simd.HasAVX512() { - testUint16x16Compare(t, simd.Uint16x16.LessEqual, lessEqualSlice[uint16]) - testUint16x8Compare(t, simd.Uint16x8.LessEqual, lessEqualSlice[uint16]) - testUint32x4Compare(t, simd.Uint32x4.LessEqual, lessEqualSlice[uint32]) - testUint32x8Compare(t, simd.Uint32x8.LessEqual, lessEqualSlice[uint32]) - testUint64x2Compare(t, simd.Uint64x2.LessEqual, lessEqualSlice[uint64]) - testUint64x4Compare(t, simd.Uint64x4.LessEqual, lessEqualSlice[uint64]) - testUint8x16Compare(t, simd.Uint8x16.LessEqual, lessEqualSlice[uint8]) - testUint8x32Compare(t, simd.Uint8x32.LessEqual, lessEqualSlice[uint8]) - testFloat32x16Compare(t, simd.Float32x16.LessEqual, lessEqualSlice[float32]) testFloat64x8Compare(t, simd.Float64x8.LessEqual, lessEqualSlice[float64]) testInt8x64Compare(t, simd.Int8x64.LessEqual, lessEqualSlice[int8]) @@ -151,16 +163,17 @@ func TestGreater(t *testing.T) { testInt8x16Compare(t, simd.Int8x16.Greater, greaterSlice[int8]) testInt8x32Compare(t, simd.Int8x32.Greater, greaterSlice[int8]) - if simd.HasAVX512() { - testUint16x16Compare(t, simd.Uint16x16.Greater, greaterSlice[uint16]) - testUint16x8Compare(t, simd.Uint16x8.Greater, greaterSlice[uint16]) - testUint32x4Compare(t, simd.Uint32x4.Greater, greaterSlice[uint32]) - testUint32x8Compare(t, simd.Uint32x8.Greater, greaterSlice[uint32]) + testUint16x16Compare(t, simd.Uint16x16.Greater, greaterSlice[uint16]) + testUint16x8Compare(t, simd.Uint16x8.Greater, greaterSlice[uint16]) + testUint32x4Compare(t, simd.Uint32x4.Greater, greaterSlice[uint32]) + testUint32x8Compare(t, simd.Uint32x8.Greater, greaterSlice[uint32]) - testUint64x2Compare(t, simd.Uint64x2.Greater, greaterSlice[uint64]) - testUint64x4Compare(t, simd.Uint64x4.Greater, greaterSlice[uint64]) - testUint8x16Compare(t, simd.Uint8x16.Greater, greaterSlice[uint8]) - testUint8x32Compare(t, simd.Uint8x32.Greater, greaterSlice[uint8]) + testUint64x2Compare(t, simd.Uint64x2.Greater, greaterSlice[uint64]) + testUint64x4Compare(t, simd.Uint64x4.Greater, greaterSlice[uint64]) + testUint8x16Compare(t, simd.Uint8x16.Greater, greaterSlice[uint8]) + testUint8x32Compare(t, simd.Uint8x32.Greater, greaterSlice[uint8]) + + if simd.HasAVX512() { testFloat32x16Compare(t, simd.Float32x16.Greater, greaterSlice[float32]) testFloat64x8Compare(t, simd.Float64x8.Greater, greaterSlice[float64]) @@ -181,28 +194,25 @@ func TestGreaterEqual(t *testing.T) { testFloat64x2Compare(t, simd.Float64x2.GreaterEqual, greaterEqualSlice[float64]) testFloat64x4Compare(t, simd.Float64x4.GreaterEqual, greaterEqualSlice[float64]) - if comparisonFixed { - testInt16x16Compare(t, simd.Int16x16.GreaterEqual, greaterEqualSlice[int16]) - testInt16x8Compare(t, simd.Int16x8.GreaterEqual, greaterEqualSlice[int16]) - testInt32x4Compare(t, simd.Int32x4.GreaterEqual, greaterEqualSlice[int32]) - testInt32x8Compare(t, simd.Int32x8.GreaterEqual, greaterEqualSlice[int32]) - testInt64x2Compare(t, simd.Int64x2.GreaterEqual, greaterEqualSlice[int64]) - testInt64x4Compare(t, simd.Int64x4.GreaterEqual, greaterEqualSlice[int64]) - testInt8x16Compare(t, simd.Int8x16.GreaterEqual, greaterEqualSlice[int8]) - testInt8x32Compare(t, simd.Int8x32.GreaterEqual, greaterEqualSlice[int8]) - - } + testInt16x16Compare(t, simd.Int16x16.GreaterEqual, 
greaterEqualSlice[int16]) + testInt16x8Compare(t, simd.Int16x8.GreaterEqual, greaterEqualSlice[int16]) + testInt32x4Compare(t, simd.Int32x4.GreaterEqual, greaterEqualSlice[int32]) + testInt32x8Compare(t, simd.Int32x8.GreaterEqual, greaterEqualSlice[int32]) + testInt64x2Compare(t, simd.Int64x2.GreaterEqual, greaterEqualSlice[int64]) + testInt64x4Compare(t, simd.Int64x4.GreaterEqual, greaterEqualSlice[int64]) + testInt8x16Compare(t, simd.Int8x16.GreaterEqual, greaterEqualSlice[int8]) + testInt8x32Compare(t, simd.Int8x32.GreaterEqual, greaterEqualSlice[int8]) + + testUint16x16Compare(t, simd.Uint16x16.GreaterEqual, greaterEqualSlice[uint16]) + testUint16x8Compare(t, simd.Uint16x8.GreaterEqual, greaterEqualSlice[uint16]) + testUint32x4Compare(t, simd.Uint32x4.GreaterEqual, greaterEqualSlice[uint32]) + testUint32x8Compare(t, simd.Uint32x8.GreaterEqual, greaterEqualSlice[uint32]) + testUint64x2Compare(t, simd.Uint64x2.GreaterEqual, greaterEqualSlice[uint64]) + testUint64x4Compare(t, simd.Uint64x4.GreaterEqual, greaterEqualSlice[uint64]) + testUint8x16Compare(t, simd.Uint8x16.GreaterEqual, greaterEqualSlice[uint8]) + testUint8x32Compare(t, simd.Uint8x32.GreaterEqual, greaterEqualSlice[uint8]) if simd.HasAVX512() { - testUint16x16Compare(t, simd.Uint16x16.GreaterEqual, greaterEqualSlice[uint16]) - testUint16x8Compare(t, simd.Uint16x8.GreaterEqual, greaterEqualSlice[uint16]) - testUint32x4Compare(t, simd.Uint32x4.GreaterEqual, greaterEqualSlice[uint32]) - testUint32x8Compare(t, simd.Uint32x8.GreaterEqual, greaterEqualSlice[uint32]) - testUint64x2Compare(t, simd.Uint64x2.GreaterEqual, greaterEqualSlice[uint64]) - testUint64x4Compare(t, simd.Uint64x4.GreaterEqual, greaterEqualSlice[uint64]) - testUint8x16Compare(t, simd.Uint8x16.GreaterEqual, greaterEqualSlice[uint8]) - testUint8x32Compare(t, simd.Uint8x32.GreaterEqual, greaterEqualSlice[uint8]) - testFloat32x16Compare(t, simd.Float32x16.GreaterEqual, greaterEqualSlice[float32]) testFloat64x8Compare(t, simd.Float64x8.GreaterEqual, greaterEqualSlice[float64]) testInt8x64Compare(t, simd.Int8x64.GreaterEqual, greaterEqualSlice[int8]) @@ -260,25 +270,23 @@ func TestNotEqual(t *testing.T) { testFloat64x2Compare(t, simd.Float64x2.NotEqual, notEqualSlice[float64]) testFloat64x4Compare(t, simd.Float64x4.NotEqual, notEqualSlice[float64]) - if comparisonFixed { - testInt16x16Compare(t, simd.Int16x16.NotEqual, notEqualSlice[int16]) - testInt16x8Compare(t, simd.Int16x8.NotEqual, notEqualSlice[int16]) - testInt32x4Compare(t, simd.Int32x4.NotEqual, notEqualSlice[int32]) - testInt32x8Compare(t, simd.Int32x8.NotEqual, notEqualSlice[int32]) - testInt64x2Compare(t, simd.Int64x2.NotEqual, notEqualSlice[int64]) - testInt64x4Compare(t, simd.Int64x4.NotEqual, notEqualSlice[int64]) - testInt8x16Compare(t, simd.Int8x16.NotEqual, notEqualSlice[int8]) - testInt8x32Compare(t, simd.Int8x32.NotEqual, notEqualSlice[int8]) - - testUint16x16Compare(t, simd.Uint16x16.NotEqual, notEqualSlice[uint16]) - testUint16x8Compare(t, simd.Uint16x8.NotEqual, notEqualSlice[uint16]) - testUint32x4Compare(t, simd.Uint32x4.NotEqual, notEqualSlice[uint32]) - testUint32x8Compare(t, simd.Uint32x8.NotEqual, notEqualSlice[uint32]) - testUint64x2Compare(t, simd.Uint64x2.NotEqual, notEqualSlice[uint64]) - testUint64x4Compare(t, simd.Uint64x4.NotEqual, notEqualSlice[uint64]) - testUint8x16Compare(t, simd.Uint8x16.NotEqual, notEqualSlice[uint8]) - testUint8x32Compare(t, simd.Uint8x32.NotEqual, notEqualSlice[uint8]) - } + testInt16x16Compare(t, simd.Int16x16.NotEqual, notEqualSlice[int16]) + 
testInt16x8Compare(t, simd.Int16x8.NotEqual, notEqualSlice[int16]) + testInt32x4Compare(t, simd.Int32x4.NotEqual, notEqualSlice[int32]) + testInt32x8Compare(t, simd.Int32x8.NotEqual, notEqualSlice[int32]) + testInt64x2Compare(t, simd.Int64x2.NotEqual, notEqualSlice[int64]) + testInt64x4Compare(t, simd.Int64x4.NotEqual, notEqualSlice[int64]) + testInt8x16Compare(t, simd.Int8x16.NotEqual, notEqualSlice[int8]) + testInt8x32Compare(t, simd.Int8x32.NotEqual, notEqualSlice[int8]) + + testUint16x16Compare(t, simd.Uint16x16.NotEqual, notEqualSlice[uint16]) + testUint16x8Compare(t, simd.Uint16x8.NotEqual, notEqualSlice[uint16]) + testUint32x4Compare(t, simd.Uint32x4.NotEqual, notEqualSlice[uint32]) + testUint32x8Compare(t, simd.Uint32x8.NotEqual, notEqualSlice[uint32]) + testUint64x2Compare(t, simd.Uint64x2.NotEqual, notEqualSlice[uint64]) + testUint64x4Compare(t, simd.Uint64x4.NotEqual, notEqualSlice[uint64]) + testUint8x16Compare(t, simd.Uint8x16.NotEqual, notEqualSlice[uint8]) + testUint8x32Compare(t, simd.Uint8x32.NotEqual, notEqualSlice[uint8]) if simd.HasAVX512() { testFloat32x16Compare(t, simd.Float32x16.NotEqual, notEqualSlice[float32]) diff --git a/src/simd/genfiles.go b/src/simd/genfiles.go index 8b36da71ab95a8..022ddd168138e1 100644 --- a/src/simd/genfiles.go +++ b/src/simd/genfiles.go @@ -87,6 +87,16 @@ var ternaryFlaky = &shapes{ // for tests that support flaky equality floats: []int{32}, } +var avx2SignedComparisons = &shapes{ + vecs: []int{128, 256}, + ints: []int{8, 16, 32, 64}, +} + +var avx2UnsignedComparisons = &shapes{ + vecs: []int{128, 256}, + uints: []int{8, 16, 32, 64}, +} + type templateData struct { Vec string // the type of the vector, e.g. Float32x4 AOrAn string // for documentation, the article "a" or "an" @@ -486,6 +496,130 @@ func (x {{.Vec}}) StoreSlicePart(s []{{.Type}}) { } `) +func (t templateData) CPUfeature() string { + switch t.Vwidth { + case 128: + return "AVX" + case 256: + return "AVX2" + case 512: + return "AVX512" + } + panic(fmt.Errorf("unexpected vector width %d", t.Vwidth)) +} + +var avx2SignedComparisonsTemplate = shapedTemplateOf(avx2SignedComparisons, "avx2 signed comparisons", ` +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature {{.CPUfeature}} +func (x {{.Vec}}) Less(y {{.Vec}}) Mask{{.WxC}} { + return y.Greater(x) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature {{.CPUfeature}} +func (x {{.Vec}}) GreaterEqual(y {{.Vec}}) Mask{{.WxC}} { + ones := x.Equal(x).AsInt{{.WxC}}() + return y.Greater(x).AsInt{{.WxC}}().Xor(ones).AsMask{{.WxC}}() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature {{.CPUfeature}} +func (x {{.Vec}}) LessEqual(y {{.Vec}}) Mask{{.WxC}} { + ones := x.Equal(x).AsInt{{.WxC}}() + return x.Greater(y).AsInt{{.WxC}}().Xor(ones).AsMask{{.WxC}}() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature {{.CPUfeature}} +func (x {{.Vec}}) NotEqual(y {{.Vec}}) Mask{{.WxC}} { + ones := x.Equal(x).AsInt{{.WxC}}() + return x.Equal(y).AsInt{{.WxC}}().Xor(ones).AsMask{{.WxC}}() +} +`) + +// CPUfeatureAVX2if8 return AVX2 if the element width is 8, +// otherwise, it returns CPUfeature. This is for the cpufeature +// of unsigned comparison emulation, which uses shifts for all +// the sizes > 8 (shifts are AVX) but must use broadcast (AVX2) +// for bytes. 
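The template below leans on a standard identity: an unsigned comparison can be rewritten as a signed one by flipping the sign bit of both operands, because XOR with the minimum signed value maps unsigned order onto signed order. A minimal scalar sketch of that identity follows; the helper name is hypothetical and is not part of this patch, it only illustrates the transformation the generated vector code performs with BroadcastInt8x16 / ShiftAllLeft:

	// unsignedGreater8 reports whether a > b as unsigned bytes,
	// using only a signed comparison plus a sign-bit flip.
	func unsignedGreater8(a, b uint8) bool {
		const sign = int8(-1 << 7) // 0x80, the minimum signed value
		return int8(a)^sign > int8(b)^sign
	}

For element widths above 8 the generated code builds the sign constant from an all-ones vector shifted left by width-1 (an AVX operation), while 8-bit lanes have no vector shift on x86 and use a broadcast instead (AVX2), which is what the CPUfeatureAVX2if8 comment above describes.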
+func (t templateData) CPUfeatureAVX2if8() string { + if t.Width == 8 { + return "AVX2" + } + return t.CPUfeature() +} + +var avx2UnsignedComparisonsTemplate = shapedTemplateOf(avx2UnsignedComparisons, "avx2 unsigned comparisons", ` +// Greater returns a mask whose elements indicate whether x > y +// +// Emulated, CPU Feature {{.CPUfeatureAVX2if8}} +func (x {{.Vec}}) Greater(y {{.Vec}}) Mask{{.WxC}} { + a, b := x.AsInt{{.WxC}}(), y.AsInt{{.WxC}}() +{{- if eq .Width 8}} + signs := BroadcastInt{{.WxC}}(-1 << ({{.Width}}-1)) +{{- else}} + ones := x.Equal(x).AsInt{{.WxC}}() + signs := ones.ShiftAllLeft({{.Width}}-1) +{{- end }} + return a.Xor(signs).Greater(b.Xor(signs)) +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature {{.CPUfeatureAVX2if8}} +func (x {{.Vec}}) Less(y {{.Vec}}) Mask{{.WxC}} { + a, b := x.AsInt{{.WxC}}(), y.AsInt{{.WxC}}() +{{- if eq .Width 8}} + signs := BroadcastInt{{.WxC}}(-1 << ({{.Width}}-1)) +{{- else}} + ones := x.Equal(x).AsInt{{.WxC}}() + signs := ones.ShiftAllLeft({{.Width}}-1) +{{- end }} + return b.Xor(signs).Greater(a.Xor(signs)) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature {{.CPUfeatureAVX2if8}} +func (x {{.Vec}}) GreaterEqual(y {{.Vec}}) Mask{{.WxC}} { + a, b := x.AsInt{{.WxC}}(), y.AsInt{{.WxC}}() + ones := x.Equal(x).AsInt{{.WxC}}() +{{- if eq .Width 8}} + signs := BroadcastInt{{.WxC}}(-1 << ({{.Width}}-1)) +{{- else}} + signs := ones.ShiftAllLeft({{.Width}}-1) +{{- end }} + return b.Xor(signs).Greater(a.Xor(signs)).AsInt{{.WxC}}().Xor(ones).AsMask{{.WxC}}() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature {{.CPUfeatureAVX2if8}} +func (x {{.Vec}}) LessEqual(y {{.Vec}}) Mask{{.WxC}} { + a, b := x.AsInt{{.WxC}}(), y.AsInt{{.WxC}}() + ones := x.Equal(x).AsInt{{.WxC}}() +{{- if eq .Width 8}} + signs := BroadcastInt{{.WxC}}(-1 << ({{.Width}}-1)) +{{- else}} + signs := ones.ShiftAllLeft({{.Width}}-1) +{{- end }} + return a.Xor(signs).Greater(b.Xor(signs)).AsInt{{.WxC}}().Xor(ones).AsMask{{.WxC}}() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature {{.CPUfeature}} +func (x {{.Vec}}) NotEqual(y {{.Vec}}) Mask{{.WxC}} { + a, b := x.AsInt{{.WxC}}(), y.AsInt{{.WxC}}() + ones := x.Equal(x).AsInt{{.WxC}}() + return a.Equal(b).AsInt{{.WxC}}().Xor(ones).AsMask{{.WxC}}() +} +`) + var unsafePATemplate = templateOf("unsafe PA helper", ` // pa{{.Vec}} returns a type-unsafe pointer to array that can // only be used with partial load/store operations that only @@ -591,6 +725,8 @@ func main() { avx2SmallLoadSlicePartTemplate, avx2MaskedTemplate, avx512MaskedTemplate, + avx2SignedComparisonsTemplate, + avx2UnsignedComparisonsTemplate, broadcastTemplate, ) } diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 5b7754a9611b50..d78bb699eaac23 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -3822,61 +3822,21 @@ func (x Float64x4) Greater(y Float64x4) Mask64x4 // Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x8) Greater(y Float64x8) Mask64x8 -// Greater compares for greater than. -// -// Asm: VPCMPUB, CPU Feature: AVX512 -func (x Uint8x16) Greater(y Uint8x16) Mask8x16 - -// Greater compares for greater than. -// -// Asm: VPCMPUB, CPU Feature: AVX512 -func (x Uint8x32) Greater(y Uint8x32) Mask8x32 - // Greater compares for greater than. 
// // Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x64) Greater(y Uint8x64) Mask8x64 -// Greater compares for greater than. -// -// Asm: VPCMPUW, CPU Feature: AVX512 -func (x Uint16x8) Greater(y Uint16x8) Mask16x8 - -// Greater compares for greater than. -// -// Asm: VPCMPUW, CPU Feature: AVX512 -func (x Uint16x16) Greater(y Uint16x16) Mask16x16 - // Greater compares for greater than. // // Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x32) Greater(y Uint16x32) Mask16x32 -// Greater compares for greater than. -// -// Asm: VPCMPUD, CPU Feature: AVX512 -func (x Uint32x4) Greater(y Uint32x4) Mask32x4 - -// Greater compares for greater than. -// -// Asm: VPCMPUD, CPU Feature: AVX512 -func (x Uint32x8) Greater(y Uint32x8) Mask32x8 - // Greater compares for greater than. // // Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x16) Greater(y Uint32x16) Mask32x16 -// Greater compares for greater than. -// -// Asm: VPCMPUQ, CPU Feature: AVX512 -func (x Uint64x2) Greater(y Uint64x2) Mask64x2 - -// Greater compares for greater than. -// -// Asm: VPCMPUQ, CPU Feature: AVX512 -func (x Uint64x4) Greater(y Uint64x4) Mask64x4 - // Greater compares for greater than. // // Asm: VPCMPUQ, CPU Feature: AVX512 @@ -3914,121 +3874,41 @@ func (x Float64x4) GreaterEqual(y Float64x4) Mask64x4 // Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x8) GreaterEqual(y Float64x8) Mask64x8 -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPB, CPU Feature: AVX512 -func (x Int8x16) GreaterEqual(y Int8x16) Mask8x16 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPB, CPU Feature: AVX512 -func (x Int8x32) GreaterEqual(y Int8x32) Mask8x32 - // GreaterEqual compares for greater than or equal. // // Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x64) GreaterEqual(y Int8x64) Mask8x64 -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPW, CPU Feature: AVX512 -func (x Int16x8) GreaterEqual(y Int16x8) Mask16x8 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPW, CPU Feature: AVX512 -func (x Int16x16) GreaterEqual(y Int16x16) Mask16x16 - // GreaterEqual compares for greater than or equal. // // Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x32) GreaterEqual(y Int16x32) Mask16x32 -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPD, CPU Feature: AVX512 -func (x Int32x4) GreaterEqual(y Int32x4) Mask32x4 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPD, CPU Feature: AVX512 -func (x Int32x8) GreaterEqual(y Int32x8) Mask32x8 - // GreaterEqual compares for greater than or equal. // // Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x16) GreaterEqual(y Int32x16) Mask32x16 -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPQ, CPU Feature: AVX512 -func (x Int64x2) GreaterEqual(y Int64x2) Mask64x2 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPQ, CPU Feature: AVX512 -func (x Int64x4) GreaterEqual(y Int64x4) Mask64x4 - // GreaterEqual compares for greater than or equal. // // Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x8) GreaterEqual(y Int64x8) Mask64x8 -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUB, CPU Feature: AVX512 -func (x Uint8x16) GreaterEqual(y Uint8x16) Mask8x16 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUB, CPU Feature: AVX512 -func (x Uint8x32) GreaterEqual(y Uint8x32) Mask8x32 - // GreaterEqual compares for greater than or equal. 
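For the 128- and 256-bit shapes these AVX512-only intrinsic declarations go away; the derived predicates are instead generated from Greater and Equal. Since AVX/AVX2 provide no vector NOT, the complement is taken by XOR-ing with an all-ones vector, and x.Equal(x) is a cheap way to materialize that all-ones value on integer lanes. A free-function restatement of the generated pattern, shown only for illustration (the real methods appear in the slice_amd64.go hunks further down in this patch):

	// greaterEqualInt32x4 mirrors the generated Int32x4.GreaterEqual:
	// x >= y is the complement of y > x.
	func greaterEqualInt32x4(x, y simd.Int32x4) simd.Mask32x4 {
		ones := x.Equal(x).AsInt32x4() // every lane equals itself, so all bits are set
		return y.Greater(x).AsInt32x4().Xor(ones).AsMask32x4()
	}

The same complement handles LessEqual (complement of x > y) and NotEqual (complement of x == y), while Less needs no extra work at all: it is simply Greater with the operands swapped.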
// // Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x64) GreaterEqual(y Uint8x64) Mask8x64 -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUW, CPU Feature: AVX512 -func (x Uint16x8) GreaterEqual(y Uint16x8) Mask16x8 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUW, CPU Feature: AVX512 -func (x Uint16x16) GreaterEqual(y Uint16x16) Mask16x16 - // GreaterEqual compares for greater than or equal. // // Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x32) GreaterEqual(y Uint16x32) Mask16x32 -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUD, CPU Feature: AVX512 -func (x Uint32x4) GreaterEqual(y Uint32x4) Mask32x4 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUD, CPU Feature: AVX512 -func (x Uint32x8) GreaterEqual(y Uint32x8) Mask32x8 - // GreaterEqual compares for greater than or equal. // // Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x16) GreaterEqual(y Uint32x16) Mask32x16 -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUQ, CPU Feature: AVX512 -func (x Uint64x2) GreaterEqual(y Uint64x2) Mask64x2 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUQ, CPU Feature: AVX512 -func (x Uint64x4) GreaterEqual(y Uint64x4) Mask64x4 - // GreaterEqual compares for greater than or equal. // // Asm: VPCMPUQ, CPU Feature: AVX512 @@ -4566,121 +4446,41 @@ func (x Float64x4) Less(y Float64x4) Mask64x4 // Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x8) Less(y Float64x8) Mask64x8 -// Less compares for less than. -// -// Asm: VPCMPB, CPU Feature: AVX512 -func (x Int8x16) Less(y Int8x16) Mask8x16 - -// Less compares for less than. -// -// Asm: VPCMPB, CPU Feature: AVX512 -func (x Int8x32) Less(y Int8x32) Mask8x32 - // Less compares for less than. // // Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x64) Less(y Int8x64) Mask8x64 -// Less compares for less than. -// -// Asm: VPCMPW, CPU Feature: AVX512 -func (x Int16x8) Less(y Int16x8) Mask16x8 - -// Less compares for less than. -// -// Asm: VPCMPW, CPU Feature: AVX512 -func (x Int16x16) Less(y Int16x16) Mask16x16 - // Less compares for less than. // // Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x32) Less(y Int16x32) Mask16x32 -// Less compares for less than. -// -// Asm: VPCMPD, CPU Feature: AVX512 -func (x Int32x4) Less(y Int32x4) Mask32x4 - -// Less compares for less than. -// -// Asm: VPCMPD, CPU Feature: AVX512 -func (x Int32x8) Less(y Int32x8) Mask32x8 - // Less compares for less than. // // Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x16) Less(y Int32x16) Mask32x16 -// Less compares for less than. -// -// Asm: VPCMPQ, CPU Feature: AVX512 -func (x Int64x2) Less(y Int64x2) Mask64x2 - -// Less compares for less than. -// -// Asm: VPCMPQ, CPU Feature: AVX512 -func (x Int64x4) Less(y Int64x4) Mask64x4 - // Less compares for less than. // // Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x8) Less(y Int64x8) Mask64x8 -// Less compares for less than. -// -// Asm: VPCMPUB, CPU Feature: AVX512 -func (x Uint8x16) Less(y Uint8x16) Mask8x16 - -// Less compares for less than. -// -// Asm: VPCMPUB, CPU Feature: AVX512 -func (x Uint8x32) Less(y Uint8x32) Mask8x32 - // Less compares for less than. // // Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x64) Less(y Uint8x64) Mask8x64 -// Less compares for less than. -// -// Asm: VPCMPUW, CPU Feature: AVX512 -func (x Uint16x8) Less(y Uint16x8) Mask16x8 - -// Less compares for less than. 
-// -// Asm: VPCMPUW, CPU Feature: AVX512 -func (x Uint16x16) Less(y Uint16x16) Mask16x16 - // Less compares for less than. // // Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x32) Less(y Uint16x32) Mask16x32 -// Less compares for less than. -// -// Asm: VPCMPUD, CPU Feature: AVX512 -func (x Uint32x4) Less(y Uint32x4) Mask32x4 - -// Less compares for less than. -// -// Asm: VPCMPUD, CPU Feature: AVX512 -func (x Uint32x8) Less(y Uint32x8) Mask32x8 - // Less compares for less than. // // Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x16) Less(y Uint32x16) Mask32x16 -// Less compares for less than. -// -// Asm: VPCMPUQ, CPU Feature: AVX512 -func (x Uint64x2) Less(y Uint64x2) Mask64x2 - -// Less compares for less than. -// -// Asm: VPCMPUQ, CPU Feature: AVX512 -func (x Uint64x4) Less(y Uint64x4) Mask64x4 - // Less compares for less than. // // Asm: VPCMPUQ, CPU Feature: AVX512 @@ -4718,121 +4518,41 @@ func (x Float64x4) LessEqual(y Float64x4) Mask64x4 // Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x8) LessEqual(y Float64x8) Mask64x8 -// LessEqual compares for less than or equal. -// -// Asm: VPCMPB, CPU Feature: AVX512 -func (x Int8x16) LessEqual(y Int8x16) Mask8x16 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPB, CPU Feature: AVX512 -func (x Int8x32) LessEqual(y Int8x32) Mask8x32 - // LessEqual compares for less than or equal. // // Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x64) LessEqual(y Int8x64) Mask8x64 -// LessEqual compares for less than or equal. -// -// Asm: VPCMPW, CPU Feature: AVX512 -func (x Int16x8) LessEqual(y Int16x8) Mask16x8 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPW, CPU Feature: AVX512 -func (x Int16x16) LessEqual(y Int16x16) Mask16x16 - // LessEqual compares for less than or equal. // // Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x32) LessEqual(y Int16x32) Mask16x32 -// LessEqual compares for less than or equal. -// -// Asm: VPCMPD, CPU Feature: AVX512 -func (x Int32x4) LessEqual(y Int32x4) Mask32x4 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPD, CPU Feature: AVX512 -func (x Int32x8) LessEqual(y Int32x8) Mask32x8 - // LessEqual compares for less than or equal. // // Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x16) LessEqual(y Int32x16) Mask32x16 -// LessEqual compares for less than or equal. -// -// Asm: VPCMPQ, CPU Feature: AVX512 -func (x Int64x2) LessEqual(y Int64x2) Mask64x2 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPQ, CPU Feature: AVX512 -func (x Int64x4) LessEqual(y Int64x4) Mask64x4 - // LessEqual compares for less than or equal. // // Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x8) LessEqual(y Int64x8) Mask64x8 -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUB, CPU Feature: AVX512 -func (x Uint8x16) LessEqual(y Uint8x16) Mask8x16 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUB, CPU Feature: AVX512 -func (x Uint8x32) LessEqual(y Uint8x32) Mask8x32 - // LessEqual compares for less than or equal. // // Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x64) LessEqual(y Uint8x64) Mask8x64 -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUW, CPU Feature: AVX512 -func (x Uint16x8) LessEqual(y Uint16x8) Mask16x8 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUW, CPU Feature: AVX512 -func (x Uint16x16) LessEqual(y Uint16x16) Mask16x16 - // LessEqual compares for less than or equal. 
// // Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x32) LessEqual(y Uint16x32) Mask16x32 -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUD, CPU Feature: AVX512 -func (x Uint32x4) LessEqual(y Uint32x4) Mask32x4 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUD, CPU Feature: AVX512 -func (x Uint32x8) LessEqual(y Uint32x8) Mask32x8 - // LessEqual compares for less than or equal. // // Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x16) LessEqual(y Uint32x16) Mask32x16 -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUQ, CPU Feature: AVX512 -func (x Uint64x2) LessEqual(y Uint64x2) Mask64x2 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUQ, CPU Feature: AVX512 -func (x Uint64x4) LessEqual(y Uint64x4) Mask64x4 - // LessEqual compares for less than or equal. // // Asm: VPCMPUQ, CPU Feature: AVX512 @@ -6644,121 +6364,41 @@ func (x Float64x4) NotEqual(y Float64x4) Mask64x4 // Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x8) NotEqual(y Float64x8) Mask64x8 -// NotEqual compares for inequality. -// -// Asm: VPCMPB, CPU Feature: AVX512 -func (x Int8x16) NotEqual(y Int8x16) Mask8x16 - -// NotEqual compares for inequality. -// -// Asm: VPCMPB, CPU Feature: AVX512 -func (x Int8x32) NotEqual(y Int8x32) Mask8x32 - // NotEqual compares for inequality. // // Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x64) NotEqual(y Int8x64) Mask8x64 -// NotEqual compares for inequality. -// -// Asm: VPCMPW, CPU Feature: AVX512 -func (x Int16x8) NotEqual(y Int16x8) Mask16x8 - -// NotEqual compares for inequality. -// -// Asm: VPCMPW, CPU Feature: AVX512 -func (x Int16x16) NotEqual(y Int16x16) Mask16x16 - // NotEqual compares for inequality. // // Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x32) NotEqual(y Int16x32) Mask16x32 -// NotEqual compares for inequality. -// -// Asm: VPCMPD, CPU Feature: AVX512 -func (x Int32x4) NotEqual(y Int32x4) Mask32x4 - -// NotEqual compares for inequality. -// -// Asm: VPCMPD, CPU Feature: AVX512 -func (x Int32x8) NotEqual(y Int32x8) Mask32x8 - // NotEqual compares for inequality. // // Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x16) NotEqual(y Int32x16) Mask32x16 -// NotEqual compares for inequality. -// -// Asm: VPCMPQ, CPU Feature: AVX512 -func (x Int64x2) NotEqual(y Int64x2) Mask64x2 - -// NotEqual compares for inequality. -// -// Asm: VPCMPQ, CPU Feature: AVX512 -func (x Int64x4) NotEqual(y Int64x4) Mask64x4 - // NotEqual compares for inequality. // // Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x8) NotEqual(y Int64x8) Mask64x8 -// NotEqual compares for inequality. -// -// Asm: VPCMPUB, CPU Feature: AVX512 -func (x Uint8x16) NotEqual(y Uint8x16) Mask8x16 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUB, CPU Feature: AVX512 -func (x Uint8x32) NotEqual(y Uint8x32) Mask8x32 - // NotEqual compares for inequality. // // Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x64) NotEqual(y Uint8x64) Mask8x64 -// NotEqual compares for inequality. -// -// Asm: VPCMPUW, CPU Feature: AVX512 -func (x Uint16x8) NotEqual(y Uint16x8) Mask16x8 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUW, CPU Feature: AVX512 -func (x Uint16x16) NotEqual(y Uint16x16) Mask16x16 - // NotEqual compares for inequality. // // Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x32) NotEqual(y Uint16x32) Mask16x32 -// NotEqual compares for inequality. -// -// Asm: VPCMPUD, CPU Feature: AVX512 -func (x Uint32x4) NotEqual(y Uint32x4) Mask32x4 - -// NotEqual compares for inequality. 
-// -// Asm: VPCMPUD, CPU Feature: AVX512 -func (x Uint32x8) NotEqual(y Uint32x8) Mask32x8 - // NotEqual compares for inequality. // // Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x16) NotEqual(y Uint32x16) Mask32x16 -// NotEqual compares for inequality. -// -// Asm: VPCMPUQ, CPU Feature: AVX512 -func (x Uint64x2) NotEqual(y Uint64x2) Mask64x2 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUQ, CPU Feature: AVX512 -func (x Uint64x4) NotEqual(y Uint64x4) Mask64x4 - // NotEqual compares for inequality. // // Asm: VPCMPUQ, CPU Feature: AVX512 diff --git a/src/simd/slice_amd64.go b/src/simd/slice_amd64.go index 8e721d90279680..3ad2672a05b023 100644 --- a/src/simd/slice_amd64.go +++ b/src/simd/slice_amd64.go @@ -1500,6 +1500,642 @@ func (x Float64x8) Merge(y Float64x8, mask Mask64x8) Float64x8 { return iy.blendMasked(ix, mask).AsFloat64x8() } +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX +func (x Int8x16) Less(y Int8x16) Mask8x16 { + return y.Greater(x) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX +func (x Int8x16) GreaterEqual(y Int8x16) Mask8x16 { + ones := x.Equal(x).AsInt8x16() + return y.Greater(x).AsInt8x16().Xor(ones).AsMask8x16() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX +func (x Int8x16) LessEqual(y Int8x16) Mask8x16 { + ones := x.Equal(x).AsInt8x16() + return x.Greater(y).AsInt8x16().Xor(ones).AsMask8x16() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX +func (x Int8x16) NotEqual(y Int8x16) Mask8x16 { + ones := x.Equal(x).AsInt8x16() + return x.Equal(y).AsInt8x16().Xor(ones).AsMask8x16() +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX +func (x Int16x8) Less(y Int16x8) Mask16x8 { + return y.Greater(x) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX +func (x Int16x8) GreaterEqual(y Int16x8) Mask16x8 { + ones := x.Equal(x).AsInt16x8() + return y.Greater(x).AsInt16x8().Xor(ones).AsMask16x8() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX +func (x Int16x8) LessEqual(y Int16x8) Mask16x8 { + ones := x.Equal(x).AsInt16x8() + return x.Greater(y).AsInt16x8().Xor(ones).AsMask16x8() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX +func (x Int16x8) NotEqual(y Int16x8) Mask16x8 { + ones := x.Equal(x).AsInt16x8() + return x.Equal(y).AsInt16x8().Xor(ones).AsMask16x8() +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX +func (x Int32x4) Less(y Int32x4) Mask32x4 { + return y.Greater(x) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX +func (x Int32x4) GreaterEqual(y Int32x4) Mask32x4 { + ones := x.Equal(x).AsInt32x4() + return y.Greater(x).AsInt32x4().Xor(ones).AsMask32x4() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX +func (x Int32x4) LessEqual(y Int32x4) Mask32x4 { + ones := x.Equal(x).AsInt32x4() + return x.Greater(y).AsInt32x4().Xor(ones).AsMask32x4() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX +func (x Int32x4) NotEqual(y Int32x4) Mask32x4 { + ones := x.Equal(x).AsInt32x4() + return 
x.Equal(y).AsInt32x4().Xor(ones).AsMask32x4() +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX +func (x Int64x2) Less(y Int64x2) Mask64x2 { + return y.Greater(x) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX +func (x Int64x2) GreaterEqual(y Int64x2) Mask64x2 { + ones := x.Equal(x).AsInt64x2() + return y.Greater(x).AsInt64x2().Xor(ones).AsMask64x2() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX +func (x Int64x2) LessEqual(y Int64x2) Mask64x2 { + ones := x.Equal(x).AsInt64x2() + return x.Greater(y).AsInt64x2().Xor(ones).AsMask64x2() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX +func (x Int64x2) NotEqual(y Int64x2) Mask64x2 { + ones := x.Equal(x).AsInt64x2() + return x.Equal(y).AsInt64x2().Xor(ones).AsMask64x2() +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX2 +func (x Int8x32) Less(y Int8x32) Mask8x32 { + return y.Greater(x) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX2 +func (x Int8x32) GreaterEqual(y Int8x32) Mask8x32 { + ones := x.Equal(x).AsInt8x32() + return y.Greater(x).AsInt8x32().Xor(ones).AsMask8x32() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX2 +func (x Int8x32) LessEqual(y Int8x32) Mask8x32 { + ones := x.Equal(x).AsInt8x32() + return x.Greater(y).AsInt8x32().Xor(ones).AsMask8x32() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX2 +func (x Int8x32) NotEqual(y Int8x32) Mask8x32 { + ones := x.Equal(x).AsInt8x32() + return x.Equal(y).AsInt8x32().Xor(ones).AsMask8x32() +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX2 +func (x Int16x16) Less(y Int16x16) Mask16x16 { + return y.Greater(x) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX2 +func (x Int16x16) GreaterEqual(y Int16x16) Mask16x16 { + ones := x.Equal(x).AsInt16x16() + return y.Greater(x).AsInt16x16().Xor(ones).AsMask16x16() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX2 +func (x Int16x16) LessEqual(y Int16x16) Mask16x16 { + ones := x.Equal(x).AsInt16x16() + return x.Greater(y).AsInt16x16().Xor(ones).AsMask16x16() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX2 +func (x Int16x16) NotEqual(y Int16x16) Mask16x16 { + ones := x.Equal(x).AsInt16x16() + return x.Equal(y).AsInt16x16().Xor(ones).AsMask16x16() +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX2 +func (x Int32x8) Less(y Int32x8) Mask32x8 { + return y.Greater(x) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX2 +func (x Int32x8) GreaterEqual(y Int32x8) Mask32x8 { + ones := x.Equal(x).AsInt32x8() + return y.Greater(x).AsInt32x8().Xor(ones).AsMask32x8() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX2 +func (x Int32x8) LessEqual(y Int32x8) Mask32x8 { + ones := x.Equal(x).AsInt32x8() + return x.Greater(y).AsInt32x8().Xor(ones).AsMask32x8() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// 
+// Emulated, CPU Feature AVX2 +func (x Int32x8) NotEqual(y Int32x8) Mask32x8 { + ones := x.Equal(x).AsInt32x8() + return x.Equal(y).AsInt32x8().Xor(ones).AsMask32x8() +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX2 +func (x Int64x4) Less(y Int64x4) Mask64x4 { + return y.Greater(x) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX2 +func (x Int64x4) GreaterEqual(y Int64x4) Mask64x4 { + ones := x.Equal(x).AsInt64x4() + return y.Greater(x).AsInt64x4().Xor(ones).AsMask64x4() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX2 +func (x Int64x4) LessEqual(y Int64x4) Mask64x4 { + ones := x.Equal(x).AsInt64x4() + return x.Greater(y).AsInt64x4().Xor(ones).AsMask64x4() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX2 +func (x Int64x4) NotEqual(y Int64x4) Mask64x4 { + ones := x.Equal(x).AsInt64x4() + return x.Equal(y).AsInt64x4().Xor(ones).AsMask64x4() +} + +// Greater returns a mask whose elements indicate whether x > y +// +// Emulated, CPU Feature AVX2 +func (x Uint8x16) Greater(y Uint8x16) Mask8x16 { + a, b := x.AsInt8x16(), y.AsInt8x16() + signs := BroadcastInt8x16(-1 << (8 - 1)) + return a.Xor(signs).Greater(b.Xor(signs)) +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX2 +func (x Uint8x16) Less(y Uint8x16) Mask8x16 { + a, b := x.AsInt8x16(), y.AsInt8x16() + signs := BroadcastInt8x16(-1 << (8 - 1)) + return b.Xor(signs).Greater(a.Xor(signs)) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX2 +func (x Uint8x16) GreaterEqual(y Uint8x16) Mask8x16 { + a, b := x.AsInt8x16(), y.AsInt8x16() + ones := x.Equal(x).AsInt8x16() + signs := BroadcastInt8x16(-1 << (8 - 1)) + return b.Xor(signs).Greater(a.Xor(signs)).AsInt8x16().Xor(ones).AsMask8x16() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX2 +func (x Uint8x16) LessEqual(y Uint8x16) Mask8x16 { + a, b := x.AsInt8x16(), y.AsInt8x16() + ones := x.Equal(x).AsInt8x16() + signs := BroadcastInt8x16(-1 << (8 - 1)) + return a.Xor(signs).Greater(b.Xor(signs)).AsInt8x16().Xor(ones).AsMask8x16() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX +func (x Uint8x16) NotEqual(y Uint8x16) Mask8x16 { + a, b := x.AsInt8x16(), y.AsInt8x16() + ones := x.Equal(x).AsInt8x16() + return a.Equal(b).AsInt8x16().Xor(ones).AsMask8x16() +} + +// Greater returns a mask whose elements indicate whether x > y +// +// Emulated, CPU Feature AVX +func (x Uint16x8) Greater(y Uint16x8) Mask16x8 { + a, b := x.AsInt16x8(), y.AsInt16x8() + ones := x.Equal(x).AsInt16x8() + signs := ones.ShiftAllLeft(16 - 1) + return a.Xor(signs).Greater(b.Xor(signs)) +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX +func (x Uint16x8) Less(y Uint16x8) Mask16x8 { + a, b := x.AsInt16x8(), y.AsInt16x8() + ones := x.Equal(x).AsInt16x8() + signs := ones.ShiftAllLeft(16 - 1) + return b.Xor(signs).Greater(a.Xor(signs)) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX +func (x Uint16x8) GreaterEqual(y Uint16x8) Mask16x8 { + a, b := x.AsInt16x8(), y.AsInt16x8() + ones := x.Equal(x).AsInt16x8() + signs := ones.ShiftAllLeft(16 - 1) + return 
b.Xor(signs).Greater(a.Xor(signs)).AsInt16x8().Xor(ones).AsMask16x8() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX +func (x Uint16x8) LessEqual(y Uint16x8) Mask16x8 { + a, b := x.AsInt16x8(), y.AsInt16x8() + ones := x.Equal(x).AsInt16x8() + signs := ones.ShiftAllLeft(16 - 1) + return a.Xor(signs).Greater(b.Xor(signs)).AsInt16x8().Xor(ones).AsMask16x8() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX +func (x Uint16x8) NotEqual(y Uint16x8) Mask16x8 { + a, b := x.AsInt16x8(), y.AsInt16x8() + ones := x.Equal(x).AsInt16x8() + return a.Equal(b).AsInt16x8().Xor(ones).AsMask16x8() +} + +// Greater returns a mask whose elements indicate whether x > y +// +// Emulated, CPU Feature AVX +func (x Uint32x4) Greater(y Uint32x4) Mask32x4 { + a, b := x.AsInt32x4(), y.AsInt32x4() + ones := x.Equal(x).AsInt32x4() + signs := ones.ShiftAllLeft(32 - 1) + return a.Xor(signs).Greater(b.Xor(signs)) +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX +func (x Uint32x4) Less(y Uint32x4) Mask32x4 { + a, b := x.AsInt32x4(), y.AsInt32x4() + ones := x.Equal(x).AsInt32x4() + signs := ones.ShiftAllLeft(32 - 1) + return b.Xor(signs).Greater(a.Xor(signs)) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX +func (x Uint32x4) GreaterEqual(y Uint32x4) Mask32x4 { + a, b := x.AsInt32x4(), y.AsInt32x4() + ones := x.Equal(x).AsInt32x4() + signs := ones.ShiftAllLeft(32 - 1) + return b.Xor(signs).Greater(a.Xor(signs)).AsInt32x4().Xor(ones).AsMask32x4() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX +func (x Uint32x4) LessEqual(y Uint32x4) Mask32x4 { + a, b := x.AsInt32x4(), y.AsInt32x4() + ones := x.Equal(x).AsInt32x4() + signs := ones.ShiftAllLeft(32 - 1) + return a.Xor(signs).Greater(b.Xor(signs)).AsInt32x4().Xor(ones).AsMask32x4() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX +func (x Uint32x4) NotEqual(y Uint32x4) Mask32x4 { + a, b := x.AsInt32x4(), y.AsInt32x4() + ones := x.Equal(x).AsInt32x4() + return a.Equal(b).AsInt32x4().Xor(ones).AsMask32x4() +} + +// Greater returns a mask whose elements indicate whether x > y +// +// Emulated, CPU Feature AVX +func (x Uint64x2) Greater(y Uint64x2) Mask64x2 { + a, b := x.AsInt64x2(), y.AsInt64x2() + ones := x.Equal(x).AsInt64x2() + signs := ones.ShiftAllLeft(64 - 1) + return a.Xor(signs).Greater(b.Xor(signs)) +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX +func (x Uint64x2) Less(y Uint64x2) Mask64x2 { + a, b := x.AsInt64x2(), y.AsInt64x2() + ones := x.Equal(x).AsInt64x2() + signs := ones.ShiftAllLeft(64 - 1) + return b.Xor(signs).Greater(a.Xor(signs)) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX +func (x Uint64x2) GreaterEqual(y Uint64x2) Mask64x2 { + a, b := x.AsInt64x2(), y.AsInt64x2() + ones := x.Equal(x).AsInt64x2() + signs := ones.ShiftAllLeft(64 - 1) + return b.Xor(signs).Greater(a.Xor(signs)).AsInt64x2().Xor(ones).AsMask64x2() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX +func (x Uint64x2) LessEqual(y Uint64x2) Mask64x2 { + a, b := x.AsInt64x2(), y.AsInt64x2() + ones := x.Equal(x).AsInt64x2() + signs := ones.ShiftAllLeft(64 - 1) + return 
a.Xor(signs).Greater(b.Xor(signs)).AsInt64x2().Xor(ones).AsMask64x2() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX +func (x Uint64x2) NotEqual(y Uint64x2) Mask64x2 { + a, b := x.AsInt64x2(), y.AsInt64x2() + ones := x.Equal(x).AsInt64x2() + return a.Equal(b).AsInt64x2().Xor(ones).AsMask64x2() +} + +// Greater returns a mask whose elements indicate whether x > y +// +// Emulated, CPU Feature AVX2 +func (x Uint8x32) Greater(y Uint8x32) Mask8x32 { + a, b := x.AsInt8x32(), y.AsInt8x32() + signs := BroadcastInt8x32(-1 << (8 - 1)) + return a.Xor(signs).Greater(b.Xor(signs)) +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX2 +func (x Uint8x32) Less(y Uint8x32) Mask8x32 { + a, b := x.AsInt8x32(), y.AsInt8x32() + signs := BroadcastInt8x32(-1 << (8 - 1)) + return b.Xor(signs).Greater(a.Xor(signs)) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX2 +func (x Uint8x32) GreaterEqual(y Uint8x32) Mask8x32 { + a, b := x.AsInt8x32(), y.AsInt8x32() + ones := x.Equal(x).AsInt8x32() + signs := BroadcastInt8x32(-1 << (8 - 1)) + return b.Xor(signs).Greater(a.Xor(signs)).AsInt8x32().Xor(ones).AsMask8x32() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX2 +func (x Uint8x32) LessEqual(y Uint8x32) Mask8x32 { + a, b := x.AsInt8x32(), y.AsInt8x32() + ones := x.Equal(x).AsInt8x32() + signs := BroadcastInt8x32(-1 << (8 - 1)) + return a.Xor(signs).Greater(b.Xor(signs)).AsInt8x32().Xor(ones).AsMask8x32() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX2 +func (x Uint8x32) NotEqual(y Uint8x32) Mask8x32 { + a, b := x.AsInt8x32(), y.AsInt8x32() + ones := x.Equal(x).AsInt8x32() + return a.Equal(b).AsInt8x32().Xor(ones).AsMask8x32() +} + +// Greater returns a mask whose elements indicate whether x > y +// +// Emulated, CPU Feature AVX2 +func (x Uint16x16) Greater(y Uint16x16) Mask16x16 { + a, b := x.AsInt16x16(), y.AsInt16x16() + ones := x.Equal(x).AsInt16x16() + signs := ones.ShiftAllLeft(16 - 1) + return a.Xor(signs).Greater(b.Xor(signs)) +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX2 +func (x Uint16x16) Less(y Uint16x16) Mask16x16 { + a, b := x.AsInt16x16(), y.AsInt16x16() + ones := x.Equal(x).AsInt16x16() + signs := ones.ShiftAllLeft(16 - 1) + return b.Xor(signs).Greater(a.Xor(signs)) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX2 +func (x Uint16x16) GreaterEqual(y Uint16x16) Mask16x16 { + a, b := x.AsInt16x16(), y.AsInt16x16() + ones := x.Equal(x).AsInt16x16() + signs := ones.ShiftAllLeft(16 - 1) + return b.Xor(signs).Greater(a.Xor(signs)).AsInt16x16().Xor(ones).AsMask16x16() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX2 +func (x Uint16x16) LessEqual(y Uint16x16) Mask16x16 { + a, b := x.AsInt16x16(), y.AsInt16x16() + ones := x.Equal(x).AsInt16x16() + signs := ones.ShiftAllLeft(16 - 1) + return a.Xor(signs).Greater(b.Xor(signs)).AsInt16x16().Xor(ones).AsMask16x16() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX2 +func (x Uint16x16) NotEqual(y Uint16x16) Mask16x16 { + a, b := x.AsInt16x16(), y.AsInt16x16() + ones := x.Equal(x).AsInt16x16() + return a.Equal(b).AsInt16x16().Xor(ones).AsMask16x16() +} + +// 
Greater returns a mask whose elements indicate whether x > y +// +// Emulated, CPU Feature AVX2 +func (x Uint32x8) Greater(y Uint32x8) Mask32x8 { + a, b := x.AsInt32x8(), y.AsInt32x8() + ones := x.Equal(x).AsInt32x8() + signs := ones.ShiftAllLeft(32 - 1) + return a.Xor(signs).Greater(b.Xor(signs)) +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX2 +func (x Uint32x8) Less(y Uint32x8) Mask32x8 { + a, b := x.AsInt32x8(), y.AsInt32x8() + ones := x.Equal(x).AsInt32x8() + signs := ones.ShiftAllLeft(32 - 1) + return b.Xor(signs).Greater(a.Xor(signs)) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX2 +func (x Uint32x8) GreaterEqual(y Uint32x8) Mask32x8 { + a, b := x.AsInt32x8(), y.AsInt32x8() + ones := x.Equal(x).AsInt32x8() + signs := ones.ShiftAllLeft(32 - 1) + return b.Xor(signs).Greater(a.Xor(signs)).AsInt32x8().Xor(ones).AsMask32x8() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX2 +func (x Uint32x8) LessEqual(y Uint32x8) Mask32x8 { + a, b := x.AsInt32x8(), y.AsInt32x8() + ones := x.Equal(x).AsInt32x8() + signs := ones.ShiftAllLeft(32 - 1) + return a.Xor(signs).Greater(b.Xor(signs)).AsInt32x8().Xor(ones).AsMask32x8() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX2 +func (x Uint32x8) NotEqual(y Uint32x8) Mask32x8 { + a, b := x.AsInt32x8(), y.AsInt32x8() + ones := x.Equal(x).AsInt32x8() + return a.Equal(b).AsInt32x8().Xor(ones).AsMask32x8() +} + +// Greater returns a mask whose elements indicate whether x > y +// +// Emulated, CPU Feature AVX2 +func (x Uint64x4) Greater(y Uint64x4) Mask64x4 { + a, b := x.AsInt64x4(), y.AsInt64x4() + ones := x.Equal(x).AsInt64x4() + signs := ones.ShiftAllLeft(64 - 1) + return a.Xor(signs).Greater(b.Xor(signs)) +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX2 +func (x Uint64x4) Less(y Uint64x4) Mask64x4 { + a, b := x.AsInt64x4(), y.AsInt64x4() + ones := x.Equal(x).AsInt64x4() + signs := ones.ShiftAllLeft(64 - 1) + return b.Xor(signs).Greater(a.Xor(signs)) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX2 +func (x Uint64x4) GreaterEqual(y Uint64x4) Mask64x4 { + a, b := x.AsInt64x4(), y.AsInt64x4() + ones := x.Equal(x).AsInt64x4() + signs := ones.ShiftAllLeft(64 - 1) + return b.Xor(signs).Greater(a.Xor(signs)).AsInt64x4().Xor(ones).AsMask64x4() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX2 +func (x Uint64x4) LessEqual(y Uint64x4) Mask64x4 { + a, b := x.AsInt64x4(), y.AsInt64x4() + ones := x.Equal(x).AsInt64x4() + signs := ones.ShiftAllLeft(64 - 1) + return a.Xor(signs).Greater(b.Xor(signs)).AsInt64x4().Xor(ones).AsMask64x4() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX2 +func (x Uint64x4) NotEqual(y Uint64x4) Mask64x4 { + a, b := x.AsInt64x4(), y.AsInt64x4() + ones := x.Equal(x).AsInt64x4() + return a.Equal(b).AsInt64x4().Xor(ones).AsMask64x4() +} + // BroadcastInt8x16 returns a vector with the input // x assigned to all elements of the output. 
// From 858a8d2276ee00d5f04258f406a13fc6f86386cd Mon Sep 17 00:00:00 2001 From: David Chase Date: Fri, 8 Aug 2025 13:28:07 -0400 Subject: [PATCH 131/139] [dev.simd] simd: reorganize/rename generated emulation files Change-Id: I8c755d3b6a1a16ac271a22ab2bd2abb308441563 Reviewed-on: https://go-review.googlesource.com/c/go/+/694097 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao --- src/simd/compare_gen_amd64.go | 641 ++++++++ src/simd/genfiles.go | 25 +- src/simd/maskmerge_gen_amd64.go | 403 ++++++ src/simd/other_gen_amd64.go | 275 ++++ src/simd/slice_amd64.go | 2407 ------------------------------- src/simd/slice_gen_amd64.go | 1103 ++++++++++++++ 6 files changed, 2441 insertions(+), 2413 deletions(-) create mode 100644 src/simd/compare_gen_amd64.go create mode 100644 src/simd/maskmerge_gen_amd64.go create mode 100644 src/simd/other_gen_amd64.go delete mode 100644 src/simd/slice_amd64.go create mode 100644 src/simd/slice_gen_amd64.go diff --git a/src/simd/compare_gen_amd64.go b/src/simd/compare_gen_amd64.go new file mode 100644 index 00000000000000..65919fe403149e --- /dev/null +++ b/src/simd/compare_gen_amd64.go @@ -0,0 +1,641 @@ +// Code generated by 'go run genfiles.go'; DO NOT EDIT. + +//go:build goexperiment.simd + +package simd + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX +func (x Int8x16) Less(y Int8x16) Mask8x16 { + return y.Greater(x) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX +func (x Int8x16) GreaterEqual(y Int8x16) Mask8x16 { + ones := x.Equal(x).AsInt8x16() + return y.Greater(x).AsInt8x16().Xor(ones).AsMask8x16() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX +func (x Int8x16) LessEqual(y Int8x16) Mask8x16 { + ones := x.Equal(x).AsInt8x16() + return x.Greater(y).AsInt8x16().Xor(ones).AsMask8x16() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX +func (x Int8x16) NotEqual(y Int8x16) Mask8x16 { + ones := x.Equal(x).AsInt8x16() + return x.Equal(y).AsInt8x16().Xor(ones).AsMask8x16() +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX +func (x Int16x8) Less(y Int16x8) Mask16x8 { + return y.Greater(x) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX +func (x Int16x8) GreaterEqual(y Int16x8) Mask16x8 { + ones := x.Equal(x).AsInt16x8() + return y.Greater(x).AsInt16x8().Xor(ones).AsMask16x8() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX +func (x Int16x8) LessEqual(y Int16x8) Mask16x8 { + ones := x.Equal(x).AsInt16x8() + return x.Greater(y).AsInt16x8().Xor(ones).AsMask16x8() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX +func (x Int16x8) NotEqual(y Int16x8) Mask16x8 { + ones := x.Equal(x).AsInt16x8() + return x.Equal(y).AsInt16x8().Xor(ones).AsMask16x8() +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX +func (x Int32x4) Less(y Int32x4) Mask32x4 { + return y.Greater(x) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX +func (x Int32x4) GreaterEqual(y Int32x4) Mask32x4 { + ones := x.Equal(x).AsInt32x4() + return y.Greater(x).AsInt32x4().Xor(ones).AsMask32x4() +} + +// LessEqual returns a mask whose elements indicate 
whether x <= y +// +// Emulated, CPU Feature AVX +func (x Int32x4) LessEqual(y Int32x4) Mask32x4 { + ones := x.Equal(x).AsInt32x4() + return x.Greater(y).AsInt32x4().Xor(ones).AsMask32x4() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX +func (x Int32x4) NotEqual(y Int32x4) Mask32x4 { + ones := x.Equal(x).AsInt32x4() + return x.Equal(y).AsInt32x4().Xor(ones).AsMask32x4() +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX +func (x Int64x2) Less(y Int64x2) Mask64x2 { + return y.Greater(x) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX +func (x Int64x2) GreaterEqual(y Int64x2) Mask64x2 { + ones := x.Equal(x).AsInt64x2() + return y.Greater(x).AsInt64x2().Xor(ones).AsMask64x2() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX +func (x Int64x2) LessEqual(y Int64x2) Mask64x2 { + ones := x.Equal(x).AsInt64x2() + return x.Greater(y).AsInt64x2().Xor(ones).AsMask64x2() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX +func (x Int64x2) NotEqual(y Int64x2) Mask64x2 { + ones := x.Equal(x).AsInt64x2() + return x.Equal(y).AsInt64x2().Xor(ones).AsMask64x2() +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX2 +func (x Int8x32) Less(y Int8x32) Mask8x32 { + return y.Greater(x) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX2 +func (x Int8x32) GreaterEqual(y Int8x32) Mask8x32 { + ones := x.Equal(x).AsInt8x32() + return y.Greater(x).AsInt8x32().Xor(ones).AsMask8x32() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX2 +func (x Int8x32) LessEqual(y Int8x32) Mask8x32 { + ones := x.Equal(x).AsInt8x32() + return x.Greater(y).AsInt8x32().Xor(ones).AsMask8x32() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX2 +func (x Int8x32) NotEqual(y Int8x32) Mask8x32 { + ones := x.Equal(x).AsInt8x32() + return x.Equal(y).AsInt8x32().Xor(ones).AsMask8x32() +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX2 +func (x Int16x16) Less(y Int16x16) Mask16x16 { + return y.Greater(x) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX2 +func (x Int16x16) GreaterEqual(y Int16x16) Mask16x16 { + ones := x.Equal(x).AsInt16x16() + return y.Greater(x).AsInt16x16().Xor(ones).AsMask16x16() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX2 +func (x Int16x16) LessEqual(y Int16x16) Mask16x16 { + ones := x.Equal(x).AsInt16x16() + return x.Greater(y).AsInt16x16().Xor(ones).AsMask16x16() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX2 +func (x Int16x16) NotEqual(y Int16x16) Mask16x16 { + ones := x.Equal(x).AsInt16x16() + return x.Equal(y).AsInt16x16().Xor(ones).AsMask16x16() +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX2 +func (x Int32x8) Less(y Int32x8) Mask32x8 { + return y.Greater(x) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX2 +func (x Int32x8) GreaterEqual(y Int32x8) Mask32x8 { + ones := 
x.Equal(x).AsInt32x8() + return y.Greater(x).AsInt32x8().Xor(ones).AsMask32x8() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX2 +func (x Int32x8) LessEqual(y Int32x8) Mask32x8 { + ones := x.Equal(x).AsInt32x8() + return x.Greater(y).AsInt32x8().Xor(ones).AsMask32x8() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX2 +func (x Int32x8) NotEqual(y Int32x8) Mask32x8 { + ones := x.Equal(x).AsInt32x8() + return x.Equal(y).AsInt32x8().Xor(ones).AsMask32x8() +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX2 +func (x Int64x4) Less(y Int64x4) Mask64x4 { + return y.Greater(x) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX2 +func (x Int64x4) GreaterEqual(y Int64x4) Mask64x4 { + ones := x.Equal(x).AsInt64x4() + return y.Greater(x).AsInt64x4().Xor(ones).AsMask64x4() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX2 +func (x Int64x4) LessEqual(y Int64x4) Mask64x4 { + ones := x.Equal(x).AsInt64x4() + return x.Greater(y).AsInt64x4().Xor(ones).AsMask64x4() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX2 +func (x Int64x4) NotEqual(y Int64x4) Mask64x4 { + ones := x.Equal(x).AsInt64x4() + return x.Equal(y).AsInt64x4().Xor(ones).AsMask64x4() +} + +// Greater returns a mask whose elements indicate whether x > y +// +// Emulated, CPU Feature AVX2 +func (x Uint8x16) Greater(y Uint8x16) Mask8x16 { + a, b := x.AsInt8x16(), y.AsInt8x16() + signs := BroadcastInt8x16(-1 << (8 - 1)) + return a.Xor(signs).Greater(b.Xor(signs)) +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX2 +func (x Uint8x16) Less(y Uint8x16) Mask8x16 { + a, b := x.AsInt8x16(), y.AsInt8x16() + signs := BroadcastInt8x16(-1 << (8 - 1)) + return b.Xor(signs).Greater(a.Xor(signs)) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX2 +func (x Uint8x16) GreaterEqual(y Uint8x16) Mask8x16 { + a, b := x.AsInt8x16(), y.AsInt8x16() + ones := x.Equal(x).AsInt8x16() + signs := BroadcastInt8x16(-1 << (8 - 1)) + return b.Xor(signs).Greater(a.Xor(signs)).AsInt8x16().Xor(ones).AsMask8x16() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX2 +func (x Uint8x16) LessEqual(y Uint8x16) Mask8x16 { + a, b := x.AsInt8x16(), y.AsInt8x16() + ones := x.Equal(x).AsInt8x16() + signs := BroadcastInt8x16(-1 << (8 - 1)) + return a.Xor(signs).Greater(b.Xor(signs)).AsInt8x16().Xor(ones).AsMask8x16() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX +func (x Uint8x16) NotEqual(y Uint8x16) Mask8x16 { + a, b := x.AsInt8x16(), y.AsInt8x16() + ones := x.Equal(x).AsInt8x16() + return a.Equal(b).AsInt8x16().Xor(ones).AsMask8x16() +} + +// Greater returns a mask whose elements indicate whether x > y +// +// Emulated, CPU Feature AVX +func (x Uint16x8) Greater(y Uint16x8) Mask16x8 { + a, b := x.AsInt16x8(), y.AsInt16x8() + ones := x.Equal(x).AsInt16x8() + signs := ones.ShiftAllLeft(16 - 1) + return a.Xor(signs).Greater(b.Xor(signs)) +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX +func (x Uint16x8) Less(y Uint16x8) Mask16x8 { + a, b := x.AsInt16x8(), y.AsInt16x8() + ones := 
x.Equal(x).AsInt16x8() + signs := ones.ShiftAllLeft(16 - 1) + return b.Xor(signs).Greater(a.Xor(signs)) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX +func (x Uint16x8) GreaterEqual(y Uint16x8) Mask16x8 { + a, b := x.AsInt16x8(), y.AsInt16x8() + ones := x.Equal(x).AsInt16x8() + signs := ones.ShiftAllLeft(16 - 1) + return b.Xor(signs).Greater(a.Xor(signs)).AsInt16x8().Xor(ones).AsMask16x8() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX +func (x Uint16x8) LessEqual(y Uint16x8) Mask16x8 { + a, b := x.AsInt16x8(), y.AsInt16x8() + ones := x.Equal(x).AsInt16x8() + signs := ones.ShiftAllLeft(16 - 1) + return a.Xor(signs).Greater(b.Xor(signs)).AsInt16x8().Xor(ones).AsMask16x8() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX +func (x Uint16x8) NotEqual(y Uint16x8) Mask16x8 { + a, b := x.AsInt16x8(), y.AsInt16x8() + ones := x.Equal(x).AsInt16x8() + return a.Equal(b).AsInt16x8().Xor(ones).AsMask16x8() +} + +// Greater returns a mask whose elements indicate whether x > y +// +// Emulated, CPU Feature AVX +func (x Uint32x4) Greater(y Uint32x4) Mask32x4 { + a, b := x.AsInt32x4(), y.AsInt32x4() + ones := x.Equal(x).AsInt32x4() + signs := ones.ShiftAllLeft(32 - 1) + return a.Xor(signs).Greater(b.Xor(signs)) +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX +func (x Uint32x4) Less(y Uint32x4) Mask32x4 { + a, b := x.AsInt32x4(), y.AsInt32x4() + ones := x.Equal(x).AsInt32x4() + signs := ones.ShiftAllLeft(32 - 1) + return b.Xor(signs).Greater(a.Xor(signs)) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX +func (x Uint32x4) GreaterEqual(y Uint32x4) Mask32x4 { + a, b := x.AsInt32x4(), y.AsInt32x4() + ones := x.Equal(x).AsInt32x4() + signs := ones.ShiftAllLeft(32 - 1) + return b.Xor(signs).Greater(a.Xor(signs)).AsInt32x4().Xor(ones).AsMask32x4() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX +func (x Uint32x4) LessEqual(y Uint32x4) Mask32x4 { + a, b := x.AsInt32x4(), y.AsInt32x4() + ones := x.Equal(x).AsInt32x4() + signs := ones.ShiftAllLeft(32 - 1) + return a.Xor(signs).Greater(b.Xor(signs)).AsInt32x4().Xor(ones).AsMask32x4() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX +func (x Uint32x4) NotEqual(y Uint32x4) Mask32x4 { + a, b := x.AsInt32x4(), y.AsInt32x4() + ones := x.Equal(x).AsInt32x4() + return a.Equal(b).AsInt32x4().Xor(ones).AsMask32x4() +} + +// Greater returns a mask whose elements indicate whether x > y +// +// Emulated, CPU Feature AVX +func (x Uint64x2) Greater(y Uint64x2) Mask64x2 { + a, b := x.AsInt64x2(), y.AsInt64x2() + ones := x.Equal(x).AsInt64x2() + signs := ones.ShiftAllLeft(64 - 1) + return a.Xor(signs).Greater(b.Xor(signs)) +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX +func (x Uint64x2) Less(y Uint64x2) Mask64x2 { + a, b := x.AsInt64x2(), y.AsInt64x2() + ones := x.Equal(x).AsInt64x2() + signs := ones.ShiftAllLeft(64 - 1) + return b.Xor(signs).Greater(a.Xor(signs)) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX +func (x Uint64x2) GreaterEqual(y Uint64x2) Mask64x2 { + a, b := x.AsInt64x2(), y.AsInt64x2() + ones := x.Equal(x).AsInt64x2() + signs := 
ones.ShiftAllLeft(64 - 1) + return b.Xor(signs).Greater(a.Xor(signs)).AsInt64x2().Xor(ones).AsMask64x2() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX +func (x Uint64x2) LessEqual(y Uint64x2) Mask64x2 { + a, b := x.AsInt64x2(), y.AsInt64x2() + ones := x.Equal(x).AsInt64x2() + signs := ones.ShiftAllLeft(64 - 1) + return a.Xor(signs).Greater(b.Xor(signs)).AsInt64x2().Xor(ones).AsMask64x2() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX +func (x Uint64x2) NotEqual(y Uint64x2) Mask64x2 { + a, b := x.AsInt64x2(), y.AsInt64x2() + ones := x.Equal(x).AsInt64x2() + return a.Equal(b).AsInt64x2().Xor(ones).AsMask64x2() +} + +// Greater returns a mask whose elements indicate whether x > y +// +// Emulated, CPU Feature AVX2 +func (x Uint8x32) Greater(y Uint8x32) Mask8x32 { + a, b := x.AsInt8x32(), y.AsInt8x32() + signs := BroadcastInt8x32(-1 << (8 - 1)) + return a.Xor(signs).Greater(b.Xor(signs)) +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX2 +func (x Uint8x32) Less(y Uint8x32) Mask8x32 { + a, b := x.AsInt8x32(), y.AsInt8x32() + signs := BroadcastInt8x32(-1 << (8 - 1)) + return b.Xor(signs).Greater(a.Xor(signs)) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX2 +func (x Uint8x32) GreaterEqual(y Uint8x32) Mask8x32 { + a, b := x.AsInt8x32(), y.AsInt8x32() + ones := x.Equal(x).AsInt8x32() + signs := BroadcastInt8x32(-1 << (8 - 1)) + return b.Xor(signs).Greater(a.Xor(signs)).AsInt8x32().Xor(ones).AsMask8x32() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX2 +func (x Uint8x32) LessEqual(y Uint8x32) Mask8x32 { + a, b := x.AsInt8x32(), y.AsInt8x32() + ones := x.Equal(x).AsInt8x32() + signs := BroadcastInt8x32(-1 << (8 - 1)) + return a.Xor(signs).Greater(b.Xor(signs)).AsInt8x32().Xor(ones).AsMask8x32() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX2 +func (x Uint8x32) NotEqual(y Uint8x32) Mask8x32 { + a, b := x.AsInt8x32(), y.AsInt8x32() + ones := x.Equal(x).AsInt8x32() + return a.Equal(b).AsInt8x32().Xor(ones).AsMask8x32() +} + +// Greater returns a mask whose elements indicate whether x > y +// +// Emulated, CPU Feature AVX2 +func (x Uint16x16) Greater(y Uint16x16) Mask16x16 { + a, b := x.AsInt16x16(), y.AsInt16x16() + ones := x.Equal(x).AsInt16x16() + signs := ones.ShiftAllLeft(16 - 1) + return a.Xor(signs).Greater(b.Xor(signs)) +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX2 +func (x Uint16x16) Less(y Uint16x16) Mask16x16 { + a, b := x.AsInt16x16(), y.AsInt16x16() + ones := x.Equal(x).AsInt16x16() + signs := ones.ShiftAllLeft(16 - 1) + return b.Xor(signs).Greater(a.Xor(signs)) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX2 +func (x Uint16x16) GreaterEqual(y Uint16x16) Mask16x16 { + a, b := x.AsInt16x16(), y.AsInt16x16() + ones := x.Equal(x).AsInt16x16() + signs := ones.ShiftAllLeft(16 - 1) + return b.Xor(signs).Greater(a.Xor(signs)).AsInt16x16().Xor(ones).AsMask16x16() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX2 +func (x Uint16x16) LessEqual(y Uint16x16) Mask16x16 { + a, b := x.AsInt16x16(), y.AsInt16x16() + ones := x.Equal(x).AsInt16x16() + signs := ones.ShiftAllLeft(16 - 1) + 
return a.Xor(signs).Greater(b.Xor(signs)).AsInt16x16().Xor(ones).AsMask16x16() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX2 +func (x Uint16x16) NotEqual(y Uint16x16) Mask16x16 { + a, b := x.AsInt16x16(), y.AsInt16x16() + ones := x.Equal(x).AsInt16x16() + return a.Equal(b).AsInt16x16().Xor(ones).AsMask16x16() +} + +// Greater returns a mask whose elements indicate whether x > y +// +// Emulated, CPU Feature AVX2 +func (x Uint32x8) Greater(y Uint32x8) Mask32x8 { + a, b := x.AsInt32x8(), y.AsInt32x8() + ones := x.Equal(x).AsInt32x8() + signs := ones.ShiftAllLeft(32 - 1) + return a.Xor(signs).Greater(b.Xor(signs)) +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX2 +func (x Uint32x8) Less(y Uint32x8) Mask32x8 { + a, b := x.AsInt32x8(), y.AsInt32x8() + ones := x.Equal(x).AsInt32x8() + signs := ones.ShiftAllLeft(32 - 1) + return b.Xor(signs).Greater(a.Xor(signs)) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX2 +func (x Uint32x8) GreaterEqual(y Uint32x8) Mask32x8 { + a, b := x.AsInt32x8(), y.AsInt32x8() + ones := x.Equal(x).AsInt32x8() + signs := ones.ShiftAllLeft(32 - 1) + return b.Xor(signs).Greater(a.Xor(signs)).AsInt32x8().Xor(ones).AsMask32x8() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX2 +func (x Uint32x8) LessEqual(y Uint32x8) Mask32x8 { + a, b := x.AsInt32x8(), y.AsInt32x8() + ones := x.Equal(x).AsInt32x8() + signs := ones.ShiftAllLeft(32 - 1) + return a.Xor(signs).Greater(b.Xor(signs)).AsInt32x8().Xor(ones).AsMask32x8() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX2 +func (x Uint32x8) NotEqual(y Uint32x8) Mask32x8 { + a, b := x.AsInt32x8(), y.AsInt32x8() + ones := x.Equal(x).AsInt32x8() + return a.Equal(b).AsInt32x8().Xor(ones).AsMask32x8() +} + +// Greater returns a mask whose elements indicate whether x > y +// +// Emulated, CPU Feature AVX2 +func (x Uint64x4) Greater(y Uint64x4) Mask64x4 { + a, b := x.AsInt64x4(), y.AsInt64x4() + ones := x.Equal(x).AsInt64x4() + signs := ones.ShiftAllLeft(64 - 1) + return a.Xor(signs).Greater(b.Xor(signs)) +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX2 +func (x Uint64x4) Less(y Uint64x4) Mask64x4 { + a, b := x.AsInt64x4(), y.AsInt64x4() + ones := x.Equal(x).AsInt64x4() + signs := ones.ShiftAllLeft(64 - 1) + return b.Xor(signs).Greater(a.Xor(signs)) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX2 +func (x Uint64x4) GreaterEqual(y Uint64x4) Mask64x4 { + a, b := x.AsInt64x4(), y.AsInt64x4() + ones := x.Equal(x).AsInt64x4() + signs := ones.ShiftAllLeft(64 - 1) + return b.Xor(signs).Greater(a.Xor(signs)).AsInt64x4().Xor(ones).AsMask64x4() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX2 +func (x Uint64x4) LessEqual(y Uint64x4) Mask64x4 { + a, b := x.AsInt64x4(), y.AsInt64x4() + ones := x.Equal(x).AsInt64x4() + signs := ones.ShiftAllLeft(64 - 1) + return a.Xor(signs).Greater(b.Xor(signs)).AsInt64x4().Xor(ones).AsMask64x4() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX2 +func (x Uint64x4) NotEqual(y Uint64x4) Mask64x4 { + a, b := x.AsInt64x4(), y.AsInt64x4() + ones := x.Equal(x).AsInt64x4() + return 
a.Equal(b).AsInt64x4().Xor(ones).AsMask64x4() +} diff --git a/src/simd/genfiles.go b/src/simd/genfiles.go index 022ddd168138e1..a1da5ad05612d2 100644 --- a/src/simd/genfiles.go +++ b/src/simd/genfiles.go @@ -175,8 +175,6 @@ func prologue(s string, out io.Writer) { package simd -import "unsafe" - `, s) } @@ -708,7 +706,10 @@ func Broadcast{{.Vec}}(x {{.Type}}) {{.Vec}} { `) func main() { - sl := flag.String("sl", "slice_amd64.go", "file name for slice operations") + sl := flag.String("sl", "slice_gen_amd64.go", "file name for slice operations") + cm := flag.String("cm", "compare_gen_amd64.go", "file name for comparison operations") + mm := flag.String("mm", "maskmerge_gen_amd64.go", "file name for mask/merge operations") + op := flag.String("op", "other_gen_amd64.go", "file name for other operations") ush := flag.String("ush", "unsafe_helpers.go", "file name for unsafe helpers") bh := flag.String("bh", "binary_helpers_test.go", "file name for binary test helpers") uh := flag.String("uh", "unary_helpers_test.go", "file name for unary test helpers") @@ -718,15 +719,27 @@ func main() { flag.Parse() if *sl != "" { - one(*sl, prologue, + one(*sl, unsafePrologue, sliceTemplate, avx512MaskedLoadSlicePartTemplate, avx2MaskedLoadSlicePartTemplate, avx2SmallLoadSlicePartTemplate, - avx2MaskedTemplate, - avx512MaskedTemplate, + ) + } + if *cm != "" { + one(*cm, prologue, avx2SignedComparisonsTemplate, avx2UnsignedComparisonsTemplate, + ) + } + if *mm != "" { + one(*mm, prologue, + avx2MaskedTemplate, + avx512MaskedTemplate, + ) + } + if *op != "" { + one(*op, prologue, broadcastTemplate, ) } diff --git a/src/simd/maskmerge_gen_amd64.go b/src/simd/maskmerge_gen_amd64.go new file mode 100644 index 00000000000000..71a617c4250de6 --- /dev/null +++ b/src/simd/maskmerge_gen_amd64.go @@ -0,0 +1,403 @@ +// Code generated by 'go run genfiles.go'; DO NOT EDIT. + +//go:build goexperiment.simd + +package simd + +// Masked returns x but with elements zeroed where mask is false. +func (x Int8x16) Masked(mask Mask8x16) Int8x16 { + im := mask.AsInt8x16() + return im.And(x) +} + +// Merge returns x but with elements set to y where mask is false. +func (x Int8x16) Merge(y Int8x16, mask Mask8x16) Int8x16 { + im := mask.AsInt8x16() + return y.blend(x, im) +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Int16x8) Masked(mask Mask16x8) Int16x8 { + im := mask.AsInt16x8() + return im.And(x) +} + +// Merge returns x but with elements set to y where mask is false. +func (x Int16x8) Merge(y Int16x8, mask Mask16x8) Int16x8 { + im := mask.AsInt16x8().AsInt8x16() + ix := x.AsInt8x16() + iy := y.AsInt8x16() + return iy.blend(ix, im).AsInt16x8() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Int32x4) Masked(mask Mask32x4) Int32x4 { + im := mask.AsInt32x4() + return im.And(x) +} + +// Merge returns x but with elements set to y where mask is false. +func (x Int32x4) Merge(y Int32x4, mask Mask32x4) Int32x4 { + im := mask.AsInt32x4().AsInt8x16() + ix := x.AsInt8x16() + iy := y.AsInt8x16() + return iy.blend(ix, im).AsInt32x4() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Int64x2) Masked(mask Mask64x2) Int64x2 { + im := mask.AsInt64x2() + return im.And(x) +} + +// Merge returns x but with elements set to y where mask is false. 
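
The unsigned comparisons in compare_gen_amd64.go above lean on one identity: flipping the sign bit of both operands turns an unsigned ordering into a signed one, so the AVX/AVX2 signed Greater instruction can stand in for the missing unsigned compare. A minimal scalar sketch of that identity follows; the helper name is illustrative only and not part of package simd.

package sketch

// unsignedGreater8 reports x > y for unsigned bytes using only a signed
// comparison, mirroring the Xor-with-sign-bit trick in the Uint*xN
// Greater/Less/GreaterEqual/LessEqual emulations above.
func unsignedGreater8(x, y uint8) bool {
	const sign = int8(-1 << 7) // 0x80, the per-lane sign bit
	return int8(x)^sign > int8(y)^sign
}

GreaterEqual, LessEqual, and NotEqual then follow by Xor-ing the resulting mask with an all-ones vector, which is the complement step used throughout the generated file.
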
+func (x Int64x2) Merge(y Int64x2, mask Mask64x2) Int64x2 { + im := mask.AsInt64x2().AsInt8x16() + ix := x.AsInt8x16() + iy := y.AsInt8x16() + return iy.blend(ix, im).AsInt64x2() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Uint8x16) Masked(mask Mask8x16) Uint8x16 { + im := mask.AsInt8x16() + return x.AsInt8x16().And(im).AsUint8x16() +} + +// Merge returns x but with elements set to y where mask is false. +func (x Uint8x16) Merge(y Uint8x16, mask Mask8x16) Uint8x16 { + im := mask.AsInt8x16() + ix := x.AsInt8x16() + iy := y.AsInt8x16() + return iy.blend(ix, im).AsUint8x16() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Uint16x8) Masked(mask Mask16x8) Uint16x8 { + im := mask.AsInt16x8() + return x.AsInt16x8().And(im).AsUint16x8() +} + +// Merge returns x but with elements set to y where mask is false. +func (x Uint16x8) Merge(y Uint16x8, mask Mask16x8) Uint16x8 { + im := mask.AsInt16x8().AsInt8x16() + ix := x.AsInt8x16() + iy := y.AsInt8x16() + return iy.blend(ix, im).AsUint16x8() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Uint32x4) Masked(mask Mask32x4) Uint32x4 { + im := mask.AsInt32x4() + return x.AsInt32x4().And(im).AsUint32x4() +} + +// Merge returns x but with elements set to y where mask is false. +func (x Uint32x4) Merge(y Uint32x4, mask Mask32x4) Uint32x4 { + im := mask.AsInt32x4().AsInt8x16() + ix := x.AsInt8x16() + iy := y.AsInt8x16() + return iy.blend(ix, im).AsUint32x4() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Uint64x2) Masked(mask Mask64x2) Uint64x2 { + im := mask.AsInt64x2() + return x.AsInt64x2().And(im).AsUint64x2() +} + +// Merge returns x but with elements set to y where mask is false. +func (x Uint64x2) Merge(y Uint64x2, mask Mask64x2) Uint64x2 { + im := mask.AsInt64x2().AsInt8x16() + ix := x.AsInt8x16() + iy := y.AsInt8x16() + return iy.blend(ix, im).AsUint64x2() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Float32x4) Masked(mask Mask32x4) Float32x4 { + im := mask.AsInt32x4() + return x.AsInt32x4().And(im).AsFloat32x4() +} + +// Merge returns x but with elements set to y where mask is false. +func (x Float32x4) Merge(y Float32x4, mask Mask32x4) Float32x4 { + im := mask.AsInt32x4().AsInt8x16() + ix := x.AsInt8x16() + iy := y.AsInt8x16() + return iy.blend(ix, im).AsFloat32x4() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Float64x2) Masked(mask Mask64x2) Float64x2 { + im := mask.AsInt64x2() + return x.AsInt64x2().And(im).AsFloat64x2() +} + +// Merge returns x but with elements set to y where mask is false. +func (x Float64x2) Merge(y Float64x2, mask Mask64x2) Float64x2 { + im := mask.AsInt64x2().AsInt8x16() + ix := x.AsInt8x16() + iy := y.AsInt8x16() + return iy.blend(ix, im).AsFloat64x2() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Int8x32) Masked(mask Mask8x32) Int8x32 { + im := mask.AsInt8x32() + return im.And(x) +} + +// Merge returns x but with elements set to y where mask is false. +func (x Int8x32) Merge(y Int8x32, mask Mask8x32) Int8x32 { + im := mask.AsInt8x32() + return y.blend(x, im) +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Int16x16) Masked(mask Mask16x16) Int16x16 { + im := mask.AsInt16x16() + return im.And(x) +} + +// Merge returns x but with elements set to y where mask is false. 
+func (x Int16x16) Merge(y Int16x16, mask Mask16x16) Int16x16 { + im := mask.AsInt16x16().AsInt8x32() + ix := x.AsInt8x32() + iy := y.AsInt8x32() + return iy.blend(ix, im).AsInt16x16() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Int32x8) Masked(mask Mask32x8) Int32x8 { + im := mask.AsInt32x8() + return im.And(x) +} + +// Merge returns x but with elements set to y where mask is false. +func (x Int32x8) Merge(y Int32x8, mask Mask32x8) Int32x8 { + im := mask.AsInt32x8().AsInt8x32() + ix := x.AsInt8x32() + iy := y.AsInt8x32() + return iy.blend(ix, im).AsInt32x8() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Int64x4) Masked(mask Mask64x4) Int64x4 { + im := mask.AsInt64x4() + return im.And(x) +} + +// Merge returns x but with elements set to y where mask is false. +func (x Int64x4) Merge(y Int64x4, mask Mask64x4) Int64x4 { + im := mask.AsInt64x4().AsInt8x32() + ix := x.AsInt8x32() + iy := y.AsInt8x32() + return iy.blend(ix, im).AsInt64x4() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Uint8x32) Masked(mask Mask8x32) Uint8x32 { + im := mask.AsInt8x32() + return x.AsInt8x32().And(im).AsUint8x32() +} + +// Merge returns x but with elements set to y where mask is false. +func (x Uint8x32) Merge(y Uint8x32, mask Mask8x32) Uint8x32 { + im := mask.AsInt8x32() + ix := x.AsInt8x32() + iy := y.AsInt8x32() + return iy.blend(ix, im).AsUint8x32() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Uint16x16) Masked(mask Mask16x16) Uint16x16 { + im := mask.AsInt16x16() + return x.AsInt16x16().And(im).AsUint16x16() +} + +// Merge returns x but with elements set to y where mask is false. +func (x Uint16x16) Merge(y Uint16x16, mask Mask16x16) Uint16x16 { + im := mask.AsInt16x16().AsInt8x32() + ix := x.AsInt8x32() + iy := y.AsInt8x32() + return iy.blend(ix, im).AsUint16x16() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Uint32x8) Masked(mask Mask32x8) Uint32x8 { + im := mask.AsInt32x8() + return x.AsInt32x8().And(im).AsUint32x8() +} + +// Merge returns x but with elements set to y where mask is false. +func (x Uint32x8) Merge(y Uint32x8, mask Mask32x8) Uint32x8 { + im := mask.AsInt32x8().AsInt8x32() + ix := x.AsInt8x32() + iy := y.AsInt8x32() + return iy.blend(ix, im).AsUint32x8() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Uint64x4) Masked(mask Mask64x4) Uint64x4 { + im := mask.AsInt64x4() + return x.AsInt64x4().And(im).AsUint64x4() +} + +// Merge returns x but with elements set to y where mask is false. +func (x Uint64x4) Merge(y Uint64x4, mask Mask64x4) Uint64x4 { + im := mask.AsInt64x4().AsInt8x32() + ix := x.AsInt8x32() + iy := y.AsInt8x32() + return iy.blend(ix, im).AsUint64x4() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Float32x8) Masked(mask Mask32x8) Float32x8 { + im := mask.AsInt32x8() + return x.AsInt32x8().And(im).AsFloat32x8() +} + +// Merge returns x but with elements set to y where mask is false. +func (x Float32x8) Merge(y Float32x8, mask Mask32x8) Float32x8 { + im := mask.AsInt32x8().AsInt8x32() + ix := x.AsInt8x32() + iy := y.AsInt8x32() + return iy.blend(ix, im).AsFloat32x8() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Float64x4) Masked(mask Mask64x4) Float64x4 { + im := mask.AsInt64x4() + return x.AsInt64x4().And(im).AsFloat64x4() +} + +// Merge returns x but with elements set to y where mask is false. 
+func (x Float64x4) Merge(y Float64x4, mask Mask64x4) Float64x4 { + im := mask.AsInt64x4().AsInt8x32() + ix := x.AsInt8x32() + iy := y.AsInt8x32() + return iy.blend(ix, im).AsFloat64x4() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Int8x64) Masked(mask Mask8x64) Int8x64 { + im := mask.AsInt8x64() + return im.And(x) +} + +// Merge returns x but with elements set to y where m is false. +func (x Int8x64) Merge(y Int8x64, mask Mask8x64) Int8x64 { + return y.blendMasked(x, mask) +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Int16x32) Masked(mask Mask16x32) Int16x32 { + im := mask.AsInt16x32() + return im.And(x) +} + +// Merge returns x but with elements set to y where m is false. +func (x Int16x32) Merge(y Int16x32, mask Mask16x32) Int16x32 { + return y.blendMasked(x, mask) +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Int32x16) Masked(mask Mask32x16) Int32x16 { + im := mask.AsInt32x16() + return im.And(x) +} + +// Merge returns x but with elements set to y where m is false. +func (x Int32x16) Merge(y Int32x16, mask Mask32x16) Int32x16 { + return y.blendMasked(x, mask) +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Int64x8) Masked(mask Mask64x8) Int64x8 { + im := mask.AsInt64x8() + return im.And(x) +} + +// Merge returns x but with elements set to y where m is false. +func (x Int64x8) Merge(y Int64x8, mask Mask64x8) Int64x8 { + return y.blendMasked(x, mask) +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Uint8x64) Masked(mask Mask8x64) Uint8x64 { + im := mask.AsInt8x64() + return x.AsInt8x64().And(im).AsUint8x64() +} + +// Merge returns x but with elements set to y where m is false. +func (x Uint8x64) Merge(y Uint8x64, mask Mask8x64) Uint8x64 { + ix := x.AsInt8x64() + iy := y.AsInt8x64() + return iy.blendMasked(ix, mask).AsUint8x64() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Uint16x32) Masked(mask Mask16x32) Uint16x32 { + im := mask.AsInt16x32() + return x.AsInt16x32().And(im).AsUint16x32() +} + +// Merge returns x but with elements set to y where m is false. +func (x Uint16x32) Merge(y Uint16x32, mask Mask16x32) Uint16x32 { + ix := x.AsInt16x32() + iy := y.AsInt16x32() + return iy.blendMasked(ix, mask).AsUint16x32() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Uint32x16) Masked(mask Mask32x16) Uint32x16 { + im := mask.AsInt32x16() + return x.AsInt32x16().And(im).AsUint32x16() +} + +// Merge returns x but with elements set to y where m is false. +func (x Uint32x16) Merge(y Uint32x16, mask Mask32x16) Uint32x16 { + ix := x.AsInt32x16() + iy := y.AsInt32x16() + return iy.blendMasked(ix, mask).AsUint32x16() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Uint64x8) Masked(mask Mask64x8) Uint64x8 { + im := mask.AsInt64x8() + return x.AsInt64x8().And(im).AsUint64x8() +} + +// Merge returns x but with elements set to y where m is false. +func (x Uint64x8) Merge(y Uint64x8, mask Mask64x8) Uint64x8 { + ix := x.AsInt64x8() + iy := y.AsInt64x8() + return iy.blendMasked(ix, mask).AsUint64x8() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Float32x16) Masked(mask Mask32x16) Float32x16 { + im := mask.AsInt32x16() + return x.AsInt32x16().And(im).AsFloat32x16() +} + +// Merge returns x but with elements set to y where m is false. 
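
For the 128- and 256-bit shapes above, a mask lane is either all zeros or all ones, so Masked reduces to a bitwise And and Merge to a bitwise blend of x and y; the 512-bit shapes that follow use the hardware masked blend (blendMasked) instead. Below is a scalar sketch of the bitwise form, with illustrative names that are not part of package simd.

package sketch

// maskedBits zeroes x where the mask bits are clear, mirroring Masked,
// which is mask.AsInt*().And(x) in the generated code.
func maskedBits(x, m uint64) uint64 { return x & m }

// mergeBits keeps x where the mask bits are set and takes y elsewhere,
// mirroring Merge under an all-ones/all-zeros per-lane mask.
func mergeBits(x, y, m uint64) uint64 { return x&m | y&^m }
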
+func (x Float32x16) Merge(y Float32x16, mask Mask32x16) Float32x16 { + ix := x.AsInt32x16() + iy := y.AsInt32x16() + return iy.blendMasked(ix, mask).AsFloat32x16() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Float64x8) Masked(mask Mask64x8) Float64x8 { + im := mask.AsInt64x8() + return x.AsInt64x8().And(im).AsFloat64x8() +} + +// Merge returns x but with elements set to y where m is false. +func (x Float64x8) Merge(y Float64x8, mask Mask64x8) Float64x8 { + ix := x.AsInt64x8() + iy := y.AsInt64x8() + return iy.blendMasked(ix, mask).AsFloat64x8() +} diff --git a/src/simd/other_gen_amd64.go b/src/simd/other_gen_amd64.go new file mode 100644 index 00000000000000..ed9394cf7d3951 --- /dev/null +++ b/src/simd/other_gen_amd64.go @@ -0,0 +1,275 @@ +// Code generated by 'go run genfiles.go'; DO NOT EDIT. + +//go:build goexperiment.simd + +package simd + +// BroadcastInt8x16 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX2 +func BroadcastInt8x16(x int8) Int8x16 { + var z Int8x16 + return z.SetElem(0, x).Broadcast128() +} + +// BroadcastInt16x8 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX2 +func BroadcastInt16x8(x int16) Int16x8 { + var z Int16x8 + return z.SetElem(0, x).Broadcast128() +} + +// BroadcastInt32x4 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX2 +func BroadcastInt32x4(x int32) Int32x4 { + var z Int32x4 + return z.SetElem(0, x).Broadcast128() +} + +// BroadcastInt64x2 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX2 +func BroadcastInt64x2(x int64) Int64x2 { + var z Int64x2 + return z.SetElem(0, x).Broadcast128() +} + +// BroadcastUint8x16 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX2 +func BroadcastUint8x16(x uint8) Uint8x16 { + var z Uint8x16 + return z.SetElem(0, x).Broadcast128() +} + +// BroadcastUint16x8 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX2 +func BroadcastUint16x8(x uint16) Uint16x8 { + var z Uint16x8 + return z.SetElem(0, x).Broadcast128() +} + +// BroadcastUint32x4 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX2 +func BroadcastUint32x4(x uint32) Uint32x4 { + var z Uint32x4 + return z.SetElem(0, x).Broadcast128() +} + +// BroadcastUint64x2 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX2 +func BroadcastUint64x2(x uint64) Uint64x2 { + var z Uint64x2 + return z.SetElem(0, x).Broadcast128() +} + +// BroadcastFloat32x4 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX2 +func BroadcastFloat32x4(x float32) Float32x4 { + var z Float32x4 + return z.SetElem(0, x).Broadcast128() +} + +// BroadcastFloat64x2 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX2 +func BroadcastFloat64x2(x float64) Float64x2 { + var z Float64x2 + return z.SetElem(0, x).Broadcast128() +} + +// BroadcastInt8x32 returns a vector with the input +// x assigned to all elements of the output. 
+// +// Emulated, CPU Feature AVX2 +func BroadcastInt8x32(x int8) Int8x32 { + var z Int8x16 + return z.SetElem(0, x).Broadcast256() +} + +// BroadcastInt16x16 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX2 +func BroadcastInt16x16(x int16) Int16x16 { + var z Int16x8 + return z.SetElem(0, x).Broadcast256() +} + +// BroadcastInt32x8 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX2 +func BroadcastInt32x8(x int32) Int32x8 { + var z Int32x4 + return z.SetElem(0, x).Broadcast256() +} + +// BroadcastInt64x4 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX2 +func BroadcastInt64x4(x int64) Int64x4 { + var z Int64x2 + return z.SetElem(0, x).Broadcast256() +} + +// BroadcastUint8x32 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX2 +func BroadcastUint8x32(x uint8) Uint8x32 { + var z Uint8x16 + return z.SetElem(0, x).Broadcast256() +} + +// BroadcastUint16x16 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX2 +func BroadcastUint16x16(x uint16) Uint16x16 { + var z Uint16x8 + return z.SetElem(0, x).Broadcast256() +} + +// BroadcastUint32x8 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX2 +func BroadcastUint32x8(x uint32) Uint32x8 { + var z Uint32x4 + return z.SetElem(0, x).Broadcast256() +} + +// BroadcastUint64x4 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX2 +func BroadcastUint64x4(x uint64) Uint64x4 { + var z Uint64x2 + return z.SetElem(0, x).Broadcast256() +} + +// BroadcastFloat32x8 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX2 +func BroadcastFloat32x8(x float32) Float32x8 { + var z Float32x4 + return z.SetElem(0, x).Broadcast256() +} + +// BroadcastFloat64x4 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX2 +func BroadcastFloat64x4(x float64) Float64x4 { + var z Float64x2 + return z.SetElem(0, x).Broadcast256() +} + +// BroadcastInt8x64 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX512BW +func BroadcastInt8x64(x int8) Int8x64 { + var z Int8x16 + return z.SetElem(0, x).Broadcast512() +} + +// BroadcastInt16x32 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX512BW +func BroadcastInt16x32(x int16) Int16x32 { + var z Int16x8 + return z.SetElem(0, x).Broadcast512() +} + +// BroadcastInt32x16 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX512F +func BroadcastInt32x16(x int32) Int32x16 { + var z Int32x4 + return z.SetElem(0, x).Broadcast512() +} + +// BroadcastInt64x8 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX512F +func BroadcastInt64x8(x int64) Int64x8 { + var z Int64x2 + return z.SetElem(0, x).Broadcast512() +} + +// BroadcastUint8x64 returns a vector with the input +// x assigned to all elements of the output. 
+// +// Emulated, CPU Feature AVX512BW +func BroadcastUint8x64(x uint8) Uint8x64 { + var z Uint8x16 + return z.SetElem(0, x).Broadcast512() +} + +// BroadcastUint16x32 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX512BW +func BroadcastUint16x32(x uint16) Uint16x32 { + var z Uint16x8 + return z.SetElem(0, x).Broadcast512() +} + +// BroadcastUint32x16 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX512F +func BroadcastUint32x16(x uint32) Uint32x16 { + var z Uint32x4 + return z.SetElem(0, x).Broadcast512() +} + +// BroadcastUint64x8 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX512F +func BroadcastUint64x8(x uint64) Uint64x8 { + var z Uint64x2 + return z.SetElem(0, x).Broadcast512() +} + +// BroadcastFloat32x16 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX512F +func BroadcastFloat32x16(x float32) Float32x16 { + var z Float32x4 + return z.SetElem(0, x).Broadcast512() +} + +// BroadcastFloat64x8 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX512F +func BroadcastFloat64x8(x float64) Float64x8 { + var z Float64x2 + return z.SetElem(0, x).Broadcast512() +} diff --git a/src/simd/slice_amd64.go b/src/simd/slice_amd64.go deleted file mode 100644 index 3ad2672a05b023..00000000000000 --- a/src/simd/slice_amd64.go +++ /dev/null @@ -1,2407 +0,0 @@ -// Code generated by 'go run genfiles.go'; DO NOT EDIT. - -//go:build goexperiment.simd - -package simd - -import "unsafe" - -// LoadInt8x16Slice loads an Int8x16 from a slice of at least 16 int8s -func LoadInt8x16Slice(s []int8) Int8x16 { - return LoadInt8x16((*[16]int8)(s)) -} - -// StoreSlice stores x into a slice of at least 16 int8s -func (x Int8x16) StoreSlice(s []int8) { - x.Store((*[16]int8)(s)) -} - -// LoadInt16x8Slice loads an Int16x8 from a slice of at least 8 int16s -func LoadInt16x8Slice(s []int16) Int16x8 { - return LoadInt16x8((*[8]int16)(s)) -} - -// StoreSlice stores x into a slice of at least 8 int16s -func (x Int16x8) StoreSlice(s []int16) { - x.Store((*[8]int16)(s)) -} - -// LoadInt32x4Slice loads an Int32x4 from a slice of at least 4 int32s -func LoadInt32x4Slice(s []int32) Int32x4 { - return LoadInt32x4((*[4]int32)(s)) -} - -// StoreSlice stores x into a slice of at least 4 int32s -func (x Int32x4) StoreSlice(s []int32) { - x.Store((*[4]int32)(s)) -} - -// LoadInt64x2Slice loads an Int64x2 from a slice of at least 2 int64s -func LoadInt64x2Slice(s []int64) Int64x2 { - return LoadInt64x2((*[2]int64)(s)) -} - -// StoreSlice stores x into a slice of at least 2 int64s -func (x Int64x2) StoreSlice(s []int64) { - x.Store((*[2]int64)(s)) -} - -// LoadUint8x16Slice loads an Uint8x16 from a slice of at least 16 uint8s -func LoadUint8x16Slice(s []uint8) Uint8x16 { - return LoadUint8x16((*[16]uint8)(s)) -} - -// StoreSlice stores x into a slice of at least 16 uint8s -func (x Uint8x16) StoreSlice(s []uint8) { - x.Store((*[16]uint8)(s)) -} - -// LoadUint16x8Slice loads an Uint16x8 from a slice of at least 8 uint16s -func LoadUint16x8Slice(s []uint16) Uint16x8 { - return LoadUint16x8((*[8]uint16)(s)) -} - -// StoreSlice stores x into a slice of at least 8 uint16s -func (x Uint16x8) StoreSlice(s []uint16) { - x.Store((*[8]uint16)(s)) -} - -// LoadUint32x4Slice loads an Uint32x4 from a slice of at least 4 uint32s -func 
LoadUint32x4Slice(s []uint32) Uint32x4 { - return LoadUint32x4((*[4]uint32)(s)) -} - -// StoreSlice stores x into a slice of at least 4 uint32s -func (x Uint32x4) StoreSlice(s []uint32) { - x.Store((*[4]uint32)(s)) -} - -// LoadUint64x2Slice loads an Uint64x2 from a slice of at least 2 uint64s -func LoadUint64x2Slice(s []uint64) Uint64x2 { - return LoadUint64x2((*[2]uint64)(s)) -} - -// StoreSlice stores x into a slice of at least 2 uint64s -func (x Uint64x2) StoreSlice(s []uint64) { - x.Store((*[2]uint64)(s)) -} - -// LoadFloat32x4Slice loads a Float32x4 from a slice of at least 4 float32s -func LoadFloat32x4Slice(s []float32) Float32x4 { - return LoadFloat32x4((*[4]float32)(s)) -} - -// StoreSlice stores x into a slice of at least 4 float32s -func (x Float32x4) StoreSlice(s []float32) { - x.Store((*[4]float32)(s)) -} - -// LoadFloat64x2Slice loads a Float64x2 from a slice of at least 2 float64s -func LoadFloat64x2Slice(s []float64) Float64x2 { - return LoadFloat64x2((*[2]float64)(s)) -} - -// StoreSlice stores x into a slice of at least 2 float64s -func (x Float64x2) StoreSlice(s []float64) { - x.Store((*[2]float64)(s)) -} - -// LoadInt8x32Slice loads an Int8x32 from a slice of at least 32 int8s -func LoadInt8x32Slice(s []int8) Int8x32 { - return LoadInt8x32((*[32]int8)(s)) -} - -// StoreSlice stores x into a slice of at least 32 int8s -func (x Int8x32) StoreSlice(s []int8) { - x.Store((*[32]int8)(s)) -} - -// LoadInt16x16Slice loads an Int16x16 from a slice of at least 16 int16s -func LoadInt16x16Slice(s []int16) Int16x16 { - return LoadInt16x16((*[16]int16)(s)) -} - -// StoreSlice stores x into a slice of at least 16 int16s -func (x Int16x16) StoreSlice(s []int16) { - x.Store((*[16]int16)(s)) -} - -// LoadInt32x8Slice loads an Int32x8 from a slice of at least 8 int32s -func LoadInt32x8Slice(s []int32) Int32x8 { - return LoadInt32x8((*[8]int32)(s)) -} - -// StoreSlice stores x into a slice of at least 8 int32s -func (x Int32x8) StoreSlice(s []int32) { - x.Store((*[8]int32)(s)) -} - -// LoadInt64x4Slice loads an Int64x4 from a slice of at least 4 int64s -func LoadInt64x4Slice(s []int64) Int64x4 { - return LoadInt64x4((*[4]int64)(s)) -} - -// StoreSlice stores x into a slice of at least 4 int64s -func (x Int64x4) StoreSlice(s []int64) { - x.Store((*[4]int64)(s)) -} - -// LoadUint8x32Slice loads an Uint8x32 from a slice of at least 32 uint8s -func LoadUint8x32Slice(s []uint8) Uint8x32 { - return LoadUint8x32((*[32]uint8)(s)) -} - -// StoreSlice stores x into a slice of at least 32 uint8s -func (x Uint8x32) StoreSlice(s []uint8) { - x.Store((*[32]uint8)(s)) -} - -// LoadUint16x16Slice loads an Uint16x16 from a slice of at least 16 uint16s -func LoadUint16x16Slice(s []uint16) Uint16x16 { - return LoadUint16x16((*[16]uint16)(s)) -} - -// StoreSlice stores x into a slice of at least 16 uint16s -func (x Uint16x16) StoreSlice(s []uint16) { - x.Store((*[16]uint16)(s)) -} - -// LoadUint32x8Slice loads an Uint32x8 from a slice of at least 8 uint32s -func LoadUint32x8Slice(s []uint32) Uint32x8 { - return LoadUint32x8((*[8]uint32)(s)) -} - -// StoreSlice stores x into a slice of at least 8 uint32s -func (x Uint32x8) StoreSlice(s []uint32) { - x.Store((*[8]uint32)(s)) -} - -// LoadUint64x4Slice loads an Uint64x4 from a slice of at least 4 uint64s -func LoadUint64x4Slice(s []uint64) Uint64x4 { - return LoadUint64x4((*[4]uint64)(s)) -} - -// StoreSlice stores x into a slice of at least 4 uint64s -func (x Uint64x4) StoreSlice(s []uint64) { - x.Store((*[4]uint64)(s)) -} - -// LoadFloat32x8Slice loads a 
Float32x8 from a slice of at least 8 float32s -func LoadFloat32x8Slice(s []float32) Float32x8 { - return LoadFloat32x8((*[8]float32)(s)) -} - -// StoreSlice stores x into a slice of at least 8 float32s -func (x Float32x8) StoreSlice(s []float32) { - x.Store((*[8]float32)(s)) -} - -// LoadFloat64x4Slice loads a Float64x4 from a slice of at least 4 float64s -func LoadFloat64x4Slice(s []float64) Float64x4 { - return LoadFloat64x4((*[4]float64)(s)) -} - -// StoreSlice stores x into a slice of at least 4 float64s -func (x Float64x4) StoreSlice(s []float64) { - x.Store((*[4]float64)(s)) -} - -// LoadInt8x64Slice loads an Int8x64 from a slice of at least 64 int8s -func LoadInt8x64Slice(s []int8) Int8x64 { - return LoadInt8x64((*[64]int8)(s)) -} - -// StoreSlice stores x into a slice of at least 64 int8s -func (x Int8x64) StoreSlice(s []int8) { - x.Store((*[64]int8)(s)) -} - -// LoadInt16x32Slice loads an Int16x32 from a slice of at least 32 int16s -func LoadInt16x32Slice(s []int16) Int16x32 { - return LoadInt16x32((*[32]int16)(s)) -} - -// StoreSlice stores x into a slice of at least 32 int16s -func (x Int16x32) StoreSlice(s []int16) { - x.Store((*[32]int16)(s)) -} - -// LoadInt32x16Slice loads an Int32x16 from a slice of at least 16 int32s -func LoadInt32x16Slice(s []int32) Int32x16 { - return LoadInt32x16((*[16]int32)(s)) -} - -// StoreSlice stores x into a slice of at least 16 int32s -func (x Int32x16) StoreSlice(s []int32) { - x.Store((*[16]int32)(s)) -} - -// LoadInt64x8Slice loads an Int64x8 from a slice of at least 8 int64s -func LoadInt64x8Slice(s []int64) Int64x8 { - return LoadInt64x8((*[8]int64)(s)) -} - -// StoreSlice stores x into a slice of at least 8 int64s -func (x Int64x8) StoreSlice(s []int64) { - x.Store((*[8]int64)(s)) -} - -// LoadUint8x64Slice loads an Uint8x64 from a slice of at least 64 uint8s -func LoadUint8x64Slice(s []uint8) Uint8x64 { - return LoadUint8x64((*[64]uint8)(s)) -} - -// StoreSlice stores x into a slice of at least 64 uint8s -func (x Uint8x64) StoreSlice(s []uint8) { - x.Store((*[64]uint8)(s)) -} - -// LoadUint16x32Slice loads an Uint16x32 from a slice of at least 32 uint16s -func LoadUint16x32Slice(s []uint16) Uint16x32 { - return LoadUint16x32((*[32]uint16)(s)) -} - -// StoreSlice stores x into a slice of at least 32 uint16s -func (x Uint16x32) StoreSlice(s []uint16) { - x.Store((*[32]uint16)(s)) -} - -// LoadUint32x16Slice loads an Uint32x16 from a slice of at least 16 uint32s -func LoadUint32x16Slice(s []uint32) Uint32x16 { - return LoadUint32x16((*[16]uint32)(s)) -} - -// StoreSlice stores x into a slice of at least 16 uint32s -func (x Uint32x16) StoreSlice(s []uint32) { - x.Store((*[16]uint32)(s)) -} - -// LoadUint64x8Slice loads an Uint64x8 from a slice of at least 8 uint64s -func LoadUint64x8Slice(s []uint64) Uint64x8 { - return LoadUint64x8((*[8]uint64)(s)) -} - -// StoreSlice stores x into a slice of at least 8 uint64s -func (x Uint64x8) StoreSlice(s []uint64) { - x.Store((*[8]uint64)(s)) -} - -// LoadFloat32x16Slice loads a Float32x16 from a slice of at least 16 float32s -func LoadFloat32x16Slice(s []float32) Float32x16 { - return LoadFloat32x16((*[16]float32)(s)) -} - -// StoreSlice stores x into a slice of at least 16 float32s -func (x Float32x16) StoreSlice(s []float32) { - x.Store((*[16]float32)(s)) -} - -// LoadFloat64x8Slice loads a Float64x8 from a slice of at least 8 float64s -func LoadFloat64x8Slice(s []float64) Float64x8 { - return LoadFloat64x8((*[8]float64)(s)) -} - -// StoreSlice stores x into a slice of at least 8 float64s -func (x 
Float64x8) StoreSlice(s []float64) { - x.Store((*[8]float64)(s)) -} - -// LoadInt8x64SlicePart loads a Int8x64 from the slice s. -// If s has fewer than 64 elements, the remaining elements of the vector are filled with zeroes. -// If s has 64 or more elements, the function is equivalent to LoadInt8x64Slice. -func LoadInt8x64SlicePart(s []int8) Int8x64 { - l := len(s) - if l >= 64 { - return LoadInt8x64Slice(s) - } - if l == 0 { - var x Int8x64 - return x - } - mask := Mask8x64FromBits(0xffffffffffffffff >> (64 - l)) - return LoadMaskedInt8x64(paInt8x64(s), mask) -} - -// StoreSlicePart stores the 64 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 64 or more elements, the method is equivalent to x.StoreSlice. -func (x Int8x64) StoreSlicePart(s []int8) { - l := len(s) - if l >= 64 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := Mask8x64FromBits(0xffffffffffffffff >> (64 - l)) - x.StoreMasked(paInt8x64(s), mask) -} - -// LoadInt16x32SlicePart loads a Int16x32 from the slice s. -// If s has fewer than 32 elements, the remaining elements of the vector are filled with zeroes. -// If s has 32 or more elements, the function is equivalent to LoadInt16x32Slice. -func LoadInt16x32SlicePart(s []int16) Int16x32 { - l := len(s) - if l >= 32 { - return LoadInt16x32Slice(s) - } - if l == 0 { - var x Int16x32 - return x - } - mask := Mask16x32FromBits(0xffffffff >> (32 - l)) - return LoadMaskedInt16x32(paInt16x32(s), mask) -} - -// StoreSlicePart stores the 32 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 32 or more elements, the method is equivalent to x.StoreSlice. -func (x Int16x32) StoreSlicePart(s []int16) { - l := len(s) - if l >= 32 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := Mask16x32FromBits(0xffffffff >> (32 - l)) - x.StoreMasked(paInt16x32(s), mask) -} - -// LoadInt32x16SlicePart loads a Int32x16 from the slice s. -// If s has fewer than 16 elements, the remaining elements of the vector are filled with zeroes. -// If s has 16 or more elements, the function is equivalent to LoadInt32x16Slice. -func LoadInt32x16SlicePart(s []int32) Int32x16 { - l := len(s) - if l >= 16 { - return LoadInt32x16Slice(s) - } - if l == 0 { - var x Int32x16 - return x - } - mask := Mask32x16FromBits(0xffff >> (16 - l)) - return LoadMaskedInt32x16(paInt32x16(s), mask) -} - -// StoreSlicePart stores the 16 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 16 or more elements, the method is equivalent to x.StoreSlice. -func (x Int32x16) StoreSlicePart(s []int32) { - l := len(s) - if l >= 16 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := Mask32x16FromBits(0xffff >> (16 - l)) - x.StoreMasked(paInt32x16(s), mask) -} - -// LoadInt64x8SlicePart loads a Int64x8 from the slice s. -// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. -// If s has 8 or more elements, the function is equivalent to LoadInt64x8Slice. -func LoadInt64x8SlicePart(s []int64) Int64x8 { - l := len(s) - if l >= 8 { - return LoadInt64x8Slice(s) - } - if l == 0 { - var x Int64x8 - return x - } - mask := Mask64x8FromBits(0xff >> (8 - l)) - return LoadMaskedInt64x8(paInt64x8(s), mask) -} - -// StoreSlicePart stores the 8 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 8 or more elements, the method is equivalent to x.StoreSlice. 
-func (x Int64x8) StoreSlicePart(s []int64) { - l := len(s) - if l >= 8 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := Mask64x8FromBits(0xff >> (8 - l)) - x.StoreMasked(paInt64x8(s), mask) -} - -// LoadUint8x64SlicePart loads a Uint8x64 from the slice s. -// If s has fewer than 64 elements, the remaining elements of the vector are filled with zeroes. -// If s has 64 or more elements, the function is equivalent to LoadUint8x64Slice. -func LoadUint8x64SlicePart(s []uint8) Uint8x64 { - l := len(s) - if l >= 64 { - return LoadUint8x64Slice(s) - } - if l == 0 { - var x Uint8x64 - return x - } - mask := Mask8x64FromBits(0xffffffffffffffff >> (64 - l)) - return LoadMaskedUint8x64(paUint8x64(s), mask) -} - -// StoreSlicePart stores the 64 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 64 or more elements, the method is equivalent to x.StoreSlice. -func (x Uint8x64) StoreSlicePart(s []uint8) { - l := len(s) - if l >= 64 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := Mask8x64FromBits(0xffffffffffffffff >> (64 - l)) - x.StoreMasked(paUint8x64(s), mask) -} - -// LoadUint16x32SlicePart loads a Uint16x32 from the slice s. -// If s has fewer than 32 elements, the remaining elements of the vector are filled with zeroes. -// If s has 32 or more elements, the function is equivalent to LoadUint16x32Slice. -func LoadUint16x32SlicePart(s []uint16) Uint16x32 { - l := len(s) - if l >= 32 { - return LoadUint16x32Slice(s) - } - if l == 0 { - var x Uint16x32 - return x - } - mask := Mask16x32FromBits(0xffffffff >> (32 - l)) - return LoadMaskedUint16x32(paUint16x32(s), mask) -} - -// StoreSlicePart stores the 32 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 32 or more elements, the method is equivalent to x.StoreSlice. -func (x Uint16x32) StoreSlicePart(s []uint16) { - l := len(s) - if l >= 32 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := Mask16x32FromBits(0xffffffff >> (32 - l)) - x.StoreMasked(paUint16x32(s), mask) -} - -// LoadUint32x16SlicePart loads a Uint32x16 from the slice s. -// If s has fewer than 16 elements, the remaining elements of the vector are filled with zeroes. -// If s has 16 or more elements, the function is equivalent to LoadUint32x16Slice. -func LoadUint32x16SlicePart(s []uint32) Uint32x16 { - l := len(s) - if l >= 16 { - return LoadUint32x16Slice(s) - } - if l == 0 { - var x Uint32x16 - return x - } - mask := Mask32x16FromBits(0xffff >> (16 - l)) - return LoadMaskedUint32x16(paUint32x16(s), mask) -} - -// StoreSlicePart stores the 16 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 16 or more elements, the method is equivalent to x.StoreSlice. -func (x Uint32x16) StoreSlicePart(s []uint32) { - l := len(s) - if l >= 16 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := Mask32x16FromBits(0xffff >> (16 - l)) - x.StoreMasked(paUint32x16(s), mask) -} - -// LoadUint64x8SlicePart loads a Uint64x8 from the slice s. -// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. -// If s has 8 or more elements, the function is equivalent to LoadUint64x8Slice. 
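Every mask constant in these 512-bit SlicePart helpers follows the same arithmetic: for an n-lane vector and a slice of length l, shifting an n-bit all-ones value right by n-l leaves exactly the low l bits set, one bit per valid lane. The l == 0 case is handled separately up front so the code never forms a pointer into an empty slice. A worked instance of the shift, as a sketch:

	// 32-lane vector, 5-element slice: enable lanes 0..4 only.
	l := 5
	bits := uint32(0xffffffff) >> (32 - l)
	fmt.Printf("%b\n", bits) // 11111: lanes 0..4 enabled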
-func LoadUint64x8SlicePart(s []uint64) Uint64x8 { - l := len(s) - if l >= 8 { - return LoadUint64x8Slice(s) - } - if l == 0 { - var x Uint64x8 - return x - } - mask := Mask64x8FromBits(0xff >> (8 - l)) - return LoadMaskedUint64x8(paUint64x8(s), mask) -} - -// StoreSlicePart stores the 8 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 8 or more elements, the method is equivalent to x.StoreSlice. -func (x Uint64x8) StoreSlicePart(s []uint64) { - l := len(s) - if l >= 8 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := Mask64x8FromBits(0xff >> (8 - l)) - x.StoreMasked(paUint64x8(s), mask) -} - -// LoadFloat32x16SlicePart loads a Float32x16 from the slice s. -// If s has fewer than 16 elements, the remaining elements of the vector are filled with zeroes. -// If s has 16 or more elements, the function is equivalent to LoadFloat32x16Slice. -func LoadFloat32x16SlicePart(s []float32) Float32x16 { - l := len(s) - if l >= 16 { - return LoadFloat32x16Slice(s) - } - if l == 0 { - var x Float32x16 - return x - } - mask := Mask32x16FromBits(0xffff >> (16 - l)) - return LoadMaskedFloat32x16(paFloat32x16(s), mask) -} - -// StoreSlicePart stores the 16 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 16 or more elements, the method is equivalent to x.StoreSlice. -func (x Float32x16) StoreSlicePart(s []float32) { - l := len(s) - if l >= 16 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := Mask32x16FromBits(0xffff >> (16 - l)) - x.StoreMasked(paFloat32x16(s), mask) -} - -// LoadFloat64x8SlicePart loads a Float64x8 from the slice s. -// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. -// If s has 8 or more elements, the function is equivalent to LoadFloat64x8Slice. -func LoadFloat64x8SlicePart(s []float64) Float64x8 { - l := len(s) - if l >= 8 { - return LoadFloat64x8Slice(s) - } - if l == 0 { - var x Float64x8 - return x - } - mask := Mask64x8FromBits(0xff >> (8 - l)) - return LoadMaskedFloat64x8(paFloat64x8(s), mask) -} - -// StoreSlicePart stores the 8 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 8 or more elements, the method is equivalent to x.StoreSlice. -func (x Float64x8) StoreSlicePart(s []float64) { - l := len(s) - if l >= 8 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := Mask64x8FromBits(0xff >> (8 - l)) - x.StoreMasked(paFloat64x8(s), mask) -} - -// LoadInt32x4SlicePart loads a Int32x4 from the slice s. -// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. -// If s has 4 or more elements, the function is equivalent to LoadInt32x4Slice. -func LoadInt32x4SlicePart(s []int32) Int32x4 { - l := len(s) - if l >= 4 { - return LoadInt32x4Slice(s) - } - if l == 0 { - var x Int32x4 - return x - } - mask := vecMask32[len(vecMask32)/2-l:] - return LoadMaskedInt32x4(paInt32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) -} - -// StoreSlicePart stores the 4 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 4 or more elements, the method is equivalent to x.StoreSlice. -func (x Int32x4) StoreSlicePart(s []int32) { - l := len(s) - if l >= 4 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := vecMask32[len(vecMask32)/2-l:] - x.StoreMasked(paInt32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) -} - -// LoadInt64x2SlicePart loads a Int64x2 from the slice s. 
-// If s has fewer than 2 elements, the remaining elements of the vector are filled with zeroes. -// If s has 2 or more elements, the function is equivalent to LoadInt64x2Slice. -func LoadInt64x2SlicePart(s []int64) Int64x2 { - l := len(s) - if l >= 2 { - return LoadInt64x2Slice(s) - } - if l == 0 { - var x Int64x2 - return x - } - mask := vecMask64[len(vecMask64)/2-l:] - return LoadMaskedInt64x2(paInt64x2(s), LoadInt64x2Slice(mask).AsMask64x2()) -} - -// StoreSlicePart stores the 2 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 2 or more elements, the method is equivalent to x.StoreSlice. -func (x Int64x2) StoreSlicePart(s []int64) { - l := len(s) - if l >= 2 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := vecMask64[len(vecMask64)/2-l:] - x.StoreMasked(paInt64x2(s), LoadInt64x2Slice(mask).AsMask64x2()) -} - -// LoadUint32x4SlicePart loads a Uint32x4 from the slice s. -// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. -// If s has 4 or more elements, the function is equivalent to LoadUint32x4Slice. -func LoadUint32x4SlicePart(s []uint32) Uint32x4 { - l := len(s) - if l >= 4 { - return LoadUint32x4Slice(s) - } - if l == 0 { - var x Uint32x4 - return x - } - mask := vecMask32[len(vecMask32)/2-l:] - return LoadMaskedUint32x4(paUint32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) -} - -// StoreSlicePart stores the 4 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 4 or more elements, the method is equivalent to x.StoreSlice. -func (x Uint32x4) StoreSlicePart(s []uint32) { - l := len(s) - if l >= 4 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := vecMask32[len(vecMask32)/2-l:] - x.StoreMasked(paUint32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) -} - -// LoadUint64x2SlicePart loads a Uint64x2 from the slice s. -// If s has fewer than 2 elements, the remaining elements of the vector are filled with zeroes. -// If s has 2 or more elements, the function is equivalent to LoadUint64x2Slice. -func LoadUint64x2SlicePart(s []uint64) Uint64x2 { - l := len(s) - if l >= 2 { - return LoadUint64x2Slice(s) - } - if l == 0 { - var x Uint64x2 - return x - } - mask := vecMask64[len(vecMask64)/2-l:] - return LoadMaskedUint64x2(paUint64x2(s), LoadInt64x2Slice(mask).AsMask64x2()) -} - -// StoreSlicePart stores the 2 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 2 or more elements, the method is equivalent to x.StoreSlice. -func (x Uint64x2) StoreSlicePart(s []uint64) { - l := len(s) - if l >= 2 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := vecMask64[len(vecMask64)/2-l:] - x.StoreMasked(paUint64x2(s), LoadInt64x2Slice(mask).AsMask64x2()) -} - -// LoadFloat32x4SlicePart loads a Float32x4 from the slice s. -// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. -// If s has 4 or more elements, the function is equivalent to LoadFloat32x4Slice. -func LoadFloat32x4SlicePart(s []float32) Float32x4 { - l := len(s) - if l >= 4 { - return LoadFloat32x4Slice(s) - } - if l == 0 { - var x Float32x4 - return x - } - mask := vecMask32[len(vecMask32)/2-l:] - return LoadMaskedFloat32x4(paFloat32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) -} - -// StoreSlicePart stores the 4 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 4 or more elements, the method is equivalent to x.StoreSlice. 
-func (x Float32x4) StoreSlicePart(s []float32) { - l := len(s) - if l >= 4 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := vecMask32[len(vecMask32)/2-l:] - x.StoreMasked(paFloat32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) -} - -// LoadFloat64x2SlicePart loads a Float64x2 from the slice s. -// If s has fewer than 2 elements, the remaining elements of the vector are filled with zeroes. -// If s has 2 or more elements, the function is equivalent to LoadFloat64x2Slice. -func LoadFloat64x2SlicePart(s []float64) Float64x2 { - l := len(s) - if l >= 2 { - return LoadFloat64x2Slice(s) - } - if l == 0 { - var x Float64x2 - return x - } - mask := vecMask64[len(vecMask64)/2-l:] - return LoadMaskedFloat64x2(paFloat64x2(s), LoadInt64x2Slice(mask).AsMask64x2()) -} - -// StoreSlicePart stores the 2 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 2 or more elements, the method is equivalent to x.StoreSlice. -func (x Float64x2) StoreSlicePart(s []float64) { - l := len(s) - if l >= 2 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := vecMask64[len(vecMask64)/2-l:] - x.StoreMasked(paFloat64x2(s), LoadInt64x2Slice(mask).AsMask64x2()) -} - -// LoadInt32x8SlicePart loads a Int32x8 from the slice s. -// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. -// If s has 8 or more elements, the function is equivalent to LoadInt32x8Slice. -func LoadInt32x8SlicePart(s []int32) Int32x8 { - l := len(s) - if l >= 8 { - return LoadInt32x8Slice(s) - } - if l == 0 { - var x Int32x8 - return x - } - mask := vecMask32[len(vecMask32)/2-l:] - return LoadMaskedInt32x8(paInt32x8(s), LoadInt32x8Slice(mask).AsMask32x8()) -} - -// StoreSlicePart stores the 8 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 8 or more elements, the method is equivalent to x.StoreSlice. -func (x Int32x8) StoreSlicePart(s []int32) { - l := len(s) - if l >= 8 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := vecMask32[len(vecMask32)/2-l:] - x.StoreMasked(paInt32x8(s), LoadInt32x8Slice(mask).AsMask32x8()) -} - -// LoadInt64x4SlicePart loads a Int64x4 from the slice s. -// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. -// If s has 4 or more elements, the function is equivalent to LoadInt64x4Slice. -func LoadInt64x4SlicePart(s []int64) Int64x4 { - l := len(s) - if l >= 4 { - return LoadInt64x4Slice(s) - } - if l == 0 { - var x Int64x4 - return x - } - mask := vecMask64[len(vecMask64)/2-l:] - return LoadMaskedInt64x4(paInt64x4(s), LoadInt64x4Slice(mask).AsMask64x4()) -} - -// StoreSlicePart stores the 4 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 4 or more elements, the method is equivalent to x.StoreSlice. -func (x Int64x4) StoreSlicePart(s []int64) { - l := len(s) - if l >= 4 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := vecMask64[len(vecMask64)/2-l:] - x.StoreMasked(paInt64x4(s), LoadInt64x4Slice(mask).AsMask64x4()) -} - -// LoadUint32x8SlicePart loads a Uint32x8 from the slice s. -// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. -// If s has 8 or more elements, the function is equivalent to LoadUint32x8Slice. 
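For the 128- and 256-bit vectors there is no bit-mask form of the masked load, so these SlicePart helpers build a per-lane vector mask by slicing into a shared constant table (vecMask32/vecMask64) and loading from it. The tables are declared elsewhere in the package; their indexing only works if the first half holds all-ones lanes and the second half zeros, so a plausible shape, purely as an illustration, is:

	// Hypothetical sketch of the tables' shape; the real declarations are not
	// part of this hunk. Slicing at len/2-l yields l enabled lanes, then zeros.
	var vecMask32 = []int32{-1, -1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0}
	var vecMask64 = []int64{-1, -1, -1, -1, 0, 0, 0, 0}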
-func LoadUint32x8SlicePart(s []uint32) Uint32x8 { - l := len(s) - if l >= 8 { - return LoadUint32x8Slice(s) - } - if l == 0 { - var x Uint32x8 - return x - } - mask := vecMask32[len(vecMask32)/2-l:] - return LoadMaskedUint32x8(paUint32x8(s), LoadInt32x8Slice(mask).AsMask32x8()) -} - -// StoreSlicePart stores the 8 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 8 or more elements, the method is equivalent to x.StoreSlice. -func (x Uint32x8) StoreSlicePart(s []uint32) { - l := len(s) - if l >= 8 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := vecMask32[len(vecMask32)/2-l:] - x.StoreMasked(paUint32x8(s), LoadInt32x8Slice(mask).AsMask32x8()) -} - -// LoadUint64x4SlicePart loads a Uint64x4 from the slice s. -// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. -// If s has 4 or more elements, the function is equivalent to LoadUint64x4Slice. -func LoadUint64x4SlicePart(s []uint64) Uint64x4 { - l := len(s) - if l >= 4 { - return LoadUint64x4Slice(s) - } - if l == 0 { - var x Uint64x4 - return x - } - mask := vecMask64[len(vecMask64)/2-l:] - return LoadMaskedUint64x4(paUint64x4(s), LoadInt64x4Slice(mask).AsMask64x4()) -} - -// StoreSlicePart stores the 4 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 4 or more elements, the method is equivalent to x.StoreSlice. -func (x Uint64x4) StoreSlicePart(s []uint64) { - l := len(s) - if l >= 4 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := vecMask64[len(vecMask64)/2-l:] - x.StoreMasked(paUint64x4(s), LoadInt64x4Slice(mask).AsMask64x4()) -} - -// LoadFloat32x8SlicePart loads a Float32x8 from the slice s. -// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. -// If s has 8 or more elements, the function is equivalent to LoadFloat32x8Slice. -func LoadFloat32x8SlicePart(s []float32) Float32x8 { - l := len(s) - if l >= 8 { - return LoadFloat32x8Slice(s) - } - if l == 0 { - var x Float32x8 - return x - } - mask := vecMask32[len(vecMask32)/2-l:] - return LoadMaskedFloat32x8(paFloat32x8(s), LoadInt32x8Slice(mask).AsMask32x8()) -} - -// StoreSlicePart stores the 8 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 8 or more elements, the method is equivalent to x.StoreSlice. -func (x Float32x8) StoreSlicePart(s []float32) { - l := len(s) - if l >= 8 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := vecMask32[len(vecMask32)/2-l:] - x.StoreMasked(paFloat32x8(s), LoadInt32x8Slice(mask).AsMask32x8()) -} - -// LoadFloat64x4SlicePart loads a Float64x4 from the slice s. -// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. -// If s has 4 or more elements, the function is equivalent to LoadFloat64x4Slice. -func LoadFloat64x4SlicePart(s []float64) Float64x4 { - l := len(s) - if l >= 4 { - return LoadFloat64x4Slice(s) - } - if l == 0 { - var x Float64x4 - return x - } - mask := vecMask64[len(vecMask64)/2-l:] - return LoadMaskedFloat64x4(paFloat64x4(s), LoadInt64x4Slice(mask).AsMask64x4()) -} - -// StoreSlicePart stores the 4 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 4 or more elements, the method is equivalent to x.StoreSlice. 
-func (x Float64x4) StoreSlicePart(s []float64) { - l := len(s) - if l >= 4 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := vecMask64[len(vecMask64)/2-l:] - x.StoreMasked(paFloat64x4(s), LoadInt64x4Slice(mask).AsMask64x4()) -} - -// LoadUint8x16SlicePart loads a Uint8x16 from the slice s. -// If s has fewer than 16 elements, the remaining elements of the vector are filled with zeroes. -// If s has 16 or more elements, the function is equivalent to LoadUint8x16Slice. -func LoadUint8x16SlicePart(s []uint8) Uint8x16 { - if len(s) == 0 { - var zero Uint8x16 - return zero - } - t := unsafe.Slice((*int8)(unsafe.Pointer(&s[0])), len(s)) - return LoadInt8x16SlicePart(t).AsUint8x16() -} - -// StoreSlicePart stores the 16 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 16 or more elements, the method is equivalent to x.StoreSlice. -func (x Uint8x16) StoreSlicePart(s []uint8) { - if len(s) == 0 { - return - } - t := unsafe.Slice((*int8)(unsafe.Pointer(&s[0])), len(s)) - x.AsInt8x16().StoreSlicePart(t) -} - -// LoadUint16x8SlicePart loads a Uint16x8 from the slice s. -// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. -// If s has 8 or more elements, the function is equivalent to LoadUint16x8Slice. -func LoadUint16x8SlicePart(s []uint16) Uint16x8 { - if len(s) == 0 { - var zero Uint16x8 - return zero - } - t := unsafe.Slice((*int16)(unsafe.Pointer(&s[0])), len(s)) - return LoadInt16x8SlicePart(t).AsUint16x8() -} - -// StoreSlicePart stores the 8 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 8 or more elements, the method is equivalent to x.StoreSlice. -func (x Uint16x8) StoreSlicePart(s []uint16) { - if len(s) == 0 { - return - } - t := unsafe.Slice((*int16)(unsafe.Pointer(&s[0])), len(s)) - x.AsInt16x8().StoreSlicePart(t) -} - -// LoadUint8x32SlicePart loads a Uint8x32 from the slice s. -// If s has fewer than 32 elements, the remaining elements of the vector are filled with zeroes. -// If s has 32 or more elements, the function is equivalent to LoadUint8x32Slice. -func LoadUint8x32SlicePart(s []uint8) Uint8x32 { - if len(s) == 0 { - var zero Uint8x32 - return zero - } - t := unsafe.Slice((*int8)(unsafe.Pointer(&s[0])), len(s)) - return LoadInt8x32SlicePart(t).AsUint8x32() -} - -// StoreSlicePart stores the 32 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 32 or more elements, the method is equivalent to x.StoreSlice. -func (x Uint8x32) StoreSlicePart(s []uint8) { - if len(s) == 0 { - return - } - t := unsafe.Slice((*int8)(unsafe.Pointer(&s[0])), len(s)) - x.AsInt8x32().StoreSlicePart(t) -} - -// LoadUint16x16SlicePart loads a Uint16x16 from the slice s. -// If s has fewer than 16 elements, the remaining elements of the vector are filled with zeroes. -// If s has 16 or more elements, the function is equivalent to LoadUint16x16Slice. -func LoadUint16x16SlicePart(s []uint16) Uint16x16 { - if len(s) == 0 { - var zero Uint16x16 - return zero - } - t := unsafe.Slice((*int16)(unsafe.Pointer(&s[0])), len(s)) - return LoadInt16x16SlicePart(t).AsUint16x16() -} - -// StoreSlicePart stores the 16 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 16 or more elements, the method is equivalent to x.StoreSlice. 
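The unsigned 128- and 256-bit SlicePart variants above avoid duplicating the masking logic: they reinterpret the slice as the signed element type of the same width via unsafe.Slice and delegate, which is safe here because the loads and stores preserve bit patterns. The same reinterpretation written as a stand-alone generic helper, purely as a sketch (this helper is not part of the package, and it assumes D and S have identical size and representation):

	func sameSizeSlice[D, S any](s []S) []D {
		if len(s) == 0 {
			return nil
		}
		return unsafe.Slice((*D)(unsafe.Pointer(&s[0])), len(s))
	}

With such a helper, LoadUint8x16SlicePart is essentially LoadInt8x16SlicePart(sameSizeSlice[int8](s)).AsUint8x16().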
-func (x Uint16x16) StoreSlicePart(s []uint16) { - if len(s) == 0 { - return - } - t := unsafe.Slice((*int16)(unsafe.Pointer(&s[0])), len(s)) - x.AsInt16x16().StoreSlicePart(t) -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Int8x16) Masked(mask Mask8x16) Int8x16 { - im := mask.AsInt8x16() - return im.And(x) -} - -// Merge returns x but with elements set to y where mask is false. -func (x Int8x16) Merge(y Int8x16, mask Mask8x16) Int8x16 { - im := mask.AsInt8x16() - return y.blend(x, im) -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Int16x8) Masked(mask Mask16x8) Int16x8 { - im := mask.AsInt16x8() - return im.And(x) -} - -// Merge returns x but with elements set to y where mask is false. -func (x Int16x8) Merge(y Int16x8, mask Mask16x8) Int16x8 { - im := mask.AsInt16x8().AsInt8x16() - ix := x.AsInt8x16() - iy := y.AsInt8x16() - return iy.blend(ix, im).AsInt16x8() -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Int32x4) Masked(mask Mask32x4) Int32x4 { - im := mask.AsInt32x4() - return im.And(x) -} - -// Merge returns x but with elements set to y where mask is false. -func (x Int32x4) Merge(y Int32x4, mask Mask32x4) Int32x4 { - im := mask.AsInt32x4().AsInt8x16() - ix := x.AsInt8x16() - iy := y.AsInt8x16() - return iy.blend(ix, im).AsInt32x4() -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Int64x2) Masked(mask Mask64x2) Int64x2 { - im := mask.AsInt64x2() - return im.And(x) -} - -// Merge returns x but with elements set to y where mask is false. -func (x Int64x2) Merge(y Int64x2, mask Mask64x2) Int64x2 { - im := mask.AsInt64x2().AsInt8x16() - ix := x.AsInt8x16() - iy := y.AsInt8x16() - return iy.blend(ix, im).AsInt64x2() -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Uint8x16) Masked(mask Mask8x16) Uint8x16 { - im := mask.AsInt8x16() - return x.AsInt8x16().And(im).AsUint8x16() -} - -// Merge returns x but with elements set to y where mask is false. -func (x Uint8x16) Merge(y Uint8x16, mask Mask8x16) Uint8x16 { - im := mask.AsInt8x16() - ix := x.AsInt8x16() - iy := y.AsInt8x16() - return iy.blend(ix, im).AsUint8x16() -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Uint16x8) Masked(mask Mask16x8) Uint16x8 { - im := mask.AsInt16x8() - return x.AsInt16x8().And(im).AsUint16x8() -} - -// Merge returns x but with elements set to y where mask is false. -func (x Uint16x8) Merge(y Uint16x8, mask Mask16x8) Uint16x8 { - im := mask.AsInt16x8().AsInt8x16() - ix := x.AsInt8x16() - iy := y.AsInt8x16() - return iy.blend(ix, im).AsUint16x8() -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Uint32x4) Masked(mask Mask32x4) Uint32x4 { - im := mask.AsInt32x4() - return x.AsInt32x4().And(im).AsUint32x4() -} - -// Merge returns x but with elements set to y where mask is false. -func (x Uint32x4) Merge(y Uint32x4, mask Mask32x4) Uint32x4 { - im := mask.AsInt32x4().AsInt8x16() - ix := x.AsInt8x16() - iy := y.AsInt8x16() - return iy.blend(ix, im).AsUint32x4() -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Uint64x2) Masked(mask Mask64x2) Uint64x2 { - im := mask.AsInt64x2() - return x.AsInt64x2().And(im).AsUint64x2() -} - -// Merge returns x but with elements set to y where mask is false. 
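Masked and Merge are lane-wise selects: Masked keeps x where the mask is true and zeroes the rest, while Merge keeps x where the mask is true and takes y elsewhere. Their semantics written out over plain slices, as a reference sketch only (the methods above implement this with And and an internal blend):

	func maskedRef(x []int32, m []bool) []int32 {
		out := make([]int32, len(x))
		for i := range x {
			if m[i] {
				out[i] = x[i] // lanes with a false mask stay zero
			}
		}
		return out
	}

	func mergeRef(x, y []int32, m []bool) []int32 {
		out := make([]int32, len(x))
		for i := range x {
			if m[i] {
				out[i] = x[i]
			} else {
				out[i] = y[i]
			}
		}
		return out
	}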
-func (x Uint64x2) Merge(y Uint64x2, mask Mask64x2) Uint64x2 { - im := mask.AsInt64x2().AsInt8x16() - ix := x.AsInt8x16() - iy := y.AsInt8x16() - return iy.blend(ix, im).AsUint64x2() -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Float32x4) Masked(mask Mask32x4) Float32x4 { - im := mask.AsInt32x4() - return x.AsInt32x4().And(im).AsFloat32x4() -} - -// Merge returns x but with elements set to y where mask is false. -func (x Float32x4) Merge(y Float32x4, mask Mask32x4) Float32x4 { - im := mask.AsInt32x4().AsInt8x16() - ix := x.AsInt8x16() - iy := y.AsInt8x16() - return iy.blend(ix, im).AsFloat32x4() -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Float64x2) Masked(mask Mask64x2) Float64x2 { - im := mask.AsInt64x2() - return x.AsInt64x2().And(im).AsFloat64x2() -} - -// Merge returns x but with elements set to y where mask is false. -func (x Float64x2) Merge(y Float64x2, mask Mask64x2) Float64x2 { - im := mask.AsInt64x2().AsInt8x16() - ix := x.AsInt8x16() - iy := y.AsInt8x16() - return iy.blend(ix, im).AsFloat64x2() -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Int8x32) Masked(mask Mask8x32) Int8x32 { - im := mask.AsInt8x32() - return im.And(x) -} - -// Merge returns x but with elements set to y where mask is false. -func (x Int8x32) Merge(y Int8x32, mask Mask8x32) Int8x32 { - im := mask.AsInt8x32() - return y.blend(x, im) -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Int16x16) Masked(mask Mask16x16) Int16x16 { - im := mask.AsInt16x16() - return im.And(x) -} - -// Merge returns x but with elements set to y where mask is false. -func (x Int16x16) Merge(y Int16x16, mask Mask16x16) Int16x16 { - im := mask.AsInt16x16().AsInt8x32() - ix := x.AsInt8x32() - iy := y.AsInt8x32() - return iy.blend(ix, im).AsInt16x16() -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Int32x8) Masked(mask Mask32x8) Int32x8 { - im := mask.AsInt32x8() - return im.And(x) -} - -// Merge returns x but with elements set to y where mask is false. -func (x Int32x8) Merge(y Int32x8, mask Mask32x8) Int32x8 { - im := mask.AsInt32x8().AsInt8x32() - ix := x.AsInt8x32() - iy := y.AsInt8x32() - return iy.blend(ix, im).AsInt32x8() -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Int64x4) Masked(mask Mask64x4) Int64x4 { - im := mask.AsInt64x4() - return im.And(x) -} - -// Merge returns x but with elements set to y where mask is false. -func (x Int64x4) Merge(y Int64x4, mask Mask64x4) Int64x4 { - im := mask.AsInt64x4().AsInt8x32() - ix := x.AsInt8x32() - iy := y.AsInt8x32() - return iy.blend(ix, im).AsInt64x4() -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Uint8x32) Masked(mask Mask8x32) Uint8x32 { - im := mask.AsInt8x32() - return x.AsInt8x32().And(im).AsUint8x32() -} - -// Merge returns x but with elements set to y where mask is false. -func (x Uint8x32) Merge(y Uint8x32, mask Mask8x32) Uint8x32 { - im := mask.AsInt8x32() - ix := x.AsInt8x32() - iy := y.AsInt8x32() - return iy.blend(ix, im).AsUint8x32() -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Uint16x16) Masked(mask Mask16x16) Uint16x16 { - im := mask.AsInt16x16() - return x.AsInt16x16().And(im).AsUint16x16() -} - -// Merge returns x but with elements set to y where mask is false. 
-func (x Uint16x16) Merge(y Uint16x16, mask Mask16x16) Uint16x16 { - im := mask.AsInt16x16().AsInt8x32() - ix := x.AsInt8x32() - iy := y.AsInt8x32() - return iy.blend(ix, im).AsUint16x16() -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Uint32x8) Masked(mask Mask32x8) Uint32x8 { - im := mask.AsInt32x8() - return x.AsInt32x8().And(im).AsUint32x8() -} - -// Merge returns x but with elements set to y where mask is false. -func (x Uint32x8) Merge(y Uint32x8, mask Mask32x8) Uint32x8 { - im := mask.AsInt32x8().AsInt8x32() - ix := x.AsInt8x32() - iy := y.AsInt8x32() - return iy.blend(ix, im).AsUint32x8() -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Uint64x4) Masked(mask Mask64x4) Uint64x4 { - im := mask.AsInt64x4() - return x.AsInt64x4().And(im).AsUint64x4() -} - -// Merge returns x but with elements set to y where mask is false. -func (x Uint64x4) Merge(y Uint64x4, mask Mask64x4) Uint64x4 { - im := mask.AsInt64x4().AsInt8x32() - ix := x.AsInt8x32() - iy := y.AsInt8x32() - return iy.blend(ix, im).AsUint64x4() -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Float32x8) Masked(mask Mask32x8) Float32x8 { - im := mask.AsInt32x8() - return x.AsInt32x8().And(im).AsFloat32x8() -} - -// Merge returns x but with elements set to y where mask is false. -func (x Float32x8) Merge(y Float32x8, mask Mask32x8) Float32x8 { - im := mask.AsInt32x8().AsInt8x32() - ix := x.AsInt8x32() - iy := y.AsInt8x32() - return iy.blend(ix, im).AsFloat32x8() -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Float64x4) Masked(mask Mask64x4) Float64x4 { - im := mask.AsInt64x4() - return x.AsInt64x4().And(im).AsFloat64x4() -} - -// Merge returns x but with elements set to y where mask is false. -func (x Float64x4) Merge(y Float64x4, mask Mask64x4) Float64x4 { - im := mask.AsInt64x4().AsInt8x32() - ix := x.AsInt8x32() - iy := y.AsInt8x32() - return iy.blend(ix, im).AsFloat64x4() -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Int8x64) Masked(mask Mask8x64) Int8x64 { - im := mask.AsInt8x64() - return im.And(x) -} - -// Merge returns x but with elements set to y where m is false. -func (x Int8x64) Merge(y Int8x64, mask Mask8x64) Int8x64 { - return y.blendMasked(x, mask) -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Int16x32) Masked(mask Mask16x32) Int16x32 { - im := mask.AsInt16x32() - return im.And(x) -} - -// Merge returns x but with elements set to y where m is false. -func (x Int16x32) Merge(y Int16x32, mask Mask16x32) Int16x32 { - return y.blendMasked(x, mask) -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Int32x16) Masked(mask Mask32x16) Int32x16 { - im := mask.AsInt32x16() - return im.And(x) -} - -// Merge returns x but with elements set to y where m is false. -func (x Int32x16) Merge(y Int32x16, mask Mask32x16) Int32x16 { - return y.blendMasked(x, mask) -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Int64x8) Masked(mask Mask64x8) Int64x8 { - im := mask.AsInt64x8() - return im.And(x) -} - -// Merge returns x but with elements set to y where m is false. -func (x Int64x8) Merge(y Int64x8, mask Mask64x8) Int64x8 { - return y.blendMasked(x, mask) -} - -// Masked returns x but with elements zeroed where mask is false. 
-func (x Uint8x64) Masked(mask Mask8x64) Uint8x64 { - im := mask.AsInt8x64() - return x.AsInt8x64().And(im).AsUint8x64() -} - -// Merge returns x but with elements set to y where m is false. -func (x Uint8x64) Merge(y Uint8x64, mask Mask8x64) Uint8x64 { - ix := x.AsInt8x64() - iy := y.AsInt8x64() - return iy.blendMasked(ix, mask).AsUint8x64() -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Uint16x32) Masked(mask Mask16x32) Uint16x32 { - im := mask.AsInt16x32() - return x.AsInt16x32().And(im).AsUint16x32() -} - -// Merge returns x but with elements set to y where m is false. -func (x Uint16x32) Merge(y Uint16x32, mask Mask16x32) Uint16x32 { - ix := x.AsInt16x32() - iy := y.AsInt16x32() - return iy.blendMasked(ix, mask).AsUint16x32() -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Uint32x16) Masked(mask Mask32x16) Uint32x16 { - im := mask.AsInt32x16() - return x.AsInt32x16().And(im).AsUint32x16() -} - -// Merge returns x but with elements set to y where m is false. -func (x Uint32x16) Merge(y Uint32x16, mask Mask32x16) Uint32x16 { - ix := x.AsInt32x16() - iy := y.AsInt32x16() - return iy.blendMasked(ix, mask).AsUint32x16() -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Uint64x8) Masked(mask Mask64x8) Uint64x8 { - im := mask.AsInt64x8() - return x.AsInt64x8().And(im).AsUint64x8() -} - -// Merge returns x but with elements set to y where m is false. -func (x Uint64x8) Merge(y Uint64x8, mask Mask64x8) Uint64x8 { - ix := x.AsInt64x8() - iy := y.AsInt64x8() - return iy.blendMasked(ix, mask).AsUint64x8() -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Float32x16) Masked(mask Mask32x16) Float32x16 { - im := mask.AsInt32x16() - return x.AsInt32x16().And(im).AsFloat32x16() -} - -// Merge returns x but with elements set to y where m is false. -func (x Float32x16) Merge(y Float32x16, mask Mask32x16) Float32x16 { - ix := x.AsInt32x16() - iy := y.AsInt32x16() - return iy.blendMasked(ix, mask).AsFloat32x16() -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Float64x8) Masked(mask Mask64x8) Float64x8 { - im := mask.AsInt64x8() - return x.AsInt64x8().And(im).AsFloat64x8() -} - -// Merge returns x but with elements set to y where m is false. 
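The 512-bit forms differ from the narrower ones only in how Merge is built: instead of materializing the mask as a vector and blending with it, they pass the mask straight to blendMasked. Usage is the same across widths; a short sketch (assuming GOEXPERIMENT=simd):

	x := simd.LoadInt32x4Slice([]int32{1, 2, 3, 4})
	y := simd.LoadInt32x4Slice([]int32{10, 20, 30, 40})
	m := x.Greater(simd.BroadcastInt32x4(2)) // true in the lanes holding 3 and 4

	a := make([]int32, 4)
	x.Masked(m).StoreSlice(a) // a == [0 0 3 4]

	b := make([]int32, 4)
	x.Merge(y, m).StoreSlice(b) // b == [10 20 3 4]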
-func (x Float64x8) Merge(y Float64x8, mask Mask64x8) Float64x8 { - ix := x.AsInt64x8() - iy := y.AsInt64x8() - return iy.blendMasked(ix, mask).AsFloat64x8() -} - -// Less returns a mask whose elements indicate whether x < y -// -// Emulated, CPU Feature AVX -func (x Int8x16) Less(y Int8x16) Mask8x16 { - return y.Greater(x) -} - -// GreaterEqual returns a mask whose elements indicate whether x >= y -// -// Emulated, CPU Feature AVX -func (x Int8x16) GreaterEqual(y Int8x16) Mask8x16 { - ones := x.Equal(x).AsInt8x16() - return y.Greater(x).AsInt8x16().Xor(ones).AsMask8x16() -} - -// LessEqual returns a mask whose elements indicate whether x <= y -// -// Emulated, CPU Feature AVX -func (x Int8x16) LessEqual(y Int8x16) Mask8x16 { - ones := x.Equal(x).AsInt8x16() - return x.Greater(y).AsInt8x16().Xor(ones).AsMask8x16() -} - -// NotEqual returns a mask whose elements indicate whether x != y -// -// Emulated, CPU Feature AVX -func (x Int8x16) NotEqual(y Int8x16) Mask8x16 { - ones := x.Equal(x).AsInt8x16() - return x.Equal(y).AsInt8x16().Xor(ones).AsMask8x16() -} - -// Less returns a mask whose elements indicate whether x < y -// -// Emulated, CPU Feature AVX -func (x Int16x8) Less(y Int16x8) Mask16x8 { - return y.Greater(x) -} - -// GreaterEqual returns a mask whose elements indicate whether x >= y -// -// Emulated, CPU Feature AVX -func (x Int16x8) GreaterEqual(y Int16x8) Mask16x8 { - ones := x.Equal(x).AsInt16x8() - return y.Greater(x).AsInt16x8().Xor(ones).AsMask16x8() -} - -// LessEqual returns a mask whose elements indicate whether x <= y -// -// Emulated, CPU Feature AVX -func (x Int16x8) LessEqual(y Int16x8) Mask16x8 { - ones := x.Equal(x).AsInt16x8() - return x.Greater(y).AsInt16x8().Xor(ones).AsMask16x8() -} - -// NotEqual returns a mask whose elements indicate whether x != y -// -// Emulated, CPU Feature AVX -func (x Int16x8) NotEqual(y Int16x8) Mask16x8 { - ones := x.Equal(x).AsInt16x8() - return x.Equal(y).AsInt16x8().Xor(ones).AsMask16x8() -} - -// Less returns a mask whose elements indicate whether x < y -// -// Emulated, CPU Feature AVX -func (x Int32x4) Less(y Int32x4) Mask32x4 { - return y.Greater(x) -} - -// GreaterEqual returns a mask whose elements indicate whether x >= y -// -// Emulated, CPU Feature AVX -func (x Int32x4) GreaterEqual(y Int32x4) Mask32x4 { - ones := x.Equal(x).AsInt32x4() - return y.Greater(x).AsInt32x4().Xor(ones).AsMask32x4() -} - -// LessEqual returns a mask whose elements indicate whether x <= y -// -// Emulated, CPU Feature AVX -func (x Int32x4) LessEqual(y Int32x4) Mask32x4 { - ones := x.Equal(x).AsInt32x4() - return x.Greater(y).AsInt32x4().Xor(ones).AsMask32x4() -} - -// NotEqual returns a mask whose elements indicate whether x != y -// -// Emulated, CPU Feature AVX -func (x Int32x4) NotEqual(y Int32x4) Mask32x4 { - ones := x.Equal(x).AsInt32x4() - return x.Equal(y).AsInt32x4().Xor(ones).AsMask32x4() -} - -// Less returns a mask whose elements indicate whether x < y -// -// Emulated, CPU Feature AVX -func (x Int64x2) Less(y Int64x2) Mask64x2 { - return y.Greater(x) -} - -// GreaterEqual returns a mask whose elements indicate whether x >= y -// -// Emulated, CPU Feature AVX -func (x Int64x2) GreaterEqual(y Int64x2) Mask64x2 { - ones := x.Equal(x).AsInt64x2() - return y.Greater(x).AsInt64x2().Xor(ones).AsMask64x2() -} - -// LessEqual returns a mask whose elements indicate whether x <= y -// -// Emulated, CPU Feature AVX -func (x Int64x2) LessEqual(y Int64x2) Mask64x2 { - ones := x.Equal(x).AsInt64x2() - return 
x.Greater(y).AsInt64x2().Xor(ones).AsMask64x2() -} - -// NotEqual returns a mask whose elements indicate whether x != y -// -// Emulated, CPU Feature AVX -func (x Int64x2) NotEqual(y Int64x2) Mask64x2 { - ones := x.Equal(x).AsInt64x2() - return x.Equal(y).AsInt64x2().Xor(ones).AsMask64x2() -} - -// Less returns a mask whose elements indicate whether x < y -// -// Emulated, CPU Feature AVX2 -func (x Int8x32) Less(y Int8x32) Mask8x32 { - return y.Greater(x) -} - -// GreaterEqual returns a mask whose elements indicate whether x >= y -// -// Emulated, CPU Feature AVX2 -func (x Int8x32) GreaterEqual(y Int8x32) Mask8x32 { - ones := x.Equal(x).AsInt8x32() - return y.Greater(x).AsInt8x32().Xor(ones).AsMask8x32() -} - -// LessEqual returns a mask whose elements indicate whether x <= y -// -// Emulated, CPU Feature AVX2 -func (x Int8x32) LessEqual(y Int8x32) Mask8x32 { - ones := x.Equal(x).AsInt8x32() - return x.Greater(y).AsInt8x32().Xor(ones).AsMask8x32() -} - -// NotEqual returns a mask whose elements indicate whether x != y -// -// Emulated, CPU Feature AVX2 -func (x Int8x32) NotEqual(y Int8x32) Mask8x32 { - ones := x.Equal(x).AsInt8x32() - return x.Equal(y).AsInt8x32().Xor(ones).AsMask8x32() -} - -// Less returns a mask whose elements indicate whether x < y -// -// Emulated, CPU Feature AVX2 -func (x Int16x16) Less(y Int16x16) Mask16x16 { - return y.Greater(x) -} - -// GreaterEqual returns a mask whose elements indicate whether x >= y -// -// Emulated, CPU Feature AVX2 -func (x Int16x16) GreaterEqual(y Int16x16) Mask16x16 { - ones := x.Equal(x).AsInt16x16() - return y.Greater(x).AsInt16x16().Xor(ones).AsMask16x16() -} - -// LessEqual returns a mask whose elements indicate whether x <= y -// -// Emulated, CPU Feature AVX2 -func (x Int16x16) LessEqual(y Int16x16) Mask16x16 { - ones := x.Equal(x).AsInt16x16() - return x.Greater(y).AsInt16x16().Xor(ones).AsMask16x16() -} - -// NotEqual returns a mask whose elements indicate whether x != y -// -// Emulated, CPU Feature AVX2 -func (x Int16x16) NotEqual(y Int16x16) Mask16x16 { - ones := x.Equal(x).AsInt16x16() - return x.Equal(y).AsInt16x16().Xor(ones).AsMask16x16() -} - -// Less returns a mask whose elements indicate whether x < y -// -// Emulated, CPU Feature AVX2 -func (x Int32x8) Less(y Int32x8) Mask32x8 { - return y.Greater(x) -} - -// GreaterEqual returns a mask whose elements indicate whether x >= y -// -// Emulated, CPU Feature AVX2 -func (x Int32x8) GreaterEqual(y Int32x8) Mask32x8 { - ones := x.Equal(x).AsInt32x8() - return y.Greater(x).AsInt32x8().Xor(ones).AsMask32x8() -} - -// LessEqual returns a mask whose elements indicate whether x <= y -// -// Emulated, CPU Feature AVX2 -func (x Int32x8) LessEqual(y Int32x8) Mask32x8 { - ones := x.Equal(x).AsInt32x8() - return x.Greater(y).AsInt32x8().Xor(ones).AsMask32x8() -} - -// NotEqual returns a mask whose elements indicate whether x != y -// -// Emulated, CPU Feature AVX2 -func (x Int32x8) NotEqual(y Int32x8) Mask32x8 { - ones := x.Equal(x).AsInt32x8() - return x.Equal(y).AsInt32x8().Xor(ones).AsMask32x8() -} - -// Less returns a mask whose elements indicate whether x < y -// -// Emulated, CPU Feature AVX2 -func (x Int64x4) Less(y Int64x4) Mask64x4 { - return y.Greater(x) -} - -// GreaterEqual returns a mask whose elements indicate whether x >= y -// -// Emulated, CPU Feature AVX2 -func (x Int64x4) GreaterEqual(y Int64x4) Mask64x4 { - ones := x.Equal(x).AsInt64x4() - return y.Greater(x).AsInt64x4().Xor(ones).AsMask64x4() -} - -// LessEqual returns a mask whose elements indicate whether x <= y 
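These comparison emulations rest on two identities: x.Equal(x) on an integer vector yields all-ones lanes, and Xor with all ones is a lane-wise bitwise NOT. So GreaterEqual is NOT(y > x), LessEqual is NOT(x > y), and NotEqual is NOT(x == y). Spelled out once with the public API, as a sketch (assuming GOEXPERIMENT=simd):

	x := simd.LoadInt32x4Slice([]int32{1, 2, 3, 4})
	y := simd.LoadInt32x4Slice([]int32{1, 0, 3, 0})
	ones := x.Equal(x).AsInt32x4() // every bit set in every lane
	ne := x.Equal(y).AsInt32x4().Xor(ones).AsMask32x4()
	_ = ne // NotEqual: true in lanes 1 and 3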
-// -// Emulated, CPU Feature AVX2 -func (x Int64x4) LessEqual(y Int64x4) Mask64x4 { - ones := x.Equal(x).AsInt64x4() - return x.Greater(y).AsInt64x4().Xor(ones).AsMask64x4() -} - -// NotEqual returns a mask whose elements indicate whether x != y -// -// Emulated, CPU Feature AVX2 -func (x Int64x4) NotEqual(y Int64x4) Mask64x4 { - ones := x.Equal(x).AsInt64x4() - return x.Equal(y).AsInt64x4().Xor(ones).AsMask64x4() -} - -// Greater returns a mask whose elements indicate whether x > y -// -// Emulated, CPU Feature AVX2 -func (x Uint8x16) Greater(y Uint8x16) Mask8x16 { - a, b := x.AsInt8x16(), y.AsInt8x16() - signs := BroadcastInt8x16(-1 << (8 - 1)) - return a.Xor(signs).Greater(b.Xor(signs)) -} - -// Less returns a mask whose elements indicate whether x < y -// -// Emulated, CPU Feature AVX2 -func (x Uint8x16) Less(y Uint8x16) Mask8x16 { - a, b := x.AsInt8x16(), y.AsInt8x16() - signs := BroadcastInt8x16(-1 << (8 - 1)) - return b.Xor(signs).Greater(a.Xor(signs)) -} - -// GreaterEqual returns a mask whose elements indicate whether x >= y -// -// Emulated, CPU Feature AVX2 -func (x Uint8x16) GreaterEqual(y Uint8x16) Mask8x16 { - a, b := x.AsInt8x16(), y.AsInt8x16() - ones := x.Equal(x).AsInt8x16() - signs := BroadcastInt8x16(-1 << (8 - 1)) - return b.Xor(signs).Greater(a.Xor(signs)).AsInt8x16().Xor(ones).AsMask8x16() -} - -// LessEqual returns a mask whose elements indicate whether x <= y -// -// Emulated, CPU Feature AVX2 -func (x Uint8x16) LessEqual(y Uint8x16) Mask8x16 { - a, b := x.AsInt8x16(), y.AsInt8x16() - ones := x.Equal(x).AsInt8x16() - signs := BroadcastInt8x16(-1 << (8 - 1)) - return a.Xor(signs).Greater(b.Xor(signs)).AsInt8x16().Xor(ones).AsMask8x16() -} - -// NotEqual returns a mask whose elements indicate whether x != y -// -// Emulated, CPU Feature AVX -func (x Uint8x16) NotEqual(y Uint8x16) Mask8x16 { - a, b := x.AsInt8x16(), y.AsInt8x16() - ones := x.Equal(x).AsInt8x16() - return a.Equal(b).AsInt8x16().Xor(ones).AsMask8x16() -} - -// Greater returns a mask whose elements indicate whether x > y -// -// Emulated, CPU Feature AVX -func (x Uint16x8) Greater(y Uint16x8) Mask16x8 { - a, b := x.AsInt16x8(), y.AsInt16x8() - ones := x.Equal(x).AsInt16x8() - signs := ones.ShiftAllLeft(16 - 1) - return a.Xor(signs).Greater(b.Xor(signs)) -} - -// Less returns a mask whose elements indicate whether x < y -// -// Emulated, CPU Feature AVX -func (x Uint16x8) Less(y Uint16x8) Mask16x8 { - a, b := x.AsInt16x8(), y.AsInt16x8() - ones := x.Equal(x).AsInt16x8() - signs := ones.ShiftAllLeft(16 - 1) - return b.Xor(signs).Greater(a.Xor(signs)) -} - -// GreaterEqual returns a mask whose elements indicate whether x >= y -// -// Emulated, CPU Feature AVX -func (x Uint16x8) GreaterEqual(y Uint16x8) Mask16x8 { - a, b := x.AsInt16x8(), y.AsInt16x8() - ones := x.Equal(x).AsInt16x8() - signs := ones.ShiftAllLeft(16 - 1) - return b.Xor(signs).Greater(a.Xor(signs)).AsInt16x8().Xor(ones).AsMask16x8() -} - -// LessEqual returns a mask whose elements indicate whether x <= y -// -// Emulated, CPU Feature AVX -func (x Uint16x8) LessEqual(y Uint16x8) Mask16x8 { - a, b := x.AsInt16x8(), y.AsInt16x8() - ones := x.Equal(x).AsInt16x8() - signs := ones.ShiftAllLeft(16 - 1) - return a.Xor(signs).Greater(b.Xor(signs)).AsInt16x8().Xor(ones).AsMask16x8() -} - -// NotEqual returns a mask whose elements indicate whether x != y -// -// Emulated, CPU Feature AVX -func (x Uint16x8) NotEqual(y Uint16x8) Mask16x8 { - a, b := x.AsInt16x8(), y.AsInt16x8() - ones := x.Equal(x).AsInt16x8() - return 
a.Equal(b).AsInt16x8().Xor(ones).AsMask16x8() -} - -// Greater returns a mask whose elements indicate whether x > y -// -// Emulated, CPU Feature AVX -func (x Uint32x4) Greater(y Uint32x4) Mask32x4 { - a, b := x.AsInt32x4(), y.AsInt32x4() - ones := x.Equal(x).AsInt32x4() - signs := ones.ShiftAllLeft(32 - 1) - return a.Xor(signs).Greater(b.Xor(signs)) -} - -// Less returns a mask whose elements indicate whether x < y -// -// Emulated, CPU Feature AVX -func (x Uint32x4) Less(y Uint32x4) Mask32x4 { - a, b := x.AsInt32x4(), y.AsInt32x4() - ones := x.Equal(x).AsInt32x4() - signs := ones.ShiftAllLeft(32 - 1) - return b.Xor(signs).Greater(a.Xor(signs)) -} - -// GreaterEqual returns a mask whose elements indicate whether x >= y -// -// Emulated, CPU Feature AVX -func (x Uint32x4) GreaterEqual(y Uint32x4) Mask32x4 { - a, b := x.AsInt32x4(), y.AsInt32x4() - ones := x.Equal(x).AsInt32x4() - signs := ones.ShiftAllLeft(32 - 1) - return b.Xor(signs).Greater(a.Xor(signs)).AsInt32x4().Xor(ones).AsMask32x4() -} - -// LessEqual returns a mask whose elements indicate whether x <= y -// -// Emulated, CPU Feature AVX -func (x Uint32x4) LessEqual(y Uint32x4) Mask32x4 { - a, b := x.AsInt32x4(), y.AsInt32x4() - ones := x.Equal(x).AsInt32x4() - signs := ones.ShiftAllLeft(32 - 1) - return a.Xor(signs).Greater(b.Xor(signs)).AsInt32x4().Xor(ones).AsMask32x4() -} - -// NotEqual returns a mask whose elements indicate whether x != y -// -// Emulated, CPU Feature AVX -func (x Uint32x4) NotEqual(y Uint32x4) Mask32x4 { - a, b := x.AsInt32x4(), y.AsInt32x4() - ones := x.Equal(x).AsInt32x4() - return a.Equal(b).AsInt32x4().Xor(ones).AsMask32x4() -} - -// Greater returns a mask whose elements indicate whether x > y -// -// Emulated, CPU Feature AVX -func (x Uint64x2) Greater(y Uint64x2) Mask64x2 { - a, b := x.AsInt64x2(), y.AsInt64x2() - ones := x.Equal(x).AsInt64x2() - signs := ones.ShiftAllLeft(64 - 1) - return a.Xor(signs).Greater(b.Xor(signs)) -} - -// Less returns a mask whose elements indicate whether x < y -// -// Emulated, CPU Feature AVX -func (x Uint64x2) Less(y Uint64x2) Mask64x2 { - a, b := x.AsInt64x2(), y.AsInt64x2() - ones := x.Equal(x).AsInt64x2() - signs := ones.ShiftAllLeft(64 - 1) - return b.Xor(signs).Greater(a.Xor(signs)) -} - -// GreaterEqual returns a mask whose elements indicate whether x >= y -// -// Emulated, CPU Feature AVX -func (x Uint64x2) GreaterEqual(y Uint64x2) Mask64x2 { - a, b := x.AsInt64x2(), y.AsInt64x2() - ones := x.Equal(x).AsInt64x2() - signs := ones.ShiftAllLeft(64 - 1) - return b.Xor(signs).Greater(a.Xor(signs)).AsInt64x2().Xor(ones).AsMask64x2() -} - -// LessEqual returns a mask whose elements indicate whether x <= y -// -// Emulated, CPU Feature AVX -func (x Uint64x2) LessEqual(y Uint64x2) Mask64x2 { - a, b := x.AsInt64x2(), y.AsInt64x2() - ones := x.Equal(x).AsInt64x2() - signs := ones.ShiftAllLeft(64 - 1) - return a.Xor(signs).Greater(b.Xor(signs)).AsInt64x2().Xor(ones).AsMask64x2() -} - -// NotEqual returns a mask whose elements indicate whether x != y -// -// Emulated, CPU Feature AVX -func (x Uint64x2) NotEqual(y Uint64x2) Mask64x2 { - a, b := x.AsInt64x2(), y.AsInt64x2() - ones := x.Equal(x).AsInt64x2() - return a.Equal(b).AsInt64x2().Xor(ones).AsMask64x2() -} - -// Greater returns a mask whose elements indicate whether x > y -// -// Emulated, CPU Feature AVX2 -func (x Uint8x32) Greater(y Uint8x32) Mask8x32 { - a, b := x.AsInt8x32(), y.AsInt8x32() - signs := BroadcastInt8x32(-1 << (8 - 1)) - return a.Xor(signs).Greater(b.Xor(signs)) -} - -// Less returns a mask whose elements 
indicate whether x < y -// -// Emulated, CPU Feature AVX2 -func (x Uint8x32) Less(y Uint8x32) Mask8x32 { - a, b := x.AsInt8x32(), y.AsInt8x32() - signs := BroadcastInt8x32(-1 << (8 - 1)) - return b.Xor(signs).Greater(a.Xor(signs)) -} - -// GreaterEqual returns a mask whose elements indicate whether x >= y -// -// Emulated, CPU Feature AVX2 -func (x Uint8x32) GreaterEqual(y Uint8x32) Mask8x32 { - a, b := x.AsInt8x32(), y.AsInt8x32() - ones := x.Equal(x).AsInt8x32() - signs := BroadcastInt8x32(-1 << (8 - 1)) - return b.Xor(signs).Greater(a.Xor(signs)).AsInt8x32().Xor(ones).AsMask8x32() -} - -// LessEqual returns a mask whose elements indicate whether x <= y -// -// Emulated, CPU Feature AVX2 -func (x Uint8x32) LessEqual(y Uint8x32) Mask8x32 { - a, b := x.AsInt8x32(), y.AsInt8x32() - ones := x.Equal(x).AsInt8x32() - signs := BroadcastInt8x32(-1 << (8 - 1)) - return a.Xor(signs).Greater(b.Xor(signs)).AsInt8x32().Xor(ones).AsMask8x32() -} - -// NotEqual returns a mask whose elements indicate whether x != y -// -// Emulated, CPU Feature AVX2 -func (x Uint8x32) NotEqual(y Uint8x32) Mask8x32 { - a, b := x.AsInt8x32(), y.AsInt8x32() - ones := x.Equal(x).AsInt8x32() - return a.Equal(b).AsInt8x32().Xor(ones).AsMask8x32() -} - -// Greater returns a mask whose elements indicate whether x > y -// -// Emulated, CPU Feature AVX2 -func (x Uint16x16) Greater(y Uint16x16) Mask16x16 { - a, b := x.AsInt16x16(), y.AsInt16x16() - ones := x.Equal(x).AsInt16x16() - signs := ones.ShiftAllLeft(16 - 1) - return a.Xor(signs).Greater(b.Xor(signs)) -} - -// Less returns a mask whose elements indicate whether x < y -// -// Emulated, CPU Feature AVX2 -func (x Uint16x16) Less(y Uint16x16) Mask16x16 { - a, b := x.AsInt16x16(), y.AsInt16x16() - ones := x.Equal(x).AsInt16x16() - signs := ones.ShiftAllLeft(16 - 1) - return b.Xor(signs).Greater(a.Xor(signs)) -} - -// GreaterEqual returns a mask whose elements indicate whether x >= y -// -// Emulated, CPU Feature AVX2 -func (x Uint16x16) GreaterEqual(y Uint16x16) Mask16x16 { - a, b := x.AsInt16x16(), y.AsInt16x16() - ones := x.Equal(x).AsInt16x16() - signs := ones.ShiftAllLeft(16 - 1) - return b.Xor(signs).Greater(a.Xor(signs)).AsInt16x16().Xor(ones).AsMask16x16() -} - -// LessEqual returns a mask whose elements indicate whether x <= y -// -// Emulated, CPU Feature AVX2 -func (x Uint16x16) LessEqual(y Uint16x16) Mask16x16 { - a, b := x.AsInt16x16(), y.AsInt16x16() - ones := x.Equal(x).AsInt16x16() - signs := ones.ShiftAllLeft(16 - 1) - return a.Xor(signs).Greater(b.Xor(signs)).AsInt16x16().Xor(ones).AsMask16x16() -} - -// NotEqual returns a mask whose elements indicate whether x != y -// -// Emulated, CPU Feature AVX2 -func (x Uint16x16) NotEqual(y Uint16x16) Mask16x16 { - a, b := x.AsInt16x16(), y.AsInt16x16() - ones := x.Equal(x).AsInt16x16() - return a.Equal(b).AsInt16x16().Xor(ones).AsMask16x16() -} - -// Greater returns a mask whose elements indicate whether x > y -// -// Emulated, CPU Feature AVX2 -func (x Uint32x8) Greater(y Uint32x8) Mask32x8 { - a, b := x.AsInt32x8(), y.AsInt32x8() - ones := x.Equal(x).AsInt32x8() - signs := ones.ShiftAllLeft(32 - 1) - return a.Xor(signs).Greater(b.Xor(signs)) -} - -// Less returns a mask whose elements indicate whether x < y -// -// Emulated, CPU Feature AVX2 -func (x Uint32x8) Less(y Uint32x8) Mask32x8 { - a, b := x.AsInt32x8(), y.AsInt32x8() - ones := x.Equal(x).AsInt32x8() - signs := ones.ShiftAllLeft(32 - 1) - return b.Xor(signs).Greater(a.Xor(signs)) -} - -// GreaterEqual returns a mask whose elements indicate whether x >= y -// 
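The unsigned comparisons reduce to the signed Greater that the hardware provides by flipping the sign bit of both operands: XOR with 0x80...0 maps unsigned order onto signed order. For 16-, 32- and 64-bit lanes that constant is produced by shifting an all-ones vector left by width-1; the 8-bit variants broadcast it instead, presumably because x86 has no packed 8-bit shift. A scalar check of the identity, as a sketch:

	// (a ^ 0x80) > (b ^ 0x80) in signed int8 order iff a > b in unsigned order.
	a, b := uint8(200), uint8(50)
	sa, sb := int8(a^0x80), int8(b^0x80) // 72 and -78
	fmt.Println(sa > sb)                 // true, matching 200 > 50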
-// Emulated, CPU Feature AVX2 -func (x Uint32x8) GreaterEqual(y Uint32x8) Mask32x8 { - a, b := x.AsInt32x8(), y.AsInt32x8() - ones := x.Equal(x).AsInt32x8() - signs := ones.ShiftAllLeft(32 - 1) - return b.Xor(signs).Greater(a.Xor(signs)).AsInt32x8().Xor(ones).AsMask32x8() -} - -// LessEqual returns a mask whose elements indicate whether x <= y -// -// Emulated, CPU Feature AVX2 -func (x Uint32x8) LessEqual(y Uint32x8) Mask32x8 { - a, b := x.AsInt32x8(), y.AsInt32x8() - ones := x.Equal(x).AsInt32x8() - signs := ones.ShiftAllLeft(32 - 1) - return a.Xor(signs).Greater(b.Xor(signs)).AsInt32x8().Xor(ones).AsMask32x8() -} - -// NotEqual returns a mask whose elements indicate whether x != y -// -// Emulated, CPU Feature AVX2 -func (x Uint32x8) NotEqual(y Uint32x8) Mask32x8 { - a, b := x.AsInt32x8(), y.AsInt32x8() - ones := x.Equal(x).AsInt32x8() - return a.Equal(b).AsInt32x8().Xor(ones).AsMask32x8() -} - -// Greater returns a mask whose elements indicate whether x > y -// -// Emulated, CPU Feature AVX2 -func (x Uint64x4) Greater(y Uint64x4) Mask64x4 { - a, b := x.AsInt64x4(), y.AsInt64x4() - ones := x.Equal(x).AsInt64x4() - signs := ones.ShiftAllLeft(64 - 1) - return a.Xor(signs).Greater(b.Xor(signs)) -} - -// Less returns a mask whose elements indicate whether x < y -// -// Emulated, CPU Feature AVX2 -func (x Uint64x4) Less(y Uint64x4) Mask64x4 { - a, b := x.AsInt64x4(), y.AsInt64x4() - ones := x.Equal(x).AsInt64x4() - signs := ones.ShiftAllLeft(64 - 1) - return b.Xor(signs).Greater(a.Xor(signs)) -} - -// GreaterEqual returns a mask whose elements indicate whether x >= y -// -// Emulated, CPU Feature AVX2 -func (x Uint64x4) GreaterEqual(y Uint64x4) Mask64x4 { - a, b := x.AsInt64x4(), y.AsInt64x4() - ones := x.Equal(x).AsInt64x4() - signs := ones.ShiftAllLeft(64 - 1) - return b.Xor(signs).Greater(a.Xor(signs)).AsInt64x4().Xor(ones).AsMask64x4() -} - -// LessEqual returns a mask whose elements indicate whether x <= y -// -// Emulated, CPU Feature AVX2 -func (x Uint64x4) LessEqual(y Uint64x4) Mask64x4 { - a, b := x.AsInt64x4(), y.AsInt64x4() - ones := x.Equal(x).AsInt64x4() - signs := ones.ShiftAllLeft(64 - 1) - return a.Xor(signs).Greater(b.Xor(signs)).AsInt64x4().Xor(ones).AsMask64x4() -} - -// NotEqual returns a mask whose elements indicate whether x != y -// -// Emulated, CPU Feature AVX2 -func (x Uint64x4) NotEqual(y Uint64x4) Mask64x4 { - a, b := x.AsInt64x4(), y.AsInt64x4() - ones := x.Equal(x).AsInt64x4() - return a.Equal(b).AsInt64x4().Xor(ones).AsMask64x4() -} - -// BroadcastInt8x16 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX2 -func BroadcastInt8x16(x int8) Int8x16 { - var z Int8x16 - return z.SetElem(0, x).Broadcast128() -} - -// BroadcastInt16x8 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX2 -func BroadcastInt16x8(x int16) Int16x8 { - var z Int16x8 - return z.SetElem(0, x).Broadcast128() -} - -// BroadcastInt32x4 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX2 -func BroadcastInt32x4(x int32) Int32x4 { - var z Int32x4 - return z.SetElem(0, x).Broadcast128() -} - -// BroadcastInt64x2 returns a vector with the input -// x assigned to all elements of the output. 
-// -// Emulated, CPU Feature AVX2 -func BroadcastInt64x2(x int64) Int64x2 { - var z Int64x2 - return z.SetElem(0, x).Broadcast128() -} - -// BroadcastUint8x16 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX2 -func BroadcastUint8x16(x uint8) Uint8x16 { - var z Uint8x16 - return z.SetElem(0, x).Broadcast128() -} - -// BroadcastUint16x8 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX2 -func BroadcastUint16x8(x uint16) Uint16x8 { - var z Uint16x8 - return z.SetElem(0, x).Broadcast128() -} - -// BroadcastUint32x4 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX2 -func BroadcastUint32x4(x uint32) Uint32x4 { - var z Uint32x4 - return z.SetElem(0, x).Broadcast128() -} - -// BroadcastUint64x2 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX2 -func BroadcastUint64x2(x uint64) Uint64x2 { - var z Uint64x2 - return z.SetElem(0, x).Broadcast128() -} - -// BroadcastFloat32x4 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX2 -func BroadcastFloat32x4(x float32) Float32x4 { - var z Float32x4 - return z.SetElem(0, x).Broadcast128() -} - -// BroadcastFloat64x2 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX2 -func BroadcastFloat64x2(x float64) Float64x2 { - var z Float64x2 - return z.SetElem(0, x).Broadcast128() -} - -// BroadcastInt8x32 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX2 -func BroadcastInt8x32(x int8) Int8x32 { - var z Int8x16 - return z.SetElem(0, x).Broadcast256() -} - -// BroadcastInt16x16 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX2 -func BroadcastInt16x16(x int16) Int16x16 { - var z Int16x8 - return z.SetElem(0, x).Broadcast256() -} - -// BroadcastInt32x8 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX2 -func BroadcastInt32x8(x int32) Int32x8 { - var z Int32x4 - return z.SetElem(0, x).Broadcast256() -} - -// BroadcastInt64x4 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX2 -func BroadcastInt64x4(x int64) Int64x4 { - var z Int64x2 - return z.SetElem(0, x).Broadcast256() -} - -// BroadcastUint8x32 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX2 -func BroadcastUint8x32(x uint8) Uint8x32 { - var z Uint8x16 - return z.SetElem(0, x).Broadcast256() -} - -// BroadcastUint16x16 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX2 -func BroadcastUint16x16(x uint16) Uint16x16 { - var z Uint16x8 - return z.SetElem(0, x).Broadcast256() -} - -// BroadcastUint32x8 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX2 -func BroadcastUint32x8(x uint32) Uint32x8 { - var z Uint32x4 - return z.SetElem(0, x).Broadcast256() -} - -// BroadcastUint64x4 returns a vector with the input -// x assigned to all elements of the output. 
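Each Broadcast helper splats a scalar by writing it into lane 0 with SetElem and then broadcasting that low element; the 256- and 512-bit variants start from a 128-bit vector and widen via Broadcast256 or Broadcast512. A short usage sketch (assuming GOEXPERIMENT=simd):

	limit := simd.BroadcastInt32x8(100)
	v := simd.LoadInt32x8Slice([]int32{1, 200, 3, 400, 5, 600, 7, 800})
	m := v.Greater(limit) // true in the lanes holding 200, 400, 600 and 800
	_ = m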
-// -// Emulated, CPU Feature AVX2 -func BroadcastUint64x4(x uint64) Uint64x4 { - var z Uint64x2 - return z.SetElem(0, x).Broadcast256() -} - -// BroadcastFloat32x8 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX2 -func BroadcastFloat32x8(x float32) Float32x8 { - var z Float32x4 - return z.SetElem(0, x).Broadcast256() -} - -// BroadcastFloat64x4 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX2 -func BroadcastFloat64x4(x float64) Float64x4 { - var z Float64x2 - return z.SetElem(0, x).Broadcast256() -} - -// BroadcastInt8x64 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX512BW -func BroadcastInt8x64(x int8) Int8x64 { - var z Int8x16 - return z.SetElem(0, x).Broadcast512() -} - -// BroadcastInt16x32 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX512BW -func BroadcastInt16x32(x int16) Int16x32 { - var z Int16x8 - return z.SetElem(0, x).Broadcast512() -} - -// BroadcastInt32x16 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX512F -func BroadcastInt32x16(x int32) Int32x16 { - var z Int32x4 - return z.SetElem(0, x).Broadcast512() -} - -// BroadcastInt64x8 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX512F -func BroadcastInt64x8(x int64) Int64x8 { - var z Int64x2 - return z.SetElem(0, x).Broadcast512() -} - -// BroadcastUint8x64 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX512BW -func BroadcastUint8x64(x uint8) Uint8x64 { - var z Uint8x16 - return z.SetElem(0, x).Broadcast512() -} - -// BroadcastUint16x32 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX512BW -func BroadcastUint16x32(x uint16) Uint16x32 { - var z Uint16x8 - return z.SetElem(0, x).Broadcast512() -} - -// BroadcastUint32x16 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX512F -func BroadcastUint32x16(x uint32) Uint32x16 { - var z Uint32x4 - return z.SetElem(0, x).Broadcast512() -} - -// BroadcastUint64x8 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX512F -func BroadcastUint64x8(x uint64) Uint64x8 { - var z Uint64x2 - return z.SetElem(0, x).Broadcast512() -} - -// BroadcastFloat32x16 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX512F -func BroadcastFloat32x16(x float32) Float32x16 { - var z Float32x4 - return z.SetElem(0, x).Broadcast512() -} - -// BroadcastFloat64x8 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX512F -func BroadcastFloat64x8(x float64) Float64x8 { - var z Float64x2 - return z.SetElem(0, x).Broadcast512() -} diff --git a/src/simd/slice_gen_amd64.go b/src/simd/slice_gen_amd64.go new file mode 100644 index 00000000000000..45e95be9bf9765 --- /dev/null +++ b/src/simd/slice_gen_amd64.go @@ -0,0 +1,1103 @@ +// Code generated by 'go run genfiles.go'; DO NOT EDIT. 
+ +//go:build goexperiment.simd + +package simd + +import "unsafe" + +// LoadInt8x16Slice loads an Int8x16 from a slice of at least 16 int8s +func LoadInt8x16Slice(s []int8) Int8x16 { + return LoadInt8x16((*[16]int8)(s)) +} + +// StoreSlice stores x into a slice of at least 16 int8s +func (x Int8x16) StoreSlice(s []int8) { + x.Store((*[16]int8)(s)) +} + +// LoadInt16x8Slice loads an Int16x8 from a slice of at least 8 int16s +func LoadInt16x8Slice(s []int16) Int16x8 { + return LoadInt16x8((*[8]int16)(s)) +} + +// StoreSlice stores x into a slice of at least 8 int16s +func (x Int16x8) StoreSlice(s []int16) { + x.Store((*[8]int16)(s)) +} + +// LoadInt32x4Slice loads an Int32x4 from a slice of at least 4 int32s +func LoadInt32x4Slice(s []int32) Int32x4 { + return LoadInt32x4((*[4]int32)(s)) +} + +// StoreSlice stores x into a slice of at least 4 int32s +func (x Int32x4) StoreSlice(s []int32) { + x.Store((*[4]int32)(s)) +} + +// LoadInt64x2Slice loads an Int64x2 from a slice of at least 2 int64s +func LoadInt64x2Slice(s []int64) Int64x2 { + return LoadInt64x2((*[2]int64)(s)) +} + +// StoreSlice stores x into a slice of at least 2 int64s +func (x Int64x2) StoreSlice(s []int64) { + x.Store((*[2]int64)(s)) +} + +// LoadUint8x16Slice loads an Uint8x16 from a slice of at least 16 uint8s +func LoadUint8x16Slice(s []uint8) Uint8x16 { + return LoadUint8x16((*[16]uint8)(s)) +} + +// StoreSlice stores x into a slice of at least 16 uint8s +func (x Uint8x16) StoreSlice(s []uint8) { + x.Store((*[16]uint8)(s)) +} + +// LoadUint16x8Slice loads an Uint16x8 from a slice of at least 8 uint16s +func LoadUint16x8Slice(s []uint16) Uint16x8 { + return LoadUint16x8((*[8]uint16)(s)) +} + +// StoreSlice stores x into a slice of at least 8 uint16s +func (x Uint16x8) StoreSlice(s []uint16) { + x.Store((*[8]uint16)(s)) +} + +// LoadUint32x4Slice loads an Uint32x4 from a slice of at least 4 uint32s +func LoadUint32x4Slice(s []uint32) Uint32x4 { + return LoadUint32x4((*[4]uint32)(s)) +} + +// StoreSlice stores x into a slice of at least 4 uint32s +func (x Uint32x4) StoreSlice(s []uint32) { + x.Store((*[4]uint32)(s)) +} + +// LoadUint64x2Slice loads an Uint64x2 from a slice of at least 2 uint64s +func LoadUint64x2Slice(s []uint64) Uint64x2 { + return LoadUint64x2((*[2]uint64)(s)) +} + +// StoreSlice stores x into a slice of at least 2 uint64s +func (x Uint64x2) StoreSlice(s []uint64) { + x.Store((*[2]uint64)(s)) +} + +// LoadFloat32x4Slice loads a Float32x4 from a slice of at least 4 float32s +func LoadFloat32x4Slice(s []float32) Float32x4 { + return LoadFloat32x4((*[4]float32)(s)) +} + +// StoreSlice stores x into a slice of at least 4 float32s +func (x Float32x4) StoreSlice(s []float32) { + x.Store((*[4]float32)(s)) +} + +// LoadFloat64x2Slice loads a Float64x2 from a slice of at least 2 float64s +func LoadFloat64x2Slice(s []float64) Float64x2 { + return LoadFloat64x2((*[2]float64)(s)) +} + +// StoreSlice stores x into a slice of at least 2 float64s +func (x Float64x2) StoreSlice(s []float64) { + x.Store((*[2]float64)(s)) +} + +// LoadInt8x32Slice loads an Int8x32 from a slice of at least 32 int8s +func LoadInt8x32Slice(s []int8) Int8x32 { + return LoadInt8x32((*[32]int8)(s)) +} + +// StoreSlice stores x into a slice of at least 32 int8s +func (x Int8x32) StoreSlice(s []int8) { + x.Store((*[32]int8)(s)) +} + +// LoadInt16x16Slice loads an Int16x16 from a slice of at least 16 int16s +func LoadInt16x16Slice(s []int16) Int16x16 { + return LoadInt16x16((*[16]int16)(s)) +} + +// StoreSlice stores x into a slice of at least 16 
int16s +func (x Int16x16) StoreSlice(s []int16) { + x.Store((*[16]int16)(s)) +} + +// LoadInt32x8Slice loads an Int32x8 from a slice of at least 8 int32s +func LoadInt32x8Slice(s []int32) Int32x8 { + return LoadInt32x8((*[8]int32)(s)) +} + +// StoreSlice stores x into a slice of at least 8 int32s +func (x Int32x8) StoreSlice(s []int32) { + x.Store((*[8]int32)(s)) +} + +// LoadInt64x4Slice loads an Int64x4 from a slice of at least 4 int64s +func LoadInt64x4Slice(s []int64) Int64x4 { + return LoadInt64x4((*[4]int64)(s)) +} + +// StoreSlice stores x into a slice of at least 4 int64s +func (x Int64x4) StoreSlice(s []int64) { + x.Store((*[4]int64)(s)) +} + +// LoadUint8x32Slice loads an Uint8x32 from a slice of at least 32 uint8s +func LoadUint8x32Slice(s []uint8) Uint8x32 { + return LoadUint8x32((*[32]uint8)(s)) +} + +// StoreSlice stores x into a slice of at least 32 uint8s +func (x Uint8x32) StoreSlice(s []uint8) { + x.Store((*[32]uint8)(s)) +} + +// LoadUint16x16Slice loads an Uint16x16 from a slice of at least 16 uint16s +func LoadUint16x16Slice(s []uint16) Uint16x16 { + return LoadUint16x16((*[16]uint16)(s)) +} + +// StoreSlice stores x into a slice of at least 16 uint16s +func (x Uint16x16) StoreSlice(s []uint16) { + x.Store((*[16]uint16)(s)) +} + +// LoadUint32x8Slice loads an Uint32x8 from a slice of at least 8 uint32s +func LoadUint32x8Slice(s []uint32) Uint32x8 { + return LoadUint32x8((*[8]uint32)(s)) +} + +// StoreSlice stores x into a slice of at least 8 uint32s +func (x Uint32x8) StoreSlice(s []uint32) { + x.Store((*[8]uint32)(s)) +} + +// LoadUint64x4Slice loads an Uint64x4 from a slice of at least 4 uint64s +func LoadUint64x4Slice(s []uint64) Uint64x4 { + return LoadUint64x4((*[4]uint64)(s)) +} + +// StoreSlice stores x into a slice of at least 4 uint64s +func (x Uint64x4) StoreSlice(s []uint64) { + x.Store((*[4]uint64)(s)) +} + +// LoadFloat32x8Slice loads a Float32x8 from a slice of at least 8 float32s +func LoadFloat32x8Slice(s []float32) Float32x8 { + return LoadFloat32x8((*[8]float32)(s)) +} + +// StoreSlice stores x into a slice of at least 8 float32s +func (x Float32x8) StoreSlice(s []float32) { + x.Store((*[8]float32)(s)) +} + +// LoadFloat64x4Slice loads a Float64x4 from a slice of at least 4 float64s +func LoadFloat64x4Slice(s []float64) Float64x4 { + return LoadFloat64x4((*[4]float64)(s)) +} + +// StoreSlice stores x into a slice of at least 4 float64s +func (x Float64x4) StoreSlice(s []float64) { + x.Store((*[4]float64)(s)) +} + +// LoadInt8x64Slice loads an Int8x64 from a slice of at least 64 int8s +func LoadInt8x64Slice(s []int8) Int8x64 { + return LoadInt8x64((*[64]int8)(s)) +} + +// StoreSlice stores x into a slice of at least 64 int8s +func (x Int8x64) StoreSlice(s []int8) { + x.Store((*[64]int8)(s)) +} + +// LoadInt16x32Slice loads an Int16x32 from a slice of at least 32 int16s +func LoadInt16x32Slice(s []int16) Int16x32 { + return LoadInt16x32((*[32]int16)(s)) +} + +// StoreSlice stores x into a slice of at least 32 int16s +func (x Int16x32) StoreSlice(s []int16) { + x.Store((*[32]int16)(s)) +} + +// LoadInt32x16Slice loads an Int32x16 from a slice of at least 16 int32s +func LoadInt32x16Slice(s []int32) Int32x16 { + return LoadInt32x16((*[16]int32)(s)) +} + +// StoreSlice stores x into a slice of at least 16 int32s +func (x Int32x16) StoreSlice(s []int32) { + x.Store((*[16]int32)(s)) +} + +// LoadInt64x8Slice loads an Int64x8 from a slice of at least 8 int64s +func LoadInt64x8Slice(s []int64) Int64x8 { + return LoadInt64x8((*[8]int64)(s)) +} + +// StoreSlice 
stores x into a slice of at least 8 int64s +func (x Int64x8) StoreSlice(s []int64) { + x.Store((*[8]int64)(s)) +} + +// LoadUint8x64Slice loads an Uint8x64 from a slice of at least 64 uint8s +func LoadUint8x64Slice(s []uint8) Uint8x64 { + return LoadUint8x64((*[64]uint8)(s)) +} + +// StoreSlice stores x into a slice of at least 64 uint8s +func (x Uint8x64) StoreSlice(s []uint8) { + x.Store((*[64]uint8)(s)) +} + +// LoadUint16x32Slice loads an Uint16x32 from a slice of at least 32 uint16s +func LoadUint16x32Slice(s []uint16) Uint16x32 { + return LoadUint16x32((*[32]uint16)(s)) +} + +// StoreSlice stores x into a slice of at least 32 uint16s +func (x Uint16x32) StoreSlice(s []uint16) { + x.Store((*[32]uint16)(s)) +} + +// LoadUint32x16Slice loads an Uint32x16 from a slice of at least 16 uint32s +func LoadUint32x16Slice(s []uint32) Uint32x16 { + return LoadUint32x16((*[16]uint32)(s)) +} + +// StoreSlice stores x into a slice of at least 16 uint32s +func (x Uint32x16) StoreSlice(s []uint32) { + x.Store((*[16]uint32)(s)) +} + +// LoadUint64x8Slice loads an Uint64x8 from a slice of at least 8 uint64s +func LoadUint64x8Slice(s []uint64) Uint64x8 { + return LoadUint64x8((*[8]uint64)(s)) +} + +// StoreSlice stores x into a slice of at least 8 uint64s +func (x Uint64x8) StoreSlice(s []uint64) { + x.Store((*[8]uint64)(s)) +} + +// LoadFloat32x16Slice loads a Float32x16 from a slice of at least 16 float32s +func LoadFloat32x16Slice(s []float32) Float32x16 { + return LoadFloat32x16((*[16]float32)(s)) +} + +// StoreSlice stores x into a slice of at least 16 float32s +func (x Float32x16) StoreSlice(s []float32) { + x.Store((*[16]float32)(s)) +} + +// LoadFloat64x8Slice loads a Float64x8 from a slice of at least 8 float64s +func LoadFloat64x8Slice(s []float64) Float64x8 { + return LoadFloat64x8((*[8]float64)(s)) +} + +// StoreSlice stores x into a slice of at least 8 float64s +func (x Float64x8) StoreSlice(s []float64) { + x.Store((*[8]float64)(s)) +} + +// LoadInt8x64SlicePart loads a Int8x64 from the slice s. +// If s has fewer than 64 elements, the remaining elements of the vector are filled with zeroes. +// If s has 64 or more elements, the function is equivalent to LoadInt8x64Slice. +func LoadInt8x64SlicePart(s []int8) Int8x64 { + l := len(s) + if l >= 64 { + return LoadInt8x64Slice(s) + } + if l == 0 { + var x Int8x64 + return x + } + mask := Mask8x64FromBits(0xffffffffffffffff >> (64 - l)) + return LoadMaskedInt8x64(paInt8x64(s), mask) +} + +// StoreSlicePart stores the 64 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 64 or more elements, the method is equivalent to x.StoreSlice. +func (x Int8x64) StoreSlicePart(s []int8) { + l := len(s) + if l >= 64 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := Mask8x64FromBits(0xffffffffffffffff >> (64 - l)) + x.StoreMasked(paInt8x64(s), mask) +} + +// LoadInt16x32SlicePart loads a Int16x32 from the slice s. +// If s has fewer than 32 elements, the remaining elements of the vector are filled with zeroes. +// If s has 32 or more elements, the function is equivalent to LoadInt16x32Slice. +func LoadInt16x32SlicePart(s []int16) Int16x32 { + l := len(s) + if l >= 32 { + return LoadInt16x32Slice(s) + } + if l == 0 { + var x Int16x32 + return x + } + mask := Mask16x32FromBits(0xffffffff >> (32 - l)) + return LoadMaskedInt16x32(paInt16x32(s), mask) +} + +// StoreSlicePart stores the 32 elements of x into the slice s. +// It stores as many elements as will fit in s. 
+// If s has 32 or more elements, the method is equivalent to x.StoreSlice. +func (x Int16x32) StoreSlicePart(s []int16) { + l := len(s) + if l >= 32 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := Mask16x32FromBits(0xffffffff >> (32 - l)) + x.StoreMasked(paInt16x32(s), mask) +} + +// LoadInt32x16SlicePart loads a Int32x16 from the slice s. +// If s has fewer than 16 elements, the remaining elements of the vector are filled with zeroes. +// If s has 16 or more elements, the function is equivalent to LoadInt32x16Slice. +func LoadInt32x16SlicePart(s []int32) Int32x16 { + l := len(s) + if l >= 16 { + return LoadInt32x16Slice(s) + } + if l == 0 { + var x Int32x16 + return x + } + mask := Mask32x16FromBits(0xffff >> (16 - l)) + return LoadMaskedInt32x16(paInt32x16(s), mask) +} + +// StoreSlicePart stores the 16 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 16 or more elements, the method is equivalent to x.StoreSlice. +func (x Int32x16) StoreSlicePart(s []int32) { + l := len(s) + if l >= 16 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := Mask32x16FromBits(0xffff >> (16 - l)) + x.StoreMasked(paInt32x16(s), mask) +} + +// LoadInt64x8SlicePart loads a Int64x8 from the slice s. +// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. +// If s has 8 or more elements, the function is equivalent to LoadInt64x8Slice. +func LoadInt64x8SlicePart(s []int64) Int64x8 { + l := len(s) + if l >= 8 { + return LoadInt64x8Slice(s) + } + if l == 0 { + var x Int64x8 + return x + } + mask := Mask64x8FromBits(0xff >> (8 - l)) + return LoadMaskedInt64x8(paInt64x8(s), mask) +} + +// StoreSlicePart stores the 8 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 8 or more elements, the method is equivalent to x.StoreSlice. +func (x Int64x8) StoreSlicePart(s []int64) { + l := len(s) + if l >= 8 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := Mask64x8FromBits(0xff >> (8 - l)) + x.StoreMasked(paInt64x8(s), mask) +} + +// LoadUint8x64SlicePart loads a Uint8x64 from the slice s. +// If s has fewer than 64 elements, the remaining elements of the vector are filled with zeroes. +// If s has 64 or more elements, the function is equivalent to LoadUint8x64Slice. +func LoadUint8x64SlicePart(s []uint8) Uint8x64 { + l := len(s) + if l >= 64 { + return LoadUint8x64Slice(s) + } + if l == 0 { + var x Uint8x64 + return x + } + mask := Mask8x64FromBits(0xffffffffffffffff >> (64 - l)) + return LoadMaskedUint8x64(paUint8x64(s), mask) +} + +// StoreSlicePart stores the 64 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 64 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint8x64) StoreSlicePart(s []uint8) { + l := len(s) + if l >= 64 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := Mask8x64FromBits(0xffffffffffffffff >> (64 - l)) + x.StoreMasked(paUint8x64(s), mask) +} + +// LoadUint16x32SlicePart loads a Uint16x32 from the slice s. +// If s has fewer than 32 elements, the remaining elements of the vector are filled with zeroes. +// If s has 32 or more elements, the function is equivalent to LoadUint16x32Slice. 
+func LoadUint16x32SlicePart(s []uint16) Uint16x32 { + l := len(s) + if l >= 32 { + return LoadUint16x32Slice(s) + } + if l == 0 { + var x Uint16x32 + return x + } + mask := Mask16x32FromBits(0xffffffff >> (32 - l)) + return LoadMaskedUint16x32(paUint16x32(s), mask) +} + +// StoreSlicePart stores the 32 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 32 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint16x32) StoreSlicePart(s []uint16) { + l := len(s) + if l >= 32 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := Mask16x32FromBits(0xffffffff >> (32 - l)) + x.StoreMasked(paUint16x32(s), mask) +} + +// LoadUint32x16SlicePart loads a Uint32x16 from the slice s. +// If s has fewer than 16 elements, the remaining elements of the vector are filled with zeroes. +// If s has 16 or more elements, the function is equivalent to LoadUint32x16Slice. +func LoadUint32x16SlicePart(s []uint32) Uint32x16 { + l := len(s) + if l >= 16 { + return LoadUint32x16Slice(s) + } + if l == 0 { + var x Uint32x16 + return x + } + mask := Mask32x16FromBits(0xffff >> (16 - l)) + return LoadMaskedUint32x16(paUint32x16(s), mask) +} + +// StoreSlicePart stores the 16 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 16 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint32x16) StoreSlicePart(s []uint32) { + l := len(s) + if l >= 16 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := Mask32x16FromBits(0xffff >> (16 - l)) + x.StoreMasked(paUint32x16(s), mask) +} + +// LoadUint64x8SlicePart loads a Uint64x8 from the slice s. +// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. +// If s has 8 or more elements, the function is equivalent to LoadUint64x8Slice. +func LoadUint64x8SlicePart(s []uint64) Uint64x8 { + l := len(s) + if l >= 8 { + return LoadUint64x8Slice(s) + } + if l == 0 { + var x Uint64x8 + return x + } + mask := Mask64x8FromBits(0xff >> (8 - l)) + return LoadMaskedUint64x8(paUint64x8(s), mask) +} + +// StoreSlicePart stores the 8 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 8 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint64x8) StoreSlicePart(s []uint64) { + l := len(s) + if l >= 8 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := Mask64x8FromBits(0xff >> (8 - l)) + x.StoreMasked(paUint64x8(s), mask) +} + +// LoadFloat32x16SlicePart loads a Float32x16 from the slice s. +// If s has fewer than 16 elements, the remaining elements of the vector are filled with zeroes. +// If s has 16 or more elements, the function is equivalent to LoadFloat32x16Slice. +func LoadFloat32x16SlicePart(s []float32) Float32x16 { + l := len(s) + if l >= 16 { + return LoadFloat32x16Slice(s) + } + if l == 0 { + var x Float32x16 + return x + } + mask := Mask32x16FromBits(0xffff >> (16 - l)) + return LoadMaskedFloat32x16(paFloat32x16(s), mask) +} + +// StoreSlicePart stores the 16 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 16 or more elements, the method is equivalent to x.StoreSlice. +func (x Float32x16) StoreSlicePart(s []float32) { + l := len(s) + if l >= 16 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := Mask32x16FromBits(0xffff >> (16 - l)) + x.StoreMasked(paFloat32x16(s), mask) +} + +// LoadFloat64x8SlicePart loads a Float64x8 from the slice s. 
+// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. +// If s has 8 or more elements, the function is equivalent to LoadFloat64x8Slice. +func LoadFloat64x8SlicePart(s []float64) Float64x8 { + l := len(s) + if l >= 8 { + return LoadFloat64x8Slice(s) + } + if l == 0 { + var x Float64x8 + return x + } + mask := Mask64x8FromBits(0xff >> (8 - l)) + return LoadMaskedFloat64x8(paFloat64x8(s), mask) +} + +// StoreSlicePart stores the 8 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 8 or more elements, the method is equivalent to x.StoreSlice. +func (x Float64x8) StoreSlicePart(s []float64) { + l := len(s) + if l >= 8 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := Mask64x8FromBits(0xff >> (8 - l)) + x.StoreMasked(paFloat64x8(s), mask) +} + +// LoadInt32x4SlicePart loads a Int32x4 from the slice s. +// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. +// If s has 4 or more elements, the function is equivalent to LoadInt32x4Slice. +func LoadInt32x4SlicePart(s []int32) Int32x4 { + l := len(s) + if l >= 4 { + return LoadInt32x4Slice(s) + } + if l == 0 { + var x Int32x4 + return x + } + mask := vecMask32[len(vecMask32)/2-l:] + return LoadMaskedInt32x4(paInt32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) +} + +// StoreSlicePart stores the 4 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 4 or more elements, the method is equivalent to x.StoreSlice. +func (x Int32x4) StoreSlicePart(s []int32) { + l := len(s) + if l >= 4 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask32[len(vecMask32)/2-l:] + x.StoreMasked(paInt32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) +} + +// LoadInt64x2SlicePart loads a Int64x2 from the slice s. +// If s has fewer than 2 elements, the remaining elements of the vector are filled with zeroes. +// If s has 2 or more elements, the function is equivalent to LoadInt64x2Slice. +func LoadInt64x2SlicePart(s []int64) Int64x2 { + l := len(s) + if l >= 2 { + return LoadInt64x2Slice(s) + } + if l == 0 { + var x Int64x2 + return x + } + mask := vecMask64[len(vecMask64)/2-l:] + return LoadMaskedInt64x2(paInt64x2(s), LoadInt64x2Slice(mask).AsMask64x2()) +} + +// StoreSlicePart stores the 2 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 2 or more elements, the method is equivalent to x.StoreSlice. +func (x Int64x2) StoreSlicePart(s []int64) { + l := len(s) + if l >= 2 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask64[len(vecMask64)/2-l:] + x.StoreMasked(paInt64x2(s), LoadInt64x2Slice(mask).AsMask64x2()) +} + +// LoadUint32x4SlicePart loads a Uint32x4 from the slice s. +// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. +// If s has 4 or more elements, the function is equivalent to LoadUint32x4Slice. +func LoadUint32x4SlicePart(s []uint32) Uint32x4 { + l := len(s) + if l >= 4 { + return LoadUint32x4Slice(s) + } + if l == 0 { + var x Uint32x4 + return x + } + mask := vecMask32[len(vecMask32)/2-l:] + return LoadMaskedUint32x4(paUint32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) +} + +// StoreSlicePart stores the 4 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 4 or more elements, the method is equivalent to x.StoreSlice. 
+func (x Uint32x4) StoreSlicePart(s []uint32) { + l := len(s) + if l >= 4 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask32[len(vecMask32)/2-l:] + x.StoreMasked(paUint32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) +} + +// LoadUint64x2SlicePart loads a Uint64x2 from the slice s. +// If s has fewer than 2 elements, the remaining elements of the vector are filled with zeroes. +// If s has 2 or more elements, the function is equivalent to LoadUint64x2Slice. +func LoadUint64x2SlicePart(s []uint64) Uint64x2 { + l := len(s) + if l >= 2 { + return LoadUint64x2Slice(s) + } + if l == 0 { + var x Uint64x2 + return x + } + mask := vecMask64[len(vecMask64)/2-l:] + return LoadMaskedUint64x2(paUint64x2(s), LoadInt64x2Slice(mask).AsMask64x2()) +} + +// StoreSlicePart stores the 2 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 2 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint64x2) StoreSlicePart(s []uint64) { + l := len(s) + if l >= 2 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask64[len(vecMask64)/2-l:] + x.StoreMasked(paUint64x2(s), LoadInt64x2Slice(mask).AsMask64x2()) +} + +// LoadFloat32x4SlicePart loads a Float32x4 from the slice s. +// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. +// If s has 4 or more elements, the function is equivalent to LoadFloat32x4Slice. +func LoadFloat32x4SlicePart(s []float32) Float32x4 { + l := len(s) + if l >= 4 { + return LoadFloat32x4Slice(s) + } + if l == 0 { + var x Float32x4 + return x + } + mask := vecMask32[len(vecMask32)/2-l:] + return LoadMaskedFloat32x4(paFloat32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) +} + +// StoreSlicePart stores the 4 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 4 or more elements, the method is equivalent to x.StoreSlice. +func (x Float32x4) StoreSlicePart(s []float32) { + l := len(s) + if l >= 4 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask32[len(vecMask32)/2-l:] + x.StoreMasked(paFloat32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) +} + +// LoadFloat64x2SlicePart loads a Float64x2 from the slice s. +// If s has fewer than 2 elements, the remaining elements of the vector are filled with zeroes. +// If s has 2 or more elements, the function is equivalent to LoadFloat64x2Slice. +func LoadFloat64x2SlicePart(s []float64) Float64x2 { + l := len(s) + if l >= 2 { + return LoadFloat64x2Slice(s) + } + if l == 0 { + var x Float64x2 + return x + } + mask := vecMask64[len(vecMask64)/2-l:] + return LoadMaskedFloat64x2(paFloat64x2(s), LoadInt64x2Slice(mask).AsMask64x2()) +} + +// StoreSlicePart stores the 2 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 2 or more elements, the method is equivalent to x.StoreSlice. +func (x Float64x2) StoreSlicePart(s []float64) { + l := len(s) + if l >= 2 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask64[len(vecMask64)/2-l:] + x.StoreMasked(paFloat64x2(s), LoadInt64x2Slice(mask).AsMask64x2()) +} + +// LoadInt32x8SlicePart loads a Int32x8 from the slice s. +// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. +// If s has 8 or more elements, the function is equivalent to LoadInt32x8Slice. 
+func LoadInt32x8SlicePart(s []int32) Int32x8 { + l := len(s) + if l >= 8 { + return LoadInt32x8Slice(s) + } + if l == 0 { + var x Int32x8 + return x + } + mask := vecMask32[len(vecMask32)/2-l:] + return LoadMaskedInt32x8(paInt32x8(s), LoadInt32x8Slice(mask).AsMask32x8()) +} + +// StoreSlicePart stores the 8 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 8 or more elements, the method is equivalent to x.StoreSlice. +func (x Int32x8) StoreSlicePart(s []int32) { + l := len(s) + if l >= 8 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask32[len(vecMask32)/2-l:] + x.StoreMasked(paInt32x8(s), LoadInt32x8Slice(mask).AsMask32x8()) +} + +// LoadInt64x4SlicePart loads a Int64x4 from the slice s. +// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. +// If s has 4 or more elements, the function is equivalent to LoadInt64x4Slice. +func LoadInt64x4SlicePart(s []int64) Int64x4 { + l := len(s) + if l >= 4 { + return LoadInt64x4Slice(s) + } + if l == 0 { + var x Int64x4 + return x + } + mask := vecMask64[len(vecMask64)/2-l:] + return LoadMaskedInt64x4(paInt64x4(s), LoadInt64x4Slice(mask).AsMask64x4()) +} + +// StoreSlicePart stores the 4 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 4 or more elements, the method is equivalent to x.StoreSlice. +func (x Int64x4) StoreSlicePart(s []int64) { + l := len(s) + if l >= 4 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask64[len(vecMask64)/2-l:] + x.StoreMasked(paInt64x4(s), LoadInt64x4Slice(mask).AsMask64x4()) +} + +// LoadUint32x8SlicePart loads a Uint32x8 from the slice s. +// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. +// If s has 8 or more elements, the function is equivalent to LoadUint32x8Slice. +func LoadUint32x8SlicePart(s []uint32) Uint32x8 { + l := len(s) + if l >= 8 { + return LoadUint32x8Slice(s) + } + if l == 0 { + var x Uint32x8 + return x + } + mask := vecMask32[len(vecMask32)/2-l:] + return LoadMaskedUint32x8(paUint32x8(s), LoadInt32x8Slice(mask).AsMask32x8()) +} + +// StoreSlicePart stores the 8 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 8 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint32x8) StoreSlicePart(s []uint32) { + l := len(s) + if l >= 8 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask32[len(vecMask32)/2-l:] + x.StoreMasked(paUint32x8(s), LoadInt32x8Slice(mask).AsMask32x8()) +} + +// LoadUint64x4SlicePart loads a Uint64x4 from the slice s. +// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. +// If s has 4 or more elements, the function is equivalent to LoadUint64x4Slice. +func LoadUint64x4SlicePart(s []uint64) Uint64x4 { + l := len(s) + if l >= 4 { + return LoadUint64x4Slice(s) + } + if l == 0 { + var x Uint64x4 + return x + } + mask := vecMask64[len(vecMask64)/2-l:] + return LoadMaskedUint64x4(paUint64x4(s), LoadInt64x4Slice(mask).AsMask64x4()) +} + +// StoreSlicePart stores the 4 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 4 or more elements, the method is equivalent to x.StoreSlice. 
+func (x Uint64x4) StoreSlicePart(s []uint64) { + l := len(s) + if l >= 4 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask64[len(vecMask64)/2-l:] + x.StoreMasked(paUint64x4(s), LoadInt64x4Slice(mask).AsMask64x4()) +} + +// LoadFloat32x8SlicePart loads a Float32x8 from the slice s. +// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. +// If s has 8 or more elements, the function is equivalent to LoadFloat32x8Slice. +func LoadFloat32x8SlicePart(s []float32) Float32x8 { + l := len(s) + if l >= 8 { + return LoadFloat32x8Slice(s) + } + if l == 0 { + var x Float32x8 + return x + } + mask := vecMask32[len(vecMask32)/2-l:] + return LoadMaskedFloat32x8(paFloat32x8(s), LoadInt32x8Slice(mask).AsMask32x8()) +} + +// StoreSlicePart stores the 8 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 8 or more elements, the method is equivalent to x.StoreSlice. +func (x Float32x8) StoreSlicePart(s []float32) { + l := len(s) + if l >= 8 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask32[len(vecMask32)/2-l:] + x.StoreMasked(paFloat32x8(s), LoadInt32x8Slice(mask).AsMask32x8()) +} + +// LoadFloat64x4SlicePart loads a Float64x4 from the slice s. +// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. +// If s has 4 or more elements, the function is equivalent to LoadFloat64x4Slice. +func LoadFloat64x4SlicePart(s []float64) Float64x4 { + l := len(s) + if l >= 4 { + return LoadFloat64x4Slice(s) + } + if l == 0 { + var x Float64x4 + return x + } + mask := vecMask64[len(vecMask64)/2-l:] + return LoadMaskedFloat64x4(paFloat64x4(s), LoadInt64x4Slice(mask).AsMask64x4()) +} + +// StoreSlicePart stores the 4 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 4 or more elements, the method is equivalent to x.StoreSlice. +func (x Float64x4) StoreSlicePart(s []float64) { + l := len(s) + if l >= 4 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask64[len(vecMask64)/2-l:] + x.StoreMasked(paFloat64x4(s), LoadInt64x4Slice(mask).AsMask64x4()) +} + +// LoadUint8x16SlicePart loads a Uint8x16 from the slice s. +// If s has fewer than 16 elements, the remaining elements of the vector are filled with zeroes. +// If s has 16 or more elements, the function is equivalent to LoadUint8x16Slice. +func LoadUint8x16SlicePart(s []uint8) Uint8x16 { + if len(s) == 0 { + var zero Uint8x16 + return zero + } + t := unsafe.Slice((*int8)(unsafe.Pointer(&s[0])), len(s)) + return LoadInt8x16SlicePart(t).AsUint8x16() +} + +// StoreSlicePart stores the 16 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 16 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint8x16) StoreSlicePart(s []uint8) { + if len(s) == 0 { + return + } + t := unsafe.Slice((*int8)(unsafe.Pointer(&s[0])), len(s)) + x.AsInt8x16().StoreSlicePart(t) +} + +// LoadUint16x8SlicePart loads a Uint16x8 from the slice s. +// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. +// If s has 8 or more elements, the function is equivalent to LoadUint16x8Slice. +func LoadUint16x8SlicePart(s []uint16) Uint16x8 { + if len(s) == 0 { + var zero Uint16x8 + return zero + } + t := unsafe.Slice((*int16)(unsafe.Pointer(&s[0])), len(s)) + return LoadInt16x8SlicePart(t).AsUint16x8() +} + +// StoreSlicePart stores the 8 elements of x into the slice s. 
+// It stores as many elements as will fit in s. +// If s has 8 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint16x8) StoreSlicePart(s []uint16) { + if len(s) == 0 { + return + } + t := unsafe.Slice((*int16)(unsafe.Pointer(&s[0])), len(s)) + x.AsInt16x8().StoreSlicePart(t) +} + +// LoadUint8x32SlicePart loads a Uint8x32 from the slice s. +// If s has fewer than 32 elements, the remaining elements of the vector are filled with zeroes. +// If s has 32 or more elements, the function is equivalent to LoadUint8x32Slice. +func LoadUint8x32SlicePart(s []uint8) Uint8x32 { + if len(s) == 0 { + var zero Uint8x32 + return zero + } + t := unsafe.Slice((*int8)(unsafe.Pointer(&s[0])), len(s)) + return LoadInt8x32SlicePart(t).AsUint8x32() +} + +// StoreSlicePart stores the 32 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 32 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint8x32) StoreSlicePart(s []uint8) { + if len(s) == 0 { + return + } + t := unsafe.Slice((*int8)(unsafe.Pointer(&s[0])), len(s)) + x.AsInt8x32().StoreSlicePart(t) +} + +// LoadUint16x16SlicePart loads a Uint16x16 from the slice s. +// If s has fewer than 16 elements, the remaining elements of the vector are filled with zeroes. +// If s has 16 or more elements, the function is equivalent to LoadUint16x16Slice. +func LoadUint16x16SlicePart(s []uint16) Uint16x16 { + if len(s) == 0 { + var zero Uint16x16 + return zero + } + t := unsafe.Slice((*int16)(unsafe.Pointer(&s[0])), len(s)) + return LoadInt16x16SlicePart(t).AsUint16x16() +} + +// StoreSlicePart stores the 16 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 16 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint16x16) StoreSlicePart(s []uint16) { + if len(s) == 0 { + return + } + t := unsafe.Slice((*int16)(unsafe.Pointer(&s[0])), len(s)) + x.AsInt16x16().StoreSlicePart(t) +} From 257c1356ecd7a15830eabe17a6d42878a8538cfd Mon Sep 17 00:00:00 2001 From: Austin Clements Date: Wed, 13 Aug 2025 15:47:38 -0400 Subject: [PATCH 132/139] [dev.simd] go/types: exclude simd/_gen module from TestStdlib We're about to add a small simd/_gen submodule that imports external dependencies. Exclude it from TestStdlib since it won't be able to follow those dependencies. 
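For orientation, here is a minimal usage sketch (illustrative only, not part of the patch) of how the Load...SlicePart/StoreSlicePart helpers generated above are meant to be combined with the full-width Slice loads and stores: the loop body runs on whole 8-element vectors and the SlicePart forms handle the short tail, loading missing lanes as zero and storing only as many elements as fit. It assumes GOEXPERIMENT=simd on amd64, and that Float32x8 has an Add method defined elsewhere in the generated API; the package name below is a placeholder.

//go:build goexperiment.simd

package demo

import "simd"

// double doubles every element of dst, eight float32s at a time.
// The tail shorter than one vector is handled by the SlicePart forms.
func double(dst []float32) {
	i := 0
	for ; i+8 <= len(dst); i += 8 {
		x := simd.LoadFloat32x8Slice(dst[i:])
		x.Add(x).StoreSlice(dst[i:])
	}
	if i < len(dst) {
		x := simd.LoadFloat32x8SlicePart(dst[i:])
		x.Add(x).StoreSlicePart(dst[i:])
	}
}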
Change-Id: I29a1adc98d141b9c511aa29e1992fab2248747d5 Reviewed-on: https://go-review.googlesource.com/c/go/+/695976 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/types2/stdlib_test.go | 2 ++ src/go/types/stdlib_test.go | 2 ++ 2 files changed, 4 insertions(+) diff --git a/src/cmd/compile/internal/types2/stdlib_test.go b/src/cmd/compile/internal/types2/stdlib_test.go index 35e15d814defbb..66f27b7829018b 100644 --- a/src/cmd/compile/internal/types2/stdlib_test.go +++ b/src/cmd/compile/internal/types2/stdlib_test.go @@ -358,6 +358,8 @@ func TestStdKen(t *testing.T) { var excluded = map[string]bool{ "builtin": true, "cmd/compile/internal/ssa/_gen": true, + "simd/_gen/simdgen": true, + "simd/_gen/unify": true, } // printPackageMu synchronizes the printing of type-checked package files in diff --git a/src/go/types/stdlib_test.go b/src/go/types/stdlib_test.go index 8e95d23ec38375..c3fddbf918956f 100644 --- a/src/go/types/stdlib_test.go +++ b/src/go/types/stdlib_test.go @@ -360,6 +360,8 @@ func TestStdKen(t *testing.T) { var excluded = map[string]bool{ "builtin": true, "cmd/compile/internal/ssa/_gen": true, + "simd/_gen/simdgen": true, + "simd/_gen/unify": true, } // printPackageMu synchronizes the printing of type-checked package files in From b7c869854962603ecffe5be6dc5c650fe2e07df9 Mon Sep 17 00:00:00 2001 From: Austin Clements Date: Wed, 13 Aug 2025 15:30:27 -0400 Subject: [PATCH 133/139] [dev.simd] simd/_gen: migrate simdgen from x/arch This moves the simdgen tool and its supporting unify package from golang.org/x/arch/internal as of CL 695619 to simd/_gen in the main repo. The simdgen tool was started in x/arch to live next to xeddata and a few other assembler generators that already lived there. However, as we've been developing simdgen, we've discovered that there's a tremendous amount of process friction coordinating commits to x/arch with the corresponding generated files in the main repo. Many of the existing generators in x/arch were started before modules existed. In GOPATH world, it was impractical for them to live in the main repo because they have dependencies that are not allowed in the main repo. However, now that we have modules and can use small submodules in the main repo, we can isolate these dependencies to just the generators, making it practical for them to live in the main repo. This commit was generated by the following script: # Checks set -e if [[ ! 
-d src/simd ]]; then echo >&2 "$PWD is not the root of the main repo on dev.simd" exit 1 fi if [[ -z "$XEDDATA" ]]; then echo >&2 "Must set \$XEDDATA" exit 1 fi which go >/dev/null # Move simdgen from x/arch xarch=$(mktemp -d) git clone https://go.googlesource.com/arch $xarch xarchCL=$(git -C $xarch log -1 --format=%b | awk -F/ '/^Reviewed-on:/ {print $NF}') echo >&2 "x/arch CL: $xarchCL" mv $xarch/internal src/simd/_gen sed --in-place s,golang.org/x/arch/internal/,simd/_gen/, src/simd/_gen/*/*.go # Create self-contained module cat > src/simd/_gen/go.mod < Reviewed-by: David Chase --- src/simd/_gen/go.mod | 8 + src/simd/_gen/go.sum | 6 + src/simd/_gen/simdgen/.gitignore | 3 + src/simd/_gen/simdgen/asm.yaml.toy | 107 +++ src/simd/_gen/simdgen/categories.yaml | 1 + src/simd/_gen/simdgen/etetest.sh | 33 + src/simd/_gen/simdgen/gen_simdGenericOps.go | 70 ++ src/simd/_gen/simdgen/gen_simdIntrinsics.go | 151 ++++ src/simd/_gen/simdgen/gen_simdMachineOps.go | 122 +++ src/simd/_gen/simdgen/gen_simdTypes.go | 631 ++++++++++++++ src/simd/_gen/simdgen/gen_simdrules.go | 211 +++++ src/simd/_gen/simdgen/gen_simdssa.go | 173 ++++ src/simd/_gen/simdgen/gen_utility.go | 729 ++++++++++++++++ src/simd/_gen/simdgen/go.yaml | 1 + src/simd/_gen/simdgen/godefs.go | 379 +++++++++ src/simd/_gen/simdgen/main.go | 280 +++++++ .../_gen/simdgen/ops/AddSub/categories.yaml | 37 + src/simd/_gen/simdgen/ops/AddSub/go.yaml | 77 ++ .../simdgen/ops/BitwiseLogic/categories.yaml | 20 + .../_gen/simdgen/ops/BitwiseLogic/go.yaml | 128 +++ .../_gen/simdgen/ops/Compares/categories.yaml | 43 + src/simd/_gen/simdgen/ops/Compares/go.yaml | 141 ++++ .../_gen/simdgen/ops/Converts/categories.yaml | 10 + src/simd/_gen/simdgen/ops/Converts/go.yaml | 21 + .../simdgen/ops/FPonlyArith/categories.yaml | 85 ++ src/simd/_gen/simdgen/ops/FPonlyArith/go.yaml | 62 ++ .../simdgen/ops/GaloisField/categories.yaml | 21 + src/simd/_gen/simdgen/ops/GaloisField/go.yaml | 32 + .../simdgen/ops/IntOnlyArith/categories.yaml | 21 + .../_gen/simdgen/ops/IntOnlyArith/go.yaml | 45 + .../_gen/simdgen/ops/MLOps/categories.yaml | 47 ++ src/simd/_gen/simdgen/ops/MLOps/go.yaml | 113 +++ .../_gen/simdgen/ops/MinMax/categories.yaml | 9 + src/simd/_gen/simdgen/ops/MinMax/go.yaml | 42 + .../_gen/simdgen/ops/Moves/categories.yaml | 72 ++ src/simd/_gen/simdgen/ops/Moves/go.yaml | 372 +++++++++ src/simd/_gen/simdgen/ops/Mul/categories.yaml | 14 + src/simd/_gen/simdgen/ops/Mul/go.yaml | 73 ++ .../simdgen/ops/ShiftRotate/categories.yaml | 103 +++ src/simd/_gen/simdgen/ops/ShiftRotate/go.yaml | 172 ++++ src/simd/_gen/simdgen/pprint.go | 73 ++ src/simd/_gen/simdgen/sort_test.go | 41 + src/simd/_gen/simdgen/types.yaml | 90 ++ src/simd/_gen/simdgen/xed.go | 780 ++++++++++++++++++ src/simd/_gen/unify/closure.go | 154 ++++ src/simd/_gen/unify/domain.go | 359 ++++++++ src/simd/_gen/unify/dot.go | 221 +++++ src/simd/_gen/unify/env.go | 480 +++++++++++ src/simd/_gen/unify/html.go | 123 +++ src/simd/_gen/unify/pos.go | 33 + src/simd/_gen/unify/testdata/stress.yaml | 33 + src/simd/_gen/unify/testdata/unify.yaml | 174 ++++ src/simd/_gen/unify/testdata/vars.yaml | 175 ++++ src/simd/_gen/unify/trace.go | 168 ++++ src/simd/_gen/unify/unify.go | 322 ++++++++ src/simd/_gen/unify/unify_test.go | 154 ++++ src/simd/_gen/unify/value.go | 167 ++++ src/simd/_gen/unify/value_test.go | 50 ++ src/simd/_gen/unify/yaml.go | 619 ++++++++++++++ src/simd/_gen/unify/yaml_test.go | 202 +++++ 60 files changed, 9083 insertions(+) create mode 100644 src/simd/_gen/go.mod create mode 100644 src/simd/_gen/go.sum 
create mode 100644 src/simd/_gen/simdgen/.gitignore create mode 100644 src/simd/_gen/simdgen/asm.yaml.toy create mode 100644 src/simd/_gen/simdgen/categories.yaml create mode 100755 src/simd/_gen/simdgen/etetest.sh create mode 100644 src/simd/_gen/simdgen/gen_simdGenericOps.go create mode 100644 src/simd/_gen/simdgen/gen_simdIntrinsics.go create mode 100644 src/simd/_gen/simdgen/gen_simdMachineOps.go create mode 100644 src/simd/_gen/simdgen/gen_simdTypes.go create mode 100644 src/simd/_gen/simdgen/gen_simdrules.go create mode 100644 src/simd/_gen/simdgen/gen_simdssa.go create mode 100644 src/simd/_gen/simdgen/gen_utility.go create mode 100644 src/simd/_gen/simdgen/go.yaml create mode 100644 src/simd/_gen/simdgen/godefs.go create mode 100644 src/simd/_gen/simdgen/main.go create mode 100644 src/simd/_gen/simdgen/ops/AddSub/categories.yaml create mode 100644 src/simd/_gen/simdgen/ops/AddSub/go.yaml create mode 100644 src/simd/_gen/simdgen/ops/BitwiseLogic/categories.yaml create mode 100644 src/simd/_gen/simdgen/ops/BitwiseLogic/go.yaml create mode 100644 src/simd/_gen/simdgen/ops/Compares/categories.yaml create mode 100644 src/simd/_gen/simdgen/ops/Compares/go.yaml create mode 100644 src/simd/_gen/simdgen/ops/Converts/categories.yaml create mode 100644 src/simd/_gen/simdgen/ops/Converts/go.yaml create mode 100644 src/simd/_gen/simdgen/ops/FPonlyArith/categories.yaml create mode 100644 src/simd/_gen/simdgen/ops/FPonlyArith/go.yaml create mode 100644 src/simd/_gen/simdgen/ops/GaloisField/categories.yaml create mode 100644 src/simd/_gen/simdgen/ops/GaloisField/go.yaml create mode 100644 src/simd/_gen/simdgen/ops/IntOnlyArith/categories.yaml create mode 100644 src/simd/_gen/simdgen/ops/IntOnlyArith/go.yaml create mode 100644 src/simd/_gen/simdgen/ops/MLOps/categories.yaml create mode 100644 src/simd/_gen/simdgen/ops/MLOps/go.yaml create mode 100644 src/simd/_gen/simdgen/ops/MinMax/categories.yaml create mode 100644 src/simd/_gen/simdgen/ops/MinMax/go.yaml create mode 100644 src/simd/_gen/simdgen/ops/Moves/categories.yaml create mode 100644 src/simd/_gen/simdgen/ops/Moves/go.yaml create mode 100644 src/simd/_gen/simdgen/ops/Mul/categories.yaml create mode 100644 src/simd/_gen/simdgen/ops/Mul/go.yaml create mode 100644 src/simd/_gen/simdgen/ops/ShiftRotate/categories.yaml create mode 100644 src/simd/_gen/simdgen/ops/ShiftRotate/go.yaml create mode 100644 src/simd/_gen/simdgen/pprint.go create mode 100644 src/simd/_gen/simdgen/sort_test.go create mode 100644 src/simd/_gen/simdgen/types.yaml create mode 100644 src/simd/_gen/simdgen/xed.go create mode 100644 src/simd/_gen/unify/closure.go create mode 100644 src/simd/_gen/unify/domain.go create mode 100644 src/simd/_gen/unify/dot.go create mode 100644 src/simd/_gen/unify/env.go create mode 100644 src/simd/_gen/unify/html.go create mode 100644 src/simd/_gen/unify/pos.go create mode 100644 src/simd/_gen/unify/testdata/stress.yaml create mode 100644 src/simd/_gen/unify/testdata/unify.yaml create mode 100644 src/simd/_gen/unify/testdata/vars.yaml create mode 100644 src/simd/_gen/unify/trace.go create mode 100644 src/simd/_gen/unify/unify.go create mode 100644 src/simd/_gen/unify/unify_test.go create mode 100644 src/simd/_gen/unify/value.go create mode 100644 src/simd/_gen/unify/value_test.go create mode 100644 src/simd/_gen/unify/yaml.go create mode 100644 src/simd/_gen/unify/yaml_test.go diff --git a/src/simd/_gen/go.mod b/src/simd/_gen/go.mod new file mode 100644 index 00000000000000..fa360f560a4e0f --- /dev/null +++ b/src/simd/_gen/go.mod @@ -0,0 +1,8 
@@ +module simd/_gen + +go 1.24 + +require ( + golang.org/x/arch v0.20.0 + gopkg.in/yaml.v3 v3.0.1 +) diff --git a/src/simd/_gen/go.sum b/src/simd/_gen/go.sum new file mode 100644 index 00000000000000..a39a57ee9ec688 --- /dev/null +++ b/src/simd/_gen/go.sum @@ -0,0 +1,6 @@ +golang.org/x/arch v0.20.0 h1:dx1zTU0MAE98U+TQ8BLl7XsJbgze2WnNKF/8tGp/Q6c= +golang.org/x/arch v0.20.0/go.mod h1:bdwinDaKcfZUGpH09BB7ZmOfhalA8lQdzl62l8gGWsk= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/src/simd/_gen/simdgen/.gitignore b/src/simd/_gen/simdgen/.gitignore new file mode 100644 index 00000000000000..de579f6b9bf8dc --- /dev/null +++ b/src/simd/_gen/simdgen/.gitignore @@ -0,0 +1,3 @@ +testdata/* +.gemini/* +.gemini* diff --git a/src/simd/_gen/simdgen/asm.yaml.toy b/src/simd/_gen/simdgen/asm.yaml.toy new file mode 100644 index 00000000000000..7885c776c2a300 --- /dev/null +++ b/src/simd/_gen/simdgen/asm.yaml.toy @@ -0,0 +1,107 @@ +# Hand-written toy input like -xedPath would generate. +# This input can be substituted for -xedPath. +!sum +- asm: ADDPS + goarch: amd64 + feature: "SSE2" + in: + - asmPos: 0 + class: vreg + base: float + elemBits: 32 + bits: 128 + - asmPos: 1 + class: vreg + base: float + elemBits: 32 + bits: 128 + out: + - asmPos: 0 + class: vreg + base: float + elemBits: 32 + bits: 128 + +- asm: ADDPD + goarch: amd64 + feature: "SSE2" + in: + - asmPos: 0 + class: vreg + base: float + elemBits: 64 + bits: 128 + - asmPos: 1 + class: vreg + base: float + elemBits: 64 + bits: 128 + out: + - asmPos: 0 + class: vreg + base: float + elemBits: 64 + bits: 128 + +- asm: PADDB + goarch: amd64 + feature: "SSE2" + in: + - asmPos: 0 + class: vreg + base: int|uint + elemBits: 32 + bits: 128 + - asmPos: 1 + class: vreg + base: int|uint + elemBits: 32 + bits: 128 + out: + - asmPos: 0 + class: vreg + base: int|uint + elemBits: 32 + bits: 128 + +- asm: VPADDB + goarch: amd64 + feature: "AVX" + in: + - asmPos: 1 + class: vreg + base: int|uint + elemBits: 8 + bits: 128 + - asmPos: 2 + class: vreg + base: int|uint + elemBits: 8 + bits: 128 + out: + - asmPos: 0 + class: vreg + base: int|uint + elemBits: 8 + bits: 128 + +- asm: VPADDB + goarch: amd64 + feature: "AVX2" + in: + - asmPos: 1 + class: vreg + base: int|uint + elemBits: 8 + bits: 256 + - asmPos: 2 + class: vreg + base: int|uint + elemBits: 8 + bits: 256 + out: + - asmPos: 0 + class: vreg + base: int|uint + elemBits: 8 + bits: 256 diff --git a/src/simd/_gen/simdgen/categories.yaml b/src/simd/_gen/simdgen/categories.yaml new file mode 100644 index 00000000000000..ed4c96458d8f0d --- /dev/null +++ b/src/simd/_gen/simdgen/categories.yaml @@ -0,0 +1 @@ +!import ops/*/categories.yaml diff --git a/src/simd/_gen/simdgen/etetest.sh b/src/simd/_gen/simdgen/etetest.sh new file mode 100755 index 00000000000000..7b5001ecbbe58c --- /dev/null +++ b/src/simd/_gen/simdgen/etetest.sh @@ -0,0 +1,33 @@ +#!/bin/bash -x + +cat <<\\EOF + +This is an end-to-end test of Go SIMD. It checks out a fresh Go +repository from the go.simd branch, then generates the SIMD input +files and runs simdgen writing into the fresh repository. + +After that it generates the modified ssa pattern matching files, then +builds the compiler. 
+ +\EOF + +rm -rf go-test +git clone https://go.googlesource.com/go -b dev.simd go-test +go run . -xedPath xeddata -o godefs -goroot ./go-test go.yaml types.yaml categories.yaml +(cd go-test/src/cmd/compile/internal/ssa/_gen ; go run *.go ) +(cd go-test/src ; GOEXPERIMENT=simd ./make.bash ) +(cd go-test/bin; b=`pwd` ; cd ../src/simd/testdata; GOARCH=amd64 $b/go run .) +(cd go-test/bin; b=`pwd` ; cd ../src ; +GOEXPERIMENT=simd GOARCH=amd64 $b/go test -v simd +GOEXPERIMENT=simd $b/go test go/doc +GOEXPERIMENT=simd $b/go test go/build +GOEXPERIMENT=simd $b/go test cmd/api -v -check +$b/go test go/doc +$b/go test go/build +$b/go test cmd/api -v -check + +$b/go test cmd/compile/internal/ssagen -simd=0 +GOEXPERIMENT=simd $b/go test cmd/compile/internal/ssagen -simd=0 +) + +# next, add some tests of SIMD itself diff --git a/src/simd/_gen/simdgen/gen_simdGenericOps.go b/src/simd/_gen/simdgen/gen_simdGenericOps.go new file mode 100644 index 00000000000000..3dbbeb09f7298b --- /dev/null +++ b/src/simd/_gen/simdgen/gen_simdGenericOps.go @@ -0,0 +1,70 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "bytes" + "fmt" + "sort" +) + +const simdGenericOpsTmpl = ` +package main + +func simdGenericOps() []opData { + return []opData{ +{{- range .Ops }} + {name: "{{.OpName}}", argLength: {{.OpInLen}}, commutative: {{.Comm}}}, +{{- end }} +{{- range .OpsImm }} + {name: "{{.OpName}}", argLength: {{.OpInLen}}, commutative: {{.Comm}}, aux: "UInt8"}, +{{- end }} + } +} +` + +// writeSIMDGenericOps generates the generic ops and writes it to simdAMD64ops.go +// within the specified directory. +func writeSIMDGenericOps(ops []Operation) *bytes.Buffer { + t := templateOf(simdGenericOpsTmpl, "simdgenericOps") + buffer := new(bytes.Buffer) + buffer.WriteString(generatedHeader) + + type genericOpsData struct { + OpName string + OpInLen int + Comm bool + } + type opData struct { + Ops []genericOpsData + OpsImm []genericOpsData + } + var opsData opData + for _, op := range ops { + if op.NoGenericOps != nil && *op.NoGenericOps == "true" { + continue + } + _, _, _, immType, gOp := op.shape() + gOpData := genericOpsData{gOp.GenericName(), len(gOp.In), op.Commutative} + if immType == VarImm || immType == ConstVarImm { + opsData.OpsImm = append(opsData.OpsImm, gOpData) + } else { + opsData.Ops = append(opsData.Ops, gOpData) + } + } + sort.Slice(opsData.Ops, func(i, j int) bool { + return compareNatural(opsData.Ops[i].OpName, opsData.Ops[j].OpName) < 0 + }) + sort.Slice(opsData.OpsImm, func(i, j int) bool { + return compareNatural(opsData.OpsImm[i].OpName, opsData.OpsImm[j].OpName) < 0 + }) + + err := t.Execute(buffer, opsData) + if err != nil { + panic(fmt.Errorf("failed to execute template: %w", err)) + } + + return buffer +} diff --git a/src/simd/_gen/simdgen/gen_simdIntrinsics.go b/src/simd/_gen/simdgen/gen_simdIntrinsics.go new file mode 100644 index 00000000000000..6a1501e17bf89d --- /dev/null +++ b/src/simd/_gen/simdgen/gen_simdIntrinsics.go @@ -0,0 +1,151 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "bytes" + "fmt" + "slices" +) + +const simdIntrinsicsTmpl = ` +{{define "header"}} +package ssagen + +import ( + "cmd/compile/internal/ir" + "cmd/compile/internal/ssa" + "cmd/compile/internal/types" + "cmd/internal/sys" +) + +const simdPackage = "` + simdPackage + `" + +func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies ...sys.ArchFamily)) { +{{end}} + +{{define "op1"}} addF(simdPackage, "{{(index .In 0).Go}}.{{.Go}}", opLen1(ssa.Op{{.GenericName}}, {{.SSAType}}), sys.AMD64) +{{end}} +{{define "op2"}} addF(simdPackage, "{{(index .In 0).Go}}.{{.Go}}", opLen2(ssa.Op{{.GenericName}}, {{.SSAType}}), sys.AMD64) +{{end}} +{{define "op2_21"}} addF(simdPackage, "{{(index .In 0).Go}}.{{.Go}}", opLen2_21(ssa.Op{{.GenericName}}, {{.SSAType}}), sys.AMD64) +{{end}} +{{define "op2_21Type1"}} addF(simdPackage, "{{(index .In 1).Go}}.{{.Go}}", opLen2_21(ssa.Op{{.GenericName}}, {{.SSAType}}), sys.AMD64) +{{end}} +{{define "op3"}} addF(simdPackage, "{{(index .In 0).Go}}.{{.Go}}", opLen3(ssa.Op{{.GenericName}}, {{.SSAType}}), sys.AMD64) +{{end}} +{{define "op3_21"}} addF(simdPackage, "{{(index .In 0).Go}}.{{.Go}}", opLen3_21(ssa.Op{{.GenericName}}, {{.SSAType}}), sys.AMD64) +{{end}} +{{define "op3_21Type1"}} addF(simdPackage, "{{(index .In 1).Go}}.{{.Go}}", opLen3_21(ssa.Op{{.GenericName}}, {{.SSAType}}), sys.AMD64) +{{end}} +{{define "op3_231Type1"}} addF(simdPackage, "{{(index .In 1).Go}}.{{.Go}}", opLen3_231(ssa.Op{{.GenericName}}, {{.SSAType}}), sys.AMD64) +{{end}} +{{define "op3_31"}} addF(simdPackage, "{{(index .In 2).Go}}.{{.Go}}", opLen3_31(ssa.Op{{.GenericName}}, {{.SSAType}}), sys.AMD64) +{{end}} +{{define "op4"}} addF(simdPackage, "{{(index .In 0).Go}}.{{.Go}}", opLen4(ssa.Op{{.GenericName}}, {{.SSAType}}), sys.AMD64) +{{end}} +{{define "op4_231Type1"}} addF(simdPackage, "{{(index .In 1).Go}}.{{.Go}}", opLen4_231(ssa.Op{{.GenericName}}, {{.SSAType}}), sys.AMD64) +{{end}} +{{define "op4_31"}} addF(simdPackage, "{{(index .In 2).Go}}.{{.Go}}", opLen4_31(ssa.Op{{.GenericName}}, {{.SSAType}}), sys.AMD64) +{{end}} +{{define "op1Imm8"}} addF(simdPackage, "{{(index .In 1).Go}}.{{.Go}}", opLen1Imm8(ssa.Op{{.GenericName}}, {{.SSAType}}, {{(index .In 0).ImmOffset}}), sys.AMD64) +{{end}} +{{define "op2Imm8"}} addF(simdPackage, "{{(index .In 1).Go}}.{{.Go}}", opLen2Imm8(ssa.Op{{.GenericName}}, {{.SSAType}}, {{(index .In 0).ImmOffset}}), sys.AMD64) +{{end}} +{{define "op2Imm8_2I"}} addF(simdPackage, "{{(index .In 1).Go}}.{{.Go}}", opLen2Imm8_2I(ssa.Op{{.GenericName}}, {{.SSAType}}, {{(index .In 0).ImmOffset}}), sys.AMD64) +{{end}} +{{define "op3Imm8"}} addF(simdPackage, "{{(index .In 1).Go}}.{{.Go}}", opLen3Imm8(ssa.Op{{.GenericName}}, {{.SSAType}}, {{(index .In 0).ImmOffset}}), sys.AMD64) +{{end}} +{{define "op3Imm8_2I"}} addF(simdPackage, "{{(index .In 1).Go}}.{{.Go}}", opLen3Imm8_2I(ssa.Op{{.GenericName}}, {{.SSAType}}, {{(index .In 0).ImmOffset}}), sys.AMD64) +{{end}} +{{define "op4Imm8"}} addF(simdPackage, "{{(index .In 1).Go}}.{{.Go}}", opLen4Imm8(ssa.Op{{.GenericName}}, {{.SSAType}}, {{(index .In 0).ImmOffset}}), sys.AMD64) +{{end}} + +{{define "vectorConversion"}} addF(simdPackage, "{{.Tsrc.Name}}.As{{.Tdst.Name}}", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) +{{end}} + +{{define "loadStore"}} addF(simdPackage, "Load{{.Name}}", simdLoad(), sys.AMD64) + addF(simdPackage, "{{.Name}}.Store", simdStore(), sys.AMD64) +{{end}} + +{{define "maskedLoadStore"}} addF(simdPackage, "LoadMasked{{.Name}}", 
simdMaskedLoad(ssa.OpLoadMasked{{.ElemBits}}), sys.AMD64) + addF(simdPackage, "{{.Name}}.StoreMasked", simdMaskedStore(ssa.OpStoreMasked{{.ElemBits}}), sys.AMD64) +{{end}} + +{{define "mask"}} addF(simdPackage, "{{.Name}}.As{{.VectorCounterpart}}", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "{{.VectorCounterpart}}.As{{.Name}}", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "{{.Name}}.And", opLen2(ssa.OpAnd{{.ReshapedVectorWithAndOr}}, types.TypeVec{{.Size}}), sys.AMD64) + addF(simdPackage, "{{.Name}}.Or", opLen2(ssa.OpOr{{.ReshapedVectorWithAndOr}}, types.TypeVec{{.Size}}), sys.AMD64) + addF(simdPackage, "Load{{.Name}}FromBits", simdLoadMask({{.ElemBits}}, {{.Lanes}}), sys.AMD64) + addF(simdPackage, "{{.Name}}.StoreToBits", simdStoreMask({{.ElemBits}}, {{.Lanes}}), sys.AMD64) + addF(simdPackage, "{{.Name}}FromBits", simdCvtVToMask({{.ElemBits}}, {{.Lanes}}), sys.AMD64) + addF(simdPackage, "{{.Name}}.ToBits", simdCvtMaskToV({{.ElemBits}}, {{.Lanes}}), sys.AMD64) +{{end}} + +{{define "footer"}}} +{{end}} +` + +// writeSIMDIntrinsics generates the intrinsic mappings and writes it to simdintrinsics.go +// within the specified directory. +func writeSIMDIntrinsics(ops []Operation, typeMap simdTypeMap) *bytes.Buffer { + t := templateOf(simdIntrinsicsTmpl, "simdintrinsics") + buffer := new(bytes.Buffer) + buffer.WriteString(generatedHeader) + + if err := t.ExecuteTemplate(buffer, "header", nil); err != nil { + panic(fmt.Errorf("failed to execute header template: %w", err)) + } + + slices.SortFunc(ops, compareOperations) + + for _, op := range ops { + if op.NoTypes != nil && *op.NoTypes == "true" { + continue + } + if s, op, err := classifyOp(op); err == nil { + if err := t.ExecuteTemplate(buffer, s, op); err != nil { + panic(fmt.Errorf("failed to execute template %s for op %s: %w", s, op.Go, err)) + } + + } else { + panic(fmt.Errorf("failed to classify op %v: %w", op.Go, err)) + } + } + + for _, conv := range vConvertFromTypeMap(typeMap) { + if err := t.ExecuteTemplate(buffer, "vectorConversion", conv); err != nil { + panic(fmt.Errorf("failed to execute vectorConversion template: %w", err)) + } + } + + for _, typ := range typesFromTypeMap(typeMap) { + if typ.Type != "mask" { + if err := t.ExecuteTemplate(buffer, "loadStore", typ); err != nil { + panic(fmt.Errorf("failed to execute loadStore template: %w", err)) + } + } + } + + for _, typ := range typesFromTypeMap(typeMap) { + if typ.MaskedLoadStoreFilter() { + if err := t.ExecuteTemplate(buffer, "maskedLoadStore", typ); err != nil { + panic(fmt.Errorf("failed to execute maskedLoadStore template: %w", err)) + } + } + } + + for _, mask := range masksFromTypeMap(typeMap) { + if err := t.ExecuteTemplate(buffer, "mask", mask); err != nil { + panic(fmt.Errorf("failed to execute mask template: %w", err)) + } + } + + if err := t.ExecuteTemplate(buffer, "footer", nil); err != nil { + panic(fmt.Errorf("failed to execute footer template: %w", err)) + } + + return buffer +} diff --git a/src/simd/_gen/simdgen/gen_simdMachineOps.go b/src/simd/_gen/simdgen/gen_simdMachineOps.go new file mode 100644 index 00000000000000..64918e5543a3f3 --- /dev/null +++ b/src/simd/_gen/simdgen/gen_simdMachineOps.go @@ -0,0 +1,122 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "bytes" + "fmt" + "sort" + "strings" +) + +const simdMachineOpsTmpl = ` +package main + +func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vfpkv, w11, w21, w2k, wkw, w2kw, w2kk, w31, w3kw, wgpw, wgp, wfpw, wfpkw regInfo) []opData { + return []opData{ +{{- range .OpsData }} + {name: "{{.OpName}}", argLength: {{.OpInLen}}, reg: {{.RegInfo}}, asm: "{{.Asm}}", commutative: {{.Comm}}, typ: "{{.Type}}", resultInArg0: {{.ResultInArg0}}}, +{{- end }} +{{- range .OpsDataImm }} + {name: "{{.OpName}}", argLength: {{.OpInLen}}, reg: {{.RegInfo}}, asm: "{{.Asm}}", aux: "UInt8", commutative: {{.Comm}}, typ: "{{.Type}}", resultInArg0: {{.ResultInArg0}}}, +{{- end }} + } +} +` + +// writeSIMDMachineOps generates the machine ops and writes it to simdAMD64ops.go +// within the specified directory. +func writeSIMDMachineOps(ops []Operation) *bytes.Buffer { + t := templateOf(simdMachineOpsTmpl, "simdAMD64Ops") + buffer := new(bytes.Buffer) + buffer.WriteString(generatedHeader) + + type opData struct { + OpName string + Asm string + OpInLen int + RegInfo string + Comm bool + Type string + ResultInArg0 bool + } + type machineOpsData struct { + OpsData []opData + OpsDataImm []opData + } + seen := map[string]struct{}{} + regInfoSet := map[string]bool{ + "v11": true, "v21": true, "v2k": true, "v2kv": true, "v2kk": true, "vkv": true, "v31": true, "v3kv": true, "vgpv": true, "vgp": true, "vfpv": true, "vfpkv": true, + "w11": true, "w21": true, "w2k": true, "w2kw": true, "w2kk": true, "wkw": true, "w31": true, "w3kw": true, "wgpw": true, "wgp": true, "wfpw": true, "wfpkw": true} + opsData := make([]opData, 0) + opsDataImm := make([]opData, 0) + for _, op := range ops { + shapeIn, shapeOut, maskType, _, gOp := op.shape() + asm := machineOpName(maskType, gOp) + + // TODO: all our masked operations are now zeroing, we need to generate machine ops with merging masks, maybe copy + // one here with a name suffix "Merging". The rewrite rules will need them. + if _, ok := seen[asm]; ok { + continue + } + seen[asm] = struct{}{} + regInfo, err := op.regShape() + if err != nil { + panic(err) + } + idx, err := checkVecAsScalar(op) + if err != nil { + panic(err) + } + if idx != -1 { + if regInfo == "v21" { + regInfo = "vfpv" + } else if regInfo == "v2kv" { + regInfo = "vfpkv" + } else { + panic(fmt.Errorf("simdgen does not recognize uses of treatLikeAScalarOfSize with op regShape %s in op: %s", regInfo, op)) + } + } + // Makes AVX512 operations use upper registers + if strings.Contains(op.CPUFeature, "AVX512") { + regInfo = strings.ReplaceAll(regInfo, "v", "w") + } + if _, ok := regInfoSet[regInfo]; !ok { + panic(fmt.Errorf("unsupported register constraint, please update the template and AMD64Ops.go: %s. Op is %s", regInfo, op)) + } + var outType string + if shapeOut == OneVregOut || shapeOut == OneVregOutAtIn || gOp.Out[0].OverwriteClass != nil { + // If class overwrite is happening, that's not really a mask but a vreg. 
+
+			outType = fmt.Sprintf("Vec%d", *gOp.Out[0].Bits)
+		} else if shapeOut == OneGregOut {
+			outType = gOp.GoType() // this is a straight Go type, not a VecNNN type
+		} else if shapeOut == OneKmaskOut {
+			outType = "Mask"
+		} else {
+			panic(fmt.Errorf("simdgen does not recognize this output shape: %d", shapeOut))
+		}
+		resultInArg0 := false
+		if shapeOut == OneVregOutAtIn {
+			resultInArg0 = true
+		}
+		if shapeIn == OneImmIn || shapeIn == OneKmaskImmIn {
+			opsDataImm = append(opsDataImm, opData{asm, gOp.Asm, len(gOp.In), regInfo, gOp.Commutative, outType, resultInArg0})
+		} else {
+			opsData = append(opsData, opData{asm, gOp.Asm, len(gOp.In), regInfo, gOp.Commutative, outType, resultInArg0})
+		}
+	}
+	sort.Slice(opsData, func(i, j int) bool {
+		return compareNatural(opsData[i].OpName, opsData[j].OpName) < 0
+	})
+	sort.Slice(opsDataImm, func(i, j int) bool {
+		return compareNatural(opsDataImm[i].OpName, opsDataImm[j].OpName) < 0
+	})
+	err := t.Execute(buffer, machineOpsData{opsData, opsDataImm})
+	if err != nil {
+		panic(fmt.Errorf("failed to execute template: %w", err))
+	}
+
+	return buffer
+}
diff --git a/src/simd/_gen/simdgen/gen_simdTypes.go b/src/simd/_gen/simdgen/gen_simdTypes.go
new file mode 100644
index 00000000000000..a367cce0144be6
--- /dev/null
+++ b/src/simd/_gen/simdgen/gen_simdTypes.go
@@ -0,0 +1,631 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+	"bytes"
+	"cmp"
+	"fmt"
+	"maps"
+	"slices"
+	"sort"
+	"strings"
+)
+
+type simdType struct {
+	Name                    string // The go type name of this simd type, for example Int32x4.
+	Lanes                   int    // The number of elements in this vector/mask.
+	Base                    string // The element's type, like for Int32x4 it will be int32.
+	Fields                  string // The struct fields, already correctly formatted.
+	Type                    string // Either "mask" or "vreg"
+	VectorCounterpart       string // For mask use only: just replacing the "Mask" in [simdType.Name] with "Int"
+	ReshapedVectorWithAndOr string // For mask use only: vector AND and OR are only available in some shape with element width 32.
+	Size                    int    // The size of the vector type
+}
+
+func (x simdType) ElemBits() int {
+	return x.Size / x.Lanes
+}
+
+// LanesContainer returns the smallest int/uint bit size that is
+// large enough to hold one bit for each lane. E.g., Mask32x4
+// is 4 lanes, and a uint8 is the smallest uint that has 4 bits.
+func (x simdType) LanesContainer() int {
+	if x.Lanes > 64 {
+		panic("too many lanes")
+	}
+	if x.Lanes > 32 {
+		return 64
+	}
+	if x.Lanes > 16 {
+		return 32
+	}
+	if x.Lanes > 8 {
+		return 16
+	}
+	return 8
+}
+
+// MaskedLoadStoreFilter encodes which simd types currently
+// get masked loads/stores generated. It is used in two places,
+// which forces those places to stay coordinated.
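+// All 512-bit types qualify; smaller types qualify only when they are not masks
+// and their elements are at least 32 bits wide.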
+func (x simdType) MaskedLoadStoreFilter() bool { + return x.Size == 512 || x.ElemBits() >= 32 && x.Type != "mask" +} + +func (x simdType) IntelSizeSuffix() string { + switch x.ElemBits() { + case 8: + return "B" + case 16: + return "W" + case 32: + return "D" + case 64: + return "Q" + } + panic("oops") +} + +func (x simdType) MaskedLoadDoc() string { + if x.Size == 512 || x.ElemBits() < 32 { + return fmt.Sprintf("// Asm: VMOVDQU%d.Z, CPU Feature: AVX512", x.ElemBits()) + } else { + return fmt.Sprintf("// Asm: VMASKMOV%s, CPU Feature: AVX2", x.IntelSizeSuffix()) + } +} + +func (x simdType) MaskedStoreDoc() string { + if x.Size == 512 || x.ElemBits() < 32 { + return fmt.Sprintf("// Asm: VMOVDQU%d, CPU Feature: AVX512", x.ElemBits()) + } else { + return fmt.Sprintf("// Asm: VMASKMOV%s, CPU Feature: AVX2", x.IntelSizeSuffix()) + } +} + +func compareSimdTypes(x, y simdType) int { + // "vreg" then "mask" + if c := -compareNatural(x.Type, y.Type); c != 0 { + return c + } + // want "flo" < "int" < "uin" (and then 8 < 16 < 32 < 64), + // not "int16" < "int32" < "int64" < "int8") + // so limit comparison to first 3 bytes in string. + if c := compareNatural(x.Base[:3], y.Base[:3]); c != 0 { + return c + } + // base type size, 8 < 16 < 32 < 64 + if c := x.ElemBits() - y.ElemBits(); c != 0 { + return c + } + // vector size last + return x.Size - y.Size +} + +type simdTypeMap map[int][]simdType + +type simdTypePair struct { + Tsrc simdType + Tdst simdType +} + +func compareSimdTypePairs(x, y simdTypePair) int { + c := compareSimdTypes(x.Tsrc, y.Tsrc) + if c != 0 { + return c + } + return compareSimdTypes(x.Tdst, y.Tdst) +} + +const simdPackageHeader = generatedHeader + ` +//go:build goexperiment.simd + +package simd +` + +const simdTypesTemplates = ` +{{define "sizeTmpl"}} +// v{{.}} is a tag type that tells the compiler that this is really {{.}}-bit SIMD +type v{{.}} struct { + _{{.}} struct{} +} +{{end}} + +{{define "typeTmpl"}} +// {{.Name}} is a {{.Size}}-bit SIMD vector of {{.Lanes}} {{.Base}} +type {{.Name}} struct { +{{.Fields}} +} + +{{end}} +` + +const simdFeaturesTemplate = ` +import "internal/cpu" + +{{range .}} +{{- if eq .Feature "AVX512"}} +// Has{{.Feature}} returns whether the CPU supports the AVX512F+CD+BW+DQ+VL features. +// +// These five CPU features are bundled together, and no use of AVX-512 +// is allowed unless all of these features are supported together. +// Nearly every CPU that has shipped with any support for AVX-512 has +// supported all five of these features. +{{- else -}} +// Has{{.Feature}} returns whether the CPU supports the {{.Feature}} feature. +{{- end}} +// +// Has{{.Feature}} is defined on all GOARCHes, but will only return true on +// GOARCH {{.GoArch}}. +func Has{{.Feature}}() bool { + return cpu.X86.Has{{.Feature}} +} +{{end}} +` + +const simdLoadStoreTemplate = ` +// Len returns the number of elements in a {{.Name}} +func (x {{.Name}}) Len() int { return {{.Lanes}} } + +// Load{{.Name}} loads a {{.Name}} from an array +// +//go:noescape +func Load{{.Name}}(y *[{{.Lanes}}]{{.Base}}) {{.Name}} + +// Store stores a {{.Name}} to an array +// +//go:noescape +func (x {{.Name}}) Store(y *[{{.Lanes}}]{{.Base}}) +` + +const simdMaskFromBitsTemplate = ` +// Load{{.Name}}FromBits constructs a {{.Name}} from a bitmap, where 1 means set for the indexed element, 0 means unset. +// Only the lower {{.Lanes}} bits of y are used. 
+// +// CPU Features: AVX512 +//go:noescape +func Load{{.Name}}FromBits(y *uint64) {{.Name}} + +// StoreToBits stores a {{.Name}} as a bitmap, where 1 means set for the indexed element, 0 means unset. +// Only the lower {{.Lanes}} bits of y are used. +// +// CPU Features: AVX512 +//go:noescape +func (x {{.Name}}) StoreToBits(y *uint64) +` + +const simdMaskFromValTemplate = ` +// {{.Name}}FromBits constructs a {{.Name}} from a bitmap value, where 1 means set for the indexed element, 0 means unset. +// Only the lower {{.Lanes}} bits of y are used. +// +// Asm: KMOV{{.IntelSizeSuffix}}, CPU Feature: AVX512 +func {{.Name}}FromBits(y uint{{.LanesContainer}}) {{.Name}} + +// ToBits constructs a bitmap from a {{.Name}}, where 1 means set for the indexed element, 0 means unset. +// Only the lower {{.Lanes}} bits of y are used. +// +// Asm: KMOV{{.IntelSizeSuffix}}, CPU Features: AVX512 +func (x {{.Name}}) ToBits() uint{{.LanesContainer}} +` + +const simdMaskedLoadStoreTemplate = ` +// LoadMasked{{.Name}} loads a {{.Name}} from an array, +// at those elements enabled by mask +// +{{.MaskedLoadDoc}} +// +//go:noescape +func LoadMasked{{.Name}}(y *[{{.Lanes}}]{{.Base}}, mask Mask{{.ElemBits}}x{{.Lanes}}) {{.Name}} + +// StoreMasked stores a {{.Name}} to an array, +// at those elements enabled by mask +// +{{.MaskedStoreDoc}} +// +//go:noescape +func (x {{.Name}}) StoreMasked(y *[{{.Lanes}}]{{.Base}}, mask Mask{{.ElemBits}}x{{.Lanes}}) +` + +const simdStubsTmpl = ` +{{define "op1"}} +{{if .Documentation}}{{.Documentation}} +//{{end}} +// Asm: {{.Asm}}, CPU Feature: {{.CPUFeature}} +func ({{.Op0NameAndType "x"}}) {{.Go}}() {{.GoType}} +{{end}} + +{{define "op2"}} +{{if .Documentation}}{{.Documentation}} +//{{end}} +// Asm: {{.Asm}}, CPU Feature: {{.CPUFeature}} +func ({{.Op0NameAndType "x"}}) {{.Go}}({{.Op1NameAndType "y"}}) {{.GoType}} +{{end}} + +{{define "op2_21"}} +{{if .Documentation}}{{.Documentation}} +//{{end}} +// Asm: {{.Asm}}, CPU Feature: {{.CPUFeature}} +func ({{.Op1NameAndType "x"}}) {{.Go}}({{.Op0NameAndType "y"}}) {{.GoType}} +{{end}} + +{{define "op2_21Type1"}} +{{if .Documentation}}{{.Documentation}} +//{{end}} +// Asm: {{.Asm}}, CPU Feature: {{.CPUFeature}} +func ({{.Op1NameAndType "x"}}) {{.Go}}({{.Op0NameAndType "y"}}) {{.GoType}} +{{end}} + +{{define "op3"}} +{{if .Documentation}}{{.Documentation}} +//{{end}} +// Asm: {{.Asm}}, CPU Feature: {{.CPUFeature}} +func ({{.Op0NameAndType "x"}}) {{.Go}}({{.Op1NameAndType "y"}}, {{.Op2NameAndType "z"}}) {{.GoType}} +{{end}} + +{{define "op3_31"}} +{{if .Documentation}}{{.Documentation}} +//{{end}} +// Asm: {{.Asm}}, CPU Feature: {{.CPUFeature}} +func ({{.Op2NameAndType "x"}}) {{.Go}}({{.Op1NameAndType "y"}}, {{.Op0NameAndType "z"}}) {{.GoType}} +{{end}} + +{{define "op3_21"}} +{{if .Documentation}}{{.Documentation}} +//{{end}} +// Asm: {{.Asm}}, CPU Feature: {{.CPUFeature}} +func ({{.Op1NameAndType "x"}}) {{.Go}}({{.Op0NameAndType "y"}}, {{.Op2NameAndType "z"}}) {{.GoType}} +{{end}} + +{{define "op3_21Type1"}} +{{if .Documentation}}{{.Documentation}} +//{{end}} +// Asm: {{.Asm}}, CPU Feature: {{.CPUFeature}} +func ({{.Op1NameAndType "x"}}) {{.Go}}({{.Op0NameAndType "y"}}, {{.Op2NameAndType "z"}}) {{.GoType}} +{{end}} + +{{define "op3_231Type1"}} +{{if .Documentation}}{{.Documentation}} +//{{end}} +// Asm: {{.Asm}}, CPU Feature: {{.CPUFeature}} +func ({{.Op1NameAndType "x"}}) {{.Go}}({{.Op2NameAndType "y"}}, {{.Op0NameAndType "z"}}) {{.GoType}} +{{end}} + +{{define "op2VecAsScalar"}} +{{if .Documentation}}{{.Documentation}} +//{{end}} +// 
Asm: {{.Asm}}, CPU Feature: {{.CPUFeature}} +func ({{.Op0NameAndType "x"}}) {{.Go}}(y uint{{(index .In 1).TreatLikeAScalarOfSize}}) {{(index .Out 0).Go}} +{{end}} + +{{define "op3VecAsScalar"}} +{{if .Documentation}}{{.Documentation}} +//{{end}} +// Asm: {{.Asm}}, CPU Feature: {{.CPUFeature}} +func ({{.Op0NameAndType "x"}}) {{.Go}}(y uint{{(index .In 1).TreatLikeAScalarOfSize}}, {{.Op2NameAndType "z"}}) {{(index .Out 0).Go}} +{{end}} + +{{define "op4"}} +{{if .Documentation}}{{.Documentation}} +//{{end}} +// Asm: {{.Asm}}, CPU Feature: {{.CPUFeature}} +func ({{.Op0NameAndType "x"}}) {{.Go}}({{.Op1NameAndType "y"}}, {{.Op2NameAndType "z"}}, {{.Op3NameAndType "u"}}) {{.GoType}} +{{end}} + +{{define "op4_231Type1"}} +{{if .Documentation}}{{.Documentation}} +//{{end}} +// Asm: {{.Asm}}, CPU Feature: {{.CPUFeature}} +func ({{.Op1NameAndType "x"}}) {{.Go}}({{.Op2NameAndType "y"}}, {{.Op0NameAndType "z"}}, {{.Op3NameAndType "u"}}) {{.GoType}} +{{end}} + +{{define "op4_31"}} +{{if .Documentation}}{{.Documentation}} +//{{end}} +// Asm: {{.Asm}}, CPU Feature: {{.CPUFeature}} +func ({{.Op2NameAndType "x"}}) {{.Go}}({{.Op1NameAndType "y"}}, {{.Op0NameAndType "z"}}, {{.Op3NameAndType "u"}}) {{.GoType}} +{{end}} + +{{define "op1Imm8"}} +{{if .Documentation}}{{.Documentation}} +//{{end}} +// {{.ImmName}} results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: {{.Asm}}, CPU Feature: {{.CPUFeature}} +func ({{.Op1NameAndType "x"}}) {{.Go}}({{.ImmName}} uint8) {{.GoType}} +{{end}} + +{{define "op2Imm8"}} +{{if .Documentation}}{{.Documentation}} +//{{end}} +// {{.ImmName}} results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: {{.Asm}}, CPU Feature: {{.CPUFeature}} +func ({{.Op1NameAndType "x"}}) {{.Go}}({{.ImmName}} uint8, {{.Op2NameAndType "y"}}) {{.GoType}} +{{end}} + +{{define "op2Imm8_2I"}} +{{if .Documentation}}{{.Documentation}} +//{{end}} +// {{.ImmName}} results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: {{.Asm}}, CPU Feature: {{.CPUFeature}} +func ({{.Op1NameAndType "x"}}) {{.Go}}({{.Op2NameAndType "y"}}, {{.ImmName}} uint8) {{.GoType}} +{{end}} + + +{{define "op3Imm8"}} +{{if .Documentation}}{{.Documentation}} +//{{end}} +// {{.ImmName}} results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: {{.Asm}}, CPU Feature: {{.CPUFeature}} +func ({{.Op1NameAndType "x"}}) {{.Go}}({{.ImmName}} uint8, {{.Op2NameAndType "y"}}, {{.Op3NameAndType "z"}}) {{.GoType}} +{{end}} + +{{define "op3Imm8_2I"}} +{{if .Documentation}}{{.Documentation}} +//{{end}} +// {{.ImmName}} results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: {{.Asm}}, CPU Feature: {{.CPUFeature}} +func ({{.Op1NameAndType "x"}}) {{.Go}}({{.Op2NameAndType "y"}}, {{.ImmName}} uint8, {{.Op3NameAndType "z"}}) {{.GoType}} +{{end}} + + +{{define "op4Imm8"}} +{{if .Documentation}}{{.Documentation}} +//{{end}} +// {{.ImmName}} results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
+// +// Asm: {{.Asm}}, CPU Feature: {{.CPUFeature}} +func ({{.Op1NameAndType "x"}}) {{.Go}}({{.ImmName}} uint8, {{.Op2NameAndType "y"}}, {{.Op3NameAndType "z"}}, {{.Op4NameAndType "u"}}) {{.GoType}} +{{end}} + +{{define "vectorConversion"}} +// {{.Tdst.Name}} converts from {{.Tsrc.Name}} to {{.Tdst.Name}} +func (from {{.Tsrc.Name}}) As{{.Tdst.Name}}() (to {{.Tdst.Name}}) +{{end}} + +{{define "mask"}} +// converts from {{.Name}} to {{.VectorCounterpart}} +func (from {{.Name}}) As{{.VectorCounterpart}}() (to {{.VectorCounterpart}}) + +// converts from {{.VectorCounterpart}} to {{.Name}} +func (from {{.VectorCounterpart}}) As{{.Name}}() (to {{.Name}}) + +func (x {{.Name}}) And(y {{.Name}}) {{.Name}} + +func (x {{.Name}}) Or(y {{.Name}}) {{.Name}} +{{end}} +` + +// parseSIMDTypes groups go simd types by their vector sizes, and +// returns a map whose key is the vector size, value is the simd type. +func parseSIMDTypes(ops []Operation) simdTypeMap { + // TODO: maybe instead of going over ops, let's try go over types.yaml. + ret := map[int][]simdType{} + seen := map[string]struct{}{} + processArg := func(arg Operand) { + if arg.Class == "immediate" || arg.Class == "greg" { + // Immediates are not encoded as vector types. + return + } + if _, ok := seen[*arg.Go]; ok { + return + } + seen[*arg.Go] = struct{}{} + + lanes := *arg.Lanes + base := fmt.Sprintf("%s%d", *arg.Base, *arg.ElemBits) + tagFieldNameS := fmt.Sprintf("%sx%d", base, lanes) + tagFieldS := fmt.Sprintf("%s v%d", tagFieldNameS, *arg.Bits) + valFieldS := fmt.Sprintf("vals%s[%d]%s", strings.Repeat(" ", len(tagFieldNameS)-3), lanes, base) + fields := fmt.Sprintf("\t%s\n\t%s", tagFieldS, valFieldS) + if arg.Class == "mask" { + vectorCounterpart := strings.ReplaceAll(*arg.Go, "Mask", "Int") + reshapedVectorWithAndOr := fmt.Sprintf("Int32x%d", *arg.Bits/32) + ret[*arg.Bits] = append(ret[*arg.Bits], simdType{*arg.Go, lanes, base, fields, arg.Class, vectorCounterpart, reshapedVectorWithAndOr, *arg.Bits}) + // In case the vector counterpart of a mask is not present, put its vector counterpart typedef into the map as well. 
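+			// The synthesized counterpart reuses the mask's lane count, base element type, and struct fields.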
+ if _, ok := seen[vectorCounterpart]; !ok { + seen[vectorCounterpart] = struct{}{} + ret[*arg.Bits] = append(ret[*arg.Bits], simdType{vectorCounterpart, lanes, base, fields, "vreg", "", "", *arg.Bits}) + } + } else { + ret[*arg.Bits] = append(ret[*arg.Bits], simdType{*arg.Go, lanes, base, fields, arg.Class, "", "", *arg.Bits}) + } + } + for _, op := range ops { + for _, arg := range op.In { + processArg(arg) + } + for _, arg := range op.Out { + processArg(arg) + } + } + return ret +} + +func vConvertFromTypeMap(typeMap simdTypeMap) []simdTypePair { + v := []simdTypePair{} + for _, ts := range typeMap { + for i, tsrc := range ts { + for j, tdst := range ts { + if i != j && tsrc.Type == tdst.Type && tsrc.Type == "vreg" && + tsrc.Lanes > 1 && tdst.Lanes > 1 { + v = append(v, simdTypePair{tsrc, tdst}) + } + } + } + } + slices.SortFunc(v, compareSimdTypePairs) + return v +} + +func masksFromTypeMap(typeMap simdTypeMap) []simdType { + m := []simdType{} + for _, ts := range typeMap { + for _, tsrc := range ts { + if tsrc.Type == "mask" { + m = append(m, tsrc) + } + } + } + slices.SortFunc(m, compareSimdTypes) + return m +} + +func typesFromTypeMap(typeMap simdTypeMap) []simdType { + m := []simdType{} + for _, ts := range typeMap { + for _, tsrc := range ts { + if tsrc.Lanes > 1 { + m = append(m, tsrc) + } + } + } + slices.SortFunc(m, compareSimdTypes) + return m +} + +// writeSIMDTypes generates the simd vector types into a bytes.Buffer +func writeSIMDTypes(typeMap simdTypeMap) *bytes.Buffer { + t := templateOf(simdTypesTemplates, "types_amd64") + loadStore := templateOf(simdLoadStoreTemplate, "loadstore_amd64") + maskedLoadStore := templateOf(simdMaskedLoadStoreTemplate, "maskedloadstore_amd64") + maskFromBits := templateOf(simdMaskFromBitsTemplate, "maskFromBits_amd64") + maskFromVal := templateOf(simdMaskFromValTemplate, "maskFromVal_amd64") + + buffer := new(bytes.Buffer) + buffer.WriteString(simdPackageHeader) + + sizes := make([]int, 0, len(typeMap)) + for size, types := range typeMap { + slices.SortFunc(types, compareSimdTypes) + sizes = append(sizes, size) + } + sort.Ints(sizes) + + for _, size := range sizes { + if size <= 64 { + // these are scalar + continue + } + if err := t.ExecuteTemplate(buffer, "sizeTmpl", size); err != nil { + panic(fmt.Errorf("failed to execute size template for size %d: %w", size, err)) + } + for _, typeDef := range typeMap[size] { + if typeDef.Lanes == 1 { + continue + } + if err := t.ExecuteTemplate(buffer, "typeTmpl", typeDef); err != nil { + panic(fmt.Errorf("failed to execute type template for type %s: %w", typeDef.Name, err)) + } + if typeDef.Type != "mask" { + if err := loadStore.ExecuteTemplate(buffer, "loadstore_amd64", typeDef); err != nil { + panic(fmt.Errorf("failed to execute loadstore template for type %s: %w", typeDef.Name, err)) + } + // restrict to AVX2 masked loads/stores first. 
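+				// The same filter gates the masked load/store intrinsics registered in writeSIMDIntrinsics,
+				// so the two places stay coordinated.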
+ if typeDef.MaskedLoadStoreFilter() { + if err := maskedLoadStore.ExecuteTemplate(buffer, "maskedloadstore_amd64", typeDef); err != nil { + panic(fmt.Errorf("failed to execute maskedloadstore template for type %s: %w", typeDef.Name, err)) + } + } + } else { + if err := maskFromBits.ExecuteTemplate(buffer, "maskFromBits_amd64", typeDef); err != nil { + panic(fmt.Errorf("failed to execute maskFromBits template for type %s: %w", typeDef.Name, err)) + } + if err := maskFromVal.ExecuteTemplate(buffer, "maskFromVal_amd64", typeDef); err != nil { + panic(fmt.Errorf("failed to execute maskFromVal template for type %s: %w", typeDef.Name, err)) + } + } + } + } + + return buffer +} + +func writeSIMDFeatures(ops []Operation) *bytes.Buffer { + // Gather all features + type featureKey struct { + GoArch string + Feature string + } + featureSet := make(map[featureKey]struct{}) + for _, op := range ops { + featureSet[featureKey{op.GoArch, op.CPUFeature}] = struct{}{} + } + features := slices.SortedFunc(maps.Keys(featureSet), func(a, b featureKey) int { + if c := cmp.Compare(a.GoArch, b.GoArch); c != 0 { + return c + } + return compareNatural(a.Feature, b.Feature) + }) + + // If we ever have the same feature name on more than one GOARCH, we'll have + // to be more careful about this. + t := templateOf(simdFeaturesTemplate, "features") + + buffer := new(bytes.Buffer) + buffer.WriteString(simdPackageHeader) + + if err := t.Execute(buffer, features); err != nil { + panic(fmt.Errorf("failed to execute features template: %w", err)) + } + + return buffer +} + +// writeSIMDStubs generates the simd vector intrinsic stubs and writes it to ops_amd64.go and ops_internal_amd64.go +// within the specified directory. +func writeSIMDStubs(ops []Operation, typeMap simdTypeMap) *bytes.Buffer { + t := templateOf(simdStubsTmpl, "simdStubs") + buffer := new(bytes.Buffer) + buffer.WriteString(simdPackageHeader) + + slices.SortFunc(ops, compareOperations) + + for i, op := range ops { + if op.NoTypes != nil && *op.NoTypes == "true" { + continue + } + idxVecAsScalar, err := checkVecAsScalar(op) + if err != nil { + panic(err) + } + if s, op, err := classifyOp(op); err == nil { + if idxVecAsScalar != -1 { + if s == "op2" || s == "op3" { + s += "VecAsScalar" + } else { + panic(fmt.Errorf("simdgen only supports op2 or op3 with TreatLikeAScalarOfSize")) + } + } + if i == 0 || op.Go != ops[i-1].Go { + fmt.Fprintf(buffer, "\n/* %s */\n", op.Go) + } + if err := t.ExecuteTemplate(buffer, s, op); err != nil { + panic(fmt.Errorf("failed to execute template %s for op %v: %w", s, op, err)) + } + } else { + panic(fmt.Errorf("failed to classify op %v: %w", op.Go, err)) + } + } + + vectorConversions := vConvertFromTypeMap(typeMap) + for _, conv := range vectorConversions { + if err := t.ExecuteTemplate(buffer, "vectorConversion", conv); err != nil { + panic(fmt.Errorf("failed to execute vectorConversion template: %w", err)) + } + } + + masks := masksFromTypeMap(typeMap) + for _, mask := range masks { + if err := t.ExecuteTemplate(buffer, "mask", mask); err != nil { + panic(fmt.Errorf("failed to execute mask template for mask %s: %w", mask.Name, err)) + } + } + + return buffer +} diff --git a/src/simd/_gen/simdgen/gen_simdrules.go b/src/simd/_gen/simdgen/gen_simdrules.go new file mode 100644 index 00000000000000..b0fc7e62cde1f7 --- /dev/null +++ b/src/simd/_gen/simdgen/gen_simdrules.go @@ -0,0 +1,211 @@ +// Copyright 2025 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "bytes" + "fmt" + "slices" + "text/template" +) + +type tplRuleData struct { + tplName string // e.g. "sftimm" + GoOp string // e.g. "ShiftAllLeft" + GoType string // e.g. "Uint32x8" + Args string // e.g. "x y" + Asm string // e.g. "VPSLLD256" + ArgsOut string // e.g. "x y" + MaskInConvert string // e.g. "VPMOVVec32x8ToM" + MaskOutConvert string // e.g. "VPMOVMToVec32x8" +} + +var ( + ruleTemplates = template.Must(template.New("simdRules").Parse(` +{{define "pureVreg"}}({{.GoOp}}{{.GoType}} {{.Args}}) => ({{.Asm}} {{.ArgsOut}}) +{{end}} +{{define "maskIn"}}({{.GoOp}}{{.GoType}} {{.Args}} mask) => ({{.Asm}} {{.ArgsOut}} ({{.MaskInConvert}} mask)) +{{end}} +{{define "maskOut"}}({{.GoOp}}{{.GoType}} {{.Args}}) => ({{.MaskOutConvert}} ({{.Asm}} {{.ArgsOut}})) +{{end}} +{{define "maskInMaskOut"}}({{.GoOp}}{{.GoType}} {{.Args}} mask) => ({{.MaskOutConvert}} ({{.Asm}} {{.ArgsOut}} ({{.MaskInConvert}} mask))) +{{end}} +{{define "sftimm"}}({{.Asm}} x (MOVQconst [c])) => ({{.Asm}}const [uint8(c)] x) +{{end}} +{{define "masksftimm"}}({{.Asm}} x (MOVQconst [c]) mask) => ({{.Asm}}const [uint8(c)] x mask) +{{end}} +`)) +) + +// SSA rewrite rules need to appear in a most-to-least-specific order. This works for that. +var tmplOrder = map[string]int{ + "masksftimm": 0, + "sftimm": 1, + "maskInMaskOut": 2, + "maskOut": 3, + "maskIn": 4, + "pureVreg": 5, +} + +func compareTplRuleData(x, y tplRuleData) int { + if c := compareNatural(x.GoOp, y.GoOp); c != 0 { + return c + } + if c := compareNatural(x.GoType, y.GoType); c != 0 { + return c + } + if c := compareNatural(x.Args, y.Args); c != 0 { + return c + } + if x.tplName == y.tplName { + return 0 + } + xo, xok := tmplOrder[x.tplName] + yo, yok := tmplOrder[y.tplName] + if !xok { + panic(fmt.Errorf("Unexpected template name %s, please add to tmplOrder", x.tplName)) + } + if !yok { + panic(fmt.Errorf("Unexpected template name %s, please add to tmplOrder", y.tplName)) + } + return xo - yo +} + +// writeSIMDRules generates the lowering and rewrite rules for ssa and writes it to simdAMD64.rules +// within the specified directory. 
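+// For a given Go op and type, the emitted rules are ordered from most to least specific, following tmplOrder.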
+func writeSIMDRules(ops []Operation) *bytes.Buffer { + buffer := new(bytes.Buffer) + buffer.WriteString(generatedHeader + "\n") + + var allData []tplRuleData + + for _, opr := range ops { + if opr.NoGenericOps != nil && *opr.NoGenericOps == "true" { + continue + } + opInShape, opOutShape, maskType, immType, gOp := opr.shape() + asm := machineOpName(maskType, gOp) + vregInCnt := len(gOp.In) + if maskType == OneMask { + vregInCnt-- + } + + data := tplRuleData{ + GoOp: gOp.Go, + Asm: asm, + } + + if vregInCnt == 1 { + data.Args = "x" + data.ArgsOut = data.Args + } else if vregInCnt == 2 { + data.Args = "x y" + data.ArgsOut = data.Args + } else if vregInCnt == 3 { + data.Args = "x y z" + data.ArgsOut = data.Args + } else { + panic(fmt.Errorf("simdgen does not support more than 3 vreg in inputs")) + } + if immType == ConstImm { + data.ArgsOut = fmt.Sprintf("[%s] %s", *opr.In[0].Const, data.ArgsOut) + } else if immType == VarImm { + data.Args = fmt.Sprintf("[a] %s", data.Args) + data.ArgsOut = fmt.Sprintf("[a] %s", data.ArgsOut) + } else if immType == ConstVarImm { + data.Args = fmt.Sprintf("[a] %s", data.Args) + data.ArgsOut = fmt.Sprintf("[a+%s] %s", *opr.In[0].Const, data.ArgsOut) + } + + goType := func(op Operation) string { + if op.OperandOrder != nil { + switch *op.OperandOrder { + case "21Type1", "231Type1": + // Permute uses operand[1] for method receiver. + return *op.In[1].Go + } + } + return *op.In[0].Go + } + var tplName string + // If class overwrite is happening, that's not really a mask but a vreg. + if opOutShape == OneVregOut || opOutShape == OneVregOutAtIn || gOp.Out[0].OverwriteClass != nil { + switch opInShape { + case OneImmIn: + tplName = "pureVreg" + data.GoType = goType(gOp) + case PureVregIn: + tplName = "pureVreg" + data.GoType = goType(gOp) + case OneKmaskImmIn: + fallthrough + case OneKmaskIn: + tplName = "maskIn" + data.GoType = goType(gOp) + rearIdx := len(gOp.In) - 1 + // Mask is at the end. + data.MaskInConvert = fmt.Sprintf("VPMOVVec%dx%dToM", *gOp.In[rearIdx].ElemBits, *gOp.In[rearIdx].Lanes) + case PureKmaskIn: + panic(fmt.Errorf("simdgen does not support pure k mask instructions, they should be generated by compiler optimizations")) + } + } else if opOutShape == OneGregOut { + tplName = "pureVreg" // TODO this will be wrong + data.GoType = goType(gOp) + } else { + // OneKmaskOut case + data.MaskOutConvert = fmt.Sprintf("VPMOVMToVec%dx%d", *gOp.Out[0].ElemBits, *gOp.In[0].Lanes) + switch opInShape { + case OneImmIn: + fallthrough + case PureVregIn: + tplName = "maskOut" + data.GoType = goType(gOp) + case OneKmaskImmIn: + fallthrough + case OneKmaskIn: + tplName = "maskInMaskOut" + data.GoType = goType(gOp) + rearIdx := len(gOp.In) - 1 + data.MaskInConvert = fmt.Sprintf("VPMOVVec%dx%dToM", *gOp.In[rearIdx].ElemBits, *gOp.In[rearIdx].Lanes) + case PureKmaskIn: + panic(fmt.Errorf("simdgen does not support pure k mask instructions, they should be generated by compiler optimizations")) + } + } + + if gOp.SpecialLower != nil { + if *gOp.SpecialLower == "sftimm" { + if data.GoType[0] == 'I' { + // only do these for signed types, it is a duplicate rewrite for unsigned + sftImmData := data + if tplName == "maskIn" { + sftImmData.tplName = "masksftimm" + } else { + sftImmData.tplName = "sftimm" + } + allData = append(allData, sftImmData) + } + } else { + panic("simdgen sees unknwon special lower " + *gOp.SpecialLower + ", maybe implement it?") + } + } + + if tplName == "pureVreg" && data.Args == data.ArgsOut { + data.Args = "..." + data.ArgsOut = "..." 
+ } + data.tplName = tplName + allData = append(allData, data) + } + + slices.SortFunc(allData, compareTplRuleData) + + for _, data := range allData { + if err := ruleTemplates.ExecuteTemplate(buffer, data.tplName, data); err != nil { + panic(fmt.Errorf("failed to execute template %s for %s: %w", data.tplName, data.GoOp+data.GoType, err)) + } + } + + return buffer +} diff --git a/src/simd/_gen/simdgen/gen_simdssa.go b/src/simd/_gen/simdgen/gen_simdssa.go new file mode 100644 index 00000000000000..5a5421a815fd2c --- /dev/null +++ b/src/simd/_gen/simdgen/gen_simdssa.go @@ -0,0 +1,173 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "bytes" + "fmt" + "strings" + "text/template" +) + +var ( + ssaTemplates = template.Must(template.New("simdSSA").Parse(` +{{define "header"}}// Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. + +package amd64 + +import ( + "cmd/compile/internal/ssa" + "cmd/compile/internal/ssagen" + "cmd/internal/obj" + "cmd/internal/obj/x86" +) + +func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { + var p *obj.Prog + switch v.Op {{"{"}}{{end}} +{{define "case"}} + case {{.Cases}}: + p = {{.Helper}}(s, v) +{{end}} +{{define "footer"}} + default: + // Unknown reg shape + return false + } +{{end}} +{{define "zeroing"}} + // Masked operation are always compiled with zeroing. + switch v.Op { + case {{.}}: + x86.ParseSuffix(p, "Z") + } +{{end}} +{{define "ending"}} + return true +} +{{end}}`)) +) + +type tplSSAData struct { + Cases string + Helper string +} + +// writeSIMDSSA generates the ssa to prog lowering codes and writes it to simdssa.go +// within the specified directory. 
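+// Ops are grouped by their register-constraint shape; each group dispatches to a helper in the
+// amd64 package named "simd" plus the capitalized shape (for example, simdV11 for shape "v11").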
+func writeSIMDSSA(ops []Operation) *bytes.Buffer { + var ZeroingMask []string + regInfoKeys := []string{ + "v11", + "v21", + "v2k", + "v2kv", + "v2kk", + "vkv", + "v31", + "v3kv", + "v11Imm8", + "vkvImm8", + "v21Imm8", + "v2kImm8", + "v2kkImm8", + "v31ResultInArg0", + "v3kvResultInArg0", + "vfpv", + "vfpkv", + "vgpvImm8", + "vgpImm8", + "v2kvImm8", + } + regInfoSet := map[string][]string{} + for _, key := range regInfoKeys { + regInfoSet[key] = []string{} + } + + seen := map[string]struct{}{} + allUnseen := make(map[string][]Operation) + for _, op := range ops { + shapeIn, shapeOut, maskType, _, gOp := op.shape() + asm := machineOpName(maskType, gOp) + + if _, ok := seen[asm]; ok { + continue + } + seen[asm] = struct{}{} + caseStr := fmt.Sprintf("ssa.OpAMD64%s", asm) + if shapeIn == OneKmaskIn || shapeIn == OneKmaskImmIn { + if gOp.Zeroing == nil { + ZeroingMask = append(ZeroingMask, caseStr) + } + } + regShape, err := op.regShape() + if err != nil { + panic(err) + } + if shapeOut == OneVregOutAtIn { + regShape += "ResultInArg0" + } + if shapeIn == OneImmIn || shapeIn == OneKmaskImmIn { + regShape += "Imm8" + } + idx, err := checkVecAsScalar(op) + if err != nil { + panic(err) + } + if idx != -1 { + if regShape == "v21" { + regShape = "vfpv" + } else if regShape == "v2kv" { + regShape = "vfpkv" + } else { + panic(fmt.Errorf("simdgen does not recognize uses of treatLikeAScalarOfSize with op regShape %s in op: %s", regShape, op)) + } + } + if _, ok := regInfoSet[regShape]; !ok { + allUnseen[regShape] = append(allUnseen[regShape], op) + } + regInfoSet[regShape] = append(regInfoSet[regShape], caseStr) + } + if len(allUnseen) != 0 { + panic(fmt.Errorf("unsupported register constraint for prog, please update gen_simdssa.go and amd64/ssa.go: %+v", allUnseen)) + } + + buffer := new(bytes.Buffer) + + if err := ssaTemplates.ExecuteTemplate(buffer, "header", nil); err != nil { + panic(fmt.Errorf("failed to execute header template: %w", err)) + } + + for _, regShape := range regInfoKeys { + // Stable traversal of regInfoSet + cases := regInfoSet[regShape] + if len(cases) == 0 { + continue + } + data := tplSSAData{ + Cases: strings.Join(cases, ",\n\t\t"), + Helper: "simd" + capitalizeFirst(regShape), + } + if err := ssaTemplates.ExecuteTemplate(buffer, "case", data); err != nil { + panic(fmt.Errorf("failed to execute case template for %s: %w", regShape, err)) + } + } + + if err := ssaTemplates.ExecuteTemplate(buffer, "footer", nil); err != nil { + panic(fmt.Errorf("failed to execute footer template: %w", err)) + } + + if len(ZeroingMask) != 0 { + if err := ssaTemplates.ExecuteTemplate(buffer, "zeroing", strings.Join(ZeroingMask, ",\n\t\t")); err != nil { + panic(fmt.Errorf("failed to execute footer template: %w", err)) + } + } + + if err := ssaTemplates.ExecuteTemplate(buffer, "ending", nil); err != nil { + panic(fmt.Errorf("failed to execute footer template: %w", err)) + } + + return buffer +} diff --git a/src/simd/_gen/simdgen/gen_utility.go b/src/simd/_gen/simdgen/gen_utility.go new file mode 100644 index 00000000000000..20ce3c1351a886 --- /dev/null +++ b/src/simd/_gen/simdgen/gen_utility.go @@ -0,0 +1,729 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "bufio" + "bytes" + "fmt" + "go/format" + "log" + "os" + "path/filepath" + "reflect" + "slices" + "sort" + "strings" + "text/template" + "unicode" +) + +func templateOf(temp, name string) *template.Template { + t, err := template.New(name).Parse(temp) + if err != nil { + panic(fmt.Errorf("failed to parse template %s: %w", name, err)) + } + return t +} + +func createPath(goroot string, file string) (*os.File, error) { + fp := filepath.Join(goroot, file) + dir := filepath.Dir(fp) + err := os.MkdirAll(dir, 0755) + if err != nil { + return nil, fmt.Errorf("failed to create directory %s: %w", dir, err) + } + f, err := os.Create(fp) + if err != nil { + return nil, fmt.Errorf("failed to create file %s: %w", fp, err) + } + return f, nil +} + +func formatWriteAndClose(out *bytes.Buffer, goroot string, file string) { + b, err := format.Source(out.Bytes()) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + fmt.Fprintf(os.Stderr, "%s\n", numberLines(out.Bytes())) + fmt.Fprintf(os.Stderr, "%v\n", err) + panic(err) + } else { + writeAndClose(b, goroot, file) + } +} + +func writeAndClose(b []byte, goroot string, file string) { + ofile, err := createPath(goroot, file) + if err != nil { + panic(err) + } + ofile.Write(b) + ofile.Close() +} + +// numberLines takes a slice of bytes, and returns a string where each line +// is numbered, starting from 1. +func numberLines(data []byte) string { + var buf bytes.Buffer + r := bytes.NewReader(data) + s := bufio.NewScanner(r) + for i := 1; s.Scan(); i++ { + fmt.Fprintf(&buf, "%d: %s\n", i, s.Text()) + } + return buf.String() +} + +type inShape uint8 +type outShape uint8 +type maskShape uint8 +type immShape uint8 + +const ( + InvalidIn inShape = iota + PureVregIn // vector register input only + OneKmaskIn // vector and kmask input + OneImmIn // vector and immediate input + OneKmaskImmIn // vector, kmask, and immediate inputs + PureKmaskIn // only mask inputs. +) + +const ( + InvalidOut outShape = iota + NoOut // no output + OneVregOut // (one) vector register output + OneGregOut // (one) general register output + OneKmaskOut // mask output + OneVregOutAtIn // the first input is also the output +) + +const ( + InvalidMask maskShape = iota + NoMask // no mask + OneMask // with mask (K1 to K7) + AllMasks // a K mask instruction (K0-K7) +) + +const ( + InvalidImm immShape = iota + NoImm // no immediate + ConstImm // const only immediate + VarImm // pure imm argument provided by the users + ConstVarImm // a combination of user arg and const +) + +// opShape returns the several integers describing the shape of the operation, +// and modified versions of the op: +// +// opNoImm is op with its inputs excluding the const imm. +// +// This function does not modify op. +func (op *Operation) shape() (shapeIn inShape, shapeOut outShape, maskType maskShape, immType immShape, + opNoImm Operation) { + if len(op.Out) > 1 { + panic(fmt.Errorf("simdgen only supports 1 output: %s", op)) + } + var outputReg int + if len(op.Out) == 1 { + outputReg = op.Out[0].AsmPos + if op.Out[0].Class == "vreg" { + shapeOut = OneVregOut + } else if op.Out[0].Class == "greg" { + shapeOut = OneGregOut + } else if op.Out[0].Class == "mask" { + shapeOut = OneKmaskOut + } else { + panic(fmt.Errorf("simdgen only supports output of class vreg or mask: %s", op)) + } + } else { + shapeOut = NoOut + // TODO: are these only Load/Stores? + // We manually supported two Load and Store, are those enough? 
+ panic(fmt.Errorf("simdgen only supports 1 output: %s", op)) + } + hasImm := false + maskCount := 0 + hasVreg := false + for _, in := range op.In { + if in.AsmPos == outputReg { + if shapeOut != OneVregOutAtIn && in.AsmPos == 0 && in.Class == "vreg" { + shapeOut = OneVregOutAtIn + } else { + panic(fmt.Errorf("simdgen only support output and input sharing the same position case of \"the first input is vreg and the only output\": %s", op)) + } + } + if in.Class == "immediate" { + // A manual check on XED data found that AMD64 SIMD instructions at most + // have 1 immediates. So we don't need to check this here. + if *in.Bits != 8 { + panic(fmt.Errorf("simdgen only supports immediates of 8 bits: %s", op)) + } + hasImm = true + } else if in.Class == "mask" { + maskCount++ + } else { + hasVreg = true + } + } + opNoImm = *op + + removeImm := func(o *Operation) { + o.In = o.In[1:] + } + if hasImm { + removeImm(&opNoImm) + if op.In[0].Const != nil { + if op.In[0].ImmOffset != nil { + immType = ConstVarImm + } else { + immType = ConstImm + } + } else if op.In[0].ImmOffset != nil { + immType = VarImm + } else { + panic(fmt.Errorf("simdgen requires imm to have at least one of ImmOffset or Const set: %s", op)) + } + } else { + immType = NoImm + } + if maskCount == 0 { + maskType = NoMask + } else { + maskType = OneMask + } + checkPureMask := func() bool { + if hasImm { + panic(fmt.Errorf("simdgen does not support immediates in pure mask operations: %s", op)) + } + if hasVreg { + panic(fmt.Errorf("simdgen does not support more than 1 masks in non-pure mask operations: %s", op)) + } + return false + } + if !hasImm && maskCount == 0 { + shapeIn = PureVregIn + } else if !hasImm && maskCount > 0 { + if maskCount == 1 { + shapeIn = OneKmaskIn + } else { + if checkPureMask() { + return + } + shapeIn = PureKmaskIn + maskType = AllMasks + } + } else if hasImm && maskCount == 0 { + shapeIn = OneImmIn + } else { + if maskCount == 1 { + shapeIn = OneKmaskImmIn + } else { + checkPureMask() + return + } + } + return +} + +// regShape returns a string representation of the register shape. +func (op *Operation) regShape() (string, error) { + _, _, _, _, gOp := op.shape() + var regInfo string + var vRegInCnt, gRegInCnt, kMaskInCnt, vRegOutCnt, gRegOutCnt, kMaskOutCnt int + for _, in := range gOp.In { + if in.Class == "vreg" { + vRegInCnt++ + } else if in.Class == "greg" { + gRegInCnt++ + } else if in.Class == "mask" { + kMaskInCnt++ + } + } + for _, out := range gOp.Out { + // If class overwrite is happening, that's not really a mask but a vreg. + if out.Class == "vreg" || out.OverwriteClass != nil { + vRegOutCnt++ + } else if out.Class == "greg" { + gRegOutCnt++ + } else if out.Class == "mask" { + kMaskOutCnt++ + } + } + var inRegs, inMasks, outRegs, outMasks string + + rmAbbrev := func(s string, i int) string { + if i == 0 { + return "" + } + if i == 1 { + return s + } + return fmt.Sprintf("%s%d", s, i) + + } + + inRegs = rmAbbrev("v", vRegInCnt) + inRegs += rmAbbrev("gp", gRegInCnt) + inMasks = rmAbbrev("k", kMaskInCnt) + + outRegs = rmAbbrev("v", vRegOutCnt) + outRegs += rmAbbrev("gp", gRegOutCnt) + outMasks = rmAbbrev("k", kMaskOutCnt) + + if kMaskInCnt == 0 && kMaskOutCnt == 0 && gRegInCnt == 0 && gRegOutCnt == 0 { + // For pure v we can abbreviate it as v%d%d. 
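+		// For example, two vreg inputs and one vreg output yield "v21".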
+		regInfo = fmt.Sprintf("v%d%d", vRegInCnt, vRegOutCnt)
+	} else if kMaskInCnt == 0 && kMaskOutCnt == 0 {
+		regInfo = fmt.Sprintf("%s%s", inRegs, outRegs)
+	} else {
+		regInfo = fmt.Sprintf("%s%s%s%s", inRegs, inMasks, outRegs, outMasks)
+	}
+	return regInfo, nil
+}
+
+// sortOperand sorts op.In by putting immediates first, then vregs, with masks last.
+// TODO: verify that this is a safe assumption about the prog structure.
+// From observation, in asm the immediates always come first and the masks always
+// come last, with vregs in between.
+func (op *Operation) sortOperand() {
+	priority := map[string]int{"immediate": 0, "vreg": 1, "greg": 1, "mask": 2}
+	sort.SliceStable(op.In, func(i, j int) bool {
+		pi := priority[op.In[i].Class]
+		pj := priority[op.In[j].Class]
+		if pi != pj {
+			return pi < pj
+		}
+		return op.In[i].AsmPos < op.In[j].AsmPos
+	})
+}
+
+// goNormalType returns the Go type name for the result of an Op that
+// does not return a vector, i.e., that returns a result in a general
+// register. Currently there's only one family of Ops in Go's simd library
+// that does this (GetElem), and so this is specialized to work for that,
+// but the problem (mismatch between hardware register width and Go type
+// width) seems likely to recur if there are any other cases.
+func (op Operation) goNormalType() string {
+	if op.Go == "GetElem" {
+		// GetElem returns an element of the vector into a general register
+		// but as far as the hardware is concerned, that result is either 32
+		// or 64 bits wide, no matter what the vector element width is.
+		// This is not "wrong" but it is not the right answer for Go source code.
+		// To get the Go type right, combine the base type ("int", "uint", "float"),
+		// with the input vector element width in bits (8,16,32,64).
+
+		at := 0 // proper value of at depends on whether immediate was stripped or not
+		if op.In[at].Class == "immediate" {
+			at++
+		}
+		return fmt.Sprintf("%s%d", *op.Out[0].Base, *op.In[at].ElemBits)
+	}
+	panic(fmt.Errorf("Implement goNormalType for %v", op))
+}
+
+// SSAType returns the string for the type reference in SSA generation,
+// for example in the intrinsics generating template.
+func (op Operation) SSAType() string {
+	if op.Out[0].Class == "greg" {
+		return fmt.Sprintf("types.Types[types.T%s]", strings.ToUpper(op.goNormalType()))
+	}
+	return fmt.Sprintf("types.TypeVec%d", *op.Out[0].Bits)
+}
+
+// GoType returns the Go type returned by this operation (relative to the simd package),
+// for example "int32" or "Int8x16". This is used in a template.
+func (op Operation) GoType() string {
+	if op.Out[0].Class == "greg" {
+		return op.goNormalType()
+	}
+	return *op.Out[0].Go
+}
+
+// ImmName returns the name to use for an operation's immediate operand.
+// This can be overridden in the yaml with "name" on an operand;
+// otherwise, for now, it is "constant".
+func (op Operation) ImmName() string {
+	return op.Op0Name("constant")
+}
+
+func (o Operand) OpName(s string) string {
+	if n := o.Name; n != nil {
+		return *n
+	}
+	if o.Class == "mask" {
+		return "mask"
+	}
+	return s
+}
+
+func (o Operand) OpNameAndType(s string) string {
+	return o.OpName(s) + " " + *o.Go
+}
+
+// GoExported returns [Go] with first character capitalized.
+func (op Operation) GoExported() string {
+	return capitalizeFirst(op.Go)
+}
+
+// DocumentationExported returns [Documentation] with method name capitalized.
+func (op Operation) DocumentationExported() string {
+	return strings.ReplaceAll(op.Documentation, op.Go, op.GoExported())
+}
+
+// Op0Name returns the name to use for the 0 operand,
+// if any is present, otherwise the parameter is used.
+func (op Operation) Op0Name(s string) string {
+	return op.In[0].OpName(s)
+}
+
+// Op1Name returns the name to use for the 1 operand,
+// if any is present, otherwise the parameter is used.
+func (op Operation) Op1Name(s string) string {
+	return op.In[1].OpName(s)
+}
+
+// Op2Name returns the name to use for the 2 operand,
+// if any is present, otherwise the parameter is used.
+func (op Operation) Op2Name(s string) string {
+	return op.In[2].OpName(s)
+}
+
+// Op3Name returns the name to use for the 3 operand,
+// if any is present, otherwise the parameter is used.
+func (op Operation) Op3Name(s string) string {
+	return op.In[3].OpName(s)
+}
+
+// Op0NameAndType returns the name and type to use for
+// the 0 operand, if a name is provided, otherwise
+// the parameter value is used as the default.
+func (op Operation) Op0NameAndType(s string) string {
+	return op.In[0].OpNameAndType(s)
+}
+
+// Op1NameAndType returns the name and type to use for
+// the 1 operand, if a name is provided, otherwise
+// the parameter value is used as the default.
+func (op Operation) Op1NameAndType(s string) string {
+	return op.In[1].OpNameAndType(s)
+}
+
+// Op2NameAndType returns the name and type to use for
+// the 2 operand, if a name is provided, otherwise
+// the parameter value is used as the default.
+func (op Operation) Op2NameAndType(s string) string {
+	return op.In[2].OpNameAndType(s)
+}
+
+// Op3NameAndType returns the name and type to use for
+// the 3 operand, if a name is provided, otherwise
+// the parameter value is used as the default.
+func (op Operation) Op3NameAndType(s string) string {
+	return op.In[3].OpNameAndType(s)
+}
+
+// Op4NameAndType returns the name and type to use for
+// the 4 operand, if a name is provided, otherwise
+// the parameter value is used as the default.
+func (op Operation) Op4NameAndType(s string) string {
+	return op.In[4].OpNameAndType(s)
+}
+
+var immClasses []string = []string{"BAD0Imm", "BAD1Imm", "op1Imm8", "op2Imm8", "op3Imm8", "op4Imm8"}
+var classes []string = []string{"BAD0", "op1", "op2", "op3", "op4"}
+
+// classifyOp returns a classification string, a modified operation, and perhaps an error,
+// based on the stub and intrinsic shape for the operation.
+// The classification string matches the regular expression "op[1234](Imm8)?(_<order>)?",
+// where the "_<order>" suffix comes from an operand order optionally attached to the Operation in its input yaml.
+// The classification string is used to select a template or a clause of a template
+// for intrinsics declaration and the ssagen intrinsics glue code in the compiler.
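+// For example, an operation taking a user-supplied immediate plus one vector input is classified as "op1Imm8".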
+func classifyOp(op Operation) (string, Operation, error) {
+	_, _, _, immType, gOp := op.shape()
+
+	var class string
+
+	if immType == VarImm || immType == ConstVarImm {
+		switch l := len(op.In); l {
+		case 1:
+			return "", op, fmt.Errorf("simdgen does not recognize this operation of only immediate input: %s", op)
+		case 2, 3, 4, 5:
+			class = immClasses[l]
+		default:
+			return "", op, fmt.Errorf("simdgen does not recognize this operation of input length %d: %s", len(op.In), op)
+		}
+		if order := op.OperandOrder; order != nil {
+			class += "_" + *order
+		}
+		return class, op, nil
+	} else {
+		switch l := len(gOp.In); l {
+		case 1, 2, 3, 4:
+			class = classes[l]
+		default:
+			return "", op, fmt.Errorf("simdgen does not recognize this operation of input length %d: %s", len(op.In), op)
+		}
+		if order := op.OperandOrder; order != nil {
+			class += "_" + *order
+		}
+		return class, gOp, nil
+	}
+}
+
+func checkVecAsScalar(op Operation) (idx int, err error) {
+	idx = -1
+	sSize := 0
+	for i, o := range op.In {
+		if o.TreatLikeAScalarOfSize != nil {
+			if idx == -1 {
+				idx = i
+				sSize = *o.TreatLikeAScalarOfSize
+			} else {
+				err = fmt.Errorf("simdgen only supports one TreatLikeAScalarOfSize in the arg list: %s", op)
+				return
+			}
+		}
+	}
+	if idx >= 0 {
+		if idx != 1 {
+			err = fmt.Errorf("simdgen only supports TreatLikeAScalarOfSize at the 2nd arg of the arg list: %s", op)
+			return
+		}
+		if sSize != 8 && sSize != 16 && sSize != 32 && sSize != 64 {
+			err = fmt.Errorf("simdgen does not recognize this uint size: %d, %s", sSize, op)
+			return
+		}
+	}
+	return
+}
+
+// dedup removes operations that are deep-equal duplicates of operations already kept.
+func dedup(ops []Operation) (deduped []Operation) {
+	for _, op := range ops {
+		seen := false
+		for _, dop := range deduped {
+			if reflect.DeepEqual(op, dop) {
+				seen = true
+				break
+			}
+		}
+		if !seen {
+			deduped = append(deduped, op)
+		}
+	}
+	return
+}
+
+func (op Operation) GenericName() string {
+	if op.OperandOrder != nil {
+		switch *op.OperandOrder {
+		case "21Type1", "231Type1":
+			// Permute uses operand[1] for method receiver.
+			return op.Go + *op.In[1].Go
+		}
+	}
+	if op.In[0].Class == "immediate" {
+		return op.Go + *op.In[1].Go
+	}
+	return op.Go + *op.In[0].Go
+}
+
+// dedupGodef dedups operations at the [Op.Go]+[*Op.In[0].Go] level.
+// Deduping picks the least advanced architecture that satisfies the requirement:
+// AVX512 is preferred least.
+// If FlagReportDup is set, the duplicates are instead reported to the console and no deduplication is done.
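+// For example, when both an AVX2 and an AVX512 definition share a generic name, the AVX2 definition is kept.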
+func dedupGodef(ops []Operation) ([]Operation, error) {
+	seen := map[string][]Operation{}
+	for _, op := range ops {
+		_, _, _, _, gOp := op.shape()
+
+		gN := gOp.GenericName()
+		seen[gN] = append(seen[gN], op)
+	}
+	if *FlagReportDup {
+		for gName, dup := range seen {
+			if len(dup) > 1 {
+				log.Printf("Duplicate for %s:\n", gName)
+				for _, op := range dup {
+					log.Printf("%s\n", op)
+				}
+			}
+		}
+		return ops, nil
+	}
+	isAVX512 := func(op Operation) bool {
+		return strings.Contains(op.CPUFeature, "AVX512")
+	}
+	deduped := []Operation{}
+	for _, dup := range seen {
+		if len(dup) > 1 {
+			slices.SortFunc(dup, func(i, j Operation) int {
+				// Put non-AVX512 candidates at the beginning
+				if !isAVX512(i) && isAVX512(j) {
+					return -1
+				}
+				if isAVX512(i) && !isAVX512(j) {
+					return 1
+				}
+				return strings.Compare(i.CPUFeature, j.CPUFeature)
+			})
+		}
+		deduped = append(deduped, dup[0])
+	}
+	slices.SortFunc(deduped, compareOperations)
+	return deduped, nil
+}
+
+// copyConstImm copies op.ConstImm to op.In[0].Const.
+// This is a hack to reduce the size of the defs we need for const-imm operations.
+func copyConstImm(ops []Operation) error {
+	for _, op := range ops {
+		if op.ConstImm == nil {
+			continue
+		}
+		_, _, _, immType, _ := op.shape()
+
+		if immType == ConstImm || immType == ConstVarImm {
+			op.In[0].Const = op.ConstImm
+		}
+		// Otherwise, don't copy it - e.g. {VPCMP[BWDQ] imm=0} and {VPCMPEQ[BWDQ]} are
+		// the same operation "Equal"; [dedupGodef] should be able to distinguish them.
+	}
+	return nil
+}
+
+func capitalizeFirst(s string) string {
+	if s == "" {
+		return ""
+	}
+	// Convert the string to a slice of runes to handle multi-byte characters correctly.
+	r := []rune(s)
+	r[0] = unicode.ToUpper(r[0])
+	return string(r)
+}
+
+// overwrite corrects some errors due to:
+// - the XED data being wrong
+// - Go's SIMD API requirements, for example AVX2 compares should also produce masks.
+// This rewrite has strict constraints, please see the error messages.
+// These constraints are also exploited in [writeSIMDRules], [writeSIMDMachineOps]
+// and [writeSIMDSSA], so please be careful when updating these constraints.
+func overwrite(ops []Operation) error { + hasClassOverwrite := false + overwrite := func(op []Operand, idx int, o Operation) error { + if op[idx].OverwriteElementBits != nil { + if op[idx].ElemBits == nil { + panic(fmt.Errorf("ElemBits is nil at operand %d of %v", idx, o)) + } + *op[idx].ElemBits = *op[idx].OverwriteElementBits + *op[idx].Lanes = *op[idx].Bits / *op[idx].ElemBits + *op[idx].Go = fmt.Sprintf("%s%dx%d", capitalizeFirst(*op[idx].Base), *op[idx].ElemBits, *op[idx].Lanes) + } + if op[idx].OverwriteClass != nil { + if op[idx].OverwriteBase == nil { + panic(fmt.Errorf("simdgen: [OverwriteClass] must be set together with [OverwriteBase]: %s", op[idx])) + } + oBase := *op[idx].OverwriteBase + oClass := *op[idx].OverwriteClass + if oClass != "mask" { + panic(fmt.Errorf("simdgen: [Class] overwrite only supports overwritting to mask: %s", op[idx])) + } + if oBase != "int" { + panic(fmt.Errorf("simdgen: [Class] overwrite must set [OverwriteBase] to int: %s", op[idx])) + } + if op[idx].Class != "vreg" { + panic(fmt.Errorf("simdgen: [Class] overwrite must be overwriting [Class] from vreg: %s", op[idx])) + } + hasClassOverwrite = true + *op[idx].Base = oBase + op[idx].Class = oClass + *op[idx].Go = fmt.Sprintf("Mask%dx%d", *op[idx].ElemBits, *op[idx].Lanes) + } else if op[idx].OverwriteBase != nil { + oBase := *op[idx].OverwriteBase + *op[idx].Go = strings.ReplaceAll(*op[idx].Go, capitalizeFirst(*op[idx].Base), capitalizeFirst(oBase)) + if op[idx].Class == "greg" { + *op[idx].Go = strings.ReplaceAll(*op[idx].Go, *op[idx].Base, oBase) + } + *op[idx].Base = oBase + } + return nil + } + for i, o := range ops { + hasClassOverwrite = false + for j := range ops[i].In { + if err := overwrite(ops[i].In, j, o); err != nil { + return err + } + if hasClassOverwrite { + return fmt.Errorf("simdgen does not support [OverwriteClass] in inputs: %s", ops[i]) + } + } + for j := range ops[i].Out { + if err := overwrite(ops[i].Out, j, o); err != nil { + return err + } + } + if hasClassOverwrite { + for _, in := range ops[i].In { + if in.Class == "mask" { + return fmt.Errorf("simdgen only supports [OverwriteClass] for operations without mask inputs") + } + } + } + } + return nil +} + +// reportXEDInconsistency reports potential XED inconsistencies. +// We can add more fields to [Operation] to enable more checks and implement it here. +// Supported checks: +// [NameAndSizeCheck]: NAME[BWDQ] should set the elemBits accordingly. +// This check is useful to find inconsistencies, then we can add overwrite fields to +// those defs to correct them manually. +func reportXEDInconsistency(ops []Operation) error { + for _, o := range ops { + if o.NameAndSizeCheck != nil { + suffixSizeMap := map[byte]int{'B': 8, 'W': 16, 'D': 32, 'Q': 64} + checkOperand := func(opr Operand) error { + if opr.ElemBits == nil { + return fmt.Errorf("simdgen expects elemBits to be set when performing NameAndSizeCheck") + } + if v, ok := suffixSizeMap[o.Asm[len(o.Asm)-1]]; !ok { + return fmt.Errorf("simdgen expects asm to end with [BWDQ] when performing NameAndSizeCheck") + } else { + if v != *opr.ElemBits { + return fmt.Errorf("simdgen finds NameAndSizeCheck inconsistency in def: %s", o) + } + } + return nil + } + for _, in := range o.In { + if in.Class != "vreg" && in.Class != "mask" { + continue + } + if in.TreatLikeAScalarOfSize != nil { + // This is an irregular operand, don't check it. 
+ continue + } + if err := checkOperand(in); err != nil { + return err + } + } + for _, out := range o.Out { + if err := checkOperand(out); err != nil { + return err + } + } + } + } + return nil +} + +func (o Operation) String() string { + return pprints(o) +} + +func (op Operand) String() string { + return pprints(op) +} diff --git a/src/simd/_gen/simdgen/go.yaml b/src/simd/_gen/simdgen/go.yaml new file mode 100644 index 00000000000000..4f077c8143f94d --- /dev/null +++ b/src/simd/_gen/simdgen/go.yaml @@ -0,0 +1 @@ +!import ops/*/go.yaml diff --git a/src/simd/_gen/simdgen/godefs.go b/src/simd/_gen/simdgen/godefs.go new file mode 100644 index 00000000000000..0022140aaab177 --- /dev/null +++ b/src/simd/_gen/simdgen/godefs.go @@ -0,0 +1,379 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "log" + "regexp" + "slices" + "strconv" + "strings" + + "simd/_gen/unify" +) + +type Operation struct { + rawOperation + + // Go is the Go method name of this operation. + // + // It is derived from the raw Go method name by adding optional suffixes. + // Currently, "Masked" is the only suffix. + Go string + + // Documentation is the doc string for this API. + // + // It is computed from the raw documentation: + // + // - "NAME" is replaced by the Go method name. + // + // - For masked operation, a sentence about masking is added. + Documentation string + + // In is the sequence of parameters to the Go method. + // + // For masked operations, this will have the mask operand appended. + In []Operand +} + +// rawOperation is the unifier representation of an [Operation]. It is +// translated into a more parsed form after unifier decoding. +type rawOperation struct { + Go string // Base Go method name + + GoArch string // GOARCH for this definition + Asm string // Assembly mnemonic + OperandOrder *string // optional Operand order for better Go declarations + // Optional tag to indicate this operation is paired with special generic->machine ssa lowering rules. + // Should be paired with special templates in gen_simdrules.go + SpecialLower *string + + In []Operand // Parameters + InVariant []Operand // Optional parameters + Out []Operand // Results + Commutative bool // Commutativity + CPUFeature string // CPUID/Has* feature name + Zeroing *bool // nil => use asm suffix ".Z"; false => do not use asm suffix ".Z" + Documentation *string // Documentation will be appended to the stubs comments. + // ConstMask is a hack to reduce the size of defs the user writes for const-immediate + // If present, it will be copied to [In[0].Const]. + ConstImm *string + // NameAndSizeCheck is used to check [BWDQ] maps to (8|16|32|64) elemBits. + NameAndSizeCheck *bool + // If non-nil, all generation in gen_simdTypes.go and gen_intrinsics will be skipped. + NoTypes *string + // If non-nil, all generation in gen_simdGenericOps and gen_simdrules will be skipped. + NoGenericOps *string + // If non-nil, this string will be attached to the machine ssa op name. + SSAVariant *string +} + +func (o *Operation) DecodeUnified(v *unify.Value) error { + if err := v.Decode(&o.rawOperation); err != nil { + return err + } + + isMasked := false + if len(o.InVariant) == 0 { + // No variant + } else if len(o.InVariant) == 1 && o.InVariant[0].Class == "mask" { + isMasked = true + } else { + return fmt.Errorf("unknown inVariant") + } + + // Compute full Go method name. 
+ o.Go = o.rawOperation.Go + if isMasked { + o.Go += "Masked" + } + + // Compute doc string. + if o.rawOperation.Documentation != nil { + o.Documentation = *o.rawOperation.Documentation + } else { + o.Documentation = "// UNDOCUMENTED" + } + o.Documentation = regexp.MustCompile(`\bNAME\b`).ReplaceAllString(o.Documentation, o.Go) + if isMasked { + o.Documentation += "\n//\n// This operation is applied selectively under a write mask." + } + + o.In = append(o.rawOperation.In, o.rawOperation.InVariant...) + + return nil +} + +func (o *Operation) VectorWidth() int { + out := o.Out[0] + if out.Class == "vreg" { + return *out.Bits + } else if out.Class == "greg" || out.Class == "mask" { + for i := range o.In { + if o.In[i].Class == "vreg" { + return *o.In[i].Bits + } + } + } + panic(fmt.Errorf("Figure out what the vector width is for %v and implement it", *o)) +} + +func machineOpName(maskType maskShape, gOp Operation) string { + asm := gOp.Asm + if maskType == 2 { + asm += "Masked" + } + asm = fmt.Sprintf("%s%d", asm, gOp.VectorWidth()) + if gOp.SSAVariant != nil { + asm += *gOp.SSAVariant + } + return asm +} + +func compareStringPointers(x, y *string) int { + if x != nil && y != nil { + return compareNatural(*x, *y) + } + if x == nil && y == nil { + return 0 + } + if x == nil { + return -1 + } + return 1 +} + +func compareIntPointers(x, y *int) int { + if x != nil && y != nil { + return *x - *y + } + if x == nil && y == nil { + return 0 + } + if x == nil { + return -1 + } + return 1 +} + +func compareOperations(x, y Operation) int { + if c := compareNatural(x.Go, y.Go); c != 0 { + return c + } + xIn, yIn := x.In, y.In + + if len(xIn) > len(yIn) && xIn[len(xIn)-1].Class == "mask" { + xIn = xIn[:len(xIn)-1] + } else if len(xIn) < len(yIn) && yIn[len(yIn)-1].Class == "mask" { + yIn = yIn[:len(yIn)-1] + } + + if len(xIn) < len(yIn) { + return -1 + } + if len(xIn) > len(yIn) { + return 1 + } + if len(x.Out) < len(y.Out) { + return -1 + } + if len(x.Out) > len(y.Out) { + return 1 + } + for i := range xIn { + ox, oy := &xIn[i], &yIn[i] + if c := compareOperands(ox, oy); c != 0 { + return c + } + } + return 0 +} + +func compareOperands(x, y *Operand) int { + if c := compareNatural(x.Class, y.Class); c != 0 { + return c + } + if x.Class == "immediate" { + return compareStringPointers(x.ImmOffset, y.ImmOffset) + } else { + if c := compareStringPointers(x.Base, y.Base); c != 0 { + return c + } + if c := compareIntPointers(x.ElemBits, y.ElemBits); c != 0 { + return c + } + if c := compareIntPointers(x.Bits, y.Bits); c != 0 { + return c + } + return 0 + } +} + +type Operand struct { + Class string // One of "mask", "immediate", "vreg", "greg", and "mem" + + Go *string // Go type of this operand + AsmPos int // Position of this operand in the assembly instruction + + Base *string // Base Go type ("int", "uint", "float") + ElemBits *int // Element bit width + Bits *int // Total vector bit width + + Const *string // Optional constant value for immediates. + // Optional immediate arg offsets. If this field is non-nil, + // This operand will be an immediate operand: + // The compiler will right-shift the user-passed value by ImmOffset and set it as the AuxInt + // field of the operation. 
+	ImmOffset *string
+	Name      *string // optional name in the Go intrinsic declaration
+	Lanes     *int    // *Lanes equals Bits/ElemBits except for scalars, when *Lanes == 1
+	// TreatLikeAScalarOfSize means only the lower $TreatLikeAScalarOfSize bits of the vector
+	// are used, so at the API level we can make it just a scalar value of this size; then we
+	// can overwrite it to a vector of the right size during the intrinsics stage.
+	TreatLikeAScalarOfSize *int
+	// If non-nil, it means the [Class] field is overwritten here; right now this is used to
+	// overwrite the results of AVX2 compares to masks.
+	OverwriteClass *string
+	// If non-nil, it means the [Base] field is overwritten here. This field exists solely
+	// because Intel's XED data is inconsistent. e.g. VANDNP[SD] marks its operand int.
+	OverwriteBase *string
+	// If non-nil, it means the [ElementBits] field is overwritten. This field exists solely
+	// because Intel's XED data is inconsistent. e.g. AVX512 VPMADDUBSW marks its operand
+	// elemBits 16, which should be 8.
+	OverwriteElementBits *int
+}
+
+// isDigit returns true if the byte is an ASCII digit.
+func isDigit(b byte) bool {
+	return b >= '0' && b <= '9'
+}
+
+// compareNatural performs a "natural sort" comparison of two strings.
+// It compares non-digit sections lexicographically and digit sections
+// numerically. In the case of string-unequal "equal" strings like
+// "a01b" and "a1b", strings.Compare breaks the tie.
+//
+// It returns:
+//
+//	-1 if s1 < s2
+//	 0 if s1 == s2
+//	+1 if s1 > s2
+func compareNatural(s1, s2 string) int {
+	i, j := 0, 0
+	len1, len2 := len(s1), len(s2)
+
+	for i < len1 && j < len2 {
+		// Find a non-digit segment or a number segment in both strings.
+		if isDigit(s1[i]) && isDigit(s2[j]) {
+			// Number segment comparison.
+			numStart1 := i
+			for i < len1 && isDigit(s1[i]) {
+				i++
+			}
+			num1, _ := strconv.Atoi(s1[numStart1:i])
+
+			numStart2 := j
+			for j < len2 && isDigit(s2[j]) {
+				j++
+			}
+			num2, _ := strconv.Atoi(s2[numStart2:j])
+
+			if num1 < num2 {
+				return -1
+			}
+			if num1 > num2 {
+				return 1
+			}
+			// If numbers are equal, continue to the next segment.
+		} else {
+			// Non-digit comparison.
+			if s1[i] < s2[j] {
+				return -1
+			}
+			if s1[i] > s2[j] {
+				return 1
+			}
+			i++
+			j++
+		}
+	}
+
+	// deal with a01b vs a1b; there needs to be an order.
+	return strings.Compare(s1, s2)
+}
+
+const generatedHeader = `// Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT.
+`
+
+func writeGoDefs(path string, cl unify.Closure) error {
+	// TODO: Merge operations with the same signature but multiple
+	// implementations (e.g., SSE vs AVX)
+	var ops []Operation
+	for def := range cl.All() {
+		var op Operation
+		if !def.Exact() {
+			continue
+		}
+		if err := def.Decode(&op); err != nil {
+			log.Println(err.Error())
+			log.Println(def)
+			continue
+		}
+		// TODO: verify that this is safe.
+		op.sortOperand()
+		ops = append(ops, op)
+	}
+	slices.SortFunc(ops, compareOperations)
+	// The parsed XED data might contain duplicates, like
+	// 512 bits VPADDP.
+ deduped := dedup(ops) + slices.SortFunc(deduped, compareOperations) + + if *Verbose { + log.Printf("dedup len: %d\n", len(ops)) + } + var err error + if err = overwrite(deduped); err != nil { + return err + } + if *Verbose { + log.Printf("dedup len: %d\n", len(deduped)) + } + if *Verbose { + log.Printf("dedup len: %d\n", len(deduped)) + } + if !*FlagNoDedup { + // TODO: This can hide mistakes in the API definitions, especially when + // multiple patterns result in the same API unintentionally. Make it stricter. + if deduped, err = dedupGodef(deduped); err != nil { + return err + } + } + if *Verbose { + log.Printf("dedup len: %d\n", len(deduped)) + } + if !*FlagNoConstImmPorting { + if err = copyConstImm(deduped); err != nil { + return err + } + } + if *Verbose { + log.Printf("dedup len: %d\n", len(deduped)) + } + reportXEDInconsistency(deduped) + typeMap := parseSIMDTypes(deduped) + + formatWriteAndClose(writeSIMDTypes(typeMap), path, "src/"+simdPackage+"/types_amd64.go") + formatWriteAndClose(writeSIMDFeatures(deduped), path, "src/"+simdPackage+"/cpu.go") + formatWriteAndClose(writeSIMDStubs(deduped, typeMap), path, "src/"+simdPackage+"/ops_amd64.go") + formatWriteAndClose(writeSIMDIntrinsics(deduped, typeMap), path, "src/cmd/compile/internal/ssagen/simdintrinsics.go") + formatWriteAndClose(writeSIMDGenericOps(deduped), path, "src/cmd/compile/internal/ssa/_gen/simdgenericOps.go") + formatWriteAndClose(writeSIMDMachineOps(deduped), path, "src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go") + formatWriteAndClose(writeSIMDSSA(deduped), path, "src/cmd/compile/internal/amd64/simdssa.go") + writeAndClose(writeSIMDRules(deduped).Bytes(), path, "src/cmd/compile/internal/ssa/_gen/simdAMD64.rules") + + return nil +} diff --git a/src/simd/_gen/simdgen/main.go b/src/simd/_gen/simdgen/main.go new file mode 100644 index 00000000000000..537dde0c66dd01 --- /dev/null +++ b/src/simd/_gen/simdgen/main.go @@ -0,0 +1,280 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// simdgen is an experiment in generating Go <-> asm SIMD mappings. +// +// Usage: simdgen [-xedPath=path] [-q=query] input.yaml... +// +// If -xedPath is provided, one of the inputs is a sum of op-code definitions +// generated from the Intel XED data at path. +// +// If input YAML files are provided, each file is read as an input value. See +// [unify.Closure.UnmarshalYAML] or "go doc unify.Closure.UnmarshalYAML" for the +// format of these files. +// +// TODO: Example definitions and values. +// +// The command unifies across all of the inputs and prints all possible results +// of this unification. +// +// If the -q flag is provided, its string value is parsed as a value and treated +// as another input to unification. This is intended as a way to "query" the +// result, typically by narrowing it down to a small subset of results. +// +// Typical usage: +// +// go run . -xedPath $XEDPATH *.yaml +// +// To see just the definitions generated from XED, run: +// +// go run . -xedPath $XEDPATH +// +// (This works because if there's only one input, there's nothing to unify it +// with, so the result is simply itself.) +// +// To see just the definitions for VPADDQ: +// +// go run . -xedPath $XEDPATH -q '{asm: VPADDQ}' +// +// simdgen can also generate Go definitions of SIMD mappings: +// To generate go files to the go root, run: +// +// go run . 
-xedPath $XEDPATH -o godefs -goroot $PATH/TO/go go.yaml categories.yaml types.yaml +// +// types.yaml is already written, it specifies the shapes of vectors. +// categories.yaml and go.yaml contains definitions that unifies with types.yaml and XED +// data, you can find an example in ops/AddSub/. +// +// When generating Go definitions, simdgen do 3 "magic"s: +// - It splits masked operations(with op's [Masked] field set) to const and non const: +// - One is a normal masked operation, the original +// - The other has its mask operand's [Const] fields set to "K0". +// - This way the user does not need to provide a separate "K0"-masked operation def. +// +// - It deduplicates intrinsic names that have duplicates: +// - If there are two operations that shares the same signature, one is AVX512 the other +// is before AVX512, the other will be selected. +// - This happens often when some operations are defined both before AVX512 and after. +// This way the user does not need to provide a separate "K0" operation for the +// AVX512 counterpart. +// +// - It copies the op's [ConstImm] field to its immediate operand's [Const] field. +// - This way the user does not need to provide verbose op definition while only +// the const immediate field is different. This is useful to reduce verbosity of +// compares with imm control predicates. +// +// These 3 magics could be disabled by enabling -nosplitmask, -nodedup or +// -noconstimmporting flags. +// +// simdgen right now only supports amd64, -arch=$OTHERARCH will trigger a fatal error. +package main + +// Big TODOs: +// +// - This can produce duplicates, which can also lead to less efficient +// environment merging. Add hashing and use it for deduplication. Be careful +// about how this shows up in debug traces, since it could make things +// confusing if we don't show it happening. +// +// - Do I need Closure, Value, and Domain? It feels like I should only need two +// types. 
+ +import ( + "cmp" + "flag" + "fmt" + "log" + "maps" + "os" + "path/filepath" + "runtime/pprof" + "slices" + "strings" + + "gopkg.in/yaml.v3" + "simd/_gen/unify" +) + +var ( + xedPath = flag.String("xedPath", "", "load XED datafiles from `path`") + flagQ = flag.String("q", "", "query: read `def` as another input (skips final validation)") + flagO = flag.String("o", "yaml", "output type: yaml, godefs (generate definitions into a Go source tree") + flagGoDefRoot = flag.String("goroot", ".", "the path to the Go dev directory that will receive the generated files") + FlagNoDedup = flag.Bool("nodedup", false, "disable deduplicating godefs of 2 qualifying operations from different extensions") + FlagNoConstImmPorting = flag.Bool("noconstimmporting", false, "disable const immediate porting from op to imm operand") + FlagArch = flag.String("arch", "amd64", "the target architecture") + + Verbose = flag.Bool("v", false, "verbose") + + flagDebugXED = flag.Bool("debug-xed", false, "show XED instructions") + flagDebugUnify = flag.Bool("debug-unify", false, "print unification trace") + flagDebugHTML = flag.String("debug-html", "", "write unification trace to `file.html`") + FlagReportDup = flag.Bool("reportdup", false, "report the duplicate godefs") + + flagCPUProfile = flag.String("cpuprofile", "", "write CPU profile to `file`") + flagMemProfile = flag.String("memprofile", "", "write memory profile to `file`") +) + +const simdPackage = "simd" + +func main() { + flag.Parse() + + if *flagCPUProfile != "" { + f, err := os.Create(*flagCPUProfile) + if err != nil { + log.Fatalf("-cpuprofile: %s", err) + } + defer f.Close() + pprof.StartCPUProfile(f) + defer pprof.StopCPUProfile() + } + if *flagMemProfile != "" { + f, err := os.Create(*flagMemProfile) + if err != nil { + log.Fatalf("-memprofile: %s", err) + } + defer func() { + pprof.WriteHeapProfile(f) + f.Close() + }() + } + + var inputs []unify.Closure + + if *FlagArch != "amd64" { + log.Fatalf("simdgen only supports amd64") + } + + // Load XED into a defs set. + if *xedPath != "" { + xedDefs := loadXED(*xedPath) + inputs = append(inputs, unify.NewSum(xedDefs...)) + } + + // Load query. + if *flagQ != "" { + r := strings.NewReader(*flagQ) + def, err := unify.Read(r, "", unify.ReadOpts{}) + if err != nil { + log.Fatalf("parsing -q: %s", err) + } + inputs = append(inputs, def) + } + + // Load defs files. + must := make(map[*unify.Value]struct{}) + for _, path := range flag.Args() { + defs, err := unify.ReadFile(path, unify.ReadOpts{}) + if err != nil { + log.Fatal(err) + } + inputs = append(inputs, defs) + + if filepath.Base(path) == "go.yaml" { + // These must all be used in the final result + for def := range defs.Summands() { + must[def] = struct{}{} + } + } + } + + // Prepare for unification + if *flagDebugUnify { + unify.Debug.UnifyLog = os.Stderr + } + if *flagDebugHTML != "" { + f, err := os.Create(*flagDebugHTML) + if err != nil { + log.Fatal(err) + } + unify.Debug.HTML = f + defer f.Close() + } + + // Unify! + unified, err := unify.Unify(inputs...) + if err != nil { + log.Fatal(err) + } + + // Print results. + switch *flagO { + case "yaml": + // Produce a result that looks like encoding a slice, but stream it. + fmt.Println("!sum") + var val1 [1]*unify.Value + for val := range unified.All() { + val1[0] = val + // We have to make a new encoder each time or it'll print a document + // separator between each object. 
+ enc := yaml.NewEncoder(os.Stdout) + if err := enc.Encode(val1); err != nil { + log.Fatal(err) + } + enc.Close() + } + case "godefs": + if err := writeGoDefs(*flagGoDefRoot, unified); err != nil { + log.Fatalf("Failed writing godefs: %+v", err) + } + } + + if !*Verbose && *xedPath != "" { + if operandRemarks == 0 { + fmt.Fprintf(os.Stderr, "XED decoding generated no errors, which is unusual.\n") + } else { + fmt.Fprintf(os.Stderr, "XED decoding generated %d \"errors\" which is not cause for alarm, use -v for details.\n", operandRemarks) + } + } + + // Validate results. + // + // Don't validate if this is a command-line query because that tends to + // eliminate lots of required defs and is used in cases where maybe defs + // aren't enumerable anyway. + if *flagQ == "" && len(must) > 0 { + validate(unified, must) + } +} + +func validate(cl unify.Closure, required map[*unify.Value]struct{}) { + // Validate that: + // 1. All final defs are exact + // 2. All required defs are used + for def := range cl.All() { + if _, ok := def.Domain.(unify.Def); !ok { + fmt.Fprintf(os.Stderr, "%s: expected Def, got %T\n", def.PosString(), def.Domain) + continue + } + + if !def.Exact() { + fmt.Fprintf(os.Stderr, "%s: def not reduced to an exact value, why is %s:\n", def.PosString(), def.WhyNotExact()) + fmt.Fprintf(os.Stderr, "\t%s\n", strings.ReplaceAll(def.String(), "\n", "\n\t")) + } + + for root := range def.Provenance() { + delete(required, root) + } + } + // Report unused defs + unused := slices.SortedFunc(maps.Keys(required), + func(a, b *unify.Value) int { + return cmp.Or( + cmp.Compare(a.Pos().Path, b.Pos().Path), + cmp.Compare(a.Pos().Line, b.Pos().Line), + ) + }) + for _, def := range unused { + // TODO: Can we say anything more actionable? This is always a problem + // with unification: if it fails, it's very hard to point a finger at + // any particular reason. We could go back and try unifying this again + // with each subset of the inputs (starting with individual inputs) to + // at least say "it doesn't unify with anything in x.yaml". That's a lot + // of work, but if we have trouble debugging unification failure it may + // be worth it. + fmt.Fprintf(os.Stderr, "%s: def required, but did not unify (%v)\n", + def.PosString(), def) + } +} diff --git a/src/simd/_gen/simdgen/ops/AddSub/categories.yaml b/src/simd/_gen/simdgen/ops/AddSub/categories.yaml new file mode 100644 index 00000000000000..35e81042186030 --- /dev/null +++ b/src/simd/_gen/simdgen/ops/AddSub/categories.yaml @@ -0,0 +1,37 @@ +!sum +- go: Add + commutative: true + documentation: !string |- + // NAME adds corresponding elements of two vectors. +- go: AddSaturated + commutative: true + documentation: !string |- + // NAME adds corresponding elements of two vectors with saturation. +- go: Sub + commutative: false + documentation: !string |- + // NAME subtracts corresponding elements of two vectors. +- go: SubSaturated + commutative: false + documentation: !string |- + // NAME subtracts corresponding elements of two vectors with saturation. +- go: AddPairs + commutative: false + documentation: !string |- + // NAME horizontally adds adjacent pairs of elements. + // For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +- go: SubPairs + commutative: false + documentation: !string |- + // NAME horizontally subtracts adjacent pairs of elements. + // For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. 
+- go: AddPairsSaturated + commutative: false + documentation: !string |- + // NAME horizontally adds adjacent pairs of elements with saturation. + // For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +- go: SubPairsSaturated + commutative: false + documentation: !string |- + // NAME horizontally subtracts adjacent pairs of elements with saturation. + // For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. diff --git a/src/simd/_gen/simdgen/ops/AddSub/go.yaml b/src/simd/_gen/simdgen/ops/AddSub/go.yaml new file mode 100644 index 00000000000000..4423d8c7c682d6 --- /dev/null +++ b/src/simd/_gen/simdgen/ops/AddSub/go.yaml @@ -0,0 +1,77 @@ +!sum +# Add +- go: Add + asm: "VPADD[BWDQ]|VADDP[SD]" + in: + - &any + go: $t + - *any + out: + - *any +# Add Saturated +- go: AddSaturated + asm: "VPADDS[BWDQ]" + in: + - &int + go: $t + base: int + - *int + out: + - *int +- go: AddSaturated + asm: "VPADDUS[BWDQ]" + in: + - &uint + go: $t + base: uint + - *uint + out: + - *uint + +# Sub +- go: Sub + asm: "VPSUB[BWDQ]|VSUBP[SD]" + in: &2any + - *any + - *any + out: &1any + - *any +# Sub Saturated +- go: SubSaturated + asm: "VPSUBS[BWDQ]" + in: &2int + - *int + - *int + out: &1int + - *int +- go: SubSaturated + asm: "VPSUBUS[BWDQ]" + in: + - *uint + - *uint + out: + - *uint +- go: AddPairs + asm: "VPHADD[DW]" + in: *2any + out: *1any +- go: SubPairs + asm: "VPHSUB[DW]" + in: *2any + out: *1any +- go: AddPairs + asm: "VHADDP[SD]" # floats + in: *2any + out: *1any +- go: SubPairs + asm: "VHSUBP[SD]" # floats + in: *2any + out: *1any +- go: AddPairsSaturated + asm: "VPHADDS[DW]" + in: *2int + out: *1int +- go: SubPairsSaturated + asm: "VPHSUBS[DW]" + in: *2int + out: *1int diff --git a/src/simd/_gen/simdgen/ops/BitwiseLogic/categories.yaml b/src/simd/_gen/simdgen/ops/BitwiseLogic/categories.yaml new file mode 100644 index 00000000000000..3142d1910d364d --- /dev/null +++ b/src/simd/_gen/simdgen/ops/BitwiseLogic/categories.yaml @@ -0,0 +1,20 @@ +!sum +- go: And + commutative: true + documentation: !string |- + // NAME performs a bitwise AND operation between two vectors. +- go: Or + commutative: true + documentation: !string |- + // NAME performs a bitwise OR operation between two vectors. +- go: AndNot + commutative: false + documentation: !string |- + // NAME performs a bitwise x &^ y. +- go: Xor + commutative: true + documentation: !string |- + // NAME performs a bitwise XOR operation between two vectors. + +# We also have PTEST and VPTERNLOG, those should be hidden from the users +# and only appear in rewrite rules. diff --git a/src/simd/_gen/simdgen/ops/BitwiseLogic/go.yaml b/src/simd/_gen/simdgen/ops/BitwiseLogic/go.yaml new file mode 100644 index 00000000000000..ab344438fb27dc --- /dev/null +++ b/src/simd/_gen/simdgen/ops/BitwiseLogic/go.yaml @@ -0,0 +1,128 @@ +!sum +# In the XED data, *all* floating point bitwise logic operation has their +# operand type marked as uint. We are not trying to understand why Intel +# decided that they want FP bit-wise logic operations, but this irregularity +# has to be dealed with in separate rules with some overwrites. + +# For many bit-wise operations, we have the following non-orthogonal +# choices: +# +# - Non-masked AVX operations have no element width (because it +# doesn't matter), but only cover 128 and 256 bit vectors. 
+# +# - Masked AVX-512 operations have an element width (because it needs +# to know how to interpret the mask), and cover 128, 256, and 512 bit +# vectors. These only cover 32- and 64-bit element widths. +# +# - Non-masked AVX-512 operations still have an element width (because +# they're just the masked operations with an implicit K0 mask) but it +# doesn't matter! This is the only option for non-masked 512 bit +# operations, and we can pick any of the element widths. +# +# We unify with ALL of these operations and the compiler generator +# picks when there are multiple options. + +# TODO: We don't currently generate unmasked bit-wise operations on 512 bit +# vectors of 8- or 16-bit elements. AVX-512 only has *masked* bit-wise +# operations for 32- and 64-bit elements; while the element width doesn't matter +# for unmasked operations, right now we don't realize that we can just use the +# 32- or 64-bit version for the unmasked form. Maybe in the XED decoder we +# should recognize bit-wise operations when generating unmasked versions and +# omit the element width. + +# For binary operations, we constrain their two inputs and one output to the +# same Go type using a variable. + +- go: And + asm: "VPAND[DQ]?" + in: + - &any + go: $t + - *any + out: + - *any + +- go: And + asm: "VPANDD" # Fill in the gap, And is missing for Uint8x64 and Int8x64 + inVariant: [] + in: &twoI8x64 + - &i8x64 + go: $t + overwriteElementBits: 8 + - *i8x64 + out: &oneI8x64 + - *i8x64 + +- go: And + asm: "VPANDD" # Fill in the gap, And is missing for Uint16x32 and Int16x32 + inVariant: [] + in: &twoI16x32 + - &i16x32 + go: $t + overwriteElementBits: 16 + - *i16x32 + out: &oneI16x32 + - *i16x32 + +- go: AndNot + asm: "VPANDN[DQ]?" + operandOrder: "21" # switch the arg order + in: + - *any + - *any + out: + - *any + +- go: AndNot + asm: "VPANDND" # Fill in the gap, AndNot is missing for Uint8x64 and Int8x64 + operandOrder: "21" # switch the arg order + inVariant: [] + in: *twoI8x64 + out: *oneI8x64 + +- go: AndNot + asm: "VPANDND" # Fill in the gap, AndNot is missing for Uint16x32 and Int16x32 + operandOrder: "21" # switch the arg order + inVariant: [] + in: *twoI16x32 + out: *oneI16x32 + +- go: Or + asm: "VPOR[DQ]?" + in: + - *any + - *any + out: + - *any + +- go: Or + asm: "VPORD" # Fill in the gap, Or is missing for Uint8x64 and Int8x64 + inVariant: [] + in: *twoI8x64 + out: *oneI8x64 + +- go: Or + asm: "VPORD" # Fill in the gap, Or is missing for Uint16x32 and Int16x32 + inVariant: [] + in: *twoI16x32 + out: *oneI16x32 + +- go: Xor + asm: "VPXOR[DQ]?" + in: + - *any + - *any + out: + - *any + +- go: Xor + asm: "VPXORD" # Fill in the gap, Or is missing for Uint8x64 and Int8x64 + inVariant: [] + in: *twoI8x64 + out: *oneI8x64 + +- go: Xor + asm: "VPXORD" # Fill in the gap, Or is missing for Uint16x32 and Int16x32 + inVariant: [] + in: *twoI16x32 + out: *oneI16x32 \ No newline at end of file diff --git a/src/simd/_gen/simdgen/ops/Compares/categories.yaml b/src/simd/_gen/simdgen/ops/Compares/categories.yaml new file mode 100644 index 00000000000000..aa07ade27e693d --- /dev/null +++ b/src/simd/_gen/simdgen/ops/Compares/categories.yaml @@ -0,0 +1,43 @@ +!sum +# const imm predicate(holds for both float and int|uint): +# 0: Equal +# 1: Less +# 2: LessEqual +# 4: NotEqual +# 5: GreaterEqual +# 6: Greater +- go: Equal + constImm: 0 + commutative: true + documentation: !string |- + // NAME compares for equality. +- go: Less + constImm: 1 + commutative: false + documentation: !string |- + // NAME compares for less than. 
+- go: LessEqual + constImm: 2 + commutative: false + documentation: !string |- + // NAME compares for less than or equal. +- go: IsNan # For float only. + constImm: 3 + commutative: true + documentation: !string |- + // NAME checks if elements are NaN. Use as x.IsNan(x). +- go: NotEqual + constImm: 4 + commutative: true + documentation: !string |- + // NAME compares for inequality. +- go: GreaterEqual + constImm: 13 + commutative: false + documentation: !string |- + // NAME compares for greater than or equal. +- go: Greater + constImm: 14 + commutative: false + documentation: !string |- + // NAME compares for greater than. diff --git a/src/simd/_gen/simdgen/ops/Compares/go.yaml b/src/simd/_gen/simdgen/ops/Compares/go.yaml new file mode 100644 index 00000000000000..0f9162839c9b32 --- /dev/null +++ b/src/simd/_gen/simdgen/ops/Compares/go.yaml @@ -0,0 +1,141 @@ +!sum +# Ints +- go: Equal + asm: "V?PCMPEQ[BWDQ]" + in: + - &any + go: $t + - *any + out: + - &anyvregToMask + go: $t + overwriteBase: int + overwriteClass: mask +- go: Greater + asm: "V?PCMPGT[BWDQ]" + in: + - &int + go: $t + base: int + - *int + out: + - *anyvregToMask +# 256-bit VCMPGTQ's output elemBits is marked 32-bit in the XED data, we +# believe this is an error, so add this definition to overwrite. +- go: Greater + asm: "VPCMPGTQ" + in: + - &int64 + go: $t + base: int + elemBits: 64 + - *int64 + out: + - base: int + elemBits: 32 + overwriteElementBits: 64 + overwriteClass: mask + overwriteBase: int + +# TODO these are redundant with VPCMP operations. +# AVX-512 compares produce masks. +- go: Equal + asm: "V?PCMPEQ[BWDQ]" + in: + - *any + - *any + out: + - class: mask +- go: Greater + asm: "V?PCMPGT[BWDQ]" + in: + - *int + - *int + out: + - class: mask + +# MASKED signed comparisons for X/Y registers +# unmasked would clash with emulations on AVX2 +- go: (Equal|Greater|Less|LessEqual|GreaterEqual|NotEqual) + asm: "VPCMP[BWDQ]" + in: + - &int + bits: (128|256) + go: $t + base: int + - *int + - class: immediate + const: 0 # Just a placeholder, will be overwritten by const imm porting. + inVariant: + - class: mask + out: + - class: mask + +# MASKED unsigned comparisons for X/Y registers +# unmasked would clash with emulations on AVX2 +- go: (Equal|Greater|Less|LessEqual|GreaterEqual|NotEqual) + asm: "VPCMPU[BWDQ]" + in: + - &uint + bits: (128|256) + go: $t + base: uint + - *uint + - class: immediate + const: 0 + inVariant: + - class: mask + out: + - class: mask + +# masked/unmasked signed comparisons for Z registers +- go: (Equal|Greater|Less|LessEqual|GreaterEqual|NotEqual) + asm: "VPCMP[BWDQ]" + in: + - &int + bits: 512 + go: $t + base: int + - *int + - class: immediate + const: 0 # Just a placeholder, will be overwritten by const imm porting. 
+ out: + - class: mask + +# masked/unmasked unsigned comparisons for Z registers +- go: (Equal|Greater|Less|LessEqual|GreaterEqual|NotEqual) + asm: "VPCMPU[BWDQ]" + in: + - &uint + bits: 512 + go: $t + base: uint + - *uint + - class: immediate + const: 0 + out: + - class: mask + +# Floats +- go: Equal|Greater|Less|LessEqual|GreaterEqual|NotEqual|IsNan + asm: "VCMPP[SD]" + in: + - &float + go: $t + base: float + - *float + - class: immediate + const: 0 + out: + - go: $t + overwriteBase: int + overwriteClass: mask +- go: (Equal|Greater|Less|LessEqual|GreaterEqual|NotEqual|IsNan) + asm: "VCMPP[SD]" + in: + - *float + - *float + - class: immediate + const: 0 + out: + - class: mask \ No newline at end of file diff --git a/src/simd/_gen/simdgen/ops/Converts/categories.yaml b/src/simd/_gen/simdgen/ops/Converts/categories.yaml new file mode 100644 index 00000000000000..cc6c419dcc47b4 --- /dev/null +++ b/src/simd/_gen/simdgen/ops/Converts/categories.yaml @@ -0,0 +1,10 @@ +!sum +- go: ConvertToInt32 + commutative: false + documentation: !string |- + // ConvertToInt32 converts element values to int32. + +- go: ConvertToUint32 + commutative: false + documentation: !string |- + // ConvertToUint32Masked converts element values to uint32. diff --git a/src/simd/_gen/simdgen/ops/Converts/go.yaml b/src/simd/_gen/simdgen/ops/Converts/go.yaml new file mode 100644 index 00000000000000..4e251728bf9b70 --- /dev/null +++ b/src/simd/_gen/simdgen/ops/Converts/go.yaml @@ -0,0 +1,21 @@ +!sum +- go: ConvertToInt32 + asm: "VCVTTPS2DQ" + in: + - &fp + go: $t + base: float + out: + - &i32 + go: $u + base: int + elemBits: 32 +- go: ConvertToUint32 + asm: "VCVTPS2UDQ" + in: + - *fp + out: + - &u32 + go: $u + base: uint + elemBits: 32 diff --git a/src/simd/_gen/simdgen/ops/FPonlyArith/categories.yaml b/src/simd/_gen/simdgen/ops/FPonlyArith/categories.yaml new file mode 100644 index 00000000000000..f2d8af68867e11 --- /dev/null +++ b/src/simd/_gen/simdgen/ops/FPonlyArith/categories.yaml @@ -0,0 +1,85 @@ +!sum +- go: Div + commutative: false + documentation: !string |- + // NAME divides elements of two vectors. +- go: Sqrt + commutative: false + documentation: !string |- + // NAME computes the square root of each element. +- go: Reciprocal + commutative: false + documentation: !string |- + // NAME computes an approximate reciprocal of each element. +- go: ReciprocalSqrt + commutative: false + documentation: !string |- + // NAME computes an approximate reciprocal of the square root of each element. +- go: Scale + commutative: false + documentation: !string |- + // NAME multiplies elements by a power of 2. +- go: RoundToEven + commutative: false + constImm: 0 + documentation: !string |- + // NAME rounds elements to the nearest integer. +- go: RoundToEvenScaled + commutative: false + constImm: 0 + documentation: !string |- + // NAME rounds elements with specified precision. +- go: RoundToEvenScaledResidue + commutative: false + constImm: 0 + documentation: !string |- + // NAME computes the difference after rounding with specified precision. +- go: Floor + commutative: false + constImm: 1 + documentation: !string |- + // NAME rounds elements down to the nearest integer. +- go: FloorScaled + commutative: false + constImm: 1 + documentation: !string |- + // NAME rounds elements down with specified precision. +- go: FloorScaledResidue + commutative: false + constImm: 1 + documentation: !string |- + // NAME computes the difference after flooring with specified precision. 
+- go: Ceil + commutative: false + constImm: 2 + documentation: !string |- + // NAME rounds elements up to the nearest integer. +- go: CeilScaled + commutative: false + constImm: 2 + documentation: !string |- + // NAME rounds elements up with specified precision. +- go: CeilScaledResidue + commutative: false + constImm: 2 + documentation: !string |- + // NAME computes the difference after ceiling with specified precision. +- go: Trunc + commutative: false + constImm: 3 + documentation: !string |- + // NAME truncates elements towards zero. +- go: TruncScaled + commutative: false + constImm: 3 + documentation: !string |- + // NAME truncates elements with specified precision. +- go: TruncScaledResidue + commutative: false + constImm: 3 + documentation: !string |- + // NAME computes the difference after truncating with specified precision. +- go: AddSub + commutative: false + documentation: !string |- + // NAME subtracts even elements and adds odd elements of two vectors. diff --git a/src/simd/_gen/simdgen/ops/FPonlyArith/go.yaml b/src/simd/_gen/simdgen/ops/FPonlyArith/go.yaml new file mode 100644 index 00000000000000..e164f7b70a4c9e --- /dev/null +++ b/src/simd/_gen/simdgen/ops/FPonlyArith/go.yaml @@ -0,0 +1,62 @@ +!sum +- go: Div + asm: "V?DIVP[SD]" + in: &2fp + - &fp + go: $t + base: float + - *fp + out: &1fp + - *fp +- go: Sqrt + asm: "V?SQRTP[SD]" + in: *1fp + out: *1fp +# TODO: Provide separate methods for 12-bit precision and 14-bit precision? +- go: Reciprocal + asm: "VRCP(14)?P[SD]" + in: *1fp + out: *1fp +- go: ReciprocalSqrt + asm: "V?RSQRT(14)?P[SD]" + in: *1fp + out: *1fp +- go: Scale + asm: "VSCALEFP[SD]" + in: *2fp + out: *1fp + +- go: "RoundToEven|Ceil|Floor|Trunc" + asm: "VROUNDP[SD]" + in: + - *fp + - class: immediate + const: 0 # place holder + out: *1fp + +- go: "(RoundToEven|Ceil|Floor|Trunc)Scaled" + asm: "VRNDSCALEP[SD]" + in: + - *fp + - class: immediate + const: 0 # place holder + immOffset: 4 # "M", round to numbers with M digits after dot(by means of binary number). + name: prec + out: *1fp +- go: "(RoundToEven|Ceil|Floor|Trunc)ScaledResidue" + asm: "VREDUCEP[SD]" + in: + - *fp + - class: immediate + const: 0 # place holder + immOffset: 4 # "M", round to numbers with M digits after dot(by means of binary number). + name: prec + out: *1fp + +- go: "AddSub" + asm: "VADDSUBP[SD]" + in: + - *fp + - *fp + out: + - *fp diff --git a/src/simd/_gen/simdgen/ops/GaloisField/categories.yaml b/src/simd/_gen/simdgen/ops/GaloisField/categories.yaml new file mode 100644 index 00000000000000..258246253447e2 --- /dev/null +++ b/src/simd/_gen/simdgen/ops/GaloisField/categories.yaml @@ -0,0 +1,21 @@ +!sum +- go: GaloisFieldAffineTransform + commutative: false + documentation: !string |- + // NAME computes an affine transformation in GF(2^8): + // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; + // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y + // corresponding to a group of 8 elements in x. +- go: GaloisFieldAffineTransformInverse + commutative: false + documentation: !string |- + // NAME computes an affine transformation in GF(2^8), + // with x inverted with respect to reduction polynomial x^8 + x^4 + x^3 + x + 1: + // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; + // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y + // corresponding to a group of 8 elements in x. 
+- go: GaloisFieldMul + commutative: false + documentation: !string |- + // NAME computes element-wise GF(2^8) multiplication with + // reduction polynomial x^8 + x^4 + x^3 + x + 1. diff --git a/src/simd/_gen/simdgen/ops/GaloisField/go.yaml b/src/simd/_gen/simdgen/ops/GaloisField/go.yaml new file mode 100644 index 00000000000000..e86211cb46aeba --- /dev/null +++ b/src/simd/_gen/simdgen/ops/GaloisField/go.yaml @@ -0,0 +1,32 @@ +!sum +- go: GaloisFieldAffineTransform + asm: VGF2P8AFFINEQB + operandOrder: 2I # 2nd operand, then immediate + in: &AffineArgs + - &uint8 + go: $t + base: uint + - &uint8x8 + go: $t2 + base: uint + - &pureImmVar + class: immediate + immOffset: 0 + name: b + out: + - *uint8 + +- go: GaloisFieldAffineTransformInverse + asm: VGF2P8AFFINEINVQB + operandOrder: 2I # 2nd operand, then immediate + in: *AffineArgs + out: + - *uint8 + +- go: GaloisFieldMul + asm: VGF2P8MULB + in: + - *uint8 + - *uint8 + out: + - *uint8 diff --git a/src/simd/_gen/simdgen/ops/IntOnlyArith/categories.yaml b/src/simd/_gen/simdgen/ops/IntOnlyArith/categories.yaml new file mode 100644 index 00000000000000..bf33642a1147e9 --- /dev/null +++ b/src/simd/_gen/simdgen/ops/IntOnlyArith/categories.yaml @@ -0,0 +1,21 @@ +!sum +- go: Average + commutative: true + documentation: !string |- + // NAME computes the rounded average of corresponding elements. +- go: Abs + commutative: false + # Unary operation, not commutative + documentation: !string |- + // NAME computes the absolute value of each element. +- go: CopySign + # Applies sign of second operand to first: sign(val, sign_src) + commutative: false + documentation: !string |- + // NAME returns the product of the first operand with -1, 0, or 1, + // whichever constant is nearest to the value of the second operand. + # Sign does not have masked version +- go: OnesCount + commutative: false + documentation: !string |- + // NAME counts the number of set bits in each element. diff --git a/src/simd/_gen/simdgen/ops/IntOnlyArith/go.yaml b/src/simd/_gen/simdgen/ops/IntOnlyArith/go.yaml new file mode 100644 index 00000000000000..54938b4f2e2fe3 --- /dev/null +++ b/src/simd/_gen/simdgen/ops/IntOnlyArith/go.yaml @@ -0,0 +1,45 @@ +!sum +# Average (unsigned byte, unsigned word) +# Instructions: VPAVGB, VPAVGW +- go: Average + asm: "VPAVG[BW]" # Matches VPAVGB (byte) and VPAVGW (word) + in: + - &uint_t # $t will be Uint8xN for VPAVGB, Uint16xN for VPAVGW + go: $t + base: uint + - *uint_t + out: + - *uint_t + +# Absolute Value (signed byte, word, dword, qword) +# Instructions: VPABSB, VPABSW, VPABSD, VPABSQ +- go: Abs + asm: "VPABS[BWDQ]" # Matches VPABSB, VPABSW, VPABSD, VPABSQ + in: + - &int_t # $t will be Int8xN, Int16xN, Int32xN, Int64xN + go: $t + base: int + out: + - *int_t # Output is magnitude, fits in the same signed type + +# Sign Operation (signed byte, word, dword) +# Applies sign of second operand to the first. 
+# Instructions: VPSIGNB, VPSIGNW, VPSIGND
+- go: CopySign
+  asm: "VPSIGN[BWD]" # Matches VPSIGNB, VPSIGNW, VPSIGND
+  in:
+  - *int_t # value to apply sign to
+  - *int_t # value from which to take the sign
+  out:
+  - *int_t
+
+# Population Count (count set bits in each element)
+# Instructions: VPOPCNTB, VPOPCNTW (AVX512_BITALG)
+# VPOPCNTD, VPOPCNTQ (AVX512_VPOPCNTDQ)
+- go: OnesCount
+  asm: "VPOPCNT[BWDQ]"
+  in:
+  - &any
+    go: $t
+  out:
+  - *any
diff --git a/src/simd/_gen/simdgen/ops/MLOps/categories.yaml b/src/simd/_gen/simdgen/ops/MLOps/categories.yaml
new file mode 100644
index 00000000000000..97381e1e347865
--- /dev/null
+++ b/src/simd/_gen/simdgen/ops/MLOps/categories.yaml
@@ -0,0 +1,47 @@
+!sum
+- go: DotProdPairs
+  commutative: false
+  documentation: !string |-
+    // NAME multiplies the elements and adds the pairs together,
+    // yielding a vector of half as many elements with twice the input element size.
+# TODO: maybe simplify this name within the receiver-type + method-naming scheme we use.
+- go: DotProdPairsSaturated
+  commutative: false
+  documentation: !string |-
+    // NAME multiplies the elements and adds the pairs together with saturation,
+    // yielding a vector of half as many elements with twice the input element size.
+# QuadDotProd, i.e. VPDPBUSD(S), are operations with src/dst on the same register; we are not supporting this as of now.
+# - go: DotProdBroadcast
+#   commutative: true
+#   # documentation: !string |-
+#     // NAME multiplies all elements and broadcasts the sum.
+- go: AddDotProdQuadruple
+  commutative: false
+  documentation: !string |-
+    // NAME performs dot products on groups of 4 elements of x and y and then adds z.
+- go: AddDotProdQuadrupleSaturated
+  commutative: false
+  documentation: !string |-
+    // NAME performs dot products on groups of 4 elements of x and y and then adds z with saturation.
+- go: AddDotProdPairs
+  commutative: false
+  noTypes: "true"
+  noGenericOps: "true"
+  documentation: !string |-
+    // NAME performs dot products on pairs of elements of y and z and then adds x.
+- go: AddDotProdPairsSaturated
+  commutative: false
+  documentation: !string |-
+    // NAME performs dot products on pairs of elements of y and z and then adds x with saturation.
+- go: MulAdd
+  commutative: false
+  documentation: !string |-
+    // NAME performs a fused (x * y) + z.
+- go: MulAddSub
+  commutative: false
+  documentation: !string |-
+    // NAME performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements.
+- go: MulSubAdd
+  commutative: false
+  documentation: !string |-
+    // NAME performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements.
diff --git a/src/simd/_gen/simdgen/ops/MLOps/go.yaml b/src/simd/_gen/simdgen/ops/MLOps/go.yaml new file mode 100644 index 00000000000000..f6b6f135b8c338 --- /dev/null +++ b/src/simd/_gen/simdgen/ops/MLOps/go.yaml @@ -0,0 +1,113 @@ +!sum +- go: DotProdPairs + asm: VPMADDWD + in: + - &int + go: $t + base: int + - *int + out: + - &int2 # The elemBits are different + go: $t2 + base: int +- go: DotProdPairsSaturated + asm: VPMADDUBSW + in: + - &uint + go: $t + base: uint + overwriteElementBits: 8 + - &int3 + go: $t3 + base: int + overwriteElementBits: 8 + out: + - *int2 +# - go: DotProdBroadcast +# asm: VDPP[SD] +# in: +# - &dpb_src +# go: $t +# - *dpb_src +# - class: immediate +# const: 127 +# out: +# - *dpb_src +- go: AddDotProdQuadruple + asm: "VPDPBUSD" + operandOrder: "31" # switch operand 3 and 1 + in: + - &qdpa_acc + go: $t_acc + base: int + elemBits: 32 + - &qdpa_src1 + go: $t_src1 + base: uint + overwriteElementBits: 8 + - &qdpa_src2 + go: $t_src2 + base: int + overwriteElementBits: 8 + out: + - *qdpa_acc +- go: AddDotProdQuadrupleSaturated + asm: "VPDPBUSDS" + operandOrder: "31" # switch operand 3 and 1 + in: + - *qdpa_acc + - *qdpa_src1 + - *qdpa_src2 + out: + - *qdpa_acc +- go: AddDotProdPairs + asm: "VPDPWSSD" + in: + - &pdpa_acc + go: $t_acc + base: int + elemBits: 32 + - &pdpa_src1 + go: $t_src1 + base: int + overwriteElementBits: 16 + - &pdpa_src2 + go: $t_src2 + base: int + overwriteElementBits: 16 + out: + - *pdpa_acc +- go: AddDotProdPairsSaturated + asm: "VPDPWSSDS" + in: + - *pdpa_acc + - *pdpa_src1 + - *pdpa_src2 + out: + - *pdpa_acc +- go: MulAdd + asm: "VFMADD213PS|VFMADD213PD" + in: + - &fma_op + go: $t + base: float + - *fma_op + - *fma_op + out: + - *fma_op +- go: MulAddSub + asm: "VFMADDSUB213PS|VFMADDSUB213PD" + in: + - *fma_op + - *fma_op + - *fma_op + out: + - *fma_op +- go: MulSubAdd + asm: "VFMSUBADD213PS|VFMSUBADD213PD" + in: + - *fma_op + - *fma_op + - *fma_op + out: + - *fma_op \ No newline at end of file diff --git a/src/simd/_gen/simdgen/ops/MinMax/categories.yaml b/src/simd/_gen/simdgen/ops/MinMax/categories.yaml new file mode 100644 index 00000000000000..a7e30f4693e87c --- /dev/null +++ b/src/simd/_gen/simdgen/ops/MinMax/categories.yaml @@ -0,0 +1,9 @@ +!sum +- go: Max + commutative: true + documentation: !string |- + // NAME computes the maximum of corresponding elements. +- go: Min + commutative: true + documentation: !string |- + // NAME computes the minimum of corresponding elements. 
diff --git a/src/simd/_gen/simdgen/ops/MinMax/go.yaml b/src/simd/_gen/simdgen/ops/MinMax/go.yaml new file mode 100644 index 00000000000000..55f1e18b3dfbed --- /dev/null +++ b/src/simd/_gen/simdgen/ops/MinMax/go.yaml @@ -0,0 +1,42 @@ +!sum +- go: Max + asm: "V?PMAXS[BWDQ]" + in: &2int + - &int + go: $t + base: int + - *int + out: &1int + - *int +- go: Max + asm: "V?PMAXU[BWDQ]" + in: &2uint + - &uint + go: $t + base: uint + - *uint + out: &1uint + - *uint + +- go: Min + asm: "V?PMINS[BWDQ]" + in: *2int + out: *1int +- go: Min + asm: "V?PMINU[BWDQ]" + in: *2uint + out: *1uint + +- go: Max + asm: "V?MAXP[SD]" + in: &2float + - &float + go: $t + base: float + - *float + out: &1float + - *float +- go: Min + asm: "V?MINP[SD]" + in: *2float + out: *1float diff --git a/src/simd/_gen/simdgen/ops/Moves/categories.yaml b/src/simd/_gen/simdgen/ops/Moves/categories.yaml new file mode 100644 index 00000000000000..ef8e0360509333 --- /dev/null +++ b/src/simd/_gen/simdgen/ops/Moves/categories.yaml @@ -0,0 +1,72 @@ +!sum +- go: SetElem + commutative: false + documentation: !string |- + // NAME sets a single constant-indexed element's value. +- go: GetElem + commutative: false + documentation: !string |- + // NAME retrieves a single constant-indexed element's value. +- go: SetLo + commutative: false + constImm: 0 + documentation: !string |- + // NAME returns x with its lower half set to y. +- go: GetLo + commutative: false + constImm: 0 + documentation: !string |- + // NAME returns the lower half of x. +- go: SetHi + commutative: false + constImm: 1 + documentation: !string |- + // NAME returns x with its upper half set to y. +- go: GetHi + commutative: false + constImm: 1 + documentation: !string |- + // NAME returns the upper half of x. +- go: Permute + commutative: false + documentation: !string |- + // NAME performs a full permutation of vector x using indices: + // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} + // Only the needed bits to represent x's index are used in indices' elements. +- go: Permute2 # Permute2 is only available on or after AVX512 + commutative: false + documentation: !string |- + // NAME performs a full permutation of vector x, y using indices: + // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} + // where xy is x appending y. + // Only the needed bits to represent xy's index are used in indices' elements. +- go: Compress + commutative: false + documentation: !string |- + // NAME performs a compression on vector x using mask by + // selecting elements as indicated by mask, and pack them to lower indexed elements. +- go: blend + commutative: false + documentation: !string |- + // NAME blends two vectors based on mask values, choosing either + // the first or the second based on whether the third is false or true +- go: Expand + commutative: false + documentation: !string |- + // NAME performs an expansion on a vector x whose elements are packed to lower parts. + // The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +- go: Broadcast128 + commutative: false + documentation: !string |- + // NAME copies element zero of its (128-bit) input to all elements of + // the 128-bit output vector. +- go: Broadcast256 + commutative: false + documentation: !string |- + // NAME copies element zero of its (128-bit) input to all elements of + // the 256-bit output vector. 
+- go: Broadcast512 + commutative: false + documentation: !string |- + // NAME copies element zero of its (128-bit) input to all elements of + // the 512-bit output vector. diff --git a/src/simd/_gen/simdgen/ops/Moves/go.yaml b/src/simd/_gen/simdgen/ops/Moves/go.yaml new file mode 100644 index 00000000000000..71981c12af7d12 --- /dev/null +++ b/src/simd/_gen/simdgen/ops/Moves/go.yaml @@ -0,0 +1,372 @@ +!sum +- go: SetElem + asm: "VPINSR[BWDQ]" + in: + - &t + class: vreg + base: $b + - class: greg + base: $b + lanes: 1 # Scalar, darn it! + - &imm + class: immediate + immOffset: 0 + name: index + out: + - *t + +- go: SetElem + asm: "VPINSR[DQ]" + in: + - &t + class: vreg + base: int + OverwriteBase: float + - class: greg + base: int + OverwriteBase: float + lanes: 1 # Scalar, darn it! + - &imm + class: immediate + immOffset: 0 + name: index + out: + - *t + +- go: GetElem + asm: "VPEXTR[BWDQ]" + in: + - class: vreg + base: $b + elemBits: $e + - *imm + out: + - class: greg + base: $b + bits: $e + +- go: "SetHi|SetLo" + asm: "VINSERTI128|VINSERTI64X4" + inVariant: [] + in: + - &i8x2N + class: vreg + base: $t + OverwriteElementBits: 8 + - &i8xN + class: vreg + base: $t + OverwriteElementBits: 8 + - &imm01 # This immediate should be only 0 or 1 + class: immediate + const: 0 # place holder + name: index + out: + - *i8x2N + +- go: "GetHi|GetLo" + asm: "VEXTRACTI128|VEXTRACTI64X4" + inVariant: [] + in: + - *i8x2N + - *imm01 + out: + - *i8xN + +- go: "SetHi|SetLo" + asm: "VINSERTI128|VINSERTI64X4" + inVariant: [] + in: + - &i16x2N + class: vreg + base: $t + OverwriteElementBits: 16 + - &i16xN + class: vreg + base: $t + OverwriteElementBits: 16 + - *imm01 + out: + - *i16x2N + +- go: "GetHi|GetLo" + asm: "VEXTRACTI128|VEXTRACTI64X4" + inVariant: [] + in: + - *i16x2N + - *imm01 + out: + - *i16xN + +- go: "SetHi|SetLo" + asm: "VINSERTI128|VINSERTI64X4" + inVariant: [] + in: + - &i32x2N + class: vreg + base: $t + OverwriteElementBits: 32 + - &i32xN + class: vreg + base: $t + OverwriteElementBits: 32 + - *imm01 + out: + - *i32x2N + +- go: "GetHi|GetLo" + asm: "VEXTRACTI128|VEXTRACTI64X4" + inVariant: [] + in: + - *i32x2N + - *imm01 + out: + - *i32xN + +- go: "SetHi|SetLo" + asm: "VINSERTI128|VINSERTI64X4" + inVariant: [] + in: + - &i64x2N + class: vreg + base: $t + OverwriteElementBits: 64 + - &i64xN + class: vreg + base: $t + OverwriteElementBits: 64 + - *imm01 + out: + - *i64x2N + +- go: "GetHi|GetLo" + asm: "VEXTRACTI128|VEXTRACTI64X4" + inVariant: [] + in: + - *i64x2N + - *imm01 + out: + - *i64xN + +- go: "SetHi|SetLo" + asm: "VINSERTF128|VINSERTF64X4" + inVariant: [] + in: + - &f32x2N + class: vreg + base: $t + OverwriteElementBits: 32 + - &f32xN + class: vreg + base: $t + OverwriteElementBits: 32 + - *imm01 + out: + - *f32x2N + +- go: "GetHi|GetLo" + asm: "VEXTRACTF128|VEXTRACTF64X4" + inVariant: [] + in: + - *f32x2N + - *imm01 + out: + - *f32xN + +- go: "SetHi|SetLo" + asm: "VINSERTF128|VINSERTF64X4" + inVariant: [] + in: + - &f64x2N + class: vreg + base: $t + OverwriteElementBits: 64 + - &f64xN + class: vreg + base: $t + OverwriteElementBits: 64 + - *imm01 + out: + - *f64x2N + +- go: "GetHi|GetLo" + asm: "VEXTRACTF128|VEXTRACTF64X4" + inVariant: [] + in: + - *f64x2N + - *imm01 + out: + - *f64xN + +- go: Permute + asm: "VPERM[BWDQ]|VPERMP[SD]" + operandOrder: "21Type1" + in: + - &anyindices + go: $t + name: indices + overwriteBase: uint + - &any + go: $t + out: + - *any + +- go: Permute2 + asm: "VPERMI2[BWDQ]|VPERMI2P[SD]" + # Because we are overwriting the receiver's type, we + # have to move the 
receiver to be a parameter so that + # we can have no duplication. + operandOrder: "231Type1" + in: + - *anyindices # result in arg 0 + - *any + - *any + out: + - *any + +- go: Compress + asm: "VPCOMPRESS[BWDQ]|VCOMPRESSP[SD]" + in: + # The mask in Compress is a control mask rather than a write mask, so it's not optional. + - class: mask + - *any + out: + - *any + +# For now a non-public method because +# (1) [OverwriteClass] must be set together with [OverwriteBase] +# (2) "simdgen does not support [OverwriteClass] in inputs". +# That means the signature is wrong. +- go: blend + asm: VPBLENDVB + in: + - &v + go: $t + class: vreg + base: int + - *v + - + class: vreg + base: int + name: mask + out: + - *v + +# For AVX512 +- go: blend + asm: VPBLENDM[BWDQ] + in: + - &v + go: $t + bits: 512 + class: vreg + base: int + - *v + inVariant: + - + class: mask + out: + - *v + +- go: Expand + asm: "VPEXPAND[BWDQ]|VEXPANDP[SD]" + in: + # The mask in Expand is a control mask rather than a write mask, so it's not optional. + - class: mask + - *any + out: + - *any + +- go: Broadcast128 + asm: VPBROADCAST[BWDQ] + in: + - class: vreg + bits: 128 + elemBits: $e + base: $b + out: + - class: vreg + bits: 128 + elemBits: $e + base: $b + +# weirdly, this one case on AVX2 is memory-operand-only +- go: Broadcast128 + asm: VPBROADCASTQ + in: + - class: vreg + bits: 128 + elemBits: 64 + base: int + OverwriteBase: float + out: + - class: vreg + bits: 128 + elemBits: 64 + base: int + OverwriteBase: float + +- go: Broadcast256 + asm: VPBROADCAST[BWDQ] + in: + - class: vreg + bits: 128 + elemBits: $e + base: $b + out: + - class: vreg + bits: 256 + elemBits: $e + base: $b + +- go: Broadcast512 + asm: VPBROADCAST[BWDQ] + in: + - class: vreg + bits: 128 + elemBits: $e + base: $b + out: + - class: vreg + bits: 512 + elemBits: $e + base: $b + +- go: Broadcast128 + asm: VBROADCASTS[SD] + in: + - class: vreg + bits: 128 + elemBits: $e + base: $b + out: + - class: vreg + bits: 128 + elemBits: $e + base: $b + +- go: Broadcast256 + asm: VBROADCASTS[SD] + in: + - class: vreg + bits: 128 + elemBits: $e + base: $b + out: + - class: vreg + bits: 256 + elemBits: $e + base: $b + +- go: Broadcast512 + asm: VBROADCASTS[SD] + in: + - class: vreg + bits: 128 + elemBits: $e + base: $b + out: + - class: vreg + bits: 512 + elemBits: $e + base: $b diff --git a/src/simd/_gen/simdgen/ops/Mul/categories.yaml b/src/simd/_gen/simdgen/ops/Mul/categories.yaml new file mode 100644 index 00000000000000..92491b51d466e8 --- /dev/null +++ b/src/simd/_gen/simdgen/ops/Mul/categories.yaml @@ -0,0 +1,14 @@ +!sum +- go: Mul + commutative: true + documentation: !string |- + // NAME multiplies corresponding elements of two vectors. +- go: MulEvenWiden + commutative: true + documentation: !string |- + // NAME multiplies even-indexed elements, widening the result. + // Result[i] = v1.Even[i] * v2.Even[i]. +- go: MulHigh + commutative: true + documentation: !string |- + // NAME multiplies elements and stores the high part of the result. diff --git a/src/simd/_gen/simdgen/ops/Mul/go.yaml b/src/simd/_gen/simdgen/ops/Mul/go.yaml new file mode 100644 index 00000000000000..c0205a689975d7 --- /dev/null +++ b/src/simd/_gen/simdgen/ops/Mul/go.yaml @@ -0,0 +1,73 @@ +!sum +# "Normal" multiplication is only available for floats. +# This only covers the single and double precision. +- go: Mul + asm: "VMULP[SD]" + in: + - &fp + go: $t + base: float + - *fp + out: + - *fp + +# Integer multiplications. + +# MulEvenWiden +# Dword only. 
+- go: MulEvenWiden + asm: "VPMULDQ" + in: + - &intNot64 + go: $t + elemBits: 8|16|32 + base: int + - *intNot64 + out: + - &int2 + go: $t2 + base: int +- go: MulEvenWiden + asm: "VPMULUDQ" + in: + - &uintNot64 + go: $t + elemBits: 8|16|32 + base: uint + - *uintNot64 + out: + - &uint2 + go: $t2 + base: uint + +# MulHigh +# Word only. +- go: MulHigh + asm: "VPMULHW" + in: + - &int + go: $t + base: int + - *int + out: + - *int +- go: MulHigh + asm: "VPMULHUW" + in: + - &uint + go: $t + base: uint + - *uint + out: + - *uint + +# MulLow +# signed and unsigned are the same for lower bits. +- go: Mul + asm: "VPMULL[WDQ]" + in: + - &any + go: $t + - *any + out: + - *any diff --git a/src/simd/_gen/simdgen/ops/ShiftRotate/categories.yaml b/src/simd/_gen/simdgen/ops/ShiftRotate/categories.yaml new file mode 100644 index 00000000000000..0d0b006cfb6c85 --- /dev/null +++ b/src/simd/_gen/simdgen/ops/ShiftRotate/categories.yaml @@ -0,0 +1,103 @@ +!sum +- go: ShiftAllLeft + nameAndSizeCheck: true + specialLower: sftimm + commutative: false + documentation: !string |- + // NAME shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +- go: ShiftAllRight + signed: false + nameAndSizeCheck: true + specialLower: sftimm + commutative: false + documentation: !string |- + // NAME shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +- go: ShiftAllRight + signed: true + specialLower: sftimm + nameAndSizeCheck: true + commutative: false + documentation: !string |- + // NAME shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. +- go: shiftAllLeftConst # no APIs, only ssa ops. + noTypes: "true" + noGenericOps: "true" + SSAVariant: "const" # to avoid its name colliding with reg version of this instruction, amend this to its ssa op name. + nameAndSizeCheck: true + commutative: false +- go: shiftAllRightConst # no APIs, only ssa ops. + noTypes: "true" + noGenericOps: "true" + SSAVariant: "const" + signed: false + nameAndSizeCheck: true + commutative: false +- go: shiftAllRightConst # no APIs, only ssa ops. + noTypes: "true" + noGenericOps: "true" + SSAVariant: "const" + signed: true + nameAndSizeCheck: true + commutative: false + +- go: ShiftLeft + nameAndSizeCheck: true + commutative: false + documentation: !string |- + // NAME shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +- go: ShiftRight + signed: false + nameAndSizeCheck: true + commutative: false + documentation: !string |- + // NAME shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +- go: ShiftRight + signed: true + nameAndSizeCheck: true + commutative: false + documentation: !string |- + // NAME shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +- go: RotateAllLeft + nameAndSizeCheck: true + commutative: false + documentation: !string |- + // NAME rotates each element to the left by the number of bits specified by the immediate. +- go: RotateLeft + nameAndSizeCheck: true + commutative: false + documentation: !string |- + // NAME rotates each element in x to the left by the number of bits specified by y's corresponding elements. 
+- go: RotateAllRight + nameAndSizeCheck: true + commutative: false + documentation: !string |- + // NAME rotates each element to the right by the number of bits specified by the immediate. +- go: RotateRight + nameAndSizeCheck: true + commutative: false + documentation: !string |- + // NAME rotates each element in x to the right by the number of bits specified by y's corresponding elements. +- go: ShiftAllLeftConcat + nameAndSizeCheck: true + commutative: false + documentation: !string |- + // NAME shifts each element of x to the left by the number of bits specified by the + // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +- go: ShiftAllRightConcat + nameAndSizeCheck: true + commutative: false + documentation: !string |- + // NAME shifts each element of x to the right by the number of bits specified by the + // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +- go: ShiftLeftConcat + nameAndSizeCheck: true + commutative: false + documentation: !string |- + // NAME shifts each element of x to the left by the number of bits specified by the + // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +- go: ShiftRightConcat + nameAndSizeCheck: true + commutative: false + documentation: !string |- + // NAME shifts each element of x to the right by the number of bits specified by the + // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. diff --git a/src/simd/_gen/simdgen/ops/ShiftRotate/go.yaml b/src/simd/_gen/simdgen/ops/ShiftRotate/go.yaml new file mode 100644 index 00000000000000..e7ccdeb06ba49d --- /dev/null +++ b/src/simd/_gen/simdgen/ops/ShiftRotate/go.yaml @@ -0,0 +1,172 @@ +!sum +# Integers +# ShiftAll* +- go: ShiftAllLeft + asm: "VPSLL[WDQ]" + in: + - &any + go: $t + - &vecAsScalar64 + go: "Uint.*" + treatLikeAScalarOfSize: 64 + out: + - *any +- go: ShiftAllRight + signed: false + asm: "VPSRL[WDQ]" + in: + - &uint + go: $t + base: uint + - *vecAsScalar64 + out: + - *uint +- go: ShiftAllRight + signed: true + asm: "VPSRA[WDQ]" + in: + - &int + go: $t + base: int + - *vecAsScalar64 + out: + - *int + +- go: shiftAllLeftConst + asm: "VPSLL[WDQ]" + in: + - *any + - &imm + class: immediate + immOffset: 0 + out: + - *any +- go: shiftAllRightConst + asm: "VPSRL[WDQ]" + in: + - *int + - *imm + out: + - *int +- go: shiftAllRightConst + asm: "VPSRA[WDQ]" + in: + - *uint + - *imm + out: + - *uint + +# Shift* (variable) +- go: ShiftLeft + asm: "VPSLLV[WD]" + in: + - *any + - *any + out: + - *any +# XED data of VPSLLVQ marks the element bits 32 which is off to the actual semantic, we need to overwrite +# it to 64. +- go: ShiftLeft + asm: "VPSLLVQ" + in: + - &anyOverwriteElemBits + go: $t + overwriteElementBits: 64 + - *anyOverwriteElemBits + out: + - *anyOverwriteElemBits +- go: ShiftRight + signed: false + asm: "VPSRLV[WD]" + in: + - *uint + - *uint + out: + - *uint +# XED data of VPSRLVQ needs the same overwrite as VPSLLVQ. 
+- go: ShiftRight + signed: false + asm: "VPSRLVQ" + in: + - &uintOverwriteElemBits + go: $t + base: uint + overwriteElementBits: 64 + - *uintOverwriteElemBits + out: + - *uintOverwriteElemBits +- go: ShiftRight + signed: true + asm: "VPSRAV[WDQ]" + in: + - *int + - *int + out: + - *int + +# Rotate +- go: RotateAllLeft + asm: "VPROL[DQ]" + in: + - *any + - &pureImm + class: immediate + immOffset: 0 + name: shift + out: + - *any +- go: RotateAllRight + asm: "VPROR[DQ]" + in: + - *any + - *pureImm + out: + - *any +- go: RotateLeft + asm: "VPROLV[DQ]" + in: + - *any + - *any + out: + - *any +- go: RotateRight + asm: "VPRORV[DQ]" + in: + - *any + - *any + out: + - *any + +# Bizzare shifts. +- go: ShiftAllLeftConcat + asm: "VPSHLD[WDQ]" + in: + - *any + - *any + - *pureImm + out: + - *any +- go: ShiftAllRightConcat + asm: "VPSHRD[WDQ]" + in: + - *any + - *any + - *pureImm + out: + - *any +- go: ShiftLeftConcat + asm: "VPSHLDV[WDQ]" + in: + - *any + - *any + - *any + out: + - *any +- go: ShiftRightConcat + asm: "VPSHRDV[WDQ]" + in: + - *any + - *any + - *any + out: + - *any diff --git a/src/simd/_gen/simdgen/pprint.go b/src/simd/_gen/simdgen/pprint.go new file mode 100644 index 00000000000000..054b51761d213a --- /dev/null +++ b/src/simd/_gen/simdgen/pprint.go @@ -0,0 +1,73 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "reflect" + "strconv" +) + +func pprints(v any) string { + var pp pprinter + pp.val(reflect.ValueOf(v), 0) + return string(pp.buf) +} + +type pprinter struct { + buf []byte +} + +func (p *pprinter) indent(by int) { + for range by { + p.buf = append(p.buf, '\t') + } +} + +func (p *pprinter) val(v reflect.Value, indent int) { + switch v.Kind() { + default: + p.buf = fmt.Appendf(p.buf, "unsupported kind %v", v.Kind()) + + case reflect.Bool: + p.buf = strconv.AppendBool(p.buf, v.Bool()) + + case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64: + p.buf = strconv.AppendInt(p.buf, v.Int(), 10) + + case reflect.String: + p.buf = strconv.AppendQuote(p.buf, v.String()) + + case reflect.Pointer: + if v.IsNil() { + p.buf = append(p.buf, "nil"...) + } else { + p.buf = append(p.buf, "&"...) + p.val(v.Elem(), indent) + } + + case reflect.Slice, reflect.Array: + p.buf = append(p.buf, "[\n"...) + for i := range v.Len() { + p.indent(indent + 1) + p.val(v.Index(i), indent+1) + p.buf = append(p.buf, ",\n"...) + } + p.indent(indent) + p.buf = append(p.buf, ']') + + case reflect.Struct: + vt := v.Type() + p.buf = append(append(p.buf, vt.String()...), "{\n"...) + for f := range v.NumField() { + p.indent(indent + 1) + p.buf = append(append(p.buf, vt.Field(f).Name...), ": "...) + p.val(v.Field(f), indent+1) + p.buf = append(p.buf, ",\n"...) + } + p.indent(indent) + p.buf = append(p.buf, '}') + } +} diff --git a/src/simd/_gen/simdgen/sort_test.go b/src/simd/_gen/simdgen/sort_test.go new file mode 100644 index 00000000000000..399acf03fbe8af --- /dev/null +++ b/src/simd/_gen/simdgen/sort_test.go @@ -0,0 +1,41 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import "testing" + +func TestSort(t *testing.T) { + testCases := []struct { + s1, s2 string + want int + }{ + {"a1", "a2", -1}, + {"a11a", "a11b", -1}, + {"a01a1", "a1a01", -1}, + {"a2", "a1", 1}, + {"a10", "a2", 1}, + {"a1", "a10", -1}, + {"z11", "z2", 1}, + {"z2", "z11", -1}, + {"abc", "abd", -1}, + {"123", "45", 1}, + {"file1", "file1", 0}, + {"file", "file1", -1}, + {"file1", "file", 1}, + {"a01", "a1", -1}, + {"a1a", "a1b", -1}, + } + + for _, tc := range testCases { + got := compareNatural(tc.s1, tc.s2) + result := "✅" + if got != tc.want { + result = "❌" + t.Errorf("%s CompareNatural(\"%s\", \"%s\") -> got %2d, want %2d\n", result, tc.s1, tc.s2, got, tc.want) + } else { + t.Logf("%s CompareNatural(\"%s\", \"%s\") -> got %2d, want %2d\n", result, tc.s1, tc.s2, got, tc.want) + } + } +} diff --git a/src/simd/_gen/simdgen/types.yaml b/src/simd/_gen/simdgen/types.yaml new file mode 100644 index 00000000000000..f7a01cb360d306 --- /dev/null +++ b/src/simd/_gen/simdgen/types.yaml @@ -0,0 +1,90 @@ +# This file defines the possible types of each operand and result. +# +# In general, we're able to narrow this down on some attributes directly from +# the machine instruction descriptions, but the Go mappings need to further +# constrain them and how they relate. For example, on x86 we can't distinguish +# int and uint, though we can distinguish these from float. + +in: !repeat +- !sum &types + - {class: vreg, go: Int8x16, base: "int", elemBits: 8, bits: 128, lanes: 16} + - {class: vreg, go: Uint8x16, base: "uint", elemBits: 8, bits: 128, lanes: 16} + - {class: vreg, go: Int16x8, base: "int", elemBits: 16, bits: 128, lanes: 8} + - {class: vreg, go: Uint16x8, base: "uint", elemBits: 16, bits: 128, lanes: 8} + - {class: vreg, go: Int32x4, base: "int", elemBits: 32, bits: 128, lanes: 4} + - {class: vreg, go: Uint32x4, base: "uint", elemBits: 32, bits: 128, lanes: 4} + - {class: vreg, go: Int64x2, base: "int", elemBits: 64, bits: 128, lanes: 2} + - {class: vreg, go: Uint64x2, base: "uint", elemBits: 64, bits: 128, lanes: 2} + - {class: vreg, go: Float32x4, base: "float", elemBits: 32, bits: 128, lanes: 4} + - {class: vreg, go: Float64x2, base: "float", elemBits: 64, bits: 128, lanes: 2} + - {class: vreg, go: Int8x32, base: "int", elemBits: 8, bits: 256, lanes: 32} + - {class: vreg, go: Uint8x32, base: "uint", elemBits: 8, bits: 256, lanes: 32} + - {class: vreg, go: Int16x16, base: "int", elemBits: 16, bits: 256, lanes: 16} + - {class: vreg, go: Uint16x16, base: "uint", elemBits: 16, bits: 256, lanes: 16} + - {class: vreg, go: Int32x8, base: "int", elemBits: 32, bits: 256, lanes: 8} + - {class: vreg, go: Uint32x8, base: "uint", elemBits: 32, bits: 256, lanes: 8} + - {class: vreg, go: Int64x4, base: "int", elemBits: 64, bits: 256, lanes: 4} + - {class: vreg, go: Uint64x4, base: "uint", elemBits: 64, bits: 256, lanes: 4} + - {class: vreg, go: Float32x8, base: "float", elemBits: 32, bits: 256, lanes: 8} + - {class: vreg, go: Float64x4, base: "float", elemBits: 64, bits: 256, lanes: 4} + - {class: vreg, go: Int8x64, base: "int", elemBits: 8, bits: 512, lanes: 64} + - {class: vreg, go: Uint8x64, base: "uint", elemBits: 8, bits: 512, lanes: 64} + - {class: vreg, go: Int16x32, base: "int", elemBits: 16, bits: 512, lanes: 32} + - {class: vreg, go: Uint16x32, base: "uint", elemBits: 16, bits: 512, lanes: 32} + - {class: vreg, go: Int32x16, base: "int", elemBits: 32, bits: 512, lanes: 16} + - {class: vreg, go: Uint32x16, base: "uint", elemBits: 32, bits: 512, lanes: 16} + - {class: vreg, go: 
Int64x8, base: "int", elemBits: 64, bits: 512, lanes: 8} + - {class: vreg, go: Uint64x8, base: "uint", elemBits: 64, bits: 512, lanes: 8} + - {class: vreg, go: Float32x16, base: "float", elemBits: 32, bits: 512, lanes: 16} + - {class: vreg, go: Float64x8, base: "float", elemBits: 64, bits: 512, lanes: 8} + + - {class: mask, go: Mask8x16, base: "int", elemBits: 8, bits: 128, lanes: 16} + - {class: mask, go: Mask16x8, base: "int", elemBits: 16, bits: 128, lanes: 8} + - {class: mask, go: Mask32x4, base: "int", elemBits: 32, bits: 128, lanes: 4} + - {class: mask, go: Mask64x2, base: "int", elemBits: 64, bits: 128, lanes: 2} + - {class: mask, go: Mask8x32, base: "int", elemBits: 8, bits: 256, lanes: 32} + - {class: mask, go: Mask16x16, base: "int", elemBits: 16, bits: 256, lanes: 16} + - {class: mask, go: Mask32x8, base: "int", elemBits: 32, bits: 256, lanes: 8} + - {class: mask, go: Mask64x4, base: "int", elemBits: 64, bits: 256, lanes: 4} + - {class: mask, go: Mask8x64, base: "int", elemBits: 8, bits: 512, lanes: 64} + - {class: mask, go: Mask16x32, base: "int", elemBits: 16, bits: 512, lanes: 32} + - {class: mask, go: Mask32x16, base: "int", elemBits: 32, bits: 512, lanes: 16} + - {class: mask, go: Mask64x8, base: "int", elemBits: 64, bits: 512, lanes: 8} + + + - {class: greg, go: float64, base: "float", bits: 64, lanes: 1} + - {class: greg, go: float32, base: "float", bits: 32, lanes: 1} + - {class: greg, go: int64, base: "int", bits: 64, lanes: 1} + - {class: greg, go: int32, base: "int", bits: 32, lanes: 1} + - {class: greg, go: int16, base: "int", bits: 16, lanes: 1} + - {class: greg, go: int8, base: "int", bits: 8, lanes: 1} + - {class: greg, go: uint64, base: "uint", bits: 64, lanes: 1} + - {class: greg, go: uint32, base: "uint", bits: 32, lanes: 1} + - {class: greg, go: uint16, base: "uint", bits: 16, lanes: 1} + - {class: greg, go: uint8, base: "uint", bits: 8, lanes: 1} + +# Special shapes just to make INSERT[IF]128 work. +# The elemBits field of these shapes are wrong, it would be overwritten by overwriteElemBits. + - {class: vreg, go: Int8x16, base: "int", elemBits: 128, bits: 128, lanes: 16} + - {class: vreg, go: Uint8x16, base: "uint", elemBits: 128, bits: 128, lanes: 16} + - {class: vreg, go: Int16x8, base: "int", elemBits: 128, bits: 128, lanes: 8} + - {class: vreg, go: Uint16x8, base: "uint", elemBits: 128, bits: 128, lanes: 8} + - {class: vreg, go: Int32x4, base: "int", elemBits: 128, bits: 128, lanes: 4} + - {class: vreg, go: Uint32x4, base: "uint", elemBits: 128, bits: 128, lanes: 4} + - {class: vreg, go: Int64x2, base: "int", elemBits: 128, bits: 128, lanes: 2} + - {class: vreg, go: Uint64x2, base: "uint", elemBits: 128, bits: 128, lanes: 2} + + - {class: vreg, go: Int8x32, base: "int", elemBits: 128, bits: 256, lanes: 32} + - {class: vreg, go: Uint8x32, base: "uint", elemBits: 128, bits: 256, lanes: 32} + - {class: vreg, go: Int16x16, base: "int", elemBits: 128, bits: 256, lanes: 16} + - {class: vreg, go: Uint16x16, base: "uint", elemBits: 128, bits: 256, lanes: 16} + - {class: vreg, go: Int32x8, base: "int", elemBits: 128, bits: 256, lanes: 8} + - {class: vreg, go: Uint32x8, base: "uint", elemBits: 128, bits: 256, lanes: 8} + - {class: vreg, go: Int64x4, base: "int", elemBits: 128, bits: 256, lanes: 4} + - {class: vreg, go: Uint64x4, base: "uint", elemBits: 128, bits: 256, lanes: 4} + + - {class: immediate, go: Immediate} # TODO: we only support imms that are not used as value -- usually as instruction semantic predicate like VPCMP as of now. 
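+
+# For example (illustrative): an operand decoded from XED as
+# {class: vreg, bits: 128, elemBits: 32, base: "int|uint"} unifies with both
+# the Int32x4 and Uint32x4 shapes above; the Go-side op definitions then
+# narrow it to one or the other.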
+inVariant: !repeat +- *types +out: !repeat +- *types diff --git a/src/simd/_gen/simdgen/xed.go b/src/simd/_gen/simdgen/xed.go new file mode 100644 index 00000000000000..d749f433e37792 --- /dev/null +++ b/src/simd/_gen/simdgen/xed.go @@ -0,0 +1,780 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "cmp" + "fmt" + "log" + "maps" + "regexp" + "slices" + "strconv" + "strings" + + "golang.org/x/arch/x86/xeddata" + "gopkg.in/yaml.v3" + "simd/_gen/unify" +) + +const ( + NOT_REG_CLASS = 0 // not a register + VREG_CLASS = 1 // classify as a vector register; see + GREG_CLASS = 2 // classify as a general register +) + +// instVariant is a bitmap indicating a variant of an instruction that has +// optional parameters. +type instVariant uint8 + +const ( + instVariantNone instVariant = 0 + + // instVariantMasked indicates that this is the masked variant of an + // optionally-masked instruction. + instVariantMasked instVariant = 1 << iota +) + +var operandRemarks int + +// TODO: Doc. Returns Values with Def domains. +func loadXED(xedPath string) []*unify.Value { + // TODO: Obviously a bunch more to do here. + + db, err := xeddata.NewDatabase(xedPath) + if err != nil { + log.Fatalf("open database: %v", err) + } + + var defs []*unify.Value + err = xeddata.WalkInsts(xedPath, func(inst *xeddata.Inst) { + inst.Pattern = xeddata.ExpandStates(db, inst.Pattern) + + switch { + case inst.RealOpcode == "N": + return // Skip unstable instructions + case !strings.HasPrefix(inst.Extension, "AVX"): + // We're only interested in AVX instructions. + return + } + + if *flagDebugXED { + fmt.Printf("%s:\n%+v\n", inst.Pos, inst) + } + + ops, err := decodeOperands(db, strings.Fields(inst.Operands)) + if err != nil { + operandRemarks++ + if *Verbose { + log.Printf("%s: [%s] %s", inst.Pos, inst.Opcode(), err) + } + return + } + + applyQuirks(inst, ops) + + defsPos := len(defs) + defs = append(defs, instToUVal(inst, ops)...) + + if *flagDebugXED { + for i := defsPos; i < len(defs); i++ { + y, _ := yaml.Marshal(defs[i]) + fmt.Printf("==>\n%s\n", y) + } + } + }) + if err != nil { + log.Fatalf("walk insts: %v", err) + } + + if len(unknownFeatures) > 0 { + if !*Verbose { + nInst := 0 + for _, insts := range unknownFeatures { + nInst += len(insts) + } + log.Printf("%d unhandled CPU features for %d instructions (use -v for details)", len(unknownFeatures), nInst) + } else { + keys := slices.SortedFunc(maps.Keys(unknownFeatures), func(a, b cpuFeatureKey) int { + return cmp.Or(cmp.Compare(a.Extension, b.Extension), + cmp.Compare(a.ISASet, b.ISASet)) + }) + for _, key := range keys { + if key.ISASet == "" || key.ISASet == key.Extension { + log.Printf("unhandled Extension %s", key.Extension) + } else { + log.Printf("unhandled Extension %s and ISASet %s", key.Extension, key.ISASet) + } + log.Printf(" opcodes: %s", slices.Sorted(maps.Keys(unknownFeatures[key]))) + } + } + } + + return defs +} + +var ( + maskRequiredRe = regexp.MustCompile(`VPCOMPRESS[BWDQ]|VCOMPRESSP[SD]|VPEXPAND[BWDQ]|VEXPANDP[SD]`) + maskOptionalRe = regexp.MustCompile(`VPCMP(EQ|GT|U)?[BWDQ]|VCMPP[SD]`) +) + +func applyQuirks(inst *xeddata.Inst, ops []operand) { + opc := inst.Opcode() + switch { + case maskRequiredRe.MatchString(opc): + // The mask on these instructions is marked optional, but the + // instruction is pointless without the mask. 
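+		// (For example, VPCOMPRESSD with an all-true mask simply copies its
+		// source, so only the masked form is useful.)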
+ for i, op := range ops { + if op, ok := op.(operandMask); ok { + op.optional = false + ops[i] = op + } + } + + case maskOptionalRe.MatchString(opc): + // Conversely, these masks should be marked optional and aren't. + for i, op := range ops { + if op, ok := op.(operandMask); ok && op.action.r { + op.optional = true + ops[i] = op + } + } + } +} + +type operandCommon struct { + action operandAction +} + +// operandAction defines whether this operand is read and/or written. +// +// TODO: Should this live in [xeddata.Operand]? +type operandAction struct { + r bool // Read + w bool // Written + cr bool // Read is conditional (implies r==true) + cw bool // Write is conditional (implies w==true) +} + +type operandMem struct { + operandCommon + // TODO +} + +type vecShape struct { + elemBits int // Element size in bits + bits int // Register width in bits (total vector bits) +} + +type operandVReg struct { // Vector register + operandCommon + vecShape + elemBaseType scalarBaseType +} + +type operandGReg struct { // Vector register + operandCommon + vecShape + elemBaseType scalarBaseType +} + +// operandMask is a vector mask. +// +// Regardless of the actual mask representation, the [vecShape] of this operand +// corresponds to the "bit for bit" type of mask. That is, elemBits gives the +// element width covered by each mask element, and bits/elemBits gives the total +// number of mask elements. (bits gives the total number of bits as if this were +// a bit-for-bit mask, which may be meaningless on its own.) +type operandMask struct { + operandCommon + vecShape + // Bits in the mask is w/bits. + + allMasks bool // If set, size cannot be inferred because all operands are masks. + + // Mask can be omitted, in which case it defaults to K0/"no mask" + optional bool +} + +type operandImm struct { + operandCommon + bits int // Immediate size in bits +} + +type operand interface { + common() operandCommon + addToDef(b *unify.DefBuilder) +} + +func strVal(s any) *unify.Value { + return unify.NewValue(unify.NewStringExact(fmt.Sprint(s))) +} + +func (o operandCommon) common() operandCommon { + return o +} + +func (o operandMem) addToDef(b *unify.DefBuilder) { + // TODO: w, base + b.Add("class", strVal("memory")) +} + +func (o operandVReg) addToDef(b *unify.DefBuilder) { + baseDomain, err := unify.NewStringRegex(o.elemBaseType.regex()) + if err != nil { + panic("parsing baseRe: " + err.Error()) + } + b.Add("class", strVal("vreg")) + b.Add("bits", strVal(o.bits)) + b.Add("base", unify.NewValue(baseDomain)) + // If elemBits == bits, then the vector can be ANY shape. This happens with, + // for example, logical ops. + if o.elemBits != o.bits { + b.Add("elemBits", strVal(o.elemBits)) + } +} + +func (o operandGReg) addToDef(b *unify.DefBuilder) { + baseDomain, err := unify.NewStringRegex(o.elemBaseType.regex()) + if err != nil { + panic("parsing baseRe: " + err.Error()) + } + b.Add("class", strVal("greg")) + b.Add("bits", strVal(o.bits)) + b.Add("base", unify.NewValue(baseDomain)) + if o.elemBits != o.bits { + b.Add("elemBits", strVal(o.elemBits)) + } +} + +func (o operandMask) addToDef(b *unify.DefBuilder) { + b.Add("class", strVal("mask")) + if o.allMasks { + // If all operands are masks, omit sizes and let unification determine mask sizes. 
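+		// (For example, pure mask instructions such as KAND[BWDQ] have no
+		// vector operand that would fix the element size; see inferMaskSizes.)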
+ return + } + b.Add("elemBits", strVal(o.elemBits)) + b.Add("bits", strVal(o.bits)) +} + +func (o operandImm) addToDef(b *unify.DefBuilder) { + b.Add("class", strVal("immediate")) + b.Add("bits", strVal(o.bits)) +} + +var actionEncoding = map[string]operandAction{ + "r": {r: true}, + "cr": {r: true, cr: true}, + "w": {w: true}, + "cw": {w: true, cw: true}, + "rw": {r: true, w: true}, + "crw": {r: true, w: true, cr: true}, + "rcw": {r: true, w: true, cw: true}, +} + +func decodeOperand(db *xeddata.Database, operand string) (operand, error) { + op, err := xeddata.NewOperand(db, operand) + if err != nil { + log.Fatalf("parsing operand %q: %v", operand, err) + } + if *flagDebugXED { + fmt.Printf(" %+v\n", op) + } + + if strings.HasPrefix(op.Name, "EMX_BROADCAST") { + // This refers to a set of macros defined in all-state.txt that set a + // BCAST operand to various fixed values. But the BCAST operand is + // itself suppressed and "internal", so I think we can just ignore this + // operand. + return nil, nil + } + + // TODO: See xed_decoded_inst_operand_action. This might need to be more + // complicated. + action, ok := actionEncoding[op.Action] + if !ok { + return nil, fmt.Errorf("unknown action %q", op.Action) + } + common := operandCommon{action: action} + + lhs := op.NameLHS() + if strings.HasPrefix(lhs, "MEM") { + // TODO: Width, base type + return operandMem{ + operandCommon: common, + }, nil + } else if strings.HasPrefix(lhs, "REG") { + if op.Width == "mskw" { + // The mask operand doesn't specify a width. We have to infer it. + // + // XED uses the marker ZEROSTR to indicate that a mask operand is + // optional and, if omitted, implies K0, aka "no mask". + return operandMask{ + operandCommon: common, + optional: op.Attributes["TXT=ZEROSTR"], + }, nil + } else { + class, regBits := decodeReg(op) + if class == NOT_REG_CLASS { + return nil, fmt.Errorf("failed to decode register %q", operand) + } + baseType, elemBits, ok := decodeType(op) + if !ok { + return nil, fmt.Errorf("failed to decode register width %q", operand) + } + shape := vecShape{elemBits: elemBits, bits: regBits} + if class == VREG_CLASS { + return operandVReg{ + operandCommon: common, + vecShape: shape, + elemBaseType: baseType, + }, nil + } + // general register + m := min(shape.bits, shape.elemBits) + shape.bits, shape.elemBits = m, m + return operandGReg{ + operandCommon: common, + vecShape: shape, + elemBaseType: baseType, + }, nil + + } + } else if strings.HasPrefix(lhs, "IMM") { + _, bits, ok := decodeType(op) + if !ok { + return nil, fmt.Errorf("failed to decode register width %q", operand) + } + return operandImm{ + operandCommon: common, + bits: bits, + }, nil + } + + // TODO: BASE and SEG + return nil, fmt.Errorf("unknown operand LHS %q in %q", lhs, operand) +} + +func decodeOperands(db *xeddata.Database, operands []string) (ops []operand, err error) { + // Decode the XED operand descriptions. + for _, o := range operands { + op, err := decodeOperand(db, o) + if err != nil { + return nil, err + } + if op != nil { + ops = append(ops, op) + } + } + + // XED doesn't encode the size of mask operands. If there are mask operands, + // try to infer their sizes from other operands. + if err := inferMaskSizes(ops); err != nil { + return nil, fmt.Errorf("%w in operands %+v", err, operands) + } + + return ops, nil +} + +func inferMaskSizes(ops []operand) error { + // This is a heuristic and it falls apart in some cases: + // + // - Mask operations like KAND[BWDQ] have *nothing* in the XED to indicate + // mask size. 
+ // + // - VINSERT*, VPSLL*, VPSRA*, and VPSRL* and some others naturally have + // mixed input sizes and the XED doesn't indicate which operands the mask + // applies to. + // + // - VPDP* and VP4DP* have really complex mixed operand patterns. + // + // I think for these we may just have to hand-write a table of which + // operands each mask applies to. + inferMask := func(r, w bool) error { + var masks []int + var rSizes, wSizes, sizes []vecShape + allMasks := true + hasWMask := false + for i, op := range ops { + action := op.common().action + if _, ok := op.(operandMask); ok { + if action.r && action.w { + return fmt.Errorf("unexpected rw mask") + } + if action.r == r || action.w == w { + masks = append(masks, i) + } + if action.w { + hasWMask = true + } + } else { + allMasks = false + if reg, ok := op.(operandVReg); ok { + if action.r { + rSizes = append(rSizes, reg.vecShape) + } + if action.w { + wSizes = append(wSizes, reg.vecShape) + } + } + } + } + if len(masks) == 0 { + return nil + } + + if r { + sizes = rSizes + if len(sizes) == 0 { + sizes = wSizes + } + } + if w { + sizes = wSizes + if len(sizes) == 0 { + sizes = rSizes + } + } + + if len(sizes) == 0 { + // If all operands are masks, leave the mask inferrence to the users. + if allMasks { + for _, i := range masks { + m := ops[i].(operandMask) + m.allMasks = true + ops[i] = m + } + return nil + } + return fmt.Errorf("cannot infer mask size: no register operands") + } + shape, ok := singular(sizes) + if !ok { + if !hasWMask && len(wSizes) == 1 && len(masks) == 1 { + // This pattern looks like predicate mask, so its shape should align with the + // output. TODO: verify this is a safe assumption. + shape = wSizes[0] + } else { + return fmt.Errorf("cannot infer mask size: multiple register sizes %v", sizes) + } + } + for _, i := range masks { + m := ops[i].(operandMask) + m.vecShape = shape + ops[i] = m + } + return nil + } + if err := inferMask(true, false); err != nil { + return err + } + if err := inferMask(false, true); err != nil { + return err + } + return nil +} + +// addOperandstoDef adds "in", "inVariant", and "out" to an instruction Def. +// +// Optional mask input operands are added to the inVariant field if +// variant&instVariantMasked, and omitted otherwise. +func addOperandsToDef(ops []operand, instDB *unify.DefBuilder, variant instVariant) { + var inVals, inVar, outVals []*unify.Value + asmPos := 0 + for _, op := range ops { + var db unify.DefBuilder + op.addToDef(&db) + db.Add("asmPos", unify.NewValue(unify.NewStringExact(fmt.Sprint(asmPos)))) + + action := op.common().action + asmCount := 1 // # of assembly operands; 0 or 1 + if action.r { + inVal := unify.NewValue(db.Build()) + // If this is an optional mask, put it in the input variant tuple. + if mask, ok := op.(operandMask); ok && mask.optional { + if variant&instVariantMasked != 0 { + inVar = append(inVar, inVal) + } else { + // This operand doesn't appear in the assembly at all. + asmCount = 0 + } + } else { + // Just a regular input operand. 
+ inVals = append(inVals, inVal) + } + } + if action.w { + outVal := unify.NewValue(db.Build()) + outVals = append(outVals, outVal) + } + + asmPos += asmCount + } + + instDB.Add("in", unify.NewValue(unify.NewTuple(inVals...))) + instDB.Add("inVariant", unify.NewValue(unify.NewTuple(inVar...))) + instDB.Add("out", unify.NewValue(unify.NewTuple(outVals...))) +} + +func instToUVal(inst *xeddata.Inst, ops []operand) []*unify.Value { + feature, ok := decodeCPUFeature(inst) + if !ok { + return nil + } + + var vals []*unify.Value + vals = append(vals, instToUVal1(inst, ops, feature, instVariantNone)) + if hasOptionalMask(ops) { + vals = append(vals, instToUVal1(inst, ops, feature, instVariantMasked)) + } + return vals +} + +func instToUVal1(inst *xeddata.Inst, ops []operand, feature string, variant instVariant) *unify.Value { + var db unify.DefBuilder + db.Add("goarch", unify.NewValue(unify.NewStringExact("amd64"))) + db.Add("asm", unify.NewValue(unify.NewStringExact(inst.Opcode()))) + addOperandsToDef(ops, &db, variant) + db.Add("cpuFeature", unify.NewValue(unify.NewStringExact(feature))) + + if strings.Contains(inst.Pattern, "ZEROING=0") { + // This is an EVEX instruction, but the ".Z" (zero-merging) + // instruction flag is NOT valid. EVEX.z must be zero. + // + // This can mean a few things: + // + // - The output of an instruction is a mask, so merging modes don't + // make any sense. E.g., VCMPPS. + // + // - There are no masks involved anywhere. (Maybe MASK=0 is also set + // in this case?) E.g., VINSERTPS. + // + // - The operation inherently performs merging. E.g., VCOMPRESSPS + // with a mem operand. + // + // There may be other reasons. + db.Add("zeroing", unify.NewValue(unify.NewStringExact("false"))) + } + pos := unify.Pos{Path: inst.Pos.Path, Line: inst.Pos.Line} + return unify.NewValuePos(db.Build(), pos) +} + +// decodeCPUFeature returns the CPU feature name required by inst. These match +// the names of the "Has*" feature checks in the simd package. +func decodeCPUFeature(inst *xeddata.Inst) (string, bool) { + key := cpuFeatureKey{ + Extension: inst.Extension, + ISASet: isaSetStrip.ReplaceAllLiteralString(inst.ISASet, ""), + } + feat, ok := cpuFeatureMap[key] + if !ok { + imap := unknownFeatures[key] + if imap == nil { + imap = make(map[string]struct{}) + unknownFeatures[key] = imap + } + imap[inst.Opcode()] = struct{}{} + return "", false + } + if feat == "ignore" { + return "", false + } + return feat, true +} + +var isaSetStrip = regexp.MustCompile("_(128N?|256N?|512)$") + +type cpuFeatureKey struct { + Extension, ISASet string +} + +// cpuFeatureMap maps from XED's "EXTENSION" and "ISA_SET" to a CPU feature name +// that can be used in the SIMD API. +var cpuFeatureMap = map[cpuFeatureKey]string{ + {"AVX", ""}: "AVX", + {"AVX_VNNI", "AVX_VNNI"}: "AVXVNNI", + {"AVX2", ""}: "AVX2", + + // AVX-512 foundational features. We combine all of these into one "AVX512" feature. + {"AVX512EVEX", "AVX512F"}: "AVX512", + {"AVX512EVEX", "AVX512CD"}: "AVX512", + {"AVX512EVEX", "AVX512BW"}: "AVX512", + {"AVX512EVEX", "AVX512DQ"}: "AVX512", + // AVX512VL doesn't appear explicitly in the ISASet. I guess it's implied by + // the vector length suffix. 
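+	// For example (illustrative): the EVEX VPADDB forms report Extension
+	// "AVX512EVEX" and ISASet "AVX512BW_128/256/512"; after isaSetStrip drops
+	// the width suffix, they all map to the single "AVX512" feature above.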
+ + // AVX-512 extension features + {"AVX512EVEX", "AVX512_BITALG"}: "AVX512BITALG", + {"AVX512EVEX", "AVX512_GFNI"}: "AVX512GFNI", + {"AVX512EVEX", "AVX512_VBMI2"}: "AVX512VBMI2", + {"AVX512EVEX", "AVX512_VBMI"}: "AVX512VBMI", + {"AVX512EVEX", "AVX512_VNNI"}: "AVX512VNNI", + {"AVX512EVEX", "AVX512_VPOPCNTDQ"}: "AVX512VPOPCNTDQ", + + // AVX 10.2 (not yet supported) + {"AVX512EVEX", "AVX10_2_RC"}: "ignore", +} + +var unknownFeatures = map[cpuFeatureKey]map[string]struct{}{} + +// hasOptionalMask returns whether there is an optional mask operand in ops. +func hasOptionalMask(ops []operand) bool { + for _, op := range ops { + if op, ok := op.(operandMask); ok && op.optional { + return true + } + } + return false +} + +func singular[T comparable](xs []T) (T, bool) { + if len(xs) == 0 { + return *new(T), false + } + for _, x := range xs[1:] { + if x != xs[0] { + return *new(T), false + } + } + return xs[0], true +} + +// decodeReg returns class (NOT_REG_CLASS, VREG_CLASS, GREG_CLASS), +// and width in bits. If the operand cannot be decided as a register, +// then the clas is NOT_REG_CLASS. +func decodeReg(op *xeddata.Operand) (class, width int) { + // op.Width tells us the total width, e.g.,: + // + // dq => 128 bits (XMM) + // qq => 256 bits (YMM) + // mskw => K + // z[iuf?](8|16|32|...) => 512 bits (ZMM) + // + // But the encoding is really weird and it's not clear if these *always* + // mean XMM/YMM/ZMM or if other irregular things can use these large widths. + // Hence, we dig into the register sets themselves. + + if !strings.HasPrefix(op.NameLHS(), "REG") { + return NOT_REG_CLASS, 0 + } + // TODO: We shouldn't be relying on the macro naming conventions. We should + // use all-dec-patterns.txt, but xeddata doesn't support that table right now. + rhs := op.NameRHS() + if !strings.HasSuffix(rhs, "()") { + return NOT_REG_CLASS, 0 + } + switch { + case strings.HasPrefix(rhs, "XMM_"): + return VREG_CLASS, 128 + case strings.HasPrefix(rhs, "YMM_"): + return VREG_CLASS, 256 + case strings.HasPrefix(rhs, "ZMM_"): + return VREG_CLASS, 512 + case strings.HasPrefix(rhs, "GPR64_"), strings.HasPrefix(rhs, "VGPR64_"): + return GREG_CLASS, 64 + case strings.HasPrefix(rhs, "GPR32_"), strings.HasPrefix(rhs, "VGPR32_"): + return GREG_CLASS, 32 + } + return NOT_REG_CLASS, 0 +} + +var xtypeRe = regexp.MustCompile(`^([iuf])([0-9]+)$`) + +// scalarBaseType describes the base type of a scalar element. This is a Go +// type, but without the bit width suffix (with the exception of +// scalarBaseIntOrUint). +type scalarBaseType int + +const ( + scalarBaseInt scalarBaseType = iota + scalarBaseUint + scalarBaseIntOrUint // Signed or unsigned is unspecified + scalarBaseFloat + scalarBaseComplex + scalarBaseBFloat + scalarBaseHFloat +) + +func (s scalarBaseType) regex() string { + switch s { + case scalarBaseInt: + return "int" + case scalarBaseUint: + return "uint" + case scalarBaseIntOrUint: + return "int|uint" + case scalarBaseFloat: + return "float" + case scalarBaseComplex: + return "complex" + case scalarBaseBFloat: + return "BFloat" + case scalarBaseHFloat: + return "HFloat" + } + panic(fmt.Sprintf("unknown scalar base type %d", s)) +} + +func decodeType(op *xeddata.Operand) (base scalarBaseType, bits int, ok bool) { + // The xtype tells you the element type. i8, i16, i32, i64, f32, etc. + // + // TODO: Things like AVX2 VPAND have an xtype of u256 because they're + // element-width agnostic. Do I map that to all widths, or just omit the + // element width and let unification flesh it out? 
There's no u512 + // (presumably those are all masked, so elem width matters). These are all + // Category: LOGICAL, so maybe we could use that info? + + // Handle some weird ones. + switch op.Xtype { + // 8-bit float formats as defined by Open Compute Project "OCP 8-bit + // Floating Point Specification (OFP8)". + case "bf8": // E5M2 float + return scalarBaseBFloat, 8, true + case "hf8": // E4M3 float + return scalarBaseHFloat, 8, true + case "bf16": // bfloat16 float + return scalarBaseBFloat, 16, true + case "2f16": + // Complex consisting of 2 float16s. Doesn't exist in Go, but we can say + // what it would be. + return scalarBaseComplex, 32, true + case "2i8", "2I8": + // These just use the lower INT8 in each 16 bit field. + // As far as I can tell, "2I8" is a typo. + return scalarBaseInt, 8, true + case "2u16", "2U16": + // some VPDP* has it + // TODO: does "z" means it has zeroing? + return scalarBaseUint, 16, true + case "2i16", "2I16": + // some VPDP* has it + return scalarBaseInt, 16, true + case "4u8", "4U8": + // some VPDP* has it + return scalarBaseUint, 8, true + case "4i8", "4I8": + // some VPDP* has it + return scalarBaseInt, 8, true + } + + // The rest follow a simple pattern. + m := xtypeRe.FindStringSubmatch(op.Xtype) + if m == nil { + // TODO: Report unrecognized xtype + return 0, 0, false + } + bits, _ = strconv.Atoi(m[2]) + switch m[1] { + case "i", "u": + // XED is rather inconsistent about what's signed, unsigned, or doesn't + // matter, so merge them together and let the Go definitions narrow as + // appropriate. Maybe there's a better way to do this. + return scalarBaseIntOrUint, bits, true + case "f": + return scalarBaseFloat, bits, true + default: + panic("unreachable") + } +} diff --git a/src/simd/_gen/unify/closure.go b/src/simd/_gen/unify/closure.go new file mode 100644 index 00000000000000..e8e76e215143ad --- /dev/null +++ b/src/simd/_gen/unify/closure.go @@ -0,0 +1,154 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package unify + +import ( + "fmt" + "iter" + "maps" + "slices" +) + +type Closure struct { + val *Value + env envSet +} + +func NewSum(vs ...*Value) Closure { + id := &ident{name: "sum"} + return Closure{NewValue(Var{id}), topEnv.bind(id, vs...)} +} + +// IsBottom returns whether c consists of no values. +func (c Closure) IsBottom() bool { + return c.val.Domain == nil +} + +// Summands returns the top-level Values of c. This assumes the top-level of c +// was constructed as a sum, and is mostly useful for debugging. +func (c Closure) Summands() iter.Seq[*Value] { + return func(yield func(*Value) bool) { + var rec func(v *Value, env envSet) bool + rec = func(v *Value, env envSet) bool { + switch d := v.Domain.(type) { + case Var: + parts := env.partitionBy(d.id) + for _, part := range parts { + // It may be a sum of sums. Walk into this value. + if !rec(part.value, part.env) { + return false + } + } + return true + default: + return yield(v) + } + } + rec(c.val, c.env) + } +} + +// All enumerates all possible concrete values of c by substituting variables +// from the environment. 
+// +// E.g., enumerating this Value +// +// a: !sum [1, 2] +// b: !sum [3, 4] +// +// results in +// +// - {a: 1, b: 3} +// - {a: 1, b: 4} +// - {a: 2, b: 3} +// - {a: 2, b: 4} +func (c Closure) All() iter.Seq[*Value] { + // In order to enumerate all concrete values under all possible variable + // bindings, we use a "non-deterministic continuation passing style" to + // implement this. We use CPS to traverse the Value tree, threading the + // (possibly narrowing) environment through that CPS following an Euler + // tour. Where the environment permits multiple choices, we invoke the same + // continuation for each choice. Similar to a yield function, the + // continuation can return false to stop the non-deterministic walk. + return func(yield func(*Value) bool) { + c.val.all1(c.env, func(v *Value, e envSet) bool { + return yield(v) + }) + } +} + +func (v *Value) all1(e envSet, cont func(*Value, envSet) bool) bool { + switch d := v.Domain.(type) { + default: + panic(fmt.Sprintf("unknown domain type %T", d)) + + case nil: + return true + + case Top, String: + return cont(v, e) + + case Def: + fields := d.keys() + // We can reuse this parts slice because we're doing a DFS through the + // state space. (Otherwise, we'd have to do some messy threading of an + // immutable slice-like value through allElt.) + parts := make(map[string]*Value, len(fields)) + + // TODO: If there are no Vars or Sums under this Def, then nothing can + // change the Value or env, so we could just cont(v, e). + var allElt func(elt int, e envSet) bool + allElt = func(elt int, e envSet) bool { + if elt == len(fields) { + // Build a new Def from the concrete parts. Clone parts because + // we may reuse it on other non-deterministic branches. + nVal := newValueFrom(Def{maps.Clone(parts)}, v) + return cont(nVal, e) + } + + return d.fields[fields[elt]].all1(e, func(v *Value, e envSet) bool { + parts[fields[elt]] = v + return allElt(elt+1, e) + }) + } + return allElt(0, e) + + case Tuple: + // Essentially the same as Def. + if d.repeat != nil { + // There's nothing we can do with this. + return cont(v, e) + } + parts := make([]*Value, len(d.vs)) + var allElt func(elt int, e envSet) bool + allElt = func(elt int, e envSet) bool { + if elt == len(d.vs) { + // Build a new tuple from the concrete parts. Clone parts because + // we may reuse it on other non-deterministic branches. + nVal := newValueFrom(Tuple{vs: slices.Clone(parts)}, v) + return cont(nVal, e) + } + + return d.vs[elt].all1(e, func(v *Value, e envSet) bool { + parts[elt] = v + return allElt(elt+1, e) + }) + } + return allElt(0, e) + + case Var: + // Go each way this variable can be bound. + for _, ePart := range e.partitionBy(d.id) { + // d.id is no longer bound in this environment partition. We'll may + // need it later in the Euler tour, so bind it back to this single + // value. + env := ePart.env.bind(d.id, ePart.value) + if !ePart.value.all1(env, cont) { + return false + } + } + return true + } +} diff --git a/src/simd/_gen/unify/domain.go b/src/simd/_gen/unify/domain.go new file mode 100644 index 00000000000000..1e0f2be63d739a --- /dev/null +++ b/src/simd/_gen/unify/domain.go @@ -0,0 +1,359 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package unify + +import ( + "fmt" + "iter" + "maps" + "reflect" + "regexp" + "slices" + "strconv" + "strings" +) + +// A Domain is a non-empty set of values, all of the same kind. 
+// +// Domain may be a scalar: +// +// - [String] - Represents string-typed values. +// +// Or a composite: +// +// - [Def] - A mapping from fixed keys to [Domain]s. +// +// - [Tuple] - A fixed-length sequence of [Domain]s or +// all possible lengths repeating a [Domain]. +// +// Or top or bottom: +// +// - [Top] - Represents all possible values of all kinds. +// +// - nil - Represents no values. +// +// Or a variable: +// +// - [Var] - A value captured in the environment. +type Domain interface { + Exact() bool + WhyNotExact() string + + // decode stores this value in a Go value. If this value is not exact, this + // returns a potentially wrapped *inexactError. + decode(reflect.Value) error +} + +type inexactError struct { + valueType string + goType string +} + +func (e *inexactError) Error() string { + return fmt.Sprintf("cannot store inexact %s value in %s", e.valueType, e.goType) +} + +type decodeError struct { + path string + err error +} + +func newDecodeError(path string, err error) *decodeError { + if err, ok := err.(*decodeError); ok { + return &decodeError{path: path + "." + err.path, err: err.err} + } + return &decodeError{path: path, err: err} +} + +func (e *decodeError) Unwrap() error { + return e.err +} + +func (e *decodeError) Error() string { + return fmt.Sprintf("%s: %s", e.path, e.err) +} + +// Top represents all possible values of all possible types. +type Top struct{} + +func (t Top) Exact() bool { return false } +func (t Top) WhyNotExact() string { return "is top" } + +func (t Top) decode(rv reflect.Value) error { + // We can decode Top into a pointer-typed value as nil. + if rv.Kind() != reflect.Pointer { + return &inexactError{"top", rv.Type().String()} + } + rv.SetZero() + return nil +} + +// A Def is a mapping from field names to [Value]s. Any fields not explicitly +// listed have [Value] [Top]. +type Def struct { + fields map[string]*Value +} + +// A DefBuilder builds a [Def] one field at a time. The zero value is an empty +// [Def]. +type DefBuilder struct { + fields map[string]*Value +} + +func (b *DefBuilder) Add(name string, v *Value) { + if b.fields == nil { + b.fields = make(map[string]*Value) + } + if _, ok := b.fields[name]; ok { + panic(fmt.Sprintf("duplicate field %q", name)) + } + b.fields[name] = v +} + +// Build constructs a [Def] from the fields added to this builder. +func (b *DefBuilder) Build() Def { + return Def{maps.Clone(b.fields)} +} + +// Exact returns true if all field Values are exact. +func (d Def) Exact() bool { + for _, v := range d.fields { + if !v.Exact() { + return false + } + } + return true +} + +// WhyNotExact returns why the value is not exact +func (d Def) WhyNotExact() string { + for s, v := range d.fields { + if !v.Exact() { + w := v.WhyNotExact() + return "field " + s + ": " + w + } + } + return "" +} + +func (d Def) decode(rv reflect.Value) error { + if rv.Kind() != reflect.Struct { + return fmt.Errorf("cannot decode Def into %s", rv.Type()) + } + + var lowered map[string]string // Lower case -> canonical for d.fields. 
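+	// (Illustrative: a Def built with a lower-case key such as "asm" still
+	// decodes into an exported struct field named Asm via this fallback.)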
+ rt := rv.Type() + for fi := range rv.NumField() { + fType := rt.Field(fi) + if fType.PkgPath != "" { + continue + } + v := d.fields[fType.Name] + if v == nil { + v = topValue + + // Try a case-insensitive match + canon, ok := d.fields[strings.ToLower(fType.Name)] + if ok { + v = canon + } else { + if lowered == nil { + lowered = make(map[string]string, len(d.fields)) + for k := range d.fields { + l := strings.ToLower(k) + if k != l { + lowered[l] = k + } + } + } + canon, ok := lowered[strings.ToLower(fType.Name)] + if ok { + v = d.fields[canon] + } + } + } + if err := decodeReflect(v, rv.Field(fi)); err != nil { + return newDecodeError(fType.Name, err) + } + } + return nil +} + +func (d Def) keys() []string { + return slices.Sorted(maps.Keys(d.fields)) +} + +func (d Def) All() iter.Seq2[string, *Value] { + // TODO: We call All fairly often. It's probably bad to sort this every + // time. + keys := slices.Sorted(maps.Keys(d.fields)) + return func(yield func(string, *Value) bool) { + for _, k := range keys { + if !yield(k, d.fields[k]) { + return + } + } + } +} + +// A Tuple is a sequence of Values in one of two forms: 1. a fixed-length tuple, +// where each Value can be different or 2. a "repeated tuple", which is a Value +// repeated 0 or more times. +type Tuple struct { + vs []*Value + + // repeat, if non-nil, means this Tuple consists of an element repeated 0 or + // more times. If repeat is non-nil, vs must be nil. This is a generator + // function because we don't necessarily want *exactly* the same Value + // repeated. For example, in YAML encoding, a !sum in a repeated tuple needs + // a fresh variable in each instance. + repeat []func(envSet) (*Value, envSet) +} + +func NewTuple(vs ...*Value) Tuple { + return Tuple{vs: vs} +} + +func NewRepeat(gens ...func(envSet) (*Value, envSet)) Tuple { + return Tuple{repeat: gens} +} + +func (d Tuple) Exact() bool { + if d.repeat != nil { + return false + } + for _, v := range d.vs { + if !v.Exact() { + return false + } + } + return true +} + +func (d Tuple) WhyNotExact() string { + if d.repeat != nil { + return "d.repeat is not nil" + } + for i, v := range d.vs { + if !v.Exact() { + w := v.WhyNotExact() + return "index " + strconv.FormatInt(int64(i), 10) + ": " + w + } + } + return "" +} + +func (d Tuple) decode(rv reflect.Value) error { + if d.repeat != nil { + return &inexactError{"repeated tuple", rv.Type().String()} + } + // TODO: We could also do arrays. + if rv.Kind() != reflect.Slice { + return fmt.Errorf("cannot decode Tuple into %s", rv.Type()) + } + if rv.IsNil() || rv.Cap() < len(d.vs) { + rv.Set(reflect.MakeSlice(rv.Type(), len(d.vs), len(d.vs))) + } else { + rv.SetLen(len(d.vs)) + } + for i, v := range d.vs { + if err := decodeReflect(v, rv.Index(i)); err != nil { + return newDecodeError(fmt.Sprintf("%d", i), err) + } + } + return nil +} + +// A String represents a set of strings. It can represent the intersection of a +// set of regexps, or a single exact string. In general, the domain of a String +// is non-empty, but we do not attempt to prove emptiness of a regexp value. +type String struct { + kind stringKind + re []*regexp.Regexp // Intersection of regexps + exact string +} + +type stringKind int + +const ( + stringRegex stringKind = iota + stringExact +) + +func NewStringRegex(exprs ...string) (String, error) { + if len(exprs) == 0 { + exprs = []string{""} + } + v := String{kind: -1} + for _, expr := range exprs { + if expr == "" { + // Skip constructing the regexp. 
It won't have a "literal prefix" + // and so we wind up thinking this is a regexp instead of an exact + // (empty) string. + v = String{kind: stringExact, exact: ""} + continue + } + + re, err := regexp.Compile(`\A(?:` + expr + `)\z`) + if err != nil { + return String{}, fmt.Errorf("parsing value: %s", err) + } + + // An exact value narrows the whole domain to exact, so we're done, but + // should keep parsing. + if v.kind == stringExact { + continue + } + + if exact, complete := re.LiteralPrefix(); complete { + v = String{kind: stringExact, exact: exact} + } else { + v.kind = stringRegex + v.re = append(v.re, re) + } + } + return v, nil +} + +func NewStringExact(s string) String { + return String{kind: stringExact, exact: s} +} + +// Exact returns whether this Value is known to consist of a single string. +func (d String) Exact() bool { + return d.kind == stringExact +} + +func (d String) WhyNotExact() string { + if d.kind == stringExact { + return "" + } + return "string is not exact" +} + +func (d String) decode(rv reflect.Value) error { + if d.kind != stringExact { + return &inexactError{"regex", rv.Type().String()} + } + switch rv.Kind() { + default: + return fmt.Errorf("cannot decode String into %s", rv.Type()) + case reflect.String: + rv.SetString(d.exact) + case reflect.Int: + i, err := strconv.Atoi(d.exact) + if err != nil { + return fmt.Errorf("cannot decode String into %s: %s", rv.Type(), err) + } + rv.SetInt(int64(i)) + case reflect.Bool: + b, err := strconv.ParseBool(d.exact) + if err != nil { + return fmt.Errorf("cannot decode String into %s: %s", rv.Type(), err) + } + rv.SetBool(b) + } + return nil +} diff --git a/src/simd/_gen/unify/dot.go b/src/simd/_gen/unify/dot.go new file mode 100644 index 00000000000000..6fafa252ba78d1 --- /dev/null +++ b/src/simd/_gen/unify/dot.go @@ -0,0 +1,221 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package unify + +import ( + "bytes" + "fmt" + "html" + "io" + "os" + "os/exec" + "strings" +) + +const maxNodes = 30 + +type dotEncoder struct { + w *bytes.Buffer + + idGen int // Node name generation + valLimit int // Limit the number of Values in a subgraph + + idp identPrinter +} + +func newDotEncoder() *dotEncoder { + return &dotEncoder{ + w: new(bytes.Buffer), + } +} + +func (enc *dotEncoder) clear() { + enc.w.Reset() + enc.idGen = 0 +} + +func (enc *dotEncoder) writeTo(w io.Writer) { + fmt.Fprintln(w, "digraph {") + // Use the "new" ranking algorithm, which lets us put nodes from different + // clusters in the same rank. + fmt.Fprintln(w, "newrank=true;") + fmt.Fprintln(w, "node [shape=box, ordering=out];") + + w.Write(enc.w.Bytes()) + fmt.Fprintln(w, "}") +} + +func (enc *dotEncoder) writeSvg(w io.Writer) error { + cmd := exec.Command("dot", "-Tsvg") + in, err := cmd.StdinPipe() + if err != nil { + return err + } + var out bytes.Buffer + cmd.Stdout = &out + cmd.Stderr = os.Stderr + if err := cmd.Start(); err != nil { + return err + } + enc.writeTo(in) + in.Close() + if err := cmd.Wait(); err != nil { + return err + } + // Trim SVG header so the result can be embedded + // + // TODO: In Graphviz 10.0.1, we could use -Tsvg_inline. 
+	svg := out.Bytes()
+	if i := bytes.Index(svg, []byte("<svg")); i >= 0 {
+		svg = svg[i:]
+	}
+	_, err = w.Write(svg)
+	return err
+}
+
+func (enc *dotEncoder) newID(f string) string {
+	id := fmt.Sprintf(f, enc.idGen)
+	enc.idGen++
+	return id
+}
+
+func (enc *dotEncoder) node(label, sublabel string) string {
+	id := enc.newID("n%d")
+	l := html.EscapeString(label)
+	if sublabel != "" {
+		l += fmt.Sprintf("
%s", html.EscapeString(sublabel)) + } + fmt.Fprintf(enc.w, "%s [label=<%s>];\n", id, l) + return id +} + +func (enc *dotEncoder) edge(from, to string, label string, args ...any) { + l := fmt.Sprintf(label, args...) + fmt.Fprintf(enc.w, "%s -> %s [label=%q];\n", from, to, l) +} + +func (enc *dotEncoder) valueSubgraph(v *Value) { + enc.valLimit = maxNodes + cID := enc.newID("cluster_%d") + fmt.Fprintf(enc.w, "subgraph %s {\n", cID) + fmt.Fprintf(enc.w, "style=invis;") + vID := enc.value(v) + fmt.Fprintf(enc.w, "}\n") + // We don't need the IDs right now. + _, _ = cID, vID +} + +func (enc *dotEncoder) value(v *Value) string { + if enc.valLimit <= 0 { + id := enc.newID("n%d") + fmt.Fprintf(enc.w, "%s [label=\"...\", shape=triangle];\n", id) + return id + } + enc.valLimit-- + + switch vd := v.Domain.(type) { + default: + panic(fmt.Sprintf("unknown domain type %T", vd)) + + case nil: + return enc.node("_|_", "") + + case Top: + return enc.node("_", "") + + // TODO: Like in YAML, figure out if this is just a sum. In dot, we + // could say any unentangled variable is a sum, and if it has more than + // one reference just share the node. + + // case Sum: + // node := enc.node("Sum", "") + // for i, elt := range vd.vs { + // enc.edge(node, enc.value(elt), "%d", i) + // if enc.valLimit <= 0 { + // break + // } + // } + // return node + + case Def: + node := enc.node("Def", "") + for k, v := range vd.All() { + enc.edge(node, enc.value(v), "%s", k) + if enc.valLimit <= 0 { + break + } + } + return node + + case Tuple: + if vd.repeat == nil { + label := "Tuple" + node := enc.node(label, "") + for i, elt := range vd.vs { + enc.edge(node, enc.value(elt), "%d", i) + if enc.valLimit <= 0 { + break + } + } + return node + } else { + // TODO + return enc.node("TODO: Repeat", "") + } + + case String: + switch vd.kind { + case stringExact: + return enc.node(fmt.Sprintf("%q", vd.exact), "") + case stringRegex: + var parts []string + for _, re := range vd.re { + parts = append(parts, fmt.Sprintf("%q", re)) + } + return enc.node(strings.Join(parts, "&"), "") + } + panic("bad String kind") + + case Var: + return enc.node(fmt.Sprintf("Var %s", enc.idp.unique(vd.id)), "") + } +} + +func (enc *dotEncoder) envSubgraph(e envSet) { + enc.valLimit = maxNodes + cID := enc.newID("cluster_%d") + fmt.Fprintf(enc.w, "subgraph %s {\n", cID) + fmt.Fprintf(enc.w, "style=invis;") + vID := enc.env(e.root) + fmt.Fprintf(enc.w, "}\n") + _, _ = cID, vID +} + +func (enc *dotEncoder) env(e *envExpr) string { + switch e.kind { + default: + panic("bad kind") + case envZero: + return enc.node("0", "") + case envUnit: + return enc.node("1", "") + case envBinding: + node := enc.node(fmt.Sprintf("%q :", enc.idp.unique(e.id)), "") + enc.edge(node, enc.value(e.val), "") + return node + case envProduct: + node := enc.node("⨯", "") + for _, op := range e.operands { + enc.edge(node, enc.env(op), "") + } + return node + case envSum: + node := enc.node("+", "") + for _, op := range e.operands { + enc.edge(node, enc.env(op), "") + } + return node + } +} diff --git a/src/simd/_gen/unify/env.go b/src/simd/_gen/unify/env.go new file mode 100644 index 00000000000000..3331ff795064bb --- /dev/null +++ b/src/simd/_gen/unify/env.go @@ -0,0 +1,480 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package unify + +import ( + "fmt" + "iter" + "reflect" + "strings" +) + +// An envSet is an immutable set of environments, where each environment is a +// mapping from [ident]s to [Value]s. +// +// To keep this compact, we use an algebraic representation similar to +// relational algebra. The atoms are zero, unit, or a singular binding: +// +// - A singular binding is an environment set consisting of a single environment +// that binds a single ident to a single value. +// +// - Zero is the empty set. +// +// - Unit is an environment set consisting of a single, empty environment (no +// bindings). +// +// From these, we build up more complex sets of environments using sums and +// cross products: +// +// - A sum is simply the union of the two environment sets. +// +// - A cross product is the Cartesian product of the two environment sets, +// followed by combining each pair of environments. Combining simply merges the +// two mappings, but fails if the mappings overlap. +// +// For example, to represent {{x: 1, y: 1}, {x: 2, y: 2}}, we build the two +// environments and sum them: +// +// ({x: 1} ⨯ {y: 1}) + ({x: 2} ⨯ {y: 2}) +// +// If we add a third variable z that can be 1 or 2, independent of x and y, we +// get four logical environments: +// +// {x: 1, y: 1, z: 1} +// {x: 2, y: 2, z: 1} +// {x: 1, y: 1, z: 2} +// {x: 2, y: 2, z: 2} +// +// This could be represented as a sum of all four environments, but because z is +// independent, we can use a more compact representation: +// +// (({x: 1} ⨯ {y: 1}) + ({x: 2} ⨯ {y: 2})) ⨯ ({z: 1} + {z: 2}) +// +// Environment sets obey commutative algebra rules: +// +// e + 0 = e +// e ⨯ 0 = 0 +// e ⨯ 1 = e +// e + f = f + e +// e ⨯ f = f ⨯ e +type envSet struct { + root *envExpr +} + +type envExpr struct { + // TODO: A tree-based data structure for this may not be ideal, since it + // involves a lot of walking to find things and we often have to do deep + // rewrites anyway for partitioning. Would some flattened array-style + // representation be better, possibly combined with an index of ident uses? + // We could even combine that with an immutable array abstraction (ala + // Clojure) that could enable more efficient construction operations. + + kind envExprKind + + // For envBinding + id *ident + val *Value + + // For sum or product. Len must be >= 2 and none of the elements can have + // the same kind as this node. + operands []*envExpr +} + +type envExprKind byte + +const ( + envZero envExprKind = iota + envUnit + envProduct + envSum + envBinding +) + +var ( + // topEnv is the unit value (multiplicative identity) of a [envSet]. + topEnv = envSet{envExprUnit} + // bottomEnv is the zero value (additive identity) of a [envSet]. + bottomEnv = envSet{envExprZero} + + envExprZero = &envExpr{kind: envZero} + envExprUnit = &envExpr{kind: envUnit} +) + +// bind binds id to each of vals in e. +// +// Its panics if id is already bound in e. +// +// Environments are typically initially constructed by starting with [topEnv] +// and calling bind one or more times. +func (e envSet) bind(id *ident, vals ...*Value) envSet { + if e.isEmpty() { + return bottomEnv + } + + // TODO: If any of vals are _, should we just drop that val? We're kind of + // inconsistent about whether an id missing from e means id is invalid or + // means id is _. + + // Check that id isn't present in e. + for range e.root.bindings(id) { + panic("id " + id.name + " already present in environment") + } + + // Create a sum of all the values. 
+ bindings := make([]*envExpr, 0, 1) + for _, val := range vals { + bindings = append(bindings, &envExpr{kind: envBinding, id: id, val: val}) + } + + // Multiply it in. + return envSet{newEnvExprProduct(e.root, newEnvExprSum(bindings...))} +} + +func (e envSet) isEmpty() bool { + return e.root.kind == envZero +} + +// bindings yields all [envBinding] nodes in e with the given id. If id is nil, +// it yields all binding nodes. +func (e *envExpr) bindings(id *ident) iter.Seq[*envExpr] { + // This is just a pre-order walk and it happens this is the only thing we + // need a pre-order walk for. + return func(yield func(*envExpr) bool) { + var rec func(e *envExpr) bool + rec = func(e *envExpr) bool { + if e.kind == envBinding && (id == nil || e.id == id) { + if !yield(e) { + return false + } + } + for _, o := range e.operands { + if !rec(o) { + return false + } + } + return true + } + rec(e) + } +} + +// newEnvExprProduct constructs a product node from exprs, performing +// simplifications. It does NOT check that bindings are disjoint. +func newEnvExprProduct(exprs ...*envExpr) *envExpr { + factors := make([]*envExpr, 0, 2) + for _, expr := range exprs { + switch expr.kind { + case envZero: + return envExprZero + case envUnit: + // No effect on product + case envProduct: + factors = append(factors, expr.operands...) + default: + factors = append(factors, expr) + } + } + + if len(factors) == 0 { + return envExprUnit + } else if len(factors) == 1 { + return factors[0] + } + return &envExpr{kind: envProduct, operands: factors} +} + +// newEnvExprSum constructs a sum node from exprs, performing simplifications. +func newEnvExprSum(exprs ...*envExpr) *envExpr { + // TODO: If all of envs are products (or bindings), factor any common terms. + // E.g., x * y + x * z ==> x * (y + z). This is easy to do for binding + // terms, but harder to do for more general terms. + + var have smallSet[*envExpr] + terms := make([]*envExpr, 0, 2) + for _, expr := range exprs { + switch expr.kind { + case envZero: + // No effect on sum + case envSum: + for _, expr1 := range expr.operands { + if have.Add(expr1) { + terms = append(terms, expr1) + } + } + default: + if have.Add(expr) { + terms = append(terms, expr) + } + } + } + + if len(terms) == 0 { + return envExprZero + } else if len(terms) == 1 { + return terms[0] + } + return &envExpr{kind: envSum, operands: terms} +} + +func crossEnvs(env1, env2 envSet) envSet { + // Confirm that envs have disjoint idents. + var ids1 smallSet[*ident] + for e := range env1.root.bindings(nil) { + ids1.Add(e.id) + } + for e := range env2.root.bindings(nil) { + if ids1.Has(e.id) { + panic(fmt.Sprintf("%s bound on both sides of cross-product", e.id.name)) + } + } + + return envSet{newEnvExprProduct(env1.root, env2.root)} +} + +func unionEnvs(envs ...envSet) envSet { + exprs := make([]*envExpr, len(envs)) + for i := range envs { + exprs[i] = envs[i].root + } + return envSet{newEnvExprSum(exprs...)} +} + +// envPartition is a subset of an env where id is bound to value in all +// deterministic environments. +type envPartition struct { + id *ident + value *Value + env envSet +} + +// partitionBy splits e by distinct bindings of id and removes id from each +// partition. +// +// If there are environments in e where id is not bound, they will not be +// reflected in any partition. +// +// It panics if e is bottom, since attempting to partition an empty environment +// set almost certainly indicates a bug. 
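+//
+// For example, partitioning ({x: 1} + {x: 2}) ⨯ {y: 3} by x yields two
+// partitions: one binding x to 1 and one binding x to 2, each with the
+// remaining environment {y: 3}.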
+func (e envSet) partitionBy(id *ident) []envPartition { + if e.isEmpty() { + // We could return zero partitions, but getting here at all almost + // certainly indicates a bug. + panic("cannot partition empty environment set") + } + + // Emit a partition for each value of id. + var seen smallSet[*Value] + var parts []envPartition + for n := range e.root.bindings(id) { + if !seen.Add(n.val) { + // Already emitted a partition for this value. + continue + } + + parts = append(parts, envPartition{ + id: id, + value: n.val, + env: envSet{e.root.substitute(id, n.val)}, + }) + } + + return parts +} + +// substitute replaces bindings of id to val with 1 and bindings of id to any +// other value with 0 and simplifies the result. +func (e *envExpr) substitute(id *ident, val *Value) *envExpr { + switch e.kind { + default: + panic("bad kind") + + case envZero, envUnit: + return e + + case envBinding: + if e.id != id { + return e + } else if e.val != val { + return envExprZero + } else { + return envExprUnit + } + + case envProduct, envSum: + // Substitute each operand. Sometimes, this won't change anything, so we + // build the new operands list lazily. + var nOperands []*envExpr + for i, op := range e.operands { + nOp := op.substitute(id, val) + if nOperands == nil && op != nOp { + // Operand diverged; initialize nOperands. + nOperands = make([]*envExpr, 0, len(e.operands)) + nOperands = append(nOperands, e.operands[:i]...) + } + if nOperands != nil { + nOperands = append(nOperands, nOp) + } + } + if nOperands == nil { + // Nothing changed. + return e + } + if e.kind == envProduct { + return newEnvExprProduct(nOperands...) + } else { + return newEnvExprSum(nOperands...) + } + } +} + +// A smallSet is a set optimized for stack allocation when small. +type smallSet[T comparable] struct { + array [32]T + n int + + m map[T]struct{} +} + +// Has returns whether val is in set. +func (s *smallSet[T]) Has(val T) bool { + arr := s.array[:s.n] + for i := range arr { + if arr[i] == val { + return true + } + } + _, ok := s.m[val] + return ok +} + +// Add adds val to the set and returns true if it was added (not already +// present). +func (s *smallSet[T]) Add(val T) bool { + // Test for presence. + if s.Has(val) { + return false + } + + // Add it + if s.n < len(s.array) { + s.array[s.n] = val + s.n++ + } else { + if s.m == nil { + s.m = make(map[T]struct{}) + } + s.m[val] = struct{}{} + } + return true +} + +type ident struct { + _ [0]func() // Not comparable (only compare *ident) + name string +} + +type Var struct { + id *ident +} + +func (d Var) Exact() bool { + // These can't appear in concrete Values. + panic("Exact called on non-concrete Value") +} + +func (d Var) WhyNotExact() string { + // These can't appear in concrete Values. + return "WhyNotExact called on non-concrete Value" +} + +func (d Var) decode(rv reflect.Value) error { + return &inexactError{"var", rv.Type().String()} +} + +func (d Var) unify(w *Value, e envSet, swap bool, uf *unifier) (Domain, envSet, error) { + // TODO: Vars from !sums in the input can have a huge number of values. + // Unifying these could be way more efficient with some indexes over any + // exact values we can pull out, like Def fields that are exact Strings. + // Maybe we try to produce an array of yes/no/maybe matches and then we only + // have to do deeper evaluation of the maybes. We could probably cache this + // on an envTerm. It may also help to special-case Var/Var unification to + // pick which one to index versus enumerate. 
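+	//
+	// For example, when d is bound to a and to b in different environments
+	// (as happens for !sum [a, b]), we partition e by d, unify w against each
+	// binding separately, and union the environments of the branches that
+	// survive.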
+ + if vd, ok := w.Domain.(Var); ok && d.id == vd.id { + // Unifying $x with $x results in $x. If we descend into this we'll have + // problems because we strip $x out of the environment to keep ourselves + // honest and then can't find it on the other side. + // + // TODO: I'm not positive this is the right fix. + return vd, e, nil + } + + // We need to unify w with the value of d in each possible environment. We + // can save some work by grouping environments by the value of d, since + // there will be a lot of redundancy here. + var nEnvs []envSet + envParts := e.partitionBy(d.id) + for i, envPart := range envParts { + exit := uf.enterVar(d.id, i) + // Each branch logically gets its own copy of the initial environment + // (narrowed down to just this binding of the variable), and each branch + // may result in different changes to that starting environment. + res, e2, err := w.unify(envPart.value, envPart.env, swap, uf) + exit.exit() + if err != nil { + return nil, envSet{}, err + } + if res.Domain == nil { + // This branch entirely failed to unify, so it's gone. + continue + } + nEnv := e2.bind(d.id, res) + nEnvs = append(nEnvs, nEnv) + } + + if len(nEnvs) == 0 { + // All branches failed + return nil, bottomEnv, nil + } + + // The effect of this is entirely captured in the environment. We can return + // back the same Bind node. + return d, unionEnvs(nEnvs...), nil +} + +// An identPrinter maps [ident]s to unique string names. +type identPrinter struct { + ids map[*ident]string + idGen map[string]int +} + +func (p *identPrinter) unique(id *ident) string { + if p.ids == nil { + p.ids = make(map[*ident]string) + p.idGen = make(map[string]int) + } + + name, ok := p.ids[id] + if !ok { + gen := p.idGen[id.name] + p.idGen[id.name]++ + if gen == 0 { + name = id.name + } else { + name = fmt.Sprintf("%s#%d", id.name, gen) + } + p.ids[id] = name + } + + return name +} + +func (p *identPrinter) slice(ids []*ident) string { + var strs []string + for _, id := range ids { + strs = append(strs, p.unique(id)) + } + return fmt.Sprintf("[%s]", strings.Join(strs, ", ")) +} diff --git a/src/simd/_gen/unify/html.go b/src/simd/_gen/unify/html.go new file mode 100644 index 00000000000000..036b80e276b978 --- /dev/null +++ b/src/simd/_gen/unify/html.go @@ -0,0 +1,123 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package unify + +import ( + "fmt" + "html" + "io" + "strings" +) + +func (t *tracer) writeHTML(w io.Writer) { + if !t.saveTree { + panic("writeHTML called without tracer.saveTree") + } + + fmt.Fprintf(w, "", htmlCSS) + for _, root := range t.trees { + dot := newDotEncoder() + html := htmlTracer{w: w, dot: dot} + html.writeTree(root) + } + fmt.Fprintf(w, "\n") +} + +const htmlCSS = ` +.unify { + display: grid; + grid-auto-columns: min-content; + text-align: center; +} + +.header { + grid-row: 1; + font-weight: bold; + padding: 0.25em; + position: sticky; + top: 0; + background: white; +} + +.envFactor { + display: grid; + grid-auto-rows: min-content; + grid-template-columns: subgrid; + text-align: center; +} +` + +type htmlTracer struct { + w io.Writer + dot *dotEncoder + svgs map[any]string +} + +func (t *htmlTracer) writeTree(node *traceTree) { + // TODO: This could be really nice. + // + // - Put nodes that were unified on the same rank with {rank=same; a; b} + // + // - On hover, highlight nodes that node was unified with and the result. 
If + // it's a variable, highlight it in the environment, too. + // + // - On click, show the details of unifying that node. + // + // This could be the only way to navigate, without necessarily needing the + // whole nest of nodes. + + // TODO: It might be possible to write this out on the fly. + + t.emit([]*Value{node.v, node.w}, []string{"v", "w"}, node.envIn) + + // Render children. + for i, child := range node.children { + if i >= 10 { + fmt.Fprintf(t.w, `
...
`) + break + } + fmt.Fprintf(t.w, `
%s`, html.EscapeString(child.label)) + t.writeTree(child) + fmt.Fprintf(t.w, "
\n") + } + + // Render result. + if node.err != nil { + fmt.Fprintf(t.w, "Error: %s\n", html.EscapeString(node.err.Error())) + } else { + t.emit([]*Value{node.res}, []string{"res"}, node.env) + } +} + +func htmlSVG[Key comparable](t *htmlTracer, f func(Key), arg Key) string { + if s, ok := t.svgs[arg]; ok { + return s + } + var buf strings.Builder + f(arg) + t.dot.writeSvg(&buf) + t.dot.clear() + svg := buf.String() + if t.svgs == nil { + t.svgs = make(map[any]string) + } + t.svgs[arg] = svg + buf.Reset() + return svg +} + +func (t *htmlTracer) emit(vs []*Value, labels []string, env envSet) { + fmt.Fprintf(t.w, `
`) + for i, v := range vs { + fmt.Fprintf(t.w, `
%s
`, i+1, html.EscapeString(labels[i])) + fmt.Fprintf(t.w, `
%s
`, i+1, htmlSVG(t, t.dot.valueSubgraph, v)) + } + col := len(vs) + + fmt.Fprintf(t.w, `
in
`, col+1) + fmt.Fprintf(t.w, `
%s
`, col+1, htmlSVG(t, t.dot.envSubgraph, env)) + + fmt.Fprintf(t.w, `
`) +} diff --git a/src/simd/_gen/unify/pos.go b/src/simd/_gen/unify/pos.go new file mode 100644 index 00000000000000..4f7046a41a9acb --- /dev/null +++ b/src/simd/_gen/unify/pos.go @@ -0,0 +1,33 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package unify + +import ( + "fmt" +) + +type Pos struct { + Path string + Line int +} + +func (p Pos) String() string { + var b []byte + b, _ = p.AppendText(b) + return string(b) +} + +func (p Pos) AppendText(b []byte) ([]byte, error) { + if p.Line == 0 { + if p.Path == "" { + return append(b, "?:?"...), nil + } else { + return append(b, p.Path...), nil + } + } else if p.Path == "" { + return fmt.Appendf(b, "?:%d", p.Line), nil + } + return fmt.Appendf(b, "%s:%d", p.Path, p.Line), nil +} diff --git a/src/simd/_gen/unify/testdata/stress.yaml b/src/simd/_gen/unify/testdata/stress.yaml new file mode 100644 index 00000000000000..e4478536804dbd --- /dev/null +++ b/src/simd/_gen/unify/testdata/stress.yaml @@ -0,0 +1,33 @@ +# In the original representation of environments, this caused an exponential +# blowup in time and allocation. With that representation, this took about 20 +# seconds on my laptop and had a max RSS of ~12 GB. Big enough to be really +# noticeable, but not so big it's likely to crash a developer machine. With the +# better environment representation, it runs almost instantly and has an RSS of +# ~90 MB. +unify: +- !sum + - !sum [1, 2] + - !sum [3, 4] + - !sum [5, 6] + - !sum [7, 8] + - !sum [9, 10] + - !sum [11, 12] + - !sum [13, 14] + - !sum [15, 16] + - !sum [17, 18] + - !sum [19, 20] + - !sum [21, 22] +- !sum + - !sum [1, 2] + - !sum [3, 4] + - !sum [5, 6] + - !sum [7, 8] + - !sum [9, 10] + - !sum [11, 12] + - !sum [13, 14] + - !sum [15, 16] + - !sum [17, 18] + - !sum [19, 20] + - !sum [21, 22] +all: + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22] diff --git a/src/simd/_gen/unify/testdata/unify.yaml b/src/simd/_gen/unify/testdata/unify.yaml new file mode 100644 index 00000000000000..131e527cfac91b --- /dev/null +++ b/src/simd/_gen/unify/testdata/unify.yaml @@ -0,0 +1,174 @@ +# Basic tests of unification + +# +# Terminals +# + +unify: +- _ +- _ +want: + _ +--- +unify: +- _ +- test +want: + test +--- +unify: +- test +- t?est +want: + test +--- +unify: +- 1 +- 1 +want: + 1 +--- +unify: +- test +- foo +want: + _|_ + +# +# Tuple +# + +--- +unify: +- [a, b] +- [a, b] +want: + [a, b] +--- +unify: +- [a, _] +- [_, b] +want: + [a, b] +--- +unify: +- ["ab?c", "de?f"] +- [ac, def] +want: + [ac, def] + +# +# Repeats +# + +--- +unify: +- !repeat [a] +- [_] +want: + [a] +--- +unify: +- !repeat [a] +- [_, _] +want: + [a, a] +--- +unify: +- !repeat [a] +- [b] +want: + _|_ +--- +unify: +- !repeat [xy*] +- [x, xy, xyy] +want: + [x, xy, xyy] +--- +unify: +- !repeat [xy*] +- !repeat ["xz?y*"] +- [x, xy, xyy] +want: + [x, xy, xyy] +--- +unify: +- !repeat [!sum [a, b]] +- [a, b, a] +all: +- [a, b, a] +--- +unify: +- !repeat [!sum [a, b]] +- !repeat [!sum [b, c]] +- [b, b, b] +all: +- [b, b, b] +--- +unify: +- !repeat [!sum [a, b]] +- !repeat [!sum [b, c]] +- [a] +all: [] + +# +# Def +# + +--- +unify: +- {a: a, b: b} +- {a: a, b: b} +want: + {a: a, b: b} +--- +unify: +- {a: a} +- {b: b} +want: + {a: a, b: b} + +# +# Sum +# + +--- +unify: +- !sum [1, 2] +- !sum [2, 3] +all: +- 2 +--- +unify: +- !sum [{label: a, value: abc}, {label: b, value: def}] +- !sum [{value: "ab?c", extra: d}, {value: "def?", extra: g}] 
+all: +- {extra: d, label: a, value: abc} +- {extra: g, label: b, value: def} +--- +# A sum of repeats must deal with different dynamically-created variables in +# each branch. +unify: +- !sum [!repeat [a], !repeat [b]] +- [a, a, a] +all: +- [a, a, a] +--- +unify: +- !sum [!repeat [a], !repeat [b]] +- [a, a, b] +all: [] +--- +# Exercise sumEnvs with more than one result +unify: +- !sum + - [a|b, c|d] + - [e, g] +- [!sum [a, b, e, f], !sum [c, d, g, h]] +all: +- [a, c] +- [a, d] +- [b, c] +- [b, d] +- [e, g] diff --git a/src/simd/_gen/unify/testdata/vars.yaml b/src/simd/_gen/unify/testdata/vars.yaml new file mode 100644 index 00000000000000..fe8a57e4e3396e --- /dev/null +++ b/src/simd/_gen/unify/testdata/vars.yaml @@ -0,0 +1,175 @@ +# +# Basic tests +# + +name: "basic string" +unify: +- $x +- test +all: +- test +--- +name: "basic tuple" +unify: +- [$x, $x] +- [test, test] +all: +- [test, test] +--- +name: "three tuples" +unify: +- [$x, $x] +- [test, _] +- [_, test] +all: +- [test, test] +--- +name: "basic def" +unify: +- {a: $x, b: $x} +- {a: test, b: test} +all: +- {a: test, b: test} +--- +name: "three defs" +unify: +- {a: $x, b: $x} +- {a: test} +- {b: test} +all: +- {a: test, b: test} + +# +# Bottom tests +# + +--- +name: "basic bottom" +unify: +- [$x, $x] +- [test, foo] +all: [] +--- +name: "three-way bottom" +unify: +- [$x, $x] +- [test, _] +- [_, foo] +all: [] + +# +# Basic sum tests +# + +--- +name: "basic sum" +unify: +- $x +- !sum [a, b] +all: +- a +- b +--- +name: "sum of tuples" +unify: +- [$x] +- !sum [[a], [b]] +all: +- [a] +- [b] +--- +name: "acausal sum" +unify: +- [_, !sum [a, b]] +- [$x, $x] +all: +- [a, a] +- [b, b] + +# +# Transitivity tests +# + +--- +name: "transitivity" +unify: +- [_, _, _, test] +- [$x, $x, _, _] +- [ _, $x, $x, _] +- [ _, _, $x, $x] +all: +- [test, test, test, test] + +# +# Multiple vars +# + +--- +name: "basic uncorrelated vars" +unify: +- - !sum [1, 2] + - !sum [3, 4] +- - $a + - $b +all: +- [1, 3] +- [1, 4] +- [2, 3] +- [2, 4] +--- +name: "uncorrelated vars" +unify: +- - !sum [1, 2] + - !sum [3, 4] + - !sum [1, 2] +- - $a + - $b + - $a +all: +- [1, 3, 1] +- [1, 4, 1] +- [2, 3, 2] +- [2, 4, 2] +--- +name: "entangled vars" +unify: +- - !sum [[1,2],[3,4]] + - !sum [[2,1],[3,4],[4,3]] +- - [$a, $b] + - [$b, $a] +all: +- - [1, 2] + - [2, 1] +- - [3, 4] + - [4, 3] + +# +# End-to-end examples +# + +--- +name: "end-to-end" +unify: +- go: Add + in: + - go: $t + - go: $t +- in: !repeat + - !sum + - go: Int32x4 + base: int + - go: Uint32x4 + base: uint +all: +- go: Add + in: + - base: int + go: Int32x4 + - base: int + go: Int32x4 +- go: Add + in: + - base: uint + go: Uint32x4 + - base: uint + go: Uint32x4 diff --git a/src/simd/_gen/unify/trace.go b/src/simd/_gen/unify/trace.go new file mode 100644 index 00000000000000..b0aa35255e466b --- /dev/null +++ b/src/simd/_gen/unify/trace.go @@ -0,0 +1,168 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package unify + +import ( + "fmt" + "io" + "strings" + + "gopkg.in/yaml.v3" +) + +// debugDotInHTML, if true, includes dot code for all graphs in the HTML. Useful +// for debugging the dot output itself. +const debugDotInHTML = false + +var Debug struct { + // UnifyLog, if non-nil, receives a streaming text trace of unification. + UnifyLog io.Writer + + // HTML, if non-nil, writes an HTML trace of unification to HTML. 
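+	// The trace document is written to this writer by [Unify].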
+ HTML io.Writer +} + +type tracer struct { + logw io.Writer + + enc yamlEncoder // Print consistent idents throughout + + saveTree bool // if set, record tree; required for HTML output + + path []string + + node *traceTree + trees []*traceTree +} + +type traceTree struct { + label string // Identifies this node as a child of parent + v, w *Value // Unification inputs + envIn envSet + res *Value // Unification result + env envSet + err error // or error + + parent *traceTree + children []*traceTree +} + +type tracerExit struct { + t *tracer + len int + node *traceTree +} + +func (t *tracer) enter(pat string, vals ...any) tracerExit { + if t == nil { + return tracerExit{} + } + + label := fmt.Sprintf(pat, vals...) + + var p *traceTree + if t.saveTree { + p = t.node + if p != nil { + t.node = &traceTree{label: label, parent: p} + p.children = append(p.children, t.node) + } + } + + t.path = append(t.path, label) + return tracerExit{t, len(t.path) - 1, p} +} + +func (t *tracer) enterVar(id *ident, branch int) tracerExit { + if t == nil { + return tracerExit{} + } + + // Use the tracer's ident printer + return t.enter("Var %s br %d", t.enc.idp.unique(id), branch) +} + +func (te tracerExit) exit() { + if te.t == nil { + return + } + te.t.path = te.t.path[:te.len] + te.t.node = te.node +} + +func indentf(prefix string, pat string, vals ...any) string { + s := fmt.Sprintf(pat, vals...) + if len(prefix) == 0 { + return s + } + if !strings.Contains(s, "\n") { + return prefix + s + } + + indent := prefix + if strings.TrimLeft(prefix, " ") != "" { + // Prefix has non-space characters in it. Construct an all space-indent. + indent = strings.Repeat(" ", len(prefix)) + } + return prefix + strings.ReplaceAll(s, "\n", "\n"+indent) +} + +func yamlf(prefix string, node *yaml.Node) string { + b, err := yaml.Marshal(node) + if err != nil { + return fmt.Sprintf("", err) + } + return strings.TrimRight(indentf(prefix, "%s", b), " \n") +} + +func (t *tracer) logf(pat string, vals ...any) { + if t == nil || t.logw == nil { + return + } + prefix := fmt.Sprintf("[%s] ", strings.Join(t.path, "/")) + s := indentf(prefix, pat, vals...) + s = strings.TrimRight(s, " \n") + fmt.Fprintf(t.logw, "%s\n", s) +} + +func (t *tracer) traceUnify(v, w *Value, e envSet) { + if t == nil { + return + } + + t.logf("Unify\n%s\nwith\n%s\nin\n%s", + yamlf(" ", t.enc.value(v)), + yamlf(" ", t.enc.value(w)), + yamlf(" ", t.enc.env(e))) + + if t.saveTree { + if t.node == nil { + t.node = &traceTree{} + t.trees = append(t.trees, t.node) + } + t.node.v, t.node.w, t.node.envIn = v, w, e + } +} + +func (t *tracer) traceDone(res *Value, e envSet, err error) { + if t == nil { + return + } + + if err != nil { + t.logf("==> %s", err) + } else { + t.logf("==>\n%s", yamlf(" ", t.enc.closure(Closure{res, e}))) + } + + if t.saveTree { + node := t.node + if node == nil { + panic("popped top of trace stack") + } + node.res, node.err = res, err + node.env = e + } +} diff --git a/src/simd/_gen/unify/unify.go b/src/simd/_gen/unify/unify.go new file mode 100644 index 00000000000000..9d22bf1915ee39 --- /dev/null +++ b/src/simd/_gen/unify/unify.go @@ -0,0 +1,322 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package unify implements unification of structured values. 
+// +// A [Value] represents a possibly infinite set of concrete values, where a +// value is either a string ([String]), a tuple of values ([Tuple]), or a +// string-keyed map of values called a "def" ([Def]). These sets can be further +// constrained by variables ([Var]). A [Value] combined with bindings of +// variables is a [Closure]. +// +// [Unify] finds a [Closure] that satisfies two or more other [Closure]s. This +// can be thought of as intersecting the sets represented by these Closures' +// values, or as the greatest lower bound/infimum of these Closures. If no such +// Closure exists, the result of unification is "bottom", or the empty set. +// +// # Examples +// +// The regular expression "a*" is the infinite set of strings of zero or more +// "a"s. "a*" can be unified with "a" or "aa" or "aaa", and the result is just +// "a", "aa", or "aaa", respectively. However, unifying "a*" with "b" fails +// because there are no values that satisfy both. +// +// Sums express sets directly. For example, !sum [a, b] is the set consisting of +// "a" and "b". Unifying this with !sum [b, c] results in just "b". This also +// makes it easy to demonstrate that unification isn't necessarily a single +// concrete value. For example, unifying !sum [a, b, c] with !sum [b, c, d] +// results in two concrete values: "b" and "c". +// +// The special value _ or "top" represents all possible values. Unifying _ with +// any value x results in x. +// +// Unifying composite values—tuples and defs—unifies their elements. +// +// The value [a*, aa] is an infinite set of tuples. If we unify that with the +// value [aaa, a*], the only possible value that satisfies both is [aaa, aa]. +// Likewise, this is the intersection of the sets described by these two values. +// +// Defs are similar to tuples, but they are indexed by strings and don't have a +// fixed length. For example, {x: a, y: b} is a def with two fields. Any field +// not mentioned in a def is implicitly top. Thus, unifying this with {y: b, z: +// c} results in {x: a, y: b, z: c}. +// +// Variables constrain values. For example, the value [$x, $x] represents all +// tuples whose first and second values are the same, but doesn't otherwise +// constrain that value. Thus, this set includes [a, a] as well as [[b, c, d], +// [b, c, d]], but it doesn't include [a, b]. +// +// Sums are internally implemented as fresh variables that are simultaneously +// bound to all values of the sum. That is !sum [a, b] is actually $var (where +// var is some fresh name), closed under the environment $var=a | $var=b. +package unify + +import ( + "errors" + "fmt" + "slices" +) + +// Unify computes a Closure that satisfies each input Closure. If no such +// Closure exists, it returns bottom. 
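+//
+// For example, given two closures c1 and c2 (say, read with [ReadFile]),
+//
+//	u, err := Unify(c1, c2)
+//	if err != nil {
+//		// The inputs are structurally incompatible (e.g. a kind mismatch).
+//	}
+//	for v := range u.All() {
+//		// v is one concrete value satisfying both c1 and c2.
+//	}
+//
+// enumerates the concrete results; if the unification is bottom, u.All yields
+// nothing.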
+func Unify(closures ...Closure) (Closure, error) { + if len(closures) == 0 { + return Closure{topValue, topEnv}, nil + } + + var trace *tracer + if Debug.UnifyLog != nil || Debug.HTML != nil { + trace = &tracer{ + logw: Debug.UnifyLog, + saveTree: Debug.HTML != nil, + } + } + + unified := closures[0] + for _, c := range closures[1:] { + var err error + uf := newUnifier() + uf.tracer = trace + e := crossEnvs(unified.env, c.env) + unified.val, unified.env, err = unified.val.unify(c.val, e, false, uf) + if Debug.HTML != nil { + uf.writeHTML(Debug.HTML) + } + if err != nil { + return Closure{}, err + } + } + + return unified, nil +} + +type unifier struct { + *tracer +} + +func newUnifier() *unifier { + return &unifier{} +} + +// errDomains is a sentinel error used between unify and unify1 to indicate that +// unify1 could not unify the domains of the two values. +var errDomains = errors.New("cannot unify domains") + +func (v *Value) unify(w *Value, e envSet, swap bool, uf *unifier) (*Value, envSet, error) { + if swap { + // Put the values in order. This just happens to be a handy choke-point + // to do this at. + v, w = w, v + } + + uf.traceUnify(v, w, e) + + d, e2, err := v.unify1(w, e, false, uf) + if err == errDomains { + // Try the other order. + d, e2, err = w.unify1(v, e, true, uf) + if err == errDomains { + // Okay, we really can't unify these. + err = fmt.Errorf("cannot unify %T (%s) and %T (%s): kind mismatch", v.Domain, v.PosString(), w.Domain, w.PosString()) + } + } + if err != nil { + uf.traceDone(nil, envSet{}, err) + return nil, envSet{}, err + } + res := unified(d, v, w) + uf.traceDone(res, e2, nil) + if d == nil { + // Double check that a bottom Value also has a bottom env. + if !e2.isEmpty() { + panic("bottom Value has non-bottom environment") + } + } + + return res, e2, nil +} + +func (v *Value) unify1(w *Value, e envSet, swap bool, uf *unifier) (Domain, envSet, error) { + // TODO: If there's an error, attach position information to it. + + vd, wd := v.Domain, w.Domain + + // Bottom returns bottom, and eliminates all possible environments. + if vd == nil || wd == nil { + return nil, bottomEnv, nil + } + + // Top always returns the other. + if _, ok := vd.(Top); ok { + return wd, e, nil + } + + // Variables + if vd, ok := vd.(Var); ok { + return vd.unify(w, e, swap, uf) + } + + // Composite values + if vd, ok := vd.(Def); ok { + if wd, ok := wd.(Def); ok { + return vd.unify(wd, e, swap, uf) + } + } + if vd, ok := vd.(Tuple); ok { + if wd, ok := wd.(Tuple); ok { + return vd.unify(wd, e, swap, uf) + } + } + + // Scalar values + if vd, ok := vd.(String); ok { + if wd, ok := wd.(String); ok { + res := vd.unify(wd) + if res == nil { + e = bottomEnv + } + return res, e, nil + } + } + + return nil, envSet{}, errDomains +} + +func (d Def) unify(o Def, e envSet, swap bool, uf *unifier) (Domain, envSet, error) { + out := Def{fields: make(map[string]*Value)} + + // Check keys of d against o. + for key, dv := range d.All() { + ov, ok := o.fields[key] + if !ok { + // ov is implicitly Top. Bypass unification. + out.fields[key] = dv + continue + } + exit := uf.enter("%s", key) + res, e2, err := dv.unify(ov, e, swap, uf) + exit.exit() + if err != nil { + return nil, envSet{}, err + } else if res.Domain == nil { + // No match. + return nil, bottomEnv, nil + } + out.fields[key] = res + e = e2 + } + // Check keys of o that we didn't already check. These all implicitly match + // because we know the corresponding fields in d are all Top. 
+ for key, dv := range o.All() { + if _, ok := d.fields[key]; !ok { + out.fields[key] = dv + } + } + return out, e, nil +} + +func (v Tuple) unify(w Tuple, e envSet, swap bool, uf *unifier) (Domain, envSet, error) { + if v.repeat != nil && w.repeat != nil { + // Since we generate the content of these lazily, there's not much we + // can do but just stick them on a list to unify later. + return Tuple{repeat: concat(v.repeat, w.repeat)}, e, nil + } + + // Expand any repeated tuples. + tuples := make([]Tuple, 0, 2) + if v.repeat == nil { + tuples = append(tuples, v) + } else { + v2, e2 := v.doRepeat(e, len(w.vs)) + tuples = append(tuples, v2...) + e = e2 + } + if w.repeat == nil { + tuples = append(tuples, w) + } else { + w2, e2 := w.doRepeat(e, len(v.vs)) + tuples = append(tuples, w2...) + e = e2 + } + + // Now unify all of the tuples (usually this will be just 2 tuples) + out := tuples[0] + for _, t := range tuples[1:] { + if len(out.vs) != len(t.vs) { + uf.logf("tuple length mismatch") + return nil, bottomEnv, nil + } + zs := make([]*Value, len(out.vs)) + for i, v1 := range out.vs { + exit := uf.enter("%d", i) + z, e2, err := v1.unify(t.vs[i], e, swap, uf) + exit.exit() + if err != nil { + return nil, envSet{}, err + } else if z.Domain == nil { + return nil, bottomEnv, nil + } + zs[i] = z + e = e2 + } + out = Tuple{vs: zs} + } + + return out, e, nil +} + +// doRepeat creates a fixed-length tuple from a repeated tuple. The caller is +// expected to unify the returned tuples. +func (v Tuple) doRepeat(e envSet, n int) ([]Tuple, envSet) { + res := make([]Tuple, len(v.repeat)) + for i, gen := range v.repeat { + res[i].vs = make([]*Value, n) + for j := range n { + res[i].vs[j], e = gen(e) + } + } + return res, e +} + +// unify intersects the domains of two [String]s. If it can prove that this +// domain is empty, it returns nil (bottom). +// +// TODO: Consider splitting literals and regexps into two domains. +func (v String) unify(w String) Domain { + // Unification is symmetric, so put them in order of string kind so we only + // have to deal with half the cases. + if v.kind > w.kind { + v, w = w, v + } + + switch v.kind { + case stringRegex: + switch w.kind { + case stringRegex: + // Construct a match against all of the regexps + return String{kind: stringRegex, re: slices.Concat(v.re, w.re)} + case stringExact: + for _, re := range v.re { + if !re.MatchString(w.exact) { + return nil + } + } + return w + } + case stringExact: + if v.exact != w.exact { + return nil + } + return v + } + panic("bad string kind") +} + +func concat[T any](s1, s2 []T) []T { + // Reuse s1 or s2 if possible. + if len(s1) == 0 { + return s2 + } + return append(s1[:len(s1):len(s1)], s2...) +} diff --git a/src/simd/_gen/unify/unify_test.go b/src/simd/_gen/unify/unify_test.go new file mode 100644 index 00000000000000..8071e0c959706c --- /dev/null +++ b/src/simd/_gen/unify/unify_test.go @@ -0,0 +1,154 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package unify + +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "slices" + "strings" + "testing" + + "gopkg.in/yaml.v3" +) + +func TestUnify(t *testing.T) { + paths, err := filepath.Glob("testdata/*") + if err != nil { + t.Fatal(err) + } + if len(paths) == 0 { + t.Fatal("no testdata found") + } + for _, path := range paths { + // Skip paths starting with _ so experimental files can be added. 
+ base := filepath.Base(path) + if base[0] == '_' { + continue + } + if !strings.HasSuffix(base, ".yaml") { + t.Errorf("non-.yaml file in testdata: %s", base) + continue + } + base = strings.TrimSuffix(base, ".yaml") + + t.Run(base, func(t *testing.T) { + testUnify(t, path) + }) + } +} + +func testUnify(t *testing.T, path string) { + f, err := os.Open(path) + if err != nil { + t.Fatal(err) + } + defer f.Close() + + type testCase struct { + Skip bool + Name string + Unify []Closure + Want yaml.Node + All yaml.Node + } + dec := yaml.NewDecoder(f) + + for i := 0; ; i++ { + var tc testCase + err := dec.Decode(&tc) + if err == io.EOF { + break + } + if err != nil { + t.Fatal(err) + } + + name := tc.Name + if name == "" { + name = fmt.Sprint(i) + } + + t.Run(name, func(t *testing.T) { + if tc.Skip { + t.Skip("skip: true set in test case") + } + + defer func() { + p := recover() + if p != nil || t.Failed() { + // Redo with a trace + // + // TODO: Use t.Output() in Go 1.25. + var buf bytes.Buffer + Debug.UnifyLog = &buf + func() { + defer func() { + // If the original unify panicked, the second one + // probably will, too. Ignore it and let the first panic + // bubble. + recover() + }() + Unify(tc.Unify...) + }() + Debug.UnifyLog = nil + t.Logf("Trace:\n%s", buf.String()) + } + if p != nil { + panic(p) + } + }() + + // Unify the test cases + // + // TODO: Try reordering the inputs also + c, err := Unify(tc.Unify...) + if err != nil { + // TODO: Tests of errors + t.Fatal(err) + } + + // Encode the result back to YAML so we can check if it's structurally + // equal. + clean := func(val any) *yaml.Node { + var node yaml.Node + node.Encode(val) + for n := range allYamlNodes(&node) { + // Canonicalize the style. There may be other style flags we need to + // muck with. + n.Style &^= yaml.FlowStyle + n.HeadComment = "" + n.LineComment = "" + n.FootComment = "" + } + return &node + } + check := func(gotVal any, wantNode *yaml.Node) { + got, err := yaml.Marshal(clean(gotVal)) + if err != nil { + t.Fatalf("Encoding Value back to yaml failed: %s", err) + } + want, err := yaml.Marshal(clean(wantNode)) + if err != nil { + t.Fatalf("Encoding Want back to yaml failed: %s", err) + } + + if !bytes.Equal(got, want) { + t.Errorf("%s:%d:\nwant:\n%sgot\n%s", f.Name(), wantNode.Line, want, got) + } + } + if tc.Want.Kind != 0 { + check(c.val, &tc.Want) + } + if tc.All.Kind != 0 { + fVal := slices.Collect(c.All()) + check(fVal, &tc.All) + } + }) + } +} diff --git a/src/simd/_gen/unify/value.go b/src/simd/_gen/unify/value.go new file mode 100644 index 00000000000000..ffc25b8728bc51 --- /dev/null +++ b/src/simd/_gen/unify/value.go @@ -0,0 +1,167 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package unify + +import ( + "fmt" + "iter" + "reflect" +) + +// A Value represents a structured, non-deterministic value consisting of +// strings, tuples of Values, and string-keyed maps of Values. A +// non-deterministic Value will also contain variables, which are resolved via +// an environment as part of a [Closure]. +// +// For debugging, a Value can also track the source position it was read from in +// an input file, and its provenance from other Values. +type Value struct { + Domain Domain + + // A Value has either a pos or parents (or neither). 
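+	// pos records the source position the Value was read from, if any, and
+	// parents records the two Values it was produced from by unification.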
+ pos *Pos + parents *[2]*Value +} + +var ( + topValue = &Value{Domain: Top{}} + bottomValue = &Value{Domain: nil} +) + +// NewValue returns a new [Value] with the given domain and no position +// information. +func NewValue(d Domain) *Value { + return &Value{Domain: d} +} + +// NewValuePos returns a new [Value] with the given domain at position p. +func NewValuePos(d Domain, p Pos) *Value { + return &Value{Domain: d, pos: &p} +} + +// newValueFrom returns a new [Value] with the given domain that copies the +// position information of p. +func newValueFrom(d Domain, p *Value) *Value { + return &Value{Domain: d, pos: p.pos, parents: p.parents} +} + +func unified(d Domain, p1, p2 *Value) *Value { + return &Value{Domain: d, parents: &[2]*Value{p1, p2}} +} + +func (v *Value) Pos() Pos { + if v.pos == nil { + return Pos{} + } + return *v.pos +} + +func (v *Value) PosString() string { + var b []byte + for root := range v.Provenance() { + if len(b) > 0 { + b = append(b, ' ') + } + b, _ = root.pos.AppendText(b) + } + return string(b) +} + +func (v *Value) WhyNotExact() string { + if v.Domain == nil { + return "v.Domain is nil" + } + return v.Domain.WhyNotExact() +} + +func (v *Value) Exact() bool { + if v.Domain == nil { + return false + } + return v.Domain.Exact() +} + +// Decode decodes v into a Go value. +// +// v must be exact, except that it can include Top. into must be a pointer. +// [Def]s are decoded into structs. [Tuple]s are decoded into slices. [String]s +// are decoded into strings or ints. Any field can itself be a pointer to one of +// these types. Top can be decoded into a pointer-typed field and will set the +// field to nil. Anything else will allocate a value if necessary. +// +// Any type may implement [Decoder], in which case its DecodeUnified method will +// be called instead of using the default decoding scheme. +func (v *Value) Decode(into any) error { + rv := reflect.ValueOf(into) + if rv.Kind() != reflect.Pointer { + return fmt.Errorf("cannot decode into non-pointer %T", into) + } + return decodeReflect(v, rv.Elem()) +} + +func decodeReflect(v *Value, rv reflect.Value) error { + var ptr reflect.Value + if rv.Kind() == reflect.Pointer { + if rv.IsNil() { + // Transparently allocate through pointers, *except* for Top, which + // wants to set the pointer to nil. + // + // TODO: Drop this condition if I switch to an explicit Optional[T] + // or move the Top logic into Def. + if _, ok := v.Domain.(Top); !ok { + // Allocate the value to fill in, but don't actually store it in + // the pointer until we successfully decode. + ptr = rv + rv = reflect.New(rv.Type().Elem()).Elem() + } + } else { + rv = rv.Elem() + } + } + + var err error + if reflect.PointerTo(rv.Type()).Implements(decoderType) { + // Use the custom decoder. + err = rv.Addr().Interface().(Decoder).DecodeUnified(v) + } else { + err = v.Domain.decode(rv) + } + if err == nil && ptr.IsValid() { + ptr.Set(rv.Addr()) + } + return err +} + +// Decoder can be implemented by types as a custom implementation of [Decode] +// for that type. +type Decoder interface { + DecodeUnified(v *Value) error +} + +var decoderType = reflect.TypeOf((*Decoder)(nil)).Elem() + +// Provenance iterates over all of the source Values that have contributed to +// this Value. 
+func (v *Value) Provenance() iter.Seq[*Value] { + return func(yield func(*Value) bool) { + var rec func(d *Value) bool + rec = func(d *Value) bool { + if d.pos != nil { + if !yield(d) { + return false + } + } + if d.parents != nil { + for _, p := range d.parents { + if !rec(p) { + return false + } + } + } + return true + } + rec(v) + } +} diff --git a/src/simd/_gen/unify/value_test.go b/src/simd/_gen/unify/value_test.go new file mode 100644 index 00000000000000..54937c68efddf9 --- /dev/null +++ b/src/simd/_gen/unify/value_test.go @@ -0,0 +1,50 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package unify + +import ( + "reflect" + "slices" + "testing" +) + +func ExampleClosure_All_tuple() { + v := mustParse(` +- !sum [1, 2] +- !sum [3, 4] +`) + printYaml(slices.Collect(v.All())) + + // Output: + // - [1, 3] + // - [1, 4] + // - [2, 3] + // - [2, 4] +} + +func ExampleClosure_All_def() { + v := mustParse(` +a: !sum [1, 2] +b: !sum [3, 4] +c: 5 +`) + printYaml(slices.Collect(v.All())) + + // Output: + // - {a: 1, b: 3, c: 5} + // - {a: 1, b: 4, c: 5} + // - {a: 2, b: 3, c: 5} + // - {a: 2, b: 4, c: 5} +} + +func checkDecode[T any](t *testing.T, got *Value, want T) { + var gotT T + if err := got.Decode(&gotT); err != nil { + t.Fatalf("Decode failed: %v", err) + } + if !reflect.DeepEqual(&gotT, &want) { + t.Fatalf("got:\n%s\nwant:\n%s", prettyYaml(gotT), prettyYaml(want)) + } +} diff --git a/src/simd/_gen/unify/yaml.go b/src/simd/_gen/unify/yaml.go new file mode 100644 index 00000000000000..dadcd71dd7b64e --- /dev/null +++ b/src/simd/_gen/unify/yaml.go @@ -0,0 +1,619 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package unify + +import ( + "errors" + "fmt" + "io" + "io/fs" + "os" + "path/filepath" + "regexp" + "strings" + + "gopkg.in/yaml.v3" +) + +// ReadOpts provides options to [Read] and related functions. The zero value is +// the default options. +type ReadOpts struct { + // FS, if non-nil, is the file system from which to resolve !import file + // names. + FS fs.FS +} + +// Read reads a [Closure] in YAML format from r, using path for error messages. +// +// It maps YAML nodes into terminal Values as follows: +// +// - "_" or !top _ is the top value ([Top]). +// +// - "_|_" or !bottom _ is the bottom value. This is an error during +// unmarshaling, but can appear in marshaled values. +// +// - "$" or !var is a variable ([Var]). Everywhere the same name +// appears within a single unmarshal operation, it is mapped to the same +// variable. Different unmarshal operations get different variables, even if +// they have the same string name. +// +// - !regex "x" is a regular expression ([String]), as is any string that +// doesn't match "_", "_|_", or "$...". Regular expressions are implicitly +// anchored at the beginning and end. If the string doesn't contain any +// meta-characters (that is, it's a "literal" regular expression), then it's +// treated as an exact string. +// +// - !string "x", or any int, float, bool, or binary value is an exact string +// ([String]). +// +// - !regex [x, y, ...] is an intersection of regular expressions ([String]). +// +// It maps YAML nodes into non-terminal Values as follows: +// +// - Sequence nodes like [x, y, z] are tuples ([Tuple]). 
+// +// - !repeat [x] is a repeated tuple ([Tuple]), which is 0 or more instances of +// x. There must be exactly one element in the list. +// +// - Mapping nodes like {a: x, b: y} are defs ([Def]). Any fields not listed are +// implicitly top. +// +// - !sum [x, y, z] is a sum of its children. This can be thought of as a union +// of the values x, y, and z, or as a non-deterministic choice between x, y, and +// z. If a variable appears both inside the sum and outside of it, only the +// non-deterministic choice view really works. The unifier does not directly +// implement sums; instead, this is decoded as a fresh variable that's +// simultaneously bound to x, y, and z. +// +// - !import glob is like a !sum, but its children are read from all files +// matching the given glob pattern, which is interpreted relative to the current +// file path. Each file gets its own variable scope. +func Read(r io.Reader, path string, opts ReadOpts) (Closure, error) { + dec := yamlDecoder{opts: opts, path: path, env: topEnv} + v, err := dec.read(r) + if err != nil { + return Closure{}, err + } + return dec.close(v), nil +} + +// ReadFile reads a [Closure] in YAML format from a file. +// +// The file must consist of a single YAML document. +// +// If opts.FS is not set, this sets it to a FS rooted at path's directory. +// +// See [Read] for details. +func ReadFile(path string, opts ReadOpts) (Closure, error) { + f, err := os.Open(path) + if err != nil { + return Closure{}, err + } + defer f.Close() + + if opts.FS == nil { + opts.FS = os.DirFS(filepath.Dir(path)) + } + + return Read(f, path, opts) +} + +// UnmarshalYAML implements [yaml.Unmarshaler]. +// +// Since there is no way to pass [ReadOpts] to this function, it assumes default +// options. +func (c *Closure) UnmarshalYAML(node *yaml.Node) error { + dec := yamlDecoder{path: "", env: topEnv} + v, err := dec.root(node) + if err != nil { + return err + } + *c = dec.close(v) + return nil +} + +type yamlDecoder struct { + opts ReadOpts + path string + + vars map[string]*ident + nSums int + + env envSet +} + +func (dec *yamlDecoder) read(r io.Reader) (*Value, error) { + n, err := readOneNode(r) + if err != nil { + return nil, fmt.Errorf("%s: %w", dec.path, err) + } + + // Decode YAML node to a Value + v, err := dec.root(n) + if err != nil { + return nil, fmt.Errorf("%s: %w", dec.path, err) + } + + return v, nil +} + +// readOneNode reads a single YAML document from r and returns an error if there +// are more documents in r. +func readOneNode(r io.Reader) (*yaml.Node, error) { + yd := yaml.NewDecoder(r) + + // Decode as a YAML node + var node yaml.Node + if err := yd.Decode(&node); err != nil { + return nil, err + } + np := &node + if np.Kind == yaml.DocumentNode { + np = node.Content[0] + } + + // Ensure there are no more YAML docs in this file + if err := yd.Decode(nil); err == nil { + return nil, fmt.Errorf("must not contain multiple documents") + } else if err != io.EOF { + return nil, err + } + + return np, nil +} + +// root parses the root of a file. +func (dec *yamlDecoder) root(node *yaml.Node) (*Value, error) { + // Prepare for variable name resolution in this file. This may be a nested + // root, so restore the current values when we're done. + oldVars, oldNSums := dec.vars, dec.nSums + defer func() { + dec.vars, dec.nSums = oldVars, oldNSums + }() + dec.vars = make(map[string]*ident, 0) + dec.nSums = 0 + + return dec.value(node) +} + +// close wraps a decoded [Value] into a [Closure]. 
+func (dec *yamlDecoder) close(v *Value) Closure { + return Closure{v, dec.env} +} + +func (dec *yamlDecoder) value(node *yaml.Node) (vOut *Value, errOut error) { + pos := &Pos{Path: dec.path, Line: node.Line} + + // Resolve alias nodes. + if node.Kind == yaml.AliasNode { + node = node.Alias + } + + mk := func(d Domain) (*Value, error) { + v := &Value{Domain: d, pos: pos} + return v, nil + } + mk2 := func(d Domain, err error) (*Value, error) { + if err != nil { + return nil, err + } + return mk(d) + } + + // is tests the kind and long tag of node. + is := func(kind yaml.Kind, tag string) bool { + return node.Kind == kind && node.LongTag() == tag + } + isExact := func() bool { + if node.Kind != yaml.ScalarNode { + return false + } + // We treat any string-ish YAML node as a string. + switch node.LongTag() { + case "!string", "tag:yaml.org,2002:int", "tag:yaml.org,2002:float", "tag:yaml.org,2002:bool", "tag:yaml.org,2002:binary": + return true + } + return false + } + + // !!str nodes provide a short-hand syntax for several leaf domains that are + // also available under explicit tags. To simplify checking below, we set + // strVal to non-"" only for !!str nodes. + strVal := "" + isStr := is(yaml.ScalarNode, "tag:yaml.org,2002:str") + if isStr { + strVal = node.Value + } + + switch { + case is(yaml.ScalarNode, "!var"): + strVal = "$" + node.Value + fallthrough + case strings.HasPrefix(strVal, "$"): + id, ok := dec.vars[strVal] + if !ok { + // We encode different idents with the same string name by adding a + // #N suffix. Strip that off so it doesn't accumulate. This isn't + // meant to be used in user-written input, though nothing stops that. + name, _, _ := strings.Cut(strVal, "#") + id = &ident{name: name} + dec.vars[strVal] = id + dec.env = dec.env.bind(id, topValue) + } + return mk(Var{id: id}) + + case strVal == "_" || is(yaml.ScalarNode, "!top"): + return mk(Top{}) + + case strVal == "_|_" || is(yaml.ScalarNode, "!bottom"): + return nil, errors.New("found bottom") + + case isExact(): + val := node.Value + return mk(NewStringExact(val)) + + case isStr || is(yaml.ScalarNode, "!regex"): + // Any other string we treat as a regex. This will produce an exact + // string anyway if the regex is literal. + val := node.Value + return mk2(NewStringRegex(val)) + + case is(yaml.SequenceNode, "!regex"): + var vals []string + if err := node.Decode(&vals); err != nil { + return nil, err + } + return mk2(NewStringRegex(vals...)) + + case is(yaml.MappingNode, "tag:yaml.org,2002:map"): + var db DefBuilder + for i := 0; i < len(node.Content); i += 2 { + key := node.Content[i] + if key.Kind != yaml.ScalarNode { + return nil, fmt.Errorf("non-scalar key %q", key.Value) + } + val, err := dec.value(node.Content[i+1]) + if err != nil { + return nil, err + } + db.Add(key.Value, val) + } + return mk(db.Build()) + + case is(yaml.SequenceNode, "tag:yaml.org,2002:seq"): + elts := node.Content + vs := make([]*Value, 0, len(elts)) + for _, elt := range elts { + v, err := dec.value(elt) + if err != nil { + return nil, err + } + vs = append(vs, v) + } + return mk(NewTuple(vs...)) + + case is(yaml.SequenceNode, "!repeat") || is(yaml.SequenceNode, "!repeat-unify"): + // !repeat must have one child. !repeat-unify is used internally for + // delayed unification, and is the same, it's just allowed to have more + // than one child. 
+ if node.LongTag() == "!repeat" && len(node.Content) != 1 { + return nil, fmt.Errorf("!repeat must have exactly one child") + } + + // Decode the children to make sure they're well-formed, but otherwise + // discard that decoding and do it again every time we need a new + // element. + var gen []func(e envSet) (*Value, envSet) + origEnv := dec.env + elts := node.Content + for i, elt := range elts { + _, err := dec.value(elt) + if err != nil { + return nil, err + } + // Undo any effects on the environment. We *do* keep any named + // variables that were added to the vars map in case they were + // introduced within the element. + dec.env = origEnv + // Add a generator function + gen = append(gen, func(e envSet) (*Value, envSet) { + dec.env = e + // TODO: If this is in a sum, this tends to generate a ton of + // fresh variables that are different on each branch of the + // parent sum. Does it make sense to hold on to the i'th value + // of the tuple after we've generated it? + v, err := dec.value(elts[i]) + if err != nil { + // It worked the first time, so this really shouldn't hapen. + panic("decoding repeat element failed") + } + return v, dec.env + }) + } + return mk(NewRepeat(gen...)) + + case is(yaml.SequenceNode, "!sum"): + vs := make([]*Value, 0, len(node.Content)) + for _, elt := range node.Content { + v, err := dec.value(elt) + if err != nil { + return nil, err + } + vs = append(vs, v) + } + if len(vs) == 1 { + return vs[0], nil + } + + // A sum is implemented as a fresh variable that's simultaneously bound + // to each of the descendants. + id := &ident{name: fmt.Sprintf("sum%d", dec.nSums)} + dec.nSums++ + dec.env = dec.env.bind(id, vs...) + return mk(Var{id: id}) + + case is(yaml.ScalarNode, "!import"): + if dec.opts.FS == nil { + return nil, fmt.Errorf("!import not allowed (ReadOpts.FS not set)") + } + pat := node.Value + + if !fs.ValidPath(pat) { + // This will result in Glob returning no results. Give a more useful + // error message for this case. + return nil, fmt.Errorf("!import path must not contain '.' or '..'") + } + + ms, err := fs.Glob(dec.opts.FS, pat) + if err != nil { + return nil, fmt.Errorf("resolving !import: %w", err) + } + if len(ms) == 0 { + return nil, fmt.Errorf("!import did not match any files") + } + + // Parse each file + vs := make([]*Value, 0, len(ms)) + for _, m := range ms { + v, err := dec.import1(m) + if err != nil { + return nil, err + } + vs = append(vs, v) + } + + // Create a sum. + if len(vs) == 1 { + return vs[0], nil + } + id := &ident{name: "import"} + dec.env = dec.env.bind(id, vs...) + return mk(Var{id: id}) + } + + return nil, fmt.Errorf("unknown node kind %d %v", node.Kind, node.Tag) +} + +func (dec *yamlDecoder) import1(path string) (*Value, error) { + // Make sure we can open the path first. + f, err := dec.opts.FS.Open(path) + if err != nil { + return nil, fmt.Errorf("!import failed: %w", err) + } + defer f.Close() + + // Prepare the enter path. + oldFS, oldPath := dec.opts.FS, dec.path + defer func() { + dec.opts.FS, dec.path = oldFS, oldPath + }() + + // Enter path, which is relative to the current path's directory. + newPath := filepath.Join(filepath.Dir(dec.path), path) + subFS, err := fs.Sub(dec.opts.FS, filepath.Dir(path)) + if err != nil { + return nil, err + } + dec.opts.FS, dec.path = subFS, newPath + + // Parse the file. + return dec.read(f) +} + +type yamlEncoder struct { + idp identPrinter + e envSet // We track the environment for !repeat nodes. +} + +// TODO: Switch some Value marshaling to Closure? 
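+//
+// A marshaled Closure is a "!closure" mapping with an "env" key holding the
+// encoded environment expression and an "in" key holding the value (see the
+// closure encoder below).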
+ +func (c Closure) MarshalYAML() (any, error) { + // TODO: If the environment is trivial, just marshal the value. + enc := &yamlEncoder{} + return enc.closure(c), nil +} + +func (c Closure) String() string { + b, err := yaml.Marshal(c) + if err != nil { + return fmt.Sprintf("marshal failed: %s", err) + } + return string(b) +} + +func (v *Value) MarshalYAML() (any, error) { + enc := &yamlEncoder{} + return enc.value(v), nil +} + +func (v *Value) String() string { + b, err := yaml.Marshal(v) + if err != nil { + return fmt.Sprintf("marshal failed: %s", err) + } + return string(b) +} + +func (enc *yamlEncoder) closure(c Closure) *yaml.Node { + enc.e = c.env + var n yaml.Node + n.Kind = yaml.MappingNode + n.Tag = "!closure" + n.Content = make([]*yaml.Node, 4) + n.Content[0] = new(yaml.Node) + n.Content[0].SetString("env") + n.Content[2] = new(yaml.Node) + n.Content[2].SetString("in") + n.Content[3] = enc.value(c.val) + // Fill in the env after we've written the value in case value encoding + // affects the env. + n.Content[1] = enc.env(enc.e) + enc.e = envSet{} // Allow GC'ing the env + return &n +} + +func (enc *yamlEncoder) env(e envSet) *yaml.Node { + var encode func(e *envExpr) *yaml.Node + encode = func(e *envExpr) *yaml.Node { + var n yaml.Node + switch e.kind { + default: + panic("bad kind") + case envZero: + n.SetString("0") + case envUnit: + n.SetString("1") + case envBinding: + var id yaml.Node + id.SetString(enc.idp.unique(e.id)) + n.Kind = yaml.MappingNode + n.Content = []*yaml.Node{&id, enc.value(e.val)} + case envProduct, envSum: + n.Kind = yaml.SequenceNode + if e.kind == envProduct { + n.Tag = "!product" + } else { + n.Tag = "!sum" + } + for _, e2 := range e.operands { + n.Content = append(n.Content, encode(e2)) + } + } + return &n + } + return encode(e.root) +} + +var yamlIntRe = regexp.MustCompile(`^-?[0-9]+$`) + +func (enc *yamlEncoder) value(v *Value) *yaml.Node { + var n yaml.Node + switch d := v.Domain.(type) { + case nil: + // Not allowed by unmarshaler, but useful for understanding when + // something goes horribly wrong. + // + // TODO: We might be able to track useful provenance for this, which + // would really help with debugging unexpected bottoms. + n.SetString("_|_") + return &n + + case Top: + n.SetString("_") + return &n + + case Def: + n.Kind = yaml.MappingNode + for k, elt := range d.All() { + var kn yaml.Node + kn.SetString(k) + n.Content = append(n.Content, &kn, enc.value(elt)) + } + n.HeadComment = v.PosString() + return &n + + case Tuple: + n.Kind = yaml.SequenceNode + if d.repeat == nil { + for _, elt := range d.vs { + n.Content = append(n.Content, enc.value(elt)) + } + } else { + if len(d.repeat) == 1 { + n.Tag = "!repeat" + } else { + n.Tag = "!repeat-unify" + } + // TODO: I'm not positive this will round-trip everything correctly. + for _, gen := range d.repeat { + v, e := gen(enc.e) + enc.e = e + n.Content = append(n.Content, enc.value(v)) + } + } + return &n + + case String: + switch d.kind { + case stringExact: + n.SetString(d.exact) + switch { + // Make this into a "nice" !!int node if I can. + case yamlIntRe.MatchString(d.exact): + n.Tag = "tag:yaml.org,2002:int" + + // Or a "nice" !!bool node. + case d.exact == "false" || d.exact == "true": + n.Tag = "tag:yaml.org,2002:bool" + + // If this doesn't require escaping, leave it as a str node to avoid + // the annoying YAML tags. Otherwise, mark it as an exact string. + // Alternatively, we could always emit a str node with regexp + // quoting. 
+ case d.exact != regexp.QuoteMeta(d.exact): + n.Tag = "!string" + } + return &n + case stringRegex: + o := make([]string, 0, 1) + for _, re := range d.re { + s := re.String() + s = strings.TrimSuffix(strings.TrimPrefix(s, `\A(?:`), `)\z`) + o = append(o, s) + } + if len(o) == 1 { + n.SetString(o[0]) + return &n + } + n.Encode(o) + n.Tag = "!regex" + return &n + } + panic("bad String kind") + + case Var: + // TODO: If Var only appears once in the whole Value and is independent + // in the environment (part of a term that is only over Var), then emit + // this as a !sum instead. + if false { + var vs []*Value // TODO: Get values of this var. + if len(vs) == 1 { + return enc.value(vs[0]) + } + n.Kind = yaml.SequenceNode + n.Tag = "!sum" + for _, elt := range vs { + n.Content = append(n.Content, enc.value(elt)) + } + return &n + } + n.SetString(enc.idp.unique(d.id)) + if !strings.HasPrefix(d.id.name, "$") { + n.Tag = "!var" + } + return &n + } + panic(fmt.Sprintf("unknown domain type %T", v.Domain)) +} diff --git a/src/simd/_gen/unify/yaml_test.go b/src/simd/_gen/unify/yaml_test.go new file mode 100644 index 00000000000000..4f0aef434eac95 --- /dev/null +++ b/src/simd/_gen/unify/yaml_test.go @@ -0,0 +1,202 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package unify + +import ( + "bytes" + "fmt" + "iter" + "log" + "strings" + "testing" + "testing/fstest" + + "gopkg.in/yaml.v3" +) + +func mustParse(expr string) Closure { + var c Closure + if err := yaml.Unmarshal([]byte(expr), &c); err != nil { + panic(err) + } + return c +} + +func oneValue(t *testing.T, c Closure) *Value { + t.Helper() + var v *Value + var i int + for v = range c.All() { + i++ + } + if i != 1 { + t.Fatalf("expected 1 value, got %d", i) + } + return v +} + +func printYaml(val any) { + fmt.Println(prettyYaml(val)) +} + +func prettyYaml(val any) string { + b, err := yaml.Marshal(val) + if err != nil { + panic(err) + } + var node yaml.Node + if err := yaml.Unmarshal(b, &node); err != nil { + panic(err) + } + + // Map lines to start offsets. We'll use this to figure out when nodes are + // "small" and should use inline style. + lines := []int{-1, 0} + for pos := 0; pos < len(b); { + next := bytes.IndexByte(b[pos:], '\n') + if next == -1 { + break + } + pos += next + 1 + lines = append(lines, pos) + } + lines = append(lines, len(b)) + + // Strip comments and switch small nodes to inline style + cleanYaml(&node, lines, len(b)) + + b, err = yaml.Marshal(&node) + if err != nil { + panic(err) + } + return string(b) +} + +func cleanYaml(node *yaml.Node, lines []int, endPos int) { + node.HeadComment = "" + node.FootComment = "" + node.LineComment = "" + + for i, n2 := range node.Content { + end2 := endPos + if i < len(node.Content)-1 { + end2 = lines[node.Content[i+1].Line] + } + cleanYaml(n2, lines, end2) + } + + // Use inline style? + switch node.Kind { + case yaml.MappingNode, yaml.SequenceNode: + if endPos-lines[node.Line] < 40 { + node.Style = yaml.FlowStyle + } + } +} + +func allYamlNodes(n *yaml.Node) iter.Seq[*yaml.Node] { + return func(yield func(*yaml.Node) bool) { + if !yield(n) { + return + } + for _, n2 := range n.Content { + for n3 := range allYamlNodes(n2) { + if !yield(n3) { + return + } + } + } + } +} + +func TestRoundTripString(t *testing.T) { + // Check that we can round-trip a string with regexp meta-characters in it. 
+ const y = `!string test*` + t.Logf("input:\n%s", y) + + v1 := oneValue(t, mustParse(y)) + var buf1 strings.Builder + enc := yaml.NewEncoder(&buf1) + if err := enc.Encode(v1); err != nil { + log.Fatal(err) + } + enc.Close() + t.Logf("after parse 1:\n%s", buf1.String()) + + v2 := oneValue(t, mustParse(buf1.String())) + var buf2 strings.Builder + enc = yaml.NewEncoder(&buf2) + if err := enc.Encode(v2); err != nil { + log.Fatal(err) + } + enc.Close() + t.Logf("after parse 2:\n%s", buf2.String()) + + if buf1.String() != buf2.String() { + t.Fatal("parse 1 and parse 2 differ") + } +} + +func TestEmptyString(t *testing.T) { + // Regression test. Make sure an empty string is parsed as an exact string, + // not a regexp. + const y = `""` + t.Logf("input:\n%s", y) + + v1 := oneValue(t, mustParse(y)) + if !v1.Exact() { + t.Fatal("expected exact string") + } +} + +func TestImport(t *testing.T) { + // Test a basic import + main := strings.NewReader("!import x/y.yaml") + fs := fstest.MapFS{ + // Test a glob import with a relative path + "x/y.yaml": {Data: []byte("!import y/*.yaml")}, + "x/y/z.yaml": {Data: []byte("42")}, + } + cl, err := Read(main, "x.yaml", ReadOpts{FS: fs}) + if err != nil { + t.Fatal(err) + } + x := 42 + checkDecode(t, oneValue(t, cl), &x) +} + +func TestImportEscape(t *testing.T) { + // Make sure an import can't escape its subdirectory. + main := strings.NewReader("!import x/y.yaml") + fs := fstest.MapFS{ + "x/y.yaml": {Data: []byte("!import ../y/*.yaml")}, + "y/z.yaml": {Data: []byte("42")}, + } + _, err := Read(main, "x.yaml", ReadOpts{FS: fs}) + if err == nil { + t.Fatal("relative !import should have failed") + } + if !strings.Contains(err.Error(), "must not contain") { + t.Fatalf("unexpected error %v", err) + } +} + +func TestImportScope(t *testing.T) { + // Test that imports have different variable scopes. + main := strings.NewReader("[!import y.yaml, !import y.yaml]") + fs := fstest.MapFS{ + "y.yaml": {Data: []byte("$v")}, + } + cl1, err := Read(main, "x.yaml", ReadOpts{FS: fs}) + if err != nil { + t.Fatal(err) + } + cl2 := mustParse("[1, 2]") + res, err := Unify(cl1, cl2) + if err != nil { + t.Fatal(err) + } + checkDecode(t, oneValue(t, res), []int{1, 2}) +} From 8b90d48d8cd4a021132ecca314416063e406569f Mon Sep 17 00:00:00 2001 From: Austin Clements Date: Wed, 13 Aug 2025 16:59:43 -0400 Subject: [PATCH 134/139] [dev.simd] simd/_gen/simdgen: rewrite etetest.sh Now that simdgen is in the main repo, the end-to-end test script can be much simpler, more robust, and faster. Change-Id: Ie3b12feaf98c327920071c67cfe74f673bb08d3e Reviewed-on: https://go-review.googlesource.com/c/go/+/695978 Auto-Submit: Austin Clements LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/simd/_gen/simdgen/etetest.sh | 81 +++++++++++++++++++------------- 1 file changed, 48 insertions(+), 33 deletions(-) diff --git a/src/simd/_gen/simdgen/etetest.sh b/src/simd/_gen/simdgen/etetest.sh index 7b5001ecbbe58c..f6559fcfff9197 100755 --- a/src/simd/_gen/simdgen/etetest.sh +++ b/src/simd/_gen/simdgen/etetest.sh @@ -1,33 +1,48 @@ -#!/bin/bash -x - -cat <<\\EOF - -This is an end-to-end test of Go SIMD. It checks out a fresh Go -repository from the go.simd branch, then generates the SIMD input -files and runs simdgen writing into the fresh repository. - -After that it generates the modified ssa pattern matching files, then -builds the compiler. - -\EOF - -rm -rf go-test -git clone https://go.googlesource.com/go -b dev.simd go-test -go run . 
-xedPath xeddata -o godefs -goroot ./go-test go.yaml types.yaml categories.yaml
-(cd go-test/src/cmd/compile/internal/ssa/_gen ; go run *.go )
-(cd go-test/src ; GOEXPERIMENT=simd ./make.bash )
-(cd go-test/bin; b=`pwd` ; cd ../src/simd/testdata; GOARCH=amd64 $b/go run .)
-(cd go-test/bin; b=`pwd` ; cd ../src ;
-GOEXPERIMENT=simd GOARCH=amd64 $b/go test -v simd
-GOEXPERIMENT=simd $b/go test go/doc
-GOEXPERIMENT=simd $b/go test go/build
-GOEXPERIMENT=simd $b/go test cmd/api -v -check
-$b/go test go/doc
-$b/go test go/build
-$b/go test cmd/api -v -check
-
-$b/go test cmd/compile/internal/ssagen -simd=0
-GOEXPERIMENT=simd $b/go test cmd/compile/internal/ssagen -simd=0
-)
-
-# next, add some tests of SIMD itself
+#!/bin/bash
+
+# This is an end-to-end test of Go SIMD. It updates all generated
+# files in this repo and then runs several tests.
+
+XEDDATA="${XEDDATA:-xeddata}"
+if [[ ! -d "$XEDDATA" ]]; then
+  echo >&2 "Must either set \$XEDDATA or symlink xeddata/ to the XED obj/dgen directory."
+  exit 1
+fi
+
+which go >/dev/null || exit 1
+goroot="$(go env GOROOT)"
+if [[ ! ../../../.. -ef "$goroot" ]]; then
+  # We might be able to make this work but it's SO CONFUSING.
+  echo >&2 "go command in path has GOROOT $goroot"
+  exit 1
+fi
+
+if [[ $(go env GOEXPERIMENT) != simd ]]; then
+  echo >&2 "GOEXPERIMENT=$(go env GOEXPERIMENT), expected simd"
+  exit 1
+fi
+
+set -ex
+
+# Regenerate SIMD files
+go run . -o godefs -goroot "$goroot" -xedPath "$XEDDATA" go.yaml types.yaml categories.yaml
+# Regenerate SSA files from SIMD rules
+go run -C "$goroot"/src/cmd/compile/internal/ssa/_gen .
+
+# Rebuild compiler
+cd "$goroot"/src
+go install cmd/compile
+
+# Tests
+GOARCH=amd64 go run -C simd/testdata .
+GOARCH=amd64 go test -v simd
+go test go/doc go/build
+go test cmd/api -v -check -run ^TestCheck$
+go test cmd/compile/internal/ssagen -simd=0
+
+# Check tests without the GOEXPERIMENT
+GOEXPERIMENT= go test go/doc go/build
+GOEXPERIMENT= go test cmd/api -v -check -run ^TestCheck$
+GOEXPERIMENT= go test cmd/compile/internal/ssagen -simd=0
+
+# TODO: Add some tests of SIMD itself

From 9783f86bc8953c3d93853b2382a4de011c5e26a7 Mon Sep 17 00:00:00 2001
From: Junyang Shao
Date: Tue, 12 Aug 2025 16:53:44 +0000
Subject: [PATCH 135/139] [dev.simd] cmd/compile: account for rematerializable
 ops' output reginfo

This CL implements the check for a rematerializeable value's output
regspec at its rematerialization site.

It has some potential problems; please see the TODO in regalloc.go.

Fixes #70451.
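As a rough illustration of the disjointness test this CL adds (a minimal,
self-contained sketch, not the actual regalloc code; the mask values below
are invented for illustration):

	package main

	import "fmt"

	type regMask uint64

	// Hypothetical masks: GP registers in the low bits, FP/vector registers
	// in the high bits. The real masks come from each op's regInfo.
	const (
		gpMask regMask = 0x000000000000ffff
		fpMask regMask = 0xffff000000000000
	)

	func main() {
		requested := gpMask // registers the use demands
		produced := fpMask  // registers the rematerialized op can write to
		if requested&produced == 0 {
			// Disjoint masks: rematerialize into a register drawn from
			// "produced", then issue an extra Copy into one from "requested".
			fmt.Println("register mask conflict: extra copy needed")
		}
	}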
Change-Id: Ib624b967031776851136554719e939e9bf116b7c
Reviewed-on: https://go-review.googlesource.com/c/go/+/695315
Reviewed-by: David Chase
TryBot-Bypass: David Chase
---
 src/cmd/compile/internal/ssa/func.go          |  1 +
 src/cmd/compile/internal/ssa/func_test.go     |  5 ++++
 src/cmd/compile/internal/ssa/regalloc.go      | 23 +++++++++++++++++
 src/cmd/compile/internal/ssa/regalloc_test.go | 25 +++++++++++++++++++
 4 files changed, 54 insertions(+)

diff --git a/src/cmd/compile/internal/ssa/func.go b/src/cmd/compile/internal/ssa/func.go
index 213089a44b8233..0f895e50189536 100644
--- a/src/cmd/compile/internal/ssa/func.go
+++ b/src/cmd/compile/internal/ssa/func.go
@@ -102,6 +102,7 @@ func (c *Config) NewFunc(fe Frontend, cache *Cache) *Func {
 		NamedValues:          make(map[LocalSlot][]*Value),
 		CanonicalLocalSlots:  make(map[LocalSlot]*LocalSlot),
 		CanonicalLocalSplits: make(map[LocalSlotSplitKey]*LocalSlot),
+		OwnAux:               &AuxCall{},
 	}
 }

diff --git a/src/cmd/compile/internal/ssa/func_test.go b/src/cmd/compile/internal/ssa/func_test.go
index 6923aaa58ecadd..1372c77e7bd2f9 100644
--- a/src/cmd/compile/internal/ssa/func_test.go
+++ b/src/cmd/compile/internal/ssa/func_test.go
@@ -250,6 +250,11 @@ func Exit(arg string) ctrl {
 	return ctrl{BlockExit, arg, []string{}}
 }

+// Ret specifies a BlockRet.
+func Ret(arg string) ctrl {
+	return ctrl{BlockRet, arg, []string{}}
+}
+
 // Eq specifies a BlockAMD64EQ.
 func Eq(cond, sub, alt string) ctrl {
 	return ctrl{BlockAMD64EQ, cond, []string{sub, alt}}
diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go
index 4e7f66581f8cc7..3e6fe0d128ae0d 100644
--- a/src/cmd/compile/internal/ssa/regalloc.go
+++ b/src/cmd/compile/internal/ssa/regalloc.go
@@ -609,6 +609,29 @@ func (s *regAllocState) allocValToReg(v *Value, mask regMask, nospill bool, pos
 	} else if v.rematerializeable() {
 		// Rematerialize instead of loading from the spill location.
 		c = v.copyIntoWithXPos(s.curBlock, pos)
+		// We need to consider its output mask and potentially issue a Copy
+		// if there are register mask conflicts.
+		// This currently happens for the SIMD package, only between GP and FP
+		// registers: Intel's vector extensions can put an integer value into an
+		// FP register, where it is seen as a vector. Example instruction: VPSLL[BWDQ].
+		// Because GP and FP masks do not overlap, mask & sourceMask == 0
+		// detects this situation reliably.
+		sourceMask := s.regspec(c).outputs[0].regs
+		if mask&sourceMask == 0 && !onWasmStack {
+			s.setOrig(c, v)
+			s.assignReg(s.allocReg(sourceMask, v), v, c)
+			// v.Type for the new OpCopy is likely wrong, and that might delay the problem
+			// until ssa-to-asm lowering, which might need the type to generate the right
+			// assembly for OpCopy. For Intel's GP-to-FP move this happens to work, because
+			// the MOV instruction has such a variant, so the result is correct.
+			// But it's unclear for other architectures or situations, and the problem
+			// might be exposed when the assembler sees illegal instructions.
+			// Right now we still pick v.Type, because at least its size should be correct
+			// for the rematerialization case the amd64 SIMD package exposed.
+			// TODO: We might need to figure out a way to find the correct type, or make
+			// the asm lowering use reg info only for OpCopy.
+			c = s.curBlock.NewValue1(pos, OpCopy, v.Type, c)
+		}
 	} else {
 		// Load v from its spill location.
 		spill := s.makeSpill(v, s.curBlock)
diff --git a/src/cmd/compile/internal/ssa/regalloc_test.go b/src/cmd/compile/internal/ssa/regalloc_test.go
index 0f69b852d12971..79f94da0114f93 100644
--- a/src/cmd/compile/internal/ssa/regalloc_test.go
+++ b/src/cmd/compile/internal/ssa/regalloc_test.go
@@ -6,6 +6,7 @@ package ssa

 import (
 	"cmd/compile/internal/types"
+	"cmd/internal/obj/x86"
 	"fmt"
 	"testing"
 )
@@ -279,3 +280,27 @@ func numOps(b *Block, op Op) int {
 	}
 	return n
 }
+
+func TestRematerializeableRegCompatible(t *testing.T) {
+	c := testConfig(t)
+	f := c.Fun("entry",
+		Bloc("entry",
+			Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+			Valu("x", OpAMD64MOVLconst, c.config.Types.Int32, 1, nil),
+			Valu("a", OpAMD64POR, c.config.Types.Float32, 0, nil, "x", "x"),
+			Valu("res", OpMakeResult, types.NewResults([]*types.Type{c.config.Types.Float32, types.TypeMem}), 0, nil, "a", "mem"),
+			Ret("res"),
+		),
+	)
+	regalloc(f.f)
+	checkFunc(f.f)
+	moveFound := false
+	for _, v := range f.f.Blocks[0].Values {
+		if v.Op == OpCopy && x86.REG_X0 <= v.Reg() && v.Reg() <= x86.REG_X31 {
+			moveFound = true
+		}
+	}
+	if !moveFound {
+		t.Errorf("Expected a Copy to be issued, but got: %+v", f.f)
+	}
+}

From 908e3e8166898a3b5f7c961e774f681da2a765bc Mon Sep 17 00:00:00 2001
From: Junyang Shao
Date: Thu, 14 Aug 2025 16:56:28 +0000
Subject: [PATCH 136/139] [dev.simd] cmd/compile: make (most) move/load/store
 lowering use reg and width only

This CL tries to clean up the move/load/store lowering a bit.

After CL 695315, the register information for instructions is
expected to be correct for SIMD, but we still need to pick the
right instruction during ssa-to-asm lowering. The code before
this CL should be working correctly, but MOVSSconst and MOVSDconst
contain duplicated code; this CL removes that.

This CL also rewrites move/load/storeByTypeAndReg to use only the
width and reg for all non-SIMD types, which is more consistent.

Change-Id: I76c14f3d0140bcbd4fbea0df275fee0202a3b7d9
Reviewed-on: https://go-review.googlesource.com/c/go/+/696175
Reviewed-by: David Chase
LUCI-TryBot-Result: Go LUCI
---
 src/cmd/compile/internal/amd64/ssa.go | 118 ++++++++------------------
 1 file changed, 35 insertions(+), 83 deletions(-)

diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go
index 25eca691b57dc0..56d0ab28673b16 100644
--- a/src/cmd/compile/internal/amd64/ssa.go
+++ b/src/cmd/compile/internal/amd64/ssa.go
@@ -63,6 +63,7 @@ func loadByTypeAndReg(t *types.Type, r int16) obj.As {
 }

 // storeByTypeAndReg returns the store instruction of the given type/register.
+// It's also used for loading const to a reg.
 func storeByTypeAndReg(t *types.Type, r int16) obj.As {
 	width := t.Size()
 	if t.IsSIMD() {
@@ -75,67 +76,38 @@ func storeByTypeAndReg(t *types.Type, r int16) obj.As {
 		case 8:
 			return x86.AMOVSD
 		}
-	} else {
-		switch width {
-		case 1:
-			return x86.AMOVB
-		case 2:
-			return x86.AMOVW
-		case 4:
-			return x86.AMOVL
-		case 8:
-			return x86.AMOVQ
-		case 16:
-			return x86.AMOVUPS
-		}
-	}
+	}
+	switch width {
+	case 1:
+		return x86.AMOVB
+	case 2:
+		return x86.AMOVW
+	case 4:
+		return x86.AMOVL
+	case 8:
+		return x86.AMOVQ
+	case 16:
+		return x86.AMOVUPS
 	}
 	panic(fmt.Sprintf("bad store type %v", t))
 }

-// storeByType returns the store instruction of the given type.
-func storeByType(t *types.Type) obj.As {
+// moveByTypeAndReg returns the reg->reg move instruction of the given type/registers.
+func moveByTypeAndReg(t *types.Type, dest, src int16) obj.As { width := t.Size() - if t.IsFloat() { - switch width { - case 4: - return x86.AMOVSS - case 8: - return x86.AMOVSD - } - } else if t.IsSIMD() { - return simdMov(width) - } else { - switch width { - case 1: - return x86.AMOVB - case 2: - return x86.AMOVW - case 4: - return x86.AMOVL - case 8: - return x86.AMOVQ - case 16: - return x86.AMOVUPS - } + if t.IsSIMD() { + return simdMov(t.Size()) } - panic(fmt.Sprintf("bad store type %v", t)) -} - -// moveByType returns the reg->reg move instruction of the given type. -func moveByType(from, to *ssa.Value) obj.As { - toT := to.Type - fromR, toR := from.Reg(), to.Reg() - if isFPReg(fromR) && isFPReg(toR) && toT.IsFloat() { + // fp -> fp + if isFPReg(dest) && isFPReg(src) { // Moving the whole sse2 register is faster // than moving just the correct low portion of it. // There is no xmm->xmm move with 1 byte opcode, // so use movups, which has 2 byte opcode. return x86.AMOVUPS } - if toT.IsSIMD() { - return simdMov(toT.Size()) - } - switch toT.Size() { + // gp -> fp, fp -> gp, gp -> gp + switch width { case 1: // Avoids partial register write return x86.AMOVL @@ -147,9 +119,8 @@ func moveByType(from, to *ssa.Value) obj.As { return x86.AMOVQ case 16: return x86.AMOVUPS // int128s are in SSE registers - default: - panic(fmt.Sprintf("bad int register width %d:%v", toT.Size(), toT)) } + panic(fmt.Sprintf("bad int register width %d:%v", t.Size(), t)) } // opregreg emits instructions for @@ -645,7 +616,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { // But this requires a way for regalloc to know that SRC might be // clobbered by this instruction. t := v.RegTmp() - opregreg(s, moveByType(v.Args[1], v), t, v.Args[1].Reg()) + opregreg(s, moveByTypeAndReg(v.Type, t, v.Args[1].Reg()), t, v.Args[1].Reg()) p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG @@ -820,34 +791,15 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { case ssa.OpAMD64MOVSSconst, ssa.OpAMD64MOVSDconst: x := v.Reg() - a := v.Op.Asm() - if x < x86.REG_X0 { // not an FP register - if v.AuxInt == 0 && v.Aux == nil { - opregreg(s, x86.AXORL, x, x) - break - } - c := v.AuxInt - switch v.Type.Size() { - case 4: - a = x86.AMOVL - c = int64(math.Float32bits(float32(math.Float64frombits(uint64(v.AuxInt))))) - case 8: - a = x86.AMOVQ - default: - panic(fmt.Sprintf("unexpected type width for float const into non-float register, %v", v)) - } - p := s.Prog(a) - p.From.Type = obj.TYPE_CONST - p.From.Offset = c - p.To.Type = obj.TYPE_REG - p.To.Reg = x - } else { - p := s.Prog(a) - p.From.Type = obj.TYPE_FCONST - p.From.Val = math.Float64frombits(uint64(v.AuxInt)) - p.To.Type = obj.TYPE_REG - p.To.Reg = x + if !isFPReg(x) && v.AuxInt == 0 && v.Aux == nil { + opregreg(s, x86.AXORL, x, x) + break } + p := s.Prog(storeByTypeAndReg(v.Type, x)) + p.From.Type = obj.TYPE_FCONST + p.From.Val = math.Float64frombits(uint64(v.AuxInt)) + p.To.Type = obj.TYPE_REG + p.To.Reg = x case ssa.OpAMD64MOVQload, ssa.OpAMD64MOVLload, ssa.OpAMD64MOVWload, ssa.OpAMD64MOVBload, ssa.OpAMD64MOVOload, ssa.OpAMD64MOVSSload, ssa.OpAMD64MOVSDload, ssa.OpAMD64MOVBQSXload, ssa.OpAMD64MOVWQSXload, ssa.OpAMD64MOVLQSXload, ssa.OpAMD64MOVBEQload, ssa.OpAMD64MOVBELload: @@ -1245,7 +1197,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { y = simdOrMaskReg(v) } if x != y { - opregreg(s, moveByType(v.Args[0], v), y, x) + opregreg(s, moveByTypeAndReg(v.Type, y, x), y, x) } case ssa.OpLoadReg: if v.Type.IsFlags() { @@ -1270,7 +1222,7 @@ func ssaGenValue(s *ssagen.State, v 
*ssa.Value) {
 		if v.Type.IsSIMD() {
 			r = simdOrMaskReg(v.Args[0])
 		}
-		p := s.Prog(storeByType(v.Type))
+		p := s.Prog(storeByTypeAndReg(v.Type, r))
 		p.From.Type = obj.TYPE_REG
 		p.From.Reg = r
 		ssagen.AddrAuto(&p.To, v)
@@ -1287,7 +1239,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 			// Pass the spill/unspill information along to the assembler, offset by size of return PC pushed on stack.
 			addr := ssagen.SpillSlotAddr(ap, x86.REG_SP, v.Block.Func.Config.PtrSize)
 			s.FuncInfo().AddSpill(
-				obj.RegSpill{Reg: ap.Reg, Addr: addr, Unspill: loadByTypeAndReg(ap.Type, ap.Reg), Spill: storeByType(ap.Type)})
+				obj.RegSpill{Reg: ap.Reg, Addr: addr, Unspill: loadByTypeAndReg(ap.Type, ap.Reg), Spill: storeByTypeAndReg(ap.Type, ap.Reg)})
 		}
 		v.Block.Func.RegArgs = nil
 		ssagen.CheckArgReg(v)
@@ -2182,7 +2134,7 @@ func loadRegResult(s *ssagen.State, f *ssa.Func, t *types.Type, reg int16, n *ir
 }

 func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
-	p = pp.Append(p, storeByType(t), obj.TYPE_REG, reg, 0, obj.TYPE_MEM, 0, n.FrameOffset()+off)
+	p = pp.Append(p, storeByTypeAndReg(t, reg), obj.TYPE_REG, reg, 0, obj.TYPE_MEM, 0, n.FrameOffset()+off)
 	p.To.Name = obj.NAME_PARAM
 	p.To.Sym = n.Linksym()
 	p.Pos = p.Pos.WithNotStmt()

From 7380213a4eca31fb0da3b164a129eb5fd699d796 Mon Sep 17 00:00:00 2001
From: Junyang Shao
Date: Thu, 14 Aug 2025 20:21:37 +0000
Subject: [PATCH 137/139] [dev.simd] cmd/compile: make move/load/store
 dependent only on reg and width

This CL improves on its previous CL by implementing
move/load/storeByRegWidth. It should not have touched the compilation
path of complex128, but as a side effect, the move/load/store of
16-byte SIMD vectors in X0 to X15 are now compiled to MOVUPS instead
of VMOVDQU.

These functions could be used in MOV*const, but this CL does not do
that because we haven't seen problems with them yet. In the future,
if we do see problems, calling these functions to find the right asm
might be handy.

Change-Id: I9b76e65eef8155479d3e288402aa96bc29a4f7cb
Reviewed-on: https://go-review.googlesource.com/c/go/+/696255
LUCI-TryBot-Result: Go LUCI
Reviewed-by: David Chase
---
 src/cmd/compile/internal/amd64/ssa.go | 108 ++++++++++++++++----------
 1 file changed, 67 insertions(+), 41 deletions(-)

diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go
index 56d0ab28673b16..3ae3c6176460fd 100644
--- a/src/cmd/compile/internal/amd64/ssa.go
+++ b/src/cmd/compile/internal/amd64/ssa.go
@@ -47,11 +47,19 @@ func isFPReg(r int16) bool {
 	return x86.REG_X0 <= r && r <= x86.REG_Z31
 }

-// loadByTypeAndReg returns the load instruction of the given type/register.
-func loadByTypeAndReg(t *types.Type, r int16) obj.As {
-	// Avoid partial register write
-	if !t.IsFloat() {
-		switch t.Size() {
+func isKReg(r int16) bool {
+	return x86.REG_K0 <= r && r <= x86.REG_K7
+}
+
+func isLowFPReg(r int16) bool {
+	return x86.REG_X0 <= r && r <= x86.REG_X15
+}
+
+// loadByRegWidth returns the load instruction of the given register of a given width.
+func loadByRegWidth(r int16, width int64) obj.As {
+	// Avoid partial register write for GPR
+	if !isFPReg(r) && !isKReg(r) {
+		switch width {
 		case 1:
 			return x86.AMOVBLZX
 		case 2:
@@ -59,24 +67,35 @@ func loadByTypeAndReg(t *types.Type, r int16) obj.As {
 		}
 	}
 	// Otherwise, there's no difference between load and store opcodes.
-	return storeByTypeAndReg(t, r)
+	return storeByRegWidth(r, width)
 }

-// storeByTypeAndReg returns the store instruction of the given type/register.
+// storeByRegWidth returns the store instruction of the given register of a given width. // It's also used for loading const to a reg. -func storeByTypeAndReg(t *types.Type, r int16) obj.As { - width := t.Size() - if t.IsSIMD() { - return simdMov(width) - } +func storeByRegWidth(r int16, width int64) obj.As { if isFPReg(r) { switch width { case 4: return x86.AMOVSS case 8: return x86.AMOVSD + case 16: + // int128s are in SSE registers + if isLowFPReg(r) { + return x86.AMOVUPS + } else { + return x86.AVMOVDQU + } + case 32: + return x86.AVMOVDQU + case 64: + return x86.AVMOVDQU64 } } + if isKReg(r) { + return x86.AKMOVQ + } + // gp switch width { case 1: return x86.AMOVB @@ -86,25 +105,32 @@ func storeByTypeAndReg(t *types.Type, r int16) obj.As { return x86.AMOVL case 8: return x86.AMOVQ - case 16: - return x86.AMOVUPS } - panic(fmt.Sprintf("bad store type %v", t)) + panic(fmt.Sprintf("bad store reg=%v, width=%d", r, width)) } -// moveByTypeAndReg returns the reg->reg move instruction of the given type/registers. -func moveByTypeAndReg(t *types.Type, dest, src int16) obj.As { - width := t.Size() - if t.IsSIMD() { - return simdMov(t.Size()) - } +// moveByRegsWidth returns the reg->reg move instruction of the given dest/src registers of a given width. +func moveByRegsWidth(dest, src int16, width int64) obj.As { // fp -> fp if isFPReg(dest) && isFPReg(src) { // Moving the whole sse2 register is faster // than moving just the correct low portion of it. // There is no xmm->xmm move with 1 byte opcode, // so use movups, which has 2 byte opcode. - return x86.AMOVUPS + if isLowFPReg(dest) && isLowFPReg(src) && width <= 16 { + return x86.AMOVUPS + } + if width <= 32 { + return x86.AVMOVDQU + } + return x86.AVMOVDQU64 + } + // k -> gp, gp -> k, k -> k + if isKReg(dest) || isKReg(src) { + if isFPReg(dest) || isFPReg(src) { + panic(fmt.Sprintf("bad move, src=%v, dest=%v, width=%d", src, dest, width)) + } + return x86.AKMOVQ } // gp -> fp, fp -> gp, gp -> gp switch width { @@ -118,9 +144,18 @@ func moveByTypeAndReg(t *types.Type, dest, src int16) obj.As { case 8: return x86.AMOVQ case 16: - return x86.AMOVUPS // int128s are in SSE registers + if isLowFPReg(dest) && isLowFPReg(src) { + // int128s are in SSE registers + return x86.AMOVUPS + } else { + return x86.AVMOVDQU + } + case 32: + return x86.AVMOVDQU + case 64: + return x86.AVMOVDQU64 } - panic(fmt.Sprintf("bad int register width %d:%v", t.Size(), t)) + panic(fmt.Sprintf("bad move, src=%v, dest=%v, width=%d", src, dest, width)) } // opregreg emits instructions for @@ -616,7 +651,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { // But this requires a way for regalloc to know that SRC might be // clobbered by this instruction. 
t := v.RegTmp() - opregreg(s, moveByTypeAndReg(v.Type, t, v.Args[1].Reg()), t, v.Args[1].Reg()) + opregreg(s, moveByRegsWidth(t, v.Args[1].Reg(), v.Type.Size()), t, v.Args[1].Reg()) p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG @@ -795,7 +830,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { opregreg(s, x86.AXORL, x, x) break } - p := s.Prog(storeByTypeAndReg(v.Type, x)) + p := s.Prog(storeByRegWidth(x, v.Type.Size())) p.From.Type = obj.TYPE_FCONST p.From.Val = math.Float64frombits(uint64(v.AuxInt)) p.To.Type = obj.TYPE_REG @@ -1197,7 +1232,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { y = simdOrMaskReg(v) } if x != y { - opregreg(s, moveByTypeAndReg(v.Type, y, x), y, x) + opregreg(s, moveByRegsWidth(y, x, v.Type.Size()), y, x) } case ssa.OpLoadReg: if v.Type.IsFlags() { @@ -1205,7 +1240,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { return } r := v.Reg() - p := s.Prog(loadByTypeAndReg(v.Type, r)) + p := s.Prog(loadByRegWidth(r, v.Type.Size())) ssagen.AddrAuto(&p.From, v.Args[0]) p.To.Type = obj.TYPE_REG if v.Type.IsSIMD() { @@ -1222,7 +1257,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { if v.Type.IsSIMD() { r = simdOrMaskReg(v.Args[0]) } - p := s.Prog(storeByTypeAndReg(v.Type, r)) + p := s.Prog(storeByRegWidth(r, v.Type.Size())) p.From.Type = obj.TYPE_REG p.From.Reg = r ssagen.AddrAuto(&p.To, v) @@ -1239,7 +1274,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { // Pass the spill/unspill information along to the assembler, offset by size of return PC pushed on stack. addr := ssagen.SpillSlotAddr(ap, x86.REG_SP, v.Block.Func.Config.PtrSize) s.FuncInfo().AddSpill( - obj.RegSpill{Reg: ap.Reg, Addr: addr, Unspill: loadByTypeAndReg(ap.Type, ap.Reg), Spill: storeByTypeAndReg(ap.Type, ap.Reg)}) + obj.RegSpill{Reg: ap.Reg, Addr: addr, Unspill: loadByRegWidth(ap.Reg, ap.Type.Size()), Spill: storeByRegWidth(ap.Reg, ap.Type.Size())}) } v.Block.Func.RegArgs = nil ssagen.CheckArgReg(v) @@ -2123,7 +2158,7 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) { } func loadRegResult(s *ssagen.State, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog { - p := s.Prog(loadByTypeAndReg(t, reg)) + p := s.Prog(loadByRegWidth(reg, t.Size())) p.From.Type = obj.TYPE_MEM p.From.Name = obj.NAME_AUTO p.From.Sym = n.Linksym() @@ -2134,7 +2169,7 @@ func loadRegResult(s *ssagen.State, f *ssa.Func, t *types.Type, reg int16, n *ir } func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog { - p = pp.Append(p, storeByTypeAndReg(t, reg), obj.TYPE_REG, reg, 0, obj.TYPE_MEM, 0, n.FrameOffset()+off) + p = pp.Append(p, storeByRegWidth(reg, t.Size()), obj.TYPE_REG, reg, 0, obj.TYPE_MEM, 0, n.FrameOffset()+off) p.To.Name = obj.NAME_PARAM p.To.Sym = n.Linksym() p.Pos = p.Pos.WithNotStmt() @@ -2220,12 +2255,3 @@ func simdCheckRegOnly(v *ssa.Value, regStart, regEnd int16) int16 { } return v.Reg() } - -func simdMov(width int64) obj.As { - if width >= 64 { - return x86.AVMOVDQU64 - } else if width >= 16 { - return x86.AVMOVDQU - } - return x86.AKMOVQ -} From 9a934d5080ee103c43e92c35e213b97a92b8bd4a Mon Sep 17 00:00:00 2001 From: David Chase Date: Thu, 14 Aug 2025 17:26:15 -0400 Subject: [PATCH 138/139] [dev.simd] cmd/compile, simd: added methods for "float" GetElem This also required a "always use operation with least OverrideBase" filter in choosing the machine instructions. 
The order of generated HW operations is slightly modified because the Float version of GetElem appears earlier in the sorted operations list, though it is not chosen to generate the HW Op. Change-Id: I95fa67afca9c8b6f4f18941fdcaf69afdad8055b Reviewed-on: https://go-review.googlesource.com/c/go/+/696375 Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui --- src/cmd/compile/internal/amd64/simdssa.go | 8 ++-- .../compile/internal/ssa/_gen/simdAMD64.rules | 2 + .../compile/internal/ssa/_gen/simdAMD64ops.go | 4 +- .../internal/ssa/_gen/simdgenericOps.go | 2 + src/cmd/compile/internal/ssa/opGen.go | 42 ++++++++++++------- src/cmd/compile/internal/ssa/rewriteAMD64.go | 6 +++ .../compile/internal/ssagen/simdintrinsics.go | 2 + src/simd/_gen/simdgen/gen_simdMachineOps.go | 37 +++++++++++++--- src/simd/_gen/simdgen/godefs.go | 2 +- src/simd/_gen/simdgen/ops/Moves/go.yaml | 14 +++++++ src/simd/ops_amd64.go | 14 +++++++ 11 files changed, 106 insertions(+), 27 deletions(-) diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 3ec8b484fb8a6d..466e6c9cc74ff0 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -1128,10 +1128,10 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPINSRW128: p = simdVgpvImm8(s, v) - case ssa.OpAMD64VPEXTRB128, - ssa.OpAMD64VPEXTRW128, - ssa.OpAMD64VPEXTRD128, - ssa.OpAMD64VPEXTRQ128: + case ssa.OpAMD64VPEXTRD128, + ssa.OpAMD64VPEXTRQ128, + ssa.OpAMD64VPEXTRB128, + ssa.OpAMD64VPEXTRW128: p = simdVgpImm8(s, v) case ssa.OpAMD64VGF2P8AFFINEINVQBMasked128, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 9670f035ba880b..d64f36cf74e9c2 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -524,6 +524,8 @@ (GaloisFieldMulMaskedUint8x16 x y mask) => (VGF2P8MULBMasked128 x y (VPMOVVec8x16ToM mask)) (GaloisFieldMulMaskedUint8x32 x y mask) => (VGF2P8MULBMasked256 x y (VPMOVVec8x32ToM mask)) (GaloisFieldMulMaskedUint8x64 x y mask) => (VGF2P8MULBMasked512 x y (VPMOVVec8x64ToM mask)) +(GetElemFloat32x4 ...) => (VPEXTRD128 ...) +(GetElemFloat64x2 ...) => (VPEXTRQ128 ...) (GetElemInt8x16 ...) => (VPEXTRB128 ...) (GetElemInt16x8 ...) => (VPEXTRW128 ...) (GetElemInt32x4 ...) => (VPEXTRD128 ...) 
diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 61abaa5e9781dc..ba73453ffe1298 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -978,10 +978,10 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VGF2P8AFFINEQBMasked128", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VGF2P8AFFINEQBMasked256", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VGF2P8AFFINEQBMasked512", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPEXTRB128", argLength: 1, reg: wgp, asm: "VPEXTRB", aux: "UInt8", commutative: false, typ: "int8", resultInArg0: false}, - {name: "VPEXTRW128", argLength: 1, reg: wgp, asm: "VPEXTRW", aux: "UInt8", commutative: false, typ: "int16", resultInArg0: false}, {name: "VPEXTRD128", argLength: 1, reg: vgp, asm: "VPEXTRD", aux: "UInt8", commutative: false, typ: "int32", resultInArg0: false}, {name: "VPEXTRQ128", argLength: 1, reg: vgp, asm: "VPEXTRQ", aux: "UInt8", commutative: false, typ: "int64", resultInArg0: false}, + {name: "VPEXTRB128", argLength: 1, reg: wgp, asm: "VPEXTRB", aux: "UInt8", commutative: false, typ: "int8", resultInArg0: false}, + {name: "VPEXTRW128", argLength: 1, reg: wgp, asm: "VPEXTRW", aux: "UInt8", commutative: false, typ: "int16", resultInArg0: false}, {name: "VEXTRACTF128128", argLength: 1, reg: v11, asm: "VEXTRACTF128", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VEXTRACTF64X4256", argLength: 1, reg: w11, asm: "VEXTRACTF64X4", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VEXTRACTI128128", argLength: 1, reg: v11, asm: "VEXTRACTI128", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 4f2b1a91215891..d98c0d8152acef 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -1720,6 +1720,8 @@ func simdGenericOps() []opData { {name: "GaloisFieldAffineTransformUint8x16", argLength: 2, commutative: false, aux: "UInt8"}, {name: "GaloisFieldAffineTransformUint8x32", argLength: 2, commutative: false, aux: "UInt8"}, {name: "GaloisFieldAffineTransformUint8x64", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "GetElemFloat32x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "GetElemFloat64x2", argLength: 1, commutative: false, aux: "UInt8"}, {name: "GetElemInt8x16", argLength: 1, commutative: false, aux: "UInt8"}, {name: "GetElemInt16x8", argLength: 1, commutative: false, aux: "UInt8"}, {name: "GetElemInt32x4", argLength: 1, commutative: false, aux: "UInt8"}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 60ef3853524733..b45cccd96bbb22 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -2201,10 +2201,10 @@ const ( OpAMD64VGF2P8AFFINEQBMasked128 OpAMD64VGF2P8AFFINEQBMasked256 OpAMD64VGF2P8AFFINEQBMasked512 - OpAMD64VPEXTRB128 - OpAMD64VPEXTRW128 OpAMD64VPEXTRD128 OpAMD64VPEXTRQ128 + OpAMD64VPEXTRB128 + OpAMD64VPEXTRW128 OpAMD64VEXTRACTF128128 
OpAMD64VEXTRACTF64X4256 OpAMD64VEXTRACTI128128 @@ -6352,6 +6352,8 @@ const ( OpGaloisFieldAffineTransformUint8x16 OpGaloisFieldAffineTransformUint8x32 OpGaloisFieldAffineTransformUint8x64 + OpGetElemFloat32x4 + OpGetElemFloat64x2 OpGetElemInt8x16 OpGetElemInt16x8 OpGetElemInt32x4 @@ -34154,13 +34156,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPEXTRB128", + name: "VPEXTRD128", auxType: auxUInt8, argLen: 1, - asm: x86.AVPEXTRB, + asm: x86.AVPEXTRD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -34168,13 +34170,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPEXTRW128", + name: "VPEXTRQ128", auxType: auxUInt8, argLen: 1, - asm: x86.AVPEXTRW, + asm: x86.AVPEXTRQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -34182,13 +34184,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPEXTRD128", + name: "VPEXTRB128", auxType: auxUInt8, argLen: 1, - asm: x86.AVPEXTRD, + asm: x86.AVPEXTRB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -34196,13 +34198,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPEXTRQ128", + name: "VPEXTRW128", auxType: auxUInt8, argLen: 1, - asm: x86.AVPEXTRQ, + asm: x86.AVPEXTRW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -72920,6 +72922,18 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "GetElemFloat32x4", + auxType: auxUInt8, + argLen: 1, + generic: true, + }, + { + name: "GetElemFloat64x2", + auxType: auxUInt8, + argLen: 1, + generic: true, + }, { name: "GetElemInt8x16", auxType: auxUInt8, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 6e5e212fbeb00b..69393014c78a85 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -2186,6 +2186,12 @@ func rewriteValueAMD64(v *Value) bool { case OpGetClosurePtr: v.Op = OpAMD64LoweredGetClosurePtr return true + case OpGetElemFloat32x4: + v.Op = OpAMD64VPEXTRD128 + return true + case OpGetElemFloat64x2: + v.Op = OpAMD64VPEXTRQ128 + return true case OpGetElemInt16x8: v.Op = OpAMD64VPEXTRW128 return true diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 682a37e91ba274..be3d917f8ff704 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -536,6 +536,8 @@ func simdIntrinsics(addF 
func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint8x16.GaloisFieldMulMasked", opLen3(ssa.OpGaloisFieldMulMaskedUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x32.GaloisFieldMulMasked", opLen3(ssa.OpGaloisFieldMulMaskedUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x64.GaloisFieldMulMasked", opLen3(ssa.OpGaloisFieldMulMaskedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.GetElem", opLen1Imm8(ssa.OpGetElemFloat32x4, types.Types[types.TFLOAT32], 0), sys.AMD64) + addF(simdPackage, "Float64x2.GetElem", opLen1Imm8(ssa.OpGetElemFloat64x2, types.Types[types.TFLOAT64], 0), sys.AMD64) addF(simdPackage, "Int8x16.GetElem", opLen1Imm8(ssa.OpGetElemInt8x16, types.Types[types.TINT8], 0), sys.AMD64) addF(simdPackage, "Int16x8.GetElem", opLen1Imm8(ssa.OpGetElemInt16x8, types.Types[types.TINT16], 0), sys.AMD64) addF(simdPackage, "Int32x4.GetElem", opLen1Imm8(ssa.OpGetElemInt32x4, types.Types[types.TINT32], 0), sys.AMD64) diff --git a/src/simd/_gen/simdgen/gen_simdMachineOps.go b/src/simd/_gen/simdgen/gen_simdMachineOps.go index 64918e5543a3f3..f4d91a0c8ec2e0 100644 --- a/src/simd/_gen/simdgen/gen_simdMachineOps.go +++ b/src/simd/_gen/simdgen/gen_simdMachineOps.go @@ -46,22 +46,47 @@ func writeSIMDMachineOps(ops []Operation) *bytes.Buffer { OpsData []opData OpsDataImm []opData } - seen := map[string]struct{}{} + regInfoSet := map[string]bool{ "v11": true, "v21": true, "v2k": true, "v2kv": true, "v2kk": true, "vkv": true, "v31": true, "v3kv": true, "vgpv": true, "vgp": true, "vfpv": true, "vfpkv": true, "w11": true, "w21": true, "w2k": true, "w2kw": true, "w2kk": true, "wkw": true, "w31": true, "w3kw": true, "wgpw": true, "wgp": true, "wfpw": true, "wfpkw": true} opsData := make([]opData, 0) opsDataImm := make([]opData, 0) + + // Determine the "best" version of an instruction to use + best := make(map[string]Operation) + var mOpOrder []string + countOverrides := func(s []Operand) int { + a := 0 + for _, o := range s { + if o.OverwriteBase != nil { + a++ + } + } + return a + } for _, op := range ops { - shapeIn, shapeOut, maskType, _, gOp := op.shape() + _, _, maskType, _, gOp := op.shape() asm := machineOpName(maskType, gOp) + other, ok := best[asm] + if !ok { + best[asm] = op + mOpOrder = append(mOpOrder, asm) + continue + } + // see if "op" is better than "other" + if countOverrides(op.In)+countOverrides(op.Out) < countOverrides(other.In)+countOverrides(other.Out) { + best[asm] = op + } + } + + for _, asm := range mOpOrder { + op := best[asm] + shapeIn, shapeOut, _, _, gOp := op.shape() // TODO: all our masked operations are now zeroing, we need to generate machine ops with merging masks, maybe copy // one here with a name suffix "Merging". The rewrite rules will need them. - if _, ok := seen[asm]; ok { - continue - } - seen[asm] = struct{}{} + regInfo, err := op.regShape() if err != nil { panic(err) diff --git a/src/simd/_gen/simdgen/godefs.go b/src/simd/_gen/simdgen/godefs.go index 0022140aaab177..22decb9d7e69c9 100644 --- a/src/simd/_gen/simdgen/godefs.go +++ b/src/simd/_gen/simdgen/godefs.go @@ -67,7 +67,7 @@ type rawOperation struct { NoTypes *string // If non-nil, all generation in gen_simdGenericOps and gen_simdrules will be skipped. NoGenericOps *string - // If non-nil, this string will be attached to the machine ssa op name. + // If non-nil, this string will be attached to the machine ssa op name. E.g. 
"const" SSAVariant *string } diff --git a/src/simd/_gen/simdgen/ops/Moves/go.yaml b/src/simd/_gen/simdgen/ops/Moves/go.yaml index 71981c12af7d12..0e5997deebbc35 100644 --- a/src/simd/_gen/simdgen/ops/Moves/go.yaml +++ b/src/simd/_gen/simdgen/ops/Moves/go.yaml @@ -45,6 +45,20 @@ base: $b bits: $e +- go: GetElem + asm: "VPEXTR[DQ]" + in: + - class: vreg + base: int + elemBits: $e + OverwriteBase: float + - *imm + out: + - class: greg + base: int + bits: $e + OverwriteBase: float + - go: "SetHi|SetLo" asm: "VINSERTI128|VINSERTI64X4" inVariant: [] diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index d78bb699eaac23..8da3cd18175aba 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -3470,6 +3470,20 @@ func (x Uint8x64) GaloisFieldMulMasked(y Uint8x64, mask Mask8x64) Uint8x64 /* GetElem */ +// GetElem retrieves a single constant-indexed element's value. +// +// index results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPEXTRD, CPU Feature: AVX +func (x Float32x4) GetElem(index uint8) float32 + +// GetElem retrieves a single constant-indexed element's value. +// +// index results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPEXTRQ, CPU Feature: AVX +func (x Float64x2) GetElem(index uint8) float64 + // GetElem retrieves a single constant-indexed element's value. // // index results in better performance when it's a constant, a non-constant value will be translated into a jump table. From 8ccd6c20347dfb6095a572ec3dc43f19c60f622c Mon Sep 17 00:00:00 2001 From: David Chase Date: Mon, 18 Aug 2025 15:04:45 -0400 Subject: [PATCH 139/139] [dev.simd] simd, cmd/compile: mark BLEND instructions as not-zero-mask Change-Id: Ida9f29423d62a25be41dcf637ffb9275b7cae642 Reviewed-on: https://go-review.googlesource.com/c/go/+/697055 Reviewed-by: Cherry Mui LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/simdssa.go | 4 ---- src/simd/_gen/simdgen/ops/Moves/go.yaml | 2 ++ src/simd/simd_test.go | 22 ++++++++++++++++++++++ 3 files changed, 24 insertions(+), 4 deletions(-) diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 466e6c9cc74ff0..1ab4c88cba7870 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -1654,10 +1654,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPXORQMasked128, ssa.OpAMD64VPXORQMasked256, ssa.OpAMD64VPXORQMasked512, - ssa.OpAMD64VPBLENDMBMasked512, - ssa.OpAMD64VPBLENDMWMasked512, - ssa.OpAMD64VPBLENDMDMasked512, - ssa.OpAMD64VPBLENDMQMasked512, ssa.OpAMD64VPSLLWMasked128const, ssa.OpAMD64VPSLLWMasked256const, ssa.OpAMD64VPSLLWMasked512const, diff --git a/src/simd/_gen/simdgen/ops/Moves/go.yaml b/src/simd/_gen/simdgen/ops/Moves/go.yaml index 0e5997deebbc35..d4d1b4b9bd3445 100644 --- a/src/simd/_gen/simdgen/ops/Moves/go.yaml +++ b/src/simd/_gen/simdgen/ops/Moves/go.yaml @@ -253,6 +253,7 @@ # That means the signature is wrong. 
- go: blend asm: VPBLENDVB + zeroing: false in: - &v go: $t @@ -269,6 +270,7 @@ # For AVX512 - go: blend asm: VPBLENDM[BWDQ] + zeroing: false in: - &v go: $t diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index 831dc4f268bdfb..ce982409ea9fba 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -397,6 +397,28 @@ func TestMergeFloat(t *testing.T) { checkSlices[float64](t, s, []float64{4, 2, 3, 4}) } +func TestMergeFloat512(t *testing.T) { + if !simd.HasAVX512() { + t.Skip("Test requires HasAVX512, not available on this hardware") + return + } + a := simd.LoadFloat64x8Slice([]float64{1, 2, 3, 4, 5, 6, 7, 8}) + b := simd.LoadFloat64x8Slice([]float64{8, 7, 6, 5, 4, 2, 3, 1}) + g := a.Greater(b) + k := make([]int64, 8, 8) + g.AsInt64x8().StoreSlice(k) + checkSlices[int64](t, k, []int64{0, 0, 0, 0, -1, -1, -1, -1}) + c := a.Merge(b, g) + d := a.Masked(g) + + s := make([]float64, 8, 8) + c.StoreSlice(s) + checkSlices[float64](t, s, []float64{8, 7, 6, 5, 5, 6, 7, 8}) + + d.StoreSlice(s) + checkSlices[float64](t, s, []float64{0, 0, 0, 0, 5, 6, 7, 8}) +} + var ro uint8 = 2 func TestRotateAllVariable(t *testing.T) {